author     Hilko Bengen <bengen@debian.org>   2014-06-07 12:02:12 +0200
committer  Hilko Bengen <bengen@debian.org>   2014-06-07 12:02:12 +0200
commit     d5ed89b946297270ec28abf44bef2371a06f1f4f
tree       ce2d945e4dde69af90bd9905a70d8d27f4936776
download   elasticsearch-d5ed89b946297270ec28abf44bef2371a06f1f4f.tar.gz
Imported Upstream version 1.0.3  (tag: upstream/1.0.3)

Diffstat:
-rw-r--r--  .gitignore | 36
-rw-r--r--  .settings/org.eclipse.core.resources.prefs | 2
-rw-r--r--  .settings/org.eclipse.jdt.core.prefs | 10
-rw-r--r--  .settings/org.eclipse.jdt.ui.prefs | 11
-rw-r--r--  .travis.yml | 12
-rw-r--r--  CONTRIBUTING.md | 107
-rw-r--r--  LICENSE.txt | 202
-rw-r--r--  NOTICE.txt | 5
-rw-r--r--  README.textile | 227
-rw-r--r--  TESTING.asciidoc | 207
-rwxr-xr-x  bin/elasticsearch | 214
-rw-r--r--  bin/elasticsearch-service-mgr.exe | bin 0 -> 104448 bytes
-rw-r--r--  bin/elasticsearch-service-x64.exe | bin 0 -> 103936 bytes
-rw-r--r--  bin/elasticsearch-service-x86.exe | bin 0 -> 80896 bytes
-rw-r--r--  bin/elasticsearch.bat | 79
-rw-r--r--  bin/elasticsearch.in.sh | 64
-rwxr-xr-x  bin/plugin | 49
-rw-r--r--  bin/plugin.bat | 22
-rw-r--r--  bin/service.bat | 231
-rw-r--r--  config/elasticsearch.yml | 377
-rw-r--r--  config/logging.yml | 56
-rw-r--r--  core-signatures.txt | 35
-rw-r--r--  dev-tools/ElasticSearch.launch | 17
-rw-r--r--  dev-tools/build_randomization.rb | 116
-rw-r--r--  dev-tools/build_release.py | 607
-rw-r--r--  dev-tools/client_tests_urls.prop | 21
-rw-r--r--  dev-tools/elasticsearch_license_header.txt | 16
-rw-r--r--  dev-tools/es_release_notes.pl | 186
-rw-r--r--  dev-tools/extract_party_license.rb | 54
-rw-r--r--  dev-tools/license_header_definition.xml | 13
-rw-r--r--  dev-tools/tests.policy | 53
-rw-r--r--  dev-tools/upload-s3.py | 67
-rw-r--r--  docs/README.md | 4
-rw-r--r--  docs/community/clients.asciidoc | 192
-rw-r--r--  docs/community/frontends.asciidoc | 17
-rw-r--r--  docs/community/github.asciidoc | 6
-rw-r--r--  docs/community/index.asciidoc | 17
-rw-r--r--  docs/community/integrations.asciidoc | 81
-rw-r--r--  docs/community/misc.asciidoc | 15
-rw-r--r--  docs/community/monitoring.asciidoc | 31
-rw-r--r--  docs/groovy-api/anatomy.asciidoc | 102
-rw-r--r--  docs/groovy-api/client.asciidoc | 59
-rw-r--r--  docs/groovy-api/count.asciidoc | 22
-rw-r--r--  docs/groovy-api/delete.asciidoc | 15
-rw-r--r--  docs/groovy-api/get.asciidoc | 18
-rw-r--r--  docs/groovy-api/index.asciidoc | 51
-rw-r--r--  docs/groovy-api/index_.asciidoc | 31
-rw-r--r--  docs/groovy-api/search.asciidoc | 115
-rw-r--r--  docs/java-api/bulk.asciidoc | 38
-rw-r--r--  docs/java-api/client.asciidoc | 187
-rw-r--r--  docs/java-api/count.asciidoc | 38
-rw-r--r--  docs/java-api/delete-by-query.asciidoc | 21
-rw-r--r--  docs/java-api/delete.asciidoc | 40
-rw-r--r--  docs/java-api/facets.asciidoc | 494
-rw-r--r--  docs/java-api/get.asciidoc | 38
-rw-r--r--  docs/java-api/index.asciidoc | 61
-rw-r--r--  docs/java-api/index_.asciidoc | 205
-rw-r--r--  docs/java-api/percolate.asciidoc | 50
-rw-r--r--  docs/java-api/query-dsl-filters.asciidoc | 462
-rw-r--r--  docs/java-api/query-dsl-queries.asciidoc | 450
-rw-r--r--  docs/java-api/search.asciidoc | 140
-rw-r--r--  docs/javascript/index.asciidoc | 138
-rw-r--r--  docs/perl/index.asciidoc | 107
-rw-r--r--  docs/python/index.asciidoc | 90
-rw-r--r--  docs/reference/analysis.asciidoc | 77
-rw-r--r--  docs/reference/analysis/analyzers.asciidoc | 71
-rw-r--r--  docs/reference/analysis/analyzers/custom-analyzer.asciidoc | 52
-rw-r--r--  docs/reference/analysis/analyzers/keyword-analyzer.asciidoc | 7
-rw-r--r--  docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 21
-rw-r--r--  docs/reference/analysis/analyzers/pattern-analyzer.asciidoc | 130
-rw-r--r--  docs/reference/analysis/analyzers/simple-analyzer.asciidoc | 6
-rw-r--r--  docs/reference/analysis/analyzers/snowball-analyzer.asciidoc | 64
-rw-r--r--  docs/reference/analysis/analyzers/standard-analyzer.asciidoc | 27
-rw-r--r--  docs/reference/analysis/analyzers/stop-analyzer.asciidoc | 22
-rw-r--r--  docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc | 6
-rw-r--r--  docs/reference/analysis/charfilters.asciidoc | 16
-rw-r--r--  docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc | 5
-rw-r--r--  docs/reference/analysis/charfilters/mapping-charfilter.asciidoc | 38
-rw-r--r--  docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc | 37
-rw-r--r--  docs/reference/analysis/icu-plugin.asciidoc | 220
-rw-r--r--  docs/reference/analysis/tokenfilters.asciidoc | 74
-rw-r--r--  docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc | 7
-rw-r--r--  docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc | 61
-rw-r--r--  docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc | 48
-rw-r--r--  docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc | 16
-rw-r--r--  docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc | 16
-rw-r--r--  docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc | 28
-rw-r--r--  docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc | 115
-rw-r--r--  docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc | 49
-rw-r--r--  docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc | 34
-rw-r--r--  docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc | 26
-rw-r--r--  docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc | 6
-rw-r--r--  docs/reference/analysis/tokenfilters/length-tokenfilter.asciidoc | 16
-rw-r--r--  docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc | 32
-rw-r--r--  docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc | 37
-rw-r--r--  docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc | 15
-rw-r--r--  docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc | 15
-rw-r--r--  docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc | 134
-rw-r--r--  docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc | 9
-rw-r--r--  docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc | 5
-rw-r--r--  docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc | 15
-rw-r--r--  docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc | 4
-rw-r--r--  docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc | 36
-rw-r--r--  docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc | 33
-rw-r--r--  docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc | 7
-rw-r--r--  docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc | 34
-rw-r--r--  docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc | 78
-rw-r--r--  docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc | 35
-rw-r--r--  docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc | 123
-rw-r--r--  docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc | 4
-rw-r--r--  docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc | 10
-rw-r--r--  docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc | 7
-rw-r--r--  docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc | 80
-rw-r--r--  docs/reference/analysis/tokenizers.asciidoc | 30
-rw-r--r--  docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc | 80
-rw-r--r--  docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc | 15
-rw-r--r--  docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc | 7
-rw-r--r--  docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc | 15
-rw-r--r--  docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc | 57
-rw-r--r--  docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc | 32
-rw-r--r--  docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc | 29
-rw-r--r--  docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc | 18
-rw-r--r--  docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc | 16
-rw-r--r--  docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc | 4
-rw-r--r--  docs/reference/api-conventions.asciidoc | 269
-rw-r--r--  docs/reference/cat.asciidoc | 117
-rw-r--r--  docs/reference/cat/alias.asciidoc | 22
-rw-r--r--  docs/reference/cat/allocation.asciidoc | 17
-rw-r--r--  docs/reference/cat/count.asciidoc | 16
-rw-r--r--  docs/reference/cat/health.asciidoc | 61
-rw-r--r--  docs/reference/cat/indices.asciidoc | 57
-rw-r--r--  docs/reference/cat/master.asciidoc | 27
-rw-r--r--  docs/reference/cat/nodes.asciidoc | 48
-rw-r--r--  docs/reference/cat/pending_tasks.asciidoc | 19
-rw-r--r--  docs/reference/cat/recovery.asciidoc | 57
-rw-r--r--  docs/reference/cat/shards.asciidoc | 90
-rw-r--r--  docs/reference/cat/thread_pool.asciidoc | 44
-rw-r--r--  docs/reference/cluster.asciidoc | 50
-rw-r--r--  docs/reference/cluster/health.asciidoc | 87
-rw-r--r--  docs/reference/cluster/nodes-hot-threads.asciidoc | 16
-rw-r--r--  docs/reference/cluster/nodes-info.asciidoc | 93
-rw-r--r--  docs/reference/cluster/nodes-shutdown.asciidoc | 57
-rw-r--r--  docs/reference/cluster/nodes-stats.asciidoc | 97
-rw-r--r--  docs/reference/cluster/pending.asciidoc | 44
-rw-r--r--  docs/reference/cluster/reroute.asciidoc | 68
-rw-r--r--  docs/reference/cluster/state.asciidoc | 67
-rw-r--r--  docs/reference/cluster/stats.asciidoc | 168
-rw-r--r--  docs/reference/cluster/update-settings.asciidoc | 209
-rw-r--r--  docs/reference/docs.asciidoc | 44
-rw-r--r--  docs/reference/docs/bulk-udp.asciidoc | 57
-rw-r--r--  docs/reference/docs/bulk.asciidoc | 182
-rw-r--r--  docs/reference/docs/delete-by-query.asciidoc | 150
-rw-r--r--  docs/reference/docs/delete.asciidoc | 142
-rw-r--r--  docs/reference/docs/get.asciidoc | 214
-rw-r--r--  docs/reference/docs/index_.asciidoc | 362
-rw-r--r--  docs/reference/docs/multi-get.asciidoc | 176
-rw-r--r--  docs/reference/docs/multi-termvectors.asciidoc | 93
-rw-r--r--  docs/reference/docs/termvectors.asciidoc | 224
-rw-r--r--  docs/reference/docs/update.asciidoc | 176
-rw-r--r--  docs/reference/glossary.asciidoc | 190
-rw-r--r--  docs/reference/images/Exponential.png | bin 0 -> 3068 bytes
-rw-r--r--  docs/reference/images/Gaussian.png | bin 0 -> 3323 bytes
-rw-r--r--  docs/reference/images/Linear.png | bin 0 -> 3363 bytes
-rw-r--r--  docs/reference/images/service-manager-win.png | bin 0 -> 29772 bytes
-rw-r--r--  docs/reference/index-modules.asciidoc | 81
-rw-r--r--  docs/reference/index-modules/allocation.asciidoc | 136
-rw-r--r--  docs/reference/index-modules/analysis.asciidoc | 18
-rw-r--r--  docs/reference/index-modules/cache.asciidoc | 56
-rw-r--r--  docs/reference/index-modules/codec.asciidoc | 278
-rw-r--r--  docs/reference/index-modules/fielddata.asciidoc | 270
-rw-r--r--  docs/reference/index-modules/mapper.asciidoc | 39
-rw-r--r--  docs/reference/index-modules/merge.asciidoc | 215
-rw-r--r--  docs/reference/index-modules/similarity.asciidoc | 140
-rw-r--r--  docs/reference/index-modules/slowlog.asciidoc | 87
-rw-r--r--  docs/reference/index-modules/store.asciidoc | 122
-rw-r--r--  docs/reference/index-modules/translog.asciidoc | 28
-rw-r--r--  docs/reference/index.asciidoc | 35
-rw-r--r--  docs/reference/indices.asciidoc | 106
-rw-r--r--  docs/reference/indices/aliases.asciidoc | 348
-rw-r--r--  docs/reference/indices/analyze.asciidoc | 50
-rw-r--r--  docs/reference/indices/clearcache.asciidoc | 34
-rw-r--r--  docs/reference/indices/create-index.asciidoc | 106
-rw-r--r--  docs/reference/indices/delete-index.asciidoc | 19
-rw-r--r--  docs/reference/indices/delete-mapping.asciidoc | 25
-rw-r--r--  docs/reference/indices/flush.asciidoc | 27
-rw-r--r--  docs/reference/indices/gateway-snapshot.asciidoc | 29
-rw-r--r--  docs/reference/indices/get-field-mapping.asciidoc | 140
-rw-r--r--  docs/reference/indices/get-mapping.asciidoc | 37
-rw-r--r--  docs/reference/indices/get-settings.asciidoc | 50
-rw-r--r--  docs/reference/indices/indices-exists.asciidoc | 12
-rw-r--r--  docs/reference/indices/open-close.asciidoc | 29
-rw-r--r--  docs/reference/indices/optimize.asciidoc | 51
-rw-r--r--  docs/reference/indices/put-mapping.asciidoc | 83
-rw-r--r--  docs/reference/indices/refresh.asciidoc | 26
-rw-r--r--  docs/reference/indices/segments.asciidoc | 76
-rw-r--r--  docs/reference/indices/stats.asciidoc | 76
-rw-r--r--  docs/reference/indices/status.asciidoc | 27
-rw-r--r--  docs/reference/indices/templates.asciidoc | 155
-rw-r--r--  docs/reference/indices/types-exists.asciidoc | 12
-rw-r--r--  docs/reference/indices/update-settings.asciidoc | 226
-rw-r--r--  docs/reference/indices/warmers.asciidoc | 183
-rw-r--r--  docs/reference/mapping.asciidoc | 77
-rw-r--r--  docs/reference/mapping/conf-mappings.asciidoc | 19
-rw-r--r--  docs/reference/mapping/date-format.asciidoc | 206
-rw-r--r--  docs/reference/mapping/dynamic-mapping.asciidoc | 65
-rw-r--r--  docs/reference/mapping/fields.asciidoc | 33
-rw-r--r--  docs/reference/mapping/fields/all-field.asciidoc | 78
-rw-r--r--  docs/reference/mapping/fields/analyzer-field.asciidoc | 41
-rw-r--r--  docs/reference/mapping/fields/boost-field.asciidoc | 72
-rw-r--r--  docs/reference/mapping/fields/id-field.asciidoc | 52
-rw-r--r--  docs/reference/mapping/fields/index-field.asciidoc | 15
-rw-r--r--  docs/reference/mapping/fields/parent-field.asciidoc | 21
-rw-r--r--  docs/reference/mapping/fields/routing-field.asciidoc | 69
-rw-r--r--  docs/reference/mapping/fields/size-field.asciidoc | 26
-rw-r--r--  docs/reference/mapping/fields/source-field.asciidoc | 41
-rw-r--r--  docs/reference/mapping/fields/timestamp-field.asciidoc | 82
-rw-r--r--  docs/reference/mapping/fields/ttl-field.asciidoc | 70
-rw-r--r--  docs/reference/mapping/fields/type-field.asciidoc | 31
-rw-r--r--  docs/reference/mapping/fields/uid-field.asciidoc | 11
-rw-r--r--  docs/reference/mapping/meta.asciidoc | 25
-rw-r--r--  docs/reference/mapping/types.asciidoc | 24
-rw-r--r--  docs/reference/mapping/types/array-type.asciidoc | 74
-rw-r--r--  docs/reference/mapping/types/attachment-type.asciidoc | 90
-rw-r--r--  docs/reference/mapping/types/core-types.asciidoc | 754
-rw-r--r--  docs/reference/mapping/types/geo-point-type.asciidoc | 207
-rw-r--r--  docs/reference/mapping/types/geo-shape-type.asciidoc | 232
-rw-r--r--  docs/reference/mapping/types/ip-type.asciidoc | 36
-rw-r--r--  docs/reference/mapping/types/nested-type.asciidoc | 81
-rw-r--r--  docs/reference/mapping/types/object-type.asciidoc | 244
-rw-r--r--  docs/reference/mapping/types/root-object-type.asciidoc | 224
-rw-r--r--  docs/reference/migration/migrate_1_0.asciidoc | 376
-rw-r--r--  docs/reference/modules.asciidoc | 37
-rw-r--r--  docs/reference/modules/advanced-scripting.asciidoc | 184
-rw-r--r--  docs/reference/modules/cluster.asciidoc | 239
-rw-r--r--  docs/reference/modules/discovery.asciidoc | 30
-rw-r--r--  docs/reference/modules/discovery/azure.asciidoc | 6
-rw-r--r--  docs/reference/modules/discovery/ec2.asciidoc | 6
-rw-r--r--  docs/reference/modules/discovery/gce.asciidoc | 6
-rw-r--r--  docs/reference/modules/discovery/zen.asciidoc | 161
-rw-r--r--  docs/reference/modules/gateway.asciidoc | 75
-rw-r--r--  docs/reference/modules/gateway/fs.asciidoc | 39
-rw-r--r--  docs/reference/modules/gateway/hadoop.asciidoc | 36
-rw-r--r--  docs/reference/modules/gateway/local.asciidoc | 57
-rw-r--r--  docs/reference/modules/gateway/s3.asciidoc | 51
-rw-r--r--  docs/reference/modules/http.asciidoc | 51
-rw-r--r--  docs/reference/modules/indices.asciidoc | 76
-rw-r--r--  docs/reference/modules/memcached.asciidoc | 69
-rw-r--r--  docs/reference/modules/network.asciidoc | 89
-rw-r--r--  docs/reference/modules/node.asciidoc | 32
-rw-r--r--  docs/reference/modules/plugins.asciidoc | 283
-rw-r--r--  docs/reference/modules/scripting.asciidoc | 316
-rw-r--r--  docs/reference/modules/snapshots.asciidoc | 207
-rw-r--r--  docs/reference/modules/threadpool.asciidoc | 117
-rw-r--r--  docs/reference/modules/thrift.asciidoc | 25
-rw-r--r--  docs/reference/modules/transport.asciidoc | 49
-rw-r--r--  docs/reference/modules/tribe.asciidoc | 58
-rw-r--r--  docs/reference/query-dsl.asciidoc | 42
-rw-r--r--  docs/reference/query-dsl/filters.asciidoc | 104
-rw-r--r--  docs/reference/query-dsl/filters/and-filter.asciidoc | 69
-rw-r--r--  docs/reference/query-dsl/filters/bool-filter.asciidoc | 49
-rw-r--r--  docs/reference/query-dsl/filters/exists-filter.asciidoc | 20
-rw-r--r--  docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc | 240
-rw-r--r--  docs/reference/query-dsl/filters/geo-distance-filter.asciidoc | 184
-rw-r--r--  docs/reference/query-dsl/filters/geo-distance-range-filter.asciidoc | 30
-rw-r--r--  docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc | 126
-rw-r--r--  docs/reference/query-dsl/filters/geo-shape-filter.asciidoc | 121
-rw-r--r--  docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc | 62
-rw-r--r--  docs/reference/query-dsl/filters/has-child-filter.asciidoc | 57
-rw-r--r--  docs/reference/query-dsl/filters/has-parent-filter.asciidoc | 60
-rw-r--r--  docs/reference/query-dsl/filters/ids-filter.asciidoc | 20
-rw-r--r--  docs/reference/query-dsl/filters/indices-filter.asciidoc | 37
-rw-r--r--  docs/reference/query-dsl/filters/limit-filter.asciidoc | 19
-rw-r--r--  docs/reference/query-dsl/filters/match-all-filter.asciidoc | 15
-rw-r--r--  docs/reference/query-dsl/filters/missing-filter.asciidoc | 41
-rw-r--r--  docs/reference/query-dsl/filters/nested-filter.asciidoc | 78
-rw-r--r--  docs/reference/query-dsl/filters/not-filter.asciidoc | 82
-rw-r--r--  docs/reference/query-dsl/filters/or-filter.asciidoc | 59
-rw-r--r--  docs/reference/query-dsl/filters/prefix-filter.asciidoc | 37
-rw-r--r--  docs/reference/query-dsl/filters/query-filter.asciidoc | 50
-rw-r--r--  docs/reference/query-dsl/filters/range-filter.asciidoc | 55
-rw-r--r--  docs/reference/query-dsl/filters/regexp-filter.asciidoc | 53
-rw-r--r--  docs/reference/query-dsl/filters/script-filter.asciidoc | 53
-rw-r--r--  docs/reference/query-dsl/filters/term-filter.asciidoc | 38
-rw-r--r--  docs/reference/query-dsl/filters/terms-filter.asciidoc | 227
-rw-r--r--  docs/reference/query-dsl/filters/type-filter.asciidoc | 15
-rw-r--r--  docs/reference/query-dsl/queries.asciidoc | 83
-rw-r--r--  docs/reference/query-dsl/queries/bool-query.asciidoc | 54
-rw-r--r--  docs/reference/query-dsl/queries/boosting-query.asciidoc | 26
-rw-r--r--  docs/reference/query-dsl/queries/common-terms-query.asciidoc | 263
-rw-r--r--  docs/reference/query-dsl/queries/constant-score-query.asciidoc | 36
-rw-r--r--  docs/reference/query-dsl/queries/dis-max-query.asciidoc | 44
-rw-r--r--  docs/reference/query-dsl/queries/filtered-query.asciidoc | 25
-rw-r--r--  docs/reference/query-dsl/queries/flt-field-query.asciidoc | 47
-rw-r--r--  docs/reference/query-dsl/queries/flt-query.asciidoc | 65
-rw-r--r--  docs/reference/query-dsl/queries/function-score-query.asciidoc | 491
-rw-r--r--  docs/reference/query-dsl/queries/fuzzy-query.asciidoc | 102
-rw-r--r--  docs/reference/query-dsl/queries/geo-shape-query.asciidoc | 49
-rw-r--r--  docs/reference/query-dsl/queries/has-child-query.asciidoc | 61
-rw-r--r--  docs/reference/query-dsl/queries/has-parent-query.asciidoc | 57
-rw-r--r--  docs/reference/query-dsl/queries/ids-query.asciidoc | 20
-rw-r--r--  docs/reference/query-dsl/queries/indices-query.asciidoc | 37
-rw-r--r--  docs/reference/query-dsl/queries/match-all-query.asciidoc | 20
-rw-r--r--  docs/reference/query-dsl/queries/match-query.asciidoc | 234
-rw-r--r--  docs/reference/query-dsl/queries/minimum-should-match.asciidoc | 56
-rw-r--r--  docs/reference/query-dsl/queries/mlt-field-query.asciidoc | 68
-rw-r--r--  docs/reference/query-dsl/queries/mlt-query.asciidoc | 67
-rw-r--r--  docs/reference/query-dsl/queries/multi-match-query.asciidoc | 64
-rw-r--r--  docs/reference/query-dsl/queries/multi-term-rewrite.asciidoc | 42
-rw-r--r--  docs/reference/query-dsl/queries/nested-query.asciidoc | 58
-rw-r--r--  docs/reference/query-dsl/queries/prefix-query.asciidoc | 36
-rw-r--r--  docs/reference/query-dsl/queries/query-string-query.asciidoc | 161
-rw-r--r--  docs/reference/query-dsl/queries/query-string-syntax.asciidoc | 295
-rw-r--r--  docs/reference/query-dsl/queries/range-query.asciidoc | 31
-rw-r--r--  docs/reference/query-dsl/queries/regexp-query.asciidoc | 54
-rw-r--r--  docs/reference/query-dsl/queries/regexp-syntax.asciidoc | 280
-rw-r--r--  docs/reference/query-dsl/queries/simple-query-string-query.asciidoc | 100
-rw-r--r--  docs/reference/query-dsl/queries/span-first-query.asciidoc | 20
-rw-r--r--  docs/reference/query-dsl/queries/span-multi-term-query.asciidoc | 30
-rw-r--r--  docs/reference/query-dsl/queries/span-near-query.asciidoc | 27
-rw-r--r--  docs/reference/query-dsl/queries/span-not-query.asciidoc | 24
-rw-r--r--  docs/reference/query-dsl/queries/span-or-query.asciidoc | 20
-rw-r--r--  docs/reference/query-dsl/queries/span-term-query.asciidoc | 30
-rw-r--r--  docs/reference/query-dsl/queries/term-query.asciidoc | 31
-rw-r--r--  docs/reference/query-dsl/queries/terms-query.asciidoc | 19
-rw-r--r--  docs/reference/query-dsl/queries/top-children-query.asciidoc | 71
-rw-r--r--  docs/reference/query-dsl/queries/wildcard-query.asciidoc | 39
-rw-r--r--  docs/reference/search.asciidoc | 101
-rw-r--r--  docs/reference/search/aggregations.asciidoc | 137
-rw-r--r--  docs/reference/search/aggregations/bucket.asciidoc | 25
-rw-r--r--  docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc | 136
-rw-r--r--  docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc | 110
-rw-r--r--  docs/reference/search/aggregations/bucket/filter-aggregation.asciidoc | 38
-rw-r--r--  docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc | 105
-rw-r--r--  docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc | 127
-rw-r--r--  docs/reference/search/aggregations/bucket/global-aggregation.asciidoc | 51
-rw-r--r--  docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc | 283
-rw-r--r--  docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc | 98
-rw-r--r--  docs/reference/search/aggregations/bucket/missing-aggregation.asciidoc | 34
-rw-r--r--  docs/reference/search/aggregations/bucket/nested-aggregation.asciidoc | 67
-rw-r--r--  docs/reference/search/aggregations/bucket/range-aggregation.asciidoc | 274
-rw-r--r--  docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc | 294
-rw-r--r--  docs/reference/search/aggregations/metrics.asciidoc | 15
-rw-r--r--  docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc | 73
-rw-r--r--  docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc | 82
-rw-r--r--  docs/reference/search/aggregations/metrics/max-aggregation.asciidoc | 68
-rw-r--r--  docs/reference/search/aggregations/metrics/min-aggregation.asciidoc | 67
-rw-r--r--  docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc | 79
-rw-r--r--  docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc | 77
-rw-r--r--  docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc | 35
-rw-r--r--  docs/reference/search/count.asciidoc | 89
-rw-r--r--  docs/reference/search/explain.asciidoc | 112
-rw-r--r--  docs/reference/search/facets.asciidoc | 283
-rw-r--r--  docs/reference/search/facets/date-histogram-facet.asciidoc | 134
-rw-r--r--  docs/reference/search/facets/filter-facet.asciidoc | 24
-rw-r--r--  docs/reference/search/facets/geo-distance-facet.asciidoc | 252
-rw-r--r--  docs/reference/search/facets/histogram-facet.asciidoc | 138
-rw-r--r--  docs/reference/search/facets/query-facet.asciidoc | 19
-rw-r--r--  docs/reference/search/facets/range-facet.asciidoc | 119
-rw-r--r--  docs/reference/search/facets/statistical-facet.asciidoc | 101
-rw-r--r--  docs/reference/search/facets/terms-facet.asciidoc | 288
-rw-r--r--  docs/reference/search/facets/terms-stats-facet.asciidoc | 51
-rw-r--r--  docs/reference/search/more-like-this.asciidoc | 27
-rw-r--r--  docs/reference/search/multi-search.asciidoc | 78
-rw-r--r--  docs/reference/search/percolate.asciidoc | 447
-rw-r--r--  docs/reference/search/request-body.asciidoc | 112
-rw-r--r--  docs/reference/search/request/explain.asciidoc | 14
-rw-r--r--  docs/reference/search/request/fielddata-fields.asciidoc | 21
-rw-r--r--  docs/reference/search/request/fields.asciidoc | 103
-rw-r--r--  docs/reference/search/request/from-size.asciidoc | 21
-rw-r--r--  docs/reference/search/request/highlighting.asciidoc | 549
-rw-r--r--  docs/reference/search/request/index-boost.asciidoc | 17
-rw-r--r--  docs/reference/search/request/min-score.asciidoc | 18
-rw-r--r--  docs/reference/search/request/named-queries-and-filters.asciidoc | 57
-rw-r--r--  docs/reference/search/request/post-filter.asciidoc | 90
-rw-r--r--  docs/reference/search/request/preference.asciidoc | 42
-rw-r--r--  docs/reference/search/request/query.asciidoc | 14
-rw-r--r--  docs/reference/search/request/rescore.asciidoc | 81
-rw-r--r--  docs/reference/search/request/script-fields.asciidoc | 60
-rw-r--r--  docs/reference/search/request/scroll.asciidoc | 42
-rw-r--r--  docs/reference/search/request/search-type.asciidoc | 156
-rw-r--r--  docs/reference/search/request/sort.asciidoc | 319
-rw-r--r--  docs/reference/search/request/source-filtering.asciidoc | 65
-rw-r--r--  docs/reference/search/request/version.asciidoc | 14
-rw-r--r--  docs/reference/search/search.asciidoc | 52
-rw-r--r--  docs/reference/search/suggesters.asciidoc | 275
-rw-r--r--  docs/reference/search/suggesters/completion-suggest.asciidoc | 231
-rw-r--r--  docs/reference/search/suggesters/phrase-suggest.asciidoc | 319
-rw-r--r--  docs/reference/search/suggesters/term-suggest.asciidoc | 110
-rw-r--r--  docs/reference/search/uri-request.asciidoc | 101
-rw-r--r--  docs/reference/search/validate.asciidoc | 77
-rw-r--r--  docs/reference/setup.asciidoc | 63
-rw-r--r--  docs/reference/setup/as-a-service-win.asciidoc | 63
-rw-r--r--  docs/reference/setup/as-a-service.asciidoc | 86
-rw-r--r--  docs/reference/setup/configuration.asciidoc | 237
-rw-r--r--  docs/reference/setup/dir-layout.asciidoc | 47
-rw-r--r--  docs/reference/setup/repositories.asciidoc | 52
-rw-r--r--  docs/reference/testing.asciidoc | 16
-rw-r--r--  docs/reference/testing/testing-framework.asciidoc | 230
-rw-r--r--  docs/river/couchdb.asciidoc | 11
-rw-r--r--  docs/river/index.asciidoc | 77
-rw-r--r--  docs/river/rabbitmq.asciidoc | 9
-rw-r--r--  docs/river/twitter.asciidoc | 10
-rw-r--r--  docs/river/wikipedia.asciidoc | 8
-rw-r--r--  docs/ruby/index.asciidoc | 86
-rw-r--r--  lib/sigar/libsigar-amd64-freebsd-6.so | bin 0 -> 210641 bytes
-rw-r--r--  lib/sigar/libsigar-amd64-linux.so | bin 0 -> 246605 bytes
-rw-r--r--  lib/sigar/libsigar-amd64-solaris.so | bin 0 -> 251360 bytes
-rw-r--r--  lib/sigar/libsigar-ia64-linux.so | bin 0 -> 494929 bytes
-rw-r--r--  lib/sigar/libsigar-sparc-solaris.so | bin 0 -> 285004 bytes
-rw-r--r--  lib/sigar/libsigar-sparc64-solaris.so | bin 0 -> 261896 bytes
-rw-r--r--  lib/sigar/libsigar-universal-macosx.dylib | bin 0 -> 377668 bytes
-rw-r--r--  lib/sigar/libsigar-universal64-macosx.dylib | bin 0 -> 397440 bytes
-rw-r--r--  lib/sigar/libsigar-x86-freebsd-5.so | bin 0 -> 179751 bytes
-rw-r--r--  lib/sigar/libsigar-x86-freebsd-6.so | bin 0 -> 179379 bytes
-rw-r--r--  lib/sigar/libsigar-x86-linux.so | bin 0 -> 233385 bytes
-rw-r--r--  lib/sigar/libsigar-x86-solaris.so | bin 0 -> 242880 bytes
-rw-r--r--  lib/sigar/sigar-1.6.4.jar | bin 0 -> 428580 bytes
-rw-r--r--  lib/sigar/sigar-amd64-winnt.dll | bin 0 -> 402432 bytes
-rw-r--r--  lib/sigar/sigar-x86-winnt.dll | bin 0 -> 266240 bytes
-rw-r--r--  lib/sigar/sigar-x86-winnt.lib | bin 0 -> 99584 bytes
-rw-r--r--  pom.xml | 1197
-rw-r--r--  rest-api-spec/.gitignore | 4
-rw-r--r--  rest-api-spec/LICENSE.txt | 13
-rw-r--r--  rest-api-spec/README.markdown | 65
-rw-r--r--  rest-api-spec/api/bulk.json | 58
-rw-r--r--  rest-api-spec/api/cat.aliases.json | 41
-rw-r--r--  rest-api-spec/api/cat.allocation.json | 46
-rw-r--r--  rest-api-spec/api/cat.count.json | 41
-rw-r--r--  rest-api-spec/api/cat.health.json | 42
-rw-r--r--  rest-api-spec/api/cat.help.json | 20
-rw-r--r--  rest-api-spec/api/cat.indices.json | 51
-rw-r--r--  rest-api-spec/api/cat.master.json | 37
-rw-r--r--  rest-api-spec/api/cat.nodes.json | 37
-rw-r--r--  rest-api-spec/api/cat.pending_tasks.json | 37
-rw-r--r--  rest-api-spec/api/cat.recovery.json | 46
-rw-r--r--  rest-api-spec/api/cat.shards.json | 41
-rw-r--r--  rest-api-spec/api/cat.thread_pool.json | 42
-rw-r--r--  rest-api-spec/api/clear_scroll.json | 21
-rw-r--r--  rest-api-spec/api/cluster.get_settings.json | 26
-rw-r--r--  rest-api-spec/api/cluster.health.json | 55
-rw-r--r--  rest-api-spec/api/cluster.pending_tasks.json | 23
-rw-r--r--  rest-api-spec/api/cluster.put_settings.json | 20
-rw-r--r--  rest-api-spec/api/cluster.reroute.json | 33
-rw-r--r--  rest-api-spec/api/cluster.state.json | 44
-rw-r--r--  rest-api-spec/api/cluster.stats.json | 28
-rw-r--r--  rest-api-spec/api/count.json | 55
-rw-r--r--  rest-api-spec/api/count_percolate.json | 72
-rw-r--r--  rest-api-spec/api/delete.json | 66
-rw-r--r--  rest-api-spec/api/delete_by_query.json | 81
-rw-r--r--  rest-api-spec/api/exists.json | 50
-rw-r--r--  rest-api-spec/api/explain.json | 94
-rw-r--r--  rest-api-spec/api/get.json | 75
-rw-r--r--  rest-api-spec/api/get_source.json | 71
-rw-r--r--  rest-api-spec/api/index.json | 82
-rw-r--r--  rest-api-spec/api/indices.analyze.json | 55
-rw-r--r--  rest-api-spec/api/indices.clear_cache.json | 73
-rw-r--r--  rest-api-spec/api/indices.close.json | 42
-rw-r--r--  rest-api-spec/api/indices.create.json | 30
-rw-r--r--  rest-api-spec/api/indices.delete.json | 27
-rw-r--r--  rest-api-spec/api/indices.delete_alias.json | 33
-rw-r--r--  rest-api-spec/api/indices.delete_mapping.json | 29
-rw-r--r--  rest-api-spec/api/indices.delete_template.json | 28
-rw-r--r--  rest-api-spec/api/indices.delete_warmer.json | 33
-rw-r--r--  rest-api-spec/api/indices.exists.json | 38
-rw-r--r--  rest-api-spec/api/indices.exists_alias.json | 41
-rw-r--r--  rest-api-spec/api/indices.exists_template.json | 24
-rw-r--r--  rest-api-spec/api/indices.exists_type.json | 43
-rw-r--r--  rest-api-spec/api/indices.flush.json | 41
-rw-r--r--  rest-api-spec/api/indices.get_alias.json | 41
-rw-r--r--  rest-api-spec/api/indices.get_aliases.json | 31
-rw-r--r--  rest-api-spec/api/indices.get_field_mapping.json | 50
-rw-r--r--  rest-api-spec/api/indices.get_mapping.json | 41
-rw-r--r--  rest-api-spec/api/indices.get_settings.json | 45
-rw-r--r--  rest-api-spec/api/indices.get_template.json | 28
-rw-r--r--  rest-api-spec/api/indices.get_warmer.json | 45
-rw-r--r--  rest-api-spec/api/indices.open.json | 42
-rw-r--r--  rest-api-spec/api/indices.optimize.json | 52
-rw-r--r--  rest-api-spec/api/indices.put_alias.json | 35
-rw-r--r--  rest-api-spec/api/indices.put_mapping.json | 53
-rw-r--r--  rest-api-spec/api/indices.put_settings.json | 44
-rw-r--r--  rest-api-spec/api/indices.put_template.json | 39
-rw-r--r--  rest-api-spec/api/indices.put_warmer.json | 49
-rw-r--r--  rest-api-spec/api/indices.refresh.json | 41
-rw-r--r--  rest-api-spec/api/indices.segments.json | 41
-rw-r--r--  rest-api-spec/api/indices.snapshot_index.json | 33
-rw-r--r--  rest-api-spec/api/indices.stats.json | 60
-rw-r--r--  rest-api-spec/api/indices.status.json | 49
-rw-r--r--  rest-api-spec/api/indices.update_aliases.json | 26
-rw-r--r--  rest-api-spec/api/indices.validate_query.json | 54
-rw-r--r--  rest-api-spec/api/info.json | 15
-rw-r--r--  rest-api-spec/api/mget.json | 54
-rw-r--r--  rest-api-spec/api/mlt.json | 108
-rw-r--r--  rest-api-spec/api/mpercolate.json | 41
-rw-r--r--  rest-api-spec/api/msearch.json | 32
-rw-r--r--  rest-api-spec/api/mtermvectors.json | 86
-rw-r--r--  rest-api-spec/api/nodes.hot_threads.json | 36
-rw-r--r--  rest-api-spec/api/nodes.info.json | 33
-rw-r--r--  rest-api-spec/api/nodes.shutdown.json | 27
-rw-r--r--  rest-api-spec/api/nodes.stats.json | 67
-rw-r--r--  rest-api-spec/api/percolate.json | 72
-rw-r--r--  rest-api-spec/api/ping.json | 15
-rw-r--r--  rest-api-spec/api/scroll.json | 29
-rw-r--r--  rest-api-spec/api/search.json | 156
-rw-r--r--  rest-api-spec/api/snapshot.create.json | 37
-rw-r--r--  rest-api-spec/api/snapshot.create_repository.json | 31
-rw-r--r--  rest-api-spec/api/snapshot.delete.json | 29
-rw-r--r--  rest-api-spec/api/snapshot.delete_repository.json | 28
-rw-r--r--  rest-api-spec/api/snapshot.get.json | 29
-rw-r--r--  rest-api-spec/api/snapshot.get_repository.json | 27
-rw-r--r--  rest-api-spec/api/snapshot.restore.json | 37
-rw-r--r--  rest-api-spec/api/suggest.json | 48
-rw-r--r--  rest-api-spec/api/termvector.json | 83
-rw-r--r--  rest-api-spec/api/update.json | 91
-rw-r--r--  rest-api-spec/test/README.asciidoc | 233
-rw-r--r--  rest-api-spec/test/bulk/10_basic.yaml | 25
-rw-r--r--  rest-api-spec/test/bulk/20_list_of_strings.yaml | 17
-rw-r--r--  rest-api-spec/test/bulk/30_big_string.yaml | 17
-rw-r--r--  rest-api-spec/test/cat.aliases/10_basic.yaml | 190
-rw-r--r--  rest-api-spec/test/cat.allocation/10_basic.yaml | 194
-rw-r--r--  rest-api-spec/test/cat.count/10_basic.yaml | 76
-rw-r--r--  rest-api-spec/test/cat.shards/10_basic.yaml | 49
-rw-r--r--  rest-api-spec/test/cat.thread_pool/10_basic.yaml | 40
-rw-r--r--  rest-api-spec/test/cluster.pending_tasks/10_basic.yaml | 14
-rw-r--r--  rest-api-spec/test/cluster.put_settings/10_basic.yaml | 16
-rw-r--r--  rest-api-spec/test/cluster.reroute/10_basic.yaml | 4
-rw-r--r--  rest-api-spec/test/cluster.state/10_basic.yaml | 6
-rw-r--r--  rest-api-spec/test/cluster.state/20_filtering.yaml | 164
-rw-r--r--  rest-api-spec/test/create/10_with_id.yaml | 33
-rw-r--r--  rest-api-spec/test/create/15_without_id.yaml | 25
-rw-r--r--  rest-api-spec/test/create/30_internal_version.yaml | 20
-rw-r--r--  rest-api-spec/test/create/35_external_version.yaml | 33
-rw-r--r--  rest-api-spec/test/create/40_routing.yaml | 41
-rw-r--r--  rest-api-spec/test/create/50_parent.yaml | 43
-rw-r--r--  rest-api-spec/test/create/55_parent_with_routing.yaml | 54
-rw-r--r--  rest-api-spec/test/create/60_refresh.yaml | 46
-rw-r--r--  rest-api-spec/test/create/70_timestamp.yaml | 81
-rw-r--r--  rest-api-spec/test/create/75_ttl.yaml | 104
-rw-r--r--  rest-api-spec/test/create/TODO.txt | 5
-rw-r--r--  rest-api-spec/test/delete/10_basic.yaml | 19
-rw-r--r--  rest-api-spec/test/delete/20_internal_version.yaml | 28
-rw-r--r--  rest-api-spec/test/delete/25_external_version.yaml | 32
-rw-r--r--  rest-api-spec/test/delete/30_routing.yaml | 29
-rw-r--r--  rest-api-spec/test/delete/40_parent.yaml | 36
-rw-r--r--  rest-api-spec/test/delete/42_missing_parent.yml | 28
-rw-r--r--  rest-api-spec/test/delete/45_parent_with_routing.yaml | 43
-rw-r--r--  rest-api-spec/test/delete/50_refresh.yaml | 73
-rw-r--r--  rest-api-spec/test/delete/60_missing.yaml | 19
-rw-r--r--  rest-api-spec/test/delete/TODO.txt | 6
-rw-r--r--  rest-api-spec/test/delete_by_query/10_basic.yaml | 42
-rw-r--r--  rest-api-spec/test/exists/10_basic.yaml | 27
-rw-r--r--  rest-api-spec/test/exists/30_parent.yaml | 42
-rw-r--r--  rest-api-spec/test/exists/40_routing.yaml | 39
-rw-r--r--  rest-api-spec/test/exists/55_parent_with_routing.yaml | 56
-rw-r--r--  rest-api-spec/test/exists/60_realtime_refresh.yaml | 50
-rw-r--r--  rest-api-spec/test/exists/70_defaults.yaml | 17
-rw-r--r--  rest-api-spec/test/exists/TODO.txt | 3
-rw-r--r--  rest-api-spec/test/explain/10_basic.yaml | 23
-rw-r--r--  rest-api-spec/test/explain/20_source_filtering.yaml | 44
-rw-r--r--  rest-api-spec/test/explain/TODO.txt | 6
-rw-r--r--  rest-api-spec/test/get/10_basic.yaml | 31
-rw-r--r--  rest-api-spec/test/get/15_default_values.yaml | 21
-rw-r--r--  rest-api-spec/test/get/20_fields.yaml | 45
-rw-r--r--  rest-api-spec/test/get/30_parent.yaml | 45
-rw-r--r--  rest-api-spec/test/get/40_routing.yaml | 41
-rw-r--r--  rest-api-spec/test/get/55_parent_with_routing.yaml | 54
-rw-r--r--  rest-api-spec/test/get/60_realtime_refresh.yaml | 49
-rw-r--r--  rest-api-spec/test/get/70_source_filtering.yaml | 56
-rw-r--r--  rest-api-spec/test/get/80_missing.yaml | 19
-rw-r--r--  rest-api-spec/test/get/TODO.txt | 3
-rw-r--r--  rest-api-spec/test/get_source/10_basic.yaml | 24
-rw-r--r--  rest-api-spec/test/get_source/15_default_values.yaml | 16
-rw-r--r--  rest-api-spec/test/get_source/30_parent.yaml | 43
-rw-r--r--  rest-api-spec/test/get_source/40_routing.yaml | 39
-rw-r--r--  rest-api-spec/test/get_source/55_parent_with_routing.yaml | 51
-rw-r--r--  rest-api-spec/test/get_source/60_realtime_refresh.yaml | 50
-rw-r--r--  rest-api-spec/test/get_source/70_source_filtering.yaml | 26
-rw-r--r--  rest-api-spec/test/get_source/80_missing.yaml | 19
-rw-r--r--  rest-api-spec/test/get_source/TODO.txt | 3
-rw-r--r--  rest-api-spec/test/index/10_with_id.yaml | 26
-rw-r--r--  rest-api-spec/test/index/15_without_id.yaml | 26
-rw-r--r--  rest-api-spec/test/index/20_optype.yaml | 29
-rw-r--r--  rest-api-spec/test/index/30_internal_version.yaml | 36
-rw-r--r--  rest-api-spec/test/index/35_external_version.yaml | 34
-rw-r--r--  rest-api-spec/test/index/40_routing.yaml | 41
-rw-r--r--  rest-api-spec/test/index/50_parent.yaml | 42
-rw-r--r--  rest-api-spec/test/index/55_parent_with_routing.yaml | 54
-rw-r--r--  rest-api-spec/test/index/60_refresh.yaml | 46
-rw-r--r--  rest-api-spec/test/index/70_timestamp.yaml | 71
-rw-r--r--  rest-api-spec/test/index/75_ttl.yaml | 88
-rw-r--r--  rest-api-spec/test/index/TODO.txt | 5
-rw-r--r--  rest-api-spec/test/indices.analyze/10_analyze.yaml | 50
-rw-r--r--  rest-api-spec/test/indices.clear_cache/10_basic.yaml | 4
-rw-r--r--  rest-api-spec/test/indices.create/10_basic.yaml | 85
-rw-r--r--  rest-api-spec/test/indices.delete_alias/10_basic.yaml | 32
-rw-r--r--  rest-api-spec/test/indices.delete_alias/all_path_options.yaml | 225
-rw-r--r--  rest-api-spec/test/indices.delete_mapping/10_basic.yaml | 31
-rw-r--r--  rest-api-spec/test/indices.delete_mapping/all_path_options.yaml | 272
-rw-r--r--  rest-api-spec/test/indices.delete_warmer/all_path_options.yaml | 218
-rw-r--r--  rest-api-spec/test/indices.exists/10_basic.yaml | 25
-rw-r--r--  rest-api-spec/test/indices.exists_alias/10_basic.yaml | 45
-rw-r--r--  rest-api-spec/test/indices.exists_template/10_basic.yaml | 38
-rw-r--r--  rest-api-spec/test/indices.exists_type/10_basic.yaml | 40
-rw-r--r--  rest-api-spec/test/indices.get_alias/10_basic.yaml | 219
-rw-r--r--  rest-api-spec/test/indices.get_aliases/10_basic.yaml | 224
-rw-r--r--  rest-api-spec/test/indices.get_field_mapping/10_basic.yaml | 79
-rw-r--r--  rest-api-spec/test/indices.get_field_mapping/20_missing_field.yaml | 24
-rw-r--r--  rest-api-spec/test/indices.get_field_mapping/30_missing_type.yaml | 24
-rw-r--r--  rest-api-spec/test/indices.get_field_mapping/40_missing_index.yaml | 11
-rw-r--r--  rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yaml | 148
-rw-r--r--  rest-api-spec/test/indices.get_mapping/10_basic.yaml | 161
-rw-r--r--  rest-api-spec/test/indices.get_mapping/20_missing_type.yaml | 19
-rw-r--r--  rest-api-spec/test/indices.get_mapping/30_missing_index.yaml | 9
-rw-r--r--  rest-api-spec/test/indices.get_mapping/40_aliases.yaml | 26
-rw-r--r--  rest-api-spec/test/indices.get_settings/10_basic.yaml | 165
-rw-r--r--  rest-api-spec/test/indices.get_settings/20_aliases.yaml | 26
-rw-r--r--  rest-api-spec/test/indices.get_template/10_basic.yaml | 45
-rw-r--r--  rest-api-spec/test/indices.get_template/20_get_missing.yaml | 13
-rw-r--r--  rest-api-spec/test/indices.get_warmer/10_basic.yaml | 201
-rw-r--r--  rest-api-spec/test/indices.open/10_basic.yaml | 31
-rw-r--r--  rest-api-spec/test/indices.open/20_multiple_indices.yaml | 83
-rw-r--r--  rest-api-spec/test/indices.optimize/10_basic.yaml | 10
-rw-r--r--  rest-api-spec/test/indices.put_alias/10_basic.yaml | 29
-rw-r--r--  rest-api-spec/test/indices.put_alias/all_path_options.yaml | 127
-rw-r--r--  rest-api-spec/test/indices.put_mapping/10_basic.yaml | 62
-rw-r--r--  rest-api-spec/test/indices.put_mapping/all_path_options.yaml | 178
-rw-r--r--  rest-api-spec/test/indices.put_settings/10_basic.yaml | 30
-rw-r--r--  rest-api-spec/test/indices.put_settings/all_path_options.yaml | 113
-rw-r--r--  rest-api-spec/test/indices.put_template/10_basic.yaml | 17
-rw-r--r--  rest-api-spec/test/indices.put_warmer/10_basic.yaml | 145
-rw-r--r--  rest-api-spec/test/indices.put_warmer/20_aliases.yaml | 30
-rw-r--r--  rest-api-spec/test/indices.put_warmer/all_path_options.yaml | 134
-rw-r--r--  rest-api-spec/test/indices.segments/10_basic.yaml | 5
-rw-r--r--  rest-api-spec/test/indices.snapshot_index/10_basic.yaml | 4
-rw-r--r--  rest-api-spec/test/indices.stats/10_basic.yaml | 4
-rw-r--r--  rest-api-spec/test/indices.status/10_basic.yaml | 9
-rw-r--r--  rest-api-spec/test/indices.update_aliases/10_basic.yaml | 34
-rw-r--r--  rest-api-spec/test/indices.update_aliases/20_routing.yaml | 135
-rw-r--r--  rest-api-spec/test/indices.validate_query/10_basic.yaml | 27
-rw-r--r--  rest-api-spec/test/info/10_info.yaml | 8
-rw-r--r--  rest-api-spec/test/info/20_lucene_version.yaml | 8
-rw-r--r--  rest-api-spec/test/mget/10_basic.yaml | 45
-rw-r--r--  rest-api-spec/test/mget/11_default_index_type.yaml | 48
-rw-r--r--  rest-api-spec/test/mget/12_non_existent_index.yaml | 35
-rw-r--r--  rest-api-spec/test/mget/13_missing_metadata.yaml | 51
-rw-r--r--  rest-api-spec/test/mget/15_ids.yaml | 72
-rw-r--r--  rest-api-spec/test/mget/20_fields.yaml | 109
-rw-r--r--  rest-api-spec/test/mget/30_parent.yaml | 57
-rw-r--r--  rest-api-spec/test/mget/40_routing.yaml | 42
-rw-r--r--  rest-api-spec/test/mget/55_parent_with_routing.yaml | 46
-rw-r--r--  rest-api-spec/test/mget/60_realtime_refresh.yaml | 53
-rw-r--r--  rest-api-spec/test/mget/70_source_filtering.yaml | 119
-rw-r--r--  rest-api-spec/test/mget/TODO.txt | 3
-rw-r--r--  rest-api-spec/test/mlt/10_basic.yaml | 25
-rw-r--r--  rest-api-spec/test/mpercolate/10_basic.yaml | 41
-rw-r--r--  rest-api-spec/test/msearch/10_basic.yaml | 45
-rw-r--r--  rest-api-spec/test/mtermvectors/10_basic.yaml | 85
-rw-r--r--  rest-api-spec/test/nodes.info/10_basic.yaml | 7
-rw-r--r--  rest-api-spec/test/nodes.stats/10_basic.yaml | 8
-rw-r--r--  rest-api-spec/test/percolate/15_new.yaml | 40
-rw-r--r--  rest-api-spec/test/percolate/16_existing_doc.yaml | 86
-rw-r--r--  rest-api-spec/test/percolate/17_empty.yaml | 31
-rw-r--r--  rest-api-spec/test/percolate/18_highligh_with_query.yaml | 36
-rw-r--r--  rest-api-spec/test/ping/10_ping.yaml | 5
-rw-r--r--  rest-api-spec/test/scroll/10_basic.yaml | 32
-rw-r--r--  rest-api-spec/test/scroll/11_clear.yaml | 34
-rw-r--r--  rest-api-spec/test/search/10_source_filtering.yaml | 92
-rw-r--r--  rest-api-spec/test/search/20_default_values.yaml | 43
-rw-r--r--  rest-api-spec/test/snapshot.get_repository/10_basic.yaml | 50
-rw-r--r--  rest-api-spec/test/suggest/10_basic.yaml | 24
-rw-r--r--  rest-api-spec/test/termvector/10_basic.yaml | 35
-rw-r--r--  rest-api-spec/test/update/10_doc.yaml | 40
-rw-r--r--  rest-api-spec/test/update/15_script.yaml | 79
-rw-r--r--  rest-api-spec/test/update/20_doc_upsert.yaml | 41
-rw-r--r--  rest-api-spec/test/update/22_doc_as_upsert.yaml | 41
-rw-r--r--  rest-api-spec/test/update/25_script_upsert.yaml | 41
-rw-r--r--  rest-api-spec/test/update/30_internal_version.yaml | 47
-rw-r--r--  rest-api-spec/test/update/35_external_version.yaml | 40
-rw-r--r--  rest-api-spec/test/update/40_routing.yaml | 56
-rw-r--r--  rest-api-spec/test/update/50_parent.yaml | 79
-rw-r--r--  rest-api-spec/test/update/55_parent_with_routing.yaml | 63
-rw-r--r--  rest-api-spec/test/update/60_refresh.yaml | 50
-rw-r--r--  rest-api-spec/test/update/70_timestamp.yaml | 77
-rw-r--r--  rest-api-spec/test/update/75_ttl.yaml | 96
-rw-r--r--  rest-api-spec/test/update/80_fields.yaml | 20
-rw-r--r--  rest-api-spec/test/update/85_fields_meta.yaml | 51
-rw-r--r--  rest-api-spec/test/update/90_missing.yaml | 42
-rw-r--r--  rest-api-spec/test/update/TODO.txt | 7
-rw-r--r--  rest-api-spec/utils/Gemfile | 9
-rw-r--r--  rest-api-spec/utils/Thorfile | 3
-rw-r--r--  rest-api-spec/utils/thor/generate_api.rb | 189
-rw-r--r--  rest-api-spec/utils/thor/generate_source.rb | 117
-rw-r--r--  rest-api-spec/utils/thor/lister.rb | 41
-rw-r--r--  rest-api-spec/utils/thor/templates/ruby/method.erb | 64
-rw-r--r--  rest-api-spec/utils/thor/templates/ruby/test.erb | 26
-rw-r--r--  rest-api-spec/utils/thor/templates/ruby/test_helper.rb | 54
-rw-r--r--  src/deb/control/conffiles | 4
-rw-r--r--  src/deb/control/control | 38
-rwxr-xr-x  src/deb/control/postinst | 52
-rwxr-xr-x  src/deb/control/postrm | 33
-rwxr-xr-x  src/deb/control/prerm | 26
-rw-r--r--  src/deb/copyright | 17
-rw-r--r--  src/deb/default/elasticsearch | 44
-rwxr-xr-x  src/deb/init.d/elasticsearch | 203
-rw-r--r--  src/deb/lintian/elasticsearch | 8
-rw-r--r--  src/main/assemblies/common-bin.xml | 45
-rw-r--r--  src/main/assemblies/targz-bin.xml | 38
-rw-r--r--  src/main/assemblies/zip-bin.xml | 56
-rw-r--r--  src/main/java/jsr166e/CompletableFuture.java | 3314
-rw-r--r--  src/main/java/jsr166e/CompletionException.java | 61
-rw-r--r--  src/main/java/jsr166e/ConcurrentHashMapV8.java | 6308
-rw-r--r--  src/main/java/jsr166e/CountedCompleter.java | 750
-rw-r--r--  src/main/java/jsr166e/DoubleAdder.java | 198
-rw-r--r--  src/main/java/jsr166e/DoubleMaxUpdater.java | 193
-rw-r--r--  src/main/java/jsr166e/ForkJoinPool.java | 3344
-rw-r--r--  src/main/java/jsr166e/ForkJoinTask.java | 1554
-rw-r--r--  src/main/java/jsr166e/ForkJoinWorkerThread.java | 123
-rw-r--r--  src/main/java/jsr166e/LongAdder.java | 199
-rw-r--r--  src/main/java/jsr166e/LongAdderTable.java | 189
-rw-r--r--  src/main/java/jsr166e/LongMaxUpdater.java | 183
-rw-r--r--  src/main/java/jsr166e/RecursiveAction.java | 164
-rw-r--r--  src/main/java/jsr166e/RecursiveTask.java | 69
-rw-r--r--  src/main/java/jsr166e/StampedLock.java | 1419
-rw-r--r--  src/main/java/jsr166e/Striped64.java | 341
-rw-r--r--  src/main/java/jsr166e/extra/AtomicDouble.java | 280
-rw-r--r--  src/main/java/jsr166e/extra/AtomicDoubleArray.java | 327
-rw-r--r--  src/main/java/jsr166e/extra/ReadMostlyVector.java | 1631
-rw-r--r--  src/main/java/jsr166e/extra/SequenceLock.java | 639
-rw-r--r--  src/main/java/jsr166y/ConcurrentLinkedDeque.java | 1468
-rw-r--r--  src/main/java/jsr166y/CountedCompleter.java | 744
-rw-r--r--  src/main/java/jsr166y/ForkJoinPool.java | 3427
-rw-r--r--  src/main/java/jsr166y/ForkJoinTask.java | 1509
-rw-r--r--  src/main/java/jsr166y/ForkJoinWorkerThread.java | 121
-rw-r--r--  src/main/java/jsr166y/LinkedTransferQueue.java | 1353
-rw-r--r--  src/main/java/jsr166y/Phaser.java | 1164
-rw-r--r--  src/main/java/jsr166y/RecursiveAction.java | 164
-rw-r--r--  src/main/java/jsr166y/RecursiveTask.java | 68
-rw-r--r--  src/main/java/jsr166y/ThreadLocalRandom.java | 197
-rw-r--r--  src/main/java/jsr166y/TransferQueue.java | 133
-rw-r--r--  src/main/java/jsr166y/package-info.java | 28
-rw-r--r--  src/main/java/org/apache/lucene/analysis/CustomAnalyzerWrapper.java | 77
-rw-r--r--  src/main/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.java | 56
-rw-r--r--  src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java | 90
-rw-r--r--  src/main/java/org/apache/lucene/index/TrackingConcurrentMergeScheduler.java | 149
-rw-r--r--  src/main/java/org/apache/lucene/index/TrackingSerialMergeScheduler.java | 165
-rw-r--r--  src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java | 31
-rw-r--r--  src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java | 170
-rw-r--r--  src/main/java/org/apache/lucene/queries/XTermsFilter.java | 328
-rw-r--r--  src/main/java/org/apache/lucene/queryparser/XSimpleQueryParser.java | 521
-rw-r--r--  src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java | 38
-rw-r--r--  src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java | 31
-rw-r--r--  src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java | 884
-rw-r--r--  src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java | 39
-rw-r--r--  src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java | 376
-rw-r--r--  src/main/java/org/apache/lucene/search/XReferenceManager.java | 326
-rw-r--r--  src/main/java/org/apache/lucene/search/XSearcherManager.java | 177
-rw-r--r--  src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java | 78
-rw-r--r--  src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java | 187
-rw-r--r--  src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java | 50
-rw-r--r--  src/main/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighter.java | 777
-rw-r--r--  src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java | 1052
-rw-r--r--  src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java | 263
-rw-r--r--  src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java | 161
-rw-r--r--  src/main/java/org/apache/lucene/store/BufferedChecksumIndexOutput.java | 115
-rw-r--r--  src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java | 143
-rw-r--r--  src/main/java/org/apache/lucene/store/StoreRateLimiting.java | 94
-rw-r--r--  src/main/java/org/apache/lucene/store/StoreUtils.java | 44
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferAllocator.java | 97
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferDirectory.java | 182
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFile.java | 102
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFileOutput.java | 65
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexInput.java | 198
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexOutput.java | 132
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/CachingByteBufferAllocator.java | 82
-rw-r--r--  src/main/java/org/apache/lucene/store/bytebuffer/PlainByteBufferAllocator.java | 67
-rw-r--r--  src/main/java/org/elasticsearch/Build.java | 96
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchException.java | 155
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchGenerationException.java | 36
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java | 38
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchNullPointerException.java | 38
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchParseException.java | 41
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java | 36
-rw-r--r--  src/main/java/org/elasticsearch/ElasticsearchWrapperException.java | 28
-rw-r--r--  src/main/java/org/elasticsearch/ExceptionsHelper.java | 113
-rw-r--r--  src/main/java/org/elasticsearch/Version.java | 427
-rw-r--r--  src/main/java/org/elasticsearch/action/Action.java | 35
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionFuture.java | 98
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionListener.java | 38
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionModule.java | 308
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionRequest.java | 76
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionRequestBuilder.java | 89
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionRequestValidationException.java | 62
-rw-r--r--  src/main/java/org/elasticsearch/action/ActionResponse.java | 42
-rw-r--r--  src/main/java/org/elasticsearch/action/FailedNodeException.java | 39
-rw-r--r--  src/main/java/org/elasticsearch/action/GenericAction.java | 67
-rw-r--r--  src/main/java/org/elasticsearch/action/ListenableActionFuture.java | 38
-rw-r--r--  src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/NoSuchNodeException.java | 30
-rw-r--r--  src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java | 32
-rw-r--r--  src/main/java/org/elasticsearch/action/RoutingMissingException.java | 59
-rw-r--r--  src/main/java/org/elasticsearch/action/ShardOperationFailedException.java | 53
-rw-r--r--  src/main/java/org/elasticsearch/action/ThreadingModel.java | 114
-rw-r--r--  src/main/java/org/elasticsearch/action/TimestampParsingException.java | 38
-rw-r--r--  src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java | 94
-rw-r--r--  src/main/java/org/elasticsearch/action/UnavailableShardsException.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/ValidateActions.java | 34
-rw-r--r--  src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java | 71
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java | 39
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java | 195
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java | 95
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java | 323
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java | 54
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java | 293
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java | 110
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java | 233
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java | 64
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java | 100
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java | 55
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java | 58
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java | 137
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java | 311
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java | 250
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java | 125
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java | 153
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java | 199
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfo.java | 89
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java | 134
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequest.java | 79
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequestBuilder.java | 57
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartResponse.java | 76
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/restart/TransportNodesRestartAction.java | 181
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java | 110
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java | 73
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java | 73
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java | 317
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java | 333
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java | 273
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java | 142
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java | 91
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java | 134
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java | 93
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java | 64
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java | 52
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java | 92
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java | 100
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java | 78
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java | 92
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java | 102
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java | 281
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java | 124
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java | 52
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java | 94
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java | 123
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java | 68
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java | 65
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java | 127
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java | 162
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java | 107
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java | 72
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java | 248
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java | 97
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java | 178
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java | 93
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java | 107
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java | 100
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java | 488
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java | 193
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java | 103
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java | 119
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java | 129
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java | 78
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java | 52
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java | 89
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java | 126
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java | 89
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java | 90
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java | 101
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java | 47
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java | 530
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java | 216
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java | 92
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java | 117
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java | 147
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java | 102
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java | 67
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java | 129
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java | 426
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java | 110
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java | 694
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java | 51
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java | 40
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java | 171
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java | 194
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java | 45
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java | 50
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java | 39
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java | 135
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java | 69
-rw-r--r--  src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java | 39
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java341
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java197
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java52
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java139
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java39
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java60
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java71
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java72
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java100
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java38
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java81
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java75
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java165
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java88
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java163
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java236
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java123
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java65
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheRequest.java104
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheResponse.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java188
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java31
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java114
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java67
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java108
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java111
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java382
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java212
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java108
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/create/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java129
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java70
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java126
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/delete/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java85
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java56
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsResponse.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java91
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java102
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java44
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java103
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java71
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsResponse.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java99
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java66
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java50
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java135
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/flush/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotAction.java47
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java48
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequestBuilder.java42
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java55
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportGatewaySnapshotAction.java137
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/package-info.java24
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingClusterStateUpdateRequest.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequest.java140
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequestBuilder.java67
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingResponse.java52
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/TransportDeleteMappingAction.java188
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java101
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java150
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java79
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java165
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java34
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java39
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java82
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java142
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java250
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java66
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java65
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java275
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java110
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java52
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java104
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/mapping/put/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java31
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java114
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java67
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java108
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java148
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequestBuilder.java81
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeResponse.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeRequest.java82
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeResponse.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java141
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/optimize/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java70
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java58
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java49
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java137
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/refresh/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java69
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java54
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java184
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java36
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java40
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java114
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java162
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java94
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java66
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java78
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java98
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java97
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java50
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java159
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java94
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java638
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java239
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java118
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java106
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java261
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java166
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java228
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java104
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java226
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/DocsStatus.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/GatewayRecoveryStatus.java133
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/GatewaySnapshotStatus.java104
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java170
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/IndexStatus.java178
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequest.java83
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequestBuilder.java56
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java322
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/PeerRecoveryStatus.java133
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/ShardStatus.java282
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/status/TransportIndicesStatusAction.java294
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java74
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java43
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java50
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java89
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java89
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java43
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java66
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java92
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java380
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java194
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java50
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java103
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java89
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java122
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java79
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java207
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java225
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java114
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java98
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/validate/query/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java142
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java65
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java51
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java194
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java62
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java50
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java88
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java66
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java123
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java70
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java52
-rw-r--r--src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java197
-rw-r--r--src/main/java/org/elasticsearch/action/admin/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkAction.java55
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java90
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java302
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java340
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkRequest.java519
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java167
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkResponse.java120
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java104
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java72
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java347
-rw-r--r--src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java631
-rw-r--r--src/main/java/org/elasticsearch/action/count/CountAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/count/CountRequest.java258
-rw-r--r--src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java134
-rw-r--r--src/main/java/org/elasticsearch/action/count/CountResponse.java86
-rw-r--r--src/main/java/org/elasticsearch/action/count/ShardCountRequest.java125
-rw-r--r--src/main/java/org/elasticsearch/action/count/ShardCountResponse.java61
-rw-r--r--src/main/java/org/elasticsearch/action/count/TransportCountAction.java201
-rw-r--r--src/main/java/org/elasticsearch/action/count/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/delete/DeleteAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/delete/DeleteRequest.java224
-rw-r--r--src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java125
-rw-r--r--src/main/java/org/elasticsearch/action/delete/DeleteResponse.java108
-rw-r--r--src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java224
-rw-r--r--src/main/java/org/elasticsearch/action/delete/index/IndexDeleteRequest.java86
-rw-r--r--src/main/java/org/elasticsearch/action/delete/index/IndexDeleteResponse.java105
-rw-r--r--src/main/java/org/elasticsearch/action/delete/index/ShardDeleteRequest.java113
-rw-r--r--src/main/java/org/elasticsearch/action/delete/index/ShardDeleteResponse.java65
-rw-r--r--src/main/java/org/elasticsearch/action/delete/index/TransportIndexDeleteAction.java86
-rw-r--r--src/main/java/org/elasticsearch/action/delete/index/TransportShardDeleteAction.java142
-rw-r--r--src/main/java/org/elasticsearch/action/delete/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java220
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java174
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java103
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java143
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java112
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java152
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java42
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java107
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java86
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java162
-rw-r--r--src/main/java/org/elasticsearch/action/deletebyquery/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/explain/ExplainAction.java44
-rw-r--r--src/main/java/org/elasticsearch/action/explain/ExplainRequest.java223
-rw-r--r--src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java194
-rw-r--r--src/main/java/org/elasticsearch/action/explain/ExplainResponse.java107
-rw-r--r--src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java175
-rw-r--r--src/main/java/org/elasticsearch/action/explain/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/get/GetAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/get/GetRequest.java306
-rw-r--r--src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java186
-rw-r--r--src/main/java/org/elasticsearch/action/get/GetResponse.java165
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java122
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetRequest.java444
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java90
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetResponse.java173
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java196
-rw-r--r--src/main/java/org/elasticsearch/action/get/MultiGetShardResponse.java99
-rw-r--r--src/main/java/org/elasticsearch/action/get/TransportGetAction.java123
-rw-r--r--src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java168
-rw-r--r--src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java141
-rw-r--r--src/main/java/org/elasticsearch/action/get/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/index/IndexAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/index/IndexRequest.java650
-rw-r--r--src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java316
-rw-r--r--src/main/java/org/elasticsearch/action/index/IndexResponse.java108
-rw-r--r--src/main/java/org/elasticsearch/action/index/TransportIndexAction.java310
-rw-r--r--src/main/java/org/elasticsearch/action/index/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java664
-rw-r--r--src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java252
-rw-r--r--src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java363
-rw-r--r--src/main/java/org/elasticsearch/action/mlt/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/MultiPercolateAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java382
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequestBuilder.java65
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java162
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java236
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java260
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java294
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java112
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java183
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java295
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java363
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java202
-rw-r--r--src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java282
-rw-r--r--src/main/java/org/elasticsearch/action/search/ClearScrollAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java80
-rw-r--r--src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java51
-rw-r--r--src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java56
-rw-r--r--src/main/java/org/elasticsearch/action/search/MultiSearchAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java276
-rw-r--r--src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java81
-rw-r--r--src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java182
-rw-r--r--src/main/java/org/elasticsearch/action/search/ReduceSearchPhaseException.java37
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java82
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java86
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchRequest.java501
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java988
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchResponse.java274
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchScrollAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java139
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java94
-rw-r--r--src/main/java/org/elasticsearch/action/search/SearchType.java127
-rw-r--r--src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java159
-rw-r--r--src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java193
-rw-r--r--src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java125
-rw-r--r--src/main/java/org/elasticsearch/action/search/TransportSearchAction.java161
-rw-r--r--src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java118
-rw-r--r--src/main/java/org/elasticsearch/action/search/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java67
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java84
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java191
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java300
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java142
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java97
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java201
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java81
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java254
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java285
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java283
-rw-r--r--src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java426
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java59
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java60
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/SuggestAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java168
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java98
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java82
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java175
-rw-r--r--src/main/java/org/elasticsearch/action/suggest/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java138
-rw-r--r--src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java112
-rw-r--r--src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java102
-rw-r--r--src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java121
-rw-r--r--src/main/java/org/elasticsearch/action/support/DestructiveOperations.java85
-rw-r--r--src/main/java/org/elasticsearch/action/support/IndicesOptions.java168
-rw-r--r--src/main/java/org/elasticsearch/action/support/PlainActionFuture.java35
-rw-r--r--src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java37
-rw-r--r--src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java76
-rw-r--r--src/main/java/org/elasticsearch/action/support/TransportAction.java128
-rw-r--r--src/main/java/org/elasticsearch/action/support/TransportActions.java58
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java119
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java64
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java106
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationThreading.java76
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java44
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationRequest.java71
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationResponse.java66
-rw-r--r--src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java416
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java86
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java52
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java64
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequest.java69
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequestBuilder.java55
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequest.java76
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java43
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java291
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java49
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java86
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java64
-rw-r--r--src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java53
-rw-r--r--src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java55
-rw-r--r--src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java62
-rw-r--r--src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java101
-rw-r--r--src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java52
-rw-r--r--src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java91
-rw-r--r--src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java296
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java118
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java154
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java98
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/ReplicationShardOperationFailedException.java44
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/ReplicationType.java82
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java183
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java97
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java220
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java175
-rw-r--r--src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java806
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java94
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequestBuilder.java52
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/custom/TransportSingleCustomOperationAction.java369
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java111
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java59
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java336
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java101
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java52
-rw-r--r--src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java306
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsItemResponse.java123
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequest.java158
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequestBuilder.java56
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java173
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardRequest.java99
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardResponse.java99
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TermVectorAction.java46
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TermVectorFields.java484
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TermVectorRequest.java400
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TermVectorRequestBuilder.java101
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java362
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TermVectorWriter.java233
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TransportMultiTermVectorsAction.java174
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TransportSingleShardMultiTermsVectorAction.java118
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/TransportSingleShardTermVectorAction.java108
-rw-r--r--src/main/java/org/elasticsearch/action/termvector/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java292
-rw-r--r--src/main/java/org/elasticsearch/action/update/UpdateAction.java45
-rw-r--r--src/main/java/org/elasticsearch/action/update/UpdateHelper.java283
-rw-r--r--src/main/java/org/elasticsearch/action/update/UpdateRequest.java656
-rw-r--r--src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java343
-rw-r--r--src/main/java/org/elasticsearch/action/update/UpdateResponse.java124
-rw-r--r--src/main/java/org/elasticsearch/bootstrap/Bootstrap.java279
-rw-r--r--src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java34
-rw-r--r--src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java36
-rw-r--r--src/main/java/org/elasticsearch/bulk/udp/BulkUdpModule.java32
-rw-r--r--src/main/java/org/elasticsearch/bulk/udp/BulkUdpService.java221
-rw-r--r--src/main/java/org/elasticsearch/cache/NodeCache.java59
-rw-r--r--src/main/java/org/elasticsearch/cache/NodeCacheModule.java42
-rw-r--r--src/main/java/org/elasticsearch/cache/memory/ByteBufferCache.java108
-rw-r--r--src/main/java/org/elasticsearch/cache/recycler/CacheRecycler.java325
-rw-r--r--src/main/java/org/elasticsearch/cache/recycler/CacheRecyclerModule.java39
-rw-r--r--src/main/java/org/elasticsearch/cache/recycler/DefaultPageCacheRecyclerModule.java39
-rw-r--r--src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java252
-rw-r--r--src/main/java/org/elasticsearch/cache/recycler/PageCacheRecyclerModule.java50
-rw-r--r--src/main/java/org/elasticsearch/client/AdminClient.java39
-rw-r--r--src/main/java/org/elasticsearch/client/Client.java558
-rw-r--r--src/main/java/org/elasticsearch/client/ClusterAdminClient.java432
-rw-r--r--src/main/java/org/elasticsearch/client/IndicesAdminClient.java753
-rw-r--r--src/main/java/org/elasticsearch/client/Requests.java548
-rw-r--r--src/main/java/org/elasticsearch/client/internal/InternalClient.java31
-rw-r--r--src/main/java/org/elasticsearch/client/internal/InternalClusterAdminClient.java28
-rw-r--r--src/main/java/org/elasticsearch/client/internal/InternalGenericClient.java29
-rw-r--r--src/main/java/org/elasticsearch/client/internal/InternalIndicesAdminClient.java29
-rw-r--r--src/main/java/org/elasticsearch/client/node/NodeAdminClient.java54
-rw-r--r--src/main/java/org/elasticsearch/client/node/NodeClient.java94
-rw-r--r--src/main/java/org/elasticsearch/client/node/NodeClientModule.java40
-rw-r--r--src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java74
-rw-r--r--src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java74
-rw-r--r--src/main/java/org/elasticsearch/client/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/client/support/AbstractClient.java384
-rw-r--r--src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java402
-rw-r--r--src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java622
-rw-r--r--src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java41
-rw-r--r--src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java38
-rw-r--r--src/main/java/org/elasticsearch/client/transport/TransportClient.java492
-rw-r--r--src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java504
-rw-r--r--src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java59
-rw-r--r--src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java113
-rw-r--r--src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java93
-rw-r--r--src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java92
-rw-r--r--src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java55
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java168
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterInfo.java49
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterInfoService.java28
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterModule.java80
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterName.java93
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterNameModule.java40
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterService.java113
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterState.java596
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterStateListener.java33
-rw-r--r--src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java37
-rw-r--r--src/main/java/org/elasticsearch/cluster/DiskUsage.java60
-rw-r--r--src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java49
-rw-r--r--src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java328
-rw-r--r--src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java51
-rw-r--r--src/main/java/org/elasticsearch/cluster/ProcessedClusterStateUpdateTask.java33
-rw-r--r--src/main/java/org/elasticsearch/cluster/TimeoutClusterStateListener.java36
-rw-r--r--src/main/java/org/elasticsearch/cluster/TimeoutClusterStateUpdateTask.java35
-rw-r--r--src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateListener.java37
-rw-r--r--src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateRequest.java65
-rw-r--r--src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateResponse.java39
-rw-r--r--src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java43
-rw-r--r--src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java197
-rw-r--r--src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java208
-rw-r--r--src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java140
-rw-r--r--src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java349
-rw-r--r--src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java176
-rw-r--r--src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java71
-rw-r--r--src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java55
-rw-r--r--src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java337
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java232
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java330
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java715
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java422
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java692
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaData.java1380
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java467
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java257
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java217
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java247
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java325
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java637
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java47
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java331
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java38
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java204
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java102
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java527
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java129
-rw-r--r--src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java410
-rw-r--r--src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java326
-rw-r--r--src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java186
-rw-r--r--src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java86
-rw-r--r--src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java638
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/GroupShardsIterator.java87
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java45
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/ImmutableShardRouting.java351
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java526
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java604
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/MutableShardRouting.java160
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java63
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java112
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java111
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RotationShardShuffler.java48
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingException.java36
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java189
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java767
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingService.java170
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java458
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java174
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/RoutingValidationException.java38
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/ShardIterator.java38
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java145
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java75
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/ShardShuffler.java47
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java80
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationExplanation.java153
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationModule.java53
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java539
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java46
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java220
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java50
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java1096
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java245
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/GatewayAllocator.java40
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java78
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java77
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java88
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java214
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java89
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java224
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java219
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java181
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java83
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java158
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersModule.java80
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java247
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java113
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java76
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java150
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DisableAllocationDecider.java131
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java346
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java126
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java163
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java77
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java46
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java54
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java96
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java114
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java117
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java133
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/OperationRouting.java52
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/OperationRoutingModule.java53
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/hash/HashFunction.java41
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/hash/djb/DjbHashFunction.java70
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/hash/simple/SimpleHashFunction.java38
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java292
-rw-r--r--src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRoutingModule.java34
-rw-r--r--src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java702
-rw-r--r--src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java101
-rw-r--r--src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java38
-rw-r--r--src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java99
-rw-r--r--src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java71
-rw-r--r--src/main/java/org/elasticsearch/cluster/settings/Validator.java212
-rw-r--r--src/main/java/org/elasticsearch/common/Base64.java2095
-rw-r--r--src/main/java/org/elasticsearch/common/Booleans.java99
-rw-r--r--src/main/java/org/elasticsearch/common/Classes.java145
-rw-r--r--src/main/java/org/elasticsearch/common/Explicit.java57
-rw-r--r--src/main/java/org/elasticsearch/common/Names.java91
-rw-r--r--src/main/java/org/elasticsearch/common/Nullable.java35
-rw-r--r--src/main/java/org/elasticsearch/common/Numbers.java174
-rw-r--r--src/main/java/org/elasticsearch/common/ParseField.java82
-rw-r--r--src/main/java/org/elasticsearch/common/Preconditions.java490
-rw-r--r--src/main/java/org/elasticsearch/common/Priority.java97
-rw-r--r--src/main/java/org/elasticsearch/common/StopWatch.java295
-rw-r--r--src/main/java/org/elasticsearch/common/Strings.java1581
-rw-r--r--src/main/java/org/elasticsearch/common/Table.java198
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java64
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java30
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/BlobPath.java78
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/BlobStore.java31
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java36
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/ImmutableBlobContainer.java39
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/fs/AbstractFsBlobContainer.java105
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java113
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/fs/FsImmutableBlobContainer.java103
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java120
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/support/BlobStores.java64
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java52
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobContainer.java117
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java151
-rw-r--r--src/main/java/org/elasticsearch/common/blobstore/url/URLImmutableBlobContainer.java60
-rw-r--r--src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java32
-rw-r--r--src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java179
-rw-r--r--src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java174
-rw-r--r--src/main/java/org/elasticsearch/common/bytes/BytesArray.java174
-rw-r--r--src/main/java/org/elasticsearch/common/bytes/BytesReference.java147
-rw-r--r--src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java136
-rw-r--r--src/main/java/org/elasticsearch/common/bytes/HashedBytesArray.java145
-rw-r--r--src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java63
-rw-r--r--src/main/java/org/elasticsearch/common/collect/HppcMaps.java148
-rw-r--r--src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java193
-rw-r--r--src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java329
-rw-r--r--src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java329
-rw-r--r--src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java342
-rw-r--r--src/main/java/org/elasticsearch/common/collect/Iterators2.java64
-rw-r--r--src/main/java/org/elasticsearch/common/collect/MapBuilder.java90
-rw-r--r--src/main/java/org/elasticsearch/common/collect/Tuple.java71
-rw-r--r--src/main/java/org/elasticsearch/common/component/AbstractComponent.java76
-rw-r--r--src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java133
-rw-r--r--src/main/java/org/elasticsearch/common/component/CloseableComponent.java30
-rw-r--r--src/main/java/org/elasticsearch/common/component/Lifecycle.java195
-rw-r--r--src/main/java/org/elasticsearch/common/component/LifecycleComponent.java38
-rw-r--r--src/main/java/org/elasticsearch/common/component/LifecycleListener.java38
-rw-r--r--src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java217
-rw-r--r--src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java183
-rw-r--r--src/main/java/org/elasticsearch/common/compress/CompressedStreamOutput.java145
-rw-r--r--src/main/java/org/elasticsearch/common/compress/CompressedString.java141
-rw-r--r--src/main/java/org/elasticsearch/common/compress/Compressor.java65
-rw-r--r--src/main/java/org/elasticsearch/common/compress/CompressorContext.java25
-rw-r--r--src/main/java/org/elasticsearch/common/compress/CompressorFactory.java173
-rw-r--r--src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java74
-rw-r--r--src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java73
-rw-r--r--src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java64
-rw-r--r--src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java147
-rw-r--r--src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressorContext.java29
-rw-r--r--src/main/java/org/elasticsearch/common/geo/GeoDistance.java368
-rw-r--r--src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java487
-rw-r--r--src/main/java/org/elasticsearch/common/geo/GeoPoint.java245
-rw-r--r--src/main/java/org/elasticsearch/common/geo/GeoUtils.java288
-rw-r--r--src/main/java/org/elasticsearch/common/geo/ShapeRelation.java53
-rw-r--r--src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java55
-rw-r--r--src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java52
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/BaseLineStringBuilder.java128
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/BasePolygonBuilder.java475
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java119
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java73
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java45
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java125
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java54
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java115
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java66
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/PointCollection.java129
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java42
-rw-r--r--src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java629
-rw-r--r--src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java384
-rw-r--r--src/main/java/org/elasticsearch/common/inject/AbstractModule.java240
-rw-r--r--src/main/java/org/elasticsearch/common/inject/AbstractProcessor.java106
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Binder.java377
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Binding.java91
-rw-r--r--src/main/java/org/elasticsearch/common/inject/BindingAnnotation.java42
-rw-r--r--src/main/java/org/elasticsearch/common/inject/BindingProcessor.java268
-rw-r--r--src/main/java/org/elasticsearch/common/inject/BoundProviderFactory.java68
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConfigurationException.java83
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConstantFactory.java43
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConstructionProxy.java47
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java101
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java109
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java76
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ContextualCallable.java27
-rw-r--r--src/main/java/org/elasticsearch/common/inject/CreationException.java59
-rw-r--r--src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java71
-rw-r--r--src/main/java/org/elasticsearch/common/inject/DeferredLookups.java60
-rw-r--r--src/main/java/org/elasticsearch/common/inject/EncounterImpl.java114
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Exposed.java37
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java56
-rw-r--r--src/main/java/org/elasticsearch/common/inject/FactoryProxy.java62
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Guice.java98
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ImplementedBy.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/InheritingState.java150
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Initializable.java33
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Initializables.java42
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Initializer.java164
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Inject.java67
-rw-r--r--src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java124
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Injector.java211
-rw-r--r--src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java290
-rw-r--r--src/main/java/org/elasticsearch/common/inject/InjectorImpl.java863
-rw-r--r--src/main/java/org/elasticsearch/common/inject/InjectorShell.java250
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Injectors.java244
-rw-r--r--src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java55
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Key.java511
-rw-r--r--src/main/java/org/elasticsearch/common/inject/LookupProcessor.java61
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Lookups.java30
-rw-r--r--src/main/java/org/elasticsearch/common/inject/MembersInjector.java41
-rw-r--r--src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java119
-rw-r--r--src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java125
-rw-r--r--src/main/java/org/elasticsearch/common/inject/MessageProcessor.java54
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Module.java43
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Modules.java69
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java76
-rw-r--r--src/main/java/org/elasticsearch/common/inject/OutOfScopeException.java39
-rw-r--r--src/main/java/org/elasticsearch/common/inject/PreProcessModule.java31
-rw-r--r--src/main/java/org/elasticsearch/common/inject/PrivateBinder.java52
-rw-r--r--src/main/java/org/elasticsearch/common/inject/PrivateElementProcessor.java53
-rw-r--r--src/main/java/org/elasticsearch/common/inject/PrivateModule.java287
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ProvidedBy.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Provider.java55
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java59
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Provides.java37
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ProvisionException.java70
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Reflection.java49
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Scope.java59
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ScopeAnnotation.java42
-rw-r--r--src/main/java/org/elasticsearch/common/inject/ScopeBindingProcessor.java64
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Scopes.java136
-rw-r--r--src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java67
-rw-r--r--src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java30
-rw-r--r--src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java88
-rw-r--r--src/main/java/org/elasticsearch/common/inject/SingleParameterInjector.java75
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Singleton.java35
-rw-r--r--src/main/java/org/elasticsearch/common/inject/SpawnModules.java28
-rw-r--r--src/main/java/org/elasticsearch/common/inject/Stage.java44
-rw-r--r--src/main/java/org/elasticsearch/common/inject/State.java165
-rw-r--r--src/main/java/org/elasticsearch/common/inject/TypeConverterBindingProcessor.java184
-rw-r--r--src/main/java/org/elasticsearch/common/inject/TypeListenerBindingProcessor.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/TypeLiteral.java344
-rw-r--r--src/main/java/org/elasticsearch/common/inject/WeakKeySet.java46
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/Assisted.java43
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java100
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedInject.java44
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java338
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java262
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java154
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java66
-rw-r--r--src/main/java/org/elasticsearch/common/inject/assistedinject/package-info.java21
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/AnnotatedBindingBuilder.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/AnnotatedConstantBindingBuilder.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/AnnotatedElementBuilder.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/ConstantBindingBuilder.java73
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/LinkedBindingBuilder.java70
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java47
-rw-r--r--src/main/java/org/elasticsearch/common/inject/binder/package-info.java21
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java136
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Annotations.java123
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/AsynchronousComputationException.java29
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java134
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/BindingImpl.java120
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ComputationException.java27
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ConstantBindingBuilderImpl.java128
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java124
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ErrorHandler.java37
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Errors.java640
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ErrorsException.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ExpirationTimer.java26
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ExposedBindingImpl.java78
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ExposureBuilder.java69
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/FailableCache.java64
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java96
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java53
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/InternalFactory.java61
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Join.java320
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java70
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java74
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/MatcherAndConverter.java58
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java667
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java30
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Nullability.java32
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Nullable.java34
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java142
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java92
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java111
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java132
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Scoping.java223
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/SourceProvider.java81
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java50
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Stopwatch.java49
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/Strings.java58
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java53
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/UniqueAnnotations.java77
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java63
-rw-r--r--src/main/java/org/elasticsearch/common/inject/internal/package-info.java21
-rw-r--r--src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java99
-rw-r--r--src/main/java/org/elasticsearch/common/inject/matcher/Matcher.java42
-rw-r--r--src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java429
-rw-r--r--src/main/java/org/elasticsearch/common/inject/matcher/package-info.java21
-rw-r--r--src/main/java/org/elasticsearch/common/inject/multibindings/Element.java39
-rw-r--r--src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java374
-rw-r--r--src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java322
-rw-r--r--src/main/java/org/elasticsearch/common/inject/multibindings/RealElement.java66
-rw-r--r--src/main/java/org/elasticsearch/common/inject/multibindings/package-info.java21
-rw-r--r--src/main/java/org/elasticsearch/common/inject/name/Named.java37
-rw-r--r--src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java59
-rw-r--r--src/main/java/org/elasticsearch/common/inject/name/Names.java71
-rw-r--r--src/main/java/org/elasticsearch/common/inject/name/package-info.java20
-rw-r--r--src/main/java/org/elasticsearch/common/inject/package-info.java47
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/BindingScopingVisitor.java57
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/BindingTargetVisitor.java83
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ConstructorBinding.java45
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ConvertedConstantBinding.java48
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingScopingVisitor.java56
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java76
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/DefaultElementVisitor.java78
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/Dependency.java130
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/Element.java64
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ElementVisitor.java81
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/Elements.java334
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ExposedBinding.java39
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/HasDependencies.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java35
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java401
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/InjectionRequest.java84
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/InstanceBinding.java44
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/LinkedKeyBinding.java36
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/MembersInjectorLookup.java104
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/Message.java142
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/PrivateElements.java61
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ProviderBinding.java38
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ProviderInstanceBinding.java46
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ProviderKeyBinding.java39
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ProviderLookup.java99
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ProviderWithDependencies.java28
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/ScopeBinding.java67
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/StaticInjectionRequest.java76
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/TypeConverter.java33
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/TypeConverterBinding.java66
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java100
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/TypeListener.java47
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/TypeListenerBinding.java71
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/UntargettedBinding.java29
-rw-r--r--src/main/java/org/elasticsearch/common/inject/spi/package-info.java21
-rw-r--r--src/main/java/org/elasticsearch/common/inject/util/Modules.java269
-rw-r--r--src/main/java/org/elasticsearch/common/inject/util/Providers.java54
-rw-r--r--src/main/java/org/elasticsearch/common/inject/util/Types.java135
-rw-r--r--src/main/java/org/elasticsearch/common/inject/util/package-info.java20
-rw-r--r--src/main/java/org/elasticsearch/common/io/BooleanStreamable.java59
-rw-r--r--src/main/java/org/elasticsearch/common/io/BytesStream.java27
-rw-r--r--src/main/java/org/elasticsearch/common/io/CachedStreams.java29
-rw-r--r--src/main/java/org/elasticsearch/common/io/CharSequenceReader.java28
-rw-r--r--src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java220
-rw-r--r--src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java271
-rw-r--r--src/main/java/org/elasticsearch/common/io/FastStringReader.java205
-rw-r--r--src/main/java/org/elasticsearch/common/io/FileChannelInputStream.java109
-rw-r--r--src/main/java/org/elasticsearch/common/io/FileSystemUtils.java234
-rw-r--r--src/main/java/org/elasticsearch/common/io/Streams.java300
-rw-r--r--src/main/java/org/elasticsearch/common/io/ThrowableObjectInputStream.java99
-rw-r--r--src/main/java/org/elasticsearch/common/io/ThrowableObjectOutputStream.java68
-rw-r--r--src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java333
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/AdapterStreamInput.java174
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/AdapterStreamOutput.java183
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java104
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/BytesStreamInput.java164
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java150
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/CachedStreamInput.java72
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java63
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/HandlesStreamInput.java96
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/HandlesStreamOutput.java84
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java86
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java59
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/StreamInput.java496
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java467
-rw-r--r--src/main/java/org/elasticsearch/common/io/stream/Streamable.java32
-rw-r--r--src/main/java/org/elasticsearch/common/jna/CLibrary.java55
-rw-r--r--src/main/java/org/elasticsearch/common/jna/Natives.java62
-rw-r--r--src/main/java/org/elasticsearch/common/joda/DateMathParser.java263
-rw-r--r--src/main/java/org/elasticsearch/common/joda/FormatDateTimeFormatter.java66
-rw-r--r--src/main/java/org/elasticsearch/common/joda/Joda.java190
-rw-r--r--src/main/java/org/elasticsearch/common/lease/Releasable.java30
-rw-r--r--src/main/java/org/elasticsearch/common/lease/Releasables.java89
-rw-r--r--src/main/java/org/elasticsearch/common/logging/ESLogger.java122
-rw-r--r--src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java80
-rw-r--r--src/main/java/org/elasticsearch/common/logging/Loggers.java159
-rw-r--r--src/main/java/org/elasticsearch/common/logging/jdk/JdkESLogger.java146
-rw-r--r--src/main/java/org/elasticsearch/common/logging/jdk/JdkESLoggerFactory.java42
-rw-r--r--src/main/java/org/elasticsearch/common/logging/log4j/ConsoleAppender.java245
-rw-r--r--src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLogger.java141
-rw-r--r--src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerFactory.java42
-rw-r--r--src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java113
-rw-r--r--src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLogger.java127
-rw-r--r--src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLoggerFactory.java41
-rw-r--r--src/main/java/org/elasticsearch/common/logging/support/AbstractESLogger.java133
-rw-r--r--src/main/java/org/elasticsearch/common/logging/support/LoggerMessageFormat.java267
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/BytesRefs.java65
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/Directories.java53
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java86
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/IndexCommitDelegate.java98
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java90
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/Lucene.java376
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java70
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/MultiCollector.java96
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/ReaderContextAware.java29
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/ScorerAware.java30
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/SegmentReaderUtils.java92
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java239
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/all/AllField.java68
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java199
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java79
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/AllDocIdSet.java94
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/AndDocIdSet.java231
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/BitsDocIdSetIterator.java67
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/ContextDocIdSet.java37
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java148
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSet.java170
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSetIterator.java68
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/NotDocIdSet.java175
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/docset/OrDocIdSet.java241
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/AndFilter.java98
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/ApplyAcceptedDocsFilter.java170
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java32
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/EmptyScorer.java67
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java75
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/LimitFilter.java68
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilter.java65
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsFilter.java64
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java97
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java277
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java279
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java79
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java51
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/NotFilter.java74
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/OrFilter.java104
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/Queries.java186
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/RegexpFilter.java109
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/XBooleanFilter.java416
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/XCollector.java32
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/XConstantScoreQuery.java44
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java261
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java82
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java202
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java379
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java208
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java92
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java46
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java77
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/store/ChecksumIndexOutput.java97
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java105
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java61
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java39
-rw-r--r--src/main/java/org/elasticsearch/common/lucene/uid/Versions.java139
-rw-r--r--src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java587
-rw-r--r--src/main/java/org/elasticsearch/common/metrics/CounterMetric.java49
-rw-r--r--src/main/java/org/elasticsearch/common/metrics/EWMA.java119
-rw-r--r--src/main/java/org/elasticsearch/common/metrics/MeanMetric.java61
-rw-r--r--src/main/java/org/elasticsearch/common/metrics/MeterMetric.java123
-rw-r--r--src/main/java/org/elasticsearch/common/metrics/Metric.java25
-rw-r--r--src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java37
-rw-r--r--src/main/java/org/elasticsearch/common/netty/NettyStaticSetup.java55
-rw-r--r--src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java90
-rw-r--r--src/main/java/org/elasticsearch/common/network/NetworkModule.java33
-rw-r--r--src/main/java/org/elasticsearch/common/network/NetworkService.java192
-rw-r--r--src/main/java/org/elasticsearch/common/network/NetworkUtils.java321
-rw-r--r--src/main/java/org/elasticsearch/common/os/OsUtils.java58
-rw-r--r--src/main/java/org/elasticsearch/common/path/PathTrie.java229
-rw-r--r--src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java174
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java40
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java68
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java97
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java47
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java70
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/Recycler.java56
-rw-r--r--src/main/java/org/elasticsearch/common/recycler/Recyclers.java252
-rw-r--r--src/main/java/org/elasticsearch/common/regex/Regex.java194
-rw-r--r--src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java69
-rw-r--r--src/main/java/org/elasticsearch/common/rounding/Rounding.java142
-rw-r--r--src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java509
-rw-r--r--src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java1043
-rw-r--r--src/main/java/org/elasticsearch/common/settings/NoClassSettingsException.java37
-rw-r--r--src/main/java/org/elasticsearch/common/settings/Settings.java315
-rw-r--r--src/main/java/org/elasticsearch/common/settings/SettingsException.java38
-rw-r--r--src/main/java/org/elasticsearch/common/settings/SettingsFilter.java58
-rw-r--r--src/main/java/org/elasticsearch/common/settings/SettingsModule.java42
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java34
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java68
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/SettingsLoader.java107
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java62
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java131
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java43
-rw-r--r--src/main/java/org/elasticsearch/common/settings/loader/package-info.java24
-rw-r--r--src/main/java/org/elasticsearch/common/settings/package-info.java23
-rw-r--r--src/main/java/org/elasticsearch/common/text/BytesText.java82
-rw-r--r--src/main/java/org/elasticsearch/common/text/StringAndBytesText.java108
-rw-r--r--src/main/java/org/elasticsearch/common/text/StringText.java94
-rw-r--r--src/main/java/org/elasticsearch/common/text/Text.java56
-rw-r--r--src/main/java/org/elasticsearch/common/text/UTF8SortedAsUnicodeComparator.java76
-rw-r--r--src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java79
-rw-r--r--src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java49
-rw-r--r--src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java127
-rw-r--r--src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java81
-rw-r--r--src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java61
-rw-r--r--src/main/java/org/elasticsearch/common/transport/PortsRange.java82
-rw-r--r--src/main/java/org/elasticsearch/common/transport/TransportAddress.java32
-rw-r--r--src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java87
-rw-r--r--src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java250
-rw-r--r--src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java253
-rw-r--r--src/main/java/org/elasticsearch/common/unit/DistanceUnit.java324
-rw-r--r--src/main/java/org/elasticsearch/common/unit/Fuzziness.java256
-rw-r--r--src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java50
-rw-r--r--src/main/java/org/elasticsearch/common/unit/Percent.java63
-rw-r--r--src/main/java/org/elasticsearch/common/unit/SizeUnit.java244
-rw-r--r--src/main/java/org/elasticsearch/common/unit/SizeValue.java233
-rw-r--r--src/main/java/org/elasticsearch/common/unit/TimeValue.java301
-rw-r--r--src/main/java/org/elasticsearch/common/util/AbstractArray.java43
-rw-r--r--src/main/java/org/elasticsearch/common/util/AbstractBigArray.java162
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigArray.java30
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigArrays.java459
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigByteArray.java130
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigDoubleArray.java110
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigDoubleArrayList.java79
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigFloatArrayList.java74
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigIntArray.java93
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigLongArray.java110
-rw-r--r--src/main/java/org/elasticsearch/common/util/BigObjectArray.java88
-rw-r--r--src/main/java/org/elasticsearch/common/util/BloomFilter.java557
-rw-r--r--src/main/java/org/elasticsearch/common/util/ByteArray.java49
-rw-r--r--src/main/java/org/elasticsearch/common/util/ByteUtils.java140
-rw-r--r--src/main/java/org/elasticsearch/common/util/CollectionUtils.java255
-rw-r--r--src/main/java/org/elasticsearch/common/util/Comparators.java43
-rw-r--r--src/main/java/org/elasticsearch/common/util/DoubleArray.java47
-rw-r--r--src/main/java/org/elasticsearch/common/util/IntArray.java42
-rw-r--r--src/main/java/org/elasticsearch/common/util/LongArray.java47
-rw-r--r--src/main/java/org/elasticsearch/common/util/MinimalMap.java71
-rw-r--r--src/main/java/org/elasticsearch/common/util/ObjectArray.java37
-rw-r--r--src/main/java/org/elasticsearch/common/util/SlicedDoubleList.java163
-rw-r--r--src/main/java/org/elasticsearch/common/util/SlicedLongList.java164
-rw-r--r--src/main/java/org/elasticsearch/common/util/SlicedObjectList.java106
-rw-r--r--src/main/java/org/elasticsearch/common/util/UnsafeUtils.java114
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/AbstractRunnable.java33
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java133
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java365
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java99
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java149
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java36
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java81
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java69
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java158
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java45
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java78
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java96
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java45
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java63
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java176
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java61
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java208
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java303
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java36
-rw-r--r--src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java32
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/ToXContent.java150
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContent.java77
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java1208
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentBuilderString.java45
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java264
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java127
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java428
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentParser.java194
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentString.java32
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/XContentType.java123
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java107
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java341
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java239
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java105
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java103
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java39
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentGenerator.java90
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java316
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java390
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java103
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java103
-rw-r--r--src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java39
-rw-r--r--src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java71
-rw-r--r--src/main/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandler.java60
-rw-r--r--src/main/java/org/elasticsearch/discovery/ClusterStatePublishResponseHandler.java49
-rw-r--r--src/main/java/org/elasticsearch/discovery/Discovery.java76
-rw-r--r--src/main/java/org/elasticsearch/discovery/DiscoveryException.java36
-rw-r--r--src/main/java/org/elasticsearch/discovery/DiscoveryModule.java58
-rw-r--r--src/main/java/org/elasticsearch/discovery/DiscoveryService.java131
-rw-r--r--src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java33
-rw-r--r--src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java42
-rw-r--r--src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java379
-rw-r--r--src/main/java/org/elasticsearch/discovery/local/LocalDiscoveryModule.java34
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java35
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java969
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ZenDiscoveryModule.java58
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java126
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java449
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java347
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java267
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java114
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingException.java36
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java182
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java568
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java35
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java484
-rw-r--r--src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java201
-rw-r--r--src/main/java/org/elasticsearch/env/Environment.java209
-rw-r--r--src/main/java/org/elasticsearch/env/EnvironmentModule.java39
-rw-r--r--src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java36
-rw-r--r--src/main/java/org/elasticsearch/env/NodeEnvironment.java245
-rw-r--r--src/main/java/org/elasticsearch/env/NodeEnvironmentModule.java48
-rw-r--r--src/main/java/org/elasticsearch/gateway/Gateway.java44
-rw-r--r--src/main/java/org/elasticsearch/gateway/GatewayException.java36
-rw-r--r--src/main/java/org/elasticsearch/gateway/GatewayModule.java50
-rw-r--r--src/main/java/org/elasticsearch/gateway/GatewayService.java303
-rw-r--r--src/main/java/org/elasticsearch/gateway/blobstore/BlobReuseExistingGatewayAllocator.java315
-rw-r--r--src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGateway.java220
-rw-r--r--src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGatewayModule.java38
-rw-r--r--src/main/java/org/elasticsearch/gateway/fs/FsGateway.java82
-rw-r--r--src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java34
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/LocalGateway.java211
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/LocalGatewayAllocator.java465
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/LocalGatewayModule.java54
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/state/meta/LocalAllocateDangledIndices.java238
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java695
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/state/meta/TransportNodesListGatewayMetaState.java242
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/state/shards/LocalGatewayShardsState.java453
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/state/shards/ShardStateInfo.java38
-rw-r--r--src/main/java/org/elasticsearch/gateway/local/state/shards/TransportNodesListGatewayStartedShards.java262
-rw-r--r--src/main/java/org/elasticsearch/gateway/none/NoneGateway.java134
-rw-r--r--src/main/java/org/elasticsearch/gateway/none/NoneGatewayAllocator.java43
-rw-r--r--src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java43
-rw-r--r--src/main/java/org/elasticsearch/gateway/shared/SharedStorageGateway.java197
-rw-r--r--src/main/java/org/elasticsearch/http/BindHttpException.java34
-rw-r--r--src/main/java/org/elasticsearch/http/HttpChannel.java29
-rw-r--r--src/main/java/org/elasticsearch/http/HttpException.java36
-rw-r--r--src/main/java/org/elasticsearch/http/HttpInfo.java101
-rw-r--r--src/main/java/org/elasticsearch/http/HttpRequest.java29
-rw-r--r--src/main/java/org/elasticsearch/http/HttpResponse.java29
-rw-r--r--src/main/java/org/elasticsearch/http/HttpServer.java247
-rw-r--r--src/main/java/org/elasticsearch/http/HttpServerAdapter.java28
-rw-r--r--src/main/java/org/elasticsearch/http/HttpServerModule.java51
-rw-r--r--src/main/java/org/elasticsearch/http/HttpServerTransport.java37
-rw-r--r--src/main/java/org/elasticsearch/http/HttpStats.java85
-rw-r--r--src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java51
-rw-r--r--src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java260
-rw-r--r--src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java172
-rw-r--r--src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java362
-rw-r--r--src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/AbstractIndexComponent.java77
-rw-r--r--src/main/java/org/elasticsearch/index/AlreadyExpiredException.java66
-rw-r--r--src/main/java/org/elasticsearch/index/CloseableIndexComponent.java34
-rw-r--r--src/main/java/org/elasticsearch/index/Index.java85
-rw-r--r--src/main/java/org/elasticsearch/index/IndexComponent.java28
-rw-r--r--src/main/java/org/elasticsearch/index/IndexException.java47
-rw-r--r--src/main/java/org/elasticsearch/index/IndexModule.java42
-rw-r--r--src/main/java/org/elasticsearch/index/IndexNameModule.java39
-rw-r--r--src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/IndexShardMissingException.java39
-rw-r--r--src/main/java/org/elasticsearch/index/LocalNodeId.java40
-rw-r--r--src/main/java/org/elasticsearch/index/LocalNodeIdModule.java39
-rw-r--r--src/main/java/org/elasticsearch/index/VersionType.java117
-rw-r--r--src/main/java/org/elasticsearch/index/aliases/IndexAlias.java58
-rw-r--r--src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java152
-rw-r--r--src/main/java/org/elasticsearch/index/aliases/IndexAliasesServiceModule.java33
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java77
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java51
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java52
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/Analysis.java310
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java557
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AnalysisService.java304
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AnalysisSettingsRequired.java32
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java35
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java29
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java43
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java48
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CJKBigramFilterFactory.java82
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CJKWidthFilterFactory.java40
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java32
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CharFilterFactoryFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CharMatcher.java137
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java68
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java100
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java95
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/CzechStemTokenFilterFactory.java40
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java75
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java80
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java94
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java65
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java48
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/HtmlStripCharFilterFactory.java58
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java76
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java41
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java99
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java56
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java63
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java60
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java123
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java55
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java126
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java99
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java41
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java65
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java40
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java65
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java40
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java65
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java40
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java65
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java40
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java99
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java75
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java72
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PatternCaptureGroupTokenFilterFactory.java61
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java62
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PatternReplaceTokenFilterFactory.java58
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java57
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java48
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java52
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java56
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactory.java47
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactory.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactory.java47
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java114
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java79
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/SnowballTokenFilterFactory.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java63
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java69
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java59
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java45
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java51
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java81
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java175
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java49
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java86
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java116
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java48
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java32
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java34
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TokenizerFactoryFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java59
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java53
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java50
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java52
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java48
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java46
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java192
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java58
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java51
-rw-r--r--src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java71
-rw-r--r--src/main/java/org/elasticsearch/index/cache/IndexCache.java116
-rw-r--r--src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java49
-rw-r--r--src/main/java/org/elasticsearch/index/cache/docset/DocSetCache.java38
-rw-r--r--src/main/java/org/elasticsearch/index/cache/docset/DocSetCacheModule.java44
-rw-r--r--src/main/java/org/elasticsearch/index/cache/docset/none/NoneDocSetCache.java58
-rw-r--r--src/main/java/org/elasticsearch/index/cache/docset/simple/SimpleDocSetCache.java91
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java54
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java48
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java97
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java63
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCacheModule.java32
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java76
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/support/CacheKeyFilter.java111
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/support/FilterCacheValue.java33
-rw-r--r--src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java250
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/IdCache.java46
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/IdCacheModule.java47
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/IdCacheStats.java86
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/IdReaderCache.java35
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/IdReaderTypeCache.java50
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/ShardIdCache.java51
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/ShardIdCacheModule.java32
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdCache.java353
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderCache.java86
-rw-r--r--src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderTypeCache.java120
-rw-r--r--src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java38
-rw-r--r--src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java44
-rw-r--r--src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java58
-rw-r--r--src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java85
-rw-r--r--src/main/java/org/elasticsearch/index/codec/CodecModule.java183
-rw-r--r--src/main/java/org/elasticsearch/index/codec/CodecService.java116
-rw-r--r--src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java74
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/AbstractDocValuesFormatProvider.java39
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/DefaultDocValuesFormatProvider.java45
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/DiskDocValuesFormatProvider.java45
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatProvider.java91
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatService.java91
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormats.java59
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/MemoryDocValuesFormatProvider.java45
-rw-r--r--src/main/java/org/elasticsearch/index/codec/docvaluesformat/PreBuiltDocValuesFormatProvider.java79
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/AbstractPostingsFormatProvider.java39
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterLucenePostingsFormatProvider.java106
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java459
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormatProvider.java57
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/DefaultPostingsFormatProvider.java66
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/DirectPostingsFormatProvider.java72
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java82
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/MemoryPostingsFormatProvider.java68
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/PostingFormats.java104
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatProvider.java111
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatService.java92
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/PreBuiltPostingsFormatProvider.java79
-rw-r--r--src/main/java/org/elasticsearch/index/codec/postingsformat/PulsingPostingsFormatProvider.java81
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/AbstractESDeletionPolicy.java72
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/DeletionPolicyModule.java54
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java63
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java63
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java220
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java74
-rw-r--r--src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java57
-rw-r--r--src/main/java/org/elasticsearch/index/engine/CloseEngineException.java38
-rw-r--r--src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java46
-rw-r--r--src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/engine/Engine.java937
-rw-r--r--src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/EngineClosedException.java42
-rw-r--r--src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java35
-rw-r--r--src/main/java/org/elasticsearch/index/engine/EngineException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/engine/EngineModule.java48
-rw-r--r--src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java36
-rw-r--r--src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java38
-rw-r--r--src/main/java/org/elasticsearch/index/engine/IgnoreOnRecoveryEngineException.java26
-rw-r--r--src/main/java/org/elasticsearch/index/engine/IndexEngine.java33
-rw-r--r--src/main/java/org/elasticsearch/index/engine/IndexEngineModule.java58
-rw-r--r--src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java46
-rw-r--r--src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java39
-rw-r--r--src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/engine/Segment.java160
-rw-r--r--src/main/java/org/elasticsearch/index/engine/SegmentsStats.java120
-rw-r--r--src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java33
-rw-r--r--src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java51
-rw-r--r--src/main/java/org/elasticsearch/index/engine/internal/InternalEngine.java1653
-rw-r--r--src/main/java/org/elasticsearch/index/engine/internal/InternalEngineModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngine.java48
-rw-r--r--src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngineModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/AbstractAtomicNumericFieldData.java95
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/AbstractIndexFieldData.java117
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java114
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/AtomicGeoPointFieldData.java52
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/AtomicNumericFieldData.java30
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/BytesValues.java189
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/DoubleValues.java174
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java156
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java95
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/FilterDoubleValues.java50
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/FilterLongValues.java50
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/GeoPointValues.java114
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java191
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java189
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataModule.java38
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java249
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/IndexGeoPointFieldData.java37
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java196
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/LongValues.java174
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java98
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java399
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java103
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ShardFieldDataModule.java32
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java69
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefOrdValComparator.java386
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefValComparator.java123
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleScriptDataComparator.java150
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparator.java74
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorBase.java69
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java56
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparator.java73
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java55
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparator.java184
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparatorSource.java61
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparator.java72
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorBase.java76
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java55
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NestedWrappableComparator.java42
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NumberComparatorBase.java44
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/SortMode.java411
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/StringScriptDataComparator.java147
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ordinals/DocIdOrdinals.java137
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ordinals/EmptyOrdinals.java123
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java198
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ordinals/Ordinals.java150
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java491
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java150
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBytesIndexFieldData.java148
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointIndexFieldData.java145
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java119
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java54
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericAtomicFieldData.java167
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java81
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/DenseDoubleValues.java37
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/DenseLongValues.java37
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java71
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java106
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayAtomicFieldData.java359
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayIndexFieldData.java143
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/EmptyByteValuesWithOrdinals.java56
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesAtomicFieldData.java215
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesIndexFieldData.java116
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayAtomicFieldData.java358
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayIndexFieldData.java142
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVAtomicFieldData.java105
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java77
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedAtomicFieldData.java280
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedIndexFieldData.java153
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayAtomicFieldData.java269
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayIndexFieldData.java116
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java57
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVAtomicFieldData.java146
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java59
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayAtomicFieldData.java381
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayIndexFieldData.java274
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java222
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java260
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVAtomicFieldData.java281
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java49
-rw-r--r--src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesIndexFieldData.java53
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java36
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java55
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java151
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/JustSourceFieldsVisitor.java37
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java37
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java64
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/UidAndRoutingFieldsVisitor.java56
-rw-r--r--src/main/java/org/elasticsearch/index/fieldvisitor/UidAndSourceFieldsVisitor.java41
-rw-r--r--src/main/java/org/elasticsearch/index/flush/FlushStats.java114
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/CommitPoint.java154
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/CommitPoints.java204
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java39
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexGateway.java34
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java52
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java153
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java43
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java395
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java32
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java220
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/SnapshotStatus.java151
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexGateway.java87
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexShardGateway.java879
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java49
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java89
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGateway.java58
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGatewayModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/local/LocalIndexShardGateway.java323
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java58
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java117
-rw-r--r--src/main/java/org/elasticsearch/index/get/GetField.java96
-rw-r--r--src/main/java/org/elasticsearch/index/get/GetResult.java313
-rw-r--r--src/main/java/org/elasticsearch/index/get/GetStats.java155
-rw-r--r--src/main/java/org/elasticsearch/index/get/ShardGetModule.java32
-rw-r--r--src/main/java/org/elasticsearch/index/get/ShardGetService.java408
-rw-r--r--src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java107
-rw-r--r--src/main/java/org/elasticsearch/index/indexing/IndexingStats.java255
-rw-r--r--src/main/java/org/elasticsearch/index/indexing/ShardIndexingModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java273
-rw-r--r--src/main/java/org/elasticsearch/index/indexing/slowlog/ShardSlowLogIndexingService.java172
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/ContentPath.java112
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java146
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java710
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java292
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/DocumentTypeListener.java40
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/FieldMapper.java259
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java46
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/FieldMappers.java89
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java242
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/InternalMapper.java28
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/Mapper.java154
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java172
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java34
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MapperException.java36
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java42
-rwxr-xr-xsrc/main/java/org/elasticsearch/index/mapper/MapperService.java1068
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java33
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MergeContext.java59
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java46
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/ObjectMapperListener.java48
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java124
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/ParseContext.java416
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java140
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/RootMapper.java41
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/SourceToParse.java163
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java35
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/Uid.java205
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java1109
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java241
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java242
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java387
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java442
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java586
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java423
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java429
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java391
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java372
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java514
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java388
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java496
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java200
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java382
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/default-mapping.json4
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java761
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java307
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java360
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/AnalyzerMapper.java169
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/BoostFieldMapper.java324
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/EnabledAttributeMapper.java33
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java389
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java231
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java360
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java269
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java187
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java431
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java254
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java273
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java218
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java265
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java216
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java369
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/object/ArrayValueMapperParser.java29
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java240
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java964
-rw-r--r--src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java283
-rw-r--r--src/main/java/org/elasticsearch/index/merge/EnableMergeScheduler.java86
-rw-r--r--src/main/java/org/elasticsearch/index/merge/MergeStats.java180
-rw-r--r--src/main/java/org/elasticsearch/index/merge/Merges.java107
-rw-r--r--src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java53
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java66
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/IndexUpgraderMergePolicy.java232
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java191
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java176
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/MergePolicyModule.java43
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java32
-rw-r--r--src/main/java/org/elasticsearch/index/merge/policy/TieredMergePolicyProvider.java241
-rw-r--r--src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java131
-rw-r--r--src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java45
-rw-r--r--src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java130
-rw-r--r--src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java113
-rw-r--r--src/main/java/org/elasticsearch/index/percolator/PercolatorException.java36
-rw-r--r--src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java337
-rw-r--r--src/main/java/org/elasticsearch/index/percolator/PercolatorShardModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java103
-rw-r--r--src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java175
-rw-r--r--src/main/java/org/elasticsearch/index/percolator/stats/ShardPercolateService.java93
-rw-r--r--src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java96
-rw-r--r--src/main/java/org/elasticsearch/index/query/AndFilterParser.java125
-rw-r--r--src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java72
-rw-r--r--src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java72
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java162
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoolFilterParser.java147
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java186
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoolQueryParser.java150
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoostableQueryBuilder.java33
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java97
-rw-r--r--src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java103
-rw-r--r--src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java200
-rw-r--r--src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java233
-rw-r--r--src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java87
-rw-r--r--src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java111
-rw-r--r--src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryBuilder.java67
-rw-r--r--src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryParser.java91
-rw-r--r--src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryBuilder.java163
-rw-r--r--src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryParser.java190
-rw-r--r--src/main/java/org/elasticsearch/index/query/CustomScoreQueryBuilder.java146
-rw-r--r--src/main/java/org/elasticsearch/index/query/CustomScoreQueryParser.java116
-rw-r--r--src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java101
-rw-r--r--src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java116
-rw-r--r--src/main/java/org/elasticsearch/index/query/ExistsFilterBuilder.java59
-rw-r--r--src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java124
-rw-r--r--src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java97
-rw-r--r--src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java72
-rw-r--r--src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java104
-rw-r--r--src/main/java/org/elasticsearch/index/query/FilterBuilder.java36
-rw-r--r--src/main/java/org/elasticsearch/index/query/FilterBuilders.java563
-rw-r--r--src/main/java/org/elasticsearch/index/query/FilterParser.java48
-rw-r--r--src/main/java/org/elasticsearch/index/query/FilterParserFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/index/query/FilteredQueryBuilder.java85
-rw-r--r--src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java153
-rw-r--r--src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryBuilder.java147
-rw-r--r--src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryParser.java163
-rw-r--r--src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryBuilder.java159
-rw-r--r--src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryParser.java162
-rw-r--r--src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java129
-rw-r--r--src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java136
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java201
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java193
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java145
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java177
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java184
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java219
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java115
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java160
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java217
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java197
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java167
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java181
-rw-r--r--src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java263
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasChildFilterBuilder.java99
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java156
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java101
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java159
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java94
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java174
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java84
-rw-r--r--src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java178
-rw-r--r--src/main/java/org/elasticsearch/index/query/IdsFilterBuilder.java96
-rw-r--r--src/main/java/org/elasticsearch/index/query/IdsFilterParser.java117
-rw-r--r--src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java105
-rw-r--r--src/main/java/org/elasticsearch/index/query/IdsQueryParser.java128
-rw-r--r--src/main/java/org/elasticsearch/index/query/IndexQueryParserModule.java198
-rw-r--r--src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java339
-rw-r--r--src/main/java/org/elasticsearch/index/query/IndicesFilterBuilder.java89
-rw-r--r--src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java156
-rw-r--r--src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java87
-rw-r--r--src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java154
-rw-r--r--src/main/java/org/elasticsearch/index/query/LimitFilterBuilder.java40
-rw-r--r--src/main/java/org/elasticsearch/index/query/LimitFilterParser.java67
-rw-r--r--src/main/java/org/elasticsearch/index/query/MatchAllFilterBuilder.java43
-rw-r--r--src/main/java/org/elasticsearch/index/query/MatchAllFilterParser.java56
-rw-r--r--src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java65
-rw-r--r--src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java77
-rw-r--r--src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java278
-rw-r--r--src/main/java/org/elasticsearch/index/query/MatchQueryParser.java181
-rw-r--r--src/main/java/org/elasticsearch/index/query/MissingFilterBuilder.java84
-rw-r--r--src/main/java/org/elasticsearch/index/query/MissingFilterParser.java180
-rw-r--r--src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryBuilder.java232
-rw-r--r--src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryParser.java158
-rw-r--r--src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java245
-rw-r--r--src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java169
-rw-r--r--src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java300
-rw-r--r--src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java197
-rw-r--r--src/main/java/org/elasticsearch/index/query/MultiTermQueryBuilder.java26
-rw-r--r--src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java101
-rw-r--r--src/main/java/org/elasticsearch/index/query/NestedFilterParser.java174
-rw-r--r--src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java98
-rw-r--r--src/main/java/org/elasticsearch/index/query/NestedQueryParser.java197
-rw-r--r--src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java69
-rw-r--r--src/main/java/org/elasticsearch/index/query/NotFilterParser.java104
-rw-r--r--src/main/java/org/elasticsearch/index/query/NumericRangeFilterBuilder.java381
-rw-r--r--src/main/java/org/elasticsearch/index/query/NumericRangeFilterParser.java134
-rw-r--r--src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java93
-rw-r--r--src/main/java/org/elasticsearch/index/query/OrFilterParser.java124
-rw-r--r--src/main/java/org/elasticsearch/index/query/ParsedFilter.java44
-rw-r--r--src/main/java/org/elasticsearch/index/query/ParsedQuery.java62
-rw-r--r--src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java91
-rw-r--r--src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java113
-rw-r--r--src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java97
-rw-r--r--src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java128
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryBuilder.java35
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryBuilders.java787
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java82
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryFilterParser.java53
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryParseContext.java353
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryParser.java45
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryParserFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryParsingException.java43
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java397
-rw-r--r--src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java238
-rw-r--r--src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java400
-rw-r--r--src/main/java/org/elasticsearch/index/query/RangeFilterParser.java173
-rw-r--r--src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java418
-rw-r--r--src/main/java/org/elasticsearch/index/query/RangeQueryParser.java129
-rw-r--r--src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java114
-rw-r--r--src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java139
-rw-r--r--src/main/java/org/elasticsearch/index/query/RegexpFlag.java136
-rw-r--r--src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java111
-rw-r--r--src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java138
-rw-r--r--src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java116
-rw-r--r--src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java198
-rw-r--r--src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java138
-rw-r--r--src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java77
-rw-r--r--src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java207
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java71
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java98
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java45
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java71
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java107
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java111
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java84
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java102
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java76
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java97
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanQueryBuilder.java27
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java95
-rw-r--r--src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java114
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java139
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermFilterParser.java138
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java147
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermQueryParser.java118
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java169
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermsFilterParser.java332
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java138
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java192
-rw-r--r--src/main/java/org/elasticsearch/index/query/TermsQueryParser.java143
-rw-r--r--src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java115
-rw-r--r--src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java137
-rw-r--r--src/main/java/org/elasticsearch/index/query/TypeFilterBuilder.java40
-rw-r--r--src/main/java/org/elasticsearch/index/query/TypeFilterParser.java76
-rw-r--r--src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java107
-rw-r--r--src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java116
-rw-r--r--src/main/java/org/elasticsearch/index/query/WrapperFilterBuilder.java62
-rw-r--r--src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java71
-rw-r--r--src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java74
-rw-r--r--src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java71
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/DecayFunction.java54
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java78
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java418
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreModule.java61
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java162
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java201
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java28
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilders.java80
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParser.java40
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java57
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionBuilder.java36
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java64
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java57
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorParser.java54
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java36
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java65
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java35
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java64
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionBuilder.java61
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java87
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java99
-rw-r--r--src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java93
-rw-r--r--src/main/java/org/elasticsearch/index/query/support/QueryParsers.java122
-rw-r--r--src/main/java/org/elasticsearch/index/refresh/RefreshStats.java114
-rw-r--r--src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java266
-rw-r--r--src/main/java/org/elasticsearch/index/search/MatchQuery.java392
-rw-r--r--src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java92
-rw-r--r--src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java369
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java288
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java463
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ConstantScorer.java77
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java135
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/DeleteByQueryWrappingFilter.java129
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java247
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ParentIdCollector.java59
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java114
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ParentQuery.java315
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/ScoreType.java56
-rw-r--r--src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java389
-rw-r--r--src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java194
-rw-r--r--src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java211
-rw-r--r--src/main/java/org/elasticsearch/index/search/geo/GeoPolygonFilter.java113
-rw-r--r--src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java140
-rw-r--r--src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java164
-rw-r--r--src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java297
-rw-r--r--src/main/java/org/elasticsearch/index/search/nested/NestedDocsFilter.java59
-rw-r--r--src/main/java/org/elasticsearch/index/search/nested/NestedFieldComparatorSource.java396
-rw-r--r--src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java68
-rw-r--r--src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java93
-rw-r--r--src/main/java/org/elasticsearch/index/search/shape/ShapeModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/search/slowlog/ShardSlowLogSearchService.java224
-rw-r--r--src/main/java/org/elasticsearch/index/search/stats/SearchStats.java267
-rw-r--r--src/main/java/org/elasticsearch/index/search/stats/ShardSearchModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/search/stats/ShardSearchService.java204
-rw-r--r--src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java47
-rw-r--r--src/main/java/org/elasticsearch/index/service/IndexService.java91
-rw-r--r--src/main/java/org/elasticsearch/index/service/InternalIndexService.java452
-rw-r--r--src/main/java/org/elasticsearch/index/settings/IndexDynamicSettings.java39
-rw-r--r--src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java134
-rw-r--r--src/main/java/org/elasticsearch/index/settings/IndexSettings.java41
-rw-r--r--src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java46
-rw-r--r--src/main/java/org/elasticsearch/index/settings/IndexSettingsProvider.java44
-rw-r--r--src/main/java/org/elasticsearch/index/settings/IndexSettingsService.java79
-rw-r--r--src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java69
-rw-r--r--src/main/java/org/elasticsearch/index/shard/DocsStats.java95
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java49
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java33
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java32
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java29
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardException.java43
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardModule.java48
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java30
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java30
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java30
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java30
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java30
-rw-r--r--src/main/java/org/elasticsearch/index/shard/IndexShardState.java60
-rw-r--r--src/main/java/org/elasticsearch/index/shard/ShardId.java113
-rw-r--r--src/main/java/org/elasticsearch/index/shard/ShardUtils.java57
-rw-r--r--src/main/java/org/elasticsearch/index/shard/service/IndexShard.java175
-rw-r--r--src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java1045
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java79
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java61
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java112
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java56
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java106
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java73
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/Similarities.java55
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/SimilarityLookupService.java86
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java95
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java60
-rw-r--r--src/main/java/org/elasticsearch/index/similarity/SimilarityService.java107
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java63
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java36
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreFailedException.java35
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java130
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java36
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotFailedException.java35
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotModule.java33
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java183
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java760
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java413
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java86
-rw-r--r--src/main/java/org/elasticsearch/index/snapshots/blobstore/RateLimitingInputStream.java101
-rw-r--r--src/main/java/org/elasticsearch/index/store/DirectoryService.java37
-rw-r--r--src/main/java/org/elasticsearch/index/store/DirectoryUtils.java89
-rw-r--r--src/main/java/org/elasticsearch/index/store/IndexStore.java73
-rw-r--r--src/main/java/org/elasticsearch/index/store/IndexStoreModule.java79
-rw-r--r--src/main/java/org/elasticsearch/index/store/Store.java662
-rw-r--r--src/main/java/org/elasticsearch/index/store/StoreException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java116
-rw-r--r--src/main/java/org/elasticsearch/index/store/StoreModule.java72
-rw-r--r--src/main/java/org/elasticsearch/index/store/StoreStats.java118
-rw-r--r--src/main/java/org/elasticsearch/index/store/distributor/AbstractDistributor.java75
-rw-r--r--src/main/java/org/elasticsearch/index/store/distributor/Distributor.java44
-rw-r--r--src/main/java/org/elasticsearch/index/store/distributor/LeastUsedDistributor.java67
-rw-r--r--src/main/java/org/elasticsearch/index/store/distributor/RandomWeightedDistributor.java68
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java142
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/FsIndexStore.java135
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java47
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java45
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java47
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java45
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java47
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java45
-rw-r--r--src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/store/memory/ByteBufferDirectoryService.java91
-rw-r--r--src/main/java/org/elasticsearch/index/store/memory/ByteBufferIndexStore.java76
-rw-r--r--src/main/java/org/elasticsearch/index/store/memory/MemoryIndexStoreModule.java41
-rw-r--r--src/main/java/org/elasticsearch/index/store/ram/RamDirectoryService.java78
-rw-r--r--src/main/java/org/elasticsearch/index/store/ram/RamIndexStore.java63
-rw-r--r--src/main/java/org/elasticsearch/index/store/ram/RamIndexStoreModule.java34
-rw-r--r--src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java128
-rw-r--r--src/main/java/org/elasticsearch/index/store/support/ForceSyncDirectory.java34
-rw-r--r--src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorModule.java32
-rw-r--r--src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorService.java84
-rw-r--r--src/main/java/org/elasticsearch/index/translog/Translog.java656
-rw-r--r--src/main/java/org/elasticsearch/index/translog/TranslogException.java37
-rw-r--r--src/main/java/org/elasticsearch/index/translog/TranslogModule.java49
-rw-r--r--src/main/java/org/elasticsearch/index/translog/TranslogService.java211
-rw-r--r--src/main/java/org/elasticsearch/index/translog/TranslogStats.java79
-rw-r--r--src/main/java/org/elasticsearch/index/translog/TranslogStreams.java84
-rw-r--r--src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java214
-rw-r--r--src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java145
-rw-r--r--src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java416
-rw-r--r--src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java79
-rw-r--r--src/main/java/org/elasticsearch/index/translog/fs/RafReference.java81
-rw-r--r--src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java124
-rw-r--r--src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java62
-rw-r--r--src/main/java/org/elasticsearch/index/warmer/WarmerStats.java127
-rw-r--r--src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java39
-rw-r--r--src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java43
-rw-r--r--src/main/java/org/elasticsearch/indices/IndexCreationException.java33
-rw-r--r--src/main/java/org/elasticsearch/indices/IndexMissingException.java39
-rw-r--r--src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java40
-rw-r--r--src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java44
-rw-r--r--src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java45
-rw-r--r--src/main/java/org/elasticsearch/indices/IndicesLifecycle.java151
-rw-r--r--src/main/java/org/elasticsearch/indices/IndicesModule.java87
-rw-r--r--src/main/java/org/elasticsearch/indices/IndicesService.java62
-rw-r--r--src/main/java/org/elasticsearch/indices/InternalIndicesLifecycle.java174
-rw-r--r--src/main/java/org/elasticsearch/indices/InternalIndicesService.java377
-rw-r--r--src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java39
-rw-r--r--src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java40
-rw-r--r--src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java44
-rw-r--r--src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java39
-rw-r--r--src/main/java/org/elasticsearch/indices/NodeIndicesStats.java254
-rw-r--r--src/main/java/org/elasticsearch/indices/TypeMissingException.java46
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/HunspellService.java257
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisModule.java47
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisService.java186
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java419
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java114
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java83
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java324
-rw-r--r--src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java166
-rw-r--r--src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java209
-rw-r--r--src/main/java/org/elasticsearch/indices/cache/filter/terms/IndicesTermsFilterCache.java162
-rw-r--r--src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java83
-rw-r--r--src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java842
-rw-r--r--src/main/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerService.java40
-rw-r--r--src/main/java/org/elasticsearch/indices/fielddata/breaker/FieldDataBreakerStats.java102
-rw-r--r--src/main/java/org/elasticsearch/indices/fielddata/breaker/InternalCircuitBreakerService.java122
-rw-r--r--src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java225
-rw-r--r--src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java55
-rw-r--r--src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java299
-rw-r--r--src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java154
-rw-r--r--src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java91
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/DelayRecoveryException.java38
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/IgnoreRecoveryException.java38
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java49
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java84
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java38
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java123
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java130
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java67
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java66
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java122
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java194
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java342
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java151
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java660
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java85
-rw-r--r--src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java127
-rw-r--r--src/main/java/org/elasticsearch/indices/store/IndicesStore.java198
-rw-r--r--src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java427
-rw-r--r--src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java352
-rw-r--r--src/main/java/org/elasticsearch/indices/warmer/IndicesWarmer.java78
-rw-r--r--src/main/java/org/elasticsearch/indices/warmer/InternalIndicesWarmer.java112
-rw-r--r--src/main/java/org/elasticsearch/monitor/MonitorModule.java141
-rw-r--r--src/main/java/org/elasticsearch/monitor/MonitorService.java97
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/AbstractDump.java95
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/Dump.java47
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpContributionFailedException.java41
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpContributor.java30
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpContributorFactory.java30
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpException.java36
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpGenerationFailedException.java34
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java54
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java121
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java49
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java86
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java73
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java77
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java114
-rw-r--r--src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java136
-rw-r--r--src/main/java/org/elasticsearch/monitor/fs/FsProbe.java27
-rw-r--r--src/main/java/org/elasticsearch/monitor/fs/FsService.java55
-rw-r--r--src/main/java/org/elasticsearch/monitor/fs/FsStats.java371
-rw-r--r--src/main/java/org/elasticsearch/monitor/fs/JmxFsProbe.java59
-rw-r--r--src/main/java/org/elasticsearch/monitor/fs/SigarFsProbe.java103
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java174
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/GcNames.java55
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java280
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java462
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java262
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/JvmService.java59
-rw-r--r--src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java981
-rw-r--r--src/main/java/org/elasticsearch/monitor/network/JmxNetworkProbe.java53
-rw-r--r--src/main/java/org/elasticsearch/monitor/network/NetworkInfo.java161
-rw-r--r--src/main/java/org/elasticsearch/monitor/network/NetworkProbe.java32
-rw-r--r--src/main/java/org/elasticsearch/monitor/network/NetworkService.java123
-rw-r--r--src/main/java/org/elasticsearch/monitor/network/NetworkStats.java249
-rw-r--r--src/main/java/org/elasticsearch/monitor/network/SigarNetworkProbe.java166
-rw-r--r--src/main/java/org/elasticsearch/monitor/os/JmxOsProbe.java47
-rw-r--r--src/main/java/org/elasticsearch/monitor/os/OsInfo.java377
-rw-r--r--src/main/java/org/elasticsearch/monitor/os/OsProbe.java30
-rw-r--r--src/main/java/org/elasticsearch/monitor/os/OsService.java65
-rw-r--r--src/main/java/org/elasticsearch/monitor/os/OsStats.java441
-rw-r--r--src/main/java/org/elasticsearch/monitor/os/SigarOsProbe.java133
-rw-r--r--src/main/java/org/elasticsearch/monitor/process/JmxProcessProbe.java101
-rw-r--r--src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java134
-rw-r--r--src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java30
-rw-r--r--src/main/java/org/elasticsearch/monitor/process/ProcessService.java64
-rw-r--r--src/main/java/org/elasticsearch/monitor/process/ProcessStats.java325
-rw-r--r--src/main/java/org/elasticsearch/monitor/process/SigarProcessProbe.java79
-rw-r--r--src/main/java/org/elasticsearch/monitor/sigar/SigarService.java66
-rw-r--r--src/main/java/org/elasticsearch/node/Node.java65
-rw-r--r--src/main/java/org/elasticsearch/node/NodeBuilder.java168
-rw-r--r--src/main/java/org/elasticsearch/node/NodeClosedException.java35
-rw-r--r--src/main/java/org/elasticsearch/node/internal/InternalNode.java406
-rw-r--r--src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java145
-rw-r--r--src/main/java/org/elasticsearch/node/internal/NodeModule.java44
-rw-r--r--src/main/java/org/elasticsearch/node/package-info.java24
-rw-r--r--src/main/java/org/elasticsearch/node/service/NodeService.java169
-rw-r--r--src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java123
-rw-r--r--src/main/java/org/elasticsearch/percolator/PercolateContext.java732
-rw-r--r--src/main/java/org/elasticsearch/percolator/PercolateException.java46
-rw-r--r--src/main/java/org/elasticsearch/percolator/PercolatorModule.java32
-rw-r--r--src/main/java/org/elasticsearch/percolator/PercolatorService.java913
-rw-r--r--src/main/java/org/elasticsearch/percolator/QueryCollector.java393
-rw-r--r--src/main/java/org/elasticsearch/plugins/AbstractPlugin.java122
-rw-r--r--src/main/java/org/elasticsearch/plugins/IndexPluginsModule.java67
-rw-r--r--src/main/java/org/elasticsearch/plugins/Plugin.java105
-rw-r--r--src/main/java/org/elasticsearch/plugins/PluginManager.java546
-rw-r--r--src/main/java/org/elasticsearch/plugins/PluginsModule.java68
-rw-r--r--src/main/java/org/elasticsearch/plugins/PluginsService.java519
-rw-r--r--src/main/java/org/elasticsearch/plugins/ShardsPluginsModule.java67
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoriesModule.java67
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoriesService.java503
-rw-r--r--src/main/java/org/elasticsearch/repositories/Repository.java112
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoryException.java47
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoryMissingException.java38
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoryModule.java92
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoryName.java71
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoryNameModule.java39
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositorySettings.java46
-rw-r--r--src/main/java/org/elasticsearch/repositories/RepositoryTypesRegistry.java49
-rw-r--r--src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java648
-rw-r--r--src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreSnapshot.java505
-rw-r--r--src/main/java/org/elasticsearch/repositories/fs/FsRepository.java117
-rw-r--r--src/main/java/org/elasticsearch/repositories/fs/FsRepositoryModule.java46
-rw-r--r--src/main/java/org/elasticsearch/repositories/uri/URLRepository.java109
-rw-r--r--src/main/java/org/elasticsearch/repositories/uri/URLRepositoryModule.java46
-rw-r--r--src/main/java/org/elasticsearch/rest/AbstractRestResponse.java81
-rw-r--r--src/main/java/org/elasticsearch/rest/AbstractRestResponseActionListener.java52
-rw-r--r--src/main/java/org/elasticsearch/rest/AcknowledgedRestResponseActionListener.java64
-rw-r--r--src/main/java/org/elasticsearch/rest/BaseRestHandler.java37
-rw-r--r--src/main/java/org/elasticsearch/rest/BytesRestResponse.java64
-rw-r--r--src/main/java/org/elasticsearch/rest/HttpRedirectRestResponse.java35
-rw-r--r--src/main/java/org/elasticsearch/rest/RestChannel.java28
-rw-r--r--src/main/java/org/elasticsearch/rest/RestController.java238
-rw-r--r--src/main/java/org/elasticsearch/rest/RestFilter.java48
-rw-r--r--src/main/java/org/elasticsearch/rest/RestFilterChain.java32
-rw-r--r--src/main/java/org/elasticsearch/rest/RestHandler.java28
-rw-r--r--src/main/java/org/elasticsearch/rest/RestModule.java51
-rw-r--r--src/main/java/org/elasticsearch/rest/RestRequest.java171
-rw-r--r--src/main/java/org/elasticsearch/rest/RestResponse.java71
-rw-r--r--src/main/java/org/elasticsearch/rest/RestStatus.java493
-rw-r--r--src/main/java/org/elasticsearch/rest/StringRestResponse.java43
-rw-r--r--src/main/java/org/elasticsearch/rest/Utf8RestResponse.java116
-rw-r--r--src/main/java/org/elasticsearch/rest/XContentRestResponse.java148
-rw-r--r--src/main/java/org/elasticsearch/rest/XContentThrowableRestResponse.java82
-rw-r--r--src/main/java/org/elasticsearch/rest/action/RestActionModule.java230
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java109
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java88
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java130
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/node/restart/RestNodesRestartAction.java88
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java91
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java138
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java52
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java86
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java55
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java89
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java85
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java90
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java91
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java82
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java55
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java84
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java79
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java113
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java75
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java77
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java148
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java46
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java53
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java134
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java112
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java83
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java137
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java98
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java129
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java52
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java66
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java52
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java84
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java79
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java96
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/gateway/snapshot/RestGatewaySnapshotAction.java84
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/delete/RestDeleteMappingAction.java61
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java130
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java128
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java78
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java52
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java110
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java95
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java88
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java103
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java92
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java127
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/status/RestIndicesStatusAction.java97
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java46
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java99
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java76
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java74
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java148
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java54
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java106
-rw-r--r--src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java70
-rw-r--r--src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java188
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java65
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java124
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java188
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java62
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java122
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java121
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java448
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java115
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java307
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java102
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java181
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java254
-rw-r--r--src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java273
-rw-r--r--src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java127
-rw-r--r--src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java117
-rw-r--r--src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java134
-rw-r--r--src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java174
-rw-r--r--src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java104
-rw-r--r--src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java102
-rw-r--r--src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java86
-rw-r--r--src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java109
-rw-r--r--src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java147
-rw-r--r--src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java120
-rw-r--r--src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java128
-rw-r--r--src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java103
-rw-r--r--src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java176
-rw-r--r--src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java91
-rw-r--r--src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java107
-rw-r--r--src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java308
-rw-r--r--src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java112
-rw-r--r--src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java121
-rw-r--r--src/main/java/org/elasticsearch/rest/action/support/RestActions.java125
-rw-r--r--src/main/java/org/elasticsearch/rest/action/support/RestTable.java291
-rw-r--r--src/main/java/org/elasticsearch/rest/action/support/RestXContentBuilder.java84
-rw-r--r--src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java96
-rw-r--r--src/main/java/org/elasticsearch/rest/action/termvector/RestTermVectorAction.java142
-rw-r--r--src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java175
-rw-r--r--src/main/java/org/elasticsearch/rest/support/RestUtils.java219
-rw-r--r--src/main/java/org/elasticsearch/river/AbstractRiverComponent.java51
-rw-r--r--src/main/java/org/elasticsearch/river/River.java41
-rw-r--r--src/main/java/org/elasticsearch/river/RiverComponent.java28
-rw-r--r--src/main/java/org/elasticsearch/river/RiverException.java47
-rw-r--r--src/main/java/org/elasticsearch/river/RiverIndexName.java50
-rw-r--r--src/main/java/org/elasticsearch/river/RiverModule.java93
-rw-r--r--src/main/java/org/elasticsearch/river/RiverName.java73
-rw-r--r--src/main/java/org/elasticsearch/river/RiverNameModule.java39
-rw-r--r--src/main/java/org/elasticsearch/river/RiverSettings.java48
-rw-r--r--src/main/java/org/elasticsearch/river/RiversManager.java68
-rw-r--r--src/main/java/org/elasticsearch/river/RiversModule.java64
-rw-r--r--src/main/java/org/elasticsearch/river/RiversPluginsModule.java51
-rw-r--r--src/main/java/org/elasticsearch/river/RiversService.java314
-rw-r--r--src/main/java/org/elasticsearch/river/RiversTypesRegistry.java39
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java132
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/RiverClusterChangedEvent.java53
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java171
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/RiverClusterState.java96
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/RiverClusterStateListener.java28
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/RiverClusterStateUpdateTask.java28
-rw-r--r--src/main/java/org/elasticsearch/river/cluster/RiverNodeHelper.java55
-rw-r--r--src/main/java/org/elasticsearch/river/dummy/DummyRiver.java48
-rw-r--r--src/main/java/org/elasticsearch/river/dummy/DummyRiverModule.java34
-rw-r--r--src/main/java/org/elasticsearch/river/routing/RiverRouting.java87
-rw-r--r--src/main/java/org/elasticsearch/river/routing/RiversRouter.java256
-rw-r--r--src/main/java/org/elasticsearch/river/routing/RiversRouting.java123
-rw-r--r--src/main/java/org/elasticsearch/script/AbstractDoubleSearchScript.java45
-rw-r--r--src/main/java/org/elasticsearch/script/AbstractExecutableScript.java32
-rw-r--r--src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java45
-rw-r--r--src/main/java/org/elasticsearch/script/AbstractLongSearchScript.java45
-rw-r--r--src/main/java/org/elasticsearch/script/AbstractSearchScript.java150
-rw-r--r--src/main/java/org/elasticsearch/script/CompiledScript.java43
-rw-r--r--src/main/java/org/elasticsearch/script/ExecutableScript.java39
-rw-r--r--src/main/java/org/elasticsearch/script/ExplainableSearchScript.java36
-rw-r--r--src/main/java/org/elasticsearch/script/NativeScriptEngineService.java91
-rw-r--r--src/main/java/org/elasticsearch/script/NativeScriptFactory.java44
-rw-r--r--src/main/java/org/elasticsearch/script/ScriptEngineService.java47
-rw-r--r--src/main/java/org/elasticsearch/script/ScriptException.java36
-rw-r--r--src/main/java/org/elasticsearch/script/ScriptModule.java89
-rw-r--r--src/main/java/org/elasticsearch/script/ScriptService.java286
-rw-r--r--src/main/java/org/elasticsearch/script/SearchScript.java77
-rw-r--r--src/main/java/org/elasticsearch/script/mvel/MvelScriptEngineService.java214
-rw-r--r--src/main/java/org/elasticsearch/search/Scroll.java81
-rw-r--r--src/main/java/org/elasticsearch/search/SearchContextException.java53
-rw-r--r--src/main/java/org/elasticsearch/search/SearchContextMissingException.java39
-rw-r--r--src/main/java/org/elasticsearch/search/SearchException.java44
-rw-r--r--src/main/java/org/elasticsearch/search/SearchHit.java195
-rw-r--r--src/main/java/org/elasticsearch/search/SearchHitField.java67
-rw-r--r--src/main/java/org/elasticsearch/search/SearchHits.java66
-rw-r--r--src/main/java/org/elasticsearch/search/SearchModule.java74
-rw-r--r--src/main/java/org/elasticsearch/search/SearchParseElement.java31
-rw-r--r--src/main/java/org/elasticsearch/search/SearchParseException.java42
-rw-r--r--src/main/java/org/elasticsearch/search/SearchPhase.java40
-rw-r--r--src/main/java/org/elasticsearch/search/SearchPhaseResult.java34
-rw-r--r--src/main/java/org/elasticsearch/search/SearchService.java863
-rw-r--r--src/main/java/org/elasticsearch/search/SearchShardTarget.java150
-rw-r--r--src/main/java/org/elasticsearch/search/TransportSearchModule.java42
-rw-r--r--src/main/java/org/elasticsearch/search/action/SearchServiceListener.java30
-rw-r--r--src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java800
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java36
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/Aggregation.java31
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationBinaryParseElement.java47
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java132
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java124
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationExecutionException.java35
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java35
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java96
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationParseElement.java63
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java184
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregationStreams.java67
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/Aggregations.java49
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/Aggregator.java219
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java191
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java87
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java130
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java145
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java225
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java64
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java79
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/ValuesSourceAggregationBuilder.java141
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractHash.java138
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java49
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java126
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/BytesRefHash.java171
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java107
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/LongHash.java142
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java140
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregation.java39
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java40
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/filter/Filter.java27
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java53
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java99
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterParser.java45
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java59
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java63
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java161
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java85
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java200
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java316
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/global/Global.java27
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java73
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalBuilder.java40
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalParser.java44
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java59
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogram.java95
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramBuilder.java154
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java271
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java137
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java167
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramBuilder.java98
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java168
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java143
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java403
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java123
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java61
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/missing/Missing.java27
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java81
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingBuilder.java51
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java79
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java60
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/nested/Nested.java27
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java154
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedBuilder.java53
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java66
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java81
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java320
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/Range.java53
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java312
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeBuilder.java58
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java156
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRange.java44
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeBuilder.java77
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java181
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java109
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistance.java38
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceBuilder.java191
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java306
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java98
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4Range.java43
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4RangeBuilder.java130
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java112
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java187
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java220
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java138
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java278
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java184
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java217
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java137
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java145
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java312
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java129
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java136
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java182
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java278
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java91
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTermsAggregator.java63
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/BucketPriorityQueue.java39
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java67
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregation.java60
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java44
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java53
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java97
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregatorParser.java111
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java29
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java122
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgBuilder.java32
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java41
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java122
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java120
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java29
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java122
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxBuilder.java32
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java41
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java122
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java29
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java117
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinBuilder.java32
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java45
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java214
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java53
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java162
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsBuilder.java32
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java40
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java34
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java178
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsBuilder.java32
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java40
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java140
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java120
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java29
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java116
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumBuilder.java32
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java40
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java110
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java34
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java115
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountBuilder.java49
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java81
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java291
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java54
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/FieldDataSource.java813
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/ScriptValueType.java51
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/ScriptValues.java30
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/ValueSourceAggregatorFactory.java87
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java35
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceBased.java28
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java117
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/bytes/BytesValuesSource.java62
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/bytes/ScriptBytesValues.java105
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/geopoints/GeoPointValuesSource.java51
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/numeric/NumericValuesSource.java73
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptDoubleValues.java100
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptLongValues.java100
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatter.java224
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatterStreams.java64
-rw-r--r--src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueParser.java114
-rw-r--r--src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java976
-rw-r--r--src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java36
-rw-r--r--src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java45
-rw-r--r--src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java443
-rw-r--r--src/main/java/org/elasticsearch/search/controller/ShardFieldDocSortedHitQueue.java137
-rw-r--r--src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java102
-rw-r--r--src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java125
-rw-r--r--src/main/java/org/elasticsearch/search/dfs/DfsPhase.java155
-rw-r--r--src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java33
-rw-r--r--src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java239
-rw-r--r--src/main/java/org/elasticsearch/search/facet/DoubleFacetAggregatorBase.java50
-rw-r--r--src/main/java/org/elasticsearch/search/facet/Facet.java36
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetBinaryParseElement.java47
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetBuilder.java102
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetBuilders.java97
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetExecutor.java160
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetModule.java69
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetParseElement.java142
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetParser.java52
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetParsers.java48
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetPhase.java204
-rw-r--r--src/main/java/org/elasticsearch/search/facet/FacetPhaseExecutionException.java36
-rw-r--r--src/main/java/org/elasticsearch/search/facet/Facets.java56
-rw-r--r--src/main/java/org/elasticsearch/search/facet/InternalFacet.java109
-rw-r--r--src/main/java/org/elasticsearch/search/facet/InternalFacets.java164
-rw-r--r--src/main/java/org/elasticsearch/search/facet/LongFacetAggregatorBase.java50
-rw-r--r--src/main/java/org/elasticsearch/search/facet/SearchContextFacets.java102
-rw-r--r--src/main/java/org/elasticsearch/search/facet/TransportFacetModule.java49
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/CountDateHistogramFacetExecutor.java120
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacet.java180
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetBuilder.java264
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetParser.java223
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/InternalCountDateHistogramFacet.java217
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/InternalDateHistogramFacet.java45
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/InternalFullDateHistogramFacet.java265
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/ValueDateHistogramFacetExecutor.java150
-rw-r--r--src/main/java/org/elasticsearch/search/facet/datehistogram/ValueScriptDateHistogramFacetExecutor.java147
-rw-r--r--src/main/java/org/elasticsearch/search/facet/filter/FilterFacet.java38
-rw-r--r--src/main/java/org/elasticsearch/search/facet/filter/FilterFacetBuilder.java82
-rw-r--r--src/main/java/org/elasticsearch/search/facet/filter/FilterFacetExecutor.java106
-rw-r--r--src/main/java/org/elasticsearch/search/facet/filter/FilterFacetParser.java64
-rw-r--r--src/main/java/org/elasticsearch/search/facet/filter/InternalFilterFacet.java124
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacet.java107
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetBuilder.java290
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetExecutor.java137
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetParser.java178
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/InternalGeoDistanceFacet.java180
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/ScriptGeoDistanceFacetExecutor.java104
-rw-r--r--src/main/java/org/elasticsearch/search/facet/geodistance/ValueGeoDistanceFacetExecutor.java91
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/CountHistogramFacetExecutor.java122
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/FullHistogramFacetExecutor.java133
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacet.java180
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetBuilder.java151
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetParser.java142
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/HistogramScriptFacetBuilder.java148
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/InternalCountHistogramFacet.java215
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/InternalFullHistogramFacet.java262
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/InternalHistogramFacet.java44
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/ScriptHistogramFacetExecutor.java135
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/ValueHistogramFacetExecutor.java146
-rw-r--r--src/main/java/org/elasticsearch/search/facet/histogram/ValueScriptHistogramFacetExecutor.java153
-rw-r--r--src/main/java/org/elasticsearch/search/facet/nested/NestedFacetExecutor.java217
-rw-r--r--src/main/java/org/elasticsearch/search/facet/query/InternalQueryFacet.java122
-rw-r--r--src/main/java/org/elasticsearch/search/facet/query/QueryFacet.java38
-rw-r--r--src/main/java/org/elasticsearch/search/facet/query/QueryFacetBuilder.java83
-rw-r--r--src/main/java/org/elasticsearch/search/facet/query/QueryFacetExecutor.java135
-rw-r--r--src/main/java/org/elasticsearch/search/facet/query/QueryFacetParser.java63
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/InternalRangeFacet.java221
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/KeyValueRangeFacetExecutor.java117
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/RangeFacet.java113
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/RangeFacetBuilder.java213
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/RangeFacetExecutor.java112
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/RangeFacetParser.java167
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/RangeScriptFacetBuilder.java185
-rw-r--r--src/main/java/org/elasticsearch/search/facet/range/ScriptRangeFacetExecutor.java98
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/InternalStatisticalFacet.java203
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/ScriptStatisticalFacetExecutor.java101
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacet.java73
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetBuilder.java99
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetExecutor.java124
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetParser.java131
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFieldsFacetExecutor.java116
-rw-r--r--src/main/java/org/elasticsearch/search/facet/statistical/StatisticalScriptFacetBuilder.java108
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/InternalTermsFacet.java48
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/TermsFacet.java163
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/TermsFacetBuilder.java274
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/TermsFacetParser.java225
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/doubles/InternalDoubleTermsFacet.java292
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/doubles/TermsDoubleFacetExecutor.java236
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/index/IndexNameFacetExecutor.java76
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/longs/InternalLongTermsFacet.java293
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/longs/TermsLongFacetExecutor.java236
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/FieldsTermsStringFacetExecutor.java131
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/HashedAggregator.java275
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/HashedScriptAggregator.java100
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/InternalStringTermsFacet.java309
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/ScriptTermsStringFieldFacetExecutor.java208
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringFacetExecutor.java153
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringOrdinalsFacetExecutor.java289
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/support/EntryPriorityQueue.java42
-rw-r--r--src/main/java/org/elasticsearch/search/facet/terms/unmapped/UnmappedFieldExecutor.java76
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/InternalTermsStatsFacet.java46
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacet.java402
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetBuilder.java172
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetParser.java151
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/doubles/InternalTermsStatsDoubleFacet.java323
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/doubles/TermsStatsDoubleFacetExecutor.java223
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/longs/InternalTermsStatsLongFacet.java323
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/longs/TermsStatsLongFacetExecutor.java225
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/strings/InternalTermsStatsStringFacet.java328
-rw-r--r--src/main/java/org/elasticsearch/search/facet/termsstats/strings/TermsStatsStringFacetExecutor.java232
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FetchPhase.java233
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java37
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java82
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java106
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java30
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java116
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java52
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java97
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java70
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java79
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/explain/ExplainParseElement.java38
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsContext.java54
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java92
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java45
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java104
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsContext.java67
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsFetchSubPhase.java84
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsParseElement.java95
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsContext.java68
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java95
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java78
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/source/FetchSourceContext.java165
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/source/FetchSourceParseElement.java96
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java84
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java78
-rw-r--r--src/main/java/org/elasticsearch/search/fetch/version/VersionParseElement.java37
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java94
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java194
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java539
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlightField.java116
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlightModule.java52
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java149
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java70
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/Highlighter.java29
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlighterContext.java71
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java232
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/Highlighters.java48
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java194
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java293
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/SearchContextHighlight.java354
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java106
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SimpleFragmentsBuilder.java45
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceScoreOrderFragmentsBuilder.java78
-rw-r--r--src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceSimpleFragmentsBuilder.java66
-rw-r--r--src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java206
-rw-r--r--src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java705
-rw-r--r--src/main/java/org/elasticsearch/search/internal/DocIdSetCollector.java110
-rw-r--r--src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java87
-rw-r--r--src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java732
-rw-r--r--src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java113
-rw-r--r--src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java259
-rw-r--r--src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java147
-rw-r--r--src/main/java/org/elasticsearch/search/internal/SearchContext.java300
-rw-r--r--src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java209
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java138
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/DocLookup.java160
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/FieldLookup.java100
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java175
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/IndexField.java137
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java284
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/IndexLookup.java241
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/PositionIterator.java143
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/SearchLookup.java96
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/SourceLookup.java211
-rw-r--r--src/main/java/org/elasticsearch/search/lookup/TermPosition.java59
-rw-r--r--src/main/java/org/elasticsearch/search/query/FilterBinaryParseElement.java45
-rw-r--r--src/main/java/org/elasticsearch/search/query/FromParseElement.java43
-rw-r--r--src/main/java/org/elasticsearch/search/query/IndicesBoostParseElement.java54
-rw-r--r--src/main/java/org/elasticsearch/search/query/MinScoreParseElement.java38
-rw-r--r--src/main/java/org/elasticsearch/search/query/PostFilterParseElement.java38
-rw-r--r--src/main/java/org/elasticsearch/search/query/QueryBinaryParseElement.java42
-rw-r--r--src/main/java/org/elasticsearch/search/query/QueryParseElement.java35
-rw-r--r--src/main/java/org/elasticsearch/search/query/QueryPhase.java137
-rw-r--r--src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java33
-rw-r--r--src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java71
-rw-r--r--src/main/java/org/elasticsearch/search/query/QuerySearchResult.java197
-rw-r--r--src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java35
-rw-r--r--src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java70
-rw-r--r--src/main/java/org/elasticsearch/search/query/SizeParseElement.java43
-rw-r--r--src/main/java/org/elasticsearch/search/query/TimeoutParseElement.java41
-rw-r--r--src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java417
-rw-r--r--src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java134
-rw-r--r--src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java68
-rw-r--r--src/main/java/org/elasticsearch/search/rescore/RescorePhase.java70
-rw-r--r--src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java57
-rw-r--r--src/main/java/org/elasticsearch/search/rescore/Rescorer.java94
-rw-r--r--src/main/java/org/elasticsearch/search/scan/ScanContext.java169
-rw-r--r--src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java141
-rw-r--r--src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java170
-rw-r--r--src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java155
-rw-r--r--src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java58
-rw-r--r--src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java163
-rw-r--r--src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java141
-rw-r--r--src/main/java/org/elasticsearch/search/sort/SortBuilder.java39
-rw-r--r--src/main/java/org/elasticsearch/search/sort/SortBuilders.java63
-rw-r--r--src/main/java/org/elasticsearch/search/sort/SortOrder.java46
-rw-r--r--src/main/java/org/elasticsearch/search/sort/SortParseElement.java255
-rw-r--r--src/main/java/org/elasticsearch/search/sort/SortParser.java34
-rw-r--r--src/main/java/org/elasticsearch/search/sort/TrackScoresParseElement.java37
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java119
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/Suggest.java654
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java220
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java29
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestModule.java58
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java113
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java98
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java305
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/Suggester.java45
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/Suggesters.java48
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java126
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java360
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java372
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatProvider.java41
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java150
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java88
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java120
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java135
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java41
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java98
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionFuzzyBuilder.java112
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java160
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/completion/PayloadProcessor.java37
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java50
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java117
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/Correction.java76
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java258
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java61
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpoatingScorer.java65
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java79
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java156
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java314
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java118
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java109
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java613
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java183
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java72
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java105
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java65
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java104
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java201
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java224
-rw-r--r--src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java37
-rw-r--r--src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java47
-rw-r--r--src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java218
-rw-r--r--src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java41
-rw-r--r--src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java41
-rw-r--r--src/main/java/org/elasticsearch/snapshots/RestoreInfo.java196
-rw-r--r--src/main/java/org/elasticsearch/snapshots/RestoreService.java787
-rw-r--r--src/main/java/org/elasticsearch/snapshots/Snapshot.java101
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java37
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotException.java43
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java316
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java44
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java35
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java215
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotState.java84
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java121
-rw-r--r--src/main/java/org/elasticsearch/snapshots/SnapshotsService.java1280
-rw-r--r--src/main/java/org/elasticsearch/threadpool/ThreadPool.java670
-rw-r--r--src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java91
-rw-r--r--src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java40
-rw-r--r--src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java197
-rw-r--r--src/main/java/org/elasticsearch/transport/ActionNotFoundTransportException.java39
-rw-r--r--src/main/java/org/elasticsearch/transport/ActionTransportException.java81
-rw-r--r--src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java34
-rw-r--r--src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java27
-rw-r--r--src/main/java/org/elasticsearch/transport/BindTransportException.java34
-rw-r--r--src/main/java/org/elasticsearch/transport/BytesTransportRequest.java76
-rw-r--r--src/main/java/org/elasticsearch/transport/ConnectTransportException.java51
-rw-r--r--src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java54
-rw-r--r--src/main/java/org/elasticsearch/transport/FailedCommunicationException.java34
-rw-r--r--src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java41
-rw-r--r--src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java39
-rw-r--r--src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java38
-rw-r--r--src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java31
-rw-r--r--src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java45
-rw-r--r--src/main/java/org/elasticsearch/transport/PlainTransportFuture.java109
-rw-r--r--src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java36
-rw-r--r--src/main/java/org/elasticsearch/transport/RemoteTransportException.java46
-rw-r--r--src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java37
-rw-r--r--src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java37
-rw-r--r--src/main/java/org/elasticsearch/transport/SendRequestTransportException.java33
-rw-r--r--src/main/java/org/elasticsearch/transport/Transport.java82
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportChannel.java36
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportConnectionListener.java32
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportException.java36
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportFuture.java44
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportInfo.java85
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportModule.java60
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportRequest.java99
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportRequestHandler.java37
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportRequestOptions.java96
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportResponse.java99
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportResponseHandler.java40
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportResponseOptions.java43
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportSerializationException.java34
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportService.java431
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java40
-rw-r--r--src/main/java/org/elasticsearch/transport/TransportStats.java138
-rw-r--r--src/main/java/org/elasticsearch/transport/local/LocalTransport.java324
-rw-r--r--src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java116
-rw-r--r--src/main/java/org/elasticsearch/transport/local/LocalTransportModule.java42
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java147
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java36
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java289
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/NettyHeader.java46
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java95
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java35
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/NettyTransport.java952
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java117
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java42
-rw-r--r--src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java65
-rw-r--r--src/main/java/org/elasticsearch/transport/support/TransportStatus.java61
-rw-r--r--src/main/java/org/elasticsearch/tribe/TribeModule.java32
-rw-r--r--src/main/java/org/elasticsearch/tribe/TribeService.java299
-rw-r--r--src/main/java/org/elasticsearch/watcher/AbstractResourceWatcher.java79
-rw-r--r--src/main/java/org/elasticsearch/watcher/FileChangesListener.java75
-rw-r--r--src/main/java/org/elasticsearch/watcher/FileWatcher.java274
-rw-r--r--src/main/java/org/elasticsearch/watcher/ResourceWatcher.java37
-rw-r--r--src/main/java/org/elasticsearch/watcher/ResourceWatcherModule.java31
-rw-r--r--src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java106
-rw-r--r--src/main/java/org/joda/time/base/BaseDateTime.java347
-rw-r--r--src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec0
-rw-r--r--src/main/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat0
-rw-r--r--src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat3
-rw-r--r--src/main/resources/config/names.txt2826
-rw-r--r--src/main/resources/es-build.properties3
-rw-r--r--src/rpm/init.d/elasticsearch158
-rw-r--r--src/rpm/scripts/postinstall50
-rw-r--r--src/rpm/scripts/postremove15
-rw-r--r--src/rpm/scripts/preinstall4
-rw-r--r--src/rpm/scripts/preremove29
-rw-r--r--src/rpm/sysconfig/elasticsearch46
-rw-r--r--src/rpm/systemd/elasticsearch.conf1
-rw-r--r--src/rpm/systemd/elasticsearch.service20
-rw-r--r--src/rpm/systemd/sysctl.d/elasticsearch.conf1
-rw-r--r--src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java68
-rw-r--r--src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java71
-rw-r--r--src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java65
-rw-r--r--src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java107
-rw-r--r--src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java487
-rw-r--r--src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java1693
-rw-r--r--src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java367
-rw-r--r--src/test/java/org/apache/lucene/util/SloppyMathTests.java98
-rw-r--r--src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java47
-rw-r--r--src/test/java/org/elasticsearch/VersionTests.java75
-rw-r--r--src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java134
-rw-r--r--src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java147
-rw-r--r--src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java37
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java70
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java101
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/bulk-log.json24
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/simple-bulk.json5
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json5
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json5
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json7
-rw-r--r--src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json5
-rw-r--r--src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java164
-rw-r--r--src/test/java/org/elasticsearch/action/percolate/mpercolate1.json12
-rw-r--r--src/test/java/org/elasticsearch/action/percolate/mpercolate2.json6
-rw-r--r--src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java90
-rw-r--r--src/test/java/org/elasticsearch/action/search/simple-msearch1.json10
-rw-r--r--src/test/java/org/elasticsearch/action/search/simple-msearch2.json10
-rw-r--r--src/test/java/org/elasticsearch/action/search/simple-msearch3.json8
-rw-r--r--src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java55
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java411
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java256
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java544
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java73
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java328
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/multiRequest1.json13
-rw-r--r--src/test/java/org/elasticsearch/action/termvector/multiRequest2.json26
-rw-r--r--src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java834
-rw-r--r--src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java138
-rw-r--r--src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java63
-rw-r--r--src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java85
-rw-r--r--src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java87
-rw-r--r--src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java96
-rw-r--r--src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java123
-rw-r--r--src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java140
-rw-r--r--src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java66
-rw-r--r--src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java160
-rw-r--r--src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java81
-rw-r--r--src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java55
-rw-r--r--src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java256
-rw-r--r--src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java159
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java337
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java101
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java136
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java83
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java45
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java54
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java49
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java74
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java72
-rw-r--r--src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java72
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java166
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java311
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java182
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java367
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java253
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java434
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java208
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java202
-rw-r--r--src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java193
-rw-r--r--src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java283
-rw-r--r--src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java123
-rw-r--r--src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java109
-rw-r--r--src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java70
-rw-r--r--src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java59
-rw-r--r--src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java72
-rw-r--r--src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java150
-rw-r--r--src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java191
-rw-r--r--src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java157
-rw-r--r--src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java65
-rw-r--r--src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java173
-rw-r--r--src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java111
-rw-r--r--src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java142
-rw-r--r--src/test/java/org/elasticsearch/client/transport/TransportClientTests.java39
-rw-r--r--src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java57
-rw-r--r--src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java216
-rw-r--r--src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java65
-rw-r--r--src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java658
-rw-r--r--src/test/java/org/elasticsearch/cluster/DiskUsageTests.java60
-rw-r--r--src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java307
-rw-r--r--src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java130
-rw-r--r--src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java120
-rw-r--r--src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java76
-rw-r--r--src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java138
-rw-r--r--src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java62
-rw-r--r--src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java112
-rw-r--r--src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java129
-rw-r--r--src/test/java/org/elasticsearch/cluster/ack/AckTests.java422
-rw-r--r--src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java228
-rw-r--r--src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java215
-rw-r--r--src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java147
-rw-r--r--src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java68
-rw-r--r--src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java80
-rw-r--r--src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java346
-rw-r--r--src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java485
-rw-r--r--src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java193
-rw-r--r--src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java119
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java435
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java67
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java392
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java827
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java509
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java626
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java154
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java245
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java146
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java113
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java163
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java485
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java168
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java538
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java340
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java123
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java102
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java143
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java93
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java214
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java150
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java105
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java415
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java36
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java93
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java101
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java188
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java412
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java169
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java178
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java173
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java177
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java597
-rw-r--r--src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java148
-rw-r--r--src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java88
-rw-r--r--src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java67
-rw-r--r--src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java109
-rw-r--r--src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java87
-rw-r--r--src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java126
-rw-r--r--src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java361
-rw-r--r--src/test/java/org/elasticsearch/codecs/CodecTests.java122
-rw-r--r--src/test/java/org/elasticsearch/common/BooleansTests.java43
-rw-r--r--src/test/java/org/elasticsearch/common/ParseFieldTests.java74
-rw-r--r--src/test/java/org/elasticsearch/common/StringsTests.java36
-rw-r--r--src/test/java/org/elasticsearch/common/TableTests.java153
-rw-r--r--src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java106
-rw-r--r--src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java50
-rw-r--r--src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java55
-rw-r--r--src/test/java/org/elasticsearch/common/geo/GeoHashTests.java59
-rw-r--r--src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java256
-rw-r--r--src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java196
-rw-r--r--src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java101
-rw-r--r--src/test/java/org/elasticsearch/common/io/StreamsTests.java91
-rw-r--r--src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java90
-rw-r--r--src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java80
-rw-r--r--src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java66
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/LuceneTest.java49
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java342
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java69
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java74
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java66
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java119
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java391
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java567
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java266
-rw-r--r--src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java285
-rw-r--r--src/test/java/org/elasticsearch/common/path/PathTrieTests.java160
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java92
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java29
-rw-r--r--src/test/java/org/elasticsearch/common/regex/RegexTests.java71
-rw-r--r--src/test/java/org/elasticsearch/common/rounding/RoundingTests.java44
-rw-r--r--src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java94
-rw-r--r--src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java178
-rw-r--r--src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java24
-rw-r--r--src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java24
-rw-r--r--src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java52
-rw-r--r--src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java52
-rw-r--r--src/test/java/org/elasticsearch/common/settings/loader/test-settings.json10
-rw-r--r--src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml8
-rw-r--r--src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java83
-rw-r--r--src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java98
-rw-r--r--src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java62
-rw-r--r--src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java199
-rw-r--r--src/test/java/org/elasticsearch/common/unit/TimeValueTests.java69
-rw-r--r--src/test/java/org/elasticsearch/common/util/BigArraysTests.java195
-rw-r--r--src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java109
-rw-r--r--src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java64
-rw-r--r--src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java120
-rw-r--r--src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java119
-rw-r--r--src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java147
-rw-r--r--src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java105
-rw-r--r--src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java236
-rw-r--r--src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java277
-rw-r--r--src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java124
-rw-r--r--src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java156
-rw-r--r--src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java105
-rw-r--r--src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java68
-rw-r--r--src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java456
-rw-r--r--src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java97
-rw-r--r--src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java827
-rw-r--r--src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java149
-rw-r--r--src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java142
-rw-r--r--src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java83
-rw-r--r--src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java255
-rw-r--r--src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java262
-rw-r--r--src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java147
-rw-r--r--src/test/java/org/elasticsearch/discovery/DiscoveryTests.java55
-rw-r--r--src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java157
-rw-r--r--src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java115
-rw-r--r--src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java45
-rw-r--r--src/test/java/org/elasticsearch/document/BulkTests.java593
-rw-r--r--src/test/java/org/elasticsearch/document/DocumentActionsTests.java291
-rw-r--r--src/test/java/org/elasticsearch/explain/ExplainActionTests.java239
-rw-r--r--src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java94
-rw-r--r--src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java384
-rw-r--r--src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java535
-rw-r--r--src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java189
-rw-r--r--src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java428
-rw-r--r--src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java161
-rw-r--r--src/test/java/org/elasticsearch/get/GetActionTests.java870
-rw-r--r--src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java61
-rw-r--r--src/test/java/org/elasticsearch/index/VersionTypeTests.java112
-rw-r--r--src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java166
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java238
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java59
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java69
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java76
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java90
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java117
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java104
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java113
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java92
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java230
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java63
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java73
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java48
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java172
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java49
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java63
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java51
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java71
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java62
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java97
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java128
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json37
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java216
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt2
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json29
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json31
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java42
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/keep_analysis.json19
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/pattern_capture.json46
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json16
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/stop.json18
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java105
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json52
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt3
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt3
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/test1.json80
-rw-r--r--src/test/java/org/elasticsearch/index/analysis/test1.yml61
-rw-r--r--src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java95
-rw-r--r--src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java410
-rw-r--r--src/test/java/org/elasticsearch/index/codec/CodecTests.java430
-rw-r--r--src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java123
-rw-r--r--src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java178
-rw-r--r--src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java55
-rw-r--r--src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java67
-rw-r--r--src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java199
-rw-r--r--src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java1162
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java358
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java104
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java477
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java426
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java93
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java194
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java625
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java32
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java88
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java201
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java193
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java170
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java416
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java82
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java32
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java289
-rw-r--r--src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java80
-rw-r--r--src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java77
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java83
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/UidTests.java46
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java283
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/all/mapping.json57
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json56
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json55
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json56
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/all/test1.json18
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java159
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java54
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java96
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java68
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java177
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java58
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java114
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java75
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java90
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java226
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java222
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java92
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java360
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java152
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java64
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json5
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json14
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java77
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json15
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json30
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java173
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json7
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json36
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java48
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java78
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java249
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java120
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java95
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java411
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java118
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java114
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java52
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java89
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java121
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java142
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java408
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java309
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java199
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json4
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json11
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json27
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json32
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json18
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json25
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json30
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json16
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json8
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json30
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json30
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json32
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json55
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json50
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java318
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java325
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java75
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java59
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java102
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java73
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json32
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java92
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java147
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json98
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json40
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json41
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json43
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/simple/test1.json41
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java120
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java98
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java202
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java322
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java156
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java140
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java242
-rw-r--r--src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java50
-rw-r--r--src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java178
-rw-r--r--src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java36
-rw-r--r--src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java199
-rw-r--r--src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java2298
-rw-r--r--src/test/java/org/elasticsearch/index/query/and-filter-cache.json21
-rw-r--r--src/test/java/org/elasticsearch/index/query/and-filter-named.json26
-rw-r--r--src/test/java/org/elasticsearch/index/query/and-filter.json25
-rw-r--r--src/test/java/org/elasticsearch/index/query/and-filter2.json23
-rw-r--r--src/test/java/org/elasticsearch/index/query/bool-filter.json35
-rw-r--r--src/test/java/org/elasticsearch/index/query/bool.json30
-rw-r--r--src/test/java/org/elasticsearch/index/query/boosting-query.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/child-mapping.json12
-rw-r--r--src/test/java/org/elasticsearch/index/query/commonTerms-query1.json11
-rw-r--r--src/test/java/org/elasticsearch/index/query/commonTerms-query2.json11
-rw-r--r--src/test/java/org/elasticsearch/index/query/commonTerms-query3.json9
-rw-r--r--src/test/java/org/elasticsearch/index/query/constantScore-query.json9
-rw-r--r--src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json10
-rw-r--r--src/test/java/org/elasticsearch/index/query/custom_score1.json10
-rw-r--r--src/test/java/org/elasticsearch/index/query/data.json45
-rw-r--r--src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json25
-rw-r--r--src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json26
-rw-r--r--src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json26
-rw-r--r--src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json26
-rw-r--r--src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json26
-rw-r--r--src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json26
-rw-r--r--src/test/java/org/elasticsearch/index/query/disMax.json18
-rw-r--r--src/test/java/org/elasticsearch/index/query/disMax2.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/field3.json9
-rw-r--r--src/test/java/org/elasticsearch/index/query/filtered-query.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/filtered-query2.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/filtered-query3.json19
-rw-r--r--src/test/java/org/elasticsearch/index/query/filtered-query4.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/fquery-filter.json19
-rw-r--r--src/test/java/org/elasticsearch/index/query/function-filter-score-query.json30
-rw-r--r--src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json10
-rw-r--r--src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json9
-rw-r--r--src/test/java/org/elasticsearch/index/query/fuzzy.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/geoShape-filter.json21
-rw-r--r--src/test/java/org/elasticsearch/index/query/geoShape-query.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json21
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance-named.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance1.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance10.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance11.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance12.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance2.json13
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance3.json13
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance4.json13
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance5.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance6.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance7.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance8.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_distance9.json17
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_polygon-named.json19
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_polygon1.json18
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_polygon2.json27
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_polygon3.json18
-rw-r--r--src/test/java/org/elasticsearch/index/query/geo_polygon4.json18
-rw-r--r--src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java107
-rw-r--r--src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java64
-rw-r--r--src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java64
-rw-r--r--src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json19
-rw-r--r--src/test/java/org/elasticsearch/index/query/has-child.json13
-rw-r--r--src/test/java/org/elasticsearch/index/query/limit-filter.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/mapping.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/match-query-bad-type.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/matchAll.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/match_all_empty1.json3
-rw-r--r--src/test/java/org/elasticsearch/index/query/match_all_empty2.json3
-rw-r--r--src/test/java/org/elasticsearch/index/query/mlt.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/mltField.json9
-rw-r--r--src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json6
-rw-r--r--src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json6
-rw-r--r--src/test/java/org/elasticsearch/index/query/not-filter.json18
-rw-r--r--src/test/java/org/elasticsearch/index/query/not-filter2.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/not-filter3.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/numeric_range-filter.json19
-rw-r--r--src/test/java/org/elasticsearch/index/query/or-filter.json25
-rw-r--r--src/test/java/org/elasticsearch/index/query/or-filter2.json23
-rw-r--r--src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java103
-rw-r--r--src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java111
-rw-r--r--src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java64
-rw-r--r--src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java64
-rw-r--r--src/test/java/org/elasticsearch/index/query/prefix-boost.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/prefix-filter-named.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/prefix-filter.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/prefix-with-boost.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/prefix.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/query-fields-match.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/query-fields1.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/query-fields2.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/query-fields3.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/query-filter.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/query.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/query2.json6
-rw-r--r--src/test/java/org/elasticsearch/index/query/range-filter-named.json20
-rw-r--r--src/test/java/org/elasticsearch/index/query/range-filter.json19
-rw-r--r--src/test/java/org/elasticsearch/index/query/range.json10
-rw-r--r--src/test/java/org/elasticsearch/index/query/range2.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/regexp-boost.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json20
-rw-r--r--src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json18
-rw-r--r--src/test/java/org/elasticsearch/index/query/regexp-filter-named.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/regexp-filter.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/regexp.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/simple-query-string.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json13
-rw-r--r--src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json12
-rw-r--r--src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json16
-rw-r--r--src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json7
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json29
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanFirst.json10
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanNear.json24
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanNot.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanOr.json21
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanOr2.json30
-rw-r--r--src/test/java/org/elasticsearch/index/query/spanTerm.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/starColonStar.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/term-filter-named.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/term-filter.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/term-with-boost.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/term.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/terms-filter-named.json15
-rw-r--r--src/test/java/org/elasticsearch/index/query/terms-filter.json14
-rw-r--r--src/test/java/org/elasticsearch/index/query/terms-query.json5
-rw-r--r--src/test/java/org/elasticsearch/index/query/wildcard-boost.json8
-rw-r--r--src/test/java/org/elasticsearch/index/query/wildcard.json5
-rw-r--r--src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java253
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java50
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java347
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java256
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/MockScorer.java85
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java245
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java241
-rw-r--r--src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java583
-rw-r--r--src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java68
-rw-r--r--src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java89
-rw-r--r--src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java258
-rw-r--r--src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java347
-rw-r--r--src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java81
-rw-r--r--src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java81
-rw-r--r--src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java50
-rw-r--r--src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java281
-rw-r--r--src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java177
-rw-r--r--src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java210
-rw-r--r--src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java179
-rw-r--r--src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java267
-rw-r--r--src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java55
-rw-r--r--src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java46
-rw-r--r--src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java46
-rw-r--r--src/test/java/org/elasticsearch/indexing/IndexActionTests.java130
-rw-r--r--src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java404
-rw-r--r--src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java157
-rw-r--r--src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java919
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java47
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java55
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java37
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java41
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java36
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java43
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java30
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java33
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java37
-rw-r--r--src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java207
-rw-r--r--src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java142
-rw-r--r--src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java93
-rw-r--r--src/test/java/org/elasticsearch/indices/cache/CacheTests.java177
-rw-r--r--src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java72
-rw-r--r--src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java157
-rw-r--r--src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java47
-rw-r--r--src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java259
-rw-r--r--src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java132
-rw-r--r--src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java92
-rw-r--r--src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java45
-rw-r--r--src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java106
-rw-r--r--src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java157
-rw-r--r--src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java143
-rw-r--r--src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java477
-rw-r--r--src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java254
-rw-r--r--src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java115
-rw-r--r--src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java105
-rw-r--r--src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java291
-rw-r--r--src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java161
-rw-r--r--src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java425
-rw-r--r--src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java115
-rw-r--r--src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java152
-rw-r--r--src/test/java/org/elasticsearch/indices/store/StrictDistributor.java55
-rw-r--r--src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java89
-rw-r--r--src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java325
-rw-r--r--src/test/java/org/elasticsearch/indices/template/template0.json7
-rw-r--r--src/test/java/org/elasticsearch/indices/template/template1.json7
-rw-r--r--src/test/java/org/elasticsearch/indices/template/template2.json9
-rw-r--r--src/test/java/org/elasticsearch/indices/template/template3.json9
-rw-r--r--src/test/java/org/elasticsearch/indices/template/template4.json9
-rw-r--r--src/test/java/org/elasticsearch/indices/template/template5.json11
-rw-r--r--src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java159
-rw-r--r--src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java352
-rw-r--r--src/test/java/org/elasticsearch/mget/SimpleMgetTests.java164
-rw-r--r--src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java254
-rw-r--r--src/test/java/org/elasticsearch/nested/SimpleNestedTests.java1229
-rw-r--r--src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java55
-rw-r--r--src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java271
-rw-r--r--src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java40
-rw-r--r--src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java40
-rw-r--r--src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java179
-rw-r--r--src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java399
-rw-r--r--src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java264
-rw-r--r--src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java125
-rw-r--r--src/test/java/org/elasticsearch/percolator/PercolatorTests.java1766
-rw-r--r--src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java443
-rw-r--r--src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java214
-rw-r--r--src/test/java/org/elasticsearch/plugin/PluginManagerTests.java318
-rw-r--r--src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java71
-rw-r--r--src/test/java/org/elasticsearch/plugin/SitePluginTests.java104
-rw-r--r--src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java40
-rw-r--r--src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java46
-rw-r--r--src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java124
-rw-r--r--src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java460
-rw-r--r--src/test/java/org/elasticsearch/recovery/RelocationTests.java419
-rw-r--r--src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java119
-rw-r--r--src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java34
-rw-r--r--src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java34
-rw-r--r--src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java34
-rw-r--r--src/test/java/org/elasticsearch/rest/helper/HttpClient.java120
-rw-r--r--src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java63
-rw-r--r--src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java125
-rw-r--r--src/test/java/org/elasticsearch/river/RiverTests.java158
-rw-r--r--src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java145
-rw-r--r--src/test/java/org/elasticsearch/routing/AliasRoutingTests.java434
-rw-r--r--src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java440
-rw-r--r--src/test/java/org/elasticsearch/script/IndexLookupTests.java625
-rw-r--r--src/test/java/org/elasticsearch/script/NativeScriptTests.java66
-rw-r--r--src/test/java/org/elasticsearch/script/ScriptFieldTests.java152
-rw-r--r--src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java67
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/CombiTests.java146
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java49
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/RandomTests.java276
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java254
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java1077
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java1058
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java880
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java177
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java439
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java244
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java132
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java790
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java865
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java69
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java848
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java381
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java222
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java183
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java345
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java947
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java324
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java361
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java1024
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java26
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java108
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java269
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java398
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java269
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java283
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java370
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java270
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java127
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java26
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/package-info.java26
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java141
-rw-r--r--src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java162
-rw-r--r--src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java112
-rw-r--r--src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java123
-rw-r--r--src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java309
-rw-r--r--src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java128
-rw-r--r--src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java439
-rw-r--r--src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java2246
-rw-r--r--src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java95
-rw-r--r--src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java1048
-rw-r--r--src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java90
-rw-r--r--src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java355
-rw-r--r--src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java35
-rw-r--r--src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java2378
-rw-r--r--src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java423
-rw-r--r--src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java387
-rw-r--r--src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java548
-rw-r--r--src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java486
-rw-r--r--src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java715
-rw-r--r--src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java169
-rw-r--r--src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java154
-rw-r--r--src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java280
-rw-r--r--src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java261
-rw-r--r--src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java659
-rw-r--r--src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java603
-rw-r--r--src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java369
-rw-r--r--src/test/java/org/elasticsearch/search/geo/gzippedmap.jsonbin0 -> 7740 bytes
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java53
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java39
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java105
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java2703
-rw-r--r--src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java120
-rw-r--r--src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java246
-rw-r--r--src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java57
-rw-r--r--src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java108
-rw-r--r--src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java375
-rw-r--r--src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java2179
-rw-r--r--src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java464
-rw-r--r--src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java105
-rw-r--r--src/test/java/org/elasticsearch/search/scan/SearchScanTests.java88
-rw-r--r--src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java158
-rw-r--r--src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java453
-rw-r--r--src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java198
-rw-r--r--src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java1533
-rw-r--r--src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java85
-rw-r--r--src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java158
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java1111
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java191
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java84
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java42
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java85
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java977
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java337
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java329
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java403
-rw-r--r--src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java60
-rw-r--r--src/test/java/org/elasticsearch/similarity/SimilarityTests.java79
-rw-r--r--src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java120
-rw-r--r--src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java205
-rw-r--r--src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java154
-rw-r--r--src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java983
-rw-r--r--src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java56
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java55
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java93
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java316
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java42
-rw-r--r--src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java87
-rw-r--r--src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java242
-rw-r--r--src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java73
-rw-r--r--src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java97
-rw-r--r--src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java106
-rw-r--r--src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java71
-rw-r--r--src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java121
-rw-r--r--src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java51
-rw-r--r--src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java35
-rw-r--r--src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java77
-rw-r--r--src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java102
-rw-r--r--src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java125
-rw-r--r--src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java97
-rw-r--r--src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java125
-rw-r--r--src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java367
-rw-r--r--src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java115
-rw-r--r--src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java237
-rw-r--r--src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java427
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java115
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java878
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java51
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java277
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java32
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java48
-rw-r--r--src/test/java/org/elasticsearch/test/NodeSettingsSource.java76
-rw-r--r--src/test/java/org/elasticsearch/test/TestCluster.java1300
-rw-r--r--src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java33
-rw-r--r--src/test/java/org/elasticsearch/test/client/RandomizingClient.java431
-rw-r--r--src/test/java/org/elasticsearch/test/engine/MockEngineModule.java31
-rw-r--r--src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java222
-rw-r--r--src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java192
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java33
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java59
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java449
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java204
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java118
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java62
-rw-r--r--src/test/java/org/elasticsearch/test/junit/annotations/Network.java34
-rw-r--r--src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java41
-rw-r--r--src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java106
-rw-r--r--src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java135
-rw-r--r--src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java37
-rw-r--r--src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java153
-rw-r--r--src/test/java/org/elasticsearch/test/rest/Stash.java117
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/RestClient.java243
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/RestException.java41
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/RestResponse.java88
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java40
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java40
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java186
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java97
-rw-r--r--src/test/java/org/elasticsearch/test/rest/json/JsonPath.java111
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java79
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java107
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java83
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java598
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java56
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java89
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java39
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java34
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java34
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java48
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java39
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java36
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java33
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java33
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java66
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java165
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java115
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java57
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java66
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java83
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java71
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/Assertion.java76
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/DoSection.java136
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java34
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java54
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java61
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java55
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java64
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java54
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java76
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java78
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/SetSection.java52
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/SetupSection.java60
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/SkipSection.java83
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/TestSection.java74
-rw-r--r--src/test/java/org/elasticsearch/test/rest/spec/RestApi.java216
-rw-r--r--src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java139
-rw-r--r--src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java66
-rw-r--r--src/test/java/org/elasticsearch/test/rest/support/Features.java60
-rw-r--r--src/test/java/org/elasticsearch/test/rest/support/FileUtils.java143
-rw-r--r--src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java87
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java41
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java174
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java393
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java116
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java150
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java97
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java197
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java536
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java77
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java125
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java117
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java273
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java120
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java166
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java56
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java43
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java32
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java63
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java61
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java32
-rw-r--r--src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java60
-rw-r--r--src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java42
-rw-r--r--src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java130
-rw-r--r--src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java218
-rw-r--r--src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java85
-rw-r--r--src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java875
-rw-r--r--src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java33
-rw-r--r--src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java141
-rw-r--r--src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java53
-rw-r--r--src/test/java/org/elasticsearch/tribe/TribeTests.java185
-rw-r--r--src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java182
-rw-r--r--src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java103
-rw-r--r--src/test/java/org/elasticsearch/update/UpdateTests.java503
-rw-r--r--src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java271
-rw-r--r--src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java80
-rw-r--r--src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java259
-rw-r--r--src/test/java/org/elasticsearch/watcher/FileWatcherTest.java386
-rwxr-xr-xsrc/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff201
-rwxr-xr-xsrc/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic62120
-rwxr-xr-xsrc/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff201
-rwxr-xr-xsrc/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic62120
-rw-r--r--src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml2
-rw-r--r--src/test/resources/jmeter/index-count.jmx240
-rw-r--r--src/test/resources/jmeter/index-get.jmx211
-rw-r--r--src/test/resources/jmeter/index-search.jmx240
-rw-r--r--src/test/resources/jmeter/index.jmx210
-rw-r--r--src/test/resources/jmeter/ping-single.jmx210
-rw-r--r--src/test/resources/log4j.properties9
-rw-r--r--src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf81
-rw-r--r--src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties21
-rw-r--r--src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html9
-rw-r--r--src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html9
-rw-r--r--src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html9
-rw-r--r--src/test/resources/org/elasticsearch/plugin/anotherplugin/_site/index.html9
-rw-r--r--src/test/resources/org/elasticsearch/plugin/dummy/_site/index.html9
-rw-r--r--src/test/resources/org/elasticsearch/plugin/plugin_folder_file.zipbin0 -> 503 bytes
-rw-r--r--src/test/resources/org/elasticsearch/plugin/plugin_folder_site.zipbin0 -> 373 bytes
-rw-r--r--src/test/resources/org/elasticsearch/plugin/plugin_single_folder.zipbin0 -> 405 bytes
-rw-r--r--src/test/resources/org/elasticsearch/plugin/plugin_with_classfile.zipbin0 -> 191 bytes
-rw-r--r--src/test/resources/org/elasticsearch/plugin/plugin_with_sourcefiles.zipbin0 -> 2412 bytes
-rw-r--r--src/test/resources/org/elasticsearch/plugin/plugin_without_folders.zipbin0 -> 221 bytes
-rw-r--r--src/test/resources/org/elasticsearch/plugin/subdir/_site/dir/index.html9
-rw-r--r--src/test/resources/org/elasticsearch/plugin/subdir/_site/dir_without_index/page.html9
4528 files changed, 749406 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f4ec129
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,36 @@
+.idea/
+.gradle/
+*.iml
+work/
+/data/
+logs/
+.DS_Store
+build/
+target/
+*-execution-hints.log
+docs/html/
+docs/build.log
+/tmp/
+
+## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
+## The only configuration files which are not ignored are certain files in
+## .settings (as listed below) since these files ensure common coding
+## style across Eclipse and IDEA.
+## Other files (.project, .classpath) should be generated through Maven which
+## will correctly set the classpath based on the declared dependencies.
+.project
+.classpath
+eclipse-build
+*/.project
+*/.classpath
+*/eclipse-build
+/.settings/
+!/.settings/org.eclipse.core.resources.prefs
+!/.settings/org.eclipse.jdt.core.prefs
+!/.settings/org.eclipse.jdt.ui.prefs
+
+## netbeans ignores
+nb-configuration.xml
+nbactions.xml
+
+/dependency-reduced-pom.xml
diff --git a/.settings/org.eclipse.core.resources.prefs b/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 0000000..99f26c0
--- /dev/null
+++ b/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+encoding/<project>=UTF-8
diff --git a/.settings/org.eclipse.jdt.core.prefs b/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..a35498a
--- /dev/null
+++ b/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,10 @@
+eclipse.preferences.version=1
+# We target Java 1.6
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.source=1.6
+# Lines should be split at 140 chars
+org.eclipse.jdt.core.formatter.lineSplit=140
+# Indentation is 4 spaces
+org.eclipse.jdt.core.formatter.tabulation.char=space
+org.eclipse.jdt.core.formatter.tabulation.size=4
diff --git a/.settings/org.eclipse.jdt.ui.prefs b/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000..6890ec8
--- /dev/null
+++ b/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,11 @@
+eclipse.preferences.version=1
+formatter_settings_version=12
+# Intellij IDEA import rules
+org.eclipse.jdt.ui.importorder=;java;javax;\#;
+org.eclipse.jdt.ui.ondemandthreshold=5
+org.eclipse.jdt.ui.staticondemandthreshold=3
+# Organize imports on save. This is needed because Eclipse can fail to properly add imports to an existing import list, since we don't use its default import rules.
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+sp_cleanup.organize_imports=true
+# License header
+org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates><template autoinsert\="false" context\="gettercomment_context" deleted\="false" description\="Comment for getter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.gettercomment" name\="gettercomment">/**\n *\n */</template><template autoinsert\="false" context\="settercomment_context" deleted\="false" description\="Comment for setter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.settercomment" name\="settercomment">/**\n *\n */</template><template autoinsert\="true" context\="constructorcomment_context" deleted\="false" description\="Comment for created constructors" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorcomment" name\="constructorcomment">/**\n * ${tags}\n */</template><template autoinsert\="true" context\="filecomment_context" deleted\="false" description\="Comment for created Java files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.filecomment" name\="filecomment">/**\n *\n */</template><template autoinsert\="false" context\="typecomment_context" deleted\="false" description\="Comment for created types" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.typecomment" name\="typecomment">/**\n *\n */</template><template autoinsert\="true" context\="fieldcomment_context" deleted\="false" description\="Comment for fields" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.fieldcomment" name\="fieldcomment">/**\n *\n */</template><template autoinsert\="true" context\="methodcomment_context" deleted\="false" description\="Comment for non-overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodcomment" name\="methodcomment">/**\n * ${tags}\n */</template><template autoinsert\="false" context\="overridecomment_context" deleted\="false" description\="Comment for overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.overridecomment" name\="overridecomment">/*\n *\n */</template><template autoinsert\="true" context\="delegatecomment_context" deleted\="false" description\="Comment for delegate methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.delegatecomment" name\="delegatecomment">/**\n * ${tags}\n * ${see_to_target}\n */</template><template autoinsert\="false" context\="newtype_context" deleted\="false" description\="Newly created files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.newtype" name\="newtype">${filecomment}\n/*\n * Licensed to Elasticsearch under one or more contributor\n * license agreements. See the NOTICE file distributed with\n * this work for additional information regarding copyright\n * ownership. Elasticsearch licenses this file to you under\n * the Apache License, Version 2.0 (the "License"); you may\n * not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n${package_declaration}\n\n${typecomment}\n${type_declaration}</template><template autoinsert\="true" context\="classbody_context" deleted\="false" description\="Code in new class type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.classbody" name\="classbody">\n</template><template autoinsert\="true" context\="interfacebody_context" deleted\="false" description\="Code in new interface type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.interfacebody" name\="interfacebody">\n</template><template autoinsert\="true" context\="enumbody_context" deleted\="false" description\="Code in new enum type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.enumbody" name\="enumbody">\n</template><template autoinsert\="true" context\="annotationbody_context" deleted\="false" description\="Code in new annotation type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.annotationbody" name\="annotationbody">\n</template><template autoinsert\="false" context\="catchblock_context" deleted\="false" description\="Code in new catch blocks" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.catchblock" name\="catchblock">throw new RuntimeException();</template><template autoinsert\="false" context\="methodbody_context" deleted\="false" description\="Code in created method stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodbody" name\="methodbody">${body_statement}</template><template autoinsert\="false" context\="constructorbody_context" deleted\="false" description\="Code in created constructor stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorbody" name\="constructorbody">${body_statement}</template><template autoinsert\="true" context\="getterbody_context" deleted\="false" description\="Code in created getters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.getterbody" name\="getterbody">return ${field};</template><template autoinsert\="true" context\="setterbody_context" deleted\="false" description\="Code in created setters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.setterbody" name\="setterbody">${field} \= ${param};</template></templates>
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..a8cf8dd
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,12 @@
+language: java
+jdk:
+ - openjdk6
+ - openjdk7
+ - oraclejdk7
+
+env:
+ - ES_TEST_LOCAL=true
+ - ES_TEST_LOCAL=false
+
+notifications:
+ email: false
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5e7e770
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,107 @@
+Contributing to elasticsearch
+=============================
+
+Elasticsearch is an open source project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself.
+
+Bug reports
+-----------
+
+If you think you have found a bug in Elasticsearch, first make sure that you are testing against the [latest version of Elasticsearch](http://www.elasticsearch.org/download/) - your issue may already have been fixed. If not, search our [issues list](https://github.com/elasticsearch/elasticsearch/issues) on GitHub in case a similar issue has already been opened.
+
+It is very helpful if you can prepare a reproduction of the bug. In other words, provide a small test case which we can run to confirm your bug. It makes the problem easier to find and to fix. Test cases should be provided as `curl` commands which we can copy and paste into a terminal to run them locally, for example:
+
+```sh
+# delete the index
+curl -XDELETE localhost:9200/test
+
+# insert a document
+curl -XPUT localhost:9200/test/test/1 -d '{
+ "title": "test document"
+}'
+
+# this should return XXXX but instead returns YYY
+curl ....
+```
+
+Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. The easier it is for us to recreate your problem, the faster it is likely to be fixed.
+
+Feature requests
+----------------
+
+If you find yourself wishing for a feature that doesn't exist in Elasticsearch, you are probably not alone. There are bound to be others out there with similar needs. Many of the features that Elasticsearch has today have been added because our users saw the need.
+Open an issue on our [issues list](https://github.com/elasticsearch/elasticsearch/issues) on GitHub which describes the feature you would like to see, why you need it, and how it should work.
+
+Contributing code and documentation changes
+-------------------------------------------
+
+If you have a bugfix or new feature that you would like to contribute to Elasticsearch, please find or open an issue about it first. Talk about what you would like to do. It may be that somebody is already working on it, or that there are particular issues that you should know about before implementing the change.
+
+We enjoy working with contributors to get their code accepted. There are many approaches to fixing a problem and it is important to find the best approach before writing too much code.
+
+The process for contributing to any of the [Elasticsearch repositories](https://github.com/elasticsearch/) is similar. Details for individual projects can be found below.
+
+### Fork and clone the repository
+
+You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See
+[GitHub help page](https://help.github.com/articles/fork-a-repo) for help.
+
+Further instructions for specific projects are given below.
+
+### Submitting your changes
+
+Once your changes and tests are ready to submit for review:
+
+1. Test your changes
+
+ Run the test suite to make sure that nothing is broken. See the
+[TESTING](TESTING.asciidoc) file for help running tests.
+
+2. Sign the Contributor License Agreement
+
+ Please make sure you have signed our [Contributor License Agreement](http://www.elasticsearch.org/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once.
+
+3. Rebase your changes
+
+ Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest master branch. We prefer your changes to be squashed into a single commit; a minimal command sketch follows this list.
+
+4. Submit a pull request
+
+ Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, describe what your changes do and mention the number of the issue where discussion has taken place, e.g. "Closes #123".
+
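A minimal sketch of step 3, assuming the main Elasticsearch repository has been added as a git remote named `upstream` and your work is on a branch called `my-feature-branch` (both names are placeholders, not requirements):

```sh
# fetch the latest changes from the main Elasticsearch repository
git fetch upstream

# replay your branch on top of the current master; the interactive
# rebase also lets you squash your work into a single commit
git checkout my-feature-branch
git rebase -i upstream/master

# push the rebased branch to your fork before opening the pull request
git push --force-with-lease origin my-feature-branch
```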
+Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.
+
+
+Contributing to the Elasticsearch codebase
+------------------------------------------
+
+**Repository:** [https://github.com/elasticsearch/elasticsearch](https://github.com/elasticsearch/elasticsearch)
+
+Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace`.
+
+Elasticsearch also works perfectly with Eclipse's [m2e](http://www.eclipse.org/m2e/). Once you've installed m2e you can import Elasticsearch as an `Existing Maven Project`.
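As a concrete sketch of the Eclipse route described above (assuming a checkout in `elasticsearch/`):

```sh
cd elasticsearch/
# generate Eclipse .project and .classpath files from the Maven POM
mvn eclipse:eclipse
```

Then, inside Eclipse, use `File > Import > Existing project into workspace` to import the generated project.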
+
+Please follow these formatting guidelines:
+
+* Java indent is 4 spaces
+* Line width is 140 characters
+* The rest is left to Java coding standards
+* Disable “auto-format on save” to prevent unnecessary format changes. Reformatting lines you did not touch generates noise that makes reviews much harder. If your IDE supports formatting only modified chunks, that is fine to do.
+* Don't worry too much about imports. Try not to change their order, but don't bother fighting your IDE to stop it from switching between `*` imports and specific imports in either direction.
+
+To create a distribution from the source, simply run:
+
+```sh
+cd elasticsearch/
+mvn clean package -DskipTests
+```
+
+You will find the newly built packages under: `./target/releases/`.
+
+Before submitting your changes, run the test suite to make sure that nothing is broken, with:
+
+```sh
+ES_TEST_LOCAL=true mvn clean test
+```
+
+Source: [Contributing to elasticsearch](http://www.elasticsearch.org/contributing-to-elasticsearch/)
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 0000000..3b7afec
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,5 @@
+Elasticsearch
+Copyright 2009-2014 Elasticsearch
+
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
diff --git a/README.textile b/README.textile
new file mode 100644
index 0000000..9d8fba6
--- /dev/null
+++ b/README.textile
@@ -0,0 +1,227 @@
+h1. Elasticsearch
+
+h2. A Distributed RESTful Search Engine
+
+h3. "http://www.elasticsearch.org":http://www.elasticsearch.org
+
+Elasticsearch is a distributed RESTful search engine built for the cloud. Features include:
+
+* Distributed and Highly Available Search Engine.
+** Each index is fully sharded with a configurable number of shards.
+** Each shard can have one or more replicas.
+** Read / Search operations are performed on any of the replica shards.
+* Multi Tenant with Multi Types.
+** Support for more than one index.
+** Support for more than one type per index.
+** Index level configuration (number of shards, index storage, ...).
+* A varied set of APIs
+** HTTP RESTful API
+** Native Java API.
+** All APIs perform automatic node operation rerouting.
+* Document oriented
+** No need for upfront schema definition.
+** Schema can be defined per type for customization of the indexing process.
+* Reliable, Asynchronous Write Behind for long-term persistence.
+* (Near) Real Time Search.
+* Built on top of Lucene
+** Each shard is a fully functional Lucene index
+** All the power of Lucene easily exposed through simple configuration / plugins.
+* Per operation consistency
+** Single document level operations are atomic, consistent, isolated and durable.
+* Open Source under Apache 2 License.
+
+h2. Getting Started
+
+First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
+
+h3. Installation
+
+* "Download":http://www.elasticsearch.org/download and unzip the Elasticsearch official distribution.
+* Run @bin/elasticsearch@ on Unix, or @bin/elasticsearch.bat@ on Windows.
+* Run @curl -X GET http://localhost:9200/@.
+* Start more servers ...
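+
+For example (a sketch, and an assumption on our part: this version's default multicast discovery is enabled on your network), another node started from a second unpacked copy should discover and join the cluster automatically; @-d@ daemonizes it and @-p@ writes a pid file:
+
+<pre>
+# illustrative second node; the pid file path is hypothetical
+bin/elasticsearch -d -p /tmp/es-node2.pid
+</pre>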
+
+h3. Indexing
+
+Let's try and index some twitter-like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
+
+<pre>
+curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
+
+curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
+{
+ "user": "kimchy",
+ "postDate": "2009-11-15T13:12:00",
+ "message": "Trying out Elasticsearch, so far so good?"
+}'
+
+curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
+{
+ "user": "kimchy",
+ "postDate": "2009-11-15T14:12:12",
+ "message": "Another tweet, will it be indexed?"
+}'
+</pre>
+
+Now, let's see if the information was added by GETting it:
+
+<pre>
+curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
+curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
+curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
+</pre>
+
+h3. Searching
+
+Mmm search..., shouldn't it be elastic?
+Let's find all the tweets that @kimchy@ posted:
+
+<pre>
+curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
+</pre>
+
+We can also use the JSON query language Elasticsearch provides instead of a query string:
+
+<pre>
+curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
+{
+ "query" : {
+ "text" : { "user": "kimchy" }
+ }
+}'
+</pre>
+
+Just for kicks, let's get all the documents stored (we should see the user as well):
+
+<pre>
+curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
+{
+ "query" : {
+ "matchAll" : {}
+ }
+}'
+</pre>
+
+We can also do a range search (the @postDate@ field was automatically identified as a date):
+
+<pre>
+curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
+{
+ "query" : {
+ "range" : {
+ "postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
+ }
+ }
+}'
+</pre>
+
+There are many more options to perform search; after all, it's a search product, no? All the familiar Lucene queries are available through the JSON query language, or through the query parser.
+
+h3. Multi Tenant - Indices and Types
+
+Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such a large amount of data.
+
+Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@.
+
+Another way to define our simple twitter system is to have a different index per user (though note that each index has an overhead). Here are the indexing curl commands in this case:
+
+<pre>
+curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
+
+curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
+{
+ "user": "kimchy",
+ "postDate": "2009-11-15T13:12:00",
+ "message": "Trying out Elasticsearch, so far so good?"
+}'
+
+curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
+{
+ "user": "kimchy",
+ "postDate": "2009-11-15T14:12:12",
+ "message": "Another tweet, will it be indexed?"
+}'
+</pre>
+
+The above indexes information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user gets their own special index.
+
+Complete control is possible on the index level. As an example, in the above case, we might want to change from the default of 5 shards with 1 replica per index to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in YAML as well):
+
+<pre>
+curl -XPUT http://localhost:9200/another_user/ -d '
+{
+ "index" : {
+ "numberOfShards" : 1,
+ "numberOfReplicas" : 1
+ }
+}'
+</pre>
+
+Search (and similar operations) are multi-index aware. This means that we can easily search across more than one
+index (twitter user), for example:
+
+<pre>
+curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
+{
+ "query" : {
+ "matchAll" : {}
+ }
+}'
+</pre>
+
+Or on all the indices:
+
+<pre>
+curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
+{
+ "query" : {
+ "matchAll" : {}
+ }
+}'
+</pre>
+
+{One-liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from my friends' friends).
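+
+As an illustrative sketch (the boost values are hypothetical; @indices_boost@ is the search-body element for per-index boosting), the teaser above could look like:
+
+<pre>
+# illustrative sketch -- boost my own index over the other one
+curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
+{
+    "indices_boost" : { "kimchy" : 2.0, "another_user" : 1.0 },
+    "query" : {
+        "matchAll" : {}
+    }
+}'
+</pre>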
+
+h3. Distributed, Highly Available
+
+Let's face it, things will fail....
+
+Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replicas. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (to improve search performance) or 20/1 (to improve indexing performance, with search executed in a map-reduce fashion across shards).
+
+In order to play with Elasticsearch's distributed nature, simply bring more nodes up and shut nodes down. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed.
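+
+As a hedged sketch of adapting a topology at runtime (this uses the update settings API; the replica count here is illustrative), the number of replicas of a live index can be changed without recreating it:
+
+<pre>
+# illustrative: grow from 1 replica to 2 for the index created above
+curl -XPUT 'http://localhost:9200/another_user/_settings' -d '
+{
+    "index" : {
+        "number_of_replicas" : 2
+    }
+}'
+</pre>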
+
+h3. Where to go from here?
+
+We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elasticsearch.org":http://www.elasticsearch.org website.
+
+h3. Building from Source
+
+Elasticsearch uses "Maven":http://maven.apache.org for its build system.
+
+In order to create a distribution, simply run the @mvn clean package
+-DskipTests@ command in the cloned directory.
+
+The distribution will be created under @target/releases@.
+
+See the "TESTING":TESTING.asciidoc file for more information about
+running the Elasticsearch test suite.
+
+h1. License
+
+<pre>
+This software is licensed under the Apache 2 license, quoted below.
+
+Copyright 2009-2014 Elasticsearch <http://www.elasticsearch.org>
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not
+use this file except in compliance with the License. You may obtain a copy of
+the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations under
+the License.
+</pre>
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
new file mode 100644
index 0000000..282c7e0
--- /dev/null
+++ b/TESTING.asciidoc
@@ -0,0 +1,207 @@
+[[Testing Framework Cheatsheet]]
+= Testing
+
+[partintro]
+
+Elasticsearch uses JUnit for testing. It also uses randomness in the
+tests, which can be controlled with a seed. The following is a cheatsheet of
+options for running the Elasticsearch tests.
+
+== Creating packages
+
+To create a distribution without running the tests, simply run the
+following:
+
+-----------------------------
+mvn clean package -DskipTests
+-----------------------------
+
+== Other test options
+
+To disable or enable the network transport, set `-Des.node.mode`.
+
+Use network transport (default):
+
+------------------------------------
+-Des.node.mode=network
+------------------------------------
+
+Use local transport:
+
+-------------------------------------
+-Des.node.mode=local
+-------------------------------------
+
+Alternatively, you can set the `ES_TEST_LOCAL` environment variable:
+
+-------------------------------------
+export ES_TEST_LOCAL=true && mvn test
+-------------------------------------
+
+=== Test case filtering.
+
+- `tests.class` is a class-filtering shell-like glob pattern,
+- `tests.method` is a method-filtering glob pattern.
+
+Run a single test case (variants)
+
+----------------------------------------------------------
+mvn test -Dtests.class=org.elasticsearch.package.ClassName
+mvn test "-Dtests.class=*.ClassName"
+----------------------------------------------------------
+
+Run all tests in a package and sub-packages
+
+----------------------------------------------------
+mvn test "-Dtests.class=org.elasticsearch.package.*"
+----------------------------------------------------
+
+Run any test methods that contain 'esi' (like: ...r*esi*ze...).
+
+-------------------------------
+mvn test "-Dtests.method=*esi*"
+-------------------------------
+
+=== Seed and repetitions.
+
+Run with a given seed (seed is a hex-encoded long).
+
+------------------------------
+mvn test -Dtests.seed=DEADBEEF
+------------------------------
+
+=== Repeats _all_ tests of ClassName N times.
+
+Every test repetition will have a different method seed
+(derived from a single random master seed).
+
+--------------------------------------------------
+mvn test -Dtests.iters=N -Dtests.class=*.ClassName
+--------------------------------------------------
+
+=== Repeats _all_ tests of ClassName N times, with fixed seeds.
+
+Every test repetition will have exactly the same master (0xdead) and
+method-level (0xbeef) seed.
+
+------------------------------------------------------------------------
+mvn test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.seed=DEAD:BEEF
+------------------------------------------------------------------------
+
+=== Repeats a given test N times
+
+(note the filters - individual test repetitions are given suffixes,
+i.e. testFoo[0], testFoo[1], etc., so using `testmethod` or `tests.method`
+ending in a glob is necessary to ensure the iterations are run).
+
+-------------------------------------------------------------------------
+mvn test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.method=mytest*
+-------------------------------------------------------------------------
+
+Repeats N times but skips any tests after the first failure or M initial failures.
+
+-------------------------------------------------------------
+mvn test -Dtests.iters=N -Dtests.failfast=true -Dtests.class=...
+mvn test -Dtests.iters=N -Dtests.maxfailures=M -Dtests.class=...
+-------------------------------------------------------------
+
+=== Test groups.
+
+Test groups can be enabled or disabled (true/false).
+
+Default value provided below in [brackets].
+
+------------------------------------------------------------------
+mvn test -Dtests.nightly=[false] - nightly test group (@Nightly)
+mvn test -Dtests.weekly=[false] - weekly tests (@Weekly)
+mvn test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix)
+mvn test -Dtests.slow=[true] - slow tests (@Slow)
+------------------------------------------------------------------
+
+=== Load balancing and caches.
+
+By default, the tests run sequentially on a single forked JVM.
+
+To run with more forked JVMs than the default use:
+
+----------------------------
+mvn test -Dtests.jvms=8
+----------------------------
+
+Don't count hypercores for CPU-intensive tests and leave some slack
+for JVM-internal threads (like the garbage collector). Make sure there is
+enough RAM to handle child JVMs.
+
+
+=== Miscellaneous.
+
+Run all tests without stopping on errors (inspect log files).
+
+-----------------------------------------
+mvn test -Dtests.haltonfailure=false
+-----------------------------------------
+
+Run more verbose output (slave JVM parameters, etc.).
+
+----------------------
+mvn test -Dtests.verbose=true
+----------------------
+
+Change the default suite timeout to 5 seconds for all
+tests (note the exclamation mark).
+
+---------------------------------------
+mvn test -Dtests.timeoutSuite=5000! ...
+---------------------------------------
+
+Change the logging level of ES (not mvn)
+
+--------------------------------
+mvn test -Des.logger.level=DEBUG
+--------------------------------
+
+Print all the logging output from the test runs to the commandline
+even if tests are passing.
+
+------------------------------
+mvn test -Dtests.output=always
+------------------------------
+
+== Testing the REST layer
+
+The available integration tests make use of the Java API to communicate with
+the Elasticsearch nodes, using the internal binary transport (port 9300 by
+default).
+The REST layer is tested through specific tests that are shared between all
+the official Elasticsearch clients. They consist of YAML files that describe the
+operations to be executed and the expected results that need to be checked.
+
+The REST tests are run automatically when executing the Maven test command. To run only the
+REST tests use the following command:
+
+---------------------------------------------------------------------------
+mvn test -Dtests.class=org.elasticsearch.test.rest.ElasticsearchRestTests
+---------------------------------------------------------------------------
+
+`ElasticsearchRestTests` is the executable test class that runs all the
+yaml suites available within the `rest-api-spec` folder.
+
+The following are the options supported by the REST tests runner:
+
+* `tests.rest[true|false|host:port]`: determines whether the REST tests need
+to be run and, if so, whether to rely on an external cluster (providing host
+and port) or to spin up a test cluster (default). It's possible to provide a
+comma-separated list of addresses to send requests to in a round-robin fashion.
+* `tests.rest.suite`: comma-separated paths of the test suites to be run
+(by default loaded from /rest-api-spec/test). It is possible to run only a subset
+of the tests by providing a sub-folder or even a single yaml file (the default
+/rest-api-spec/test prefix is optional when files are loaded from the classpath),
+e.g. -Dtests.rest.suite=index,get,create/10_with_id
+* `tests.rest.section`: a regex used to filter the test sections that
+are going to be run. If provided, only the section names that match it
+(case-insensitively) will be executed
+* `tests.rest.spec`: REST spec path (default /rest-api-spec/api)
+* `tests.iters`: runs multiple iterations
+* `tests.seed`: seed to base the random behaviours on
+* `tests.appendseed[true|false]`: enables adding the seed to each test
+section's description (default false)
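+
+For example, the following sketch combines the options above (the suite names
+are taken verbatim from the `tests.rest.suite` example):
+
+---------------------------------------------------------------------------
+mvn test -Dtests.class=org.elasticsearch.test.rest.ElasticsearchRestTests -Dtests.rest.suite=index,get,create/10_with_id
+---------------------------------------------------------------------------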
diff --git a/bin/elasticsearch b/bin/elasticsearch
new file mode 100755
index 0000000..e3eb514
--- /dev/null
+++ b/bin/elasticsearch
@@ -0,0 +1,214 @@
+#!/bin/sh
+
+# OPTIONS:
+# -d: daemonize, start in the background
+# -p <filename>: log the pid to a file (useful to kill it later)
+
+# CONTROLLING STARTUP:
+#
+# This script relies on a few environment variables to determine startup
+# behavior; those variables are:
+#
+# ES_CLASSPATH -- A Java classpath containing everything necessary to run.
+# JAVA_OPTS -- Additional arguments to the JVM for heap size, etc
+# ES_JAVA_OPTS -- External Java Opts on top of the defaults set
+#
+#
+# Optionally, exact memory values can be set using the following variables; note
+# that they can also be set via `ES_JAVA_OPTS`. Sample formats include "512m" and "10g".
+#
+# ES_HEAP_SIZE -- Sets both the minimum and maximum memory to allocate (recommended)
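+#
+# A hypothetical usage example (the value is illustrative, not a recommendation):
+#
+#   $ ES_HEAP_SIZE=4g bin/elasticsearch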
+#
+# As a convenience, a fragment of shell is sourced in order to set one or
+# more of these variables. This so-called `include' can be placed in a
+# number of locations and will be searched for in order. The lowest
+# priority search path is the same directory as the startup script, and
+# since this is the location of the sample in the project tree, it should
+# almost always work Out Of The Box.
+#
+# Any serious use-case though will likely require customization of the
+# include. For production installations, it is recommended that you copy
+# the sample to one of /usr/share/elasticsearch/elasticsearch.in.sh,
+# /usr/local/share/elasticsearch/elasticsearch.in.sh, or
+# /opt/elasticsearch/elasticsearch.in.sh and make your modifications there.
+#
+# Another option is to specify the full path to the include file in the
+# environment. For example:
+#
+# $ ES_INCLUDE=/path/to/in.sh elasticsearch -p /var/run/es.pid
+#
+# Note: This is particularly handy for running multiple instances on a
+# single installation, or for quick tests.
+#
+# If you would rather configure startup entirely from the environment, you
+# can disable the include by exporting an empty ES_INCLUDE, or by
+# ensuring that no include files exist in the aforementioned search list.
+# Be aware that you will be entirely responsible for populating the needed
+# environment variables.
+
+
+# Maven will replace the project.name with elasticsearch below. If that
+# hasn't been done, we assume that this is not a packaged version and the
+# user has forgotten to run Maven to create a package.
+IS_PACKAGED_VERSION='${project.name}'
+if [ "$IS_PACKAGED_VERSION" != "elasticsearch" ]; then
+ cat >&2 << EOF
+Error: You must build the project with Maven or download a pre-built package
+before you can run Elasticsearch. See 'Building from Source' in README.textile
+or visit http://www.elasticsearch.org/download to get a pre-built package.
+EOF
+ exit 1
+fi
+
+CDPATH=""
+SCRIPT="$0"
+
+# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
+while [ -h "$SCRIPT" ] ; do
+ ls=`ls -ld "$SCRIPT"`
+ # Drop everything prior to ->
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ SCRIPT="$link"
+ else
+ SCRIPT=`dirname "$SCRIPT"`/"$link"
+ fi
+done
+
+# determine elasticsearch home
+ES_HOME=`dirname "$SCRIPT"`/..
+
+# make ELASTICSEARCH_HOME absolute
+ES_HOME=`cd "$ES_HOME"; pwd`
+
+
+# If an include wasn't specified in the environment, then search for one...
+if [ "x$ES_INCLUDE" = "x" ]; then
+ # Locations (in order) to use when searching for an include file.
+ for include in /usr/share/elasticsearch/elasticsearch.in.sh \
+ /usr/local/share/elasticsearch/elasticsearch.in.sh \
+ /opt/elasticsearch/elasticsearch.in.sh \
+ ~/.elasticsearch.in.sh \
+ "`dirname "$0"`"/elasticsearch.in.sh; do
+ if [ -r "$include" ]; then
+ . "$include"
+ break
+ fi
+ done
+# ...otherwise, source the specified include.
+elif [ -r "$ES_INCLUDE" ]; then
+ . "$ES_INCLUDE"
+fi
+
+if [ -x "$JAVA_HOME/bin/java" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+else
+ JAVA=`which java`
+fi
+
+if [ ! -x "$JAVA" ]; then
+ echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
+ exit 1
+fi
+
+if [ -z "$ES_CLASSPATH" ]; then
+ echo "You must set the ES_CLASSPATH var" >&2
+ exit 1
+fi
+
+# Special-case path variables.
+case `uname` in
+ CYGWIN*)
+ ES_CLASSPATH=`cygpath -p -w "$ES_CLASSPATH"`
+ ES_HOME=`cygpath -p -w "$ES_HOME"`
+ ;;
+esac
+
+launch_service()
+{
+ pidpath=$1
+ daemonized=$2
+ props=$3
+ es_parms="-Delasticsearch"
+
+ if [ "x$pidpath" != "x" ]; then
+ es_parms="$es_parms -Des.pidfile=$pidpath"
+ fi
+
+    # The es.foreground option will tell Elasticsearch not to close stdout/stderr, but it's up to us not to daemonize.
+ if [ "x$daemonized" = "x" ]; then
+ es_parms="$es_parms -Des.foreground=yes"
+ exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
+ org.elasticsearch.bootstrap.Elasticsearch
+        # exec without running it in the background makes it replace this shell; we'll
+        # never get here, so there is no need to return anything
+ else
+ # Startup Elasticsearch, background it, and write the pid.
+ exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
+ org.elasticsearch.bootstrap.Elasticsearch <&- &
+ return $?
+ fi
+}
+
+# Parse any long getopt options and put them into properties before calling getopt below
+# Be dash-compatible to make sure running under Ubuntu works
+ARGV=""
+while [ $# -gt 0 ]
+do
+ case $1 in
+ --*=*) properties="$properties -Des.${1#--}"
+ shift 1
+ ;;
+ --*) properties="$properties -Des.${1#--}=$2"
+ shift 2
+ ;;
+ *) ARGV="$ARGV $1" ; shift
+ esac
+done
+
+# Parse any command line options.
+args=`getopt vdhp:D:X: $ARGV`
+eval set -- "$args"
+
+while true; do
+ case $1 in
+ -v)
+ "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
+ org.elasticsearch.Version
+ exit 0
+ ;;
+ -p)
+ pidfile="$2"
+ shift 2
+ ;;
+ -d)
+ daemonized="yes"
+ shift
+ ;;
+ -h)
+ echo "Usage: $0 [-d] [-h] [-p pidfile]"
+ exit 0
+ ;;
+ -D)
+ properties="$properties -D$2"
+ shift 2
+ ;;
+ -X)
+ properties="$properties -X$2"
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ echo "Error parsing argument $1!" >&2
+ exit 1
+ ;;
+ esac
+done
+
+# Start up the service
+launch_service "$pidfile" "$daemonized" "$properties"
+
+exit $?
diff --git a/bin/elasticsearch-service-mgr.exe b/bin/elasticsearch-service-mgr.exe
new file mode 100644
index 0000000..7302404
--- /dev/null
+++ b/bin/elasticsearch-service-mgr.exe
Binary files differ
diff --git a/bin/elasticsearch-service-x64.exe b/bin/elasticsearch-service-x64.exe
new file mode 100644
index 0000000..dab7def
--- /dev/null
+++ b/bin/elasticsearch-service-x64.exe
Binary files differ
diff --git a/bin/elasticsearch-service-x86.exe b/bin/elasticsearch-service-x86.exe
new file mode 100644
index 0000000..4240720
--- /dev/null
+++ b/bin/elasticsearch-service-x86.exe
Binary files differ
diff --git a/bin/elasticsearch.bat b/bin/elasticsearch.bat
new file mode 100644
index 0000000..b9c1bbb
--- /dev/null
+++ b/bin/elasticsearch.bat
@@ -0,0 +1,79 @@
+@echo off
+
+SETLOCAL
+
+if NOT DEFINED JAVA_HOME goto err
+
+set SCRIPT_DIR=%~dp0
+for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
+
+
+REM ***** JAVA options *****
+
+if "%ES_MIN_MEM%" == "" (
+set ES_MIN_MEM=256m
+)
+
+if "%ES_MAX_MEM%" == "" (
+set ES_MAX_MEM=1g
+)
+
+if NOT "%ES_HEAP_SIZE%" == "" (
+set ES_MIN_MEM=%ES_HEAP_SIZE%
+set ES_MAX_MEM=%ES_HEAP_SIZE%
+)
+
+set JAVA_OPTS=%JAVA_OPTS% -Xms%ES_MIN_MEM% -Xmx%ES_MAX_MEM%
+
+if NOT "%ES_HEAP_NEWSIZE%" == "" (
+set JAVA_OPTS=%JAVA_OPTS% -Xmn%ES_HEAP_NEWSIZE%
+)
+
+if NOT "%ES_DIRECT_SIZE%" == "" (
+set JAVA_OPTS=%JAVA_OPTS% -XX:MaxDirectMemorySize=%ES_DIRECT_SIZE%
+)
+
+set JAVA_OPTS=%JAVA_OPTS% -Xss256k
+
+REM Enable aggressive optimizations in the JVM
+REM - Disabled by default as it might cause the JVM to crash
+REM set JAVA_OPTS=%JAVA_OPTS% -XX:+AggressiveOpts
+
+set JAVA_OPTS=%JAVA_OPTS% -XX:+UseParNewGC
+set JAVA_OPTS=%JAVA_OPTS% -XX:+UseConcMarkSweepGC
+
+set JAVA_OPTS=%JAVA_OPTS% -XX:CMSInitiatingOccupancyFraction=75
+set JAVA_OPTS=%JAVA_OPTS% -XX:+UseCMSInitiatingOccupancyOnly
+
+REM When running under Java 7
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+UseCondCardMark
+
+REM GC logging options -- uncomment to enable
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDetails
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCTimeStamps
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintClassHistogram
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintTenuringDistribution
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCApplicationStoppedTime
+REM JAVA_OPTS=%JAVA_OPTS% -Xloggc:/var/log/elasticsearch/gc.log
+
+REM Causes the JVM to dump its heap on OutOfMemory.
+set JAVA_OPTS=%JAVA_OPTS% -XX:+HeapDumpOnOutOfMemoryError
+REM The path to the heap dump location; note the directory must exist and have
+REM enough space for a full heap dump.
+REM JAVA_OPTS=%JAVA_OPTS% -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof
+
+set ES_CLASSPATH=%ES_CLASSPATH%;%ES_HOME%/lib/${project.build.finalName}.jar;%ES_HOME%/lib/*;%ES_HOME%/lib/sigar/*
+set ES_PARAMS=-Delasticsearch -Des-foreground=yes -Des.path.home="%ES_HOME%"
+
+"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% %ES_PARAMS% %* -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch"
+goto finally
+
+
+:err
+echo JAVA_HOME environment variable must be set!
+pause
+
+
+:finally
+
+ENDLOCAL
diff --git a/bin/elasticsearch.in.sh b/bin/elasticsearch.in.sh
new file mode 100644
index 0000000..8713205
--- /dev/null
+++ b/bin/elasticsearch.in.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+ES_CLASSPATH=$ES_CLASSPATH:$ES_HOME/lib/${project.build.finalName}.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/*
+
+if [ "x$ES_MIN_MEM" = "x" ]; then
+ ES_MIN_MEM=256m
+fi
+if [ "x$ES_MAX_MEM" = "x" ]; then
+ ES_MAX_MEM=1g
+fi
+if [ "x$ES_HEAP_SIZE" != "x" ]; then
+ ES_MIN_MEM=$ES_HEAP_SIZE
+ ES_MAX_MEM=$ES_HEAP_SIZE
+fi
+
+# min and max heap sizes should be set to the same value to avoid
+# stop-the-world GC pauses during resize, and so that we can lock the
+# heap in memory on startup to prevent any of it from being swapped
+# out.
+JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}"
+JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}"
+
+# new generation
+if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then
+ JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}"
+fi
+
+# max direct memory
+if [ "x$ES_DIRECT_SIZE" != "x" ]; then
+ JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}"
+fi
+
+# reduce the per-thread stack size
+JAVA_OPTS="$JAVA_OPTS -Xss256k"
+
+# set to headless, just in case
+JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true"
+
+# Force the JVM to use IPv4 stack
+if [ "x$ES_USE_IPV4" != "x" ]; then
+ JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true"
+fi
+
+JAVA_OPTS="$JAVA_OPTS -XX:+UseParNewGC"
+JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC"
+
+JAVA_OPTS="$JAVA_OPTS -XX:CMSInitiatingOccupancyFraction=75"
+JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
+
+# GC logging options
+if [ "x$ES_USE_GC_LOGGING" != "x" ]; then
+ JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails"
+ JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps"
+ JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram"
+ JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution"
+ JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime"
+ JAVA_OPTS="$JAVA_OPTS -Xloggc:/var/log/elasticsearch/gc.log"
+fi
+
+# Causes the JVM to dump its heap on OutOfMemory.
+JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError"
+# The path to the heap dump location; note the directory must exist and have
+# enough space for a full heap dump.
+#JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof"
diff --git a/bin/plugin b/bin/plugin
new file mode 100755
index 0000000..1cabad2
--- /dev/null
+++ b/bin/plugin
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+CDPATH=""
+SCRIPT="$0"
+
+# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
+while [ -h "$SCRIPT" ] ; do
+ ls=`ls -ld "$SCRIPT"`
+ # Drop everything prior to ->
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ SCRIPT="$link"
+ else
+ SCRIPT=`dirname "$SCRIPT"`/"$link"
+ fi
+done
+
+# determine elasticsearch home
+ES_HOME=`dirname "$SCRIPT"`/..
+
+# make ELASTICSEARCH_HOME absolute
+ES_HOME=`cd "$ES_HOME"; pwd`
+
+
+if [ -x "$JAVA_HOME/bin/java" ]; then
+ JAVA=$JAVA_HOME/bin/java
+else
+ JAVA=`which java`
+fi
+
+# real getopt cannot be used because we need to hand options over to the PluginManager
+while [ $# -gt 0 ]; do
+ case $1 in
+ -D*=*)
+ properties="$properties $1"
+ ;;
+ -D*)
+ var=$1
+ shift
+ properties="$properties $var=$1"
+ ;;
+ *)
+ args="$args $1"
+ esac
+ shift
+done
+
+exec $JAVA $JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="$ES_HOME" $properties -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginManager $args
+
diff --git a/bin/plugin.bat b/bin/plugin.bat
new file mode 100644
index 0000000..9946fa3
--- /dev/null
+++ b/bin/plugin.bat
@@ -0,0 +1,22 @@
+@echo off
+
+SETLOCAL
+
+if NOT DEFINED JAVA_HOME goto err
+
+set SCRIPT_DIR=%~dp0
+for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
+
+
+"%JAVA_HOME%\bin\java" %JAVA_OPTS% -Xmx64m -Xms16m -Des.path.home="%ES_HOME%" -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginManager" %*
+goto finally
+
+
+:err
+echo JAVA_HOME environment variable must be set!
+pause
+
+
+:finally
+
+ENDLOCAL
diff --git a/bin/service.bat b/bin/service.bat
new file mode 100644
index 0000000..166f40a
--- /dev/null
+++ b/bin/service.bat
@@ -0,0 +1,231 @@
+@echo off
+SETLOCAL
+
+if NOT DEFINED JAVA_HOME goto err
+
+set SCRIPT_DIR=%~dp0
+for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
+
+rem Detect JVM version to figure out appropriate executable to use
+if not exist "%JAVA_HOME%\bin\java.exe" (
+echo JAVA_HOME points to an invalid Java installation (no java.exe found in "%JAVA_HOME%"^). Exiting...
+goto:eof
+)
+"%JAVA_HOME%\bin\java" -version 2>&1 | find "64-Bit" >nul:
+
+if errorlevel 1 goto x86
+set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x64.exe
+set SERVICE_ID=elasticsearch-service-x64
+set ARCH=64-bit
+goto checkExe
+
+:x86
+set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x86.exe
+set SERVICE_ID=elasticsearch-service-x86
+set ARCH=32-bit
+
+:checkExe
+if EXIST "%EXECUTABLE%" goto okExe
+echo elasticsearch-service-x64.exe or elasticsearch-service-x86.exe was not found...
+goto:eof
+
+:okExe
+set ES_VERSION=${project.version}
+
+if "%LOG_DIR%" == "" set LOG_DIR=%ES_HOME%\logs
+
+if "x%1x" == "xx" goto displayUsage
+set SERVICE_CMD=%1
+shift
+if "x%1x" == "xx" goto checkServiceCmd
+set SERVICE_ID=%1
+
+:checkServiceCmd
+
+if "%LOG_OPTS%" == "" set LOG_OPTS=--LogPath "%LOG_DIR%" --LogPrefix "%SERVICE_ID%" --StdError auto --StdOutput auto
+
+if /i %SERVICE_CMD% == install goto doInstall
+if /i %SERVICE_CMD% == remove goto doRemove
+if /i %SERVICE_CMD% == start goto doStart
+if /i %SERVICE_CMD% == stop goto doStop
+if /i %SERVICE_CMD% == manager goto doManagement
+echo Unknown option "%SERVICE_CMD%"
+
+:displayUsage
+echo.
+echo Usage: service.bat install^|remove^|start^|stop^|manager [SERVICE_ID]
+goto:eof
+
+:doStart
+"%EXECUTABLE%" //ES//%SERVICE_ID% %LOG_OPTS%
+if not errorlevel 1 goto started
+echo Failed starting '%SERVICE_ID%' service
+goto:eof
+:started
+echo The service '%SERVICE_ID%' has been started
+goto:eof
+
+:doStop
+"%EXECUTABLE%" //SS//%SERVICE_ID% %LOG_OPTS%
+if not errorlevel 1 goto stopped
+echo Failed stopping '%SERVICE_ID%' service
+goto:eof
+:stopped
+echo The service '%SERVICE_ID%' has been stopped
+goto:eof
+
+:doManagement
+set EXECUTABLE_MGR=%ES_HOME%\bin\elasticsearch-service-mgr.exe
+"%EXECUTABLE_MGR%" //ES//%SERVICE_ID%
+if not errorlevel 1 goto managed
+echo Failed starting service manager for '%SERVICE_ID%'
+goto:eof
+:managed
+echo Successfully started service manager for '%SERVICE_ID%'.
+goto:eof
+
+:doRemove
+rem Remove the service
+"%EXECUTABLE%" //DS//%SERVICE_ID% %LOG_OPTS%
+if not errorlevel 1 goto removed
+echo Failed removing '%SERVICE_ID%' service
+goto:eof
+:removed
+echo The service '%SERVICE_ID%' has been removed
+goto:eof
+
+:doInstall
+echo Installing service : "%SERVICE_ID%"
+echo Using JAVA_HOME (%ARCH%): "%JAVA_HOME%"
+
+rem Check JVM server dll first
+set JVM_DLL=%JAVA_HOME%\jre\bin\server\jvm.dll
+if exist "%JVM_DLL%" goto foundJVM
+
+rem Check 'server' JRE (JRE installed on Windows Server)
+set JVM_DLL=%JAVA_HOME%\bin\server\jvm.dll
+if exist "%JVM_DLL%" goto foundJVM
+
+rem Fallback to 'client' JRE
+set JVM_DLL=%JAVA_HOME%\bin\client\jvm.dll
+
+if exist "%JVM_DLL%" (
+echo Warning: JAVA_HOME points to a JRE and not JDK installation; a client (not a server^) JVM will be used...
+) else (
+echo JAVA_HOME points to an invalid Java installation (no jvm.dll found in "%JAVA_HOME%"^). Exiting...
+goto:eof
+)
+
+:foundJVM
+if "%ES_MIN_MEM%" == "" set ES_MIN_MEM=256m
+if "%ES_MAX_MEM%" == "" set ES_MAX_MEM=1g
+
+if NOT "%ES_HEAP_SIZE%" == "" set ES_MIN_MEM=%ES_HEAP_SIZE%
+if NOT "%ES_HEAP_SIZE%" == "" set ES_MAX_MEM=%ES_HEAP_SIZE%
+
+call:convertxm %ES_MIN_MEM% JVM_XMS
+call:convertxm %ES_MAX_MEM% JVM_XMX
+
+rem java_opts might be empty - init to avoid tripping commons daemon (if the command starts with ;)
+if not "%JAVA_OPTS%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+UseParNewGC
+if "%JAVA_OPTS%" == "" set JAVA_OPTS=-XX:+UseParNewGC
+
+if NOT "%ES_HEAP_NEWSIZE%" == "" set JAVA_OPTS=%JAVA_OPTS% -Xmn%ES_HEAP_NEWSIZE%
+
+if NOT "%ES_DIRECT_SIZE%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:MaxDirectMemorySize=%ES_DIRECT_SIZE%
+
+rem thread stack size
+set JVM_SS=256
+
+REM Enable aggressive optimizations in the JVM
+REM - Disabled by default as it might cause the JVM to crash
+REM set JAVA_OPTS=%JAVA_OPTS% -XX:+AggressiveOpts
+
+
+set JAVA_OPTS=%JAVA_OPTS% -XX:+UseConcMarkSweepGC
+
+set JAVA_OPTS=%JAVA_OPTS% -XX:CMSInitiatingOccupancyFraction=75
+set JAVA_OPTS=%JAVA_OPTS% -XX:+UseCMSInitiatingOccupancyOnly
+
+REM GC logging options -- uncomment to enable
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDetails
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCTimeStamps
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintClassHistogram
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintTenuringDistribution
+REM JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCApplicationStoppedTime
+REM JAVA_OPTS=%JAVA_OPTS% -Xloggc:/var/log/elasticsearch/gc.log
+
+REM Causes the JVM to dump its heap on OutOfMemory.
+set JAVA_OPTS=%JAVA_OPTS% -XX:+HeapDumpOnOutOfMemoryError
+REM The path to the heap dump location; note the directory must exist and have
+REM enough space for a full heap dump.
+REM JAVA_OPTS=%JAVA_OPTS% -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof
+
+if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data
+
+if "%WORK_DIR%" == "" set WORK_DIR=%ES_HOME%
+
+if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config
+
+if "%CONF_FILE%" == "" set CONF_FILE=%ES_HOME%\config\elasticsearch.yml
+
+set ES_CLASSPATH=%ES_CLASSPATH%;%ES_HOME%/lib/elasticsearch-%ES_VERSION%.jar;%ES_HOME%/lib/*;%ES_HOME%/lib/sigar/*
+set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.work="%WORK_DIR%";-Des.default.path.conf="%CONF_DIR%"
+
+set JVM_OPTS=%JAVA_OPTS: =;%
+
+if not "%ES_JAVA_OPTS%" == "" set JVM_ES_JAVA_OPTS=%ES_JAVA_OPTS: =#%
+if not "%ES_JAVA_OPTS%" == "" set JVM_OPTS=%JVM_OPTS%;%JVM_ES_JAVA_OPTS%
+
+if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual
+if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0
+
+"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%"
+
+
+if not errorlevel 1 goto installed
+echo Failed installing '%SERVICE_ID%' service
+goto:eof
+
+:installed
+echo The service '%SERVICE_ID%' has been installed.
+goto:eof
+
+:err
+echo JAVA_HOME environment variable must be set!
+pause
+
+goto:eof
+
+rem ---
+rem Function for converting Xm[s|x] values into MB which Commons Daemon accepts
+rem ---
+:convertxm
+set value=%~1
+rem extract last char (unit)
+set unit=%value:~-1%
+rem assume the unit is specified
+set conv=%value:~0,-1%
+
+if "%unit%" == "k" goto kilo
+if "%unit%" == "K" goto kilo
+if "%unit%" == "m" goto mega
+if "%unit%" == "M" goto mega
+if "%unit%" == "g" goto giga
+if "%unit%" == "G" goto giga
+
+rem no unit found, must be bytes; consider the whole value
+set conv=%value%
+rem convert to KB
+set /a conv=%conv% / 1024
+:kilo
+rem convert to MB
+set /a conv=%conv% / 1024
+goto mega
+:giga
+rem convert to MB
+set /a conv=%conv% * 1024
+:mega
+set "%~2=%conv%"
+goto:eof
+
+ENDLOCAL
diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml
new file mode 100644
index 0000000..51fcea4
--- /dev/null
+++ b/config/elasticsearch.yml
@@ -0,0 +1,377 @@
+##################### Elasticsearch Configuration Example #####################
+
+# This file contains an overview of various configuration settings,
+# targeted at operations staff. Application developers should
+# consult the guide at <http://elasticsearch.org/guide>.
+#
+# The installation procedure is covered at
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
+#
+# Elasticsearch comes with reasonable defaults for most settings,
+# so you can try it out without bothering with configuration.
+#
+# Most of the time, these defaults are just fine for running a production
+# cluster. If you're fine-tuning your cluster, or wondering about the
+# effect of a certain configuration option, please _do ask_ on the
+# mailing list or IRC channel [http://elasticsearch.org/community].
+
+# Any element in the configuration can be replaced with environment variables
+# by placing them in ${...} notation. For example:
+#
+# node.rack: ${RACK_ENV_VAR}
+
+# For information on supported formats and syntax for the config file, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
+
+
+################################### Cluster ###################################
+
+# Cluster name identifies your cluster for auto-discovery. If you're running
+# multiple clusters on the same network, make sure you're using unique names.
+#
+# cluster.name: elasticsearch
+
+
+#################################### Node #####################################
+
+# Node names are generated dynamically on startup, so you're relieved
+# of configuring them manually. You can tie this node to a specific name:
+#
+# node.name: "Franz Kafka"
+
+# Every node can be configured to allow or deny being eligible as the master,
+# and to allow or deny storing data.
+#
+# Allow this node to be eligible as a master node (enabled by default):
+#
+# node.master: true
+#
+# Allow this node to store data (enabled by default):
+#
+# node.data: true
+
+# You can exploit these settings to design advanced cluster topologies.
+#
+# 1. You want this node to never become a master node, only to hold data.
+# This will be the "workhorse" of your cluster.
+#
+# node.master: false
+# node.data: true
+#
+# 2. You want this node to only serve as a master: to not store any data and
+# to have free resources. This will be the "coordinator" of your cluster.
+#
+# node.master: true
+# node.data: false
+#
+# 3. You want this node to be neither master nor data node, but
+# to act as a "search load balancer" (fetching data from nodes,
+# aggregating results, etc.)
+#
+# node.master: false
+# node.data: false
+
+# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
+# Node Info API [http://localhost:9200/_nodes] or GUI tools
+# such as <http://www.elasticsearch.org/overview/marvel/>,
+# <http://github.com/karmi/elasticsearch-paramedic>,
+# <http://github.com/lukas-vlcek/bigdesk> and
+# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
+
+# A node can have generic attributes associated with it, which can later be used
+# for customized shard allocation filtering, or allocation awareness. An attribute
+# is a simple key-value pair, similar to node.key: value; here is an example:
+#
+# node.rack: rack314
+
+# By default, multiple nodes are allowed to start from the same installation location.
+# To disable that, set the following:
+# node.max_local_storage_nodes: 1
+
+
+#################################### Index ####################################
+
+# You can set a number of options (such as shard/replica options, mapping
+# or analyzer definitions, translog settings, ...) for indices globally,
+# in this file.
+#
+# Note that it makes more sense to configure index settings specifically for
+# a certain index, either when creating it or by using the index templates API.
+#
+# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
+# for more information.
+
+# Set the number of shards (splits) of an index (5 by default):
+#
+# index.number_of_shards: 5
+
+# Set the number of replicas (additional copies) of an index (1 by default):
+#
+# index.number_of_replicas: 1
+
+# Note that for development on a local machine, with small indices, it usually
+# makes sense to "disable" the distributed features:
+#
+# index.number_of_shards: 1
+# index.number_of_replicas: 0
+
+# These settings directly affect the performance of index and search operations
+# in your cluster. Assuming you have enough machines to hold shards and
+# replicas, the rule of thumb is:
+#
+# 1. Having more *shards* enhances the _indexing_ performance and allows you to
+# _distribute_ a big index across machines.
+# 2. Having more *replicas* enhances the _search_ performance and improves the
+# cluster _availability_.
+#
+# The "number_of_shards" is a one-time setting for an index.
+#
+# The "number_of_replicas" can be increased or decreased anytime,
+# by using the Index Update Settings API.
+#
+# Elasticsearch takes care of load balancing, relocating, gathering the
+# results from nodes, etc. Experiment with different settings to fine-tune
+# your setup.
+
+# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
+# the index status.
+
+
+#################################### Paths ####################################
+
+# Path to directory containing configuration (this file and logging.yml):
+#
+# path.conf: /path/to/conf
+
+# Path to directory where to store index data allocated for this node.
+#
+# path.data: /path/to/data
+#
+# Can optionally include more than one location, causing data to be striped across
+# the locations (a la RAID 0) on a file level, favouring locations with the most free
+# space on creation. For example:
+#
+# path.data: /path/to/data1,/path/to/data2
+
+# Path to temporary files:
+#
+# path.work: /path/to/work
+
+# Path to log files:
+#
+# path.logs: /path/to/logs
+
+# Path to where plugins are installed:
+#
+# path.plugins: /path/to/plugins
+
+
+#################################### Plugin ###################################
+
+# If a plugin listed here is not installed for the current node, the node will not start.
+#
+# plugin.mandatory: mapper-attachments,lang-groovy
+
+
+################################### Memory ####################################
+
+# Elasticsearch performs poorly when the JVM starts swapping: you should ensure that
+# it _never_ swaps.
+#
+# Set this property to true to lock the memory:
+#
+# bootstrap.mlockall: true
+
+# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
+# to the same value, and that the machine has enough memory to allocate
+# for Elasticsearch, leaving enough memory for the operating system itself.
+#
+# You should also make sure that the Elasticsearch process is allowed to lock
+# the memory, eg. by using `ulimit -l unlimited`.
+
+
+############################## Network And HTTP ###############################
+
+# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
+# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
+# communication. (The range means that if the port is busy, it will automatically
+# try the next port.)
+
+# Set the bind address specifically (IPv4 or IPv6):
+#
+# network.bind_host: 192.168.0.1
+
+# Set the address other nodes will use to communicate with this node. If not
+# set, it is automatically derived. It must point to an actual IP address.
+#
+# network.publish_host: 192.168.0.1
+
+# Set both 'bind_host' and 'publish_host':
+#
+# network.host: 192.168.0.1
+
+# Set a custom port for the node to node communication (9300 by default):
+#
+# transport.tcp.port: 9300
+
+# Enable compression for all communication between nodes (disabled by default):
+#
+# transport.tcp.compress: true
+
+# Set a custom port to listen for HTTP traffic:
+#
+# http.port: 9200
+
+# Set a custom allowed content length:
+#
+# http.max_content_length: 100mb
+
+# Disable HTTP completely:
+#
+# http.enabled: false
+
+
+################################### Gateway ###################################
+
+# The gateway allows for persisting the cluster state between full cluster
+# restarts. Every change to the state (such as adding an index) will be stored
+# in the gateway, and when the cluster starts up for the first time,
+# it will read its state from the gateway.
+
+# There are several types of gateway implementations. For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
+
+# The default gateway type is the "local" gateway (recommended):
+#
+# gateway.type: local
+
+# Settings below control how and when to start the initial recovery process on
+# a full cluster restart (to reuse as much local data as possible when using shared
+# gateway).
+
+# Allow recovery process after N nodes in a cluster are up:
+#
+# gateway.recover_after_nodes: 1
+
+# Set the timeout to initiate the recovery process, once the N nodes
+# from the previous setting are up (accepts a time value):
+#
+# gateway.recover_after_time: 5m
+
+# Set how many nodes are expected in this cluster. Once these N nodes
+# are up (and recover_after_nodes is met), begin recovery process immediately
+# (without waiting for recover_after_time to expire):
+#
+# gateway.expected_nodes: 2
+
+
+############################# Recovery Throttling #############################
+
+# These settings allow you to control the process of shard allocation between
+# nodes during initial recovery, replica allocation, rebalancing,
+# or when adding and removing nodes.
+
+# Set the number of concurrent recoveries happening on a node:
+#
+# 1. During the initial recovery
+#
+# cluster.routing.allocation.node_initial_primaries_recoveries: 4
+#
+# 2. During adding/removing nodes, rebalancing, etc
+#
+# cluster.routing.allocation.node_concurrent_recoveries: 2
+
+# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
+#
+# indices.recovery.max_bytes_per_sec: 20mb
+
+# Set to limit the number of open concurrent streams when
+# recovering a shard from a peer:
+#
+# indices.recovery.concurrent_streams: 5
+
+
+################################## Discovery ##################################
+
+# Discovery infrastructure ensures nodes can be found within a cluster
+# and a master node is elected. Multicast discovery is the default.
+
+# Set to ensure a node sees N other master-eligible nodes to be considered
+# operational within the cluster. It's recommended to set it to a higher value
+# than 1 when running more than 2 nodes in the cluster.
+#
+# discovery.zen.minimum_master_nodes: 1
+
+# Set the time to wait for ping responses from other nodes when discovering.
+# Set this option to a higher value on a slow or congested network
+# to minimize discovery failures:
+#
+# discovery.zen.ping.timeout: 3s
+
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
+
+# Unicast discovery allows you to explicitly control which nodes will be used
+# to discover the cluster. It can be used when multicast is not present,
+# or to restrict the cluster communication-wise.
+#
+# 1. Disable multicast discovery (enabled by default):
+#
+# discovery.zen.ping.multicast.enabled: false
+#
+# 2. Configure an initial list of master nodes in the cluster
+# to perform discovery when new nodes (master or data) are started:
+#
+# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
+
+# EC2 discovery allows you to use the AWS EC2 API in order to perform discovery.
+#
+# You have to install the cloud-aws plugin to enable EC2 discovery.
+#
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
+#
+# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
+# for a step-by-step tutorial.
+
+# GCE discovery allows you to use the Google Compute Engine API in order to perform discovery.
+#
+# You have to install the cloud-gce plugin to enable GCE discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
+
+# Azure discovery allows you to use the Azure API in order to perform discovery.
+#
+# You have to install the cloud-azure plugin to enable Azure discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
+
+################################## Slow Log ##################################
+
+# Shard level query and fetch threshold logging.
+
+#index.search.slowlog.threshold.query.warn: 10s
+#index.search.slowlog.threshold.query.info: 5s
+#index.search.slowlog.threshold.query.debug: 2s
+#index.search.slowlog.threshold.query.trace: 500ms
+
+#index.search.slowlog.threshold.fetch.warn: 1s
+#index.search.slowlog.threshold.fetch.info: 800ms
+#index.search.slowlog.threshold.fetch.debug: 500ms
+#index.search.slowlog.threshold.fetch.trace: 200ms
+
+#index.indexing.slowlog.threshold.index.warn: 10s
+#index.indexing.slowlog.threshold.index.info: 5s
+#index.indexing.slowlog.threshold.index.debug: 2s
+#index.indexing.slowlog.threshold.index.trace: 500ms
+
+################################## GC Logging ################################
+
+#monitor.jvm.gc.young.warn: 1000ms
+#monitor.jvm.gc.young.info: 700ms
+#monitor.jvm.gc.young.debug: 400ms
+
+#monitor.jvm.gc.old.warn: 10s
+#monitor.jvm.gc.old.info: 5s
+#monitor.jvm.gc.old.debug: 2s
diff --git a/config/logging.yml b/config/logging.yml
new file mode 100644
index 0000000..9e00d01
--- /dev/null
+++ b/config/logging.yml
@@ -0,0 +1,56 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+ # log action execution errors for easier debugging
+ action: DEBUG
+ # reduce the logging for aws, too much is logged under the default INFO
+ com.amazonaws: WARN
+
+ # gateway
+ #gateway: DEBUG
+ #index.gateway: DEBUG
+
+ # peer shard recovery
+ #indices.recovery: DEBUG
+
+ # discovery
+ #discovery: TRACE
+
+ index.search.slowlog: TRACE, index_search_slow_log_file
+ index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+additivity:
+ index.search.slowlog: false
+ index.indexing.slowlog: false
+
+appender:
+ console:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_search_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_indexing_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/core-signatures.txt b/core-signatures.txt
new file mode 100644
index 0000000..9503978
--- /dev/null
+++ b/core-signatures.txt
@@ -0,0 +1,35 @@
+@defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by the name) which executor a thread is associated with
+
+java.util.concurrent.Executors#newFixedThreadPool(int)
+java.util.concurrent.Executors#newSingleThreadExecutor()
+java.util.concurrent.Executors#newCachedThreadPool()
+java.util.concurrent.Executors#newSingleThreadScheduledExecutor()
+java.util.concurrent.Executors#newScheduledThreadPool(int)
+java.util.concurrent.Executors#defaultThreadFactory()
+java.util.concurrent.Executors#privilegedThreadFactory()
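+
+# For instance, a sketch in comment form (EsExecutors#daemonThreadFactory is an
+# assumption about this codebase's named-thread helper; the two-argument
+# newFixedThreadPool overload is not on the forbidden list):
+#   ThreadFactory factory = EsExecutors.daemonThreadFactory("my-component");
+#   ExecutorService pool = Executors.newFixedThreadPool(4, factory);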
+
+java.lang.Character#codePointBefore(char[],int) @ Implicit start offset is error-prone when the char[] is a buffer and the first chars are random chars
+java.lang.Character#codePointAt(char[],int) @ Implicit end offset is error-prone when the char[] is a buffer and the last chars are random chars
+
+@defaultMessage Collections.sort dumps data into an array, sorts the array and reinserts the data into the list; use Lucene's CollectionUtil sort methods instead, which sort in place
+
+java.util.Collections#sort(java.util.List)
+java.util.Collections#sort(java.util.List,java.util.Comparator)
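+# e.g. prefer org.apache.lucene.util.CollectionUtil#timSort(java.util.List),
+# which sorts in place (a pointer comment, not part of the enforced signatures)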
+
+java.io.StringReader#<init>(java.lang.String) @ Use FastStringReader instead
+
+org.apache.lucene.util.RamUsageEstimator#sizeOf(java.lang.Object) @ This can be a performance trap
+
+@defaultMessage Reference management is tricky, leave it to SearcherManager
+org.apache.lucene.index.IndexReader#decRef()
+org.apache.lucene.index.IndexReader#incRef()
+org.apache.lucene.index.IndexReader#tryIncRef()
+
+org.apache.lucene.index.IndexWriter#maybeMerge() @ use Merges#maybeMerge
+org.apache.lucene.index.IndexWriter#forceMerge(int) @ use Merges#forceMerge
+org.apache.lucene.index.IndexWriter#forceMerge(int,boolean) @ use Merges#forceMerge
+org.apache.lucene.index.IndexWriter#forceMergeDeletes() @ use Merges#forceMergeDeletes
+org.apache.lucene.index.IndexWriter#forceMergeDeletes(boolean) @ use Merges#forceMergeDeletes
+
+@defaultMessage QueryWrapperFilter is cacheable by default - use Queries#wrap instead
+org.apache.lucene.search.QueryWrapperFilter#<init>(org.apache.lucene.search.Query)
diff --git a/dev-tools/ElasticSearch.launch b/dev-tools/ElasticSearch.launch
new file mode 100644
index 0000000..107c5c0
--- /dev/null
+++ b/dev-tools/ElasticSearch.launch
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<launchConfiguration type="org.eclipse.jdt.launching.localJavaApplication">
+<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
+<listEntry value="/elasticsearch/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java"/>
+</listAttribute>
+<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
+<listEntry value="1"/>
+</listAttribute>
+<mapAttribute key="org.eclipse.debug.core.environmentVariables">
+<mapEntry key="ES_HOME" value="${target_home}"/>
+</mapAttribute>
+<stringAttribute key="org.eclipse.jdt.launching.CLASSPATH_PROVIDER" value="org.eclipse.m2e.launchconfig.classpathProvider"/>
+<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.elasticsearch.bootstrap.Elasticsearch"/>
+<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="elasticsearch"/>
+<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.m2e.launchconfig.sourcepathProvider"/>
+<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms256m -Xmx1g -Xss256k -Djava.awt.headless=true -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=logs/heapdump.hprof -Delasticsearch -Des.foreground=yes -Djava.library.path=lib/sigar -ea"/>
+</launchConfiguration>
diff --git a/dev-tools/build_randomization.rb b/dev-tools/build_randomization.rb
new file mode 100644
index 0000000..2a5c313
--- /dev/null
+++ b/dev-tools/build_randomization.rb
@@ -0,0 +1,116 @@
+#!/usr/bin/env ruby
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License
+
+#
+# generate property file for the jdk randomization test
+#
+#
+require 'yaml'
+
+class JDKSelector
+ attr_reader :directory, :jdk_list
+
+ def initialize(directory)
+ @directory = directory
+ end
+
+ # get selection of available jdks from jenkins automatic install directory
+ def get_jdk
+ @jdk_list = Dir.entries(directory).select do |x|
+ x.chars.first == 'J'
+ end.map do |y|
+ File.join(directory, y)
+ end
+ self
+ end
+
+  # pick a random element from the given array (defaults to the JDK list)
+  def select_one(selection_array = nil)
+    selection_array ||= @jdk_list
+    get_random_one(selection_array)
+  end
+end
+
+def get_random_one(data_array)
+ data_array[rand(data_array.size)]
+end
+
+# given a jdk directory selection, generate relevant environment variables
+def get_env_matrix(data_array)
+
+ #refactoring target
+  es_test_jvm_option1 = get_random_one(['-server']) # only '-server' for now; originally get_random_one(['-client', '-server'])
+ greater_than_six = File.basename([*data_array].first).split(/[^0-9]/)[-1].to_i > 6
+ es_test_jvm_option2 = greater_than_six ? get_random_one(['-XX:+UseConcMarkSweepGC', '-XX:+UseParallelGC', '-XX:+UseSerialGC',
+ '-XX:+UseG1GC']) :
+ get_random_one(['-XX:+UseConcMarkSweepGC', '-XX:+UseParallelGC', '-XX:+UseSerialGC'])
+
+ es_test_jvm_option3 = get_random_one(['-XX:+UseCompressedOops', '-XX:-UseCompressedOops'])
+ es_node_mode = get_random_one(['local', 'network'])
+  # randomizing tests.nightly is disabled for now because of a bug; force false
+  tests_nightly = get_random_one([false])
+
+  test_assert_off = (rand(10) == 9) # 10 percent chance of turning assertions off
+  tests_security_manager = (rand(10) != 9) # 10 percent chance of running without the security manager
+ arg_line = [es_test_jvm_option1, es_test_jvm_option2, es_test_jvm_option3]
+ [*data_array].map do |x|
+ data_hash = {
+ 'PATH' => File.join(x,'bin') + ':' + ENV['PATH'],
+ 'JAVA_HOME' => x,
+ 'BUILD_DESC' => "%s,%s,%s%s,%s %s%s%s"%[File.basename(x), es_node_mode, tests_nightly ? 'nightly,':'',
+ es_test_jvm_option1[1..-1], es_test_jvm_option2[4..-1], es_test_jvm_option3[4..-1],
+ test_assert_off ? ',assert off' : '', tests_security_manager ? ', security manager enabled' : ''],
+ 'es.node.mode' => es_node_mode,
+ 'tests.nightly' => tests_nightly,
+ 'tests.security.manager' => tests_security_manager,
+ 'tests.jvm.argline' => arg_line.join(" "),
+ }
+ data_hash['tests.assertion.disabled'] = 'org.elasticsearch' if test_assert_off
+ data_hash
+ end
+end
+
+# pick the first element out of the array of hashes and write a Java property file
+def generate_property_file(directory, data)
+ #array transformation
+ content = data.first.map do |key, value|
+ "%s=%s"%[key, value]
+ end
+ file_name = (ENV['BUILD_ID'] + ENV['BUILD_NUMBER']) || 'prop' rescue 'prop'
+ file_name = file_name.split(File::SEPARATOR).first + '.txt'
+ File.open(File.join(directory, file_name), 'w') do |file|
+ file.write(content.join("\n"))
+ end
+end
+
+working_directory = ENV['WORKSPACE'] || '/var/tmp'
+unless(ENV['BUILD_ID'])
+  # local mode: set up a fake environment
+  test_directory = 'tools/hudson.model.JDK/'
+  unless(File.exist?(test_directory))
+    puts "running in local mode, setting up the environment"
+    puts "properties are written to the file prop.txt"
+ system("mkdir -p %sJDK{6,7}"%test_directory)
+ end
+ working_directory = ENV['PWD']
+end
+# jenkins sets pwd prior to execution
+jdk_selector = JDKSelector.new(File.join(ENV['PWD'],'tools','hudson.model.JDK'))
+environment_matrix = get_env_matrix(jdk_selector.get_jdk.select_one)
+
+generate_property_file(working_directory, environment_matrix)
diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py
new file mode 100644
index 0000000..44c6aeb
--- /dev/null
+++ b/dev-tools/build_release.py
@@ -0,0 +1,607 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import re
+import tempfile
+import shutil
+import os
+import datetime
+import json
+import time
+import sys
+import argparse
+import hmac
+import urllib
+import fnmatch
+import socket
+import urllib.request
+
+from http.client import HTTPConnection
+from http.client import HTTPSConnection
+
+
+"""
+ This tool builds a release from a given elasticsearch branch.
+ In order to execute it go to the top level directory and run:
+ $ python3 dev_tools/build_release.py --branch 0.90 --publish --remote origin
+
+ By default this script runs in 'dry' mode, which essentially simulates a release. If the
+ '--publish' option is set, the actual release is done. The script takes over almost all
+ steps necessary for a release. From a high-level point of view it does the following things:
+
+ - runs prerequisite checks, i.e. that Java 1.6 is present and that S3 credentials are available as env variables
+ - detects the version to release from the specified branch (--branch) or the current branch
+ - creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
+ - builds the artifacts and runs smoke-tests on the built zip & tar.gz files
+ - commits the new version and merges the release branch into the source branch
+ - creates a tag and pushes the commit to the specified origin (--remote)
+ - publishes the release to Sonatype and S3
+
+ Once it's done it will print all the remaining steps.
+
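+ For example, a dry-run simulation of a 0.90 release (dry mode is the default;
+ nothing is pushed or uploaded):
+ $ python3 dev_tools/build_release.py --branch 0.90 --remote origin
+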
+ Prerequisites:
+ - Python 3k for script execution
+ - Boto for S3 Upload ($ apt-get install python-boto)
+ - RPM for RPM building ($ apt-get install rpm)
+ - S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
+"""
+env = os.environ
+
+PLUGINS = [('bigdesk', 'lukas-vlcek/bigdesk'),
+ ('paramedic', 'karmi/elasticsearch-paramedic'),
+ ('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
+ ('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
+ ('head', 'mobz/elasticsearch-head')]
+
+LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
+
+def log(msg):
+ log_plain('\n%s' % msg)
+
+def log_plain(msg):
+ f = open(LOG, mode='ab')
+ f.write(msg.encode('utf-8'))
+ f.close()
+
+def run(command, quiet=False):
+ log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
+ if os.system('%s >> %s 2>&1' % (command, LOG)):
+ msg = ' FAILED: %s [see log %s]' % (command, LOG)
+ if not quiet:
+ print(msg)
+ raise RuntimeError(msg)
+
+try:
+ JAVA_HOME = env['JAVA_HOME']
+except KeyError:
+ raise RuntimeError("""
+ Please set JAVA_HOME in the env before running release tool
+ On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.6*'`""")
+
+try:
+ JAVA_HOME = env['JAVA6_HOME']
+except KeyError:
+ pass #no JAVA6_HOME - we rely on JAVA_HOME
+
+
+try:
+ MVN='mvn'
+ # make sure mvn3 is used if mvn3 is available
+ # some systems use maven 2 as default
+ run('mvn3 --version', quiet=True)
+ MVN='mvn3'
+except RuntimeError:
+ pass
+
+
+def java_exe():
+ path = JAVA_HOME
+ return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
+
+def verify_java_version(version):
+ s = os.popen('%s; java -version 2>&1' % java_exe()).read()
+ if s.find(' version "%s.' % version) == -1:
+ raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
+
+# Verifies the java version. We guarantee that we run with Java 1.6
+# If 1.6 is not available fail the build!
+def verify_mvn_java_version(version, mvn):
+ s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
+ if s.find('Java version: %s' % version) == -1:
+ raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s))
+
+# Returns the hash of the current git HEAD revision
+def get_head_hash():
+ return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip()
+
+# Returns the hash of the given tag revision
+def get_tag_hash(tag):
+ return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()
+
+# Returns the name of the current branch
+def get_current_branch():
+ return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
+
+verify_java_version('1.6') # we require to build with 1.6
+verify_mvn_java_version('1.6', MVN)
+
+# Utility that returns the name of the release branch for a given version
+def release_branch(version):
+ return 'release_branch_%s' % version
+
+# runs git fetch on the given remote
+def fetch(remote):
+ run('git fetch %s' % remote)
+
+# Creates a new release branch from the given source branch
+# and rebases the source branch from the remote before creating
+# the release branch. Note: This fails if the source branch
+# doesn't exist on the provided remote.
+def create_release_branch(remote, src_branch, release):
+ run('git checkout %s' % src_branch)
+ run('git pull --rebase %s %s' % (remote, src_branch))
+ run('git checkout -b %s' % (release_branch(release)))
+
+
+# Reads the given file and applies the
+# callback to it. If the callback changed
+# a line the given file is replaced with
+# the modified input.
+def process_file(file_path, line_callback):
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with open(abs_path,'w', encoding='utf-8') as new_file:
+ with open(file_path, encoding='utf-8') as old_file:
+ for line in old_file:
+ new_line = line_callback(line)
+ modified = modified or (new_line != line)
+ new_file.write(new_line)
+ os.close(fh)
+ if modified:
+ #Remove original file
+ os.remove(file_path)
+ #Move new file
+ shutil.move(abs_path, file_path)
+ return True
+ else:
+ # nothing to do - just remove the tmp file
+ os.remove(abs_path)
+ return False
+
+# Walks the given directory path (defaults to 'docs')
+# and replaces all 'coming[$version]' tags with
+# 'added[$version]'. This method only accesses asciidoc files.
+def update_reference_docs(release_version, path='docs'):
+ pattern = 'coming[%s' % (release_version)
+ replacement = 'added[%s' % (release_version)
+ pending_files = []
+ def callback(line):
+ return line.replace(pattern, replacement)
+ for root, _, file_names in os.walk(path):
+ for file_name in fnmatch.filter(file_names, '*.asciidoc'):
+ full_path = os.path.join(root, file_name)
+ if process_file(full_path, callback):
+ pending_files.append(os.path.join(root, file_name))
+ return pending_files
+
+# Moves the pom.xml file from a snapshot to a release
+def remove_maven_snapshot(pom, release):
+ pattern = '<version>%s-SNAPSHOT</version>' % (release)
+ replacement = '<version>%s</version>' % (release)
+ def callback(line):
+ return line.replace(pattern, replacement)
+ process_file(pom, callback)
+
+# Moves the Version.java file from a snapshot to a release
+def remove_version_snapshot(version_file, release):
+  # 1.0.0.Beta1 -> 1_0_0_Beta1
+ release = release.replace('.', '_')
+ pattern = 'new Version(V_%s_ID, true' % (release)
+ replacement = 'new Version(V_%s_ID, false' % (release)
+ def callback(line):
+ return line.replace(pattern, replacement)
+ process_file(version_file, callback)
+
+# Stages the given files for the next git commit
+def add_pending_files(*files):
+ for file in files:
+ run('git add %s' % (file))
+
+# Executes a git commit with 'release [version]' as the commit message
+def commit_release(release):
+ run('git commit -m "release [%s]"' % release)
+
+def tag_release(release):
+ run('git tag -a v%s -m "Tag release version %s"' % (release, release))
+
+def run_mvn(*cmd):
+ for c in cmd:
+ run('%s; %s %s' % (java_exe(), MVN, c))
+
+def build_release(run_tests=False, dry_run=True, cpus=1):
+ target = 'deploy'
+ if dry_run:
+ target = 'package'
+ if run_tests:
+ run_mvn('clean',
+ 'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
+ 'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
+ run_mvn('clean %s -DskipTests' %(target))
+ success = False
+ try:
+ run_mvn('-DskipTests rpm:rpm')
+ success = True
+ finally:
+ if not success:
+ print("""
+    RPM building failed. Make sure the "rpm" tools are installed.
+    Use one of the following commands to install them:
+      $ brew install rpm     # on OSX
+      $ apt-get install rpm  # on Ubuntu et al.
+ """)
+
+# Uses the github API to fetch open tickets for the given release version
+# if it finds any tickets open for that version it will throw an exception
+def ensure_no_open_tickets(version):
+ version = "v%s" % version
+ conn = HTTPSConnection('api.github.com')
+ try:
+ log('Checking for open tickets on Github for version %s' % version)
+    log('Querying the Github issues API')
+ conn.request('GET', '/repos/elasticsearch/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
+ res = conn.getresponse()
+ if res.status == 200:
+ issues = json.loads(res.read().decode("utf-8"))
+ if issues:
+ urls = []
+ for issue in issues:
+ urls.append(issue['url'])
+ raise RuntimeError('Found open issues for release version %s see - %s' % (version, urls))
+ else:
+ log("No open issues found for version %s" % version)
+ else:
+ raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
+ except socket.error as e:
+ log("Failed to fetch issue list from Github for release version %s' % version - Exception: [%s]" % (version, e))
+ #that is ok it might not be there yet
+ finally:
+ conn.close()
+
+def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
+ for _ in range(timeout):
+ conn = HTTPConnection(host, port, timeout)
+ try:
+      log('Waiting 1 second for the node to become available')
+ time.sleep(1)
+ log('Check if node is available')
+ conn.request('GET', '')
+ res = conn.getresponse()
+ if res.status == 200:
+ return True
+ except socket.error as e:
+ log("Failed while waiting for node - Exception: [%s]" % e)
+ #that is ok it might not be there yet
+ finally:
+ conn.close()
+
+ return False
+
+# Checks the pom.xml for the release version.
+# This method fails if the pom file has no SNAPSHOT version set, i.e.
+# if the version is already a release version.
+# Returns the next version string, e.g. 0.90.7
+def find_release_version(src_branch):
+ run('git checkout %s' % src_branch)
+ with open('pom.xml', encoding='utf-8') as file:
+ for line in file:
+ match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
+ if match:
+ return match.group(1)
+ raise RuntimeError('Could not find release version in branch %s' % src_branch)
+
+def artifact_names(release, path = ''):
+ return [os.path.join(path, 'elasticsearch-%s.%s' % (release, t)) for t in ['deb', 'tar.gz', 'zip']]
+
+def get_artifacts(release):
+ common_artifacts = artifact_names(release, 'target/releases/')
+ for f in common_artifacts:
+ if not os.path.isfile(f):
+ raise RuntimeError('Could not find required artifact at %s' % f)
+ rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s-1.noarch.rpm' % release)
+ if os.path.isfile(rpm):
+ log('RPM [%s] contains: ' % rpm)
+ run('rpm -pqli %s' % rpm)
+    # it is an oddness of RPM that it attaches "-1", so we have to rename the file
+ renamed_rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s.noarch.rpm' % release)
+ shutil.move(rpm, renamed_rpm)
+ common_artifacts.append(renamed_rpm)
+ else:
+ raise RuntimeError('Could not find required artifact at %s' % rpm)
+ return common_artifacts
+
+# Generates sha1 checksums for all files
+# and returns the checksum files as well
+# as the given files in a list
+def generate_checksums(files):
+ res = []
+ for release_file in files:
+ directory = os.path.dirname(release_file)
+ file = os.path.basename(release_file)
+ checksum_file = '%s.sha1.txt' % file
+
+ if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)):
+ raise RuntimeError('Failed to generate checksum for file %s' % release_file)
+ res = res + [os.path.join(directory, checksum_file), release_file]
+ return res
+
+def download_and_verify(release, files, plugins=None, base_url='https://download.elasticsearch.org/elasticsearch/elasticsearch'):
+ print('Downloading and verifying release %s from %s' % (release, base_url))
+ tmp_dir = tempfile.mkdtemp()
+ try:
+ downloaded_files = []
+ for file in files:
+ name = os.path.basename(file)
+ url = '%s/%s' % (base_url, name)
+ abs_file_path = os.path.join(tmp_dir, name)
+ print(' Downloading %s' % (url))
+ downloaded_files.append(abs_file_path)
+ urllib.request.urlretrieve(url, abs_file_path)
+ url = ''.join([url, '.sha1.txt'])
+ checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1.txt']))
+ urllib.request.urlretrieve(url, checksum_file)
+ print(' Verifying checksum %s' % (checksum_file))
+ run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
+ smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
+ print(' SUCCESS')
+ finally:
+ shutil.rmtree(tmp_dir)
+
+def smoke_test_release(release, files, expected_hash, plugins):
+ for release_file in files:
+ if not os.path.isfile(release_file):
+      raise RuntimeError('Smoke test failed; missing file %s' % (release_file))
+ tmp_dir = tempfile.mkdtemp()
+ if release_file.endswith('tar.gz'):
+ run('tar -xzf %s -C %s' % (release_file, tmp_dir))
+ elif release_file.endswith('zip'):
+ run('unzip %s -d %s' % (release_file, tmp_dir))
+ else:
+ log('Skip SmokeTest for [%s]' % release_file)
+ continue # nothing to do here
+ es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
+ print(' Smoke testing package [%s]' % release_file)
+ es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
+ plugin_names = {}
+ for name, plugin in plugins:
+ print(' Install plugin [%s] from [%s]' % (name, plugin))
+ run('%s %s %s' % (es_plugin_path, '-install', plugin))
+ plugin_names[name] = True
+
+ if release.startswith("0.90."):
+ background = '' # 0.90.x starts in background automatically
+ else:
+ background = '-d'
+  print('  Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
+ run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false %s'
+ % (java_exe(), es_run_path, background))
+  conn = HTTPConnection('127.0.0.1', 9200, 20)
+ wait_for_node_startup()
+ try:
+ try:
+ conn.request('GET', '')
+ res = conn.getresponse()
+ if res.status == 200:
+ version = json.loads(res.read().decode("utf-8"))['version']
+ if release != version['number']:
+ raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
+ if version['build_snapshot']:
+ raise RuntimeError('Expected non snapshot version')
+ if version['build_hash'].strip() != expected_hash:
+ raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
+ print(' Running REST Spec tests against package [%s]' % release_file)
+ run_mvn('test -Dtests.rest=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9200"))
+ print(' Verify if plugins are listed in _nodes')
+ conn.request('GET', '/_nodes?plugin=true&pretty=true')
+ res = conn.getresponse()
+ if res.status == 200:
+ nodes = json.loads(res.read().decode("utf-8"))['nodes']
+ for _, node in nodes.items():
+ node_plugins = node['plugins']
+ for node_plugin in node_plugins:
+ if not plugin_names.get(node_plugin['name'], False):
+              raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
+ del plugin_names[node_plugin['name']]
+ if plugin_names:
+ raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
+
+ else:
+ raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
+ else:
+ raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
+ finally:
+ conn.request('POST', '/_cluster/nodes/_local/_shutdown')
+ time.sleep(1) # give the node some time to shut down
+      shutdown_res = conn.getresponse()
+      if shutdown_res.status != 200:
+        raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % shutdown_res.status)
+
+ finally:
+ conn.close()
+ shutil.rmtree(tmp_dir)
+
+def merge_tag_push(remote, src_branch, release_version, dry_run):
+ run('git checkout %s' % src_branch)
+ run('git merge %s' % release_branch(release_version))
+ run('git tag v%s' % release_version)
+ if not dry_run:
+ run('git push %s %s' % (remote, src_branch)) # push the commit
+ run('git push %s v%s' % (remote, release_version)) # push the tag
+ else:
+ print(' dryrun [True] -- skipping push to remote %s' % remote)
+
+def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True):
+ location = os.path.dirname(os.path.realpath(__file__))
+ for artifact in artifacts:
+ if dry_run:
+ print('Skip Uploading %s to Amazon S3' % artifact)
+ else:
+ print('Uploading %s to Amazon S3' % artifact)
+ # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool
+ run('python %s/upload-s3.py --file %s ' % (location, os.path.abspath(artifact)))
+
+def print_sonatype_notice():
+ settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
+ if os.path.isfile(settings):
+ with open(settings, encoding='utf-8') as settings_file:
+ for line in settings_file:
+ if line.strip() == '<id>sonatype-nexus-snapshots</id>':
+          # we found the indicator; no need to print the warning
+ return
+ print("""
+    NOTE: No Sonatype settings detected, make sure you have configured
+    your Sonatype credentials in '~/.m2/settings.xml':
+
+ <settings>
+ ...
+ <servers>
+ <server>
+ <id>sonatype-nexus-snapshots</id>
+ <username>your-jira-id</username>
+ <password>your-jira-pwd</password>
+ </server>
+ <server>
+ <id>sonatype-nexus-staging</id>
+ <username>your-jira-id</username>
+ <password>your-jira-pwd</password>
+ </server>
+ </servers>
+ ...
+ </settings>
+ """)
+
+def check_s3_credentials():
+ if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None):
+    raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables; please export them in order to upload to S3')
+
+VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
+POM_FILE = 'pom.xml'
+
+# we print a notice if we can not find the relevant info in ~/.m2/settings.xml
+print_sonatype_notice()
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch release')
+ parser.add_argument('--branch', '-b', metavar='master', default=get_current_branch(),
+ help='The branch to release from. Defaults to the current branch.')
+ parser.add_argument('--cpus', '-c', metavar='1', default=1,
+ help='The number of cpus to use for running the test. Default is [1]')
+ parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
+ help='Skips tests before release. Tests are run by default.')
+ parser.set_defaults(tests=True)
+ parser.add_argument('--remote', '-r', metavar='origin', default='origin',
+ help='The remote to push the release commit and tag to. Default is [origin]')
+ parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
+                      help='Publishes the release. Disabled by default.')
+ parser.add_argument('--smoke', '-s', dest='smoke', default='',
+ help='Smoke tests the given release')
+
+ parser.set_defaults(dryrun=True)
+ parser.set_defaults(smoke=None)
+ args = parser.parse_args()
+
+ src_branch = args.branch
+ remote = args.remote
+ run_tests = args.tests
+ dry_run = args.dryrun
+ cpus = args.cpus
+ build = not args.smoke
+ smoke_test_version = args.smoke
+ if not dry_run:
+ check_s3_credentials()
+ print('WARNING: dryrun is set to "false" - this will push and publish the release')
+ input('Press Enter to continue...')
+
+ print(''.join(['-' for _ in range(80)]))
+ print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
+ print(' JAVA_HOME is [%s]' % JAVA_HOME)
+ print(' Running with maven command: [%s] ' % (MVN))
+
+ if build:
+ release_version = find_release_version(src_branch)
+ ensure_no_open_tickets(release_version)
+ if not dry_run:
+ smoke_test_version = release_version
+ head_hash = get_head_hash()
+ run_mvn('clean') # clean the env!
+ print(' Release version: [%s]' % release_version)
+ create_release_branch(remote, src_branch, release_version)
+ print(' Created release branch [%s]' % (release_branch(release_version)))
+ success = False
+ try:
+ pending_files = [POM_FILE, VERSION_FILE]
+ remove_maven_snapshot(POM_FILE, release_version)
+ remove_version_snapshot(VERSION_FILE, release_version)
+ pending_files = pending_files + update_reference_docs(release_version)
+ print(' Done removing snapshot version')
+ add_pending_files(*pending_files) # expects var args use * to expand
+ commit_release(release_version)
+ print(' Committed release version [%s]' % release_version)
+ print(''.join(['-' for _ in range(80)]))
+ print('Building Release candidate')
+ input('Press Enter to continue...')
+ if not dry_run:
+        print('  Running maven builds now and publishing to Sonatype - run-tests [%s]' % run_tests)
+ else:
+ print(' Running maven builds now run-tests [%s]' % run_tests)
+ build_release(run_tests=run_tests, dry_run=dry_run, cpus=cpus)
+ artifacts = get_artifacts(release_version)
+ artifacts_and_checksum = generate_checksums(artifacts)
+ smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
+ print(''.join(['-' for _ in range(80)]))
+ print('Finish Release -- dry_run: %s' % dry_run)
+ input('Press Enter to continue...')
+ print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
+ merge_tag_push(remote, src_branch, release_version, dry_run)
+ print(' publish artifacts to S3 -- dry_run: %s' % dry_run)
+ publish_artifacts(artifacts_and_checksum, dry_run=dry_run)
+ pending_msg = """
+    Release successful. Pending steps:
+ * create a version tag on github for version 'v%(version)s'
+ * check if there are pending issues for this version (https://github.com/elasticsearch/elasticsearch/issues?labels=v%(version)s&page=1&state=open)
+    * publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
+ - here is a guide: https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide#SonatypeOSSMavenRepositoryUsageGuide-8a.ReleaseIt
+ * check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
+ * announce the release on the website / blog post
+ * tweet about the release
+ * announce the release in the google group/mailinglist
+ """
+ print(pending_msg % { 'version' : release_version} )
+ success = True
+ finally:
+ if not success:
+ run('git reset --hard HEAD')
+ run('git checkout %s' % src_branch)
+ elif dry_run:
+ run('git reset --hard %s' % head_hash)
+ run('git tag -d v%s' % release_version)
+ # we delete this one anyways
+ run('git branch -D %s' % (release_branch(release_version)))
+ else:
+ print("Skipping build - smoketest only against version %s" % smoke_test_version)
+ run_mvn('clean') # clean the env!
+
+ if smoke_test_version:
+ fetch(remote)
+ download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)
diff --git a/dev-tools/client_tests_urls.prop b/dev-tools/client_tests_urls.prop
new file mode 100644
index 0000000..9ef3748
--- /dev/null
+++ b/dev-tools/client_tests_urls.prop
@@ -0,0 +1,21 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+#
+# This is used by client tests to pull in the master and 0.90 bits
+#
+URL_MASTER=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/master/nightly/JDK6/elasticsearch-latest-SNAPSHOT.zip
+URL_090=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/0.90/nightly/JDK6/elasticsearch-latest-SNAPSHOT.zip
diff --git a/dev-tools/elasticsearch_license_header.txt b/dev-tools/elasticsearch_license_header.txt
new file mode 100644
index 0000000..ca94332
--- /dev/null
+++ b/dev-tools/elasticsearch_license_header.txt
@@ -0,0 +1,16 @@
+Licensed to Elasticsearch under one or more contributor
+license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright
+ownership. Elasticsearch licenses this file to you under
+the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
diff --git a/dev-tools/es_release_notes.pl b/dev-tools/es_release_notes.pl
new file mode 100644
index 0000000..a55f7ab
--- /dev/null
+++ b/dev-tools/es_release_notes.pl
@@ -0,0 +1,186 @@
+#!/usr/bin/env perl
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+use strict;
+use warnings;
+
+use HTTP::Tiny;
+use IO::Socket::SSL 1.52;
+
+my $Base_URL = 'https://api.github.com/repos/';
+my $User_Repo = 'elasticsearch/elasticsearch/';
+my $Issue_URL = "http://github.com/${User_Repo}issues/issue/";
+
+my @Groups = qw(breaking feature enhancement bug regression doc test);
+my %Group_Labels = (
+ breaking => 'Breaking changes',
+ doc => 'Docs',
+ feature => 'New features',
+ enhancement => 'Enhancements',
+ bug => 'Bug fixes',
+ regression => 'Regression',
+ test => 'Tests',
+ other => 'Not classified',
+);
+
+use JSON();
+use Encode qw(encode_utf8);
+
+my $json = JSON->new->utf8(1);
+
+my %All_Labels = fetch_labels();
+
+my $version = shift @ARGV
+ or dump_labels();
+
+dump_labels("Unknown version '$version'")
+ unless $All_Labels{$version};
+
+my $format = shift @ARGV || "html";
+
+my $issues = fetch_issues($version);
+dump_issues( $version, $issues );
+
+#===================================
+sub dump_issues {
+#===================================
+ my $version = shift;
+ my $issues = shift;
+
+ $version =~ s/v//;
+ my ( $day, $month, $year ) = (gmtime)[ 3 .. 5 ];
+ $month++;
+ $year += 1900;
+
+ for my $group ( @Groups, 'other' ) {
+ my $group_issues = $issues->{$group} or next;
+ $format eq 'html' and print "<h2>$Group_Labels{$group}</h2>\n\n<ul>\n";
+ $format eq 'markdown' and print "## $Group_Labels{$group}\n\n";
+
+ for my $header ( sort keys %$group_issues ) {
+ my $header_issues = $group_issues->{$header};
+ my $prefix = "<li>";
+ if ($format eq 'html') {
+ if ( $header && @$header_issues > 1 ) {
+ print "<li>$header:<ul>";
+ $prefix = "<li>";
+ }
+ elsif ($header) {
+ $prefix = "<li>$header: ";
+ }
+ }
+ for my $issue (@$header_issues) {
+ my $title = $issue->{title};
+ if ( $issue->{state} eq 'open' ) {
+ $title .= " [OPEN]";
+ }
+ my $number = $issue->{number};
+ $format eq 'markdown' and print encode_utf8( "* "
+ . $title
+ . qq( [#$number](${Issue_URL}${number})\n)
+ );
+ $format eq 'html' and print encode_utf8( $prefix
+ . $title
+ . qq[ <a href="${Issue_URL}${number}">#${number}</a></li>\n]
+ );
+ }
+ if ($format eq 'html' && $header && @$header_issues > 1 ) {
+ print "</li></ul></li>\n";
+ }
+ }
+ $format eq 'html' and print "</ul>";
+ print "\n\n"
+ }
+}
+
+#===================================
+sub fetch_issues {
+#===================================
+ my $version = shift;
+ my @issues;
+ for my $state ( 'open', 'closed' ) {
+ my $page = 1;
+ while (1) {
+ my $tranche
+ = fetch( $User_Repo
+ . 'issues?labels='
+ . $version
+ . '&pagesize=100&state='
+ . $state
+ . '&page='
+ . $page )
+ or die "Couldn't fetch issues for version '$version'";
+ last unless @$tranche;
+ push @issues, @$tranche;
+ $page++;
+ }
+ }
+
+ my %group;
+ISSUE:
+ for my $issue (@issues) {
+ my %labels = map { $_->{name} => 1 } @{ $issue->{labels} };
+ my $header = $issue->{title} =~ s/^([^:]+):\s+// ? $1 : '';
+ for (@Groups) {
+ if ( $labels{$_} ) {
+ push @{ $group{$_}{$header} }, $issue;
+ next ISSUE;
+ }
+ }
+ push @{ $group{other}{$header} }, $issue;
+ }
+
+ return \%group;
+}
+
+#===================================
+sub fetch_labels {
+#===================================
+ my $labels = fetch( $User_Repo . 'labels' )
+ or die "Couldn't retrieve version labels";
+ return map { $_ => 1 } grep {/^v/} map { $_->{name} } @$labels;
+}
+
+#===================================
+sub fetch {
+#===================================
+ my $url = $Base_URL . shift();
+ my $response = HTTP::Tiny->new->get($url);
+ die "$response->{status} $response->{reason}\n"
+ unless $response->{success};
+
+ # print $response->{content};
+ return $json->decode( $response->{content} );
+}
+
+#===================================
+sub dump_labels {
+#===================================
+ my $error = shift || '';
+ if ($error) {
+ $error = "\nERROR: $error\n";
+ }
+ my $labels = join( "\n - ", '', ( sort keys %All_Labels ) );
+ die <<USAGE
+ $error
+ USAGE: $0 version > outfile
+
+ Known versions:$labels
+
+USAGE
+
+}
diff --git a/dev-tools/extract_party_license.rb b/dev-tools/extract_party_license.rb
new file mode 100644
index 0000000..e3326e6
--- /dev/null
+++ b/dev-tools/extract_party_license.rb
@@ -0,0 +1,54 @@
+#!/bin/env ruby
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License
+#
+# The script extracts license information out of the logstash project.
+# First it will use the license information from the gem object.
+# If that is not available, it will try to print out the license file.
+#
+require 'rubygems'
+require 'bundler/setup'
+require 'yaml'
+
+# get all local installed gems
+def all_installed_gems
+ Gem::Specification.all = nil
+ all = Gem::Specification
+ Gem::Specification.reset
+ all
+end
+
+all_installed_gems.select {|y| y.gem_dir.include?('vendor') }.sort {|v, u| v.name <=> u.name }.each do |x|
+  puts '=' * 80 # separator
+  if(x.license) # the gem has license information
+ puts "%s,%s,%s,%s,%s"%[x.name, x.version, x.license, x.homepage, x.email]
+ else
+ puts "%s,%s,%s,%s"%[x.name, x.version, x.homepage, x.email]
+ license_file = Dir.glob(File.join(x.gem_dir,'LICENSE*')).first #see if there is a license file
+ if(license_file)
+ file = File.open(license_file, 'r')
+ data = file.read
+ file.close
+ puts data
+ else
+      # no license file; try to print some gem information
+ puts 'No License File'
+ puts x.gem_dir
+ puts x.files.join("\n")
+ end
+ end
+end
+
diff --git a/dev-tools/license_header_definition.xml b/dev-tools/license_header_definition.xml
new file mode 100644
index 0000000..cfe25e6
--- /dev/null
+++ b/dev-tools/license_header_definition.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<additionalHeaders>
+ <javadoc_style>
+ <firstLine>/*</firstLine>
+ <beforeEachLine> * </beforeEachLine>
+ <endLine> */</endLine>
+ <!--skipLine></skipLine-->
+ <firstLineDetectionPattern>(\s|\t)*/\*.*$</firstLineDetectionPattern>
+ <lastLineDetectionPattern>.*\*/(\s|\t)*$</lastLineDetectionPattern>
+ <allowBlankLines>false</allowBlankLines>
+ <isMultiline>true</isMultiline>
+ </javadoc_style>
+</additionalHeaders>
diff --git a/dev-tools/tests.policy b/dev-tools/tests.policy
new file mode 100644
index 0000000..8abbfd8
--- /dev/null
+++ b/dev-tools/tests.policy
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// Policy file to prevent tests from writing outside the test sandbox directory
+// PLEASE NOTE: You may need to enable other permissions when new tests are added,
+// everything not allowed here is forbidden!
+
+grant {
+ // permissions for file access, write access only to sandbox:
+ permission java.io.FilePermission "<<ALL FILES>>", "read,execute";
+ permission java.io.FilePermission "${junit4.childvm.cwd}", "read,execute,write";
+ permission java.io.FilePermission "${junit4.childvm.cwd}${/}-", "read,execute,write,delete";
+ permission java.io.FilePermission "${junit4.tempDir}${/}*", "read,execute,write,delete";
+
+ // Allow connecting to the internet anywhere
+ permission java.net.SocketPermission "*", "accept,listen,connect,resolve";
+
+ // Basic permissions needed for Lucene / Elasticsearch to work:
+ permission java.util.PropertyPermission "*", "read,write";
+ permission java.lang.reflect.ReflectPermission "*";
+ permission java.lang.RuntimePermission "*";
+
+  // These two *have* to be spelled out separately
+ permission java.lang.management.ManagementPermission "control";
+ permission java.lang.management.ManagementPermission "monitor";
+
+ permission java.net.NetPermission "*";
+ permission java.util.logging.LoggingPermission "control";
+ permission javax.management.MBeanPermission "*", "*";
+ permission javax.management.MBeanServerPermission "*";
+ permission javax.management.MBeanTrustPermission "*";
+
+ // Needed for some things in DNS caching in the JVM
+ permission java.security.SecurityPermission "getProperty.networkaddress.cache.ttl";
+ permission java.security.SecurityPermission "getProperty.networkaddress.cache.negative.ttl";
+
+};
diff --git a/dev-tools/upload-s3.py b/dev-tools/upload-s3.py
new file mode 100644
index 0000000..95ea576
--- /dev/null
+++ b/dev-tools/upload-s3.py
@@ -0,0 +1,67 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import os
+import sys
+import argparse
+try:
+ import boto.s3
+except ImportError:
+ raise RuntimeError("""
+ S3 upload requires boto to be installed
+ Use one of:
+ 'pip install -U boto'
+ 'apt-get install python-boto'
+ 'easy_install boto'
+ """)
+
+
+def list_buckets(conn):
+ return conn.get_all_buckets()
+
+
+def upload_s3(conn, path, key, file, bucket):
+ print 'Uploading %s to Amazon S3 bucket %s/%s' % \
+ (file, bucket, os.path.join(path, key))
+ def percent_cb(complete, total):
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ bucket = conn.create_bucket(bucket)
+ k = bucket.new_key(os.path.join(path, key))
+ k.set_contents_from_filename(file, cb=percent_cb, num_cb=100)
+
+
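+# Example invocation, a sketch assuming boto can find AWS credentials in the
+# environment (the file path and bucket name are placeholders):
+#
+#   python upload-s3.py --file /path/to/artifact.tar.gz --bucket my-bucket
+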
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Uploads files to Amazon S3')
+  parser.add_argument('--file', '-f', metavar='path to file',
+                      help='the file to upload', required=True)
+ parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org',
+ help='The S3 Bucket to upload to')
+ parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch',
+ help='The key path to use')
+ parser.add_argument('--key', '-k', metavar='key', default=None,
+ help='The key - uses the file name as default key')
+ args = parser.parse_args()
+ if args.key:
+ key = args.key
+ else:
+ key = os.path.basename(args.file)
+
+ connection = boto.connect_s3()
+  upload_s3(connection, args.path, key, args.file, args.bucket)
+
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..fa61e5a
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,4 @@
+The Elasticsearch docs are in AsciiDoc format and can be built using the Elasticsearch documentation build process.
+
+See: https://github.com/elasticsearch/docs
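+
+A sketch of a typical invocation, assuming the build script shipped in that
+repository (the script name and flags may differ by version):
+
+    ./build_docs.pl --doc docs/reference/index.asciidoc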
+
diff --git a/docs/community/clients.asciidoc b/docs/community/clients.asciidoc
new file mode 100644
index 0000000..d9427bd
--- /dev/null
+++ b/docs/community/clients.asciidoc
@@ -0,0 +1,192 @@
+[[clients]]
+== Clients
+
+
+[[community-perl]]
+=== Perl
+
+See the {client}/perl-api/current/index.html[official Elasticsearch Perl client].
+
+[[community-python]]
+=== Python
+
+See the {client}/python-api/current/index.html[official Elasticsearch Python client].
+
+* http://github.com/rhec/pyelasticsearch[pyelasticsearch]:
+ Python client.
+
+* https://github.com/eriky/ESClient[ESClient]:
+ A lightweight and easy to use Python client for Elasticsearch.
+
+* https://github.com/humangeo/rawes[rawes]:
+ Python low level client.
+
+* https://github.com/mozilla/elasticutils/[elasticutils]:
+ A friendly chainable Elasticsearch interface for Python.
+
+* http://intridea.github.io/surfiki-refine-elasticsearch/[Surfiki Refine]:
+ Python Map-Reduce engine targeting Elasticsearch indices.
+
+* http://github.com/aparo/pyes[pyes]:
+ Python client.
+
+
+[[community-ruby]]
+=== Ruby
+
+See the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client].
+
+* http://github.com/karmi/tire[Tire]:
+ Ruby API & DSL, with ActiveRecord/ActiveModel integration.
+
+* https://github.com/PoseBiz/stretcher[stretcher]:
+ Ruby client.
+
+* https://github.com/wireframe/elastic_searchable/[elastic_searchable]:
+ Ruby client + Rails integration.
+
+* https://github.com/ddnexus/flex[Flex]:
+ Ruby Client.
+
+
+[[community-php]]
+=== PHP
+
+See the {client}/php-api/current/index.html[official Elasticsearch PHP client].
+
+* http://github.com/ruflin/Elastica[Elastica]:
+ PHP client.
+
+* https://github.com/nervetattoo/elasticsearch[elasticsearch]:
+  PHP 5.3 client.
+
+* http://github.com/polyfractal/Sherlock[Sherlock]:
+  PHP client, one-to-one mapping with query DSL, fluid interface.
+
+[[community-java]]
+=== Java
+
+* https://github.com/searchbox-io/Jest[Jest]:
+ Java Rest client.
+
+
+[[community-javascript]]
+=== JavaScript
+
+See the {client}/javascript-api/current/index.html[official Elasticsearch JavaScript client].
+
+* https://github.com/fullscale/elastic.js[Elastic.js]:
+ A JavaScript implementation of the Elasticsearch Query DSL and Core API.
+
+* https://github.com/phillro/node-elasticsearch-client[node-elasticsearch-client]:
+ A NodeJS client for Elasticsearch.
+
+* https://github.com/ramv/node-elastical[node-elastical]:
+ Node.js client for the Elasticsearch REST API
+
+* https://github.com/printercu/elastics[elastics]: Simple tiny client that just works
+
+
+[[community-dotnet]]
+=== .Net
+
+* https://github.com/Yegoroff/PlainElastic.Net[PlainElastic.Net]:
+ .NET client.
+
+* https://github.com/Mpdreamz/NEST[NEST]:
+ .NET client.
+
+* https://github.com/medcl/ElasticSearch.Net[ElasticSearch.NET]:
+ .NET client.
+
+
+[[community-scala]]
+=== Scala
+
+* https://github.com/sksamuel/elastic4s[elastic4s]:
+ Scala DSL.
+
+* https://github.com/scalastuff/esclient[esclient]:
+ Thin Scala client.
+
+* https://github.com/bsadeh/scalastic[scalastic]:
+ Scala client.
+
+* https://github.com/gphat/wabisabi[wabisabi]:
+ Asynchronous REST API Scala client.
+
+
+[[community-clojure]]
+=== Clojure
+
+* http://github.com/clojurewerkz/elastisch[Elastisch]:
+ Clojure client.
+
+
+[[community-go]]
+=== Go
+
+* https://github.com/mattbaird/elastigo[elastigo]:
+ Go client.
+
+* https://github.com/belogik/goes[goes]:
+ Go lib.
+
+
+[[community-erlang]]
+=== Erlang
+
+* http://github.com/tsloughter/erlastic_search[erlastic_search]:
+ Erlang client using HTTP.
+
+* https://github.com/dieswaytoofast/erlasticsearch[erlasticsearch]:
+ Erlang client using Thrift.
+
+* https://github.com/datahogs/tirexs[Tirexs]:
+ An https://github.com/elixir-lang/elixir[Elixir] based API/DSL, inspired by
+ http://github.com/karmi/tire[Tire]. Ready to use in pure Erlang
+ environment.
+
+
+[[community-eventmachine]]
+=== EventMachine
+
+* http://github.com/vangberg/em-elasticsearch[em-elasticsearch]:
+ elasticsearch library for eventmachine.
+
+
+[[community-command-line]]
+=== Command Line
+
+* https://github.com/elasticsearch/es2unix[es2unix]:
+ Elasticsearch API consumable by the Linux command line.
+
+* https://github.com/javanna/elasticshell[elasticshell]:
+ command line shell for elasticsearch.
+
+
+[[community-ocaml]]
+=== OCaml
+
+* https://github.com/tovbinm/ocaml-elasticsearch[ocaml-elasticsearch]:
+ OCaml client for Elasticsearch
+
+
+[[community-smalltalk]]
+=== Smalltalk
+
+* http://ss3.gemstone.com/ss/Elasticsearch.html[Elasticsearch] -
+ Smalltalk client for Elasticsearch
+
+[[community-cold-fusion]]
+=== Cold Fusion
+
+* https://github.com/jasonfill/ColdFusion-ElasticSearch-Client[ColdFusion-Elasticsearch-Client]
+ Cold Fusion client for Elasticsearch
+
diff --git a/docs/community/frontends.asciidoc b/docs/community/frontends.asciidoc
new file mode 100644
index 0000000..b6c2258
--- /dev/null
+++ b/docs/community/frontends.asciidoc
@@ -0,0 +1,17 @@
+[[front-ends]]
+== Front Ends
+
+* https://chrome.google.com/webstore/detail/sense/doinijnbnggojdlcjifpdckfokbbfpbo[Sense]:
+ Chrome curl-like plugin for running requests against an Elasticsearch node
+
+* https://github.com/mobz/elasticsearch-head[elasticsearch-head]:
+ A web front end for an Elasticsearch cluster.
+
+* https://github.com/OlegKunitsyn/elasticsearch-browser[browser]:
+ Web front-end over elasticsearch data.
+
+* https://github.com/polyfractal/elasticsearch-inquisitor[Inquisitor]:
+ Front-end to help debug/diagnose queries and analyzers
+
+* http://elastichammer.exploringelasticsearch.com/[Hammer]:
+ Web front-end for elasticsearch
diff --git a/docs/community/github.asciidoc b/docs/community/github.asciidoc
new file mode 100644
index 0000000..698e4c2
--- /dev/null
+++ b/docs/community/github.asciidoc
@@ -0,0 +1,6 @@
+[[github]]
+== GitHub
+
+GitHub is a place where a lot of development around *elasticsearch*
+happens; here is a simple search for
+https://github.com/search?q=elasticsearch&type=Repositories[repositories].
diff --git a/docs/community/index.asciidoc b/docs/community/index.asciidoc
new file mode 100644
index 0000000..88135d8
--- /dev/null
+++ b/docs/community/index.asciidoc
@@ -0,0 +1,17 @@
+= Community Supported Clients
+
+:client: http://www.elasticsearch.org/guide/en/elasticsearch/client
+
+
+include::clients.asciidoc[]
+
+include::frontends.asciidoc[]
+
+include::integrations.asciidoc[]
+
+include::misc.asciidoc[]
+
+include::monitoring.asciidoc[]
+
+include::github.asciidoc[]
+
diff --git a/docs/community/integrations.asciidoc b/docs/community/integrations.asciidoc
new file mode 100644
index 0000000..5e6f1d2
--- /dev/null
+++ b/docs/community/integrations.asciidoc
@@ -0,0 +1,81 @@
+[[integrations]]
+== Integrations
+
+
+* http://grails.org/plugin/elasticsearch[Grails]:
+ Elasticsearch Grails plugin.
+
+* https://github.com/carrot2/elasticsearch-carrot2[carrot2]:
+ Results clustering with carrot2
+
+* https://github.com/angelf/escargot[escargot]:
+ Elasticsearch connector for Rails (WIP).
+
+* https://metacpan.org/module/Catalyst::Model::Search::Elasticsearch[Catalyst]:
+ Elasticsearch and Catalyst integration.
+
+* http://github.com/aparo/django-elasticsearch[django-elasticsearch]:
+ Django Elasticsearch Backend.
+
+* http://github.com/Aconex/elasticflume[elasticflume]:
+ http://github.com/cloudera/flume[Flume] sink implementation.
+
+* http://code.google.com/p/terrastore/wiki/Search_Integration[Terrastore Search]:
+ http://code.google.com/p/terrastore/[Terrastore] integration module with elasticsearch.
+
+* https://github.com/infochimps-labs/wonderdog[Wonderdog]:
+ Hadoop bulk loader into elasticsearch.
+
+* http://geeks.aretotally.in/play-framework-module-elastic-search-distributed-searching-with-json-http-rest-or-java[Play!Framework]:
+ Integrate with Play! Framework Application.
+
+* https://github.com/Exercise/FOQElasticaBundle[ElasticaBundle]:
+ Symfony2 Bundle wrapping Elastica.
+
+* http://drupal.org/project/elasticsearch[Drupal]:
+ Drupal Elasticsearch integration.
+
+* https://github.com/refuge/couch_es[couch_es]:
+ elasticsearch helper for couchdb based products (apache couchdb, bigcouch & refuge)
+
+* https://github.com/sonian/elasticsearch-jetty[Jetty]:
+ Jetty HTTP Transport
+
+* https://github.com/dadoonet/spring-elasticsearch[Spring Elasticsearch]:
+ Spring Factory for Elasticsearch
+
+* https://github.com/spring-projects/spring-data-elasticsearch[Spring Data Elasticsearch]:
+ Spring Data implementation for Elasticsearch
+
+* https://camel.apache.org/elasticsearch.html[Apache Camel Integration]:
+ An Apache camel component to integrate elasticsearch
+
+* https://github.com/tlrx/elasticsearch-test[elasticsearch-test]:
+ Elasticsearch Java annotations for unit testing with
+ http://www.junit.org/[JUnit]
+
+* http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]:
+ Elasticsearch WordPress Plugin
+
+* https://github.com/OlegKunitsyn/eslogd[eslogd]:
+ Linux daemon that replicates events to a central Elasticsearch server in real-time
+
+* https://github.com/drewr/elasticsearch-clojure-repl[elasticsearch-clojure-repl]:
+ Plugin that embeds nREPL for run-time introspective adventure! Also
+ serves as an nREPL transport.
+
+* http://haystacksearch.org/[Haystack]:
+ Modular search for Django
+
+* https://github.com/cleverage/play2-elasticsearch[play2-elasticsearch]:
+ Elasticsearch module for Play Framework 2.x
+
+* https://github.com/fullscale/dangle[dangle]:
+ A set of AngularJS directives that provide common visualizations for elasticsearch based on
+ D3.
+
+* https://github.com/roundscope/ember-data-elasticsearch-kit[ember-data-elasticsearch-kit]:
+ An ember-data kit for both pushing and querying objects to Elasticsearch cluster
+
+* https://github.com/kzwang/elasticsearch-osem[elasticsearch-osem]:
+ A Java Object Search Engine Mapping (OSEM) for Elasticsearch
diff --git a/docs/community/misc.asciidoc b/docs/community/misc.asciidoc
new file mode 100644
index 0000000..7ba49c6
--- /dev/null
+++ b/docs/community/misc.asciidoc
@@ -0,0 +1,15 @@
+[[misc]]
+== Misc
+
+* https://github.com/elasticsearch/puppet-elasticsearch[Puppet]:
+ Elasticsearch puppet module.
+
+* http://github.com/elasticsearch/cookbook-elasticsearch[Chef]:
+ Chef cookbook for Elasticsearch
+
+* http://www.github.com/neogenix/daikon[daikon]:
+ Daikon Elasticsearch CLI
+
+* https://github.com/Aconex/scrutineer[Scrutineer]:
+ A high performance consistency checker to compare what you've indexed
+ with your source of truth content (e.g. DB)
diff --git a/docs/community/monitoring.asciidoc b/docs/community/monitoring.asciidoc
new file mode 100644
index 0000000..6f9d22f
--- /dev/null
+++ b/docs/community/monitoring.asciidoc
@@ -0,0 +1,31 @@
+[[health]]
+== Health and Performance Monitoring
+
+* https://github.com/lukas-vlcek/bigdesk[bigdesk]:
+ Live charts and statistics for elasticsearch cluster.
+
+* https://github.com/karmi/elasticsearch-paramedic[paramedic]:
+ Live charts with cluster stats and indices/shards information.
+
+* http://www.elastichq.org/[ElasticsearchHQ]:
+ Free cluster health monitoring tool
+
+* http://sematext.com/spm/index.html[SPM for Elasticsearch]:
+ Performance monitoring with live charts showing cluster and node stats, integrated
+ alerts, email reports, etc.
+
+* https://github.com/radu-gheorghe/check-es[check-es]:
+ Nagios/Shinken plugins for checking on elasticsearch
+
+* https://github.com/anchor/nagios-plugin-elasticsearch[check_elasticsearch]:
+ An Elasticsearch availability and performance monitoring plugin for
+ Nagios.
+
+* https://github.com/rbramley/Opsview-elasticsearch[opsview-elasticsearch]:
+ Opsview plugin written in Perl for monitoring Elasticsearch
+
+* https://github.com/polyfractal/elasticsearch-segmentspy[SegmentSpy]:
+ Plugin to watch Lucene segment merges across your cluster
+
+* https://github.com/mattweber/es2graphite[es2graphite]:
+ Send cluster and indices stats and status to Graphite for monitoring and graphing.
diff --git a/docs/groovy-api/anatomy.asciidoc b/docs/groovy-api/anatomy.asciidoc
new file mode 100644
index 0000000..fe17226
--- /dev/null
+++ b/docs/groovy-api/anatomy.asciidoc
@@ -0,0 +1,102 @@
+[[anatomy]]
+== API Anatomy
+
+Once a <<client,GClient>> has been
+obtained, all of the Elasticsearch APIs can be executed on it. Each Groovy
+API is exposed using three different mechanisms.
+
+
+[[closure]]
+=== Closure Request
+
+The first mechanism is to simply provide the request as a Closure, which
+automatically gets resolved into the respective request instance (for
+the index API, it's the `IndexRequest` class). The API returns a special
+future, called `GActionFuture`. This is a groovier version of the
+elasticsearch Java `ActionFuture` (in turn a nicer extension to Java's
+own `Future`) which allows registering listeners (closures) on it for
+success and failure, as well as blocking for the response. For example:
+
+[source,js]
+--------------------------------------------------
+def indexR = client.index {
+ index "test"
+ type "type1"
+ id "1"
+ source {
+ test = "value"
+ complex {
+ value1 = "value1"
+ value2 = "value2"
+ }
+ }
+}
+
+println "Indexed $indexR.response.id into $indexR.response.index/$indexR.response.type"
+--------------------------------------------------
+
+In the above example, calling `indexR.response` will simply block for
+the response. We can also block for the response for a specific timeout:
+
+[source,js]
+--------------------------------------------------
+IndexResponse response = indexR.response "5s" // block for 5 seconds, same as:
+response = indexR.response 5, TimeValue.SECONDS
+--------------------------------------------------
+
+We can also register closures that will be called on success and on
+failure:
+
+[source,js]
+--------------------------------------------------
+indexR.success = {IndexResponse response ->
+    println "Indexed $response.id into $response.index/$response.type"
+}
+indexR.failure = {Throwable t ->
+ println "Failed to index: $t.message"
+}
+--------------------------------------------------
+
+
+[[request]]
+=== Request
+
+This option allows passing the actual instance of the request (instead
+of a closure) as a parameter. The rest is similar to the closure option
+(the `GActionFuture` handling). For example:
+
+[source,js]
+--------------------------------------------------
+def indexR = client.index (new IndexRequest(
+ index: "test",
+ type: "type1",
+ id: "1",
+ source: {
+ test = "value"
+ complex {
+ value1 = "value1"
+ value2 = "value2"
+ }
+ }))
+
+println "Indexed $indexR.response.id into $indexR.response.index/$indexR.response.type"
+--------------------------------------------------
+
+
+[[java-like]]
+=== Java Like
+
+The last option is to provide an actual instance of the API request, and
+an `ActionListener` for the callback. This is exactly like the Java API
+with the added `gexecute` which returns the `GActionFuture`:
+
+[source,js]
+--------------------------------------------------
+def indexR = node.client.prepareIndex("test", "type1", "1").setSource({
+ test = "value"
+ complex {
+ value1 = "value1"
+ value2 = "value2"
+ }
+}).gexecute()
+--------------------------------------------------
diff --git a/docs/groovy-api/client.asciidoc b/docs/groovy-api/client.asciidoc
new file mode 100644
index 0000000..c0a6d68
--- /dev/null
+++ b/docs/groovy-api/client.asciidoc
@@ -0,0 +1,59 @@
+[[client]]
+== Client
+
+Obtaining an elasticsearch Groovy `GClient` (a `GClient` is a simple
+wrapper on top of the Java `Client`) is straightforward. The most common
+way to get a client is by starting an embedded `Node` which acts as a
+node within the cluster.
+
+
+[[node-client]]
+=== Node Client
+
+A Node based client is the simplest way to get a `GClient` that can
+start executing operations against elasticsearch.
+
+[source,js]
+--------------------------------------------------
+import org.elasticsearch.groovy.client.GClient
+import org.elasticsearch.groovy.node.GNode
+import static org.elasticsearch.groovy.node.GNodeBuilder.nodeBuilder
+
+// on startup
+
+GNode node = nodeBuilder().node();
+GClient client = node.client();
+
+// on shutdown
+
+node.close();
+--------------------------------------------------
+
+Since elasticsearch can be configured using JSON based settings, the
+configuration itself can be done using a closure that represents the
+JSON:
+
+[source,js]
+--------------------------------------------------
+import org.elasticsearch.groovy.node.GNode
+import org.elasticsearch.groovy.node.GNodeBuilder
+import static org.elasticsearch.groovy.node.GNodeBuilder.*
+
+// on startup
+
+GNodeBuilder nodeBuilder = nodeBuilder();
+nodeBuilder.settings {
+ node {
+ client = true
+ }
+ cluster {
+ name = "test"
+ }
+}
+
+GNode node = nodeBuilder.node()
+
+// on shutdown
+
+node.stop().close()
+--------------------------------------------------
diff --git a/docs/groovy-api/count.asciidoc b/docs/groovy-api/count.asciidoc
new file mode 100644
index 0000000..48e8c18
--- /dev/null
+++ b/docs/groovy-api/count.asciidoc
@@ -0,0 +1,22 @@
+[[count]]
+== Count API
+
+The count API is very similar to the
+{java}/count.html[Java count API]. The Groovy
+extension allows providing the query to execute as a `Closure` (similar
+to the GORM criteria builder):
+
+[source,js]
+--------------------------------------------------
+def count = client.count {
+ indices "test"
+ types "type1"
+ query {
+ term {
+ test = "value"
+ }
+ }
+}
+--------------------------------------------------
+
+The query follows the same {ref}/query-dsl.html[Query DSL].
diff --git a/docs/groovy-api/delete.asciidoc b/docs/groovy-api/delete.asciidoc
new file mode 100644
index 0000000..45020df
--- /dev/null
+++ b/docs/groovy-api/delete.asciidoc
@@ -0,0 +1,15 @@
+[[delete]]
+== Delete API
+
+The delete API is very similar to the
+{java}/delete.html[Java delete API], here is an
+example:
+
+[source,js]
+--------------------------------------------------
+def deleteF = node.client.delete {
+ index "test"
+ type "type1"
+ id "1"
+}
+--------------------------------------------------
diff --git a/docs/groovy-api/get.asciidoc b/docs/groovy-api/get.asciidoc
new file mode 100644
index 0000000..f5255be
--- /dev/null
+++ b/docs/groovy-api/get.asciidoc
@@ -0,0 +1,18 @@
+[[get]]
+== Get API
+
+The get API is very similar to the
+{java}/get.html[Java get API]. The main benefit
+of using Groovy is handling the source content. It can be automatically
+converted to a `Map`, which means navigating it with Groovy is simple:
+
+[source,js]
+--------------------------------------------------
+def getF = node.client.get {
+ index "test"
+ type "type1"
+ id "1"
+}
+
+println "Result of field2: $getF.response.source.complex.field2"
+--------------------------------------------------
diff --git a/docs/groovy-api/index.asciidoc b/docs/groovy-api/index.asciidoc
new file mode 100644
index 0000000..87f9a73
--- /dev/null
+++ b/docs/groovy-api/index.asciidoc
@@ -0,0 +1,51 @@
+= Groovy API
+:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
+:java: http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current
+
+[preface]
+== Preface
+
+This section describes the http://groovy.codehaus.org/[Groovy] API
+elasticsearch provides. All elasticsearch APIs are executed using a
+<<client,GClient>>, and are completely
+asynchronous in nature (they either accept a listener, or return a
+future).
+
+The Groovy API is a wrapper on top of the
+{java}[Java API], exposing it in a groovier
+manner. The execution options for each API follow a similar pattern and
+are covered in <<anatomy>>.
+
+
+[[maven]]
+=== Maven Repository
+
+The Groovy API is hosted on
+http://search.maven.org/#search%7Cga%7C1%7Ca%3A%22elasticsearch-client-groovy%22[Maven
+Central].
+
+For example, you can define the latest version in your `pom.xml` file:
+
+[source,xml]
+--------------------------------------------------
+<dependency>
+ <groupId>org.elasticsearch</groupId>
+ <artifactId>elasticsearch-client-groovy</artifactId>
+ <version>${es.version}</version>
+</dependency>
+--------------------------------------------------
+
+include::anatomy.asciidoc[]
+
+include::client.asciidoc[]
+
+include::index_.asciidoc[]
+
+include::get.asciidoc[]
+
+include::delete.asciidoc[]
+
+include::search.asciidoc[]
+
+include::count.asciidoc[]
+
diff --git a/docs/groovy-api/index_.asciidoc b/docs/groovy-api/index_.asciidoc
new file mode 100644
index 0000000..0e65a11
--- /dev/null
+++ b/docs/groovy-api/index_.asciidoc
@@ -0,0 +1,31 @@
+[[index_]]
+== Index API
+
+The index API is very similar to the
+{java}/index_.html[Java index API]. The Groovy
+extension to it is the ability to provide the indexed source using a
+closure. For example:
+
+[source,js]
+--------------------------------------------------
+def indexR = client.index {
+ index "test"
+ type "type1"
+ id "1"
+ source {
+ test = "value"
+ complex {
+ value1 = "value1"
+ value2 = "value2"
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, the source closure itself gets transformed into an
+XContent (defaults to JSON). In order to change how the source closure
+is serialized, a global (static) setting can be set on the `GClient` by
+changing the `indexContentType` field.
+
+Note also that the `source` can be set using the typical Java based
+APIs; the `Closure` option is a Groovy extension.
diff --git a/docs/groovy-api/search.asciidoc b/docs/groovy-api/search.asciidoc
new file mode 100644
index 0000000..eb9b501
--- /dev/null
+++ b/docs/groovy-api/search.asciidoc
@@ -0,0 +1,115 @@
+[[search]]
+== Search API
+
+The search API is very similar to the
+{java}/search.html[Java search API]. The Groovy
+extension allows providing the search source to execute as a `Closure`,
+including the query itself (similar to the GORM criteria builder):
+
+[source,js]
+--------------------------------------------------
+def search = node.client.search {
+ indices "test"
+ types "type1"
+ source {
+ query {
+ term(test: "value")
+ }
+ }
+}
+
+search.response.hits.each {SearchHit hit ->
+ println "Got hit $hit.id from $hit.index/$hit.type"
+}
+--------------------------------------------------
+
+It can also be executed using the "Java API" while still using a closure
+for the query:
+
+[source,js]
+--------------------------------------------------
+def search = node.client.prepareSearch("test").setQuery({
+ term(test: "value")
+}).gexecute();
+
+search.response.hits.each {SearchHit hit ->
+ println "Got hit $hit.id from $hit.index/$hit.type"
+}
+--------------------------------------------------
+
+The format of the search `Closure` follows the same JSON syntax as the
+{ref}/search-search.html[Search API] request.
+
+
+[[more-examples]]
+=== More examples
+
+Term query where multiple values are provided (see
+{ref}/query-dsl-terms-query.html[terms]):
+
+[source,js]
+--------------------------------------------------
+def search = node.client.search {
+ indices "test"
+ types "type1"
+ source {
+ query {
+ terms(test: ["value1", "value2"])
+ }
+ }
+}
+--------------------------------------------------
+
+Query string (see
+{ref}/query-dsl-query-string-query.html[query string]):
+
+[source,js]
+--------------------------------------------------
+def search = node.client.search {
+ indices "test"
+ types "type1"
+ source {
+ query {
+ query_string(
+ fields: ["test"],
+ query: "value1 value2")
+ }
+ }
+}
+--------------------------------------------------
+
+Pagination (see
+{ref}/search-request-from-size.html[from/size]):
+
+[source,js]
+--------------------------------------------------
+def search = node.client.search {
+ indices "test"
+ types "type1"
+ source {
+ from = 0
+ size = 10
+ query {
+ term(test: "value")
+ }
+ }
+}
+--------------------------------------------------
+
+Sorting (see {ref}/search-request-sort.html[sort]):
+
+[source,js]
+--------------------------------------------------
+def search = node.client.search {
+ indices "test"
+ types "type1"
+ source {
+ query {
+ term(test: "value")
+ }
+ sort = [
+ date : [ order: "desc"]
+ ]
+ }
+}
+--------------------------------------------------
diff --git a/docs/java-api/bulk.asciidoc b/docs/java-api/bulk.asciidoc
new file mode 100644
index 0000000..9b53d3a
--- /dev/null
+++ b/docs/java-api/bulk.asciidoc
@@ -0,0 +1,38 @@
+[[bulk]]
+== Bulk API
+
+The bulk API allows one to index and delete several documents in a
+single request. Here is a sample usage:
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+
+BulkRequestBuilder bulkRequest = client.prepareBulk();
+
+// either use client#prepare, or use Requests# to directly build index/delete requests
+bulkRequest.add(client.prepareIndex("twitter", "tweet", "1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .field("user", "kimchy")
+ .field("postDate", new Date())
+ .field("message", "trying out Elasticsearch")
+ .endObject()
+ )
+ );
+
+bulkRequest.add(client.prepareIndex("twitter", "tweet", "2")
+ .setSource(jsonBuilder()
+ .startObject()
+ .field("user", "kimchy")
+ .field("postDate", new Date())
+ .field("message", "another post")
+ .endObject()
+ )
+ );
+
+BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+if (bulkResponse.hasFailures()) {
+ // process failures by iterating through each bulk response item
+}
+--------------------------------------------------
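+
+As an illustration, here is a minimal sketch of processing those
+failures by iterating over the individual items (the `isFailed()` and
+`getFailureMessage()` accessors on `BulkItemResponse` are assumed for
+the sake of the example):
+
+[source,java]
+--------------------------------------------------
+for (BulkItemResponse item : bulkResponse) {
+    if (item.isFailed()) {
+        // inspect this specific index/delete failure
+        System.err.println(item.getId() + ": " + item.getFailureMessage());
+    }
+}
+--------------------------------------------------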
diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc
new file mode 100644
index 0000000..57f55ec
--- /dev/null
+++ b/docs/java-api/client.asciidoc
@@ -0,0 +1,187 @@
+[[client]]
+== Client
+
+You can use the *java client* in multiple ways:
+
+* Perform standard <<index_,index>>, <<get,get>>,
+ <<delete,delete>> and <<search,search>> operations on an
+ existing cluster
+* Perform administrative tasks on a running cluster
+* Start full nodes when you want to run Elasticsearch embedded in your
+ own application or when you want to launch unit or integration tests
+
+Obtaining an elasticsearch `Client` is simple. The most common way to
+get a client is by:
+
+1. creating an embedded link:#nodeclient[`Node`] that acts as a node
+within a cluster
+2. requesting a `Client` from your embedded `Node`.
+
+Another option is to create a link:#transport-client[`TransportClient`]
+that connects to a cluster.
+
+*Important:*
+
+______________________________________________________________________________________________________________________________________________________________
+Please note that you are encouraged to use the same version on client
+and cluster sides. You may hit incompatibility issues when mixing
+major versions.
+______________________________________________________________________________________________________________________________________________________________
+
+
+[[node-client]]
+=== Node Client
+
+Instantiating a node based client is the simplest way to get a `Client`
+that can execute operations against elasticsearch.
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.node.NodeBuilder.*;
+
+// on startup
+
+Node node = nodeBuilder().node();
+Client client = node.client();
+
+// on shutdown
+
+node.close();
+--------------------------------------------------
+
+When you start a `Node`, it joins an elasticsearch cluster. You can have
+different clusters by simply setting the `cluster.name` setting, or
+explicitly using the `clusterName` method on the builder.
+
+You can define `cluster.name` in the `/src/main/resources/elasticsearch.yml`
+file of your project. As long as `elasticsearch.yml` is present on the
+classpath, it will be used when you start your node.
+
+[source,yaml]
+--------------------------------------------------
+cluster.name: yourclustername
+--------------------------------------------------
+
+Or in Java:
+
+[source,java]
+--------------------------------------------------
+Node node = nodeBuilder().clusterName("yourclustername").node();
+Client client = node.client();
+--------------------------------------------------
+
+The benefit of using the `Client` is the fact that operations are
+automatically routed to the node(s) the operations need to be executed
+on, without performing a "double hop". For example, the index operation
+will automatically be executed on the shard that the document will end
+up living on.
+
+When you start a `Node`, the most important decision is whether it
+should hold data or not. In other words, should indices and shards be
+allocated to it. Many times we would like to have the clients just be
+clients, without shards being allocated to them. This is simple to
+configure by setting either the `node.data` setting to `false` or
+`node.client` to `true` (or the respective helper methods on the
+`NodeBuilder`):
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.node.NodeBuilder.*;
+
+// on startup
+
+Node node = nodeBuilder().client(true).node();
+Client client = node.client();
+
+// on shutdown
+
+node.close();
+--------------------------------------------------
+
+Another common usage is to start the `Node` and use the `Client` in
+unit/integration tests. In such a case, we would like to start a "local"
+`Node` (with a "local" discovery and transport). Again, this is just a
+matter of a simple setting when starting the `Node`. Note, "local" here
+means local on the JVM (well, actually class loader) level, meaning that
+two *local* servers started within the same JVM will discover each other
+and form a cluster.
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.node.NodeBuilder.*;
+
+// on startup
+
+Node node = nodeBuilder().local(true).node();
+Client client = node.client();
+
+// on shutdown
+
+node.close();
+--------------------------------------------------
+
+
+[[transport-client]]
+=== Transport Client
+
+The `TransportClient` connects remotely to an elasticsearch cluster
+using the transport module. It does not join the cluster, but simply
+gets one or more initial transport addresses and communicates with them
+in round robin fashion on each action (though most actions will probably
+be "two hop" operations).
+
+[source,java]
+--------------------------------------------------
+// on startup
+
+Client client = new TransportClient()
+ .addTransportAddress(new InetSocketTransportAddress("host1", 9300))
+ .addTransportAddress(new InetSocketTransportAddress("host2", 9300));
+
+// on shutdown
+
+client.close();
+--------------------------------------------------
+
+Note that you have to set the cluster name if you use one different from
+"elasticsearch":
+
+[source,java]
+--------------------------------------------------
+Settings settings = ImmutableSettings.settingsBuilder()
+ .put("cluster.name", "myClusterName").build();
+Client client = new TransportClient(settings);
+//Add transport addresses and do something with the client...
+--------------------------------------------------
+
+Or use the `elasticsearch.yml` file as shown in the link:#nodeclient[Node
+Client section].
+
+The client can sniff the rest of the cluster and add those nodes into
+its list of machines to use. In this case, note that the IP addresses
+used will be the ones that the other nodes were started with (the
+"publish" address). In order to enable it, set
+`client.transport.sniff` to `true`:
+
+[source,java]
+--------------------------------------------------
+Settings settings = ImmutableSettings.settingsBuilder()
+ .put("client.transport.sniff", true).build();
+TransportClient client = new TransportClient(settings);
+--------------------------------------------------
+
+Other transport client level settings include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`client.transport.ignore_cluster_name` |Set to `true` to ignore cluster
+name validation of connected nodes. (since 0.19.4)
+
+|`client.transport.ping_timeout` |The time to wait for a ping response
+from a node. Defaults to `5s`.
+
+|`client.transport.nodes_sampler_interval` |How often to sample / ping
+the nodes listed and connected. Defaults to `5s`.
+|=======================================================================
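+
+For example, here is a minimal sketch that combines several of the
+settings above when building a transport client (the host name and the
+values used are illustrative):
+
+[source,java]
+--------------------------------------------------
+Settings settings = ImmutableSettings.settingsBuilder()
+        .put("cluster.name", "myClusterName")
+        .put("client.transport.sniff", true)
+        .put("client.transport.ping_timeout", "10s")
+        .build();
+Client client = new TransportClient(settings)
+        .addTransportAddress(new InetSocketTransportAddress("host1", 9300));
+--------------------------------------------------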
+
diff --git a/docs/java-api/count.asciidoc b/docs/java-api/count.asciidoc
new file mode 100644
index 0000000..a18ad75
--- /dev/null
+++ b/docs/java-api/count.asciidoc
@@ -0,0 +1,38 @@
+[[count]]
+== Count API
+
+The count API allows you to easily execute a query and get the number of
+matches for that query. It can be executed across one or more indices
+and across one or more types. The query can be provided using the
+{ref}/query-dsl.html[Query DSL].
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+
+CountResponse response = client.prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
+For more information on the count operation, check out the REST
+{ref}/search-count.html[count] docs.
+
+
+=== Operation Threading
+
+The count API allows setting the threading model that the operation will
+be performed with, when the actual execution of the API is performed on
+the same node (i.e., the API is executed on a shard that is allocated on
+the same server).
+
+There are three threading modes. The `NO_THREADS` mode means that the
+count operation will be executed on the calling thread. The
+`SINGLE_THREAD` mode means that the count operation will be executed on
+a single different thread for all local shards. The `THREAD_PER_SHARD`
+mode means that the count operation will be executed on a different
+thread for each local shard.
+
+The default mode is `SINGLE_THREAD`.
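+
+As a sketch only, continuing the example above and assuming the count
+request builder exposes a `setOperationThreading` setter taking the
+`BroadcastOperationThreading` enum, switching the mode could look like:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+
+CountResponse response = client.prepareCount("test")
+    .setOperationThreading(BroadcastOperationThreading.NO_THREADS)
+    .setQuery(termQuery("_type", "type1"))
+    .execute()
+    .actionGet();
+--------------------------------------------------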
diff --git a/docs/java-api/delete-by-query.asciidoc b/docs/java-api/delete-by-query.asciidoc
new file mode 100644
index 0000000..3eab109
--- /dev/null
+++ b/docs/java-api/delete-by-query.asciidoc
@@ -0,0 +1,21 @@
+[[delete-by-query]]
+== Delete By Query API
+
+The delete by query API allows deleting documents from one or more
+indices and one or more types based on a <<query-dsl-queries,query>>. Here
+is an example:
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+
+DeleteByQueryResponse response = client.prepareDeleteByQuery("test")
+ .setQuery(termQuery("_type", "type1"))
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
+For more information on the delete by query operation, check out the
+{ref}/docs-delete-by-query.html[delete_by_query API]
+docs.
diff --git a/docs/java-api/delete.asciidoc b/docs/java-api/delete.asciidoc
new file mode 100644
index 0000000..409b5be
--- /dev/null
+++ b/docs/java-api/delete.asciidoc
@@ -0,0 +1,40 @@
+[[delete]]
+== Delete API
+
+The delete API allows deleting a typed JSON document from a specific
+index based on its id. The following example deletes the JSON document
+from an index called twitter, under a type called tweet, with id 1:
+
+[source,java]
+--------------------------------------------------
+DeleteResponse response = client.prepareDelete("twitter", "tweet", "1")
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
+For more information on the delete operation, check out the
+{ref}/docs-delete.html[delete API] docs.
+
+
+[[operation-threading]]
+=== Operation Threading
+
+The delete API allows setting the threading model that the operation will
+be performed with, when the actual execution of the API is performed on
+the same node (i.e., the API is executed on a shard that is allocated on
+the same server).
+
+The options are to execute the operation on a different thread, or to
+execute it on the calling thread (note that the API is still async). By
+default, `operationThreaded` is set to `true` which means the operation
+is executed on a different thread. Here is an example that sets it to
+`false`:
+
+[source,java]
+--------------------------------------------------
+DeleteResponse response = client.prepareDelete("twitter", "tweet", "1")
+ .setOperationThreaded(false)
+ .execute()
+ .actionGet();
+--------------------------------------------------
diff --git a/docs/java-api/facets.asciidoc b/docs/java-api/facets.asciidoc
new file mode 100644
index 0000000..34353c6
--- /dev/null
+++ b/docs/java-api/facets.asciidoc
@@ -0,0 +1,494 @@
+[[java-facets]]
+== Facets
+
+Elasticsearch provides a full Java API to play with facets. See the
+{ref}/search-facets.html[Facets guide].
+
+Use the facet builders factory (`FacetBuilders`) to create each facet
+you want to compute when querying, and add it to your search request:
+
+[source,java]
+--------------------------------------------------
+SearchResponse sr = node.client().prepareSearch()
+ .setQuery( /* your query */ )
+ .addFacet( /* add a facet */ )
+ .execute().actionGet();
+--------------------------------------------------
+
+Note that you can add more than one facet. See
+{ref}/search-search.html[Search Java API] for details.
+
+To build facet requests, use `FacetBuilders` helpers. Just import them
+in your class:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.FacetBuilders;
+--------------------------------------------------
+
+
+=== Facets
+
+
+[[java-facet-terms]]
+==== Terms Facet
+
+Here is how you can use
+{ref}/search-facets-terms-facet.html[Terms Facet]
+with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.termsFacet("f")
+ .field("brand")
+ .size(10);
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.terms.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+TermsFacet f = (TermsFacet) sr.getFacets().facetsAsMap().get("f");
+
+f.getTotalCount(); // Total terms doc count
+f.getOtherCount(); // Not shown terms doc count
+f.getMissingCount(); // Without term doc count
+
+// For each entry
+for (TermsFacet.Entry entry : f) {
+ entry.getTerm(); // Term
+ entry.getCount(); // Doc count
+}
+--------------------------------------------------
+
+
+[[java-facet-range]]
+==== Range Facet
+
+Here is how you can use
+{ref}/search-facets-range-facet.html[Range Facet]
+with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.rangeFacet("f")
+ .field("price") // Field to compute on
+ .addUnboundedFrom(3) // from -infinity to 3 (excluded)
+ .addRange(3, 6) // from 3 to 6 (excluded)
+ .addUnboundedTo(6); // from 6 to +infinity
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.range.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+RangeFacet f = (RangeFacet) sr.getFacets().facetsAsMap().get("f");
+
+// For each entry
+for (RangeFacet.Entry entry : f) {
+ entry.getFrom(); // Range from requested
+ entry.getTo(); // Range to requested
+ entry.getCount(); // Doc count
+ entry.getMin(); // Min value
+ entry.getMax(); // Max value
+ entry.getMean(); // Mean
+ entry.getTotal(); // Sum of values
+}
+--------------------------------------------------
+
+
+[[histogram]]
+==== Histogram Facet
+
+Here is how you can use
+{ref}/search-facets-histogram-facet.html[Histogram
+Facet] with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+HistogramFacetBuilder facet = FacetBuilders.histogramFacet("f")
+ .field("price")
+ .interval(1);
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.histogram.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+HistogramFacet f = (HistogramFacet) sr.getFacets().facetsAsMap().get("f");
+
+// For each entry
+for (HistogramFacet.Entry entry : f) {
+ entry.getKey(); // Key (X-Axis)
+ entry.getCount(); // Doc count (Y-Axis)
+}
+--------------------------------------------------
+
+
+[[date-histogram]]
+==== Date Histogram Facet
+
+Here is how you can use
+{ref}/search-facets-date-histogram-facet.html[Date
+Histogram Facet] with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.dateHistogramFacet("f")
+ .field("date") // Your date field
+ .interval("year"); // You can also use "quarter", "month", "week", "day",
+ // "hour" and "minute" or notation like "1.5h" or "2w"
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.datehistogram.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+DateHistogramFacet f = (DateHistogramFacet) sr.getFacets().facetsAsMap().get("f");
+
+// For each entry
+for (DateHistogramFacet.Entry entry : f) {
+ entry.getTime(); // Date in ms since epoch (X-Axis)
+ entry.getCount(); // Doc count (Y-Axis)
+}
+--------------------------------------------------
+
+
+[[filter]]
+==== Filter Facet (not facet filter)
+
+Here is how you can use
+{ref}/search-facets-filter-facet.html[Filter Facet]
+with Java API.
+
+If you are looking on how to apply a filter to a facet, have a look at
+link:#facet-filter[facet filter] using Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.filterFacet("f",
+ FilterBuilders.termFilter("brand", "heineken")); // Your Filter here
+--------------------------------------------------
+
+See <<query-dsl-filters,Filters>> to
+learn how to build filters using Java.
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.filter.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+FilterFacet f = (FilterFacet) sr.getFacets().facetsAsMap().get("f");
+
+f.getCount(); // Number of docs that matched
+--------------------------------------------------
+
+
+[[query]]
+==== Query Facet
+
+Here is how you can use
+{ref}/search-facets-query-facet.html[Query Facet]
+with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.queryFacet("f",
+ QueryBuilders.matchQuery("brand", "heineken"));
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.query.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+QueryFacet f = (QueryFacet) sr.getFacets().facetsAsMap().get("f");
+
+f.getCount(); // Number of docs that matched
+--------------------------------------------------
+
+See <<query-dsl-queries,Queries>> to
+learn how to build queries using Java.
+
+
+[[statistical]]
+==== Statistical
+
+Here is how you can use
+{ref}/search-facets-statistical-facet.html[Statistical
+Facet] with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.statisticalFacet("f")
+ .field("price");
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.statistical.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+StatisticalFacet f = (StatisticalFacet) sr.getFacets().facetsAsMap().get("f");
+
+f.getCount(); // Doc count
+f.getMin(); // Min value
+f.getMax(); // Max value
+f.getMean(); // Mean
+f.getTotal(); // Sum of values
+f.getStdDeviation(); // Standard Deviation
+f.getSumOfSquares(); // Sum of Squares
+f.getVariance(); // Variance
+--------------------------------------------------
+
+
+[[terms-stats]]
+==== Terms Stats Facet
+
+Here is how you can use
+{ref}/search-facets-terms-stats-facet.html[Terms
+Stats Facet] with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.termsStatsFacet("f")
+ .keyField("brand")
+ .valueField("price");
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.termsstats.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+TermsStatsFacet f = (TermsStatsFacet) sr.getFacets().facetsAsMap().get("f");
+f.getTotalCount(); // Total terms doc count
+f.getOtherCount(); // Not shown terms doc count
+f.getMissingCount(); // Without term doc count
+
+// For each entry
+for (TermsStatsFacet.Entry entry : f) {
+ entry.getTerm(); // Term
+ entry.getCount(); // Doc count
+ entry.getMin(); // Min value
+ entry.getMax(); // Max value
+ entry.getMean(); // Mean
+ entry.getTotal(); // Sum of values
+}
+--------------------------------------------------
+
+
+[[geo-distance]]
+==== Geo Distance Facet
+
+Here is how you can use
+{ref}/search-facets-geo-distance-facet.html[Geo
+Distance Facet] with Java API.
+
+
+===== Prepare facet request
+
+Here is an example on how to create the facet request:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders.geoDistanceFacet("f")
+ .field("pin.location") // Field containing coordinates we want to compare with
+ .point(40, -70) // Point from where we start (0)
+ .addUnboundedFrom(10) // 0 to 10 km (excluded)
+ .addRange(10, 20) // 10 to 20 km (excluded)
+ .addRange(20, 100) // 20 to 100 km (excluded)
+ .addUnboundedTo(100) // from 100 km to infinity (and beyond ;-) )
+ .unit(DistanceUnit.KILOMETERS); // All distances are in kilometers. Can be MILES
+--------------------------------------------------
+
+
+===== Use facet response
+
+Import Facet definition classes:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.facet.geodistance.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// sr is here your SearchResponse object
+GeoDistanceFacet f = (GeoDistanceFacet) sr.getFacets().facetsAsMap().get("f");
+
+// For each entry
+for (GeoDistanceFacet.Entry entry : f) {
+ entry.getFrom(); // Distance from requested
+ entry.getTo(); // Distance to requested
+ entry.getCount(); // Doc count
+ entry.getMin(); // Min value
+ entry.getMax(); // Max value
+ entry.getTotal(); // Sum of values
+ entry.getMean(); // Mean
+}
+--------------------------------------------------
+
+
+[[facet-filter]]
+=== Facet filters (not Filter Facet)
+
+By default, facets are applied to the query result set, whatever filters
+exist in the request.
+
+If you need to compute facets with the same filters or even with other
+filters, you can add the filter to any facet using
+`AbstractFacetBuilder#facetFilter(FilterBuilder)` method:
+
+[source,java]
+--------------------------------------------------
+FacetBuilders
+ .termsFacet("f").field("brand") // Your facet
+ .facetFilter( // Your filter here
+ FilterBuilders.termFilter("colour", "pale")
+ );
+--------------------------------------------------
+
+For example, you can reuse the same filter you created for your query:
+
+[source,java]
+--------------------------------------------------
+// A common filter
+FilterBuilder filter = FilterBuilders.termFilter("colour", "pale");
+
+TermsFacetBuilder facet = FacetBuilders.termsFacet("f")
+ .field("brand")
+ .facetFilter(filter); // We apply it to the facet
+
+SearchResponse sr = node.client().prepareSearch()
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFilter(filter) // We apply it to the query
+ .addFacet(facet)
+ .execute().actionGet();
+--------------------------------------------------
+
+See documentation on how to build
+<<query-dsl-filters,Filters>>.
+
+
+[[scope]]
+=== Scope
+
+By default, facets are computed within the query result set. But you can
+compute facets from all documents in the index, whatever the query is,
+using the `global` parameter:
+
+[source,java]
+--------------------------------------------------
+TermsFacetBuilder facet = FacetBuilders.termsFacet("f")
+ .field("brand")
+ .global(true);
+--------------------------------------------------
diff --git a/docs/java-api/get.asciidoc b/docs/java-api/get.asciidoc
new file mode 100644
index 0000000..c87dbef
--- /dev/null
+++ b/docs/java-api/get.asciidoc
@@ -0,0 +1,38 @@
+[[get]]
+== Get API
+
+The get API allows getting a typed JSON document from the index based on
+its id. The following example gets a JSON document from an index called
+twitter, under a type called tweet, with id 1:
+
+[source,java]
+--------------------------------------------------
+GetResponse response = client.prepareGet("twitter", "tweet", "1")
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
+For more information on the get operation, check out the REST
+{ref}/docs-get.html[get] docs.
+
+
+=== Operation Threading
+
+The get API allows setting the threading model that the operation will
+be performed with, when the actual execution of the API is performed on
+the same node (i.e., the API is executed on a shard that is allocated on
+the same server).
+
+The options are to execute the operation on a different thread, or to
+execute it on the calling thread (note that the API is still async). By
+default, `operationThreaded` is set to `true` which means the operation
+is executed on a different thread. Here is an example that sets it to
+`false`:
+
+[source,java]
+--------------------------------------------------
+GetResponse response = client.prepareGet("twitter", "tweet", "1")
+ .setOperationThreaded(false)
+ .execute()
+ .actionGet();
+--------------------------------------------------
diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc
new file mode 100644
index 0000000..a9e3015
--- /dev/null
+++ b/docs/java-api/index.asciidoc
@@ -0,0 +1,61 @@
+[[java-api]]
+= Java API
+:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
+
+[preface]
+== Preface
+This section describes the Java API that elasticsearch provides. All
+elasticsearch operations are executed using a
+<<client,Client>> object. All
+operations are completely asynchronous in nature (they either accept a
+listener, or return a future).
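+
+For example, here is a minimal sketch of both styles for the get API
+(assuming an existing `client` instance; imports are omitted):
+
+[source,java]
+--------------------------------------------------
+// future style: block until the response is available
+GetResponse response = client.prepareGet("twitter", "tweet", "1")
+        .execute()
+        .actionGet();
+
+// listener style: get notified asynchronously
+client.prepareGet("twitter", "tweet", "1").execute(new ActionListener<GetResponse>() {
+    public void onResponse(GetResponse response) {
+        // handle the response
+    }
+    public void onFailure(Throwable e) {
+        // handle the failure
+    }
+});
+--------------------------------------------------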
+
+Additionally, operations on a client may be accumulated and executed in
+<<bulk,Bulk>>.
+
+Note that all the APIs are exposed through the
+Java API (in fact, the Java API is used internally to execute them).
+
+
+== Maven Repository
+
+Elasticsearch is hosted on
+http://search.maven.org/#search%7Cga%7C1%7Ca%3A%22elasticsearch%22[Maven
+Central].
+
+For example, you can define the latest version in your `pom.xml` file:
+
+[source,xml]
+--------------------------------------------------
+<dependency>
+ <groupId>org.elasticsearch</groupId>
+ <artifactId>elasticsearch</artifactId>
+ <version>${es.version}</version>
+</dependency>
+--------------------------------------------------
+
+
+include::client.asciidoc[]
+
+include::index_.asciidoc[]
+
+include::get.asciidoc[]
+
+include::delete.asciidoc[]
+
+include::bulk.asciidoc[]
+
+include::search.asciidoc[]
+
+include::count.asciidoc[]
+
+include::delete-by-query.asciidoc[]
+
+include::facets.asciidoc[]
+
+include::percolate.asciidoc[]
+
+include::query-dsl-queries.asciidoc[]
+
+include::query-dsl-filters.asciidoc[]
+
diff --git a/docs/java-api/index_.asciidoc b/docs/java-api/index_.asciidoc
new file mode 100644
index 0000000..5ad9ef5
--- /dev/null
+++ b/docs/java-api/index_.asciidoc
@@ -0,0 +1,205 @@
+[[index_]]
+== Index API
+
+The index API allows one to index a typed JSON document into a specific
+index and make it searchable.
+
+
+[[generate]]
+=== Generate JSON document
+
+There are several ways of generating a JSON document:
+
+* Manually (aka do it yourself) using native `byte[]` or as a `String`
+
+* Using `Map` that will be automatically converted to its JSON
+equivalent
+
+* Using a third party library to serialize your beans such as
+http://wiki.fasterxml.com/JacksonHome[Jackson]
+
+* Using the built-in helpers such as `XContentFactory.jsonBuilder()`
+
+Internally, each type is converted to `byte[]` (so a String is converted
+to a `byte[]`). Therefore, if the object is in this form already, then
+use it. The `jsonBuilder` is a highly optimized JSON generator that
+directly constructs a `byte[]`.
+
+
+==== Do It Yourself
+
+Nothing really difficult here, but note that you will have to encode
+dates according to the
+{ref}/mapping-date-format.html[Date Format].
+
+[source,java]
+--------------------------------------------------
+String json = "{" +
+ "\"user\":\"kimchy\"," +
+ "\"postDate\":\"2013-01-30\"," +
+ "\"message\":\"trying out Elasticsearch\"" +
+ "}";
+--------------------------------------------------
+
+
+[[using-map]]
+==== Using Map
+
+A `Map` is a key/value pair collection. It represents a JSON
+structure very well:
+
+[source,java]
+--------------------------------------------------
+Map<String, Object> json = new HashMap<String, Object>();
+json.put("user","kimchy");
+json.put("postDate",new Date());
+json.put("message","trying out Elasticsearch");
+--------------------------------------------------
+
+
+[[beans]]
+==== Serialize your beans
+
+Elasticsearch already uses Jackson, but shades it under the
+`org.elasticsearch.common.jackson` package. So, you can add your own
+Jackson version in your `pom.xml` file or on your classpath. See the
+http://wiki.fasterxml.com/JacksonDownload[Jackson Download Page].
+
+For example:
+
+[source,xml]
+--------------------------------------------------
+<dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ <version>2.1.3</version>
+</dependency>
+--------------------------------------------------
+
+Then, you can start serializing your beans to JSON:
+
+[source,java]
+--------------------------------------------------
+import com.fasterxml.jackson.databind.*;
+
+// instantiate a JSON mapper
+ObjectMapper mapper = new ObjectMapper(); // create once, reuse
+
+// generate json
+String json = mapper.writeValueAsString(yourbeaninstance);
+--------------------------------------------------
+
+
+[[helpers]]
+==== Use Elasticsearch helpers
+
+Elasticsearch provides built-in helpers to generate JSON content.
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+
+XContentBuilder builder = jsonBuilder()
+ .startObject()
+ .field("user", "kimchy")
+ .field("postDate", new Date())
+ .field("message", "trying out Elasticsearch")
+ .endObject()
+    .endObject();
+
+Note that you can also add arrays with the `startArray(String)` and
+`endArray()` methods (see the sketch below). By the way, the `field`
+method accepts many object types: you can pass numbers, dates and even
+other XContentBuilder objects directly.
+
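+For instance, here is a short sketch that adds a `tags` array (the field
+names are illustrative):
+
+[source,java]
+--------------------------------------------------
+XContentBuilder builder = jsonBuilder()
+    .startObject()
+        .field("user", "kimchy")
+        .startArray("tags")
+            .value("elasticsearch")
+            .value("search")
+        .endArray()
+    .endObject();
+--------------------------------------------------
+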
+If you need to see the generated JSON content, you can use the
+`string()` method.
+
+[source,java]
+--------------------------------------------------
+String json = builder.string();
+--------------------------------------------------
+
+
+[[index-doc]]
+=== Index document
+
+The following example indexes a JSON document into an index called
+twitter, under a type called tweet, with id 1:
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+
+IndexResponse response = client.prepareIndex("twitter", "tweet", "1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .field("user", "kimchy")
+ .field("postDate", new Date())
+ .field("message", "trying out Elasticsearch")
+ .endObject()
+ )
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
+Note that you can also index your documents as JSON String and that you
+don't have to give an ID:
+
+[source,java]
+--------------------------------------------------
+String json = "{" +
+ "\"user\":\"kimchy\"," +
+ "\"postDate\":\"2013-01-30\"," +
+ "\"message\":\"trying out Elasticsearch\"" +
+ "}";
+
+IndexResponse response = client.prepareIndex("twitter", "tweet")
+ .setSource(json)
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
+The `IndexResponse` object will give you a report:
+
+[source,java]
+--------------------------------------------------
+// Index name
+String _index = response.index();
+// Type name
+String _type = response.type();
+// Document ID (generated or not)
+String _id = response.id();
+// Version (if it's the first time you index this document, you will get: 1)
+long _version = response.version();
+--------------------------------------------------
+
+If you use percolation while indexing, the `IndexResponse` object will
+give you the percolators that have matched:
+
+[source,java]
+--------------------------------------------------
+IndexResponse response = client.prepareIndex("twitter", "tweet", "1")
+ .setSource(json)
+ .execute()
+ .actionGet();
+
+List<String> matches = response.matches();
+--------------------------------------------------
+
+For more information on the index operation, check out the REST
+{ref}/docs-index_.html[index] docs.
+
+
+=== Operation Threading
+
+The index API allows setting the threading model that the operation will
+be performed with, when the actual execution of the API is performed on
+the same node (i.e., the API is executed on a shard that is allocated on
+the same server).
+
+The options are to execute the operation on a different thread, or to
+execute it on the calling thread (note that the API is still async). By
+default, `operationThreaded` is set to `true` which means the operation
+is executed on a different thread.
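+
+Following the pattern of the get and delete APIs, a sketch that forces
+execution on the calling thread might look like this (assuming the index
+request builder exposes the same `setOperationThreaded` setter):
+
+[source,java]
+--------------------------------------------------
+IndexResponse response = client.prepareIndex("twitter", "tweet", "1")
+    .setOperationThreaded(false)
+    .setSource(json)
+    .execute()
+    .actionGet();
+--------------------------------------------------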
diff --git a/docs/java-api/percolate.asciidoc b/docs/java-api/percolate.asciidoc
new file mode 100644
index 0000000..f0187fd
--- /dev/null
+++ b/docs/java-api/percolate.asciidoc
@@ -0,0 +1,50 @@
+[[percolate]]
+== Percolate API
+
+The percolator allows registering queries against an index, and then
+sending `percolate` requests which include a doc, getting back the
+queries that match on that doc out of the set of registered queries.
+
+Read the main {ref}/search-percolate.html[percolate]
+documentation before reading this guide.
+
+[source,java]
+--------------------------------------------------
+//This is the query we're registering in the percolator
+QueryBuilder qb = termQuery("content", "amazing");
+
+//Index the query = register it in the percolator
+client.prepareIndex("myIndexName", ".percolator", "myDesignatedQueryName")
+ .setSource(jsonBuilder()
+ .startObject()
+ .field("query", qb) // Register the query
+ .endObject())
+ .setRefresh(true) // Needed when the query shall be available immediately
+ .execute().actionGet();
+--------------------------------------------------
+
+This indexes the above term query under the name
+*myDesignatedQueryName*.
+
+In order to check a document against the registered queries, use this
+code:
+
+[source,java]
+--------------------------------------------------
+//Build a document to check against the percolator
+XContentBuilder docBuilder = XContentFactory.jsonBuilder().startObject();
+docBuilder.field("doc").startObject(); //This is needed to designate the document
+docBuilder.field("content", "This is amazing!");
+docBuilder.endObject(); //End of the doc field
+docBuilder.endObject(); //End of the JSON root object
+//Percolate
+PercolateResponse response = client.preparePercolate()
+ .setIndices("myIndexName")
+ .setDocumentType("myDocumentType")
+ .setSource(docBuilder).execute().actionGet();
+//Iterate over the results
+for(PercolateResponse.Match match : response) {
+ //Handle the result which is the name of
+ //the query in the percolator
+}
+--------------------------------------------------
diff --git a/docs/java-api/query-dsl-filters.asciidoc b/docs/java-api/query-dsl-filters.asciidoc
new file mode 100644
index 0000000..2164ee8
--- /dev/null
+++ b/docs/java-api/query-dsl-filters.asciidoc
@@ -0,0 +1,462 @@
+[[query-dsl-filters]]
+== Query DSL - Filters
+
+elasticsearch provides a full Java query DSL in a similar manner to the
+REST {ref}/query-dsl.html[Query DSL]. The factory for filter
+builders is `FilterBuilders`.
+
+Once your query is ready, you can use the <<search,Search API>>.
+
+See also how to build <<query-dsl-queries,Queries>>.
+
+To use `FilterBuilders` just import them in your class:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.index.query.FilterBuilders;
+--------------------------------------------------
+
+Note that you can easily print (aka debug) the generated JSON using the
+`toString()` method on the `FilterBuilder` object.
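+
+For instance, a quick sketch that prints the JSON a filter generates:
+
+[source,java]
+--------------------------------------------------
+FilterBuilder filter = FilterBuilders.termFilter("user", "kimchy");
+System.out.println(filter.toString()); // prints the filter as JSON
+--------------------------------------------------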
+
+
+[[and-filter]]
+=== And Filter
+
+See {ref}/query-dsl-and-filter.html[And Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.andFilter(
+ FilterBuilders.rangeFilter("postDate").from("2010-03-01").to("2010-04-01"),
+ FilterBuilders.prefixFilter("name.second", "ba")
+ );
+--------------------------------------------------
+
+Note that you can cache the result using
+`AndFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
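+
+For example, a minimal sketch enabling the cache on the and filter above:
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.andFilter(
+        FilterBuilders.rangeFilter("postDate").from("2010-03-01").to("2010-04-01"),
+        FilterBuilders.prefixFilter("name.second", "ba")
+    )
+    .cache(true);
+--------------------------------------------------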
+
+
+[[bool-filter]]
+=== Bool Filter
+
+See {ref}/query-dsl-bool-filter.html[Bool Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.boolFilter()
+ .must(FilterBuilders.termFilter("tag", "wow"))
+ .mustNot(FilterBuilders.rangeFilter("age").from("10").to("20"))
+ .should(FilterBuilders.termFilter("tag", "sometag"))
+ .should(FilterBuilders.termFilter("tag", "sometagtag"));
+--------------------------------------------------
+
+Note that you can cache the result using
+`BoolFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[exists-filter]]
+=== Exists Filter
+
+See {ref}/query-dsl-exists-filter.html[Exists Filter].
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.existsFilter("user");
+--------------------------------------------------
+
+
+[[ids-filter]]
+=== Ids Filter
+
+See {ref}/query-dsl-ids-filter.html[IDs Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.idsFilter("my_type", "type2").addIds("1", "4", "100");
+
+// Type is optional
+FilterBuilders.idsFilter().addIds("1", "4", "100");
+--------------------------------------------------
+
+
+[[limit-filter]]
+=== Limit Filter
+
+See {ref}/query-dsl-limit-filter.html[Limit Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.limitFilter(100);
+--------------------------------------------------
+
+
+[[type-filter]]
+=== Type Filter
+
+See {ref}/query-dsl-type-filter.html[Type Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.typeFilter("my_type");
+--------------------------------------------------
+
+
+[[geo-bbox-filter]]
+=== Geo Bounding Box Filter
+
+See {ref}/query-dsl-geo-bounding-box-filter.html[Geo
+Bounding Box Filter]
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.geoBoundingBoxFilter("pin.location")
+ .topLeft(40.73, -74.1)
+ .bottomRight(40.717, -73.99);
+--------------------------------------------------
+
+Note that you can cache the result using the
+`GeoBoundingBoxFilterBuilder#cache(boolean)` method. See
+<<query-dsl-filters-caching>>.
+
+
+[[geo-distance-filter]]
+=== Geo Distance Filter
+
+See {ref}/query-dsl-geo-distance-filter.html[Geo
+Distance Filter]
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.geoDistanceFilter("pin.location")
+ .point(40, -70)
+ .distance(200, DistanceUnit.KILOMETERS)
+ .optimizeBbox("memory") // Can also be "indexed" or "none"
+ .geoDistance(GeoDistance.ARC); // Or GeoDistance.PLANE
+--------------------------------------------------
+
+Note that you can cache the result using the
+`GeoDistanceFilterBuilder#cache(boolean)` method. See
+<<query-dsl-filters-caching>>.
+
+
+[[geo-distance-range-filter]]
+=== Geo Distance Range Filter
+
+See {ref}/query-dsl-geo-distance-range-filter.html[Geo
+Distance Range Filter]
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.geoDistanceRangeFilter("pin.location")
+ .point(40, -70)
+ .from("200km")
+ .to("400km")
+ .includeLower(true)
+ .includeUpper(false)
+ .optimizeBbox("memory") // Can also be "indexed" or "none"
+ .geoDistance(GeoDistance.ARC); // Or GeoDistance.PLANE
+--------------------------------------------------
+
+Note that you can cache the result using the
+`GeoDistanceRangeFilterBuilder#cache(boolean)` method. See
+<<query-dsl-filters-caching>>.
+
+
+[[geo-poly-filter]]
+=== Geo Polygon Filter
+
+See {ref}/query-dsl-geo-polygon-filter.html[Geo Polygon
+Filter]
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.geoPolygonFilter("pin.location")
+ .addPoint(40, -70)
+ .addPoint(30, -80)
+ .addPoint(20, -90);
+--------------------------------------------------
+
+Note that you can cache the result using the
+`GeoPolygonFilterBuilder#cache(boolean)` method. See
+<<query-dsl-filters-caching>>.
+
+
+[[geo-shape-filter]]
+=== Geo Shape Filter
+
+See {ref}/query-dsl-geo-shape-filter.html[Geo Shape
+Filter]
+
+Note: the `geo_shape` type uses `Spatial4J` and `JTS`, both of which are
+optional dependencies. Consequently you must add `Spatial4J` and `JTS`
+to your classpath in order to use this type:
+
+[source,xml]
+-----------------------------------------------
+<dependency>
+ <groupId>com.spatial4j</groupId>
+ <artifactId>spatial4j</artifactId>
+ <version>0.3</version>
+</dependency>
+
+<dependency>
+ <groupId>com.vividsolutions</groupId>
+ <artifactId>jts</artifactId>
+ <version>1.12</version>
+ <exclusions>
+ <exclusion>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ </exclusion>
+ </exclusions>
+</dependency>
+-----------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// Import Spatial4J shapes
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.impl.PointImpl;
+import com.spatial4j.core.shape.impl.RectangleImpl;
+
+// Also import ShapeRelation
+import org.elasticsearch.common.geo.ShapeRelation;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// Shape within another
+FilterBuilder filter = FilterBuilders.geoShapeFilter("location",
+ new RectangleImpl(0,10,0,10,SpatialContext.GEO))
+ .relation(ShapeRelation.WITHIN);
+
+// Intersect shapes
+filter = FilterBuilders.geoShapeFilter("location",
+ new PointImpl(0, 0, SpatialContext.GEO))
+ .relation(ShapeRelation.INTERSECTS);
+
+// Using pre-indexed shapes
+filter = FilterBuilders.geoShapeFilter("location", "New Zealand", "countries")
+ .relation(ShapeRelation.DISJOINT);
+--------------------------------------------------
+
+
+[[has-child-parent-filter]]
+=== Has Child / Has Parent Filters
+
+See:
+ * {ref}/query-dsl-has-child-filter.html[Has Child Filter]
+ * {ref}/query-dsl-has-parent-filter.html[Has Parent Filter]
+
+[source,java]
+--------------------------------------------------
+// Has Child
+FilterBuilders.hasChildFilter("blog_tag",
+ QueryBuilders.termQuery("tag", "something"));
+
+// Has Parent
+FilterBuilders.hasParentFilter("blog",
+ QueryBuilders.termQuery("tag", "something"));
+--------------------------------------------------
+
+
+[[match-all-filter]]
+=== Match All Filter
+
+See {ref}/query-dsl-match-all-filter.html[Match All Filter]
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.matchAllFilter();
+--------------------------------------------------
+
+
+[[missing-filter]]
+=== Missing Filter
+
+See {ref}/query-dsl-missing-filter.html[Missing Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.missingFilter("user")
+ .existence(true)
+ .nullValue(true);
+--------------------------------------------------
+
+
+[[not-filter]]
+=== Not Filter
+
+See {ref}/query-dsl-not-filter.html[Not Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.notFilter(
+ FilterBuilders.rangeFilter("price").from("1").to("2"));
+--------------------------------------------------
+
+
+[[or-filter]]
+=== Or Filter
+
+See {ref}/query-dsl-or-filter.html[Or Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.orFilter(
+ FilterBuilders.termFilter("name.second", "banon"),
+ FilterBuilders.termFilter("name.nick", "kimchy")
+ );
+--------------------------------------------------
+
+Note that you can cache the result using the
+`OrFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[prefix-filter]]
+=== Prefix Filter
+
+See {ref}/query-dsl-prefix-filter.html[Prefix Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.prefixFilter("user", "ki");
+--------------------------------------------------
+
+Note that you can cache the result using the
+`PrefixFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[query-filter]]
+=== Query Filter
+
+See {ref}/query-dsl-query-filter.html[Query Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.queryFilter(
+ QueryBuilders.queryString("this AND that OR thus")
+ );
+--------------------------------------------------
+
+Note that you can cache the result using the
+`QueryFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[range-filter]]
+=== Range Filter
+
+See {ref}/query-dsl-range-filter.html[Range Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.rangeFilter("age")
+ .from("10")
+ .to("20")
+ .includeLower(true)
+ .includeUpper(false);
+
+// A simplified form using gte, gt, lt or lte
+FilterBuilders.rangeFilter("age")
+ .gte("10")
+ .lt("20");
+--------------------------------------------------
+
+Note that you can disable caching of the result using the
+`RangeFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[script-filter]]
+=== Script Filter
+
+See {ref}/query-dsl-script-filter.html[Script Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilder filter = FilterBuilders.scriptFilter(
+ "doc['age'].value > param1"
+ ).addParam("param1", 10);
+--------------------------------------------------
+
+Note that you can cache the result using the
+`ScriptFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[term-filter]]
+=== Term Filter
+
+See {ref}/query-dsl-term-filter.html[Term Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.termFilter("user", "kimchy");
+--------------------------------------------------
+
+Note that you can disable caching of the result using the
+`TermFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[terms-filter]]
+=== Terms Filter
+
+See {ref}/query-dsl-terms-filter.html[Terms Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.termsFilter("user", "kimchy", "elasticsearch")
+ .execution("plain"); // Optional, can also be "bool", "and" or "or"
+ // or "bool_nocache", "and_nocache" or "or_nocache"
+--------------------------------------------------
+
+Note that you can disable caching of the result using the
+`TermsFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+
+[[nested-filter]]
+=== Nested Filter
+
+See {ref}/query-dsl-nested-filter.html[Nested Filter]
+
+
+[source,java]
+--------------------------------------------------
+FilterBuilders.nestedFilter("obj1",
+ QueryBuilders.boolQuery()
+ .must(QueryBuilders.matchQuery("obj1.name", "blue"))
+ .must(QueryBuilders.rangeQuery("obj1.count").gt(5))
+ );
+--------------------------------------------------
+
+Note that you can disable caching of the result using the
+`NestedFilterBuilder#cache(boolean)` method. See <<query-dsl-filters-caching>>.
+
+[[query-dsl-filters-caching]]
+=== Caching
+
+Each filter type has a default caching policy: some filters are cached by
+default, others are not. You can fine-tune this behavior with the
+`cache(boolean)` method where it exists. For example:
+
+[source,java]
+--------------------------------------------------
+FilterBuilder filter = FilterBuilders.andFilter(
+ FilterBuilders.rangeFilter("postDate").from("2010-03-01").to("2010-04-01"),
+ FilterBuilders.prefixFilter("name.second", "ba")
+ )
+ .cache(true);
+--------------------------------------------------
diff --git a/docs/java-api/query-dsl-queries.asciidoc b/docs/java-api/query-dsl-queries.asciidoc
new file mode 100644
index 0000000..5760753
--- /dev/null
+++ b/docs/java-api/query-dsl-queries.asciidoc
@@ -0,0 +1,450 @@
+[[query-dsl-queries]]
+== Query DSL - Queries
+
+Elasticsearch provides a full Java query DSL that mirrors the
+REST {ref}/query-dsl.html[Query DSL]. The factory for query
+builders is `QueryBuilders`. Once your query is ready, you can use the
+<<search,Search API>>.
+
+See also how to build <<query-dsl-filters,Filters>>.
+
+To use `QueryBuilders`, statically import its methods in your class:
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.index.query.QueryBuilders.*;
+--------------------------------------------------
+
+Note that you can easily print the generated JSON (for debugging) by
+calling the `toString()` method on a `QueryBuilder` object.
+
+The `QueryBuilder` can then be used with any API that accepts a query,
+such as `count` and `search`.
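+
+For example, a minimal sketch reusing a single builder for both APIs
+(`client` is an existing `Client` instance; the index and field names are
+illustrative):
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.termQuery("user", "kimchy");
+System.out.println(qb.toString()); // print the generated JSON
+
+long hits = client.prepareCount("index1")
+    .setQuery(qb).execute().actionGet().getCount();
+
+SearchResponse response = client.prepareSearch("index1")
+    .setQuery(qb).execute().actionGet();
+--------------------------------------------------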
+
+
+[[match]]
+=== Match Query
+
+See {ref}/query-dsl-match-query.html[Match Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.matchQuery("name", "kimchy elasticsearch");
+--------------------------------------------------
+
+
+[[multimatch]]
+=== MultiMatch Query
+
+See {ref}/query-dsl-multi-match-query.html[MultiMatch
+Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.multiMatchQuery(
+ "kimchy elasticsearch", // Text you are looking for
+ "user", "message" // Fields you query on
+ );
+--------------------------------------------------
+
+
+[[bool]]
+=== Boolean Query
+
+See {ref}/query-dsl-bool-query.html[Boolean Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders
+ .boolQuery()
+ .must(termQuery("content", "test1"))
+ .must(termQuery("content", "test4"))
+ .mustNot(termQuery("content", "test2"))
+ .should(termQuery("content", "test3"));
+--------------------------------------------------
+
+
+[[boosting]]
+=== Boosting Query
+
+See {ref}/query-dsl-boosting-query.html[Boosting Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.boostingQuery()
+ .positive(QueryBuilders.termQuery("name","kimchy"))
+ .negative(QueryBuilders.termQuery("name","dadoonet"))
+ .negativeBoost(0.2f);
+--------------------------------------------------
+
+
+[[ids]]
+=== IDs Query
+
+See {ref}/query-dsl-ids-query.html[IDs Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.idsQuery().ids("1", "2");
+--------------------------------------------------
+
+[[constant-score]]
+=== Constant Score Query
+
+See {ref}/query-dsl-constant-score-query.html[Constant
+Score Query]
+
+[source,java]
+--------------------------------------------------
+// Using with Filters
+QueryBuilders.constantScoreQuery(FilterBuilders.termFilter("name","kimchy"))
+ .boost(2.0f);
+
+// With Queries
+QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("name","kimchy"))
+ .boost(2.0f);
+--------------------------------------------------
+
+
+[[dismax]]
+=== Disjunction Max Query
+
+See {ref}/query-dsl-dis-max-query.html[Disjunction Max
+Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.disMaxQuery()
+ .add(QueryBuilders.termQuery("name","kimchy")) // Your queries
+ .add(QueryBuilders.termQuery("name","elasticsearch")) // Your queries
+ .boost(1.2f)
+ .tieBreaker(0.7f);
+--------------------------------------------------
+
+
+[[flt]]
+=== Fuzzy Like This (Field) Query (flt and flt_field)
+
+See:
+ * {ref}/query-dsl-flt-query.html[Fuzzy Like This Query]
+ * {ref}/query-dsl-flt-field-query.html[Fuzzy Like This Field Query]
+
+[source,java]
+--------------------------------------------------
+// flt Query
+QueryBuilders.fuzzyLikeThisQuery("name.first", "name.last") // Fields
+ .likeText("text like this one") // Text
+ .maxQueryTerms(12); // Max num of Terms
+ // in generated queries
+
+// flt_field Query
+QueryBuilders.fuzzyLikeThisFieldQuery("name.first") // Only on single field
+ .likeText("text like this one")
+ .maxQueryTerms(12);
+--------------------------------------------------
+
+
+[[fuzzy]]
+=== Fuzzy Query
+
+See {ref}/query-dsl-fuzzy-query.html[Fuzzy Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.fuzzyQuery("name", "kimzhy");
+--------------------------------------------------
+
+
+[[has-child-parent]]
+=== Has Child / Has Parent
+
+See:
+ * {ref}/query-dsl-has-child-query.html[Has Child Query]
+ * {ref}/query-dsl-has-parent-query.html[Has Parent Query]
+
+[source,java]
+--------------------------------------------------
+// Has Child
+QueryBuilders.hasChildQuery("blog_tag",
+ QueryBuilders.termQuery("tag","something"));
+
+// Has Parent
+QueryBuilders.hasParentQuery("blog",
+ QueryBuilders.termQuery("tag","something"));
+--------------------------------------------------
+
+
+[[match-all]]
+=== MatchAll Query
+
+See {ref}/query-dsl-match-all-query.html[Match All
+Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.matchAllQuery();
+--------------------------------------------------
+
+
+[[mlt]]
+=== More Like This (Field) Query (mlt and mlt_field)
+
+See:
+ * {ref}/query-dsl-mlt-query.html[More Like This Query]
+ * {ref}/query-dsl-mlt-field-query.html[More Like This Field Query]
+
+[source,java]
+--------------------------------------------------
+// mlt Query
+QueryBuilders.moreLikeThisQuery("name.first", "name.last") // Fields
+ .likeText("text like this one") // Text
+ .minTermFreq(1) // Ignore terms with a lower frequency
+ .maxQueryTerms(12); // Max num of Terms
+ // in generated queries
+
+// mlt_field Query
+QueryBuilders.moreLikeThisFieldQuery("name.first") // Only on single field
+ .likeText("text like this one")
+ .minTermFreq(1)
+ .maxQueryTerms(12);
+--------------------------------------------------
+
+
+[[prefix]]
+=== Prefix Query
+
+See {ref}/query-dsl-prefix-query.html[Prefix Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.prefixQuery("brand", "heine");
+--------------------------------------------------
+
+
+[[query-string]]
+=== QueryString Query
+
+See {ref}/query-dsl-query-string-query.html[QueryString Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.queryString("+kimchy -elasticsearch");
+--------------------------------------------------
+
+
+[[java-range]]
+=== Range Query
+
+See {ref}/query-dsl-range-query.html[Range Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders
+ .rangeQuery("price")
+ .from(5)
+ .to(10)
+ .includeLower(true)
+ .includeUpper(false);
+--------------------------------------------------
+
+
+=== Span Queries (first, near, not, or, term)
+
+See:
+ * {ref}/query-dsl-span-first-query.html[Span First Query]
+ * {ref}/query-dsl-span-near-query.html[Span Near Query]
+ * {ref}/query-dsl-span-not-query.html[Span Not Query]
+ * {ref}/query-dsl-span-or-query.html[Span Or Query]
+ * {ref}/query-dsl-span-term-query.html[Span Term Query]
+
+[source,java]
+--------------------------------------------------
+// Span First
+QueryBuilders.spanFirstQuery(
+ QueryBuilders.spanTermQuery("user", "kimchy"), // Query
+ 3 // Max End position
+ );
+
+// Span Near
+QueryBuilders.spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("field","value1")) // Span Term Queries
+ .clause(QueryBuilders.spanTermQuery("field","value2"))
+ .clause(QueryBuilders.spanTermQuery("field","value3"))
+ .slop(12) // Slop factor
+ .inOrder(false)
+ .collectPayloads(false);
+
+// Span Not
+QueryBuilders.spanNotQuery()
+ .include(QueryBuilders.spanTermQuery("field","value1"))
+ .exclude(QueryBuilders.spanTermQuery("field","value2"));
+
+// Span Or
+QueryBuilders.spanOrQuery()
+ .clause(QueryBuilders.spanTermQuery("field","value1"))
+ .clause(QueryBuilders.spanTermQuery("field","value2"))
+ .clause(QueryBuilders.spanTermQuery("field","value3"));
+
+// Span Term
+QueryBuilders.spanTermQuery("user","kimchy");
+--------------------------------------------------
+
+
+[[term]]
+=== Term Query
+
+See {ref}/query-dsl-term-query.html[Term Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilder qb = QueryBuilders.termQuery("name", "kimchy");
+--------------------------------------------------
+
+
+[[java-terms]]
+=== Terms Query
+
+See {ref}/query-dsl-terms-query.html[Terms Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.termsQuery("tags", // field
+ "blue", "pill") // values
+ .minimumMatch(1); // How many terms must match
+--------------------------------------------------
+
+
+[[top-children]]
+=== Top Children Query
+
+See {ref}/query-dsl-top-children-query.html[Top Children Query]
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.topChildrenQuery(
+ "blog_tag", // field
+ QueryBuilders.termQuery("tag", "something") // Query
+ )
+ .score("max") // max, sum or avg
+ .factor(5)
+ .incrementalFactor(2);
+--------------------------------------------------
+
+
+[[wildcard]]
+=== Wildcard Query
+
+See {ref}/query-dsl-wildcard-query.html[Wildcard Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.wildcardQuery("user", "k?mc*");
+--------------------------------------------------
+
+
+[[nested]]
+=== Nested Query
+
+See {ref}/query-dsl-nested-query.html[Nested Query]
+
+
+[source,java]
+--------------------------------------------------
+QueryBuilders.nestedQuery("obj1", // Path
+ QueryBuilders.boolQuery() // Your query
+ .must(QueryBuilders.matchQuery("obj1.name", "blue"))
+ .must(QueryBuilders.rangeQuery("obj1.count").gt(5))
+ )
+ .scoreMode("avg"); // max, total, avg or none
+--------------------------------------------------
+
+
+
+[[indices]]
+=== Indices Query
+
+See {ref}/query-dsl-indices-query.html[Indices Query]
+
+
+[source,java]
+--------------------------------------------------
+// Using another query when no match for the main one
+QueryBuilders.indicesQuery(
+ QueryBuilders.termQuery("tag", "wow"),
+ "index1", "index2"
+ )
+ .noMatchQuery(QueryBuilders.termQuery("tag", "kow"));
+
+// Using all (match all) or none (match no documents)
+QueryBuilders.indicesQuery(
+ QueryBuilders.termQuery("tag", "wow"),
+ "index1", "index2"
+ )
+ .noMatchQuery("all"); // all or none
+--------------------------------------------------
+
+
+[[geo-shape]]
+=== GeoShape Query
+
+See {ref}/query-dsl-geo-shape-query.html[GeoShape Query]
+
+
+Note: the `geo_shape` type uses `Spatial4J` and `JTS`, both of which are
+optional dependencies. Consequently you must add `Spatial4J` and `JTS`
+to your classpath in order to use this type:
+
+[source,xml]
+--------------------------------------------------
+<dependency>
+ <groupId>com.spatial4j</groupId>
+ <artifactId>spatial4j</artifactId>
+ <version>0.3</version>
+</dependency>
+
+<dependency>
+ <groupId>com.vividsolutions</groupId>
+ <artifactId>jts</artifactId>
+ <version>1.12</version>
+ <exclusions>
+ <exclusion>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ </exclusion>
+ </exclusions>
+</dependency>
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// Import Spatial4J shapes
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.impl.PointImpl;
+import com.spatial4j.core.shape.impl.RectangleImpl;
+
+// Also import ShapeRelation
+import org.elasticsearch.common.geo.ShapeRelation;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+// Shape within another
+QueryBuilders.geoShapeQuery("location",
+ new RectangleImpl(0,10,0,10,SpatialContext.GEO))
+ .relation(ShapeRelation.WITHIN);
+
+// Intersect shapes
+QueryBuilders.geoShapeQuery("location",
+ new PointImpl(0, 0, SpatialContext.GEO))
+ .relation(ShapeRelation.INTERSECTS);
+
+// Using pre-indexed shapes
+QueryBuilders.geoShapeQuery("location", "New Zealand", "countries")
+ .relation(ShapeRelation.DISJOINT);
+--------------------------------------------------
diff --git a/docs/java-api/search.asciidoc b/docs/java-api/search.asciidoc
new file mode 100644
index 0000000..5cb27b4
--- /dev/null
+++ b/docs/java-api/search.asciidoc
@@ -0,0 +1,140 @@
+[[search]]
+== Search API
+
+The search API allows you to execute a search query and get back search hits
+that match the query. It can be executed across one or more indices and
+across one or more types. The query can either be provided using the
+<<query-dsl-queries,query Java API>> or
+the <<query-dsl-filters,filter Java API>>.
+The body of the search request is built using the
+`SearchSourceBuilder`. Here is an example:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+--------------------------------------------------
+
+[source,java]
+--------------------------------------------------
+SearchResponse response = client.prepareSearch("index1", "index2")
+ .setTypes("type1", "type2")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.termQuery("multi", "test")) // Query
+ .setPostFilter(FilterBuilders.rangeFilter("age").from(12).to(18)) // Filter
+ .setFrom(0).setSize(60).setExplain(true)
+ .execute()
+ .actionGet();
+--------------------------------------------------
+
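+To build the body explicitly instead of using the convenience setters,
+here is a minimal sketch with `SearchSourceBuilder` (assuming, as in this
+release, that `SearchRequest#source(...)` accepts the builder):
+
+[source,java]
+--------------------------------------------------
+// requires: import org.elasticsearch.action.search.SearchRequest;
+//           import org.elasticsearch.search.builder.SearchSourceBuilder;
+SearchSourceBuilder source = new SearchSourceBuilder()
+    .query(QueryBuilders.termQuery("multi", "test"))
+    .from(0).size(60);
+
+SearchResponse response = client.search(
+    new SearchRequest("index1").source(source)).actionGet();
+--------------------------------------------------
+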
+Note that all parameters are optional. Here is the smallest search call
+you can write:
+
+[source,java]
+--------------------------------------------------
+// MatchAll on the whole cluster with all default options
+SearchResponse response = client.prepareSearch().execute().actionGet();
+--------------------------------------------------
+
+For more information on the search operation, check out the REST
+{ref}/search.html[search] docs.
+
+
+[[scrolling]]
+=== Using scrolls in Java
+
+Read the {ref}/search-request-scroll.html[scroll documentation]
+first!
+
+[source,java]
+--------------------------------------------------
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+
+QueryBuilder qb = termQuery("multi", "test");
+
+SearchResponse scrollResp = client.prepareSearch("test")
+ .setSearchType(SearchType.SCAN)
+ .setScroll(new TimeValue(60000))
+ .setQuery(qb)
+ .setSize(100).execute().actionGet(); //100 hits per shard will be returned for each scroll
+//Scroll until no hits are returned
+while (true) {
+ scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(600000)).execute().actionGet();
+ for (SearchHit hit : scrollResp.getHits()) {
+ //Handle the hit...
+ }
+ //Break condition: No hits are returned
+ if (scrollResp.getHits().getHits().length == 0) {
+ break;
+ }
+}
+--------------------------------------------------
+
+
+=== Operation Threading
+
+The search API allows you to set the threading model that the operation
+will use when the actual execution of the API is performed on the same
+node (i.e. when the API is executed on a shard that is allocated on the
+same server).
+
+There are three threading modes. The `NO_THREADS` mode means that the
+search operation will be executed on the calling thread. The
+`SINGLE_THREAD` mode means that the search operation will be executed on
+a single different thread for all local shards. The `THREAD_PER_SHARD`
+mode means that the search operation will be executed on a different
+thread for each local shard.
+
+The default mode is `THREAD_PER_SHARD`.
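+
+A minimal sketch of selecting a mode explicitly (this assumes the
+`SearchOperationThreading` enum and its setter on the request builder, as
+shipped in this release):
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.action.search.SearchOperationThreading;
+
+SearchResponse response = client.prepareSearch("index1")
+    .setQuery(QueryBuilders.matchAllQuery())
+    // run all local shard searches on a single thread
+    .setOperationThreading(SearchOperationThreading.SINGLE_THREAD)
+    .execute().actionGet();
+--------------------------------------------------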
+
+
+[[msearch]]
+=== MultiSearch API
+
+See {ref}/search-multi-search.html[MultiSearch API Query]
+documentation
+
+[source,java]
+--------------------------------------------------
+SearchRequestBuilder srb1 = node.client()
+ .prepareSearch().setQuery(QueryBuilders.queryString("elasticsearch")).setSize(1);
+SearchRequestBuilder srb2 = node.client()
+ .prepareSearch().setQuery(QueryBuilders.matchQuery("name", "kimchy")).setSize(1);
+
+MultiSearchResponse sr = node.client().prepareMultiSearch()
+ .add(srb1)
+ .add(srb2)
+ .execute().actionGet();
+
+// You will get all individual responses from MultiSearchResponse#getResponses()
+long nbHits = 0;
+for (MultiSearchResponse.Item item : sr.getResponses()) {
+ SearchResponse response = item.getResponse();
+ nbHits += response.getHits().getTotalHits();
+}
+--------------------------------------------------
+
+
+[[java-search-facets]]
+=== Using Facets
+
+The following code shows how to add two facets within your search:
+
+[source,java]
+--------------------------------------------------
+SearchResponse sr = node.client().prepareSearch()
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addFacet(FacetBuilders.termsFacet("f1").field("field"))
+ .addFacet(FacetBuilders.dateHistogramFacet("f2").field("birth").interval("year"))
+ .execute().actionGet();
+
+// Get your facet results
+TermsFacet f1 = (TermsFacet) sr.getFacets().facetsAsMap().get("f1");
+DateHistogramFacet f2 = (DateHistogramFacet) sr.getFacets().facetsAsMap().get("f2");
+--------------------------------------------------
+
+See <<java-facets,Facets Java API>>
+documentation for details.
diff --git a/docs/javascript/index.asciidoc b/docs/javascript/index.asciidoc
new file mode 100644
index 0000000..2364cc5
--- /dev/null
+++ b/docs/javascript/index.asciidoc
@@ -0,0 +1,138 @@
+= elasticsearch-js
+
+== Overview
+
+Official low-level client for Elasticsearch. Its goal is to provide common
+ground for all Elasticsearch-related code in JavaScript; because of this it tries
+to be opinion-free and highly extensible.
+
+The full documentation is available at http://elasticsearch.github.io/elasticsearch-js
+
+
+=== Getting the Node.js module
+
+To install the module into an existing Node.js project use npm:
+
+[source,sh]
+------------------------------------
+npm install elasticsearch
+------------------------------------
+
+=== Getting the browser client
+
+For browser-based projects, builds for modern browsers are available http://elasticsearch.github.io/elasticsearch-js#browser-builds[here]. Download one of the archives and extract it; inside you'll find three files. Pick the one that best matches your environment:
+
+ * elasticsearch.jquery.js - for projects that already use jQuery
+ * elasticsearch.angular.js - for Angular projects
+ * elasticsearch.js - generic build for all other projects
+
+Each of the library-specific builds ties into the AJAX and Promise creation facilities provided by its respective library. This is an example of how Elasticsearch.js can be extended to provide a more opinionated approach when appropriate.
+
+=== Setting up the client
+
+Now you are ready to get busy! The first thing you'll need to do is create an instance of `elasticsearch.Client`. Here are several examples of configuration parameters you can use when creating that instance. For a full list of configuration options see http://elasticsearch.github.io/elasticsearch-js/index.html#configuration[the configuration docs].
+
+[source,javascript]
+------------------------------------
+var elasticsearch = require('elasticsearch');
+
+// Connect to localhost:9200 and use the default settings
+var client = new elasticsearch.Client();
+
+// Connect the client to two nodes, requests will be
+// load-balanced between them using round-robin
+var client = new elasticsearch.Client({
+ hosts: [
+ 'elasticsearch1:9200',
+ 'elasticsearch2:9200'
+ ]
+});
+
+// Connect to this host's cluster, sniff
+// for the rest of the cluster right away, and
+// again every 5 minutes
+var client = new elasticsearch.Client({
+ host: 'elasticsearch1:9200',
+ sniffOnStart: true,
+ sniffInterval: 300000
+});
+
+// Connect to this host using https, basic auth,
+// a path prefix, and static query string values
+var client = new elasticsearch.Client({
+ host: 'https://user:password@elasticsearch1/search?app=blog'
+});
+------------------------------------
+
+
+=== Setting up the client in the browser
+
+The params accepted by the `Client` constructor are the same in the browser versions of the client, but how you access the `Client` constructor differs depending on the build you are using. Below is an example of instantiating a client in each build.
+
+[source,javascript]
+------------------------------------
+// elasticsearch.js adds the elasticsearch namespace to the window
+var client = new elasticsearch.Client({ ... });
+
+// elasticsearch.jquery.js adds the es namespace to the jQuery object
+var client = new jQuery.es.Client({ ... });
+
+// elasticsearch.angular.js creates an elasticsearch
+// module, which provides an esFactory
+var app = angular.module('app', ['elasticsearch']);
+app.service('es', function (esFactory) {
+ return esFactory({ ... });
+});
+------------------------------------
+
+=== Using the client instance to make API calls
+
+Once you create the client, making API calls is simple.
+
+[source,javascript]
+------------------------------------
+// get the current status of the entire cluster.
+// Note: params are always optional, you can just send a callback
+client.cluster.health(function (err, resp) {
+ if (err) {
+ console.error(err.message);
+ } else {
+ console.dir(resp);
+ }
+});
+
+// index a document
+client.index({
+ index: 'blog',
+ type: 'post',
+ id: 1,
+ body: {
+ title: 'JavaScript Everywhere!',
+ content: 'It all started when...',
+ date: '2013-12-17'
+ }
+}, function (err, resp) {
+ // ...
+});
+
+// search for documents (omitting the callback returns a promise!)
+client.search({
+ index: 'users',
+ size: 50,
+ body: {
+ query: {
+ match: {
+ profile: 'elasticsearch'
+ }
+ }
+ }
+}).then(function (resp) {
+ var hits = resp.hits.hits;
+});
+------------------------------------
+
+== Copyright and License
+
+This software is Copyright (c) 2013 by Elasticsearch BV.
+
+This is free software, licensed under The Apache License Version 2.0. \ No newline at end of file
diff --git a/docs/perl/index.asciidoc b/docs/perl/index.asciidoc
new file mode 100644
index 0000000..717dd20
--- /dev/null
+++ b/docs/perl/index.asciidoc
@@ -0,0 +1,107 @@
+= Elasticsearch.pm
+
+== Overview
+
+Elasticsearch.pm is the official Perl API for Elasticsearch. The full
+documentation is available on https://metacpan.org/module/Elasticsearch.
+
+It can be installed with:
+
+[source,sh]
+------------------------------------
+cpanm Elasticsearch
+------------------------------------
+
+=== Features
+
+This client provides:
+
+* Full support for all Elasticsearch APIs
+
+* HTTP backend (currently synchronous only - AnyEvent support will be added later)
+
+* Robust networking support which handles load balancing, failure detection and failover
+
+* Good defaults
+
+* Helper utilities for more complex operations, such as bulk indexing, scrolled searches and reindexing.
+
+* Logging support via Log::Any
+
+* Compatibility with the official clients for Python, Ruby, PHP and JavaScript
+
+* Easy extensibility
+
+== Synopsis
+
+[source,perl]
+------------------------------------
+use Elasticsearch;
+
+# Connect to localhost:9200:
+my $e = Elasticsearch->new();
+
+# Round-robin between two nodes:
+my $e = Elasticsearch->new(
+ nodes => [
+ 'search1:9200',
+ 'search2:9200'
+ ]
+);
+
+# Connect to cluster at search1:9200, sniff all nodes and round-robin between them:
+my $e = Elasticsearch->new(
+ nodes => 'search1:9200',
+ cxn_pool => 'Sniff'
+);
+
+# Index a document:
+$e->index(
+ index => 'my_app',
+ type => 'blog_post',
+ id => 1,
+ body => {
+ title => 'Elasticsearch clients',
+ content => 'Interesting content...',
+ date => '2013-09-24'
+ }
+);
+
+# Get the document:
+my $doc = $e->get(
+ index => 'my_app',
+ type => 'blog_post',
+ id => 1
+);
+
+# Search:
+my $results = $e->search(
+ index => 'my_app',
+ body => {
+ query => {
+ match => { title => 'elasticsearch' }
+ }
+ }
+);
+------------------------------------
+
+== Reporting issues
+
+The GitHub repository is http://github.com/elasticsearch/elasticsearch-perl
+and any issues can be reported on the issues list at
+http://github.com/elasticsearch/elasticsearch-perl/issues.
+
+== Contributing
+
+Open source contributions are welcome. Please read our
+https://github.com/elasticsearch/elasticsearch-perl/blob/master/CONTRIBUTING.asciidoc[guide to contributing].
+
+== Copyright and License
+
+This software is Copyright (c) 2013 by Elasticsearch BV.
+
+This is free software, licensed under:
+https://github.com/elasticsearch/elasticsearch-perl/blob/master/LICENSE.txt[The Apache License Version 2.0].
+
+
+
diff --git a/docs/python/index.asciidoc b/docs/python/index.asciidoc
new file mode 100644
index 0000000..ac8eb05
--- /dev/null
+++ b/docs/python/index.asciidoc
@@ -0,0 +1,90 @@
+= elasticsearch-py
+
+== Overview
+
+Official low-level client for Elasticsearch. Its goal is to provide common
+ground for all Elasticsearch-related code in Python; because of this it tries
+to be opinion-free and highly extensible. The full documentation is available at
+http://elasticsearch-py.rtfd.org/
+
+It can be installed with:
+
+[source,sh]
+------------------------------------
+pip install elasticsearch
+------------------------------------
+
+=== Versioning
+
+There are two development branches - `master` and `0.4`. The `master` branch
+is used to track all the changes for Elasticsearch 1.0 and beyond, whereas `0.4`
+tracks Elasticsearch 0.90.
+
+Releases with major version 1 (1.X.Y) are to be used with Elasticsearch 1.* and
+later, while 0.4 releases are meant to work with Elasticsearch 0.90.*.
+
+=== Example use
+
+Simple use-case:
+
+[source,python]
+------------------------------------
+>>> from datetime import datetime
+>>> from elasticsearch import Elasticsearch
+
+# by default we connect to localhost:9200
+>>> es = Elasticsearch()
+
+# datetimes will be serialized
+>>> es.index(index="my-index", doc_type="test-type", id=42, body={"any": "data", "timestamp": datetime.now()})
+{u'_id': u'42', u'_index': u'my-index', u'_type': u'test-type', u'_version': 1, u'ok': True}
+
+# but not deserialized
+>>> es.get(index="my-index", doc_type="test-type", id=42)['_source']
+{u'any': u'data', u'timestamp': u'2013-05-12T19:45:31.804229'}
+------------------------------------
+
+[NOTE]
+All the API calls map the raw REST API as closely as possible, including
+the distinction between required and optional arguments to the calls. This
+means that the code makes a distinction between positional and keyword arguments;
+however, we recommend that you use keyword arguments for all calls for
+consistency and safety.
+
+=== Features
+
+The client's features include:
+
+* translating basic Python data types to and from JSON (datetimes are not
+ decoded for performance reasons)
+
+* configurable automatic discovery of cluster nodes
+
+* persistent connections
+
+* load balancing (with pluggable selection strategy) across all available nodes
+
+* failed connection penalization (time based - failed connections won't be
+ retried until a timeout is reached)
+
+* thread safety
+
+* pluggable architecture
+
+
+=== License
+
+Copyright 2013 Elasticsearch
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
diff --git a/docs/reference/analysis.asciidoc b/docs/reference/analysis.asciidoc
new file mode 100644
index 0000000..7009ca3
--- /dev/null
+++ b/docs/reference/analysis.asciidoc
@@ -0,0 +1,77 @@
+[[analysis]]
+= Analysis
+
+[partintro]
+--
+The index analysis module acts as a configurable registry of Analyzers
+that can be used both to break indexed (analyzed) fields into terms when a
+document is indexed and to process query strings. It maps to the Lucene
+`Analyzer`.
+
+
+Analyzers are composed of a single <<analysis-tokenizers,Tokenizer>>
+and zero or more <<analysis-tokenfilters,TokenFilters>>. The tokenizer may
+be preceded by one or more <<analysis-charfilters,CharFilters>>. The
+analysis module allows one to register `TokenFilters`, `Tokenizers` and
+`Analyzers` under logical names that can then be referenced either in
+mapping definitions or in certain APIs. The Analysis module
+automatically registers (*if not explicitly defined*) built in
+analyzers, token filters, and tokenizers.
+
+Here is a sample configuration:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ standard :
+ type : standard
+ stopwords : [stop1, stop2]
+ myAnalyzer1 :
+ type : standard
+ stopwords : [stop1, stop2, stop3]
+ max_token_length : 500
+ # configure a custom analyzer which is
+ # exactly like the default standard analyzer
+ myAnalyzer2 :
+ tokenizer : standard
+ filter : [standard, lowercase, stop]
+ tokenizer :
+ myTokenizer1 :
+ type : standard
+ max_token_length : 900
+ myTokenizer2 :
+ type : keyword
+ buffer_size : 512
+ filter :
+ myTokenFilter1 :
+ type : stop
+ stopwords : [stop1, stop2, stop3, stop4]
+ myTokenFilter2 :
+ type : length
+ min : 0
+ max : 2000
+--------------------------------------------------
+
+[float]
+[[backwards-compatibility]]
+=== Backwards compatibility
+
+All analyzers, tokenizers, and token filters can be configured with a
+`version` parameter to control which Lucene version behavior they should
+use. Possible values are: `3.0` - `3.6`, `4.0` - `4.3` (the highest
+version number is the default option).
+
+--
+
+include::analysis/analyzers.asciidoc[]
+
+include::analysis/tokenizers.asciidoc[]
+
+include::analysis/tokenfilters.asciidoc[]
+
+include::analysis/charfilters.asciidoc[]
+
+include::analysis/icu-plugin.asciidoc[]
+
diff --git a/docs/reference/analysis/analyzers.asciidoc b/docs/reference/analysis/analyzers.asciidoc
new file mode 100644
index 0000000..b97231b
--- /dev/null
+++ b/docs/reference/analysis/analyzers.asciidoc
@@ -0,0 +1,71 @@
+[[analysis-analyzers]]
+== Analyzers
+
+Analyzers are composed of a single <<analysis-tokenizers,Tokenizer>>
+and zero or more <<analysis-tokenfilters,TokenFilters>>. The tokenizer may
+be preceded by one or more <<analysis-charfilters,CharFilters>>.
+The analysis module allows you to register `Analyzers` under logical
+names which can then be referenced either in mapping definitions or in
+certain APIs.
+
+Elasticsearch comes with a number of prebuilt analyzers which are
+ready to use. Alternatively, you can combine the built-in
+character filters, tokenizers and token filters to create
+<<analysis-custom-analyzer,custom analyzers>>.
+
+[float]
+[[default-analyzers]]
+=== Default Analyzers
+
+An analyzer is registered under a logical name. It can then be
+referenced from mapping definitions or certain APIs. When none is
+defined, the defaults are used. You can also configure which analyzers
+will be used by default when none can be derived.
+
+The `default` logical name allows one to configure an analyzer that will
+be used both for indexing and for searching APIs. The `default_index`
+logical name can be used to configure a default analyzer that will be
+used just when indexing, and the `default_search` can be used to
+configure a default analyzer that will be used just when searching.
+
+[float]
+[[aliasing-analyzers]]
+=== Aliasing Analyzers
+
+Analyzers can be aliased to have several registered lookup names
+associated with them. For example, the following will allow
+the `standard` analyzer to also be referenced with `alias1`
+and `alias2` values.
+
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ standard :
+ alias: [alias1, alias2]
+ type : standard
+ stopwords : [test1, test2, test3]
+--------------------------------------------------
+
+Below is a list of the built-in analyzers.
+
+include::analyzers/standard-analyzer.asciidoc[]
+
+include::analyzers/simple-analyzer.asciidoc[]
+
+include::analyzers/whitespace-analyzer.asciidoc[]
+
+include::analyzers/stop-analyzer.asciidoc[]
+
+include::analyzers/keyword-analyzer.asciidoc[]
+
+include::analyzers/pattern-analyzer.asciidoc[]
+
+include::analyzers/lang-analyzer.asciidoc[]
+
+include::analyzers/snowball-analyzer.asciidoc[]
+
+include::analyzers/custom-analyzer.asciidoc[]
+
diff --git a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc
new file mode 100644
index 0000000..5c778a6
--- /dev/null
+++ b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc
@@ -0,0 +1,52 @@
+[[analysis-custom-analyzer]]
+=== Custom Analyzer
+
+An analyzer of type `custom` that allows you to combine a `Tokenizer` with
+zero or more `Token Filters`, and zero or more `Char Filters`. The
+custom analyzer accepts a logical/registered name of the tokenizer to
+use, and a list of logical/registered names of token filters.
+
+The following are settings that can be set for a `custom` analyzer type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`tokenizer` |The logical / registered name of the tokenizer to use.
+
+|`filter` |An optional list of logical / registered names of token
+filters.
+
+|`char_filter` |An optional list of logical / registered names of char
+filters.
+|=======================================================================
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ myAnalyzer2 :
+ type : custom
+ tokenizer : myTokenizer1
+ filter : [myTokenFilter1, myTokenFilter2]
+ char_filter : [my_html]
+ tokenizer :
+ myTokenizer1 :
+ type : standard
+ max_token_length : 900
+ filter :
+ myTokenFilter1 :
+ type : stop
+ stopwords : [stop1, stop2, stop3, stop4]
+ myTokenFilter2 :
+ type : length
+ min : 0
+ max : 2000
+ char_filter :
+ my_html :
+ type : html_strip
+ escaped_tags : [xxx, yyy]
+ read_ahead : 1024
+--------------------------------------------------
diff --git a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc
new file mode 100644
index 0000000..7704895
--- /dev/null
+++ b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc
@@ -0,0 +1,7 @@
+[[analysis-keyword-analyzer]]
+=== Keyword Analyzer
+
+An analyzer of type `keyword` that "tokenizes" an entire stream as a
+single token. This is useful for data like zip codes, ids and so on.
+Note, when using mapping definitions, it might make more sense to simply
+mark the field as `not_analyzed`.
diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc
new file mode 100644
index 0000000..f963e4b
--- /dev/null
+++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc
@@ -0,0 +1,21 @@
+[[analysis-lang-analyzer]]
+=== Language Analyzers
+
+A set of analyzers aimed at analyzing specific language text. The
+following types are supported: `arabic`, `armenian`, `basque`,
+`brazilian`, `bulgarian`, `catalan`, `chinese`, `cjk`, `czech`,
+`danish`, `dutch`, `english`, `finnish`, `french`, `galician`, `german`,
+`greek`, `hindi`, `hungarian`, `indonesian`, `italian`, `norwegian`,
+`persian`, `portuguese`, `romanian`, `russian`, `spanish`, `swedish`,
+`turkish`, `thai`.
+
+All analyzers support setting custom `stopwords` either internally in
+the config, or by using an external stopwords file by setting
+`stopwords_path`. Check <<analysis-stop-analyzer,Stop Analyzer>> for
+more details.
+
+The following analyzers support setting a custom `stem_exclusion` list:
+`arabic`, `armenian`, `basque`, `brazilian`, `bulgarian`, `catalan`,
+`czech`, `danish`, `dutch`, `english`, `finnish`, `french`, `galician`,
+`german`, `hindi`, `hungarian`, `indonesian`, `italian`, `norwegian`,
+`portuguese`, `romanian`, `russian`, `spanish`, `swedish`, `turkish`.
diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
new file mode 100644
index 0000000..424ecfa
--- /dev/null
+++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
@@ -0,0 +1,130 @@
+[[analysis-pattern-analyzer]]
+=== Pattern Analyzer
+
+An analyzer of type `pattern` that can flexibly separate text into terms
+via a regular expression.
+
+The following are settings that can be set for a `pattern` analyzer
+type:
+
+[cols="<,<",options="header",]
+|===================================================================
+|Setting |Description
+|`lowercase` |Should terms be lowercased or not. Defaults to `true`.
+|`pattern` |The regular expression pattern, defaults to `\W+`.
+|`flags` |The regular expression flags.
+|`stopwords` |A list of stopwords to initialize the stop filter with.
+Defaults to an 'empty' stopword list added[1.0.0.RC1, Previously
+defaulted to the English stopwords list]. Check
+<<analysis-stop-analyzer,Stop Analyzer>> for more details.
+|===================================================================
+
+*IMPORTANT*: The regular expression should match the *token separators*,
+not the tokens themselves.
+
+Flags should be pipe-separated, e.g. `"CASE_INSENSITIVE|COMMENTS"`. Check
+http://download.oracle.com/javase/6/docs/api/java/util/regex/Pattern.html#field_summary[Java
+Pattern API] for more details about `flags` options.
+
+[float]
+==== Pattern Analyzer Examples
+
+In order to try out these examples, you should delete the `test` index
+before running each example:
+
+[source,js]
+--------------------------------------------------
+ curl -XDELETE localhost:9200/test
+--------------------------------------------------
+
+[float]
+===== Whitespace tokenizer
+
+[source,js]
+--------------------------------------------------
+ curl -XPUT 'localhost:9200/test' -d '
+ {
+ "settings":{
+ "analysis": {
+ "analyzer": {
+ "whitespace":{
+ "type": "pattern",
+ "pattern":"\\\\s+"
+ }
+ }
+ }
+ }
+ }'
+
+ curl 'localhost:9200/test/_analyze?pretty=1&analyzer=whitespace' -d 'foo,bar baz'
+ # "foo,bar", "baz"
+--------------------------------------------------
+
+[float]
+===== Non-word character tokenizer
+
+[source,js]
+--------------------------------------------------
+
+ curl -XPUT 'localhost:9200/test' -d '
+ {
+ "settings":{
+ "analysis": {
+ "analyzer": {
+ "nonword":{
+ "type": "pattern",
+ "pattern":"[^\\\\w]+"
+ }
+ }
+ }
+ }
+ }'
+
+ curl 'localhost:9200/test/_analyze?pretty=1&analyzer=nonword' -d 'foo,bar baz'
+ # "foo,bar baz" becomes "foo", "bar", "baz"
+
+ curl 'localhost:9200/test/_analyze?pretty=1&analyzer=nonword' -d 'type_1-type_4'
+ # "type_1","type_4"
+--------------------------------------------------
+
+[float]
+===== CamelCase tokenizer
+
+[source,js]
+--------------------------------------------------
+
+ curl -XPUT 'localhost:9200/test?pretty=1' -d '
+ {
+ "settings":{
+ "analysis": {
+ "analyzer": {
+ "camel":{
+ "type": "pattern",
+ "pattern":"([^\\\\p{L}\\\\d]+)|(?<=\\\\D)(?=\\\\d)|(?<=\\\\d)(?=\\\\D)|(?<=[\\\\p{L}&&[^\\\\p{Lu}]])(?=\\\\p{Lu})|(?<=\\\\p{Lu})(?=\\\\p{Lu}[\\\\p{L}&&[^\\\\p{Lu}]])"
+ }
+ }
+ }
+ }
+ }'
+
+ curl 'localhost:9200/test/_analyze?pretty=1&analyzer=camel' -d '
+ MooseX::FTPClass2_beta
+ '
+ # "moose","x","ftp","class","2","beta"
+--------------------------------------------------
+
+The regex above is easier to understand as:
+
+[source,js]
+--------------------------------------------------
+
+ ([^\\p{L}\\d]+) # swallow non letters and numbers,
+ | (?<=\\D)(?=\\d) # or non-number followed by number,
+ | (?<=\\d)(?=\\D) # or number followed by non-number,
+ | (?<=[ \\p{L} && [^\\p{Lu}]]) # or lower case
+ (?=\\p{Lu}) # followed by upper case,
+ | (?<=\\p{Lu}) # or upper case
+ (?=\\p{Lu} # followed by upper case
+ [\\p{L}&&[^\\p{Lu}]] # then lower case
+ )
+--------------------------------------------------
diff --git a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
new file mode 100644
index 0000000..9d7a7c3
--- /dev/null
+++ b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
@@ -0,0 +1,6 @@
+[[analysis-simple-analyzer]]
+=== Simple Analyzer
+
+An analyzer of type `simple` that is built using a
+<<analysis-lowercase-tokenizer,Lower
+Case Tokenizer>>.
diff --git a/docs/reference/analysis/analyzers/snowball-analyzer.asciidoc b/docs/reference/analysis/analyzers/snowball-analyzer.asciidoc
new file mode 100644
index 0000000..64804fc
--- /dev/null
+++ b/docs/reference/analysis/analyzers/snowball-analyzer.asciidoc
@@ -0,0 +1,64 @@
+[[analysis-snowball-analyzer]]
+=== Snowball Analyzer
+
+An analyzer of type `snowball` that uses the
+<<analysis-standard-tokenizer,standard
+tokenizer>>, with
+<<analysis-standard-tokenfilter,standard
+filter>>,
+<<analysis-lowercase-tokenfilter,lowercase
+filter>>,
+<<analysis-stop-tokenfilter,stop
+filter>>, and
+<<analysis-snowball-tokenfilter,snowball
+filter>>.
+
+The Snowball Analyzer is a stemming analyzer from Lucene that was
+originally based on the Snowball project from
+http://snowball.tartarus.org[snowball.tartarus.org].
+
+Sample usage:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "my_analyzer" : {
+ "type" : "snowball",
+ "language" : "English"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The `language` parameter can have the same values as the
+<<analysis-snowball-tokenfilter,snowball
+filter>> and defaults to `English`. Note that not all the language
+analyzers have a default set of stopwords provided.
+
+The `stopwords` parameter can be used to provide stopwords for the
+languages that have no defaults, or to simply replace the default set
+with your custom list. Check <<analysis-stop-analyzer,Stop Analyzer>>
+for more details. A default set of stopwords for many of these
+languages is available, for instance, from
+https://github.com/apache/lucene-solr/tree/trunk/lucene/analysis/common/src/resources/org/apache/lucene/analysis/[here]
+and
+https://github.com/apache/lucene-solr/tree/trunk/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball[here].
+
+A sample configuration (in YAML format) specifying Swedish with
+stopwords:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ my_analyzer:
+ type: snowball
+ language: Swedish
+ stopwords: "och,det,att,i,en,jag,hon,som,han,på,den,med,var,sig,för,så,till,är,men,ett,om,hade,de,av,icke,mig,du,henne,då,sin,nu,har,inte,hans,honom,skulle,hennes,där,min,man,ej,vid,kunde,något,från,ut,när,efter,upp,vi,dem,vara,vad,över,än,dig,kan,sina,här,ha,mot,alla,under,någon,allt,mycket,sedan,ju,denna,själv,detta,åt,utan,varit,hur,ingen,mitt,ni,bli,blev,oss,din,dessa,några,deras,blir,mina,samma,vilken,er,sådan,vår,blivit,dess,inom,mellan,sådant,varför,varje,vilka,ditt,vem,vilket,sitta,sådana,vart,dina,vars,vårt,våra,ert,era,vilkas"
+--------------------------------------------------
diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
new file mode 100644
index 0000000..4aae94a
--- /dev/null
+++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
@@ -0,0 +1,27 @@
+[[analysis-standard-analyzer]]
+=== Standard Analyzer
+
+An analyzer of type `standard` is built using the
+<<analysis-standard-tokenizer,Standard
+Tokenizer>> with the
+<<analysis-standard-tokenfilter,Standard
+Token Filter>>,
+<<analysis-lowercase-tokenfilter,Lower
+Case Token Filter>>, and
+<<analysis-stop-tokenfilter,Stop
+Token Filter>>.
+
+The following are settings that can be set for a `standard` analyzer
+type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`stopwords` |A list of stopwords to initialize the stop filter with.
+Defaults to an 'empty' stopword list added[1.0.0.Beta1, Previously
+defaulted to the English stopwords list]. Check
+<<analysis-stop-analyzer,Stop Analyzer>> for more details.
+|`max_token_length` |The maximum token length. If a token is seen that
+exceeds this length then it is discarded. Defaults to `255`.
+|=======================================================================
+
diff --git a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc
new file mode 100644
index 0000000..9a19772
--- /dev/null
+++ b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc
@@ -0,0 +1,22 @@
+[[analysis-stop-analyzer]]
+=== Stop Analyzer
+
+An analyzer of type `stop` that is built using a
+<<analysis-lowercase-tokenizer,Lower
+Case Tokenizer>>, with
+<<analysis-stop-tokenfilter,Stop
+Token Filter>>.
+
+The following are settings that can be set for a `stop` analyzer type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`stopwords` |A list of stopwords to initialize the stop filter with.
+Defaults to the English stop words.
+|`stopwords_path` |A path (either relative to `config` location, or
+absolute) to a stopwords file configuration.
+|=======================================================================
+
+Use `stopwords: _none_` to explicitly specify an 'empty' stopword list.
+
diff --git a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc
new file mode 100644
index 0000000..2095686
--- /dev/null
+++ b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc
@@ -0,0 +1,6 @@
+[[analysis-whitespace-analyzer]]
+=== Whitespace Analyzer
+
+An analyzer of type `whitespace` that is built using a
+<<analysis-whitespace-tokenizer,Whitespace
+Tokenizer>>.
diff --git a/docs/reference/analysis/charfilters.asciidoc b/docs/reference/analysis/charfilters.asciidoc
new file mode 100644
index 0000000..a40cfff
--- /dev/null
+++ b/docs/reference/analysis/charfilters.asciidoc
@@ -0,0 +1,16 @@
+[[analysis-charfilters]]
+== Character Filters
+
+Character filters are used to preprocess the string of
+characters before it is passed to the <<analysis-tokenizers,tokenizer>>.
+A character filter may be used to strip out HTML markup, or to convert
+`"&"` characters to the word `"and"`.
+
+Elasticsearch has a number of built in character filters which can be
+used to build <<analysis-custom-analyzer,custom analyzers>>.
+
+include::charfilters/mapping-charfilter.asciidoc[]
+
+include::charfilters/htmlstrip-charfilter.asciidoc[]
+
+include::charfilters/pattern-replace-charfilter.asciidoc[]
diff --git a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc
new file mode 100644
index 0000000..f12238a
--- /dev/null
+++ b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc
@@ -0,0 +1,5 @@
+[[analysis-htmlstrip-charfilter]]
+=== HTML Strip Char Filter
+
+A char filter of type `html_strip` that strips out HTML elements from
+the analyzed text.
diff --git a/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc b/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc
new file mode 100644
index 0000000..ef4df81
--- /dev/null
+++ b/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc
@@ -0,0 +1,38 @@
+[[analysis-mapping-charfilter]]
+=== Mapping Char Filter
+
+A char filter of type `mapping` that replaces the characters of the
+analyzed text according to a given mapping.
+
+Here is a sample configuration:
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "char_filter" : {
+                "my_mapping" : {
+                    "type" : "mapping",
+                    "mappings" : ["ph=>f", "qu=>q"]
+                }
+            },
+            "analyzer" : {
+                "custom_with_char_filter" : {
+                    "tokenizer" : "standard",
+                    "char_filter" : ["my_mapping"]
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+Alternatively, the `mappings_path` setting can point to a file
+containing the list of character mappings:
+
+[source,js]
+--------------------------------------------------
+ph => f
+qu => k
+--------------------------------------------------
diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc
new file mode 100644
index 0000000..5a0cf28
--- /dev/null
+++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc
@@ -0,0 +1,37 @@
+[[analysis-pattern-replace-charfilter]]
+=== Pattern Replace Char Filter
+
+The `pattern_replace` char filter allows the use of a regex to
+manipulate the characters in a string before analysis. The regular
+expression is defined using the `pattern` parameter, and the replacement
+string can be provided using the `replacement` parameter (supporting
+referencing the original text, as explained
+http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here]).
+For more information check the
+http://lucene.apache.org/core/4_3_1/analyzers-common/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.html[Lucene
+documentation].
+
+Here is a sample configuration:
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "char_filter" : {
+                "my_pattern" : {
+                    "type" : "pattern_replace",
+                    "pattern" : "sample(.*)",
+                    "replacement" : "replacedSample $1"
+                }
+            },
+            "analyzer" : {
+                "custom_with_char_filter" : {
+                    "tokenizer" : "standard",
+                    "char_filter" : ["my_pattern"]
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
diff --git a/docs/reference/analysis/icu-plugin.asciidoc b/docs/reference/analysis/icu-plugin.asciidoc
new file mode 100644
index 0000000..c1be216
--- /dev/null
+++ b/docs/reference/analysis/icu-plugin.asciidoc
@@ -0,0 +1,220 @@
+[[analysis-icu-plugin]]
+== ICU Analysis Plugin
+
+The http://icu-project.org/[ICU] analysis plugin allows for Unicode
+normalization, collation and folding. The plugin is called
+https://github.com/elasticsearch/elasticsearch-analysis-icu[elasticsearch-analysis-icu].
+
+The plugin includes the following analysis components:
+
+[float]
+[[icu-normalization]]
+=== ICU Normalization
+
+Normalizes characters as explained
+http://userguide.icu-project.org/transforms/normalization[here]. It
+registers itself by default under `icu_normalizer` or `icuNormalizer`
+using the default settings. A `name` parameter can be provided, which
+can take the following values: `nfc`, `nfkc`, and `nfkc_cf`.
+Here is a sample configuration:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "normalization" : {
+ "tokenizer" : "keyword",
+ "filter" : ["icu_normalizer"]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+[[icu-folding]]
+=== ICU Folding
+
+Folding of Unicode characters based on `UTR#30`. It registers itself
+under the `icu_folding` and `icuFolding` names.
+The filter also does lowercasing, which means the lowercase filter can
+normally be left out. Sample setting:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "folding" : {
+ "tokenizer" : "keyword",
+ "filter" : ["icu_folding"]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+[[icu-filtering]]
+==== Filtering
+
+The folding can be filtered by a set of unicode characters with the
+parameter `unicodeSetFilter`. This is useful for a non-internationalized
+search engine where retaining a set of national characters which are
+primary letters in a specific language is wanted. See syntax for the
+UnicodeSet
+http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[here].
+
+The following example exempts Swedish characters from the folding. Note
+that the filtered characters are NOT lowercased, which is why we add the
+`lowercase` filter below.
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "analyzer" : {
+                "folding" : {
+                    "tokenizer" : "standard",
+                    "filter" : ["my_icu_folding", "lowercase"]
+                }
+            },
+            "filter" : {
+                "my_icu_folding" : {
+                    "type" : "icu_folding",
+                    "unicodeSetFilter" : "[^åäöÅÄÖ]"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+[float]
+[[icu-collation]]
+=== ICU Collation
+
+Uses a collation token filter. The rules for collation (defined
+http://www.icu-project.org/userguide/Collate_Customization.html[here])
+can be specified either with the `rules` parameter (which can point to
+a file location, relative to the config location, or contain the rules
+inline in the settings), or with the `language` parameter (further
+specialized by country and variant). By default it registers under
+`icu_collation` or `icuCollation` and uses the default locale.
+
+Here is a sample configuration:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "collation" : {
+ "tokenizer" : "keyword",
+ "filter" : ["icu_collation"]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+And here is a sample of custom collation:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "collation" : {
+ "tokenizer" : "keyword",
+ "filter" : ["myCollator"]
+ }
+ },
+ "filter" : {
+ "myCollator" : {
+ "type" : "icu_collation",
+ "language" : "en"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Options
+
+[horizontal]
+`strength`::
+ The strength property determines the minimum level of difference considered significant during comparison.
+ The default strength for the Collator is `tertiary`, unless specified otherwise by the locale used to create the Collator.
+ Possible values: `primary`, `secondary`, `tertiary`, `quaternary` or `identical`.
+ +
+    See the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/Collator.html[ICU Collation] documentation for a more detailed
+    explanation of the specific values.
+
+`decomposition`::
+ Possible values: `no` or `canonical`. Defaults to `no`. Setting this decomposition property with
+ `canonical` allows the Collator to handle un-normalized text properly, producing the same results as if the text were
+    normalized. If `no` is set, it is the user's responsibility to ensure that all text is already in the appropriate form
+ before a comparison or before getting a CollationKey. Adjusting decomposition mode allows the user to select between
+ faster and more complete collation behavior. Since a great many of the world's languages do not require text
+ normalization, most locales set `no` as the default decomposition mode.
+
+[float]
+==== Expert options
+
+[horizontal]
+`alternate`::
+    Possible values: `shifted` or `non-ignorable`. Sets the alternate handling for strength `quaternary`
+    to be either shifted or non-ignorable, which boils down to ignoring punctuation and whitespace.
+
+`caseLevel`::
+ Possible values: `true` or `false`. Default is `false`. Whether case level sorting is required. When
+ strength is set to `primary` this will ignore accent differences.
+
+`caseFirst`::
+ Possible values: `lower` or `upper`. Useful to control which case is sorted first when case is not ignored
+ for strength `tertiary`.
+
+`numeric`::
+ Possible values: `true` or `false`. Whether digits are sorted according to numeric representation. For
+ example the value `egg-9` is sorted before the value `egg-21`. Defaults to `false`.
+
+`variableTop`::
+ Single character or contraction. Controls what is variable for `alternate`.
+
+`hiraganaQuaternaryMode`::
+    Possible values: `true` or `false`. Defaults to `false`. Distinguishes between Katakana and
+    Hiragana characters at `quaternary` strength.
+
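+As a sketch, several of the options above might be combined in a single
+collation filter like this (the filter name and the chosen option
+values are illustrative, not from the original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_collator" : {
+                    "type" : "icu_collation",
+                    "language" : "de",
+                    "strength" : "primary",
+                    "numeric" : true
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+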
+[float]
+=== ICU Tokenizer
+
+Breaks text into words according to UAX #29: Unicode Text Segmentation (http://www.unicode.org/reports/tr29/).
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "analyzer" : {
+                "my_icu_analyzer" : {
+                    "tokenizer" : "icu_tokenizer"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc
new file mode 100644
index 0000000..ad72fb7
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters.asciidoc
@@ -0,0 +1,74 @@
+[[analysis-tokenfilters]]
+== Token Filters
+
+Token filters accept a stream of tokens from a
+<<analysis-tokenizers,tokenizer>> and can modify tokens
+(eg lowercasing), delete tokens (eg remove stopwords)
+or add tokens (eg synonyms).
+
+Elasticsearch has a number of built in token filters which can be
+used to build <<analysis-custom-analyzer,custom analyzers>>.
+
+include::tokenfilters/standard-tokenfilter.asciidoc[]
+
+include::tokenfilters/asciifolding-tokenfilter.asciidoc[]
+
+include::tokenfilters/length-tokenfilter.asciidoc[]
+
+include::tokenfilters/lowercase-tokenfilter.asciidoc[]
+
+include::tokenfilters/ngram-tokenfilter.asciidoc[]
+
+include::tokenfilters/edgengram-tokenfilter.asciidoc[]
+
+include::tokenfilters/porterstem-tokenfilter.asciidoc[]
+
+include::tokenfilters/shingle-tokenfilter.asciidoc[]
+
+include::tokenfilters/stop-tokenfilter.asciidoc[]
+
+include::tokenfilters/word-delimiter-tokenfilter.asciidoc[]
+
+include::tokenfilters/stemmer-tokenfilter.asciidoc[]
+
+include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]
+
+include::tokenfilters/keyword-marker-tokenfilter.asciidoc[]
+
+include::tokenfilters/keyword-repeat-tokenfilter.asciidoc[]
+
+include::tokenfilters/kstem-tokenfilter.asciidoc[]
+
+include::tokenfilters/snowball-tokenfilter.asciidoc[]
+
+include::tokenfilters/phonetic-tokenfilter.asciidoc[]
+
+include::tokenfilters/synonym-tokenfilter.asciidoc[]
+
+include::tokenfilters/compound-word-tokenfilter.asciidoc[]
+
+include::tokenfilters/reverse-tokenfilter.asciidoc[]
+
+include::tokenfilters/elision-tokenfilter.asciidoc[]
+
+include::tokenfilters/truncate-tokenfilter.asciidoc[]
+
+include::tokenfilters/unique-tokenfilter.asciidoc[]
+
+include::tokenfilters/pattern-capture-tokenfilter.asciidoc[]
+
+include::tokenfilters/pattern_replace-tokenfilter.asciidoc[]
+
+include::tokenfilters/trim-tokenfilter.asciidoc[]
+
+include::tokenfilters/limit-token-count-tokenfilter.asciidoc[]
+
+include::tokenfilters/hunspell-tokenfilter.asciidoc[]
+
+include::tokenfilters/common-grams-tokenfilter.asciidoc[]
+
+include::tokenfilters/normalization-tokenfilter.asciidoc[]
+
+include::tokenfilters/delimited-payload-tokenfilter.asciidoc[]
+
+include::tokenfilters/keep-words-tokenfilter.asciidoc[]
diff --git a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc
new file mode 100644
index 0000000..aaca0eb
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc
@@ -0,0 +1,7 @@
+[[analysis-asciifolding-tokenfilter]]
+=== ASCII Folding Token Filter
+
+A token filter of type `asciifolding` that converts alphabetic, numeric,
+and symbolic Unicode characters which are not in the first 127 ASCII
+characters (the "Basic Latin" Unicode block) into their ASCII
+equivalents, if one exists.
diff --git a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
new file mode 100644
index 0000000..f0659e0
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
@@ -0,0 +1,61 @@
+[[analysis-common-grams-tokenfilter]]
+=== Common Grams Token Filter
+
+Token filter that generates bigrams for frequently occurring terms.
+Single terms are still indexed. It can be used as an alternative to the
+<<analysis-stop-tokenfilter,Stop
+Token Filter>> when we don't want to completely ignore common terms.
+
+For example, the text "the quick brown is a fox" will be tokenized as
+"the", "the_quick", "quick", "brown", "brown_is", "is_a", "a_fox",
+"fox", assuming "the", "is" and "a" are common words.
+
+When `query_mode` is enabled, the token filter removes common words and
+single terms followed by a common word. This parameter should be enabled
+in the search analyzer.
+
+For example, the query "the quick brown is a fox" will be tokenized as
+"the_quick", "quick", "brown_is", "is_a", "a_fox", "fox".
+
+The following are settings that can be set:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`common_words` |A list of common words to use.
+
+|`common_words_path` |A path (either relative to `config` location, or
+absolute) to a list of common words. Each word should be in its own
+"line" (separated by a line break). The file must be UTF-8 encoded.
+
+|`ignore_case` |If true, common words matching will be case insensitive
+(defaults to `false`).
+
+|`query_mode` |Generates bigrams then removes common words and single
+terms followed by a common word (defaults to `false`).
+|=======================================================================
+
+Note, either the `common_words` or `common_words_path` field is required.
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ index_grams :
+ tokenizer : whitespace
+ filter : [common_grams]
+ search_grams :
+ tokenizer : whitespace
+ filter : [common_grams_query]
+ filter :
+ common_grams :
+ type : common_grams
+ common_words: [a, an, the]
+ common_grams_query :
+ type : common_grams
+ query_mode: true
+ common_words: [a, an, the]
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc
new file mode 100644
index 0000000..6719a9c
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc
@@ -0,0 +1,48 @@
+[[analysis-compound-word-tokenfilter]]
+=== Compound Word Token Filter
+
+Token filters that decompose compound words. There are two
+types available: `dictionary_decompounder` and
+`hyphenation_decompounder`.
+
+The following are settings that can be set for a compound word token
+filter type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`word_list` |A list of words to use.
+
+|`word_list_path` |A path (either relative to `config` location, or
+absolute) to a list of words.
+
+|`min_word_size` |Minimum word size (integer). Defaults to `5`.
+
+|`min_subword_size` |Minimum subword size (integer). Defaults to `2`.
+
+|`max_subword_size` |Maximum subword size (integer). Defaults to `15`.
+
+|`only_longest_match` |Whether to match only the longest subword
+(boolean). Defaults to `false`.
+|=======================================================================
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ myAnalyzer2 :
+ type : custom
+ tokenizer : standard
+ filter : [myTokenFilter1, myTokenFilter2]
+ filter :
+ myTokenFilter1 :
+ type : dictionary_decompounder
+ word_list: [one, two, three]
+ myTokenFilter2 :
+ type : hyphenation_decompounder
+ word_list_path: path/to/words.txt
+ max_subword_size : 22
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc
new file mode 100644
index 0000000..293b51a
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc
@@ -0,0 +1,16 @@
+[[analysis-delimited-payload-tokenfilter]]
+=== Delimited Payload Token Filter
+
+Named `delimited_payload_filter`. Splits tokens into tokens and payloads
+whenever a delimiter character is found.
+
+Example: "the|1 quick|2 fox|3" is split by default into the tokens `the`,
+`quick` and `fox` with payloads `1`, `2` and `3` respectively.
+
+Parameters:
+
+`delimiter`::
+    Character used for splitting the tokens. Default is `|`.
+
+`encoding`::
+    The type of the payload. `int` for integer, `float` for float and
+    `identity` for characters. Default is `float`.
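+
+A sketch of a filter definition using both parameters (the filter name
+is illustrative, not from the original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_payloads" : {
+                    "type" : "delimited_payload_filter",
+                    "delimiter" : "+",
+                    "encoding" : "int"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------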
diff --git a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc
new file mode 100644
index 0000000..3ba0ede
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc
@@ -0,0 +1,16 @@
+[[analysis-edgengram-tokenfilter]]
+=== Edge NGram Token Filter
+
+A token filter of type `edgeNGram`.
+
+The following are settings that can be set for an `edgeNGram` token
+filter type:
+
+[cols="<,<",options="header",]
+|======================================================
+|Setting |Description
+|`min_gram` |Defaults to `1`.
+|`max_gram` |Defaults to `2`.
+|`side` |Either `front` or `back`. Defaults to `front`.
+|======================================================
+
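+For example, a sketch of an `edgeNGram` filter producing front grams of
+length 2 to 5 (the filter name is illustrative, not from the original
+docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_edge_ngram" : {
+                    "type" : "edgeNGram",
+                    "min_gram" : 2,
+                    "max_gram" : 5,
+                    "side" : "front"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------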
diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
new file mode 100644
index 0000000..c44ccff
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
@@ -0,0 +1,28 @@
+[[analysis-elision-tokenfilter]]
+=== Elision Token Filter
+
+A token filter which removes elisions. For example, "l'avion" (the
+plane) will be tokenized as "avion" (plane).
+
+Accepts an `articles` setting, which is a set of stop-word articles. For
+example:
+
+[source,js]
+--------------------------------------------------
+"index" : {
+ "analysis" : {
+ "analyzer" : {
+ "default" : {
+ "tokenizer" : "standard",
+ "filter" : ["standard", "elision"]
+ }
+ },
+ "filter" : {
+ "elision" : {
+ "type" : "elision",
+ "articles" : ["l", "m", "t", "qu", "n", "s", "j"]
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
new file mode 100644
index 0000000..9a235dd
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
@@ -0,0 +1,115 @@
+[[analysis-hunspell-tokenfilter]]
+=== Hunspell Token Filter
+
+Basic support for hunspell stemming. Hunspell dictionaries will be
+picked up from a dedicated hunspell directory on the filesystem
+(defaults to `<path.conf>/hunspell`). Each dictionary is expected to
+have its own directory named after its associated locale (language).
+This dictionary directory is expected to hold both the \*.aff and \*.dic
+files (all of which will automatically be picked up). For example,
+assuming the default hunspell location is used, the following directory
+layout will define the `en_US` dictionary:
+
+[source,js]
+--------------------------------------------------
+- conf
+ |-- hunspell
+ | |-- en_US
+ | | |-- en_US.dic
+ | | |-- en_US.aff
+--------------------------------------------------
+
+The location of the hunspell directory can be configured using the
+`indices.analysis.hunspell.dictionary.location` setting in
+_elasticsearch.yml_.
+
+Each dictionary can be configured with two settings:
+
+`ignore_case`::
+ If true, dictionary matching will be case insensitive
+ (defaults to `false`)
+
+`strict_affix_parsing`::
+    Determines whether errors while reading an
+    affix rules file will cause an exception or simply be ignored (defaults to
+    `true`)
+
+These settings can be configured globally in `elasticsearch.yml` using
+
+* `indices.analysis.hunspell.dictionary.ignore_case` and
+* `indices.analysis.hunspell.dictionary.strict_affix_parsing`
+
+or for specific dictionaries:
+
+* `indices.analysis.hunspell.dictionary.en_US.ignore_case` and
+* `indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing`.
+
+It is also possible to add a `settings.yml` file under the dictionary
+directory which holds these settings (this will override any other
+settings defined in the `elasticsearch.yml`).
+
+One can use the hunspell stem filter by configuring it in the analysis
+settings:
+
+[source,js]
+--------------------------------------------------
+{
+ "analysis" : {
+ "analyzer" : {
+ "en" : {
+ "tokenizer" : "standard",
+ "filter" : [ "lowercase", "en_US" ]
+ }
+ },
+ "filter" : {
+ "en_US" : {
+ "type" : "hunspell",
+ "locale" : "en_US",
+ "dedup" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The hunspell token filter accepts four options:
+
+`locale`::
+    A locale for this filter. If this is unset, the `lang` or
+    `language` parameters are used instead - so one of these has to be set.
+
+`dictionary`::
+    The name of a dictionary. The path to your hunspell
+    dictionaries should be configured via
+    `indices.analysis.hunspell.dictionary.location` beforehand.
+
+`dedup`::
+ If only unique terms should be returned, this needs to be
+ set to `true`. Defaults to `true`.
+
+`recursion_level`::
+ Configures the recursion level a
+    stemmer can go into. Defaults to `2`. Some languages (for example Czech)
+    give better results when set to `1` or `0`, so you should test it out.
+
+NOTE: As opposed to the snowball stemmers (which are algorithm based)
+this is a dictionary lookup based stemmer and therefore the quality of
+the stemming is determined by the quality of the dictionary.
+
+[float]
+==== References
+
+Hunspell is a spell checker and morphological analyzer designed for
+languages with rich morphology and complex word compounding and
+character encoding.
+
+1. Wikipedia, http://en.wikipedia.org/wiki/Hunspell
+
+2. Source code, http://hunspell.sourceforge.net/
+
+3. Open Office Hunspell dictionaries, http://wiki.openoffice.org/wiki/Dictionaries
+
+4. Mozilla Hunspell dictionaries, https://addons.mozilla.org/en-US/firefox/language-tools/
+
+5. Chromium Hunspell dictionaries,
+ http://src.chromium.org/viewvc/chrome/trunk/deps/third_party/hunspell_dictionaries/
diff --git a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
new file mode 100644
index 0000000..e4abbef
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
@@ -0,0 +1,49 @@
+[[analysis-keep-words-tokenfilter]]
+=== Keep Words Token Filter
+
+A token filter of type `keep` that only keeps tokens with text contained in a
+predefined set of words. The set of words can be defined in the settings or
+loaded from a text file containing one word per line.
+
+
+[float]
+=== Options
+[horizontal]
+`keep_words`:: a list of words to keep
+`keep_words_path`:: a path to a words file
+`keep_words_case`:: a boolean indicating whether to lower case the words (defaults to `false`)
+
+
+
+[float]
+=== Settings example
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "my_analyzer" : {
+ "tokenizer" : "standard",
+ "filter" : ["standard", "lowercase", "words_till_three"]
+ },
+ "my_analyzer1" : {
+ "tokenizer" : "standard",
+ "filter" : ["standard", "lowercase", "words_on_file"]
+ }
+ },
+ "filter" : {
+ "words_till_three" : {
+ "type" : "keep",
+ "keep_words" : [ "one", "two", "three"]
+ },
+ "words_on_file" : {
+ "type" : "keep",
+ "keep_words_path" : "/path/to/word/file"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
new file mode 100644
index 0000000..465bfd5
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
@@ -0,0 +1,34 @@
+[[analysis-keyword-marker-tokenfilter]]
+=== Keyword Marker Token Filter
+
+Protects words from being modified by stemmers. Must be placed before
+any stemming filters.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`keywords` |A list of words to use.
+
+|`keywords_path` |A path (either relative to `config` location, or
+absolute) to a list of words.
+
+|`ignore_case` |Set to `true` to lower case all words first. Defaults to
+`false`.
+|=======================================================================
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+    analysis :
+        analyzer :
+            myAnalyzer :
+                type : custom
+                tokenizer : standard
+                filter : [lowercase, protwords, porter_stem]
+        filter :
+            protwords :
+                type : keyword_marker
+                keywords_path : analysis/protwords.txt
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
new file mode 100644
index 0000000..fddab34
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
@@ -0,0 +1,26 @@
+[[analysis-keyword-repeat-tokenfilter]]
+=== Keyword Repeat Token Filter
+
+The `keyword_repeat` token filter emits each incoming token twice, once
+as a keyword and once as a non-keyword, to allow an unstemmed version of
+a term to be indexed side by side with the stemmed version of the term.
+Given the nature of this filter, each token that isn't transformed by a
+subsequent stemmer will be indexed twice. Therefore, consider adding a
+`unique` filter with `only_on_same_position` set to `true` to drop
+unnecessary duplicates.
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+    analysis :
+        analyzer :
+            myAnalyzer :
+                type : custom
+                tokenizer : standard
+                filter : [lowercase, keyword_repeat, porter_stem, unique_stem]
+        filter :
+            unique_stem :
+                type : unique
+                only_on_same_position : true
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc
new file mode 100644
index 0000000..ff0695e
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc
@@ -0,0 +1,6 @@
+[[analysis-kstem-tokenfilter]]
+=== KStem Token Filter
+
+The `kstem` token filter is a high performance filter for English. All
+terms must already be lowercased (use the `lowercase` filter) for this
+filter to work correctly.
diff --git a/docs/reference/analysis/tokenfilters/length-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/length-tokenfilter.asciidoc
new file mode 100644
index 0000000..2651980
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/length-tokenfilter.asciidoc
@@ -0,0 +1,16 @@
+[[analysis-length-tokenfilter]]
+=== Length Token Filter
+
+A token filter of type `length` that removes words that are too long or
+too short for the stream.
+
+The following are settings that can be set for a `length` token filter
+type:
+
+[cols="<,<",options="header",]
+|===========================================================
+|Setting |Description
+|`min` |The minimum token length. Defaults to `0`.
+|`max` |The maximum token length. Defaults to `Integer.MAX_VALUE`.
+|===========================================================
+
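+For example, a sketch of a `length` filter that drops tokens shorter
+than 2 or longer than 20 characters (the filter name is illustrative,
+not from the original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_length" : {
+                    "type" : "length",
+                    "min" : 2,
+                    "max" : 20
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------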
diff --git a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc
new file mode 100644
index 0000000..a6598be
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc
@@ -0,0 +1,32 @@
+[[analysis-limit-token-count-tokenfilter]]
+=== Limit Token Count Token Filter
+
+Limits the number of tokens that are indexed per document and field.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`max_token_count` |The maximum number of tokens that should be indexed
+per document and field. The default is `1`.
+
+|`consume_all_tokens` |If set to `true` the filter exhausts the stream
+even if `max_token_count` tokens have been consumed already. The default
+is `false`.
+|=======================================================================
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ myAnalyzer :
+ type : custom
+ tokenizer : standard
+ filter : [lowercase, five_token_limit]
+ filter :
+ five_token_limit :
+ type : limit
+ max_token_count : 5
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc
new file mode 100644
index 0000000..857c0d7
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc
@@ -0,0 +1,37 @@
+[[analysis-lowercase-tokenfilter]]
+=== Lowercase Token Filter
+
+A token filter of type `lowercase` that normalizes token text to lower
+case.
+
+The lowercase token filter supports Greek and Turkish lowercasing
+through the `language` parameter. Below is a usage example in a custom
+analyzer:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ myAnalyzer2 :
+ type : custom
+ tokenizer : myTokenizer1
+ filter : [myTokenFilter1, myGreekLowerCaseFilter]
+ char_filter : [my_html]
+ tokenizer :
+ myTokenizer1 :
+ type : standard
+ max_token_length : 900
+ filter :
+ myTokenFilter1 :
+ type : stop
+ stopwords : [stop1, stop2, stop3, stop4]
+ myGreekLowerCaseFilter :
+ type : lowercase
+ language : greek
+ char_filter :
+ my_html :
+ type : html_strip
+ escaped_tags : [xxx, yyy]
+ read_ahead : 1024
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc
new file mode 100644
index 0000000..5f91136
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc
@@ -0,0 +1,15 @@
+[[analysis-ngram-tokenfilter]]
+=== NGram Token Filter
+
+A token filter of type `nGram`.
+
+The following are settings that can be set for an `nGram` token filter
+type:
+
+[cols="<,<",options="header",]
+|============================
+|Setting |Description
+|`min_gram` |Defaults to `1`.
+|`max_gram` |Defaults to `2`.
+|============================
+
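+For example, a sketch of an `nGram` filter emitting trigrams only (the
+filter name is illustrative, not from the original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_trigrams" : {
+                    "type" : "nGram",
+                    "min_gram" : 3,
+                    "max_gram" : 3
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------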
diff --git a/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc
new file mode 100644
index 0000000..8751872
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc
@@ -0,0 +1,15 @@
+[[analysis-normalization-tokenfilter]]
+=== Normalization Token Filter
+
+There are several token filters available which try to normalize special
+characters of a certain language.
+
+You can currently choose between `arabic_normalization` and
+`persian_normalization` normalization in your token filter
+configuration. For more information check the
+http://lucene.apache.org/core/4_3_1/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizer.html[ArabicNormalizer]
+or the
+http://lucene.apache.org/core/4_3_1/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizer.html[PersianNormalizer]
+documentation.
+
+*Note:* These filters are available since `0.90.2`.
diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
new file mode 100644
index 0000000..4091296
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
@@ -0,0 +1,134 @@
+[[analysis-pattern-capture-tokenfilter]]
+=== Pattern Capture Token Filter
+
+The `pattern_capture` token filter, unlike the `pattern` tokenizer,
+emits a token for every capture group in the regular expression.
+Patterns are not anchored to the beginning and end of the string, so
+each pattern can match multiple times, and matches are allowed to
+overlap.
+
+For instance, a pattern like:
+
+[source,js]
+--------------------------------------------------
+"(([a-z]+)(\d*))"
+--------------------------------------------------
+
+when matched against:
+
+[source,js]
+--------------------------------------------------
+"abc123def456"
+--------------------------------------------------
+
+would produce the tokens: [ `abc123`, `abc`, `123`, `def456`, `def`,
+`456` ]
+
+If `preserve_original` is set to `true` (the default) then it would also
+emit the original token: `abc123def456`.
+
+This is particularly useful for indexing text like camel-case code, eg
+`stripHTML`, where a user may search for `"strip html"` or `"striphtml"`:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/ -d '
+{
+ "settings" : {
+ "analysis" : {
+ "filter" : {
+ "code" : {
+ "type" : "pattern_capture",
+ "preserve_original" : 1,
+ "patterns" : [
+ "(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)",
+ "(\\d+)"
+ ]
+ }
+ },
+ "analyzer" : {
+ "code" : {
+ "tokenizer" : "pattern",
+ "filter" : [ "code", "lowercase" ]
+ }
+ }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+When used to analyze the text
+
+[source,js]
+--------------------------------------------------
+import static org.apache.commons.lang.StringEscapeUtils.escapeHtml
+--------------------------------------------------
+
+this emits the tokens: [ `import`, `static`, `org`, `apache`, `commons`,
+`lang`, `stringescapeutils`, `string`, `escape`, `utils`, `escapehtml`,
+`escape`, `html` ]
+
+Another example is analyzing email addresses:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/ -d '
+{
+ "settings" : {
+ "analysis" : {
+ "filter" : {
+ "email" : {
+ "type" : "pattern_capture",
+ "preserve_original" : 1,
+ "patterns" : [
+ "(\\w+)",
+ "(\\p{L}+)",
+ "(\\d+)",
+ "@(.+)"
+ ]
+ }
+ },
+ "analyzer" : {
+ "email" : {
+ "tokenizer" : "uax_url_email",
+ "filter" : [ "email", "lowercase", "unique" ]
+ }
+ }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+When the above analyzer is used on an email address like:
+
+[source,js]
+--------------------------------------------------
+john-smith_123@foo-bar.com
+--------------------------------------------------
+
+it would produce the following tokens: [ `john-smith_123`,
+`foo-bar.com`, `john`, `smith_123`, `smith`, `123`, `foo`,
+`foo-bar.com`, `bar`, `com` ]
+
+Multiple patterns are required to allow overlapping captures, but this
+also means that the patterns are less dense and easier to understand.
+
+*Note:* All tokens are emitted in the same position, and with the same
+character offsets, so when combined with highlighting, the whole
+original token will be highlighted, not just the matching subset. For
+instance, querying the above email address for `"smith"` would
+highlight:
+
+[source,js]
+--------------------------------------------------
+ <em>john-smith_123@foo-bar.com</em>
+--------------------------------------------------
+
+not:
+
+[source,js]
+--------------------------------------------------
+ john-<em>smith</em>_123@foo-bar.com
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
new file mode 100644
index 0000000..54e0842
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
@@ -0,0 +1,9 @@
+[[analysis-pattern_replace-tokenfilter]]
+=== Pattern Replace Token Filter
+
+The `pattern_replace` token filter makes it easy to handle string
+replacements based on a regular expression. The regular expression is
+defined using the `pattern` parameter, and the replacement string can be
+provided using the `replacement` parameter (supporting referencing the
+original text, as explained
+http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here]).
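+
+A sketch of a filter that collapses runs of digits into a single `0`
+(the filter name and the pattern are illustrative, not from the
+original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "digits_to_zero" : {
+                    "type" : "pattern_replace",
+                    "pattern" : "\\d+",
+                    "replacement" : "0"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------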
diff --git a/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc
new file mode 100644
index 0000000..b7e9334
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc
@@ -0,0 +1,5 @@
+[[analysis-phonetic-tokenfilter]]
+=== Phonetic Token Filter
+
+The `phonetic` token filter is provided as a plugin and located
+https://github.com/elasticsearch/elasticsearch-analysis-phonetic[here].
diff --git a/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc
new file mode 100644
index 0000000..fc2edf5
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc
@@ -0,0 +1,15 @@
+[[analysis-porterstem-tokenfilter]]
+=== Porter Stem Token Filter
+
+A token filter of type `porter_stem` that transforms the token stream as
+per the Porter stemming algorithm.
+
+Note, the input to the stemming filter must already be in lower case, so
+you will need to use the
+<<analysis-lowercase-tokenfilter,Lower
+Case Token Filter>> or
+<<analysis-lowercase-tokenizer,Lower
+Case Tokenizer>> earlier in the analyzer chain in order for this to
+work properly. For example, when using a custom analyzer, make sure the
+`lowercase` filter comes before the `porter_stem` filter in the list of
+filters.
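+
+A minimal sketch of such an analyzer (the analyzer name is
+illustrative, not from the original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "analyzer" : {
+                "my_porter" : {
+                    "tokenizer" : "standard",
+                    "filter" : ["standard", "lowercase", "porter_stem"]
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------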
diff --git a/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc
new file mode 100644
index 0000000..b004998
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc
@@ -0,0 +1,4 @@
+[[analysis-reverse-tokenfilter]]
+=== Reverse Token Filter
+
+A token filter of type `reverse` that simply reverses each token.
diff --git a/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc
new file mode 100644
index 0000000..e3c6c44
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc
@@ -0,0 +1,36 @@
+[[analysis-shingle-tokenfilter]]
+=== Shingle Token Filter
+
+A token filter of type `shingle` that constructs shingles (token
+n-grams) from a token stream. In other words, it creates combinations of
+tokens as a single token. For example, the sentence "please divide this
+sentence into shingles" might be tokenized into shingles "please
+divide", "divide this", "this sentence", "sentence into", and "into
+shingles".
+
+This filter handles position increments > 1 by inserting filler tokens
+(tokens with termtext "_"). It does not handle a position increment of
+0.
+
+The following are settings that can be set for a `shingle` token filter
+type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`max_shingle_size` |The maximum shingle size. Defaults to `2`.
+
+|`min_shingle_size` |The minimum shingle size. Defaults to `2`.
+
+|`output_unigrams` |If `true` the output will contain the input tokens
+(unigrams) as well as the shingles. Defaults to `true`.
+
+|`output_unigrams_if_no_shingles` |If `output_unigrams` is `false` the
+output will contain the input tokens (unigrams) if no shingles are
+available. Note if `output_unigrams` is set to `true` this setting has
+no effect. Defaults to `false`.
+
+|`token_separator` |The string to use when joining adjacent tokens to
+form a shingle. Defaults to `" "`.
+|=======================================================================
+
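+For example, a sketch of a filter that emits unigrams plus shingles of
+up to three tokens (the filter name is illustrative, not from the
+original docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_shingles" : {
+                    "type" : "shingle",
+                    "min_shingle_size" : 2,
+                    "max_shingle_size" : 3,
+                    "output_unigrams" : true
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------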
diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
new file mode 100644
index 0000000..58d8898
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
@@ -0,0 +1,33 @@
+[[analysis-snowball-tokenfilter]]
+=== Snowball Token Filter
+
+A filter that stems words using a Snowball-generated stemmer. The
+`language` parameter controls the stemmer with the following available
+values: `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`,
+`Finnish`, `French`, `German`, `German2`, `Hungarian`, `Italian`, `Kp`,
+`Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, `Russian`,
+`Spanish`, `Swedish`, `Turkish`.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "my_analyzer" : {
+ "tokenizer" : "standard",
+ "filter" : ["standard", "lowercase", "my_snow"]
+ }
+ },
+ "filter" : {
+ "my_snow" : {
+ "type" : "snowball",
+ "language" : "Lovins"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc
new file mode 100644
index 0000000..3dd4fbf
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc
@@ -0,0 +1,7 @@
+[[analysis-standard-tokenfilter]]
+=== Standard Token Filter
+
+A token filter of type `standard` that normalizes tokens extracted with
+the
+<<analysis-standard-tokenizer,Standard
+Tokenizer>>.
diff --git a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
new file mode 100644
index 0000000..649366c
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
@@ -0,0 +1,34 @@
+[[analysis-stemmer-override-tokenfilter]]
+=== Stemmer Override Token Filter
+
+Overrides stemming algorithms by applying a custom mapping, then
+protecting these terms from being modified by stemmers. Must be placed
+before any stemming filters.
+
+In each rule, the token and its stem are separated by `=>`.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`rules` |A list of mapping rules to use.
+
+|`rules_path` |A path (either relative to `config` location, or
+absolute) to a list of mappings.
+|=======================================================================
+
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+index :
+ analysis :
+ analyzer :
+ myAnalyzer :
+ type : custom
+ tokenizer : standard
+ filter : [lowercase, custom_stems, porter_stem]
+ filter:
+ custom_stems:
+ type: stemmer_override
+ rules_path : analysis/custom_stems.txt
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
new file mode 100644
index 0000000..6526f37
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
@@ -0,0 +1,78 @@
+[[analysis-stemmer-tokenfilter]]
+=== Stemmer Token Filter
+
+A filter that stems words (similar to `snowball`, but with more
+options). The `language`/`name` parameter controls the stemmer with the
+following available values:
+
+http://lucene.apache.org/core/4_3_0/analyzers-common/index.html?org%2Fapache%2Flucene%2Fanalysis%2Far%2FArabicStemmer.html[arabic],
+http://snowball.tartarus.org/algorithms/armenian/stemmer.html[armenian],
+http://snowball.tartarus.org/algorithms/basque/stemmer.html[basque],
+http://lucene.apache.org/core/4_3_0/analyzers-common/index.html?org%2Fapache%2Flucene%2Fanalysis%2Fbr%2FBrazilianStemmer.html[brazilian],
+http://members.unine.ch/jacques.savoy/Papers/BUIR.pdf[bulgarian],
+http://snowball.tartarus.org/algorithms/catalan/stemmer.html[catalan],
+http://portal.acm.org/citation.cfm?id=1598600[czech],
+http://snowball.tartarus.org/algorithms/danish/stemmer.html[danish],
+http://snowball.tartarus.org/algorithms/dutch/stemmer.html[dutch],
+http://snowball.tartarus.org/algorithms/english/stemmer.html[english],
+http://snowball.tartarus.org/algorithms/finnish/stemmer.html[finnish],
+http://snowball.tartarus.org/algorithms/french/stemmer.html[french],
+http://snowball.tartarus.org/algorithms/german/stemmer.html[german],
+http://snowball.tartarus.org/algorithms/german2/stemmer.html[german2],
+http://sais.se/mthprize/2007/ntais2007.pdf[greek],
+http://snowball.tartarus.org/algorithms/hungarian/stemmer.html[hungarian],
+http://snowball.tartarus.org/algorithms/italian/stemmer.html[italian],
+http://snowball.tartarus.org/algorithms/kraaij_pohlmann/stemmer.html[kp],
+http://ciir.cs.umass.edu/pubfiles/ir-35.pdf[kstem],
+http://snowball.tartarus.org/algorithms/lovins/stemmer.html[lovins],
+http://lucene.apache.org/core/4_3_0/analyzers-common/index.html?org%2Fapache%2Flucene%2Fanalysis%2Flv%2FLatvianStemmer.html[latvian],
+http://snowball.tartarus.org/algorithms/norwegian/stemmer.html[norwegian],
+http://lucene.apache.org/core/4_3_0/analyzers-common/index.html?org%2Fapache%2Flucene%2Fanalysis%2Fno%2FNorwegianMinimalStemFilter.html[minimal_norwegian],
+http://snowball.tartarus.org/algorithms/porter/stemmer.html[porter],
+http://snowball.tartarus.org/algorithms/portuguese/stemmer.html[portuguese],
+http://snowball.tartarus.org/algorithms/romanian/stemmer.html[romanian],
+http://snowball.tartarus.org/algorithms/russian/stemmer.html[russian],
+http://snowball.tartarus.org/algorithms/spanish/stemmer.html[spanish],
+http://snowball.tartarus.org/algorithms/swedish/stemmer.html[swedish],
+http://snowball.tartarus.org/algorithms/turkish/stemmer.html[turkish],
+http://www.medialab.tfe.umu.se/courses/mdm0506a/material/fulltext_ID%3D10049387%26PLACEBO%3DIE.pdf[minimal_english],
+http://lucene.apache.org/core/4_3_0/analyzers-common/index.html?org%2Fapache%2Flucene%2Fanalysis%2Fen%2FEnglishPossessiveFilter.html[possessive_english],
+http://clef.isti.cnr.it/2003/WN_web/22.pdf[light_finnish],
+http://dl.acm.org/citation.cfm?id=1141523[light_french],
+http://dl.acm.org/citation.cfm?id=318984[minimal_french],
+http://dl.acm.org/citation.cfm?id=1141523[light_german],
+http://members.unine.ch/jacques.savoy/clef/morpho.pdf[minimal_german],
+http://computing.open.ac.uk/Sites/EACLSouthAsia/Papers/p6-Ramanathan.pdf[hindi],
+http://dl.acm.org/citation.cfm?id=1141523&dl=ACM&coll=DL&CFID=179095584&CFTOKEN=80067181[light_hungarian],
+http://www.illc.uva.nl/Publications/ResearchReports/MoL-2003-02.text.pdf[indonesian],
+http://www.ercim.eu/publication/ws-proceedings/CLEF2/savoy.pdf[light_italian],
+http://dl.acm.org/citation.cfm?id=1141523&dl=ACM&coll=DL&CFID=179095584&CFTOKEN=80067181[light_portuguese],
+http://www.inf.ufrgs.br/\~buriol/papers/Orengo_CLEF07.pdf[minimal_portuguese],
+http://www.inf.ufrgs.br/\~viviane/rslp/index.htm[portuguese],
+http://doc.rero.ch/lm.php?url=1000%2C43%2C4%2C20091209094227-CA%2FDolamic_Ljiljana_-_Indexing_and_Searching_Strategies_for_the_Russian_20091209.pdf[light_russian],
+http://www.ercim.eu/publication/ws-proceedings/CLEF2/savoy.pdf[light_spanish],
+http://clef.isti.cnr.it/2003/WN_web/22.pdf[light_swedish].
+
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "my_analyzer" : {
+ "tokenizer" : "standard",
+ "filter" : ["standard", "lowercase", "my_stemmer"]
+ }
+ },
+ "filter" : {
+ "my_stemmer" : {
+ "type" : "stemmer",
+ "name" : "light_german"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
new file mode 100644
index 0000000..14b3a32
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
@@ -0,0 +1,35 @@
+[[analysis-stop-tokenfilter]]
+=== Stop Token Filter
+
+A token filter of type `stop` that removes stop words from token
+streams.
+
+The following are settings that can be set for a `stop` token filter
+type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`stopwords` |A list of stop words to use. Defaults to the English stop
+words.
+
+|`stopwords_path` |A path (either relative to `config` location, or
+absolute) to a stopwords file configuration. Each stop word should be in
+its own "line" (separated by a line break). The file must be UTF-8
+encoded.
+
+|`ignore_case` |Set to `true` to lower case all words first. Defaults to
+`false`.
+
+|`remove_trailing` |Set to `false` in order to not ignore the last term of
+a search if it is a stop word. This is very useful for the completion
+suggester as a query like `green a` can be extended to `green apple` even
+though you remove stop words in general. Defaults to `true`.
+|=======================================================================
+
+The `stopwords` parameter allows for custom language-specific expansion
+of default stopwords. It follows the `_lang_` notation and supports:
+arabic, armenian, basque, brazilian, bulgarian, catalan, czech, danish,
+dutch, english, finnish, french, galician, german, greek, hindi,
+hungarian, indonesian, italian, norwegian, persian, portuguese,
+romanian, russian, spanish, swedish, turkish.
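+
+For example, a sketch of a `stop` filter using the `_english_`
+expansion (the filter name is illustrative, not from the original
+docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_stop" : {
+                    "type" : "stop",
+                    "stopwords" : "_english_",
+                    "ignore_case" : true
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------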
diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
new file mode 100644
index 0000000..ce6e1ed
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
@@ -0,0 +1,123 @@
+[[analysis-synonym-tokenfilter]]
+=== Synonym Token Filter
+
+The `synonym` token filter makes it easy to handle synonyms during the
+analysis process. Synonyms are configured using a configuration file.
+Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "analysis" : {
+ "analyzer" : {
+ "synonym" : {
+ "tokenizer" : "whitespace",
+ "filter" : ["synonym"]
+ }
+ },
+ "filter" : {
+ "synonym" : {
+ "type" : "synonym",
+ "synonyms_path" : "analysis/synonym.txt"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The above configures a `synonym` filter, with a path of
+`analysis/synonym.txt` (relative to the `config` location). The
+`synonym` analyzer is then configured with the filter. Additional
+settings are: `ignore_case` (defaults to `false`), and `expand`
+(defaults to `true`).
+
+The `tokenizer` parameter controls the tokenizer that will be used to
+tokenize the synonyms, and defaults to the `whitespace` tokenizer.
+
+Two synonym formats are supported: Solr and WordNet.
+
+[float]
+==== Solr synonyms
+
+The following is a sample format of the file:
+
+[source,js]
+--------------------------------------------------
+# blank lines and lines starting with pound are comments.
+
+#Explicit mappings match any token sequence on the LHS of "=>"
+#and replace with all alternatives on the RHS. These types of mappings
+#ignore the expand parameter in the schema.
+#Examples:
+i-pod, i pod => ipod,
+sea biscuit, sea biscit => seabiscuit
+
+#Equivalent synonyms may be separated with commas and give
+#no explicit mapping. In this case the mapping behavior will
+#be taken from the expand parameter in the schema. This allows
+#the same synonym file to be used in different synonym handling strategies.
+#Examples:
+ipod, i-pod, i pod
+foozball , foosball
+universe , cosmos
+
+# If expand==true, "ipod, i-pod, i pod" is equivalent to the explicit mapping:
+ipod, i-pod, i pod => ipod, i-pod, i pod
+# If expand==false, "ipod, i-pod, i pod" is equivalent to the explicit mapping:
+ipod, i-pod, i pod => ipod
+
+#multiple synonym mapping entries are merged.
+foo => foo bar
+foo => baz
+#is equivalent to
+foo => foo bar, baz
+--------------------------------------------------
+
+You can also define synonyms for the filter directly in the
+configuration file (note use of `synonyms` instead of `synonyms_path`):
+
+[source,js]
+--------------------------------------------------
+{
+ "filter" : {
+ "synonym" : {
+ "type" : "synonym",
+ "synonyms" : [
+ "i-pod, i pod => ipod",
+ "universe, cosmos"
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+However, it is recommended to define large synonym sets in a file using
+`synonyms_path`.
+
+[float]
+==== WordNet synonyms
+
+Synonyms based on http://wordnet.princeton.edu/[WordNet] format can be
+declared using `format`:
+
+[source,js]
+--------------------------------------------------
+{
+ "filter" : {
+ "synonym" : {
+ "type" : "synonym",
+ "format" : "wordnet",
+ "synonyms" : [
+ "s(100000001,1,'abstain',v,1,0).",
+ "s(100000001,2,'refrain',v,1,0).",
+ "s(100000001,3,'desist',v,1,0)."
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+Using `synonyms_path` to define WordNet synonyms in a file is supported
+as well.
diff --git a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc
new file mode 100644
index 0000000..34a0e93
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc
@@ -0,0 +1,4 @@
+[[analysis-trim-tokenfilter]]
+=== Trim Token Filter
+
+The `trim` token filter trims the whitespace surrounding a token.
diff --git a/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc
new file mode 100644
index 0000000..14652f4
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc
@@ -0,0 +1,10 @@
+[[analysis-truncate-tokenfilter]]
+=== Truncate Token Filter
+
+The `truncate` token filter can be used to truncate tokens to a
+specific length. This can come in handy with keyword (single token)
+based mapped fields that are used for sorting in order to reduce memory
+usage.
+
+It accepts a `length` parameter which controls the number of characters
+to truncate to, and defaults to `10`.
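+
+For example, a sketch of a filter that truncates tokens to 20
+characters (the filter name is illustrative, not from the original
+docs):
+
+[source,js]
+--------------------------------------------------
+{
+    "index" : {
+        "analysis" : {
+            "filter" : {
+                "my_truncate" : {
+                    "type" : "truncate",
+                    "length" : 20
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------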
diff --git a/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc
new file mode 100644
index 0000000..8b42f6b
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc
@@ -0,0 +1,7 @@
+[[analysis-unique-tokenfilter]]
+=== Unique Token Filter
+
+The `unique` token filter can be used to only index unique tokens during
+analysis. By default it is applied to the whole token stream. If
+`only_on_same_position` is set to `true`, it will only remove duplicate
+tokens at the same position.
diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc
new file mode 100644
index 0000000..9ce81e1
--- /dev/null
+++ b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc
@@ -0,0 +1,80 @@
+[[analysis-word-delimiter-tokenfilter]]
+=== Word Delimiter Token Filter
+
+Named `word_delimiter`, it splits words into subwords and performs
+optional transformations on subword groups. Words are split into
+subwords with the following rules:
+
+* split on intra-word delimiters (by default, all non alpha-numeric
+characters): "Wi-Fi" -> "Wi", "Fi"
+* split on case transitions: "PowerShot" -> "Power", "Shot"
+* split on letter-number transitions: "SD500" -> "SD", "500"
+* leading and trailing intra-word delimiters on each subword are
+ignored: "//hello---there, 'dude'" -> "hello", "there", "dude"
+* trailing "'s" are removed for each subword: "O'Neil's" -> "O", "Neil"
+
+Parameters include:
+
+`generate_word_parts`::
+ If `true` causes parts of words to be
+ generated: "PowerShot" => "Power" "Shot". Defaults to `true`.
+
+`generate_number_parts`::
+ If `true` causes number subwords to be
+ generated: "500-42" => "500" "42". Defaults to `true`.
+
+`catenate_words`::
+ If `true` causes maximum runs of word parts to be
+ catenated: "wi-fi" => "wifi". Defaults to `false`.
+
+`catenate_numbers`::
+ If `true` causes maximum runs of number parts to
+ be catenated: "500-42" => "50042". Defaults to `false`.
+
+`catenate_all`::
+ If `true` causes all subword parts to be catenated:
+ "wi-fi-4000" => "wifi4000". Defaults to `false`.
+
+`split_on_case_change`::
+    If `true` causes "PowerShot" to be two tokens
+    ("Power-Shot" remains two parts regardless). Defaults to `true`.
+
+`preserve_original`::
+ If `true` includes original words in subwords:
+ "500-42" => "500-42" "500" "42". Defaults to `false`.
+
+`split_on_numerics`::
+ If `true` causes "j2se" to be three tokens; "j"
+ "2" "se". Defaults to `true`.
+
+`stem_english_possessive`::
+ If `true` causes trailing "'s" to be
+ removed for each subword: "O'Neil's" => "O", "Neil". Defaults to `true`.
+
+Advanced settings include:
+
+`protected_words`::
+    A list of words protected from being delimited.
+    Either an array, or alternatively `protected_words_path` can be set, which
+    resolves to a file containing the protected words (one per line).
+    Automatically resolves to a `config/` based location if it exists.
+
+`type_table`::
+ A custom type mapping table, for example (when configured
+ using `type_table_path`):
+
+[source,js]
+--------------------------------------------------
+ # Map the $, %, '.', and ',' characters to DIGIT
+ # This might be useful for financial data.
+ $ => DIGIT
+ % => DIGIT
+ . => DIGIT
+ \\u002C => DIGIT
+
+ # in some cases you might not want to split on ZWJ
+ # this also tests the case where we need a bigger byte[]
+ # see http://en.wikipedia.org/wiki/Zero-width_joiner
+ \\u200D => ALPHANUM
+--------------------------------------------------
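+
+Putting it together, a minimal sketch (the index, filter and analyzer names
+are illustrative) of a custom `word_delimiter` filter; the `whitespace`
+tokenizer is used so the intra-word delimiters survive tokenization:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/test' -d '
+{
+    "settings" : {
+        "analysis" : {
+            "filter" : {
+                "my_word_delimiter" : {
+                    "type" : "word_delimiter",
+                    "catenate_words" : true,
+                    "preserve_original" : true
+                }
+            },
+            "analyzer" : {
+                "my_analyzer" : {
+                    "tokenizer" : "whitespace",
+                    "filter" : ["my_word_delimiter", "lowercase"]
+                }
+            }
+        }
+    }
+}'
+--------------------------------------------------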
diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc
new file mode 100644
index 0000000..3118b0d
--- /dev/null
+++ b/docs/reference/analysis/tokenizers.asciidoc
@@ -0,0 +1,30 @@
+[[analysis-tokenizers]]
+== Tokenizers
+
+Tokenizers are used to break a string down into a stream of terms
+or tokens. A simple tokenizer might split the string up into terms
+wherever it encounters whitespace or punctuation.
+
+Elasticsearch has a number of built-in tokenizers which can be
+used to build <<analysis-custom-analyzer,custom analyzers>>.
+
+include::tokenizers/standard-tokenizer.asciidoc[]
+
+include::tokenizers/edgengram-tokenizer.asciidoc[]
+
+include::tokenizers/keyword-tokenizer.asciidoc[]
+
+include::tokenizers/letter-tokenizer.asciidoc[]
+
+include::tokenizers/lowercase-tokenizer.asciidoc[]
+
+include::tokenizers/ngram-tokenizer.asciidoc[]
+
+include::tokenizers/whitespace-tokenizer.asciidoc[]
+
+include::tokenizers/pattern-tokenizer.asciidoc[]
+
+include::tokenizers/uaxurlemail-tokenizer.asciidoc[]
+
+include::tokenizers/pathhierarchy-tokenizer.asciidoc[]
+
diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc
new file mode 100644
index 0000000..41cc233
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc
@@ -0,0 +1,80 @@
+[[analysis-edgengram-tokenizer]]
+=== Edge NGram Tokenizer
+
+A tokenizer of type `edgeNGram`.
+
+This tokenizer is very similar to `nGram` but only keeps n-grams which
+start at the beginning of a token.
+
+The following are settings that can be set for an `edgeNGram` tokenizer
+type:
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Description |Default value
+|`min_gram` |Minimum size in codepoints of a single n-gram |`1`.
+
+|`max_gram` |Maximum size in codepoints of a single n-gram |`2`.
+
+|`token_chars` | Character classes to keep in the
+tokens. Elasticsearch will split on characters that don't belong to any
+of these classes. |`[]` (Keep all characters)
+|=======================================================================
+
+
+`token_chars` accepts the following character classes:
+
+[horizontal]
+`letter`:: for example `a`, `b`, `ï` or `京`
+`digit`:: for example `3` or `7`
+`whitespace`:: for example `" "` or `"\n"`
+`punctuation`:: for example `!` or `"`
+`symbol`:: for example `$` or `√`
+
+[float]
+==== Example
+
+[source,js]
+--------------------------------------------------
+ curl -XPUT 'localhost:9200/test' -d '
+ {
+ "settings" : {
+ "analysis" : {
+ "analyzer" : {
+ "my_edge_ngram_analyzer" : {
+ "tokenizer" : "my_edge_ngram_tokenizer"
+ }
+ },
+ "tokenizer" : {
+ "my_edge_ngram_tokenizer" : {
+ "type" : "edgeNGram",
+ "min_gram" : "2",
+ "max_gram" : "5",
+ "token_chars": [ "letter", "digit" ]
+ }
+ }
+ }
+ }
+ }'
+
+ curl 'localhost:9200/test/_analyze?pretty=1&analyzer=my_edge_ngram_analyzer' -d 'FC Schalke 04'
+ # FC, Sc, Sch, Scha, Schal, 04
+--------------------------------------------------
+
+[float]
+==== `side` deprecated
+
+There used to be a `side` parameter up to `0.90.1` but it is now deprecated. In
+order to emulate the behavior of `"side" : "BACK"` a
+<<analysis-reverse-tokenfilter,`reverse` token filter>> should be used together
+with the <<analysis-edgengram-tokenfilter,`edgeNGram` token filter>>. The
+`edgeNGram` filter must be enclosed in `reverse` filters like this:
+
+[source,js]
+--------------------------------------------------
+ "filter" : ["reverse", "edgeNGram", "reverse"]
+--------------------------------------------------
+
+which essentially reverses the token, builds front `EdgeNGrams` and reverses
+the ngram again. This has the same effect as the previous `"side" : "BACK"` setting.
+
diff --git a/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc
new file mode 100644
index 0000000..be75f3d
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc
@@ -0,0 +1,15 @@
+[[analysis-keyword-tokenizer]]
+=== Keyword Tokenizer
+
+A tokenizer of type `keyword` that emits the entire input as a single
+token.
+
+The following are settings that can be set for a `keyword` tokenizer
+type:
+
+[cols="<,<",options="header",]
+|=======================================================
+|Setting |Description
+|`buffer_size` |The term buffer size. Defaults to `256`.
+|=======================================================
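+
+For example, a sketch (the index and analyzer names are illustrative)
+combining the `keyword` tokenizer with the `lowercase` filter to index the
+whole input as one case-insensitive token:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/test' -d '
+{
+    "settings" : {
+        "analysis" : {
+            "analyzer" : {
+                "lowercase_keyword" : {
+                    "tokenizer" : "keyword",
+                    "filter" : ["lowercase"]
+                }
+            }
+        }
+    }
+}'
+--------------------------------------------------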
+
diff --git a/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc
new file mode 100644
index 0000000..03025cc
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc
@@ -0,0 +1,7 @@
+[[analysis-letter-tokenizer]]
+=== Letter Tokenizer
+
+A tokenizer of type `letter` that divides text at non-letters. That's to
+say, it defines tokens as maximal strings of adjacent letters. Note,
+this does a decent job for most European languages, but does a terrible
+job for some Asian languages, where words are not separated by spaces.
diff --git a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc
new file mode 100644
index 0000000..0cdbbc3
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc
@@ -0,0 +1,15 @@
+[[analysis-lowercase-tokenizer]]
+=== Lowercase Tokenizer
+
+A tokenizer of type `lowercase` that performs the function of the
+<<analysis-letter-tokenizer,Letter
+Tokenizer>> and the
+<<analysis-lowercase-tokenfilter,Lower
+Case Token Filter>> together. It divides text at non-letters and converts
+the resulting tokens to lower case. While it is functionally equivalent to the
+combination of the
+<<analysis-letter-tokenizer,Letter
+Tokenizer>> and the
+<<analysis-lowercase-tokenfilter,Lower
+Case Token Filter>>, there is a performance advantage to doing the two
+tasks at once, hence this (redundant) implementation.
diff --git a/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc
new file mode 100644
index 0000000..23e6bc5
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc
@@ -0,0 +1,57 @@
+[[analysis-ngram-tokenizer]]
+=== NGram Tokenizer
+
+A tokenizer of type `nGram`.
+
+The following are settings that can be set for an `nGram` tokenizer type:
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Description |Default value
+|`min_gram` |Minimum size in codepoints of a single n-gram |`1`.
+
+|`max_gram` |Maximum size in codepoints of a single n-gram |`2`.
+
+|`token_chars` |Character classes to keep in the
+tokens. Elasticsearch will split on characters that don't belong to any
+of these classes. |`[]` (Keep all characters)
+|=======================================================================
+
+`token_chars` accepts the following character classes:
+
+[horizontal]
+`letter`:: for example `a`, `b`, `ï` or `京`
+`digit`:: for example `3` or `7`
+`whitespace`:: for example `" "` or `"\n"`
+`punctuation`:: for example `!` or `"`
+`symbol`:: for example `$` or `√`
+
+[float]
+==== Example
+
+[source,js]
+--------------------------------------------------
+ curl -XPUT 'localhost:9200/test' -d '
+ {
+ "settings" : {
+ "analysis" : {
+ "analyzer" : {
+ "my_ngram_analyzer" : {
+ "tokenizer" : "my_ngram_tokenizer"
+ }
+ },
+ "tokenizer" : {
+ "my_ngram_tokenizer" : {
+ "type" : "nGram",
+ "min_gram" : "2",
+ "max_gram" : "3",
+ "token_chars": [ "letter", "digit" ]
+ }
+ }
+ }
+ }
+ }'
+
+ curl 'localhost:9200/test/_analyze?pretty=1&analyzer=my_ngram_analyzer' -d 'FC Schalke 04'
+ # FC, Sc, Sch, ch, cha, ha, hal, al, alk, lk, lke, ke, 04
+--------------------------------------------------
diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc
new file mode 100644
index 0000000..e6876f5
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc
@@ -0,0 +1,32 @@
+[[analysis-pathhierarchy-tokenizer]]
+=== Path Hierarchy Tokenizer
+
+The `path_hierarchy` tokenizer takes something like this:
+
+-------------------------
+/something/something/else
+-------------------------
+
+And produces tokens:
+
+-------------------------
+/something
+/something/something
+/something/something/else
+-------------------------
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`delimiter` |The character delimiter to use, defaults to `/`.
+
+|`replacement` |An optional replacement character to use. Defaults to
+the `delimiter`.
+
+|`buffer_size` |The buffer size to use, defaults to `1024`.
+
+|`reverse` |Generates tokens in reverse order, defaults to `false`.
+
+|`skip` |Controls initial tokens to skip, defaults to `0`.
+|=======================================================================
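+
+For example, a sketch (the index and names are illustrative) of a
+`path_hierarchy` tokenizer that splits Windows-style paths on backslashes
+and emits the tokens with forward slashes:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/test' -d '
+{
+    "settings" : {
+        "analysis" : {
+            "tokenizer" : {
+                "windows_path_tokenizer" : {
+                    "type" : "path_hierarchy",
+                    "delimiter" : "\\",
+                    "replacement" : "/"
+                }
+            },
+            "analyzer" : {
+                "windows_path_analyzer" : {
+                    "tokenizer" : "windows_path_tokenizer"
+                }
+            }
+        }
+    }
+}'
+--------------------------------------------------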
+
diff --git a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
new file mode 100644
index 0000000..72ca604
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
@@ -0,0 +1,29 @@
+[[analysis-pattern-tokenizer]]
+=== Pattern Tokenizer
+
+A tokenizer of type `pattern` that can flexibly separate text into terms
+via a regular expression. Accepts the following settings:
+
+[cols="<,<",options="header",]
+|======================================================================
+|Setting |Description
+|`pattern` |The regular expression pattern, defaults to `\\W+`.
+|`flags` |The regular expression flags.
+|`group` |Which group to extract into tokens. Defaults to `-1` (split).
+|======================================================================
+
+*IMPORTANT*: The regular expression should match the *token separators*,
+not the tokens themselves.
+
+`group` set to `-1` (the default) is equivalent to "split". Using group
+>= 0 selects the matching group as the token. For example, if you have:
+
+------------------------
+pattern = \\'([^\']+)\\'
+group = 0
+input = aaa 'bbb' 'ccc'
+------------------------
+
+the output will be two tokens: 'bbb' and 'ccc' (including the ' marks).
+With the same input but using group=1, the output would be: bbb and ccc
+(no ' marks).
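+
+As a concrete sketch (the index and analyzer names are illustrative), a
+`pattern` tokenizer that splits comma-separated values:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/test' -d '
+{
+    "settings" : {
+        "analysis" : {
+            "tokenizer" : {
+                "comma_tokenizer" : {
+                    "type" : "pattern",
+                    "pattern" : ","
+                }
+            },
+            "analyzer" : {
+                "comma_analyzer" : {
+                    "tokenizer" : "comma_tokenizer"
+                }
+            }
+        }
+    }
+}'
+
+curl 'localhost:9200/test/_analyze?analyzer=comma_analyzer' -d 'one,two,three'
+# one, two, three
+--------------------------------------------------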
diff --git a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc
new file mode 100644
index 0000000..c8b405b
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc
@@ -0,0 +1,18 @@
+[[analysis-standard-tokenizer]]
+=== Standard Tokenizer
+
+A tokenizer of type `standard` providing a grammar-based tokenizer that
+works well for most European language documents. The tokenizer
+implements the Unicode Text Segmentation algorithm, as specified in
+http://unicode.org/reports/tr29/[Unicode Standard Annex #29].
+
+The following are settings that can be set for a `standard` tokenizer
+type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`max_token_length` |The maximum token length. If a token is seen that
+exceeds this length then it is discarded. Defaults to `255`.
+|=======================================================================
+
diff --git a/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc
new file mode 100644
index 0000000..9ed28e6
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc
@@ -0,0 +1,16 @@
+[[analysis-uaxurlemail-tokenizer]]
+=== UAX Email URL Tokenizer
+
+A tokenizer of type `uax_url_email` which works exactly like the
+`standard` tokenizer, but tokenizes emails and URLs as single tokens.
+
+The following are settings that can be set for a `uax_url_email`
+tokenizer type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`max_token_length` |The maximum token length. If a token is seen that
+exceeds this length then it is discarded. Defaults to `255`.
+|=======================================================================
+
diff --git a/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc
new file mode 100644
index 0000000..f0e1ce2
--- /dev/null
+++ b/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc
@@ -0,0 +1,4 @@
+[[analysis-whitespace-tokenizer]]
+=== Whitespace Tokenizer
+
+A tokenizer of type `whitespace` that divides text at whitespace.
diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc
new file mode 100644
index 0000000..b9e9516
--- /dev/null
+++ b/docs/reference/api-conventions.asciidoc
@@ -0,0 +1,269 @@
+[[api-conventions]]
+= API Conventions
+
+[partintro]
+--
+The *elasticsearch* REST APIs are exposed using:
+
+* <<modules-http,JSON over HTTP>>,
+* <<modules-thrift,thrift>>,
+* <<modules-memcached,memcached>>.
+
+The conventions listed in this chapter can be applied throughout the REST
+API, unless otherwise specified.
+
+* <<multi-index>>
+* <<common-options>>
+
+--
+
+[[multi-index]]
+== Multiple Indices
+
+Most APIs that refer to an `index` parameter support execution across multiple indices,
+using simple `test1,test2,test3` notation (or `_all` for all indices). They also
+support wildcards, for example: `test*`, and the ability to "add" (`+`)
+and "remove" (`-`) indices, for example: `+test*,-test3`, as shown below.
+
+All multi-index APIs support the following URL query string parameters:
+
+`ignore_unavailable`::
+
+Controls whether to ignore unavailable indices. This includes indices
+that don't exist or closed indices. Either `true` or `false` can be
+specified.
+
+`allow_no_indices`::
+
+Controls whether to fail if a wildcard indices expression resolves to no
+concrete indices. Either `true` or `false` can be specified. For example, if
+the wildcard expression `foo*` is specified and no indices are available that
+start with `foo`, then depending on this setting the request will fail. This
+setting also applies when `_all`, `*` or no index has been specified.
+
+`expand_wildcards`::
+
+Controls what kind of concrete indices wildcard indices expressions expand
+to. If `open` is specified then the wildcard expression is expanded to only
+open indices, and if `closed` is specified then the wildcard expression is
+expanded only to closed indices. Both values (`open,closed`) can also be
+specified to expand to all indices.
+
+The default settings for the above parameters depend on the API being used.
+
+NOTE: Single index APIs such as the <<docs>> and the
+<<indices-aliases,single-index `alias` APIs>> do not support multiple indices.
+
+[[common-options]]
+== Common options
+
+The following options can be applied to all of the REST APIs.
+
+[float]
+=== Pretty Results
+
+When appending `?pretty=true` to any request made, the JSON returned
+will be pretty formatted (use it for debugging only!). Another option is
+to set `format=yaml` which will cause the result to be returned in the
+(sometimes) more readable yaml format.
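+
+For example (any endpoint works; `_cluster/health` is just an illustration):
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/_cluster/health?pretty=true'
+curl 'localhost:9200/_cluster/health?format=yaml'
+--------------------------------------------------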
+
+
+[float]
+=== Human readable output
+
+Statistics are returned in a format suitable for humans
+(eg `"exists_time": "1h"` or `"size": "1kb"`) and for computers
+(eg `"exists_time_in_millis": 3600000`` or `"size_in_bytes": 1024`).
+The human readable values can be turned off by adding `?human=false`
+to the query string. This makes sense when the stats results are
+being consumed by a monitoring tool, rather than intended for human
+consumption. The default for the `human` flag is
+`false`. added[1.00.Beta,Previously defaulted to `true`]
+
+[float]
+=== Flat Settings
+
+The `flat_settings` flag affects rendering of the lists of settings. When the
+`flat_settings` flag is `true`, settings are returned in a flat format:
+
+[source,js]
+--------------------------------------------------
+{
+ "persistent" : { },
+ "transient" : {
+ "discovery.zen.minimum_master_nodes" : "1"
+ }
+}
+--------------------------------------------------
+
+When the `flat_settings` flag is `false` settings are returned in a more
+human readable structured format:
+
+[source,js]
+--------------------------------------------------
+{
+ "persistent" : { },
+ "transient" : {
+ "discovery" : {
+ "zen" : {
+ "minimum_master_nodes" : "1"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+By default the `flat_settings` is set to `false`.
+
+[float]
+=== Parameters
+
+REST parameters (when using HTTP, these map to HTTP URL parameters) follow the
+convention of using underscore casing.
+
+[float]
+=== Boolean Values
+
+All REST API parameters (both request parameters and the JSON body)
+treat the values `false`, `0`, `no` and `off` as boolean "false".
+All other values are considered "true". Note, this is not related to
+fields within an indexed document being treated as boolean fields.
+
+[float]
+=== Number Values
+
+All REST APIs support providing numeric parameters as strings in
+addition to the native JSON number types.
+
+[[time-units]]
+[float]
+=== Time units
+
+Whenever durations need to be specified, eg for a `timeout` parameter, the duration
+can be specified as a whole number representing time in milliseconds, or as a time value like `2d` for 2 days. The supported units are:
+
+[horizontal]
+`y`:: Year
+`M`:: Month
+`w`:: Week
+`d`:: Day
+`h`:: Hour
+`m`:: Minute
+`s`:: Second
+
+[[distance-units]]
+[float]
+=== Distance Units
+
+Wherever distances need to be specified, such as the `distance` parameter in
+the <<query-dsl-geo-distance-filter>> or the `precision` parameter in the
+<<query-dsl-geohash-cell-filter>>, the default unit if none is specified is
+the meter. Distances can be specified in other units, such as `"1km"` or
+`"2mi"` (2 miles).
+
+The full list of units is listed below:
+
+[horizontal]
+Mile:: `mi` or `miles`
+Yard:: `yd` or `yards`
+Feet:: `ft` or `feet`
+Inch:: `in` or `inch`
+Kilometer:: `km` or `kilometers`
+Meter:: `m` or `meters`
+Centimeter:: `cm` or `centimeters`
+Millimeter:: `mm` or `millimeters`
+
+
+[[fuzziness]]
+[float]
+=== Fuzziness
+
+Some queries and APIs support parameters to allow inexact _fuzzy_ matching,
+using the `fuzziness` parameter. The `fuzziness` parameter is context
+sensitive which means that it depends on the type of the field being queried:
+
+[float]
+==== Numeric, date and IPv4 fields
+
+When querying numeric, date and IPv4 fields, `fuzziness` is interpreted as a
+`+/-` margin. It behaves like a <<query-dsl-range-query>> where:
+
+ -fuzziness <= field value <= +fuzziness
+
+The `fuzziness` parameter should be set to a numeric value, eg `2` or `2.0`. A
+`date` field interprets a long as milliseconds, but also accepts a string
+containing a time value -- `"1h"` -- as explained in <<time-units>>. An `ip`
+field accepts a long or another IPv4 address (which will be converted into a
+long).
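+
+For example, a query value of `10` with a `fuzziness` of `2` would match
+field values from `8` to `12`.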
+
+[float]
+==== String fields
+
+When querying `string` fields, `fuzziness` is interpreted as a
+http://en.wikipedia.org/wiki/Levenshtein_distance[Levenshtein Edit Distance]
+-- the number of one character changes that need to be made to one string to
+make it the same as another string.
+
+The `fuzziness` parameter can be specified as:
+
+`0`, `1`, `2`::
+
+the maximum allowed Levenshtein Edit Distance (or number of edits)
+
+`AUTO`::
++
+--
+generates an edit distance based on the length of the term. For lengths:
+
+`0..1`:: must match exactly
+`1..4`:: one edit allowed
+`>4`:: two edits allowed
+
+`AUTO` should generally be the preferred value for `fuzziness`.
+--
+
+`0.0..1.0`::
+
+converted into an edit distance using the formula: `length(term) * (1.0 -
+fuzziness)`, eg a `fuzziness` of `0.6` with a term of length 10 would result
+in an edit distance of `4`. Note: in all APIs except for the
+<<query-dsl-flt-query>>, the maximum allowed edit distance is `2`.
+
+
+
+[float]
+=== Result Casing
+
+All REST APIs accept the `case` parameter. When set to `camelCase`, all
+field names in the result will be returned in camel casing; otherwise,
+underscore casing will be used. Note, this does not apply to the indexed
+source document.
+
+[float]
+=== JSONP
+
+All REST APIs accept a `callback` parameter resulting in a
+http://en.wikipedia.org/wiki/JSONP[JSONP] result.
+
+[float]
+=== Request body in query string
+
+For libraries that don't accept a request body for non-POST requests,
+you can pass the request body as the `source` query string parameter
+instead.
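+
+For example, a sketch of passing a URL-encoded `match_all` search body
+(the decoded value is `{"query":{"match_all":{}}}`):
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_search?source=%7B%22query%22%3A%7B%22match_all%22%3A%7B%7D%7D%7D'
+--------------------------------------------------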
+
+[[url-access-control]]
+== URL-based access control
+
+Many users use a proxy with URL-based access control to secure access to
+Elasticsearch indices. For <<search-multi-search,multi-search>>,
+<<docs-multi-get,multi-get>> and <<docs-bulk,bulk>> requests, the user has
+the choice of specifying an index in the URL and on each individual request
+within the request body. This can make URL-based access control challenging.
+
+To prevent the user from overriding the index which has been specified in the
+URL, add this setting to the `config.yml` file:
+
+ rest.action.multi.allow_explicit_index: false
+
+The default value is `true`, but when set to `false`, Elasticsearch will
+reject requests that have an explicit index specified in the request body.
diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc
new file mode 100644
index 0000000..0aa2d6f
--- /dev/null
+++ b/docs/reference/cat.asciidoc
@@ -0,0 +1,117 @@
+[[cat]]
+= cat APIs
+
+[partintro]
+--
+
+["float",id="intro"]
+== Introduction
+
+JSON is great... for computers. Even if it's pretty-printed, trying
+to find relationships in the data is tedious. Human eyes, especially
+when looking at an ssh terminal, need compact and aligned text. The
+cat API aims to meet this need.
+
+All the cat commands accept a query string parameter `help` to see all
+the headers and info they provide, and the `/_cat` command alone lists all
+the available commands.
+
+[float]
+[[common-parameters]]
+== Common parameters
+
+[float]
+[[verbose]]
+=== Verbose
+
+Each of the commands accepts a query string parameter `v` to turn on
+verbose output.
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/master?v'
+id ip node
+EGtKWZlWQYWDmX29fUnp3Q 127.0.0.1 Grey, Sara
+--------------------------------------------------
+
+[float]
+[[help]]
+=== Help
+
+Each of the commands accepts a query string parameter `help` which will
+output its available columns.
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/master?help'
+id | node id
+ip | node transport ip address
+node | node name
+--------------------------------------------------
+
+[float]
+[[headers]]
+=== Headers
+
+Each of the commands accepts a query string parameter `h` which forces
+only those columns to appear.
+
+[source,shell]
+--------------------------------------------------
+% curl 'n1:9200/_cat/nodes?h=ip,port,heapPercent,name'
+192.168.56.40 9300 40.3 Captain Universe
+192.168.56.20 9300 15.3 Kaluu
+192.168.56.50 9300 17.0 Yellowjacket
+192.168.56.10 9300 12.3 Remy LeBeau
+192.168.56.30 9300 43.9 Ramsey, Doug
+--------------------------------------------------
+
+[float]
+[[numeric-formats]]
+=== Numeric formats
+
+Many commands provide a few types of numeric output, either a byte
+value or a time value. By default, these types are human-formatted,
+for example, `3.5mb` instead of `3763212`. The human values are not
+sortable numerically, so in order to operate on these values where
+order is important, you can change it.
+
+Say you want to find the largest index in your cluster (storage used
+by all the shards, not number of documents). The `/_cat/indices` API
+is ideal. We only need to tweak two things. First, we want to turn
+off human mode. We'll use a byte-level resolution. Then we'll pipe
+our output into `sort` using the appropriate column, which in this
+case is the eighth one.
+
+[source,shell]
+--------------------------------------------------
+% curl '192.168.56.10:9200/_cat/indices?bytes=b' | sort -rnk8
+green wiki2 3 0 10000 0 105274918 105274918
+green wiki1 3 0 10000 413 103776272 103776272
+green foo 1 0 227 0 2065131 2065131
+--------------------------------------------------
+
+
+--
+
+include::cat/alias.asciidoc[]
+
+include::cat/allocation.asciidoc[]
+
+include::cat/count.asciidoc[]
+
+include::cat/health.asciidoc[]
+
+include::cat/indices.asciidoc[]
+
+include::cat/master.asciidoc[]
+
+include::cat/nodes.asciidoc[]
+
+include::cat/pending_tasks.asciidoc[]
+
+include::cat/recovery.asciidoc[]
+
+include::cat/thread_pool.asciidoc[]
+
+include::cat/shards.asciidoc[]
diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc
new file mode 100644
index 0000000..301a9f8
--- /dev/null
+++ b/docs/reference/cat/alias.asciidoc
@@ -0,0 +1,22 @@
+[[cat-alias]]
+== Aliases
+
+`aliases` shows information about currently configured aliases to indices,
+including filter and routing info.
+
+[source,shell]
+--------------------------------------------------
+% curl '192.168.56.10:9200/_cat/aliases?v'
+alias index filter indexRouting searchRouting
+alias2 test1 * - -
+alias4 test1 - 2 1,2
+alias1 test1 - - -
+alias3 test1 - 1 1
+--------------------------------------------------
+
+The output shows that `alias2` has configured a filter, and specific routing
+configurations in `alias3` and `alias4`.
+
+If you only want to get information about a single alias, you can specify
+the alias in the URL, for example `/_cat/aliases/alias1`.
+
diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc
new file mode 100644
index 0000000..4b3e49a
--- /dev/null
+++ b/docs/reference/cat/allocation.asciidoc
@@ -0,0 +1,17 @@
+[[cat-allocation]]
+== Allocation
+
+`allocation` provides a snapshot of how shards have been allocated around the
+cluster and the state of disk usage.
+
+[source,shell]
+--------------------------------------------------
+% curl '192.168.56.10:9200/_cat/allocation?v'
+shards diskUsed diskAvail diskRatio ip node
+ 1 5.6gb 72.2gb 7.8% 192.168.56.10 Jarella
+ 1 5.6gb 72.2gb 7.8% 192.168.56.30 Solarr
+ 1 5.5gb 72.3gb 7.6% 192.168.56.20 Adam II
+--------------------------------------------------
+
+Here we can see that each node has been allocated a single shard and
+that they're all using about the same amount of space.
diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc
new file mode 100644
index 0000000..fb57638
--- /dev/null
+++ b/docs/reference/cat/count.asciidoc
@@ -0,0 +1,16 @@
+[[cat-count]]
+== Count
+
+`count` provides quick access to the document count of the entire
+cluster, or individual indices.
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.10:9200/_cat/indices
+green wiki1 3 0 10000 331 168.5mb 168.5mb
+green wiki2 3 0 428 0 8mb 8mb
+% curl 192.168.56.10:9200/_cat/count
+1384314124582 19:42:04 10428
+% curl 192.168.56.10:9200/_cat/count/wiki2
+1384314139815 19:42:19 428
+--------------------------------------------------
diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc
new file mode 100644
index 0000000..654e446
--- /dev/null
+++ b/docs/reference/cat/health.asciidoc
@@ -0,0 +1,61 @@
+[[cat-health]]
+== Health
+
+`health` is a terse, one-line representation of the same information
+from `/_cluster/health`. It has one option `ts` to disable the
+timestamping.
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.10:9200/_cat/health
+1384308967 18:16:07 foo green 3 3 3 3 0 0 0
+% curl '192.168.56.10:9200/_cat/health?v&ts=0'
+cluster status nodeTotal nodeData shards pri relo init unassign
+foo green 3 3 3 3 0 0 0
+--------------------------------------------------
+
+A common use of this command is to verify the health is consistent
+across nodes:
+
+[source,shell]
+--------------------------------------------------
+% pssh -i -h list.of.cluster.hosts curl -s localhost:9200/_cat/health
+[1] 20:20:52 [SUCCESS] es3.vm
+1384309218 18:20:18 foo green 3 3 3 3 0 0 0
+[2] 20:20:52 [SUCCESS] es1.vm
+1384309218 18:20:18 foo green 3 3 3 3 0 0 0
+[3] 20:20:52 [SUCCESS] es2.vm
+1384309218 18:20:18 foo green 3 3 3 3 0 0 0
+--------------------------------------------------
+
+A less obvious use is to track recovery of a large cluster over
+time. With enough shards, starting a cluster, or even recovering after
+losing a node, can take time (depending on your network & disk). A way
+to track its progress is by using this command in a delayed loop:
+
+[source,shell]
+--------------------------------------------------
+% while true; do curl 192.168.56.10:9200/_cat/health; sleep 120; done
+1384309446 18:24:06 foo red 3 3 20 20 0 0 1812
+1384309566 18:26:06 foo yellow 3 3 950 916 0 12 870
+1384309686 18:28:06 foo yellow 3 3 1328 916 0 12 492
+1384309806 18:30:06 foo green 3 3 1832 916 4 0 0
+^C
+--------------------------------------------------
+
+In this scenario, we can tell that recovery took roughly four minutes.
+If this were going on for hours, we would be able to watch the
+`UNASSIGNED` shards drop precipitously. If that number remained
+static, we would have an idea that there is a problem.
+
+[float]
+[[timestamp]]
+=== Why the timestamp?
+
+You typically are using the `health` command when a cluster is
+malfunctioning. During this period, it's extremely important to
+correlate activities across log files, alerting systems, etc.
+
+There are two outputs. The `HH:MM:SS` output is simply for quick
+human consumption. The epoch time retains more information, including
+date, and is machine sortable if your recovery spans days.
diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc
new file mode 100644
index 0000000..0cd202f
--- /dev/null
+++ b/docs/reference/cat/indices.asciidoc
@@ -0,0 +1,57 @@
+[[cat-indices]]
+== Indices
+
+The `indices` command provides a cross-section of each index. This
+information *spans nodes*.
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/indices/twi*?v'
+health index pri rep docs.count docs.deleted store.size pri.store.size
+green twitter 5 1 11434 0 64mb 32mb
+green twitter2 2 0 2030 0 5.8mb 5.8mb
+--------------------------------------------------
+
+We can tell quickly how many shards make up an index, the number of
+docs, deleted docs, primary store size, and total store size (all
+shards including replicas).
+
+[float]
+[[pri-flag]]
+=== Primaries
+
+By default, the index stats are shown for all of an index's
+shards, including replicas. A `pri` flag can be supplied to limit
+the view to stats for the primaries only.
+
+[float]
+[[examples]]
+=== Examples
+
+Which indices are yellow?
+
+[source,shell]
+--------------------------------------------------
+% curl localhost:9200/_cat/indices | grep ^yell
+yellow wiki 2 1 6401 1115 151.4mb 151.4mb
+yellow twitter 5 1 11434 0 32mb 32mb
+--------------------------------------------------
+
+What's my largest index by disk usage not including replicas?
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk7
+green wiki 2 0 6401 1115 158843725 158843725
+green twitter 5 1 11434 0 67155614 33577857
+green twitter2 2 0 2030 0 6125085 6125085
+--------------------------------------------------
+
+How many merge operations have the shards for the `wiki` index completed?
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/indices/wiki?pri&v&h=health,index,prirep,docs.count,mt'
+health index docs.count mt pri.mt
+green wiki 9646 16 16
+--------------------------------------------------
diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc
new file mode 100644
index 0000000..9c2e249
--- /dev/null
+++ b/docs/reference/cat/master.asciidoc
@@ -0,0 +1,27 @@
+[[cat-master]]
+== Master
+
+`master` doesn't have any extra options. It simply displays the
+master's node ID, bound IP address, and node name.
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/master?v'
+id ip node
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+--------------------------------------------------
+
+This information is also available via the `nodes` command, but this
+is slightly shorter when all you want to do, for example, is verify
+all nodes agree on the master:
+
+[source,shell]
+--------------------------------------------------
+% pssh -i -h list.of.cluster.hosts curl -s localhost:9200/_cat/master
+[1] 19:16:37 [SUCCESS] es3.vm
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+[2] 19:16:37 [SUCCESS] es2.vm
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+[3] 19:16:37 [SUCCESS] es1.vm
+Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 Solarr
+--------------------------------------------------
diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc
new file mode 100644
index 0000000..a500825
--- /dev/null
+++ b/docs/reference/cat/nodes.asciidoc
@@ -0,0 +1,48 @@
+[[cat-nodes]]
+== Nodes
+
+The `nodes` command shows the cluster topology.
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.10:9200/_cat/nodes
+SP4H 4727 192.168.56.30 9300 1.0.0.Beta2 1.6.0_27 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h d m Boneyard
+_uhJ 5134 192.168.56.10 9300 1.0.0.Beta2 1.6.0_27 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h d * Athena
+HfDp 4562 192.168.56.20 9300 1.0.0.Beta2 1.6.0_27 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h d m Zarek
+--------------------------------------------------
+
+The first few columns tell you where your nodes live. For sanity it
+also tells you what version of ES and the JVM each one runs.
+
+[source,shell]
+--------------------------------------------------
+nodeId pid ip port es jdk
+u2PZ 4234 192.168.56.30 9300 1.0.0.Beta1 1.6.0_27
+URzf 5443 192.168.56.10 9300 1.0.0.Beta1 1.6.0_27
+ActN 3806 192.168.56.20 9300 1.0.0.Beta1 1.6.0_27
+--------------------------------------------------
+
+
+The next few give a picture of your heap, memory, and load.
+
+[source,shell]
+--------------------------------------------------
+diskAvail heapPercent heapMax ramPercent ramMax load
+ 72.1gb 31.3 93.9mb 81 239.1mb 0.24
+ 72.1gb 19.6 93.9mb 82 239.1mb 0.05
+ 72.2gb 64.9 93.9mb 84 239.1mb 0.12
+--------------------------------------------------
+
+The last columns provide ancillary information that can often be
+useful when looking at the cluster as a whole, particularly large
+ones. How many master-eligible nodes do I have? How many client
+nodes? It looks like someone restarted a node recently; which one was
+it?
+
+[source,shell]
+--------------------------------------------------
+uptime data/client master name
+ 3.5h d m Boneyard
+ 3.5h d * Athena
+ 3.5h d m Zarek
+--------------------------------------------------
diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc
new file mode 100644
index 0000000..62f90b6
--- /dev/null
+++ b/docs/reference/cat/pending_tasks.asciidoc
@@ -0,0 +1,19 @@
+[[cat-pending-tasks]]
+== Cluster Pending Tasks
+
+`pending_tasks` provides the same information as the
+<<cluster-pending,`/_cluster/pending_tasks`>> API in a
+convenient tabular format.
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/pending_tasks?v'
+insertOrder timeInQueue priority source
+ 1685 855ms HIGH update-mapping [foo][t]
+ 1686 843ms HIGH update-mapping [foo][t]
+ 1693 753ms HIGH refresh-mapping [foo][[t]]
+ 1688 816ms HIGH update-mapping [foo][t]
+ 1689 802ms HIGH update-mapping [foo][t]
+ 1690 787ms HIGH update-mapping [foo][t]
+ 1691 773ms HIGH update-mapping [foo][t]
+--------------------------------------------------
diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc
new file mode 100644
index 0000000..ae6b5de
--- /dev/null
+++ b/docs/reference/cat/recovery.asciidoc
@@ -0,0 +1,57 @@
+[[cat-recovery]]
+== Recovery
+
+`recovery` is a view of shard replication. It will show information
+whenever data from at least one shard is copying to a different node.
+Recoveries also show up after cluster restarts. If your recovery process
+seems stuck, try this command to see if there's any movement.
+
+As an example, let's enable replicas on a cluster which has two
+indices, three shards each. Afterward we'll have twelve total shards,
+but before those replica shards are `STARTED`, we'll take a snapshot
+of the recovery:
+
+[source,shell]
+--------------------------------------------------
+% curl -XPUT 192.168.56.30:9200/_settings -d'{"number_of_replicas":1}'
+{"acknowledged":true}
+% curl '192.168.56.30:9200/_cat/recovery?v'
+index shard target recovered % ip node
+wiki1 2 68083830 7865837 11.6% 192.168.56.20 Adam II
+wiki2 1 2542400 444175 17.5% 192.168.56.20 Adam II
+wiki2 2 3242108 329039 10.1% 192.168.56.10 Jarella
+wiki2 0 2614132 0 0.0% 192.168.56.30 Solarr
+wiki1 0 60992898 4719290 7.7% 192.168.56.30 Solarr
+wiki1 1 47630362 6798313 14.3% 192.168.56.10 Jarella
+--------------------------------------------------
+
+We have six total shards in recovery (a replica for each primary), at
+varying points of progress.
+
+Let's restart the cluster and then lose a node. This output shows us
+what was moving around shortly after the node left the cluster.
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.30:9200/_cat/health; curl 192.168.56.30:9200/_cat/recovery
+1384315040 19:57:20 foo yellow 2 2 8 6 0 4 0
+wiki2 2 1621477 0 0.0% 192.168.56.30 Garrett, Jonathan "John"
+wiki2 0 1307488 0 0.0% 192.168.56.20 Commander Kraken
+wiki1 0 32696794 20984240 64.2% 192.168.56.20 Commander Kraken
+wiki1 1 31123128 21951695 70.5% 192.168.56.30 Garrett, Jonathan "John"
+--------------------------------------------------
+
+[float]
+[[big-percent]]
+=== Why am I seeing recovery percentages greater than 100?
+
+This can happen if a shard copy goes away and comes back while the
+primary is indexing. The replica shard will catch up with the
+primary by receiving any new segments created during its outage.
+These new segments can contain data from segments it already has
+because they're the result of merging that happened on the primary,
+but now live in different, larger segments. After the new segments
+are copied over the replica will delete unneeded segments, resulting
+in a dataset that more closely matches the primary (or exactly,
+assuming indexing isn't still happening).
+
diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc
new file mode 100644
index 0000000..bb91027
--- /dev/null
+++ b/docs/reference/cat/shards.asciidoc
@@ -0,0 +1,90 @@
+[[cat-shards]]
+== Shards
+
+The `shards` command is the detailed view of what nodes contain which
+shards. It will tell you if it's a primary or replica, the number of
+docs, the bytes it takes on disk, and the node where it's located.
+
+Here we see a single index, with three primary shards and no replicas:
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.20:9200/_cat/shards
+wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto
+wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 Frankie Raye
+wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 Commander Kraken
+--------------------------------------------------
+
+[[index-pattern]]
+=== Index pattern
+
+If you have many shards, you may wish to limit which indices show up
+in the output. You can always do this with `grep`, but you can save
+some bandwidth by supplying an index pattern at the end of the URL.
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.20:9200/_cat/shards/wiki2
+wiki2 0 p STARTED 197 3.2mb 192.168.56.10 Stiletto
+wiki2 1 p STARTED 205 5.9mb 192.168.56.30 Frankie Raye
+wiki2 2 p STARTED 275 7.8mb 192.168.56.20 Commander Kraken
+--------------------------------------------------
+
+
+[[relocation]]
+=== Relocation
+
+Let's say you've checked your health and you see two relocating
+shards. Where are they from and where are they going?
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.10:9200/_cat/health
+1384315316 20:01:56 foo green 3 3 12 6 2 0 0
+% curl 192.168.56.10:9200/_cat/shards | fgrep RELO
+wiki1 0 r RELOCATING 3014 31.1mb 192.168.56.20 Commander Kraken -> 192.168.56.30 Frankie Raye
+wiki1 1 r RELOCATING 3013 29.6mb 192.168.56.10 Stiletto -> 192.168.56.30 Frankie Raye
+--------------------------------------------------
+
+[[states]]
+=== Shard states
+
+Before a shard can be used, it goes through an `INITIALIZING` state.
+`shards` can show you which ones.
+
+[source,shell]
+--------------------------------------------------
+% curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":1}'
+{"acknowledged":true}
+% curl 192.168.56.20:9200/_cat/shards
+wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto
+wiki1 0 r INITIALIZING 0 14.3mb 192.168.56.30 Frankie Raye
+wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 Frankie Raye
+wiki1 1 r INITIALIZING 0 13.1mb 192.168.56.20 Commander Kraken
+wiki1 2 r INITIALIZING 0 14mb 192.168.56.10 Stiletto
+wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 Commander Kraken
+--------------------------------------------------
+
+If a shard cannot be assigned, for example because you've overallocated the
+number of replicas relative to the number of nodes in the cluster, the shard
+will remain `UNASSIGNED`.
+
+[source,shell]
+--------------------------------------------------
+% curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":3}'
+% curl 192.168.56.20:9200/_cat/health
+1384316325 20:18:45 foo yellow 3 3 9 3 0 0 3
+% curl 192.168.56.20:9200/_cat/shards
+wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto
+wiki1 0 r STARTED 3014 31.1mb 192.168.56.30 Frankie Raye
+wiki1 0 r STARTED 3014 31.1mb 192.168.56.20 Commander Kraken
+wiki1 0 r UNASSIGNED
+wiki1 1 r STARTED 3013 29.6mb 192.168.56.10 Stiletto
+wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 Frankie Raye
+wiki1 1 r STARTED 3013 29.6mb 192.168.56.20 Commander Kraken
+wiki1 1 r UNASSIGNED
+wiki1 2 r STARTED 3973 38.1mb 192.168.56.10 Stiletto
+wiki1 2 r STARTED 3973 38.1mb 192.168.56.30 Frankie Raye
+wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 Commander Kraken
+wiki1 2 r UNASSIGNED
+--------------------------------------------------
diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc
new file mode 100644
index 0000000..5ed0a15
--- /dev/null
+++ b/docs/reference/cat/thread_pool.asciidoc
@@ -0,0 +1,44 @@
+[[cat-thread-pool]]
+== Thread pool
+
+The `thread_pool` command shows cluster wide thread pool statistics per node. By default the active, queue and rejected
+statistics are returned for the bulk, index and search thread pools.
+
+[source,shell]
+--------------------------------------------------
+% curl 192.168.56.10:9200/_cat/thread_pool
+host1 192.168.1.35 0 0 0 0 0 0 0 0 0
+host2 192.168.1.36 0 0 0 0 0 0 0 0 0
+--------------------------------------------------
+
+The first two columns contain the host and ip of a node.
+
+[source,shell]
+--------------------------------------------------
+host ip
+host1 192.168.1.35
+host2 192.168.1.36
+--------------------------------------------------
+
+The next three columns show the active, queue and rejected statistics for the bulk thread pool.
+
+[source,shell]
+--------------------------------------------------
+bulk.active bulk.queue bulk.rejected
+ 0 0 0
+--------------------------------------------------
+
+The remaining columns show the active, queue and rejected statistics of the index and search thread pools respectively.
+
+Statistics for other thread pools can also be retrieved by using the `h` (header) parameter.
+
+[source,shell]
+--------------------------------------------------
+% curl 'localhost:9200/_cat/thread_pool?v&h=id,host,suggest.active,suggest.rejected,suggest.completed'
+host suggest.active suggest.rejected suggest.completed
+host1 0 0 0
+host2 0 0 0
+--------------------------------------------------
+
+Here the host column and the active, rejected and completed suggest thread pool statistics are displayed. The suggest
+thread pool won't be displayed by default, so you always need to be specific about which statistics you want to display. \ No newline at end of file
diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc
new file mode 100644
index 0000000..088de8d
--- /dev/null
+++ b/docs/reference/cluster.asciidoc
@@ -0,0 +1,50 @@
+[[cluster]]
+= Cluster APIs
+
+[partintro]
+--
+["float",id="cluster-nodes"]
+== Node specification
+
+Most cluster level APIs allow you to specify which nodes to execute on (for
+example, getting the node stats for a node). Nodes can be identified in
+the APIs either using their internal node id, the node name, address,
+custom attributes, or just the `_local` node receiving the request. For
+example, here are some sample executions of nodes info:
+
+[source,js]
+--------------------------------------------------
+# Local
+curl localhost:9200/_nodes/_local
+# Address
+curl localhost:9200/_nodes/10.0.0.3,10.0.0.4
+curl localhost:9200/_nodes/10.0.0.*
+# Names
+curl localhost:9200/_nodes/node_name_goes_here
+curl localhost:9200/_nodes/node_name_goes_*
+# Attributes (set something like node.rack: 2 in the config)
+curl localhost:9200/_nodes/rack:2
+curl localhost:9200/_nodes/ra*:2
+curl localhost:9200/_nodes/ra*:2*
+--------------------------------------------------
+--
+
+include::cluster/health.asciidoc[]
+
+include::cluster/state.asciidoc[]
+
+include::cluster/stats.asciidoc[]
+
+include::cluster/pending.asciidoc[]
+
+include::cluster/reroute.asciidoc[]
+
+include::cluster/update-settings.asciidoc[]
+
+include::cluster/nodes-stats.asciidoc[]
+
+include::cluster/nodes-info.asciidoc[]
+
+include::cluster/nodes-hot-threads.asciidoc[]
+
+include::cluster/nodes-shutdown.asciidoc[]
diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc
new file mode 100644
index 0000000..48108e1
--- /dev/null
+++ b/docs/reference/cluster/health.asciidoc
@@ -0,0 +1,87 @@
+[[cluster-health]]
+== Cluster Health
+
+The cluster health API allows you to get a very simple status on the health
+of the cluster.
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/health?pretty=true'
+{
+ "cluster_name" : "testcluster",
+ "status" : "green",
+ "timed_out" : false,
+ "number_of_nodes" : 2,
+ "number_of_data_nodes" : 2,
+ "active_primary_shards" : 5,
+ "active_shards" : 10,
+ "relocating_shards" : 0,
+ "initializing_shards" : 0,
+ "unassigned_shards" : 0
+}
+--------------------------------------------------
+
+The API can also be executed against one or more indices to get just the
+specified indices health:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/health/test1,test2'
+--------------------------------------------------
+
+The cluster health status is: `green`, `yellow` or `red`. On the shard
+level, a `red` status indicates that the specific shard is not allocated
+in the cluster, `yellow` means that the primary shard is allocated but
+replicas are not, and `green` means that all shards are allocated. The
+index level status is controlled by the worst shard status. The cluster
+status is controlled by the worst index status.
+
+One of the main benefits of the API is the ability to wait until the
+cluster reaches a certain high water-mark health level. For example, the
+following will wait for 50 seconds for the cluster to reach the `yellow`
+level (if it reaches the `green` or `yellow` status before 50 seconds elapse,
+it will return at that point):
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=50s'
+--------------------------------------------------
+
+[float]
+[[request-params]]
+=== Request Parameters
+
+The cluster health API accepts the following request parameters:
+
+`level`::
+ Can be one of `cluster`, `indices` or `shards`. Controls the
+ details level of the health information returned. Defaults to `cluster`.
+
+`wait_for_status`::
+ One of `green`, `yellow` or `red`. Will wait (until
+ the timeout provided) until the status of the cluster changes to the one
+ provided. By default, will not wait for any status.
+
+`wait_for_relocating_shards`::
+    A number controlling how many relocating
+    shards to wait for. Usually `0` to indicate waiting until all
+    relocations have finished. Defaults to not waiting.
+
+`wait_for_nodes`::
+ The request waits until the specified number `N` of
+ nodes is available. It also accepts `>=N`, `<=N`, `>N` and `<N`.
+ Alternatively, it is possible to use `ge(N)`, `le(N)`, `gt(N)` and
+ `lt(N)` notation.
+
+`timeout`::
+    A time based parameter controlling how long to wait if one of
+    the `wait_for_*` parameters is provided. Defaults to `30s`.
+
+
+The following is an example of getting the cluster health at the
+`shards` level:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/health/twitter?level=shards'
+--------------------------------------------------
diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc
new file mode 100644
index 0000000..dc8b153
--- /dev/null
+++ b/docs/reference/cluster/nodes-hot-threads.asciidoc
@@ -0,0 +1,16 @@
+[[cluster-nodes-hot-threads]]
+== Nodes hot_threads
+
+This API allows you to get the current hot threads on each node in the
+cluster. Endpoints are `/_nodes/hot_threads`, and
+`/_nodes/{nodesIds}/hot_threads`.
+
+The output is plain text with a breakdown of each node's top hot
+threads. Parameters allowed are:
+
+[horizontal]
+`threads`:: number of hot threads to provide, defaults to 3.
+`interval`:: the time interval between the first and the second sampling
+    of the threads. Defaults to 500ms.
+`type`:: The type to sample, defaults to `cpu`, but supports `wait` and
+    `block` to see hot threads that are in wait or block state.
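+
+For example, to sample the top 5 hot threads in wait state:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_nodes/hot_threads?threads=5&interval=1s&type=wait'
+--------------------------------------------------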
diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc
new file mode 100644
index 0000000..96b5fb5
--- /dev/null
+++ b/docs/reference/cluster/nodes-info.asciidoc
@@ -0,0 +1,93 @@
+[[cluster-nodes-info]]
+== Nodes Info
+
+The cluster nodes info API allows you to retrieve information about one
+or more (or all) of the cluster's nodes.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/_nodes'
+curl -XGET 'http://localhost:9200/_nodes/nodeId1,nodeId2'
+--------------------------------------------------
+
+The first command retrieves information of all the nodes in the cluster.
+The second command selectively retrieves nodes information of only
+`nodeId1` and `nodeId2`. All the nodes selective options are explained
+<<cluster-nodes,here>>.
+
+By default, it just returns all attributes and core settings for a node.
+It also allows you to get only information on `settings`, `os`, `process`, `jvm`,
+`thread_pool`, `network`, `transport`, `http` and `plugin`:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/_nodes/process'
+curl -XGET 'http://localhost:9200/_nodes/_all/process'
+curl -XGET 'http://localhost:9200/_nodes/nodeId1,nodeId2/jvm,process'
+# same as above
+curl -XGET 'http://localhost:9200/_nodes/nodeId1,nodeId2/info/jvm,process'
+
+curl -XGET 'http://localhost:9200/_nodes/nodeId1,nodeId2/_all'
+--------------------------------------------------
+
+The `all` flag can be set to return all the information - or you can simply omit it.
+
+`plugin` - if set, the result will contain details about the loaded
+plugins per node:
+
+* `name`: plugin name
+* `description`: plugin description if any
+* `site`: `true` if the plugin is a site plugin
+* `jvm`: `true` if the plugin is a plugin running in the JVM
+* `url`: URL if the plugin is a site plugin
+
+The result will look similar to:
+
+[source,js]
+--------------------------------------------------
+{
+ "cluster_name" : "test-cluster-MacBook-Air-de-David.local",
+ "nodes" : {
+ "hJLXmY_NTrCytiIMbX4_1g" : {
+ "name" : "node4",
+ "transport_address" : "inet[/172.18.58.139:9303]",
+ "hostname" : "MacBook-Air-de-David.local",
+ "version" : "0.90.0.Beta2-SNAPSHOT",
+ "http_address" : "inet[/172.18.58.139:9203]",
+ "plugins" : [ {
+ "name" : "test-plugin",
+ "description" : "test-plugin description",
+ "site" : true,
+ "jvm" : false
+ }, {
+ "name" : "test-no-version-plugin",
+ "description" : "test-no-version-plugin description",
+ "site" : true,
+ "jvm" : false
+ }, {
+ "name" : "dummy",
+ "description" : "No description found for dummy.",
+ "url" : "/_plugin/dummy/",
+ "site" : false,
+ "jvm" : true
+ } ]
+ }
+ }
+}
+--------------------------------------------------
+
+If your `plugin` data is subject to change, use
+`plugins.info_refresh_interval` to change or disable the caching
+interval:
+
+[source,js]
+--------------------------------------------------
+# Change cache to 20 seconds
+plugins.info_refresh_interval: 20s
+
+# Infinite cache
+plugins.info_refresh_interval: -1
+
+# Disable cache
+plugins.info_refresh_interval: 0
+--------------------------------------------------
diff --git a/docs/reference/cluster/nodes-shutdown.asciidoc b/docs/reference/cluster/nodes-shutdown.asciidoc
new file mode 100644
index 0000000..65030a3
--- /dev/null
+++ b/docs/reference/cluster/nodes-shutdown.asciidoc
@@ -0,0 +1,57 @@
+[[cluster-nodes-shutdown]]
+== Nodes Shutdown
+
+The nodes shutdown API allows you to shut down one or more (or all) nodes in
+the cluster. Here is an example of shutting down the `_local` node the
+request is directed to:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown'
+--------------------------------------------------
+
+Specific node(s) can be shut down as well using their respective node ids
+(or other selection options as explained <<cluster-nodes,here>>):
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/_cluster/nodes/nodeId1,nodeId2/_shutdown'
+--------------------------------------------------
+
+The master (of the cluster) can also be shut down using:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_master/_shutdown'
+--------------------------------------------------
+
+Finally, all nodes can be shut down using one of the options below:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/_shutdown'
+
+$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_shutdown'
+
+$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_all/_shutdown'
+--------------------------------------------------
+
+[float]
+[[delay]]
+=== Delay
+
+By default, the shutdown will be executed after a 1 second delay (`1s`).
+The delay can be customized by setting the `delay` parameter in a time
+value format. For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown?delay=10s'
+--------------------------------------------------
+
+[float]
+=== Disable Shutdown
+
+The shutdown API can be disabled by setting `action.disable_shutdown` to
+`true` in the node configuration.
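+
+For example, to disable it in the configuration file (a minimal sketch,
+assuming the setting takes a boolean value):
+
+[source,js]
+--------------------------------------------------
+# Disable the shutdown API on this node (boolean value assumed)
+action.disable_shutdown: true
+--------------------------------------------------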
diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc
new file mode 100644
index 0000000..82d7b38
--- /dev/null
+++ b/docs/reference/cluster/nodes-stats.asciidoc
@@ -0,0 +1,97 @@
+[[cluster-nodes-stats]]
+== Nodes Stats
+
+[float]
+=== Nodes statistics
+
+The cluster nodes stats API allows you to retrieve statistics for one or
+more (or all) of the nodes in the cluster.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/_nodes/stats'
+curl -XGET 'http://localhost:9200/_nodes/nodeId1,nodeId2/stats'
+--------------------------------------------------
+
+The first command retrieves stats for all the nodes in the cluster. The
+second command selectively retrieves stats for only `nodeId1` and
+`nodeId2`. All of the node selection options are explained
+<<cluster-nodes,here>>.
+
+By default, all stats are returned. You can limit this by combining any
+of `indices`, `os`, `process`, `jvm`, `network`, `transport`, `http`,
+`fs`, `breaker` and `thread_pool`. For example:
+
+[horizontal]
+`indices`::
+    Indices stats about size, document count, indexing and
+    deletion times, search times, field cache size, merges and flushes
+
+`fs`::
+ File system information, data path, free disk space, read/write
+ stats
+
+`http`::
+ HTTP connection information
+
+`jvm`::
+ JVM stats, memory pool information, garbage collection, buffer
+ pools
+
+`network`::
+ TCP information
+
+`os`::
+ Operating system stats, load average, cpu, mem, swap
+
+`process`::
+ Process statistics, memory consumption, cpu usage, open
+ file descriptors
+
+`thread_pool`::
+ Statistics about each thread pool, including current
+ size, queue and rejected tasks
+
+`transport`::
+ Transport statistics about sent and received bytes in
+ cluster communication
+
+`breaker`::
+ Statistics about the field data circuit breaker
+
+`clear`::
+    Clears all the flags (first). Useful if you only want to
+    retrieve specific stats.
+
+[source,js]
+--------------------------------------------------
+# return just os
+curl -XGET 'http://localhost:9200/_nodes/stats/os'
+# return just os and process
+curl -XGET 'http://localhost:9200/_nodes/stats/os,process'
+# specific type endpoint
+curl -XGET 'http://localhost:9200/_nodes/stats/process'
+curl -XGET 'http://localhost:9200/_nodes/10.0.0.1/stats/process'
+--------------------------------------------------
+
+The `all` flag can be set to return all the stats.
+
+[float]
+[[field-data]]
+=== Field data statistics
+
+You can get information about field data memory usage at the node
+level or at the index level.
+
+[source,js]
+--------------------------------------------------
+# Node Stats
+curl localhost:9200/_nodes/stats/indices/field1,field2?pretty
+
+# Indices Stats
+curl localhost:9200/_stats/fielddata/field1,field2?pretty
+
+# You can use wildcards for field names
+curl localhost:9200/_stats/fielddata/field*?pretty
+curl localhost:9200/_nodes/stats/indices/field*?pretty
+--------------------------------------------------
diff --git a/docs/reference/cluster/pending.asciidoc b/docs/reference/cluster/pending.asciidoc
new file mode 100644
index 0000000..0e018e4
--- /dev/null
+++ b/docs/reference/cluster/pending.asciidoc
@@ -0,0 +1,44 @@
+[[cluster-pending]]
+== Pending cluster tasks
+
+The pending cluster tasks API returns a list of any cluster-level changes
+(e.g. create index, update mapping, allocate or fail shard) which have not yet
+been executed.
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/pending_tasks'
+--------------------------------------------------
+
+Usually this will return an empty list, as cluster-level changes are
+usually fast. However, if there are tasks queued up, the output will look
+something like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "tasks": [
+ {
+ "insert_order": 101,
+ "priority": "URGENT",
+ "source": "create-index [foo_9], cause [api]",
+ "time_in_queue_millis": 86,
+ "time_in_queue": "86ms"
+ },
+ {
+ "insert_order": 46,
+ "priority": "HIGH",
+ "source": "shard-started ([foo_2][1], node[tMTocMvQQgGCkj7QDHl3OA], [P], s[INITIALIZING]), reason [after recovery from gateway]",
+ "time_in_queue_millis": 842,
+ "time_in_queue": "842ms"
+ },
+ {
+ "insert_order": 45,
+ "priority": "HIGH",
+ "source": "shard-started ([foo_2][0], node[tMTocMvQQgGCkj7QDHl3OA], [P], s[INITIALIZING]), reason [after recovery from gateway]",
+ "time_in_queue_millis": 858,
+ "time_in_queue": "858ms"
+ }
+ ]
+}
+--------------------------------------------------
diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc
new file mode 100644
index 0000000..8348edb
--- /dev/null
+++ b/docs/reference/cluster/reroute.asciidoc
@@ -0,0 +1,68 @@
+[[cluster-reroute]]
+== Cluster Reroute
+
+The reroute command allows you to explicitly execute a cluster reroute
+allocation request containing specific commands. For example, a shard can
+be moved from one node to another explicitly, an allocation can be
+canceled, or an unassigned shard can be explicitly allocated on a
+specific node.
+
+Here is a short example of a simple reroute API call:
+
+[source,js]
+--------------------------------------------------
+
+curl -XPOST 'localhost:9200/_cluster/reroute' -d '{
+ "commands" : [ {
+ "move" :
+ {
+ "index" : "test", "shard" : 0,
+ "from_node" : "node1", "to_node" : "node2"
+ }
+ },
+ {
+ "allocate" : {
+ "index" : "test", "shard" : 1, "node" : "node3"
+ }
+ }
+ ]
+}'
+--------------------------------------------------
+
+An important aspect to remember is that once an allocation
+occurs, the cluster will aim at re-balancing its state back to an even
+state. For example, if the allocation includes moving a shard from
+`node1` to `node2`, in an `even` state, then another shard will be moved
+from `node2` to `node1` to even things out.
+
+The cluster can be set to disable allocations, which means that only
+explicitly issued allocation commands will be performed. Obviously, only
+once all commands have been applied will the cluster aim to re-balance
+its state.
+
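+For example, allocations can be disabled through the
+<<cluster-update-settings,cluster update settings>> API before issuing
+explicit reroute commands; a sketch using the
+`cluster.routing.allocation.enable` setting (the `none` value is assumed
+here to disable all allocations):
+
+[source,js]
+--------------------------------------------------
+# "none" (assumed) disables all shard allocations
+curl -XPUT localhost:9200/_cluster/settings -d '{
+    "transient" : {
+        "cluster.routing.allocation.enable" : "none"
+    }
+}'
+--------------------------------------------------
+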
+Another option is to run the commands in `dry_run` mode (as a URI flag, or
+in the request body). This will cause the commands to be applied to the
+current cluster state, and the resulting cluster state to be returned after
+the commands (and re-balancing) have been applied.
+
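+For example, as a URI flag (reusing the `move` command from above):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_cluster/reroute?dry_run=true' -d '{
+    "commands" : [ {
+        "move" : {
+            "index" : "test", "shard" : 0,
+            "from_node" : "node1", "to_node" : "node2"
+        }
+    } ]
+}'
+--------------------------------------------------
+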
+The commands supported are:
+
+`move`::
+ Move a started shard from one node to another node. Accepts
+ `index` and `shard` for index name and shard number, `from_node` for the
+ node to move the shard `from`, and `to_node` for the node to move the
+ shard to.
+
+`cancel`::
+ Cancel allocation of a shard (or recovery). Accepts `index`
+ and `shard` for index name and shard number, and `node` for the node to
+ cancel the shard allocation on. It also accepts `allow_primary` flag to
+ explicitly specify that it is allowed to cancel allocation for a primary
+ shard.
+
+`allocate`::
+ Allocate an unassigned shard to a node. Accepts the
+ `index` and `shard` for index name and shard number, and `node` to
+ allocate the shard to. It also accepts `allow_primary` flag to
+ explicitly specify that it is allowed to explicitly allocate a primary
+ shard (might result in data loss).
diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc
new file mode 100644
index 0000000..3a18be2
--- /dev/null
+++ b/docs/reference/cluster/state.asciidoc
@@ -0,0 +1,67 @@
+[[cluster-state]]
+== Cluster State
+
+The cluster state API allows you to get comprehensive state information
+about the whole cluster.
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/state'
+--------------------------------------------------
+
+By default, the cluster state request is routed to the master node, to
+ensure that the latest cluster state is returned.
+For debugging purposes, you can retrieve the cluster state local to a
+particular node by adding `local=true` to the query string.
+
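+For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/state?local=true'
+--------------------------------------------------
+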
+[float]
+=== Response Filters
+
+As the cluster state can grow (depending on the number of shards and
+indices, your mappings, and templates), it is possible to filter the
+cluster state response by specifying the parts of interest in the URL.
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/state/{metrics}/{indices}'
+--------------------------------------------------
+
+`metrics` can be a comma-separated list of:
+
+`version`::
+ Shows the cluster state version.
+
+`master_node`::
+    Shows the elected `master_node` part of the response.
+
+`nodes`::
+    Shows the `nodes` part of the response.
+
+`routing_table`::
+    Shows the `routing_table` part of the response. If you supply a comma-separated list of indices, the returned output will only contain the indices listed.
+
+`metadata`::
+    Shows the `metadata` part of the response. If you supply a comma-separated list of indices, the returned output will only contain the indices listed.
+
+`blocks`::
+    Shows the `blocks` part of the response.
+
+In addition, the `index_templates` parameter can be specified, which returns the specified index templates only. This works only if `metadata` is requested.
+
+A couple of example calls:
+
+[source,js]
+--------------------------------------------------
+# return only metadata and routing_table data for specified indices
+$ curl -XGET 'http://localhost:9200/_cluster/state/metadata,routing_table/foo,bar'
+
+# return everything for these two indices
+$ curl -XGET 'http://localhost:9200/_cluster/state/_all/foo,bar'
+
+# Return only blocks data
+$ curl -XGET 'http://localhost:9200/_cluster/state/blocks'
+
+# Return only metadata and a specific index_template
+# You should use the dedicated template endpoint for this
+$ curl -XGET 'http://localhost:9200/_cluster/state/metadata?index_templates=template_1'
+--------------------------------------------------
+
diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc
new file mode 100644
index 0000000..43c3f7a
--- /dev/null
+++ b/docs/reference/cluster/stats.asciidoc
@@ -0,0 +1,168 @@
+[[cluster-stats]]
+== Cluster Stats
+
+The Cluster Stats API allows you to retrieve statistics from a cluster-wide perspective.
+The API returns basic index metrics (shard numbers, store size, memory usage) and
+information about the current nodes that form the cluster (number, roles, os, jvm
+versions, memory usage, cpu and installed plugins).
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/_cluster/stats?human'
+--------------------------------------------------
+
+Will return, for example:
+[source,js]
+--------------------------------------------------
+{
+ "cluster_name": "elasticsearch",
+ "status": "green",
+ "indices": {
+ "count": 3,
+ "shards": {
+ "total": 35,
+ "primaries": 15,
+ "replication": 1.333333333333333,
+ "index": {
+ "shards": {
+ "min": 10,
+ "max": 15,
+ "avg": 11.66666666666666
+ },
+ "primaries": {
+ "min": 5,
+ "max": 5,
+ "avg": 5
+ },
+ "replication": {
+ "min": 1,
+ "max": 2,
+ "avg": 1.3333333333333333
+ }
+ }
+ },
+ "docs": {
+ "count": 2,
+ "deleted": 0
+ },
+ "store": {
+ "size": "5.6kb",
+ "size_in_bytes": 5770,
+ "throttle_time": "0s",
+ "throttle_time_in_millis": 0
+ },
+ "fielddata": {
+ "memory_size": "0b",
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "filter_cache": {
+ "memory_size": "0b",
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "id_cache": {
+ "memory_size": "0b",
+ "memory_size_in_bytes": 0
+ },
+ "completion": {
+ "size": "0b",
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 2
+ }
+ },
+ "nodes": {
+ "count": {
+ "total": 2,
+ "master_only": 0,
+ "data_only": 0,
+ "master_data": 2,
+ "client": 0
+ },
+ "versions": [
+ "0.90.8"
+ ],
+ "os": {
+ "available_processors": 4,
+ "mem": {
+ "total": "8gb",
+ "total_in_bytes": 8589934592
+ },
+ "cpu": [
+ {
+ "vendor": "Intel",
+ "model": "MacBookAir5,2",
+ "mhz": 2000,
+ "total_cores": 4,
+ "total_sockets": 4,
+ "cores_per_socket": 16,
+ "cache_size": "256b",
+ "cache_size_in_bytes": 256,
+ "count": 1
+ }
+ ]
+ },
+ "process": {
+ "cpu": {
+ "percent": 3
+ },
+ "open_file_descriptors": {
+ "min": 200,
+ "max": 346,
+ "avg": 273
+ }
+ },
+ "jvm": {
+ "max_uptime": "24s",
+ "max_uptime_in_millis": 24054,
+ "version": [
+ {
+ "version": "1.6.0_45",
+ "vm_name": "Java HotSpot(TM) 64-Bit Server VM",
+ "vm_version": "20.45-b01-451",
+ "vm_vendor": "Apple Inc.",
+ "count": 2
+ }
+ ],
+ "mem": {
+ "heap_used": "38.3mb",
+ "heap_used_in_bytes": 40237120,
+ "heap_max": "1.9gb",
+ "heap_max_in_bytes": 2130051072
+ },
+ "threads": 89
+ },
+ "fs":
+ {
+ "total": "232.9gb",
+ "total_in_bytes": 250140434432,
+ "free": "31.3gb",
+ "free_in_bytes": 33705881600,
+ "available": "31.1gb",
+ "available_in_bytes": 33443737600,
+ "disk_reads": 21202753,
+ "disk_writes": 27028840,
+ "disk_io_op": 48231593,
+ "disk_read_size": "528gb",
+ "disk_read_size_in_bytes": 566980806656,
+ "disk_write_size": "617.9gb",
+ "disk_write_size_in_bytes": 663525366784,
+ "disk_io_size": "1145.9gb",
+ "disk_io_size_in_bytes": 1230506173440
+ },
+ "plugins": [
+ // all plugins installed on nodes
+ {
+ "name": "inquisitor",
+ "description": "",
+ "url": "/_plugin/inquisitor/",
+ "jvm": false,
+ "site": true
+ }
+ ]
+ }
+}
+--------------------------------------------------
+
diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc
new file mode 100644
index 0000000..4e60868
--- /dev/null
+++ b/docs/reference/cluster/update-settings.asciidoc
@@ -0,0 +1,209 @@
+[[cluster-update-settings]]
+== Cluster Update Settings
+
+Allows you to update cluster-wide settings. Updated settings can
+either be persistent (applied across restarts) or transient (will not
+survive a full cluster restart). Here is an example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+ "persistent" : {
+ "discovery.zen.minimum_master_nodes" : 2
+ }
+}'
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+ "transient" : {
+ "discovery.zen.minimum_master_nodes" : 2
+ }
+}'
+--------------------------------------------------
+
+The cluster responds with the updated settings. So the response for the
+last example will be:
+
+[source,js]
+--------------------------------------------------
+{
+ "persistent" : {},
+ "transient" : {
+ "discovery.zen.minimum_master_nodes" : "2"
+ }
+}
+--------------------------------------------------
+
+Cluster wide settings can be returned using:
+
+[source,js]
+--------------------------------------------------
+curl -XGET localhost:9200/_cluster/settings
+--------------------------------------------------
+
+There is a specific list of settings that can be updated, which includes:
+
+[float]
+[[cluster-settings]]
+=== Cluster settings
+
+[float]
+==== Routing allocation
+
+[float]
+===== Awareness
+
+`cluster.routing.allocation.awareness.attributes`::
+ See <<modules-cluster>>.
+
+`cluster.routing.allocation.awareness.force.*`::
+ See <<modules-cluster>>.
+
+[float]
+===== Balanced Shards
+
+`cluster.routing.allocation.balance.shard`::
+ Defines the weight factor for shards allocated on a node
+ (float). Defaults to `0.45f`.
+
+`cluster.routing.allocation.balance.index`::
+ Defines a factor to the number of shards per index allocated
+ on a specific node (float). Defaults to `0.5f`.
+
+`cluster.routing.allocation.balance.primary`::
+    Defines a weight factor for the number of primaries of a specific index
+    allocated on a node (float). Defaults to `0.05f`.
+
+`cluster.routing.allocation.balance.threshold`::
+    Minimal optimization value of operations that should be performed
+    (non-negative float). Defaults to `1.0f`.
+
+[float]
+===== Concurrent Rebalance
+
+`cluster.routing.allocation.cluster_concurrent_rebalance`::
+    Controls how many concurrent shard rebalances are allowed
+    cluster-wide. Defaults to `2` (integer); `-1` means
+    unlimited. See also <<modules-cluster>>.
+
+[float]
+===== Disable allocation
+
+added[1.0.0.RC1]
+
+All the disable allocation settings have been deprecated in favour of
+the `cluster.routing.allocation.enable` setting.
+
+`cluster.routing.allocation.disable_allocation`::
+ See <<modules-cluster>>.
+
+`cluster.routing.allocation.disable_replica_allocation`::
+ See <<modules-cluster>>.
+
+`cluster.routing.allocation.disable_new_allocation`::
+ See <<modules-cluster>>.
+
+[float]
+===== Enable allocation
+
+`cluster.routing.allocation.enable`::
+ See <<modules-cluster>>.
+
+[float]
+===== Throttling allocation
+
+`cluster.routing.allocation.node_initial_primaries_recoveries`::
+ See <<modules-cluster>>.
+
+`cluster.routing.allocation.node_concurrent_recoveries`::
+ See <<modules-cluster>>.
+
+[float]
+===== Filter allocation
+
+`cluster.routing.allocation.include.*`::
+ See <<modules-cluster>>.
+
+`cluster.routing.allocation.exclude.*`::
+ See <<modules-cluster>>.
+
+`cluster.routing.allocation.require.*`::
+ See <<modules-cluster>>.
+
+[float]
+==== Metadata
+
+`cluster.blocks.read_only`::
+    Make the whole cluster read-only (indices do not accept write operations) and disallow metadata modifications (creating or deleting indices).
+
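+For example, through the update settings API shown above (a sketch,
+assuming a boolean value):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+    "transient" : {
+        "cluster.blocks.read_only" : true
+    }
+}'
+--------------------------------------------------
+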
+[float]
+==== Discovery
+
+`discovery.zen.minimum_master_nodes`::
+ See <<modules-discovery-zen>>
+
+[float]
+==== Threadpools
+
+`threadpool.*`::
+ See <<modules-threadpool>>
+
+[float]
+[[cluster-index-settings]]
+=== Index settings
+
+[float]
+==== Index filter cache
+
+`indices.cache.filter.size`::
+ See <<index-modules-cache>>
+
+`indices.cache.filter.expire` (time)::
+ See <<index-modules-cache>>
+
+[float]
+==== TTL interval
+
+`indices.ttl.interval` (time)::
+ See <<mapping-ttl-field>>
+
+[float]
+==== Recovery
+
+`indices.recovery.concurrent_streams`::
+ See <<modules-indices>>
+
+`indices.recovery.file_chunk_size`::
+ See <<modules-indices>>
+
+`indices.recovery.translog_ops`::
+ See <<modules-indices>>
+
+`indices.recovery.translog_size`::
+ See <<modules-indices>>
+
+`indices.recovery.compress`::
+ See <<modules-indices>>
+
+`indices.recovery.max_bytes_per_sec`::
+ See <<modules-indices>>
+
+[float]
+==== Store level throttling
+
+`indices.store.throttle.type`::
+ See <<index-modules-store>>
+
+`indices.store.throttle.max_bytes_per_sec`::
+ See <<index-modules-store>>
+
+[float]
+[[logger]]
+=== Logger
+
+Logger levels can also be updated by using the `logger.` prefix. More
+settings will be allowed to be updated in the future.
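+
+For example, to raise the level of a logger (the logger name
+`indices.recovery` below is only an illustration):
+
+[source,js]
+--------------------------------------------------
+# The logger name is hypothetical; substitute the logger you care about
+curl -XPUT localhost:9200/_cluster/settings -d '{
+    "transient" : {
+        "logger.indices.recovery" : "DEBUG"
+    }
+}'
+--------------------------------------------------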
diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc
new file mode 100644
index 0000000..469bc19
--- /dev/null
+++ b/docs/reference/docs.asciidoc
@@ -0,0 +1,44 @@
+[[docs]]
+= Document APIs
+
+[partintro]
+--
+
+This section describes the following CRUD APIs:
+
+.Single document APIs
+* <<docs-index_>>
+* <<docs-get>>
+* <<docs-delete>>
+* <<docs-update>>
+
+.Multi-document APIs
+* <<docs-multi-get>>
+* <<docs-bulk>>
+* <<docs-bulk-udp>>
+* <<docs-delete-by-query>>
+
+NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single
+index name, or an `alias` which points to a single index.
+
+--
+
+include::docs/index_.asciidoc[]
+
+include::docs/get.asciidoc[]
+
+include::docs/delete.asciidoc[]
+
+include::docs/update.asciidoc[]
+
+include::docs/multi-get.asciidoc[]
+
+include::docs/bulk.asciidoc[]
+
+include::docs/delete-by-query.asciidoc[]
+
+include::docs/bulk-udp.asciidoc[]
+
+include::docs/termvectors.asciidoc[]
+
+include::docs/multi-termvectors.asciidoc[]
diff --git a/docs/reference/docs/bulk-udp.asciidoc b/docs/reference/docs/bulk-udp.asciidoc
new file mode 100644
index 0000000..74565a3
--- /dev/null
+++ b/docs/reference/docs/bulk-udp.asciidoc
@@ -0,0 +1,57 @@
+[[docs-bulk-udp]]
+== Bulk UDP API
+
+The Bulk UDP service listens over UDP for requests in the bulk format.
+The idea is to provide a low latency service that makes it easy to index
+data that is not of a critical nature.
+
+The Bulk UDP service is disabled by default, but can be enabled by
+setting `bulk.udp.enabled` to `true`.
+
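+For example, in the node configuration:
+
+[source,js]
+--------------------------------------------------
+bulk.udp.enabled: true
+--------------------------------------------------
+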
+The bulk UDP service performs internal bulk aggregation of the data and
+then flushes it based on several parameters:
+
+`bulk.udp.bulk_actions`::
+ The number of actions to flush a bulk after,
+ defaults to `1000`.
+
+`bulk.udp.bulk_size`::
+ The size of the current bulk request to flush
+ the request once exceeded, defaults to `5mb`.
+
+`bulk.udp.flush_interval`::
+    An interval after which the current
+    request is flushed, regardless of the above limits. Defaults to `5s`.
+
+`bulk.udp.concurrent_requests`::
+    The maximum number of in-flight bulk
+    requests allowed. Defaults to `4`.
+
+The allowed network settings are:
+
+`bulk.udp.host`::
+ The host to bind to, defaults to `network.host`
+ which defaults to any.
+
+`bulk.udp.port`::
+ The port to use, defaults to `9700-9800`.
+
+`bulk.udp.receive_buffer_size`::
+ The receive buffer size, defaults to `10mb`.
+
+Here is an example of how it can be used:
+
+[source,js]
+--------------------------------------------------
+> cat bulk.txt
+{ "index" : { "_index" : "test", "_type" : "type1" } }
+{ "field1" : "value1" }
+{ "index" : { "_index" : "test", "_type" : "type1" } }
+{ "field1" : "value1" }
+--------------------------------------------------
+
+[source,js]
+--------------------------------------------------
+> cat bulk.txt | nc -w 0 -u localhost 9700
+--------------------------------------------------
+
+
diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc
new file mode 100644
index 0000000..6db77b0
--- /dev/null
+++ b/docs/reference/docs/bulk.asciidoc
@@ -0,0 +1,182 @@
+[[docs-bulk]]
+== Bulk API
+
+The bulk API makes it possible to perform many index/delete operations
+in a single API call. This can greatly increase the indexing speed. The
+REST API endpoint is `/_bulk`, and it expects the following JSON
+structure:
+
+[source,js]
+--------------------------------------------------
+action_and_meta_data\n
+optional_source\n
+action_and_meta_data\n
+optional_source\n
+....
+action_and_meta_data\n
+optional_source\n
+--------------------------------------------------
+
+*NOTE*: the final line of data must end with a newline character `\n`.
+
+The possible actions are `index`, `create`, `delete` and `update`.
+`index` and `create` expect a source on the next
+line, and have the same semantics as the `op_type` parameter to the
+standard index API (i.e. create will fail if a document with the same
+index and type exists already, whereas index will add or replace a
+document as necessary). `delete` does not expect a source on the
+following line, and has the same semantics as the standard delete API.
+`update` expects that the partial doc, upsert, or script and its options
+are specified on the next line.
+
+If you're providing text file input to `curl`, you *must* use the
+`--data-binary` flag instead of plain `-d`. The latter doesn't preserve
+newlines. Example:
+
+[source,js]
+--------------------------------------------------
+$ cat requests
+{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+{ "field1" : "value1" }
+$ curl -s -XPOST localhost:9200/_bulk --data-binary @requests; echo
+{"took":7,"items":[{"create":{"_index":"test","_type":"type1","_id":"1","_version":1}}]}
+--------------------------------------------------
+
+Because this format uses literal `\n`'s as delimiters, please be sure
+that the JSON actions and sources are not pretty printed. Here is an
+example of a correct sequence of bulk commands:
+
+[source,js]
+--------------------------------------------------
+{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+{ "field1" : "value1" }
+{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
+{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1"} }
+{ "doc" : {"field2" : "value2"} }
+--------------------------------------------------
+
+In the above example `doc` for the `update` action is a partial
+document, that will be merged with the already stored document.
+
+The endpoints are `/_bulk`, `/{index}/_bulk`, and `/{index}/{type}/_bulk`.
+When the index or the index/type are provided, they will be used by
+default on bulk items that don't provide them explicitly.
+
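+For example (a sketch reusing the `requests` file idea from above, with
+the index and type omitted from the action line):
+
+[source,js]
+--------------------------------------------------
+$ cat requests
+{ "index" : { "_id" : "1" } }
+{ "field1" : "value1" }
+$ curl -s -XPOST localhost:9200/test/type1/_bulk --data-binary @requests; echo
+--------------------------------------------------
+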
+A note on the format: the idea here is to make processing as
+fast as possible. As some of the actions will be redirected to other
+shards on other nodes, only the `action_and_meta_data` line is parsed on
+the receiving node side.
+
+Client libraries using this protocol should strive to do
+something similar on the client side, and reduce buffering as much as
+possible.
+
+The response to a bulk action is a large JSON structure with the
+individual results of each action that was performed. The failure of a
+single action does not affect the remaining actions.
+
+There is no "correct" number of actions to perform in a single bulk
+call. You should experiment with different settings to find the optimum
+size for your particular workload.
+
+If using the HTTP API, make sure that the client does not send HTTP
+chunks, as this will slow things down.
+
+[float]
+[[bulk-versioning]]
+=== Versioning
+
+Each bulk item can include the version value using the
+`_version`/`version` field. It automatically follows the behavior of the
+index / delete operation based on the `_version` mapping. It also
+supports the `version_type`/`_version_type` field when using `external`
+versioning.
+
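+For example, a bulk item using `external` versioning:
+
+[source,js]
+--------------------------------------------------
+{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1", "_version" : 5, "_version_type" : "external" } }
+{ "field1" : "value1" }
+--------------------------------------------------
+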
+[float]
+[[bulk-routing]]
+=== Routing
+
+Each bulk item can include the routing value using the
+`_routing`/`routing` field. It automatically follows the behavior of the
+index / delete operation based on the `_routing` mapping.
+
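+For example:
+
+[source,js]
+--------------------------------------------------
+{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1", "_routing" : "kimchy" } }
+{ "field1" : "value1" }
+--------------------------------------------------
+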
+[float]
+[[bulk-parent]]
+=== Parent
+
+Each bulk item can include the parent value using the `_parent`/`parent`
+field. It automatically follows the behavior of the index / delete
+operation based on the `_parent` / `_routing` mapping.
+
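+For example (reusing the parent/child naming from the
+<<docs-index_,index API>> examples):
+
+[source,js]
+--------------------------------------------------
+{ "index" : { "_index" : "blogs", "_type" : "blog_tag", "_id" : "1122", "_parent" : "1111" } }
+{ "tag" : "something" }
+--------------------------------------------------
+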
+[float]
+[[bulk-timestamp]]
+=== Timestamp
+
+Each bulk item can include the timestamp value using the
+`_timestamp`/`timestamp` field. It automatically follows the behavior of
+the index operation based on the `_timestamp` mapping.
+
+[float]
+[[bulk-ttl]]
+=== TTL
+
+Each bulk item can include the ttl value using the `_ttl`/`ttl` field.
+It automatically follows the behavior of the index operation based on
+the `_ttl` mapping.
+
+[float]
+[[bulk-consistency]]
+=== Write Consistency
+
+When making bulk calls, you can require a minimum number of active
+shards in the partition through the `consistency` parameter. The values
+allowed are `one`, `quorum`, and `all`. It defaults to the node level
+setting of `action.write_consistency`, which in turn defaults to
+`quorum`.
+
+For example, in an index with N shards and 2 replicas, there will have to be
+at least 2 active shards within the relevant partition (`quorum`) for
+the operation to succeed. In an index with N shards and 1 replica, there
+will need to be a single shard active (in this case, `one` and `quorum`
+are the same).
+
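+For example, requiring all shard copies to be active:
+
+[source,js]
+--------------------------------------------------
+$ curl -s -XPOST 'localhost:9200/_bulk?consistency=all' --data-binary @requests; echo
+--------------------------------------------------
+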
+[float]
+[[bulk-refresh]]
+=== Refresh
+
+The `refresh` parameter can be set to `true` in order to refresh the
+relevant shards immediately after the bulk operation has occurred and
+make it searchable, instead of waiting for the normal refresh interval
+to expire. Setting it to `true` can trigger additional load, and may
+slow down indexing.
+
+[float]
+[[bulk-update]]
+=== Update
+
+When using the `update` action, `_retry_on_conflict` can be used as a field in
+the action itself (not in the extra payload line) to specify how many
+times an update should be retried in the case of a version conflict.
+
+The `update` action payload supports the following options: `doc`
+(partial document), `upsert`, `doc_as_upsert`, `script`, `params` (for
+script), `lang` (for script). See the <<docs-update,update API>> for details on
+the options. Curl example with update actions:
+
+[source,js]
+--------------------------------------------------
+{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
+{ "doc" : {"field" : "value"} }
+{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
+{ "script" : "ctx._source.counter += param1", "lang" : "js", "params" : {"param1" : 1}, "upsert" : {"counter" : 1}}
+{ "update" : {"_id" : "2", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
+{ "doc" : {"field" : "value"}, "doc_as_upsert" : true }
+--------------------------------------------------
+
+[float]
+[[bulk-security]]
+=== Security
+
+See <<url-access-control>>
diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc
new file mode 100644
index 0000000..93e1c05
--- /dev/null
+++ b/docs/reference/docs/delete-by-query.asciidoc
@@ -0,0 +1,150 @@
+[[docs-delete-by-query]]
+== Delete By Query API
+
+The delete by query API allows you to delete documents from one or more
+indices and one or more types based on a query. The query can either be
+provided using a simple query string as a parameter, or using the
+<<query-dsl,Query DSL>> defined within the request
+body. Here is an example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/_query?q=user:kimchy'
+
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/_query' -d '{
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+'
+--------------------------------------------------
+
+NOTE: The query being sent in the body must be nested in a `query` key, the same
+way the <<search-search,search api>> works. added[1.0.0.RC1,The query was previously the top-level object]
+
+Both of the above examples end up doing the same thing: deleting all
+tweets from the twitter index for a certain user. The result of the
+commands is:
+
+[source,js]
+--------------------------------------------------
+{
+ "_indices" : {
+ "twitter" : {
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note that delete by query bypasses versioning support. Also, deleting
+large chunks of the data in an index is not recommended; many
+times, it is better to simply reindex into a new index.
+
+[float]
+[[multiple-indices]]
+=== Multiple Indices and Types
+
+The delete by query API can be applied to multiple types within an
+index, and across multiple indices. For example, we can delete all
+documents across all types within the twitter index:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/_query?q=user:kimchy'
+--------------------------------------------------
+
+We can also delete within specific types:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet,user/_query?q=user:kimchy'
+--------------------------------------------------
+
+We can also delete all tweets with a certain tag across several indices
+(for example, when each user has their own index):
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/kimchy,elasticsearch/_query?q=tag:wow'
+--------------------------------------------------
+
+Or even delete across all indices:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/_all/_query?q=tag:wow'
+--------------------------------------------------
+
+[float]
+[[delete-by-query-parameters]]
+=== Request Parameters
+
+When executing a delete by query using the query parameter `q`, the
+query passed is a query string parsed by the Lucene query parser. There are
+additional parameters that can be passed:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Name |Description
+|df |The default field to use when no field prefix is defined within the
+query.
+
+|analyzer |The analyzer name to be used when analyzing the query string.
+
+|default_operator |The default operator to be used, can be `AND` or
+`OR`. Defaults to `OR`.
+|=======================================================================
+
+[float]
+[[request-body]]
+=== Request Body
+
+The delete by query API can use the <<query-dsl,Query
+DSL>> within its body in order to express the query whose matching
+documents should be deleted. The body content can also be passed
+as a REST parameter named `source`.
+
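+For example (a sketch using curl's `-G`/`--data-urlencode` flags to
+URL-encode the body into the `source` parameter):
+
+[source,js]
+--------------------------------------------------
+# -G moves the urlencoded data into the query string
+$ curl -G -XDELETE 'http://localhost:9200/twitter/tweet/_query' \
+    --data-urlencode 'source={"query":{"term":{"user":"kimchy"}}}'
+--------------------------------------------------
+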
+[float]
+[[delete-by-query-distributed]]
+=== Distributed
+
+The delete by query API is broadcast across all primary shards, and from
+there, replicated across all shard replicas.
+
+[float]
+[[delete-by-query-routing]]
+=== Routing
+
+The routing value (a comma-separated list of the routing values) can be
+specified to control which shards the delete by query request will be
+executed on.
+
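+For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/_query?routing=kimchy&q=user:kimchy'
+--------------------------------------------------
+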
+[float]
+[[replication-type]]
+=== Replication Type
+
+The replication of the operation can be done in an asynchronous manner
+to the replicas (the operation will return once it has been executed on
+the primary shard). The `replication` parameter can be set to `async`
+(defaults to `sync`) in order to enable it.
+
+[float]
+[[delete-by-query-consistency]]
+=== Write Consistency
+
+Controls whether the operation will be allowed to execute, based on the number
+of active shards within that partition (replication group). The values
+allowed are `one`, `quorum`, and `all`. The parameter to set it is
+`consistency`, and it defaults to the node level setting of
+`action.write_consistency` which in turn defaults to `quorum`.
+
+For example, in an index with N shards and 2 replicas, there will have to be
+at least 2 active shards within the relevant partition (`quorum`) for
+the operation to succeed. In an index with N shards and 1 replica, there
+will need to be a single shard active (in this case, `one` and `quorum`
+are the same).
diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc
new file mode 100644
index 0000000..c1db8b5
--- /dev/null
+++ b/docs/reference/docs/delete.asciidoc
@@ -0,0 +1,142 @@
+[[docs-delete]]
+== Delete API
+
+The delete API allows you to delete a typed JSON document from a specific
+index based on its id. The following example deletes the JSON document
+from an index called twitter, under a type called tweet, with id 1:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/1'
+--------------------------------------------------
+
+The result of the above delete operation is:
+
+[source,js]
+--------------------------------------------------
+{
+ "found" : true,
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_version" : 2
+}
+--------------------------------------------------
+
+[float]
+[[delete-versioning]]
+=== Versioning
+
+Each document indexed is versioned. When deleting a document, the
+`version` can be specified to make sure the relevant document we are
+trying to delete is actually being deleted and it has not changed in the
+meantime. Every write operation executed on a document, deletes included,
+causes its version to be incremented.
+
+[float]
+[[delete-routing]]
+=== Routing
+
+When indexing with the ability to control routing, the routing value
+should also be provided in order to delete a document. For
+example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/1?routing=kimchy'
+--------------------------------------------------
+
+The above will delete a tweet with id 1, but will be routed based on the
+user. Note that issuing a delete without the correct routing will cause the
+document not to be deleted.
+
+Many times, the routing value is not known when deleting a document. In
+those cases, when the `_routing` mapping is specified as `required` and
+no routing value is provided, the delete will be broadcast
+automatically to all shards.
+
+[float]
+[[delete-parent]]
+=== Parent
+
+The `parent` parameter can be set, which will basically be the same as
+setting the routing parameter.
+
+Note that deleting a parent document does not automatically delete its
+children. One way of deleting all child documents given a parent's id is
+to perform a <<docs-delete-by-query,delete by query>> on the child
+index with the automatically generated (and indexed)
+`_parent` field, which is in the format `parent_type#parent_id`.
+
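+For example (a sketch; the parent type name `blog` is assumed here for
+illustration):
+
+[source,js]
+--------------------------------------------------
+# "blog" is a hypothetical parent type name
+$ curl -XDELETE 'http://localhost:9200/blogs/blog_tag/_query' -d '{
+    "query" : {
+        "term" : { "_parent" : "blog#1111" }
+    }
+}'
+--------------------------------------------------
+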
+[float]
+[[delete-index-creation]]
+=== Automatic index creation
+
+The delete operation automatically creates an index if it has not been
+created before (check out the <<indices-create-index,create index API>>
+for manually creating an index), and also automatically creates a
+dynamic type mapping for the specific type if it has not been created
+before (check out the <<indices-put-mapping,put mapping>>
+API for manually creating type mapping).
+
+[float]
+[[delete-distributed]]
+=== Distributed
+
+The delete operation gets hashed into a specific shard id. It then gets
+redirected into the primary shard within that id group, and replicated
+(if needed) to shard replicas within that id group.
+
+[float]
+[[delete-replication]]
+=== Replication Type
+
+The replication of the operation can be done in an asynchronous manner
+to the replicas (the operation will return once it has been executed on
+the primary shard). The `replication` parameter can be set to `async`
+(defaults to `sync`) in order to enable it.
+
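+For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/1?replication=async'
+--------------------------------------------------
+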
+[float]
+[[delete-consistency]]
+=== Write Consistency
+
+Controls whether the operation will be allowed to execute, based on the number
+of active shards within that partition (replication group). The values
+allowed are `one`, `quorum`, and `all`. The parameter to set it is
+`consistency`, and it defaults to the node level setting of
+`action.write_consistency` which in turn defaults to `quorum`.
+
+For example, in an index with N shards and 2 replicas, there will have to be
+at least 2 active shards within the relevant partition (`quorum`) for
+the operation to succeed. In an index with N shards and 1 replica, there
+will need to be a single shard active (in this case, `one` and `quorum`
+are the same).
+
+[float]
+[[delete-refresh]]
+=== Refresh
+
+The `refresh` parameter can be set to `true` in order to refresh the
+relevant shard after the delete operation has occurred and make it
+searchable. Setting it to `true` should be done after careful thought
+and verification that this does not cause a heavy load on the system
+(and slow down indexing).
+
+[float]
+[[delete-timeout]]
+=== Timeout
+
+The primary shard assigned to perform the delete operation might not be
+available when the delete operation is executed. Some reasons for this
+might be that the primary shard is currently recovering from a gateway
+or undergoing relocation. By default, the delete operation will wait on
+the primary shard to become available for up to 1 minute before failing
+and responding with an error. The `timeout` parameter can be used to
+explicitly specify how long it waits. Here is an example of setting it
+to 5 minutes:
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/tweet/1?timeout=5m'
+--------------------------------------------------
diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc
new file mode 100644
index 0000000..cb8fb53
--- /dev/null
+++ b/docs/reference/docs/get.asciidoc
@@ -0,0 +1,214 @@
+[[docs-get]]
+== Get API
+
+The get API allows you to get a typed JSON document from the index based on
+its id. The following example gets a JSON document from an index called
+twitter, under a type called tweet, with id 1:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1'
+--------------------------------------------------
+
+The result of the above get operation is:
+
+[source,js]
+--------------------------------------------------
+{
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_version" : 1,
+ "found": true,
+ "_source" : {
+ "user" : "kimchy",
+ "postDate" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+ }
+}
+--------------------------------------------------
+
+The above result includes the `_index`, `_type`, `_id` and `_version`
+of the document we wish to retrieve, including the actual `_source`
+of the document if it could be found (as indicated by the `found`
+field in the response).
+
+The API also allows you to check for the existence of a document using
+`HEAD`, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XHEAD -i 'http://localhost:9200/twitter/tweet/1'
+--------------------------------------------------
+
+[float]
+[[realtime]]
+=== Realtime
+
+By default, the get API is realtime, and is not affected by the refresh
+rate of the index (when data will become visible for search).
+
+In order to disable realtime GET, one can either set the `realtime`
+parameter to `false`, or change the global default by setting
+`action.get.realtime` to `false` in the node configuration.
+
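+For example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?realtime=false'
+--------------------------------------------------
+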
+When getting a document, one can specify `fields` to fetch from it. They
+will, when possible, be fetched as stored fields (fields mapped as
+stored in the mapping). When using realtime GET, there is no notion of
+stored fields (at least for a period of time, basically, until the next
+flush), so they will be extracted from the source itself (note, even if
+source is not enabled). It is a good practice to assume that the fields
+will be loaded from source when using realtime GET, even if the fields
+are stored.
+
+[float]
+[[type]]
+=== Optional Type
+
+The get API allows for `_type` to be optional. Set it to `_all` in order
+to fetch the first document matching the id across all types.
+
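+For example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/_all/1'
+--------------------------------------------------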
+
+[float]
+[[get-source-filtering]]
+=== Source filtering
+
+added[1.0.0.Beta1]
+
+By default, the get operation returns the contents of the `_source` field unless
+you have used the `fields` parameter or if the `_source` field is disabled.
+You can turn off `_source` retrieval by using the `_source` parameter:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?_source=false'
+--------------------------------------------------
+
+If you only need one or two fields from the complete `_source`, you can use the `_source_include`
+& `_source_exclude` parameters to include or filter out the parts you need. This can be especially helpful
+with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list
+of fields or wildcard expressions. Example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?_source_include=*.id&_source_exclude=entities'
+--------------------------------------------------
+
+If you only want to specify includes, you can use a shorter notation:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?_source=*.id,retweeted'
+--------------------------------------------------
+
+
+[float]
+[[get-fields]]
+=== Fields
+
+The get operation allows specifying a set of stored fields that will be
+returned by passing the `fields` parameter. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?fields=title,content'
+--------------------------------------------------
+
+For backward compatibility, if the requested fields are not stored, they will be fetched
+from the `_source` (parsed and extracted). This functionality has been replaced by the
+<<get-source-filtering,source filtering>> parameter.
+
+Field values fetched from the document itself are always returned as an array. Metadata fields like the `_routing` and
+`_parent` fields are never returned as an array.
+
+Also, only leaf fields can be returned via the `fields` option. Object fields can't be returned, and such requests
+will fail.
+
+[float]
+[[_source]]
+=== Getting the _source directly
+
+Use the `/{index}/{type}/{id}/_source` endpoint to get
+just the `_source` field of the document,
+without any additional content around it. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1/_source'
+--------------------------------------------------
+
+You can also use the same source filtering parameters to control which parts of the `_source` will be returned:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1/_source?_source_include=*.id&_source_exclude=entities'
+--------------------------------------------------
+
+Note that there is also a HEAD variant of the `_source` endpoint to efficiently test for document existence.
+Curl example:
+
+[source,js]
+--------------------------------------------------
+curl -XHEAD -i 'http://localhost:9200/twitter/tweet/1/_source'
+--------------------------------------------------
+
+[float]
+[[get-routing]]
+=== Routing
+
+When indexing with the ability to control routing, the routing value
+should also be provided in order to get a document. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?routing=kimchy'
+--------------------------------------------------
+
+The above will get a tweet with id 1, but will be routed based on the
+user. Note that issuing a get without the correct routing will cause the
+document not to be fetched.
+
+[float]
+[[preference]]
+=== Preference
+
+Controls a `preference` for which shard replicas to execute the get
+request on. By default, the operation is randomized between the shard
+replicas.
+
+The `preference` can be set to:
+
+`_primary`::
+    The operation will be executed only on primary
+    shards.
+
+`_local`::
+ The operation will prefer to be executed on a local
+ allocated shard if possible.
+
+Custom (string) value::
+ A custom value will be used to guarantee that
+ the same shards will be used for the same custom value. This can help
+ with "jumping values" when hitting different shards in different refresh
+ states. A sample value can be something like the web session id, or the
+ user name.
+
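+For example, to prefer a locally allocated shard:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1?preference=_local'
+--------------------------------------------------
+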
+[float]
+[[get-refresh]]
+=== Refresh
+
+The `refresh` parameter can be set to `true` in order to refresh the
+relevant shard before the get operation and make it searchable. Setting
+it to `true` should be done after careful thought and verification that
+this does not cause a heavy load on the system (and slows down
+indexing).
+
+[float]
+[[get-distributed]]
+=== Distributed
+
+The get operation gets hashed into a specific shard id. It then gets
+redirected to one of the replicas within that shard id and returns the
+result. The replicas are the primary shard and its replicas within that
+shard id group. This means that the more replicas we have, the
+better GET scaling we will have.
diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc
new file mode 100644
index 0000000..6c93e82
--- /dev/null
+++ b/docs/reference/docs/index_.asciidoc
@@ -0,0 +1,362 @@
+[[docs-index_]]
+== Index API
+
+The index API adds or updates a typed JSON document in a specific index,
+making it searchable. The following example inserts the JSON document
+into the "twitter" index, under a type called "tweet" with an id of 1:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+The result of the above index operation is:
+
+[source,js]
+--------------------------------------------------
+{
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_version" : 1,
+ "created" : true
+}
+--------------------------------------------------
+
+[float]
+[[index-creation]]
+=== Automatic Index Creation
+
+The index operation automatically creates an index if it has not been
+created before (check out the
+<<indices-create-index,create index API>> for manually
+creating an index), and also automatically creates a
+dynamic type mapping for the specific type if one has not yet been
+created (check out the <<indices-put-mapping,put mapping>>
+API for manually creating a type mapping).
+
+The mapping itself is very flexible and is schema-free. New fields and
+objects will automatically be added to the mapping definition of the
+type specified. Check out the <<mapping,mapping>>
+section for more information on mapping definitions.
+
+Note that the format of the JSON document can also include the type (very handy
+when using JSON mappers) if the `index.mapping.allow_type_wrapper` setting is
+set to true, for example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter' -d '{
+ "settings": {
+ "index": {
+ "mapping.allow_type_wrapper": true
+ }
+ }
+}'
+{"acknowledged":true}
+
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
+ "tweet" : {
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+ }
+}'
+--------------------------------------------------
+
+Automatic index creation can be disabled by setting
+`action.auto_create_index` to `false` in the config file of all nodes.
+Automatic mapping creation can be disabled by setting
+`index.mapper.dynamic` to `false` in the config files of all nodes (or
+on the specific index settings).
+
+Automatic index creation can include a pattern-based white/black list,
+for example, set `action.auto_create_index` to `+aaa*,-bbb*,+ccc*,-*` (+
+meaning allowed, and - meaning disallowed).
+
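+For example, in the configuration file (a sketch combining the settings
+described above):
+
+[source,js]
+--------------------------------------------------
+# Only allow automatic creation of indices matching aaa* or ccc*
+action.auto_create_index: +aaa*,-bbb*,+ccc*,-*
+
+# Disable automatic (dynamic) mapping creation
+index.mapper.dynamic: false
+--------------------------------------------------
+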
+[float]
+[[index-versioning]]
+=== Versioning
+
+Each indexed document is given a version number. The associated
+`version` number is returned as part of the response to the index API
+request. The index API optionally allows for
+http://en.wikipedia.org/wiki/Optimistic_concurrency_control[optimistic
+concurrency control] when the `version` parameter is specified. This
+will control the version of the document the operation is intended to be
+executed against. A good example of a use case for versioning is
+performing a transactional read-then-update. Specifying a `version` from
+the document initially read ensures no changes have happened in the
+meantime (when reading in order to update, it is recommended to set
+`preference` to `_primary`). For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/twitter/tweet/1?version=2' -d '{
+ "message" : "elasticsearch now has versioning support, double cool!"
+}'
+--------------------------------------------------
+
+*NOTE:* versioning is completely real time, and is not affected by the
+near real time aspects of search operations. If no version is provided,
+then the operation is executed without any version checks.
+
+By default, internal versioning is used that starts at 1 and increments
+with each update, deletes included. Optionally, the version number can be
+supplemented with an external value (for example, if maintained in a
+database). To enable this functionality, `version_type` should be set to
+`external`. The value provided must be a numeric, long value greater than 0,
+and less than around 9.2e+18. When using the external version type, instead
+of checking for a matching version number, the system checks to see if
+the version number passed to the index request is greater than the
+version of the currently stored document. If true, the document will be
+indexed and the new version number used. If the value provided is less
+than or equal to the stored document's version number, a version
+conflict will occur and the index operation will fail.
+
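+For example, with external versioning (`123` is an arbitrary value
+maintained outside Elasticsearch):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/twitter/tweet/1?version=123&version_type=external' -d '{
+    "message" : "elasticsearch now has versioning support, double cool!"
+}'
+--------------------------------------------------
+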
+A nice side effect is that there is no need to maintain strict ordering
+of async indexing operations executed as a result of changes to a source
+database, as long as version numbers from the source database are used.
+Even the simple case of updating the elasticsearch index using data from
+a database is simplified if external versioning is used, as only the
+latest version will be used if the index operations are out of order for
+whatever reason.
+
+[float]
+[[operation-type]]
+=== Operation Type
+
+The index operation also accepts an `op_type` that can be used to force
+a `create` operation, allowing for "put-if-absent" behavior. When
+`create` is used, the index operation will fail if a document by that id
+already exists in the index.
+
+Here is an example of using the `op_type` parameter:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?op_type=create' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+Another option to specify `create` is to use the following uri:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1/_create' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+[float]
+=== Automatic ID Generation
+
+The index operation can be executed without specifying the id. In such a
+case, an id will be generated automatically. In addition, the `op_type`
+will automatically be set to `create`. Here is an example (note the
+*POST* used instead of *PUT*):
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/tweet/' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+The result of the above index operation is:
+
+[source,js]
+--------------------------------------------------
+{
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32",
+ "_version" : 1,
+ "created" : true
+}
+--------------------------------------------------
+
+[float]
+[[index-routing]]
+=== Routing
+
+By default, shard placement — or `routing` — is controlled by using a
+hash of the document's id value. For more explicit control, the value
+fed into the hash function used by the router can be directly specified
+on a per-operation basis using the `routing` parameter. For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/tweet?routing=kimchy' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+In the example above, the "tweet" document is routed to a shard based on
+the `routing` parameter provided: "kimchy".
+
+When setting up explicit mapping, the `_routing` field can be optionally
+used to direct the index operation to extract the routing value from the
+document itself. This does come at the (very minimal) cost of an
+additional document parsing pass. If the `_routing` mapping is defined,
+and set to be `required`, the index operation will fail if no routing
+value is provided or extracted.
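+
+As a sketch of such a mapping (assuming a `path` option naming the
+document field to extract the routing value from; the `user` field here is
+illustrative):
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/_mapping' -d '{
+    "tweet" : {
+        "_routing" : {
+            "required" : true,
+            "path" : "user"
+        }
+    }
+}'
+--------------------------------------------------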
+
+[float]
+[[parent-children]]
+=== Parents & Children
+
+A child document can be indexed by specifying its parent when indexing.
+For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT localhost:9200/blogs/blog_tag/1122?parent=1111 -d '{
+ "tag" : "something"
+}'
+--------------------------------------------------
+
+When indexing a child document, the routing value is automatically set
+to be the same as its parent, unless the routing value is explicitly
+specified using the `routing` parameter.
+
+[float]
+[[index-timestamp]]
+=== Timestamp
+
+A document can be indexed with a `timestamp` associated with it. The
+`timestamp` value of a document can be set using the `timestamp`
+parameter. For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT localhost:9200/twitter/tweet/1?timestamp=2009-11-15T14%3A12%3A12 -d '{
+ "user" : "kimchy",
+    "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+If the `timestamp` value is not provided externally or in the `_source`,
+the `timestamp` will be automatically set to the date the document was
+processed by the indexing chain. More information can be found on the
+<<mapping-timestamp-field,_timestamp mapping
+page>>.
+
+[float]
+[[index-ttl]]
+=== TTL
+
+A document can be indexed with a `ttl` (time to live) associated with
+it. Expired documents will be expunged automatically. The expiration
+date that will be set for a document with a provided `ttl` is relative
+to the `timestamp` of the document, meaning it can be based on the time
+of indexing or on any time provided. The provided `ttl` must be strictly
+positive and can be a number (in milliseconds) or any valid time value
+as shown in the following examples:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?ttl=86400000' -d '{
+ "user": "kimchy",
+ "message": "Trying out elasticsearch, so far so good?"
+}'
+--------------------------------------------------
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?ttl=1d' -d '{
+ "user": "kimchy",
+ "message": "Trying out elasticsearch, so far so good?"
+}'
+--------------------------------------------------
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
+ "_ttl": "1d",
+ "user": "kimchy",
+ "message": "Trying out elasticsearch, so far so good?"
+}'
+--------------------------------------------------
+
+More information can be found on the
+<<mapping-ttl-field,_ttl mapping page>>.
+
+[float]
+[[index-distributed]]
+=== Distributed
+
+The index operation is directed to the primary shard based on its route
+(see the Routing section above) and performed on the actual node
+containing this shard. After the primary shard completes the operation,
+if needed, the update is distributed to applicable replicas.
+
+[float]
+[[index-consistency]]
+=== Write Consistency
+
+To prevent writes from taking place on the "wrong" side of a network
+partition, by default, index operations only succeed if a quorum
+(>replicas/2+1) of active shards are available. This default can be
+overridden on a node-by-node basis using the `action.write_consistency`
+setting. To alter this behavior per-operation, the `consistency` request
+parameter can be used.
+
+Valid write consistency values are `one`, `quorum`, and `all`.
+
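+For example, a sketch of relaxing the consistency requirement to `one` for
+a single index operation:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?consistency=one' -d '{
+    "user" : "kimchy",
+    "post_date" : "2009-11-15T14:12:12",
+    "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+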
+[float]
+[[index-replication]]
+=== Asynchronous Replication
+
+By default, the index operation only returns after all shards within the
+replication group have indexed the document (sync replication). To
+enable asynchronous replication, causing the replication process to take
+place in the background, set the `replication` parameter to `async`.
+When asynchronous replication is used, the index operation will return
+as soon as the operation succeeds on the primary shard.
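+
+For example, a sketch of an index operation using asynchronous replication:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?replication=async' -d '{
+    "user" : "kimchy",
+    "post_date" : "2009-11-15T14:12:12",
+    "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------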
+
+[float]
+[[index-refresh]]
+=== Refresh
+
+To refresh the index immediately after the operation occurs, so that the
+document appears in search results immediately, the `refresh` parameter
+can be set to `true`. Setting this option to `true` should *ONLY* be
+done after careful thought and verification that it does not lead to
+poor performance, both from an indexing and a search standpoint. Note,
+getting a document using the get API is completely realtime.
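+
+For example, a sketch of an index operation that makes the document
+immediately visible to search:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?refresh=true' -d '{
+    "user" : "kimchy",
+    "post_date" : "2009-11-15T14:12:12",
+    "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------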
+
+[float]
+[[timeout]]
+=== Timeout
+
+The primary shard assigned to perform the index operation might not be
+available when the index operation is executed. Some reasons for this
+might be that the primary shard is currently recovering from a gateway
+or undergoing relocation. By default, the index operation will wait on
+the primary shard to become available for up to 1 minute before failing
+and responding with an error. The `timeout` parameter can be used to
+explicitly specify how long it waits. Here is an example of setting it
+to 5 minutes:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?timeout=5m' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc
new file mode 100644
index 0000000..d189a95
--- /dev/null
+++ b/docs/reference/docs/multi-get.asciidoc
@@ -0,0 +1,176 @@
+[[docs-multi-get]]
+== Multi Get API
+
+The multi get API allows you to retrieve multiple documents based on an index, type
+(optional) and id (and possibly routing). The response includes a `docs`
+array with all the fetched documents, each element similar in structure
+to a document provided by the <<docs-get,get>>
+API. Here is an example:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/_mget' -d '{
+ "docs" : [
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "1"
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "2"
+ }
+ ]
+}'
+--------------------------------------------------
+
+The `mget` endpoint can also be used against an index (in which case the
+index is not required in the body):
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/test/_mget' -d '{
+ "docs" : [
+ {
+ "_type" : "type",
+ "_id" : "1"
+ },
+ {
+ "_type" : "type",
+ "_id" : "2"
+ }
+ ]
+}'
+--------------------------------------------------
+
+And against a type:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/test/type/_mget' -d '{
+ "docs" : [
+ {
+ "_id" : "1"
+ },
+ {
+ "_id" : "2"
+ }
+ ]
+}'
+--------------------------------------------------
+
+In which case, the `ids` element can be used directly to simplify the
+request:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/test/type/_mget' -d '{
+ "ids" : ["1", "2"]
+}'
+--------------------------------------------------
+
+[float]
+[[mget-source-filtering]]
+=== Source filtering
+
+added[1.0.0.Beta1]
+
+By default, the `_source` field will be returned for every document (if stored).
+Similar to the <<get-source-filtering,get>> API, you can retrieve only parts of
+the `_source` (or not at all) by using the `_source` parameter. You can also use
+the url parameters `_source`, `_source_include` & `_source_exclude` to specify defaults,
+which will be used when there are no per-document instructions.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/_mget' -d '{
+ "docs" : [
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "1",
+ "_source" : false
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "2",
+ "_source" : ["field3", "field4"]
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "3",
+ "_source" : {
+ "include": ["user"],
+        "exclude": ["user.location"]
+ }
+ }
+ ]
+}'
+--------------------------------------------------
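+
+As a sketch of the url parameter defaults mentioned above (the field names
+are illustrative), documents listed without their own `_source`
+instructions fall back to the url setting:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/test/type/_mget?_source=field1,field2' -d '{
+    "ids" : ["1", "2"]
+}'
+--------------------------------------------------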
+
+
+[float]
+[[mget-fields]]
+=== Fields
+
+Specific stored fields can be specified for retrieval per document, similar to the <<get-fields,fields>> parameter of the Get API.
+For example:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/_mget' -d '{
+ "docs" : [
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "1",
+ "fields" : ["field1", "field2"]
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "2",
+ "fields" : ["field3", "field4"]
+ }
+ ]
+}'
+--------------------------------------------------
+
+[float]
+[[mget-routing]]
+=== Routing
+
+You can also specify a routing value as a parameter:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/_mget?routing=key1' -d '{
+ "docs" : [
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "1",
+ "_routing" : "key2"
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "2"
+ }
+ ]
+}'
+--------------------------------------------------
+
+In this example, document `test/type/2` will be fetched from the shard corresponding to routing key `key1`, but
+document `test/type/1` will be fetched from the shard corresponding to routing key `key2`.
+
+[float]
+[[mget-security]]
+=== Security
+
+See <<url-access-control>>.
diff --git a/docs/reference/docs/multi-termvectors.asciidoc b/docs/reference/docs/multi-termvectors.asciidoc
new file mode 100644
index 0000000..207fee8
--- /dev/null
+++ b/docs/reference/docs/multi-termvectors.asciidoc
@@ -0,0 +1,93 @@
+[[docs-multi-termvectors]]
+== Multi termvectors API
+
+The multi termvectors API allows you to get multiple termvectors based on an index, type and id. The response includes a `docs`
+array with all the fetched termvectors, each element having the structure
+provided by the <<docs-termvectors,termvectors>>
+API. Here is an example:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/_mtermvectors' -d '{
+ "docs": [
+ {
+ "_index": "testidx",
+ "_type": "test",
+ "_id": "2",
+ "term_statistics": true
+ },
+ {
+ "_index": "testidx",
+ "_type": "test",
+ "_id": "1",
+ "fields": [
+ "text"
+ ]
+ }
+ ]
+}'
+--------------------------------------------------
+
+See the <<docs-termvectors,termvectors>> API for a description of possible parameters.
+
+The `_mtermvectors` endpoint can also be used against an index (in which
+case the index is not required in the body):
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/testidx/_mtermvectors' -d '{
+ "docs": [
+ {
+ "_type": "test",
+ "_id": "2",
+ "fields": [
+ "text"
+ ],
+ "term_statistics": true
+ },
+ {
+ "_type": "test",
+ "_id": "1"
+ }
+ ]
+}'
+--------------------------------------------------
+
+And against a type:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/testidx/test/_mtermvectors' -d '{
+ "docs": [
+ {
+ "_id": "2",
+ "fields": [
+ "text"
+ ],
+ "term_statistics": true
+ },
+ {
+ "_id": "1"
+ }
+ ]
+}'
+--------------------------------------------------
+
+If all requested documents are in the same index, share the same type, and use the same parameters, the request can be simplified:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/testidx/test/_mtermvectors' -d '{
+ "ids" : ["1", "2"],
+ "parameters": {
+ "fields": [
+ "text"
+ ],
+ "term_statistics": true,
+ …
+ }
+}'
+--------------------------------------------------
+
+Parameters can also be set by passing them as uri parameters (see <<docs-termvectors,termvectors>>). The uri parameters act as defaults and are overwritten by any parameter setting defined in the body.
diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc
new file mode 100644
index 0000000..b44f4fc
--- /dev/null
+++ b/docs/reference/docs/termvectors.asciidoc
@@ -0,0 +1,224 @@
+[[docs-termvectors]]
+== Term Vectors
+
+added[1.0.0.Beta1]
+
+Returns information and statistics on terms in the fields of a
+particular document as stored in the index.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1/_termvector?pretty=true'
+--------------------------------------------------
+
+Optionally, you can specify the fields for which the information is
+retrieved either with a parameter in the url
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/1/_termvector?fields=text,...'
+--------------------------------------------------
+
+or by adding the requested fields in the request body (see
+example below).
+
+[float]
+=== Return values
+
+Three types of values can be requested: _term information_, _term statistics_
+and _field statistics_. By default, all term information and field
+statistics are returned for all fields but no term statistics.
+
+[float]
+==== Term information
+
+ * term frequency in the field (always returned)
+ * term positions (`positions` : true)
+ * start and end offsets (`offsets` : true)
+ * term payloads (`payloads` : true), as base64 encoded bytes
+
+If the requested information wasn't stored in the index, it will be
+omitted without further warning. See <<mapping-types,type mapping>>
+for how to configure your index to store term vectors.
+
+[WARNING]
+======
+Start and end offsets assume UTF-16 encoding is being used. If you want to use
+these offsets in order to get the original text that produced this token, you
+should make sure that the string you are taking a sub-string of is also encoded
+using UTF-16.
+======
+
+[float]
+==== Term statistics
+
+Setting `term_statistics` to `true` (default is `false`) will
+return
+
+ * total term frequency (how often a term occurs in all documents) +
+ * document frequency (the number of documents containing the current
+ term)
+
+By default these values are not returned since term statistics can
+have a serious performance impact.
+
+[float]
+==== Field statistics
+
+Setting `field_statistics` to `false` (default is `true`) will
+omit:
+
+ * document count (how many documents contain this field)
+ * sum of document frequencies (the sum of document frequencies for all
+ terms in this field)
+ * sum of total term frequencies (the sum of total term frequencies of
+ each term in this field)
+
+[float]
+=== Behaviour
+
+The term and field statistics are not accurate. Deleted documents
+are not taken into account. The information is only retrieved for the
+shard the requested document resides in. The term and field statistics
+are therefore only useful as relative measures whereas the absolute
+numbers have no meaning in this context.
+
+[float]
+=== Example
+
+First, we create an index that stores term vectors, payloads, etc.:
+
+[source,js]
+--------------------------------------------------
+curl -s -XPUT 'http://localhost:9200/twitter/' -d '{
+ "mappings": {
+ "tweet": {
+ "properties": {
+ "text": {
+ "type": "string",
+ "term_vector": "with_positions_offsets_payloads",
+ "store" : true,
+ "index_analyzer" : "fulltext_analyzer"
+ },
+ "fullname": {
+ "type": "string",
+ "term_vector": "with_positions_offsets_payloads",
+ "index_analyzer" : "fulltext_analyzer"
+ }
+ }
+ }
+ },
+ "settings" : {
+ "index" : {
+ "number_of_shards" : 1,
+ "number_of_replicas" : 0
+ },
+ "analysis": {
+ "analyzer": {
+ "fulltext_analyzer": {
+ "type": "custom",
+ "tokenizer": "whitespace",
+ "filter": [
+ "lowercase",
+ "type_as_payload"
+ ]
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+Second, we add some documents:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?pretty=true' -d '{
+ "fullname" : "John Doe",
+ "text" : "twitter test test test "
+}'
+
+curl -XPUT 'http://localhost:9200/twitter/tweet/2?pretty=true' -d '{
+ "fullname" : "Jane Doe",
+ "text" : "Another twitter test ..."
+}'
+--------------------------------------------------
+
+The following request returns all information and statistics for field
+`text` in document `1` (John Doe):
+
+[source,js]
+--------------------------------------------------
+
+curl -XGET 'http://localhost:9200/twitter/tweet/1/_termvector?pretty=true' -d '{
+ "fields" : ["text"],
+ "offsets" : true,
+ "payloads" : true,
+ "positions" : true,
+ "term_statistics" : true,
+ "field_statistics" : true
+}'
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+
+{
+ "_id": "1",
+ "_index": "twitter",
+ "_type": "tweet",
+ "_version": 1,
+ "found": true,
+ "term_vectors": {
+ "text": {
+ "field_statistics": {
+ "doc_count": 2,
+ "sum_doc_freq": 6,
+ "sum_ttf": 8
+ },
+ "terms": {
+ "test": {
+ "doc_freq": 2,
+ "term_freq": 3,
+ "tokens": [
+ {
+ "end_offset": 12,
+ "payload": "d29yZA==",
+ "position": 1,
+ "start_offset": 8
+ },
+ {
+ "end_offset": 17,
+ "payload": "d29yZA==",
+ "position": 2,
+ "start_offset": 13
+ },
+ {
+ "end_offset": 22,
+ "payload": "d29yZA==",
+ "position": 3,
+ "start_offset": 18
+ }
+ ],
+ "ttf": 4
+ },
+ "twitter": {
+ "doc_freq": 2,
+ "term_freq": 1,
+ "tokens": [
+ {
+ "end_offset": 7,
+ "payload": "d29yZA==",
+ "position": 0,
+ "start_offset": 0
+ }
+ ],
+ "ttf": 2
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc
new file mode 100644
index 0000000..6c48a9b
--- /dev/null
+++ b/docs/reference/docs/update.asciidoc
@@ -0,0 +1,176 @@
+[[docs-update]]
+== Update API
+
+The update API allows you to update a document based on a provided script.
+The operation gets the document (collocated with the shard) from the
+index, runs the script (with optional script language and parameters),
+and indexes back the result (it can also delete the document or ignore
+the operation). It uses versioning to make sure no updates have happened
+during the "get" and "reindex".
+
+Note, this operation still means a full reindex of the document, it just
+removes some network roundtrips and reduces the chances of version
+conflicts between the get and the index. The `_source` field needs to be
+enabled for this feature to work.
+
+For example, let's index a simple doc:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/type1/1 -d '{
+ "counter" : 1,
+ "tags" : ["red"]
+}'
+--------------------------------------------------
+
+Now, we can execute a script that would increment the counter:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "script" : "ctx._source.counter += count",
+ "params" : {
+ "count" : 4
+ }
+}'
+--------------------------------------------------
+
+We can also add a tag to the list of tags (note, if the tag exists, it
+will still add it, since it's a list):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "script" : "ctx._source.tags += tag",
+ "params" : {
+ "tag" : "blue"
+ }
+}'
+--------------------------------------------------
+
+We can also add a new field to the document:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "script" : "ctx._source.text = \"some text\""
+}'
+--------------------------------------------------
+
+We can also remove a field from the document:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "script" : "ctx._source.remove(\"text\")"
+}'
+--------------------------------------------------
+
+And, we can delete the doc if the tags contain blue, or ignore (noop):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "script" : "ctx._source.tags.contains(tag) ? ctx.op = \"delete\" : ctx.op = \"none\"",
+ "params" : {
+ "tag" : "blue"
+ }
+}'
+--------------------------------------------------
+
+*Note*: Be aware of MVEL's handling of ternary operators and
+assignments. Assignment operations have lower precedence than the
+ternary operator. Compare the following statements:
+
+[source,js]
+--------------------------------------------------
+// Will NOT update the tags array
+ctx._source.tags.contains(tag) ? ctx.op = \"none\" : ctx._source.tags += tag
+// Will update
+ctx._source.tags.contains(tag) ? (ctx.op = \"none\") : ctx._source.tags += tag
+// Also works
+if (ctx._source.tags.contains(tag)) { ctx.op = \"none\" } else { ctx._source.tags += tag }
+--------------------------------------------------
+
+The update API also supports passing a partial document,
+which will be merged into the existing document (simple recursive merge,
+inner merging of objects, replacing core "keys/values" and arrays). For
+example:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "doc" : {
+ "name" : "new_name"
+ }
+}'
+--------------------------------------------------
+
+If both `doc` and `script` are specified, then `doc` is ignored. It is
+best to put the field pairs of the partial document in the script itself.
+
+There is also support for `upsert`. If the document does
+not already exist, the content of the `upsert` element will be used to
+index the fresh doc:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "script" : "ctx._source.counter += count",
+ "params" : {
+ "count" : 4
+ },
+ "upsert" : {
+ "counter" : 1
+ }
+}'
+--------------------------------------------------
+
+Lastly, it also supports `doc_as_upsert`, so that the
+provided document will be inserted if the document does not already
+exist. This reduces the amount of data that needs to be sent to
+elasticsearch.
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+ "doc" : {
+ "name" : "new_name"
+ },
+ "doc_as_upsert" : true
+}'
+--------------------------------------------------
+
+The update operation supports similar parameters as the index API,
+including:
+
+[horizontal]
+`routing`:: Sets the routing that will be used to route the
+ document to the relevant shard.
+
+`parent`:: Simply sets the routing.
+
+`timeout`:: Timeout waiting for a shard to become available.
+
+`replication`:: The replication type for the delete/index operation
+ (sync or async).
+
+`consistency`:: The write consistency of the index/delete operation.
+
+`refresh`:: Refresh the index immediately after the operation occurs,
+ so that the updated document appears in search results
+ immediately.
+
+`fields`:: Return the relevant fields from the updated document.
+            Supports `_source` to return the full updated
+            source.
+
+
+It also supports `retry_on_conflict`, which controls how many times to
+retry if there is a version conflict between getting the document and
+indexing / deleting it. Defaults to `0`.
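+
+For example, a sketch of a scripted update that retries up to 5 times on a
+version conflict:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update?retry_on_conflict=5' -d '{
+    "script" : "ctx._source.counter += count",
+    "params" : {
+        "count" : 4
+    }
+}'
+--------------------------------------------------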
+
+It also allows you to update the `ttl` of a document using `ctx._ttl` and
+the timestamp using `ctx._timestamp`. Note that if the timestamp is not
+updated and not extracted from the `_source`, it will be set to the
+update date.
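+
+For example, a sketch of a scripted update that sets a new `ttl` on the
+document (the `1h` value is illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{
+    "script" : "ctx._ttl = \"1h\""
+}'
+--------------------------------------------------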
diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc
new file mode 100644
index 0000000..6f7061f
--- /dev/null
+++ b/docs/reference/glossary.asciidoc
@@ -0,0 +1,190 @@
+[glossary]
+[[glossary]]
+= Glossary of terms
+
+[glossary]
+[[glossary-analysis]] analysis ::
+
+ Analysis is the process of converting <<glossary-text,full text>> to
+ <<glossary-term,terms>>. Depending on which analyzer is used, these phrases:
+ `FOO BAR`, `Foo-Bar`, `foo,bar` will probably all result in the
+ terms `foo` and `bar`. These terms are what is actually stored in
+ the index.
+ +
+ A full text query (not a <<glossary-term,term>> query) for `FoO:bAR` will
+ also be analyzed to the terms `foo`,`bar` and will thus match the
+ terms stored in the index.
+ +
+ It is this process of analysis (both at index time and at search time)
+ that allows elasticsearch to perform full text queries.
+ +
+ Also see <<glossary-text,text>> and <<glossary-term,term>>.
+
+[[glossary-cluster]] cluster ::
+
+ A cluster consists of one or more <<glossary-node,nodes>> which share the
+ same cluster name. Each cluster has a single master node which is
+ chosen automatically by the cluster and which can be replaced if the
+ current master node fails.
+
+[[glossary-document]] document ::
+
+ A document is a JSON document which is stored in elasticsearch. It is
+ like a row in a table in a relational database. Each document is
+ stored in an <<glossary-index,index>> and has a <<glossary-type,type>> and an
+ <<glossary-id,id>>.
+ +
+ A document is a JSON object (also known in other languages as a hash /
+ hashmap / associative array) which contains zero or more
+ <<glossary-field,fields>>, or key-value pairs.
+ +
+ The original JSON document that is indexed will be stored in the
+ <<glossary-source_field,`_source` field>>, which is returned by default when
+ getting or searching for a document.
+
+[[glossary-id]] id ::
+
+ The ID of a <<glossary-document,document>> identifies a document. The
+ `index/type/id` of a document must be unique. If no ID is provided,
+ then it will be auto-generated. (also see <<glossary-routing,routing>>)
+
+[[glossary-field]] field ::
+
+ A <<glossary-document,document>> contains a list of fields, or key-value
+ pairs. The value can be a simple (scalar) value (eg a string, integer,
+ date), or a nested structure like an array or an object. A field is
+ similar to a column in a table in a relational database.
+ +
+ The <<glossary-mapping,mapping>> for each field has a field _type_ (not to
+ be confused with document <<glossary-type,type>>) which indicates the type
+ of data that can be stored in that field, eg `integer`, `string`,
+ `object`. The mapping also allows you to define (amongst other things)
+ how the value for a field should be analyzed.
+
+[[glossary-index]] index ::
+
+ An index is like a _database_ in a relational database. It has a
+ <<glossary-mapping,mapping>> which defines multiple <<glossary-type,types>>.
+ +
+ An index is a logical namespace which maps to one or more
+ <<glossary-primary-shard,primary shards>> and can have zero or more
+ <<glossary-replica-shard,replica shards>>.
+
+[[glossary-mapping]] mapping ::
+
+ A mapping is like a _schema definition_ in a relational database. Each
+ <<glossary-index,index>> has a mapping, which defines each <<glossary-type,type>>
+ within the index, plus a number of index-wide settings.
+ +
+ A mapping can either be defined explicitly, or it will be generated
+ automatically when a document is indexed.
+
+[[glossary-node]] node ::
+
+ A node is a running instance of elasticsearch which belongs to a
+ <<glossary-cluster,cluster>>. Multiple nodes can be started on a single
+ server for testing purposes, but usually you should have one node per
+ server.
+ +
+ At startup, a node will use unicast (or multicast, if specified) to
+ discover an existing cluster with the same cluster name and will try
+ to join that cluster.
+
+[[glossary-primary-shard]] primary shard ::
+
+ Each document is stored in a single primary <<glossary-shard,shard>>. When
+ you index a document, it is indexed first on the primary shard, then
+ on all <<glossary-replica-shard,replicas>> of the primary shard.
+ +
+ By default, an <<glossary-index,index>> has 5 primary shards. You can
+ specify fewer or more primary shards to scale the number of
+ <<glossary-document,documents>> that your index can handle.
+ +
+ You cannot change the number of primary shards in an index, once the
+ index is created.
+ +
+ See also <<glossary-routing,routing>>
+
+[[glossary-replica-shard]] replica shard ::
+
+ Each <<glossary-primary-shard,primary shard>> can have zero or more
+ replicas. A replica is a copy of the primary shard, and has two
+ purposes:
+ +
+ 1. increase failover: a replica shard can be promoted to a primary
+ shard if the primary fails
+ 2. increase performance: get and search requests can be handled by
+ primary or replica shards.
+ +
+ By default, each primary shard has one replica, but the number of
+ replicas can be changed dynamically on an existing index. A replica
+ shard will never be started on the same node as its primary shard.
+
+[[glossary-routing]] routing ::
+
+ When you index a document, it is stored on a single
+ <<glossary-primary-shard,primary shard>>. That shard is chosen by hashing
+ the `routing` value. By default, the `routing` value is derived from
+ the ID of the document or, if the document has a specified parent
+ document, from the ID of the parent document (to ensure that child and
+ parent documents are stored on the same shard).
+ +
+ This value can be overridden by specifying a `routing` value at index
+ time, or a <<mapping-routing-field,routing
+ field>> in the <<glossary-mapping,mapping>>.
+
+[[glossary-shard]] shard ::
+
+ A shard is a single Lucene instance. It is a low-level “worker” unit
+ which is managed automatically by elasticsearch. An index is a logical
+ namespace which points to <<glossary-primary-shard,primary>> and
+ <<glossary-replica-shard,replica>> shards.
+ +
+ Other than defining the number of primary and replica shards that an
+ index should have, you never need to refer to shards directly.
+ Instead, your code should deal only with an index.
+ +
+ Elasticsearch distributes shards amongst all <<glossary-node,nodes>> in the
+ <<glossary-cluster,cluster>>, and can move shards automatically from one
+ node to another in the case of node failure, or the addition of new
+ nodes.
+
+[[glossary-source_field]] source field ::
+
+ By default, the JSON document that you index will be stored in the
+ `_source` field and will be returned by all get and search requests.
+ This allows you access to the original object directly from search
+ results, rather than requiring a second step to retrieve the object
+ from an ID.
+ +
+ Note: the exact JSON string that you indexed will be returned to you,
+ even if it contains invalid JSON. The contents of this field do not
+ indicate anything about how the data in the object has been indexed.
+
+[[glossary-term]] term ::
+
+ A term is an exact value that is indexed in elasticsearch. The terms
+ `foo`, `Foo`, `FOO` are NOT equivalent. Terms (i.e. exact values) can
+ be searched for using _term_ queries. +
+ See also <<glossary-text,text>> and <<glossary-analysis,analysis>>.
+
+[[glossary-text]] text ::
+
+ Text (or full text) is ordinary unstructured text, such as this
+ paragraph. By default, text will be <<glossary-analysis,analyzed>> into
+ <<glossary-term,terms>>, which is what is actually stored in the index.
+ +
+ Text <<glossary-field,fields>> need to be analyzed at index time in order to
+ be searchable as full text, and keywords in full text queries must be
+ analyzed at search time to produce (and search for) the same terms
+ that were generated at index time.
+ +
+ See also <<glossary-term,term>> and <<glossary-analysis,analysis>>.
+
+[[glossary-type]] type ::
+
+ A type is like a _table_ in a relational database. Each type has a
+ list of <<glossary-field,fields>> that can be specified for
+ <<glossary-document,documents>> of that type. The <<glossary-mapping,mapping>>
+ defines how each field in the document is analyzed.
+
diff --git a/docs/reference/images/Exponential.png b/docs/reference/images/Exponential.png
new file mode 100644
index 0000000..e4bab5c
--- /dev/null
+++ b/docs/reference/images/Exponential.png
Binary files differ
diff --git a/docs/reference/images/Gaussian.png b/docs/reference/images/Gaussian.png
new file mode 100644
index 0000000..ec88565
--- /dev/null
+++ b/docs/reference/images/Gaussian.png
Binary files differ
diff --git a/docs/reference/images/Linear.png b/docs/reference/images/Linear.png
new file mode 100644
index 0000000..38718ca
--- /dev/null
+++ b/docs/reference/images/Linear.png
Binary files differ
diff --git a/docs/reference/images/service-manager-win.png b/docs/reference/images/service-manager-win.png
new file mode 100644
index 0000000..7b99220
--- /dev/null
+++ b/docs/reference/images/service-manager-win.png
Binary files differ
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
new file mode 100644
index 0000000..49d6cc3
--- /dev/null
+++ b/docs/reference/index-modules.asciidoc
@@ -0,0 +1,81 @@
+[[index-modules]]
+= Index Modules
+
+[partintro]
+--
+Index Modules are modules created per index and control all aspects
+related to an index. Since the lifecycle of these modules is tied to
+that of the index, all the relevant module settings can be provided when
+creating an index (and doing so is actually the recommended way to
+configure an index).
+
+[float]
+[[index-modules-settings]]
+== Index Settings
+
+There are specific index level settings that are not associated with any
+specific module. These include:
+
+[[index-compound-format]]`index.compound_format`::
+
+ Should the compound file format be used (boolean setting).
+ The compound format was created to reduce the number of open
+ file handles when using file based storage. However, by default it is set
+ to `false` as the non-compound format gives better performance. It is important
+    that the OS is configured to give Elasticsearch ``enough'' file handles.
+ See <<file-descriptors>>.
++
+Alternatively, `compound_format` can be set to a number between `0` and
+`1`, where `0` means `false`, `1` means `true` and a number in between
+represents a percentage: if the merged segment is less than this
+percentage of the total index, then it is written in compound format,
+otherwise it is written in non-compound format.
+
+[[index-compound-on-flush]]`index.compound_on_flush`::
+
+    Should a new segment (created by indexing, not by merging) be written
+ in compound format or non-compound format? Defaults to `true`.
+ This is a dynamic setting.
+
+`index.refresh_interval`::
+ A time setting controlling how often the
+ refresh operation will be executed. Defaults to `1s`. Can be set to `-1`
+ in order to disable it.
+
+`index.shard.check_on_startup`::
+ Should shard consistency be checked upon opening.
+    When `true`, the shard will be checked, preventing it from being opened
+    if some segments appear to be corrupted.
+ When `fix`, the shard will also be checked but segments that were reported
+ as corrupted will be automatically removed.
+ Default value is `false`, which doesn't check shards.
+
+NOTE: Checking shards may take a lot of time on large indices.
+
+WARNING: Setting `index.shard.check_on_startup` to `fix` may result in data loss,
+ use with caution.
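+
+Several of these settings are dynamic. As a sketch, disabling the refresh
+interval on a live index (assuming an index named `test`) could look like:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/test/_settings' -d '{
+    "index" : {
+        "refresh_interval" : "-1"
+    }
+}'
+--------------------------------------------------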
+
+--
+
+include::index-modules/analysis.asciidoc[]
+
+include::index-modules/allocation.asciidoc[]
+
+include::index-modules/slowlog.asciidoc[]
+
+include::index-modules/merge.asciidoc[]
+
+include::index-modules/store.asciidoc[]
+
+include::index-modules/mapper.asciidoc[]
+
+include::index-modules/translog.asciidoc[]
+
+include::index-modules/cache.asciidoc[]
+
+include::index-modules/fielddata.asciidoc[]
+
+include::index-modules/codec.asciidoc[]
+
+include::index-modules/similarity.asciidoc[]
+
+
diff --git a/docs/reference/index-modules/allocation.asciidoc b/docs/reference/index-modules/allocation.asciidoc
new file mode 100644
index 0000000..c1a1618
--- /dev/null
+++ b/docs/reference/index-modules/allocation.asciidoc
@@ -0,0 +1,136 @@
+[[index-modules-allocation]]
+== Index Shard Allocation
+
+[float]
+[[shard-allocation-filtering]]
+=== Shard Allocation Filtering
+
+Allows control over the allocation of indices on nodes based on
+include/exclude filters. The filters can be set both on the index level
+and on the cluster level. Let's start with an example of setting it on
+the index level:
+
+Let's say we have 4 nodes, each with a specific attribute called `tag`
+associated with it (the attribute can have any name). Each
+node has a specific value associated with `tag`. Node 1 has a setting
+`node.tag: value1`, Node 2 a setting of `node.tag: value2`, and so on.
+
+We can create an index that will only deploy on nodes that have `tag`
+set to `value1` and `value2` by setting
+`index.routing.allocation.include.tag` to `value1,value2`. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index.routing.allocation.include.tag" : "value1,value2"
+}'
+--------------------------------------------------
+
+On the other hand, we can create an index that will be deployed on all
+nodes except for nodes with a `tag` of value `value3` by setting
+`index.routing.allocation.exclude.tag` to `value3`. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index.routing.allocation.exclude.tag" : "value3"
+}'
+--------------------------------------------------
+
+`index.routing.allocation.require.*` can be used to
+specify a number of rules, all of which MUST match in order for a shard
+to be allocated to a node. This is in contrast to `include` which will
+include a node if ANY rule matches.
+
+The `include`, `exclude` and `require` values can have generic simple
+matching wildcards, for example, `value1*`. A special attribute name
+called `_ip` can be used to match on node ip values.
+
+Obviously a node can have several attributes associated with it, and
+both the attribute name and value are controlled in the setting. For
+example, here is a sample of several node configurations:
+
+[source,js]
+--------------------------------------------------
+node.group1: group1_value1
+node.group2: group2_value4
+--------------------------------------------------
+
+In the same manner, `include`, `exclude` and `require` can work against
+several attributes, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+    "index.routing.allocation.include.group1" : "xxx",
+    "index.routing.allocation.include.group2" : "yyy",
+    "index.routing.allocation.exclude.group3" : "zzz",
+    "index.routing.allocation.require.group4" : "aaa"
+}'
+--------------------------------------------------
+
+The provided settings can also be updated in real time using the update
+settings API, allowing you to "move" indices (shards) around in realtime.
+
+Cluster wide filtering can also be defined, and be updated in real time
+using the cluster update settings API. This setting can come in handy
+for things like decommissioning nodes (even if the replica count is set
+to 0). Here is a sample of how to decommission a node based on `_ip`
+address:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+ "transient" : {
+ "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
+ }
+}'
+--------------------------------------------------
+
+[float]
+=== Total Shards Per Node
+
+The `index.routing.allocation.total_shards_per_node` setting allows you
+to control how many shards of an index will be allocated per node in total.
+It can be dynamically set on a live index using the update index
+settings API.
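+
+For example, a sketch of limiting an index named `test` to at most 2 of
+its shards per node:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+    "index.routing.allocation.total_shards_per_node" : 2
+}'
+--------------------------------------------------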
+
+[float]
+[[disk]]
+=== Disk-based Shard Allocation
+
+Elasticsearch can be configured to prevent shard
+allocation on nodes depending on disk usage for the node. This
+functionality is disabled by default, and can be changed either in the
+configuration file, or dynamically using:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+ "transient" : {
+ "cluster.routing.allocation.disk.threshold_enabled" : true
+ }
+}'
+--------------------------------------------------
+
+Once enabled, Elasticsearch uses two watermarks to decide whether
+shards should be allocated or can remain on the node.
+
+`cluster.routing.allocation.disk.watermark.low` controls the low
+watermark for disk usage. It defaults to 0.70, meaning ES will not
+allocate new shards to nodes once they have more than 70% disk
+used. It can also be set to an absolute byte value (like 500mb) to
+prevent ES from allocating shards if less than the configured amount
+of space is available.
+
+`cluster.routing.allocation.disk.watermark.high` controls the high
+watermark. It defaults to 0.85, meaning ES will attempt to relocate
+shards to another node if the node disk usage rises above 85%. It can
+also be set to an absolute byte value (similar to the low watermark)
+to relocate shards once less than the configured amount of space is
+available on the node.
+
+Both watermark settings can be changed dynamically using the cluster
+settings API. By default, Elasticsearch will retrieve information
+about the disk usage of the nodes every 30 seconds. This can also be
+changed by setting the `cluster.info.update.interval` setting.
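+
+For example, a sketch of raising both watermarks dynamically (the values
+are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+    "transient" : {
+        "cluster.routing.allocation.disk.watermark.low" : "0.80",
+        "cluster.routing.allocation.disk.watermark.high" : "0.90"
+    }
+}'
+--------------------------------------------------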
diff --git a/docs/reference/index-modules/analysis.asciidoc b/docs/reference/index-modules/analysis.asciidoc
new file mode 100644
index 0000000..1cf33e8
--- /dev/null
+++ b/docs/reference/index-modules/analysis.asciidoc
@@ -0,0 +1,18 @@
+[[index-modules-analysis]]
+== Analysis
+
+The index analysis module acts as a configurable registry of Analyzers
+that can be used both to break indexed (analyzed) fields into terms when a
+document is indexed and to process query strings. It maps to the Lucene
+`Analyzer`.
+
+Analyzers are (generally) composed of a single `Tokenizer` and zero or
+more `TokenFilters`. A set of `CharFilters` can be associated with an
+analyzer to process the characters prior to other analysis steps. The
+analysis module allows one to register `TokenFilters`, `Tokenizers` and
+`Analyzers` under logical names that can then be referenced either in
+mapping definitions or in certain APIs. The Analysis module
+automatically registers (*if not explicitly defined*) built in
+analyzers, token filters, and tokenizers.
+
+See <<analysis>> for configuration details. \ No newline at end of file
diff --git a/docs/reference/index-modules/cache.asciidoc b/docs/reference/index-modules/cache.asciidoc
new file mode 100644
index 0000000..829091e
--- /dev/null
+++ b/docs/reference/index-modules/cache.asciidoc
@@ -0,0 +1,56 @@
+[[index-modules-cache]]
+== Cache
+
+There are different caching modules associated with an index. They
+include the `filter` cache and others.
+
+[float]
+[[filter]]
+=== Filter Cache
+
+The filter cache is responsible for caching the results of filters (used
+in the query). The default implementation of a filter cache (and the one
+recommended to use in almost all cases) is the `node` filter cache type.
+
+[float]
+[[node-filter]]
+==== Node Filter Cache
+
+The `node` filter cache may be configured to use either a percentage of
+the total memory allocated to the process or a specific amount of
+memory. All shards present on a node share a single node cache (that's
+why it's called `node`). The cache implements an LRU eviction policy:
+when a cache becomes full, the least recently used data is evicted to
+make way for new data.
+
+The setting that allows one to control the memory size for the filter
+cache is `indices.cache.filter.size`, which defaults to `20%`. *Note*,
+this is *not* an index level setting but a node level setting (can be
+configured in the node configuration).
+
+`indices.cache.filter.size` can accept either a percentage value, like
+`30%`, or an exact value, like `512mb`.
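+
+As a sketch, in the node configuration (`elasticsearch.yml`) this might
+look like:
+
+[source,js]
+--------------------------------------------------
+# node-level setting; the 30% value is illustrative
+indices.cache.filter.size: 30%
+--------------------------------------------------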
+
+[float]
+[[index-filter]]
+==== Index Filter Cache
+
+A filter cache that exists on the index level (on each node). It is
+generally not recommended, since its memory usage depends on which shards
+are allocated on each node and is hard to predict. The types are:
+`resident`, `soft` and `weak`.
+
+All types support the following settings:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`index.cache.filter.max_size` |The max size (count, not byte size) of
+the cache (per search segment in a shard). Defaults to not set (`-1`),
+which is usually fine with `soft` cache and proper cacheable filters.
+
+|`index.cache.filter.expire` |A time based setting that expires filters
+after a certain time of inactivity. Defaults to `-1`. For example, can
+be set to `5m` for a 5 minute expiry.
+|=======================================================================
+
diff --git a/docs/reference/index-modules/codec.asciidoc b/docs/reference/index-modules/codec.asciidoc
new file mode 100644
index 0000000..f53c18f
--- /dev/null
+++ b/docs/reference/index-modules/codec.asciidoc
@@ -0,0 +1,278 @@
+[[index-modules-codec]]
+== Codec module
+
+Codecs define how documents are written to disk and read from disk. The
+postings format is the part of the codec that is responsible for reading
+and writing the term dictionary, postings lists and positions, payloads
+and offsets stored in the postings list. The doc values format is
+responsible for reading column-stride storage for a field and is typically
+used for sorting or faceting. When a field doesn't have doc values enabled,
+it is still possible to sort or facet by loading field values from the
+inverted index into main memory.
+
+Configuring custom postings or doc values formats is an expert feature and
+most likely the builtin formats, as described
+in the <<mapping-core-types,mapping section>>, will suit your needs.
+
+**********************************
+Only the default codec, postings format and doc values format are supported:
+other formats may break backward compatibility between minor versions of
+Elasticsearch, requiring data to be reindexed.
+**********************************
+
+
+[float]
+[[custom-postings]]
+=== Configuring a custom postings format
+
+Custom postings format can be defined in the index settings in the
+`codec` part. The `codec` part can be configured when creating an index
+or updating index settings. An example of how to define your custom
+postings format:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/' -d '{
+ "settings" : {
+ "index" : {
+ "codec" : {
+ "postings_format" : {
+ "my_format" : {
+ "type" : "pulsing",
+ "freq_cut_off" : "5"
+ }
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+Then when defining your mapping you can use the `my_format` name in the
+`postings_format` option as the example below illustrates:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "properties" : {
+ "second_person_id" : {"type" : "string", "postings_format" : "my_format"}
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+=== Available postings formats
+
+[float]
+[[direct-postings]]
+==== Direct postings format
+
+Wraps the default postings format for on-disk storage, but then at read
+time loads and stores all terms & postings directly in RAM. This
+postings format makes no effort to compress the terms and posting list
+and therefore is memory intensive, but because of this it gives a
+substantial increase in search performance. Because this holds all term
+bytes as a single byte[], you cannot have more than 2.1GB worth of terms
+in a single segment.
+
+This postings format offers the following parameters:
+
+`min_skip_count`::
+    The minimum number of terms with a shared prefix to
+ allow a skip pointer to be written. The default is *8*.
+
+`low_freq_cutoff`::
+ Terms with a lower document frequency use a
+ single array object representation for postings and positions. The
+ default is *32*.
+
+Type name: `direct`
+
+[float]
+[[memory-postings]]
+==== Memory postings format
+
+A postings format that stores terms & postings (docs, positions,
+payloads) in RAM, using an FST. This postings format does write to disk,
+but loads everything into memory. The memory postings format has the
+following options:
+
+`pack_fst`::
+    A boolean option that defines whether the in-memory structure
+    should be packed once it is built. Packing reduces the size of the
+    data structure in memory but requires more memory during building.
+    Default is *false*.
+
+`acceptable_overhead_ratio`::
+ The compression ratio specified as a
+ float, that is used to compress internal structures. Example ratios `0`
+ (Compact, no memory overhead at all, but the returned implementation may
+ be slow), `0.5` (Fast, at most 50% memory overhead, always select a
+ reasonably fast implementation), `7` (Fastest, at most 700% memory
+ overhead, no compression). Default is `0.2`.
+
+Type name: `memory`
+
+[float]
+[[bloom-postings]]
+==== Bloom filter posting format
+
+The bloom filter postings format wraps a delegate postings format and on
+top of this creates a bloom filter that is written to disk. During
+opening this bloom filter is loaded into memory and used to offer
+"fast-fail" reads. This postings format is useful for low doc-frequency
+fields such as primary keys. The bloom filter postings format has the
+following options:
+
+`delegate`::
+ The name of the configured postings format that the
+ bloom filter postings format will wrap.
+
+`fpp`::
+ The desired false positive probability specified as a
+ floating point number between 0 and 1.0. The `fpp` can be configured for
+    multiple expected insertions. Example expression: *10k=0.01,1m=0.03*. If
+    the number of docs per index segment is larger than *1m* then use *0.03* as
+    fpp, and if the number of docs per segment is larger than *10k* use *0.01*
+    as fpp. The last fallback value is always *0.03*. This example expression
+ is also the default.
+
+Type name: `bloom`
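+
+As a sketch, following the custom postings format example above, a bloom
+format wrapping the default postings format for an id-like field might be
+declared as follows (the `my_id_format` name is illustrative, and the
+`fpp` expression shown is just the documented default):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/' -d '{
+    "settings" : {
+        "index" : {
+            "codec" : {
+                "postings_format" : {
+                    "my_id_format" : {
+                        "type" : "bloom",
+                        "delegate" : "default",
+                        "fpp" : "10k=0.01,1m=0.03"
+                    }
+                }
+            }
+        }
+    }
+}'
+--------------------------------------------------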
+
+[[codec-bloom-load]]
+[TIP]
+==================================================
+
+It can sometimes make sense to disable bloom filters. For instance, if you are
+logging into a new index per day, and you have thousands of indices, the bloom
+filters can take up a sizable amount of memory. For most queries you are only
+interested in recent indices, so you don't mind CRUD operations on older
+indices taking slightly longer.
+
+In these cases you can disable loading of the bloom filter on a per-index
+basis by updating the index settings:
+
+[source,js]
+--------------------------------------------------
+PUT /old_index/_settings?index.codec.bloom.load=false
+--------------------------------------------------
+
+This setting, which defaults to `true`, can be updated on a live index. Note,
+however, that changing the value will cause the index to be reopened, which
+will invalidate any existing caches.
+
+==================================================
+
+[float]
+[[pulsing-postings]]
+==== Pulsing postings format
+
+The pulsing implementation in-lines the posting lists for very
+low-frequency terms in the term dictionary. This is useful to improve lookup
+performance for low-frequency terms. This postings format offers the
+following parameters:
+
+`min_block_size`::
+ The minimum block size the default Lucene term
+ dictionary uses to encode on-disk blocks. Defaults to *25*.
+
+`max_block_size`::
+ The maximum block size the default Lucene term
+ dictionary uses to encode on-disk blocks. Defaults to *48*.
+
+`freq_cut_off`::
+ The document frequency cut off where pulsing
+ in-lines posting lists into the term dictionary. Terms with a document
+ frequency less or equal to the cutoff will be in-lined. The default is
+ *1*.
+
+Type name: `pulsing`
+
+[float]
+[[default-postings]]
+==== Default postings format
+
+The default postings format has the following options:
+
+`min_block_size`::
+ The minimum block size the default Lucene term
+ dictionary uses to encode on-disk blocks. Defaults to *25*.
+
+`max_block_size`::
+ The maximum block size the default Lucene term
+ dictionary uses to encode on-disk blocks. Defaults to *48*.
+
+Type name: `default`
+
+[float]
+=== Configuring a custom doc values format
+
+Custom doc values format can be defined in the index settings in the
+`codec` part. The `codec` part can be configured when creating an index
+or updating index settings. An example of how to define your custom
+doc values format:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/' -d '{
+ "settings" : {
+ "index" : {
+ "codec" : {
+ "doc_values_format" : {
+ "my_format" : {
+ "type" : "disk"
+ }
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+Then when defining your mapping you can use the `my_format` name in the
+`doc_values_format` option as the example below illustrates:
+
+[source,js]
+--------------------------------------------------
+{
+ "product" : {
+ "properties" : {
+ "price" : {"type" : "integer", "doc_values_format" : "my_format"}
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+=== Available doc values formats
+
+[float]
+==== Memory doc values format
+
+A doc values format that stores all values in an FST in RAM. This format does
+write to disk but the whole data-structure is loaded into memory when reading
+the index. The memory doc values format has no options.
+
+Type name: `memory`
+
+[float]
+==== Disk doc values format
+
+A doc values format that stores and reads everything from disk. Although it may
+be slightly slower than the default doc values format, this doc values format
+will require almost no memory from the JVM. The disk doc values format has no
+options.
+
+Type name: `disk`
+
+[float]
+==== Default doc values format
+
+The default doc values format tries to make a good compromise between speed and
+memory usage by only loading into memory data-structures that matter for
+performance. This makes this doc values format a good fit for most use-cases.
+The default doc values format has no options.
+
+Type name: `default`
diff --git a/docs/reference/index-modules/fielddata.asciidoc b/docs/reference/index-modules/fielddata.asciidoc
new file mode 100644
index 0000000..c958dcb
--- /dev/null
+++ b/docs/reference/index-modules/fielddata.asciidoc
@@ -0,0 +1,270 @@
+[[index-modules-fielddata]]
+== Field data
+
+The field data cache is used mainly when sorting on or faceting on a
+field. It loads all the field values to memory in order to provide fast
+document based access to those values. The field data cache can be
+expensive to build for a field, so it's recommended to have enough memory
+to allocate it, and to keep it loaded.
+
+The amount of memory used for the field
+data cache can be controlled using `indices.fielddata.cache.size`. Note:
+reloading the field data which does not fit into your cache will be expensive
+and perform poorly.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`indices.fielddata.cache.size` |The max size of the field data cache,
+eg `30%` of node heap space, or an absolute value, eg `12GB`. Defaults
+to unbounded.
+
+|`indices.fielddata.cache.expire` |A time based setting that expires
+field data after a certain time of inactivity. Defaults to `-1`. For
+example, can be set to `5m` for a 5 minute expiry.
+|=======================================================================
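+
+As a sketch, in the node configuration (`elasticsearch.yml`) a bounded
+cache might look like:
+
+[source,js]
+--------------------------------------------------
+# node-level setting; the 30% value is illustrative
+indices.fielddata.cache.size: 30%
+--------------------------------------------------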
+
+[float]
+[[fielddata-circuit-breaker]]
+=== Field data circuit breaker
+The field data circuit breaker allows Elasticsearch to estimate the amount of
+memory a field will require to be loaded into memory. It can then prevent the
+field data from being loaded by raising an exception. By default the limit is configured
+to 80% of the maximum JVM heap. It can be configured with the following
+parameters:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`indices.fielddata.breaker.limit` |Maximum size of estimated field data
+to allow loading. Defaults to 80% of the maximum JVM heap.
+|`indices.fielddata.breaker.overhead` |A constant that all field data
+estimations are multiplied with to determine a final estimation. Defaults to
+1.03.
+|=======================================================================
+
+Both the `indices.fielddata.breaker.limit` and
+`indices.fielddata.breaker.overhead` can be changed dynamically using the
+cluster update settings API.
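+
+For example, the limit could be raised or lowered cluster-wide; a sketch
+using the cluster update settings API, with an illustrative value:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/_cluster/settings' -d '{
+    "persistent" : {
+        "indices.fielddata.breaker.limit" : "70%"
+    }
+}'
+--------------------------------------------------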
+
+[float]
+[[fielddata-monitoring]]
+=== Monitoring field data
+
+You can monitor memory usage for field data as well as the field data circuit
+breaker using the
+<<cluster-nodes-stats,Nodes Stats API>>.
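+
+For example (a sketch; the exact response layout depends on the version), the
+per-node field data memory usage appears under the `fielddata` section of the
+nodes stats response:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_nodes/stats/indices?pretty'
+--------------------------------------------------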
+
+[[fielddata-formats]]
+== Field data formats
+
+The field data format controls how field data should be stored.
+
+Depending on the field type, there might be several field data types
+available. In particular, string and numeric types support the `doc_values`
+format which allows for computing the field data data-structures at indexing
+time and storing them on disk. Although it will make the index larger and may
+be slightly slower, this implementation will be more near-realtime-friendly
+and will require much less memory from the JVM than other implementations.
+
+Here is an example of how to configure the `tag` field to use the `fst` field
+data format.
+
+[source,js]
+--------------------------------------------------
+{
+ tag: {
+ type: "string",
+ fielddata: {
+ format: "fst"
+ }
+ }
+}
+--------------------------------------------------
+
+It is possible to change the field data format (and the field data settings
+in general) on a live index by using the update mapping API. When doing so,
+field data which had already been loaded for existing segments will remain
+alive while new segments will use the new field data configuration. Thanks to
+the background merging process, all segments will eventually use the new
+field data format.
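+
+For example, the format of the `tag` field could be switched on a live index;
+a sketch assuming an illustrative `twitter` index with a `tweet` type:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/twitter/tweet/_mapping' -d '{
+    "tweet" : {
+        "properties" : {
+            "tag" : {
+                "type" : "string",
+                "fielddata" : {
+                    "format" : "fst"
+                }
+            }
+        }
+    }
+}'
+--------------------------------------------------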
+
+[float]
+==== String field data types
+
+`paged_bytes` (default)::
+ Stores unique terms sequentially in a large buffer and maps documents to
+ the indices of the terms they contain in this large buffer.
+
+`fst`::
+ Stores terms in a FST. Slower to build than `paged_bytes` but can help lower
+ memory usage if many terms share common prefixes and/or suffixes.
+
+`doc_values`::
+ Computes and stores field data data-structures on disk at indexing time.
+ Lowers memory usage but only works on non-analyzed strings (`index`: `no` or
+ `not_analyzed`) and doesn't support filtering.
+
+[float]
+==== Numeric field data types
+
+`array` (default)::
+ Stores field values in memory using arrays.
+
+`doc_values`::
+ Computes and stores field data data-structures on disk at indexing time.
+ Doesn't support filtering.
+
+[float]
+==== Geo point field data types
+
+`array` (default)::
+ Stores latitudes and longitudes in arrays.
+
+`doc_values`::
+ Computes and stores field data data-structures on disk at indexing time.
+
+[float]
+=== Fielddata loading
+
+By default, field data is loaded lazily, i.e. the first time that a query
+which requires it is executed. However, this can make the first requests that
+follow a merge operation quite slow, since fielddata loading is a heavy
+operation.
+
+It is possible to force field data to be loaded and cached eagerly through the
+`loading` setting of fielddata:
+
+[source,js]
+--------------------------------------------------
+{
+ category: {
+ type: "string",
+ fielddata: {
+ loading: "eager"
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Disabling field data loading
+
+Field data can take a lot of RAM, so it makes sense to disable field data
+loading on fields that don't need it, for example those that are used for
+full-text search only. In order to disable field data loading, just change
+the field data format to `disabled`. When disabled, all requests that try to
+load field data, e.g. because they include aggregations and/or sorting, will
+return an error.
+
+[source,js]
+--------------------------------------------------
+{
+ text: {
+ type: "string",
+ fielddata: {
+ format: "disabled"
+ }
+ }
+}
+--------------------------------------------------
+
+The `disabled` format is supported by all field types.
+
+[float]
+[[field-data-filtering]]
+=== Filtering fielddata
+
+It is possible to control which field values are loaded into memory,
+which is particularly useful for string fields. When specifying the
+<<mapping-core-types,mapping>> for a field, you
+can also specify a fielddata filter.
+
+Fielddata filters can be changed using the
+<<indices-put-mapping,PUT mapping>>
+API. After changing the filters, use the
+<<indices-clearcache,Clear Cache>> API
+to reload the fielddata using the new filters.
+
+[float]
+==== Filtering by frequency
+
+The frequency filter allows you to only load terms whose frequency falls
+between a `min` and `max` value, which can be expressed as an absolute
+number or as a percentage (eg `0.01` is `1%`). Frequency is calculated
+*per segment*. Percentages are based on the number of docs which have a
+value for the field, as opposed to all docs in the segment.
+
+Small segments can be excluded completely by specifying the minimum
+number of docs that the segment should contain with `min_segment_size`:
+
+[source,js]
+--------------------------------------------------
+{
+ tag: {
+ type: "string",
+ fielddata: {
+ filter: {
+ frequency: {
+ min: 0.001,
+ max: 0.1,
+ min_segment_size: 500
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Filtering by regex
+
+Terms can also be filtered by regular expression - only values which
+match the regular expression are loaded. Note: the regular expression is
+applied to each term in the field, not to the whole field value. For
+instance, to only load hashtags from a tweet, we can use a regular
+expression which matches terms beginning with `#`:
+
+[source,js]
+--------------------------------------------------
+{
+ tweet: {
+ type: "string",
+    analyzer: "whitespace",
+ fielddata: {
+ filter: {
+ regex: {
+ pattern: "^#.*"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Combining filters
+
+The `frequency` and `regex` filters can be combined:
+
+[source,js]
+--------------------------------------------------
+{
+ tweet: {
+ type: "string",
+    analyzer: "whitespace",
+ fielddata: {
+ filter: {
+ regex: {
+        pattern: "^#.*"
+ },
+ frequency: {
+ min: 0.001,
+ max: 0.1,
+ min_segment_size: 500
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/index-modules/mapper.asciidoc b/docs/reference/index-modules/mapper.asciidoc
new file mode 100644
index 0000000..1728969
--- /dev/null
+++ b/docs/reference/index-modules/mapper.asciidoc
@@ -0,0 +1,39 @@
+[[index-modules-mapper]]
+== Mapper
+
+The mapper module acts as a registry for the type mapping definitions
+added to an index, either when creating it or by using the put mapping
+API. It also handles the dynamic mapping support for types that have no
+explicit mappings pre-defined. For more information about mapping
+definitions, check out the <<mapping,mapping section>>.
+
+[float]
+=== Dynamic / Default Mappings
+
+Dynamic mappings allow a generic mapping definition to be automatically
+applied to types that do not have a mapping pre-defined, or to be merged
+into (and overridden by) new mapping definitions. This works mainly
+because the `object` type, and in particular the root `object` type,
+allows for schema-less dynamic addition of unmapped fields.
+
+The default mapping definition is a plain mapping definition that is
+embedded within Elasticsearch:
+
+[source,js]
+--------------------------------------------------
+{
+ _default_ : {
+ }
+}
+--------------------------------------------------
+
+Pretty short, no? Basically, everything is defaulted, especially the
+dynamic nature of the root object mapping. The default mapping
+definition can be overridden in several ways. The simplest is to define
+a file called `default-mapping.json` and place it under the `config`
+directory (which can be configured to exist in a different location).
+Its location can also be set explicitly using the
+`index.mapper.default_mapping_location` setting.
+
+Dynamic creation of mappings for unmapped types can be completely
+disabled by setting `index.mapper.dynamic` to `false`.
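+
+For example, both settings could be placed in `elasticsearch.yml`; a sketch,
+with the mapping file path purely illustrative:
+
+[source,js]
+--------------------------------------------------
+index.mapper.dynamic: false
+index.mapper.default_mapping_location: /path/to/default-mapping.json
+--------------------------------------------------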
diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc
new file mode 100644
index 0000000..84d2675
--- /dev/null
+++ b/docs/reference/index-modules/merge.asciidoc
@@ -0,0 +1,215 @@
+[[index-modules-merge]]
+== Merge
+
+A shard in elasticsearch is a Lucene index, and a Lucene index is broken
+down into segments. Segments are internal storage elements in the index
+where the index data is stored, and are immutable up to delete markers.
+Segments are, periodically, merged into larger segments to keep the
+index size at bay and expunge deletes.
+
+The more segments the Lucene index has, the slower searches become and
+the more memory is used. Segment merging reduces the number of segments;
+however, merges can be expensive to perform, especially in low-IO
+environments. Merges can be throttled using
+<<store-throttling,store level throttling>>.
+
+
+[float]
+[[policy]]
+=== Policy
+
+The index merge policy module allows one to control which segments of a
+shard index are to be merged. There are several types of policies with
+the default set to `tiered`.
+
+[float]
+[[tiered]]
+==== tiered
+
+Merges segments of approximately equal size, subject to an allowed
+number of segments per tier. This is similar to the `log_byte_size` merge
+policy, except this merge policy is able to merge non-adjacent segments,
+and separates how many segments are merged at once from how many
+segments are allowed per tier. This merge policy also does not
+over-merge (i.e., cascade merges).
+
+This policy has the following settings:
+
+`index.merge.policy.expunge_deletes_allowed`::
+
+ When expungeDeletes is called, we only merge away a segment if its delete
+ percentage is over this threshold. Default is `10`.
+
+`index.merge.policy.floor_segment`::
+
+ Segments smaller than this are "rounded up" to this size, i.e. treated as
+ equal (floor) size for merge selection. This is to prevent frequent
+ flushing of tiny segments from allowing a long tail in the index. Default
+ is `2mb`.
+
+`index.merge.policy.max_merge_at_once`::
+
+ Maximum number of segments to be merged at a time during "normal" merging.
+ Default is `10`.
+
+`index.merge.policy.max_merge_at_once_explicit`::
+
+ Maximum number of segments to be merged at a time, during optimize or
+ expungeDeletes. Default is `30`.
+
+`index.merge.policy.max_merged_segment`::
+
+ Maximum sized segment to produce during normal merging (not explicit
+ optimize). This setting is approximate: the estimate of the merged segment
+ size is made by summing sizes of to-be-merged segments (compensating for
+ percent deleted docs). Default is `5gb`.
+
+`index.merge.policy.segments_per_tier`::
+
+ Sets the allowed number of segments per tier. Smaller values mean more
+ merging but fewer segments. Default is `10`. Note, this value needs to be
+ >= the `max_merge_at_once` setting, otherwise you'll force too many merges
+ to occur.
+
+`index.reclaim_deletes_weight`::
+
+ Controls how aggressively merges that reclaim more deletions are favored.
+ Higher values favor selecting merges that reclaim deletions. A value of
+ `0.0` means deletions don't impact merge selection. Defaults to `2.0`.
+
+`index.compound_format`::
+
+ Should the index be stored in compound format or not. Defaults to `false`.
+ See <<index-compound-format,`index.compound_format`>> in
+ <<index-modules-settings>>.
+
+For normal merging, this policy first computes a "budget" of how many
+segments are allowed to be in the index. If the index is over-budget,
+then the policy sorts segments by decreasing size (pro-rating by percent
+deletes), and then finds the least-cost merge. Merge cost is measured by
+a combination of the "skew" of the merge (size of the largest segment
+divided by the smallest segment), total merge size and percent of deletes
+reclaimed, so that merges with lower skew, smaller size and those
+reclaiming more deletes are favored.
+
+If a merge will produce a segment that's larger than
+`max_merged_segment` then the policy will merge fewer segments (down to
+1 at once, if that one has deletions) to keep the segment size under
+budget.
+
+Note, this can mean that for large shards that hold many gigabytes of
+data, the default `max_merged_segment` (`5gb`) can cause many segments to
+remain in the index, making searches slower. Use the indices segments API
+to see the segments that an index has, and possibly either increase
+`max_merged_segment` or issue an optimize call for the index (try to
+issue it at a low-traffic time).
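+
+For example (with an illustrative `twitter` index), the segments could be
+inspected and an optimize issued down to a target segment count:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/_segments?pretty'
+
+curl -XPOST 'localhost:9200/twitter/_optimize?max_num_segments=5'
+--------------------------------------------------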
+
+[float]
+[[log-byte-size]]
+==== log_byte_size
+
+A merge policy that merges segments into levels of exponentially
+increasing *byte size*, where each level has fewer segments than the
+value of the merge factor. Whenever extra segments (beyond the merge
+factor upper bound) are encountered, all segments within the level are
+merged.
+
+This policy has the following settings:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|index.merge.policy.merge_factor |Determines how often segment indices
+are merged by index operation. With smaller values, less RAM is used
+while indexing, and searches on unoptimized indices are faster, but
+indexing speed is slower. With larger values, more RAM is used during
+indexing, and while searches on unoptimized indices are slower, indexing
+is faster. Thus larger values (greater than 10) are best for batch index
+creation, and smaller values (lower than 10) for indices that are
+interactively maintained. Defaults to `10`.
+
+|index.merge.policy.min_merge_size |A size setting type which sets the
+minimum size for the lowest level segments. Any segments below this size
+are considered to be on the same level (even if they vary drastically in
+size) and will be merged whenever there are mergeFactor of them. This
+effectively truncates the "long tail" of small segments that would
+otherwise be created into a single level. If you set this too large, it
+could greatly increase the merging cost during indexing (if you flush
+many small segments). Defaults to `1.6mb`
+
+|index.merge.policy.max_merge_size |A size setting type which sets the
+largest segment (measured by total byte size of the segment's files)
+that may be merged with other segments. Defaults to unbounded.
+
+|index.merge.policy.max_merge_docs |Determines the largest segment
+(measured by document count) that may be merged with other segments.
+Defaults to unbounded.
+|=======================================================================
+
+[float]
+[[log-doc]]
+==== log_doc
+
+A merge policy that tries to merge segments into levels of exponentially
+increasing *document count*, where each level has fewer segments than
+the value of the merge factor. Whenever extra segments (beyond the merge
+factor upper bound) are encountered, all segments within the level are
+merged.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|index.merge.policy.merge_factor |Determines how often segment indices
+are merged by index operation. With smaller values, less RAM is used
+while indexing, and searches on unoptimized indices are faster, but
+indexing speed is slower. With larger values, more RAM is used during
+indexing, and while searches on unoptimized indices are slower, indexing
+is faster. Thus larger values (greater than 10) are best for batch index
+creation, and smaller values (lower than 10) for indices that are
+interactively maintained. Defaults to `10`.
+
+|index.merge.policy.min_merge_docs |Sets the minimum size for the lowest
+level segments. Any segments below this size are considered to be on the
+same level (even if they vary drastically in size) and will be merged
+whenever there are mergeFactor of them. This effectively truncates the
+"long tail" of small segments that would otherwise be created into a
+single level. If you set this too large, it could greatly increase the
+merging cost during indexing (if you flush many small segments).
+Defaults to `1000`.
+
+|index.merge.policy.max_merge_docs |Determines the largest segment
+(measured by document count) that may be merged with other segments.
+Defaults to unbounded.
+|=======================================================================
+
+[float]
+[[scheduling]]
+=== Scheduling
+
+The merge schedule controls the execution of merge operations once they
+are needed (according to the merge policy). The following types are
+supported, with the default being the `ConcurrentMergeScheduler`.
+
+[float]
+==== ConcurrentMergeScheduler
+
+A merge scheduler that runs each merge in a separate thread, up to the
+maximum number of threads. Once that limit is reached and another merge
+is needed, the thread(s) that are updating the index will pause until
+one or more merges complete.
+
+The scheduler supports the following settings:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|index.merge.scheduler.max_thread_count |The maximum number of threads
+to perform the merge operation. Defaults to
+`Math.max(1, Math.min(3, Runtime.getRuntime().availableProcessors() / 2))`.
+|=======================================================================
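+
+For example, the thread count could be lowered in `elasticsearch.yml`; a
+sketch with an illustrative value (a low value is often suggested for
+spinning disks):
+
+[source,js]
+--------------------------------------------------
+index.merge.scheduler.max_thread_count: 1
+--------------------------------------------------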
+
+[float]
+==== SerialMergeScheduler
+
+A merge scheduler that simply does each merge sequentially using the
+calling thread (blocking the operation that triggered the merge, i.e. the
+index operation).
diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc
new file mode 100644
index 0000000..ae9f368
--- /dev/null
+++ b/docs/reference/index-modules/similarity.asciidoc
@@ -0,0 +1,140 @@
+[[index-modules-similarity]]
+== Similarity module
+
+A similarity (scoring / ranking model) defines how matching documents
+are scored. Similarity is per field, meaning that via the mapping one
+can define a different similarity per field.
+
+Configuring a custom similarity is considered an expert feature and the
+built-in similarities are most likely sufficient, as described in the
+<<mapping-core-types,mapping section>>.
+
+[float]
+[[configuration]]
+=== Configuring a similarity
+
+Most existing or custom Similarities have configuration options which
+can be configured via the index settings as shown below. The index
+options can be provided when creating an index or updating index
+settings.
+
+[source,js]
+--------------------------------------------------
+"similarity" : {
+ "my_similarity" : {
+ "type" : "DFR",
+ "basic_model" : "g",
+ "after_effect" : "l",
+ "normalization" : "h2",
+ "normalization.h2.c" : "3.0"
+ }
+}
+--------------------------------------------------
+
+Here we configure the DFRSimilarity so it can be referenced as
+`my_similarity` in mappings, as illustrated in the example below:
+
+[source,js]
+--------------------------------------------------
+{
+  "book" : {
+    "properties" : {
+      "title" : { "type" : "string", "similarity" : "my_similarity" }
+    }
+  }
+}
+--------------------------------------------------
+
+[float]
+=== Available similarities
+
+[float]
+[[default-similarity]]
+==== Default similarity
+
+The default similarity, which is based on the TF/IDF model. This
+similarity has the following option:
+
+`discount_overlaps`::
+ Determines whether overlap tokens (Tokens with
+ 0 position increment) are ignored when computing norm. By default this
+ is true, meaning overlap tokens do not count when computing norms.
+
+Type name: `default`
+
+[float]
+[[bm25]]
+==== BM25 similarity
+
+Another TF/IDF based similarity that has built-in tf normalization and
+is supposed to work better for short fields (like names). See
+http://en.wikipedia.org/wiki/Okapi_BM25[Okapi_BM25] for more details.
+This similarity has the following options:
+
+[horizontal]
+`k1`::
+ Controls non-linear term frequency normalization
+ (saturation).
+
+`b`::
+ Controls to what degree document length normalizes tf values.
+
+`discount_overlaps`::
+ Determines whether overlap tokens (Tokens with
+ 0 position increment) are ignored when computing norm. By default this
+ is true, meaning overlap tokens do not count when computing norms.
+
+Type name: `BM25`
+
+[float]
+[[drf]]
+==== DFR similarity
+
+Similarity that implements the
+http://lucene.apache.org/core/4_1_0/core/org/apache/lucene/search/similarities/DFRSimilarity.html[divergence
+from randomness] framework. This similarity has the following options:
+
+[horizontal]
+`basic_model`::
+ Possible values: `be`, `d`, `g`, `if`, `in`, `ine` and `p`.
+
+`after_effect`::
+ Possible values: `no`, `b` and `l`.
+
+`normalization`::
+ Possible values: `no`, `h1`, `h2`, `h3` and `z`.
+
+All options but the first (`no`) need a normalization value.
+
+Type name: `DFR`
+
+[float]
+[[ib]]
+==== IB similarity
+
+http://lucene.apache.org/core/4_1_0/core/org/apache/lucene/search/similarities/IBSimilarity.html[Information
+based model]. This similarity has the following options:
+
+[horizontal]
+`distribution`:: Possible values: `ll` and `spl`.
+`lambda`:: Possible values: `df` and `ttf`.
+`normalization`:: Same as in `DFR` similarity.
+
+Type name: `IB`
+
+[float]
+[[default-base]]
+==== Default and Base Similarities
+
+By default, Elasticsearch will use whatever similarity is configured as
+`default`. However, the similarity functions `queryNorm()` and `coord()`
+are not per-field. Consequently, for expert users wanting to change the
+implementation used for these two methods, while not changing the
+`default`, it is possible to configure a similarity with the name
+`base`. This similarity will then be used for the two methods.
+
+You can change the default similarity for all fields like this:
+
+[source,js]
+--------------------------------------------------
+index.similarity.default.type: BM25
+--------------------------------------------------
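+
+A `base` similarity could be configured the same way; a sketch following the
+same pattern, assuming the `base` name is resolved like `default`:
+
+[source,js]
+--------------------------------------------------
+index.similarity.base.type: BM25
+--------------------------------------------------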
diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc
new file mode 100644
index 0000000..00029cf
--- /dev/null
+++ b/docs/reference/index-modules/slowlog.asciidoc
@@ -0,0 +1,87 @@
+[[index-modules-slowlog]]
+== Index Slow Log
+
+[float]
+[[search-slow-log]]
+=== Search Slow Log
+
+The shard level slow search log allows logging slow searches (query and
+fetch executions) into a dedicated log file.
+
+Thresholds can be set for both the query phase and the fetch phase of
+the execution. Here is a sample:
+
+[source,js]
+--------------------------------------------------
+#index.search.slowlog.threshold.query.warn: 10s
+#index.search.slowlog.threshold.query.info: 5s
+#index.search.slowlog.threshold.query.debug: 2s
+#index.search.slowlog.threshold.query.trace: 500ms
+
+#index.search.slowlog.threshold.fetch.warn: 1s
+#index.search.slowlog.threshold.fetch.info: 800ms
+#index.search.slowlog.threshold.fetch.debug: 500ms
+#index.search.slowlog.threshold.fetch.trace: 200ms
+--------------------------------------------------
+
+By default, none are enabled (all are set to `-1`). The levels (`warn`,
+`info`, `debug`, `trace`) allow you to control at which logging level the
+entry will be logged. Not all need to be configured (for example, only
+the `warn` threshold can be set). The benefit of several levels is the
+ability to quickly "grep" for specific thresholds breached.
+
+The logging is done at the shard level scope, meaning the execution of a
+search request within a specific shard. It does not encompass the whole
+search request, which can be broadcast to several shards in order to
+execute. One of the benefits of shard level logging is that it associates
+the actual execution with the specific machine, which request level
+logging cannot do.
+
+All settings are index level settings (and each index can have different
+values for them), and can be changed at runtime using the index update
+settings API.
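+
+For example, a threshold could be changed on a live index; a sketch using the
+index update settings API with an illustrative `twitter` index:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/twitter/_settings' -d '{
+    "index.search.slowlog.threshold.query.warn" : "10s"
+}'
+--------------------------------------------------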
+
+The logging file is configured by default using the following
+configuration (found in `logging.yml`):
+
+[source,js]
+--------------------------------------------------
+index_search_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+--------------------------------------------------
+
+[float]
+[[index-slow-log]]
+=== Index Slow log
+
+The indexing slow log is similar in functionality to the search slow
+log. Its log file name ends with `_index_indexing_slowlog.log`. The log
+and the thresholds are configured in the `elasticsearch.yml` file in the
+same way as the search slowlog. Index slowlog sample:
+
+[source,js]
+--------------------------------------------------
+#index.indexing.slowlog.threshold.index.warn: 10s
+#index.indexing.slowlog.threshold.index.info: 5s
+#index.indexing.slowlog.threshold.index.debug: 2s
+#index.indexing.slowlog.threshold.index.trace: 500ms
+--------------------------------------------------
+
+The index slow log file is configured by default in the `logging.yml`
+file:
+
+[source,js]
+--------------------------------------------------
+index_indexing_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+--------------------------------------------------
diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc
new file mode 100644
index 0000000..8388ee2
--- /dev/null
+++ b/docs/reference/index-modules/store.asciidoc
@@ -0,0 +1,122 @@
+[[index-modules-store]]
+== Store
+
+The store module allows you to control how index data is stored.
+
+The index can either be stored in-memory (no persistence) or on-disk
+(the default). In-memory indices provide better performance at the cost
+of limiting the index size to the amount of available physical memory.
+
+When using a local gateway (the default), file system storage with *no*
+in memory storage is required to maintain index consistency. This is
+required since the local gateway constructs its state from the local
+index state of each node.
+
+Another important aspect of memory based storage is that Elasticsearch
+supports storing the index in memory *outside of the JVM heap space*,
+using the "Memory" (see below) storage type. This means there is no need
+for extra large JVM heaps (with their own consequences) for storing the
+index in memory.
+
+
+[float]
+[[store-throttling]]
+=== Store Level Throttling
+
+Lucene, the IR library elasticsearch uses under the covers, works by
+creating immutable segments (up to deletes) and constantly merging them
+(the merge policy settings control how those merges happen). The merge
+process happens asynchronously without blocking indexing / search
+operations. The problem though, especially on systems with low IO, is
+that the merge process can be expensive and affect search / index
+performance simply because the box is now taxed with more IO.
+
+The store module allows throttling to be configured for merges (or for
+all operations), either at the node level or at the index level. Node
+level throttling makes sure that, across all the shards allocated on that
+node, the merge process won't exceed the configured bytes per second. It
+can be enabled by setting `indices.store.throttle.type` to `merge`, and
+setting `indices.store.throttle.max_bytes_per_sec` to something like
+`5mb`. The node level settings can be changed dynamically using the
+cluster update settings API. The default is `20mb` with type `merge`.
+
+If specific index level configuration is needed, regardless of the node
+level settings, it can also be set using `index.store.throttle.type` and
+`index.store.throttle.max_bytes_per_sec`. The default value for the type
+is `node`, meaning it will throttle based on the node level settings and
+participate in the global throttling happening. Both settings can be set
+dynamically using the index update settings API.
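+
+For example, node level throttling could be adjusted cluster-wide; a sketch
+using the cluster update settings API, with illustrative values:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/_cluster/settings' -d '{
+    "transient" : {
+        "indices.store.throttle.type" : "merge",
+        "indices.store.throttle.max_bytes_per_sec" : "50mb"
+    }
+}'
+--------------------------------------------------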
+
+The following sections list all the different storage types supported.
+
+[float]
+[[file-system]]
+=== File System
+
+File system based storage is the default storage used. There are
+different implementations or storage types. The best one for the
+operating environment will be automatically chosen: `mmapfs` on
+Solaris/Linux/Windows 64bit, `simplefs` on Windows 32bit, and
+`niofs` for the rest.
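+
+The storage type can also be set explicitly per index; a minimal sketch,
+assuming the `index.store.type` index setting and an illustrative index name:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/' -d '{
+    "settings" : {
+        "index.store.type" : "niofs"
+    }
+}'
+--------------------------------------------------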
+
+The following are the different file system based storage types:
+
+[float]
+==== Simple FS
+
+The `simplefs` type is a straightforward implementation of file system
+storage (maps to Lucene `SimpleFsDirectory`) using a random access file.
+This implementation has poor concurrent performance (multiple threads
+will bottleneck). It is usually better to use the `niofs` when you need
+index persistence.
+
+[float]
+==== NIO FS
+
+The `niofs` type stores the shard index on the file system (maps to
+Lucene `NIOFSDirectory`) using NIO. It allows multiple threads to read
+from the same file concurrently. It is not recommended on Windows
+because of a bug in the SUN Java implementation.
+
+[[mmapfs]]
+[float]
+==== MMap FS
+
+The `mmapfs` type stores the shard index on the file system (maps to
+Lucene `MMapDirectory`) by mapping a file into memory (mmap). Memory
+mapping uses up a portion of the virtual memory address space in your
+process equal to the size of the file being mapped. Before using this
+class, be sure you have plenty of virtual address space.
+
+[float]
+[[store-memory]]
+=== Memory
+
+The `memory` type stores the index in main memory.
+
+There are also *node* level settings that control the caching of buffers
+(important when using direct buffers):
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`cache.memory.direct` |Should the memory be allocated outside of the
+JVM heap. Defaults to `true`.
+
+|`cache.memory.small_buffer_size` |The small buffer size, defaults to
+`1kb`.
+
+|`cache.memory.large_buffer_size` |The large buffer size, defaults to
+`1mb`.
+
+|`cache.memory.small_cache_size` |The small buffer cache size, defaults
+to `10mb`.
+
+|`cache.memory.large_cache_size` |The large buffer cache size, defaults
+to `500mb`.
+|=======================================================================
+
diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc
new file mode 100644
index 0000000..e5215fe
--- /dev/null
+++ b/docs/reference/index-modules/translog.asciidoc
@@ -0,0 +1,28 @@
+[[index-modules-translog]]
+== Translog
+
+Each shard has a transaction log, or write ahead log, associated with it.
+It guarantees that when an index/delete operation occurs, it is applied
+atomically, without "committing" the internal Lucene index for each
+request. A flush ("commit") still happens based on several parameters:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|index.translog.flush_threshold_ops |After how many operations to flush.
+Defaults to `5000`.
+
+|index.translog.flush_threshold_size |Once the translog hits this size,
+a flush will happen. Defaults to `200mb`.
+
+|index.translog.flush_threshold_period |The period with no flush
+happening to force a flush. Defaults to `30m`.
+
+|index.translog.interval |How often to check if a flush is needed, randomized
+between the interval value and 2x the interval value. Defaults to `5s`.
+|=======================================================================
+
+Note: these parameters can be updated at runtime using the Index
+Settings Update API (for example, these numbers can be increased when
+executing bulk updates to support higher TPS).
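+
+For example, the size threshold could be raised before a large bulk load; a
+sketch using the index update settings API, with an illustrative index name
+and value:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/twitter/_settings' -d '{
+    "index.translog.flush_threshold_size" : "500mb"
+}'
+--------------------------------------------------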
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
new file mode 100644
index 0000000..1ae7a20
--- /dev/null
+++ b/docs/reference/index.asciidoc
@@ -0,0 +1,35 @@
+[[elasticsearch-reference]]
+= Reference
+
+include::setup.asciidoc[]
+
+include::migration/migrate_1_0.asciidoc[]
+
+include::api-conventions.asciidoc[]
+
+include::docs.asciidoc[]
+
+include::search.asciidoc[]
+
+include::indices.asciidoc[]
+
+include::cat.asciidoc[]
+
+include::cluster.asciidoc[]
+
+include::query-dsl.asciidoc[]
+
+include::mapping.asciidoc[]
+
+include::analysis.asciidoc[]
+
+include::modules.asciidoc[]
+
+include::index-modules.asciidoc[]
+
+include::testing.asciidoc[]
+
+include::glossary.asciidoc[]
+
+
+
diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc
new file mode 100644
index 0000000..70d801b
--- /dev/null
+++ b/docs/reference/indices.asciidoc
@@ -0,0 +1,106 @@
+[[indices]]
+= Indices APIs
+
+[partintro]
+--
+The indices APIs are used to manage individual indices,
+index settings, aliases, mappings, index templates
+and warmers.
+
+[float]
+[[index-management]]
+== Index management:
+
+* <<indices-create-index>>
+* <<indices-delete-index>>
+* <<indices-exists>>
+* <<indices-open-close>>
+
+[float]
+[[mapping-management]]
+== Mapping management:
+
+* <<indices-put-mapping>>
+* <<indices-get-mapping>>
+* <<indices-get-field-mapping>>
+* <<indices-delete-mapping>>
+* <<indices-types-exists>>
+
+[float]
+[[alias-management]]
+== Alias management:
+* <<indices-aliases>>
+
+[float]
+[[index-settings]]
+== Index settings:
+* <<indices-update-settings>>
+* <<indices-get-settings>>
+* <<indices-analyze>>
+* <<indices-templates>>
+* <<indices-warmers>>
+
+[float]
+[[monitoring]]
+== Monitoring:
+* <<indices-status>>
+* <<indices-stats>>
+* <<indices-segments>>
+
+[float]
+[[status-management]]
+== Status management:
+* <<indices-clearcache>>
+* <<indices-refresh>>
+* <<indices-flush>>
+* <<indices-optimize>>
+* <<indices-gateway-snapshot>>
+
+--
+
+include::indices/create-index.asciidoc[]
+
+include::indices/delete-index.asciidoc[]
+
+include::indices/indices-exists.asciidoc[]
+
+include::indices/open-close.asciidoc[]
+
+include::indices/put-mapping.asciidoc[]
+
+include::indices/get-mapping.asciidoc[]
+
+include::indices/get-field-mapping.asciidoc[]
+
+include::indices/types-exists.asciidoc[]
+
+include::indices/delete-mapping.asciidoc[]
+
+include::indices/aliases.asciidoc[]
+
+include::indices/update-settings.asciidoc[]
+
+include::indices/get-settings.asciidoc[]
+
+include::indices/analyze.asciidoc[]
+
+include::indices/templates.asciidoc[]
+
+include::indices/warmers.asciidoc[]
+
+include::indices/status.asciidoc[]
+
+include::indices/stats.asciidoc[]
+
+include::indices/segments.asciidoc[]
+
+include::indices/clearcache.asciidoc[]
+
+include::indices/flush.asciidoc[]
+
+include::indices/refresh.asciidoc[]
+
+include::indices/optimize.asciidoc[]
+
+include::indices/gateway-snapshot.asciidoc[]
+
diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc
new file mode 100644
index 0000000..53f484c
--- /dev/null
+++ b/docs/reference/indices/aliases.asciidoc
@@ -0,0 +1,348 @@
+[[indices-aliases]]
+== Index Aliases
+
+APIs in elasticsearch accept an index name when working against a
+specific index, and several indices when applicable. The index aliases
+API allows aliasing an index with a name, with all APIs automatically
+converting the alias name to the actual index name. An alias can also be
+mapped to more than one index, and when specified, the alias will
+automatically expand to the aliased indices. An alias can also be
+associated with a filter that will automatically be applied when
+searching, and with routing values.
+
+Here is a sample of associating the alias `alias1` with index `test1`:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ { "add" : { "index" : "test1", "alias" : "alias1" } }
+ ]
+}'
+--------------------------------------------------
+
+An alias can also be removed, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ { "remove" : { "index" : "test1", "alias" : "alias1" } }
+ ]
+}'
+--------------------------------------------------
+
+Renaming an alias is a simple `remove` then `add` operation within the
+same API. This operation is atomic, so there is no need to worry about a
+short period of time where the alias does not point to an index:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ { "remove" : { "index" : "test1", "alias" : "alias1" } },
+ { "add" : { "index" : "test1", "alias" : "alias2" } }
+ ]
+}'
+--------------------------------------------------
+
+Associating an alias with more than one index is simply several `add`
+actions:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ { "add" : { "index" : "test1", "alias" : "alias1" } },
+ { "add" : { "index" : "test2", "alias" : "alias1" } }
+ ]
+}'
+--------------------------------------------------
+
+It is an error to index to an alias which points to more than one index.
+
+[float]
+[[filtered]]
+=== Filtered Aliases
+
+Aliases with filters provide an easy way to create different "views" of
+the same index. The filter can be defined using Query DSL and is applied
+to all Search, Count, Delete By Query and More Like This operations with
+this alias. Here is an example:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ {
+ "add" : {
+ "index" : "test1",
+ "alias" : "alias2",
+ "filter" : { "term" : { "user" : "kimchy" } }
+ }
+ }
+ ]
+}'
+--------------------------------------------------
+
+[float]
+[[aliases-routing]]
+==== Routing
+
+It is possible to associate routing values with aliases. This feature
+can be used together with filtering aliases in order to avoid
+unnecessary shard operations.
+
+The following command creates a new alias `alias1` that points to index
+`test`. After `alias1` is created, all operations with this alias are
+automatically modified to use value `1` for routing:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ {
+ "add" : {
+ "index" : "test",
+ "alias" : "alias1",
+ "routing" : "1"
+ }
+ }
+ ]
+}'
+--------------------------------------------------
+
+It's also possible to specify different routing values for searching
+and indexing operations:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/_aliases' -d '
+{
+ "actions" : [
+ {
+ "add" : {
+ "index" : "test",
+ "alias" : "alias2",
+ "search_routing" : "1,2",
+ "index_routing" : "2"
+ }
+ }
+ ]
+}'
+--------------------------------------------------
+
+As shown in the example above, search routing may contain several values
+separated by commas. Index routing can contain only a single value.
+
+If an operation that uses a routing alias also has a routing parameter, the
+intersection of the alias routing and the routing specified in the
+parameter is used. For example, the following command will use "2" as the
+routing value:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/alias2/_search?q=user:kimchy&routing=2,3'
+--------------------------------------------------
+
+[float]
+[[alias-adding]]
+=== Add a single alias
+
+An alias can also be added with the endpoint
+
+`PUT /{index}/_alias/{name}`
+
+
+where
+
+[horizontal]
+`index`:: The index the alias refers to. Can be any of `blank | * | _all | glob pattern | name1, name2, …`
+`name`:: The name of the alias. This is a required option.
+`routing`:: An optional routing that can be associated with an alias.
+`filter`:: An optional filter that can be associated with an alias.
+
+You can also use the plural `_aliases`.
+
+[float]
+==== Examples:
+
+Adding a time-based alias:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/logs_201305/_alias/2013'
+--------------------------------------------------
+
+Adding a user alias:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/users/_alias/user_12' -d '{
+ "routing" : "12",
+ "filter" : {
+ "term" : {
+ "user_id" : 12
+ }
+ }
+}'
+--------------------------------------------------
+
+[float]
+[[deleting]]
+=== Delete aliases
+
+
+The REST endpoint is: `/{index}/_alias/{name}`
+
+where
+
+[horizontal]
+`index`:: `* | _all | glob pattern | name1, name2, …`
+`name`:: `* | _all | glob pattern | name1, name2, …`
+
+Alternatively you can use the plural `_aliases`. Example:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE 'localhost:9200/users/_alias/user_12'
+--------------------------------------------------
+
+[float]
+[[alias-retrieving]]
+=== Retrieving existing aliases
+
+The get index alias API allows filtering by alias name and index name.
+This API redirects to the master and fetches the requested index aliases,
+if available. It only serialises the found index aliases.
+
+Possible options:
+[horizontal]
+`index`::
+
+  The index name to get aliases for. Partial names are supported via
+  wildcards, and multiple index names can be specified separated by a
+  comma. The alias name for an index can also be used.
+
+`alias`::
+  The name of the alias to return in the response. Like the index
+  option, this option supports wildcards and allows specifying
+  multiple alias names separated by a comma.
+
+`ignore_unavailable`::
+  What to do if a specified index name doesn't exist. If set to `true`
+  then those indices are ignored.
+
+The REST endpoint is: `/{index}/_alias/{alias}`.
+
+[float]
+==== Examples:
+
+All aliases for the index users:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/users/_alias/*'
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+ {
+ "users" : {
+ "aliases" : {
+ "user_13" : {
+ "filter" : {
+ "term" : {
+ "user_id" : 13
+ }
+ },
+ "index_routing" : "13",
+ "search_routing" : "13"
+ },
+ "user_14" : {
+ "filter" : {
+ "term" : {
+ "user_id" : 14
+ }
+ },
+ "index_routing" : "14",
+ "search_routing" : "14"
+ },
+ "user_12" : {
+ "filter" : {
+ "term" : {
+ "user_id" : 12
+ }
+ },
+ "index_routing" : "12",
+ "search_routing" : "12"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+All aliases with the name 2013 in any index:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_alias/2013'
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "logs_201304" : {
+ "aliases" : {
+ "2013" : { }
+ }
+ },
+ "logs_201305" : {
+ "aliases" : {
+ "2013" : { }
+ }
+ }
+}
+--------------------------------------------------
+
+All aliases that start with 2013_01 in any index:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_alias/2013_01*'
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "logs_20130101" : {
+ "aliases" : {
+ "2013_01" : { }
+ }
+ }
+}
+--------------------------------------------------
+
+There is also a HEAD variant of the get indices aliases API to check if
+index aliases exist. The indices aliases exists API supports the same
+options as the get indices aliases API. Examples:
+
+[source,js]
+--------------------------------------------------
+curl -XHEAD 'localhost:9200/_alias/2013'
+curl -XHEAD 'localhost:9200/_alias/2013_01*'
+curl -XHEAD 'localhost:9200/users/_alias/*'
+--------------------------------------------------
diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc
new file mode 100644
index 0000000..a9712d6
--- /dev/null
+++ b/docs/reference/indices/analyze.asciidoc
@@ -0,0 +1,50 @@
+[[indices-analyze]]
+== Analyze
+
+Performs the analysis process on a text and returns the token breakdown
+of the text.
+
+Can be used without specifying an index against one of the many built-in
+analyzers:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_analyze?analyzer=standard' -d 'this is a test'
+--------------------------------------------------
+
+Or by building a custom transient analyzer out of tokenizers and
+filters:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filters=lowercase' -d 'this is a test'
+--------------------------------------------------
+
+It can also run against a specific index:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/test/_analyze?text=this+is+a+test'
+--------------------------------------------------
+
+The above will run an analysis on the "this is a test" text, using the
+default index analyzer associated with the `test` index. An `analyzer`
+can also be provided to use a different analyzer:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/test/_analyze?analyzer=whitespace' -d 'this is a test'
+--------------------------------------------------
+
+Also, the analyzer can be derived based on a field mapping, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/test/_analyze?field=obj1.field1' -d 'this is a test'
+--------------------------------------------------
+
+Will cause the analysis to happen based on the analyzer configured in the
+mapping for `obj1.field1` (or, if none, the default index analyzer).
+
+Also, the text can be provided as part of the request body, and not as a
+parameter.
diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc
new file mode 100644
index 0000000..aed2470
--- /dev/null
+++ b/docs/reference/indices/clearcache.asciidoc
@@ -0,0 +1,34 @@
+[[indices-clearcache]]
+== Clear Cache
+
+The clear cache API allows clearing either all caches or specific caches
+associated with one or more indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
+--------------------------------------------------
+
+The API, by default, will clear all caches. Specific caches can be
+cleared explicitly by setting `filter`, `field_data` or `id_cache` to
+`true`.
+
+All caches relating to specific field(s) can also be cleared by
+specifying the `fields` parameter with a comma delimited list of the
+relevant fields.
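+
+For example (a sketch with an illustrative field name), the field data cache
+for a single field could be cleared like this:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear?field_data=true&fields=user_id'
+--------------------------------------------------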
+
+[float]
+=== Multi Index
+
+The clear cache API can be applied to more than one index with a single
+call, or even on `_all` the indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear'
+
+$ curl -XPOST 'http://localhost:9200/_cache/clear'
+--------------------------------------------------
+
+NOTE: The `filter` cache is not cleared immediately but is scheduled to be
+cleared within 60 seconds.
diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc
new file mode 100644
index 0000000..0b069d3
--- /dev/null
+++ b/docs/reference/indices/create-index.asciidoc
@@ -0,0 +1,106 @@
+[[indices-create-index]]
+== Create Index
+
+The create index API allows instantiating an index. Elasticsearch
+provides support for multiple indices, including executing operations
+across several indices.
+
+[float]
+[[create-index-settings]]
+=== Index Settings
+
+Each index created can have specific settings
+associated with it.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/'
+
+$ curl -XPUT 'http://localhost:9200/twitter/' -d '
+index :
+ number_of_shards : 3
+ number_of_replicas : 2
+'
+--------------------------------------------------
+
+The second curl example above shows how an index called `twitter` can be
+created with specific settings using http://www.yaml.org[YAML].
+In this case, it creates an index with 3 shards, each with 2 replicas.
+The index settings can also be defined with http://www.json.org[JSON]:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/' -d '{
+ "settings" : {
+ "index" : {
+ "number_of_shards" : 3,
+ "number_of_replicas" : 2
+ }
+ }
+}'
+--------------------------------------------------
+
+or, more simply:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/' -d '{
+ "settings" : {
+ "number_of_shards" : 3,
+ "number_of_replicas" : 2
+ }
+}'
+--------------------------------------------------
+
+[NOTE]
+You do not have to explicitly specify the `index` section inside the
+`settings` section.
+
+For more information regarding all the different index level settings
+that can be set when creating an index, please check the
+<<index-modules,index modules>> section.
+
+
+[float]
+[[mappings]]
+=== Mappings
+
+The create index API allows providing a set of one or more mappings:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST localhost:9200/test -d '{
+ "settings" : {
+ "number_of_shards" : 1
+ },
+ "mappings" : {
+ "type1" : {
+ "_source" : { "enabled" : false },
+ "properties" : {
+ "field1" : { "type" : "string", "index" : "not_analyzed" }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+[float]
+[[warmers]]
+=== Warmers
+
+The create index API also allows providing a set of <<indices-warmers,warmers>>:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test -d '{
+ "warmers" : {
+ "warmer_1" : {
+ "source" : {
+ "query" : {
+ ...
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc
new file mode 100644
index 0000000..25d1766
--- /dev/null
+++ b/docs/reference/indices/delete-index.asciidoc
@@ -0,0 +1,19 @@
+[[indices-delete-index]]
+== Delete Index
+
+The delete index API allows deleting an existing index.
+
+[source,js]
+--------------------------------------------------
+$ curl -XDELETE 'http://localhost:9200/twitter/'
+--------------------------------------------------
+
+The above example deletes an index called `twitter`. Specifying an index,
+alias or wildcard expression is required.
+
+The delete index API can also be applied to more than one index, or on
+all indices (be careful!) by using `_all` or `*` as index.
+
+In order to disable the ability to delete indices via wildcards or `_all`,
+set the `action.destructive_requires_name` setting in the config to `true`.
+This setting can also be changed via the cluster update settings API.
\ No newline at end of file
diff --git a/docs/reference/indices/delete-mapping.asciidoc b/docs/reference/indices/delete-mapping.asciidoc
new file mode 100644
index 0000000..9b72d9c
--- /dev/null
+++ b/docs/reference/indices/delete-mapping.asciidoc
@@ -0,0 +1,25 @@
+[[indices-delete-mapping]]
+== Delete Mapping
+
+Allows deleting a mapping (type) along with its data. The REST endpoints are:
+
+[source,js]
+--------------------------------------------------
+
+[DELETE] /{index}/{type}
+
+[DELETE] /{index}/{type}/_mapping
+
+[DELETE] /{index}/_mapping/{type}
+
+--------------------------------------------------
+
+where
+
+[horizontal]
+
+`index`:: `* | _all | glob pattern | name1, name2, …`
+`type`:: `* | _all | glob pattern | name1, name2, …`
+
+Note, most of the time it makes more sense to reindex the data into a
+fresh index than to delete large chunks of it.
diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc
new file mode 100644
index 0000000..75b935a
--- /dev/null
+++ b/docs/reference/indices/flush.asciidoc
@@ -0,0 +1,27 @@
+[[indices-flush]]
+== Flush
+
+The flush API allows flushing one or more indices. The
+flush process of an index basically frees memory from the index by
+flushing data to the index storage and clearing the internal
+<<index-modules-translog,transaction log>>. By
+default, Elasticsearch uses memory heuristics in order to automatically
+trigger flush operations as required in order to clear memory.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_flush'
+--------------------------------------------------
+
+[float]
+=== Multi Index
+
+The flush API can be applied to more than one index with a single call,
+or even on `_all` the indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_flush'
+
+$ curl -XPOST 'http://localhost:9200/_flush'
+--------------------------------------------------
diff --git a/docs/reference/indices/gateway-snapshot.asciidoc b/docs/reference/indices/gateway-snapshot.asciidoc
new file mode 100644
index 0000000..188d1aa
--- /dev/null
+++ b/docs/reference/indices/gateway-snapshot.asciidoc
@@ -0,0 +1,29 @@
+[[indices-gateway-snapshot]]
+== Gateway Snapshot
+
+The gateway snapshot API allows explicitly performing a snapshot through
+the gateway of one or more indices (backing them up). By default, each
+index gateway periodically snapshots changes, though this can be disabled
+and controlled completely through this API.
+
+Note, this API only applies when using a shared storage gateway
+implementation, and does not apply when using the (default) local
+gateway.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_gateway/snapshot'
+--------------------------------------------------
+
+[float]
+=== Multi Index
+
+The gateway snapshot API can be applied to more than one index with a
+single call, or even on `_all` the indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_gateway/snapshot'
+
+$ curl -XPOST 'http://localhost:9200/_gateway/snapshot'
+--------------------------------------------------
diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc
new file mode 100644
index 0000000..0633744
--- /dev/null
+++ b/docs/reference/indices/get-field-mapping.asciidoc
@@ -0,0 +1,140 @@
+[[indices-get-field-mapping]]
+== Get Field Mapping
+
+The get field mapping API allows you to retrieve mapping definitions for one or more fields.
+This is useful when you do not need the complete type mapping returned by
+the <<indices-get-mapping>> API.
+
+The following returns the mapping of the field `text` only:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/_mapping/field/text'
+--------------------------------------------------
+
+For which the response is (assuming `text` is a default string field):
+
+[source,js]
+--------------------------------------------------
+{
+ "twitter": {
+ "tweet": {
+ "text": {
+ "full_name": "text",
+ "mapping": {
+ "text": { "type": "string" }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+
+
+[float]
+=== Multiple Indices, Types and Fields
+
+The get field mapping API can be used to get the mapping of multiple fields from more than one index or type
+with a single call. General usage of the API follows the
+following syntax: `host:port/{index}/{type}/_mapping/field/{field}` where
+`{index}`, `{type}` and `{field}` can stand for a comma-separated list of names or wildcards. To
+get mappings for all indices you can use `_all` for `{index}`. The
+following are some examples:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping/field/message'
+
+curl -XGET 'http://localhost:9200/_all/tweet,book/_mapping/field/message,user.id'
+
+curl -XGET 'http://localhost:9200/_all/tw*/_mapping/field/*.id'
+--------------------------------------------------
+
+[float]
+=== Specifying fields
+
+The get field mapping API allows you to specify one or more fields separated by a comma.
+You can also use wildcards. The field names can be any of the following:
+
+[horizontal]
+Full names:: the full path, including any parent object name the field is
+ part of (ex. `user.id`).
+Index names:: the name of the Lucene field (can be different from the
+ field name if the `index_name` option of the mapping is used).
+Field names:: the name of the field without the path to it (ex. `id` for `{ "user" : { "id" : 1 } }`).
+
+The above options are listed in the order in which the `field` parameter is resolved.
+The first matching field is returned. This is especially important
+if index names or field names are used, as those can be ambiguous.
+
+For example, consider the following mapping:
+
+[source,js]
+--------------------------------------------------
+ {
+ "article": {
+ "properties": {
+ "id": { "type": "string" },
+ "title": { "type": "string", "index_name": "text" },
+ "abstract": { "type": "string", "index_name": "text" },
+ "author": {
+ "properties": {
+ "id": { "type": "string" },
+ "name": { "type": "string", "index_name": "author" }
+ }
+ }
+ }
+ }
+ }
+--------------------------------------------------
+
+To select the `id` field of `author`, you can use its full name `author.id`. Using `text` will return
+the mapping of `abstract`, as it is one of the fields which map to the Lucene field `text`. `name` will return
+the field `author.name`:
+
+[source,js]
+--------------------------------------------------
+curl -XGET "http://localhost:9200/publications/article/_mapping/field/author.id,text,name"
+--------------------------------------------------
+
+returns:
+
+[source,js]
+--------------------------------------------------
+{
+ "publications": {
+ "article": {
+ "text": {
+ "full_name": "abstract",
+ "mapping": {
+ "abstract": { "type": "string", "index_name": "text" }
+ }
+ },
+ "author.id": {
+ "full_name": "author.id",
+ "mapping": {
+ "id": { "type": "string" }
+ }
+ },
+ "name": {
+ "full_name": "author.name",
+ "mapping": {
+ "name": { "type": "string", "index_name": "author" }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note how the response always uses the same field names specified in the request as keys.
+The `full_name` in every entry contains the full name of the field whose mapping was returned.
+This is useful when the request can refer to multiple fields (like `text` above).
+
+[float]
+=== Other options
+
+[horizontal]
+include_defaults:: adding `include_defaults=true` to the query string will cause the response to
+include default values, which are normally suppressed.
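+
+For example, a minimal sketch reusing the `twitter` index from above:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/_mapping/field/text?include_defaults=true'
+--------------------------------------------------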
diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc
new file mode 100644
index 0000000..a64b14d
--- /dev/null
+++ b/docs/reference/indices/get-mapping.asciidoc
@@ -0,0 +1,37 @@
+[[indices-get-mapping]]
+== Get Mapping
+
+The get mapping API allows you to retrieve the mapping definition of an
+index or index/type.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/_mapping'
+--------------------------------------------------
+
+[float]
+=== Multiple Indices and Types
+
+The get mapping API can be used to get more than one index or type
+mapping with a single call. General usage of the API follows this
+syntax: `host:port/{index}/{type}/_mapping` where both
+`{index}` and `{type}` can stand for a comma-separated list of names. To
+get mappings for all indices you can use `_all` for `{index}`. The
+following are some examples:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping'
+
+curl -XGET 'http://localhost:9200/_all/tweet,book/_mapping'
+--------------------------------------------------
+
+If you want to get mappings of all indices and types then the following
+two examples are equivalent:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/_all/_mapping'
+
+curl -XGET 'http://localhost:9200/_mapping'
+--------------------------------------------------
diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc
new file mode 100644
index 0000000..a5950c2
--- /dev/null
+++ b/docs/reference/indices/get-settings.asciidoc
@@ -0,0 +1,50 @@
+[[indices-get-settings]]
+== Get Settings
+
+The get settings API allows you to retrieve the settings of one or more indices:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/_settings'
+--------------------------------------------------
+
+[float]
+=== Multiple Indices
+
+The get settings API can be used to get settings for more than one index
+with a single call. General usage of the API follows this
+syntax: `host:port/{index}/_settings` where
+`{index}` can stand for a comma-separated list of index names and aliases. To
+get settings for all indices you can use `_all` for `{index}`.
+Wildcard expressions are also supported. The following are some examples:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter,kimchy/_settings'
+
+curl -XGET 'http://localhost:9200/_all/_settings'
+
+curl -XGET 'http://localhost:9200/2013-*/_settings'
+--------------------------------------------------
+
+[float]
+=== Prefix option
+
+There is also support for a `prefix` query string option
+that allows you to include only settings matching the specified prefix.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/my-index/_settings?prefix=index.'
+
+curl -XGET 'http://localhost:9200/_all/_settings?prefix=index.routing.allocation.'
+
+curl -XGET 'http://localhost:9200/2013-*/_settings?name=index.merge.*'
+
+curl -XGET 'http://localhost:9200/2013-*/_settings/index.merge.*'
+--------------------------------------------------
+
+The first example returns all index settings that start with `index.` in the index `my-index`,
+and the second example gets all index settings that start with `index.routing.allocation.` for
+all indices. The last two examples are equivalent ways to return all index settings that match
+`index.merge.*` in indices whose names start with `2013-`.
diff --git a/docs/reference/indices/indices-exists.asciidoc b/docs/reference/indices/indices-exists.asciidoc
new file mode 100644
index 0000000..422dcba
--- /dev/null
+++ b/docs/reference/indices/indices-exists.asciidoc
@@ -0,0 +1,12 @@
+[[indices-exists]]
+== Indices Exists
+
+Used to check whether an index (or indices) exists. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XHEAD 'http://localhost:9200/twitter'
+--------------------------------------------------
+
+The HTTP status code indicates if the index exists or not. A `404` means
+it does not exist, and `200` means it does.
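+
+For example, a quick way to print just the status code from the shell (a
+sketch using curl's `-I`, `-o` and `-w` flags, not part of the API itself):
+
+[source,js]
+--------------------------------------------------
+# prints 200 if the twitter index exists, 404 otherwise
+curl -s -o /dev/null -w "%{http_code}" -I 'http://localhost:9200/twitter'
+--------------------------------------------------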
diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc
new file mode 100644
index 0000000..29259d2
--- /dev/null
+++ b/docs/reference/indices/open-close.asciidoc
@@ -0,0 +1,29 @@
+[[indices-open-close]]
+== Open / Close Index API
+
+The open and close index APIs allow you to close an index and later
+open it. A closed index has almost no overhead on the cluster (except
+for maintaining its metadata), and is blocked for read/write operations.
+A closed index can be opened, which will then go through the normal
+recovery process.
+
+The REST endpoint is `/{index}/_close` and `/{index}/_open`. For
+example:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/my_index/_close'
+
+curl -XPOST 'localhost:9200/my_index/_open'
+--------------------------------------------------
+
+It is possible to open and close multiple indices. An error will be thrown
+if the request explicitly refers to a missing index. This behaviour can be
+disabled using the `ignore_unavailable=true` parameter.
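+
+For example, a minimal sketch (the `missing_index` name is hypothetical):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/my_index,missing_index/_close?ignore_unavailable=true'
+--------------------------------------------------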
+
+All indices can be opened or closed at once using `_all` as the index name
+or specifying patterns that identify them all (e.g. `*`).
+
+Identifying indices via wildcards or `_all` can be disabled by setting the
+`action.destructive_requires_name` flag in the config file to `true`.
+This setting can also be changed via the cluster update settings API.
\ No newline at end of file
diff --git a/docs/reference/indices/optimize.asciidoc b/docs/reference/indices/optimize.asciidoc
new file mode 100644
index 0000000..d8ebc8b
--- /dev/null
+++ b/docs/reference/indices/optimize.asciidoc
@@ -0,0 +1,51 @@
+[[indices-optimize]]
+== Optimize
+
+The optimize API allows you to optimize one or more indices. The
+optimize process essentially optimizes the index for faster search
+operations (and relates to the number of segments a Lucene index holds
+within each shard). The optimize operation reduces the number
+of segments by merging them.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_optimize'
+--------------------------------------------------
+
+[float]
+[[optimize-parameters]]
+=== Request Parameters
+
+The optimize API accepts the following request parameters:
+
+[horizontal]
+`max_num_segments`:: The number of segments to optimize to. To fully
+optimize the index, set it to `1`. Defaults to simply checking if a
+merge needs to execute, and if so, executes it.
+
+`only_expunge_deletes`:: Should the optimize process only expunge segments
+with deletes in them. In Lucene, a document is not deleted from a segment,
+just marked as deleted. During a merge process of segments, a new
+segment is created that does not have those deletes. This flag allows
+merging only segments that have deletes. Defaults to `false`.
+
+`flush`:: Should a flush be performed after the optimize. Defaults to
+`true`.
+
+`wait_for_merge`:: Should the request wait for the merge to end. Defaults
+to `true`. Note, a merge can potentially be a very heavy operation, so
+it might make sense to set it to `false`.
+
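+For example, a sketch combining these parameters to fully optimize the
+`twitter` index without waiting for the merge to finish:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_optimize?max_num_segments=1&wait_for_merge=false'
+--------------------------------------------------
+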
+[float]
+[[optimize-multi-index]]
+=== Multi Index
+
+The optimize API can be applied to more than one index with a single
+call, or even to `_all` indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_optimize'
+
+$ curl -XPOST 'http://localhost:9200/_optimize'
+--------------------------------------------------
diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc
new file mode 100644
index 0000000..7748625
--- /dev/null
+++ b/docs/reference/indices/put-mapping.asciidoc
@@ -0,0 +1,83 @@
+[[indices-put-mapping]]
+== Put Mapping
+
+The put mapping API allows you to register a specific mapping definition
+for a specific type.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/_mapping' -d '
+{
+ "tweet" : {
+ "properties" : {
+ "message" : {"type" : "string", "store" : true }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+The above example creates a mapping called `tweet` within the `twitter`
+index. The mapping simply defines that the `message` field should be
+stored (by default, fields are not stored, just indexed) so we can
+retrieve it later on using selective loading.
+
+More information on how to define type mappings can be found in the
+<<mapping,mapping>> section.
+
+[float]
+[[merging-conflicts]]
+=== Merging & Conflicts
+
+When a mapping already exists under the given type, the two
+mapping definitions (the one already defined and the new one) are
+merged. The `ignore_conflicts` parameter can be used to control whether
+conflicts should be ignored or not; by default, it is set to `false`,
+which means conflicts are *not* ignored.
+
+The definition of a conflict really depends on the type being merged, but
+in general, if a different core type is defined, it is considered a
+conflict. New mapping definitions can be added to object types, and core
+type mappings can be upgraded by specifying multi fields on a core type.
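+
+For example, a minimal sketch that submits an updated mapping and ignores any
+conflicts (the `analyzer` change is illustrative only):
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/_mapping?ignore_conflicts=true' -d '
+{
+    "tweet" : {
+        "properties" : {
+            "message" : {"type" : "string", "analyzer" : "whitespace"}
+        }
+    }
+}
+'
+--------------------------------------------------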
+
+[float]
+[[put-mapping-multi-index]]
+=== Multi Index
+
+The put mapping API can be applied to more than one index with a single
+call, or even to `_all` indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/kimchy,elasticsearch/tweet/_mapping' -d '
+{
+ "tweet" : {
+ "properties" : {
+ "message" : {"type" : "string", "store" : true }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+All options:
+
+[source,js]
+--------------------------------------------------
+
+PUT /{index}/_mapping/{type}
+
+
+--------------------------------------------------
+
+
+where
+
+[horizontal]
+`{index}`:: `blank | * | _all | glob pattern | name1, name2, …`
+
+`{type}`:: Name of the type to add. Must be the name of the type defined in the body.
+
+
+Instead of `_mapping` you can also use the plural `_mappings`.
+The URI `PUT /{index}/{type}/_mapping` is still supported for backwards compatibility.
diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc
new file mode 100644
index 0000000..bbc1f20
--- /dev/null
+++ b/docs/reference/indices/refresh.asciidoc
@@ -0,0 +1,26 @@
+[[indices-refresh]]
+== Refresh
+
+The refresh API allows you to explicitly refresh one or more indices, making
+all operations performed since the last refresh available for search.
+The (near) real-time capabilities depend on the index engine used. For
+example, the internal one requires refresh to be called, but by default a
+refresh is scheduled periodically.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/_refresh'
+--------------------------------------------------
+
+[float]
+=== Multi Index
+
+The refresh API can be applied to more than one index with a single
+call, or even to `_all` indices.
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_refresh'
+
+$ curl -XPOST 'http://localhost:9200/_refresh'
+--------------------------------------------------
diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc
new file mode 100644
index 0000000..bc4a7ff
--- /dev/null
+++ b/docs/reference/indices/segments.asciidoc
@@ -0,0 +1,76 @@
+[[indices-segments]]
+== Indices Segments
+
+Provides low-level information about the segments that a Lucene index (at the
+shard level) is built with. It can be used to get more insight into the
+state of a shard and an index, for example optimization information, data
+"wasted" on deletes, and so on.
+
+Endpoints include segments for a specific index, several indices, or
+all:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/test/_segments'
+curl -XGET 'http://localhost:9200/test1,test2/_segments'
+curl -XGET 'http://localhost:9200/_segments'
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+ "_3": {
+ "generation": 3,
+ "num_docs": 1121,
+ "deleted_docs": 53,
+ "size_in_bytes": 228288,
+ "memory_in_bytes": 3211,
+ "committed": true,
+ "search": true,
+ "version": "4.6",
+ "compound": true
+ }
+ ...
+}
+--------------------------------------------------
+
+_3:: The key of the JSON document is the name of the segment. This name
+ is used to generate file names: all files starting with this
+ segment name in the directory of the shard belong to this segment.
+
+generation:: A generation number that is incremented whenever a new segment
+ needs to be written. The segment name is derived from this
+ generation number.
+
+num_docs:: The number of non-deleted documents that are stored in this segment.
+
+deleted_docs:: The number of deleted documents that are stored in this segment.
+ It is perfectly fine if this number is greater than 0, space is
+ going to be reclaimed when this segment gets merged.
+
+size_in_bytes:: The amount of disk space that this segment uses, in bytes.
+
+memory_in_bytes:: Segments need to store some data into memory in order to be
+ searchable efficiently. This number returns the number of bytes
+ that are used for that purpose. A value of -1 indicates that
+ Elasticsearch was not able to compute this number.
+
+committed:: Whether the segment has been synced to disk. Segments that are
+ committed would survive a hard reboot. There is no need to worry
+ if this is false: the data from uncommitted segments is also stored in
+ the transaction log so that Elasticsearch is able to replay
+ changes on the next start.
+
+search:: Whether the segment is searchable. A value of false would most
+ likely mean that the segment has been written to disk but no
+ refresh occurred since then to make it searchable.
+
+version:: The version of Lucene that has been used to write this segment.
+
+compound:: Whether the segment is stored in a compound file. When true, this
+ means that Lucene merged all files from the segment into a single
+ file in order to save file descriptors.
+
diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc
new file mode 100644
index 0000000..b0dd311
--- /dev/null
+++ b/docs/reference/indices/stats.asciidoc
@@ -0,0 +1,76 @@
+[[indices-stats]]
+== Indices Stats
+
+Indices level stats provide statistics on different operations happening
+on an index. The API provides statistics on the index level scope
+(though most stats can also be retrieved using node level scope).
+
+The following returns high level aggregation and index level stats for
+all indices:
+
+[source,js]
+--------------------------------------------------
+curl localhost:9200/_stats
+--------------------------------------------------
+
+Specific index stats can be retrieved using:
+
+[source,js]
+--------------------------------------------------
+curl localhost:9200/index1,index2/_stats
+--------------------------------------------------
+
+By default, all stats are returned. It is also possible to specify in the
+URI which specific stats to return. Those stats can be any of:
+
+[horizontal]
+`docs`:: The number of docs / deleted docs (docs not yet merged out).
+ Note, this is affected by refreshing the index.
+
+`store`:: The size of the index.
+
+`indexing`:: Indexing statistics, can be combined with a comma
+ separated list of `types` to provide document type level stats.
+
+`get`:: Get statistics, including missing stats.
+
+`search`:: Search statistics. You can include statistics for custom groups by adding
+ an extra `groups` parameter (search operations can be associated with one or more
+ groups). The `groups` parameter accepts a comma separated list of group names.
+ Use `_all` to return statistics for all groups.
+
+`warmer`:: Warmer statistics.
+`merge`:: Merge statistics.
+`fielddata`:: Fielddata statistics.
+`flush`:: Flush statistics.
+`completion`:: Completion suggest statistics.
+`refresh`:: Refresh statistics.
+
+Some statistics allow per-field granularity, accepting a comma-separated list of included fields. By default all fields are included:
+
+[horizontal]
+`fields`:: List of fields to be included in the statistics. This is used as the default list unless a more specific field list is provided (see below).
+`completion_fields`:: List of fields to be included in the Completion Suggest statistics
+`fielddata_fields`:: List of fields to be included in the Fielddata statistics
+
+Here are some samples:
+
+[source,js]
+--------------------------------------------------
+# Get back stats for merge and refresh only for all indices
+curl 'localhost:9200/_stats/merge,refresh'
+# Get back stats for type1 and type2 documents for the my_index index
+curl 'localhost:9200/my_index/_stats/indexing?types=type1,type2'
+# Get back just search stats for group1 and group2
+curl 'localhost:9200/_stats/search?groups=group1,group2'
+--------------------------------------------------
+
+The stats returned are aggregated on the index level, with
+`primaries` and `total` aggregations. In order to get back shard level
+stats, set the `level` parameter to `shards`.
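+
+For example:
+
+[source,js]
+--------------------------------------------------
+# Get back shard level stats
+curl 'localhost:9200/_stats?level=shards'
+--------------------------------------------------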
+
+Note, as shards move around the cluster, their stats will be cleared as
+they are created on other nodes. On the other hand, even though a shard
+has "left" a node, that node still retains the stats that the shard
+contributed to it.
+
diff --git a/docs/reference/indices/status.asciidoc b/docs/reference/indices/status.asciidoc
new file mode 100644
index 0000000..c454b0f
--- /dev/null
+++ b/docs/reference/indices/status.asciidoc
@@ -0,0 +1,27 @@
+[[indices-status]]
+== Status
+
+The indices status API allows you to get comprehensive status information
+for one or more indices.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/_status'
+--------------------------------------------------
+
+In order to see the recovery status of shards, pass the `recovery` flag and
+set it to `true`. For snapshot status, pass the `snapshot` flag and set
+it to `true`.
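+
+For example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/_status?recovery=true'
+
+curl -XGET 'http://localhost:9200/twitter/_status?snapshot=true'
+--------------------------------------------------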
+
+[float]
+=== Multi Index
+
+The status API can be applied to more than one index with a single call,
+or even to `_all` indices.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/kimchy,elasticsearch/_status'
+
+curl -XGET 'http://localhost:9200/_status'
+--------------------------------------------------
diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc
new file mode 100644
index 0000000..ce1c33c
--- /dev/null
+++ b/docs/reference/indices/templates.asciidoc
@@ -0,0 +1,155 @@
+[[indices-templates]]
+== Index Templates
+
+Index templates allow you to define templates that will automatically be
+applied to newly created indices. The templates include both settings and
+mappings, and a simple pattern that controls whether the template
+will be applied to a newly created index. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_template/template_1 -d '
+{
+ "template" : "te*",
+ "settings" : {
+ "number_of_shards" : 1
+ },
+ "mappings" : {
+ "type1" : {
+ "_source" : { "enabled" : false }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+This defines a template named `template_1`, with a template pattern of `te*`.
+The settings and mappings will be applied to any index whose name matches
+the `te*` pattern.
+
+[float]
+[[delete]]
+=== Deleting a Template
+
+Index templates are identified by a name (in the above case
+`template_1`) and can be deleted as well:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE localhost:9200/_template/template_1
+--------------------------------------------------
+
+[float]
+[[getting]]
+=== GETting templates
+
+Index templates are identified by a name (in the above case
+`template_1`) and can be retrieved using the following:
+
+[source,js]
+--------------------------------------------------
+curl -XGET localhost:9200/_template/template_1
+--------------------------------------------------
+
+You can also match several templates by using wildcards like:
+
+[source,js]
+--------------------------------------------------
+curl -XGET localhost:9200/_template/temp*
+curl -XGET localhost:9200/_template/template_1,template_2
+--------------------------------------------------
+
+To get a list of all index templates you can use the
+<<cluster-state,Cluster State>> API
+and check the metadata/templates section of the response.
+
+Or run:
+
+[source,js]
+--------------------------------------------------
+curl -XGET localhost:9200/_template/
+--------------------------------------------------
+
+
+[float]
+[[multiple-templates]]
+=== Multiple Template Matching
+
+Multiple index templates can potentially match an index; in this case,
+both the settings and mappings are merged into the final configuration
+of the index. The order of the merging can be controlled using the
+`order` parameter, with lower orders being applied first, and higher
+orders overriding them. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_template/template_1 -d '
+{
+ "template" : "*",
+ "order" : 0,
+ "settings" : {
+ "number_of_shards" : 1
+ },
+ "mappings" : {
+ "type1" : {
+ "_source" : { "enabled" : false }
+ }
+ }
+}
+'
+
+curl -XPUT localhost:9200/_template/template_2 -d '
+{
+ "template" : "te*",
+ "order" : 1,
+ "settings" : {
+ "number_of_shards" : 1
+ },
+ "mappings" : {
+ "type1" : {
+ "_source" : { "enabled" : true }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+The above will disable storing the `_source` on all `type1` types, but
+for indices that match `te*`, `_source` will still be enabled.
+Note, for mappings, the merging is "deep", meaning that specific
+object/property based mappings can easily be added/overridden on higher
+order templates, with lower order templates providing the basis.
+
+[float]
+[[config]]
+=== Config
+
+Index templates can also be placed within the config location
+(`path.conf`) under the `templates` directory (note, make sure to place
+them on all master-eligible nodes). For example, a file called
+`template_1.json` can be placed under `config/templates` and it will be
+applied to any index it matches. Here is a sample of such a file:
+
+[source,js]
+--------------------------------------------------
+{
+ "template_1" : {
+ "template" : "*",
+ "settings" : {
+ "index.number_of_shards" : 2
+ },
+ "mappings" : {
+ "_default_" : {
+ "_source" : {
+ "enabled" : false
+ }
+ },
+ "type1" : {
+ "_all" : {
+ "enabled" : false
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/indices/types-exists.asciidoc b/docs/reference/indices/types-exists.asciidoc
new file mode 100644
index 0000000..87b0aa3
--- /dev/null
+++ b/docs/reference/indices/types-exists.asciidoc
@@ -0,0 +1,12 @@
+[[indices-types-exists]]
+== Types Exists
+
+Used to check whether one or more types exist in one or more indices.
+
+[source,js]
+--------------------------------------------------
+curl -XHEAD 'http://localhost:9200/twitter/tweet'
+--------------------------------------------------
+
+The HTTP status code indicates if the type exists or not. A `404` means
+it does not exist, and `200` means it does.
diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc
new file mode 100644
index 0000000..19af576
--- /dev/null
+++ b/docs/reference/indices/update-settings.asciidoc
@@ -0,0 +1,226 @@
+[[indices-update-settings]]
+== Update Indices Settings
+
+Change specific index level settings in real time.
+
+The REST endpoint is `/_settings` (to update all indices) or
+`{index}/_settings` to update one (or more) indices settings. The body
+of the request includes the updated settings, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "index" : {
+ "number_of_replicas" : 4
+ } }
+--------------------------------------------------
+
+The above will change the number of replicas to 4 from the current
+number of replicas. Here is a curl example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/my_index/_settings' -d '
+{
+ "index" : {
+ "number_of_replicas" : 4
+ } }
+'
+--------------------------------------------------
+
+Below is the list of settings that can be changed using the update
+settings API:
+
+`index.number_of_replicas`::
+ The number of replicas each shard has.
+
+`index.auto_expand_replicas`::
+ Set to an actual value (like `0-all`) or `false` to disable it.
+
+`index.blocks.read_only`::
+ Set to `true` to have the index read only, `false` to allow writes
+ and metadata changes.
+
+`index.blocks.read`::
+ Set to `true` to disable read operations against the index.
+
+`index.blocks.write`::
+ Set to `true` to disable write operations against the index.
+
+`index.blocks.metadata`::
+ Set to `true` to disable metadata operations against the index.
+
+`index.refresh_interval`::
+ The async refresh interval of a shard.
+
+`index.index_concurrency`::
+ Defaults to `8`.
+
+`index.codec`::
+ The codec to use. Defaults to `default`.
+
+`index.codec.bloom.load`::
+ Whether to load the bloom filter. Defaults to `true`.
+ See <<bloom-postings>>.
+
+`index.fail_on_merge_failure`::
+ Defaults to `true`.
+
+`index.translog.flush_threshold_ops`::
+ When to flush based on operations.
+
+`index.translog.flush_threshold_size`::
+ When to flush based on translog (bytes) size.
+
+`index.translog.flush_threshold_period`::
+ When to flush based on a period of not flushing.
+
+`index.translog.disable_flush`::
+ Disables flushing. Note, flushing should only be disabled for a short
+ interval and then re-enabled.
+
+`index.cache.filter.max_size`::
+ The maximum size of filter cache (per segment in shard).
+ Set to `-1` to disable.
+
+`index.cache.filter.expire`::
+ The expire after access time for filter cache.
+ Set to `-1` to disable.
+
+`index.gateway.snapshot_interval`::
+ The gateway snapshot interval (only applies to shared gateways).
+ Defaults to 10s.
+
+<<index-modules-merge,merge policy>>::
+ All the settings for the merge policy currently configured.
+ A different merge policy can't be set.
+
+`index.routing.allocation.include.*`::
+ A node matching any rule will be allowed to host shards from the index.
+
+`index.routing.allocation.exclude.*`::
+ A node matching any rule will NOT be allowed to host shards from the index.
+
+`index.routing.allocation.require.*`::
+ Only nodes matching all rules will be allowed to host shards from the index.
+
+`index.routing.allocation.disable_allocation`::
+ Disable allocation. Defaults to `false`. Deprecated in favour of `index.routing.allocation.enable`.
+
+`index.routing.allocation.disable_new_allocation`::
+ Disable new allocation. Defaults to `false`. Deprecated in favour of `index.routing.allocation.enable`.
+
+`index.routing.allocation.disable_replica_allocation`::
+ Disable replica allocation. Defaults to `false`. Deprecated in favour of `index.routing.allocation.enable`.
+
+added[1.0.0.RC1]
+
+`index.routing.allocation.enable`::
+ Enables shard allocation for a specific index. It can be set to:
+ * `all` (default) - Allows shard allocation for all shards.
+ * `primaries` - Allows shard allocation only for primary shards.
+ * `new_primaries` - Allows shard allocation only for primary shards for new indices.
+ * `none` - No shard allocation is allowed.
+
+`index.routing.allocation.total_shards_per_node`::
+ Controls the total number of shards allowed to be allocated on a single node. Defaults to unbounded (`-1`).
+
+`index.recovery.initial_shards`::
+ When using local gateway a particular shard is recovered only if there can be allocated quorum shards in the cluster. It can be set to:
+ * `quorum` (default)
+ * `quorum-1` (or `half`)
+ * `full`
+ * `full-1`.
+ * Number values are also supported, e.g. `1`.
+
+`index.gc_deletes`::
+
+`index.ttl.disable_purge`::
+ Temporarily disables the purge of expired docs.
+
+<<index-modules-store,store level throttling>>::
+ All the settings for the store level throttling policy currently configured.
+
+`index.translog.fs.type`::
+ Either `simple` or `buffered` (default).
+
+`index.compound_format`::
+ See <<index-compound-format,`index.compound_format`>> in
+ <<index-modules-settings>>.
+
+`index.compound_on_flush`::
+ See <<index-compound-on-flush,`index.compound_on_flush`>> in
+ <<index-modules-settings>>.
+
+<<index-modules-slowlog>>::
+ All the settings for slow log.
+
+`index.warmer.enabled`::
+ See <<indices-warmers>>. Defaults to `true`.
+
+[float]
+[[bulk]]
+=== Bulk Indexing Usage
+
+For example, the update settings API can be used to dynamically tune
+an index for better bulk indexing performance, and then move it back
+to a more real-time indexing state. Before bulk indexing is started,
+use:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index" : {
+ "refresh_interval" : "-1"
+ } }'
+--------------------------------------------------
+
+(Another optimization option is to start the index without any replicas,
+and only add them later, but that really depends on the use case).
+
+Then, once bulk indexing is done, the settings can be updated (back to
+the defaults for example):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index" : {
+ "refresh_interval" : "1s"
+ } }'
+--------------------------------------------------
+
+And an optimize call should be issued:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/test/_optimize?max_num_segments=5'
+--------------------------------------------------
+
+[float]
+[[update-settings-analysis]]
+=== Updating Index Analysis
+
+It is also possible to define new <<analysis,analyzers>> for the index.
+But it is required to <<indices-open-close,close>> the index
+first and <<indices-open-close,open>> it after the changes are made.
+
+For example, if the `content` analyzer hasn't been defined on `myindex` yet,
+you can use the following commands to add it:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/myindex/_close'
+
+curl -XPUT 'localhost:9200/myindex/_settings' -d '{
+ "analysis" : {
+ "analyzer":{
+ "content":{
+ "type":"custom",
+ "tokenizer":"whitespace"
+ }
+ }
+ }
+}'
+
+curl -XPOST 'localhost:9200/myindex/_open'
+--------------------------------------------------
diff --git a/docs/reference/indices/warmers.asciidoc b/docs/reference/indices/warmers.asciidoc
new file mode 100644
index 0000000..bc71094
--- /dev/null
+++ b/docs/reference/indices/warmers.asciidoc
@@ -0,0 +1,183 @@
+[[indices-warmers]]
+== Warmers
+
+Index warming allows you to run registered search requests to warm up the
+index before it is available for search. With the near real-time aspect
+of search, cold data (segments) will be warmed up before they become
+available for search.
+
+Warmup searches typically include requests that require heavy loading of
+data, such as faceting or sorting on specific fields. The warmup APIs
+allow you to register warmup (search) requests under specific names, remove
+them, and retrieve them.
+
+Index warmup can be disabled by setting `index.warmer.enabled` to
+`false`. It is supported as a real-time setting using the update settings
+API. This can be handy when doing initial bulk indexing: disable
+pre-registered warmers to make indexing faster and less expensive, and then
+re-enable them.
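+
+For example, a minimal sketch using the update settings API on a `test` index:
+
+[source,js]
+--------------------------------------------------
+# disable warmers before heavy bulk indexing
+curl -XPUT localhost:9200/test/_settings -d '{
+    "index.warmer.enabled" : false
+}'
+
+# re-enable them afterwards
+curl -XPUT localhost:9200/test/_settings -d '{
+    "index.warmer.enabled" : true
+}'
+--------------------------------------------------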
+
+[float]
+[[creation]]
+=== Index Creation / Templates
+
+Warmers can be registered when an index gets created, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test -d '{
+ "warmers" : {
+ "warmer_1" : {
+ "types" : [],
+ "source" : {
+ "query" : {
+ ...
+ },
+ "facets" : {
+ ...
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+Or, in an index template:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_template/template_1 -d '
+{
+ "template" : "te*",
+ "warmers" : {
+ "warmer_1" : {
+ "types" : [],
+ "source" : {
+ "query" : {
+ ...
+ },
+ "facets" : {
+ ...
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+[float]
+[[warmer-adding]]
+=== Put Warmer
+
+Allows you to put a warmup search request on a specific index (or indices),
+with the body consisting of a regular search request. Types can be
+provided as part of the URI if the search request is designed to be run
+only against specific types.
+
+Here is an example that registers a warmup called `warmer_1` against
+the `test` index (which can be an alias or several indices), for a search
+request that runs against all types:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_warmer/warmer_1 -d '{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "facet_1" : {
+ "terms" : {
+ "field" : "field"
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+And an example that registers a warmup against specific types:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/type1/_warmer/warmer_1 -d '{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "facet_1" : {
+ "terms" : {
+ "field" : "field"
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+All options:
+
+[source,js]
+--------------------------------------------------
+
+PUT _warmer/{warmer_name}
+
+PUT /{index}/_warmer/{warmer_name}
+
+PUT /{index}/{type}/_warmer/{warmer_name}
+
+--------------------------------------------------
+
+
+where
+
+[horizontal]
+`{index}`:: `* | _all | glob pattern | name1, name2, …`
+
+`{type}`:: `* | _all | glob pattern | name1, name2, …`
+
+Instead of `_warmer` you can also use the plural `_warmers`.
+
+
+
+[float]
+[[removing]]
+=== Delete Warmers
+
+Warmers can be deleted using the following endpoint:
+
+
+
+[source,js]
+--------------------------------------------------
+
+[DELETE] /{index}/_warmer/{name}
+
+--------------------------------------------------
+
+
+where
+
+[horizontal]
+`{index}`:: `* | _all | glob pattern | name1, name2, …`
+
+`{name}`:: `* | _all | glob pattern | name1, name2, …`
+
+Instead of `_warmer` you can also use the plural `_warmers`.
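+
+For example, to delete the `warmer_1` warmer registered above:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE localhost:9200/test/_warmer/warmer_1
+--------------------------------------------------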
+
+[float]
+[[warmer-retrieving]]
+=== GETting Warmer
+
+Gets a warmer for a specific index (or alias, or several indices) by
+name. The provided name can be a simple wildcard expression or
+omitted to get all warmers. Some examples:
+
+[source,js]
+--------------------------------------------------
+# get warmer named warmer_1 on test index
+curl -XGET localhost:9200/test/_warmer/warmer_1
+
+# get all warmers that start with warm on test index
+curl -XGET localhost:9200/test/_warmer/warm*
+
+# get all warmers for test index
+curl -XGET localhost:9200/test/_warmer/
+--------------------------------------------------
diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc
new file mode 100644
index 0000000..3c5a225
--- /dev/null
+++ b/docs/reference/mapping.asciidoc
@@ -0,0 +1,77 @@
+[[mapping]]
+= Mapping
+
+[partintro]
+--
+Mapping is the process of defining how a document should be mapped to
+the Search Engine, including its searchable characteristics such as
+which fields are searchable and if/how they are tokenized. In
+Elasticsearch, an index may store documents of different "mapping
+types". Elasticsearch allows one to associate multiple mapping
+definitions for each mapping type.
+
+Explicit mapping is defined on an index/type level. By default, there
+isn't a need to define an explicit mapping, since one is automatically
+created and registered when a new type or new field is introduced (with
+no performance overhead) and has sensible defaults. Only when the
+defaults need to be overridden must a mapping definition be provided.
+
+[float]
+[[all-mapping-types]]
+=== Mapping Types
+
+Mapping types are a way to divide the documents in an index into logical
+groups. Think of it as tables in a database. Though there is separation
+between types, it's not a full separation (all end up as a document
+within the same Lucene index).
+
+Fields with the same name across types are highly recommended to
+have the same type and the same mapping characteristics (analysis settings,
+for example). There is an effort to allow explicitly "choosing" which
+field to use by using a type prefix (`my_type.my_field`), but it's not
+complete, and there are places where it will never work (like faceting
+on the field).
+
+In practice though, this restriction is almost never an issue. The field
+name usually ends up being a good indication of its "typeness" (e.g.
+"first_name" will always be a string). Note also that this does not
+apply to the cross-index case.
+
+[float]
+[[mapping-api]]
+=== Mapping API
+
+To create a mapping, you will need the <<indices-put-mapping,Put Mapping
+API>>, or you can add multiple mappings when you <<indices-create-index,create an
+index>>.
+
+[float]
+[[mapping-settings]]
+=== Global Settings
+
+The `index.mapping.ignore_malformed` global setting can be set on the
+index level to ignore malformed content globally across all
+mapping types (an example of malformed content is trying to index a text
+string value as a numeric type).
+
+The `index.mapping.coerce` global setting can be set on the
+index level to coerce numeric content globally across all
+mapping types. (The default setting is `true`; the coercions attempted are
+converting strings containing numbers into numeric types, and truncating
+the fraction part of numeric values for integer/short/long fields.)
+When a permitted conversion fails, the value is considered
+malformed and the `ignore_malformed` setting dictates what happens next.
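+
+For example, a minimal sketch that enables `ignore_malformed` at index creation
+time (the `myindex` name is illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/myindex' -d '{
+    "settings" : {
+        "index.mapping.ignore_malformed" : true
+    }
+}'
+--------------------------------------------------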
+--
+
+include::mapping/fields.asciidoc[]
+
+include::mapping/types.asciidoc[]
+
+include::mapping/date-format.asciidoc[]
+
+include::mapping/dynamic-mapping.asciidoc[]
+
+include::mapping/conf-mappings.asciidoc[]
+
+include::mapping/meta.asciidoc[]
+
diff --git a/docs/reference/mapping/conf-mappings.asciidoc b/docs/reference/mapping/conf-mappings.asciidoc
new file mode 100644
index 0000000..e9bb3f9
--- /dev/null
+++ b/docs/reference/mapping/conf-mappings.asciidoc
@@ -0,0 +1,19 @@
+[[mapping-conf-mappings]]
+== Config Mappings
+
+Creating new mappings can be done using the
+<<indices-put-mapping,Put Mapping>>
+API. When a document is indexed with no mapping associated with it in
+the specific index, the
+<<mapping-dynamic-mapping,dynamic / default
+mapping>> feature will kick in and automatically create a mapping
+definition for it.
+
+Mappings can also be provided on the node level, meaning that each index
+created will automatically be started with all the mappings defined
+within a certain location.
+
+Mappings can be defined within files called `[mapping_name].json` and be
+placed either under `config/mappings/_default` location, or under
+`config/mappings/[index_name]` (for mappings that should be associated
+only with a specific index).
diff --git a/docs/reference/mapping/date-format.asciidoc b/docs/reference/mapping/date-format.asciidoc
new file mode 100644
index 0000000..eada734
--- /dev/null
+++ b/docs/reference/mapping/date-format.asciidoc
@@ -0,0 +1,206 @@
+[[mapping-date-format]]
+== Date Format
+
+In JSON documents, dates are represented as strings. Elasticsearch uses a set
+of pre-configured formats to recognize and convert those, but you can change the
+defaults by specifying the `format` option when defining a `date` type, or by
+specifying `dynamic_date_formats` in the `root object` mapping (which will
+be used unless explicitly overridden by a `date` type). There are built-in
+formats supported, as well as completely custom ones.
+
+The parsing of dates uses http://joda-time.sourceforge.net/[Joda]. The
+default date parsing used if no format is specified is
+http://joda-time.sourceforge.net/api-release/org/joda/time/format/ISODateTimeFormat.html#dateOptionalTimeParser()[ISODateTimeFormat.dateOptionalTimeParser].
+
+An extension to the format allows defining several formats using the `||`
+separator. This allows defining less strict formats that can be used;
+for example, the `yyyy/MM/dd HH:mm:ss||yyyy/MM/dd` format will parse
+both `yyyy/MM/dd HH:mm:ss` and `yyyy/MM/dd`. The first format also
+acts as the one that converts back from milliseconds to a string
+representation.
+
+[float]
+[[date-math]]
+=== Date Math
+
+The `date` type supports using date math expressions when using it in a
+query/filter (this mainly makes sense in `range` queries/filters).
+
+The expression starts with an "anchor" date, which can be either `now`
+or a date string (in the applicable format) ending with `||`. It can
+then be followed by a math expression, supporting `+`, `-` and `/`
+(rounding). The units supported are `y` (year), `M` (month), `w` (week),
+`d` (day), `h` (hour), `m` (minute), and `s` (second).
+
+Here are some samples: `now+1h`, `now+1h+1m`, `now+1h/d`,
+`2012-01-01||+1M/d`.
+
+Note, when doing `range` type searches with an inclusive upper value,
+the rounding will properly round to the ceiling instead of
+the floor.
+
+To change this behavior, set
+`"mapping.date.round_ceil": false`.
+
+
+[float]
+[[built-in]]
+=== Built In Formats
+
+The following table lists all the default ISO formats supported:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Name |Description
+|`basic_date`|A basic formatter for a full date as four digit year, two
+digit month of year, and two digit day of month (yyyyMMdd).
+
+|`basic_date_time`|A basic formatter that combines a basic date and time,
+separated by a 'T' (yyyyMMdd'T'HHmmss.SSSZ).
+
+|`basic_date_time_no_millis`|A basic formatter that combines a basic date
+and time without millis, separated by a 'T' (yyyyMMdd'T'HHmmssZ).
+
+|`basic_ordinal_date`|A formatter for a full ordinal date, using a four
+digit year and three digit dayOfYear (yyyyDDD).
+
+|`basic_ordinal_date_time`|A formatter for a full ordinal date and time,
+using a four digit year and three digit dayOfYear
+(yyyyDDD'T'HHmmss.SSSZ).
+
+|`basic_ordinal_date_time_no_millis`|A formatter for a full ordinal date
+and time without millis, using a four digit year and three digit
+dayOfYear (yyyyDDD'T'HHmmssZ).
+
+|`basic_time`|A basic formatter for a two digit hour of day, two digit
+minute of hour, two digit second of minute, three digit millis, and time
+zone offset (HHmmss.SSSZ).
+
+|`basic_time_no_millis`|A basic formatter for a two digit hour of day,
+two digit minute of hour, two digit second of minute, and time zone
+offset (HHmmssZ).
+
+|`basic_t_time`|A basic formatter for a two digit hour of day, two digit
+minute of hour, two digit second of minute, three digit millis, and time
+zone off set prefixed by 'T' ('T'HHmmss.SSSZ).
+
+|`basic_t_time_no_millis`|A basic formatter for a two digit hour of day,
+two digit minute of hour, two digit second of minute, and time zone
+offset prefixed by 'T' ('T'HHmmssZ).
+
+|`basic_week_date`|A basic formatter for a full date as four digit
+weekyear, two digit week of weekyear, and one digit day of week
+(xxxx'W'wwe).
+
+|`basic_week_date_time`|A basic formatter that combines a basic weekyear
+date and time, separated by a 'T' (xxxx'W'wwe'T'HHmmss.SSSZ).
+
+|`basic_week_date_time_no_millis`|A basic formatter that combines a basic
+weekyear date and time without millis, separated by a 'T'
+(xxxx'W'wwe'T'HHmmssZ).
+
+|`date`|A formatter for a full date as four digit year, two digit month
+of year, and two digit day of month (yyyy-MM-dd).
+
+|`date_hour`|A formatter that combines a full date and two digit hour of
+day.
+
+|`date_hour_minute`|A formatter that combines a full date, two digit hour
+of day, and two digit minute of hour.
+
+|`date_hour_minute_second`|A formatter that combines a full date, two
+digit hour of day, two digit minute of hour, and two digit second of
+minute.
+
+|`date_hour_minute_second_fraction`|A formatter that combines a full
+date, two digit hour of day, two digit minute of hour, two digit second
+of minute, and three digit fraction of second
+(yyyy-MM-dd'T'HH:mm:ss.SSS).
+
+|`date_hour_minute_second_millis`|A formatter that combines a full date,
+two digit hour of day, two digit minute of hour, two digit second of
+minute, and three digit fraction of second (yyyy-MM-dd'T'HH:mm:ss.SSS).
+
+|`date_optional_time`|a generic ISO datetime parser where the date is
+mandatory and the time is optional.
+
+|`date_time`|A formatter that combines a full date and time, separated by
+a 'T' (yyyy-MM-dd'T'HH:mm:ss.SSSZZ).
+
+|`date_time_no_millis`|A formatter that combines a full date and time
+without millis, separated by a 'T' (yyyy-MM-dd'T'HH:mm:ssZZ).
+
+|`hour`|A formatter for a two digit hour of day.
+
+|`hour_minute`|A formatter for a two digit hour of day and two digit
+minute of hour.
+
+|`hour_minute_second`|A formatter for a two digit hour of day, two digit
+minute of hour, and two digit second of minute.
+
+|`hour_minute_second_fraction`|A formatter for a two digit hour of day,
+two digit minute of hour, two digit second of minute, and three digit
+fraction of second (HH:mm:ss.SSS).
+
+|`hour_minute_second_millis`|A formatter for a two digit hour of day, two
+digit minute of hour, two digit second of minute, and three digit
+fraction of second (HH:mm:ss.SSS).
+
+|`ordinal_date`|A formatter for a full ordinal date, using a four digit
+year and three digit dayOfYear (yyyy-DDD).
+
+|`ordinal_date_time`|A formatter for a full ordinal date and time, using
+a four digit year and three digit dayOfYear (yyyy-DDD'T'HH:mm:ss.SSSZZ).
+
+|`ordinal_date_time_no_millis`|A formatter for a full ordinal date and
+time without millis, using a four digit year and three digit dayOfYear
+(yyyy-DDD'T'HH:mm:ssZZ).
+
+|`time`|A formatter for a two digit hour of day, two digit minute of
+hour, two digit second of minute, three digit fraction of second, and
+time zone offset (HH:mm:ss.SSSZZ).
+
+|`time_no_millis`|A formatter for a two digit hour of day, two digit
+minute of hour, two digit second of minute, and time zone offset
+(HH:mm:ssZZ).
+
+|`t_time`|A formatter for a two digit hour of day, two digit minute of
+hour, two digit second of minute, three digit fraction of second, and
+time zone offset prefixed by 'T' ('T'HH:mm:ss.SSSZZ).
+
+|`t_time_no_millis`|A formatter for a two digit hour of day, two digit
+minute of hour, two digit second of minute, and time zone offset
+prefixed by 'T' ('T'HH:mm:ssZZ).
+
+|`week_date`|A formatter for a full date as four digit weekyear, two
+digit week of weekyear, and one digit day of week (xxxx-'W'ww-e).
+
+|`week_date_time`|A formatter that combines a full weekyear date and
+time, separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ).
+
+|`weekDateTimeNoMillis`|A formatter that combines a full weekyear date
+and time without millis, separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ssZZ).
+
+|`week_year`|A formatter for a four digit weekyear.
+
+|`weekyearWeek`|A formatter for a four digit weekyear and two digit week
+of weekyear.
+
+|`weekyearWeekDay`|A formatter for a four digit weekyear, two digit week
+of weekyear, and one digit day of week.
+
+|`year`|A formatter for a four digit year.
+
+|`year_month`|A formatter for a four digit year and two digit month of
+year.
+
+|`year_month_day`|A formatter for a four digit year, two digit month of
+year, and two digit day of month.
+|=======================================================================
+
+[float]
+[[custom]]
+=== Custom Format
+
+Allows for a completely customizable date format, as explained
+http://joda-time.sourceforge.net/api-release/org/joda/time/format/DateTimeFormat.html[here].
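+
+For example, a minimal sketch of a mapping that sets a custom format on a
+`date` field (the `post_date` field name is hypothetical), combined with the
+`||` fallback mechanism described above:
+
+[source,js]
+--------------------------------------------------
+{
+    "tweet" : {
+        "properties" : {
+            "post_date" : {
+                "type" : "date",
+                "format" : "yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"
+            }
+        }
+    }
+}
+--------------------------------------------------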
diff --git a/docs/reference/mapping/dynamic-mapping.asciidoc b/docs/reference/mapping/dynamic-mapping.asciidoc
new file mode 100644
index 0000000..b10bced
--- /dev/null
+++ b/docs/reference/mapping/dynamic-mapping.asciidoc
@@ -0,0 +1,65 @@
+[[mapping-dynamic-mapping]]
+== Dynamic Mapping
+
+Default mappings allow generic mapping definitions to be automatically
+applied to types that do not have mappings predefined. This is mainly
+possible thanks to the fact that the
+<<mapping-object-type,object mapping>> and
+namely the <<mapping-root-object-type,root
+object mapping>> allow for schema-less dynamic addition of unmapped
+fields.
+
+The default mapping definition is a plain mapping definition that is
+embedded within the distribution:
+
+[source,js]
+--------------------------------------------------
+{
+ "_default_" : {
+ }
+}
+--------------------------------------------------
+
+Pretty short, no? Basically, everything is defaulted, especially the
+dynamic nature of the root object mapping. The default mapping
+definition can be overridden in several ways. The simplest is
+to define a file called `default-mapping.json` and place it
+under the `config` directory (which can be configured to exist in a
+different location). It can also be explicitly set using the
+`index.mapper.default_mapping_location` setting.
+
+The dynamic creation of mappings for unmapped types can be completely
+disabled by setting `index.mapper.dynamic` to `false`.
+
+The dynamic creation of fields within a type can be completely
+disabled by setting the `dynamic` property of the type to `strict`.
+
+Here is a <<indices-put-mapping,Put Mapping>> example that
+disables dynamic field creation for a `tweet`:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPUT 'http://localhost:9200/twitter/tweet/_mapping' -d '
+{
+ "tweet" : {
+ "dynamic": "strict",
+ "properties" : {
+ "message" : {"type" : "string", "store" : true }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+Here is how we can change the default
+<<mapping-date-format,date_formats>> used in the
+root and inner object types:
+
+[source,js]
+--------------------------------------------------
+{
+ "_default_" : {
+ "date_formats" : ["yyyy-MM-dd", "dd-MM-yyyy", "date_optional_time"]
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/mapping/fields.asciidoc b/docs/reference/mapping/fields.asciidoc
new file mode 100644
index 0000000..a1f7e98
--- /dev/null
+++ b/docs/reference/mapping/fields.asciidoc
@@ -0,0 +1,33 @@
+[[mapping-fields]]
+== Fields
+
+Each mapping has a number of fields associated with it
+which can be used to control how the document metadata
+(e.g. <<mapping-all-field>>) is indexed.
+
+include::fields/uid-field.asciidoc[]
+
+include::fields/id-field.asciidoc[]
+
+include::fields/type-field.asciidoc[]
+
+include::fields/source-field.asciidoc[]
+
+include::fields/all-field.asciidoc[]
+
+include::fields/analyzer-field.asciidoc[]
+
+include::fields/boost-field.asciidoc[]
+
+include::fields/parent-field.asciidoc[]
+
+include::fields/routing-field.asciidoc[]
+
+include::fields/index-field.asciidoc[]
+
+include::fields/size-field.asciidoc[]
+
+include::fields/timestamp-field.asciidoc[]
+
+include::fields/ttl-field.asciidoc[]
+
diff --git a/docs/reference/mapping/fields/all-field.asciidoc b/docs/reference/mapping/fields/all-field.asciidoc
new file mode 100644
index 0000000..65453ef
--- /dev/null
+++ b/docs/reference/mapping/fields/all-field.asciidoc
@@ -0,0 +1,78 @@
+[[mapping-all-field]]
+=== `_all`
+
+The idea of the `_all` field is that it includes the text of one or more
+other fields within the document indexed. It can come in very handy,
+especially for search requests where we want to execute a search query
+against the content of a document without knowing which fields to
+search on. This comes at the expense of CPU cycles and index size.
+
+The `_all` field can be completely disabled. Explicit field mappings and
+object mappings can be excluded from / included in the `_all` field. By
+default, it is enabled and all fields are included in it for ease of
+use.
+
+When disabling the `_all` field, it is a good practice to set
+`index.query.default_field` to a different value (for example, if you
+have a main "message" field in your data, set it to `message`).
+
+One of the nice features of the `_all` field is that it takes
+field boost levels into account. This means that if a title field is
+boosted more than content, the title (part) in the `_all` field will
+carry more weight than the content (part) in the `_all` field.
+
+Here is a sample mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "_all" : {"enabled" : true},
+ "properties" : {
+ "name" : {
+ "type" : "object",
+ "dynamic" : false,
+ "properties" : {
+ "first" : {"type" : "string", "store" : true , "include_in_all" : false},
+ "last" : {"type" : "string", "index" : "not_analyzed"}
+ }
+ },
+ "address" : {
+ "type" : "object",
+ "include_in_all" : false,
+ "properties" : {
+ "first" : {
+ "properties" : {
+ "location" : {"type" : "string", "store" : true, "index_name" : "firstLocation"}
+ }
+ },
+ "last" : {
+ "properties" : {
+ "location" : {"type" : "string"}
+ }
+ }
+ }
+ },
+ "simple1" : {"type" : "long", "include_in_all" : true},
+ "simple2" : {"type" : "long", "include_in_all" : false}
+ }
+ }
+}
+--------------------------------------------------
+
+The `_all` field allows for `store`, `term_vector` and `analyzer` (with
+specific `index_analyzer` and `search_analyzer`) to be set.
+
+[float]
+[[highlighting]]
+==== Highlighting
+
+For any field to allow
+<<search-request-highlighting,highlighting>> it has
+to be either stored or part of the `_source` field. By default the `_all`
+field does not qualify for either, so highlighting it does not yield
+any data.
+
+Although it is possible to `store` the `_all` field, it is basically an
+aggregation of all fields, which means more data will be stored, and
+highlighting it might produce strange results.
diff --git a/docs/reference/mapping/fields/analyzer-field.asciidoc b/docs/reference/mapping/fields/analyzer-field.asciidoc
new file mode 100644
index 0000000..30bb072
--- /dev/null
+++ b/docs/reference/mapping/fields/analyzer-field.asciidoc
@@ -0,0 +1,41 @@
+[[mapping-analyzer-field]]
+=== `_analyzer`
+
+The `_analyzer` mapping allows using a document field's value as the
+name of the analyzer that will be used to index the document. The
+analyzer will be used for any field that does not explicitly define an
+`analyzer` or `index_analyzer` when indexing.
+
+Here is a simple mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "type1" : {
+ "_analyzer" : {
+ "path" : "my_field"
+ }
+ }
+}
+--------------------------------------------------
+
+The above will use the value of `my_field` to look up an analyzer
+registered under that name. For example, indexing the following doc:
+
+[source,js]
+--------------------------------------------------
+{
+ "my_field" : "whitespace"
+}
+--------------------------------------------------
+
+Will cause the `whitespace` analyzer to be used as the index analyzer
+for all fields without explicit analyzer setting.
+
+The default path value is `_analyzer`, so the analyzer can be driven for
+a specific document by setting the `_analyzer` field in it. If a custom
+JSON field name is needed, an explicit mapping with a different path
+should be set.
+
+By default, the `_analyzer` field is indexed; it can be disabled by
+setting `index` to `no` in the mapping.
diff --git a/docs/reference/mapping/fields/boost-field.asciidoc b/docs/reference/mapping/fields/boost-field.asciidoc
new file mode 100644
index 0000000..1d00845
--- /dev/null
+++ b/docs/reference/mapping/fields/boost-field.asciidoc
@@ -0,0 +1,72 @@
+[[mapping-boost-field]]
+=== `_boost`
+
+deprecated[1.0.0.RC1,See <<function-score-instead-of-boost>>]
+
+Boosting is the process of enhancing the relevancy of a document or
+field. Field-level mapping allows defining an explicit boost level on a
+specific field. The boost field mapping (applied on the
+<<mapping-root-object-type,root object>>) allows
+defining a field whose *content will control the
+boost level of the document*. For example, consider the following
+mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_boost" : {"name" : "my_boost", "null_value" : 1.0}
+ }
+}
+--------------------------------------------------
+
+The above mapping defines a boost field named `my_boost`. If the
+`my_boost` field exists within the JSON document indexed, its value will
+control the boost level of the document indexed. For example, the
+following JSON document will be indexed with a boost value of `2.2`:
+
+[source,js]
+--------------------------------------------------
+{
+ "my_boost" : 2.2,
+ "message" : "This is a tweet!"
+}
+--------------------------------------------------
+
+[[function-score-instead-of-boost]]
+==== Function score instead of boost
+
+Support for document boosting via the `_boost` field has been removed
+from Lucene and is deprecated in Elasticsearch as of v1.0.0.RC1. The
+implementation in Lucene resulted in unpredictable results when
+used with multiple fields or multi-value fields.
+
+Instead, the <<query-dsl-function-score-query>> can be used to achieve
+the desired functionality by boosting each document by the value in
+any field of the document:
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "function_score": {
+ "query": { <1>
+ "match": {
+ "title": "your main query"
+ }
+ },
+ "functions": [{
+ "script_score": { <2>
+ "script": "doc['my_boost_field'].value"
+ }
+ }],
+ "score_mode": "multiply"
+ }
+ }
+}
+--------------------------------------------------
+<1> The original query, now wrapped in a `function_score` query.
+<2> This script returns the value in `my_boost_field`, which is then
+ multiplied by the query `_score` for each document.
+
+
diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc
new file mode 100644
index 0000000..1adab49
--- /dev/null
+++ b/docs/reference/mapping/fields/id-field.asciidoc
@@ -0,0 +1,52 @@
+[[mapping-id-field]]
+=== `_id`
+
+Each document indexed is associated with an id and a type. The `_id`
+field can be used to index just the id, and possibly also store it. By
+default it is not indexed and not stored (thus, not created).
+
+Note, even though the `_id` is not indexed, all the APIs still work
+(since they work with the `_uid` field), as well as fetching by ids
+using `term`, `terms` or `prefix` queries/filters (including the
+specific `ids` query/filter).
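+
+For example, a sketch of fetching documents by id with the `ids` query
+(the type and values are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "ids" : {
+            "type" : "tweet",
+            "values" : ["1", "4"]
+        }
+    }
+}
+--------------------------------------------------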
+
+The `_id` field can be enabled to be indexed, and possibly stored,
+using:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_id" : {"index": "not_analyzed", "store" : false }
+ }
+}
+--------------------------------------------------
+
+The `_id` mapping can also be associated with a `path` that will be used
+to extract the id from a different location in the source document. For
+example, having the following mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_id" : {
+ "path" : "post_id"
+ }
+ }
+}
+--------------------------------------------------
+
+Will cause `1` to be used as the id for:
+
+[source,js]
+--------------------------------------------------
+{
+ "message" : "You know, for Search",
+ "post_id" : "1"
+}
+--------------------------------------------------
+
+This does require an additional lightweight parsing step while indexing,
+in order to extract the id to decide which shard the index operation
+will be executed on.
diff --git a/docs/reference/mapping/fields/index-field.asciidoc b/docs/reference/mapping/fields/index-field.asciidoc
new file mode 100644
index 0000000..96a320b
--- /dev/null
+++ b/docs/reference/mapping/fields/index-field.asciidoc
@@ -0,0 +1,15 @@
+[[mapping-index-field]]
+=== `_index`
+
+The `_index` field allows storing in a document the index it belongs to.
+By default it is disabled. In order to enable it, the following mapping
+should be defined:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_index" : { "enabled" : true }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc
new file mode 100644
index 0000000..3225b53
--- /dev/null
+++ b/docs/reference/mapping/fields/parent-field.asciidoc
@@ -0,0 +1,21 @@
+[[mapping-parent-field]]
+=== `_parent`
+
+The parent field mapping is defined on a child mapping, and points to
+the parent type this child relates to. For example, in case of a `blog`
+type and a `blog_tag` type child document, the mapping for `blog_tag`
+should be:
+
+[source,js]
+--------------------------------------------------
+{
+ "blog_tag" : {
+ "_parent" : {
+ "type" : "blog"
+ }
+ }
+}
+--------------------------------------------------
+
+The mapping is automatically stored and indexed (meaning it can be
+searched on using the `_parent` field notation).
diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc
new file mode 100644
index 0000000..8ca2286
--- /dev/null
+++ b/docs/reference/mapping/fields/routing-field.asciidoc
@@ -0,0 +1,69 @@
+[[mapping-routing-field]]
+=== `_routing`
+
+The routing field allows controlling the `_routing` aspect when indexing
+data where explicit routing control is required.
+
+[float]
+==== store / index
+
+The `_routing` mapping controls whether the routing value provided is
+stored (`store`, which defaults to `false`) and how it is indexed
+(`index`, which defaults to `not_analyzed`). Storing the routing value
+makes reindexing data possible if the routing value is completely
+external and not part of the docs.
+
+[float]
+==== required
+
+Another aspect of the `_routing` mapping is the ability to define it as
+required by setting `required` to `true`. This is very important to set
+when using routing features, as it allows different APIs to make use of
+it. For example, an index operation will be rejected if no routing value
+has been provided (or derived from the doc). A delete operation will be
+broadcast to all shards if no routing value is provided and `_routing`
+is required.
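+
+For example, when `required` is set to `true`, an index request needs to
+carry the routing value explicitly, typically via the `routing` URL
+parameter (the index name and values below are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/blog/comment/1?routing=user123' -d '{
+    "text" : "the comment text"
+}'
+--------------------------------------------------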
+
+[float]
+==== path
+
+The routing value can be provided as an external value when indexing
+(and still stored as part of the document, in much the same way
+`_source` is stored). But, it can also be automatically extracted from
+the index doc based on a `path`. For example, having the following
+mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "comment" : {
+ "_routing" : {
+ "required" : true,
+ "path" : "blog.post_id"
+ }
+ }
+}
+--------------------------------------------------
+
+Will cause the following doc to be routed based on the `111222` value:
+
+[source,js]
+--------------------------------------------------
+{
+ "text" : "the comment text"
+ "blog" : {
+ "post_id" : "111222"
+ }
+}
+--------------------------------------------------
+
+Note, using `path` without an explicit routing value provided requires an
+additional (though quite fast) parsing phase.
+
+[float]
+==== id uniqueness
+
+When indexing documents specifying a custom `_routing`, the uniqueness
+of the `_id` is not guaranteed throughout all the shards that the index
+is composed of. In fact, documents with the same `_id` might end up in
+different shards if indexed with different `_routing` values.
diff --git a/docs/reference/mapping/fields/size-field.asciidoc b/docs/reference/mapping/fields/size-field.asciidoc
new file mode 100644
index 0000000..7abfd40
--- /dev/null
+++ b/docs/reference/mapping/fields/size-field.asciidoc
@@ -0,0 +1,26 @@
+[[mapping-size-field]]
+=== `_size`
+
+The `_size` field allows automatically indexing the size (in bytes) of
+the original `_source` indexed. By default, it's disabled. In order to
+enable it, set the mapping to:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_size" : {"enabled" : true}
+ }
+}
+--------------------------------------------------
+
+In order to also store it, use:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_size" : {"enabled" : true, "store" : true }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc
new file mode 100644
index 0000000..22bb963
--- /dev/null
+++ b/docs/reference/mapping/fields/source-field.asciidoc
@@ -0,0 +1,41 @@
+[[mapping-source-field]]
+=== `_source`
+
+The `_source` field is an automatically generated field that stores the
+actual JSON that was used as the indexed document. It is not indexed
+(searchable), just stored. When executing "fetch" requests, like
+<<docs-get,get>> or
+<<search-search,search>>, the `_source` field is
+returned by default.
+
+Though very handy to have around, the source field does incur storage
+overhead within the index. For this reason, it can be disabled. For
+example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_source" : {"enabled" : false}
+ }
+}
+--------------------------------------------------
+
+[float]
+[[include-exclude]]
+==== Includes / Excludes
+
+Allows specifying paths in the source that will be included / excluded
+when it's stored, supporting `*` as a wildcard. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "my_type" : {
+ "_source" : {
+ "includes" : ["path1.*", "path2.*"],
+ "excludes" : ["pat3.*"]
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/mapping/fields/timestamp-field.asciidoc b/docs/reference/mapping/fields/timestamp-field.asciidoc
new file mode 100644
index 0000000..97bca8d
--- /dev/null
+++ b/docs/reference/mapping/fields/timestamp-field.asciidoc
@@ -0,0 +1,82 @@
+[[mapping-timestamp-field]]
+=== `_timestamp`
+
+The `_timestamp` field allows automatically indexing the timestamp of a
+document. It can be provided externally via the index request or in the
+`_source`. If it is not provided externally it will be automatically set
+to the date the document was processed by the indexing chain.
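+
+For example, assuming `_timestamp` has been enabled (see below), a sketch
+of providing the timestamp externally on the index request (the index name
+and values are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?timestamp=2009-11-15T14%3A12%3A12' -d '{
+    "message" : "You know, for Search"
+}'
+--------------------------------------------------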
+
+[float]
+==== enabled
+
+By default it is disabled. In order to enable it, the following mapping
+should be defined:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_timestamp" : { "enabled" : true }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== store / index
+
+By default the `_timestamp` field has `store` set to `false` and `index`
+set to `not_analyzed`. It can be queried as a standard date field.
+
+[float]
+==== path
+
+The `_timestamp` value can be provided as an external value when
+indexing. But, it can also be automatically extracted from the document
+to index based on a `path`. For example, having the following mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_timestamp" : {
+ "enabled" : true,
+ "path" : "post_date"
+ }
+ }
+}
+--------------------------------------------------
+
+Will cause `2009-11-15T14:12:12` to be used as the timestamp value for:
+
+[source,js]
+--------------------------------------------------
+{
+ "message" : "You know, for Search",
+ "post_date" : "2009-11-15T14:12:12"
+}
+--------------------------------------------------
+
+Note, using `path` without an explicit timestamp value provided requires an
+additional (though quite fast) parsing phase.
+
+[float]
+==== format
+
+You can define the <<mapping-date-format,date
+format>> used to parse the provided timestamp value. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_timestamp" : {
+ "enabled" : true,
+ "path" : "post_date",
+ "format" : "YYYY-MM-dd"
+ }
+ }
+}
+--------------------------------------------------
+
+Note, the default format is `dateOptionalTime`. The timestamp value will
+first be parsed as a number, and if that fails, the format will be tried.
diff --git a/docs/reference/mapping/fields/ttl-field.asciidoc b/docs/reference/mapping/fields/ttl-field.asciidoc
new file mode 100644
index 0000000..d47aaca
--- /dev/null
+++ b/docs/reference/mapping/fields/ttl-field.asciidoc
@@ -0,0 +1,70 @@
+[[mapping-ttl-field]]
+=== `_ttl`
+
+A lot of documents naturally come with an expiration date. Documents can
+therefore have a `_ttl` (time to live), which will cause the expired
+documents to be deleted automatically.
+
+[float]
+==== enabled
+
+By default it is disabled. In order to enable it, the following mapping
+should be defined:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_ttl" : { "enabled" : true }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== store / index
+
+By default the `_ttl` field has `store` set to `true` and `index` set to
+`not_analyzed`. Note that the `index` property has to be set to
+`not_analyzed` in order for the purge process to work.
+
+[float]
+==== default
+
+You can provide a per index/type default `_ttl` value as follows:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_ttl" : { "enabled" : true, "default" : "1d" }
+ }
+}
+--------------------------------------------------
+
+In this case, if you don't provide a `_ttl` value in your index request
+or in the `_source`, all tweets will have a `_ttl` of one day.
+
+If you do not specify a time unit like `d` (days), `m` (minutes),
+`h` (hours), `ms` (milliseconds) or `w` (weeks), milliseconds are used
+as the default unit.
+
+If no `default` is set and no `_ttl` value is given then the document
+has an infinite `_ttl` and will not expire.
+
+You can dynamically update the `default` value using the put mapping
+API. It won't change the `_ttl` of already indexed documents but will be
+used for future documents.
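+
+For example, a sketch of updating the default via the put mapping API (the
+index and type names are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/_mapping' -d '{
+    "tweet" : {
+        "_ttl" : { "enabled" : true, "default" : "2d" }
+    }
+}'
+--------------------------------------------------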
+
+[float]
+==== Note on documents expiration
+
+Expired documents are automatically deleted on a regular basis. You can
+dynamically set `indices.ttl.interval` to fit your needs. The
+default value is `60s`.
+
+The deletion orders are processed in bulk. You can set
+`indices.ttl.bulk_size` to fit your needs. The default value is `10000`.
+
+Note that the expiration procedure handles versioning properly, so if a
+document is updated between the collection of documents to expire and
+the delete order, the document won't be deleted.
diff --git a/docs/reference/mapping/fields/type-field.asciidoc b/docs/reference/mapping/fields/type-field.asciidoc
new file mode 100644
index 0000000..bac7457
--- /dev/null
+++ b/docs/reference/mapping/fields/type-field.asciidoc
@@ -0,0 +1,31 @@
+[[mapping-type-field]]
+=== `_type`
+
+Each document indexed is associated with an id and a type. The type,
+when indexing, is automatically indexed into a `_type` field. By
+default, the `_type` field is indexed (but *not* analyzed) and not
+stored. This means that the `_type` field can be queried.
+
+The `_type` field can be stored as well, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_type" : {"store" : true}
+ }
+}
+--------------------------------------------------
+
+The `_type` field can also be set to not be indexed, and all the APIs
+will still work except for specific queries (term queries / filters) or
+faceting done on the `_type` field, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_type" : {"index" : "no"}
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/mapping/fields/uid-field.asciidoc b/docs/reference/mapping/fields/uid-field.asciidoc
new file mode 100644
index 0000000..f9ce245
--- /dev/null
+++ b/docs/reference/mapping/fields/uid-field.asciidoc
@@ -0,0 +1,11 @@
+[[mapping-uid-field]]
+=== `_uid`
+
+Each document indexed is associated with an id and a type, the internal
+`_uid` field is the unique identifier of a document within an index and
+is composed of the type and the id (meaning that different types can
+have the same id and still maintain uniqueness).
+
+The `_uid` field is automatically used when `_type` is not indexed to
+perform type-based filtering, and does not require the `_id` to be
+indexed.
diff --git a/docs/reference/mapping/meta.asciidoc b/docs/reference/mapping/meta.asciidoc
new file mode 100644
index 0000000..5cb0c14
--- /dev/null
+++ b/docs/reference/mapping/meta.asciidoc
@@ -0,0 +1,25 @@
+[[mapping-meta]]
+== Meta
+
+Each mapping can have custom meta data associated with it. These are
+simple storage elements that are persisted along with the mapping
+and can be retrieved when fetching the mapping definition. The meta is
+defined under the `_meta` element, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "_meta" : {
+ "attr1" : "value1",
+ "attr2" : {
+ "attr3" : "value3"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Meta can be handy, for example, for client libraries that perform
+serialization and deserialization to store their meta model (for example,
+the class the document maps to).
diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc
new file mode 100644
index 0000000..0cc967e
--- /dev/null
+++ b/docs/reference/mapping/types.asciidoc
@@ -0,0 +1,24 @@
+[[mapping-types]]
+== Types
+
+The datatype for each field in a document (eg strings, numbers,
+objects etc) can be controlled via the type mapping.
+
+include::types/core-types.asciidoc[]
+
+include::types/array-type.asciidoc[]
+
+include::types/object-type.asciidoc[]
+
+include::types/root-object-type.asciidoc[]
+
+include::types/nested-type.asciidoc[]
+
+include::types/ip-type.asciidoc[]
+
+include::types/geo-point-type.asciidoc[]
+
+include::types/geo-shape-type.asciidoc[]
+
+include::types/attachment-type.asciidoc[]
+
diff --git a/docs/reference/mapping/types/array-type.asciidoc b/docs/reference/mapping/types/array-type.asciidoc
new file mode 100644
index 0000000..3f887b1
--- /dev/null
+++ b/docs/reference/mapping/types/array-type.asciidoc
@@ -0,0 +1,74 @@
+[[mapping-array-type]]
+=== Array Type
+
+JSON documents allow defining an array (list) of fields or objects.
+Mapping array types could not be simpler since arrays get automatically
+detected and mapping them can be done either with
+<<mapping-core-types,Core Types>> or
+<<mapping-object-type,Object Type>> mappings.
+For example, the following JSON defines several arrays:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "message" : "some arrays in this tweet...",
+ "tags" : ["elasticsearch", "wow"],
+ "lists" : [
+ {
+ "name" : "prog_list",
+ "description" : "programming list"
+ },
+ {
+ "name" : "cool_list",
+ "description" : "cool stuff list"
+ }
+ ]
+ }
+}
+--------------------------------------------------
+
+The above JSON has the `tags` property defining a list of a simple
+`string` type, and the `lists` property is an `object` type array. Here
+is a sample explicit mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "message" : {"type" : "string"},
+ "tags" : {"type" : "string", "index_name" : "tag"},
+ "lists" : {
+ "properties" : {
+ "name" : {"type" : "string"},
+ "description" : {"type" : "string"}
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Array types are automatically supported, as demonstrated by the fact
+that the following JSON document is perfectly fine:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "message" : "some arrays in this tweet...",
+ "tags" : "elasticsearch",
+ "lists" : {
+ "name" : "prog_list",
+ "description" : "programming list"
+ }
+ }
+}
+--------------------------------------------------
+
+Note also that, since we used `index_name` to provide the non-plural
+form (`tag` instead of `tags`), we can actually refer to the field using
+the `index_name` as well. For example, we can execute a query using
+`tweet.tags:wow` or `tweet.tag:wow`. We could, of course, name the field
+`tag` and skip the `index_name` altogether.
diff --git a/docs/reference/mapping/types/attachment-type.asciidoc b/docs/reference/mapping/types/attachment-type.asciidoc
new file mode 100644
index 0000000..54f9701
--- /dev/null
+++ b/docs/reference/mapping/types/attachment-type.asciidoc
@@ -0,0 +1,90 @@
+[[mapping-attachment-type]]
+=== Attachment Type
+
+The `attachment` type allows indexing different "attachment" type fields
+(encoded as `base64`), for example, Microsoft Office formats, open
+document formats, ePub, HTML, and so on (the full list can be found
+http://lucene.apache.org/tika/0.10/formats.html[here]).
+
+The `attachment` type is provided as a
+https://github.com/elasticsearch/elasticsearch-mapper-attachments[plugin
+extension]. The plugin is a simple zip file that can be downloaded and
+placed under the `$ES_HOME/plugins` directory. It will be automatically
+detected and the `attachment` type will be added.
+
+Note, the `attachment` type is experimental.
+
+Using the attachment type is simple: in your mapping JSON, simply set a
+certain JSON element as attachment, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "properties" : {
+ "my_attachment" : { "type" : "attachment" }
+ }
+ }
+}
+--------------------------------------------------
+
+In this case, the JSON to index can be:
+
+[source,js]
+--------------------------------------------------
+{
+ "my_attachment" : "... base64 encoded attachment ..."
+}
+--------------------------------------------------
+
+Or it is possible to use a more elaborate JSON structure if the content
+type or resource name needs to be set explicitly:
+
+[source,js]
+--------------------------------------------------
+{
+ "my_attachment" : {
+ "_content_type" : "application/pdf",
+ "_name" : "resource/name/of/my.pdf",
+ "content" : "... base64 encoded attachment ..."
+ }
+}
+--------------------------------------------------
+
+The `attachment` type not only indexes the content of the doc, but also
+automatically adds metadata on the attachment (when available).
+The supported metadata fields are: `date`, `title`, `author`, and
+`keywords`. They can be queried using the "dot notation", for example:
+`my_attachment.author`.
+
+Both the metadata and the actual content are simple core type mappers
+(string, date, ...), and thus can be controlled in the mappings. For
+example:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "properties" : {
+ "file" : {
+ "type" : "attachment",
+ "fields" : {
+ "file" : {"index" : "no"},
+ "date" : {"store" : true},
+ "author" : {"analyzer" : "myAnalyzer"}
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, the actual content indexed is mapped under the
+`fields` name `file`, and we decide not to index it, so it will only be
+available in the `_all` field. The other fields map to their respective
+metadata names, but there is no need to specify the `type` (like
+`string` or `date`) since it is already known.
+
+The plugin uses http://lucene.apache.org/tika/[Apache Tika] to parse
+attachments, so many formats are supported, listed
+http://lucene.apache.org/tika/0.10/formats.html[here].
diff --git a/docs/reference/mapping/types/core-types.asciidoc b/docs/reference/mapping/types/core-types.asciidoc
new file mode 100644
index 0000000..90ec792
--- /dev/null
+++ b/docs/reference/mapping/types/core-types.asciidoc
@@ -0,0 +1,754 @@
+[[mapping-core-types]]
+=== Core Types
+
+Each JSON field can be mapped to a specific core type. JSON itself
+already provides us with some typing, with its support for `string`,
+`integer`/`long`, `float`/`double`, `boolean`, and `null`.
+
+The following sample tweet JSON document will be used to explain the
+core types:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" {
+ "user" : "kimchy"
+ "message" : "This is a tweet!",
+ "postDate" : "2009-11-15T14:12:12",
+ "priority" : 4,
+ "rank" : 12.3
+ }
+}
+--------------------------------------------------
+
+Explicit mapping for the above JSON tweet can be:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "user" : {"type" : "string", "index" : "not_analyzed"},
+ "message" : {"type" : "string", "null_value" : "na"},
+ "postDate" : {"type" : "date"},
+ "priority" : {"type" : "integer"},
+ "rank" : {"type" : "float"}
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+[[string]]
+==== String
+
+The text-based string type is the most basic type, and contains one or
+more characters. An example mapping can be:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "message" : {
+ "type" : "string",
+ "store" : true,
+ "index" : "analyzed",
+ "null_value" : "na"
+ },
+ "user" : {
+ "type" : "string",
+ "index" : "not_analyzed",
+ "norms" : {
+ "enabled" : false
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The above mapping defines a `string` `message` property/field within the
+`tweet` type. The field is stored in the index (so it can later be
+retrieved using selective loading when searching), and it gets analyzed
+(broken down into searchable terms). If the message has a `null` value,
+then the value that will be stored is `na`. There is also a `string` `user`
+which is indexed as-is (not broken down into tokens) and has norms
+disabled (so that matching this field is a binary decision, no match is
+better than another one).
+
+The following table lists all the attributes that can be used with the
+`string` type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Attribute |Description
+|`index_name` |The name of the field that will be stored in the index.
+Defaults to the property/field name.
+
+|`store` |Set to `true` to store actual field in the index, `false` to not
+store it. Defaults to `false` (note, the JSON document itself is stored,
+and it can be retrieved from it).
+
+|`index` |Set to `analyzed` for the field to be indexed and searchable
+after being broken down into tokens using an analyzer. `not_analyzed`
+means that it is still searchable, but does not go through any analysis
+process, nor is it broken down into tokens. `no` means that it won't be
+searchable at all (as an individual field; it may still be included in
+`_all`). Setting to `no` disables `include_in_all`. Defaults to
+`analyzed`.
+
+|`doc_values` |Set to `true` to store field values in a column-stride fashion.
+Automatically set to `true` when the fielddata format is `doc_values`.
+
+|`term_vector` |Possible values are `no`, `yes`, `with_offsets`,
+`with_positions`, `with_positions_offsets`. Defaults to `no`.
+
+|`boost` |The boost value. Defaults to `1.0`.
+
+|`null_value` |When there is a (JSON) null value for the field, use the
+`null_value` as the field value. Defaults to not adding the field at
+all.
+
+|`norms.enabled` |Boolean value if norms should be enabled or not. Defaults
+to `true` for `analyzed` fields, and to `false` for `not_analyzed` fields.
+
+|`norms.loading` |Describes how norms should be loaded, possible values are
+`eager` and `lazy` (default). It is possible to change the default value to
+eager for all fields by configuring the index setting `index.norms.loading`
+to `eager`.
+
+|`index_options` | Allows to set the indexing
+options, possible values are `docs` (only doc numbers are indexed),
+`freqs` (doc numbers and term frequencies), and `positions` (doc
+numbers, term frequencies and positions). Defaults to `positions` for
+`analyzed` fields, and to `docs` for `not_analyzed` fields. It
+is also possible to set it to `offsets` (doc numbers, term
+frequencies, positions and offsets).
+
+|`analyzer` |The analyzer used to analyze the text contents when
+`analyzed` during indexing and when searching using a query string.
+Defaults to the globally configured analyzer.
+
+|`index_analyzer` |The analyzer used to analyze the text contents when
+`analyzed` during indexing.
+
+|`search_analyzer` |The analyzer used to analyze the field when part of
+a query string. Can be updated on an existing field.
+
+|`include_in_all` |Should the field be included in the `_all` field (if
+enabled). If `index` is set to `no` this defaults to `false`, otherwise,
+defaults to `true` or to the parent `object` type setting.
+
+|`ignore_above` |The analyzer will ignore strings larger than this size.
+Useful for generic `not_analyzed` fields that should ignore long text.
+
+|`position_offset_gap` |Position increment gap between field instances
+with the same field name. Defaults to 0.
+|=======================================================================
+
+The `string` type also supports custom indexing parameters associated
+with the indexed value. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "message" : {
+ "_value": "boosted value",
+ "_boost": 2.0
+ }
+}
+--------------------------------------------------
+
+The mapping is required to disambiguate the meaning of the document.
+Otherwise, the structure would interpret "message" as a value of type
+"object". The key `_value` (or `value`) in the inner document specifies
+the real string content that should eventually be indexed. The `_boost`
+(or `boost`) key specifies the per field document boost (here 2.0).
+
+[float]
+[[number]]
+==== Number
+
+A number based type supporting `float`, `double`, `byte`, `short`,
+`integer`, and `long`. It uses specific constructs within Lucene in
+order to support numeric values. The number types have the same ranges
+as corresponding
+http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html[Java
+types]. An example mapping can be:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "rank" : {
+ "type" : "float",
+ "null_value" : 1.0
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The following table lists all the attributes that can be used with a
+number type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Attribute |Description
+|`type` |The type of the number. Can be `float`, `double`, `integer`,
+`long`, `short`, `byte`. Required.
+
+|`index_name` |The name of the field that will be stored in the index.
+Defaults to the property/field name.
+
+|`store` |Set to `true` to store actual field in the index, `false` to not
+store it. Defaults to `false` (note, the JSON document itself is stored,
+and it can be retrieved from it).
+
+|`index` |Set to `no` if the value should not be indexed. Setting to
+`no` disables `include_in_all`. If set to `no`, the field should be
+stored in `_source`, have `include_in_all` enabled, or have `store` set
+to `true` for this to be useful.
+
+|`doc_values` |Set to `true` to store field values in a column-stride fashion.
+Automatically set to `true` when the fielddata format is `doc_values`.
+
+|`precision_step` |The precision step (number of terms generated for
+each number value). Defaults to `4`.
+
+|`boost` |The boost value. Defaults to `1.0`.
+
+|`null_value` |When there is a (JSON) null value for the field, use the
+`null_value` as the field value. Defaults to not adding the field at
+all.
+
+|`include_in_all` |Should the field be included in the `_all` field (if
+enabled). If `index` is set to `no` this defaults to `false`, otherwise,
+defaults to `true` or to the parent `object` type setting.
+
+|`ignore_malformed` |Ignore malformed numbers. Defaults to `false`.
+
+|`coerce` |Try to convert strings to numbers and truncate fractions for integers. Defaults to `true`.
+
+|=======================================================================
+
+[float]
+[[token_count]]
+==== Token Count
+The `token_count` type maps to the JSON string type but indexes and stores
+the number of tokens in the string rather than the string itself. For
+example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "name" : {
+ "type" : "string",
+ "fields" : {
+ "word_count": {
+ "type" : "token_count",
+ "store" : "yes",
+ "analyzer" : "standard"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+All the configuration that can be specified for a number can be specified
+for a `token_count`. The only extra configuration is the required
+`analyzer` field which specifies which analyzer to use to break the string
+into tokens. For best performance, use an analyzer with no token filters.
+
+[NOTE]
+===================================================================
+Technically the `token_count` type sums position increments rather than
+counting tokens. This means that even if the analyzer filters out stop
+words they are included in the count.
+===================================================================
+
+[float]
+[[date]]
+==== Date
+
+The date type is a special type which maps to the JSON string type. It
+follows a specific format that can be explicitly set. All dates are
+`UTC`. Internally, a date maps to a number type `long`, with the added
+parsing stage from string to long and from long to string. An example
+mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "postDate" : {
+ "type" : "date",
+ "format" : "YYYY-MM-dd"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The date type will also accept a long number representing UTC
+milliseconds since the epoch, regardless of the configured format.
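+
+For example, given the mapping above, the following two documents would be
+dated equivalently, the first using the configured format and the second
+using UTC milliseconds since the epoch (the exact millisecond value is
+illustrative):
+
+[source,js]
+--------------------------------------------------
+{ "postDate" : "2009-11-15" }
+
+{ "postDate" : 1258243200000 }
+--------------------------------------------------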
+
+The following table lists all the attributes that can be used with a
+date type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Attribute |Description
+|`index_name` |The name of the field that will be stored in the index.
+Defaults to the property/field name.
+
+|`format` |The <<mapping-date-format,date
+format>>. Defaults to `dateOptionalTime`.
+
+|`store` |Set to `true` to store actual field in the index, `false` to not
+store it. Defaults to `false` (note, the JSON document itself is stored,
+and it can be retrieved from it).
+
+|`index` |Set to `no` if the value should not be indexed. Setting to
+`no` disables `include_in_all`. If set to `no`, the field should be
+stored in `_source`, have `include_in_all` enabled, or have `store` set
+to `true` for this to be useful.
+
+|`doc_values` |Set to `true` to store field values in a column-stride fashion.
+Automatically set to `true` when the fielddata format is `doc_values`.
+
+|`precision_step` |The precision step (number of terms generated for
+each number value). Defaults to `4`.
+
+|`boost` |The boost value. Defaults to `1.0`.
+
+|`null_value` |When there is a (JSON) null value for the field, use the
+`null_value` as the field value. Defaults to not adding the field at
+all.
+
+|`include_in_all` |Should the field be included in the `_all` field (if
+enabled). If `index` is set to `no` this defaults to `false`, otherwise,
+defaults to `true` or to the parent `object` type setting.
+
+|`ignore_malformed` |Ignore malformed numbers. Defaults to `false`.
+
+|=======================================================================
+
+[float]
+[[boolean]]
+==== Boolean
+
+The boolean type maps to the JSON boolean type. It ends up storing
+within the index either `T` or `F`, with automatic translation to `true`
+and `false` respectively.
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+            "hes_my_special_tweet" : {
+                "type" : "boolean"
+            }
+ }
+ }
+}
+--------------------------------------------------
+
+The boolean type also supports passing the value as a number (in this
+case `0` is `false`, all other values are `true`).
+
+The following table lists all the attributes that can be used with the
+boolean type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Attribute |Description
+|`index_name` |The name of the field that will be stored in the index.
+Defaults to the property/field name.
+
+|`store` |Set to `true` to store actual field in the index, `false` to not
+store it. Defaults to `false` (note, the JSON document itself is stored,
+and it can be retrieved from it).
+
+|`index` |Set to `no` if the value should not be indexed. Setting to
+`no` disables `include_in_all`. If set to `no`, the field should be
+stored in `_source`, have `include_in_all` enabled, or have `store` set
+to `true` for this to be useful.
+
+|`boost` |The boost value. Defaults to `1.0`.
+
+|`null_value` |When there is a (JSON) null value for the field, use the
+`null_value` as the field value. Defaults to not adding the field at
+all.
+
+|`include_in_all` |Should the field be included in the `_all` field (if
+enabled). If `index` is set to `no` this defaults to `false`, otherwise,
+defaults to `true` or to the parent `object` type setting.
+|=======================================================================
+
+[float]
+[[binary]]
+==== Binary
+
+The binary type is a base64 representation of binary data that can be
+stored in the index. The field is not stored by default and not indexed at
+all.
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+            "image" : {
+                "type" : "binary"
+            }
+ }
+ }
+}
+--------------------------------------------------
+
+The following table lists all the attributes that can be used with the
+binary type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Attribute |Description
+|`index_name` |The name of the field that will be stored in the index.
+Defaults to the property/field name.
+|`store` |Set to `true` to store actual field in the index, `false` to not
+store it. Defaults to `false` (note, the JSON document itself is stored,
+and it can be retrieved from it).
+|=======================================================================
+
+[float]
+[[fielddata-filters]]
+==== Fielddata filters
+
+It is possible to control which field values are loaded into memory,
+which is particularly useful for faceting on string fields, using
+fielddata filters, which are explained in detail in the
+<<index-modules-fielddata,Fielddata>> section.
+
+Fielddata filters can exclude terms which do not match a regex, or which
+don't fall between a `min` and `max` frequency range:
+
+[source,js]
+--------------------------------------------------
+{
+    "tweet" : {
+        "type" : "string",
+        "analyzer" : "whitespace",
+        "fielddata" : {
+            "filter" : {
+                "regex" : {
+                    "pattern" : "^#.*"
+                },
+                "frequency" : {
+                    "min" : 0.001,
+                    "max" : 0.1,
+                    "min_segment_size" : 500
+                }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+These filters can be updated on an existing field mapping and will take
+effect the next time the fielddata for a segment is loaded. Use the
+<<indices-clearcache,Clear Cache>> API
+to reload the fielddata using the new filters.
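+
+For example, a sketch of clearing the cache for a single index so that the
+updated filters take effect (the index name is illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'http://localhost:9200/my_index/_cache/clear'
+--------------------------------------------------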
+
+[float]
+[[postings]]
+==== Postings format
+
+Postings formats define how fields are written into the index and how
+fields are represented in memory. Postings formats can be defined per
+field via the `postings_format` option. Postings formats are configurable.
+Elasticsearch has several built-in formats:
+
+`direct`::
+ A postings format that uses disk-based storage but loads
+ its terms and postings directly into memory. Note this postings format
+ is very memory intensive and has a certain limitation that doesn't allow
+ segments to grow beyond 2.1GB; see `DirectPostingsFormat` for
+ details.
+
+`memory`::
+ A postings format that stores its entire terms, postings,
+ positions and payloads in a finite state transducer. This format should
+ only be used for primary keys or with fields where each term is
+ contained in a very low number of documents.
+
+`pulsing`::
+ A postings format that in-lines the posting lists for very
+ low-frequency terms in the term dictionary. This is useful to improve lookup
+ performance for low-frequency terms.
+
+`bloom_default`::
+ A postings format that uses a bloom filter to
+ improve term lookup performance. This is useful for primary keys or
+ fields that are used as a delete key.
+
+`bloom_pulsing`::
+ A postings format that combines the advantages of
+ *bloom* and *pulsing* to further improve lookup performance.
+
+`default`::
+ The default Elasticsearch postings format offering best
+ general purpose performance. This format is used if no postings format
+ is specified in the field mapping.
+
+[float]
+===== Postings format example
+
+On all field types it is possible to configure a `postings_format`
+attribute:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "properties" : {
+ "second_person_id" : {"type" : "string", "postings_format" : "pulsing"}
+ }
+ }
+}
+--------------------------------------------------
+
+On top of using the built-in postings formats it is possible to define
+custom postings formats. See the
+<<index-modules-codec,codec module>> for more
+information.
+
+[float]
+==== Doc values format
+
+Doc values formats define how fields are written into column-stride storage in
+the index for the purpose of sorting or faceting. Fields that have doc values
+enabled will have special field data instances, which will not be uninverted
+from the inverted index, but directly read from disk. This makes `_refresh`
+faster and ultimately allows for having field data stored on disk depending
+on the configured doc values format.
+
+Doc values formats are configurable. Elasticsearch has several built-in formats:
+
+`memory`::
+ A doc values format which stores data in memory. Compared to the default
+ field data implementations, using doc values with this format will have
+ similar performance but will be faster to load, making `_refresh` less
+ time-consuming.
+
+`disk`::
+ A doc values format which stores all data on disk, requiring almost no
+ memory from the JVM at the cost of a slight performance degradation.
+
+`default`::
+ The default Elasticsearch doc values format, offering good performance
+ with low memory usage. This format is used if no format is specified in
+ the field mapping.
+
+[float]
+===== Doc values format example
+
+On all field types, it is possible to configure a `doc_values_format` attribute:
+
+[source,js]
+--------------------------------------------------
+{
+ "product" : {
+ "properties" : {
+ "price" : {"type" : "integer", "doc_values_format" : "memory"}
+ }
+ }
+}
+--------------------------------------------------
+
+On top of using the built-in doc values formats it is possible to define
+custom doc values formats. See
+<<index-modules-codec,codec module>> for more information.
+
+[float]
+==== Similarity
+
+Elasticsearch allows you to configure a similarity (scoring algorithm) per
+field, allowing users a simpler extension beyond the usual TF/IDF
+algorithm. As part of this, new algorithms have been added, including
+BM25. Also as part of the changes, it is now possible to define a
+Similarity per field, giving even greater control over scoring.
+
+You can configure similarities via the
+<<index-modules-similarity,similarity module>>.
+
+[float]
+===== Configuring Similarity per Field
+
+Defining the Similarity for a field is done via the `similarity` mapping
+property, as this example shows:
+
+[source,js]
+--------------------------------------------------
+{
+    "book" : {
+        "properties" : {
+            "title" : { "type" : "string", "similarity" : "BM25" }
+        }
+    }
+}
+--------------------------------------------------
+
+The following Similarities are configured out-of-box:
+
+`default`::
+ The Default TF/IDF algorithm used by Elasticsearch and
+ Lucene in previous versions.
+
+`BM25`::
+ The BM25 algorithm.
+ http://en.wikipedia.org/wiki/Okapi_BM25[See Okapi_BM25] for more
+ details.
+
+
+[[copy-to]]
+[float]
+===== Copy to field
+
+added[1.0.0.RC2]
+
+Adding `copy_to` parameter to any field mapping will cause all values of this field to be copied to fields specified in
+the parameter. In the following example all values from fields `title` and `abstract` will be copied to the field
+`meta_data`.
+
+
+[source,js]
+--------------------------------------------------
+{
+    "book" : {
+        "properties" : {
+            "title" : { "type" : "string", "copy_to" : "meta_data" },
+            "abstract" : { "type" : "string", "copy_to" : "meta_data" },
+            "meta_data" : { "type" : "string" }
+        }
+    }
+}
+--------------------------------------------------
+
+Multiple fields are also supported:
+
+[source,js]
+--------------------------------------------------
+{
+    "book" : {
+        "properties" : {
+            "title" : { "type" : "string", "copy_to" : ["meta_data", "article_info"] }
+        }
+    }
+}
+--------------------------------------------------
+
+[float]
+===== Multi fields
+
+added[1.0.0.RC1]
+
+The `fields` option allows mapping several core type fields onto a single
+JSON source field. This can be useful if a single field needs to be
+used in different ways. For example, a single field is to be used for both
+free text search and sorting.
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "fields" : {
+ "raw" : {"type" : "string", "index" : "not_analyzed"}
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, the field `name` gets processed twice. The first time it
+gets processed as an analyzed string, and this version is accessible under the
+field name `name`; this is the main field and is in fact just like any other
+field. The second time it gets processed as a not analyzed string and is
+accessible under the name `name.raw`.
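+
+For example, a sketch of a search request that queries the analyzed `name`
+field while sorting on the `not_analyzed` `name.raw` field:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match" : { "name" : "elasticsearch" }
+    },
+    "sort" : [
+        { "name.raw" : "asc" }
+    ]
+}
+--------------------------------------------------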
+
+[float]
+==== Include in All
+
+The `include_in_all` setting is ignored on any field that is defined in
+the `fields` options. Setting `include_in_all` only makes sense on
+the main field, since it is the raw field value that is copied to the
+`_all` field; the tokens aren't copied.
+
+[float]
+==== Updating a field
+
+In essence, a field can't be updated. However, multi fields can be
+added to existing fields. This allows, for example, having a different
+`index_analyzer` configuration in addition to the already configured
+`index_analyzer` configuration specified in the main and other multi fields.
+
+Also, the new multi field will only be applied to documents that have been
+added after the multi field was added; the new multi field
+doesn't exist in existing documents.
+
+Another important note is that new multi fields will be merged into the
+list of existing multi fields, so when adding new multi fields for a field,
+previously added multi fields don't need to be specified.
+
+[float]
+==== Accessing Fields
+
+deprecated[1.0.0,Use <<copy-to,`copy_to`>> instead]
+
+The multi fields defined in the `fields` are prefixed with the
+name of the main field and can be accessed by their full path using the
+navigation notation: `name.raw`, or using the typed navigation notation
+`tweet.name.raw`. The `path` option allows controlling how fields are accessed.
+If the `path` option is set to `full`, then the full path of the main field
+is prefixed, but if the `path` option is set to `just_name` the actual
+multi field name without any prefix is used. The default value for
+the `path` option is `full`.
+
+The `just_name` setting, among other things, allows indexing content of multiple
+fields under the same name. In the example below the content of both fields
+`first_name` and `last_name` can be accessed by using `any_name` or `tweet.any_name`.
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties": {
+ "first_name": {
+ "type": "string",
+ "index": "analyzed",
+ "path": "just_name",
+ "fields": {
+ "any_name": {"type": "string","index": "analyzed"}
+ }
+ },
+ "last_name": {
+ "type": "string",
+ "index": "analyzed",
+ "path": "just_name",
+ "fields": {
+ "any_name": {"type": "string","index": "analyzed"}
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
diff --git a/docs/reference/mapping/types/geo-point-type.asciidoc b/docs/reference/mapping/types/geo-point-type.asciidoc
new file mode 100644
index 0000000..19b38e5
--- /dev/null
+++ b/docs/reference/mapping/types/geo-point-type.asciidoc
@@ -0,0 +1,207 @@
+[[mapping-geo-point-type]]
+=== Geo Point Type
+
+A mapper type called `geo_point` supports geo-based points. The
+declaration looks as follows:
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "properties" : {
+ "location" : {
+ "type" : "geo_point"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Indexed Fields
+
+The `geo_point` mapping will index a single field with the format of
+`lat,lon`. The `lat_lon` option can be set to also index the `.lat` and
+`.lon` as numeric fields, and `geohash` can be set to `true` to also
+index the `.geohash` value.
+
+A good practice is to enable indexing `lat_lon` as well, since both the
+geo distance and bounding box filters can be executed either using
+in-memory checks or using the indexed lat/lon values, and which one
+performs better really depends on the data set. Note, though, that
+indexed lat/lon values only make sense when there is a single geo point
+value for the field, not multiple values.
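+
+For example, a minimal sketch of a mapping that enables `lat_lon`:
+
+[source,js]
+--------------------------------------------------
+{
+    "pin" : {
+        "properties" : {
+            "location" : {
+                "type" : "geo_point",
+                "lat_lon" : true
+            }
+        }
+    }
+}
+--------------------------------------------------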
+
+[float]
+==== Geohashes
+
+Geohashes are a form of lat/lon encoding which divides the earth up into
+a grid. Each cell in this grid is represented by a geohash string. Each
+cell in turn can be further subdivided into smaller cells which are
+represented by a longer string. So the longer the geohash, the smaller
+(and thus more accurate) the cell is.
+
+Because geohashes are just strings, they can be stored in an inverted
+index like any other string, which makes querying them very efficient.
+
+If you enable the `geohash` option, a `geohash` ``sub-field'' will be
+indexed as, eg `pin.geohash`. The length of the geohash is controlled by
+the `geohash_precision` parameter, which can either be set to an absolute
+length (eg `12`, the default) or to a distance (eg `1km`).
+
+More usefully, set the `geohash_prefix` option to `true` to not only index
+the geohash value, but all the enclosing cells as well. For instance, a
+geohash of `u30` will be indexed as `[u,u3,u30]`. This option can be used
+by the <<query-dsl-geohash-cell-filter>> to find geopoints within a
+particular cell very efficiently.
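+
+For example, a minimal sketch of a mapping with prefix indexing enabled (the
+precision value is illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "pin" : {
+        "properties" : {
+            "location" : {
+                "type" : "geo_point",
+                "geohash_prefix" : true,
+                "geohash_precision" : "1km"
+            }
+        }
+    }
+}
+--------------------------------------------------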
+
+[float]
+==== Input Structure
+
+The above mapping defines a `geo_point`, which accepts different
+formats. The following formats are supported:
+
+[float]
+===== Lat Lon as Properties
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : {
+ "lat" : 41.12,
+ "lon" : -71.34
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon as String
+
+Format in `lat,lon`.
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : "41.12,-71.34"
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Geohash
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : "drm3btev3e86"
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon as Array
+
+Format in `[lon, lat]`. Note the order of lon/lat here, in order to
+conform with http://geojson.org/[GeoJSON].
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : [-71.34, 41.12]
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Mapping Options
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Option |Description
+|`lat_lon` |Set to `true` to also index the `.lat` and `.lon` as fields.
+Defaults to `false`.
+
+|`geohash` |Set to `true` to also index the `.geohash` as a field.
+Defaults to `false`.
+
+|`geohash_precision` |Sets the geohash precision. It can be set to an
+absolute geohash length or a distance value (eg 1km, 1m, 1mi) defining
+the size of the smallest cell. Defaults to an absolute length of 12.
+
+|`geohash_prefix` |If this option is set to `true`, not only the geohash
+but also all its parent cells (true prefixes) will be indexed as well. The
+number of terms that will be indexed depends on the `geohash_precision`.
+Defaults to `false`. *Note*: This option implicitly enables `geohash`.
+
+|`validate` |Set to `true` to reject geo points with invalid latitude or
+longitude (default is `false`). *Note*: Validation only works when
+normalization has been disabled.
+
+|`validate_lat` |Set to `true` to reject geo points with an invalid
+latitude
+
+|`validate_lon` |Set to `true` to reject geo points with an invalid
+longitude
+
+|`normalize` |Set to `true` to normalize latitude and longitude (default
+is `true`)
+
+|`normalize_lat` |Set to `true` to normalize latitude
+
+|`normalize_lon` |Set to `true` to normalize longitude
+|=======================================================================
+
+[float]
+==== Field data
+
+By default, geo points use the `array` format which loads geo points into two
+parallel double arrays, making sure there is no precision loss. However, this
+can require a non-negligible amount of memory (16 bytes per document) which is
+why Elasticsearch also provides a field data implementation with lossy
+compression called `compressed`:
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "properties" : {
+ "location" : {
+ "type" : "geo_point",
+ "fielddata" : {
+ "format" : "compressed",
+ "precision" : "1cm"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+This field data format comes with a `precision` option which allows
+configuring how much precision can be traded for memory. The default value is
+`1cm`. The following table presents the memory savings for various
+precisions:
+
+|=============================================
+| Precision | Bytes per point | Size reduction
+| 1km | 4 | 75%
+| 3m | 6 | 62.5%
+| 1cm | 8 | 50%
+| 1mm | 10 | 37.5%
+|=============================================
+
+Precision can be changed on a live index by using the update mapping API.
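+
+For example, a sketch of such a live update (the `my_index` index and `pin`
+type names are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/my_index/_mapping/pin -d '{
+    "pin" : {
+        "properties" : {
+            "location" : {
+                "type" : "geo_point",
+                "fielddata" : {
+                    "format" : "compressed",
+                    "precision" : "3m"
+                }
+            }
+        }
+    }
+}'
+--------------------------------------------------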
+
+[float]
+==== Usage in Scripts
+
+When using `doc[geo_field_name]` (in the above mapping,
+`doc['location']`), the `doc[...].value` returns a `GeoPoint`, which
+then allows access to `lat` and `lon` (for example,
+`doc[...].value.lat`). For performance, it is better to access the `lat`
+and `lon` directly using `doc[...].lat` and `doc[...].lon`.
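+
+As an illustration, a sketch of a search request that returns the latitude
+through `script_fields` (the index name is illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XGET localhost:9200/my_index/_search -d '{
+    "query" : { "match_all" : {} },
+    "script_fields" : {
+        "lat" : { "script" : "doc[\"location\"].lat" }
+    }
+}'
+--------------------------------------------------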
diff --git a/docs/reference/mapping/types/geo-shape-type.asciidoc b/docs/reference/mapping/types/geo-shape-type.asciidoc
new file mode 100644
index 0000000..600900a
--- /dev/null
+++ b/docs/reference/mapping/types/geo-shape-type.asciidoc
@@ -0,0 +1,232 @@
+[[mapping-geo-shape-type]]
+=== Geo Shape Type
+
+The `geo_shape` mapping type facilitates the indexing of and searching
+with arbitrary geo shapes such as rectangles and polygons. It should be
+used when either the data being indexed or the queries being executed
+contain shapes other than just points.
+
+You can query documents using this type using
+<<query-dsl-geo-shape-filter,geo_shape Filter>>
+or <<query-dsl-geo-shape-query,geo_shape
+Query>>.
+
+Note, the `geo_shape` type uses
+https://github.com/spatial4j/spatial4j[Spatial4J] and
+http://www.vividsolutions.com/jts/jtshome.htm[JTS], both of which are
+optional dependencies. Consequently you must add Spatial4J v0.3 and JTS
+v1.12 to your classpath in order to use this type.
+
+[float]
+==== Mapping Options
+
+The geo_shape mapping maps geo_json geometry objects to the geo_shape
+type. To enable it, users must explicitly map fields to the geo_shape
+type.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Option |Description
+
+|`tree` |Name of the PrefixTree implementation to be used: `geohash` for
+GeohashPrefixTree and `quadtree` for QuadPrefixTree. Defaults to
+`geohash`.
+
+|`precision` |This parameter may be used instead of `tree_levels` to set
+an appropriate value for the `tree_levels` parameter. The value
+specifies the desired precision and Elasticsearch will calculate the
+best tree_levels value to honor this precision. The value should be a
+number followed by an optional distance unit. Valid distance units
+include: `in`, `inch`, `yd`, `yard`, `mi`, `miles`, `km`, `kilometers`,
+`m`, `meters` (default), `cm`, `centimeters`, `mm`, `millimeters`.
+
+|`tree_levels` |Maximum number of layers to be used by the PrefixTree.
+This can be used to control the precision of shape representations and
+therefore how many terms are indexed. Defaults to the default value of
+the chosen PrefixTree implementation. Since this parameter requires a
+certain level of understanding of the underlying implementation, users
+may use the `precision` parameter instead. However, Elasticsearch only
+uses the tree_levels parameter internally and this is what is returned
+via the mapping API even if you use the precision parameter.
+
+|`distance_error_pct` |Used as a hint to the PrefixTree about how
+precise it should be. Defaults to 0.025 (2.5%) with 0.5 as the maximum
+supported value.
+|=======================================================================
+
+[float]
+==== Prefix trees
+
+To efficiently represent shapes in the index, shapes are converted into
+a series of hashes representing grid squares using implementations of a
+PrefixTree. The tree notion comes from the fact that the PrefixTree uses
+multiple grid layers, each with an increasing level of precision to
+represent the Earth.
+
+Multiple PrefixTree implementations are provided:
+
+* GeohashPrefixTree - Uses
+http://en.wikipedia.org/wiki/Geohash[geohashes] for grid squares.
+Geohashes are base32 encoded strings of the bits of the latitude and
+longitude interleaved. So the longer the hash, the more precise it is.
+Each character added to the geohash represents another tree level and
+adds 5 bits of precision to the geohash. A geohash represents a
+rectangular area and has 32 sub-rectangles. The maximum number of levels
+in Elasticsearch is 24.
+* QuadPrefixTree - Uses a
+http://en.wikipedia.org/wiki/Quadtree[quadtree] for grid squares.
+Similar to geohash, quad trees interleave the bits of the latitude and
+longitude, and the resulting hash is a bit set. A tree level in a quad tree
+represents 2 bits in this bit set, one for each coordinate. The maximum
+number of levels for the quad trees in Elasticsearch is 50.
+
+[float]
+===== Accuracy
+
+Geo_shape does not provide 100% accuracy and, depending on how it is
+configured, it may return some false positives or false negatives for
+certain queries. To mitigate this, it is important to select an
+appropriate value for the tree_levels parameter and to adjust
+expectations accordingly. For example, a point may be near the border of
+a particular grid cell and so may not match a query that only matches the
+cell right next to it, even though the shape is very close to the point.
+
+[float]
+===== Example
+
+[source,js]
+--------------------------------------------------
+{
+ "properties": {
+ "location": {
+ "type": "geo_shape",
+ "tree": "quadtree",
+ "precision": "1m"
+ }
+ }
+}
+--------------------------------------------------
+
+This mapping maps the location field to the geo_shape type using the
+`quadtree` implementation and a precision of 1m. Elasticsearch translates
+this into a tree_levels setting of 26.
+
+[float]
+===== Performance considerations
+
+Elasticsearch uses the paths in the prefix tree as terms in the index
+and in queries. The higher the number of levels (and thus the precision),
+the more terms are generated. Calculating the terms, keeping them in
+memory, and storing them all have a price. Especially with higher
+tree levels, indices can become extremely large even with a modest
+amount of data. Additionally, the size of the features also matters.
+Big, complex polygons can take up a lot of space at higher tree levels.
+Which setting is right depends on the use case. Generally one trades off
+accuracy against index size and query performance.
+
+The defaults in Elasticsearch for both implementations are a compromise
+between index size and a reasonable level of precision of 50m at the
+equator. This allows for indexing tens of millions of shapes without
+overly bloating the resulting index relative to the input size.
+
+[float]
+==== Input Structure
+
+The http://www.geojson.org[GeoJSON] format is used to represent shapes
+as input, as follows:
+
+[source,js]
+--------------------------------------------------
+{
+ "location" : {
+ "type" : "point",
+ "coordinates" : [45.0, -45.0]
+ }
+}
+--------------------------------------------------
+
+Note, both the `type` and `coordinates` fields are required.
+
+The supported `types` are `point`, `linestring`, `polygon`, `multipoint`
+and `multipolygon`.
+
+Note, in GeoJSON the correct coordinate order is longitude, latitude.
+This differs from some APIs, such as Google Maps, that generally use
+latitude, longitude.
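+
+For instance, a `linestring` follows the same structure (the coordinates
+below are arbitrary illustrative values):
+
+[source,js]
+--------------------------------------------------
+{
+    "location" : {
+        "type" : "linestring",
+        "coordinates" : [[-77.03653, 38.897676], [-77.009051, 38.889939]]
+    }
+}
+--------------------------------------------------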
+
+[float]
+===== Envelope
+
+Elasticsearch supports an `envelope` type which consists of coordinates
+for upper left and lower right points of the shape:
+
+[source,js]
+--------------------------------------------------
+{
+ "location" : {
+ "type" : "envelope",
+ "coordinates" : [[-45.0, 45.0], [45.0, -45.0]]
+ }
+}
+--------------------------------------------------
+
+[float]
+===== http://www.geojson.org/geojson-spec.html#id4[Polygon]
+
+A polygon is defined by a list of lists of points. The first and last
+points in each list must be the same (the polygon must be closed).
+
+[source,js]
+--------------------------------------------------
+{
+ "location" : {
+ "type" : "polygon",
+ "coordinates" : [
+ [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]
+ ]
+ }
+}
+--------------------------------------------------
+
+The first array represents the outer boundary of the polygon, the other
+arrays represent the interior shapes ("holes"):
+
+[source,js]
+--------------------------------------------------
+{
+ "location" : {
+ "type" : "polygon",
+ "coordinates" : [
+ [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ],
+ [ [100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2] ]
+ ]
+ }
+}
+--------------------------------------------------
+
+[float]
+===== http://www.geojson.org/geojson-spec.html#id7[MultiPolygon]
+
+A list of GeoJSON polygons.
+
+[source,js]
+--------------------------------------------------
+{
+ "location" : {
+ "type" : "multipolygon",
+ "coordinates" : [
+ [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]
+ ]
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Sorting and Retrieving Indexed Shapes
+
+Due to the complex input structure and index representation of shapes,
+it is not currently possible to sort shapes or retrieve their fields
+directly. The geo_shape value is only retrievable through the `_source`
+field.
diff --git a/docs/reference/mapping/types/ip-type.asciidoc b/docs/reference/mapping/types/ip-type.asciidoc
new file mode 100644
index 0000000..51f3c5a
--- /dev/null
+++ b/docs/reference/mapping/types/ip-type.asciidoc
@@ -0,0 +1,36 @@
+[[mapping-ip-type]]
+=== IP Type
+
+An `ip` mapping type allows to store _IPv4_ addresses in numeric form,
+which makes it easy to sort and range query on them (using IP values).
+
+The following table lists all the attributes that can be used with an ip
+type:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Attribute |Description
+|`index_name` |The name of the field that will be stored in the index.
+Defaults to the property/field name.
+
+|`store` |Set to `true` to store actual field in the index, `false` to not
+store it. Defaults to `false` (note, the JSON document itself is stored,
+and it can be retrieved from it).
+
+|`index` |Set to `no` if the value should not be indexed. In this case,
+`store` should be set to `true`, since if it's not indexed and not
+stored, there is nothing to do with it.
+
+|`precision_step` |The precision step (number of terms generated for
+each number value). Defaults to `4`.
+
+|`boost` |The boost value. Defaults to `1.0`.
+
+|`null_value` |When there is a (JSON) null value for the field, use the
+`null_value` as the field value. Defaults to not adding the field at
+all.
+
+|`include_in_all` |Should the field be included in the `_all` field (if
+enabled). Defaults to `true` or to the parent `object` type setting.
+|=======================================================================
+
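+For example, a minimal sketch of a mapping that uses the `ip` type (the
+`access_log` type and `client_ip` field names are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "access_log" : {
+        "properties" : {
+            "client_ip" : { "type" : "ip", "null_value" : "0.0.0.0" }
+        }
+    }
+}
+--------------------------------------------------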
diff --git a/docs/reference/mapping/types/nested-type.asciidoc b/docs/reference/mapping/types/nested-type.asciidoc
new file mode 100644
index 0000000..17d8a13
--- /dev/null
+++ b/docs/reference/mapping/types/nested-type.asciidoc
@@ -0,0 +1,81 @@
+[[mapping-nested-type]]
+=== Nested Type
+
+Nested objects/documents allow certain sections of the document to be
+indexed as nested, so that they can be queried as if they were separate
+docs that join with the parent owning doc.
+
+One of the problems when indexing inner objects that occur several times
+in a doc is that "cross object" search matches will occur. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "obj1" : [
+ {
+ "name" : "blue",
+ "count" : 4
+ },
+ {
+ "name" : "green",
+ "count" : 6
+ }
+ ]
+}
+--------------------------------------------------
+
+Searching for name set to blue and count higher than 5 will match the
+doc, because in the first element the name matches blue, and in the
+second element, count matches "higher than 5".
+
+Nested mapping allows certain inner objects (usually multi-instance
+ones) to be mapped as nested. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "type1" : {
+ "properties" : {
+ "obj1" : {
+ "type" : "nested"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The above will cause each `obj1` element to be indexed as a nested doc. The
+mapping is similar in nature to setting `type` to `object`, except that
+it's `nested`.
+
+Note: changing an object type to nested type requires reindexing.
+
+The `nested` object fields can also be automatically added to the
+immediate parent by setting `include_in_parent` to true, and also
+included in the root object by setting `include_in_root` to true.
+
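+For example, a sketch of a nested mapping that also adds the nested fields
+to the parent document:
+
+[source,js]
+--------------------------------------------------
+{
+    "type1" : {
+        "properties" : {
+            "obj1" : {
+                "type" : "nested",
+                "include_in_parent" : true
+            }
+        }
+    }
+}
+--------------------------------------------------
+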
+Nested docs will also automatically use the root doc `_all` field.
+
+Searching on nested docs can be done using either the
+<<query-dsl-nested-query,nested query>> or
+<<query-dsl-nested-filter,nested filter>>.
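+
+For example, a sketch of a nested query that looks for a single `obj1`
+element that is both blue and has a count greater than 5:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "nested" : {
+            "path" : "obj1",
+            "query" : {
+                "bool" : {
+                    "must" : [
+                        { "match" : { "obj1.name" : "blue" } },
+                        { "range" : { "obj1.count" : { "gt" : 5 } } }
+                    ]
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+With the sample document above, this query does not match, since no single
+`obj1` element satisfies both conditions.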
+
+[float]
+==== Internal Implementation
+
+Internally, nested objects are indexed as additional documents, but,
+since they can be guaranteed to be indexed within the same "block", it
+allows for extremely fast joining with parent docs.
+
+Those internal nested documents are automatically masked away when doing
+operations against the index (like searching with a match_all query),
+and they bubble out when using the nested query.
+
+Because nested docs are always masked to the parent doc, the nested docs
+can never be accessed outside the scope of the `nested` query. For example,
+stored fields can be enabled on fields inside nested objects, but there is
+no way of retrieving them, since stored fields are fetched outside of
+the `nested` query scope.
+
+The `_source` field is always associated with the parent document, and
+because of that, field values for nested objects can be fetched via the source.
diff --git a/docs/reference/mapping/types/object-type.asciidoc b/docs/reference/mapping/types/object-type.asciidoc
new file mode 100644
index 0000000..ce28239
--- /dev/null
+++ b/docs/reference/mapping/types/object-type.asciidoc
@@ -0,0 +1,244 @@
+[[mapping-object-type]]
+=== Object Type
+
+JSON documents are hierarchical in nature, allowing them to define inner
+"objects" within the actual JSON. Elasticsearch completely understands
+the nature of these inner objects and can map them easily, providing
+query support for their inner fields. Because each document can have
+objects with different fields each time, objects mapped this way are
+known as "dynamic". Dynamic mapping is enabled by default. Let's take
+the following JSON as an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "person" : {
+ "name" : {
+ "first_name" : "Shay",
+ "last_name" : "Banon"
+ },
+ "sid" : "12345"
+ },
+ "message" : "This is a tweet!"
+ }
+}
+--------------------------------------------------
+
+The above shows an example where a tweet includes the actual `person`
+details. A `person` is an object, with a `sid`, and a `name` object
+which has `first_name` and `last_name`. It's important to note that
+`tweet` is also an object, although it is a special
+<<mapping-root-object-type,root object type>>
+which allows for additional mapping definitions.
+
+The following is an example of explicit mapping for the above JSON:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "person" : {
+ "type" : "object",
+ "properties" : {
+ "name" : {
+ "properties" : {
+ "first_name" : {"type" : "string"},
+ "last_name" : {"type" : "string"}
+ }
+ },
+ "sid" : {"type" : "string", "index" : "not_analyzed"}
+ }
+ },
+ "message" : {"type" : "string"}
+ }
+ }
+}
+--------------------------------------------------
+
+In order to mark a mapping of type `object`, set the `type` to `object`.
+This is an optional step, since if there are `properties` defined for
+it, it will automatically be identified as an `object` mapping.
+
+[float]
+==== properties
+
+An object mapping can optionally define one or more properties using the
+`properties` tag for a field. Each property can be either another
+`object`, or one of the
+<<mapping-core-types,core_types>>.
+
+[float]
+==== dynamic
+
+One of the most important features of Elasticsearch is its ability to be
+schema-less. This means that, in our example above, the `person` object
+can be indexed later with a new property -- `age`, for example -- and it
+will automatically be added to the mapping definitions. Same goes for
+the `tweet` root object.
+
+This feature is turned on by default, and it is the `dynamic` nature of
+each mapped object. Each mapped object is automatically dynamic, though
+it can be explicitly turned off:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "person" : {
+ "type" : "object",
+ "properties" : {
+ "name" : {
+ "dynamic" : false,
+ "properties" : {
+ "first_name" : {"type" : "string"},
+ "last_name" : {"type" : "string"}
+ }
+ },
+ "sid" : {"type" : "string", "index" : "not_analyzed"}
+ }
+ },
+ "message" : {"type" : "string"}
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, the mapped `name` object is not dynamic, meaning
+that if, in the future, we try to index JSON with a `middle_name` within
+the `name` object, it will get discarded and not added.
+
+There is no performance overhead if an `object` is dynamic; the ability
+to turn it off is provided as a safety mechanism so that "malformed" objects
+won't, by mistake, index data that we do not wish to be indexed.
+
+If a dynamic object contains yet another inner `object`, it will be
+automatically added to the index and mapped as well.
+
+When processing new dynamic fields, their type is automatically derived.
+For example, if it is a `number`, it will automatically be treated as
+number <<mapping-core-types,core_type>>. Dynamic
+fields default to their default attributes, for example, they are not
+stored and they are always indexed.
+
+Date fields are special since they are represented as a `string`. Date
+fields are detected if they can be parsed as a date when they are first
+introduced into the system. The set of date formats that are tested
+against can be configured using the `dynamic_date_formats` on the root object,
+which is explained later.
+
+Note, once a field has been added, *its type can not change*. For
+example, if we added age and its value is a number, then it can't be
+treated as a string.
+
+The `dynamic` parameter can also be set to `strict`, meaning that not
+only will new fields not be introduced into the mapping, but parsing
+(indexing) docs with such new fields will fail.
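+
+For example, a sketch of marking the `person` object as strict:
+
+[source,js]
+--------------------------------------------------
+{
+    "tweet" : {
+        "properties" : {
+            "person" : {
+                "type" : "object",
+                "dynamic" : "strict",
+                "properties" : {
+                    "sid" : {"type" : "string", "index" : "not_analyzed"}
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------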
+
+[float]
+==== enabled
+
+The `enabled` flag allows to disable parsing and indexing a named object
+completely. This is handy when a portion of the JSON document contains
+arbitrary JSON which should not be indexed, nor added to the mapping.
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "properties" : {
+ "person" : {
+ "type" : "object",
+ "properties" : {
+ "name" : {
+ "type" : "object",
+ "enabled" : false
+ },
+ "sid" : {"type" : "string", "index" : "not_analyzed"}
+ }
+ },
+ "message" : {"type" : "string"}
+ }
+ }
+}
+--------------------------------------------------
+
+In the above, `name` and its content will not be indexed at all.
+
+
+[float]
+==== include_in_all
+
+`include_in_all` can be set on the `object` type level. When set, it
+propagates down to all the inner mappings defined within the `object`
+that do not explicitly set it.
+
+[float]
+==== path
+
+deprecated[1.0.0,Use <<copy-to,`copy_to`>> instead]
+
+In the <<mapping-core-types,core_types>>
+section, a field can have an `index_name` associated with it in order to
+control the name of the field that will be stored within the index. When
+that field exists within an object that is not the root object, the
+name of the field in the index can either include the full "path" to the
+field with its `index_name`, or just the `index_name`. For example
+(under a mapping of _type_ `person`, with the tweet type removed for clarity):
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "properties" : {
+ "name1" : {
+ "type" : "object",
+ "path" : "just_name",
+ "properties" : {
+ "first1" : {"type" : "string"},
+ "last1" : {"type" : "string", "index_name" : "i_last_1"}
+ }
+ },
+ "name2" : {
+ "type" : "object",
+ "path" : "full",
+ "properties" : {
+ "first2" : {"type" : "string"},
+ "last2" : {"type" : "string", "index_name" : "i_last_2"}
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, the `name1` and `name2` objects within the
+`person` object have different combinations of `path` and `index_name`.
+The document fields that will be stored in the index as a result of that
+are:
+
+[cols="<,<",options="header",]
+|=================================
+|JSON Name |Document Field Name
+|`name1`/`first1` |`first1`
+|`name1`/`last1` |`i_last_1`
+|`name2`/`first2` |`name2.first2`
+|`name2`/`last2` |`name2.i_last_2`
+|=================================
+
+Note, when querying or using a field name in any of the APIs provided
+(search, query, selective loading, ...), there is automatic translation
+between the logical full path and the `index_name`, and vice versa. For
+example, even though `name1`/`last1` defines that it is stored with
+`just_name` and a different `index_name`, it can be referred to either
+using `name1.last1` (its logical name) or its actual indexed name,
+`i_last_1`.
+
+Moreover, where applicable, for example in queries, the full path
+including the type can be used, such as `person.name.last1`. In this
+case, the actual indexed name will be resolved to match against the
+index, and an automatic query filter will be added to only match
+`person` types.
diff --git a/docs/reference/mapping/types/root-object-type.asciidoc b/docs/reference/mapping/types/root-object-type.asciidoc
new file mode 100644
index 0000000..ac368c4
--- /dev/null
+++ b/docs/reference/mapping/types/root-object-type.asciidoc
@@ -0,0 +1,224 @@
+[[mapping-root-object-type]]
+=== Root Object Type
+
+The root object mapping is an
+<<mapping-object-type,object type mapping>> that
+maps the root object (the type itself). On top of all the different
+mappings that can be set using the
+<<mapping-object-type,object type mapping>>, it
+allows for additional, type level mapping definitions.
+
+The root object mapping allows to index a JSON document that either
+starts with the actual mapping type, or only contains its fields. For
+example, the following `tweet` JSON can be indexed:
+
+[source,js]
+--------------------------------------------------
+{
+ "message" : "This is a tweet!"
+}
+--------------------------------------------------
+
+But, also the following JSON can be indexed:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "message" : "This is a tweet!"
+ }
+}
+--------------------------------------------------
+
+Out of the two, it is preferable to use the document *without* the type
+explicitly set.
+
+[float]
+==== Index / Search Analyzers
+
+The root object allows to define type-level analyzers for index
+and search that will be used with all the fields that do not
+explicitly set analyzers of their own. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "index_analyzer" : "standard",
+ "search_analyzer" : "standard"
+ }
+}
+--------------------------------------------------
+
+The above simply explicitly defines both the `index_analyzer` and
+`search_analyzer` that will be used. There is also an option to use the
+`analyzer` attribute to set both the `search_analyzer` and
+`index_analyzer`.
+
+[float]
+==== dynamic_date_formats
+
+`dynamic_date_formats` (the old setting `date_formats` still works)
+allows to set one or more date formats that will be used to
+detect `date` fields. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "dynamic_date_formats" : ["yyyy-MM-dd", "dd-MM-yyyy"],
+ "properties" : {
+ "message" : {"type" : "string"}
+ }
+ }
+}
+--------------------------------------------------
+
+In the above mapping, if a new JSON field of type string is detected,
+the date formats specified will be used in order to check if it is a date.
+If it passes parsing, then the field will be declared with `date` type,
+and will use the matching format as its format attribute. The date
+format itself is explained
+<<mapping-date-format,here>>.
+
+The default formats are: `dateOptionalTime` (ISO) and
+`yyyy/MM/dd HH:mm:ss Z||yyyy/MM/dd Z`.
+
+*Note:* `dynamic_date_formats` are used *only* for dynamically added
+date fields, not for `date` fields that you specify in your mapping.
+
+[float]
+==== date_detection
+
+Allows to disable automatic date type detection (where a newly introduced
+field matches the provided formats). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "date_detection" : false,
+ "properties" : {
+ "message" : {"type" : "string"}
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== numeric_detection
+
+Sometimes, even though JSON has support for native numeric types,
+numeric values are still provided as strings. In order to try and
+automatically detect numeric values from strings, `numeric_detection`
+can be set to `true`. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "tweet" : {
+ "numeric_detection" : true,
+ "properties" : {
+ "message" : {"type" : "string"}
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== dynamic_templates
+
+Dynamic templates allow to define mapping templates that will be applied
+when fields / objects are dynamically introduced.
+
+For example, we might want all fields to be stored by default,
+or all `string` fields to be stored, or `string` fields to always
+be indexed with the multi fields syntax, once analyzed and once not_analyzed.
+Here is a simple example:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "dynamic_templates" : [
+ {
+ "template_1" : {
+ "match" : "multi*",
+ "mapping" : {
+ "type" : "{dynamic_type}",
+ "index" : "analyzed",
+ "fields" : {
+ "org" : {"type": "{dynamic_type}", "index" : "not_analyzed"}
+ }
+ }
+ }
+ },
+ {
+ "template_2" : {
+ "match" : "*",
+ "match_mapping_type" : "string",
+ "mapping" : {
+ "type" : "string",
+ "index" : "not_analyzed"
+ }
+ }
+ }
+ ]
+ }
+}
+--------------------------------------------------
+
+The above mapping will create a field with multi fields for all field
+names starting with `multi`, and will map all `string` fields to be
+`not_analyzed`.
+
+Dynamic templates are named to allow for simple merge behavior. A new
+mapping containing just a new template can be "put", and that template will
+be added; if it has the same name, the existing template will be replaced.
+
+The `match` option allows to define matching on the field name. An `unmatch`
+option is also available to exclude fields if they do match on `match`.
+The `match_mapping_type` controls whether this template will be applied
+only to dynamic fields of the specified type (as guessed by the JSON
+format).
+
+Another option is to use `path_match`, which allows to match the dynamic
+template against the "full" dot notation name of the field (for example
+`obj1.*.value` or `obj1.obj2.*`), with the respective `path_unmatch`.
+
+All matching uses a simple format by default, where `*` acts as a
+wildcard, supporting simple patterns such as `xxx*`, `*xxx` and `xxx*yyy`
+(with an arbitrary number of wildcards), as well as direct equality.
+The `match_pattern` option can be set to `regex` to allow for regular
+expression based matching.
+
+The `mapping` element provides the actual mapping definition. The
+`{name}` keyword can be used and will be replaced with the actual
+dynamic field name being introduced. The `{dynamic_type}` (or
+`{dynamicType}`) can be used and will be replaced with the mapping
+derived based on the field type (or the derived type, like `date`).
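+
+For example, a sketch of a template that uses both keywords to index every
+dynamically added field under a prefixed name (the `dyn_` prefix is
+arbitrary):
+
+[source,js]
+--------------------------------------------------
+{
+    "person" : {
+        "dynamic_templates" : [
+            {
+                "prefixed_fields" : {
+                    "match" : "*",
+                    "mapping" : {
+                        "type" : "{dynamic_type}",
+                        "index_name" : "dyn_{name}"
+                    }
+                }
+            }
+        ]
+    }
+}
+--------------------------------------------------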
+
+Complete generic settings can also be applied, for example, to have all
+mappings be stored, just set:
+
+[source,js]
+--------------------------------------------------
+{
+ "person" : {
+ "dynamic_templates" : [
+ {
+ "store_generic" : {
+ "match" : "*",
+ "mapping" : {
+ "store" : true
+ }
+ }
+ }
+ ]
+ }
+}
+--------------------------------------------------
+
+Such generic templates should be placed at the end of the
+`dynamic_templates` list because when two or more dynamic templates
+match a field, only the first matching one from the list is used.
diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc
new file mode 100644
index 0000000..7fefd00
--- /dev/null
+++ b/docs/reference/migration/migrate_1_0.asciidoc
@@ -0,0 +1,376 @@
+[[breaking-changes]]
+= Breaking changes in 1.0
+
+[partintro]
+--
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 1.0.
+--
+
+== System and settings
+
+* Elasticsearch now runs in the foreground by default. There is no more `-f`
+ flag on the command line. Instead, to run elasticsearch as a daemon, use
+ the `-d` flag:
+
+[source,sh]
+---------------
+./bin/elasticsearch -d
+---------------
+
+* Command line settings can now be passed without the `-Des.` prefix, for
+ instance:
+
+[source,sh]
+---------------
+./bin/elasticsearch --node.name=search_1 --cluster.name=production
+---------------
+
+* Elasticsearch on 64 bit Linux now uses <<mmapfs,`mmapfs`>> by default. Make
+ sure that you set <<setup-service,`MAX_MAP_COUNT`>> to a sufficiently high
+ number. The RPM and Debian packages default this value to `262144`.
+
+* The RPM and Debian packages no longer start Elasticsearch by default.
+
+* The `cluster.routing.allocation` settings (`disable_allocation`,
+ `disable_new_allocation` and `disable_replica_location`) have been
+ <<modules-cluster,replaced by the single setting>>:
++
+[source,yaml]
+---------------
+cluster.routing.allocation.enable: all|primaries|new_primaries|none
+---------------
+
+== Stats and Info APIs
+
+The <<cluster-state,`cluster_state`>>, <<cluster-nodes-info,`nodes_info`>>,
+<<cluster-nodes-stats,`nodes_stats`>> and <<indices-stats,`indices_stats`>>
+APIs have all been changed to make their format more RESTful and less clumsy.
+
+For instance, if you just want the `nodes` section of the `cluster_state`,
+instead of:
+
+[source,sh]
+---------------
+GET /_cluster/state?filter_metadata&filter_routing_table&filter_blocks
+---------------
+
+you now use:
+
+[source,sh]
+---------------
+GET /_cluster/state/nodes
+---------------
+
+Similarly for the `nodes_stats` API, if you want the `transport` and `http`
+metrics only, instead of:
+
+[source,sh]
+---------------
+GET /_nodes/stats?clear&transport&http
+---------------
+
+you now use:
+
+[source,sh]
+---------------
+GET /_nodes/stats/transport,http
+---------------
+
+See the links above for full details.
+
+
+== Indices APIs
+
+The `mapping`, `alias`, `settings`, and `warmer` index APIs are all similar
+but there are subtle differences in the order of the URL and the response
+body. For instance, adding a mapping and a warmer look slightly different:
+
+[source,sh]
+---------------
+PUT /{index}/{type}/_mapping
+PUT /{index}/_warmer/{name}
+---------------
+
+These URLs have been unified as:
+
+[source,sh]
+---------------
+PUT /{indices}/_mapping/{type}
+PUT /{indices}/_alias/{name}
+PUT /{indices}/_warmer/{name}
+
+GET /{indices}/_mapping/{types}
+GET /{indices}/_alias/{names}
+GET /{indices}/_settings/{names}
+GET /{indices}/_warmer/{names}
+
+DELETE /{indices}/_mapping/{types}
+DELETE /{indices}/_alias/{names}
+DELETE /{indices}/_warmer/{names}
+---------------
+
+All of the `{indices}`, `{types}` and `{names}` parameters can be replaced by:
+
+ * `_all`, `*` or blank (ie left out altogether), all of which mean ``all''
+ * wildcards like `test*`
+ * comma-separated lists: `index_1,test_*`
+
+The only exception is `DELETE` which doesn't accept blank (missing)
+parameters. If you want to delete something, you should be specific.
+
+Similarly, the return values for `GET` have been unified with the following
+rules:
+
+* Only return values that exist. If you try to `GET` a mapping which doesn't
+ exist, then the result will be an empty object: `{}`. We no longer throw a
+ `404` if the requested mapping/warmer/alias/setting doesn't exist.
+
+* The response format always has the index name, then the section, then the
+ element name, for instance:
++
+[source,json]
+---------------
+{
+ "my_index": {
+ "mappings": {
+ "my_type": {...}
+ }
+ }
+}
+---------------
++
+This is a breaking change for the `get_mapping` API.
+
+In the future we will also provide plural versions to allow putting multiple mappings etc in a single request.
+
+See <<indices-put-mapping,`put-mapping`>>,
+<<indices-get-mapping,`get-mapping`>>,
+<<indices-get-field-mapping,`get-field-mapping`>>,
+<<indices-delete-mapping,`delete-mapping`>>,
+<<indices-update-settings,`update-settings`>>, <<indices-get-settings,`get-settings`>>,
+<<indices-warmers,`warmers`>>, and <<indices-aliases,`aliases`>> for more details.
+
+== Index request
+
+Previously a document could be indexed as itself, or wrapped in an outer
+object which specified the `type` name:
+
+[source,json]
+---------------
+PUT /my_index/my_type/1
+{
+ "my_type": {
+ ... doc fields ...
+ }
+}
+---------------
+
+This led to some ambiguity when a document also included a field with the same
+name as the `type`. We no longer accept the outer `type` wrapper, but this
+behaviour can be re-enabled on an index-by-index basis with the setting
+`index.mapping.allow_type_wrapper`.
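+
+For example, a sketch of re-enabling the old behaviour at index creation
+time (the index name is illustrative):
+
+[source,sh]
+---------------
+curl -XPUT localhost:9200/my_index -d '{
+    "settings" : {
+        "index.mapping.allow_type_wrapper" : true
+    }
+}'
+---------------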
+
+== Search requests
+
+While the `search` API takes a top-level `query` parameter, the
+<<search-count,`count`>>, <<docs-delete-by-query,`delete-by-query`>> and
+<<search-validate,`validate-query`>> requests expected the whole body to be a
+query. These have been changed to all accept a top-level `query` parameter:
+
+[source,json]
+---------------
+GET /_count
+{
+ "query": {
+ "match": {
+ "title": "Interesting stuff"
+ }
+ }
+}
+---------------
+
+Also, the top-level `filter` parameter in search has been renamed to
+<<search-request-post-filter,`post_filter`>>, to indicate that it should not
+be used as the primary way to filter search results (use a
+<<query-dsl-filtered-query,`filtered` query>> instead), but only to filter
+results AFTER facets/aggregations have been calculated.
+
+This example counts the top colors in all matching docs, but only returns docs
+with color `red`:
+
+[source,json]
+---------------
+GET /_search
+{
+ "query": {
+ "match_all": {}
+ },
+ "aggs": {
+ "colors": {
+ "terms": { "field": "color" }
+ }
+ },
+ "post_filter": {
+ "term": {
+ "color": "red"
+ }
+ }
+}
+---------------
+
+== Multi-fields
+
+Multi-fields are dead! Long live multi-fields! Well, the field type
+`multi_field` has been removed. Instead, any of the core field types
+(excluding `object` and `nested`) now accept a `fields` parameter. It's the
+same thing, but nicer. Instead of:
+
+[source,json]
+---------------
+"title": {
+ "type": "multi_field",
+ "fields": {
+ "title": { "type": "string" },
+ "raw": { "type": "string", "index": "not_analyzed" }
+ }
+}
+---------------
+
+you can now write:
+
+[source,json]
+---------------
+"title": {
+ "type": "string",
+ "fields": {
+ "raw": { "type": "string", "index": "not_analyzed" }
+ }
+}
+---------------
+
+Existing multi-fields will be upgraded to the new format automatically.
+
+Also, instead of having to use the arcane `path` and `index_name` parameters
+in order to index multiple fields into a single ``custom +_all+ field'', you
+can now use the <<copy-to,`copy_to` parameter>>.
+
+== Stopwords
+
+Previously, the <<analysis-standard-analyzer,`standard`>> and
+<<analysis-pattern-analyzer,`pattern`>> analyzers used the list of English stopwords
+by default, which caused some hard to debug indexing issues. Now they are set to
+use the empty stopwords list (ie `_none_`) instead.
+
+== Dates without years
+
+When dates are specified without a year, for example: `Dec 15 10:00:00` they
+are treated as dates in 2000 during indexing and range searches... except for
+the upper included bound `lte` where they were treated as dates in 1970! Now,
+all https://github.com/elasticsearch/elasticsearch/issues/4451[dates without years]
+use `1970` as the default.
+
+== Parameters
+
+* Geo queries used to use `miles` as the default unit. And we
+ http://en.wikipedia.org/wiki/Mars_Climate_Orbiter[all know what
+ happened at NASA] because of that decision. The new default unit is
+ https://github.com/elasticsearch/elasticsearch/issues/4515[`meters`].
+
+* For all queries that support _fuzziness_, the `min_similarity`, `fuzziness`
+ and `edit_distance` parameters have been unified as the single parameter
+ `fuzziness`. See <<fuzziness>> for details of accepted values.
+
+* The `ignore_missing` parameter has been replaced by the `expand_wildcards`,
+ `ignore_unavailable` and `allow_no_indices` parameters, all of which have
+ sensible defaults. See <<multi-index,the multi-index docs>> for more.
+
+* An index name (or pattern) is now required for destructive operations like
+ deleting indices:
++
+[source,sh]
+---------------
+# v0.90 - delete all indices:
+DELETE /
+
+# v1.0 - delete all indices:
+DELETE /_all
+DELETE /*
+---------------
++
+Setting `action.destructive_requires_name` to `true` provides further safety
+by disabling wildcard expansion on destructive actions.
+
+== Return values
+
+* The `ok` return value has been removed from all response bodies as it added
+ no useful information.
+
+* The `found`, `not_found` and `exists` return values have been unified as
+ `found` on all relevant APIs.
+
+* Field values, in response to the <<search-request-fields,`fields`>>
+ parameter, are now always returned as arrays. A field could have single or
+  multiple values, which meant that sometimes they were returned as scalars
+  and sometimes as arrays. By always returning arrays, this simplifies user
+  code. The only exception to this rule is when `fields` is used to retrieve
+  metadata like the `routing` value, which is always singular; metadata
+  fields are always returned as scalars.
++
+The `fields` parameter is intended to be used for retrieving stored fields,
+rather than for fields extracted from the `_source`. That means that it can no
+longer be used to return whole objects and it no longer accepts the
+`_source.fieldname` format. For these you should use the
+<<search-request-source-filtering,`_source`&#32; `_source_include` and `_source_exclude`>>
+parameters instead.
+
+* Settings, like `index.analysis.analyzer.default` are now returned as proper
+  nested JSON objects, which makes them easier to work with programmatically:
++
+[source,json]
+---------------
+{
+ "index": {
+ "analysis": {
+ "analyzer": {
+ "default": xxx
+ }
+ }
+ }
+}
+---------------
++
+You can choose to return them in flattened format by passing `?flat_settings`
+in the query string.
+
+* The <<indices-analyze,`analyze`>> API no longer supports the text response
+ format, but does support JSON and YAML.
+
+== Deprecations
+
+* The `text` query has been removed. Use the
+ <<query-dsl-match-query,`match`>> query instead.
+
+* The `field` query has been removed. Use the
+ <<query-dsl-query-string-query,`query_string`>> query instead.
+
+* Per-document boosting with the <<mapping-boost-field,`_boost`>> field has
+ been removed. You can use the
+ <<function-score-instead-of-boost,`function_score`>> instead.
+
+* The `path` parameter in mappings has been deprecated. Use the
+ <<copy-to,`copy_to`>> parameter instead.
+
+* The `custom_score` and `custom_boost_score` queries are no longer
+  supported. You can use <<query-dsl-function-score-query,`function_score`>> instead.
+
+== Percolator
+
+The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator,
+but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elasticsearch.org/blog/percolator-redesign-blog-post/[redesigned percolator]
+blog post for the reasons why the percolator has been redesigned.
+
+Elasticsearch will *not* delete the `_percolator` index when upgrading, but the percolate API will no longer use the
+queries stored in the `_percolator` index. In order to use the already stored queries, you can just re-index the queries
+from the `_percolator` index into any index under the reserved `.percolator` type. The format in which the percolate
+queries were stored has *not* been changed. So a simple script that does a scan search to retrieve all the percolator
+queries and then does a bulk request into another index should be sufficient.
diff --git a/docs/reference/modules.asciidoc b/docs/reference/modules.asciidoc
new file mode 100644
index 0000000..7915036
--- /dev/null
+++ b/docs/reference/modules.asciidoc
@@ -0,0 +1,37 @@
+[[modules]]
+= Modules
+
+include::modules/cluster.asciidoc[]
+
+include::modules/discovery.asciidoc[]
+
+include::modules/gateway.asciidoc[]
+
+include::modules/http.asciidoc[]
+
+include::modules/indices.asciidoc[]
+
+include::modules/memcached.asciidoc[]
+
+include::modules/network.asciidoc[]
+
+include::modules/node.asciidoc[]
+
+include::modules/tribe.asciidoc[]
+
+include::modules/plugins.asciidoc[]
+
+include::modules/scripting.asciidoc[]
+
+include::modules/advanced-scripting.asciidoc[]
+
+include::modules/threadpool.asciidoc[]
+
+include::modules/thrift.asciidoc[]
+
+include::modules/transport.asciidoc[]
+
+include::modules/snapshots.asciidoc[]
+
+
+
diff --git a/docs/reference/modules/advanced-scripting.asciidoc b/docs/reference/modules/advanced-scripting.asciidoc
new file mode 100644
index 0000000..d215661
--- /dev/null
+++ b/docs/reference/modules/advanced-scripting.asciidoc
@@ -0,0 +1,184 @@
+[[modules-advanced-scripting]]
+== Text scoring in scripts
+
+
+Text features, such as term or document frequency for a specific term, can be accessed in scripts (see <<modules-scripting, scripting documentation>>) with the `_index` variable. This can be useful if, for example, you want to implement your own scoring model using a script inside a <<query-dsl-function-score-query,function score query>>.
+Statistics over the document collection are computed *per shard*, not per
+index.
+
+[float]
+=== Nomenclature:
+
+
+[horizontal]
+`df`::
+
+ document frequency. The number of documents a term appears in. Computed
+ per field.
+
+
+`tf`::
+
+ term frequency. The number of times a term appears in a field in one specific
+ document.
+
+`ttf`::
+
+ total term frequency. The number of times this term appears in all
+ documents, that is, the sum of `tf` over all documents. Computed per
+ field.
+
+`df` and `ttf` are computed per shard and therefore these numbers can vary
+depending on the shard the current document resides in.
+
+
+[float]
+=== Shard statistics:
+
+`_index.numDocs()`::
+
+ Number of documents in shard.
+
+`_index.maxDoc()`::
+
+ Maximal document number in shard.
+
+`_index.numDeletedDocs()`::
+
+ Number of deleted documents in shard.
+
+
+[float]
+=== Field statistics:
+
+Field statistics can be accessed with a subscript operator like this:
+`_index['FIELD']`.
+
+
+`_index['FIELD'].docCount()`::
+
+ Number of documents containing the field `FIELD`. Does not take deleted documents into account.
+
+`_index['FIELD'].sumttf()`::
+
+ Sum of `ttf` over all terms that appear in field `FIELD` in all documents.
+
+`_index['FIELD'].sumdf()`::
+
+ The sum of `df` s over all terms that appear in field `FIELD` in all
+ documents.
+
+
+Field statistics are computed per shard and therefore these numbers can vary
+depending on the shard the current document resides in.
+The number of terms in a field cannot be accessed using the `_index` variable. See the <<mapping-core-types, word count mapping type>> for how to do that.
+
+[float]
+=== Term statistics:
+
+Term statistics for a field can be accessed with a subscript operator like
+this: `_index['FIELD']['TERM']`. This will never return null, even if the term or field does not exist.
+If you do not need the term frequency, call `_index['FIELD'].get('TERM', 0)`
+to avoid unnecessary initialization of the frequencies. The flag will only
+take effect if you set the `index_options` to `docs` (see <<mapping-core-types, mapping documentation>>).
+
+
+`_index['FIELD']['TERM'].df()`::
+
+ `df` of term `TERM` in field `FIELD`. Will be returned, even if the term
+ is not present in the current document.
+
+`_index['FIELD']['TERM'].ttf()`::
+
+ The sum of term frequencies of term `TERM` in field `FIELD` over all
+ documents. Will be returned, even if the term is not present in the
+ current document.
+
+`_index['FIELD']['TERM'].tf()`::
+
+ `tf` of term `TERM` in field `FIELD`. Will be 0 if the term is not present
+ in the current document.
+
+
+[float]
+=== Term positions, offsets and payloads:
+
+If you need information on the positions of terms in a field, call
+`_index['FIELD'].get('TERM', flag)` where flag can be
+
+[horizontal]
+`_POSITIONS`:: if you need the positions of the term
+`_OFFSETS`:: if you need the offsets of the term
+`_PAYLOADS`:: if you need the payloads of the term
+`_CACHE`:: if you need to iterate over all positions several times
+
+The iterator uses the underlying Lucene classes to iterate over positions. For efficiency reasons, you can only iterate over positions once. If you need to iterate over the positions several times, set the `_CACHE` flag.
+
+You can combine the flags with a `|` if you need more than one piece of
+information. For example, the following will return an object holding the
+positions and payloads, as well as all statistics:
+
+
+ `_index['FIELD'].get('TERM', _POSITIONS | _PAYLOADS)`
+
+
+Positions can be accessed with an iterator that returns an object
+(`POS_OBJECT`) holding position, offsets and payload for each term position.
+
+`POS_OBJECT.position`::
+
+ The position of the term.
+
+`POS_OBJECT.startOffset`::
+
+ The start offset of the term.
+
+`POS_OBJECT.endOffset`::
+
+ The end offset of the term.
+
+`POS_OBJECT.payload`::
+
+ The payload of the term.
+
+`POS_OBJECT.payloadAsInt(missingValue)`::
+
+ The payload of the term converted to integer. If the current position has
+ no payload, the `missingValue` will be returned. Call this only if you
+ know that your payloads are integers.
+
+`POS_OBJECT.payloadAsFloat(missingValue)`::
+
+ The payload of the term converted to float. If the current position has no
+ payload, the `missingValue` will be returned. Call this only if you know
+ that your payloads are floats.
+
+`POS_OBJECT.payloadAsString()`::
+
+ The payload of the term converted to string. If the current position has
+ no payload, `null` will be returned. Call this only if you know that your
+ payloads are strings.
+
+
+The following example sums up all payloads for the term `foo`:
+
+[source,mvel]
+---------------------------------------------------------
+termInfo = _index['my_field'].get('foo',_PAYLOADS);
+score = 0;
+for (pos : termInfo) {
+ score = score + pos.payloadAsInt(0);
+}
+return score;
+---------------------------------------------------------
+
+
+[float]
+=== Term vectors:
+
+The `_index` variable can only be used to gather statistics for single terms. If you want to use information on all terms in a field, you must store the term vectors (set `term_vector` in the mapping as described in the <<mapping-core-types,mapping documentation>>). To access them, call
+`_index.getTermVectors()` to get a
+https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[Fields]
+instance. This object can then be used as described in https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[lucene doc] to iterate over fields and then for each field iterate over each term in the field.
+The method will return null if the term vectors were not stored.
+
diff --git a/docs/reference/modules/cluster.asciidoc b/docs/reference/modules/cluster.asciidoc
new file mode 100644
index 0000000..e61fbf6
--- /dev/null
+++ b/docs/reference/modules/cluster.asciidoc
@@ -0,0 +1,239 @@
+[[modules-cluster]]
+== Cluster
+
+[float]
+[[shards-allocation]]
+=== Shards Allocation
+
+Shards allocation is the process of allocating shards to nodes. This can
+happen during initial recovery, replica allocation, rebalancing, or
+handling nodes being added or removed.
+
+The following settings may be used:
+
+`cluster.routing.allocation.allow_rebalance`::
+ Allows to control when rebalancing will happen based on the total
+ state of all the index shards in the cluster. `always`,
+ `indices_primaries_active`, and `indices_all_active` are allowed,
+ defaulting to `indices_all_active` to reduce chatter during
+ initial recovery.
+
+
+`cluster.routing.allocation.cluster_concurrent_rebalance`::
+ Allows to control how many concurrent shard rebalances are
+ allowed cluster wide. Defaults to `2`.
+
+
+`cluster.routing.allocation.node_initial_primaries_recoveries`::
+ Allows to control specifically the number of initial recoveries
+ of primaries that are allowed per node. Since most of the time the
+ local gateway is used, those recoveries should be fast and we can
+ handle more of them per node without creating load.
+
+
+`cluster.routing.allocation.node_concurrent_recoveries`::
+ How many concurrent recoveries are allowed to happen on a node.
+ Defaults to `2`.
+
+`cluster.routing.allocation.enable`::
+ Controls shard allocation for all indices, by allowing specific
+ kinds of shard to be allocated.
+ added[1.0.0.RC1,Replaces `cluster.routing.allocation.disable*`]
+ Can be set to:
+ * `all` (default) - Allows shard allocation for all kinds of shards.
+ * `primaries` - Allows shard allocation only for primary shards.
+ * `new_primaries` - Allows shard allocation only for primary shards for new indices.
+ * `none` - No shard allocations of any kind are allowed for all indices.
+
+`cluster.routing.allocation.disable_new_allocation`::
+ deprecated[1.0.0.RC1,Replaced by `cluster.routing.allocation.enable`]
+
+`cluster.routing.allocation.disable_allocation`::
+ deprecated[1.0.0.RC1,Replaced by `cluster.routing.allocation.enable`]
+
+
+`cluster.routing.allocation.disable_replica_allocation`::
+ deprecated[1.0.0.RC1,Replaced by `cluster.routing.allocation.enable`]
+
+`cluster.routing.allocation.same_shard.host`::
+ Allows to perform a check to prevent allocation of multiple instances
+ of the same shard on a single host, based on host name and host address.
+ Defaults to `false`, meaning that no check is performed by default. This
+ setting only applies if multiple nodes are started on the same machine.
+
+`indices.recovery.concurrent_streams`::
+ The number of streams to open (on a *node* level) to recover a
+ shard from a peer shard. Defaults to `3`.
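+
+Many of these settings can be updated on a live cluster. For example, a
+sketch of temporarily disabling all shard allocation through the cluster
+update settings API:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+    "transient" : {
+        "cluster.routing.allocation.enable" : "none"
+    }
+}'
+--------------------------------------------------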
+
+[float]
+[[allocation-awareness]]
+=== Shard Allocation Awareness
+
+Cluster allocation awareness allows to configure shard and replica
+allocation across generic attributes associated with the nodes. Let's explain
+it through an example:
+
+Assume we have several racks. When we start a node, we can configure an
+attribute called `rack_id` (any attribute name works), for example, here
+is a sample config:
+
+----------------------
+node.rack_id: rack_one
+----------------------
+
+The above sets an attribute called `rack_id` for the relevant node with
+a value of `rack_one`. Now, we need to configure the `rack_id` attribute
+as one of the awareness allocation attributes (set it on *all* (master
+eligible) nodes config):
+
+--------------------------------------------------------
+cluster.routing.allocation.awareness.attributes: rack_id
+--------------------------------------------------------
+
+The above will mean that the `rack_id` attribute will be used to do
+awareness based allocation of a shard and its replicas. For example, let's
+say we start 2 nodes with `node.rack_id` set to `rack_one`, and deploy a
+single index with 5 shards and 1 replica. The index will be fully
+deployed on the current nodes (5 shards and 1 replica each, total of 10
+shards).
+
+Now, if we start two more nodes, with `node.rack_id` set to `rack_two`,
+shards will relocate to even the number of shards across the nodes, but,
+a shard and its replica will not be allocated in the same `rack_id`
+value.
+
+The awareness attributes can hold several values, for example:
+
+-------------------------------------------------------------
+cluster.routing.allocation.awareness.attributes: rack_id,zone
+-------------------------------------------------------------
+
+*NOTE*: When using awareness attributes, shards will not be allocated to
+nodes that don't have values set for those attributes.
+
+[float]
+[[forced-awareness]]
+=== Forced Awareness
+
+Sometimes, we know in advance the number of values an awareness
+attribute can have, and moreover, we would like never to have more
+replicas than needed allocated on a specific group of nodes with the
+same awareness attribute value. For that, we can force awareness on
+specific attributes.
+
+For example, let's say we have an awareness attribute called `zone`, and
+we know we are going to have two zones, `zone1` and `zone2`. Here is how
+we can force awareness on a node:
+
+[source,yaml]
+-------------------------------------------------------------------
+cluster.routing.allocation.awareness.force.zone.values: zone1,zone2
+cluster.routing.allocation.awareness.attributes: zone
+-------------------------------------------------------------------
+
+Now, let's say we start 2 nodes with `node.zone` set to `zone1` and
+create an index with 5 shards and 1 replica. The index will be created,
+but only 5 shards will be allocated (with no replicas). Only when we
+start more nodes with `node.zone` set to `zone2` will the replicas be
+allocated.
+
+[float]
+==== Automatic Preference When Searching / GETing
+
+When executing a search, or doing a get, the node receiving the request
+will prefer to execute the request on shards that exist on nodes that
+have the same attribute values as the executing node.
+
+[float]
+==== Realtime Settings Update
+
+The settings can be updated using the <<cluster-update-settings,cluster update settings API>> on a live cluster.
+
+[float]
+[[allocation-filtering]]
+=== Shard Allocation Filtering
+
+Allow to control allocation of indices on nodes based on include/exclude
+filters. The filters can be set both on the index level and on the
+cluster level. Let's start with an example of setting it on the index
+level:
+
+Let's say we have 4 nodes, each with a specific attribute called `tag`
+associated with it (the name of the attribute can be any name). Each
+node has a specific value associated with `tag`. Node 1 has a setting
+`node.tag: value1`, Node 2 a setting of `node.tag: value2`, and so on.
+
+We can create an index that will only deploy on nodes that have `tag`
+set to `value1` and `value2` by setting
+`index.routing.allocation.include.tag` to `value1,value2`. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index.routing.allocation.include.tag" : "value1,value2"
+}'
+--------------------------------------------------
+
+On the other hand, we can create an index that will be deployed on all
+nodes except for nodes with a `tag` of value `value3` by setting
+`index.routing.allocation.exclude.tag` to `value3`. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index.routing.allocation.exclude.tag" : "value3"
+}'
+--------------------------------------------------
+
+`index.routing.allocation.require.*` can be used to
+specify a number of rules, all of which MUST match in order for a shard
+to be allocated to a node. This is in contrast to `include` which will
+include a node if ANY rule matches.
+
+The `include`, `exclude` and `require` values can have generic simple
+matching wildcards, for example, `value1*`. A special attribute name
+called `_ip` can be used to match on node ip values. In addition `_host`
+attribute can be used to match on either the node's hostname or its ip
+address. Similarly `_name` and `_id` attributes can be used to match on
+node name and node id accordingly.
+
+Obviously a node can have several attributes associated with it, and
+both the attribute name and value are controlled in the setting. For
+example, here is a sample of several node configurations:
+
+[source,yaml]
+--------------------------------------------------
+node.group1: group1_value1
+node.group2: group2_value4
+--------------------------------------------------
+
+In the same manner, `include`, `exclude` and `require` can work against
+several attributes, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/test/_settings -d '{
+ "index.routing.allocation.include.group1" : "xxx"
+ "index.routing.allocation.include.group2" : "yyy",
+ "index.routing.allocation.exclude.group3" : "zzz",
+ "index.routing.allocation.require.group4" : "aaa"
+}'
+--------------------------------------------------
+
+The provided settings can also be updated in real time using the update
+settings API, making it possible to "move" indices (shards) around in
+real time.
+
+Cluster wide filtering can also be defined, and be updated in real time
+using the cluster update settings API. This setting can come in handy
+for things like decommissioning nodes (even if the replica count is set
+to 0). Here is a sample of how to decommission a node based on `_ip`
+address:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+ "transient" : {
+ "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
+ }
+}'
+--------------------------------------------------
diff --git a/docs/reference/modules/discovery.asciidoc b/docs/reference/modules/discovery.asciidoc
new file mode 100644
index 0000000..292748d
--- /dev/null
+++ b/docs/reference/modules/discovery.asciidoc
@@ -0,0 +1,30 @@
+[[modules-discovery]]
+== Discovery
+
+The discovery module is responsible for discovering nodes within a
+cluster, as well as electing a master node.
+
+Note, Elasticsearch is a peer to peer based system, in which nodes
+communicate with one another directly; operations are delegated /
+broadcast as needed. All the main APIs (index, delete, search) do not
+communicate with the master node. The responsibility of the master node
+is to maintain the global cluster state, and to act when nodes join or
+leave the cluster by reassigning shards. Each time the cluster state is
+changed, the state is made known to the other nodes in the cluster (the
+manner depends on the actual discovery implementation).
+
+[float]
+=== Settings
+
+The `cluster.name` setting allows different clusters to be separated
+from one another. The default value for the cluster name is
+`elasticsearch`, though it is recommended to change this to reflect the
+logical group name of the cluster.
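+
+For example, a minimal sketch of setting the cluster name in
+`config/elasticsearch.yml` (the name `logging-prod` is purely
+illustrative):
+
+[source,yaml]
+--------------------------------------------------
+cluster.name: logging-prod
+--------------------------------------------------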
+
+include::discovery/azure.asciidoc[]
+
+include::discovery/ec2.asciidoc[]
+
+include::discovery/gce.asciidoc[]
+
+include::discovery/zen.asciidoc[]
diff --git a/docs/reference/modules/discovery/azure.asciidoc b/docs/reference/modules/discovery/azure.asciidoc
new file mode 100644
index 0000000..bb6fdc8
--- /dev/null
+++ b/docs/reference/modules/discovery/azure.asciidoc
@@ -0,0 +1,6 @@
+[[modules-discovery-azure]]
+=== Azure Discovery
+
+Azure discovery allows the Azure APIs to be used to perform automatic discovery (similar to multicast).
+Please check the https://github.com/elasticsearch/elasticsearch-cloud-azure[plugin website]
+to find the full documentation.
diff --git a/docs/reference/modules/discovery/ec2.asciidoc b/docs/reference/modules/discovery/ec2.asciidoc
new file mode 100644
index 0000000..9d0fa3f
--- /dev/null
+++ b/docs/reference/modules/discovery/ec2.asciidoc
@@ -0,0 +1,6 @@
+[[modules-discovery-ec2]]
+=== EC2 Discovery
+
+EC2 discovery allows the EC2 APIs to be used to perform automatic discovery (similar to multicast).
+Please check the https://github.com/elasticsearch/elasticsearch-cloud-aws[plugin website]
+to find the full documentation.
diff --git a/docs/reference/modules/discovery/gce.asciidoc b/docs/reference/modules/discovery/gce.asciidoc
new file mode 100644
index 0000000..bb9c89f
--- /dev/null
+++ b/docs/reference/modules/discovery/gce.asciidoc
@@ -0,0 +1,6 @@
+[[modules-discovery-gce]]
+=== Google Compute Engine Discovery
+
+Google Compute Engine (GCE) discovery allows the GCE APIs to be used to perform automatic discovery (similar to multicast).
+Please check the https://github.com/elasticsearch/elasticsearch-cloud-gce[plugin website]
+to find the full documentation.
diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc
new file mode 100644
index 0000000..64281d9
--- /dev/null
+++ b/docs/reference/modules/discovery/zen.asciidoc
@@ -0,0 +1,161 @@
+[[modules-discovery-zen]]
+=== Zen Discovery
+
+Zen discovery is the built-in discovery module for elasticsearch, and
+the default. It provides both multicast and unicast discovery, and can
+easily be extended to support cloud environments.
+
+Zen discovery is integrated with other modules; for example, all
+communication between nodes is done using the
+<<modules-transport,transport>> module.
+
+It is separated into several sub-modules, which are explained below:
+
+[float]
+[[ping]]
+==== Ping
+
+This is the process where a node uses the discovery mechanisms to find
+other nodes. Both multicast and unicast based discovery are supported
+(and they can be used in conjunction with each other).
+
+[float]
+[[multicast]]
+===== Multicast
+
+Multicast ping discovery of other nodes is done by sending one or more
+multicast requests which existing nodes will receive and respond to. It
+provides the following settings with the
+`discovery.zen.ping.multicast` prefix:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`group` |The group address to use. Defaults to `224.2.2.4`.
+
+|`port` |The port to use. Defaults to `54328`.
+
+|`ttl` |The ttl of the multicast message. Defaults to `3`.
+
+|`address` |The address to bind to, defaults to `null` which means it
+will bind to all available network interfaces.
+
+|`enabled` |Whether multicast ping discovery is enabled. Defaults to `true`.
+|=======================================================================
+
+[float]
+[[unicast]]
+===== Unicast
+
+Unicast discovery allows discovery to be performed when multicast is
+not enabled. It basically requires a list of hosts to use that will act
+as gossip routers. It provides the following settings with the
+`discovery.zen.ping.unicast` prefix:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`hosts` |Either an array setting or a comma delimited setting. Each
+value is either in the form of `host:port`, or in the form of
+`host[port1-port2]`.
+|=======================================================================
+
+The unicast discovery uses the
+<<modules-transport,transport>> module to
+perform the discovery.
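+
+For example, a minimal sketch of a unicast-only configuration in
+`config/elasticsearch.yml`; the host names are hypothetical:
+
+[source,yaml]
+--------------------------------------------------
+discovery.zen.ping.multicast.enabled: false
+discovery.zen.ping.unicast.hosts: ["host1:9300", "host2[9300-9400]"]
+--------------------------------------------------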
+
+[float]
+[[master-election]]
+==== Master Election
+
+As part of the initial ping process a master of the cluster is either
+elected or joined to. This is done automatically. The
+`discovery.zen.ping_timeout` (which defaults to `3s`) allows the
+election to be tuned to handle cases of slow or congested networks
+(higher values lower the chance of failure).
+
+Nodes can be excluded from becoming a master by setting `node.master` to
+`false`. Note, once a node is a client node (`node.client` set to
+`true`), it will not be allowed to become a master (`node.master` is
+automatically set to `false`).
+
+The `discovery.zen.minimum_master_nodes` setting controls the minimum
+number of master eligible nodes a node should "see" in order to operate
+within the cluster. It is recommended to set it to a value higher than 1
+when running more than 2 nodes in the cluster.
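+
+For example, a sketch for a cluster of 3 master eligible nodes, where a
+majority (2) must be visible; the value should be adjusted to your own
+node count:
+
+[source,yaml]
+--------------------------------------------------
+discovery.zen.minimum_master_nodes: 2
+--------------------------------------------------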
+
+[float]
+[[fault-detection]]
+==== Fault Detection
+
+There are two fault detection processes running. The first is by the
+master, which pings all the other nodes in the cluster to verify that
+they are alive. On the other end, each node pings the master to verify
+that it is still alive, or whether an election process needs to be
+initiated.
+
+The following settings control the fault detection process using the
+`discovery.zen.fd` prefix:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`ping_interval` |How often a node gets pinged. Defaults to `1s`.
+
+|`ping_timeout` |How long to wait for a ping response, defaults to
+`30s`.
+
+|`ping_retries` |How many ping failures / timeouts cause a node to be
+considered failed. Defaults to `3`.
+|=======================================================================
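+
+For example, a sketch of how these settings compose with the prefix (the
+values shown are illustrative, not recommendations):
+
+[source,yaml]
+--------------------------------------------------
+discovery.zen.fd.ping_interval: 1s
+discovery.zen.fd.ping_timeout: 10s
+discovery.zen.fd.ping_retries: 3
+--------------------------------------------------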
+
+[float]
+==== External Multicast
+
+The multicast discovery also supports external multicast requests to
+discover nodes. The external client can send a request to the multicast
+IP/group and port, in the form of:
+
+[source,js]
+--------------------------------------------------
+{
+ "request" : {
+ "cluster_name": "test_cluster"
+ }
+}
+--------------------------------------------------
+
+The response will be similar to the node info response (with node level
+information only, including transport/http addresses, and node
+attributes):
+
+[source,js]
+--------------------------------------------------
+{
+ "response" : {
+ "cluster_name" : "test_cluster",
+ "transport_address" : "...",
+ "http_address" : "...",
+ "attributes" : {
+ "..."
+ }
+ }
+}
+--------------------------------------------------
+
+Note, internal multicast discovery can be disabled while external
+multicast discovery keeps working: keep
+`discovery.zen.ping.multicast.enabled` set to `true` (the default), but
+set `discovery.zen.ping.multicast.ping.enabled` to `false`.
+
+[float]
+==== Cluster state updates
+
+The master node is the only node in a cluster that can make changes to the
+cluster state. The master node processes one cluster state update at a time,
+applies the required changes and publishes the updated cluster state to all
+the other nodes in the cluster. Each node receives the publish message,
+updates its own cluster state and replies to the master node, which waits for
+all nodes to respond, up to a timeout, before going ahead processing the next
+updates in the queue. The `discovery.zen.publish_timeout` is set by default
+to 30 seconds and can be changed dynamically through the
+<<cluster-update-settings,cluster update settings api>>.
diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc
new file mode 100644
index 0000000..539a57d
--- /dev/null
+++ b/docs/reference/modules/gateway.asciidoc
@@ -0,0 +1,75 @@
+[[modules-gateway]]
+== Gateway
+
+The gateway module allows one to store the state of the cluster meta
+data across full cluster restarts. The cluster meta data mainly holds
+all the indices created with their respective (index level) settings and
+explicit type mappings.
+
+Each time the cluster meta data changes (for example, when an index is
+added or deleted), those changes will be persisted using the gateway.
+When the cluster first starts up, the state will be read from the
+gateway and applied.
+
+The gateway set on the node level will automatically control the index
+gateway that will be used. For example, if the `fs` gateway is used,
+then automatically, each index created on the node will also use its own
+respective index level `fs` gateway. In this case, if an index should
+not persist its state, it should be explicitly set to `none` (which is
+the only other value it can be set to).
+
+The default gateway used is the
+<<modules-gateway-local,local>> gateway.
+
+[float]
+[[recover-after]]
+=== Recovery After Nodes / Time
+
+In many cases, the actual cluster meta data should only be recovered
+after specific nodes have started in the cluster, or a timeout has
+passed. This is handy when restarting the cluster, and each node's local
+index storage still exists to be reused rather than recovered from the
+gateway (which reduces the time it takes to recover from the gateway).
+
+The `gateway.recover_after_nodes` setting (which accepts a number)
+controls the number of data and master eligible nodes that must be
+present in the cluster before recovery starts. The
+`gateway.recover_after_data_nodes` and
+`gateway.recover_after_master_nodes` settings work in a similar fashion,
+except they consider only the number of data nodes and only the number
+of master nodes respectively. The `gateway.recover_after_time` setting
+(which accepts a time value) sets the time to wait until recovery
+happens once all `gateway.recover_after...nodes` conditions are met.
+
+The `gateway.expected_nodes` setting allows specifying how many data and
+master eligible nodes are expected to be in the cluster; once that
+expectation is met, the `recover_after_time` is ignored and recovery
+starts. The `gateway.expected_data_nodes` and
+`gateway.expected_master_nodes` settings are also supported. For
+example, the setting:
+
+[source,js]
+--------------------------------------------------
+gateway:
+ recover_after_nodes: 1
+ recover_after_time: 5m
+ expected_nodes: 2
+--------------------------------------------------
+
+in a cluster expected to have 2 nodes will cause recovery to start 5
+minutes after the first node is up; once there are 2 nodes in the
+cluster, recovery will begin immediately (without waiting).
+
+Note, once the meta data has been recovered from the gateway (which
+indices to create, mappings and so on), this setting is no longer
+effective until the next full restart of the cluster.
+
+Operations are blocked while the cluster meta data has not been
+recovered, in order not to conflict with the actual cluster meta data
+that will be recovered once the conditions have been met.
+
+include::gateway/local.asciidoc[]
+
+include::gateway/fs.asciidoc[]
+
+include::gateway/hadoop.asciidoc[]
+
+include::gateway/s3.asciidoc[]
diff --git a/docs/reference/modules/gateway/fs.asciidoc b/docs/reference/modules/gateway/fs.asciidoc
new file mode 100644
index 0000000..8d765d3
--- /dev/null
+++ b/docs/reference/modules/gateway/fs.asciidoc
@@ -0,0 +1,39 @@
+[[modules-gateway-fs]]
+=== Shared FS Gateway
+
+*The shared FS gateway is deprecated and will be removed in a future
+version. Please use the
+<<modules-gateway-local,local gateway>>
+instead.*
+
+The file system based gateway stores the cluster meta data and indices
+in a *shared* file system. Note, since it is a distributed system, the
+file system should be shared between all different nodes. Here is an
+example config to enable it:
+
+[source,js]
+--------------------------------------------------
+gateway:
+ type: fs
+--------------------------------------------------
+
+[float]
+==== location
+
+The location where the gateway stores the cluster state can be set using
+the `gateway.fs.location` setting. By default, it will be stored under
+the `work` directory. Note, the `work` directory is considered a
+temporary directory by Elasticsearch (meaning it is safe to `rm -rf`
+it); since the default location places the persistent gateway under
+`work`, *it should be changed*.
+
+When explicitly specifying the `gateway.fs.location`, each node will
+append its `cluster.name` to the provided location. It means that the
+location provided can safely support several clusters.
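+
+For example, a sketch of pointing the gateway at a shared mount (the
+path is hypothetical):
+
+[source,js]
+--------------------------------------------------
+gateway:
+    type: fs
+    fs:
+        location: /mnt/shared/gateway
+--------------------------------------------------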
+
+[float]
+==== concurrent_streams
+
+The `gateway.fs.concurrent_streams` setting allows throttling the number
+of streams (per node) opened against the shared gateway when performing
+the snapshot operation. It defaults to `5`.
diff --git a/docs/reference/modules/gateway/hadoop.asciidoc b/docs/reference/modules/gateway/hadoop.asciidoc
new file mode 100644
index 0000000..b55a4be
--- /dev/null
+++ b/docs/reference/modules/gateway/hadoop.asciidoc
@@ -0,0 +1,36 @@
+[[modules-gateway-hadoop]]
+=== Hadoop Gateway
+
+*The hadoop gateway is deprecated and will be removed in a future
+version. Please use the
+<<modules-gateway-local,local gateway>>
+instead.*
+
+The hadoop (HDFS) based gateway stores the cluster meta data and indices
+data in hadoop. Hadoop support is provided as a plugin; installation is
+explained https://github.com/elasticsearch/elasticsearch-hadoop[here],
+or download the hadoop plugin and place it under the `plugins`
+directory. Here is an example config to enable it:
+
+[source,js]
+--------------------------------------------------
+gateway:
+ type: hdfs
+ hdfs:
+ uri: hdfs://myhost:8022
+--------------------------------------------------
+
+[float]
+==== Settings
+
+The hadoop gateway requires two simple settings. The `gateway.hdfs.uri`
+controls the URI to connect to the hadoop cluster, for example:
+`hdfs://myhost:8022`. The `gateway.hdfs.path` controls the path under
+which the gateway will store the data.
+
+[float]
+==== concurrent_streams
+
+The `gateway.hdfs.concurrent_streams` setting allows throttling the
+number of streams (per node) opened against the shared gateway when
+performing the snapshot operation. It defaults to `5`.
diff --git a/docs/reference/modules/gateway/local.asciidoc b/docs/reference/modules/gateway/local.asciidoc
new file mode 100644
index 0000000..eb4c4f3
--- /dev/null
+++ b/docs/reference/modules/gateway/local.asciidoc
@@ -0,0 +1,57 @@
+[[modules-gateway-local]]
+=== Local Gateway
+
+The local gateway allows for recovery of the full cluster state and
+indices from the local storage of each node, and does not require a
+common node level shared storage.
+
+Note, unlike the shared gateway types, persistence to the local
+gateway is *not* done in an async manner. Once an operation is
+performed, the data is there for the local gateway to recover it in case
+of full cluster failure.
+
+It is important to configure the `gateway.recover_after_nodes` setting
+to include most of the nodes expected to be started after a full cluster
+restart. This will ensure that the latest cluster state is recovered.
+For example:
+
+[source,js]
+--------------------------------------------------
+gateway:
+ recover_after_nodes: 1
+ recover_after_time: 5m
+ expected_nodes: 2
+--------------------------------------------------
+
+[float]
+==== Dangling indices
+
+When a node joins the cluster, any shards/indices stored in its local `data/`
+directory which do not already exist in the cluster will be imported into the
+cluster by default. This functionality has two purposes:
+
+1. If a new master node is started which is unaware of the other indices in
+ the cluster, adding the old nodes will cause the old indices to be
+ imported, instead of being deleted.
+
+2. An old index can be added to an existing cluster by copying it to the
+ `data/` directory of a new node, starting the node and letting it join
+ the cluster. Once the index has been replicated to other nodes in the
+ cluster, the new node can be shut down and removed.
+
+The import of dangling indices can be controlled with the
+`gateway.local.auto_import_dangled` setting, which accepts:
+
+[horizontal]
+`yes`::
+
+ Import dangling indices into the cluster (default).
+
+`close`::
+
+ Import dangling indices into the cluster state, but leave them closed.
+
+`no`::
+
+ Delete dangling indices after `gateway.local.dangling_timeout`, which
+ defaults to 2 hours.
diff --git a/docs/reference/modules/gateway/s3.asciidoc b/docs/reference/modules/gateway/s3.asciidoc
new file mode 100644
index 0000000..8f2f5d9
--- /dev/null
+++ b/docs/reference/modules/gateway/s3.asciidoc
@@ -0,0 +1,51 @@
+[[modules-gateway-s3]]
+=== S3 Gateway
+
+*The S3 gateway is deprecated and will be removed in a future version.
+Please use the <<modules-gateway-local,local
+gateway>> instead.*
+
+The S3 based gateway allows long term, reliable, async persistence of
+the cluster state and indices directly to Amazon S3. Here is how it can
+be configured:
+
+[source,js]
+--------------------------------------------------
+cloud:
+ aws:
+ access_key: AKVAIQBF2RECL7FJWGJQ
+ secret_key: vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br
+
+
+gateway:
+ type: s3
+ s3:
+ bucket: bucket_name
+--------------------------------------------------
+
+You'll need to install the `cloud-aws` plugin by running
+`bin/plugin install cloud-aws` before (re)starting elasticsearch.
+
+The following is a list of settings (prefixed with `gateway.s3`) that
+can further control the S3 gateway:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`chunk_size` |Big files are broken down into chunks (to overcome the
+AWS 5GB object size limit and to use concurrent snapshotting). Defaults
+to `100m`.
+|=======================================================================
+
+[float]
+==== concurrent_streams
+
+The `gateway.s3.concurrent_streams` setting allows throttling the number
+of streams (per node) opened against the shared gateway when performing
+the snapshot operation. It defaults to `5`.
+
+[float]
+==== Region
+
+The `cloud.aws.region` can be set to a region and will automatically use
+the relevant settings for both `ec2` and `s3`. The available values are:
+`us-east-1`, `us-west-1`, `ap-southeast-1`, `eu-west-1`.
diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc
new file mode 100644
index 0000000..fbc153b
--- /dev/null
+++ b/docs/reference/modules/http.asciidoc
@@ -0,0 +1,51 @@
+[[modules-http]]
+== HTTP
+
+The http module allows *elasticsearch* APIs to be exposed
+over HTTP.
+
+The http mechanism is completely asynchronous in nature, meaning that
+there is no blocking thread waiting for a response. The benefit of using
+asynchronous communication for HTTP is solving the
+http://en.wikipedia.org/wiki/C10k_problem[C10k problem].
+
+When possible, consider using
+http://en.wikipedia.org/wiki/Keepalive#HTTP_Keepalive[HTTP keep alive]
+when connecting for better performance and try to get your favorite
+client not to do
+http://en.wikipedia.org/wiki/Chunked_transfer_encoding[HTTP chunking].
+
+[float]
+=== Settings
+
+The following are the settings that can be configured for HTTP:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`http.port` |A bind port range. Defaults to `9200-9300`.
+
+|`http.max_content_length` |The max content length of an HTTP request.
+Defaults to `100mb`.
+
+|`http.max_initial_line_length` |The max length of an HTTP URL. Defaults
+to `4kb`.
+
+|`http.compression` |Support for compression when possible (with
+Accept-Encoding). Defaults to `false`.
+
+|`http.compression_level` |Defines the compression level to use.
+Defaults to `6`.
+|=======================================================================
+
+It also uses the common
+<<modules-network,network settings>>.
+
+[float]
+=== Disable HTTP
+
+The http module can be completely disabled and not started by setting
+`http.enabled` to `false`. This makes sense when HTTP requests are only
+accepted by non <<modules-node,data nodes>>, which
+communicate with the data nodes using the internal
+<<modules-transport,transport>>.
diff --git a/docs/reference/modules/indices.asciidoc b/docs/reference/modules/indices.asciidoc
new file mode 100644
index 0000000..75fd8f5
--- /dev/null
+++ b/docs/reference/modules/indices.asciidoc
@@ -0,0 +1,76 @@
+[[modules-indices]]
+== Indices
+
+The indices module allows the control of settings that are globally
+managed for all indices.
+
+[float]
+[[buffer]]
+=== Indexing Buffer
+
+The indexing buffer setting controls how much memory will be
+allocated for the indexing process. It is a global setting that bubbles
+down to all the different shards allocated on a specific node.
+
+The `indices.memory.index_buffer_size` accepts either a percentage or a
+byte size value. It defaults to `10%`, meaning that `10%` of the total
+memory allocated to a node will be used as the indexing buffer size.
+This amount is then divided between all the different shards. Also, if a
+percentage is used, it is possible to set `min_index_buffer_size`
+(defaults to `48mb`) and `max_index_buffer_size`, which is unbounded by
+default.
+
+The `indices.memory.min_shard_index_buffer_size` setting allows a hard
+lower limit to be set for the memory allocated per shard for its own
+indexing buffer. It defaults to `4mb`.
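+
+For example, a sketch of raising the indexing buffer (the `20%` value is
+illustrative, not a recommendation):
+
+[source,yaml]
+--------------------------------------------------
+indices.memory.index_buffer_size: 20%
+--------------------------------------------------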
+
+[float]
+[[indices-ttl]]
+=== TTL interval
+
+The `indices.ttl.interval` setting can be set dynamically and controls
+how often expired documents will be automatically deleted. The default
+value is `60s`.
+
+The deletions are processed in bulk. You can set
+`indices.ttl.bulk_size` to fit your needs. The default value is `10000`.
+
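+For example, a sketch of changing the interval dynamically through the
+cluster update settings API (the `300s` value is illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/_cluster/settings -d '{
+    "transient" : {
+        "indices.ttl.interval" : "300s"
+    }
+}'
+--------------------------------------------------
+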
+See also <<mapping-ttl-field>>.
+
+[float]
+[[recovery]]
+=== Recovery
+
+The following settings can be set to manage recovery policy:
+
+[horizontal]
+`indices.recovery.concurrent_streams`::
+ defaults to `3`.
+
+`indices.recovery.file_chunk_size`::
+ defaults to `512kb`.
+
+`indices.recovery.translog_ops`::
+ defaults to `1000`.
+
+`indices.recovery.translog_size`::
+ defaults to `512kb`.
+
+`indices.recovery.compress`::
+ defaults to `true`.
+
+`indices.recovery.max_bytes_per_sec`::
+ defaults to `20mb`.
+
+[float]
+[[throttling]]
+=== Store level throttling
+
+The following settings can be set to control store throttling:
+
+[horizontal]
+`indices.store.throttle.type`::
+ could be `merge` (default), `not` or `all`. See <<index-modules-store>>.
+
+`indices.store.throttle.max_bytes_per_sec`::
+ defaults to `20mb`.
+
diff --git a/docs/reference/modules/memcached.asciidoc b/docs/reference/modules/memcached.asciidoc
new file mode 100644
index 0000000..20276d0
--- /dev/null
+++ b/docs/reference/modules/memcached.asciidoc
@@ -0,0 +1,69 @@
+[[modules-memcached]]
+== memcached
+
+The memcached module allows *elasticsearch*
+APIs to be exposed over the memcached protocol (as closely
+as possible).
+
+It is provided as a plugin called `transport-memcached`; installation is
+explained
+https://github.com/elasticsearch/elasticsearch-transport-memcached[here].
+Another option is to download the memcached plugin and place it
+under the `plugins` directory.
+
+Both the binary and the text memcached protocols are supported, with the
+correct one detected automatically.
+
+[float]
+=== Mapping REST to Memcached Protocol
+
+Memcached commands are mapped to REST and handled by the same generic
+REST layer in elasticsearch. Here is a list of the memcached commands
+supported:
+
+[float]
+==== GET
+
+The memcached `GET` command maps to a REST `GET`. The key used is the
+URI (with parameters). The main downside is the fact that the memcached
+`GET` does not allow a body in the request (and `SET` does not allow a
+result to be returned...). For this reason, most REST APIs (like search)
+also accept the "source" as a URI parameter.
+
+[float]
+==== SET
+
+The memcached `SET` command maps to a REST `POST`. The key used is the
+URI (with parameters), and the body maps to the REST body.
+
+[float]
+==== DELETE
+
+The memcached `DELETE` command maps to a REST `DELETE`. The key used is
+the URI (with parameters).
+
+[float]
+==== QUIT
+
+The memcached `QUIT` command is supported and disconnects the client.
+
+[float]
+=== Settings
+
+The following are the settings that can be configured for memcached:
+
+[cols="<,<",options="header",]
+|===============================================================
+|Setting |Description
+|`memcached.port` |A bind port range. Defaults to `11211-11311`.
+|===============================================================
+
+It also uses the common
+<<modules-network,network settings>>.
+
+[float]
+=== Disable memcached
+
+The memcached module can be completely disabled and not started by
+setting `memcached.enabled` to `false`. By default it is enabled once it
+is detected as a plugin.
diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc
new file mode 100644
index 0000000..835b6e4
--- /dev/null
+++ b/docs/reference/modules/network.asciidoc
@@ -0,0 +1,89 @@
+[[modules-network]]
+== Network Settings
+
+There are several modules within a Node that use network based
+configuration, for example, the
+<<modules-transport,transport>> and
+<<modules-http,http>> modules. Node level
+network settings allow common settings to be set that will be shared
+among all network based modules (unless explicitly overridden in each
+module).
+
+The `network.bind_host` setting controls the host that the different
+network components will bind on. By default, the bind host will be
+`anyLocalAddress` (typically `0.0.0.0` or `::0`).
+
+The `network.publish_host` setting controls the host that the node
+will publish itself as within the cluster, so other nodes will be able
+to connect to it. Of course, this can't be the `anyLocalAddress`, and by
+default, it will be the first non loopback address (if possible), or the
+local address.
+
+The `network.host` setting is a simple setting to automatically set both
+`network.bind_host` and `network.publish_host` to the same host value.
+
+Both settings can be configured with either an explicit host address
+or a host name. The settings also accept logical setting values,
+explained in the following table:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Logical Host Setting Value |Description
+|`_local_` |Will be resolved to the local ip address.
+
+|`_non_loopback_` |The first non loopback address.
+
+|`_non_loopback:ipv4_` |The first non loopback IPv4 address.
+
+|`_non_loopback:ipv6_` |The first non loopback IPv6 address.
+
+|`_[networkInterface]_` |Resolves to the ip address of the provided
+network interface. For example `_en0_`.
+
+|`_[networkInterface]:ipv4_` |Resolves to the ipv4 address of the
+provided network interface. For example `_en0:ipv4_`.
+
+|`_[networkInterface]:ipv6_` |Resolves to the ipv6 address of the
+provided network interface. For example `_en0:ipv6_`.
+|=======================================================================
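+
+For example, a sketch of binding and publishing on the first non
+loopback IPv4 address, using one of the logical values from the table
+above:
+
+[source,yaml]
+--------------------------------------------------
+network.host: _non_loopback:ipv4_
+--------------------------------------------------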
+
+When the `cloud-aws` plugin is installed, the following are also allowed
+as valid network host settings:
+
+[cols="<,<",options="header",]
+|==================================================================
+|EC2 Host Value |Description
+|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine.
+|`_ec2:privateDns_` |The private host of the machine.
+|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine.
+|`_ec2:publicDns_` |The public host of the machine.
+|`_ec2_` |Less verbose option for the private ip address.
+|`_ec2:privateIp_` |Less verbose option for the private ip address.
+|`_ec2:publicIp_` |Less verbose option for the public ip address.
+|==================================================================
+
+[float]
+[[tcp-settings]]
+=== TCP Settings
+
+Any component that uses TCP (like the HTTP, Transport and Memcached
+modules) shares the following allowed settings:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`network.tcp.no_delay` |Enable or disable tcp no delay setting.
+Defaults to `true`.
+
+|`network.tcp.keep_alive` |Enable or disable tcp keep alive. By default
+not explicitly set.
+
+|`network.tcp.reuse_address` |Should an address be reused or not.
+Defaults to `true` on non-windows machines.
+
+|`network.tcp.send_buffer_size` |The size of the tcp send buffer size
+(in size setting format). By default not explicitly set.
+
+|`network.tcp.receive_buffer_size` |The size of the tcp receive buffer
+size (in size setting format). By default not explicitly set.
+|=======================================================================
+
diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
new file mode 100644
index 0000000..5e40522
--- /dev/null
+++ b/docs/reference/modules/node.asciidoc
@@ -0,0 +1,32 @@
+[[modules-node]]
+== Node
+
+*elasticsearch* allows a node to be configured to either store
+data locally or not. Storing data locally basically means that shards of
+different indices are allowed to be allocated on that node. By default,
+each node is considered to be a data node, and this can be turned off by
+setting `node.data` to `false`.
+
+This is a powerful setting, allowing the simple creation of smart load
+balancers that take part in some of the API processing. Let's take
+an example:
+
+We can start a whole cluster of data nodes which do not even start an
+HTTP transport by setting `http.enabled` to `false`. Such nodes will
+communicate with one another using the
+<<modules-transport,transport>> module. In front
+of the cluster we can start one or more "non data" nodes which will
+start with HTTP enabled. All HTTP communication will be performed
+through these "non data" nodes.
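+
+For example, a sketch of a "non data" front node in
+`config/elasticsearch.yml` (the data nodes would use the opposite
+values):
+
+[source,yaml]
+--------------------------------------------------
+node.data: false
+http.enabled: true
+--------------------------------------------------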
+
+The first benefit of this approach is the ability to create smart load
+balancers. These "non data" nodes are still part of the cluster, and
+they redirect operations exactly to the node that holds the relevant
+data. The other benefit is the fact that for scatter / gather based
+operations (such as search), these nodes will take part in the
+processing, since they will start the scatter process and perform the
+actual gather processing.
+
+This leaves the data nodes to do the heavy duty work of indexing and
+searching, without needing to process HTTP requests (parsing), overload
+the network, or perform the gather processing.
diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc
new file mode 100644
index 0000000..6c39d72
--- /dev/null
+++ b/docs/reference/modules/plugins.asciidoc
@@ -0,0 +1,283 @@
+[[modules-plugins]]
+== Plugins
+
+[float]
+=== Plugins
+
+Plugins are a way to enhance the basic elasticsearch functionality in a
+custom manner. They range from adding custom mapping types, custom
+analyzers (in a more built-in fashion), native scripts, custom
+discovery, and more.
+
+[float]
+[[installing]]
+==== Installing plugins
+
+Installing plugins can either be done manually by placing them under the
+`plugins` directory, or using the `plugin` script. Several plugins can
+be found under the https://github.com/elasticsearch[elasticsearch]
+organization in GitHub, starting with `elasticsearch-`.
+
+Installing a plugin typically takes the following form:
+
+[source,shell]
+-----------------------------------
+plugin --install <org>/<user/component>/<version>
+-----------------------------------
+
+In this case, the plugin will be automatically downloaded from
+`download.elasticsearch.org`, and if it doesn't exist there, from maven
+(central and sonatype).
+
+Note that when the plugin is located in maven central or sonatype
+repository, `<org>` is the artifact `groupId` and `<user/component>` is
+the `artifactId`.
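+
+For example, a sketch of installing one of the officially hosted plugins
+listed below (the version number is illustrative):
+
+[source,shell]
+-----------------------------------
+bin/plugin --install elasticsearch/elasticsearch-analysis-icu/2.0.0
+-----------------------------------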
+
+A plugin can also be installed directly by specifying the URL for it,
+for example:
+
+[source,shell]
+-----------------------------------
+bin/plugin --url file:///path/to/plugin --install plugin-name
+-----------------------------------
+
+You can run `bin/plugin -h` to list the available options.
+
+[float]
+[[site-plugins]]
+==== Site Plugins
+
+Plugins can have "sites" in them, any plugin that exists under the
+`plugins` directory with a `_site` directory, its content will be
+statically served when hitting `/_plugin/[plugin_name]/` url. Those can
+be added even after the process has started.
+
+Installed plugins that do not contain any java related content will
+automatically be detected as site plugins, and their content will be
+moved under `_site`.
+
+The ability to install plugins from GitHub makes it easy to install site
+plugins hosted there by downloading the actual repo; for example,
+running:
+
+[source,js]
+--------------------------------------------------
+bin/plugin --install mobz/elasticsearch-head
+bin/plugin --install lukas-vlcek/bigdesk
+--------------------------------------------------
+
+will install both of those site plugins, with `elasticsearch-head`
+available under `http://localhost:9200/_plugin/head/` and `bigdesk`
+available under `http://localhost:9200/_plugin/bigdesk/`.
+
+[float]
+==== Mandatory Plugins
+
+If you rely on some plugins, you can define mandatory plugins using the
+`plugin.mandatory` attribute, for example, here is a sample config:
+
+[source,js]
+--------------------------------------------------
+plugin.mandatory: mapper-attachments,lang-groovy
+--------------------------------------------------
+
+For safety reasons, if a mandatory plugin is not installed, the node
+will not start.
+
+[float]
+==== Installed Plugins
+
+A list of the currently loaded plugins can be retrieved using the
+<<cluster-nodes-info,Node Info API>>.
+
+[float]
+==== Removing plugins
+
+Removing plugins can either be done manually by removing them from the
+`plugins` directory, or using the `plugin` script.
+
+Removing a plugin typically takes the following form:
+
+[source,shell]
+-----------------------------------
+plugin --remove <pluginname>
+-----------------------------------
+
+[float]
+==== Silent/Verbose mode
+
+When running the `plugin` script, you can get more information (debug
+mode) using `--verbose`. Conversely, if you want the `plugin` script to
+be silent, use the `--silent` option.
+
+Note that exit codes could be:
+
+* `0`: everything was OK
+* `64`: unknown command or incorrect option parameter
+* `74`: IO error
+* `70`: other errors
+
+[source,shell]
+-----------------------------------
+bin/plugin --install mobz/elasticsearch-head --verbose
+plugin --remove head --silent
+-----------------------------------
+
+[float]
+==== Timeout settings
+
+By default, the `plugin` script will wait indefinitely when downloading
+before failing. The timeout parameter can be used to explicitly specify
+how long it waits. Here are some examples of setting it to different
+values:
+
+[source,shell]
+-----------------------------------
+# Wait for 30 seconds before failing
+bin/plugin --install mobz/elasticsearch-head --timeout 30s
+
+# Wait for 1 minute before failing
+bin/plugin --install mobz/elasticsearch-head --timeout 1m
+
+# Wait forever (default)
+bin/plugin --install mobz/elasticsearch-head --timeout 0
+-----------------------------------
+
+[float]
+[[known-plugins]]
+=== Known Plugins
+
+[float]
+[[analysis-plugins]]
+==== Analysis Plugins
+
+.Supported by Elasticsearch
+* https://github.com/elasticsearch/elasticsearch-analysis-icu[ICU Analysis plugin]
+* https://github.com/elasticsearch/elasticsearch-analysis-kuromoji[Japanese (Kuromoji) Analysis plugin].
+* https://github.com/elasticsearch/elasticsearch-analysis-smartcn[Smart Chinese Analysis Plugin]
+* https://github.com/elasticsearch/elasticsearch-analysis-stempel[Stempel (Polish) Analysis plugin]
+
+.Supported by the community
+* https://github.com/barminator/elasticsearch-analysis-annotation[Annotation Analysis Plugin] (by Michal Samek)
+* https://github.com/yakaz/elasticsearch-analysis-combo/[Combo Analysis Plugin] (by Olivier Favre, Yakaz)
+* https://github.com/jprante/elasticsearch-analysis-hunspell[Hunspell Analysis Plugin] (by Jörg Prante)
+* https://github.com/medcl/elasticsearch-analysis-ik[IK Analysis Plugin] (by Medcl)
+* https://github.com/suguru/elasticsearch-analysis-japanese[Japanese Analysis plugin] (by suguru).
+* https://github.com/medcl/elasticsearch-analysis-mmseg[Mmseg Analysis Plugin] (by Medcl)
+* https://github.com/chytreg/elasticsearch-analysis-morfologik[Morfologik (Polish) Analysis plugin] (by chytreg)
+* https://github.com/imotov/elasticsearch-analysis-morphology[Russian and English Morphological Analysis Plugin] (by Igor Motov)
+* https://github.com/medcl/elasticsearch-analysis-pinyin[Pinyin Analysis Plugin] (by Medcl)
+* https://github.com/medcl/elasticsearch-analysis-string2int[String2Integer Analysis Plugin] (by Medcl)
+
+[float]
+[[discovery-plugins]]
+==== Discovery Plugins
+
+.Supported by Elasticsearch
+* https://github.com/elasticsearch/elasticsearch-cloud-aws[AWS Cloud Plugin] - EC2 discovery and S3 Repository
+* https://github.com/elasticsearch/elasticsearch-cloud-azure[Azure Cloud Plugin] - Azure discovery
+* https://github.com/elasticsearch/elasticsearch-cloud-gce[Google Compute Engine Cloud Plugin] - GCE discovery
+
+[float]
+[[river]]
+==== River Plugins
+
+.Supported by Elasticsearch
+* https://github.com/elasticsearch/elasticsearch-river-couchdb[CouchDB River Plugin]
+* https://github.com/elasticsearch/elasticsearch-river-rabbitmq[RabbitMQ River Plugin]
+* https://github.com/elasticsearch/elasticsearch-river-twitter[Twitter River Plugin]
+* https://github.com/elasticsearch/elasticsearch-river-wikipedia[Wikipedia River Plugin]
+
+.Supported by the community
+* https://github.com/domdorn/elasticsearch-river-activemq/[ActiveMQ River Plugin] (by Dominik Dorn)
+* https://github.com/albogdano/elasticsearch-river-amazonsqs[Amazon SQS River Plugin] (by Alex Bogdanovski)
+* https://github.com/xxBedy/elasticsearch-river-csv[CSV River Plugin] (by Martin Bednar)
+* http://www.pilato.fr/dropbox/[Dropbox River Plugin] (by David Pilato)
+* http://www.pilato.fr/fsriver/[FileSystem River Plugin] (by David Pilato)
+* https://github.com/obazoud/elasticsearch-river-git[Git River Plugin] (by Olivier Bazoud)
+* https://github.com/uberVU/elasticsearch-river-github[GitHub River Plugin] (by uberVU)
+* https://github.com/sksamuel/elasticsearch-river-hazelcast[Hazelcast River Plugin] (by Steve Samuel)
+* https://github.com/jprante/elasticsearch-river-jdbc[JDBC River Plugin] (by Jörg Prante)
+* https://github.com/qotho/elasticsearch-river-jms[JMS River Plugin] (by Steve Sarandos)
+* https://github.com/endgameinc/elasticsearch-river-kafka[Kafka River Plugin] (by Endgame Inc.)
+* https://github.com/tlrx/elasticsearch-river-ldap[LDAP River Plugin] (by Tanguy Leroux)
+* https://github.com/richardwilly98/elasticsearch-river-mongodb/[MongoDB River Plugin] (by Richard Louapre)
+* https://github.com/sksamuel/elasticsearch-river-neo4j[Neo4j River Plugin] (by Steve Samuel)
+* https://github.com/jprante/elasticsearch-river-oai/[Open Archives Initiative (OAI) River Plugin] (by Jörg Prante)
+* https://github.com/sksamuel/elasticsearch-river-redis[Redis River Plugin] (by Steve Samuel)
+* http://dadoonet.github.com/rssriver/[RSS River Plugin] (by David Pilato)
+* https://github.com/adamlofts/elasticsearch-river-sofa[Sofa River Plugin] (by adamlofts)
+* https://github.com/javanna/elasticsearch-river-solr/[Solr River Plugin] (by Luca Cavanna)
+* https://github.com/sunnygleason/elasticsearch-river-st9[St9 River Plugin] (by Sunny Gleason)
+* https://github.com/plombard/SubversionRiver[Subversion River Plugin] (by Pascal Lombard)
+* https://github.com/kzwang/elasticsearch-river-dynamodb[DynamoDB River Plugin] (by Kevin Wang)
+
+[float]
+[[transport]]
+==== Transport Plugins
+
+.Supported by Elasticsearch
+* https://github.com/elasticsearch/elasticsearch-transport-memcached[Memcached transport plugin]
+* https://github.com/elasticsearch/elasticsearch-transport-thrift[Thrift Transport]
+* https://github.com/elasticsearch/elasticsearch-transport-wares[Servlet transport]
+
+.Supported by the community
+* https://github.com/tlrx/transport-zeromq[ZeroMQ transport layer plugin] (by Tanguy Leroux)
+* https://github.com/sonian/elasticsearch-jetty[Jetty HTTP transport plugin] (by Sonian Inc.)
+* https://github.com/kzwang/elasticsearch-transport-redis[Redis transport plugin] (by Kevin Wang)
+
+[float]
+[[scripting]]
+==== Scripting Plugins
+
+.Supported by Elasticsearch
+* https://github.com/hiredman/elasticsearch-lang-clojure[Clojure Language Plugin] (by Kevin Downey)
+* https://github.com/elasticsearch/elasticsearch-lang-groovy[Groovy lang Plugin]
+* https://github.com/elasticsearch/elasticsearch-lang-javascript[JavaScript language Plugin]
+* https://github.com/elasticsearch/elasticsearch-lang-python[Python language Plugin]
+
+[float]
+[[site]]
+==== Site Plugins
+
+.Supported by the community
+* https://github.com/lukas-vlcek/bigdesk[BigDesk Plugin] (by Lukáš Vlček)
+* https://github.com/mobz/elasticsearch-head[Elasticsearch Head Plugin] (by Ben Birch)
+* https://github.com/royrusso/elasticsearch-HQ[Elasticsearch HQ] (by Roy Russo)
+* https://github.com/andrewvc/elastic-hammer[Hammer Plugin] (by Andrew Cholakian)
+* https://github.com/polyfractal/elasticsearch-inquisitor[Inquisitor Plugin] (by Zachary Tong)
+* https://github.com/karmi/elasticsearch-paramedic[Paramedic Plugin] (by Karel Minařík)
+* https://github.com/polyfractal/elasticsearch-segmentspy[SegmentSpy Plugin] (by Zachary Tong)
+
+[float]
+[[repository-plugins]]
+==== Snapshot/Restore Repository Plugins
+
+.Supported by Elasticsearch
+
+* https://github.com/elasticsearch/elasticsearch-hadoop/tree/master/repository-hdfs[Hadoop HDFS] Repository
+* https://github.com/elasticsearch/elasticsearch-cloud-aws#s3-repository[AWS S3] Repository
+
+.Supported by the community
+
+* https://github.com/kzwang/elasticsearch-repository-gridfs[GridFS] Repository (by Kevin Wang)
+
+[float]
+[[misc]]
+==== Misc Plugins
+
+.Supported by Elasticsearch
+* https://github.com/elasticsearch/elasticsearch-mapper-attachments[Mapper Attachments Type plugin]
+
+.Supported by the community
+* https://github.com/carrot2/elasticsearch-carrot2[carrot2 Plugin]: Results clustering with carrot2 (by Dawid Weiss)
+* https://github.com/derryx/elasticsearch-changes-plugin[Elasticsearch Changes Plugin] (by Thomas Peuss)
+* https://github.com/johtani/elasticsearch-extended-analyze[Extended Analyze Plugin] (by Jun Ohtani)
+* https://github.com/spinscale/elasticsearch-graphite-plugin[Elasticsearch Graphite Plugin] (by Alexander Reelsen)
+* https://github.com/mattweber/elasticsearch-mocksolrplugin[Elasticsearch Mock Solr Plugin] (by Matt Weber)
+* https://github.com/viniciusccarvalho/elasticsearch-newrelic[Elasticsearch New Relic Plugin] (by Vinicius Carvalho)
+* https://github.com/swoop-inc/elasticsearch-statsd-plugin[Elasticsearch Statsd Plugin] (by Swoop Inc.)
+* https://github.com/endgameinc/elasticsearch-term-plugin[Terms Component Plugin] (by Endgame Inc.)
+* http://tlrx.github.com/elasticsearch-view-plugin[Elasticsearch View Plugin] (by Tanguy Leroux)
+* https://github.com/sonian/elasticsearch-zookeeper[ZooKeeper Discovery Plugin] (by Sonian Inc.)
+
+
diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc
new file mode 100644
index 0000000..166a030
--- /dev/null
+++ b/docs/reference/modules/scripting.asciidoc
@@ -0,0 +1,316 @@
+[[modules-scripting]]
+== Scripting
+
+The scripting module allows scripts to be used in order to evaluate
+custom expressions. For example, scripts can be used to return "script
+fields" as part of a search request, or can be used to evaluate a custom
+score for a query, and so on.
+
+By default, the scripting module uses http://mvel.codehaus.org/[mvel]
+as the scripting language, with some extensions. mvel is used since it
+is extremely fast and very simple to use, and in most cases, simple
+expressions are needed (for example, mathematical equations).
+
+Additional `lang` plugins are provided to allow scripts to be executed
+in different languages. Currently supported plugins are `lang-javascript`
+for JavaScript, `lang-groovy` for Groovy, and `lang-python` for Python.
+Wherever a `script` parameter can be used, a `lang` parameter
+(on the same level) can be provided to define the language of the
+script. The `lang` options are `mvel`, `js`, `groovy`, `python`, and
+`native`.
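+
+For example, a sketch of a search request returning a script field; the
+index and field names are hypothetical:
+
+[source,js]
+-----------------------------------
+curl -XGET 'localhost:9200/test/_search' -d '{
+    "query" : { "match_all" : {} },
+    "script_fields" : {
+        "doubled_price" : {
+            "script" : "doc[\"price\"].value * 2",
+            "lang" : "mvel"
+        }
+    }
+}'
+-----------------------------------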
+
+[float]
+=== Default Scripting Language
+
+The default scripting language (assuming no `lang` parameter is
+provided) is `mvel`. In order to change it, set `script.default_lang`
+to the appropriate language.
+
+[float]
+=== Preloaded Scripts
+
+Scripts can always be provided as part of the relevant API, but they can
+also be preloaded by placing them under `config/scripts` and then
+referencing them by the script name (instead of providing the full
+script). This helps reduce the amount of data passed between the client
+and the nodes.
+
+The name of the script is derived from the hierarchy of directories it
+exists under, and the file name without the lang extension. For example,
+a script placed under `config/scripts/group1/group2/test.py` will be
+named `group1_group2_test`.
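+
+For example, a sketch of referencing that preloaded script by name in a
+search request, assuming the `lang-python` plugin is installed (since
+the example script is a `.py` file):
+
+[source,js]
+-----------------------------------
+curl -XGET 'localhost:9200/test/_search' -d '{
+    "script_fields" : {
+        "computed" : {
+            "script" : "group1_group2_test",
+            "lang" : "python"
+        }
+    }
+}'
+-----------------------------------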
+
+[float]
+=== Disabling dynamic scripts
+
+We recommend running Elasticsearch behind an application or proxy,
+which protects Elasticsearch from the outside world. If users are
+allowed to run dynamic scripts (even in a search request), then they
+have the same access to your box as the user that Elasticsearch is
+running as.
+
+First, you should not run Elasticsearch as the `root` user, as this
+would allow a script to access or do *anything* on your server, without
+limitations. Second, you should not expose Elasticsearch directly to
+users, but instead have a proxy application in between. If you *do*
+intend to expose Elasticsearch directly to your users, then you have
+to decide whether you trust them enough to run scripts on your box or
+not. If not, then even if you have a proxy which only allows `GET`
+requests, you should disable dynamic scripting by adding the following
+setting to the `config/elasticsearch.yml` file on every node:
+
+[source,yaml]
+-----------------------------------
+script.disable_dynamic: true
+-----------------------------------
+
+This will still allow execution of named scripts provided in the config, or
+_native_ Java scripts registered through plugins, however it will prevent
+users from running arbitrary scripts via the API.
+
+[float]
+=== Automatic Script Reloading
+
+The `config/scripts` directory is scanned periodically for changes.
+New and changed scripts are reloaded and deleted scripts are removed
+from the preloaded scripts cache. The reload frequency can be specified
+using the `watcher.interval` setting, which defaults to `60s`.
+To disable script reloading completely, set `script.auto_reload_enabled`
+to `false`.
+
+[float]
+=== Native (Java) Scripts
+
+Even though `mvel` is pretty fast, it is possible to register native
+Java based scripts for faster execution.
+
+In order to provide a native script, a `NativeScriptFactory` needs to be
+implemented that constructs the script that will be executed. There are
+two main types, one that extends `AbstractExecutableScript` and one that
+extends `AbstractSearchScript` (probably the one most users will extend,
+with additional helper classes in `AbstractLongSearchScript`,
+`AbstractDoubleSearchScript`, and `AbstractFloatSearchScript`).
+
+Registering them can either be done by settings, for example:
+`script.native.my.type` set to `sample.MyNativeScriptFactory` will
+register a script named `my`. Another option is, in a plugin, to access
+the `ScriptModule` and call `registerScript` on it.
+
+Executing the script is done by specifying the `lang` as `native`, and
+the name of the script as the `script`.
+
+Note, the scripts need to be in the classpath of elasticsearch. One
+simple way to do this is to create a directory under plugins (choose a
+descriptive name), and place the jar / class files there; they will be
+automatically loaded.
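+
+A minimal sketch of a native script, assuming the 1.x
+`NativeScriptFactory` / `AbstractSearchScript` API described above; the
+class names, the registration key `my`, and the `factor` parameter are
+all hypothetical:
+
+[source,java]
+-----------------------------------
+package sample;
+
+import java.util.Map;
+
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+// Registered via: script.native.my.type: sample.MyNativeScriptFactory
+public class MyNativeScriptFactory implements NativeScriptFactory {
+
+    @Override
+    public ExecutableScript newScript(Map<String, Object> params) {
+        // Read an optional script parameter, falling back to a default.
+        Object raw = params == null ? null : params.get("factor");
+        int factor = raw instanceof Number ? ((Number) raw).intValue() : 1;
+        return new MyScript(factor);
+    }
+
+    static class MyScript extends AbstractSearchScript {
+        private final int factor;
+
+        MyScript(int factor) {
+            this.factor = factor;
+        }
+
+        @Override
+        public Object run() {
+            // Illustrative logic only: echo the parameter back.
+            return factor;
+        }
+    }
+}
+-----------------------------------
+
+It would then be invoked with `"lang" : "native"` and `"script" : "my"`.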
+
+[float]
+=== Score
+
+All scripts that can be used in facets allow access to the current
+doc score using `doc.score`.
+
+[float]
+=== Computing scores based on terms in scripts
+
+see <<modules-advanced-scripting, advanced scripting documentation>>
+
+[float]
+=== Document Fields
+
+Most scripting revolves around the use of specific document field data.
+The `doc['field_name']` syntax can be used to access specific field data
+within a document (the document in question is usually derived from the
+context in which the script is used). Document fields are very fast to
+access, since they end up being loaded into memory (all the relevant
+field values/tokens are loaded to memory).
+
+The following data can be extracted from a field:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Expression |Description
+|`doc['field_name'].value` |The native value of the field. For example,
+if it's a short type, it will be short.
+
+|`doc['field_name'].values` |The native array values of the field. For
+example, if it's a short type, it will be short[]. Remember, a field can
+have several values within a single doc. Returns an empty array if the
+field has no values.
+
+|`doc['field_name'].empty` |A boolean indicating if the field has no
+values within the doc.
+
+|`doc['field_name'].multiValued` |A boolean indicating that the field
+has several values within the corpus.
+
+|`doc['field_name'].lat` |The latitude of a geo point type.
+
+|`doc['field_name'].lon` |The longitude of a geo point type.
+
+|`doc['field_name'].lats` |The latitudes of a geo point type.
+
+|`doc['field_name'].lons` |The longitudes of a geo point type.
+
+|`doc['field_name'].distance(lat, lon)` |The `plane` distance (in meters)
+of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].distanceWithDefault(lat, lon, default)` |The `plane` distance (in meters)
+of this geo point field from the provided lat/lon with a default value.
+
+|`doc['field_name'].distanceInMiles(lat, lon)` |The `plane` distance (in
+miles) of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].distanceInMilesWithDefault(lat, lon, default)` |The `plane` distance (in
+miles) of this geo point field from the provided lat/lon with a default value.
+
+|`doc['field_name'].distanceInKm(lat, lon)` |The `plane` distance (in
+km) of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].distanceInKmWithDefault(lat, lon, default)` |The `plane` distance (in
+km) of this geo point field from the provided lat/lon with a default value.
+
+|`doc['field_name'].arcDistance(lat, lon)` |The `arc` distance (in
+meters) of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].arcDistanceWithDefault(lat, lon, default)` |The `arc` distance (in
+meters) of this geo point field from the provided lat/lon with a default value.
+
+|`doc['field_name'].arcDistanceInMiles(lat, lon)` |The `arc` distance (in
+miles) of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].arcDistanceInMilesWithDefault(lat, lon, default)` |The `arc` distance (in
+miles) of this geo point field from the provided lat/lon with a default value.
+
+|`doc['field_name'].arcDistanceInKm(lat, lon)` |The `arc` distance (in
+km) of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].arcDistanceInKmWithDefault(lat, lon, default)` |The `arc` distance (in
+km) of this geo point field from the provided lat/lon with a default value.
+
+|`doc['field_name'].factorDistance(lat, lon)` |The distance factor of this geo point field from the provided lat/lon.
+
+|`doc['field_name'].factorDistance(lat, lon, default)` |The distance factor of this geo point field from the provided lat/lon with a default value.
+
+
+|=======================================================================
+
+[float]
+=== Stored Fields
+
+Stored fields can also be accessed when executing a script. Note, they
+are much slower to access compared with document fields, but are not
+loaded into memory. They can simply be accessed using
+`_fields['my_field_name'].value` or `_fields['my_field_name'].values`.
+
+[float]
+=== Source Field
+
+The source field can also be accessed when executing a script. The
+source field is loaded per doc, parsed, and then provided to the script
+for evaluation. The `_source` forms the context under which the source
+field can be accessed, for example `_source.obj2.obj1.field3`.
+
+Accessing `_source` is much slower compared to using `_doc`
+but the data is not loaded into memory. For a single field access `_fields` may be
+faster than using `_source` due to the extra overhead of potentially parsing large documents.
+However, `_source` may be faster if you access multiple fields or if the source has already been
+loaded for other purposes.
+
+
+[float]
+=== mvel Built In Functions
+
+There are several built in functions that can be used within scripts.
+They include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Function |Description
+|`time()` |The current time in milliseconds.
+
+|`sin(a)` |Returns the trigonometric sine of an angle.
+
+|`cos(a)` |Returns the trigonometric cosine of an angle.
+
+|`tan(a)` |Returns the trigonometric tangent of an angle.
+
+|`asin(a)` |Returns the arc sine of a value.
+
+|`acos(a)` |Returns the arc cosine of a value.
+
+|`atan(a)` |Returns the arc tangent of a value.
+
+|`toRadians(angdeg)` |Converts an angle measured in degrees to an
+approximately equivalent angle measured in radians
+
+|`toDegrees(angrad)` |Converts an angle measured in radians to an
+approximately equivalent angle measured in degrees.
+
+|`exp(a)` |Returns Euler's number _e_ raised to the power of value.
+
+|`log(a)` |Returns the natural logarithm (base _e_) of a value.
+
+|`log10(a)` |Returns the base 10 logarithm of a value.
+
+|`sqrt(a)` |Returns the correctly rounded positive square root of a
+value.
+
+|`cbrt(a)` |Returns the cube root of a double value.
+
+|`IEEEremainder(f1, f2)` |Computes the remainder operation on two
+arguments as prescribed by the IEEE 754 standard.
+
+|`ceil(a)` |Returns the smallest (closest to negative infinity) value
+that is greater than or equal to the argument and is equal to a
+mathematical integer.
+
+|`floor(a)` |Returns the largest (closest to positive infinity) value
+that is less than or equal to the argument and is equal to a
+mathematical integer.
+
+|`rint(a)` |Returns the value that is closest in value to the argument
+and is equal to a mathematical integer.
+
+|`atan2(y, x)` |Returns the angle _theta_ from the conversion of
+rectangular coordinates (_x_, _y_) to polar coordinates (_r_, _theta_).
+
+|`pow(a, b)` |Returns the value of the first argument raised to the
+power of the second argument.
+
+|`round(a)` |Returns the closest _int_ to the argument.
+
+|`random()` |Returns a random _double_ value.
+
+|`abs(a)` |Returns the absolute value of a value.
+
+|`max(a, b)` |Returns the greater of two values.
+
+|`min(a, b)` |Returns the smaller of two values.
+
+|`ulp(d)` |Returns the size of an ulp of the argument.
+
+|`signum(d)` |Returns the signum function of the argument.
+
+|`sinh(x)` |Returns the hyperbolic sine of a value.
+
+|`cosh(x)` |Returns the hyperbolic cosine of a value.
+
+|`tanh(x)` |Returns the hyperbolic tangent of a value.
+
+|`hypot(x, y)` |Returns sqrt(x^2^ + y^2^) without intermediate overflow
+or underflow.
+|=======================================================================
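+
+As an illustration, a minimal sketch using one of these functions in a
+script field (the numeric field `num` is assumed):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : { "match_all" : {} },
+    "script_fields" : {
+        "num_sqrt" : {
+            "script" : "sqrt(doc['num'].value)"
+        }
+    }
+}
+--------------------------------------------------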
+
+[float]
+=== Arithmetic precision in MVEL
+
+When dividing two numbers in MVEL-based scripts, the engine adheres to
+the default behaviour of Java. This means that if you divide two
+integers (you might have configured the fields as integer in the
+mapping), the result will also be an integer. So if a calculation like
+`1/num` happens in your script and `num` is an integer with the value
+`8`, the result is `0` even though you were expecting `0.125`. You may
+need to enforce precision by explicitly using a double like `1.0/num`
+in order to get the expected result.
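+
+For example, a minimal sketch contrasting the two in script fields
+(assuming an integer field `num` holding the value `8`; `int_division`
+comes back as `0`, `double_division` as `0.125`):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : { "match_all" : {} },
+    "script_fields" : {
+        "int_division" : {
+            "script" : "1/doc['num'].value"
+        },
+        "double_division" : {
+            "script" : "1.0/doc['num'].value"
+        }
+    }
+}
+--------------------------------------------------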
diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
new file mode 100644
index 0000000..309b5f8
--- /dev/null
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -0,0 +1,207 @@
+[[modules-snapshots]]
+== Snapshot And Restore
+
+The snapshot and restore module allows you to create snapshots of individual indices or an entire cluster into a remote
+repository. At the time of the initial release, only the shared file system repository is supported.
+
+[float]
+=== Repositories
+
+Before any snapshot or restore operation can be performed, a snapshot repository should be registered in
+Elasticsearch. The following command registers a shared file system repository with the name `my_backup` that
+will use the location `/mount/backups/my_backup` to store snapshots.
+
+[source,js]
+-----------------------------------
+$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup' -d '{
+ "type": "fs",
+ "settings": {
+ "location": "/mount/backups/my_backup",
+ "compress": true
+ }
+}'
+-----------------------------------
+
+Once the repository is registered, its information can be obtained using the following command:
+
+[source,js]
+-----------------------------------
+$ curl -XGET 'http://localhost:9200/_snapshot/my_backup?pretty'
+-----------------------------------
+[source,js]
+-----------------------------------
+{
+ "my_backup" : {
+ "type" : "fs",
+ "settings" : {
+ "compress" : "false",
+ "location" : "/mount/backups/my_backup"
+ }
+ }
+}
+-----------------------------------
+
+If a repository name is not specified, or `_all` is used as the repository name, Elasticsearch will return information about
+all repositories currently registered in the cluster:
+
+[source,js]
+-----------------------------------
+$ curl -XGET 'http://localhost:9200/_snapshot'
+-----------------------------------
+
+or
+
+[source,js]
+-----------------------------------
+$ curl -XGET 'http://localhost:9200/_snapshot/_all'
+-----------------------------------
+
+[float]
+===== Shared File System Repository
+
+The shared file system repository (`"type": "fs"`) uses a shared file system to store snapshots. The path
+specified in the `location` parameter should point to the same location in the shared filesystem and be accessible
+on all data and master nodes. The following settings are supported:
+
+[horizontal]
+`location`:: Location of the snapshots. Mandatory.
+`compress`:: Turns on compression of the snapshot files. Defaults to `true`.
+`concurrent_streams`:: Throttles the number of streams (per node) performing the snapshot operation. Defaults to `5`.
+`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by
+ using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size).
+`max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `20mb` per second.
+`max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `20mb` per second.
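+
+For example, a sketch of registering the same repository with chunking
+and throttling settings (the values are illustrative):
+
+[source,js]
+-----------------------------------
+$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup' -d '{
+    "type": "fs",
+    "settings": {
+        "location": "/mount/backups/my_backup",
+        "chunk_size": "10m",
+        "max_snapshot_bytes_per_sec": "40mb",
+        "max_restore_bytes_per_sec": "40mb"
+    }
+}'
+-----------------------------------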
+
+
+[float]
+===== Read-only URL Repository
+
+The URL repository (`"type": "url"`) can be used as an alternative read-only way to access data created by the shared
+file system repository. The URL specified in the `url` parameter should
+point to the root of the shared filesystem repository. The following settings are supported:
+
+[horizontal]
+`url`:: Location of the snapshots. Mandatory.
+`concurrent_streams`:: Throttles the number of streams (per node) performing the snapshot operation. Defaults to `5`.
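+
+For example, a sketch of registering a read-only repository (the
+repository name and URL are illustrative; the URL should point at the
+root of an existing shared file system repository):
+
+[source,js]
+-----------------------------------
+$ curl -XPUT 'http://localhost:9200/_snapshot/my_readonly_backup' -d '{
+    "type": "url",
+    "settings": {
+        "url": "http://backups.example.com/my_backup"
+    }
+}'
+-----------------------------------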
+
+
+[float]
+===== Repository plugins
+
+Other repository backends are available in these official plugins:
+
+* https://github.com/elasticsearch/elasticsearch-cloud-aws#s3-repository[AWS Cloud Plugin] for S3 repositories
+* https://github.com/elasticsearch/elasticsearch-hadoop/tree/master/repository-hdfs[HDFS Plugin] for Hadoop environments
+* https://github.com/elasticsearch/elasticsearch-cloud-azure#azure-repository[Azure Cloud Plugin] for Azure storage repositories
+
+[float]
+=== Snapshot
+
+A repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the
+cluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following
+command:
+
+[source,js]
+-----------------------------------
+$ curl -XPUT "localhost:9200/_snapshot/my_backup/snapshot_1?wait_for_completion=true"
+-----------------------------------
+
+The `wait_for_completion` parameter specifies whether the request should return immediately or wait for snapshot
+completion. By default, a snapshot of all open and started indices in the cluster is created. This behavior can be changed
+by specifying the list of indices in the body of the snapshot request.
+
+[source,js]
+-----------------------------------
+$ curl -XPUT "localhost:9200/_snapshot/my_backup/snapshot_1" -d '{
+ "indices": "index_1,index_2",
+ "ignore_unavailable": "true",
+ "include_global_state": false
+}'
+-----------------------------------
+
+The list of indices that should be included in the snapshot can be specified using the `indices` parameter, which
+supports <<search-multi-index-type,multi index syntax>>. The snapshot request also supports the
+`ignore_unavailable` option. Setting it to `true` will cause indices that do not exist to be ignored during snapshot
+creation. By default, when the `ignore_unavailable` option is not set and an index is missing, the snapshot request will fail.
+By setting `include_global_state` to `false` it's possible to prevent the cluster global state from being stored as part of
+the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have
+all primary shards available. This behaviour can be changed by setting `partial` to `true`.
+
+The index snapshot process is incremental. In the process of making the index snapshot, Elasticsearch analyses
+the list of the index files that are already stored in the repository and copies only files that were created or
+changed since the last snapshot. This allows multiple snapshots to be preserved in the repository in a compact form.
+The snapshot process is executed in a non-blocking fashion. All indexing and searching operations can continue to be
+executed against the index that is being snapshotted. However, a snapshot represents the point-in-time view of the index
+at the moment the snapshot was created, so no records that were added to the index after the snapshot process started
+will be present in the snapshot.
+
+Besides creating a copy of each index the snapshot process can also store global cluster metadata, which includes persistent
+cluster settings and templates. The transient settings and registered snapshot repositories are not stored as part of
+the snapshot.
+
+Only one snapshot process can be executed in the cluster at any time. While a snapshot of a particular shard is being
+created, this shard cannot be moved to another node, which can interfere with the rebalancing process and allocation
+filtering. Once the snapshot of the shard is finished, Elasticsearch will be able to move the shard to another node
+according to the current allocation filtering settings and rebalancing algorithm.
+
+Once a snapshot is created, information about this snapshot can be obtained using the following command:
+
+[source,shell]
+-----------------------------------
+$ curl -XGET "localhost:9200/_snapshot/my_backup/snapshot_1"
+-----------------------------------
+
+All snapshots currently stored in the repository can be listed using the following command:
+
+[source,shell]
+-----------------------------------
+$ curl -XGET "localhost:9200/_snapshot/my_backup/_all"
+-----------------------------------
+
+A snapshot can be deleted from the repository using the following command:
+
+[source,shell]
+-----------------------------------
+$ curl -XDELETE "localhost:9200/_snapshot/my_backup/snapshot_1"
+-----------------------------------
+
+When a snapshot is deleted from a repository, Elasticsearch deletes all files that are associated with the deleted
+snapshot and not used by any other snapshots. If the delete snapshot operation is executed while the snapshot is being
+created, the snapshot process will be aborted and all files created as part of it will be
+cleaned up. Therefore, the delete snapshot operation can be used to cancel long-running snapshot operations that were
+started by mistake.
+
+
+[float]
+=== Restore
+
+A snapshot can be restored using the following command:
+
+[source,shell]
+-----------------------------------
+$ curl -XPOST "localhost:9200/_snapshot/my_backup/snapshot_1/_restore"
+-----------------------------------
+
+By default, all indices in the snapshot as well as the cluster state are restored. It's possible to select which indices
+should be restored, and to prevent the global cluster state from being restored, by using the `indices` and
+`include_global_state` options in the restore request body. The list of indices supports
+<<search-multi-index-type,multi index syntax>>. The `rename_pattern` and `rename_replacement` options can also be used to
+rename indices on restore using a regular expression that supports referencing the original text as explained
+http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here].
+
+[source,js]
+-----------------------------------
+$ curl -XPOST "localhost:9200/_snapshot/my_backup/snapshot_1/_restore" -d '{
+ "indices": "index_1,index_2",
+ "ignore_unavailable": "true",
+ "include_global_state": false,
+ "rename_pattern": "index_(.+)",
+ "rename_replacement": "restored_index_$1"
+}'
+-----------------------------------
+
+The restore operation can be performed on a functioning cluster. However, an existing index can only be restored if it's
+closed. The restore operation automatically opens restored indices if they were closed and creates new indices if they
+didn't exist in the cluster. If the cluster state is restored, the restored templates that don't currently exist in the
+cluster are added and existing templates with the same name are replaced by the restored templates. The restored
+persistent settings are added to the existing persistent settings.
diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc
new file mode 100644
index 0000000..50d7a92
--- /dev/null
+++ b/docs/reference/modules/threadpool.asciidoc
@@ -0,0 +1,117 @@
+[[modules-threadpool]]
+== Thread Pool
+
+A node holds several thread pools in order to improve how threads and
+memory consumption are managed within a node. There are several thread
+pools, but the important ones include:
+
+[horizontal]
+`index`::
+    For index/delete operations, defaults to `fixed`,
+    size `# of available processors`,
+    queue_size `200`.
+
+`search`::
+    For count/search operations, defaults to `fixed`,
+    size `3x # of available processors`,
+    queue_size `1000`.
+
+`suggest`::
+    For suggest operations, defaults to `fixed`,
+    size `# of available processors`,
+    queue_size `1000`.
+
+`get`::
+    For get operations, defaults to `fixed`,
+    size `# of available processors`,
+    queue_size `1000`.
+
+`bulk`::
+    For bulk operations, defaults to `fixed`,
+    size `# of available processors`,
+    queue_size `50`.
+
+`percolate`::
+    For percolate operations, defaults to `fixed`,
+    size `# of available processors`,
+    queue_size `1000`.
+
+`warmer`::
+    For segment warm-up operations, defaults to `scaling`
+    with a `5m` keep-alive.
+
+`refresh`::
+    For refresh operations, defaults to `scaling`
+    with a `5m` keep-alive.
+
+Changing a specific thread pool can be done by setting its type and
+specific type parameters, for example, changing the `index` thread pool
+to have more threads:
+
+[source,js]
+--------------------------------------------------
+threadpool:
+ index:
+ type: fixed
+ size: 30
+--------------------------------------------------
+
+NOTE: You can update thread pool settings dynamically using
+      <<cluster-update-settings>>.
+
+
+[float]
+[[types]]
+=== Thread pool types
+
+The following are the types of thread pools that can be used and their
+respective parameters:
+
+[float]
+==== `cached`
+
+The `cached` thread pool is an unbounded thread pool that will spawn a
+thread if there are pending requests. Here is an example of how to set
+it:
+
+[source,js]
+--------------------------------------------------
+threadpool:
+ index:
+ type: cached
+--------------------------------------------------
+
+[float]
+==== `fixed`
+
+The `fixed` thread pool holds a fixed number of threads to handle the
+requests, with a queue (optionally bounded) for pending requests that
+have no threads to service them.
+
+The `size` parameter controls the number of threads, and defaults to the
+number of cores times 5.
+
+The `queue_size` allows you to control the size of the queue of pending
+requests that have no threads to execute them. By default, it is set to
+`-1`, which means it is unbounded. When a request comes in and the queue
+is full, the request will be aborted.
+
+[source,js]
+--------------------------------------------------
+threadpool:
+ index:
+ type: fixed
+ size: 30
+ queue_size: 1000
+--------------------------------------------------
+
+[float]
+[[processors]]
+=== Processors setting
+The number of processors is automatically detected, and the thread pool
+settings are automatically set based on it. Sometimes the number of processors
+is wrongly detected; in such cases, the number of processors can be
+explicitly set using the `processors` setting.
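+
+For example, a sketch of the corresponding `elasticsearch.yml` entry
+(the value is illustrative):
+
+[source,js]
+--------------------------------------------------
+processors: 4
+--------------------------------------------------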
+
+In order to check the number of processors detected, use the nodes info
+API with the `os` flag.
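+
+A sketch of that check, assuming the flag-style form of the nodes info
+API:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_nodes?os=true&pretty'
+--------------------------------------------------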
diff --git a/docs/reference/modules/thrift.asciidoc b/docs/reference/modules/thrift.asciidoc
new file mode 100644
index 0000000..85e229f
--- /dev/null
+++ b/docs/reference/modules/thrift.asciidoc
@@ -0,0 +1,25 @@
+[[modules-thrift]]
+== Thrift
+
+The thrift transport module allows you to expose the REST interface of
+Elasticsearch using thrift. Thrift should provide better performance
+over HTTP. Since thrift provides both the wire protocol and the
+transport, it should also be simpler to use (though it is lacking in
+documentation).
+
+Using thrift requires installing the `transport-thrift` plugin, located
+https://github.com/elasticsearch/elasticsearch-transport-thrift[here].
+
+The thrift
+https://github.com/elasticsearch/elasticsearch-transport-thrift/blob/master/elasticsearch.thrift[schema]
+can be used to generate thrift clients.
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`thrift.port` |The port to bind to. Defaults to `9500-9600`.
+
+|`thrift.frame` |Defaults to `-1`, which means no framing. Set to a
+higher value to specify the frame size (like `15mb`).
+|=======================================================================
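+
+A minimal sketch of the corresponding `elasticsearch.yml` entries (the
+values are illustrative):
+
+[source,yaml]
+--------------------------------------------------
+thrift.port: 9500
+thrift.frame: 15mb
+--------------------------------------------------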
+
diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc
new file mode 100644
index 0000000..62fe6d0
--- /dev/null
+++ b/docs/reference/modules/transport.asciidoc
@@ -0,0 +1,49 @@
+[[modules-transport]]
+== Transport
+
+The transport module is used for internal communication between nodes
+within the cluster. Each call that goes from one node to the other uses
+the transport module (for example, when an HTTP GET request is processed
+by one node, and should actually be processed by another node that holds
+the data).
+
+The transport mechanism is completely asynchronous in nature, meaning
+that there is no blocking thread waiting for a response. The benefit of
+using asynchronous communication is first solving the
+http://en.wikipedia.org/wiki/C10k_problem[C10k problem], as well as
+being the ideal solution for scatter (broadcast) / gather operations such
+as search in Elasticsearch.
+
+[float]
+=== TCP Transport
+
+The TCP transport is an implementation of the transport module using
+TCP. It allows for the following settings:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Setting |Description
+|`transport.tcp.port` |A bind port range. Defaults to `9300-9400`.
+
+|`transport.publish_port` |The port that other nodes in the cluster
+should use when communicating with this node. Useful when a cluster node
+is behind a proxy or firewall and the `transport.tcp.port` is not directly
+addressable from the outside. Defaults to the actual port assigned via
+`transport.tcp.port`.
+
+|`transport.tcp.connect_timeout` |The socket connect timeout setting (in
+time setting format). Defaults to `30s`.
+
+|`transport.tcp.compress` |Set to `true` to enable compression (LZF)
+between all nodes. Defaults to `false`.
+|=======================================================================
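+
+For example, a sketch of pinning the port range and enabling compression
+in `elasticsearch.yml`:
+
+[source,yaml]
+--------------------------------------------------
+transport.tcp.port: 9300-9400
+transport.tcp.compress: true
+--------------------------------------------------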
+
+It also uses the common
+<<modules-network,network settings>>.
+
+[float]
+=== Local Transport
+
+This is a handy transport to use when running integration tests within
+the JVM. It is automatically enabled when using
+`NodeBuilder#local(true)`.
diff --git a/docs/reference/modules/tribe.asciidoc b/docs/reference/modules/tribe.asciidoc
new file mode 100644
index 0000000..fb998bf
--- /dev/null
+++ b/docs/reference/modules/tribe.asciidoc
@@ -0,0 +1,58 @@
+[[modules-tribe]]
+== Tribe node
+
+The _tribes_ feature allows a _tribe node_ to act as a federated client across
+multiple clusters.
+
+The tribe node works by retrieving the cluster state from all connected
+clusters and merging them into a global cluster state. With this information
+at hand, it is able to perform read and write operations against the nodes in
+all clusters as if they were local.
+
+The `elasticsearch.yml` config file for a tribe node just needs to list the
+clusters that should be joined, for instance:
+
+[source,yaml]
+--------------------------------
+tribe:
+ t1: <1>
+ cluster.name: cluster_one
+ t2: <1>
+ cluster.name: cluster_two
+--------------------------------
+<1> `t1` and `t2` are arbitrary names representing the connection to each
+ cluster.
+
+The example above configures connections to two clusters, named `t1` and `t2`
+respectively. The tribe node will create a <<modules-node,node client>> to
+connect to each cluster using <<multicast,multicast discovery>> by default. Any
+other settings for the connection can be configured under `tribe.{name}`, just
+like the `cluster.name` in the example.
+
+The merged global cluster state means that almost all operations work in the
+same way as a single cluster: distributed search, suggest, percolation,
+indexing, etc.
+
+However, there are a few exceptions:
+
+* The merged view cannot handle indices with the same name in multiple
+ clusters. It will pick one of them and discard the other.
+
+* Master level read operations (eg <<cluster-state>>, <<cluster-health>>)
+ will automatically execute with a local flag set to true since there is
+ no master.
+
+* Master level write operations (eg <<indices-create-index>>) are not
+ allowed. These should be performed on a single cluster.
+
+The tribe node can be configured to block all write operations and all
+metadata operations with:
+
+[source,yaml]
+--------------------------------
+tribe:
+ blocks:
+ write: true
+ metadata: true
+--------------------------------
+
diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc
new file mode 100644
index 0000000..32c1fb5
--- /dev/null
+++ b/docs/reference/query-dsl.asciidoc
@@ -0,0 +1,42 @@
+[[query-dsl]]
+= Query DSL
+
+[partintro]
+--
+*elasticsearch* provides a full Query DSL based on JSON to define
+queries. In general, there are basic queries such as
+<<query-dsl-term-query,term>> or
+<<query-dsl-prefix-query,prefix>>. There are
+also compound queries like the
+<<query-dsl-bool-query,bool>> query. Queries can
+also have filters associated with them, such as the
+<<query-dsl-filtered-query,filtered>> or
+<<query-dsl-constant-score-query,constant_score>>
+queries, which embed specific filters.
+
+Think of the Query DSL as an AST of queries. Certain queries can contain
+other queries (like the
+<<query-dsl-bool-query,bool>> query), others can
+contain filters (like the
+<<query-dsl-constant-score-query,constant_score>>),
+and some can contain both a query and a filter (like the
+<<query-dsl-filtered-query,filtered>>). Each of
+those can contain *any* query of the list of queries or *any* filter
+from the list of filters, resulting in the ability to build quite
+complex (and interesting) queries.
+
+Both queries and filters can be used in different APIs. For example,
+within a <<search-request-query,search query>>, or
+as a <<search-facets,facet filter>>. This
+section explains the components (queries and filters) that can form the
+AST one can use.
+
+Filters are very handy since they perform an order of magnitude better
+than plain queries: no scoring is performed and they are
+automatically cached.
+
+--
+
+include::query-dsl/queries.asciidoc[]
+
+include::query-dsl/filters.asciidoc[]
diff --git a/docs/reference/query-dsl/filters.asciidoc b/docs/reference/query-dsl/filters.asciidoc
new file mode 100644
index 0000000..7e4149f
--- /dev/null
+++ b/docs/reference/query-dsl/filters.asciidoc
@@ -0,0 +1,104 @@
+[[query-dsl-filters]]
+== Filters
+
+As a general rule, filters should be used instead of queries:
+
+* for binary yes/no searches
+* for queries on exact values
+
+[float]
+[[caching]]
+=== Filters and Caching
+
+Filters can be a great candidate for caching. Caching the result of a
+filter does not require a lot of memory, and will cause other queries
+executing against the same filter (same parameters) to be blazingly
+fast.
+
+Some filters already produce a result that is easily cacheable, and the
+difference between caching and not caching them is the act of placing
+the result in the cache or not. These filters, which include the
+<<query-dsl-term-filter,term>>,
+<<query-dsl-terms-filter,terms>>,
+<<query-dsl-prefix-filter,prefix>>, and
+<<query-dsl-range-filter,range>> filters, are by
+default cached and are recommended to use (compared to the equivalent
+query version) when the same filter (same parameters) will be used
+across multiple different queries (for example, a range filter with age
+higher than 10).
+
+Other filters, usually already working with the field data loaded into
+memory, are not cached by default. Those filters are already very fast,
+and the process of caching them requires extra processing in order to
+allow the filter result to be used with different queries than the one
+executed. These filters, including the geo,
+and <<query-dsl-script-filter,script>> filters
+are not cached by default.
+
+The last type of filters are those working with other filters. The
+<<query-dsl-and-filter,and>>,
+<<query-dsl-not-filter,not>> and
+<<query-dsl-or-filter,or>> filters are not
+cached as they basically just manipulate the internal filters.
+
+All filters allow you to set a `_cache` element on them to explicitly control
+caching. They also allow you to set a `_cache_key` which will be used as the
+caching key for that filter. This can be handy when using very large
+filters (like a terms filter with many elements in it).
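+
+For example, a sketch of a large terms filter with explicit caching (the
+field, values and key name are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "constant_score" : {
+        "filter" : {
+            "terms" : {
+                "user" : ["alice", "bob", "carol"],
+                "_cache" : true,
+                "_cache_key" : "user_whitelist"
+            }
+        }
+    }
+}
+--------------------------------------------------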
+
+include::filters/and-filter.asciidoc[]
+
+include::filters/bool-filter.asciidoc[]
+
+include::filters/exists-filter.asciidoc[]
+
+include::filters/geo-bounding-box-filter.asciidoc[]
+
+include::filters/geo-distance-filter.asciidoc[]
+
+include::filters/geo-distance-range-filter.asciidoc[]
+
+include::filters/geo-polygon-filter.asciidoc[]
+
+include::filters/geo-shape-filter.asciidoc[]
+
+include::filters/geohash-cell-filter.asciidoc[]
+
+include::filters/has-child-filter.asciidoc[]
+
+include::filters/has-parent-filter.asciidoc[]
+
+include::filters/ids-filter.asciidoc[]
+
+include::filters/indices-filter.asciidoc[]
+
+include::filters/limit-filter.asciidoc[]
+
+include::filters/match-all-filter.asciidoc[]
+
+include::filters/missing-filter.asciidoc[]
+
+include::filters/nested-filter.asciidoc[]
+
+include::filters/not-filter.asciidoc[]
+
+include::filters/or-filter.asciidoc[]
+
+include::filters/prefix-filter.asciidoc[]
+
+include::filters/query-filter.asciidoc[]
+
+include::filters/range-filter.asciidoc[]
+
+include::filters/regexp-filter.asciidoc[]
+
+include::filters/script-filter.asciidoc[]
+
+include::filters/term-filter.asciidoc[]
+
+include::filters/terms-filter.asciidoc[]
+
+include::filters/type-filter.asciidoc[]
+
+
+
diff --git a/docs/reference/query-dsl/filters/and-filter.asciidoc b/docs/reference/query-dsl/filters/and-filter.asciidoc
new file mode 100644
index 0000000..6f171cf
--- /dev/null
+++ b/docs/reference/query-dsl/filters/and-filter.asciidoc
@@ -0,0 +1,69 @@
+[[query-dsl-and-filter]]
+=== And Filter
+
+A filter that matches documents using the `AND` boolean operator on other
+filters. Can be placed within queries that accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "and" : [
+ {
+ "range" : {
+ "postDate" : {
+ "from" : "2010-03-01",
+ "to" : "2010-04-01"
+ }
+ }
+ },
+ {
+ "prefix" : { "name.second" : "ba" }
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` in order to cache it (though usually not needed). Since
+the `_cache` element needs to be set on the `and` filter itself, the
+structure then changes a bit to have the filters provided within a
+`filters` element:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "and" : {
+ "filters": [
+ {
+ "range" : {
+ "postDate" : {
+ "from" : "2010-03-01",
+ "to" : "2010-04-01"
+ }
+ }
+ },
+ {
+ "prefix" : { "name.second" : "ba" }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/bool-filter.asciidoc b/docs/reference/query-dsl/filters/bool-filter.asciidoc
new file mode 100644
index 0000000..bf36d26
--- /dev/null
+++ b/docs/reference/query-dsl/filters/bool-filter.asciidoc
@@ -0,0 +1,49 @@
+[[query-dsl-bool-filter]]
+=== Bool Filter
+
+A filter that matches documents matching boolean combinations of other
+queries. Similar in concept to
+<<query-dsl-bool-query,Boolean query>>, except
+that the clauses are other filters. Can be placed within queries that
+accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "queryString" : {
+ "default_field" : "message",
+ "query" : "elasticsearch"
+ }
+ },
+ "filter" : {
+ "bool" : {
+ "must" : {
+ "term" : { "tag" : "wow" }
+ },
+ "must_not" : {
+ "range" : {
+ "age" : { "from" : 10, "to" : 20 }
+ }
+ },
+ "should" : [
+ {
+ "term" : { "tag" : "sometag" }
+ },
+ {
+ "term" : { "tag" : "sometagtag" }
+ }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the `bool` filter is not cached by default (though
+internal filters might be). The `_cache` can be set to `true` in order
+to enable caching.
diff --git a/docs/reference/query-dsl/filters/exists-filter.asciidoc b/docs/reference/query-dsl/filters/exists-filter.asciidoc
new file mode 100644
index 0000000..80e9495
--- /dev/null
+++ b/docs/reference/query-dsl/filters/exists-filter.asciidoc
@@ -0,0 +1,20 @@
+[[query-dsl-exists-filter]]
+=== Exists Filter
+
+Filters documents where a specific field has a value.
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "exists" : { "field" : "user" }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is always cached.
diff --git a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc
new file mode 100644
index 0000000..7f16ec5
--- /dev/null
+++ b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc
@@ -0,0 +1,240 @@
+[[query-dsl-geo-bounding-box-filter]]
+=== Geo Bounding Box Filter
+
+A filter allowing you to filter hits based on a point location using a
+bounding box. Assuming the following indexed document:
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : {
+ "lat" : 40.12,
+ "lon" : -71.34
+ }
+ }
+}
+--------------------------------------------------
+
+Then the following simple query can be executed with a
+`geo_bounding_box` filter:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top_left" : {
+ "lat" : 40.73,
+ "lon" : -74.1
+ },
+ "bottom_right" : {
+ "lat" : 40.01,
+ "lon" : -71.12
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Accepted Formats
+
+In much the same way the `geo_point` type can accept different
+representations of the geo point, the filter can accept them as well:
+
+[float]
+===== Lat Lon As Properties
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top_left" : {
+ "lat" : 40.73,
+ "lon" : -74.1
+ },
+ "bottom_right" : {
+ "lat" : 40.01,
+ "lon" : -71.12
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon As Array
+
+Format in `[lon, lat]`. Note the order of lon/lat here, in order to
+conform with http://geojson.org/[GeoJSON].
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top_left" : [-74.1, 40.73],
+ "bottom_right" : [-71.12, 40.01]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon As String
+
+Format in `lat,lon`.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top_left" : "40.73, -74.1",
+ "bottom_right" : "40.01, -71.12"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Geohash
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top_left" : "dr5r9ydj2y73",
+ "bottom_right" : "drj7teegpus6"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Vertices
+
+The vertices of the bounding box can either be set by `top_left` and
+`bottom_right` or by `top_right` and `bottom_left` parameters. Moreover,
+the names `topLeft`, `bottomRight`, `topRight` and `bottomLeft`
+are supported. Instead of setting the values pairwise, one can use
+the simple names `top`, `left`, `bottom` and `right` to set the
+values separately.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top" : -74.1,
+ "left" : 40.73,
+ "bottom" : -71.12,
+ "right" : 40.01
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+
+[float]
+==== geo_point Type
+
+The filter *requires* the `geo_point` type to be set on the relevant
+field.
+
+[float]
+==== Multi Location Per Document
+
+The filter can work with multiple locations / points per document. Once
+a single location / point matches the filter, the document will be
+included in the filter.
+
+[float]
+==== Type
+
+The type of the bounding box execution is set to `memory` by default,
+which means the check whether a doc falls within the bounding box is
+done in memory. In some cases, the `indexed` option will perform faster (but note
+that the `geo_point` type must have lat and lon indexed in this case).
+Note, when using the indexed option, multiple locations per document field
+are not supported. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_bounding_box" : {
+ "pin.location" : {
+ "top_left" : {
+ "lat" : 40.73,
+ "lon" : -74.1
+ },
+ "bottom_right" : {
+ "lat" : 40.10,
+ "lon" : -71.12
+ }
+ },
+ "type" : "indexed"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` to cache the *result* of the filter. This is handy when
+the same bounding box parameters are used on several (many) other
+queries. Note, the cost of the first execution is higher when caching
+is enabled (since the result needs to be reusable by different queries).
diff --git a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc
new file mode 100644
index 0000000..f501398
--- /dev/null
+++ b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc
@@ -0,0 +1,184 @@
+[[query-dsl-geo-distance-filter]]
+=== Geo Distance Filter
+
+Filters documents to include only hits that exist within a specific
+distance from a geo point. Assuming the following indexed JSON:
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : {
+ "lat" : 40.12,
+ "lon" : -71.34
+ }
+ }
+}
+--------------------------------------------------
+
+Then the following simple query can be executed with a `geo_distance`
+filter:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_distance" : {
+ "distance" : "200km",
+ "pin.location" : {
+ "lat" : 40,
+ "lon" : -70
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Accepted Formats
+
+In much the same way the `geo_point` type can accept different
+representations of the geo point, the filter can accept them as well:
+
+[float]
+===== Lat Lon As Properties
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_distance" : {
+ "distance" : "12km",
+ "pin.location" : {
+ "lat" : 40,
+ "lon" : -70
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon As Array
+
+Format in `[lon, lat]`. Note the order of lon/lat here, in order to
+conform with http://geojson.org/[GeoJSON].
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_distance" : {
+ "distance" : "12km",
+ "pin.location" : [40, -70]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon As String
+
+Format in `lat,lon`.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_distance" : {
+ "distance" : "12km",
+ "pin.location" : "40,-70"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Geohash
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_distance" : {
+ "distance" : "12km",
+ "pin.location" : "drm3btev3e86"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Options
+
+The following are options allowed on the filter:
+
+[horizontal]
+
+`distance`::
+
+ The radius of the circle centred on the specified location. Points which
+ fall into this circle are considered to be matches. The `distance` can be
+ specified in various units. See <<distance-units>>.
+
+`distance_type`::
+
+ How to compute the distance. Can either be `arc` (better precision),
+ `sloppy_arc` (faster but less precise) or `plane` (fastest). Defaults
+ to `sloppy_arc`.
+
+`optimize_bbox`::
+
+ Whether to use the optimization of first running a bounding box check
+ before the distance check. Defaults to `memory` which will do in memory
+ checks. Can also have values of `indexed` to use indexed value check (make
+  sure the `geo_point` type indexes lat and lon in this case), or `none` which
+ disables bounding box optimization.
+
+
+[float]
+==== geo_point Type
+
+The filter *requires* the `geo_point` type to be set on the relevant
+field.
+
+[float]
+==== Multi Location Per Document
+
+The `geo_distance` filter can work with multiple locations / points per
+document. Once a single location / point matches the filter, the
+document will be included in the filter.
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` to cache the *result* of the filter. This is handy when
+the same point and distance parameters are used on several (many) other
+queries. Note, the cost of the first execution is higher when caching
+is enabled (since the result needs to be reusable by different queries).
diff --git a/docs/reference/query-dsl/filters/geo-distance-range-filter.asciidoc b/docs/reference/query-dsl/filters/geo-distance-range-filter.asciidoc
new file mode 100644
index 0000000..1bc4197
--- /dev/null
+++ b/docs/reference/query-dsl/filters/geo-distance-range-filter.asciidoc
@@ -0,0 +1,30 @@
+[[query-dsl-geo-distance-range-filter]]
+=== Geo Distance Range Filter
+
+Filters documents that exist within a range from a specific point:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_distance_range" : {
+ "from" : "200km",
+ "to" : "400km"
+ "pin.location" : {
+ "lat" : 40,
+ "lon" : -70
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Supports the same point location parameter as the
+<<query-dsl-geo-distance-filter,geo_distance>>
+filter. It also supports the common parameters for range (`lt`, `lte`, `gt`,
+`gte`, `from`, `to`, `include_upper` and `include_lower`).
diff --git a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc
new file mode 100644
index 0000000..a421234
--- /dev/null
+++ b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc
@@ -0,0 +1,126 @@
+[[query-dsl-geo-polygon-filter]]
+=== Geo Polygon Filter
+
+A filter allowing you to include only hits that fall within a polygon of
+points. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_polygon" : {
+ "person.location" : {
+ "points" : [
+ {"lat" : 40, "lon" : -70},
+ {"lat" : 30, "lon" : -80},
+ {"lat" : 20, "lon" : -90}
+ ]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Allowed Formats
+
+[float]
+===== Lat Lon as Array
+
+Format in `[lon, lat]`. Note the order of lon/lat here, in order to
+conform with http://geojson.org/[GeoJSON].
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_polygon" : {
+ "person.location" : {
+ "points" : [
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Lat Lon as String
+
+Format in `lat,lon`.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_polygon" : {
+ "person.location" : {
+ "points" : [
+ "40, -70",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Geohash
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_polygon" : {
+ "person.location" : {
+ "points" : [
+ "drn5x1g8cu2y",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== geo_point Type
+
+The filter *requires* the
+<<mapping-geo-point-type,geo_point>> type to be
+set on the relevant field.
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` to cache the *result* of the filter. This is handy when
+the same points parameters are used on several (many) other queries.
+Note, the cost of the first execution is higher when caching is enabled
+(since the result needs to be reusable by different queries).
diff --git a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc
new file mode 100644
index 0000000..f97e798
--- /dev/null
+++ b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc
@@ -0,0 +1,121 @@
+[[query-dsl-geo-shape-filter]]
+=== GeoShape Filter
+
+Filters documents indexed using the `geo_shape` type.
+
+Requires the <<mapping-geo-shape-type,geo_shape
+Mapping>>.
+
+You may also use the
+<<query-dsl-geo-shape-query,geo_shape Query>>.
+
+The `geo_shape` Filter uses the same grid square representation as the
+geo_shape mapping to find documents that have a shape that intersects
+with the query shape. It will also use the same PrefixTree configuration
+as defined for the field mapping.
+
+[float]
+==== Filter Format
+
+The Filter supports two ways of defining the Filter shape, either by
+providing a whole shape definition, or by referencing the name of a shape
+pre-indexed in another index. Both formats are defined below with
+examples.
+
+[float]
+===== Provided Shape Definition
+
+Similar to the `geo_shape` type, the `geo_shape` Filter uses
+http://www.geojson.org[GeoJSON] to represent shapes.
+
+Given a document that looks like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "name": "Wind & Wetter, Berlin, Germany",
+ "location": {
+ "type": "Point",
+ "coordinates": [13.400544, 52.530286]
+ }
+}
+--------------------------------------------------
+
+The following query will find the point using Elasticsearch's
+`envelope` GeoJSON extension:
+
+[source,js]
+--------------------------------------------------
+{
+ "query":{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_shape": {
+ "location": {
+ "shape": {
+ "type": "envelope",
+ "coordinates" : [[13.0, 53.0], [14.0, 52.0]]
+ }
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Pre-Indexed Shape
+
+The Filter also supports using a shape which has already been indexed in
+another index and/or index type. This is particularly useful when
+you have a pre-defined list of shapes which are useful to your
+application and you want to reference them using a logical name (for
+example 'New Zealand') rather than having to provide their coordinates
+each time. In this situation it is only necessary to provide:
+
+* `id` - The ID of the document containing the pre-indexed shape.
+* `index` - Name of the index where the pre-indexed shape is. Defaults
+to 'shapes'.
+* `type` - Index type where the pre-indexed shape is.
+* `path` - The field specified as path containing the pre-indexed shape.
+Defaults to 'shape'.
+
+The following is an example of using the Filter with a pre-indexed
+shape:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_shape": {
+ "location": {
+ "indexed_shape": {
+ "id": "DEU",
+ "type": "countries",
+ "index": "shapes",
+ "path": "location"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the Filter is not cached by default. Setting `_cache` to
+`true` will mean the results of the Filter will be cached. Since shapes
+can contain 10s-100s of coordinates and any one differing means a new
+shape, it may make sense to only use caching when you are sure that
+the shapes will remain reasonably static.
+
diff --git a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc
new file mode 100644
index 0000000..3440902
--- /dev/null
+++ b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc
@@ -0,0 +1,62 @@
+[[query-dsl-geohash-cell-filter]]
+=== Geohash Cell Filter
+
+The `geohash_cell` filter provides access to a hierarchy of geohashes.
+By defining a geohash cell, only <<mapping-geo-point-type,geopoints>>
+within this cell will match this filter.
+
+To make this filter work, all prefixes of a geohash need to be indexed. For
+example, the geohash `u30` needs to be decomposed into three terms: `u30`,
+`u3` and `u`. This decomposition must be enabled in the mapping of the
+<<mapping-geo-point-type,geopoint>> field that's going to be filtered by
+setting the `geohash_prefix` option:
+
+[source,js]
+--------------------------------------------------
+{
+ "mappings" : {
+ "location": {
+ "properties": {
+ "pin": {
+ "type": "geo_point",
+ "geohash": true,
+ "geohash_prefix": true,
+ "geohash_precision": 10
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The geohash cell can be defined by all formats of `geo_points`. If such a cell is
+defined by a latitude and longitude pair, the size of the cell needs to be
+set up. This can be done by the `precision` parameter of the filter. This
+parameter can be set to an integer value which sets the length of the geohash
+prefix. Instead of setting a geohash length directly it is also possible to
+define the precision as a distance, for example `"precision": "50m"`. (See
+<<distance-units>>.)
+
+The `neighbors` option of the filter offers the possibility to also filter
+cells next to the given cell.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geohash_cell": {
+ "pin": {
+ "lat": 13.4080,
+ "lon": 52.5186
+ },
+ "precision": 3,
+ "neighbors": true
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/has-child-filter.asciidoc b/docs/reference/query-dsl/filters/has-child-filter.asciidoc
new file mode 100644
index 0000000..1a8e116
--- /dev/null
+++ b/docs/reference/query-dsl/filters/has-child-filter.asciidoc
@@ -0,0 +1,57 @@
+[[query-dsl-has-child-filter]]
+=== Has Child Filter
+
+The `has_child` filter accepts a query and the child type to run
+against, and results in parent documents that have child docs matching
+the query. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "has_child" : {
+ "type" : "blog_tag",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The `type` is the child type to query against. The parent type to return
+is automatically detected based on the mappings.
+
+The filter is implemented by first running the child
+query and mapping each matching child document up to its
+parent document.
+
+The `has_child` filter also accepts a filter instead of a query:
+
+[source,js]
+--------------------------------------------------
+{
+ "has_child" : {
+ "type" : "comment",
+ "filter" : {
+ "term" : {
+ "user" : "john"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Memory Considerations
+
+With the current implementation, all `_id` values are loaded to memory
+(heap) in order to support fast lookups, so make sure there is enough
+memory for it.
+
+[float]
+==== Caching
+
+The `has_child` filter cannot be cached in the filter cache. The `_cache`
+and `_cache_key` options are a no-op in this filter. Also any filter that
+wraps the `has_child` filter either directly or indirectly will not be cached. \ No newline at end of file
diff --git a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc
new file mode 100644
index 0000000..fe4f66d
--- /dev/null
+++ b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc
@@ -0,0 +1,60 @@
+[[query-dsl-has-parent-filter]]
+=== Has Parent Filter
+
+The `has_parent` filter accepts a query and a parent type. The query is
+executed in the parent document space, which is specified by the parent
+type. This filter returns child documents whose associated parents have
+matched. For the rest, the `has_parent` filter has the same options and works
+in the same manner as the `has_child` filter.
+
+[float]
+==== Filter example
+
+[source,js]
+--------------------------------------------------
+{
+ "has_parent" : {
+ "parent_type" : "blog",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The `parent_type` field name can also be abbreviated to `type`.
+
+The filter is implemented by first running the parent
+query and mapping each matching parent document down to its child documents.
+
+The `has_parent` filter also accepts a filter instead of a query:
+
+[source,js]
+--------------------------------------------------
+{
+ "has_parent" : {
+ "type" : "blog",
+ "filter" : {
+ "term" : {
+ "text" : "bonsai three"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Memory considerations
+
+With the current implementation, all `_id` values are loaded to memory
+(heap) in order to support fast lookups, so make sure there is enough
+memory for it.
+
+[float]
+==== Caching
+
+The `has_parent` filter cannot be cached in the filter cache. The `_cache`
+and `_cache_key` options are a no-op in this filter. Also any filter that
+wraps the `has_parent` filter either directly or indirectly will not be cached. \ No newline at end of file
diff --git a/docs/reference/query-dsl/filters/ids-filter.asciidoc b/docs/reference/query-dsl/filters/ids-filter.asciidoc
new file mode 100644
index 0000000..303fffb
--- /dev/null
+++ b/docs/reference/query-dsl/filters/ids-filter.asciidoc
@@ -0,0 +1,20 @@
+[[query-dsl-ids-filter]]
+=== Ids Filter
+
+Filters documents to only include those that have the provided ids. Note, this filter
+does not require the <<mapping-id-field,_id>>
+field to be indexed since it works using the
+<<mapping-uid-field,_uid>> field.
+
+[source,js]
+--------------------------------------------------
+{
+ "ids" : {
+ "type" : "my_type",
+ "values" : ["1", "4", "100"]
+ }
+}
+--------------------------------------------------
+
+The `type` is optional and can be omitted; it can also accept an array
+of values.
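+
+For example, a sketch of the same filter with the `type` omitted:
+
+[source,js]
+--------------------------------------------------
+{
+    "ids" : {
+        "values" : ["1", "4", "100"]
+    }
+}
+--------------------------------------------------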
diff --git a/docs/reference/query-dsl/filters/indices-filter.asciidoc b/docs/reference/query-dsl/filters/indices-filter.asciidoc
new file mode 100644
index 0000000..05ad232
--- /dev/null
+++ b/docs/reference/query-dsl/filters/indices-filter.asciidoc
@@ -0,0 +1,37 @@
+[[query-dsl-indices-filter]]
+=== Indices Filter
+
+The `indices` filter can be used when a search is executed across multiple indices,
+allowing you to specify one filter that executes only on indices
+that match a specific list, and another filter that executes
+on indices that do not match the listed indices.
+
+[source,js]
+--------------------------------------------------
+{
+ "indices" : {
+ "indices" : ["index1", "index2"],
+ "filter" : {
+ "term" : { "tag" : "wow" }
+ },
+ "no_match_filter" : {
+ "term" : { "tag" : "kow" }
+ }
+ }
+}
+--------------------------------------------------
+
+You can use the `index` field to provide a single index.
+
+`no_match_filter` can also take a string value of `none` (to match no
+documents) or `all` (to match all documents). Defaults to `all`.
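+
+For example, a sketch combining the single `index` form with a string
+`no_match_filter`, so that non-listed indices match no documents:
+
+[source,js]
+--------------------------------------------------
+{
+    "indices" : {
+        "index" : "index1",
+        "filter" : {
+            "term" : { "tag" : "wow" }
+        },
+        "no_match_filter" : "none"
+    }
+}
+--------------------------------------------------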
+
+`filter` is mandatory, as well as `indices` (or `index`).
+
+[TIP]
+===================================================================
+The order of the fields is important: if `indices` is provided before `filter`
+or `no_match_filter`, the related filters get parsed only against the indices
+that they are going to be executed on. This is useful to avoid parsing filters
+when it is not necessary and prevent potential mapping errors.
+===================================================================
diff --git a/docs/reference/query-dsl/filters/limit-filter.asciidoc b/docs/reference/query-dsl/filters/limit-filter.asciidoc
new file mode 100644
index 0000000..a590c25
--- /dev/null
+++ b/docs/reference/query-dsl/filters/limit-filter.asciidoc
@@ -0,0 +1,19 @@
+[[query-dsl-limit-filter]]
+=== Limit Filter
+
+A limit filter limits the number of documents (per shard) to execute on.
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "filter" : {
+ "limit" : {"value" : 100}
+ },
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/match-all-filter.asciidoc b/docs/reference/query-dsl/filters/match-all-filter.asciidoc
new file mode 100644
index 0000000..97adbd1
--- /dev/null
+++ b/docs/reference/query-dsl/filters/match-all-filter.asciidoc
@@ -0,0 +1,15 @@
+[[query-dsl-match-all-filter]]
+=== Match All Filter
+
+A filter that matches on all documents:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "match_all" : { }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/missing-filter.asciidoc b/docs/reference/query-dsl/filters/missing-filter.asciidoc
new file mode 100644
index 0000000..70685bd
--- /dev/null
+++ b/docs/reference/query-dsl/filters/missing-filter.asciidoc
@@ -0,0 +1,41 @@
+[[query-dsl-missing-filter]]
+=== Missing Filter
+
+Filters documents where a specific field has no value.
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "missing" : { "field" : "user" }
+ }
+ }
+}
+--------------------------------------------------
+
+By default, the filter will only find "missing" fields, i.e., fields
+that have no values. It can also be configured to find fields with an
+explicit `null_value` mapped for them. Here is an example that will
+find both missing fields that don't exist (`existence` set to `true`)
+and fields that have null values (`null_value` set to `true`):
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "missing" : {
+ "field" : "user",
+ "existence" : true,
+ "null_value" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is always cached.
diff --git a/docs/reference/query-dsl/filters/nested-filter.asciidoc b/docs/reference/query-dsl/filters/nested-filter.asciidoc
new file mode 100644
index 0000000..6235be2
--- /dev/null
+++ b/docs/reference/query-dsl/filters/nested-filter.asciidoc
@@ -0,0 +1,78 @@
+[[query-dsl-nested-filter]]
+=== Nested Filter
+
+A `nested` filter works in a similar fashion to the
+<<query-dsl-nested-query,nested>> query, except it's
+used as a filter. It follows exactly the same structure, but also allows
+you to cache the results (set `_cache` to `true`) and to name it (set
+the `_name` value). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : { "match_all" : {} },
+ "filter" : {
+ "nested" : {
+ "path" : "obj1",
+ "filter" : {
+ "bool" : {
+ "must" : [
+ {
+ "term" : {"obj1.name" : "blue"}
+ },
+ {
+ "range" : {"obj1.count" : {"gt" : 5}}
+ }
+ ]
+ }
+ },
+ "_cache" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Join option
+
+The nested filter also supports a `join` option which controls whether to perform the block join or not.
+It is enabled by default; when disabled, the filter emits the hidden nested documents as hits instead of the joined root document.
+
+This is useful when a `nested` filter is used inside a facet where nested is enabled, as in the example below:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "nested" : {
+ "path" : "offers",
+ "query" : {
+ "match" : {
+ "offers.color" : "blue"
+ }
+ }
+ }
+ },
+ "facets" : {
+ "size" : {
+ "terms" : {
+ "field" : "offers.size"
+ },
+ "facet_filter" : {
+ "nested" : {
+ "path" : "offers",
+ "query" : {
+ "match" : {
+ "offers.color" : "blue"
+ }
+ },
+ "join" : false
+ }
+ },
+ "nested" : "offers"
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/not-filter.asciidoc b/docs/reference/query-dsl/filters/not-filter.asciidoc
new file mode 100644
index 0000000..629cb17
--- /dev/null
+++ b/docs/reference/query-dsl/filters/not-filter.asciidoc
@@ -0,0 +1,82 @@
+[[query-dsl-not-filter]]
+=== Not Filter
+
+A filter that filters out matched documents using a query. Can be placed
+within queries that accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "not" : {
+ "range" : {
+ "postDate" : {
+ "from" : "2010-03-01",
+ "to" : "2010-04-01"
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Or, in a longer form with a `filter` element:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "not" : {
+ "filter" : {
+ "range" : {
+ "postDate" : {
+ "from" : "2010-03-01",
+ "to" : "2010-04-01"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` in order to cache it (though usually not needed). Here is
+an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "not" : {
+ "filter" : {
+ "range" : {
+ "postDate" : {
+ "from" : "2010-03-01",
+ "to" : "2010-04-01"
+ }
+ }
+ },
+ "_cache" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/or-filter.asciidoc b/docs/reference/query-dsl/filters/or-filter.asciidoc
new file mode 100644
index 0000000..9c68cb9
--- /dev/null
+++ b/docs/reference/query-dsl/filters/or-filter.asciidoc
@@ -0,0 +1,59 @@
+[[query-dsl-or-filter]]
+=== Or Filter
+
+A filter that matches documents using the `OR` boolean operator on other
+filters. Can be placed within queries that accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "or" : [
+ {
+ "term" : { "name.second" : "banon" }
+ },
+ {
+ "term" : { "name.nick" : "kimchy" }
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` in order to cache it (though usually not needed). Since
+the `_cache` element must be set on the `or` filter itself, the
+structure then changes a bit, with the filters provided within a
+`filters` element:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "or" : {
+ "filters" : [
+ {
+ "term" : { "name.second" : "banon" }
+ },
+ {
+ "term" : { "name.nick" : "kimchy" }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/prefix-filter.asciidoc b/docs/reference/query-dsl/filters/prefix-filter.asciidoc
new file mode 100644
index 0000000..d29f570
--- /dev/null
+++ b/docs/reference/query-dsl/filters/prefix-filter.asciidoc
@@ -0,0 +1,37 @@
+[[query-dsl-prefix-filter]]
+=== Prefix Filter
+
+Filters documents that have fields containing terms with a specified
+prefix (*not analyzed*). Similar to the
+<<query-dsl-prefix-query,prefix query>>, except that it acts as
+a filter. Can be placed within queries that accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "prefix" : { "user" : "ki" }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is cached by default. The `_cache` can be set
+to `false` in order not to cache it. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "prefix" : {
+ "user" : "ki",
+ "_cache" : false
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/query-filter.asciidoc b/docs/reference/query-dsl/filters/query-filter.asciidoc
new file mode 100644
index 0000000..8cd3858
--- /dev/null
+++ b/docs/reference/query-dsl/filters/query-filter.asciidoc
@@ -0,0 +1,50 @@
+[[query-dsl-query-filter]]
+=== Query Filter
+
+Wraps any query to be used as a filter. Can be placed within queries
+that accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "constantScore" : {
+ "filter" : {
+ "query" : {
+ "query_string" : {
+ "query" : "this AND that OR thus"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` to cache the *result* of the filter. This is handy when
+the same query is used in several (many) other queries. Note, the
+cost of the first execution is higher when caching, since the filter
+must be computed in a way that can satisfy different queries.
+
+Setting the `_cache` element requires a different format for the
+`query`:
+
+[source,js]
+--------------------------------------------------
+{
+ "constantScore" : {
+ "filter" : {
+ "fquery" : {
+ "query" : {
+ "query_string" : {
+ "query" : "this AND that OR thus"
+ }
+ },
+ "_cache" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/range-filter.asciidoc b/docs/reference/query-dsl/filters/range-filter.asciidoc
new file mode 100644
index 0000000..7399d00
--- /dev/null
+++ b/docs/reference/query-dsl/filters/range-filter.asciidoc
@@ -0,0 +1,55 @@
+[[query-dsl-range-filter]]
+=== Range Filter
+
+Filters documents with fields that have terms within a certain range.
+Similar to <<query-dsl-range-query,range
+query>>, except that it acts as a filter. Can be placed within queries
+that accept a filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "range" : {
+ "age" : {
+ "gte": 10,
+ "lte": 20
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The `range` filter accepts the following parameters:
+
+[horizontal]
+`gte`:: Greater-than or equal to
+`gt`:: Greater-than
+`lte`:: Less-than or equal to
+`lt`:: Less-than
+
+[float]
+==== Execution
+
+The `execution` option controls how the range filter internally executes. The `execution` option accepts the following values:
+
+[horizontal]
+`index`:: Uses the field's inverted index in order to determine whether documents fall within the range filter's `from` and `to` range.
+`fielddata`:: Uses field data in order to determine whether documents fall within the range filter's `from` and `to` range.
+
+In general, for small ranges the `index` execution is faster, and for longer ranges the `fielddata` execution is faster.
+
+The `fielddata` execution, as the name suggests, uses field data and therefore requires more memory, so make sure you have
+sufficient memory on your nodes in order to use this execution mode. It usually makes sense to use it on fields you're
+already faceting or sorting by.
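+
+For example, a sketch of the filter above forced to use the `fielddata`
+execution mode:
+
+[source,js]
+--------------------------------------------------
+{
+    "constant_score" : {
+        "filter" : {
+            "range" : {
+                "age" : {
+                    "gte": 10,
+                    "lte": 20
+                },
+                "execution" : "fielddata"
+            }
+        }
+    }
+}
+--------------------------------------------------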
+
+[float]
+==== Caching
+
+The result of the filter is only automatically cached by default if the `execution` is set to `index`. The
+`_cache` can be set to `false` to turn it off.
+
+If the `now` date math expression is used without rounding, then a range filter will never be cached, even if `_cache` is
+set to `true`. Also, any filter that wraps this filter will never be cached.
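+
+For example, a sketch of a date range filter on a hypothetical `created`
+date field that rounds `now` down to the day, so the filter stays
+cacheable:
+
+[source,js]
+--------------------------------------------------
+{
+    "range" : {
+        "created" : {
+            "gte" : "now-1d/d",
+            "lt" : "now/d"
+        }
+    }
+}
+--------------------------------------------------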
diff --git a/docs/reference/query-dsl/filters/regexp-filter.asciidoc b/docs/reference/query-dsl/filters/regexp-filter.asciidoc
new file mode 100644
index 0000000..72cc169
--- /dev/null
+++ b/docs/reference/query-dsl/filters/regexp-filter.asciidoc
@@ -0,0 +1,53 @@
+[[query-dsl-regexp-filter]]
+=== Regexp Filter
+
+The `regexp` filter is similar to the
+<<query-dsl-regexp-query,regexp>> query, except
+that it is cacheable and can speed up performance when you are reusing
+this filter in your queries.
+
+See <<regexp-syntax>> for details of the supported regular expression language.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+You can also select the cache name and use the same regexp flags in the
+filter as in the query.
+
+*Note*: You have to enable caching explicitly in order to have the
+`regexp` filter cached.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test",
+ "_cache" : true,
+ "_cache_key" : "key"
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/script-filter.asciidoc b/docs/reference/query-dsl/filters/script-filter.asciidoc
new file mode 100644
index 0000000..f9e0cd1
--- /dev/null
+++ b/docs/reference/query-dsl/filters/script-filter.asciidoc
@@ -0,0 +1,53 @@
+[[query-dsl-script-filter]]
+=== Script Filter
+
+A filter that allows you to define
+<<modules-scripting,scripts>> as filters. For
+example:
+
+[source,js]
+----------------------------------------------
+"filtered" : {
+ "query" : {
+ ...
+ },
+ "filter" : {
+ "script" : {
+ "script" : "doc['num1'].value > 1"
+ }
+ }
+}
+----------------------------------------------
+
+[float]
+==== Custom Parameters
+
+Scripts are compiled and cached for faster execution. If the same script
+can be used, just with different parameters provided, it is preferable
+to pass those parameters to the script itself, for example:
+
+[source,js]
+----------------------------------------------
+"filtered" : {
+ "query" : {
+ ...
+ },
+ "filter" : {
+ "script" : {
+ "script" : "doc['num1'].value > param1"
+ "params" : {
+ "param1" : 5
+ }
+ }
+ }
+}
+----------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is not cached by default. The `_cache` can be
+set to `true` to cache the *result* of the filter. This is handy when
+the same script and parameters are used on several (many) other queries.
+Note, the cost of the first execution is higher when caching, since the
+filter must be computed in a way that can satisfy different queries.
diff --git a/docs/reference/query-dsl/filters/term-filter.asciidoc b/docs/reference/query-dsl/filters/term-filter.asciidoc
new file mode 100644
index 0000000..09cd32d
--- /dev/null
+++ b/docs/reference/query-dsl/filters/term-filter.asciidoc
@@ -0,0 +1,38 @@
+[[query-dsl-term-filter]]
+=== Term Filter
+
+Filters documents that have fields that contain a term (*not analyzed*).
+Similar to <<query-dsl-term-query,term query>>,
+except that it acts as a filter. Can be placed within queries that
+accept a filter, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "term" : { "user" : "kimchy"}
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is automatically cached by default. The
+`_cache` can be set to `false` to turn it off. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "term" : {
+ "user" : "kimchy",
+ "_cache" : false
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/filters/terms-filter.asciidoc b/docs/reference/query-dsl/filters/terms-filter.asciidoc
new file mode 100644
index 0000000..4de6f12
--- /dev/null
+++ b/docs/reference/query-dsl/filters/terms-filter.asciidoc
@@ -0,0 +1,227 @@
+[[query-dsl-terms-filter]]
+=== Terms Filter
+
+Filters documents that have fields that match any of the provided terms
+(*not analyzed*). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "terms" : { "user" : ["kimchy", "elasticsearch"]}
+ }
+ }
+}
+--------------------------------------------------
+
+The `terms` filter is also aliased with `in` as the filter name for
+simpler usage.
+
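+For example, the filter above can equivalently be written with the `in`
+alias:
+
+[source,js]
+--------------------------------------------------
+{
+    "constant_score" : {
+        "filter" : {
+            "in" : { "user" : ["kimchy", "elasticsearch"]}
+        }
+    }
+}
+--------------------------------------------------
+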
+[float]
+==== Execution Mode
+
+The terms filter executes by iterating over the terms provided,
+finding matching docs (loading them into a bitset), and caching it.
+Sometimes a different execution model is wanted; while such models can
+be achieved by building more complex queries in the DSL, they can also
+be supported in the more compact model that the terms filter provides.
+
+The `execution` option accepts the following values:
+
+[horizontal]
+`plain`::
+    The default. Iterates over all the terms, building a bit set of
+    matching docs, and filtering with it. The total filter is
+    cached.
+
+`fielddata`::
+    Generates a terms filter that uses the fielddata cache to
+ compare terms. This execution mode is great to use when filtering
+ on a field that is already loaded into the fielddata cache from
+ faceting, sorting, or index warmers. When filtering on
+ a large number of terms, this execution can be considerably faster
+ than the other modes. The total filter is not cached unless
+ explicitly configured to do so.
+
+`bool`::
+ Generates a term filter (which is cached) for each term, and
+ wraps those in a bool filter. The bool filter itself is not cached as it
+ can operate very quickly on the cached term filters.
+
+`and`::
+ Generates a term filter (which is cached) for each term, and
+ wraps those in an and filter. The and filter itself is not cached.
+
+`or`::
+ Generates a term filter (which is cached) for each term, and
+ wraps those in an or filter. The or filter itself is not cached.
+ Generally, the `bool` execution mode should be preferred.
+
+If you don't want the generated individual term queries to be cached,
+you can use: `bool_nocache`, `and_nocache` or `or_nocache` instead, but
+be aware that this will affect performance.
+
+The "total" terms filter caching can still be explicitly controlled
+using the `_cache` option. Note the default value for it depends on the
+execution value.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "terms" : {
+ "user" : ["kimchy", "elasticsearch"],
+ "execution" : "bool",
+ "_cache": true
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Caching
+
+The result of the filter is automatically cached by default. The
+`_cache` can be set to `false` to turn it off.
+
+[float]
+==== Terms lookup mechanism
+
+When a `terms` filter needs to specify a lot of terms, it can
+be beneficial to fetch those term values from a document in an index. A
+concrete example would be to filter tweets tweeted by your followers.
+The number of user ids specified in the terms filter can potentially be
+very large. In this scenario it makes sense to use the terms filter's terms
+lookup mechanism.
+
+The terms lookup mechanism supports the following options:
+
+[horizontal]
+`index`::
+ The index to fetch the term values from. Defaults to the
+ current index.
+
+`type`::
+ The type to fetch the term values from.
+
+`id`::
+ The id of the document to fetch the term values from.
+
+`path`::
+ The field specified as path to fetch the actual values for the
+ `terms` filter.
+
+`routing`::
+ A custom routing value to be used when retrieving the
+ external terms doc.
+
+`cache`::
+ Whether to cache the filter built from the retrieved document
+ (`true` - default) or whether to fetch and rebuild the filter on every
+ request (`false`). See "<<query-dsl-terms-filter-lookup-caching,Terms lookup caching>>" below
+
+The values for the `terms` filter will be fetched from a field in a
+document with the specified id in the specified type and index.
+Internally a get request is executed to fetch the values from the
+specified path. At the moment for this feature to work the `_source`
+needs to be stored.
+
+Also, consider using an index with a single shard and fully replicated
+across all nodes if the "reference" terms data is not large. The lookup
+terms filter will prefer to execute the get request on a local node if
+possible, reducing the need for networking.
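+
+For example, a sketch of creating such a lookup index (reusing the `users`
+index from the example below; the `auto_expand_replicas` setting keeps a
+copy of the single shard on every node):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/users -d '{
+    "settings" : {
+        "number_of_shards" : 1,
+        "auto_expand_replicas" : "0-all"
+    }
+}'
+--------------------------------------------------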
+
+["float",id="query-dsl-terms-filter-lookup-caching"]
+==== Terms lookup caching
+
+There is an additional cache involved, which caches the lookup of the
+lookup document to the actual terms. This lookup cache is an LRU cache
+with the following options:
+
+`indices.cache.filter.terms.size`::
+ The size of the lookup cache. The default is `10mb`.
+
+`indices.cache.filter.terms.expire_after_access`::
+ The time after the last read an entry should expire. Disabled by default.
+
+`indices.cache.filter.terms.expire_after_write`::
+ The time after the last write an entry should expire. Disabled by default.
+
+All options for the document lookup cache can only be configured
+via the `elasticsearch.yml` file.
+
+When using the terms lookup, the `execution` option isn't taken into
+account; the filter behaves as if the execution mode were set to `plain`.
+
+[float]
+==== Terms lookup twitter example
+
+[source,js]
+--------------------------------------------------
+# index the information for user with id 2, specifically, its followers
+curl -XPUT localhost:9200/users/user/2 -d '{
+ "followers" : ["1", "3"]
+}'
+
+# index a tweet, from user with id 2
+curl -XPUT localhost:9200/tweets/tweet/1 -d '{
+ "user" : "2"
+}'
+
+# search on all the tweets that match the followers of user 2
+curl -XGET localhost:9200/tweets/_search -d '{
+ "query" : {
+ "filtered" : {
+ "filter" : {
+ "terms" : {
+ "user" : {
+ "index" : "users",
+ "type" : "user",
+ "id" : "2",
+ "path" : "followers"
+ },
+ "_cache_key" : "user_2_friends"
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+The above is highly optimized, both in the sense that the list of
+followers will not be fetched if the filter is already cached in the
+filter cache, and in that an internal LRU cache is used for fetching
+external values for the terms filter. Also, the entry in the filter
+cache will not hold `all` the terms, reducing the memory required for it.
+
+Setting `_cache_key` is recommended, so that it is simple to clear the
+cache associated with it using the clear cache API. For example:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/tweets/_cache/clear?filter_keys=user_2_friends'
+--------------------------------------------------
+
+The structure of the external terms document can also include an array
+of inner objects, for example:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT localhost:9200/users/user/2 -d '{
+ "followers" : [
+ {
+ "id" : "1"
+ },
+ {
+ "id" : "2"
+ }
+ ]
+}'
+--------------------------------------------------
+
+In which case, the lookup path will be `followers.id`.
diff --git a/docs/reference/query-dsl/filters/type-filter.asciidoc b/docs/reference/query-dsl/filters/type-filter.asciidoc
new file mode 100644
index 0000000..07bde38
--- /dev/null
+++ b/docs/reference/query-dsl/filters/type-filter.asciidoc
@@ -0,0 +1,15 @@
+[[query-dsl-type-filter]]
+=== Type Filter
+
+Filters documents matching the provided document / mapping type. Note,
+this filter can work even when the `_type` field is not indexed (using
+the <<mapping-uid-field,_uid>> field).
+
+[source,js]
+--------------------------------------------------
+{
+ "type" : {
+ "value" : "my_type"
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries.asciidoc b/docs/reference/query-dsl/queries.asciidoc
new file mode 100644
index 0000000..e82600a
--- /dev/null
+++ b/docs/reference/query-dsl/queries.asciidoc
@@ -0,0 +1,83 @@
+[[query-dsl-queries]]
+== Queries
+
+As a general rule, queries should be used instead of filters:
+
+* for full text search
+* where the result depends on a relevance score
+
+include::queries/match-query.asciidoc[]
+
+include::queries/multi-match-query.asciidoc[]
+
+include::queries/bool-query.asciidoc[]
+
+include::queries/boosting-query.asciidoc[]
+
+include::queries/common-terms-query.asciidoc[]
+
+include::queries/constant-score-query.asciidoc[]
+
+include::queries/dis-max-query.asciidoc[]
+
+include::queries/filtered-query.asciidoc[]
+
+include::queries/flt-query.asciidoc[]
+
+include::queries/flt-field-query.asciidoc[]
+
+include::queries/function-score-query.asciidoc[]
+
+include::queries/fuzzy-query.asciidoc[]
+
+include::queries/geo-shape-query.asciidoc[]
+
+include::queries/has-child-query.asciidoc[]
+
+include::queries/has-parent-query.asciidoc[]
+
+include::queries/ids-query.asciidoc[]
+
+include::queries/indices-query.asciidoc[]
+
+include::queries/match-all-query.asciidoc[]
+
+include::queries/mlt-query.asciidoc[]
+
+include::queries/mlt-field-query.asciidoc[]
+
+include::queries/nested-query.asciidoc[]
+
+include::queries/prefix-query.asciidoc[]
+
+include::queries/query-string-query.asciidoc[]
+
+include::queries/simple-query-string-query.asciidoc[]
+
+include::queries/range-query.asciidoc[]
+
+include::queries/regexp-query.asciidoc[]
+
+include::queries/span-first-query.asciidoc[]
+
+include::queries/span-multi-term-query.asciidoc[]
+
+include::queries/span-near-query.asciidoc[]
+
+include::queries/span-not-query.asciidoc[]
+
+include::queries/span-or-query.asciidoc[]
+
+include::queries/span-term-query.asciidoc[]
+
+include::queries/term-query.asciidoc[]
+
+include::queries/terms-query.asciidoc[]
+
+include::queries/top-children-query.asciidoc[]
+
+include::queries/wildcard-query.asciidoc[]
+
+include::queries/minimum-should-match.asciidoc[]
+
+include::queries/multi-term-rewrite.asciidoc[]
diff --git a/docs/reference/query-dsl/queries/bool-query.asciidoc b/docs/reference/query-dsl/queries/bool-query.asciidoc
new file mode 100644
index 0000000..a9b565c
--- /dev/null
+++ b/docs/reference/query-dsl/queries/bool-query.asciidoc
@@ -0,0 +1,54 @@
+[[query-dsl-bool-query]]
+=== Bool Query
+
+A query that matches documents matching boolean combinations of other
+queries. The bool query maps to Lucene `BooleanQuery`. It is built using
+one or more boolean clauses, each clause with a typed occurrence. The
+occurrence types are:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Occur |Description
+|`must` |The clause (query) must appear in matching documents.
+
+|`should` |The clause (query) should appear in the matching document. In
+a boolean query with no `must` clauses, one or more `should` clauses
+must match a document. The minimum number of should clauses to match can
+be set using the
+<<query-dsl-minimum-should-match,`minimum_should_match`>>
+parameter.
+
+|`must_not` |The clause (query) must not appear in the matching
+documents.
+|=======================================================================
+
+The bool query also supports a `disable_coord` parameter (defaults to
+`false`). The coord similarity computes a score factor based
+on the fraction of all query terms that a document contains. See the
+Lucene `BooleanQuery` documentation for more details.
+
+[source,js]
+--------------------------------------------------
+{
+ "bool" : {
+ "must" : {
+ "term" : { "user" : "kimchy" }
+ },
+ "must_not" : {
+ "range" : {
+ "age" : { "from" : 10, "to" : 20 }
+ }
+ },
+ "should" : [
+ {
+ "term" : { "tag" : "wow" }
+ },
+ {
+ "term" : { "tag" : "elasticsearch" }
+ }
+ ],
+ "minimum_should_match" : 1,
+ "boost" : 1.0
+ }
+}
+--------------------------------------------------
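+
+For example, a minimal sketch that disables the coord factor; the
+`disable_coord` flag sits at the same level as the clauses:
+
+[source,js]
+--------------------------------------------------
+{
+    "bool" : {
+        "disable_coord" : true,
+        "should" : [
+            {
+                "term" : { "tag" : "wow" }
+            },
+            {
+                "term" : { "tag" : "elasticsearch" }
+            }
+        ]
+    }
+}
+--------------------------------------------------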
diff --git a/docs/reference/query-dsl/queries/boosting-query.asciidoc b/docs/reference/query-dsl/queries/boosting-query.asciidoc
new file mode 100644
index 0000000..969b3bb
--- /dev/null
+++ b/docs/reference/query-dsl/queries/boosting-query.asciidoc
@@ -0,0 +1,26 @@
+[[query-dsl-boosting-query]]
+=== Boosting Query
+
+The `boosting` query can be used to effectively demote results that
+match a given query. Unlike the "NOT" clause in bool query, this still
+selects documents that contain undesirable terms, but reduces their
+overall score.
+
+[source,js]
+--------------------------------------------------
+{
+ "boosting" : {
+ "positive" : {
+ "term" : {
+ "field1" : "value1"
+ }
+ },
+ "negative" : {
+ "term" : {
+ "field2" : "value2"
+ }
+ },
+ "negative_boost" : 0.2
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/common-terms-query.asciidoc b/docs/reference/query-dsl/queries/common-terms-query.asciidoc
new file mode 100644
index 0000000..256d9bb
--- /dev/null
+++ b/docs/reference/query-dsl/queries/common-terms-query.asciidoc
@@ -0,0 +1,263 @@
+[[query-dsl-common-terms-query]]
+=== Common Terms Query
+
+The `common` terms query is a modern alternative to stopwords which
+improves the precision and recall of search results (by taking stopwords
+into account), without sacrificing performance.
+
+[float]
+==== The problem
+
+Every term in a query has a cost. A search for `"The brown fox"`
+requires three term queries, one for each of `"the"`, `"brown"` and
+`"fox"`, all of which are executed against all documents in the index.
+The query for `"the"` is likely to match many documents and thus has a
+much smaller impact on relevance than the other two terms.
+
+Previously, the solution to this problem was to ignore terms with high
+frequency. By treating `"the"` as a _stopword_, we reduce the index size
+and reduce the number of term queries that need to be executed.
+
+The problem with this approach is that, while stopwords have a small
+impact on relevance, they are still important. If we remove stopwords,
+we lose precision (eg we are unable to distinguish between `"happy"`
+and `"not happy"`) and we lose recall (eg text like `"The The"` or
+`"To be or not to be"` would simply not exist in the index).
+
+[float]
+==== The solution
+
+The `common` terms query divides the query terms into two groups: more
+important (ie _low frequency_ terms) and less important (ie _high
+frequency_ terms which would previously have been stopwords).
+
+First it searches for documents which match the more important terms.
+These are the terms which appear in fewer documents and have a greater
+impact on relevance.
+
+Then, it executes a second query for the less important terms -- terms
+which appear frequently and have a low impact on relevance. But instead
+of calculating the relevance score for *all* matching documents, it only
+calculates the `_score` for documents already matched by the first
+query. In this way the high frequency terms can improve the relevance
+calculation without paying the cost of poor performance.
+
+If a query consists only of high frequency terms, then a single query is
+executed as an `AND` (conjunction) query, in other words all terms are
+required. Even though each individual term will match many documents,
+the combination of terms narrows down the resultset to only the most
+relevant. The single query can also be executed as an `OR` with a
+specific
+<<query-dsl-minimum-should-match,`minimum_should_match`>>;
+in this case a high enough value should probably be used.
+
+Terms are allocated to the high or low frequency groups based on the
+`cutoff_frequency`, which can be specified as an absolute frequency
+(`>=1`) or as a relative frequency (`0.0 .. 1.0`).
+
+Perhaps the most interesting property of this query is that it adapts to
+domain specific stopwords automatically. For example, on a video hosting
+site, common terms like `"clip"` or `"video"` will automatically behave
+as stopwords without the need to maintain a manual list.
+
+[float]
+==== Examples
+
+In this example, words that have a document frequency greater than 0.1%
+(eg `"this"` and `"is"`) will be treated as _common terms_.
+
+[source,js]
+--------------------------------------------------
+{
+ "common": {
+ "body": {
+ "query": "this is bonsai cool",
+ "cutoff_frequency": 0.001
+ }
+ }
+}
+--------------------------------------------------
+
+The number of terms which should match can be controlled with the
+<<query-dsl-minimum-should-match,`minimum_should_match`>>
+(`high_freq`, `low_freq`), `low_freq_operator` (default `"or"`) and
+`high_freq_operator` (default `"or"`) parameters.
+
+For low frequency terms, set the `low_freq_operator` to `"and"` to make
+all terms required:
+
+[source,js]
+--------------------------------------------------
+{
+ "common": {
+ "body": {
+ "query": "nelly the elephant as a cartoon",
+ "cutoff_frequency": 0.001,
+ "low_freq_operator" "and"
+ }
+ }
+}
+--------------------------------------------------
+
+which is roughly equivalent to:
+
+[source,js]
+--------------------------------------------------
+{
+ "bool": {
+ "must": [
+ { "term": { "body": "nelly"}},
+ { "term": { "body": "elephant"}},
+ { "term": { "body": "cartoon"}}
+ ],
+ "should": [
+ { "term": { "body": "the"}}
+ { "term": { "body": "as"}}
+ { "term": { "body": "a"}}
+ ]
+ }
+}
+--------------------------------------------------
+
+Alternatively use
+<<query-dsl-minimum-should-match,`minimum_should_match`>>
+to specify a minimum number or percentage of low frequency terms which
+must be present, for instance:
+
+[source,js]
+--------------------------------------------------
+{
+ "common": {
+ "body": {
+ "query": "nelly the elephant as a cartoon",
+ "cutoff_frequency": 0.001,
+ "minimum_should_match": 2
+ }
+ }
+}
+--------------------------------------------------
+
+which is roughly equivalent to:
+
+[source,js]
+--------------------------------------------------
+{
+ "bool": {
+ "must": {
+ "bool": {
+ "should": [
+ { "term": { "body": "nelly"}},
+ { "term": { "body": "elephant"}},
+ { "term": { "body": "cartoon"}}
+ ],
+ "minimum_should_match": 2
+ }
+ },
+ "should": [
+ { "term": { "body": "the"}}
+ { "term": { "body": "as"}}
+ { "term": { "body": "a"}}
+ ]
+ }
+}
+--------------------------------------------------
+
+A different
+<<query-dsl-minimum-should-match,`minimum_should_match`>>
+can be applied for low and high frequency terms with the additional
+`low_freq` and `high_freq` parameters. Here is an example when providing
+additional parameters (note the change in structure):
+
+[source,js]
+--------------------------------------------------
+{
+ "common": {
+ "body": {
+ "query": "nelly the elephant not as a cartoon",
+ "cutoff_frequency": 0.001,
+ "minimum_should_match": {
+ "low_freq" : 2,
+ "high_freq" : 3
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+which is roughly equivalent to:
+
+[source,js]
+--------------------------------------------------
+{
+ "bool": {
+ "must": {
+ "bool": {
+ "should": [
+ { "term": { "body": "nelly"}},
+ { "term": { "body": "elephant"}},
+ { "term": { "body": "cartoon"}}
+ ],
+ "minimum_should_match": 2
+ }
+ },
+ "should": {
+ "bool": {
+ "should": [
+ { "term": { "body": "the"}},
+ { "term": { "body": "not"}},
+ { "term": { "body": "as"}},
+ { "term": { "body": "a"}}
+ ],
+ "minimum_should_match": 3
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In this case it means the high frequency terms have only an impact on
+relevance when there are at least three of them. But the most
+interesting use of the
+<<query-dsl-minimum-should-match,`minimum_should_match`>>
+for high frequency terms is when there are only high frequency terms:
+
+[source,js]
+--------------------------------------------------
+{
+ "common": {
+ "body": {
+ "query": "how not to be",
+ "cutoff_frequency": 0.001,
+ "minimum_should_match": {
+ "low_freq" : 2,
+ "high_freq" : 3
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+which is roughly equivalent to:
+
+[source,js]
+--------------------------------------------------
+{
+ "bool": {
+ "should": [
+ { "term": { "body": "how"}},
+ { "term": { "body": "not"}},
+ { "term": { "body": "to"}},
+ { "term": { "body": "be"}}
+ ],
+ "minimum_should_match": "3<50%"
+ }
+}
+--------------------------------------------------
+
+The high frequency generated query is then slightly less restrictive
+than with an `AND`.
+
+The `common` terms query also supports `boost`, `analyzer` and
+`disable_coord` as parameters.
diff --git a/docs/reference/query-dsl/queries/constant-score-query.asciidoc b/docs/reference/query-dsl/queries/constant-score-query.asciidoc
new file mode 100644
index 0000000..06ed6f7
--- /dev/null
+++ b/docs/reference/query-dsl/queries/constant-score-query.asciidoc
@@ -0,0 +1,36 @@
+[[query-dsl-constant-score-query]]
+=== Constant Score Query
+
+A query that wraps a filter or another query and simply returns a
+constant score equal to the query boost for every document in the
+filter. Maps to Lucene `ConstantScoreQuery`.
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "filter" : {
+ "term" : { "user" : "kimchy"}
+ },
+ "boost" : 1.2
+ }
+}
+--------------------------------------------------
+
+The filter object can hold only filter elements, not queries. Filters
+can be much faster compared to queries since they don't perform any
+scoring, especially when they are cached.
+
+A query can also be wrapped in a `constant_score` query:
+
+[source,js]
+--------------------------------------------------
+{
+ "constant_score" : {
+ "query" : {
+ "term" : { "user" : "kimchy"}
+ },
+ "boost" : 1.2
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/dis-max-query.asciidoc b/docs/reference/query-dsl/queries/dis-max-query.asciidoc
new file mode 100644
index 0000000..2938c8d
--- /dev/null
+++ b/docs/reference/query-dsl/queries/dis-max-query.asciidoc
@@ -0,0 +1,44 @@
+[[query-dsl-dis-max-query]]
+=== Dis Max Query
+
+A query that generates the union of documents produced by its
+subqueries, and that scores each document with the maximum score for
+that document as produced by any subquery, plus a tie breaking increment
+for any additional matching subqueries.
+
+This is useful when searching for a word in multiple fields with
+different boost factors (so that the fields cannot be combined
+equivalently into a single search field). We want the primary score to
+be the one associated with the highest boost, not the sum of the field
+scores (as Boolean Query would give). If the query is "albino elephant"
+this ensures that "albino" matching one field and "elephant" matching
+another gets a higher score than "albino" matching both fields. To get
+this result, use both Boolean Query and DisjunctionMax Query: for each
+term a DisjunctionMaxQuery searches for it in each field, while the set
+of these DisjunctionMaxQuery's is combined into a BooleanQuery.
+
+The tie breaker capability allows results that include the same term in
+multiple fields to be judged better than results that include this term
+in only the best of those multiple fields, without confusing this with
+the better case of two different terms in the multiple fields. The
+default `tie_breaker` is `0.0`.
+
+This query maps to Lucene `DisjunctionMaxQuery`.
+
+[source,js]
+--------------------------------------------------
+{
+ "dis_max" : {
+ "tie_breaker" : 0.7,
+ "boost" : 1.2,
+ "queries" : [
+ {
+ "term" : { "age" : 34 }
+ },
+ {
+ "term" : { "age" : 35 }
+ }
+ ]
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/filtered-query.asciidoc b/docs/reference/query-dsl/queries/filtered-query.asciidoc
new file mode 100644
index 0000000..bf51e9c
--- /dev/null
+++ b/docs/reference/query-dsl/queries/filtered-query.asciidoc
@@ -0,0 +1,25 @@
+[[query-dsl-filtered-query]]
+=== Filtered Query
+
+A query that applies a filter to the results of another query. This
+query maps to Lucene `FilteredQuery`.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "tag" : "wow" }
+ },
+ "filter" : {
+ "range" : {
+ "age" : { "from" : 10, "to" : 20 }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The filter object can hold only filter elements, not queries. Filters
+can be much faster compared to queries since they don't perform any
+scoring, especially when they are cached.
diff --git a/docs/reference/query-dsl/queries/flt-field-query.asciidoc b/docs/reference/query-dsl/queries/flt-field-query.asciidoc
new file mode 100644
index 0000000..205dc61
--- /dev/null
+++ b/docs/reference/query-dsl/queries/flt-field-query.asciidoc
@@ -0,0 +1,47 @@
+[[query-dsl-flt-field-query]]
+=== Fuzzy Like This Field Query
+
+The `fuzzy_like_this_field` query is the same as the `fuzzy_like_this`
+query, except that it runs against a single field. It provides nicer
+query DSL over the generic `fuzzy_like_this` query, and supports typed
+fields queries (automatically wrapping typed fields with a type filter to
+match only on the specific type).
+
+[source,js]
+--------------------------------------------------
+{
+ "fuzzy_like_this_field" : {
+ "name.first" : {
+ "like_text" : "text like this one",
+ "max_query_terms" : 12
+ }
+ }
+}
+--------------------------------------------------
+
+`fuzzy_like_this_field` can be shortened to `flt_field`.
+
+The `fuzzy_like_this_field` top level parameters include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`like_text` |The text to find documents like it, *required*.
+
+|`ignore_tf` |Should term frequency be ignored. Defaults to `false`.
+
+|`max_query_terms` |The maximum number of query terms that will be
+included in any generated query. Defaults to `25`.
+
+|`fuzziness` |The fuzziness of the term variants. Defaults
+to `0.5`. See <<fuzziness>>.
+
+|`prefix_length` |Length of required common prefix on variant terms.
+Defaults to `0`.
+
+|`boost` |Sets the boost value of the query. Defaults to `1.0`.
+
+|`analyzer` |The analyzer that will be used to analyze the text.
+Defaults to the analyzer associated with the field.
+|=======================================================================
+
diff --git a/docs/reference/query-dsl/queries/flt-query.asciidoc b/docs/reference/query-dsl/queries/flt-query.asciidoc
new file mode 100644
index 0000000..231de6b
--- /dev/null
+++ b/docs/reference/query-dsl/queries/flt-query.asciidoc
@@ -0,0 +1,65 @@
+[[query-dsl-flt-query]]
+=== Fuzzy Like This Query
+
+The fuzzy like this query finds documents that are "like" the provided
+text by running it against one or more fields.
+
+[source,js]
+--------------------------------------------------
+{
+ "fuzzy_like_this" : {
+ "fields" : ["name.first", "name.last"],
+ "like_text" : "text like this one",
+ "max_query_terms" : 12
+ }
+}
+--------------------------------------------------
+
+`fuzzy_like_this` can be shortened to `flt`.
+
+The `fuzzy_like_this` top level parameters include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`fields` |A list of the fields to run the query against.
+Defaults to the `_all` field.
+
+|`like_text` |The text to find documents like it, *required*.
+
+|`ignore_tf` |Should term frequency be ignored. Defaults to `false`.
+
+|`max_query_terms` |The maximum number of query terms that will be
+included in any generated query. Defaults to `25`.
+
+|`fuzziness` |The minimum similarity of the term variants. Defaults
+to `0.5`. See <<fuzziness>>.
+
+|`prefix_length` |Length of required common prefix on variant terms.
+Defaults to `0`.
+
+|`boost` |Sets the boost value of the query. Defaults to `1.0`.
+
+|`analyzer` |The analyzer that will be used to analyze the text.
+Defaults to the analyzer associated with the field.
+|=======================================================================
+
+[float]
+==== How it Works
+
+Fuzzifies ALL terms provided as strings and then picks the best n
+differentiating terms. In effect this mixes the behaviour of FuzzyQuery
+and MoreLikeThis but with special consideration of fuzzy scoring
+factors. This generally produces good results for queries where users
+may provide details in a number of fields and have no knowledge of
+boolean query syntax and also want a degree of fuzzy matching and a fast
+query.
+
+For each source term the fuzzy variants are held in a BooleanQuery with
+no coord factor (because we are not looking for matches on multiple
+variants in any one doc). Additionally, a specialized TermQuery is used
+for variants and does not use that variant term's IDF because this would
+favor rarer terms, such as misspellings. Instead, all variants use the
+same IDF ranking (the one for the source query term) and this is
+factored into the variant's boost. If the source query term does not
+exist in the index the average IDF of the variants is used.
diff --git a/docs/reference/query-dsl/queries/function-score-query.asciidoc b/docs/reference/query-dsl/queries/function-score-query.asciidoc
new file mode 100644
index 0000000..fa5b2bd
--- /dev/null
+++ b/docs/reference/query-dsl/queries/function-score-query.asciidoc
@@ -0,0 +1,491 @@
+[[query-dsl-function-score-query]]
+=== Function Score Query
+
+The `function_score` allows you to modify the score of documents that are
+retrieved by a query. This can be useful if, for example, a score
+function is computationally expensive and it is sufficient to compute
+the score on a filtered set of documents.
+
+`function_score` provides the same functionality that
+`custom_boost_factor`, `custom_score` and
+`custom_filters_score` provided,
+and furthermore adds scoring functionality such as
+distance and recency scoring (see description below).
+
+==== Using function score
+
+To use `function_score`, the user has to define a query and one or
+several functions that compute a new score for each document returned
+by the query.
+
+`function_score` can be used with only one function like this:
+
+[source,js]
+--------------------------------------------------
+"function_score": {
+ "(query|filter)": {},
+ "boost": "boost for the whole query",
+ "FUNCTION": {},
+ "boost_mode":"(multiply|replace|...)"
+}
+--------------------------------------------------
+
+Furthermore, several functions can be combined. In this case one can
+optionally choose to apply the function only if a document matches a
+given filter:
+
+[source,js]
+--------------------------------------------------
+"function_score": {
+ "(query|filter)": {},
+ "boost": "boost for the whole query",
+ "functions": [
+ {
+ "filter": {},
+ "FUNCTION": {}
+ },
+ {
+ "FUNCTION": {}
+ }
+ ],
+ "max_boost": number,
+ "score_mode": "(multiply|max|...)",
+ "boost_mode": "(multiply|replace|...)"
+}
+--------------------------------------------------
+
+If no filter is given with a function, this is equivalent to specifying
+`"match_all": {}`.
+
+First, each document is scored by the defined functions. The parameter
+`score_mode` specifies how the computed scores are combined:
+
+[horizontal]
+`multiply`:: scores are multiplied (default)
+`sum`:: scores are summed
+`avg`:: scores are averaged
+`first`:: the first function that has a matching filter
+ is applied
+`max`:: maximum score is used
+`min`:: minimum score is used
+
+The new score can be restricted to not exceed a certain limit by setting
+the `max_boost` parameter. The default for `max_boost` is FLT_MAX.
+
+Finally, the newly computed score is combined with the score of the
+query. The parameter `boost_mode` defines how:
+
+[horizontal]
+`multiply`:: query score and function score is multiplied (default)
+`replace`:: only function score is used, the query score is ignored
+`sum`:: query score and function score are added
+`avg`:: average
+`max`:: max of query score and function score
+`min`:: min of query score and function score
+
+
+==== Score functions
+
+The `function_score` query provides several types of score functions.
+
+===== Script score
+
+The `script_score` function allows you to wrap another query and customize
+its scoring, optionally with a computation derived from other numeric
+field values in the doc using a script expression. Here is a
+simple example:
+
+[source,js]
+--------------------------------------------------
+"script_score" : {
+ "script" : "_score * doc['my_numeric_field'].value"
+}
+--------------------------------------------------
+
+On top of the different scripting field values and expression, the
+`_score` script parameter can be used to retrieve the score based on the
+wrapped query.
+
+Scripts are cached for faster execution. If the script has parameters
+that it needs to take into account, it is preferable to reuse the same
+script, and provide parameters to it:
+
+[source,js]
+--------------------------------------------------
+"script_score": {
+ "lang": "lang",
+ "params": {
+ "param1": value1,
+ "param2": value2
+ },
+ "script": "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+}
+--------------------------------------------------
+
+Note that unlike the `custom_score` query, the
+score of the query is multiplied with the result of the script scoring. If
+you wish to inhibit this, set `"boost_mode": "replace"`.
+
+===== Boost factor
+
+The `boost_factor` score allows you to multiply the score by the provided
+`boost_factor`. This can sometimes be desired since a boost value set on
+specific queries gets normalized, while for this score function it does
+not.
+
+[source,js]
+--------------------------------------------------
+"boost_factor" : number
+--------------------------------------------------
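+
+For example, a sketch (following the `functions` array structure shown
+earlier) that triples the score of documents matching a hypothetical
+`tag` filter and leaves all others untouched:
+
+[source,js]
+--------------------------------------------------
+"function_score": {
+    "query": { "match_all": {} },
+    "functions": [
+        {
+            "filter": { "term": { "tag": "wow" } },
+            "boost_factor": 3
+        }
+    ],
+    "boost_mode": "multiply"
+}
+--------------------------------------------------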
+
+===== Random
+
+The `random_score` generates scores via a pseudo random number algorithm
+that is initialized with a `seed`.
+
+[source,js]
+--------------------------------------------------
+"random_score": {
+ "seed" : number
+}
+--------------------------------------------------
+
+===== Decay functions
+
+Decay functions score a document with a function that decays depending
+on the distance of a numeric field value of the document from a user
+given origin. This is similar to a range query, but with smooth edges
+instead of boxes.
+
+To use distance scoring on a query that has numerical fields, the user
+has to define an `origin` and a `scale` for each field. The `origin`
+is needed to define the ``central point'' from which the distance
+is calculated, and the `scale` to define the rate of decay. The
+decay function is specified as
+
+[source,js]
+--------------------------------------------------
+"DECAY_FUNCTION": {
+ "FIELD_NAME": {
+ "origin": "11, 12",
+ "scale": "2km",
+ "offset": "0km",
+ "decay": 0.33
+ }
+}
+--------------------------------------------------
+
+where `DECAY_FUNCTION` can be "linear", "exp" and "gauss" (see below). The specified field must be a numeric field. In the above example, the field is a <<mapping-geo-point-type>> and origin can be provided in geo format. `scale` and `offset` must be given with a unit in this case. If your field is a date field, you can set `scale` and `offset` as days, weeks, and so on. Example:
+
+[source,js]
+--------------------------------------------------
+ "DECAY_FUNCTION": {
+ "FIELD_NAME": {
+ "origin": "2013-09-17",
+ "scale": "10d",
+ "offset": "5d",
+ "decay" : 0.5
+ }
+ }
+--------------------------------------------------
+
+
+The format of the origin depends on the <<mapping-date-format>> defined in your mapping. If you do not define the origin, the current time is used.
+
+
+The `offset` and `decay` parameters are optional.
+
+[horizontal]
+`offset`::
+    If an `offset` is defined, the decay function will only be computed
+    for documents with a distance greater than the defined
+    `offset`. The default is 0.
+
+`decay`::
+ The `decay` parameter defines how documents are scored at the distance
+ given at `scale`. If no `decay` is defined, documents at the distance
+ `scale` will be scored 0.5.
+
+In the first example, your documents might represent hotels and contain a geo
+location field. You want to compute a decay function depending on how
+far the hotel is from a given location. You might not immediately see
+what scale to choose for the gauss function, but you can say something
+like: "At a distance of 2km from the desired location, the score should
+be reduced by one third."
+The parameter "scale" will then be adjusted automatically to assure that
+the score function computes a score of 0.5 for hotels that are 2km away
+from the desired location.
+
+
+In the second example, documents with a field value between 2013-09-12 and 2013-09-22 would get a weight of 1.0 and documents which are 15 days from that date a weight of 0.5.
+
+
+
+The `DECAY_FUNCTION` determines the shape of the decay:
+
+[horizontal]
+`gauss`::
+
+Normal decay, computed as:
++
+image:images/Gaussian.png[]
+
+`exp`::
+
+Exponential decay, computed as:
++
+image:images/Exponential.png[]
+
+
+`linear`::
+Linear decay, computed as:
++
+image:images/Linear.png[].
++
+In contrast to the normal and exponential decay, this function actually
+sets the score to 0 if the field value exceeds twice the user given
+scale value.
+
+==== Detailed example
+
+Suppose you are searching for a hotel in a certain town. Your budget is
+limited. Also, you would like the hotel to be close to the town center,
+so the farther the hotel is from the desired location the less likely
+you are to check in.
+
+You would like the query results that match your criterion (for
+example, "hotel, Nancy, non-smoker") to be scored with respect to
+distance to the town center and also the price.
+
+Intuitively, you would like to define the town center as the origin and
+maybe you are willing to walk 2km to the town center from the hotel. +
+In this case your *origin* for the location field is the town center
+and the *scale* is ~2km.
+
+If your budget is low, you would probably prefer something cheap above
+something expensive. For the price field, the *origin* would be 0 Euros
+and the *scale* depends on how much you are willing to pay, for example 20 Euros.
+
+In this example, the fields might be called "price" for the price of the
+hotel and "location" for the coordinates of this hotel.
+
+The function for `price` in this case would be
+
+[source,js]
+--------------------------------------------------
+"DECAY_FUNCTION": {
+ "price": {
+ "origin": "0",
+ "scale": "20"
+ }
+}
+--------------------------------------------------
+
+and for `location`:
+
+[source,js]
+--------------------------------------------------
+
+"DECAY_FUNCTION": {
+ "location": {
+ "origin": "11, 12",
+ "scale": "2km"
+ }
+}
+--------------------------------------------------
+
+where `DECAY_FUNCTION` can be "linear", "exp" and "gauss".
+
+Suppose you want to multiply these two functions on the original score,
+the request would look like this:
+
+[source,js]
+--------------------------------------------------
+curl 'localhost:9200/hotels/_search/' -d '{
+"query": {
+ "function_score": {
+ "functions": [
+ {
+ "DECAY_FUNCTION": {
+ "price": {
+ "origin": "0",
+ "scale": "20"
+ }
+ }
+ },
+ {
+ "DECAY_FUNCTION": {
+ "location": {
+ "origin": "11, 12",
+ "scale": "2km"
+ }
+ }
+ }
+ ],
+ "query": {
+ "match": {
+ "properties": "balcony"
+ }
+ },
+ "score_mode": "multiply"
+ }
+}
+}'
+--------------------------------------------------
+
+Next, we show what the computed score looks like for each of the three
+possible decay functions.
+
+===== Normal decay, keyword `gauss`
+
+When choosing `gauss` as the decay function in the above example, the
+contour and surface plot of the multiplier looks like this:
+
+image::https://f.cloud.github.com/assets/4320215/768157/cd0e18a6-e898-11e2-9b3c-f0145078bd6f.png[width="700px"]
+
+image::https://f.cloud.github.com/assets/4320215/768160/ec43c928-e898-11e2-8e0d-f3c4519dbd89.png[width="700px"]
+
+Suppose your original search results match three hotels:
+
+* "Backback Nap"
+* "Drink n Drive"
+* "BnB Bellevue".
+
+"Drink n Drive" is pretty far from your defined location (nearly 2 km)
+and is not too cheap (about 13 Euros) so it gets a low factor a factor
+of 0.56. "BnB Bellevue" and "Backback Nap" are both pretty close to the
+defined location but "BnB Bellevue" is cheaper, so it gets a multiplier
+of 0.86 whereas "Backpack Nap" gets a value of 0.66.
+
+===== Exponential decay, keyword `exp`
+
+When choosing `exp` as the decay function in the above example, the
+contour and surface plot of the multiplier looks like this:
+
+image::https://f.cloud.github.com/assets/4320215/768161/082975c0-e899-11e2-86f7-174c3a729d64.png[width="700px"]
+
+image::https://f.cloud.github.com/assets/4320215/768162/0b606884-e899-11e2-907b-aefc77eefef6.png[width="700px"]
+
+===== Linear decay, keyword `linear`
+
+When choosing `linear` as the decay function in the above example, the
+contour and surface plot of the multiplier looks like this:
+
+image::https://f.cloud.github.com/assets/4320215/768164/1775b0ca-e899-11e2-9f4a-776b406305c6.png[width="700px"]
+
+image::https://f.cloud.github.com/assets/4320215/768165/19d8b1aa-e899-11e2-91bc-6b0553e8d722.png[width="700px"]
+
+==== Supported fields for decay functions
+
+Only single valued numeric fields, including time and geo locations,
+are supported.
+
+==== What if a field is missing?
+
+If the numeric field is missing in the document, the function will
+return 1.
+
+==== Relation to `custom_boost`, `custom_score` and `custom_filters_score`
+
+The `custom_boost_factor` query
+
+[source,js]
+--------------------------------------------------
+"custom_boost_factor": {
+ "boost_factor": 5.2,
+ "query": {...}
+}
+--------------------------------------------------
+
+becomes
+
+[source,js]
+--------------------------------------------------
+"function_score": {
+ "boost_factor": 5.2,
+ "query": {...}
+}
+--------------------------------------------------
+
+The `custom_score` query
+
+[source,js]
+--------------------------------------------------
+"custom_score": {
+ "params": {
+ "param1": 2,
+ "param2": 3.1
+ },
+ "query": {...},
+ "script": "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+}
+--------------------------------------------------
+
+becomes
+
+[source,js]
+--------------------------------------------------
+"function_score": {
+ "boost_mode": "replace",
+ "query": {...},
+ "script_score": {
+ "params": {
+ "param1": 2,
+ "param2": 3.1
+ },
+ "script": "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+ }
+}
+--------------------------------------------------
+
+and the `custom_filters_score`
+
+[source,js]
+--------------------------------------------------
+"custom_filters_score": {
+ "filters": [
+ {
+ "boost_factor": "3",
+ "filter": {...}
+ },
+ {
+            "filter": {...},
+ "script": "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+ }
+ ],
+ "params": {
+ "param1": 2,
+ "param2": 3.1
+ },
+ "query": {...},
+ "score_mode": "first"
+}
+--------------------------------------------------
+
+becomes:
+
+[source,js]
+--------------------------------------------------
+"function_score": {
+ "functions": [
+ {
+ "boost_factor": "3",
+ "filter": {...}
+ },
+ {
+ "filter": {...},
+ "script_score": {
+ "params": {
+ "param1": 2,
+ "param2": 3.1
+ },
+ "script": "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+ }
+ }
+ ],
+ "query": {...},
+ "score_mode": "first"
+}
+--------------------------------------------------
+
+
diff --git a/docs/reference/query-dsl/queries/fuzzy-query.asciidoc b/docs/reference/query-dsl/queries/fuzzy-query.asciidoc
new file mode 100644
index 0000000..082f3f1
--- /dev/null
+++ b/docs/reference/query-dsl/queries/fuzzy-query.asciidoc
@@ -0,0 +1,102 @@
+[[query-dsl-fuzzy-query]]
+=== Fuzzy Query
+
+The fuzzy query uses similarity based on Levenshtein edit distance for
+`string` fields, and a `+/-` margin on numeric and date fields.
+
+==== String fields
+
+The `fuzzy` query generates all possible matching terms that are within the
+maximum edit distance specified in `fuzziness` and then checks the term
+dictionary to find out which of those generated terms actually exist in the
+index.
+
+Here is a simple example:
+
+[source,js]
+--------------------------------------------------
+{
+ "fuzzy" : { "user" : "ki" }
+}
+--------------------------------------------------
+
+Or with more advanced settings:
+
+[source,js]
+--------------------------------------------------
+{
+ "fuzzy" : {
+ "user" : {
+ "value" : "ki",
+ "boost" : 1.0,
+ "fuzziness" : 2,
+ "prefix_length" : 0,
+ "max_expansions": 100
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== Parameters
+
+[horizontal]
+`fuzziness`::
+
+ The maximum edit distance. Defaults to `AUTO`. See <<fuzziness>>.
+
+`prefix_length`::
+
+ The number of initial characters which will not be ``fuzzified''. This
+ helps to reduce the number of terms which must be examined. Defaults
+ to `0`.
+
+`max_expansions`::
+
+ The maximum number of terms that the `fuzzy` query will expand to.
+ Defaults to `0`.
+
+
+WARNING: this query can be very heavy if `prefix_length` and `max_expansions`
+are both set to their defaults of `0`. This could cause every term in the
+index to be examined!
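+
+As a hedged illustration (the value `kimchy` is an assumption), one way
+to bound the work done by the query is to require an exact prefix and
+cap the number of expansions:
+
+[source,js]
+--------------------------------------------------
+{
+    "fuzzy" : {
+        "user" : {
+            "value" : "kimchy",
+            "fuzziness" : 1,
+            "prefix_length" : 2,
+            "max_expansions" : 50
+        }
+    }
+}
+--------------------------------------------------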
+
+
+[float]
+==== Numeric and date fields
+
+Performs a <<query-dsl-range-query>> ``around'' the value using the
+`fuzziness` value as a `+/-` range, where:
+
+ -fuzziness <= field value <= +fuzziness
+
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "fuzzy" : {
+ "price" : {
+ "value" : 12,
+ "fuzziness" : 2
+ }
+ }
+}
+--------------------------------------------------
+
+This will result in a range query between 10 and 14. Date fields support
+<<time-units,time values>>, eg:
+
+[source,js]
+--------------------------------------------------
+{
+ "fuzzy" : {
+ "created" : {
+ "value" : "2010-02-05T12:05:07",
+ "fuzziness" : "1d"
+ }
+ }
+}
+--------------------------------------------------
+
+See <<fuzziness>> for more details about accepted values.
diff --git a/docs/reference/query-dsl/queries/geo-shape-query.asciidoc b/docs/reference/query-dsl/queries/geo-shape-query.asciidoc
new file mode 100644
index 0000000..94cd039
--- /dev/null
+++ b/docs/reference/query-dsl/queries/geo-shape-query.asciidoc
@@ -0,0 +1,49 @@
+[[query-dsl-geo-shape-query]]
+=== GeoShape Query
+
+Query version of the
+<<query-dsl-geo-shape-filter,geo_shape Filter>>.
+
+Requires the <<mapping-geo-shape-type,geo_shape
+Mapping>>.
+
+Given a document that looks like this:
+
+[source,js]
+--------------------------------------------------
+{
+ "name": "Wind & Wetter, Berlin, Germany",
+ "location": {
+ "type": "Point",
+ "coordinates": [13.400544, 52.530286]
+ }
+}
+--------------------------------------------------
+
+The following query will find the point:
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "geo_shape": {
+ "location": {
+ "shape": {
+ "type": "envelope",
+ "coordinates": [[13, 53],[14, 52]]
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+See the Filter's documentation for more information.
+
+[float]
+==== Relevancy and Score
+
+Currently Elasticsearch does not have any notion of geo shape relevancy;
+consequently, the Query internally uses a `constant_score` Query which
+wraps a <<query-dsl-geo-shape-filter,geo_shape
+filter>>.
diff --git a/docs/reference/query-dsl/queries/has-child-query.asciidoc b/docs/reference/query-dsl/queries/has-child-query.asciidoc
new file mode 100644
index 0000000..c562c2b
--- /dev/null
+++ b/docs/reference/query-dsl/queries/has-child-query.asciidoc
@@ -0,0 +1,61 @@
+[[query-dsl-has-child-query]]
+=== Has Child Query
+
+The `has_child` query works the same as the
+<<query-dsl-has-child-filter,has_child>> filter,
+by automatically wrapping the filter with a
+<<query-dsl-constant-score-query,constant_score>>
+(when using the default score type). It has the same syntax as the
+<<query-dsl-has-child-filter,has_child>> filter:
+
+[source,js]
+--------------------------------------------------
+{
+ "has_child" : {
+ "type" : "blog_tag",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+An important difference with the `top_children` query is that this query
+is always executed in two iterations whereas the `top_children` query
+can be executed in one or more iterations. When using the `has_child`
+query, the `total_hits` is always correct.
+
+[float]
+==== Scoring capabilities
+
+The `has_child` query also has scoring support. The
+supported score types are `max`, `sum`, `avg` or `none`. The default is
+`none` and yields the same behaviour as in previous versions. If the
+score type is set to a value other than `none`, the scores of all the
+matching child documents are aggregated into the associated parent
+documents. The score type can be specified with the `score_type` field
+inside the `has_child` query:
+
+[source,js]
+--------------------------------------------------
+{
+ "has_child" : {
+ "type" : "blog_tag",
+ "score_type" : "sum",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Memory Considerations
+
+With the current implementation, all `_id` values are loaded to memory
+(heap) in order to support fast lookups, so make sure there is enough
+memory for it.
diff --git a/docs/reference/query-dsl/queries/has-parent-query.asciidoc b/docs/reference/query-dsl/queries/has-parent-query.asciidoc
new file mode 100644
index 0000000..8ef1f4e
--- /dev/null
+++ b/docs/reference/query-dsl/queries/has-parent-query.asciidoc
@@ -0,0 +1,57 @@
+[[query-dsl-has-parent-query]]
+=== Has Parent Query
+
+The `has_parent` query works the same as the
+<<query-dsl-has-parent-filter,has_parent>>
+filter, by automatically wrapping the filter with a constant_score (when
+using the default score type). It has the same syntax as the
+<<query-dsl-has-parent-filter,has_parent>>
+filter.
+
+[source,js]
+--------------------------------------------------
+{
+ "has_parent" : {
+ "parent_type" : "blog",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Scoring capabilities
+
+The `has_parent` query also has scoring support. The
+supported score types are `score` or `none`. The default is `none`, which
+ignores the score from the parent document. The score is in this
+case equal to the boost on the `has_parent` query (defaults to 1). If
+the score type is set to `score`, then the score of the matching parent
+document is aggregated into the child documents belonging to the
+matching parent document. The score type can be specified with the
+`score_type` field inside the `has_parent` query:
+
+[source,js]
+--------------------------------------------------
+{
+ "has_parent" : {
+ "parent_type" : "blog",
+ "score_type" : "score",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Memory Considerations
+
+With the current implementation, all `_id` values are loaded to memory
+(heap) in order to support fast lookups, so make sure there is enough
+memory for it.
diff --git a/docs/reference/query-dsl/queries/ids-query.asciidoc b/docs/reference/query-dsl/queries/ids-query.asciidoc
new file mode 100644
index 0000000..8de62d7
--- /dev/null
+++ b/docs/reference/query-dsl/queries/ids-query.asciidoc
@@ -0,0 +1,20 @@
+[[query-dsl-ids-query]]
+=== Ids Query
+
+Matches only documents that have the provided ids. Note, this query
+does not require the <<mapping-id-field,_id>>
+field to be indexed since it works using the
+<<mapping-uid-field,_uid>> field.
+
+[source,js]
+--------------------------------------------------
+{
+ "ids" : {
+ "type" : "my_type",
+ "values" : ["1", "4", "100"]
+ }
+}
+--------------------------------------------------
+
+The `type` is optional and can be omitted; it can also accept an array
+of values.
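+
+For instance, a minimal sketch (the ids are illustrative) that omits
+`type` entirely and matches the listed ids across all types:
+
+[source,js]
+--------------------------------------------------
+{
+    "ids" : {
+        "values" : ["1", "4", "100"]
+    }
+}
+--------------------------------------------------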
diff --git a/docs/reference/query-dsl/queries/indices-query.asciidoc b/docs/reference/query-dsl/queries/indices-query.asciidoc
new file mode 100644
index 0000000..c597833
--- /dev/null
+++ b/docs/reference/query-dsl/queries/indices-query.asciidoc
@@ -0,0 +1,37 @@
+[[query-dsl-indices-query]]
+=== Indices Query
+
+The `indices` query is useful when a search is executed across multiple
+indices: it applies one query to indices that match a specified list,
+and another query to indices that do not match the listed indices.
+
+[source,js]
+--------------------------------------------------
+{
+ "indices" : {
+ "indices" : ["index1", "index2"],
+ "query" : {
+ "term" : { "tag" : "wow" }
+ },
+ "no_match_query" : {
+ "term" : { "tag" : "kow" }
+ }
+ }
+}
+--------------------------------------------------
+
+You can use the `index` field to provide a single index.
+
+`no_match_query` can also have a "string" value of `none` (to match no
+documents) or `all` (to match all documents). Defaults to `all`.
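+
+As an illustrative sketch (index and field names are assumptions), the
+single-index form combined with the string form of `no_match_query`:
+
+[source,js]
+--------------------------------------------------
+{
+    "indices" : {
+        "index" : "index1",
+        "query" : {
+            "term" : { "tag" : "wow" }
+        },
+        "no_match_query" : "none"
+    }
+}
+--------------------------------------------------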
+
+`query` is mandatory, as well as `indices` (or `index`).
+
+[TIP]
+===================================================================
+The order of the fields is important: if the `indices` are provided before
+`query` or `no_match_query`, the related queries get parsed only against the
+indices that they are going to be executed on. This is useful to avoid parsing
+queries when it is not necessary and to prevent potential mapping errors.
+===================================================================
diff --git a/docs/reference/query-dsl/queries/match-all-query.asciidoc b/docs/reference/query-dsl/queries/match-all-query.asciidoc
new file mode 100644
index 0000000..2ea3d41
--- /dev/null
+++ b/docs/reference/query-dsl/queries/match-all-query.asciidoc
@@ -0,0 +1,20 @@
+[[query-dsl-match-all-query]]
+=== Match All Query
+
+A query that matches all documents. Maps to Lucene `MatchAllDocsQuery`.
+
+[source,js]
+--------------------------------------------------
+{
+ "match_all" : { }
+}
+--------------------------------------------------
+
+Which can also have boost associated with it:
+
+[source,js]
+--------------------------------------------------
+{
+ "match_all" : { "boost" : 1.2 }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/match-query.asciidoc b/docs/reference/query-dsl/queries/match-query.asciidoc
new file mode 100644
index 0000000..d514768
--- /dev/null
+++ b/docs/reference/query-dsl/queries/match-query.asciidoc
@@ -0,0 +1,234 @@
+[[query-dsl-match-query]]
+=== Match Query
+
+A family of `match` queries that accepts text/numerics/dates, analyzes
+the input, and constructs a query out of it. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "match" : {
+ "message" : "this is a test"
+ }
+}
+--------------------------------------------------
+
+Note, `message` is the name of a field; you can substitute the name of
+any field (including `_all`) instead.
+
+[float]
+==== Types of Match Queries
+
+[float]
+===== boolean
+
+The default `match` query is of type `boolean`. It means that the text
+provided is analyzed and the analysis process constructs a boolean query
+from the provided text. The `operator` flag can be set to `or` or `and`
+to control the boolean clauses (defaults to `or`). The minimum number of
+should clauses to match can be set using the
+<<query-dsl-minimum-should-match,`minimum_should_match`>>
+parameter.
+
+The `analyzer` can be set to control which analyzer will perform the
+analysis process on the text. It defaults to the field's explicit mapping
+definition, or the default search analyzer.
+
+`fuzziness` allows _fuzzy matching_ based on the type of field being queried.
+See <<fuzziness>> for allowed settings.
+
+The `prefix_length` and
+`max_expansions` can be set in this case to control the fuzzy process.
+If the fuzzy option is set, the query will use `constant_score_rewrite`
+as its <<query-dsl-multi-term-rewrite,rewrite
+method>>; the `rewrite` parameter allows you to control how the query
+will be rewritten.
+
+Here is an example when providing additional parameters (note the slight
+change in structure, `message` is the field name):
+
+[source,js]
+--------------------------------------------------
+{
+ "match" : {
+ "message" : {
+ "query" : "this is a test",
+ "operator" : "and"
+ }
+ }
+}
+--------------------------------------------------
+
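+Similarly, a hedged sketch (field name and values are illustrative, not
+from the original text) showing the fuzzy options described above
+combined in a `match` query:
+
+[source,js]
+--------------------------------------------------
+{
+    "match" : {
+        "message" : {
+            "query" : "this is a testt",
+            "fuzziness" : 2,
+            "prefix_length" : 2,
+            "max_expansions" : 10
+        }
+    }
+}
+--------------------------------------------------
+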
+.zero_terms_query
+If the analyzer used removes all tokens in a query, as a `stop` filter
+does, the default behavior is to match no documents at all. In order to
+change that the `zero_terms_query` option can be used, which accepts
+`none` (default) and `all`, which corresponds to a `match_all` query.
+
+[source,js]
+--------------------------------------------------
+{
+ "match" : {
+ "message" : {
+ "query" : "to be or not to be",
+ "operator" : "and",
+ "zero_terms_query": "all"
+ }
+ }
+}
+--------------------------------------------------
+
+.cutoff_frequency
+The match query supports a `cutoff_frequency` that allows
+specifying an absolute or relative document frequency where
+high-frequency terms are moved into an optional subquery and are only
+scored if at least one of the low-frequency (below the cutoff) terms
+matches in the case of an `or` operator, or all of the low-frequency
+terms match in the case of an `and` operator.
+
+This query allows handling `stopwords` dynamically at runtime, is domain
+independent, and doesn't require a stopword file. It prevents scoring and
+iterating over high-frequency terms and only takes those terms into
+account if a more significant (lower-frequency) term matches a document.
+Yet, if all of the query terms are above the given `cutoff_frequency`,
+the query is automatically transformed into a pure conjunction (`and`)
+query to ensure fast execution.
+
+The `cutoff_frequency` can either be relative to the number of documents
+in the index if in the range `[0..1)` or absolute if greater or equal to
+`1.0`.
+
+Note: If the `cutoff_frequency` is used and the operator is `and`,
+_stacked tokens_ (tokens on the same position, such as those emitted by
+the `synonym` filter) are not handled gracefully as they are in a pure
+`and` query. For instance, the query `fast fox` might be analyzed into the
+3 terms `[fast, quick, fox]`, where `quick` is a synonym for `fast` on the
+same token position; with the `and` operator, the query might then require
+both `fast` and `quick` to match.
+
+Here is an example showing a query composed exclusively of stopwords:
+
+[source,js]
+--------------------------------------------------
+{
+ "match" : {
+ "message" : {
+ "query" : "to be or not to be",
+ "cutoff_frequency" : 0.001
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== phrase
+
+The `match_phrase` query analyzes the text and creates a `phrase` query
+out of the analyzed text. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "match_phrase" : {
+ "message" : "this is a test"
+ }
+}
+--------------------------------------------------
+
+Since `match_phrase` is only a `type` of a `match` query, it can also be
+used in the following manner:
+
+[source,js]
+--------------------------------------------------
+{
+ "match" : {
+ "message" : {
+ "query" : "this is a test",
+ "type" : "phrase"
+ }
+ }
+}
+--------------------------------------------------
+
+A phrase query matches terms up to a configurable `slop`
+(which defaults to 0) in any order. Transposed terms have a slop of 2.
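+
+For example, a hedged sketch (values are illustrative) that tolerates up
+to two positional moves between the phrase terms:
+
+[source,js]
+--------------------------------------------------
+{
+    "match_phrase" : {
+        "message" : {
+            "query" : "this is a test",
+            "slop" : 2
+        }
+    }
+}
+--------------------------------------------------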
+
+The `analyzer` can be set to control which analyzer will perform the
+analysis process on the text. It defaults to the field's explicit mapping
+definition, or the default search analyzer, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "match_phrase" : {
+ "message" : {
+ "query" : "this is a test",
+ "analyzer" : "my_analyzer"
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+===== match_phrase_prefix
+
+The `match_phrase_prefix` is the same as `match_phrase`, except that it
+allows for prefix matches on the last term in the text. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "match_phrase_prefix" : {
+ "message" : "this is a test"
+ }
+}
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+{
+ "match" : {
+ "message" : {
+ "query" : "this is a test",
+ "type" : "phrase_prefix"
+ }
+ }
+}
+--------------------------------------------------
+
+It accepts the same parameters as the phrase type. In addition, it also
+accepts a `max_expansions` parameter that controls how many
+prefixes the last term will be expanded to. It is highly recommended to set
+it to an acceptable value to control the execution time of the query.
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "match_phrase_prefix" : {
+ "message" : {
+ "query" : "this is a test",
+ "max_expansions" : 10
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Comparison to query_string / field
+
+The match family of queries does not go through a "query parsing"
+process. It does not support field name prefixes, wildcard characters,
+or other "advanced" features. For this reason, the chances of it failing
+are very small to non-existent, and it provides excellent behavior when
+it comes to simply analyzing the provided text and running it as a query
+(which is usually what a text search box does). Also, the `phrase_prefix`
+type can provide a great "as you type" behavior to automatically load
+search results.
+
+[float]
+==== Other options
+
+* `lenient` - If set to true will cause format based failures (like
+providing text to a numeric field) to be ignored. Defaults to false.
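+
+As a hedged sketch (the numeric field name `age` and the value are
+assumptions), `lenient` can be used to ignore such a format failure:
+
+[source,js]
+--------------------------------------------------
+{
+    "match" : {
+        "age" : {
+            "query" : "ten",
+            "lenient" : true
+        }
+    }
+}
+--------------------------------------------------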
diff --git a/docs/reference/query-dsl/queries/minimum-should-match.asciidoc b/docs/reference/query-dsl/queries/minimum-should-match.asciidoc
new file mode 100644
index 0000000..bedd496
--- /dev/null
+++ b/docs/reference/query-dsl/queries/minimum-should-match.asciidoc
@@ -0,0 +1,56 @@
+[[query-dsl-minimum-should-match]]
+=== Minimum Should Match
+
+The `minimum_should_match` parameter accepts the following possible values:
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Type |Example |Description
+|Integer |`3` |Indicates a fixed value regardless of the number of
+optional clauses.
+
+|Negative integer |`-2` |Indicates that the total number of optional
+clauses, minus this number, should be mandatory.
+
+|Percentage |`75%` |Indicates that this percent of the total number of
+optional clauses are necessary. The number computed from the percentage
+is rounded down and used as the minimum.
+
+|Negative percentage |`-25%` |Indicates that this percent of the total
+number of optional clauses can be missing. The number computed from the
+percentage is rounded down, before being subtracted from the total to
+determine the minimum.
+
+|Combination |`3<90%` |A positive integer, followed by the less-than
+symbol, followed by any of the previously mentioned specifiers is a
+conditional specification. It indicates that if the number of optional
+clauses is equal to (or less than) the integer, they are all required,
+but if it's greater than the integer, the specification applies. In this
+example: if there are 1 to 3 clauses they are all required, but for 4 or
+more clauses only 90% are required.
+
+|Multiple combinations |`2<-25 9<-3` |Multiple conditional
+specifications can be separated by spaces, each one only being valid for
+numbers greater than the one before it. In this example: if there are 1
+or 2 clauses both are required, if there are 3-9 clauses all but 25% are
+required, and if there are more than 9 clauses, all but three are
+required.
+|=======================================================================
+
+*NOTE:*
+
+When dealing with percentages, negative values can be used to get
+different behavior in edge cases. 75% and -25% mean the same thing when
+dealing with 4 clauses, but when dealing with 5 clauses 75% means 3 are
+required, but -25% means 4 are required.
+
+If the calculations based on the specification determine that no
+optional clauses are needed, the usual rules about BooleanQueries still
+apply at search time (a BooleanQuery containing no required clauses must
+still match at least one optional clause).
+
+No matter what number the calculation arrives at, a value greater than
+the number of optional clauses, or a value less than 1, will never be
+used. (ie: no matter how low or how high the result of the calculation
+is, the minimum number of required matches will never be lower
+than 1 or greater than the number of clauses.)
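+
+As a worked illustration (the surrounding `bool` query and the field
+values are assumptions, since this page only defines the parameter
+itself), a `2<75%` specification over three optional clauses requires
+all clauses when there are 2 or fewer, and 75% rounded down (here 2 of
+3) otherwise:
+
+[source,js]
+--------------------------------------------------
+{
+    "bool" : {
+        "should" : [
+            { "term" : { "tag" : "wow" } },
+            { "term" : { "tag" : "elasticsearch" } },
+            { "term" : { "tag" : "search" } }
+        ],
+        "minimum_should_match" : "2<75%"
+    }
+}
+--------------------------------------------------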
diff --git a/docs/reference/query-dsl/queries/mlt-field-query.asciidoc b/docs/reference/query-dsl/queries/mlt-field-query.asciidoc
new file mode 100644
index 0000000..e4f28bb
--- /dev/null
+++ b/docs/reference/query-dsl/queries/mlt-field-query.asciidoc
@@ -0,0 +1,68 @@
+[[query-dsl-mlt-field-query]]
+=== More Like This Field Query
+
+The `more_like_this_field` query is the same as the `more_like_this`
+query, except it runs against a single field. It provides nicer query
+DSL over the generic `more_like_this` query, and supports typed fields
+queries (it automatically wraps typed fields with a type filter to match
+only on the specific type).
+
+[source,js]
+--------------------------------------------------
+{
+ "more_like_this_field" : {
+ "name.first" : {
+ "like_text" : "text like this one",
+ "min_term_freq" : 1,
+ "max_query_terms" : 12
+ }
+ }
+}
+--------------------------------------------------
+
+`more_like_this_field` can be shortened to `mlt_field`.
+
+The `more_like_this_field` top level parameters include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`like_text` |The text to find documents like it, *required*.
+
+|`percent_terms_to_match` |The percentage of terms to match on (float
+value). Defaults to `0.3` (30 percent).
+
+|`min_term_freq` |The frequency below which terms will be ignored in the
+source doc. The default frequency is `2`.
+
+|`max_query_terms` |The maximum number of query terms that will be
+included in any generated query. Defaults to `25`.
+
+|`stop_words` |An array of stop words. Any word in this set is
+considered "uninteresting" and ignored. Even if your Analyzer allows
+stopwords, you might want to tell the MoreLikeThis code to ignore them,
+as for the purposes of document similarity it seems reasonable to assume
+that "a stop word is never interesting".
+
+|`min_doc_freq` |Words will be ignored if they do not occur in at least
+this many docs. Defaults to `5`.
+
+|`max_doc_freq` |The maximum frequency in which words may still appear.
+Words that appear in more than this many docs will be ignored. Defaults
+to unbounded.
+
+|`min_word_length` |The minimum word length below which words will be
+ignored. Defaults to `0`. (Old name "min_word_len" is deprecated)
+
+|`max_word_length` |The maximum word length above which words will be
+ignored. Defaults to unbounded (`0`). (Old name "max_word_len" is deprecated)
+
+|`boost_terms` |Sets the boost factor to use when boosting terms.
+Defaults to `1`.
+
+|`boost` |Sets the boost value of the query. Defaults to `1.0`.
+
+|`analyzer` |The analyzer that will be used to analyze the text.
+Defaults to the analyzer associated with the field.
+|=======================================================================
+
diff --git a/docs/reference/query-dsl/queries/mlt-query.asciidoc b/docs/reference/query-dsl/queries/mlt-query.asciidoc
new file mode 100644
index 0000000..bea704a
--- /dev/null
+++ b/docs/reference/query-dsl/queries/mlt-query.asciidoc
@@ -0,0 +1,67 @@
+[[query-dsl-mlt-query]]
+=== More Like This Query
+
+The more like this query finds documents that are "like" the provided
+text by running it against one or more fields.
+
+[source,js]
+--------------------------------------------------
+{
+ "more_like_this" : {
+ "fields" : ["name.first", "name.last"],
+ "like_text" : "text like this one",
+ "min_term_freq" : 1,
+ "max_query_terms" : 12
+ }
+}
+--------------------------------------------------
+
+`more_like_this` can be shortened to `mlt`.
+
+The `more_like_this` top level parameters include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`fields` |A list of the fields to run the more like this query against.
+Defaults to the `_all` field.
+
+|`like_text` |The text to find documents like it, *required*.
+
+|`percent_terms_to_match` |The percentage of terms to match on (float
+value). Defaults to `0.3` (30 percent).
+
+|`min_term_freq` |The frequency below which terms will be ignored in the
+source doc. The default frequency is `2`.
+
+|`max_query_terms` |The maximum number of query terms that will be
+included in any generated query. Defaults to `25`.
+
+|`stop_words` |An array of stop words. Any word in this set is
+considered "uninteresting" and ignored. Even if your Analyzer allows
+stopwords, you might want to tell the MoreLikeThis code to ignore them,
+as for the purposes of document similarity it seems reasonable to assume
+that "a stop word is never interesting".
+
+|`min_doc_freq` |Words will be ignored if they do not occur in at least
+this many docs. Defaults to `5`.
+
+|`max_doc_freq` |The maximum frequency in which words may still appear.
+Words that appear in more than this many docs will be ignored. Defaults
+to unbounded.
+
+|`min_word_length` |The minimum word length below which words will be
+ignored. Defaults to `0`. (Old name "min_word_len" is deprecated)
+
+|`max_word_length` |The maximum word length above which words will be
+ignored. Defaults to unbounded (`0`). (Old name "max_word_len" is deprecated)
+
+|`boost_terms` |Sets the boost factor to use when boosting terms.
+Defaults to `1`.
+
+|`boost` |Sets the boost value of the query. Defaults to `1.0`.
+
+|`analyzer` |The analyzer that will be used to analyze the text.
+Defaults to the analyzer associated with the field.
+|=======================================================================
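+
+As a hedged sketch (the stop words and parameter values are
+illustrative), several of these parameters can be combined:
+
+[source,js]
+--------------------------------------------------
+{
+    "more_like_this" : {
+        "fields" : ["name.first", "name.last"],
+        "like_text" : "text like this one",
+        "stop_words" : ["the", "and", "a"],
+        "min_term_freq" : 1,
+        "min_doc_freq" : 2,
+        "boost_terms" : 2
+    }
+}
+--------------------------------------------------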
+
diff --git a/docs/reference/query-dsl/queries/multi-match-query.asciidoc b/docs/reference/query-dsl/queries/multi-match-query.asciidoc
new file mode 100644
index 0000000..cb098cd
--- /dev/null
+++ b/docs/reference/query-dsl/queries/multi-match-query.asciidoc
@@ -0,0 +1,64 @@
+[[query-dsl-multi-match-query]]
+=== Multi Match Query
+
+The `multi_match` query builds further on top of the `match` query by
+allowing multiple fields to be specified. The idea here is to make it
+easier to build a concise match-type query over multiple fields,
+instead of the more verbose alternative of using multiple
+`match` queries within a `bool` query.
+
+The structure of the query is a bit different. Instead of a nested json
+object defining the query field, there is a top-level json field for
+defining the query fields. Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "multi_match" : {
+ "query" : "this is a test",
+ "fields" : [ "subject", "message" ]
+ }
+}
+--------------------------------------------------
+
+The `multi_match` query creates either a `bool` or a `dis_max` top level
+query. Each field is a query clause in this top level query. The query
+clause contains the actual query (the specified 'type' defines what
+query this will be). Each query clause is basically a `should` clause.
+
+[float]
+==== Options
+
+All options that apply on the `match` query also apply on the
+`multi_match` query. The `match` query options apply only on the
+individual clauses inside the top level query.
+
+* `fields` - Fields to be used in the query.
+* `use_dis_max` - Boolean indicating whether to create a `dis_max` query
+or a `bool` query. Defaults to `true`.
+* `tie_breaker` - Multiplier value to balance the scores between lower
+and higher scoring fields. Only applicable when `use_dis_max` is set to
+true. Defaults to `0.0`.
+
+The query accepts all the options that a regular `match` query accepts.
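+
+A hedged sketch (field names are illustrative) showing these options in
+combination:
+
+[source,js]
+--------------------------------------------------
+{
+    "multi_match" : {
+        "query" : "this is a test",
+        "fields" : [ "subject", "message" ],
+        "use_dis_max" : true,
+        "tie_breaker" : 0.3
+    }
+}
+--------------------------------------------------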
+
+[float]
+==== Boosting
+
+The `multi_match` query supports field boosting via `^` notation in the
+fields json field.
+
+[source,js]
+--------------------------------------------------
+{
+ "multi_match" : {
+ "query" : "this is a test",
+ "fields" : [ "subject^2", "message" ]
+ }
+}
+--------------------------------------------------
+
+In the above example hits in the `subject` field are 2 times more
+important than in the `message` field.
diff --git a/docs/reference/query-dsl/queries/multi-term-rewrite.asciidoc b/docs/reference/query-dsl/queries/multi-term-rewrite.asciidoc
new file mode 100644
index 0000000..135be67
--- /dev/null
+++ b/docs/reference/query-dsl/queries/multi-term-rewrite.asciidoc
@@ -0,0 +1,42 @@
+[[query-dsl-multi-term-rewrite]]
+=== Multi Term Query Rewrite
+
+Multi term queries, like
+<<query-dsl-wildcard-query,wildcard>> and
+<<query-dsl-prefix-query,prefix>>, end up
+going through a process of rewrite. This
+also happens for the
+<<query-dsl-query-string-query,query_string>> query.
+All of those queries allow you to control how they will be rewritten using
+the `rewrite` parameter:
+
+* When not set, or set to `constant_score_auto`, defaults to
+automatically choosing either `constant_score_boolean` or
+`constant_score_filter` based on query characteristics.
+* `scoring_boolean`: A rewrite method that first translates each term
+into a should clause in a boolean query, and keeps the scores as
+computed by the query. Note that typically such scores are meaningless
+to the user, and require non-trivial CPU to compute, so it's almost
+always better to use `constant_score_auto`. This rewrite method will hit a
+"too many clauses" failure if it exceeds the boolean query limit (defaults
+to `1024`).
+* `constant_score_boolean`: Similar to `scoring_boolean` except scores
+are not computed. Instead, each matching document receives a constant
+score equal to the query's boost. This rewrite method will hit a "too many
+clauses" failure if it exceeds the boolean query limit (defaults to
+`1024`).
+* `constant_score_filter`: A rewrite method that first creates a private
+Filter by visiting each term in sequence and marking all docs for that
+term. Matching documents are assigned a constant score equal to the
+query's boost.
+* `top_terms_N`: A rewrite method that first translates each term into a
+should clause in a boolean query, and keeps the scores as computed by the
+query. This rewrite method only uses the top scoring terms so it will
+not overflow boolean max clause count. The `N` controls the size of the
+top scoring terms to use.
+* `top_terms_boost_N`: A rewrite method that first translates each term
+into a should clause in a boolean query, but the scores are only computed as
+the boost. This rewrite method only uses the top scoring terms so it
+will not overflow the boolean max clause count. The `N` controls the
+size of the top scoring terms to use.
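+
+As an illustrative sketch (field and value are assumptions; the `prefix`
+query itself is documented later in this reference), the `rewrite`
+parameter is passed alongside the other query options:
+
+[source,js]
+--------------------------------------------------
+{
+    "prefix" : {
+        "user" : {
+            "value" : "ki",
+            "rewrite" : "top_terms_10"
+        }
+    }
+}
+--------------------------------------------------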
+
diff --git a/docs/reference/query-dsl/queries/nested-query.asciidoc b/docs/reference/query-dsl/queries/nested-query.asciidoc
new file mode 100644
index 0000000..bc7e07c
--- /dev/null
+++ b/docs/reference/query-dsl/queries/nested-query.asciidoc
@@ -0,0 +1,58 @@
+[[query-dsl-nested-query]]
+=== Nested Query
+
+The nested query allows you to query nested objects / docs (see
+<<mapping-nested-type,nested mapping>>). The
+query is executed against the nested objects / docs as if they were
+indexed as separate docs (they are, internally) and results in the
+root parent doc (or parent nested mapping). Here is a sample mapping we
+will work with:
+
+[source,js]
+--------------------------------------------------
+{
+ "type1" : {
+ "properties" : {
+ "obj1" : {
+ "type" : "nested"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+And here is a sample nested query usage:
+
+[source,js]
+--------------------------------------------------
+{
+ "nested" : {
+ "path" : "obj1",
+ "score_mode" : "avg",
+ "query" : {
+ "bool" : {
+ "must" : [
+ {
+ "match" : {"obj1.name" : "blue"}
+ },
+ {
+ "range" : {"obj1.count" : {"gt" : 5}}
+ }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The query `path` points to the nested object path, and the `query` (or
+`filter`) includes the query that will run on the nested docs matching
+the direct path, joining with the root parent docs.
+
+The `score_mode` allows you to set how matching inner children affect
+the scoring of the parent. It defaults to `avg`, but can be `total`,
+`max` and `none`.
+
+Multi level nesting is automatically supported and detected, so that an
+inner nested query automatically matches the relevant nesting level
+(and not the root) if it exists within another nested query.
diff --git a/docs/reference/query-dsl/queries/prefix-query.asciidoc b/docs/reference/query-dsl/queries/prefix-query.asciidoc
new file mode 100644
index 0000000..1bcf75a
--- /dev/null
+++ b/docs/reference/query-dsl/queries/prefix-query.asciidoc
@@ -0,0 +1,36 @@
+[[query-dsl-prefix-query]]
+=== Prefix Query
+
+Matches documents that have fields containing terms with a specified
+prefix (*not analyzed*). The prefix query maps to Lucene `PrefixQuery`.
+The following matches documents where the user field contains a term
+that starts with `ki`:
+
+[source,js]
+--------------------------------------------------
+{
+ "prefix" : { "user" : "ki" }
+}
+--------------------------------------------------
+
+A boost can also be associated with the query:
+
+[source,js]
+--------------------------------------------------
+{
+ "prefix" : { "user" : { "value" : "ki", "boost" : 2.0 } }
+}
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+{
+ "prefix" : { "user" : { "prefix" : "ki", "boost" : 2.0 } }
+}
+--------------------------------------------------
+
+This multi term query allows you to control how it gets rewritten using the
+<<query-dsl-multi-term-rewrite,rewrite>>
+parameter.
diff --git a/docs/reference/query-dsl/queries/query-string-query.asciidoc b/docs/reference/query-dsl/queries/query-string-query.asciidoc
new file mode 100644
index 0000000..5ce0b4e
--- /dev/null
+++ b/docs/reference/query-dsl/queries/query-string-query.asciidoc
@@ -0,0 +1,161 @@
+[[query-dsl-query-string-query]]
+=== Query String Query
+
+A query that uses a query parser in order to parse its content. Here is
+an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query_string" : {
+ "default_field" : "content",
+ "query" : "this AND that OR thus"
+ }
+}
+--------------------------------------------------
+
+The `query_string` top level parameters include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`query` |The actual query to be parsed. See <<query-string-syntax>>.
+
+|`default_field` |The default field for query terms if no prefix field
+is specified. Defaults to the `index.query.default_field` index
+settings, which in turn defaults to `_all`.
+
+|`default_operator` |The default operator used if no explicit operator
+is specified. For example, with a default operator of `OR`, the query
+`capital of Hungary` is translated to `capital OR of OR Hungary`, and
+with default operator of `AND`, the same query is translated to
+`capital AND of AND Hungary`. The default value is `OR`.
+
+|`analyzer` |The analyzer name used to analyze the query string.
+
+|`allow_leading_wildcard` |When set, `*` or `?` are allowed as the first
+character. Defaults to `true`.
+
+|`lowercase_expanded_terms` |Whether terms of wildcard, prefix, fuzzy,
+and range queries are to be automatically lower-cased or not (since they
+are not analyzed). Defaults to `true`.
+
+|`enable_position_increments` |Set to `true` to enable position
+increments in result queries. Defaults to `true`.
+
+|`fuzzy_max_expansions` |Controls the number of terms fuzzy queries will
+expand to. Defaults to `50`.
+
+|`fuzziness` |Set the fuzziness for fuzzy queries. Defaults
+to `AUTO`. See <<fuzziness>> for allowed settings.
+
+|`fuzzy_prefix_length` |Set the prefix length for fuzzy queries. Default
+is `0`.
+
+|`phrase_slop` |Sets the default slop for phrases. If zero, then exact
+phrase matches are required. Default value is `0`.
+
+|`boost` |Sets the boost value of the query. Defaults to `1.0`.
+
+|`analyze_wildcard` |By default, wildcard terms in a query string are
+not analyzed. By setting this value to `true`, a best effort will be
+made to analyze those as well.
+
+|`auto_generate_phrase_queries` |Defaults to `false`.
+
+|`minimum_should_match` |A value controlling how many "should" clauses
+in the resulting boolean query should match. It can be an absolute value
+(`2`), a percentage (`30%`) or a
+<<query-dsl-minimum-should-match,combination of
+both>>.
+
+|`lenient` |If set to `true` will cause format based failures (like
+providing text to a numeric field) to be ignored.
+|=======================================================================
+
+When a multi term query is being generated, one can control how it gets
+rewritten using the
+<<query-dsl-multi-term-rewrite,rewrite>>
+parameter.
+
+[float]
+==== Default Field
+
+When not explicitly specifying the field to search on in the query
+string syntax, the `index.query.default_field` will be used to derive
+which field to search on. It defaults to the `_all` field.
+
+So, if the `_all` field is disabled, it might make sense to change this
+setting to a different default field.
+
+[float]
+==== Multi Field
+
+The `query_string` query can also run against multiple fields. The idea
+of running the `query_string` query against multiple fields is to
+internally create several queries for the same query string, each with a
+`default_field` matching one of the fields provided. Since several queries
+are generated, combining them can be automatically done either using a
+`dis_max` query or a simple `bool` query. For example (the `name` is
+boosted by 5 using `^5` notation):
+
+[source,js]
+--------------------------------------------------
+{
+ "query_string" : {
+ "fields" : ["content", "name^5"],
+ "query" : "this AND that OR thus",
+ "use_dis_max" : true
+ }
+}
+--------------------------------------------------
+
+A simple wildcard can also be used to search "within" specific inner
+elements of the document. For example, if we have a `city` object with
+several fields (or inner object with fields) in it, we can automatically
+search on all "city" fields:
+
+[source,js]
+--------------------------------------------------
+{
+ "query_string" : {
+ "fields" : ["city.*"],
+ "query" : "this AND that OR thus",
+ "use_dis_max" : true
+ }
+}
+--------------------------------------------------
+
+Another option is to provide the wildcard fields search in the query
+string itself (properly escaping the `*` sign), for example:
+`city.\*:something`.
+
+When running the `query_string` query against multiple fields, the
+following additional parameters are allowed:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`use_dis_max` |Should the queries be combined using `dis_max` (set it
+to `true`), or a `bool` query (set it to `false`). Defaults to `true`.
+
+|`tie_breaker` |When using `dis_max`, the disjunction max tie breaker.
+Defaults to `0`.
+|=======================================================================
+
+The fields parameter can also include pattern based field names,
+allowing automatic expansion to the relevant fields (dynamically
+introduced fields included). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query_string" : {
+ "fields" : ["content", "name.*^5"],
+ "query" : "this AND that OR thus",
+ "use_dis_max" : true
+ }
+}
+--------------------------------------------------
+
+include::query-string-syntax.asciidoc[]
diff --git a/docs/reference/query-dsl/queries/query-string-syntax.asciidoc b/docs/reference/query-dsl/queries/query-string-syntax.asciidoc
new file mode 100644
index 0000000..dbc47c6
--- /dev/null
+++ b/docs/reference/query-dsl/queries/query-string-syntax.asciidoc
@@ -0,0 +1,295 @@
+[[query-string-syntax]]
+
+==== Query string syntax
+
+The query string ``mini-language'' is used by the
+<<query-dsl-query-string-query>> and by the
+`q` query string parameter in the <<search-search,`search` API>>.
+
+The query string is parsed into a series of _terms_ and _operators_. A
+term can be a single word -- `quick` or `brown` -- or a phrase, surrounded by
+double quotes -- `"quick brown"` -- which searches for all the words in the
+phrase, in the same order.
+
+Operators allow you to customize the search -- the available options are
+explained below.
+
+===== Field names
+
+As mentioned in <<query-dsl-query-string-query>>, the `default_field` is searched for the
+search terms, but it is possible to specify other fields in the query syntax:
+
+* where the `status` field contains `active`
+
+ status:active
+
+* where the `title` field contains `quick` or `brown`
+
+ title:(quick brown)
+
+* where the `author` field contains the exact phrase `"john smith"`
+
+ author:"John Smith"
+
+* where any of the fields `book.title`, `book.content` or `book.date` contains
+ `quick` or `brown` (note how we need to escape the `*` with a backslash):
+
+ book.\*:(quick brown)
+
+* where the field `title` has no value (or is missing):
+
+ _missing_:title
+
+* where the field `title` has any non-null value:
+
+ _exists_:title
+
+===== Wildcards
+
+Wildcard searches can be run on individual terms, using `?` to replace
+a single character, and `*` to replace zero or more characters:
+
+ qu?ck bro*
+
+Be aware that wildcard queries can use an enormous amount of memory and
+perform very badly -- just think how many terms need to be queried to
+match the query string `"a* b* c*"`.
+
+[WARNING]
+======
+Allowing a wildcard at the beginning of a word (eg `"*ing"`) is particularly
+heavy, because all terms in the index need to be examined, just in case
+they match. Leading wildcards can be disabled by setting
+`allow_leading_wildcard` to `false`.
+======
+
+Wildcarded terms are not analyzed by default -- they are lowercased
+(`lowercase_expanded_terms` defaults to `true`) but no further analysis
+is done, mainly because it is impossible to accurately analyze a word that
+is missing some of its letters. However, by setting `analyze_wildcard` to
+`true`, an attempt will be made to analyze wildcarded words before searching
+the term list for matching terms.
+
+===== Regular expressions
+
+Regular expression patterns can be embedded in the query string by
+wrapping them in forward-slashes (`"/"`):
+
+ name:/joh?n(ath[oa]n)/
+
+The supported regular expression syntax is explained in <<regexp-syntax>>.
+
+[WARNING]
+======
+The `allow_leading_wildcard` parameter does not have any control over
+regular expressions. A query string such as the following would force
+Elasticsearch to visit every term in the index:
+
+ /.*n/
+
+Use with caution!
+======
+
+===== Fuzziness
+
+We can search for terms that are
+similar to, but not exactly like, our search terms, using the ``fuzzy''
+operator:
+
+ quikc~ brwn~ foks~
+
+This uses the
+http://en.wikipedia.org/wiki/Damerau-Levenshtein_distance[Damerau-Levenshtein distance]
+to find all terms with a maximum of
+two changes, where a change is the insertion, deletion
+or substitution of a single character, or transposition of two adjacent
+characters.
+
+The default _edit distance_ is `2`, but an edit distance of `1` should be
+sufficient to catch 80% of all human misspellings. It can be specified as:
+
+ quikc~1
+
+===== Proximity searches
+
+While a phrase query (eg `"john smith"`) expects all of the terms in exactly
+the same order, a proximity query allows the specified words to be further
+apart or in a different order. In the same way that fuzzy queries can
+specify a maximum edit distance for characters in a word, a proximity search
+allows us to specify a maximum edit distance of words in a phrase:
+
+ "fox quick"~5
+
+The closer the text in a field is to the original order specified in the
+query string, the more relevant that document is considered to be. When
+compared to the above example query, the phrase `"quick fox"` would be
+considered more relevant than `"quick brown fox"`.
+
+===== Ranges
+
+Ranges can be specified for date, numeric or string fields. Inclusive ranges
+are specified with square brackets `[min TO max]` and exclusive ranges with
+curly brackets `{min TO max}`.
+
+* All days in 2012:
+
+ date:[2012/01/01 TO 2012/12/31]
+
+* Numbers 1..5
+
+ count:[1 TO 5]
+
+* Tags between `alpha` and `omega`, excluding `alpha` and `omega`:
+
+ tag:{alpha TO omega}
+
+* Numbers from 10 upwards
+
+ count:[10 TO *]
+
+* Dates before 2012
+
+ date:{* TO 2012/01/01}
+
+Curly and square brackets can be combined:
+
+* Numbers from 1 up to but not including 5
+
+    count:[1 TO 5}
+
+
+Ranges with one side unbounded can use the following syntax:
+
+ age:>10
+ age:>=10
+ age:<10
+ age:<=10
+
+[NOTE]
+===================================================================
+To combine an upper and lower bound with the simplified syntax, you
+would need to join two clauses with an `AND` operator:
+
+    age:(>=10 AND <20)
+ age:(+>=10 +<20)
+
+===================================================================
+
+The parsing of ranges in query strings can be complex and error prone. It is
+much more reliable to use an explicit <<query-dsl-range-filter,`range` filter>>.
+
+
+===== Boosting
+
+Use the _boost_ operator `^` to make one term more relevant than another.
+For instance, if we want to find all documents about foxes, but we are
+especially interested in quick foxes:
+
+ quick^2 fox
+
+The default `boost` value is 1, but can be any positive floating point number.
+Boosts between 0 and 1 reduce relevance.
+
+Boosts can also be applied to phrases or to groups:
+
+ "john smith"^2 (foo bar)^4
+
+===== Boolean operators
+
+By default, all terms are optional, as long as one term matches. A search
+for `foo bar baz` will find any document that contains one or more of
+`foo` or `bar` or `baz`. We have already discussed the `default_operator`
+above which allows you to force all terms to be required, but there are
+also _boolean operators_ which can be used in the query string itself
+to provide more control.
+
+The preferred operators are `+` (this term *must* be present) and `-`
+(this term *must not* be present). All other terms are optional.
+For example, this query:
+
+ quick brown +fox -news
+
+states that:
+
+* `fox` must be present
+* `news` must not be present
+* `quick` and `brown` are optional -- their presence increases the relevance
+
+The familiar operators `AND`, `OR` and `NOT` (also written `&&`, `||` and `!`)
+are also supported. However, the effects of these operators can be more
+complicated than is obvious at first glance. `NOT` takes precedence over
+`AND`, which takes precedence over `OR`. While the `+` and `-` only affect
+the term to the right of the operator, `AND` and `OR` can affect the terms to
+the left and right.
+
+****
+Rewriting the above query using `AND`, `OR` and `NOT` demonstrates the
+complexity:
+
+`quick OR brown AND fox AND NOT news`::
+
+This is incorrect, because `brown` is now a required term.
+
+`(quick OR brown) AND fox AND NOT news`::
+
+This is incorrect because at least one of `quick` or `brown` is now required
+and the search for those terms would be scored differently from the original
+query.
+
+`((quick AND fox) OR (brown AND fox) OR fox) AND NOT news`::
+
+This form now replicates the logic from the original query correctly, but
+the relevance scoring bears little resemblance to the original.
+
+In contrast, the same query rewritten using the <<query-dsl-match-query,`match` query>>
+would look like this:
+
+ {
+ "bool": {
+            "must": { "match": { "_all": "fox" } },
+            "should": { "match": { "_all": "quick brown" } },
+            "must_not": { "match": { "_all": "news" } }
+ }
+ }
+
+****
+
+===== Grouping
+
+Multiple terms or clauses can be grouped together with parentheses, to form
+sub-queries:
+
+ (quick OR brown) AND fox
+
+Groups can be used to target a particular field, or to boost the result
+of a sub-query:
+
+ status:(active OR pending) title:(full text search)^2
+
+===== Reserved characters
+
+If you need to use any of the characters which function as operators in your
+query itself (and not as operators), then you should escape them with
+a leading backslash. For instance, to search for `(1+1)=2`, you would
+need to write your query as `\(1\+1\)=2`.
+
+The reserved characters are: `+ - && || ! ( ) { } [ ] ^ " ~ * ? : \ /`
+
+Failing to escape these special characters correctly could lead to a syntax
+error which prevents your query from running.
+
+.Watch this space
+****
+A space may also be a reserved character. For instance, if you have a
+synonym list which converts `"wi fi"` to `"wifi"`, a `query_string` search
+for `"wi fi"` would fail. The query string parser would interpret your
+query as a search for `"wi OR fi"`, while the token stored in your
+index is actually `"wifi"`. Escaping the space will protect it from
+being touched by the query string parser: `"wi\ fi"`.
+****
+
+===== Empty Query
+
+If the query string is empty or contains only whitespace, the
+query string is interpreted as a `no_docs_query` and will yield
+an empty result set.
diff --git a/docs/reference/query-dsl/queries/range-query.asciidoc b/docs/reference/query-dsl/queries/range-query.asciidoc
new file mode 100644
index 0000000..cf8a9da
--- /dev/null
+++ b/docs/reference/query-dsl/queries/range-query.asciidoc
@@ -0,0 +1,31 @@
+[[query-dsl-range-query]]
+=== Range Query
+
+Matches documents with fields that have terms within a certain range.
+The type of the Lucene query depends on the field type, for `string`
+fields, the `TermRangeQuery`, while for number/date fields, the query is
+a `NumericRangeQuery`. The following example returns all documents where
+`age` is between `10` and `20`:
+
+[source,js]
+--------------------------------------------------
+{
+ "range" : {
+ "age" : {
+ "gte" : 10,
+ "lte" : 20,
+ "boost" : 2.0
+ }
+ }
+}
+--------------------------------------------------
+
+The `range` query accepts the following parameters:
+
+[horizontal]
+`gte`:: Greater-than or equal to
+`gt`:: Greater-than
+`lte`:: Less-than or equal to
+`lt`:: Less-than
+`boost`:: Sets the boost value of the query, defaults to `1.0`
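+
+Date fields accept date strings as range endpoints. A hedged sketch (the
+`created` field and the date format are assumptions):
+
+[source,js]
+--------------------------------------------------
+{
+    "range" : {
+        "created" : {
+            "gte" : "2012-01-01",
+            "lt" : "2013-01-01"
+        }
+    }
+}
+--------------------------------------------------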
+
diff --git a/docs/reference/query-dsl/queries/regexp-query.asciidoc b/docs/reference/query-dsl/queries/regexp-query.asciidoc
new file mode 100644
index 0000000..3345773
--- /dev/null
+++ b/docs/reference/query-dsl/queries/regexp-query.asciidoc
@@ -0,0 +1,54 @@
+[[query-dsl-regexp-query]]
+=== Regexp Query
+
+The `regexp` query allows you to use regular expression term queries.
+See <<regexp-syntax>> for details of the supported regular expression language.
+
+*Note*: The performance of a `regexp` query heavily depends on the
+regular expression chosen. Matching everything, like `.*`, is very slow,
+as is using lookaround regular expressions. If possible, you should
+try to use a long prefix before your regular expression starts. Wildcard
+matchers like `.*?+` will mostly lower performance.
+
+[source,js]
+--------------------------------------------------
+{
+ "regexp":{
+ "name.first": "s.*y"
+ }
+}
+--------------------------------------------------
+
+Boosting is also supported:
+
+[source,js]
+--------------------------------------------------
+{
+ "regexp":{
+ "name.first":{
+ "value":"s.*y",
+ "boost":1.2
+ }
+ }
+}
+--------------------------------------------------
+
+You can also use special flags:
+
+[source,js]
+--------------------------------------------------
+{
+ "regexp":{
+ "name.first": "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ }
+}
+--------------------------------------------------
+
+Possible flags are `ALL`, `ANYSTRING`, `AUTOMATON`, `COMPLEMENT`,
+`EMPTY`, `INTERSECTION`, `INTERVAL`, or `NONE`. Please check the
+http://lucene.apache.org/core/4_3_0/core/index.html?org%2Fapache%2Flucene%2Futil%2Fautomaton%2FRegExp.html[Lucene
+documentation] for their meaning.
+
+
+include::regexp-syntax.asciidoc[]
diff --git a/docs/reference/query-dsl/queries/regexp-syntax.asciidoc b/docs/reference/query-dsl/queries/regexp-syntax.asciidoc
new file mode 100644
index 0000000..5d2c061
--- /dev/null
+++ b/docs/reference/query-dsl/queries/regexp-syntax.asciidoc
@@ -0,0 +1,280 @@
+[[regexp-syntax]]
+==== Regular expression syntax
+
+Regular expression queries are supported by the `regexp` and the `query_string`
+queries. The Lucene regular expression engine
+is not Perl-compatible but supports a smaller range of operators.
+
+[NOTE]
+====
+We will not attempt to explain regular expressions, but
+just explain the supported operators.
+====
+
+===== Standard operators
+
+Anchoring::
++
+--
+
+Most regular expression engines allow you to match any part of a string.
+If you want the regexp pattern to start at the beginning of the string or
+finish at the end of the string, then you have to _anchor_ it specifically,
+using `^` to indicate the beginning or `$` to indicate the end.
+
+Lucene's patterns are always anchored. The pattern provided must match
+the entire string. For string `"abcde"`:
+
+ ab.* # match
+ abcd # no match
+
+--
+
+Allowed characters::
++
+--
+
+Any Unicode characters may be used in the pattern, but certain characters
+are reserved and must be escaped. The standard reserved characters are:
+
+....
+. ? + * | { } [ ] ( ) " \
+....
+
+If you enable optional features (see below) then these characters may
+also be reserved:
+
+ # @ & < > ~
+
+Any reserved character can be escaped with a backslash, `"\*"`, including
+a literal backslash character: `"\\"`
+
+Additionally, any characters (except double quotes) are interpreted literally
+when surrounded by double quotes:
+
+ john"@smith.com"
+
+
+--
+
+Match any character::
++
+--
+
+The period `"."` can be used to represent any character. For string `"abcde"`:
+
+ ab... # match
+ a.c.e # match
+
+--
+
+One-or-more::
++
+--
+
+The plus sign `"+"` can be used to repeat the preceding shortest pattern
+once or more times. For string `"aaabbb"`:
+
+ a+b+ # match
+ aa+bb+ # match
+ a+.+ # match
+ aa+bbb+ # no match
+
+--
+
+Zero-or-more::
++
+--
+
+The asterisk `"*"` can be used to match the preceding shortest pattern
+zero-or-more times. For string `"aaabbb"`:
+
+ a*b* # match
+ a*b*c* # match
+ .*bbb.* # match
+ aaa*bbb* # match
+
+--
+
+Zero-or-one::
++
+--
+
+The question mark `"?"` makes the preceding shortest pattern optional. It
+matches zero or one times. For string `"aaabbb"`:
+
+ aaa?bbb? # match
+ aaaa?bbbb? # match
+ .....?.? # match
+ aa?bb? # no match
+
+--
+
+Min-to-max::
++
+--
+
+Curly brackets `"{}"` can be used to specify a minimum and (optionally)
+a maximum number of times the preceding shortest pattern can repeat. The
+allowed forms are:
+
+ {5} # repeat exactly 5 times
+ {2,5} # repeat at least twice and at most 5 times
+ {2,} # repeat at least twice
+
+For string `"aaabbb"`:
+
+ a{3}b{3} # match
+ a{2,4}b{2,4} # match
+ a{2,}b{2,} # match
+ .{3}.{3} # match
+ a{4}b{4} # no match
+ a{4,6}b{4,6} # no match
+ a{4,}b{4,} # no match
+
+--
+
+Grouping::
++
+--
+
+Parentheses `"()"` can be used to form sub-patterns. The quantity operators
+listed above operate on the shortest previous pattern, which can be a group.
+For string `"ababab"`:
+
+ (ab)+ # match
+ ab(ab)+ # match
+ (..)+ # match
+ (...)+ # no match
+ (ab)* # match
+ abab(ab)? # match
+ ab(ab)? # no match
+ (ab){3} # match
+ (ab){1,2} # no match
+
+--
+
+Alternation::
++
+--
+
+The pipe symbol `"|"` acts as an OR operator. The match will succeed if
+the pattern on either the left-hand side OR the right-hand side matches.
+The alternation applies to the _longest pattern_, not the shortest.
+For string `"aabb"`:
+
+ aabb|bbaa # match
+ aacc|bb # no match
+ aa(cc|bb) # match
+ a+|b+ # no match
+ a+b+|b+a+ # match
+ a+(b|c)+ # match
+
+--
+
+Character classes::
++
+--
+
+Ranges of potential characters may be represented as character classes
+by enclosing them in square brackets `"[]"`. A leading `^`
+negates the character class. The allowed forms are:
+
+ [abc] # 'a' or 'b' or 'c'
+ [a-c] # 'a' or 'b' or 'c'
+ [-abc] # '-' or 'a' or 'b' or 'c'
+ [abc\-] # '-' or 'a' or 'b' or 'c'
+ [^abc] # any character except 'a' or 'b' or 'c'
+ [^a-c] # any character except 'a' or 'b' or 'c'
+ [^-abc] # any character except '-' or 'a' or 'b' or 'c'
+ [^abc\-] # any character except '-' or 'a' or 'b' or 'c'
+
+Note that the dash `"-"` indicates a range of characters, unless it is
+the first character or if it is escaped with a backslash.
+
+For string `"abcd"`:
+
+ ab[cd]+ # match
+ [a-d]+ # match
+ [^a-d]+ # no match
+
+--
+
+===== Optional operators
+
+These operators are only available when they are explicitly enabled, by
+passing `flags` to the query.
+
+Multiple flags can be enabled either using the `ALL` flag, or by
+concatenating flags with a pipe `"|"`:
+
+ {
+ "regexp": {
+ "username": {
+ "value": "john~athon<1-5>",
+ "flags": "COMPLEMENT|INTERVAL"
+ }
+ }
+ }
+
+Complement::
++
+--
+
+The complement is probably the most useful option. The shortest pattern that
+follows a tilde `"~"` is negated. For the string `"abcdef"`:
+
+ ab~df # match
+ ab~cf # no match
+ a~(cd)f # match
+ a~(bc)f # no match
+
+Enabled with the `COMPLEMENT` or `ALL` flags.
+
+--
+
+Interval::
++
+--
+
+The interval option enables the use of numeric ranges, enclosed by angle
+brackets `"<>"`. For string: `"foo80"`:
+
+ foo<1-100> # match
+ foo<01-100> # match
+ foo<001-100> # no match
+
+Enabled with the `INTERVAL` or `ALL` flags.
+
+
+--
+
+Intersection::
++
+--
+
+The ampersand `"&"` joins two patterns such that both of them have to
+match. For string `"aaabbb"`:
+
+ aaa.+&.+bbb # match
+ aaa&bbb # no match
+
+Using this feature usually means that you should rewrite your regular
+expression.
+
+Enabled with the `INTERSECTION` or `ALL` flags.
+
+--
+
+Any string::
++
+--
+
+The at sign `"@"` matches any string in its entirety. This could be combined
+with the intersection and complement above to express ``everything except''.
+For instance:
+
+ @&~(foo.+) # anything except string beginning with "foo"
+
+Enabled with the `ANYSTRING` or `ALL` flags.
+--
diff --git a/docs/reference/query-dsl/queries/simple-query-string-query.asciidoc b/docs/reference/query-dsl/queries/simple-query-string-query.asciidoc
new file mode 100644
index 0000000..a817b23
--- /dev/null
+++ b/docs/reference/query-dsl/queries/simple-query-string-query.asciidoc
@@ -0,0 +1,100 @@
+[[query-dsl-simple-query-string-query]]
+=== Simple Query String Query
+
+A query that uses the SimpleQueryParser to parse its context. Unlike the
+regular `query_string` query, the `simple_query_string` query will never
+throw an exception, and discards invalid parts of the query. Here is
+an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "simple_query_string" : {
+ "query": "\"fried eggs\" +(eggplant | potato) -frittata",
+ "analyzer": "snowball",
+ "fields": ["body^5","_all"],
+ "default_operator": "and"
+ }
+}
+--------------------------------------------------
+
+The `simple_query_string` top level parameters include:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Parameter |Description
+|`query` |The actual query to be parsed. See below for syntax.
+
+|`fields` |The fields to perform the parsed query against. Defaults to the
+`index.query.default_field` index settings, which in turn defaults to `_all`.
+
+|`default_operator` |The default operator used if no explicit operator
+is specified. For example, with a default operator of `OR`, the query
+`capital of Hungary` is translated to `capital OR of OR Hungary`, and
+with default operator of `AND`, the same query is translated to
+`capital AND of AND Hungary`. The default value is `OR`.
+
+|`analyzer` |The analyzer used to analyze each term of the query when
+creating composite queries.
+
+|`flags` |Flags specifying which features of the `simple_query_string` to
+enable. Defaults to `ALL`.
+|=======================================================================
+
+[float]
+==== Simple Query String Syntax
+The `simple_query_string` supports the following special characters:
+
+* `+` signifies AND operation
+* `|` signifies OR operation
+* `-` negates a single token
+* `"` wraps a number of tokens to signify a phrase for searching
+* `*` at the end of a term signifies a prefix query
+* `(` and `)` signify precedence
+
+In order to search for any of these special characters, they will need to
+be escaped with `\`.
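+
+For example, a query combining a phrase, a required prefix, and a negation
+(the terms here are purely illustrative) might look like:
+
+[source,js]
+--------------------------------------------------
+{
+    "simple_query_string" : {
+        "query" : "\"quick brown\" +fox* -news",
+        "fields" : ["body"]
+    }
+}
+--------------------------------------------------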
+
+[float]
+==== Default Field
+When no field is explicitly specified in the query string syntax, the
+`index.query.default_field` setting will be used to derive which field to
+search on. It defaults to the `_all` field.
+
+So, if the `_all` field is disabled, it might make sense to set a
+different default field.
+
+[float]
+==== Multi Field
+The `fields` parameter can also include pattern-based field names,
+allowing automatic expansion to the relevant fields (including
+dynamically introduced fields). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "simple_query_string" : {
+ "fields" : ["content", "name.*^5"],
+ "query" : "foo bar baz"
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Flags
+`simple_query_string` supports multiple flags to specify which parsing features
+should be enabled. They are specified as a `|`-delimited string with the
+`flags` parameter:
+
+[source,js]
+--------------------------------------------------
+{
+ "simple_query_string" : {
+ "query" : "foo | bar & baz*",
+ "flags" : "OR|AND|PREFIX"
+ }
+}
+--------------------------------------------------
+
+The available flags are: `ALL`, `NONE`, `AND`, `OR`, `PREFIX`, `PHRASE`,
+`PRECEDENCE`, `ESCAPE`, and `WHITESPACE`. \ No newline at end of file
diff --git a/docs/reference/query-dsl/queries/span-first-query.asciidoc b/docs/reference/query-dsl/queries/span-first-query.asciidoc
new file mode 100644
index 0000000..74fe7ff
--- /dev/null
+++ b/docs/reference/query-dsl/queries/span-first-query.asciidoc
@@ -0,0 +1,20 @@
+[[query-dsl-span-first-query]]
+=== Span First Query
+
+Matches spans near the beginning of a field. The span first query maps
+to Lucene `SpanFirstQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_first" : {
+ "match" : {
+ "span_term" : { "user" : "kimchy" }
+ },
+ "end" : 3
+ }
+}
+--------------------------------------------------
+
+The `match` clause can be any other span type query. The `end` controls
+the maximum end position permitted in a match.
diff --git a/docs/reference/query-dsl/queries/span-multi-term-query.asciidoc b/docs/reference/query-dsl/queries/span-multi-term-query.asciidoc
new file mode 100644
index 0000000..76985fa
--- /dev/null
+++ b/docs/reference/query-dsl/queries/span-multi-term-query.asciidoc
@@ -0,0 +1,30 @@
+[[query-dsl-span-multi-term-query]]
+=== Span Multi Term Query
+
+The `span_multi` query allows you to wrap a `multi term query` (one of
+fuzzy, prefix, term range or regexp query) as a `span query`, so
+it can be nested. Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_multi":{
+ "match":{
+ "prefix" : { "user" : { "value" : "ki" } }
+ }
+ }
+}
+--------------------------------------------------
+
+A boost can also be associated with the query:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_multi":{
+ "match":{
+ "prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/span-near-query.asciidoc b/docs/reference/query-dsl/queries/span-near-query.asciidoc
new file mode 100644
index 0000000..39982e2
--- /dev/null
+++ b/docs/reference/query-dsl/queries/span-near-query.asciidoc
@@ -0,0 +1,27 @@
+[[query-dsl-span-near-query]]
+=== Span Near Query
+
+Matches spans which are near one another. One can specify _slop_, the
+maximum number of intervening unmatched positions, as well as whether
+matches are required to be in-order. The span near query maps to Lucene
+`SpanNearQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_near" : {
+ "clauses" : [
+ { "span_term" : { "field" : "value1" } },
+ { "span_term" : { "field" : "value2" } },
+ { "span_term" : { "field" : "value3" } }
+ ],
+ "slop" : 12,
+ "in_order" : false,
+ "collect_payloads" : false
+ }
+}
+--------------------------------------------------
+
+The `clauses` element is a list of one or more other span type queries
+and the `slop` controls the maximum number of intervening unmatched
+positions permitted.
diff --git a/docs/reference/query-dsl/queries/span-not-query.asciidoc b/docs/reference/query-dsl/queries/span-not-query.asciidoc
new file mode 100644
index 0000000..b035720
--- /dev/null
+++ b/docs/reference/query-dsl/queries/span-not-query.asciidoc
@@ -0,0 +1,24 @@
+[[query-dsl-span-not-query]]
+=== Span Not Query
+
+Removes matches which overlap with another span query. The span not
+query maps to Lucene `SpanNotQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_not" : {
+ "include" : {
+ "span_term" : { "field1" : "value1" }
+ },
+ "exclude" : {
+ "span_term" : { "field2" : "value2" }
+ }
+ }
+}
+--------------------------------------------------
+
+The `include` and `exclude` clauses can be any span type query. The
+`include` clause is the span query whose matches are filtered, and the
+`exclude` clause is the span query whose matches must not overlap those
+returned.
diff --git a/docs/reference/query-dsl/queries/span-or-query.asciidoc b/docs/reference/query-dsl/queries/span-or-query.asciidoc
new file mode 100644
index 0000000..72a4ce8
--- /dev/null
+++ b/docs/reference/query-dsl/queries/span-or-query.asciidoc
@@ -0,0 +1,20 @@
+[[query-dsl-span-or-query]]
+=== Span Or Query
+
+Matches the union of its span clauses. The span or query maps to Lucene
+`SpanOrQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_or" : {
+ "clauses" : [
+ { "span_term" : { "field" : "value1" } },
+ { "span_term" : { "field" : "value2" } },
+ { "span_term" : { "field" : "value3" } }
+ ]
+ }
+}
+--------------------------------------------------
+
+The `clauses` element is a list of one or more other span type queries.
diff --git a/docs/reference/query-dsl/queries/span-term-query.asciidoc b/docs/reference/query-dsl/queries/span-term-query.asciidoc
new file mode 100644
index 0000000..9de86d4
--- /dev/null
+++ b/docs/reference/query-dsl/queries/span-term-query.asciidoc
@@ -0,0 +1,30 @@
+[[query-dsl-span-term-query]]
+=== Span Term Query
+
+Matches spans containing a term. The span term query maps to Lucene
+`SpanTermQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_term" : { "user" : "kimchy" }
+}
+--------------------------------------------------
+
+A boost can also be associated with the query:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_term" : { "user" : { "value" : "kimchy", "boost" : 2.0 } }
+}
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+{
+ "span_term" : { "user" : { "term" : "kimchy", "boost" : 2.0 } }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/term-query.asciidoc b/docs/reference/query-dsl/queries/term-query.asciidoc
new file mode 100644
index 0000000..cd9537d
--- /dev/null
+++ b/docs/reference/query-dsl/queries/term-query.asciidoc
@@ -0,0 +1,31 @@
+[[query-dsl-term-query]]
+=== Term Query
+
+Matches documents that have fields that contain a term (*not analyzed*).
+The term query maps to Lucene `TermQuery`. The following matches
+documents where the user field contains the term `kimchy`:
+
+[source,js]
+--------------------------------------------------
+{
+ "term" : { "user" : "kimchy" }
+}
+--------------------------------------------------
+
+A boost can also be associated with the query:
+
+[source,js]
+--------------------------------------------------
+{
+ "term" : { "user" : { "value" : "kimchy", "boost" : 2.0 } }
+}
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+{
+ "term" : { "user" : { "term" : "kimchy", "boost" : 2.0 } }
+}
+--------------------------------------------------
diff --git a/docs/reference/query-dsl/queries/terms-query.asciidoc b/docs/reference/query-dsl/queries/terms-query.asciidoc
new file mode 100644
index 0000000..a1f62a3
--- /dev/null
+++ b/docs/reference/query-dsl/queries/terms-query.asciidoc
@@ -0,0 +1,19 @@
+[[query-dsl-terms-query]]
+=== Terms Query
+
+A query that matches on any (configurable) number of the provided terms.
+This is a simpler syntax for a `bool` query with several `term`
+queries in the `should` clauses. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "terms" : {
+ "tags" : [ "blue", "pill" ],
+ "minimum_should_match" : 1
+ }
+}
+--------------------------------------------------
+
+The `terms` query is also aliased with `in` as the query name for
+simpler usage.
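+
+For instance, the example above could equivalently be written using the `in`
+alias:
+
+[source,js]
+--------------------------------------------------
+{
+    "in" : {
+        "tags" : [ "blue", "pill" ],
+        "minimum_should_match" : 1
+    }
+}
+--------------------------------------------------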
diff --git a/docs/reference/query-dsl/queries/top-children-query.asciidoc b/docs/reference/query-dsl/queries/top-children-query.asciidoc
new file mode 100644
index 0000000..00c32bf
--- /dev/null
+++ b/docs/reference/query-dsl/queries/top-children-query.asciidoc
@@ -0,0 +1,71 @@
+[[query-dsl-top-children-query]]
+=== Top Children Query
+
+The `top_children` query runs the child query with an estimated hits
+size, and out of the hit docs, aggregates them into parent docs. If there
+aren't enough parent docs matching the requested from/size search
+request, then it is run again with a wider (more hits) search.
+
+The `top_children` query also provides scoring capabilities, with the ability
+to specify `max`, `sum` or `avg` as the score type.
+
+One downside of using the `top_children` query is that if there are more child
+docs matching the required hits when executing the child query, then the
+`total_hits` result of the search response will be incorrect.
+
+How many hits are asked for in the first child query run is controlled
+using the `factor` parameter (defaults to `5`). For example, when asking
+for 10 parent docs (with `from` set to 0), then the child query will
+execute with 50 hits expected. If not enough parents are found (in our
+example 10), and there are still more child docs to query, then the
+child search hits are expanded by multiplying by the
+`incremental_factor` (defaults to `2`).
+
+The required parameters are the `query` and `type` (the child type to
+execute the query on). Here is an example with all different parameters,
+including the default values:
+
+[source,js]
+--------------------------------------------------
+{
+ "top_children" : {
+ "type": "blog_tag",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ },
+ "score" : "max",
+ "factor" : 5,
+ "incremental_factor" : 2
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Scope
+
+A `_scope` can be defined on the query, allowing facets to run on the
+same scope name and work against the child documents. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "top_children" : {
+ "_scope" : "my_scope",
+ "type": "blog_tag",
+ "query" : {
+ "term" : {
+ "tag" : "something"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+[float]
+==== Memory Considerations
+
+With the current implementation, all `_id` values are loaded to memory
+(heap) in order to support fast lookups, so make sure there is enough
+memory for it.
diff --git a/docs/reference/query-dsl/queries/wildcard-query.asciidoc b/docs/reference/query-dsl/queries/wildcard-query.asciidoc
new file mode 100644
index 0000000..d72dbec
--- /dev/null
+++ b/docs/reference/query-dsl/queries/wildcard-query.asciidoc
@@ -0,0 +1,39 @@
+[[query-dsl-wildcard-query]]
+=== Wildcard Query
+
+Matches documents that have fields matching a wildcard expression (*not
+analyzed*). Supported wildcards are `*`, which matches any character
+sequence (including the empty one), and `?`, which matches any single
+character. Note this query can be slow, as it needs to iterate over many
+terms. In order to prevent extremely slow wildcard queries, a wildcard
+term should not start with one of the wildcards `*` or `?`. The wildcard
+query maps to Lucene `WildcardQuery`.
+
+[source,js]
+--------------------------------------------------
+{
+ "wildcard" : { "user" : "ki*y" }
+}
+--------------------------------------------------
+
+A boost can also be associated with the query:
+
+[source,js]
+--------------------------------------------------
+{
+ "wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } }
+}
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+{
+ "wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } }
+}
+--------------------------------------------------
+
+This multi term query allows you to control how it gets rewritten using the
+<<query-dsl-multi-term-rewrite,rewrite>>
+parameter.
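+
+For example, a sketch setting the rewrite method explicitly:
+
+[source,js]
+--------------------------------------------------
+{
+    "wildcard" : {
+        "user" : {
+            "value" : "ki*y",
+            "rewrite" : "constant_score_boolean" <1>
+        }
+    }
+}
+--------------------------------------------------
+
+<1> Assumes `constant_score_boolean` is one of the rewrite values documented
+under <<query-dsl-multi-term-rewrite,rewrite>>.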
diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc
new file mode 100644
index 0000000..68df74c
--- /dev/null
+++ b/docs/reference/search.asciidoc
@@ -0,0 +1,101 @@
+[[search]]
+= Search APIs
+
+[partintro]
+--
+
+Most search APIs are <<search-multi-index-type,multi-index&#44; multi-type>>, with the
+exception of the <<search-explain>> endpoints.
+
+[float]
+[[search-routing]]
+== Routing
+
+When executing a search, it is broadcast to all shards of the index/indices
+being searched (round robin between replicas). Which shards are searched
+can be controlled by providing the `routing` parameter. For example,
+when indexing tweets, the routing value can be the user name:
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/tweet?routing=kimchy' -d '{
+ "user" : "kimchy",
+ "postDate" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}
+'
+--------------------------------------------------
+
+In such a case, if we want to search only on the tweets for a specific
+user, we can specify it as the routing, resulting in the search hitting
+only the relevant shard:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?routing=kimchy' -d '{
+ "query": {
+ "filtered" : {
+ "query" : {
+ "query_string" : {
+ "query" : "some query string here"
+ }
+ },
+ "filter" : {
+ "term" : { "user" : "kimchy" }
+ }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+The routing parameter can be multi-valued, represented as a comma-separated
+string. The search will then hit only the relevant shards that the routing
+values map to.
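+
+For example, a sketch of a search restricted to two routing values (the second
+user name is purely illustrative):
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?routing=kimchy,another_user' -d '{
+    "query": {
+        "match_all" : {}
+    }
+}
+'
+--------------------------------------------------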
+
+[float]
+[[stats-groups]]
+== Stats Groups
+
+A search can be associated with stats groups, each of which maintains a
+statistics aggregation per group. The statistics can later be retrieved
+using the <<indices-stats,indices stats>> API. For example, here is a
+search body request that associates the request with two different groups:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "stats" : ["group1", "group2"]
+}
+--------------------------------------------------
+
+--
+
+include::search/search.asciidoc[]
+
+include::search/uri-request.asciidoc[]
+
+include::search/request-body.asciidoc[]
+
+include::search/facets.asciidoc[]
+
+include::search/aggregations.asciidoc[]
+
+include::search/suggesters.asciidoc[]
+
+include::search/multi-search.asciidoc[]
+
+include::search/count.asciidoc[]
+
+include::search/validate.asciidoc[]
+
+include::search/explain.asciidoc[]
+
+include::search/percolate.asciidoc[]
+
+include::search/more-like-this.asciidoc[]
+
diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/search/aggregations.asciidoc
new file mode 100644
index 0000000..cc68059
--- /dev/null
+++ b/docs/reference/search/aggregations.asciidoc
@@ -0,0 +1,137 @@
+[[search-aggregations]]
+== Aggregations
+
+Aggregations grew out of the <<search-facets, facets>> module and the long experience of how users use it
+(and would like to use it) for real-time data analytics purposes. As such, it serves as the next generation
+replacement for the functionality we currently refer to as "faceting".
+
+<<search-facets, Facets>> provide a great way to aggregate data within a document set context.
+This context is defined by the executed query in combination with the different levels of filters that can be defined
+(filtered queries, top-level filters, and facet level filters). While powerful, their implementation is not designed
+from the ground up to support complex aggregations and is thus limited.
+
+.Are facets deprecated?
+**********************************
+As the functionality facets offer is a subset of the one offered by aggregations, over time, we would like to
+see users move to aggregations for all realtime data analytics. That said, we are well aware that such
+transitions/migrations take time, and for this reason we are keeping facets around for the time being.
+Facets are not officially deprecated yet but are likely to be in the future.
+**********************************
+
+The aggregations module breaks the barriers the current facet implementation put in place. The new name ("Aggregations")
+also indicates the intention here - a generic yet extremely powerful framework for building aggregations - any types of
+aggregations.
+
+An aggregation can be seen as a _unit-of-work_ that builds analytic information over a set of documents. The context of
+the execution defines what this document set is (e.g. a top-level aggregation executes within the context of the executed
+query/filters of the search request).
+
+There are many different types of aggregations, each with its own purpose and output. To better understand these types,
+it is often easier to break them into two main families:
+
+_Bucketing_::
+ A family of aggregations that build buckets, where each bucket is associated with a _key_ and a document
+ criterion. When the aggregation is executed, all the buckets criteria are evaluated on every document in
+ the context and when a criterion matches, the document is considered to "fall in" the relevant bucket.
+ By the end of the aggregation process, we'll end up with a list of buckets - each one with a set of
+ documents that "belong" to it.
+
+_Metric_::
+ Aggregations that keep track and compute metrics over a set of documents.
+
+The interesting part comes next. Since each bucket effectively defines a document set (all documents belonging to
+the bucket), one can potentially associate aggregations on the bucket level, and those will execute within the context
+of that bucket. This is where the real power of aggregations kicks in: *aggregations can be nested!*
+
+NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). The sub-aggregations will be computed for
+ the buckets which their parent aggregation generates. There is no hard limit on the level/depth of nested
+ aggregations (one can nest an aggregation under a "parent" aggregation, which is itself a sub-aggregation of
+ another higher-level aggregation).
+
+[float]
+=== Structuring Aggregations
+
+The following snippet captures the basic structure of aggregations:
+
+[source,js]
+--------------------------------------------------
+"aggregations" : {
+ "<aggregation_name>" : {
+ "<aggregation_type>" : {
+ <aggregation_body>
+ }
+ [,"aggregations" : { [<sub_aggregation>]+ } ]?
+ }
+ [,"<aggregation_name_2>" : { ... } ]*
+}
+--------------------------------------------------
+
+The `aggregations` object (the key `aggs` can also be used) in the JSON holds the aggregations to be computed. Each aggregation
+is associated with a logical name that the user defines (e.g. if the aggregation computes the average price, then it would
+make sense to name it `avg_price`). These logical names will also be used to uniquely identify the aggregations in the
+response. Each aggregation has a specific type (`<aggregation_type>` in the above snippet) and is typically the first
+key within the named aggregation body. Each type of aggregation defines its own body, depending on the nature of the
+aggregation (e.g. an `avg` aggregation on a specific field will define the field on which the average will be calculated).
+At the same level of the aggregation type definition, one can optionally define a set of additional aggregations,
+though this only makes sense if the aggregation you defined is of a bucketing nature. In this scenario, the
+sub-aggregations you define on the bucketing aggregation level will be computed for all the buckets built by the
+bucketing aggregation. For example, if you define a set of aggregations under the `range` aggregation, the
+sub-aggregations will be computed for the range buckets that are defined.
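+
+As a concrete (hypothetical) illustration of this structure, the following
+request nests an `avg` sub-aggregation under a `range` bucketing aggregation,
+so the average price is computed per range bucket:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggregations" : {
+        "price_ranges" : {
+            "range" : {
+                "field" : "price",
+                "ranges" : [
+                    { "to" : 50 },
+                    { "from" : 50 }
+                ]
+            },
+            "aggregations" : {
+                "avg_price" : { "avg" : { "field" : "price" } }
+            }
+        }
+    }
+}
+--------------------------------------------------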
+
+[float]
+==== Values Source
+
+Some aggregations work on values extracted from the aggregated documents. Typically, the values will be extracted from
+a specific document field which is set using the `field` key for the aggregations. It is also possible to define a
+<<modules-scripting,`script`>> which will generate the values (per document).
+
+When both `field` and `script` settings are configured for the aggregation, the script will be treated as a
+`value script`. While normal scripts are evaluated on a document level (i.e. the script has access to all the data
+associated with the document), value scripts are evaluated on the *value* level. In this mode, the values are extracted
+from the configured `field` and the `script` is used to apply a "transformation" over these values.
+
+["NOTE",id="aggs-script-note"]
+===============================
+When working with scripts, the `lang` and `params` settings can also be defined. The former defines the scripting
+language which is used (assuming the proper language is available in Elasticsearch, either by default or as a plugin). The latter
+enables defining all the "dynamic" expressions in the script as parameters, which enables the script to keep itself static
+between calls (this will ensure the use of the cached compiled scripts in Elasticsearch).
+===============================
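+
+As a sketch of the above (assuming the default scripting language, and assuming
+`_value` is the variable exposed to value scripts), a value script applying a
+currency conversion to each extracted `price` value might look like:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "avg_converted_price" : {
+            "avg" : {
+                "field" : "price",
+                "script" : "_value * conversion_rate",
+                "params" : {
+                    "conversion_rate" : 0.9
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------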
+
+Scripts can generate a single value or multiple values per document. When generating multiple values, one can use the
+`script_values_sorted` settings to indicate whether these values are sorted or not. Internally, Elasticsearch can
+perform optimizations when dealing with sorted values (for example, with the `min` aggregations, knowing the values are
+sorted, Elasticsearch will skip the iterations over all the values and rely on the first value in the list to be the
+minimum value among all other values associated with the same document).
+
+[float]
+=== Metrics Aggregations
+
+The aggregations in this family compute metrics based on values extracted in one way or another from the documents that
+are being aggregated. The values are typically extracted from the fields of the document (using the field data), but
+can also be generated using scripts. Some aggregations output a single metric (e.g. `avg`) and are called `single-value
+metrics aggregation`, others generate multiple metrics (e.g. `stats`) and are called `multi-value metrics aggregation`.
+The distinction between single-value and multi-value metrics aggregations plays a role when these aggregations serve as
+direct sub-aggregations of some bucket aggregations (some bucket aggregations enable you to sort the returned buckets based
+on the metrics in each bucket).
+
+
+[float]
+=== Bucket Aggregations
+
+Bucket aggregations don't calculate metrics over fields like the metrics aggregations do, but instead, they create
+buckets of documents. Each bucket is associated with a criterion (depending on the aggregation type) which determines
+whether or not a document in the current context "falls" into it. In other words, the buckets effectively define document
+sets. In addition to the buckets themselves, the `bucket` aggregations also compute and return the number of documents
+that "fell in" to each bucket.
+
+Bucket aggregations, as opposed to `metrics` aggregations, can hold sub-aggregations. These sub-aggregations will be
+aggregated for the buckets created by their "parent" bucket aggregation.
+
+There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some
+define a fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process.
+
+
+include::aggregations/metrics.asciidoc[]
+
+include::aggregations/bucket.asciidoc[]
diff --git a/docs/reference/search/aggregations/bucket.asciidoc b/docs/reference/search/aggregations/bucket.asciidoc
new file mode 100644
index 0000000..6bdd504
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket.asciidoc
@@ -0,0 +1,25 @@
+[[search-aggregations-bucket]]
+
+include::bucket/global-aggregation.asciidoc[]
+
+include::bucket/filter-aggregation.asciidoc[]
+
+include::bucket/missing-aggregation.asciidoc[]
+
+include::bucket/nested-aggregation.asciidoc[]
+
+include::bucket/terms-aggregation.asciidoc[]
+
+include::bucket/range-aggregation.asciidoc[]
+
+include::bucket/daterange-aggregation.asciidoc[]
+
+include::bucket/iprange-aggregation.asciidoc[]
+
+include::bucket/histogram-aggregation.asciidoc[]
+
+include::bucket/datehistogram-aggregation.asciidoc[]
+
+include::bucket/geodistance-aggregation.asciidoc[]
+
+include::bucket/geohashgrid-aggregation.asciidoc[] \ No newline at end of file
diff --git a/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc
new file mode 100644
index 0000000..f48ce4a
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc
@@ -0,0 +1,136 @@
+[[search-aggregations-bucket-datehistogram-aggregation]]
+=== Date Histogram
+
+A multi-bucket aggregation similar to the <<search-aggregations-bucket-histogram-aggregation,histogram>> except it can
+only be applied on date values. Since dates are represented internally in Elasticsearch as long values, it is possible
+to use the normal `histogram` on dates as well, though accuracy will be compromised. The reason for this is the fact
+that time-based intervals are not fixed (think of leap years and the number of days in a month). For this reason,
+special support for time-based data is needed. From a functionality perspective, this histogram supports the same features
+as the normal <<search-aggregations-bucket-histogram-aggregation,histogram>>. The main difference is that the interval can be specified by date/time expressions.
+
+Requesting bucket intervals of a month:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "articles_over_time" : {
+ "date_histogram" : {
+ "field" : "date",
+ "interval" : "month"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Fractional values are also allowed, for example 1.5 hours:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "articles_over_time" : {
+ "date_histogram" : {
+ "field" : "date",
+ "interval" : "1.5h"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Other available expressions for interval: `year`, `quarter`, `week`, `day`, `hour`, `minute`, `second`
+
+==== Time Zone
+
+By default, times are stored as UTC milliseconds since the epoch. Thus, all computation and "bucketing" / "rounding" is
+done on UTC. It is possible to provide a time zone (both pre rounding, and post rounding) value, which will cause all
+computations to take the relevant zone into account. The time returned for each bucket/entry is milliseconds since the
+epoch of the provided time zone.
+
+The parameters are `pre_zone` (pre rounding based on interval) and `post_zone` (post rounding based on interval). The
+`time_zone` parameter simply sets the `pre_zone` parameter. By default, those are set to `UTC`.
+
+The zone value accepts either a numeric value for the hours offset, for example `"time_zone" : -2`, or a
+format of hours and minutes, like `"time_zone" : "-02:30"`. Another option is to provide a recognized
+time zone id, such as `America/Los_Angeles`.
+
+Let's take an example. For `2012-04-01T04:15:30Z` with a `pre_zone` of `-08:00`: for day interval, the actual time after
+applying the time zone and rounding falls under `2012-03-31`, so the returned value will be (in millis)
+`2012-03-31T00:00:00Z` (UTC). For hour interval, applying the time zone results in `2012-03-31T20:15:30`, rounding it
+results in `2012-03-31T20:00:00`, but, since we want to return it in UTC (`post_zone` is not set), we convert it back to
+UTC: `2012-04-01T04:00:00Z`. Note, we are consistent in the results, returning the rounded value in UTC.
+
+`post_zone` simply takes the result, and adds the relevant offset.
+
+Sometimes, we want to apply the same conversion to UTC we did above for hour also for day (and up) intervals. We can
+set `pre_zone_adjust_large_interval` to `true`, which will apply the same conversion done for hour interval in the
+example, to day and above intervals (it can be set regardless of the interval, but only kick in when using day and
+higher intervals).
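+
+A sketch of a request combining these parameters (the field name and zone are
+illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "articles_over_time" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "day",
+                "pre_zone" : "-08:00",
+                "pre_zone_adjust_large_interval" : true
+            }
+        }
+    }
+}
+--------------------------------------------------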
+
+==== Factor
+
+The date histogram works on numeric values (since time is stored in milliseconds since the epoch in UTC). But
+sometimes, systems will store a different resolution (like seconds since the epoch) in a numeric field. The `factor`
+parameter can be used to convert the value in the field to milliseconds so that the relevant rounding can actually be
+done, and it is then applied again to get back to the original unit. For example, when a numeric field stores values
+with seconds resolution, the factor can be set to 1000.
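+
+A sketch of such a request:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "articles_over_time" : {
+            "date_histogram" : {
+                "field" : "date_in_seconds", <1>
+                "interval" : "month",
+                "factor" : 1000
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+<1> The field name is hypothetical; the field is assumed to hold seconds since
+the epoch.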
+
+==== Pre/Post Offset
+
+Specific offsets can be provided for pre rounding and post rounding: `pre_offset` for pre rounding, and
+`post_offset` for post rounding. The format is the date time format (`1h`, `1d`, etc...).
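+
+For example, a sketch shifting the values by one hour before the rounding is
+applied:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "articles_over_time" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "day",
+                "pre_offset" : "1h"
+            }
+        }
+    }
+}
+--------------------------------------------------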
+
+==== Keys
+
+Since internally, dates are represented as 64bit numbers, these numbers are returned as the bucket keys (each key
+representing a date - milliseconds since the epoch). It is also possible to define a date format, which will result in
+returning the dates as formatted strings next to the numeric key values:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "articles_over_time" : {
+ "date_histogram" : {
+ "field" : "date",
+ "interval" : "1M",
+ "format" : "yyyy-MM-dd" <1>
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> Supports expressive date <<date-format-pattern,format pattern>>
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "articles_over_time": {
+ "buckets": [
+ {
+ "key_as_string": "2013-02-02",
+ "key": 1328140800000,
+ "doc_count": 1
+ },
+ {
+ "key_as_string": "2013-03-02",
+ "key": 1330646400000,
+ "doc_count": 2
+ },
+ ...
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+Like with the normal <<search-aggregations-bucket-histogram-aggregation,histogram>>, both document level scripts and
+value level scripts are supported. It is also possible to control the order of the returned buckets using the `order`
+settings and filter the returned buckets based on a `min_doc_count` setting (by default, only buckets with a
+`doc_count` greater than zero are returned). \ No newline at end of file
diff --git a/docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc
new file mode 100644
index 0000000..710f3ac
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc
@@ -0,0 +1,110 @@
+[[search-aggregations-bucket-daterange-aggregation]]
+=== Date Range
+
+A range aggregation that is dedicated for date values. The main difference between this aggregation and the normal <<search-aggregations-bucket-range-aggregation,range>> aggregation is that the `from` and `to` values can be expressed in Date Math expressions, and it is also possible to specify a date format by which the `from` and `to` response fields will be returned:
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs": {
+ "range": {
+ "date_range": {
+ "field": "date",
+ "format": "MM-yyy",
+ "ranges": [
+ { "to": "now-10M/M" },
+ { "from": "now-10M/M" }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the example above, we created two range buckets, the first will "bucket" all documents dated prior to 10 months ago and
+the second will "bucket" all documents dated since 10 months ago.
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "range": {
+ "buckets": [
+ {
+ "to": 1.3437792E+12,
+ "to_as_string": "08-2012",
+ "doc_count": 7
+ },
+ {
+ "from": 1.3437792E+12,
+ "from_as_string": "08-2012",
+ "doc_count": 2
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+[[date-format-pattern]]
+==== Date Format/Pattern
+
+NOTE: this information was copied from http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html[JodaDate]
+
+All ASCII letters are reserved as format pattern letters, which are defined as follows:
+
+[options="header"]
+|=======
+|Symbol |Meaning |Presentation |Examples
+|G |era |text |AD
+|C |century of era (>=0) |number |20
+|Y |year of era (>=0) |year |1996
+
+|x |weekyear |year |1996
+|w |week of weekyear |number |27
+|e |day of week |number |2
+|E |day of week |text |Tuesday; Tue
+
+|y |year |year |1996
+|D |day of year |number |189
+|M |month of year |month |July; Jul; 07
+|d |day of month |number |10
+
+|a |halfday of day |text |PM
+|K |hour of halfday (0~11) |number |0
+|h |clockhour of halfday (1~12) |number |12
+
+|H |hour of day (0~23) |number |0
+|k |clockhour of day (1~24) |number |24
+|m |minute of hour |number |30
+|s |second of minute |number |55
+|S |fraction of second |number |978
+
+|z |time zone |text |Pacific Standard Time; PST
+|Z |time zone offset/id |zone |-0800; -08:00; America/Los_Angeles
+
+|' |escape for text |delimiter |
+|'' |single quote |literal |'
+|=======
+
+The count of pattern letters determines the format.
+
+Text:: If the number of pattern letters is 4 or more, the full form is used; otherwise a short or abbreviated form is used if available.
+
+Number:: The minimum number of digits. Shorter numbers are zero-padded to this amount.
+
+Year:: Numeric presentation for year and weekyear fields is handled specially. For example, if the count of 'y' is 2, the year will be displayed as the zero-based year of the century, which is two digits.
+
+Month:: If the count is 3 or over, use text; otherwise use number.
+
+Zone:: 'Z' outputs offset without a colon, 'ZZ' outputs the offset with a colon, 'ZZZ' or more outputs the zone id.
+
+Zone names:: Time zone names ('z') cannot be parsed.
+
+Any characters in the pattern that are not in the ranges of ['a'..'z'] and ['A'..'Z'] will be treated as quoted text. For instance, characters like ':', '.', ' ', '#' and '?' will appear in the resulting time text even though they are not enclosed within single quotes. \ No newline at end of file
diff --git a/docs/reference/search/aggregations/bucket/filter-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/filter-aggregation.asciidoc
new file mode 100644
index 0000000..5166002
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/filter-aggregation.asciidoc
@@ -0,0 +1,38 @@
+[[search-aggregations-bucket-filter-aggregation]]
+=== Filter
+
+Defines a single bucket of all the documents in the current document set context that match a specified filter. Often this will be used to narrow down the current aggregation context to a specific set of documents.
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "in_stock_products" : {
+ "filter" : { "range" : { "stock" : { "gt" : 0 } } },
+ "aggs" : {
+ "avg_price" : { "avg" : { "field" : "price" } }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, we calculate the average price of all the products that are currently in-stock.
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggs" : {
+ "in_stock_products" : {
+ "doc_count" : 100,
+ "avg_price" : { "value" : 56.3 }
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc
new file mode 100644
index 0000000..db4cd4a
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc
@@ -0,0 +1,105 @@
+[[search-aggregations-bucket-geodistance-aggregation]]
+=== Geo Distance
+
+A multi-bucket aggregation that works on `geo_point` fields and conceptually works very similarly to the <<search-aggregations-bucket-range-aggregation,range>> aggregation. The user can define a point of origin and a set of distance range buckets. The aggregation evaluates the distance of each document value from the origin point and determines the buckets it belongs to based on the ranges (a document belongs to a bucket if the distance between the document and the origin falls within the distance range of the bucket).
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "rings_around_amsterdam" : {
+ "geo_distance" : {
+ "field" : "location",
+ "origin" : "52.3760, 4.894",
+ "ranges" : [
+ { "to" : 100 },
+ { "from" : 100, "to" : 300 },
+ { "from" : 300 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "rings" : {
+ "buckets": [
+ {
+ "unit": "km",
+ "to": 100.0,
+ "doc_count": 3
+ },
+ {
+ "unit": "km",
+ "from": 100.0,
+ "to": 300.0,
+ "doc_count": 1
+ },
+ {
+ "unit": "km",
+ "from": 300.0,
+ "doc_count": 7
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+The specified field must be of type `geo_point` (which can only be set explicitly in the mappings). It can also hold an array of `geo_point` fields, in which case all will be taken into account during aggregation. The origin point can accept all formats supported by the `geo_point` <<mapping-geo-point-type,type>>:
+
+* Object format: `{ "lat" : 52.3760, "lon" : 4.894 }` - this is the safest format as it is the most explicit about the `lat` & `lon` values
+* String format: `"52.3760, 4.894"` - where the first number is the `lat` and the second is the `lon`
+* Array format: `[4.894, 52.3760]` - which is based on the `GeoJson` standard and where the first number is the `lon` and the second one is the `lat`
+
+By default, the distance unit is `km` but it can also accept: `mi` (miles), `in` (inch), `yd` (yards), `m` (meters), `cm` (centimeters), `mm` (millimeters).
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "rings" : {
+ "geo_distance" : {
+ "field" : "location",
+ "origin" : "52.3760, 4.894",
+ "unit" : "mi", <1>
+ "ranges" : [
+ { "to" : 100 },
+ { "from" : 100, "to" : 300 },
+ { "from" : 300 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> The distances will be computed as miles
+
+There are three distance calculation modes: `sloppy_arc` (the default), `arc` (most accurate) and `plane` (fastest). The `arc` calculation is the most accurate one but also the most expensive one in terms of performance. The `sloppy_arc` is faster but less accurate. The `plane` is the fastest but least accurate distance function. Consider using `plane` when your search context is "narrow" and spans smaller geographical areas (like cities or even countries). `plane` may return higher error margins for searches across very large areas (e.g. cross continent search). The distance calculation type can be set using the `distance_type` parameter:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "rings" : {
+ "geo_distance" : {
+ "field" : "location",
+ "origin" : "52.3760, 4.894",
+ "distance_type" : "plane",
+ "ranges" : [
+ { "to" : 100 },
+ { "from" : 100, "to" : 300 },
+ { "from" : 300 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc
new file mode 100644
index 0000000..3ed2896
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc
@@ -0,0 +1,127 @@
+[[search-aggregations-bucket-geohashgrid-aggregation]]
+=== GeoHash grid
+
+A multi-bucket aggregation that works on `geo_point` fields and groups points into buckets that represent cells in a grid.
+The resulting grid can be sparse and only contains cells that have matching data. Each cell is labeled using a http://en.wikipedia.org/wiki/Geohash[geohash] which is of user-definable precision.
+
+* High precision geohashes have a long string length and represent cells that cover only a small area.
+* Low precision geohashes have a short string length and represent cells that each cover a large area.
+
+Geohashes used in this aggregation can have a choice of precision between 1 and 12.
+
+WARNING: The highest-precision geohash of length 12 produces cells that cover less than a square metre of land and so high-precision requests can be very costly in terms of RAM and result sizes.
+Please see the example below for how to first filter the aggregation to a smaller geographic area before requesting high levels of detail.
+
+The specified field must be of type `geo_point` (which can only be set explicitly in the mappings) and it can also hold an array of `geo_point` fields, in which case all points will be taken into account during aggregation.
+
+
+==== Simple low-precision request
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations" : {
+ "myLarge-GrainGeoHashGrid" : {
+ "geohash_grid" : {
+ "field" : "location",
+ "precision" : 3
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "myLarge-GrainGeoHashGrid": {
+ "buckets": [
+ {
+ "key": "svz",
+ "doc_count": 10964
+ },
+ {
+ "key": "sv8",
+ "doc_count": 3198
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+
+
+==== High-precision requests
+
+When requesting detailed buckets (typically for displaying a "zoomed in" map), a filter like <<query-dsl-geo-bounding-box-filter,geo_bounding_box>> should be applied to narrow the subject area, otherwise potentially millions of buckets will be created and returned.
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations" : {
+ "zoomedInView" : {
+ "filter" : {
+ "geo_bounding_box" : {
+ "location" : {
+ "top_left" : "51.73, 0.9",
+ "bottom_right" : "51.55, 1.1"
+ }
+ }
+ },
+ "aggregations":{
+ "zoom1":{
+ "geohash_grid" : {
+ "field":"location",
+ "precision":8,
+ }
+ }
+ }
+ }
+ }
+ }
+--------------------------------------------------
+
+==== Cell dimensions at the equator
+The table below shows the metric dimensions for cells covered by various string lengths of geohash.
+Cell dimensions vary with latitude and so the table is for the worst-case scenario at the equator.
+
+[horizontal]
+*GeoHash length*:: *Area width x height*
+1:: 5,009.4km x 4,992.6km
+2:: 1,252.3km x 624.1km
+3:: 156.5km x 156km
+4:: 39.1km x 19.5km
+5:: 4.9km x 4.9km
+6:: 1.2km x 609.4m
+7:: 152.9m x 152.4m
+8:: 38.2m x 19m
+9:: 4.8m x 4.8m
+10:: 1.2m x 59.5cm
+11:: 14.9cm x 14.9cm
+12:: 3.7cm x 1.9cm
+
+
+
+==== Options
+
+[horizontal]
+field:: Mandatory. The name of the field indexed with GeoPoints.
+
+precision:: Optional. The string length of the geohashes used to define
+ cells/buckets in the results. Defaults to 5.
+
+size:: Optional. The maximum number of geohash buckets to return
+ (defaults to 10,000). When results are trimmed, buckets are
+ prioritised based on the volumes of documents they contain.
+
+shard_size:: Optional. To allow for more accurate counting of the top cells
+ returned in the final result the aggregation defaults to
+ returning `max(10,(size x number-of-shards))` buckets from each
+ shard. If this heuristic is undesirable, the number considered
+ from each shard can be overridden using this parameter.
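+
+For example, a sketch combining these options (the values are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "aggregations" : {
+        "grid" : {
+            "geohash_grid" : {
+                "field" : "location",
+                "precision" : 5,
+                "size" : 100,
+                "shard_size" : 500
+            }
+        }
+    }
+}
+--------------------------------------------------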
+
+
diff --git a/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc
new file mode 100644
index 0000000..da6ddc1
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc
@@ -0,0 +1,51 @@
+[[search-aggregations-bucket-global-aggregation]]
+=== Global
+
+Defines a single bucket of all the documents within the search execution context. This context is defined by the indices and the document types you're searching on, but is *not* influenced by the search query itself.
+
+NOTE: Global aggregators can only be placed as top level aggregators (it makes no sense to embed a global aggregator
+ within another bucket aggregator)
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match" : { "title" : "shirt" }
+ },
+ "aggs" : {
+ "all_products" : {
+ "global" : {}, <1>
+ "aggs" : { <2>
+ "avg_price" : { "avg" : { "field" : "price" } }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> The `global` aggregation has an empty body
+<2> The sub-aggregations that are registered for this `global` aggregation
+
+The above aggregation demonstrates how one would compute aggregations (`avg_price` in this example) on all the documents in the search context, regardless of the query (in our example, it will compute the average price over all products in our catalog, not just on the "shirts").
+
+The response for the above aggregation:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations" : {
+ "all_products" : {
+ "doc_count" : 100, <1>
+ "avg_price" : {
+ "value" : 56.3
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> The number of documents that were aggregated (in our case, all documents within the search context)
diff --git a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc
new file mode 100644
index 0000000..469ea23
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc
@@ -0,0 +1,283 @@
+[[search-aggregations-bucket-histogram-aggregation]]
+=== Histogram
+
+A multi-bucket values source based aggregation that can be applied on numeric values extracted from the documents.
+It dynamically builds fixed size (a.k.a. interval) buckets over the values. For example, if the documents have a field
+that holds a price (numeric), we can configure this aggregation to dynamically build buckets with interval `5`
+(in case of price it may represent $5). When the aggregation executes, the price field of every document will be
+evaluated and will be rounded down to its closest bucket - for example, if the price is `32` and the bucket size is `5`
+then the rounding will yield `30` and thus the document will "fall" into the bucket that is associated with the key `30`.
+To make this more formal, here is the rounding function that is used:
+
+[source,java]
+--------------------------------------------------
+rem = value % interval
+if (rem < 0) {
+ rem += interval
+}
+bucket_key = value - rem
+--------------------------------------------------
+
+The following snippet "buckets" the products based on their `price` by interval of `50`:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "prices" : {
+ "buckets": [
+ {
+ "key": 0,
+ "doc_count": 2
+ },
+ {
+ "key": 50,
+ "doc_count": 4
+ },
+ {
+ "key": 150,
+ "doc_count": 3
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+The response above shows that none of the aggregated products has a price that falls within the range of `[100 - 150)`.
+By default, the response will only contain those buckets with a `doc_count` greater than 0. It is possible to change that
+and request buckets with either a higher minimum count or even 0 (in which case Elasticsearch will "fill in the gaps"
+and create buckets with zero documents). This can be configured using the `min_doc_count` setting:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50,
+ "min_doc_count" : 0
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "prices" : {
+ "buckets": [
+ {
+ "key": 0,
+ "doc_count": 2
+ },
+ {
+ "key": 50,
+ "doc_count": 4
+ },
+ {
+ "key" : 100,
+ "doc_count" : 0 <1>
+ },
+ {
+ "key": 150,
+ "doc_count": 3
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+<1> No documents were found that belong in this bucket, yet it is still returned with zero `doc_count`.
+
+==== Order
+
+By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled
+using the `order` setting.
+
+Ordering the buckets by their key - descending:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50,
+ "order" : { "_key" : "desc" }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Ordering the buckets by their `doc_count` - ascending:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50,
+ "order" : { "_count" : "asc" }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+If the histogram aggregation has a direct metrics sub-aggregation, the latter can determine the order of the buckets:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50,
+ "order" : { "price_stats.min" : "asc" } <1>
+ },
+ "aggs" : {
+ "price_stats" : { "stats" : {} } <2>
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> The `{ "price_stats.min" : asc" }` will sort the buckets based on `min` value of their their `price_stats` sub-aggregation.
+
+<2> There is no need to configure the `price` field for the `price_stats` aggregation as it will inherit it by default from its parent histogram aggregation.
+
+==== Minimum document count
+
+It is possible to only return buckets that have a document count that is greater than or equal to a configured
+limit through the `min_doc_count` option.
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50,
+ "min_doc_count": 10
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The above aggregation would only return buckets that contain 10 documents or more. Default value is `1`.
+
+NOTE: The special value `0` can be used to add empty buckets to the response between the minimum and the maximum buckets.
+Here is an example of what the response could look like:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "prices": {
+            "buckets": [
+                {
+                    "key": 0,
+                    "doc_count": 2
+                },
+                {
+                    "key": 50,
+                    "doc_count": 0
+                },
+                {
+                    "key": 100,
+                    "doc_count": 0
+                },
+                {
+                    "key": 150,
+                    "doc_count": 3
+                },
+                {
+                    "key": 200,
+                    "doc_count": 0
+                },
+                {
+                    "key": 250,
+                    "doc_count": 0
+                },
+                {
+                    "key": 300,
+                    "doc_count": 1
+                }
+            ]
+ }
+ }
+}
+--------------------------------------------------
+
+==== Response Format
+
+By default, the buckets are returned as an ordered array. It is also possible to request the response as a hash
+keyed by the buckets' keys instead:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "prices" : {
+ "histogram" : {
+ "field" : "price",
+ "interval" : 50,
+ "keyed" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "prices": {
+ "buckets": {
+ "0": {
+ "key": 0,
+ "doc_count": 2
+ },
+ "50": {
+ "key": 50,
+ "doc_count": 4
+ },
+ "150": {
+ "key": 150,
+ "doc_count": 3
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc
new file mode 100644
index 0000000..def09e6
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc
@@ -0,0 +1,98 @@
+[[search-aggregations-bucket-iprange-aggregation]]
+=== IPv4 Range
+
+Just like the dedicated <<search-aggregations-bucket-daterange-aggregation,date>> range aggregation, there is also a dedicated range aggregation for IPv4 typed fields.
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "ip_ranges" : {
+ "ip_range" : {
+ "field" : "ip",
+ "ranges" : [
+ { "to" : "10.0.0.5" },
+ { "from" : "10.0.0.5" }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+        "ip_ranges": {
+ "buckets" : [
+ {
+ "to": 167772165,
+ "to_as_string": "10.0.0.5",
+ "doc_count": 4
+ },
+ {
+ "from": 167772165,
+ "from_as_string": "10.0.0.5",
+ "doc_count": 6
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+IP ranges can also be defined as CIDR masks:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "ip_ranges" : {
+ "ip_range" : {
+ "field" : "ip",
+ "ranges" : [
+ { "mask" : "10.0.0.0/25" },
+ { "mask" : "10.0.0.127/25" }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "ip_ranges": {
+ "buckets": [
+ {
+ "key": "10.0.0.0/25",
+                "from": 167772160,
+ "from_as_string": "10.0.0.0",
+ "to": 167772287,
+ "to_as_string": "10.0.0.127",
+ "doc_count": 127
+ },
+ {
+ "key": "10.0.0.127/25",
+                "from": 167772160,
+ "from_as_string": "10.0.0.0",
+ "to": 167772287,
+ "to_as_string": "10.0.0.127",
+ "doc_count": 127
+ }
+ ]
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/search/aggregations/bucket/missing-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/missing-aggregation.asciidoc
new file mode 100644
index 0000000..77526c2
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/missing-aggregation.asciidoc
@@ -0,0 +1,34 @@
+[[search-aggregations-bucket-missing-aggregation]]
+=== Missing
+
+A field data based single bucket aggregation, that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). This aggregator will often be used in conjunction with other field data bucket aggregators (such as ranges) to return information for all the documents that could not be placed in any of the other buckets due to missing field data values.
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "products_without_a_price" : {
+ "missing" : { "field" : "price" }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, we get the total number of products that do not have a price.
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+    "aggregations" : {
+ "products_without_a_price" : {
+ "doc_count" : 10
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
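+
+As a sketch of that pattern (reusing the `price` field from the example above), one might run a `range`
+aggregation alongside a `missing` aggregation, so that products without a price are still accounted for:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "price_ranges" : {
+            "range" : {
+                "field" : "price",
+                "ranges" : [
+                    { "to" : 50 },
+                    { "from" : 50 }
+                ]
+            }
+        },
+        "products_without_a_price" : {
+            "missing" : { "field" : "price" }
+        }
+    }
+}
+--------------------------------------------------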
diff --git a/docs/reference/search/aggregations/bucket/nested-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/nested-aggregation.asciidoc
new file mode 100644
index 0000000..cd49f88
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/nested-aggregation.asciidoc
@@ -0,0 +1,67 @@
+[[search-aggregations-bucket-nested-aggregation]]
+=== Nested
+
+A special single bucket aggregation that enables aggregating nested documents.
+
+For example, let's say we have an index of products, and each product holds a list of resellers - each having its own
+price for the product. The mapping could look like:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "product" : {
+ "properties" : {
+ "resellers" : { <1>
+                "type" : "nested",
+ "properties" : {
+ "name" : { "type" : "string" },
+ "price" : { "type" : "double" }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> The `resellers` field is an array that holds nested documents under the `product` object.
+
+The following aggregation will return the minimum price for which a product can be purchased:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match" : { "name" : "led tv" }
+    },
+ "aggs" : {
+ "resellers" : {
+ "nested" : {
+ "path" : "resellers"
+ },
+ "aggs" : {
+                "min_price" : { "min" : { "field" : "resellers.price" } }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+As you can see above, the nested aggregation requires the `path` of the nested documents within the top level documents.
+Then one can define any type of aggregation over these nested documents.
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "resellers": {
+ "min_price": {
+ "value" : 350
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc
new file mode 100644
index 0000000..41a4dd1
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc
@@ -0,0 +1,274 @@
+[[search-aggregations-bucket-range-aggregation]]
+=== Range
+
+A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. During the aggregation process, the values extracted from each document will be checked against each bucket range, and the relevant/matching documents will be "bucketed" accordingly.
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "field" : "price",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 50, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "price_ranges" : {
+ "buckets": [
+ {
+ "to": 50,
+ "doc_count": 2
+ },
+ {
+ "from": 50,
+ "to": 100,
+ "doc_count": 4
+ },
+ {
+ "from": 100,
+ "doc_count": 4
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+==== Keyed Response
+
+Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "field" : "price",
+ "keyed" : true,
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 50, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "price_ranges" : {
+ "buckets": {
+ "*-50.0": {
+ "to": 50,
+ "doc_count": 2
+ },
+ "50.0-100.0": {
+ "from": 50,
+ "to": 100,
+ "doc_count": 4
+ },
+ "100.0-*": {
+ "from": 100,
+ "doc_count": 4
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+It is also possible to customize the key for each range:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "field" : "price",
+ "keyed" : true,
+ "ranges" : [
+ { "key" : "cheap", "to" : 50 },
+ { "key" : "average", "from" : 50, "to" : 100 },
+ { "key" : "expensive", "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Script
+
+Computing the bucketed values using a script instead of a field:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "script" : "doc['price'].value",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 50, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Value Script
+
+Let's say the product prices are in USD but we would like to get the price ranges in EUR. We can use a value script to convert the prices prior to the aggregation (assuming a conversion rate of 0.8):
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "field" : "price",
+ "script" : "_value * conversion_rate",
+ "params" : {
+ "conversion_rate" : 0.8
+ },
+ "ranges" : [
+ { "to" : 35 },
+ { "from" : 35, "to" : 70 },
+ { "from" : 70 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Sub Aggregations
+
+The following example not only "buckets" the documents into the different price ranges, but also computes statistics over the prices in each range:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "field" : "price",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 50, "to" : 100 },
+ { "from" : 100 }
+ ]
+ },
+ "aggs" : {
+ "price_stats" : {
+ "stats" : { "field" : "price" }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggregations": {
+ "price_ranges" : {
+ "buckets": [
+ {
+ "to": 50,
+ "doc_count": 2,
+ "price_stats": {
+ "count": 2,
+ "min": 20,
+ "max": 47,
+ "avg": 33.5,
+ "sum": 67
+ }
+ },
+ {
+ "from": 50,
+ "to": 100,
+ "doc_count": 4,
+ "price_stats": {
+ "count": 4,
+ "min": 60,
+ "max": 98,
+ "avg": 82.5,
+ "sum": 330
+ }
+ },
+ {
+ "from": 100,
+ "doc_count": 4,
+ "price_stats": {
+ "count": 4,
+ "min": 134,
+ "max": 367,
+ "avg": 216,
+ "sum": 864
+ }
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+If a sub aggregation is also based on the same value source as the range aggregation (like the `stats` aggregation in the example above) it is possible to leave out the value source definition for it. The following will return the same response as above:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "price_ranges" : {
+ "range" : {
+ "field" : "price",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 50, "to" : 100 },
+ { "from" : 100 }
+ ]
+ },
+ "aggs" : {
+ "price_stats" : {
+ "stats" : {} <1>
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> We don't need to specify the `price` as we "inherit" it by default from the parent `range` aggregation \ No newline at end of file
diff --git a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc
new file mode 100644
index 0000000..3cc7107
--- /dev/null
+++ b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc
@@ -0,0 +1,294 @@
+[[search-aggregations-bucket-terms-aggregation]]
+=== Terms
+
+A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value.
+
+Example:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : { "field" : "gender" }
+ }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations" : {
+ "genders" : {
+ "buckets" : [
+ {
+ "key" : "male",
+ "doc_count" : 10
+ },
+ {
+ "key" : "female",
+ "doc_count" : 10
+                }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+
+By default, the `terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. One can
+change this default behaviour by setting the `size` parameter.
+
+==== Size & Shard Size
+
+The `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By
+default, the node coordinating the search process will request each shard to provide its own top `size` term buckets
+and once all shards respond, it will reduce the results to the final list that will then be returned to the client.
+This means that if the number of unique terms is greater than `size`, the returned list is slightly off and not accurate
+(it could be that the term counts are slightly off and it could even be that a term that should have been in the top
+`size` buckets was not returned).
+
+The higher the requested `size` is, the more accurate the results will be, but also, the more expensive it will be to
+compute the final results (both due to bigger priority queues that are managed on a shard level and due to bigger data
+transfers between the nodes and the client).
+
+The `shard_size` parameter can be used to minimize the extra work that comes with bigger requested `size`. When defined,
+it will determine how many terms the coordinating node will request from each shard. Once all the shards have
+responded, the coordinating node will then reduce them to a final result based on the `size` parameter - this way,
+one can increase the accuracy of the returned terms and avoid the overhead of streaming a big list of buckets back to
+the client.
+
+NOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, elasticsearch will
+ override it and reset it to be equal to `size`.
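+
+For example, a sketch (the `tags` field is reused from later examples in this section) that returns only the top
+5 terms to the client while asking each shard for its top 20 candidate terms:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "tags" : {
+            "terms" : {
+                "field" : "tags",
+                "size" : 5,
+                "shard_size" : 20
+            }
+        }
+    }
+}
+--------------------------------------------------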
+
+
+==== Order
+
+The order of the buckets can be customized by setting the `order` parameter. By default, the buckets are ordered by
+their `doc_count` descending. It is also possible to change this behaviour as follows:
+
+Ordering the buckets by their `doc_count` in an ascending manner:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : {
+ "field" : "gender",
+ "order" : { "_count" : "asc" }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Ordering the buckets alphabetically by their terms in an ascending manner:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : {
+ "field" : "gender",
+ "order" : { "_term" : "asc" }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+
+Ordering the buckets by a single value metrics sub-aggregation (identified by the aggregation name):
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : {
+ "field" : "gender",
+ "order" : { "avg_height" : "desc" }
+ },
+ "aggs" : {
+ "avg_height" : { "avg" : { "field" : "height" } }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Ordering the buckets by a multi value metrics sub-aggregation (identified by the aggregation name):
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : {
+ "field" : "gender",
+                "order" : { "height_stats.avg" : "desc" }
+ },
+ "aggs" : {
+ "height_stats" : { "stats" : { "field" : "height" } }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Minimum document count
+
+It is possible to only return terms that match a configured minimum number of hits using the `min_doc_count` option:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "tags" : {
+ "terms" : {
+ "field" : "tag",
+ "min_doc_count": 10
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The above aggregation would only return tags which have been found in 10 hits or more. Default value is `1`.
+
+NOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. However, some of
+ the returned terms which have a document count of zero might only belong to deleted documents, so there is
+    no guarantee that a `match_all` query would find a positive document count for those terms.
+
+WARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_count` may return a number of buckets
+ which is less than `size` because not enough data was gathered from the shards. Missing buckets can be
+    brought back by increasing `shard_size`.
+
+==== Script
+
+Generating the terms using a script:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : {
+ "script" : "doc['gender'].value"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Value Script
+
+Transforming each field value using a value script before the buckets are computed:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "genders" : {
+ "terms" : {
+ "field" : "gender",
+                "script" : "'Gender: ' + _value"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+
+==== Filtering Values
+
+It is possible to filter the values for which buckets will be created. This can be done using the `include` and
+`exclude` parameters which are based on regular expressions.
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "tags" : {
+ "terms" : {
+ "field" : "tags",
+ "include" : ".*sport.*",
+ "exclude" : "water_.*"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above example, buckets will be created for all the tags that have the word `sport` in them, except those starting
+with `water_` (so the tag `water_sports` will not be aggregated). The `include` regular expression will determine what
+values are "allowed" to be aggregated, while the `exclude` determines the values that should not be aggregated. When
+both are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`.
+
+The regular expressions are based on the Java(TM) http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html[Pattern],
+and as such, it is also possible to pass in flags that determine how the compiled regular expression will work:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "tags" : {
+ "terms" : {
+ "field" : "tags",
+ "include" : {
+ "pattern" : ".*sport.*",
+ "flags" : "CANON_EQ|CASE_INSENSITIVE" <1>
+ },
+ "exclude" : {
+ "pattern" : "water_.*",
+ "flags" : "CANON_EQ|CASE_INSENSITIVE"
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> the flags are concatenated using the `|` character as a separator
+
+The possible flags that can be used are:
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#CANON_EQ[`CANON_EQ`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#CASE_INSENSITIVE[`CASE_INSENSITIVE`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#COMMENTS[`COMMENTS`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#DOTALL[`DOTALL`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#LITERAL[`LITERAL`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#MULTILINE[`MULTILINE`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNICODE_CASE[`UNICODE_CASE`],
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNICODE_CHARACTER_CLASS[`UNICODE_CHARACTER_CLASS`] and
+http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNIX_LINES[`UNIX_LINES`]
+
+==== Execution hint
+
+There are two mechanisms by which terms aggregations can be executed: either by using field values directly in order to aggregate
+data per-bucket (`map`), or by using ordinals of the field values instead of the values themselves (`ordinals`). Although the
+latter execution mode can be expected to be slightly faster, it is only available for use when the underlying data source exposes
+those terms ordinals. Moreover, it may actually be slower if most field values are unique. Elasticsearch tries to have sensible
+defaults when it comes to the execution mode that should be used, but in case you know that one execution mode may perform better
+than the other one, you have the ability to "hint" it to Elasticsearch:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "tags" : {
+ "terms" : {
+ "field" : "tags",
+ "execution_hint": "map" <1>
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+<1> the possible values are `map` and `ordinals`
+
+Please note that Elasticsearch will ignore this execution hint if it is not applicable.
diff --git a/docs/reference/search/aggregations/metrics.asciidoc b/docs/reference/search/aggregations/metrics.asciidoc
new file mode 100644
index 0000000..068fda2
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics.asciidoc
@@ -0,0 +1,15 @@
+[[search-aggregations-metrics]]
+
+include::metrics/min-aggregation.asciidoc[]
+
+include::metrics/max-aggregation.asciidoc[]
+
+include::metrics/sum-aggregation.asciidoc[]
+
+include::metrics/avg-aggregation.asciidoc[]
+
+include::metrics/stats-aggregation.asciidoc[]
+
+include::metrics/extendedstats-aggregation.asciidoc[]
+
+include::metrics/valuecount-aggregation.asciidoc[]
diff --git a/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc
new file mode 100644
index 0000000..e9d72e0
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc
@@ -0,0 +1,73 @@
+[[search-aggregations-metrics-avg-aggregation]]
+=== Avg
+
+A `single-value` metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
+
+Assuming the data consists of documents representing exam grades (between 0 and 100) of students:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "avg_grade" : { "avg" : { "field" : "grade" } }
+ }
+}
+--------------------------------------------------
+
+The above aggregation computes the average grade over all documents. The aggregation type is `avg` and the `field` setting defines the numeric field of the documents the average will be computed on. The above will return the following:
+
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "avg_grade": {
+ "value": 75
+ }
+ }
+}
+--------------------------------------------------
+
+The name of the aggregation (`avg_grade` above) also serves as the key by which the aggregation result can be retrieved from the returned response.
+
+==== Script
+
+Computing the average grade based on a script:
+
+[source,js]
+--------------------------------------------------
+{
+ ...,
+
+ "aggs" : {
+ "avg_grade" : { "avg" : { "script" : "doc['grade'].value" } }
+ }
+}
+--------------------------------------------------
+
+===== Value Script
+
+It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new average:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ ...
+
+ "aggs" : {
+ "avg_corrected_grade" : {
+ "avg" : {
+ "field" : "grade",
+ "script" : "_value * correction",
+ "params" : {
+ "correction" : 1.2
+ }
+ }
+ }
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc
new file mode 100644
index 0000000..a632f4e
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc
@@ -0,0 +1,82 @@
+[[search-aggregations-metrics-extendedstats-aggregation]]
+=== Extended Stats
+
+A `multi-value` metrics aggregation that computes stats over numeric values extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
+
+The `extended_stats` aggregation is an extended version of the <<search-aggregations-metrics-stats-aggregation,`stats`>> aggregation, where additional metrics are added such as `sum_of_squares`, `variance` and `std_deviation`.
+
+Assuming the data consists of documents representing exam grades (between 0 and 100) of students:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "grades_stats" : { "extended_stats" : { "field" : "grade" } }
+ }
+}
+--------------------------------------------------
+
+The above aggregation computes the grades statistics over all documents. The aggregation type is `extended_stats` and the `field` setting defines the numeric field of the documents the stats will be computed on. The above will return the following:
+
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "grades_stats": {
+ "count": 6,
+ "min": 72,
+ "max": 117.6,
+ "avg": 94.2,
+ "sum": 565.2,
+ "sum_of_squares": 54551.51999999999,
+ "variance": 218.2799999999976,
+ "std_deviation": 14.774302013969987
+ }
+ }
+}
+--------------------------------------------------
+
+The name of the aggregation (`grades_stats` above) also serves as the key by which the aggregation result can be retrieved from the returned response.
+
+==== Script
+
+Computing the grades stats based on a script:
+
+[source,js]
+--------------------------------------------------
+{
+ ...,
+
+ "aggs" : {
+ "grades_stats" : { "extended_stats" : { "script" : "doc['grade'].value" } }
+ }
+}
+--------------------------------------------------
+
+===== Value Script
+
+It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ ...
+
+ "aggs" : {
+ "grades_stats" : {
+ "extended_stats" : {
+ "field" : "grade",
+ "script" : "_value * correction",
+ "params" : {
+ "correction" : 1.2
+ }
+ }
+ }
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc
new file mode 100644
index 0000000..17b801e
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc
@@ -0,0 +1,68 @@
+[[search-aggregations-metrics-max-aggregation]]
+=== Max
+
+A `single-value` metrics aggregation that keeps track of and returns the maximum value among the numeric values extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
+
+Computing the max price value across all documents
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "max_price" : { "max" : { "field" : "price" } }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "max_price": {
+ "value": 35
+ }
+ }
+}
+--------------------------------------------------
+
+As can be seen, the name of the aggregation (`max_price` above) also serves as the key by which the aggregation result can be retrieved from the returned response.
+
+==== Script
+
+Computing the max price value across all documents, this time using a script:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "max_price" : { "max" : { "script" : "doc['price'].value" } }
+ }
+}
+--------------------------------------------------
+
+
+==== Value Script
+
+Let's say that the prices of the documents in our index are in USD, but we would like to compute the max in EUR (and for the sake of this example, let's say the conversion rate is 1.2). We can use a value script to apply the conversion rate to every value before it is aggregated:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "max_price_in_euros" : {
+ "max" : {
+ "field" : "price",
+ "script" : "_value * conversion_rate",
+ "params" : {
+ "conversion_rate" : 1.2
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
diff --git a/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc
new file mode 100644
index 0000000..d1cf0e3
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc
@@ -0,0 +1,67 @@
+[[search-aggregations-metrics-min-aggregation]]
+=== Min
+
+A `single-value` metrics aggregation that keeps track of and returns the minimum value among numeric values extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
+
+Computing the min price value across all documents:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "min_price" : { "min" : { "field" : "price" } }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "min_price": {
+ "value": 10
+ }
+ }
+}
+--------------------------------------------------
+
+As can be seen, the name of the aggregation (`min_price` above) also serves as the key by which the aggregation result can be retrieved from the returned response.
+
+==== Script
+
+Computing the min price value across all documents, this time using a script:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "min_price" : { "min" : { "script" : "doc['price'].value" } }
+ }
+}
+--------------------------------------------------
+
+
+==== Value Script
+
+Let's say that the prices of the documents in our index are in USD, but we would like to compute the min in EUR (and for the sake of this example, let's say the conversion rate is 1.2). We can use a value script to apply the conversion rate to every value before it is aggregated:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "min_price_in_euros" : {
+ "min" : {
+ "field" : "price",
+ "script" : "_value * conversion_rate",
+ "params" : {
+ "conversion_rate" : 1.2
+ }
+ }
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc
new file mode 100644
index 0000000..940c113
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc
@@ -0,0 +1,79 @@
+[[search-aggregations-metrics-stats-aggregation]]
+=== Stats
+
+A `multi-value` metrics aggregation that computes stats over numeric values extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
+
+The stats that are returned consist of: `min`, `max`, `sum`, `count` and `avg`.
+
+Assuming the data consists of documents representing exam grades (between 0 and 100) of students:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "grades_stats" : { "stats" : { "field" : "grade" } }
+ }
+}
+--------------------------------------------------
+
+The above aggregation computes the grades statistics over all documents. The aggregation type is `stats` and the `field` setting defines the numeric field of the documents the stats will be computed on. The above will return the following:
+
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "grades_stats": {
+ "count": 6,
+ "min": 60,
+ "max": 98,
+ "avg": 78.5,
+ "sum": 471
+ }
+ }
+}
+--------------------------------------------------
+
+The name of the aggregation (`grades_stats` above) also serves as the key by which the aggregation result can be retrieved from the returned response.
+
+==== Script
+
+Computing the grades stats based on a script:
+
+[source,js]
+--------------------------------------------------
+{
+ ...,
+
+ "aggs" : {
+ "grades_stats" : { "stats" : { "script" : "doc['grade'].value" } }
+ }
+}
+--------------------------------------------------
+
+===== Value Script
+
+It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ ...
+
+ "aggs" : {
+ "grades_stats" : {
+ "stats" : {
+ "field" : "grade",
+ "script" : "_value * correction",
+ "params" : {
+ "correction" : 1.2
+ }
+ }
+ }
+ }
+ }
+}
+-------------------------------------------------- \ No newline at end of file
diff --git a/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc
new file mode 100644
index 0000000..69575f5
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc
@@ -0,0 +1,77 @@
+[[search-aggregations-metrics-sum-aggregation]]
+=== Sum
+
+A `single-value` metrics aggregation that sums up numeric values that are extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
+
+Assuming the data consists of documents representing stock ticks, where each tick holds the change in the stock price from the previous tick.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "filtered" : {
+ "query" : { "match_all" : {}},
+ "filter" : {
+ "range" : { "timestamp" : { "from" : "now/1d+9.5h", "to" : "now/1d+16h" }}
+ }
+ }
+ },
+ "aggs" : {
+ "intraday_return" : { "sum" : { "field" : "change" } }
+ }
+}
+--------------------------------------------------
+
+The above aggregation sums up all the changes in today's trading stock ticks, which amounts to the intraday return. The aggregation type is `sum` and the `field` setting defines the numeric field of the documents whose values will be summed up. The above will return the following:
+
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "intraday_return": {
+ "value": 2.18
+ }
+ }
+}
+--------------------------------------------------
+
+The name of the aggregation (`intraday_return` above) also serves as the key by which the aggregation result can be retrieved from the returned response.
+
+==== Script
+
+Computing the intraday return based on a script:
+
+[source,js]
+--------------------------------------------------
+{
+ ...,
+
+ "aggs" : {
+ "intraday_return" : { "sum" : { "script" : "doc['change'].value" } }
+ }
+}
+--------------------------------------------------
+
+===== Value Script
+
+Computing the sum of squares over all stock tick changes:
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ ...
+
+ "aggs" : {
+ "daytime_return" : {
+ "sum" : {
+ "field" : "change",
+                "script" : "_value * _value"
+            }
+        }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc
new file mode 100644
index 0000000..b3f6bf4
--- /dev/null
+++ b/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc
@@ -0,0 +1,35 @@
+[[search-aggregations-metrics-valuecount-aggregation]]
+=== Value Count
+
+A `single-value` metrics aggregation that counts the number of values that are extracted from the aggregated documents.
+These values are extracted from specific fields in the documents. Typically,
+this aggregator will be used in conjunction with other single-value aggregations. For example, when computing the `avg`
+one might be interested in the number of values the average is computed over.
+
+[source,js]
+--------------------------------------------------
+{
+ "aggs" : {
+ "grades_count" : { "value_count" : { "field" : "grade" } }
+ }
+}
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+
+ "aggregations": {
+ "grades_count": {
+ "value": 10
+ }
+ }
+}
+--------------------------------------------------
+
+The name of the aggregation (`grades_count` above) also serves as the key by which the aggregation result can be
+retrieved from the returned response.
+
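+For example, a sketch (assuming the same `grade` field) that computes the average together with the number of
+values it is computed over:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "avg_grade" : { "avg" : { "field" : "grade" } },
+        "grades_count" : { "value_count" : { "field" : "grade" } }
+    }
+}
+--------------------------------------------------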
diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc
new file mode 100644
index 0000000..a7036c1
--- /dev/null
+++ b/docs/reference/search/count.asciidoc
@@ -0,0 +1,89 @@
+[[search-count]]
+== Count API
+
+The count API allows you to easily execute a query and get the number of
+matches for that query. It can be executed across one or more indices
+and across one or more types. The query can either be provided using a
+simple query string as a parameter, or using the
+<<query-dsl,Query DSL>> defined within the request
+body. Here is an example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_count?q=user:kimchy'
+
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_count' -d '
+{
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}'
+--------------------------------------------------
+
+NOTE: The query being sent in the body must be nested in a `query` key, the same
+way the <<search-search,search api>> works. added[1.0.0.RC1,The query was previously the top-level object]
+
+Both examples above do the same thing, which is count the number of
+tweets from the twitter index for a certain user. The result is:
+
+[source,js]
+--------------------------------------------------
+{
+ "count" : 1,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ }
+}
+--------------------------------------------------
+
+The query is optional, and when not provided, it will use `match_all` to
+count all the docs.
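+
+For example, counting all the tweets in the index without specifying a query:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_count'
+--------------------------------------------------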
+
+[float]
+=== Multi index, Multi type
+
+The count API can be applied to <<search-multi-index-type,multiple types in multiple indices>>.
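+
+For example, a sketch (the `news` index and `article` type are hypothetical) counting across two indices and
+two types at once:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter,news/tweet,article/_count?q=user:kimchy'
+--------------------------------------------------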
+
+[float]
+=== Request Parameters
+
+When executing count using the query parameter `q`, the query passed is
+a query string using the Lucene query parser. There are additional
+parameters that can be passed:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Name |Description
+|df |The default field to use when no field prefix is defined within the
+query.
+
+|analyzer |The analyzer name to be used when analyzing the query string.
+
+|default_operator |The default operator to be used, can be `AND` or
+`OR`. Defaults to `OR`.
+|=======================================================================
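+
+As an illustration, a sketch combining these parameters with the `q` query string (the values are illustrative):
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_count?q=kimchy&df=user&default_operator=AND'
+--------------------------------------------------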
+
+[float]
+=== Request Body
+
+The count can use the <<query-dsl,Query DSL>> within
+its body in order to express the query that should be executed. The body
+content can also be passed as a REST parameter named `source`.
+
+Both HTTP GET and HTTP POST can be used to execute count with body.
+Since not all clients support GET with body, POST is allowed as well.
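+
+For example, a sketch of the POST variant and of passing the body through the `source` parameter (the `source`
+value would normally need to be URL encoded):
+
+[source,js]
+--------------------------------------------------
+$ curl -XPOST 'http://localhost:9200/twitter/tweet/_count' -d '
+{
+    "query" : {
+        "term" : { "user" : "kimchy" }
+    }
+}'
+
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_count?source={"query":{"term":{"user":"kimchy"}}}'
+--------------------------------------------------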
+
+[float]
+=== Distributed
+
+The count operation is broadcast across all shards. For each shard id
+group, a replica is chosen and the count is executed against it. This means
+that replicas increase the scalability of count.
+
+[float]
+=== Routing
+
+The routing value (a comma separated list of the routing values) can be
+specified to control which shards the count request will be executed on.
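+
+For example, a sketch (the routing values are illustrative) that executes the count only on the shards that the
+given routing values hash to:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_count?routing=kimchy,elasticsearch' -d '
+{
+    "query" : {
+        "term" : { "user" : "kimchy" }
+    }
+}'
+--------------------------------------------------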
diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc
new file mode 100644
index 0000000..7c674af
--- /dev/null
+++ b/docs/reference/search/explain.asciidoc
@@ -0,0 +1,112 @@
+[[search-explain]]
+== Explain API
+
+The explain api computes a score explanation for a query and a specific
+document. This can give useful feedback on whether a document matches or
+doesn't match a specific query.
+
+The `index` and `type` parameters expect a single index and a single
+type respectively.
+
+[float]
+=== Usage
+
+Full query example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/tweet/1/_explain' -d '{
+ "query" : {
+ "term" : { "message" : "search" }
+ }
+}'
+--------------------------------------------------
+
+This will yield the following result:
+
+[source,js]
+--------------------------------------------------
+{
+ "matches" : true,
+ "explanation" : {
+ "value" : 0.15342641,
+ "description" : "fieldWeight(message:search in 0), product of:",
+ "details" : [ {
+ "value" : 1.0,
+ "description" : "tf(termFreq(message:search)=1)"
+ }, {
+ "value" : 0.30685282,
+ "description" : "idf(docFreq=1, maxDocs=1)"
+ }, {
+ "value" : 0.5,
+ "description" : "fieldNorm(field=message, doc=0)"
+ } ]
+ }
+}
+--------------------------------------------------
+
+There is also a simpler way of specifying the query via the `q`
+parameter. The specified `q` parameter value is then parsed as if the
+`query_string` query was used. Example usage of the `q` parameter in the
+explain api:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/tweet/1/_explain?q=message:search'
+--------------------------------------------------
+
+This will yield the same result as the previous request.
+
+[float]
+=== All parameters:
+
+[horizontal]
+`_source`::
+
+ added[1.0.0.Beta1] Set to `true` to retrieve the `_source` of the document explained. You can also
+ retrieve part of the document by using `_source_include` & `_source_exclude` (see <<get-source-filtering,Get API>> for more details)
+
+`fields`::
+ Allows to control which stored fields to return as part of the
+ document explained.
+
+`routing`::
+ Controls the routing in the case the routing was used
+ during indexing.
+
+`parent`::
+ Same effect as setting the routing parameter.
+
+`preference`::
+ Controls on which shard the explain is executed.
+
+`source`::
+ Allows the data of the request to be put in the query
+ string of the url.
+
+`q`::
+ The query string (maps to the query_string query).
+
+`df`::
+ The default field to use when no field prefix is defined within
+    the query. Defaults to the `_all` field.
+
+`analyzer`::
+ The analyzer name to be used when analyzing the query
+    string. Defaults to the analyzer of the `_all` field.
+
+`analyze_wildcard`::
+ Should wildcard and prefix queries be analyzed or
+ not. Defaults to false.
+
+`lowercase_expanded_terms`::
+ Should terms be automatically lowercased
+ or not. Defaults to true.
+
+`lenient`::
+ If set to true will cause format based failures (like
+ providing text to a numeric field) to be ignored. Defaults to false.
+
+`default_operator`::
+ The default operator to be used, can be AND or
+ OR. Defaults to OR.
diff --git a/docs/reference/search/facets.asciidoc b/docs/reference/search/facets.asciidoc
new file mode 100644
index 0000000..a6529a4
--- /dev/null
+++ b/docs/reference/search/facets.asciidoc
@@ -0,0 +1,283 @@
+[[search-facets]]
+== Facets
+
+The usual purpose of a full-text search engine is to return a small
+number of documents matching your query.
+
+_Facets_ provide aggregated data based on a search query. In the
+simplest case, a
+<<search-facets-terms-facet,terms facet>>
+can return _facet counts_ for various _facet values_ for a specific
+_field_. Elasticsearch supports more facet implementations, such as
+<<search-facets-statistical-facet,statistical>>
+or
+<<search-facets-date-histogram-facet,date
+histogram>> facets.
+
+The field used for facet calculations _must_ be of type numeric,
+date/time or be analyzed as a single token — see the
+<<mapping,_Mapping_>> guide for details on the
+analysis process.
+
+You can give the facet a custom _name_ and return multiple facets in one
+request.
+
+Let's try it out with a simple example. Suppose we have a number of
+articles with a field called `tags`, preferably analyzed with the
+<<analysis-keyword-analyzer,keyword>>
+analyzer. The facet aggregation will return counts for the most popular
+tags across the documents matching your query — or across all documents
+in the index.
+
+We will store some example data first:
+
+[source,js]
+--------------------------------------------------
+curl -X DELETE "http://localhost:9200/articles"
+curl -X POST "http://localhost:9200/articles/article" -d '{"title" : "One", "tags" : ["foo"]}'
+curl -X POST "http://localhost:9200/articles/article" -d '{"title" : "Two", "tags" : ["foo", "bar"]}'
+curl -X POST "http://localhost:9200/articles/article" -d '{"title" : "Three", "tags" : ["foo", "bar", "baz"]}'
+--------------------------------------------------
+
+Now, let's query the index for articles beginning with the letter `T`
+and retrieve a
+<<search-facets-terms-facet,_terms facet_>>
+for the `tags` field. We will name the facet simply: _tags_.
+
+[source,js]
+--------------------------------------------------
+curl -X POST "http://localhost:9200/articles/_search?pretty=true" -d '
+ {
+ "query" : { "query_string" : {"query" : "T*"} },
+ "facets" : {
+ "tags" : { "terms" : {"field" : "tags"} }
+ }
+ }
+'
+--------------------------------------------------
+
+This request will return articles `Two` and `Three` (because
+they match our query), as well as the `tags` facet:
+
+[source,js]
+--------------------------------------------------
+"facets" : {
+ "tags" : {
+ "_type" : "terms",
+ "missing" : 0,
+ "total": 5,
+ "other": 0,
+ "terms" : [ {
+ "term" : "foo",
+ "count" : 2
+ }, {
+ "term" : "bar",
+ "count" : 2
+ }, {
+ "term" : "baz",
+ "count" : 1
+ } ]
+ }
+}
+--------------------------------------------------
+
+In the `terms` array, relevant _terms_ and _counts_ are returned. You'll
+probably want to display these to your users. The facet returns several
+important counts:
+
+* `missing` : The number of documents which have no value for the faceted field
+* `total` : The total number of terms in the facet
+* `other` : The number of terms not included in the returned facet (effectively `other` = `total` - `terms`)
+
+Notice that the counts are scoped to the current query: _foo_ is
+counted only twice (not three times), _bar_ is counted twice and _baz_
+once. Also note that terms are counted once per document, even if they
+occur more frequently in that document.
+
+That's because the primary purpose of facets is to enable
+http://en.wikipedia.org/wiki/Faceted_search[_faceted navigation_],
+allowing the user to refine her query based on the insight from the
+facet, i.e. restrict the search to a specific category, price or date
+range. Facets can be used, however, for other purposes: computing
+histograms, statistical aggregations, and more. See the blog about
+link:/blog/data-visualization-with-elasticsearch-and-protovis/[data visualization] for inspiration.
+
+
+
+[float]
+=== Scope
+
+As we have already mentioned, facet computation is restricted to the
+scope of the current query, called `main`, by default. Facets can be
+computed within the `global` scope as well, in which case it will return
+values computed across all documents in the index:
+
+[source,js]
+--------------------------------------------------
+{
+ "facets" : {
+ "my_facets" : {
+ "terms" : { ... },
+ "global" : true <1>
+ }
+ }
+}
+--------------------------------------------------
+<1> The `global` keyword can be used with any facet type.
+
+There's one *important distinction* to keep in mind. While search
+_queries_ restrict both the returned documents and facet counts, search
+_filters_ restrict only returned documents — but _not_ facet counts.
+
+If you need to restrict both the documents and facets, and you're not
+willing or able to use a query, you may use a _facet filter_.
+
+[float]
+=== Facet Filter
+
+All facets can be configured with an additional filter (explained in the
+<<query-dsl,Query DSL>> section), which _will_ reduce
+the documents they use for computing results. An example with a _term_
+filter:
+
+[source,js]
+--------------------------------------------------
+{
+ "facets" : {
+ "<FACET NAME>" : {
+ "<FACET TYPE>" : {
+ ...
+ },
+ "facet_filter" : {
+ "term" : { "user" : "kimchy"}
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note that this is different from a facet of the
+<<search-facets-filter-facet,filter>> type.
+
+[float]
+=== Facets with the _nested_ types
+
+The <<mapping-nested-type,nested>> mapping allows
+for better support for faceting on "inner" documents, especially when it
+comes to facets with multi valued key and value fields (like histograms,
+or terms stats).
+
+What is it good for? First of all, this is the only way to use facets on
+nested documents once they are used (possibly for other reasons). But
+there is also a facet specific reason to use nested documents: facets
+that work on separate key and value fields (like terms_stats, or
+histogram) can now properly support cases where both fields are multi
+valued.
+
+For example, let's use the following mapping:
+
+[source,js]
+--------------------------------------------------
+{
+ "type1" : {
+ "properties" : {
+ "obj1" : {
+ "type" : "nested"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+And here is some sample data:
+
+[source,js]
+--------------------------------------------------
+{
+ "obj1" : [
+ {
+ "name" : "blue",
+ "count" : 4
+ },
+ {
+ "name" : "green",
+ "count" : 6
+ }
+ ]
+}
+--------------------------------------------------
+
+
+[float]
+==== All Nested Matching Root Documents
+
+Another option is to run the facet on all the nested documents matching
+the root objects that the main query will end up producing. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "match_all": {}
+ },
+ "facets": {
+ "facet1": {
+ "terms_stats": {
+ "key_field" : "name",
+ "value_field": "count"
+ },
+ "nested": "obj1"
+ }
+ }
+}
+--------------------------------------------------
+
+The `nested` element provides the path to the nested document (it can be
+a multi level nested path) that will be used.
+
+Facet filter allows you to filter your facet on the nested object level.
+It is important that these filters match on the nested object level and
+not on the root document level. In the following example the
+`terms_stats` only applies on nested objects with the name 'blue'.
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "match_all": {}
+ },
+ "facets": {
+ "facet1": {
+ "terms_stats": {
+ "key_field" : "name",
+ "value_field": "count"
+ },
+ "nested": "obj1",
+ "facet_filter" : {
+ "term" : {"name" : "blue"}
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+include::facets/terms-facet.asciidoc[]
+
+include::facets/range-facet.asciidoc[]
+
+include::facets/histogram-facet.asciidoc[]
+
+include::facets/date-histogram-facet.asciidoc[]
+
+include::facets/filter-facet.asciidoc[]
+
+include::facets/query-facet.asciidoc[]
+
+include::facets/statistical-facet.asciidoc[]
+
+include::facets/terms-stats-facet.asciidoc[]
+
+include::facets/geo-distance-facet.asciidoc[]
+
diff --git a/docs/reference/search/facets/date-histogram-facet.asciidoc b/docs/reference/search/facets/date-histogram-facet.asciidoc
new file mode 100644
index 0000000..d92c0b2
--- /dev/null
+++ b/docs/reference/search/facets/date-histogram-facet.asciidoc
@@ -0,0 +1,134 @@
+[[search-facets-date-histogram-facet]]
+=== Date Histogram Facet
+
+A specific histogram facet that can work with `date` field types,
+enhancing the regular
+<<search-facets-histogram-facet,histogram
+facet>>. Here is a quick example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "date_histogram" : {
+ "field" : "field_name",
+ "interval" : "day"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Interval
+
+The `interval` allows you to set the interval at which buckets will be
+created for each hit. It allows for the constant values of `year`,
+`quarter`, `month`, `week`, `day`, `hour` and `minute`.
+
+It also supports time settings like `1.5h` (up to `w` for weeks).
+
+==== Time Zone
+
+By default, times are stored as UTC milliseconds since the epoch. Thus,
+all computation and "bucketing" / "rounding" is done on UTC. It is
+possible to provide a time zone (both pre rounding, and post rounding)
+value, which will cause all computations to take the relevant zone into
+account. The time returned for each bucket/entry is milliseconds since
+the epoch of the provided time zone.
+
+The parameters are `pre_zone` (pre rounding based on interval) and
+`post_zone` (post rounding based on interval). The `time_zone` parameter
+simply sets the `pre_zone` parameter. By default, those are set to
+`UTC`.
+
+The zone value accepts a numeric value for the hours offset, for
+example: `"time_zone" : -2`. It also accepts a format of hours and
+minutes, like `"time_zone" : "-02:30"`. Another option is to provide a
+time zone accepted as one of the values listed
+http://joda-time.sourceforge.net/timezones.html[here].
+
+Let's take an example. For `2012-04-01T04:15:30Z`, with a `pre_zone` of
+`-08:00`. For `day` interval, the actual time by applying the time zone
+and rounding falls under `2012-03-31`, so the returned value will be (in
+millis) of `2012-03-31T00:00:00Z` (UTC). For `hour` interval, applying
+the time zone results in `2012-03-31T20:15:30`, rounding it results in
+`2012-03-31T20:00:00`, but, we want to return it in UTC (`post_zone` is
+not set), so we convert it back to UTC: `2012-04-01T04:00:00Z`. Note, we
+are consistent in the results, returning the rounded value in UTC.
+
+`post_zone` simply takes the result, and adds the relevant offset.
+
+Sometimes, we want to apply the same conversion to UTC we did above for
+`hour` also for `day` (and up) intervals. We can set
+`pre_zone_adjust_large_interval` to `true`, which will apply the same
+conversion done for `hour` interval in the example, to `day` and above
+intervals (it can be set regardless of the interval, but only kicks in
+when using `day` and higher intervals).
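+
+For example, a sketch (the `timestamp` field is an assumption) that buckets per day in the `-02:00` time zone:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : {}
+    },
+    "facets" : {
+        "histo1" : {
+            "date_histogram" : {
+                "field" : "timestamp",
+                "interval" : "day",
+                "time_zone" : "-02:00"
+            }
+        }
+    }
+}
+--------------------------------------------------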
+
+==== Factor
+
+The date histogram works on numeric values (since time is stored in
+milliseconds since the epoch in UTC). But, sometimes, systems will store
+a different resolution (like seconds since UTC) in a numeric field. The
+`factor` parameter can be used to convert the value in the field to
+milliseconds so that the relevant rounding can actually be done, and it is
+then applied again to get back to the original unit. For example, when a
+numeric field stores values with second resolution, the `factor` can be set to `1000`.
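+
+For example, a sketch (the `timestamp_in_seconds` field name is hypothetical) for a field that stores seconds
+instead of milliseconds:
+
+[source,js]
+--------------------------------------------------
+{
+    "facets" : {
+        "histo1" : {
+            "date_histogram" : {
+                "field" : "timestamp_in_seconds",
+                "interval" : "day",
+                "factor" : 1000
+            }
+        }
+    }
+}
+--------------------------------------------------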
+
+==== Pre / Post Offset
+
+Specific offsets can be provided for pre rounding and post rounding:
+`pre_offset` for pre rounding, and `post_offset` for post rounding. The
+format is the date time format (`1h`, `1d`, ...).
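+
+For example, a sketch (the field name is a placeholder) that applies a
+one hour offset before rounding:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : {}
+    },
+    "facets" : {
+        "histo1" : {
+            "date_histogram" : {
+                "field" : "field_name",
+                "interval" : "day",
+                "pre_offset" : "1h"
+            }
+        }
+    }
+}
+--------------------------------------------------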
+
+==== Value Field
+
+The date_histogram facet allows you to use a different key (of type
+date) which controls the bucketing, with a different value field whose
+values within each bucket are used to compute the total and mean. For
+example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "date_histogram" : {
+ "key_field" : "timestamp",
+ "value_field" : "price",
+ "interval" : "day"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Script Value Field
+
+A script can be used to compute the value that will then be used to
+compute the total and mean for a bucket. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "date_histogram" : {
+ "key_field" : "timestamp",
+ "value_script" : "doc['price'].value * 2",
+ "interval" : "day"
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/facets/filter-facet.asciidoc b/docs/reference/search/facets/filter-facet.asciidoc
new file mode 100644
index 0000000..74dece4
--- /dev/null
+++ b/docs/reference/search/facets/filter-facet.asciidoc
@@ -0,0 +1,24 @@
+[[search-facets-filter-facet]]
+=== Filter Facets
+
+A filter facet (not to be confused with a
+<<search-facets,facet filter>>) allows you to
+return a count of the hits matching the filter. The filter itself can be
+expressed using the <<query-dsl,Query DSL>>. For
+example:
+
+[source,js]
+--------------------------------------------------
+{
+ "facets" : {
+ "wow_facet" : {
+ "filter" : {
+ "term" : { "tag" : "wow" }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note, filter facets are faster than query facets when using native
+filters (ones that do not wrap a query).
diff --git a/docs/reference/search/facets/geo-distance-facet.asciidoc b/docs/reference/search/facets/geo-distance-facet.asciidoc
new file mode 100644
index 0000000..e56f4bd
--- /dev/null
+++ b/docs/reference/search/facets/geo-distance-facet.asciidoc
@@ -0,0 +1,252 @@
+[[search-facets-geo-distance-facet]]
+=== Geo Distance Facets
+
+The geo_distance facet provides information for ranges of distances
+from a provided geo_point, including the count of hits that fall within
+each range and aggregation information (like the total).
+
+Assuming the following sample doc:
+
+[source,js]
+--------------------------------------------------
+{
+ "pin" : {
+ "location" : {
+ "lat" : 40.12,
+ "lon" : -71.34
+ }
+ }
+}
+--------------------------------------------------
+
+Here is an example that creates a `geo_distance` facet from a
+`pin.location` of 40,-70, and a set of ranges:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : {
+ "lat" : 40,
+ "lon" : -70
+ },
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Accepted Formats
+
+In much the same way the geo_point type can accept different
+representations of the geo point, the facet can accept them as well:
+
+===== Lat Lon As Properties
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : {
+ "lat" : 40,
+ "lon" : -70
+ },
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+===== Lat Lon As Array
+
+Format: `[lon, lat]`. Note the order of lon/lat here, which conforms
+with http://geojson.org/[GeoJSON].
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : [40, -70],
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+===== Lat Lon As String
+
+Format: `lat,lon`.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : "40, -70",
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+===== Geohash
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : "drm3btev3e86",
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Ranges
+
+When `to` or `from` is not set, it is assumed to be unbounded. Ranges
+are allowed to overlap; each range is treated independently.
+
+==== Options
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Option |Description
+|`unit` |The unit the ranges are provided in. Defaults to `km`. Can also
+be `mi`, `miles`, `in`, `inch`, `yd`, `yards`, `ft`, `feet`, `kilometers`, `mm`, `millimeters`, `cm`, `centimeters`, `m` or `meters`.
+
+|`distance_type` |How to compute the distance. Can either be `arc`
+(better precision), `sloppy_arc` (faster) or `plane` (fastest). Defaults to `sloppy_arc`.
+|=======================================================================
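+
+For example, a sketch combining both options with the location and
+ranges from the examples above (values are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : {}
+    },
+    "facets" : {
+        "geo1" : {
+            "geo_distance" : {
+                "pin.location" : "40, -70",
+                "unit" : "mi",
+                "distance_type" : "plane",
+                "ranges" : [
+                    { "to" : 10 },
+                    { "from" : 10 }
+                ]
+            }
+        }
+    }
+}
+--------------------------------------------------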
+
+==== Value Options
+
+On top of the count of hits falling within each range, aggregated data
+can be provided (total) as well. By default, the aggregated data will
+simply use the distance calculated, but the value can be extracted
+either using a different numeric field, or a script. Here is an example
+of using a different numeric field:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : "drm3btev3e86",
+ "value_field" : "num1",
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+And here is an example of using a script:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "geo1" : {
+ "geo_distance" : {
+ "pin.location" : "drm3btev3e86",
+ "value_script" : "doc['num1'].value * factor",
+ "params" : {
+ "factor" : 5
+                },
+ "ranges" : [
+ { "to" : 10 },
+ { "from" : 10, "to" : 20 },
+ { "from" : 20, "to" : 100 },
+ { "from" : 100 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note the `params` option, which allows passing parameters to the script
+(resulting in faster script execution compared to hardcoding the values
+within the script each time).
+
+.`geo_point` Type
+[NOTE]
+--
+The facet *requires* the `geo_point` type to be set on the relevant
+field.
+--
+
+.Multi Location Per Document
+[NOTE]
+--
+The facet can work with multiple locations per document.
+--
diff --git a/docs/reference/search/facets/histogram-facet.asciidoc b/docs/reference/search/facets/histogram-facet.asciidoc
new file mode 100644
index 0000000..284a058
--- /dev/null
+++ b/docs/reference/search/facets/histogram-facet.asciidoc
@@ -0,0 +1,138 @@
+[[search-facets-histogram-facet]]
+=== Histogram Facets
+
+The histogram facet works with numeric data by building a histogram
+across intervals of the field values. Each value is "rounded" into an
+interval (or placed in a bucket), and statistics are provided per
+interval/bucket (count and total). Here is a simple example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "histogram" : {
+ "field" : "field_name",
+ "interval" : 100
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The above example will run a histogram facet on the `field_name` field,
+with an `interval` of `100` (so, for example, a value of `1055` will be
+placed within the `1000` bucket).
+
+The interval can also be provided as a time based interval (using the
+time format). This mainly makes sense when working with date fields or
+fields that represent absolute milliseconds. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "histogram" : {
+ "field" : "field_name",
+ "time_interval" : "1.5h"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Key and Value
+
+The histogram facet allows you to use a different key and value. The key
+is used to place the hit/document within the appropriate bucket, and the
+value is used to compute statistical data (for example, total). Here is
+an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "histogram" : {
+ "key_field" : "key_field_name",
+ "value_field" : "value_field_name",
+ "interval" : 100
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Script Key and Value
+
+Sometimes, some munging of both the key and the value is needed: for
+the key, before it is rounded into a bucket, and for the value, when
+the statistical data is computed per bucket.
+<<modules-scripting,Scripts>> can be used for both. Here
+is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "histogram" : {
+ "key_script" : "doc['date'].date.minuteOfHour",
+ "value_script" : "doc['num1'].value"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+In the above sample, we use a date type field called `date` to get the
+minute of the hour from it, and the total is computed based on another
+field, `num1`. Note, in this case, no `interval` was provided, so the
+buckets are based directly on the `key_script` result (no rounding).
+
+Parameters can also be provided to the different scripts (preferable
+when the same script is reused with different values for a specific
+parameter, like "factor"):
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "histo1" : {
+ "histogram" : {
+ "key_script" : "doc['date'].date.minuteOfHour * factor1",
+ "value_script" : "doc['num1'].value + factor2",
+ "params" : {
+ "factor1" : 2,
+ "factor2" : 3
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Memory Considerations
+
+In order to implement the histogram facet, the relevant field values are
+loaded into memory from the index. This means that per shard, there
+should be enough memory to contain them. Since by default, dynamically
+introduced types are `long` and `double`, one option to reduce the
+memory footprint is to explicitly set the types for the relevant fields
+to either `short`, `integer`, or `float` when possible.
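+
+For example, a mapping sketch (type and field names are placeholders)
+that stores the numeric field as `float` to reduce the footprint:
+
+[source,js]
+--------------------------------------------------
+{
+    "my_type" : {
+        "properties" : {
+            "num1" : { "type" : "float" }
+        }
+    }
+}
+--------------------------------------------------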
diff --git a/docs/reference/search/facets/query-facet.asciidoc b/docs/reference/search/facets/query-facet.asciidoc
new file mode 100644
index 0000000..3f360da
--- /dev/null
+++ b/docs/reference/search/facets/query-facet.asciidoc
@@ -0,0 +1,19 @@
+[[search-facets-query-facet]]
+=== Query Facets
+
+A query facet allows you to return a count of the hits matching the
+facet query. The query itself can be expressed using the Query DSL. For
+example:
+
+[source,js]
+--------------------------------------------------
+{
+ "facets" : {
+ "wow_facet" : {
+ "query" : {
+ "term" : { "tag" : "wow" }
+ }
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/facets/range-facet.asciidoc b/docs/reference/search/facets/range-facet.asciidoc
new file mode 100644
index 0000000..fa263ee
--- /dev/null
+++ b/docs/reference/search/facets/range-facet.asciidoc
@@ -0,0 +1,119 @@
+[[search-facets-range-facet]]
+=== Range Facets
+
+The `range` facet allows you to specify a set of ranges and get both
+the number of docs (count) that fall within each range, and aggregated
+data, either based on the field or using another field. Here is a
+simple example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "range1" : {
+ "range" : {
+ "field" : "field_name",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 20, "to" : 70 },
+ { "from" : 70, "to" : 120 },
+ { "from" : 150 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Another option, which is a bit more DSL oriented, is to provide the
+ranges under the actual field name, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "range1" : {
+ "range" : {
+ "my_field" : [
+ { "to" : 50 },
+ { "from" : 20, "to" : 70 },
+ { "from" : 70, "to" : 120 },
+ { "from" : 150 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The `range` facet always includes the `from` parameter and excludes the
+`to` parameter for each range.
+
+==== Key and Value
+
+The `range` facet allows you to use a different field to check if its
+value falls within a range, and another field to compute aggregated data
+per range (like total). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "range1" : {
+ "range" : {
+ "key_field" : "field_name",
+ "value_field" : "another_field_name",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 20, "to" : 70 },
+ { "from" : 70, "to" : 120 },
+ { "from" : 150 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Script Key and Value
+
+Sometimes, some munging of both the key and the value is needed: for
+the key, before it is checked against the ranges, and for the value,
+when the statistical data is computed per range. Scripts can be used
+for both. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "range1" : {
+ "range" : {
+ "key_script" : "doc['date'].date.minuteOfHour",
+ "value_script" : "doc['num1'].value",
+ "ranges" : [
+ { "to" : 50 },
+ { "from" : 20, "to" : 70 },
+ { "from" : 70, "to" : 120 },
+ { "from" : 150 }
+ ]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Date Ranges
+
+The range facet also supports providing the ranges as string formatted
+dates.
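+
+A sketch of that case (the field name and dates are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : {}
+    },
+    "facets" : {
+        "range1" : {
+            "range" : {
+                "field" : "created_at",
+                "ranges" : [
+                    { "to" : "2012-01-01" },
+                    { "from" : "2012-01-01" }
+                ]
+            }
+        }
+    }
+}
+--------------------------------------------------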
diff --git a/docs/reference/search/facets/statistical-facet.asciidoc b/docs/reference/search/facets/statistical-facet.asciidoc
new file mode 100644
index 0000000..dfa51df
--- /dev/null
+++ b/docs/reference/search/facets/statistical-facet.asciidoc
@@ -0,0 +1,101 @@
+[[search-facets-statistical-facet]]
+=== Statistical Facet
+
+The statistical facet allows you to compute statistical data on a
+numeric field. The statistical data includes the count, total, sum of
+squares, mean (average), minimum, maximum, variance, and standard
+deviation. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "stat1" : {
+ "statistical" : {
+ "field" : "num1"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Script field
+
+When using `field`, the numeric value of the field is used to compute
+the statistical information. Sometimes, the values of several fields, or
+some sort of mathematical evaluation of them, represent the statistics
+we want to compute. The script field allows you to define a
+<<modules-scripting,script>> to evaluate, with
+its value used to compute the statistical information. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "stat1" : {
+ "statistical" : {
+ "script" : "doc['num1'].value + doc['num2'].value"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Parameters can also be provided to the different scripts (preferable
+when the same script is reused with different values for a specific
+parameter, like "factor"):
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "stat1" : {
+ "statistical" : {
+ "script" : "(doc['num1'].value + doc['num2'].value) * factor",
+ "params" : {
+ "factor" : 5
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Multi Field
+
+The statistical facet can be executed against more than one field,
+returning the aggregation result across those fields. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "facets" : {
+ "stat1" : {
+ "statistical" : {
+ "fields" : ["num1", "num2"]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Memory Considerations
+
+In order to implement the statistical facet, the relevant field values
+are loaded into memory from the index. This means that per shard, there
+should be enough memory to contain them. Since by default, dynamically
+introduced types are `long` and `double`, one option to reduce the
+memory footprint is to explicitly set the types for the relevant fields
+to either `short`, `integer`, or `float` when possible.
diff --git a/docs/reference/search/facets/terms-facet.asciidoc b/docs/reference/search/facets/terms-facet.asciidoc
new file mode 100644
index 0000000..29b24dc
--- /dev/null
+++ b/docs/reference/search/facets/terms-facet.asciidoc
@@ -0,0 +1,288 @@
+[[search-facets-terms-facet]]
+=== Terms Facet
+
+Allows you to specify field facets that return the N most frequent
+terms. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "size" : 10
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+It is preferable to run the terms facet on a non analyzed field, or on
+a field that does not break into a large number of terms.
+
+==== Accuracy Control
+
+The `size` parameter defines how many top terms should be returned out
+of the overall terms list. By default, the node coordinating the
+search process will ask each shard to provide its own top `size` terms
+and once all shards respond, it will reduce the results to the final list
+that will then be sent back to the client. This means that if the number
+of unique terms is greater than `size`, the returned list is slightly off
+and not accurate (it could be that the term counts are slightly off and it
+could even be that a term that should have been in the top `size` entries
+was not returned).
+
+The higher the requested `size` is, the more accurate the results will be,
+but also, the more expensive it will be to compute the final results (both
+due to bigger priority queues that are managed on a shard level and due to
+bigger data transfers between the nodes and the client). In an attempt to
+minimize the extra work that comes with bigger requested `size` the
+`shard_size` parameter was introduced. When defined, it will determine
+how many terms the coordinating node will request from each shard. Once
+all the shards responded, the coordinating node will then reduce them
+to a final result which will be based on the `size` parameter - this way,
+one can increase the accuracy of the returned terms and avoid the overhead
+of streaming a big list of terms back to the client.
+
+Note that `shard_size` cannot be smaller than `size`; if that is the
+case, Elasticsearch will override it and reset it to be equal to `size`.
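+
+For example, a sketch (values are illustrative) requesting the top 10
+terms while asking each shard for its top 100:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : { }
+    },
+    "facets" : {
+        "tag" : {
+            "terms" : {
+                "field" : "tag",
+                "size" : 10,
+                "shard_size" : 100
+            }
+        }
+    }
+}
+--------------------------------------------------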
+
+
+==== Ordering
+
+The ordering of the terms facet entries can be controlled with `order`,
+set to `count`, `term`, `reverse_count` or `reverse_term`. The default
+is `count`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "size" : 10,
+ "order" : "term"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== All Terms
+
+Setting `all_terms` to `true` returns all the terms in the terms facet;
+terms that do not match a hit will have a count of 0. Note, this should
+not be used with fields that have many terms.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "all_terms" : true
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Excluding Terms
+
+It is possible to specify a set of terms that should be excluded from
+the terms facet request result:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "exclude" : ["term1", "term2"]
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Regex Patterns
+
+The terms facet allows you to define a regular expression that controls
+which terms will be included in the faceted list. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "regex" : "_regex expression here_",
+ "regex_flags" : "DOTALL"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Check
+http://download.oracle.com/javase/6/docs/api/java/util/regex/Pattern.html#field_summary[Java
+Pattern API] for more details about `regex_flags` options.
+
+==== Term Scripts
+
+A script can be defined for the terms facet to process the actual term
+that will be used in the term facet collection, and also optionally
+control whether it is included.
+
+The script can return a boolean value: `true` to include the term in
+the facet collection, and `false` to exclude it from the facet
+collection.
+
+Another option is for the script to return a `string` controlling the
+term that will be counted against. The script execution will include
+the `term` variable, which holds the current field term.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "size" : 10,
+ "script" : "term + 'aaa'"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+And using the boolean feature:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "field" : "tag",
+ "size" : 10,
+ "script" : "term == 'aaa' ? true : false"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Multi Fields
+
+The terms facet can be executed against more than one field, returning
+the aggregation result across those fields. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : {
+ "fields" : ["tag1", "tag2"],
+ "size" : 10
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Script Field
+
+A script can provide the actual terms that will be processed for a
+given doc. A `script_field` (or `script`, which will be used when no
+`field` or `fields` are provided) can be set to provide it.
+
+As an example, a (quite "heavy") search request can be executed that
+uses either `_source` itself or `_fields` (for stored fields) without
+needing to load the terms into memory (at the expense of much slower
+search execution and more IO load):
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "my_facet" : {
+ "terms" : {
+ "script_field" : "_source.my_field",
+ "size" : 10
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Or:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "my_facet" : {
+ "terms" : {
+ "script_field" : "_fields['my_field']",
+ "size" : 10
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note also that the above will use the whole field value as a single
+term.
+
+==== _index
+
+The terms facet allows you to specify a special field name called
+`_index`. This will return a facet count of hits per `_index` the search
+was executed on (relevant when a search request spans more than one
+index).
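+
+For example, a sketch counting hits per index:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : { }
+    },
+    "facets" : {
+        "indices" : {
+            "terms" : {
+                "field" : "_index",
+                "size" : 10
+            }
+        }
+    }
+}
+--------------------------------------------------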
+
+==== Memory Considerations
+
+The terms facet causes the relevant field values to be loaded into
+memory. This means that per shard, there should be enough memory to
+contain them. It is advisable to explicitly set the fields to be
+`not_analyzed` or make sure the number of unique tokens a field can have
+is not large.
diff --git a/docs/reference/search/facets/terms-stats-facet.asciidoc b/docs/reference/search/facets/terms-stats-facet.asciidoc
new file mode 100644
index 0000000..cd48757
--- /dev/null
+++ b/docs/reference/search/facets/terms-stats-facet.asciidoc
@@ -0,0 +1,51 @@
+[[search-facets-terms-stats-facet]]
+=== Terms Stats Facet
+
+The `terms_stats` facet combines both the
+<<search-facets-terms-facet,terms>> and
+<<search-facets-statistical-facet,statistical>>
+facets, computing stats on a field per term value driven by
+another field. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : { }
+ },
+ "facets" : {
+ "tag_price_stats" : {
+ "terms_stats" : {
+ "key_field" : "tag",
+ "value_field" : "price"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The `size` parameter controls how many facet entries will be returned.
+It defaults to `10`. Setting it to 0 will return all terms matching the
+hits (be careful not to return too many results).
+
+One can also set `shard_size` (in addition to `size`) which will determine
+how many term entries will be requested from each shard. When dealing
+with a field of high cardinality (at least higher than the requested
+`size`), the greater `shard_size` is, the more accurate the result will
+be (and the more expensive the overall facet computation will be).
+`shard_size` is there to enable you to increase accuracy yet still avoid
+returning too many terms_stats entries back to the client.
+
+Ordering is done by setting `order`, with possible values of `term`,
+`reverse_term`, `count`, `reverse_count`, `total`, `reverse_total`,
+`min`, `reverse_min`, `max`, `reverse_max`, `mean`, `reverse_mean`.
+Defaults to `count`.
+
+The value computed can also be a script, using `value_script`
+instead of `value_field`, in which case `lang` can control the script
+language, and `params` allows providing custom parameters (as in other
+scripted components).
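+
+A sketch (field names and the discount value are illustrative) that
+combines `value_script`, `params`, ordering and `size`:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match_all" : { }
+    },
+    "facets" : {
+        "tag_price_stats" : {
+            "terms_stats" : {
+                "key_field" : "tag",
+                "value_script" : "doc['price'].value * discount",
+                "params" : {
+                    "discount" : 0.9
+                },
+                "order" : "total",
+                "size" : 10
+            }
+        }
+    }
+}
+--------------------------------------------------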
+
+Note, the terms stats can work with multi valued key fields, or multi
+valued value fields, but not when both are multi valued (as ordering is
+not maintained).
diff --git a/docs/reference/search/more-like-this.asciidoc b/docs/reference/search/more-like-this.asciidoc
new file mode 100644
index 0000000..28bd078
--- /dev/null
+++ b/docs/reference/search/more-like-this.asciidoc
@@ -0,0 +1,27 @@
+[[search-more-like-this]]
+== More Like This API
+
+The more like this (mlt) API allows you to get documents that are "like"
+a specified document. Here is an example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/1/_mlt?mlt_fields=tag,content&min_doc_freq=1'
+--------------------------------------------------
+
+The API simply results in executing a search request with a
+<<query-dsl-mlt-query,moreLikeThis>> query (the HTTP
+parameters match the parameters of the `more_like_this` query). This
+means that the body of the request can optionally include all the
+request body options in the <<search-search,search
+API>> (facets, from/to and so on).
+
+REST parameters relating to search are also allowed, including
+`search_type`, `search_indices`, `search_types`, `search_scroll`,
+`search_size` and `search_from`.
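+
+For example, a sketch (the URL and values are illustrative) combining
+mlt parameters with the search related ones:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/1/_mlt?mlt_fields=tag,content&search_size=5&search_from=0'
+--------------------------------------------------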
+
+When no `mlt_fields` are specified, all the fields of the document will
+be used in the `more_like_this` query generated.
+
+Note: In order to use the `mlt` feature, an `mlt_field` needs to either
+be `stored`, store `term_vector`, or `_source` needs to be enabled.
diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc
new file mode 100644
index 0000000..c51bcac
--- /dev/null
+++ b/docs/reference/search/multi-search.asciidoc
@@ -0,0 +1,78 @@
+[[search-multi-search]]
+== Multi Search API
+
+The multi search API allows you to execute several search requests
+within the same API call. The endpoint for it is `_msearch`.
+
+The format of the request is similar to the bulk API format, and the
+structure is as follows (the structure is specifically optimized to
+reduce parsing if a specific search ends up redirected to another node):
+
+[source,js]
+--------------------------------------------------
+header\n
+body\n
+header\n
+body\n
+--------------------------------------------------
+
+The header part includes which index / indices to search on, optional
+(mapping) types to search on, the `search_type`, `preference`, and
+`routing`. The body includes the typical search body request (including
+the `query`, `facets`, `from`, `size`, and so on). Here is an example:
+
+[source,js]
+--------------------------------------------------
+$ cat requests
+{"index" : "test"}
+{"query" : {"match_all" : {}}, "from" : 0, "size" : 10}
+{"index" : "test", "search_type" : "count"}
+{"query" : {"match_all" : {}}}
+{}
+{"query" : {"match_all" : {}}}
+
+{"query" : {"match_all" : {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" : {}}}
+
+$ curl -XGET localhost:9200/_msearch --data-binary @requests; echo
+--------------------------------------------------
+
+Note, the above includes an example of an empty header (it can also be
+a line without any content), which is supported as well.
+
+The response returns a `responses` array, which includes the search
+response for each search request matching its order in the original
+multi search request. If there was a complete failure for that specific
+search request, an object with `error` message will be returned in place
+of the actual search response.
+
+The endpoint also allows searching against an index/indices and
+type/types in the URI itself, in which case they will be used as the
+default unless explicitly defined otherwise in the header. For example:
+
+[source,js]
+--------------------------------------------------
+$ cat requests
+{}
+{"query" : {"match_all" : {}}, "from" : 0, "size" : 10}
+{}
+{"query" : {"match_all" : {}}}
+{"index" : "test2"}
+{"query" : {"match_all" : {}}}
+
+$ curl -XGET localhost:9200/test/_msearch --data-binary @requests; echo
+--------------------------------------------------
+
+The above will execute the search against the `test` index for all the
+requests that don't define an index, and the last one will be executed
+against the `test2` index.
+
+The `search_type` can be set in a similar manner to globally apply to
+all search requests.
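+
+For example, a sketch (the index name is illustrative) applying the
+`count` search type to every request in the file:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'localhost:9200/test/_msearch?search_type=count' --data-binary @requests; echo
+--------------------------------------------------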
+
+[float]
+[[msearch-security]]
+=== Security
+
+See <<url-access-control>>
diff --git a/docs/reference/search/percolate.asciidoc b/docs/reference/search/percolate.asciidoc
new file mode 100644
index 0000000..c214d80
--- /dev/null
+++ b/docs/reference/search/percolate.asciidoc
@@ -0,0 +1,447 @@
+[[search-percolate]]
+== Percolator
+
+added[1.0.0.Beta1]
+
+Traditionally you design documents based on your data, store them into an index, and then define queries via the search api
+in order to retrieve these documents. The percolator works in the opposite direction: first you store queries into an
+index, and then via the percolate api you define documents in order to retrieve these queries.
+
+The reason that queries can be stored comes from the fact that in Elasticsearch both documents and queries are defined in
+JSON. This allows you to embed queries into documents via the index api. Elasticsearch can extract the query from a
+document and make it available to the percolate api. Since documents are also defined as JSON, you can define a document
+in a request to the percolate api.
+
+The percolator and most of its features work in realtime, so once a percolate query is indexed it can immediately be used
+in the percolate api.
+
+[float]
+=== Sample usage
+
+Adding a query to the percolator:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/my-index/.percolator/1' -d '{
+ "query" : {
+ "match" : {
+ "message" : "bonsai tree"
+ }
+ }
+}'
+--------------------------------------------------
+
+Matching documents to the added queries:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/my-index/message/_percolate' -d '{
+ "doc" : {
+ "message" : "A new bonsai tree in the office"
+ }
+}'
+--------------------------------------------------
+
+The above request will yield the following response:
+
+[source,js]
+--------------------------------------------------
+{
+ "took" : 19,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ },
+ "total" : 1,
+ "matches" : [
+ {
+ "_index" : "my-index",
+ "_id" : "1"
+ }
+ ]
+}
+--------------------------------------------------
+
+The percolate api returns matches that refer to the percolate queries that matched the document defined in the request.
+
+[float]
+=== Indexing percolator queries
+
+Percolate queries are stored as documents in a specific format in an arbitrary index, under a reserved type with the
+name `.percolator`. The query itself is placed as-is in a JSON object under the top level field `query`.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match" : {
+ "field" : "value"
+ }
+ }
+}
+--------------------------------------------------
+
+Since this is just an ordinary document, any field can be added to this document. This can be useful later on to only
+percolate documents by specific queries.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match" : {
+ "field" : "value"
+ }
+ },
+ "priority" : "high"
+}
+--------------------------------------------------
+
+On top of this, a mapping type can also be associated with this query. This allows you to control how certain queries,
+like range queries, shape filters, and other queries & filters that rely on mapping settings, get constructed. This is
+important since the percolate queries are indexed into the `.percolator` type, and queries / filters that rely on
+mapping settings would otherwise yield unexpected behaviour. Note, by default field names get resolved in a smart manner,
+but in certain cases with multiple types this can lead to unexpected behaviour, so being explicit about it will help.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "range" : {
+ "created_at" : {
+ "gte" : "2010-01-01T00:00:00",
+ "lte" : "2011-01-01T00:00:00"
+ }
+ }
+ },
+ "type" : "tweet",
+ "priority" : "high"
+}
+--------------------------------------------------
+
+In the above example the range query actually gets parsed into a Lucene numeric range query, based on the settings for
+the field `created_at` in the type `tweet`.
+
+Just as with any other type, the `.percolator` type has a mapping, which you can configure via the mappings apis.
+The default percolate mapping doesn't index the query field, it only stores it. By default the following mapping is
+active:
+
+[source,js]
+--------------------------------------------------
+{
+ ".percolator" : {
+ "properties" : {
+ "query" : {
+ "type" : "object",
+ "enabled" : false
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+If needed this mapping can be modified with the update mapping api.
+
+In order to un-register a percolate query the delete api can be used. So if the previously added query needs to be deleted,
+the following delete request needs to be executed:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE localhost:9200/my-index/.percolator/1
+--------------------------------------------------
+
+[float]
+=== Percolate api
+
+The percolate api executes in a distributed manner, meaning it executes on all shards an index points to.
+
+.Required options
+* `index` - The index that contains the `.percolator` type. This can also be an alias.
+* `type` - The type of the document to be percolated. The mapping of that type is used to parse the document.
+* `doc` - The actual document to percolate. Unlike the other two options this needs to be specified in the request body. Note this isn't required when percolating an existing document.
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/tweet/_percolate' -d '{
+ "doc" : {
+ "created_at" : "2010-10-10T00:00:00",
+ "message" : "some text"
+ }
+}'
+--------------------------------------------------
+
+.Additional supported query string options
+* `routing` - In the case the percolate queries are partitioned by a custom routing value, the routing option makes sure
+that the percolate request only gets executed on the shard the routing value is partitioned to. This means that
+the percolate request only gets executed on one shard instead of all shards. Multiple values can be specified as a
+comma separated string, in which case the request can be executed on more than one shard.
+* `preference` - Controls which shard replicas are preferred to execute the request on. Works the same as in the search api.
+* `ignore_unavailable` - Controls if missing concrete indices should silently be ignored. Same as in the search api.
+* `percolate_format` - If `ids` is specified then the matches array in the percolate response will contain a string
+array of the matching ids instead of an array of objects. This can be useful to reduce the amount of data being sent
+back to the client. Obviously if there are two percolator queries with the same id from different indices there is no way
+to find out which percolator query belongs to which index. Any other value to `percolate_format` will be ignored.
+
+.Additional request body options
+* `filter` - Reduces the number of queries to execute during percolating. Only the percolator queries that match the
+filter will be included in the percolate execution. The filter option works in near realtime, so a refresh needs to have
+occurred for the filter to include the latest percolate queries.
+* `query` - Same as the `filter` option, but the score is also computed. The computed scores can then be used by the
+`track_scores` and `sort` options (see the sketch after this list).
+* `size` - Defines the maximum number of matches (percolate queries) to be returned. Defaults to unlimited.
+* `track_scores` - Whether the `_score` is included for each match. The score is based on the query and represents how the query matched
+the percolate query's metadata and *not* how the document being percolated matched the query. The `query` option
+is required for this option. Defaults to `false`.
+* `sort` - Defines a sort specification like in the search api. Currently only sorting on `_score` reverse (default relevancy)
+is supported. Other sort fields will throw an exception. The `size` and `query` options are required for this setting. Like
+`track_scores`, the score is based on the query and represents how the query matched the percolate query's metadata
+and *not* how the document being percolated matched the query.
+* `facets` - Allows facet definitions to be included. The facets are based on the matching percolator queries. See the facet
+documentation on how to define facets.
+* `aggs` - Allows aggregation definitions to be included. The aggregations are based on the matching percolator queries;
+look at the aggregation documentation on how to define aggregations.
+* `highlight` - Allows highlight definitions to be included. The document being percolated is highlighted for each
+matching query. This allows you to see how each match is highlighting the document being percolated. See the highlight
+documentation on how to define highlights. The `size` option is required for highlighting; the performance of highlighting
+in the percolate api depends on how many matches are being highlighted.
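+
+A sketch (field names and values are illustrative) combining the `query`, `track_scores` and `size` options:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/my-index/my-type/_percolate' -d '{
+    "doc" : {
+        "message" : "some text"
+    },
+    "query" : {
+        "term" : {
+            "priority" : "high"
+        }
+    },
+    "track_scores" : true,
+    "size" : 10
+}'
+--------------------------------------------------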
+
+[float]
+=== Dedicated percolator index
+
+Percolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in,
+these queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index
+can have its own index settings (for example, the number of primary and replica shards). If you choose to have a dedicated
+percolate index, you need to make sure that the mappings from the normal index are also available on the percolate index.
+Otherwise percolate queries can be parsed incorrectly.
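+
+A sketch (the index name, settings, and mapping are illustrative) of creating such a dedicated index with the relevant
+mapping copied over:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/my-percolate-index' -d '{
+    "settings" : {
+        "number_of_shards" : 1,
+        "number_of_replicas" : 2
+    },
+    "mappings" : {
+        "message" : {
+            "properties" : {
+                "message" : { "type" : "string" }
+            }
+        }
+    }
+}'
+--------------------------------------------------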
+
+[float]
+=== Filtering Executed Queries
+
+Filtering allows you to reduce the number of queries to execute; any filter that the search api supports (except the ones mentioned in the important notes)
+can also be used in the percolate api. The filter only works on the metadata fields, since the `query` field isn't indexed by
+default. Based on the query we indexed before, the following filter can be defined:
+
+[source,js]
+--------------------------------------------------
+curl -XGET localhost:9200/test/type1/_percolate -d '{
+ "doc" : {
+ "field" : "value"
+ },
+ "filter" : {
+ "term" : {
+ "priority" : "high"
+ }
+ }
+}'
+--------------------------------------------------
+
+[float]
+=== Percolator count api
+
+The count percolate api only keeps track of the number of matches and doesn't keep track of the actual matches.
+Example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/my-index/my-type/_percolate/count' -d '{
+ "doc" : {
+ "message" : "some message"
+ }
+}'
+--------------------------------------------------
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ ... // header
+ "total" : 3
+}
+--------------------------------------------------
+
+
+[float]
+=== Percolating an existing document
+
+In order to percolate a newly indexed document, the percolate existing document api can be used. Based on the response
+from an index request, the `_id` and other meta information can be used to immediately percolate the newly added
+document.
+
+.Supported options for percolating an existing document on top of existing percolator options:
+* `id` - The id of the document to retrieve the source for.
+* `percolate_index` - The index containing the percolate queries. Defaults to the `index` defined in the url.
+* `percolate_type` - The percolate type (used for parsing the document). Defaults to the `type` defined in the url.
+* `routing` - The routing value to use when retrieving the document to percolate.
+* `preference` - Which shard to prefer when retrieving the existing document.
+* `percolate_routing` - The routing value to use when percolating the existing document.
+* `percolate_preference` - Which shard to prefer when executing the percolate request.
+* `version` - Enables a version check. If the fetched document's version isn't equal to the specified version then the request fails with a version conflict and the percolation request is aborted.
+* `version_type` - Whether internal or external versioning is used. Defaults to internal versioning.
+
+Internally the percolate api will issue a get request to fetch the `_source` of the document to percolate.
+For this feature to work, the `_source` for documents to be percolated needs to be stored.
+
+[float]
+==== Example
+
+Index response:
+
+[source,js]
+--------------------------------------------------
+{
+ "_index" : "my-index",
+ "_type" : "message",
+ "_id" : "1",
+ "_version" : 1,
+ "created" : true
+}
+--------------------------------------------------
+
+Percolating an existing document:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/my-index1/message/1/_percolate'
+--------------------------------------------------
+
+The response is the same as with the regular percolate api.
+
+[float]
+=== Multi percolate api
+
+The multi percolate api allows you to bundle multiple percolate requests into a single request, similar to what the multi
+search api does for search requests. The request body format is line based. Each percolate request item takes two lines:
+the first line is the header and the second line is the body.
+
+The header can contain any parameter that normally would be set via the request path or query string parameters.
+There are several percolate actions, because there are multiple types of percolate requests.
+
+.Supported actions:
+* `percolate` - Action for defining a regular percolate request.
+* `count` - Action for defining a count percolate request.
+
+Depending on the percolate action different parameters can be specified. For example the percolate and percolate existing
+document actions support different parameters.
+
+.The following endpoints are supported
+* GET|POST /[index]/[type]/_mpercolate
+* GET|POST /[index]/_mpercolate
+* GET|POST /_mpercolate
+
+The `index` and `type` defined in the url path are the default index and type.
+
+[float]
+==== Example
+
+Request:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/tweet/_mpercolate' --data-binary @requests.txt; echo
+--------------------------------------------------
+
+The index `twitter` is the default index and the type `tweet` is the default type; they will be used when a header
+doesn't specify an index or type.
+
+requests.txt:
+
+[source,js]
+--------------------------------------------------
+{"percolate" : {"index" : "twitter", "type" : "tweet"}}
+{"doc" : {"message" : "some text"}}
+{"percolate" : {"index" : "twitter", "type" : "tweet", "id" : "1"}}
+{}
+{"percolate" : {"index" : "users", "type" : "user", "id" : "3", "percolate_index" : "users_2012" }}
+{"size" : 10}
+{"count" : {"index" : "twitter", "type" : "tweet"}}
+{"doc" : {"message" : "some other text"}}
+{"count" : {"index" : "twitter", "type" : "tweet", "id" : "1"}}
+{}
+--------------------------------------------------
+
+For a percolate existing document item (headers with the `id` field), the body can be an empty json object.
+All the required options are set in the header.
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+ "items" : [
+ {
+ "took" : 24,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+        "failed" : 0
+ },
+ "total" : 3,
+ "matches" : ["1", "2", "3"]
+ },
+ {
+ "took" : 12,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+        "failed" : 0
+ },
+ "total" : 3,
+ "matches" : ["4", "5", "6"]
+ },
+ {
+ "error" : "[user][3]document missing"
+ },
+ {
+ "took" : 12,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+        "failed" : 0
+ },
+ "total" : 3
+ },
+ {
+ "took" : 14,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+        "failed" : 0
+ },
+ "total" : 3
+ }
+ ]
+}
+--------------------------------------------------
+
+Each item represents a percolate response; the order of the items maps to the order in which the percolate requests
+were specified. In case a percolate request failed, the item response is substituted with an error message.
+
+[float]
+=== How it works under the hood
+
+When a document that contains a query is indexed into the `.percolator` type of an index, the query part of the document gets
+parsed into a Lucene query and is kept in memory until that percolator document is removed or the index containing the
+`.percolator` type gets removed. So all the active percolator queries are kept in memory.
+
+At percolate time the document specified in the request gets parsed into a Lucene document and is stored in an in-memory
+Lucene index. This in-memory index can hold just this one document and is optimized for that. Then all the queries
+that are registered to the index that the percolate request is targeted at are executed on this single-document
+in-memory index. This happens on each shard the percolate request needs to execute on.
+
+By using `routing`, `filter` or `query` features the amount of queries that need to be executed can be reduced and thus
+the time the percolate api needs to run can be decreased.
+
+[float]
+=== Important notes
+
+Because the percolator API is processing one document at a time, it doesn't support queries and filters that run
+against child and nested documents such as `has_child`, `has_parent`, `top_children`, and `nested`.
+
+The `wildcard` and `regexp` queries natively use a lot of memory, and because the percolator keeps the queries in memory,
+this can easily take up the available memory in the heap space. If possible, try to use a `prefix` query or ngramming to
+achieve the same result (with way less memory being used).
+
+The delete-by-query api doesn't work to unregister a query; it only deletes the percolate documents from disk. In order
+to update the registered queries in memory, the index needs to be closed and reopened.
diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc
new file mode 100644
index 0000000..4a4a70a
--- /dev/null
+++ b/docs/reference/search/request-body.asciidoc
@@ -0,0 +1,112 @@
+[[search-request-body]]
+== Request Body Search
+
+The search request can be executed with a search DSL, which includes the
+<<query-dsl,Query DSL>>, within its body. Here is an
+example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search' -d '{
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+'
+--------------------------------------------------
+
+And here is a sample response:
+
+[source,js]
+--------------------------------------------------
+{
+ "_shards":{
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ },
+ "hits":{
+ "total" : 1,
+ "hits" : [
+ {
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_source" : {
+ "user" : "kimchy",
+ "postDate" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+ }
+ }
+ ]
+ }
+}
+--------------------------------------------------
+
+[float]
+=== Parameters
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Name |Description
+|`timeout` |A search timeout, bounding the search request to be executed
+within the specified time value and bail with the hits accumulated up to
+that point when expired. Defaults to no timeout.
+
+|`from` |The starting from index of the hits to return. Defaults to `0`.
+
+|`size` |The number of hits to return. Defaults to `10`.
+
+|`search_type` |The type of the search operation to perform. Can be
+`dfs_query_then_fetch`, `dfs_query_and_fetch`, `query_then_fetch`,
+`query_and_fetch`. Defaults to `query_then_fetch`. See
+<<search-request-search-type,_Search Type_>> for
+more details on the different types of search that can be performed.
+|=======================================================================
+
+Out of the above, `search_type` is the one that cannot be passed
+within the search request body; in order to set it, it must be
+passed as a REST request parameter.
+
+The rest of the search request should be passed within the body itself.
+The body content can also be passed as a REST parameter named `source`.
+
+Both HTTP GET and HTTP POST can be used to execute search with body.
+Since not all clients support GET with body, POST is allowed as well.
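+
+For example, a sketch passing the body via the `source` parameter (the
+URL encoding of the body is omitted for readability):
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?source={"query":{"term":{"user":"kimchy"}}}'
+--------------------------------------------------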
+
+
+include::request/query.asciidoc[]
+
+include::request/from-size.asciidoc[]
+
+include::request/sort.asciidoc[]
+
+include::request/source-filtering.asciidoc[]
+
+include::request/fields.asciidoc[]
+
+include::request/script-fields.asciidoc[]
+
+include::request/fielddata-fields.asciidoc[]
+
+include::request/post-filter.asciidoc[]
+
+include::request/highlighting.asciidoc[]
+
+include::request/rescore.asciidoc[]
+
+include::request/search-type.asciidoc[]
+
+include::request/scroll.asciidoc[]
+
+include::request/preference.asciidoc[]
+
+include::request/explain.asciidoc[]
+
+include::request/version.asciidoc[]
+
+include::request/index-boost.asciidoc[]
+
+include::request/min-score.asciidoc[]
+
+include::request/named-queries-and-filters.asciidoc[]
diff --git a/docs/reference/search/request/explain.asciidoc b/docs/reference/search/request/explain.asciidoc
new file mode 100644
index 0000000..81dc110
--- /dev/null
+++ b/docs/reference/search/request/explain.asciidoc
@@ -0,0 +1,14 @@
+[[search-request-explain]]
+=== Explain
+
+Enables explanation for each hit on how its score was computed.
+
+[source,js]
+--------------------------------------------------
+{
+ "explain": true,
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/request/fielddata-fields.asciidoc b/docs/reference/search/request/fielddata-fields.asciidoc
new file mode 100644
index 0000000..6547c16
--- /dev/null
+++ b/docs/reference/search/request/fielddata-fields.asciidoc
@@ -0,0 +1,21 @@
+[[search-request-fielddata-fields]]
+=== Field Data Fields
+
+Allows you to return the field data representation of a field for each
+hit, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ ...
+ },
+ "fielddata_fields" : ["test1", "test2"]
+}
+--------------------------------------------------
+
+Field data fields can work on fields that are not stored.
+
+It's important to understand that using the `fielddata_fields` parameter will
+cause the terms for that field to be loaded to memory (cached), which will
+result in more memory consumption.
diff --git a/docs/reference/search/request/fields.asciidoc b/docs/reference/search/request/fields.asciidoc
new file mode 100644
index 0000000..659eaa6
--- /dev/null
+++ b/docs/reference/search/request/fields.asciidoc
@@ -0,0 +1,103 @@
+[[search-request-fields]]
+=== Fields
+
+Allows you to selectively load specific stored fields for each document
+represented by a search hit.
+
+[source,js]
+--------------------------------------------------
+{
+ "fields" : ["user", "postDate"],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+`*` can be used to load all stored fields from the document.
+
+An empty array will cause only the `_id` and `_type` for each hit to be
+returned, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "fields" : [],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+
+For backwards compatibility, if the fields parameter specifies fields which are not stored (`store` mapping set to
+`false`), it will load the `_source` and extract the fields from it. This functionality has been replaced by the
+<<search-request-source-filtering,source filtering>> parameter.
+
+Field values fetched from the document itself are always returned as an array. Metadata fields like the `_routing` and
+`_parent` fields are never returned as an array.
+
+Also, only leaf fields can be returned via the `fields` option. Object fields can't be returned, and such requests
+will fail.
+
+Script fields can also be automatically detected and used as fields, so
+things like `_source.obj1.field1` can be used, though this is not recommended, as
+`obj1.field1` will work as well.
+
+[[partial]]
+==== Partial
+
+deprecated[1.0.0Beta1,Replaced by <<search-request-source-filtering>>]
+
+
+When loading data from `_source`, partial fields can use
+wildcards to control what part of the `_source` will be loaded, based on
+`include` and `exclude` patterns. For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "partial_fields" : {
+ "partial1" : {
+            "include" : "obj1.obj2.*"
+ }
+ }
+}
+--------------------------------------------------
+
+And one that will also exclude `obj1.obj3`:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "partial_fields" : {
+ "partial1" : {
+ "include" : "obj1.obj2.*",
+ "exclude" : "obj1.obj3.*"
+ }
+ }
+}
+--------------------------------------------------
+
+Both `include` and `exclude` support multiple patterns:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "match_all" : {}
+ },
+ "partial_fields" : {
+ "partial1" : {
+ "include" : ["obj1.obj2.*", "obj1.obj4.*"],
+ "exclude" : "obj1.obj3.*"
+ }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/request/from-size.asciidoc b/docs/reference/search/request/from-size.asciidoc
new file mode 100644
index 0000000..d8d8095
--- /dev/null
+++ b/docs/reference/search/request/from-size.asciidoc
@@ -0,0 +1,21 @@
+[[search-request-from-size]]
+=== From / Size
+
+Pagination of results can be done by using the `from` and `size`
+parameters. The `from` parameter defines the offset from the first
+result you want to fetch. The `size` parameter allows you to configure
+the maximum number of hits to be returned.
+
+Though `from` and `size` can be set as request parameters, they can also
+be set within the search body. `from` defaults to `0`, and `size`
+defaults to `10`.
+
+[source,js]
+--------------------------------------------------
+{
+ "from" : 0, "size" : 10,
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc
new file mode 100644
index 0000000..00de5bc
--- /dev/null
+++ b/docs/reference/search/request/highlighting.asciidoc
@@ -0,0 +1,549 @@
+[[search-request-highlighting]]
+=== Highlighting
+
+Allows to highlight search results on one or more fields. The
+implementation uses either the Lucene `highlighter`, `fast-vector-highlighter`
+or `postings-highlighter`. The following is an example of the search request
+body:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "fields" : {
+ "content" : {}
+ }
+ }
+}
+--------------------------------------------------
+
+In the above case, the `content` field will be highlighted for each
+search hit (there will be another element in each search hit, called
+`highlight`, which includes the highlighted fields and the highlighted
+fragments).
+
+In order to perform highlighting, the actual content of the field is
+required. If the field in question is stored (has `store` set to `true`
+in the mapping) it will be used, otherwise, the actual `_source` will
+be loaded and the relevant field will be extracted from it.
+
+The field name supports wildcard notation. For example, using `comment_*`
+will cause all fields that match the expression to be highlighted.
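+
+For example, a sketch of requesting highlighting on every field matching
+`comment_*` (the field names are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {...},
+    "highlight" : {
+        "fields" : {
+            "comment_*" : {}
+        }
+    }
+}
+--------------------------------------------------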
+
+==== Postings highlighter
+
+If `index_options` is set to `offsets` in the mapping the postings highlighter
+will be used instead of the plain highlighter. The postings highlighter:
+
+* Is faster since it doesn't require reanalyzing the text to be highlighted:
+the larger the documents, the better the performance gain should be
+* Requires less disk space than `term_vectors`, which are needed for the fast
+vector highlighter
+* Breaks the text into sentences and highlights them. Plays really well with
+natural languages, but not as well with fields containing, for instance, HTML markup
+* Treats the document as the whole corpus, and scores individual sentences as
+if they were documents in this corpus, using the BM25 algorithm
+
+Here is an example of setting the `content` field to allow for
+highlighting using the postings highlighter on it:
+
+[source,js]
+--------------------------------------------------
+{
+ "type_name" : {
+ "content" : {"index_options" : "offsets"}
+ }
+}
+--------------------------------------------------
+
+[NOTE]
+Note that the postings highlighter is meant to perform simple query terms
+highlighting, regardless of their positions. That means that when used for
+instance in combination with a phrase query, it will highlight all the terms
+that the query is composed of, regardless of whether they are actually part of
+a query match, effectively ignoring their positions.
+
+[WARNING]
+The postings highlighter does support highlighting of multi term queries, like
+prefix queries, wildcard queries and so on. On the other hand, this requires
+the queries to be rewritten using a proper
+<<query-dsl-multi-term-rewrite,rewrite method>> that supports multi term
+extraction, which is a potentially expensive operation.
+
+
+==== Fast vector highlighter
+
+If `term_vector` information is provided by setting `term_vector` to
+`with_positions_offsets` in the mapping then the fast vector highlighter
+will be used instead of the plain highlighter. The fast vector highlighter:
+
+* Is faster especially for large fields (> `1MB`)
+* Can be customized with `boundary_chars`, `boundary_max_scan`, and
+ `fragment_offset` (see <<boundary-characters,below>>)
+* Requires setting `term_vector` to `with_positions_offsets` which
+ increases the size of the index
+* Can combine matches from multiple fields into one result. See
+ `matched_fields`
+* Can assign different weights to matches at different positions allowing
+ for things like phrase matches being sorted above term matches when
+ highlighting a Boosting Query that boosts phrase matches over term matches
+
+Here is an example of setting the `content` field to allow for
+highlighting using the fast vector highlighter on it (this will cause
+the index to be bigger):
+
+[source,js]
+--------------------------------------------------
+{
+ "type_name" : {
+ "content" : {"term_vector" : "with_positions_offsets"}
+ }
+}
+--------------------------------------------------
+
+==== Force highlighter type
+
+The `type` field allows to force a specific highlighter type. This is useful
+for instance when needing to use the plain highlighter on a field that has
+`term_vectors` enabled. The allowed values are: `plain`, `postings` and `fvh`.
+The following is an example that forces the use of the plain highlighter:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "fields" : {
+ "content" : {"type" : "plain"}
+ }
+ }
+}
+--------------------------------------------------
+
+==== Force highlighting on source
+
+added[1.0.0.RC1]
+
+Forces the highlighting to highlight fields based on the source even if fields are
+stored separately. Defaults to `false`.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "fields" : {
+ "content" : {"force_source" : true}
+ }
+ }
+}
+--------------------------------------------------
+
+[[tags]]
+==== Highlighting Tags
+
+By default, the highlighting will wrap highlighted text in `<em>` and
+`</em>`. This can be controlled by setting `pre_tags` and `post_tags`,
+for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "pre_tags" : ["<tag1>"],
+ "post_tags" : ["</tag1>"],
+ "fields" : {
+ "_all" : {}
+ }
+ }
+}
+--------------------------------------------------
+
+When using the fast vector highlighter, there can be more tags, and the
+"importance" is ordered.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "pre_tags" : ["<tag1>", "<tag2>"],
+ "post_tags" : ["</tag1>", "</tag2>"],
+ "fields" : {
+ "_all" : {}
+ }
+ }
+}
+--------------------------------------------------
+
+There are also built-in "tag" schemas, with currently a single schema
+called `styled` with the following `pre_tags`:
+
+[source,js]
+--------------------------------------------------
+<em class="hlt1">, <em class="hlt2">, <em class="hlt3">,
+<em class="hlt4">, <em class="hlt5">, <em class="hlt6">,
+<em class="hlt7">, <em class="hlt8">, <em class="hlt9">,
+<em class="hlt10">
+--------------------------------------------------
+
+and `</em>` as `post_tags`. If you can think of other nice-to-have built-in
+tag schemas, just send an email to the mailing list or open an issue. Here
+is an example of switching tag schemas:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "tags_schema" : "styled",
+ "fields" : {
+ "content" : {}
+ }
+ }
+}
+--------------------------------------------------
+
+
+==== Encoder
+
+An `encoder` parameter can be used to define how highlighted text will
+be encoded. It can be either `default` (no encoding) or `html` (will
+escape html, if you use html highlighting tags).
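+
+For example, a minimal sketch that asks for HTML-escaped fragments:
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {...},
+    "highlight" : {
+        "encoder" : "html",
+        "fields" : {
+            "content" : {}
+        }
+    }
+}
+--------------------------------------------------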
+
+==== Highlighted Fragments
+
+Each field highlighted can control the size of the highlighted fragment
+in characters (defaults to `100`), and the maximum number of fragments
+to return (defaults to `5`).
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "fields" : {
+ "content" : {"fragment_size" : 150, "number_of_fragments" : 3}
+ }
+ }
+}
+--------------------------------------------------
+
+The `fragment_size` is ignored when using the postings highlighter, as it
+outputs sentences regardless of their length.
+
+On top of this it is possible to specify that highlighted fragments need
+to be sorted by score:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "order" : "score",
+ "fields" : {
+ "content" : {"fragment_size" : 150, "number_of_fragments" : 3}
+ }
+ }
+}
+--------------------------------------------------
+
+If the `number_of_fragments` value is set to `0` then no fragments are
+produced; instead the whole content of the field is returned, and of
+course it is highlighted. This can be very handy if short texts (like a
+document title or address) need to be highlighted but no fragmentation
+is required. Note that `fragment_size` is ignored in this case.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "fields" : {
+ "_all" : {},
+ "bio.title" : {"number_of_fragments" : 0}
+ }
+ }
+}
+--------------------------------------------------
+
+When using the `fast-vector-highlighter` one can use the `fragment_offset`
+parameter to control the margin from which to start highlighting.
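+
+For example, a sketch that starts highlighting `70` characters into the
+field (the value is illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {...},
+    "highlight" : {
+        "fields" : {
+            "content" : {
+                "type" : "fvh",
+                "fragment_offset" : 70
+            }
+        }
+    }
+}
+--------------------------------------------------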
+
+In the case where there is no matching fragment to highlight, the default is
+to not return anything. Instead, we can return a snippet of text from the
+beginning of the field by setting `no_match_size` (default `0`) to the length
+of the text that you want returned. The actual length may be shorter than
+specified as it tries to break on a word boundary. When using the postings
+highlighter it is not possible to control the actual size of the snippet,
+therefore the first sentence gets returned whenever `no_match_size` is
+greater than `0`.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "fields" : {
+ "content" : {
+ "fragment_size" : 150,
+ "number_of_fragments" : 3,
+ "no_match_size": 150
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+
+==== Highlight query
+
+It is also possible to highlight against a query other than the search
+query by setting `highlight_query`. This is especially useful if you
+use a rescore query because those are not taken into account by
+highlighting by default. Elasticsearch does not validate that
+`highlight_query` contains the search query in any way, so it is possible
+to define it such that legitimate query results aren't highlighted at all.
+Generally it is better to include the search query in the
+`highlight_query`. Here is an example of including both the search
+query and the rescore query in `highlight_query`.
+[source,js]
+--------------------------------------------------
+{
+ "fields": [ "_id" ],
+ "query" : {
+ "match": {
+ "content": {
+ "query": "foo bar"
+ }
+ }
+ },
+ "rescore": {
+ "window_size": 50,
+ "query": {
+ "rescore_query" : {
+ "match_phrase": {
+ "content": {
+ "query": "foo bar",
+ "phrase_slop": 1
+ }
+ }
+ },
+ "rescore_query_weight" : 10
+ }
+ },
+ "highlight" : {
+ "order" : "score",
+ "fields" : {
+ "content" : {
+ "fragment_size" : 150,
+ "number_of_fragments" : 3,
+ "highlight_query": {
+ "bool": {
+ "must": {
+ "match": {
+ "content": {
+ "query": "foo bar"
+ }
+ }
+ },
+ "should": {
+ "match_phrase": {
+ "content": {
+ "query": "foo bar",
+ "phrase_slop": 1,
+ "boost": 10.0
+ }
+ }
+ },
+ "minimum_should_match": 0
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Note that the score of a text fragment in this case is calculated by the Lucene
+highlighting framework. For implementation details you can check the
+`ScoreOrderFragmentsBuilder.java` class. On the other hand when using the
+postings highlighter the fragments are scored using, as mentioned above,
+the BM25 algorithm.
+
+[[highlighting-settings]]
+==== Global Settings
+
+Highlighting settings can be set on a global level and then overridden
+at the field level.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {...},
+ "highlight" : {
+ "number_of_fragments" : 3,
+ "fragment_size" : 150,
+ "tag_schema" : "styled",
+ "fields" : {
+ "_all" : { "pre_tags" : ["<em>"], "post_tags" : ["</em>"] },
+ "bio.title" : { "number_of_fragments" : 0 },
+ "bio.author" : { "number_of_fragments" : 0 },
+ "bio.content" : { "number_of_fragments" : 5, "order" : "score" }
+ }
+ }
+}
+--------------------------------------------------
+
+[[field-match]]
+==== Require Field Match
+
+`require_field_match` can be set to `true`, which will cause a field to
+be highlighted only if a query matched that field. `false` means that
+terms are highlighted on all requested fields regardless of whether the
+query matches them specifically.
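+
+For example, a minimal sketch setting it at the global level (it can
+also be set per field):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {...},
+    "highlight" : {
+        "require_field_match" : true,
+        "fields" : {
+            "content" : {}
+        }
+    }
+}
+--------------------------------------------------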
+
+[[boundary-characters]]
+==== Boundary Characters
+
+When highlighting a field using the fast vector highlighter,
+`boundary_chars` can be configured to define what constitutes a boundary
+for highlighting. It's a single string with each boundary character
+defined in it. It defaults to `.,!? \t\n`.
+
+The `boundary_max_scan` allows to control how far to look for boundary
+characters, and defaults to `20`.
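+
+For example, a sketch that treats only sentence-ending punctuation as
+boundaries and widens the scan (the values are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {...},
+    "highlight" : {
+        "fields" : {
+            "content" : {
+                "type" : "fvh",
+                "boundary_chars" : ".!?",
+                "boundary_max_scan" : 30
+            }
+        }
+    }
+}
+--------------------------------------------------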
+
+
+[[matched-fields]]
+==== Matched Fields
+The Fast Vector Highlighter can combine matches on multiple fields to
+highlight a single field using `matched_fields`. This is most
+intuitive for multifields that analyze the same string in different
+ways. All `matched_fields` must have `term_vector` set to
+`with_positions_offsets` but only the field to which the matches are
+combined is loaded, so only that field would benefit from having
+`store` set to `yes`.
+
+In the following examples `content` is analyzed by the `english`
+analyzer and `content.plain` is analyzed by the `standard` analyzer.
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "query_string": {
+ "query": "content.plain:running scissors",
+ "fields": ["content"]
+ }
+ },
+ "highlight": {
+ "order": "score",
+ "fields": {
+ "content": {
+ "matched_fields": ["content", "content.plain"],
+ "type" : "fvh"
+ }
+ }
+ }
+}
+--------------------------------------------------
+The above matches both "run with scissors" and "running with scissors"
+and would highlight "running" and "scissors" but not "run". If both
+phrases appear in a large document then "running with scissors" is
+sorted above "run with scissors" in the fragments list because there
+are more matches in that fragment.
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "query_string": {
+ "query": "running scissors",
+ "fields": ["content", "content.plain^10"]
+ }
+ },
+ "highlight": {
+ "order": "score",
+ "fields": {
+ "content": {
+ "matched_fields": ["content", "content.plain"],
+ "type" : "fvh"
+ }
+ }
+ }
+}
+--------------------------------------------------
+The above highlights "run" as well as "running" and "scissors" but
+still sorts "running with scissors" above "run with scissors" because
+the plain match ("running") is boosted.
+
+[source,js]
+--------------------------------------------------
+{
+ "query": {
+ "query_string": {
+ "query": "running scissors",
+ "fields": ["content", "content.plain^10"]
+ }
+ },
+ "highlight": {
+ "order": "score",
+ "fields": {
+ "content": {
+ "matched_fields": ["content.plain"],
+ "type" : "fvh"
+ }
+ }
+ }
+}
+--------------------------------------------------
+The above query wouldn't highlight "run" or "scissor" but shows that
+it is just fine not to list the field to which the matches are combined
+(`content`) in the matched fields.
+
+[NOTE]
+Technically it is also fine to add fields to `matched_fields` that
+don't share the same underlying string as the field to which the matches
+are combined. The results might not make much sense, and if one of the
+matches is off the end of the text then the whole query will fail.
+
+[NOTE]
+===================================================================
+There is a small amount of overhead involved with setting
+`matched_fields` to a non-empty array so always prefer
+[source,js]
+--------------------------------------------------
+ "highlight": {
+ "fields": {
+ "content": {}
+ }
+ }
+--------------------------------------------------
+to
+[source,js]
+--------------------------------------------------
+ "highlight": {
+ "fields": {
+ "content": {
+ "matched_fields": ["content"],
+ "type" : "fvh"
+ }
+ }
+ }
+--------------------------------------------------
+===================================================================
+
+[[phrase-limit]]
+==== Phrase Limit
+The `fast-vector-highlighter` has a `phrase_limit` parameter that prevents
+it from analyzing too many phrases and consuming too much memory. It defaults
+to 256, so only the first 256 matching phrases in the document are scored
+and considered. You can raise the limit with the `phrase_limit` parameter, but
+keep in mind that scoring more phrases consumes more time and memory.
+
+If using `matched_fields` keep in mind that `phrase_limit` phrases per
+matched field are considered.
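+
+For example, a sketch that raises the limit (the value is illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {...},
+    "highlight" : {
+        "phrase_limit" : 512,
+        "fields" : {
+            "content" : { "type" : "fvh" }
+        }
+    }
+}
+--------------------------------------------------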
diff --git a/docs/reference/search/request/index-boost.asciidoc b/docs/reference/search/request/index-boost.asciidoc
new file mode 100644
index 0000000..29d1da3
--- /dev/null
+++ b/docs/reference/search/request/index-boost.asciidoc
@@ -0,0 +1,17 @@
+[[search-request-index-boost]]
+=== Index Boost
+
+Allows to configure a different boost level per index when searching
+across more than one index. This is very handy when hits coming from
+one index matter more than hits coming from another index (think of a social
+graph where each user has an index).
+
+[source,js]
+--------------------------------------------------
+{
+ "indices_boost" : {
+ "index1" : 1.4,
+ "index2" : 1.3
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/request/min-score.asciidoc b/docs/reference/search/request/min-score.asciidoc
new file mode 100644
index 0000000..f5a212e
--- /dev/null
+++ b/docs/reference/search/request/min-score.asciidoc
@@ -0,0 +1,18 @@
+[[search-request-min-score]]
+=== min_score
+
+Exclude documents which have a `_score` less than the minimum specified
+in `min_score`:
+
+[source,js]
+--------------------------------------------------
+{
+ "min_score": 0.5,
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+Note, most times, this does not make much sense, but is provided for
+advanced use cases.
diff --git a/docs/reference/search/request/named-queries-and-filters.asciidoc b/docs/reference/search/request/named-queries-and-filters.asciidoc
new file mode 100644
index 0000000..c28baec
--- /dev/null
+++ b/docs/reference/search/request/named-queries-and-filters.asciidoc
@@ -0,0 +1,57 @@
+[[search-request-named-queries-and-filters]]
+=== Named Queries and Filters
+
+Each filter and query can accept a `_name` in its top level definition.
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "bool" : {
+ "should" : [
+ {"match" : { "name.first" : {"query" : "shay", "_name" : "first"} }},
+ {"match" : { "name.last" : {"query" : "banon", "_name" : "last"} }}
+ ]
+ }
+ },
+ "filter" : {
+ "terms" : {
+ "name.last" : ["banon", "kimchy"],
+ "_name" : "test"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+The search response will include for each hit the `matched_filters` it matched on. The tagging of queries and filters
+only makes sense for compound queries and filters (such as the `bool` query and filter, the `or` and `and` filters, the `filtered` query etc.).
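+
+For example, a hit matching both named parts above might carry (a sketch
+of the relevant part of a hit in the response):
+
+[source,js]
+--------------------------------------------------
+{
+    "_id" : "1",
+    "matched_filters" : ["last", "test"],
+    ...
+}
+--------------------------------------------------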
+
+Note, the query filter had to be enhanced in order to support this. In
+order to set a name, the `fquery` filter should be used, which wraps a
+query (just so there will be a place to set a name for it), for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "filtered" : {
+ "query" : {
+ "term" : { "name.first" : "shay" }
+ },
+ "filter" : {
+ "fquery" : {
+ "query" : {
+ "term" : { "name.last" : "banon" }
+ },
+ "_name" : "test"
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+==== Named queries
+
+Support for the `_name` option on queries is available from version `0.90.4`, while support on filters is
+also available in versions before `0.90.4`.
diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc
new file mode 100644
index 0000000..489ea96
--- /dev/null
+++ b/docs/reference/search/request/post-filter.asciidoc
@@ -0,0 +1,90 @@
+[[search-request-post-filter]]
+=== Post filter
+
+The `post_filter` allows any filter that it holds to be executed as the last filter; because
+of this, the `post_filter` only has an effect on the search hits and not on facets.
+
+There are several reasons to specify filters as `post_filter`. One reason is to force
+expensive filters to be executed last, so that these filters only operate on the
+docs that match the rest of the query. An example of a filter for which a `post_filter`
+should be used for this reason is the `geo_distance` filter. The `geo_distance` filter is
+in general an expensive filter to execute, and to reduce its execution time,
+one can choose to specify it as a `post_filter`, so it runs on documents that are very likely
+to end up as matching documents.
+
+Another important reason is facet navigation: sometimes only the hits
+need to be filtered by the chosen facet, while all the facets should
+continue to be calculated based on the original query.
+The `post_filter` element within the search request can be used to accomplish this.
+
+Note, this is different compared to creating a `filtered` query with the
+filter, since this will cause the facets to only process the filtered
+results.
+
+For example, let's create two tweets, with two different tags:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/twitter/tweet/1' -d '
+{
+ "message" : "something blue",
+ "tag" : "blue"
+}
+'
+
+curl -XPUT 'localhost:9200/twitter/tweet/2' -d '
+{
+ "message" : "something green",
+ "tag" : "green"
+}
+'
+
+curl -XPOST 'localhost:9200/_refresh'
+--------------------------------------------------
+
+We can now search for something, and have a terms facet.
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/twitter/_search?pretty=true' -d '
+{
+ "query" : {
+ "term" : { "message" : "something" }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : { "field" : "tag" }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+We get two hits, and the relevant facets with a count of 1 for both
+`green` and `blue`. Now, let's say the `green` facet is chosen; we can
+simply add a filter for it:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/twitter/_search?pretty=true' -d '
+{
+ "query" : {
+ "term" : { "message" : "something" }
+ },
+ "post_filter" : {
+ "term" : { "tag" : "green" }
+ },
+ "facets" : {
+ "tag" : {
+ "terms" : { "field" : "tag" }
+ }
+ }
+}
+'
+--------------------------------------------------
+
+And now, we get only 1 hit back, but the facets remain the same.
+
+Note, if additional filters are required on specific facets, they can be
+added as a `facet_filter` to the relevant facets.
+
diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc
new file mode 100644
index 0000000..0a71a36
--- /dev/null
+++ b/docs/reference/search/request/preference.asciidoc
@@ -0,0 +1,42 @@
+[[search-request-preference]]
+=== Preference
+
+Controls the `preference` of which shard replicas to execute the search
+request on. By default, the operation is randomized between the shard
+replicas.
+
+The `preference` can be set to:
+
+[horizontal]
+`_primary`::
+ The operation will go and be executed only on the primary
+ shards.
+
+`_primary_first`::
+ The operation will go and be executed on the primary
+ shard, and if not available (failover), will execute on other shards.
+
+`_local`::
+ The operation will prefer to be executed on a local
+ allocated shard if possible.
+
+`_only_node:xyz`::
+ Restricts the search to execute only on a node with
+ the provided node id (`xyz` in this case).
+
+`_prefer_node:xyz`::
+ Prefers execution on the node with the provided
+ node id (`xyz` in this case) if applicable.
+
+`_shards:2,3`::
+ Restricts the operation to the specified shards. (`2`
+ and `3` in this case). This preference can be combined with other
+ preferences but it has to appear first: `_shards:2,3;_primary`
+
+Custom (string) value::
+ A custom value will be used to guarantee that
+ the same shards will be used for the same custom value. This can help
+ with "jumping values" when hitting different shards in different refresh
+ states. A sample value can be something like the web session id, or the
+ user name.
+
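+For example, a sketch that pins a user's searches to the same shards by
+passing a (hypothetical) session id as a custom preference value:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/_search?preference=xyzabc123' -d '
+{
+    "query" : {
+        "match" : { "message" : "elasticsearch" }
+    }
+}
+'
+--------------------------------------------------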
diff --git a/docs/reference/search/request/query.asciidoc b/docs/reference/search/request/query.asciidoc
new file mode 100644
index 0000000..e496320
--- /dev/null
+++ b/docs/reference/search/request/query.asciidoc
@@ -0,0 +1,14 @@
+[[search-request-query]]
+=== Query
+
+The query element within the search request body allows to define a
+query using the <<query-dsl,Query DSL>>.
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc
new file mode 100644
index 0000000..5011264
--- /dev/null
+++ b/docs/reference/search/request/rescore.asciidoc
@@ -0,0 +1,81 @@
+[[search-request-rescore]]
+=== Rescoring
+
+Rescoring can help to improve precision by reordering just the top (e.g.
+100 - 500) documents returned by the
+<<search-request-query,`query`>> and
+<<search-request-post-filter,`post_filter`>> phases, using a
+secondary (usually more costly) algorithm, instead of applying the
+costly algorithm to all documents in the index.
+
+A `rescore` request is executed on each shard before it returns its
+results to be sorted by the node handling the overall search request.
+
+Currently the rescore API has only one implementation: the query
+rescorer, which uses a query to tweak the scoring. In the future,
+alternative rescorers may be made available, for example, a pair-wise rescorer.
+
+*Note:* the `rescore` phase is not executed when
+<<search-request-search-type,`search_type`>> is set
+to `scan` or `count`.
+
+==== Query rescorer
+
+The query rescorer executes a second query only on the Top-K results
+returned by the <<search-request-query,`query`>> and
+<<search-request-post-filter,`post_filter`>> phases. The
+number of docs which will be examined on each shard can be controlled by
+the `window_size` parameter, which defaults to
+<<search-request-from-size,`from` and `size`>>.
+
+By default the scores from the original query and the rescore query are
+combined linearly to produce the final `_score` for each document. The
+relative importance of the original query and of the rescore query can
+be controlled with the `query_weight` and `rescore_query_weight`
+respectively. Both default to `1`.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+curl -s -XPOST 'localhost:9200/_search' -d '{
+ "query" : {
+ "match" : {
+ "field1" : {
+ "operator" : "or",
+ "query" : "the quick brown",
+ "type" : "boolean"
+ }
+ }
+ },
+ "rescore" : {
+ "window_size" : 50,
+ "query" : {
+ "rescore_query" : {
+ "match" : {
+ "field1" : {
+ "query" : "the quick brown",
+ "type" : "phrase",
+ "slop" : 2
+ }
+ }
+ },
+ "query_weight" : 0.7,
+ "rescore_query_weight" : 1.2
+ }
+ }
+}
+'
+--------------------------------------------------
+
+The way the scores are combined can be controlled with the `score_mode`:
+[cols="<,<",options="header",]
+|=======================================================================
+|Score Mode |Description
+|`total` |Add the original score and the rescore query score. The default.
+|`multiply` |Multiply the original score by the rescore query score. Useful
+for <<query-dsl-function-score-query,`function query`>> rescores.
+|`avg` |Average the original score and the rescore query score.
+|`max` |Take the max of original score and the rescore query score.
+|`min` |Take the min of the original score and the rescore query score.
+|=======================================================================
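+
+For example, a sketch of how `score_mode` fits into the request (the
+rescore query itself is elided):
+
+[source,js]
+--------------------------------------------------
+"rescore" : {
+    "window_size" : 50,
+    "query" : {
+        "score_mode" : "multiply",
+        "rescore_query" : { ... },
+        "rescore_query_weight" : 1.2
+    }
+}
+--------------------------------------------------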
diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc
new file mode 100644
index 0000000..46b1698
--- /dev/null
+++ b/docs/reference/search/request/script-fields.asciidoc
@@ -0,0 +1,60 @@
+[[search-request-script-fields]]
+=== Script Fields
+
+Allows to return a <<modules-scripting,script
+evaluation>> (based on different fields) for each hit, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ ...
+ },
+ "script_fields" : {
+ "test1" : {
+ "script" : "doc['my_field_name'].value * 2"
+ },
+ "test2" : {
+ "script" : "doc['my_field_name'].value * factor",
+ "params" : {
+ "factor" : 2.0
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+Script fields can work on fields that are not stored (`my_field_name` in
+the above case), and allow custom values to be returned (the
+evaluated value of the script).
+
+Script fields can also access the actual `_source` document indexed and
+extract specific elements to be returned from it (can be an "object"
+type). Here is an example:
+
+[source,js]
+--------------------------------------------------
+ {
+ "query" : {
+ ...
+ },
+ "script_fields" : {
+ "test1" : {
+ "script" : "_source.obj1.obj2"
+ }
+ }
+ }
+--------------------------------------------------
+
+Note the `_source` keyword here to navigate the JSON-like model.
+
+It's important to understand the difference between
+`doc['my_field'].value` and `_source.my_field`. The first, using the doc
+keyword, will cause the terms for that field to be loaded to memory
+(cached), which will result in faster execution, but more memory
+consumption. Also, the `doc[...]` notation only allows for simple valued
+fields (you can't return a JSON object from it) and makes sense only on
+non-analyzed or single term based fields.
+
+The `_source`, on the other hand, causes the source to be loaded, parsed,
+and then only the relevant part of the JSON is returned.
diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc
new file mode 100644
index 0000000..0694270
--- /dev/null
+++ b/docs/reference/search/request/scroll.asciidoc
@@ -0,0 +1,42 @@
+[[search-request-scroll]]
+=== Scroll
+
+A search request can be scrolled by specifying the `scroll` parameter.
+The `scroll` parameter is a time value parameter (for example:
+`scroll=5m`), indicating for how long the nodes that participate in the
+search will maintain relevant resources in order to continue and support
+it. This is very similar in its idea to opening a cursor against a
+database.
+
+A `scroll_id` is returned from the first search request (and from
+subsequent scroll requests). The `scroll_id` should be used when
+scrolling (along with the `scroll` parameter, to stop the scroll from
+expiring). The scroll id can also be passed as part of the search
+request body.
+
+*Note*: the `scroll_id` changes for each scroll request and only the
+most recent one should be used.
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?scroll=5m' -d '{
+ "query": {
+ "query_string" : {
+ "query" : "some query string here"
+ }
+ }
+}
+'
+--------------------------------------------------
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_search/scroll?scroll=5m&scroll_id=c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1'
+--------------------------------------------------
+
+Scrolling is not intended for real time user requests; it is intended
+for cases like scrolling over large portions of data that exist within
+Elasticsearch, for example in order to reindex them.
+
+For more information on scrolling, see the
+<<search-request-search-type,scan>> search type.
diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc
new file mode 100644
index 0000000..6f4c7ba
--- /dev/null
+++ b/docs/reference/search/request/search-type.asciidoc
@@ -0,0 +1,156 @@
+[[search-request-search-type]]
+=== Search Type
+
+There are different execution paths that can be taken when executing a
+distributed search. The distributed search operation needs to be
+scattered to all the relevant shards and then all the results must be
+gathered back. When doing scatter/gather type execution, there are
+several ways to do that, specifically with search engines.
+
+One of the questions when executing a distributed search is how many
+results to retrieve from each shard. For example, if we have 10 shards,
+the 1st shard might hold the most relevant results from 0 till 10, with
+the other shards' results ranking below it. For this reason, when executing a
+request, we will need to get results from 0 till 10 from all shards,
+sort them, and then return the results if we want to ensure correct
+results.
+
+Another question, which relates to the search engine, is the fact that each
+shard stands on its own. When a query is executed on a specific shard,
+it does not take into account term frequencies and other search engine
+information from the other shards. If we want to support accurate
+ranking, we would need to first execute the query against all shards and
+gather the relevant term frequencies, and then, based on it, execute the
+query.
+
+Also, because of the need to sort the results, getting back a large
+document set, or even scrolling it, while maintaining the correct sorting
+behavior can be a very expensive operation. For large result set
+scrolling without sorting, the `scan` search type (explained below) is
+also available.
+
+Elasticsearch is very flexible and allows to control the type of search
+to execute on a *per search request* basis. The type can be configured
+by setting the *search_type* parameter in the query string. The types
+are:
+
+[[query-and-fetch]]
+==== Query And Fetch
+
+Parameter value: *query_and_fetch*.
+
+The most naive (and possibly fastest) implementation is to simply
+execute the query on all relevant shards and return the results. Each
+shard returns `size` results. Since each shard already returns `size`
+hits, this type actually returns `size` times `number of shards` results
+back to the caller.
+
+[[query-then-fetch]]
+==== Query Then Fetch
+
+Parameter value: *query_then_fetch*.
+
+The query is executed against all shards, but only enough information is
+returned (*not the document content*). The results are then sorted and
+ranked, and based on it, *only the relevant shards* are asked for the
+actual document content. The returned number of hits is exactly as
+specified in `size`, since they are the only ones that are fetched. This
+is very handy when the index has a lot of shards (not replicas, shard id
+groups).
+
+NOTE: This is the default setting, if you do not specify a `search_type`
+ in your request.
+
+[[dfs-query-and-fetch]]
+==== Dfs, Query And Fetch
+
+Parameter value: *dfs_query_and_fetch*.
+
+Same as "Query And Fetch", except for an initial scatter phase which
+goes and computes the distributed term frequencies for more accurate
+scoring.
+
+[[dfs-query-then-fetch]]
+==== Dfs, Query Then Fetch
+
+Parameter value: *dfs_query_then_fetch*.
+
+Same as "Query Then Fetch", except for an initial scatter phase which
+goes and computes the distributed term frequencies for more accurate
+scoring.
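+
+For example, a sketch of selecting this search type explicitly (the
+index and query are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/_search?search_type=dfs_query_then_fetch' -d '
+{
+    "query" : {
+        "term" : { "user" : "kimchy" }
+    }
+}
+'
+--------------------------------------------------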
+
+[[count]]
+==== Count
+
+Parameter value: *count*.
+
+A special search type that returns the count of documents that matched the
+search request, without any docs (represented in `total_hits`), and possibly
+including facets as well. In general, this is preferable to the `count`
+API as it provides more options.
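+
+For example, a sketch of counting matching documents (the index and
+query are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/twitter/_search?search_type=count' -d '
+{
+    "query" : {
+        "term" : { "user" : "kimchy" }
+    }
+}
+'
+--------------------------------------------------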
+
+[[scan]]
+==== Scan
+
+Parameter value: *scan*.
+
+The `scan` search type allows to efficiently scroll a large result set.
+It's used first by executing a search request with scrolling and a
+query:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_search?search_type=scan&scroll=10m&size=50' -d '
+{
+ "query" : {
+ "match_all" : {}
+ }
+}
+'
+--------------------------------------------------
+
+The `scroll` parameter controls the keep alive time of the scrolling
+request and initiates the scrolling process. The timeout applies per
+round trip (i.e. between the previous scan scroll request and the next).
+
+The response will include no hits, but two important results: the
+`total_hits` will include the total hits that match the query, and the
+`scroll_id` that allows to start the scroll process. From this stage,
+the `_search/scroll` endpoint should be used to scroll the hits, feeding
+the next scroll request with the previous search result `scroll_id`. For
+example:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_search/scroll?scroll=10m' -d 'c2NhbjsxOjBLMzdpWEtqU2IyZHlmVURPeFJOZnc7MzowSzM3aVhLalNiMmR5ZlVET3hSTmZ3OzU6MEszN2lYS2pTYjJkeWZVRE94Uk5mdzsyOjBLMzdpWEtqU2IyZHlmVURPeFJOZnc7NDowSzM3aVhLalNiMmR5ZlVET3hSTmZ3Ow=='
+--------------------------------------------------
+
+Scroll requests will include a number of hits equal to the size
+multiplied by the number of primary shards.
+
+The "breaking" condition out of a scroll is when no hits has been
+returned. The total_hits will be maintained between scroll requests.
+
+Note, the scan search type does not support sorting (either on score or a
+field) or faceting.
+
+[[clear-scroll]]
+===== Clear scroll api
+
+Besides consuming the scroll search until no hits have been returned, a scroll
+search can also be aborted by deleting the `scroll_id`. This can be done via
+the clear scroll API. When the `scroll_id` has been deleted, all the
+resources needed to keep the view open will be released. Example usage:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE 'localhost:9200/_search/scroll/c2NhbjsxOjBLMzdpWEtqU2IyZHlmVURPeFJOZnc7MzowSzM3aVhLalNiMmR5ZlVET3hSTmZ3OzU6MEszN2lYS2pTYjJkeWZVRE94Uk5mdzsyOjBLMzdpWEtqU2IyZHlmVURPeFJOZnc7NDowSzM3aVhLalNiMmR5ZlVET3hSTmZ3Ow=='
+--------------------------------------------------
+
+Multiple scroll ids can be specified in a comma separated manner.
+If all scroll ids need to be cleared, the reserved `_all` value can be used instead of an actual `scroll_id`:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE 'localhost:9200/_search/scroll/_all'
+--------------------------------------------------
diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc
new file mode 100644
index 0000000..fa3d591
--- /dev/null
+++ b/docs/reference/search/request/sort.asciidoc
@@ -0,0 +1,319 @@
+[[search-request-sort]]
+=== Sort
+
+Allows to add one or more sorts on specific fields. Each sort can be
+reversed as well. The sort is defined on a per field level, with the special
+field name `_score` to sort by score.
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ { "post_date" : {"order" : "asc"}},
+ "user",
+ { "name" : "desc" },
+ { "age" : "desc" },
+ "_score"
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+==== Sort Values
+
+The sort values for each document returned are also returned as part of
+the response.
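+
+For example, a hit might look like this sketch, where the `sort` array
+holds the values used for sorting (the values are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+    "_index" : "twitter",
+    "_type" : "tweet",
+    "_id" : "1",
+    "_source" : { ... },
+    "sort" : [ 1372723200000, "kimchy" ]
+}
+--------------------------------------------------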
+
+==== Sort mode option
+
+Elasticsearch supports sorting by array or multi-valued fields. The `mode` option
+controls what array value is picked for sorting the document it belongs
+to. The `mode` option can have the following values:
+
+[horizontal]
+`min`:: Pick the lowest value.
+`max`:: Pick the highest value.
+`sum`:: Use the sum of all values as sort value. Only applicable for
+ number based array fields.
+`avg`:: Use the average of all values as sort value. Only applicable
+ for number based array fields.
+
+===== Sort mode example usage
+
+In the example below the field `price` has multiple prices per document.
+In this case the result hits will be sorted by price, ascending, based on
+the average price per document.
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_search' -d '{
+ "query" : {
+ ...
+ },
+ "sort" : [
+ {"price" : {"order" : "asc", "mode" : "avg"}}
+ ]
+}'
+--------------------------------------------------
+
+==== Sorting within nested objects
+
+Elasticsearch also supports sorting by
+fields that are inside one or more nested objects. The sorting by nested
+field support has the following parameters on top of the already
+existing sort options:
+
+`nested_path`::
+ Defines on what nested object to sort. The actual
+ sort field must be a direct field inside this nested object. The default
+ is to use the most immediate inherited nested object from the sort
+ field.
+
+`nested_filter`::
+ A filter that the inner objects inside the nested path
+ should match in order for their field values to be taken into account
+ by sorting. A common case is to repeat the query / filter inside the
+ nested filter or query. By default no `nested_filter` is active.
+
+===== Nested sorting example
+
+In the below example `offer` is a field of type `nested`. Because
+`offer` is the closest inherited nested field, it is picked as
+`nested_path`. Only the inner objects that have the color blue will
+participate in sorting.
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_search' -d '{
+ "query" : {
+ ...
+ },
+ "sort" : [
+ {
+ "offer.price" : {
+ "mode" : "avg",
+ "order" : "asc",
+ "nested_filter" : {
+ "term" : { "offer.color" : "blue" }
+ }
+ }
+ }
+ ]
+}'
+--------------------------------------------------
+
+Nested sorting is also supported when sorting by
+scripts and sorting by geo distance.
+
+==== Missing Values
+
+The `missing` parameter specifies how docs which are missing
+the field should be treated: the `missing` value can be
+set to `_last`, `_first`, or a custom value (that
+will be used for missing docs as the sort value). For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ { "price" : {"missing" : "_last"} },
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+NOTE: If a nested inner object doesn't match with
+the `nested_filter` then a missing value is used.
+
+==== Ignoring Unmapped Fields
+
+By default, the search request will fail if there is no mapping
+associated with a field. The `ignore_unmapped` option allows to ignore
+fields that have no mapping and not sort by them. Here is an example of
+how it can be used:
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ { "price" : {"ignore_unmapped" : true} },
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+==== Geo Distance Sorting
+
+Allow to sort by `_geo_distance`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ {
+ "_geo_distance" : {
+ "pin.location" : [-70, 40],
+ "order" : "asc",
+ "unit" : "km"
+ }
+ }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+Note: the geo distance sorting supports `sort_mode` options: `min`,
+`max` and `avg`.
+
+The following formats are supported in providing the coordinates:
+
+===== Lat Lon as Properties
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ {
+ "_geo_distance" : {
+ "pin.location" : {
+ "lat" : 40,
+ "lon" : -70
+ },
+ "order" : "asc",
+ "unit" : "km"
+ }
+ }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+===== Lat Lon as String
+
+Format is `lat,lon`.
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ {
+ "_geo_distance" : {
+ "pin.location" : "-70,40",
+ "order" : "asc",
+ "unit" : "km"
+ }
+ }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+===== Geohash
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ {
+ "_geo_distance" : {
+ "pin.location" : "drm3btev3e86",
+ "order" : "asc",
+ "unit" : "km"
+ }
+ }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+===== Lat Lon as Array
+
+Format is `[lon, lat]`. Note the order of lon/lat here, in order to
+conform with http://geojson.org/[GeoJSON].
+
+[source,js]
+--------------------------------------------------
+{
+ "sort" : [
+ {
+ "_geo_distance" : {
+ "pin.location" : [-70, 40],
+ "order" : "asc",
+ "unit" : "km"
+ }
+ }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+==== Script Based Sorting
+
+Allows to sort based on custom scripts. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+ "query" : {
+ ....
+ },
+ "sort" : {
+ "_script" : {
+ "script" : "doc['field_name'].value * factor",
+ "type" : "number",
+ "params" : {
+ "factor" : 1.1
+ },
+ "order" : "asc"
+ }
+ }
+}
+--------------------------------------------------
+
+Note, for simple script based sorting, it is recommended
+to use the `function_score` query instead, as sorting based on score is faster.
+
+==== Track Scores
+
+When sorting on a field, scores are not computed. By setting
+`track_scores` to true, scores will still be computed and tracked.
+
+[source,js]
+--------------------------------------------------
+{
+ "track_scores": true,
+ "sort" : [
+ { "post_date" : {"reverse" : true} },
+ { "name" : "desc" },
+ { "age" : "desc" }
+ ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+==== Memory Considerations
+
+When sorting, the relevant sorted field values are loaded into memory.
+This means that per shard, there should be enough memory to contain
+them. For string based types, the field sorted on should not be analyzed
+/ tokenized. For numeric types, if possible, it is recommended to
+explicitly set the type to narrower types (like `short`, `integer` and
+`float`).
diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc
new file mode 100644
index 0000000..7b81742
--- /dev/null
+++ b/docs/reference/search/request/source-filtering.asciidoc
@@ -0,0 +1,65 @@
+[[search-request-source-filtering]]
+=== Source filtering
+
+added[1.0.0.Beta1]
+
+
+Allows to control how the `_source` field is returned with every hit.
+
+By default operations return the contents of the `_source` field unless
+you have used the `fields` parameter or if the `_source` field is disabled.
+
+You can turn off `_source` retrieval by using the `_source` parameter.
+
+To disable `_source` retrieval, set it to `false`:
+
+[source,js]
+--------------------------------------------------
+{
+ "_source": false,
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+The `_source` parameter also accepts one or more wildcard patterns to control what parts of the `_source` should be returned.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+{
+ "_source": "obj.*",
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+Or
+
+[source,js]
+--------------------------------------------------
+{
+ "_source": [ "obj1.*", "obj2.*" ],
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
+
+Finally, for complete control, you can specify both include and exclude patterns:
+
+[source,js]
+--------------------------------------------------
+{
+ "_source": {
+ "include": [ "obj1.*", "obj2.*" ],
+ "exclude": [ "*.description" ],
+ }
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc
new file mode 100644
index 0000000..3b2329a
--- /dev/null
+++ b/docs/reference/search/request/version.asciidoc
@@ -0,0 +1,14 @@
+[[search-request-version]]
+=== Version
+
+Returns a version for each search hit.
+
+[source,js]
+--------------------------------------------------
+{
+ "version": true,
+ "query" : {
+ "term" : { "user" : "kimchy" }
+ }
+}
+--------------------------------------------------
diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc
new file mode 100644
index 0000000..5483c19
--- /dev/null
+++ b/docs/reference/search/search.asciidoc
@@ -0,0 +1,52 @@
+[[search-search]]
+== Search
+
+The search API allows to execute a search query and get back search hits
+that match the query. The query can either be provided using a simple
+<<search-uri-request,query string as a parameter>>, or using a
+<<search-request-body,request body>>.
+
+["float",id="search-multi-index-type"]
+=== Multi-Index, Multi-Type
+
+All search APIs can be applied across multiple types within an index, and
+across multiple indices with support for the
+<<multi-index,multi index syntax>>. For
+example, we can search on all documents across all types within the
+twitter index:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/_search?q=user:kimchy'
+--------------------------------------------------
+
+We can also search within specific types:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet,user/_search?q=user:kimchy'
+--------------------------------------------------
+
+We can also search all tweets with a certain tag across several indices
+(for example, when each user has his own index):
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/kimchy,elasticsearch/tweet/_search?q=tag:wow'
+--------------------------------------------------
+
+Or we can search all tweets across all available indices using the `_all`
+placeholder:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_all/tweet/_search?q=tag:wow'
+--------------------------------------------------
+
+Or even search across all indices and all types:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_search?q=tag:wow'
+--------------------------------------------------
+
diff --git a/docs/reference/search/suggesters.asciidoc b/docs/reference/search/suggesters.asciidoc
new file mode 100644
index 0000000..e753927
--- /dev/null
+++ b/docs/reference/search/suggesters.asciidoc
@@ -0,0 +1,275 @@
+[[search-suggesters]]
+== Suggesters
+
+The suggest feature suggests similar looking terms based on a provided
+text by using a suggester. Parts of the suggest feature are still under
+development.
+
+The suggest request part is either defined alongside the query part in a
+`_search` request or via the REST `_suggest` endpoint.
+
+[source,js]
+--------------------------------------------------
+curl -s -XPOST 'localhost:9200/_search' -d '{
+ "query" : {
+ ...
+ },
+ "suggest" : {
+ ...
+ }
+}'
+--------------------------------------------------
+
+Suggest requests executed against the `_suggest` endpoint should omit
+the surrounding `suggest` element, which is only used if the suggest
+request is part of a search.
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_suggest' -d '{
+ "my-suggestion" : {
+ "text" : "the amsterdma meetpu",
+ "term" : {
+ "field" : "body"
+ }
+ }
+}'
+--------------------------------------------------
+
+Several suggestions can be specified per request. Each suggestion is
+identified with an arbitrary name. In the example below two suggestions
+are requested. Both `my-suggest-1` and `my-suggest-2` suggestions use
+the `term` suggester, but have a different `text`.
+
+[source,js]
+--------------------------------------------------
+"suggest" : {
+ "my-suggest-1" : {
+ "text" : "the amsterdma meetpu",
+ "term" : {
+ "field" : "body"
+ }
+ },
+ "my-suggest-2" : {
+ "text" : "the rottredam meetpu",
+ "term" : {
+ "field" : "title",
+ }
+ }
+}
+--------------------------------------------------
+
+The below suggest response example includes the suggestion response for
+`my-suggest-1` and `my-suggest-2`. Each suggestion part contains
+entries. Each entry is effectively a token from the suggest text and
+contains the suggestion entry text, the original start offset and length
+in the suggest text, and, if found, an arbitrary number of options.
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+ "suggest": {
+ "my-suggest-1": [
+ {
+ "text" : "amsterdma",
+ "offset": 4,
+ "length": 9,
+ "options": [
+ ...
+ ]
+ },
+ ...
+ ],
+ "my-suggest-2" : [
+ ...
+ ]
+ }
+ ...
+}
+--------------------------------------------------
+
+Each options array contains an option object that includes the
+suggested text, its document frequency and score compared to the suggest
+entry text. The meaning of the score depends on the used suggester. The
+term suggester's score is based on the edit distance.
+
+[source,js]
+--------------------------------------------------
+"options": [
+ {
+ "text": "amsterdam",
+ "freq": 77,
+ "score": 0.8888889
+ },
+ ...
+]
+--------------------------------------------------
+
+[float]
+[[global-suggest]]
+=== Global suggest text
+
+To avoid repetition of the suggest text, it is possible to define a
+global text. In the example below the suggest text is defined globally
+and applies to the `my-suggest-1` and `my-suggest-2` suggestions.
+
+[source,js]
+--------------------------------------------------
+"suggest" : {
+ "text" : "the amsterdma meetpu"
+ "my-suggest-1" : {
+ "term" : {
+ "field" : "title"
+ }
+ },
+ "my-suggest-2" : {
+ "term" : {
+ "field" : "body"
+ }
+ }
+}
+--------------------------------------------------
+
+The suggest text can, in the above example, also be specified as a
+suggestion specific option. The suggest text specified on the suggestion
+level overrides the suggest text on the global level.
+
+[float]
+=== Other suggest example
+
+In the below example we request suggestions for the following suggest
+text: `devloping distibutd saerch engies` on the `title` field with a
+maximum of 3 suggestions per term inside the suggest text. Note that in
+this example we use the `count` search type. This isn't required, but a
+nice optimization. The suggestions are gathered in the `query` phase and
+in the case that we only care about suggestions (so no hits) we don't
+need to execute the `fetch` phase.
+
+[source,js]
+--------------------------------------------------
+curl -s -XPOST 'localhost:9200/_search?search_type=count' -d '{
+ "suggest" : {
+ "my-title-suggestions-1" : {
+ "text" : "devloping distibutd saerch engies",
+ "term" : {
+ "size" : 3,
+ "field" : "title"
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+The above request could yield the response shown in the code example
+below. As you can see, if we take the first suggested option of each
+suggestion entry, we get `developing distributed search engines` as the
+result.
+
+[source,js]
+--------------------------------------------------
+{
+ ...
+ "suggest": {
+ "my-title-suggestions-1": [
+ {
+ "text": "devloping",
+ "offset": 0,
+ "length": 9,
+ "options": [
+ {
+ "text": "developing",
+ "freq": 77,
+ "score": 0.8888889
+ },
+ {
+ "text": "deloping",
+ "freq": 1,
+ "score": 0.875
+ },
+ {
+ "text": "deploying",
+ "freq": 2,
+ "score": 0.7777778
+ }
+ ]
+ },
+ {
+ "text": "distibutd",
+ "offset": 10,
+ "length": 9,
+ "options": [
+ {
+ "text": "distributed",
+ "freq": 217,
+ "score": 0.7777778
+ },
+ {
+ "text": "disributed",
+ "freq": 1,
+ "score": 0.7777778
+ },
+ {
+ "text": "distribute",
+ "freq": 1,
+ "score": 0.7777778
+ }
+ ]
+ },
+ {
+ "text": "saerch",
+ "offset": 20,
+ "length": 6,
+ "options": [
+ {
+ "text": "search",
+ "freq": 1038,
+ "score": 0.8333333
+ },
+ {
+ "text": "smerch",
+ "freq": 3,
+ "score": 0.8333333
+ },
+ {
+ "text": "serch",
+ "freq": 2,
+ "score": 0.8
+ }
+ ]
+ },
+ {
+ "text": "engies",
+ "offset": 27,
+ "length": 6,
+ "options": [
+ {
+ "text": "engines",
+ "freq": 568,
+ "score": 0.8333333
+ },
+ {
+ "text": "engles",
+ "freq": 3,
+ "score": 0.8333333
+ },
+ {
+ "text": "eggies",
+ "freq": 1,
+ "score": 0.8333333
+ }
+ ]
+ }
+ ]
+ }
+ ...
+}
+--------------------------------------------------
+
+include::suggesters/term-suggest.asciidoc[]
+
+include::suggesters/phrase-suggest.asciidoc[]
+
+include::suggesters/completion-suggest.asciidoc[]
+
+
diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc
new file mode 100644
index 0000000..41da0b5
--- /dev/null
+++ b/docs/reference/search/suggesters/completion-suggest.asciidoc
@@ -0,0 +1,231 @@
+[[search-suggesters-completion]]
+=== Completion Suggester
+
+NOTE: In order to understand the format of suggestions, please
+read the <<search-suggesters>> page first.
+
+The `completion` suggester is a so-called prefix suggester. It does not
+do spell correction like the `term` or `phrase` suggesters, but provides
+basic `auto-complete` functionality.
+
+==== Why another suggester? Why not prefix queries?
+
+The first question that comes to mind when reading about a prefix
+suggester is why you should use it at all if you have prefix queries
+already. The answer is simple: prefix suggestions are fast.
+
+The data structures are internally backed by Lucene's
+`AnalyzingSuggester`, which uses FSTs to execute suggestions. Usually
+these data structures are costly to create, stored in-memory and need to
+be rebuilt every now and then to reflect changes in your indexed
+documents. The `completion` suggester circumvents this by storing the
+FST as part of your index at index time. This allows for really fast
+loads and executions.
+
+[[completion-suggester-mapping]]
+==== Mapping
+
+In order to use this feature, you have to specify a special mapping for
+this field, which enables the special storage it requires.
+
+[source,js]
+--------------------------------------------------
+curl -X PUT localhost:9200/music
+curl -X PUT localhost:9200/music/song/_mapping -d '{
+ "song" : {
+ "properties" : {
+ "name" : { "type" : "string" },
+ "suggest" : { "type" : "completion",
+ "index_analyzer" : "simple",
+ "search_analyzer" : "simple",
+ "payloads" : true
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+Mapping supports the following parameters:
+
+`index_analyzer`::
+ The index analyzer to use, defaults to `simple`.
+
+`search_analyzer`::
+ The search analyzer to use, defaults to `simple`.
+ In case you are wondering why we did not opt for the `standard`
+ analyzer: we try to have easy-to-understand behaviour here, and if you
+ index the field content `At the Drive-in`, you will not get any
+ suggestions for `a`, nor for `d` (the first non-stopword).
+
+
+`payloads`::
+ Enables the storing of payloads, defaults to `false`.
+
+`preserve_separators`::
+ Preserves the separators, defaults to `true`.
+ If disabled, you could find a field starting with `Foo Fighters`, if you
+ suggest for `foof`.
+
+`preserve_position_increments`::
+ Enables position increments, defaults
+ to `true`. If disabled and using a stopwords analyzer, you could get a
+ field starting with `The Beatles`, if you suggest for `b`. *Note*: you
+ could also achieve this by indexing two inputs, `Beatles` and
+ `The Beatles`; there is no need to change a simple analyzer if you are
+ able to enrich your data (see the sketch after this parameter list).
+
+`max_input_length`::
+ Limits the length of a single input, defaults to `50` UTF-16 code points.
+ This limit is only used at index time to reduce the total number of
+ characters per input string, in order to prevent massive inputs from
+ bloating the underlying data structure. Most use cases won't be affected
+ by the default value, since prefix completions seldom grow beyond prefixes
+ longer than a handful of characters. (Old name "max_input_len" is deprecated)
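+
+As a sketch of the enrichment alternative mentioned under
+`preserve_position_increments`, both variants can simply be indexed as
+inputs (a hypothetical document, using the `music` index mapped above):
+
+[source,js]
+--------------------------------------------------
+curl -X PUT 'localhost:9200/music/song/2?refresh=true' -d '{
+ "name" : "The Beatles",
+ "suggest" : {
+ "input": [ "Beatles", "The Beatles" ]
+ }
+}'
+--------------------------------------------------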
+
+[[indexing]]
+==== Indexing
+
+[source,js]
+--------------------------------------------------
+curl -X PUT 'localhost:9200/music/song/1?refresh=true' -d '{
+ "name" : "Nevermind",
+ "suggest" : {
+ "input": [ "Nevermind", "Nirvana" ],
+ "output": "Nirvana - Nevermind",
+ "payload" : { "artistId" : 2321 },
+ "weight" : 34
+ }
+}'
+--------------------------------------------------
+
+The following parameters are supported:
+
+`input`::
+ The input to store. This can be an array of strings or just
+ a string. This field is mandatory.
+
+`output`::
+ The string to return, if a suggestion matches. This is very
+ useful to normalize outputs (i.e. have them always in the format
+ `artist - songname`). The result is de-duplicated if several documents
+ have the same output, i.e. only one is returned as part of the
+ suggest result. This is optional.
+
+`payload`::
+ An arbitrary JSON object, which is simply returned in the
+ suggest option. You could store data like the id of a document, in order
+ to load it from elasticsearch without executing another search (which
+ might not yield any results, if `input` and `output` differ strongly).
+
+`weight`::
+ A positive integer, which defines a weight and allows you to
+ rank your suggestions. This field is optional.
+
+NOTE: You can opt for the shortest form shown below, which even allows
+you to use the completion suggest inside of multi fields. But keep in
+mind that you will then lose most of its features: you will not be able
+to use several inputs, an output, payloads or weights.
+
+[source,js]
+--------------------------------------------------
+{
+ "suggest" : "Nirvana"
+}
+--------------------------------------------------
+
+NOTE: The suggest data structure might not reflect deletes on
+documents immediately. You may need to do an <<indices-optimize>> for that.
+You can call optimize with `only_expunge_deletes=true` to only cater for
+deletes, or alternatively call a <<index-modules-merge>> operation.
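+
+For example, to only expunge deletes on the `music` index from above:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/music/_optimize?only_expunge_deletes=true'
+--------------------------------------------------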
+
+[[querying]]
+==== Querying
+
+Suggesting works as usual, except that you have to specify the suggest
+type as `completion`.
+
+[source,js]
+--------------------------------------------------
+curl -X POST 'localhost:9200/music/_suggest?pretty' -d '{
+ "song-suggest" : {
+ "text" : "n",
+ "completion" : {
+ "field" : "suggest"
+ }
+ }
+}'
+
+{
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ },
+ "song-suggest" : [ {
+ "text" : "n",
+ "offset" : 0,
+ "length" : 4,
+ "options" : [ {
+ "text" : "Nirvana - Nevermind",
+ "score" : 34.0, "payload" : {"artistId":2321}
+ } ]
+ } ]
+}
+--------------------------------------------------
+
+As you can see, the payload is included in the response if configured
+appropriately. If you configured a weight for a suggestion, this weight
+is used as the `score`. Also, the `text` field uses the `output` of your
+indexed suggestion, if configured, otherwise the matched part of the
+`input` field.
+
+
+[[fuzzy]]
+==== Fuzzy queries
+
+The completion suggester also supports fuzzy queries - this means you
+can actually have a typo in your search and still get results back.
+
+[source,js]
+--------------------------------------------------
+curl -X POST 'localhost:9200/music/_suggest?pretty' -d '{
+ "song-suggest" : {
+ "text" : "n",
+ "completion" : {
+ "field" : "suggest",
+ "fuzzy" : {
+ "fuzziness" : 2
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+The fuzzy query can take specific fuzzy parameters.
+The following parameters are supported:
+
+[horizontal]
+`fuzziness`::
+ The fuzziness factor, defaults to `AUTO`.
+ See <<fuzziness>> for allowed settings.
+
+`transpositions`::
+ Sets whether transpositions should be counted
+ as one or two changes, defaults to `true`.
+
+`min_length`::
+ Minimum length of the input before fuzzy
+ suggestions are returned, defaults to `3`.
+
+`prefix_length`::
+ Minimum length of the input which is not
+ checked for fuzzy alternatives, defaults to `1`.
+
+`unicode_aware`::
+ If `true`, all measurements (like edit distance,
+ transpositions and lengths) are measured in Unicode code points
+ (actual letters) instead of bytes.
+
+NOTE: If you want to stick with the default values, but
+ still use fuzzy, you can either use `fuzzy: {}`
+ or `fuzzy: true`.
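+
+For instance, a sketch using the short `fuzzy: true` form against the
+`music` index from above:
+
+[source,js]
+--------------------------------------------------
+curl -X POST 'localhost:9200/music/_suggest?pretty' -d '{
+ "song-suggest" : {
+ "text" : "n",
+ "completion" : {
+ "field" : "suggest",
+ "fuzzy" : true
+ }
+ }
+}'
+--------------------------------------------------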
diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc
new file mode 100644
index 0000000..3dc305a
--- /dev/null
+++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc
@@ -0,0 +1,319 @@
+[[search-suggesters-phrase]]
+=== Phrase Suggester
+
+NOTE: In order to understand the format of suggestions, please
+read the <<search-suggesters>> page first.
+
+The `term` suggester provides a very convenient API to access word
+alternatives on a per token basis within a certain string distance. The API
+allows accessing each token in the stream individually while
+suggest-selection is left to the API consumer. Yet, often pre-selected
+suggestions are required in order to present to the end-user. The
+`phrase` suggester adds additional logic on top of the `term` suggester
+to select entire corrected phrases instead of individual tokens, weighted
+based on `ngram-language` models. In practice this suggester will be
+able to make better decisions about which tokens to pick based on
+co-occurrence and frequencies.
+
+==== API Example
+
+The `phrase` request is defined alongside the query part in the json
+request:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_search' -d '{
+ "suggest" : {
+ "text" : "Xor the Got-Jewel",
+ "simple_phrase" : {
+ "phrase" : {
+ "analyzer" : "body",
+ "field" : "bigram",
+ "size" : 1,
+ "real_word_error_likelihood" : 0.95,
+ "max_errors" : 0.5,
+ "gram_size" : 2,
+ "direct_generator" : [ {
+ "field" : "body",
+ "suggest_mode" : "always",
+ "min_word_length" : 1
+ } ],
+ "highlight": {
+ "pre_tag": "<em>",
+ "post_tag": "</em>"
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+The response contains suggestions scored by the most likely spell
+correction first. In this case we received the expected correction
+`xorr the god jewel` first, while the second correction is less
+conservative, where only one of the errors is corrected. Note, the
+request is executed with `max_errors` set to `0.5`, so 50% of the terms
+can contain misspellings (see the parameter descriptions below).
+
+[source,js]
+--------------------------------------------------
+ {
+ "took" : 5,
+ "timed_out" : false,
+ "_shards" : {
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ },
+ "hits" : {
+ "total" : 2938,
+ "max_score" : 0.0,
+ "hits" : [ ]
+ },
+ "suggest" : {
+ "simple_phrase" : [ {
+ "text" : "Xor the Got-Jewel",
+ "offset" : 0,
+ "length" : 17,
+ "options" : [ {
+ "text" : "xorr the god jewel",
+ "highlighted": "<em>xorr</em> the <em>god</em> jewel",
+ "score" : 0.17877324
+ }, {
+ "text" : "xor the god jewel",
+ "highlighted": "xor the <em>god</em> jewel",
+ "score" : 0.14231323
+ } ]
+ } ]
+ }
+}
+--------------------------------------------------
+
+==== Basic Phrase suggest API parameters
+
+[horizontal]
+`field`::
+ the name of the field used to do n-gram lookups for the
+ language model; the suggester will use this field to gain statistics to
+ score corrections. This field is mandatory.
+
+`gram_size`::
+ sets the maximum size of the n-grams (shingles) in the `field`.
+ If the field doesn't contain n-grams (shingles) this should be omitted
+ or set to `1`. Note that Elasticsearch tries to detect the gram size
+ based on the specified `field`. If the field uses a `shingle` filter,
+ the `gram_size` is set to the `max_shingle_size` if not explicitly set.
+
+`real_word_error_likelihood`::
+ the likelihood of a term being
+ misspelled even if the term exists in the dictionary. The default is
+ `0.95`, corresponding to 5% of the real words being misspelled.
+
+
+`confidence`::
+ The confidence level defines a factor applied to the
+ input phrases score which is used as a threshold for other suggest
+ candidates. Only candidates that score higher than the threshold will be
+ included in the result. For instance a confidence level of `1.0` will
+ only return suggestions that score higher than the input phrase. If set
+ to `0.0` the top N candidates are returned. The default is `1.0`.
+
+`max_errors`::
+ the maximum percentage of the terms that are
+ considered to be misspellings in order to form a correction. This
+ method accepts a float value in the range `[0..1)` as a fraction of
+ the actual query terms, or a number `>=1` as an absolute number of
+ query terms. The default is set to `1.0`, which means that only
+ corrections with at most one misspelled term are returned. Note that
+ setting this too high can negatively impact performance. Low values
+ like `1` or `2` are recommended; otherwise the time spent in suggest
+ calls might exceed the time spent in query execution.
+
+`separator`::
+ the separator that is used to separate terms in the
+ bigram field. If not set the whitespace character is used as a
+ separator.
+
+`size`::
+ the number of candidates that are generated for each
+ individual query term. Low numbers like `3` or `5` typically produce good
+ results. Raising this can bring up terms with higher edit distances. The
+ default is `5`.
+
+`analyzer`::
+ Sets the analyzer to analyse the suggest text with.
+ Defaults to the search analyzer of the suggest field passed via `field`.
+
+`shard_size`::
+ Sets the maximum number of suggested terms to be
+ retrieved from each individual shard. During the reduce phase, only the
+ top N suggestions are returned based on the `size` option. Defaults to
+ `5`.
+
+`text`::
+ Sets the text / query to provide suggestions for.
+
+`highlight`::
+ Sets up suggestion highlighting. If not provided then
+ no `highlighted` field is returned. If provided, it must
+ contain exactly `pre_tag` and `post_tag`, which are
+ wrapped around the changed tokens. If multiple tokens
+ in a row are changed the entire phrase of changed tokens
+ is wrapped rather than each token.
+
+==== Smoothing Models
+
+The `phrase` suggester supports multiple smoothing models to balance
+weight between infrequent grams (grams (shingles) that do not exist in
+the index) and frequent grams (that appear at least once in the index).
+
+[horizontal]
+`stupid_backoff`::
+ a simple backoff model that backs off to lower
+ order n-gram models if the higher order count is `0` and discounts the
+ lower order n-gram model by a constant factor. The default `discount` is
+ `0.4`. Stupid Backoff is the default model.
+
+`laplace`::
+ a smoothing model that uses additive smoothing, where a
+ constant (typically `1.0` or smaller) is added to all counts to balance
+ weights. The default `alpha` is `0.5` (see the example after this
+ list).
+
+`linear_interpolation`::
+ a smoothing model that takes the weighted
+ mean of the unigrams, bigrams and trigrams based on user supplied
+ weights (lambdas). Linear Interpolation doesn't have any default values.
+ All parameters (`trigram_lambda`, `bigram_lambda`, `unigram_lambda`)
+ must be supplied.
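+
+As a sketch, a smoothing model is selected via a `smoothing` object
+inside the `phrase` suggester; here the `laplace` model with a
+non-default `alpha` (the values are illustrative):
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_search' -d '{
+ "suggest" : {
+ "text" : "Xor the Got-Jewel",
+ "simple_phrase" : {
+ "phrase" : {
+ "field" : "bigram",
+ "size" : 1,
+ "smoothing" : {
+ "laplace" : {
+ "alpha" : 0.7
+ }
+ }
+ }
+ }
+ }
+}'
+--------------------------------------------------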
+
+==== Candidate Generators
+
+The `phrase` suggester uses candidate generators to produce a list of
+possible terms per term in the given text. A single candidate generator
+is similar to a `term` suggester called for each individual term in the
+text. The output of the generators is subsequently scored in combination
+with the candidates from the other terms to form suggestion candidates.
+
+Currently only one type of candidate generator is supported, the
+`direct_generator`. The Phrase suggest API accepts a list of generators
+under the key `direct_generator`; each of the generators in the list is
+called per term in the original text.
+
+==== Direct Generators
+
+The direct generators support the following parameters:
+
+[horizontal]
+`field`::
+ The field to fetch the candidate suggestions from. This is
+ a required option that either needs to be set globally or per
+ suggestion.
+
+`size`::
+ The maximum corrections to be returned per suggest text token.
+
+`suggest_mode`::
+ The suggest mode controls which suggestions are
+ included, and for which terms in the suggest text suggestions should
+ be given. Three possible values can be specified:
+ ** `missing`: Only suggest terms in the suggest text that aren't in the
+ index. This is the default.
+ ** `popular`: Only suggest suggestions that occur in more docs than the
+ original suggest text term.
+ ** `always`: Suggest any matching suggestions based on terms in the
+ suggest text.
+
+`max_edits`::
+ The maximum edit distance candidate suggestions can have
+ in order to be considered as a suggestion. Can only be a value between 1
+ and 2. Any other value results in a bad request error being thrown.
+ Defaults to 2.
+
+`prefix_length`::
+ The minimal number of prefix characters that must
+ match in order to be a candidate suggestion. Defaults to 1. Increasing
+ this number improves spellcheck performance. Usually misspellings don't
+ occur in the beginning of terms. (Old name "prefix_len" is deprecated)
+
+`min_word_length`::
+ The minimum length a suggest text term must have in
+ order to be included. Defaults to 4. (Old name "min_word_len" is deprecated)
+
+`max_inspections`::
+ A factor that is used to multiply with the
+ `shards_size` in order to inspect more candidate spell corrections on
+ the shard level. Can improve accuracy at the cost of performance.
+ Defaults to 5.
+
+`min_doc_freq`::
+ The minimal threshold in number of documents a
+ suggestion should appear in. This can be specified as an absolute number
+ or as a relative percentage of the number of documents. This can improve
+ quality by only suggesting high frequency terms. Defaults to 0f and is
+ not enabled. If a value higher than 1 is specified, then the number
+ cannot be fractional. The shard level document frequencies are used for
+ this option.
+
+`max_term_freq`::
+ The maximum threshold in number of documents a
+ suggest text token can exist in, in order to be included. Can be a
+ relative percentage number (e.g. 0.4) or an absolute number to represent
+ document frequencies. If a value higher than 1 is specified, then the
+ value cannot be fractional. Defaults to 0.01f. This can be used to
+ exclude high frequency terms from being spellchecked: high frequency
+ terms are usually spelled correctly, and excluding them also improves
+ the spellcheck performance. The shard level document frequencies are
+ used for this option.
+
+`pre_filter`::
+ a filter (analyzer) that is applied to each of the
+ tokens passed to this candidate generator. This filter is applied to the
+ original token before candidates are generated.
+
+`post_filter`::
+ a filter (analyzer) that is applied to each of the
+ generated tokens before they are passed to the actual phrase scorer.
+
+The following example shows a `phrase` suggest call with two generators:
+the first one is using a field containing ordinary indexed terms, and the
+second one uses a field whose terms are indexed with a `reverse` filter
+(tokens are indexed in reverse order). This is used to overcome the limitation
+of the direct generators to require a constant prefix to provide
+high-performance suggestions. The `pre_filter` and `post_filter` options
+accept ordinary analyzer names.
+
+[source,js]
+--------------------------------------------------
+curl -s -XPOST 'localhost:9200/_search' -d '{
+ "suggest" : {
+ "text" : "Xor the Got-Jewel",
+ "simple_phrase" : {
+ "phrase" : {
+ "analyzer" : "body",
+ "field" : "bigram",
+ "size" : 4,
+ "real_word_error_likelihood" : 0.95,
+ "confidence" : 2.0,
+ "gram_size" : 2,
+ "direct_generator" : [ {
+ "field" : "body",
+ "suggest_mode" : "always",
+ "min_word_length" : 1
+ }, {
+ "field" : "reverse",
+ "suggest_mode" : "always",
+ "min_word_length" : 1,
+ "pre_filter" : "reverse",
+ "post_filter" : "reverse"
+ } ]
+ }
+ }
+ }
+}'
+--------------------------------------------------
+
+`pre_filter` and `post_filter` can also be used to inject synonyms after
+candidates are generated. For instance, for the query `captain usq` we
+might generate a candidate `usa` for the term `usq`, which is a synonym
+for `america`. This allows us to present `captain america` to the user
+if this phrase scores high enough.
diff --git a/docs/reference/search/suggesters/term-suggest.asciidoc b/docs/reference/search/suggesters/term-suggest.asciidoc
new file mode 100644
index 0000000..f6331c1
--- /dev/null
+++ b/docs/reference/search/suggesters/term-suggest.asciidoc
@@ -0,0 +1,110 @@
+[[search-suggesters-term]]
+=== Term suggester
+
+NOTE: In order to understand the format of suggestions, please
+read the <<search-suggesters>> page first.
+
+The `term` suggester suggests terms based on edit distance. The provided
+suggest text is analyzed before terms are suggested. The suggested terms
+are provided per analyzed suggest text token. The `term` suggester
+doesn't take into account the query that is part of the request.
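+
+As a sketch, a `term` suggest request against the dedicated `_suggest`
+endpoint (assuming an index with a `body` field) could look like this:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST 'localhost:9200/_suggest' -d '{
+ "my-suggestion" : {
+ "text" : "the amsterdma meetpu",
+ "term" : {
+ "field" : "body"
+ }
+ }
+}'
+--------------------------------------------------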
+
+==== Common suggest options:
+
+[horizontal]
+`text`::
+ The suggest text. The suggest text is a required option that
+ needs to be set globally or per suggestion.
+
+`field`::
+ The field to fetch the candidate suggestions from. This is
+ an required option that either needs to be set globally or per
+ suggestion.
+
+`analyzer`::
+ The analyzer to analyse the suggest text with. Defaults
+ to the search analyzer of the suggest field.
+
+`size`::
+ The maximum corrections to be returned per suggest text
+ token.
+
+`sort`::
+ Defines how suggestions should be sorted per suggest text
+ term. Two possible values:
++
+ ** `score`: Sort by score first, then document frequency and
+ then the term itself.
+ ** `frequency`: Sort by document frequency first, then similarity
+ score and then the term itself.
++
+`suggest_mode`::
+ The suggest mode controls which suggestions are
+ included, and for which terms in the suggest text suggestions should
+ be given. Three possible values can be specified:
++
+ ** `missing`: Only suggest terms in the suggest text that aren't in
+ the index. This is the default.
+ ** `popular`: Only suggest suggestions that occur in more docs than
+ the original suggest text term.
+ ** `always`: Suggest any matching suggestions based on terms in the
+ suggest text.
+
+==== Other term suggest options:
+
+[horizontal]
+`lowercase_terms`::
+ Lower cases the suggest text terms after text analysis.
+
+`max_edits`::
+ The maximum edit distance candidate suggestions can
+ have in order to be considered as a suggestion. Can only be a value
+ between 1 and 2. Any other value results in a bad request error being
+ thrown. Defaults to 2.
+
+`prefix_length`::
+ The minimal number of prefix characters that must
+ match in order to be a candidate suggestion. Defaults to 1. Increasing
+ this number improves spellcheck performance. Usually misspellings don't
+ occur in the beginning of terms. (Old name "prefix_len" is deprecated)
+
+`min_word_length`::
+ The minimum length a suggest text term must have in
+ order to be included. Defaults to 4. (Old name "min_word_len" is deprecated)
+
+`shard_size`::
+ Sets the maximum number of suggestions to be retrieved
+ from each individual shard. During the reduce phase only the top N
+ suggestions are returned based on the `size` option. Defaults to the
+ `size` option. Setting this to a value higher than the `size` can be
+ useful in order to get a more accurate document frequency for spelling
+ corrections at the cost of performance. Due to the fact that terms are
+ partitioned amongst shards, the shard level document frequencies of
+ spelling corrections may not be precise. Increasing this will make these
+ document frequencies more precise.
+
+`max_inspections`::
+ A factor that is used to multiply with the
+ `shards_size` in order to inspect more candidate spell corrections on
+ the shard level. Can improve accuracy at the cost of performance.
+ Defaults to 5.
+
+`min_doc_freq`::
+ The minimal threshold in number of documents a
+ suggestion should appear in. This can be specified as an absolute number
+ or as a relative percentage of the number of documents. This can improve
+ quality by only suggesting high frequency terms. Defaults to 0f and is
+ not enabled. If a value higher than 1 is specified, then the number
+ cannot be fractional. The shard level document frequencies are used for
+ this option.
+
+`max_term_freq`::
+ The maximum threshold in number of documents a
+ suggest text token can exist in, in order to be included. Can be a
+ relative percentage number (e.g. 0.4) or an absolute number to represent
+ document frequencies. If a value higher than 1 is specified, then the
+ value cannot be fractional. Defaults to 0.01f. This can be used to
+ exclude high frequency terms from being spellchecked: high frequency
+ terms are usually spelled correctly, and excluding them also improves
+ the spellcheck performance. The shard level document frequencies are
+ used for this option.
diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc
new file mode 100644
index 0000000..e5d1dbe
--- /dev/null
+++ b/docs/reference/search/uri-request.asciidoc
@@ -0,0 +1,101 @@
+[[search-uri-request]]
+== URI Search
+
+A search request can be executed purely using a URI by providing request
+parameters. Not all search options are exposed when executing a search
+using this mode, but it can be handy for quick "curl tests". Here is an
+example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy'
+--------------------------------------------------
+
+And here is a sample response:
+
+[source,js]
+--------------------------------------------------
+{
+ "_shards":{
+ "total" : 5,
+ "successful" : 5,
+ "failed" : 0
+ },
+ "hits":{
+ "total" : 1,
+ "hits" : [
+ {
+ "_index" : "twitter",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_source" : {
+ "user" : "kimchy",
+ "postDate" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+ }
+ }
+ ]
+ }
+}
+--------------------------------------------------
+
+[float]
+=== Parameters
+
+The parameters allowed in the URI are:
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Name |Description
+|`q` |The query string (maps to the `query_string` query, see
+<<query-dsl-query-string-query,_Query String
+Query_>> for more details).
+
+|`df` |The default field to use when no field prefix is defined within the
+query.
+
+|`analyzer` |The analyzer name to be used when analyzing the query string.
+
+|`default_operator` |The default operator to be used, can be `AND` or
+`OR`. Defaults to `OR`.
+
+|`explain` |For each hit, contains an explanation of how the scoring of
+the hit was computed.
+
+|`_source`| added[1.0.0.Beta1]Set to `false` to disable retrieval of the `_source` field. You can also retrieve
+part of the document by using `_source_include` & `_source_exclude` (see the <<search-request-source-filtering, request body>>
+documentation for more details)
+
+|`fields` |The selective stored fields of the document to return for each hit,
+comma delimited. Not specifying any value will cause no fields to be returned.
+
+|`sort` |Sorting to perform. Can either be in the form of `fieldName`, or
+`fieldName:asc`/`fieldName:desc`. The fieldName can either be an actual
+field within the document, or the special `_score` name to indicate
+sorting based on scores. There can be several `sort` parameters (order
+is important).
+
+|`track_scores` |When sorting, set to `true` in order to still track
+scores and return them as part of each hit.
+
+|`timeout` |A search timeout, bounding the search request to be executed
+within the specified time value and returning the hits accumulated up to
+that point when it expires. Defaults to no timeout.
+
+|`from` |The starting index of the hits to return. Defaults to `0`.
+
+|`size` |The number of hits to return. Defaults to `10`.
+
+|`search_type` |The type of the search operation to perform. Can be
+`dfs_query_then_fetch`, `dfs_query_and_fetch`, `query_then_fetch`,
+`query_and_fetch`, `count`, `scan`. Defaults to `query_then_fetch`. See
+<<search-request-search-type,_Search Type_>> for
+more details on the different types of search that can be performed.
+
+|`lowercase_expanded_terms` |Should terms be automatically lowercased or
+not. Defaults to `true`.
+
+|`analyze_wildcard` |Should wildcard and prefix queries be analyzed or
+not. Defaults to `false`.
+|=======================================================================
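+
+As a sketch, several of these parameters can be combined (the values are
+illustrative):
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&default_operator=AND&size=5&from=0&sort=postDate:desc'
+--------------------------------------------------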
+
diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc
new file mode 100644
index 0000000..cdf9981
--- /dev/null
+++ b/docs/reference/search/validate.asciidoc
@@ -0,0 +1,77 @@
+[[search-validate]]
+== Validate API
+
+The validate API allows a user to validate a potentially expensive query
+without executing it. The following example shows how it can be used:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
+ "user" : "kimchy",
+ "post_date" : "2009-11-15T14:12:12",
+ "message" : "trying out Elasticsearch"
+}'
+--------------------------------------------------
+
+When the query is valid, the response contains `valid:true`:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/_validate/query?q=user:foo'
+{"valid":true,"_shards":{"total":1,"successful":1,"failed":0}}
+--------------------------------------------------
+
+Or, with a request body:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/_validate/query' -d '{
+ "query" : {
+ "filtered" : {
+ "query" : {
+ "query_string" : {
+ "query" : "*:*"
+ }
+ },
+ "filter" : {
+ "term" : { "user" : "kimchy" }
+ }
+ }
+ }
+}'
+{"valid":true,"_shards":{"total":1,"successful":1,"failed":0}}
+--------------------------------------------------
+
+NOTE: The query being sent in the body must be nested in a `query` key, same as
+the <<search-search,search api>> works added[1.0.0.RC1,The query was previously the top-level object].
+
+If the query is invalid, `valid` will be `false`. Here the query is
+invalid because Elasticsearch knows the post_date field should be a date
+due to dynamic mapping, and 'foo' does not correctly parse into a date:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/_validate/query?q=post_date:foo'
+{"valid":false,"_shards":{"total":1,"successful":1,"failed":0}}
+--------------------------------------------------
+
+An `explain` parameter can be specified to get more detailed information
+about why a query failed:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'http://localhost:9200/twitter/tweet/_validate/query?q=post_date:foo&pretty=true&explain=true'
+{
+ "valid" : false,
+ "_shards" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "explanations" : [ {
+ "index" : "twitter",
+ "valid" : false,
+ "error" : "org.elasticsearch.index.query.QueryParsingException: [twitter] Failed to parse; org.elasticsearch.ElasticsearchParseException: failed to parse date field [foo], tried both date format [dateOptionalTime], and timestamp number; java.lang.IllegalArgumentException: Invalid format: \"foo\""
+ } ]
+}
+--------------------------------------------------
diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc
new file mode 100644
index 0000000..cf413cd
--- /dev/null
+++ b/docs/reference/setup.asciidoc
@@ -0,0 +1,63 @@
+[[setup]]
+= Setup
+
+[partintro]
+--
+This section includes information on how to set up *elasticsearch* and
+get it running. If you haven't already, http://www.elasticsearch.org/download[download] it, and
+then check the <<setup-installation,installation>> docs.
+
+NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`.
+See <<setup-repositories>>.
+
+[[setup-installation]]
+[float]
+== Installation
+
+After link:/download[downloading] the latest release and extracting it,
+*elasticsearch* can be started using:
+
+[source,sh]
+--------------------------------------------------
+$ bin/elasticsearch
+--------------------------------------------------
+
+On *nix systems, the command will start the process in the foreground.
+To run it in the background, add the `-d` switch to it:
+
+[source,sh]
+--------------------------------------------------
+$ bin/elasticsearch -d
+--------------------------------------------------
+
+Elasticsearch is built using Java, and requires at least
+http://java.sun.com/javase/downloads/index.jsp[Java 6] in order to run.
+The version of Java to be used can be set via the `JAVA_HOME`
+environment variable.
+
+.*NIX
+*************************************************************************
+There are added features when using the `elasticsearch` shell script.
+The first, which was explained earlier, is the ability to easily run the
+process either in the foreground or the background.
+
+Another feature is the ability to pass `-X` and `-D` or getopt long style
+configuration parameters directly to the script. When set, they override
+anything set using either `JAVA_OPTS` or `ES_JAVA_OPTS`. For example:
+
+[source,sh]
+--------------------------------------------------
+$ bin/elasticsearch -Xmx2g -Xms2g -Des.index.store.type=memory --node.name=my-node
+--------------------------------------------------
+*************************************************************************
+--
+
+include::setup/configuration.asciidoc[]
+
+include::setup/as-a-service.asciidoc[]
+
+include::setup/as-a-service-win.asciidoc[]
+
+include::setup/dir-layout.asciidoc[]
+
+include::setup/repositories.asciidoc[]
diff --git a/docs/reference/setup/as-a-service-win.asciidoc b/docs/reference/setup/as-a-service-win.asciidoc
new file mode 100644
index 0000000..f77f194
--- /dev/null
+++ b/docs/reference/setup/as-a-service-win.asciidoc
@@ -0,0 +1,63 @@
+[[setup-service-win]]
+== Running as a Service on Windows
+
+Windows users can configure Elasticsearch to run as a service, so that it runs in the background or starts automatically
+at startup without any user interaction.
+This can be achieved through the `service.bat` script under the `bin/` folder, which allows one to install,
+remove, manage or configure the service, and potentially start and stop the service, all from the command line.
+
+[source,sh]
+--------------------------------------------------
+c:\elasticsearch-0.90.5\bin>service
+
+Usage: service.bat install|remove|start|stop|manager [SERVICE_ID]
+--------------------------------------------------
+
+The script requires one parameter (the command to execute) followed by an optional one indicating the service
+id (useful when installing multiple Elasticsearch services).
+
+The commands available are:
+
+[horizontal]
+`install`:: Install Elasticsearch as a service
+
+`remove`:: Remove the installed Elasticsearch service (and stop the service if started)
+
+`start`:: Start the Elasticsearch service (if installed)
+
+`stop`:: Stop the Elasticsearch service (if started)
+
+`manager`:: Start a GUI for managing the installed service
+
+Note that the environment configuration options available during the installation are copied and will be used during
+the service lifecycle. This means any changes made to them after the installation will not be picked up unless
+the service is reinstalled.
+
+Based on the architecture of the available JDK/JRE (set through `JAVA_HOME`), the appropriate 64-bit(x64) or 32-bit(x86)
+service will be installed. This information is made available during install:
+
+[source,sh]
+--------------------------------------------------
+c:\elasticsearch-0.90.5\bin>service install
+Installing service : "elasticsearch-service-x64"
+Using JAVA_HOME (64-bit): "c:\jvm\jdk1.7"
+The service 'elasticsearch-service-x64' has been installed.
+--------------------------------------------------
+
+NOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as opposed to a server JVM which
+offers better performance for long-running applications) its usage is discouraged and a warning will be issued.
+
+[float]
+=== Customizing service settings
+
+There are two ways to customize the service settings:
+
+Manager GUI:: accessible through the `manager` command, the GUI offers insight into the installed service, including its status, startup type,
+JVM, start and stop settings among other things. Simply invoking `service.bat` from the command-line with the aforementioned option
+will open up the manager window:
+
+image::images/service-manager-win.png["Windows Service Manager GUI",align="center"]
+
+Customizing `service.bat`:: at its core, `service.bat` relies on the http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project
+to install the service. For full flexibility, such as customizing the user under which the service runs, one can modify the installation
+parameters accordingly. Do note that this requires reinstalling the service for the new settings to be applied.
diff --git a/docs/reference/setup/as-a-service.asciidoc b/docs/reference/setup/as-a-service.asciidoc
new file mode 100644
index 0000000..71bf760
--- /dev/null
+++ b/docs/reference/setup/as-a-service.asciidoc
@@ -0,0 +1,86 @@
+[[setup-service]]
+== Running As a Service on Linux
+
+To run elasticsearch as a service on your operating system, the provided packages try to make it as easy as possible for you to start and stop elasticsearch during reboots and upgrades.
+
+[float]
+=== Linux
+
+Currently our build automatically creates a debian package and an RPM package, which are available on the download page. The packages themselves do not have any dependencies, but you have to make sure that you have installed a JDK.
+
+Each package features a configuration file, which allows you to set the following parameters:
+
+[horizontal]
+`ES_USER`:: The user to run as, defaults to `elasticsearch`
+`ES_GROUP`:: The group to run as, defaults to `elasticsearch`
+`ES_HEAP_SIZE`:: The heap size to start with
+`ES_HEAP_NEWSIZE`:: The size of the new generation heap
+`ES_DIRECT_SIZE`:: The maximum size of the direct memory
+`MAX_OPEN_FILES`:: Maximum number of open files, defaults to `65535`
+`MAX_LOCKED_MEMORY`:: Maximum locked memory size. Set to "unlimited" if you use the bootstrap.mlockall option in elasticsearch.yml. You must also set ES_HEAP_SIZE.
+`MAX_MAP_COUNT`:: Maximum number of memory map areas a process may have. If you use `mmapfs` as index store type, make sure this is set to a high value. For more information, check the https://github.com/torvalds/linux/blob/master/Documentation/sysctl/vm.txt[linux kernel documentation] about `max_map_count`. This is set via `sysctl` before starting elasticsearch. Defaults to `65535`
+`LOG_DIR`:: Log directory, defaults to `/var/log/elasticsearch`
+`DATA_DIR`:: Data directory, defaults to `/var/lib/elasticsearch`
+`WORK_DIR`:: Work directory, defaults to `/tmp/elasticsearch`
+`CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` and `logging.yml` files), defaults to `/etc/elasticsearch`
+`CONF_FILE`:: Path to configuration file, defaults to `/etc/elasticsearch/elasticsearch.yml`
+`ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"`
+`RESTART_ON_UPGRADE`:: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your elasticsearch instance after installing a package manually. The reason for this is to ensure that upgrades in a cluster do not result in a continuous shard reallocation, resulting in high network traffic and reduced response times of your cluster.
+
+[float]
+==== Debian/Ubuntu
+
+The debian package ships with everything you need, as it uses standard debian tools like `update-rc.d` to define the runlevels it runs on. The init script is placed at `/etc/init.d/elasticsearch`, as you would expect. The configuration file is placed at `/etc/default/elasticsearch`.
+
+The debian package does not start up the service by default. The reason for this is to prevent the instance from accidentally joining a cluster without being configured appropriately. After installing using `dpkg -i`, you can use the following commands to ensure that elasticsearch starts when the system is booted, and then start up elasticsearch:
+
+[source,sh]
+--------------------------------------------------
+sudo update-rc.d elasticsearch defaults 95 10
+sudo /etc/init.d/elasticsearch start
+--------------------------------------------------
+
+[float]
+===== Installing the oracle JDK
+
+The usual recommendation is to run the Oracle JDK with elasticsearch. However, Ubuntu and Debian only ship the OpenJDK due to license issues. You can easily install the oracle installer package though. In case you are missing the `add-apt-repository` command under Debian GNU/Linux, make sure you have at least Debian Wheezy and the package `python-software-properties` installed:
+
+[source,sh]
+--------------------------------------------------
+sudo add-apt-repository ppa:webupd8team/java
+sudo apt-get update
+sudo apt-get install oracle-java7-installer
+java -version
+--------------------------------------------------
+
+The last command should verify a successful installation of the Oracle JDK.
+
+
+[float]
+==== RPM based distributions
+
+[float]
+===== Using chkconfig
+
+Some RPM based distributions are using `chkconfig` to enable and disable services. The init script is located at `/etc/init.d/elasticsearch`, whereas the configuration file is placed at `/etc/sysconfig/elasticsearch`. Like the debian package, the RPM package is not started by default after installation; you have to do this manually by entering the following commands:
+
+[source,sh]
+--------------------------------------------------
+sudo /sbin/chkconfig --add elasticsearch
+sudo service elasticsearch start
+--------------------------------------------------
+
+
+[float]
+===== Using systemd
+
+Distributions like SUSE do not use the `chkconfig` tool to register services, but rather `systemd` and its command `/bin/systemctl` to start and stop services (at least in newer versions; otherwise use the `chkconfig` commands above). The configuration file is also placed at `/etc/sysconfig/elasticsearch`. After installing the RPM, you have to change the systemd configuration and then start up elasticsearch:
+
+[source,sh]
+--------------------------------------------------
+sudo /bin/systemctl daemon-reload
+sudo /bin/systemctl enable elasticsearch.service
+sudo /bin/systemctl start elasticsearch.service
+--------------------------------------------------
+
+Also note that changing the `MAX_MAP_COUNT` setting in `/etc/sysconfig/elasticsearch` does not have any effect; you will have to change it in `/usr/lib/sysctl.d/elasticsearch.conf` in order to have it applied at startup.
diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc
new file mode 100644
index 0000000..885dd9f
--- /dev/null
+++ b/docs/reference/setup/configuration.asciidoc
@@ -0,0 +1,237 @@
+[[setup-configuration]]
+== Configuration
+
+[float]
+=== Environment Variables
+
+Within the scripts, Elasticsearch comes with built-in `JAVA_OPTS` that are
+passed to the JVM it starts. The most important setting there is `-Xmx` to
+control the maximum allowed memory for the process, and `-Xms` to
+control the minimum allocated memory for the process (_in general, the
+more memory allocated to the process, the better_).
+
+Most times it is better to leave the default `JAVA_OPTS` as they are,
+and use the `ES_JAVA_OPTS` environment variable in order to set / change
+JVM settings or arguments.
+
+The `ES_HEAP_SIZE` environment variable allows you to set the heap memory
+that will be allocated to the elasticsearch java process. It will allocate
+the same value to both min and max values, though those can be set
+explicitly (not recommended) by setting `ES_MIN_MEM` (defaults to
+`256m`) and `ES_MAX_MEM` (defaults to `1gb`).
+
+It is recommended to set the min and max memory to the same value, and
+enable <<setup-configuration-memory,`mlockall`>>.
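+
+For example (a sketch; the right value depends on your hardware):
+
+[source,sh]
+--------------------------------------------------
+$ ES_HEAP_SIZE=2g bin/elasticsearch -d
+--------------------------------------------------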
+
+[float]
+[[system]]
+=== System Configuration
+
+[float]
+[[file-descriptors]]
+==== File Descriptors
+
+Make sure to increase the number of open file descriptors on the
+machine (or for the user running elasticsearch). Setting it to 32k or
+even 64k is recommended.
+
+In order to test how many open files the process can open, start it with
+`-Des.max-open-files` set to `true`. This will print the number of open
+files the process can open on startup.
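+
+For example, combined with raising the limit for the current shell via
+`ulimit` (a sketch):
+
+[source,sh]
+--------------------------------------------------
+$ ulimit -n 64000
+$ bin/elasticsearch -Des.max-open-files=true
+--------------------------------------------------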
+
+Alternatively, you can retrieve the `max_file_descriptors` for each node
+using the <<cluster-nodes-info>> API, with:
+
+[source,js]
+--------------------------------------------------
+curl localhost:9200/_nodes/process?pretty
+--------------------------------------------------
+
+
+[float]
+[[setup-configuration-memory]]
+==== Memory Settings
+
+There is an option to use
+http://opengroup.org/onlinepubs/007908799/xsh/mlockall.html[mlockall] to
+try to lock the process address space so it won't be swapped. For this
+to work, the `bootstrap.mlockall` setting should be set to `true`, and it
+is recommended to set both the min and max memory allocation to the
+same value. Note: this option is only available on Linux/Unix operating
+systems.
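+
+In `elasticsearch.yml` this looks like:
+
+[source,yaml]
+--------------------------------------------------
+bootstrap.mlockall: true
+--------------------------------------------------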
+
+In order to see if this works or not, set the `common.jna` logging to
+DEBUG level. A solution to "Unknown mlockall error 0" can be to set
+`ulimit -l unlimited`.
+
+Note, `mlockall` might cause the JVM or shell
+session to exit if it fails to allocate the memory (because not enough
+memory is available on the machine).
+
+[float]
+[[settings]]
+=== Elasticsearch Settings
+
+*elasticsearch* configuration files can be found under the `ES_HOME/config`
+folder. The folder comes with two files: `elasticsearch.yml` for
+configuring the different Elasticsearch
+<<modules,modules>>, and `logging.yml` for
+configuring the Elasticsearch logging.
+
+The configuration format is http://www.yaml.org/[YAML]. Here is an
+example of changing the address all network based modules will use to
+bind and publish to:
+
+[source,yaml]
+--------------------------------------------------
+network :
+ host : 10.0.0.4
+--------------------------------------------------
+
+
+[float]
+[[paths]]
+==== Paths
+
+In production use, you will almost certainly want to change paths for
+data and log files:
+
+[source,yaml]
+--------------------------------------------------
+path:
+ logs: /var/log/elasticsearch
+ data: /var/data/elasticsearch
+--------------------------------------------------
+
+[float]
+[[cluster-name]]
+==== Cluster name
+
+Also, don't forget to give your production cluster a name, which is used
+to discover and auto-join other nodes:
+
+[source,yaml]
+--------------------------------------------------
+cluster:
+ name: <NAME OF YOUR CLUSTER>
+--------------------------------------------------
+
+[float]
+[[node-name]]
+==== Node name
+
+You may also want to change the default node name for each node to
+something like the display hostname. By default Elasticsearch will
+randomly pick a Marvel character name from a list of around 3000 names
+when your node starts up.
+
+[source,yaml]
+--------------------------------------------------
+node:
+ name: <NAME OF YOUR NODE>
+--------------------------------------------------
+
+[float]
+[[styles]]
+==== Configuration styles
+
+Internally, all settings are collapsed into "namespaced" settings. For
+example, the above gets collapsed into `node.name`. This means that
+it's easy to support other configuration formats, for example,
+http://www.json.org[JSON]. If JSON is the preferred configuration format,
+simply rename the `elasticsearch.yml` file to `elasticsearch.json` and
+add:
+
+[source,js]
+--------------------------------------------------
+{
+ "network" : {
+ "host" : "10.0.0.4"
+ }
+}
+--------------------------------------------------
+
+It also means that it's easy to provide the settings externally, either
+using `ES_JAVA_OPTS` or as parameters to the `elasticsearch`
+command, for example:
+
+[source,sh]
+--------------------------------------------------
+$ elasticsearch -Des.network.host=10.0.0.4
+--------------------------------------------------
+
+Another option is to use the `es.default.` prefix instead of the `es.` prefix,
+which means the default setting will be used only if not explicitly set
+in the configuration file.
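+
+For example:
+
+[source,sh]
+--------------------------------------------------
+$ elasticsearch -Des.default.network.host=10.0.0.4
+--------------------------------------------------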
+
+Another option is to use the `${...}` notation within the configuration
+file which will resolve to an environment setting, for example:
+
+[source,js]
+--------------------------------------------------
+{
+ "network" : {
+ "host" : "${ES_NET_HOST}"
+ }
+}
+--------------------------------------------------
+
+The location of the configuration file can be set externally using a
+system property:
+
+[source,sh]
+--------------------------------------------------
+$ elasticsearch -Des.config=/path/to/config/file
+--------------------------------------------------
+
+[float]
+[[configuration-index-settings]]
+=== Index Settings
+
+Indices created within the cluster can provide their own settings. For
+example, the following creates an index with memory based storage
+instead of the default file system based one (the format can be either
+YAML or JSON):
+
+[source,sh]
+--------------------------------------------------
+$ curl -XPUT http://localhost:9200/kimchy/ -d \
+'
+index :
+ store:
+ type: memory
+'
+--------------------------------------------------
+
+Index level settings can be set on the node level as well, for example,
+within the `elasticsearch.yml` file, the following can be set:
+
+[source,yaml]
+--------------------------------------------------
+index :
+ store:
+ type: memory
+--------------------------------------------------
+
+This means that every index that gets created on the specific node
+started with the mentioned configuration will store the index in memory
+*unless the index explicitly sets it*. In other words, any index level
+settings override what is set in the node configuration. Of course, the
+above can also be set as a "collapsed" setting, for example:
+
+[source,sh]
+--------------------------------------------------
+$ elasticsearch -Des.index.store.type=memory
+--------------------------------------------------
+
+All of the index level configuration can be found within each
+<<index-modules,index module>>.
+
+[float]
+[[logging]]
+=== Logging
+
+Elasticsearch uses an internal logging abstraction and comes, out of the
+box, with http://logging.apache.org/log4j/[log4j]. It tries to simplify
+log4j configuration by using http://www.yaml.org/[YAML] to configure it;
+the logging configuration file is `config/logging.yml`.
diff --git a/docs/reference/setup/dir-layout.asciidoc b/docs/reference/setup/dir-layout.asciidoc
new file mode 100644
index 0000000..2971c45
--- /dev/null
+++ b/docs/reference/setup/dir-layout.asciidoc
@@ -0,0 +1,47 @@
+[[setup-dir-layout]]
+== Directory Layout
+
+The directory layout of an installation is as follows:
+
+[cols="<,<,<,<",options="header",]
+|=======================================================================
+|Type |Description |Default Location |Setting
+|*home* |Home of elasticsearch installation | | `path.home`
+
+|*bin* |Binary scripts including `elasticsearch` to start a node | `{path.home}/bin` |
+
+|*conf* |Configuration files including `elasticsearch.yml` |`{path.home}/config` |`path.conf`
+
+|*data* |The location of the data files of each index / shard allocated
+on the node. Can hold multiple locations. |`{path.home}/data`|`path.data`
+
+|*work* |Temporary files that are used by different nodes. |`{path.home}/work` |`path.work`
+
+|*logs* |Log files location |`{path.home}/logs` |`path.logs`
+
+|*plugins* |Plugin files location. Each plugin will be contained in a subdirectory. |`{path.home}/plugins` |`path.plugins`
+|=======================================================================
+
+Configuring multiple data locations allows striping the data across
+them. The striping is simple: whole files are placed in one of the
+locations, and the location of each file is decided based on the value
+of the `index.store.distributor` setting:
+
+* `least_used` (default) always selects the directory with the most
+available space.
+* `random` selects directories at random. The probability of selecting
+a particular directory is proportional to the amount of available space
+in that directory.
+
+Note that there are no multiple copies of the same data; in that sense, it is
+similar to RAID 0. Though simple, it should provide a good solution for
+people who don't want to mess with RAID. Here is how it is configured:
+
+---------------------------------
+path.data: /mnt/first,/mnt/second
+---------------------------------
+
+Or in an array format:
+
+----------------------------------------
+path.data: ["/mnt/first", "/mnt/second"]
+----------------------------------------
diff --git a/docs/reference/setup/repositories.asciidoc b/docs/reference/setup/repositories.asciidoc
new file mode 100644
index 0000000..aa0ae30
--- /dev/null
+++ b/docs/reference/setup/repositories.asciidoc
@@ -0,0 +1,52 @@
+[[setup-repositories]]
+== Repositories
+
+We also have repositories available for APT and YUM based distributions.
+
+We have split the major versions into separate URLs to avoid accidental upgrades across major versions.
+For all 0.90.x releases use 0.90 as the version number, for 1.0.x use 1.0, etc.
+
+[float]
+=== APT
+
+Download and install the Public Signing Key
+
+[source,sh]
+--------------------------------------------------
+wget -O - http://packages.elasticsearch.org/GPG-KEY-elasticsearch | apt-key add -
+--------------------------------------------------
+
+Add the following to your `/etc/apt/sources.list` to enable the repository:
+
+[source,sh]
+--------------------------------------------------
+deb http://packages.elasticsearch.org/elasticsearch/0.90/debian stable main
+--------------------------------------------------
+
+Run `apt-get update` and the repository is ready for use.
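+
+The package can then be installed like any other (assuming the package
+name `elasticsearch`):
+
+[source,sh]
+--------------------------------------------------
+sudo apt-get install elasticsearch
+--------------------------------------------------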
+
+
+[float]
+=== YUM
+
+Download and install the Public Signing Key
+
+[source,sh]
+--------------------------------------------------
+rpm --import http://packages.elasticsearch.org/GPG-KEY-elasticsearch
+--------------------------------------------------
+
+Add the following to a file in your `/etc/yum.repos.d/` directory:
+
+[source,sh]
+--------------------------------------------------
+[elasticsearch-0.90]
+name=Elasticsearch repository for 0.90.x packages
+baseurl=http://packages.elasticsearch.org/elasticsearch/0.90/centos
+gpgcheck=1
+gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
+enabled=1
+--------------------------------------------------
+
+And your repository is ready for use.
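+
+The package can then be installed via `yum` (again assuming the package
+name `elasticsearch`):
+
+[source,sh]
+--------------------------------------------------
+yum install elasticsearch
+--------------------------------------------------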
+
diff --git a/docs/reference/testing.asciidoc b/docs/reference/testing.asciidoc
new file mode 100644
index 0000000..fd1a460
--- /dev/null
+++ b/docs/reference/testing.asciidoc
@@ -0,0 +1,16 @@
+[[testing]]
+= Testing
+
+[partintro]
+--
+This section is about utilizing elasticsearch as part of your testing infrastructure.
+
+[float]
+[[testing-header]]
+== Testing:
+
+* <<testing-framework>>
+
+--
+
+include::testing/testing-framework.asciidoc[]
diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc
new file mode 100644
index 0000000..3d9fa28
--- /dev/null
+++ b/docs/reference/testing/testing-framework.asciidoc
@@ -0,0 +1,230 @@
+[[testing-framework]]
+== Java Testing Framework
+
+added[1.0.0.RC1]
+
+[[testing-intro]]
+
+Testing is a crucial part of your application, and as information retrieval is itself already a complex topic, there should not be any additional complexity in setting up a testing infrastructure that uses elasticsearch. This is the main reason why we decided to ship an additional artifact with each release which allows you to use the same testing infrastructure we use in the elasticsearch core. The testing framework allows you to set up clusters with multiple nodes in order to check that your code covers everything needed to run in a cluster, and it saves you from writing complex code yourself to start, stop or manage several test nodes in a cluster. In addition, there is another very important feature called randomized testing, which you get for free as it is part of the elasticsearch infrastructure.
+
+
+
+[[why-randomized-testing]]
+=== Why randomized testing?
+
+The key concept of randomized testing is not to use the same input values for every test case, but still be able to reproduce a failing run. This allows you to test with vastly different input values in order to make sure that your implementation is actually independent of the provided test data.
+
+If you are interested in the implementation being used, check out the http://labs.carrotsearch.com/randomizedtesting.html[RandomizedTesting webpage].
+
+
+[[using-elasticsearch-test-classes]]
+=== Using the elasticsearch test classes
+
+First, you need to include the testing dependency in your project. If you use maven and its `pom.xml` file, it looks like this:
+
+[source,xml]
+--------------------------------------------------
+<dependencies>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-test-framework</artifactId>
+ <version>${lucene.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.elasticsearch</groupId>
+ <artifactId>elasticsearch</artifactId>
+ <version>${elasticsearch.version}</version>
+ <scope>test</scope>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.elasticsearch</groupId>
+ <artifactId>elasticsearch</artifactId>
+ <version>${elasticsearch.version}</version>
+ <scope>test</scope>
+ </dependency>
+</dependencies>
+--------------------------------------------------
+
+Replace the elasticsearch and lucene version placeholders with the current elasticsearch version and its accompanying lucene release.
+
+There are already a couple of classes you can inherit from in your own test classes. The advantage of doing so is that the loggers are already defined and the whole randomized infrastructure is already set up.
+
+
+[[unit-tests]]
+=== Unit tests
+
+In case you only need to execute a unit test, because your implementation can be isolated that well and does not require an up and running elasticsearch cluster, you can use the `ElasticsearchTestCase`. If you are testing lucene features, use `ElasticsearchLuceneTestCase`, and if you are testing concrete token streams, use the `ElasticsearchTokenStreamTestCase` class. Those specific classes execute additional checks which ensure that no resource leaks happen after the test has run.
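+
+For example, a plain unit test could look like this (a minimal sketch;
+the class name and assertion are illustrative):
+
+[source,java]
+-----------------------------------------
+public class SimpleUnitTest extends ElasticsearchTestCase {
+
+    @Test
+    public void testRandomizedInput() {
+        // the randomized infrastructure is inherited from the base class
+        int size = between(1, 100);
+        assertTrue(size >= 1 && size <= 100);
+    }
+}
+-----------------------------------------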
+
+
+[[integration-tests]]
+=== Integration tests
+
+These kinds of tests require firing up a whole cluster of nodes before the tests can actually be run. Compared to unit tests they are obviously way more time consuming, but the test infrastructure tries to minimize the time cost by only restarting the whole cluster if this is configured explicitly.
+
+The class your tests have to inherit from is `ElasticsearchIntegrationTest`. As soon as you inherit from it, there is no need for you to start any elasticsearch nodes manually in your test anymore, though you might need to ensure that at least a certain number of nodes is up and running.
+
+[[helper-methods]]
+==== Generic helper methods
+
+There are a couple of helper methods in `ElasticsearchIntegrationTest` which will make your tests shorter and more concise; a short example follows the list.
+
+[horizontal]
+`refresh()`:: Refreshes all indices in a cluster
+`ensureGreen()`:: Ensures a green cluster health state, waiting for relocations. Waits the default timeout of 30 seconds before failing.
+`ensureYellow()`:: Ensures a yellow cluster health state, also waiting for 30 seconds before failing.
+`createIndex(name)`:: Creates an index with the specified name
+`flush()`:: Flushes all indices in a cluster
+`flushAndRefresh()`:: Combines `flush()` and `refresh()` calls
+`optimize()`:: Waits for all relocations and optimizes all indices in the cluster down to one segment.
+`indexExists(name)`:: Checks if given index exists
+`admin()`:: Returns an `AdminClient` for administrative tasks
+`clusterService()`:: Returns the cluster service java class
+`cluster()`:: Returns the test cluster class, which is explained in the next paragraphs
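+
+As a minimal sketch of how these helpers combine (class, index and
+document names are illustrative):
+
+[source,java]
+-----------------------------------------
+public class SimpleIntegrationTest extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testWithHelpers() {
+        createIndex("test");          // create an index named "test"
+        ensureGreen();                // wait for a green cluster health state
+        client().prepareIndex("test", "doc", "1")
+                .setSource("{\"title\":\"Test\"}")
+                .execute().actionGet();
+        refresh();                    // make the document visible to search
+        assertTrue(indexExists("test"));
+    }
+}
+-----------------------------------------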
+
+
+[[test-cluster-methods]]
+==== Test cluster methods
+
+The `TestCluster` class is the heart of the cluster functionality in a randomized test and allows you to configure a specific setting or replay certain types of outages to check how your custom code reacts.
+
+[horizontal]
+`ensureAtLeastNumNodes(n)`:: Ensure at least the specified number of nodes is running in the cluster
+`ensureAtMostNumNodes(n)`:: Ensure at most the specified number of nodes is running in the cluster
+`getInstance()`:: Get a Guice-instantiated instance of a class from a random node
+`getInstanceFromNode()`:: Get a Guice-instantiated instance of a class from a specified node
+`stopRandomNode()`:: Stop a random node in your cluster to mimic an outage
+`stopCurrentMasterNode()`:: Stop the current master node to force a new election
+`stopRandomNonMaster()`:: Stop a random non-master node to mimic an outage
+`buildNode()`:: Create a new elasticsearch node
+`startNode(settings)`:: Create and start a new elasticsearch node
+
+
+[[accessing-clients]]
+==== Accessing clients
+
+In order to execute any actions, you have to use a client. You can use the `ElasticsearchIntegrationTest.client()` method to get back a random client. This client can be a `TransportClient` or a `NodeClient` - and usually you do not need to care, as long as the action gets executed. There are several more methods for client selection inside the `TestCluster` class, which can be accessed using the `ElasticsearchIntegrationTest.cluster()` method.
+
+[horizontal]
+`iterator()`:: An iterator over all available clients
+`masterClient()`:: Returns a client which is connected to the master node
+`nonMasterClient()`:: Returns a client which is not connected to the master node
+`clientNodeClient()`:: Returns a client which is running on a client node
+`client(String nodeName)`:: Returns a client to a given node
+`smartClient()`:: Returns a smart client
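+
+A short sketch of using one of these clients inside a test (a minimal,
+illustrative example):
+
+[source,java]
+-----------------------------------------
+// inside a test method of an ElasticsearchIntegrationTest subclass
+Client client = cluster().masterClient();   // client connected to the master node
+ClusterHealthResponse health = client.admin().cluster()
+        .prepareHealth().execute().actionGet();
+assertFalse(health.isTimedOut());
+-----------------------------------------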
+
+
+[[scoping]]
+==== Scoping
+
+By default the tests are run without restarting the cluster between tests or test classes in order to be as fast as possible. Of course all indices and templates are deleted between each test. However, sometimes you need to start a new cluster for each test or for a whole test suite - for example, if you load a certain plugin, but you do not want to load it for every test.
+
+You can use the `@ClusterScope` annotation at class level to configure this behaviour:
+
+[source,java]
+-----------------------------------------
+@ClusterScope(scope=SUITE, numNodes=1)
+public class CustomSuggesterSearchTests extends ElasticsearchIntegrationTest {
+ // ... tests go here
+}
+-----------------------------------------
+
+The above sample configures its own cluster for this test suite, i.e. for the class. Other values could be `GLOBAL` (the default) or `TEST` in order to spawn a new cluster for each test. The `numNodes` setting allows you to only start a certain number of nodes, which can speed up test execution, as starting a new node is a costly and time-consuming operation and might not be needed for a given test.
+
+
+[[changing-node-configuration]]
+==== Changing node configuration
+
+As elasticsearch is using JUnit 4, using the `@Before` and `@After` annotations is not a problem. However, you should keep in mind that this does not have any effect on your cluster setup, as the cluster is already up and running when those methods are run. So in case you want to configure settings - like loading a plugin on node startup - before the node is actually running, you should override the `nodeSettings()` method from the `ElasticsearchIntegrationTest` class and change the cluster scope to `SUITE`.
+
+[source,java]
+-----------------------------------------
+@Override
+protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder()
+ .put("plugin.types", CustomSuggesterPlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal)).build();
+}
+-----------------------------------------
+
+
+
+[[randomized-testing]]
+=== Randomized testing
+
+The code snippets you have seen so far did not show any trace of randomized testing features, as they are carefully hidden under the hood. However, when you are writing your own tests, you should make use of these features as well. Before starting with that, you should know how to repeat a failed test with the exact setup it failed with. Luckily this is quite easy, as the whole mvn call is logged together with failed tests, which means you can simply copy and paste that line and rerun the test.
+
+[[generating-random-data]]
+==== Generating random data
+
+The next step is to convert a test using static test data into a test using randomized test data. The kind of data you can randomize varies a lot with the functionality you are testing. Take a look at the following examples (note that this list could go on for pages, as a distributed system has many, many moving parts):
+
+* Searching for data using arbitrary UTF8 signs
+* Changing your mapping configuration, index and field names with each run
+* Changing your response sizes/configurable limits with each run
+* Changing the number of shards/replicas when creating an index
+
+So, how can you create random data? The most important thing to know is that you should never instantiate your own `Random` instance, but use the one provided by `RandomizedTest`, from which all elasticsearch-dependent test classes inherit.
+
+[horizontal]
+`getRandom()`:: Returns the random instance, which can be recreated when calling the test with specific parameters
+`randomBoolean()`:: Returns a random boolean
+`randomByte()`:: Returns a random byte
+`randomShort()`:: Returns a random short
+`randomInt()`:: Returns a random integer
+`randomLong()`:: Returns a random long
+`randomFloat()`:: Returns a random float
+`randomDouble()`:: Returns a random double
+
+`randomInt(max)`:: Returns a random integer between 0 and max
+`between()`:: Returns a random integer within the supplied range
+`atLeast()`:: Returns a random integer of at least the specified value
+`atMost()`:: Returns a random integer of at most the specified value
+
+`randomLocale()`:: Returns a random locale
+`randomTimeZone()`:: Returns a random timezone
+
+In addition, there are a couple of helper methods allowing you to create random ASCII and Unicode strings; see the methods beginning with `randomAscii`, `randomUnicode`, and `randomRealisticUnicode` in the random test class. The latter tries to create more realistic unicode strings by not being arbitrarily random.
+
+If you want to debug a specific problem with a specific random seed, you can use the `@Seed` annotation to configure a specific seed for a test. If you want to run a test more than once instead of starting the whole test suite over and over again, you can use the `@Repeat` annotation with an arbitrary value. Each iteration then gets run with a different seed.
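+
+A sketch of both annotations (the seed value is just an example):
+
+[source,java]
+-----------------------------------------
+@Seed("DEADBEEF")           // pin the master seed to reproduce a failure
+@Repeat(iterations = 10)    // run the test ten times, each with a different seed
+@Test
+public void testStringHandling() {
+    String input = randomUnicodeOfLength(10);
+    // ... exercise your code with the random input
+}
+-----------------------------------------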
+
+
+[[assertions]]
+=== Assertions
+
+As many elasticsearch tests check for similar output, like the number of hits, the first hit, or particular highlighting, a couple of predefined assertions have been created. These have been put into the `ElasticsearchAssertions` class.
+
+[horizontal]
+`assertHitCount()`:: Checks hit count of a search or count request
+`assertAcked()`:: Ensures a request has been acknowledged by the master
+`assertSearchHits()`:: Asserts a search response contains specific ids
+`assertMatchCount()`:: Asserts a matching count from a percolation response
+`assertFirstHit()`:: Asserts the first hit hits the specified matcher
+`assertSecondHit()`:: Asserts the second hit hits the specified matcher
+`assertThirdHit()`:: Asserts the third hit hits the specified matcher
+`assertSearchHit()`:: Assert a certain element in a search response hits the specified matcher
+`assertNoFailures()`:: Asserts that no shard failures have occurred in the response
+`assertHighlight()`:: Assert specific highlights matched
+`assertSuggestion()`:: Assert for specific suggestions
+`assertSuggestionSize()`:: Assert for specific suggestion count
+`assertThrows()`:: Assert a specific exception has been thrown
+
+Common matchers
+
+[horizontal]
+`hasId()`:: Matcher to check for a search hit id
+`hasType()`:: Matcher to check for a search hit type
+`hasIndex()`:: Matcher to check for a search hit index
+
+Usually, you would combine assertions and matchers in your test like this:
+
+[source,java]
+----------------------------
+SearchResponse searchResponse = client().prepareSearch() ...;
+assertHitCount(searchResponse, 4);
+assertFirstHit(searchResponse, hasId("4"));
+assertSearchHits(searchResponse, "1", "2", "3", "4");
+----------------------------
+
+
diff --git a/docs/river/couchdb.asciidoc b/docs/river/couchdb.asciidoc
new file mode 100644
index 0000000..5a2555e
--- /dev/null
+++ b/docs/river/couchdb.asciidoc
@@ -0,0 +1,11 @@
+[[river-couchdb]]
+== CouchDB River
+
+The CouchDB River allows you to automatically index CouchDB and make
+it searchable using the excellent
+http://guide.couchdb.org/draft/notifications.html[_changes] stream that
+CouchDB provides.
+
+See
+https://github.com/elasticsearch/elasticsearch-river-couchdb/blob/master/README.md[README
+file] for details.
diff --git a/docs/river/index.asciidoc b/docs/river/index.asciidoc
new file mode 100644
index 0000000..9d587f9
--- /dev/null
+++ b/docs/river/index.asciidoc
@@ -0,0 +1,77 @@
+[[river]]
+= Rivers
+
+== Intro
+
+A river is a pluggable service running within the elasticsearch
+cluster, pulling data (or having data pushed to it) that is then
+indexed into the cluster.
+
+A river is composed of a unique name and a type. The type is the type of
+the river (out of the box, there is the `dummy` river that simply logs
+that it is running). The name uniquely identifies the river within the
+cluster. For example, one can run a river called `my_river` with type
+`dummy`, and another river called `my_other_river` with type `dummy`.
+
+
+[[how-it-works]]
+== How it Works
+
+A river instance (and its name) is a type within the `_river` index.
+All river implementations accept a document called `_meta` that
+at the very least has the type of the river (twitter / couchdb / ...)
+associated with it. Creating a river is a simple curl request to index
+that `_meta` document (there is actually a `dummy` river used for
+testing):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/_river/my_river/_meta' -d '{
+ "type" : "dummy"
+}'
+--------------------------------------------------
+
+A river can also have more data associated with it in the form of more
+documents indexed under the given index type (the river name). For
+example, the last indexed state can be stored in a document under that
+type.
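+
+As a sketch (the `_last_seq` document name and its field are purely
+illustrative, not a fixed convention):
+
+[source,js]
+--------------------------------------------------
+curl -XPUT 'localhost:9200/_river/my_river/_last_seq' -d '{
+    "value" : 42
+}'
+--------------------------------------------------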
+
+Deleting a river is a call to delete the type (and all documents
+associated with it):
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE 'localhost:9200/_river/my_river/'
+--------------------------------------------------
+
+
+[[allocation]]
+== Cluster Allocation
+
+Rivers are singletons within the cluster. They get allocated
+automatically to one of the nodes and run. If that node fails, a river
+will be automatically allocated to another node.
+
+River allocation can be controlled per node. The `node.river` setting
+can be set to `_none_`, disabling any river allocation on that node. It
+can also include a comma separated list of either river names or types,
+controlling the rivers allowed to run on that node. For example:
+`my_river1,my_river2`, or `dummy,twitter`.
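+
+For example, in a node's `elasticsearch.yml` (a sketch of the setting
+described above):
+
+--------------------------------------------------
+node.river: _none_
+--------------------------------------------------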
+
+
+[[status]]
+== Status
+
+Each river (regardless of the implementation) exposes a high-level
+`_status` doc which includes the node the river is running on. Getting
+the status is a simple curl GET request to
+`/_river/{river name}/_status`.
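+
+For example, for the `my_river` river created above:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_river/my_river/_status'
+--------------------------------------------------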
+
+include::couchdb.asciidoc[]
+
+include::rabbitmq.asciidoc[]
+
+include::twitter.asciidoc[]
+
+include::wikipedia.asciidoc[]
+
diff --git a/docs/river/rabbitmq.asciidoc b/docs/river/rabbitmq.asciidoc
new file mode 100644
index 0000000..cdad9f8
--- /dev/null
+++ b/docs/river/rabbitmq.asciidoc
@@ -0,0 +1,9 @@
+[[river-rabbitmq]]
+== RabbitMQ River
+
+The RabbitMQ River allows you to automatically index messages from a
+http://www.rabbitmq.com/[RabbitMQ] queue.
+
+See
+https://github.com/elasticsearch/elasticsearch-river-rabbitmq/blob/master/README.md[README
+file] for details.
diff --git a/docs/river/twitter.asciidoc b/docs/river/twitter.asciidoc
new file mode 100644
index 0000000..355c187
--- /dev/null
+++ b/docs/river/twitter.asciidoc
@@ -0,0 +1,10 @@
+[[river-twitter]]
+== Twitter River
+
+The Twitter River indexes the public
+http://dev.twitter.com/pages/streaming_api[twitter stream], aka the
+hose, and makes it searchable.
+
+See
+https://github.com/elasticsearch/elasticsearch-river-twitter/blob/master/README.md[README
+file] for details.
diff --git a/docs/river/wikipedia.asciidoc b/docs/river/wikipedia.asciidoc
new file mode 100644
index 0000000..c65107a
--- /dev/null
+++ b/docs/river/wikipedia.asciidoc
@@ -0,0 +1,8 @@
+[[river-wikipedia]]
+== Wikipedia River
+
+A simple river to index http://en.wikipedia.org[Wikipedia].
+
+See
+https://github.com/elasticsearch/elasticsearch-river-wikipedia/blob/master/README.md[README
+file] for details.
diff --git a/docs/ruby/index.asciidoc b/docs/ruby/index.asciidoc
new file mode 100644
index 0000000..553b931
--- /dev/null
+++ b/docs/ruby/index.asciidoc
@@ -0,0 +1,86 @@
+= elasticsearch-ruby
+
+== Overview
+
+There's a suite of official Ruby libraries for Elasticsearch, which provide a client for connecting
+to Elasticsearch clusters, a Ruby interface to the REST API, and more.
+
+See the full documentation at http://github.com/elasticsearch/elasticsearch-ruby.
+
+=== Elasticsearch Version Compatibility
+
+The Ruby libraries are compatible with both Elasticsearch 0.90.x and 1.0.x versions,
+but you have to install a matching http://rubygems.org/gems/elasticsearch/versions[gem version]:
+
+[cols="<,<",options="header",]
+|=========================================
+| Elasticsearch version | Ruby gem version
+| 0.90.x | 0.4.x
+| 1.0.x | 1.x
+|=========================================
+
+=== Installation
+
+Install the Ruby gem for Elasticsearch *1.x*:
+
+[source,sh]
+------------------------------------
+gem install elasticsearch
+------------------------------------
+
+...or add it to your Gemfile:
+
+[source,ruby]
+------------------------------------
+gem 'elasticsearch'
+------------------------------------
+
+Install the Ruby gem for Elasticsearch *0.90.x*:
+
+[source,sh]
+------------------------------------
+gem install elasticsearch -v 0.4.10
+------------------------------------
+
+...or add it to your Gemfile:
+
+[source,ruby]
+------------------------------------
+gem 'elasticsearch', '~> 0.4'
+------------------------------------
+
+=== Example Usage
+
+[source,ruby]
+------------------------------------
+require 'elasticsearch'
+
+client = Elasticsearch::Client.new log: true
+
+client.cluster.health
+
+client.index index: 'my-index', type: 'my-document', id: 1, body: { title: 'Test' }
+
+client.indices.refresh index: 'my-index'
+
+client.search index: 'my-index', body: { query: { match: { title: 'test' } } }
+------------------------------------
+
+
+=== Features at a Glance
+
+* Pluggable logging and tracing
+* Pluggable connection selection strategies (round-robin, random, custom)
+* Pluggable transport implementation, customizable and extendable
+* Pluggable serializer implementation
+* Request retries and dead connections handling
+* Node reloading (based on cluster state) on errors or on demand
+* Modular API implementation
+* 100% REST API coverage
+
+
+== Copyright and License
+
+This software is Copyright (c) 2013 by Elasticsearch BV.
+
+This is free software, licensed under The Apache License Version 2.0.
diff --git a/lib/sigar/libsigar-amd64-freebsd-6.so b/lib/sigar/libsigar-amd64-freebsd-6.so
new file mode 100644
index 0000000..3e94f0d
--- /dev/null
+++ b/lib/sigar/libsigar-amd64-freebsd-6.so
Binary files differ
diff --git a/lib/sigar/libsigar-amd64-linux.so b/lib/sigar/libsigar-amd64-linux.so
new file mode 100644
index 0000000..5a2e4c2
--- /dev/null
+++ b/lib/sigar/libsigar-amd64-linux.so
Binary files differ
diff --git a/lib/sigar/libsigar-amd64-solaris.so b/lib/sigar/libsigar-amd64-solaris.so
new file mode 100644
index 0000000..6396482
--- /dev/null
+++ b/lib/sigar/libsigar-amd64-solaris.so
Binary files differ
diff --git a/lib/sigar/libsigar-ia64-linux.so b/lib/sigar/libsigar-ia64-linux.so
new file mode 100644
index 0000000..2bd2fc8
--- /dev/null
+++ b/lib/sigar/libsigar-ia64-linux.so
Binary files differ
diff --git a/lib/sigar/libsigar-sparc-solaris.so b/lib/sigar/libsigar-sparc-solaris.so
new file mode 100644
index 0000000..aa847d2
--- /dev/null
+++ b/lib/sigar/libsigar-sparc-solaris.so
Binary files differ
diff --git a/lib/sigar/libsigar-sparc64-solaris.so b/lib/sigar/libsigar-sparc64-solaris.so
new file mode 100644
index 0000000..6c4fe80
--- /dev/null
+++ b/lib/sigar/libsigar-sparc64-solaris.so
Binary files differ
diff --git a/lib/sigar/libsigar-universal-macosx.dylib b/lib/sigar/libsigar-universal-macosx.dylib
new file mode 100644
index 0000000..27ab107
--- /dev/null
+++ b/lib/sigar/libsigar-universal-macosx.dylib
Binary files differ
diff --git a/lib/sigar/libsigar-universal64-macosx.dylib b/lib/sigar/libsigar-universal64-macosx.dylib
new file mode 100644
index 0000000..0c721fe
--- /dev/null
+++ b/lib/sigar/libsigar-universal64-macosx.dylib
Binary files differ
diff --git a/lib/sigar/libsigar-x86-freebsd-5.so b/lib/sigar/libsigar-x86-freebsd-5.so
new file mode 100644
index 0000000..8c50c61
--- /dev/null
+++ b/lib/sigar/libsigar-x86-freebsd-5.so
Binary files differ
diff --git a/lib/sigar/libsigar-x86-freebsd-6.so b/lib/sigar/libsigar-x86-freebsd-6.so
new file mode 100644
index 0000000..f080027
--- /dev/null
+++ b/lib/sigar/libsigar-x86-freebsd-6.so
Binary files differ
diff --git a/lib/sigar/libsigar-x86-linux.so b/lib/sigar/libsigar-x86-linux.so
new file mode 100644
index 0000000..a0b64ed
--- /dev/null
+++ b/lib/sigar/libsigar-x86-linux.so
Binary files differ
diff --git a/lib/sigar/libsigar-x86-solaris.so b/lib/sigar/libsigar-x86-solaris.so
new file mode 100644
index 0000000..c6452e5
--- /dev/null
+++ b/lib/sigar/libsigar-x86-solaris.so
Binary files differ
diff --git a/lib/sigar/sigar-1.6.4.jar b/lib/sigar/sigar-1.6.4.jar
new file mode 100644
index 0000000..58c733c
--- /dev/null
+++ b/lib/sigar/sigar-1.6.4.jar
Binary files differ
diff --git a/lib/sigar/sigar-amd64-winnt.dll b/lib/sigar/sigar-amd64-winnt.dll
new file mode 100644
index 0000000..1ec8a03
--- /dev/null
+++ b/lib/sigar/sigar-amd64-winnt.dll
Binary files differ
diff --git a/lib/sigar/sigar-x86-winnt.dll b/lib/sigar/sigar-x86-winnt.dll
new file mode 100644
index 0000000..6afdc01
--- /dev/null
+++ b/lib/sigar/sigar-x86-winnt.dll
Binary files differ
diff --git a/lib/sigar/sigar-x86-winnt.lib b/lib/sigar/sigar-x86-winnt.lib
new file mode 100644
index 0000000..04924a1
--- /dev/null
+++ b/lib/sigar/sigar-x86-winnt.lib
Binary files differ
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..b6be5d0
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,1197 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <name>elasticsearch</name>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.elasticsearch</groupId>
+ <artifactId>elasticsearch</artifactId>
+ <version>1.0.3</version>
+ <packaging>jar</packaging>
+ <description>Elasticsearch - Open Source, Distributed, RESTful Search Engine</description>
+ <inceptionYear>2009</inceptionYear>
+ <licenses>
+ <license>
+ <name>The Apache Software License, Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
+ <scm>
+ <connection>scm:git:git@github.com:elasticsearch/elasticsearch.git</connection>
+ <developerConnection>scm:git:git@github.com:elasticsearch/elasticsearch.git</developerConnection>
+ <url>http://github.com/elasticsearch/elasticsearch</url>
+ </scm>
+
+ <parent>
+ <groupId>org.sonatype.oss</groupId>
+ <artifactId>oss-parent</artifactId>
+ <version>7</version>
+ </parent>
+
+ <properties>
+ <lucene.version>4.6.1</lucene.version>
+ <tests.jvms>1</tests.jvms>
+ <tests.shuffle>true</tests.shuffle>
+ <tests.output>onerror</tests.output>
+ <tests.client.ratio></tests.client.ratio>
+ <es.logger.level>INFO</es.logger.level>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.hamcrest</groupId>
+ <artifactId>hamcrest-all</artifactId>
+ <version>1.3</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.carrotsearch.randomizedtesting</groupId>
+ <artifactId>randomizedtesting-runner</artifactId>
+ <version>2.0.15</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-test-framework</artifactId>
+ <version>${lucene.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-expressions</artifactId>
+ <version>${lucene.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ <version>4.3.1</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-core</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-analyzers-common</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-codecs</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-queries</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>jakarta-regexp</groupId>
+ <artifactId>jakarta-regexp</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-memory</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-highlighter</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-queryparser</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>jakarta-regexp</groupId>
+ <artifactId>jakarta-regexp</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-suggest</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-join</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <!-- Lucene spatial, make sure when upgrading to work with latest version of jts/spatial4j dependencies -->
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-spatial</artifactId>
+ <version>${lucene.version}</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.spatial4j</groupId>
+ <artifactId>spatial4j</artifactId>
+ <version>0.3</version>
+ <scope>compile</scope>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>com.vividsolutions</groupId>
+ <artifactId>jts</artifactId>
+ <version>1.12</version>
+ <scope>compile</scope>
+ <optional>true</optional>
+ <exclusions>
+ <exclusion>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <!-- Lucene spatial -->
+
+
+ <!-- START: dependencies that are shaded -->
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ <version>16.0</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.carrotsearch</groupId>
+ <artifactId>hppc</artifactId>
+ <version>0.5.3</version>
+ </dependency>
+
+ <dependency>
+ <groupId>joda-time</groupId>
+ <artifactId>joda-time</artifactId>
+ <!-- joda 2.0 moved to using volatile fields for datetime -->
+ <!-- When updating to a new version, make sure to update our copy of BaseDateTime -->
+ <version>2.3</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.joda</groupId>
+ <artifactId>joda-convert</artifactId>
+ <version>1.2</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.mvel</groupId>
+ <artifactId>mvel2</artifactId>
+ <version>2.1.9.Final</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-core</artifactId>
+ <version>2.3.2</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>jackson-dataformat-smile</artifactId>
+ <version>2.3.2</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>jackson-dataformat-yaml</artifactId>
+ <version>2.3.2</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty</artifactId>
+ <version>3.9.0.Final</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.ning</groupId>
+ <artifactId>compress-lzf</artifactId>
+ <version>0.9.6</version>
+ <scope>compile</scope>
+ </dependency>
+ <!-- END: dependencies that are shaded -->
+
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>1.2.17</version>
+ <scope>compile</scope>
+ <optional>true</optional>
+ </dependency>
+
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.6.2</version>
+ <scope>compile</scope>
+ <optional>true</optional>
+ </dependency>
+
+ <dependency>
+ <groupId>net.java.dev.jna</groupId>
+ <artifactId>jna</artifactId>
+ <version>3.3.0</version>
+ <scope>compile</scope>
+ <optional>true</optional>
+ </dependency>
+
+ <dependency>
+ <groupId>org.fusesource</groupId>
+ <artifactId>sigar</artifactId>
+ <version>1.6.4</version>
+ <scope>compile</scope>
+ <optional>true</optional>
+ </dependency>
+
+ <!-- We don't use this since the publish pom is then messed up -->
+ <!--
+ <dependency>
+ <groupId>sigar</groupId>
+ <artifactId>sigar</artifactId>
+ <version>1.6.4</version>
+ <scope>system</scope>
+ <systemPath>${basedir}/lib/sigar/sigar-1.6.4.jar</systemPath>
+ <optional>true</optional>
+ </dependency>
+ -->
+ </dependencies>
+
+ <build>
+
+ <resources>
+ <resource>
+ <directory>${basedir}/src/main/java</directory>
+ <includes>
+ <include>**/*.json</include>
+ <include>**/*.yml</include>
+ </includes>
+ </resource>
+ <resource>
+ <directory>${basedir}/src/main/resources</directory>
+ <includes>
+ <include>**/*.*</include>
+ </includes>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+
+ <testResources>
+ <testResource>
+ <directory>${basedir}/src/test/java</directory>
+ <includes>
+ <include>**/*.json</include>
+ <include>**/*.yml</include>
+ <include>**/*.txt</include>
+ </includes>
+ </testResource>
+ <testResource>
+ <directory>${basedir}/src/test/resources</directory>
+ <includes>
+ <include>**/*.*</include>
+ </includes>
+ </testResource>
+ <testResource>
+ <directory>${basedir}/rest-api-spec</directory>
+ <targetPath>rest-api-spec</targetPath>
+ <includes>
+ <include>api/*.json</include>
+ <include>test/**/*.yaml</include>
+ </includes>
+ </testResource>
+ </testResources>
+
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ <fork>true</fork>
+ <maxmem>512m</maxmem>
+ <!-- REMOVE WHEN UPGRADE:
+ see https://jira.codehaus.org/browse/MCOMPILER-209 it's a bug where
+                        incremental compilation doesn't work unless it's set to false, causing
+ recompilation of the entire codebase each time without any changes. Should
+ be fixed in version > 3.1
+ -->
+ <useIncrementalCompilation>false</useIncrementalCompilation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>com.carrotsearch.randomizedtesting</groupId>
+ <artifactId>junit4-maven-plugin</artifactId>
+ <version>2.0.15</version>
+ <executions>
+ <execution>
+ <id>tests</id>
+ <phase>test</phase>
+ <goals>
+ <goal>junit4</goal>
+ </goals>
+ <configuration>
+ <heartbeat>20</heartbeat>
+ <jvmOutputAction>pipe,warn</jvmOutputAction>
+ <leaveTemporary>true</leaveTemporary>
+ <listeners>
+ <report-ant-xml mavenExtensions="true"
+ dir="${project.build.directory}/surefire-reports"/>
+ <report-text
+ showThrowable="true"
+ showStackTraces="true"
+ showOutput="${tests.output}"
+ showStatusOk="false"
+ showStatusError="true"
+ showStatusFailure="true"
+ showStatusIgnored="true"
+ showSuiteSummary="true"
+ timestamps="false"/>
+ <report-execution-times file="${basedir}/.local-execution-hints.log"/>
+ </listeners>
+ <assertions>
+ <enable/>
+ <disable package="${tests.assertion.disabled}"/>
+ <!-- pass org.elasticsearch to run without assertions -->
+ </assertions>
+ <parallelism>${tests.jvms}</parallelism>
+ <balancers>
+ <execution-times>
+ <fileset dir="${basedir}" includes=".local-execution-hints.log"/>
+ </execution-times>
+ </balancers>
+ <includes>
+ <include>**/*Tests.class</include>
+ <include>**/*Test.class</include>
+ </includes>
+ <excludes>
+ <exclude>**/Abstract*.class</exclude>
+ <exclude>**/*StressTest.class</exclude>
+ </excludes>
+ <argLine>
+ ${tests.jvm.argline}
+ </argLine>
+ <jvmArgs>
+ <param>-Xmx512m</param>
+ <param>-Xss256k</param>
+ <param>-XX:MaxPermSize=128m</param>
+ <param>-XX:MaxDirectMemorySize=512m</param>
+ <param>-Des.logger.prefix=</param>
+ </jvmArgs>
+ <shuffleOnSlave>${tests.shuffle}</shuffleOnSlave>
+ <sysouts>${tests.verbose}</sysouts>
+ <seed>${tests.seed}</seed>
+ <haltOnFailure>${tests.failfast}</haltOnFailure>
+ <systemProperties>
+ <java.io.tmpdir>.</java.io.tmpdir> <!-- we use '.' since this is different per JVM-->
+ <!-- RandomizedTesting library system properties -->
+ <tests.jvm.argline>${tests.jvm.argline}</tests.jvm.argline>
+ <tests.appendseed>${tests.appendseed}</tests.appendseed>
+ <tests.iters>${tests.iters}</tests.iters>
+ <tests.maxfailures>${tests.maxfailures}</tests.maxfailures>
+ <tests.failfast>${tests.failfast}</tests.failfast>
+ <tests.class>${tests.class}</tests.class>
+ <tests.method>${tests.method}</tests.method>
+ <tests.nightly>${tests.nightly}</tests.nightly>
+ <tests.badapples>${tests.badapples}</tests.badapples>
+ <tests.weekly>${tests.weekly}</tests.weekly>
+ <tests.slow>${tests.slow}</tests.slow>
+ <tests.awaitsfix>${tests.awaitsfix}</tests.awaitsfix>
+ <tests.timeoutSuite>${tests.timeoutSuite}</tests.timeoutSuite>
+ <tests.showSuccess>${tests.showSuccess}</tests.showSuccess>
+ <tests.integration>${tests.integration}</tests.integration>
+ <tests.client.ratio>${tests.client.ratio}</tests.client.ratio>
+ <tests.enable_mock_modules>${tests.enable_mock_modules}</tests.enable_mock_modules>
+ <tests.assertion.disabled>${tests.assertion.disabled}</tests.assertion.disabled>
+ <tests.rest>${tests.rest}</tests.rest>
+ <tests.rest.suite>${tests.rest.suite}</tests.rest.suite>
+ <tests.rest.spec>${tests.rest.spec}</tests.rest.spec>
+ <tests.network>${tests.network}</tests.network>
+ <es.node.local>${env.ES_TEST_LOCAL}</es.node.local>
+ <es.node.mode>${es.node.mode}</es.node.mode>
+ <es.logger.level>${es.logger.level}</es.logger.level>
+ <tests.security.manager>${tests.security.manager}</tests.security.manager>
+ <java.awt.headless>true</java.awt.headless>
+ <!-- everything below is for security manager / test.policy -->
+ <junit4.tempDir>${project.build.directory}</junit4.tempDir>
+ <java.security.policy>${basedir}/dev-tools/tests.policy</java.security.policy>
+ </systemProperties>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <!-- we skip surefire to work with randomized testing above -->
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.16</version>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>2.2</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <shadeTestJar>true</shadeTestJar>
+ <minimizeJar>true</minimizeJar>
+ <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+ <artifactSet>
+ <includes>
+ <include>com.google.guava:guava</include>
+ <include>com.carrotsearch:hppc</include>
+ <include>org.mvel:mvel2</include>
+ <include>com.fasterxml.jackson.core:jackson-core</include>
+ <include>com.fasterxml.jackson.dataformat:jackson-dataformat-smile</include>
+ <include>com.fasterxml.jackson.dataformat:jackson-dataformat-yaml</include>
+ <include>joda-time:joda-time</include>
+ <include>org.joda:joda-convert</include>
+ <include>io.netty:netty</include>
+ <include>com.ning:compress-lzf</include>
+ </includes>
+ </artifactSet>
+ <relocations>
+ <relocation>
+ <pattern>com.google.common</pattern>
+ <shadedPattern>org.elasticsearch.common</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.carrotsearch.hppc</pattern>
+ <shadedPattern>org.elasticsearch.common.hppc</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>jsr166y</pattern>
+ <shadedPattern>org.elasticsearch.common.util.concurrent.jsr166y</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>jsr166e</pattern>
+ <shadedPattern>org.elasticsearch.common.util.concurrent.jsr166e</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.mvel2</pattern>
+ <shadedPattern>org.elasticsearch.common.mvel2</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.fasterxml.jackson</pattern>
+ <shadedPattern>org.elasticsearch.common.jackson</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.joda</pattern>
+ <shadedPattern>org.elasticsearch.common.joda</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.jboss.netty</pattern>
+ <shadedPattern>org.elasticsearch.common.netty</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.ning.compress</pattern>
+ <shadedPattern>org.elasticsearch.common.compress</shadedPattern>
+ </relocation>
+ </relocations>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/license/**</exclude>
+ <exclude>META-INF/*</exclude>
+ <exclude>META-INF/maven/**</exclude>
+ <exclude>LICENSE</exclude>
+ <exclude>NOTICE</exclude>
+ <exclude>/*.txt</exclude>
+ <exclude>build.properties</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.6</version>
+ <executions>
+ <execution>
+ <id>copy-resources</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/bin</outputDirectory>
+ <resources>
+ <resource>
+ <directory>${basedir}/bin</directory>
+ <filtering>true</filtering>
+ <excludes>
+ <exclude>*.exe</exclude>
+ </excludes>
+ </resource>
+ <resource>
+ <directory>${basedir}/bin</directory>
+ <filtering>false</filtering>
+ <includes>
+ <include>*.exe</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <appendAssemblyId>false</appendAssemblyId>
+ <outputDirectory>${project.build.directory}/releases/</outputDirectory>
+ <descriptors>
+ <descriptor>${basedir}/src/main/assemblies/targz-bin.xml</descriptor>
+ <descriptor>${basedir}/src/main/assemblies/zip-bin.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>buildnumber-maven-plugin</artifactId>
+ <version>1.2</version>
+ <executions>
+ <execution>
+ <phase>validate</phase>
+ <goals>
+ <goal>create</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <doCheck>false</doCheck>
+ <doUpdate>false</doUpdate>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <version>2.8</version>
+ <executions>
+ <execution>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/lib</outputDirectory>
+ <includeScope>runtime</includeScope>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+                <!-- some info: https://github.com/tcurdt/jdeb/blob/master/docs/maven.md
+ -->
+ <artifactId>jdeb</artifactId>
+ <groupId>org.vafer</groupId>
+ <version>1.0.1</version>
+ <configuration>
+ <deb>${project.build.directory}/releases/${project.artifactId}-${project.version}.deb</deb>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>jdeb</goal>
+ </goals>
+ <configuration>
+ <dataSet>
+ <data>
+ <src>${project.basedir}/</src>
+ <includes>*.txt, *.textile</includes>
+ <excludes>LICENSE.txt, .DS_Store</excludes>
+ <type>directory</type>
+ <mapper>
+ <type>perm</type>
+ <prefix>/usr/share/elasticsearch</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <!-- use the filtered one from the resources plugin -->
+ <src>${project.build.directory}/bin</src>
+ <type>directory</type>
+ <excludes>*.bat, .DS_Store, *.exe</excludes>
+ <mapper>
+ <type>perm</type>
+ <prefix>/usr/share/elasticsearch/bin</prefix>
+ <filemode>755</filemode>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.build.directory}/</src>
+ <includes>${project.build.finalName}.jar</includes>
+ <type>directory</type>
+ <mapper>
+ <type>perm</type>
+ <prefix>/usr/share/elasticsearch/lib</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.basedir}/lib/sigar/</src>
+ <includes>sigar-*.jar, libsigar-*-linux.*</includes>
+ <type>directory</type>
+ <mapper>
+ <type>perm</type>
+ <prefix>/usr/share/elasticsearch/lib/sigar</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.build.directory}/lib</src>
+ <includes>lucene*, log4j*, jna*, spatial4j*, jts*</includes>
+ <type>directory</type>
+ <mapper>
+ <type>perm</type>
+ <prefix>/usr/share/elasticsearch/lib</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.basedir}/src/deb/default/</src>
+ <type>directory</type>
+ <excludes>.DS_Store</excludes>
+ <mapper>
+ <type>perm</type>
+ <prefix>/etc/default</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.basedir}/src/deb/init.d/</src>
+ <type>directory</type>
+ <excludes>.DS_Store</excludes>
+ <mapper>
+ <type>perm</type>
+ <prefix>/etc/init.d</prefix>
+ <filemode>755</filemode>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.basedir}/config</src>
+ <type>directory</type>
+ <excludes>.DS_Store</excludes>
+ <mapper>
+ <type>perm</type>
+ <prefix>/etc/elasticsearch</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.basedir}/src/deb/lintian</src>
+ <type>directory</type>
+ <excludes>.DS_Store</excludes>
+ <mapper>
+ <type>perm</type>
+ <prefix>/usr/share/lintian/overrides</prefix>
+ <user>root</user>
+ <group>root</group>
+ </mapper>
+ </data>
+ <data>
+ <src>${project.basedir}/src/deb/copyright</src>
+ <dst>/usr/share/doc/elasticsearch/copyright</dst>
+ <type>file</type>
+ </data>
+ </dataSet>
+
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>rpm-maven-plugin</artifactId>
+ <version>2.1-alpha-3</version>
+ <configuration>
+ <copyright>2013, Elasticsearch</copyright>
+ <distribution>Elasticsearch</distribution>
+ <group>Application/Internet</group>
+ <packager>Elasticsearch</packager>
+ <prefix>/usr</prefix>
+ <changelogFile>src/changelog</changelogFile>
+ <defineStatements>
+ <defineStatement>_unpackaged_files_terminate_build 0</defineStatement>
+ <defineStatement>_binaries_in_noarch_packages_terminate_build 0</defineStatement>
+ </defineStatements>
+ <defaultFilemode>644</defaultFilemode>
+ <defaultDirmode>755</defaultDirmode>
+ <defaultUsername>root</defaultUsername>
+ <defaultGroupname>root</defaultGroupname>
+ <mappings>
+ <mapping>
+ <directory>/etc/elasticsearch/</directory>
+ <configuration>noreplace</configuration>
+ <sources>
+ <source>
+ <location>config/</location>
+ <includes>
+ <include>*.yml</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/etc/sysconfig/</directory>
+ <configuration>noreplace</configuration>
+ <sources>
+ <source>
+ <location>src/rpm/sysconfig</location>
+ <includes>
+ <include>elasticsearch</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/etc/rc.d/init.d/</directory>
+ <filemode>755</filemode>
+ <configuration>true</configuration>
+ <sources>
+ <source>
+ <location>src/rpm/init.d/elasticsearch</location>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/usr/lib/systemd/system/</directory>
+ <filemode>755</filemode>
+ <configuration>true</configuration>
+ <sources>
+ <source>
+ <location>src/rpm/systemd</location>
+ <includes>
+ <include>elasticsearch.service</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/usr/lib/sysctl.d/</directory>
+ <filemode>755</filemode>
+ <configuration>true</configuration>
+ <sources>
+ <source>
+ <location>src/rpm/systemd/sysctl.d</location>
+ <includes>
+ <include>elasticsearch.conf</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/usr/lib/tmpfiles.d/</directory>
+ <configuration>true</configuration>
+ <sources>
+ <source>
+ <location>src/rpm/systemd/</location>
+ <includes>
+ <include>elasticsearch.conf</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/var/run/elasticsearch/</directory>
+ <filemode>755</filemode>
+ <username>elasticsearch</username>
+ <groupname>elasticsearch</groupname>
+ </mapping>
+ <mapping>
+ <directory>/var/lib/elasticsearch/</directory>
+ <filemode>755</filemode>
+ <username>elasticsearch</username>
+ <groupname>elasticsearch</groupname>
+ </mapping>
+ <mapping>
+ <directory>/var/log/elasticsearch/</directory>
+ <filemode>755</filemode>
+ <username>elasticsearch</username>
+ <groupname>elasticsearch</groupname>
+ </mapping>
+ <mapping>
+ <directory>/usr/share/elasticsearch/bin/</directory>
+ <filemode>755</filemode>
+ <sources>
+ <source>
+ <location>target/bin</location>
+ <includes>
+ <include>elasticsearch</include>
+ <include>elasticsearch.in.sh</include>
+ <include>plugin</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/usr/share/elasticsearch/lib</directory>
+ <sources>
+ <source>
+ <location>target/lib/</location>
+ <includes>
+ <include>lucene*</include>
+ <include>log4j*</include>
+ <include>jna*</include>
+ <include>spatial4j*</include>
+ <include>jts*</include>
+ </includes>
+ </source>
+ <source>
+ <location>${project.build.directory}/</location>
+ <includes>
+ <include>${project.build.finalName}.jar</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/usr/share/elasticsearch/lib/sigar</directory>
+ <sources>
+ <source>
+ <location>lib/sigar</location>
+ <includes>
+ <include>sigar*.jar</include>
+ <include>libsigar-*-linux.*</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ <mapping>
+ <directory>/usr/share/elasticsearch/</directory>
+ <sources>
+ <source>
+ <location>.</location>
+ <includes>
+ <include>LICENSE.txt</include>
+ <include>NOTICE.txt</include>
+ <include>README.textile</include>
+ </includes>
+ </source>
+ </sources>
+ </mapping>
+ </mappings>
+ <preinstallScriptlet>
+ <scriptFile>src/rpm/scripts/preinstall</scriptFile>
+ <fileEncoding>utf-8</fileEncoding>
+ </preinstallScriptlet>
+ <postinstallScriptlet>
+ <scriptFile>src/rpm/scripts/postinstall</scriptFile>
+ <fileEncoding>utf-8</fileEncoding>
+ </postinstallScriptlet>
+ <preremoveScriptlet>
+ <scriptFile>src/rpm/scripts/preremove</scriptFile>
+ <fileEncoding>utf-8</fileEncoding>
+ </preremoveScriptlet>
+ <postremoveScriptlet>
+ <scriptFile>src/rpm/scripts/postremove</scriptFile>
+ <fileEncoding>utf-8</fileEncoding>
+ </postremoveScriptlet>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>de.thetaphi</groupId>
+ <artifactId>forbiddenapis</artifactId>
+ <version>1.4</version>
+
+ <executions>
+ <execution>
+ <id>check-forbidden-apis</id>
+ <configuration>
+ <targetVersion>1.6</targetVersion>
+ <!-- disallow undocumented classes like sun.misc.Unsafe: -->
+ <internalRuntimeForbidden>true</internalRuntimeForbidden>
+ <!-- if the used Java version is too new, don't fail, just do nothing: -->
+ <failOnUnsupportedJava>false</failOnUnsupportedJava>
+ <excludes>
+ <exclude>jsr166e/**</exclude>
+ <exclude>jsr166y/**</exclude>
+ <!-- start excludes for valid system-out -->
+ <exclude>org/elasticsearch/common/logging/log4j/ConsoleAppender*</exclude>
+ <exclude>org/elasticsearch/plugins/PluginManager.class</exclude>
+ <exclude>org/elasticsearch/bootstrap/Bootstrap.class</exclude>
+ <exclude>org/elasticsearch/Version.class</exclude>
+ <exclude>org/apache/lucene/search/XReferenceManager.class</exclude>
+ <exclude>org/apache/lucene/search/XSearcherManager.class</exclude>
+ <exclude>org/apache/lucene/queries/XTermsFilter.class</exclude>
+ <exclude>org/elasticsearch/index/merge/Merges.class</exclude>
+ <exclude>org/elasticsearch/common/lucene/search/Queries$QueryWrapperFilterFactory.class</exclude>
+ <!-- end excludes for valid system-out -->
+ <!-- start excludes for Unsafe -->
+ <exclude>org/elasticsearch/common/util/UnsafeUtils.class</exclude>
+ <!-- end excludes for Unsafe -->
+ </excludes>
+ <bundledSignatures>
+ <!-- This will automatically choose the right signatures based on 'maven.compiler.target': -->
+ <bundledSignature>jdk-unsafe</bundledSignature>
+ <bundledSignature>jdk-deprecated</bundledSignature>
+                                <bundledSignature>jdk-system-out</bundledSignature>
+ </bundledSignatures>
+ <signaturesFiles>
+ <signaturesFile>core-signatures.txt</signaturesFile>
+ </signaturesFiles>
+ </configuration>
+ <phase>compile</phase>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>check-forbidden-test-apis</id>
+ <configuration>
+ <targetVersion>1.6</targetVersion>
+ <!-- disallow undocumented classes like sun.misc.Unsafe: -->
+ <internalRuntimeForbidden>true</internalRuntimeForbidden>
+ <!-- if the used Java version is too new, don't fail, just do nothing: -->
+ <failOnUnsupportedJava>false</failOnUnsupportedJava>
+ <bundledSignatures>
+ <!-- This will automatically choose the right signatures based on 'maven.compiler.target': -->
+ <bundledSignature>jdk-unsafe</bundledSignature>
+ <bundledSignature>jdk-deprecated</bundledSignature>
+ </bundledSignatures>
+ </configuration>
+ <phase>test-compile</phase>
+ <goals>
+ <goal>testCheck</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>exec-maven-plugin</artifactId>
+ <groupId>org.codehaus.mojo</groupId>
+ <version>1.2.1</version>
+ <executions>
+ <execution>
+ <id>Java Version</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <executable>java</executable>
+ <arguments>
+ <argument>-version</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ </executions>
+
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <configuration>
+ <includes>
+ <include>org/elasticsearch/test/**/*</include>
+ <include>org/elasticsearch/cache/recycler/MockPageCacheRecycler.class</include>
+ <include>org/apache/lucene/util/AbstractRandomizedTest.class</include>
+ <include>org/apache/lucene/util/AbstractRandomizedTest$*.class</include>
+ <!-- inner classes -->
+ <include>com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.class</include>
+ </includes>
+ <excludes>
+ <!-- we only distribute the REST tests runner, not the executable test -->
+ <exclude>org/elasticsearch/test/rest/ElasticsearchRestTests.class</exclude>
+ <!-- unit tests for yaml suite parser & rest spec parser need to be excluded -->
+ <exclude>org/elasticsearch/test/rest/test/**/*</exclude>
+ </excludes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>com.mycila</groupId>
+ <artifactId>license-maven-plugin</artifactId>
+ <version>2.5</version>
+ <configuration>
+ <header>dev-tools/elasticsearch_license_header.txt</header>
+ <headerDefinitions>
+ <headerDefinition>dev-tools/license_header_definition.xml</headerDefinition>
+ </headerDefinitions>
+ <includes>
+ <include>src/main/java/org/elasticsearch/**/*.java</include>
+ <include>src/test/java/org/elasticsearch/**/*.java</include>
+ </includes>
+ <excludes>
+ <exclude>src/main/java/org/elasticsearch/common/inject/**</exclude>
+ <!-- Guice -->
+ <exclude>src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java</exclude>
+ <exclude>src/main/java/org/elasticsearch/common/lucene/search/XBooleanFilter.java</exclude>
+ <exclude>src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java</exclude>
+ <exclude>src/main/java/org/apache/lucene/queryparser/XSimpleQueryParser.java</exclude>
+ <exclude>src/main/java/org/apache/lucene/**/X*.java</exclude>
+ </excludes>
+ </configuration>
+ <!-- We can't run by default since the package is broken with java 1.6
+ see https://github.com/mycila/license-maven-plugin/issues/35
+ <executions>
+ <execution>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </execution>
+ </executions>
+ -->
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!-- make m2e stfu -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <!-- copy-dependency plugin -->
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <versionRange>[1.0.0,)</versionRange>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute/>
+ </action>
+ </pluginExecution>
+ <!-- forbidden-apis plugin -->
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>de.thetaphi</groupId>
+ <artifactId>forbiddenapis</artifactId>
+ <versionRange>[1.0.0,)</versionRange>
+ <goals>
+ <goal>testCheck</goal>
+ <goal>check</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute/>
+ </action>
+ </pluginExecution>
+ <!-- exec-maven plugin -->
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <artifactId>exec-maven-plugin</artifactId>
+ <groupId>org.codehaus.mojo</groupId>
+ <versionRange>[1.0.0,)</versionRange>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute/>
+ </action>
+ </pluginExecution>
+ <!-- maven-enforcer plugin -->
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-enforcer-plugin</artifactId>
+ <versionRange>[1.0.0,)</versionRange>
+ <goals>
+ <goal>enforce</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute/>
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-eclipse-plugin</artifactId>
+ <version>2.9</version>
+ <configuration>
+ <buildOutputDirectory>eclipse-build</buildOutputDirectory>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+</project>
diff --git a/rest-api-spec/.gitignore b/rest-api-spec/.gitignore
new file mode 100644
index 0000000..ab11000
--- /dev/null
+++ b/rest-api-spec/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+Gemfile.lock
+*.swp
+tmp/
diff --git a/rest-api-spec/LICENSE.txt b/rest-api-spec/LICENSE.txt
new file mode 100644
index 0000000..7b9fbe3
--- /dev/null
+++ b/rest-api-spec/LICENSE.txt
@@ -0,0 +1,13 @@
+Copyright (c) 2013 Elasticsearch
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rest-api-spec/README.markdown b/rest-api-spec/README.markdown
new file mode 100644
index 0000000..012b3e9
--- /dev/null
+++ b/rest-api-spec/README.markdown
@@ -0,0 +1,65 @@
+# Elasticsearch REST API JSON specification
+
+This repository contains a collection of JSON files which describe the [Elasticsearch](http://elasticsearch.org) HTTP API.
+
+Their purpose is to formalize and standardize the API, and to facilitate the development of libraries and integrations.
+
+Example for the ["Create Index"](http://www.elasticsearch.org/guide/reference/api/admin-indices-create-index/) API:
+
+```json
+{
+ "indices.create": {
+ "documentation": "http://www.elasticsearch.org/guide/reference/api/admin-indices-create-index/",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/{index}",
+ "paths": ["/{index}"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ }
+ }
+ },
+ "body": {
+ "description" : "The configuration for the index (`settings` and `mappings`)"
+ }
+ }
+}
+```
+
+The specification contains:
+
+* The _name_ of the API (`indices.create`), which usually corresponds to the client method name
+* Link to the documentation at <http://elasticsearch.org>
+* List of HTTP methods for the endpoint
+* URL specification: path, parts, parameters
+* Whether a request body is allowed for the endpoint, along with its description
+
+The `methods` and `url.paths` elements list all possible HTTP methods and URLs for the endpoint;
+it is the developer's responsibility to map this information onto a sensible API on the target platform.
+
+# Utilities
+
+The repository contains some utilities in the `utils` directory:
+
+* The `thor api:generate:spec` task generates the basic JSON specification from the Java source code
+* The `thor api:generate:code` task generates Ruby source code and tests from the specs, and can be extended
+ to generate assets in another programming language
+
+Run `bundle install` and then `thor list` in the _utils_ folder.
+
+The full command to generate the API spec is:
+
+ thor api:generate:spec --output=myfolder --elasticsearch=/path/to/es
+
+## License
+
+This software is licensed under the Apache 2 license.
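These spec files are meant to be consumed programmatically. A minimal sketch of such a consumer — assuming a local checkout of the `api` directory and using only the Python standard library; the spec path and argument values are illustrative, not part of the spec — could expand a URL template like this:

```python
# Sketch: build a concrete URL from a rest-api-spec file.
# Error handling is omitted; path and values below are illustrative.
import json

def build_url(spec_path, api_name, **parts):
    with open(spec_path) as f:
        spec = json.load(f)[api_name]
    url = spec["url"]
    # Keep only the paths whose {placeholders} are all supplied.
    candidates = [
        p for p in url["paths"]
        if all(("{%s}" % name) not in p or name in parts
               for name in url.get("parts", {}))
    ]
    # Prefer the most specific (longest) matching path.
    path = max(candidates, key=len)
    for name, value in parts.items():
        path = path.replace("{%s}" % name, str(value))
    return path

# build_url("api/indices.create.json", "indices.create", index="test")
# -> "/test"
```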
diff --git a/rest-api-spec/api/bulk.json b/rest-api-spec/api/bulk.json
new file mode 100644
index 0000000..aefbee7
--- /dev/null
+++ b/rest-api-spec/api/bulk.json
@@ -0,0 +1,54 @@
+{
+ "bulk": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-bulk.html",
+ "methods": ["POST", "PUT"],
+ "url": {
+ "path": "/_bulk",
+ "paths": ["/_bulk", "/{index}/_bulk", "/{index}/{type}/_bulk"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "description" : "Default index for items which don't provide one"
+ },
+ "type": {
+ "type" : "string",
+ "description" : "Default document type for items which don't provide one"
+ }
+ },
+ "params": {
+ "consistency": {
+ "type" : "enum",
+ "options" : ["one", "quorum", "all"],
+ "description" : "Explicit write consistency setting for the operation"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the index after performing the operation"
+ },
+ "replication": {
+ "type" : "enum",
+ "options" : ["sync","async"],
+ "default" : "sync",
+ "description" : "Explicitely set the replication type"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "type": {
+ "type" : "string",
+ "description" : "Default document type for items which don't provide one"
+ }
+ }
+ },
+ "body": {
+ "description" : "The operation definition and data (action-data pairs), separated by newlines",
+ "required" : true,
+ "serialize" : "bulk"
+ }
+ }
+}
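The `serialize: bulk` hint above refers to the newline-delimited body format: each action line is optionally followed by a source line, and a real request must end with a trailing newline. A minimal illustrative body (index, type, IDs, and field names are made up):

```
{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
{ "field1" : "value1" }
{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
```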
diff --git a/rest-api-spec/api/cat.aliases.json b/rest-api-spec/api/cat.aliases.json
new file mode 100644
index 0000000..4df8e1e
--- /dev/null
+++ b/rest-api-spec/api/cat.aliases.json
@@ -0,0 +1,41 @@
+{
+ "cat.aliases": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-aliases.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/aliases",
+ "paths": ["/_cat/aliases", "/_cat/aliases/{name}"],
+ "parts": {
+ "name": {
+ "type" : "list",
+ "description" : "A comma-separated list of alias names to return"
+ }
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
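The `h`, `v`, and `help` parameters recur throughout the cat specs that follow. As a usage sketch (assuming a node listening on localhost:9200):

```
curl 'localhost:9200/_cat/aliases?v'              # verbose: include column headers
curl 'localhost:9200/_cat/aliases?h=alias,index'  # restrict output to these columns
curl 'localhost:9200/_cat/aliases?help'           # list the available columns
```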
diff --git a/rest-api-spec/api/cat.allocation.json b/rest-api-spec/api/cat.allocation.json
new file mode 100644
index 0000000..c2783e1
--- /dev/null
+++ b/rest-api-spec/api/cat.allocation.json
@@ -0,0 +1,46 @@
+{
+ "cat.allocation": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-allocation.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/allocation",
+ "paths": ["/_cat/allocation", "/_cat/allocation/{node_id}"],
+ "parts": {
+ "node_id": {
+ "type": "list",
+ "description": "A comma-separated list of node IDs or names to limit the returned information"
+ }
+ },
+ "params": {
+ "bytes": {
+ "type": "enum",
+ "description" : "The unit in which to display byte values",
+ "options": [ "b", "k", "m", "g" ]
+ },
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.count.json b/rest-api-spec/api/cat.count.json
new file mode 100644
index 0000000..6b5b7d7
--- /dev/null
+++ b/rest-api-spec/api/cat.count.json
@@ -0,0 +1,41 @@
+{
+ "cat.count": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-count.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/count",
+ "paths": ["/_cat/count", "/_cat/count/{index}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description": "A comma-separated list of index names to limit the returned information"
+ }
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.health.json b/rest-api-spec/api/cat.health.json
new file mode 100644
index 0000000..f8d73ae
--- /dev/null
+++ b/rest-api-spec/api/cat.health.json
@@ -0,0 +1,42 @@
+{
+ "cat.health": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-health.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/health",
+ "paths": ["/_cat/health"],
+ "parts": {
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "ts": {
+ "type": "boolean",
+ "description": "Set to false to disable timestamping",
+ "default": true
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.help.json b/rest-api-spec/api/cat.help.json
new file mode 100644
index 0000000..d7f2913
--- /dev/null
+++ b/rest-api-spec/api/cat.help.json
@@ -0,0 +1,20 @@
+{
+ "cat.help": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat",
+ "paths": ["/_cat"],
+ "parts": {
+ },
+ "params": {
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.indices.json b/rest-api-spec/api/cat.indices.json
new file mode 100644
index 0000000..55fb1c6
--- /dev/null
+++ b/rest-api-spec/api/cat.indices.json
@@ -0,0 +1,51 @@
+{
+ "cat.indices": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-indices.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/indices",
+ "paths": ["/_cat/indices", "/_cat/indices/{index}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description": "A comma-separated list of index names to limit the returned information"
+ }
+ },
+ "params": {
+ "bytes": {
+ "type": "enum",
+ "description" : "The unit in which to display byte values",
+ "options": [ "b", "k", "m", "g" ]
+ },
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "pri" : {
+ "type": "boolean",
+ "description": "Set to true to return stats only for primary shards",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.master.json b/rest-api-spec/api/cat.master.json
new file mode 100644
index 0000000..83b8821
--- /dev/null
+++ b/rest-api-spec/api/cat.master.json
@@ -0,0 +1,37 @@
+{
+ "cat.master": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-master.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/master",
+ "paths": ["/_cat/master"],
+ "parts": {
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.nodes.json b/rest-api-spec/api/cat.nodes.json
new file mode 100644
index 0000000..a458165
--- /dev/null
+++ b/rest-api-spec/api/cat.nodes.json
@@ -0,0 +1,37 @@
+{
+ "cat.nodes": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-nodes.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/nodes",
+ "paths": ["/_cat/nodes"],
+ "parts": {
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/api/cat.pending_tasks.json
new file mode 100644
index 0000000..7e2c6b4
--- /dev/null
+++ b/rest-api-spec/api/cat.pending_tasks.json
@@ -0,0 +1,37 @@
+{
+ "cat.pending_tasks": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-pending-tasks.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/pending_tasks",
+ "paths": ["/_cat/pending_tasks"],
+ "parts": {
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.recovery.json b/rest-api-spec/api/cat.recovery.json
new file mode 100644
index 0000000..70f69a5
--- /dev/null
+++ b/rest-api-spec/api/cat.recovery.json
@@ -0,0 +1,46 @@
+{
+ "cat.recovery": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-recovery.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/recovery",
+ "paths": ["/_cat/recovery", "/_cat/recovery/{index}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description": "A comma-separated list of index names to limit the returned information"
+ }
+ },
+ "params": {
+ "bytes": {
+ "type": "enum",
+ "description" : "The unit in which to display byte values",
+ "options": [ "b", "k", "m", "g" ]
+ },
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.shards.json b/rest-api-spec/api/cat.shards.json
new file mode 100644
index 0000000..df3825f
--- /dev/null
+++ b/rest-api-spec/api/cat.shards.json
@@ -0,0 +1,41 @@
+{
+ "cat.shards": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cat-shards.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/shards",
+ "paths": ["/_cat/shards", "/_cat/shards/{index}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description": "A comma-separated list of index names to limit the returned information"
+ }
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/api/cat.thread_pool.json
new file mode 100644
index 0000000..a0bd786
--- /dev/null
+++ b/rest-api-spec/api/cat.thread_pool.json
@@ -0,0 +1,42 @@
+{
+ "cat.thread_pool": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-thread-pool.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cat/thread_pool",
+ "paths": ["/_cat/thread_pool"],
+ "parts": {
+ },
+ "params": {
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "h": {
+ "type": "list",
+ "description" : "Comma-separated list of column names to display"
+ },
+ "help": {
+ "type": "boolean",
+ "description": "Return help information",
+ "default": false
+ },
+ "v": {
+ "type": "boolean",
+ "description": "Verbose mode. Display column headers",
+ "default": false
+ },
+ "full_id": {
+ "type": "boolean",
+ "description": "Enables displaying the complete node ids",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/clear_scroll.json b/rest-api-spec/api/clear_scroll.json
new file mode 100644
index 0000000..403d231
--- /dev/null
+++ b/rest-api-spec/api/clear_scroll.json
@@ -0,0 +1,21 @@
+{
+ "clear_scroll": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-request-scroll.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/_search/scroll/{scroll_id}",
+ "paths": ["/_search/scroll/{scroll_id}"],
+ "parts": {
+ "scroll_id": {
+ "type" : "list",
+ "required" : "true",
+ "description" : "A comma-separated list of scroll IDs to clear"
+ }
+ },
+ "params": {}
+ },
+ "body": {
+ "description": "A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter"
+ }
+ }
+}
diff --git a/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/api/cluster.get_settings.json
new file mode 100644
index 0000000..98e2b3b
--- /dev/null
+++ b/rest-api-spec/api/cluster.get_settings.json
@@ -0,0 +1,26 @@
+{
+ "cluster.get_settings": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-update-settings.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cluster/settings",
+ "paths": ["/_cluster/settings"],
+ "parts": {},
+ "params": {
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cluster.health.json b/rest-api-spec/api/cluster.health.json
new file mode 100644
index 0000000..9d679d6
--- /dev/null
+++ b/rest-api-spec/api/cluster.health.json
@@ -0,0 +1,55 @@
+{
+ "cluster.health": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-health.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cluster/health",
+ "paths": ["/_cluster/health", "/_cluster/health/{index}"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "description" : "Limit the information returned to a specific index"
+ }
+ },
+ "params": {
+ "level": {
+ "type" : "enum",
+ "options" : ["cluster","indices","shards"],
+ "default" : "cluster",
+ "description" : "Specify the level of detail for returned information"
+ },
+ "local": {
+ "type" : "boolean",
+ "description" : "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "wait_for_active_shards": {
+ "type" : "number",
+ "description" : "Wait until the specified number of shards is active"
+ },
+ "wait_for_nodes": {
+ "type" : "string",
+ "description" : "Wait until the specified number of nodes is available"
+ },
+ "wait_for_relocating_shards": {
+ "type" : "number",
+ "description" : "Wait until the specified number of relocating shards is finished"
+ },
+ "wait_for_status": {
+ "type" : "enum",
+ "options" : ["green","yellow","red"],
+ "default" : null,
+ "description" : "Wait until cluster is in a specific state"
+ }
+ }
+ },
+ "body": null
+ }
+}
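Because of the `wait_for_*` parameters, this endpoint can act as a blocking call, which is useful in scripts that must wait for the cluster to recover before proceeding. An illustrative invocation against a local node:

```
curl 'localhost:9200/_cluster/health?wait_for_status=yellow&timeout=30s'
```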
diff --git a/rest-api-spec/api/cluster.pending_tasks.json b/rest-api-spec/api/cluster.pending_tasks.json
new file mode 100644
index 0000000..09a5fe7
--- /dev/null
+++ b/rest-api-spec/api/cluster.pending_tasks.json
@@ -0,0 +1,23 @@
+{
+ "cluster.pending_tasks": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-pending.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cluster/pending_tasks",
+ "paths": ["/_cluster/pending_tasks"],
+ "parts": {
+ },
+ "params": {
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type": "time",
+ "description": "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cluster.put_settings.json b/rest-api-spec/api/cluster.put_settings.json
new file mode 100644
index 0000000..a832c4d
--- /dev/null
+++ b/rest-api-spec/api/cluster.put_settings.json
@@ -0,0 +1,20 @@
+{
+ "cluster.put_settings": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-update-settings.html",
+ "methods": ["PUT"],
+ "url": {
+ "path": "/_cluster/settings",
+ "paths": ["/_cluster/settings"],
+ "parts": {},
+ "params": {
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ }
+ }
+ },
+ "body": {
+ "description": "The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart)."
+ }
+ }
+}
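To illustrate the `transient`/`persistent` distinction in the body description: a `persistent` setting survives a full cluster restart, a `transient` one does not. A sketch of a request body (the settings and values are illustrative examples of dynamically updatable cluster settings):

```json
{
  "persistent" : {
    "discovery.zen.minimum_master_nodes" : 2
  },
  "transient" : {
    "cluster.routing.allocation.enable" : "none"
  }
}
```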
diff --git a/rest-api-spec/api/cluster.reroute.json b/rest-api-spec/api/cluster.reroute.json
new file mode 100644
index 0000000..9adf747
--- /dev/null
+++ b/rest-api-spec/api/cluster.reroute.json
@@ -0,0 +1,33 @@
+{
+ "cluster.reroute": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-reroute.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/_cluster/reroute",
+ "paths": ["/_cluster/reroute"],
+ "parts": {
+ },
+ "params": {
+ "dry_run": {
+ "type" : "boolean",
+ "description" : "Simulate the operation only and return the resulting state"
+ },
+ "filter_metadata": {
+ "type" : "boolean",
+ "description" : "Don't return cluster state metadata (default: false)"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ }
+ }
+ },
+ "body": {
+ "description" : "The definition of `commands` to perform (`move`, `cancel`, `allocate`)"
+ }
+ }
+}
diff --git a/rest-api-spec/api/cluster.state.json b/rest-api-spec/api/cluster.state.json
new file mode 100644
index 0000000..dcf0182
--- /dev/null
+++ b/rest-api-spec/api/cluster.state.json
@@ -0,0 +1,44 @@
+{
+ "cluster.state": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-state.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cluster/state",
+ "paths": [
+ "/_cluster/state",
+ "/_cluster/state/{metric}",
+ "/_cluster/state/{metric}/{index}"
+ ],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ },
+ "metric" : {
+ "type" : "list",
+ "options" : ["_all", "blocks", "metadata", "nodes", "routing_table", "master_node", "version"],
+ "description" : "Limit the information returned to the specified metrics"
+ }
+ },
+ "params": {
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ },
+ "master_timeout": {
+ "type": "time",
+ "description": "Specify timeout for connection to master"
+ },
+ "index_templates": {
+ "type": "list",
+ "description": "A comma separated list to return specific index templates when returning metadata"
+ },
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/cluster.stats.json b/rest-api-spec/api/cluster.stats.json
new file mode 100644
index 0000000..8964b58
--- /dev/null
+++ b/rest-api-spec/api/cluster.stats.json
@@ -0,0 +1,28 @@
+{
+ "cluster.stats": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-stats.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_cluster/stats",
+ "paths": ["/_cluster/stats", "/_cluster/stats/nodes/{node_id}"],
+ "parts": {
+ "node_id": {
+ "type" : "list",
+ "description" : "A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes"
+ }
+ },
+ "params": {
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ },
+ "human": {
+ "type": "boolean",
+ "description": "Whether to return time and byte values in human-readable format.",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/count.json b/rest-api-spec/api/count.json
new file mode 100644
index 0000000..90e5e9d
--- /dev/null
+++ b/rest-api-spec/api/count.json
@@ -0,0 +1,55 @@
+{
+ "count": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-count.html",
+ "methods": ["POST", "GET"],
+ "url": {
+ "path": "/_count",
+ "paths": ["/_count", "/{index}/_count", "/{index}/{type}/_count"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of indices to restrict the results"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of types to restrict the results"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "min_score": {
+ "type" : "number",
+ "description" : "Include only documents with a specific `_score` value in the result"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "source": {
+ "type" : "string",
+ "description" : "The URL-encoded query definition (instead of using the request body)"
+ }
+ }
+ },
+ "body": {
+ "description" : "A query to restrict the results specified with the Query DSL (optional)"
+ }
+ }
+}
diff --git a/rest-api-spec/api/count_percolate.json b/rest-api-spec/api/count_percolate.json
new file mode 100644
index 0000000..3ab4cd5
--- /dev/null
+++ b/rest-api-spec/api/count_percolate.json
@@ -0,0 +1,72 @@
+{
+ "count_percolate": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-percolate.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/{index}/{type}/_percolate/count",
+ "paths": ["/{index}/{type}/_percolate/count", "/{index}/{type}/{id}/_percolate/count"],
+ "parts": {
+ "index": {
+ "type": "string",
+ "required": true,
+ "description": "The index of the document being count percolated."
+ },
+ "type": {
+ "type": "string",
+ "required": true,
+ "description": "The type of the document being count percolated."
+ },
+ "id": {
+ "type": "string",
+ "required": false,
+ "description": "Substitute the document in the request body with a document that is known by the specified id. On top of the id, the index and type parameter will be used to retrieve the document from within the cluster."
+ }
+ },
+ "params": {
+ "routing": {
+ "type": "list",
+ "description": "A comma-separated list of specific routing values"
+ },
+ "preference": {
+ "type": "string",
+ "description": "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "ignore_unavailable": {
+ "type": "boolean",
+ "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type": "boolean",
+ "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type": "enum",
+ "options": ["open", "closed"],
+ "default": "open",
+ "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "percolate_index": {
+ "type": "string",
+ "description": "The index to count percolate the document into. Defaults to index."
+ },
+ "percolate_type": {
+ "type": "string",
+ "description": "The type to count percolate document into. Defaults to type."
+ },
+ "version": {
+ "type": "number",
+ "description": "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type": "enum",
+ "options": ["internal", "external"],
+ "description": "Specific version type"
+ }
+ }
+ },
+ "body": {
+ "description": "The count percolator request definition using the percolate DSL",
+ "required": false
+ }
+ }
+}
diff --git a/rest-api-spec/api/delete.json b/rest-api-spec/api/delete.json
new file mode 100644
index 0000000..6c168c7
--- /dev/null
+++ b/rest-api-spec/api/delete.json
@@ -0,0 +1,66 @@
+{
+ "delete": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-delete.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/{index}/{type}/{id}",
+ "paths": ["/{index}/{type}/{id}"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document"
+ }
+ },
+ "params": {
+ "consistency": {
+ "type" : "enum",
+ "options" : ["one", "quorum", "all"],
+ "description" : "Specific write consistency setting for the operation"
+ },
+ "parent": {
+ "type" : "string",
+ "description" : "ID of parent document"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the index after performing the operation"
+ },
+ "replication": {
+ "type" : "enum",
+ "options" : ["sync","async"],
+ "default" : "sync",
+ "description" : "Specific replication type"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "version" : {
+ "type" : "number",
+ "description" : "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type" : "enum",
+ "options" : ["internal","external"],
+ "description" : "Specific version type"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/delete_by_query.json b/rest-api-spec/api/delete_by_query.json
new file mode 100644
index 0000000..3480b7e
--- /dev/null
+++ b/rest-api-spec/api/delete_by_query.json
@@ -0,0 +1,81 @@
+{
+ "delete_by_query": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-delete-by-query.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/{index}/_query",
+ "paths": ["/{index}/_query", "/{index}/{type}/_query"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "required": true,
+ "description" : "A comma-separated list of indices to restrict the operation; use `_all` to perform the operation on all indices"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of types to restrict the operation"
+ }
+ },
+ "params": {
+ "analyzer": {
+ "type" : "string",
+ "description" : "The analyzer to use for the query string"
+ },
+ "consistency": {
+ "type" : "enum",
+ "options" : ["one", "quorum", "all"],
+ "description" : "Specific write consistency setting for the operation"
+ },
+ "default_operator": {
+ "type" : "enum",
+ "options" : ["AND","OR"],
+ "default" : "OR",
+ "description" : "The default operator for query string query (AND or OR)"
+ },
+ "df": {
+ "type" : "string",
+ "description" : "The field to use as default where no field prefix is given in the query string"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "replication": {
+ "type" : "enum",
+ "options" : ["sync","async"],
+ "default" : "sync",
+ "description" : "Specific replication type"
+ },
+ "q": {
+ "type" : "string",
+ "description" : "Query in the Lucene query string syntax"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "source": {
+ "type" : "string",
+ "description" : "The URL-encoded query definition (instead of using the request body)"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ }
+ }
+ },
+ "body": {
+ "description" : "A query to restrict the operation specified with the Query DSL"
+ }
+ }
+}
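A usage sketch combining the required `index` part with a request body (index, type, and field values are illustrative):

```
curl -XDELETE 'localhost:9200/twitter/tweet/_query' -d '{
  "query" : {
    "term" : { "user" : "kimchy" }
  }
}'
```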
diff --git a/rest-api-spec/api/exists.json b/rest-api-spec/api/exists.json
new file mode 100644
index 0000000..b7ba649
--- /dev/null
+++ b/rest-api-spec/api/exists.json
@@ -0,0 +1,50 @@
+{
+ "exists": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-get.html",
+ "methods": ["HEAD"],
+ "url": {
+ "path": "/{index}/{type}/{id}",
+ "paths": ["/{index}/{type}/{id}"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document (use `_all` to fetch the first document matching the ID across all types)"
+ }
+ },
+ "params": {
+ "parent": {
+ "type" : "string",
+ "description" : "The ID of the parent document"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "realtime": {
+ "type" : "boolean",
+ "description" : "Specify whether to perform the operation in realtime or search mode"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the shard containing the document before performing the operation"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/explain.json b/rest-api-spec/api/explain.json
new file mode 100644
index 0000000..bee897b
--- /dev/null
+++ b/rest-api-spec/api/explain.json
@@ -0,0 +1,94 @@
+{
+ "explain": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-explain.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/{index}/{type}/{id}/_explain",
+ "paths": ["/{index}/{type}/{id}/_explain"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document"
+ }
+ },
+ "params": {
+ "analyze_wildcard": {
+ "type" : "boolean",
+ "description" : "Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false)"
+ },
+ "analyzer": {
+ "type" : "string",
+ "description" : "The analyzer for the query string query"
+ },
+ "default_operator": {
+ "type" : "enum",
+ "options" : ["AND","OR"],
+ "default" : "OR",
+ "description" : "The default operator for query string query (AND or OR)"
+ },
+ "df": {
+ "type" : "string",
+ "description" : "The default field for query string query (default: _all)"
+ },
+ "fields": {
+ "type": "list",
+ "description" : "A comma-separated list of fields to return in the response"
+ },
+ "lenient": {
+ "type" : "boolean",
+ "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored"
+ },
+ "lowercase_expanded_terms": {
+ "type" : "boolean",
+ "description" : "Specify whether query terms should be lowercased"
+ },
+ "parent": {
+ "type" : "string",
+ "description" : "The ID of the parent document"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "q": {
+ "type" : "string",
+ "description" : "Query in the Lucene query string syntax"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "source": {
+ "type" : "string",
+ "description" : "The URL-encoded query definition (instead of using the request body)"
+ },
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or a list of fields to return"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "A list of fields to exclude from the returned _source field"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "A list of fields to extract and return from the _source field"
+ }
+ }
+ },
+ "body": {
+ "description" : "The query definition using the Query DSL"
+ }
+ }
+}
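A usage sketch: the query can come from the body (Query DSL) or, as here, from the `q` parameter in Lucene query string syntax (index, type, ID, and field are illustrative):

```
curl 'localhost:9200/twitter/tweet/1/_explain?q=message:search'
```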
diff --git a/rest-api-spec/api/get.json b/rest-api-spec/api/get.json
new file mode 100644
index 0000000..888f528
--- /dev/null
+++ b/rest-api-spec/api/get.json
@@ -0,0 +1,75 @@
+{
+ "get": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-get.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/{index}/{type}/{id}",
+ "paths": ["/{index}/{type}/{id}"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document (use `_all` to fetch the first document matching the ID across all types)"
+ }
+ },
+ "params": {
+ "fields": {
+ "type": "list",
+ "description" : "A comma-separated list of fields to return in the response"
+ },
+ "parent": {
+ "type" : "string",
+ "description" : "The ID of the parent document"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "realtime": {
+ "type" : "boolean",
+ "description" : "Specify whether to perform the operation in realtime or search mode"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the shard containing the document before performing the operation"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or a list of fields to return"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "A list of fields to exclude from the returned _source field"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "A list of fields to extract and return from the _source field"
+ },
+ "version" : {
+ "type" : "number",
+ "description" : "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type" : "enum",
+ "options" : ["internal","external"],
+ "description" : "Specific version type"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/get_source.json b/rest-api-spec/api/get_source.json
new file mode 100644
index 0000000..8f37a77
--- /dev/null
+++ b/rest-api-spec/api/get_source.json
@@ -0,0 +1,71 @@
+{
+ "get_source": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-get.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/{index}/{type}/{id}/_source",
+ "paths": ["/{index}/{type}/{id}/_source"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document; use `_all` to fetch the first document matching the ID across all types"
+ }
+ },
+ "params": {
+ "parent": {
+ "type" : "string",
+ "description" : "The ID of the parent document"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "realtime": {
+ "type" : "boolean",
+ "description" : "Specify whether to perform the operation in realtime or search mode"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the shard containing the document before performing the operation"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or a list of fields to return"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "A list of fields to exclude from the returned _source field"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "A list of fields to extract and return from the _source field"
+ },
+ "version" : {
+ "type" : "number",
+ "description" : "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type" : "enum",
+ "options" : ["internal","external"],
+ "description" : "Specific version type"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/index.json b/rest-api-spec/api/index.json
new file mode 100644
index 0000000..1627bdd
--- /dev/null
+++ b/rest-api-spec/api/index.json
@@ -0,0 +1,82 @@
+{
+ "index": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-index_.html",
+ "methods": ["POST", "PUT"],
+ "url": {
+ "path": "/{index}/{type}",
+ "paths": ["/{index}/{type}", "/{index}/{type}/{id}"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "description" : "Document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document"
+ }
+ },
+ "params": {
+ "consistency": {
+ "type" : "enum",
+ "options" : ["one", "quorum", "all"],
+ "description" : "Explicit write consistency setting for the operation"
+ },
+ "op_type": {
+ "type" : "enum",
+ "options" : ["index", "create"],
+ "default" : "index",
+ "description" : "Explicit operation type"
+ },
+ "parent": {
+ "type" : "string",
+ "description" : "ID of the parent document"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the index after performing the operation"
+ },
+ "replication": {
+ "type" : "enum",
+ "options" : ["sync","async"],
+ "default" : "sync",
+ "description" : "Specific replication type"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "timestamp": {
+ "type" : "time",
+ "description" : "Explicit timestamp for the document"
+ },
+ "ttl": {
+ "type" : "duration",
+ "description" : "Expiration time for the document"
+ },
+ "version" : {
+ "type" : "number",
+ "description" : "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type" : "enum",
+ "options" : ["internal","external"],
+ "description" : "Specific version type"
+ }
+ }
+ },
+ "body": {
+ "description" : "The document",
+ "required" : true
+ }
+ }
+}
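The `op_type=create` option above gives put-if-absent semantics: the request fails if a document with the given ID already exists, instead of overwriting it. A usage sketch (index, type, ID, and document are illustrative):

```
curl -XPUT 'localhost:9200/test/type1/1?op_type=create' -d '{
  "user" : "kimchy"
}'
```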
diff --git a/rest-api-spec/api/indices.analyze.json b/rest-api-spec/api/indices.analyze.json
new file mode 100644
index 0000000..f9db551
--- /dev/null
+++ b/rest-api-spec/api/indices.analyze.json
@@ -0,0 +1,55 @@
+{
+ "indices.analyze": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-analyze.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_analyze",
+ "paths": ["/_analyze", "/{index}/_analyze"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "description" : "The name of the index to scope the operation"
+ }
+ },
+ "params": {
+ "analyzer": {
+ "type" : "string",
+ "description" : "The name of the analyzer to use"
+ },
+ "field": {
+ "type" : "string",
+ "description" : "Use the analyzer configured for this field (instead of passing the analyzer name)"
+ },
+ "filters": {
+ "type" : "list",
+ "description" : "A comma-separated list of filters to use for the analysis"
+ },
+ "index": {
+ "type" : "string",
+ "description" : "The name of the index to scope the operation"
+ },
+ "prefer_local": {
+ "type" : "boolean",
+ "description" : "With `true`, specify that a local shard should be used if available, with `false`, use a random shard (default: true)"
+ },
+ "text": {
+ "type" : "string",
+ "description" : "The text on which the analysis should be performed (when request body is not used)"
+ },
+ "tokenizer": {
+ "type" : "string",
+ "description" : "The name of the tokenizer to use for the analysis"
+ },
+ "format": {
+ "type": "enum",
+ "options" : ["detailed","text"],
+ "default": "detailed",
+ "description": "Format of the output"
+ }
+ }
+ },
+ "body": {
+ "description" : "The text on which the analysis should be performed"
+ }
+ }
+}
diff --git a/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/api/indices.clear_cache.json
new file mode 100644
index 0000000..36bf1ff
--- /dev/null
+++ b/rest-api-spec/api/indices.clear_cache.json
@@ -0,0 +1,73 @@
+{
+ "indices.clear_cache": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-clearcache.html",
+ "methods": ["POST", "GET"],
+ "url": {
+ "path": "/_cache/clear",
+ "paths": ["/_cache/clear", "/{index}/_cache/clear"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index name to limit the operation"
+ }
+ },
+ "params": {
+ "field_data": {
+ "type" : "boolean",
+ "description" : "Clear field data"
+ },
+ "fielddata": {
+ "type" : "boolean",
+ "description" : "Clear field data"
+ },
+ "fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields to clear when using the `field_data` parameter (default: all)"
+ },
+ "filter": {
+ "type" : "boolean",
+ "description" : "Clear filter caches"
+ },
+ "filter_cache": {
+ "type" : "boolean",
+ "description" : "Clear filter caches"
+ },
+ "filter_keys": {
+ "type" : "boolean",
+ "description" : "A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all)"
+ },
+ "id": {
+ "type" : "boolean",
+ "description" : "Clear ID caches for parent/child"
+ },
+ "id_cache": {
+ "type" : "boolean",
+ "description" : "Clear ID caches for parent/child"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index name to limit the operation"
+ },
+ "recycler": {
+ "type" : "boolean",
+ "description" : "Clear the recycler cache"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.close.json b/rest-api-spec/api/indices.close.json
new file mode 100644
index 0000000..d26afac
--- /dev/null
+++ b/rest-api-spec/api/indices.close.json
@@ -0,0 +1,42 @@
+{
+ "indices.close": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-open-close.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/{index}/_close",
+ "paths": ["/{index}/_close"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.create.json b/rest-api-spec/api/indices.create.json
new file mode 100644
index 0000000..33c02f2
--- /dev/null
+++ b/rest-api-spec/api/indices.create.json
@@ -0,0 +1,30 @@
+{
+ "indices.create": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-create-index.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/{index}",
+ "paths": ["/{index}"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": {
+ "description" : "The configuration for the index (`settings` and `mappings`)"
+ }
+ }
+}
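A usage sketch for the body described above, supplying `settings` inline (index name and shard counts are illustrative):

```
curl -XPUT 'localhost:9200/twitter/' -d '{
  "settings" : {
    "number_of_shards" : 3,
    "number_of_replicas" : 2
  }
}'
```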
diff --git a/rest-api-spec/api/indices.delete.json b/rest-api-spec/api/indices.delete.json
new file mode 100644
index 0000000..aa904ca
--- /dev/null
+++ b/rest-api-spec/api/indices.delete.json
@@ -0,0 +1,27 @@
+{
+ "indices.delete": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-delete-index.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/{index}",
+ "paths": ["/{index}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of indices to delete; use `_all` or `*` string to delete all indices"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.delete_alias.json b/rest-api-spec/api/indices.delete_alias.json
new file mode 100644
index 0000000..0b26ad5
--- /dev/null
+++ b/rest-api-spec/api/indices.delete_alias.json
@@ -0,0 +1,33 @@
+{
+ "indices.delete_alias": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-aliases.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/{index}/_alias/{name}",
+ "paths": ["/{index}/_alias/{name}", "/{index}/_aliases/{name}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of index names (supports wildcards); use `_all` for all indices"
+ },
+ "name": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices."
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit timestamp for the document"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.delete_mapping.json b/rest-api-spec/api/indices.delete_mapping.json
new file mode 100644
index 0000000..e1cce4b
--- /dev/null
+++ b/rest-api-spec/api/indices.delete_mapping.json
@@ -0,0 +1,29 @@
+{
+ "indices.delete_mapping": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-delete-mapping.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/{index}/{type}/_mapping",
+ "paths": ["/{index}/{type}/_mapping", "/{index}/{type}", "/{index}/_mapping/{type}", "/{index}/{type}/_mappings", "/{index}/_mappings/{type}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of index names (supports wildcards); use `_all` for all indices"
+ },
+ "type": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of document types to delete (supports wildcards); use `_all` to delete all document types in the specified indices."
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.delete_template.json b/rest-api-spec/api/indices.delete_template.json
new file mode 100644
index 0000000..f012f0a
--- /dev/null
+++ b/rest-api-spec/api/indices.delete_template.json
@@ -0,0 +1,28 @@
+{
+ "indices.delete_template": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-templates.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/_template/{name}",
+ "paths": ["/_template/{name}"],
+ "parts": {
+ "name": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the template"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.delete_warmer.json b/rest-api-spec/api/indices.delete_warmer.json
new file mode 100644
index 0000000..f05a144
--- /dev/null
+++ b/rest-api-spec/api/indices.delete_warmer.json
@@ -0,0 +1,33 @@
+{
+ "indices.delete_warmer": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-warmers.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/{index}/_warmer/{name}",
+ "paths": ["/{index}/_warmer/{name}", "/{index}/_warmers/{name}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of index names to delete warmers from (supports wildcards); use `_all` to perform the operation on all indices."
+ },
+ "name" : {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of warmer names to delete (supports wildcards); use `_all` to delete all warmers in the specified indices. You must specify a name either in the uri or in the parameters."
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ },
+ "name" : {
+ "type" : "list",
+ "description" : "A comma-separated list of warmer names to delete (supports wildcards); use `_all` to delete all warmers in the specified indices. You must specify a name either in the uri or in the parameters."
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.exists.json b/rest-api-spec/api/indices.exists.json
new file mode 100644
index 0000000..a0f4a8f
--- /dev/null
+++ b/rest-api-spec/api/indices.exists.json
@@ -0,0 +1,38 @@
+{
+ "indices.exists": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-get-settings.html",
+ "methods": ["HEAD"],
+ "url": {
+ "path": "/{index}",
+ "paths": ["/{index}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of indices to check"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
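
Because the method is HEAD, there is no response body; existence is read off the status code. A sketch (same assumptions: local 1.x node, `requests`, illustrative index name):

    import requests

    # HEAD /{index} -> 200 if the index exists, 404 if it does not
    resp = requests.head("http://localhost:9200/my_index")
    print(resp.status_code == 200)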
diff --git a/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/api/indices.exists_alias.json
new file mode 100644
index 0000000..2ee8789
--- /dev/null
+++ b/rest-api-spec/api/indices.exists_alias.json
@@ -0,0 +1,41 @@
+{
+ "indices.exists_alias": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-aliases.html",
+ "methods": ["HEAD"],
+ "url": {
+ "path": "/_alias/{name}",
+ "paths": ["/_alias/{name}", "/{index}/_alias/{name}", "/{index}/_alias"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to filter aliases"
+ },
+ "name": {
+ "type" : "list",
+ "description" : "A comma-separated list of alias names to return"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : ["open", "closed"],
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.exists_template.json b/rest-api-spec/api/indices.exists_template.json
new file mode 100644
index 0000000..0c0b74e
--- /dev/null
+++ b/rest-api-spec/api/indices.exists_template.json
@@ -0,0 +1,24 @@
+{
+ "indices.exists_template": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-templates.html",
+ "methods": ["HEAD"],
+ "url": {
+ "path": "/_template/{name}",
+ "paths": ["/_template/{name}"],
+ "parts": {
+ "name": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the template"
+ }
+ },
+ "params": {
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.exists_type.json b/rest-api-spec/api/indices.exists_type.json
new file mode 100644
index 0000000..da722aa
--- /dev/null
+++ b/rest-api-spec/api/indices.exists_type.json
@@ -0,0 +1,43 @@
+{
+ "indices.exists_type": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-types-exists.html",
+ "methods": ["HEAD"],
+ "url": {
+ "path": "/{index}/{type}",
+ "paths": ["/{index}/{type}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of index names; use `_all` to check the types across all indices"
+ },
+ "type": {
+ "type" : "list",
+ "required" : true,
+ "description" : "A comma-separated list of document types to check"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.flush.json b/rest-api-spec/api/indices.flush.json
new file mode 100644
index 0000000..b0f16ca
--- /dev/null
+++ b/rest-api-spec/api/indices.flush.json
@@ -0,0 +1,41 @@
+{
+ "indices.flush": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-flush.html",
+ "methods": ["POST", "GET"],
+ "url": {
+ "path": "/_flush",
+ "paths": ["/_flush", "/{index}/_flush"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string for all indices"
+ }
+ },
+ "params": {
+ "force": {
+ "type" : "boolean",
+ "description" : "Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal)"
+ },
+ "full": {
+ "type" : "boolean",
+ "description" : "If set to true a new index writer is created and settings that have been changed related to the index writer will be refreshed. Note: if a full flush is required for a setting to take effect this will be part of the settings update process and it not required to be executed by the user. (This setting can be considered as internal)"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.get_alias.json b/rest-api-spec/api/indices.get_alias.json
new file mode 100644
index 0000000..0a76bdf
--- /dev/null
+++ b/rest-api-spec/api/indices.get_alias.json
@@ -0,0 +1,41 @@
+{
+ "indices.get_alias": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-aliases.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_alias/",
+ "paths": [ "/_alias", "/_alias/{name}", "/{index}/_alias/{name}", "/{index}/_alias"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to filter aliases"
+ },
+ "name": {
+ "type" : "list",
+ "description" : "A comma-separated list of alias names to return"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.get_aliases.json b/rest-api-spec/api/indices.get_aliases.json
new file mode 100644
index 0000000..f79aed9
--- /dev/null
+++ b/rest-api-spec/api/indices.get_aliases.json
@@ -0,0 +1,31 @@
+{
+ "indices.get_aliases": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-aliases.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_aliases",
+ "paths": ["/_aliases", "/{index}/_aliases", "/{index}/_aliases/{name}", "/_aliases/{name}" ],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to filter aliases"
+ },
+ "name": {
+ "type" : "list",
+ "description" : "A comma-separated list of alias names to filter"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.get_field_mapping.json b/rest-api-spec/api/indices.get_field_mapping.json
new file mode 100644
index 0000000..20728b6
--- /dev/null
+++ b/rest-api-spec/api/indices.get_field_mapping.json
@@ -0,0 +1,50 @@
+{
+ "indices.get_field_mapping": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-get-field-mapping.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_mapping/field/{field}",
+ "paths": ["/_mapping/field/{field}", "/{index}/_mapping/field/{field}", "/_mapping/{type}/field/{field}", "/{index}/_mapping/{type}/field/{field}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types"
+ },
+ "field": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields",
+ "required" : true
+ }
+ },
+ "params": {
+ "include_defaults": {
+ "type" : "boolean",
+ "description" : "Whether the default mapping values should be returned as well"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/api/indices.get_mapping.json
new file mode 100644
index 0000000..0e6f011
--- /dev/null
+++ b/rest-api-spec/api/indices.get_mapping.json
@@ -0,0 +1,41 @@
+{
+ "indices.get_mapping": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-get-mapping.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_mapping",
+ "paths": ["/_mapping", "/{index}/_mapping", "/_mapping/{type}", "/{index}/_mapping/{type}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
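
A sketch of fetching a mapping (same assumptions as the earlier examples); both `index` and `type` are optional parts, so omitting them widens the scope:

    import requests

    # GET /{index}/_mapping/{type}; drop the type (or the index) for a broader view
    resp = requests.get("http://localhost:9200/my_index/_mapping/doc")
    print(resp.json())  # keyed by index name, then "mappings"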
diff --git a/rest-api-spec/api/indices.get_settings.json b/rest-api-spec/api/indices.get_settings.json
new file mode 100644
index 0000000..d9b8d40
--- /dev/null
+++ b/rest-api-spec/api/indices.get_settings.json
@@ -0,0 +1,45 @@
+{
+ "indices.get_settings": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-get-mapping.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_settings",
+ "paths": ["/_settings", "/{index}/_settings", "/{index}/_settings/{name}", "/_settings/{name}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ },
+ "name": {
+ "type" : "list",
+ "description" : "The name of the settings that should be included"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : ["open","closed"],
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
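
The optional `name` part filters which settings are returned, and `flat_settings` flattens the nested keys. A sketch under the same assumptions; the settings pattern is illustrative:

    import requests

    # GET /{index}/_settings/{name} with flat_settings=true
    resp = requests.get(
        "http://localhost:9200/my_index/_settings/index.number_of_*",
        params={"flat_settings": "true"},
    )
    print(resp.json())  # e.g. {"my_index": {"settings": {"index.number_of_shards": "1", ...}}}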
diff --git a/rest-api-spec/api/indices.get_template.json b/rest-api-spec/api/indices.get_template.json
new file mode 100644
index 0000000..0a4a008
--- /dev/null
+++ b/rest-api-spec/api/indices.get_template.json
@@ -0,0 +1,28 @@
+{
+ "indices.get_template": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-templates.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_template/{name}",
+ "paths": ["/_template", "/_template/{name}"],
+ "parts": {
+ "name": {
+ "type" : "string",
+ "required" : false,
+ "description" : "The name of the template"
+ }
+ },
+ "params": {
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.get_warmer.json b/rest-api-spec/api/indices.get_warmer.json
new file mode 100644
index 0000000..7d5fd49
--- /dev/null
+++ b/rest-api-spec/api/indices.get_warmer.json
@@ -0,0 +1,45 @@
+{
+ "indices.get_warmer": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-warmers.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_warmer",
+ "paths": [ "/_warmer", "/{index}/_warmer", "/{index}/_warmer/{name}", "/_warmer/{name}", "/{index}/{type}/_warmer/{name}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to restrict the operation; use `_all` to perform the operation on all indices"
+ },
+ "name": {
+ "type" : "list",
+ "description" : "The name of the warmer (supports wildcards); leave empty to get all warmers"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.open.json b/rest-api-spec/api/indices.open.json
new file mode 100644
index 0000000..7d1c275
--- /dev/null
+++ b/rest-api-spec/api/indices.open.json
@@ -0,0 +1,42 @@
+{
+ "indices.open": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-open-close.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/{index}/_open",
+ "paths": ["/{index}/_open"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "closed",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.optimize.json b/rest-api-spec/api/indices.optimize.json
new file mode 100644
index 0000000..fb6bd86
--- /dev/null
+++ b/rest-api-spec/api/indices.optimize.json
@@ -0,0 +1,52 @@
+{
+ "indices.optimize": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-optimize.html",
+ "methods": ["POST", "GET"],
+ "url": {
+ "path": "/_optimize",
+ "paths": ["/_optimize", "/{index}/_optimize"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ }
+ },
+ "params": {
+ "flush": {
+ "type" : "boolean",
+ "description" : "Specify whether the index should be flushed after performing the operation (default: true)"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "max_num_segments": {
+ "type" : "number",
+ "description" : "The number of segments the index should be merged into (default: dynamic)"
+ },
+ "only_expunge_deletes": {
+ "type" : "boolean",
+ "description" : "Specify whether the operation should only expunge deleted documents"
+ },
+ "operation_threading": {
+ "description" : "TODO: ?"
+ },
+ "wait_for_merge": {
+ "type" : "boolean",
+ "description" : "Specify whether the request should block until the merge process is finished (default: true)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/api/indices.put_alias.json
new file mode 100644
index 0000000..eb78de6
--- /dev/null
+++ b/rest-api-spec/api/indices.put_alias.json
@@ -0,0 +1,35 @@
+{
+ "indices.put_alias": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-aliases.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/{index}/_alias/{name}",
+ "paths": ["/{index}/_alias/{name}", "/_alias/{name}", "/{index}/_aliases/{name}", "/_aliases/{name}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names the alias should point to (supports wildcards); use `_all` or omit to perform the operation on all indices."
+ },
+ "name": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the alias to be created or updated"
+ }
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit timestamp for the document"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": {
+ "description" : "The settings for the alias, such as `routing` or `filter`",
+ "required" : false
+ }
+ }
+}
diff --git a/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/api/indices.put_mapping.json
new file mode 100644
index 0000000..891bca0
--- /dev/null
+++ b/rest-api-spec/api/indices.put_mapping.json
@@ -0,0 +1,53 @@
+{
+ "indices.put_mapping": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-put-mapping.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/{index}/{type}/_mapping",
+ "paths": ["/{index}/{type}/_mapping", "/{index}/_mapping/{type}", "/_mapping/{type}", "/{index}/{type}/_mappings", "/{index}/_mappings/{type}", "/_mappings/{type}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices."
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the document type"
+ }
+ },
+ "params": {
+ "ignore_conflicts": {
+ "type" : "boolean",
+ "description" : "Specify whether to ignore conflicts while updating the mapping (default: false)"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ },
+ "body": {
+ "description" : "The mapping definition",
+ "required" : true
+ }
+ }
+}
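
Updating a mapping is a PUT with the mapping definition as the required body. A sketch, same assumptions; the field being added is illustrative:

    import requests

    # PUT /{index}/_mapping/{type}; body is the (type-wrapped) mapping definition
    resp = requests.put(
        "http://localhost:9200/my_index/_mapping/doc",
        json={"doc": {"properties": {"created": {"type": "date"}}}},
    )
    print(resp.json())  # {"acknowledged": true}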
diff --git a/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/api/indices.put_settings.json
new file mode 100644
index 0000000..81b9a5c
--- /dev/null
+++ b/rest-api-spec/api/indices.put_settings.json
@@ -0,0 +1,44 @@
+{
+ "indices.put_settings": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-update-settings.html",
+ "methods": ["PUT"],
+ "url": {
+ "path": "/_settings",
+ "paths": ["/_settings", "/{index}/_settings"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type": "time",
+ "description": "Specify timeout for connection to master"
+ },
+ "ignore_unavailable": {
+ "type": "boolean",
+ "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type": "boolean",
+ "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type": "enum",
+ "options": ["open", "closed"],
+ "default": "open",
+ "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ }
+ }
+ },
+ "body": {
+ "description": "The index settings to be updated",
+ "required": true
+ }
+ }
+}
diff --git a/rest-api-spec/api/indices.put_template.json b/rest-api-spec/api/indices.put_template.json
new file mode 100644
index 0000000..f93dfd0
--- /dev/null
+++ b/rest-api-spec/api/indices.put_template.json
@@ -0,0 +1,39 @@
+{
+ "indices.put_template": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-templates.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/_template/{name}",
+ "paths": ["/_template/{name}"],
+ "parts": {
+ "name": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the template"
+ }
+ },
+ "params": {
+ "order": {
+ "type" : "number",
+ "description" : "The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers)"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ },
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ }
+ }
+ },
+ "body": {
+ "description" : "The template definition",
+ "required" : true
+ }
+ }
+}
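
A sketch of registering a template (same assumptions; template name and pattern are illustrative). `order` is passed as a query param, matching the spec above:

    import requests

    # PUT /_template/{name}; `template` in the body is the index-name pattern
    resp = requests.put(
        "http://localhost:9200/_template/logs_template",
        params={"order": 1},
        json={"template": "logs-*", "settings": {"number_of_shards": 1}},
    )
    print(resp.json())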
diff --git a/rest-api-spec/api/indices.put_warmer.json b/rest-api-spec/api/indices.put_warmer.json
new file mode 100644
index 0000000..03f97ef
--- /dev/null
+++ b/rest-api-spec/api/indices.put_warmer.json
@@ -0,0 +1,49 @@
+{
+ "indices.put_warmer": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-warmers.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/{index}/_warmer/{name}",
+ "paths": ["/_warmer/{name}", "/{index}/_warmer/{name}", "/{index}/{type}/_warmer/{name}", "/_warmers/{name}", "/{index}/_warmers/{name}", "/{index}/{type}/_warmers/{name}"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to register the warmer for; use `_all` or omit to perform the operation on all indices"
+ },
+ "name": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the warmer"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types to register the warmer for; leave empty to perform the operation on all types"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed) in the search request to warm"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices in the search request to warm. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both, in the search request to warm."
+ }
+ }
+ },
+ "body": {
+ "description" : "The search request definition for the warmer (query, filters, facets, sorting, etc)",
+ "required" : true
+ }
+ }
+}
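
The warmer body is an ordinary search request that will be replayed to warm new segments. A sketch under the same assumptions; warmer name and query are illustrative:

    import requests

    # PUT /{index}/_warmer/{name}; body is the search request to run on warm-up
    resp = requests.put(
        "http://localhost:9200/my_index/_warmer/warm_all",
        json={"query": {"match_all": {}}},
    )
    print(resp.json())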
diff --git a/rest-api-spec/api/indices.refresh.json b/rest-api-spec/api/indices.refresh.json
new file mode 100644
index 0000000..18e5ed2
--- /dev/null
+++ b/rest-api-spec/api/indices.refresh.json
@@ -0,0 +1,41 @@
+{
+ "indices.refresh": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-refresh.html",
+ "methods": ["POST", "GET"],
+ "url": {
+ "path": "/_refresh",
+ "paths": ["/_refresh", "/{index}/_refresh"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "force": {
+ "type" : "boolean",
+ "description" : "Force a refresh even if not required",
+ "default": false
+ },
+ "operation_threading": {
+ "description" : "TODO: ?"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.segments.json b/rest-api-spec/api/indices.segments.json
new file mode 100644
index 0000000..77e3a18
--- /dev/null
+++ b/rest-api-spec/api/indices.segments.json
@@ -0,0 +1,41 @@
+{
+ "indices.segments": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-segments.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_segments",
+ "paths": ["/_segments", "/{index}/_segments"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "human": {
+ "type": "boolean",
+ "description": "Whether to return time and byte values in human-readable format.",
+ "default": false
+ },
+ "operation_threading": {
+ "description" : "TODO: ?"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.snapshot_index.json b/rest-api-spec/api/indices.snapshot_index.json
new file mode 100644
index 0000000..41cd7ba
--- /dev/null
+++ b/rest-api-spec/api/indices.snapshot_index.json
@@ -0,0 +1,33 @@
+{
+ "indices.snapshot_index": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/indices-gateway-snapshot.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/_gateway/snapshot",
+ "paths": ["/_gateway/snapshot", "/{index}/_gateway/snapshot"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string for all indices"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.stats.json b/rest-api-spec/api/indices.stats.json
new file mode 100644
index 0000000..d4337e1
--- /dev/null
+++ b/rest-api-spec/api/indices.stats.json
@@ -0,0 +1,60 @@
+{
+ "indices.stats": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-stats.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_stats",
+ "paths": [
+ "/_stats",
+ "/_stats/{metric}",
+ "/{index}/_stats",
+ "/{index}/_stats/{metric}"
+ ],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ },
+ "metric" : {
+ "type" : "list",
+ "options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "id_cache", "indexing", "merge", "percolate", "refresh", "search", "segments", "store", "warmer"],
+ "description" : "Limit the information returned the specific metrics."
+ }
+ },
+ "params": {
+ "completion_fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards)"
+ },
+ "fielddata_fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields for `fielddata` index metric (supports wildcards)"
+ },
+ "fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards)"
+ },
+ "groups": {
+ "type" : "boolean",
+ "description" : "A comma-separated list of search groups for `search` index metric"
+ },
+ "human": {
+ "type": "boolean",
+ "description": "Whether to return time and byte values in human-readable format.",
+ "default": false
+ },
+ "level": {
+ "type" : "enum",
+ "description": "Return stats aggregated at cluster, index or shard level",
+ "options" : ["cluster", "indices", "shards"],
+ "default" : "indices"
+ },
+ "types" : {
+ "type" : "list",
+ "description" : "A comma-separated list of document types for the `indexing` index metric"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.status.json b/rest-api-spec/api/indices.status.json
new file mode 100644
index 0000000..177de04
--- /dev/null
+++ b/rest-api-spec/api/indices.status.json
@@ -0,0 +1,49 @@
+{
+ "indices.status": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-status.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_status",
+ "paths": ["/_status", "/{index}/_status"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "human": {
+ "type": "boolean",
+ "description": "Whether to return time and byte values in human-readable format.",
+ "default": false
+ },
+ "operation_threading": {
+ "description" : "TODO: ?"
+ },
+ "recovery": {
+ "type" : "boolean",
+ "description" : "Return information about shard recovery"
+ },
+ "snapshot": {
+ "type" : "boolean",
+ "description" : "TODO: ?"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/indices.update_aliases.json b/rest-api-spec/api/indices.update_aliases.json
new file mode 100644
index 0000000..5b375ee
--- /dev/null
+++ b/rest-api-spec/api/indices.update_aliases.json
@@ -0,0 +1,26 @@
+{
+ "indices.update_aliases": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/indices-aliases.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/_aliases",
+ "paths": ["/_aliases"],
+ "parts": {
+ },
+ "params": {
+ "timeout": {
+ "type" : "time",
+ "description" : "Request timeout"
+ },
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Specify timeout for connection to master"
+ }
+ }
+ },
+ "body": {
+ "description" : "The definition of `actions` to perform",
+ "required" : true
+ }
+ }
+}
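
The required body is an `actions` array; add/remove pairs in one call are applied atomically, which is the usual way to flip an alias between indices. A sketch, same assumptions, with illustrative index and alias names:

    import requests

    # POST /_aliases; remove+add in a single actions array swaps the alias atomically
    resp = requests.post(
        "http://localhost:9200/_aliases",
        json={"actions": [
            {"remove": {"index": "logs-2014.05", "alias": "logs_current"}},
            {"add": {"index": "logs-2014.06", "alias": "logs_current"}},
        ]},
    )
    print(resp.json())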
diff --git a/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/api/indices.validate_query.json
new file mode 100644
index 0000000..324d648
--- /dev/null
+++ b/rest-api-spec/api/indices.validate_query.json
@@ -0,0 +1,54 @@
+{
+ "indices.validate_query": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-validate.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_validate/query",
+ "paths": ["/_validate/query", "/{index}/_validate/query", "/{index}/{type}/_validate/query"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types"
+ }
+ },
+ "params": {
+ "explain": {
+ "type" : "boolean",
+ "description" : "Return detailed information about the error"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "operation_threading": {
+ "description" : "TODO: ?"
+ },
+ "source": {
+ "type" : "string",
+ "description" : "The URL-encoded query definition (instead of using the request body)"
+ },
+ "q": {
+ "type" : "string",
+ "description" : "Query in the Lucene query string syntax"
+ }
+ }
+ },
+ "body": {
+ "description" : "The query definition specified with the Query DSL"
+ }
+ }
+}
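
Since `q` lets the query ride in the URL, a body-less GET is enough for a quick validity check. A sketch under the same assumptions; the (deliberately broken) query string is illustrative:

    import requests

    # GET /{index}/_validate/query?q=...&explain=true
    resp = requests.get(
        "http://localhost:9200/my_index/_validate/query",
        params={"q": "title:foo AND", "explain": "true"},
    )
    print(resp.json())  # expect {"valid": false, ...} plus an explanation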
diff --git a/rest-api-spec/api/info.json b/rest-api-spec/api/info.json
new file mode 100644
index 0000000..b83b666
--- /dev/null
+++ b/rest-api-spec/api/info.json
@@ -0,0 +1,15 @@
+{
+ "info": {
+ "documentation": "http://www.elasticsearch.org/guide/",
+ "methods": ["GET"],
+ "url": {
+ "path": "/",
+ "paths": ["/"],
+ "parts": {
+ },
+ "params": {
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/mget.json b/rest-api-spec/api/mget.json
new file mode 100644
index 0000000..13688c7
--- /dev/null
+++ b/rest-api-spec/api/mget.json
@@ -0,0 +1,54 @@
+{
+ "mget": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-multi-get.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_mget",
+ "paths": ["/_mget", "/{index}/_mget", "/{index}/{type}/_mget"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "description" : "The type of the document"
+ }
+ },
+ "params": {
+ "fields": {
+ "type": "list",
+ "description" : "A comma-separated list of fields to return in the response"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "realtime": {
+ "type" : "boolean",
+ "description" : "Specify whether to perform the operation in realtime or search mode"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the shard containing the document before performing the operation"
+ },
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or a list of fields to return"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "A list of fields to exclude from the returned _source field"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "A list of fields to extract and return from the _source field"
+ }
+ }
+ },
+ "body": {
+ "description" : "Document identifiers; can be either `docs` (containing full document information) or `ids` (when index and type is provided in the URL.",
+ "required" : true
+ }
+ }
+}
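
With index and type fixed in the URL, the body can use the short `ids` form. A sketch, same assumptions; the ids are illustrative:

    import requests

    # POST /{index}/{type}/_mget with the `ids` shorthand body
    resp = requests.post(
        "http://localhost:9200/my_index/doc/_mget",
        json={"ids": ["1", "2"]},
    )
    for doc in resp.json()["docs"]:
        print(doc["_id"], doc.get("found"))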
diff --git a/rest-api-spec/api/mlt.json b/rest-api-spec/api/mlt.json
new file mode 100644
index 0000000..911bd8f
--- /dev/null
+++ b/rest-api-spec/api/mlt.json
@@ -0,0 +1,108 @@
+{
+ "mlt": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-more-like-this.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/{index}/{type}/{id}/_mlt",
+ "paths": ["/{index}/{type}/{id}/_mlt"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document (use `_all` to fetch the first document matching the ID across all types)"
+ }
+ },
+ "params": {
+ "boost_terms": {
+ "type" : "number",
+ "description" : "The boost factor"
+ },
+ "max_doc_freq": {
+ "type" : "number",
+ "description" : "The word occurrence frequency as count: words with higher occurrence in the corpus will be ignored"
+ },
+ "max_query_terms": {
+ "type" : "number",
+ "description" : "The maximum query terms to be included in the generated query"
+ },
+ "max_word_length": {
+ "type" : "number",
+ "description" : "The minimum length of the word: longer words will be ignored"
+ },
+ "min_doc_freq": {
+ "type" : "number",
+ "description" : "The word occurrence frequency as count: words with lower occurrence in the corpus will be ignored"
+ },
+ "min_term_freq": {
+ "type" : "number",
+ "description" : "The term frequency as percent: terms with lower occurence in the source document will be ignored"
+ },
+ "min_word_length": {
+ "type" : "number",
+ "description" : "The minimum length of the word: shorter words will be ignored"
+ },
+ "mlt_fields": {
+ "type" : "list",
+ "description" : "Specific fields to perform the query against"
+ },
+ "percent_terms_to_match": {
+ "type" : "number",
+ "description" : "How many terms have to match in order to consider the document a match (default: 0.3)"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "search_from": {
+ "type" : "number",
+ "description" : "The offset from which to return results"
+ },
+ "search_indices": {
+ "type" : "list",
+ "description" : "A comma-separated list of indices to perform the query against (default: the index containing the document)"
+ },
+ "search_query_hint": {
+ "type" : "string",
+ "description" : "The search query hint"
+ },
+ "search_scroll": {
+ "type" : "string",
+ "description" : "A scroll search request definition"
+ },
+ "search_size": {
+ "type" : "number",
+ "description" : "The number of documents to return (default: 10)"
+ },
+ "search_source": {
+ "type" : "string",
+ "description" : "A specific search request definition (instead of using the request body)"
+ },
+ "search_type": {
+ "type" : "string",
+ "description" : "Specific search type (eg. `dfs_then_fetch`, `count`, etc)"
+ },
+ "search_types": {
+ "type" : "list",
+ "description" : "A comma-separated list of types to perform the query against (default: the same type as the document)"
+ },
+ "stop_words": {
+ "type" : "list",
+ "description" : "A list of stop words to be ignored"
+ }
+ }
+ },
+ "body": {
+ "description" : "A specific search request definition"
+ }
+ }
+}
diff --git a/rest-api-spec/api/mpercolate.json b/rest-api-spec/api/mpercolate.json
new file mode 100644
index 0000000..4bf559a
--- /dev/null
+++ b/rest-api-spec/api/mpercolate.json
@@ -0,0 +1,41 @@
+{
+ "mpercolate": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-percolate.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_mpercolate",
+ "paths": ["/_mpercolate", "/{index}/_mpercolate", "/{index}/{type}/_mpercolate"],
+ "parts": {
+ "index": {
+ "type": "string",
+ "description": "The index of the document being count percolated to use as default"
+ },
+ "type": {
+ "type" : "string",
+ "description" : "The type of the document being percolated to use as default."
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type": "boolean",
+ "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type": "boolean",
+ "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type": "enum",
+ "options": ["open", "closed"],
+ "default": "open",
+ "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ },
+ "body": {
+ "description": "The percolate request definitions (header & body pair), separated by newlines",
+ "required": true,
+ "serialize" : "bulk"
+ }
+ }
+}
diff --git a/rest-api-spec/api/msearch.json b/rest-api-spec/api/msearch.json
new file mode 100644
index 0000000..97d8488
--- /dev/null
+++ b/rest-api-spec/api/msearch.json
@@ -0,0 +1,32 @@
+{
+ "msearch": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-multi-search.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_msearch",
+ "paths": ["/_msearch", "/{index}/_msearch", "/{index}/{type}/_msearch"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to use as default"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types to use as default"
+ }
+ },
+ "params": {
+ "search_type": {
+ "type" : "enum",
+ "options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch", "count", "scan"],
+ "description" : "Search operation type"
+ }
+ }
+ },
+ "body": {
+ "description": "The request definitions (metadata-search request definition pairs), separated by newlines",
+ "required" : true,
+ "serialize" : "bulk"
+ }
+ }
+}
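
The `"serialize": "bulk"` hint means the body is newline-delimited JSON: one header line (index/type/search_type overrides) followed by one search-body line per search, each newline-terminated. A sketch under the same assumptions; the two searches are illustrative:

    import json
    import requests

    searches = [
        ({"index": "my_index"}, {"query": {"match_all": {}}, "size": 1}),
        ({"index": "my_index", "search_type": "count"},
         {"query": {"term": {"status": "active"}}}),
    ]
    # header line + body line per search, each terminated by "\n"
    payload = "".join(json.dumps(part) + "\n" for pair in searches for part in pair)
    resp = requests.post("http://localhost:9200/_msearch", data=payload)
    print(len(resp.json()["responses"]))  # one response object per search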
diff --git a/rest-api-spec/api/mtermvectors.json b/rest-api-spec/api/mtermvectors.json
new file mode 100644
index 0000000..4697e74
--- /dev/null
+++ b/rest-api-spec/api/mtermvectors.json
@@ -0,0 +1,86 @@
+{
+ "mtermvectors" : {
+ "documentation" : "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-multi-termvectors.html",
+ "methods" : ["GET", "POST"],
+ "url" : {
+ "path" : "/_mtermvectors",
+ "paths" : ["/_mtermvectors", "/{index}/_mtermvectors", "/{index}/{type}/_mtermvectors"],
+ "parts" : {
+ "index" : {
+ "type" : "string",
+ "description" : "The index in which the document resides."
+ },
+ "type" : {
+ "type" : "string",
+ "description" : "The type of the document."
+ },
+ "id" : {
+ "type" : "string",
+ "description" : "The id of the document."
+ }
+ },
+ "params" : {
+ "ids" : {
+ "type" : "list",
+ "description" : "A comma-separated list of documents ids. You must define ids as parameter or set \"ids\" or \"docs\" in the request body",
+ "required" : false
+ },
+ "term_statistics" : {
+ "type" : "boolean",
+ "description" : "Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "default" : false,
+ "required" : false
+ },
+ "field_statistics" : {
+ "type" : "boolean",
+ "description" : "Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "default" : true,
+ "required" : false
+ },
+ "fields" : {
+ "type" : "list",
+ "description" : "A comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "required" : false
+ },
+ "offsets" : {
+ "type" : "boolean",
+ "description" : "Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "default" : true,
+ "required" : false
+ },
+ "positions" : {
+ "type" : "boolean",
+ "description" : "Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "default" : true,
+ "required" : false
+ },
+ "payloads" : {
+ "type" : "boolean",
+ "description" : "Specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "default" : true,
+ "required" : false
+ },
+ "preference" : {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random) .Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "required" : false
+ },
+ "routing" : {
+ "type" : "string",
+ "description" : "Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "required" : false
+ },
+ "parent" : {
+ "type" : "string",
+ "description" : "Parent id of documents. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
+ "required" : false
+ }
+ }
+ },
+ "body" : {
+ "description" : "Define ids, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.",
+ "required" : false
+
+ }
+ }
+}
\ No newline at end of file
diff --git a/rest-api-spec/api/nodes.hot_threads.json b/rest-api-spec/api/nodes.hot_threads.json
new file mode 100644
index 0000000..64e4fe8
--- /dev/null
+++ b/rest-api-spec/api/nodes.hot_threads.json
@@ -0,0 +1,36 @@
+{
+ "nodes.hot_threads": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-nodes-hot-threads.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_nodes/hot_threads",
+ "paths": ["/_cluster/nodes/hotthreads", "/_cluster/nodes/hot_threads", "/_cluster/nodes/{node_id}/hotthreads", "/_cluster/nodes/{node_id}/hot_threads", "/_nodes/hotthreads", "/_nodes/hot_threads", "/_nodes/{node_id}/hotthreads", "/_nodes/{node_id}/hot_threads"],
+ "parts": {
+ "node_id": {
+ "type" : "list",
+ "description" : "A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes"
+ }
+ },
+ "params": {
+ "interval": {
+ "type" : "time",
+ "description" : "The interval for the second sampling of threads"
+ },
+ "snapshots": {
+ "type" : "number",
+ "description" : "Number of samples of thread stacktrace (default: 10)"
+ },
+ "threads": {
+ "type" : "number",
+ "description" : "Specify the number of threads to provide information for (default: 3)"
+ },
+ "type": {
+ "type" : "enum",
+ "options" : ["cpu", "wait", "block"],
+ "description" : "The type to sample (default: cpu)"
+ }
+ }
+ },
+ "body": null
+ }
+}
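
Unusually for this API family, hot_threads answers with a plain-text report rather than JSON. A sketch, same assumptions:

    import requests

    # GET /_nodes/{node_id}/hot_threads; the response is text, not JSON
    resp = requests.get(
        "http://localhost:9200/_nodes/_local/hot_threads",
        params={"threads": 3, "type": "cpu"},
    )
    print(resp.text)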
diff --git a/rest-api-spec/api/nodes.info.json b/rest-api-spec/api/nodes.info.json
new file mode 100644
index 0000000..1e9bba8
--- /dev/null
+++ b/rest-api-spec/api/nodes.info.json
@@ -0,0 +1,33 @@
+{
+ "nodes.info": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-nodes-info.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_nodes",
+ "paths": ["/_nodes", "/_nodes/{node_id}", "/_nodes/{metric}", "/_nodes/{node_id}/{metric}"],
+ "parts": {
+ "node_id": {
+ "type": "list",
+ "description": "A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes"
+ },
+ "metric": {
+ "type": "list",
+ "options": ["settings", "os", "process", "jvm", "thread_pool", "network", "transport", "http", "plugin"],
+ "description": "A comma-separated list of metrics you wish returned. Leave empty to return all."
+ }
+ },
+ "params": {
+ "flat_settings": {
+ "type": "boolean",
+ "description": "Return settings in flat format (default: false)"
+ },
+ "human": {
+ "type": "boolean",
+ "description": "Whether to return time and byte values in human-readable format.",
+ "default": false
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/nodes.shutdown.json b/rest-api-spec/api/nodes.shutdown.json
new file mode 100644
index 0000000..578abe0
--- /dev/null
+++ b/rest-api-spec/api/nodes.shutdown.json
@@ -0,0 +1,27 @@
+{
+ "nodes.shutdown": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-nodes-shutdown.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/_shutdown",
+ "paths": ["/_shutdown", "/_cluster/nodes/_shutdown", "/_cluster/nodes/{node_id}/_shutdown"],
+ "parts": {
+ "node_id": {
+ "type" : "list",
+ "description" : "A comma-separated list of node IDs or names to perform the operation on; use `_local` to perform the operation on the node you're connected to, leave empty to perform the operation on all nodes"
+ }
+ },
+ "params": {
+ "delay": {
+ "type" : "time",
+ "description" : "Set the delay for the operation (default: 1s)"
+ },
+ "exit": {
+ "type" : "boolean",
+ "description" : "Exit the JVM as well (default: true)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/nodes.stats.json b/rest-api-spec/api/nodes.stats.json
new file mode 100644
index 0000000..3f81527
--- /dev/null
+++ b/rest-api-spec/api/nodes.stats.json
@@ -0,0 +1,67 @@
+{
+ "nodes.stats": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/cluster-nodes-stats.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_nodes/stats",
+ "paths": [
+ "/_nodes/stats",
+ "/_nodes/{node_id}/stats",
+ "/_nodes/stats/{metric}",
+ "/_nodes/{node_id}/stats/{metric}",
+ "/_nodes/stats/{metric}/{index_metric}",
+ "/_nodes/{node_id}/stats/{metric}/{index_metric}"
+ ],
+ "parts": {
+ "metric" : {
+ "type" : "list",
+ "options" : ["_all", "breaker", "fs", "http", "indices", "jvm", "network", "os", "process", "thread_pool", "transport"],
+ "description" : "Limit the information returned to the specified metrics"
+ },
+ "index_metric" : {
+ "type" : "list",
+ "options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "id_cache", "indexing", "merge", "percolate", "refresh", "search", "segments", "store", "warmer"],
+ "description" : "Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified."
+ },
+ "node_id": {
+ "type" : "list",
+ "description" : "A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes"
+ }
+ },
+ "params": {
+ "completion_fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards)"
+ },
+ "fielddata_fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields for `fielddata` index metric (supports wildcards)"
+ },
+ "fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards)"
+ },
+ "groups": {
+ "type" : "boolean",
+ "description" : "A comma-separated list of search groups for `search` index metric"
+ },
+ "human": {
+ "type": "boolean",
+ "description": "Whether to return time and byte values in human-readable format.",
+ "default": false
+ },
+ "level": {
+ "type" : "enum",
+ "description": "Return indices stats aggregated at node, index or shard level",
+ "options" : ["node", "indices", "shards"],
+ "default" : "node"
+ },
+ "types" : {
+ "type" : "list",
+ "description" : "A comma-separated list of document types for the `indexing` index metric"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/percolate.json b/rest-api-spec/api/percolate.json
new file mode 100644
index 0000000..50005a1
--- /dev/null
+++ b/rest-api-spec/api/percolate.json
@@ -0,0 +1,72 @@
+{
+ "percolate": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-percolate.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/{index}/{type}/_percolate",
+ "paths": ["/{index}/{type}/_percolate", "/{index}/{type}/{id}/_percolate"],
+ "parts": {
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The index of the document being percolated."
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document being percolated."
+ },
+ "id": {
+ "type" : "string",
+ "required" : false,
+ "description" : "Substitute the document in the request body with a document that is known by the specified id. On top of the id, the index and type parameter will be used to retrieve the document from within the cluster."
+ }
+ },
+ "params": {
+ "routing": {
+ "type" : "list",
+ "description" : "A comma-separated list of specific routing values"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "percolate_index": {
+ "type" : "string",
+ "description" : "The index to percolate the document into. Defaults to index."
+ },
+ "percolate_type": {
+ "type" : "string",
+ "description" : "The type to percolate document into. Defaults to type."
+ },
+ "version" : {
+ "type" : "number",
+ "description" : "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type" : "enum",
+ "options" : ["internal","external"],
+ "description" : "Specific version type"
+ }
+ }
+ },
+ "body": {
+ "description" : "The percolator request definition using the percolate DSL",
+ "required" : false
+ }
+ }
+}
diff --git a/rest-api-spec/api/ping.json b/rest-api-spec/api/ping.json
new file mode 100644
index 0000000..65e1856
--- /dev/null
+++ b/rest-api-spec/api/ping.json
@@ -0,0 +1,15 @@
+{
+ "ping": {
+ "documentation": "http://www.elasticsearch.org/guide/",
+ "methods": ["HEAD"],
+ "url": {
+ "path": "/",
+ "paths": ["/"],
+ "parts": {
+ },
+ "params": {
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/scroll.json b/rest-api-spec/api/scroll.json
new file mode 100644
index 0000000..b01af64
--- /dev/null
+++ b/rest-api-spec/api/scroll.json
@@ -0,0 +1,29 @@
+{
+ "scroll": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-request-scroll.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_search/scroll",
+ "paths": ["/_search/scroll", "/_search/scroll/{scroll_id}"],
+ "parts": {
+ "scroll_id": {
+ "type" : "string",
+ "description" : "The scroll ID"
+ }
+ },
+ "params": {
+ "scroll": {
+ "type" : "duration",
+ "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
+ },
+ "scroll_id": {
+ "type" : "string",
+ "description" : "The scroll ID for scrolled search"
+ }
+ }
+ },
+ "body": {
+ "description": "The scroll ID if not passed by URL or query parameter."
+ }
+ }
+}
diff --git a/rest-api-spec/api/search.json b/rest-api-spec/api/search.json
new file mode 100644
index 0000000..9c279bc
--- /dev/null
+++ b/rest-api-spec/api/search.json
@@ -0,0 +1,156 @@
+{
+ "search": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-search.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_search",
+ "paths": ["/_search", "/{index}/_search", "/{index}/{type}/_search"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices"
+ },
+ "type": {
+ "type" : "list",
+ "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types"
+ }
+ },
+ "params": {
+ "analyzer": {
+ "type" : "string",
+ "description" : "The analyzer to use for the query string"
+ },
+ "analyze_wildcard": {
+ "type" : "boolean",
+ "description" : "Specify whether wildcard and prefix queries should be analyzed (default: false)"
+ },
+ "default_operator": {
+ "type" : "enum",
+ "options" : ["AND","OR"],
+ "default" : "OR",
+ "description" : "The default operator for query string query (AND or OR)"
+ },
+ "df": {
+ "type" : "string",
+ "description" : "The field to use as default where no field prefix is given in the query string"
+ },
+ "explain": {
+ "type" : "boolean",
+ "description" : "Specify whether to return detailed information about score computation as part of a hit"
+ },
+ "fields": {
+ "type" : "list",
+ "description" : "A comma-separated list of fields to return as part of a hit"
+ },
+ "from": {
+ "type" : "number",
+ "description" : "Starting offset (default: 0)"
+ },
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "indices_boost": {
+ "type" : "list",
+ "description" : "Comma-separated list of index boosts"
+ },
+ "lenient": {
+ "type" : "boolean",
+ "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored"
+ },
+ "lowercase_expanded_terms": {
+ "type" : "boolean",
+ "description" : "Specify whether query terms should be lowercased"
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "q": {
+ "type" : "string",
+ "description" : "Query in the Lucene query string syntax"
+ },
+ "routing": {
+ "type" : "list",
+ "description" : "A comma-separated list of specific routing values"
+ },
+ "scroll": {
+ "type" : "duration",
+ "description" : "Specify how long a consistent view of the index should be maintained for scrolled search"
+ },
+ "search_type": {
+ "type" : "enum",
+ "options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch", "count", "scan"],
+ "description" : "Search operation type"
+ },
+ "size": {
+ "type" : "number",
+ "description" : "Number of hits to return (default: 10)"
+ },
+ "sort": {
+ "type" : "list",
+ "description" : "A comma-separated list of <field>:<direction> pairs"
+ },
+ "source": {
+ "type" : "string",
+ "description" : "The URL-encoded request definition using the Query DSL (instead of using request body)"
+ },
+ "_source": {
+ "type" : "list",
+ "description" : "True or false to return the _source field or not, or a list of fields to return"
+ },
+ "_source_exclude": {
+ "type" : "list",
+ "description" : "A list of fields to exclude from the returned _source field"
+ },
+ "_source_include": {
+ "type" : "list",
+ "description" : "A list of fields to extract and return from the _source field"
+ },
+ "stats": {
+ "type" : "list",
+ "description" : "Specific 'tag' of the request for logging and statistical purposes"
+ },
+ "suggest_field": {
+ "type" : "string",
+ "description" : "Specify which field to use for suggestions"
+ },
+ "suggest_mode": {
+ "type" : "enum",
+ "options" : ["missing", "popular", "always"],
+ "default" : "missing",
+ "description" : "Specify suggest mode"
+ },
+ "suggest_size": {
+ "type" : "number",
+ "description" : "How many suggestions to return in response"
+ },
+ "suggest_text": {
+ "type" : "text",
+ "description" : "The source text for which the suggestions should be returned"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "version": {
+ "type" : "boolean",
+ "description" : "Specify whether to return document version as part of a hit"
+ }
+ }
+ },
+ "body": {
+ "description": "The search definition using the Query DSL"
+ }
+ }
+}
diff --git a/rest-api-spec/api/snapshot.create.json b/rest-api-spec/api/snapshot.create.json
new file mode 100644
index 0000000..b0608ab
--- /dev/null
+++ b/rest-api-spec/api/snapshot.create.json
@@ -0,0 +1,37 @@
+{
+ "snapshot.create": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/_snapshot/{repository}/{snapshot}",
+ "paths": ["/_snapshot/{repository}/{snapshot}", "/_snapshot/{repository}/{snapshot}/_create"],
+ "parts": {
+ "repository": {
+ "type": "string",
+ "required" : true,
+ "description": "A repository name"
+ },
+ "snapshot": {
+ "type": "string",
+ "required" : true,
+ "description": "A snapshot name"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "wait_for_completion": {
+ "type": "boolean",
+ "description": "Should this request wait until the operation has completed before returning",
+ "default": false
+ }
+ }
+ },
+ "body" : {
+ "description" : "The snapshot definition",
+ "required" : false
+ }
+ }
+}
diff --git a/rest-api-spec/api/snapshot.create_repository.json b/rest-api-spec/api/snapshot.create_repository.json
new file mode 100644
index 0000000..2503546
--- /dev/null
+++ b/rest-api-spec/api/snapshot.create_repository.json
@@ -0,0 +1,31 @@
+{
+ "snapshot.create_repository": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["PUT", "POST"],
+ "url": {
+ "path": "/_snapshot/{repository}",
+ "paths": ["/_snapshot/{repository}"],
+ "parts": {
+ "repository": {
+ "type": "string",
+ "required" : true,
+ "description": "A repository name"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ }
+ }
+ },
+ "body": {
+ "description" : "The repository definition",
+ "required" : true
+ }
+ }
+}
diff --git a/rest-api-spec/api/snapshot.delete.json b/rest-api-spec/api/snapshot.delete.json
new file mode 100644
index 0000000..de004b2
--- /dev/null
+++ b/rest-api-spec/api/snapshot.delete.json
@@ -0,0 +1,29 @@
+{
+ "snapshot.delete": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/_snapshot/{repository}/{snapshot}",
+ "paths": ["/_snapshot/{repository}/{snapshot}"],
+ "parts": {
+ "repository": {
+ "type": "string",
+ "required" : true,
+ "description": "A repository name"
+ },
+ "snapshot": {
+ "type": "string",
+ "required" : true,
+ "description": "A snapshot name"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/snapshot.delete_repository.json b/rest-api-spec/api/snapshot.delete_repository.json
new file mode 100644
index 0000000..d879c75
--- /dev/null
+++ b/rest-api-spec/api/snapshot.delete_repository.json
@@ -0,0 +1,28 @@
+{
+ "snapshot.delete_repository": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["DELETE"],
+ "url": {
+ "path": "/_snapshot/{repository}",
+ "paths": ["/_snapshot/{repository}"],
+ "parts": {
+ "repository": {
+ "type": "list",
+ "required" : true,
+ "description": "A comma-separated list of repository names"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/snapshot.get.json b/rest-api-spec/api/snapshot.get.json
new file mode 100644
index 0000000..9639b92
--- /dev/null
+++ b/rest-api-spec/api/snapshot.get.json
@@ -0,0 +1,29 @@
+{
+ "snapshot.get": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_snapshot/{repository}/{snapshot}",
+ "paths": ["/_snapshot/{repository}/{snapshot}"],
+ "parts": {
+ "repository": {
+ "type": "string",
+ "required" : true,
+ "description": "A repository name"
+ },
+ "snapshot": {
+ "type": "list",
+ "required" : true,
+ "description": "A comma-separated list of snapshot names"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/snapshot.get_repository.json b/rest-api-spec/api/snapshot.get_repository.json
new file mode 100644
index 0000000..567dce0
--- /dev/null
+++ b/rest-api-spec/api/snapshot.get_repository.json
@@ -0,0 +1,27 @@
+{
+ "snapshot.get_repository": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["GET"],
+ "url": {
+ "path": "/_snapshot",
+ "paths": ["/_snapshot", "/_snapshot/{repository}"],
+ "parts": {
+ "repository": {
+ "type": "list",
+ "description": "A comma-separated list of repository names"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "local": {
+ "type": "boolean",
+ "description": "Return local information, do not retrieve the state from master node (default: false)"
+ }
+ }
+ },
+ "body": null
+ }
+}
diff --git a/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/api/snapshot.restore.json
new file mode 100644
index 0000000..6c9da74
--- /dev/null
+++ b/rest-api-spec/api/snapshot.restore.json
@@ -0,0 +1,37 @@
+{
+ "snapshot.restore": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/modules-snapshots.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/_snapshot/{repository}/{snapshot}/_restore",
+ "paths": ["/_snapshot/{repository}/{snapshot}/_restore"],
+ "parts": {
+ "repository": {
+ "type": "string",
+ "required" : true,
+ "description": "A repository name"
+ },
+ "snapshot": {
+ "type": "string",
+ "required" : true,
+ "description": "A snapshot name"
+ }
+ },
+ "params": {
+ "master_timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout for connection to master node"
+ },
+ "wait_for_completion": {
+ "type": "boolean",
+ "description": "Should this request wait until the operation has completed before returning",
+ "default": false
+ }
+ }
+ },
+ "body" : {
+ "description" : "Details of what to restore",
+ "required" : false
+ }
+ }
+}
diff --git a/rest-api-spec/api/suggest.json b/rest-api-spec/api/suggest.json
new file mode 100644
index 0000000..333b477
--- /dev/null
+++ b/rest-api-spec/api/suggest.json
@@ -0,0 +1,48 @@
+{
+ "suggest": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/search-search.html",
+ "methods": ["POST", "GET"],
+ "url": {
+ "path": "/_suggest",
+ "paths": ["/_suggest", "/{index}/_suggest"],
+ "parts": {
+ "index": {
+ "type" : "list",
+ "description" : "A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices"
+ }
+ },
+ "params": {
+ "ignore_unavailable": {
+ "type" : "boolean",
+ "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type" : "boolean",
+ "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type" : "enum",
+ "options" : ["open","closed"],
+ "default" : "open",
+ "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ },
+ "preference": {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random)"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "source": {
+ "type" : "string",
+ "description" : "The URL-encoded request definition (instead of using request body)"
+ }
+ }
+ },
+ "body": {
+ "description" : "The request definition",
+ "required" : true
+ }
+ }
+}
diff --git a/rest-api-spec/api/termvector.json b/rest-api-spec/api/termvector.json
new file mode 100644
index 0000000..99a9bdf
--- /dev/null
+++ b/rest-api-spec/api/termvector.json
@@ -0,0 +1,83 @@
+{
+ "termvector" : {
+ "documentation" : "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-termvectors.html",
+ "methods" : ["GET", "POST"],
+ "url" : {
+ "path" : "/{index}/{type}/{id}/_termvector",
+ "paths" : ["/{index}/{type}/{id}/_termvector"],
+ "parts" : {
+ "index" : {
+ "type" : "string",
+ "description" : "The index in which the document resides.",
+ "required" : true
+ },
+ "type" : {
+ "type" : "string",
+ "description" : "The type of the document.",
+ "required" : true
+ },
+ "id" : {
+ "type" : "string",
+ "description" : "The id of the document.",
+ "required" : true
+ }
+ },
+ "params": {
+ "term_statistics" : {
+ "type" : "boolean",
+ "description" : "Specifies if total term frequency and document frequency should be returned.",
+ "default" : false,
+ "required" : false
+ },
+ "field_statistics" : {
+ "type" : "boolean",
+ "description" : "Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned.",
+ "default" : true,
+ "required" : false
+ },
+ "fields" : {
+ "type" : "list",
+ "description" : "A comma-separated list of fields to return.",
+ "required" : false
+ },
+ "offsets" : {
+ "type" : "boolean",
+ "description" : "Specifies if term offsets should be returned.",
+ "default" : true,
+ "required" : false
+ },
+ "positions" : {
+ "type" : "boolean",
+ "description" : "Specifies if term positions should be returned.",
+ "default" : true,
+ "required" : false
+ },
+ "payloads" : {
+ "type" : "boolean",
+ "description" : "Specifies if term payloads should be returned.",
+ "default" : true,
+ "required" : false
+ },
+ "preference" : {
+ "type" : "string",
+ "description" : "Specify the node or shard the operation should be performed on (default: random).",
+ "required" : false
+ },
+ "routing" : {
+ "type" : "string",
+ "description" : "Specific routing value.",
+ "required" : false
+ },
+ "parent": {
+ "type" : "string",
+ "description" : "Parent id of documents.",
+ "required" : false
+ }
+ }
+ },
+ "body": {
+ "description" : "Define parameters. See documentation.",
+ "required" : false
+ }
+ }
+}
diff --git a/rest-api-spec/api/update.json b/rest-api-spec/api/update.json
new file mode 100644
index 0000000..35a158f
--- /dev/null
+++ b/rest-api-spec/api/update.json
@@ -0,0 +1,91 @@
+{
+ "update": {
+ "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-update.html",
+ "methods": ["POST"],
+ "url": {
+ "path": "/{index}/{type}/{id}/_update",
+ "paths": ["/{index}/{type}/{id}/_update"],
+ "parts": {
+ "id": {
+ "type" : "string",
+ "required" : true,
+ "description" : "Document ID"
+ },
+ "index": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The name of the index"
+ },
+ "type": {
+ "type" : "string",
+ "required" : true,
+ "description" : "The type of the document"
+ }
+ },
+ "params": {
+ "consistency": {
+ "type" : "enum",
+ "options" : ["one", "quorum", "all"],
+ "description" : "Explicit write consistency setting for the operation"
+ },
+ "fields": {
+ "type": "list",
+ "description" : "A comma-separated list of fields to return in the response"
+ },
+ "lang": {
+ "type" : "string",
+ "description" : "The script language (default: mvel)"
+ },
+ "parent": {
+ "type" : "string",
+ "description" : "ID of the parent document"
+ },
+ "refresh": {
+ "type" : "boolean",
+ "description" : "Refresh the index after performing the operation"
+ },
+ "replication": {
+ "type" : "enum",
+ "options" : ["sync","async"],
+ "default" : "sync",
+ "description" : "Specific replication type"
+ },
+ "retry_on_conflict": {
+ "type" : "number",
+ "description" : "Specify how many times should the operation be retried when a conflict occurs (default: 0)"
+ },
+ "routing": {
+ "type" : "string",
+ "description" : "Specific routing value"
+ },
+ "script": {
+ "description" : "The URL-encoded script definition (instead of using request body)"
+ },
+ "timeout": {
+ "type" : "time",
+ "description" : "Explicit operation timeout"
+ },
+ "timestamp": {
+ "type" : "time",
+ "description" : "Explicit timestamp for the document"
+ },
+ "ttl": {
+ "type" : "duration",
+ "description" : "Expiration time for the document"
+ },
+ "version" : {
+ "type" : "number",
+ "description" : "Explicit version number for concurrency control"
+ },
+ "version_type": {
+ "type" : "enum",
+ "options" : ["internal","external"],
+ "description" : "Specific version type"
+ }
+ }
+ },
+ "body": {
+ "description" : "The request definition using either `script` or partial `doc`"
+ }
+ }
+}
diff --git a/rest-api-spec/test/README.asciidoc b/rest-api-spec/test/README.asciidoc
new file mode 100644
index 0000000..97f7f09
--- /dev/null
+++ b/rest-api-spec/test/README.asciidoc
@@ -0,0 +1,233 @@
+Test Suite:
+===========
+
+A YAML test file consists of:
+* an optional `setup` section, followed by
+* one or more test sections
+
+For instance:
+
+ setup:
+ - do: ....
+ - do: ....
+
+ ---
+ "First test":
+ - do: ...
+ - match: ...
+
+ ---
+ "Second test":
+ - do: ...
+ - match: ...
+
+
+A `setup` section contains a list of commands to run before each test
+section, in order to set up the same environment for every test section.
+
+A test section represents an independent test, containing multiple `do`
+statements and assertions. The contents of a test section must be run in
+order, but individual test sections may be run in any order, as follows:
+
+1. run `setup` (if any)
+2. reset the `response` var and the `stash` (see below)
+3. run test contents
+4. run teardown
+
+The `teardown` should delete all indices and all templates.
+
+Dot notation:
+-------------
+Dot notation is used for (1) method calls and (2) hierarchical data structures. For
+instance, a method call like `cluster.health` would do the equivalent of:
+
+ client.cluster.health(...params...)
+
+A test against `tokens.1.token` would examine the `token` key, in the second element
+of the `tokens` array, inside the `response` var (see below):
+
+ $val = $response->{tokens}[1]{token} # Perl syntax roolz!
+
+If one of the levels (eg `tokens`) does not exist, it should return an undefined value.
+If no field name is given (ie the empty string) then return the current
+$val -- used for testing the whole response body.
+
+Use \. to specify paths that actually contain '.' in the key name, for example
+in the `indices.get_settings` API.
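+
+For instance, with settings returned in flat format, a hypothetical test
+against an index `test_index` could escape the dot in the setting name:
+
+....
+ - match: { test_index.settings.index\.number_of_shards: "1" }
+....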
+
+Skipping tests:
+---------------
+If a test section should only be run on certain versions of Elasticsearch,
+then the first entry in the section (after the title) should be called
+`skip`, and should contain the range of versions to be
+skipped, and the reason why the tests are skipped. For instance:
+
+....
+ "Parent":
+ - skip:
+ version: "0 - 0.90.2"
+ reason: Delete ignores the parent param
+
+ - do:
+ ... test definitions ...
+....
+
+All tests in the section following the skip statement should be skipped if:
+`min <= current <= max`.
+
+The `version` range should always have an upper bound. Versions should
+either have each version part compared numerically, or should be converted
+to a string with sufficient digits to allow string comparison, eg
+
+ 0.90.2 -> 000-090-002
+
+Snapshot versions and versions of the form `1.0.0.Beta1` can be treated
+as the rounded down version, eg `1.0.0`.
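+
+As a rough sketch (not part of the spec), a runner might implement the
+padded string conversion along these lines, in Python:
+
+....
+def version_key(version):
+    # keep only the leading numeric parts, so "1.0.0.Beta1" rounds
+    # down to 1.0.0, and pad each part to three digits
+    parts = []
+    for part in version.split("."):
+        if not part.isdigit():
+            break
+        parts.append(part)
+    return "-".join(p.zfill(3) for p in parts)
+
+assert version_key("0.90.2") == "000-090-002"
+assert version_key("1.0.0.Beta1") == "001-000-000"
+....
+
+With versions converted this way, the skip check `min <= current <= max`
+can be performed as a plain string comparison.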
+
+The skip section can also be used to list new features that need to be
+supported in order to run a test. This way, up-to-date runners will run the
+test, while runners that don't support the feature yet can temporarily skip
+it, avoiding a flood of test failures in the meantime. Once all runners have
+implemented the feature, it can be declared supported by default, and the
+related skip sections can be removed from the tests.
+
+....
+ "Parent":
+ - skip:
+ features: regex
+
+ - do:
+ ... test definitions ...
+....
+
+The `features` field can either be a string or an array of strings.
+The skip section requires either a `version` range or a `features` list to be specified.
+
+Required operators:
+-------------------
+
+=== `do`
+
+The `do` operator calls a method on the client. For instance:
+
+....
+ - do:
+ cluster.health:
+ level: shards
+....
+
+The response from the `do` operator should be stored in the `response` var, which
+is reset (1) at the beginning of a file or (2) on the next `do`.
+
+If the arguments to `do` include `catch`, then we are expecting an error, which should
+be caught and tested. For instance:
+
+....
+ - do:
+ catch: missing
+ get:
+ index: test
+ type: test
+ id: 1
+....
+
+The argument to `catch` can be any of:
+
+[horizontal]
+`missing`:: a 404 response from ES
+`conflict`:: a 409 response from ES
+`request`:: a generic error response from ES
+`param`:: a client-side error indicating an unknown parameter has been passed
+ to the method
+`/foo bar/`:: the text of the error message matches this regular expression
+
+If `catch` is specified, then the `response` var must be cleared, and the test
+should fail if no error is thrown.
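+
+A minimal sketch of how a runner might map `catch` values to checks; the
+function and its error model here are hypothetical, not part of the spec
+(`param` is omitted, as it is a client-side check made before any request
+is sent):
+
+....
+import re
+
+def error_matches(catch, status, message):
+    # named errors map to specific HTTP status codes
+    if catch == "missing":
+        return status == 404
+    if catch == "conflict":
+        return status == 409
+    if catch == "request":
+        return status >= 400  # any generic error response
+    if catch.startswith("/") and catch.endswith("/"):
+        # /foo bar/ style: match the error message against the regex
+        return re.search(catch[1:-1], message) is not None
+    return False
+....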
+
+=== `set`
+
+For some tests, it is necessary to extract a value from the previous `response`, in
+order to reuse it in a subsequent `do` and other tests. For instance, when
+testing indexing a document without a specified ID:
+
+....
+ - do:
+ index:
+ index: test
+ type: test
+ - set: { _id: id } # stash the value of `response._id` as `id`
+ - do:
+ get:
+ index: test
+ type: test
+ id: $id # replace `$id` with the stashed value
+ - match: { _id: $id } # the returned `response._id` matches the stashed `id`
+....
+
+The last response obtained is always stashed automatically as a string, called `body`.
+This is useful when testing APIs that return text rather than JSON (e.g. the cat API),
+as it allows the whole body to be treated as an ordinary string field.
+
+Note that stashed values can be used not only as expected values (as in the
+example above), but also as actual values:
+
+....
+ - match: { $body: /^.+$/ } # the returned `body` matches the provided regex
+....
+
+The stash should be reset at the beginning of each test file.
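+
+As an illustration (again hypothetical, not part of the spec), stash
+substitution can be implemented as a recursive walk over the request
+arguments, with `stash` as a plain dict that is reset per file:
+
+....
+def resolve(value, stash):
+    # replace `$name` placeholders with previously stashed values
+    if isinstance(value, str) and value.startswith("$"):
+        return stash[value[1:]]
+    if isinstance(value, dict):
+        return {k: resolve(v, stash) for k, v in value.items()}
+    if isinstance(value, list):
+        return [resolve(v, stash) for v in value]
+    return value
+....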
+
+=== `is_true`
+
+The specified key exists and has a true value (ie not `0`, `false`, `undefined`, `null`
+or the empty string), eg:
+
+....
+ - is_true: fields._parent # the _parent key exists in the fields hash and is "true"
+....
+
+=== `is_false`
+
+The specified key doesn't exist or has a false value (ie `0`, `false`, `undefined`,
+`null` or the empty string), eg:
+
+....
+ - is_false: fields._source # the _source key doesn't exist in the fields hash or is "false"
+....
+
+=== `match`
+
+Used to compare two variables (could be scalars, arrays or hashes). The two variables
+should be identical, eg:
+
+....
+ - match: { _source: { foo: bar }}
+....
+
+Also supports regular expressions with the X flag for readability (the flag allows whitespace and comments inside the pattern):
+
+....
+ - match:
+ $body: >
+ /^ epoch \s+ timestamp \s+ count \s+ \n
+ \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/
+....
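+
+The X flag is the "extended" (verbose) regex syntax; in Python, for
+instance, the equivalent of the pattern above can be compiled with
+`re.VERBOSE` (a sketch, assuming a runner written in Python):
+
+....
+import re
+
+pattern = re.compile(r"""
+^ epoch \s+ timestamp \s+ count \s+ \n
+\d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $
+""", re.VERBOSE)  # whitespace and comments in the pattern are ignored
+....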
+
+=== `lt` and `gt`
+
+Compares two numeric values, eg:
+
+....
+ - lt: { fields._ttl: 10000 } # the `_ttl` value is less than 10,000
+....
+
+=== `length`
+
+This depends on the datatype of the value being examined, eg:
+
+....
+ - length: { _id: 22 } # the `_id` string is 22 chars long
+ - length: { _tokens: 3 } # the `_tokens` array has 3 elements
+ - length: { _source: 5 } # the `_source` hash has 5 keys
+....
+
diff --git a/rest-api-spec/test/bulk/10_basic.yaml b/rest-api-spec/test/bulk/10_basic.yaml
new file mode 100644
index 0000000..f861711
--- /dev/null
+++ b/rest-api-spec/test/bulk/10_basic.yaml
@@ -0,0 +1,25 @@
+---
+"Array of objects":
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - index:
+ _index: test_index
+ _type: test_type
+ _id: test_id
+ - f1: v1
+ f2: 42
+ - index:
+ _index: test_index
+ _type: test_type
+ _id: test_id2
+ - f1: v2
+ f2: 47
+
+ - do:
+ count:
+ index: test_index
+
+ - match: {count: 2}
+
diff --git a/rest-api-spec/test/bulk/20_list_of_strings.yaml b/rest-api-spec/test/bulk/20_list_of_strings.yaml
new file mode 100644
index 0000000..def91f4
--- /dev/null
+++ b/rest-api-spec/test/bulk/20_list_of_strings.yaml
@@ -0,0 +1,17 @@
+---
+"List of strings":
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}}'
+ - '{"f1": "v1", "f2": 42}'
+ - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}}'
+ - '{"f1": "v2", "f2": 47}'
+
+ - do:
+ count:
+ index: test_index
+
+ - match: {count: 2}
+
diff --git a/rest-api-spec/test/bulk/30_big_string.yaml b/rest-api-spec/test/bulk/30_big_string.yaml
new file mode 100644
index 0000000..1d11725
--- /dev/null
+++ b/rest-api-spec/test/bulk/30_big_string.yaml
@@ -0,0 +1,17 @@
+---
+"One big string":
+ - do:
+ bulk:
+ refresh: true
+ body: |
+ {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}}
+ {"f1": "v1", "f2": 42}
+ {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}}
+ {"f1": "v2", "f2": 47}
+
+ - do:
+ count:
+ index: test_index
+
+ - match: {count: 2}
+
diff --git a/rest-api-spec/test/cat.aliases/10_basic.yaml b/rest-api-spec/test/cat.aliases/10_basic.yaml
new file mode 100644
index 0000000..dc3d0f9
--- /dev/null
+++ b/rest-api-spec/test/cat.aliases/10_basic.yaml
@@ -0,0 +1,190 @@
+---
+"Help":
+ - do:
+ cat.aliases:
+ help: true
+
+ - match:
+ $body: >
+ /^ alias .+ \n
+ index .+ \n
+ filter .+ \n
+ routing.index .+ \n
+ routing.search .+ \n
+ $/
+
+---
+"Empty cluster":
+
+ - do:
+ cat.aliases: {}
+
+ - match:
+ $body: >
+ /^
+ $/
+
+---
+"Simple alias":
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ indices.put_alias:
+ index: test
+ name: test_alias
+
+ - do:
+ cat.aliases: {}
+
+ - match:
+ $body: >
+ /^
+ test_alias \s+
+ test \s+
+ - \s+
+ - \s+
+ - \s+
+ $/
+
+---
+"Complex alias":
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ indices.put_alias:
+ index: test
+ name: test_alias
+ body:
+ index_routing: ir
+ search_routing: "sr1,sr2"
+ filter:
+ term:
+ foo: bar
+ - do:
+ cat.aliases: {}
+
+ - match:
+ $body: >
+ /^
+ test_alias \s+
+ test \s+
+ [*] \s+
+ ir \s+
+ sr1,sr2 \s+
+ $/
+
+---
+"Alias name":
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ indices.put_alias:
+ index: test
+ name: test_1
+
+ - do:
+ indices.put_alias:
+ index: test
+ name: test_2
+
+ - do:
+ cat.aliases:
+ name: test_1
+
+ - match:
+ $body: /^test_1 .+ \n$/
+
+ - do:
+ cat.aliases:
+ name: test_2
+
+ - match:
+ $body: /^test_2 .+ \n$/
+
+ - do:
+ cat.aliases:
+ name: test_*
+
+ - match:
+ $body: / (^|\n)test_1 .+ \n/
+
+ - match:
+ $body: / (^|\n)test_2 .+ \n/
+
+---
+"Column headers":
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ indices.put_alias:
+ index: test
+ name: test_1
+
+ - do:
+ cat.aliases:
+ v: true
+
+ - match:
+ $body: >
+ /^ alias \s+
+ index \s+
+ filter \s+
+ routing.index \s+
+ routing.search \s+
+ \n
+ test_1 \s+
+ test \s+
+ - \s+
+ - \s+
+ - \s+
+ $/
+
+
+---
+"Select columns":
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ indices.put_alias:
+ index: test
+ name: test_1
+
+ - do:
+ cat.aliases:
+ h: [index, alias]
+
+ - match:
+ $body: /^ test \s+ test_1 \s+ $/
+
+
+ - do:
+ cat.aliases:
+ h: [index, alias]
+ v: true
+ - match:
+ $body: >
+ /^
+ index \s+ alias \s+ \n
+ test \s+ test_1 \s+ \n
+ $/
+
diff --git a/rest-api-spec/test/cat.allocation/10_basic.yaml b/rest-api-spec/test/cat.allocation/10_basic.yaml
new file mode 100644
index 0000000..6250e42
--- /dev/null
+++ b/rest-api-spec/test/cat.allocation/10_basic.yaml
@@ -0,0 +1,194 @@
+---
+"Help":
+ - do:
+ cat.allocation:
+ help: true
+
+ - match:
+ $body: >
+ /^ shards .+ \n
+ disk.used .+ \n
+ disk.avail .+ \n
+ disk.total .+ \n
+ disk.percent .+ \n
+ host .+ \n
+ ip .+ \n
+ node .+ \n
+ $/
+
+---
+"Empty cluster":
+
+ - do:
+ cat.allocation: {}
+
+ - match:
+ $body: >
+ /^
+ ( 0 \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+ \s+
+ [-\w.]+ \s+
+ \d+(\.\d+){3} \s+
+ \w.*
+ \n
+ )+
+ $/
+
+---
+"One index":
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+ timeout: 1s
+
+ - do:
+ cat.allocation: {}
+
+ - match:
+ $body: >
+ /^
+ ( [1-5] \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+ \s+
+ [-\w.]+ \s+
+ \d+(\.\d+){3} \s+
+ \w.*
+ \n
+ )+
+ (
+ \d+ \s+
+ UNASSIGNED \s+
+ \n
+ )?
+ $/
+
+---
+"Node ID":
+
+ - do:
+ cat.allocation:
+ node_id: _master
+
+ - match:
+ $body: >
+ /^
+ ( 0 \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+ \s+
+ [-\w.]+ \s+
+ \d+(\.\d+){3} \s+
+ \w.*
+ \n
+ )
+ $/
+
+ - do:
+ cat.allocation:
+ node_id: non_existent
+
+ - match:
+ $body: >
+ /^
+ $/
+
+---
+"Column headers":
+
+ - do:
+ cat.allocation:
+ v: true
+ - match:
+
+ $body: >
+ /^ shards \s+
+ disk.used \s+
+ disk.avail \s+
+ disk.total \s+
+ disk.percent \s+
+ host \s+
+ ip \s+
+ node \s+
+ \n
+
+ ( \s+0 \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+(\.\d+)?[kmgt]b \s+
+ \d+ \s+
+ [-\w.]+ \s+
+ \d+(\.\d+){3} \s+
+ \w.*
+ \n
+ )+
+ $/
+
+---
+"Select columns":
+
+ - do:
+ cat.allocation:
+ h: [disk.percent, node]
+
+ - match:
+ $body: >
+ /^
+ ( \d+ \s+
+ \w.*
+ \n
+ )+
+ $/
+
+ - do:
+ cat.allocation:
+ h: [disk.percent, node]
+ v: true
+
+ - match:
+ $body: >
+ /^
+ disk.percent \s+
+ node \s+
+ \n
+ (
+ \s+\d+ \s+
+ \w.*
+ \n
+ )+
+ $/
+
+
+---
+
+"Bytes":
+
+ - do:
+ cat.allocation:
+ bytes: g
+
+ - match:
+ $body: >
+ /^
+ ( 0 \s+
+ \d+ \s+
+ \d+ \s+
+ \d+ \s+
+ \d+ \s+
+ [-\w.]+ \s+
+ \d+(\.\d+){3} \s+
+ \w.*
+ \n
+ )+
+ $/
+
diff --git a/rest-api-spec/test/cat.count/10_basic.yaml b/rest-api-spec/test/cat.count/10_basic.yaml
new file mode 100644
index 0000000..d918652
--- /dev/null
+++ b/rest-api-spec/test/cat.count/10_basic.yaml
@@ -0,0 +1,76 @@
+---
+"Test cat count help":
+ - do:
+ cat.count:
+ help: true
+
+ - match:
+ $body: >
+ /^ epoch .+ \n
+ timestamp .+ \n
+ count .+ \n $/
+
+---
+"Test cat count output":
+
+ - do:
+ cat.count: {}
+
+ - match:
+ $body: >
+ /# epoch timestamp count
+ ^ \d+ \s \d{2}:\d{2}:\d{2} \s 0 \s $/
+
+ - do:
+ index:
+ index: index1
+ type: type1
+ id: 1
+ body: { foo: bar }
+ refresh: true
+
+ - do:
+ cat.count: {}
+
+ - match:
+ $body: >
+ /# epoch timestamp count
+ ^ \d+ \s \d{2}:\d{2}:\d{2} \s 1 \s $/
+
+ - do:
+ index:
+ index: index2
+ type: type2
+ id: 1
+ body: { foo: bar }
+ refresh: true
+
+ - do:
+ cat.count:
+ h: count
+
+ - match:
+ $body: >
+ /# count
+ ^ 2 \s $/
+
+
+ - do:
+ cat.count:
+ index: index1
+
+ - match:
+ $body: >
+ /# epoch timestamp count
+ ^ \d+ \s \d{2}:\d{2}:\d{2} \s 1 \s $/
+
+ - do:
+ cat.count:
+ index: index2
+ v: true
+
+ - match:
+ $body: >
+ /^ epoch \s+ timestamp \s+ count \s+ \n
+ \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/
+
diff --git a/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/test/cat.shards/10_basic.yaml
new file mode 100644
index 0000000..223fdc2
--- /dev/null
+++ b/rest-api-spec/test/cat.shards/10_basic.yaml
@@ -0,0 +1,49 @@
+---
+"Test cat shards output":
+
+ - do:
+ cat.shards: {}
+
+ - match:
+ $body: >
+ /^$/
+
+ - do:
+ index:
+ index: index1
+ type: type1
+ id: 1
+ body: { foo: bar }
+ refresh: true
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+ - do:
+ cat.shards: {}
+
+ - match:
+ $body: >
+ /^(index1 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){10}$/
+
+ - do:
+ indices.create:
+ index: index2
+ body:
+ settings:
+ number_of_replicas: "0"
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ cat.shards: {}
+ - match:
+ $body: >
+ /^(index(1|2) \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){15}$/
+
+ - do:
+ cat.shards:
+ index: index2
+ - match:
+ $body: >
+ /^(index2 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){5}$/
diff --git a/rest-api-spec/test/cat.thread_pool/10_basic.yaml b/rest-api-spec/test/cat.thread_pool/10_basic.yaml
new file mode 100644
index 0000000..af59d28
--- /dev/null
+++ b/rest-api-spec/test/cat.thread_pool/10_basic.yaml
@@ -0,0 +1,40 @@
+---
+"Test cat thread_pool output":
+
+ - do:
+ cat.thread_pool: {}
+
+ - match:
+ $body: >
+ / #host ip bulk.active bulk.queue bulk.rejected index.active index.queue index.rejected search.active search.queue search.rejected
+ ^ (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \n)+ $/
+
+ - do:
+ cat.thread_pool:
+ v: true
+
+ - match:
+ $body: >
+ /^ host \s+ ip \s+ bulk.active \s+ bulk.queue \s+ bulk.rejected \s+ index.active \s+ index.queue \s+ index.rejected \s+ search.active \s+ search.queue \s+ search.rejected \s+ \n
+ (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \n)+ $/
+
+ - do:
+ cat.thread_pool:
+ h: pid,id,h,i,po
+
+ - match:
+ $body: >
+ / #pid id host ip port
+ ^ (\d+ \s+ \S{4} \s+ \S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d{4} \n)+ $/
+
+ - do:
+ cat.thread_pool:
+ h: id,ba,fa,gea,ga,ia,maa,ma,oa,pa
+ v: true
+ full_id: true
+
+ - match:
+ $body: >
+ /^ id \s+ ba \s+ fa \s+ gea \s+ ga \s+ ia \s+ maa \s+ ma \s+ oa \s+ pa \s+ \n
+ (\S+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \n)+ $/
+
diff --git a/rest-api-spec/test/cluster.pending_tasks/10_basic.yaml b/rest-api-spec/test/cluster.pending_tasks/10_basic.yaml
new file mode 100644
index 0000000..f8fd8eb
--- /dev/null
+++ b/rest-api-spec/test/cluster.pending_tasks/10_basic.yaml
@@ -0,0 +1,14 @@
+---
+"Test pending tasks":
+ - do:
+ cluster.pending_tasks: {}
+
+ - is_true: tasks
+---
+"Test pending tasks with local flag":
+ - do:
+ cluster.pending_tasks:
+ local: true
+
+ - is_true: tasks
+
diff --git a/rest-api-spec/test/cluster.put_settings/10_basic.yaml b/rest-api-spec/test/cluster.put_settings/10_basic.yaml
new file mode 100644
index 0000000..e80ab74
--- /dev/null
+++ b/rest-api-spec/test/cluster.put_settings/10_basic.yaml
@@ -0,0 +1,16 @@
+---
+"Test put settings":
+ - do:
+ cluster.put_settings:
+ body:
+ transient:
+ discovery.zen.minimum_master_nodes: 1
+ flat_settings: true
+
+ - match: {transient: {discovery.zen.minimum_master_nodes: "1"}}
+
+ - do:
+ cluster.get_settings:
+ flat_settings: true
+
+ - match: {transient: {discovery.zen.minimum_master_nodes: "1"}}
diff --git a/rest-api-spec/test/cluster.reroute/10_basic.yaml b/rest-api-spec/test/cluster.reroute/10_basic.yaml
new file mode 100644
index 0000000..771f647
--- /dev/null
+++ b/rest-api-spec/test/cluster.reroute/10_basic.yaml
@@ -0,0 +1,4 @@
+---
+"Basic sanity check":
+ - do:
+ cluster.reroute: {}
diff --git a/rest-api-spec/test/cluster.state/10_basic.yaml b/rest-api-spec/test/cluster.state/10_basic.yaml
new file mode 100644
index 0000000..1bea4ad
--- /dev/null
+++ b/rest-api-spec/test/cluster.state/10_basic.yaml
@@ -0,0 +1,6 @@
+---
+"cluster state test":
+ - do:
+ cluster.state: {}
+
+ - is_true: master_node
diff --git a/rest-api-spec/test/cluster.state/20_filtering.yaml b/rest-api-spec/test/cluster.state/20_filtering.yaml
new file mode 100644
index 0000000..7c77ea5
--- /dev/null
+++ b/rest-api-spec/test/cluster.state/20_filtering.yaml
@@ -0,0 +1,164 @@
+setup:
+ - do:
+ index:
+ index: testidx
+ type: testtype
+ id: testing_document
+ body:
+ "text" : "The quick brown fox is brown."
+ - do:
+ indices.refresh: {}
+
+---
+"Filtering the cluster state by blocks should return the blocks field even if the response is empty":
+ - do:
+ cluster.state:
+ metric: [ blocks ]
+
+ - is_true: blocks
+ - is_false: nodes
+ - is_false: metadata
+ - is_false: routing_table
+ - is_false: routing_nodes
+ - is_false: allocations
+ - length: { blocks: 0 }
+
+---
+"Filtering the cluster state by blocks should return the blocks":
+# read only index
+# TODO: can this cause issues leaving it read only when deleting it in teardown
+ - do:
+ indices.put_settings:
+ index: testidx
+ body:
+ index.blocks.read_only: true
+ - do:
+ cluster.state:
+ metric: [ blocks ]
+
+ - is_true: blocks
+ - is_false: nodes
+ - is_false: metadata
+ - is_false: routing_table
+ - is_false: routing_nodes
+ - is_false: allocations
+ - length: { blocks: 1 }
+
+---
+"Filtering the cluster state by nodes only should work":
+ - do:
+ cluster.state:
+ metric: [ nodes ]
+
+ - is_false: blocks
+ - is_true: nodes
+ - is_false: metadata
+ - is_false: routing_table
+ - is_false: routing_nodes
+ - is_false: allocations
+
+---
+"Filtering the cluster state by metadata only should work":
+ - do:
+ cluster.state:
+ metric: [ metadata ]
+
+ - is_false: blocks
+ - is_false: nodes
+ - is_true: metadata
+ - is_false: routing_table
+ - is_false: routing_nodes
+ - is_false: allocations
+
+
+---
+"Filtering the cluster state by routing table only should work":
+ - do:
+ cluster.state:
+ metric: [ routing_table ]
+
+ - is_false: blocks
+ - is_false: nodes
+ - is_false: metadata
+ - is_true: routing_table
+ - is_true: routing_nodes
+ - is_true: allocations
+
+
+---
+"Filtering the cluster state for specific index templates should work ":
+ - do:
+ indices.put_template:
+ name: test1
+ body:
+ template: test-*
+ settings:
+ number_of_shards: 1
+
+ - do:
+ indices.put_template:
+ name: test2
+ body:
+ template: test-*
+ settings:
+ number_of_shards: 2
+
+ - do:
+ indices.put_template:
+ name: foo
+ body:
+ template: foo-*
+ settings:
+ number_of_shards: 3
+ - do:
+ cluster.state:
+ metric: [ metadata ]
+ index_templates: [ test1, test2 ]
+
+ - is_false: blocks
+ - is_false: nodes
+ - is_true: metadata
+ - is_false: routing_table
+ - is_false: routing_nodes
+ - is_false: allocations
+ - is_true: metadata.templates.test1
+ - is_true: metadata.templates.test2
+ - is_false: metadata.templates.foo
+
+---
+"Filtering the cluster state by indices should work in routing table and metadata":
+ - do:
+ index:
+ index: another
+ type: type
+ id: testing_document
+ body:
+ "text" : "The quick brown fox is brown."
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ cluster.state:
+ metric: [ routing_table, metadata ]
+ index: [ testidx ]
+
+ - is_false: metadata.indices.another
+ - is_false: routing_table.indices.another
+ - is_true: metadata.indices.testidx
+ - is_true: routing_table.indices.testidx
+
+---
+"Filtering the cluster state using _all for indices and metrics should work":
+ - do:
+ cluster.state:
+ metric: [ '_all' ]
+ index: [ '_all' ]
+
+ - is_true: blocks
+ - is_true: nodes
+ - is_true: metadata
+ - is_true: routing_table
+ - is_true: routing_nodes
+ - is_true: allocations
+
diff --git a/rest-api-spec/test/create/10_with_id.yaml b/rest-api-spec/test/create/10_with_id.yaml
new file mode 100644
index 0000000..1e58c38
--- /dev/null
+++ b/rest-api-spec/test/create/10_with_id.yaml
@@ -0,0 +1,33 @@
+---
+"Create with ID":
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1"}
+ - match: { _version: 1}
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1"}
+ - match: { _version: 1}
+ - match: { _source: { foo: bar }}
+
+ - do:
+ catch: conflict
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
diff --git a/rest-api-spec/test/create/15_without_id.yaml b/rest-api-spec/test/create/15_without_id.yaml
new file mode 100644
index 0000000..0342fdb
--- /dev/null
+++ b/rest-api-spec/test/create/15_without_id.yaml
@@ -0,0 +1,25 @@
+---
+"Create without ID":
+ - do:
+ create:
+ index: test_1
+ type: test
+ body: { foo: bar }
+
+ - is_true: _id
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _version: 1 }
+ - set: { _id: id }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: '$id'
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: $id }
+ - match: { _version: 1 }
+ - match: { _source: { foo: bar }}
diff --git a/rest-api-spec/test/create/30_internal_version.yaml b/rest-api-spec/test/create/30_internal_version.yaml
new file mode 100644
index 0000000..fcdb233
--- /dev/null
+++ b/rest-api-spec/test/create/30_internal_version.yaml
@@ -0,0 +1,20 @@
+---
+"Internal version":
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - match: { _version: 1}
+
+ - do:
+ catch: conflict
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
diff --git a/rest-api-spec/test/create/35_external_version.yaml b/rest-api-spec/test/create/35_external_version.yaml
new file mode 100644
index 0000000..8ee11b0
--- /dev/null
+++ b/rest-api-spec/test/create/35_external_version.yaml
@@ -0,0 +1,33 @@
+---
+"External version":
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 5
+
+ - match: { _version: 5}
+
+ - do:
+ catch: conflict
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 5
+
+ - do:
+ catch: conflict
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 6
diff --git a/rest-api-spec/test/create/40_routing.yaml b/rest-api-spec/test/create/40_routing.yaml
new file mode 100644
index 0000000..bc3fb84
--- /dev/null
+++ b/rest-api-spec/test/create/40_routing.yaml
@@ -0,0 +1,41 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ fields: [_routing]
+
+ - match: { _id: "1"}
+ - match: { fields._routing: "5"}
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/create/50_parent.yaml b/rest-api-spec/test/create/50_parent.yaml
new file mode 100644
index 0000000..dcd24d9
--- /dev/null
+++ b/rest-api-spec/test/create/50_parent.yaml
@@ -0,0 +1,43 @@
+---
+"Parent":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ catch: /RoutingMissingException/
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ fields: [_parent, _routing]
+
+ - match: { _id: "1"}
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "5"}
+
diff --git a/rest-api-spec/test/create/55_parent_with_routing.yaml b/rest-api-spec/test/create/55_parent_with_routing.yaml
new file mode 100644
index 0000000..ec94718
--- /dev/null
+++ b/rest-api-spec/test/create/55_parent_with_routing.yaml
@@ -0,0 +1,54 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ fields: [_parent, _routing]
+
+ - match: { _id: "1"}
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "4"}
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+
diff --git a/rest-api-spec/test/create/60_refresh.yaml b/rest-api-spec/test/create/60_refresh.yaml
new file mode 100644
index 0000000..99bfbc3
--- /dev/null
+++ b/rest-api-spec/test/create/60_refresh.yaml
@@ -0,0 +1,46 @@
+---
+"Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index.refresh_interval: -1
+ number_of_replicas: 0
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { term: { _id: 1 }}
+
+ - match: { hits.total: 0 }
+
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 2
+ refresh: 1
+ body: { foo: bar }
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { term: { _id: 2 }}
+
+ - match: { hits.total: 1 }
diff --git a/rest-api-spec/test/create/70_timestamp.yaml b/rest-api-spec/test/create/70_timestamp.yaml
new file mode 100644
index 0000000..f48a9fa
--- /dev/null
+++ b/rest-api-spec/test/create/70_timestamp.yaml
@@ -0,0 +1,81 @@
+---
+"Timestamp":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _timestamp:
+ enabled: 1
+ store: yes
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+# blank timestamp
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - is_true: fields._timestamp
+
+# milliseconds since epoch
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ timestamp: 1372011280000
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - match: { fields._timestamp: 1372011280000 }
+
+# date format
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ timestamp: 2013-06-23T18:14:40
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - match: { fields._timestamp: 1372011280000 }
+
diff --git a/rest-api-spec/test/create/75_ttl.yaml b/rest-api-spec/test/create/75_ttl.yaml
new file mode 100644
index 0000000..05eb88e
--- /dev/null
+++ b/rest-api-spec/test/create/75_ttl.yaml
@@ -0,0 +1,104 @@
+---
+"TTL":
+
+ - skip:
+ features: gtelte
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _ttl:
+ enabled: 1
+ store: yes
+ default: 10s
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+# blank ttl
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 10000}
+ - gt: { fields._ttl: 0}
+
+# milliseconds
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 100000
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 100000}
+ - gt: { fields._ttl: 10000}
+
+# duration
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ - do:
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 20s
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 20000}
+ - gt: { fields._ttl: 10000}
+
+# with timestamp
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ - do:
+ catch: /AlreadyExpiredException/
+ create:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 20s
+ timestamp: 2013-06-23T18:14:40
+
diff --git a/rest-api-spec/test/create/TODO.txt b/rest-api-spec/test/create/TODO.txt
new file mode 100644
index 0000000..d8a93ca
--- /dev/null
+++ b/rest-api-spec/test/create/TODO.txt
@@ -0,0 +1,5 @@
+Tests missing for:
+
+# consistency
+# percolate
+# replication
diff --git a/rest-api-spec/test/delete/10_basic.yaml b/rest-api-spec/test/delete/10_basic.yaml
new file mode 100644
index 0000000..a3671d5
--- /dev/null
+++ b/rest-api-spec/test/delete/10_basic.yaml
@@ -0,0 +1,19 @@
+---
+"Basic":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - match: { _version: 1 }
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _version: 2 }
diff --git a/rest-api-spec/test/delete/20_internal_version.yaml b/rest-api-spec/test/delete/20_internal_version.yaml
new file mode 100644
index 0000000..3d9ddb7
--- /dev/null
+++ b/rest-api-spec/test/delete/20_internal_version.yaml
@@ -0,0 +1,28 @@
+---
+"Internal version":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - match: { _version: 1}
+
+ - do:
+ catch: conflict
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ version: 2
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ version: 1
+
+ - match: { _version: 2 }
diff --git a/rest-api-spec/test/delete/25_external_version.yaml b/rest-api-spec/test/delete/25_external_version.yaml
new file mode 100644
index 0000000..453d64d
--- /dev/null
+++ b/rest-api-spec/test/delete/25_external_version.yaml
@@ -0,0 +1,32 @@
+---
+"External version":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 5
+
+ - match: { _version: 5}
+
+ - do:
+ catch: conflict
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ version_type: external
+ version: 4
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ version_type: external
+ version: 6
+
+ - match: { _version: 6}
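
The external-version test above encodes the rule that with version_type=external a write, including a delete, is accepted only when the supplied version is strictly greater than the stored one, and the supplied value then becomes the new version. A minimal sketch, assuming an Elasticsearch 1.x node on localhost:9200 and the Python `requests` library (both assumptions, not part of this patch):

    import requests

    ES = "http://localhost:9200"  # assumed local 1.x node

    # Stored version becomes 5, taken verbatim from the request.
    requests.put(ES + "/test_1/test/1", data='{"foo": "bar"}',
                 params={"version_type": "external", "version": 5})

    # 4 < 5: rejected with 409 Conflict.
    r = requests.delete(ES + "/test_1/test/1",
                        params={"version_type": "external", "version": 4})
    assert r.status_code == 409

    # 6 > 5: accepted; 6 is recorded as the delete's version.
    r = requests.delete(ES + "/test_1/test/1",
                        params={"version_type": "external", "version": 6})
    assert r.json()["_version"] == 6
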
diff --git a/rest-api-spec/test/delete/30_routing.yaml b/rest-api-spec/test/delete/30_routing.yaml
new file mode 100644
index 0000000..b0d3ca3
--- /dev/null
+++ b/rest-api-spec/test/delete/30_routing.yaml
@@ -0,0 +1,29 @@
+---
+"Routing":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ catch: missing
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+
diff --git a/rest-api-spec/test/delete/40_parent.yaml b/rest-api-spec/test/delete/40_parent.yaml
new file mode 100644
index 0000000..6e8beca
--- /dev/null
+++ b/rest-api-spec/test/delete/40_parent.yaml
@@ -0,0 +1,36 @@
+---
+"Parent":
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ catch: missing
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ parent: 1
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
diff --git a/rest-api-spec/test/delete/42_missing_parent.yml b/rest-api-spec/test/delete/42_missing_parent.yml
new file mode 100644
index 0000000..ac00a8d
--- /dev/null
+++ b/rest-api-spec/test/delete/42_missing_parent.yml
@@ -0,0 +1,28 @@
+---
+"Delete on all shards when parent not specified":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/delete/45_parent_with_routing.yaml b/rest-api-spec/test/delete/45_parent_with_routing.yaml
new file mode 100644
index 0000000..7a11db4
--- /dev/null
+++ b/rest-api-spec/test/delete/45_parent_with_routing.yaml
@@ -0,0 +1,43 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body: { foo: bar }
+
+ - do:
+ catch: missing
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 1
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+
diff --git a/rest-api-spec/test/delete/50_refresh.yaml b/rest-api-spec/test/delete/50_refresh.yaml
new file mode 100644
index 0000000..b550789
--- /dev/null
+++ b/rest-api-spec/test/delete/50_refresh.yaml
@@ -0,0 +1,73 @@
+---
+"Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ refresh_interval: -1
+ number_of_replicas: 0
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ refresh: 1
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 2
+ body: { foo: bar }
+ refresh: 1
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { terms: { _id: [1,2] }}
+
+ - match: { hits.total: 2 }
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { terms: { _id: [1,2] }}
+
+ - match: { hits.total: 2 }
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 2
+ refresh: 1
+
+# If a replica shard where doc 1 is located gets initialized at this point, doc 1
+# won't be found by the following search as the shard gets automatically refreshed
+# right before getting started. This is why this test only works with 0 replicas.
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { terms: { _id: [1,2] }}
+
+ - match: { hits.total: 1 }
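
The comment in the test above is the crux: with the refresh interval disabled, a delete stays invisible to search until something refreshes the affected shard, and the refresh triggered by `refresh: 1` on the second delete only touches the shard holding doc 2. A rough sketch of the same sequence, assuming an Elasticsearch 1.x node on localhost:9200, the Python `requests` library, and the default multi-shard index in which ids 1 and 2 hash to different shards (all assumptions, not part of this patch):

    import json
    import requests

    ES = "http://localhost:9200"  # assumed local 1.x node

    requests.put(ES + "/test_1", data=json.dumps(
        {"settings": {"refresh_interval": -1, "number_of_replicas": 0}}))
    for i in ("1", "2"):
        requests.put(ES + "/test_1/test/" + i,
                     params={"refresh": "true"}, data='{"foo": "bar"}')

    requests.delete(ES + "/test_1/test/1")           # no refresh requested
    requests.delete(ES + "/test_1/test/2",
                    params={"refresh": "true"})      # refreshes doc 2's shard only

    body = json.dumps({"query": {"terms": {"_id": ["1", "2"]}}})
    total = requests.get(ES + "/test_1/test/_search",
                         data=body).json()["hits"]["total"]
    print(total)  # 1: doc 1's shard was never refreshed, so it still matches
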
diff --git a/rest-api-spec/test/delete/60_missing.yaml b/rest-api-spec/test/delete/60_missing.yaml
new file mode 100644
index 0000000..9cfdb48
--- /dev/null
+++ b/rest-api-spec/test/delete/60_missing.yaml
@@ -0,0 +1,19 @@
+---
+"Missing document with catch":
+
+ - do:
+ catch: missing
+ delete:
+ index: test_1
+ type: test
+ id: 1
+
+---
+"Missing document with ignore":
+
+ - do:
+ delete:
+ index: test_1
+ type: test
+ id: 1
+ ignore: 404
diff --git a/rest-api-spec/test/delete/TODO.txt b/rest-api-spec/test/delete/TODO.txt
new file mode 100644
index 0000000..0418363
--- /dev/null
+++ b/rest-api-spec/test/delete/TODO.txt
@@ -0,0 +1,6 @@
+Tests missing for:
+
+# consistency
+# replication
+# timeout
+
diff --git a/rest-api-spec/test/delete_by_query/10_basic.yaml b/rest-api-spec/test/delete_by_query/10_basic.yaml
new file mode 100644
index 0000000..c253ad8
--- /dev/null
+++ b/rest-api-spec/test/delete_by_query/10_basic.yaml
@@ -0,0 +1,42 @@
+---
+"Basic delete_by_query":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 2
+ body: { foo: baz }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 3
+ body: { foo: foo }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ delete_by_query:
+ index: test_1
+ body:
+ query:
+ match:
+ foo: bar
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ count:
+ index: test_1
+
+ - match: { count: 2 }
diff --git a/rest-api-spec/test/exists/10_basic.yaml b/rest-api-spec/test/exists/10_basic.yaml
new file mode 100644
index 0000000..ee6d73f
--- /dev/null
+++ b/rest-api-spec/test/exists/10_basic.yaml
@@ -0,0 +1,27 @@
+---
+"Basic":
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+
+ - is_false: ''
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "foo": "bar" }
+
+ - is_true: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+
+ - is_true: ''
diff --git a/rest-api-spec/test/exists/30_parent.yaml b/rest-api-spec/test/exists/30_parent.yaml
new file mode 100644
index 0000000..0f84a1c
--- /dev/null
+++ b/rest-api-spec/test/exists/30_parent.yaml
@@ -0,0 +1,42 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+---
+"Parent":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - is_true: ''
+
+---
+"Parent omitted":
+
+ - do:
+ catch: request
+ exists:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/exists/40_routing.yaml b/rest-api-spec/test/exists/40_routing.yaml
new file mode 100644
index 0000000..56ba443
--- /dev/null
+++ b/rest-api-spec/test/exists/40_routing.yaml
@@ -0,0 +1,39 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+
+ - is_true: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+
+ - is_false: ''
diff --git a/rest-api-spec/test/exists/55_parent_with_routing.yaml b/rest-api-spec/test/exists/55_parent_with_routing.yaml
new file mode 100644
index 0000000..0e92aac
--- /dev/null
+++ b/rest-api-spec/test/exists/55_parent_with_routing.yaml
@@ -0,0 +1,56 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body: { foo: bar }
+
+ - is_true: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+
+ - is_true: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - is_false: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+
+ - is_true: ''
+
diff --git a/rest-api-spec/test/exists/60_realtime_refresh.yaml b/rest-api-spec/test/exists/60_realtime_refresh.yaml
new file mode 100644
index 0000000..1025747
--- /dev/null
+++ b/rest-api-spec/test/exists/60_realtime_refresh.yaml
@@ -0,0 +1,50 @@
+---
+"Realtime Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ refresh_interval: -1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 1
+
+ - is_true: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 0
+
+ - is_false: ''
+
+ - do:
+ exists:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 0
+ refresh: 1
+
+ - is_true: ''
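
Get and exists are realtime by default, so they can see documents that search cannot; realtime: 0 makes them honour the refresh cycle instead, and refresh: 1 forces a refresh on the read itself. A minimal sketch of the same three probes, assuming an Elasticsearch 1.x node on localhost:9200 and the Python `requests` library (both assumptions, not part of this patch; exists maps to an HTTP HEAD, and test_1 is created with refresh_interval: -1 as in the test above):

    import requests

    ES = "http://localhost:9200"  # assumed local 1.x node

    requests.put(ES + "/test_1/test/1", data='{"foo": "bar"}')

    # Realtime read: visible immediately despite refresh being disabled.
    assert requests.head(ES + "/test_1/test/1").status_code == 200

    # Non-realtime read: invisible until a refresh happens...
    r = requests.head(ES + "/test_1/test/1", params={"realtime": "false"})
    assert r.status_code == 404

    # ...which the read itself can force.
    r = requests.head(ES + "/test_1/test/1",
                      params={"realtime": "false", "refresh": "true"})
    assert r.status_code == 200
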
diff --git a/rest-api-spec/test/exists/70_defaults.yaml b/rest-api-spec/test/exists/70_defaults.yaml
new file mode 100644
index 0000000..2db28f6
--- /dev/null
+++ b/rest-api-spec/test/exists/70_defaults.yaml
@@ -0,0 +1,17 @@
+---
+"Client-side default type":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "foo": "bar" }
+
+ - do:
+ exists:
+ index: test_1
+ type: _all
+ id: 1
+
+ - is_true: ''
diff --git a/rest-api-spec/test/exists/TODO.txt b/rest-api-spec/test/exists/TODO.txt
new file mode 100644
index 0000000..340ff57
--- /dev/null
+++ b/rest-api-spec/test/exists/TODO.txt
@@ -0,0 +1,3 @@
+Tests missing for:
+
+# preference
diff --git a/rest-api-spec/test/explain/10_basic.yaml b/rest-api-spec/test/explain/10_basic.yaml
new file mode 100644
index 0000000..fa68dfa
--- /dev/null
+++ b/rest-api-spec/test/explain/10_basic.yaml
@@ -0,0 +1,23 @@
+---
+"Basic mlt":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar, title: howdy }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ explain:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ query:
+ match_all: {}
+
+ - is_true: matched
+ - match: { explanation.value: 1 }
diff --git a/rest-api-spec/test/explain/20_source_filtering.yaml b/rest-api-spec/test/explain/20_source_filtering.yaml
new file mode 100644
index 0000000..04aac33
--- /dev/null
+++ b/rest-api-spec/test/explain/20_source_filtering.yaml
@@ -0,0 +1,44 @@
+---
+"Source filtering":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+ - do:
+ indices.refresh:
+ index: test_1
+
+ - do:
+ explain: { index: test_1, type: test, id: 1, _source: false, body: { query: { match_all: {}} } }
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1" }
+ - is_false: get._source
+
+ - do:
+ explain: { index: test_1, type: test, id: 1, _source: true, body: { query: { match_all: {}} } }
+ - match: { get._source.include.field1: v1 }
+
+ - do:
+ explain: { index: test_1, type: test, id: 1, _source: include.field1, body: { query: { match_all: {}} } }
+ - match: { get._source.include.field1: v1 }
+ - is_false: get._source.include.field2
+
+ - do:
+ explain: { index: test_1, type: test, id: 1, _source_include: include.field1, body: { query: { match_all: {}} } }
+ - match: { get._source.include.field1: v1 }
+ - is_false: get._source.include.field2
+
+ - do:
+ explain: { index: test_1, type: test, id: 1, _source_include: "include.field1,include.field2", body: { query: { match_all: {}} } }
+ - match: { get._source.include.field1: v1 }
+ - match: { get._source.include.field2: v2 }
+ - is_false: get._source.count
+
+ - do:
+ explain: { index: test_1, type: test, id: 1, _source_include: include, _source_exclude: "*.field2", body: { query: { match_all: {}} } }
+ - match: { get._source.include.field1: v1 }
+ - is_false: get._source.include.field2
+ - is_false: get._source.count
diff --git a/rest-api-spec/test/explain/TODO.txt b/rest-api-spec/test/explain/TODO.txt
new file mode 100644
index 0000000..2362d9d
--- /dev/null
+++ b/rest-api-spec/test/explain/TODO.txt
@@ -0,0 +1,6 @@
+Tests missing for:
+
+- everything :)
+
+
+# preference
diff --git a/rest-api-spec/test/get/10_basic.yaml b/rest-api-spec/test/get/10_basic.yaml
new file mode 100644
index 0000000..0689f71
--- /dev/null
+++ b/rest-api-spec/test/get/10_basic.yaml
@@ -0,0 +1,31 @@
+---
+"Basic":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 中文
+ body: { "foo": "Hello: 中文" }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 中文
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: 中文 }
+ - match: { _source: { foo: "Hello: 中文" } }
+
+ - do:
+ get:
+ index: test_1
+ type: _all
+ id: 中文
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: 中文 }
+ - match: { _source: { foo: "Hello: 中文" } }
diff --git a/rest-api-spec/test/get/15_default_values.yaml b/rest-api-spec/test/get/15_default_values.yaml
new file mode 100644
index 0000000..5e08112
--- /dev/null
+++ b/rest-api-spec/test/get/15_default_values.yaml
@@ -0,0 +1,21 @@
+---
+"Default values":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "foo": "bar" }
+
+ - do:
+ get:
+ index: test_1
+ type: _all
+ id: 1
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: '1' }
+ - match: { _source: { foo: "bar" } }
+
diff --git a/rest-api-spec/test/get/20_fields.yaml b/rest-api-spec/test/get/20_fields.yaml
new file mode 100644
index 0000000..15530b8
--- /dev/null
+++ b/rest-api-spec/test/get/20_fields.yaml
@@ -0,0 +1,45 @@
+---
+"Fields":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "foo": "bar", "count": 1 }
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: foo
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: '1' }
+ - match: { fields.foo: [bar] }
+ - is_false: _source
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: [foo, count]
+
+ - match: { fields.foo: [bar] }
+ - match: { fields.count: [1] }
+ - is_false: _source
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: [foo, count, _source]
+
+ - match: { fields.foo: [bar] }
+ - match: { fields.count: [1] }
+ - match: { _source.foo: bar }
+
+
diff --git a/rest-api-spec/test/get/30_parent.yaml b/rest-api-spec/test/get/30_parent.yaml
new file mode 100644
index 0000000..f7f31d0
--- /dev/null
+++ b/rest-api-spec/test/get/30_parent.yaml
@@ -0,0 +1,45 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 中文
+ body: { foo: bar }
+
+---
+"Parent":
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 中文
+ fields: [_parent, _routing]
+
+ - match: { _id: "1"}
+ - match: { fields._parent: 中文 }
+ - match: { fields._routing: 中文}
+
+---
+"Parent omitted":
+ - do:
+ catch: request
+ get:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/get/40_routing.yaml b/rest-api-spec/test/get/40_routing.yaml
new file mode 100644
index 0000000..f909cb0
--- /dev/null
+++ b/rest-api-spec/test/get/40_routing.yaml
@@ -0,0 +1,41 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ fields: [_routing]
+
+ - match: { _id: "1"}
+ - match: { fields._routing: "5"}
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/get/55_parent_with_routing.yaml b/rest-api-spec/test/get/55_parent_with_routing.yaml
new file mode 100644
index 0000000..43d60f5
--- /dev/null
+++ b/rest-api-spec/test/get/55_parent_with_routing.yaml
@@ -0,0 +1,54 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ fields: [_parent, _routing]
+
+ - match: { _id: "1"}
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "4"}
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+
diff --git a/rest-api-spec/test/get/60_realtime_refresh.yaml b/rest-api-spec/test/get/60_realtime_refresh.yaml
new file mode 100644
index 0000000..4631bed
--- /dev/null
+++ b/rest-api-spec/test/get/60_realtime_refresh.yaml
@@ -0,0 +1,49 @@
+---
+"Realtime Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ refresh_interval: -1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 1
+
+ - is_true: found
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 0
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 0
+ refresh: 1
+
+ - is_true: found
diff --git a/rest-api-spec/test/get/70_source_filtering.yaml b/rest-api-spec/test/get/70_source_filtering.yaml
new file mode 100644
index 0000000..03572bb
--- /dev/null
+++ b/rest-api-spec/test/get/70_source_filtering.yaml
@@ -0,0 +1,56 @@
+---
+"Source filtering":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+ - do:
+ get: { index: test_1, type: test, id: 1, _source: false }
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1" }
+ - is_false: _source
+
+ - do:
+ get: { index: test_1, type: test, id: 1, _source: true }
+ - match: { _source.include.field1: v1 }
+
+ - do:
+ get: { index: test_1, type: test, id: 1, _source: include.field1 }
+ - match: { _source.include.field1: v1 }
+ - is_false: _source.include.field2
+
+ - do:
+ get: { index: test_1, type: test, id: 1, _source_include: include.field1 }
+ - match: { _source.include.field1: v1 }
+ - is_false: _source.include.field2
+
+ - do:
+ get: { index: test_1, type: test, id: 1, _source_include: "include.field1,include.field2" }
+ - match: { _source.include.field1: v1 }
+ - match: { _source.include.field2: v2 }
+ - is_false: _source.count
+
+ - do:
+ get: { index: test_1, type: test, id: 1, _source_include: include, _source_exclude: "*.field2" }
+ - match: { _source.include.field1: v1 }
+ - is_false: _source.include.field2
+ - is_false: _source.count
+
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: count
+ _source: true
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1" }
+ - match: { fields.count: [1] }
+ - match: { _source.include.field1: v1 }
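
The source-filtering parameters above compose: _source toggles or picks the returned source outright, while _source_include and _source_exclude select subtrees, with excludes (wildcards allowed) carved out of the includes. A minimal sketch, assuming an Elasticsearch 1.x node on localhost:9200 and the Python `requests` library (both assumptions, not part of this patch):

    import requests

    ES = "http://localhost:9200"  # assumed local 1.x node

    doc = '{"include": {"field1": "v1", "field2": "v2"}, "count": 1}'
    requests.put(ES + "/test_1/test/1", data=doc)

    # Suppress the source entirely.
    r = requests.get(ES + "/test_1/test/1", params={"_source": "false"})
    assert "_source" not in r.json()

    # Keep the "include" subtree, then strip any field2 beneath it.
    r = requests.get(ES + "/test_1/test/1",
                     params={"_source_include": "include",
                             "_source_exclude": "*.field2"})
    print(r.json()["_source"])  # {"include": {"field1": "v1"}}
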
diff --git a/rest-api-spec/test/get/80_missing.yaml b/rest-api-spec/test/get/80_missing.yaml
new file mode 100644
index 0000000..a60d113
--- /dev/null
+++ b/rest-api-spec/test/get/80_missing.yaml
@@ -0,0 +1,19 @@
+---
+"Missing document with catch":
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+---
+"Missing document with ignore":
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ ignore: 404
diff --git a/rest-api-spec/test/get/TODO.txt b/rest-api-spec/test/get/TODO.txt
new file mode 100644
index 0000000..340ff57
--- /dev/null
+++ b/rest-api-spec/test/get/TODO.txt
@@ -0,0 +1,3 @@
+Tests missing for:
+
+# preference
diff --git a/rest-api-spec/test/get_source/10_basic.yaml b/rest-api-spec/test/get_source/10_basic.yaml
new file mode 100644
index 0000000..b20357c
--- /dev/null
+++ b/rest-api-spec/test/get_source/10_basic.yaml
@@ -0,0 +1,24 @@
+---
+"Basic":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "foo": "bar" }
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { '': { foo: bar } }
+
+ - do:
+ get_source:
+ index: test_1
+ type: _all
+ id: 1
+
+ - match: { '': { foo: bar } }
diff --git a/rest-api-spec/test/get_source/15_default_values.yaml b/rest-api-spec/test/get_source/15_default_values.yaml
new file mode 100644
index 0000000..3e67c62
--- /dev/null
+++ b/rest-api-spec/test/get_source/15_default_values.yaml
@@ -0,0 +1,16 @@
+---
+"Default values":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "foo": "bar" }
+
+ - do:
+ get_source:
+ index: test_1
+ type: _all
+ id: 1
+
+ - match: { '': { foo: bar } }
diff --git a/rest-api-spec/test/get_source/30_parent.yaml b/rest-api-spec/test/get_source/30_parent.yaml
new file mode 100644
index 0000000..35edb90
--- /dev/null
+++ b/rest-api-spec/test/get_source/30_parent.yaml
@@ -0,0 +1,43 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+
+---
+"Parent":
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - match: { '': {foo: bar}}
+
+---
+"Parent omitted":
+
+ - do:
+ catch: request
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/get_source/40_routing.yaml b/rest-api-spec/test/get_source/40_routing.yaml
new file mode 100644
index 0000000..f771dbb
--- /dev/null
+++ b/rest-api-spec/test/get_source/40_routing.yaml
@@ -0,0 +1,39 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+
+ - match: { '': {foo: bar}}
+
+ - do:
+ catch: missing
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/get_source/55_parent_with_routing.yaml b/rest-api-spec/test/get_source/55_parent_with_routing.yaml
new file mode 100644
index 0000000..86fe2ba
--- /dev/null
+++ b/rest-api-spec/test/get_source/55_parent_with_routing.yaml
@@ -0,0 +1,51 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body: { foo: bar }
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+
+ - match: { '': {foo: bar}}
+
+ - do:
+ catch: missing
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+
diff --git a/rest-api-spec/test/get_source/60_realtime_refresh.yaml b/rest-api-spec/test/get_source/60_realtime_refresh.yaml
new file mode 100644
index 0000000..92f21ca
--- /dev/null
+++ b/rest-api-spec/test/get_source/60_realtime_refresh.yaml
@@ -0,0 +1,50 @@
+---
+"Realtime":
+
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ refresh_interval: -1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 1
+
+ - match: { '': {foo: bar}}
+
+ - do:
+ catch: missing
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 0
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ realtime: 0
+ refresh: 1
+
+ - match: { '': {foo: bar}}
+
diff --git a/rest-api-spec/test/get_source/70_source_filtering.yaml b/rest-api-spec/test/get_source/70_source_filtering.yaml
new file mode 100644
index 0000000..cc168ac
--- /dev/null
+++ b/rest-api-spec/test/get_source/70_source_filtering.yaml
@@ -0,0 +1,26 @@
+---
+"Source filtering":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+
+ - do:
+ get_source: { index: test_1, type: test, id: 1, _source_include: include.field1 }
+ - match: { include.field1: v1 }
+ - is_false: include.field2
+
+ - do:
+ get_source: { index: test_1, type: test, id: 1, _source_include: "include.field1,include.field2" }
+ - match: { include.field1: v1 }
+ - match: { include.field2: v2 }
+ - is_false: count
+
+ - do:
+ get_source: { index: test_1, type: test, id: 1, _source_include: include, _source_exclude: "*.field2" }
+ - match: { include.field1: v1 }
+ - is_false: include.field2
+ - is_false: count
diff --git a/rest-api-spec/test/get_source/80_missing.yaml b/rest-api-spec/test/get_source/80_missing.yaml
new file mode 100644
index 0000000..fba8540
--- /dev/null
+++ b/rest-api-spec/test/get_source/80_missing.yaml
@@ -0,0 +1,19 @@
+---
+"Missing document with catch":
+
+ - do:
+ catch: missing
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+
+---
+"Missing document with ignore":
+
+ - do:
+ get_source:
+ index: test_1
+ type: test
+ id: 1
+ ignore: 404
diff --git a/rest-api-spec/test/get_source/TODO.txt b/rest-api-spec/test/get_source/TODO.txt
new file mode 100644
index 0000000..340ff57
--- /dev/null
+++ b/rest-api-spec/test/get_source/TODO.txt
@@ -0,0 +1,3 @@
+Tests missing for:
+
+# preference
diff --git a/rest-api-spec/test/index/10_with_id.yaml b/rest-api-spec/test/index/10_with_id.yaml
new file mode 100644
index 0000000..745e111
--- /dev/null
+++ b/rest-api-spec/test/index/10_with_id.yaml
@@ -0,0 +1,26 @@
+---
+"Index with ID":
+
+ - do:
+ index:
+ index: test-weird-index-中文
+ type: weird.type
+ id: 1
+ body: { foo: bar }
+
+ - match: { _index: test-weird-index-中文 }
+ - match: { _type: weird.type }
+ - match: { _id: "1"}
+ - match: { _version: 1}
+
+ - do:
+ get:
+ index: test-weird-index-中文
+ type: weird.type
+ id: 1
+
+ - match: { _index: test-weird-index-中文 }
+ - match: { _type: weird.type }
+ - match: { _id: "1"}
+ - match: { _version: 1}
+ - match: { _source: { foo: bar }}
diff --git a/rest-api-spec/test/index/15_without_id.yaml b/rest-api-spec/test/index/15_without_id.yaml
new file mode 100644
index 0000000..3fff051
--- /dev/null
+++ b/rest-api-spec/test/index/15_without_id.yaml
@@ -0,0 +1,26 @@
+---
+"Index without ID":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ body: { foo: bar }
+
+ - is_true: _id
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _version: 1 }
+ - set: { _id: id }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: '$id'
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: $id }
+ - match: { _version: 1 }
+ - match: { _source: { foo: bar }}
diff --git a/rest-api-spec/test/index/20_optype.yaml b/rest-api-spec/test/index/20_optype.yaml
new file mode 100644
index 0000000..60ae26d
--- /dev/null
+++ b/rest-api-spec/test/index/20_optype.yaml
@@ -0,0 +1,29 @@
+---
+"Optype":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ op_type: create
+ body: { foo: bar }
+
+ - do:
+ catch: conflict
+ index:
+ index: test_1
+ type: test
+ id: 1
+ op_type: create
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ op_type: index
+ body: { foo: bar }
+
+ - match: { _version: 2 }
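
op_type turns the index call into either an overwriting write (the default, op_type=index) or a create-only write that refuses to clobber an existing document. A minimal sketch, assuming an Elasticsearch 1.x node on localhost:9200 and the Python `requests` library (both assumptions, not part of this patch):

    import requests

    ES = "http://localhost:9200"  # assumed local 1.x node

    # First create succeeds; repeating it on the same id is a 409 Conflict.
    requests.put(ES + "/test_1/test/1",
                 params={"op_type": "create"}, data='{"foo": "bar"}')
    r = requests.put(ES + "/test_1/test/1",
                     params={"op_type": "create"}, data='{"foo": "bar"}')
    assert r.status_code == 409

    # A plain index overwrites and bumps the version to 2.
    r = requests.put(ES + "/test_1/test/1",
                     params={"op_type": "index"}, data='{"foo": "bar"}')
    assert r.json()["_version"] == 2
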
diff --git a/rest-api-spec/test/index/30_internal_version.yaml b/rest-api-spec/test/index/30_internal_version.yaml
new file mode 100644
index 0000000..1767fbe
--- /dev/null
+++ b/rest-api-spec/test/index/30_internal_version.yaml
@@ -0,0 +1,36 @@
+---
+"Internal version":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ - match: { _version: 1}
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ - match: { _version: 2}
+
+ - do:
+ catch: conflict
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version: 1
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version: 2
+
+ - match: { _version: 3 }
diff --git a/rest-api-spec/test/index/35_external_version.yaml b/rest-api-spec/test/index/35_external_version.yaml
new file mode 100644
index 0000000..0f8f923
--- /dev/null
+++ b/rest-api-spec/test/index/35_external_version.yaml
@@ -0,0 +1,34 @@
+---
+"External version":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 5
+
+ - match: { _version: 5}
+
+ - do:
+ catch: conflict
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 5
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ version_type: external
+ version: 6
+
+ - match: { _version: 6}
diff --git a/rest-api-spec/test/index/40_routing.yaml b/rest-api-spec/test/index/40_routing.yaml
new file mode 100644
index 0000000..f909cb0
--- /dev/null
+++ b/rest-api-spec/test/index/40_routing.yaml
@@ -0,0 +1,41 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ fields: [_routing]
+
+ - match: { _id: "1"}
+ - match: { fields._routing: "5"}
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+
diff --git a/rest-api-spec/test/index/50_parent.yaml b/rest-api-spec/test/index/50_parent.yaml
new file mode 100644
index 0000000..551d30d
--- /dev/null
+++ b/rest-api-spec/test/index/50_parent.yaml
@@ -0,0 +1,42 @@
+---
+"Parent":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ catch: /RoutingMissingException/
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ fields: [_parent, _routing]
+
+ - match: { _id: "1"}
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "5"}
+
diff --git a/rest-api-spec/test/index/55_parent_with_routing.yaml b/rest-api-spec/test/index/55_parent_with_routing.yaml
new file mode 100644
index 0000000..43d60f5
--- /dev/null
+++ b/rest-api-spec/test/index/55_parent_with_routing.yaml
@@ -0,0 +1,54 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ fields: [_parent, _routing]
+
+ - match: { _id: "1"}
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "4"}
+
+ - do:
+ catch: missing
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+
diff --git a/rest-api-spec/test/index/60_refresh.yaml b/rest-api-spec/test/index/60_refresh.yaml
new file mode 100644
index 0000000..af6ea59
--- /dev/null
+++ b/rest-api-spec/test/index/60_refresh.yaml
@@ -0,0 +1,46 @@
+---
+"Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index.refresh_interval: -1
+ number_of_replicas: 0
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { term: { _id: 1 }}
+
+ - match: { hits.total: 0 }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 2
+ refresh: 1
+ body: { foo: bar }
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { term: { _id: 2 }}
+
+ - match: { hits.total: 1 }
diff --git a/rest-api-spec/test/index/70_timestamp.yaml b/rest-api-spec/test/index/70_timestamp.yaml
new file mode 100644
index 0000000..9e94795
--- /dev/null
+++ b/rest-api-spec/test/index/70_timestamp.yaml
@@ -0,0 +1,71 @@
+---
+"Timestamp":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _timestamp:
+ enabled: 1
+ store: yes
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+# blank timestamp
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - is_true: fields._timestamp
+
+# milliseconds since epoch
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ timestamp: 1372011280000
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - match: { fields._timestamp: 1372011280000 }
+
+# date format
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ timestamp: 2013-06-23T18:14:40
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - match: { fields._timestamp: 1372011280000 }
+
diff --git a/rest-api-spec/test/index/75_ttl.yaml b/rest-api-spec/test/index/75_ttl.yaml
new file mode 100644
index 0000000..08a7db8
--- /dev/null
+++ b/rest-api-spec/test/index/75_ttl.yaml
@@ -0,0 +1,88 @@
+---
+"TTL":
+
+ - skip:
+ features: gtelte
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _ttl:
+ enabled: 1
+ store: yes
+ default: 10s
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+# blank ttl
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 10000}
+ - gt: { fields._ttl: 0}
+
+# milliseconds
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 100000
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 100000}
+ - gt: { fields._ttl: 10000}
+
+# duration
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 20s
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 20000}
+ - gt: { fields._ttl: 10000}
+
+# with timestamp
+
+ - do:
+ catch: /AlreadyExpiredException/
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 20s
+ timestamp: 2013-06-23T18:14:40
+
diff --git a/rest-api-spec/test/index/TODO.txt b/rest-api-spec/test/index/TODO.txt
new file mode 100644
index 0000000..d8a93ca
--- /dev/null
+++ b/rest-api-spec/test/index/TODO.txt
@@ -0,0 +1,5 @@
+Tests missing for:
+
+# consistency
+# percolate
+# replication
diff --git a/rest-api-spec/test/indices.analyze/10_analyze.yaml b/rest-api-spec/test/indices.analyze/10_analyze.yaml
new file mode 100644
index 0000000..d3af1e3
--- /dev/null
+++ b/rest-api-spec/test/indices.analyze/10_analyze.yaml
@@ -0,0 +1,50 @@
+# Will be performed before each test as a part of the test setup
+#
+setup:
+ - do:
+ ping: {}
+
+---
+"Basic test":
+ - do:
+ indices.analyze:
+ text: Foo Bar
+ - length: { tokens: 2 }
+ - match: { tokens.0.token: foo }
+ - match: { tokens.1.token: bar }
+
+---
+"Tokenizer and filter":
+ - do:
+ indices.analyze:
+ filters: lowercase
+ text: Foo Bar
+ tokenizer: keyword
+ - length: { tokens: 1 }
+ - match: { tokens.0.token: foo bar }
+
+---
+"Index and field":
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ test:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+
+ - do:
+ indices.analyze:
+ field: text
+ index: test
+ text: Foo Bar!
+ - length: { tokens: 2 }
+ - match: { tokens.0.token: Foo }
+ - match: { tokens.1.token: Bar! }
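
The analyze tests cover the two ways of driving the API: an ad-hoc analysis chain named in the request, or the analyzer attached to a mapped field. A minimal sketch of both, assuming an Elasticsearch 1.x node on localhost:9200 and the Python `requests` library (both assumptions, not part of this patch):

    import requests

    ES = "http://localhost:9200"  # assumed local 1.x node

    # Ad hoc: keyword tokenizer + lowercase filter collapses the whole
    # input into the single token "foo bar".
    r = requests.get(ES + "/_analyze",
                     params={"tokenizer": "keyword",
                             "filters": "lowercase",
                             "text": "Foo Bar"})
    print([t["token"] for t in r.json()["tokens"]])  # ['foo bar']

    # Field-based: uses the whitespace analyzer from the mapping above,
    # so case and punctuation survive.
    r = requests.get(ES + "/test/_analyze",
                     params={"field": "text", "text": "Foo Bar!"})
    print([t["token"] for t in r.json()["tokens"]])  # ['Foo', 'Bar!']
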
diff --git a/rest-api-spec/test/indices.clear_cache/10_basic.yaml b/rest-api-spec/test/indices.clear_cache/10_basic.yaml
new file mode 100644
index 0000000..3388d06
--- /dev/null
+++ b/rest-api-spec/test/indices.clear_cache/10_basic.yaml
@@ -0,0 +1,4 @@
+---
+"clear_cache test":
+ - do:
+ indices.clear_cache: {}
diff --git a/rest-api-spec/test/indices.create/10_basic.yaml b/rest-api-spec/test/indices.create/10_basic.yaml
new file mode 100644
index 0000000..2cb979b
--- /dev/null
+++ b/rest-api-spec/test/indices.create/10_basic.yaml
@@ -0,0 +1,85 @@
+---
+"Create index with mappings":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ type_1: {}
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: { test_index.mappings.type_1.properties: {}}
+
+---
+"Create index with settings":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ settings:
+ number_of_replicas: "0"
+
+ - do:
+ indices.get_settings:
+ index: test_index
+
+ - match: { test_index.settings.index.number_of_replicas: "0"}
+
+---
+"Create index with warmers":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ warmers:
+ test_warmer:
+ source:
+ query:
+ match_all: {}
+
+ - do:
+ indices.get_warmer:
+ index: test_index
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+
+---
+"Create index with mappings, settings and warmers":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ type_1: {}
+ settings:
+ number_of_replicas: "0"
+ warmers:
+ test_warmer:
+ source:
+ query:
+ match_all: {}
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: { test_index.mappings.type_1.properties: {}}
+
+ - do:
+ indices.get_settings:
+ index: test_index
+
+ - match: { test_index.settings.index.number_of_replicas: "0"}
+
+ - do:
+ indices.get_warmer:
+ index: test_index
+
+ - match: { test_index.warmers.test_warmer.source.query.match_all: {}}
diff --git a/rest-api-spec/test/indices.delete_alias/10_basic.yaml b/rest-api-spec/test/indices.delete_alias/10_basic.yaml
new file mode 100644
index 0000000..87f61ef
--- /dev/null
+++ b/rest-api-spec/test/indices.delete_alias/10_basic.yaml
@@ -0,0 +1,32 @@
+---
+"Basic test for delete alias":
+
+ - do:
+ indices.create:
+ index: testind
+
+ - do:
+ indices.put_alias:
+ index: testind
+ name: testali
+ body:
+ routing: "routing value"
+
+ - do:
+ indices.get_alias:
+ name: testali
+
+ - match: {testind.aliases.testali.search_routing: "routing value"}
+ - match: {testind.aliases.testali.index_routing: "routing value"}
+
+ - do:
+ indices.delete_alias:
+ index: testind
+ name: testali
+
+ - do:
+ indices.get_alias:
+ index: testind
+ name: testali
+
+ - match: { '': {}}
diff --git a/rest-api-spec/test/indices.delete_alias/all_path_options.yaml b/rest-api-spec/test/indices.delete_alias/all_path_options.yaml
new file mode 100644
index 0000000..bd158c3
--- /dev/null
+++ b/rest-api-spec/test/indices.delete_alias/all_path_options.yaml
@@ -0,0 +1,225 @@
+---
+setup:
+
+ - do:
+ indices.create:
+ index: test_index1
+
+ - do:
+ indices.create:
+ index: test_index2
+
+ - do:
+ indices.create:
+ index: foo
+
+ - do:
+ indices.put_alias:
+ name: alias1
+ body:
+ routing: "routing value"
+ - do:
+ indices.put_alias:
+ name: alias2
+ body:
+ routing: "routing value"
+
+---
+"check setup":
+ - do:
+ indices.get_alias:
+ name: alias1
+
+ - match: {test_index1.aliases.alias1.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias1.search_routing: "routing value"}
+ - match: {foo.aliases.alias1.search_routing: "routing value"}
+
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {test_index1.aliases.alias2.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias2.search_routing: "routing value"}
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+
+---
+"check delete with _all index":
+ - do:
+ indices.delete_alias:
+ index: _all
+ name: alias1
+
+ - do:
+ catch: missing
+ indices.get_alias:
+ name: alias1
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {test_index1.aliases.alias2.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias2.search_routing: "routing value"}
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+
+---
+"check delete with * index":
+ - do:
+ indices.delete_alias:
+ index: "*"
+ name: alias1
+
+ - do:
+ catch: missing
+ indices.get_alias:
+ name: alias1
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {test_index1.aliases.alias2.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias2.search_routing: "routing value"}
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+
+---
+"check delete with index list":
+ - do:
+ indices.delete_alias:
+ index: "test_index1,test_index2"
+ name: alias1
+
+ - do:
+ indices.get_alias:
+ name: alias1
+
+ - match: {foo.aliases.alias1.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {test_index1.aliases.alias2.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias2.search_routing: "routing value"}
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+
+---
+"check delete with prefix* index":
+ - do:
+ indices.delete_alias:
+ index: "test_*"
+ name: alias1
+
+ - do:
+ indices.get_alias:
+ name: alias1
+
+ - match: {foo.aliases.alias1.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {test_index1.aliases.alias2.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias2.search_routing: "routing value"}
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+
+
+---
+"check delete with index list and * aliases":
+ - do:
+ indices.delete_alias:
+ index: "test_index1,test_index2"
+ name: "*"
+
+ - do:
+ indices.get_alias:
+ name: alias1
+
+ - match: {foo.aliases.alias1.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+---
+"check delete with index list and _all aliases":
+ - do:
+ indices.delete_alias:
+ index: "test_index1,test_index2"
+ name: _all
+
+ - do:
+ indices.get_alias:
+ name: alias1
+
+ - match: {foo.aliases.alias1.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+---
+"check delete with index list and wildcard aliases":
+ - do:
+ indices.delete_alias:
+ index: "test_index1,test_index2"
+ name: "*1"
+
+ - do:
+ indices.get_alias:
+ name: alias1
+
+ - match: {foo.aliases.alias1.search_routing: "routing value"}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_alias:
+ name: alias2
+
+ - match: {test_index1.aliases.alias2.search_routing: "routing value"}
+ - match: {test_index2.aliases.alias2.search_routing: "routing value"}
+ - match: {foo.aliases.alias2.search_routing: "routing value"}
+
+---
+"check 404 on no matching alias":
+ - do:
+ catch: missing
+ indices.delete_alias:
+ index: "*"
+ name: "non_existent"
+
+ - do:
+ catch: missing
+ indices.delete_alias:
+ index: "non_existent"
+ name: "alias1"
+
+
+---
+"check delete with blank index and blank alias":
+ - do:
+ catch: param
+ indices.delete_alias:
+ name: "alias1"
+
+ - do:
+ catch: param
+ indices.delete_alias:
+ index: "test_index1"
+
diff --git a/rest-api-spec/test/indices.delete_mapping/10_basic.yaml b/rest-api-spec/test/indices.delete_mapping/10_basic.yaml
new file mode 100644
index 0000000..e7139bc
--- /dev/null
+++ b/rest-api-spec/test/indices.delete_mapping/10_basic.yaml
@@ -0,0 +1,31 @@
+---
+"delete mapping tests":
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.exists_type:
+ index: test_index
+ type: test_type
+
+ - is_true: ''
+
+ - do:
+ indices.delete_mapping:
+ index: test_index
+ type: test_type
+
+ - do:
+ indices.exists_type:
+ index: test_index
+ type: test_type
+
+ - is_false: ''
diff --git a/rest-api-spec/test/indices.delete_mapping/all_path_options.yaml b/rest-api-spec/test/indices.delete_mapping/all_path_options.yaml
new file mode 100644
index 0000000..90a8656
--- /dev/null
+++ b/rest-api-spec/test/indices.delete_mapping/all_path_options.yaml
@@ -0,0 +1,272 @@
+setup:
+
+ - do:
+ indices.create:
+ index: test_index1
+ body:
+ mappings: { test_type1: { }}
+
+ - do:
+ indices.create:
+ index: test_index2
+ body:
+ mappings: { test_type2: { }}
+ - do:
+ indices.create:
+ index: foo
+ body:
+ mappings: { test_type2: { }}
+
+---
+"delete with _all index":
+ - do:
+ indices.delete_mapping:
+ index: _all
+ type: test_type2
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_true: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_false: ''
+
+---
+"delete with * index":
+ - do:
+ indices.delete_mapping:
+ index: '*'
+ type: test_type2
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_true: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_false: ''
+
+---
+"delete with prefix* index":
+ - do:
+ indices.delete_mapping:
+ index: test*
+ type: test_type2
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_true: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_true: ''
+
+---
+"delete with list of indices":
+
+ - do:
+ indices.delete_mapping:
+ index: test_index1,test_index2
+ type: test_type2
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_true: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_true: ''
+
+---
+"delete with index list and _all type":
+ - do:
+ indices.delete_mapping:
+ index: test_index1,test_index2
+ type: _all
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_true: ''
+
+
+---
+"delete with index list and * type":
+ - do:
+ indices.delete_mapping:
+ index: test_index1,test_index2
+ type: '*'
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_true: ''
+
+
+---
+"delete with index list and prefix* type":
+ - do:
+ indices.delete_mapping:
+ index: test_index1,test_index2
+ type: '*2'
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_true: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_true: ''
+
+---
+"delete with index list and list of types":
+ - do:
+ indices.delete_mapping:
+ index: test_index1,test_index2
+ type: test_type1,test_type2
+
+ - do:
+ indices.exists_type:
+ index: test_index1
+ type: test_type1
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: test_index2
+ type: test_type2
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: foo
+ type: test_type2
+
+ - is_true: ''
+
+---
+"check 404 on no matching type":
+ - do:
+ catch: missing
+ indices.delete_mapping:
+ index: "*"
+ type: "non_existent"
+
+ - do:
+ catch: missing
+ indices.delete_mapping:
+ index: "non_existent"
+ type: "test_type1"
+
+---
+"check delete with blank index and blank type":
+ - do:
+ catch: param
+ indices.delete_mapping:
+ type: "test_type1"
+
+ - do:
+ catch: param
+ indices.delete_mapping:
+ index: "test_index1"
+
diff --git a/rest-api-spec/test/indices.delete_warmer/all_path_options.yaml b/rest-api-spec/test/indices.delete_warmer/all_path_options.yaml
new file mode 100644
index 0000000..603b01c
--- /dev/null
+++ b/rest-api-spec/test/indices.delete_warmer/all_path_options.yaml
@@ -0,0 +1,218 @@
+setup:
+ - do:
+ indices.create:
+ index: test_index1
+ body:
+ warmers:
+ test_warmer1:
+ source:
+ query:
+ match_all: {}
+ test_warmer2:
+ source:
+ query:
+ match_all: {}
+
+ - do:
+ indices.create:
+ index: test_index2
+ body:
+ warmers:
+ test_warmer1:
+ source:
+ query:
+ match_all: {}
+ test_warmer2:
+ source:
+ query:
+ match_all: {}
+
+ - do:
+ indices.create:
+ index: foo
+ body:
+ warmers:
+ test_warmer1:
+ source:
+ query:
+ match_all: {}
+ test_warmer2:
+ source:
+ query:
+ match_all: {}
+
+---
+"Check setup":
+
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.test_warmer1.source.query.match_all: {}}
+ - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer1.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer1.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+
+
+---
+"check delete with _all index":
+ - do:
+ indices.delete_warmer:
+ index: _all
+ name: test_warmer1
+
+ - do:
+ indices.get_warmer: {}
+
+ - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"check delete with * index":
+ - do:
+ indices.delete_warmer:
+ index: "*"
+ name: test_warmer1
+
+ - do:
+ indices.get_warmer: {}
+
+ - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"check delete with index list":
+ - do:
+ indices.delete_warmer:
+ index: "test_index1,test_index2"
+ name: test_warmer1
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer1' }
+
+ - match: {foo.warmers.test_warmer1.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer2' }
+
+ - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"check delete with prefix* index":
+ - do:
+ indices.delete_warmer:
+ index: "test_*"
+ name: test_warmer1
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer1' }
+
+ - match: {foo.warmers.test_warmer1.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer2' }
+
+ - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+
+
+---
+"check delete with index list and * warmers":
+ - do:
+ indices.delete_warmer:
+ index: "test_index1,test_index2"
+ name: "*"
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer1' }
+
+ - match: {foo.warmers.test_warmer1.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer2' }
+
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+---
+"check delete with index list and _all warmers":
+ - do:
+ indices.delete_warmer:
+ index: "test_index1,test_index2"
+ name: _all
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer1' }
+
+ - match: {foo.warmers.test_warmer1.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer2' }
+
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+---
+"check delete with index list and wildcard warmers":
+ - do:
+ indices.delete_warmer:
+ index: "test_index1,test_index2"
+ name: "*1"
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer1' }
+
+ - match: {foo.warmers.test_warmer1.source.query.match_all: {}}
+ - is_false: test_index1
+ - is_false: test_index2
+
+ - do:
+ indices.get_warmer: { index: _all, name: 'test_warmer2' }
+
+ - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}}
+ - match: {foo.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"check 404 on no matching test_warmer":
+ - do:
+ catch: missing
+ indices.delete_warmer:
+ index: "*"
+ name: "non_existent"
+
+ - do:
+ catch: missing
+ indices.delete_warmer:
+ index: "non_existent"
+ name: "test_warmer1"
+
+
+---
+"check delete with blank index and blank test_warmer":
+ - do:
+ catch: param
+ indices.delete_warmer:
+ name: "test_warmer1"
+
+ - do:
+ catch: param
+ indices.delete_warmer:
+ index: "test_index1"
+
diff --git a/rest-api-spec/test/indices.exists/10_basic.yaml b/rest-api-spec/test/indices.exists/10_basic.yaml
new file mode 100644
index 0000000..e8bbb80
--- /dev/null
+++ b/rest-api-spec/test/indices.exists/10_basic.yaml
@@ -0,0 +1,25 @@
+---
+"Test indices.exists":
+ - do:
+ indices.exists:
+ index: test_index
+
+ - is_false: ''
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.exists:
+ index: test_index
+
+ - is_true: ''
+---
+"Test indices.exists with local flag":
+ - do:
+ indices.exists:
+ index: test_index
+ local: true
+
+ - is_false: ''
diff --git a/rest-api-spec/test/indices.exists_alias/10_basic.yaml b/rest-api-spec/test/indices.exists_alias/10_basic.yaml
new file mode 100644
index 0000000..fba0512
--- /dev/null
+++ b/rest-api-spec/test/indices.exists_alias/10_basic.yaml
@@ -0,0 +1,45 @@
+---
+"Test indices.exists_alias":
+ - do:
+ indices.exists_alias:
+ name: test_alias
+
+ - is_false: ''
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_alias
+
+ - do:
+ indices.exists_alias:
+ name: test_alias
+
+ - is_true: ''
+
+ - do:
+ indices.exists_alias:
+ index: test_index
+ name: test_alias
+
+ - is_true: ''
+
+ - do:
+ indices.exists_alias:
+ index: test_index1
+ name: test_alias
+
+ - is_false: ''
+
+---
+"Test indices.exists_alias with local flag":
+ - do:
+ indices.exists_alias:
+ name: test_alias
+ local: true
+
+ - is_false: ''
diff --git a/rest-api-spec/test/indices.exists_template/10_basic.yaml b/rest-api-spec/test/indices.exists_template/10_basic.yaml
new file mode 100644
index 0000000..f8e5052
--- /dev/null
+++ b/rest-api-spec/test/indices.exists_template/10_basic.yaml
@@ -0,0 +1,38 @@
+---
+setup:
+ - do:
+ indices.delete_template:
+ name: test
+ ignore: [404]
+---
+"Test indices.exists_template":
+ - do:
+ indices.exists_template:
+ name: test
+
+ - is_false: ''
+
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ template: 'test-*'
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ indices.exists_template:
+ name: test
+
+ - is_true: ''
+
+---
+"Test indices.exists_template with local flag":
+ - do:
+ indices.exists_template:
+ name: test
+ local: true
+
+ - is_false: ''
+
diff --git a/rest-api-spec/test/indices.exists_type/10_basic.yaml b/rest-api-spec/test/indices.exists_type/10_basic.yaml
new file mode 100644
index 0000000..fb0dad5
--- /dev/null
+++ b/rest-api-spec/test/indices.exists_type/10_basic.yaml
@@ -0,0 +1,40 @@
+---
+"Exists type":
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ type_1: {}
+ type_2: {}
+
+ - do:
+ indices.exists_type:
+ index: test_2
+ type: type_1
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: test_1
+ type: type_3
+
+ - is_false: ''
+
+ - do:
+ indices.exists_type:
+ index: test_1
+ type: type_1
+
+ - is_true: ''
+---
+"Exists type with local flag":
+
+ - do:
+ indices.exists_type:
+ index: test_1
+ type: type_1
+ local: true
+
+ - is_false: ''
diff --git a/rest-api-spec/test/indices.get_alias/10_basic.yaml b/rest-api-spec/test/indices.get_alias/10_basic.yaml
new file mode 100644
index 0000000..42b696f
--- /dev/null
+++ b/rest-api-spec/test/indices.get_alias/10_basic.yaml
@@ -0,0 +1,219 @@
+---
+setup:
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.create:
+ index: test_index_2
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_alias
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_blias
+
+ - do:
+ indices.put_alias:
+ index: test_index_2
+ name: test_alias
+
+ - do:
+ indices.put_alias:
+ index: test_index_2
+ name: test_blias
+
+---
+"Get all aliases via /_alias":
+
+ - do:
+ indices.get_alias: {}
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_blias: {}}
+
+
+---
+"Get all aliases via /{index}/_alias/":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get specific alias via /{index}/_alias/{name}":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_alias/_all":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: _all
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_alias/*":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: '*'
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_alias/prefix*":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: 'test_a*'
+
+ - match: {test_index.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_alias/name,name":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: 'test_alias,test_blias'
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get aliases via /_alias/{name}":
+
+ - do:
+ indices.get_alias:
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /_all/_alias/{name}":
+
+ - do:
+ indices.get_alias:
+ index: _all
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /*/_alias/{name}":
+
+ - do:
+ indices.get_alias:
+ index: '*'
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /pref*/_alias/{name}":
+
+ - do:
+ indices.get_alias:
+ index: '*2'
+ name: test_alias
+
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_alias
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /name,name/_alias/{name}":
+
+ - do:
+ indices.get_alias:
+ index: test_index,test_index_2
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+
+---
+"Non-existent alias on an existing index returns an empty body":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: non-existent
+
+ - match: { '': {}}
+
+---
+"Existent and non-existent alias returns just the existing":
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: test_alias,non-existent
+
+ - match: {test_index.aliases.test_alias: {}}
+ - is_false: test_index.aliases.non-existent
+
+---
+"Getting alias on an non-existent index should return 404":
+
+ - do:
+ catch: missing
+ indices.get_alias:
+ index: non-existent
+ name: foo
+
+---
+"Get alias with local flag":
+
+ - do:
+ indices.get_alias:
+ local: true
+
+ - is_true: test_index
+
+ - is_true: test_index_2
diff --git a/rest-api-spec/test/indices.get_aliases/10_basic.yaml b/rest-api-spec/test/indices.get_aliases/10_basic.yaml
new file mode 100644
index 0000000..73e136a
--- /dev/null
+++ b/rest-api-spec/test/indices.get_aliases/10_basic.yaml
@@ -0,0 +1,224 @@
+---
+setup:
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.create:
+ index: test_index_2
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_alias
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_blias
+
+ - do:
+ indices.put_alias:
+ index: test_index_2
+ name: test_alias
+
+ - do:
+ indices.put_alias:
+ index: test_index_2
+ name: test_blias
+
+---
+"Get all aliases via /_aliases":
+
+ - do:
+ indices.get_aliases: {}
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_blias: {}}
+
+
+---
+"Get all aliases via /{index}/_aliases/":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get specific alias via /{index}/_aliases/{name}":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_aliases/_all":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: _all
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_aliases/*":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: '*'
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_aliases/prefix*":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: 'test_a*'
+
+ - match: {test_index.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2
+
+---
+"Get aliases via /{index}/_aliases/name,name":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: 'test_alias,test_blias'
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index.aliases.test_blias: {}}
+ - is_false: test_index_2
+
+---
+"Get aliases via /_aliases/{name}":
+
+ - do:
+ indices.get_aliases:
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /_all/_aliases/{name}":
+
+ - do:
+ indices.get_aliases:
+ index: _all
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /*/_aliases/{name}":
+
+ - do:
+ indices.get_aliases:
+ index: '*'
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /pref*/_aliases/{name}":
+
+ - do:
+ indices.get_aliases:
+ index: '*2'
+ name: test_alias
+
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_alias
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+---
+"Get aliases via /name,name/_aliases/{name}":
+
+ - do:
+ indices.get_aliases:
+ index: test_index,test_index_2
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
+ - match: {test_index_2.aliases.test_alias: {}}
+ - is_false: test_index.aliases.test_blias
+ - is_false: test_index_2.aliases.test_blias
+
+
+---
+"Non-existent alias on an existing index returns matching indcies":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: non-existent
+
+ - match: { test_index.aliases: {}}
+
+---
+"Existent and non-existent alias returns just the existing":
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+ name: test_alias,non-existent
+
+ - match: {test_index.aliases.test_alias: {}}
+ - is_false: test_index.aliases.non-existent
+
+---
+"Getting alias on an non-existent index should return 404":
+
+ - skip:
+ version: 1 - 999
+ reason: not implemented yet
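+# a version range of "1 - 999" skips the test on every release until the
+# feature ships.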
+ - do:
+ catch: missing
+ indices.get_aliases:
+ index: non-existent
+ name: foo
+
+---
+"Get aliases with local flag":
+
+ - do:
+ indices.get_aliases:
+ local: true
+
+ - is_true: test_index
+
+ - is_true: test_index_2
+
+
diff --git a/rest-api-spec/test/indices.get_field_mapping/10_basic.yaml b/rest-api-spec/test/indices.get_field_mapping/10_basic.yaml
new file mode 100644
index 0000000..0b63631
--- /dev/null
+++ b/rest-api-spec/test/indices.get_field_mapping/10_basic.yaml
@@ -0,0 +1,79 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ text:
+ type: string
+ - do:
+ cluster.health:
+ wait_for_status: yellow
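+# yellow means all primaries are allocated; replicas may stay unassigned
+# on a single-node test cluster.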
+
+---
+"Get field mapping with no index and type":
+
+ - do:
+ indices.get_field_mapping:
+ field: text
+
+ - match: {test_index.mappings.test_type.text.mapping.text.type: string}
+
+---
+"Get field mapping by index only":
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ field: text
+
+ - match: {test_index.mappings.test_type.text.mapping.text.type: string}
+
+---
+"Get field mapping by type & field":
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ type: test_type
+ field: text
+
+ - match: {test_index.mappings.test_type.text.mapping.text.type: string}
+
+---
+"Get field mapping by type & field, with another field that doesn't exist":
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ type: test_type
+ field: [ text , text1 ]
+
+ - match: {test_index.mappings.test_type.text.mapping.text.type: string}
+ - is_false: test_index.mappings.test_type.text1
+
+---
+"Get field mapping with include_defaults":
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ type: test_type
+ field: text
+ include_defaults: true
+
+ - match: {test_index.mappings.test_type.text.mapping.text.type: string}
+ - match: {test_index.mappings.test_type.text.mapping.text.analyzer: default}
+
+---
+"Get field mapping should work without index specifying type and field":
+
+ - do:
+ indices.get_field_mapping:
+ type: test_type
+ field: text
+
+ - match: {test_index.mappings.test_type.text.mapping.text.type: string}
+
diff --git a/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yaml b/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yaml
new file mode 100644
index 0000000..1eae257
--- /dev/null
+++ b/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yaml
@@ -0,0 +1,24 @@
+---
+"Return empty object if field doesn't exist, but type and index do":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ type: test_type
+ field: not_existent
+
+ - match: { '': {}}
diff --git a/rest-api-spec/test/indices.get_field_mapping/30_missing_type.yaml b/rest-api-spec/test/indices.get_field_mapping/30_missing_type.yaml
new file mode 100644
index 0000000..7c16d51
--- /dev/null
+++ b/rest-api-spec/test/indices.get_field_mapping/30_missing_type.yaml
@@ -0,0 +1,24 @@
+---
+"Raise 404 when type doesn't exist":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ catch: missing
+ indices.get_field_mapping:
+ index: test_index
+ type: not_test_type
+ field: text
+
diff --git a/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yaml b/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yaml
new file mode 100644
index 0000000..ab2d985
--- /dev/null
+++ b/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yaml
@@ -0,0 +1,11 @@
+---
+"Raise 404 when index doesn't exist":
+
+ - do:
+ catch: missing
+ indices.get_field_mapping:
+ index: test_index
+ type: type
+ field: field
+
+
diff --git a/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yaml b/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yaml
new file mode 100644
index 0000000..766be05
--- /dev/null
+++ b/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yaml
@@ -0,0 +1,148 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ t1:
+ type: string
+ t2:
+ type: string
+ obj:
+ path: just_name
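+# path: just_name (a legacy object-mapping option) lets sub-fields be
+# addressed by their bare index_name instead of the full object path.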
+ properties:
+ t1:
+ type: string
+ i_t1:
+ type: string
+ index_name: t1
+ i_t3:
+ type: string
+ index_name: t3
+
+ - do:
+ indices.create:
+ index: test_index_2
+ body:
+ mappings:
+ test_type_2:
+ properties:
+ t1:
+ type: string
+ t2:
+ type: string
+ obj:
+ path: just_name
+ properties:
+ t1:
+ type: string
+ i_t1:
+ type: string
+ index_name: t1
+ i_t3:
+ type: string
+ index_name: t3
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+---
+"Get field mapping with * for fields":
+
+ - do:
+ indices.get_field_mapping:
+ field: "*"
+
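+# Dots inside returned field names are escaped (obj\.t1) so the match
+# path is not split on them.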
+ - match: {test_index.mappings.test_type.t1.full_name: t1 }
+ - match: {test_index.mappings.test_type.t2.full_name: t2 }
+ - match: {test_index.mappings.test_type.obj\.t1.full_name: obj.t1 }
+ - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 }
+ - match: {test_index.mappings.test_type.obj\.i_t3.full_name: obj.i_t3 }
+
+---
+"Get field mapping with t* for fields":
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ field: "t*"
+
+# i_t1 matches the pattern via its index name, but t1 is already the full name
+# of another field and thus takes precedence.
+ - match: {test_index.mappings.test_type.t1.full_name: t1 }
+ - match: {test_index.mappings.test_type.t2.full_name: t2 }
+ - match: {test_index.mappings.test_type.t3.full_name: obj.i_t3 }
+ - length: {test_index.mappings.test_type: 3}
+
+---
+"Get field mapping with *t1 for fields":
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ field: "*t1"
+ - match: {test_index.mappings.test_type.t1.full_name: t1 }
+ - match: {test_index.mappings.test_type.obj\.t1.full_name: obj.t1 }
+ - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 }
+ - length: {test_index.mappings.test_type: 3}
+
+---
+"Get field mapping with wildcarded relative names":
+
+ - do:
+ indices.get_field_mapping:
+ index: test_index
+ field: "i_*"
+ - match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
+ - match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
+ - length: {test_index.mappings.test_type: 2}
+
+---
+"Get field mapping should work using '_all' for indices and types":
+
+ - do:
+ indices.get_field_mapping:
+ index: _all
+ type: _all
+ field: "i_*"
+ - match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
+ - match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
+ - length: {test_index.mappings.test_type: 2}
+ - match: {test_index_2.mappings.test_type_2.i_t1.full_name: obj.i_t1 }
+ - match: {test_index_2.mappings.test_type_2.i_t3.full_name: obj.i_t3 }
+ - length: {test_index_2.mappings.test_type_2: 2}
+
+---
+"Get field mapping should work using '*' for indices and types":
+
+ - do:
+ indices.get_field_mapping:
+ index: '*'
+ type: '*'
+ field: "i_*"
+ - match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
+ - match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
+ - length: {test_index.mappings.test_type: 2}
+ - match: {test_index_2.mappings.test_type_2.i_t1.full_name: obj.i_t1 }
+ - match: {test_index_2.mappings.test_type_2.i_t3.full_name: obj.i_t3 }
+ - length: {test_index_2.mappings.test_type_2: 2}
+
+---
+"Get field mapping should work using comma_separated values for indices and types":
+
+ - do:
+ indices.get_field_mapping:
+ index: 'test_index,test_index_2'
+ type: 'test_type,test_type_2'
+ field: "i_*"
+ - match: {test_index.mappings.test_type.i_t1.full_name: obj.i_t1 }
+ - match: {test_index.mappings.test_type.i_t3.full_name: obj.i_t3 }
+ - length: {test_index.mappings.test_type: 2}
+ - match: {test_index_2.mappings.test_type_2.i_t1.full_name: obj.i_t1 }
+ - match: {test_index_2.mappings.test_type_2.i_t3.full_name: obj.i_t3 }
+ - length: {test_index_2.mappings.test_type_2: 2}
+
diff --git a/rest-api-spec/test/indices.get_mapping/10_basic.yaml b/rest-api-spec/test/indices.get_mapping/10_basic.yaml
new file mode 100644
index 0000000..b7e6abf
--- /dev/null
+++ b/rest-api-spec/test/indices.get_mapping/10_basic.yaml
@@ -0,0 +1,161 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ type_1: {}
+ type_2: {}
+ - do:
+ indices.create:
+ index: test_2
+ body:
+ mappings:
+ type_2: {}
+ type_3: {}
+
+---
+"Get /_mapping":
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: { test_1.mappings.type_1.properties: {}}
+ - match: { test_1.mappings.type_2.properties: {}}
+ - match: { test_2.mappings.type_2.properties: {}}
+ - match: { test_2.mappings.type_3.properties: {}}
+
+---
+"Get /{index}/_mapping":
+
+ - do:
+ indices.get_mapping:
+ index: test_1
+
+ - match: { test_1.mappings.type_1.properties: {}}
+ - match: { test_1.mappings.type_2.properties: {}}
+ - is_false: test_2
+
+
+---
+"Get /{index}/_mapping/_all":
+
+ - do:
+ indices.get_mapping:
+ index: test_1
+ type: _all
+
+ - match: { test_1.mappings.type_1.properties: {}}
+ - match: { test_1.mappings.type_2.properties: {}}
+ - is_false: test_2
+
+---
+"Get /{index}/_mapping/*":
+
+ - do:
+ indices.get_mapping:
+ index: test_1
+ type: '*'
+
+ - match: { test_1.mappings.type_1.properties: {}}
+ - match: { test_1.mappings.type_2.properties: {}}
+ - is_false: test_2
+
+---
+"Get /{index}/_mapping/{type}":
+
+ - do:
+ indices.get_mapping:
+ index: test_1
+ type: type_1
+
+ - match: { test_1.mappings.type_1.properties: {}}
+ - is_false: test_1.mappings.type_2
+ - is_false: test_2
+
+---
+"Get /{index}/_mapping/{type,type}":
+
+ - do:
+ indices.get_mapping:
+ index: test_1
+ type: type_1,type_2
+
+ - match: { test_1.mappings.type_1.properties: {}}
+ - match: { test_1.mappings.type_2.properties: {}}
+ - is_false: test_2
+
+---
+"Get /{index}/_mapping/{type*}":
+
+ - do:
+ indices.get_mapping:
+ index: test_1
+ type: '*2'
+
+ - match: { test_1.mappings.type_2.properties: {}}
+ - is_false: test_1.mappings.type_1
+ - is_false: test_2
+
+---
+"Get /_mapping/{type}":
+
+ - do:
+ indices.get_mapping:
+ type: type_2
+
+ - match: { test_1.mappings.type_2.properties: {}}
+ - match: { test_2.mappings.type_2.properties: {}}
+ - is_false: test_1.mappings.type_1
+ - is_false: test_2.mappings.type_3
+
+---
+"Get /_all/_mapping/{type}":
+
+ - do:
+ indices.get_mapping:
+ index: _all
+ type: type_2
+
+ - match: { test_1.mappings.type_2.properties: {}}
+ - match: { test_2.mappings.type_2.properties: {}}
+ - is_false: test_1.mappings.type_1
+ - is_false: test_2.mappings.type_3
+
+---
+"Get /*/_mapping/{type}":
+
+ - do:
+ indices.get_mapping:
+ index: '*'
+ type: type_2
+
+ - match: { test_1.mappings.type_2.properties: {}}
+ - match: { test_2.mappings.type_2.properties: {}}
+ - is_false: test_1.mappings.type_1
+ - is_false: test_2.mappings.type_3
+
+---
+"Get /index,index/_mapping/{type}":
+
+ - do:
+ indices.get_mapping:
+ index: test_1,test_2
+ type: type_2
+
+ - match: { test_1.mappings.type_2.properties: {}}
+ - match: { test_2.mappings.type_2.properties: {}}
+ - is_false: test_2.mappings.type_3
+
+---
+"Get /index*/_mapping/{type}":
+
+ - do:
+ indices.get_mapping:
+ index: '*2'
+ type: type_2
+
+ - match: { test_2.mappings.type_2.properties: {}}
+ - is_false: test_1
+ - is_false: test_2.mappings.type_3
diff --git a/rest-api-spec/test/indices.get_mapping/20_missing_type.yaml b/rest-api-spec/test/indices.get_mapping/20_missing_type.yaml
new file mode 100644
index 0000000..91b1883
--- /dev/null
+++ b/rest-api-spec/test/indices.get_mapping/20_missing_type.yaml
@@ -0,0 +1,19 @@
+---
+"Return empty response when type doesn't exist":
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+ type: not_test_type
+
+ - match: { '': {}}
diff --git a/rest-api-spec/test/indices.get_mapping/30_missing_index.yaml b/rest-api-spec/test/indices.get_mapping/30_missing_index.yaml
new file mode 100644
index 0000000..6aadb37
--- /dev/null
+++ b/rest-api-spec/test/indices.get_mapping/30_missing_index.yaml
@@ -0,0 +1,9 @@
+---
+"Raise 404 when index doesn't exist":
+ - do:
+ catch: missing
+ indices.get_mapping:
+ index: test_index
+ type: not_test_type
+
+
diff --git a/rest-api-spec/test/indices.get_mapping/40_aliases.yaml b/rest-api-spec/test/indices.get_mapping/40_aliases.yaml
new file mode 100644
index 0000000..9dff6ed
--- /dev/null
+++ b/rest-api-spec/test/indices.get_mapping/40_aliases.yaml
@@ -0,0 +1,26 @@
+---
+"Getting mapping for aliases should return the real index as key":
+
+ - do:
+ indices.create:
+ index: test_index
+ body:
+ mappings:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_alias
+
+ - do:
+ indices.get_mapping:
+ index: test_alias
+
+ - match: {test_index.mappings.test_type.properties.text.type: string}
+ - match: {test_index.mappings.test_type.properties.text.analyzer: whitespace}
+
diff --git a/rest-api-spec/test/indices.get_settings/10_basic.yaml b/rest-api-spec/test/indices.get_settings/10_basic.yaml
new file mode 100644
index 0000000..eaca930
--- /dev/null
+++ b/rest-api-spec/test/indices.get_settings/10_basic.yaml
@@ -0,0 +1,165 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ - do:
+ indices.create:
+ index: test_2
+
+---
+"Get /_settings":
+
+ - do:
+ indices.get_settings: {}
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_1.settings.index.number_of_replicas: "1"}
+ - match: { test_2.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_replicas: "1"}
+
+---
+"Get /{index}/_settings":
+
+ - do:
+ indices.get_settings:
+ index: test_1
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_1.settings.index.number_of_replicas: "1"}
+ - is_false: test_2
+
+
+---
+"Get /{index}/_settings/_all":
+
+ - do:
+ indices.get_settings:
+ index: test_1
+ name: _all
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_1.settings.index.number_of_replicas: "1"}
+ - is_false: test_2
+
+---
+"Get /{index}/_settings/*":
+
+ - do:
+ indices.get_settings:
+ index: test_1
+ name: '*'
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_1.settings.index.number_of_replicas: "1"}
+ - is_false: test_2
+
+---
+"Get /{index}/_settings/{name}":
+
+ - do:
+ indices.get_settings:
+ index: test_1
+ name: index.number_of_shards
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - is_false: test_1.settings.index.number_of_replicas
+ - is_false: test_2
+
+---
+"Get /{index}/_settings/{name,name}":
+
+ - do:
+ indices.get_settings:
+ index: test_1
+ name: index.number_of_shards,index.number_of_replicas
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_1.settings.index.number_of_replicas: "1"}
+ - is_false: test_2
+
+---
+"Get /{index}/_settings/{name*}":
+
+ - do:
+ indices.get_settings:
+ index: test_1
+ name: 'index.number_of_s*'
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - is_false: test_1.settings.index.number_of_replicas
+ - is_false: test_2
+
+---
+"Get /_settings/{name}":
+
+ - do:
+ indices.get_settings:
+ name: index.number_of_shards
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "5"}
+ - is_false: test_1.settings.index.number_of_replicas
+ - is_false: test_2.settings.index.number_of_replicas
+
+---
+"Get /_all/_settings/{name}":
+
+ - do:
+ indices.get_settings:
+ index: _all
+ name: index.number_of_shards
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "5"}
+ - is_false: test_1.settings.index.number_of_replicas
+ - is_false: test_2.settings.index.number_of_replicas
+
+
+---
+"Get /*/_settings/{name}":
+
+ - do:
+ indices.get_settings:
+ index: '*'
+ name: index.number_of_shards
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "5"}
+ - is_false: test_1.settings.index.number_of_replicas
+ - is_false: test_2.settings.index.number_of_replicas
+
+---
+"Get /index,index/_settings/{name}":
+
+ - do:
+ indices.get_settings:
+ index: test_1,test_2
+ name: index.number_of_shards
+
+ - match: { test_1.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "5"}
+ - is_false: test_1.settings.index.number_of_replicas
+ - is_false: test_2.settings.index.number_of_replicas
+
+---
+"Get /index*/_settings/{name}":
+
+ - do:
+ indices.get_settings:
+ index: '*2'
+ name: index.number_of_shards
+
+ - match: { test_2.settings.index.number_of_shards: "5"}
+ - is_false: test_1
+ - is_false: test_2.settings.index.number_of_replicas
+
+---
+"Get /_settings with local flag":
+
+ - do:
+ indices.get_settings:
+ local: true
+
+ - is_true: test_1
+ - is_true: test_2
diff --git a/rest-api-spec/test/indices.get_settings/20_aliases.yaml b/rest-api-spec/test/indices.get_settings/20_aliases.yaml
new file mode 100644
index 0000000..da76782
--- /dev/null
+++ b/rest-api-spec/test/indices.get_settings/20_aliases.yaml
@@ -0,0 +1,26 @@
+---
+"Getting settings for aliases should return the real index as key":
+
+ - do:
+ indices.create:
+ index: test-index
+ body:
+ settings:
+ index:
+ refresh_interval: -1
+ number_of_shards: 2
+ number_of_replicas: 3
+
+ - do:
+ indices.put_alias:
+ index: test-index
+ name: test-alias
+
+ - do:
+ indices.get_settings:
+ index: test-alias
+
+ - match: { test-index.settings.index.number_of_replicas: "3" }
+ - match: { test-index.settings.index.number_of_shards: "2" }
+ - match: { test-index.settings.index.refresh_interval: "-1" }
+
diff --git a/rest-api-spec/test/indices.get_template/10_basic.yaml b/rest-api-spec/test/indices.get_template/10_basic.yaml
new file mode 100644
index 0000000..f5a4cad
--- /dev/null
+++ b/rest-api-spec/test/indices.get_template/10_basic.yaml
@@ -0,0 +1,45 @@
+setup:
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ template: test-*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+---
+"Get template":
+
+ - do:
+ indices.get_template:
+ name: test
+
+ - match: {test.template: "test-*"}
+ - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}}
+---
+"Get all templates":
+
+ - do:
+ indices.put_template:
+ name: test2
+ body:
+ template: test2-*
+ settings:
+ number_of_shards: 1
+
+ - do:
+ indices.get_template: {}
+
+ - match: {test.template: "test-*"}
+ - match: {test2.template: "test2-*"}
+
+---
+"Get template with local flag":
+
+ - do:
+ indices.get_template:
+ name: test
+ local: true
+
+ - is_true: test
diff --git a/rest-api-spec/test/indices.get_template/20_get_missing.yaml b/rest-api-spec/test/indices.get_template/20_get_missing.yaml
new file mode 100644
index 0000000..2751f57
--- /dev/null
+++ b/rest-api-spec/test/indices.get_template/20_get_missing.yaml
@@ -0,0 +1,13 @@
+setup:
+ - do:
+ indices.delete_template:
+ name: '*'
+ ignore: 404
+---
+"Get missing template":
+
+ - do:
+ catch: missing
+ indices.get_template:
+ name: test
+
diff --git a/rest-api-spec/test/indices.get_warmer/10_basic.yaml b/rest-api-spec/test/indices.get_warmer/10_basic.yaml
new file mode 100644
index 0000000..668a611
--- /dev/null
+++ b/rest-api-spec/test/indices.get_warmer/10_basic.yaml
@@ -0,0 +1,201 @@
+---
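+# Warmers register search requests that are run ahead of time to pre-load
+# caches before an index (or its new segments) is exposed to search.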
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ warmers:
+ warmer_1:
+ source: { query: { match_all: { }}}
+ warmer_2:
+ source: { query: { match_all: { }}}
+
+
+ - do:
+ indices.create:
+ index: test_2
+ body:
+ warmers:
+ warmer_2:
+ source: { query: { match_all: { }}}
+ warmer_3:
+ source: { query: { match_all: { }}}
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+---
+"Get /_warmer":
+
+ - do:
+ indices.get_warmer: {}
+
+ - match: { test_1.warmers.warmer_1.source.query.match_all: {}}
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - match: { test_2.warmers.warmer_2.source.query.match_all: {}}
+ - match: { test_2.warmers.warmer_3.source.query.match_all: {}}
+
+---
+"Get /{index}/_warmer":
+
+ - do:
+ indices.get_warmer:
+ index: test_1
+
+ - match: { test_1.warmers.warmer_1.source.query.match_all: {}}
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_2
+
+
+---
+"Get /{index}/_warmer/_all":
+
+ - do:
+ indices.get_warmer:
+ index: test_1
+ name: _all
+
+ - match: { test_1.warmers.warmer_1.source.query.match_all: {}}
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_2
+
+---
+"Get /{index}/_warmer/*":
+
+ - do:
+ indices.get_warmer:
+ index: test_1
+ name: '*'
+
+ - match: { test_1.warmers.warmer_1.source.query.match_all: {}}
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_2
+
+---
+"Get /{index}/_warmer/{name}":
+
+ - do:
+ indices.get_warmer:
+ index: test_1
+ name: warmer_1
+
+ - match: { test_1.warmers.warmer_1.source.query.match_all: {}}
+ - is_false: test_1.warmers.warmer_2
+ - is_false: test_2
+
+---
+"Get /{index}/_warmer/{name,name}":
+
+ - do:
+ indices.get_warmer:
+ index: test_1
+ name: warmer_1,warmer_2
+
+ - match: { test_1.warmers.warmer_1.source.query.match_all: {}}
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_2
+
+---
+"Get /{index}/_warmer/{name*}":
+
+ - do:
+ indices.get_warmer:
+ index: test_1
+ name: '*2'
+
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_1.warmers.warmer_1
+ - is_false: test_2
+
+---
+"Get /_warmer/{name}":
+
+ - do:
+ indices.get_warmer:
+ name: warmer_2
+
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - match: { test_2.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_1.warmers.warmer_1
+ - is_false: test_2.warmers.warmer_3
+
+---
+"Get /_all/_warmer/{name}":
+
+ - do:
+ indices.get_warmer:
+ index: _all
+ name: warmer_2
+
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - match: { test_2.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_1.warmers.warmer_1
+ - is_false: test_2.warmers.warmer_3
+
+---
+"Get /*/_warmer/{name}":
+
+ - do:
+ indices.get_warmer:
+ index: '*'
+ name: warmer_2
+
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - match: { test_2.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_1.warmers.warmer_1
+ - is_false: test_2.warmers.warmer_3
+
+---
+"Get /index,index/_warmer/{name}":
+
+ - do:
+ indices.get_warmer:
+ index: test_1,test_2
+ name: warmer_2
+
+ - match: { test_1.warmers.warmer_2.source.query.match_all: {}}
+ - match: { test_2.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_2.warmers.warmer_3
+
+---
+"Get /index*/_warmer/{name}":
+
+ - do:
+ indices.get_warmer:
+ index: '*2'
+ name: warmer_2
+
+ - match: { test_2.warmers.warmer_2.source.query.match_all: {}}
+ - is_false: test_1
+ - is_false: test_2.warmers.warmer_3
+
+---
+"Empty response when no matching warmer":
+
+ - do:
+ indices.get_warmer:
+ index: '*'
+ name: non_existent
+
+ - match: { '': {}}
+
+---
+"Throw 404 on missing index":
+
+ - do:
+ catch: missing
+ indices.get_warmer:
+ index: non_existent
+ name: '*'
+
+---
+"Get /_warmer with local flag":
+
+ - do:
+ indices.get_warmer:
+ local: true
+
+ - is_true: test_1
+ - is_true: test_2
+
diff --git a/rest-api-spec/test/indices.open/10_basic.yaml b/rest-api-spec/test/indices.open/10_basic.yaml
new file mode 100644
index 0000000..6b8846b
--- /dev/null
+++ b/rest-api-spec/test/indices.open/10_basic.yaml
@@ -0,0 +1,31 @@
+---
+"Basic test for index open/close":
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ indices.close:
+ index: test_index
+
+ - do:
+ catch: forbidden
+ search:
+ index: test_index
+
+ - do:
+ indices.open:
+ index: test_index
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ search:
+ index: test_index
+
diff --git a/rest-api-spec/test/indices.open/20_multiple_indices.yaml b/rest-api-spec/test/indices.open/20_multiple_indices.yaml
new file mode 100644
index 0000000..8a32cc4
--- /dev/null
+++ b/rest-api-spec/test/indices.open/20_multiple_indices.yaml
@@ -0,0 +1,83 @@
+setup:
+ - do:
+ indices.create:
+ index: test_index1
+ - do:
+ indices.create:
+ index: test_index2
+ - do:
+ indices.create:
+ index: test_index3
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+---
+"All indices":
+ - do:
+ indices.close:
+ index: _all
+
+ - do:
+ catch: forbidden
+ search:
+ index: test_index2
+
+ - do:
+ indices.open:
+ index: _all
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ search:
+ index: test_index2
+
+---
+"Trailing wildcard":
+ - do:
+ indices.close:
+ index: test_*
+
+ - do:
+ catch: forbidden
+ search:
+ index: test_index2
+
+ - do:
+ indices.open:
+ index: test_*
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ search:
+ index: test_index2
+
+---
+"Only wildcard":
+ - do:
+ indices.close:
+ index: '*'
+
+ - do:
+ catch: forbidden
+ search:
+ index: test_index3
+
+ - do:
+ indices.open:
+ index: '*'
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ search:
+ index: test_index3
+
diff --git a/rest-api-spec/test/indices.optimize/10_basic.yaml b/rest-api-spec/test/indices.optimize/10_basic.yaml
new file mode 100644
index 0000000..1f24871
--- /dev/null
+++ b/rest-api-spec/test/indices.optimize/10_basic.yaml
@@ -0,0 +1,10 @@
+---
+"Optimize index tests":
+ - do:
+ indices.create:
+ index: testing
+
+ - do:
+ indices.optimize:
+ index: testing
+ max_num_segments: 1
diff --git a/rest-api-spec/test/indices.put_alias/10_basic.yaml b/rest-api-spec/test/indices.put_alias/10_basic.yaml
new file mode 100644
index 0000000..98fd6b3
--- /dev/null
+++ b/rest-api-spec/test/indices.put_alias/10_basic.yaml
@@ -0,0 +1,29 @@
+---
+"Basic test for put alias":
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.exists_alias:
+ name: test_alias
+
+ - is_false: ''
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_alias
+
+ - do:
+ indices.exists_alias:
+ name: test_alias
+
+ - is_true: ''
+
+ - do:
+ indices.get_alias:
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {}}
diff --git a/rest-api-spec/test/indices.put_alias/all_path_options.yaml b/rest-api-spec/test/indices.put_alias/all_path_options.yaml
new file mode 100644
index 0000000..3392ae3
--- /dev/null
+++ b/rest-api-spec/test/indices.put_alias/all_path_options.yaml
@@ -0,0 +1,127 @@
+---
+setup:
+# create three indices
+
+ - do:
+ indices.create:
+ index: test_index1
+ - do:
+ indices.create:
+ index: test_index2
+ - do:
+ indices.create:
+ index: foo
+
+---
+"put alias per index":
+
+ - do:
+ indices.put_alias:
+ index: test_index1
+ name: alias
+ - do:
+ indices.put_alias:
+ index: test_index2
+ name: alias
+
+ - do:
+ indices.get_alias:
+ name: alias
+
+ - match: {test_index1.aliases.alias: {}}
+
+ - match: {test_index2.aliases.alias: {}}
+
+ - is_false: foo
+
+---
+"put alias in _all index":
+
+ - do:
+ indices.put_alias:
+ index: _all
+ name: alias
+
+ - do:
+ indices.get_alias:
+ name: alias
+
+ - match: {test_index1.aliases.alias: {}}
+ - match: {test_index2.aliases.alias: {}}
+ - match: {foo.aliases.alias: {}}
+
+---
+"put alias in * index":
+
+
+ - do:
+ indices.put_alias:
+ index: '*'
+ name: alias
+
+ - do:
+ indices.get_alias:
+ name: alias
+
+ - match: {test_index1.aliases.alias: {}}
+ - match: {test_index2.aliases.alias: {}}
+ - match: {foo.aliases.alias: {}}
+
+---
+"put alias prefix* index":
+
+
+ - do:
+ indices.put_alias:
+ index: "test_*"
+ name: alias
+
+ - do:
+ indices.get_alias:
+ name: alias
+
+ - match: {test_index1.aliases.alias: {}}
+ - match: {test_index2.aliases.alias: {}}
+ - is_false: foo
+
+---
+"put alias in list of indices":
+
+
+ - do:
+ indices.put_alias:
+ index: "test_index1,test_index2"
+ name: alias
+
+ - do:
+ indices.get_alias:
+ name: alias
+
+ - match: {test_index1.aliases.alias: {}}
+ - match: {test_index2.aliases.alias: {}}
+ - is_false: foo
+
+---
+"put alias with blank index":
+
+
+ - do:
+ indices.put_alias:
+ name: alias
+
+ - do:
+ indices.get_alias:
+ name: alias
+
+ - match: {test_index1.aliases.alias: {}}
+ - match: {test_index2.aliases.alias: {}}
+ - match: {foo.aliases.alias: {}}
+
+---
+"put alias with missing name":
+
+
+ - do:
+ catch: param
+ indices.put_alias: {}
+
diff --git a/rest-api-spec/test/indices.put_mapping/10_basic.yaml b/rest-api-spec/test/indices.put_mapping/10_basic.yaml
new file mode 100644
index 0000000..e02b948
--- /dev/null
+++ b/rest-api-spec/test/indices.put_mapping/10_basic.yaml
@@ -0,0 +1,62 @@
+---
+"Test Create and update mapping":
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.put_mapping:
+ index: test_index
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text1:
+ type: string
+ analyzer: whitespace
+ text2:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: {test_index.mappings.test_type.properties.text1.type: string}
+ - match: {test_index.mappings.test_type.properties.text1.analyzer: whitespace}
+ - match: {test_index.mappings.test_type.properties.text2.type: string}
+ - match: {test_index.mappings.test_type.properties.text2.analyzer: whitespace}
+
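+# Re-putting the mapping upgrades text1 to a multi_field, adding a
+# not_analyzed raw sub-field next to the analyzed one.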
+ - do:
+ indices.put_mapping:
+ index: test_index
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text1:
+ type: multi_field
+ fields:
+ text1:
+ type: string
+ analyzer: whitespace
+ text_raw:
+ type: string
+ index: not_analyzed
+ text2:
+ type: string
+ analyzer: whitespace
+ fields:
+ text_raw:
+ type: string
+ index: not_analyzed
+
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: {test_index.mappings.test_type.properties.text1.type: string}
+ - match: {test_index.mappings.test_type.properties.text1.fields.text_raw.index: not_analyzed}
+ - match: {test_index.mappings.test_type.properties.text2.type: string}
+ - match: {test_index.mappings.test_type.properties.text2.fields.text_raw.index: not_analyzed}
diff --git a/rest-api-spec/test/indices.put_mapping/all_path_options.yaml b/rest-api-spec/test/indices.put_mapping/all_path_options.yaml
new file mode 100644
index 0000000..23b02f2
--- /dev/null
+++ b/rest-api-spec/test/indices.put_mapping/all_path_options.yaml
@@ -0,0 +1,178 @@
+setup:
+ - do:
+ indices.create:
+ index: test_index1
+ - do:
+ indices.create:
+ index: test_index2
+ - do:
+ indices.create:
+ index: foo
+
+
+---
+"put one mapping per index":
+ - do:
+ indices.put_mapping:
+ index: test_index1
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+ - do:
+ indices.put_mapping:
+ index: test_index2
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: {test_index1.mappings.test_type.properties.text.type: string}
+ - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {test_index2.mappings.test_type.properties.text.type: string}
+ - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - is_false: foo
+
+---
+"put mapping in _all index":
+
+ - do:
+ indices.put_mapping:
+ index: _all
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: {test_index1.mappings.test_type.properties.text.type: string}
+ - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {test_index2.mappings.test_type.properties.text.type: string}
+ - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {foo.mappings.test_type.properties.text.type: string}
+ - match: {foo.mappings.test_type.properties.text.analyzer: whitespace}
+
+---
+"put mapping in * index":
+ - do:
+ indices.put_mapping:
+ index: "*"
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: {test_index1.mappings.test_type.properties.text.type: string}
+ - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {test_index2.mappings.test_type.properties.text.type: string}
+ - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {foo.mappings.test_type.properties.text.type: string}
+ - match: {foo.mappings.test_type.properties.text.analyzer: whitespace}
+
+---
+"put mapping in prefix* index":
+ - do:
+ indices.put_mapping:
+ index: "test_index*"
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: {test_index1.mappings.test_type.properties.text.type: string}
+ - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {test_index2.mappings.test_type.properties.text.type: string}
+ - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - is_false: foo
+
+---
+"put mapping in list of indices":
+ - do:
+ indices.put_mapping:
+ index: [test_index1, test_index2]
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: {test_index1.mappings.test_type.properties.text.type: string}
+ - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {test_index2.mappings.test_type.properties.text.type: string}
+ - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - is_false: foo
+
+---
+"put mapping with blank index":
+ - do:
+ indices.put_mapping:
+ type: test_type
+ body:
+ test_type:
+ properties:
+ text:
+ type: string
+ analyzer: whitespace
+
+ - do:
+ indices.get_mapping: {}
+
+ - match: {test_index1.mappings.test_type.properties.text.type: string}
+ - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {test_index2.mappings.test_type.properties.text.type: string}
+ - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace}
+
+ - match: {foo.mappings.test_type.properties.text.type: string}
+ - match: {foo.mappings.test_type.properties.text.analyzer: whitespace}
+
+---
+"put mapping with missing type":
+
+
+ - do:
+ catch: param
+ indices.put_mapping: {}
+
diff --git a/rest-api-spec/test/indices.put_settings/10_basic.yaml b/rest-api-spec/test/indices.put_settings/10_basic.yaml
new file mode 100644
index 0000000..f5b913b
--- /dev/null
+++ b/rest-api-spec/test/indices.put_settings/10_basic.yaml
@@ -0,0 +1,30 @@
+---
+"Test indices settings":
+ - do:
+ indices.create:
+ index: test-index
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ indices.get_settings:
+ index: test-index
+ flat_settings: true
+
+ - match:
+ test-index.settings.index\.number_of_replicas: "0"
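+# With flat_settings: true the key is the literal string
+# "index.number_of_replicas"; the backslash keeps the dot from acting as a
+# path separator.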
+
+ - do:
+ indices.put_settings:
+ body:
+ number_of_replicas: 1
+
+ - do:
+ indices.get_settings:
+ flat_settings: false
+
+ - match:
+ test-index.settings.index.number_of_replicas: "1"
+
diff --git a/rest-api-spec/test/indices.put_settings/all_path_options.yaml b/rest-api-spec/test/indices.put_settings/all_path_options.yaml
new file mode 100644
index 0000000..b9ae712
--- /dev/null
+++ b/rest-api-spec/test/indices.put_settings/all_path_options.yaml
@@ -0,0 +1,113 @@
+setup:
+ - do:
+ indices.create:
+ index: test_index1
+ - do:
+ indices.create:
+ index: test_index2
+ - do:
+ indices.create:
+ index: foo
+
+
+---
+"put settings per index":
+ - do:
+ indices.put_settings:
+ index: test_index1
+ body:
+ refresh_interval: 1s
+
+ - do:
+ indices.put_settings:
+ index: test_index2
+ body:
+ refresh_interval: 1s
+
+
+ - do:
+ indices.get_settings: {}
+
+ - match: {test_index1.settings.index.refresh_interval: 1s}
+ - match: {test_index2.settings.index.refresh_interval: 1s}
+ - is_false: foo.settings.index.refresh_interval
+
+---
+"put settings in _all index":
+ - do:
+ indices.put_settings:
+ index: _all
+ body:
+ refresh_interval: 1s
+
+ - do:
+ indices.get_settings: {}
+
+ - match: {test_index1.settings.index.refresh_interval: 1s}
+ - match: {test_index2.settings.index.refresh_interval: 1s}
+ - match: {foo.settings.index.refresh_interval: 1s}
+
+---
+"put settings in * index":
+ - do:
+ indices.put_settings:
+ index: '*'
+ body:
+ refresh_interval: 1s
+
+ - do:
+ indices.get_settings: {}
+
+ - match: {test_index1.settings.index.refresh_interval: 1s}
+ - match: {test_index2.settings.index.refresh_interval: 1s}
+ - match: {foo.settings.index.refresh_interval: 1s}
+
+
+---
+"put settings in prefix* index":
+ - do:
+ indices.put_settings:
+ index: 'test*'
+ body:
+ refresh_interval: 1s
+
+ - do:
+ indices.get_settings: {}
+
+ - match: {test_index1.settings.index.refresh_interval: 1s}
+ - match: {test_index2.settings.index.refresh_interval: 1s}
+ - is_false: foo.settings.index.refresh_interval
+
+---
+"put settings in list of indices":
+ - skip:
+ version: 1 - 999
+ reason: list of indices not implemented yet
+ - do:
+ indices.put_settings:
+ index: test_index1, test_index2
+ body:
+ refresh_interval: 1s
+
+ - do:
+ indices.get_settings: {}
+
+ - match: {test_index1.settings.index.refresh_interval: 1s}
+ - match: {test_index2.settings.index.refresh_interval: 1s}
+ - is_false: foo.settings.index.refresh_interval
+
+
+---
+"put settings in blank index":
+ - do:
+ indices.put_settings:
+ body:
+ refresh_interval: 1s
+
+ - do:
+ indices.get_settings: {}
+
+ - match: {test_index1.settings.index.refresh_interval: 1s}
+ - match: {test_index2.settings.index.refresh_interval: 1s}
+ - match: {foo.settings.index.refresh_interval: 1s}
+
diff --git a/rest-api-spec/test/indices.put_template/10_basic.yaml b/rest-api-spec/test/indices.put_template/10_basic.yaml
new file mode 100644
index 0000000..4c2ab84
--- /dev/null
+++ b/rest-api-spec/test/indices.put_template/10_basic.yaml
@@ -0,0 +1,17 @@
+---
+"Put template":
+ - do:
+ indices.put_template:
+ name: test
+ body:
+ template: test-*
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ indices.get_template:
+ name: test
+
+ - match: {test.template: "test-*"}
+ - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}}
diff --git a/rest-api-spec/test/indices.put_warmer/10_basic.yaml b/rest-api-spec/test/indices.put_warmer/10_basic.yaml
new file mode 100644
index 0000000..44313aa
--- /dev/null
+++ b/rest-api-spec/test/indices.put_warmer/10_basic.yaml
@@ -0,0 +1,145 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.create:
+ index: test_idx
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ indices.put_warmer:
+ index: test_idx
+ name: test_warmer2
+ body:
+ query:
+ match_all: {}
+
+ - do:
+ indices.put_warmer:
+ index: test_index
+ name: test_warmer
+ body:
+ query:
+ match_all: {}
+
+---
+"Basic test for warmers":
+ - do:
+ indices.get_warmer:
+ index: test_index
+ name: test_warmer
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+
+ - do:
+ indices.delete_warmer:
+ index: test_index
+ name: test_warmer
+
+ - do:
+ indices.get_warmer:
+ index: test_index
+ name: test_warmer
+
+ - match: { '': {}}
+
+---
+"Getting all warmers via /_warmer should work":
+
+ - do:
+ indices.get_warmer: {}
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
+
+
+---
+"Getting warmers for several indices should work using *":
+
+ - do:
+ indices.get_warmer:
+ index: '*'
+ name: '*'
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"Getting warmers for several indices should work using _all":
+
+ - do:
+ indices.get_warmer:
+ index: _all
+ name: _all
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"Getting all warmers without specifying index should work":
+
+ - do:
+ indices.get_warmer:
+ name: _all
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"Getting warmers for several indices should work using prefix*":
+
+ - do:
+ indices.get_warmer:
+ index: test_i*
+ name: test_w*
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"Getting warmers for several indices should work using comma-separated lists":
+
+ - do:
+ indices.get_warmer:
+ index: test_index,test_idx
+ name: test_warmer,test_warmer2
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - match: {test_idx.warmers.test_warmer2.source.query.match_all: {}}
+
+---
+"Getting a non-existent warmer on an existing index should return an empty body":
+
+ - do:
+ indices.get_warmer:
+ index: test_index
+ name: non-existent
+
+ - match: { '': {}}
+
+---
+"Getting an existent and non-existent warmer should return the existent and no data about the non-existent warmer":
+
+ - do:
+ indices.get_warmer:
+ index: test_index
+ name: test_warmer,non-existent
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+ - is_false: test_index.warmers.non-existent
+
+---
+"Getting warmer on an non-existent index should return 404":
+
+ - do:
+ catch: missing
+ indices.get_warmer:
+ index: non-existent
+ name: foo
+
diff --git a/rest-api-spec/test/indices.put_warmer/20_aliases.yaml b/rest-api-spec/test/indices.put_warmer/20_aliases.yaml
new file mode 100644
index 0000000..96d7344
--- /dev/null
+++ b/rest-api-spec/test/indices.put_warmer/20_aliases.yaml
@@ -0,0 +1,30 @@
+---
+"Getting warmer for aliases should return the real index as key":
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ indices.put_warmer:
+ index: test_index
+ name: test_warmer
+ body:
+ query:
+ match_all: {}
+
+ - do:
+ indices.put_alias:
+ index: test_index
+ name: test_alias
+
+ - do:
+ indices.get_warmer:
+ index: test_alias
+
+ - match: {test_index.warmers.test_warmer.source.query.match_all: {}}
+
diff --git a/rest-api-spec/test/indices.put_warmer/all_path_options.yaml b/rest-api-spec/test/indices.put_warmer/all_path_options.yaml
new file mode 100644
index 0000000..b9c64f7
--- /dev/null
+++ b/rest-api-spec/test/indices.put_warmer/all_path_options.yaml
@@ -0,0 +1,134 @@
+---
+setup:
+
+ - do:
+ indices.create:
+ index: test_index1
+
+ - do:
+ indices.create:
+ index: test_index2
+
+ - do:
+ indices.create:
+ index: foo
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+---
+"put warmer per index":
+
+ - do:
+ indices.put_warmer:
+ index: test_index1
+ name: warmer
+ body:
+ query:
+ match_all: {}
+ - do:
+ indices.put_warmer:
+ index: test_index2
+ name: warmer
+ body:
+ query:
+ match_all: {}
+
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.warmer.source.query.match_all: {}}
+ - match: {test_index2.warmers.warmer.source.query.match_all: {}}
+ - is_false: foo
+
+---
+"put warmer in _all index":
+ - do:
+ indices.put_warmer:
+ index: _all
+ name: warmer
+ body:
+ query:
+ match_all: {}
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.warmer.source.query.match_all: {}}
+ - match: {test_index2.warmers.warmer.source.query.match_all: {}}
+ - match: {foo.warmers.warmer.source.query.match_all: {}}
+
+---
+"put warmer in * index":
+ - do:
+ indices.put_warmer:
+ index: "*"
+ name: warmer
+ body:
+ query:
+ match_all: {}
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.warmer.source.query.match_all: {}}
+ - match: {test_index2.warmers.warmer.source.query.match_all: {}}
+ - match: {foo.warmers.warmer.source.query.match_all: {}}
+
+---
+"put warmer prefix* index":
+ - do:
+ indices.put_warmer:
+ index: "test_index*"
+ name: warmer
+ body:
+ query:
+ match_all: {}
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.warmer.source.query.match_all: {}}
+ - match: {test_index2.warmers.warmer.source.query.match_all: {}}
+ - is_false: foo
+
+---
+"put warmer in list of indices":
+ - do:
+ indices.put_warmer:
+ index: [test_index1, test_index2]
+ name: warmer
+ body:
+ query:
+ match_all: {}
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.warmer.source.query.match_all: {}}
+ - match: {test_index2.warmers.warmer.source.query.match_all: {}}
+ - is_false: foo
+
+---
+"put warmer with blank index":
+ - do:
+ indices.put_warmer:
+ name: warmer
+ body:
+ query:
+ match_all: {}
+ - do:
+ indices.get_warmer: { index: _all, name: '*' }
+
+ - match: {test_index1.warmers.warmer.source.query.match_all: {}}
+ - match: {test_index2.warmers.warmer.source.query.match_all: {}}
+ - match: {foo.warmers.warmer.source.query.match_all: {}}
+
+---
+"put warmer with missing name":
+
+
+ - do:
+ catch: param
+ indices.put_warmer:
+ body:
+ query:
+ match_all: {}
+
diff --git a/rest-api-spec/test/indices.segments/10_basic.yaml b/rest-api-spec/test/indices.segments/10_basic.yaml
new file mode 100644
index 0000000..801982e
--- /dev/null
+++ b/rest-api-spec/test/indices.segments/10_basic.yaml
@@ -0,0 +1,5 @@
+---
+"segments test":
+ - do:
+ indices.segments:
+ allow_no_indices: true
diff --git a/rest-api-spec/test/indices.snapshot_index/10_basic.yaml b/rest-api-spec/test/indices.snapshot_index/10_basic.yaml
new file mode 100644
index 0000000..7acd47f
--- /dev/null
+++ b/rest-api-spec/test/indices.snapshot_index/10_basic.yaml
@@ -0,0 +1,4 @@
+---
+"snapshot_index test":
+ - do:
+ indices.snapshot_index: {}
diff --git a/rest-api-spec/test/indices.stats/10_basic.yaml b/rest-api-spec/test/indices.stats/10_basic.yaml
new file mode 100644
index 0000000..f197f38
--- /dev/null
+++ b/rest-api-spec/test/indices.stats/10_basic.yaml
@@ -0,0 +1,4 @@
+---
+"stats test":
+ - do:
+ indices.stats: {}
diff --git a/rest-api-spec/test/indices.status/10_basic.yaml b/rest-api-spec/test/indices.status/10_basic.yaml
new file mode 100644
index 0000000..89f4ae5
--- /dev/null
+++ b/rest-api-spec/test/indices.status/10_basic.yaml
@@ -0,0 +1,9 @@
+---
+"Indices status test":
+ - do:
+ indices.status: {}
+
+ - do:
+ catch: missing
+ indices.status:
+ index: not_here
diff --git a/rest-api-spec/test/indices.update_aliases/10_basic.yaml b/rest-api-spec/test/indices.update_aliases/10_basic.yaml
new file mode 100644
index 0000000..5b45f74
--- /dev/null
+++ b/rest-api-spec/test/indices.update_aliases/10_basic.yaml
@@ -0,0 +1,34 @@
+---
+"Basic test for aliases":
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.exists_alias:
+ name: test_alias
+
+ - is_false: ''
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ routing: routing_value
+
+ - do:
+ indices.exists_alias:
+ name: test_alias
+
+ - is_true: ''
+
+ - do:
+ indices.get_alias:
+ index: test_index
+ name: test_alias
+
+ - match: {test_index.aliases.test_alias: {'index_routing': 'routing_value', 'search_routing': 'routing_value'}}
diff --git a/rest-api-spec/test/indices.update_aliases/20_routing.yaml b/rest-api-spec/test/indices.update_aliases/20_routing.yaml
new file mode 100644
index 0000000..197de05
--- /dev/null
+++ b/rest-api-spec/test/indices.update_aliases/20_routing.yaml
@@ -0,0 +1,135 @@
+setup:
+ - do:
+ indices.create:
+ index: test_index
+
+---
+"Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ routing: routing
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'index_routing': 'routing', 'search_routing': 'routing'}}
+
+---
+"Index Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ index_routing: index_routing
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'index_routing': 'index_routing'}}
+
+---
+"Search Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ search_routing: search_routing
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'search_routing': 'search_routing'}}
+
+---
+"Index, Default Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ index_routing: index_routing
+ routing: routing
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'index_routing': 'index_routing', 'search_routing': 'routing'}}
+
+---
+"Search, Default Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ search_routing: search_routing
+ routing: routing
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'index_routing': 'routing', 'search_routing': 'search_routing'}}
+
+---
+"Index, Search, Default Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ index_routing: index_routing
+ search_routing: search_routing
+ routing: routing
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'index_routing': 'index_routing', 'search_routing': 'search_routing'}}
+
+---
+"Numeric Routing":
+
+ - do:
+ indices.update_aliases:
+ body:
+ actions:
+ - add:
+ index: test_index
+ alias: test_alias
+ routing: 5
+
+ - do:
+ indices.get_aliases:
+ index: test_index
+
+ - match: {test_index.aliases.test_alias: {'index_routing': '5', 'search_routing': '5'}}
+
diff --git a/rest-api-spec/test/indices.validate_query/10_basic.yaml b/rest-api-spec/test/indices.validate_query/10_basic.yaml
new file mode 100644
index 0000000..1325eaa
--- /dev/null
+++ b/rest-api-spec/test/indices.validate_query/10_basic.yaml
@@ -0,0 +1,27 @@
+---
+"Validate query api":
+ - do:
+ indices.create:
+ index: testing
+ body:
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ indices.validate_query:
+ q: query string
+
+ - is_true: valid
+
+ - do:
+ indices.validate_query:
+ body:
+ query:
+ invalid_query: {}
+
+ - is_false: valid
+
diff --git a/rest-api-spec/test/info/10_info.yaml b/rest-api-spec/test/info/10_info.yaml
new file mode 100644
index 0000000..62301d8
--- /dev/null
+++ b/rest-api-spec/test/info/10_info.yaml
@@ -0,0 +1,8 @@
+---
+"Info":
+ - do: {info: {}}
+ - match: {status: 200}
+ - is_true: name
+ - is_true: tagline
+ - is_true: version
+ - is_true: version.number
diff --git a/rest-api-spec/test/info/20_lucene_version.yaml b/rest-api-spec/test/info/20_lucene_version.yaml
new file mode 100644
index 0000000..e67bc80
--- /dev/null
+++ b/rest-api-spec/test/info/20_lucene_version.yaml
@@ -0,0 +1,8 @@
+---
+"Lucene Version":
+ - do: {info: {}}
+ - match: {status: 200}
+ - is_true: version.lucene_version
+
+
+
diff --git a/rest-api-spec/test/mget/10_basic.yaml b/rest-api-spec/test/mget/10_basic.yaml
new file mode 100644
index 0000000..0850772
--- /dev/null
+++ b/rest-api-spec/test/mget/10_basic.yaml
@@ -0,0 +1,45 @@
+---
+"Basic multi-get":
+ - do:
+ indices.create:
+ index: test_2
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: test_2, _type: test, _id: 1}
+ - { _index: test_1, _type: none, _id: 1}
+ - { _index: test_1, _type: test, _id: 2}
+ - { _index: test_1, _type: test, _id: 1}
+
+ - is_false: docs.0.found
+ - match: { docs.0._index: test_2 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
+
+ - is_false: docs.1.found
+ - match: { docs.1._index: test_1 }
+ - match: { docs.1._type: none }
+ - match: { docs.1._id: "1" }
+
+ - is_false: docs.2.found
+ - match: { docs.2._index: test_1 }
+ - match: { docs.2._type: test }
+ - match: { docs.2._id: "2" }
+
+ - is_true: docs.3.found
+ - match: { docs.3._index: test_1 }
+ - match: { docs.3._type: test }
+ - match: { docs.3._id: "1" }
+ - match: { docs.3._version: 1 }
+ - match: { docs.3._source: { foo: bar }}
diff --git a/rest-api-spec/test/mget/11_default_index_type.yaml b/rest-api-spec/test/mget/11_default_index_type.yaml
new file mode 100644
index 0000000..b556cf9
--- /dev/null
+++ b/rest-api-spec/test/mget/11_default_index_type.yaml
@@ -0,0 +1,48 @@
+---
+"Default index/type":
+ - do:
+ indices.create:
+ index: test_2
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ body:
+ docs:
+ - { _index: test_2, _id: 1}
+ - { _type: none, _id: 1}
+ - { _id: 2}
+ - { _id: 1}
+
+ - is_false: docs.0.found
+ - match: { docs.0._index: test_2 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
+
+ - is_false: docs.1.found
+ - match: { docs.1._index: test_1 }
+ - match: { docs.1._type: none }
+ - match: { docs.1._id: "1" }
+
+ - is_false: docs.2.found
+ - match: { docs.2._index: test_1 }
+ - match: { docs.2._type: test }
+ - match: { docs.2._id: "2" }
+
+ - is_true: docs.3.found
+ - match: { docs.3._index: test_1 }
+ - match: { docs.3._type: test }
+ - match: { docs.3._id: "1" }
+ - match: { docs.3._version: 1 }
+ - match: { docs.3._source: { foo: bar }}
diff --git a/rest-api-spec/test/mget/12_non_existent_index.yaml b/rest-api-spec/test/mget/12_non_existent_index.yaml
new file mode 100644
index 0000000..f91f323
--- /dev/null
+++ b/rest-api-spec/test/mget/12_non_existent_index.yaml
@@ -0,0 +1,35 @@
+---
+"Non-existent index":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: test_2, _type: test, _id: 1}
+
+ - is_false: docs.0.found
+ - match: { docs.0._index: test_2 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
+
+ - do:
+ mget:
+ body:
+ index: test_2
+ docs:
+ - { _index: test_1, _type: test, _id: 1}
+
+ - is_true: docs.0.found
+ - match: { docs.0._index: test_1 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
diff --git a/rest-api-spec/test/mget/13_missing_metadata.yaml b/rest-api-spec/test/mget/13_missing_metadata.yaml
new file mode 100644
index 0000000..11b4a12
--- /dev/null
+++ b/rest-api-spec/test/mget/13_missing_metadata.yaml
@@ -0,0 +1,53 @@
+---
+"Missing metadata":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
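+  # A note on the test DSL: `catch: /regex/` asserts that the request fails
+  # and that the error message matches the given pattern.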
+ - do:
+ catch: /ActionRequestValidationException.+ id is missing/
+ mget:
+ body:
+ docs:
+ - { _index: test_1, _type: test}
+
+ - do:
+ catch: /ActionRequestValidationException.+ index is missing/
+ mget:
+ body:
+ docs:
+ - { _type: test, _id: 1}
+
+ - do:
+ catch: /ActionRequestValidationException.+ no documents to get/
+ mget:
+ body:
+ docs: []
+
+ - do:
+ catch: /ActionRequestValidationException.+ no documents to get/
+ mget:
+ body: {}
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: test_1, _id: 1}
+
+ - is_true: docs.0.found
+ - match: { docs.0._index: test_1 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
+ - match: { docs.0._version: 1 }
+ - match: { docs.0._source: { foo: bar }}
diff --git a/rest-api-spec/test/mget/15_ids.yaml b/rest-api-spec/test/mget/15_ids.yaml
new file mode 100644
index 0000000..a86fc2c
--- /dev/null
+++ b/rest-api-spec/test/mget/15_ids.yaml
@@ -0,0 +1,72 @@
+---
+"IDs":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_1
+ type: test_2
+ id: 2
+ body: { foo: baz }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ body:
+ ids: [1, 2]
+
+ - is_true: docs.0.found
+ - match: { docs.0._index: test_1 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
+ - match: { docs.0._version: 1 }
+ - match: { docs.0._source: { foo: bar }}
+
+ - is_false: docs.1.found
+ - match: { docs.1._index: test_1 }
+ - match: { docs.1._type: test }
+ - match: { docs.1._id: "2" }
+
+ - do:
+ mget:
+ index: test_1
+ body:
+ ids: [1, 2]
+
+ - is_true: docs.0.found
+ - match: { docs.0._index: test_1 }
+ - match: { docs.0._type: test }
+ - match: { docs.0._id: "1" }
+ - match: { docs.0._version: 1 }
+ - match: { docs.0._source: { foo: bar }}
+
+ - is_true: docs.1.found
+ - match: { docs.1._index: test_1 }
+ - match: { docs.1._type: test_2 }
+ - match: { docs.1._id: "2" }
+ - match: { docs.1._version: 1 }
+ - match: { docs.1._source: { foo: baz }}
+
+
+ - do:
+ catch: /ActionRequestValidationException.+ no documents to get/
+ mget:
+ index: test_1
+ body:
+ ids: []
+
+ - do:
+ catch: /ActionRequestValidationException.+ no documents to get/
+ mget:
+ index: test_1
+ body: {}
diff --git a/rest-api-spec/test/mget/20_fields.yaml b/rest-api-spec/test/mget/20_fields.yaml
new file mode 100644
index 0000000..347fc25
--- /dev/null
+++ b/rest-api-spec/test/mget/20_fields.yaml
@@ -0,0 +1,109 @@
+---
+"Fields":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, fields: foo }
+ - { _id: 1, fields: [foo] }
+ - { _id: 1, fields: [foo, _source] }
+
+ - is_false: docs.0.fields
+ - match: { docs.0._source: { foo: bar }}
+
+ - match: { docs.1.fields.foo: [bar] }
+ - is_false: docs.1._source
+
+ - match: { docs.2.fields.foo: [bar] }
+ - is_false: docs.2._source
+
+ - match: { docs.3.fields.foo: [bar] }
+ - match: { docs.3._source: { foo: bar }}
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ fields: foo
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, fields: foo }
+ - { _id: 1, fields: [foo] }
+ - { _id: 1, fields: [foo, _source] }
+
+ - match: { docs.0.fields.foo: [bar] }
+ - is_false: docs.0._source
+
+ - match: { docs.1.fields.foo: [bar] }
+ - is_false: docs.1._source
+
+ - match: { docs.2.fields.foo: [bar] }
+ - is_false: docs.2._source
+
+ - match: { docs.3.fields.foo: [bar] }
+ - match: { docs.3._source: { foo: bar }}
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ fields: [foo]
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, fields: foo }
+ - { _id: 1, fields: [foo] }
+ - { _id: 1, fields: [foo, _source] }
+
+ - match: { docs.0.fields.foo: [bar] }
+ - is_false: docs.0._source
+
+ - match: { docs.1.fields.foo: [bar] }
+ - is_false: docs.1._source
+
+ - match: { docs.2.fields.foo: [bar] }
+ - is_false: docs.2._source
+
+ - match: { docs.3.fields.foo: [bar] }
+ - match: { docs.3._source: { foo: bar }}
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ fields: [foo, _source]
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, fields: foo }
+ - { _id: 1, fields: [foo] }
+ - { _id: 1, fields: [foo, _source] }
+
+ - match: { docs.0.fields.foo: [bar] }
+ - match: { docs.0._source: { foo: bar }}
+
+ - match: { docs.1.fields.foo: [bar] }
+ - is_false: docs.1._source
+
+ - match: { docs.2.fields.foo: [bar] }
+ - is_false: docs.2._source
+
+ - match: { docs.3.fields.foo: [bar] }
+ - match: { docs.3._source: { foo: bar }}
+
diff --git a/rest-api-spec/test/mget/30_parent.yaml b/rest-api-spec/test/mget/30_parent.yaml
new file mode 100644
index 0000000..491b8b8
--- /dev/null
+++ b/rest-api-spec/test/mget/30_parent.yaml
@@ -0,0 +1,57 @@
+---
+"Parent":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 4
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 2
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, parent: 5, fields: [ _parent, _routing ] }
+ - { _id: 1, parent: 4, fields: [ _parent, _routing ] }
+ - { _id: 2, parent: 5, fields: [ _parent, _routing ] }
+
+ - is_false: docs.0.found
+ - is_false: docs.1.found
+
+ - is_true: docs.2.found
+ - match: { docs.2._index: test_1 }
+ - match: { docs.2._type: test }
+ - match: { docs.2._id: "1" }
+ - match: { docs.2.fields._parent: "4" }
+ - match: { docs.2.fields._routing: "4" }
+
+ - is_true: docs.3.found
+ - match: { docs.3._index: test_1 }
+ - match: { docs.3._type: test }
+ - match: { docs.3._id: "2" }
+ - match: { docs.3.fields._parent: "5" }
+ - match: { docs.3.fields._routing: "5" }
diff --git a/rest-api-spec/test/mget/40_routing.yaml b/rest-api-spec/test/mget/40_routing.yaml
new file mode 100644
index 0000000..96734da
--- /dev/null
+++ b/rest-api-spec/test/mget/40_routing.yaml
@@ -0,0 +1,42 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ fields: [_routing]
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, routing: 4 }
+ - { _id: 1, routing: 5 }
+
+ - is_false: docs.0.found
+ - is_false: docs.1.found
+
+ - is_true: docs.2.found
+ - match: { docs.2._index: test_1 }
+ - match: { docs.2._type: test }
+ - match: { docs.2._id: "1" }
+ - match: { docs.2.fields._routing: "5" }
diff --git a/rest-api-spec/test/mget/55_parent_with_routing.yaml b/rest-api-spec/test/mget/55_parent_with_routing.yaml
new file mode 100644
index 0000000..039f5c5
--- /dev/null
+++ b/rest-api-spec/test/mget/55_parent_with_routing.yaml
@@ -0,0 +1,46 @@
+---
+"Parent":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 4
+ routing: 5
+ body: { foo: bar }
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ fields: [ _routing , _parent]
+ body:
+ docs:
+ - { _id: 1 }
+ - { _id: 1, parent: 4 }
+ - { _id: 1, parent: 4, routing: 5 }
+
+ - is_false: docs.0.found
+ - is_false: docs.1.found
+
+ - is_true: docs.2.found
+ - match: { docs.2._index: test_1 }
+ - match: { docs.2._type: test }
+ - match: { docs.2._id: "1" }
+ - match: { docs.2.fields._parent: "4" }
+ - match: { docs.2.fields._routing: "5" }
diff --git a/rest-api-spec/test/mget/60_realtime_refresh.yaml b/rest-api-spec/test/mget/60_realtime_refresh.yaml
new file mode 100644
index 0000000..c688b4b
--- /dev/null
+++ b/rest-api-spec/test/mget/60_realtime_refresh.yaml
@@ -0,0 +1,56 @@
+---
+"Realtime Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ refresh_interval: -1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ realtime: 0
+ body:
+ ids: [1]
+
+ - is_false: docs.0.found
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ realtime: 1
+ body:
+ ids: [1]
+
+ - is_true: docs.0.found
+
+ - do:
+ mget:
+ index: test_1
+ type: test
+ realtime: 0
+ refresh: 1
+ body:
+ ids: [1]
+
+ - is_true: docs.0.found
diff --git a/rest-api-spec/test/mget/70_source_filtering.yaml b/rest-api-spec/test/mget/70_source_filtering.yaml
new file mode 100644
index 0000000..a1c16b3
--- /dev/null
+++ b/rest-api-spec/test/mget/70_source_filtering.yaml
@@ -0,0 +1,119 @@
+setup:
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 2
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+
+---
+"Source filtering - true/false":
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: "test_1", _type: "test", _id: "1", _source: false }
+ - { _index: "test_1", _type: "test", _id: "2", _source: true }
+
+ - match: { docs.0._id: "1" }
+ - is_false: docs.0._source
+ - match: { docs.1._id: "2" }
+ - is_true: docs.1._source
+
+---
+"Source filtering - include field":
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: "test_1", _type: "test", _id: "1", _source: include.field1 }
+ - { _index: "test_1", _type: "test", _id: "2", _source: [ include.field1 ] }
+
+ - match: { docs.0._source: { include: { field1: v1 }} }
+ - match: { docs.1._source: { include: { field1: v1 }} }
+
+
+---
+"Source filtering - include nested field":
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: "test_1", _type: "test", _id: "1", _source: { include: include.field1 } }
+ - { _index: "test_1", _type: "test", _id: "2", _source: { include: [ include.field1 ] } }
+
+ - match: { docs.0._source: { include: { field1: v1 }} }
+ - match: { docs.1._source: { include: { field1: v1 }} }
+
+---
+"Source filtering - exclude field":
+
+ - do:
+ mget:
+ body:
+ docs:
+ - { _index: "test_1", _type: "test", _id: "1", _source: { include: [ include ], exclude: [ "*.field2" ] } }
+
+ - match: { docs.0._source: { include: { field1: v1 }} }
+
+---
+"Source filtering - ids and true/false":
+
+ - do:
+ mget:
+ _source: false
+ index: test_1
+ body: { ids: [ 1,2 ] }
+ - is_false: docs.0._source
+ - is_false: docs.1._source
+
+ - do:
+ mget:
+ _source: true
+ index: test_1
+ body: { ids: [ 1,2 ] }
+ - is_true: docs.0._source
+ - is_true: docs.1._source
+
+---
+"Source filtering - ids and include field":
+
+ - do:
+ mget:
+ _source: include.field1
+ index: test_1
+ body: { ids: [ 1,2 ] }
+ - match: { docs.0._source: { include: { field1: v1 }} }
+ - match: { docs.1._source: { include: { field1: v1 }} }
+
+---
+"Source filtering - ids and include nested field":
+
+ - do:
+ mget:
+ _source_include: "include.field1,count"
+ index: test_1
+ body: { ids: [ 1,2 ] }
+ - match: { docs.0._source: { include: { field1: v1 }, count: 1} }
+ - match: { docs.1._source: { include: { field1: v1 }, count: 1} }
+
+---
+"Source filtering - ids and exclude field":
+
+ - do:
+ mget:
+ _source_include: include
+ _source_exclude: "*.field2"
+ index: test_1
+ body: { ids: [ 1,2 ] }
+ - match: { docs.0._source: { include: { field1: v1 } } }
+ - match: { docs.1._source: { include: { field1: v1 } } }
diff --git a/rest-api-spec/test/mget/TODO.txt b/rest-api-spec/test/mget/TODO.txt
new file mode 100644
index 0000000..340ff57
--- /dev/null
+++ b/rest-api-spec/test/mget/TODO.txt
@@ -0,0 +1,3 @@
+Tests missing for:
+
+# preference
diff --git a/rest-api-spec/test/mlt/10_basic.yaml b/rest-api-spec/test/mlt/10_basic.yaml
new file mode 100644
index 0000000..e914607
--- /dev/null
+++ b/rest-api-spec/test/mlt/10_basic.yaml
@@ -0,0 +1,25 @@
+---
+"Basic mlt":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar, title: howdy }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+ timeout: 1s
+ - do:
+ mlt:
+ index: test_1
+ type: test
+ id: 1
+ mlt_fields: title
+
+ - match: {hits.total: 0}
+
diff --git a/rest-api-spec/test/mpercolate/10_basic.yaml b/rest-api-spec/test/mpercolate/10_basic.yaml
new file mode 100644
index 0000000..70118c9
--- /dev/null
+++ b/rest-api-spec/test/mpercolate/10_basic.yaml
@@ -0,0 +1,43 @@
+---
+"Basic multi-percolate":
+ - do:
+ index:
+ index: percolator_index
+ type: my_type
+ id: 1
+ body: {foo: bar}
+
+ - do:
+ index:
+ index: percolator_index
+ type: .percolator
+ id: test_percolator
+ body:
+ query:
+ match_all: {}
+
+ - do:
+ mpercolate:
+ body:
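+        # As with bulk, the body interleaves header lines (percolate)
+        # with document lines (doc).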
+ - percolate:
+ index: percolator_index
+ type: my_type
+ - doc:
+ foo: bar
+ - percolate:
+ index: percolator_index1
+ type: my_type
+ - doc:
+ foo: bar
+ - percolate:
+ index: percolator_index
+ type: my_type
+ id: 1
+ - doc:
+ foo: bar
+
+ - match: { responses.0.total: 1 }
+ - match: { responses.1.error: "IndexMissingException[[percolator_index1] missing]" }
+ - match: { responses.2.total: 1 }
diff --git a/rest-api-spec/test/msearch/10_basic.yaml b/rest-api-spec/test/msearch/10_basic.yaml
new file mode 100644
index 0000000..ce6bc17
--- /dev/null
+++ b/rest-api-spec/test/msearch/10_basic.yaml
@@ -0,0 +1,45 @@
+---
+"Basic multi-search":
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 2
+ body: { foo: baz }
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 3
+ body: { foo: foo }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ msearch:
+ body:
+ - index: test_1
+ - query:
+ match_all: {}
+ - index: test_2
+ - query:
+ match_all: {}
+ - search_type: count
+ index: test_1
+ - query:
+ match: {foo: bar}
+
+ - match: { responses.0.hits.total: 3 }
+ - match: { responses.1.error: "IndexMissingException[[test_2] missing]" }
+ - match: { responses.2.hits.total: 1 }
+
+
diff --git a/rest-api-spec/test/mtermvectors/10_basic.yaml b/rest-api-spec/test/mtermvectors/10_basic.yaml
new file mode 100644
index 0000000..53005b0
--- /dev/null
+++ b/rest-api-spec/test/mtermvectors/10_basic.yaml
@@ -0,0 +1,85 @@
+setup:
+ - do:
+ indices.create:
+ index: testidx
+ body:
+ mappings:
+ testtype:
+ properties:
+ text:
+ type : "string"
+ term_vector : "with_positions_offsets"
+ - do:
+ index:
+ index: testidx
+ type: testtype
+ id: testing_document
+ body: {"text" : "The quick brown fox is brown."}
+
+ - do:
+ indices.refresh: {}
+
+---
+"Basic tests for multi termvector get":
+
+ - do:
+ mtermvectors:
+ "term_statistics" : true
+ "body" :
+ "docs":
+ -
+ "_index" : "testidx"
+ "_type" : "testtype"
+ "_id" : "testing_document"
+
+ - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
+ - match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
+
+ - do:
+ mtermvectors:
+ "term_statistics" : true
+ "body" :
+ "docs":
+ -
+ "_index" : "testidx"
+ "_type" : "testtype"
+ "_id" : "testing_document"
+
+ - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
+ - match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
+
+ - do:
+ mtermvectors:
+ "term_statistics" : true
+ "index" : "testidx"
+ "body" :
+ "docs":
+ -
+ "_type" : "testtype"
+ "_id" : "testing_document"
+
+ - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
+ - match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
+
+ - do:
+ mtermvectors:
+ "term_statistics" : true
+ "index" : "testidx"
+ "type" : "testtype"
+ "body" :
+ "docs":
+ -
+ "_id" : "testing_document"
+
+ - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
+ - match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
+
+ - do:
+ mtermvectors:
+ "term_statistics" : true
+ "index" : "testidx"
+ "type" : "testtype"
+ "ids" : ["testing_document"]
+
+ - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
+ - match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
diff --git a/rest-api-spec/test/nodes.info/10_basic.yaml b/rest-api-spec/test/nodes.info/10_basic.yaml
new file mode 100644
index 0000000..cf632c5
--- /dev/null
+++ b/rest-api-spec/test/nodes.info/10_basic.yaml
@@ -0,0 +1,7 @@
+---
+"node_info test":
+ - do:
+ nodes.info: {}
+
+ - is_true: nodes
+ - is_true: cluster_name
diff --git a/rest-api-spec/test/nodes.stats/10_basic.yaml b/rest-api-spec/test/nodes.stats/10_basic.yaml
new file mode 100644
index 0000000..977d002
--- /dev/null
+++ b/rest-api-spec/test/nodes.stats/10_basic.yaml
@@ -0,0 +1,8 @@
+---
+"Nodes stats":
+ - do:
+ nodes.stats:
+ metric: [ indices, transport ]
+
+ - is_true: cluster_name
+ - is_true: nodes
diff --git a/rest-api-spec/test/percolate/15_new.yaml b/rest-api-spec/test/percolate/15_new.yaml
new file mode 100644
index 0000000..63c5b9a
--- /dev/null
+++ b/rest-api-spec/test/percolate/15_new.yaml
@@ -0,0 +1,40 @@
+---
+"Basic percolation tests":
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ index:
+ index: test_index
+ type: .percolator
+ id: test_percolator
+ body:
+ query:
+ match_all: {}
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ percolate:
+ index: test_index
+ type: test_type
+ body:
+ doc:
+ foo: bar
+
+ - match: {'total': 1}
+ - match: {'matches': [{_index: test_index, _id: test_percolator}]}
+
+ - do:
+ count_percolate:
+ index: test_index
+ type: test_type
+ body:
+ doc:
+ foo: bar
+
+ - is_false: matches
+ - match: {'total': 1}
diff --git a/rest-api-spec/test/percolate/16_existing_doc.yaml b/rest-api-spec/test/percolate/16_existing_doc.yaml
new file mode 100644
index 0000000..a539b0f
--- /dev/null
+++ b/rest-api-spec/test/percolate/16_existing_doc.yaml
@@ -0,0 +1,86 @@
+---
+"Percolate existing documents":
+
+ - do:
+ indices.create:
+ index: percolator_index
+
+ - do:
+ index:
+ index: percolator_index
+ type: .percolator
+ id: test_percolator
+ body:
+ query:
+ match_all: {}
+
+ - do:
+ index:
+ index: percolator_index
+ type: test_type
+ id: 1
+ body:
+ foo: bar
+
+ - do:
+ indices.create:
+ index: my_index
+
+ - do:
+ index:
+ index: my_index
+ type: my_type
+ id: 1
+ body:
+ foo: bar
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ percolate:
+ index: percolator_index
+ type: test_type
+ id: 1
+
+ - match: {'matches': [{_index: percolator_index, _id: test_percolator}]}
+
+ - do:
+ percolate:
+ index: my_index
+ type: my_type
+ id: 1
+ percolate_index: percolator_index
+ percolate_type: test_type
+
+ - match: {'matches': [{_index: percolator_index, _id: test_percolator}]}
+
+
+ - do:
+ index:
+ index: my_index
+ type: my_type
+ id: 1
+ body:
+ foo: bar
+
+ - do:
+ percolate:
+ index: my_index
+ type: my_type
+ id: 1
+ version: 2
+ percolate_index: percolator_index
+ percolate_type: test_type
+
+ - match: {'matches': [{_index: percolator_index, _id: test_percolator}]}
+
+ - do:
+ catch: conflict
+ percolate:
+ index: my_index
+ type: my_type
+ id: 1
+ version: 1
+ percolate_index: percolator_index
+ percolate_type: test_type
diff --git a/rest-api-spec/test/percolate/17_empty.yaml b/rest-api-spec/test/percolate/17_empty.yaml
new file mode 100644
index 0000000..0cd1ac5
--- /dev/null
+++ b/rest-api-spec/test/percolate/17_empty.yaml
@@ -0,0 +1,31 @@
+---
+"Basic percolation tests on an empty cluster":
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ percolate:
+ index: test_index
+ type: test_type
+ body:
+ doc:
+ foo: bar
+
+ - match: {'total': 0}
+ - match: {'matches': []}
+
+ - do:
+ count_percolate:
+ index: test_index
+ type: test_type
+ body:
+ doc:
+ foo: bar
+
+ - is_false: matches
+ - match: {'total': 0}
diff --git a/rest-api-spec/test/percolate/18_highligh_with_query.yaml b/rest-api-spec/test/percolate/18_highligh_with_query.yaml
new file mode 100644
index 0000000..c02996b
--- /dev/null
+++ b/rest-api-spec/test/percolate/18_highligh_with_query.yaml
@@ -0,0 +1,36 @@
+---
+"Basic percolation highlight query test":
+
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ index:
+ index: test_index
+ type: .percolator
+ id: test_percolator
+ body:
+ query:
+ match:
+ foo: bar
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ percolate:
+ index: test_index
+ type: test_type
+ body:
+ doc:
+ foo: "bar foo"
+ size: 1
+ highlight:
+ fields:
+ foo:
+ highlight_query:
+ match:
+ foo: foo
+
+ - match: {'total': 1}
diff --git a/rest-api-spec/test/ping/10_ping.yaml b/rest-api-spec/test/ping/10_ping.yaml
new file mode 100644
index 0000000..ec07c21
--- /dev/null
+++ b/rest-api-spec/test/ping/10_ping.yaml
@@ -0,0 +1,5 @@
+---
+"Ping":
+ - do: { ping: {}}
+ - is_true: ''
+
diff --git a/rest-api-spec/test/scroll/10_basic.yaml b/rest-api-spec/test/scroll/10_basic.yaml
new file mode 100644
index 0000000..3dbca74
--- /dev/null
+++ b/rest-api-spec/test/scroll/10_basic.yaml
@@ -0,0 +1,34 @@
+---
+"Basic scroll":
+ - do:
+ indices.create:
+ index: test_scroll
+ - do:
+ index:
+ index: test_scroll
+ type: test
+ id: 42
+ body: { foo: bar }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ search:
+ index: test_scroll
+ search_type: scan
+ scroll: 1m
+ body:
+ query:
+ match_all: {}
+
+ - set: {_scroll_id: scroll_id}
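+  # `set` captures the _scroll_id from the response into the variable
+  # scroll_id, which the next step references as $scroll_id.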
+
+ - do:
+ scroll:
+ scroll_id: $scroll_id
+
+ - match: {hits.total: 1 }
+ - match: {hits.hits.0._id: "42" }
diff --git a/rest-api-spec/test/scroll/11_clear.yaml b/rest-api-spec/test/scroll/11_clear.yaml
new file mode 100644
index 0000000..30b7e18
--- /dev/null
+++ b/rest-api-spec/test/scroll/11_clear.yaml
@@ -0,0 +1,34 @@
+---
+"Clear scroll":
+ - do:
+ indices.create:
+ index: test_scroll
+ - do:
+ index:
+ index: test_scroll
+ type: test
+ id: 42
+ body: { foo: bar }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ search:
+ index: test_scroll
+ search_type: scan
+ scroll: 1m
+ body:
+ query:
+ match_all: {}
+
+ - set: {_scroll_id: scroll_id1}
+
+ - do:
+ clear_scroll:
+ scroll_id: $scroll_id1
+
+ - do:
+ catch: request
+ scroll:
+ scroll_id: $scroll_id1
diff --git a/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/test/search/10_source_filtering.yaml
new file mode 100644
index 0000000..40a67ba
--- /dev/null
+++ b/rest-api-spec/test/search/10_source_filtering.yaml
@@ -0,0 +1,92 @@
+---
+"Source filtering":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
+ - do:
+ indices.refresh: {}
+
+
+ - do:
+ search:
+ # stringified for boolean value
+ body: "{ _source: true, query: { match_all: {} } }"
+
+ - length: { hits.hits: 1 }
+ - match: { hits.hits.0._source.count: 1 }
+
+ - do: { search: { body: "{ _source: false, query: { match_all: {} } }" } }
+ - length: { hits.hits: 1 }
+ - is_false: hits.hits.0._source
+
+ - do: { search: { body: { query: { match_all: {} } } } }
+ - length: { hits.hits: 1 }
+ - match: { hits.hits.0._source.count: 1 }
+
+ - do: { search: { body: { _source: include.field1, query: { match_all: {} } } } }
+ - match: { hits.hits.0._source.include.field1: v1 }
+ - is_false: hits.hits.0._source.include.field2
+
+ - do: { search: { _source_include: include.field1, body: { _source: include.field2, query: { match_all: {} } } } }
+ - match: { hits.hits.0._source.include.field1: v1 }
+ - is_false: hits.hits.0._source.include.field2
+
+ - do: { search: { _source_include: include.field1, body: { query: { match_all: {} } } } }
+ - match: { hits.hits.0._source.include.field1: v1 }
+ - is_false: hits.hits.0._source.include.field2
+
+ - do: { search: { _source_exclude: count, body: { query: { match_all: {} } } } }
+ - match: { hits.hits.0._source.include: { field1 : v1 , field2: v2 }}
+ - is_false: hits.hits.0._source.count
+
+
+ - do:
+ search:
+ body:
+ _source: [ include.field1, include.field2 ]
+ query: { match_all: {} }
+ - match: { hits.hits.0._source.include.field1: v1 }
+ - match: { hits.hits.0._source.include.field2: v2 }
+ - is_false: hits.hits.0._source.count
+
+ - do:
+ search:
+ body:
+ _source:
+ include: [ include.field1, include.field2 ]
+ query: { match_all: {} }
+ - match: { hits.hits.0._source.include.field1: v1 }
+ - match: { hits.hits.0._source.include.field2: v2 }
+ - is_false: hits.hits.0._source.count
+
+ - do:
+ search:
+ body:
+ _source:
+ includes: include
+ excludes: "*.field2"
+ query: { match_all: {} }
+ - match: { hits.hits.0._source.include.field1: v1 }
+ - is_false: hits.hits.0._source.include.field2
+
+
+ - do:
+ search:
+ body:
+ fields: [ include.field2 ]
+ query: { match_all: {} }
+ - match: { hits.hits.0.fields: { include.field2 : [v2] }}
+ - is_false: hits.hits.0._source
+
+ - do:
+ search:
+ body:
+ fields: [ include.field2, _source ]
+ query: { match_all: {} }
+ - match: { hits.hits.0.fields: { include.field2 : [v2] }}
+ - is_true: hits.hits.0._source
+
diff --git a/rest-api-spec/test/search/20_default_values.yaml b/rest-api-spec/test/search/20_default_values.yaml
new file mode 100644
index 0000000..e8f1434
--- /dev/null
+++ b/rest-api-spec/test/search/20_default_values.yaml
@@ -0,0 +1,43 @@
+---
+"Default index":
+ - do:
+ indices.create:
+ index: test_2
+ - do:
+ indices.create:
+ index: test_1
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+
+ - do:
+ index:
+ index: test_2
+ type: test
+ id: 42
+ body: { foo: bar }
+
+ - do:
+ indices.refresh:
+ index: [test_1, test_2]
+
+ - do:
+ search:
+ index: _all
+ type: test
+ body:
+ query:
+ match:
+ foo: bar
+
+ - match: {hits.total: 2}
+ - match: {hits.hits.0._index: test_1 }
+ - match: {hits.hits.0._type: test }
+ - match: {hits.hits.0._id: "1" }
+
+ - match: {hits.hits.1._index: test_2 }
+ - match: {hits.hits.1._type: test }
+ - match: {hits.hits.1._id: "42" }
diff --git a/rest-api-spec/test/snapshot.get_repository/10_basic.yaml b/rest-api-spec/test/snapshot.get_repository/10_basic.yaml
new file mode 100644
index 0000000..1bb2fa0
--- /dev/null
+++ b/rest-api-spec/test/snapshot.get_repository/10_basic.yaml
@@ -0,0 +1,52 @@
+---
+setup:
+
+ - do:
+ snapshot.create_repository:
+ repository: test_repo1
+ body:
+ type: url
+ settings:
+ url: "http://snapshot.test1"
+
+ - do:
+ snapshot.create_repository:
+ repository: test_repo2
+ body:
+ type: url
+ settings:
+ url: "http://snapshot.test2"
+
+---
+"Get all repositories":
+ - do:
+ snapshot.get_repository: {}
+
+ - is_true: test_repo1
+ - is_true: test_repo2
+
+---
+"Get repository by name":
+ - do:
+ snapshot.get_repository:
+ repository: test_repo1
+
+ - is_true: test_repo1
+ - is_false: test_repo2
+
+---
+"Get missing repository by name":
+ - do:
+ snapshot.get_repository:
+ repository: test_repo2
+
+---
+"Get all repositories with local flag":
+ - do:
+ snapshot.get_repository:
+ local: true
+
+ - is_true: test_repo1
+ - is_true: test_repo2
diff --git a/rest-api-spec/test/suggest/10_basic.yaml b/rest-api-spec/test/suggest/10_basic.yaml
new file mode 100644
index 0000000..211e95b
--- /dev/null
+++ b/rest-api-spec/test/suggest/10_basic.yaml
@@ -0,0 +1,24 @@
+setup:
+ - do:
+ index:
+ index: test
+ type: test
+ id: testing_document
+ body:
+ body: Amsterdam meetup
+ - do:
+ indices.refresh: {}
+
+---
+"Basic tests for suggest API":
+
+ - do:
+ suggest:
+ body:
+ test_suggestion:
+ text: "The Amsterdma meetpu"
+ term:
+ field: body
+
+ - match: {test_suggestion.1.options.0.text: amsterdam}
+ - match: {test_suggestion.2.options.0.text: meetup}
diff --git a/rest-api-spec/test/termvector/10_basic.yaml b/rest-api-spec/test/termvector/10_basic.yaml
new file mode 100644
index 0000000..998c567
--- /dev/null
+++ b/rest-api-spec/test/termvector/10_basic.yaml
@@ -0,0 +1,35 @@
+setup:
+ - do:
+ indices.create:
+ index: testidx
+ body:
+ mappings:
+ testtype:
+ "properties":
+ "text":
+ "type" : "string"
+ "term_vector" : "with_positions_offsets"
+ - do:
+ index:
+ index: testidx
+ type: testtype
+ id: testing_document
+ body:
+ "text" : "The quick brown fox is brown."
+ - do:
+ indices.refresh: {}
+
+---
+"Basic tests for termvector get":
+
+ - do:
+ termvector:
+ index: testidx
+ type: testtype
+ id: testing_document
+ "term_statistics" : true
+
+
+ - match: {term_vectors.text.field_statistics.sum_doc_freq: 5}
+ - match: {term_vectors.text.terms.brown.doc_freq: 1}
+ - match: {term_vectors.text.terms.brown.tokens.0.start_offset: 10}
diff --git a/rest-api-spec/test/update/10_doc.yaml b/rest-api-spec/test/update/10_doc.yaml
new file mode 100644
index 0000000..b4581ed
--- /dev/null
+++ b/rest-api-spec/test/update/10_doc.yaml
@@ -0,0 +1,40 @@
+---
+"Partial document":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ foo: bar
+ count: 1
+ nested: { one: 1, two: 2 }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc:
+ foo: baz
+ nested:
+ one: 3
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1" }
+ - match: { _version: 2 }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: baz }
+ - match: { _source.count: 1 }
+ - match: { _source.nested.one: 3 }
+ - match: { _source.nested.two: 2 }
+
diff --git a/rest-api-spec/test/update/15_script.yaml b/rest-api-spec/test/update/15_script.yaml
new file mode 100644
index 0000000..c8cfc5f
--- /dev/null
+++ b/rest-api-spec/test/update/15_script.yaml
@@ -0,0 +1,79 @@
+---
+"Script":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ foo: bar
+ count: 1
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ script: "1"
+ body:
+ lang: mvel
+ script: "ctx._source.foo = bar"
+ params: { bar: 'xxx' }
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1" }
+ - match: { _version: 2 }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: xxx }
+ - match: { _source.count: 1 }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ lang: mvel
+ script: "ctx._source.foo = 'yyy'"
+
+ - match: { _index: test_1 }
+ - match: { _type: test }
+ - match: { _id: "1" }
+ - match: { _version: 3 }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: yyy }
+ - match: { _source.count: 1 }
+
+ - do:
+ catch: /script_lang not supported \[doesnotexist\]/
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ script: "1"
+ lang: "doesnotexist"
+ params: { bar: 'xxx' }
+
+ - do:
+ catch: /script_lang not supported \[doesnotexist\]/
+ update:
+ index: test_1
+ type: test
+ id: 1
+ lang: doesnotexist
+ script: "1"
+
diff --git a/rest-api-spec/test/update/20_doc_upsert.yaml b/rest-api-spec/test/update/20_doc_upsert.yaml
new file mode 100644
index 0000000..f34e030
--- /dev/null
+++ b/rest-api-spec/test/update/20_doc_upsert.yaml
@@ -0,0 +1,41 @@
+---
+"Doc upsert":
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: bar, count: 1 }
+ upsert: { foo: baz }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: baz }
+ - is_false: _source.count
+
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: bar, count: 1 }
+ upsert: { foo: baz }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: bar }
+ - match: { _source.count: 1 }
+
+
diff --git a/rest-api-spec/test/update/22_doc_as_upsert.yaml b/rest-api-spec/test/update/22_doc_as_upsert.yaml
new file mode 100644
index 0000000..af25730
--- /dev/null
+++ b/rest-api-spec/test/update/22_doc_as_upsert.yaml
@@ -0,0 +1,41 @@
+---
+"Doc as upsert":
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: bar, count: 1 }
+ doc_as_upsert: 1
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: bar }
+ - match: { _source.count: 1 }
+
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { count: 2 }
+ doc_as_upsert: 1
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: bar }
+ - match: { _source.count: 2 }
+
+
diff --git a/rest-api-spec/test/update/25_script_upsert.yaml b/rest-api-spec/test/update/25_script_upsert.yaml
new file mode 100644
index 0000000..64226b7
--- /dev/null
+++ b/rest-api-spec/test/update/25_script_upsert.yaml
@@ -0,0 +1,41 @@
+---
+"Script upsert":
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ script: "ctx._source.foo = bar"
+ params: { bar: 'xxx' }
+ upsert: { foo: baz }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: baz }
+
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ script: "ctx._source.foo = bar"
+ params: { bar: 'xxx' }
+ upsert: { foo: baz }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+
+ - match: { _source.foo: xxx }
+
+
diff --git a/rest-api-spec/test/update/30_internal_version.yaml b/rest-api-spec/test/update/30_internal_version.yaml
new file mode 100644
index 0000000..c9f3706
--- /dev/null
+++ b/rest-api-spec/test/update/30_internal_version.yaml
@@ -0,0 +1,47 @@
+---
+"Internal version":
+
+ - do:
+ catch: conflict
+ update:
+ index: test_1
+ type: test
+ id: 1
+ version: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - match: { _version: 1}
+
+ - do:
+ catch: conflict
+ update:
+ index: test_1
+ type: test
+ id: 1
+ version: 2
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ version: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - match: { _version: 2}
diff --git a/rest-api-spec/test/update/35_external_version.yaml b/rest-api-spec/test/update/35_external_version.yaml
new file mode 100644
index 0000000..a3ca3a9
--- /dev/null
+++ b/rest-api-spec/test/update/35_external_version.yaml
@@ -0,0 +1,40 @@
+---
+"External version":
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ version: 2
+ version_type: external
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - match: { _version: 2 }
+
+ - do:
+ catch: conflict
+ update:
+ index: test_1
+ type: test
+ id: 1
+ version: 2
+ version_type: external
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ version: 3
+ version_type: external
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - match: { _version: 3 }
diff --git a/rest-api-spec/test/update/40_routing.yaml b/rest-api-spec/test/update/40_routing.yaml
new file mode 100644
index 0000000..52cd938
--- /dev/null
+++ b/rest-api-spec/test/update/40_routing.yaml
@@ -0,0 +1,56 @@
+---
+"Routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ fields: _routing
+
+ - match: { fields._routing: "5"}
+
+ - do:
+ catch: missing
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ routing: 5
+ fields: foo
+ body:
+ doc: { foo: baz }
+
+ - match: { get.fields.foo: [baz] }
+
diff --git a/rest-api-spec/test/update/50_parent.yaml b/rest-api-spec/test/update/50_parent.yaml
new file mode 100644
index 0000000..3d15ea9
--- /dev/null
+++ b/rest-api-spec/test/update/50_parent.yaml
@@ -0,0 +1,79 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+---
+"Parent":
+
+ - do:
+ catch: /RoutingMissingException/
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ fields: [_parent, _routing]
+
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "5"}
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ fields: foo
+ body:
+ doc: { foo: baz }
+
+ - match: { get.fields.foo: [baz] }
+
+---
+"Parent omitted":
+
+ - do:
+ index:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body: { foo: bar }
+
+ - do:
+ catch: request
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+
diff --git a/rest-api-spec/test/update/55_parent_with_routing.yaml b/rest-api-spec/test/update/55_parent_with_routing.yaml
new file mode 100644
index 0000000..cc82802
--- /dev/null
+++ b/rest-api-spec/test/update/55_parent_with_routing.yaml
@@ -0,0 +1,63 @@
+---
+"Parent with routing":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ settings:
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ wait_for_status: green
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ routing: 4
+ parent: 5
+ fields: [_parent, _routing]
+
+ - match: { fields._parent: "5"}
+ - match: { fields._routing: "4"}
+
+ - do:
+ catch: missing
+ update:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ body:
+ doc: { foo: baz }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ routing: 4
+ fields: foo
+ body:
+ doc: { foo: baz }
+
+ - match: { get.fields.foo: [baz] }
+
diff --git a/rest-api-spec/test/update/60_refresh.yaml b/rest-api-spec/test/update/60_refresh.yaml
new file mode 100644
index 0000000..6048292
--- /dev/null
+++ b/rest-api-spec/test/update/60_refresh.yaml
@@ -0,0 +1,50 @@
+---
+"Refresh":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index.refresh_interval: -1
+ number_of_replicas: 0
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { term: { _id: 1 }}
+
+ - match: { hits.total: 0 }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 2
+ refresh: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query: { term: { _id: 2 }}
+
+ - match: { hits.total: 1 }
diff --git a/rest-api-spec/test/update/70_timestamp.yaml b/rest-api-spec/test/update/70_timestamp.yaml
new file mode 100644
index 0000000..368f750
--- /dev/null
+++ b/rest-api-spec/test/update/70_timestamp.yaml
@@ -0,0 +1,77 @@
+---
+"Timestamp":
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _timestamp:
+ enabled: 1
+ store: yes
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+# blank timestamp
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - is_true: fields._timestamp
+
+# milliseconds since epoch
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+ timestamp: 1372011280000
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - match: { fields._timestamp: 1372011280000 }
+
+# date format
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+ timestamp: 2013-06-23T18:14:40
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _timestamp
+
+ - match: { fields._timestamp: 1372011280000 }
+
diff --git a/rest-api-spec/test/update/75_ttl.yaml b/rest-api-spec/test/update/75_ttl.yaml
new file mode 100644
index 0000000..2075c6b
--- /dev/null
+++ b/rest-api-spec/test/update/75_ttl.yaml
@@ -0,0 +1,98 @@
+---
+"TTL":
+
+ - skip:
+ features: gtelte
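+  # The gtelte feature flag skips this test on runners that do not
+  # support the gt/lte range assertions used below.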
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _ttl:
+ enabled: 1
+ store: yes
+ default: 10s
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+# blank ttl
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 10000}
+ - gt: { fields._ttl: 0}
+
+# milliseconds
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+ ttl: 100000
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 100000}
+ - gt: { fields._ttl: 10000}
+
+# duration
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+ ttl: 20s
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ fields: _ttl
+
+ - lte: { fields._ttl: 20000}
+ - gt: { fields._ttl: 10000}
+
+# with timestamp
+
+ - do:
+ catch: /AlreadyExpiredException/
+ index:
+ index: test_1
+ type: test
+ id: 1
+ body: { foo: bar }
+ ttl: 20s
+ timestamp: 2013-06-23T18:14:40
+
diff --git a/rest-api-spec/test/update/80_fields.yaml b/rest-api-spec/test/update/80_fields.yaml
new file mode 100644
index 0000000..86d6afa
--- /dev/null
+++ b/rest-api-spec/test/update/80_fields.yaml
@@ -0,0 +1,20 @@
+---
+"Fields":
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ fields: foo,bar,_source
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - match: { get._source.foo: bar }
+ - match: { get.fields.foo: [bar] }
+ - is_false: get.fields.bar
+
+# TODO:
+#
+# - Add _routing
diff --git a/rest-api-spec/test/update/85_fields_meta.yaml b/rest-api-spec/test/update/85_fields_meta.yaml
new file mode 100644
index 0000000..97cefa1
--- /dev/null
+++ b/rest-api-spec/test/update/85_fields_meta.yaml
@@ -0,0 +1,51 @@
+---
+"Metadata Fields":
+
+ - skip:
+ version: "0 - 999"
+ reason: "Update doesn't return metadata fields, waiting for #3259"
+
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ mappings:
+ test:
+ _parent: { type: "foo" }
+ _timestamp:
+ enabled: 1
+ store: yes
+ _ttl:
+ enabled: 1
+ store: yes
+ default: 10s
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ fields: [ _parent, _routing, _timestamp, _ttl ]
+ body:
+ doc: { foo: baz }
+ upsert: { foo: bar }
+
+ - match: { get.fields._parent: "5" }
+ - match: { get.fields._routing: "5" }
+ - is_true: get.fields._timestamp
+ - is_true: get.fields._ttl
+
+ - do:
+ get:
+ index: test_1
+ type: test
+ id: 1
+ parent: 5
+ fields: [ _parent, _routing, _timestamp, _ttl ]
+
+
diff --git a/rest-api-spec/test/update/90_missing.yaml b/rest-api-spec/test/update/90_missing.yaml
new file mode 100644
index 0000000..d51c54c
--- /dev/null
+++ b/rest-api-spec/test/update/90_missing.yaml
@@ -0,0 +1,42 @@
+---
+"Missing document (partial doc)":
+
+ - do:
+ catch: missing
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body: { doc: { foo: bar } }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body: { doc: { foo: bar } }
+ ignore: 404
+
+---
+"Missing document (script)":
+
+
+ - do:
+ catch: missing
+ update:
+ index: test_1
+ type: test
+ id: 1
+ body:
+ script: "ctx._source.foo = bar"
+ params: { bar: 'xxx' }
+
+ - do:
+ update:
+ index: test_1
+ type: test
+ id: 1
+ ignore: 404
+ body:
+ script: "ctx._source.foo = bar"
+ params: { bar: 'xxx' }
diff --git a/rest-api-spec/test/update/TODO.txt b/rest-api-spec/test/update/TODO.txt
new file mode 100644
index 0000000..6eefbfd
--- /dev/null
+++ b/rest-api-spec/test/update/TODO.txt
@@ -0,0 +1,7 @@
+Tests missing for:
+
+# consistency
+# percolate
+# replication
+# retry_on_conflict
+# timeout
\ No newline at end of file
diff --git a/rest-api-spec/utils/Gemfile b/rest-api-spec/utils/Gemfile
new file mode 100644
index 0000000..313d822
--- /dev/null
+++ b/rest-api-spec/utils/Gemfile
@@ -0,0 +1,9 @@
+source 'https://rubygems.org'
+
+gem 'activesupport'
+gem 'rest-client'
+gem 'coderay'
+gem 'multi_json'
+gem 'thor'
+gem 'json'
+gem 'pry'
diff --git a/rest-api-spec/utils/Thorfile b/rest-api-spec/utils/Thorfile
new file mode 100644
index 0000000..d0d8190
--- /dev/null
+++ b/rest-api-spec/utils/Thorfile
@@ -0,0 +1,3 @@
+require File.expand_path('./thor/generate_api')
+require File.expand_path('./thor/generate_source')
+require File.expand_path('./thor/lister')
diff --git a/rest-api-spec/utils/thor/generate_api.rb b/rest-api-spec/utils/thor/generate_api.rb
new file mode 100644
index 0000000..8755bef
--- /dev/null
+++ b/rest-api-spec/utils/thor/generate_api.rb
@@ -0,0 +1,211 @@
+require 'thor'
+
+require 'pathname'
+require 'active_support/core_ext/hash/deep_merge'
+require 'active_support/inflector/methods'
+require 'rest_client'
+require 'json'
+require 'pry'
+
+module Elasticsearch
+
+ module API
+ module Utils
+ # controller.registerHandler(RestRequest.Method.GET, "/_cluster/health", this);
+ PATTERN_REST = /.*controller.registerHandler\(.*(?<method>GET|POST|PUT|DELETE|HEAD|OPTIONS|PATCH)\s*,\s*"(?<url>.*)"\s*,\s*.+\);/
+ # request.param("index"), request.paramAsBoolean("docs", indicesStatsRequest.docs()), etc
+ PATTERN_URL_PARAMS = /request.param.*\("(?<param>[a-z_]+)".*/
+ # controller.registerHandler(GET, "/{index}/_refresh", this)
+ PATTERN_URL_PARTS = /\{(?<part>[a-zA-Z0-9\_\-]+)\}/
+ # request.hasContent()
+      PATTERN_HAS_BODY = /request\.hasContent\(\)/
+
+ # Parses the Elasticsearch source code and returns a Hash of REST API information/specs.
+ #
+ # Example:
+ #
+ # {
+ # "cluster.health" => [
+ # { "method" => "GET",
+ # "path" => "/_cluster/health",
+ # "parts" => ["index"],
+ # "params" => ["index", "local", ... ],
+ # "body" => false
+      #       }
+      #     ]
+      #   }
+      #
+ def __parse_java_source(path)
+ path += '/' unless path =~ /\/$/ # Add trailing slash if missing
+ prefix = "src/main/java/org/elasticsearch/rest/action"
+
+ java_rest_files = Dir["#{path}#{prefix}/**/*.java"]
+
+ map = {}
+
+ java_rest_files.sort.each do |file|
+ content = File.read(file)
+ parts = file.gsub(path+prefix, '').split('/')
+ name = parts[0, parts.size-1].reject { |p| p =~ /^\s*$/ }.join('.')
+
+ # Remove the `admin` namespace
+ name.gsub! /admin\./, ''
+
+ # Extract params
+ url_params = content.scan(PATTERN_URL_PARAMS).map { |n| n.first }.sort
+
+ # Extract parts
+ url_parts = content.scan(PATTERN_URL_PARTS).map { |n| n.first }.sort
+
+ # Extract if body allowed
+ has_body = !!content.match(PATTERN_HAS_BODY)
+
+ # Extract HTTP method and path
+ content.scan(PATTERN_REST) do |method, path|
+ (map[name] ||= []) << { 'method' => method,
+ 'path' => path,
+ 'parts' => url_parts,
+ 'params' => url_params,
+ 'body' => has_body }
+ end
+
+ end
+
+ map
+ end
+
+ extend self
+ end
+
+ # Contains a generator which will parse the Elasticsearch *.java source files,
+ # extract information about REST API endpoints (URLs, HTTP methods, URL parameters, etc),
+ # and create a skeleton of the JSON API specification file for each endpoint.
+ #
+ # Usage:
+ #
+ # $ thor help api:generate:spec
+ #
+ # Example:
+ #
+ # time thor api:generate:spec \
+ # --force \
+ # --verbose \
+ # --crawl \
+ # --elasticsearch=/path/to/elasticsearch/source/code
+ #
+ # Features:
+ #
+ # * Extract the API name from the source filename (eg. `admin/cluster/health/RestClusterHealthAction.java` -> `cluster.health`)
+ # * Extract the URLs from the `registerHandler` statements
+ # * Extract the URL parts (eg. `{index}`) from the URLs
+ # * Extract the URL parameters (eg. `{timeout}`) from the `request.param("ABC")` statements
+ # * Detect whether HTTP body is allowed for the API from `request.hasContent()` statements
+ # * Search the <http://elasticsearch.org> website to get proper documentation URLs
+ # * Assemble the JSON format for the API spec
+ #
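+    # The assembled spec skeleton looks like this (values are illustrative,
+    # not taken from a real run):
+    #
+    #     {
+    #       "cluster.health": {
+    #         "documentation": "http://elasticsearch.org/guide/...",
+    #         "methods": ["GET"],
+    #         "url": {
+    #           "path": "/_cluster/health",
+    #           "paths": ["/_cluster/health"],
+    #           "parts": { "index": {} },
+    #           "params": { "local": {}, "timeout": {} }
+    #         },
+    #         "body": null
+    #       }
+    #     }
+    #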
+ class JsonGenerator < Thor
+ namespace 'api:spec'
+
+ include Thor::Actions
+
+ __root = Pathname( File.expand_path('../../..', __FILE__) )
+
+ # Usage: thor help api:generate:spec
+ #
+ desc "generate", "Generate JSON API spec files from Elasticsearch source code"
+ method_option :force, type: :boolean, default: false, desc: 'Overwrite the output'
+ method_option :verbose, type: :boolean, default: false, desc: 'Output more information'
+ method_option :output, default: __root.join('tmp/out'), desc: 'Path to output directory'
+ method_option :elasticsearch, default: __root.join('tmp/elasticsearch'), desc: 'Path to directory with Elasticsearch source code'
+ method_option :crawl, type: :boolean, default: false, desc: 'Extract URLs from Elasticsearch website'
+
+ def generate
+ self.class.source_root File.expand_path('../', __FILE__)
+
+ @output = Pathname(options[:output])
+
+ rest_actions = Utils.__parse_java_source(options[:elasticsearch].to_s)
+
+ if rest_actions.empty?
+ say_status 'ERROR', 'Cannot find Elasticsearch source in ' + options[:elasticsearch].to_s, :red
+ exit(1)
+ end
+
+ rest_actions.each do |name, info|
+ doc_url = ""
+ parts = info.reduce([]) { |sum, n| sum |= n['parts']; sum }.reduce({}) { |sum, n| sum[n] = {}; sum }
+ params = info.reduce([]) { |sum, n| sum |= n['params']; sum }.reduce({}) { |sum, n| sum[n] = {}; sum }
+
+ if options[:crawl]
+ begin
+ response = RestClient.get "http://search.elasticsearch.org/elastic-search-website/guide/_search?q=#{URI.escape(name.gsub(/\./, ' '))}"
+ hits = JSON.load(response)['hits']['hits']
+ if hit = hits.first
+ if hit['_score'] > 0.2
+ doc_title = hit['fields']['title']
+ doc_url = "http://elasticsearch.org" + hit['fields']['url']
+ end
+ end
+ rescue Exception => e
+ puts "[!] ERROR: #{e.inspect}"
+ end
+ end
+
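+ # Assemble the spec; `body` is an empty Hash when the endpoint accepts
+ # a request body and nil otherwise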
+ spec = {
+ name => {
+ 'documentation' => doc_url,
+
+ 'methods' => info.map { |n| n['method'] }.uniq,
+
+ 'url' => {
+ 'path' => info.first['path'],
+ 'paths' => info.map { |n| n['path'] }.uniq,
+ 'parts' => parts,
+ 'params' => params
+ },
+
+ 'body' => info.first['body'] ? {} : nil
+ }
+ }
+
+ json = JSON.pretty_generate(spec, indent: ' ', array_nl: '', object_nl: "\n", space: ' ', space_before: ' ')
+
+ # Fix JSON array formatting
+ json.gsub!(/\[\s+/, '[')
+ json.gsub!(/, {2,}"/, ', "')
+
+ create_file @output.join( "#{name}.json" ), json + "\n"
+
+ if options[:verbose]
+ lines = json.split("\n")
+ say_status 'JSON',
+ lines.first + "\n" + lines[1, lines.size].map { |l| ' '*14 + l }.join("\n")
+ end
+ end
+ end
+
+ private
+
+ end
+ end
+end
diff --git a/rest-api-spec/utils/thor/generate_source.rb b/rest-api-spec/utils/thor/generate_source.rb
new file mode 100644
index 0000000..2002da1
--- /dev/null
+++ b/rest-api-spec/utils/thor/generate_source.rb
@@ -0,0 +1,117 @@
+# encoding: UTF-8
+
+require 'thor'
+
+require 'pathname'
+require 'active_support/core_ext/hash/deep_merge'
+require 'active_support/inflector'
+require 'multi_json'
+require 'coderay'
+require 'pry'
+
+module Elasticsearch
+
+ module API
+
+ # A command line application based on [Thor](https://github.com/wycats/thor),
+ # which will read the JSON API spec file(s), and generate
+ # the Ruby source code (one file per API endpoint) with correct
+ # module namespace, method names, and RDoc documentation,
+ # as well as test files for each endpoint.
+ #
+ # Currently it only generates Ruby source, but can easily be
+ # extended and adapted to generate source code for other
+ # programming languages.
+ #
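+ # Example (illustrative; adjust the path to your JSON spec files):
+ #
+ #     $ thor api:code:generate --verbose api-spec/cluster.health.json
+ #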
+ class SourceGenerator < Thor
+ namespace 'api:code'
+
+ include Thor::Actions
+
+ __root = Pathname( File.expand_path('../../..', __FILE__) )
+
+ desc "generate <PATH TO JSON SPEC FILES>", "Generate source code and tests from the REST API JSON specification"
+ method_option :language, default: 'ruby', desc: 'Programming language'
+ method_option :force, type: :boolean, default: false, desc: 'Overwrite the output'
+ method_option :verbose, type: :boolean, default: false, desc: 'Output more information'
+ method_option :input, default: File.expand_path('../../../api-spec', __FILE__), desc: 'Path to directory with JSON API specs'
+ method_option :output, default: File.expand_path('../../../tmp/out', __FILE__), desc: 'Path to output directory'
+
+ def generate(*files)
+ self.class.source_root File.expand_path('../', __FILE__)
+
+ @input = Pathname(options[:input])
+ @output = Pathname(options[:output])
+
+ # -- Test helper
+ copy_file "templates/ruby/test_helper.rb", @output.join('test').join('test_helper.rb') if options[:language] == 'ruby'
+
+ files.each do |file|
+ @path = Pathname(file)
+ @json = MultiJson.load( File.read(@path) )
+ @spec = @json.values.first
+ say_status 'json', @path, :yellow
+ # say_status 'JSON', @spec.inspect, options[:verbose]
+
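+ # Split the API name (e.g. `cluster.health`) into the module namespace
+ # (`['cluster']`) and the method name (`health`)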
+ @full_namespace = @json.keys.first.split('.')
+ @namespace_depth = @full_namespace.size > 0 ? @full_namespace.size-1 : 0
+ @module_namespace = @full_namespace[0, @namespace_depth]
+ @method_name = @full_namespace.last
+
+ # -- Ruby files
+
+ @path_to_file = @output.join('api').join( @module_namespace.join('/') ).join("#{@method_name}.rb")
+
+ empty_directory @output.join('api').join( @module_namespace.join('/') )
+
+ template "templates/#{options[:language]}/method.erb", @path_to_file
+
+ if options[:verbose]
+ colorized_output = CodeRay.scan_file(@path_to_file, :ruby).terminal
+ lines = colorized_output.split("\n")
+ say_status options[:language].downcase,
+ lines.first + "\n" + lines[1, lines.size].map { |l| ' '*14 + l }.join("\n"),
+ :yellow
+ end
+
+ # --- Test files
+
+ @test_directory = @output.join('test/api').join( @module_namespace.join('/') )
+ @test_file = @test_directory.join("#{@method_name}_test.rb")
+
+ empty_directory @test_directory
+ template "templates/#{options[:language]}/test.erb", @test_file
+
+ if options[:verbose]
+ colorized_output = CodeRay.scan_file(@test_file, :ruby).terminal
+ lines = colorized_output.split("\n")
+ say_status options[:language].downcase,
+ lines.first + "\n" + lines[1, lines.size].map { |l| ' '*14 + l }.join("\n"),
+ :yellow
+ say '▬'*terminal_width
+ end
+ end
+
+ # -- Tree output
+
+ if options[:verbose] && system('which tree > /dev/null 2>&1')
+ lines = `tree #{@output}`.split("\n")
+ say_status 'tree',
+ lines.first + "\n" + lines[1, lines.size].map { |l| ' '*14 + l }.join("\n")
+ end
+ end
+
+ private
+
+ # Create the hierarchy of directories based on API namespaces
+ #
+ def __create_directories(key, value)
+ unless value['documentation']
+ empty_directory @output.join(key)
+ __create_directories *value.to_a.first
+ end
+ end
+
+ end
+ end
+end
diff --git a/rest-api-spec/utils/thor/lister.rb b/rest-api-spec/utils/thor/lister.rb
new file mode 100644
index 0000000..2402494
--- /dev/null
+++ b/rest-api-spec/utils/thor/lister.rb
@@ -0,0 +1,41 @@
+# encoding: UTF-8
+
+require 'thor'
+
+require 'pathname'
+
+module Elasticsearch
+
+ module API
+
+ class Lister < Thor
+ namespace 'api'
+
+ desc "list <PATH DIRECTORY WITH JSON SPEC FILES>", "List all the REST API endpoints from the JSON specification"
+ method_option :verbose, type: :boolean, default: false, desc: 'Output more information'
+ method_option :format, default: 'text', desc: 'Output format (text, json)'
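+ # Example (illustrative): `thor api:list ../../api --format=json`
+ #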
+ def list(directory)
+ input = Pathname(directory).join('*.json')
+ apis = Dir[input.to_s].map do |f|
+ File.basename(f, '.json')
+ end.sort
+
+ if options[:verbose]
+ say_status 'Count', apis.size
+ say '▬'*terminal_width
+ end
+
+ case options[:format]
+ when 'text'
+ apis.each { |a| puts "* #{a}" }
+ when 'json'
+ puts apis.inspect
+ else
+ puts "[!] ERROR: Unknown output format '#{options[:format]}'"
+ exit(1)
+ end
+ end
+ end
+
+ end
+end
diff --git a/rest-api-spec/utils/thor/templates/ruby/method.erb b/rest-api-spec/utils/thor/templates/ruby/method.erb
new file mode 100644
index 0000000..7f13d2f
--- /dev/null
+++ b/rest-api-spec/utils/thor/templates/ruby/method.erb
@@ -0,0 +1,64 @@
+module Elasticsearch
+ module API
+ <%- @module_namespace.each_with_index do |name, i| -%>
+ <%= ' '*i %>module <%= name.capitalize %>
+ <%- end -%>
+ <%= ' '*@namespace_depth %>module Actions
+
+ <%= ' '*@namespace_depth %># <%= @spec['description'] || 'TODO: Description' %>
+ <%= ' '*@namespace_depth %>#
+<%# URL parts -%>
+ <%- @spec['url']['parts'].each do |name,info| -%>
+ <%- info['type'] = 'String' if info['type'] == 'enum' # Rename 'enums' to 'strings' -%>
+ <%= ' '*@namespace_depth + "# @option arguments [#{info['type'] ? info['type'].capitalize : 'String'}] :#{name} #{info['description']}" + ( info['required'] ? ' (*Required*)' : '' ) -%><%= " (options: #{info['options'].join(', ')})" if info['options'] -%>
+ <%= "\n" -%>
+ <%- end -%>
+<%# Body -%>
+<%= ' '*(@namespace_depth+3) + '# @option arguments [Hash] :body ' + (@spec['body']['description'] || 'TODO: Description') + (@spec['body']['required'] ? ' (*Required*)' : '') + "\n" if @spec['body'] -%>
+<%# URL parameters -%>
+ <%- @spec['url']['params'].each do |name,info| -%>
+ <%- info['type'] = 'String' if info['type'] == 'enum' # Rename 'enums' to 'strings' -%>
+ <%= ' '*@namespace_depth + "# @option arguments [#{info['type'] ? info['type'].capitalize : 'String'}] :#{name} #{info['description']}" -%><%= " (options: #{info['options'].join(', ')})" if info['options'] -%>
+ <%= "\n" -%>
+ <%- end -%>
+ <%= ' '*@namespace_depth -%>#
+<%# Documentation link -%>
+ <%= ' '*@namespace_depth %># @see <%= @spec['documentation'] %>
+ <%= ' '*@namespace_depth %>#
+<%# Method definition -%>
+ <%= ' '*@namespace_depth -%>def <%= @method_name %>(arguments={})
+<%# Required arguments -%>
+ <%- @spec['url']['parts'].select { |name, info| info['required'] }.each do |name, info| -%>
+ <%= ' '*(@namespace_depth+1) + "raise ArgumentError, \"Required argument '#{name}' missing\" unless arguments[:#{name}]" + "\n" -%>
+ <%- end -%>
+<%- if @spec['body'] && @spec['body']['required'] -%>
+ <%= ' '*(@namespace_depth+1) + "raise ArgumentError, \"Required argument 'body' missing\" unless arguments[:body]" + "\n" -%>
+<%- end -%>
+<%# Method, path, params, body -%>
+ <%= ' '*@namespace_depth %> method = '<%= @spec['methods'].first %>'
+ <%- unless @spec['url']['parts'].empty? -%>
+ <%= ' '*@namespace_depth %> path = "<%= @spec['url']['path'].split('/').compact.reject {|p| p =~ /^\s*$/}.map do |p|
+ p =~ /\{/ ? "\#\{arguments[:#{p.tr('{}', '')}]\}" : p
+ end.join('/') %>"
+ <%- else -%>
+ <%= ' '*@namespace_depth %> path = "<%= @spec['url']['path'] %>"
+ <%- end -%>
+ <%- unless @spec['url']['params'].keys.empty? -%>
+ <%= ' '*@namespace_depth %> params = arguments.select do |k,v|
+ <%= ' '*@namespace_depth %> [ :<%= @spec['url']['params'].keys.join(",\n#{' '*(@namespace_depth+6)}:") %> ].include?(k)
+ <%= ' '*@namespace_depth %> end
+ <%= ' '*@namespace_depth %> # Normalize Ruby 1.8 and Ruby 1.9 Hash#select behaviour
+ <%= ' '*@namespace_depth %> params = Hash[params] unless params.is_a?(Hash)
+ <%- else -%>
+ <%= ' '*@namespace_depth %> params = {}
+ <%- end -%>
+ <%= ' '*@namespace_depth %> body = <%= @spec['body'].nil? ? 'nil' : 'arguments[:body]' %>
+<%# Perform request %>
+ <%= ' '*@namespace_depth %> perform_request(method, path, params, body).body
+ <%= ' '*@namespace_depth %>end
+ <%- @namespace_depth.downto(1) do |i| -%>
+ <%= ' '*(i-1) %>end
+ <%- end if @namespace_depth > 0 -%>
+ end
+ end
+end
diff --git a/rest-api-spec/utils/thor/templates/ruby/test.erb b/rest-api-spec/utils/thor/templates/ruby/test.erb
new file mode 100644
index 0000000..0825d81
--- /dev/null
+++ b/rest-api-spec/utils/thor/templates/ruby/test.erb
@@ -0,0 +1,26 @@
+require 'test_helper'
+
+module Elasticsearch
+ module Test
+ class <%= @module_namespace.empty? ? @method_name.camelize : @module_namespace.map {|n| n.capitalize}.join + @method_name.camelize %>Test < ::Test::Unit::TestCase
+
+ context "<%= @module_namespace.empty? ? '' : @module_namespace.map {|n| n.capitalize}.join + ': ' %><%= @method_name.humanize %>" do
+ subject { FakeClient.new(nil) }
+
+ should "perform correct request" do
+ subject.expects(:perform_request).with do |method, url, params, body|
+ assert_equal 'FAKE', method
+ assert_equal 'test', url
+ assert_equal Hash.new, params
+ <%= @spec['body'].nil? ? 'assert_nil body' : 'assert_equal Hash.new, body' %>
+ true
+ end.returns(FakeResponse.new)
+
+ subject.<%= @full_namespace.join('.') %>
+ end
+
+ end
+
+ end
+ end
+end
diff --git a/rest-api-spec/utils/thor/templates/ruby/test_helper.rb b/rest-api-spec/utils/thor/templates/ruby/test_helper.rb
new file mode 100644
index 0000000..6b822a1
--- /dev/null
+++ b/rest-api-spec/utils/thor/templates/ruby/test_helper.rb
@@ -0,0 +1,54 @@
+RUBY_1_8 = defined?(RUBY_VERSION) && RUBY_VERSION < '1.9'
+
+require 'rubygems' if RUBY_1_8
+
+require 'simplecov' and SimpleCov.start { add_filter "/test|test_/" } if ENV["COVERAGE"]
+
+require 'test/unit'
+require 'shoulda-context'
+require 'mocha/setup'
+require 'turn' unless ENV["TM_FILEPATH"] || ENV["NOTURN"] || RUBY_1_8
+
+require 'require-prof' if ENV["REQUIRE_PROF"]
+Dir[ File.expand_path('../../lib/elasticsearch/api/**/*.rb', __FILE__) ].each do |f|
+ puts 'Loading: ' + f.to_s if ENV['DEBUG']
+ require f
+end
+RequireProf.print_timing_infos if ENV["REQUIRE_PROF"]
+
+module Elasticsearch
+ module Test
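+ # Recursively walk a module's constants and return the module together with
+ # all of its nested modules (used below to discover the generated `Actions` modules)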
+ def __full_namespace(o)
+ o.constants.inject([o]) do |sum, c|
+ m = o.const_get(c.to_s.to_sym)
+ sum << __full_namespace(m).flatten if m.is_a?(Module)
+ sum
+ end.flatten
+ end; module_function :__full_namespace
+
+ class FakeClient
+ # Include all "Actions" modules into the fake client
+ Elasticsearch::Test.__full_namespace(Elasticsearch::API).select { |m| m.to_s =~ /Actions$/ }.each do |m|
+ puts "Including: #{m}" if ENV['DEBUG']
+ include m
+ end
+
+ def perform_request(method, path, params, body)
+ puts "PERFORMING REQUEST:", "--> #{method.to_s.upcase} #{path} #{params} #{body}"
+ FakeResponse.new(200, 'FAKE', {})
+ end
+ end
+
+ FakeResponse = Struct.new(:status, :body, :headers) do
+ def status
+ values[0] || 200
+ end
+ def body
+ values[1] || '{}'
+ end
+ def headers
+ values[2] || {}
+ end
+ end
+ end
+end
diff --git a/src/deb/control/conffiles b/src/deb/control/conffiles
new file mode 100644
index 0000000..98e0f5a
--- /dev/null
+++ b/src/deb/control/conffiles
@@ -0,0 +1,4 @@
+/etc/init.d/elasticsearch
+/etc/default/elasticsearch
+/etc/elasticsearch/logging.yml
+/etc/elasticsearch/elasticsearch.yml
diff --git a/src/deb/control/control b/src/deb/control/control
new file mode 100644
index 0000000..b6207b2
--- /dev/null
+++ b/src/deb/control/control
@@ -0,0 +1,38 @@
+Package: elasticsearch
+Version: [[version]]
+Architecture: all
+Maintainer: Elasticsearch Team <info@elasticsearch.com>
+Depends: libc6, adduser
+Section: web
+Priority: optional
+Homepage: http://www.elasticsearch.org/
+Description: Open Source, Distributed, RESTful Search Engine
+ Elasticsearch is a distributed RESTful search engine built for the cloud.
+ .
+ Features include:
+ .
+ + Distributed and Highly Available Search Engine.
+ - Each index is fully sharded with a configurable number of shards.
+ - Each shard can have one or more replicas.
+ - Read / Search operations are performed on any one of the replica shards.
+ + Multi Tenant with Multi Types.
+ - Support for more than one index.
+ - Support for more than one type per index.
+ - Index level configuration (number of shards, index storage, ...).
+ + A varied set of APIs
+ - HTTP RESTful API
+ - Native Java API.
+ - All APIs perform automatic node operation rerouting.
+ + Document oriented
+ - No need for upfront schema definition.
+ - Schema can be defined per type for customization of the indexing process.
+ + Reliable, Asynchronous Write Behind for long-term persistence.
+ + (Near) Real Time Search.
+ + Built on top of Lucene
+ - Each shard is a fully functional Lucene index
+ - All the power of Lucene easily exposed through simple
+ configuration/plugins.
+ + Per operation consistency
+ - Single document level operations are atomic, consistent, isolated and
+ durable.
+ + Open Source under Apache 2 License.
diff --git a/src/deb/control/postinst b/src/deb/control/postinst
new file mode 100755
index 0000000..166db9c
--- /dev/null
+++ b/src/deb/control/postinst
@@ -0,0 +1,52 @@
+#!/bin/sh
+set -e
+
+[ -f /etc/default/elasticsearch ] && . /etc/default/elasticsearch
+
+startElasticsearch() {
+ if [ -x "/etc/init.d/elasticsearch" ]; then
+ if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
+ invoke-rc.d elasticsearch start || true
+ else
+ /etc/init.d/elasticsearch start || true
+ fi
+ fi
+}
+
+case "$1" in
+ configure)
+ [ -z "$ES_USER" ] && ES_USER="elasticsearch"
+ [ -z "$ES_GROUP" ] && ES_GROUP="elasticsearch"
+ if ! getent group "$ES_GROUP" > /dev/null 2>&1 ; then
+ addgroup --system "$ES_GROUP" --quiet
+ fi
+ if ! id $ES_USER > /dev/null 2>&1 ; then
+ adduser --system --home /usr/share/elasticsearch --no-create-home \
+ --ingroup "$ES_GROUP" --disabled-password --shell /bin/false \
+ "$ES_USER"
+ fi
+
+ # Set user permissions on /var/log/elasticsearch and /var/lib/elasticsearch
+ mkdir -p /var/log/elasticsearch /var/lib/elasticsearch
+ chown -R $ES_USER:$ES_GROUP /var/log/elasticsearch /var/lib/elasticsearch
+ chmod 755 /var/log/elasticsearch /var/lib/elasticsearch
+
+ # configuration files should not be modifiable by elasticsearch user, as this can be a security issue
+ chown -Rh root:root /etc/elasticsearch/*
+ chmod 755 /etc/elasticsearch
+ find /etc/elasticsearch -type f -exec chmod 644 {} ';'
+ find /etc/elasticsearch -type d -exec chmod 755 {} ';'
+
+ # if $2 is set, this is an upgrade
+ if [ -n "$2" ] && [ "$RESTART_ON_UPGRADE" = "true" ] ; then
+ startElasticsearch
+ # this is a fresh installation
+ elif [ -z "$2" ] ; then
+ echo "### NOT starting elasticsearch by default on bootup, please execute"
+ echo " sudo update-rc.d elasticsearch defaults 95 10"
+ echo "### In order to start elasticsearch, execute"
+ echo " sudo /etc/init.d/elasticsearch start"
+ fi
+ ;;
+esac
+
diff --git a/src/deb/control/postrm b/src/deb/control/postrm
new file mode 100755
index 0000000..59b6048
--- /dev/null
+++ b/src/deb/control/postrm
@@ -0,0 +1,33 @@
+#!/bin/sh
+set -e
+
+case "$1" in
+ remove)
+ # Remove logs
+ rm -rf /var/log/elasticsearch
+
+ # remove **only** empty data dir
+ rmdir -p --ignore-fail-on-non-empty /var/lib/elasticsearch
+ ;;
+
+ purge)
+ # Remove service
+ update-rc.d elasticsearch remove >/dev/null || true
+
+ # Remove logs and data
+ rm -rf /var/log/elasticsearch /var/lib/elasticsearch
+
+ # Remove user/group
+ deluser elasticsearch || true
+ delgroup elasticsearch || true
+ ;;
+
+ upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+ # Nothing to do here
+ ;;
+
+ *)
+ echo "$0 called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
diff --git a/src/deb/control/prerm b/src/deb/control/prerm
new file mode 100755
index 0000000..fb3c9eb
--- /dev/null
+++ b/src/deb/control/prerm
@@ -0,0 +1,26 @@
+#!/bin/sh
+set -e
+
+[ -f /etc/default/elasticsearch ] && . /etc/default/elasticsearch
+
+stopElasticsearch() {
+ if [ -x "/etc/init.d/elasticsearch" ]; then
+ if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
+ invoke-rc.d elasticsearch stop || true
+ else
+ /etc/init.d/elasticsearch stop || true
+ fi
+ fi
+}
+
+case "$1" in
+ upgrade)
+ if [ "$RESTART_ON_UPGRADE" = "true" ] ; then
+ stopElasticsearch
+ fi
+ ;;
+ remove)
+ stopElasticsearch
+ ;;
+esac
+
diff --git a/src/deb/copyright b/src/deb/copyright
new file mode 100644
index 0000000..ac4d7b0
--- /dev/null
+++ b/src/deb/copyright
@@ -0,0 +1,17 @@
+Copyright 2013 Elasticsearch <info@elasticsearch.org>
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the complete text of the Apache version 2.0 license
+ can be found in "/usr/share/common-licenses/Apache-2.0".
diff --git a/src/deb/default/elasticsearch b/src/deb/default/elasticsearch
new file mode 100644
index 0000000..b2aecd8
--- /dev/null
+++ b/src/deb/default/elasticsearch
@@ -0,0 +1,44 @@
+# Run Elasticsearch as this user ID and group ID
+#ES_USER=elasticsearch
+#ES_GROUP=elasticsearch
+
+# Heap Size (defaults to 256m min, 1g max)
+#ES_HEAP_SIZE=2g
+
+# Heap new generation
+#ES_HEAP_NEWSIZE=
+
+# max direct memory
+#ES_DIRECT_SIZE=
+
+# Maximum number of open files, defaults to 65535.
+#MAX_OPEN_FILES=65535
+
+# Maximum locked memory size. Set to "unlimited" if you use the
+# bootstrap.mlockall option in elasticsearch.yml. You must also set
+# ES_HEAP_SIZE.
+#MAX_LOCKED_MEMORY=unlimited
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+#MAX_MAP_COUNT=262144
+
+# Elasticsearch log directory
+#LOG_DIR=/var/log/elasticsearch
+
+# Elasticsearch data directory
+#DATA_DIR=/var/lib/elasticsearch
+
+# Elasticsearch work directory
+#WORK_DIR=/tmp/elasticsearch
+
+# Elasticsearch configuration directory
+#CONF_DIR=/etc/elasticsearch
+
+# Elasticsearch configuration file (elasticsearch.yml)
+#CONF_FILE=/etc/elasticsearch/elasticsearch.yml
+
+# Additional Java OPTS
+#ES_JAVA_OPTS=
+
+# Restart Elasticsearch when the package is upgraded ("true"; any other value disables the restart)
+#RESTART_ON_UPGRADE=true
diff --git a/src/deb/init.d/elasticsearch b/src/deb/init.d/elasticsearch
new file mode 100755
index 0000000..bd48fe1
--- /dev/null
+++ b/src/deb/init.d/elasticsearch
@@ -0,0 +1,203 @@
+#!/bin/sh
+#
+# /etc/init.d/elasticsearch -- startup script for Elasticsearch
+#
+# Written by Miquel van Smoorenburg <miquels@cistron.nl>.
+# Modified for Debian GNU/Linux by Ian Murdock <imurdock@gnu.ai.mit.edu>.
+# Modified for Tomcat by Stefan Gybas <sgybas@debian.org>.
+# Modified for Tomcat6 by Thierry Carrez <thierry.carrez@ubuntu.com>.
+# Additional improvements by Jason Brittain <jason.brittain@mulesoft.com>.
+# Modified by Nicolas Huray for Elasticsearch <nicolas.huray@gmail.com>.
+#
+### BEGIN INIT INFO
+# Provides: elasticsearch
+# Required-Start: $network $remote_fs $named
+# Required-Stop: $network $remote_fs $named
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Starts elasticsearch
+# Description: Starts elasticsearch using start-stop-daemon
+### END INIT INFO
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+NAME=elasticsearch
+DESC="Elasticsearch Server"
+DEFAULT=/etc/default/$NAME
+
+if [ `id -u` -ne 0 ]; then
+ echo "You need root privileges to run this script"
+ exit 1
+fi
+
+
+. /lib/lsb/init-functions
+
+if [ -r /etc/default/rcS ]; then
+ . /etc/default/rcS
+fi
+
+
+# The following variables can be overwritten in $DEFAULT
+
+# Run Elasticsearch as this user ID and group ID
+ES_USER=elasticsearch
+ES_GROUP=elasticsearch
+
+# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT)
+JDK_DIRS="/usr/lib/jvm/java-7-oracle /usr/lib/jvm/java-7-openjdk /usr/lib/jvm/java-7-openjdk-amd64/ /usr/lib/jvm/java-7-openjdk-armhf /usr/lib/jvm/java-7-openjdk-i386/ /usr/lib/jvm/java-6-sun /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-openjdk-armhf /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/default-java"
+
+# Look for the right JVM to use
+for jdir in $JDK_DIRS; do
+ if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
+ JAVA_HOME="$jdir"
+ fi
+done
+export JAVA_HOME
+
+# Directory where the Elasticsearch binary distribution resides
+ES_HOME=/usr/share/$NAME
+
+# Heap Size (defaults to 256m min, 1g max)
+#ES_HEAP_SIZE=2g
+
+# Heap new generation
+#ES_HEAP_NEWSIZE=
+
+# max direct memory
+#ES_DIRECT_SIZE=
+
+# Additional Java OPTS
+#ES_JAVA_OPTS=
+
+# Maximum number of open files
+MAX_OPEN_FILES=65535
+
+# Maximum amount of locked memory
+#MAX_LOCKED_MEMORY=
+
+# Elasticsearch log directory
+LOG_DIR=/var/log/$NAME
+
+# Elasticsearch data directory
+DATA_DIR=/var/lib/$NAME
+
+# Elasticsearch work directory
+WORK_DIR=/tmp/$NAME
+
+# Elasticsearch configuration directory
+CONF_DIR=/etc/$NAME
+
+# Elasticsearch configuration file (elasticsearch.yml)
+CONF_FILE=$CONF_DIR/elasticsearch.yml
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+MAX_MAP_COUNT=262144
+
+# End of variables that can be overwritten in $DEFAULT
+
+# overwrite settings from default file
+if [ -f "$DEFAULT" ]; then
+ . "$DEFAULT"
+fi
+
+# Define other required variables
+PID_FILE=/var/run/$NAME.pid
+DAEMON=$ES_HOME/bin/elasticsearch
+DAEMON_OPTS="-d -p $PID_FILE -Des.default.config=$CONF_FILE -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.work=$WORK_DIR -Des.default.path.conf=$CONF_DIR"
+
+export ES_HEAP_SIZE
+export ES_HEAP_NEWSIZE
+export ES_DIRECT_SIZE
+export ES_JAVA_OPTS
+
+# Check DAEMON exists
+test -x $DAEMON || exit 0
+
+checkJava() {
+ if [ -x "$JAVA_HOME/bin/java" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+ else
+ JAVA=`which java`
+ fi
+
+ if [ ! -x "$JAVA" ]; then
+ echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
+ exit 1
+ fi
+}
+
+case "$1" in
+ start)
+ checkJava
+
+ if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then
+ log_failure_msg "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
+ exit 1
+ fi
+
+ log_daemon_msg "Starting $DESC"
+
+ pid=`pidofproc -p $PID_FILE elasticsearch`
+ if [ -n "$pid" ] ; then
+ log_begin_msg "Already running."
+ log_end_msg 0
+ exit 0
+ fi
+
+ # Prepare environment
+ mkdir -p "$LOG_DIR" "$DATA_DIR" "$WORK_DIR" && chown "$ES_USER":"$ES_GROUP" "$LOG_DIR" "$DATA_DIR" "$WORK_DIR"
+ touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
+
+ if [ -n "$MAX_OPEN_FILES" ]; then
+ ulimit -n $MAX_OPEN_FILES
+ fi
+
+ if [ -n "$MAX_LOCKED_MEMORY" ]; then
+ ulimit -l $MAX_LOCKED_MEMORY
+ fi
+
+ if [ -n "$MAX_MAP_COUNT" ]; then
+ sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
+ fi
+
+ # Start Daemon
+ start-stop-daemon --start -b --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
+ log_end_msg $?
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC"
+
+ if [ -f "$PID_FILE" ]; then
+ start-stop-daemon --stop --pidfile "$PID_FILE" \
+ --user "$ES_USER" \
+ --retry=TERM/20/KILL/5 >/dev/null
+ RETVAL=$?
+ if [ $RETVAL -eq 1 ]; then
+ log_progress_msg "$DESC is not running but pid file exists, cleaning up"
+ elif [ $RETVAL -eq 3 ]; then
+ PID="`cat $PID_FILE`"
+ log_failure_msg "Failed to stop $DESC (pid $PID)"
+ exit 1
+ fi
+ rm -f "$PID_FILE"
+ else
+ log_progress_msg "(not running)"
+ fi
+ log_end_msg 0
+ ;;
+ status)
+ status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ if [ -f "$PID_FILE" ]; then
+ $0 stop
+ sleep 1
+ fi
+ $0 start
+ ;;
+ *)
+ log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}"
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/src/deb/lintian/elasticsearch b/src/deb/lintian/elasticsearch
new file mode 100644
index 0000000..0e66c77
--- /dev/null
+++ b/src/deb/lintian/elasticsearch
@@ -0,0 +1,8 @@
+# Ignore arch dependent warnings, we choose the right libs on start
+elasticsearch binary: arch-independent-package-contains-binary-or-object
+# Not stripping external libraries
+elasticsearch binary: unstripped-binary-or-object
+# Ignore arch dependent warnings, we choose the right libs on start
+elasticsearch binary: arch-dependent-file-in-usr-share
+# Please check our changelog at http://www.elasticsearch.org/downloads/
+elasticsearch binary: changelog-file-missing-in-native-package
diff --git a/src/main/assemblies/common-bin.xml b/src/main/assemblies/common-bin.xml
new file mode 100644
index 0000000..c4a238e
--- /dev/null
+++ b/src/main/assemblies/common-bin.xml
@@ -0,0 +1,45 @@
+<component>
+ <dependencySets>
+ <dependencySet>
+ <outputDirectory>/lib</outputDirectory>
+ <useTransitiveFiltering>true</useTransitiveFiltering>
+ <includes>
+ <include>org.apache.lucene:lucene*</include>
+ <include>log4j:log4j</include>
+ <include>net.java.dev.jna:jna</include>
+ <include>com.spatial4j:spatial4j</include>
+ <include>com.vividsolutions:jts</include>
+ </includes>
+ </dependencySet>
+ <dependencySet>
+ <outputDirectory>/lib</outputDirectory>
+ <useTransitiveDependencies>false</useTransitiveDependencies>
+ <includes>
+ <include>org.elasticsearch:elasticsearch</include>
+ </includes>
+ </dependencySet>
+ </dependencySets>
+ <fileSets>
+ <fileSet>
+ <directory>config</directory>
+ <outputDirectory>config</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ </fileSet>
+ </fileSets>
+ <files>
+ <file>
+ <source>README.textile</source>
+ <outputDirectory>/</outputDirectory>
+ </file>
+ <file>
+ <source>LICENSE.txt</source>
+ <outputDirectory>/</outputDirectory>
+ </file>
+ <file>
+ <source>NOTICE.txt</source>
+ <outputDirectory>/</outputDirectory>
+ </file>
+ </files>
+</component> \ No newline at end of file
diff --git a/src/main/assemblies/targz-bin.xml b/src/main/assemblies/targz-bin.xml
new file mode 100644
index 0000000..f587844
--- /dev/null
+++ b/src/main/assemblies/targz-bin.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<assembly>
+ <id>targz</id>
+ <formats>
+ <format>tar.gz</format>
+ </formats>
+
+ <includeBaseDirectory>true</includeBaseDirectory>
+
+ <fileSets>
+ <fileSet>
+ <directory>bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ <lineEnding>unix</lineEnding>
+ <filtered>true</filtered>
+ <includes>
+ <include>elasticsearch.in.sh</include>
+ <include>elasticsearch</include>
+ <include>plugin</include>
+ </includes>
+ </fileSet>
+
+ <fileSet>
+ <directory>lib/sigar</directory>
+ <outputDirectory>lib/sigar</outputDirectory>
+ <excludes>
+ <exclude>*.dll</exclude>
+ <exclude>**winnt**</exclude>
+ </excludes>
+ </fileSet>
+ </fileSets>
+
+ <componentDescriptors>
+ <componentDescriptor>src/main/assemblies/common-bin.xml</componentDescriptor>
+ </componentDescriptors>
+</assembly> \ No newline at end of file
diff --git a/src/main/assemblies/zip-bin.xml b/src/main/assemblies/zip-bin.xml
new file mode 100644
index 0000000..58f387f
--- /dev/null
+++ b/src/main/assemblies/zip-bin.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<assembly>
+ <id>zip</id>
+ <formats>
+ <format>zip</format>
+ </formats>
+
+ <includeBaseDirectory>true</includeBaseDirectory>
+
+ <fileSets>
+ <fileSet>
+ <filtered>true</filtered>
+ <directory>bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <lineEnding>dos</lineEnding>
+ <includes>
+ <include>elasticsearch.bat</include>
+ <include>plugin.bat</include>
+ <include>service.bat</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <filtered>false</filtered>
+ <directory>bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>*.exe</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <filtered>true</filtered>
+ <directory>bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ <lineEnding>unix</lineEnding>
+ <includes>
+ <include>elasticsearch.in.sh</include>
+ <include>elasticsearch</include>
+ <include>plugin</include>
+ </includes>
+ </fileSet>
+
+ <fileSet>
+ <directory>lib/sigar</directory>
+ <outputDirectory>lib/sigar</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ </fileSet>
+ </fileSets>
+
+ <componentDescriptors>
+ <componentDescriptor>src/main/assemblies/common-bin.xml</componentDescriptor>
+ </componentDescriptors>
+</assembly> \ No newline at end of file
diff --git a/src/main/java/jsr166e/CompletableFuture.java b/src/main/java/jsr166e/CompletableFuture.java
new file mode 100644
index 0000000..2a9e0bb
--- /dev/null
+++ b/src/main/java/jsr166e/CompletableFuture.java
@@ -0,0 +1,3314 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import jsr166y.ThreadLocalRandom;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * A {@link Future} that may be explicitly completed (setting its
+ * value and status), and may include dependent functions and actions
+ * that trigger upon its completion.
+ *
+ * <p>When two or more threads attempt to
+ * {@link #complete complete},
+ * {@link #completeExceptionally completeExceptionally}, or
+ * {@link #cancel cancel}
+ * a CompletableFuture, only one of them succeeds.
+ *
+ * <p>Methods are available for adding dependents based on
+ * user-provided Functions, Actions, or Runnables. The appropriate
+ * form to use depends on whether actions require arguments and/or
+ * produce results. Completion of a dependent action will trigger the
+ * completion of another CompletableFuture. Actions may also be
+ * triggered after either or both the current and another
+ * CompletableFuture complete. Multiple CompletableFutures may also
+ * be grouped as one using {@link #anyOf(CompletableFuture...)} and
+ * {@link #allOf(CompletableFuture...)}.
+ *
+ * <p>CompletableFutures themselves do not execute asynchronously.
+ * However, actions supplied for dependent completions of another
+ * CompletableFuture may do so, depending on whether they are provided
+ * via one of the <em>async</em> methods (that is, methods with names
+ * of the form <tt><var>xxx</var>Async</tt>). The <em>async</em>
+ * methods provide a way to commence asynchronous processing of an
+ * action using either a given {@link Executor} or by default the
+ * {@link ForkJoinPool#commonPool()}. To simplify monitoring,
+ * debugging, and tracking, all generated asynchronous tasks are
+ * instances of the marker interface {@link AsynchronousCompletionTask}.
+ *
+ * <p>Actions supplied for dependent completions of <em>non-async</em>
+ * methods may be performed by the thread that completes the current
+ * CompletableFuture, or by any other caller of these methods. There
+ * are no guarantees about the order of processing completions unless
+ * constrained by these methods.
+ *
+ * <p>Since (unlike {@link FutureTask}) this class has no direct
+ * control over the computation that causes it to be completed,
+ * cancellation is treated as just another form of exceptional completion.
+ * Method {@link #cancel cancel} has the same effect as
+ * {@code completeExceptionally(new CancellationException())}.
+ *
+ * <p>Upon exceptional completion (including cancellation), or when a
+ * completion entails an additional computation which terminates
+ * abruptly with an (unchecked) exception or error, then all of their
+ * dependent completions (and their dependents in turn) generally act
+ * as {@code completeExceptionally} with a {@link CompletionException}
+ * holding that exception as its cause. However, the {@link
+ * #exceptionally exceptionally} and {@link #handle handle}
+ * completions <em>are</em> able to handle exceptional completions of
+ * the CompletableFutures they depend on.
+ *
+ * <p>In case of exceptional completion with a CompletionException,
+ * methods {@link #get()} and {@link #get(long, TimeUnit)} throw an
+ * {@link ExecutionException} with the same cause as held in the
+ * corresponding CompletionException. However, in these cases,
+ * methods {@link #join()} and {@link #getNow} throw the
+ * CompletionException, which simplifies usage.
+ *
+ * <p>Arguments used to pass a completion result (that is, for parameters
+ * of type {@code T}) may be null, but passing a null value for any other
+ * parameter will result in a {@link NullPointerException} being thrown.
+ *
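+ * <p>Illustrative usage sketch (it assumes the {@code supplyAsync} and
+ * {@code thenApply} methods defined later in this file, using the
+ * pre-lambda functional interfaces declared below):
+ *
+ * <pre> {@code
+ * CompletableFuture<String> f = CompletableFuture.supplyAsync(
+ *     new CompletableFuture.Generator<String>() {
+ *         public String get() { return "hello"; }
+ *     });
+ * CompletableFuture<Integer> g = f.thenApply(
+ *     new CompletableFuture.Fun<String, Integer>() {
+ *         public Integer apply(String s) { return s.length(); }
+ *     });}</pre>
+ *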
+ * @author Doug Lea
+ */
+public class CompletableFuture<T> implements Future<T> {
+ // jsr166e nested interfaces
+
+ /** Interface describing a void action of one argument */
+ public interface Action<A> { void accept(A a); }
+ /** Interface describing a void action of two arguments */
+ public interface BiAction<A,B> { void accept(A a, B b); }
+ /** Interface describing a function of one argument */
+ public interface Fun<A,T> { T apply(A a); }
+ /** Interface describing a function of two arguments */
+ public interface BiFun<A,B,T> { T apply(A a, B b); }
+ /** Interface describing a function of no arguments */
+ public interface Generator<T> { T get(); }
+
+
+ /*
+ * Overview:
+ *
+ * 1. Non-nullness of field result (set via CAS) indicates done.
+ * An AltResult is used to box null as a result, as well as to
+ * hold exceptions. Using a single field makes completion fast
+ * and simple to detect and trigger, at the expense of a lot of
+ * encoding and decoding that infiltrates many methods. One minor
+ * simplification relies on the (static) NIL (to box null results)
+ * being the only AltResult with a null exception field, so we
+ * don't usually need explicit comparisons with NIL. The CF
+ * exception propagation mechanics surrounding decoding rely on
+ * unchecked casts of decoded results really being unchecked,
+ * where user type errors are caught at point of use, as is
+ * currently the case in Java. These are highlighted by using
+ * SuppressWarnings-annotated temporaries.
+ *
+ * 2. Waiters are held in a Treiber stack similar to the one used
+ * in FutureTask, Phaser, and SynchronousQueue. See their
+ * internal documentation for algorithmic details.
+ *
+ * 3. Completions are also kept in a list/stack, and pulled off
+ * and run when completion is triggered. (We could even use the
+ * same stack as for waiters, but would give up the potential
+ * parallelism obtained because woken waiters help release/run
+ * others -- see method postComplete). Because post-processing
+ * may race with direct calls, class Completion opportunistically
+ * extends AtomicInteger so callers can claim the action via
+ * compareAndSet(0, 1). The Completion.run methods are all
+ * written a boringly similar uniform way (that sometimes includes
+ * unnecessary-looking checks, kept to maintain uniformity).
+ * There are enough dimensions upon which they differ that
+ * attempts to factor commonalities while maintaining efficiency
+ * require more lines of code than they would save.
+ *
+ * 4. The exported then/and/or methods do support a bit of
+ * factoring (see doThenApply etc). They must cope with the
+ * intrinsic races surrounding addition of a dependent action
+ * versus performing the action directly because the task is
+ * already complete. For example, a CF may not be complete upon
+ * entry, so a dependent completion is added, but by the time it
+ * is added, the target CF is complete, so must be directly
+ * executed. This is all done while avoiding unnecessary object
+ * construction in safe-bypass cases.
+ */
+
+ // preliminaries
+
+ static final class AltResult {
+ final Throwable ex; // null only for NIL
+ AltResult(Throwable ex) { this.ex = ex; }
+ }
+
+ static final AltResult NIL = new AltResult(null);
+
+ // Fields
+
+ volatile Object result; // Either the result or boxed AltResult
+ volatile WaitNode waiters; // Treiber stack of threads blocked on get()
+ volatile CompletionNode completions; // list (Treiber stack) of completions
+
+ // Basic utilities for triggering and processing completions
+
+ /**
+ * Removes and signals all waiting threads and runs all completions.
+ */
+ final void postComplete() {
+ WaitNode q; Thread t;
+ while ((q = waiters) != null) {
+ if (UNSAFE.compareAndSwapObject(this, WAITERS, q, q.next) &&
+ (t = q.thread) != null) {
+ q.thread = null;
+ LockSupport.unpark(t);
+ }
+ }
+
+ CompletionNode h; Completion c;
+ while ((h = completions) != null) {
+ if (UNSAFE.compareAndSwapObject(this, COMPLETIONS, h, h.next) &&
+ (c = h.completion) != null)
+ c.run();
+ }
+ }
+
+ /**
+ * Triggers completion with the encoding of the given arguments:
+ * if the exception is non-null, encodes it as a wrapped
+ * CompletionException unless it is one already. Otherwise uses
+ * the given result, boxed as NIL if null.
+ */
+ final void internalComplete(T v, Throwable ex) {
+ if (result == null)
+ UNSAFE.compareAndSwapObject
+ (this, RESULT, null,
+ (ex == null) ? (v == null) ? NIL : v :
+ new AltResult((ex instanceof CompletionException) ? ex :
+ new CompletionException(ex)));
+ postComplete(); // help out even if not triggered
+ }
+
+ /**
+ * If triggered, helps release and/or process completions.
+ */
+ final void helpPostComplete() {
+ if (result != null)
+ postComplete();
+ }
+
+ /* ------------- waiting for completions -------------- */
+
+ /** Number of processors, for spin control */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /**
+ * Heuristic spin value for waitingGet() before blocking on
+ * multiprocessors
+ */
+ static final int SPINS = (NCPU > 1) ? 1 << 8 : 0;
+
+ /**
+ * Linked nodes to record waiting threads in a Treiber stack. See
+ * other classes such as Phaser and SynchronousQueue for more
+ * detailed explanation. This class implements ManagedBlocker to
+ * avoid starvation when blocking actions pile up in
+ * ForkJoinPools.
+ */
+ static final class WaitNode implements ForkJoinPool.ManagedBlocker {
+ long nanos; // wait time if timed
+ final long deadline; // non-zero if timed
+ volatile int interruptControl; // > 0: interruptible, < 0: interrupted
+ volatile Thread thread;
+ volatile WaitNode next;
+ WaitNode(boolean interruptible, long nanos, long deadline) {
+ this.thread = Thread.currentThread();
+ this.interruptControl = interruptible ? 1 : 0;
+ this.nanos = nanos;
+ this.deadline = deadline;
+ }
+ public boolean isReleasable() {
+ if (thread == null)
+ return true;
+ if (Thread.interrupted()) {
+ int i = interruptControl;
+ interruptControl = -1;
+ if (i > 0)
+ return true;
+ }
+ if (deadline != 0L &&
+ (nanos <= 0L || (nanos = deadline - System.nanoTime()) <= 0L)) {
+ thread = null;
+ return true;
+ }
+ return false;
+ }
+ public boolean block() {
+ if (isReleasable())
+ return true;
+ else if (deadline == 0L)
+ LockSupport.park(this);
+ else if (nanos > 0L)
+ LockSupport.parkNanos(this, nanos);
+ return isReleasable();
+ }
+ }
+
+ /**
+ * Returns raw result after waiting, or null if interruptible and
+ * interrupted.
+ */
+ private Object waitingGet(boolean interruptible) {
+ WaitNode q = null;
+ boolean queued = false;
+ int spins = SPINS;
+ for (Object r;;) {
+ if ((r = result) != null) {
+ if (q != null) { // suppress unpark
+ q.thread = null;
+ if (q.interruptControl < 0) {
+ if (interruptible) {
+ removeWaiter(q);
+ return null;
+ }
+ Thread.currentThread().interrupt();
+ }
+ }
+ postComplete(); // help release others
+ return r;
+ }
+ else if (spins > 0) {
+ int rnd = ThreadLocalRandom.current().nextInt();
+ if (rnd >= 0)
+ --spins;
+ }
+ else if (q == null)
+ q = new WaitNode(interruptible, 0L, 0L);
+ else if (!queued)
+ queued = UNSAFE.compareAndSwapObject(this, WAITERS,
+ q.next = waiters, q);
+ else if (interruptible && q.interruptControl < 0) {
+ removeWaiter(q);
+ return null;
+ }
+ else if (q.thread != null && result == null) {
+ try {
+ ForkJoinPool.managedBlock(q);
+ } catch (InterruptedException ex) {
+ q.interruptControl = -1;
+ }
+ }
+ }
+ }
+
+ /**
+ * Awaits completion or aborts on interrupt or timeout.
+ *
+ * @param nanos time to wait
+ * @return raw result
+ */
+ private Object timedAwaitDone(long nanos)
+ throws InterruptedException, TimeoutException {
+ WaitNode q = null;
+ boolean queued = false;
+ for (Object r;;) {
+ if ((r = result) != null) {
+ if (q != null) {
+ q.thread = null;
+ if (q.interruptControl < 0) {
+ removeWaiter(q);
+ throw new InterruptedException();
+ }
+ }
+ postComplete();
+ return r;
+ }
+ else if (q == null) {
+ if (nanos <= 0L)
+ throw new TimeoutException();
+ long d = System.nanoTime() + nanos;
+ q = new WaitNode(true, nanos, d == 0L ? 1L : d); // avoid 0
+ }
+ else if (!queued)
+ queued = UNSAFE.compareAndSwapObject(this, WAITERS,
+ q.next = waiters, q);
+ else if (q.interruptControl < 0) {
+ removeWaiter(q);
+ throw new InterruptedException();
+ }
+ else if (q.nanos <= 0L) {
+ if (result == null) {
+ removeWaiter(q);
+ throw new TimeoutException();
+ }
+ }
+ else if (q.thread != null && result == null) {
+ try {
+ ForkJoinPool.managedBlock(q);
+ } catch (InterruptedException ex) {
+ q.interruptControl = -1;
+ }
+ }
+ }
+ }
+
+ /**
+ * Tries to unlink a timed-out or interrupted wait node to avoid
+ * accumulating garbage. Internal nodes are simply unspliced
+ * without CAS since it is harmless if they are traversed anyway
+ * by releasers. To avoid effects of unsplicing from already
+ * removed nodes, the list is retraversed in case of an apparent
+ * race. This is slow when there are a lot of nodes, but we don't
+ * expect lists to be long enough to outweigh higher-overhead
+ * schemes.
+ */
+ private void removeWaiter(WaitNode node) {
+ if (node != null) {
+ node.thread = null;
+ retry:
+ for (;;) { // restart on removeWaiter race
+ for (WaitNode pred = null, q = waiters, s; q != null; q = s) {
+ s = q.next;
+ if (q.thread != null)
+ pred = q;
+ else if (pred != null) {
+ pred.next = s;
+ if (pred.thread == null) // check for race
+ continue retry;
+ }
+ else if (!UNSAFE.compareAndSwapObject(this, WAITERS, q, s))
+ continue retry;
+ }
+ break;
+ }
+ }
+ }
+
+ /* ------------- Async tasks -------------- */
+
+ /**
+ * A marker interface identifying asynchronous tasks produced by
+ * {@code async} methods. This may be useful for monitoring,
+ * debugging, and tracking asynchronous activities.
+ *
+ * @since 1.8
+ */
+ public static interface AsynchronousCompletionTask {
+ }
+
+ /** Base class can act as either FJ or plain Runnable */
+ abstract static class Async extends ForkJoinTask<Void>
+ implements Runnable, AsynchronousCompletionTask {
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void v) { }
+ public final void run() { exec(); }
+ }
+
+ static final class AsyncRun extends Async {
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ AsyncRun(Runnable fn, CompletableFuture<Void> dst) {
+ this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<Void> d; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ fn.run();
+ ex = null;
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ d.internalComplete(null, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AsyncSupply<U> extends Async {
+ final Generator<U> fn;
+ final CompletableFuture<U> dst;
+ AsyncSupply(Generator<U> fn, CompletableFuture<U> dst) {
+ this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<U> d; U u; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ u = fn.get();
+ ex = null;
+ } catch (Throwable rex) {
+ ex = rex;
+ u = null;
+ }
+ d.internalComplete(u, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AsyncApply<T,U> extends Async {
+ final T arg;
+ final Fun<? super T,? extends U> fn;
+ final CompletableFuture<U> dst;
+ AsyncApply(T arg, Fun<? super T,? extends U> fn,
+ CompletableFuture<U> dst) {
+ this.arg = arg; this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<U> d; U u; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ u = fn.apply(arg);
+ ex = null;
+ } catch (Throwable rex) {
+ ex = rex;
+ u = null;
+ }
+ d.internalComplete(u, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AsyncCombine<T,U,V> extends Async {
+ final T arg1;
+ final U arg2;
+ final BiFun<? super T,? super U,? extends V> fn;
+ final CompletableFuture<V> dst;
+ AsyncCombine(T arg1, U arg2,
+ BiFun<? super T,? super U,? extends V> fn,
+ CompletableFuture<V> dst) {
+ this.arg1 = arg1; this.arg2 = arg2; this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<V> d; V v; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ v = fn.apply(arg1, arg2);
+ ex = null;
+ } catch (Throwable rex) {
+ ex = rex;
+ v = null;
+ }
+ d.internalComplete(v, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AsyncAccept<T> extends Async {
+ final T arg;
+ final Action<? super T> fn;
+ final CompletableFuture<Void> dst;
+ AsyncAccept(T arg, Action<? super T> fn,
+ CompletableFuture<Void> dst) {
+ this.arg = arg; this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<Void> d; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ fn.accept(arg);
+ ex = null;
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ d.internalComplete(null, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AsyncAcceptBoth<T,U> extends Async {
+ final T arg1;
+ final U arg2;
+ final BiAction<? super T,? super U> fn;
+ final CompletableFuture<Void> dst;
+ AsyncAcceptBoth(T arg1, U arg2,
+ BiAction<? super T,? super U> fn,
+ CompletableFuture<Void> dst) {
+ this.arg1 = arg1; this.arg2 = arg2; this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<Void> d; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ fn.accept(arg1, arg2);
+ ex = null;
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ d.internalComplete(null, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AsyncCompose<T,U> extends Async {
+ final T arg;
+ final Fun<? super T, CompletableFuture<U>> fn;
+ final CompletableFuture<U> dst;
+ AsyncCompose(T arg,
+ Fun<? super T, CompletableFuture<U>> fn,
+ CompletableFuture<U> dst) {
+ this.arg = arg; this.fn = fn; this.dst = dst;
+ }
+ public final boolean exec() {
+ CompletableFuture<U> d, fr; U u; Throwable ex;
+ if ((d = this.dst) != null && d.result == null) {
+ try {
+ fr = fn.apply(arg);
+ ex = (fr == null) ? new NullPointerException() : null;
+ } catch (Throwable rex) {
+ ex = rex;
+ fr = null;
+ }
+ if (ex != null)
+ u = null;
+ else {
+ Object r = fr.result;
+ if (r == null)
+ r = fr.waitingGet(false);
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ u = null;
+ }
+ else {
+ @SuppressWarnings("unchecked") U ur = (U) r;
+ u = ur;
+ }
+ }
+ d.internalComplete(u, ex);
+ }
+ return true;
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /* ------------- Completions -------------- */
+
+ /**
+ * Simple linked list nodes to record completions, used in
+ * basically the same way as WaitNodes. (We separate nodes from
+ * the Completions themselves mainly because for the And and Or
+ * methods, the same Completion object resides in two lists.)
+ */
+ static final class CompletionNode {
+ final Completion completion;
+ volatile CompletionNode next;
+ CompletionNode(Completion completion) { this.completion = completion; }
+ }
+
+ // Opportunistically subclass AtomicInteger to use compareAndSet to claim.
+ abstract static class Completion extends AtomicInteger implements Runnable {
+ }
+
+ static final class ThenApply<T,U> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final Fun<? super T,? extends U> fn;
+ final CompletableFuture<U> dst;
+ final Executor executor;
+ ThenApply(CompletableFuture<? extends T> src,
+ Fun<? super T,? extends U> fn,
+ CompletableFuture<U> dst,
+ Executor executor) {
+ this.src = src; this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final Fun<? super T,? extends U> fn;
+ final CompletableFuture<U> dst;
+ Object r; T t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ Executor e = executor;
+ U u = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncApply<T,U>(t, fn, dst));
+ else
+ u = fn.apply(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(u, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ThenAccept<T> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final Action<? super T> fn;
+ final CompletableFuture<Void> dst;
+ final Executor executor;
+ ThenAccept(CompletableFuture<? extends T> src,
+ Action<? super T> fn,
+ CompletableFuture<Void> dst,
+ Executor executor) {
+ this.src = src; this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final Action<? super T> fn;
+ final CompletableFuture<Void> dst;
+ Object r; T t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ Executor e = executor;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncAccept<T>(t, fn, dst));
+ else
+ fn.accept(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ThenRun extends Completion {
+ final CompletableFuture<?> src;
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ final Executor executor;
+ ThenRun(CompletableFuture<?> src,
+ Runnable fn,
+ CompletableFuture<Void> dst,
+ Executor executor) {
+ this.src = src; this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ Object r; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ Executor e = executor;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncRun(fn, dst));
+ else
+ fn.run();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ThenCombine<T,U,V> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final CompletableFuture<? extends U> snd;
+ final BiFun<? super T,? super U,? extends V> fn;
+ final CompletableFuture<V> dst;
+ final Executor executor;
+ ThenCombine(CompletableFuture<? extends T> src,
+ CompletableFuture<? extends U> snd,
+ BiFun<? super T,? super U,? extends V> fn,
+ CompletableFuture<V> dst,
+ Executor executor) {
+ this.src = src; this.snd = snd;
+ this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final CompletableFuture<? extends U> b;
+ final BiFun<? super T,? super U,? extends V> fn;
+ final CompletableFuture<V> dst;
+ Object r, s; T t; U u; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ (b = this.snd) != null &&
+ (s = b.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex != null)
+ u = null;
+ else if (s instanceof AltResult) {
+ ex = ((AltResult)s).ex;
+ u = null;
+ }
+ else {
+ @SuppressWarnings("unchecked") U us = (U) s;
+ u = us;
+ }
+ Executor e = executor;
+ V v = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncCombine<T,U,V>(t, u, fn, dst));
+ else
+ v = fn.apply(t, u);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(v, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ThenAcceptBoth<T,U> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final CompletableFuture<? extends U> snd;
+ final BiAction<? super T,? super U> fn;
+ final CompletableFuture<Void> dst;
+ final Executor executor;
+ ThenAcceptBoth(CompletableFuture<? extends T> src,
+ CompletableFuture<? extends U> snd,
+ BiAction<? super T,? super U> fn,
+ CompletableFuture<Void> dst,
+ Executor executor) {
+ this.src = src; this.snd = snd;
+ this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final CompletableFuture<? extends U> b;
+ final BiAction<? super T,? super U> fn;
+ final CompletableFuture<Void> dst;
+ Object r, s; T t; U u; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ (b = this.snd) != null &&
+ (s = b.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex != null)
+ u = null;
+ else if (s instanceof AltResult) {
+ ex = ((AltResult)s).ex;
+ u = null;
+ }
+ else {
+ @SuppressWarnings("unchecked") U us = (U) s;
+ u = us;
+ }
+ Executor e = executor;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncAcceptBoth<T,U>(t, u, fn, dst));
+ else
+ fn.accept(t, u);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class RunAfterBoth extends Completion {
+ final CompletableFuture<?> src;
+ final CompletableFuture<?> snd;
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ final Executor executor;
+ RunAfterBoth(CompletableFuture<?> src,
+ CompletableFuture<?> snd,
+ Runnable fn,
+ CompletableFuture<Void> dst,
+ Executor executor) {
+ this.src = src; this.snd = snd;
+ this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final CompletableFuture<?> b;
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ Object r, s; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ (b = this.snd) != null &&
+ (s = b.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ if (ex == null && (s instanceof AltResult))
+ ex = ((AltResult)s).ex;
+ Executor e = executor;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncRun(fn, dst));
+ else
+ fn.run();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AndCompletion extends Completion {
+ final CompletableFuture<?> src;
+ final CompletableFuture<?> snd;
+ final CompletableFuture<Void> dst;
+ AndCompletion(CompletableFuture<?> src,
+ CompletableFuture<?> snd,
+ CompletableFuture<Void> dst) {
+ this.src = src; this.snd = snd; this.dst = dst;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final CompletableFuture<?> b;
+ final CompletableFuture<Void> dst;
+ Object r, s; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ (b = this.snd) != null &&
+ (s = b.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ if (ex == null && (s instanceof AltResult))
+ ex = ((AltResult)s).ex;
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ApplyToEither<T,U> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final CompletableFuture<? extends T> snd;
+ final Fun<? super T,? extends U> fn;
+ final CompletableFuture<U> dst;
+ final Executor executor;
+ ApplyToEither(CompletableFuture<? extends T> src,
+ CompletableFuture<? extends T> snd,
+ Fun<? super T,? extends U> fn,
+ CompletableFuture<U> dst,
+ Executor executor) {
+ this.src = src; this.snd = snd;
+ this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final CompletableFuture<? extends T> b;
+ final Fun<? super T,? extends U> fn;
+ final CompletableFuture<U> dst;
+ Object r; T t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (((a = this.src) != null && (r = a.result) != null) ||
+ ((b = this.snd) != null && (r = b.result) != null)) &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ Executor e = executor;
+ U u = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncApply<T,U>(t, fn, dst));
+ else
+ u = fn.apply(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(u, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class AcceptEither<T> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final CompletableFuture<? extends T> snd;
+ final Action<? super T> fn;
+ final CompletableFuture<Void> dst;
+ final Executor executor;
+ AcceptEither(CompletableFuture<? extends T> src,
+ CompletableFuture<? extends T> snd,
+ Action<? super T> fn,
+ CompletableFuture<Void> dst,
+ Executor executor) {
+ this.src = src; this.snd = snd;
+ this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final CompletableFuture<? extends T> b;
+ final Action<? super T> fn;
+ final CompletableFuture<Void> dst;
+ Object r; T t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (((a = this.src) != null && (r = a.result) != null) ||
+ ((b = this.snd) != null && (r = b.result) != null)) &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ Executor e = executor;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncAccept<T>(t, fn, dst));
+ else
+ fn.accept(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class RunAfterEither extends Completion {
+ final CompletableFuture<?> src;
+ final CompletableFuture<?> snd;
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ final Executor executor;
+ RunAfterEither(CompletableFuture<?> src,
+ CompletableFuture<?> snd,
+ Runnable fn,
+ CompletableFuture<Void> dst,
+ Executor executor) {
+ this.src = src; this.snd = snd;
+ this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final CompletableFuture<?> b;
+ final Runnable fn;
+ final CompletableFuture<Void> dst;
+ Object r; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (((a = this.src) != null && (r = a.result) != null) ||
+ ((b = this.snd) != null && (r = b.result) != null)) &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ Executor e = executor;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncRun(fn, dst));
+ else
+ fn.run();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class OrCompletion extends Completion {
+ final CompletableFuture<?> src;
+ final CompletableFuture<?> snd;
+ final CompletableFuture<Object> dst;
+ OrCompletion(CompletableFuture<?> src,
+ CompletableFuture<?> snd,
+ CompletableFuture<Object> dst) {
+ this.src = src; this.snd = snd; this.dst = dst;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final CompletableFuture<?> b;
+ final CompletableFuture<Object> dst;
+ Object r, t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (((a = this.src) != null && (r = a.result) != null) ||
+ ((b = this.snd) != null && (r = b.result) != null)) &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ t = r;
+ }
+ dst.internalComplete(t, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ExceptionCompletion<T> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final Fun<? super Throwable, ? extends T> fn;
+ final CompletableFuture<T> dst;
+ ExceptionCompletion(CompletableFuture<? extends T> src,
+ Fun<? super Throwable, ? extends T> fn,
+ CompletableFuture<T> dst) {
+ this.src = src; this.fn = fn; this.dst = dst;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final Fun<? super Throwable, ? extends T> fn;
+ final CompletableFuture<T> dst;
+ Object r; T t = null; Throwable ex, dx = null;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if ((r instanceof AltResult) &&
+ (ex = ((AltResult)r).ex) != null) {
+ try {
+ t = fn.apply(ex);
+ } catch (Throwable rex) {
+ dx = rex;
+ }
+ }
+ else {
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ dst.internalComplete(t, dx);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ThenCopy<T> extends Completion {
+ final CompletableFuture<?> src;
+ final CompletableFuture<T> dst;
+ ThenCopy(CompletableFuture<?> src,
+ CompletableFuture<T> dst) {
+ this.src = src; this.dst = dst;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final CompletableFuture<T> dst;
+ Object r; T t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ dst.internalComplete(t, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ // version of ThenCopy for CompletableFuture<Void> dst
+ static final class ThenPropagate extends Completion {
+ final CompletableFuture<?> src;
+ final CompletableFuture<Void> dst;
+ ThenPropagate(CompletableFuture<?> src,
+ CompletableFuture<Void> dst) {
+ this.src = src; this.dst = dst;
+ }
+ public final void run() {
+ final CompletableFuture<?> a;
+ final CompletableFuture<Void> dst;
+ Object r; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ dst.internalComplete(null, ex);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class HandleCompletion<T,U> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final BiFun<? super T, Throwable, ? extends U> fn;
+ final CompletableFuture<U> dst;
+ HandleCompletion(CompletableFuture<? extends T> src,
+ BiFun<? super T, Throwable, ? extends U> fn,
+ CompletableFuture<U> dst) {
+ this.src = src; this.fn = fn; this.dst = dst;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final BiFun<? super T, Throwable, ? extends U> fn;
+ final CompletableFuture<U> dst;
+ Object r; T t; Throwable ex;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ U u = null; Throwable dx = null;
+ try {
+ u = fn.apply(t, ex);
+ } catch (Throwable rex) {
+ dx = rex;
+ }
+ dst.internalComplete(u, dx);
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ static final class ThenCompose<T,U> extends Completion {
+ final CompletableFuture<? extends T> src;
+ final Fun<? super T, CompletableFuture<U>> fn;
+ final CompletableFuture<U> dst;
+ final Executor executor;
+ ThenCompose(CompletableFuture<? extends T> src,
+ Fun<? super T, CompletableFuture<U>> fn,
+ CompletableFuture<U> dst,
+ Executor executor) {
+ this.src = src; this.fn = fn; this.dst = dst;
+ this.executor = executor;
+ }
+ public final void run() {
+ final CompletableFuture<? extends T> a;
+ final Fun<? super T, CompletableFuture<U>> fn;
+ final CompletableFuture<U> dst;
+ Object r; T t; Throwable ex; Executor e;
+ if ((dst = this.dst) != null &&
+ (fn = this.fn) != null &&
+ (a = this.src) != null &&
+ (r = a.result) != null &&
+ compareAndSet(0, 1)) {
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ CompletableFuture<U> c = null;
+ U u = null;
+ boolean complete = false;
+ if (ex == null) {
+ if ((e = executor) != null)
+ e.execute(new AsyncCompose<T,U>(t, fn, dst));
+ else {
+ try {
+ if ((c = fn.apply(t)) == null)
+ ex = new NullPointerException();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ }
+ if (c != null) {
+ ThenCopy<U> d = null;
+ Object s;
+ if ((s = c.result) == null) {
+ CompletionNode p = new CompletionNode
+ (d = new ThenCopy<U>(c, dst));
+ while ((s = c.result) == null) {
+ if (UNSAFE.compareAndSwapObject
+ (c, COMPLETIONS, p.next = c.completions, p))
+ break;
+ }
+ }
+ if (s != null && (d == null || d.compareAndSet(0, 1))) {
+ complete = true;
+ if (s instanceof AltResult) {
+ ex = ((AltResult)s).ex; // no rewrap
+ u = null;
+ }
+ else {
+ @SuppressWarnings("unchecked") U us = (U) s;
+ u = us;
+ }
+ }
+ }
+ if (complete || ex != null)
+ dst.internalComplete(u, ex);
+ if (c != null)
+ c.helpPostComplete();
+ }
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ // public methods
+
+ /**
+ * Creates a new incomplete CompletableFuture.
+ */
+ public CompletableFuture() {
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * by a task running in the {@link ForkJoinPool#commonPool()} with
+ * the value obtained by calling the given Generator.
+ *
+ * @param supplier a function returning the value to be used
+ * to complete the returned CompletableFuture
+ * @param <U> the function's return type
+ * @return the new CompletableFuture
+ */
+ public static <U> CompletableFuture<U> supplyAsync(Generator<U> supplier) {
+ if (supplier == null) throw new NullPointerException();
+ CompletableFuture<U> f = new CompletableFuture<U>();
+ ForkJoinPool.commonPool().
+ execute((ForkJoinTask<?>)new AsyncSupply<U>(supplier, f));
+ return f;
+ }
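+
+ // Illustrative usage (editorial sketch, not part of the upstream source),
+ // assuming the nested Generator interface declared by this class exposes
+ // a single get() method:
+ //
+ //   CompletableFuture<Integer> f =
+ //       CompletableFuture.supplyAsync(new Generator<Integer>() {
+ //           public Integer get() { return 6 * 7; } // runs in commonPool()
+ //       });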
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * by a task running in the given executor with the value obtained
+ * by calling the given Generator.
+ *
+ * @param supplier a function returning the value to be used
+ * to complete the returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @param <U> the function's return type
+ * @return the new CompletableFuture
+ */
+ public static <U> CompletableFuture<U> supplyAsync(Generator<U> supplier,
+ Executor executor) {
+ if (executor == null || supplier == null)
+ throw new NullPointerException();
+ CompletableFuture<U> f = new CompletableFuture<U>();
+ executor.execute(new AsyncSupply<U>(supplier, f));
+ return f;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * by a task running in the {@link ForkJoinPool#commonPool()} after
+ * it runs the given action.
+ *
+ * @param runnable the action to run before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public static CompletableFuture<Void> runAsync(Runnable runnable) {
+ if (runnable == null) throw new NullPointerException();
+ CompletableFuture<Void> f = new CompletableFuture<Void>();
+ ForkJoinPool.commonPool().
+ execute((ForkJoinTask<?>)new AsyncRun(runnable, f));
+ return f;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * by a task running in the given executor after it runs the given
+ * action.
+ *
+ * @param runnable the action to run before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public static CompletableFuture<Void> runAsync(Runnable runnable,
+ Executor executor) {
+ if (executor == null || runnable == null)
+ throw new NullPointerException();
+ CompletableFuture<Void> f = new CompletableFuture<Void>();
+ executor.execute(new AsyncRun(runnable, f));
+ return f;
+ }
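+
+ // Illustrative usage (editorial sketch): runAsync is the Runnable
+ // counterpart of supplyAsync and completes with null on success. Here
+ // "executor" is an assumed java.util.concurrent.Executor and
+ // flushCaches() is a hypothetical action:
+ //
+ //   CompletableFuture<Void> done =
+ //       CompletableFuture.runAsync(new Runnable() {
+ //           public void run() { flushCaches(); }
+ //       }, executor);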
+
+ /**
+ * Returns a new CompletableFuture that is already completed with
+ * the given value.
+ *
+ * @param value the value
+ * @param <U> the type of the value
+ * @return the completed CompletableFuture
+ */
+ public static <U> CompletableFuture<U> completedFuture(U value) {
+ CompletableFuture<U> f = new CompletableFuture<U>();
+ f.result = (value == null) ? NIL : value;
+ return f;
+ }
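+
+ // Illustrative usage (editorial sketch): an already-completed future is
+ // useful as a base case or stub; note that the internal NIL sentinel
+ // stands in for a null value:
+ //
+ //   CompletableFuture<String> ready = CompletableFuture.completedFuture("ok");
+ //   ready.getNow("fallback"); // returns "ok" without blocking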
+
+ /**
+ * Returns {@code true} if completed in any fashion: normally,
+ * exceptionally, or via cancellation.
+ *
+ * @return {@code true} if completed
+ */
+ public boolean isDone() {
+ return result != null;
+ }
+
+ /**
+ * Waits if necessary for this future to complete, and then
+ * returns its result.
+ *
+ * @return the result value
+ * @throws CancellationException if this future was cancelled
+ * @throws ExecutionException if this future completed exceptionally
+ * @throws InterruptedException if the current thread was interrupted
+ * while waiting
+ */
+ public T get() throws InterruptedException, ExecutionException {
+ Object r; Throwable ex, cause;
+ if ((r = result) == null && (r = waitingGet(true)) == null)
+ throw new InterruptedException();
+ if (!(r instanceof AltResult)) {
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ return tr;
+ }
+ if ((ex = ((AltResult)r).ex) == null)
+ return null;
+ if (ex instanceof CancellationException)
+ throw (CancellationException)ex;
+ if ((ex instanceof CompletionException) &&
+ (cause = ex.getCause()) != null)
+ ex = cause;
+ throw new ExecutionException(ex);
+ }
+
+ /**
+ * Waits if necessary for at most the given time for this future
+ * to complete, and then returns its result, if available.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return the result value
+ * @throws CancellationException if this future was cancelled
+ * @throws ExecutionException if this future completed exceptionally
+ * @throws InterruptedException if the current thread was interrupted
+ * while waiting
+ * @throws TimeoutException if the wait timed out
+ */
+ public T get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ Object r; Throwable ex, cause;
+ long nanos = unit.toNanos(timeout);
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ if ((r = result) == null)
+ r = timedAwaitDone(nanos);
+ if (!(r instanceof AltResult)) {
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ return tr;
+ }
+ if ((ex = ((AltResult)r).ex) == null)
+ return null;
+ if (ex instanceof CancellationException)
+ throw (CancellationException)ex;
+ if ((ex instanceof CompletionException) &&
+ (cause = ex.getCause()) != null)
+ ex = cause;
+ throw new ExecutionException(ex);
+ }
+
+ /**
+ * Returns the result value when complete, or throws an
+ * (unchecked) exception if completed exceptionally. To better
+ * conform with the use of common functional forms, if a
+ * computation involved in the completion of this
+ * CompletableFuture threw an exception, this method throws an
+ * (unchecked) {@link CompletionException} with the underlying
+ * exception as its cause.
+ *
+ * @return the result value
+ * @throws CancellationException if the computation was cancelled
+ * @throws CompletionException if this future completed
+ * exceptionally or a completion computation threw an exception
+ */
+ public T join() {
+ Object r; Throwable ex;
+ if ((r = result) == null)
+ r = waitingGet(false);
+ if (!(r instanceof AltResult)) {
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ return tr;
+ }
+ if ((ex = ((AltResult)r).ex) == null)
+ return null;
+ if (ex instanceof CancellationException)
+ throw (CancellationException)ex;
+ if (ex instanceof CompletionException)
+ throw (CompletionException)ex;
+ throw new CompletionException(ex);
+ }
+
+ /**
+ * Returns the result value (or throws any encountered exception)
+ * if completed, else returns the given valueIfAbsent.
+ *
+ * @param valueIfAbsent the value to return if not completed
+ * @return the result value, if completed, else the given valueIfAbsent
+ * @throws CancellationException if the computation was cancelled
+ * @throws CompletionException if this future completed
+ * exceptionally or a completion computation threw an exception
+ */
+ public T getNow(T valueIfAbsent) {
+ Object r; Throwable ex;
+ if ((r = result) == null)
+ return valueIfAbsent;
+ if (!(r instanceof AltResult)) {
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ return tr;
+ }
+ if ((ex = ((AltResult)r).ex) == null)
+ return null;
+ if (ex instanceof CancellationException)
+ throw (CancellationException)ex;
+ if (ex instanceof CompletionException)
+ throw (CompletionException)ex;
+ throw new CompletionException(ex);
+ }
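+
+ // Editorial note on the three read paths above: get() wraps failures in a
+ // checked ExecutionException, while join() and getNow() rethrow the
+ // unchecked CompletionException (or CancellationException) directly.
+ // A sketch:
+ //
+ //   CompletableFuture<String> f = CompletableFuture.completedFuture("ok");
+ //   String v = f.getNow("pending"); // non-blocking
+ //   String w = f.join();            // blocks if pending; unchecked on failure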
+
+ /**
+ * If not already completed, sets the value returned by {@link
+ * #get()} and related methods to the given value.
+ *
+ * @param value the result value
+ * @return {@code true} if this invocation caused this CompletableFuture
+ * to transition to a completed state, else {@code false}
+ */
+ public boolean complete(T value) {
+ boolean triggered = result == null &&
+ UNSAFE.compareAndSwapObject(this, RESULT, null,
+ value == null ? NIL : value);
+ postComplete();
+ return triggered;
+ }
+
+ /**
+ * If not already completed, causes invocations of {@link #get()}
+ * and related methods to throw the given exception.
+ *
+ * @param ex the exception
+ * @return {@code true} if this invocation caused this CompletableFuture
+ * to transition to a completed state, else {@code false}
+ */
+ public boolean completeExceptionally(Throwable ex) {
+ if (ex == null) throw new NullPointerException();
+ boolean triggered = result == null &&
+ UNSAFE.compareAndSwapObject(this, RESULT, null, new AltResult(ex));
+ postComplete();
+ return triggered;
+ }
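+
+ // Illustrative handshake (editorial sketch): a producer settles the
+ // future at most once; later attempts return false and are ignored:
+ //
+ //   final CompletableFuture<String> f = new CompletableFuture<String>();
+ //   // producer:  f.complete("value");          // true on the first call
+ //   // on error:  f.completeExceptionally(ex);  // false if already settled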
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when this CompletableFuture completes, with the result of the
+ * given function of this CompletableFuture's result.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied function throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> thenApply(Fun<? super T,? extends U> fn) {
+ return doThenApply(fn, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when this CompletableFuture completes, with the result of the
+ * given function of this CompletableFuture's result from a
+ * task running in the {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied function throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> thenApplyAsync
+ (Fun<? super T,? extends U> fn) {
+ return doThenApply(fn, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when this CompletableFuture completes, with the result of the
+ * given function of this CompletableFuture's result from a
+ * task running in the given executor.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied function throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> thenApplyAsync
+ (Fun<? super T,? extends U> fn,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doThenApply(fn, executor);
+ }
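+
+ // Illustrative usage (editorial sketch), using the Fun interface declared
+ // by this class (its apply method is invoked as fn.apply(t) below):
+ //
+ //   CompletableFuture<Integer> len =
+ //       f.thenApply(new Fun<String, Integer>() {
+ //           public Integer apply(String s) { return s.length(); }
+ //       });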
+
+ private <U> CompletableFuture<U> doThenApply
+ (Fun<? super T,? extends U> fn,
+ Executor e) {
+ if (fn == null) throw new NullPointerException();
+ CompletableFuture<U> dst = new CompletableFuture<U>();
+ ThenApply<T,U> d = null;
+ Object r;
+ if ((r = result) == null) {
+ CompletionNode p = new CompletionNode
+ (d = new ThenApply<T,U>(this, fn, dst, e));
+ while ((r = result) == null) {
+ if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ break;
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ U u = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncApply<T,U>(t, fn, dst));
+ else
+ u = fn.apply(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(u, ex);
+ }
+ helpPostComplete();
+ return dst;
+ }
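+
+ // Editorial note on doThenApply (the pattern shared by the doXxx helpers
+ // below): if this future is still pending, a CompletionNode wrapping the
+ // ThenApply action is pushed onto the completions stack via a CAS loop;
+ // if the result arrived first, or arrives during the race, the action is
+ // claimed with d.compareAndSet(0, 1) and run on the caller's thread, so
+ // each completion executes exactly once whichever side wins.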
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when this CompletableFuture completes, after performing the given
+ * action with this CompletableFuture's result.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied action throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> thenAccept(Action<? super T> block) {
+ return doThenAccept(block, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when this CompletableFuture completes, after performing the given
+ * action with this CompletableFuture's result from a task running
+ * in the {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied action throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> thenAcceptAsync(Action<? super T> block) {
+ return doThenAccept(block, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when this CompletableFuture completes, after performing the given
+ * action with this CompletableFuture's result from a task running
+ * in the given executor.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied action throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> thenAcceptAsync(Action<? super T> block,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doThenAccept(block, executor);
+ }
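+
+ // Illustrative usage (editorial sketch), using the Action interface
+ // declared by this class (invoked as fn.accept(t) below):
+ //
+ //   f.thenAccept(new Action<String>() {
+ //       public void accept(String s) { System.out.println(s); }
+ //   });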
+
+ private CompletableFuture<Void> doThenAccept(Action<? super T> fn,
+ Executor e) {
+ if (fn == null) throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ ThenAccept<T> d = null;
+ Object r;
+ if ((r = result) == null) {
+ CompletionNode p = new CompletionNode
+ (d = new ThenAccept<T>(this, fn, dst, e));
+ while ((r = result) == null) {
+ if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ break;
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncAccept<T>(t, fn, dst));
+ else
+ fn.accept(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when this CompletableFuture completes, after performing the given
+ * action.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied action throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> thenRun(Runnable action) {
+ return doThenRun(action, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when this CompletableFuture completes, after performing the given
+ * action from a task running in the {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied action throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> thenRunAsync(Runnable action) {
+ return doThenRun(action, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when this CompletableFuture completes, after performing the given
+ * action from a task running in the given executor.
+ *
+ * <p>If this CompletableFuture completes exceptionally, or the
+ * supplied action throws an exception, then the returned
+ * CompletableFuture completes exceptionally with a
+ * CompletionException holding the exception as its cause.
+ *
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> thenRunAsync(Runnable action,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doThenRun(action, executor);
+ }
+
+ private CompletableFuture<Void> doThenRun(Runnable action,
+ Executor e) {
+ if (action == null) throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ ThenRun d = null;
+ Object r;
+ if ((r = result) == null) {
+ CompletionNode p = new CompletionNode
+ (d = new ThenRun(this, action, dst, e));
+ while ((r = result) == null) {
+ if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ break;
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ Throwable ex;
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncRun(action, dst));
+ else
+ action.run();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when both this and the other given CompletableFuture complete,
+ * with the result of the given function of the results of the two
+ * CompletableFutures.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied function throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U,V> CompletableFuture<V> thenCombine
+ (CompletableFuture<? extends U> other,
+ BiFun<? super T,? super U,? extends V> fn) {
+ return doThenCombine(other, fn, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when both this and the other given CompletableFuture complete,
+ * with the result of the given function of the results of the two
+ * CompletableFutures from a task running in the
+ * {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied function throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U,V> CompletableFuture<V> thenCombineAsync
+ (CompletableFuture<? extends U> other,
+ BiFun<? super T,? super U,? extends V> fn) {
+ return doThenCombine(other, fn, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when both this and the other given CompletableFuture complete,
+ * with the result of the given function of the results of the two
+ * CompletableFutures from a task running in the given executor.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied function throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public <U,V> CompletableFuture<V> thenCombineAsync
+ (CompletableFuture<? extends U> other,
+ BiFun<? super T,? super U,? extends V> fn,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doThenCombine(other, fn, executor);
+ }
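+
+ // Illustrative usage (editorial sketch), using the BiFun interface
+ // declared by this class; "a" and "b" are assumed
+ // CompletableFuture<Integer> instances:
+ //
+ //   CompletableFuture<Integer> sum =
+ //       a.thenCombine(b, new BiFun<Integer, Integer, Integer>() {
+ //           public Integer apply(Integer x, Integer y) { return x + y; }
+ //       });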
+
+ private <U,V> CompletableFuture<V> doThenCombine
+ (CompletableFuture<? extends U> other,
+ BiFun<? super T,? super U,? extends V> fn,
+ Executor e) {
+ if (other == null || fn == null) throw new NullPointerException();
+ CompletableFuture<V> dst = new CompletableFuture<V>();
+ ThenCombine<T,U,V> d = null;
+ Object r, s = null;
+ if ((r = result) == null || (s = other.result) == null) {
+ d = new ThenCombine<T,U,V>(this, other, fn, dst, e);
+ CompletionNode q = null, p = new CompletionNode(d);
+ while ((r == null && (r = result) == null) ||
+ (s == null && (s = other.result) == null)) {
+ if (q != null) {
+ if (s != null ||
+ UNSAFE.compareAndSwapObject
+ (other, COMPLETIONS, q.next = other.completions, q))
+ break;
+ }
+ else if (r != null ||
+ UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p)) {
+ if (s != null)
+ break;
+ q = new CompletionNode(d);
+ }
+ }
+ }
+ if (r != null && s != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; U u; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex != null)
+ u = null;
+ else if (s instanceof AltResult) {
+ ex = ((AltResult)s).ex;
+ u = null;
+ }
+ else {
+ @SuppressWarnings("unchecked") U us = (U) s;
+ u = us;
+ }
+ V v = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncCombine<T,U,V>(t, u, fn, dst));
+ else
+ v = fn.apply(t, u);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(v, ex);
+ }
+ helpPostComplete();
+ other.helpPostComplete();
+ return dst;
+ }
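+
+ // Editorial note on doThenCombine: unlike the single-source helpers, the
+ // registration loop may enqueue the same ThenCombine action on both
+ // futures (nodes p and q). Its run() only proceeds once both results are
+ // non-null, so whichever source completes last triggers it, and the
+ // compareAndSet(0, 1) claim guarantees the combining function runs at
+ // most once even though two nodes reference it.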
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when both this and the other given CompletableFuture complete,
+ * after performing the given action with the results of the two
+ * CompletableFutures.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied action throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<Void> thenAcceptBoth
+ (CompletableFuture<? extends U> other,
+ BiAction<? super T, ? super U> block) {
+ return doThenAcceptBoth(other, block, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when both this and the other given CompletableFuture complete,
+ * after performing the given action with the results of the two
+ * CompletableFutures from a task running in the {@link
+ * ForkJoinPool#commonPool()}.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied action throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<Void> thenAcceptBothAsync
+ (CompletableFuture<? extends U> other,
+ BiAction<? super T, ? super U> block) {
+ return doThenAcceptBoth(other, block, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when both this and the other given CompletableFuture complete,
+ * after performing the given action with the results of the two
+ * CompletableFutures from a task running in the given executor.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied action throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<Void> thenAcceptBothAsync
+ (CompletableFuture<? extends U> other,
+ BiAction<? super T, ? super U> block,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doThenAcceptBoth(other, block, executor);
+ }
+
+ private <U> CompletableFuture<Void> doThenAcceptBoth
+ (CompletableFuture<? extends U> other,
+ BiAction<? super T,? super U> fn,
+ Executor e) {
+ if (other == null || fn == null) throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ ThenAcceptBoth<T,U> d = null;
+ Object r, s = null;
+ if ((r = result) == null || (s = other.result) == null) {
+ d = new ThenAcceptBoth<T,U>(this, other, fn, dst, e);
+ CompletionNode q = null, p = new CompletionNode(d);
+ while ((r == null && (r = result) == null) ||
+ (s == null && (s = other.result) == null)) {
+ if (q != null) {
+ if (s != null ||
+ UNSAFE.compareAndSwapObject
+ (other, COMPLETIONS, q.next = other.completions, q))
+ break;
+ }
+ else if (r != null ||
+ UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p)) {
+ if (s != null)
+ break;
+ q = new CompletionNode(d);
+ }
+ }
+ }
+ if (r != null && s != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; U u; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex != null)
+ u = null;
+ else if (s instanceof AltResult) {
+ ex = ((AltResult)s).ex;
+ u = null;
+ }
+ else {
+ @SuppressWarnings("unchecked") U us = (U) s;
+ u = us;
+ }
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncAcceptBoth<T,U>(t, u, fn, dst));
+ else
+ fn.accept(t, u);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ other.helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when both this and the other given CompletableFuture complete,
+ * after performing the given action.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied action throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> runAfterBoth(CompletableFuture<?> other,
+ Runnable action) {
+ return doRunAfterBoth(other, action, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when both this and the other given CompletableFuture complete,
+ * after performing the given action from a task running in the
+ * {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied action throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> runAfterBothAsync(CompletableFuture<?> other,
+ Runnable action) {
+ return doRunAfterBoth(other, action, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when both this and the other given CompletableFuture complete,
+ * after performing the given action from a task running in the
+ * given executor.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, or the supplied action throws an exception,
+ * then the returned CompletableFuture completes exceptionally
+ * with a CompletionException holding the exception as its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> runAfterBothAsync(CompletableFuture<?> other,
+ Runnable action,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doRunAfterBoth(other, action, executor);
+ }
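+
+ // Illustrative usage (editorial sketch): the action observes no values,
+ // but an exception from either source still fails the returned future;
+ // "latch" is a hypothetical CountDownLatch:
+ //
+ //   a.runAfterBoth(b, new Runnable() {
+ //       public void run() { latch.countDown(); }
+ //   });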
+
+ private CompletableFuture<Void> doRunAfterBoth(CompletableFuture<?> other,
+ Runnable action,
+ Executor e) {
+ if (other == null || action == null) throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ RunAfterBoth d = null;
+ Object r, s = null;
+ if ((r = result) == null || (s = other.result) == null) {
+ d = new RunAfterBoth(this, other, action, dst, e);
+ CompletionNode q = null, p = new CompletionNode(d);
+ while ((r == null && (r = result) == null) ||
+ (s == null && (s = other.result) == null)) {
+ if (q != null) {
+ if (s != null ||
+ UNSAFE.compareAndSwapObject
+ (other, COMPLETIONS, q.next = other.completions, q))
+ break;
+ }
+ else if (r != null ||
+ UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p)) {
+ if (s != null)
+ break;
+ q = new CompletionNode(d);
+ }
+ }
+ }
+ if (r != null && s != null && (d == null || d.compareAndSet(0, 1))) {
+ Throwable ex;
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ if (ex == null && (s instanceof AltResult))
+ ex = ((AltResult)s).ex;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncRun(action, dst));
+ else
+ action.run();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ other.helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when either this or the other given CompletableFuture completes,
+ * with the result of the given function of either this or the other
+ * CompletableFuture's result.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied function
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> applyToEither
+ (CompletableFuture<? extends T> other,
+ Fun<? super T, U> fn) {
+ return doApplyToEither(other, fn, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when either this or the other given CompletableFuture completes,
+ * with the result of the given function of either this or the other
+ * CompletableFuture's result from a task running in the
+ * {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied function
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> applyToEitherAsync
+ (CompletableFuture<? extends T> other,
+ Fun<? super T, U> fn) {
+ return doApplyToEither(other, fn, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when either this or the other given CompletableFuture completes,
+ * with the result of the given function of either this or the other
+ * CompletableFuture's result from a task running in the
+ * given executor.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied function
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param fn the function to use to compute the value of
+ * the returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> applyToEitherAsync
+ (CompletableFuture<? extends T> other,
+ Fun<? super T, U> fn,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doApplyToEither(other, fn, executor);
+ }
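+
+ // Illustrative usage (editorial sketch): race two equivalent sources
+ // ("a" and "b" are assumed CompletableFuture<String> instances) and
+ // transform whichever result lands first:
+ //
+ //   CompletableFuture<Integer> first =
+ //       a.applyToEither(b, new Fun<String, Integer>() {
+ //           public Integer apply(String s) { return s.length(); }
+ //       });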
+
+ private <U> CompletableFuture<U> doApplyToEither
+ (CompletableFuture<? extends T> other,
+ Fun<? super T, U> fn,
+ Executor e) {
+ if (other == null || fn == null) throw new NullPointerException();
+ CompletableFuture<U> dst = new CompletableFuture<U>();
+ ApplyToEither<T,U> d = null;
+ Object r;
+ if ((r = result) == null && (r = other.result) == null) {
+ d = new ApplyToEither<T,U>(this, other, fn, dst, e);
+ CompletionNode q = null, p = new CompletionNode(d);
+ while ((r = result) == null && (r = other.result) == null) {
+ if (q != null) {
+ if (UNSAFE.compareAndSwapObject
+ (other, COMPLETIONS, q.next = other.completions, q))
+ break;
+ }
+ else if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ q = new CompletionNode(d);
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ U u = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncApply<T,U>(t, fn, dst));
+ else
+ u = fn.apply(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(u, ex);
+ }
+ helpPostComplete();
+ other.helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when either this or the other given CompletableFuture completes,
+ * after performing the given action with the result of either this
+ * or the other CompletableFuture's result.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied action
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> acceptEither
+ (CompletableFuture<? extends T> other,
+ Action<? super T> block) {
+ return doAcceptEither(other, block, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when either this or the other given CompletableFuture completes,
+ * after performing the given action with the result of either this
+ * or the other CompletableFuture's result from a task running in
+ * the {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied action
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> acceptEitherAsync
+ (CompletableFuture<? extends T> other,
+ Action<? super T> block) {
+ return doAcceptEither(other, block, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when either this or the other given CompletableFuture completes,
+ * after performing the given action with the result of either this
+ * or the other CompletableFuture's result from a task running in
+ * the given executor.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied action
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param block the action to perform before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> acceptEitherAsync
+ (CompletableFuture<? extends T> other,
+ Action<? super T> block,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doAcceptEither(other, block, executor);
+ }
+
+ private CompletableFuture<Void> doAcceptEither
+ (CompletableFuture<? extends T> other,
+ Action<? super T> fn,
+ Executor e) {
+ if (other == null || fn == null) throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ AcceptEither<T> d = null;
+ Object r;
+ if ((r = result) == null && (r = other.result) == null) {
+ d = new AcceptEither<T>(this, other, fn, dst, e);
+ CompletionNode q = null, p = new CompletionNode(d);
+ while ((r = result) == null && (r = other.result) == null) {
+ if (q != null) {
+ if (UNSAFE.compareAndSwapObject
+ (other, COMPLETIONS, q.next = other.completions, q))
+ break;
+ }
+ else if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ q = new CompletionNode(d);
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncAccept<T>(t, fn, dst));
+ else
+ fn.accept(t);
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ other.helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed
+ * when either this or the other given CompletableFuture completes,
+ * after performing the given action.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied action
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> runAfterEither(CompletableFuture<?> other,
+ Runnable action) {
+ return doRunAfterEither(other, action, null);
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when either this or the other given CompletableFuture completes,
+ * after performing the given action from a task running in the
+ * {@link ForkJoinPool#commonPool()}.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied action
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> runAfterEitherAsync
+ (CompletableFuture<?> other,
+ Runnable action) {
+ return doRunAfterEither(other, action, ForkJoinPool.commonPool());
+ }
+
+ /**
+ * Returns a new CompletableFuture that is asynchronously completed
+ * when either this or the other given CompletableFuture completes,
+ * after performing the given action from a task running in the
+ * given executor.
+ *
+ * <p>If this and/or the other CompletableFuture complete
+ * exceptionally, then the returned CompletableFuture may also do so,
+ * with a CompletionException holding one of these exceptions as its
+ * cause. No guarantees are made about which result or exception is
+ * used in the returned CompletableFuture. If the supplied action
+ * throws an exception, then the returned CompletableFuture completes
+ * exceptionally with a CompletionException holding the exception as
+ * its cause.
+ *
+ * @param other the other CompletableFuture
+ * @param action the action to perform before completing the
+ * returned CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<Void> runAfterEitherAsync
+ (CompletableFuture<?> other,
+ Runnable action,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doRunAfterEither(other, action, executor);
+ }
+
+ private CompletableFuture<Void> doRunAfterEither
+ (CompletableFuture<?> other,
+ Runnable action,
+ Executor e) {
+ if (other == null || action == null) throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ RunAfterEither d = null;
+ Object r;
+ if ((r = result) == null && (r = other.result) == null) {
+ d = new RunAfterEither(this, other, action, dst, e);
+ CompletionNode q = null, p = new CompletionNode(d);
+ while ((r = result) == null && (r = other.result) == null) {
+ if (q != null) {
+ if (UNSAFE.compareAndSwapObject
+ (other, COMPLETIONS, q.next = other.completions, q))
+ break;
+ }
+ else if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ q = new CompletionNode(d);
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ Throwable ex;
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ if (ex == null) {
+ try {
+ if (e != null)
+ e.execute(new AsyncRun(action, dst));
+ else
+ action.run();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ if (e == null || ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ other.helpPostComplete();
+ return dst;
+ }
+
+ /**
+     * Returns a CompletableFuture that, upon completion, has the same
+ * value as produced by the given function of the result of this
+ * CompletableFuture.
+ *
+ * <p>If this CompletableFuture completes exceptionally, then the
+ * returned CompletableFuture also does so, with a
+ * CompletionException holding this exception as its cause.
+ * Similarly, if the computed CompletableFuture completes
+ * exceptionally, then so does the returned CompletableFuture.
+ *
+ * @param fn the function returning a new CompletableFuture
+ * @return the CompletableFuture
+ */
+ public <U> CompletableFuture<U> thenCompose
+ (Fun<? super T, CompletableFuture<U>> fn) {
+ return doThenCompose(fn, null);
+ }
+
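+    /*
+     * A minimal usage sketch (illustrative only; findUser/loadOrders are
+     * hypothetical helpers returning CompletableFutures): chain a second
+     * asynchronous stage that depends on the first stage's result.
+     *
+     *   CompletableFuture<Orders> orders =
+     *       findUser(id).thenCompose(
+     *           new Fun<User, CompletableFuture<Orders>>() {
+     *               public CompletableFuture<Orders> apply(User u) {
+     *                   return loadOrders(u);  // itself asynchronous
+     *               }
+     *           });
+     */
+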
+ /**
+     * Returns a CompletableFuture that, upon completion, has the same
+ * value as that produced asynchronously using the {@link
+ * ForkJoinPool#commonPool()} by the given function of the result
+ * of this CompletableFuture.
+ *
+ * <p>If this CompletableFuture completes exceptionally, then the
+ * returned CompletableFuture also does so, with a
+ * CompletionException holding this exception as its cause.
+ * Similarly, if the computed CompletableFuture completes
+ * exceptionally, then so does the returned CompletableFuture.
+ *
+ * @param fn the function returning a new CompletableFuture
+ * @return the CompletableFuture
+ */
+ public <U> CompletableFuture<U> thenComposeAsync
+ (Fun<? super T, CompletableFuture<U>> fn) {
+ return doThenCompose(fn, ForkJoinPool.commonPool());
+ }
+
+ /**
+     * Returns a CompletableFuture that, upon completion, has the same
+     * value as that produced asynchronously using the given executor
+     * by the given function of the result of this CompletableFuture.
+ *
+ * <p>If this CompletableFuture completes exceptionally, then the
+ * returned CompletableFuture also does so, with a
+ * CompletionException holding this exception as its cause.
+ * Similarly, if the computed CompletableFuture completes
+ * exceptionally, then so does the returned CompletableFuture.
+ *
+ * @param fn the function returning a new CompletableFuture
+ * @param executor the executor to use for asynchronous execution
+ * @return the CompletableFuture
+ */
+ public <U> CompletableFuture<U> thenComposeAsync
+ (Fun<? super T, CompletableFuture<U>> fn,
+ Executor executor) {
+ if (executor == null) throw new NullPointerException();
+ return doThenCompose(fn, executor);
+ }
+
+ private <U> CompletableFuture<U> doThenCompose
+ (Fun<? super T, CompletableFuture<U>> fn,
+ Executor e) {
+ if (fn == null) throw new NullPointerException();
+ CompletableFuture<U> dst = null;
+ ThenCompose<T,U> d = null;
+ Object r;
+ if ((r = result) == null) {
+ dst = new CompletableFuture<U>();
+ CompletionNode p = new CompletionNode
+ (d = new ThenCompose<T,U>(this, fn, dst, e));
+ while ((r = result) == null) {
+ if (UNSAFE.compareAndSwapObject
+ (this, COMPLETIONS, p.next = completions, p))
+ break;
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ if (ex == null) {
+ if (e != null) {
+ if (dst == null)
+ dst = new CompletableFuture<U>();
+ e.execute(new AsyncCompose<T,U>(t, fn, dst));
+ }
+ else {
+ try {
+ if ((dst = fn.apply(t)) == null)
+ ex = new NullPointerException();
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ }
+ }
+ if (dst == null)
+ dst = new CompletableFuture<U>();
+ if (ex != null)
+ dst.internalComplete(null, ex);
+ }
+ helpPostComplete();
+ dst.helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed when this
+ * CompletableFuture completes, with the result of the given
+ * function of the exception triggering this CompletableFuture's
+ * completion when it completes exceptionally; otherwise, if this
+ * CompletableFuture completes normally, then the returned
+ * CompletableFuture also completes normally with the same value.
+ *
+ * @param fn the function to use to compute the value of the
+ * returned CompletableFuture if this CompletableFuture completed
+ * exceptionally
+ * @return the new CompletableFuture
+ */
+ public CompletableFuture<T> exceptionally
+ (Fun<Throwable, ? extends T> fn) {
+ if (fn == null) throw new NullPointerException();
+ CompletableFuture<T> dst = new CompletableFuture<T>();
+ ExceptionCompletion<T> d = null;
+ Object r;
+ if ((r = result) == null) {
+ CompletionNode p =
+ new CompletionNode(d = new ExceptionCompletion<T>(this, fn, dst));
+ while ((r = result) == null) {
+ if (UNSAFE.compareAndSwapObject(this, COMPLETIONS,
+ p.next = completions, p))
+ break;
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t = null; Throwable ex, dx = null;
+ if (r instanceof AltResult) {
+ if ((ex = ((AltResult)r).ex) != null) {
+ try {
+ t = fn.apply(ex);
+ } catch (Throwable rex) {
+ dx = rex;
+ }
+ }
+ }
+ else {
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ dst.internalComplete(t, dx);
+ }
+ helpPostComplete();
+ return dst;
+ }
+
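+    /*
+     * A minimal usage sketch (illustrative only; "f" is a hypothetical
+     * CompletableFuture<Integer>): recover from any failure with a
+     * fallback value; normal results pass through unchanged.
+     *
+     *   CompletableFuture<Integer> safe =
+     *       f.exceptionally(new Fun<Throwable, Integer>() {
+     *           public Integer apply(Throwable ex) { return -1; }
+     *       });
+     */
+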
+ /**
+ * Returns a new CompletableFuture that is completed when this
+ * CompletableFuture completes, with the result of the given
+ * function of the result and exception of this CompletableFuture's
+ * completion. The given function is invoked with the result (or
+ * {@code null} if none) and the exception (or {@code null} if none)
+ * of this CompletableFuture when complete.
+ *
+ * @param fn the function to use to compute the value of the
+ * returned CompletableFuture
+ * @return the new CompletableFuture
+ */
+ public <U> CompletableFuture<U> handle
+ (BiFun<? super T, Throwable, ? extends U> fn) {
+ if (fn == null) throw new NullPointerException();
+ CompletableFuture<U> dst = new CompletableFuture<U>();
+ HandleCompletion<T,U> d = null;
+ Object r;
+ if ((r = result) == null) {
+ CompletionNode p =
+ new CompletionNode(d = new HandleCompletion<T,U>(this, fn, dst));
+ while ((r = result) == null) {
+ if (UNSAFE.compareAndSwapObject(this, COMPLETIONS,
+ p.next = completions, p))
+ break;
+ }
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ T t; Throwable ex;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ @SuppressWarnings("unchecked") T tr = (T) r;
+ t = tr;
+ }
+ U u; Throwable dx;
+ try {
+ u = fn.apply(t, ex);
+ dx = null;
+ } catch (Throwable rex) {
+ dx = rex;
+ u = null;
+ }
+ dst.internalComplete(u, dx);
+ }
+ helpPostComplete();
+ return dst;
+ }
+
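+    /*
+     * A minimal usage sketch (illustrative only; "f" is a hypothetical
+     * CompletableFuture<Integer>): unlike exceptionally, the function
+     * always runs, receiving the result (or null) and the exception
+     * (or null).
+     *
+     *   CompletableFuture<String> msg =
+     *       f.handle(new BiFun<Integer, Throwable, String>() {
+     *           public String apply(Integer v, Throwable ex) {
+     *               return (ex == null) ? "ok: " + v : "failed: " + ex;
+     *           }
+     *       });
+     */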
+
+ /* ------------- Arbitrary-arity constructions -------------- */
+
+ /*
+ * The basic plan of attack is to recursively form binary
+ * completion trees of elements. This can be overkill for small
+ * sets, but scales nicely. The And/All vs Or/Any forms use the
+ * same idea, but details differ.
+ */
+
+ /**
+ * Returns a new CompletableFuture that is completed when all of
+ * the given CompletableFutures complete. If any of the given
+ * CompletableFutures complete exceptionally, then the returned
+ * CompletableFuture also does so, with a CompletionException
+ * holding this exception as its cause. Otherwise, the results,
+ * if any, of the given CompletableFutures are not reflected in
+ * the returned CompletableFuture, but may be obtained by
+ * inspecting them individually. If no CompletableFutures are
+ * provided, returns a CompletableFuture completed with the value
+ * {@code null}.
+ *
+ * <p>Among the applications of this method is to await completion
+ * of a set of independent CompletableFutures before continuing a
+ * program, as in: {@code CompletableFuture.allOf(c1, c2,
+ * c3).join();}.
+ *
+ * @param cfs the CompletableFutures
+ * @return a new CompletableFuture that is completed when all of the
+ * given CompletableFutures complete
+ * @throws NullPointerException if the array or any of its elements are
+ * {@code null}
+ */
+ public static CompletableFuture<Void> allOf(CompletableFuture<?>... cfs) {
+ int len = cfs.length; // Directly handle empty and singleton cases
+ if (len > 1)
+ return allTree(cfs, 0, len - 1);
+ else {
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ CompletableFuture<?> f;
+ if (len == 0)
+ dst.result = NIL;
+ else if ((f = cfs[0]) == null)
+ throw new NullPointerException();
+ else {
+ ThenPropagate d = null;
+ CompletionNode p = null;
+ Object r;
+ while ((r = f.result) == null) {
+ if (d == null)
+ d = new ThenPropagate(f, dst);
+ else if (p == null)
+ p = new CompletionNode(d);
+ else if (UNSAFE.compareAndSwapObject
+ (f, COMPLETIONS, p.next = f.completions, p))
+ break;
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1)))
+ dst.internalComplete(null, (r instanceof AltResult) ?
+ ((AltResult)r).ex : null);
+ f.helpPostComplete();
+ }
+ return dst;
+ }
+ }
+
+ /**
+ * Recursively constructs an And'ed tree of CompletableFutures.
+     * Called only when the array is known to have at least two elements.
+ */
+ private static CompletableFuture<Void> allTree(CompletableFuture<?>[] cfs,
+ int lo, int hi) {
+ CompletableFuture<?> fst, snd;
+ int mid = (lo + hi) >>> 1;
+ if ((fst = (lo == mid ? cfs[lo] : allTree(cfs, lo, mid))) == null ||
+ (snd = (hi == mid+1 ? cfs[hi] : allTree(cfs, mid+1, hi))) == null)
+ throw new NullPointerException();
+ CompletableFuture<Void> dst = new CompletableFuture<Void>();
+ AndCompletion d = null;
+ CompletionNode p = null, q = null;
+ Object r = null, s = null;
+ while ((r = fst.result) == null || (s = snd.result) == null) {
+ if (d == null)
+ d = new AndCompletion(fst, snd, dst);
+ else if (p == null)
+ p = new CompletionNode(d);
+ else if (q == null) {
+ if (UNSAFE.compareAndSwapObject
+ (fst, COMPLETIONS, p.next = fst.completions, p))
+ q = new CompletionNode(d);
+ }
+ else if (UNSAFE.compareAndSwapObject
+ (snd, COMPLETIONS, q.next = snd.completions, q))
+ break;
+ }
+ if ((r != null || (r = fst.result) != null) &&
+ (s != null || (s = snd.result) != null) &&
+ (d == null || d.compareAndSet(0, 1))) {
+ Throwable ex;
+ if (r instanceof AltResult)
+ ex = ((AltResult)r).ex;
+ else
+ ex = null;
+ if (ex == null && (s instanceof AltResult))
+ ex = ((AltResult)s).ex;
+ dst.internalComplete(null, ex);
+ }
+ fst.helpPostComplete();
+ snd.helpPostComplete();
+ return dst;
+ }
+
+ /**
+ * Returns a new CompletableFuture that is completed when any of
+ * the given CompletableFutures complete, with the same result.
+     * If that CompletableFuture completed exceptionally, the returned
+ * CompletableFuture also does so, with a CompletionException
+ * holding this exception as its cause. If no CompletableFutures
+ * are provided, returns an incomplete CompletableFuture.
+ *
+ * @param cfs the CompletableFutures
+ * @return a new CompletableFuture that is completed with the
+ * result or exception of any of the given CompletableFutures when
+ * one completes
+ * @throws NullPointerException if the array or any of its elements are
+ * {@code null}
+ */
+ public static CompletableFuture<Object> anyOf(CompletableFuture<?>... cfs) {
+ int len = cfs.length; // Same idea as allOf
+ if (len > 1)
+ return anyTree(cfs, 0, len - 1);
+ else {
+ CompletableFuture<Object> dst = new CompletableFuture<Object>();
+ CompletableFuture<?> f;
+ if (len == 0)
+ ; // skip
+ else if ((f = cfs[0]) == null)
+ throw new NullPointerException();
+ else {
+ ThenCopy<Object> d = null;
+ CompletionNode p = null;
+ Object r;
+ while ((r = f.result) == null) {
+ if (d == null)
+ d = new ThenCopy<Object>(f, dst);
+ else if (p == null)
+ p = new CompletionNode(d);
+ else if (UNSAFE.compareAndSwapObject
+ (f, COMPLETIONS, p.next = f.completions, p))
+ break;
+ }
+ if (r != null && (d == null || d.compareAndSet(0, 1))) {
+ Throwable ex; Object t;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ t = r;
+ }
+ dst.internalComplete(t, ex);
+ }
+ f.helpPostComplete();
+ }
+ return dst;
+ }
+ }
+
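+    /*
+     * A minimal usage sketch (illustrative only; f1..f3 are hypothetical
+     * futures): race several equivalent sources and take whichever
+     * result arrives first.
+     *
+     *   CompletableFuture<Object> first = CompletableFuture.anyOf(f1, f2, f3);
+     *   Object winner = first.join(); // throws if the winner failed
+     */
+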
+ /**
+ * Recursively constructs an Or'ed tree of CompletableFutures.
+ */
+ private static CompletableFuture<Object> anyTree(CompletableFuture<?>[] cfs,
+ int lo, int hi) {
+ CompletableFuture<?> fst, snd;
+ int mid = (lo + hi) >>> 1;
+ if ((fst = (lo == mid ? cfs[lo] : anyTree(cfs, lo, mid))) == null ||
+ (snd = (hi == mid+1 ? cfs[hi] : anyTree(cfs, mid+1, hi))) == null)
+ throw new NullPointerException();
+ CompletableFuture<Object> dst = new CompletableFuture<Object>();
+ OrCompletion d = null;
+ CompletionNode p = null, q = null;
+ Object r;
+ while ((r = fst.result) == null && (r = snd.result) == null) {
+ if (d == null)
+ d = new OrCompletion(fst, snd, dst);
+ else if (p == null)
+ p = new CompletionNode(d);
+ else if (q == null) {
+ if (UNSAFE.compareAndSwapObject
+ (fst, COMPLETIONS, p.next = fst.completions, p))
+ q = new CompletionNode(d);
+ }
+ else if (UNSAFE.compareAndSwapObject
+ (snd, COMPLETIONS, q.next = snd.completions, q))
+ break;
+ }
+ if ((r != null || (r = fst.result) != null ||
+ (r = snd.result) != null) &&
+ (d == null || d.compareAndSet(0, 1))) {
+ Throwable ex; Object t;
+ if (r instanceof AltResult) {
+ ex = ((AltResult)r).ex;
+ t = null;
+ }
+ else {
+ ex = null;
+ t = r;
+ }
+ dst.internalComplete(t, ex);
+ }
+ fst.helpPostComplete();
+ snd.helpPostComplete();
+ return dst;
+ }
+
+ /* ------------- Control and status methods -------------- */
+
+ /**
+ * If not already completed, completes this CompletableFuture with
+ * a {@link CancellationException}. Dependent CompletableFutures
+ * that have not already completed will also complete
+ * exceptionally, with a {@link CompletionException} caused by
+ * this {@code CancellationException}.
+ *
+ * @param mayInterruptIfRunning this value has no effect in this
+ * implementation because interrupts are not used to control
+ * processing.
+ *
+ * @return {@code true} if this task is now cancelled
+ */
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ boolean cancelled = (result == null) &&
+ UNSAFE.compareAndSwapObject
+ (this, RESULT, null, new AltResult(new CancellationException()));
+ postComplete();
+ return cancelled || isCancelled();
+ }
+
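+    /*
+     * A minimal sketch (illustrative only): cancellation here is just
+     * exceptional completion, so the boolean argument is ignored and
+     * dependents complete with a CompletionException caused by the
+     * CancellationException.
+     *
+     *   if (f.cancel(true))        // true vs false makes no difference
+     *       assert f.isCancelled();
+     */
+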
+ /**
+ * Returns {@code true} if this CompletableFuture was cancelled
+ * before it completed normally.
+ *
+ * @return {@code true} if this CompletableFuture was cancelled
+ * before it completed normally
+ */
+ public boolean isCancelled() {
+ Object r;
+ return ((r = result) instanceof AltResult) &&
+ (((AltResult)r).ex instanceof CancellationException);
+ }
+
+ /**
+ * Forcibly sets or resets the value subsequently returned by
+ * method {@link #get()} and related methods, whether or not
+ * already completed. This method is designed for use only in
+ * error recovery actions, and even in such situations may result
+ * in ongoing dependent completions using established versus
+ * overwritten outcomes.
+ *
+ * @param value the completion value
+ */
+ public void obtrudeValue(T value) {
+ result = (value == null) ? NIL : value;
+ postComplete();
+ }
+
+ /**
+ * Forcibly causes subsequent invocations of method {@link #get()}
+ * and related methods to throw the given exception, whether or
+ * not already completed. This method is designed for use only in
+ * recovery actions, and even in such situations may result in
+ * ongoing dependent completions using established versus
+ * overwritten outcomes.
+ *
+ * @param ex the exception
+ */
+ public void obtrudeException(Throwable ex) {
+ if (ex == null) throw new NullPointerException();
+ result = new AltResult(ex);
+ postComplete();
+ }
+
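+    /*
+     * A minimal recovery sketch (illustrative only; "fallback" is
+     * hypothetical): obtrusion overwrites any prior outcome, so later
+     * get()/join() calls observe the forced result even if the future
+     * had already completed.
+     *
+     *   f.obtrudeValue(fallback);  // force a value
+     *   // or: f.obtrudeException(new IllegalStateException("reset"));
+     */
+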
+ /**
+ * Returns the estimated number of CompletableFutures whose
+ * completions are awaiting completion of this CompletableFuture.
+ * This method is designed for use in monitoring system state, not
+ * for synchronization control.
+ *
+ * @return the number of dependent CompletableFutures
+ */
+ public int getNumberOfDependents() {
+ int count = 0;
+ for (CompletionNode p = completions; p != null; p = p.next)
+ ++count;
+ return count;
+ }
+
+ /**
+ * Returns a string identifying this CompletableFuture, as well as
+ * its completion state. The state, in brackets, contains the
+     * String {@code "Completed normally"} or the String {@code
+     * "Completed exceptionally"}, or the String {@code "Not
+     * completed"} followed by the number of CompletableFutures
+ * dependent upon its completion, if any.
+ *
+ * @return a string identifying this CompletableFuture, as well as its state
+ */
+ public String toString() {
+ Object r = result;
+ int count;
+ return super.toString() +
+ ((r == null) ?
+ (((count = getNumberOfDependents()) == 0) ?
+ "[Not completed]" :
+ "[Not completed, " + count + " dependents]") :
+ (((r instanceof AltResult) && ((AltResult)r).ex != null) ?
+ "[Completed exceptionally]" :
+ "[Completed normally]"));
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long RESULT;
+ private static final long WAITERS;
+ private static final long COMPLETIONS;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = CompletableFuture.class;
+ RESULT = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("result"));
+ WAITERS = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("waiters"));
+ COMPLETIONS = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("completions"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/CompletionException.java b/src/main/java/jsr166e/CompletionException.java
new file mode 100644
index 0000000..d25d675
--- /dev/null
+++ b/src/main/java/jsr166e/CompletionException.java
@@ -0,0 +1,61 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+/**
+ * Exception thrown when an error or other exception is encountered
+ * in the course of completing a result or task.
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class CompletionException extends RuntimeException {
+ private static final long serialVersionUID = 7830266012832686185L;
+
+ /**
+ * Constructs a {@code CompletionException} with no detail message.
+ * The cause is not initialized, and may subsequently be
+ * initialized by a call to {@link #initCause(Throwable) initCause}.
+ */
+ protected CompletionException() { }
+
+ /**
+ * Constructs a {@code CompletionException} with the specified detail
+ * message. The cause is not initialized, and may subsequently be
+ * initialized by a call to {@link #initCause(Throwable) initCause}.
+ *
+ * @param message the detail message
+ */
+ protected CompletionException(String message) {
+ super(message);
+ }
+
+ /**
+ * Constructs a {@code CompletionException} with the specified detail
+ * message and cause.
+ *
+ * @param message the detail message
+ * @param cause the cause (which is saved for later retrieval by the
+ * {@link #getCause()} method)
+ */
+ public CompletionException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ /**
+ * Constructs a {@code CompletionException} with the specified cause.
+ * The detail message is set to {@code (cause == null ? null :
+ * cause.toString())} (which typically contains the class and
+ * detail message of {@code cause}).
+ *
+ * @param cause the cause (which is saved for later retrieval by the
+ * {@link #getCause()} method)
+ */
+ public CompletionException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/jsr166e/ConcurrentHashMapV8.java b/src/main/java/jsr166e/ConcurrentHashMapV8.java
new file mode 100644
index 0000000..05ed968
--- /dev/null
+++ b/src/main/java/jsr166e/ConcurrentHashMapV8.java
@@ -0,0 +1,6308 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+import jsr166e.ForkJoinPool;
+
+import java.io.ObjectStreamField;
+import java.io.Serializable;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.AbstractMap;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.ConcurrentModificationException;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A hash table supporting full concurrency of retrievals and
+ * high expected concurrency for updates. This class obeys the
+ * same functional specification as {@link java.util.Hashtable}, and
+ * includes versions of methods corresponding to each method of
+ * {@code Hashtable}. However, even though all operations are
+ * thread-safe, retrieval operations do <em>not</em> entail locking,
+ * and there is <em>not</em> any support for locking the entire table
+ * in a way that prevents all access. This class is fully
+ * interoperable with {@code Hashtable} in programs that rely on its
+ * thread safety but not on its synchronization details.
+ *
+ * <p>Retrieval operations (including {@code get}) generally do not
+ * block, so may overlap with update operations (including {@code put}
+ * and {@code remove}). Retrievals reflect the results of the most
+ * recently <em>completed</em> update operations holding upon their
+ * onset. (More formally, an update operation for a given key bears a
+ * <em>happens-before</em> relation with any (non-null) retrieval for
+ * that key reporting the updated value.) For aggregate operations
+ * such as {@code putAll} and {@code clear}, concurrent retrievals may
+ * reflect insertion or removal of only some entries. Similarly,
+ * Iterators and Enumerations return elements reflecting the state of
+ * the hash table at some point at or since the creation of the
+ * iterator/enumeration. They do <em>not</em> throw {@link
+ * ConcurrentModificationException}. However, iterators are designed
+ * to be used by only one thread at a time. Bear in mind that the
+ * results of aggregate status methods including {@code size}, {@code
+ * isEmpty}, and {@code containsValue} are typically useful only when
+ * a map is not undergoing concurrent updates in other threads.
+ * Otherwise the results of these methods reflect transient states
+ * that may be adequate for monitoring or estimation purposes, but not
+ * for program control.
+ *
+ * <p>The table is dynamically expanded when there are too many
+ * collisions (i.e., keys that have distinct hash codes but fall into
+ * the same slot modulo the table size), with the expected average
+ * effect of maintaining roughly two bins per mapping (corresponding
+ * to a 0.75 load factor threshold for resizing). There may be much
+ * variance around this average as mappings are added and removed, but
+ * overall, this maintains a commonly accepted time/space tradeoff for
+ * hash tables. However, resizing this or any other kind of hash
+ * table may be a relatively slow operation. When possible, it is a
+ * good idea to provide a size estimate as an optional {@code
+ * initialCapacity} constructor argument. An additional optional
+ * {@code loadFactor} constructor argument provides a further means of
+ * customizing initial table capacity by specifying the table density
+ * to be used in calculating the amount of space to allocate for the
+ * given number of elements. Also, for compatibility with previous
+ * versions of this class, constructors may optionally specify an
+ * expected {@code concurrencyLevel} as an additional hint for
+ * internal sizing. Note that using many keys with exactly the same
+ * {@code hashCode()} is a sure way to slow down performance of any
+ * hash table. To ameliorate impact, when keys are {@link Comparable},
+ * this class may use comparison order among keys to help break ties.
+ *
+ * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
+ * (using {@link #keySet(Object)}) when only keys are of interest, and the
+ * mapped values are (perhaps transiently) not used or all take the
+ * same mapping value.
+ *
+ * <p>This class and its views and iterators implement all of the
+ * <em>optional</em> methods of the {@link Map} and {@link Iterator}
+ * interfaces.
+ *
+ * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
+ * does <em>not</em> allow {@code null} to be used as a key or value.
+ *
+ * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
+ * operations that are designed
+ * to be safely, and often sensibly, applied even with maps that are
+ * being concurrently updated by other threads; for example, when
+ * computing a snapshot summary of the values in a shared registry.
+ * There are three kinds of operation, each with four forms, accepting
+ * functions with Keys, Values, Entries, and (Key, Value) arguments
+ * and/or return values. Because the elements of a ConcurrentHashMapV8
+ * are not ordered in any particular way, and may be processed in
+ * different orders in different parallel executions, the correctness
+ * of supplied functions should not depend on any ordering, or on any
+ * other objects or values that may transiently change while
+ * computation is in progress; and except for forEach actions, should
+ * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
+ * objects do not support method {@code setValue}.
+ *
+ * <ul>
+ * <li> forEach: Perform a given action on each element.
+ * A variant form applies a given transformation on each element
+ * before performing the action.</li>
+ *
+ * <li> search: Return the first available non-null result of
+ * applying a given function on each element; skipping further
+ * search when a result is found.</li>
+ *
+ * <li> reduce: Accumulate each element. The supplied reduction
+ * function cannot rely on ordering (more formally, it should be
+ * both associative and commutative). There are five variants:
+ *
+ * <ul>
+ *
+ * <li> Plain reductions. (There is not a form of this method for
+ * (key, value) function arguments since there is no corresponding
+ * return type.)</li>
+ *
+ * <li> Mapped reductions that accumulate the results of a given
+ * function applied to each element.</li>
+ *
+ * <li> Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.</li>
+ *
+ * </ul>
+ * </li>
+ * </ul>
+ *
+ * <p>These bulk operations accept a {@code parallelismThreshold}
+ * argument. Methods proceed sequentially if the current map size is
+ * estimated to be less than the given threshold. Using a value of
+ * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
+ * of {@code 1} results in maximal parallelism by partitioning into
+ * enough subtasks to fully utilize the {@link
+ * ForkJoinPool#commonPool()} that is used for all parallel
+ * computations. Normally, you would initially choose one of these
+ * extreme values, and then measure performance of using in-between
+ * values that trade off overhead versus throughput.
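+ *
+ * <p>For example, assuming the {@code reduceValuesToLong} bulk form
+ * declared later in this class, a parallel sum of the values of a
+ * {@code ConcurrentHashMapV8<String,Long>} could be written as
+ * {@code map.reduceValuesToLong(1L, unbox, 0L, add)}, where
+ * {@code unbox} is an {@code ObjectToLong<Long>} returning its
+ * argument's value and {@code add} is a {@code LongByLongToLong}
+ * summing its two arguments; passing {@code Long.MAX_VALUE} instead
+ * of {@code 1L} runs the same reduction sequentially.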
+ *
+ * <p>The concurrency properties of bulk operations follow
+ * from those of ConcurrentHashMapV8: Any non-null result returned
+ * from {@code get(key)} and related access methods bears a
+ * happens-before relation with the associated insertion or
+ * update. The result of any bulk operation reflects the
+ * composition of these per-element relations (but is not
+ * necessarily atomic with respect to the map as a whole unless it
+ * is somehow known to be quiescent). Conversely, because keys
+ * and values in the map are never null, null serves as a reliable
+ * atomic indicator of the current lack of any result. To
+ * maintain this property, null serves as an implicit basis for
+ * all non-scalar reduction operations. For the double, long, and
+ * int versions, the basis should be one that, when combined with
+ * any other value, returns that other value (more formally, it
+ * should be the identity element for the reduction). Most common
+ * reductions have these properties; for example, computing a sum
+ * with basis 0 or a minimum with basis MAX_VALUE.
+ *
+ * <p>Search and transformation functions provided as arguments
+ * should similarly return null to indicate the lack of any result
+ * (in which case it is not used). In the case of mapped
+ * reductions, this also enables transformations to serve as
+ * filters, returning null (or, in the case of primitive
+ * specializations, the identity basis) if the element should not
+ * be combined. You can create compound transformations and
+ * filterings by composing them yourself under this "null means
+ * there is nothing there now" rule before using them in search or
+ * reduce operations.
+ *
+ * <p>Methods accepting and/or returning Entry arguments maintain
+ * key-value associations. They may be useful for example when
+ * finding the key for the greatest value. Note that "plain" Entry
+ * arguments can be supplied using {@code new
+ * AbstractMap.SimpleEntry(k,v)}.
+ *
+ * <p>Bulk operations may complete abruptly, throwing an
+ * exception encountered in the application of a supplied
+ * function. Bear in mind when handling such exceptions that other
+ * concurrently executing functions could also have thrown
+ * exceptions, or would have done so if the first exception had
+ * not occurred.
+ *
+ * <p>Speedups for parallel compared to sequential forms are common
+ * but not guaranteed. Parallel operations involving brief functions
+ * on small maps may execute more slowly than sequential forms if the
+ * underlying work to parallelize the computation is more expensive
+ * than the computation itself. Similarly, parallelization may not
+ * lead to much actual parallelism if all processors are busy
+ * performing unrelated tasks.
+ *
+ * <p>All arguments to all task methods must be non-null.
+ *
+ * <p><em>jsr166e note: During transition, this class
+ * uses nested functional interfaces with different names but the
+ * same forms as those expected for JDK8.</em>
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.5
+ * @author Doug Lea
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V>
+ implements ConcurrentMap<K,V>, Serializable {
+ private static final long serialVersionUID = 7249069246763182397L;
+
+ /**
+ * An object for traversing and partitioning elements of a source.
+ * This interface provides a subset of the functionality of JDK8
+ * java.util.Spliterator.
+ */
+ public static interface ConcurrentHashMapSpliterator<T> {
+ /**
+ * If possible, returns a new spliterator covering
+ * approximately one half of the elements, which will not be
+     * covered by this spliterator. Returns null if it cannot be
+ * split.
+ */
+ ConcurrentHashMapSpliterator<T> trySplit();
+ /**
+ * Returns an estimate of the number of elements covered by
+ * this Spliterator.
+ */
+ long estimateSize();
+
+ /** Applies the action to each untraversed element */
+ void forEachRemaining(Action<? super T> action);
+ /** If an element remains, applies the action and returns true. */
+ boolean tryAdvance(Action<? super T> action);
+ }
+
+ // Sams
+ /** Interface describing a void action of one argument */
+ public interface Action<A> { void apply(A a); }
+ /** Interface describing a void action of two arguments */
+ public interface BiAction<A,B> { void apply(A a, B b); }
+ /** Interface describing a function of one argument */
+ public interface Fun<A,T> { T apply(A a); }
+ /** Interface describing a function of two arguments */
+ public interface BiFun<A,B,T> { T apply(A a, B b); }
+ /** Interface describing a function mapping its argument to a double */
+ public interface ObjectToDouble<A> { double apply(A a); }
+ /** Interface describing a function mapping its argument to a long */
+ public interface ObjectToLong<A> { long apply(A a); }
+ /** Interface describing a function mapping its argument to an int */
+ public interface ObjectToInt<A> {int apply(A a); }
+ /** Interface describing a function mapping two arguments to a double */
+ public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
+ /** Interface describing a function mapping two arguments to a long */
+ public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
+ /** Interface describing a function mapping two arguments to an int */
+ public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
+ /** Interface describing a function mapping two doubles to a double */
+ public interface DoubleByDoubleToDouble { double apply(double a, double b); }
+ /** Interface describing a function mapping two longs to a long */
+ public interface LongByLongToLong { long apply(long a, long b); }
+ /** Interface describing a function mapping two ints to an int */
+ public interface IntByIntToInt { int apply(int a, int b); }
+
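+    /*
+     * A minimal sketch (illustrative only) of supplying one of these
+     * pre-lambda functional interfaces as an anonymous class, here a
+     * Fun mapping a String to its length:
+     *
+     *   Fun<String, Integer> length = new Fun<String, Integer>() {
+     *       public Integer apply(String s) { return s.length(); }
+     *   };
+     */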
+
+ /*
+ * Overview:
+ *
+ * The primary design goal of this hash table is to maintain
+ * concurrent readability (typically method get(), but also
+ * iterators and related methods) while minimizing update
+ * contention. Secondary goals are to keep space consumption about
+ * the same or better than java.util.HashMap, and to support high
+ * initial insertion rates on an empty table by many threads.
+ *
+ * This map usually acts as a binned (bucketed) hash table. Each
+ * key-value mapping is held in a Node. Most nodes are instances
+ * of the basic Node class with hash, key, value, and next
+ * fields. However, various subclasses exist: TreeNodes are
+ * arranged in balanced trees, not lists. TreeBins hold the roots
+ * of sets of TreeNodes. ForwardingNodes are placed at the heads
+ * of bins during resizing. ReservationNodes are used as
+ * placeholders while establishing values in computeIfAbsent and
+ * related methods. The types TreeBin, ForwardingNode, and
+ * ReservationNode do not hold normal user keys, values, or
+ * hashes, and are readily distinguishable during search etc
+ * because they have negative hash fields and null key and value
+ * fields. (These special nodes are either uncommon or transient,
+ * so the impact of carrying around some unused fields is
+ * insignificant.)
+ *
+ * The table is lazily initialized to a power-of-two size upon the
+ * first insertion. Each bin in the table normally contains a
+ * list of Nodes (most often, the list has only zero or one Node).
+ * Table accesses require volatile/atomic reads, writes, and
+ * CASes. Because there is no other way to arrange this without
+ * adding further indirections, we use intrinsics
+ * (sun.misc.Unsafe) operations.
+ *
+ * We use the top (sign) bit of Node hash fields for control
+ * purposes -- it is available anyway because of addressing
+ * constraints. Nodes with negative hash fields are specially
+ * handled or ignored in map methods.
+ *
+ * Insertion (via put or its variants) of the first node in an
+ * empty bin is performed by just CASing it to the bin. This is
+ * by far the most common case for put operations under most
+ * key/hash distributions. Other update operations (insert,
+ * delete, and replace) require locks. We do not want to waste
+ * the space required to associate a distinct lock object with
+ * each bin, so instead use the first node of a bin list itself as
+ * a lock. Locking support for these locks relies on builtin
+ * "synchronized" monitors.
+ *
+ * Using the first node of a list as a lock does not by itself
+ * suffice though: When a node is locked, any update must first
+ * validate that it is still the first node after locking it, and
+ * retry if not. Because new nodes are always appended to lists,
+ * once a node is first in a bin, it remains first until deleted
+ * or the bin becomes invalidated (upon resizing).
+ *
+ * The main disadvantage of per-bin locks is that other update
+ * operations on other nodes in a bin list protected by the same
+ * lock can stall, for example when user equals() or mapping
+ * functions take a long time. However, statistically, under
+ * random hash codes, this is not a common problem. Ideally, the
+ * frequency of nodes in bins follows a Poisson distribution
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+ * parameter of about 0.5 on average, given the resizing threshold
+ * of 0.75, although with a large variance because of resizing
+ * granularity. Ignoring variance, the expected occurrences of
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
+ * first values are:
+ *
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
+ *
+ * Lock contention probability for two threads accessing distinct
+ * elements is roughly 1 / (8 * #elements) under random hashes.
+ *
+ * Actual hash code distributions encountered in practice
+ * sometimes deviate significantly from uniform randomness. This
+ * includes the case when N > (1<<30), so some keys MUST collide.
+ * Similarly for dumb or hostile usages in which multiple keys are
+     * designed to have identical hash codes or ones that differ only
+ * in masked-out high bits. So we use a secondary strategy that
+ * applies when the number of nodes in a bin exceeds a
+ * threshold. These TreeBins use a balanced tree to hold nodes (a
+ * specialized form of red-black trees), bounding search time to
+ * O(log N). Each search step in a TreeBin is at least twice as
+ * slow as in a regular list, but given that N cannot exceed
+ * (1<<64) (before running out of addresses) this bounds search
+ * steps, lock hold times, etc, to reasonable constants (roughly
+ * 100 nodes inspected per operation worst case) so long as keys
+ * are Comparable (which is very common -- String, Long, etc).
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
+ * traversal pointers as regular nodes, so can be traversed in
+ * iterators in the same way.
+ *
+ * The table is resized when occupancy exceeds a percentage
+ * threshold (nominally, 0.75, but see below). Any thread
+ * noticing an overfull bin may assist in resizing after the
+ * initiating thread allocates and sets up the replacement array.
+ * However, rather than stalling, these other threads may proceed
+ * with insertions etc. The use of TreeBins shields us from the
+ * worst case effects of overfilling while resizes are in
+ * progress. Resizing proceeds by transferring bins, one by one,
+ * from the table to the next table. However, threads claim small
+ * blocks of indices to transfer (via field transferIndex) before
+ * doing so, reducing contention. A generation stamp in field
+ * sizeCtl ensures that resizings do not overlap. Because we are
+ * using power-of-two expansion, the elements from each bin must
+ * either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next fields
+ * won't change. On average, only about one-sixth of them need
+ * cloning when a table doubles. The nodes they replace will be
+ * garbage collectable as soon as they are no longer referenced by
+ * any reader thread that may be in the midst of concurrently
+     * traversing the table. Upon transfer, the old table bin contains
+ * only a special forwarding node (with hash field "MOVED") that
+ * contains the next table as its key. On encountering a
+ * forwarding node, access and update operations restart, using
+ * the new table.
+ *
+ * Each bin transfer requires its bin lock, which can stall
+ * waiting for locks while resizing. However, because other
+ * threads can join in and help resize rather than contend for
+ * locks, average aggregate waits become shorter as resizing
+ * progresses. The transfer operation must also ensure that all
+ * accessible bins in both the old and new table are usable by any
+ * traversal. This is arranged in part by proceeding from the
+ * last bin (table.length - 1) up towards the first. Upon seeing
+ * a forwarding node, traversals (see class Traverser) arrange to
+ * move to the new table without revisiting nodes. To ensure that
+ * no intervening nodes are skipped even when moved out of order,
+ * a stack (see class TableStack) is created on first encounter of
+     * a forwarding node during a traversal, to maintain its place when
+     * later resuming processing of the current table. The need for these
+ * save/restore mechanics is relatively rare, but when one
+ * forwarding node is encountered, typically many more will be.
+ * So Traversers use a simple caching scheme to avoid creating so
+ * many new TableStack nodes. (Thanks to Peter Levart for
+ * suggesting use of a stack here.)
+ *
+ * The traversal scheme also applies to partial traversals of
+ * ranges of bins (via an alternate Traverser constructor)
+ * to support partitioned aggregate operations. Also, read-only
+ * operations give up if ever forwarded to a null table, which
+     * provides support for shutdown-style clearing (which is also not
+     * currently implemented).
+ *
+ * Lazy table initialization minimizes footprint until first use,
+ * and also avoids resizings when the first operation is from a
+ * putAll, constructor with map argument, or deserialization.
+ * These cases attempt to override the initial capacity settings,
+ * but harmlessly fail to take effect in cases of races.
+ *
+ * The element count is maintained using a specialization of
+ * LongAdder. We need to incorporate a specialization rather than
+ * just use a LongAdder in order to access implicit
+ * contention-sensing that leads to creation of multiple
+ * CounterCells. The counter mechanics avoid contention on
+ * updates but can encounter cache thrashing if read too
+ * frequently during concurrent access. To avoid reading so often,
+ * resizing under contention is attempted only upon adding to a
+ * bin already holding two or more nodes. Under uniform hash
+ * distributions, the probability of this occurring at threshold
+ * is around 13%, meaning that only about 1 in 8 puts check
+ * threshold (and after resizing, many fewer do so).
+ *
+ * TreeBins use a special form of comparison for search and
+ * related operations (which is the main reason we cannot use
+ * existing collections such as TreeMaps). TreeBins contain
+ * Comparable elements, but may contain others, as well as
+ * elements that are Comparable but not necessarily Comparable for
+ * the same T, so we cannot invoke compareTo among them. To handle
+ * this, the tree is ordered primarily by hash value, then by
+ * Comparable.compareTo order if applicable. On lookup at a node,
+ * if elements are not comparable or compare as 0 then both left
+ * and right children may need to be searched in the case of tied
+ * hash values. (This corresponds to the full list search that
+ * would be necessary if all elements were non-Comparable and had
+ * tied hashes.) On insertion, to keep a total ordering (or as
+ * close as is required here) across rebalancings, we compare
+ * classes and identityHashCodes as tie-breakers. The red-black
+ * balancing code is updated from pre-jdk-collections
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
+ * Algorithms" (CLR).
+ *
+ * TreeBins also require an additional locking mechanism. While
+ * list traversal is always possible by readers even during
+ * updates, tree traversal is not, mainly because of tree-rotations
+ * that may change the root node and/or its linkages. TreeBins
+ * include a simple read-write lock mechanism parasitic on the
+ * main bin-synchronization strategy: Structural adjustments
+ * associated with an insertion or removal are already bin-locked
+ * (and so cannot conflict with other writers) but must wait for
+ * ongoing readers to finish. Since there can be only one such
+ * waiter, we use a simple scheme using a single "waiter" field to
+ * block writers. However, readers need never block. If the root
+ * lock is held, they proceed along the slow traversal path (via
+ * next-pointers) until the lock becomes available or the list is
+ * exhausted, whichever comes first. These cases are not fast, but
+ * maximize aggregate expected throughput.
+ *
+ * Maintaining API and serialization compatibility with previous
+ * versions of this class introduces several oddities. Mainly: We
+     * leave untouched but unused constructor arguments referring to
+ * concurrencyLevel. We accept a loadFactor constructor argument,
+ * but apply it only to initial table capacity (which is the only
+ * time that we can guarantee to honor it.) We also declare an
+ * unused "Segment" class that is instantiated in minimal form
+ * only when serializing.
+ *
+ * Also, solely for compatibility with previous versions of this
+ * class, it extends AbstractMap, even though all of its methods
+ * are overridden, so it is just useless baggage.
+ *
+ * This file is organized to make things a little easier to follow
+ * while reading than they might otherwise: First the main static
+ * declarations and utilities, then fields, then main public
+ * methods (with a few factorings of multiple public methods into
+ * internal ones), then sizing methods, trees, traversers, and
+ * bulk operations.
+ */
+
+ /* ---------------- Constants -------------- */
+
+ /**
+ * The largest possible table capacity. This value must be
+ * exactly 1<<30 to stay within Java array allocation and indexing
+ * bounds for power of two table sizes, and is further required
+ * because the top two bits of 32bit hash fields are used for
+ * control purposes.
+ */
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The default initial table capacity. Must be a power of 2
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
+ */
+ private static final int DEFAULT_CAPACITY = 16;
+
+ /**
+ * The largest possible (non-power of two) array size.
+ * Needed by toArray and related methods.
+ */
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
+
+ /**
+ * The default concurrency level for this table. Unused but
+ * defined for compatibility with previous versions of this class.
+ */
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+
+ /**
+ * The load factor for this table. Overrides of this value in
+ * constructors affect only the initial table capacity. The
+ * actual floating point value isn't normally used -- it is
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
+ * the associated resizing threshold.
+ */
+ private static final float LOAD_FACTOR = 0.75f;
+
+ /**
+ * The bin count threshold for using a tree rather than list for a
+ * bin. Bins are converted to trees when adding an element to a
+ * bin with at least this many nodes. The value must be greater
+ * than 2, and should be at least 8 to mesh with assumptions in
+ * tree removal about conversion back to plain bins upon
+ * shrinkage.
+ */
+ static final int TREEIFY_THRESHOLD = 8;
+
+ /**
+ * The bin count threshold for untreeifying a (split) bin during a
+ * resize operation. Should be less than TREEIFY_THRESHOLD, and at
+ * most 6 to mesh with shrinkage detection under removal.
+ */
+ static final int UNTREEIFY_THRESHOLD = 6;
+
+ /**
+ * The smallest table capacity for which bins may be treeified.
+ * (Otherwise the table is resized if too many nodes in a bin.)
+ * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
+ * conflicts between resizing and treeification thresholds.
+ */
+ static final int MIN_TREEIFY_CAPACITY = 64;
+
+ /**
+ * Minimum number of rebinnings per transfer step. Ranges are
+ * subdivided to allow multiple resizer threads. This value
+ * serves as a lower bound to avoid resizers encountering
+ * excessive memory contention. The value should be at least
+ * DEFAULT_CAPACITY.
+ */
+ private static final int MIN_TRANSFER_STRIDE = 16;
+
+ /**
+ * The number of bits used for generation stamp in sizeCtl.
+ * Must be at least 6 for 32bit arrays.
+ */
+ private static int RESIZE_STAMP_BITS = 16;
+
+ /**
+ * The maximum number of threads that can help resize.
+ * Must fit in 32 - RESIZE_STAMP_BITS bits.
+ */
+ private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
+
+ /**
+ * The bit shift for recording size stamp in sizeCtl.
+ */
+ private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
+
+ /*
+ * Encodings for Node hash fields. See above for explanation.
+ */
+ static final int MOVED = -1; // hash for forwarding nodes
+ static final int TREEBIN = -2; // hash for roots of trees
+ static final int RESERVED = -3; // hash for transient reservations
+ static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
+
+ /** Number of CPUS, to place bounds on some sizings */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /** For serialization compatibility. */
+ private static final ObjectStreamField[] serialPersistentFields = {
+ new ObjectStreamField("segments", Segment[].class),
+ new ObjectStreamField("segmentMask", Integer.TYPE),
+ new ObjectStreamField("segmentShift", Integer.TYPE)
+ };
+
+ /* ---------------- Nodes -------------- */
+
+ /**
+ * Key-value entry. This class is never exported out as a
+ * user-mutable Map.Entry (i.e., one supporting setValue; see
+ * MapEntry below), but can be used for read-only traversals used
+ * in bulk tasks. Subclasses of Node with a negative hash field
+ * are special, and contain null keys and values (but are never
+ * exported). Otherwise, keys and vals are never null.
+ */
+ static class Node<K,V> implements Map.Entry<K,V> {
+ final int hash;
+ final K key;
+ volatile V val;
+ volatile Node<K,V> next;
+
+ Node(int hash, K key, V val, Node<K,V> next) {
+ this.hash = hash;
+ this.key = key;
+ this.val = val;
+ this.next = next;
+ }
+
+ public final K getKey() { return key; }
+ public final V getValue() { return val; }
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public final String toString(){ return key + "=" + val; }
+ public final V setValue(V value) {
+ throw new UnsupportedOperationException();
+ }
+
+ public final boolean equals(Object o) {
+ Object k, v, u; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == (u = val) || v.equals(u)));
+ }
+
+ /**
+ * Virtualized support for map.get(); overridden in subclasses.
+ */
+ Node<K,V> find(int h, Object k) {
+ Node<K,V> e = this;
+ if (k != null) {
+ do {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ } while ((e = e.next) != null);
+ }
+ return null;
+ }
+ }
+
+ /* ---------------- Static utilities -------------- */
+
+ /**
+ * Spreads (XORs) higher bits of hash to lower and also forces top
+ * bit to 0. Because the table uses power-of-two masking, sets of
+ * hashes that vary only in bits above the current mask will
+ * always collide. (Among known examples are sets of Float keys
+ * holding consecutive whole numbers in small tables.) So we
+ * apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed (so don't benefit from
+ * spreading), and because we use trees to handle large sets of
+ * collisions in bins, we just XOR some shifted bits in the
+ * cheapest possible way to reduce systematic lossage, as well as
+ * to incorporate impact of the highest bits that would otherwise
+ * never be used in index calculations because of table bounds.
+ */
+ static final int spread(int h) {
+ return (h ^ (h >>> 16)) & HASH_BITS;
+ }
+
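+    // Worked example (illustrative): spread(0x00010000) == 0x00010001;
+    // without the XOR of the high bits, that hash and hash 0 would
+    // collide in any table of 65536 or fewer bins.
+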
+ /**
+ * Returns a power of two table size for the given desired capacity.
+     * See Hacker's Delight, sec 3.2
+ */
+ private static final int tableSizeFor(int c) {
+ int n = c - 1;
+ n |= n >>> 1;
+ n |= n >>> 2;
+ n |= n >>> 4;
+ n |= n >>> 8;
+ n |= n >>> 16;
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
+ }
+
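+    // Worked example (illustrative): tableSizeFor(34) smears the bits
+    // of n = 33 (100001b) down to 63 (111111b) and returns 64, the
+    // smallest power of two >= 34.
+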
+ /**
+ * Returns x's Class if it is of the form "class C implements
+ * Comparable<C>", else null.
+ */
+ static Class<?> comparableClassFor(Object x) {
+ if (x instanceof Comparable) {
+ Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
+ if ((c = x.getClass()) == String.class) // bypass checks
+ return c;
+ if ((ts = c.getGenericInterfaces()) != null) {
+ for (int i = 0; i < ts.length; ++i) {
+ if (((t = ts[i]) instanceof ParameterizedType) &&
+ ((p = (ParameterizedType)t).getRawType() ==
+ Comparable.class) &&
+ (as = p.getActualTypeArguments()) != null &&
+ as.length == 1 && as[0] == c) // type arg is c
+ return c;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns k.compareTo(x) if x matches kc (k's screened comparable
+ * class), else 0.
+ */
+ @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
+ static int compareComparables(Class<?> kc, Object k, Object x) {
+ return (x == null || x.getClass() != kc ? 0 :
+ ((Comparable)k).compareTo(x));
+ }
+
+ /* ---------------- Table element access -------------- */
+
+ /*
+ * Volatile access methods are used for table elements as well as
+ * elements of in-progress next table while resizing. All uses of
+ * the tab arguments must be null checked by callers. All callers
+ * also paranoically precheck that tab's length is not zero (or an
+ * equivalent check), thus ensuring that any index argument taking
+ * the form of a hash value anded with (length - 1) is a valid
+ * index. Note that, to be correct wrt arbitrary concurrency
+ * errors by users, these checks must operate on local variables,
+ * which accounts for some odd-looking inline assignments below.
+ * Note that calls to setTabAt always occur within locked regions,
+ * and so in principle require only release ordering, not
+ * full volatile semantics, but are currently coded as volatile
+ * writes to be conservative.
+ */
+
+ @SuppressWarnings("unchecked")
+ static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
+ return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
+ }
+
+ static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
+ Node<K,V> c, Node<K,V> v) {
+ return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
+ }
+
+ static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
+ U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
+ }
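+
+ /*
+ * Illustrative arithmetic (not part of the upstream class): ABASE
+ * is the byte offset of element 0 and ASHIFT is log2 of the
+ * element size, so element i lives at ABASE + ((long)i << ASHIFT).
+ * On a typical 64-bit JVM with compressed oops (a JVM-dependent
+ * assumption: 4-byte references, 16-byte array header):
+ *
+ * tabAt(tab, 3) // volatile read at byte offset 16 + (3 << 2) = 28
+ */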
+
+ /* ---------------- Fields -------------- */
+
+ /**
+ * The array of bins. Lazily initialized upon first insertion.
+ * Size is always a power of two. Accessed directly by iterators.
+ */
+ transient volatile Node<K,V>[] table;
+
+ /**
+ * The next table to use; non-null only while resizing.
+ */
+ private transient volatile Node<K,V>[] nextTable;
+
+ /**
+ * Base counter value, used mainly when there is no contention,
+ * but also as a fallback during table initialization
+ * races. Updated via CAS.
+ */
+ private transient volatile long baseCount;
+
+ /**
+ * Table initialization and resizing control. When negative, the
+ * table is being initialized or resized: -1 for initialization,
+ * else -(1 + the number of active resizing threads). Otherwise,
+ * when table is null, holds the initial table size to use upon
+ * creation, or 0 for default. After initialization, holds the
+ * next element count value upon which to resize the table.
+ */
+ private transient volatile int sizeCtl;
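+
+ /*
+ * Illustrative summary (not part of the upstream class) of the
+ * sizeCtl states described above:
+ *
+ * sizeCtl > 0, table == null -> initial table size to use
+ * sizeCtl == 0, table == null -> use DEFAULT_CAPACITY
+ * sizeCtl == -1 -> table being initialized
+ * sizeCtl < -1 -> -(1 + active resizing threads)
+ * sizeCtl > 0, table != null -> resize when count reaches this
+ * value (n - (n >>> 2), i.e. 0.75 * n)
+ */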
+
+ /**
+ * The next table index (plus one) to split while resizing.
+ */
+ private transient volatile int transferIndex;
+
+ /**
+ * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
+ */
+ private transient volatile int cellsBusy;
+
+ /**
+ * Table of counter cells. When non-null, size is a power of 2.
+ */
+ private transient volatile CounterCell[] counterCells;
+
+ // views
+ private transient KeySetView<K,V> keySet;
+ private transient ValuesView<K,V> values;
+ private transient EntrySetView<K,V> entrySet;
+
+
+ /* ---------------- Public operations -------------- */
+
+ /**
+ * Creates a new, empty map with the default initial table size (16).
+ */
+ public ConcurrentHashMapV8() {
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size
+ * accommodating the specified number of elements without the need
+ * to dynamically resize.
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
+ */
+ public ConcurrentHashMapV8(int initialCapacity) {
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException();
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
+ MAXIMUM_CAPACITY :
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+ this.sizeCtl = cap;
+ }
+
+ /**
+ * Creates a new map with the same mappings as the given map.
+ *
+ * @param m the map
+ */
+ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
+ this.sizeCtl = DEFAULT_CAPACITY;
+ putAll(m);
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}) and
+ * initial table density ({@code loadFactor}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
+ */
+ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
+ this(initialCapacity, loadFactor, 1);
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}), table
+ * density ({@code loadFactor}), and number of concurrently
+ * updating threads ({@code concurrencyLevel}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation may use this value as
+ * a sizing hint.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive
+ */
+ public ConcurrentHashMapV8(int initialCapacity,
+ float loadFactor, int concurrencyLevel) {
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
+ throw new IllegalArgumentException();
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
+ initialCapacity = concurrencyLevel; // as estimated threads
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
+ int cap = (size >= (long)MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
+ this.sizeCtl = cap;
+ }
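+
+ /*
+ * Illustrative sizing (not part of the upstream class): the
+ * constructors only record a target in sizeCtl; no table is
+ * allocated until the first insertion. Following tableSizeFor
+ * above:
+ *
+ * new ConcurrentHashMapV8<String,Long>(); // sizeCtl = 0; 16 bins on first put
+ * new ConcurrentHashMapV8<String,Long>(100); // tableSizeFor(100 + 50 + 1) = 256 bins
+ * new ConcurrentHashMapV8<String,Long>(16, 0.75f, 4); // tableSizeFor((int)(1.0 + 16/0.75f)) = 32 bins
+ */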
+
+ // Original (since JDK1.2) Map methods
+
+ /**
+ * {@inheritDoc}
+ */
+ public int size() {
+ long n = sumCount();
+ return ((n < 0L) ? 0 :
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
+ (int)n);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public boolean isEmpty() {
+ return sumCount() <= 0L; // ignore transient negative values
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
+ * then this method returns {@code v}; otherwise it returns
+ * {@code null}. (There can be at most one such mapping.)
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ public V get(Object key) {
+ Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
+ int h = spread(key.hashCode());
+ if ((tab = table) != null && (n = tab.length) > 0 &&
+ (e = tabAt(tab, (n - 1) & h)) != null) {
+ if ((eh = e.hash) == h) {
+ if ((ek = e.key) == key || (ek != null && key.equals(ek)))
+ return e.val;
+ }
+ else if (eh < 0)
+ return (p = e.find(h, key)) != null ? p.val : null;
+ while ((e = e.next) != null) {
+ if (e.hash == h &&
+ ((ek = e.key) == key || (ek != null && key.equals(ek))))
+ return e.val;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Tests if the specified object is a key in this table.
+ *
+ * @param key possible key
+ * @return {@code true} if and only if the specified object
+ * is a key in this table, as determined by the
+ * {@code equals} method; {@code false} otherwise
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean containsKey(Object key) {
+ return get(key) != null;
+ }
+
+ /**
+ * Returns {@code true} if this map maps one or more keys to the
+ * specified value. Note: This method may require a full traversal
+ * of the map, and is much slower than method {@code containsKey}.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return {@code true} if this map maps one or more keys to the
+ * specified value
+ * @throws NullPointerException if the specified value is null
+ */
+ public boolean containsValue(Object value) {
+ if (value == null)
+ throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V v;
+ if ((v = p.val) == value || (v != null && value.equals(v)))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Maps the specified key to the specified value in this table.
+ * Neither the key nor the value can be null.
+ *
+ * <p>The value can be retrieved by calling the {@code get} method
+ * with a key that is equal to the original key.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V put(K key, V value) {
+ return putVal(key, value, false);
+ }
+
+ /** Implementation for put and putIfAbsent */
+ final V putVal(K key, V value, boolean onlyIfAbsent) {
+ if (key == null || value == null) throw new NullPointerException();
+ int hash = spread(key.hashCode());
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
+ if (casTabAt(tab, i, null,
+ new Node<K,V>(hash, key, value, null)))
+ break; // no lock when adding to empty bin
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ V oldVal = null;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f;; ++binCount) {
+ K ek;
+ if (e.hash == hash &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ oldVal = e.val;
+ if (!onlyIfAbsent)
+ e.val = value;
+ break;
+ }
+ Node<K,V> pred = e;
+ if ((e = e.next) == null) {
+ pred.next = new Node<K,V>(hash, key,
+ value, null);
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ Node<K,V> p;
+ binCount = 2;
+ if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
+ value)) != null) {
+ oldVal = p.val;
+ if (!onlyIfAbsent)
+ p.val = value;
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ }
+ addCount(1L, binCount);
+ return null;
+ }
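+
+ /*
+ * Condensed control flow of putVal above (an illustrative sketch,
+ * not part of the upstream class):
+ *
+ * if (bin is empty) CAS the new node in; // no lock taken
+ * else if (bin is MOVED) help the resize and retry;
+ * else synchronized (head) { scan list/tree; insert or overwrite; }
+ * treeify the bin if it grew past TREEIFY_THRESHOLD;
+ * addCount(1L, binCount); // may trigger a resize
+ *
+ * Mutual exclusion is therefore per-bin, on the head node itself.
+ */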
+
+ /**
+ * Copies all of the mappings from the specified map to this one.
+ * These mappings replace any mappings that this map had for any of the
+ * keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ */
+ public void putAll(Map<? extends K, ? extends V> m) {
+ tryPresize(m.size());
+ for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
+ putVal(e.getKey(), e.getValue(), false);
+ }
+
+ /**
+ * Removes the key (and its corresponding value) from this map.
+ * This method does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}
+ * @throws NullPointerException if the specified key is null
+ */
+ public V remove(Object key) {
+ return replaceNode(key, null, null);
+ }
+
+ /**
+ * Implementation for the four public remove/replace methods:
+ * replaces the node value with v, conditional upon a match of cv
+ * if cv is non-null. If the resulting value is null, the node is deleted.
+ */
+ final V replaceNode(Object key, V value, Object cv) {
+ int hash = spread(key.hashCode());
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0 ||
+ (f = tabAt(tab, i = (n - 1) & hash)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ V oldVal = null;
+ boolean validated = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ validated = true;
+ for (Node<K,V> e = f, pred = null;;) {
+ K ek;
+ if (e.hash == hash &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ V ev = e.val;
+ if (cv == null || cv == ev ||
+ (ev != null && cv.equals(ev))) {
+ oldVal = ev;
+ if (value != null)
+ e.val = value;
+ else if (pred != null)
+ pred.next = e.next;
+ else
+ setTabAt(tab, i, e.next);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ else if (f instanceof TreeBin) {
+ validated = true;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(hash, key, null)) != null) {
+ V pv = p.val;
+ if (cv == null || cv == pv ||
+ (pv != null && cv.equals(pv))) {
+ oldVal = pv;
+ if (value != null)
+ p.val = value;
+ else if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ }
+ if (validated) {
+ if (oldVal != null) {
+ if (value == null)
+ addCount(-1L, -1);
+ return oldVal;
+ }
+ break;
+ }
+ }
+ }
+ return null;
+ }
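+
+ /*
+ * How the four public callers map onto replaceNode(key, value, cv)
+ * (an illustrative summary, not part of the upstream class):
+ *
+ * remove(k) -> replaceNode(k, null, null) // unconditional delete
+ * remove(k, v) -> replaceNode(k, null, v) // delete only if value matches v
+ * replace(k, v) -> replaceNode(k, v, null) // set only if a mapping exists
+ * replace(k, o, n) -> replaceNode(k, n, o) // set only if current value is o
+ */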
+
+ /**
+ * Removes all of the mappings from this map.
+ */
+ public void clear() {
+ long delta = 0L; // negative number of deletions
+ int i = 0;
+ Node<K,V>[] tab = table;
+ while (tab != null && i < tab.length) {
+ int fh;
+ Node<K,V> f = tabAt(tab, i);
+ if (f == null)
+ ++i;
+ else if ((fh = f.hash) == MOVED) {
+ tab = helpTransfer(tab, f);
+ i = 0; // restart
+ }
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ Node<K,V> p = (fh >= 0 ? f :
+ (f instanceof TreeBin) ?
+ ((TreeBin<K,V>)f).first : null);
+ while (p != null) {
+ --delta;
+ p = p.next;
+ }
+ setTabAt(tab, i++, null);
+ }
+ }
+ }
+ }
+ if (delta != 0L)
+ addCount(delta, -1);
+ }
+
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from this map,
+ * via the {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations. It does not support the {@code add} or
+ * {@code addAll} operations.
+ *
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ *
+ * @return the set view
+ */
+ public KeySetView<K,V> keySet() {
+ KeySetView<K,V> ks;
+ return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
+ }
+
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from this map, via the {@code Iterator.remove},
+ * {@code Collection.remove}, {@code removeAll},
+ * {@code retainAll}, and {@code clear} operations. It does not
+ * support the {@code add} or {@code addAll} operations.
+ *
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ *
+ * @return the collection view
+ */
+ public Collection<V> values() {
+ ValuesView<K,V> vs;
+ return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
+ }
+
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from the map,
+ * via the {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations.
+ *
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ *
+ * @return the set view
+ */
+ public Set<Map.Entry<K,V>> entrySet() {
+ EntrySetView<K,V> es;
+ return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
+ }
+
+ /**
+ * Returns the hash code value for this {@link Map}, i.e.,
+ * the sum of, for each key-value pair in the map,
+ * {@code key.hashCode() ^ value.hashCode()}.
+ *
+ * @return the hash code value for this map
+ */
+ public int hashCode() {
+ int h = 0;
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ h += p.key.hashCode() ^ p.val.hashCode();
+ }
+ return h;
+ }
+
+ /**
+ * Returns a string representation of this map. The string
+ * representation consists of a list of key-value mappings (in no
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
+ * mappings are separated by the characters {@code ", "} (comma
+ * and space). Each key-value mapping is rendered as the key
+ * followed by an equals sign ("{@code =}") followed by the
+ * associated value.
+ *
+ * @return a string representation of this map
+ */
+ public String toString() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ StringBuilder sb = new StringBuilder();
+ sb.append('{');
+ Node<K,V> p;
+ if ((p = it.advance()) != null) {
+ for (;;) {
+ K k = p.key;
+ V v = p.val;
+ sb.append(k == this ? "(this Map)" : k);
+ sb.append('=');
+ sb.append(v == this ? "(this Map)" : v);
+ if ((p = it.advance()) == null)
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append('}').toString();
+ }
+
+ /**
+ * Compares the specified object with this map for equality.
+ * Returns {@code true} if the given object is a map with the same
+ * mappings as this map. This operation may return misleading
+ * results if either map is concurrently modified during execution
+ * of this method.
+ *
+ * @param o object to be compared for equality with this map
+ * @return {@code true} if the specified object is equal to this map
+ */
+ public boolean equals(Object o) {
+ if (o != this) {
+ if (!(o instanceof Map))
+ return false;
+ Map<?,?> m = (Map<?,?>) o;
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V val = p.val;
+ Object v = m.get(p.key);
+ if (v == null || (v != val && !v.equals(val)))
+ return false;
+ }
+ for (Map.Entry<?,?> e : m.entrySet()) {
+ Object mk, mv, v;
+ if ((mk = e.getKey()) == null ||
+ (mv = e.getValue()) == null ||
+ (v = get(mk)) == null ||
+ (mv != v && !mv.equals(v)))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Stripped-down version of helper class used in previous version,
+ * declared for the sake of serialization compatibility
+ */
+ static class Segment<K,V> extends ReentrantLock implements Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ final float loadFactor;
+ Segment(float lf) { this.loadFactor = lf; }
+ }
+
+ /**
+ * Saves the state of the {@code ConcurrentHashMapV8} instance to a
+ * stream (i.e., serializes it).
+ * @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
+ * @serialData
+ * the key (Object) and value (Object)
+ * for each key-value mapping, followed by a null pair.
+ * The key-value mappings are emitted in no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ // For serialization compatibility
+ // Emulate segment calculation from previous version of this class
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ int segmentShift = 32 - sshift;
+ int segmentMask = ssize - 1;
+ @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
+ for (int i = 0; i < segments.length; ++i)
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
+ s.putFields().put("segments", segments);
+ s.putFields().put("segmentShift", segmentShift);
+ s.putFields().put("segmentMask", segmentMask);
+ s.writeFields();
+
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ s.writeObject(p.key);
+ s.writeObject(p.val);
+ }
+ }
+ s.writeObject(null);
+ s.writeObject(null);
+ segments = null; // throw away
+ }
+
+ /**
+ * Reconstitutes the instance from a stream (that is, deserializes it).
+ * @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ /*
+ * To improve performance in typical cases, we create nodes
+ * while reading, then place in table once size is known.
+ * However, we must also validate uniqueness and deal with
+ * overpopulated bins while doing so, which requires
+ * specialized versions of putVal mechanics.
+ */
+ sizeCtl = -1; // force exclusion for table construction
+ s.defaultReadObject();
+ long size = 0L;
+ Node<K,V> p = null;
+ for (;;) {
+ @SuppressWarnings("unchecked") K k = (K) s.readObject();
+ @SuppressWarnings("unchecked") V v = (V) s.readObject();
+ if (k != null && v != null) {
+ p = new Node<K,V>(spread(k.hashCode()), k, v, p);
+ ++size;
+ }
+ else
+ break;
+ }
+ if (size == 0L)
+ sizeCtl = 0;
+ else {
+ int n;
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
+ n = MAXIMUM_CAPACITY;
+ else {
+ int sz = (int)size;
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
+ }
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
+ int mask = n - 1;
+ long added = 0L;
+ while (p != null) {
+ boolean insertAtFront;
+ Node<K,V> next = p.next, first;
+ int h = p.hash, j = h & mask;
+ if ((first = tabAt(tab, j)) == null)
+ insertAtFront = true;
+ else {
+ K k = p.key;
+ if (first.hash < 0) {
+ TreeBin<K,V> t = (TreeBin<K,V>)first;
+ if (t.putTreeVal(h, k, p.val) == null)
+ ++added;
+ insertAtFront = false;
+ }
+ else {
+ int binCount = 0;
+ insertAtFront = true;
+ Node<K,V> q; K qk;
+ for (q = first; q != null; q = q.next) {
+ if (q.hash == h &&
+ ((qk = q.key) == k ||
+ (qk != null && k.equals(qk)))) {
+ insertAtFront = false;
+ break;
+ }
+ ++binCount;
+ }
+ if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
+ insertAtFront = false;
+ ++added;
+ p.next = first;
+ TreeNode<K,V> hd = null, tl = null;
+ for (q = p; q != null; q = q.next) {
+ TreeNode<K,V> t = new TreeNode<K,V>
+ (q.hash, q.key, q.val, null, null);
+ if ((t.prev = tl) == null)
+ hd = t;
+ else
+ tl.next = t;
+ tl = t;
+ }
+ setTabAt(tab, j, new TreeBin<K,V>(hd));
+ }
+ }
+ }
+ if (insertAtFront) {
+ ++added;
+ p.next = first;
+ setTabAt(tab, j, p);
+ }
+ p = next;
+ }
+ table = tab;
+ sizeCtl = n - (n >>> 2);
+ baseCount = added;
+ }
+ }
+
+ // ConcurrentMap methods
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or {@code null} if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V putIfAbsent(K key, V value) {
+ return putVal(key, value, true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean remove(Object key, Object value) {
+ if (key == null)
+ throw new NullPointerException();
+ return value != null && replaceNode(key, null, value) != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if any of the arguments are null
+ */
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (key == null || oldValue == null || newValue == null)
+ throw new NullPointerException();
+ return replaceNode(key, newValue, oldValue) != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or {@code null} if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V replace(K key, V value) {
+ if (key == null || value == null)
+ throw new NullPointerException();
+ return replaceNode(key, value, null);
+ }
+
+ // Overrides of JDK8+ Map extension method defaults
+
+ /**
+ * Returns the value to which the specified key is mapped, or the
+ * given default value if this map contains no mapping for the
+ * key.
+ *
+ * @param key the key whose associated value is to be returned
+ * @param defaultValue the value to return if this map contains
+ * no mapping for the given key
+ * @return the mapping for the key, if present; else the default value
+ * @throws NullPointerException if the specified key is null
+ */
+ public V getOrDefault(Object key, V defaultValue) {
+ V v;
+ return (v = get(key)) == null ? defaultValue : v;
+ }
+
+ public void forEach(BiAction<? super K, ? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ action.apply(p.key, p.val);
+ }
+ }
+ }
+
+ public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
+ if (function == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V oldValue = p.val;
+ for (K key = p.key;;) {
+ V newValue = function.apply(key, oldValue);
+ if (newValue == null)
+ throw new NullPointerException();
+ if (replaceNode(key, newValue, oldValue) != null ||
+ (oldValue = get(key)) == null)
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * If the specified key is not already associated with a value,
+ * attempts to compute its value using the given mapping function
+ * and enters it into this map unless {@code null}. The entire
+ * method invocation is performed atomically, so the function is
+ * applied at most once per key. Some attempted update operations
+ * on this map by other threads may be blocked while computation
+ * is in progress, so the computation should be short and simple,
+ * and must not attempt to update any other mappings of this map.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param mappingFunction the function to compute a value
+ * @return the current (existing or computed) value associated with
+ * the specified key, or null if the computed value is null
+ * @throws NullPointerException if the specified key or mappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the mappingFunction does so,
+ * in which case the mapping is left unestablished
+ */
+ public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
+ if (key == null || mappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
+ Node<K,V> r = new ReservationNode<K,V>();
+ synchronized (r) {
+ if (casTabAt(tab, i, null, r)) {
+ binCount = 1;
+ Node<K,V> node = null;
+ try {
+ if ((val = mappingFunction.apply(key)) != null)
+ node = new Node<K,V>(h, key, val, null);
+ } finally {
+ setTabAt(tab, i, node);
+ }
+ }
+ }
+ if (binCount != 0)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ boolean added = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f;; ++binCount) {
+ K ek; V ev;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = e.val;
+ break;
+ }
+ Node<K,V> pred = e;
+ if ((e = e.next) == null) {
+ if ((val = mappingFunction.apply(key)) != null) {
+ added = true;
+ pred.next = new Node<K,V>(h, key, val, null);
+ }
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 2;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(h, key, null)) != null)
+ val = p.val;
+ else if ((val = mappingFunction.apply(key)) != null) {
+ added = true;
+ t.putTreeVal(h, key, val);
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ if (!added)
+ return val;
+ break;
+ }
+ }
+ }
+ if (val != null)
+ addCount(1L, binCount);
+ return val;
+ }
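+
+ /*
+ * Illustrative usage (not part of the upstream class; Fun is this
+ * backport's stand-in for java.util.function.Function):
+ *
+ * ConcurrentHashMapV8<String,List<String>> idx =
+ * new ConcurrentHashMapV8<String,List<String>>();
+ * idx.computeIfAbsent(key, new Fun<String,List<String>>() {
+ * public List<String> apply(String k) { return new ArrayList<String>(); }
+ * }).add(value);
+ *
+ * The ReservationNode above ensures the function runs at most once
+ * per absent key, but also means it must not itself update this map.
+ */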
+
+ /**
+ * If the value for the specified key is present, attempts to
+ * compute a new mapping given the key and its current mapped
+ * value. The entire method invocation is performed atomically.
+ * Some attempted update operations on this map by other threads
+ * may be blocked while computation is in progress, so the
+ * computation should be short and simple, and must not attempt to
+ * update any other mappings of this map.
+ *
+ * @param key key with which a value may be associated
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
+ if (key == null || remappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int delta = 0;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f, pred = null;; ++binCount) {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = remappingFunction.apply(key, e.val);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 2;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(h, key, null)) != null) {
+ val = remappingFunction.apply(key, p.val);
+ if (val != null)
+ p.val = val;
+ else {
+ delta = -1;
+ if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ }
+ if (binCount != 0)
+ break;
+ }
+ }
+ if (delta != 0)
+ addCount((long)delta, binCount);
+ return val;
+ }
+
+ /**
+ * Attempts to compute a mapping for the specified key and its
+ * current mapped value (or {@code null} if there is no current
+ * mapping). The entire method invocation is performed atomically.
+ * Some attempted update operations on this map by other threads
+ * may be blocked while computation is in progress, so the
+ * computation should be short and simple, and must not attempt to
+ * update any other mappings of this Map.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V compute(K key,
+ BiFun<? super K, ? super V, ? extends V> remappingFunction) {
+ if (key == null || remappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int delta = 0;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
+ Node<K,V> r = new ReservationNode<K,V>();
+ synchronized (r) {
+ if (casTabAt(tab, i, null, r)) {
+ binCount = 1;
+ Node<K,V> node = null;
+ try {
+ if ((val = remappingFunction.apply(key, null)) != null) {
+ delta = 1;
+ node = new Node<K,V>(h, key, val, null);
+ }
+ } finally {
+ setTabAt(tab, i, node);
+ }
+ }
+ }
+ if (binCount != 0)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f, pred = null;; ++binCount) {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = remappingFunction.apply(key, e.val);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ val = remappingFunction.apply(key, null);
+ if (val != null) {
+ delta = 1;
+ pred.next =
+ new Node<K,V>(h, key, val, null);
+ }
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 1;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null)
+ p = r.findTreeNode(h, key, null);
+ else
+ p = null;
+ V pv = (p == null) ? null : p.val;
+ val = remappingFunction.apply(key, pv);
+ if (val != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ delta = 1;
+ t.putTreeVal(h, key, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ break;
+ }
+ }
+ }
+ if (delta != 0)
+ addCount((long)delta, binCount);
+ return val;
+ }
+
+ /**
+ * If the specified key is not already associated with a
+ * (non-null) value, associates it with the given value.
+ * Otherwise, replaces the value with the results of the given
+ * remapping function, or removes if {@code null}. The entire
+ * method invocation is performed atomically. Some attempted
+ * update operations on this map by other threads may be blocked
+ * while computation is in progress, so the computation should be
+ * short and simple, and must not attempt to update any other
+ * mappings of this Map.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value the value to use if absent
+ * @param remappingFunction the function to recompute a value if present
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or the
+ * remappingFunction is null
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
+ if (key == null || value == null || remappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int delta = 0;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
+ delta = 1;
+ val = value;
+ break;
+ }
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f, pred = null;; ++binCount) {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = remappingFunction.apply(e.val, value);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ delta = 1;
+ val = value;
+ pred.next =
+ new Node<K,V>(h, key, val, null);
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 2;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r = t.root;
+ TreeNode<K,V> p = (r == null) ? null :
+ r.findTreeNode(h, key, null);
+ val = (p == null) ? value :
+ remappingFunction.apply(p.val, value);
+ if (val != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ delta = 1;
+ t.putTreeVal(h, key, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ break;
+ }
+ }
+ }
+ if (delta != 0)
+ addCount((long)delta, binCount);
+ return val;
+ }
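+
+ /*
+ * Illustrative usage (not part of the upstream class; BiFun is
+ * this backport's stand-in for java.util.function.BiFunction): a
+ * concurrent word count, where an absent key takes the given
+ * value and a present key has old and new values summed.
+ *
+ * ConcurrentHashMapV8<String,Integer> counts =
+ * new ConcurrentHashMapV8<String,Integer>();
+ * counts.merge(word, 1, new BiFun<Integer,Integer,Integer>() {
+ * public Integer apply(Integer a, Integer b) { return a + b; }
+ * });
+ */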
+
+ // Hashtable legacy methods
+
+ /**
+ * Legacy method testing if some key maps into the specified value
+ * in this table. This method is identical in functionality to
+ * {@link #containsValue(Object)}, and exists solely to ensure
+ * full compatibility with class {@link java.util.Hashtable},
+ * which supported this method prior to introduction of the
+ * Java Collections framework.
+ *
+ * @param value a value to search for
+ * @return {@code true} if and only if some key maps to the
+ * {@code value} argument in this table as
+ * determined by the {@code equals} method;
+ * {@code false} otherwise
+ * @throws NullPointerException if the specified value is null
+ */
+ @Deprecated public boolean contains(Object value) {
+ return containsValue(value);
+ }
+
+ /**
+ * Returns an enumeration of the keys in this table.
+ *
+ * @return an enumeration of the keys in this table
+ * @see #keySet()
+ */
+ public Enumeration<K> keys() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ return new KeyIterator<K,V>(t, f, 0, f, this);
+ }
+
+ /**
+ * Returns an enumeration of the values in this table.
+ *
+ * @return an enumeration of the values in this table
+ * @see #values()
+ */
+ public Enumeration<V> elements() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ return new ValueIterator<K,V>(t, f, 0, f, this);
+ }
+
+ // ConcurrentHashMapV8-only methods
+
+ /**
+ * Returns the number of mappings. This method should be used
+ * instead of {@link #size} because a ConcurrentHashMapV8 may
+ * contain more mappings than can be represented as an int. The
+ * value returned is an estimate; the actual count may differ if
+ * there are concurrent insertions or removals.
+ *
+ * @return the number of mappings
+ * @since 1.8
+ */
+ public long mappingCount() {
+ long n = sumCount();
+ return (n < 0L) ? 0L : n; // ignore transient negative values
+ }
+
+ /**
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
+ * from the given type to {@code Boolean.TRUE}.
+ *
+ * @return the new set
+ * @since 1.8
+ */
+ public static <K> KeySetView<K,Boolean> newKeySet() {
+ return new KeySetView<K,Boolean>
+ (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
+ }
+
+ /**
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
+ * from the given type to {@code Boolean.TRUE}.
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @return the new set
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
+ * @since 1.8
+ */
+ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
+ return new KeySetView<K,Boolean>
+ (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
+ }
+
+ /**
+ * Returns a {@link Set} view of the keys in this map, using the
+ * given common mapped value for any additions (i.e., {@link
+ * Collection#add} and {@link Collection#addAll(Collection)}).
+ * This is of course only appropriate if it is acceptable to use
+ * the same value for all additions from this view.
+ *
+ * @param mappedValue the mapped value to use for any additions
+ * @return the set view
+ * @throws NullPointerException if the mappedValue is null
+ */
+ public KeySetView<K,V> keySet(V mappedValue) {
+ if (mappedValue == null)
+ throw new NullPointerException();
+ return new KeySetView<K,V>(this, mappedValue);
+ }
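+
+ /*
+ * Illustrative usage (not part of the upstream class): with a
+ * common mapped value the view behaves as a concurrent Set.
+ *
+ * ConcurrentHashMapV8<String,Boolean> m =
+ * new ConcurrentHashMapV8<String,Boolean>();
+ * Set<String> seen = m.keySet(Boolean.TRUE);
+ * seen.add("alpha"); // same as m.put("alpha", Boolean.TRUE)
+ * m.containsKey("alpha"); // true
+ */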
+
+ /* ---------------- Special Nodes -------------- */
+
+ /**
+ * A node inserted at head of bins during transfer operations.
+ */
+ static final class ForwardingNode<K,V> extends Node<K,V> {
+ final Node<K,V>[] nextTable;
+ ForwardingNode(Node<K,V>[] tab) {
+ super(MOVED, null, null, null);
+ this.nextTable = tab;
+ }
+
+ Node<K,V> find(int h, Object k) {
+ // loop to avoid arbitrarily deep recursion on forwarding nodes
+ outer: for (Node<K,V>[] tab = nextTable;;) {
+ Node<K,V> e; int n;
+ if (k == null || tab == null || (n = tab.length) == 0 ||
+ (e = tabAt(tab, (n - 1) & h)) == null)
+ return null;
+ for (;;) {
+ int eh; K ek;
+ if ((eh = e.hash) == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ if (eh < 0) {
+ if (e instanceof ForwardingNode) {
+ tab = ((ForwardingNode<K,V>)e).nextTable;
+ continue outer;
+ }
+ else
+ return e.find(h, k);
+ }
+ if ((e = e.next) == null)
+ return null;
+ }
+ }
+ }
+ }
+
+ /**
+ * A place-holder node used in computeIfAbsent and compute
+ */
+ static final class ReservationNode<K,V> extends Node<K,V> {
+ ReservationNode() {
+ super(RESERVED, null, null, null);
+ }
+
+ Node<K,V> find(int h, Object k) {
+ return null;
+ }
+ }
+
+ /* ---------------- Table Initialization and Resizing -------------- */
+
+ /**
+ * Returns the stamp bits for resizing a table of size n.
+ * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
+ */
+ static final int resizeStamp(int n) {
+ return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
+ }
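+
+ /*
+ * Illustrative arithmetic (not part of the upstream class),
+ * assuming RESIZE_STAMP_BITS = 16 so RESIZE_STAMP_SHIFT = 16:
+ *
+ * n = 16 -> numberOfLeadingZeros = 27 -> stamp = 27 | (1 << 15) = 0x801b
+ * stamp << 16 = 0x801b0000 // negative, so sizeCtl < 0 marks "resizing"
+ *
+ * A sizeCtl of (stamp << 16) + 2 thus encodes in one int: resizing
+ * in progress (sign bit), which table size is being resized (stamp
+ * in the high half), and 1 + the number of active resizer threads
+ * (low half).
+ */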
+
+ /**
+ * Initializes table, using the size recorded in sizeCtl.
+ */
+ private final Node<K,V>[] initTable() {
+ Node<K,V>[] tab; int sc;
+ while ((tab = table) == null || tab.length == 0) {
+ if ((sc = sizeCtl) < 0)
+ Thread.yield(); // lost initialization race; just spin
+ else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if ((tab = table) == null || tab.length == 0) {
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
+ table = tab = nt;
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ break;
+ }
+ }
+ return tab;
+ }
+
+ /**
+ * Adds to count, and if table is too small and not already
+ * resizing, initiates transfer. If already resizing, helps
+ * perform transfer if work is available. Rechecks occupancy
+ * after a transfer to see if another resize is already needed
+ * because resizings are lagging additions.
+ *
+ * @param x the count to add
+ * @param check if <0, don't check resize, if <= 1 only check if uncontended
+ */
+ private final void addCount(long x, int check) {
+ CounterCell[] as; long b, s;
+ if ((as = counterCells) != null ||
+ !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
+ CounterHashCode hc; CounterCell a; long v; int m;
+ boolean uncontended = true;
+ if ((hc = threadCounterHashCode.get()) == null ||
+ as == null || (m = as.length - 1) < 0 ||
+ (a = as[m & hc.code]) == null ||
+ !(uncontended =
+ U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
+ fullAddCount(x, hc, uncontended);
+ return;
+ }
+ if (check <= 1)
+ return;
+ s = sumCount();
+ }
+ if (check >= 0) {
+ Node<K,V>[] tab, nt; int n, sc;
+ while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
+ (n = tab.length) < MAXIMUM_CAPACITY) {
+ int rs = resizeStamp(n);
+ if (sc < 0) {
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
+ transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
+ transfer(tab, nt);
+ }
+ else if (U.compareAndSwapInt(this, SIZECTL, sc,
+ (rs << RESIZE_STAMP_SHIFT) + 2))
+ transfer(tab, null);
+ s = sumCount();
+ }
+ }
+ }
+
+ /**
+ * Helps transfer if a resize is in progress.
+ */
+ final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
+ Node<K,V>[] nextTab; int sc;
+ if (tab != null && (f instanceof ForwardingNode) &&
+ (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
+ int rs = resizeStamp(tab.length);
+ while (nextTab == nextTable && table == tab &&
+ (sc = sizeCtl) < 0) {
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
+ transfer(tab, nextTab);
+ break;
+ }
+ }
+ return nextTab;
+ }
+ return table;
+ }
+
+ /**
+ * Tries to presize table to accommodate the given number of elements.
+ *
+ * @param size number of elements (doesn't need to be perfectly accurate)
+ */
+ private final void tryPresize(int size) {
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
+ tableSizeFor(size + (size >>> 1) + 1);
+ int sc;
+ while ((sc = sizeCtl) >= 0) {
+ Node<K,V>[] tab = table; int n;
+ if (tab == null || (n = tab.length) == 0) {
+ n = (sc > c) ? sc : c;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if (table == tab) {
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
+ table = nt;
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
+ break;
+ else if (tab == table) {
+ int rs = resizeStamp(n);
+ if (sc < 0) {
+ Node<K,V>[] nt;
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
+ transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
+ transfer(tab, nt);
+ }
+ else if (U.compareAndSwapInt(this, SIZECTL, sc,
+ (rs << RESIZE_STAMP_SHIFT) + 2))
+ transfer(tab, null);
+ }
+ }
+ }
+
+ /**
+ * Moves and/or copies the nodes in each bin to new table. See
+ * above for explanation.
+ */
+ private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
+ int n = tab.length, stride;
+ if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
+ stride = MIN_TRANSFER_STRIDE; // subdivide range
+ if (nextTab == null) { // initiating
+ try {
+ @SuppressWarnings("unchecked")
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
+ nextTab = nt;
+ } catch (Throwable ex) { // try to cope with OOME
+ sizeCtl = Integer.MAX_VALUE;
+ return;
+ }
+ nextTable = nextTab;
+ transferIndex = n;
+ }
+ int nextn = nextTab.length;
+ ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
+ boolean advance = true;
+ boolean finishing = false; // to ensure sweep before committing nextTab
+ for (int i = 0, bound = 0;;) {
+ Node<K,V> f; int fh;
+ while (advance) {
+ int nextIndex, nextBound;
+ if (--i >= bound || finishing)
+ advance = false;
+ else if ((nextIndex = transferIndex) <= 0) {
+ i = -1;
+ advance = false;
+ }
+ else if (U.compareAndSwapInt
+ (this, TRANSFERINDEX, nextIndex,
+ nextBound = (nextIndex > stride ?
+ nextIndex - stride : 0))) {
+ bound = nextBound;
+ i = nextIndex - 1;
+ advance = false;
+ }
+ }
+ if (i < 0 || i >= n || i + n >= nextn) {
+ int sc;
+ if (finishing) {
+ nextTable = null;
+ table = nextTab;
+ sizeCtl = (n << 1) - (n >>> 1);
+ return;
+ }
+ if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
+ if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
+ return;
+ finishing = advance = true;
+ i = n; // recheck before commit
+ }
+ }
+ else if ((f = tabAt(tab, i)) == null)
+ advance = casTabAt(tab, i, null, fwd);
+ else if ((fh = f.hash) == MOVED)
+ advance = true; // already processed
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ Node<K,V> ln, hn;
+ if (fh >= 0) {
+ int runBit = fh & n;
+ Node<K,V> lastRun = f;
+ for (Node<K,V> p = f.next; p != null; p = p.next) {
+ int b = p.hash & n;
+ if (b != runBit) {
+ runBit = b;
+ lastRun = p;
+ }
+ }
+ if (runBit == 0) {
+ ln = lastRun;
+ hn = null;
+ }
+ else {
+ hn = lastRun;
+ ln = null;
+ }
+ for (Node<K,V> p = f; p != lastRun; p = p.next) {
+ int ph = p.hash; K pk = p.key; V pv = p.val;
+ if ((ph & n) == 0)
+ ln = new Node<K,V>(ph, pk, pv, ln);
+ else
+ hn = new Node<K,V>(ph, pk, pv, hn);
+ }
+ setTabAt(nextTab, i, ln);
+ setTabAt(nextTab, i + n, hn);
+ setTabAt(tab, i, fwd);
+ advance = true;
+ }
+ else if (f instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> lo = null, loTail = null;
+ TreeNode<K,V> hi = null, hiTail = null;
+ int lc = 0, hc = 0;
+ for (Node<K,V> e = t.first; e != null; e = e.next) {
+ int h = e.hash;
+ TreeNode<K,V> p = new TreeNode<K,V>
+ (h, e.key, e.val, null, null);
+ if ((h & n) == 0) {
+ if ((p.prev = loTail) == null)
+ lo = p;
+ else
+ loTail.next = p;
+ loTail = p;
+ ++lc;
+ }
+ else {
+ if ((p.prev = hiTail) == null)
+ hi = p;
+ else
+ hiTail.next = p;
+ hiTail = p;
+ ++hc;
+ }
+ }
+ ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
+ (hc != 0) ? new TreeBin<K,V>(lo) : t;
+ hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
+ (lc != 0) ? new TreeBin<K,V>(hi) : t;
+ setTabAt(nextTab, i, ln);
+ setTabAt(nextTab, i + n, hn);
+ setTabAt(tab, i, fwd);
+ advance = true;
+ }
+ }
+ }
+ }
+ }
+ }
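+
+ /*
+ * Illustrative note on the split above (not part of the upstream
+ * class): when the table doubles from n to 2n, a node in bin i
+ * can only move to bin i or bin i + n, chosen by the single new
+ * hash bit (hash & n). For n = 16, bin 5:
+ *
+ * hash = ...0_0101 -> (hash & 16) == 0 -> stays in bin 5 (ln list)
+ * hash = ...1_0101 -> (hash & 16) != 0 -> moves to bin 21 (hn list)
+ *
+ * The lastRun scan reuses the unchanged tail of the old list, so
+ * only the nodes before lastRun are copied.
+ */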
+
+ /* ---------------- Conversion from/to TreeBins -------------- */
+
+ /**
+ * Replaces all linked nodes in bin at given index unless table is
+ * too small, in which case resizes instead.
+ */
+ private final void treeifyBin(Node<K,V>[] tab, int index) {
+ Node<K,V> b; int n, sc;
+ if (tab != null) {
+ if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
+ tryPresize(n << 1);
+ else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
+ synchronized (b) {
+ if (tabAt(tab, index) == b) {
+ TreeNode<K,V> hd = null, tl = null;
+ for (Node<K,V> e = b; e != null; e = e.next) {
+ TreeNode<K,V> p =
+ new TreeNode<K,V>(e.hash, e.key, e.val,
+ null, null);
+ if ((p.prev = tl) == null)
+ hd = p;
+ else
+ tl.next = p;
+ tl = p;
+ }
+ setTabAt(tab, index, new TreeBin<K,V>(hd));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns a list of non-TreeNodes replacing those in the given list.
+ */
+ static <K,V> Node<K,V> untreeify(Node<K,V> b) {
+ Node<K,V> hd = null, tl = null;
+ for (Node<K,V> q = b; q != null; q = q.next) {
+ Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
+ if (tl == null)
+ hd = p;
+ else
+ tl.next = p;
+ tl = p;
+ }
+ return hd;
+ }
+
+ /* ---------------- TreeNodes -------------- */
+
+ /**
+ * Nodes for use in TreeBins
+ */
+ static final class TreeNode<K,V> extends Node<K,V> {
+ TreeNode<K,V> parent; // red-black tree links
+ TreeNode<K,V> left;
+ TreeNode<K,V> right;
+ TreeNode<K,V> prev; // needed to unlink next upon deletion
+ boolean red;
+
+ TreeNode(int hash, K key, V val, Node<K,V> next,
+ TreeNode<K,V> parent) {
+ super(hash, key, val, next);
+ this.parent = parent;
+ }
+
+ Node<K,V> find(int h, Object k) {
+ return findTreeNode(h, k, null);
+ }
+
+ /**
+ * Returns the TreeNode (or null if not found) for the given key
+ * starting at given root.
+ */
+ final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
+ if (k != null) {
+ TreeNode<K,V> p = this;
+ do {
+ int ph, dir; K pk; TreeNode<K,V> q;
+ TreeNode<K,V> pl = p.left, pr = p.right;
+ if ((ph = p.hash) > h)
+ p = pl;
+ else if (ph < h)
+ p = pr;
+ else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
+ return p;
+ else if (pl == null)
+ p = pr;
+ else if (pr == null)
+ p = pl;
+ else if ((kc != null ||
+ (kc = comparableClassFor(k)) != null) &&
+ (dir = compareComparables(kc, k, pk)) != 0)
+ p = (dir < 0) ? pl : pr;
+ else if ((q = pr.findTreeNode(h, k, kc)) != null)
+ return q;
+ else
+ p = pl;
+ } while (p != null);
+ }
+ return null;
+ }
+ }
+
+ /* ---------------- TreeBins -------------- */
+
+ /**
+ * TreeNodes used at the heads of bins. TreeBins do not hold user
+ * keys or values, but instead point to a list of TreeNodes and
+ * their root. They also maintain a parasitic read-write lock,
+ * forcing writers (who hold the bin lock) to wait for readers (who do
+ * not) to complete before tree restructuring operations.
+ */
+ static final class TreeBin<K,V> extends Node<K,V> {
+ TreeNode<K,V> root;
+ volatile TreeNode<K,V> first;
+ volatile Thread waiter;
+ volatile int lockState;
+ // values for lockState
+ static final int WRITER = 1; // set while holding write lock
+ static final int WAITER = 2; // set when waiting for write lock
+ static final int READER = 4; // increment value for setting read lock
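+
+ /*
+ * Illustrative reading of lockState (not part of the upstream
+ * class): a tiny read-write lock packed into one int. Writers
+ * already hold the bin's monitor, so a single write bit suffices;
+ * readers CAS the count up in units of READER.
+ *
+ * lockState == 0 // free
+ * lockState == WRITER // one writer restructuring
+ * lockState == 2 * READER // two concurrent readers (value 8)
+ * lockState == READER|WAITER // one reader, one parked writer (6);
+ * // the last reader out unparks it (see find below)
+ */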
+
+ /**
+ * Tie-breaking utility for ordering insertions when hashCodes are
+ * equal and the keys are not comparable. We don't require a total
+ * order, just a consistent insertion rule to maintain
+ * equivalence across rebalancings. Tie-breaking further than
+ * necessary simplifies testing a bit.
+ */
+ static int tieBreakOrder(Object a, Object b) {
+ int d;
+ if (a == null || b == null ||
+ (d = a.getClass().getName().
+ compareTo(b.getClass().getName())) == 0)
+ d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
+ -1 : 1);
+ return d;
+ }
+
+ /**
+ * Creates bin with initial set of nodes headed by b.
+ */
+ TreeBin(TreeNode<K,V> b) {
+ super(TREEBIN, null, null, null);
+ this.first = b;
+ TreeNode<K,V> r = null;
+ for (TreeNode<K,V> x = b, next; x != null; x = next) {
+ next = (TreeNode<K,V>)x.next;
+ x.left = x.right = null;
+ if (r == null) {
+ x.parent = null;
+ x.red = false;
+ r = x;
+ }
+ else {
+ K k = x.key;
+ int h = x.hash;
+ Class<?> kc = null;
+ for (TreeNode<K,V> p = r;;) {
+ int dir, ph;
+ K pk = p.key;
+ if ((ph = p.hash) > h)
+ dir = -1;
+ else if (ph < h)
+ dir = 1;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0)
+ dir = tieBreakOrder(k, pk);
+ TreeNode<K,V> xp = p;
+ if ((p = (dir <= 0) ? p.left : p.right) == null) {
+ x.parent = xp;
+ if (dir <= 0)
+ xp.left = x;
+ else
+ xp.right = x;
+ r = balanceInsertion(r, x);
+ break;
+ }
+ }
+ }
+ }
+ this.root = r;
+ assert checkInvariants(root);
+ }
+
+ /**
+ * Acquires write lock for tree restructuring.
+ */
+ private final void lockRoot() {
+ if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
+ contendedLock(); // offload to separate method
+ }
+
+ /**
+ * Releases write lock for tree restructuring.
+ */
+ private final void unlockRoot() {
+ lockState = 0;
+ }
+
+ /**
+ * Possibly blocks awaiting root lock.
+ */
+ private final void contendedLock() {
+ boolean waiting = false;
+ for (int s;;) {
+ if (((s = lockState) & ~WAITER) == 0) {
+ if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
+ if (waiting)
+ waiter = null;
+ return;
+ }
+ }
+ else if ((s & WAITER) == 0) {
+ if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
+ waiting = true;
+ waiter = Thread.currentThread();
+ }
+ }
+ else if (waiting)
+ LockSupport.park(this);
+ }
+ }
+
+ /**
+ * Returns matching node or null if none. Tries to search
+ * using tree comparisons from root, but continues linear
+ * search when lock not available.
+ */
+ final Node<K,V> find(int h, Object k) {
+ if (k != null) {
+ for (Node<K,V> e = first; e != null; ) {
+ int s; K ek;
+ if (((s = lockState) & (WAITER|WRITER)) != 0) {
+ if (e.hash == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ e = e.next;
+ }
+ else if (U.compareAndSwapInt(this, LOCKSTATE, s,
+ s + READER)) {
+ TreeNode<K,V> r, p;
+ try {
+ p = ((r = root) == null ? null :
+ r.findTreeNode(h, k, null));
+ } finally {
+ Thread w;
+ int ls;
+ do {} while (!U.compareAndSwapInt
+ (this, LOCKSTATE,
+ ls = lockState, ls - READER));
+ if (ls == (READER|WAITER) && (w = waiter) != null)
+ LockSupport.unpark(w);
+ }
+ return p;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Finds or adds a node.
+ * @return null if added
+ */
+ final TreeNode<K,V> putTreeVal(int h, K k, V v) {
+ Class<?> kc = null;
+ boolean searched = false;
+ for (TreeNode<K,V> p = root;;) {
+ int dir, ph; K pk;
+ if (p == null) {
+ first = root = new TreeNode<K,V>(h, k, v, null, null);
+ break;
+ }
+ else if ((ph = p.hash) > h)
+ dir = -1;
+ else if (ph < h)
+ dir = 1;
+ else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
+ return p;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0) {
+ if (!searched) {
+ TreeNode<K,V> q, ch;
+ searched = true;
+ if (((ch = p.left) != null &&
+ (q = ch.findTreeNode(h, k, kc)) != null) ||
+ ((ch = p.right) != null &&
+ (q = ch.findTreeNode(h, k, kc)) != null))
+ return q;
+ }
+ dir = tieBreakOrder(k, pk);
+ }
+
+ TreeNode<K,V> xp = p;
+ if ((p = (dir <= 0) ? p.left : p.right) == null) {
+ TreeNode<K,V> x, f = first;
+ first = x = new TreeNode<K,V>(h, k, v, f, xp);
+ if (f != null)
+ f.prev = x;
+ if (dir <= 0)
+ xp.left = x;
+ else
+ xp.right = x;
+ if (!xp.red)
+ x.red = true;
+ else {
+ lockRoot();
+ try {
+ root = balanceInsertion(root, x);
+ } finally {
+ unlockRoot();
+ }
+ }
+ break;
+ }
+ }
+ assert checkInvariants(root);
+ return null;
+ }
+
+ /**
+ * Removes the given node, which must be present before this
+ * call. This is messier than typical red-black deletion code
+ * because we cannot swap the contents of an interior node
+ * with a leaf successor that is pinned by "next" pointers
+ * that are accessible independently of lock. So instead we
+ * swap the tree linkages.
+ *
+ * @return true if now too small, so should be untreeified
+ */
+ final boolean removeTreeNode(TreeNode<K,V> p) {
+ TreeNode<K,V> next = (TreeNode<K,V>)p.next;
+ TreeNode<K,V> pred = p.prev; // unlink traversal pointers
+ TreeNode<K,V> r, rl;
+ if (pred == null)
+ first = next;
+ else
+ pred.next = next;
+ if (next != null)
+ next.prev = pred;
+ if (first == null) {
+ root = null;
+ return true;
+ }
+ if ((r = root) == null || r.right == null || // too small
+ (rl = r.left) == null || rl.left == null)
+ return true;
+ lockRoot();
+ try {
+ TreeNode<K,V> replacement;
+ TreeNode<K,V> pl = p.left;
+ TreeNode<K,V> pr = p.right;
+ if (pl != null && pr != null) {
+ TreeNode<K,V> s = pr, sl;
+ while ((sl = s.left) != null) // find successor
+ s = sl;
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+ TreeNode<K,V> sr = s.right;
+ TreeNode<K,V> pp = p.parent;
+ if (s == pr) { // p was s's direct parent
+ p.parent = s;
+ s.right = p;
+ }
+ else {
+ TreeNode<K,V> sp = s.parent;
+ if ((p.parent = sp) != null) {
+ if (s == sp.left)
+ sp.left = p;
+ else
+ sp.right = p;
+ }
+ if ((s.right = pr) != null)
+ pr.parent = s;
+ }
+ p.left = null;
+ if ((p.right = sr) != null)
+ sr.parent = p;
+ if ((s.left = pl) != null)
+ pl.parent = s;
+ if ((s.parent = pp) == null)
+ r = s;
+ else if (p == pp.left)
+ pp.left = s;
+ else
+ pp.right = s;
+ if (sr != null)
+ replacement = sr;
+ else
+ replacement = p;
+ }
+ else if (pl != null)
+ replacement = pl;
+ else if (pr != null)
+ replacement = pr;
+ else
+ replacement = p;
+ if (replacement != p) {
+ TreeNode<K,V> pp = replacement.parent = p.parent;
+ if (pp == null)
+ r = replacement;
+ else if (p == pp.left)
+ pp.left = replacement;
+ else
+ pp.right = replacement;
+ p.left = p.right = p.parent = null;
+ }
+
+ root = (p.red) ? r : balanceDeletion(r, replacement);
+
+ if (p == replacement) { // detach pointers
+ TreeNode<K,V> pp;
+ if ((pp = p.parent) != null) {
+ if (p == pp.left)
+ pp.left = null;
+ else if (p == pp.right)
+ pp.right = null;
+ p.parent = null;
+ }
+ }
+ } finally {
+ unlockRoot();
+ }
+ assert checkInvariants(root);
+ return false;
+ }
+
+ /* ------------------------------------------------------------ */
+ // Red-black tree methods, all adapted from CLR
+
+ static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
+ TreeNode<K,V> p) {
+ TreeNode<K,V> r, pp, rl;
+ if (p != null && (r = p.right) != null) {
+ if ((rl = p.right = r.left) != null)
+ rl.parent = p;
+ if ((pp = r.parent = p.parent) == null)
+ (root = r).red = false;
+ else if (pp.left == p)
+ pp.left = r;
+ else
+ pp.right = r;
+ r.left = p;
+ p.parent = r;
+ }
+ return root;
+ }
+
+ static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
+ TreeNode<K,V> p) {
+ TreeNode<K,V> l, pp, lr;
+ if (p != null && (l = p.left) != null) {
+ if ((lr = p.left = l.right) != null)
+ lr.parent = p;
+ if ((pp = l.parent = p.parent) == null)
+ (root = l).red = false;
+ else if (pp.right == p)
+ pp.right = l;
+ else
+ pp.left = l;
+ l.right = p;
+ p.parent = l;
+ }
+ return root;
+ }
+
+ static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
+ TreeNode<K,V> x) {
+ x.red = true;
+ for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
+ if ((xp = x.parent) == null) {
+ x.red = false;
+ return x;
+ }
+ else if (!xp.red || (xpp = xp.parent) == null)
+ return root;
+ if (xp == (xppl = xpp.left)) {
+ if ((xppr = xpp.right) != null && xppr.red) {
+ xppr.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.right) {
+ root = rotateLeft(root, x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ root = rotateRight(root, xpp);
+ }
+ }
+ }
+ }
+ else {
+ if (xppl != null && xppl.red) {
+ xppl.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.left) {
+ root = rotateRight(root, x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ root = rotateLeft(root, xpp);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
+ TreeNode<K,V> x) {
+ for (TreeNode<K,V> xp, xpl, xpr;;) {
+ if (x == null || x == root)
+ return root;
+ else if ((xp = x.parent) == null) {
+ x.red = false;
+ return x;
+ }
+ else if (x.red) {
+ x.red = false;
+ return root;
+ }
+ else if ((xpl = xp.left) == x) {
+ if ((xpr = xp.right) != null && xpr.red) {
+ xpr.red = false;
+ xp.red = true;
+ root = rotateLeft(root, xp);
+ xpr = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (xpr == null)
+ x = xp;
+ else {
+ TreeNode<K,V> sl = xpr.left, sr = xpr.right;
+ if ((sr == null || !sr.red) &&
+ (sl == null || !sl.red)) {
+ xpr.red = true;
+ x = xp;
+ }
+ else {
+ if (sr == null || !sr.red) {
+ if (sl != null)
+ sl.red = false;
+ xpr.red = true;
+ root = rotateRight(root, xpr);
+ xpr = (xp = x.parent) == null ?
+ null : xp.right;
+ }
+ if (xpr != null) {
+ xpr.red = (xp == null) ? false : xp.red;
+ if ((sr = xpr.right) != null)
+ sr.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ root = rotateLeft(root, xp);
+ }
+ x = root;
+ }
+ }
+ }
+ else { // symmetric
+ if (xpl != null && xpl.red) {
+ xpl.red = false;
+ xp.red = true;
+ root = rotateRight(root, xp);
+ xpl = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (xpl == null)
+ x = xp;
+ else {
+ TreeNode<K,V> sl = xpl.left, sr = xpl.right;
+ if ((sl == null || !sl.red) &&
+ (sr == null || !sr.red)) {
+ xpl.red = true;
+ x = xp;
+ }
+ else {
+ if (sl == null || !sl.red) {
+ if (sr != null)
+ sr.red = false;
+ xpl.red = true;
+ root = rotateLeft(root, xpl);
+ xpl = (xp = x.parent) == null ?
+ null : xp.left;
+ }
+ if (xpl != null) {
+ xpl.red = (xp == null) ? false : xp.red;
+ if ((sl = xpl.left) != null)
+ sl.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ root = rotateRight(root, xp);
+ }
+ x = root;
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Recursive invariant check
+ */
+ static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
+ TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
+ tb = t.prev, tn = (TreeNode<K,V>)t.next;
+ if (tb != null && tb.next != t)
+ return false;
+ if (tn != null && tn.prev != t)
+ return false;
+ if (tp != null && t != tp.left && t != tp.right)
+ return false;
+ if (tl != null && (tl.parent != t || tl.hash > t.hash))
+ return false;
+ if (tr != null && (tr.parent != t || tr.hash < t.hash))
+ return false;
+ if (t.red && tl != null && tl.red && tr != null && tr.red)
+ return false;
+ if (tl != null && !checkInvariants(tl))
+ return false;
+ if (tr != null && !checkInvariants(tr))
+ return false;
+ return true;
+ }
+
+ private static final sun.misc.Unsafe U;
+ private static final long LOCKSTATE;
+ static {
+ try {
+ U = getUnsafe();
+ Class<?> k = TreeBin.class;
+ LOCKSTATE = U.objectFieldOffset
+ (k.getDeclaredField("lockState"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+ }
+
+ /* ----------------Table Traversal -------------- */
+
+ /**
+ * Records the table, its length, and current traversal index for a
+ * traverser that must process a region of a forwarded table before
+ * proceeding with current table.
+ */
+ static final class TableStack<K,V> {
+ int length;
+ int index;
+ Node<K,V>[] tab;
+ TableStack<K,V> next;
+ }
+
+ /**
+ * Encapsulates traversal for methods such as containsValue; also
+ * serves as a base class for other iterators and spliterators.
+ *
+ * Method advance visits once each still-valid node that was
+ * reachable upon iterator construction. It might miss some that
+ * were added to a bin after the bin was visited, which is OK with
+ * respect to consistency guarantees. Maintaining this property in the face
+ * of possible ongoing resizes requires a fair amount of
+ * bookkeeping state that is difficult to optimize away amidst
+ * volatile accesses. Even so, traversal maintains reasonable
+ * throughput.
+ *
+ * Normally, iteration proceeds bin-by-bin traversing lists.
+ * However, if the table has been resized, then all future steps
+ * must traverse both the bin at the current index and the one at
+ * (index + baseSize), and so on for further resizings. To
+ * paranoically cope with potential sharing of iterators across
+ * threads, iteration terminates if a bounds check fails for a
+ * table read.
+ */
+ static class Traverser<K,V> {
+ Node<K,V>[] tab; // current table; updated if resized
+ Node<K,V> next; // the next entry to use
+ TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
+ int index; // index of bin to use next
+ int baseIndex; // current index of initial table
+ int baseLimit; // index bound for initial table
+ final int baseSize; // initial table size
+
+ Traverser(Node<K,V>[] tab, int size, int index, int limit) {
+ this.tab = tab;
+ this.baseSize = size;
+ this.baseIndex = this.index = index;
+ this.baseLimit = limit;
+ this.next = null;
+ }
+
+ /**
+ * Advances if possible, returning next valid node, or null if none.
+ */
+ final Node<K,V> advance() {
+ Node<K,V> e;
+ if ((e = next) != null)
+ e = e.next;
+ for (;;) {
+ Node<K,V>[] t; int i, n; // must use locals in checks
+ if (e != null)
+ return next = e;
+ if (baseIndex >= baseLimit || (t = tab) == null ||
+ (n = t.length) <= (i = index) || i < 0)
+ return next = null;
+ if ((e = tabAt(t, i)) != null && e.hash < 0) {
+ if (e instanceof ForwardingNode) {
+ tab = ((ForwardingNode<K,V>)e).nextTable;
+ e = null;
+ pushState(t, i, n);
+ continue;
+ }
+ else if (e instanceof TreeBin)
+ e = ((TreeBin<K,V>)e).first;
+ else
+ e = null;
+ }
+ if (stack != null)
+ recoverState(n);
+ else if ((index = i + baseSize) >= n)
+ index = ++baseIndex; // visit upper slots if present
+ }
+ }
+
+ /**
+ * Saves traversal state upon encountering a forwarding node.
+ */
+ private void pushState(Node<K,V>[] t, int i, int n) {
+ TableStack<K,V> s = spare; // reuse if possible
+ if (s != null)
+ spare = s.next;
+ else
+ s = new TableStack<K,V>();
+ s.tab = t;
+ s.length = n;
+ s.index = i;
+ s.next = stack;
+ stack = s;
+ }
+
+ /**
+ * Possibly pops traversal state.
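+ * State is popped once both the low (i) and high (i + oldLength)
+ * slots of a forwarded region have been visited, resuming the
+ * saved table where traversal left off.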
+ *
+ * @param n length of current table
+ */
+ private void recoverState(int n) {
+ TableStack<K,V> s; int len;
+ while ((s = stack) != null && (index += (len = s.length)) >= n) {
+ n = len;
+ index = s.index;
+ tab = s.tab;
+ s.tab = null;
+ TableStack<K,V> next = s.next;
+ s.next = spare; // save for reuse
+ stack = next;
+ spare = s;
+ }
+ if (s == null && (index += baseSize) >= n)
+ index = ++baseIndex;
+ }
+ }
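+
+     // Internal-usage sketch: this is how containsValue and the view
+     // forEach methods below drive a Traverser over a table t:
+     //
+     //     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+     //     for (Node<K,V> p; (p = it.advance()) != null; )
+     //         ... // use p.key / p.val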
+
+ /**
+ * Base of key, value, and entry Iterators. Adds fields to
+ * Traverser to support iterator.remove.
+ */
+ static class BaseIterator<K,V> extends Traverser<K,V> {
+ final ConcurrentHashMapV8<K,V> map;
+ Node<K,V> lastReturned;
+ BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
+ ConcurrentHashMapV8<K,V> map) {
+ super(tab, size, index, limit);
+ this.map = map;
+ advance();
+ }
+
+ public final boolean hasNext() { return next != null; }
+ public final boolean hasMoreElements() { return next != null; }
+
+ public final void remove() {
+ Node<K,V> p;
+ if ((p = lastReturned) == null)
+ throw new IllegalStateException();
+ lastReturned = null;
+ map.replaceNode(p.key, null, null);
+ }
+ }
+
+ static final class KeyIterator<K,V> extends BaseIterator<K,V>
+ implements Iterator<K>, Enumeration<K> {
+ KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
+ ConcurrentHashMapV8<K,V> map) {
+ super(tab, index, size, limit, map);
+ }
+
+ public final K next() {
+ Node<K,V> p;
+ if ((p = next) == null)
+ throw new NoSuchElementException();
+ K k = p.key;
+ lastReturned = p;
+ advance();
+ return k;
+ }
+
+ public final K nextElement() { return next(); }
+ }
+
+ static final class ValueIterator<K,V> extends BaseIterator<K,V>
+ implements Iterator<V>, Enumeration<V> {
+ ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
+ ConcurrentHashMapV8<K,V> map) {
+ super(tab, index, size, limit, map);
+ }
+
+ public final V next() {
+ Node<K,V> p;
+ if ((p = next) == null)
+ throw new NoSuchElementException();
+ V v = p.val;
+ lastReturned = p;
+ advance();
+ return v;
+ }
+
+ public final V nextElement() { return next(); }
+ }
+
+ static final class EntryIterator<K,V> extends BaseIterator<K,V>
+ implements Iterator<Map.Entry<K,V>> {
+ EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
+ ConcurrentHashMapV8<K,V> map) {
+ super(tab, index, size, limit, map);
+ }
+
+ public final Map.Entry<K,V> next() {
+ Node<K,V> p;
+ if ((p = next) == null)
+ throw new NoSuchElementException();
+ K k = p.key;
+ V v = p.val;
+ lastReturned = p;
+ advance();
+ return new MapEntry<K,V>(k, v, map);
+ }
+ }
+
+ /**
+ * Exported Entry for EntryIterator
+ */
+ static final class MapEntry<K,V> implements Map.Entry<K,V> {
+ final K key; // non-null
+ V val; // non-null
+ final ConcurrentHashMapV8<K,V> map;
+ MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
+ this.key = key;
+ this.val = val;
+ this.map = map;
+ }
+ public K getKey() { return key; }
+ public V getValue() { return val; }
+ public int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public String toString() { return key + "=" + val; }
+
+ public boolean equals(Object o) {
+ Object k, v; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == val || v.equals(val)));
+ }
+
+ /**
+ * Sets our entry's value and writes through to the map. The
+ * value to return is somewhat arbitrary here. Since we do not
+ * necessarily track asynchronous changes, the most recent
+ * "previous" value could be different from what we return (or
+ * could even have been removed, in which case the put will
+ * re-establish the mapping). We do not and cannot guarantee more.
+ */
+ public V setValue(V value) {
+ if (value == null) throw new NullPointerException();
+ V v = val;
+ val = value;
+ map.put(key, value);
+ return v;
+ }
+ }
+
+ static final class KeySpliterator<K,V> extends Traverser<K,V>
+ implements ConcurrentHashMapSpliterator<K> {
+ long est; // size estimate
+ KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
+ long est) {
+ super(tab, size, index, limit);
+ this.est = est;
+ }
+
+ public ConcurrentHashMapSpliterator<K> trySplit() {
+ int i, f, h;
+ return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+ new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
+ f, est >>>= 1);
+ }
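+
+         // trySplit (above) halves the remaining bin range [baseIndex,
+         // baseLimit): the new spliterator takes [h, f), this one keeps
+         // [i, h), and est is halved to keep size estimates roughly fair.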
+
+ public void forEachRemaining(Action<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.apply(p.key);
+ }
+
+ public boolean tryAdvance(Action<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V> p;
+ if ((p = advance()) == null)
+ return false;
+ action.apply(p.key);
+ return true;
+ }
+
+ public long estimateSize() { return est; }
+
+ }
+
+ static final class ValueSpliterator<K,V> extends Traverser<K,V>
+ implements ConcurrentHashMapSpliterator<V> {
+ long est; // size estimate
+ ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
+ long est) {
+ super(tab, size, index, limit);
+ this.est = est;
+ }
+
+ public ConcurrentHashMapSpliterator<V> trySplit() {
+ int i, f, h;
+ return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+ new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
+ f, est >>>= 1);
+ }
+
+ public void forEachRemaining(Action<? super V> action) {
+ if (action == null) throw new NullPointerException();
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.apply(p.val);
+ }
+
+ public boolean tryAdvance(Action<? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V> p;
+ if ((p = advance()) == null)
+ return false;
+ action.apply(p.val);
+ return true;
+ }
+
+ public long estimateSize() { return est; }
+
+ }
+
+ static final class EntrySpliterator<K,V> extends Traverser<K,V>
+ implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
+ final ConcurrentHashMapV8<K,V> map; // To export MapEntry
+ long est; // size estimate
+ EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
+ long est, ConcurrentHashMapV8<K,V> map) {
+ super(tab, size, index, limit);
+ this.map = map;
+ this.est = est;
+ }
+
+ public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
+ int i, f, h;
+ return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+ new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
+ f, est >>>= 1, map);
+ }
+
+ public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ for (Node<K,V> p; (p = advance()) != null; )
+ action.apply(new MapEntry<K,V>(p.key, p.val, map));
+ }
+
+ public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V> p;
+ if ((p = advance()) == null)
+ return false;
+ action.apply(new MapEntry<K,V>(p.key, p.val, map));
+ return true;
+ }
+
+ public long estimateSize() { return est; }
+
+ }
+
+ // Parallel bulk operations
+
+ /**
+ * Computes initial batch value for bulk tasks. The returned value
+ * is approximately exp2 of the number of times (minus one) to
+ * split the task in two before executing the leaf action. This value is
+ * faster to compute and more convenient to use as a guide to
+ * splitting than is the depth, since it is used while dividing by
+ * two anyway.
+ */
+ final int batchFor(long b) {
+ long n;
+ if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
+ return 0;
+ int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
+ return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
+ }
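+
+     // Worked example: with common-pool parallelism 8, sp = 8 << 2 = 32.
+     // For n = 1000000 mappings, batchFor(10000L) computes n / b = 100 >= sp
+     // and returns 32; batchFor(100000L) returns n / b = 10; and
+     // batchFor(Long.MAX_VALUE) returns 0, i.e. run sequentially.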
+
+ /**
+ * Performs the given action for each (key, value).
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ * @since 1.8
+ */
+ public void forEach(long parallelismThreshold,
+ BiAction<? super K,? super V> action) {
+ if (action == null) throw new NullPointerException();
+ new ForEachMappingTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
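+
+     // Illustrative call (a sketch, assuming a ConcurrentHashMapV8<String,Long>
+     // named "map"; BiAction is the callback interface declared in this
+     // class, whose single method is apply(A, B)):
+     //
+     //     map.forEach(10000L, new BiAction<String,Long>() {
+     //         public void apply(String k, Long v) {
+     //             System.out.println(k + " -> " + v);
+     //         }
+     //     });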
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each (key, value).
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ * @since 1.8
+ */
+ public <U> void forEach(long parallelismThreshold,
+ BiFun<? super K, ? super V, ? extends U> transformer,
+ Action<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedMappingTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each (key, value), or null if none. Upon
+ * success, further element processing is suppressed and the
+ * results of any other parallel invocations of the search
+ * function are ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each (key, value), or null if none
+ * @since 1.8
+ */
+ public <U> U search(long parallelismThreshold,
+ BiFun<? super K, ? super V, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchMappingsTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
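+
+     // Sketch: find any key whose value exceeds a caller-chosen limit
+     // (LIMIT is a hypothetical constant); returning null from the
+     // search function keeps the scan going:
+     //
+     //     String hit = map.search(10000L, new BiFun<String,Long,String>() {
+     //         public String apply(String k, Long v) {
+     //             return (v.longValue() > LIMIT) ? k : null;
+     //         }
+     //     });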
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ * @since 1.8
+ */
+ public <U> U reduce(long parallelismThreshold,
+ BiFun<? super K, ? super V, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ * @since 1.8
+ */
+ public double reduceToDouble(long parallelismThreshold,
+ ObjectByObjectToDouble<? super K, ? super V> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ * @since 1.8
+ */
+ public long reduceToLong(long parallelismThreshold,
+ ObjectByObjectToLong<? super K, ? super V> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
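+
+     // Sketch: sum all values of a ConcurrentHashMapV8<String,Long> "map",
+     // assuming the helper interfaces' single method is named apply:
+     //
+     //     long total = map.reduceToLong(10000L,
+     //         new ObjectByObjectToLong<String,Long>() {
+     //             public long apply(String k, Long v) { return v.longValue(); }
+     //         },
+     //         0L, // basis: the identity for addition
+     //         new LongByLongToLong() {
+     //             public long apply(long a, long b) { return a + b; }
+     //         });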
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ * @since 1.8
+ */
+ public int reduceToInt(long parallelismThreshold,
+ ObjectByObjectToInt<? super K, ? super V> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Performs the given action for each key.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ * @since 1.8
+ */
+ public void forEachKey(long parallelismThreshold,
+ Action<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ new ForEachKeyTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each key.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ * @since 1.8
+ */
+ public <U> void forEachKey(long parallelismThreshold,
+ Fun<? super K, ? extends U> transformer,
+ Action<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedKeyTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each key, or null if none. Upon success,
+ * further element processing is suppressed and the results of
+ * any other parallel invocations of the search function are
+ * ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each key, or null if none
+ * @since 1.8
+ */
+ public <U> U searchKeys(long parallelismThreshold,
+ Fun<? super K, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchKeysTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating all keys using the given
+ * reducer to combine values, or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating all keys using the given
+ * reducer to combine values, or null if none
+ * @since 1.8
+ */
+ public K reduceKeys(long parallelismThreshold,
+ BiFun<? super K, ? super K, ? extends K> reducer) {
+ if (reducer == null) throw new NullPointerException();
+ return new ReduceKeysTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, reducer).invoke();
+ }
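+
+     // Sketch (String keys assumed): reduce to the lexicographically
+     // greatest key, or null if the map is empty:
+     //
+     //     String maxKey = map.reduceKeys(10000L,
+     //         new BiFun<String,String,String>() {
+     //             public String apply(String a, String b) {
+     //                 return a.compareTo(b) >= 0 ? a : b;
+     //             }
+     //         });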
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, or
+ * null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ * @since 1.8
+ */
+ public <U> U reduceKeys(long parallelismThreshold,
+ Fun<? super K, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, and
+ * the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ * @since 1.8
+ */
+ public double reduceKeysToDouble(long parallelismThreshold,
+ ObjectToDouble<? super K> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, and
+ * the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ * @since 1.8
+ */
+ public long reduceKeysToLong(long parallelismThreshold,
+ ObjectToLong<? super K> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, and
+ * the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ * @since 1.8
+ */
+ public int reduceKeysToInt(long parallelismThreshold,
+ ObjectToInt<? super K> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Performs the given action for each value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ * @since 1.8
+ */
+ public void forEachValue(long parallelismThreshold,
+ Action<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ new ForEachValueTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ * @since 1.8
+ */
+ public <U> void forEachValue(long parallelismThreshold,
+ Fun<? super V, ? extends U> transformer,
+ Action<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedValueTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each value, or null if none. Upon success,
+ * further element processing is suppressed and the results of
+ * any other parallel invocations of the search function are
+ * ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each value, or null if none
+ * @since 1.8
+ */
+ public <U> U searchValues(long parallelismThreshold,
+ Fun<? super V, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchValuesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating all values using the
+ * given reducer to combine values, or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating all values
+ * @since 1.8
+ */
+ public V reduceValues(long parallelismThreshold,
+ BiFun<? super V, ? super V, ? extends V> reducer) {
+ if (reducer == null) throw new NullPointerException();
+ return new ReduceValuesTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values, or
+ * null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ * @since 1.8
+ */
+ public <U> U reduceValues(long parallelismThreshold,
+ Fun<? super V, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ * @since 1.8
+ */
+ public double reduceValuesToDouble(long parallelismThreshold,
+ ObjectToDouble<? super V> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ * @since 1.8
+ */
+ public long reduceValuesToLong(long parallelismThreshold,
+ ObjectToLong<? super V> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ * @since 1.8
+ */
+ public int reduceValuesToInt(long parallelismThreshold,
+ ObjectToInt<? super V> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Performs the given action for each entry.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ * @since 1.8
+ */
+ public void forEachEntry(long parallelismThreshold,
+ Action<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each entry.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ * @since 1.8
+ */
+ public <U> void forEachEntry(long parallelismThreshold,
+ Fun<Map.Entry<K,V>, ? extends U> transformer,
+ Action<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedEntryTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each entry, or null if none. Upon success,
+ * further element processing is suppressed and the results of
+ * any other parallel invocations of the search function are
+ * ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each entry, or null if none
+ * @since 1.8
+ */
+ public <U> U searchEntries(long parallelismThreshold,
+ Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchEntriesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating all entries using the
+ * given reducer to combine values, or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating all entries
+ * @since 1.8
+ */
+ public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
+ BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
+ if (reducer == null) throw new NullPointerException();
+ return new ReduceEntriesTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ * @since 1.8
+ */
+ public <U> U reduceEntries(long parallelismThreshold,
+ Fun<Map.Entry<K,V>, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ * @since 1.8
+ */
+ public double reduceEntriesToDouble(long parallelismThreshold,
+ ObjectToDouble<Map.Entry<K,V>> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ * @since 1.8
+ */
+ public long reduceEntriesToLong(long parallelismThreshold,
+ ObjectToLong<Map.Entry<K,V>> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ * @since 1.8
+ */
+ public int reduceEntriesToInt(long parallelismThreshold,
+ ObjectToInt<Map.Entry<K,V>> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /* ----------------Views -------------- */
+
+ /**
+ * Base class for views.
+ */
+ abstract static class CollectionView<K,V,E>
+ implements Collection<E>, java.io.Serializable {
+ private static final long serialVersionUID = 7249069246763182397L;
+ final ConcurrentHashMapV8<K,V> map;
+ CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; }
+
+ /**
+ * Returns the map backing this view.
+ *
+ * @return the map backing this view
+ */
+ public ConcurrentHashMapV8<K,V> getMap() { return map; }
+
+ /**
+ * Removes all of the elements from this view by removing all
+ * the mappings from the map backing this view.
+ */
+ public final void clear() { map.clear(); }
+ public final int size() { return map.size(); }
+ public final boolean isEmpty() { return map.isEmpty(); }
+
+ // implementations below rely on concrete classes supplying these
+ // abstract methods
+ /**
+ * Returns a "weakly consistent" iterator that will never
+ * throw {@link ConcurrentModificationException}, and
+ * guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not
+ * guaranteed to) reflect any modifications subsequent to
+ * construction.
+ */
+ public abstract Iterator<E> iterator();
+ public abstract boolean contains(Object o);
+ public abstract boolean remove(Object o);
+
+ private static final String oomeMsg = "Required array size too large";
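+
+         // Both toArray variants size from mappingCount(), then grow by
+         // roughly 1.5x if the map grows during traversal, capping at
+         // MAX_ARRAY_SIZE before failing with the OutOfMemoryError above.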
+
+ public final Object[] toArray() {
+ long sz = map.mappingCount();
+ if (sz > MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ int n = (int)sz;
+ Object[] r = new Object[n];
+ int i = 0;
+ for (E e : this) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = e;
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final <T> T[] toArray(T[] a) {
+ long sz = map.mappingCount();
+ if (sz > MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ int m = (int)sz;
+ T[] r = (a.length >= m) ? a :
+ (T[])java.lang.reflect.Array
+ .newInstance(a.getClass().getComponentType(), m);
+ int n = r.length;
+ int i = 0;
+ for (E e : this) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = (T)e;
+ }
+ if (a == r && i < n) {
+ r[i] = null; // null-terminate
+ return r;
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
+
+ /**
+ * Returns a string representation of this collection.
+ * The string representation consists of the string representations
+ * of the collection's elements in the order they are returned by
+ * its iterator, enclosed in square brackets ({@code "[]"}).
+ * Adjacent elements are separated by the characters {@code ", "}
+ * (comma and space). Elements are converted to strings as by
+ * {@link String#valueOf(Object)}.
+ *
+ * @return a string representation of this collection
+ */
+ public final String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[');
+ Iterator<E> it = iterator();
+ if (it.hasNext()) {
+ for (;;) {
+ Object e = it.next();
+ sb.append(e == this ? "(this Collection)" : e);
+ if (!it.hasNext())
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append(']').toString();
+ }
+
+ public final boolean containsAll(Collection<?> c) {
+ if (c != this) {
+ for (Object e : c) {
+ if (e == null || !contains(e))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public final boolean removeAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator<E> it = iterator(); it.hasNext();) {
+ if (c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
+ }
+
+ public final boolean retainAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator<E> it = iterator(); it.hasNext();) {
+ if (!c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
+ }
+
+ }
+
+ /**
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
+ * which additions may optionally be enabled by mapping to a
+ * common value. This class cannot be directly instantiated.
+ * See {@link #keySet() keySet()},
+ * {@link #keySet(Object) keySet(V)},
+ * {@link #newKeySet() newKeySet()},
+ * {@link #newKeySet(int) newKeySet(int)}.
+ *
+ * @since 1.8
+ */
+ public static class KeySetView<K,V> extends CollectionView<K,V,K>
+ implements Set<K>, java.io.Serializable {
+ private static final long serialVersionUID = 7249069246763182397L;
+ private final V value;
+ KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
+ super(map);
+ this.value = value;
+ }
+
+ /**
+ * Returns the default mapped value for additions,
+ * or {@code null} if additions are not supported.
+ *
+ * @return the default mapped value for additions, or {@code null}
+ * if not supported
+ */
+ public V getMappedValue() { return value; }
+
+ /**
+ * {@inheritDoc}
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean contains(Object o) { return map.containsKey(o); }
+
+ /**
+ * Removes the key from this map view by removing the key (and its
+ * corresponding value) from the backing map. This method does
+ * nothing if the key is not in the map.
+ *
+ * @param o the key to be removed from the backing map
+ * @return {@code true} if the backing map contained the specified key
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean remove(Object o) { return map.remove(o) != null; }
+
+ /**
+ * @return an iterator over the keys of the backing map
+ */
+ public Iterator<K> iterator() {
+ Node<K,V>[] t;
+ ConcurrentHashMapV8<K,V> m = map;
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new KeyIterator<K,V>(t, f, 0, f, m);
+ }
+
+ /**
+ * Adds the specified key to this set view by mapping the key to
+ * the default mapped value in the backing map, if defined.
+ *
+ * @param e key to be added
+ * @return {@code true} if this set changed as a result of the call
+ * @throws NullPointerException if the specified key is null
+ * @throws UnsupportedOperationException if no default mapped value
+ * for additions was provided
+ */
+ public boolean add(K e) {
+ V v;
+ if ((v = value) == null)
+ throw new UnsupportedOperationException();
+ return map.putVal(e, v, true) == null;
+ }
+
+ /**
+ * Adds all of the elements in the specified collection to this set,
+ * as if by calling {@link #add} on each one.
+ *
+ * @param c the elements to be inserted into this set
+ * @return {@code true} if this set changed as a result of the call
+ * @throws NullPointerException if the collection or any of its
+ * elements are {@code null}
+ * @throws UnsupportedOperationException if no default mapped value
+ * for additions was provided
+ */
+ public boolean addAll(Collection<? extends K> c) {
+ boolean added = false;
+ V v;
+ if ((v = value) == null)
+ throw new UnsupportedOperationException();
+ for (K e : c) {
+ if (map.putVal(e, v, true) == null)
+ added = true;
+ }
+ return added;
+ }
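+
+         // Sketch: with a set view from the static newKeySet() factory
+         // (whose default mapped value is Boolean.TRUE), add() behaves
+         // like Set.add:
+         //
+         //     KeySetView<String,Boolean> set = ConcurrentHashMapV8.newKeySet();
+         //     set.add("a"); // true: newly added
+         //     set.add("a"); // false: already present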
+
+ public int hashCode() {
+ int h = 0;
+ for (K e : this)
+ h += e.hashCode();
+ return h;
+ }
+
+ public boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
+ }
+
+ public ConcurrentHashMapSpliterator<K> spliteratorJSR166() {
+ Node<K,V>[] t;
+ ConcurrentHashMapV8<K,V> m = map;
+ long n = m.sumCount();
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
+ }
+
+ public void forEach(Action<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ action.apply(p.key);
+ }
+ }
+ }
+
+ /**
+ * A view of a ConcurrentHashMapV8 as a {@link Collection} of
+ * values, in which additions are disabled. This class cannot be
+ * directly instantiated. See {@link #values()}.
+ */
+ static final class ValuesView<K,V> extends CollectionView<K,V,V>
+ implements Collection<V>, java.io.Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
+ public final boolean contains(Object o) {
+ return map.containsValue(o);
+ }
+
+ public final boolean remove(Object o) {
+ if (o != null) {
+ for (Iterator<V> it = iterator(); it.hasNext();) {
+ if (o.equals(it.next())) {
+ it.remove();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public final Iterator<V> iterator() {
+ ConcurrentHashMapV8<K,V> m = map;
+ Node<K,V>[] t;
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new ValueIterator<K,V>(t, f, 0, f, m);
+ }
+
+ public final boolean add(V e) {
+ throw new UnsupportedOperationException();
+ }
+ public final boolean addAll(Collection<? extends V> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ public ConcurrentHashMapSpliterator<V> spliteratorJSR166() {
+ Node<K,V>[] t;
+ ConcurrentHashMapV8<K,V> m = map;
+ long n = m.sumCount();
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
+ }
+
+ public void forEach(Action<? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ action.apply(p.val);
+ }
+ }
+ }
+
+ /**
+ * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
+ * entries. This class cannot be directly instantiated. See
+ * {@link #entrySet()}.
+ */
+ static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
+ implements Set<Map.Entry<K,V>>, java.io.Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }
+
+ public boolean contains(Object o) {
+ Object k, v, r; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (r = map.get(k)) != null &&
+ (v = e.getValue()) != null &&
+ (v == r || v.equals(r)));
+ }
+
+ public boolean remove(Object o) {
+ Object k, v; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ map.remove(k, v));
+ }
+
+ /**
+ * @return an iterator over the entries of the backing map
+ */
+ public Iterator<Map.Entry<K,V>> iterator() {
+ ConcurrentHashMapV8<K,V> m = map;
+ Node<K,V>[] t;
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new EntryIterator<K,V>(t, f, 0, f, m);
+ }
+
+ public boolean add(Entry<K,V> e) {
+ return map.putVal(e.getKey(), e.getValue(), false) == null;
+ }
+
+ public boolean addAll(Collection<? extends Entry<K,V>> c) {
+ boolean added = false;
+ for (Entry<K,V> e : c) {
+ if (add(e))
+ added = true;
+ }
+ return added;
+ }
+
+ public final int hashCode() {
+ int h = 0;
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ h += p.hashCode();
+ }
+ }
+ return h;
+ }
+
+ public final boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
+ }
+
+ public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliteratorJSR166() {
+ Node<K,V>[] t;
+ ConcurrentHashMapV8<K,V> m = map;
+ long n = m.sumCount();
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
+ }
+
+ public void forEach(Action<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ action.apply(new MapEntry<K,V>(p.key, p.val, map));
+ }
+ }
+
+ }
+
+ // -------------------------------------------------------
+
+ /**
+ * Base class for bulk tasks. Repeats some fields and code from
+ * class Traverser, because we need to subclass CountedCompleter.
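+ *
+ * Each task traverses the table slice [baseIndex, baseLimit). If
+ * advance() meets a ForwardingNode it continues into the new table,
+ * where a bin at index i has been split into bins i and i + n (for
+ * old length n); the "index += baseSize" stride below is what picks
+ * up those upper slots.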
+ */
+ abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
+ Node<K,V>[] tab; // same as Traverser
+ Node<K,V> next;
+ int index;
+ int baseIndex;
+ int baseLimit;
+ final int baseSize;
+ int batch; // split control
+
+ BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
+ super(par);
+ this.batch = b;
+ this.index = this.baseIndex = i;
+ if ((this.tab = t) == null)
+ this.baseSize = this.baseLimit = 0;
+ else if (par == null)
+ this.baseSize = this.baseLimit = t.length;
+ else {
+ this.baseLimit = f;
+ this.baseSize = par.baseSize;
+ }
+ }
+
+ /**
+ * Same as Traverser version
+ */
+ final Node<K,V> advance() {
+ Node<K,V> e;
+ if ((e = next) != null)
+ e = e.next;
+ for (;;) {
+ Node<K,V>[] t; int i, n; K ek; // must use locals in checks
+ if (e != null)
+ return next = e;
+ if (baseIndex >= baseLimit || (t = tab) == null ||
+ (n = t.length) <= (i = index) || i < 0)
+ return next = null;
+ if ((e = tabAt(t, index)) != null && e.hash < 0) {
+ if (e instanceof ForwardingNode) {
+ tab = ((ForwardingNode<K,V>)e).nextTable;
+ e = null;
+ continue;
+ }
+ else if (e instanceof TreeBin)
+ e = ((TreeBin<K,V>)e).first;
+ else
+ e = null;
+ }
+ if ((index += baseSize) >= n)
+ index = ++baseIndex; // visit upper slots if present
+ }
+ }
+ }
+
+ /*
+ * Task classes. Coded in a regular but ugly format/style to
+ * simplify checks that each variant differs in the right way from
+ * others. The null screenings exist because compilers cannot tell
+ * that we've already null-checked task arguments, so we force
+ * the simplest hoisted bypass to help avoid convoluted traps.
+ */
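+ /*
+  * A worked example of the splitting loop shared by these tasks:
+  * with baseIndex = 0, baseLimit = 16 and batch = 4, compute() forks
+  * right-hand subtasks over [8,16), [4,8) and [2,4), halving batch
+  * each time (4 -> 2 -> 1 -> 0), then processes its remaining slice
+  * [0,2) itself and propagates completion up the completer tree.
+  */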
+ @SuppressWarnings("serial")
+ static final class ForEachKeyTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final Action<? super K> action;
+ ForEachKeyTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Action<? super K> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final Action<? super K> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachKeyTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.apply(p.key);
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachValueTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final Action<? super V> action;
+ ForEachValueTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Action<? super V> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final Action<? super V> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachValueTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.apply(p.val);
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachEntryTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final Action<? super Entry<K,V>> action;
+ ForEachEntryTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Action<? super Entry<K,V>> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final Action<? super Entry<K,V>> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachEntryTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ action.apply(p);
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachMappingTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final BiAction<? super K, ? super V> action;
+ ForEachMappingTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ BiAction<? super K,? super V> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final BiAction<? super K, ? super V> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachMappingTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ action.apply(p.key, p.val);
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachTransformedKeyTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final Fun<? super K, ? extends U> transformer;
+ final Action<? super U> action;
+ ForEachTransformedKeyTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Fun<? super K, ? extends U> transformer, Action<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final Fun<? super K, ? extends U> transformer;
+ final Action<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedKeyTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.key)) != null)
+ action.apply(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachTransformedValueTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final Fun<? super V, ? extends U> transformer;
+ final Action<? super U> action;
+ ForEachTransformedValueTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Fun<? super V, ? extends U> transformer, Action<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final Fun<? super V, ? extends U> transformer;
+ final Action<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedValueTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.val)) != null)
+ action.apply(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachTransformedEntryTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final Fun<Map.Entry<K,V>, ? extends U> transformer;
+ final Action<? super U> action;
+ ForEachTransformedEntryTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final Fun<Map.Entry<K,V>, ? extends U> transformer;
+ final Action<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedEntryTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p)) != null)
+ action.apply(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ForEachTransformedMappingTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final BiFun<? super K, ? super V, ? extends U> transformer;
+ final Action<? super U> action;
+ ForEachTransformedMappingTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ BiFun<? super K, ? super V, ? extends U> transformer,
+ Action<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final BiFun<? super K, ? super V, ? extends U> transformer;
+ final Action<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedMappingTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.key, p.val)) != null)
+ action.apply(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class SearchKeysTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Fun<? super K, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchKeysTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Fun<? super K, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final Fun<? super K, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchKeysTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
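+ // Scan until some task in the tree records a non-null result;
+ // the first to succeed CASes the shared result and quietly
+ // completes the root so sibling tasks can stop early.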
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply(p.key)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class SearchValuesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Fun<? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchValuesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Fun<? super V, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final Fun<? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchValuesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply(p.val)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class SearchEntriesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Fun<Entry<K,V>, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchEntriesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Fun<Entry<K,V>, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final Fun<Entry<K,V>, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchEntriesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply(p)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class SearchMappingsTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final BiFun<? super K, ? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchMappingsTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ BiFun<? super K, ? super V, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final BiFun<? super K, ? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchMappingsTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply(p.key, p.val)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ReduceKeysTask<K,V>
+ extends BulkTask<K,V,K> {
+ final BiFun<? super K, ? super K, ? extends K> reducer;
+ K result;
+ ReduceKeysTask<K,V> rights, nextRight;
+ ReduceKeysTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ ReduceKeysTask<K,V> nextRight,
+ BiFun<? super K, ? super K, ? extends K> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.reducer = reducer;
+ }
+ public final K getRawResult() { return result; }
+ public final void compute() {
+ final BiFun<? super K, ? super K, ? extends K> reducer;
+ if ((reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new ReduceKeysTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, reducer)).fork();
+ }
+ K r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ K u = p.key;
+ r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
+ }
+ result = r;
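+ // Completion phase: walking up the completer tree, fold each
+ // finished right-hand subtask's result into its parent's result.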
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
+ t = (ReduceKeysTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ K tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ReduceValuesTask<K,V>
+ extends BulkTask<K,V,V> {
+ final BiFun<? super V, ? super V, ? extends V> reducer;
+ V result;
+ ReduceValuesTask<K,V> rights, nextRight;
+ ReduceValuesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ ReduceValuesTask<K,V> nextRight,
+ BiFun<? super V, ? super V, ? extends V> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.reducer = reducer;
+ }
+ public final V getRawResult() { return result; }
+ public final void compute() {
+ final BiFun<? super V, ? super V, ? extends V> reducer;
+ if ((reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new ReduceValuesTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, reducer)).fork();
+ }
+ V r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ V v = p.val;
+ r = (r == null) ? v : reducer.apply(r, v);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
+ t = (ReduceValuesTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ V tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class ReduceEntriesTask<K,V>
+ extends BulkTask<K,V,Map.Entry<K,V>> {
+ final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
+ Map.Entry<K,V> result;
+ ReduceEntriesTask<K,V> rights, nextRight;
+ ReduceEntriesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ ReduceEntriesTask<K,V> nextRight,
+ BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.reducer = reducer;
+ }
+ public final Map.Entry<K,V> getRawResult() { return result; }
+ public final void compute() {
+ final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
+ if ((reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new ReduceEntriesTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, reducer)).fork();
+ }
+ Map.Entry<K,V> r = null;
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = (r == null) ? p : reducer.apply(r, p);
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
+ t = (ReduceEntriesTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ Map.Entry<K,V> tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceKeysTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Fun<? super K, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceKeysTask<K,V,U> rights, nextRight;
+ MapReduceKeysTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysTask<K,V,U> nextRight,
+ Fun<? super K, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final Fun<? super K, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.key)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
+ t = (MapReduceKeysTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceValuesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Fun<? super V, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceValuesTask<K,V,U> rights, nextRight;
+ MapReduceValuesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesTask<K,V,U> nextRight,
+ Fun<? super V, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final Fun<? super V, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.val)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
+ t = (MapReduceValuesTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceEntriesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Fun<Map.Entry<K,V>, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceEntriesTask<K,V,U> rights, nextRight;
+ MapReduceEntriesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesTask<K,V,U> nextRight,
+ Fun<Map.Entry<K,V>, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final Fun<Map.Entry<K,V>, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
+ t = (MapReduceEntriesTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceMappingsTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final BiFun<? super K, ? super V, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceMappingsTask<K,V,U> rights, nextRight;
+ MapReduceMappingsTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsTask<K,V,U> nextRight,
+ BiFun<? super K, ? super V, ? extends U> transformer,
+ BiFun<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final BiFun<? super K, ? super V, ? extends U> transformer;
+ final BiFun<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.key, p.val)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
+ t = (MapReduceMappingsTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceKeysToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ObjectToDouble<? super K> transformer;
+ final DoubleByDoubleToDouble reducer;
+ final double basis;
+ double result;
+ MapReduceKeysToDoubleTask<K,V> rights, nextRight;
+ MapReduceKeysToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysToDoubleTask<K,V> nextRight,
+ ObjectToDouble<? super K> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToDouble<? super K> transformer;
+ final DoubleByDoubleToDouble reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.key));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
+ t = (MapReduceKeysToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceValuesToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ObjectToDouble<? super V> transformer;
+ final DoubleByDoubleToDouble reducer;
+ final double basis;
+ double result;
+ MapReduceValuesToDoubleTask<K,V> rights, nextRight;
+ MapReduceValuesToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesToDoubleTask<K,V> nextRight,
+ ObjectToDouble<? super V> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToDouble<? super V> transformer;
+ final DoubleByDoubleToDouble reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
+ t = (MapReduceValuesToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceEntriesToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ObjectToDouble<Map.Entry<K,V>> transformer;
+ final DoubleByDoubleToDouble reducer;
+ final double basis;
+ double result;
+ MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
+ MapReduceEntriesToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesToDoubleTask<K,V> nextRight,
+ ObjectToDouble<Map.Entry<K,V>> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToDouble<Map.Entry<K,V>> transformer;
+ final DoubleByDoubleToDouble reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
+ t = (MapReduceEntriesToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceMappingsToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ObjectByObjectToDouble<? super K, ? super V> transformer;
+ final DoubleByDoubleToDouble reducer;
+ final double basis;
+ double result;
+ MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
+ MapReduceMappingsToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsToDoubleTask<K,V> nextRight,
+ ObjectByObjectToDouble<? super K, ? super V> transformer,
+ double basis,
+ DoubleByDoubleToDouble reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ObjectByObjectToDouble<? super K, ? super V> transformer;
+ final DoubleByDoubleToDouble reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.key, p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
+ t = (MapReduceMappingsToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceKeysToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ObjectToLong<? super K> transformer;
+ final LongByLongToLong reducer;
+ final long basis;
+ long result;
+ MapReduceKeysToLongTask<K,V> rights, nextRight;
+ MapReduceKeysToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysToLongTask<K,V> nextRight,
+ ObjectToLong<? super K> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToLong<? super K> transformer;
+ final LongByLongToLong reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.key));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
+ t = (MapReduceKeysToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceValuesToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ObjectToLong<? super V> transformer;
+ final LongByLongToLong reducer;
+ final long basis;
+ long result;
+ MapReduceValuesToLongTask<K,V> rights, nextRight;
+ MapReduceValuesToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesToLongTask<K,V> nextRight,
+ ObjectToLong<? super V> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToLong<? super V> transformer;
+ final LongByLongToLong reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
+ t = (MapReduceValuesToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceEntriesToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ObjectToLong<Map.Entry<K,V>> transformer;
+ final LongByLongToLong reducer;
+ final long basis;
+ long result;
+ MapReduceEntriesToLongTask<K,V> rights, nextRight;
+ MapReduceEntriesToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesToLongTask<K,V> nextRight,
+ ObjectToLong<Map.Entry<K,V>> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToLong<Map.Entry<K,V>> transformer;
+ final LongByLongToLong reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
+ t = (MapReduceEntriesToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceMappingsToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ObjectByObjectToLong<? super K, ? super V> transformer;
+ final LongByLongToLong reducer;
+ final long basis;
+ long result;
+ MapReduceMappingsToLongTask<K,V> rights, nextRight;
+ MapReduceMappingsToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsToLongTask<K,V> nextRight,
+ ObjectByObjectToLong<? super K, ? super V> transformer,
+ long basis,
+ LongByLongToLong reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ObjectByObjectToLong<? super K, ? super V> transformer;
+ final LongByLongToLong reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.key, p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
+ t = (MapReduceMappingsToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceKeysToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ObjectToInt<? super K> transformer;
+ final IntByIntToInt reducer;
+ final int basis;
+ int result;
+ MapReduceKeysToIntTask<K,V> rights, nextRight;
+ MapReduceKeysToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysToIntTask<K,V> nextRight,
+ ObjectToInt<? super K> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToInt<? super K> transformer;
+ final IntByIntToInt reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.key));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
+ t = (MapReduceKeysToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceValuesToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ObjectToInt<? super V> transformer;
+ final IntByIntToInt reducer;
+ final int basis;
+ int result;
+ MapReduceValuesToIntTask<K,V> rights, nextRight;
+ MapReduceValuesToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesToIntTask<K,V> nextRight,
+ ObjectToInt<? super V> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToInt<? super V> transformer;
+ final IntByIntToInt reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
+ t = (MapReduceValuesToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceEntriesToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ObjectToInt<Map.Entry<K,V>> transformer;
+ final IntByIntToInt reducer;
+ final int basis;
+ int result;
+ MapReduceEntriesToIntTask<K,V> rights, nextRight;
+ MapReduceEntriesToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesToIntTask<K,V> nextRight,
+ ObjectToInt<Map.Entry<K,V>> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ObjectToInt<Map.Entry<K,V>> transformer;
+ final IntByIntToInt reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
+ t = (MapReduceEntriesToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class MapReduceMappingsToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ObjectByObjectToInt<? super K, ? super V> transformer;
+ final IntByIntToInt reducer;
+ final int basis;
+ int result;
+ MapReduceMappingsToIntTask<K,V> rights, nextRight;
+ MapReduceMappingsToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsToIntTask<K,V> nextRight,
+ ObjectByObjectToInt<? super K, ? super V> transformer,
+ int basis,
+ IntByIntToInt reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ObjectByObjectToInt<? super K, ? super V> transformer;
+ final IntByIntToInt reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.apply(r, transformer.apply(p.key, p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
+ t = (MapReduceMappingsToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.apply(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ /* ---------------- Counters -------------- */
+
+ // Adapted from LongAdder and Striped64.
+ // See their internal docs for explanation.
+
+ // A padded cell for distributing counts
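+ // (p0..p6 and q0..q6 pad each cell so that the value fields of
+ // different cells land on different cache lines, typically 64
+ // bytes, avoiding false sharing between updating threads.)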
+ static final class CounterCell {
+ volatile long p0, p1, p2, p3, p4, p5, p6;
+ volatile long value;
+ volatile long q0, q1, q2, q3, q4, q5, q6;
+ CounterCell(long x) { value = x; }
+ }
+
+ /**
+ * Holder for the thread-local hash code determining which
+ * CounterCell to use. The code is initialized via the
+ * counterHashCodeGenerator, but may be moved upon collisions.
+ */
+ static final class CounterHashCode {
+ int code;
+ }
+
+ /**
+ * Generates initial value for per-thread CounterHashCodes.
+ */
+ static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
+
+ /**
+ * Increment for counterHashCodeGenerator. See class ThreadLocal
+ * for explanation.
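+ * (0x61c88647 is the 32-bit golden-ratio increment: the negation,
+ * mod 2^32, of 0x9E3779B9 = floor(2^32 / phi), so successive seeds
+ * are spread evenly across the hash space.)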
+ */
+ static final int SEED_INCREMENT = 0x61c88647;
+
+ /**
+ * Per-thread counter hash codes. Shared across all instances.
+ */
+ static final ThreadLocal<CounterHashCode> threadCounterHashCode =
+ new ThreadLocal<CounterHashCode>();
+
+
+ final long sumCount() {
+ CounterCell[] as = counterCells; CounterCell a;
+ long sum = baseCount;
+ if (as != null) {
+ for (int i = 0; i < as.length; ++i) {
+ if ((a = as[i]) != null)
+ sum += a.value;
+ }
+ }
+ return sum;
+ }
+
+ // See LongAdder version for explanation
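+ // Outline: pick a cell by the thread-local hash; if the cell is
+ // missing, try to attach one under the cellsBusy spinlock; on CAS
+ // contention rehash and retry; if contention persists, grow the
+ // cell table (up to roughly NCPU); else fall back to baseCount.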
+ private final void fullAddCount(long x, CounterHashCode hc,
+ boolean wasUncontended) {
+ int h;
+ if (hc == null) {
+ hc = new CounterHashCode();
+ int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
+ h = hc.code = (s == 0) ? 1 : s; // Avoid zero
+ threadCounterHashCode.set(hc);
+ }
+ else
+ h = hc.code;
+ boolean collide = false; // True if last slot nonempty
+ for (;;) {
+ CounterCell[] as; CounterCell a; int n; long v;
+ if ((as = counterCells) != null && (n = as.length) > 0) {
+ if ((a = as[(n - 1) & h]) == null) {
+ if (cellsBusy == 0) { // Try to attach new Cell
+ CounterCell r = new CounterCell(x); // Optimistic create
+ if (cellsBusy == 0 &&
+ U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+ boolean created = false;
+ try { // Recheck under lock
+ CounterCell[] rs; int m, j;
+ if ((rs = counterCells) != null &&
+ (m = rs.length) > 0 &&
+ rs[j = (m - 1) & h] == null) {
+ rs[j] = r;
+ created = true;
+ }
+ } finally {
+ cellsBusy = 0;
+ }
+ if (created)
+ break;
+ continue; // Slot is now non-empty
+ }
+ }
+ collide = false;
+ }
+ else if (!wasUncontended) // CAS already known to fail
+ wasUncontended = true; // Continue after rehash
+ else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
+ break;
+ else if (counterCells != as || n >= NCPU)
+ collide = false; // At max size or stale
+ else if (!collide)
+ collide = true;
+ else if (cellsBusy == 0 &&
+ U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+ try {
+ if (counterCells == as) {// Expand table unless stale
+ CounterCell[] rs = new CounterCell[n << 1];
+ for (int i = 0; i < n; ++i)
+ rs[i] = as[i];
+ counterCells = rs;
+ }
+ } finally {
+ cellsBusy = 0;
+ }
+ collide = false;
+ continue; // Retry with expanded table
+ }
+ h ^= h << 13; // Rehash
+ h ^= h >>> 17;
+ h ^= h << 5;
+ }
+ else if (cellsBusy == 0 && counterCells == as &&
+ U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+ boolean init = false;
+ try { // Initialize table
+ if (counterCells == as) {
+ CounterCell[] rs = new CounterCell[2];
+ rs[h & 1] = new CounterCell(x);
+ counterCells = rs;
+ init = true;
+ }
+ } finally {
+ cellsBusy = 0;
+ }
+ if (init)
+ break;
+ }
+ else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
+ break; // Fall back on using base
+ }
+ hc.code = h; // Record index for next time
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long SIZECTL;
+ private static final long TRANSFERINDEX;
+ private static final long BASECOUNT;
+ private static final long CELLSBUSY;
+ private static final long CELLVALUE;
+ private static final long ABASE;
+ private static final int ASHIFT;
+
+ static {
+ try {
+ U = getUnsafe();
+ Class<?> k = ConcurrentHashMapV8.class;
+ SIZECTL = U.objectFieldOffset
+ (k.getDeclaredField("sizeCtl"));
+ TRANSFERINDEX = U.objectFieldOffset
+ (k.getDeclaredField("transferIndex"));
+ BASECOUNT = U.objectFieldOffset
+ (k.getDeclaredField("baseCount"));
+ CELLSBUSY = U.objectFieldOffset
+ (k.getDeclaredField("cellsBusy"));
+ Class<?> ck = CounterCell.class;
+ CELLVALUE = U.objectFieldOffset
+ (ck.getDeclaredField("value"));
+ Class<?> ak = Node[].class;
+ ABASE = U.arrayBaseOffset(ak);
+ int scale = U.arrayIndexScale(ak);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
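+ // Scan the declared fields rather than assuming the usual
+ // field name ("theUnsafe"), tolerating VMs that name it
+ // differently.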
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/CountedCompleter.java b/src/main/java/jsr166e/CountedCompleter.java
new file mode 100644
index 0000000..c7717ee
--- /dev/null
+++ b/src/main/java/jsr166e/CountedCompleter.java
@@ -0,0 +1,750 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+/**
+ * A {@link ForkJoinTask} with a completion action performed when
+ * triggered and there are no remaining pending actions.
+ * CountedCompleters are in general more robust in the
+ * presence of subtask stalls and blockage than are other forms of
+ * ForkJoinTasks, but are less intuitive to program. Uses of
+ * CountedCompleter are similar to those of other completion based
+ * components (such as {@link java.nio.channels.CompletionHandler})
+ * except that multiple <em>pending</em> completions may be necessary
+ * to trigger the completion action {@link #onCompletion(CountedCompleter)},
+ * not just one.
+ * Unless initialized otherwise, the {@linkplain #getPendingCount pending
+ * count} starts at zero, but may be (atomically) changed using
+ * methods {@link #setPendingCount}, {@link #addToPendingCount}, and
+ * {@link #compareAndSetPendingCount}. Upon invocation of {@link
+ * #tryComplete}, if the pending action count is nonzero, it is
+ * decremented; otherwise, the completion action is performed, and if
+ * this completer itself has a completer, the process is continued
+ * with its completer. As is the case with related synchronization
+ * components such as {@link java.util.concurrent.Phaser Phaser} and
+ * {@link java.util.concurrent.Semaphore Semaphore}, these methods
+ * affect only internal counts; they do not establish any further
+ * internal bookkeeping. In particular, the identities of pending
+ * tasks are not maintained. As illustrated below, you can create
+ * subclasses that do record some or all pending tasks or their
+ * results when needed. Utility methods supporting customization of
+ * completion traversals are likewise illustrated below. However,
+ * because CountedCompleters provide only basic
+ * synchronization mechanisms, it may be useful to create further
+ * abstract subclasses that maintain linkages, fields, and additional
+ * support methods appropriate for a set of related usages.
+ *
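+ * <p>As a schematic example of the counting rule above, where
+ * {@code c} is some CountedCompleter: with a pending count of two,
+ * three {@code tryComplete} calls are needed in all:
+ *
+ * <pre> {@code
+ * c.setPendingCount(2);
+ * c.tryComplete(); // nonzero: decrements to 1
+ * c.tryComplete(); // nonzero: decrements to 0
+ * c.tryComplete(); // zero: performs the completion action}</pre>
+ *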
+ * <p>A concrete CountedCompleter class must define method {@link
+ * #compute}, that should in most cases (as illustrated below), invoke
+ * {@code tryComplete()} once before returning. The class may also
+ * optionally override method {@link #onCompletion(CountedCompleter)}
+ * to perform an action upon normal completion, and method
+ * {@link #onExceptionalCompletion(Throwable, CountedCompleter)} to
+ * perform an action upon any exception.
+ *
+ * <p>CountedCompleters most often do not bear results, in which case
+ * they are normally declared as {@code CountedCompleter<Void>}, and
+ * will always return {@code null} as a result value. In other cases,
+ * you should override method {@link #getRawResult} to provide a
+ * result from {@code join(), invoke()}, and related methods. In
+ * general, this method should return the value of a field (or a
+ * function of one or more fields) of the CountedCompleter object that
+ * holds the result upon completion. Method {@link #setRawResult} by
+ * default plays no role in CountedCompleters. It is possible, but
+ * rarely applicable, to override this method to maintain other
+ * objects or fields holding result data.
+ *
+ * <p>A CountedCompleter that does not itself have a completer (i.e.,
+ * one for which {@link #getCompleter} returns {@code null}) can be
+ * used as a regular ForkJoinTask with this added functionality.
+ * However, any completer that in turn has another completer serves
+ * only as an internal helper for other computations, so its own task
+ * status (as reported in methods such as {@link ForkJoinTask#isDone})
+ * is arbitrary; this status changes only upon explicit invocations of
+ * {@link #complete}, {@link ForkJoinTask#cancel},
+ * {@link ForkJoinTask#completeExceptionally(Throwable)} or upon
+ * exceptional completion of method {@code compute}. Upon any
+ * exceptional completion, the exception may be relayed to a task's
+ * completer (and its completer, and so on), if one exists and it has
+ * not otherwise already completed. Similarly, cancelling an internal
+ * CountedCompleter has only a local effect on that completer, so is
+ * not often useful.
+ *
+ * <p><b>Sample Usages.</b>
+ *
+ * <p><b>Parallel recursive decomposition.</b> CountedCompleters may
+ * be arranged in trees similar to those often used with {@link
+ * RecursiveAction}s, although the constructions involved in setting
+ * them up typically vary. Here, the completer of each task is its
+ * parent in the computation tree. Even though they entail a bit more
+ * bookkeeping, CountedCompleters may be better choices when applying
+ * a possibly time-consuming operation (that cannot be further
+ * subdivided) to each element of an array or collection; especially
+ * when the operation takes a significantly different amount of time
+ * to complete for some elements than others, either because of
+ * intrinsic variation (for example I/O) or auxiliary effects such as
+ * garbage collection. Because CountedCompleters provide their own
+ * continuations, other threads need not block waiting to perform
+ * them.
+ *
+ * <p>For example, here is an initial version of a class that uses
+ * divide-by-two recursive decomposition to divide work into single
+ * pieces (leaf tasks). Even when work is split into individual calls,
+ * tree-based techniques are usually preferable to directly forking
+ * leaf tasks, because they reduce inter-thread communication and
+ * improve load balancing. In the recursive case, the second of each
+ * pair of subtasks to finish triggers completion of its parent
+ * (because no result combination is performed, the default no-op
+ * implementation of method {@code onCompletion} is not overridden).
+ * A static utility method sets up the base task and invokes it
+ * (here, implicitly using the {@link ForkJoinPool#commonPool()}).
+ *
+ * <pre> {@code
+ * class MyOperation<E> { void apply(E e) { ... } }
+ *
+ * class ForEach<E> extends CountedCompleter<Void> {
+ *
+ * public static <E> void forEach(E[] array, MyOperation<E> op) {
+ * new ForEach<E>(null, array, op, 0, array.length).invoke();
+ * }
+ *
+ * final E[] array; final MyOperation<E> op; final int lo, hi;
+ * ForEach(CountedCompleter<?> p, E[] array, MyOperation<E> op, int lo, int hi) {
+ * super(p);
+ * this.array = array; this.op = op; this.lo = lo; this.hi = hi;
+ * }
+ *
+ * public void compute() { // version 1
+ * if (hi - lo >= 2) {
+ * int mid = (lo + hi) >>> 1;
+ * setPendingCount(2); // must set pending count before fork
+ * new ForEach(this, array, op, mid, hi).fork(); // right child
+ * new ForEach(this, array, op, lo, mid).fork(); // left child
+ * }
+ * else if (hi > lo)
+ * op.apply(array[lo]);
+ * tryComplete();
+ * }
+ * }}</pre>
+ *
+ * This design can be improved by noticing that in the recursive case,
+ * the task has nothing to do after forking its right task, so can
+ * directly invoke its left task before returning. (This is an analog
+ * of tail recursion removal.) Also, because the task returns upon
+ * executing its left task (rather than falling through to invoke
+ * {@code tryComplete}), the pending count is set to one:
+ *
+ * <pre> {@code
+ * class ForEach<E> ...
+ * public void compute() { // version 2
+ * if (hi - lo >= 2) {
+ * int mid = (lo + hi) >>> 1;
+ * setPendingCount(1); // only one pending
+ * new ForEach(this, array, op, mid, hi).fork(); // right child
+ * new ForEach(this, array, op, lo, mid).compute(); // direct invoke
+ * }
+ * else {
+ * if (hi > lo)
+ * op.apply(array[lo]);
+ * tryComplete();
+ * }
+ * }
+ * }</pre>
+ *
+ * As a further improvement, notice that the left task need not even exist.
+ * Instead of creating a new one, we can iterate using the original task,
+ * and add a pending count for each fork. Additionally, because no task
+ * in this tree implements an {@link #onCompletion(CountedCompleter)} method,
+ * {@code tryComplete()} can be replaced with {@link #propagateCompletion}.
+ *
+ * <pre> {@code
+ * class ForEach<E> ...
+ * public void compute() { // version 3
+ * int l = lo, h = hi;
+ * while (h - l >= 2) {
+ * int mid = (l + h) >>> 1;
+ * addToPendingCount(1);
+ * new ForEach(this, array, op, mid, h).fork(); // right child
+ * h = mid;
+ * }
+ * if (h > l)
+ * op.apply(array[l]);
+ * propagateCompletion();
+ * }
+ * }</pre>
+ *
+ * Additional improvements of such classes might entail precomputing
+ * pending counts so that they can be established in constructors,
+ * specializing classes for leaf steps, subdividing by, say, four,
+ * instead of two per iteration, and using an adaptive threshold
+ * instead of always subdividing down to single elements.
+ *
+ * <p><b>Searching.</b> A tree of CountedCompleters can search for a
+ * value or property in different parts of a data structure, and
+ * report a result in an {@link
+ * java.util.concurrent.atomic.AtomicReference AtomicReference} as
+ * soon as one is found. The others can poll the result to avoid
+ * unnecessary work. (You could additionally {@linkplain #cancel
+ * cancel} other tasks, but it is usually simpler and more efficient
+ * to just let them notice that the result is set and if so skip
+ * further processing.) Illustrating again with an array using full
+ * partitioning (again, in practice, leaf tasks will almost always
+ * process more than one element):
+ *
+ * <pre> {@code
+ * class Searcher<E> extends CountedCompleter<E> {
+ * final E[] array; final AtomicReference<E> result; final int lo, hi;
+ * Searcher(CountedCompleter<?> p, E[] array, AtomicReference<E> result, int lo, int hi) {
+ * super(p);
+ * this.array = array; this.result = result; this.lo = lo; this.hi = hi;
+ * }
+ * public E getRawResult() { return result.get(); }
+ * public void compute() { // similar to ForEach version 3
+ * int l = lo, h = hi;
+ * while (result.get() == null && h >= l) {
+ * if (h - l >= 2) {
+ * int mid = (l + h) >>> 1;
+ * addToPendingCount(1);
+ * new Searcher(this, array, result, mid, h).fork();
+ * h = mid;
+ * }
+ * else {
+ * E x = array[l];
+ * if (matches(x) && result.compareAndSet(null, x))
+ * quietlyCompleteRoot(); // root task is now joinable
+ * break;
+ * }
+ * }
+ * tryComplete(); // normally complete whether or not found
+ * }
+ * boolean matches(E e) { ... } // return true if found
+ *
+ * public static <E> E search(E[] array) {
+ * return new Searcher<E>(null, array, new AtomicReference<E>(), 0, array.length).invoke();
+ * }
+ * }}</pre>
+ *
+ * In this example, as well as others in which tasks have no other
+ * effects except to compareAndSet a common result, the trailing
+ * unconditional invocation of {@code tryComplete} could be made
+ * conditional ({@code if (result.get() == null) tryComplete();})
+ * because no further bookkeeping is required to manage completions
+ * once the root task completes.
+ *
+ * <p><b>Recording subtasks.</b> CountedCompleter tasks that combine
+ * results of multiple subtasks usually need to access these results
+ * in method {@link #onCompletion(CountedCompleter)}. As illustrated in the following
+ * class (that performs a simplified form of map-reduce where mappings
+ * and reductions are all of type {@code E}), one way to do this in
+ * divide and conquer designs is to have each subtask record its
+ * sibling, so that it can be accessed in method {@code onCompletion}.
+ * This technique applies to reductions in which the order of
+ * combining left and right results does not matter; ordered
+ * reductions require explicit left/right designations. Variants of
+ * other streamlinings seen in the above examples may also apply.
+ *
+ * <pre> {@code
+ * class MyMapper<E> { E apply(E v) { ... } }
+ * class MyReducer<E> { E apply(E x, E y) { ... } }
+ * class MapReducer<E> extends CountedCompleter<E> {
+ * final E[] array; final MyMapper<E> mapper;
+ * final MyReducer<E> reducer; final int lo, hi;
+ * MapReducer<E> sibling;
+ * E result;
+ * MapReducer(CountedCompleter<?> p, E[] array, MyMapper<E> mapper,
+ * MyReducer<E> reducer, int lo, int hi) {
+ * super(p);
+ * this.array = array; this.mapper = mapper;
+ * this.reducer = reducer; this.lo = lo; this.hi = hi;
+ * }
+ * public void compute() {
+ * if (hi - lo >= 2) {
+ * int mid = (lo + hi) >>> 1;
+ * MapReducer<E> left = new MapReducer(this, array, mapper, reducer, lo, mid);
+ * MapReducer<E> right = new MapReducer(this, array, mapper, reducer, mid, hi);
+ * left.sibling = right;
+ * right.sibling = left;
+ * setPendingCount(1); // only right is pending
+ * right.fork();
+ * left.compute(); // directly execute left
+ * }
+ * else {
+ * if (hi > lo)
+ * result = mapper.apply(array[lo]);
+ * tryComplete();
+ * }
+ * }
+ * public void onCompletion(CountedCompleter<?> caller) {
+ * if (caller != this) {
+ * MapReducer<E> child = (MapReducer<E>)caller;
+ * MapReducer<E> sib = child.sibling;
+ * if (sib == null || sib.result == null)
+ * result = child.result;
+ * else
+ * result = reducer.apply(child.result, sib.result);
+ * }
+ * }
+ * public E getRawResult() { return result; }
+ *
+ * public static <E> E mapReduce(E[] array, MyMapper<E> mapper, MyReducer<E> reducer) {
+ * return new MapReducer<E>(null, array, mapper, reducer,
+ * 0, array.length).invoke();
+ * }
+ * }}</pre>
+ *
+ * Here, method {@code onCompletion} takes a form common to many
+ * completion designs that combine results. This callback-style method
+ * is triggered once per task, in either of the two different contexts
+ * in which the pending count is, or becomes, zero: (1) by a task
+ * itself, if its pending count is zero upon invocation of {@code
+ * tryComplete}, or (2) by any of its subtasks when they complete and
+ * decrement the pending count to zero. The {@code caller} argument
+ * distinguishes cases. Most often, when the caller is {@code this},
+ * no action is necessary. Otherwise the caller argument can be used
+ * (usually via a cast) to supply a value (and/or links to other
+ * values) to be combined. Assuming proper use of pending counts, the
+ * actions inside {@code onCompletion} occur (once) upon completion of
+ * a task and its subtasks. No additional synchronization is required
+ * within this method to ensure thread safety of accesses to fields of
+ * this task or other completed tasks.
+ *
+ * <p><b>Completion Traversals</b>. If using {@code onCompletion} to
+ * process completions is inapplicable or inconvenient, you can use
+ * methods {@link #firstComplete} and {@link #nextComplete} to create
+ * custom traversals. For example, to define a MapReducer that only
+ * splits out right-hand tasks in the form of the third ForEach
+ * example, the completions must cooperatively reduce along
+ * unexhausted subtask links, which can be done as follows:
+ *
+ * <pre> {@code
+ * class MapReducer<E> extends CountedCompleter<E> { // version 2
+ * final E[] array; final MyMapper<E> mapper;
+ * final MyReducer<E> reducer; final int lo, hi;
+ * MapReducer<E> forks, next; // record subtask forks in list
+ * E result;
+ * MapReducer(CountedCompleter<?> p, E[] array, MyMapper<E> mapper,
+ * MyReducer<E> reducer, int lo, int hi, MapReducer<E> next) {
+ * super(p);
+ * this.array = array; this.mapper = mapper;
+ * this.reducer = reducer; this.lo = lo; this.hi = hi;
+ * this.next = next;
+ * }
+ * public void compute() {
+ * int l = lo, h = hi;
+ * while (h - l >= 2) {
+ * int mid = (l + h) >>> 1;
+ * addToPendingCount(1);
+ * (forks = new MapReducer(this, array, mapper, reducer, mid, h, forks)).fork();
+ * h = mid;
+ * }
+ * if (h > l)
+ * result = mapper.apply(array[l]);
+ * // process completions by reducing along and advancing subtask links
+ * for (CountedCompleter<?> c = firstComplete(); c != null; c = c.nextComplete()) {
+ * for (MapReducer t = (MapReducer)c, s = t.forks; s != null; s = t.forks = s.next)
+ * t.result = reducer.apply(t.result, s.result);
+ * }
+ * }
+ * public E getRawResult() { return result; }
+ *
+ * public static <E> E mapReduce(E[] array, MyMapper<E> mapper, MyReducer<E> reducer) {
+ * return new MapReducer<E>(null, array, mapper, reducer,
+ * 0, array.length, null).invoke();
+ * }
+ * }}</pre>
+ *
+ * <p><b>Triggers.</b> Some CountedCompleters are themselves never
+ * forked, but instead serve as bits of plumbing in other designs,
+ * including those in which the completion of one or more async tasks
+ * triggers another async task. For example:
+ *
+ * <pre> {@code
+ * class HeaderBuilder extends CountedCompleter<...> { ... }
+ * class BodyBuilder extends CountedCompleter<...> { ... }
+ * class PacketSender extends CountedCompleter<...> {
+ * PacketSender(...) { super(null, 1); ... } // trigger on second completion
+ * public void compute() { } // never called
+ * public void onCompletion(CountedCompleter<?> caller) { sendPacket(); }
+ * }
+ * // sample use:
+ * PacketSender p = new PacketSender();
+ * new HeaderBuilder(p, ...).fork();
+ * new BodyBuilder(p, ...).fork();
+ * }</pre>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public abstract class CountedCompleter<T> extends ForkJoinTask<T> {
+ private static final long serialVersionUID = 5232453752276485070L;
+
+ /** This task's completer, or null if none */
+ final CountedCompleter<?> completer;
+ /** The number of pending tasks until completion */
+ volatile int pending;
+
+ /**
+ * Creates a new CountedCompleter with the given completer
+ * and initial pending count.
+ *
+ * @param completer this task's completer, or {@code null} if none
+ * @param initialPendingCount the initial pending count
+ */
+ protected CountedCompleter(CountedCompleter<?> completer,
+ int initialPendingCount) {
+ this.completer = completer;
+ this.pending = initialPendingCount;
+ }
+
+ /**
+ * Creates a new CountedCompleter with the given completer
+ * and an initial pending count of zero.
+ *
+ * @param completer this task's completer, or {@code null} if none
+ */
+ protected CountedCompleter(CountedCompleter<?> completer) {
+ this.completer = completer;
+ }
+
+ /**
+ * Creates a new CountedCompleter with no completer
+ * and an initial pending count of zero.
+ */
+ protected CountedCompleter() {
+ this.completer = null;
+ }
+
+ /**
+ * The main computation performed by this task.
+ */
+ public abstract void compute();
+
+ /**
+ * Performs an action when method {@link #tryComplete} is invoked
+ * and the pending count is zero, or when the unconditional
+ * method {@link #complete} is invoked. By default, this method
+ * does nothing. You can distinguish cases by checking the
+ * identity of the given caller argument. If not equal to {@code
+ * this}, then it is typically a subtask that may contain results
+ * (and/or links to other results) to combine.
+ *
+ * @param caller the task invoking this method (which may
+ * be this task itself)
+ */
+ public void onCompletion(CountedCompleter<?> caller) {
+ }
+
+ /**
+ * Performs an action when method {@link
+ * #completeExceptionally(Throwable)} is invoked or method {@link
+ * #compute} throws an exception, and this task has not already
+ * otherwise completed normally. On entry to this method, this task
+ * {@link ForkJoinTask#isCompletedAbnormally}. The return value
+ * of this method controls further propagation: If {@code true}
+ * and this task has a completer that has not completed, then that
+ * completer is also completed exceptionally, with the same
+ * exception as this completer. The default implementation of
+ * this method does nothing except return {@code true}.
+ *
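+ * <p>For example, a sketch of an override that records the failure
+ * and still propagates it (the logging call is illustrative):
+ *
+ * <pre> {@code
+ * public boolean onExceptionalCompletion(Throwable ex, CountedCompleter<?> caller) {
+ *   System.err.println("subtask failed: " + ex);
+ *   return true; // relay to this task's completer, if one exists
+ * }}</pre>
+ *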
+ * @param ex the exception
+ * @param caller the task invoking this method (which may
+ * be this task itself)
+ * @return {@code true} if this exception should be propagated to this
+ * task's completer, if one exists
+ */
+ public boolean onExceptionalCompletion(Throwable ex, CountedCompleter<?> caller) {
+ return true;
+ }
+
+ /**
+ * Returns the completer established in this task's constructor,
+ * or {@code null} if none.
+ *
+ * @return the completer
+ */
+ public final CountedCompleter<?> getCompleter() {
+ return completer;
+ }
+
+ /**
+ * Returns the current pending count.
+ *
+ * @return the current pending count
+ */
+ public final int getPendingCount() {
+ return pending;
+ }
+
+ /**
+ * Sets the pending count to the given value.
+ *
+ * @param count the count
+ */
+ public final void setPendingCount(int count) {
+ pending = count;
+ }
+
+ /**
+ * Adds (atomically) the given value to the pending count.
+ *
+ * @param delta the value to add
+ */
+ public final void addToPendingCount(int delta) {
+ int c;
+ do {} while (!U.compareAndSwapInt(this, PENDING, c = pending, c+delta));
+ }
+
+ /**
+ * Sets (atomically) the pending count to the given count only if
+ * it currently holds the given expected value.
+ *
+ * @param expected the expected value
+ * @param count the new value
+ * @return {@code true} if successful
+ */
+ public final boolean compareAndSetPendingCount(int expected, int count) {
+ return U.compareAndSwapInt(this, PENDING, expected, count);
+ }
+
+ /**
+ * If the pending count is nonzero, (atomically) decrements it.
+ *
+ * @return the initial (undecremented) pending count holding on entry
+ * to this method
+ */
+ public final int decrementPendingCountUnlessZero() {
+ int c;
+ do {} while ((c = pending) != 0 &&
+ !U.compareAndSwapInt(this, PENDING, c, c - 1));
+ return c;
+ }
+
+ /**
+ * Returns the root of the current computation; i.e., this
+ * task if it has no completer, else its completer's root.
+ *
+ * @return the root of the current computation
+ */
+ public final CountedCompleter<?> getRoot() {
+ CountedCompleter<?> a = this, p;
+ while ((p = a.completer) != null)
+ a = p;
+ return a;
+ }
+
+ /**
+ * If the pending count is nonzero, decrements the count;
+ * otherwise invokes {@link #onCompletion(CountedCompleter)}
+ * and then similarly tries to complete this task's completer,
+ * if one exists, else marks this task as complete.
+ */
+ public final void tryComplete() {
+ CountedCompleter<?> a = this, s = a;
+ for (int c;;) {
+ if ((c = a.pending) == 0) {
+ a.onCompletion(s); // invoke callback, then ascend to the completer
+ if ((a = (s = a).completer) == null) {
+ s.quietlyComplete(); // reached the root; mark it complete
+ return;
+ }
+ }
+ else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
+ return; // decremented a nonzero count; nothing more to do here
+ }
+ }
+
+ /**
+ * Equivalent to {@link #tryComplete} but does not invoke {@link
+ * #onCompletion(CountedCompleter)} along the completion path:
+ * If the pending count is nonzero, decrements the count;
+ * otherwise, similarly tries to complete this task's completer, if
+ * one exists, else marks this task as complete. This method may be
+ * useful in cases where {@code onCompletion} should not, or need
+ * not, be invoked for each completer in a computation.
+ */
+ public final void propagateCompletion() {
+ CountedCompleter<?> a = this, s = a;
+ for (int c;;) {
+ if ((c = a.pending) == 0) {
+ if ((a = (s = a).completer) == null) {
+ s.quietlyComplete();
+ return;
+ }
+ }
+ else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
+ return;
+ }
+ }
+
+ /**
+ * Regardless of pending count, invokes
+ * {@link #onCompletion(CountedCompleter)}, marks this task as
+ * complete and further triggers {@link #tryComplete} on this
+ * task's completer, if one exists. The given rawResult is
+ * used as an argument to {@link #setRawResult} before invoking
+ * {@link #onCompletion(CountedCompleter)} or marking this task
+ * as complete; its value is meaningful only for classes
+ * overriding {@code setRawResult}. This method does not modify
+ * the pending count.
+ *
+ * <p>This method may be useful when forcing completion as soon as
+ * any one (versus all) of several subtask results are obtained.
+ * However, in the common (and recommended) case in which {@code
+ * setRawResult} is not overridden, this effect can be obtained
+ * more simply using {@code quietlyCompleteRoot();}.
+ *
+ * @param rawResult the raw result
+ */
+ public void complete(T rawResult) {
+ CountedCompleter<?> p;
+ setRawResult(rawResult);
+ onCompletion(this);
+ quietlyComplete();
+ if ((p = completer) != null)
+ p.tryComplete();
+ }
+
+ /**
+ * If this task's pending count is zero, returns this task;
+ * otherwise decrements its pending count and returns {@code
+ * null}. This method is designed to be used with {@link
+ * #nextComplete} in completion traversal loops.
+ *
+ * @return this task, if pending count was zero, else {@code null}
+ */
+ public final CountedCompleter<?> firstComplete() {
+ for (int c;;) {
+ if ((c = pending) == 0)
+ return this;
+ else if (U.compareAndSwapInt(this, PENDING, c, c - 1))
+ return null;
+ }
+ }
+
+ /**
+ * If this task does not have a completer, invokes {@link
+ * ForkJoinTask#quietlyComplete} and returns {@code null}. Or, if
+ * the completer's pending count is non-zero, decrements that
+ * pending count and returns {@code null}. Otherwise, returns the
+ * completer. This method can be used as part of a completion
+ * traversal loop for homogeneous task hierarchies:
+ *
+ * <pre> {@code
+ * for (CountedCompleter<?> c = firstComplete();
+ * c != null;
+ * c = c.nextComplete()) {
+ * // ... process c ...
+ * }}</pre>
+ *
+ * @return the completer, or {@code null} if none
+ */
+ public final CountedCompleter<?> nextComplete() {
+ CountedCompleter<?> p;
+ if ((p = completer) != null)
+ return p.firstComplete();
+ else {
+ quietlyComplete();
+ return null;
+ }
+ }
+
+ /**
+ * Equivalent to {@code getRoot().quietlyComplete()}.
+ */
+ public final void quietlyCompleteRoot() {
+ for (CountedCompleter<?> a = this, p;;) {
+ if ((p = a.completer) == null) {
+ a.quietlyComplete();
+ return;
+ }
+ a = p;
+ }
+ }
+
+ /**
+ * Supports ForkJoinTask exception propagation.
+ */
+ void internalPropagateException(Throwable ex) {
+ CountedCompleter<?> a = this, s = a;
+ // relay the exception up the completer chain while each level
+ // opts to propagate and has not already completed
+ while (a.onExceptionalCompletion(ex, s) &&
+ (a = (s = a).completer) != null && a.status >= 0 &&
+ a.recordExceptionalCompletion(ex) == EXCEPTIONAL)
+ ;
+ }
+
+ /**
+ * Implements execution conventions for CountedCompleters.
+ */
+ protected final boolean exec() {
+ compute();
+ return false;
+ }
+
+ /**
+ * Returns the result of the computation. By default,
+ * returns {@code null}, which is appropriate for {@code Void}
+ * actions, but in other cases should be overridden, almost
+ * always to return a field or function of a field that
+ * holds the result upon completion.
+ *
+ * @return the result of the computation
+ */
+ public T getRawResult() { return null; }
+
+ /**
+ * A method that result-bearing CountedCompleters may optionally
+ * use to help maintain result data. By default, does nothing.
+ * Overrides are not recommended. However, if this method is
+ * overridden to update existing objects or fields, then it must
+ * in general be defined to be thread-safe.
+ */
+ protected void setRawResult(T t) { }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long PENDING;
+ static {
+ try {
+ U = getUnsafe();
+ PENDING = U.objectFieldOffset
+ (CountedCompleter.class.getDeclaredField("pending"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a third-party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a JDK.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/DoubleAdder.java b/src/main/java/jsr166e/DoubleAdder.java
new file mode 100644
index 0000000..3f22fa3
--- /dev/null
+++ b/src/main/java/jsr166e/DoubleAdder.java
@@ -0,0 +1,198 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain an initially zero
+ * {@code double} sum. When updates (method {@link #add}) are
+ * contended across threads, the set of variables may grow dynamically
+ * to reduce contention. Method {@link #sum} (or, equivalently, {@link
+ * #doubleValue}) returns the current total combined across the
+ * variables maintaining the sum.
+ *
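+ * <p>As a usage sketch (variable names here are illustrative):
+ *
+ * <pre> {@code
+ * DoubleAdder total = new DoubleAdder();
+ * // many threads may concurrently call:
+ * total.add(1.5);
+ * // later, when a (non-atomic) snapshot is acceptable:
+ * double s = total.sum();
+ * }</pre>
+ *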
+ * <p>This class extends {@link Number}, but does <em>not</em> define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class DoubleAdder extends Striped64 implements Serializable {
+ private static final long serialVersionUID = 7249069246863182397L;
+
+ /**
+ * Update function. Note that we must use "long" for underlying
+ * representations, because there is no compareAndSet for double,
+ * due to the fact that the bitwise equals used in any CAS
+ * implementation is not the same as double-precision equals.
+ * However, we use CAS only to detect and alleviate contention,
+ * for which bitwise equals works best anyway. In principle, the
+ * long/double conversions used here should be essentially free on
+ * most platforms since they just re-interpret bits.
+ *
+ * Similar conversions are used in other methods.
+ */
+ final long fn(long v, long x) {
+ return Double.doubleToRawLongBits
+ (Double.longBitsToDouble(v) +
+ Double.longBitsToDouble(x));
+ }
+
+ /**
+ * Creates a new adder with initial sum of zero.
+ */
+ public DoubleAdder() {
+ }
+
+ /**
+ * Adds the given value.
+ *
+ * @param x the value to add
+ */
+ public void add(double x) {
+ Cell[] as; long b, v; HashCode hc; Cell a; int n;
+ if ((as = cells) != null ||
+ !casBase(b = base,
+ Double.doubleToRawLongBits
+ (Double.longBitsToDouble(b) + x))) {
+ boolean uncontended = true;
+ int h = (hc = threadHashCode.get()).code;
+ if (as == null || (n = as.length) < 1 ||
+ (a = as[(n - 1) & h]) == null ||
+ !(uncontended = a.cas(v = a.value,
+ Double.doubleToRawLongBits
+ (Double.longBitsToDouble(v) + x))))
+ retryUpdate(Double.doubleToRawLongBits(x), hc, uncontended);
+ }
+ }
+
+ /**
+ * Returns the current sum. The returned value is <em>NOT</em> an
+ * atomic snapshot; invocation in the absence of concurrent
+ * updates returns an accurate result, but concurrent updates that
+ * occur while the sum is being calculated might not be
+ * incorporated. Also, because floating-point arithmetic is not
+ * strictly associative, the returned result need not be identical
+ * to the value that would be obtained in a sequential series of
+ * updates to a single variable.
+ *
+ * @return the sum
+ */
+ public double sum() {
+ Cell[] as = cells;
+ double sum = Double.longBitsToDouble(base);
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null)
+ sum += Double.longBitsToDouble(a.value);
+ }
+ }
+ return sum;
+ }
+
+ /**
+ * Resets variables maintaining the sum to zero. This method may
+ * be a useful alternative to creating a new adder, but is only
+ * effective if there are no concurrent updates. Because this
+ * method is intrinsically racy, it should only be used when it is
+ * known that no threads are concurrently updating.
+ */
+ public void reset() {
+ internalReset(0L);
+ }
+
+ /**
+ * Equivalent in effect to {@link #sum} followed by {@link
+ * #reset}. This method may apply for example during quiescent
+ * points between multithreaded computations. If there are
+ * updates concurrent with this method, the returned value is
+ * <em>not</em> guaranteed to be the final value occurring before
+ * the reset.
+ *
+ * @return the sum
+ */
+ public double sumThenReset() {
+ Cell[] as = cells;
+ double sum = Double.longBitsToDouble(base);
+ base = 0L;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ long v = a.value;
+ a.value = 0L;
+ sum += Double.longBitsToDouble(v);
+ }
+ }
+ }
+ return sum;
+ }
+
+ /**
+ * Returns the String representation of the {@link #sum}.
+ * @return the String representation of the {@link #sum}
+ */
+ public String toString() {
+ return Double.toString(sum());
+ }
+
+ /**
+ * Equivalent to {@link #sum}.
+ *
+ * @return the sum
+ */
+ public double doubleValue() {
+ return sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as a {@code long} after a
+ * narrowing primitive conversion.
+ */
+ public long longValue() {
+ return (long)sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as an {@code int} after a
+ * narrowing primitive conversion.
+ */
+ public int intValue() {
+ return (int)sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as a {@code float}
+ * after a narrowing primitive conversion.
+ */
+ public float floatValue() {
+ return (float)sum();
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeDouble(sum());
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ busy = 0;
+ cells = null;
+ base = Double.doubleToRawLongBits(s.readDouble());
+ }
+
+}
diff --git a/src/main/java/jsr166e/DoubleMaxUpdater.java b/src/main/java/jsr166e/DoubleMaxUpdater.java
new file mode 100644
index 0000000..b1859fc
--- /dev/null
+++ b/src/main/java/jsr166e/DoubleMaxUpdater.java
@@ -0,0 +1,193 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain a running {@code double}
+ * maximum with initial value {@code Double.NEGATIVE_INFINITY}. When
+ * updates (method {@link #update}) are contended across threads, the
+ * set of variables may grow dynamically to reduce contention. Method
+ * {@link #max} (or, equivalently, {@link #doubleValue}) returns the
+ * current maximum across the variables maintaining updates.
+ *
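+ * <p>As a usage sketch (variable names here are illustrative):
+ *
+ * <pre> {@code
+ * DoubleMaxUpdater peak = new DoubleMaxUpdater();
+ * // concurrently, from many threads:
+ * peak.update(sampleValue);
+ * // later:
+ * double worst = peak.max();
+ * }</pre>
+ *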
+ * <p>This class extends {@link Number}, but does <em>not</em> define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class DoubleMaxUpdater extends Striped64 implements Serializable {
+ private static final long serialVersionUID = 7249069246863182397L;
+ /**
+ * Long representation of negative infinity. See class Double
+ * internal documentation for explanation.
+ */
+ private static final long MIN_AS_LONG = 0xfff0000000000000L;
+
+ /**
+ * Update function. See class DoubleAdder for rationale
+ * for using conversions from/to long.
+ */
+ final long fn(long v, long x) {
+ return Double.longBitsToDouble(v) > Double.longBitsToDouble(x) ? v : x;
+ }
+
+ /**
+ * Creates a new instance with initial value of {@code
+ * Double.NEGATIVE_INFINITY}.
+ */
+ public DoubleMaxUpdater() {
+ base = MIN_AS_LONG;
+ }
+
+ /**
+ * Updates the maximum to be at least the given value.
+ *
+ * @param x the value to update
+ */
+ public void update(double x) {
+ long lx = Double.doubleToRawLongBits(x);
+ Cell[] as; long b, v; HashCode hc; Cell a; int n;
+ if ((as = cells) != null ||
+ (Double.longBitsToDouble(b = base) < x && !casBase(b, lx))) {
+ boolean uncontended = true;
+ int h = (hc = threadHashCode.get()).code;
+ if (as == null || (n = as.length) < 1 ||
+ (a = as[(n - 1) & h]) == null ||
+ (Double.longBitsToDouble(v = a.value) < x &&
+ !(uncontended = a.cas(v, lx))))
+ retryUpdate(lx, hc, uncontended);
+ }
+ }
+
+ /**
+ * Returns the current maximum. The returned value is
+ * <em>NOT</em> an atomic snapshot; invocation in the absence of
+ * concurrent updates returns an accurate result, but concurrent
+ * updates that occur while the value is being calculated might
+ * not be incorporated.
+ *
+ * @return the maximum
+ */
+ public double max() {
+ Cell[] as = cells;
+ double max = Double.longBitsToDouble(base);
+ if (as != null) {
+ int n = as.length;
+ double v;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null && (v = Double.longBitsToDouble(a.value)) > max)
+ max = v;
+ }
+ }
+ return max;
+ }
+
+ /**
+ * Resets variables maintaining updates to {@code
+ * Double.NEGATIVE_INFINITY}. This method may be a useful
+ * alternative to creating a new updater, but is only effective if
+ * there are no concurrent updates. Because this method is
+ * intrinsically racy, it should only be used when it is known
+ * that no threads are concurrently updating.
+ */
+ public void reset() {
+ internalReset(MIN_AS_LONG);
+ }
+
+ /**
+ * Equivalent in effect to {@link #max} followed by {@link
+ * #reset}. This method may apply for example during quiescent
+ * points between multithreaded computations. If there are
+ * updates concurrent with this method, the returned value is
+ * <em>not</em> guaranteed to be the final value occurring before
+ * the reset.
+ *
+ * @return the maximum
+ */
+ public double maxThenReset() {
+ Cell[] as = cells;
+ double max = Double.longBitsToDouble(base);
+ base = MIN_AS_LONG;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ double v = Double.longBitsToDouble(a.value);
+ a.value = MIN_AS_LONG;
+ if (v > max)
+ max = v;
+ }
+ }
+ }
+ return max;
+ }
+
+ /**
+ * Returns the String representation of the {@link #max}.
+ * @return the String representation of the {@link #max}
+ */
+ public String toString() {
+ return Double.toString(max());
+ }
+
+ /**
+ * Equivalent to {@link #max}.
+ *
+ * @return the max
+ */
+ public double doubleValue() {
+ return max();
+ }
+
+ /**
+ * Returns the {@link #max} as a {@code long} after a
+ * narrowing primitive conversion.
+ */
+ public long longValue() {
+ return (long)max();
+ }
+
+ /**
+ * Returns the {@link #max} as an {@code int} after a
+ * narrowing primitive conversion.
+ */
+ public int intValue() {
+ return (int)max();
+ }
+
+ /**
+ * Returns the {@link #max} as a {@code float}
+ * after a narrowing primitive conversion.
+ */
+ public float floatValue() {
+ return (float)max();
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeDouble(max());
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ busy = 0;
+ cells = null;
+ base = Double.doubleToRawLongBits(s.readDouble());
+ }
+
+}
diff --git a/src/main/java/jsr166e/ForkJoinPool.java b/src/main/java/jsr166e/ForkJoinPool.java
new file mode 100644
index 0000000..8a04973
--- /dev/null
+++ b/src/main/java/jsr166e/ForkJoinPool.java
@@ -0,0 +1,3344 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+import jsr166y.ThreadLocalRandom;
+
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An {@link ExecutorService} for running {@link ForkJoinTask}s.
+ * A {@code ForkJoinPool} provides the entry point for submissions
+ * from non-{@code ForkJoinTask} clients, as well as management and
+ * monitoring operations.
+ *
+ * <p>A {@code ForkJoinPool} differs from other kinds of {@link
+ * ExecutorService} mainly by virtue of employing
+ * <em>work-stealing</em>: all threads in the pool attempt to find and
+ * execute tasks submitted to the pool and/or created by other active
+ * tasks (eventually blocking waiting for work if none exist). This
+ * enables efficient processing when most tasks spawn other subtasks
+ * (as do most {@code ForkJoinTask}s), as well as when many small
+ * tasks are submitted to the pool from external clients. Especially
+ * when setting <em>asyncMode</em> to true in constructors, {@code
+ * ForkJoinPool}s may also be appropriate for use with event-style
+ * tasks that are never joined.
+ *
+ * <p>A static {@link #commonPool()} is available and appropriate for
+ * most applications. The common pool is used by any ForkJoinTask that
+ * is not explicitly submitted to a specified pool. Using the common
+ * pool normally reduces resource usage (its threads are slowly
+ * reclaimed during periods of non-use, and reinstated upon subsequent
+ * use).
+ *
+ * <p>For applications that require separate or custom pools, a {@code
+ * ForkJoinPool} may be constructed with a given target parallelism
+ * level; by default, equal to the number of available processors. The
+ * pool attempts to maintain enough active (or available) threads by
+ * dynamically adding, suspending, or resuming internal worker
+ * threads, even if some tasks are stalled waiting to join others.
+ * However, no such adjustments are guaranteed in the face of blocked
+ * I/O or other unmanaged synchronization. The nested {@link
+ * ManagedBlocker} interface enables extension of the kinds of
+ * synchronization accommodated.
+ *
+ * <p>In addition to execution and lifecycle control methods, this
+ * class provides status check methods (for example
+ * {@link #getStealCount}) that are intended to aid in developing,
+ * tuning, and monitoring fork/join applications. Also, method
+ * {@link #toString} returns indications of pool state in a
+ * convenient form for informal monitoring.
+ *
+ * <p>As is the case with other ExecutorServices, there are three
+ * main task execution methods summarized in the following table.
+ * These are designed to be used primarily by clients not already
+ * engaged in fork/join computations in the current pool. The main
+ * forms of these methods accept instances of {@code ForkJoinTask},
+ * but overloaded forms also allow mixed execution of plain {@code
+ * Runnable}- or {@code Callable}- based activities as well. However,
+ * tasks that are already executing in a pool should normally instead
+ * use the within-computation forms listed in the table unless using
+ * async event-style tasks that are not usually joined, in which case
+ * there is little difference among the methods.
+ *
+ * <table BORDER CELLPADDING=3 CELLSPACING=1>
+ * <caption>Summary of task execution methods</caption>
+ * <tr>
+ * <td></td>
+ * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
+ * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange async execution</b></td>
+ * <td> {@link #execute(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Await and obtain result</b></td>
+ * <td> {@link #invoke(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#invoke}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange exec and obtain Future</b></td>
+ * <td> {@link #submit(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
+ * </tr>
+ * </table>
+ *
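+ * <p>For example, a minimal sketch of client-side use (the
+ * {@code Fibonacci} task here is hypothetical):
+ *
+ * <pre> {@code
+ * ForkJoinPool pool = new ForkJoinPool(); // parallelism = #processors
+ * ForkJoinTask<Integer> job = pool.submit(new Fibonacci(30));
+ * Integer result = job.join(); // blocks until the computation completes
+ * }</pre>
+ *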
+ * <p>The common pool is by default constructed with default
+ * parameters, but these may be controlled by setting three
+ * {@linkplain System#getProperty system properties}:
+ * <ul>
+ * <li>{@code java.util.concurrent.ForkJoinPool.common.parallelism}
+ * - the parallelism level, a non-negative integer
+ * <li>{@code java.util.concurrent.ForkJoinPool.common.threadFactory}
+ * - the class name of a {@link ForkJoinWorkerThreadFactory}
+ * <li>{@code java.util.concurrent.ForkJoinPool.common.exceptionHandler}
+ * - the class name of a {@link UncaughtExceptionHandler}
+ * </ul>
+ * The system class loader is used to load these classes.
+ * Upon any error in establishing these settings, default parameters
+ * are used. It is possible to disable or limit the use of threads in
+ * the common pool by setting the parallelism property to zero, and/or
+ * using a factory that may return {@code null}.
+ *
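+ * <p>For example, the common pool's parallelism can be capped (a
+ * sketch using the property name listed above; it takes effect only
+ * if set before the common pool is first initialized):
+ *
+ * <pre> {@code
+ * System.setProperty(
+ *     "java.util.concurrent.ForkJoinPool.common.parallelism", "4");
+ * }</pre>
+ *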
+ * <p><b>Implementation notes</b>: This implementation restricts the
+ * maximum number of running threads to 32767. Attempts to create
+ * pools with greater than the maximum number result in
+ * {@code IllegalArgumentException}.
+ *
+ * <p>This implementation rejects submitted tasks (that is, by throwing
+ * {@link RejectedExecutionException}) only when the pool is shut down
+ * or internal resources have been exhausted.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public class ForkJoinPool extends AbstractExecutorService {
+
+ /*
+ * Implementation Overview
+ *
+ * This class and its nested classes provide the main
+ * functionality and control for a set of worker threads:
+ * Submissions from non-FJ threads enter into submission queues.
+ * Workers take these tasks and typically split them into subtasks
+ * that may be stolen by other workers. Preference rules give
+ * first priority to processing tasks from their own queues (LIFO
+ * or FIFO, depending on mode), then to randomized FIFO steals of
+ * tasks in other queues.
+ *
+ * WorkQueues
+ * ==========
+ *
+ * Most operations occur within work-stealing queues (in nested
+ * class WorkQueue). These are special forms of Deques that
+ * support only three of the four possible end-operations -- push,
+ * pop, and poll (aka steal), under the further constraints that
+ * push and pop are called only from the owning thread (or, as
+ * extended here, under a lock), while poll may be called from
+ * other threads. (If you are unfamiliar with them, you probably
+ * want to read Herlihy and Shavit's book "The Art of
+ * Multiprocessor Programming", chapter 16 describing these in
+ * more detail before proceeding.) The main work-stealing queue
+ * design is roughly similar to those in the papers "Dynamic
+ * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
+ * (http://research.sun.com/scalable/pubs/index.html) and
+ * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
+ * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
+ * See also "Correct and Efficient Work-Stealing for Weak Memory
+ * Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013
+ * (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an
+ * analysis of memory ordering (atomic, volatile etc) issues. The
+ * main differences ultimately stem from GC requirements that we
+ * null out taken slots as soon as we can, to maintain as small a
+ * footprint as possible even in programs generating huge numbers
+ * of tasks. To accomplish this, we shift the CAS arbitrating pop
+ * vs poll (steal) from being on the indices ("base" and "top") to
+ * the slots themselves. So, both a successful pop and poll
+ * mainly entail a CAS of a slot from non-null to null. Because
+ * we rely on CASes of references, we do not need tag bits on base
+ * or top. They are simple ints as used in any circular
+ * array-based queue (see for example ArrayDeque). Updates to the
+ * indices must still be ordered in a way that guarantees that top
+ * == base means the queue is empty, but otherwise may err on the
+ * side of possibly making the queue appear nonempty when a push,
+ * pop, or poll has not fully committed. Note that this means
+ * that the poll operation, considered individually, is not
+ * wait-free. One thief cannot successfully continue until another
+ * in-progress one (or, if previously empty, a push) completes.
+ * However, in the aggregate, we ensure at least probabilistic
+ * non-blockingness. If an attempted steal fails, a thief always
+ * chooses a different random victim target to try next. So, in
+ * order for one thief to progress, it suffices for any
+ * in-progress poll or new push on any empty queue to
+ * complete. (This is why we normally use method pollAt and its
+ * variants that try once at the apparent base index, else
+ * consider alternative actions, rather than method poll.)
+ *
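+ * As a much-simplified sketch of the poll (steal) side (not the
+ * actual code; casSlotToNull is a hypothetical helper, and bounds,
+ * resizing, and memory-ordering details are omitted):
+ *
+ *   ForkJoinTask<?> poll(WorkQueue q) {
+ *     int b = q.base;
+ *     if (b - q.top < 0) {                  // appears nonempty
+ *       int i = b & (q.array.length - 1);
+ *       ForkJoinTask<?> t = q.array[i];
+ *       if (t != null && casSlotToNull(q.array, i, t)) {
+ *         q.base = b + 1;                   // advance only after winning the CAS
+ *         return t;
+ *       }
+ *     }
+ *     return null;
+ *   }
+ *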
+ * This approach also enables support of a user mode in which local
+ * task processing is in FIFO, not LIFO order, simply by using
+ * poll rather than pop. This can be useful in message-passing
+ * frameworks in which tasks are never joined. However, neither
+ * mode considers affinities, loads, cache localities, etc., so
+ * rarely provides the best possible performance on a given
+ * machine, but portably provides good throughput by averaging over
+ * these factors. (Further, even if we did try to use such
+ * information, we do not usually have a basis for exploiting it.
+ * For example, some sets of tasks profit from cache affinities,
+ * but others are harmed by cache pollution effects.)
+ *
+ * WorkQueues are also used in a similar way for tasks submitted
+ * to the pool. We cannot mix these tasks in the same queues used
+ * for work-stealing (this would contaminate lifo/fifo
+ * processing). Instead, we randomly associate submission queues
+ * with submitting threads, using a form of hashing. The
+ * Submitter probe value serves as a hash code for
+ * choosing existing queues, and may be randomly repositioned upon
+ * contention with other submitters. In essence, submitters act
+ * like workers except that they are restricted to executing local
+ * tasks that they submitted (or in the case of CountedCompleters,
+ * others with the same root task). However, because most
+ * shared/external queue operations are more expensive than
+ * internal, and because, at steady state, external submitters
+ * will compete for CPU with workers, ForkJoinTask.join and
+ * related methods prevent them from repeatedly helping to process
+ * tasks if all workers are active. Insertion of tasks in shared
+ * mode requires a lock (mainly to protect in the case of
+ * resizing) but we use only a simple spinlock (using bits in
+ * field qlock), because submitters encountering a busy queue move
+ * on to try or create other queues -- they block only when
+ * creating and registering new queues.
+ *
+ * Management
+ * ==========
+ *
+ * The main throughput advantages of work-stealing stem from
+ * decentralized control -- workers mostly take tasks from
+ * themselves or each other. We cannot negate this in the
+ * implementation of other management responsibilities. The main
+ * tactic for avoiding bottlenecks is packing nearly all
+ * essentially atomic control state into two volatile variables
+ * that are by far most often read (not written) as status and
+ * consistency checks.
+ *
+ * Field "ctl" contains 64 bits holding all the information needed
+ * to atomically decide to add, inactivate, enqueue (on an event
+ * queue), dequeue, and/or re-activate workers. To enable this
+ * packing, we restrict maximum parallelism to (1<<15)-1 (which is
+ * far in excess of normal operating range) to allow ids, counts,
+ * and their negations (used for thresholding) to fit into 16-bit
+ * fields.
+ *
+ * Field "plock" is a form of sequence lock with a saturating
+ * shutdown bit (similarly for per-queue "qlocks"), mainly
+ * protecting updates to the workQueues array, as well as to
+ * enable shutdown. When used as a lock, it is normally only very
+ * briefly held, so is nearly always available after at most a
+ * brief spin, but we use a monitor-based backup strategy to
+ * block when needed.
+ *
+ * Recording WorkQueues. WorkQueues are recorded in the
+ * "workQueues" array that is created upon first use and expanded
+ * if necessary. Updates to the array while recording new workers
+ * and unrecording terminated ones are protected from each other
+ * by a lock but the array is otherwise concurrently readable, and
+ * accessed directly. To simplify index-based operations, the
+ * array size is always a power of two, and all readers must
+ * tolerate null slots. Worker queues are at odd indices. Shared
+ * (submission) queues are at even indices, up to a maximum of 64
+ * slots, to limit growth even if array needs to expand to add
+ * more workers. Grouping them together in this way simplifies and
+ * speeds up task scanning.
+ *
+ * All worker thread creation is on-demand, triggered by task
+ * submissions, replacement of terminated workers, and/or
+ * compensation for blocked workers. However, all other support
+ * code is set up to work with other policies. To ensure that we
+ * do not hold on to worker references that would prevent GC, ALL
+ * accesses to workQueues are via indices into the workQueues
+ * array (which is one source of some of the messy code
+ * constructions here). In essence, the workQueues array serves as
+ * a weak reference mechanism. Thus for example the wait queue
+ * field of ctl stores indices, not references. Access to the
+ * workQueues in associated methods (for example signalWork) must
+ * both index-check and null-check the IDs. All such accesses
+ * ignore bad IDs by returning out early from what they are doing,
+ * since this can only be associated with termination, in which
+ * case it is OK to give up. All uses of the workQueues array
+ * also check that it is non-null (even if previously
+ * non-null). This allows nulling during termination, which is
+ * currently not necessary, but remains an option for
+ * resource-revocation-based shutdown schemes. It also helps
+ * reduce JIT issuance of uncommon-trap code, which tends to
+ * unnecessarily complicate control flow in some methods.
+ *
+ * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
+ * let workers spin indefinitely scanning for tasks when none can
+ * be found immediately, and we cannot start/resume workers unless
+ * there appear to be tasks available. On the other hand, we must
+ * quickly prod them into action when new tasks are submitted or
+ * generated. In many usages, ramp-up time to activate workers is
+ * the main limiting factor in overall performance (this is
+ * compounded at program start-up by JIT compilation and
+ * allocation). So we try to streamline this as much as possible.
+ * We park/unpark workers after placing in an event wait queue
+ * when they cannot find work. This "queue" is actually a simple
+ * Treiber stack, headed by the "id" field of ctl, plus a 15-bit
+ * counter value (that reflects the number of times a worker has
+ * been inactivated) to avoid ABA effects (we need only as many
+ * version numbers as worker threads). Successors are held in
+ * field WorkQueue.nextWait. Queuing deals with several intrinsic
+ * races, mainly that a task-producing thread can miss seeing (and
+ * signalling) another thread that gave up looking for work but
+ * has not yet entered the wait queue. We solve this by requiring
+ * a full sweep of all workers (via repeated calls to method
+ * scan()) both before and after a newly waiting worker is added
+ * to the wait queue. Because enqueued workers may actually be
+ * rescanning rather than waiting, we set and clear the "parker"
+ * field of WorkQueues to reduce unnecessary calls to unpark.
+ * (This requires a secondary recheck to avoid missed signals.)
+ * Note the unusual conventions about Thread.interrupts
+ * surrounding parking and other blocking: Because interrupts are
+ * used solely to alert threads to check termination, which is
+ * checked anyway upon blocking, we clear status (using
+ * Thread.interrupted) before any call to park, so that park does
+ * not immediately return due to status being set via some other
+ * unrelated call to interrupt in user code.
+ *
+ * Signalling. We create or wake up workers only when there
+ * appears to be at least one task they might be able to find and
+ * execute. When a submission is added or another worker adds a
+ * task to a queue that has fewer than two tasks, they signal
+ * waiting workers (or trigger creation of new ones if fewer than
+ * the given parallelism level -- signalWork). These primary
+ * signals are buttressed by others whenever other threads remove
+ * a task from a queue and notice that there are other tasks there
+ * as well. So in general, pools will be over-signalled. On most
+ * platforms, signalling (unpark) overhead time is noticeably
+ * long, and the time between signalling a thread and it actually
+ * making progress can be very noticeably long, so it is worth
+ * offloading these delays from critical paths as much as
+ * possible. Additionally, workers spin-down gradually, by staying
+ * alive so long as they see the ctl state changing. Similar
+ * stability-sensing techniques are also used before blocking in
+ * awaitJoin and helpComplete.
+ *
+ * Trimming workers. To release resources after periods of lack of
+ * use, a worker starting to wait when the pool is quiescent will
+ * time out and terminate if the pool has remained quiescent for a
+ * given period -- a short period if there are more threads than
+ * parallelism, longer as the number of threads decreases. This
+ * will slowly propagate, eventually terminating all workers after
+ * periods of non-use.
+ *
+ * Shutdown and Termination. A call to shutdownNow atomically sets
+ * a plock bit and then (non-atomically) sets each worker's
+ * qlock status, cancels all unprocessed tasks, and wakes up
+ * all waiting workers. Detecting whether termination should
+ * commence after a non-abrupt shutdown() call requires more work
+ * and bookkeeping. We need consensus about quiescence (i.e., that
+ * there is no more work). The active count provides a primary
+ * indication but non-abrupt shutdown still requires a rechecking
+ * scan for any workers that are inactive but not queued.
+ *
+ * Joining Tasks
+ * =============
+ *
+ * Any of several actions may be taken when one worker is waiting
+ * to join a task stolen (or always held) by another. Because we
+ * are multiplexing many tasks on to a pool of workers, we can't
+ * just let them block (as in Thread.join). We also cannot just
+ * reassign the joiner's run-time stack with another and replace
+ * it later, which would be a form of "continuation", that even if
+ * possible is not necessarily a good idea since we sometimes need
+ * both an unblocked task and its continuation to progress.
+ * Instead we combine two tactics:
+ *
+ * Helping: Arranging for the joiner to execute some task that it
+ * would be running if the steal had not occurred.
+ *
+ * Compensating: Unless there are already enough live threads,
+ * method tryCompensate() may create or re-activate a spare
+ * thread to compensate for blocked joiners until they unblock.
+ *
+ * A third form (implemented in tryRemoveAndExec) amounts to
+ * helping a hypothetical compensator: If we can readily tell that
+ * a possible action of a compensator is to steal and execute the
+ * task being joined, the joining thread can do so directly,
+ * without the need for a compensation thread (although at the
+ * expense of larger run-time stacks, but the tradeoff is
+ * typically worthwhile).
+ *
+ * The ManagedBlocker extension API can't use helping so relies
+ * only on compensation in method awaitBlocker.
+ *
+ * The algorithm in tryHelpStealer entails a form of "linear"
+ * helping: Each worker records (in field currentSteal) the most
+ * recent task it stole from some other worker. Plus, it records
+ * (in field currentJoin) the task it is currently actively
+ * joining. Method tryHelpStealer uses these markers to try to
+ * find a worker to help (i.e., steal back a task from and execute
+ * it) that could hasten completion of the actively joined task.
+ * In essence, the joiner executes a task that would be on its own
+ * local deque had the to-be-joined task not been stolen. This may
+ * be seen as a conservative variant of the approach in Wagner &
+ * Calder "Leapfrogging: a portable technique for implementing
+ * efficient futures" SIGPLAN Notices, 1993
+ * (http://portal.acm.org/citation.cfm?id=155354). It differs in
+ * that: (1) We only maintain dependency links across workers upon
+ * steals, rather than use per-task bookkeeping. This sometimes
+ * requires a linear scan of workQueues array to locate stealers,
+ * but often doesn't because stealers leave hints (that may become
+ * stale/wrong) of where to locate them. It is only a hint
+ * because a worker might have had multiple steals and the hint
+ * records only one of them (usually the most current). Hinting
+ * isolates cost to when it is needed, rather than adding to
+ * per-task overhead. (2) It is "shallow", ignoring nesting and
+ * potentially cyclic mutual steals. (3) It is intentionally
+ * racy: field currentJoin is updated only while actively joining,
+ * which means that we miss links in the chain during long-lived
+ * tasks, GC stalls etc (which is OK since blocking in such cases
+ * is usually a good idea). (4) We bound the number of attempts
+ * to find work (see MAX_HELP) and fall back to suspending the
+ * worker and if necessary replacing it with another.
+ *
+ * Helping actions for CountedCompleters are much simpler: Method
+ * helpComplete can take and execute any task with the same root
+ * as the task being waited on. However, this still entails some
+ * traversal of completer chains, so is less efficient than using
+ * CountedCompleters without explicit joins.
+ *
+ * It is impossible to keep exactly the target parallelism number
+ * of threads running at any given time. Determining the
+ * existence of conservatively safe helping targets, the
+ * availability of already-created spares, and the apparent need
+ * to create new spares are all racy, so we rely on multiple
+ * retries of each. Compensation in the apparent absence of
+ * helping opportunities is challenging to control on JVMs, where
+ * GC and other activities can stall progress of tasks that in
+ * turn stall out many other dependent tasks, without us being
+ * able to determine whether they will ever require compensation.
+ * Even though work-stealing otherwise encounters little
+ * degradation in the presence of more threads than cores,
+ * aggressively adding new threads in such cases entails risk of
+ * unwanted positive feedback control loops in which more threads
+ * cause more dependent stalls (as well as delayed progress of
+ * unblocked threads to the point that we know they are available)
+ * leading to more situations requiring more threads, and so
+ * on. This aspect of control can be seen as an (analytically
+ * intractable) game with an opponent that may choose the worst
+ * (for us) active thread to stall at any time. We take several
+ * precautions to bound losses (and thus bound gains), mainly in
+ * methods tryCompensate and awaitJoin.
+ *
+ * Common Pool
+ * ===========
+ *
+ * The static common pool always exists after static
+ * initialization. Since it (or any other created pool) need
+ * never be used, we minimize initial construction overhead and
+ * footprint to the setup of about a dozen fields, with no nested
+ * allocation. Most bootstrapping occurs within method
+ * fullExternalPush during the first submission to the pool.
+ *
+ * When external threads submit to the common pool, they can
+ * perform subtask processing (see externalHelpJoin and related
+ * methods). This caller-helps policy makes it sensible to set
+ * common pool parallelism level to one (or more) less than the
+ * total number of available cores, or even zero for pure
+ * caller-runs. We do not need to record whether external
+ * submissions are to the common pool -- if not, externalHelpJoin
+ * returns quickly (at most helping to signal some common pool
+ * workers). These submitters would otherwise be blocked waiting
+ * for completion, so the extra effort (with liberally sprinkled
+ * task status checks) in inapplicable cases amounts to an odd
+ * form of limited spin-wait before blocking in ForkJoinTask.join.
+ *
+ * Style notes
+ * ===========
+ *
+ * There is a lot of representation-level coupling among classes
+ * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
+ * fields of WorkQueue maintain data structures managed by
+ * ForkJoinPool, so are directly accessed. There is little point
+ * trying to reduce this, since any associated future changes in
+ * representations will need to be accompanied by algorithmic
+ * changes anyway. Several methods intrinsically sprawl because
+ * they must accumulate sets of consistent reads of volatiles held
+ * in local variables. Methods signalWork() and scan() are the
+ * main bottlenecks, so are especially heavily
+ * micro-optimized/mangled. There are lots of inline assignments
+ * (of form "while ((local = field) != 0)") which are usually the
+ * simplest way to ensure the required read orderings (which are
+ * sometimes critical). This leads to a "C"-like style of listing
+ * declarations of these locals at the heads of methods or blocks.
+ * There are several occurrences of the unusual "do {} while
+ * (!cas...)" which is the simplest way to force an update of a
+ * CAS'ed variable. There are also other coding oddities (including
+ * several unnecessary-looking hoisted null checks) that help
+ * some methods perform reasonably even when interpreted (not
+ * compiled).
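+ *
+ * For example, the "force an update" idiom appears below (in
+ * method incrementActiveCount) as:
+ *
+ *   do {} while (!U.compareAndSwapLong
+ *                (this, CTL, c = ctl, ((c & ~AC_MASK) |
+ *                                      ((c & AC_MASK) + AC_UNIT))));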
+ *
+ * The order of declarations in this file is:
+ * (1) Static utility functions
+ * (2) Nested (static) classes
+ * (3) Static fields
+ * (4) Fields, along with constants used when unpacking some of them
+ * (5) Internal control methods
+ * (6) Callbacks and other support for ForkJoinTask methods
+ * (7) Exported methods
+ * (8) Static block initializing statics in minimally dependent order
+ */
+
+ // Static utilities
+
+ /**
+ * If there is a security manager, makes sure caller has
+ * permission to modify threads.
+ */
+ private static void checkPermission() {
+ SecurityManager security = System.getSecurityManager();
+ if (security != null)
+ security.checkPermission(modifyThreadPermission);
+ }
+
+ // Nested classes
+
+ /**
+ * Factory for creating new {@link ForkJoinWorkerThread}s.
+ * A {@code ForkJoinWorkerThreadFactory} must be defined and used
+ * for {@code ForkJoinWorkerThread} subclasses that extend base
+ * functionality or initialize threads with different contexts.
+ */
+ public static interface ForkJoinWorkerThreadFactory {
+ /**
+ * Returns a new worker thread operating in the given pool.
+ *
+ * @param pool the pool this thread works in
+ * @return the new worker thread
+ * @throws NullPointerException if the pool is null
+ */
+ public ForkJoinWorkerThread newThread(ForkJoinPool pool);
+ }
+
+ /**
+ * Default ForkJoinWorkerThreadFactory implementation; creates a
+ * new ForkJoinWorkerThread.
+ */
+ static final class DefaultForkJoinWorkerThreadFactory
+ implements ForkJoinWorkerThreadFactory {
+ public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
+ return new ForkJoinWorkerThread(pool);
+ }
+ }
+
+ /**
+ * Class for artificial tasks that are used to replace the target
+ * of local joins if they are removed from an interior queue slot
+ * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
+ * actually do anything beyond having a unique identity.
+ */
+ static final class EmptyTask extends ForkJoinTask<Void> {
+ private static final long serialVersionUID = -7721805057305804111L;
+ EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void x) {}
+ public final boolean exec() { return true; }
+ }
+
+ /**
+ * Queues supporting work-stealing as well as external task
+ * submission. See above for main rationale and algorithms.
+ * Implementation relies heavily on "Unsafe" intrinsics
+ * and selective use of "volatile":
+ *
+ * Field "base" is the index (mod array.length) of the least valid
+ * queue slot, which is always the next position to steal (poll)
+ * from if nonempty. Reads and writes require volatile orderings
+ * but not CAS, because updates are only performed after slot
+ * CASes.
+ *
+ * Field "top" is the index (mod array.length) of the next queue
+ * slot to push to or pop from. It is written only by owner thread
+ * for push, or under lock for external/shared push, and accessed
+ * by other threads only after reading (volatile) base. Both top
+ * and base are allowed to wrap around on overflow, but (top -
+ * base) (or more commonly -(base - top) to force volatile read of
+ * base before top) still estimates size. The lock ("qlock") is
+ * forced to -1 on termination, causing all further lock attempts
+ * to fail. (Note: we don't need CAS for termination state because
+ * upon pool shutdown, all shared-queues will stop being used
+ * anyway.) Nearly all lock bodies are set up so that exceptions
+ * within lock bodies are "impossible" (modulo JVM errors that
+ * would cause failure anyway.)
+ *
+ * The array slots are read and written using the emulation of
+ * volatiles/atomics provided by Unsafe. Insertions must in
+ * general use putOrderedObject as a form of releasing store to
+ * ensure that all writes to the task object are ordered before
+ * its publication in the queue. All removals entail a CAS to
+ * null. The array is always a power of two. To ensure safety of
+ * Unsafe array operations, all accesses perform explicit null
+ * checks and implicit bounds checks via power-of-two masking.
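+ *
+ * For reference, the slot-address idiom used throughout (for
+ * index i in an array a whose length-1 mask is m) is:
+ *
+ *   long j = ((i & m) << ASHIFT) + ABASE;
+ *   ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);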
+ *
+ * In addition to basic queuing support, this class contains
+ * fields described elsewhere to control execution. It turns out
+ * to work better memory-layout-wise to include them in this class
+ * rather than a separate class.
+ *
+ * Performance on most platforms is very sensitive to placement of
+ * instances of both WorkQueues and their arrays -- we absolutely
+ * do not want multiple WorkQueue instances or multiple queue
+ * arrays sharing cache lines. (It would be best for queue objects
+ * and their arrays to share, but there is nothing available to
+ * help arrange that). The @Contended annotation alerts JVMs to
+ * try to keep instances apart; here, where that annotation may be
+ * unavailable, the "heuristic padding" fields below serve the
+ * same purpose.
+ */
+ static final class WorkQueue {
+ /**
+ * Capacity of work-stealing queue array upon initialization.
+ * Must be a power of two; at least 4, but should be larger to
+ * reduce or eliminate cacheline sharing among queues.
+ * Currently, it is much larger, as a partial workaround for
+ * the fact that JVMs often place arrays in locations that
+ * share GC bookkeeping (especially cardmarks) such that
+ * per-write accesses encounter serious memory contention.
+ */
+ static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
+
+ /**
+ * Maximum size for queue arrays. Must be a power of two less
+ * than or equal to 1 << (31 - width of array entry) to ensure
+ * lack of wraparound of index calculations, but defined to a
+ * value a bit less than this to help users trap runaway
+ * programs before saturating systems.
+ */
+ static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
+
+ // Heuristic padding to ameliorate unfortunate memory placements
+ volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
+
+ volatile int eventCount; // encoded inactivation count; < 0 if inactive
+ int nextWait; // encoded record of next event waiter
+ int nsteals; // number of steals
+ int hint; // steal index hint
+ short poolIndex; // index of this queue in pool
+ final short mode; // 0: lifo, > 0: fifo, < 0: shared
+ volatile int qlock; // 1: locked, -1: terminate; else 0
+ volatile int base; // index of next slot for poll
+ int top; // index of next slot for push
+ ForkJoinTask<?>[] array; // the elements (initially unallocated)
+ final ForkJoinPool pool; // the containing pool (may be null)
+ final ForkJoinWorkerThread owner; // owning thread or null if shared
+ volatile Thread parker; // == owner during call to park; else null
+ volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
+ ForkJoinTask<?> currentSteal; // current non-local task being executed
+
+ volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
+ volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
+
+ WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
+ int seed) {
+ this.pool = pool;
+ this.owner = owner;
+ this.mode = (short)mode;
+ this.hint = seed; // store initial seed for runWorker
+ // Place indices in the center of array (that is not yet allocated)
+ base = top = INITIAL_QUEUE_CAPACITY >>> 1;
+ }
+
+ /**
+ * Returns the approximate number of tasks in the queue.
+ */
+ final int queueSize() {
+ int n = base - top; // non-owner callers must read base first
+ return (n >= 0) ? 0 : -n; // ignore transient negative
+ }
+
+ /**
+ * Provides a more accurate estimate of whether this queue has
+ * any tasks than does queueSize, by checking whether a
+ * near-empty queue has at least one unclaimed task.
+ */
+ final boolean isEmpty() {
+ ForkJoinTask<?>[] a; int m, s;
+ int n = base - (s = top);
+ return (n >= 0 ||
+ (n == -1 &&
+ ((a = array) == null ||
+ (m = a.length - 1) < 0 ||
+ U.getObject
+ (a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
+ }
+
+ /**
+ * Pushes a task. Call only by owner in unshared queues. (The
+ * shared-queue version is embedded in method externalPush.)
+ *
+ * @param task the task. Caller must ensure non-null.
+ * @throws RejectedExecutionException if array cannot be resized
+ */
+ final void push(ForkJoinTask<?> task) {
+ ForkJoinTask<?>[] a; ForkJoinPool p;
+ int s = top, n;
+ if ((a = array) != null) { // ignore if queue removed
+ int m = a.length - 1;
+ U.putOrderedObject(a, ((m & s) << ASHIFT) + ABASE, task);
+ if ((n = (top = s + 1) - base) <= 2)
+ (p = pool).signalWork(p.workQueues, this);
+ else if (n >= m)
+ growArray();
+ }
+ }
+
+ /**
+ * Initializes or doubles the capacity of array. Call either
+ * by owner or with lock held -- it is OK for base, but not
+ * top, to move while resizings are in progress.
+ */
+ final ForkJoinTask<?>[] growArray() {
+ ForkJoinTask<?>[] oldA = array;
+ int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
+ if (size > MAXIMUM_QUEUE_CAPACITY)
+ throw new RejectedExecutionException("Queue capacity exceeded");
+ int oldMask, t, b;
+ ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
+ if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
+ (t = top) - (b = base) > 0) {
+ int mask = size - 1;
+ do {
+ ForkJoinTask<?> x;
+ int oldj = ((b & oldMask) << ASHIFT) + ABASE;
+ int j = ((b & mask) << ASHIFT) + ABASE;
+ x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
+ if (x != null &&
+ U.compareAndSwapObject(oldA, oldj, x, null))
+ U.putObjectVolatile(a, j, x);
+ } while (++b != t);
+ }
+ return a;
+ }
+
+ /**
+ * Takes next task, if one exists, in LIFO order. Call only
+ * by owner in unshared queues.
+ */
+ final ForkJoinTask<?> pop() {
+ ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
+ if ((a = array) != null && (m = a.length - 1) >= 0) {
+ for (int s; (s = top - 1) - base >= 0;) {
+ long j = ((m & s) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
+ break;
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ return t;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes a task in FIFO order if b is base of queue and a task
+ * can be claimed without contention. Specialized versions
+ * appear in ForkJoinPool methods scan and tryHelpStealer.
+ */
+ final ForkJoinTask<?> pollAt(int b) {
+ ForkJoinTask<?> t; ForkJoinTask<?>[] a;
+ if ((a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
+ base == b && U.compareAndSwapObject(a, j, t, null)) {
+ U.putOrderedInt(this, QBASE, b + 1);
+ return t;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in FIFO order.
+ */
+ final ForkJoinTask<?> poll() {
+ ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
+ while ((b = base) - top < 0 && (a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ U.putOrderedInt(this, QBASE, b + 1);
+ return t;
+ }
+ }
+ else if (base == b) {
+ if (b + 1 == top)
+ break;
+ Thread.yield(); // wait for lagging update (very rare)
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> nextLocalTask() {
+ return mode == 0 ? pop() : poll();
+ }
+
+ /**
+ * Returns next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> peek() {
+ ForkJoinTask<?>[] a = array; int m;
+ if (a == null || (m = a.length - 1) < 0)
+ return null;
+ int i = mode == 0 ? top - 1 : base;
+ int j = ((i & m) << ASHIFT) + ABASE;
+ return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ }
+
+ /**
+ * Pops the given task only if it is at the current top.
+ * (A shared version is available only via FJP.tryExternalUnpush)
+ */
+ final boolean tryUnpush(ForkJoinTask<?> t) {
+ ForkJoinTask<?>[] a; int s;
+ if ((a = array) != null && (s = top) != base &&
+ U.compareAndSwapObject
+ (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
+ top = s;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Removes and cancels all known tasks, ignoring any exceptions.
+ */
+ final void cancelAll() {
+ ForkJoinTask.cancelIgnoringExceptions(currentJoin);
+ ForkJoinTask.cancelIgnoringExceptions(currentSteal);
+ for (ForkJoinTask<?> t; (t = poll()) != null; )
+ ForkJoinTask.cancelIgnoringExceptions(t);
+ }
+
+ // Specialized execution methods
+
+ /**
+ * Polls and runs tasks until empty.
+ */
+ final void pollAndExecAll() {
+ for (ForkJoinTask<?> t; (t = poll()) != null;)
+ t.doExec();
+ }
+
+ /**
+ * Executes a top-level task and any local tasks remaining
+ * after execution.
+ */
+ final void runTask(ForkJoinTask<?> task) {
+ if ((currentSteal = task) != null) {
+ task.doExec();
+ ForkJoinTask<?>[] a = array;
+ int md = mode;
+ ++nsteals;
+ currentSteal = null;
+ if (md != 0)
+ pollAndExecAll();
+ else if (a != null) {
+ int s, m = a.length - 1;
+ while ((s = top - 1) - base >= 0) {
+ long i = ((m & s) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObject(a, i);
+ if (t == null)
+ break;
+ if (U.compareAndSwapObject(a, i, t, null)) {
+ top = s;
+ t.doExec();
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * If present, removes from queue and executes the given task,
+ * or any other cancelled task. Returns true on any CAS
+ * or consistency check failure so the caller can retry.
+ *
+ * @return false if no progress can be made, else true
+ */
+ final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
+ boolean stat;
+ ForkJoinTask<?>[] a; int m, s, b, n;
+ if (task != null && (a = array) != null && (m = a.length - 1) >= 0 &&
+ (n = (s = top) - (b = base)) > 0) {
+ boolean removed = false, empty = true;
+ stat = true;
+ for (ForkJoinTask<?> t;;) { // traverse from s to b
+ long j = ((--s & m) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObject(a, j);
+ if (t == null) // inconsistent length
+ break;
+ else if (t == task) {
+ if (s + 1 == top) { // pop
+ if (!U.compareAndSwapObject(a, j, task, null))
+ break;
+ top = s;
+ removed = true;
+ }
+ else if (base == b) // replace with proxy
+ removed = U.compareAndSwapObject(a, j, task,
+ new EmptyTask());
+ break;
+ }
+ else if (t.status >= 0)
+ empty = false;
+ else if (s + 1 == top) { // pop and throw away
+ if (U.compareAndSwapObject(a, j, t, null))
+ top = s;
+ break;
+ }
+ if (--n == 0) {
+ if (!empty && base == b)
+ stat = false;
+ break;
+ }
+ }
+ if (removed)
+ task.doExec();
+ }
+ else
+ stat = false;
+ return stat;
+ }
+
+ /**
+ * Tries to poll for and execute the given task or any other
+ * task in its CountedCompleter computation.
+ */
+ final boolean pollAndExecCC(CountedCompleter<?> root) {
+ ForkJoinTask<?>[] a; int b; Object o; CountedCompleter<?> t, r;
+ if ((b = base) - top < 0 && (a = array) != null) {
+ long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((o = U.getObjectVolatile(a, j)) == null)
+ return true; // retry
+ if (o instanceof CountedCompleter) {
+ for (t = (CountedCompleter<?>)o, r = t;;) {
+ if (r == root) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ U.putOrderedInt(this, QBASE, b + 1);
+ t.doExec();
+ }
+ return true;
+ }
+ else if ((r = r.completer) == null)
+ break; // not part of root computation
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Tries to pop and execute the given task or any other task
+ * in its CountedCompleter computation.
+ */
+ final boolean externalPopAndExecCC(CountedCompleter<?> root) {
+ ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
+ if (base - (s = top) < 0 && (a = array) != null) {
+ long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
+ if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
+ for (t = (CountedCompleter<?>)o, r = t;;) {
+ if (r == root) {
+ if (U.compareAndSwapInt(this, QLOCK, 0, 1)) {
+ if (top == s && array == a &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ top = s - 1;
+ qlock = 0;
+ t.doExec();
+ }
+ else
+ qlock = 0;
+ }
+ return true;
+ }
+ else if ((r = r.completer) == null)
+ break;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Internal (owned-queue) version of externalPopAndExecCC; no
+ * qlock is needed because only the owning worker pops.
+ */
+ final boolean internalPopAndExecCC(CountedCompleter<?> root) {
+ ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
+ if (base - (s = top) < 0 && (a = array) != null) {
+ long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
+ if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
+ for (t = (CountedCompleter<?>)o, r = t;;) {
+ if (r == root) {
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s - 1;
+ t.doExec();
+ }
+ return true;
+ }
+ else if ((r = r.completer) == null)
+ break;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if owned and not known to be blocked.
+ */
+ final boolean isApparentlyUnblocked() {
+ Thread wt; Thread.State s;
+ return (eventCount >= 0 &&
+ (wt = owner) != null &&
+ (s = wt.getState()) != Thread.State.BLOCKED &&
+ s != Thread.State.WAITING &&
+ s != Thread.State.TIMED_WAITING);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long QBASE;
+ private static final long QLOCK;
+ private static final int ABASE;
+ private static final int ASHIFT;
+ static {
+ try {
+ U = getUnsafe();
+ Class<?> k = WorkQueue.class;
+ Class<?> ak = ForkJoinTask[].class;
+ QBASE = U.objectFieldOffset
+ (k.getDeclaredField("base"));
+ QLOCK = U.objectFieldOffset
+ (k.getDeclaredField("qlock"));
+ ABASE = U.arrayBaseOffset(ak);
+ int scale = U.arrayIndexScale(ak);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+ }
+
+ // static fields (initialized in static initializer below)
+
+ /**
+ * Per-thread submission bookkeeping. Shared across all pools
+ * to reduce ThreadLocal pollution and because random motion
+ * to avoid contention in one pool is likely to hold for others.
+ * Lazily initialized on first submission (but null-checked
+ * in other contexts to avoid unnecessary initialization).
+ */
+ static final ThreadLocal<Submitter> submitters;
+
+ /**
+ * Creates a new ForkJoinWorkerThread. This factory is used unless
+ * overridden in ForkJoinPool constructors.
+ */
+ public static final ForkJoinWorkerThreadFactory
+ defaultForkJoinWorkerThreadFactory;
+
+ /**
+ * Permission required for callers of methods that may start or
+ * kill threads.
+ */
+ private static final RuntimePermission modifyThreadPermission;
+
+ /**
+ * Common (static) pool. Non-null for public use unless a static
+ * construction exception occurs, but internal usages null-check on use
+ * to paranoically avoid potential initialization circularities
+ * as well as to simplify generated code.
+ */
+ static final ForkJoinPool common;
+
+ /**
+ * Common pool parallelism. To allow simpler use and management
+ * when common pool threads are disabled, we allow the underlying
+ * common.parallelism field to be zero, but in that case still report
+ * parallelism as 1 to reflect resulting caller-runs mechanics.
+ */
+ static final int commonParallelism;
+
+ /**
+ * Sequence number for creating workerNamePrefix.
+ */
+ private static int poolNumberSequence;
+
+ /**
+ * Returns the next sequence number. We don't expect this to
+ * ever contend, so use simple builtin sync.
+ */
+ private static final synchronized int nextPoolId() {
+ return ++poolNumberSequence;
+ }
+
+ // static constants
+
+ /**
+ * Initial timeout value (in nanoseconds) for the thread
+ * triggering quiescence to park waiting for new work. On timeout,
+ * the thread will instead try to shrink the number of
+ * workers. The value should be large enough to avoid overly
+ * aggressive shrinkage during most transient stalls (long GCs
+ * etc.).
+ */
+ private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
+
+ /**
+ * Timeout value when there are more threads than parallelism level
+ */
+ private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
+
+ /**
+ * Tolerance for idle timeouts, to cope with timer undershoots
+ */
+ private static final long TIMEOUT_SLOP = 2000000L;
+
+ /**
+ * The maximum stolen->joining link depth allowed in method
+ * tryHelpStealer. Must be a power of two. Depths for legitimate
+ * chains are unbounded, but we use a fixed constant to avoid
+ * (otherwise unchecked) cycles and to bound staleness of
+ * traversal parameters at the expense of sometimes blocking when
+ * we could be helping.
+ */
+ private static final int MAX_HELP = 64;
+
+ /**
+ * Increment for seed generators. See class ThreadLocal for
+ * explanation.
+ */
+ private static final int SEED_INCREMENT = 0x61c88647;
+
+ /*
+ * Bits and masks for control variables
+ *
+ * Field ctl is a long packed with:
+ * AC: Number of active running workers minus target parallelism (16 bits)
+ * TC: Number of total workers minus target parallelism (16 bits)
+ * ST: true if pool is terminating (1 bit)
+ * EC: the wait count of top waiting thread (15 bits)
+ * ID: poolIndex of top of Treiber stack of waiters (16 bits)
+ *
+ * When convenient, we can extract the upper 32 bits of counts and
+ * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
+ * (int)ctl. The ec field is never accessed alone, but always
+ * together with id and st. The offsets of counts by the target
+ * parallelism and the positioning of fields make it possible to
+ * perform the most common checks via sign tests of fields: When
+ * ac is negative, there are not enough active workers, when tc is
+ * negative, there are not enough total workers, and when e is
+ * negative, the pool is terminating. To deal with these possibly
+ * negative fields, we use casts in and out of "short" and/or
+ * signed shifts to maintain signedness.
+ *
+ * When a thread is queued (inactivated), its eventCount field is
+ * set negative, which is the only way to tell if a worker is
+ * prevented from executing tasks, even though it must continue to
+ * scan for them to avoid queuing races. Note however that
+ * eventCount updates lag releases so usage requires care.
+ *
+ * Field plock is an int packed with:
+ * SHUTDOWN: true if shutdown is enabled (1 bit)
+ * SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
+ * SIGNAL: set when threads may be waiting on the lock (1 bit)
+ *
+ * The sequence number enables simple consistency checks:
+ * Staleness of read-only operations on the workQueues array can
+ * be checked by comparing plock before vs after the reads.
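+ *
+ * As an illustrative sketch (mirroring uses below), the common
+ * field extractions are:
+ *
+ *   int u  = (int)(ctl >>> 32);          // upper word: AC and TC
+ *   int e  = (int)ctl;                   // lower word: event/queue state
+ *   int ac = (int)(ctl >> AC_SHIFT);     // active count - parallelism
+ *   int tc = (short)(ctl >>> TC_SHIFT);  // total count - parallelism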
+ */
+
+ // bit positions/shifts for fields
+ private static final int AC_SHIFT = 48;
+ private static final int TC_SHIFT = 32;
+ private static final int ST_SHIFT = 31;
+ private static final int EC_SHIFT = 16;
+
+ // bounds
+ private static final int SMASK = 0xffff; // short bits
+ private static final int MAX_CAP = 0x7fff; // max #workers - 1
+ private static final int EVENMASK = 0xfffe; // even short bits
+ private static final int SQMASK = 0x007e; // max 64 (even) slots
+ private static final int SHORT_SIGN = 1 << 15;
+ private static final int INT_SIGN = 1 << 31;
+
+ // masks
+ private static final long STOP_BIT = 0x0001L << ST_SHIFT;
+ private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
+ private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
+
+ // units for incrementing and decrementing
+ private static final long TC_UNIT = 1L << TC_SHIFT;
+ private static final long AC_UNIT = 1L << AC_SHIFT;
+
+ // masks and units for dealing with u = (int)(ctl >>> 32)
+ private static final int UAC_SHIFT = AC_SHIFT - 32;
+ private static final int UTC_SHIFT = TC_SHIFT - 32;
+ private static final int UAC_MASK = SMASK << UAC_SHIFT;
+ private static final int UTC_MASK = SMASK << UTC_SHIFT;
+ private static final int UAC_UNIT = 1 << UAC_SHIFT;
+ private static final int UTC_UNIT = 1 << UTC_SHIFT;
+
+ // masks and units for dealing with e = (int)ctl
+ private static final int E_MASK = 0x7fffffff; // no STOP_BIT
+ private static final int E_SEQ = 1 << EC_SHIFT;
+
+ // plock bits
+ private static final int SHUTDOWN = 1 << 31;
+ private static final int PL_LOCK = 2;
+ private static final int PL_SIGNAL = 1;
+ private static final int PL_SPINS = 1 << 8;
+
+ // access mode for WorkQueue
+ static final int LIFO_QUEUE = 0;
+ static final int FIFO_QUEUE = 1;
+ static final int SHARED_QUEUE = -1;
+
+ // Heuristic padding to ameliorate unfortunate memory placements
+ volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
+
+ // Instance fields
+ volatile long stealCount; // collects worker counts
+ volatile long ctl; // main pool control
+ volatile int plock; // shutdown status and seqLock
+ volatile int indexSeed; // worker/submitter index seed
+ final short parallelism; // parallelism level
+ final short mode; // LIFO/FIFO
+ WorkQueue[] workQueues; // main registry
+ final ForkJoinWorkerThreadFactory factory;
+ final UncaughtExceptionHandler ueh; // per-worker UEH
+ final String workerNamePrefix; // to create worker name string
+
+ volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
+ volatile Object pad18, pad19, pad1a, pad1b;
+
+ /**
+ * Acquires the plock lock to protect worker array and related
+ * updates. This method is called only if an initial CAS on plock
+ * fails. This acts as a spinlock for normal cases, but falls back
+ * to builtin monitor to block when (rarely) needed. This would be
+ * a terrible idea for a highly contended lock, but works fine as
+ * a more conservative alternative to a pure spinlock.
+ */
+ private int acquirePlock() {
+ int spins = PL_SPINS, ps, nps;
+ for (;;) {
+ if (((ps = plock) & PL_LOCK) == 0 &&
+ U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
+ return nps;
+ else if (spins >= 0) {
+ if (ThreadLocalRandom.current().nextInt() >= 0)
+ --spins;
+ }
+ else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
+ synchronized (this) {
+ if ((plock & PL_SIGNAL) != 0) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ try {
+ Thread.currentThread().interrupt();
+ } catch (SecurityException ignore) {
+ }
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ }
+ }
+
+ /**
+ * Unlocks and signals any thread waiting for plock. Called only
+ * when CAS of seq value for unlock fails.
+ */
+ private void releasePlock(int ps) {
+ plock = ps;
+ synchronized (this) { notifyAll(); }
+ }
+
+ /**
+ * Tries to create and start one worker if fewer than target
+ * parallelism level exist. Adjusts counts etc on failure.
+ */
+ private void tryAddWorker() {
+ long c; int u, e;
+ while ((u = (int)((c = ctl) >>> 32)) < 0 &&
+ (u & SHORT_SIGN) != 0 && (e = (int)c) >= 0) {
+ long nc = ((long)(((u + UTC_UNIT) & UTC_MASK) |
+ ((u + UAC_UNIT) & UAC_MASK)) << 32) | (long)e;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ ForkJoinWorkerThreadFactory fac;
+ Throwable ex = null;
+ ForkJoinWorkerThread wt = null;
+ try {
+ if ((fac = factory) != null &&
+ (wt = fac.newThread(this)) != null) {
+ wt.start();
+ break;
+ }
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ deregisterWorker(wt, ex);
+ break;
+ }
+ }
+ }
+
+ // Registering and deregistering workers
+
+ /**
+ * Callback from ForkJoinWorkerThread to establish and record its
+ * WorkQueue. To avoid scanning bias due to packing entries in
+ * front of the workQueues array, we treat the array as a simple
+ * power-of-two hash table using per-thread seed as hash,
+ * expanding as needed.
+ *
+ * @param wt the worker thread
+ * @return the worker's queue
+ */
+ final WorkQueue registerWorker(ForkJoinWorkerThread wt) {
+ UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps;
+ wt.setDaemon(true);
+ if ((handler = ueh) != null)
+ wt.setUncaughtExceptionHandler(handler);
+ do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
+ s += SEED_INCREMENT) ||
+ s == 0); // skip 0
+ WorkQueue w = new WorkQueue(this, wt, mode, s);
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ try {
+ if ((ws = workQueues) != null) { // skip if shutting down
+ int n = ws.length, m = n - 1;
+ int r = (s << 1) | 1; // use odd-numbered indices
+ if (ws[r &= m] != null) { // collision
+ int probes = 0; // step by approx half size
+ int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
+ while (ws[r = (r + step) & m] != null) {
+ if (++probes >= n) {
+ workQueues = ws = Arrays.copyOf(ws, n <<= 1);
+ m = n - 1;
+ probes = 0;
+ }
+ }
+ }
+ w.poolIndex = (short)r;
+ w.eventCount = r; // volatile write orders
+ ws[r] = w;
+ }
+ } finally {
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex >>> 1)));
+ return w;
+ }
+
+ /**
+ * Final callback from terminating worker, as well as upon failure
+ * to construct or start a worker. Removes record of worker from
+ * array, and adjusts counts. If pool is shutting down, tries to
+ * complete termination.
+ *
+ * @param wt the worker thread, or null if construction failed
+ * @param ex the exception causing failure, or null if none
+ */
+ final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
+ WorkQueue w = null;
+ if (wt != null && (w = wt.workQueue) != null) {
+ int ps; long sc;
+ w.qlock = -1; // ensure set
+ do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
+ sc = stealCount,
+ sc + w.nsteals));
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ try {
+ int idx = w.poolIndex;
+ WorkQueue[] ws = workQueues;
+ if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
+ ws[idx] = null;
+ } finally {
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ }
+
+ long c; // adjust ctl counts
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
+ ((c - TC_UNIT) & TC_MASK) |
+ (c & ~(AC_MASK|TC_MASK)))));
+
+ if (!tryTerminate(false, false) && w != null && w.array != null) {
+ w.cancelAll(); // cancel remaining tasks
+ WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e;
+ while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) {
+ if (e > 0) { // activate or create replacement
+ if ((ws = workQueues) == null ||
+ (i = e & SMASK) >= ws.length ||
+ (v = ws[i]) == null)
+ break;
+ long nc = (((long)(v.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (v.eventCount != (e | INT_SIGN))
+ break;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ v.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = v.parker) != null)
+ U.unpark(p);
+ break;
+ }
+ }
+ else {
+ if ((short)u < 0)
+ tryAddWorker();
+ break;
+ }
+ }
+ }
+ if (ex == null) // help clean refs on way out
+ ForkJoinTask.helpExpungeStaleExceptions();
+ else // rethrow
+ ForkJoinTask.rethrow(ex);
+ }
+
+ // Submissions
+
+ /**
+ * Per-thread records for threads that submit to pools. Currently
+ * holds only pseudo-random seed / index that is used to choose
+ * submission queues in method externalPush. In the future, this may
+ * also incorporate a means to implement different task rejection
+ * and resubmission policies.
+ *
+ * Seeds for submitters and workers/workQueues work in basically
+ * the same way but are initialized and updated using slightly
+ * different mechanics. Both are initialized using the same
+ * approach as in class ThreadLocal, where successive values are
+ * unlikely to collide with previous values. Seeds are then
+ * randomly modified upon collisions using xorshifts, which
+ * requires a non-zero seed.
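+ *
+ * The collision-handling xorshift step (see fullExternalPush) is:
+ *
+ *   r ^= r << 13; r ^= r >>> 17; r ^= r << 5;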
+ */
+ static final class Submitter {
+ int seed;
+ Submitter(int s) { seed = s; }
+ }
+
+ /**
+ * Unless shutting down, adds the given task to a submission queue
+ * at the submitter's current queue index (modulo submission
+ * range). Only the most common path is directly handled in this
+ * method. All others are relayed to fullExternalPush.
+ *
+ * @param task the task. Caller must ensure non-null.
+ */
+ final void externalPush(ForkJoinTask<?> task) {
+ Submitter z = submitters.get();
+ WorkQueue q; int r, m, s, n, am; ForkJoinTask<?>[] a;
+ int ps = plock;
+ WorkQueue[] ws = workQueues;
+ if (z != null && ps > 0 && ws != null && (m = (ws.length - 1)) >= 0 &&
+ (q = ws[m & (r = z.seed) & SQMASK]) != null && r != 0 &&
+ U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
+ if ((a = q.array) != null &&
+ (am = a.length - 1) > (n = (s = q.top) - q.base)) {
+ int j = ((am & s) << ASHIFT) + ABASE;
+ U.putOrderedObject(a, j, task);
+ q.top = s + 1; // push on to deque
+ q.qlock = 0;
+ if (n <= 1)
+ signalWork(ws, q);
+ return;
+ }
+ q.qlock = 0;
+ }
+ fullExternalPush(task);
+ }
+
+ /**
+ * Full version of externalPush. This method is called, among
+ * other times, upon the first submission of the first task to the
+ * pool, so must perform secondary initialization. It also
+ * detects first submission by an external thread by looking up
+ * its ThreadLocal, and creates a new shared queue if the one at its
+ * index is empty or contended. The plock lock body must be
+ * exception-free (so no try/finally) so we optimistically
+ * allocate new queues outside the lock and throw them away if
+ * (very rarely) not needed.
+ *
+ * Secondary initialization occurs when plock is zero, to create
+ * the workQueues array and set plock to a valid value. This lock body
+ * must also be exception-free. Because the plock seq value can
+ * eventually wrap around zero, this method harmlessly fails to
+ * reinitialize if workQueues exists, while still advancing plock.
+ */
+ private void fullExternalPush(ForkJoinTask<?> task) {
+ int r = 0; // random index seed
+ for (Submitter z = submitters.get();;) {
+ WorkQueue[] ws; WorkQueue q; int ps, m, k;
+ if (z == null) {
+ if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
+ r += SEED_INCREMENT) && r != 0)
+ submitters.set(z = new Submitter(r));
+ }
+ else if (r == 0) { // move to a different index
+ r = z.seed;
+ r ^= r << 13; // same xorshift as WorkQueues
+ r ^= r >>> 17;
+ z.seed = r ^= (r << 5);
+ }
+ if ((ps = plock) < 0)
+ throw new RejectedExecutionException();
+ else if (ps == 0 || (ws = workQueues) == null ||
+ (m = ws.length - 1) < 0) { // initialize workQueues
+ int p = parallelism; // find power of two table size
+ int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
+ n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
+ n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1;
+ WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ?
+ new WorkQueue[n] : null);
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ if (((ws = workQueues) == null || ws.length == 0) && nws != null)
+ workQueues = nws;
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ else if ((q = ws[k = r & m & SQMASK]) != null) {
+ if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) {
+ ForkJoinTask<?>[] a = q.array;
+ int s = q.top;
+ boolean submitted = false;
+ try { // locked version of push
+ if ((a != null && a.length > s + 1 - q.base) ||
+ (a = q.growArray()) != null) { // must presize
+ int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
+ U.putOrderedObject(a, j, task);
+ q.top = s + 1;
+ submitted = true;
+ }
+ } finally {
+ q.qlock = 0; // unlock
+ }
+ if (submitted) {
+ signalWork(ws, q);
+ return;
+ }
+ }
+ r = 0; // move on failure
+ }
+ else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
+ q = new WorkQueue(this, null, SHARED_QUEUE, r);
+ q.poolIndex = (short)k;
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
+ ws[k] = q;
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ else
+ r = 0;
+ }
+ }
+
+ // Maintaining ctl counts
+
+ /**
+ * Increments active count; mainly called upon return from blocking.
+ */
+ final void incrementActiveCount() {
+ long c;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, ((c & ~AC_MASK) |
+ ((c & AC_MASK) + AC_UNIT))));
+ }
+
+ /**
+ * Tries to create or activate a worker if too few are active.
+ *
+ * @param ws the worker array to use to find signallees
+ * @param q if non-null, the queue holding tasks to be processed
+ */
+ final void signalWork(WorkQueue[] ws, WorkQueue q) {
+ for (;;) {
+ long c; int e, u, i; WorkQueue w; Thread p;
+ if ((u = (int)((c = ctl) >>> 32)) >= 0)
+ break;
+ if ((e = (int)c) <= 0) {
+ if ((short)u < 0)
+ tryAddWorker();
+ break;
+ }
+ if (ws == null || ws.length <= (i = e & SMASK) ||
+ (w = ws[i]) == null)
+ break;
+ long nc = (((long)(w.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT)) << 32);
+ int ne = (e + E_SEQ) & E_MASK;
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = ne;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ break;
+ }
+ if (q != null && q.base >= q.top)
+ break;
+ }
+ }
+
+ // Scanning for tasks
+
+ /**
+ * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
+ */
+ final void runWorker(WorkQueue w) {
+ w.growArray(); // allocate queue
+ for (int r = w.hint; scan(w, r) == 0; ) {
+ r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
+ }
+ }
+
+ /**
+ * Scans for and, if found, runs one task, else possibly
+ * inactivates the worker. This method operates on single reads of
+ * volatile state and is designed to be re-invoked continuously,
+ * in part because it returns upon detecting inconsistencies,
+ * contention, or state changes that indicate possible success on
+ * re-invocation.
+ *
+ * The scan searches for tasks across queues starting at a random
+ * index, checking each at least twice. The scan terminates upon
+ * either finding a non-empty queue, or completing the sweep. If
+ * the worker is not inactivated, it takes and runs a task from
+ * this queue. Otherwise, if still inactive, it tries to activate
+ * itself or some other worker by signalling. On failure to find a
+ * task, returns (for retry) if pool state may have changed during
+ * an empty scan, or tries to inactivate if active, else possibly
+ * blocks or terminates via method awaitWork.
+ *
+ * @param w the worker (via its WorkQueue)
+ * @param r a random seed
+ * @return worker qlock status if it would have waited, else 0
+ */
+ private final int scan(WorkQueue w, int r) {
+ WorkQueue[] ws; int m;
+ long c = ctl; // for consistency check
+ if ((ws = workQueues) != null && (m = ws.length - 1) >= 0 && w != null) {
+ for (int j = m + m + 1, ec = w.eventCount;;) {
+ WorkQueue q; int b, e; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
+ if ((q = ws[(r - j) & m]) != null &&
+ (b = q.base) - q.top < 0 && (a = q.array) != null) {
+ long i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((t = ((ForkJoinTask<?>)
+ U.getObjectVolatile(a, i))) != null) {
+ if (ec < 0)
+ helpRelease(c, ws, w, q, b);
+ else if (q.base == b &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ U.putOrderedInt(q, QBASE, b + 1);
+ if ((b + 1) - q.top < 0)
+ signalWork(ws, q);
+ w.runTask(t);
+ }
+ }
+ break;
+ }
+ else if (--j < 0) {
+ if ((ec | (e = (int)c)) < 0) // inactive or terminating
+ return awaitWork(w, c, ec);
+ else if (ctl == c) { // try to inactivate and enqueue
+ long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
+ w.nextWait = e;
+ w.eventCount = ec | INT_SIGN;
+ if (!U.compareAndSwapLong(this, CTL, c, nc))
+ w.eventCount = ec; // back out
+ }
+ break;
+ }
+ }
+ }
+ return 0;
+ }
+
+ /**
+ * A continuation of scan(), possibly blocking or terminating
+ * worker w. Returns without blocking if pool state has apparently
+ * changed since last invocation. Also, if inactivating w has
+ * caused the pool to become quiescent, checks for pool
+ * termination, and, so long as this is not the only worker, waits
+ * for an event for up to a given duration. On timeout, if ctl has
+ * not changed, terminates the worker, which will in turn wake up
+ * another worker to possibly repeat this process.
+ *
+ * @param w the calling worker
+ * @param c the ctl value on entry to scan
+ * @param ec the worker's eventCount on entry to scan
+ */
+ private final int awaitWork(WorkQueue w, long c, int ec) {
+ int stat, ns; long parkTime, deadline;
+ if ((stat = w.qlock) >= 0 && w.eventCount == ec && ctl == c &&
+ !Thread.interrupted()) {
+ int e = (int)c;
+ int u = (int)(c >>> 32);
+ int d = (u >> UAC_SHIFT) + parallelism; // active count
+
+ if (e < 0 || (d <= 0 && tryTerminate(false, false)))
+ stat = w.qlock = -1; // pool is terminating
+ else if ((ns = w.nsteals) != 0) { // collect steals and retry
+ long sc;
+ w.nsteals = 0;
+ do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
+ sc = stealCount, sc + ns));
+ }
+ else {
+ long pc = ((d > 0 || ec != (e | INT_SIGN)) ? 0L :
+ ((long)(w.nextWait & E_MASK)) | // ctl to restore
+ ((long)(u + UAC_UNIT)) << 32);
+ if (pc != 0L) { // timed wait if last waiter
+ int dc = -(short)(c >>> TC_SHIFT);
+ parkTime = (dc < 0 ? FAST_IDLE_TIMEOUT:
+ (dc + 1) * IDLE_TIMEOUT);
+ deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP;
+ }
+ else
+ parkTime = deadline = 0L;
+ if (w.eventCount == ec && ctl == c) {
+ Thread wt = Thread.currentThread();
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt; // emulate LockSupport.park
+ if (w.eventCount == ec && ctl == c)
+ U.park(false, parkTime); // must recheck before park
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (parkTime != 0L && ctl == c &&
+ deadline - System.nanoTime() <= 0L &&
+ U.compareAndSwapLong(this, CTL, c, pc))
+ stat = w.qlock = -1; // shrink pool
+ }
+ }
+ }
+ return stat;
+ }
+
+ /**
+ * Possibly releases (signals) a worker. Called only from scan()
+ * when a worker with apparently inactive status finds a non-empty
+ * queue. This requires revalidating all of the associated state
+ * from the caller.
+ */
+ private final void helpRelease(long c, WorkQueue[] ws, WorkQueue w,
+ WorkQueue q, int b) {
+ WorkQueue v; int e, i; Thread p;
+ if (w != null && w.eventCount < 0 && (e = (int)c) > 0 &&
+ ws != null && ws.length > (i = e & SMASK) &&
+ (v = ws[i]) != null && ctl == c) {
+ long nc = (((long)(v.nextWait & E_MASK)) |
+ ((long)((int)(c >>> 32) + UAC_UNIT)) << 32);
+ int ne = (e + E_SEQ) & E_MASK;
+ if (q != null && q.base == b && w.eventCount < 0 &&
+ v.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, c, nc)) {
+ v.eventCount = ne;
+ if ((p = v.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
+
+ /**
+ * Tries to locate and execute tasks for a stealer of the given
+ * task, or in turn one of its stealers. Traces currentSteal ->
+ * currentJoin links looking for a thread working on a descendant
+ * of the given task and with a non-empty queue to steal back and
+ * execute tasks from. The first call to this method upon a
+ * waiting join will often entail scanning/search (which is OK
+ * because the joiner has nothing better to do), but this method
+ * leaves hints in workers to speed up subsequent calls. The
+ * implementation is very branchy to cope with potential
+ * inconsistencies or loops encountering chains that are stale,
+ * unknown, or so long that they are likely cyclic.
+ *
+ * @param joiner the joining worker
+ * @param task the task to join
+ * @return 0 if no progress can be made, negative if task
+ * known complete, else positive
+ */
+ private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
+ int stat = 0, steps = 0; // bound to avoid cycles
+ if (task != null && joiner != null &&
+ joiner.base - joiner.top >= 0) { // hoist checks
+ restart: for (;;) {
+ ForkJoinTask<?> subtask = task; // current target
+ for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
+ WorkQueue[] ws; int m, s, h;
+ if ((s = task.status) < 0) {
+ stat = s;
+ break restart;
+ }
+ if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
+ break restart; // shutting down
+ if ((v = ws[h = (j.hint | 1) & m]) == null ||
+ v.currentSteal != subtask) {
+ for (int origin = h;;) { // find stealer
+ if (((h = (h + 2) & m) & 15) == 1 &&
+ (subtask.status < 0 || j.currentJoin != subtask))
+ continue restart; // occasional staleness check
+ if ((v = ws[h]) != null &&
+ v.currentSteal == subtask) {
+ j.hint = h; // save hint
+ break;
+ }
+ if (h == origin)
+ break restart; // cannot find stealer
+ }
+ }
+ for (;;) { // help stealer or descend to its stealer
+ ForkJoinTask<?>[] a; int b;
+ if (subtask.status < 0) // surround probes with
+ continue restart; // consistency checks
+ if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t =
+ (ForkJoinTask<?>)U.getObjectVolatile(a, i);
+ if (subtask.status < 0 || j.currentJoin != subtask ||
+ v.currentSteal != subtask)
+ continue restart; // stale
+ stat = 1; // apparent progress
+ if (v.base == b) {
+ if (t == null)
+ break restart;
+ if (U.compareAndSwapObject(a, i, t, null)) {
+ U.putOrderedInt(v, QBASE, b + 1);
+ ForkJoinTask<?> ps = joiner.currentSteal;
+ int jt = joiner.top;
+ do {
+ joiner.currentSteal = t;
+ t.doExec(); // clear local tasks too
+ } while (task.status >= 0 &&
+ joiner.top != jt &&
+ (t = joiner.pop()) != null);
+ joiner.currentSteal = ps;
+ break restart;
+ }
+ }
+ }
+ else { // empty -- try to descend
+ ForkJoinTask<?> next = v.currentJoin;
+ if (subtask.status < 0 || j.currentJoin != subtask ||
+ v.currentSteal != subtask)
+ continue restart; // stale
+ else if (next == null || ++steps == MAX_HELP)
+ break restart; // dead-end or maybe cyclic
+ else {
+ subtask = next;
+ j = v;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ return stat;
+ }
+
+ /**
+ * Analog of tryHelpStealer for CountedCompleters. Tries to steal
+ * and run tasks within the target's computation.
+ *
+ * @param task the task to join
+ */
+ private int helpComplete(WorkQueue joiner, CountedCompleter<?> task) {
+ WorkQueue[] ws; int m;
+ int s = 0;
+ if ((ws = workQueues) != null && (m = ws.length - 1) >= 0 &&
+ joiner != null && task != null) {
+ int j = joiner.poolIndex;
+ int scans = m + m + 1;
+ long c = 0L; // for stability check
+ for (int k = scans; ; j += 2) {
+ WorkQueue q;
+ if ((s = task.status) < 0)
+ break;
+ else if (joiner.internalPopAndExecCC(task))
+ k = scans;
+ else if ((s = task.status) < 0)
+ break;
+ else if ((q = ws[j & m]) != null && q.pollAndExecCC(task))
+ k = scans;
+ else if (--k < 0) {
+ if (c == (c = ctl))
+ break;
+ k = scans;
+ }
+ }
+ }
+ return s;
+ }
+
+ /**
+ * Tries to decrement active count (sometimes implicitly) and
+ * possibly release or create a compensating worker in preparation
+ * for blocking. Fails on contention or termination. Otherwise,
+ * adds a new thread if no idle workers are available and the pool
+ * may become starved.
+ *
+ * @param c the assumed ctl value
+ */
+ final boolean tryCompensate(long c) {
+ WorkQueue[] ws = workQueues;
+ int pc = parallelism, e = (int)c, m, tc;
+ if (ws != null && (m = ws.length - 1) >= 0 && e >= 0 && ctl == c) {
+ WorkQueue w = ws[e & m];
+ if (e != 0 && w != null) {
+ Thread p;
+ long nc = ((long)(w.nextWait & E_MASK) |
+ (c & (AC_MASK|TC_MASK)));
+ int ne = (e + E_SEQ) & E_MASK;
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = ne;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ return true; // replace with idle worker
+ }
+ }
+ else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
+ (int)(c >> AC_SHIFT) + pc > 1) {
+ long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc))
+ return true; // no compensation
+ }
+ else if (tc + pc < MAX_CAP) {
+ long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ ForkJoinWorkerThreadFactory fac;
+ Throwable ex = null;
+ ForkJoinWorkerThread wt = null;
+ try {
+ if ((fac = factory) != null &&
+ (wt = fac.newThread(this)) != null) {
+ wt.start();
+ return true;
+ }
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ deregisterWorker(wt, ex); // clean up and return false
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Helps and/or blocks until the given task is done.
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ * @return task status on exit
+ */
+ final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
+ int s = 0;
+ if (task != null && (s = task.status) >= 0 && joiner != null) {
+ ForkJoinTask<?> prevJoin = joiner.currentJoin;
+ joiner.currentJoin = task;
+ do {} while (joiner.tryRemoveAndExec(task) && // process local tasks
+ (s = task.status) >= 0);
+ if (s >= 0 && (task instanceof CountedCompleter))
+ s = helpComplete(joiner, (CountedCompleter<?>)task);
+ long cc = 0; // for stability checks
+ while (s >= 0 && (s = task.status) >= 0) {
+ if ((s = tryHelpStealer(joiner, task)) == 0 &&
+ (s = task.status) >= 0) {
+ if (!tryCompensate(cc))
+ cc = ctl;
+ else {
+ if (task.trySetSignal() && (s = task.status) >= 0) {
+ synchronized (task) {
+ if (task.status >= 0) {
+ try { // see ForkJoinTask
+ task.wait(); // for explanation
+ } catch (InterruptedException ie) {
+ }
+ }
+ else
+ task.notifyAll();
+ }
+ }
+ long c; // reactivate
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl,
+ ((c & ~AC_MASK) |
+ ((c & AC_MASK) + AC_UNIT))));
+ }
+ }
+ }
+ joiner.currentJoin = prevJoin;
+ }
+ return s;
+ }
+
+ /**
+ * Stripped-down variant of awaitJoin used by timed joins. Tries
+ * to help join only while there is continuous progress. (Caller
+ * will then enter a timed wait.)
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ */
+ final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
+ int s;
+ if (joiner != null && task != null && (s = task.status) >= 0) {
+ ForkJoinTask<?> prevJoin = joiner.currentJoin;
+ joiner.currentJoin = task;
+ do {} while (joiner.tryRemoveAndExec(task) && // process local tasks
+ (s = task.status) >= 0);
+ if (s >= 0) {
+ if (task instanceof CountedCompleter)
+ helpComplete(joiner, (CountedCompleter<?>)task);
+ do {} while (task.status >= 0 &&
+ tryHelpStealer(joiner, task) > 0);
+ }
+ joiner.currentJoin = prevJoin;
+ }
+ }
+
+ /**
+ * Returns a (probably) non-empty steal queue, if one is found
+ * during a scan, else null. This method must be retried by the
+ * caller if, by the time it tries to use the queue, it is empty.
+ */
+ private WorkQueue findNonEmptyStealQueue() {
+ int r = ThreadLocalRandom.current().nextInt();
+ for (;;) {
+ int ps = plock, m; WorkQueue[] ws; WorkQueue q;
+ if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
+ for (int j = (m + 1) << 2; j >= 0; --j) {
+ if ((q = ws[(((r - j) << 1) | 1) & m]) != null &&
+ q.base - q.top < 0)
+ return q;
+ }
+ }
+ if (plock == ps)
+ return null;
+ }
+ }
+
+ /**
+ * Runs tasks until {@code isQuiescent()}. We piggyback on
+ * active count ctl maintenance, but rather than blocking
+ * when tasks cannot be found, we rescan until all others cannot
+ * find tasks either.
+ */
+ final void helpQuiescePool(WorkQueue w) {
+ ForkJoinTask<?> ps = w.currentSteal;
+ for (boolean active = true;;) {
+ long c; WorkQueue q; ForkJoinTask<?> t; int b;
+ while ((t = w.nextLocalTask()) != null)
+ t.doExec();
+ if ((q = findNonEmptyStealQueue()) != null) {
+ if (!active) { // re-establish active count
+ active = true;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl,
+ ((c & ~AC_MASK) |
+ ((c & AC_MASK) + AC_UNIT))));
+ }
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
+ (w.currentSteal = t).doExec();
+ w.currentSteal = ps;
+ }
+ }
+ else if (active) { // decrement active count without queuing
+ long nc = ((c = ctl) & ~AC_MASK) | ((c & AC_MASK) - AC_UNIT);
+ if ((int)(nc >> AC_SHIFT) + parallelism == 0)
+ break; // bypass decrement-then-increment
+ if (U.compareAndSwapLong(this, CTL, c, nc))
+ active = false;
+ }
+ else if ((int)((c = ctl) >> AC_SHIFT) + parallelism <= 0 &&
+ U.compareAndSwapLong
+ (this, CTL, c, ((c & ~AC_MASK) |
+ ((c & AC_MASK) + AC_UNIT))))
+ break;
+ }
+ }
+
+ /**
+ * Gets and removes a local or stolen task for the given worker.
+ *
+ * @return a task, if available
+ */
+ final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
+ for (ForkJoinTask<?> t;;) {
+ WorkQueue q; int b;
+ if ((t = w.nextLocalTask()) != null)
+ return t;
+ if ((q = findNonEmptyStealQueue()) == null)
+ return null;
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
+ return t;
+ }
+ }
+
+ /**
+ * Returns a cheap heuristic guide for task partitioning when
+ * programmers, frameworks, tools, or languages have little or no
+ * idea about task granularity. In essence, by offering this
+ * method, we ask users only about tradeoffs in overhead vs
+ * expected throughput and its variance, rather than how finely to
+ * partition tasks.
+ *
+ * In a steady state strict (tree-structured) computation, each
+ * thread makes available for stealing enough tasks for other
+ * threads to remain active. Inductively, if all threads play by
+ * the same rules, each thread should make available only a
+ * constant number of tasks.
+ *
+ * The minimum useful constant is just 1. But using a value of 1
+ * would require immediate replenishment upon each steal to
+ * maintain enough tasks, which is infeasible. Further,
+ * partitionings/granularities of offered tasks should minimize
+ * steal rates, which in general means that threads nearer the top
+ * of the computation tree should generate more than those nearer
+ * the bottom. In perfect steady state, each thread is at
+ * approximately the same level of the computation tree. However,
+ * producing extra tasks amortizes the uncertainty of progress and
+ * diffusion assumptions.
+ *
+ * So, users will want to use values larger (but not much larger)
+ * than 1 to both smooth over transient shortages and hedge
+ * against uneven progress; as traded off against the cost of
+ * extra task overhead. We leave the user to pick a threshold
+ * value to compare with the results of this call to guide
+ * decisions, but recommend values such as 3.
+ *
+ * When all threads are active, it is on average OK to estimate
+ * surplus strictly locally. In steady-state, if one thread is
+ * maintaining say 2 surplus tasks, then so are others. So we can
+ * just use estimated queue length. However, this strategy alone
+ * leads to serious mis-estimates in some non-steady-state
+ * conditions (ramp-up, ramp-down, other stalls). We can detect
+ * many of these by further considering the number of "idle"
+ * threads, that are known to have zero queued tasks, so
+ * compensate by a factor of (#idle/#active) threads.
+ *
+ * Note: The approximation of #busy workers as #active workers is
+ * not very good under current signalling scheme, and should be
+ * improved.
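+ *
+ * <p>A sketch of typical use, following the threshold guidance
+ * above (illustrative only; {@code Task}, {@code size}, and
+ * {@code doLeaf} are hypothetical):
+ * <pre> {@code
+ * while (size > LEAF_SIZE && getSurplusQueuedTaskCount() <= 3) {
+ *   size /= 2;              // split off half the work ...
+ *   new Task(size).fork();  // ... and offer it for stealing
+ * }
+ * doLeaf(size);             // process the rest locally
+ * }</pre>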
+ */
+ static int getSurplusQueuedTaskCount() {
+ Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
+ if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
+ int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).parallelism;
+ int n = (q = wt.workQueue).top - q.base;
+ int a = (int)(pool.ctl >> AC_SHIFT) + p;
+ return n - (a > (p >>>= 1) ? 0 :
+ a > (p >>>= 1) ? 1 :
+ a > (p >>>= 1) ? 2 :
+ a > (p >>>= 1) ? 4 :
+ 8);
+ }
+ return 0;
+ }
+
+ // Termination
+
+ /**
+ * Possibly initiates and/or completes termination. The caller
+ * triggering termination runs three passes through workQueues:
+ * (0) Setting termination status, followed by wakeups of queued
+ * workers; (1) cancelling all tasks; (2) interrupting lagging
+ * threads (likely in external tasks, but possibly also blocked in
+ * joins). Each pass repeats previous steps because of potential
+ * lagging thread creation.
+ *
+ * @param now if true, unconditionally terminate, else only
+ * if no work and no active workers
+ * @param enable if true, enable shutdown when next possible
+ * @return true if now terminating or terminated
+ */
+ private boolean tryTerminate(boolean now, boolean enable) {
+ int ps;
+ if (this == common) // cannot shut down
+ return false;
+ if ((ps = plock) >= 0) { // enable by setting plock
+ if (!enable)
+ return false;
+ if ((ps & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN;
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ for (long c;;) {
+ if (((c = ctl) & STOP_BIT) != 0) { // already terminating
+ if ((short)(c >>> TC_SHIFT) + parallelism <= 0) {
+ synchronized (this) {
+ notifyAll(); // signal when 0 workers
+ }
+ }
+ return true;
+ }
+ if (!now) { // check if idle & no tasks
+ WorkQueue[] ws; WorkQueue w;
+ if ((int)(c >> AC_SHIFT) + parallelism > 0)
+ return false;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null &&
+ (!w.isEmpty() ||
+ ((i & 1) != 0 && w.eventCount >= 0))) {
+ signalWork(ws, w);
+ return false;
+ }
+ }
+ }
+ }
+ if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
+ for (int pass = 0; pass < 3; ++pass) {
+ WorkQueue[] ws; WorkQueue w; Thread wt;
+ if ((ws = workQueues) != null) {
+ int n = ws.length;
+ for (int i = 0; i < n; ++i) {
+ if ((w = ws[i]) != null) {
+ w.qlock = -1;
+ if (pass > 0) {
+ w.cancelAll();
+ if (pass > 1 && (wt = w.owner) != null) {
+ if (!wt.isInterrupted()) {
+ try {
+ wt.interrupt();
+ } catch (Throwable ignore) {
+ }
+ }
+ U.unpark(wt);
+ }
+ }
+ }
+ }
+ // Wake up workers parked on event queue
+ int i, e; long cc; Thread p;
+ while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
+ (i = e & SMASK) < n && i >= 0 &&
+ (w = ws[i]) != null) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ ((cc + AC_UNIT) & AC_MASK) |
+ (cc & (TC_MASK|STOP_BIT)));
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, cc, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ w.qlock = -1;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // external operations on common pool
+
+ /**
+ * Returns common pool queue for a thread that has submitted at
+ * least one task.
+ */
+ static WorkQueue commonSubmitterQueue() {
+ Submitter z; ForkJoinPool p; WorkQueue[] ws; int m, r;
+ return ((z = submitters.get()) != null &&
+ (p = common) != null &&
+ (ws = p.workQueues) != null &&
+ (m = ws.length - 1) >= 0) ?
+ ws[m & z.seed & SQMASK] : null;
+ }
+
+ /**
+ * Tries to pop the given task from submitter's queue in common pool.
+ */
+ final boolean tryExternalUnpush(ForkJoinTask<?> task) {
+ WorkQueue joiner; ForkJoinTask<?>[] a; int m, s;
+ Submitter z = submitters.get();
+ WorkQueue[] ws = workQueues;
+ boolean popped = false;
+ if (z != null && ws != null && (m = ws.length - 1) >= 0 &&
+ (joiner = ws[z.seed & m & SQMASK]) != null &&
+ joiner.base != (s = joiner.top) &&
+ (a = joiner.array) != null) {
+ long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
+ if (U.getObject(a, j) == task &&
+ U.compareAndSwapInt(joiner, QLOCK, 0, 1)) {
+ if (joiner.top == s && joiner.array == a &&
+ U.compareAndSwapObject(a, j, task, null)) {
+ joiner.top = s - 1;
+ popped = true;
+ }
+ joiner.qlock = 0;
+ }
+ }
+ return popped;
+ }
+
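+ /**
+ * Tries to help complete the given task from an external
+ * submitter's queue, repeatedly polling and running eligible
+ * subtasks; returns the task's status on exit.
+ */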
+ final int externalHelpComplete(CountedCompleter<?> task) {
+ WorkQueue joiner; int m, j;
+ Submitter z = submitters.get();
+ WorkQueue[] ws = workQueues;
+ int s = 0;
+ if (z != null && ws != null && (m = ws.length - 1) >= 0 &&
+ (joiner = ws[(j = z.seed) & m & SQMASK]) != null && task != null) {
+ int scans = m + m + 1;
+ long c = 0L; // for stability check
+ j |= 1; // poll odd queues
+ for (int k = scans; ; j += 2) {
+ WorkQueue q;
+ if ((s = task.status) < 0)
+ break;
+ else if (joiner.externalPopAndExecCC(task))
+ k = scans;
+ else if ((s = task.status) < 0)
+ break;
+ else if ((q = ws[j & m]) != null && q.pollAndExecCC(task))
+ k = scans;
+ else if (--k < 0) {
+ if (c == (c = ctl))
+ break;
+ k = scans;
+ }
+ }
+ }
+ return s;
+ }
+
+ // Exported methods
+
+ // Constructors
+
+ /**
+ * Creates a {@code ForkJoinPool} with parallelism equal to {@link
+ * java.lang.Runtime#availableProcessors}, using the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool() {
+ this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
+ defaultForkJoinWorkerThreadFactory, null, false);
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the indicated parallelism
+ * level, the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @param parallelism the parallelism level
+ * @throws IllegalArgumentException if parallelism less than or
+ * equal to zero, or greater than implementation limit
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool(int parallelism) {
+ this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the given parameters.
+ *
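+ * For example, a sketch of constructing an async-mode pool
+ * (the argument values shown are illustrative only):
+ * <pre> {@code
+ * ForkJoinPool pool = new ForkJoinPool
+ *   (4,                                               // parallelism
+ *    ForkJoinPool.defaultForkJoinWorkerThreadFactory, // factory
+ *    null,                                            // no handler
+ *    true);                                           // async FIFO mode
+ * }</pre>
+ *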
+ * @param parallelism the parallelism level. For default value,
+ * use {@link java.lang.Runtime#availableProcessors}.
+ * @param factory the factory for creating new threads. For default value,
+ * use {@link #defaultForkJoinWorkerThreadFactory}.
+ * @param handler the handler for internal worker threads that
+ * terminate due to unrecoverable errors encountered while executing
+ * tasks. For default value, use {@code null}.
+ * @param asyncMode if true,
+ * establishes local first-in-first-out scheduling mode for forked
+ * tasks that are never joined. This mode may be more appropriate
+ * than default locally stack-based mode in applications in which
+ * worker threads only process event-style asynchronous tasks.
+ * For default value, use {@code false}.
+ * @throws IllegalArgumentException if parallelism less than or
+ * equal to zero, or greater than implementation limit
+ * @throws NullPointerException if the factory is null
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool(int parallelism,
+ ForkJoinWorkerThreadFactory factory,
+ UncaughtExceptionHandler handler,
+ boolean asyncMode) {
+ this(checkParallelism(parallelism),
+ checkFactory(factory),
+ handler,
+ (asyncMode ? FIFO_QUEUE : LIFO_QUEUE),
+ "ForkJoinPool-" + nextPoolId() + "-worker-");
+ checkPermission();
+ }
+
+ private static int checkParallelism(int parallelism) {
+ if (parallelism <= 0 || parallelism > MAX_CAP)
+ throw new IllegalArgumentException();
+ return parallelism;
+ }
+
+ private static ForkJoinWorkerThreadFactory checkFactory
+ (ForkJoinWorkerThreadFactory factory) {
+ if (factory == null)
+ throw new NullPointerException();
+ return factory;
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the given parameters, without
+ * any security checks or parameter validation. Invoked directly by
+ * makeCommonPool.
+ */
+ private ForkJoinPool(int parallelism,
+ ForkJoinWorkerThreadFactory factory,
+ UncaughtExceptionHandler handler,
+ int mode,
+ String workerNamePrefix) {
+ this.workerNamePrefix = workerNamePrefix;
+ this.factory = factory;
+ this.ueh = handler;
+ this.mode = (short)mode;
+ this.parallelism = (short)parallelism;
+ long np = (long)(-parallelism); // offset ctl counts
+ this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+ }
+
+ /**
+ * Returns the common pool instance. This pool is statically
+ * constructed; its run state is unaffected by attempts to {@link
+ * #shutdown} or {@link #shutdownNow}. However this pool and any
+ * ongoing processing are automatically terminated upon program
+ * {@link System#exit}. Any program that relies on asynchronous
+ * task processing to complete before program termination should
+ * invoke {@code commonPool().}{@link #awaitQuiescence awaitQuiescence},
+ * before exit.
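+ *
+ * <p>For example (a sketch; {@code task} is hypothetical):
+ * <pre> {@code
+ * ForkJoinPool.commonPool().execute(task);
+ * // let asynchronous work finish before exiting:
+ * ForkJoinPool.commonPool().awaitQuiescence(1L, TimeUnit.MINUTES);
+ * }</pre>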
+ *
+ * @return the common pool instance
+ * @since 1.8
+ */
+ public static ForkJoinPool commonPool() {
+ // assert common != null : "static init error";
+ return common;
+ }
+
+ // Execution methods
+
+ /**
+ * Performs the given task, returning its result upon completion.
+ * If the computation encounters an unchecked Exception or Error,
+ * it is rethrown as the outcome of this invocation. Rethrown
+ * exceptions behave in the same way as regular exceptions, but,
+ * when possible, contain stack traces (as displayed for example
+ * using {@code ex.printStackTrace()}) of both the current thread
+ * as well as the thread actually encountering the exception;
+ * minimally only the latter.
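+ *
+ * <p>For example (sketch; {@code myTask} is a hypothetical
+ * {@code ForkJoinTask<Integer>}):
+ * <pre> {@code
+ * Integer sum = pool.invoke(myTask); // submits and awaits the result
+ * }</pre>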
+ *
+ * @param task the task
+ * @param <T> the type of the task's result
+ * @return the task's result
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> T invoke(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
+ externalPush(task);
+ return task.join();
+ }
+
+ /**
+ * Arranges for (asynchronous) execution of the given task.
+ *
+ * @param task the task
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public void execute(ForkJoinTask<?> task) {
+ if (task == null)
+ throw new NullPointerException();
+ externalPush(task);
+ }
+
+ // AbstractExecutorService methods
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public void execute(Runnable task) {
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.RunnableExecuteAction(task);
+ externalPush(job);
+ }
+
+ /**
+ * Submits a ForkJoinTask for execution.
+ *
+ * @param task the task to submit
+ * @param <T> the type of the task's result
+ * @return the task
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
+ externalPush(task);
+ return task;
+ }
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(Callable<T> task) {
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
+ externalPush(job);
+ return job;
+ }
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(Runnable task, T result) {
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
+ externalPush(job);
+ return job;
+ }
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public ForkJoinTask<?> submit(Runnable task) {
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
+ externalPush(job);
+ return job;
+ }
+
+ /**
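+ * Illustrative use (a sketch; the {@code Callable} shown is
+ * hypothetical):
+ * <pre> {@code
+ * List<Callable<String>> tasks = new ArrayList<Callable<String>>();
+ * tasks.add(new Callable<String>() {
+ *   public String call() { return "done"; }});
+ * List<Future<String>> results = pool.invokeAll(tasks);
+ * }</pre>
+ *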
+ * @throws NullPointerException {@inheritDoc}
+ * @throws RejectedExecutionException {@inheritDoc}
+ */
+ public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
+ // In previous versions of this class, this method constructed
+ // a task to run ForkJoinTask.invokeAll, but now external
+ // invocation of multiple tasks is at least as efficient.
+ ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
+
+ boolean done = false;
+ try {
+ for (Callable<T> t : tasks) {
+ ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
+ futures.add(f);
+ externalPush(f);
+ }
+ for (int i = 0, size = futures.size(); i < size; i++)
+ ((ForkJoinTask<?>)futures.get(i)).quietlyJoin();
+ done = true;
+ return futures;
+ } finally {
+ if (!done)
+ for (int i = 0, size = futures.size(); i < size; i++)
+ futures.get(i).cancel(false);
+ }
+ }
+
+ /**
+ * Returns the factory used for constructing new workers.
+ *
+ * @return the factory used for constructing new workers
+ */
+ public ForkJoinWorkerThreadFactory getFactory() {
+ return factory;
+ }
+
+ /**
+ * Returns the handler for internal worker threads that terminate
+ * due to unrecoverable errors encountered while executing tasks.
+ *
+ * @return the handler, or {@code null} if none
+ */
+ public UncaughtExceptionHandler getUncaughtExceptionHandler() {
+ return ueh;
+ }
+
+ /**
+ * Returns the targeted parallelism level of this pool.
+ *
+ * @return the targeted parallelism level of this pool
+ */
+ public int getParallelism() {
+ int par;
+ return ((par = parallelism) > 0) ? par : 1;
+ }
+
+ /**
+ * Returns the targeted parallelism level of the common pool.
+ *
+ * @return the targeted parallelism level of the common pool
+ * @since 1.8
+ */
+ public static int getCommonPoolParallelism() {
+ return commonParallelism;
+ }
+
+ /**
+ * Returns the number of worker threads that have started but not
+ * yet terminated. The result returned by this method may differ
+ * from {@link #getParallelism} when threads are created to
+ * maintain parallelism when others are cooperatively blocked.
+ *
+ * @return the number of worker threads
+ */
+ public int getPoolSize() {
+ return parallelism + (short)(ctl >>> TC_SHIFT);
+ }
+
+ /**
+ * Returns {@code true} if this pool uses local first-in-first-out
+ * scheduling mode for forked tasks that are never joined.
+ *
+ * @return {@code true} if this pool uses async mode
+ */
+ public boolean getAsyncMode() {
+ return mode == FIFO_QUEUE;
+ }
+
+ /**
+ * Returns an estimate of the number of worker threads that are
+ * not blocked waiting to join tasks or for other managed
+ * synchronization. This method may overestimate the
+ * number of running threads.
+ *
+ * @return the number of worker threads
+ */
+ public int getRunningThreadCount() {
+ int rc = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ return rc;
+ }
+
+ /**
+ * Returns an estimate of the number of threads that are currently
+ * stealing or executing tasks. This method may overestimate the
+ * number of active threads.
+ *
+ * @return the number of active threads
+ */
+ public int getActiveThreadCount() {
+ int r = parallelism + (int)(ctl >> AC_SHIFT);
+ return (r <= 0) ? 0 : r; // suppress momentarily negative values
+ }
+
+ /**
+ * Returns {@code true} if all worker threads are currently idle.
+ * An idle worker is one that cannot obtain a task to execute
+ * because none are available to steal from other threads, and
+ * there are no pending submissions to the pool. This method is
+ * conservative; it might not return {@code true} immediately upon
+ * idleness of all threads, but will eventually become true if
+ * threads remain inactive.
+ *
+ * @return {@code true} if all threads are currently idle
+ */
+ public boolean isQuiescent() {
+ return parallelism + (int)(ctl >> AC_SHIFT) <= 0;
+ }
+
+ /**
+ * Returns an estimate of the total number of tasks stolen from
+ * one thread's work queue by another. The reported value
+ * underestimates the actual total number of steals when the pool
+ * is not quiescent. This value may be useful for monitoring and
+ * tuning fork/join programs: in general, steal counts should be
+ * high enough to keep threads busy, but low enough to avoid
+ * overhead and contention across threads.
+ *
+ * @return the number of steals
+ */
+ public long getStealCount() {
+ long count = stealCount;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.nsteals;
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns an estimate of the total number of tasks currently held
+ * in queues by worker threads (but not including tasks submitted
+ * to the pool that have not begun executing). This value is only
+ * an approximation, obtained by iterating across all threads in
+ * the pool. This method may be useful for tuning task
+ * granularities.
+ *
+ * @return the number of queued tasks
+ */
+ public long getQueuedTaskCount() {
+ long count = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns an estimate of the number of tasks submitted to this
+ * pool that have not yet begun executing. This method may take
+ * time proportional to the number of submissions.
+ *
+ * @return the number of queued submissions
+ */
+ public int getQueuedSubmissionCount() {
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns {@code true} if there are any tasks submitted to this
+ * pool that have not yet begun executing.
+ *
+ * @return {@code true} if there are any queued submissions
+ */
+ public boolean hasQueuedSubmissions() {
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && !w.isEmpty())
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Removes and returns the next unexecuted submission if one is
+ * available. This method may be useful in extensions to this
+ * class that re-assign work in systems with multiple pools.
+ *
+ * @return the next submission, or {@code null} if none
+ */
+ protected ForkJoinTask<?> pollSubmission() {
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && (t = w.poll()) != null)
+ return t;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Removes all available unexecuted submitted and forked tasks
+ * from scheduling queues and adds them to the given collection,
+ * without altering their execution status. These may include
+ * artificially generated or wrapped tasks. This method is
+ * designed to be invoked only when the pool is known to be
+ * quiescent. Invocations at other times may not remove all
+ * tasks. A failure encountered while attempting to add elements
+ * to collection {@code c} may result in elements being in
+ * neither, either or both collections when the associated
+ * exception is thrown. The behavior of this operation is
+ * undefined if the specified collection is modified while the
+ * operation is in progress.
+ *
+ * @param c the collection to transfer elements into
+ * @return the number of elements transferred
+ */
+ protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ while ((t = w.poll()) != null) {
+ c.add(t);
+ ++count;
+ }
+ }
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns a string identifying this pool, as well as its state,
+ * including indications of run state, parallelism level, and
+ * worker and task counts.
+ *
+ * @return a string identifying this pool, as well as its state
+ */
+ public String toString() {
+ // Use a single pass through workQueues to collect counts
+ long qt = 0L, qs = 0L; int rc = 0;
+ long st = stealCount;
+ long c = ctl;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ int size = w.queueSize();
+ if ((i & 1) == 0)
+ qs += size;
+ else {
+ qt += size;
+ st += w.nsteals;
+ if (w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ }
+ }
+ int pc = parallelism;
+ int tc = pc + (short)(c >>> TC_SHIFT);
+ int ac = pc + (int)(c >> AC_SHIFT);
+ if (ac < 0) // ignore transient negative
+ ac = 0;
+ String level;
+ if ((c & STOP_BIT) != 0)
+ level = (tc == 0) ? "Terminated" : "Terminating";
+ else
+ level = plock < 0 ? "Shutting down" : "Running";
+ return super.toString() +
+ "[" + level +
+ ", parallelism = " + pc +
+ ", size = " + tc +
+ ", active = " + ac +
+ ", running = " + rc +
+ ", steals = " + st +
+ ", tasks = " + qt +
+ ", submissions = " + qs +
+ "]";
+ }
+
+ /**
+ * Possibly initiates an orderly shutdown in which previously
+ * submitted tasks are executed, but no new tasks will be
+ * accepted. Invocation has no effect on execution state if this
+ * is the {@link #commonPool()}, and no additional effect if
+ * already shut down. Tasks that are in the process of being
+ * submitted concurrently during the course of this method may or
+ * may not be rejected.
+ *
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public void shutdown() {
+ checkPermission();
+ tryTerminate(false, true);
+ }
+
+ /**
+ * Possibly attempts to cancel and/or stop all tasks, and reject
+ * all subsequently submitted tasks. Invocation has no effect on
+ * execution state if this is the {@link #commonPool()}, and no
+ * additional effect if already shut down. Otherwise, tasks that
+ * are in the process of being submitted or executed concurrently
+ * during the course of this method may or may not be
+ * rejected. This method cancels both existing and unexecuted
+ * tasks, in order to permit termination in the presence of task
+ * dependencies. So the method always returns an empty list
+ * (unlike the case for some other Executors).
+ *
+ * @return an empty list
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public List<Runnable> shutdownNow() {
+ checkPermission();
+ tryTerminate(true, true);
+ return Collections.emptyList();
+ }
+
+ /**
+ * Returns {@code true} if all tasks have completed following shut down.
+ *
+ * @return {@code true} if all tasks have completed following shut down
+ */
+ public boolean isTerminated() {
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) + parallelism <= 0);
+ }
+
+ /**
+ * Returns {@code true} if the process of termination has
+ * commenced but not yet completed. This method may be useful for
+ * debugging. A return of {@code true} reported a sufficient
+ * period after shutdown may indicate that submitted tasks have
+ * ignored or suppressed interruption, or are waiting for I/O,
+ * causing this executor not to properly terminate. (See the
+ * advisory notes for class {@link ForkJoinTask} stating that
+ * tasks should not normally entail blocking operations. But if
+ * they do, they must abort them on interrupt.)
+ *
+ * @return {@code true} if terminating but not yet terminated
+ */
+ public boolean isTerminating() {
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) + parallelism > 0);
+ }
+
+ /**
+ * Returns {@code true} if this pool has been shut down.
+ *
+ * @return {@code true} if this pool has been shut down
+ */
+ public boolean isShutdown() {
+ return plock < 0;
+ }
+
+ /**
+ * Blocks until all tasks have completed execution after a
+ * shutdown request, or the timeout occurs, or the current thread
+ * is interrupted, whichever happens first. Because the {@link
+ * #commonPool()} never terminates until program shutdown, when
+ * applied to the common pool, this method is equivalent to {@link
+ * #awaitQuiescence(long, TimeUnit)} but always returns {@code false}.
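+ *
+ * <p>A typical shutdown sequence (sketch):
+ * <pre> {@code
+ * pool.shutdown();                                  // stop new tasks
+ * if (!pool.awaitTermination(60L, TimeUnit.SECONDS))
+ *   pool.shutdownNow();                             // cancel stragglers
+ * }</pre>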
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return {@code true} if this executor terminated and
+ * {@code false} if the timeout elapsed before termination
+ * @throws InterruptedException if interrupted while waiting
+ */
+ public boolean awaitTermination(long timeout, TimeUnit unit)
+ throws InterruptedException {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ if (this == common) {
+ awaitQuiescence(timeout, unit);
+ return false;
+ }
+ long nanos = unit.toNanos(timeout);
+ if (isTerminated())
+ return true;
+ if (nanos <= 0L)
+ return false;
+ long deadline = System.nanoTime() + nanos;
+ synchronized (this) {
+ for (;;) {
+ if (isTerminated())
+ return true;
+ if (nanos <= 0L)
+ return false;
+ long millis = TimeUnit.NANOSECONDS.toMillis(nanos);
+ wait(millis > 0L ? millis : 1L);
+ nanos = deadline - System.nanoTime();
+ }
+ }
+ }
+
+ /**
+ * If called by a ForkJoinTask operating in this pool, equivalent
+ * in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
+ * waits and/or attempts to assist performing tasks until this
+ * pool {@link #isQuiescent} or the indicated timeout elapses.
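+ *
+ * <p>For example (sketch; {@code rootTask} is hypothetical):
+ * <pre> {@code
+ * pool.execute(rootTask);
+ * pool.awaitQuiescence(10L, TimeUnit.SECONDS); // help and/or wait
+ * }</pre>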
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return {@code true} if quiescent; {@code false} if the
+ * timeout elapsed.
+ */
+ public boolean awaitQuiescence(long timeout, TimeUnit unit) {
+ long nanos = unit.toNanos(timeout);
+ ForkJoinWorkerThread wt;
+ Thread thread = Thread.currentThread();
+ if ((thread instanceof ForkJoinWorkerThread) &&
+ (wt = (ForkJoinWorkerThread)thread).pool == this) {
+ helpQuiescePool(wt.workQueue);
+ return true;
+ }
+ long startTime = System.nanoTime();
+ WorkQueue[] ws;
+ int r = 0, m;
+ boolean found = true;
+ while (!isQuiescent() && (ws = workQueues) != null &&
+ (m = ws.length - 1) >= 0) {
+ if (!found) {
+ if ((System.nanoTime() - startTime) > nanos)
+ return false;
+ Thread.yield(); // cannot block
+ }
+ found = false;
+ for (int j = (m + 1) << 2; j >= 0; --j) {
+ ForkJoinTask<?> t; WorkQueue q; int b;
+ if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) {
+ found = true;
+ if ((t = q.pollAt(b)) != null)
+ t.doExec();
+ break;
+ }
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Waits and/or attempts to assist performing tasks indefinitely
+ * until the {@link #commonPool()} {@link #isQuiescent}.
+ */
+ static void quiesceCommonPool() {
+ common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
+ }
+
+ /**
+ * Interface for extending managed parallelism for tasks running
+ * in {@link ForkJoinPool}s.
+ *
+ * <p>A {@code ManagedBlocker} provides two methods. Method
+ * {@code isReleasable} must return {@code true} if blocking is
+ * not necessary. Method {@code block} blocks the current thread
+ * if necessary (perhaps internally invoking {@code isReleasable}
+ * before actually blocking). These actions are performed by any
+ * thread invoking {@link ForkJoinPool#managedBlock(ManagedBlocker)}.
+ * The unusual methods in this API accommodate synchronizers that
+ * may, but don't usually, block for long periods. Similarly, they
+ * allow more efficient internal handling of cases in which
+ * additional workers may be, but usually are not, needed to
+ * ensure sufficient parallelism. Toward this end,
+ * implementations of method {@code isReleasable} must be amenable
+ * to repeated invocation.
+ *
+ * <p>For example, here is a ManagedBlocker based on a
+ * ReentrantLock:
+ * <pre> {@code
+ * class ManagedLocker implements ManagedBlocker {
+ * final ReentrantLock lock;
+ * boolean hasLock = false;
+ * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
+ * public boolean block() {
+ * if (!hasLock)
+ * lock.lock();
+ * return true;
+ * }
+ * public boolean isReleasable() {
+ * return hasLock || (hasLock = lock.tryLock());
+ * }
+ * }}</pre>
+ *
+ * <p>Here is a class that possibly blocks waiting for an
+ * item on a given queue:
+ * <pre> {@code
+ * class QueueTaker<E> implements ManagedBlocker {
+ * final BlockingQueue<E> queue;
+ * volatile E item = null;
+ * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
+ * public boolean block() throws InterruptedException {
+ * if (item == null)
+ * item = queue.take();
+ * return true;
+ * }
+ * public boolean isReleasable() {
+ * return item != null || (item = queue.poll()) != null;
+ * }
+ * public E getItem() { // call after pool.managedBlock completes
+ * return item;
+ * }
+ * }}</pre>
+ */
+ public static interface ManagedBlocker {
+ /**
+ * Possibly blocks the current thread, for example waiting for
+ * a lock or condition.
+ *
+ * @return {@code true} if no additional blocking is necessary
+ * (i.e., if isReleasable would return true)
+ * @throws InterruptedException if interrupted while waiting
+ * (the method is not required to do so, but is allowed to)
+ */
+ boolean block() throws InterruptedException;
+
+ /**
+ * Returns {@code true} if blocking is unnecessary.
+ * @return {@code true} if blocking is unnecessary
+ */
+ boolean isReleasable();
+ }
+
+ /**
+ * Blocks in accord with the given blocker. If the current thread
+ * is a {@link ForkJoinWorkerThread}, this method possibly
+ * arranges for a spare thread to be activated if necessary to
+ * ensure sufficient parallelism while the current thread is blocked.
+ *
+ * <p>If the caller is not a {@link ForkJoinTask}, this method is
+ * behaviorally equivalent to
+ * <pre> {@code
+ * while (!blocker.isReleasable())
+ * if (blocker.block())
+ * return;
+ * }</pre>
+ *
+ * If the caller is a {@code ForkJoinTask}, then the pool may
+ * first be expanded to ensure parallelism, and later adjusted.
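+ *
+ * <p>For example, using the {@code QueueTaker} sketched in the
+ * {@link ManagedBlocker} documentation:
+ * <pre> {@code
+ * QueueTaker<String> taker = new QueueTaker<String>(queue);
+ * ForkJoinPool.managedBlock(taker); // may activate a spare worker
+ * String item = taker.getItem();
+ * }</pre>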
+ *
+ * @param blocker the blocker
+ * @throws InterruptedException if blocker.block did so
+ */
+ public static void managedBlock(ManagedBlocker blocker)
+ throws InterruptedException {
+ Thread t = Thread.currentThread();
+ if (t instanceof ForkJoinWorkerThread) {
+ ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
+ while (!blocker.isReleasable()) {
+ if (p.tryCompensate(p.ctl)) {
+ try {
+ do {} while (!blocker.isReleasable() &&
+ !blocker.block());
+ } finally {
+ p.incrementActiveCount();
+ }
+ break;
+ }
+ }
+ }
+ else {
+ do {} while (!blocker.isReleasable() &&
+ !blocker.block());
+ }
+ }
+
+ // AbstractExecutorService overrides. These rely on undocumented
+ // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
+ // implement RunnableFuture.
+
+ protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
+ return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
+ }
+
+ protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
+ return new ForkJoinTask.AdaptedCallable<T>(callable);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long CTL;
+ private static final long PARKBLOCKER;
+ private static final int ABASE;
+ private static final int ASHIFT;
+ private static final long STEALCOUNT;
+ private static final long PLOCK;
+ private static final long INDEXSEED;
+ private static final long QBASE;
+ private static final long QLOCK;
+
+ static {
+ // initialize field offsets for CAS etc
+ try {
+ U = getUnsafe();
+ Class<?> k = ForkJoinPool.class;
+ CTL = U.objectFieldOffset
+ (k.getDeclaredField("ctl"));
+ STEALCOUNT = U.objectFieldOffset
+ (k.getDeclaredField("stealCount"));
+ PLOCK = U.objectFieldOffset
+ (k.getDeclaredField("plock"));
+ INDEXSEED = U.objectFieldOffset
+ (k.getDeclaredField("indexSeed"));
+ Class<?> tk = Thread.class;
+ PARKBLOCKER = U.objectFieldOffset
+ (tk.getDeclaredField("parkBlocker"));
+ Class<?> wk = WorkQueue.class;
+ QBASE = U.objectFieldOffset
+ (wk.getDeclaredField("base"));
+ QLOCK = U.objectFieldOffset
+ (wk.getDeclaredField("qlock"));
+ Class<?> ak = ForkJoinTask[].class;
+ ABASE = U.arrayBaseOffset(ak);
+ int scale = U.arrayIndexScale(ak);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+
+ submitters = new ThreadLocal<Submitter>();
+ defaultForkJoinWorkerThreadFactory =
+ new DefaultForkJoinWorkerThreadFactory();
+ modifyThreadPermission = new RuntimePermission("modifyThread");
+
+ common = java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedAction<ForkJoinPool>() {
+ public ForkJoinPool run() { return makeCommonPool(); }});
+ int par = common.parallelism; // report 1 even if threads disabled
+ commonParallelism = par > 0 ? par : 1;
+ }
+
+ /**
+ * Creates and returns the common pool, respecting user settings
+ * specified via system properties.
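+ * For example, a JVM might be launched with (value illustrative):
+ * <pre> {@code
+ * -Djava.util.concurrent.ForkJoinPool.common.parallelism=4
+ * }</pre>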
+ */
+ private static ForkJoinPool makeCommonPool() {
+ int parallelism = -1;
+ ForkJoinWorkerThreadFactory factory
+ = defaultForkJoinWorkerThreadFactory;
+ UncaughtExceptionHandler handler = null;
+ try { // ignore exceptions in accessing/parsing properties
+ String pp = System.getProperty
+ ("java.util.concurrent.ForkJoinPool.common.parallelism");
+ String fp = System.getProperty
+ ("java.util.concurrent.ForkJoinPool.common.threadFactory");
+ String hp = System.getProperty
+ ("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
+ if (pp != null)
+ parallelism = Integer.parseInt(pp);
+ if (fp != null)
+ factory = ((ForkJoinWorkerThreadFactory)ClassLoader.
+ getSystemClassLoader().loadClass(fp).newInstance());
+ if (hp != null)
+ handler = ((UncaughtExceptionHandler)ClassLoader.
+ getSystemClassLoader().loadClass(hp).newInstance());
+ } catch (Exception ignore) {
+ }
+
+ if (parallelism < 0 && // default 1 less than #cores
+ (parallelism = Runtime.getRuntime().availableProcessors() - 1) < 0)
+ parallelism = 0;
+ if (parallelism > MAX_CAP)
+ parallelism = MAX_CAP;
+ return new ForkJoinPool(parallelism, factory, handler, LIFO_QUEUE,
+ "ForkJoinPool.commonPool-worker-");
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/ForkJoinTask.java b/src/main/java/jsr166e/ForkJoinTask.java
new file mode 100644
index 0000000..99ac374
--- /dev/null
+++ b/src/main/java/jsr166e/ForkJoinTask.java
@@ -0,0 +1,1554 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.List;
+import java.util.RandomAccess;
+import java.lang.ref.WeakReference;
+import java.lang.ref.ReferenceQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.lang.reflect.Constructor;
+
+/**
+ * Abstract base class for tasks that run within a {@link ForkJoinPool}.
+ * A {@code ForkJoinTask} is a thread-like entity that is much
+ * lighter weight than a normal thread. Huge numbers of tasks and
+ * subtasks may be hosted by a small number of actual threads in a
+ * ForkJoinPool, at the price of some usage limitations.
+ *
+ * <p>A "main" {@code ForkJoinTask} begins execution when it is
+ * explicitly submitted to a {@link ForkJoinPool}, or, if not already
+ * engaged in a ForkJoin computation, commenced in the {@link
+ * ForkJoinPool#commonPool()} via {@link #fork}, {@link #invoke}, or
+ * related methods. Once started, it will usually in turn start other
+ * subtasks. As indicated by the name of this class, many programs
+ * using {@code ForkJoinTask} employ only methods {@link #fork} and
+ * {@link #join}, or derivatives such as {@link
+ * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also
+ * provides a number of other methods that can come into play in
+ * advanced usages, as well as extension mechanics that allow support
+ * of new forms of fork/join processing.
+ *
+ * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
+ * The efficiency of {@code ForkJoinTask}s stems from a set of
+ * restrictions (that are only partially statically enforceable)
+ * reflecting their main use as computational tasks calculating pure
+ * functions or operating on purely isolated objects. The primary
+ * coordination mechanisms are {@link #fork}, that arranges
+ * asynchronous execution, and {@link #join}, that doesn't proceed
+ * until the task's result has been computed. Computations should
+ * ideally avoid {@code synchronized} methods or blocks, and should
+ * minimize other blocking synchronization apart from joining other
+ * tasks or using synchronizers such as Phasers that are advertised to
+ * cooperate with fork/join scheduling. Subdividable tasks should also
+ * not perform blocking I/O, and should ideally access variables that
+ * are completely independent of those accessed by other running
+ * tasks. These guidelines are loosely enforced by not permitting
+ * checked exceptions such as {@code IOExceptions} to be
+ * thrown. However, computations may still encounter unchecked
+ * exceptions, that are rethrown to callers attempting to join
+ * them. These exceptions may additionally include {@link
+ * RejectedExecutionException} stemming from internal resource
+ * exhaustion, such as failure to allocate internal task
+ * queues. Rethrown exceptions behave in the same way as regular
+ * exceptions, but, when possible, contain stack traces (as displayed
+ * for example using {@code ex.printStackTrace()}) of both the thread
+ * that initiated the computation as well as the thread actually
+ * encountering the exception; minimally only the latter.
+ *
+ * <p>It is possible to define and use ForkJoinTasks that may block,
+ * but doing so requires three further considerations: (1) Completion
+ * of few if any <em>other</em> tasks should be dependent on a task
+ * that blocks on external synchronization or I/O. Event-style async
+ * tasks that are never joined (for example, those subclassing {@link
+ * CountedCompleter}) often fall into this category. (2) To minimize
+ * resource impact, tasks should be small; ideally performing only the
+ * (possibly) blocking action. (3) Unless the {@link
+ * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
+ * blocked tasks is known to be less than the pool's {@link
+ * ForkJoinPool#getParallelism} level, the pool cannot guarantee that
+ * enough threads will be available to ensure progress or good
+ * performance.
+ *
+ * <p>The primary method for awaiting completion and extracting
+ * results of a task is {@link #join}, but there are several variants:
+ * The {@link Future#get} methods support interruptible and/or timed
+ * waits for completion and report results using {@code Future}
+ * conventions. Method {@link #invoke} is semantically
+ * equivalent to {@code fork(); join()} but always attempts to begin
+ * execution in the current thread. The "<em>quiet</em>" forms of
+ * these methods do not extract results or report exceptions. These
+ * may be useful when a set of tasks are being executed, and you need
+ * to delay processing of results or exceptions until all complete.
+ * Method {@code invokeAll} (available in multiple versions)
+ * performs the most common form of parallel invocation: forking a set
+ * of tasks and joining them all.
+ *
+ * <p>In the most typical usages, a fork-join pair act like a call
+ * (fork) and return (join) from a parallel recursive function. As is
+ * the case with other forms of recursive calls, returns (joins)
+ * should be performed innermost-first. For example, {@code a.fork();
+ * b.fork(); b.join(); a.join();} is likely to be substantially more
+ * efficient than joining {@code a} before {@code b}.
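+ *
+ * <p>A minimal sketch of this pattern (illustrative only; the
+ * {@code Fib} class is hypothetical):
+ * <pre> {@code
+ * class Fib extends RecursiveTask<Integer> {
+ *   final int n;
+ *   Fib(int n) { this.n = n; }
+ *   protected Integer compute() {
+ *     if (n <= 1) return n;
+ *     Fib a = new Fib(n - 1), b = new Fib(n - 2);
+ *     a.fork(); b.fork();
+ *     return b.join() + a.join(); // innermost-first joins
+ *   }
+ * }}</pre>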
+ *
+ * <p>The execution status of tasks may be queried at several levels
+ * of detail: {@link #isDone} is true if a task completed in any way
+ * (including the case where a task was cancelled without executing);
+ * {@link #isCompletedNormally} is true if a task completed without
+ * cancellation or encountering an exception; {@link #isCancelled} is
+ * true if the task was cancelled (in which case {@link #getException}
+ * returns a {@link java.util.concurrent.CancellationException}); and
+ * {@link #isCompletedAbnormally} is true if a task was either
+ * cancelled or encountered an exception, in which case {@link
+ * #getException} will return either the encountered exception or
+ * {@link java.util.concurrent.CancellationException}.
+ *
+ * <p>The ForkJoinTask class is not usually directly subclassed.
+ * Instead, you subclass one of the abstract classes that support a
+ * particular style of fork/join processing, typically {@link
+ * RecursiveAction} for most computations that do not return results,
+ * {@link RecursiveTask} for those that do, and {@link
+ * CountedCompleter} for those in which completed actions trigger
+ * other actions. Normally, a concrete ForkJoinTask subclass declares
+ * fields comprising its parameters, established in a constructor, and
+ * then defines a {@code compute} method that somehow uses the control
+ * methods supplied by this base class.
+ *
+ * <p>Method {@link #join} and its variants are appropriate for use
+ * only when completion dependencies are acyclic; that is, the
+ * parallel computation can be described as a directed acyclic graph
+ * (DAG). Otherwise, executions may encounter a form of deadlock as
+ * tasks cyclically wait for each other. However, this framework
+ * supports other methods and techniques (for example the use of
+ * {@link java.util.concurrent.Phaser Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
+ * may be of use in constructing custom subclasses for problems that
+ * are not statically structured as DAGs. To support such usages, a
+ * ForkJoinTask may be atomically <em>tagged</em> with a {@code short}
+ * value using {@link #setForkJoinTaskTag} or {@link
+ * #compareAndSetForkJoinTaskTag} and checked using {@link
+ * #getForkJoinTaskTag}. The ForkJoinTask implementation does not use
+ * these {@code protected} methods or tags for any purpose, but they
+ * may be of use in the construction of specialized subclasses. For
+ * example, parallel graph traversals can use the supplied methods to
+ * avoid revisiting nodes/tasks that have already been processed.
+ * (Method names for tagging are bulky in part to encourage definition
+ * of methods that reflect their usage patterns.)
+ *
+ * <p>Most base support methods are {@code final}, to prevent
+ * overriding of implementations that are intrinsically tied to the
+ * underlying lightweight task scheduling framework. Developers
+ * creating new basic styles of fork/join processing should minimally
+ * implement {@code protected} methods {@link #exec}, {@link
+ * #setRawResult}, and {@link #getRawResult}, while also introducing
+ * an abstract computational method that can be implemented in its
+ * subclasses, possibly relying on other {@code protected} methods
+ * provided by this class.
+ *
+ * <p>ForkJoinTasks should perform relatively small amounts of
+ * computation. Large tasks should be split into smaller subtasks,
+ * usually via recursive decomposition. As a very rough rule of thumb,
+ * a task should perform more than 100 and less than 10000 basic
+ * computational steps, and should avoid indefinite looping. If tasks
+ * are too big, then parallelism cannot improve throughput. If too
+ * small, then memory and internal task maintenance overhead may
+ * overwhelm processing.
+ *
+ * <p>This class provides {@code adapt} methods for {@link Runnable}
+ * and {@link Callable}, that may be of use when mixing execution of
+ * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
+ * of this form, consider using a pool constructed in <em>asyncMode</em>.
+ *
+ * <p>ForkJoinTasks are {@code Serializable}, which enables them to be
+ * used in extensions such as remote execution frameworks. It is
+ * sensible to serialize tasks only before or after, but not during,
+ * execution. Serialization is not relied on during execution itself.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
+
+ /*
+ * See the internal documentation of class ForkJoinPool for a
+ * general implementation overview. ForkJoinTasks are mainly
+ * responsible for maintaining their "status" field amidst relays
+ * to methods in ForkJoinWorkerThread and ForkJoinPool.
+ *
+ * The methods of this class are more-or-less layered into
+ * (1) basic status maintenance
+ * (2) execution and awaiting completion
+ * (3) user-level methods that additionally report results.
+ * This is sometimes hard to see because this file orders exported
+ * methods in a way that flows well in javadocs.
+ */
+
+ /*
+ * The status field holds run control status bits packed into a
+ * single int to minimize footprint and to ensure atomicity (via
+ * CAS). Status is initially zero, and takes on nonnegative
+ * values until completed, upon which status (anded with
+ * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
+ * undergoing blocking waits by other threads have the SIGNAL bit
+ * set. Completion of a stolen task with SIGNAL set awakens any
+ * waiters via notifyAll. Even though suboptimal for some
+ * purposes, we use basic builtin wait/notify to take advantage of
+ * "monitor inflation" in JVMs that we would otherwise need to
+ * emulate to avoid adding further per-task bookkeeping overhead.
+ * We want these monitors to be "fat", i.e., not use biasing or
+ * thin-lock techniques, so use some odd coding idioms that tend
+ * to avoid them, mainly by arranging that every synchronized
+ * block performs a wait, notifyAll or both.
+ *
+ * These control bits occupy only (some of) the upper half (16
+ * bits) of status field. The lower bits are used for user-defined
+ * tags.
+ */
+
+ /** The run status of this task */
+ volatile int status; // accessed directly by pool and workers
+ static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
+ static final int NORMAL = 0xf0000000; // must be negative
+ static final int CANCELLED = 0xc0000000; // must be < NORMAL
+ static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
+ static final int SIGNAL = 0x00010000; // must be >= 1 << 16
+ static final int SMASK = 0x0000ffff; // short bits for tags
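+ // For example (illustrative): a normally completed task has
+ // (status & DONE_MASK) == NORMAL, and (status >>> 16) != 0 means
+ // some thread has set SIGNAL and may be waiting for the notifyAll
+ // performed in setCompletion.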
+
+ /**
+ * Marks completion and wakes up threads waiting to join this
+ * task.
+ *
+ * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
+ * @return completion status on exit
+ */
+ private int setCompletion(int completion) {
+ for (int s;;) {
+ if ((s = status) < 0)
+ return s;
+ if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
+ if ((s >>> 16) != 0)
+ synchronized (this) { notifyAll(); }
+ return completion;
+ }
+ }
+ }
+
+ /**
+ * Primary execution method for stolen tasks. Unless done, calls
+ * exec and records status if completed, but doesn't wait for
+ * completion otherwise.
+ *
+ * @return status on exit from this method
+ */
+ final int doExec() {
+ int s; boolean completed;
+ if ((s = status) >= 0) {
+ try {
+ completed = exec();
+ } catch (Throwable rex) {
+ return setExceptionalCompletion(rex);
+ }
+ if (completed)
+ s = setCompletion(NORMAL);
+ }
+ return s;
+ }
+
+ /**
+ * Tries to set SIGNAL status unless already completed. Used by
+ * ForkJoinPool. Other variants are directly incorporated into
+ * externalAwaitDone etc.
+ *
+ * @return true if successful
+ */
+ final boolean trySetSignal() {
+ int s = status;
+ return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL);
+ }
+
+ /**
+ * Blocks a non-worker-thread until completion.
+ * @return status upon completion
+ */
+ private int externalAwaitDone() {
+ int s;
+ ForkJoinPool cp = ForkJoinPool.common;
+ if ((s = status) >= 0) {
+ if (cp != null) {
+ if (this instanceof CountedCompleter)
+ s = cp.externalHelpComplete((CountedCompleter<?>)this);
+ else if (cp.tryExternalUnpush(this))
+ s = doExec();
+ }
+ if (s >= 0 && (s = status) >= 0) {
+ boolean interrupted = false;
+ do {
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ } while ((s = status) >= 0);
+ if (interrupted)
+ Thread.currentThread().interrupt();
+ }
+ }
+ return s;
+ }
+
+ /**
+ * Blocks a non-worker-thread until completion or interruption.
+ */
+ private int externalInterruptibleAwaitDone() throws InterruptedException {
+ int s;
+ ForkJoinPool cp = ForkJoinPool.common;
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ if ((s = status) >= 0 && cp != null) {
+ if (this instanceof CountedCompleter)
+ cp.externalHelpComplete((CountedCompleter<?>)this);
+ else if (cp.tryExternalUnpush(this))
+ doExec();
+ }
+ while ((s = status) >= 0) {
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0)
+ wait();
+ else
+ notifyAll();
+ }
+ }
+ }
+ return s;
+ }
+
+ /**
+ * Implementation for join, get, quietlyJoin. Directly handles
+ * only cases of already-completed, external wait, and
+ * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
+ *
+ * @return status upon completion
+ */
+ private int doJoin() {
+ int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
+ return (s = status) < 0 ? s :
+ ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ (w = (wt = (ForkJoinWorkerThread)t).workQueue).
+ tryUnpush(this) && (s = doExec()) < 0 ? s :
+ wt.pool.awaitJoin(w, this) :
+ externalAwaitDone();
+ }
+
+ /**
+ * Implementation for invoke, quietlyInvoke.
+ *
+ * @return status upon completion
+ */
+ private int doInvoke() {
+ int s; Thread t; ForkJoinWorkerThread wt;
+ return (s = doExec()) < 0 ? s :
+ ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, this) :
+ externalAwaitDone();
+ }
+
+ // Exception table support
+
+ /**
+ * Table of exceptions thrown by tasks, to enable reporting by
+ * callers. Because exceptions are rare, we don't directly keep
+ * them with task objects, but instead use a weak ref table. Note
+ * that cancellation exceptions don't appear in the table, but are
+ * instead recorded as status values.
+ *
+ * Note: These statics are initialized below in static block.
+ */
+ private static final ExceptionNode[] exceptionTable;
+ private static final ReentrantLock exceptionTableLock;
+ private static final ReferenceQueue<Object> exceptionTableRefQueue;
+
+ /**
+ * Fixed capacity for exceptionTable.
+ */
+ private static final int EXCEPTION_MAP_CAPACITY = 32;
+
+ /**
+ * Key-value nodes for exception table. The chained hash table
+ * uses identity comparisons, full locking, and weak references
+ * for keys. The table has a fixed capacity because it only
+ * maintains task exceptions long enough for joiners to access
+ * them, so should never become very large for sustained
+ * periods. However, since we do not know when the last joiner
+ * completes, we must use weak references and expunge them. We do
+ * so on each operation (hence full locking). Also, some thread in
+ * any ForkJoinPool will call helpExpungeStaleExceptions when its
+ * pool becomes isQuiescent.
+ */
+ static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
+ final Throwable ex;
+ ExceptionNode next;
+ final long thrower; // use id not ref to avoid weak cycles
+ final int hashCode; // store task hashCode before weak ref disappears
+ ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
+ super(task, exceptionTableRefQueue);
+ this.ex = ex;
+ this.next = next;
+ this.thrower = Thread.currentThread().getId();
+ this.hashCode = System.identityHashCode(task);
+ }
+ }
+
+ /**
+ * Records exception and sets status.
+ *
+ * @return status on exit
+ */
+ final int recordExceptionalCompletion(Throwable ex) {
+ int s;
+ if ((s = status) >= 0) {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ for (ExceptionNode e = t[i]; ; e = e.next) {
+ if (e == null) {
+ t[i] = new ExceptionNode(this, ex, t[i]);
+ break;
+ }
+ if (e.get() == this) // already present
+ break;
+ }
+ } finally {
+ lock.unlock();
+ }
+ s = setCompletion(EXCEPTIONAL);
+ }
+ return s;
+ }
+
+ /**
+ * Records exception and possibly propagates.
+ *
+ * @return status on exit
+ */
+ private int setExceptionalCompletion(Throwable ex) {
+ int s = recordExceptionalCompletion(ex);
+ if ((s & DONE_MASK) == EXCEPTIONAL)
+ internalPropagateException(ex);
+ return s;
+ }
+
+ /**
+ * Hook for exception propagation support for tasks with completers.
+ */
+ void internalPropagateException(Throwable ex) {
+ }
+
+ /**
+ * Cancels, ignoring any exceptions thrown by cancel. Used during
+ * worker and pool shutdown. Cancel is spec'ed not to throw any
+ * exceptions, but if it does anyway, we have no recourse during
+ * shutdown, so guard against this case.
+ */
+ static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
+ if (t != null && t.status >= 0) {
+ try {
+ t.cancel(false);
+ } catch (Throwable ignore) {
+ }
+ }
+ }
+
+ /**
+ * Removes exception node and clears status.
+ */
+ private void clearExceptionalCompletion() {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e.get() == this) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ expungeStaleExceptions();
+ status = 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * Returns a rethrowable exception for the given task, if
+ * available. To provide accurate stack traces, if the exception
+ * was not thrown by the current thread, we try to create a new
+ * exception of the same type as the one thrown, but with the
+ * recorded exception as its cause. If there is no such
+ * constructor, we instead try to use a no-arg constructor,
+ * followed by initCause, to the same effect. If none of these
+ * apply, or any fail due to other exceptions, we return the
+ * recorded exception, which is still correct, although it may
+ * contain a misleading stack trace.
+ *
+ * @return the exception, or null if none
+ */
+ private Throwable getThrowableException() {
+ if ((status & DONE_MASK) != EXCEPTIONAL)
+ return null;
+ int h = System.identityHashCode(this);
+ ExceptionNode e;
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ e = t[h & (t.length - 1)];
+ while (e != null && e.get() != this)
+ e = e.next;
+ } finally {
+ lock.unlock();
+ }
+ Throwable ex;
+ if (e == null || (ex = e.ex) == null)
+ return null;
+ if (e.thrower != Thread.currentThread().getId()) {
+ Class<? extends Throwable> ec = ex.getClass();
+ try {
+ Constructor<?> noArgCtor = null;
+ Constructor<?>[] cs = ec.getConstructors();// public ctors only
+ for (int i = 0; i < cs.length; ++i) {
+ Constructor<?> c = cs[i];
+ Class<?>[] ps = c.getParameterTypes();
+ if (ps.length == 0)
+ noArgCtor = c;
+ else if (ps.length == 1 && ps[0] == Throwable.class)
+ return (Throwable)(c.newInstance(ex));
+ }
+ if (noArgCtor != null) {
+ Throwable wx = (Throwable)(noArgCtor.newInstance());
+ wx.initCause(ex);
+ return wx;
+ }
+ } catch (Exception ignore) {
+ }
+ }
+ return ex;
+ }
+
+ /**
+ * Poll stale refs and remove them. Call only while holding lock.
+ */
+ private static void expungeStaleExceptions() {
+ for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
+ if (x instanceof ExceptionNode) {
+ int hashCode = ((ExceptionNode)x).hashCode;
+ ExceptionNode[] t = exceptionTable;
+ int i = hashCode & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e == x) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ }
+ }
+ }
+
+ /**
+ * If lock is available, poll stale refs and remove them.
+ * Called from ForkJoinPool when pools become quiescent.
+ */
+ static final void helpExpungeStaleExceptions() {
+ final ReentrantLock lock = exceptionTableLock;
+ if (lock.tryLock()) {
+ try {
+ expungeStaleExceptions();
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+
+ /**
+ * A version of "sneaky throw" to relay exceptions
+ */
+ static void rethrow(Throwable ex) {
+ if (ex != null)
+ ForkJoinTask.<RuntimeException>uncheckedThrow(ex);
+ }
+
+ /**
+ * The sneaky part of sneaky throw, relying on generics
+ * limitations to evade compiler complaints about rethrowing
+ * unchecked exceptions
+ */
+ @SuppressWarnings("unchecked") static <T extends Throwable>
+ void uncheckedThrow(Throwable t) throws T {
+ throw (T)t; // rely on vacuous cast
+ }
+
+ /**
+ * Throws exception, if any, associated with the given status.
+ */
+ private void reportException(int s) {
+ if (s == CANCELLED)
+ throw new CancellationException();
+ if (s == EXCEPTIONAL)
+ rethrow(getThrowableException());
+ }
+
+ // public methods
+
+ /**
+ * Arranges to asynchronously execute this task in the pool the
+ * current task is running in, if applicable, or using the {@link
+ * ForkJoinPool#commonPool()} if not {@link #inForkJoinPool}. While
+ * it is not necessarily enforced, it is a usage error to fork a
+ * task more than once unless it has completed and been
+ * reinitialized. Subsequent modifications to the state of this
+ * task or any data it operates on are not necessarily
+ * consistently observable by any thread other than the one
+ * executing it unless preceded by a call to {@link #join} or
+ * related methods, or a call to {@link #isDone} returning {@code
+ * true}.
+ *
+ * @return {@code this}, to simplify usage
+ */
+ public final ForkJoinTask<V> fork() {
+ Thread t;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ ((ForkJoinWorkerThread)t).workQueue.push(this);
+ else
+ ForkJoinPool.common.externalPush(this);
+ return this;
+ }
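+
+ /*
+ * Editorial sketch (not part of the upstream source): the canonical
+ * fork()/join() pairing inside a divide-and-conquer task. The class
+ * CountTask and its threshold are hypothetical:
+ *
+ * class CountTask extends RecursiveTask<Long> {
+ * final long[] a; final int lo, hi;
+ * CountTask(long[] a, int lo, int hi) {
+ * this.a = a; this.lo = lo; this.hi = hi;
+ * }
+ * protected Long compute() {
+ * if (hi - lo <= 1024) { // small enough: compute directly
+ * long n = 0;
+ * for (int i = lo; i < hi; ++i) if (a[i] > 0) ++n;
+ * return n;
+ * }
+ * int mid = (lo + hi) >>> 1;
+ * CountTask left = new CountTask(a, lo, mid);
+ * left.fork(); // schedule left half asynchronously
+ * long right = new CountTask(a, mid, hi).compute();
+ * return right + left.join(); // await the forked half
+ * }
+ * }
+ */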
+
+ /**
+ * Returns the result of the computation when it {@link #isDone is
+ * done}. This method differs from {@link #get()} in that
+ * abnormal completion results in {@code RuntimeException} or
+ * {@code Error}, not {@code ExecutionException}, and that
+ * interrupts of the calling thread do <em>not</em> cause the
+ * method to abruptly return by throwing {@code
+ * InterruptedException}.
+ *
+ * @return the computed result
+ */
+ public final V join() {
+ int s;
+ if ((s = doJoin() & DONE_MASK) != NORMAL)
+ reportException(s);
+ return getRawResult();
+ }
+
+ /**
+ * Commences performing this task, awaits its completion if
+ * necessary, and returns its result, or throws an (unchecked)
+ * {@code RuntimeException} or {@code Error} if the underlying
+ * computation did so.
+ *
+ * @return the computed result
+ */
+ public final V invoke() {
+ int s;
+ if ((s = doInvoke() & DONE_MASK) != NORMAL)
+ reportException(s);
+ return getRawResult();
+ }
+
+ /**
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, the
+ * other may be cancelled. However, the execution status of
+ * individual tasks is not guaranteed upon exceptional return. The
+ * status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * @param t1 the first task
+ * @param t2 the second task
+ * @throws NullPointerException if any task is null
+ */
+ public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
+ int s1, s2;
+ t2.fork();
+ if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
+ t1.reportException(s1);
+ if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
+ t2.reportException(s2);
+ }
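+
+ /*
+ * Editorial sketch (not part of the upstream source): per the
+ * implementation above, this method forks t2 and invokes t1 in the
+ * caller, so both results are ready once it returns. Fib is a
+ * hypothetical RecursiveTask<Integer>:
+ *
+ * Fib left = new Fib(n - 1);
+ * Fib right = new Fib(n - 2);
+ * invokeAll(left, right);
+ * int result = left.join() + right.join(); // both done; joins return at once
+ */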
+
+ /**
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, others
+ * may be cancelled. However, the execution status of individual
+ * tasks is not guaranteed upon exceptional return. The status of
+ * each task may be obtained using {@link #getException()} and
+ * related methods to check if they have been cancelled, completed
+ * normally or exceptionally, or left unprocessed.
+ *
+ * @param tasks the tasks
+ * @throws NullPointerException if any task is null
+ */
+ public static void invokeAll(ForkJoinTask<?>... tasks) {
+ Throwable ex = null;
+ int last = tasks.length - 1;
+ for (int i = last; i >= 0; --i) {
+ ForkJoinTask<?> t = tasks[i];
+ if (t == null) {
+ if (ex == null)
+ ex = new NullPointerException();
+ }
+ else if (i != 0)
+ t.fork();
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
+ }
+ for (int i = 1; i <= last; ++i) {
+ ForkJoinTask<?> t = tasks[i];
+ if (t != null) {
+ if (ex != null)
+ t.cancel(false);
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
+ }
+ }
+ if (ex != null)
+ rethrow(ex);
+ }
+
+ /**
+ * Forks all tasks in the specified collection, returning when
+ * {@code isDone} holds for each task or an (unchecked) exception
+ * is encountered, in which case the exception is rethrown. If
+ * more than one task encounters an exception, then this method
+ * throws any one of these exceptions. If any task encounters an
+ * exception, others may be cancelled. However, the execution
+ * status of individual tasks is not guaranteed upon exceptional
+ * return. The status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * @param tasks the collection of tasks
+ * @param <T> the type of the values returned from the tasks
+ * @return the tasks argument, to simplify usage
+ * @throws NullPointerException if tasks or any element are null
+ */
+ public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
+ if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
+ invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
+ return tasks;
+ }
+ @SuppressWarnings("unchecked")
+ List<? extends ForkJoinTask<?>> ts =
+ (List<? extends ForkJoinTask<?>>) tasks;
+ Throwable ex = null;
+ int last = ts.size() - 1;
+ for (int i = last; i >= 0; --i) {
+ ForkJoinTask<?> t = ts.get(i);
+ if (t == null) {
+ if (ex == null)
+ ex = new NullPointerException();
+ }
+ else if (i != 0)
+ t.fork();
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
+ }
+ for (int i = 1; i <= last; ++i) {
+ ForkJoinTask<?> t = ts.get(i);
+ if (t != null) {
+ if (ex != null)
+ t.cancel(false);
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
+ }
+ }
+ if (ex != null)
+ rethrow(ex);
+ return tasks;
+ }
+
+ /**
+ * Attempts to cancel execution of this task. This attempt will
+ * fail if the task has already completed or could not be
+ * cancelled for some other reason. If successful, and this task
+ * has not started when {@code cancel} is called, execution of
+ * this task is suppressed. After this method returns
+ * successfully, unless there is an intervening call to {@link
+ * #reinitialize}, subsequent calls to {@link #isCancelled},
+ * {@link #isDone}, and {@code cancel} will return {@code true}
+ * and calls to {@link #join} and related methods will result in
+ * {@code CancellationException}.
+ *
+ * <p>This method may be overridden in subclasses, but if so, must
+ * still ensure that these properties hold. In particular, the
+ * {@code cancel} method itself must not throw exceptions.
+ *
+ * <p>This method is designed to be invoked by <em>other</em>
+ * tasks. To terminate the current task, you can just return or
+ * throw an unchecked exception from its computation method, or
+ * invoke {@link #completeExceptionally(Throwable)}.
+ *
+ * @param mayInterruptIfRunning this value has no effect in the
+ * default implementation because interrupts are not used to
+ * control cancellation.
+ *
+ * @return {@code true} if this task is now cancelled
+ */
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
+ }
+
+ public final boolean isDone() {
+ return status < 0;
+ }
+
+ public final boolean isCancelled() {
+ return (status & DONE_MASK) == CANCELLED;
+ }
+
+ /**
+ * Returns {@code true} if this task threw an exception or was cancelled.
+ *
+ * @return {@code true} if this task threw an exception or was cancelled
+ */
+ public final boolean isCompletedAbnormally() {
+ return status < NORMAL;
+ }
+
+ /**
+ * Returns {@code true} if this task completed without throwing an
+ * exception and was not cancelled.
+ *
+ * @return {@code true} if this task completed without throwing an
+ * exception and was not cancelled
+ */
+ public final boolean isCompletedNormally() {
+ return (status & DONE_MASK) == NORMAL;
+ }
+
+ /**
+ * Returns the exception thrown by the base computation, or a
+ * {@code CancellationException} if cancelled, or {@code null} if
+ * none or if this task has not yet completed.
+ *
+ * @return the exception, or {@code null} if none
+ */
+ public final Throwable getException() {
+ int s = status & DONE_MASK;
+ return ((s >= NORMAL) ? null :
+ (s == CANCELLED) ? new CancellationException() :
+ getThrowableException());
+ }
+
+ /**
+ * Completes this task abnormally, and if not already aborted or
+ * cancelled, causes it to throw the given exception upon
+ * {@code join} and related operations. This method may be used
+ * to induce exceptions in asynchronous tasks, or to force
+ * completion of tasks that would not otherwise complete. Its use
+ * in other situations is discouraged. This method is
+ * overridable, but overridden versions must invoke {@code super}
+ * implementation to maintain guarantees.
+ *
+ * @param ex the exception to throw. If this exception is not a
+ * {@code RuntimeException} or {@code Error}, the actual exception
+ * thrown will be a {@code RuntimeException} with cause {@code ex}.
+ */
+ public void completeExceptionally(Throwable ex) {
+ setExceptionalCompletion((ex instanceof RuntimeException) ||
+ (ex instanceof Error) ? ex :
+ new RuntimeException(ex));
+ }
+
+ /**
+ * Completes this task and, if not already aborted or cancelled,
+ * returns the given value as the result of subsequent
+ * invocations of {@code join} and related operations. This method
+ * may be used to provide results for asynchronous tasks, or to
+ * provide alternative handling for tasks that would not otherwise
+ * complete normally. Its use in other situations is
+ * discouraged. This method is overridable, but overridden
+ * versions must invoke {@code super} implementation to maintain
+ * guarantees.
+ *
+ * @param value the result value for this task
+ */
+ public void complete(V value) {
+ try {
+ setRawResult(value);
+ } catch (Throwable rex) {
+ setExceptionalCompletion(rex);
+ return;
+ }
+ setCompletion(NORMAL);
+ }
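+
+ /*
+ * Editorial sketch (not part of the upstream source): complete and
+ * completeExceptionally let code outside exec() finish an
+ * event-style task. AsyncResult is hypothetical; its exec() returns
+ * false so joiners wait until complete(...) is called:
+ *
+ * class AsyncResult<T> extends ForkJoinTask<T> {
+ * volatile T value;
+ * public T getRawResult() { return value; }
+ * protected void setRawResult(T v) { value = v; }
+ * protected boolean exec() { return false; } // completed externally
+ * }
+ *
+ * // producer: result.complete(v); or result.completeExceptionally(ex);
+ * // consumer: T v = result.join();
+ */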
+
+ /**
+ * Completes this task normally without setting a value. The most
+ * recent value established by {@link #setRawResult} (or {@code
+ * null} by default) will be returned as the result of subsequent
+ * invocations of {@code join} and related operations.
+ *
+ * @since 1.8
+ */
+ public final void quietlyComplete() {
+ setCompletion(NORMAL);
+ }
+
+ /**
+ * Waits if necessary for the computation to complete, and then
+ * retrieves its result.
+ *
+ * @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
+ */
+ public final V get() throws InterruptedException, ExecutionException {
+ int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
+ doJoin() : externalInterruptibleAwaitDone();
+ Throwable ex;
+ if ((s &= DONE_MASK) == CANCELLED)
+ throw new CancellationException();
+ if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
+ return getRawResult();
+ }
+
+ /**
+ * Waits if necessary for at most the given time for the computation
+ * to complete, and then retrieves its result, if available.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
+ * @throws TimeoutException if the wait timed out
+ */
+ public final V get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ // Messy in part because we measure in nanosecs, but wait in millisecs
+ int s; long ms;
+ long ns = unit.toNanos(timeout);
+ ForkJoinPool cp;
+ if ((s = status) >= 0 && ns > 0L) {
+ long deadline = System.nanoTime() + ns;
+ ForkJoinPool p = null;
+ ForkJoinPool.WorkQueue w = null;
+ Thread t = Thread.currentThread();
+ if (t instanceof ForkJoinWorkerThread) {
+ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
+ p = wt.pool;
+ w = wt.workQueue;
+ p.helpJoinOnce(w, this); // no retries on failure
+ }
+ else if ((cp = ForkJoinPool.common) != null) {
+ if (this instanceof CountedCompleter)
+ cp.externalHelpComplete((CountedCompleter<?>)this);
+ else if (cp.tryExternalUnpush(this))
+ doExec();
+ }
+ boolean canBlock = false;
+ boolean interrupted = false;
+ try {
+ while ((s = status) >= 0) {
+ if (w != null && w.qlock < 0)
+ cancelIgnoringExceptions(this);
+ else if (!canBlock) {
+ if (p == null || p.tryCompensate(p.ctl))
+ canBlock = true;
+ }
+ else {
+ if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
+ U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait(ms);
+ } catch (InterruptedException ie) {
+ if (p == null)
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ if ((s = status) < 0 || interrupted ||
+ (ns = deadline - System.nanoTime()) <= 0L)
+ break;
+ }
+ }
+ } finally {
+ if (p != null && canBlock)
+ p.incrementActiveCount();
+ }
+ if (interrupted)
+ throw new InterruptedException();
+ }
+ if ((s &= DONE_MASK) != NORMAL) {
+ Throwable ex;
+ if (s == CANCELLED)
+ throw new CancellationException();
+ if (s != EXCEPTIONAL)
+ throw new TimeoutException();
+ if ((ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
+ }
+ return getRawResult();
+ }
+
+ /**
+ * Joins this task, without returning its result or throwing its
+ * exception. This method may be useful when processing
+ * collections of tasks when some have been cancelled or otherwise
+ * known to have aborted.
+ */
+ public final void quietlyJoin() {
+ doJoin();
+ }
+
+ /**
+ * Commences performing this task and awaits its completion if
+ * necessary, without returning its result or throwing its
+ * exception.
+ */
+ public final void quietlyInvoke() {
+ doInvoke();
+ }
+
+ /**
+ * Possibly executes tasks until the pool hosting the current task
+ * {@link ForkJoinPool#isQuiescent is quiescent}. This method may
+ * be of use in designs in which many tasks are forked, but none
+ * are explicitly joined, instead executing them until all are
+ * processed.
+ */
+ public static void helpQuiesce() {
+ Thread t;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
+ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
+ wt.pool.helpQuiescePool(wt.workQueue);
+ }
+ else
+ ForkJoinPool.quiesceCommonPool();
+ }
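+
+ /*
+ * Editorial sketch (not part of the upstream source): the
+ * fork-without-join style this method supports. CrawlTask and pages
+ * are hypothetical:
+ *
+ * protected void compute() {
+ * for (Page p : pages)
+ * new CrawlTask(p).fork(); // never joined individually
+ * helpQuiesce(); // execute/steal tasks until all are processed
+ * }
+ */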
+
+ /**
+ * Resets the internal bookkeeping state of this task, allowing a
+ * subsequent {@code fork}. This method allows repeated reuse of
+ * this task, but only if reuse occurs when this task has either
+ * never been forked, or has been forked, then completed and all
+ * outstanding joins of this task have also completed. Effects
+ * under any other usage conditions are not guaranteed.
+ * This method may be useful when executing
+ * pre-constructed trees of subtasks in loops.
+ *
+ * <p>Upon completion of this method, {@code isDone()} reports
+ * {@code false}, and {@code getException()} reports {@code
+ * null}. However, the value returned by {@code getRawResult} is
+ * unaffected. To clear this value, you can invoke {@code
+ * setRawResult(null)}.
+ */
+ public void reinitialize() {
+ if ((status & DONE_MASK) == EXCEPTIONAL)
+ clearExceptionalCompletion();
+ else
+ status = 0;
+ }
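+
+ /*
+ * Editorial sketch (not part of the upstream source): reusing one
+ * pre-constructed task tree across iterations. pool and task are
+ * hypothetical:
+ *
+ * for (int round = 0; round < rounds; ++round) {
+ * pool.invoke(task); // completes, including all joins
+ * task.reinitialize(); // isDone() is false again; ready to rerun
+ * }
+ */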
+
+ /**
+ * Returns the pool hosting the current task execution, or null
+ * if this task is executing outside of any ForkJoinPool.
+ *
+ * @see #inForkJoinPool
+ * @return the pool, or {@code null} if none
+ */
+ public static ForkJoinPool getPool() {
+ Thread t = Thread.currentThread();
+ return (t instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread) t).pool : null;
+ }
+
+ /**
+ * Returns {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation.
+ *
+ * @return {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation,
+ * or {@code false} otherwise
+ */
+ public static boolean inForkJoinPool() {
+ return Thread.currentThread() instanceof ForkJoinWorkerThread;
+ }
+
+ /**
+ * Tries to unschedule this task for execution. This method will
+ * typically (but is not guaranteed to) succeed if this task is
+ * the most recently forked task by the current thread, and has
+ * not commenced executing in another thread. This method may be
+ * useful when arranging alternative local processing of tasks
+ * that could have been, but were not, stolen.
+ *
+ * @return {@code true} if unforked
+ */
+ public boolean tryUnfork() {
+ Thread t;
+ return (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread)t).workQueue.tryUnpush(this) :
+ ForkJoinPool.common.tryExternalUnpush(this));
+ }
+
+ /**
+ * Returns an estimate of the number of tasks that have been
+ * forked by the current worker thread but not yet executed. This
+ * value may be useful for heuristic decisions about whether to
+ * fork other tasks.
+ *
+ * @return the number of tasks
+ */
+ public static int getQueuedTaskCount() {
+ Thread t; ForkJoinPool.WorkQueue q;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ q = ((ForkJoinWorkerThread)t).workQueue;
+ else
+ q = ForkJoinPool.commonSubmitterQueue();
+ return (q == null) ? 0 : q.queueSize();
+ }
+
+ /**
+ * Returns an estimate of how many more locally queued tasks are
+ * held by the current worker thread than there are other worker
+ * threads that might steal them, or zero if this thread is not
+ * operating in a ForkJoinPool. This value may be useful for
+ * heuristic decisions about whether to fork other tasks. In many
+ * usages of ForkJoinTasks, at steady state, each worker should
+ * aim to maintain a small constant surplus (for example, 3) of
+ * tasks, and to process computations locally if this threshold is
+ * exceeded.
+ *
+ * @return the surplus number of tasks, which may be negative
+ */
+ public static int getSurplusQueuedTaskCount() {
+ return ForkJoinPool.getSurplusQueuedTaskCount();
+ }
+
+ // Extension methods
+
+ /**
+ * Returns the result that would be returned by {@link #join}, even
+ * if this task completed abnormally, or {@code null} if this task
+ * is not known to have been completed. This method is designed
+ * to aid debugging, as well as to support extensions. Its use in
+ * any other context is discouraged.
+ *
+ * @return the result, or {@code null} if not completed
+ */
+ public abstract V getRawResult();
+
+ /**
+ * Forces the given value to be returned as a result. This method
+ * is designed to support extensions, and should not in general be
+ * called otherwise.
+ *
+ * @param value the value
+ */
+ protected abstract void setRawResult(V value);
+
+ /**
+ * Immediately performs the base action of this task and returns
+ * true if, upon return from this method, this task is guaranteed
+ * to have completed normally. This method may return false
+ * otherwise, to indicate that this task is not necessarily
+ * complete (or is not known to be complete), for example in
+ * asynchronous actions that require explicit invocations of
+ * completion methods. This method may also throw an (unchecked)
+ * exception to indicate abnormal exit. This method is designed to
+ * support extensions, and should not in general be called
+ * otherwise.
+ *
+ * @return {@code true} if this task is known to have completed normally
+ */
+ protected abstract boolean exec();
+
+ /**
+ * Returns, but does not unschedule or execute, a task queued by
+ * the current thread but not yet executed, if one is immediately
+ * available. There is no guarantee that this task will actually
+ * be polled or executed next. Conversely, this method may return
+ * null even if a task exists but cannot be accessed without
+ * contention with other threads. This method is designed
+ * primarily to support extensions, and is unlikely to be useful
+ * otherwise.
+ *
+ * @return the next task, or {@code null} if none are available
+ */
+ protected static ForkJoinTask<?> peekNextLocalTask() {
+ Thread t; ForkJoinPool.WorkQueue q;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ q = ((ForkJoinWorkerThread)t).workQueue;
+ else
+ q = ForkJoinPool.commonSubmitterQueue();
+ return (q == null) ? null : q.peek();
+ }
+
+ /**
+ * Unschedules and returns, without executing, the next task
+ * queued by the current thread but not yet executed, if the
+ * current thread is operating in a ForkJoinPool. This method is
+ * designed primarily to support extensions, and is unlikely to be
+ * useful otherwise.
+ *
+ * @return the next task, or {@code null} if none are available
+ */
+ protected static ForkJoinTask<?> pollNextLocalTask() {
+ Thread t;
+ return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread)t).workQueue.nextLocalTask() :
+ null;
+ }
+
+ /**
+ * If the current thread is operating in a ForkJoinPool,
+ * unschedules and returns, without executing, the next task
+ * queued by the current thread but not yet executed, if one is
+ * available, or if not available, a task that was forked by some
+ * other thread, if available. Availability may be transient, so a
+ * {@code null} result does not necessarily imply quiescence of
+ * the pool this task is operating in. This method is designed
+ * primarily to support extensions, and is unlikely to be useful
+ * otherwise.
+ *
+ * @return a task, or {@code null} if none are available
+ */
+ protected static ForkJoinTask<?> pollTask() {
+ Thread t; ForkJoinWorkerThread wt;
+ return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ (wt = (ForkJoinWorkerThread)t).pool.nextTaskFor(wt.workQueue) :
+ null;
+ }
+
+ // tag operations
+
+ /**
+ * Returns the tag for this task.
+ *
+ * @return the tag for this task
+ * @since 1.8
+ */
+ public final short getForkJoinTaskTag() {
+ return (short)status;
+ }
+
+ /**
+ * Atomically sets the tag value for this task.
+ *
+ * @param tag the tag value
+ * @return the previous value of the tag
+ * @since 1.8
+ */
+ public final short setForkJoinTaskTag(short tag) {
+ for (int s;;) {
+ if (U.compareAndSwapInt(this, STATUS, s = status,
+ (s & ~SMASK) | (tag & SMASK)))
+ return (short)s;
+ }
+ }
+
+ /**
+ * Atomically conditionally sets the tag value for this task.
+ * Among other applications, tags can be used as visit markers
+ * in tasks operating on graphs, as in methods that check: {@code
+ * if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))}
+ * before processing, otherwise exiting because the node has
+ * already been visited.
+ *
+ * @param e the expected tag value
+ * @param tag the new tag value
+ * @return {@code true} if successful; i.e., the current value was
+ * equal to e and is now tag.
+ * @since 1.8
+ */
+ public final boolean compareAndSetForkJoinTaskTag(short e, short tag) {
+ for (int s;;) {
+ if ((short)(s = status) != e)
+ return false;
+ if (U.compareAndSwapInt(this, STATUS, s,
+ (s & ~SMASK) | (tag & SMASK)))
+ return true;
+ }
+ }
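+
+ /*
+ * Editorial sketch (not part of the upstream source): the visit-marker
+ * idiom described above, for tasks arranged as a graph. GraphTask and
+ * successors are hypothetical:
+ *
+ * protected void compute() {
+ * if (!compareAndSetForkJoinTaskTag((short)0, (short)1))
+ * return; // another worker already visited this node
+ * for (GraphTask s : successors)
+ * s.fork();
+ * process();
+ * }
+ */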
+
+ /**
+ * Adaptor for Runnables. This implements RunnableFuture
+ * to be compliant with AbstractExecutorService constraints
+ * when used in ForkJoinPool.
+ */
+ static final class AdaptedRunnable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Runnable runnable;
+ T result;
+ AdaptedRunnable(Runnable runnable, T result) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ this.result = result; // OK to set this even before completion
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Runnables without results
+ */
+ static final class AdaptedRunnableAction extends ForkJoinTask<Void>
+ implements RunnableFuture<Void> {
+ final Runnable runnable;
+ AdaptedRunnableAction(Runnable runnable) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ }
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void v) { }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Runnables in which failure forces worker exception
+ */
+ static final class RunnableExecuteAction extends ForkJoinTask<Void> {
+ final Runnable runnable;
+ RunnableExecuteAction(Runnable runnable) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ }
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void v) { }
+ public final boolean exec() { runnable.run(); return true; }
+ void internalPropagateException(Throwable ex) {
+ rethrow(ex); // rethrow outside exec() catches.
+ }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Callables
+ */
+ static final class AdaptedCallable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Callable<? extends T> callable;
+ T result;
+ AdaptedCallable(Callable<? extends T> callable) {
+ if (callable == null) throw new NullPointerException();
+ this.callable = callable;
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() {
+ try {
+ result = callable.call();
+ return true;
+ } catch (Error err) {
+ throw err;
+ } catch (RuntimeException rex) {
+ throw rex;
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 2838392045355241008L;
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * a null result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @return the task
+ */
+ public static ForkJoinTask<?> adapt(Runnable runnable) {
+ return new AdaptedRunnableAction(runnable);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * the given result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @param result the result upon completion
+ * @param <T> the type of the result
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
+ return new AdaptedRunnable<T>(runnable, result);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code call}
+ * method of the given {@code Callable} as its action, and returns
+ * its result upon {@link #join}, translating any checked exceptions
+ * encountered into {@code RuntimeException}.
+ *
+ * @param callable the callable action
+ * @param <T> the type of the callable's result
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
+ return new AdaptedCallable<T>(callable);
+ }
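+
+ /*
+ * Editorial sketch (not part of the upstream source): adapting plain
+ * Runnable/Callable work for a pool. pool, flushLogs and computeSum
+ * are hypothetical:
+ *
+ * ForkJoinTask<?> log = ForkJoinTask.adapt(new Runnable() {
+ * public void run() { flushLogs(); }
+ * });
+ * ForkJoinTask<Integer> sum = ForkJoinTask.adapt(new Callable<Integer>() {
+ * public Integer call() { return computeSum(); }
+ * });
+ * pool.execute(log);
+ * int total = pool.invoke(sum);
+ */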
+
+ // Serialization support
+
+ private static final long serialVersionUID = -7721805057305804111L;
+
+ /**
+ * Saves this task to a stream (that is, serializes it).
+ *
+ * @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
+ * @serialData the current run status and the exception thrown
+ * during execution, or {@code null} if none
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeObject(getException());
+ }
+
+ /**
+ * Reconstitutes this task from a stream (that is, deserializes it).
+ * @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ Object ex = s.readObject();
+ if (ex != null)
+ setExceptionalCompletion((Throwable)ex);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long STATUS;
+
+ static {
+ exceptionTableLock = new ReentrantLock();
+ exceptionTableRefQueue = new ReferenceQueue<Object>();
+ exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
+ try {
+ U = getUnsafe();
+ Class<?> k = ForkJoinTask.class;
+ STATUS = U.objectFieldOffset
+ (k.getDeclaredField("status"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/ForkJoinWorkerThread.java b/src/main/java/jsr166e/ForkJoinWorkerThread.java
new file mode 100644
index 0000000..dc32aa6
--- /dev/null
+++ b/src/main/java/jsr166e/ForkJoinWorkerThread.java
@@ -0,0 +1,123 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+
+/**
+ * A thread managed by a {@link ForkJoinPool}, which executes
+ * {@link ForkJoinTask}s.
+ * This class is subclassable solely for the sake of adding
+ * functionality -- there are no overridable methods dealing with
+ * scheduling or execution. However, you can override initialization
+ * and termination methods surrounding the main task processing loop.
+ * If you do create such a subclass, you will also need to supply a
+ * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to
+ * {@linkplain ForkJoinPool#ForkJoinPool use it} in a {@code ForkJoinPool}.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public class ForkJoinWorkerThread extends Thread {
+ /*
+ * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
+ * ForkJoinTasks. For explanation, see the internal documentation
+ * of class ForkJoinPool.
+ *
+ * This class just maintains links to its pool and WorkQueue. The
+ * pool field is set immediately upon construction, but the
+ * workQueue field is not set until a call to registerWorker
+ * completes. This leads to a visibility race that is tolerated
+ * by requiring that the workQueue field is only accessed by the
+ * owning thread.
+ */
+
+ final ForkJoinPool pool; // the pool this thread works in
+ final ForkJoinPool.WorkQueue workQueue; // work-stealing mechanics
+
+ /**
+ * Creates a ForkJoinWorkerThread operating in the given pool.
+ *
+ * @param pool the pool this thread works in
+ * @throws NullPointerException if pool is null
+ */
+ protected ForkJoinWorkerThread(ForkJoinPool pool) {
+ // Use a placeholder until a useful name can be set in registerWorker
+ super("aForkJoinWorkerThread");
+ this.pool = pool;
+ this.workQueue = pool.registerWorker(this);
+ }
+
+ /**
+ * Returns the pool hosting this thread.
+ *
+ * @return the pool
+ */
+ public ForkJoinPool getPool() {
+ return pool;
+ }
+
+ /**
+ * Returns the unique index number of this thread in its pool.
+ * The returned value ranges from zero to the maximum number of
+ * threads (minus one) that may exist in the pool, and does not
+ * change during the lifetime of the thread. This method may be
+ * useful for applications that track status or collect results
+ * per-worker-thread rather than per-task.
+ *
+ * @return the index number
+ */
+ public int getPoolIndex() {
+ return workQueue.poolIndex >>> 1; // ignore odd/even tag bit
+ }
+
+ /**
+ * Initializes internal state after construction but before
+ * processing any tasks. If you override this method, you must
+ * invoke {@code super.onStart()} at the beginning of the method.
+ * Initialization requires care: Most fields must have legal
+ * default values, to ensure that attempted accesses from other
+ * threads work correctly even before this thread starts
+ * processing tasks.
+ */
+ protected void onStart() {
+ }
+
+ /**
+ * Performs cleanup associated with termination of this worker
+ * thread. If you override this method, you must invoke
+ * {@code super.onTermination} at the end of the overridden method.
+ *
+ * @param exception the exception causing this thread to abort due
+ * to an unrecoverable error, or {@code null} if completed normally
+ */
+ protected void onTermination(Throwable exception) {
+ }
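+
+ /*
+ * Editorial sketch (not part of the upstream source): a subclass using
+ * the lifecycle hooks, plus the factory needed to install it in a
+ * pool. Names are hypothetical:
+ *
+ * class LoggingWorkerThread extends ForkJoinWorkerThread {
+ * LoggingWorkerThread(ForkJoinPool pool) { super(pool); }
+ * protected void onStart() {
+ * super.onStart(); // invoke first, as required
+ * System.out.println(getName() + " starting");
+ * }
+ * protected void onTermination(Throwable ex) {
+ * System.out.println(getName() + " exiting, cause: " + ex);
+ * super.onTermination(ex); // invoke last, as required
+ * }
+ * }
+ *
+ * ForkJoinPool.ForkJoinWorkerThreadFactory factory =
+ * new ForkJoinPool.ForkJoinWorkerThreadFactory() {
+ * public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
+ * return new LoggingWorkerThread(pool);
+ * }
+ * };
+ */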
+
+ /**
+ * This method is required to be public, but should never be
+ * called explicitly. It performs the main run loop to execute
+ * {@link ForkJoinTask}s.
+ */
+ public void run() {
+ Throwable exception = null;
+ try {
+ onStart();
+ pool.runWorker(workQueue);
+ } catch (Throwable ex) {
+ exception = ex;
+ } finally {
+ try {
+ onTermination(exception);
+ } catch (Throwable ex) {
+ if (exception == null)
+ exception = ex;
+ } finally {
+ pool.deregisterWorker(this, exception);
+ }
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/LongAdder.java b/src/main/java/jsr166e/LongAdder.java
new file mode 100644
index 0000000..34ed76e
--- /dev/null
+++ b/src/main/java/jsr166e/LongAdder.java
@@ -0,0 +1,199 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import java.util.concurrent.atomic.AtomicLong;
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain an initially zero
+ * {@code long} sum. When updates (method {@link #add}) are contended
+ * across threads, the set of variables may grow dynamically to reduce
+ * contention. Method {@link #sum} (or, equivalently, {@link
+ * #longValue}) returns the current total combined across the
+ * variables maintaining the sum.
+ *
+ * <p>This class is usually preferable to {@link AtomicLong} when
+ * multiple threads update a common sum that is used for purposes such
+ * as collecting statistics, not for fine-grained synchronization
+ * control. Under low update contention, the two classes have similar
+ * characteristics. But under high contention, expected throughput of
+ * this class is significantly higher, at the expense of higher space
+ * consumption.
+ *
+ * <p>This class extends {@link Number}, but does <em>not</em> define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class LongAdder extends Striped64 implements Serializable {
+ private static final long serialVersionUID = 7249069246863182397L;
+
+ /**
+ * Version of plus for use in retryUpdate
+ */
+ final long fn(long v, long x) { return v + x; }
+
+ /**
+ * Creates a new adder with initial sum of zero.
+ */
+ public LongAdder() {
+ }
+
+ /**
+ * Adds the given value.
+ *
+ * @param x the value to add
+ */
+ public void add(long x) {
+ Cell[] as; long b, v; HashCode hc; Cell a; int n;
+ if ((as = cells) != null || !casBase(b = base, b + x)) {
+ boolean uncontended = true;
+ int h = (hc = threadHashCode.get()).code;
+ if (as == null || (n = as.length) < 1 ||
+ (a = as[(n - 1) & h]) == null ||
+ !(uncontended = a.cas(v = a.value, v + x)))
+ retryUpdate(x, hc, uncontended);
+ }
+ }
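+
+ /*
+ * Editorial sketch (not part of the upstream source): the intended
+ * pattern, a shared counter written by many threads and read
+ * occasionally:
+ *
+ * static final LongAdder requests = new LongAdder();
+ * // per request, on any thread:
+ * requests.increment();
+ * // monitoring:
+ * long served = requests.sum();
+ */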
+
+ /**
+ * Equivalent to {@code add(1)}.
+ */
+ public void increment() {
+ add(1L);
+ }
+
+ /**
+ * Equivalent to {@code add(-1)}.
+ */
+ public void decrement() {
+ add(-1L);
+ }
+
+ /**
+ * Returns the current sum. The returned value is <em>NOT</em> an
+ * atomic snapshot; invocation in the absence of concurrent
+ * updates returns an accurate result, but concurrent updates that
+ * occur while the sum is being calculated might not be
+ * incorporated.
+ *
+ * @return the sum
+ */
+ public long sum() {
+ long sum = base;
+ Cell[] as = cells;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null)
+ sum += a.value;
+ }
+ }
+ return sum;
+ }
+
+ /**
+ * Resets variables maintaining the sum to zero. This method may
+ * be a useful alternative to creating a new adder, but is only
+ * effective if there are no concurrent updates. Because this
+ * method is intrinsically racy, it should only be used when it is
+ * known that no threads are concurrently updating.
+ */
+ public void reset() {
+ internalReset(0L);
+ }
+
+ /**
+ * Equivalent in effect to {@link #sum} followed by {@link
+ * #reset}. This method may apply for example during quiescent
+ * points between multithreaded computations. If there are
+ * updates concurrent with this method, the returned value is
+ * <em>not</em> guaranteed to be the final value occurring before
+ * the reset.
+ *
+ * @return the sum
+ */
+ public long sumThenReset() {
+ long sum = base;
+ Cell[] as = cells;
+ base = 0L;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ sum += a.value;
+ a.value = 0L;
+ }
+ }
+ }
+ return sum;
+ }
+
+ /**
+ * Returns the String representation of the {@link #sum}.
+ * @return the String representation of the {@link #sum}
+ */
+ public String toString() {
+ return Long.toString(sum());
+ }
+
+ /**
+ * Equivalent to {@link #sum}.
+ *
+ * @return the sum
+ */
+ public long longValue() {
+ return sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as an {@code int} after a narrowing
+ * primitive conversion.
+ */
+ public int intValue() {
+ return (int)sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as a {@code float}
+ * after a widening primitive conversion.
+ */
+ public float floatValue() {
+ return (float)sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as a {@code double} after a widening
+ * primitive conversion.
+ */
+ public double doubleValue() {
+ return (double)sum();
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeLong(sum());
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ busy = 0;
+ cells = null;
+ base = s.readLong();
+ }
+
+}
diff --git a/src/main/java/jsr166e/LongAdderTable.java b/src/main/java/jsr166e/LongAdderTable.java
new file mode 100644
index 0000000..9e24a64
--- /dev/null
+++ b/src/main/java/jsr166e/LongAdderTable.java
@@ -0,0 +1,189 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import java.util.Map;
+import java.util.Set;
+import java.io.Serializable;
+
+/**
+ * A keyed table of adders that may be useful in computing frequency
+ * counts and histograms, or may be used as a form of multiset. A
+ * {@link LongAdder} is associated with each key. Keys are added to
+ * the table implicitly upon any attempt to update, or may be added
+ * explicitly using method {@link #install}.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class LongAdderTable<K> implements Serializable {
+ /** Relies on default serialization */
+ private static final long serialVersionUID = 7249369246863182397L;
+
+ /** The underlying map */
+ private final ConcurrentHashMapV8<K, LongAdder> map;
+
+ static final class CreateAdder
+ implements ConcurrentHashMapV8.Fun<Object, LongAdder> {
+ public LongAdder apply(Object unused) { return new LongAdder(); }
+ }
+
+ private static final CreateAdder createAdder = new CreateAdder();
+
+ /**
+ * Creates a new empty table.
+ */
+ public LongAdderTable() {
+ map = new ConcurrentHashMapV8<K, LongAdder>();
+ }
+
+ /**
+ * If the given key does not already exist in the table, inserts
+ * the key with an initial sum of zero; in either case, returns the
+ * adder associated with the key.
+ *
+ * @param key the key
+ * @return the adder associated with the key
+ */
+ public LongAdder install(K key) {
+ return map.computeIfAbsent(key, createAdder);
+ }
+
+ /**
+ * Adds the given value to the sum associated with the given
+ * key. If the key does not already exist in the table, it is
+ * inserted.
+ *
+ * @param key the key
+ * @param x the value to add
+ */
+ public void add(K key, long x) {
+ map.computeIfAbsent(key, createAdder).add(x);
+ }
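+
+ /*
+ * Editorial sketch (not part of the upstream source): frequency
+ * counting with implicit key insertion. words is a hypothetical
+ * Iterable<String>:
+ *
+ * LongAdderTable<String> freq = new LongAdderTable<String>();
+ * for (String w : words)
+ * freq.increment(w); // key inserted on first use
+ * long n = freq.sum("the");
+ */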
+
+ /**
+ * Increments the sum associated with the given key. If the key
+ * does not already exist in the table, it is inserted.
+ *
+ * @param key the key
+ */
+ public void increment(K key) { add(key, 1L); }
+
+ /**
+ * Decrements the sum associated with the given key. If the key
+ * does not already exist in the table, it is inserted.
+ *
+ * @param key the key
+ */
+ public void decrement(K key) { add(key, -1L); }
+
+ /**
+ * Returns the sum associated with the given key, or zero if the
+ * key does not currently exist in the table.
+ *
+ * @param key the key
+ * @return the sum associated with the key, or zero if the key is
+ * not in the table
+ */
+ public long sum(K key) {
+ LongAdder a = map.get(key);
+ return a == null ? 0L : a.sum();
+ }
+
+ /**
+ * Resets the sum associated with the given key to zero if the key
+ * exists in the table. This method does <em>NOT</em> add or
+ * remove the key from the table (see {@link #remove}).
+ *
+ * @param key the key
+ */
+ public void reset(K key) {
+ LongAdder a = map.get(key);
+ if (a != null)
+ a.reset();
+ }
+
+ /**
+ * Returns, then resets to zero, the sum associated with the given
+ * key if the key exists in the table. This method does <em>NOT</em>
+ * add or remove the key from the table (see {@link #remove}).
+ *
+ * @param key the key
+ * @return the previous sum, or zero if the key is not
+ * in the table
+ */
+ public long sumThenReset(K key) {
+ LongAdder a = map.get(key);
+ return a == null ? 0L : a.sumThenReset();
+ }
+
+ /**
+ * Returns the sum totalled across all keys.
+ *
+ * @return the sum totalled across all keys
+ */
+ public long sumAll() {
+ long sum = 0L;
+ for (LongAdder a : map.values())
+ sum += a.sum();
+ return sum;
+ }
+
+ /**
+ * Resets the sum associated with each key to zero.
+ */
+ public void resetAll() {
+ for (LongAdder a : map.values())
+ a.reset();
+ }
+
+ /**
+ * Totals, then resets, the sums associated with all keys.
+ *
+ * @return the sum totalled across all keys
+ */
+ public long sumThenResetAll() {
+ long sum = 0L;
+ for (LongAdder a : map.values())
+ sum += a.sumThenReset();
+ return sum;
+ }
+
+ /**
+ * Removes the given key from the table.
+ *
+ * @param key the key
+ */
+ public void remove(K key) { map.remove(key); }
+
+ /**
+ * Removes all keys from the table.
+ */
+ public void removeAll() { map.clear(); }
+
+ /**
+ * Returns the current set of keys.
+ *
+ * @return the current set of keys
+ */
+ public Set<K> keySet() {
+ return map.keySet();
+ }
+
+ /**
+ * Returns the current set of key-value mappings.
+ *
+ * @return the current set of key-value mappings
+ */
+ public Set<Map.Entry<K,LongAdder>> entrySet() {
+ return map.entrySet();
+ }
+
+}
diff --git a/src/main/java/jsr166e/LongMaxUpdater.java b/src/main/java/jsr166e/LongMaxUpdater.java
new file mode 100644
index 0000000..119cfef
--- /dev/null
+++ b/src/main/java/jsr166e/LongMaxUpdater.java
@@ -0,0 +1,183 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain a running {@code long}
+ * maximum with initial value {@code Long.MIN_VALUE}. When updates
+ * (method {@link #update}) are contended across threads, the set of
+ * variables may grow dynamically to reduce contention. Method {@link
+ * #max} (or, equivalently, {@link #longValue}) returns the current
+ * maximum across the variables maintaining updates.
+ *
+ * <p>This class extends {@link Number}, but does <em>not</em> define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class LongMaxUpdater extends Striped64 implements Serializable {
+ private static final long serialVersionUID = 7249069246863182397L;
+
+ /**
+ * Version of max for use in retryUpdate
+ */
+ final long fn(long v, long x) { return v > x ? v : x; }
+
+ /**
+ * Creates a new instance with initial maximum of {@code
+ * Long.MIN_VALUE}.
+ */
+ public LongMaxUpdater() {
+ base = Long.MIN_VALUE;
+ }
+
+ /**
+ * Updates the maximum to be at least the given value.
+ *
+ * @param x the value to update
+ */
+ public void update(long x) {
+ Cell[] as; long b, v; HashCode hc; Cell a; int n;
+ if ((as = cells) != null ||
+ (b = base) < x && !casBase(b, x)) {
+ boolean uncontended = true;
+ int h = (hc = threadHashCode.get()).code;
+ if (as == null || (n = as.length) < 1 ||
+ (a = as[(n - 1) & h]) == null ||
+ ((v = a.value) < x && !(uncontended = a.cas(v, x))))
+ retryUpdate(x, hc, uncontended);
+ }
+ }
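+
+ /*
+ * Editorial sketch (not part of the upstream source): maintaining a
+ * high-water mark such as worst-case latency. doWork is hypothetical:
+ *
+ * static final LongMaxUpdater worstNanos = new LongMaxUpdater();
+ * // per operation, on any thread:
+ * long t0 = System.nanoTime();
+ * doWork();
+ * worstNanos.update(System.nanoTime() - t0);
+ * // reporting:
+ * long worst = worstNanos.max();
+ */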
+
+ /**
+ * Returns the current maximum. The returned value is
+ * <em>NOT</em> an atomic snapshot; invocation in the absence of
+ * concurrent updates returns an accurate result, but concurrent
+ * updates that occur while the value is being calculated might
+ * not be incorporated.
+ *
+ * @return the maximum
+ */
+ public long max() {
+ Cell[] as = cells;
+ long max = base;
+ if (as != null) {
+ int n = as.length;
+ long v;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null && (v = a.value) > max)
+ max = v;
+ }
+ }
+ return max;
+ }
+
+ /**
+ * Resets variables maintaining updates to {@code Long.MIN_VALUE}.
+ * This method may be a useful alternative to creating a new
+ * updater, but is only effective if there are no concurrent
+ * updates. Because this method is intrinsically racy, it should
+ * only be used when it is known that no threads are concurrently
+ * updating.
+ */
+ public void reset() {
+ internalReset(Long.MIN_VALUE);
+ }
+
+ /**
+ * Equivalent in effect to {@link #max} followed by {@link
+ * #reset}. This method may apply for example during quiescent
+ * points between multithreaded computations. If there are
+ * updates concurrent with this method, the returned value is
+ * <em>not</em> guaranteed to be the final value occurring before
+ * the reset.
+ *
+ * @return the maximum
+ */
+ public long maxThenReset() {
+ Cell[] as = cells;
+ long max = base;
+ base = Long.MIN_VALUE;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ long v = a.value;
+ a.value = Long.MIN_VALUE;
+ if (v > max)
+ max = v;
+ }
+ }
+ }
+ return max;
+ }
+
+ /**
+ * Returns the String representation of the {@link #max}.
+ * @return the String representation of the {@link #max}
+ */
+ public String toString() {
+ return Long.toString(max());
+ }
+
+ /**
+ * Equivalent to {@link #max}.
+ *
+ * @return the maximum
+ */
+ public long longValue() {
+ return max();
+ }
+
+ /**
+ * Returns the {@link #max} as an {@code int} after a narrowing
+ * primitive conversion.
+ */
+ public int intValue() {
+ return (int)max();
+ }
+
+ /**
+ * Returns the {@link #max} as a {@code float}
+ * after a widening primitive conversion.
+ */
+ public float floatValue() {
+ return (float)max();
+ }
+
+ /**
+ * Returns the {@link #max} as a {@code double} after a widening
+ * primitive conversion.
+ */
+ public double doubleValue() {
+ return (double)max();
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeLong(max());
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ busy = 0;
+ cells = null;
+ base = s.readLong();
+ }
+
+}
diff --git a/src/main/java/jsr166e/RecursiveAction.java b/src/main/java/jsr166e/RecursiveAction.java
new file mode 100644
index 0000000..14cebbc
--- /dev/null
+++ b/src/main/java/jsr166e/RecursiveAction.java
@@ -0,0 +1,164 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+/**
+ * A recursive resultless {@link ForkJoinTask}. This class
+ * establishes conventions to parameterize resultless actions as
+ * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the
+ * only valid value of type {@code Void}, methods such as {@code join}
+ * always return {@code null} upon completion.
+ *
+ * <p><b>Sample Usages.</b> Here is a simple but complete ForkJoin
+ * sort that sorts a given {@code long[]} array:
+ *
+ * <pre> {@code
+ * static class SortTask extends RecursiveAction {
+ * final long[] array; final int lo, hi;
+ * SortTask(long[] array, int lo, int hi) {
+ * this.array = array; this.lo = lo; this.hi = hi;
+ * }
+ * SortTask(long[] array) { this(array, 0, array.length); }
+ * protected void compute() {
+ * if (hi - lo < THRESHOLD)
+ * sortSequentially(lo, hi);
+ * else {
+ * int mid = (lo + hi) >>> 1;
+ * invokeAll(new SortTask(array, lo, mid),
+ * new SortTask(array, mid, hi));
+ * merge(lo, mid, hi);
+ * }
+ * }
+ * // implementation details follow:
+ * static final int THRESHOLD = 1000;
+ * void sortSequentially(int lo, int hi) {
+ * Arrays.sort(array, lo, hi);
+ * }
+ * void merge(int lo, int mid, int hi) {
+ * long[] buf = Arrays.copyOfRange(array, lo, mid);
+ * for (int i = 0, j = lo, k = mid; i < buf.length; j++)
+ * array[j] = (k == hi || buf[i] < array[k]) ?
+ * buf[i++] : array[k++];
+ * }
+ * }}</pre>
+ *
+ * You could then sort {@code anArray} by creating {@code new
+ * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more
+ * concrete simple example, the following task increments each element
+ * of an array:
+ * <pre> {@code
+ * class IncrementTask extends RecursiveAction {
+ * final long[] array; final int lo, hi;
+ * IncrementTask(long[] array, int lo, int hi) {
+ * this.array = array; this.lo = lo; this.hi = hi;
+ * }
+ * protected void compute() {
+ * if (hi - lo < THRESHOLD) {
+ * for (int i = lo; i < hi; ++i)
+ * array[i]++;
+ * }
+ * else {
+ * int mid = (lo + hi) >>> 1;
+ * invokeAll(new IncrementTask(array, lo, mid),
+ * new IncrementTask(array, mid, hi));
+ * }
+ * }
+ * }}</pre>
+ *
+ * <p>The following example illustrates some refinements and idioms
+ * that may lead to better performance: RecursiveActions need not be
+ * fully recursive, so long as they maintain the basic
+ * divide-and-conquer approach. Here is a class that sums the squares
+ * of each element of a double array, by subdividing out only the
+ * right-hand-sides of repeated divisions by two, and keeping track of
+ * them with a chain of {@code next} references. It uses a dynamic
+ * threshold based on method {@code getSurplusQueuedTaskCount}, but
+ * counterbalances potential excess partitioning by directly
+ * performing leaf actions on unstolen tasks rather than further
+ * subdividing.
+ *
+ * <pre> {@code
+ * double sumOfSquares(ForkJoinPool pool, double[] array) {
+ * int n = array.length;
+ * Applyer a = new Applyer(array, 0, n, null);
+ * pool.invoke(a);
+ * return a.result;
+ * }
+ *
+ * class Applyer extends RecursiveAction {
+ * final double[] array;
+ * final int lo, hi;
+ * double result;
+ * Applyer next; // keeps track of right-hand-side tasks
+ * Applyer(double[] array, int lo, int hi, Applyer next) {
+ * this.array = array; this.lo = lo; this.hi = hi;
+ * this.next = next;
+ * }
+ *
+ * double atLeaf(int l, int h) {
+ * double sum = 0;
+ * for (int i = l; i < h; ++i) // perform leftmost base step
+ * sum += array[i] * array[i];
+ * return sum;
+ * }
+ *
+ * protected void compute() {
+ * int l = lo;
+ * int h = hi;
+ * Applyer right = null;
+ * while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
+ * int mid = (l + h) >>> 1;
+ * right = new Applyer(array, mid, h, right);
+ * right.fork();
+ * h = mid;
+ * }
+ * double sum = atLeaf(l, h);
+ * while (right != null) {
+ * if (right.tryUnfork()) // directly calculate if not stolen
+ * sum += right.atLeaf(right.lo, right.hi);
+ * else {
+ * right.join();
+ * sum += right.result;
+ * }
+ * right = right.next;
+ * }
+ * result = sum;
+ * }
+ * }}</pre>
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class RecursiveAction extends ForkJoinTask<Void> {
+ private static final long serialVersionUID = 5232453952276485070L;
+
+ /**
+ * The main computation performed by this task.
+ */
+ protected abstract void compute();
+
+ /**
+ * Always returns {@code null}.
+ *
+ * @return {@code null} always
+ */
+ public final Void getRawResult() { return null; }
+
+ /**
+ * Requires null completion value.
+ */
+ protected final void setRawResult(Void mustBeNull) { }
+
+ /**
+ * Implements execution conventions for RecursiveActions.
+ */
+ protected final boolean exec() {
+ compute();
+ return true;
+ }
+
+}
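Editor's note: a minimal driver for the IncrementTask sample above, assuming
the jsr166e.ForkJoinPool that accompanies this backport (any pool that runs
jsr166e ForkJoinTasks would do):

    import jsr166e.ForkJoinPool;

    long[] data = new long[1000000];
    ForkJoinPool pool = new ForkJoinPool();                // ~one worker per CPU
    pool.invoke(new IncrementTask(data, 0, data.length));  // blocks until done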
diff --git a/src/main/java/jsr166e/RecursiveTask.java b/src/main/java/jsr166e/RecursiveTask.java
new file mode 100644
index 0000000..263364a
--- /dev/null
+++ b/src/main/java/jsr166e/RecursiveTask.java
@@ -0,0 +1,69 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+
+/**
+ * A recursive result-bearing {@link ForkJoinTask}.
+ *
+ * <p>For a classic example, here is a task computing Fibonacci numbers:
+ *
+ * <pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
+ * final int n;
+ * Fibonacci(int n) { this.n = n; }
+ * protected Integer compute() {
+ * if (n <= 1)
+ * return n;
+ * Fibonacci f1 = new Fibonacci(n - 1);
+ * f1.fork();
+ * Fibonacci f2 = new Fibonacci(n - 2);
+ * return f2.compute() + f1.join();
+ * }
+ * }}</pre>
+ *
+ * However, besides being a dumb way to compute Fibonacci functions
+ * (there is a simple fast linear algorithm that you'd use in
+ * practice), this is likely to perform poorly because the smallest
+ * subtasks are too small to be worthwhile splitting up. Instead, as
+ * is the case for nearly all fork/join applications, you'd pick some
+ * minimum granularity size (for example 10 here) for which you always
+ * sequentially solve rather than subdividing.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
+ private static final long serialVersionUID = 5232453952276485270L;
+
+ /**
+ * The result of the computation.
+ */
+ V result;
+
+ /**
+ * The main computation performed by this task.
+ * @return the result of the computation
+ */
+ protected abstract V compute();
+
+ public final V getRawResult() {
+ return result;
+ }
+
+ protected final void setRawResult(V value) {
+ result = value;
+ }
+
+ /**
+ * Implements execution conventions for RecursiveTask.
+ */
+ protected final boolean exec() {
+ result = compute();
+ return true;
+ }
+
+}
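Editor's note: applying the granularity advice above, here is a sketch of the
Fibonacci task with the suggested sequential cutoff of 10 (invoke it in a
ForkJoinPool as in the RecursiveAction note):

    import jsr166e.RecursiveTask;

    class Fibonacci extends RecursiveTask<Integer> {
        static final int THRESHOLD = 10;  // below this, solve sequentially
        final int n;
        Fibonacci(int n) { this.n = n; }
        protected Integer compute() {
            if (n <= THRESHOLD)
                return seqFib(n);
            Fibonacci f1 = new Fibonacci(n - 1);
            f1.fork();                        // run n-1 asynchronously
            Fibonacci f2 = new Fibonacci(n - 2);
            return f2.compute() + f1.join();  // do n-2 here, then join n-1
        }
        static int seqFib(int n) {
            return (n <= 1) ? n : seqFib(n - 1) + seqFib(n - 2);
        }
    }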
diff --git a/src/main/java/jsr166e/StampedLock.java b/src/main/java/jsr166e/StampedLock.java
new file mode 100644
index 0000000..45d873a
--- /dev/null
+++ b/src/main/java/jsr166e/StampedLock.java
@@ -0,0 +1,1419 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import jsr166y.ThreadLocalRandom;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * A capability-based lock with three modes for controlling read/write
+ * access. The state of a StampedLock consists of a version and mode.
+ * Lock acquisition methods return a stamp that represents and
+ * controls access with respect to a lock state; "try" versions of
+ * these methods may instead return the special value zero to
+ * represent failure to acquire access. Lock release and conversion
+ * methods require stamps as arguments, and fail if they do not match
+ * the state of the lock. The three modes are:
+ *
+ * <ul>
+ *
+ * <li><b>Writing.</b> Method {@link #writeLock} possibly blocks
+ * waiting for exclusive access, returning a stamp that can be used
+ * in method {@link #unlockWrite} to release the lock. Untimed and
+ * timed versions of {@code tryWriteLock} are also provided. When
+ * the lock is held in write mode, no read locks may be obtained,
+ * and all optimistic read validations will fail. </li>
+ *
+ * <li><b>Reading.</b> Method {@link #readLock} possibly blocks
+ * waiting for non-exclusive access, returning a stamp that can be
+ * used in method {@link #unlockRead} to release the lock. Untimed
+ * and timed versions of {@code tryReadLock} are also provided. </li>
+ *
+ * <li><b>Optimistic Reading.</b> Method {@link #tryOptimisticRead}
+ * returns a non-zero stamp only if the lock is not currently held
+ * in write mode. Method {@link #validate} returns true if the lock
+ * has not been acquired in write mode since obtaining a given
+ * stamp. This mode can be thought of as an extremely weak version
+ * of a read-lock, that can be broken by a writer at any time. The
+ * use of optimistic mode for short read-only code segments often
+ * reduces contention and improves throughput. However, its use is
+ * inherently fragile. Optimistic read sections should only read
+ * fields and hold them in local variables for later use after
+ * validation. Fields read while in optimistic mode may be wildly
+ * inconsistent, so usage applies only when you are familiar enough
+ * with data representations to check consistency and/or repeatedly
+ * invoke method {@code validate()}. For example, such steps are
+ * typically required when first reading an object or array
+ * reference, and then accessing one of its fields, elements or
+ * methods. </li>
+ *
+ * </ul>
+ *
+ * <p>This class also supports methods that conditionally provide
+ * conversions across the three modes. For example, method {@link
+ * #tryConvertToWriteLock} attempts to "upgrade" a mode, returning
+ * a valid write stamp if (1) already in writing mode (2) in reading
+ * mode and there are no other readers or (3) in optimistic mode and
+ * the lock is available. The forms of these methods are designed to
+ * help reduce some of the code bloat that otherwise occurs in
+ * retry-based designs.
+ *
+ * <p>StampedLocks are designed for use as internal utilities in the
+ * development of thread-safe components. Their use relies on
+ * knowledge of the internal properties of the data, objects, and
+ * methods they are protecting. They are not reentrant, so locked
+ * bodies should not call other unknown methods that may try to
+ * re-acquire locks (although you may pass a stamp to other methods
+ * that can use or convert it). The use of read lock modes relies on
+ * the associated code sections being side-effect-free. Unvalidated
+ * optimistic read sections cannot call methods that are not known to
+ * tolerate potential inconsistencies. Stamps use finite
+ * representations, and are not cryptographically secure (i.e., a
+ * valid stamp may be guessable). Stamp values may recycle after (no
+ * sooner than) one year of continuous operation. A stamp held without
+ * use or validation for longer than this period may fail to validate
+ * correctly. StampedLocks are serializable, but always deserialize
+ * into initial unlocked state, so they are not useful for remote
+ * locking.
+ *
+ * <p>The scheduling policy of StampedLock does not consistently
+ * prefer readers over writers or vice versa. All "try" methods are
+ * best-effort and do not necessarily conform to any scheduling or
+ * fairness policy. A zero return from any "try" method for acquiring
+ * or converting locks does not carry any information about the state
+ * of the lock; a subsequent invocation may succeed.
+ *
+ * <p>Because it supports coordinated usage across multiple lock
+ * modes, this class does not directly implement the {@link Lock} or
+ * {@link ReadWriteLock} interfaces. However, a StampedLock may be
+ * viewed {@link #asReadLock()}, {@link #asWriteLock()}, or {@link
+ * #asReadWriteLock()} in applications requiring only the associated
+ * set of functionality.
+ *
+ * <p><b>Sample Usage.</b> The following illustrates some usage idioms
+ * in a class that maintains simple two-dimensional points. The sample
+ * code illustrates some try/catch conventions even though they are
+ * not strictly needed here because no exceptions can occur in their
+ * bodies.
+ *
+ * <pre>{@code
+ * class Point {
+ * private double x, y;
+ * private final StampedLock sl = new StampedLock();
+ *
+ * void move(double deltaX, double deltaY) { // an exclusively locked method
+ * long stamp = sl.writeLock();
+ * try {
+ * x += deltaX;
+ * y += deltaY;
+ * } finally {
+ * sl.unlockWrite(stamp);
+ * }
+ * }
+ *
+ * double distanceFromOrigin() { // A read-only method
+ * long stamp = sl.tryOptimisticRead();
+ * double currentX = x, currentY = y;
+ * if (!sl.validate(stamp)) {
+ * stamp = sl.readLock();
+ * try {
+ * currentX = x;
+ * currentY = y;
+ * } finally {
+ * sl.unlockRead(stamp);
+ * }
+ * }
+ * return Math.sqrt(currentX * currentX + currentY * currentY);
+ * }
+ *
+ * void moveIfAtOrigin(double newX, double newY) { // upgrade
+ * // Could instead start with optimistic, not read mode
+ * long stamp = sl.readLock();
+ * try {
+ * while (x == 0.0 && y == 0.0) {
+ * long ws = sl.tryConvertToWriteLock(stamp);
+ * if (ws != 0L) {
+ * stamp = ws;
+ * x = newX;
+ * y = newY;
+ * break;
+ * }
+ * else {
+ * sl.unlockRead(stamp);
+ * stamp = sl.writeLock();
+ * }
+ * }
+ * } finally {
+ * sl.unlock(stamp);
+ * }
+ * }
+ * }}</pre>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class StampedLock implements java.io.Serializable {
+ /*
+ * Algorithmic notes:
+ *
+ * The design employs elements of Sequence locks
+ * (as used in linux kernels; see Lameter's
+ * http://www.lameter.com/gelato2005.pdf
+ * and elsewhere; see
+ * Boehm's http://www.hpl.hp.com/techreports/2012/HPL-2012-68.html)
+ * and Ordered RW locks (see Shirako et al
+ * http://dl.acm.org/citation.cfm?id=2312015)
+ *
+ * Conceptually, the primary state of the lock includes a sequence
+ * number that is odd when write-locked and even otherwise.
+ * However, this is offset by a reader count that is non-zero when
+ * read-locked. The read count is ignored when validating
+ * "optimistic" seqlock-reader-style stamps. Because we must use
+ * a small finite number of bits (currently 7) for readers, a
+ * supplementary reader overflow word is used when the number of
+ * readers exceeds the count field. We do this by treating the max
+ * reader count value (RBITS) as a spinlock protecting overflow
+ * updates.
+ *
+ * Waiters use a modified form of CLH lock used in
+ * AbstractQueuedSynchronizer (see its internal documentation for
+ * a fuller account), where each node is tagged (field mode) as
+ * either a reader or writer. Sets of waiting readers are grouped
+ * (linked) under a common node (field cowait), so they act as a
+ * single node with respect to most CLH mechanics. By virtue of the
+ * queue structure, wait nodes need not actually carry sequence
+ * numbers; we know each is greater than its predecessor. This
+ * simplifies the scheduling policy to a mainly-FIFO scheme that
+ * incorporates elements of Phase-Fair locks (see Brandenburg &
+ * Anderson, especially http://www.cs.unc.edu/~bbb/diss/). In
+ * particular, we use the phase-fair anti-barging rule: If an
+ * incoming reader arrives while read lock is held but there is a
+ * queued writer, this incoming reader is queued. (This rule is
+ * responsible for some of the complexity of method acquireRead,
+ * but without it, the lock becomes highly unfair.) Method release
+ * does not (and sometimes cannot) itself wake up cowaiters. This
+ * is done by the primary thread, but helped by any other threads
+ * with nothing better to do in methods acquireRead and
+ * acquireWrite.
+ *
+ * These rules apply to threads actually queued. All tryLock forms
+ * opportunistically try to acquire locks regardless of preference
+ * rules, and so may "barge" their way in. Randomized spinning is
+ * used in the acquire methods to reduce (increasingly expensive)
+ * context switching while also avoiding sustained memory
+ * thrashing among many threads. We limit spins to the head of
+ * queue. A thread spin-waits up to SPINS times (where each
+ * iteration decreases spin count with 50% probability) before
+ * blocking. If, upon wakening, it fails to obtain the lock and is
+ * still (or becomes) the first waiting thread (which indicates
+ * that some other thread barged and obtained the lock), it escalates
+ * spins (up to MAX_HEAD_SPINS) to reduce the likelihood of
+ * continually losing to barging threads.
+ *
+ * Nearly all of these mechanics are carried out in methods
+ * acquireWrite and acquireRead, that, as typical of such code,
+ * sprawl out because actions and retries rely on consistent sets
+ * of locally cached reads.
+ *
+ * As noted in Boehm's paper (above), sequence validation (mainly
+ * method validate()) requires stricter ordering rules than apply
+ * to normal volatile reads (of "state"). In the absence of (but
+ * continual hope for) explicit JVM support of intrinsics with
+ * double-sided reordering prohibition, or corresponding fence
+ * intrinsics, we for now uncomfortably rely on the fact that the
+ * Unsafe.getXVolatile intrinsic must have this property
+ * (syntactic volatile reads do not) for internal purposes anyway,
+ * even though it is not documented.
+ *
+ * The memory layout keeps lock state and queue pointers together
+ * (normally on the same cache line). This usually works well for
+ * read-mostly loads. In most other cases, the natural tendency of
+ * adaptive-spin CLH locks to reduce memory contention lessens
+ * motivation to further spread out contended locations, but might
+ * be subject to future improvements.
+ */
+
+ private static final long serialVersionUID = -6001602636862214147L;
+
+ /** Number of processors, for spin control */
+ private static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /** Maximum number of retries before enqueuing on acquisition */
+ private static final int SPINS = (NCPU > 1) ? 1 << 6 : 0;
+
+ /** Maximum number of retries before blocking at head on acquisition */
+ private static final int HEAD_SPINS = (NCPU > 1) ? 1 << 10 : 0;
+
+ /** Maximum number of retries before re-blocking */
+ private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 16 : 0;
+
+ /** The period for yielding when waiting for overflow spinlock */
+    private static final int OVERFLOW_YIELD_RATE = 7; // must be (a power of 2) - 1
+
+ /** The number of bits to use for reader count before overflowing */
+ private static final int LG_READERS = 7;
+
+ // Values for lock state and stamp operations
+ private static final long RUNIT = 1L;
+ private static final long WBIT = 1L << LG_READERS;
+ private static final long RBITS = WBIT - 1L;
+ private static final long RFULL = RBITS - 1L;
+ private static final long ABITS = RBITS | WBIT;
+ private static final long SBITS = ~RBITS; // note overlap with ABITS
+
+ // Initial value for lock state; avoid failure value zero
+ private static final long ORIGIN = WBIT << 1;
+
+ // Special value from cancelled acquire methods so caller can throw IE
+ private static final long INTERRUPTED = 1L;
+
+ // Values for node status; order matters
+ private static final int WAITING = -1;
+ private static final int CANCELLED = 1;
+
+ // Modes for nodes (int not boolean to allow arithmetic)
+ private static final int RMODE = 0;
+ private static final int WMODE = 1;
+
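+    // Editor's note: concrete values implied by LG_READERS = 7, for reference:
+    //   RUNIT = 0x001, WBIT = 0x080, RBITS = 0x07F, RFULL = 0x07E,
+    //   ABITS = 0x0FF, SBITS = 0xFFFFFFFFFFFFFF80, ORIGIN = 0x100.
+    // The low 7 bits of state count readers (spilling into readerOverflow
+    // beyond RFULL), bit 7 is the writer bit, and the remaining high bits
+    // form the sequence number that validate() compares.
+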
+ /** Wait nodes */
+ static final class WNode {
+ volatile WNode prev;
+ volatile WNode next;
+ volatile WNode cowait; // list of linked readers
+ volatile Thread thread; // non-null while possibly parked
+ volatile int status; // 0, WAITING, or CANCELLED
+ final int mode; // RMODE or WMODE
+ WNode(int m, WNode p) { mode = m; prev = p; }
+ }
+
+ /** Head of CLH queue */
+ private transient volatile WNode whead;
+ /** Tail (last) of CLH queue */
+ private transient volatile WNode wtail;
+
+ // views
+ transient ReadLockView readLockView;
+ transient WriteLockView writeLockView;
+ transient ReadWriteLockView readWriteLockView;
+
+ /** Lock sequence/state */
+ private transient volatile long state;
+ /** extra reader count when state read count saturated */
+ private transient int readerOverflow;
+
+ /**
+ * Creates a new lock, initially in unlocked state.
+ */
+ public StampedLock() {
+ state = ORIGIN;
+ }
+
+ /**
+ * Exclusively acquires the lock, blocking if necessary
+ * until available.
+ *
+ * @return a stamp that can be used to unlock or convert mode
+ */
+ public long writeLock() {
+ long s, next; // bypass acquireWrite in fully unlocked case only
+ return ((((s = state) & ABITS) == 0L &&
+ U.compareAndSwapLong(this, STATE, s, next = s + WBIT)) ?
+ next : acquireWrite(false, 0L));
+ }
+
+ /**
+ * Exclusively acquires the lock if it is immediately available.
+ *
+ * @return a stamp that can be used to unlock or convert mode,
+ * or zero if the lock is not available
+ */
+ public long tryWriteLock() {
+ long s, next;
+ return ((((s = state) & ABITS) == 0L &&
+ U.compareAndSwapLong(this, STATE, s, next = s + WBIT)) ?
+ next : 0L);
+ }
+
+ /**
+ * Exclusively acquires the lock if it is available within the
+ * given time and the current thread has not been interrupted.
+ * Behavior under timeout and interruption matches that specified
+ * for method {@link Lock#tryLock(long,TimeUnit)}.
+ *
+ * @param time the maximum time to wait for the lock
+ * @param unit the time unit of the {@code time} argument
+ * @return a stamp that can be used to unlock or convert mode,
+ * or zero if the lock is not available
+ * @throws InterruptedException if the current thread is interrupted
+ * before acquiring the lock
+ */
+ public long tryWriteLock(long time, TimeUnit unit)
+ throws InterruptedException {
+ long nanos = unit.toNanos(time);
+ if (!Thread.interrupted()) {
+ long next, deadline;
+ if ((next = tryWriteLock()) != 0L)
+ return next;
+ if (nanos <= 0L)
+ return 0L;
+ if ((deadline = System.nanoTime() + nanos) == 0L)
+ deadline = 1L;
+ if ((next = acquireWrite(true, deadline)) != INTERRUPTED)
+ return next;
+ }
+ throw new InterruptedException();
+ }
+
+ /**
+ * Exclusively acquires the lock, blocking if necessary
+ * until available or the current thread is interrupted.
+ * Behavior under interruption matches that specified
+ * for method {@link Lock#lockInterruptibly()}.
+ *
+ * @return a stamp that can be used to unlock or convert mode
+ * @throws InterruptedException if the current thread is interrupted
+ * before acquiring the lock
+ */
+ public long writeLockInterruptibly() throws InterruptedException {
+ long next;
+ if (!Thread.interrupted() &&
+ (next = acquireWrite(true, 0L)) != INTERRUPTED)
+ return next;
+ throw new InterruptedException();
+ }
+
+ /**
+ * Non-exclusively acquires the lock, blocking if necessary
+ * until available.
+ *
+ * @return a stamp that can be used to unlock or convert mode
+ */
+ public long readLock() {
+ long s = state, next; // bypass acquireRead on common uncontended case
+ return ((whead == wtail && (s & ABITS) < RFULL &&
+ U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) ?
+ next : acquireRead(false, 0L));
+ }
+
+ /**
+ * Non-exclusively acquires the lock if it is immediately available.
+ *
+ * @return a stamp that can be used to unlock or convert mode,
+ * or zero if the lock is not available
+ */
+ public long tryReadLock() {
+ for (;;) {
+ long s, m, next;
+ if ((m = (s = state) & ABITS) == WBIT)
+ return 0L;
+ else if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT))
+ return next;
+ }
+ else if ((next = tryIncReaderOverflow(s)) != 0L)
+ return next;
+ }
+ }
+
+ /**
+ * Non-exclusively acquires the lock if it is available within the
+ * given time and the current thread has not been interrupted.
+ * Behavior under timeout and interruption matches that specified
+ * for method {@link Lock#tryLock(long,TimeUnit)}.
+ *
+ * @param time the maximum time to wait for the lock
+ * @param unit the time unit of the {@code time} argument
+ * @return a stamp that can be used to unlock or convert mode,
+ * or zero if the lock is not available
+ * @throws InterruptedException if the current thread is interrupted
+ * before acquiring the lock
+ */
+ public long tryReadLock(long time, TimeUnit unit)
+ throws InterruptedException {
+ long s, m, next, deadline;
+ long nanos = unit.toNanos(time);
+ if (!Thread.interrupted()) {
+ if ((m = (s = state) & ABITS) != WBIT) {
+ if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT))
+ return next;
+ }
+ else if ((next = tryIncReaderOverflow(s)) != 0L)
+ return next;
+ }
+ if (nanos <= 0L)
+ return 0L;
+ if ((deadline = System.nanoTime() + nanos) == 0L)
+ deadline = 1L;
+ if ((next = acquireRead(true, deadline)) != INTERRUPTED)
+ return next;
+ }
+ throw new InterruptedException();
+ }
+
+ /**
+ * Non-exclusively acquires the lock, blocking if necessary
+ * until available or the current thread is interrupted.
+ * Behavior under interruption matches that specified
+ * for method {@link Lock#lockInterruptibly()}.
+ *
+ * @return a stamp that can be used to unlock or convert mode
+ * @throws InterruptedException if the current thread is interrupted
+ * before acquiring the lock
+ */
+ public long readLockInterruptibly() throws InterruptedException {
+ long next;
+ if (!Thread.interrupted() &&
+ (next = acquireRead(true, 0L)) != INTERRUPTED)
+ return next;
+ throw new InterruptedException();
+ }
+
+ /**
+ * Returns a stamp that can later be validated, or zero
+ * if exclusively locked.
+ *
+ * @return a stamp, or zero if exclusively locked
+ */
+ public long tryOptimisticRead() {
+ long s;
+ return (((s = state) & WBIT) == 0L) ? (s & SBITS) : 0L;
+ }
+
+ /**
+ * Returns true if the lock has not been exclusively acquired
+ * since issuance of the given stamp. Always returns false if the
+ * stamp is zero. Always returns true if the stamp represents a
+ * currently held lock. Invoking this method with a value not
+ * obtained from {@link #tryOptimisticRead} or a locking method
+ * for this lock has no defined effect or result.
+ *
+ * @param stamp a stamp
+ * @return {@code true} if the lock has not been exclusively acquired
+ * since issuance of the given stamp; else false
+ */
+ public boolean validate(long stamp) {
+ // See above about current use of getLongVolatile here
+ return (stamp & SBITS) == (U.getLongVolatile(this, STATE) & SBITS);
+ }
+
+ /**
+ * If the lock state matches the given stamp, releases the
+ * exclusive lock.
+ *
+ * @param stamp a stamp returned by a write-lock operation
+ * @throws IllegalMonitorStateException if the stamp does
+ * not match the current state of this lock
+ */
+ public void unlockWrite(long stamp) {
+ WNode h;
+ if (state != stamp || (stamp & WBIT) == 0L)
+ throw new IllegalMonitorStateException();
+ state = (stamp += WBIT) == 0L ? ORIGIN : stamp;
+ if ((h = whead) != null && h.status != 0)
+ release(h);
+ }
+
+ /**
+ * If the lock state matches the given stamp, releases the
+ * non-exclusive lock.
+ *
+ * @param stamp a stamp returned by a read-lock operation
+ * @throws IllegalMonitorStateException if the stamp does
+ * not match the current state of this lock
+ */
+ public void unlockRead(long stamp) {
+ long s, m; WNode h;
+ for (;;) {
+ if (((s = state) & SBITS) != (stamp & SBITS) ||
+ (stamp & ABITS) == 0L || (m = s & ABITS) == 0L || m == WBIT)
+ throw new IllegalMonitorStateException();
+ if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
+ if (m == RUNIT && (h = whead) != null && h.status != 0)
+ release(h);
+ break;
+ }
+ }
+ else if (tryDecReaderOverflow(s) != 0L)
+ break;
+ }
+ }
+
+ /**
+ * If the lock state matches the given stamp, releases the
+ * corresponding mode of the lock.
+ *
+ * @param stamp a stamp returned by a lock operation
+ * @throws IllegalMonitorStateException if the stamp does
+ * not match the current state of this lock
+ */
+ public void unlock(long stamp) {
+ long a = stamp & ABITS, m, s; WNode h;
+ while (((s = state) & SBITS) == (stamp & SBITS)) {
+ if ((m = s & ABITS) == 0L)
+ break;
+ else if (m == WBIT) {
+ if (a != m)
+ break;
+ state = (s += WBIT) == 0L ? ORIGIN : s;
+ if ((h = whead) != null && h.status != 0)
+ release(h);
+ return;
+ }
+ else if (a == 0L || a >= WBIT)
+ break;
+ else if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
+ if (m == RUNIT && (h = whead) != null && h.status != 0)
+ release(h);
+ return;
+ }
+ }
+ else if (tryDecReaderOverflow(s) != 0L)
+ return;
+ }
+ throw new IllegalMonitorStateException();
+ }
+
+ /**
+ * If the lock state matches the given stamp, performs one of
+ * the following actions. If the stamp represents holding a write
+ * lock, returns it. Or, if a read lock, if the write lock is
+ * available, releases the read lock and returns a write stamp.
+ * Or, if an optimistic read, returns a write stamp only if
+ * immediately available. This method returns zero in all other
+ * cases.
+ *
+ * @param stamp a stamp
+ * @return a valid write stamp, or zero on failure
+ */
+ public long tryConvertToWriteLock(long stamp) {
+ long a = stamp & ABITS, m, s, next;
+ while (((s = state) & SBITS) == (stamp & SBITS)) {
+ if ((m = s & ABITS) == 0L) {
+ if (a != 0L)
+ break;
+ if (U.compareAndSwapLong(this, STATE, s, next = s + WBIT))
+ return next;
+ }
+ else if (m == WBIT) {
+ if (a != m)
+ break;
+ return stamp;
+ }
+ else if (m == RUNIT && a != 0L) {
+ if (U.compareAndSwapLong(this, STATE, s,
+ next = s - RUNIT + WBIT))
+ return next;
+ }
+ else
+ break;
+ }
+ return 0L;
+ }
+
+ /**
+ * If the lock state matches the given stamp, performs one of
+ * the following actions. If the stamp represents holding a write
+ * lock, releases it and obtains a read lock. Or, if a read lock,
+ * returns it. Or, if an optimistic read, acquires a read lock and
+ * returns a read stamp only if immediately available. This method
+ * returns zero in all other cases.
+ *
+ * @param stamp a stamp
+ * @return a valid read stamp, or zero on failure
+ */
+ public long tryConvertToReadLock(long stamp) {
+ long a = stamp & ABITS, m, s, next; WNode h;
+ while (((s = state) & SBITS) == (stamp & SBITS)) {
+ if ((m = s & ABITS) == 0L) {
+ if (a != 0L)
+ break;
+ else if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT))
+ return next;
+ }
+ else if ((next = tryIncReaderOverflow(s)) != 0L)
+ return next;
+ }
+ else if (m == WBIT) {
+ if (a != m)
+ break;
+ state = next = s + (WBIT + RUNIT);
+ if ((h = whead) != null && h.status != 0)
+ release(h);
+ return next;
+ }
+ else if (a != 0L && a < WBIT)
+ return stamp;
+ else
+ break;
+ }
+ return 0L;
+ }
+
+ /**
+ * If the lock state matches the given stamp then, if the stamp
+ * represents holding a lock, releases it and returns an
+ * observation stamp. Or, if an optimistic read, returns it if
+ * validated. This method returns zero in all other cases, and so
+ * may be useful as a form of "tryUnlock".
+ *
+ * @param stamp a stamp
+ * @return a valid optimistic read stamp, or zero on failure
+ */
+ public long tryConvertToOptimisticRead(long stamp) {
+ long a = stamp & ABITS, m, s, next; WNode h;
+ for (;;) {
+ s = U.getLongVolatile(this, STATE); // see above
+ if (((s = state) & SBITS) != (stamp & SBITS))
+ break;
+ if ((m = s & ABITS) == 0L) {
+ if (a != 0L)
+ break;
+ return s;
+ }
+ else if (m == WBIT) {
+ if (a != m)
+ break;
+ state = next = (s += WBIT) == 0L ? ORIGIN : s;
+ if ((h = whead) != null && h.status != 0)
+ release(h);
+ return next;
+ }
+ else if (a == 0L || a >= WBIT)
+ break;
+ else if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, next = s - RUNIT)) {
+ if (m == RUNIT && (h = whead) != null && h.status != 0)
+ release(h);
+ return next & SBITS;
+ }
+ }
+ else if ((next = tryDecReaderOverflow(s)) != 0L)
+ return next & SBITS;
+ }
+ return 0L;
+ }
+
+ /**
+ * Releases the write lock if it is held, without requiring a
+ * stamp value. This method may be useful for recovery after
+ * errors.
+ *
+ * @return {@code true} if the lock was held, else false
+ */
+ public boolean tryUnlockWrite() {
+ long s; WNode h;
+ if (((s = state) & WBIT) != 0L) {
+ state = (s += WBIT) == 0L ? ORIGIN : s;
+ if ((h = whead) != null && h.status != 0)
+ release(h);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Releases one hold of the read lock if it is held, without
+ * requiring a stamp value. This method may be useful for recovery
+ * after errors.
+ *
+ * @return {@code true} if the read lock was held, else false
+ */
+ public boolean tryUnlockRead() {
+ long s, m; WNode h;
+ while ((m = (s = state) & ABITS) != 0L && m < WBIT) {
+ if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
+ if (m == RUNIT && (h = whead) != null && h.status != 0)
+ release(h);
+ return true;
+ }
+ }
+ else if (tryDecReaderOverflow(s) != 0L)
+ return true;
+ }
+ return false;
+ }
+
+ // status monitoring methods
+
+ /**
+ * Returns combined state-held and overflow read count for given
+ * state s.
+ */
+ private int getReadLockCount(long s) {
+ long readers;
+ if ((readers = s & RBITS) >= RFULL)
+ readers = RFULL + readerOverflow;
+ return (int) readers;
+ }
+
+ /**
+ * Returns {@code true} if the lock is currently held exclusively.
+ *
+ * @return {@code true} if the lock is currently held exclusively
+ */
+ public boolean isWriteLocked() {
+ return (state & WBIT) != 0L;
+ }
+
+ /**
+ * Returns {@code true} if the lock is currently held non-exclusively.
+ *
+ * @return {@code true} if the lock is currently held non-exclusively
+ */
+ public boolean isReadLocked() {
+ return (state & RBITS) != 0L;
+ }
+
+ /**
+ * Queries the number of read locks held for this lock. This
+ * method is designed for use in monitoring system state, not for
+ * synchronization control.
+ * @return the number of read locks held
+ */
+ public int getReadLockCount() {
+ return getReadLockCount(state);
+ }
+
+ /**
+ * Returns a string identifying this lock, as well as its lock
+ * state. The state, in brackets, includes the String {@code
+ * "Unlocked"} or the String {@code "Write-locked"} or the String
+ * {@code "Read-locks:"} followed by the current number of
+ * read-locks held.
+ *
+ * @return a string identifying this lock, as well as its lock state
+ */
+ public String toString() {
+ long s = state;
+ return super.toString() +
+ ((s & ABITS) == 0L ? "[Unlocked]" :
+ (s & WBIT) != 0L ? "[Write-locked]" :
+ "[Read-locks:" + getReadLockCount(s) + "]");
+ }
+
+ // views
+
+ /**
+ * Returns a plain {@link Lock} view of this StampedLock in which
+ * the {@link Lock#lock} method is mapped to {@link #readLock},
+ * and similarly for other methods. The returned Lock does not
+ * support a {@link Condition}; method {@link
+ * Lock#newCondition()} throws {@code
+ * UnsupportedOperationException}.
+ *
+ * @return the lock
+ */
+ public Lock asReadLock() {
+ ReadLockView v;
+ return ((v = readLockView) != null ? v :
+ (readLockView = new ReadLockView()));
+ }
+
+ /**
+ * Returns a plain {@link Lock} view of this StampedLock in which
+ * the {@link Lock#lock} method is mapped to {@link #writeLock},
+ * and similarly for other methods. The returned Lock does not
+ * support a {@link Condition}; method {@link
+ * Lock#newCondition()} throws {@code
+ * UnsupportedOperationException}.
+ *
+ * @return the lock
+ */
+ public Lock asWriteLock() {
+ WriteLockView v;
+ return ((v = writeLockView) != null ? v :
+ (writeLockView = new WriteLockView()));
+ }
+
+ /**
+ * Returns a {@link ReadWriteLock} view of this StampedLock in
+ * which the {@link ReadWriteLock#readLock()} method is mapped to
+ * {@link #asReadLock()}, and {@link ReadWriteLock#writeLock()} to
+ * {@link #asWriteLock()}.
+ *
+ * @return the lock
+ */
+ public ReadWriteLock asReadWriteLock() {
+ ReadWriteLockView v;
+ return ((v = readWriteLockView) != null ? v :
+ (readWriteLockView = new ReadWriteLockView()));
+ }
+
+ // view classes
+
+ final class ReadLockView implements Lock {
+ public void lock() { readLock(); }
+ public void lockInterruptibly() throws InterruptedException {
+ readLockInterruptibly();
+ }
+ public boolean tryLock() { return tryReadLock() != 0L; }
+ public boolean tryLock(long time, TimeUnit unit)
+ throws InterruptedException {
+ return tryReadLock(time, unit) != 0L;
+ }
+ public void unlock() { unstampedUnlockRead(); }
+ public Condition newCondition() {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ final class WriteLockView implements Lock {
+ public void lock() { writeLock(); }
+ public void lockInterruptibly() throws InterruptedException {
+ writeLockInterruptibly();
+ }
+ public boolean tryLock() { return tryWriteLock() != 0L; }
+ public boolean tryLock(long time, TimeUnit unit)
+ throws InterruptedException {
+ return tryWriteLock(time, unit) != 0L;
+ }
+ public void unlock() { unstampedUnlockWrite(); }
+ public Condition newCondition() {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ final class ReadWriteLockView implements ReadWriteLock {
+ public Lock readLock() { return asReadLock(); }
+ public Lock writeLock() { return asWriteLock(); }
+ }
+
+ // Unlock methods without stamp argument checks for view classes.
+ // Needed because view-class lock methods throw away stamps.
+
+ final void unstampedUnlockWrite() {
+ WNode h; long s;
+ if (((s = state) & WBIT) == 0L)
+ throw new IllegalMonitorStateException();
+ state = (s += WBIT) == 0L ? ORIGIN : s;
+ if ((h = whead) != null && h.status != 0)
+ release(h);
+ }
+
+ final void unstampedUnlockRead() {
+ for (;;) {
+ long s, m; WNode h;
+ if ((m = (s = state) & ABITS) == 0L || m >= WBIT)
+ throw new IllegalMonitorStateException();
+ else if (m < RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
+ if (m == RUNIT && (h = whead) != null && h.status != 0)
+ release(h);
+ break;
+ }
+ }
+ else if (tryDecReaderOverflow(s) != 0L)
+ break;
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ state = ORIGIN; // reset to unlocked state
+ }
+
+ // internals
+
+ /**
+ * Tries to increment readerOverflow by first setting state
+ * access bits value to RBITS, indicating hold of spinlock,
+ * then updating, then releasing.
+ *
+ * @param s a reader overflow stamp: (s & ABITS) >= RFULL
+ * @return new stamp on success, else zero
+ */
+ private long tryIncReaderOverflow(long s) {
+ // assert (s & ABITS) >= RFULL;
+ if ((s & ABITS) == RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, s | RBITS)) {
+ ++readerOverflow;
+ state = s;
+ return s;
+ }
+ }
+ else if ((ThreadLocalRandom.current().nextInt() &
+ OVERFLOW_YIELD_RATE) == 0)
+ Thread.yield();
+ return 0L;
+ }
+
+ /**
+ * Tries to decrement readerOverflow.
+ *
+ * @param s a reader overflow stamp: (s & ABITS) >= RFULL
+ * @return new stamp on success, else zero
+ */
+ private long tryDecReaderOverflow(long s) {
+ // assert (s & ABITS) >= RFULL;
+ if ((s & ABITS) == RFULL) {
+ if (U.compareAndSwapLong(this, STATE, s, s | RBITS)) {
+ int r; long next;
+ if ((r = readerOverflow) > 0) {
+ readerOverflow = r - 1;
+ next = s;
+ }
+ else
+ next = s - RUNIT;
+ state = next;
+ return next;
+ }
+ }
+ else if ((ThreadLocalRandom.current().nextInt() &
+ OVERFLOW_YIELD_RATE) == 0)
+ Thread.yield();
+ return 0L;
+ }
+
+ /**
+ * Wakes up the successor of h (normally whead). This is normally
+ * just h.next, but may require traversal from wtail if next
+ * pointers are lagging. This may fail to wake up an acquiring
+ * thread when one or more have been cancelled, but the cancel
+ * methods themselves provide extra safeguards to ensure liveness.
+ */
+ private void release(WNode h) {
+ if (h != null) {
+ WNode q; Thread w;
+ U.compareAndSwapInt(h, WSTATUS, WAITING, 0);
+ if ((q = h.next) == null || q.status == CANCELLED) {
+ for (WNode t = wtail; t != null && t != h; t = t.prev)
+ if (t.status <= 0)
+ q = t;
+ }
+ if (q != null && (w = q.thread) != null)
+ U.unpark(w);
+ }
+ }
+
+ /**
+ * See above for explanation.
+ *
+ * @param interruptible true if should check interrupts and if so
+ * return INTERRUPTED
+ * @param deadline if nonzero, the System.nanoTime value to timeout
+ * at (and return zero)
+ * @return next state, or INTERRUPTED
+ */
+ private long acquireWrite(boolean interruptible, long deadline) {
+ WNode node = null, p;
+ for (int spins = -1;;) { // spin while enqueuing
+ long m, s, ns;
+ if ((m = (s = state) & ABITS) == 0L) {
+ if (U.compareAndSwapLong(this, STATE, s, ns = s + WBIT))
+ return ns;
+ }
+ else if (spins < 0)
+ spins = (m == WBIT && wtail == whead) ? SPINS : 0;
+ else if (spins > 0) {
+ if (ThreadLocalRandom.current().nextInt() >= 0)
+ --spins;
+ }
+ else if ((p = wtail) == null) { // initialize queue
+ WNode hd = new WNode(WMODE, null);
+ if (U.compareAndSwapObject(this, WHEAD, null, hd))
+ wtail = hd;
+ }
+ else if (node == null)
+ node = new WNode(WMODE, p);
+ else if (node.prev != p)
+ node.prev = p;
+ else if (U.compareAndSwapObject(this, WTAIL, p, node)) {
+ p.next = node;
+ break;
+ }
+ }
+
+ for (int spins = -1;;) {
+ WNode h, np, pp; int ps;
+ if ((h = whead) == p) {
+ if (spins < 0)
+ spins = HEAD_SPINS;
+ else if (spins < MAX_HEAD_SPINS)
+ spins <<= 1;
+ for (int k = spins;;) { // spin at head
+ long s, ns;
+ if (((s = state) & ABITS) == 0L) {
+ if (U.compareAndSwapLong(this, STATE, s,
+ ns = s + WBIT)) {
+ whead = node;
+ node.prev = null;
+ return ns;
+ }
+ }
+ else if (ThreadLocalRandom.current().nextInt() >= 0 &&
+ --k <= 0)
+ break;
+ }
+ }
+ else if (h != null) { // help release stale waiters
+ WNode c; Thread w;
+ while ((c = h.cowait) != null) {
+ if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) &&
+ (w = c.thread) != null)
+ U.unpark(w);
+ }
+ }
+ if (whead == h) {
+ if ((np = node.prev) != p) {
+ if (np != null)
+ (p = np).next = node; // stale
+ }
+ else if ((ps = p.status) == 0)
+ U.compareAndSwapInt(p, WSTATUS, 0, WAITING);
+ else if (ps == CANCELLED) {
+ if ((pp = p.prev) != null) {
+ node.prev = pp;
+ pp.next = node;
+ }
+ }
+ else {
+ long time; // 0 argument to park means no timeout
+ if (deadline == 0L)
+ time = 0L;
+ else if ((time = deadline - System.nanoTime()) <= 0L)
+ return cancelWaiter(node, node, false);
+ Thread wt = Thread.currentThread();
+ U.putObject(wt, PARKBLOCKER, this);
+ node.thread = wt;
+ if (p.status < 0 && (p != h || (state & ABITS) != 0L) &&
+ whead == h && node.prev == p)
+ U.park(false, time); // emulate LockSupport.park
+ node.thread = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (interruptible && Thread.interrupted())
+ return cancelWaiter(node, node, true);
+ }
+ }
+ }
+ }
+
+ /**
+ * See above for explanation.
+ *
+ * @param interruptible true if should check interrupts and if so
+ * return INTERRUPTED
+ * @param deadline if nonzero, the System.nanoTime value to timeout
+ * at (and return zero)
+ * @return next state, or INTERRUPTED
+ */
+ private long acquireRead(boolean interruptible, long deadline) {
+ WNode node = null, p;
+ for (int spins = -1;;) {
+ WNode h;
+ if ((h = whead) == (p = wtail)) {
+ for (long m, s, ns;;) {
+ if ((m = (s = state) & ABITS) < RFULL ?
+ U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) :
+ (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L))
+ return ns;
+ else if (m >= WBIT) {
+ if (spins > 0) {
+ if (ThreadLocalRandom.current().nextInt() >= 0)
+ --spins;
+ }
+ else {
+ if (spins == 0) {
+ WNode nh = whead, np = wtail;
+ if ((nh == h && np == p) || (h = nh) != (p = np))
+ break;
+ }
+ spins = SPINS;
+ }
+ }
+ }
+ }
+ if (p == null) { // initialize queue
+ WNode hd = new WNode(WMODE, null);
+ if (U.compareAndSwapObject(this, WHEAD, null, hd))
+ wtail = hd;
+ }
+ else if (node == null)
+ node = new WNode(RMODE, p);
+ else if (h == p || p.mode != RMODE) {
+ if (node.prev != p)
+ node.prev = p;
+ else if (U.compareAndSwapObject(this, WTAIL, p, node)) {
+ p.next = node;
+ break;
+ }
+ }
+ else if (!U.compareAndSwapObject(p, WCOWAIT,
+ node.cowait = p.cowait, node))
+ node.cowait = null;
+ else {
+ for (;;) {
+ WNode pp, c; Thread w;
+ if ((h = whead) != null && (c = h.cowait) != null &&
+ U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) &&
+ (w = c.thread) != null) // help release
+ U.unpark(w);
+ if (h == (pp = p.prev) || h == p || pp == null) {
+ long m, s, ns;
+ do {
+ if ((m = (s = state) & ABITS) < RFULL ?
+ U.compareAndSwapLong(this, STATE, s,
+ ns = s + RUNIT) :
+ (m < WBIT &&
+ (ns = tryIncReaderOverflow(s)) != 0L))
+ return ns;
+ } while (m < WBIT);
+ }
+ if (whead == h && p.prev == pp) {
+ long time;
+ if (pp == null || h == p || p.status > 0) {
+ node = null; // throw away
+ break;
+ }
+ if (deadline == 0L)
+ time = 0L;
+ else if ((time = deadline - System.nanoTime()) <= 0L)
+ return cancelWaiter(node, p, false);
+ Thread wt = Thread.currentThread();
+ U.putObject(wt, PARKBLOCKER, this);
+ node.thread = wt;
+ if ((h != pp || (state & ABITS) == WBIT) &&
+ whead == h && p.prev == pp)
+ U.park(false, time);
+ node.thread = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (interruptible && Thread.interrupted())
+ return cancelWaiter(node, p, true);
+ }
+ }
+ }
+ }
+
+ for (int spins = -1;;) {
+ WNode h, np, pp; int ps;
+ if ((h = whead) == p) {
+ if (spins < 0)
+ spins = HEAD_SPINS;
+ else if (spins < MAX_HEAD_SPINS)
+ spins <<= 1;
+ for (int k = spins;;) { // spin at head
+ long m, s, ns;
+ if ((m = (s = state) & ABITS) < RFULL ?
+ U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) :
+ (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) {
+ WNode c; Thread w;
+ whead = node;
+ node.prev = null;
+ while ((c = node.cowait) != null) {
+ if (U.compareAndSwapObject(node, WCOWAIT,
+ c, c.cowait) &&
+ (w = c.thread) != null)
+ U.unpark(w);
+ }
+ return ns;
+ }
+ else if (m >= WBIT &&
+ ThreadLocalRandom.current().nextInt() >= 0 && --k <= 0)
+ break;
+ }
+ }
+ else if (h != null) {
+ WNode c; Thread w;
+ while ((c = h.cowait) != null) {
+ if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) &&
+ (w = c.thread) != null)
+ U.unpark(w);
+ }
+ }
+ if (whead == h) {
+ if ((np = node.prev) != p) {
+ if (np != null)
+ (p = np).next = node; // stale
+ }
+ else if ((ps = p.status) == 0)
+ U.compareAndSwapInt(p, WSTATUS, 0, WAITING);
+ else if (ps == CANCELLED) {
+ if ((pp = p.prev) != null) {
+ node.prev = pp;
+ pp.next = node;
+ }
+ }
+ else {
+ long time;
+ if (deadline == 0L)
+ time = 0L;
+ else if ((time = deadline - System.nanoTime()) <= 0L)
+ return cancelWaiter(node, node, false);
+ Thread wt = Thread.currentThread();
+ U.putObject(wt, PARKBLOCKER, this);
+ node.thread = wt;
+ if (p.status < 0 &&
+ (p != h || (state & ABITS) == WBIT) &&
+ whead == h && node.prev == p)
+ U.park(false, time);
+ node.thread = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (interruptible && Thread.interrupted())
+ return cancelWaiter(node, node, true);
+ }
+ }
+ }
+ }
+
+ /**
+ * If node non-null, forces cancel status and unsplices it from
+ * queue if possible and wakes up any cowaiters (of the node, or
+ * group, as applicable), and in any case helps release current
+ * first waiter if lock is free. (Calling with null arguments
+ * serves as a conditional form of release, which is not currently
+ * needed but may be needed under possible future cancellation
+ * policies). This is a variant of cancellation methods in
+ * AbstractQueuedSynchronizer (see its detailed explanation in AQS
+ * internal documentation).
+ *
+ * @param node if nonnull, the waiter
+ * @param group either node or the group node is cowaiting with
+ * @param interrupted if already interrupted
+ * @return INTERRUPTED if interrupted or Thread.interrupted, else zero
+ */
+ private long cancelWaiter(WNode node, WNode group, boolean interrupted) {
+ if (node != null && group != null) {
+ Thread w;
+ node.status = CANCELLED;
+ // unsplice cancelled nodes from group
+ for (WNode p = group, q; (q = p.cowait) != null;) {
+ if (q.status == CANCELLED) {
+ U.compareAndSwapObject(p, WCOWAIT, q, q.cowait);
+ p = group; // restart
+ }
+ else
+ p = q;
+ }
+ if (group == node) {
+ for (WNode r = group.cowait; r != null; r = r.cowait) {
+ if ((w = r.thread) != null)
+ U.unpark(w); // wake up uncancelled co-waiters
+ }
+ for (WNode pred = node.prev; pred != null; ) { // unsplice
+ WNode succ, pp; // find valid successor
+ while ((succ = node.next) == null ||
+ succ.status == CANCELLED) {
+ WNode q = null; // find successor the slow way
+ for (WNode t = wtail; t != null && t != node; t = t.prev)
+ if (t.status != CANCELLED)
+ q = t; // don't link if succ cancelled
+ if (succ == q || // ensure accurate successor
+ U.compareAndSwapObject(node, WNEXT,
+ succ, succ = q)) {
+ if (succ == null && node == wtail)
+ U.compareAndSwapObject(this, WTAIL, node, pred);
+ break;
+ }
+ }
+ if (pred.next == node) // unsplice pred link
+ U.compareAndSwapObject(pred, WNEXT, node, succ);
+ if (succ != null && (w = succ.thread) != null) {
+ succ.thread = null;
+ U.unpark(w); // wake up succ to observe new pred
+ }
+ if (pred.status != CANCELLED || (pp = pred.prev) == null)
+ break;
+ node.prev = pp; // repeat if new pred wrong/cancelled
+ U.compareAndSwapObject(pp, WNEXT, pred, succ);
+ pred = pp;
+ }
+ }
+ }
+ WNode h; // Possibly release first waiter
+ while ((h = whead) != null) {
+ long s; WNode q; // similar to release() but check eligibility
+ if ((q = h.next) == null || q.status == CANCELLED) {
+ for (WNode t = wtail; t != null && t != h; t = t.prev)
+ if (t.status <= 0)
+ q = t;
+ }
+ if (h == whead) {
+ if (q != null && h.status == 0 &&
+ ((s = state) & ABITS) != WBIT && // waiter is eligible
+ (s == 0L || q.mode == RMODE))
+ release(h);
+ break;
+ }
+ }
+ return (interrupted || Thread.interrupted()) ? INTERRUPTED : 0L;
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long STATE;
+ private static final long WHEAD;
+ private static final long WTAIL;
+ private static final long WNEXT;
+ private static final long WSTATUS;
+ private static final long WCOWAIT;
+ private static final long PARKBLOCKER;
+
+ static {
+ try {
+ U = getUnsafe();
+ Class<?> k = StampedLock.class;
+ Class<?> wk = WNode.class;
+ STATE = U.objectFieldOffset
+ (k.getDeclaredField("state"));
+ WHEAD = U.objectFieldOffset
+ (k.getDeclaredField("whead"));
+ WTAIL = U.objectFieldOffset
+ (k.getDeclaredField("wtail"));
+ WSTATUS = U.objectFieldOffset
+ (wk.getDeclaredField("status"));
+ WNEXT = U.objectFieldOffset
+ (wk.getDeclaredField("next"));
+ WCOWAIT = U.objectFieldOffset
+ (wk.getDeclaredField("cowait"));
+ Class<?> tk = Thread.class;
+ PARKBLOCKER = U.objectFieldOffset
+ (tk.getDeclaredField("parkBlocker"));
+
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
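Editor's note: the view methods near the end of this file let a StampedLock
stand in where a plain Lock or ReadWriteLock is expected, with stamps managed
internally by the view. A minimal sketch using only the API declared above:

    import java.util.concurrent.locks.ReadWriteLock;
    import jsr166e.StampedLock;

    StampedLock sl = new StampedLock();
    ReadWriteLock rw = sl.asReadWriteLock();
    rw.readLock().lock();              // equivalent to sl.readLock()
    try {
        // read shared state
    } finally {
        rw.readLock().unlock();        // unstamped equivalent of unlockRead
    }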
diff --git a/src/main/java/jsr166e/Striped64.java b/src/main/java/jsr166e/Striped64.java
new file mode 100644
index 0000000..45e4e2d
--- /dev/null
+++ b/src/main/java/jsr166e/Striped64.java
@@ -0,0 +1,341 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e;
+import java.util.Random;
+
+/**
+ * A package-local class holding common representation and mechanics
+ * for classes supporting dynamic striping on 64-bit values. The class
+ * extends Number so that concrete subclasses must publicly do so.
+ */
+abstract class Striped64 extends Number {
+ /*
+ * This class maintains a lazily-initialized table of atomically
+ * updated variables, plus an extra "base" field. The table size
+ * is a power of two. Indexing uses masked per-thread hash codes.
+ * Nearly all declarations in this class are package-private,
+ * accessed directly by subclasses.
+ *
+ * Table entries are of class Cell; a variant of AtomicLong padded
+ * to reduce cache contention on most processors. Padding is
+ * overkill for most Atomics because they are usually irregularly
+ * scattered in memory and thus don't interfere much with each
+ * other. But Atomic objects residing in arrays will tend to be
+ * placed adjacent to each other, and so will most often share
+ * cache lines (with a huge negative performance impact) without
+ * this precaution.
+ *
+ * In part because Cells are relatively large, we avoid creating
+ * them until they are needed. When there is no contention, all
+ * updates are made to the base field. Upon first contention (a
+ * failed CAS on base update), the table is initialized to size 2.
+ * The table size is doubled upon further contention until
+ * reaching the nearest power of two greater than or equal to the
+ * number of CPUS. Table slots remain empty (null) until they are
+ * needed.
+ *
+ * A single spinlock ("busy") is used for initializing and
+ * resizing the table, as well as populating slots with new Cells.
+ * There is no need for a blocking lock; when the lock is not
+ * available, threads try other slots (or the base). During these
+ * retries, there is increased contention and reduced locality,
+ * which is still better than alternatives.
+ *
+ * Per-thread hash codes are initialized to random values.
+ * Contention and/or table collisions are indicated by failed
+ * CASes when performing an update operation (see method
+ * retryUpdate). Upon a collision, if the table size is less than
+ * the capacity, it is doubled in size unless some other thread
+ * holds the lock. If a hashed slot is empty, and lock is
+ * available, a new Cell is created. Otherwise, if the slot
+ * exists, a CAS is tried. Retries proceed by "double hashing",
+ * using a secondary hash (Marsaglia XorShift) to try to find a
+ * free slot.
+ *
+ * The table size is capped because, when there are more threads
+ * than CPUs, supposing that each thread were bound to a CPU,
+ * there would exist a perfect hash function mapping threads to
+ * slots that eliminates collisions. When we reach capacity, we
+ * search for this mapping by randomly varying the hash codes of
+ * colliding threads. Because search is random, and collisions
+ * only become known via CAS failures, convergence can be slow,
+ * and because threads are typically not bound to CPUS forever,
+ * may not occur at all. However, despite these limitations,
+ * observed contention rates are typically low in these cases.
+ *
+ * It is possible for a Cell to become unused when threads that
+ * once hashed to it terminate, as well as in the case where
+ * doubling the table causes no thread to hash to it under
+ * expanded mask. We do not try to detect or remove such cells,
+ * under the assumption that for long-running instances, observed
+ * contention levels will recur, so the cells will eventually be
+ * needed again; and for short-lived ones, it does not matter.
+ */
+
+ /**
+ * Padded variant of AtomicLong supporting only raw accesses plus CAS.
+ * The value field is placed between pads, hoping that the JVM doesn't
+ * reorder them.
+ *
+ * JVM intrinsics note: It would be possible to use a release-only
+ * form of CAS here, if it were provided.
+ */
+ static final class Cell {
+ volatile long p0, p1, p2, p3, p4, p5, p6;
+ volatile long value;
+ volatile long q0, q1, q2, q3, q4, q5, q6;
+ Cell(long x) { value = x; }
+
+ final boolean cas(long cmp, long val) {
+ return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long valueOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> ak = Cell.class;
+ valueOffset = UNSAFE.objectFieldOffset
+ (ak.getDeclaredField("value"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ }
+
+ /**
+ * Holder for the thread-local hash code. The code is initially
+ * random, but may be set to a different value upon collisions.
+ */
+ static final class HashCode {
+ static final Random rng = new Random();
+ int code;
+ HashCode() {
+ int h = rng.nextInt(); // Avoid zero to allow xorShift rehash
+ code = (h == 0) ? 1 : h;
+ }
+ }
+
+ /**
+ * The corresponding ThreadLocal class
+ */
+ static final class ThreadHashCode extends ThreadLocal<HashCode> {
+ public HashCode initialValue() { return new HashCode(); }
+ }
+
+ /**
+ * Static per-thread hash codes. Shared across all instances to
+ * reduce ThreadLocal pollution and because adjustments due to
+ * collisions in one table are likely to be appropriate for
+ * others.
+ */
+ static final ThreadHashCode threadHashCode = new ThreadHashCode();
+
+ /** Number of CPUS, to place bound on table size */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /**
+ * Table of cells. When non-null, size is a power of 2.
+ */
+ transient volatile Cell[] cells;
+
+ /**
+ * Base value, used mainly when there is no contention, but also as
+ * a fallback during table initialization races. Updated via CAS.
+ */
+ transient volatile long base;
+
+ /**
+ * Spinlock (locked via CAS) used when resizing and/or creating Cells.
+ */
+ transient volatile int busy;
+
+ /**
+ * Package-private default constructor
+ */
+ Striped64() {
+ }
+
+ /**
+ * CASes the base field.
+ */
+ final boolean casBase(long cmp, long val) {
+ return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val);
+ }
+
+ /**
+ * CASes the busy field from 0 to 1 to acquire lock.
+ */
+ final boolean casBusy() {
+ return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1);
+ }
+
+ /**
+ * Computes the function of current and new value. Subclasses
+ * should open-code this update function for most uses, but the
+ * virtualized form is needed within retryUpdate.
+ *
+ * @param currentValue the current value (of either base or a cell)
+ * @param newValue the argument from a user update call
+ * @return result of the update function
+ */
+ abstract long fn(long currentValue, long newValue);
+
+ /**
+ * Handles cases of updates involving initialization, resizing,
+ * creating new Cells, and/or contention. See above for
+ * explanation. This method suffers the usual non-modularity
+ * problems of optimistic retry code, relying on rechecked sets of
+ * reads.
+ *
+ * @param x the value
+ * @param hc the hash code holder
+ * @param wasUncontended false if CAS failed before call
+ */
+ final void retryUpdate(long x, HashCode hc, boolean wasUncontended) {
+ int h = hc.code;
+ boolean collide = false; // True if last slot nonempty
+ for (;;) {
+ Cell[] as; Cell a; int n; long v;
+ if ((as = cells) != null && (n = as.length) > 0) {
+ if ((a = as[(n - 1) & h]) == null) {
+ if (busy == 0) { // Try to attach new Cell
+ Cell r = new Cell(x); // Optimistically create
+ if (busy == 0 && casBusy()) {
+ boolean created = false;
+ try { // Recheck under lock
+ Cell[] rs; int m, j;
+ if ((rs = cells) != null &&
+ (m = rs.length) > 0 &&
+ rs[j = (m - 1) & h] == null) {
+ rs[j] = r;
+ created = true;
+ }
+ } finally {
+ busy = 0;
+ }
+ if (created)
+ break;
+ continue; // Slot is now non-empty
+ }
+ }
+ collide = false;
+ }
+ else if (!wasUncontended) // CAS already known to fail
+ wasUncontended = true; // Continue after rehash
+ else if (a.cas(v = a.value, fn(v, x)))
+ break;
+ else if (n >= NCPU || cells != as)
+ collide = false; // At max size or stale
+ else if (!collide)
+ collide = true;
+ else if (busy == 0 && casBusy()) {
+ try {
+ if (cells == as) { // Expand table unless stale
+ Cell[] rs = new Cell[n << 1];
+ for (int i = 0; i < n; ++i)
+ rs[i] = as[i];
+ cells = rs;
+ }
+ } finally {
+ busy = 0;
+ }
+ collide = false;
+ continue; // Retry with expanded table
+ }
+ h ^= h << 13; // Rehash
+ h ^= h >>> 17;
+ h ^= h << 5;
+ }
+ else if (busy == 0 && cells == as && casBusy()) {
+ boolean init = false;
+ try { // Initialize table
+ if (cells == as) {
+ Cell[] rs = new Cell[2];
+ rs[h & 1] = new Cell(x);
+ cells = rs;
+ init = true;
+ }
+ } finally {
+ busy = 0;
+ }
+ if (init)
+ break;
+ }
+ else if (casBase(v = base, fn(v, x)))
+ break; // Fall back on using base
+ }
+ hc.code = h; // Record index for next time
+ }
+
+
+ /**
+ * Sets base and all cells to the given value.
+ */
+ final void internalReset(long initialValue) {
+ Cell[] as = cells;
+ base = initialValue;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null)
+ a.value = initialValue;
+ }
+ }
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long baseOffset;
+ private static final long busyOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> sk = Striped64.class;
+ baseOffset = UNSAFE.objectFieldOffset
+ (sk.getDeclaredField("base"));
+ busyOffset = UNSAFE.objectFieldOffset
+ (sk.getDeclaredField("busy"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
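The class above is easier to picture with a toy version of the same striping idea built on standard atomics. The sketch below is illustrative only (StripedCounter and STRIPES are hypothetical names, and a fresh random slot per call stands in for Striped64's cached per-thread hash); the real class additionally grows the cell table on contention and falls back to the base field.

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.atomic.AtomicLongArray;

    // Toy sketch of the striping idea: spread updates over several slots
    // so that no single CAS target becomes a hot spot under contention.
    public class StripedCounter {
        private static final int STRIPES =
            Integer.highestOneBit(Runtime.getRuntime().availableProcessors() * 2);
        private final AtomicLongArray cells = new AtomicLongArray(STRIPES);

        public void add(long x) {
            // Random slot per call for simplicity; Striped64 instead keeps
            // a per-thread hash and rehashes it only when a CAS fails.
            cells.addAndGet(ThreadLocalRandom.current().nextInt(STRIPES), x);
        }

        public long sum() { // racy but adequate snapshot, as in LongAdder
            long s = 0L;
            for (int i = 0; i < STRIPES; ++i)
                s += cells.get(i);
            return s;
        }
    }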
diff --git a/src/main/java/jsr166e/extra/AtomicDouble.java b/src/main/java/jsr166e/extra/AtomicDouble.java
new file mode 100644
index 0000000..b397658
--- /dev/null
+++ b/src/main/java/jsr166e/extra/AtomicDouble.java
@@ -0,0 +1,280 @@
+/*
+ * Written by Doug Lea and Martin Buchholz with assistance from
+ * members of JCP JSR-166 Expert Group and released to the public
+ * domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e.extra;
+
+import static java.lang.Double.doubleToRawLongBits;
+import static java.lang.Double.longBitsToDouble;
+
+/**
+ * A {@code double} value that may be updated atomically. See the
+ * {@link java.util.concurrent.atomic} package specification for
+ * description of the properties of atomic variables. An {@code
+ * AtomicDouble} is used in applications such as atomic accumulation,
+ * and cannot be used as a replacement for a {@link Double}. However,
+ * this class does extend {@code Number} to allow uniform access by
+ * tools and utilities that deal with numerically-based classes.
+ *
+ * <p id="bitEquals">This class compares primitive {@code double}
+ * values in methods such as {@link #compareAndSet} by comparing their
+ * bitwise representation using {@link Double#doubleToRawLongBits},
+ * which differs from both the primitive double {@code ==} operator
+ * and from {@link Double#equals}, as if implemented by:
+ * <pre> {@code
+ * static boolean bitEquals(double x, double y) {
+ * long xBits = Double.doubleToRawLongBits(x);
+ * long yBits = Double.doubleToRawLongBits(y);
+ * return xBits == yBits;
+ * }}</pre>
+ *
+ * @see jsr166e.DoubleAdder
+ * @see jsr166e.DoubleMaxUpdater
+ *
+ * @author Doug Lea
+ * @author Martin Buchholz
+ */
+public class AtomicDouble extends Number implements java.io.Serializable {
+ private static final long serialVersionUID = -8405198993435143622L;
+
+ private transient volatile long value;
+
+ /**
+ * Creates a new {@code AtomicDouble} with the given initial value.
+ *
+ * @param initialValue the initial value
+ */
+ public AtomicDouble(double initialValue) {
+ value = doubleToRawLongBits(initialValue);
+ }
+
+ /**
+ * Creates a new {@code AtomicDouble} with initial value {@code 0.0}.
+ */
+ public AtomicDouble() {
+ // assert doubleToRawLongBits(0.0) == 0L;
+ }
+
+ /**
+ * Gets the current value.
+ *
+ * @return the current value
+ */
+ public final double get() {
+ return longBitsToDouble(value);
+ }
+
+ /**
+ * Sets to the given value.
+ *
+ * @param newValue the new value
+ */
+ public final void set(double newValue) {
+ long next = doubleToRawLongBits(newValue);
+ value = next;
+ }
+
+ /**
+ * Eventually sets to the given value.
+ *
+ * @param newValue the new value
+ */
+ public final void lazySet(double newValue) {
+ long next = doubleToRawLongBits(newValue);
+ unsafe.putOrderedLong(this, valueOffset, next);
+ }
+
+ /**
+ * Atomically sets to the given value and returns the old value.
+ *
+ * @param newValue the new value
+ * @return the previous value
+ */
+ public final double getAndSet(double newValue) {
+ long next = doubleToRawLongBits(newValue);
+ while (true) {
+ long current = value;
+ if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
+ return longBitsToDouble(current);
+ }
+ }
+
+ /**
+ * Atomically sets the value to the given updated value
+ * if the current value is <a href="#bitEquals">bitwise equal</a>
+ * to the expected value.
+ *
+ * @param expect the expected value
+ * @param update the new value
+ * @return {@code true} if successful. False return indicates that
+ * the actual value was not bitwise equal to the expected value.
+ */
+ public final boolean compareAndSet(double expect, double update) {
+ return unsafe.compareAndSwapLong(this, valueOffset,
+ doubleToRawLongBits(expect),
+ doubleToRawLongBits(update));
+ }
+
+ /**
+ * Atomically sets the value to the given updated value
+ * if the current value is <a href="#bitEquals">bitwise equal</a>
+ * to the expected value.
+ *
+ * <p><a
+ * href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
+ * May fail spuriously and does not provide ordering guarantees</a>,
+ * so is only rarely an appropriate alternative to {@code compareAndSet}.
+ *
+ * @param expect the expected value
+ * @param update the new value
+ * @return {@code true} if successful
+ */
+ public final boolean weakCompareAndSet(double expect, double update) {
+ return compareAndSet(expect, update);
+ }
+
+ /**
+ * Atomically adds the given value to the current value.
+ *
+ * @param delta the value to add
+ * @return the previous value
+ */
+ public final double getAndAdd(double delta) {
+ while (true) {
+ long current = value;
+ double currentVal = longBitsToDouble(current);
+ double nextVal = currentVal + delta;
+ long next = doubleToRawLongBits(nextVal);
+ if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
+ return currentVal;
+ }
+ }
+
+ /**
+ * Atomically adds the given value to the current value.
+ *
+ * @param delta the value to add
+ * @return the updated value
+ */
+ public final double addAndGet(double delta) {
+ while (true) {
+ long current = value;
+ double currentVal = longBitsToDouble(current);
+ double nextVal = currentVal + delta;
+ long next = doubleToRawLongBits(nextVal);
+ if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
+ return nextVal;
+ }
+ }
+
+ /**
+ * Returns the String representation of the current value.
+ * @return the String representation of the current value
+ */
+ public String toString() {
+ return Double.toString(get());
+ }
+
+ /**
+ * Returns the value of this {@code AtomicDouble} as an {@code int}
+ * after a narrowing primitive conversion.
+ */
+ public int intValue() {
+ return (int) get();
+ }
+
+ /**
+ * Returns the value of this {@code AtomicDouble} as a {@code long}
+ * after a narrowing primitive conversion.
+ */
+ public long longValue() {
+ return (long) get();
+ }
+
+ /**
+ * Returns the value of this {@code AtomicDouble} as a {@code float}
+ * after a narrowing primitive conversion.
+ */
+ public float floatValue() {
+ return (float) get();
+ }
+
+ /**
+ * Returns the value of this {@code AtomicDouble} as a {@code double}.
+ */
+ public double doubleValue() {
+ return get();
+ }
+
+ /**
+ * Saves the state to a stream (that is, serializes it).
+ *
+ * @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
+ * @serialData The current value is emitted (a {@code double}).
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+
+ s.writeDouble(get());
+ }
+
+ /**
+ * Reconstitutes the instance from a stream (that is, deserializes it).
+ * @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+
+ set(s.readDouble());
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe unsafe = getUnsafe();
+ private static final long valueOffset;
+
+ static {
+ try {
+ valueOffset = unsafe.objectFieldOffset
+ (AtomicDouble.class.getDeclaredField("value"));
+ } catch (Exception ex) { throw new Error(ex); }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
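A short usage sketch (hypothetical AtomicDoubleDemo, assuming the jsr166e.extra package above is on the classpath) makes the bitwise-comparison caveat from the javadoc concrete: -0.0 does not match a stored +0.0, while a canonical NaN does match itself.

    import jsr166e.extra.AtomicDouble;

    public class AtomicDoubleDemo {
        public static void main(String[] args) {
            AtomicDouble d = new AtomicDouble(0.0);

            // compareAndSet uses raw bit equality, so -0.0 does not match
            // +0.0 even though (0.0 == -0.0) is true for the primitive.
            System.out.println(d.compareAndSet(-0.0, 1.0)); // false
            System.out.println(d.compareAndSet(0.0, 1.0));  // true

            // Conversely, a stored NaN does match an expected NaN with the
            // same bit pattern, although (NaN == NaN) is false.
            d.set(Double.NaN);
            System.out.println(d.compareAndSet(Double.NaN, 2.0)); // true

            // Lock-free accumulation via CAS retry loops.
            System.out.println(d.addAndGet(0.5)); // 2.5
        }
    }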
diff --git a/src/main/java/jsr166e/extra/AtomicDoubleArray.java b/src/main/java/jsr166e/extra/AtomicDoubleArray.java
new file mode 100644
index 0000000..33b10f8
--- /dev/null
+++ b/src/main/java/jsr166e/extra/AtomicDoubleArray.java
@@ -0,0 +1,327 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e.extra;
+
+import static java.lang.Double.doubleToRawLongBits;
+import static java.lang.Double.longBitsToDouble;
+
+/**
+ * A {@code double} array in which elements may be updated atomically.
+ * See the {@link java.util.concurrent.atomic} package specification
+ * for description of the properties of atomic variables.
+ *
+ * <p id="bitEquals">This class compares primitive {@code double}
+ * values in methods such as {@link #compareAndSet} by comparing their
+ * bitwise representation using {@link Double#doubleToRawLongBits},
+ * which differs from both the primitive double {@code ==} operator
+ * and from {@link Double#equals}, as if implemented by:
+ * <pre> {@code
+ * static boolean bitEquals(double x, double y) {
+ * long xBits = Double.doubleToRawLongBits(x);
+ * long yBits = Double.doubleToRawLongBits(y);
+ * return xBits == yBits;
+ * }}</pre>
+ *
+ * @author Doug Lea
+ * @author Martin Buchholz
+ */
+public class AtomicDoubleArray implements java.io.Serializable {
+ private static final long serialVersionUID = -2308431214976778248L;
+
+ private final transient long[] array;
+
+ private long checkedByteOffset(int i) {
+ if (i < 0 || i >= array.length)
+ throw new IndexOutOfBoundsException("index " + i);
+
+ return byteOffset(i);
+ }
+
+ private static long byteOffset(int i) {
+ return ((long) i << shift) + base;
+ }
+
+ /**
+ * Creates a new {@code AtomicDoubleArray} of the given length,
+ * with all elements initially zero.
+ *
+ * @param length the length of the array
+ */
+ public AtomicDoubleArray(int length) {
+ array = new long[length];
+ }
+
+ /**
+ * Creates a new {@code AtomicDoubleArray} with the same length
+ * as, and all elements copied from, the given array.
+ *
+ * @param array the array to copy elements from
+ * @throws NullPointerException if array is null
+ */
+ public AtomicDoubleArray(double[] array) {
+ // Visibility guaranteed by final field guarantees
+ final int len = array.length;
+ final long[] a = new long[len];
+ for (int i = 0; i < len; i++)
+ a[i] = doubleToRawLongBits(array[i]);
+ this.array = a;
+ }
+
+ /**
+ * Returns the length of the array.
+ *
+ * @return the length of the array
+ */
+ public final int length() {
+ return array.length;
+ }
+
+ /**
+ * Gets the current value at position {@code i}.
+ *
+ * @param i the index
+ * @return the current value
+ */
+ public final double get(int i) {
+ return longBitsToDouble(getRaw(checkedByteOffset(i)));
+ }
+
+ private long getRaw(long offset) {
+ return unsafe.getLongVolatile(array, offset);
+ }
+
+ /**
+ * Sets the element at position {@code i} to the given value.
+ *
+ * @param i the index
+ * @param newValue the new value
+ */
+ public final void set(int i, double newValue) {
+ long next = doubleToRawLongBits(newValue);
+ unsafe.putLongVolatile(array, checkedByteOffset(i), next);
+ }
+
+ /**
+ * Eventually sets the element at position {@code i} to the given value.
+ *
+ * @param i the index
+ * @param newValue the new value
+ */
+ public final void lazySet(int i, double newValue) {
+ long next = doubleToRawLongBits(newValue);
+ unsafe.putOrderedLong(array, checkedByteOffset(i), next);
+ }
+
+ /**
+ * Atomically sets the element at position {@code i} to the given value
+ * and returns the old value.
+ *
+ * @param i the index
+ * @param newValue the new value
+ * @return the previous value
+ */
+ public final double getAndSet(int i, double newValue) {
+ long next = doubleToRawLongBits(newValue);
+ long offset = checkedByteOffset(i);
+ while (true) {
+ long current = getRaw(offset);
+ if (compareAndSetRaw(offset, current, next))
+ return longBitsToDouble(current);
+ }
+ }
+
+ /**
+ * Atomically sets the element at position {@code i} to the given
+ * updated value
+ * if the current value is <a href="#bitEquals">bitwise equal</a>
+ * to the expected value.
+ *
+ * @param i the index
+ * @param expect the expected value
+ * @param update the new value
+ * @return {@code true} if successful. False return indicates that
+ * the actual value was not bitwise equal to the expected value.
+ */
+ public final boolean compareAndSet(int i, double expect, double update) {
+ return compareAndSetRaw(checkedByteOffset(i),
+ doubleToRawLongBits(expect),
+ doubleToRawLongBits(update));
+ }
+
+ private boolean compareAndSetRaw(long offset, long expect, long update) {
+ return unsafe.compareAndSwapLong(array, offset, expect, update);
+ }
+
+ /**
+ * Atomically sets the element at position {@code i} to the given
+ * updated value
+ * if the current value is <a href="#bitEquals">bitwise equal</a>
+ * to the expected value.
+ *
+ * <p><a
+ * href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
+ * May fail spuriously and does not provide ordering guarantees</a>,
+ * so is only rarely an appropriate alternative to {@code compareAndSet}.
+ *
+ * @param i the index
+ * @param expect the expected value
+ * @param update the new value
+ * @return true if successful
+ */
+ public final boolean weakCompareAndSet(int i, double expect, double update) {
+ return compareAndSet(i, expect, update);
+ }
+
+ /**
+ * Atomically adds the given value to the element at index {@code i}.
+ *
+ * @param i the index
+ * @param delta the value to add
+ * @return the previous value
+ */
+ public final double getAndAdd(int i, double delta) {
+ long offset = checkedByteOffset(i);
+ while (true) {
+ long current = getRaw(offset);
+ double currentVal = longBitsToDouble(current);
+ double nextVal = currentVal + delta;
+ long next = doubleToRawLongBits(nextVal);
+ if (compareAndSetRaw(offset, current, next))
+ return currentVal;
+ }
+ }
+
+ /**
+ * Atomically adds the given value to the element at index {@code i}.
+ *
+ * @param i the index
+ * @param delta the value to add
+ * @return the updated value
+ */
+ public double addAndGet(int i, double delta) {
+ long offset = checkedByteOffset(i);
+ while (true) {
+ long current = getRaw(offset);
+ double currentVal = longBitsToDouble(current);
+ double nextVal = currentVal + delta;
+ long next = doubleToRawLongBits(nextVal);
+ if (compareAndSetRaw(offset, current, next))
+ return nextVal;
+ }
+ }
+
+ /**
+ * Returns the String representation of the current values of array.
+ * @return the String representation of the current values of array
+ */
+ public String toString() {
+ int iMax = array.length - 1;
+ if (iMax == -1)
+ return "[]";
+
+ // Double.toString(Math.PI).length() == 17
+ StringBuilder b = new StringBuilder((17 + 2) * (iMax + 1));
+ b.append('[');
+ for (int i = 0;; i++) {
+ b.append(longBitsToDouble(getRaw(byteOffset(i))));
+ if (i == iMax)
+ return b.append(']').toString();
+ b.append(',').append(' ');
+ }
+ }
+
+ /**
+ * Saves the state to a stream (that is, serializes it).
+ *
+ * @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
+ * @serialData The length of the array is emitted (int), followed by all
+ * of its elements (each a {@code double}) in the proper order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+
+ // Write out array length
+ int length = length();
+ s.writeInt(length);
+
+ // Write out all elements in the proper order.
+ for (int i = 0; i < length; i++)
+ s.writeDouble(get(i));
+ }
+
+ /**
+ * Reconstitutes the instance from a stream (that is, deserializes it).
+ * @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+
+ // Read in array length and allocate array
+ int length = s.readInt();
+ unsafe.putObjectVolatile(this, arrayOffset, new long[length]);
+
+ // Read in all elements in the proper order.
+ for (int i = 0; i < length; i++)
+ set(i, s.readDouble());
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe unsafe = getUnsafe();
+ private static final long arrayOffset;
+ private static final int base = unsafe.arrayBaseOffset(long[].class);
+ private static final int shift;
+
+ static {
+ try {
+ Class<?> k = AtomicDoubleArray.class;
+ arrayOffset = unsafe.objectFieldOffset
+ (k.getDeclaredField("array"));
+ int scale = unsafe.arrayIndexScale(long[].class);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ shift = 31 - Integer.numberOfLeadingZeros(scale);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
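As with AtomicDouble, a small sketch (hypothetical HistogramDemo, assuming the class above compiles in jsr166e.extra) shows the intended use: several threads accumulating into shared double bins without locking, with addAndGet retrying its CAS internally.

    import jsr166e.extra.AtomicDoubleArray;

    public class HistogramDemo {
        public static void main(String[] args) throws InterruptedException {
            final AtomicDoubleArray bins = new AtomicDoubleArray(4);
            Thread[] workers = new Thread[4];
            for (int t = 0; t < workers.length; t++) {
                workers[t] = new Thread(new Runnable() {
                    public void run() {
                        // 1000 adds per thread, round-robin over the bins.
                        for (int i = 0; i < 1000; i++)
                            bins.addAndGet(i % bins.length(), 0.25);
                    }
                });
                workers[t].start();
            }
            for (Thread w : workers)
                w.join();
            // Each bin: 4 threads * 250 adds * 0.25 = 250.0
            System.out.println(bins);
        }
    }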
diff --git a/src/main/java/jsr166e/extra/ReadMostlyVector.java b/src/main/java/jsr166e/extra/ReadMostlyVector.java
new file mode 100644
index 0000000..fae43f5
--- /dev/null
+++ b/src/main/java/jsr166e/extra/ReadMostlyVector.java
@@ -0,0 +1,1631 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e.extra;
+import jsr166e.StampedLock;
+import java.util.*;
+
+/**
+ * A class with the same methods and array-based characteristics as
+ * {@link java.util.Vector} but with reduced contention and improved
+ * throughput when invocations of read-only methods by multiple
+ * threads are most common.
+ *
+ * <p>The iterators returned by this class's {@link #iterator()
+ * iterator} and {@link #listIterator(int) listIterator} methods are
+ * best-effort in the presence of concurrent modifications, and do
+ * <em>NOT</em> throw {@link ConcurrentModificationException}. An
+ * iterator's {@code next()} method returns consecutive elements as
+ * they appear in the underlying array upon each access. Alternatively,
+ * method {@link #snapshotIterator} may be used for deterministic
+ * traversals, at the expense of making a copy, and unavailability of
+ * method {@code Iterator.remove}.
+ *
+ * <p>Otherwise, this class supports all methods, under the same
+ * documented specifications, as {@code Vector}. Consult {@link
+ * java.util.Vector} for detailed specifications. Additionally, this
+ * class provides methods {@link #addIfAbsent} and {@link
+ * #addAllAbsent}.
+ *
+ * @author Doug Lea
+ */
+public class ReadMostlyVector<E>
+ implements List<E>, RandomAccess, Cloneable, java.io.Serializable {
+ private static final long serialVersionUID = 8673264195747942595L;
+
+ /*
+ * This class exists mainly as a vehicle to exercise various
+ * constructions using SequenceLocks. Read-only methods
+ * take one of a few forms:
+ *
+ * Short methods, including get(index), continually retry obtaining
+ * a snapshot of array, count, and element, using sequence number
+ * to validate.
+ *
+ * Methods that are potentially O(n) (or worse) try once in
+ * read-only mode, and then lock. When in read-only mode, they
+ * validate only at the end of an array scan unless the element is
+ * actually used (for example, as an argument of method equals).
+ *
+ * We rely on some invariants that are always true, even for field
+ * reads in read-only mode that have not yet been validated:
+ * - array != null
+ * - count >= 0
+ */
+
+ /**
+ * The maximum size of array to allocate.
+ * See CopyOnWriteArrayList for explanation.
+ */
+ private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
+
+ // fields are non-private to simplify nested class access
+ Object[] array;
+ final StampedLock lock;
+ int count;
+ final int capacityIncrement;
+
+ /**
+ * Creates an empty vector with the given initial capacity and
+ * capacity increment.
+ *
+ * @param initialCapacity the initial capacity of the underlying array
+ * @param capacityIncrement if non-zero, the number to
+ * add when resizing to accommodate additional elements.
+ * If zero, the array size is doubled when resized.
+ *
+ * @throws IllegalArgumentException if initial capacity is negative
+ */
+ public ReadMostlyVector(int initialCapacity, int capacityIncrement) {
+ super();
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException("Illegal Capacity: "+
+ initialCapacity);
+ this.array = new Object[initialCapacity];
+ this.capacityIncrement = capacityIncrement;
+ this.lock = new StampedLock();
+ }
+
+ /**
+ * Creates an empty vector with the given initial capacity.
+ *
+ * @param initialCapacity the initial capacity of the underlying array
+ * @throws IllegalArgumentException if initial capacity is negative
+ */
+ public ReadMostlyVector(int initialCapacity) {
+ this(initialCapacity, 0);
+ }
+
+ /**
+ * Creates an empty vector.
+ */
+ public ReadMostlyVector() {
+ this.capacityIncrement = 0;
+ this.lock = new StampedLock();
+ }
+
+ /**
+ * Creates a vector containing the elements of the specified
+ * collection, in the order they are returned by the collection's
+ * iterator.
+ *
+ * @param c the collection of initially held elements
+ * @throws NullPointerException if the specified collection is null
+ */
+ public ReadMostlyVector(Collection<? extends E> c) {
+ Object[] elements = c.toArray();
+ // c.toArray might (incorrectly) not return Object[] (see 6260652)
+ if (elements.getClass() != Object[].class)
+ elements = Arrays.copyOf(elements, elements.length, Object[].class);
+ this.array = elements;
+ this.count = elements.length;
+ this.capacityIncrement = 0;
+ this.lock = new StampedLock();
+ }
+
+ // internal constructor for clone
+ ReadMostlyVector(Object[] array, int count, int capacityIncrement) {
+ this.array = array;
+ this.count = count;
+ this.capacityIncrement = capacityIncrement;
+ this.lock = new StampedLock();
+ }
+
+ static final int INITIAL_CAP = 16;
+
+ // For explanation, see CopyOnWriteArrayList
+ final Object[] grow(int minCapacity) {
+ Object[] items;
+ int newCapacity;
+ if ((items = array) == null)
+ newCapacity = INITIAL_CAP;
+ else {
+ int oldCapacity = array.length;
+ newCapacity = oldCapacity + ((capacityIncrement > 0) ?
+ capacityIncrement : oldCapacity);
+ }
+ if (newCapacity - minCapacity < 0)
+ newCapacity = minCapacity;
+ if (newCapacity - MAX_ARRAY_SIZE > 0) {
+ if (minCapacity < 0) // overflow
+ throw new OutOfMemoryError();
+ else if (minCapacity > MAX_ARRAY_SIZE)
+ newCapacity = Integer.MAX_VALUE;
+ else
+ newCapacity = MAX_ARRAY_SIZE;
+ }
+ return array = ((items == null) ?
+ new Object[newCapacity] :
+ Arrays.copyOf(items, newCapacity));
+ }
+
+ /*
+ * Internal versions of most base functionality, wrapped
+ * in different ways from public methods from this class
+ * as well as sublist and iterator classes.
+ */
+
+ static int findFirstIndex(Object[] items, Object x, int index, int fence) {
+ int len;
+ if (items != null && (len = items.length) > 0) {
+ int start = (index < 0) ? 0 : index;
+ int bound = (fence < len) ? fence : len;
+ for (int i = start; i < bound; ++i) {
+ Object e = items[i];
+ if ((x == null) ? e == null : x.equals(e))
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ static int findLastIndex(Object[] items, Object x, int index, int origin) {
+ int len;
+ if (items != null && (len = items.length) > 0) {
+ int last = (index < len) ? index : len - 1;
+ int start = (origin < 0) ? 0 : origin;
+ for (int i = last; i >= start; --i) {
+ Object e = items[i];
+ if ((x == null) ? e == null : x.equals(e))
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ final void rawAdd(E e) {
+ int n = count;
+ Object[] items = array;
+ if (items == null || n >= items.length)
+ items = grow(n + 1);
+ items[n] = e;
+ count = n + 1;
+ }
+
+ final void rawAddAt(int index, E e) {
+ int n = count;
+ Object[] items = array;
+ if (index > n)
+ throw new ArrayIndexOutOfBoundsException(index);
+ if (items == null || n >= items.length)
+ items = grow(n + 1);
+ if (index < n)
+ System.arraycopy(items, index, items, index + 1, n - index);
+ items[index] = e;
+ count = n + 1;
+ }
+
+ final boolean rawAddAllAt(int index, Object[] elements) {
+ int n = count;
+ Object[] items = array;
+ if (index < 0 || index > n)
+ throw new ArrayIndexOutOfBoundsException(index);
+ int len = elements.length;
+ if (len == 0)
+ return false;
+ int newCount = n + len;
+ if (items == null || newCount >= items.length)
+ items = grow(newCount);
+ int mv = n - index;
+ if (mv > 0)
+ System.arraycopy(items, index, items, index + len, mv);
+ System.arraycopy(elements, 0, items, index, len);
+ count = newCount;
+ return true;
+ }
+
+ final boolean rawRemoveAt(int index) {
+ int n = count - 1;
+ Object[] items = array;
+ if (items == null || index < 0 || index > n)
+ return false;
+ int mv = n - index;
+ if (mv > 0)
+ System.arraycopy(items, index + 1, items, index, mv);
+ items[n] = null;
+ count = n;
+ return true;
+ }
+
+ /**
+ * Internal version of removeAll for lists and sublists. In this
+ * and other similar methods below, the bound argument is, if
+ * non-negative, the purported upper bound of a list/sublist, or
+ * is left negative if the bound should be determined via count
+ * field under lock.
+ */
+ final boolean lockedRemoveAll(Collection<?> c, int origin, int bound) {
+ boolean removed = false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ int n = count;
+ int fence = bound < 0 || bound > n ? n : bound;
+ if (origin >= 0 && origin < fence) {
+ for (Object x : c) {
+ while (rawRemoveAt(findFirstIndex(array, x, origin, fence)))
+ removed = true;
+ }
+ }
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ return removed;
+ }
+
+ final boolean lockedRetainAll(Collection<?> c, int origin, int bound) {
+ final StampedLock lock = this.lock;
+ boolean removed = false;
+ if (c != this) {
+ long stamp = lock.writeLock();
+ try {
+ Object[] items;
+ int i, n;
+ if ((items = array) != null && (n = count) > 0 &&
+ n < items.length && (i = origin) >= 0) {
+ int fence = bound < 0 || bound > n ? n : bound;
+ while (i < fence) {
+ if (c.contains(items[i]))
+ ++i;
+ else {
+ --fence;
+ int mv = --n - i;
+ if (mv > 0)
+ System.arraycopy(items, i + 1, items, i, mv);
+ }
+ }
+ if (count != n) {
+ count = n;
+ removed = true;
+ }
+ }
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+ return removed;
+ }
+
+ final void internalClear(int origin, int bound) {
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if (origin < 0)
+ origin = 0;
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ int removed = fence - origin;
+ int newCount = n - removed;
+ int mv = n - (origin + removed);
+ if (mv > 0)
+ System.arraycopy(items, origin + removed, items, origin, mv);
+ for (int i = newCount; i < n; ++i) // clear vacated slots
+ items[i] = null;
+ count = newCount;
+ }
+ }
+
+ final boolean internalContainsAll(Collection<?> c, int origin, int bound) {
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if (origin < 0)
+ origin = 0;
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ for (Object e : c) {
+ if (findFirstIndex(items, e, origin, fence) < 0)
+ return false;
+ }
+ }
+ else if (!c.isEmpty())
+ return false;
+ return true;
+ }
+
+ final boolean internalEquals(List<?> list, int origin, int bound) {
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if (origin < 0)
+ origin = 0;
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ Iterator<?> it = list.iterator();
+ for (int i = origin; i < fence; ++i) {
+ if (!it.hasNext())
+ return false;
+ Object y = it.next();
+ Object x = items[i];
+ if (x != y && (x == null || !x.equals(y)))
+ return false;
+ }
+ if (it.hasNext())
+ return false;
+ }
+ else if (!list.isEmpty())
+ return false;
+ return true;
+ }
+
+ final int internalHashCode(int origin, int bound) {
+ int hash = 1;
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if (origin < 0)
+ origin = 0;
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ for (int i = origin; i < fence; ++i) {
+ Object e = items[i];
+ hash = 31*hash + (e == null ? 0 : e.hashCode());
+ }
+ }
+ return hash;
+ }
+
+ final String internalToString(int origin, int bound) {
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ int i = (origin < 0) ? 0 : origin;
+ if (i != fence) {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[');
+ for (;;) {
+ Object e = items[i];
+ sb.append((e == this) ? "(this Collection)" : e.toString());
+ if (++i < fence)
+ sb.append(',').append(' ');
+ else
+ return sb.append(']').toString();
+ }
+ }
+ }
+ return "[]";
+ }
+
+ final Object[] internalToArray(int origin, int bound) {
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if (origin < 0)
+ origin = 0;
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ int i = (origin < 0) ? 0 : origin;
+ if (i != fence)
+ return Arrays.copyOfRange(items, i, fence, Object[].class);
+ }
+ return new Object[0];
+ }
+
+ @SuppressWarnings("unchecked")
+ final <T> T[] internalToArray(T[] a, int origin, int bound) {
+ int alen = a.length;
+ Object[] items;
+ int n, len;
+ if ((items = array) != null && (len = items.length) > 0) {
+ if (origin < 0)
+ origin = 0;
+ if ((n = count) > len)
+ n = len;
+ int fence = bound < 0 || bound > n ? n : bound;
+ int i = (origin < 0) ? 0 : origin;
+ int rlen = fence - origin;
+ if (rlen > 0) {
+ if (alen >= rlen) {
+ System.arraycopy(items, origin, a, 0, rlen);
+ if (alen > rlen)
+ a[rlen] = null;
+ return a;
+ }
+ return (T[]) Arrays.copyOfRange(items, i, fence, a.getClass());
+ }
+ }
+ if (alen > 0)
+ a[0] = null;
+ return a;
+ }
+
+ // public List methods
+
+ public boolean add(E e) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ rawAdd(e);
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ return true;
+ }
+
+ public void add(int index, E element) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ rawAddAt(index, element);
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean addAll(Collection<? extends E> c) {
+ Object[] elements = c.toArray();
+ int len = elements.length;
+ if (len == 0)
+ return false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items = array;
+ int n = count;
+ int newCount = n + len;
+ if (items == null || newCount >= items.length)
+ items = grow(newCount);
+ System.arraycopy(elements, 0, items, n, len);
+ count = newCount;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ return true;
+ }
+
+ public boolean addAll(int index, Collection<? extends E> c) {
+ Object[] elements = c.toArray();
+ boolean ret;
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ ret = rawAddAllAt(index, elements);
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ return ret;
+ }
+
+ public void clear() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ int n = count;
+ Object[] items = array;
+ if (items != null) {
+ for (int i = 0; i < n; i++)
+ items[i] = null;
+ }
+ count = 0;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean contains(Object o) {
+ return indexOf(o, 0) >= 0;
+ }
+
+ public boolean containsAll(Collection<?> c) {
+ boolean ret;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ ret = internalContainsAll(c, 0, -1);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ return ret;
+ }
+
+ public boolean equals(Object o) {
+ if (o == this)
+ return true;
+ if (!(o instanceof List))
+ return false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ return internalEquals((List<?>)o, 0, -1);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public E get(int index) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.tryOptimisticRead();
+ Object[] items;
+ if (index >= 0 && (items = array) != null &&
+ index < count && index < items.length) {
+ @SuppressWarnings("unchecked") E e = (E)items[index];
+ if (lock.validate(stamp))
+ return e;
+ }
+ return lockedGet(index);
+ }
+
+ @SuppressWarnings("unchecked") private E lockedGet(int index) {
+ boolean oobe = false;
+ E e = null;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ Object[] items;
+ if ((items = array) != null && index < items.length &&
+ index < count && index >= 0)
+ e = (E)items[index];
+ else
+ oobe = true;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ if (oobe)
+ throw new ArrayIndexOutOfBoundsException(index);
+ return e;
+ }
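The get/lockedGet pair above follows the standard StampedLock idiom: attempt an optimistic read, validate the stamp, and fall back to a full read lock only if a writer intervened. A self-contained sketch of the same pattern against the JDK 8 java.util.concurrent.locks.StampedLock (which jsr166e.StampedLock backports; the Point class here is hypothetical):

    import java.util.concurrent.locks.StampedLock;

    class Point {
        private final StampedLock lock = new StampedLock();
        private double x, y;

        double distanceFromOrigin() {
            // Optimistic read: no blocking, just a stamp plus validation,
            // exactly the shape of ReadMostlyVector.get above.
            long stamp = lock.tryOptimisticRead();
            double cx = x, cy = y;
            if (!lock.validate(stamp)) { // a writer intervened;
                stamp = lock.readLock(); // fall back to a real read lock
                try {
                    cx = x;
                    cy = y;
                } finally {
                    lock.unlockRead(stamp);
                }
            }
            return Math.sqrt(cx * cx + cy * cy);
        }

        void move(double dx, double dy) {
            long stamp = lock.writeLock();
            try {
                x += dx;
                y += dy;
            } finally {
                lock.unlockWrite(stamp);
            }
        }
    }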
+
+ public int hashCode() {
+ int h;
+ final StampedLock lock = this.lock;
+ long s = lock.readLock();
+ try {
+ h = internalHashCode(0, -1);
+ } finally {
+ lock.unlockRead(s);
+ }
+ return h;
+ }
+
+ public int indexOf(Object o) {
+ int idx;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ idx = findFirstIndex(array, o, 0, count);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ return idx;
+ }
+
+ public boolean isEmpty() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.tryOptimisticRead();
+ return count == 0; // no need for validation
+ }
+
+ public Iterator<E> iterator() {
+ return new Itr<E>(this, 0);
+ }
+
+ public int lastIndexOf(Object o) {
+ int idx;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ idx = findLastIndex(array, o, count - 1, 0);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ return idx;
+ }
+
+ public ListIterator<E> listIterator() {
+ return new Itr<E>(this, 0);
+ }
+
+ public ListIterator<E> listIterator(int index) {
+ return new Itr<E>(this, index);
+ }
+
+ @SuppressWarnings("unchecked") public E remove(int index) {
+ E oldValue = null;
+ boolean oobe = false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ if (index < 0 || index >= count)
+ oobe = true;
+ else {
+ oldValue = (E) array[index];
+ rawRemoveAt(index);
+ }
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ if (oobe)
+ throw new ArrayIndexOutOfBoundsException(index);
+ return oldValue;
+ }
+
+ public boolean remove(Object o) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ return rawRemoveAt(findFirstIndex(array, o, 0, count));
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean removeAll(Collection<?> c) {
+ return lockedRemoveAll(c, 0, -1);
+ }
+
+ public boolean retainAll(Collection<?> c) {
+ return lockedRetainAll(c, 0, -1);
+ }
+
+ @SuppressWarnings("unchecked") public E set(int index, E element) {
+ E oldValue = null;
+ boolean oobe = false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items = array;
+ if (items == null || index < 0 || index >= count)
+ oobe = true;
+ else {
+ oldValue = (E) items[index];
+ items[index] = element;
+ }
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ if (oobe)
+ throw new ArrayIndexOutOfBoundsException(index);
+ return oldValue;
+ }
+
+ public int size() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.tryOptimisticRead();
+ return count; // no need for validation
+ }
+
+ private int lockedSize() {
+ int n;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ n = count;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ return n;
+ }
+
+ public List<E> subList(int fromIndex, int toIndex) {
+ int ssize = toIndex - fromIndex;
+ if (ssize >= 0 && fromIndex >= 0) {
+ ReadMostlyVectorSublist<E> ret = null;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ if (toIndex <= count)
+ ret = new ReadMostlyVectorSublist<E>(this, fromIndex, ssize);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ if (ret != null)
+ return ret;
+ }
+
+ throw new ArrayIndexOutOfBoundsException(fromIndex < 0 ? fromIndex : toIndex);
+ }
+
+ public Object[] toArray() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ return internalToArray(0, -1);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public <T> T[] toArray(T[] a) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ return internalToArray(a, 0, -1);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public String toString() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ return internalToString(0, -1);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ // ReadMostlyVector-only methods
+
+ /**
+ * Appends the element, if not present.
+ *
+ * @param e element to be added to this list, if absent
+ * @return {@code true} if the element was added
+ */
+ public boolean addIfAbsent(E e) {
+ boolean ret;
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ if (findFirstIndex(array, e, 0, count) < 0) {
+ rawAdd(e);
+ ret = true;
+ }
+ else
+ ret = false;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ return ret;
+ }
+
+ /**
+ * Appends all of the elements in the specified collection that
+ * are not already contained in this list, to the end of
+ * this list, in the order that they are returned by the
+ * specified collection's iterator.
+ *
+ * @param c collection containing elements to be added to this list
+ * @return the number of elements added
+ * @throws NullPointerException if the specified collection is null
+ * @see #addIfAbsent(Object)
+ */
+ public int addAllAbsent(Collection<? extends E> c) {
+ int added = 0;
+ Object[] cs = c.toArray();
+ int clen = cs.length;
+ if (clen != 0) {
+ long stamp = lock.writeLock();
+ try {
+ for (int i = 0; i < clen; ++i) {
+ @SuppressWarnings("unchecked")
+ E e = (E) cs[i];
+ if (findFirstIndex(array, e, 0, count) < 0) {
+ rawAdd(e);
+ ++added;
+ }
+ }
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+ return added;
+ }
+
+ /**
+ * Returns an iterator operating over a snapshot copy of the
+ * elements of this collection created upon construction of the
+ * iterator. The iterator does <em>NOT</em> support the
+ * {@code remove} method.
+ *
+ * @return an iterator over the elements in this list in proper sequence
+ */
+ public Iterator<E> snapshotIterator() {
+ return new SnapshotIterator<E>(this);
+ }
+
+ static final class SnapshotIterator<E> implements Iterator<E> {
+ private final Object[] items;
+ private int cursor;
+ SnapshotIterator(ReadMostlyVector<E> v) { items = v.toArray(); }
+ public boolean hasNext() { return cursor < items.length; }
+ @SuppressWarnings("unchecked") public E next() {
+ if (cursor < items.length)
+ return (E) items[cursor++];
+ throw new NoSuchElementException();
+ }
+ public void remove() { throw new UnsupportedOperationException(); }
+ }
+
+ /** Interface describing a void action of one argument */
+ public interface Action<A> { void apply(A a); }
+
+ public void forEachReadOnly(Action<E> action) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ Object[] items;
+ int len, n;
+ if ((items = array) != null && (len = items.length) > 0 &&
+ (n = count) <= len) {
+ for (int i = 0; i < n; ++i) {
+ @SuppressWarnings("unchecked") E e = (E)items[i];
+ action.apply(e);
+ }
+ }
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
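A brief usage sketch (hypothetical SnapshotDemo, assuming jsr166e.extra is on the classpath) contrasts the copy-once snapshot iterator with the lock-holding bulk traversal just defined:

    import java.util.Iterator;
    import jsr166e.extra.ReadMostlyVector;

    public class SnapshotDemo {
        public static void main(String[] args) {
            ReadMostlyVector<String> v = new ReadMostlyVector<String>();
            v.add("a");
            v.add("b");

            // snapshotIterator copies the array once; later writers cannot
            // perturb this traversal, but Iterator.remove is unsupported.
            Iterator<String> it = v.snapshotIterator();
            v.add("c"); // not seen by the snapshot
            while (it.hasNext())
                System.out.println(it.next()); // prints a, b

            // forEachReadOnly holds the read lock across the whole pass,
            // so it sees a consistent view including the new element.
            v.forEachReadOnly(new ReadMostlyVector.Action<String>() {
                public void apply(String s) { System.out.println(s); }
            }); // prints a, b, c
        }
    }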
+
+ // Vector-only methods
+
+ /** See {@link Vector#firstElement} */
+ public E firstElement() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.tryOptimisticRead();
+ Object[] items;
+ if ((items = array) != null && count > 0 && items.length > 0) {
+ @SuppressWarnings("unchecked") E e = (E)items[0];
+ if (lock.validate(stamp))
+ return e;
+ }
+ return lockedFirstElement();
+ }
+
+ @SuppressWarnings("unchecked") private E lockedFirstElement() {
+ Object e = null;
+ boolean oobe = false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ Object[] items = array;
+ if (items != null && count > 0 && items.length > 0)
+ e = items[0];
+ else
+ oobe = true;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ if (oobe)
+ throw new NoSuchElementException();
+ return (E) e;
+ }
+
+ /** See {@link Vector#lastElement} */
+ public E lastElement() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.tryOptimisticRead();
+ Object[] items;
+ int i;
+ if ((items = array) != null && (i = count - 1) >= 0 &&
+ i < items.length) {
+ @SuppressWarnings("unchecked") E e = (E)items[i];
+ if (lock.validate(stamp))
+ return e;
+ }
+ return lockedLastElement();
+ }
+
+ @SuppressWarnings("unchecked") private E lockedLastElement() {
+ Object e = null;
+ boolean oobe = false;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ Object[] items = array;
+ int i = count - 1;
+ if (items != null && i >= 0 && i < items.length)
+ e = items[i];
+ else
+ oobe = true;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ if (oobe)
+ throw new NoSuchElementException();
+ return (E) e;
+ }
+
+ /** See {@link Vector#indexOf(Object, int)} */
+ public int indexOf(Object o, int index) {
+ if (index < 0)
+ throw new ArrayIndexOutOfBoundsException(index);
+ int idx;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ idx = findFirstIndex(array, o, index, count);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ return idx;
+ }
+
+ /** See {@link Vector#lastIndexOf(Object, int)} */
+ public int lastIndexOf(Object o, int index) {
+ boolean oobe = false;
+ int idx = -1;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ if (index < count)
+ idx = findLastIndex(array, o, index, 0);
+ else
+ oobe = true;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ if (oobe)
+ throw new ArrayIndexOutOfBoundsException(index);
+ return idx;
+ }
+
+ /** See {@link Vector#setSize} */
+ public void setSize(int newSize) {
+ if (newSize < 0)
+ throw new ArrayIndexOutOfBoundsException(newSize);
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items;
+ int n = count;
+ if (newSize > n)
+ grow(newSize);
+ else if ((items = array) != null) {
+ for (int i = newSize; i < n; i++)
+ items[i] = null;
+ }
+ count = newSize;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ /** See {@link Vector#copyInto} */
+ public void copyInto(Object[] anArray) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items;
+ if ((items = array) != null)
+ System.arraycopy(items, 0, anArray, 0, count);
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ /** See {@link Vector#trimToSize} */
+ public void trimToSize() {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items = array;
+ int n = count;
+ if (items != null && n < items.length)
+ array = Arrays.copyOf(items, n);
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ /** See {@link Vector#ensureCapacity} */
+ public void ensureCapacity(int minCapacity) {
+ if (minCapacity > 0) {
+ final StampedLock lock = this.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items = array;
+ int cap = (items == null) ? 0 : items.length;
+ if (minCapacity - cap > 0)
+ grow(minCapacity);
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+ }
+
+ /** See {@link Vector#elements} */
+ public Enumeration<E> elements() {
+ return new Itr<E>(this, 0);
+ }
+
+ /** See {@link Vector#capacity} */
+ public int capacity() {
+ return array.length;
+ }
+
+ /** See {@link Vector#elementAt} */
+ public E elementAt(int index) {
+ return get(index);
+ }
+
+ /** See {@link Vector#setElementAt} */
+ public void setElementAt(E obj, int index) {
+ set(index, obj);
+ }
+
+ /** See {@link Vector#removeElementAt} */
+ public void removeElementAt(int index) {
+ remove(index);
+ }
+
+ /** See {@link Vector#insertElementAt} */
+ public void insertElementAt(E obj, int index) {
+ add(index, obj);
+ }
+
+ /** See {@link Vector#addElement} */
+ public void addElement(E obj) {
+ add(obj);
+ }
+
+ /** See {@link Vector#removeElement} */
+ public boolean removeElement(Object obj) {
+ return remove(obj);
+ }
+
+ /** See {@link Vector#removeAllElements} */
+ public void removeAllElements() {
+ clear();
+ }
+
+ // other methods
+
+ public ReadMostlyVector<E> clone() {
+ Object[] a = null;
+ int n;
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ Object[] items = array;
+ if (items == null)
+ n = 0;
+ else {
+ int len = items.length;
+ if ((n = count) > len)
+ n = len;
+ a = Arrays.copyOf(items, n);
+ }
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ return new ReadMostlyVector<E>(a, n, capacityIncrement);
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ final StampedLock lock = this.lock;
+ long stamp = lock.readLock();
+ try {
+ s.defaultWriteObject();
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ static final class Itr<E> implements ListIterator<E>, Enumeration<E> {
+ final StampedLock lock;
+ final ReadMostlyVector<E> list;
+ Object[] items;
+ long seq;
+ int cursor;
+ int fence;
+ int lastRet;
+
+ Itr(ReadMostlyVector<E> list, int index) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ this.list = list;
+ this.lock = lock;
+ this.items = list.array;
+ this.fence = list.count;
+ this.cursor = index;
+ this.lastRet = -1;
+ } finally {
+ this.seq = lock.tryConvertToOptimisticRead(stamp);
+ }
+ if (index < 0 || index > fence)
+ throw new ArrayIndexOutOfBoundsException(index);
+ }
+
+ public boolean hasPrevious() {
+ return cursor > 0;
+ }
+
+ public int nextIndex() {
+ return cursor;
+ }
+
+ public int previousIndex() {
+ return cursor - 1;
+ }
+
+ public boolean hasNext() {
+ return cursor < fence;
+ }
+
+ public E next() {
+ int i = cursor;
+ Object[] es = items;
+ if (es == null || i < 0 || i >= fence || i >= es.length)
+ throw new NoSuchElementException();
+ @SuppressWarnings("unchecked") E e = (E)es[i];
+ lastRet = i;
+ cursor = i + 1;
+ if (!lock.validate(seq))
+ throw new ConcurrentModificationException();
+ return e;
+ }
+
+ public E previous() {
+ int i = cursor - 1;
+ Object[] es = items;
+ if (es == null || i < 0 || i >= fence || i >= es.length)
+ throw new NoSuchElementException();
+ @SuppressWarnings("unchecked") E e = (E)es[i];
+ lastRet = i;
+ cursor = i;
+ if (!lock.validate(seq))
+ throw new ConcurrentModificationException();
+ return e;
+ }
+
+ public void remove() {
+ int i = lastRet;
+ if (i < 0)
+ throw new IllegalStateException();
+ if ((seq = lock.tryConvertToWriteLock(seq)) == 0)
+ throw new ConcurrentModificationException();
+ try {
+ list.rawRemoveAt(i);
+ fence = list.count;
+ cursor = i;
+ lastRet = -1;
+ } finally {
+ seq = lock.tryConvertToOptimisticRead(seq);
+ }
+ }
+
+ public void set(E e) {
+ int i = lastRet;
+ Object[] es = items;
+ if (es == null || i < 0 || i >= fence)
+ throw new IllegalStateException();
+ if ((seq = lock.tryConvertToWriteLock(seq)) == 0)
+ throw new ConcurrentModificationException();
+ try {
+ es[i] = e;
+ } finally {
+ seq = lock.tryConvertToOptimisticRead(seq);
+ }
+ }
+
+ public void add(E e) {
+ int i = cursor;
+ if (i < 0)
+ throw new IllegalStateException();
+ if ((seq = lock.tryConvertToWriteLock(seq)) == 0)
+ throw new ConcurrentModificationException();
+ try {
+ list.rawAddAt(i, e);
+ items = list.array;
+ fence = list.count;
+ cursor = i + 1;
+ lastRet = -1;
+ } finally {
+ seq = lock.tryConvertToOptimisticRead(seq);
+ }
+ }
+
+ public boolean hasMoreElements() { return hasNext(); }
+ public E nextElement() { return next(); }
+ }
+
+ static final class ReadMostlyVectorSublist<E>
+ implements List<E>, RandomAccess, java.io.Serializable {
+ private static final long serialVersionUID = 3041673470172026059L;
+
+ final ReadMostlyVector<E> list;
+ final int offset;
+ volatile int size;
+
+ ReadMostlyVectorSublist(ReadMostlyVector<E> list,
+ int offset, int size) {
+ this.list = list;
+ this.offset = offset;
+ this.size = size;
+ }
+
+ private void rangeCheck(int index) {
+ if (index < 0 || index >= size)
+ throw new ArrayIndexOutOfBoundsException(index);
+ }
+
+ public boolean add(E element) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ int c = size;
+ list.rawAddAt(c + offset, element);
+ size = c + 1;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ return true;
+ }
+
+ public void add(int index, E element) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ if (index < 0 || index > size)
+ throw new ArrayIndexOutOfBoundsException(index);
+ list.rawAddAt(index + offset, element);
+ ++size;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean addAll(Collection<? extends E> c) {
+ Object[] elements = c.toArray();
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ int s = size;
+ int pc = list.count;
+ list.rawAddAllAt(offset + s, elements);
+ int added = list.count - pc;
+ size = s + added;
+ return added != 0;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean addAll(int index, Collection<? extends E> c) {
+ Object[] elements = c.toArray();
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ int s = size;
+ if (index < 0 || index > s)
+ throw new ArrayIndexOutOfBoundsException(index);
+ int pc = list.count;
+ list.rawAddAllAt(index + offset, elements);
+ int added = list.count - pc;
+ size = s + added;
+ return added != 0;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public void clear() {
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ list.internalClear(offset, offset + size);
+ size = 0;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean contains(Object o) {
+ return indexOf(o) >= 0;
+ }
+
+ public boolean containsAll(Collection<?> c) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ return list.internalContainsAll(c, offset, offset + size);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public boolean equals(Object o) {
+ if (o == this)
+ return true;
+ if (!(o instanceof List))
+ return false;
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ return list.internalEquals((List<?>)(o), offset, offset + size);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public E get(int index) {
+ if (index < 0 || index >= size)
+ throw new ArrayIndexOutOfBoundsException(index);
+ return list.get(index + offset);
+ }
+
+ public int hashCode() {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ return list.internalHashCode(offset, offset + size);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public int indexOf(Object o) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ int idx = findFirstIndex(list.array, o, offset, offset + size);
+ return idx < 0 ? -1 : idx - offset;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public boolean isEmpty() {
+ return size() == 0;
+ }
+
+ public Iterator<E> iterator() {
+ return new SubItr<E>(this, offset);
+ }
+
+ public int lastIndexOf(Object o) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ int idx = findLastIndex(list.array, o, offset + size - 1, offset);
+ return idx < 0 ? -1 : idx - offset;
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public ListIterator<E> listIterator() {
+ return new SubItr<E>(this, offset);
+ }
+
+ public ListIterator<E> listIterator(int index) {
+ return new SubItr<E>(this, index + offset);
+ }
+
+ public E remove(int index) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ Object[] items = list.array;
+ int i = index + offset;
+ if (items == null || index < 0 || index >= size || i >= items.length)
+ throw new ArrayIndexOutOfBoundsException(index);
+ @SuppressWarnings("unchecked") E result = (E)items[i];
+ list.rawRemoveAt(i);
+ size--;
+ return result;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean remove(Object o) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.writeLock();
+ try {
+ if (list.rawRemoveAt(findFirstIndex(list.array, o, offset,
+ offset + size))) {
+ --size;
+ return true;
+ }
+ else
+ return false;
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+
+ public boolean removeAll(Collection<?> c) {
+ return list.lockedRemoveAll(c, offset, offset + size);
+ }
+
+ public boolean retainAll(Collection<?> c) {
+ return list.lockedRetainAll(c, offset, offset + size);
+ }
+
+ public E set(int index, E element) {
+ if (index < 0 || index >= size)
+ throw new ArrayIndexOutOfBoundsException(index);
+ return list.set(index+offset, element);
+ }
+
+ public int size() {
+ return size;
+ }
+
+ public List<E> subList(int fromIndex, int toIndex) {
+ int c = size;
+ int ssize = toIndex - fromIndex;
+ if (fromIndex < 0)
+ throw new ArrayIndexOutOfBoundsException(fromIndex);
+ if (toIndex > c || ssize < 0)
+ throw new ArrayIndexOutOfBoundsException(toIndex);
+ return new ReadMostlyVectorSublist<E>(list, offset+fromIndex, ssize);
+ }
+
+ public Object[] toArray() {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ return list.internalToArray(offset, offset + size);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public <T> T[] toArray(T[] a) {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ return list.internalToArray(a, offset, offset + size);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ public String toString() {
+ final StampedLock lock = list.lock;
+ long stamp = lock.readLock();
+ try {
+ return list.internalToString(offset, offset + size);
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ }
+
+ static final class SubItr<E> implements ListIterator<E> {
+ final ReadMostlyVectorSublist<E> sublist;
+ final ReadMostlyVector<E> list;
+ final StampedLock lock;
+ Object[] items;
+ long seq;
+ int cursor;
+ int origin;
+ int fence;
+ int lastRet;
+
+ SubItr(ReadMostlyVectorSublist<E> sublist, int index) {
+ final StampedLock lock = sublist.list.lock;
+ long stamp = lock.readLock();
+ try {
+ this.sublist = sublist;
+ this.list = sublist.list;
+ this.items = list.array; // snapshot of backing array, read by next()/previous()
+ this.lock = lock;
+ this.cursor = index;
+ this.origin = sublist.offset;
+ this.fence = origin + sublist.size;
+ this.lastRet = -1;
+ } finally {
+ this.seq = lock.tryConvertToOptimisticRead(stamp);
+ }
+ if (index < 0 || cursor > fence)
+ throw new ArrayIndexOutOfBoundsException(index);
+ }
+
+ public int nextIndex() {
+ return cursor - origin;
+ }
+
+ public int previousIndex() {
+ return cursor - origin - 1;
+ }
+
+ public boolean hasNext() {
+ return cursor < fence;
+ }
+
+ public boolean hasPrevious() {
+ return cursor > origin;
+ }
+
+ public E next() {
+ int i = cursor;
+ Object[] es = items;
+ if (es == null || i < origin || i >= fence || i >= es.length)
+ throw new NoSuchElementException();
+ @SuppressWarnings("unchecked") E e = (E)es[i];
+ lastRet = i;
+ cursor = i + 1;
+ if (!lock.validate(seq))
+ throw new ConcurrentModificationException();
+ return e;
+ }
+
+ public E previous() {
+ int i = cursor - 1;
+ Object[] es = items;
+ if (es == null || i < 0 || i >= fence || i >= es.length)
+ throw new NoSuchElementException();
+ @SuppressWarnings("unchecked") E e = (E)es[i];
+ lastRet = i;
+ cursor = i;
+ if (!lock.validate(seq))
+ throw new ConcurrentModificationException();
+ return e;
+ }
+
+ public void remove() {
+ int i = lastRet;
+ if (i < 0)
+ throw new IllegalStateException();
+ if ((seq = lock.tryConvertToWriteLock(seq)) == 0)
+ throw new ConcurrentModificationException();
+ try {
+ list.rawRemoveAt(i);
+ fence = origin + sublist.size;
+ cursor = i;
+ lastRet = -1;
+ } finally {
+ seq = lock.tryConvertToOptimisticRead(seq);
+ }
+ }
+
+ public void set(E e) {
+ int i = lastRet;
+ if (i < origin || i >= fence)
+ throw new IllegalStateException();
+ if ((seq = lock.tryConvertToWriteLock(seq)) == 0)
+ throw new ConcurrentModificationException();
+ try {
+ list.set(i, e);
+ } finally {
+ seq = lock.tryConvertToOptimisticRead(seq);
+ }
+ }
+
+ public void add(E e) {
+ int i = cursor;
+ if (i < origin || i >= fence)
+ throw new IllegalStateException();
+ if ((seq = lock.tryConvertToWriteLock(seq)) == 0)
+ throw new ConcurrentModificationException();
+ try {
+ list.rawAddAt(i, e);
+ items = list.array;
+ fence = origin + sublist.size;
+ cursor = i + 1;
+ lastRet = -1;
+ } finally {
+ seq = lock.tryConvertToOptimisticRead(seq);
+ }
+ }
+ }
+}
diff --git a/src/main/java/jsr166e/extra/SequenceLock.java b/src/main/java/jsr166e/extra/SequenceLock.java
new file mode 100644
index 0000000..ed2a8c8
--- /dev/null
+++ b/src/main/java/jsr166e/extra/SequenceLock.java
@@ -0,0 +1,639 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166e.extra;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.AbstractQueuedLongSynchronizer;
+import java.util.Collection;
+import java.io.ObjectOutputStream;
+import java.io.ObjectInputStream;
+import java.io.IOException;
+
+/**
+ * A reentrant mutual exclusion {@link Lock} in which each lock
+ * acquisition or release advances a sequence number. When the
+ * sequence number (accessible using {@link #getSequence()}) is odd,
+ * the lock is held. When it is even (i.e., {@code (lock.getSequence()
+ * & 1L) == 0L}), the lock is released. Method {@link
+ * #awaitAvailability} can be used to await availability of the lock,
+ * returning its current sequence number. Sequence numbers (as well as
+ * reentrant hold counts) are of type {@code long} to ensure that they
+ * will not wrap around for hundreds of years of use under current
+ * processor rates. A SequenceLock can be created with a specified
+ * number of spins. Attempts to acquire the lock in method {@link
+ * #lock} will retry at least the given number of times before
+ * blocking. If not specified, a default, possibly platform-specific,
+ * value is used.
+ *
+ * <p>Except for the lack of support for specified fairness policies,
+ * or {@link Condition} objects, a SequenceLock can be used in the
+ * same way as {@link ReentrantLock}. It provides similar status and
+ * monitoring methods, such as {@link #isHeldByCurrentThread}.
+ * SequenceLocks may be preferable in contexts in which multiple
+ * threads invoke short read-only methods much more frequently than
+ * fully locked methods.
+ *
+ * <p>Methods {@code awaitAvailability} and {@code getSequence} can
+ * be used together to define (partially) optimistic read-only methods
+ * that are usually more efficient than ReadWriteLocks when they
+ * apply. These methods should in general be structured as loops that
+ * await lock availability, then read {@code volatile} fields into
+ * local variables (and may further read other values derived from
+ * these, for example the {@code length} of a {@code volatile} array),
+ * and retry if the sequence number changed while doing so.
+ * Alternatively, because {@code awaitAvailability} accommodates
+ * reentrancy, a method can retry a bounded number of times before
+ * switching to locking mode. While conceptually straightforward,
+ * expressing these ideas can be verbose. For example:
+ *
+ * <pre> {@code
+ * class Point {
+ * private volatile double x, y;
+ * private final SequenceLock sl = new SequenceLock();
+ *
+ * // an exclusively locked method
+ * void move(double deltaX, double deltaY) {
+ * sl.lock();
+ * try {
+ * x += deltaX;
+ * y += deltaY;
+ * } finally {
+ * sl.unlock();
+ * }
+ * }
+ *
+ * // A read-only method
+ * double distanceFromOriginV1() {
+ * double currentX, currentY;
+ * long seq;
+ * do {
+ * seq = sl.awaitAvailability();
+ * currentX = x;
+ * currentY = y;
+ * } while (sl.getSequence() != seq); // retry if sequence changed
+ * return Math.sqrt(currentX * currentX + currentY * currentY);
+ * }
+ *
+ * // Uses bounded retries before locking
+ * double distanceFromOriginV2() {
+ * double currentX, currentY;
+ * long seq;
+ * int retries = RETRIES_BEFORE_LOCKING; // for example 8
+ * try {
+ * do {
+ * if (--retries < 0)
+ * sl.lock();
+ * seq = sl.awaitAvailability();
+ * currentX = x;
+ * currentY = y;
+ * } while (sl.getSequence() != seq);
+ * } finally {
+ * if (retries < 0)
+ * sl.unlock();
+ * }
+ * return Math.sqrt(currentX * currentX + currentY * currentY);
+ * }
+ * }}</pre>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class SequenceLock implements Lock, java.io.Serializable {
+ private static final long serialVersionUID = 7373984872572414699L;
+
+ static final class Sync extends AbstractQueuedLongSynchronizer {
+ private static final long serialVersionUID = 2540673546047039555L;
+
+ /**
+ * The number of times to spin in lock() and awaitAvailability().
+ */
+ final int spins;
+
+ /**
+ * The number of reentrant holds on this lock. Uses a long for
+ * compatibility with other AbstractQueuedLongSynchronizer
+ * operations. Accessed only by lock holder.
+ */
+ long holds;
+
+ Sync(int spins) { this.spins = spins; }
+
+ // overrides of AQLS methods
+
+ public final boolean isHeldExclusively() {
+ return (getState() & 1L) != 0L &&
+ getExclusiveOwnerThread() == Thread.currentThread();
+ }
+
+ public final boolean tryAcquire(long acquires) {
+ Thread current = Thread.currentThread();
+ long c = getState();
+ if ((c & 1L) == 0L) {
+ if (compareAndSetState(c, c + 1L)) {
+ holds = acquires;
+ setExclusiveOwnerThread(current);
+ return true;
+ }
+ }
+ else if (current == getExclusiveOwnerThread()) {
+ holds += acquires;
+ return true;
+ }
+ return false;
+ }
+
+ public final boolean tryRelease(long releases) {
+ if (Thread.currentThread() != getExclusiveOwnerThread())
+ throw new IllegalMonitorStateException();
+ if ((holds -= releases) == 0L) {
+ setExclusiveOwnerThread(null);
+ setState(getState() + 1L);
+ return true;
+ }
+ return false;
+ }
+
+ public final long tryAcquireShared(long unused) {
+ return (((getState() & 1L) == 0L) ? 1L :
+ (getExclusiveOwnerThread() == Thread.currentThread()) ? 0L:
+ -1L);
+ }
+
+ public final boolean tryReleaseShared(long unused) {
+ return (getState() & 1L) == 0L;
+ }
+
+ public final Condition newCondition() {
+ throw new UnsupportedOperationException();
+ }
+
+ // Other methods in support of SequenceLock
+
+ final long getSequence() {
+ return getState();
+ }
+
+ final void lock() {
+ int k = spins;
+ while (!tryAcquire(1L)) {
+ if (k == 0) {
+ acquire(1L);
+ break;
+ }
+ --k;
+ }
+ }
+
+ final long awaitAvailability() {
+ long s;
+ while (((s = getState()) & 1L) != 0L &&
+ getExclusiveOwnerThread() != Thread.currentThread()) {
+ acquireShared(1L);
+ releaseShared(1L);
+ }
+ return s;
+ }
+
+ final long tryAwaitAvailability(long nanos)
+ throws InterruptedException, TimeoutException {
+ Thread current = Thread.currentThread();
+ for (;;) {
+ long s = getState();
+ if ((s & 1L) == 0L || getExclusiveOwnerThread() == current) {
+ releaseShared(1L);
+ return s;
+ }
+ if (!tryAcquireSharedNanos(1L, nanos))
+ throw new TimeoutException();
+ // Since tryAcquireSharedNanos doesn't return the sequence
+ // number, retry with a minimal wait time.
+ nanos = 1L;
+ }
+ }
+
+ final boolean isLocked() {
+ return (getState() & 1L) != 0L;
+ }
+
+ final Thread getOwner() {
+ return (getState() & 1L) == 0L ? null : getExclusiveOwnerThread();
+ }
+
+ final long getHoldCount() {
+ return isHeldExclusively() ? holds : 0;
+ }
+
+ private void readObject(ObjectInputStream s)
+ throws IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ holds = 0L;
+ setState(0L); // reset to unlocked state
+ }
+ }
+
+ private final Sync sync;
+
+ /**
+ * The default spin value for the constructor. Future versions of this
+ * class might choose platform-specific values. Currently, except
+ * on uniprocessors, it is set to a small value that overcomes near
+ * misses between releases and acquires.
+ */
+ static final int DEFAULT_SPINS =
+ Runtime.getRuntime().availableProcessors() > 1 ? 64 : 0;
+
+ /**
+ * Creates an instance of {@code SequenceLock} with the default
+ * number of retry attempts to acquire the lock before blocking.
+ */
+ public SequenceLock() { sync = new Sync(DEFAULT_SPINS); }
+
+ /**
+ * Creates an instance of {@code SequenceLock} that will retry
+ * attempts to acquire the lock at least the given number of times
+ * before blocking.
+ *
+ * @param spins the number of times to retry acquiring the lock before blocking
+ */
+ public SequenceLock(int spins) { sync = new Sync(spins); }
+
+ /**
+ * Returns the current sequence number of this lock. The sequence
+ * number is advanced upon each acquire or release action. When
+ * this value is odd, the lock is held; when even, it is released.
+ *
+ * @return the current sequence number
+ */
+ public long getSequence() { return sync.getSequence(); }
+
+ /**
+ * Returns the current sequence number when the lock is, or
+ * becomes, available. A lock is available if it is either
+ * released, or is held by the current thread. If the lock is not
+ * available, the current thread becomes disabled for thread
+ * scheduling purposes and lies dormant until the lock has been
+ * released by some other thread.
+ *
+ * @return the current sequence number
+ */
+ public long awaitAvailability() { return sync.awaitAvailability(); }
+
+ /**
+ * Returns the current sequence number if the lock is, or
+ * becomes, available within the specified waiting time.
+ *
+ * <p>If the lock is not available, the current thread becomes
+ * disabled for thread scheduling purposes and lies dormant until
+ * one of three things happens:
+ *
+ * <ul>
+ *
+ * <li>The lock becomes available, in which case the current
+ * sequence number is returned.
+ *
+ * <li>Some other thread {@linkplain Thread#interrupt interrupts}
+ * the current thread, in which case this method throws
+ * {@link InterruptedException}.
+ *
+ * <li>The specified waiting time elapses, in which case
+ * this method throws {@link TimeoutException}.
+ *
+ * </ul>
+ *
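+ * <p>A minimal sketch of a timed optimistic read (illustrative;
+ * {@code sl}, {@code x}, and {@code readUnderLock} are assumed
+ * names following the class example above, and exception
+ * handling is elided):
+ *
+ * <pre> {@code
+ * long seq = sl.tryAwaitAvailability(10L, TimeUnit.MILLISECONDS);
+ * double currentX = x;              // read volatile state
+ * if (sl.getSequence() != seq)      // sequence changed; snapshot invalid
+ *     currentX = readUnderLock();   // fall back to full locking
+ * }</pre>
+ *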
+ * @param timeout the time to wait for availability
+ * @param unit the time unit of the timeout argument
+ * @return the current sequence number if the lock is available
+ * upon return from this method
+ * @throws InterruptedException if the current thread is interrupted
+ * @throws TimeoutException if the lock was not available within
+ * the specified waiting time
+ * @throws NullPointerException if the time unit is null
+ */
+ public long tryAwaitAvailability(long timeout, TimeUnit unit)
+ throws InterruptedException, TimeoutException {
+ return sync.tryAwaitAvailability(unit.toNanos(timeout));
+ }
+
+ /**
+ * Acquires the lock.
+ *
+ * <p>If the current thread already holds this lock then the hold count
+ * is incremented by one and the method returns immediately without
+ * incrementing the sequence number.
+ *
+ * <p>If this lock is not held by another thread, this method
+ * increments the sequence number (which thus becomes an odd
+ * number), sets the lock hold count to one, and returns
+ * immediately.
+ *
+ * <p>If the lock is held by another thread then the current
+ * thread may retry acquiring this lock, depending on the {@code
+ * spin} count established in the constructor. If the lock is still
+ * not acquired, the current thread becomes disabled for thread
+ * scheduling purposes and lies dormant until enabled by
+ * some other thread releasing the lock.
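+ *
+ * <p>An illustrative parity sketch ({@code sl} is assumed to be a
+ * freshly constructed, unheld lock):
+ *
+ * <pre> {@code
+ * SequenceLock sl = new SequenceLock();
+ * long s0 = sl.getSequence(); // even: released
+ * sl.lock();                  // sequence becomes s0 + 1 (odd: held)
+ * sl.unlock();                // sequence becomes s0 + 2 (even: released)
+ * }</pre>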
+ */
+ public void lock() { sync.lock(); }
+
+ /**
+ * Acquires the lock unless the current thread is
+ * {@linkplain Thread#interrupt interrupted}.
+ *
+ * <p>If the current thread already holds this lock then the hold count
+ * is incremented by one and the method returns immediately without
+ * incrementing the sequence number.
+ *
+ * <p>If this lock is not held by another thread, this method
+ * increments the sequence number (which thus becomes an odd
+ * number), sets the lock hold count to one, and returns
+ * immediately.
+ *
+ * <p>If the lock is held by another thread then the current
+ * thread may retry acquiring this lock, depending on the {@code
+ * spin} count established in the constructor. If the lock is still
+ * not acquired, the current thread becomes disabled for thread
+ * scheduling purposes and lies dormant until one of two things
+ * happens:
+ *
+ * <ul>
+ *
+ * <li>The lock is acquired by the current thread; or
+ *
+ * <li>Some other thread {@linkplain Thread#interrupt interrupts} the
+ * current thread.
+ *
+ * </ul>
+ *
+ * <p>If the lock is acquired by the current thread then the lock hold
+ * count is set to one and the sequence number is incremented.
+ *
+ * <p>If the current thread:
+ *
+ * <ul>
+ *
+ * <li>has its interrupted status set on entry to this method; or
+ *
+ * <li>is {@linkplain Thread#interrupt interrupted} while acquiring
+ * the lock,
+ *
+ * </ul>
+ *
+ * then {@link InterruptedException} is thrown and the current thread's
+ * interrupted status is cleared.
+ *
+ * <p>In this implementation, as this method is an explicit
+ * interruption point, preference is given to responding to the
+ * interrupt over normal or reentrant acquisition of the lock.
+ *
+ * @throws InterruptedException if the current thread is interrupted
+ */
+ public void lockInterruptibly() throws InterruptedException {
+ sync.acquireInterruptibly(1L);
+ }
+
+ /**
+ * Acquires the lock only if it is not held by another thread at the time
+ * of invocation.
+ *
+ * <p>If the current thread already holds this lock then the hold
+ * count is incremented by one and the method returns {@code true}
+ * without incrementing the sequence number.
+ *
+ * <p>If this lock is not held by another thread, this method
+ * increments the sequence number (which thus becomes an odd
+ * number), sets the lock hold count to one, and returns {@code
+ * true}.
+ *
+ * <p>If the lock is held by another thread then this method
+ * returns {@code false}.
+ *
+ * @return {@code true} if the lock was free and was acquired by the
+ * current thread, or the lock was already held by the current
+ * thread; and {@code false} otherwise
+ */
+ public boolean tryLock() { return sync.tryAcquire(1L); }
+
+ /**
+ * Acquires the lock if it is not held by another thread within the given
+ * waiting time and the current thread has not been
+ * {@linkplain Thread#interrupt interrupted}.
+ *
+ * <p>If the current thread already holds this lock then the hold count
+ * is incremented by one and the method returns immediately without
+ * incrementing the sequence number.
+ *
+ * <p>If this lock is not held by another thread, this method
+ * increments the sequence number (which thus becomes an odd
+ * number), sets the lock hold count to one, and returns
+ * immediately.
+ *
+ * <p>If the lock is held by another thread then the current
+ * thread may retry acquiring this lock, depending on the {@code
+ * spin} count established in the constructor. If the lock is still
+ * not acquired, the current thread becomes disabled for thread
+ * scheduling purposes and lies dormant until one of three things
+ * happens:
+ *
+ * <ul>
+ *
+ * <li>The lock is acquired by the current thread; or
+ *
+ * <li>Some other thread {@linkplain Thread#interrupt interrupts}
+ * the current thread; or
+ *
+ * <li>The specified waiting time elapses
+ *
+ * </ul>
+ *
+ * <p>If the lock is acquired then the value {@code true} is returned and
+ * the lock hold count is set to one.
+ *
+ * <p>If the current thread:
+ *
+ * <ul>
+ *
+ * <li>has its interrupted status set on entry to this method; or
+ *
+ * <li>is {@linkplain Thread#interrupt interrupted} while
+ * acquiring the lock,
+ *
+ * </ul>
+ * then {@link InterruptedException} is thrown and the current thread's
+ * interrupted status is cleared.
+ *
+ * <p>If the specified waiting time elapses then the value {@code false}
+ * is returned. If the time is less than or equal to zero, the method
+ * will not wait at all.
+ *
+ * <p>In this implementation, as this method is an explicit
+ * interruption point, preference is given to responding to the
+ * interrupt over normal or reentrant acquisition of the lock, and
+ * over reporting the elapse of the waiting time.
+ *
+ * @param timeout the time to wait for the lock
+ * @param unit the time unit of the timeout argument
+ * @return {@code true} if the lock was free and was acquired by the
+ * current thread, or the lock was already held by the current
+ * thread; and {@code false} if the waiting time elapsed before
+ * the lock could be acquired
+ * @throws InterruptedException if the current thread is interrupted
+ * @throws NullPointerException if the time unit is null
+ */
+ public boolean tryLock(long timeout, TimeUnit unit)
+ throws InterruptedException {
+ return sync.tryAcquireNanos(1L, unit.toNanos(timeout));
+ }
+
+ /**
+ * Attempts to release this lock.
+ *
+ * <p>If the current thread is the holder of this lock then the
+ * hold count is decremented. If the hold count is now zero then
+ * the sequence number is incremented (thus becoming an even
+ * number) and the lock is released. If the current thread is not
+ * the holder of this lock then {@link
+ * IllegalMonitorStateException} is thrown.
+ *
+ * @throws IllegalMonitorStateException if the current thread does not
+ * hold this lock
+ */
+ public void unlock() { sync.release(1L); }
+
+ /**
+ * Throws UnsupportedOperationException. SequenceLocks
+ * do not support Condition objects.
+ *
+ * @throws UnsupportedOperationException always
+ */
+ public Condition newCondition() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Queries the number of holds on this lock by the current thread.
+ *
+ * <p>A thread has a hold on a lock for each lock action that is not
+ * matched by an unlock action.
+ *
+ * <p>The hold count information is typically only used for testing and
+ * debugging purposes.
+ *
+ * @return the number of holds on this lock by the current thread,
+ * or zero if this lock is not held by the current thread
+ */
+ public long getHoldCount() { return sync.getHoldCount(); }
+
+ /**
+ * Queries if this lock is held by the current thread.
+ *
+ * @return {@code true} if current thread holds this lock and
+ * {@code false} otherwise
+ */
+ public boolean isHeldByCurrentThread() { return sync.isHeldExclusively(); }
+
+ /**
+ * Queries if this lock is held by any thread. This method is
+ * designed for use in monitoring of the system state,
+ * not for synchronization control.
+ *
+ * @return {@code true} if any thread holds this lock and
+ * {@code false} otherwise
+ */
+ public boolean isLocked() { return sync.isLocked(); }
+
+ /**
+ * Returns the thread that currently owns this lock, or
+ * {@code null} if not owned. When this method is called by a
+ * thread that is not the owner, the return value reflects a
+ * best-effort approximation of current lock status. For example,
+ * the owner may be momentarily {@code null} even if there are
+ * threads trying to acquire the lock but have not yet done so.
+ * This method is designed to facilitate construction of
+ * subclasses that provide more extensive lock monitoring
+ * facilities.
+ *
+ * @return the owner, or {@code null} if not owned
+ */
+ protected Thread getOwner() { return sync.getOwner(); }
+
+ /**
+ * Queries whether any threads are waiting to acquire this lock. Note that
+ * because cancellations may occur at any time, a {@code true}
+ * return does not guarantee that any other thread will ever
+ * acquire this lock. This method is designed primarily for use in
+ * monitoring of the system state.
+ *
+ * @return {@code true} if there may be other threads waiting to
+ * acquire the lock
+ */
+ public final boolean hasQueuedThreads() {
+ return sync.hasQueuedThreads();
+ }
+
+ /**
+ * Queries whether the given thread is waiting to acquire this
+ * lock. Note that because cancellations may occur at any time, a
+ * {@code true} return does not guarantee that this thread
+ * will ever acquire this lock. This method is designed primarily for use
+ * in monitoring of the system state.
+ *
+ * @param thread the thread
+ * @return {@code true} if the given thread is queued waiting for this lock
+ * @throws NullPointerException if the thread is null
+ */
+ public final boolean hasQueuedThread(Thread thread) {
+ return sync.isQueued(thread);
+ }
+
+ /**
+ * Returns an estimate of the number of threads waiting to
+ * acquire this lock. The value is only an estimate because the number of
+ * threads may change dynamically while this method traverses
+ * internal data structures. This method is designed for use in
+ * monitoring of the system state, not for synchronization
+ * control.
+ *
+ * @return the estimated number of threads waiting for this lock
+ */
+ public final int getQueueLength() {
+ return sync.getQueueLength();
+ }
+
+ /**
+ * Returns a collection containing threads that may be waiting to
+ * acquire this lock. Because the actual set of threads may change
+ * dynamically while constructing this result, the returned
+ * collection is only a best-effort estimate. The elements of the
+ * returned collection are in no particular order. This method is
+ * designed to facilitate construction of subclasses that provide
+ * more extensive monitoring facilities.
+ *
+ * @return the collection of threads
+ */
+ protected Collection<Thread> getQueuedThreads() {
+ return sync.getQueuedThreads();
+ }
+
+ /**
+ * Returns a string identifying this lock, as well as its lock state.
+ * The state, in brackets, includes either the String {@code "Unlocked"}
+ * or the String {@code "Locked by"} followed by the
+ * {@linkplain Thread#getName name} of the owning thread.
+ *
+ * @return a string identifying this lock, as well as its lock state
+ */
+ public String toString() {
+ Thread o = sync.getOwner();
+ return super.toString() + ((o == null) ?
+ "[Unlocked]" :
+ "[Locked by thread " + o.getName() + "]");
+ }
+
+}
diff --git a/src/main/java/jsr166y/ConcurrentLinkedDeque.java b/src/main/java/jsr166y/ConcurrentLinkedDeque.java
new file mode 100644
index 0000000..d17bfd9
--- /dev/null
+++ b/src/main/java/jsr166y/ConcurrentLinkedDeque.java
@@ -0,0 +1,1468 @@
+/*
+ * Written by Doug Lea and Martin Buchholz with assistance from members of
+ * JCP JSR-166 Expert Group and released to the public domain, as explained
+ * at http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+import java.util.AbstractCollection;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+/**
+ * An unbounded concurrent {@linkplain Deque deque} based on linked nodes.
+ * Concurrent insertion, removal, and access operations execute safely
+ * across multiple threads.
+ * A {@code ConcurrentLinkedDeque} is an appropriate choice when
+ * many threads will share access to a common collection.
+ * Like most other concurrent collection implementations, this class
+ * does not permit the use of {@code null} elements.
+ *
+ * <p>Iterators are <i>weakly consistent</i>, returning elements
+ * reflecting the state of the deque at some point at or since the
+ * creation of the iterator. They do <em>not</em> throw {@link
+ * java.util.ConcurrentModificationException
+ * ConcurrentModificationException}, and may proceed concurrently with
+ * other operations.
+ *
+ * <p>Beware that, unlike in most collections, the {@code size} method
+ * is <em>NOT</em> a constant-time operation. Because of the
+ * asynchronous nature of these deques, determining the current number
+ * of elements requires a traversal of the elements, and so may report
+ * inaccurate results if this collection is modified during traversal.
+ * Additionally, the bulk operations {@code addAll},
+ * {@code removeAll}, {@code retainAll}, {@code containsAll},
+ * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
+ * to be performed atomically. For example, an iterator operating
+ * concurrently with an {@code addAll} operation might view only some
+ * of the added elements.
+ *
+ * <p>This class and its iterator implement all of the <em>optional</em>
+ * methods of the {@link Deque} and {@link Iterator} interfaces.
+ *
+ * <p>Memory consistency effects: As with other concurrent collections,
+ * actions in a thread prior to placing an object into a
+ * {@code ConcurrentLinkedDeque}
+ * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
+ * actions subsequent to the access or removal of that element from
+ * the {@code ConcurrentLinkedDeque} in another thread.
+ *
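+ * <p>A minimal usage sketch (illustrative only; {@code String}
+ * elements are assumed for brevity):
+ *
+ * <pre> {@code
+ * Deque<String> deque = new ConcurrentLinkedDeque<String>();
+ * deque.addFirst("b");               // [b]
+ * deque.addFirst("a");               // [a, b]
+ * deque.addLast("c");                // [a, b, c]
+ * String first = deque.pollFirst();  // "a", leaving [b, c]
+ * String last  = deque.pollLast();   // "c", leaving [b]
+ * }</pre>
+ *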
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ * @author Martin Buchholz
+ * @param <E> the type of elements held in this collection
+ */
+public class ConcurrentLinkedDeque<E>
+ extends AbstractCollection<E>
+ implements Deque<E>, java.io.Serializable {
+
+ /*
+ * This is an implementation of a concurrent lock-free deque
+ * supporting interior removes but not interior insertions, as
+ * required to support the entire Deque interface.
+ *
+ * We extend the techniques developed for ConcurrentLinkedQueue and
+ * LinkedTransferQueue (see the internal docs for those classes).
+ * Understanding the ConcurrentLinkedQueue implementation is a
+ * prerequisite for understanding the implementation of this class.
+ *
+ * The data structure is a symmetrical doubly-linked "GC-robust"
+ * linked list of nodes. We minimize the number of volatile writes
+ * using two techniques: advancing multiple hops with a single CAS
+ * and mixing volatile and non-volatile writes of the same memory
+ * locations.
+ *
+ * A node contains the expected E ("item") and links to predecessor
+ * ("prev") and successor ("next") nodes:
+ *
+ * class Node<E> { volatile Node<E> prev, next; volatile E item; }
+ *
+ * A node p is considered "live" if it contains a non-null item
+ * (p.item != null). When an item is CASed to null, the item is
+ * atomically logically deleted from the collection.
+ *
+ * At any time, there is precisely one "first" node with a null
+ * prev reference that terminates any chain of prev references
+ * starting at a live node. Similarly there is precisely one
+ * "last" node terminating any chain of next references starting at
+ * a live node. The "first" and "last" nodes may or may not be live.
+ * The "first" and "last" nodes are always mutually reachable.
+ *
+ * A new element is added atomically by CASing the null prev or
+ * next reference in the first or last node to a fresh node
+ * containing the element. The element's node atomically becomes
+ * "live" at that point.
+ *
+ * A node is considered "active" if it is a live node, or the
+ * first or last node. Active nodes cannot be unlinked.
+ *
+ * A "self-link" is a next or prev reference that is the same node:
+ * p.prev == p or p.next == p
+ * Self-links are used in the node unlinking process. Active nodes
+ * never have self-links.
+ *
+ * A node p is active if and only if:
+ *
+ * p.item != null ||
+ * (p.prev == null && p.next != p) ||
+ * (p.next == null && p.prev != p)
+ *
+ * The deque object has two node references, "head" and "tail".
+ * The head and tail are only approximations to the first and last
+ * nodes of the deque. The first node can always be found by
+ * following prev pointers from head; likewise for tail. However,
+ * it is permissible for head and tail to be referring to deleted
+ * nodes that have been unlinked and so may not be reachable from
+ * any live node.
+ *
+ * There are 3 stages of node deletion:
+ * "logical deletion", "unlinking", and "gc-unlinking".
+ *
+ * 1. "logical deletion" by CASing item to null atomically removes
+ * the element from the collection, and makes the containing node
+ * eligible for unlinking.
+ *
+ * 2. "unlinking" makes a deleted node unreachable from active
+ * nodes, and thus eventually reclaimable by GC. Unlinked nodes
+ * may remain reachable indefinitely from an iterator.
+ *
+ * Physical node unlinking is merely an optimization (albeit a
+ * critical one), and so can be performed at our convenience. At
+ * any time, the sets of live nodes maintained by prev and next
+ * links are identical, that is, the live nodes found via next
+ * links from the first node equal the live nodes found via
+ * prev links from the last node. However, this is not true for
+ * nodes that have already been logically deleted - such nodes may
+ * be reachable in one direction only.
+ *
+ * 3. "gc-unlinking" takes unlinking further by making active
+ * nodes unreachable from deleted nodes, making it easier for the
+ * GC to reclaim future deleted nodes. This step makes the data
+ * structure "gc-robust", as first described in detail by Boehm
+ * (http://portal.acm.org/citation.cfm?doid=503272.503282).
+ *
+ * GC-unlinked nodes may remain reachable indefinitely from an
+ * iterator, but unlike unlinked nodes, are never reachable from
+ * head or tail.
+ *
+ * Making the data structure GC-robust will eliminate the risk of
+ * unbounded memory retention with conservative GCs and is likely
+ * to improve performance with generational GCs.
+ *
+ * When a node is dequeued at either end, e.g. via poll(), we would
+ * like to break any references from the node to active nodes. We
+ * develop further the use of self-links that was very effective in
+ * other concurrent collection classes. The idea is to replace
+ * prev and next pointers with special values that are interpreted
+ * to mean off-the-list-at-one-end. These are approximations, but
+ * good enough to preserve the properties we want in our
+ * traversals, e.g. we guarantee that a traversal will never visit
+ * the same element twice, but we don't guarantee whether a
+ * traversal that runs out of elements will be able to see more
+ * elements later after enqueues at that end. Doing gc-unlinking
+ * safely is particularly tricky, since any node can be in use
+ * indefinitely (for example by an iterator). We must ensure that
+ * the nodes pointed at by head/tail never get gc-unlinked, since
+ * head/tail are needed to get "back on track" by other nodes that
+ * are gc-unlinked. gc-unlinking accounts for much of the
+ * implementation complexity.
+ *
+ * Since neither unlinking nor gc-unlinking are necessary for
+ * correctness, there are many implementation choices regarding
+ * frequency (eagerness) of these operations. Since volatile
+ * reads are likely to be much cheaper than CASes, saving CASes by
+ * unlinking multiple adjacent nodes at a time may be a win.
+ * gc-unlinking can be performed rarely and still be effective,
+ * since it is most important that long chains of deleted nodes
+ * are occasionally broken.
+ *
+ * The actual representation we use is that p.next == p means to
+ * go to the first node (which in turn is reached by following prev
+ * pointers from head), and p.next == null && p.prev == p means
+ * that the iteration is at an end and that p is a (static final)
+ * dummy node, NEXT_TERMINATOR, and not the last active node.
+ * Finishing the iteration when encountering such a TERMINATOR is
+ * good enough for read-only traversals, so such traversals can use
+ * p.next == null as the termination condition. When we need to
+ * find the last (active) node, for enqueueing a new node, we need
+ * to check whether we have reached a TERMINATOR node; if so,
+ * restart traversal from tail.
+ *
+ * The implementation is completely directionally symmetrical,
+ * except that most public methods that iterate through the list
+ * follow next pointers ("forward" direction).
+ *
+ * We believe (without full proof) that all single-element deque
+ * operations (e.g., addFirst, peekLast, pollLast) are linearizable
+ * (see Herlihy and Shavit's book). However, some combinations of
+ * operations are known not to be linearizable. In particular,
+ * when an addFirst(A) is racing with pollFirst() removing B, it is
+ * possible for an observer iterating over the elements to observe
+ * A B C and subsequently observe A C, even though no interior
+ * removes are ever performed. Nevertheless, iterators behave
+ * reasonably, providing the "weakly consistent" guarantees.
+ *
+ * Empirically, microbenchmarks suggest that this class adds about
+ * 40% overhead relative to ConcurrentLinkedQueue, which feels as
+ * good as we can hope for.
+ */
+
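+ /*
+ * A hypothetical trace of the three deletion stages described
+ * above, for a deque holding A, B, C while B is removed (a
+ * sketch, not additional specification):
+ *
+ * 1. Logical deletion: B.item is CASed from B to null; B is no
+ * longer an element, but remains linked.
+ * 2. Unlinking: A.next and C.prev are fixed up to point at each
+ * other; B becomes unreachable from active nodes, though an
+ * iterator parked on B can still follow B's links.
+ * 3. Gc-unlinking (performed when the removal is at an end of
+ * the deque): the removed node's prev/next are redirected to
+ * itself or to a TERMINATOR, so it no longer pins active nodes
+ * for the GC.
+ */
+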
+ private static final long serialVersionUID = 876323262645176354L;
+
+ /**
+ * A node from which the first node on list (that is, the unique node p
+ * with p.prev == null && p.next != p) can be reached in O(1) time.
+ * Invariants:
+ * - the first node is always O(1) reachable from head via prev links
+ * - all live nodes are reachable from the first node via succ()
+ * - head != null
+ * - (tmp = head).next != tmp || tmp != head
+ * - head is never gc-unlinked (but may be unlinked)
+ * Non-invariants:
+ * - head.item may or may not be null
+ * - head may not be reachable from the first or last node, or from tail
+ */
+ private transient volatile Node<E> head;
+
+ /**
+ * A node from which the last node on list (that is, the unique node p
+ * with p.next == null && p.prev != p) can be reached in O(1) time.
+ * Invariants:
+ * - the last node is always O(1) reachable from tail via next links
+ * - all live nodes are reachable from the last node via pred()
+ * - tail != null
+ * - tail is never gc-unlinked (but may be unlinked)
+ * Non-invariants:
+ * - tail.item may or may not be null
+ * - tail may not be reachable from the first or last node, or from head
+ */
+ private transient volatile Node<E> tail;
+
+ private static final Node<Object> PREV_TERMINATOR, NEXT_TERMINATOR;
+
+ @SuppressWarnings("unchecked")
+ Node<E> prevTerminator() {
+ return (Node<E>) PREV_TERMINATOR;
+ }
+
+ @SuppressWarnings("unchecked")
+ Node<E> nextTerminator() {
+ return (Node<E>) NEXT_TERMINATOR;
+ }
+
+ static final class Node<E> {
+ volatile Node<E> prev;
+ volatile E item;
+ volatile Node<E> next;
+
+ Node() { // default constructor for NEXT_TERMINATOR, PREV_TERMINATOR
+ }
+
+ /**
+ * Constructs a new node. Uses relaxed write because item can
+ * only be seen after publication via casNext or casPrev.
+ */
+ Node(E item) {
+ UNSAFE.putObject(this, itemOffset, item);
+ }
+
+ boolean casItem(E cmp, E val) {
+ return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
+ }
+
+ void lazySetNext(Node<E> val) {
+ UNSAFE.putOrderedObject(this, nextOffset, val);
+ }
+
+ boolean casNext(Node<E> cmp, Node<E> val) {
+ return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
+ }
+
+ void lazySetPrev(Node<E> val) {
+ UNSAFE.putOrderedObject(this, prevOffset, val);
+ }
+
+ boolean casPrev(Node<E> cmp, Node<E> val) {
+ return UNSAFE.compareAndSwapObject(this, prevOffset, cmp, val);
+ }
+
+ // Unsafe mechanics
+
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long prevOffset;
+ private static final long itemOffset;
+ private static final long nextOffset;
+
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = Node.class;
+ prevOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("prev"));
+ itemOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("item"));
+ nextOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("next"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+ }
+
+ /**
+ * Links e as first element.
+ */
+ private void linkFirst(E e) {
+ checkNotNull(e);
+ final Node<E> newNode = new Node<E>(e);
+
+ restartFromHead:
+ for (;;)
+ for (Node<E> h = head, p = h, q;;) {
+ if ((q = p.prev) != null &&
+ (q = (p = q).prev) != null)
+ // Check for head updates every other hop.
+ // If p == q, we are sure to follow head instead.
+ p = (h != (h = head)) ? h : q;
+ else if (p.next == p) // PREV_TERMINATOR
+ continue restartFromHead;
+ else {
+ // p is first node
+ newNode.lazySetNext(p); // CAS piggyback
+ if (p.casPrev(null, newNode)) {
+ // Successful CAS is the linearization point
+ // for e to become an element of this deque,
+ // and for newNode to become "live".
+ if (p != h) // hop two nodes at a time
+ casHead(h, newNode); // Failure is OK.
+ return;
+ }
+ // Lost CAS race to another thread; re-read prev
+ }
+ }
+ }
+
+ /**
+ * Links e as last element.
+ */
+ private void linkLast(E e) {
+ checkNotNull(e);
+ final Node<E> newNode = new Node<E>(e);
+
+ restartFromTail:
+ for (;;)
+ for (Node<E> t = tail, p = t, q;;) {
+ if ((q = p.next) != null &&
+ (q = (p = q).next) != null)
+ // Check for tail updates every other hop.
+ // If p == q, we are sure to follow tail instead.
+ p = (t != (t = tail)) ? t : q;
+ else if (p.prev == p) // NEXT_TERMINATOR
+ continue restartFromTail;
+ else {
+ // p is last node
+ newNode.lazySetPrev(p); // CAS piggyback
+ if (p.casNext(null, newNode)) {
+ // Successful CAS is the linearization point
+ // for e to become an element of this deque,
+ // and for newNode to become "live".
+ if (p != t) // hop two nodes at a time
+ casTail(t, newNode); // Failure is OK.
+ return;
+ }
+ // Lost CAS race to another thread; re-read next
+ }
+ }
+ }
+
+ private static final int HOPS = 2;
+
+ /**
+ * Unlinks non-null node x.
+ */
+ void unlink(Node<E> x) {
+ // assert x != null;
+ // assert x.item == null;
+ // assert x != PREV_TERMINATOR;
+ // assert x != NEXT_TERMINATOR;
+
+ final Node<E> prev = x.prev;
+ final Node<E> next = x.next;
+ if (prev == null) {
+ unlinkFirst(x, next);
+ } else if (next == null) {
+ unlinkLast(x, prev);
+ } else {
+ // Unlink interior node.
+ //
+ // This is the common case, since a series of polls at the
+ // same end will be "interior" removes, except perhaps for
+ // the first one, since end nodes cannot be unlinked.
+ //
+ // At any time, all active nodes are mutually reachable by
+ // following a sequence of either next or prev pointers.
+ //
+ // Our strategy is to find the unique active predecessor
+ // and successor of x. Try to fix up their links so that
+ // they point to each other, leaving x unreachable from
+ // active nodes. If successful, and if x has no live
+ // predecessor/successor, we additionally try to gc-unlink,
+ // leaving active nodes unreachable from x, by rechecking
+ // that the status of predecessor and successor are
+ // unchanged and ensuring that x is not reachable from
+ // tail/head, before setting x's prev/next links to their
+ // logical approximate replacements, self/TERMINATOR.
+ Node<E> activePred, activeSucc;
+ boolean isFirst, isLast;
+ int hops = 1;
+
+ // Find active predecessor
+ for (Node<E> p = prev; ; ++hops) {
+ if (p.item != null) {
+ activePred = p;
+ isFirst = false;
+ break;
+ }
+ Node<E> q = p.prev;
+ if (q == null) {
+ if (p.next == p)
+ return;
+ activePred = p;
+ isFirst = true;
+ break;
+ }
+ else if (p == q)
+ return;
+ else
+ p = q;
+ }
+
+ // Find active successor
+ for (Node<E> p = next; ; ++hops) {
+ if (p.item != null) {
+ activeSucc = p;
+ isLast = false;
+ break;
+ }
+ Node<E> q = p.next;
+ if (q == null) {
+ if (p.prev == p)
+ return;
+ activeSucc = p;
+ isLast = true;
+ break;
+ }
+ else if (p == q)
+ return;
+ else
+ p = q;
+ }
+
+ // TODO: better HOP heuristics
+ if (hops < HOPS
+ // always squeeze out interior deleted nodes
+ && (isFirst | isLast))
+ return;
+
+ // Squeeze out deleted nodes between activePred and
+ // activeSucc, including x.
+ skipDeletedSuccessors(activePred);
+ skipDeletedPredecessors(activeSucc);
+
+ // Try to gc-unlink, if possible
+ if ((isFirst | isLast) &&
+
+ // Recheck expected state of predecessor and successor
+ (activePred.next == activeSucc) &&
+ (activeSucc.prev == activePred) &&
+ (isFirst ? activePred.prev == null : activePred.item != null) &&
+ (isLast ? activeSucc.next == null : activeSucc.item != null)) {
+
+ updateHead(); // Ensure x is not reachable from head
+ updateTail(); // Ensure x is not reachable from tail
+
+ // Finally, actually gc-unlink
+ x.lazySetPrev(isFirst ? prevTerminator() : x);
+ x.lazySetNext(isLast ? nextTerminator() : x);
+ }
+ }
+ }
+
+ /**
+ * Unlinks non-null first node.
+ */
+ private void unlinkFirst(Node<E> first, Node<E> next) {
+ // assert first != null;
+ // assert next != null;
+ // assert first.item == null;
+ for (Node<E> o = null, p = next, q;;) {
+ if (p.item != null || (q = p.next) == null) {
+ if (o != null && p.prev != p && first.casNext(next, p)) {
+ skipDeletedPredecessors(p);
+ if (first.prev == null &&
+ (p.next == null || p.item != null) &&
+ p.prev == first) {
+
+ updateHead(); // Ensure o is not reachable from head
+ updateTail(); // Ensure o is not reachable from tail
+
+ // Finally, actually gc-unlink
+ o.lazySetNext(o);
+ o.lazySetPrev(prevTerminator());
+ }
+ }
+ return;
+ }
+ else if (p == q)
+ return;
+ else {
+ o = p;
+ p = q;
+ }
+ }
+ }
+
+ /**
+ * Unlinks non-null last node.
+ */
+ private void unlinkLast(Node<E> last, Node<E> prev) {
+ // assert last != null;
+ // assert prev != null;
+ // assert last.item == null;
+ for (Node<E> o = null, p = prev, q;;) {
+ if (p.item != null || (q = p.prev) == null) {
+ if (o != null && p.next != p && last.casPrev(prev, p)) {
+ skipDeletedSuccessors(p);
+ if (last.next == null &&
+ (p.prev == null || p.item != null) &&
+ p.next == last) {
+
+ updateHead(); // Ensure o is not reachable from head
+ updateTail(); // Ensure o is not reachable from tail
+
+ // Finally, actually gc-unlink
+ o.lazySetPrev(o);
+ o.lazySetNext(nextTerminator());
+ }
+ }
+ return;
+ }
+ else if (p == q)
+ return;
+ else {
+ o = p;
+ p = q;
+ }
+ }
+ }
+
+ /**
+ * Guarantees that any node which was unlinked before a call to
+ * this method will be unreachable from head after it returns.
+ * Does not guarantee to eliminate slack, only that head will
+ * point to a node that was active while this method was running.
+ */
+ private final void updateHead() {
+ // Either head already points to an active node, or we keep
+ // trying to cas it to the first node until it does.
+ Node<E> h, p, q;
+ restartFromHead:
+ while ((h = head).item == null && (p = h.prev) != null) {
+ for (;;) {
+ if ((q = p.prev) == null ||
+ (q = (p = q).prev) == null) {
+ // It is possible that p is PREV_TERMINATOR,
+ // but if so, the CAS is guaranteed to fail.
+ if (casHead(h, p))
+ return;
+ else
+ continue restartFromHead;
+ }
+ else if (h != head)
+ continue restartFromHead;
+ else
+ p = q;
+ }
+ }
+ }
+
+ /**
+ * Guarantees that any node which was unlinked before a call to
+ * this method will be unreachable from tail after it returns.
+ * Does not guarantee to eliminate slack, only that tail will
+ * point to a node that was active while this method was running.
+ */
+ private final void updateTail() {
+ // Either tail already points to an active node, or we keep
+ // trying to cas it to the last node until it does.
+ Node<E> t, p, q;
+ restartFromTail:
+ while ((t = tail).item == null && (p = t.next) != null) {
+ for (;;) {
+ if ((q = p.next) == null ||
+ (q = (p = q).next) == null) {
+ // It is possible that p is NEXT_TERMINATOR,
+ // but if so, the CAS is guaranteed to fail.
+ if (casTail(t, p))
+ return;
+ else
+ continue restartFromTail;
+ }
+ else if (t != tail)
+ continue restartFromTail;
+ else
+ p = q;
+ }
+ }
+ }
+
+ private void skipDeletedPredecessors(Node<E> x) {
+ whileActive:
+ do {
+ Node<E> prev = x.prev;
+ // assert prev != null;
+ // assert x != NEXT_TERMINATOR;
+ // assert x != PREV_TERMINATOR;
+ Node<E> p = prev;
+ findActive:
+ for (;;) {
+ if (p.item != null)
+ break findActive;
+ Node<E> q = p.prev;
+ if (q == null) {
+ if (p.next == p)
+ continue whileActive;
+ break findActive;
+ }
+ else if (p == q)
+ continue whileActive;
+ else
+ p = q;
+ }
+
+ // found active CAS target
+ if (prev == p || x.casPrev(prev, p))
+ return;
+
+ } while (x.item != null || x.next == null);
+ }
+
+ private void skipDeletedSuccessors(Node<E> x) {
+ whileActive:
+ do {
+ Node<E> next = x.next;
+ // assert next != null;
+ // assert x != NEXT_TERMINATOR;
+ // assert x != PREV_TERMINATOR;
+ Node<E> p = next;
+ findActive:
+ for (;;) {
+ if (p.item != null)
+ break findActive;
+ Node<E> q = p.next;
+ if (q == null) {
+ if (p.prev == p)
+ continue whileActive;
+ break findActive;
+ }
+ else if (p == q)
+ continue whileActive;
+ else
+ p = q;
+ }
+
+ // found active CAS target
+ if (next == p || x.casNext(next, p))
+ return;
+
+ } while (x.item != null || x.prev == null);
+ }
+
+ /**
+ * Returns the successor of p, or the first node if p.next has been
+ * linked to self, which will only be true if traversing with a
+ * stale pointer that is now off the list.
+ */
+ final Node<E> succ(Node<E> p) {
+ // TODO: should we skip deleted nodes here?
+ Node<E> q = p.next;
+ return (p == q) ? first() : q;
+ }
+
+ /**
+ * Returns the predecessor of p, or the last node if p.prev has been
+ * linked to self, which will only be true if traversing with a
+ * stale pointer that is now off the list.
+ */
+ final Node<E> pred(Node<E> p) {
+ Node<E> q = p.prev;
+ return (p == q) ? last() : q;
+ }
+
+ /**
+ * Returns the first node, the unique node p for which:
+ * p.prev == null && p.next != p
+ * The returned node may or may not be logically deleted.
+ * Guarantees that head is set to the returned node.
+ */
+ Node<E> first() {
+ restartFromHead:
+ for (;;)
+ for (Node<E> h = head, p = h, q;;) {
+ if ((q = p.prev) != null &&
+ (q = (p = q).prev) != null)
+ // Check for head updates every other hop.
+ // If p == q, we are sure to follow head instead.
+ p = (h != (h = head)) ? h : q;
+ else if (p == h
+ // It is possible that p is PREV_TERMINATOR,
+ // but if so, the CAS is guaranteed to fail.
+ || casHead(h, p))
+ return p;
+ else
+ continue restartFromHead;
+ }
+ }
+
+ /**
+ * Returns the last node, the unique node p for which:
+ * p.next == null && p.prev != p
+ * The returned node may or may not be logically deleted.
+ * Guarantees that tail is set to the returned node.
+ */
+ Node<E> last() {
+ restartFromTail:
+ for (;;)
+ for (Node<E> t = tail, p = t, q;;) {
+ if ((q = p.next) != null &&
+ (q = (p = q).next) != null)
+ // Check for tail updates every other hop.
+ // If p == q, we are sure to follow tail instead.
+ p = (t != (t = tail)) ? t : q;
+ else if (p == t
+ // It is possible that p is NEXT_TERMINATOR,
+ // but if so, the CAS is guaranteed to fail.
+ || casTail(t, p))
+ return p;
+ else
+ continue restartFromTail;
+ }
+ }
+
+ // Minor convenience utilities
+
+ /**
+ * Throws NullPointerException if argument is null.
+ *
+ * @param v the element
+ */
+ private static void checkNotNull(Object v) {
+ if (v == null)
+ throw new NullPointerException();
+ }
+
+ /**
+ * Returns element unless it is null, in which case throws
+ * NoSuchElementException.
+ *
+ * @param v the element
+ * @return the element
+ */
+ private E screenNullResult(E v) {
+ if (v == null)
+ throw new NoSuchElementException();
+ return v;
+ }
+
+ /**
+ * Creates an array list and fills it with elements of this list.
+ * Used by toArray.
+ *
+ * @return the array list
+ */
+ private ArrayList<E> toArrayList() {
+ ArrayList<E> list = new ArrayList<E>();
+ for (Node<E> p = first(); p != null; p = succ(p)) {
+ E item = p.item;
+ if (item != null)
+ list.add(item);
+ }
+ return list;
+ }
+
+ /**
+ * Constructs an empty deque.
+ */
+ public ConcurrentLinkedDeque() {
+ head = tail = new Node<E>(null);
+ }
+
+ /**
+ * Constructs a deque initially containing the elements of
+ * the given collection, added in traversal order of the
+ * collection's iterator.
+ *
+ * @param c the collection of elements to initially contain
+ * @throws NullPointerException if the specified collection or any
+ * of its elements are null
+ */
+ public ConcurrentLinkedDeque(Collection<? extends E> c) {
+ // Copy c into a private chain of Nodes
+ Node<E> h = null, t = null;
+ for (E e : c) {
+ checkNotNull(e);
+ Node<E> newNode = new Node<E>(e);
+ if (h == null)
+ h = t = newNode;
+ else {
+ t.lazySetNext(newNode);
+ newNode.lazySetPrev(t);
+ t = newNode;
+ }
+ }
+ initHeadTail(h, t);
+ }
+
+ /**
+ * Initializes head and tail, ensuring invariants hold.
+ */
+ private void initHeadTail(Node<E> h, Node<E> t) {
+ if (h == t) {
+ if (h == null)
+ h = t = new Node<E>(null);
+ else {
+ // Avoid edge case of a single Node with non-null item.
+ Node<E> newNode = new Node<E>(null);
+ t.lazySetNext(newNode);
+ newNode.lazySetPrev(t);
+ t = newNode;
+ }
+ }
+ head = h;
+ tail = t;
+ }
+
+ /**
+ * Inserts the specified element at the front of this deque.
+ * As the deque is unbounded, this method will never throw
+ * {@link IllegalStateException}.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public void addFirst(E e) {
+ linkFirst(e);
+ }
+
+ /**
+ * Inserts the specified element at the end of this deque.
+ * As the deque is unbounded, this method will never throw
+ * {@link IllegalStateException}.
+ *
+ * <p>This method is equivalent to {@link #add}.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public void addLast(E e) {
+ linkLast(e);
+ }
+
+ /**
+ * Inserts the specified element at the front of this deque.
+ * As the deque is unbounded, this method will never return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Deque#offerFirst})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offerFirst(E e) {
+ linkFirst(e);
+ return true;
+ }
+
+ /**
+ * Inserts the specified element at the end of this deque.
+ * As the deque is unbounded, this method will never return {@code false}.
+ *
+ * <p>This method is equivalent to {@link #add}.
+ *
+ * @return {@code true} (as specified by {@link Deque#offerLast})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offerLast(E e) {
+ linkLast(e);
+ return true;
+ }
+
+ public E peekFirst() {
+ for (Node<E> p = first(); p != null; p = succ(p)) {
+ E item = p.item;
+ if (item != null)
+ return item;
+ }
+ return null;
+ }
+
+ public E peekLast() {
+ for (Node<E> p = last(); p != null; p = pred(p)) {
+ E item = p.item;
+ if (item != null)
+ return item;
+ }
+ return null;
+ }
+
+ /**
+ * @throws NoSuchElementException {@inheritDoc}
+ */
+ public E getFirst() {
+ return screenNullResult(peekFirst());
+ }
+
+ /**
+ * @throws NoSuchElementException {@inheritDoc}
+ */
+ public E getLast() {
+ return screenNullResult(peekLast());
+ }
+
+ public E pollFirst() {
+ for (Node<E> p = first(); p != null; p = succ(p)) {
+ E item = p.item;
+ if (item != null && p.casItem(item, null)) {
+ unlink(p);
+ return item;
+ }
+ }
+ return null;
+ }
+
+ public E pollLast() {
+ for (Node<E> p = last(); p != null; p = pred(p)) {
+ E item = p.item;
+ if (item != null && p.casItem(item, null)) {
+ unlink(p);
+ return item;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * @throws NoSuchElementException {@inheritDoc}
+ */
+ public E removeFirst() {
+ return screenNullResult(pollFirst());
+ }
+
+ /**
+ * @throws NoSuchElementException {@inheritDoc}
+ */
+ public E removeLast() {
+ return screenNullResult(pollLast());
+ }
+
+ // *** Queue and stack methods ***
+
+ /**
+ * Inserts the specified element at the tail of this deque.
+ * As the deque is unbounded, this method will never return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Queue#offer})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offer(E e) {
+ return offerLast(e);
+ }
+
+ /**
+ * Inserts the specified element at the tail of this deque.
+ * As the deque is unbounded, this method will never throw
+ * {@link IllegalStateException} or return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Collection#add})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean add(E e) {
+ return offerLast(e);
+ }
+
+ public E poll() { return pollFirst(); }
+ public E remove() { return removeFirst(); }
+ public E peek() { return peekFirst(); }
+ public E element() { return getFirst(); }
+ public void push(E e) { addFirst(e); }
+ public E pop() { return removeFirst(); }
+
+ /**
+ * Removes the first element {@code e} such that
+ * {@code o.equals(e)}, if such an element exists in this deque.
+ * If the deque does not contain the element, it is unchanged.
+ *
+ * @param o element to be removed from this deque, if present
+ * @return {@code true} if the deque contained the specified element
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean removeFirstOccurrence(Object o) {
+ checkNotNull(o);
+ for (Node<E> p = first(); p != null; p = succ(p)) {
+ E item = p.item;
+ if (item != null && o.equals(item) && p.casItem(item, null)) {
+ unlink(p);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Removes the last element {@code e} such that
+ * {@code o.equals(e)}, if such an element exists in this deque.
+ * If the deque does not contain the element, it is unchanged.
+ *
+ * @param o element to be removed from this deque, if present
+ * @return {@code true} if the deque contained the specified element
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean removeLastOccurrence(Object o) {
+ checkNotNull(o);
+ for (Node<E> p = last(); p != null; p = pred(p)) {
+ E item = p.item;
+ if (item != null && o.equals(item) && p.casItem(item, null)) {
+ unlink(p);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns {@code true} if this deque contains at least one
+ * element {@code e} such that {@code o.equals(e)}.
+ *
+ * @param o element whose presence in this deque is to be tested
+ * @return {@code true} if this deque contains the specified element
+ */
+ public boolean contains(Object o) {
+ if (o == null) return false;
+ for (Node<E> p = first(); p != null; p = succ(p)) {
+ E item = p.item;
+ if (item != null && o.equals(item))
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns {@code true} if this collection contains no elements.
+ *
+ * @return {@code true} if this collection contains no elements
+ */
+ public boolean isEmpty() {
+ return peekFirst() == null;
+ }
+
+ /**
+ * Returns the number of elements in this deque. If this deque
+ * contains more than {@code Integer.MAX_VALUE} elements, it
+ * returns {@code Integer.MAX_VALUE}.
+ *
+ * <p>Beware that, unlike in most collections, this method is
+ * <em>NOT</em> a constant-time operation. Because of the
+ * asynchronous nature of these deques, determining the current
+ * number of elements requires traversing them all to count them.
+ * Additionally, it is possible for the size to change during
+ * execution of this method, in which case the returned result
+ * will be inaccurate. Thus, this method is typically not very
+ * useful in concurrent applications.
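+ *
+ * <p>For this reason, when only emptiness matters, {@code isEmpty()}
+ * is preferable to {@code size() == 0}. A minimal sketch (assuming a
+ * deque {@code d} and a hypothetical {@code process} method):
+ *
+ * <pre> {@code
+ * if (!d.isEmpty())            // checks only the first live node
+ *     process(d.peekFirst());  // rather than d.size() > 0, an O(n) scan
+ * }</pre>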
+ *
+ * @return the number of elements in this deque
+ */
+ public int size() {
+ int count = 0;
+ for (Node<E> p = first(); p != null; p = succ(p))
+ if (p.item != null)
+ // Collection.size() spec says to max out
+ if (++count == Integer.MAX_VALUE)
+ break;
+ return count;
+ }
+
+ /**
+ * Removes the first element {@code e} such that
+ * {@code o.equals(e)}, if such an element exists in this deque.
+ * If the deque does not contain the element, it is unchanged.
+ *
+ * @param o element to be removed from this deque, if present
+ * @return {@code true} if the deque contained the specified element
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean remove(Object o) {
+ return removeFirstOccurrence(o);
+ }
+
+ /**
+ * Appends all of the elements in the specified collection to the end of
+ * this deque, in the order that they are returned by the specified
+ * collection's iterator. Attempts to {@code addAll} of a deque to
+ * itself result in {@code IllegalArgumentException}.
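+ *
+ * <p>A minimal usage sketch (assuming a deque {@code d}): because a
+ * single successful CAS links the entire copied chain, the whole
+ * batch becomes visible to other threads as one atomic append.
+ *
+ * <pre> {@code
+ * d.addAll(java.util.Arrays.asList("a", "b", "c"));
+ * // all three elements appear at the tail together, in iterator order
+ * }</pre>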
+ *
+ * @param c the elements to be inserted into this deque
+ * @return {@code true} if this deque changed as a result of the call
+ * @throws NullPointerException if the specified collection or any
+ * of its elements are null
+ * @throws IllegalArgumentException if the collection is this deque
+ */
+ public boolean addAll(Collection<? extends E> c) {
+ if (c == this)
+ // As historically specified in AbstractQueue#addAll
+ throw new IllegalArgumentException();
+
+ // Copy c into a private chain of Nodes
+ Node<E> beginningOfTheEnd = null, last = null;
+ for (E e : c) {
+ checkNotNull(e);
+ Node<E> newNode = new Node<E>(e);
+ if (beginningOfTheEnd == null)
+ beginningOfTheEnd = last = newNode;
+ else {
+ last.lazySetNext(newNode);
+ newNode.lazySetPrev(last);
+ last = newNode;
+ }
+ }
+ if (beginningOfTheEnd == null)
+ return false;
+
+ // Atomically append the chain at the tail of this collection
+ restartFromTail:
+ for (;;)
+ for (Node<E> t = tail, p = t, q;;) {
+ if ((q = p.next) != null &&
+ (q = (p = q).next) != null)
+ // Check for tail updates every other hop.
+ // If p == q, we are sure to follow tail instead.
+ p = (t != (t = tail)) ? t : q;
+ else if (p.prev == p) // NEXT_TERMINATOR
+ continue restartFromTail;
+ else {
+ // p is last node
+ beginningOfTheEnd.lazySetPrev(p); // CAS piggyback
+ if (p.casNext(null, beginningOfTheEnd)) {
+ // Successful CAS is the linearization point
+ // for all elements to be added to this deque.
+ if (!casTail(t, last)) {
+ // Try a little harder to update tail,
+ // since we may be adding many elements.
+ t = tail;
+ if (last.next == null)
+ casTail(t, last);
+ }
+ return true;
+ }
+ // Lost CAS race to another thread; re-read next
+ }
+ }
+ }
+
+ /**
+ * Removes all of the elements from this deque.
+ */
+ public void clear() {
+ while (pollFirst() != null)
+ ;
+ }
+
+ /**
+ * Returns an array containing all of the elements in this deque, in
+ * proper sequence (from first to last element).
+ *
+ * <p>The returned array will be "safe" in that no references to it are
+ * maintained by this deque. (In other words, this method must allocate
+ * a new array). The caller is thus free to modify the returned array.
+ *
+ * <p>This method acts as bridge between array-based and collection-based
+ * APIs.
+ *
+ * @return an array containing all of the elements in this deque
+ */
+ public Object[] toArray() {
+ return toArrayList().toArray();
+ }
+
+ /**
+ * Returns an array containing all of the elements in this deque,
+ * in proper sequence (from first to last element); the runtime
+ * type of the returned array is that of the specified array. If
+ * the deque fits in the specified array, it is returned therein.
+ * Otherwise, a new array is allocated with the runtime type of
+ * the specified array and the size of this deque.
+ *
+ * <p>If this deque fits in the specified array with room to spare
+ * (i.e., the array has more elements than this deque), the element in
+ * the array immediately following the end of the deque is set to
+ * {@code null}.
+ *
+ * <p>Like the {@link #toArray()} method, this method acts as
+ * bridge between array-based and collection-based APIs. Further,
+ * this method allows precise control over the runtime type of the
+ * output array, and may, under certain circumstances, be used to
+ * save allocation costs.
+ *
+ * <p>Suppose {@code x} is a deque known to contain only strings.
+ * The following code can be used to dump the deque into a newly
+ * allocated array of {@code String}:
+ *
+ * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
+ *
+ * Note that {@code toArray(new Object[0])} is identical in function to
+ * {@code toArray()}.
+ *
+ * @param a the array into which the elements of the deque are to
+ * be stored, if it is big enough; otherwise, a new array of the
+ * same runtime type is allocated for this purpose
+ * @return an array containing all of the elements in this deque
+ * @throws ArrayStoreException if the runtime type of the specified array
+ * is not a supertype of the runtime type of every element in
+ * this deque
+ * @throws NullPointerException if the specified array is null
+ */
+ public <T> T[] toArray(T[] a) {
+ return toArrayList().toArray(a);
+ }
+
+ /**
+ * Returns an iterator over the elements in this deque in proper sequence.
+ * The elements will be returned in order from first (head) to last (tail).
+ *
+ * <p>The returned iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException
+ * ConcurrentModificationException}, and guarantees to traverse
+ * elements as they existed upon construction of the iterator, and
+ * may (but is not guaranteed to) reflect any modifications
+ * subsequent to construction.
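+ *
+ * <p>For example, it is safe (a sketch, assuming a deque
+ * {@code deque}; {@code shouldDrop} is a hypothetical predicate) to
+ * mutate the deque while iterating:
+ *
+ * <pre> {@code
+ * for (Iterator<E> it = deque.iterator(); it.hasNext(); ) {
+ *     E e = it.next();
+ *     if (shouldDrop(e))
+ *         it.remove();   // never throws ConcurrentModificationException
+ * }}</pre>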
+ *
+ * @return an iterator over the elements in this deque in proper sequence
+ */
+ public Iterator<E> iterator() {
+ return new Itr();
+ }
+
+ /**
+ * Returns an iterator over the elements in this deque in reverse
+ * sequential order. The elements will be returned in order from
+ * last (tail) to first (head).
+ *
+ * <p>The returned iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException
+ * ConcurrentModificationException}, and guarantees to traverse
+ * elements as they existed upon construction of the iterator, and
+ * may (but is not guaranteed to) reflect any modifications
+ * subsequent to construction.
+ *
+ * @return an iterator over the elements in this deque in reverse order
+ */
+ public Iterator<E> descendingIterator() {
+ return new DescendingItr();
+ }
+
+ private abstract class AbstractItr implements Iterator<E> {
+ /**
+ * Next node to return item for.
+ */
+ private Node<E> nextNode;
+
+ /**
+ * nextItem holds on to item fields because once we claim
+ * that an element exists in hasNext(), we must return it in
+ * the following next() call even if it was in the process of
+ * being removed when hasNext() was called.
+ */
+ private E nextItem;
+
+ /**
+ * Node returned by most recent call to next. Needed by remove.
+ * Reset to null if this element is deleted by a call to remove.
+ */
+ private Node<E> lastRet;
+
+ abstract Node<E> startNode();
+ abstract Node<E> nextNode(Node<E> p);
+
+ AbstractItr() {
+ advance();
+ }
+
+ /**
+ * Sets nextNode and nextItem to next valid node, or to null
+ * if no such.
+ */
+ private void advance() {
+ lastRet = nextNode;
+
+ Node<E> p = (nextNode == null) ? startNode() : nextNode(nextNode);
+ for (;; p = nextNode(p)) {
+ if (p == null) {
+ // p might be active end or TERMINATOR node; both are OK
+ nextNode = null;
+ nextItem = null;
+ break;
+ }
+ E item = p.item;
+ if (item != null) {
+ nextNode = p;
+ nextItem = item;
+ break;
+ }
+ }
+ }
+
+ public boolean hasNext() {
+ return nextItem != null;
+ }
+
+ public E next() {
+ E item = nextItem;
+ if (item == null) throw new NoSuchElementException();
+ advance();
+ return item;
+ }
+
+ public void remove() {
+ Node<E> l = lastRet;
+ if (l == null) throw new IllegalStateException();
+ l.item = null;
+ unlink(l);
+ lastRet = null;
+ }
+ }
+
+ /** Forward iterator */
+ private class Itr extends AbstractItr {
+ Node<E> startNode() { return first(); }
+ Node<E> nextNode(Node<E> p) { return succ(p); }
+ }
+
+ /** Descending iterator */
+ private class DescendingItr extends AbstractItr {
+ Node<E> startNode() { return last(); }
+ Node<E> nextNode(Node<E> p) { return pred(p); }
+ }
+
+ /**
+ * Saves the state to a stream (that is, serializes it).
+ *
+ * @serialData All of the elements (each an {@code E}) in
+ * the proper order, followed by a null
+ * @param s the stream
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+
+ // Write out any hidden stuff
+ s.defaultWriteObject();
+
+ // Write out all elements in the proper order.
+ for (Node<E> p = first(); p != null; p = succ(p)) {
+ E item = p.item;
+ if (item != null)
+ s.writeObject(item);
+ }
+
+ // Use trailing null as sentinel
+ s.writeObject(null);
+ }
+
+ /**
+ * Reconstitutes the instance from a stream (that is, deserializes it).
+ * @param s the stream
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+
+ // Read in elements until trailing null sentinel found
+ Node<E> h = null, t = null;
+ Object item;
+ while ((item = s.readObject()) != null) {
+ @SuppressWarnings("unchecked")
+ Node<E> newNode = new Node<E>((E) item);
+ if (h == null)
+ h = t = newNode;
+ else {
+ t.lazySetNext(newNode);
+ newNode.lazySetPrev(t);
+ t = newNode;
+ }
+ }
+ initHeadTail(h, t);
+ }
+
+
+ private boolean casHead(Node<E> cmp, Node<E> val) {
+ return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
+ }
+
+ private boolean casTail(Node<E> cmp, Node<E> val) {
+ return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
+ }
+
+ // Unsafe mechanics
+
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long headOffset;
+ private static final long tailOffset;
+ static {
+ PREV_TERMINATOR = new Node<Object>();
+ PREV_TERMINATOR.next = PREV_TERMINATOR;
+ NEXT_TERMINATOR = new Node<Object>();
+ NEXT_TERMINATOR.prev = NEXT_TERMINATOR;
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = ConcurrentLinkedDeque.class;
+ headOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("head"));
+ tailOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("tail"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/CountedCompleter.java b/src/main/java/jsr166y/CountedCompleter.java
new file mode 100644
index 0000000..7e74240
--- /dev/null
+++ b/src/main/java/jsr166y/CountedCompleter.java
@@ -0,0 +1,744 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+/**
+ * A {@link ForkJoinTask} with a completion action performed when
+ * triggered and there are no remaining pending
+ * actions. CountedCompleters are in general more robust in the
+ * presence of subtask stalls and blockage than are other forms of
+ * ForkJoinTasks, but are less intuitive to program. Uses of
+ * CountedCompleter are similar to those of other completion based
+ * components (such as {@link java.nio.channels.CompletionHandler})
+ * except that multiple <em>pending</em> completions may be necessary
+ * to trigger the completion action {@link #onCompletion}, not just one.
+ * Unless initialized otherwise, the {@linkplain #getPendingCount pending
+ * count} starts at zero, but may be (atomically) changed using
+ * methods {@link #setPendingCount}, {@link #addToPendingCount}, and
+ * {@link #compareAndSetPendingCount}. Upon invocation of {@link
+ * #tryComplete}, if the pending action count is nonzero, it is
+ * decremented; otherwise, the completion action is performed, and if
+ * this completer itself has a completer, the process is continued
+ * with its completer. As is the case with related synchronization
+ * components such as {@link java.util.concurrent.Phaser Phaser} and
+ * {@link java.util.concurrent.Semaphore Semaphore}, these methods
+ * affect only internal counts; they do not establish any further
+ * internal bookkeeping. In particular, the identities of pending
+ * tasks are not maintained. As illustrated below, you can create
+ * subclasses that do record some or all pending tasks or their
+ * results when needed, and utility methods supporting
+ * customization of completion traversals are also provided.
+ * However, because CountedCompleters provide only basic
+ * synchronization mechanisms, it may be useful to create further
+ * abstract subclasses that maintain linkages, fields, and additional
+ * support methods appropriate for a set of related usages.
+ *
+ * <p>A concrete CountedCompleter class must define method {@link
+ * #compute}, that should in most cases (as illustrated below), invoke
+ * {@code tryComplete()} once before returning. The class may also
+ * optionally override method {@link #onCompletion} to perform an
+ * action upon normal completion, and method {@link
+ * #onExceptionalCompletion} to perform an action upon any exception.
+ *
+ * <p>CountedCompleters most often do not bear results, in which case
+ * they are normally declared as {@code CountedCompleter<Void>}, and
+ * will always return {@code null} as a result value. In other cases,
+ * you should override method {@link #getRawResult} to provide a
+ * result from {@code join(), invoke()}, and related methods. In
+ * general, this method should return the value of a field (or a
+ * function of one or more fields) of the CountedCompleter object that
+ * holds the result upon completion. Method {@link #setRawResult} by
+ * default plays no role in CountedCompleters. It is possible, but
+ * rarely applicable, to override this method to maintain other
+ * objects or fields holding result data.
+ *
+ * <p>A CountedCompleter that does not itself have a completer (i.e.,
+ * one for which {@link #getCompleter} returns {@code null}) can be
+ * used as a regular ForkJoinTask with this added functionality.
+ * However, any completer that in turn has another completer serves
+ * only as an internal helper for other computations, so its own task
+ * status (as reported in methods such as {@link ForkJoinTask#isDone})
+ * is arbitrary; this status changes only upon explicit invocations of
+ * {@link #complete}, {@link ForkJoinTask#cancel}, {@link
+ * ForkJoinTask#completeExceptionally} or upon exceptional completion
+ * of method {@code compute}. Upon any exceptional completion, the
+ * exception may be relayed to a task's completer (and its completer,
+ * and so on), if one exists and it has not otherwise already
+ * completed. Similarly, cancelling an internal CountedCompleter has
+ * only a local effect on that completer, so is not often useful.
+ *
+ * <p><b>Sample Usages.</b>
+ *
+ * <p><b>Parallel recursive decomposition.</b> CountedCompleters may
+ * be arranged in trees similar to those often used with {@link
+ * RecursiveAction}s, although the constructions involved in setting
+ * them up typically vary. Here, the completer of each task is its
+ * parent in the computation tree. Even though they entail a bit more
+ * bookkeeping, CountedCompleters may be better choices when applying
+ * a possibly time-consuming operation (that cannot be further
+ * subdivided) to each element of an array or collection; especially
+ * when the operation takes a significantly different amount of time
+ * to complete for some elements than others, either because of
+ * intrinsic variation (for example I/O) or auxiliary effects such as
+ * garbage collection. Because CountedCompleters provide their own
+ * continuations, other threads need not block waiting to perform
+ * them.
+ *
+ * <p>For example, here is an initial version of a class that uses
+ * divide-by-two recursive decomposition to divide work into single
+ * pieces (leaf tasks). Even when work is split into individual calls,
+ * tree-based techniques are usually preferable to directly forking
+ * leaf tasks, because they reduce inter-thread communication and
+ * improve load balancing. In the recursive case, the second of each
+ * pair of subtasks to finish triggers completion of its parent
+ * (because no result combination is performed, the default no-op
+ * implementation of method {@code onCompletion} is not overridden). A
+ * static utility method sets up the base task and invokes it
+ * (here, implicitly using the {@link ForkJoinPool#commonPool()}).
+ *
+ * <pre> {@code
+ * class MyOperation<E> { void apply(E e) { ... } }
+ *
+ * class ForEach<E> extends CountedCompleter<Void> {
+ *
+ * public static <E> void forEach(E[] array, MyOperation<E> op) {
+ * new ForEach<E>(null, array, op, 0, array.length).invoke();
+ * }
+ *
+ * final E[] array; final MyOperation<E> op; final int lo, hi;
+ * ForEach(CountedCompleter<?> p, E[] array, MyOperation<E> op, int lo, int hi) {
+ * super(p);
+ * this.array = array; this.op = op; this.lo = lo; this.hi = hi;
+ * }
+ *
+ * public void compute() { // version 1
+ * if (hi - lo >= 2) {
+ * int mid = (lo + hi) >>> 1;
+ * setPendingCount(2); // must set pending count before fork
+ * new ForEach(this, array, op, mid, hi).fork(); // right child
+ * new ForEach(this, array, op, lo, mid).fork(); // left child
+ * }
+ * else if (hi > lo)
+ * op.apply(array[lo]);
+ * tryComplete();
+ * }
+ * }}</pre>
+ *
+ * This design can be improved by noticing that in the recursive case,
+ * the task has nothing to do after forking its right task, so can
+ * directly invoke its left task before returning. (This is an analog
+ * of tail recursion removal.) Also, because the task returns upon
+ * executing its left task (rather than falling through to invoke
+ * {@code tryComplete}) the pending count is set to one:
+ *
+ * <pre> {@code
+ * class ForEach<E> ...
+ * public void compute() { // version 2
+ * if (hi - lo >= 2) {
+ * int mid = (lo + hi) >>> 1;
+ * setPendingCount(1); // only one pending
+ * new ForEach(this, array, op, mid, hi).fork(); // right child
+ * new ForEach(this, array, op, lo, mid).compute(); // direct invoke
+ * }
+ * else {
+ * if (hi > lo)
+ * op.apply(array[lo]);
+ * tryComplete();
+ * }
+ * }
+ * }</pre>
+ *
+ * As a further improvement, notice that the left task need not even
+ * exist. Instead of creating a new one, we can iterate using the
+ * original task, and add a pending count for each fork. Additionally,
+ * because no task in this tree implements an {@link #onCompletion}
+ * method, {@code tryComplete()} can be replaced with {@link
+ * #propagateCompletion}.
+ *
+ * <pre> {@code
+ * class ForEach<E> ...
+ * public void compute() { // version 3
+ * int l = lo, h = hi;
+ * while (h - l >= 2) {
+ * int mid = (l + h) >>> 1;
+ * addToPendingCount(1);
+ * new ForEach(this, array, op, mid, h).fork(); // right child
+ * h = mid;
+ * }
+ * if (h > l)
+ * op.apply(array[l]);
+ * propagateCompletion();
+ * }
+ * }</pre>
+ *
+ * Additional improvements of such classes might entail precomputing
+ * pending counts so that they can be established in constructors,
+ * specializing classes for leaf steps, subdividing by, say, four,
+ * instead of two per iteration, and using an adaptive threshold
+ * instead of always subdividing down to single elements.
+ *
+ * <p><b>Searching.</b> A tree of CountedCompleters can search for a
+ * value or property in different parts of a data structure, and
+ * report a result in an {@link
+ * java.util.concurrent.atomic.AtomicReference AtomicReference} as
+ * soon as one is found. The others can poll the result to avoid
+ * unnecessary work. (You could additionally {@linkplain #cancel
+ * cancel} other tasks, but it is usually simpler and more efficient
+ * to just let them notice that the result is set and if so skip
+ * further processing.) Illustrating again with an array using full
+ * partitioning (again, in practice, leaf tasks will almost always
+ * process more than one element):
+ *
+ * <pre> {@code
+ * class Searcher<E> extends CountedCompleter<E> {
+ * final E[] array; final AtomicReference<E> result; final int lo, hi;
+ * Searcher(CountedCompleter<?> p, E[] array, AtomicReference<E> result, int lo, int hi) {
+ * super(p);
+ * this.array = array; this.result = result; this.lo = lo; this.hi = hi;
+ * }
+ * public E getRawResult() { return result.get(); }
+ * public void compute() { // similar to ForEach version 3
+ * int l = lo, h = hi;
+ * while (result.get() == null && h >= l) {
+ * if (h - l >= 2) {
+ * int mid = (l + h) >>> 1;
+ * addToPendingCount(1);
+ * new Searcher(this, array, result, mid, h).fork();
+ * h = mid;
+ * }
+ * else {
+ * E x = array[l];
+ * if (matches(x) && result.compareAndSet(null, x))
+ * quietlyCompleteRoot(); // root task is now joinable
+ * break;
+ * }
+ * }
+ * tryComplete(); // normally complete whether or not found
+ * }
+ * boolean matches(E e) { ... } // return true if found
+ *
+ * public static <E> E search(E[] array) {
+ * return new Searcher<E>(null, array, new AtomicReference<E>(), 0, array.length).invoke();
+ * }
+ * }}</pre>
+ *
+ * In this example, as well as others in which tasks have no other
+ * effects except to compareAndSet a common result, the trailing
+ * unconditional invocation of {@code tryComplete} could be made
+ * conditional ({@code if (result.get() == null) tryComplete();})
+ * because no further bookkeeping is required to manage completions
+ * once the root task completes.
+ *
+ * <p><b>Recording subtasks.</b> CountedCompleter tasks that combine
+ * results of multiple subtasks usually need to access these results
+ * in method {@link #onCompletion}. As illustrated in the following
+ * class (that performs a simplified form of map-reduce where mappings
+ * and reductions are all of type {@code E}), one way to do this in
+ * divide and conquer designs is to have each subtask record its
+ * sibling, so that it can be accessed in method {@code onCompletion}.
+ * This technique applies to reductions in which the order of
+ * combining left and right results does not matter; ordered
+ * reductions require explicit left/right designations. Variants of
+ * other streamlinings seen in the above examples may also apply.
+ *
+ * <pre> {@code
+ * class MyMapper<E> { E apply(E v) { ... } }
+ * class MyReducer<E> { E apply(E x, E y) { ... } }
+ * class MapReducer<E> extends CountedCompleter<E> {
+ * final E[] array; final MyMapper<E> mapper;
+ * final MyReducer<E> reducer; final int lo, hi;
+ * MapReducer<E> sibling;
+ * E result;
+ * MapReducer(CountedCompleter<?> p, E[] array, MyMapper<E> mapper,
+ * MyReducer<E> reducer, int lo, int hi) {
+ * super(p);
+ * this.array = array; this.mapper = mapper;
+ * this.reducer = reducer; this.lo = lo; this.hi = hi;
+ * }
+ * public void compute() {
+ * if (hi - lo >= 2) {
+ * int mid = (lo + hi) >>> 1;
+ * MapReducer<E> left = new MapReducer(this, array, mapper, reducer, lo, mid);
+ * MapReducer<E> right = new MapReducer(this, array, mapper, reducer, mid, hi);
+ * left.sibling = right;
+ * right.sibling = left;
+ * setPendingCount(1); // only right is pending
+ * right.fork();
+ * left.compute(); // directly execute left
+ * }
+ * else {
+ * if (hi > lo)
+ * result = mapper.apply(array[lo]);
+ * tryComplete();
+ * }
+ * }
+ * public void onCompletion(CountedCompleter<?> caller) {
+ * if (caller != this) {
+ * MapReducer<E> child = (MapReducer<E>)caller;
+ * MapReducer<E> sib = child.sibling;
+ * if (sib == null || sib.result == null)
+ * result = child.result;
+ * else
+ * result = reducer.apply(child.result, sib.result);
+ * }
+ * }
+ * public E getRawResult() { return result; }
+ *
+ * public static <E> E mapReduce(E[] array, MyMapper<E> mapper, MyReducer<E> reducer) {
+ * return new MapReducer<E>(null, array, mapper, reducer,
+ * 0, array.length).invoke();
+ * }
+ * }}</pre>
+ *
+ * Here, method {@code onCompletion} takes a form common to many
+ * completion designs that combine results. This callback-style method
+ * is triggered once per task, in either of the two different contexts
+ * in which the pending count is, or becomes, zero: (1) by a task
+ * itself, if its pending count is zero upon invocation of {@code
+ * tryComplete}, or (2) by any of its subtasks when they complete and
+ * decrement the pending count to zero. The {@code caller} argument
+ * distinguishes cases. Most often, when the caller is {@code this},
+ * no action is necessary. Otherwise the caller argument can be used
+ * (usually via a cast) to supply a value (and/or links to other
+ * values) to be combined. Assuming proper use of pending counts, the
+ * actions inside {@code onCompletion} occur (once) upon completion of
+ * a task and its subtasks. No additional synchronization is required
+ * within this method to ensure thread safety of accesses to fields of
+ * this task or other completed tasks.
+ *
+ * <p><b>Completion Traversals</b>. If using {@code onCompletion} to
+ * process completions is inapplicable or inconvenient, you can use
+ * methods {@link #firstComplete} and {@link #nextComplete} to create
+ * custom traversals. For example, to define a MapReducer that only
+ * splits out right-hand tasks in the form of the third ForEach
+ * example, the completions must cooperatively reduce along
+ * unexhausted subtask links, which can be done as follows:
+ *
+ * <pre> {@code
+ * class MapReducer<E> extends CountedCompleter<E> { // version 2
+ * final E[] array; final MyMapper<E> mapper;
+ * final MyReducer<E> reducer; final int lo, hi;
+ * MapReducer<E> forks, next; // record subtask forks in list
+ * E result;
+ * MapReducer(CountedCompleter<?> p, E[] array, MyMapper<E> mapper,
+ * MyReducer<E> reducer, int lo, int hi, MapReducer<E> next) {
+ * super(p);
+ * this.array = array; this.mapper = mapper;
+ * this.reducer = reducer; this.lo = lo; this.hi = hi;
+ * this.next = next;
+ * }
+ * public void compute() {
+ * int l = lo, h = hi;
+ * while (h - l >= 2) {
+ * int mid = (l + h) >>> 1;
+ * addToPendingCount(1);
+ * (forks = new MapReducer(this, array, mapper, reducer, mid, h, forks)).fork();
+ * h = mid;
+ * }
+ * if (h > l)
+ * result = mapper.apply(array[l]);
+ * // process completions by reducing along and advancing subtask links
+ * for (CountedCompleter<?> c = firstComplete(); c != null; c = c.nextComplete()) {
+ * for (MapReducer t = (MapReducer)c, s = t.forks; s != null; s = t.forks = s.next)
+ * t.result = reducer.apply(t.result, s.result);
+ * }
+ * }
+ * public E getRawResult() { return result; }
+ *
+ * public static <E> E mapReduce(E[] array, MyMapper<E> mapper, MyReducer<E> reducer) {
+ * return new MapReducer<E>(null, array, mapper, reducer,
+ * 0, array.length, null).invoke();
+ * }
+ * }}</pre>
+ *
+ * <p><b>Triggers.</b> Some CountedCompleters are themselves never
+ * forked, but instead serve as bits of plumbing in other designs,
+ * including those in which the completion of one or more async tasks
+ * triggers another async task. For example:
+ *
+ * <pre> {@code
+ * class HeaderBuilder extends CountedCompleter<...> { ... }
+ * class BodyBuilder extends CountedCompleter<...> { ... }
+ * class PacketSender extends CountedCompleter<...> {
+ * PacketSender(...) { super(null, 1); ... } // trigger on second completion
+ * public void compute() { } // never called
+ * public void onCompletion(CountedCompleter<?> caller) { sendPacket(); }
+ * }
+ * // sample use:
+ * PacketSender p = new PacketSender();
+ * new HeaderBuilder(p, ...).fork();
+ * new BodyBuilder(p, ...).fork();
+ * }</pre>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public abstract class CountedCompleter<T> extends ForkJoinTask<T> {
+ private static final long serialVersionUID = 5232453752276485070L;
+
+ /** This task's completer, or null if none */
+ final CountedCompleter<?> completer;
+ /** The number of pending tasks until completion */
+ volatile int pending;
+
+ /**
+ * Creates a new CountedCompleter with the given completer
+ * and initial pending count.
+ *
+ * @param completer this task's completer, or {@code null} if none
+ * @param initialPendingCount the initial pending count
+ */
+ protected CountedCompleter(CountedCompleter<?> completer,
+ int initialPendingCount) {
+ this.completer = completer;
+ this.pending = initialPendingCount;
+ }
+
+ /**
+ * Creates a new CountedCompleter with the given completer
+ * and an initial pending count of zero.
+ *
+ * @param completer this task's completer, or {@code null} if none
+ */
+ protected CountedCompleter(CountedCompleter<?> completer) {
+ this.completer = completer;
+ }
+
+ /**
+ * Creates a new CountedCompleter with no completer
+ * and an initial pending count of zero.
+ */
+ protected CountedCompleter() {
+ this.completer = null;
+ }
+
+ /**
+ * The main computation performed by this task.
+ */
+ public abstract void compute();
+
+ /**
+ * Performs an action when method {@link #tryComplete} is invoked
+ * and the pending count is zero, or when the unconditional
+ * method {@link #complete} is invoked. By default, this method
+ * does nothing. You can distinguish cases by checking the
+ * identity of the given caller argument. If not equal to {@code
+ * this}, then it is typically a subtask that may contain results
+ * (and/or links to other results) to combine.
+ *
+ * @param caller the task invoking this method (which may
+ * be this task itself)
+ */
+ public void onCompletion(CountedCompleter<?> caller) {
+ }
+
+ /**
+ * Performs an action when method {@link #completeExceptionally}
+ * is invoked or method {@link #compute} throws an exception, and
+ * this task has not otherwise already completed normally. On
+ * entry to this method, {@link ForkJoinTask#isCompletedAbnormally}
+ * is true for this task. The return value of this
+ * method controls further propagation: If {@code true} and this
+ * task has a completer, then this completer is also completed
+ * exceptionally. The default implementation of this method does
+ * nothing except return {@code true}.
+ *
+ * @param ex the exception
+ * @param caller the task invoking this method (which may
+ * be this task itself)
+ * @return true if this exception should be propagated to this
+ * task's completer, if one exists
+ */
+ public boolean onExceptionalCompletion(Throwable ex, CountedCompleter<?> caller) {
+ return true;
+ }
+
+ /**
+ * Returns the completer established in this task's constructor,
+ * or {@code null} if none.
+ *
+ * @return the completer
+ */
+ public final CountedCompleter<?> getCompleter() {
+ return completer;
+ }
+
+ /**
+ * Returns the current pending count.
+ *
+ * @return the current pending count
+ */
+ public final int getPendingCount() {
+ return pending;
+ }
+
+ /**
+ * Sets the pending count to the given value.
+ *
+ * @param count the count
+ */
+ public final void setPendingCount(int count) {
+ pending = count;
+ }
+
+ /**
+ * Adds (atomically) the given value to the pending count.
+ *
+ * @param delta the value to add
+ */
+ public final void addToPendingCount(int delta) {
+ int c; // note: can replace with intrinsic in jdk8
+ do {} while (!U.compareAndSwapInt(this, PENDING, c = pending, c+delta));
+ }
+
+ /**
+ * Sets (atomically) the pending count to the given count only if
+ * it currently holds the given expected value.
+ *
+ * @param expected the expected value
+ * @param count the new value
+ * @return true if successful
+ */
+ public final boolean compareAndSetPendingCount(int expected, int count) {
+ return U.compareAndSwapInt(this, PENDING, expected, count);
+ }
+
+ /**
+ * If the pending count is nonzero, (atomically) decrements it.
+ *
+ * @return the initial (undecremented) pending count holding on entry
+ * to this method
+ */
+ public final int decrementPendingCountUnlessZero() {
+ int c;
+ do {} while ((c = pending) != 0 &&
+ !U.compareAndSwapInt(this, PENDING, c, c - 1));
+ return c;
+ }
+
+ /**
+ * Returns the root of the current computation; i.e., this
+ * task if it has no completer, else its completer's root.
+ *
+ * @return the root of the current computation
+ */
+ public final CountedCompleter<?> getRoot() {
+ CountedCompleter<?> a = this, p;
+ while ((p = a.completer) != null)
+ a = p;
+ return a;
+ }
+
+ /**
+ * If the pending count is nonzero, decrements the count;
+ * otherwise invokes {@link #onCompletion} and then similarly
+ * tries to complete this task's completer, if one exists,
+ * else marks this task as complete.
+ */
+ public final void tryComplete() {
+ CountedCompleter<?> a = this, s = a;
+ for (int c;;) {
+ if ((c = a.pending) == 0) {
+ a.onCompletion(s);
+ if ((a = (s = a).completer) == null) {
+ s.quietlyComplete();
+ return;
+ }
+ }
+ else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
+ return;
+ }
+ }
+
+ /**
+ * Equivalent to {@link #tryComplete} but does not invoke {@link
+ * #onCompletion} along the completion path: If the pending count
+ * is nonzero, decrements the count; otherwise, similarly tries to
+ * complete this task's completer, if one exists, else marks this
+ * task as complete. This method may be useful in cases where
+ * {@code onCompletion} should not, or need not, be invoked for
+ * each completer in a computation.
+ */
+ public final void propagateCompletion() {
+ CountedCompleter<?> a = this, s = a;
+ for (int c;;) {
+ if ((c = a.pending) == 0) {
+ if ((a = (s = a).completer) == null) {
+ s.quietlyComplete();
+ return;
+ }
+ }
+ else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
+ return;
+ }
+ }
+
+ /**
+ * Regardless of pending count, invokes {@link #onCompletion},
+ * marks this task as complete and further triggers {@link
+ * #tryComplete} on this task's completer, if one exists. The
+ * given rawResult is used as an argument to {@link #setRawResult}
+ * before invoking {@link #onCompletion} or marking this task as
+ * complete; its value is meaningful only for classes overriding
+ * {@code setRawResult}.
+ *
+ * <p>This method may be useful when forcing completion as soon as
+ * any one (versus all) of several subtask results are obtained.
+ * However, in the common (and recommended) case in which {@code
+ * setRawResult} is not overridden, this effect can be obtained
+ * more simply using {@code quietlyCompleteRoot();}.
+ *
+ * @param rawResult the raw result
+ */
+ public void complete(T rawResult) {
+ CountedCompleter<?> p;
+ setRawResult(rawResult);
+ onCompletion(this);
+ quietlyComplete();
+ if ((p = completer) != null)
+ p.tryComplete();
+ }
+
+
+ /**
+ * If this task's pending count is zero, returns this task;
+ * otherwise decrements its pending count and returns {@code
+ * null}. This method is designed to be used with {@link
+ * #nextComplete} in completion traversal loops.
+ *
+ * @return this task, if pending count was zero, else {@code null}
+ */
+ public final CountedCompleter<?> firstComplete() {
+ for (int c;;) {
+ if ((c = pending) == 0)
+ return this;
+ else if (U.compareAndSwapInt(this, PENDING, c, c - 1))
+ return null;
+ }
+ }
+
+ /**
+ * If this task does not have a completer, invokes {@link
+ * ForkJoinTask#quietlyComplete} and returns {@code null}. Or, if
+ * this task's pending count is non-zero, decrements its pending
+ * count and returns {@code null}. Otherwise, returns the
+ * completer. This method can be used as part of a completion
+ * traversal loop for homogeneous task hierarchies:
+ *
+ * <pre> {@code
+ * for (CountedCompleter<?> c = firstComplete();
+ * c != null;
+ * c = c.nextComplete()) {
+ * // ... process c ...
+ * }}</pre>
+ *
+ * @return the completer, or {@code null} if none
+ */
+ public final CountedCompleter<?> nextComplete() {
+ CountedCompleter<?> p;
+ if ((p = completer) != null)
+ return p.firstComplete();
+ else {
+ quietlyComplete();
+ return null;
+ }
+ }
+
+ /**
+ * Equivalent to {@code getRoot().quietlyComplete()}.
+ */
+ public final void quietlyCompleteRoot() {
+ for (CountedCompleter<?> a = this, p;;) {
+ if ((p = a.completer) == null) {
+ a.quietlyComplete();
+ return;
+ }
+ a = p;
+ }
+ }
+
+ /**
+ * Supports ForkJoinTask exception propagation.
+ */
+ void internalPropagateException(Throwable ex) {
+ CountedCompleter<?> a = this, s = a;
+ while (a.onExceptionalCompletion(ex, s) &&
+ (a = (s = a).completer) != null && a.status >= 0)
+ a.recordExceptionalCompletion(ex);
+ }
+
+ /**
+ * Implements execution conventions for CountedCompleters.
+ */
+ protected final boolean exec() {
+ compute();
+ return false;
+ }
+
+ /**
+ * Returns the result of the computation. By default,
+ * returns {@code null}, which is appropriate for {@code Void}
+ * actions, but in other cases should be overridden, almost
+ * always to return a field or function of a field that
+ * holds the result upon completion.
+ *
+ * @return the result of the computation
+ */
+ public T getRawResult() { return null; }
+
+ /**
+ * A method that result-bearing CountedCompleters may optionally
+ * use to help maintain result data. By default, does nothing.
+ * Overrides are not recommended. However, if this method is
+ * overridden to update existing objects or fields, then it must
+ * in general be defined to be thread-safe.
+ */
+ protected void setRawResult(T t) { }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long PENDING;
+ static {
+ try {
+ U = getUnsafe();
+ PENDING = U.objectFieldOffset
+ (CountedCompleter.class.getDeclaredField("pending"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/ForkJoinPool.java b/src/main/java/jsr166y/ForkJoinPool.java
new file mode 100644
index 0000000..211722d
--- /dev/null
+++ b/src/main/java/jsr166y/ForkJoinPool.java
@@ -0,0 +1,3427 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An {@link ExecutorService} for running {@link ForkJoinTask}s.
+ * A {@code ForkJoinPool} provides the entry point for submissions
+ * from non-{@code ForkJoinTask} clients, as well as management and
+ * monitoring operations.
+ *
+ * <p>A {@code ForkJoinPool} differs from other kinds of {@link
+ * ExecutorService} mainly by virtue of employing
+ * <em>work-stealing</em>: all threads in the pool attempt to find and
+ * execute tasks submitted to the pool and/or created by other active
+ * tasks (eventually blocking waiting for work if none exist). This
+ * enables efficient processing when most tasks spawn other subtasks
+ * (as do most {@code ForkJoinTask}s), as well as when many small
+ * tasks are submitted to the pool from external clients. Especially
+ * when setting <em>asyncMode</em> to true in constructors, {@code
+ * ForkJoinPool}s may also be appropriate for use with event-style
+ * tasks that are never joined.
+ *
+ * <p>A static {@link #commonPool()} is available and appropriate for
+ * most applications. The common pool is used by any ForkJoinTask that
+ * is not explicitly submitted to a specified pool. Using the common
+ * pool normally reduces resource usage (its threads are slowly
+ * reclaimed during periods of non-use, and reinstated upon subsequent
+ * use).
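+ *
+ * <p>A minimal sketch of common-pool use ({@code SumTask} is a
+ * hypothetical {@code ForkJoinTask} subclass): forking a task
+ * outside any explicit pool schedules it in the common pool.
+ *
+ * <pre> {@code
+ * SumTask task = new SumTask(array, 0, array.length);
+ * task.fork();            // runs in commonPool()
+ * long sum = task.join(); // await and obtain the result
+ * }</pre>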
+ *
+ * <p>For applications that require separate or custom pools, a {@code
+ * ForkJoinPool} may be constructed with a given target parallelism
+ * level; by default, equal to the number of available processors. The
+ * pool attempts to maintain enough active (or available) threads by
+ * dynamically adding, suspending, or resuming internal worker
+ * threads, even if some tasks are stalled waiting to join
+ * others. However, no such adjustments are guaranteed in the face of
+ * blocked I/O or other unmanaged synchronization. The nested {@link
+ * ManagedBlocker} interface enables extension of the kinds of
+ * synchronization accommodated.
+ *
+ * <p>In addition to execution and lifecycle control methods, this
+ * class provides status check methods (for example
+ * {@link #getStealCount}) that are intended to aid in developing,
+ * tuning, and monitoring fork/join applications. Also, method
+ * {@link #toString} returns indications of pool state in a
+ * convenient form for informal monitoring.
+ *
+ * <p>As is the case with other ExecutorServices, there are three
+ * main task execution methods summarized in the following table.
+ * These are designed to be used primarily by clients not already
+ * engaged in fork/join computations in the current pool. The main
+ * forms of these methods accept instances of {@code ForkJoinTask},
+ * but overloaded forms also allow mixed execution of plain {@code
+ * Runnable}- or {@code Callable}- based activities as well. However,
+ * tasks that are already executing in a pool should normally instead
+ * use the within-computation forms listed in the table unless using
+ * async event-style tasks that are not usually joined, in which case
+ * there is little difference among choice of methods.
+ *
+ * <table BORDER CELLPADDING=3 CELLSPACING=1>
+ * <tr>
+ * <td></td>
+ * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
+ * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange async execution</td>
+ * <td> {@link #execute(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Await and obtain result</td>
+ * <td> {@link #invoke(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#invoke}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange exec and obtain Future</td>
+ * <td> {@link #submit(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
+ * </tr>
+ * </table>
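+ *
+ * <p>For instance, a non-fork/join client might use the first two
+ * rows of the table as follows (a sketch; {@code SortTask} is
+ * hypothetical):
+ *
+ * <pre> {@code
+ * ForkJoinPool pool = new ForkJoinPool();        // default parallelism
+ * pool.execute(new SortTask(a, 0, a.length));    // async, no result awaited
+ * pool.invoke(new SortTask(b, 0, b.length));     // await completion
+ * }</pre>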
+ *
+ * <p>The common pool is by default constructed with default
+ * parameters, but these may be controlled by setting three {@link
+ * System#getProperty system properties} with prefix {@code
+ * java.util.concurrent.ForkJoinPool.common}: {@code parallelism} --
+ * an integer greater than zero, {@code threadFactory} -- the class
+ * name of a {@link ForkJoinWorkerThreadFactory}, and {@code
+ * exceptionHandler} -- the class name of a {@link
+ * java.lang.Thread.UncaughtExceptionHandler
+ * Thread.UncaughtExceptionHandler}. Upon any error in establishing
+ * these settings, default parameters are used.
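+ *
+ * <p>For example, the common pool's settings might be supplied on
+ * the command line (a sketch; the handler class name is
+ * hypothetical):
+ *
+ * <pre> {@code
+ * java -Djava.util.concurrent.ForkJoinPool.common.parallelism=4 \
+ *   -Djava.util.concurrent.ForkJoinPool.common.exceptionHandler=com.example.Handler \
+ *   MyApp
+ * }</pre>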
+ *
+ * <p><b>Implementation notes</b>: This implementation restricts the
+ * maximum number of running threads to 32767. Attempts to create
+ * pools with greater than the maximum number result in
+ * {@code IllegalArgumentException}.
+ *
+ * <p>This implementation rejects submitted tasks (that is, by throwing
+ * {@link RejectedExecutionException}) only when the pool is shut down
+ * or internal resources have been exhausted.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public class ForkJoinPool extends AbstractExecutorService {
+
+ /*
+ * Implementation Overview
+ *
+ * This class and its nested classes provide the main
+ * functionality and control for a set of worker threads:
+ * Submissions from non-FJ threads enter into submission queues.
+ * Workers take these tasks and typically split them into subtasks
+ * that may be stolen by other workers. Preference rules give
+ * first priority to processing tasks from their own queues (LIFO
+ * or FIFO, depending on mode), then to randomized FIFO steals of
+ * tasks in other queues.
+ *
+ * WorkQueues
+ * ==========
+ *
+ * Most operations occur within work-stealing queues (in nested
+ * class WorkQueue). These are special forms of Deques that
+ * support only three of the four possible end-operations -- push,
+ * pop, and poll (aka steal), under the further constraints that
+ * push and pop are called only from the owning thread (or, as
+ * extended here, under a lock), while poll may be called from
+ * other threads. (If you are unfamiliar with them, you probably
+ * want to read Herlihy and Shavit's book "The Art of
+ * Multiprocessor Programming", chapter 16 describing these in
+ * more detail before proceeding.) The main work-stealing queue
+ * design is roughly similar to those in the papers "Dynamic
+ * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
+ * (http://research.sun.com/scalable/pubs/index.html) and
+ * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
+ * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
+ * The main differences ultimately stem from GC requirements that
+ * we null out taken slots as soon as we can, to maintain as small
+ * a footprint as possible even in programs generating huge
+ * numbers of tasks. To accomplish this, we shift the CAS
+ * arbitrating pop vs poll (steal) from being on the indices
+ * ("base" and "top") to the slots themselves. So, both a
+ * successful pop and poll mainly entail a CAS of a slot from
+ * non-null to null. Because we rely on CASes of references, we
+ * do not need tag bits on base or top. They are simple ints as
+ * used in any circular array-based queue (see for example
+ * ArrayDeque). Updates to the indices must still be ordered in a
+ * way that guarantees that top == base means the queue is empty,
+ * but otherwise may err on the side of possibly making the queue
+ * appear nonempty when a push, pop, or poll have not fully
+ * committed. Note that this means that the poll operation,
+ * considered individually, is not wait-free. One thief cannot
+ * successfully continue until another in-progress one (or, if
+ * previously empty, a push) completes. However, in the
+ * aggregate, we ensure at least probabilistic non-blockingness.
+ * If an attempted steal fails, a thief always chooses a different
+ * random victim target to try next. So, in order for one thief to
+ * progress, it suffices for any in-progress poll or new push on
+ * any empty queue to complete. (This is why we normally use
+ * method pollAt and its variants that try once at the apparent
+ * base index, else consider alternative actions, rather than
+ * method poll.)
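+ *
+ * As an illustrative sketch only (simplified; it uses an
+ * AtomicReferenceArray in place of the actual Unsafe-based array
+ * access, and invented names), the slot-CAS arbitration for a
+ * poll/steal is roughly:
+ *
+ * static Object pollSketch(
+ *         java.util.concurrent.atomic.AtomicReferenceArray<Object> a,
+ *         int base) {
+ *     int i = base & (a.length() - 1);  // circular index
+ *     Object t = a.get(i);
+ *     // CAS of the slot from non-null to null decides the race
+ *     return (t != null && a.compareAndSet(i, t, null)) ? t : null;
+ * }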
+ *
+ * This approach also enables support of a user mode in which local
+ * task processing is in FIFO, not LIFO order, simply by using
+ * poll rather than pop. This can be useful in message-passing
+ * frameworks in which tasks are never joined. However, neither
+ * mode considers affinities, loads, cache localities, etc., so
+ * these modes rarely provide the best possible performance on a
+ * given machine, but they portably provide good throughput by
+ * averaging over
+ * these factors. (Further, even if we did try to use such
+ * information, we do not usually have a basis for exploiting it.
+ * For example, some sets of tasks profit from cache affinities,
+ * but others are harmed by cache pollution effects.)
+ *
+ * WorkQueues are also used in a similar way for tasks submitted
+ * to the pool. We cannot mix these tasks in the same queues used
+ * for work-stealing (this would contaminate lifo/fifo
+ * processing). Instead, we randomly associate submission queues
+ * with submitting threads, using a form of hashing. The
+ * ThreadLocal Submitter class contains a value initially used as
+ * a hash code for choosing existing queues, but may be randomly
+ * repositioned upon contention with other submitters. In
+ * essence, submitters act like workers except that they are
+ * restricted to executing local tasks that they submitted (or in
+ * the case of CountedCompleters, others with the same root task).
+ * However, because most shared/external queue operations are more
+ * expensive than internal, and because, at steady state, external
+ * submitters will compete for CPU with workers, ForkJoinTask.join
+ * and related methods disable them from repeatedly helping to
+ * process tasks if all workers are active. Insertion of tasks in
+ * shared mode requires a lock (mainly to protect in the case of
+ * resizing) but we use only a simple spinlock (using bits in
+ * field qlock), because submitters encountering a busy queue move
+ * on to try or create other queues -- they block only when
+ * creating and registering new queues.
+ *
+ * Management
+ * ==========
+ *
+ * The main throughput advantages of work-stealing stem from
+ * decentralized control -- workers mostly take tasks from
+ * themselves or each other. We cannot negate this in the
+ * implementation of other management responsibilities. The main
+ * tactic for avoiding bottlenecks is packing nearly all
+ * essentially atomic control state into two volatile variables
+ * that are by far most often read (not written) as status and
+ * consistency checks.
+ *
+ * Field "ctl" contains 64 bits holding all the information needed
+ * to atomically decide to add, inactivate, enqueue (on an event
+ * queue), dequeue, and/or re-activate workers. To enable this
+ * packing, we restrict maximum parallelism to (1<<15)-1 (which is
+ * far in excess of normal operating range) to allow ids, counts,
+ * and their negations (used for thresholding) to fit into 16bit
+ * fields.
+ *
+ * Field "plock" is a form of sequence lock with a saturating
+ * shutdown bit (similarly for per-queue "qlocks"), mainly
+ * protecting updates to the workQueues array, as well as to
+ * enable shutdown. When used as a lock, it is normally only very
+ * briefly held, so is nearly always available after at most a
+ * brief spin, but we use a monitor-based backup strategy to
+ * block when needed.
+ *
+ * Recording WorkQueues. WorkQueues are recorded in the
+ * "workQueues" array that is created upon first use and expanded
+ * if necessary. Updates to the array while recording new workers
+ * and unrecording terminated ones are protected from each other
+ * by a lock but the array is otherwise concurrently readable, and
+ * accessed directly. To simplify index-based operations, the
+ * array size is always a power of two, and all readers must
+ * tolerate null slots. Worker queues are at odd indices. Shared
+ * (submission) queues are at even indices, up to a maximum of 64
+ * slots, to limit growth even if array needs to expand to add
+ * more workers. Grouping them together in this way simplifies and
+ * speeds up task scanning.
+ *
+ * All worker thread creation is on-demand, triggered by task
+ * submissions, replacement of terminated workers, and/or
+ * compensation for blocked workers. However, all other support
+ * code is set up to work with other policies. To ensure that we
+ * do not hold on to worker references that would prevent GC, ALL
+ * accesses to workQueues are via indices into the workQueues
+ * array (which is one source of some of the messy code
+ * constructions here). In essence, the workQueues array serves as
+ * a weak reference mechanism. Thus for example the wait queue
+ * field of ctl stores indices, not references. Access to the
+ * workQueues in associated methods (for example signalWork) must
+ * both index-check and null-check the IDs. All such accesses
+ * ignore bad IDs by returning out early from what they are doing,
+ * since this can only be associated with termination, in which
+ * case it is OK to give up. All uses of the workQueues array
+ * also check that it is non-null (even if previously
+ * non-null). This allows nulling during termination, which is
+ * currently not necessary, but remains an option for
+ * resource-revocation-based shutdown schemes. It also helps
+ * reduce JIT issuance of uncommon-trap code, which tends to
+ * unnecessarily complicate control flow in some methods.
+ *
+ * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
+ * let workers spin indefinitely scanning for tasks when none can
+ * be found immediately, and we cannot start/resume workers unless
+ * there appear to be tasks available. On the other hand, we must
+ * quickly prod them into action when new tasks are submitted or
+ * generated. In many usages, ramp-up time to activate workers is
+ * the main limiting factor in overall performance (this is
+ * compounded at program start-up by JIT compilation and
+ * allocation). So we try to streamline this as much as possible.
+ * We park/unpark workers after placing in an event wait queue
+ * when they cannot find work. This "queue" is actually a simple
+ * Treiber stack, headed by the "id" field of ctl, plus a 15bit
+ * counter value (that reflects the number of times a worker has
+ * been inactivated) to avoid ABA effects (we need only as many
+ * version numbers as worker threads). Successors are held in
+ * field WorkQueue.nextWait. Queuing deals with several intrinsic
+ * races, mainly that a task-producing thread can miss seeing (and
+ * signalling) another thread that gave up looking for work but
+ * has not yet entered the wait queue. We solve this by requiring
+ * a full sweep of all workers (via repeated calls to method
+ * scan()) both before and after a newly waiting worker is added
+ * to the wait queue. During a rescan, the worker might release
+ * some other queued worker rather than itself, which has the same
+ * net effect. Because enqueued workers may actually be rescanning
+ * rather than waiting, we set and clear the "parker" field of
+ * WorkQueues to reduce unnecessary calls to unpark. (This
+ * requires a secondary recheck to avoid missed signals.) Note
+ * the unusual conventions about Thread.interrupts surrounding
+ * parking and other blocking: Because interrupts are used solely
+ * to alert threads to check termination, which is checked anyway
+ * upon blocking, we clear status (using Thread.interrupted)
+ * before any call to park, so that park does not immediately
+ * return due to status being set via some other unrelated call to
+ * interrupt in user code.
+ *
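+ * In outline (a simplified sketch; the real code parks via Unsafe
+ * and performs additional rechecks), the parking convention is:
+ *
+ *   Thread.interrupted();                   // clear status first
+ *   w.parker = Thread.currentThread();
+ *   if (w.eventCount < 0)                   // recheck to avoid
+ *       LockSupport.park(this);             //   missed signals
+ *   w.parker = null;
+ *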
+ * Signalling. We create or wake up workers only when there
+ * appears to be at least one task they might be able to find and
+ * execute. However, many other threads may notice the same task
+ * and each signal to wake up a thread that might take it. So in
+ * general, pools will be over-signalled. When a submission is
+ * added or another worker adds a task to a queue that has fewer
+ * than two tasks, they signal waiting workers (or trigger
+ * creation of new ones if fewer than the given parallelism level
+ * -- signalWork), and may leave a hint to the unparked worker to
+ * help signal others upon wakeup. These primary signals are
+ * buttressed by others (see method helpSignal) whenever other
+ * threads scan for work or do not have a task to process. On
+ * most platforms, signalling (unpark) overhead time is noticeably
+ * long, and the time between signalling a thread and it actually
+ * making progress can be very noticeably long, so it is worth
+ * offloading these delays from critical paths as much as
+ * possible.
+ *
+ * Trimming workers. To release resources after periods of lack of
+ * use, a worker starting to wait when the pool is quiescent will
+ * time out and terminate if the pool has remained quiescent for a
+ * given period -- a short period if there are more threads than
+ * parallelism, longer as the number of threads decreases. This
+ * will slowly propagate, eventually terminating all workers after
+ * periods of non-use.
+ *
+ * Shutdown and Termination. A call to shutdownNow atomically sets
+ * a plock bit and then (non-atomically) sets each worker's
+ * qlock status, cancels all unprocessed tasks, and wakes up
+ * all waiting workers. Detecting whether termination should
+ * commence after a non-abrupt shutdown() call requires more work
+ * and bookkeeping. We need consensus about quiescence (i.e., that
+ * there is no more work). The active count provides a primary
+ * indication but non-abrupt shutdown still requires a rechecking
+ * scan for any workers that are inactive but not queued.
+ *
+ * Joining Tasks
+ * =============
+ *
+ * Any of several actions may be taken when one worker is waiting
+ * to join a task stolen (or always held) by another. Because we
+ * are multiplexing many tasks on to a pool of workers, we can't
+ * just let them block (as in Thread.join). We also cannot just
+ * reassign the joiner's run-time stack with another and replace
+ * it later, which would be a form of "continuation" that, even if
+ * possible, is not necessarily a good idea since we sometimes need
+ * both an unblocked task and its continuation to progress.
+ * Instead we combine two tactics:
+ *
+ * Helping: Arranging for the joiner to execute some task that it
+ * would be running if the steal had not occurred.
+ *
+ * Compensating: Unless there are already enough live threads,
+ * method tryCompensate() may create or re-activate a spare
+ * thread to compensate for blocked joiners until they unblock.
+ *
+ * A third form (implemented in tryRemoveAndExec) amounts to
+ * helping a hypothetical compensator: If we can readily tell that
+ * a possible action of a compensator is to steal and execute the
+ * task being joined, the joining thread can do so directly,
+ * without the need for a compensation thread (although at the
+ * expense of larger run-time stacks, though the tradeoff is
+ * typically worthwhile).
+ *
+ * The ManagedBlocker extension API can't use helping so relies
+ * only on compensation in method awaitBlocker.
+ *
+ * The algorithm in tryHelpStealer entails a form of "linear"
+ * helping: Each worker records (in field currentSteal) the most
+ * recent task it stole from some other worker. Plus, it records
+ * (in field currentJoin) the task it is currently actively
+ * joining. Method tryHelpStealer uses these markers to try to
+ * find a worker to help (i.e., steal back a task from and execute
+ * it) that could hasten completion of the actively joined task.
+ * In essence, the joiner executes a task that would be on its own
+ * local deque had the to-be-joined task not been stolen. This may
+ * be seen as a conservative variant of the approach in Wagner &
+ * Calder "Leapfrogging: a portable technique for implementing
+ * efficient futures" SIGPLAN Notices, 1993
+ * (http://portal.acm.org/citation.cfm?id=155354). It differs in
+ * that: (1) We only maintain dependency links across workers upon
+ * steals, rather than use per-task bookkeeping. This sometimes
+ * requires a linear scan of workQueues array to locate stealers,
+ * but often doesn't because stealers leave hints (that may become
+ * stale/wrong) of where to locate them. It is only a hint
+ * because a worker might have had multiple steals and the hint
+ * records only one of them (usually the most current). Hinting
+ * isolates cost to when it is needed, rather than adding to
+ * per-task overhead. (2) It is "shallow", ignoring nesting and
+ * potentially cyclic mutual steals. (3) It is intentionally
+ * racy: field currentJoin is updated only while actively joining,
+ * which means that we miss links in the chain during long-lived
+ * tasks, GC stalls etc (which is OK since blocking in such cases
+ * is usually a good idea). (4) We bound the number of attempts
+ * to find work (see MAX_HELP) and fall back to suspending the
+ * worker and if necessary replacing it with another.
+ *
+ * Helping actions for CountedCompleters are much simpler: Method
+ * helpComplete can take and execute any task with the same root
+ * as the task being waited on. However, this still entails some
+ * traversal of completer chains, so is less efficient than using
+ * CountedCompleters without explicit joins.
+ *
+ * It is impossible to keep exactly the target parallelism number
+ * of threads running at any given time. Determining the
+ * existence of conservatively safe helping targets, the
+ * availability of already-created spares, and the apparent need
+ * to create new spares are all racy, so we rely on multiple
+ * retries of each. Compensation in the apparent absence of
+ * helping opportunities is challenging to control on JVMs, where
+ * GC and other activities can stall progress of tasks that in
+ * turn stall out many other dependent tasks, without us being
+ * able to determine whether they will ever require compensation.
+ * Even though work-stealing otherwise encounters little
+ * degradation in the presence of more threads than cores,
+ * aggressively adding new threads in such cases entails risk of
+ * unwanted positive feedback control loops in which more threads
+ * cause more dependent stalls (as well as delayed progress of
+ * unblocked threads to the point that we know they are available)
+ * leading to more situations requiring more threads, and so
+ * on. This aspect of control can be seen as an (analytically
+ * intractable) game with an opponent that may choose the worst
+ * (for us) active thread to stall at any time. We take several
+ * precautions to bound losses (and thus bound gains), mainly in
+ * methods tryCompensate and awaitJoin.
+ *
+ * Common Pool
+ * ===========
+ *
+ * The static common Pool always exists after static
+ * initialization. Since it (or any other created pool) need
+ * never be used, we minimize initial construction overhead and
+ * footprint to the setup of about a dozen fields, with no nested
+ * allocation. Most bootstrapping occurs within method
+ * fullExternalPush during the first submission to the pool.
+ *
+ * When external threads submit to the common pool, they can
+ * perform some subtask processing (see externalHelpJoin and
+ * related methods). We do not need to record whether these
+ * submissions are to the common pool -- if not, externalHelpJoin
+ * returns quickly (at the most helping to signal some common pool
+ * workers). These submitters would otherwise be blocked waiting
+ * for completion, so the extra effort (with liberally sprinkled
+ * task status checks) in inapplicable cases amounts to an odd
+ * form of limited spin-wait before blocking in ForkJoinTask.join.
+ *
+ * Style notes
+ * ===========
+ *
+ * There is a lot of representation-level coupling among classes
+ * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
+ * fields of WorkQueue maintain data structures managed by
+ * ForkJoinPool, so are directly accessed. There is little point
+ * trying to reduce this, since any associated future changes in
+ * representations will need to be accompanied by algorithmic
+ * changes anyway. Several methods intrinsically sprawl because
+ * they must accumulate sets of consistent reads of volatiles held
+ * in local variables. Methods signalWork() and scan() are the
+ * main bottlenecks, so are especially heavily
+ * micro-optimized/mangled. There are lots of inline assignments
+ * (of form "while ((local = field) != 0)") which are usually the
+ * simplest way to ensure the required read orderings (which are
+ * sometimes critical). This leads to a "C"-like style of listing
+ * declarations of these locals at the heads of methods or blocks.
+ * There are several occurrences of the unusual "do {} while
+ * (!cas...)" which is the simplest way to force an update of a
+ * CAS'ed variable. There are also other coding oddities (including
+ * several unnecessary-looking hoisted null checks) that help
+ * some methods perform reasonably even when interpreted (not
+ * compiled).
+ *
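+ * For example, the CAS-update idiom mentioned above appears in
+ * method incrementActiveCount as:
+ *
+ *   long c;
+ *   do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
+ *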
+ * The order of declarations in this file is:
+ * (1) Static utility functions
+ * (2) Nested (static) classes
+ * (3) Static fields
+ * (4) Fields, along with constants used when unpacking some of them
+ * (5) Internal control methods
+ * (6) Callbacks and other support for ForkJoinTask methods
+ * (7) Exported methods
+ * (8) Static block initializing statics in minimally dependent order
+ */
+
+ // Static utilities
+
+ /**
+ * If there is a security manager, makes sure caller has
+ * permission to modify threads.
+ */
+ private static void checkPermission() {
+ SecurityManager security = System.getSecurityManager();
+ if (security != null)
+ security.checkPermission(modifyThreadPermission);
+ }
+
+ // Nested classes
+
+ /**
+ * Factory for creating new {@link ForkJoinWorkerThread}s.
+ * A {@code ForkJoinWorkerThreadFactory} must be defined and used
+ * for {@code ForkJoinWorkerThread} subclasses that extend base
+ * functionality or initialize threads with different contexts.
+ */
+ public static interface ForkJoinWorkerThreadFactory {
+ /**
+ * Returns a new worker thread operating in the given pool.
+ *
+ * @param pool the pool this thread works in
+ * @throws NullPointerException if the pool is null
+ */
+ public ForkJoinWorkerThread newThread(ForkJoinPool pool);
+ }
+
+ /**
+ * Default ForkJoinWorkerThreadFactory implementation; creates a
+ * new ForkJoinWorkerThread.
+ */
+ static final class DefaultForkJoinWorkerThreadFactory
+ implements ForkJoinWorkerThreadFactory {
+ public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
+ return new ForkJoinWorkerThread(pool);
+ }
+ }
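+
+ /*
+ * A hypothetical alternative factory (illustration only; the names
+ * below are not part of this file) could initialize workers with
+ * extra per-thread context, for example:
+ *
+ *   static final class TaggedWorkerThread extends ForkJoinWorkerThread {
+ *       TaggedWorkerThread(ForkJoinPool pool) {
+ *           super(pool);                    // protected constructor
+ *           setName("custom-" + getPoolIndex());
+ *       }
+ *   }
+ *   static final class TaggedThreadFactory
+ *           implements ForkJoinWorkerThreadFactory {
+ *       public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
+ *           return new TaggedWorkerThread(pool);
+ *       }
+ *   }
+ */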
+
+ /**
+ * Per-thread records for threads that submit to pools. Currently
+ * holds only pseudo-random seed / index that is used to choose
+ * submission queues in method externalPush. In the future, this may
+ * also incorporate a means to implement different task rejection
+ * and resubmission policies.
+ *
+ * Seeds for submitters and workers/workQueues work in basically
+ * the same way but are initialized and updated using slightly
+ * different mechanics. Both are initialized using the same
+ * approach as in class ThreadLocal, where successive values are
+ * unlikely to collide with previous values. Seeds are then
+ * randomly modified upon collisions using xorshifts, which
+ * requires a non-zero seed.
+ */
+ static final class Submitter {
+ int seed;
+ Submitter(int s) { seed = s; }
+ }
+
+ /**
+ * Class for artificial tasks that are used to replace the target
+ * of local joins if they are removed from an interior queue slot
+ * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
+ * actually do anything beyond having a unique identity.
+ */
+ static final class EmptyTask extends ForkJoinTask<Void> {
+ private static final long serialVersionUID = -7721805057305804111L;
+ EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void x) {}
+ public final boolean exec() { return true; }
+ }
+
+ /**
+ * Queues supporting work-stealing as well as external task
+ * submission. See above for main rationale and algorithms.
+ * Implementation relies heavily on "Unsafe" intrinsics
+ * and selective use of "volatile":
+ *
+ * Field "base" is the index (mod array.length) of the least valid
+ * queue slot, which is always the next position to steal (poll)
+ * from if nonempty. Reads and writes require volatile orderings
+ * but not CAS, because updates are only performed after slot
+ * CASes.
+ *
+ * Field "top" is the index (mod array.length) of the next queue
+ * slot to push to or pop from. It is written only by owner thread
+ * for push, or under lock for external/shared push, and accessed
+ * by other threads only after reading (volatile) base. Both top
+ * and base are allowed to wrap around on overflow, but (top -
+ * base) (or more commonly -(base - top) to force volatile read of
+ * base before top) still estimates size. The lock ("qlock") is
+ * forced to -1 on termination, causing all further lock attempts
+ * to fail. (Note: we don't need CAS for termination state because
+ * upon pool shutdown, all shared-queues will stop being used
+ * anyway.) Nearly all lock bodies are set up so that exceptions
+ * within lock bodies are "impossible" (modulo JVM errors that
+ * would cause failure anyway.)
+ *
+ * The array slots are read and written using the emulation of
+ * volatiles/atomics provided by Unsafe. Insertions must in
+ * general use putOrderedObject as a form of releasing store to
+ * ensure that all writes to the task object are ordered before
+ * its publication in the queue. All removals entail a CAS to
+ * null. The array is always a power of two. To ensure safety of
+ * Unsafe array operations, all accesses perform explicit null
+ * checks and implicit bounds checks via power-of-two masking.
+ *
+ * In addition to basic queuing support, this class contains
+ * fields described elsewhere to control execution. It turns out
+ * to work better memory-layout-wise to include them in this class
+ * rather than a separate class.
+ *
+ * Performance on most platforms is very sensitive to placement of
+ * instances of both WorkQueues and their arrays -- we absolutely
+ * do not want multiple WorkQueue instances or multiple queue
+ * arrays sharing cache lines. (It would be best for queue objects
+ * and their arrays to share, but there is nothing available to
+ * help arrange that). Unfortunately, because they are recorded
+ * in a common array, WorkQueue instances are often moved to be
+ * adjacent by garbage collectors. To reduce impact, we use field
+ * padding that works OK on common platforms; this effectively
+ * trades off slightly slower average field access for the sake of
+ * avoiding really bad worst-case access. (Until better JVM
+ * support is in place, this padding is dependent on transient
+ * properties of JVM field layout rules.) We also take care in
+ * allocating, sizing and resizing the array. Non-shared queue
+ * arrays are initialized by workers before use. Others are
+ * allocated on first use.
+ */
+ static final class WorkQueue {
+ /**
+ * Capacity of work-stealing queue array upon initialization.
+ * Must be a power of two; at least 4, but should be larger to
+ * reduce or eliminate cacheline sharing among queues.
+ * Currently, it is much larger, as a partial workaround for
+ * the fact that JVMs often place arrays in locations that
+ * share GC bookkeeping (especially cardmarks) such that
+ * per-write accesses encounter serious memory contention.
+ */
+ static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
+
+ /**
+ * Maximum size for queue arrays. Must be a power of two less
+ * than or equal to 1 << (31 - width of array entry) to ensure
+ * lack of wraparound of index calculations, but defined to a
+ * value a bit less than this to help users trap runaway
+ * programs before saturating systems.
+ */
+ static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
+
+ // Heuristic padding to ameliorate unfortunate memory placements
+ volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
+
+ int seed; // for random scanning; initialize nonzero
+ volatile int eventCount; // encoded inactivation count; < 0 if inactive
+ int nextWait; // encoded record of next event waiter
+ int hint; // steal or signal hint (index)
+ int poolIndex; // index of this queue in pool (or 0)
+ final int mode; // 0: lifo, > 0: fifo, < 0: shared
+ int nsteals; // number of steals
+ volatile int qlock; // 1: locked, -1: terminate; else 0
+ volatile int base; // index of next slot for poll
+ int top; // index of next slot for push
+ ForkJoinTask<?>[] array; // the elements (initially unallocated)
+ final ForkJoinPool pool; // the containing pool (may be null)
+ final ForkJoinWorkerThread owner; // owning thread or null if shared
+ volatile Thread parker; // == owner during call to park; else null
+ volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
+ ForkJoinTask<?> currentSteal; // current non-local task being executed
+
+ volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
+ volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
+
+ WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
+ int seed) {
+ this.pool = pool;
+ this.owner = owner;
+ this.mode = mode;
+ this.seed = seed;
+ // Place indices in the center of array (that is not yet allocated)
+ base = top = INITIAL_QUEUE_CAPACITY >>> 1;
+ }
+
+ /**
+ * Returns the approximate number of tasks in the queue.
+ */
+ final int queueSize() {
+ int n = base - top; // non-owner callers must read base first
+ return (n >= 0) ? 0 : -n; // ignore transient negative
+ }
+
+ /**
+ * Provides a more accurate estimate of whether this queue has
+ * any tasks than does queueSize, by checking whether a
+ * near-empty queue has at least one unclaimed task.
+ */
+ final boolean isEmpty() {
+ ForkJoinTask<?>[] a; int m, s;
+ int n = base - (s = top);
+ return (n >= 0 ||
+ (n == -1 &&
+ ((a = array) == null ||
+ (m = a.length - 1) < 0 ||
+ U.getObject
+ (a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
+ }
+
+ /**
+ * Pushes a task. Call only by owner in unshared queues. (The
+ * shared-queue version is embedded in method externalPush.)
+ *
+ * @param task the task. Caller must ensure non-null.
+ * @throws RejectedExecutionException if array cannot be resized
+ */
+ final void push(ForkJoinTask<?> task) {
+ ForkJoinTask<?>[] a; ForkJoinPool p;
+ int s = top, m, n;
+ if ((a = array) != null) { // ignore if queue removed
+ int j = (((m = a.length - 1) & s) << ASHIFT) + ABASE;
+ U.putOrderedObject(a, j, task);
+ if ((n = (top = s + 1) - base) <= 2) {
+ if ((p = pool) != null)
+ p.signalWork(this);
+ }
+ else if (n >= m)
+ growArray();
+ }
+ }
+
+ /**
+ * Initializes or doubles the capacity of array. Call either
+ * by owner or with lock held -- it is OK for base, but not
+ * top, to move while resizings are in progress.
+ */
+ final ForkJoinTask<?>[] growArray() {
+ ForkJoinTask<?>[] oldA = array;
+ int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
+ if (size > MAXIMUM_QUEUE_CAPACITY)
+ throw new RejectedExecutionException("Queue capacity exceeded");
+ int oldMask, t, b;
+ ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
+ if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
+ (t = top) - (b = base) > 0) {
+ int mask = size - 1;
+ do {
+ ForkJoinTask<?> x;
+ int oldj = ((b & oldMask) << ASHIFT) + ABASE;
+ int j = ((b & mask) << ASHIFT) + ABASE;
+ x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
+ if (x != null &&
+ U.compareAndSwapObject(oldA, oldj, x, null))
+ U.putObjectVolatile(a, j, x);
+ } while (++b != t);
+ }
+ return a;
+ }
+
+ /**
+ * Takes next task, if one exists, in LIFO order. Call only
+ * by owner in unshared queues.
+ */
+ final ForkJoinTask<?> pop() {
+ ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
+ if ((a = array) != null && (m = a.length - 1) >= 0) {
+ for (int s; (s = top - 1) - base >= 0;) {
+ long j = ((m & s) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
+ break;
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ return t;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes a task in FIFO order if b is base of queue and a task
+ * can be claimed without contention. Specialized versions
+ * appear in ForkJoinPool methods scan and tryHelpStealer.
+ */
+ final ForkJoinTask<?> pollAt(int b) {
+ ForkJoinTask<?> t; ForkJoinTask<?>[] a;
+ if ((a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
+ base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ return t;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in FIFO order.
+ */
+ final ForkJoinTask<?> poll() {
+ ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
+ while ((b = base) - top < 0 && (a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ return t;
+ }
+ }
+ else if (base == b) {
+ if (b + 1 == top)
+ break;
+ Thread.yield(); // wait for lagging update (very rare)
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> nextLocalTask() {
+ return mode == 0 ? pop() : poll();
+ }
+
+ /**
+ * Returns next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> peek() {
+ ForkJoinTask<?>[] a = array; int m;
+ if (a == null || (m = a.length - 1) < 0)
+ return null;
+ int i = mode == 0 ? top - 1 : base;
+ int j = ((i & m) << ASHIFT) + ABASE;
+ return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ }
+
+ /**
+ * Pops the given task only if it is at the current top.
+ * (A shared version is available only via FJP.tryExternalUnpush)
+ */
+ final boolean tryUnpush(ForkJoinTask<?> t) {
+ ForkJoinTask<?>[] a; int s;
+ if ((a = array) != null && (s = top) != base &&
+ U.compareAndSwapObject
+ (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
+ top = s;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Removes and cancels all known tasks, ignoring any exceptions.
+ */
+ final void cancelAll() {
+ ForkJoinTask.cancelIgnoringExceptions(currentJoin);
+ ForkJoinTask.cancelIgnoringExceptions(currentSteal);
+ for (ForkJoinTask<?> t; (t = poll()) != null; )
+ ForkJoinTask.cancelIgnoringExceptions(t);
+ }
+
+ /**
+ * Computes next value for random probes. Scans don't require
+ * a very high quality generator, but also not a crummy one.
+ * Marsaglia xor-shift is cheap and works well enough. Note:
+ * This is manually inlined in its usages in ForkJoinPool to
+ * avoid writes inside busy scan loops.
+ */
+ final int nextSeed() {
+ int r = seed;
+ r ^= r << 13;
+ r ^= r >>> 17;
+ return seed = r ^= r << 5;
+ }
+
+ // Specialized execution methods
+
+ /**
+ * Pops and runs tasks until empty.
+ */
+ private void popAndExecAll() {
+ // A bit faster than repeated pop calls
+ ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
+ while ((a = array) != null && (m = a.length - 1) >= 0 &&
+ (s = top - 1) - base >= 0 &&
+ (t = ((ForkJoinTask<?>)
+ U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
+ != null) {
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ t.doExec();
+ }
+ }
+ }
+
+ /**
+ * Polls and runs tasks until empty.
+ */
+ private void pollAndExecAll() {
+ for (ForkJoinTask<?> t; (t = poll()) != null;)
+ t.doExec();
+ }
+
+ /**
+ * If present, removes from queue and executes the given task,
+ * or any other cancelled task. Returns (true) on any CAS
+ * or consistency check failure so caller can retry.
+ *
+ * @return false if no progress can be made, else true
+ */
+ final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
+ boolean stat = true, removed = false, empty = true;
+ ForkJoinTask<?>[] a; int m, s, b, n;
+ if ((a = array) != null && (m = a.length - 1) >= 0 &&
+ (n = (s = top) - (b = base)) > 0) {
+ for (ForkJoinTask<?> t;;) { // traverse from s to b
+ int j = ((--s & m) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t == null) // inconsistent length
+ break;
+ else if (t == task) {
+ if (s + 1 == top) { // pop
+ if (!U.compareAndSwapObject(a, j, task, null))
+ break;
+ top = s;
+ removed = true;
+ }
+ else if (base == b) // replace with proxy
+ removed = U.compareAndSwapObject(a, j, task,
+ new EmptyTask());
+ break;
+ }
+ else if (t.status >= 0)
+ empty = false;
+ else if (s + 1 == top) { // pop and throw away
+ if (U.compareAndSwapObject(a, j, t, null))
+ top = s;
+ break;
+ }
+ if (--n == 0) {
+ if (!empty && base == b)
+ stat = false;
+ break;
+ }
+ }
+ }
+ if (removed)
+ task.doExec();
+ return stat;
+ }
+
+ /**
+ * Polls for and executes the given task or any other task in
+ * its CountedCompleter computation.
+ */
+ final boolean pollAndExecCC(ForkJoinTask<?> root) {
+ ForkJoinTask<?>[] a; int b; Object o;
+ outer: while ((b = base) - top < 0 && (a = array) != null) {
+ long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((o = U.getObject(a, j)) == null ||
+ !(o instanceof CountedCompleter))
+ break;
+ for (CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;;) {
+ if (r == root) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ t.doExec();
+ return true;
+ }
+ else
+ break; // restart
+ }
+ if ((r = r.completer) == null)
+ break outer; // not part of root computation
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Executes a top-level task and any local tasks remaining
+ * after execution.
+ */
+ final void runTask(ForkJoinTask<?> t) {
+ if (t != null) {
+ (currentSteal = t).doExec();
+ currentSteal = null;
+ ++nsteals;
+ if (base - top < 0) { // process remaining local tasks
+ if (mode == 0)
+ popAndExecAll();
+ else
+ pollAndExecAll();
+ }
+ }
+ }
+
+ /**
+ * Executes a non-top-level (stolen) task.
+ */
+ final void runSubtask(ForkJoinTask<?> t) {
+ if (t != null) {
+ ForkJoinTask<?> ps = currentSteal;
+ (currentSteal = t).doExec();
+ currentSteal = ps;
+ }
+ }
+
+ /**
+ * Returns true if owned and not known to be blocked.
+ */
+ final boolean isApparentlyUnblocked() {
+ Thread wt; Thread.State s;
+ return (eventCount >= 0 &&
+ (wt = owner) != null &&
+ (s = wt.getState()) != Thread.State.BLOCKED &&
+ s != Thread.State.WAITING &&
+ s != Thread.State.TIMED_WAITING);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long QLOCK;
+ private static final int ABASE;
+ private static final int ASHIFT;
+ static {
+ try {
+ U = getUnsafe();
+ Class<?> k = WorkQueue.class;
+ Class<?> ak = ForkJoinTask[].class;
+ QLOCK = U.objectFieldOffset
+ (k.getDeclaredField("qlock"));
+ ABASE = U.arrayBaseOffset(ak);
+ int scale = U.arrayIndexScale(ak);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+ }
+
+ // static fields (initialized in static initializer below)
+
+ /**
+ * Creates a new ForkJoinWorkerThread. This factory is used unless
+ * overridden in ForkJoinPool constructors.
+ */
+ public static final ForkJoinWorkerThreadFactory
+ defaultForkJoinWorkerThreadFactory;
+
+ /**
+ * Per-thread submission bookkeeping. Shared across all pools
+ * to reduce ThreadLocal pollution and because random motion
+ * to avoid contention in one pool is likely to hold for others.
+ * Lazily initialized on first submission (but null-checked
+ * in other contexts to avoid unnecessary initialization).
+ */
+ static final ThreadLocal<Submitter> submitters;
+
+ /**
+ * Permission required for callers of methods that may start or
+ * kill threads.
+ */
+ private static final RuntimePermission modifyThreadPermission;
+
+ /**
+ * Common (static) pool. Non-null for public use unless a static
+ * construction exception, but internal usages null-check on use
+ * to paranoically avoid potential initialization circularities
+ * as well as to simplify generated code.
+ */
+ static final ForkJoinPool common;
+
+ /**
+ * Common pool parallelism. Must equal common.parallelism.
+ */
+ static final int commonParallelism;
+
+ /**
+ * Sequence number for creating workerNamePrefix.
+ */
+ private static int poolNumberSequence;
+
+ /**
+ * Returns the next sequence number. We don't expect this to
+ * ever contend, so use simple builtin sync.
+ */
+ private static final synchronized int nextPoolId() {
+ return ++poolNumberSequence;
+ }
+
+ // static constants
+
+ /**
+ * Initial timeout value (in nanoseconds) for the thread
+ * triggering quiescence to park waiting for new work. On timeout,
+ * the thread will instead try to shrink the number of
+ * workers. The value should be large enough to avoid overly
+ * aggressive shrinkage during most transient stalls (long GCs
+ * etc).
+ */
+ private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
+
+ /**
+ * Timeout value when there are more threads than parallelism level
+ */
+ private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
+
+ /**
+ * Tolerance for idle timeouts, to cope with timer undershoots
+ */
+ private static final long TIMEOUT_SLOP = 2000000L;
+
+ /**
+ * The maximum stolen->joining link depth allowed in method
+ * tryHelpStealer. Must be a power of two. Depths for legitimate
+ * chains are unbounded, but we use a fixed constant to avoid
+ * (otherwise unchecked) cycles and to bound staleness of
+ * traversal parameters at the expense of sometimes blocking when
+ * we could be helping.
+ */
+ private static final int MAX_HELP = 64;
+
+ /**
+ * Increment for seed generators. See class ThreadLocal for
+ * explanation.
+ */
+ private static final int SEED_INCREMENT = 0x61c88647;
+
+ /*
+ * Bits and masks for control variables
+ *
+ * Field ctl is a long packed with:
+ * AC: Number of active running workers minus target parallelism (16 bits)
+ * TC: Number of total workers minus target parallelism (16 bits)
+ * ST: true if pool is terminating (1 bit)
+ * EC: the wait count of top waiting thread (15 bits)
+ * ID: poolIndex of top of Treiber stack of waiters (16 bits)
+ *
+ * When convenient, we can extract the upper 32 bits of counts and
+ * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
+ * (int)ctl. The ec field is never accessed alone, but always
+ * together with id and st. The offsets of counts by the target
+ * parallelism and the positionings of fields makes it possible to
+ * perform the most common checks via sign tests of fields: When
+ * ac is negative, there are not enough active workers, when tc is
+ * negative, there are not enough total workers, and when e is
+ * negative, the pool is terminating. To deal with these possibly
+ * negative fields, we use casts in and out of "short" and/or
+ * signed shifts to maintain signedness.
+ *
+ * When a thread is queued (inactivated), its eventCount field is
+ * set negative, which is the only way to tell if a worker is
+ * prevented from executing tasks, even though it must continue to
+ * scan for them to avoid queuing races. Note however that
+ * eventCount updates lag releases so usage requires care.
+ *
+ * Field plock is an int packed with:
+ * SHUTDOWN: true if shutdown is enabled (1 bit)
+ * SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
+ * SIGNAL: set when threads may be waiting on the lock (1 bit)
+ *
+ * The sequence number enables simple consistency checks:
+ * Staleness of read-only operations on the workQueues array can
+ * be checked by comparing plock before vs after the reads.
+ */
+
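+ /*
+ * Illustrative decode of the packed ctl fields (a sketch only; the
+ * constants declared below implement the same arithmetic):
+ *
+ *   int u  = (int)(ctl >>> 32);        // upper half: AC and TC
+ *   int e  = (int)ctl;                 // lower half: queue state
+ *   int ac = u >> UAC_SHIFT;           // < 0: too few active workers
+ *   int tc = (short)(u >>> UTC_SHIFT); // < 0: too few total workers
+ *   boolean terminating = e < 0;       // STOP_BIT set
+ */
+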
+ // bit positions/shifts for fields
+ private static final int AC_SHIFT = 48;
+ private static final int TC_SHIFT = 32;
+ private static final int ST_SHIFT = 31;
+ private static final int EC_SHIFT = 16;
+
+ // bounds
+ private static final int SMASK = 0xffff; // short bits
+ private static final int MAX_CAP = 0x7fff; // max #workers - 1
+ private static final int EVENMASK = 0xfffe; // even short bits
+ private static final int SQMASK = 0x007e; // max 64 (even) slots
+ private static final int SHORT_SIGN = 1 << 15;
+ private static final int INT_SIGN = 1 << 31;
+
+ // masks
+ private static final long STOP_BIT = 0x0001L << ST_SHIFT;
+ private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
+ private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
+
+ // units for incrementing and decrementing
+ private static final long TC_UNIT = 1L << TC_SHIFT;
+ private static final long AC_UNIT = 1L << AC_SHIFT;
+
+ // masks and units for dealing with u = (int)(ctl >>> 32)
+ private static final int UAC_SHIFT = AC_SHIFT - 32;
+ private static final int UTC_SHIFT = TC_SHIFT - 32;
+ private static final int UAC_MASK = SMASK << UAC_SHIFT;
+ private static final int UTC_MASK = SMASK << UTC_SHIFT;
+ private static final int UAC_UNIT = 1 << UAC_SHIFT;
+ private static final int UTC_UNIT = 1 << UTC_SHIFT;
+
+ // masks and units for dealing with e = (int)ctl
+ private static final int E_MASK = 0x7fffffff; // no STOP_BIT
+ private static final int E_SEQ = 1 << EC_SHIFT;
+
+ // plock bits
+ private static final int SHUTDOWN = 1 << 31;
+ private static final int PL_LOCK = 2;
+ private static final int PL_SIGNAL = 1;
+ private static final int PL_SPINS = 1 << 8;
+
+ // access mode for WorkQueue
+ static final int LIFO_QUEUE = 0;
+ static final int FIFO_QUEUE = 1;
+ static final int SHARED_QUEUE = -1;
+
+ // bounds for #steps in scan loop -- must be power 2 minus 1
+ private static final int MIN_SCAN = 0x1ff; // cover estimation slop
+ private static final int MAX_SCAN = 0x1ffff; // 4 * max workers
+
+ // Instance fields
+
+ /*
+ * Field layout of this class tends to matter more than one would
+ * like. Runtime layout order is only loosely related to
+ * declaration order and may differ across JVMs, but the following
+ * empirically works OK on current JVMs.
+ */
+
+ // Heuristic padding to ameliorate unfortunate memory placements
+ volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
+
+ volatile long stealCount; // collects worker counts
+ volatile long ctl; // main pool control
+ volatile int plock; // shutdown status and seqLock
+ volatile int indexSeed; // worker/submitter index seed
+ final int config; // mode and parallelism level
+ WorkQueue[] workQueues; // main registry
+ final ForkJoinWorkerThreadFactory factory;
+ final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
+ final String workerNamePrefix; // to create worker name string
+
+ volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
+ volatile Object pad18, pad19, pad1a, pad1b;
+
+ /**
+ * Acquires the plock lock to protect worker array and related
+ * updates. This method is called only if an initial CAS on plock
+ * fails. This acts as a spinlock for normal cases, but falls back
+ * to builtin monitor to block when (rarely) needed. This would be
+ * a terrible idea for a highly contended lock, but works fine as
+ * a more conservative alternative to a pure spinlock.
+ */
+ private int acquirePlock() {
+ int spins = PL_SPINS, r = 0, ps, nps;
+ for (;;) {
+ if (((ps = plock) & PL_LOCK) == 0 &&
+ U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
+ return nps;
+ else if (r == 0) { // randomize spins if possible
+ Thread t = Thread.currentThread(); WorkQueue w; Submitter z;
+ if ((t instanceof ForkJoinWorkerThread) &&
+ (w = ((ForkJoinWorkerThread)t).workQueue) != null)
+ r = w.seed;
+ else if ((z = submitters.get()) != null)
+ r = z.seed;
+ else
+ r = 1;
+ }
+ else if (spins >= 0) {
+ r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
+ if (r >= 0)
+ --spins;
+ }
+ else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
+ synchronized (this) {
+ if ((plock & PL_SIGNAL) != 0) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ try {
+ Thread.currentThread().interrupt();
+ } catch (SecurityException ignore) {
+ }
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ }
+ }
+
+ /**
+ * Unlocks and signals any thread waiting for plock. Called only
+ * when CAS of seq value for unlock fails.
+ */
+ private void releasePlock(int ps) {
+ plock = ps;
+ synchronized (this) { notifyAll(); }
+ }
+
+ /**
+ * Tries to create and start one worker if fewer than target
+ * parallelism level exist. Adjusts counts etc on failure.
+ */
+ private void tryAddWorker() {
+ long c; int u;
+ while ((u = (int)((c = ctl) >>> 32)) < 0 &&
+ (u & SHORT_SIGN) != 0 && (int)c == 0) {
+ long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
+ ((u + UAC_UNIT) & UAC_MASK)) << 32;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ ForkJoinWorkerThreadFactory fac;
+ Throwable ex = null;
+ ForkJoinWorkerThread wt = null;
+ try {
+ if ((fac = factory) != null &&
+ (wt = fac.newThread(this)) != null) {
+ wt.start();
+ break;
+ }
+ } catch (Throwable e) {
+ ex = e;
+ }
+ deregisterWorker(wt, ex);
+ break;
+ }
+ }
+ }
+
+ // Registering and deregistering workers
+
+ /**
+ * Callback from ForkJoinWorkerThread to establish and record its
+ * WorkQueue. To avoid scanning bias due to packing entries in
+ * front of the workQueues array, we treat the array as a simple
+ * power-of-two hash table using per-thread seed as hash,
+ * expanding as needed.
+ *
+ * @param wt the worker thread
+ * @return the worker's queue
+ */
+ final WorkQueue registerWorker(ForkJoinWorkerThread wt) {
+ Thread.UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps;
+ wt.setDaemon(true);
+ if ((handler = ueh) != null)
+ wt.setUncaughtExceptionHandler(handler);
+ do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
+ s += SEED_INCREMENT) ||
+ s == 0); // skip 0
+ WorkQueue w = new WorkQueue(this, wt, config >>> 16, s);
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ try {
+ if ((ws = workQueues) != null) { // skip if shutting down
+ int n = ws.length, m = n - 1;
+ int r = (s << 1) | 1; // use odd-numbered indices
+ if (ws[r &= m] != null) { // collision
+ int probes = 0; // step by approx half size
+ int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
+ while (ws[r = (r + step) & m] != null) {
+ if (++probes >= n) {
+ workQueues = ws = Arrays.copyOf(ws, n <<= 1);
+ m = n - 1;
+ probes = 0;
+ }
+ }
+ }
+ w.eventCount = w.poolIndex = r; // volatile write orders
+ ws[r] = w;
+ }
+ } finally {
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex)));
+ return w;
+ }
+
+ /**
+ * Final callback from terminating worker, as well as upon failure
+ * to construct or start a worker. Removes record of worker from
+ * array, and adjusts counts. If pool is shutting down, tries to
+ * complete termination.
+ *
+ * @param wt the worker thread or null if construction failed
+ * @param ex the exception causing failure, or null if none
+ */
+ final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
+ WorkQueue w = null;
+ if (wt != null && (w = wt.workQueue) != null) {
+ int ps;
+ w.qlock = -1; // ensure set
+ long ns = w.nsteals, sc; // collect steal count
+ do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
+ sc = stealCount, sc + ns));
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ try {
+ int idx = w.poolIndex;
+ WorkQueue[] ws = workQueues;
+ if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
+ ws[idx] = null;
+ } finally {
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ }
+
+ long c; // adjust ctl counts
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
+ ((c - TC_UNIT) & TC_MASK) |
+ (c & ~(AC_MASK|TC_MASK)))));
+
+ if (!tryTerminate(false, false) && w != null && w.array != null) {
+ w.cancelAll(); // cancel remaining tasks
+ WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e;
+ while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) {
+ if (e > 0) { // activate or create replacement
+ if ((ws = workQueues) == null ||
+ (i = e & SMASK) >= ws.length ||
+ (v = ws[i]) == null)
+ break;
+ long nc = (((long)(v.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (v.eventCount != (e | INT_SIGN))
+ break;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ v.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = v.parker) != null)
+ U.unpark(p);
+ break;
+ }
+ }
+ else {
+ if ((short)u < 0)
+ tryAddWorker();
+ break;
+ }
+ }
+ }
+ if (ex == null) // help clean refs on way out
+ ForkJoinTask.helpExpungeStaleExceptions();
+ else // rethrow
+ ForkJoinTask.rethrow(ex);
+ }
+
+ // Submissions
+
+ /**
+ * Unless shutting down, adds the given task to a submission queue
+ * at submitter's current queue index (modulo submission
+ * range). Only the most common path is directly handled in this
+ * method. All others are relayed to fullExternalPush.
+ *
+ * @param task the task. Caller must ensure non-null.
+ */
+ final void externalPush(ForkJoinTask<?> task) {
+ WorkQueue[] ws; WorkQueue q; Submitter z; int m; ForkJoinTask<?>[] a;
+ if ((z = submitters.get()) != null && plock > 0 &&
+ (ws = workQueues) != null && (m = (ws.length - 1)) >= 0 &&
+ (q = ws[m & z.seed & SQMASK]) != null &&
+ U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
+ int b = q.base, s = q.top, n, an;
+ if ((a = q.array) != null && (an = a.length) > (n = s + 1 - b)) {
+ int j = (((an - 1) & s) << ASHIFT) + ABASE;
+ U.putOrderedObject(a, j, task);
+ q.top = s + 1; // push on to deque
+ q.qlock = 0;
+ if (n <= 2)
+ signalWork(q);
+ return;
+ }
+ q.qlock = 0;
+ }
+ fullExternalPush(task);
+ }
+
+ /**
+ * Full version of externalPush. This method is called, among
+ * other times, upon the first submission of the first task to the
+ * pool, so must perform secondary initialization. It also
+ * detects first submission by an external thread by looking up
+ * its ThreadLocal, and creates a new shared queue if the one at
+ * index is empty or contended. The plock lock body must be
+ * exception-free (so no try/finally) so we optimistically
+ * allocate new queues outside the lock and throw them away if
+ * (very rarely) not needed.
+ *
+ * Secondary initialization occurs when plock is zero, to create
+ * workQueue array and set plock to a valid value. This lock body
+ * must also be exception-free. Because the plock seq value can
+ * eventually wrap around zero, this method harmlessly fails to
+ * reinitialize if workQueues exists, while still advancing plock.
+ */
+ private void fullExternalPush(ForkJoinTask<?> task) {
+ int r = 0; // random index seed
+ for (Submitter z = submitters.get();;) {
+ WorkQueue[] ws; WorkQueue q; int ps, m, k;
+ if (z == null) {
+ if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
+ r += SEED_INCREMENT) && r != 0)
+ submitters.set(z = new Submitter(r));
+ }
+ else if (r == 0) { // move to a different index
+ r = z.seed;
+ r ^= r << 13; // same xorshift as WorkQueues
+ r ^= r >>> 17;
+ z.seed = r ^ (r << 5);
+ }
+ else if ((ps = plock) < 0)
+ throw new RejectedExecutionException();
+ else if (ps == 0 || (ws = workQueues) == null ||
+ (m = ws.length - 1) < 0) { // initialize workQueues
+ int p = config & SMASK; // find power of two table size
+ int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
+ n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
+ n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1;
+ WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ?
+ new WorkQueue[n] : null);
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ if (((ws = workQueues) == null || ws.length == 0) && nws != null)
+ workQueues = nws;
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ else if ((q = ws[k = r & m & SQMASK]) != null) {
+ if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) {
+ ForkJoinTask<?>[] a = q.array;
+ int s = q.top;
+ boolean submitted = false;
+ try { // locked version of push
+ if ((a != null && a.length > s + 1 - q.base) ||
+ (a = q.growArray()) != null) { // must presize
+ int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
+ U.putOrderedObject(a, j, task);
+ q.top = s + 1;
+ submitted = true;
+ }
+ } finally {
+ q.qlock = 0; // unlock
+ }
+ if (submitted) {
+ signalWork(q);
+ return;
+ }
+ }
+ r = 0; // move on failure
+ }
+ else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
+ q = new WorkQueue(this, null, SHARED_QUEUE, r);
+ if (((ps = plock) & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
+ ws[k] = q;
+ int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ else
+ r = 0; // try elsewhere while lock held
+ }
+ }
+
+ // Maintaining ctl counts
+
+ /**
+ * Increments active count; mainly called upon return from blocking.
+ */
+ final void incrementActiveCount() {
+ long c;
+ do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
+ }
+
+ /**
+ * Tries to create or activate a worker if too few are active.
+ *
+ * @param q the (non-null) queue holding tasks to be signalled
+ */
+ final void signalWork(WorkQueue q) {
+ int hint = q.poolIndex;
+ long c; int e, u, i, n; WorkQueue[] ws; WorkQueue w; Thread p;
+ while ((u = (int)((c = ctl) >>> 32)) < 0) {
+ if ((e = (int)c) > 0) {
+ if ((ws = workQueues) != null && ws.length > (i = e & SMASK) &&
+ (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
+ long nc = (((long)(w.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.hint = hint;
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ break;
+ }
+ if (q.top - q.base <= 0)
+ break;
+ }
+ else
+ break;
+ }
+ else {
+ if ((short)u < 0)
+ tryAddWorker();
+ break;
+ }
+ }
+ }
+
+ // Scanning for tasks
+
+ /**
+ * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
+ */
+ final void runWorker(WorkQueue w) {
+ w.growArray(); // allocate queue
+ do { w.runTask(scan(w)); } while (w.qlock >= 0);
+ }
+
+ /**
+ * Scans for and, if found, returns one task, else possibly
+ * inactivates the worker. This method operates on single reads of
+ * volatile state and is designed to be re-invoked continuously,
+ * in part because it returns upon detecting inconsistencies,
+ * contention, or state changes that indicate possible success on
+ * re-invocation.
+ *
+ * The scan searches for tasks across queues (starting at a random
+ * index, and relying on registerWorker to irregularly scatter
+ * them within array to avoid bias), checking each at least twice.
+ * The scan terminates upon either finding a non-empty queue, or
+ * completing the sweep. If the worker is not inactivated, it
+ * takes and returns a task from this queue. Otherwise, if not
+ * activated, it signals workers (that may include itself) and
+ * returns so caller can retry. It also returns if the worker
+ * array may have changed during an empty scan. On failure
+ * to find a task, we take one of the following actions, after
+ * which the caller will retry calling this method unless
+ * terminated.
+ *
+ * * If pool is terminating, terminate the worker.
+ *
+ * * If not already enqueued, try to inactivate and enqueue the
+ * worker on wait queue. Or, if inactivating has caused the pool
+ * to be quiescent, relay to idleAwaitWork to possibly shrink
+ * pool.
+ *
+ * * If already enqueued and none of the above apply, possibly
+ * park awaiting signal, else lingering to help scan and signal.
+ *
+ * * If a non-empty queue discovered or left as a hint,
+ * help wake up other workers before return.
+ *
+ * @param w the worker (via its WorkQueue)
+ * @return a task or null if none found
+ */
+ private final ForkJoinTask<?> scan(WorkQueue w) {
+ WorkQueue[] ws; int m;
+ int ps = plock; // read plock before ws
+ if (w != null && (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
+ int ec = w.eventCount; // ec is negative if inactive
+ int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
+ w.hint = -1; // update seed and clear hint
+ int j = ((m + m + 1) | MIN_SCAN) & MAX_SCAN;
+ do {
+ WorkQueue q; ForkJoinTask<?>[] a; int b;
+ if ((q = ws[(r + j) & m]) != null && (b = q.base) - q.top < 0 &&
+ (a = q.array) != null) { // probably nonempty
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t = (ForkJoinTask<?>)
+ U.getObjectVolatile(a, i);
+ if (q.base == b && ec >= 0 && t != null &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ if ((q.base = b + 1) - q.top < 0)
+ signalWork(q);
+ return t; // taken
+ }
+ else if ((ec < 0 || j < m) && (int)(ctl >> AC_SHIFT) <= 0) {
+ w.hint = (r + j) & m; // help signal below
+ break; // cannot take
+ }
+ }
+ } while (--j >= 0);
+
+ int h, e, ns; long c, sc; WorkQueue q;
+ if ((ns = w.nsteals) != 0) {
+ if (U.compareAndSwapLong(this, STEALCOUNT,
+ sc = stealCount, sc + ns))
+ w.nsteals = 0; // collect steals and rescan
+ }
+ else if (plock != ps) // consistency check
+ ; // skip
+ else if ((e = (int)(c = ctl)) < 0)
+ w.qlock = -1; // pool is terminating
+ else {
+ if ((h = w.hint) < 0) {
+ if (ec >= 0) { // try to enqueue/inactivate
+ long nc = (((long)ec |
+ ((c - AC_UNIT) & (AC_MASK|TC_MASK))));
+ w.nextWait = e; // link and mark inactive
+ w.eventCount = ec | INT_SIGN;
+ if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
+ w.eventCount = ec; // unmark on CAS failure
+ else if ((int)(c >> AC_SHIFT) == 1 - (config & SMASK))
+ idleAwaitWork(w, nc, c);
+ }
+ else if (w.eventCount < 0 && ctl == c) {
+ Thread wt = Thread.currentThread();
+ Thread.interrupted(); // clear status
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt; // emulate LockSupport.park
+ if (w.eventCount < 0) // recheck
+ U.park(false, 0L); // block
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ }
+ }
+ if ((h >= 0 || (h = w.hint) >= 0) &&
+ (ws = workQueues) != null && h < ws.length &&
+ (q = ws[h]) != null) { // signal others before retry
+ WorkQueue v; Thread p; int u, i, s;
+ for (int n = (config & SMASK) - 1;;) {
+ int idleCount = (w.eventCount < 0) ? 0 : -1;
+ if (((s = idleCount - q.base + q.top) <= n &&
+ (n = s) <= 0) ||
+ (u = (int)((c = ctl) >>> 32)) >= 0 ||
+ (e = (int)c) <= 0 || m < (i = e & SMASK) ||
+ (v = ws[i]) == null)
+ break;
+ long nc = (((long)(v.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (v.eventCount != (e | INT_SIGN) ||
+ !U.compareAndSwapLong(this, CTL, c, nc))
+ break;
+ v.hint = h;
+ v.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = v.parker) != null)
+ U.unpark(p);
+ if (--n <= 0)
+ break;
+ }
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * If inactivating worker w has caused the pool to become
+ * quiescent, checks for pool termination, and, so long as this is
+ * not the only worker, waits for an event for up to a given
+ * duration. On timeout, if ctl has not changed, terminates the
+ * worker, which will in turn wake up another worker to possibly
+ * repeat this process.
+ *
+ * @param w the calling worker
+ * @param currentCtl the ctl value triggering possible quiescence
+ * @param prevCtl the ctl value to restore if thread is terminated
+ */
+ private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
+ if (w != null && w.eventCount < 0 &&
+ !tryTerminate(false, false) && (int)prevCtl != 0 &&
+ ctl == currentCtl) {
+ int dc = -(short)(currentCtl >>> TC_SHIFT);
+ long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT: (dc + 1) * IDLE_TIMEOUT;
+ long deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP;
+ Thread wt = Thread.currentThread();
+ while (ctl == currentCtl) {
+ Thread.interrupted(); // timed variant of version in scan()
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt;
+ if (ctl == currentCtl)
+ U.park(false, parkTime);
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (ctl != currentCtl)
+ break;
+ if (deadline - System.nanoTime() <= 0L &&
+ U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
+ w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
+ w.hint = -1;
+ w.qlock = -1; // shrink
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * Scans through queues looking for work while joining a task; if
+ * any present, signals. May return early if more signalling is
+ * detectably unneeded.
+ *
+ * @param task return early if done
+ * @param origin an index to start scan
+ */
+ private void helpSignal(ForkJoinTask<?> task, int origin) {
+ WorkQueue[] ws; WorkQueue w; Thread p; long c; int m, u, e, i, s;
+ if (task != null && task.status >= 0 &&
+ (u = (int)(ctl >>> 32)) < 0 && (u >> UAC_SHIFT) < 0 &&
+ (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
+ outer: for (int k = origin, j = m; j >= 0; --j) {
+ WorkQueue q = ws[k++ & m];
+ for (int n = m;;) { // limit to at most m signals
+ if (task.status < 0)
+ break outer;
+ if (q == null ||
+ ((s = -q.base + q.top) <= n && (n = s) <= 0))
+ break;
+ if ((u = (int)((c = ctl) >>> 32)) >= 0 ||
+ (e = (int)c) <= 0 || m < (i = e & SMASK) ||
+ (w = ws[i]) == null)
+ break outer;
+ long nc = (((long)(w.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (w.eventCount != (e | INT_SIGN))
+ break outer;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ if (--n <= 0)
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Tries to locate and execute tasks for a stealer of the given
+ * task, or in turn one of its stealers. Traces currentSteal ->
+ * currentJoin links looking for a thread working on a descendant
+ * of the given task and with a non-empty queue to steal back and
+ * execute tasks from. The first call to this method upon a
+ * waiting join will often entail scanning/search (which is OK
+ * because the joiner has nothing better to do), but this method
+ * leaves hints in workers to speed up subsequent calls. The
+ * implementation is very branchy to cope with potential
+ * inconsistencies or loops encountering chains that are stale,
+ * unknown, or so long that they are likely cyclic.
+ *
+ * @param joiner the joining worker
+ * @param task the task to join
+ * @return 0 if no progress can be made, negative if task
+ * known complete, else positive
+ */
+ private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
+ int stat = 0, steps = 0; // bound to avoid cycles
+ if (joiner != null && task != null) { // hoist null checks
+ restart: for (;;) {
+ ForkJoinTask<?> subtask = task; // current target
+ for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
+ WorkQueue[] ws; int m, s, h;
+ if ((s = task.status) < 0) {
+ stat = s;
+ break restart;
+ }
+ if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
+ break restart; // shutting down
+ if ((v = ws[h = (j.hint | 1) & m]) == null ||
+ v.currentSteal != subtask) {
+ for (int origin = h;;) { // find stealer
+ if (((h = (h + 2) & m) & 15) == 1 &&
+ (subtask.status < 0 || j.currentJoin != subtask))
+ continue restart; // occasional staleness check
+ if ((v = ws[h]) != null &&
+ v.currentSteal == subtask) {
+ j.hint = h; // save hint
+ break;
+ }
+ if (h == origin)
+ break restart; // cannot find stealer
+ }
+ }
+ for (;;) { // help stealer or descend to its stealer
+ ForkJoinTask[] a; int b;
+ if (subtask.status < 0) // surround probes with
+ continue restart; // consistency checks
+ if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t =
+ (ForkJoinTask<?>)U.getObjectVolatile(a, i);
+ if (subtask.status < 0 || j.currentJoin != subtask ||
+ v.currentSteal != subtask)
+ continue restart; // stale
+ stat = 1; // apparent progress
+ if (t != null && v.base == b &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ v.base = b + 1; // help stealer
+ joiner.runSubtask(t);
+ }
+ else if (v.base == b && ++steps == MAX_HELP)
+ break restart; // v apparently stalled
+ }
+ else { // empty -- try to descend
+ ForkJoinTask<?> next = v.currentJoin;
+ if (subtask.status < 0 || j.currentJoin != subtask ||
+ v.currentSteal != subtask)
+ continue restart; // stale
+ else if (next == null || ++steps == MAX_HELP)
+ break restart; // dead-end or maybe cyclic
+ else {
+ subtask = next;
+ j = v;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ return stat;
+ }
+
+ /**
+ * Analog of tryHelpStealer for CountedCompleters. Tries to steal
+ * and run tasks within the target's computation.
+ *
+ * @param task the task to join
+     * @param mode if shared, exit upon completing any task
+     * if all workers are active
+     * @return task status if known complete on exit, else 0
+     */
+ private int helpComplete(ForkJoinTask<?> task, int mode) {
+ WorkQueue[] ws; WorkQueue q; int m, n, s, u;
+ if (task != null && (ws = workQueues) != null &&
+ (m = ws.length - 1) >= 0) {
+ for (int j = 1, origin = j;;) {
+ if ((s = task.status) < 0)
+ return s;
+ if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) {
+ origin = j;
+ if (mode == SHARED_QUEUE &&
+ ((u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0))
+ break;
+ }
+ else if ((j = (j + 2) & m) == origin)
+ break;
+ }
+ }
+ return 0;
+ }
+
+ /**
+ * Tries to decrement active count (sometimes implicitly) and
+ * possibly release or create a compensating worker in preparation
+ * for blocking. Fails on contention or termination. Otherwise,
+     * adds a new thread if no idle workers are available and the
+     * pool may become starved.
+ */
+ final boolean tryCompensate() {
+ int pc = config & SMASK, e, i, tc; long c;
+ WorkQueue[] ws; WorkQueue w; Thread p;
+ if ((ws = workQueues) != null && (e = (int)(c = ctl)) >= 0) {
+ if (e != 0 && (i = e & SMASK) < ws.length &&
+ (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ (c & (AC_MASK|TC_MASK)));
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ return true; // replace with idle worker
+ }
+ }
+ else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
+ (int)(c >> AC_SHIFT) + pc > 1) {
+ long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc))
+ return true; // no compensation
+ }
+ else if (tc + pc < MAX_CAP) {
+ long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ ForkJoinWorkerThreadFactory fac;
+ Throwable ex = null;
+ ForkJoinWorkerThread wt = null;
+ try {
+ if ((fac = factory) != null &&
+ (wt = fac.newThread(this)) != null) {
+ wt.start();
+ return true;
+ }
+ } catch (Throwable rex) {
+ ex = rex;
+ }
+ deregisterWorker(wt, ex); // clean up and return false
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Helps and/or blocks until the given task is done.
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ * @return task status on exit
+ */
+ final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
+ int s = 0;
+ if (joiner != null && task != null && (s = task.status) >= 0) {
+ ForkJoinTask<?> prevJoin = joiner.currentJoin;
+ joiner.currentJoin = task;
+ do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
+ joiner.tryRemoveAndExec(task)); // process local tasks
+ if (s >= 0 && (s = task.status) >= 0) {
+ helpSignal(task, joiner.poolIndex);
+ if ((s = task.status) >= 0 &&
+ (task instanceof CountedCompleter))
+ s = helpComplete(task, LIFO_QUEUE);
+ }
+ while (s >= 0 && (s = task.status) >= 0) {
+ if ((!joiner.isEmpty() || // try helping
+ (s = tryHelpStealer(joiner, task)) == 0) &&
+ (s = task.status) >= 0) {
+ helpSignal(task, joiner.poolIndex);
+ if ((s = task.status) >= 0 && tryCompensate()) {
+ if (task.trySetSignal() && (s = task.status) >= 0) {
+ synchronized (task) {
+ if (task.status >= 0) {
+ try { // see ForkJoinTask
+ task.wait(); // for explanation
+ } catch (InterruptedException ie) {
+ }
+ }
+ else
+ task.notifyAll();
+ }
+ }
+ long c; // re-activate
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ }
+ }
+ }
+ joiner.currentJoin = prevJoin;
+ }
+ return s;
+ }
+
+ /**
+ * Stripped-down variant of awaitJoin used by timed joins. Tries
+ * to help join only while there is continuous progress. (Caller
+ * will then enter a timed wait.)
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ */
+ final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
+ int s;
+ if (joiner != null && task != null && (s = task.status) >= 0) {
+ ForkJoinTask<?> prevJoin = joiner.currentJoin;
+ joiner.currentJoin = task;
+ do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
+ joiner.tryRemoveAndExec(task));
+ if (s >= 0 && (s = task.status) >= 0) {
+ helpSignal(task, joiner.poolIndex);
+ if ((s = task.status) >= 0 &&
+ (task instanceof CountedCompleter))
+ s = helpComplete(task, LIFO_QUEUE);
+ }
+ if (s >= 0 && joiner.isEmpty()) {
+ do {} while (task.status >= 0 &&
+ tryHelpStealer(joiner, task) > 0);
+ }
+ joiner.currentJoin = prevJoin;
+ }
+ }
+
+ /**
+ * Returns a (probably) non-empty steal queue, if one is found
+     * during a scan, else null. This method must be retried by the
+     * caller if, by the time it tries to use the queue, it is empty.
+ * @param r a (random) seed for scanning
+ */
+ private WorkQueue findNonEmptyStealQueue(int r) {
+ for (;;) {
+ int ps = plock, m; WorkQueue[] ws; WorkQueue q;
+ if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
+ for (int j = (m + 1) << 2; j >= 0; --j) {
+ if ((q = ws[(((r + j) << 1) | 1) & m]) != null &&
+ q.base - q.top < 0)
+ return q;
+ }
+ }
+ if (plock == ps)
+ return null;
+ }
+ }
+
+ /**
+ * Runs tasks until {@code isQuiescent()}. We piggyback on
+ * active count ctl maintenance, but rather than blocking
+ * when tasks cannot be found, we rescan until all others cannot
+ * find tasks either.
+ */
+ final void helpQuiescePool(WorkQueue w) {
+ for (boolean active = true;;) {
+ long c; WorkQueue q; ForkJoinTask<?> t; int b;
+ while ((t = w.nextLocalTask()) != null) {
+ if (w.base - w.top < 0)
+ signalWork(w);
+ t.doExec();
+ }
+ if ((q = findNonEmptyStealQueue(w.nextSeed())) != null) {
+ if (!active) { // re-establish active count
+ active = true;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ }
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
+ if (q.base - q.top < 0)
+ signalWork(q);
+ w.runSubtask(t);
+ }
+ }
+ else if (active) { // decrement active count without queuing
+ long nc = (c = ctl) - AC_UNIT;
+ if ((int)(nc >> AC_SHIFT) + (config & SMASK) == 0)
+ return; // bypass decrement-then-increment
+ if (U.compareAndSwapLong(this, CTL, c, nc))
+ active = false;
+ }
+ else if ((int)((c = ctl) >> AC_SHIFT) + (config & SMASK) == 0 &&
+ U.compareAndSwapLong(this, CTL, c, c + AC_UNIT))
+ return;
+ }
+ }
+
+ /**
+ * Gets and removes a local or stolen task for the given worker.
+ *
+ * @return a task, if available
+ */
+ final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
+ for (ForkJoinTask<?> t;;) {
+ WorkQueue q; int b;
+ if ((t = w.nextLocalTask()) != null)
+ return t;
+ if ((q = findNonEmptyStealQueue(w.nextSeed())) == null)
+ return null;
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
+ if (q.base - q.top < 0)
+ signalWork(q);
+ return t;
+ }
+ }
+ }
+
+ /**
+ * Returns a cheap heuristic guide for task partitioning when
+ * programmers, frameworks, tools, or languages have little or no
+ * idea about task granularity. In essence by offering this
+ * method, we ask users only about tradeoffs in overhead vs
+ * expected throughput and its variance, rather than how finely to
+ * partition tasks.
+ *
+ * In a steady state strict (tree-structured) computation, each
+ * thread makes available for stealing enough tasks for other
+ * threads to remain active. Inductively, if all threads play by
+ * the same rules, each thread should make available only a
+ * constant number of tasks.
+ *
+ * The minimum useful constant is just 1. But using a value of 1
+ * would require immediate replenishment upon each steal to
+ * maintain enough tasks, which is infeasible. Further,
+ * partitionings/granularities of offered tasks should minimize
+     * steal rates, which in general means that threads nearer the top
+     * of the computation tree should generate more than those nearer
+     * the bottom. In perfect steady state, each thread is at
+     * approximately the same level of the computation tree. However,
+ * producing extra tasks amortizes the uncertainty of progress and
+ * diffusion assumptions.
+ *
+ * So, users will want to use values larger (but not much larger)
+ * than 1 to both smooth over transient shortages and hedge
+     * against uneven progress, as traded off against the cost of
+ * extra task overhead. We leave the user to pick a threshold
+ * value to compare with the results of this call to guide
+ * decisions, but recommend values such as 3.
+ *
+ * When all threads are active, it is on average OK to estimate
+ * surplus strictly locally. In steady-state, if one thread is
+ * maintaining say 2 surplus tasks, then so are others. So we can
+ * just use estimated queue length. However, this strategy alone
+ * leads to serious mis-estimates in some non-steady-state
+ * conditions (ramp-up, ramp-down, other stalls). We can detect
+     * many of these by further considering the number of "idle"
+     * threads, which are known to have zero queued tasks, and
+     * compensating by a factor of (#idle/#active) threads.
+ *
+ * Note: The approximation of #busy workers as #active workers is
+     * not very good under the current signalling scheme, and should
+     * be improved.
+ */
+ static int getSurplusQueuedTaskCount() {
+ Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
+ if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
+ int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).config & SMASK;
+ int n = (q = wt.workQueue).top - q.base;
+ int a = (int)(pool.ctl >> AC_SHIFT) + p;
+ return n - (a > (p >>>= 1) ? 0 :
+ a > (p >>>= 1) ? 1 :
+ a > (p >>>= 1) ? 2 :
+ a > (p >>>= 1) ? 4 :
+ 8);
+ }
+ return 0;
+ }
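+
+    /*
+     * Usage sketch (illustrative only, not part of this class): the
+     * public wrapper ForkJoinTask.getSurplusQueuedTaskCount() exposes
+     * this estimate, so a recursive task can stop forking once enough
+     * surplus has accumulated. SumTask, its sequential cutoff of 1024,
+     * and the threshold of 3 (the value recommended above) are
+     * hypothetical choices.
+     *
+     *   class SumTask extends RecursiveTask<Long> {
+     *     final long[] a; final int lo, hi;
+     *     SumTask(long[] a, int lo, int hi) {
+     *       this.a = a; this.lo = lo; this.hi = hi;
+     *     }
+     *     protected Long compute() {
+     *       if (hi - lo < 1024 || getSurplusQueuedTaskCount() > 3) {
+     *         long s = 0L;                    // small or saturated: run here
+     *         for (int i = lo; i < hi; ++i) s += a[i];
+     *         return s;
+     *       }
+     *       int mid = (lo + hi) >>> 1;
+     *       SumTask left = new SumTask(a, lo, mid);
+     *       left.fork();                      // offer one half for stealing
+     *       return new SumTask(a, mid, hi).compute() + left.join();
+     *     }
+     *   }
+     */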
+
+ // Termination
+
+ /**
+ * Possibly initiates and/or completes termination. The caller
+ * triggering termination runs three passes through workQueues:
+ * (0) Setting termination status, followed by wakeups of queued
+ * workers; (1) cancelling all tasks; (2) interrupting lagging
+ * threads (likely in external tasks, but possibly also blocked in
+ * joins). Each pass repeats previous steps because of potential
+ * lagging thread creation.
+ *
+ * @param now if true, unconditionally terminate, else only
+ * if no work and no active workers
+ * @param enable if true, enable shutdown when next possible
+ * @return true if now terminating or terminated
+ */
+ private boolean tryTerminate(boolean now, boolean enable) {
+ int ps;
+ if (this == common) // cannot shut down
+ return false;
+ if ((ps = plock) >= 0) { // enable by setting plock
+ if (!enable)
+ return false;
+ if ((ps & PL_LOCK) != 0 ||
+ !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
+ ps = acquirePlock();
+ int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN;
+ if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
+ releasePlock(nps);
+ }
+ for (long c;;) {
+ if (((c = ctl) & STOP_BIT) != 0) { // already terminating
+ if ((short)(c >>> TC_SHIFT) == -(config & SMASK)) {
+ synchronized (this) {
+ notifyAll(); // signal when 0 workers
+ }
+ }
+ return true;
+ }
+ if (!now) { // check if idle & no tasks
+ WorkQueue[] ws; WorkQueue w;
+ if ((int)(c >> AC_SHIFT) != -(config & SMASK))
+ return false;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ if (!w.isEmpty()) { // signal unprocessed tasks
+ signalWork(w);
+ return false;
+ }
+ if ((i & 1) != 0 && w.eventCount >= 0)
+ return false; // unqueued inactive worker
+ }
+ }
+ }
+ }
+ if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
+ for (int pass = 0; pass < 3; ++pass) {
+ WorkQueue[] ws; WorkQueue w; Thread wt;
+ if ((ws = workQueues) != null) {
+ int n = ws.length;
+ for (int i = 0; i < n; ++i) {
+ if ((w = ws[i]) != null) {
+ w.qlock = -1;
+ if (pass > 0) {
+ w.cancelAll();
+ if (pass > 1 && (wt = w.owner) != null) {
+ if (!wt.isInterrupted()) {
+ try {
+ wt.interrupt();
+ } catch (Throwable ignore) {
+ }
+ }
+ U.unpark(wt);
+ }
+ }
+ }
+ }
+ // Wake up workers parked on event queue
+ int i, e; long cc; Thread p;
+ while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
+ (i = e & SMASK) < n && i >= 0 &&
+ (w = ws[i]) != null) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ ((cc + AC_UNIT) & AC_MASK) |
+ (cc & (TC_MASK|STOP_BIT)));
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, cc, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ w.qlock = -1;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // external operations on common pool
+
+ /**
+ * Returns common pool queue for a thread that has submitted at
+ * least one task.
+ */
+ static WorkQueue commonSubmitterQueue() {
+ ForkJoinPool p; WorkQueue[] ws; int m; Submitter z;
+ return ((z = submitters.get()) != null &&
+ (p = common) != null &&
+ (ws = p.workQueues) != null &&
+ (m = ws.length - 1) >= 0) ?
+ ws[m & z.seed & SQMASK] : null;
+ }
+
+ /**
+ * Tries to pop the given task from submitter's queue in common pool.
+ */
+ static boolean tryExternalUnpush(ForkJoinTask<?> t) {
+ ForkJoinPool p; WorkQueue[] ws; WorkQueue q; Submitter z;
+ ForkJoinTask<?>[] a; int m, s;
+ if (t != null &&
+ (z = submitters.get()) != null &&
+ (p = common) != null &&
+ (ws = p.workQueues) != null &&
+ (m = ws.length - 1) >= 0 &&
+ (q = ws[m & z.seed & SQMASK]) != null &&
+ (s = q.top) != q.base &&
+ (a = q.array) != null) {
+ long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
+ if (U.getObject(a, j) == t &&
+ U.compareAndSwapInt(q, QLOCK, 0, 1)) {
+ if (q.array == a && q.top == s && // recheck
+ U.compareAndSwapObject(a, j, t, null)) {
+ q.top = s - 1;
+ q.qlock = 0;
+ return true;
+ }
+ q.qlock = 0;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Tries to pop and run local tasks within the same computation
+ * as the given root. On failure, tries to help complete from
+ * other queues via helpComplete.
+ */
+ private void externalHelpComplete(WorkQueue q, ForkJoinTask<?> root) {
+ ForkJoinTask<?>[] a; int m;
+ if (q != null && (a = q.array) != null && (m = (a.length - 1)) >= 0 &&
+ root != null && root.status >= 0) {
+ for (;;) {
+ int s, u; Object o; CountedCompleter<?> task = null;
+ if ((s = q.top) - q.base > 0) {
+ long j = ((m & (s - 1)) << ASHIFT) + ABASE;
+ if ((o = U.getObject(a, j)) != null &&
+ (o instanceof CountedCompleter)) {
+ CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;
+ do {
+ if (r == root) {
+ if (U.compareAndSwapInt(q, QLOCK, 0, 1)) {
+ if (q.array == a && q.top == s &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ q.top = s - 1;
+ task = t;
+ }
+ q.qlock = 0;
+ }
+ break;
+ }
+ } while ((r = r.completer) != null);
+ }
+ }
+ if (task != null)
+ task.doExec();
+ if (root.status < 0 ||
+ (u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0)
+ break;
+ if (task == null) {
+ helpSignal(root, q.poolIndex);
+ if (root.status >= 0)
+ helpComplete(root, SHARED_QUEUE);
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * Tries to help execute or signal availability of the given task
+ * from submitter's queue in common pool.
+ */
+ static void externalHelpJoin(ForkJoinTask<?> t) {
+ // Some hard-to-avoid overlap with tryExternalUnpush
+ ForkJoinPool p; WorkQueue[] ws; WorkQueue q, w; Submitter z;
+ ForkJoinTask<?>[] a; int m, s, n;
+ if (t != null &&
+ (z = submitters.get()) != null &&
+ (p = common) != null &&
+ (ws = p.workQueues) != null &&
+ (m = ws.length - 1) >= 0 &&
+ (q = ws[m & z.seed & SQMASK]) != null &&
+ (a = q.array) != null) {
+ int am = a.length - 1;
+ if ((s = q.top) != q.base) {
+ long j = ((am & (s - 1)) << ASHIFT) + ABASE;
+ if (U.getObject(a, j) == t &&
+ U.compareAndSwapInt(q, QLOCK, 0, 1)) {
+ if (q.array == a && q.top == s &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ q.top = s - 1;
+ q.qlock = 0;
+ t.doExec();
+ }
+ else
+ q.qlock = 0;
+ }
+ }
+ if (t.status >= 0) {
+ if (t instanceof CountedCompleter)
+ p.externalHelpComplete(q, t);
+ else
+ p.helpSignal(t, q.poolIndex);
+ }
+ }
+ }
+
+ // Exported methods
+
+ // Constructors
+
+ /**
+ * Creates a {@code ForkJoinPool} with parallelism equal to {@link
+ * java.lang.Runtime#availableProcessors}, using the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool() {
+ this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
+ defaultForkJoinWorkerThreadFactory, null, false);
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the indicated parallelism
+ * level, the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @param parallelism the parallelism level
+ * @throws IllegalArgumentException if parallelism less than or
+ * equal to zero, or greater than implementation limit
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool(int parallelism) {
+ this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the given parameters.
+ *
+ * @param parallelism the parallelism level. For default value,
+ * use {@link java.lang.Runtime#availableProcessors}.
+ * @param factory the factory for creating new threads. For default value,
+ * use {@link #defaultForkJoinWorkerThreadFactory}.
+ * @param handler the handler for internal worker threads that
+ * terminate due to unrecoverable errors encountered while executing
+ * tasks. For default value, use {@code null}.
+ * @param asyncMode if true,
+ * establishes local first-in-first-out scheduling mode for forked
+ * tasks that are never joined. This mode may be more appropriate
+ * than default locally stack-based mode in applications in which
+ * worker threads only process event-style asynchronous tasks.
+ * For default value, use {@code false}.
+ * @throws IllegalArgumentException if parallelism less than or
+ * equal to zero, or greater than implementation limit
+ * @throws NullPointerException if the factory is null
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool(int parallelism,
+ ForkJoinWorkerThreadFactory factory,
+ Thread.UncaughtExceptionHandler handler,
+ boolean asyncMode) {
+ checkPermission();
+ if (factory == null)
+ throw new NullPointerException();
+ if (parallelism <= 0 || parallelism > MAX_CAP)
+ throw new IllegalArgumentException();
+ this.factory = factory;
+ this.ueh = handler;
+ this.config = parallelism | (asyncMode ? (FIFO_QUEUE << 16) : 0);
+ long np = (long)(-parallelism); // offset ctl counts
+ this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+ int pn = nextPoolId();
+ StringBuilder sb = new StringBuilder("ForkJoinPool-");
+ sb.append(Integer.toString(pn));
+ sb.append("-worker-");
+ this.workerNamePrefix = sb.toString();
+ }
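+
+    /*
+     * Construction sketch (illustrative only): an async-mode pool for
+     * event-style tasks that are forked but never joined, using the
+     * documented defaults for the remaining parameters.
+     *
+     *   ForkJoinPool asyncPool =
+     *       new ForkJoinPool(Runtime.getRuntime().availableProcessors(),
+     *                        ForkJoinPool.defaultForkJoinWorkerThreadFactory,
+     *                        null,   // no UncaughtExceptionHandler
+     *                        true);  // asyncMode: local FIFO scheduling
+     */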
+
+ /**
+ * Constructor for common pool, suitable only for static initialization.
+ * Basically the same as above, but uses smallest possible initial footprint.
+ */
+ ForkJoinPool(int parallelism, long ctl,
+ ForkJoinWorkerThreadFactory factory,
+ Thread.UncaughtExceptionHandler handler) {
+ this.config = parallelism;
+ this.ctl = ctl;
+ this.factory = factory;
+ this.ueh = handler;
+ this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
+ }
+
+ /**
+ * Returns the common pool instance. This pool is statically
+ * constructed; its run state is unaffected by attempts to {@link
+ * #shutdown} or {@link #shutdownNow}. However this pool and any
+ * ongoing processing are automatically terminated upon program
+ * {@link System#exit}. Any program that relies on asynchronous
+ * task processing to complete before program termination should
+ * invoke {@code commonPool().}{@link #awaitQuiescence}, before
+ * exit.
+ *
+ * @return the common pool instance
+ * @since 1.8
+ */
+ public static ForkJoinPool commonPool() {
+ // assert common != null : "static init error";
+ return common;
+ }
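+
+    /*
+     * Usage sketch (illustrative only), following the advice above: a
+     * program relying on asynchronous common-pool processing should
+     * quiesce the pool before exiting, because this pool never
+     * terminates until program shutdown. someRunnable is a
+     * hypothetical Runnable.
+     *
+     *   ForkJoinTask.adapt(someRunnable).fork();  // asynchronous work
+     *   ForkJoinPool.commonPool().awaitQuiescence(1L, TimeUnit.MINUTES);
+     */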
+
+ // Execution methods
+
+ /**
+ * Performs the given task, returning its result upon completion.
+ * If the computation encounters an unchecked Exception or Error,
+ * it is rethrown as the outcome of this invocation. Rethrown
+ * exceptions behave in the same way as regular exceptions, but,
+ * when possible, contain stack traces (as displayed for example
+ * using {@code ex.printStackTrace()}) of both the current thread
+ * as well as the thread actually encountering the exception;
+ * minimally only the latter.
+ *
+ * @param task the task
+ * @return the task's result
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> T invoke(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
+ externalPush(task);
+ return task.join();
+ }
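+
+    /*
+     * Example (illustrative only; SumTask is the hypothetical
+     * RecursiveTask sketched near getSurplusQueuedTaskCount, and data
+     * is a hypothetical long[]):
+     *
+     *   ForkJoinPool pool = new ForkJoinPool();
+     *   long total = pool.invoke(new SumTask(data, 0, data.length));
+     */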
+
+ /**
+ * Arranges for (asynchronous) execution of the given task.
+ *
+ * @param task the task
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public void execute(ForkJoinTask<?> task) {
+ if (task == null)
+ throw new NullPointerException();
+ externalPush(task);
+ }
+
+ // AbstractExecutorService methods
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public void execute(Runnable task) {
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
+ externalPush(job);
+ }
+
+ /**
+ * Submits a ForkJoinTask for execution.
+ *
+ * @param task the task to submit
+ * @return the task
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
+ externalPush(task);
+ return task;
+ }
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(Callable<T> task) {
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
+ externalPush(job);
+ return job;
+ }
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(Runnable task, T result) {
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
+ externalPush(job);
+ return job;
+ }
+
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public ForkJoinTask<?> submit(Runnable task) {
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
+ externalPush(job);
+ return job;
+ }
+
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws RejectedExecutionException {@inheritDoc}
+ */
+ public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
+ // In previous versions of this class, this method constructed
+ // a task to run ForkJoinTask.invokeAll, but now external
+ // invocation of multiple tasks is at least as efficient.
+ ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
+
+ boolean done = false;
+ try {
+ for (Callable<T> t : tasks) {
+ ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
+ futures.add(f);
+ externalPush(f);
+ }
+ for (int i = 0, size = futures.size(); i < size; i++)
+ ((ForkJoinTask<?>)futures.get(i)).quietlyJoin();
+ done = true;
+ return futures;
+ } finally {
+ if (!done)
+ for (int i = 0, size = futures.size(); i < size; i++)
+ futures.get(i).cancel(false);
+ }
+ }
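+
+    /*
+     * Usage sketch (illustrative only; buildCalls and process are
+     * hypothetical): submits a batch of Callables and waits for all
+     * of them, ExecutorService style.
+     *
+     *   List<Callable<Integer>> calls = buildCalls();
+     *   for (Future<Integer> f : pool.invokeAll(calls))
+     *       process(f.get());   // get() may throw ExecutionException
+     */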
+
+ /**
+ * Returns the factory used for constructing new workers.
+ *
+ * @return the factory used for constructing new workers
+ */
+ public ForkJoinWorkerThreadFactory getFactory() {
+ return factory;
+ }
+
+ /**
+ * Returns the handler for internal worker threads that terminate
+ * due to unrecoverable errors encountered while executing tasks.
+ *
+ * @return the handler, or {@code null} if none
+ */
+ public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
+ return ueh;
+ }
+
+ /**
+ * Returns the targeted parallelism level of this pool.
+ *
+ * @return the targeted parallelism level of this pool
+ */
+ public int getParallelism() {
+ return config & SMASK;
+ }
+
+ /**
+ * Returns the targeted parallelism level of the common pool.
+ *
+ * @return the targeted parallelism level of the common pool
+ * @since 1.8
+ */
+ public static int getCommonPoolParallelism() {
+ return commonParallelism;
+ }
+
+ /**
+ * Returns the number of worker threads that have started but not
+ * yet terminated. The result returned by this method may differ
+ * from {@link #getParallelism} when threads are created to
+ * maintain parallelism when others are cooperatively blocked.
+ *
+ * @return the number of worker threads
+ */
+ public int getPoolSize() {
+ return (config & SMASK) + (short)(ctl >>> TC_SHIFT);
+ }
+
+ /**
+ * Returns {@code true} if this pool uses local first-in-first-out
+ * scheduling mode for forked tasks that are never joined.
+ *
+ * @return {@code true} if this pool uses async mode
+ */
+ public boolean getAsyncMode() {
+ return (config >>> 16) == FIFO_QUEUE;
+ }
+
+ /**
+ * Returns an estimate of the number of worker threads that are
+ * not blocked waiting to join tasks or for other managed
+ * synchronization. This method may overestimate the
+ * number of running threads.
+ *
+ * @return the number of worker threads
+ */
+ public int getRunningThreadCount() {
+ int rc = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ return rc;
+ }
+
+ /**
+ * Returns an estimate of the number of threads that are currently
+ * stealing or executing tasks. This method may overestimate the
+ * number of active threads.
+ *
+ * @return the number of active threads
+ */
+ public int getActiveThreadCount() {
+ int r = (config & SMASK) + (int)(ctl >> AC_SHIFT);
+ return (r <= 0) ? 0 : r; // suppress momentarily negative values
+ }
+
+ /**
+ * Returns {@code true} if all worker threads are currently idle.
+ * An idle worker is one that cannot obtain a task to execute
+ * because none are available to steal from other threads, and
+ * there are no pending submissions to the pool. This method is
+ * conservative; it might not return {@code true} immediately upon
+ * idleness of all threads, but will eventually become true if
+ * threads remain inactive.
+ *
+ * @return {@code true} if all threads are currently idle
+ */
+ public boolean isQuiescent() {
+ return (int)(ctl >> AC_SHIFT) + (config & SMASK) == 0;
+ }
+
+ /**
+ * Returns an estimate of the total number of tasks stolen from
+ * one thread's work queue by another. The reported value
+ * underestimates the actual total number of steals when the pool
+ * is not quiescent. This value may be useful for monitoring and
+ * tuning fork/join programs: in general, steal counts should be
+ * high enough to keep threads busy, but low enough to avoid
+ * overhead and contention across threads.
+ *
+ * @return the number of steals
+ */
+ public long getStealCount() {
+ long count = stealCount;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.nsteals;
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns an estimate of the total number of tasks currently held
+ * in queues by worker threads (but not including tasks submitted
+ * to the pool that have not begun executing). This value is only
+ * an approximation, obtained by iterating across all threads in
+ * the pool. This method may be useful for tuning task
+ * granularities.
+ *
+ * @return the number of queued tasks
+ */
+ public long getQueuedTaskCount() {
+ long count = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns an estimate of the number of tasks submitted to this
+ * pool that have not yet begun executing. This method may take
+ * time proportional to the number of submissions.
+ *
+ * @return the number of queued submissions
+ */
+ public int getQueuedSubmissionCount() {
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns {@code true} if there are any tasks submitted to this
+ * pool that have not yet begun executing.
+ *
+ * @return {@code true} if there are any queued submissions
+ */
+ public boolean hasQueuedSubmissions() {
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && !w.isEmpty())
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Removes and returns the next unexecuted submission if one is
+ * available. This method may be useful in extensions to this
+ * class that re-assign work in systems with multiple pools.
+ *
+ * @return the next submission, or {@code null} if none
+ */
+ protected ForkJoinTask<?> pollSubmission() {
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && (t = w.poll()) != null)
+ return t;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Removes all available unexecuted submitted and forked tasks
+ * from scheduling queues and adds them to the given collection,
+ * without altering their execution status. These may include
+ * artificially generated or wrapped tasks. This method is
+ * designed to be invoked only when the pool is known to be
+ * quiescent. Invocations at other times may not remove all
+ * tasks. A failure encountered while attempting to add elements
+ * to collection {@code c} may result in elements being in
+ * neither, either or both collections when the associated
+ * exception is thrown. The behavior of this operation is
+ * undefined if the specified collection is modified while the
+ * operation is in progress.
+ *
+ * @param c the collection to transfer elements into
+ * @return the number of elements transferred
+ */
+ protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ while ((t = w.poll()) != null) {
+ c.add(t);
+ ++count;
+ }
+ }
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Returns a string identifying this pool, as well as its state,
+ * including indications of run state, parallelism level, and
+ * worker and task counts.
+ *
+ * @return a string identifying this pool, as well as its state
+ */
+ public String toString() {
+ // Use a single pass through workQueues to collect counts
+ long qt = 0L, qs = 0L; int rc = 0;
+ long st = stealCount;
+ long c = ctl;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ int size = w.queueSize();
+ if ((i & 1) == 0)
+ qs += size;
+ else {
+ qt += size;
+ st += w.nsteals;
+ if (w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ }
+ }
+ int pc = (config & SMASK);
+ int tc = pc + (short)(c >>> TC_SHIFT);
+ int ac = pc + (int)(c >> AC_SHIFT);
+ if (ac < 0) // ignore transient negative
+ ac = 0;
+ String level;
+ if ((c & STOP_BIT) != 0)
+ level = (tc == 0) ? "Terminated" : "Terminating";
+ else
+ level = plock < 0 ? "Shutting down" : "Running";
+ return super.toString() +
+ "[" + level +
+ ", parallelism = " + pc +
+ ", size = " + tc +
+ ", active = " + ac +
+ ", running = " + rc +
+ ", steals = " + st +
+ ", tasks = " + qt +
+ ", submissions = " + qs +
+ "]";
+ }
+
+ /**
+ * Possibly initiates an orderly shutdown in which previously
+ * submitted tasks are executed, but no new tasks will be
+ * accepted. Invocation has no effect on execution state if this
+ * is the {@link #commonPool()}, and no additional effect if
+ * already shut down. Tasks that are in the process of being
+ * submitted concurrently during the course of this method may or
+ * may not be rejected.
+ *
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public void shutdown() {
+ checkPermission();
+ tryTerminate(false, true);
+ }
+
+ /**
+ * Possibly attempts to cancel and/or stop all tasks, and reject
+ * all subsequently submitted tasks. Invocation has no effect on
+ * execution state if this is the {@link #commonPool()}, and no
+ * additional effect if already shut down. Otherwise, tasks that
+ * are in the process of being submitted or executed concurrently
+ * during the course of this method may or may not be
+ * rejected. This method cancels both existing and unexecuted
+ * tasks, in order to permit termination in the presence of task
+ * dependencies. So the method always returns an empty list
+ * (unlike the case for some other Executors).
+ *
+ * @return an empty list
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public List<Runnable> shutdownNow() {
+ checkPermission();
+ tryTerminate(true, true);
+ return Collections.emptyList();
+ }
+
+ /**
+ * Returns {@code true} if all tasks have completed following shut down.
+ *
+ * @return {@code true} if all tasks have completed following shut down
+ */
+ public boolean isTerminated() {
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) == -(config & SMASK));
+ }
+
+ /**
+ * Returns {@code true} if the process of termination has
+ * commenced but not yet completed. This method may be useful for
+ * debugging. A return of {@code true} reported a sufficient
+ * period after shutdown may indicate that submitted tasks have
+ * ignored or suppressed interruption, or are waiting for I/O,
+ * causing this executor not to properly terminate. (See the
+ * advisory notes for class {@link ForkJoinTask} stating that
+ * tasks should not normally entail blocking operations. But if
+ * they do, they must abort them on interrupt.)
+ *
+ * @return {@code true} if terminating but not yet terminated
+ */
+ public boolean isTerminating() {
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) != -(config & SMASK));
+ }
+
+ /**
+ * Returns {@code true} if this pool has been shut down.
+ *
+ * @return {@code true} if this pool has been shut down
+ */
+ public boolean isShutdown() {
+ return plock < 0;
+ }
+
+ /**
+ * Blocks until all tasks have completed execution after a
+ * shutdown request, or the timeout occurs, or the current thread
+ * is interrupted, whichever happens first. Because the {@link
+ * #commonPool()} never terminates until program shutdown, when
+ * applied to the common pool, this method is equivalent to {@link
+ * #awaitQuiescence} but always returns {@code false}.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return {@code true} if this executor terminated and
+ * {@code false} if the timeout elapsed before termination
+ * @throws InterruptedException if interrupted while waiting
+ */
+ public boolean awaitTermination(long timeout, TimeUnit unit)
+ throws InterruptedException {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ if (this == common) {
+ awaitQuiescence(timeout, unit);
+ return false;
+ }
+ long nanos = unit.toNanos(timeout);
+ if (isTerminated())
+ return true;
+ long startTime = System.nanoTime();
+ boolean terminated = false;
+ synchronized (this) {
+ for (long waitTime = nanos, millis = 0L;;) {
+                if ((terminated = isTerminated()) ||
+ waitTime <= 0L ||
+ (millis = unit.toMillis(waitTime)) <= 0L)
+ break;
+ wait(millis);
+ waitTime = nanos - (System.nanoTime() - startTime);
+ }
+ }
+ return terminated;
+ }
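+
+    /*
+     * Shutdown sketch (illustrative only): the standard
+     * ExecutorService pattern applies to non-common pools.
+     *
+     *   pool.shutdown();                  // stop accepting new tasks
+     *   if (!pool.awaitTermination(30L, TimeUnit.SECONDS))
+     *       pool.shutdownNow();           // cancel stragglers
+     */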
+
+ /**
+ * If called by a ForkJoinTask operating in this pool, equivalent
+ * in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
+ * waits and/or attempts to assist performing tasks until this
+ * pool {@link #isQuiescent} or the indicated timeout elapses.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return {@code true} if quiescent; {@code false} if the
+ * timeout elapsed.
+ */
+ public boolean awaitQuiescence(long timeout, TimeUnit unit) {
+ long nanos = unit.toNanos(timeout);
+ ForkJoinWorkerThread wt;
+ Thread thread = Thread.currentThread();
+ if ((thread instanceof ForkJoinWorkerThread) &&
+ (wt = (ForkJoinWorkerThread)thread).pool == this) {
+ helpQuiescePool(wt.workQueue);
+ return true;
+ }
+ long startTime = System.nanoTime();
+ WorkQueue[] ws;
+ int r = 0, m;
+ boolean found = true;
+ while (!isQuiescent() && (ws = workQueues) != null &&
+ (m = ws.length - 1) >= 0) {
+ if (!found) {
+ if ((System.nanoTime() - startTime) > nanos)
+ return false;
+ Thread.yield(); // cannot block
+ }
+ found = false;
+ for (int j = (m + 1) << 2; j >= 0; --j) {
+ ForkJoinTask<?> t; WorkQueue q; int b;
+ if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) {
+ found = true;
+ if ((t = q.pollAt(b)) != null) {
+ if (q.base - q.top < 0)
+ signalWork(q);
+ t.doExec();
+ }
+ break;
+ }
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Waits and/or attempts to assist performing tasks indefinitely
+ * until the {@link #commonPool()} {@link #isQuiescent}.
+ */
+ static void quiesceCommonPool() {
+ common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
+ }
+
+ /**
+ * Interface for extending managed parallelism for tasks running
+ * in {@link ForkJoinPool}s.
+ *
+ * <p>A {@code ManagedBlocker} provides two methods. Method
+ * {@code isReleasable} must return {@code true} if blocking is
+ * not necessary. Method {@code block} blocks the current thread
+ * if necessary (perhaps internally invoking {@code isReleasable}
+ * before actually blocking). These actions are performed by any
+ * thread invoking {@link ForkJoinPool#managedBlock}. The
+ * unusual methods in this API accommodate synchronizers that may,
+ * but don't usually, block for long periods. Similarly, they
+ * allow more efficient internal handling of cases in which
+ * additional workers may be, but usually are not, needed to
+ * ensure sufficient parallelism. Toward this end,
+ * implementations of method {@code isReleasable} must be amenable
+ * to repeated invocation.
+ *
+ * <p>For example, here is a ManagedBlocker based on a
+ * ReentrantLock:
+ * <pre> {@code
+ * class ManagedLocker implements ManagedBlocker {
+ * final ReentrantLock lock;
+ * boolean hasLock = false;
+ * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
+ * public boolean block() {
+ * if (!hasLock)
+ * lock.lock();
+ * return true;
+ * }
+ * public boolean isReleasable() {
+ * return hasLock || (hasLock = lock.tryLock());
+ * }
+ * }}</pre>
+ *
+ * <p>Here is a class that possibly blocks waiting for an
+ * item on a given queue:
+ * <pre> {@code
+ * class QueueTaker<E> implements ManagedBlocker {
+ * final BlockingQueue<E> queue;
+ * volatile E item = null;
+ * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
+ * public boolean block() throws InterruptedException {
+ * if (item == null)
+ * item = queue.take();
+ * return true;
+ * }
+ * public boolean isReleasable() {
+ * return item != null || (item = queue.poll()) != null;
+ * }
+ * public E getItem() { // call after pool.managedBlock completes
+ * return item;
+ * }
+ * }}</pre>
+ */
+ public static interface ManagedBlocker {
+ /**
+ * Possibly blocks the current thread, for example waiting for
+ * a lock or condition.
+ *
+ * @return {@code true} if no additional blocking is necessary
+ * (i.e., if isReleasable would return true)
+ * @throws InterruptedException if interrupted while waiting
+ * (the method is not required to do so, but is allowed to)
+ */
+ boolean block() throws InterruptedException;
+
+ /**
+ * Returns {@code true} if blocking is unnecessary.
+ */
+ boolean isReleasable();
+ }
+
+ /**
+ * Blocks in accord with the given blocker. If the current thread
+ * is a {@link ForkJoinWorkerThread}, this method possibly
+ * arranges for a spare thread to be activated if necessary to
+ * ensure sufficient parallelism while the current thread is blocked.
+ *
+ * <p>If the caller is not a {@link ForkJoinTask}, this method is
+ * behaviorally equivalent to
+ * <pre> {@code
+ * while (!blocker.isReleasable())
+ * if (blocker.block())
+ * return;
+ * }</pre>
+ *
+ * If the caller is a {@code ForkJoinTask}, then the pool may
+ * first be expanded to ensure parallelism, and later adjusted.
+ *
+ * @param blocker the blocker
+ * @throws InterruptedException if blocker.block did so
+ */
+ public static void managedBlock(ManagedBlocker blocker)
+ throws InterruptedException {
+ Thread t = Thread.currentThread();
+ if (t instanceof ForkJoinWorkerThread) {
+ ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
+ while (!blocker.isReleasable()) { // variant of helpSignal
+ WorkQueue[] ws; WorkQueue q; int m, u;
+ if ((ws = p.workQueues) != null && (m = ws.length - 1) >= 0) {
+ for (int i = 0; i <= m; ++i) {
+ if (blocker.isReleasable())
+ return;
+ if ((q = ws[i]) != null && q.base - q.top < 0) {
+ p.signalWork(q);
+ if ((u = (int)(p.ctl >>> 32)) >= 0 ||
+ (u >> UAC_SHIFT) >= 0)
+ break;
+ }
+ }
+ }
+ if (p.tryCompensate()) {
+ try {
+ do {} while (!blocker.isReleasable() &&
+ !blocker.block());
+ } finally {
+ p.incrementActiveCount();
+ }
+ break;
+ }
+ }
+ }
+ else {
+ do {} while (!blocker.isReleasable() &&
+ !blocker.block());
+ }
+ }
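+
+    /*
+     * Usage sketch (illustrative only), reusing the ManagedLocker
+     * example from the ManagedBlocker javadoc above: a task that must
+     * block on a ReentrantLock can do so without starving the pool.
+     * useLockedResource is a hypothetical critical section.
+     *
+     *   ManagedLocker locker = new ManagedLocker(lock);
+     *   ForkJoinPool.managedBlock(locker); // may activate a spare worker
+     *   try { useLockedResource(); } finally { lock.unlock(); }
+     */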
+
+ // AbstractExecutorService overrides. These rely on undocumented
+ // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
+ // implement RunnableFuture.
+
+ protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
+ return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
+ }
+
+ protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
+ return new ForkJoinTask.AdaptedCallable<T>(callable);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long CTL;
+ private static final long PARKBLOCKER;
+ private static final int ABASE;
+ private static final int ASHIFT;
+ private static final long STEALCOUNT;
+ private static final long PLOCK;
+ private static final long INDEXSEED;
+ private static final long QLOCK;
+
+ static {
+ // initialize field offsets for CAS etc
+ try {
+ U = getUnsafe();
+ Class<?> k = ForkJoinPool.class;
+ CTL = U.objectFieldOffset
+ (k.getDeclaredField("ctl"));
+ STEALCOUNT = U.objectFieldOffset
+ (k.getDeclaredField("stealCount"));
+ PLOCK = U.objectFieldOffset
+ (k.getDeclaredField("plock"));
+ INDEXSEED = U.objectFieldOffset
+ (k.getDeclaredField("indexSeed"));
+ Class<?> tk = Thread.class;
+ PARKBLOCKER = U.objectFieldOffset
+ (tk.getDeclaredField("parkBlocker"));
+ Class<?> wk = WorkQueue.class;
+ QLOCK = U.objectFieldOffset
+ (wk.getDeclaredField("qlock"));
+ Class<?> ak = ForkJoinTask[].class;
+ ABASE = U.arrayBaseOffset(ak);
+ int scale = U.arrayIndexScale(ak);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+
+ submitters = new ThreadLocal<Submitter>();
+ ForkJoinWorkerThreadFactory fac = defaultForkJoinWorkerThreadFactory =
+ new DefaultForkJoinWorkerThreadFactory();
+ modifyThreadPermission = new RuntimePermission("modifyThread");
+
+ /*
+ * Establish common pool parameters. For extra caution,
+ * computations to set up common pool state are here; the
+ * constructor just assigns these values to fields.
+ */
+
+ int par = 0;
+ Thread.UncaughtExceptionHandler handler = null;
+ try { // TBD: limit or report ignored exceptions?
+ String pp = System.getProperty
+ ("java.util.concurrent.ForkJoinPool.common.parallelism");
+ String hp = System.getProperty
+ ("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
+ String fp = System.getProperty
+ ("java.util.concurrent.ForkJoinPool.common.threadFactory");
+ if (fp != null)
+ fac = ((ForkJoinWorkerThreadFactory)ClassLoader.
+ getSystemClassLoader().loadClass(fp).newInstance());
+ if (hp != null)
+ handler = ((Thread.UncaughtExceptionHandler)ClassLoader.
+ getSystemClassLoader().loadClass(hp).newInstance());
+ if (pp != null)
+ par = Integer.parseInt(pp);
+ } catch (Exception ignore) {
+ }
+
+ if (par <= 0)
+ par = Runtime.getRuntime().availableProcessors();
+ if (par > MAX_CAP)
+ par = MAX_CAP;
+ commonParallelism = par;
+ long np = (long)(-par); // precompute initial ctl value
+ long ct = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+
+ common = new ForkJoinPool(par, ct, fac, handler);
+ }
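+
+    /*
+     * Tuning sketch (illustrative only): the system properties read
+     * above can be set on the JVM command line, for example
+     *
+     *   java -Djava.util.concurrent.ForkJoinPool.common.parallelism=4 \
+     *        -Djava.util.concurrent.ForkJoinPool.common.threadFactory=com.example.MyFactory \
+     *        com.example.Main
+     *
+     * where com.example.MyFactory and com.example.Main are
+     * hypothetical names; the factory class must implement
+     * ForkJoinPool.ForkJoinWorkerThreadFactory and be instantiable
+     * via a public no-arg constructor, as it is loaded reflectively above.
+     */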
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/ForkJoinTask.java b/src/main/java/jsr166y/ForkJoinTask.java
new file mode 100644
index 0000000..ab56eca
--- /dev/null
+++ b/src/main/java/jsr166y/ForkJoinTask.java
@@ -0,0 +1,1509 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.List;
+import java.util.RandomAccess;
+import java.lang.ref.WeakReference;
+import java.lang.ref.ReferenceQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.lang.reflect.Constructor;
+
+/**
+ * Abstract base class for tasks that run within a {@link ForkJoinPool}.
+ * A {@code ForkJoinTask} is a thread-like entity that is much
+ * lighter weight than a normal thread. Huge numbers of tasks and
+ * subtasks may be hosted by a small number of actual threads in a
+ * ForkJoinPool, at the price of some usage limitations.
+ *
+ * <p>A "main" {@code ForkJoinTask} begins execution when it is
+ * explicitly submitted to a {@link ForkJoinPool}, or, if not already
+ * engaged in a ForkJoin computation, commenced in the {@link
+ * ForkJoinPool#commonPool()} via {@link #fork}, {@link #invoke}, or
+ * related methods. Once started, it will usually in turn start other
+ * subtasks. As indicated by the name of this class, many programs
+ * using {@code ForkJoinTask} employ only methods {@link #fork} and
+ * {@link #join}, or derivatives such as {@link
+ * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also
+ * provides a number of other methods that can come into play in
+ * advanced usages, as well as extension mechanics that allow support
+ * of new forms of fork/join processing.
+ *
+ * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
+ * The efficiency of {@code ForkJoinTask}s stems from a set of
+ * restrictions (that are only partially statically enforceable)
+ * reflecting their main use as computational tasks calculating pure
+ * functions or operating on purely isolated objects. The primary
+ * coordination mechanisms are {@link #fork}, that arranges
+ * asynchronous execution, and {@link #join}, that doesn't proceed
+ * until the task's result has been computed. Computations should
+ * ideally avoid {@code synchronized} methods or blocks, and should
+ * minimize other blocking synchronization apart from joining other
+ * tasks or using synchronizers such as Phasers that are advertised to
+ * cooperate with fork/join scheduling. Subdividable tasks should also
+ * not perform blocking I/O, and should ideally access variables that
+ * are completely independent of those accessed by other running
+ * tasks. These guidelines are loosely enforced by not permitting
+ * checked exceptions such as {@code IOExceptions} to be
+ * thrown. However, computations may still encounter unchecked
+ * exceptions, that are rethrown to callers attempting to join
+ * them. These exceptions may additionally include {@link
+ * RejectedExecutionException} stemming from internal resource
+ * exhaustion, such as failure to allocate internal task
+ * queues. Rethrown exceptions behave in the same way as regular
+ * exceptions, but, when possible, contain stack traces (as displayed
+ * for example using {@code ex.printStackTrace()}) of both the thread
+ * that initiated the computation as well as the thread actually
+ * encountering the exception; minimally only the latter.
+ *
+ * <p>It is possible to define and use ForkJoinTasks that may block,
+ * but doing so requires three further considerations: (1) Completion
+ * of few if any <em>other</em> tasks should be dependent on a task
+ * that blocks on external synchronization or I/O. Event-style async
+ * tasks that are never joined (for example, those subclassing {@link
+ * CountedCompleter}) often fall into this category. (2) To minimize
+ * resource impact, tasks should be small; ideally performing only the
+ * (possibly) blocking action. (3) Unless the {@link
+ * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
+ * blocked tasks is known to be less than the pool's {@link
+ * ForkJoinPool#getParallelism} level, the pool cannot guarantee that
+ * enough threads will be available to ensure progress or good
+ * performance.
+ *
+ * <p>The primary method for awaiting completion and extracting
+ * results of a task is {@link #join}, but there are several variants:
+ * The {@link Future#get} methods support interruptible and/or timed
+ * waits for completion and report results using {@code Future}
+ * conventions. Method {@link #invoke} is semantically
+ * equivalent to {@code fork(); join()} but always attempts to begin
+ * execution in the current thread. The "<em>quiet</em>" forms of
+ * these methods do not extract results or report exceptions. These
+ * may be useful when a set of tasks are being executed, and you need
+ * to delay processing of results or exceptions until all complete.
+ * Method {@code invokeAll} (available in multiple versions)
+ * performs the most common form of parallel invocation: forking a set
+ * of tasks and joining them all.
+ *
+ * <p>In the most typical usages, a fork-join pair act like a call
+ * (fork) and return (join) from a parallel recursive function. As is
+ * the case with other forms of recursive calls, returns (joins)
+ * should be performed innermost-first. For example, {@code a.fork();
+ * b.fork(); b.join(); a.join();} is likely to be substantially more
+ * efficient than joining {@code a} before {@code b}.
+ *
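+ * <p>Schematically, for two forked subtasks {@code a} and {@code b}
+ * (an illustrative expansion of the idiom above):
+ * <pre> {@code
+ * a.fork();   // start a asynchronously
+ * b.fork();   // start b asynchronously
+ * b.join();   // join the innermost (most recently forked) task first
+ * a.join();   // then join a
+ * }</pre>
+ *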
+ * <p>The execution status of tasks may be queried at several levels
+ * of detail: {@link #isDone} is true if a task completed in any way
+ * (including the case where a task was cancelled without executing);
+ * {@link #isCompletedNormally} is true if a task completed without
+ * cancellation or encountering an exception; {@link #isCancelled} is
+ * true if the task was cancelled (in which case {@link #getException}
+ * returns a {@link java.util.concurrent.CancellationException}); and
+ * {@link #isCompletedAbnormally} is true if a task was either
+ * cancelled or encountered an exception, in which case {@link
+ * #getException} will return either the encountered exception or
+ * {@link java.util.concurrent.CancellationException}.
+ *
+ * <p>The ForkJoinTask class is not usually directly subclassed.
+ * Instead, you subclass one of the abstract classes that support a
+ * particular style of fork/join processing, typically {@link
+ * RecursiveAction} for most computations that do not return results,
+ * {@link RecursiveTask} for those that do, and {@link
+ * CountedCompleter} for those in which completed actions trigger
+ * other actions. Normally, a concrete ForkJoinTask subclass declares
+ * fields comprising its parameters, established in a constructor, and
+ * then defines a {@code compute} method that somehow uses the control
+ * methods supplied by this base class.
+ *
+ * <p>Method {@link #join} and its variants are appropriate for use
+ * only when completion dependencies are acyclic; that is, the
+ * parallel computation can be described as a directed acyclic graph
+ * (DAG). Otherwise, executions may encounter a form of deadlock as
+ * tasks cyclically wait for each other. However, this framework
+ * supports other methods and techniques (for example the use of
+ * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
+ * may be of use in constructing custom subclasses for problems that
+ * are not statically structured as DAGs. To support such usages a
+ * ForkJoinTask may be atomically <em>tagged</em> with a {@code short}
+ * value using {@link #setForkJoinTaskTag} or {@link
+ * #compareAndSetForkJoinTaskTag} and checked using {@link
+ * #getForkJoinTaskTag}. The ForkJoinTask implementation does not use
+ * these {@code protected} methods or tags for any purpose, but they
+ * may be of use in the construction of specialized subclasses. For
+ * example, parallel graph traversals can use the supplied methods to
+ * avoid revisiting nodes/tasks that have already been processed.
+ * (Method names for tagging are bulky in part to encourage definition
+ * of methods that reflect their usage patterns.)
+ *
+ * <p>Most base support methods are {@code final}, to prevent
+ * overriding of implementations that are intrinsically tied to the
+ * underlying lightweight task scheduling framework. Developers
+ * creating new basic styles of fork/join processing should minimally
+ * implement {@code protected} methods {@link #exec}, {@link
+ * #setRawResult}, and {@link #getRawResult}, while also introducing
+ * an abstract computational method that can be implemented in its
+ * subclasses, possibly relying on other {@code protected} methods
+ * provided by this class.
+ *
+ * <p>ForkJoinTasks should perform relatively small amounts of
+ * computation. Large tasks should be split into smaller subtasks,
+ * usually via recursive decomposition. As a very rough rule of thumb,
+ * a task should perform more than 100 and less than 10000 basic
+ * computational steps, and should avoid indefinite looping. If tasks
+ * are too big, then parallelism cannot improve throughput. If too
+ * small, then memory and internal task maintenance overhead may
+ * overwhelm processing.
+ *
+ * <p>This class provides {@code adapt} methods for {@link Runnable}
+ * and {@link Callable}, that may be of use when mixing execution of
+ * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
+ * of this form, consider using a pool constructed in <em>asyncMode</em>.
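+ *
+ * <p>(An editorial sketch, not part of the original documentation:)
+ * <pre> {@code
+ * ForkJoinPool pool = new ForkJoinPool();
+ * ForkJoinTask<Integer> task = ForkJoinTask.adapt(
+ *   new Callable<Integer>() {
+ *     public Integer call() { return 6 * 7; }
+ *   });
+ * pool.execute(task);
+ * int v = task.join();   // 42}</pre>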
+ *
+ * <p>ForkJoinTasks are {@code Serializable}, which enables them to be
+ * used in extensions such as remote execution frameworks. It is
+ * sensible to serialize tasks only before or after, but not during,
+ * execution. Serialization is not relied on during execution itself.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
+
+ /*
+ * See the internal documentation of class ForkJoinPool for a
+ * general implementation overview. ForkJoinTasks are mainly
+ * responsible for maintaining their "status" field amidst relays
+ * to methods in ForkJoinWorkerThread and ForkJoinPool.
+ *
+ * The methods of this class are more-or-less layered into
+ * (1) basic status maintenance
+ * (2) execution and awaiting completion
+ * (3) user-level methods that additionally report results.
+ * This is sometimes hard to see because this file orders exported
+ * methods in a way that flows well in javadocs.
+ */
+
+ /*
+ * The status field holds run control status bits packed into a
+ * single int to minimize footprint and to ensure atomicity (via
+ * CAS). Status is initially zero, and takes on nonnegative
+ * values until completed, upon which status (anded with
+ * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
+ * undergoing blocking waits by other threads have the SIGNAL bit
+ * set. Completion of a stolen task with SIGNAL set awakens any
+ * waiters via notifyAll. Even though suboptimal for some
+ * purposes, we use basic builtin wait/notify to take advantage of
+ * "monitor inflation" in JVMs that we would otherwise need to
+ * emulate to avoid adding further per-task bookkeeping overhead.
+ * We want these monitors to be "fat", i.e., not use biasing or
+ * thin-lock techniques, so use some odd coding idioms that tend
+ * to avoid them, mainly by arranging that every synchronized
+ * block performs a wait, notifyAll or both.
+ *
+ * These control bits occupy only (some of) the upper half (16
+ * bits) of status field. The lower bits are used for user-defined
+ * tags.
+ */
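+
+    /*
+     * (Editorial illustration of the packing, using the constants
+     * below: a cancelled task carrying user tag 7 has status ==
+     * (CANCELLED | 7), so (status & DONE_MASK) == CANCELLED while
+     * (short)status == 7.)
+     */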
+
+ /** The run status of this task */
+ volatile int status; // accessed directly by pool and workers
+ static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
+ static final int NORMAL = 0xf0000000; // must be negative
+ static final int CANCELLED = 0xc0000000; // must be < NORMAL
+ static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
+ static final int SIGNAL = 0x00010000; // must be >= 1 << 16
+ static final int SMASK = 0x0000ffff; // short bits for tags
+
+ /**
+ * Marks completion and wakes up threads waiting to join this
+ * task.
+ *
+ * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
+ * @return completion status on exit
+ */
+ private int setCompletion(int completion) {
+ for (int s;;) {
+ if ((s = status) < 0)
+ return s;
+ if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
+ if ((s >>> 16) != 0)
+ synchronized (this) { notifyAll(); }
+ return completion;
+ }
+ }
+ }
+
+ /**
+ * Primary execution method for stolen tasks. Unless done, calls
+ * exec and records status if completed, but doesn't wait for
+ * completion otherwise.
+ *
+ * @return status on exit from this method
+ */
+ final int doExec() {
+ int s; boolean completed;
+ if ((s = status) >= 0) {
+ try {
+ completed = exec();
+ } catch (Throwable rex) {
+ return setExceptionalCompletion(rex);
+ }
+ if (completed)
+ s = setCompletion(NORMAL);
+ }
+ return s;
+ }
+
+ /**
+ * Tries to set SIGNAL status unless already completed. Used by
+ * ForkJoinPool. Other variants are directly incorporated into
+ * externalAwaitDone etc.
+ *
+ * @return true if successful
+ */
+ final boolean trySetSignal() {
+ int s = status;
+ return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL);
+ }
+
+ /**
+ * Blocks a non-worker-thread until completion.
+ * @return status upon completion
+ */
+ private int externalAwaitDone() {
+ int s;
+ ForkJoinPool.externalHelpJoin(this);
+ boolean interrupted = false;
+ while ((s = status) >= 0) {
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ }
+ if (interrupted)
+ Thread.currentThread().interrupt();
+ return s;
+ }
+
+ /**
+ * Blocks a non-worker-thread until completion or interruption.
+     *
+     * @return status upon completion
+     */
+ private int externalInterruptibleAwaitDone() throws InterruptedException {
+ int s;
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ ForkJoinPool.externalHelpJoin(this);
+ while ((s = status) >= 0) {
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0)
+ wait();
+ else
+ notifyAll();
+ }
+ }
+ }
+ return s;
+ }
+
+ /**
+ * Implementation for join, get, quietlyJoin. Directly handles
+ * only cases of already-completed, external wait, and
+ * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
+ *
+ * @return status upon completion
+ */
+ private int doJoin() {
+ int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
+ return (s = status) < 0 ? s :
+ ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ (w = (wt = (ForkJoinWorkerThread)t).workQueue).
+ tryUnpush(this) && (s = doExec()) < 0 ? s :
+ wt.pool.awaitJoin(w, this) :
+ externalAwaitDone();
+ }
+
+ /**
+ * Implementation for invoke, quietlyInvoke.
+ *
+ * @return status upon completion
+ */
+ private int doInvoke() {
+ int s; Thread t; ForkJoinWorkerThread wt;
+ return (s = doExec()) < 0 ? s :
+ ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, this) :
+ externalAwaitDone();
+ }
+
+ // Exception table support
+
+ /**
+ * Table of exceptions thrown by tasks, to enable reporting by
+ * callers. Because exceptions are rare, we don't directly keep
+ * them with task objects, but instead use a weak ref table. Note
+ * that cancellation exceptions don't appear in the table, but are
+ * instead recorded as status values.
+ *
+     * Note: These statics are initialized in the static block below.
+ */
+ private static final ExceptionNode[] exceptionTable;
+ private static final ReentrantLock exceptionTableLock;
+ private static final ReferenceQueue<Object> exceptionTableRefQueue;
+
+ /**
+ * Fixed capacity for exceptionTable.
+ */
+ private static final int EXCEPTION_MAP_CAPACITY = 32;
+
+ /**
+ * Key-value nodes for exception table. The chained hash table
+ * uses identity comparisons, full locking, and weak references
+ * for keys. The table has a fixed capacity because it only
+ * maintains task exceptions long enough for joiners to access
+ * them, so should never become very large for sustained
+ * periods. However, since we do not know when the last joiner
+ * completes, we must use weak references and expunge them. We do
+ * so on each operation (hence full locking). Also, some thread in
+ * any ForkJoinPool will call helpExpungeStaleExceptions when its
+ * pool becomes isQuiescent.
+ */
+ static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
+ final Throwable ex;
+ ExceptionNode next;
+ final long thrower; // use id not ref to avoid weak cycles
+ final int hashCode; // store task hashCode before weak ref disappears
+ ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
+ super(task, exceptionTableRefQueue);
+ this.ex = ex;
+ this.next = next;
+ this.thrower = Thread.currentThread().getId();
+ this.hashCode = System.identityHashCode(task);
+ }
+ }
+
+ /**
+ * Records exception and sets status.
+ *
+ * @return status on exit
+ */
+ final int recordExceptionalCompletion(Throwable ex) {
+ int s;
+ if ((s = status) >= 0) {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ for (ExceptionNode e = t[i]; ; e = e.next) {
+ if (e == null) {
+ t[i] = new ExceptionNode(this, ex, t[i]);
+ break;
+ }
+ if (e.get() == this) // already present
+ break;
+ }
+ } finally {
+ lock.unlock();
+ }
+ s = setCompletion(EXCEPTIONAL);
+ }
+ return s;
+ }
+
+ /**
+ * Records exception and possibly propagates.
+ *
+ * @return status on exit
+ */
+ private int setExceptionalCompletion(Throwable ex) {
+ int s = recordExceptionalCompletion(ex);
+ if ((s & DONE_MASK) == EXCEPTIONAL)
+ internalPropagateException(ex);
+ return s;
+ }
+
+ /**
+ * Hook for exception propagation support for tasks with completers.
+ */
+ void internalPropagateException(Throwable ex) {
+ }
+
+ /**
+ * Cancels, ignoring any exceptions thrown by cancel. Used during
+ * worker and pool shutdown. Cancel is spec'ed not to throw any
+ * exceptions, but if it does anyway, we have no recourse during
+ * shutdown, so guard against this case.
+ */
+ static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
+ if (t != null && t.status >= 0) {
+ try {
+ t.cancel(false);
+ } catch (Throwable ignore) {
+ }
+ }
+ }
+
+ /**
+ * Removes exception node and clears status.
+ */
+ private void clearExceptionalCompletion() {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e.get() == this) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ expungeStaleExceptions();
+ status = 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * Returns a rethrowable exception for the given task, if
+ * available. To provide accurate stack traces, if the exception
+ * was not thrown by the current thread, we try to create a new
+ * exception of the same type as the one thrown, but with the
+ * recorded exception as its cause. If there is no such
+ * constructor, we instead try to use a no-arg constructor,
+ * followed by initCause, to the same effect. If none of these
+ * apply, or any fail due to other exceptions, we return the
+ * recorded exception, which is still correct, although it may
+ * contain a misleading stack trace.
+ *
+ * @return the exception, or null if none
+ */
+ private Throwable getThrowableException() {
+ if ((status & DONE_MASK) != EXCEPTIONAL)
+ return null;
+ int h = System.identityHashCode(this);
+ ExceptionNode e;
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ e = t[h & (t.length - 1)];
+ while (e != null && e.get() != this)
+ e = e.next;
+ } finally {
+ lock.unlock();
+ }
+ Throwable ex;
+ if (e == null || (ex = e.ex) == null)
+ return null;
+        // the "false &&" guard disables the cross-thread reconstruction
+        // described in the method javadoc; in this copy the recorded
+        // exception is always returned as-is
+        if (false && e.thrower != Thread.currentThread().getId()) {
+ Class<? extends Throwable> ec = ex.getClass();
+ try {
+ Constructor<?> noArgCtor = null;
+ Constructor<?>[] cs = ec.getConstructors();// public ctors only
+ for (int i = 0; i < cs.length; ++i) {
+ Constructor<?> c = cs[i];
+ Class<?>[] ps = c.getParameterTypes();
+ if (ps.length == 0)
+ noArgCtor = c;
+ else if (ps.length == 1 && ps[0] == Throwable.class)
+ return (Throwable)(c.newInstance(ex));
+ }
+ if (noArgCtor != null) {
+ Throwable wx = (Throwable)(noArgCtor.newInstance());
+ wx.initCause(ex);
+ return wx;
+ }
+ } catch (Exception ignore) {
+ }
+ }
+ return ex;
+ }
+
+ /**
+ * Poll stale refs and remove them. Call only while holding lock.
+ */
+ private static void expungeStaleExceptions() {
+ for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
+ if (x instanceof ExceptionNode) {
+ int hashCode = ((ExceptionNode)x).hashCode;
+ ExceptionNode[] t = exceptionTable;
+ int i = hashCode & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e == x) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ }
+ }
+ }
+
+ /**
+ * If lock is available, poll stale refs and remove them.
+ * Called from ForkJoinPool when pools become quiescent.
+ */
+ static final void helpExpungeStaleExceptions() {
+ final ReentrantLock lock = exceptionTableLock;
+ if (lock.tryLock()) {
+ try {
+ expungeStaleExceptions();
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+
+ /**
+ * A version of "sneaky throw" to relay exceptions
+ */
+ static void rethrow(final Throwable ex) {
+ if (ex != null) {
+ if (ex instanceof Error)
+ throw (Error)ex;
+ if (ex instanceof RuntimeException)
+ throw (RuntimeException)ex;
+ ForkJoinTask.<RuntimeException>uncheckedThrow(ex);
+ }
+ }
+
+ /**
+ * The sneaky part of sneaky throw, relying on generics
+ * limitations to evade compiler complaints about rethrowing
+ * unchecked exceptions
+ */
+ @SuppressWarnings("unchecked") static <T extends Throwable>
+ void uncheckedThrow(Throwable t) throws T {
+ if (t != null)
+ throw (T)t; // rely on vacuous cast
+ }
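+
+    // (Editorial note: the cast to T above is erased at compile time, so
+    // the bytecode simply rethrows t unchanged. For example,
+    //   ForkJoinTask.<RuntimeException>uncheckedThrow(new java.io.IOException());
+    // compiles without the checked exception being declared or wrapped,
+    // and at runtime throws the IOException itself.)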
+
+ /**
+ * Throws exception, if any, associated with the given status.
+ */
+ private void reportException(int s) {
+ if (s == CANCELLED)
+ throw new CancellationException();
+ if (s == EXCEPTIONAL)
+ rethrow(getThrowableException());
+ }
+
+ // public methods
+
+ /**
+ * Arranges to asynchronously execute this task in the pool the
+ * current task is running in, if applicable, or using the {@link
+ * ForkJoinPool#commonPool()} if not {@link #inForkJoinPool}. While
+ * it is not necessarily enforced, it is a usage error to fork a
+ * task more than once unless it has completed and been
+ * reinitialized. Subsequent modifications to the state of this
+ * task or any data it operates on are not necessarily
+ * consistently observable by any thread other than the one
+ * executing it unless preceded by a call to {@link #join} or
+ * related methods, or a call to {@link #isDone} returning {@code
+ * true}.
+ *
+ * @return {@code this}, to simplify usage
+ */
+ public final ForkJoinTask<V> fork() {
+ Thread t;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ ((ForkJoinWorkerThread)t).workQueue.push(this);
+ else
+ ForkJoinPool.common.externalPush(this);
+ return this;
+ }
+
+ /**
+ * Returns the result of the computation when it {@link #isDone is
+ * done}. This method differs from {@link #get()} in that
+ * abnormal completion results in {@code RuntimeException} or
+ * {@code Error}, not {@code ExecutionException}, and that
+ * interrupts of the calling thread do <em>not</em> cause the
+ * method to abruptly return by throwing {@code
+ * InterruptedException}.
+ *
+ * @return the computed result
+ */
+ public final V join() {
+ int s;
+ if ((s = doJoin() & DONE_MASK) != NORMAL)
+ reportException(s);
+ return getRawResult();
+ }
+
+ /**
+ * Commences performing this task, awaits its completion if
+ * necessary, and returns its result, or throws an (unchecked)
+ * {@code RuntimeException} or {@code Error} if the underlying
+ * computation did so.
+ *
+ * @return the computed result
+ */
+ public final V invoke() {
+ int s;
+ if ((s = doInvoke() & DONE_MASK) != NORMAL)
+ reportException(s);
+ return getRawResult();
+ }
+
+ /**
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, the
+ * other may be cancelled. However, the execution status of
+ * individual tasks is not guaranteed upon exceptional return. The
+ * status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * @param t1 the first task
+ * @param t2 the second task
+ * @throws NullPointerException if any task is null
+ */
+ public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
+ int s1, s2;
+ t2.fork();
+ if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
+ t1.reportException(s1);
+ if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
+ t2.reportException(s2);
+ }
+
+ /**
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, others
+ * may be cancelled. However, the execution status of individual
+ * tasks is not guaranteed upon exceptional return. The status of
+ * each task may be obtained using {@link #getException()} and
+ * related methods to check if they have been cancelled, completed
+ * normally or exceptionally, or left unprocessed.
+ *
+ * @param tasks the tasks
+ * @throws NullPointerException if any task is null
+ */
+ public static void invokeAll(ForkJoinTask<?>... tasks) {
+ Throwable ex = null;
+ int last = tasks.length - 1;
+ for (int i = last; i >= 0; --i) {
+ ForkJoinTask<?> t = tasks[i];
+ if (t == null) {
+ if (ex == null)
+ ex = new NullPointerException();
+ }
+ else if (i != 0)
+ t.fork();
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
+ }
+ for (int i = 1; i <= last; ++i) {
+ ForkJoinTask<?> t = tasks[i];
+ if (t != null) {
+ if (ex != null)
+ t.cancel(false);
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
+ }
+ }
+ if (ex != null)
+ rethrow(ex);
+ }
+
+ /**
+ * Forks all tasks in the specified collection, returning when
+ * {@code isDone} holds for each task or an (unchecked) exception
+ * is encountered, in which case the exception is rethrown. If
+ * more than one task encounters an exception, then this method
+ * throws any one of these exceptions. If any task encounters an
+ * exception, others may be cancelled. However, the execution
+ * status of individual tasks is not guaranteed upon exceptional
+ * return. The status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * @param tasks the collection of tasks
+ * @return the tasks argument, to simplify usage
+ * @throws NullPointerException if tasks or any element are null
+ */
+ public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
+ if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
+ invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
+ return tasks;
+ }
+ @SuppressWarnings("unchecked")
+ List<? extends ForkJoinTask<?>> ts =
+ (List<? extends ForkJoinTask<?>>) tasks;
+ Throwable ex = null;
+ int last = ts.size() - 1;
+ for (int i = last; i >= 0; --i) {
+ ForkJoinTask<?> t = ts.get(i);
+ if (t == null) {
+ if (ex == null)
+ ex = new NullPointerException();
+ }
+ else if (i != 0)
+ t.fork();
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
+ }
+ for (int i = 1; i <= last; ++i) {
+ ForkJoinTask<?> t = ts.get(i);
+ if (t != null) {
+ if (ex != null)
+ t.cancel(false);
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
+ }
+ }
+ if (ex != null)
+ rethrow(ex);
+ return tasks;
+ }
+
+ /**
+ * Attempts to cancel execution of this task. This attempt will
+ * fail if the task has already completed or could not be
+ * cancelled for some other reason. If successful, and this task
+ * has not started when {@code cancel} is called, execution of
+ * this task is suppressed. After this method returns
+ * successfully, unless there is an intervening call to {@link
+ * #reinitialize}, subsequent calls to {@link #isCancelled},
+ * {@link #isDone}, and {@code cancel} will return {@code true}
+ * and calls to {@link #join} and related methods will result in
+ * {@code CancellationException}.
+ *
+ * <p>This method may be overridden in subclasses, but if so, must
+ * still ensure that these properties hold. In particular, the
+ * {@code cancel} method itself must not throw exceptions.
+ *
+ * <p>This method is designed to be invoked by <em>other</em>
+ * tasks. To terminate the current task, you can just return or
+ * throw an unchecked exception from its computation method, or
+ * invoke {@link #completeExceptionally}.
+ *
+ * @param mayInterruptIfRunning this value has no effect in the
+ * default implementation because interrupts are not used to
+ * control cancellation.
+ *
+ * @return {@code true} if this task is now cancelled
+ */
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
+ }
+
+ public final boolean isDone() {
+ return status < 0;
+ }
+
+ public final boolean isCancelled() {
+ return (status & DONE_MASK) == CANCELLED;
+ }
+
+ /**
+ * Returns {@code true} if this task threw an exception or was cancelled.
+ *
+ * @return {@code true} if this task threw an exception or was cancelled
+ */
+ public final boolean isCompletedAbnormally() {
+ return status < NORMAL;
+ }
+
+ /**
+ * Returns {@code true} if this task completed without throwing an
+ * exception and was not cancelled.
+ *
+ * @return {@code true} if this task completed without throwing an
+ * exception and was not cancelled
+ */
+ public final boolean isCompletedNormally() {
+ return (status & DONE_MASK) == NORMAL;
+ }
+
+ /**
+ * Returns the exception thrown by the base computation, or a
+ * {@code CancellationException} if cancelled, or {@code null} if
+ * none or if the method has not yet completed.
+ *
+ * @return the exception, or {@code null} if none
+ */
+ public final Throwable getException() {
+ int s = status & DONE_MASK;
+ return ((s >= NORMAL) ? null :
+ (s == CANCELLED) ? new CancellationException() :
+ getThrowableException());
+ }
+
+ /**
+ * Completes this task abnormally, and if not already aborted or
+ * cancelled, causes it to throw the given exception upon
+ * {@code join} and related operations. This method may be used
+ * to induce exceptions in asynchronous tasks, or to force
+ * completion of tasks that would not otherwise complete. Its use
+ * in other situations is discouraged. This method is
+     * overridable, but overridden versions must invoke the {@code super}
+ * implementation to maintain guarantees.
+ *
+ * @param ex the exception to throw. If this exception is not a
+ * {@code RuntimeException} or {@code Error}, the actual exception
+ * thrown will be a {@code RuntimeException} with cause {@code ex}.
+ */
+ public void completeExceptionally(Throwable ex) {
+ setExceptionalCompletion((ex instanceof RuntimeException) ||
+ (ex instanceof Error) ? ex :
+ new RuntimeException(ex));
+ }
+
+ /**
+     * Completes this task and, if not already aborted or cancelled,
+     * causes the given value to be returned as the result of subsequent
+     * invocations of {@code join} and related operations. This method
+ * may be used to provide results for asynchronous tasks, or to
+ * provide alternative handling for tasks that would not otherwise
+ * complete normally. Its use in other situations is
+ * discouraged. This method is overridable, but overridden
+     * versions must invoke the {@code super} implementation to maintain
+ * guarantees.
+ *
+ * @param value the result value for this task
+ */
+ public void complete(V value) {
+ try {
+ setRawResult(value);
+ } catch (Throwable rex) {
+ setExceptionalCompletion(rex);
+ return;
+ }
+ setCompletion(NORMAL);
+ }
+
+ /**
+ * Completes this task normally without setting a value. The most
+ * recent value established by {@link #setRawResult} (or {@code
+ * null} by default) will be returned as the result of subsequent
+ * invocations of {@code join} and related operations.
+ *
+ * @since 1.8
+ */
+ public final void quietlyComplete() {
+ setCompletion(NORMAL);
+ }
+
+ /**
+ * Waits if necessary for the computation to complete, and then
+ * retrieves its result.
+ *
+ * @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
+ */
+ public final V get() throws InterruptedException, ExecutionException {
+ int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
+ doJoin() : externalInterruptibleAwaitDone();
+ Throwable ex;
+ if ((s &= DONE_MASK) == CANCELLED)
+ throw new CancellationException();
+ if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
+ return getRawResult();
+ }
+
+ /**
+ * Waits if necessary for at most the given time for the computation
+ * to complete, and then retrieves its result, if available.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
+ * @throws TimeoutException if the wait timed out
+ */
+ public final V get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ // Messy in part because we measure in nanosecs, but wait in millisecs
+ int s; long ms;
+ long ns = unit.toNanos(timeout);
+ if ((s = status) >= 0 && ns > 0L) {
+ long deadline = System.nanoTime() + ns;
+ ForkJoinPool p = null;
+ ForkJoinPool.WorkQueue w = null;
+ Thread t = Thread.currentThread();
+ if (t instanceof ForkJoinWorkerThread) {
+ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
+ p = wt.pool;
+ w = wt.workQueue;
+ p.helpJoinOnce(w, this); // no retries on failure
+ }
+ else
+ ForkJoinPool.externalHelpJoin(this);
+ boolean canBlock = false;
+ boolean interrupted = false;
+ try {
+ while ((s = status) >= 0) {
+ if (w != null && w.qlock < 0)
+ cancelIgnoringExceptions(this);
+ else if (!canBlock) {
+ if (p == null || p.tryCompensate())
+ canBlock = true;
+ }
+ else {
+ if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
+ U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait(ms);
+ } catch (InterruptedException ie) {
+ if (p == null)
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ if ((s = status) < 0 || interrupted ||
+ (ns = deadline - System.nanoTime()) <= 0L)
+ break;
+ }
+ }
+ } finally {
+ if (p != null && canBlock)
+ p.incrementActiveCount();
+ }
+ if (interrupted)
+ throw new InterruptedException();
+ }
+ if ((s &= DONE_MASK) != NORMAL) {
+ Throwable ex;
+ if (s == CANCELLED)
+ throw new CancellationException();
+ if (s != EXCEPTIONAL)
+ throw new TimeoutException();
+ if ((ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
+ }
+ return getRawResult();
+ }
+
+ /**
+ * Joins this task, without returning its result or throwing its
+ * exception. This method may be useful when processing
+ * collections of tasks when some have been cancelled or otherwise
+ * known to have aborted.
+ */
+ public final void quietlyJoin() {
+ doJoin();
+ }
+
+ /**
+ * Commences performing this task and awaits its completion if
+ * necessary, without returning its result or throwing its
+ * exception.
+ */
+ public final void quietlyInvoke() {
+ doInvoke();
+ }
+
+ /**
+ * Possibly executes tasks until the pool hosting the current task
+ * {@link ForkJoinPool#isQuiescent is quiescent}. This method may
+ * be of use in designs in which many tasks are forked, but none
+ * are explicitly joined, instead executing them until all are
+ * processed.
+ */
+ public static void helpQuiesce() {
+ Thread t;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
+ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
+ wt.pool.helpQuiescePool(wt.workQueue);
+ }
+ else
+ ForkJoinPool.quiesceCommonPool();
+ }
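+
+    // (Editorial usage sketch with hypothetical names: inside a worker's
+    // compute(), after forking many fire-and-forget subtasks:
+    //   for (ForkJoinTask<?> t : subtasks) t.fork();
+    //   ForkJoinTask.helpQuiesce();   // execute/steal until all complete
+    // )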
+
+ /**
+ * Resets the internal bookkeeping state of this task, allowing a
+ * subsequent {@code fork}. This method allows repeated reuse of
+ * this task, but only if reuse occurs when this task has either
+ * never been forked, or has been forked, then completed and all
+ * outstanding joins of this task have also completed. Effects
+ * under any other usage conditions are not guaranteed.
+ * This method may be useful when executing
+ * pre-constructed trees of subtasks in loops.
+ *
+ * <p>Upon completion of this method, {@code isDone()} reports
+ * {@code false}, and {@code getException()} reports {@code
+ * null}. However, the value returned by {@code getRawResult} is
+ * unaffected. To clear this value, you can invoke {@code
+ * setRawResult(null)}.
+ */
+ public void reinitialize() {
+ if ((status & DONE_MASK) == EXCEPTIONAL)
+ clearExceptionalCompletion();
+ else
+ status = 0;
+ }
+
+ /**
+ * Returns the pool hosting the current task execution, or null
+ * if this task is executing outside of any ForkJoinPool.
+ *
+ * @see #inForkJoinPool
+ * @return the pool, or {@code null} if none
+ */
+ public static ForkJoinPool getPool() {
+ Thread t = Thread.currentThread();
+ return (t instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread) t).pool : null;
+ }
+
+ /**
+ * Returns {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation.
+ *
+ * @return {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation,
+ * or {@code false} otherwise
+ */
+ public static boolean inForkJoinPool() {
+ return Thread.currentThread() instanceof ForkJoinWorkerThread;
+ }
+
+ /**
+ * Tries to unschedule this task for execution. This method will
+ * typically (but is not guaranteed to) succeed if this task is
+ * the most recently forked task by the current thread, and has
+ * not commenced executing in another thread. This method may be
+ * useful when arranging alternative local processing of tasks
+ * that could have been, but were not, stolen.
+ *
+ * @return {@code true} if unforked
+ */
+ public boolean tryUnfork() {
+ Thread t;
+ return (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread)t).workQueue.tryUnpush(this) :
+ ForkJoinPool.tryExternalUnpush(this));
+ }
+
+ /**
+ * Returns an estimate of the number of tasks that have been
+ * forked by the current worker thread but not yet executed. This
+ * value may be useful for heuristic decisions about whether to
+ * fork other tasks.
+ *
+ * @return the number of tasks
+ */
+ public static int getQueuedTaskCount() {
+ Thread t; ForkJoinPool.WorkQueue q;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ q = ((ForkJoinWorkerThread)t).workQueue;
+ else
+ q = ForkJoinPool.commonSubmitterQueue();
+ return (q == null) ? 0 : q.queueSize();
+ }
+
+ /**
+ * Returns an estimate of how many more locally queued tasks are
+ * held by the current worker thread than there are other worker
+ * threads that might steal them, or zero if this thread is not
+ * operating in a ForkJoinPool. This value may be useful for
+ * heuristic decisions about whether to fork other tasks. In many
+ * usages of ForkJoinTasks, at steady state, each worker should
+ * aim to maintain a small constant surplus (for example, 3) of
+ * tasks, and to process computations locally if this threshold is
+ * exceeded.
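+     *
+     * <p>(An editorial sketch of that heuristic, not from the original
+     * documentation; {@code problem} and its methods are hypothetical:)
+     * <pre> {@code
+     * while (problem.canSplit() &&
+     *        ForkJoinTask.getSurplusQueuedTaskCount() <= 3)
+     *   problem.split().fork();   // keep a surplus of about 3 queued
+     * problem.solveSequentially();}</pre>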
+ *
+ * @return the surplus number of tasks, which may be negative
+ */
+ public static int getSurplusQueuedTaskCount() {
+ return ForkJoinPool.getSurplusQueuedTaskCount();
+ }
+
+ // Extension methods
+
+ /**
+ * Returns the result that would be returned by {@link #join}, even
+ * if this task completed abnormally, or {@code null} if this task
+ * is not known to have been completed. This method is designed
+ * to aid debugging, as well as to support extensions. Its use in
+ * any other context is discouraged.
+ *
+ * @return the result, or {@code null} if not completed
+ */
+ public abstract V getRawResult();
+
+ /**
+ * Forces the given value to be returned as a result. This method
+ * is designed to support extensions, and should not in general be
+ * called otherwise.
+ *
+ * @param value the value
+ */
+ protected abstract void setRawResult(V value);
+
+ /**
+ * Immediately performs the base action of this task and returns
+ * true if, upon return from this method, this task is guaranteed
+ * to have completed normally. This method may return false
+ * otherwise, to indicate that this task is not necessarily
+ * complete (or is not known to be complete), for example in
+ * asynchronous actions that require explicit invocations of
+ * completion methods. This method may also throw an (unchecked)
+ * exception to indicate abnormal exit. This method is designed to
+ * support extensions, and should not in general be called
+ * otherwise.
+ *
+ * @return {@code true} if this task is known to have completed normally
+ */
+ protected abstract boolean exec();
+
+ /**
+ * Returns, but does not unschedule or execute, a task queued by
+ * the current thread but not yet executed, if one is immediately
+ * available. There is no guarantee that this task will actually
+ * be polled or executed next. Conversely, this method may return
+ * null even if a task exists but cannot be accessed without
+ * contention with other threads. This method is designed
+ * primarily to support extensions, and is unlikely to be useful
+ * otherwise.
+ *
+ * @return the next task, or {@code null} if none are available
+ */
+ protected static ForkJoinTask<?> peekNextLocalTask() {
+ Thread t; ForkJoinPool.WorkQueue q;
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ q = ((ForkJoinWorkerThread)t).workQueue;
+ else
+ q = ForkJoinPool.commonSubmitterQueue();
+ return (q == null) ? null : q.peek();
+ }
+
+ /**
+ * Unschedules and returns, without executing, the next task
+ * queued by the current thread but not yet executed, if the
+ * current thread is operating in a ForkJoinPool. This method is
+ * designed primarily to support extensions, and is unlikely to be
+ * useful otherwise.
+ *
+ * @return the next task, or {@code null} if none are available
+ */
+ protected static ForkJoinTask<?> pollNextLocalTask() {
+ Thread t;
+ return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread)t).workQueue.nextLocalTask() :
+ null;
+ }
+
+ /**
+ * If the current thread is operating in a ForkJoinPool,
+ * unschedules and returns, without executing, the next task
+ * queued by the current thread but not yet executed, if one is
+ * available, or if not available, a task that was forked by some
+ * other thread, if available. Availability may be transient, so a
+ * {@code null} result does not necessarily imply quiescence of
+ * the pool this task is operating in. This method is designed
+ * primarily to support extensions, and is unlikely to be useful
+ * otherwise.
+ *
+ * @return a task, or {@code null} if none are available
+ */
+ protected static ForkJoinTask<?> pollTask() {
+ Thread t; ForkJoinWorkerThread wt;
+ return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
+ (wt = (ForkJoinWorkerThread)t).pool.nextTaskFor(wt.workQueue) :
+ null;
+ }
+
+ // tag operations
+
+ /**
+ * Returns the tag for this task.
+ *
+ * @return the tag for this task
+ * @since 1.8
+ */
+ public final short getForkJoinTaskTag() {
+ return (short)status;
+ }
+
+ /**
+ * Atomically sets the tag value for this task.
+ *
+ * @param tag the tag value
+ * @return the previous value of the tag
+ * @since 1.8
+ */
+ public final short setForkJoinTaskTag(short tag) {
+ for (int s;;) {
+ if (U.compareAndSwapInt(this, STATUS, s = status,
+ (s & ~SMASK) | (tag & SMASK)))
+ return (short)s;
+ }
+ }
+
+ /**
+ * Atomically conditionally sets the tag value for this task.
+ * Among other applications, tags can be used as visit markers
+ * in tasks operating on graphs, as in methods that check: {@code
+ * if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))}
+ * before processing, otherwise exiting because the node has
+ * already been visited.
+ *
+ * @param e the expected tag value
+ * @param tag the new tag value
+     * @return {@code true} if successful; i.e., the current value was
+ * equal to e and is now tag.
+ * @since 1.8
+ */
+ public final boolean compareAndSetForkJoinTaskTag(short e, short tag) {
+ for (int s;;) {
+ if ((short)(s = status) != e)
+ return false;
+ if (U.compareAndSwapInt(this, STATUS, s,
+ (s & ~SMASK) | (tag & SMASK)))
+ return true;
+ }
+ }
+
+ /**
+ * Adaptor for Runnables. This implements RunnableFuture
+ * to be compliant with AbstractExecutorService constraints
+ * when used in ForkJoinPool.
+ */
+ static final class AdaptedRunnable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Runnable runnable;
+ T result;
+ AdaptedRunnable(Runnable runnable, T result) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ this.result = result; // OK to set this even before completion
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Runnables without results
+ */
+ static final class AdaptedRunnableAction extends ForkJoinTask<Void>
+ implements RunnableFuture<Void> {
+ final Runnable runnable;
+ AdaptedRunnableAction(Runnable runnable) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ }
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void v) { }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Callables
+ */
+ static final class AdaptedCallable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Callable<? extends T> callable;
+ T result;
+ AdaptedCallable(Callable<? extends T> callable) {
+ if (callable == null) throw new NullPointerException();
+ this.callable = callable;
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() {
+ try {
+ result = callable.call();
+ return true;
+ } catch (Error err) {
+ throw err;
+ } catch (RuntimeException rex) {
+ throw rex;
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 2838392045355241008L;
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * a null result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @return the task
+ */
+ public static ForkJoinTask<?> adapt(Runnable runnable) {
+ return new AdaptedRunnableAction(runnable);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * the given result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @param result the result upon completion
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
+ return new AdaptedRunnable<T>(runnable, result);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code call}
+ * method of the given {@code Callable} as its action, and returns
+ * its result upon {@link #join}, translating any checked exceptions
+ * encountered into {@code RuntimeException}.
+ *
+ * @param callable the callable action
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
+ return new AdaptedCallable<T>(callable);
+ }
+
+ // Serialization support
+
+ private static final long serialVersionUID = -7721805057305804111L;
+
+ /**
+ * Saves this task to a stream (that is, serializes it).
+ *
+ * @serialData the current run status and the exception thrown
+ * during execution, or {@code null} if none
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeObject(getException());
+ }
+
+ /**
+ * Reconstitutes this task from a stream (that is, deserializes it).
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ Object ex = s.readObject();
+ if (ex != null)
+ setExceptionalCompletion((Throwable)ex);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long STATUS;
+
+ static {
+ exceptionTableLock = new ReentrantLock();
+ exceptionTableRefQueue = new ReferenceQueue<Object>();
+ exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
+ try {
+ U = getUnsafe();
+ Class<?> k = ForkJoinTask.class;
+ STATUS = U.objectFieldOffset
+ (k.getDeclaredField("status"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/ForkJoinWorkerThread.java b/src/main/java/jsr166y/ForkJoinWorkerThread.java
new file mode 100644
index 0000000..647d0cf
--- /dev/null
+++ b/src/main/java/jsr166y/ForkJoinWorkerThread.java
@@ -0,0 +1,121 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+/**
+ * A thread managed by a {@link ForkJoinPool}, which executes
+ * {@link ForkJoinTask}s.
+ * This class is subclassable solely for the sake of adding
+ * functionality -- there are no overridable methods dealing with
+ * scheduling or execution. However, you can override initialization
+ * and termination methods surrounding the main task processing loop.
+ * If you do create such a subclass, you will also need to supply a
+ * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it
+ * in a {@code ForkJoinPool}.
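+ *
+ * <p>(An editorial sketch, not part of the original documentation: a
+ * subclass that renames its threads, installed via a factory:)
+ * <pre> {@code
+ * class MyWorker extends ForkJoinWorkerThread {
+ *   MyWorker(ForkJoinPool pool) { super(pool); }
+ *   protected void onStart() {
+ *     super.onStart();
+ *     setName("my-worker-" + getPoolIndex());
+ *   }
+ * }
+ * ForkJoinPool pool = new ForkJoinPool(
+ *   Runtime.getRuntime().availableProcessors(),
+ *   new ForkJoinPool.ForkJoinWorkerThreadFactory() {
+ *     public ForkJoinWorkerThread newThread(ForkJoinPool p) {
+ *       return new MyWorker(p);
+ *     }
+ *   },
+ *   null, false);}</pre>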
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public class ForkJoinWorkerThread extends Thread {
+ /*
+ * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
+ * ForkJoinTasks. For explanation, see the internal documentation
+ * of class ForkJoinPool.
+ *
+ * This class just maintains links to its pool and WorkQueue. The
+ * pool field is set immediately upon construction, but the
+ * workQueue field is not set until a call to registerWorker
+ * completes. This leads to a visibility race, that is tolerated
+ * by requiring that the workQueue field is only accessed by the
+ * owning thread.
+ */
+
+ final ForkJoinPool pool; // the pool this thread works in
+ final ForkJoinPool.WorkQueue workQueue; // work-stealing mechanics
+
+ /**
+ * Creates a ForkJoinWorkerThread operating in the given pool.
+ *
+ * @param pool the pool this thread works in
+ * @throws NullPointerException if pool is null
+ */
+ protected ForkJoinWorkerThread(ForkJoinPool pool) {
+ // Use a placeholder until a useful name can be set in registerWorker
+ super("aForkJoinWorkerThread");
+ this.pool = pool;
+ this.workQueue = pool.registerWorker(this);
+ }
+
+ /**
+ * Returns the pool hosting this thread.
+ *
+ * @return the pool
+ */
+ public ForkJoinPool getPool() {
+ return pool;
+ }
+
+ /**
+ * Returns the index number of this thread in its pool. The
+ * returned value ranges from zero to the maximum number of
+ * threads (minus one) that have ever been created in the pool.
+ * This method may be useful for applications that track status or
+ * collect results per-worker rather than per-task.
+ *
+ * @return the index number
+ */
+ public int getPoolIndex() {
+ return workQueue.poolIndex;
+ }
+
+ /**
+ * Initializes internal state after construction but before
+ * processing any tasks. If you override this method, you must
+ * invoke {@code super.onStart()} at the beginning of the method.
+ * Initialization requires care: Most fields must have legal
+ * default values, to ensure that attempted accesses from other
+ * threads work correctly even before this thread starts
+ * processing tasks.
+ */
+ protected void onStart() {
+ }
+
+ /**
+ * Performs cleanup associated with termination of this worker
+ * thread. If you override this method, you must invoke
+ * {@code super.onTermination} at the end of the overridden method.
+ *
+ * @param exception the exception causing this thread to abort due
+ * to an unrecoverable error, or {@code null} if completed normally
+ */
+ protected void onTermination(Throwable exception) {
+ }
+
+ /**
+ * This method is required to be public, but should never be
+ * called explicitly. It performs the main run loop to execute
+ * {@link ForkJoinTask}s.
+ */
+ public void run() {
+ Throwable exception = null;
+ try {
+ onStart();
+ pool.runWorker(workQueue);
+ } catch (Throwable ex) {
+ exception = ex;
+ } finally {
+ try {
+ onTermination(exception);
+ } catch (Throwable ex) {
+ if (exception == null)
+ exception = ex;
+ } finally {
+ pool.deregisterWorker(this, exception);
+ }
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/LinkedTransferQueue.java b/src/main/java/jsr166y/LinkedTransferQueue.java
new file mode 100644
index 0000000..b29a86c
--- /dev/null
+++ b/src/main/java/jsr166y/LinkedTransferQueue.java
@@ -0,0 +1,1353 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+import java.util.AbstractQueue;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * An unbounded {@link TransferQueue} based on linked nodes.
+ * This queue orders elements FIFO (first-in-first-out) with respect
+ * to any given producer. The <em>head</em> of the queue is that
+ * element that has been on the queue the longest time for some
+ * producer. The <em>tail</em> of the queue is that element that has
+ * been on the queue the shortest time for some producer.
+ *
+ * <p>Beware that, unlike in most collections, the {@code size} method
+ * is <em>NOT</em> a constant-time operation. Because of the
+ * asynchronous nature of these queues, determining the current number
+ * of elements requires a traversal of the elements, and so may report
+ * inaccurate results if this collection is modified during traversal.
+ * Additionally, the bulk operations {@code addAll},
+ * {@code removeAll}, {@code retainAll}, {@code containsAll},
+ * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
+ * to be performed atomically. For example, an iterator operating
+ * concurrently with an {@code addAll} operation might view only some
+ * of the added elements.
+ *
+ * <p>This class and its iterator implement all of the
+ * <em>optional</em> methods of the {@link Collection} and {@link
+ * Iterator} interfaces.
+ *
+ * <p>Memory consistency effects: As with other concurrent
+ * collections, actions in a thread prior to placing an object into a
+ * {@code LinkedTransferQueue}
+ * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
+ * actions subsequent to the access or removal of that element from
+ * the {@code LinkedTransferQueue} in another thread.
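+ *
+ * <p>(An editorial usage sketch, not part of the original
+ * documentation:)
+ * <pre> {@code
+ * final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
+ * new Thread(new Runnable() {        // consumer
+ *   public void run() {
+ *     try { System.out.println(q.take()); }
+ *     catch (InterruptedException ignore) { }
+ *   }
+ * }).start();
+ * try {
+ *   q.transfer("hello");             // returns once the consumer takes it
+ * } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
+ * }</pre>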
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ * @param <E> the type of elements held in this collection
+ */
+public class LinkedTransferQueue<E> extends AbstractQueue<E>
+ implements TransferQueue<E>, java.io.Serializable {
+ private static final long serialVersionUID = -3223113410248163686L;
+
+ /*
+ * *** Overview of Dual Queues with Slack ***
+ *
+ * Dual Queues, introduced by Scherer and Scott
+ * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
+ * (linked) queues in which nodes may represent either data or
+ * requests. When a thread tries to enqueue a data node, but
+ * encounters a request node, it instead "matches" and removes it;
+ * and vice versa for enqueuing requests. Blocking Dual Queues
+ * arrange that threads enqueuing unmatched requests block until
+ * other threads provide the match. Dual Synchronous Queues (see
+ * Scherer, Lea, & Scott
+ * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
+ * additionally arrange that threads enqueuing unmatched data also
+ * block. Dual Transfer Queues support all of these modes, as
+ * dictated by callers.
+ *
+ * A FIFO dual queue may be implemented using a variation of the
+ * Michael & Scott (M&S) lock-free queue algorithm
+ * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
+ * It maintains two pointer fields, "head", pointing to a
+ * (matched) node that in turn points to the first actual
+ * (unmatched) queue node (or null if empty); and "tail" that
+ * points to the last node on the queue (or again null if
+ * empty). For example, here is a possible queue with four data
+ * elements:
+ *
+ * head tail
+ * | |
+ * v v
+ * M -> U -> U -> U -> U
+ *
+ * The M&S queue algorithm is known to be prone to scalability and
+ * overhead limitations when maintaining (via CAS) these head and
+ * tail pointers. This has led to the development of
+ * contention-reducing variants such as elimination arrays (see
+ * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
+ * optimistic back pointers (see Ladan-Mozes & Shavit
+ * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
+ * However, the nature of dual queues enables a simpler tactic for
+ * improving M&S-style implementations when dual-ness is needed.
+ *
+ * In a dual queue, each node must atomically maintain its match
+ * status. While there are other possible variants, we implement
+ * this here as: for a data-mode node, matching entails CASing an
+ * "item" field from a non-null data value to null upon match, and
+ * vice-versa for request nodes, CASing from null to a data
+ * value. (Note that the linearization properties of this style of
+ * queue are easy to verify -- elements are made available by
+ * linking, and unavailable by matching.) Compared to plain M&S
+ * queues, this property of dual queues requires one additional
+ * successful atomic operation per enq/deq pair. But it also
+ * enables lower cost variants of queue maintenance mechanics. (A
+ * variation of this idea applies even for non-dual queues that
+ * support deletion of interior elements, such as
+ * j.u.c.ConcurrentLinkedQueue.)
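+     *
+     * (Editorial sketch: a consumer matching a data-mode node in
+     * effect does "Object x = node.item; if (x != null &&
+     * CAS(node.item, x, null)) return x;", while a producer matching
+     * a request node CASes the item from null to its data value.)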
+ *
+ * Once a node is matched, its match status can never again
+ * change. We may thus arrange that the linked list of them
+ * contain a prefix of zero or more matched nodes, followed by a
+ * suffix of zero or more unmatched nodes. (Note that we allow
+ * both the prefix and suffix to be zero length, which in turn
+ * means that we do not use a dummy header.) If we were not
+ * concerned with either time or space efficiency, we could
+ * correctly perform enqueue and dequeue operations by traversing
+ * from a pointer to the initial node; CASing the item of the
+ * first unmatched node on match and CASing the next field of the
+ * trailing node on appends. (Plus some special-casing when
+ * initially empty). While this would be a terrible idea in
+ * itself, it does have the benefit of not requiring ANY atomic
+ * updates on head/tail fields.
+ *
+ * We introduce here an approach that lies between the extremes of
+ * never versus always updating queue (head and tail) pointers.
+ * This offers a tradeoff between sometimes requiring extra
+ * traversal steps to locate the first and/or last unmatched
+ * nodes, versus the reduced overhead and contention of fewer
+ * updates to queue pointers. For example, a possible snapshot of
+ * a queue is:
+ *
+ * head tail
+ * | |
+ * v v
+ * M -> M -> U -> U -> U -> U
+ *
+ * The best value for this "slack" (the targeted maximum distance
+ * between the value of "head" and the first unmatched node, and
+ * similarly for "tail") is an empirical matter. We have found
+ * that using very small constants in the range of 1-3 work best
+ * over a range of platforms. Larger values introduce increasing
+ * costs of cache misses and risks of long traversal chains, while
+ * smaller values increase CAS contention and overhead.
+ *
+ * Dual queues with slack differ from plain M&S dual queues by
+ * virtue of only sometimes updating head or tail pointers when
+     * matching, appending, or even traversing nodes, in order to
+ * maintain a targeted slack. The idea of "sometimes" may be
+ * operationalized in several ways. The simplest is to use a
+ * per-operation counter incremented on each traversal step, and
+ * to try (via CAS) to update the associated queue pointer
+ * whenever the count exceeds a threshold. Another, that requires
+ * more overhead, is to use random number generators to update
+ * with a given probability per traversal step.
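+     *
+     * (Editorial sketch of the counter flavor: roughly "if (++steps
+     * > SLACK) CAS head from h to the current first unmatched node",
+     * where SLACK is the targeted maximum distance.)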
+ *
+ * In any strategy along these lines, because CASes updating
+ * fields may fail, the actual slack may exceed targeted
+ * slack. However, they may be retried at any time to maintain
+ * targets. Even when using very small slack values, this
+ * approach works well for dual queues because it allows all
+ * operations up to the point of matching or appending an item
+ * (hence potentially allowing progress by another thread) to be
+ * read-only, thus not introducing any further contention. As
+ * described below, we implement this by performing slack
+ * maintenance retries only after these points.
+ *
+ * As an accompaniment to such techniques, traversal overhead can
+ * be further reduced without increasing contention of head
+ * pointer updates: Threads may sometimes shortcut the "next" link
+ * path from the current "head" node to be closer to the currently
+ * known first unmatched node, and similarly for tail. Again, this
+     * may be triggered using thresholds or randomization.
+ *
+ * These ideas must be further extended to avoid unbounded amounts
+ * of costly-to-reclaim garbage caused by the sequential "next"
+ * links of nodes starting at old forgotten head nodes: As first
+ * described in detail by Boehm
+ * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
+ * delays noticing that any arbitrarily old node has become
+ * garbage, all newer dead nodes will also be unreclaimed.
+ * (Similar issues arise in non-GC environments.) To cope with
+ * this in our implementation, upon CASing to advance the head
+ * pointer, we set the "next" link of the previous head to point
+ * only to itself, thus limiting the length of connected dead lists.
+ * (We also take similar care to wipe out possibly garbage
+ * retaining values held in other Node fields.) However, doing so
+ * adds some further complexity to traversal: If any "next"
+ * pointer links to itself, it indicates that the current thread
+ * has lagged behind a head-update, and so the traversal must
+ * continue from the "head". Traversals trying to find the
+ * current tail starting from "tail" may also encounter
+ * self-links, in which case they also continue at "head".
+ *
+ * It is tempting in a slack-based scheme to not even use CAS for
+ * updates (similarly to Ladan-Mozes & Shavit). However, this
+ * cannot be done for head updates under the above link-forgetting
+ * mechanics because an update may leave head at a detached node.
+ * And while direct writes are possible for tail updates, they
+ * increase the risk of long retraversals, and hence long garbage
+ * chains, which can be much more costly than is worthwhile
+ * considering that the cost difference of performing a CAS vs
+ * write is smaller when they are not triggered on each operation
+ * (especially considering that writes and CASes equally require
+ * additional GC bookkeeping ("write barriers") that are sometimes
+ * more costly than the writes themselves because of contention).
+ *
+ * *** Overview of implementation ***
+ *
+ * We use a threshold-based approach to updates, with a slack
+ * threshold of two -- that is, we update head/tail when the
+ * current pointer appears to be two or more steps away from the
+ * first/last node. The slack value is hard-wired: a path greater
+ * than one is naturally implemented by checking equality of
+ * traversal pointers except when the list has only one element,
+ * in which case we keep the slack threshold at one. Avoiding tracking
+ * explicit counts across method calls slightly simplifies an
+ * already-messy implementation. Using randomization would
+ * probably work better if there were a low-quality dirt-cheap
+ * per-thread one available, but even ThreadLocalRandom is too
+ * heavy for these purposes.
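+ *
+ * Illustratively, after matching a node p first reached from head
+ * h, the update condition reduces to a pointer comparison:
+ *
+ *   if (p != h)           // first unmatched is now >= 2 steps away
+ *     casHead(h, p.next); // try to advance; failure is tolerable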
+ *
+ * With such a small slack threshold value, it is not worthwhile
+ * to augment this with path short-circuiting (i.e., unsplicing
+ * interior nodes) except in the case of cancellation/removal (see
+ * below).
+ *
+ * We allow both the head and tail fields to be null before any
+ * nodes are enqueued; initializing upon first append. This
+ * simplifies some other logic, as well as providing more
+ * efficient explicit control paths instead of letting JVMs insert
+ * implicit NullPointerExceptions when they are null. While not
+ * currently fully implemented, we also leave open the possibility
+ * of re-nulling these fields when empty (which is complicated to
+ * arrange, for little benefit).
+ *
+ * All enqueue/dequeue operations are handled by the single method
+ * "xfer" with parameters indicating whether to act as some form
+ * of offer, put, poll, take, or transfer (each possibly with
+ * timeout). The relative complexity of using one monolithic
+ * method is outweighed by the code bulk and maintenance problems
+ * of using separate methods for each case.
+ *
+ * Operation consists of up to three phases. The first is
+ * implemented within method xfer, the second in tryAppend, and
+ * the third in method awaitMatch.
+ *
+ * 1. Try to match an existing node
+ *
+ * Starting at head, skip already-matched nodes until finding
+ * an unmatched node of opposite mode, if one exists, in which
+ *    case we match it and return, if necessary also updating
+ * head to one past the matched node (or the node itself if the
+ * list has no other unmatched nodes). If the CAS misses, then
+ * a loop retries advancing head by two steps until either
+ * success or the slack is at most two. By requiring that each
+ * attempt advances head by two (if applicable), we ensure that
+ * the slack does not grow without bound. Traversals also check
+ * if the initial head is now off-list, in which case they
+ * start at the new head.
+ *
+ *    If no candidates are found and the call was an untimed
+ *    poll/offer (argument "how" is NOW), return.
+ *
+ * 2. Try to append a new node (method tryAppend)
+ *
+ * Starting at current tail pointer, find the actual last node
+ * and try to append a new node (or if head was null, establish
+ * the first node). Nodes can be appended only if their
+ * predecessors are either already matched or are of the same
+ * mode. If we detect otherwise, then a new node with opposite
+ * mode must have been appended during traversal, so we must
+ * restart at phase 1. The traversal and update steps are
+ * otherwise similar to phase 1: Retrying upon CAS misses and
+ * checking for staleness. In particular, if a self-link is
+ * encountered, then we can safely jump to a node on the list
+ * by continuing the traversal at current head.
+ *
+ * On successful append, if the call was ASYNC, return.
+ *
+ * 3. Await match or cancellation (method awaitMatch)
+ *
+ *    Wait for another thread to match the node, instead cancelling if
+ * the current thread was interrupted or the wait timed out. On
+ * multiprocessors, we use front-of-queue spinning: If a node
+ * appears to be the first unmatched node in the queue, it
+ * spins a bit before blocking. In either case, before blocking
+ * it tries to unsplice any nodes between the current "head"
+ * and the first unmatched node.
+ *
+ * Front-of-queue spinning vastly improves performance of
+ * heavily contended queues. And so long as it is relatively
+ * brief and "quiet", spinning does not much impact performance
+ * of less-contended queues. During spins threads check their
+ * interrupt status and generate a thread-local random number
+ * to decide to occasionally perform a Thread.yield. While
+ * yield has underdefined specs, we assume that it might help,
+ * and will not hurt, in limiting impact of spinning on busy
+ * systems. We also use smaller (1/2) spins for nodes that are
+ * not known to be front but whose predecessors have not
+ * blocked -- these "chained" spins avoid artifacts of
+ * front-of-queue rules which otherwise lead to alternating
+ * nodes spinning vs blocking. Further, front threads that
+ * represent phase changes (from data to request node or vice
+ * versa) compared to their predecessors receive additional
+ * chained spins, reflecting longer paths typically required to
+ * unblock threads during phase changes.
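+ *
+ * An illustrative outline of how the phases compose (the real
+ * control flow appears in the methods below):
+ *
+ *   xfer(e, haveData, how, nanos):
+ *     for (;;) {
+ *       if (an unmatched opposite-mode node is found and CASed)
+ *         return its item;                          // phase 1
+ *       if (how == NOW) return e;                   // untimed poll/offer
+ *       pred = tryAppend(s = new Node(e, haveData)); // phase 2
+ *       if (pred == null) continue;                 // lost race; restart
+ *       if (how == ASYNC) return e;
+ *       return awaitMatch(s, pred, e, how == TIMED, nanos); // phase 3
+ *     }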
+ *
+ *
+ * ** Unlinking removed interior nodes **
+ *
+ * In addition to minimizing garbage retention via self-linking
+ * described above, we also unlink removed interior nodes. These
+ * may arise due to timed out or interrupted waits, or calls to
+ * remove(x) or Iterator.remove. Normally, given a node that was
+ * at one time known to be the predecessor of some node s that is
+ * to be removed, we can unsplice s by CASing the next field of
+ * its predecessor if it still points to s (otherwise s must
+ * already have been removed or is now offlist). But there are two
+ * situations in which we cannot guarantee to make node s
+ * unreachable in this way: (1) If s is the trailing node of the list
+ * (i.e., with null next), then it is pinned as the target node
+ * for appends, so can only be removed later after other nodes are
+ * appended. (2) We cannot necessarily unlink s given a
+ * predecessor node that is matched (including the case of being
+ * cancelled): the predecessor may already be unspliced, in which
+ * case some previous reachable node may still point to s.
+ * (For further explanation see Herlihy & Shavit "The Art of
+ * Multiprocessor Programming" chapter 9). In both cases, though,
+ * we can rule out the need for further action if either s or its
+ * predecessor is (or can be made to be) at, or falls off from,
+ * the head of the list.
+ *
+ * Without taking these into account, it would be possible for an
+ * unbounded number of supposedly removed nodes to remain
+ * reachable. Situations leading to such buildup are uncommon but
+ * can occur in practice; for example when a series of short timed
+ * calls to poll repeatedly time out but never otherwise fall off
+ * the list because of an untimed call to take at the front of the
+ * queue.
+ *
+ * When these cases arise, rather than always retraversing the
+ * entire list to find an actual predecessor to unlink (which
+ * won't help for case (1) anyway), we record a conservative
+ * estimate of possible unsplice failures (in "sweepVotes").
+ * We trigger a full sweep when the estimate exceeds a threshold
+ * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
+ * removal failures to tolerate before sweeping through, unlinking
+ * cancelled nodes that were not unlinked upon initial removal.
+ * We perform sweeps by the thread hitting threshold (rather than
+ * background threads or by spreading work to other threads)
+ * because in the main contexts in which removal occurs, the
+ * caller is already timed-out, cancelled, or performing a
+ * potentially O(n) operation (e.g. remove(x)), none of which are
+ * time-critical enough to warrant the overhead that alternatives
+ * would impose on other threads.
+ *
+ * Because the sweepVotes estimate is conservative, and because
+ * nodes become unlinked "naturally" as they fall off the head of
+ * the queue, and because we allow votes to accumulate even while
+ * sweeps are in progress, there are typically significantly fewer
+ * such nodes than estimated. Choice of a threshold value
+ * balances the likelihood of wasted effort and contention, versus
+ * providing a worst-case bound on retention of interior nodes in
+ * quiescent queues. The value defined below was chosen
+ * empirically to balance these under various timeout scenarios.
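+ *
+ * Condensed to code (see method unsplice below), each failed
+ * unsplice contributes one vote, and the voter that crosses the
+ * threshold resets the count and performs the sweep:
+ *
+ *   int v = sweepVotes;
+ *   if (v < SWEEP_THRESHOLD) casSweepVotes(v, v + 1);
+ *   else if (casSweepVotes(v, 0)) sweep();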
+ *
+ * Note that we cannot self-link unlinked interior nodes during
+ * sweeps. However, the associated garbage chains terminate when
+ * some successor ultimately falls off the head of the list and is
+ * self-linked.
+ */
+
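+    /*
+     * Illustrative usage (an informal sketch, not part of the
+     * specification): offer/put enqueue without blocking, while
+     * transfer blocks until a consumer receives the element.
+     *
+     *   LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
+     *   // producer thread:
+     *   q.put("async");     // enqueue; never blocks
+     *   q.transfer("sync"); // wait until a consumer takes it
+     *   // consumer thread:
+     *   String s = q.take();
+     */
+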
+ /** True if on multiprocessor */
+ private static final boolean MP =
+ Runtime.getRuntime().availableProcessors() > 1;
+
+ /**
+ * The number of times to spin (with randomly interspersed calls
+ * to Thread.yield) on multiprocessor before blocking when a node
+ * is apparently the first waiter in the queue. See above for
+ * explanation. Must be a power of two. The value is empirically
+ * derived -- it works pretty well across a variety of processors,
+ * numbers of CPUs, and OSes.
+ */
+ private static final int FRONT_SPINS = 1 << 7;
+
+ /**
+ * The number of times to spin before blocking when a node is
+ * preceded by another node that is apparently spinning. Also
+ * serves as an increment to FRONT_SPINS on phase changes, and as
+ * base average frequency for yielding during spins. Must be a
+ * power of two.
+ */
+ private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
+
+ /**
+ * The maximum number of estimated removal failures (sweepVotes)
+ * to tolerate before sweeping through the queue unlinking
+ * cancelled nodes that were not unlinked upon initial
+ * removal. See above for explanation. The value must be at least
+ * two to avoid useless sweeps when removing trailing nodes.
+ */
+ static final int SWEEP_THRESHOLD = 32;
+
+ /**
+ * Queue nodes. Uses Object, not E, for items to allow forgetting
+ * them after use. Relies heavily on Unsafe mechanics to minimize
+ * unnecessary ordering constraints: Writes that are intrinsically
+ * ordered wrt other accesses or CASes use simple relaxed forms.
+ */
+ static final class Node {
+ final boolean isData; // false if this is a request node
+ volatile Object item; // initially non-null if isData; CASed to match
+ volatile Node next;
+ volatile Thread waiter; // null until waiting
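+
+        /*
+         * Item life cycle (summary): a data node's item starts
+         * non-null and is CASed to null when matched (taken); a
+         * request node's item starts null and is CASed to the
+         * transferred value when matched. Cancellation, and the
+         * post-match forgetContents, set item to the node itself.
+         * Hence isMatched() below tests
+         * (item == this) || ((item == null) == isData).
+         */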
+
+ // CAS methods for fields
+ final boolean casNext(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
+ }
+
+ final boolean casItem(Object cmp, Object val) {
+ // assert cmp == null || cmp.getClass() != Node.class;
+ return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
+ }
+
+ /**
+ * Constructs a new node. Uses relaxed write because item can
+ * only be seen after publication via casNext.
+ */
+ Node(Object item, boolean isData) {
+ UNSAFE.putObject(this, itemOffset, item); // relaxed write
+ this.isData = isData;
+ }
+
+ /**
+ * Links node to itself to avoid garbage retention. Called
+ * only after CASing head field, so uses relaxed write.
+ */
+ final void forgetNext() {
+ UNSAFE.putObject(this, nextOffset, this);
+ }
+
+ /**
+ * Sets item to self and waiter to null, to avoid garbage
+ * retention after matching or cancelling. Uses relaxed writes
+ * because order is already constrained in the only calling
+ * contexts: item is forgotten only after volatile/atomic
+ * mechanics that extract items. Similarly, clearing waiter
+ * follows either CAS or return from park (if ever parked;
+ * else we don't care).
+ */
+ final void forgetContents() {
+ UNSAFE.putObject(this, itemOffset, this);
+ UNSAFE.putObject(this, waiterOffset, null);
+ }
+
+ /**
+ * Returns true if this node has been matched, including the
+ * case of artificial matches due to cancellation.
+ */
+ final boolean isMatched() {
+ Object x = item;
+ return (x == this) || ((x == null) == isData);
+ }
+
+ /**
+ * Returns true if this is an unmatched request node.
+ */
+ final boolean isUnmatchedRequest() {
+ return !isData && item == null;
+ }
+
+ /**
+ * Returns true if a node with the given mode cannot be
+ * appended to this node because this node is unmatched and
+ * has opposite data mode.
+ */
+ final boolean cannotPrecede(boolean haveData) {
+ boolean d = isData;
+ Object x;
+ return d != haveData && (x = item) != this && (x != null) == d;
+ }
+
+ /**
+ * Tries to artificially match a data node -- used by remove.
+ */
+ final boolean tryMatchData() {
+ // assert isData;
+ Object x = item;
+ if (x != null && x != this && casItem(x, null)) {
+ LockSupport.unpark(waiter);
+ return true;
+ }
+ return false;
+ }
+
+ private static final long serialVersionUID = -3375979862319811754L;
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long itemOffset;
+ private static final long nextOffset;
+ private static final long waiterOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = Node.class;
+ itemOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("item"));
+ nextOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("next"));
+ waiterOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("waiter"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+ }
+
+ /** head of the queue; null until first enqueue */
+ transient volatile Node head;
+
+ /** tail of the queue; null until first append */
+ private transient volatile Node tail;
+
+ /** The number of apparent failures to unsplice removed nodes */
+ private transient volatile int sweepVotes;
+
+ // CAS methods for fields
+ private boolean casTail(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
+ }
+
+ private boolean casHead(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
+ }
+
+ private boolean casSweepVotes(int cmp, int val) {
+ return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
+ }
+
+ /*
+ * Possible values for "how" argument in xfer method.
+ */
+ private static final int NOW = 0; // for untimed poll, tryTransfer
+ private static final int ASYNC = 1; // for offer, put, add
+ private static final int SYNC = 2; // for transfer, take
+ private static final int TIMED = 3; // for timed poll, tryTransfer
+
+ @SuppressWarnings("unchecked")
+ static <E> E cast(Object item) {
+ // assert item == null || item.getClass() != Node.class;
+ return (E) item;
+ }
+
+ /**
+ * Implements all queuing methods. See above for explanation.
+ *
+ * @param e the item or null for take
+ * @param haveData true if this is a put, else a take
+ * @param how NOW, ASYNC, SYNC, or TIMED
+     * @param nanos timeout in nanoseconds, used only if mode is TIMED
+ * @return an item if matched, else e
+ * @throws NullPointerException if haveData mode but e is null
+ */
+ private E xfer(E e, boolean haveData, int how, long nanos) {
+ if (haveData && (e == null))
+ throw new NullPointerException();
+ Node s = null; // the node to append, if needed
+
+ retry:
+ for (;;) { // restart on append race
+
+ for (Node h = head, p = h; p != null;) { // find & match first node
+ boolean isData = p.isData;
+ Object item = p.item;
+ if (item != p && (item != null) == isData) { // unmatched
+ if (isData == haveData) // can't match
+ break;
+ if (p.casItem(item, e)) { // match
+ for (Node q = p; q != h;) {
+ Node n = q.next; // update by 2 unless singleton
+ if (head == h && casHead(h, n == null ? q : n)) {
+ h.forgetNext();
+ break;
+ } // advance and retry
+ if ((h = head) == null ||
+ (q = h.next) == null || !q.isMatched())
+ break; // unless slack < 2
+ }
+ LockSupport.unpark(p.waiter);
+ return LinkedTransferQueue.<E>cast(item);
+ }
+ }
+ Node n = p.next;
+ p = (p != n) ? n : (h = head); // Use head if p offlist
+ }
+
+ if (how != NOW) { // No matches available
+ if (s == null)
+ s = new Node(e, haveData);
+ Node pred = tryAppend(s, haveData);
+ if (pred == null)
+ continue retry; // lost race vs opposite mode
+ if (how != ASYNC)
+ return awaitMatch(s, pred, e, (how == TIMED), nanos);
+ }
+ return e; // not waiting
+ }
+ }
+
+ /**
+ * Tries to append node s as tail.
+ *
+ * @param s the node to append
+ * @param haveData true if appending in data mode
+ * @return null on failure due to losing race with append in
+ * different mode, else s's predecessor, or s itself if no
+ * predecessor
+ */
+ private Node tryAppend(Node s, boolean haveData) {
+ for (Node t = tail, p = t;;) { // move p to last node and append
+ Node n, u; // temps for reads of next & tail
+ if (p == null && (p = head) == null) {
+ if (casHead(null, s))
+ return s; // initialize
+ }
+ else if (p.cannotPrecede(haveData))
+ return null; // lost race vs opposite mode
+ else if ((n = p.next) != null) // not last; keep traversing
+ p = p != t && t != (u = tail) ? (t = u) : // stale tail
+ (p != n) ? n : null; // restart if off list
+ else if (!p.casNext(null, s))
+ p = p.next; // re-read on CAS failure
+ else {
+ if (p != t) { // update if slack now >= 2
+ while ((tail != t || !casTail(t, s)) &&
+ (t = tail) != null &&
+ (s = t.next) != null && // advance and retry
+ (s = s.next) != null && s != t);
+ }
+ return p;
+ }
+ }
+ }
+
+ /**
+ * Spins/yields/blocks until node s is matched or caller gives up.
+ *
+ * @param s the waiting node
+ * @param pred the predecessor of s, or s itself if it has no
+ * predecessor, or null if unknown (the null case does not occur
+ * in any current calls but may in possible future extensions)
+ * @param e the comparison value for checking match
+ * @param timed if true, wait only until timeout elapses
+     * @param nanos timeout in nanoseconds, used only if timed is true
+ * @return matched item, or e if unmatched on interrupt or timeout
+ */
+ private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
+ long lastTime = timed ? System.nanoTime() : 0L;
+ Thread w = Thread.currentThread();
+ int spins = -1; // initialized after first item and cancel checks
+ ThreadLocalRandom randomYields = null; // bound if needed
+
+ for (;;) {
+ Object item = s.item;
+ if (item != e) { // matched
+ // assert item != s;
+ s.forgetContents(); // avoid garbage
+ return LinkedTransferQueue.<E>cast(item);
+ }
+ if ((w.isInterrupted() || (timed && nanos <= 0)) &&
+ s.casItem(e, s)) { // cancel
+ unsplice(pred, s);
+ return e;
+ }
+
+ if (spins < 0) { // establish spins at/near front
+ if ((spins = spinsFor(pred, s.isData)) > 0)
+ randomYields = ThreadLocalRandom.current();
+ }
+ else if (spins > 0) { // spin
+ --spins;
+ if (randomYields.nextInt(CHAINED_SPINS) == 0)
+ Thread.yield(); // occasionally yield
+ }
+ else if (s.waiter == null) {
+ s.waiter = w; // request unpark then recheck
+ }
+ else if (timed) {
+ long now = System.nanoTime();
+ if ((nanos -= now - lastTime) > 0)
+ LockSupport.parkNanos(this, nanos);
+ lastTime = now;
+ }
+ else {
+ LockSupport.park(this);
+ }
+ }
+ }
+
+ /**
+ * Returns spin/yield value for a node with given predecessor and
+ * data mode. See above for explanation.
+ */
+ private static int spinsFor(Node pred, boolean haveData) {
+ if (MP && pred != null) {
+ if (pred.isData != haveData) // phase change
+ return FRONT_SPINS + CHAINED_SPINS;
+ if (pred.isMatched()) // probably at front
+ return FRONT_SPINS;
+ if (pred.waiter == null) // pred apparently spinning
+ return CHAINED_SPINS;
+ }
+ return 0;
+ }
+
+ /* -------------- Traversal methods -------------- */
+
+ /**
+ * Returns the successor of p, or the head node if p.next has been
+ * linked to self, which will only be true if traversing with a
+ * stale pointer that is now off the list.
+ */
+ final Node succ(Node p) {
+ Node next = p.next;
+ return (p == next) ? head : next;
+ }
+
+ /**
+ * Returns the first unmatched node of the given mode, or null if
+ * none. Used by methods isEmpty, hasWaitingConsumer.
+ */
+ private Node firstOfMode(boolean isData) {
+ for (Node p = head; p != null; p = succ(p)) {
+ if (!p.isMatched())
+ return (p.isData == isData) ? p : null;
+ }
+ return null;
+ }
+
+ /**
+ * Returns the item in the first unmatched node with isData; or
+ * null if none. Used by peek.
+ */
+ private E firstDataItem() {
+ for (Node p = head; p != null; p = succ(p)) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p)
+ return LinkedTransferQueue.<E>cast(item);
+ }
+ else if (item == null)
+ return null;
+ }
+ return null;
+ }
+
+ /**
+ * Traverses and counts unmatched nodes of the given mode.
+ * Used by methods size and getWaitingConsumerCount.
+ */
+ private int countOfMode(boolean data) {
+ int count = 0;
+ for (Node p = head; p != null; ) {
+ if (!p.isMatched()) {
+ if (p.isData != data)
+ return 0;
+ if (++count == Integer.MAX_VALUE) // saturated
+ break;
+ }
+ Node n = p.next;
+ if (n != p)
+ p = n;
+ else {
+ count = 0;
+ p = head;
+ }
+ }
+ return count;
+ }
+
+ final class Itr implements Iterator<E> {
+ private Node nextNode; // next node to return item for
+ private E nextItem; // the corresponding item
+ private Node lastRet; // last returned node, to support remove
+ private Node lastPred; // predecessor to unlink lastRet
+
+ /**
+         * Moves to the next node after prev, or to the first node if prev is null.
+ */
+ private void advance(Node prev) {
+ /*
+ * To track and avoid buildup of deleted nodes in the face
+ * of calls to both Queue.remove and Itr.remove, we must
+ * include variants of unsplice and sweep upon each
+ * advance: Upon Itr.remove, we may need to catch up links
+ * from lastPred, and upon other removes, we might need to
+ * skip ahead from stale nodes and unsplice deleted ones
+ * found while advancing.
+ */
+
+ Node r, b; // reset lastPred upon possible deletion of lastRet
+ if ((r = lastRet) != null && !r.isMatched())
+ lastPred = r; // next lastPred is old lastRet
+ else if ((b = lastPred) == null || b.isMatched())
+ lastPred = null; // at start of list
+ else {
+ Node s, n; // help with removal of lastPred.next
+ while ((s = b.next) != null &&
+ s != b && s.isMatched() &&
+ (n = s.next) != null && n != s)
+ b.casNext(s, n);
+ }
+
+ this.lastRet = prev;
+
+ for (Node p = prev, s, n;;) {
+ s = (p == null) ? head : p.next;
+ if (s == null)
+ break;
+ else if (s == p) {
+ p = null;
+ continue;
+ }
+ Object item = s.item;
+ if (s.isData) {
+ if (item != null && item != s) {
+ nextItem = LinkedTransferQueue.<E>cast(item);
+ nextNode = s;
+ return;
+ }
+ }
+ else if (item == null)
+ break;
+ // assert s.isMatched();
+ if (p == null)
+ p = s;
+ else if ((n = s.next) == null)
+ break;
+ else if (s == n)
+ p = null;
+ else
+ p.casNext(s, n);
+ }
+ nextNode = null;
+ nextItem = null;
+ }
+
+ Itr() {
+ advance(null);
+ }
+
+ public final boolean hasNext() {
+ return nextNode != null;
+ }
+
+ public final E next() {
+ Node p = nextNode;
+ if (p == null) throw new NoSuchElementException();
+ E e = nextItem;
+ advance(p);
+ return e;
+ }
+
+ public final void remove() {
+ final Node lastRet = this.lastRet;
+ if (lastRet == null)
+ throw new IllegalStateException();
+ this.lastRet = null;
+ if (lastRet.tryMatchData())
+ unsplice(lastPred, lastRet);
+ }
+ }
+
+ /* -------------- Removal methods -------------- */
+
+ /**
+ * Unsplices (now or later) the given deleted/cancelled node with
+ * the given predecessor.
+ *
+ * @param pred a node that was at one time known to be the
+ * predecessor of s, or null or s itself if s is/was at head
+ * @param s the node to be unspliced
+ */
+ final void unsplice(Node pred, Node s) {
+ s.forgetContents(); // forget unneeded fields
+ /*
+ * See above for rationale. Briefly: if pred still points to
+     * s, try to unlink s. If s cannot be unlinked, because it is the
+     * trailing node or pred might be unlinked, and neither pred
+     * nor s is head or offlist, add to sweepVotes, and if enough
+ * votes have accumulated, sweep.
+ */
+ if (pred != null && pred != s && pred.next == s) {
+ Node n = s.next;
+ if (n == null ||
+ (n != s && pred.casNext(s, n) && pred.isMatched())) {
+ for (;;) { // check if at, or could be, head
+ Node h = head;
+ if (h == pred || h == s || h == null)
+ return; // at head or list empty
+ if (!h.isMatched())
+ break;
+ Node hn = h.next;
+ if (hn == null)
+ return; // now empty
+ if (hn != h && casHead(h, hn))
+ h.forgetNext(); // advance head
+ }
+ if (pred.next != pred && s.next != s) { // recheck if offlist
+ for (;;) { // sweep now if enough votes
+ int v = sweepVotes;
+ if (v < SWEEP_THRESHOLD) {
+ if (casSweepVotes(v, v + 1))
+ break;
+ }
+ else if (casSweepVotes(v, 0)) {
+ sweep();
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Unlinks matched (typically cancelled) nodes encountered in a
+ * traversal from head.
+ */
+ private void sweep() {
+ for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
+ if (!s.isMatched())
+ // Unmatched nodes are never self-linked
+ p = s;
+ else if ((n = s.next) == null) // trailing node is pinned
+ break;
+ else if (s == n) // stale
+ // No need to also check for p == s, since that implies s == n
+ p = head;
+ else
+ p.casNext(s, n);
+ }
+ }
+
+ /**
+ * Main implementation of remove(Object)
+ */
+ private boolean findAndRemove(Object e) {
+ if (e != null) {
+ for (Node pred = null, p = head; p != null; ) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p && e.equals(item) &&
+ p.tryMatchData()) {
+ unsplice(pred, p);
+ return true;
+ }
+ }
+ else if (item == null)
+ break;
+ pred = p;
+ if ((p = p.next) == pred) { // stale
+ pred = null;
+ p = head;
+ }
+ }
+ }
+ return false;
+ }
+
+
+ /**
+ * Creates an initially empty {@code LinkedTransferQueue}.
+ */
+ public LinkedTransferQueue() {
+ }
+
+ /**
+ * Creates a {@code LinkedTransferQueue}
+ * initially containing the elements of the given collection,
+ * added in traversal order of the collection's iterator.
+ *
+ * @param c the collection of elements to initially contain
+ * @throws NullPointerException if the specified collection or any
+ * of its elements are null
+ */
+ public LinkedTransferQueue(Collection<? extends E> c) {
+ this();
+ addAll(c);
+ }
+
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never block.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public void put(E e) {
+ xfer(e, true, ASYNC, 0);
+ }
+
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never block or
+ * return {@code false}.
+ *
+ * @return {@code true} (as specified by
+ * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
+ * BlockingQueue.offer})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offer(E e, long timeout, TimeUnit unit) {
+ xfer(e, true, ASYNC, 0);
+ return true;
+ }
+
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Queue#offer})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offer(E e) {
+ xfer(e, true, ASYNC, 0);
+ return true;
+ }
+
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never throw
+ * {@link IllegalStateException} or return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Collection#add})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean add(E e) {
+ xfer(e, true, ASYNC, 0);
+ return true;
+ }
+
+ /**
+ * Transfers the element to a waiting consumer immediately, if possible.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * otherwise returning {@code false} without enqueuing the element.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean tryTransfer(E e) {
+ return xfer(e, true, NOW, 0) == null;
+ }
+
+ /**
+ * Transfers the element to a consumer, waiting if necessary to do so.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else inserts the specified element at the tail of this queue
+ * and waits until the element is received by a consumer.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public void transfer(E e) throws InterruptedException {
+ if (xfer(e, true, SYNC, 0) != null) {
+ Thread.interrupted(); // failure possible only due to interrupt
+ throw new InterruptedException();
+ }
+ }
+
+ /**
+ * Transfers the element to a consumer if it is possible to do so
+ * before the timeout elapses.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else inserts the specified element at the tail of this queue
+ * and waits until the element is received by a consumer,
+ * returning {@code false} if the specified wait time elapses
+ * before the element can be transferred.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean tryTransfer(E e, long timeout, TimeUnit unit)
+ throws InterruptedException {
+ if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
+ return true;
+ if (!Thread.interrupted())
+ return false;
+ throw new InterruptedException();
+ }
+
+ public E take() throws InterruptedException {
+ E e = xfer(null, false, SYNC, 0);
+ if (e != null)
+ return e;
+ Thread.interrupted();
+ throw new InterruptedException();
+ }
+
+ public E poll(long timeout, TimeUnit unit) throws InterruptedException {
+ E e = xfer(null, false, TIMED, unit.toNanos(timeout));
+ if (e != null || !Thread.interrupted())
+ return e;
+ throw new InterruptedException();
+ }
+
+ public E poll() {
+ return xfer(null, false, NOW, 0);
+ }
+
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws IllegalArgumentException {@inheritDoc}
+ */
+ public int drainTo(Collection<? super E> c) {
+ if (c == null)
+ throw new NullPointerException();
+ if (c == this)
+ throw new IllegalArgumentException();
+ int n = 0;
+ for (E e; (e = poll()) != null;) {
+ c.add(e);
+ ++n;
+ }
+ return n;
+ }
+
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws IllegalArgumentException {@inheritDoc}
+ */
+ public int drainTo(Collection<? super E> c, int maxElements) {
+ if (c == null)
+ throw new NullPointerException();
+ if (c == this)
+ throw new IllegalArgumentException();
+ int n = 0;
+ for (E e; n < maxElements && (e = poll()) != null;) {
+ c.add(e);
+ ++n;
+ }
+ return n;
+ }
+
+ /**
+ * Returns an iterator over the elements in this queue in proper sequence.
+ * The elements will be returned in order from first (head) to last (tail).
+ *
+ * <p>The returned iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException
+ * ConcurrentModificationException}, and guarantees to traverse
+ * elements as they existed upon construction of the iterator, and
+ * may (but is not guaranteed to) reflect any modifications
+ * subsequent to construction.
+ *
+ * @return an iterator over the elements in this queue in proper sequence
+ */
+ public Iterator<E> iterator() {
+ return new Itr();
+ }
+
+ public E peek() {
+ return firstDataItem();
+ }
+
+ /**
+ * Returns {@code true} if this queue contains no elements.
+ *
+ * @return {@code true} if this queue contains no elements
+ */
+ public boolean isEmpty() {
+ for (Node p = head; p != null; p = succ(p)) {
+ if (!p.isMatched())
+ return !p.isData;
+ }
+ return true;
+ }
+
+ public boolean hasWaitingConsumer() {
+ return firstOfMode(false) != null;
+ }
+
+ /**
+ * Returns the number of elements in this queue. If this queue
+ * contains more than {@code Integer.MAX_VALUE} elements, returns
+ * {@code Integer.MAX_VALUE}.
+ *
+ * <p>Beware that, unlike in most collections, this method is
+ * <em>NOT</em> a constant-time operation. Because of the
+ * asynchronous nature of these queues, determining the current
+ * number of elements requires an O(n) traversal.
+ *
+ * @return the number of elements in this queue
+ */
+ public int size() {
+ return countOfMode(true);
+ }
+
+ public int getWaitingConsumerCount() {
+ return countOfMode(false);
+ }
+
+ /**
+ * Removes a single instance of the specified element from this queue,
+ * if it is present. More formally, removes an element {@code e} such
+ * that {@code o.equals(e)}, if this queue contains one or more such
+ * elements.
+ * Returns {@code true} if this queue contained the specified element
+ * (or equivalently, if this queue changed as a result of the call).
+ *
+ * @param o element to be removed from this queue, if present
+ * @return {@code true} if this queue changed as a result of the call
+ */
+ public boolean remove(Object o) {
+ return findAndRemove(o);
+ }
+
+ /**
+ * Returns {@code true} if this queue contains the specified element.
+ * More formally, returns {@code true} if and only if this queue contains
+ * at least one element {@code e} such that {@code o.equals(e)}.
+ *
+ * @param o object to be checked for containment in this queue
+ * @return {@code true} if this queue contains the specified element
+ */
+ public boolean contains(Object o) {
+ if (o == null) return false;
+ for (Node p = head; p != null; p = succ(p)) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p && o.equals(item))
+ return true;
+ }
+ else if (item == null)
+ break;
+ }
+ return false;
+ }
+
+ /**
+ * Always returns {@code Integer.MAX_VALUE} because a
+ * {@code LinkedTransferQueue} is not capacity constrained.
+ *
+ * @return {@code Integer.MAX_VALUE} (as specified by
+ * {@link java.util.concurrent.BlockingQueue#remainingCapacity()
+ * BlockingQueue.remainingCapacity})
+ */
+ public int remainingCapacity() {
+ return Integer.MAX_VALUE;
+ }
+
+ /**
+ * Saves the state to a stream (that is, serializes it).
+ *
+ * @serialData All of the elements (each an {@code E}) in
+ * the proper order, followed by a null
+ * @param s the stream
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ for (E e : this)
+ s.writeObject(e);
+ // Use trailing null as sentinel
+ s.writeObject(null);
+ }
+
+ /**
+ * Reconstitutes the Queue instance from a stream (that is,
+ * deserializes it).
+ *
+ * @param s the stream
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ for (;;) {
+ @SuppressWarnings("unchecked")
+ E item = (E) s.readObject();
+ if (item == null)
+ break;
+ else
+ offer(item);
+ }
+ }
+
+ // Unsafe mechanics
+
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long headOffset;
+ private static final long tailOffset;
+ private static final long sweepVotesOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = LinkedTransferQueue.class;
+ headOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("head"));
+ tailOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("tail"));
+ sweepVotesOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("sweepVotes"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/Phaser.java b/src/main/java/jsr166y/Phaser.java
new file mode 100644
index 0000000..45dd0ba
--- /dev/null
+++ b/src/main/java/jsr166y/Phaser.java
@@ -0,0 +1,1164 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * A reusable synchronization barrier, similar in functionality to
+ * {@link java.util.concurrent.CyclicBarrier CyclicBarrier} and
+ * {@link java.util.concurrent.CountDownLatch CountDownLatch}
+ * but supporting more flexible usage.
+ *
+ * <p><b>Registration.</b> Unlike the case for other barriers, the
+ * number of parties <em>registered</em> to synchronize on a phaser
+ * may vary over time. Tasks may be registered at any time (using
+ * methods {@link #register}, {@link #bulkRegister}, or forms of
+ * constructors establishing initial numbers of parties), and
+ * optionally deregistered upon any arrival (using {@link
+ * #arriveAndDeregister}). As is the case with most basic
+ * synchronization constructs, registration and deregistration affect
+ * only internal counts; they do not establish any further internal
+ * bookkeeping, so tasks cannot query whether they are registered.
+ * (However, you can introduce such bookkeeping by subclassing this
+ * class.)
+ *
+ * <p><b>Synchronization.</b> Like a {@code CyclicBarrier}, a {@code
+ * Phaser} may be repeatedly awaited. Method {@link
+ * #arriveAndAwaitAdvance} has effect analogous to {@link
+ * java.util.concurrent.CyclicBarrier#await CyclicBarrier.await}. Each
+ * generation of a phaser has an associated phase number. The phase
+ * number starts at zero, and advances when all parties arrive at the
+ * phaser, wrapping around to zero after reaching {@code
+ * Integer.MAX_VALUE}. The use of phase numbers enables independent
+ * control of actions upon arrival at a phaser and upon awaiting
+ * others, via two kinds of methods that may be invoked by any
+ * registered party:
+ *
+ * <ul>
+ *
+ * <li> <b>Arrival.</b> Methods {@link #arrive} and
+ * {@link #arriveAndDeregister} record arrival. These methods
+ * do not block, but return an associated <em>arrival phase
+ * number</em>; that is, the phase number of the phaser to which
+ * the arrival applied. When the final party for a given phase
+ * arrives, an optional action is performed and the phase
+ * advances. These actions are performed by the party
+ * triggering a phase advance, and are arranged by overriding
+ * method {@link #onAdvance(int, int)}, which also controls
+ * termination. Overriding this method is similar to, but more
+ * flexible than, providing a barrier action to a {@code
+ * CyclicBarrier}.
+ *
+ * <li> <b>Waiting.</b> Method {@link #awaitAdvance} requires an
+ * argument indicating an arrival phase number, and returns when
+ * the phaser advances to (or is already at) a different phase.
+ * Unlike similar constructions using {@code CyclicBarrier},
+ * method {@code awaitAdvance} continues to wait even if the
+ * waiting thread is interrupted. Interruptible and timeout
+ * versions are also available, but exceptions encountered while
+ * tasks wait interruptibly or with timeout do not change the
+ * state of the phaser. If necessary, you can perform any
+ * associated recovery within handlers of those exceptions,
+ * often after invoking {@code forceTermination}. Phasers may
+ * also be used by tasks executing in a {@link ForkJoinPool},
+ * which will ensure sufficient parallelism to execute tasks
+ * when others are blocked waiting for a phase to advance.
+ *
+ * </ul>
+ *
+ * <p><b>Termination.</b> A phaser may enter a <em>termination</em>
+ * state, that may be checked using method {@link #isTerminated}. Upon
+ * termination, all synchronization methods immediately return without
+ * waiting for advance, as indicated by a negative return value.
+ * Similarly, attempts to register upon termination have no effect.
+ * Termination is triggered when an invocation of {@code onAdvance}
+ * returns {@code true}. The default implementation returns {@code
+ * true} if a deregistration has caused the number of registered
+ * parties to become zero. As illustrated below, when phasers control
+ * actions with a fixed number of iterations, it is often convenient
+ * to override this method to cause termination when the current phase
+ * number reaches a threshold. Method {@link #forceTermination} is
+ * also available to abruptly release waiting threads and allow them
+ * to terminate.
+ *
+ * <p><b>Tiering.</b> Phasers may be <em>tiered</em> (i.e.,
+ * constructed in tree structures) to reduce contention. Phasers with
+ * large numbers of parties that would otherwise experience heavy
+ * synchronization contention costs may instead be set up so that
+ * groups of sub-phasers share a common parent. This may greatly
+ * increase throughput even though it incurs greater per-operation
+ * overhead.
+ *
+ * <p>In a tree of tiered phasers, registration and deregistration of
+ * child phasers with their parent are managed automatically.
+ * Whenever the number of registered parties of a child phaser becomes
+ * non-zero (as established in the {@link #Phaser(Phaser,int)}
+ * constructor, {@link #register}, or {@link #bulkRegister}), the
+ * child phaser is registered with its parent. Whenever the number of
+ * registered parties becomes zero as the result of an invocation of
+ * {@link #arriveAndDeregister}, the child phaser is deregistered
+ * from its parent.
+ *
+ * <p><b>Monitoring.</b> While synchronization methods may be invoked
+ * only by registered parties, the current state of a phaser may be
+ * monitored by any caller. At any given moment there are {@link
+ * #getRegisteredParties} parties in total, of which {@link
+ * #getArrivedParties} have arrived at the current phase ({@link
+ * #getPhase}). When the remaining ({@link #getUnarrivedParties})
+ * parties arrive, the phase advances. The values returned by these
+ * methods may reflect transient states and so are not in general
+ * useful for synchronization control. Method {@link #toString}
+ * returns snapshots of these state queries in a form convenient for
+ * informal monitoring.
+ *
+ * <p><b>Sample usages:</b>
+ *
+ * <p>A {@code Phaser} may be used instead of a {@code CountDownLatch}
+ * to control a one-shot action serving a variable number of parties.
+ * The typical idiom is for the method setting this up to first
+ * register, then start the actions, then deregister, as in:
+ *
+ * <pre> {@code
+ * void runTasks(List<Runnable> tasks) {
+ * final Phaser phaser = new Phaser(1); // "1" to register self
+ * // create and start threads
+ * for (final Runnable task : tasks) {
+ * phaser.register();
+ * new Thread() {
+ * public void run() {
+ * phaser.arriveAndAwaitAdvance(); // await all creation
+ * task.run();
+ * }
+ * }.start();
+ * }
+ *
+ * // allow threads to start and deregister self
+ * phaser.arriveAndDeregister();
+ * }}</pre>
+ *
+ * <p>One way to cause a set of threads to repeatedly perform actions
+ * for a given number of iterations is to override {@code onAdvance}:
+ *
+ * <pre> {@code
+ * void startTasks(List<Runnable> tasks, final int iterations) {
+ * final Phaser phaser = new Phaser() {
+ * protected boolean onAdvance(int phase, int registeredParties) {
+ * return phase >= iterations || registeredParties == 0;
+ * }
+ * };
+ * phaser.register();
+ * for (final Runnable task : tasks) {
+ * phaser.register();
+ * new Thread() {
+ * public void run() {
+ * do {
+ * task.run();
+ * phaser.arriveAndAwaitAdvance();
+ * } while (!phaser.isTerminated());
+ * }
+ * }.start();
+ * }
+ * phaser.arriveAndDeregister(); // deregister self, don't wait
+ * }}</pre>
+ *
+ * If the main task must later await termination, it
+ * may re-register and then execute a similar loop:
+ * <pre> {@code
+ * // ...
+ * phaser.register();
+ * while (!phaser.isTerminated())
+ * phaser.arriveAndAwaitAdvance();}</pre>
+ *
+ * <p>Related constructions may be used to await particular phase numbers
+ * in contexts where you are sure that the phase will never wrap around
+ * {@code Integer.MAX_VALUE}. For example:
+ *
+ * <pre> {@code
+ * void awaitPhase(Phaser phaser, int phase) {
+ * int p = phaser.register(); // assumes caller not already registered
+ * while (p < phase) {
+ * if (phaser.isTerminated())
+ * // ... deal with unexpected termination
+ * else
+ * p = phaser.arriveAndAwaitAdvance();
+ * }
+ * phaser.arriveAndDeregister();
+ * }}</pre>
+ *
+ *
+ * <p>To create a set of {@code n} tasks using a tree of phasers, you
+ * could use code of the following form, assuming a Task class with a
+ * constructor accepting a {@code Phaser} that it registers with upon
+ * construction. After invocation of {@code build(new Task[n], 0, n,
+ * new Phaser())}, these tasks could then be started, for example by
+ * submitting to a pool:
+ *
+ * <pre> {@code
+ * void build(Task[] tasks, int lo, int hi, Phaser ph) {
+ * if (hi - lo > TASKS_PER_PHASER) {
+ * for (int i = lo; i < hi; i += TASKS_PER_PHASER) {
+ * int j = Math.min(i + TASKS_PER_PHASER, hi);
+ * build(tasks, i, j, new Phaser(ph));
+ * }
+ * } else {
+ * for (int i = lo; i < hi; ++i)
+ * tasks[i] = new Task(ph);
+ * // assumes new Task(ph) performs ph.register()
+ * }
+ * }}</pre>
+ *
+ * The best value of {@code TASKS_PER_PHASER} depends mainly on
+ * expected synchronization rates. A value as low as four may
+ * be appropriate for extremely small per-phase task bodies (thus
+ * high rates), or up to hundreds for extremely large ones.
+ *
+ * <p><b>Implementation notes</b>: This implementation restricts the
+ * maximum number of parties to 65535. Attempts to register additional
+ * parties result in {@code IllegalStateException}. However, you can and
+ * should create tiered phasers to accommodate arbitrarily large sets
+ * of participants.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public class Phaser {
+ /*
+ * This class implements an extension of X10 "clocks". Thanks to
+ * Vijay Saraswat for the idea, and to Vivek Sarkar for
+ * enhancements to extend functionality.
+ */
+
+ /**
+ * Primary state representation, holding four bit-fields:
+ *
+ * unarrived -- the number of parties yet to hit barrier (bits 0-15)
+ * parties -- the number of parties to wait (bits 16-31)
+ * phase -- the generation of the barrier (bits 32-62)
+ * terminated -- set if barrier is terminated (bit 63 / sign)
+ *
+     * A phaser with no registered parties is distinguished by the
+     * otherwise illegal state of having zero parties and one
+     * unarrived party (encoded as EMPTY below).
+ *
+ * To efficiently maintain atomicity, these values are packed into
+ * a single (atomic) long. Good performance relies on keeping
+ * state decoding and encoding simple, and keeping race windows
+ * short.
+ *
+ * All state updates are performed via CAS except initial
+ * registration of a sub-phaser (i.e., one with a non-null
+ * parent). In this (relatively rare) case, we use built-in
+ * synchronization to lock while first registering with its
+ * parent.
+ *
+ * The phase of a subphaser is allowed to lag that of its
+ * ancestors until it is actually accessed -- see method
+ * reconcileState.
+ */
+ private volatile long state;
+
+ private static final int MAX_PARTIES = 0xffff;
+ private static final int MAX_PHASE = Integer.MAX_VALUE;
+ private static final int PARTIES_SHIFT = 16;
+ private static final int PHASE_SHIFT = 32;
+ private static final int UNARRIVED_MASK = 0xffff; // to mask ints
+ private static final long PARTIES_MASK = 0xffff0000L; // to mask longs
+ private static final long COUNTS_MASK = 0xffffffffL;
+ private static final long TERMINATION_BIT = 1L << 63;
+
+ // some special values
+ private static final int ONE_ARRIVAL = 1;
+ private static final int ONE_PARTY = 1 << PARTIES_SHIFT;
+ private static final int ONE_DEREGISTER = ONE_ARRIVAL|ONE_PARTY;
+ private static final int EMPTY = 1;
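+
+    // Worked example (illustrative): a root phaser at phase 2 with
+    // 3 registered, all-unarrived parties packs its state as
+    //   (2L << PHASE_SHIFT) | (3L << PARTIES_SHIFT) | 3
+    //   == 0x0000000200030003L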
+
+ // The following unpacking methods are usually manually inlined
+
+ private static int unarrivedOf(long s) {
+ int counts = (int)s;
+ return (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK);
+ }
+
+ private static int partiesOf(long s) {
+ return (int)s >>> PARTIES_SHIFT;
+ }
+
+ private static int phaseOf(long s) {
+ return (int)(s >>> PHASE_SHIFT);
+ }
+
+ private static int arrivedOf(long s) {
+ int counts = (int)s;
+ return (counts == EMPTY) ? 0 :
+ (counts >>> PARTIES_SHIFT) - (counts & UNARRIVED_MASK);
+ }
+
+ /**
+ * The parent of this phaser, or null if none
+ */
+ private final Phaser parent;
+
+ /**
+ * The root of phaser tree. Equals this if not in a tree.
+ */
+ private final Phaser root;
+
+ /**
+ * Heads of Treiber stacks for waiting threads. To eliminate
+ * contention when releasing some threads while adding others, we
+ * use two of them, alternating across even and odd phases.
+ * Subphasers share queues with root to speed up releases.
+ */
+ private final AtomicReference<QNode> evenQ;
+ private final AtomicReference<QNode> oddQ;
+
+ private AtomicReference<QNode> queueFor(int phase) {
+ return ((phase & 1) == 0) ? evenQ : oddQ;
+ }
+
+ /**
+ * Returns message string for bounds exceptions on arrival.
+ */
+ private String badArrive(long s) {
+ return "Attempted arrival of unregistered party for " +
+ stateToString(s);
+ }
+
+ /**
+ * Returns message string for bounds exceptions on registration.
+ */
+ private String badRegister(long s) {
+ return "Attempt to register more than " +
+ MAX_PARTIES + " parties for " + stateToString(s);
+ }
+
+ /**
+ * Main implementation for methods arrive and arriveAndDeregister.
+ * Manually tuned to speed up and minimize race windows for the
+ * common case of just decrementing unarrived field.
+ *
+ * @param adjust value to subtract from state;
+ * ONE_ARRIVAL for arrive,
+ * ONE_DEREGISTER for arriveAndDeregister
+ */
+ private int doArrive(int adjust) {
+ final Phaser root = this.root;
+ for (;;) {
+ long s = (root == this) ? state : reconcileState();
+ int phase = (int)(s >>> PHASE_SHIFT);
+ if (phase < 0)
+ return phase;
+ int counts = (int)s;
+ int unarrived = (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK);
+ if (unarrived <= 0)
+ throw new IllegalStateException(badArrive(s));
+ if (UNSAFE.compareAndSwapLong(this, stateOffset, s, s-=adjust)) {
+ if (unarrived == 1) {
+ long n = s & PARTIES_MASK; // base of next state
+ int nextUnarrived = (int)n >>> PARTIES_SHIFT;
+ if (root == this) {
+ if (onAdvance(phase, nextUnarrived))
+ n |= TERMINATION_BIT;
+ else if (nextUnarrived == 0)
+ n |= EMPTY;
+ else
+ n |= nextUnarrived;
+ int nextPhase = (phase + 1) & MAX_PHASE;
+ n |= (long)nextPhase << PHASE_SHIFT;
+ UNSAFE.compareAndSwapLong(this, stateOffset, s, n);
+ releaseWaiters(phase);
+ }
+ else if (nextUnarrived == 0) { // propagate deregistration
+ phase = parent.doArrive(ONE_DEREGISTER);
+ UNSAFE.compareAndSwapLong(this, stateOffset,
+ s, s | EMPTY);
+ }
+ else
+ phase = parent.doArrive(ONE_ARRIVAL);
+ }
+ return phase;
+ }
+ }
+ }
+
+ /**
+ * Implementation of register, bulkRegister
+ *
+ * @param registrations number to add to both parties and
+ * unarrived fields. Must be greater than zero.
+ */
+ private int doRegister(int registrations) {
+ // adjustment to state
+ long adjust = ((long)registrations << PARTIES_SHIFT) | registrations;
+ final Phaser parent = this.parent;
+ int phase;
+ for (;;) {
+ long s = (parent == null) ? state : reconcileState();
+ int counts = (int)s;
+ int parties = counts >>> PARTIES_SHIFT;
+ int unarrived = counts & UNARRIVED_MASK;
+ if (registrations > MAX_PARTIES - parties)
+ throw new IllegalStateException(badRegister(s));
+ phase = (int)(s >>> PHASE_SHIFT);
+ if (phase < 0)
+ break;
+ if (counts != EMPTY) { // not 1st registration
+ if (parent == null || reconcileState() == s) {
+ if (unarrived == 0) // wait out advance
+ root.internalAwaitAdvance(phase, null);
+ else if (UNSAFE.compareAndSwapLong(this, stateOffset,
+ s, s + adjust))
+ break;
+ }
+ }
+ else if (parent == null) { // 1st root registration
+ long next = ((long)phase << PHASE_SHIFT) | adjust;
+ if (UNSAFE.compareAndSwapLong(this, stateOffset, s, next))
+ break;
+ }
+ else {
+ synchronized (this) { // 1st sub registration
+ if (state == s) { // recheck under lock
+ phase = parent.doRegister(1);
+ if (phase < 0)
+ break;
+ // finish registration whenever parent registration
+ // succeeded, even when racing with termination,
+ // since these are part of the same "transaction".
+ while (!UNSAFE.compareAndSwapLong
+ (this, stateOffset, s,
+ ((long)phase << PHASE_SHIFT) | adjust)) {
+ s = state;
+ phase = (int)(root.state >>> PHASE_SHIFT);
+ // assert (int)s == EMPTY;
+ }
+ break;
+ }
+ }
+ }
+ }
+ return phase;
+ }
+
+ /**
+ * Resolves lagged phase propagation from root if necessary.
+ * Reconciliation normally occurs when root has advanced but
+ * subphasers have not yet done so, in which case they must finish
+ * their own advance by setting unarrived to parties (or if
+ * parties is zero, resetting to unregistered EMPTY state).
+ *
+ * @return reconciled state
+ */
+ private long reconcileState() {
+ final Phaser root = this.root;
+ long s = state;
+ if (root != this) {
+ int phase, p;
+ // CAS to root phase with current parties, tripping unarrived
+ while ((phase = (int)(root.state >>> PHASE_SHIFT)) !=
+ (int)(s >>> PHASE_SHIFT) &&
+ !UNSAFE.compareAndSwapLong
+ (this, stateOffset, s,
+ s = (((long)phase << PHASE_SHIFT) |
+ ((phase < 0) ? (s & COUNTS_MASK) :
+ (((p = (int)s >>> PARTIES_SHIFT) == 0) ? EMPTY :
+ ((s & PARTIES_MASK) | p))))))
+ s = state;
+ }
+ return s;
+ }
+
+ /**
+ * Creates a new phaser with no initially registered parties, no
+ * parent, and initial phase number 0. Any thread using this
+ * phaser will need to first register for it.
+ */
+ public Phaser() {
+ this(null, 0);
+ }
+
+ /**
+ * Creates a new phaser with the given number of registered
+ * unarrived parties, no parent, and initial phase number 0.
+ *
+ * @param parties the number of parties required to advance to the
+ * next phase
+ * @throws IllegalArgumentException if parties less than zero
+ * or greater than the maximum number of parties supported
+ */
+ public Phaser(int parties) {
+ this(null, parties);
+ }
+
+ /**
+ * Equivalent to {@link #Phaser(Phaser, int) Phaser(parent, 0)}.
+ *
+ * @param parent the parent phaser
+ */
+ public Phaser(Phaser parent) {
+ this(parent, 0);
+ }
+
+ /**
+ * Creates a new phaser with the given parent and number of
+ * registered unarrived parties. When the given parent is non-null
+ * and the given number of parties is greater than zero, this
+ * child phaser is registered with its parent.
+ *
+ * @param parent the parent phaser
+ * @param parties the number of parties required to advance to the
+ * next phase
+ * @throws IllegalArgumentException if parties less than zero
+ * or greater than the maximum number of parties supported
+ */
+ public Phaser(Phaser parent, int parties) {
+ if (parties >>> PARTIES_SHIFT != 0)
+ throw new IllegalArgumentException("Illegal number of parties");
+ int phase = 0;
+ this.parent = parent;
+ if (parent != null) {
+ final Phaser root = parent.root;
+ this.root = root;
+ this.evenQ = root.evenQ;
+ this.oddQ = root.oddQ;
+ if (parties != 0)
+ phase = parent.doRegister(1);
+ }
+ else {
+ this.root = this;
+ this.evenQ = new AtomicReference<QNode>();
+ this.oddQ = new AtomicReference<QNode>();
+ }
+ this.state = (parties == 0) ? (long)EMPTY :
+ ((long)phase << PHASE_SHIFT) |
+ ((long)parties << PARTIES_SHIFT) |
+ ((long)parties);
+ }
+
+ /**
+ * Adds a new unarrived party to this phaser. If an ongoing
+ * invocation of {@link #onAdvance} is in progress, this method
+ * may await its completion before returning. If this phaser has
+ * a parent, and this phaser previously had no registered parties,
+ * this child phaser is also registered with its parent. If
+ * this phaser is terminated, the attempt to register has
+ * no effect, and a negative value is returned.
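+ *
+ * <p>For example, a party joining mid-computation might do the
+ * following ({@code runTask} is a placeholder for the party's work):
+ *
+ * <pre> {@code
+ * int phase = phaser.register();
+ * if (phase >= 0)          // negative means already terminated
+ *     runTask(phaser);     // participate starting at this phase
+ * }</pre>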
+ *
+ * @return the arrival phase number to which this registration
+ * applied. If this value is negative, then this phaser has
+ * terminated, in which case registration has no effect.
+ * @throws IllegalStateException if attempting to register more
+ * than the maximum supported number of parties
+ */
+ public int register() {
+ return doRegister(1);
+ }
+
+ /**
+ * Adds the given number of new unarrived parties to this phaser.
+ * If an ongoing invocation of {@link #onAdvance} is in progress,
+ * this method may await its completion before returning. If this
+ * phaser has a parent, and the given number of parties is greater
+ * than zero, and this phaser previously had no registered
+ * parties, this child phaser is also registered with its parent.
+ * If this phaser is terminated, the attempt to register has no
+ * effect, and a negative value is returned.
+ *
+ * @param parties the number of additional parties required to
+ * advance to the next phase
+ * @return the arrival phase number to which this registration
+ * applied. If this value is negative, then this phaser has
+ * terminated, in which case registration has no effect.
+ * @throws IllegalStateException if attempting to register more
+ * than the maximum supported number of parties
+ * @throws IllegalArgumentException if {@code parties < 0}
+ */
+ public int bulkRegister(int parties) {
+ if (parties < 0)
+ throw new IllegalArgumentException();
+ if (parties == 0)
+ return getPhase();
+ return doRegister(parties);
+ }
+
+ /**
+ * Arrives at this phaser, without waiting for others to arrive.
+ *
+ * <p>It is a usage error for an unregistered party to invoke this
+ * method. However, this error may result in an {@code
+ * IllegalStateException} only upon some subsequent operation on
+ * this phaser, if ever.
+ *
+ * @return the arrival phase number, or a negative value if terminated
+ * @throws IllegalStateException if not terminated and the number
+ * of unarrived parties would become negative
+ */
+ public int arrive() {
+ return doArrive(ONE_ARRIVAL);
+ }
+
+ /**
+ * Arrives at this phaser and deregisters from it without waiting
+ * for others to arrive. Deregistration reduces the number of
+ * parties required to advance in future phases. If this phaser
+ * has a parent, and deregistration causes this phaser to have
+ * zero parties, this phaser is also deregistered from its parent.
+ *
+ * <p>It is a usage error for an unregistered party to invoke this
+ * method. However, this error may result in an {@code
+ * IllegalStateException} only upon some subsequent operation on
+ * this phaser, if ever.
+ *
+ * @return the arrival phase number, or a negative value if terminated
+ * @throws IllegalStateException if not terminated and the number
+ * of registered or unarrived parties would become negative
+ */
+ public int arriveAndDeregister() {
+ return doArrive(ONE_DEREGISTER);
+ }
+
+ /**
+ * Arrives at this phaser and awaits others. Equivalent in effect
+ * to {@code awaitAdvance(arrive())}. If you need to await with
+ * interruption or timeout, you can arrange this with an analogous
+ * construction using one of the other forms of the {@code
+ * awaitAdvance} method. If instead you need to deregister upon
+ * arrival, use {@code awaitAdvance(arriveAndDeregister())}.
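+ *
+ * <p>For example, {@code n} worker threads can use this phaser as a
+ * reusable barrier ({@code doPhaseWork} is a placeholder for the
+ * per-phase work):
+ *
+ * <pre> {@code
+ * final Phaser barrier = new Phaser(n); // n registered parties
+ * // in each of the n worker threads:
+ * while (!barrier.isTerminated()) {
+ *   doPhaseWork();
+ *   barrier.arriveAndAwaitAdvance();    // wait for the other parties
+ * }}</pre>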
+ *
+ * <p>It is a usage error for an unregistered party to invoke this
+ * method. However, this error may result in an {@code
+ * IllegalStateException} only upon some subsequent operation on
+ * this phaser, if ever.
+ *
+ * @return the arrival phase number, or the (negative)
+ * {@linkplain #getPhase() current phase} if terminated
+ * @throws IllegalStateException if not terminated and the number
+ * of unarrived parties would become negative
+ */
+ public int arriveAndAwaitAdvance() {
+ // Specialization of doArrive+awaitAdvance eliminating some reads/paths
+ final Phaser root = this.root;
+ for (;;) {
+ long s = (root == this) ? state : reconcileState();
+ int phase = (int)(s >>> PHASE_SHIFT);
+ if (phase < 0)
+ return phase;
+ int counts = (int)s;
+ int unarrived = (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK);
+ if (unarrived <= 0)
+ throw new IllegalStateException(badArrive(s));
+ if (UNSAFE.compareAndSwapLong(this, stateOffset, s,
+ s -= ONE_ARRIVAL)) {
+ if (unarrived > 1)
+ return root.internalAwaitAdvance(phase, null);
+ if (root != this)
+ return parent.arriveAndAwaitAdvance();
+ long n = s & PARTIES_MASK; // base of next state
+ int nextUnarrived = (int)n >>> PARTIES_SHIFT;
+ if (onAdvance(phase, nextUnarrived))
+ n |= TERMINATION_BIT;
+ else if (nextUnarrived == 0)
+ n |= EMPTY;
+ else
+ n |= nextUnarrived;
+ int nextPhase = (phase + 1) & MAX_PHASE;
+ n |= (long)nextPhase << PHASE_SHIFT;
+ if (!UNSAFE.compareAndSwapLong(this, stateOffset, s, n))
+ return (int)(state >>> PHASE_SHIFT); // terminated
+ releaseWaiters(phase);
+ return nextPhase;
+ }
+ }
+ }
+
+ /**
+ * Awaits the phase of this phaser to advance from the given phase
+ * value, returning immediately if the current phase is not equal
+ * to the given phase value or this phaser is terminated.
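+ *
+ * <p>For example, a party may arrive and then wait separately,
+ * performing other work in between (a sketch; {@code doOtherWork} is
+ * a placeholder):
+ *
+ * <pre> {@code
+ * int phase = phaser.arrive(); // signal arrival without blocking
+ * doOtherWork();
+ * phaser.awaitAdvance(phase);  // now wait for the phase to advance
+ * }</pre>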
+ *
+ * @param phase an arrival phase number, or negative value if
+ * terminated; this argument is normally the value returned by a
+ * previous call to {@code arrive} or {@code arriveAndDeregister}.
+ * @return the next arrival phase number, or the argument if it is
+ * negative, or the (negative) {@linkplain #getPhase() current phase}
+ * if terminated
+ */
+ public int awaitAdvance(int phase) {
+ final Phaser root = this.root;
+ long s = (root == this) ? state : reconcileState();
+ int p = (int)(s >>> PHASE_SHIFT);
+ if (phase < 0)
+ return phase;
+ if (p == phase)
+ return root.internalAwaitAdvance(phase, null);
+ return p;
+ }
+
+ /**
+ * Awaits the phase of this phaser to advance from the given phase
+ * value, throwing {@code InterruptedException} if interrupted
+ * while waiting, or returning immediately if the current phase is
+ * not equal to the given phase value or this phaser is
+ * terminated.
+ *
+ * @param phase an arrival phase number, or negative value if
+ * terminated; this argument is normally the value returned by a
+ * previous call to {@code arrive} or {@code arriveAndDeregister}.
+ * @return the next arrival phase number, or the argument if it is
+ * negative, or the (negative) {@linkplain #getPhase() current phase}
+ * if terminated
+ * @throws InterruptedException if thread interrupted while waiting
+ */
+ public int awaitAdvanceInterruptibly(int phase)
+ throws InterruptedException {
+ final Phaser root = this.root;
+ long s = (root == this) ? state : reconcileState();
+ int p = (int)(s >>> PHASE_SHIFT);
+ if (phase < 0)
+ return phase;
+ if (p == phase) {
+ QNode node = new QNode(this, phase, true, false, 0L);
+ p = root.internalAwaitAdvance(phase, node);
+ if (node.wasInterrupted)
+ throw new InterruptedException();
+ }
+ return p;
+ }
+
+ /**
+ * Awaits the phase of this phaser to advance from the given phase
+ * value or the given timeout to elapse, throwing {@code
+ * InterruptedException} if interrupted while waiting, or
+ * returning immediately if the current phase is not equal to the
+ * given phase value or this phaser is terminated.
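+ *
+ * <p>For example (a sketch; real code would also decide how to
+ * proceed after a timeout):
+ *
+ * <pre> {@code
+ * int phase = phaser.arrive();
+ * try {
+ *   phase = phaser.awaitAdvanceInterruptibly(phase, 1, TimeUnit.SECONDS);
+ * } catch (TimeoutException e) {
+ *   // the phase did not advance within one second
+ * } catch (InterruptedException e) {
+ *   Thread.currentThread().interrupt(); // restore interrupt status
+ * }}</pre>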
+ *
+ * @param phase an arrival phase number, or negative value if
+ * terminated; this argument is normally the value returned by a
+ * previous call to {@code arrive} or {@code arriveAndDeregister}.
+ * @param timeout how long to wait before giving up, in units of
+ * {@code unit}
+ * @param unit a {@code TimeUnit} determining how to interpret the
+ * {@code timeout} parameter
+ * @return the next arrival phase number, or the argument if it is
+ * negative, or the (negative) {@linkplain #getPhase() current phase}
+ * if terminated
+ * @throws InterruptedException if thread interrupted while waiting
+ * @throws TimeoutException if timed out while waiting
+ */
+ public int awaitAdvanceInterruptibly(int phase,
+ long timeout, TimeUnit unit)
+ throws InterruptedException, TimeoutException {
+ long nanos = unit.toNanos(timeout);
+ final Phaser root = this.root;
+ long s = (root == this) ? state : reconcileState();
+ int p = (int)(s >>> PHASE_SHIFT);
+ if (phase < 0)
+ return phase;
+ if (p == phase) {
+ QNode node = new QNode(this, phase, true, true, nanos);
+ p = root.internalAwaitAdvance(phase, node);
+ if (node.wasInterrupted)
+ throw new InterruptedException();
+ else if (p == phase)
+ throw new TimeoutException();
+ }
+ return p;
+ }
+
+ /**
+ * Forces this phaser to enter termination state. Counts of
+ * registered parties are unaffected. If this phaser is a member
+ * of a tiered set of phasers, then all of the phasers in the set
+ * are terminated. If this phaser is already terminated, this
+ * method has no effect. This method may be useful for
+ * coordinating recovery after one or more tasks encounter
+ * unexpected exceptions.
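+ *
+ * <p>For example, a supervising thread might use it to unblock all
+ * waiting parties after a task fails ({@code runPhasedTasks} is a
+ * placeholder):
+ *
+ * <pre> {@code
+ * try {
+ *   runPhasedTasks(phaser);
+ * } catch (RuntimeException e) {
+ *   phaser.forceTermination(); // waiting parties return a negative phase
+ * }}</pre>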
+ */
+ public void forceTermination() {
+ // Only need to change root state
+ final Phaser root = this.root;
+ long s;
+ while ((s = root.state) >= 0) {
+ if (UNSAFE.compareAndSwapLong(root, stateOffset,
+ s, s | TERMINATION_BIT)) {
+ // signal all threads
+ releaseWaiters(0); // Waiters on evenQ
+ releaseWaiters(1); // Waiters on oddQ
+ return;
+ }
+ }
+ }
+
+ /**
+ * Returns the current phase number. The maximum phase number is
+ * {@code Integer.MAX_VALUE}, after which it restarts at
+ * zero. Upon termination, the phase number is negative,
+ * in which case the prevailing phase prior to termination
+ * may be obtained via {@code getPhase() + Integer.MIN_VALUE}.
+ *
+ * @return the phase number, or a negative value if terminated
+ */
+ public final int getPhase() {
+ return (int)(root.state >>> PHASE_SHIFT);
+ }
+
+ /**
+ * Returns the number of parties registered at this phaser.
+ *
+ * @return the number of parties
+ */
+ public int getRegisteredParties() {
+ return partiesOf(state);
+ }
+
+ /**
+ * Returns the number of registered parties that have arrived at
+ * the current phase of this phaser. If this phaser has terminated,
+ * the returned value is meaningless and arbitrary.
+ *
+ * @return the number of arrived parties
+ */
+ public int getArrivedParties() {
+ return arrivedOf(reconcileState());
+ }
+
+ /**
+ * Returns the number of registered parties that have not yet
+ * arrived at the current phase of this phaser. If this phaser has
+ * terminated, the returned value is meaningless and arbitrary.
+ *
+ * @return the number of unarrived parties
+ */
+ public int getUnarrivedParties() {
+ return unarrivedOf(reconcileState());
+ }
+
+ /**
+ * Returns the parent of this phaser, or {@code null} if none.
+ *
+ * @return the parent of this phaser, or {@code null} if none
+ */
+ public Phaser getParent() {
+ return parent;
+ }
+
+ /**
+ * Returns the root ancestor of this phaser, which is the same as
+ * this phaser if it has no parent.
+ *
+ * @return the root ancestor of this phaser
+ */
+ public Phaser getRoot() {
+ return root;
+ }
+
+ /**
+ * Returns {@code true} if this phaser has been terminated.
+ *
+ * @return {@code true} if this phaser has been terminated
+ */
+ public boolean isTerminated() {
+ return root.state < 0L;
+ }
+
+ /**
+ * Overridable method to perform an action upon impending phase
+ * advance, and to control termination. This method is invoked
+ * upon arrival of the party advancing this phaser (when all other
+ * waiting parties are dormant). If this method returns {@code
+ * true}, this phaser will be set to a final termination state
+ * upon advance, and subsequent calls to {@link #isTerminated}
+ * will return true. Any (unchecked) Exception or Error thrown by
+ * an invocation of this method is propagated to the party
+ * attempting to advance this phaser, in which case no advance
+ * occurs.
+ *
+ * <p>The arguments to this method provide the state of the phaser
+ * prevailing for the current transition. The effects of invoking
+ * arrival, registration, and waiting methods on this phaser from
+ * within {@code onAdvance} are unspecified and should not be
+ * relied on.
+ *
+ * <p>If this phaser is a member of a tiered set of phasers, then
+ * {@code onAdvance} is invoked only for its root phaser on each
+ * advance.
+ *
+ * <p>To support the most common use cases, the default
+ * implementation of this method returns {@code true} when the
+ * number of registered parties has become zero as the result of a
+ * party invoking {@code arriveAndDeregister}. You can disable
+ * this behavior, thus enabling continuation upon future
+ * registrations, by overriding this method to always return
+ * {@code false}:
+ *
+ * <pre> {@code
+ * Phaser phaser = new Phaser() {
+ * protected boolean onAdvance(int phase, int parties) { return false; }
+ * }}</pre>
+ *
+ * @param phase the current phase number on entry to this method,
+ * before this phaser is advanced
+ * @param registeredParties the current number of registered parties
+ * @return {@code true} if this phaser should terminate
+ */
+ protected boolean onAdvance(int phase, int registeredParties) {
+ return registeredParties == 0;
+ }
+
+ /**
+ * Returns a string identifying this phaser, as well as its
+ * state. The state, in brackets, includes the String {@code
+ * "phase = "} followed by the phase number, {@code "parties = "}
+ * followed by the number of registered parties, and {@code
+ * "arrived = "} followed by the number of arrived parties.
+ *
+ * @return a string identifying this phaser, as well as its state
+ */
+ public String toString() {
+ return stateToString(reconcileState());
+ }
+
+ /**
+ * Implementation of toString and string-based error messages
+ */
+ private String stateToString(long s) {
+ return super.toString() +
+ "[phase = " + phaseOf(s) +
+ " parties = " + partiesOf(s) +
+ " arrived = " + arrivedOf(s) + "]";
+ }
+
+ // Waiting mechanics
+
+ /**
+ * Removes and signals threads from queue for phase.
+ */
+ private void releaseWaiters(int phase) {
+ QNode q; // first element of queue
+ Thread t; // its thread
+ AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ;
+ while ((q = head.get()) != null &&
+ q.phase != (int)(root.state >>> PHASE_SHIFT)) {
+ if (head.compareAndSet(q, q.next) &&
+ (t = q.thread) != null) {
+ q.thread = null;
+ LockSupport.unpark(t);
+ }
+ }
+ }
+
+ /**
+ * Variant of releaseWaiters that additionally tries to remove any
+ * nodes no longer waiting for advance due to timeout or
+ * interrupt. Currently, nodes are removed only if they are at
+ * head of queue, which suffices to reduce memory footprint in
+ * most usages.
+ *
+ * @return current phase on exit
+ */
+ private int abortWait(int phase) {
+ AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ;
+ for (;;) {
+ Thread t;
+ QNode q = head.get();
+ int p = (int)(root.state >>> PHASE_SHIFT);
+ if (q == null || ((t = q.thread) != null && q.phase == p))
+ return p;
+ if (head.compareAndSet(q, q.next) && t != null) {
+ q.thread = null;
+ LockSupport.unpark(t);
+ }
+ }
+ }
+
+ /** The number of CPUs, for spin control */
+ private static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /**
+ * The number of times to spin before blocking while waiting for
+ * advance, per arrival while waiting. On multiprocessors, fully
+ * blocking and waking up a large number of threads all at once is
+ * usually a very slow process, so we use rechargeable spins to
+ * avoid it when threads regularly arrive: When a thread in
+ * internalAwaitAdvance notices another arrival before blocking,
+ * and there appear to be enough CPUs available, it spins
+ * SPINS_PER_ARRIVAL more times before blocking. The value trades
+ * off good-citizenship vs big unnecessary slowdowns.
+ */
+ static final int SPINS_PER_ARRIVAL = (NCPU < 2) ? 1 : 1 << 8;
+
+ /**
+ * Possibly blocks and waits for phase to advance unless aborted.
+ * Call only on root phaser.
+ *
+ * @param phase current phase
+ * @param node if non-null, the wait node to track interrupt and timeout;
+ * if null, denotes noninterruptible wait
+ * @return current phase
+ */
+ private int internalAwaitAdvance(int phase, QNode node) {
+ // assert root == this;
+ releaseWaiters(phase-1); // ensure old queue clean
+ boolean queued = false; // true when node is enqueued
+ int lastUnarrived = 0; // to increase spins upon change
+ int spins = SPINS_PER_ARRIVAL;
+ long s;
+ int p;
+ while ((p = (int)((s = state) >>> PHASE_SHIFT)) == phase) {
+ if (node == null) { // spinning in noninterruptible mode
+ int unarrived = (int)s & UNARRIVED_MASK;
+ if (unarrived != lastUnarrived &&
+ (lastUnarrived = unarrived) < NCPU)
+ spins += SPINS_PER_ARRIVAL;
+ boolean interrupted = Thread.interrupted();
+ if (interrupted || --spins < 0) { // need node to record intr
+ node = new QNode(this, phase, false, false, 0L);
+ node.wasInterrupted = interrupted;
+ }
+ }
+ else if (node.isReleasable()) // done or aborted
+ break;
+ else if (!queued) { // push onto queue
+ AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ;
+ QNode q = node.next = head.get();
+ if ((q == null || q.phase == phase) &&
+ (int)(state >>> PHASE_SHIFT) == phase) // avoid stale enq
+ queued = head.compareAndSet(q, node);
+ }
+ else {
+ try {
+ ForkJoinPool.managedBlock(node);
+ } catch (InterruptedException ie) {
+ node.wasInterrupted = true;
+ }
+ }
+ }
+
+ if (node != null) {
+ if (node.thread != null)
+ node.thread = null; // avoid need for unpark()
+ if (node.wasInterrupted && !node.interruptible)
+ Thread.currentThread().interrupt();
+ if (p == phase && (p = (int)(state >>> PHASE_SHIFT)) == phase)
+ return abortWait(phase); // possibly clean up on abort
+ }
+ releaseWaiters(phase);
+ return p;
+ }
+
+ /**
+ * Wait nodes for Treiber stack representing wait queue
+ */
+ static final class QNode implements ForkJoinPool.ManagedBlocker {
+ final Phaser phaser;
+ final int phase;
+ final boolean interruptible;
+ final boolean timed;
+ boolean wasInterrupted;
+ long nanos;
+ long lastTime;
+ volatile Thread thread; // nulled to cancel wait
+ QNode next;
+
+ QNode(Phaser phaser, int phase, boolean interruptible,
+ boolean timed, long nanos) {
+ this.phaser = phaser;
+ this.phase = phase;
+ this.interruptible = interruptible;
+ this.nanos = nanos;
+ this.timed = timed;
+ this.lastTime = timed ? System.nanoTime() : 0L;
+ thread = Thread.currentThread();
+ }
+
+ public boolean isReleasable() {
+ if (thread == null)
+ return true;
+ if (phaser.getPhase() != phase) {
+ thread = null;
+ return true;
+ }
+ if (Thread.interrupted())
+ wasInterrupted = true;
+ if (wasInterrupted && interruptible) {
+ thread = null;
+ return true;
+ }
+ if (timed) {
+ if (nanos > 0L) {
+ long now = System.nanoTime();
+ nanos -= now - lastTime;
+ lastTime = now;
+ }
+ if (nanos <= 0L) {
+ thread = null;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public boolean block() {
+ if (isReleasable())
+ return true;
+ else if (!timed)
+ LockSupport.park(this);
+ else if (nanos > 0)
+ LockSupport.parkNanos(this, nanos);
+ return isReleasable();
+ }
+ }
+
+ // Unsafe mechanics
+
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long stateOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = Phaser.class;
+ stateOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("state"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/src/main/java/jsr166y/RecursiveAction.java b/src/main/java/jsr166y/RecursiveAction.java
new file mode 100644
index 0000000..071ae28
--- /dev/null
+++ b/src/main/java/jsr166y/RecursiveAction.java
@@ -0,0 +1,164 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+/**
+ * A recursive resultless {@link ForkJoinTask}. This class
+ * establishes conventions to parameterize resultless actions as
+ * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the
+ * only valid value of type {@code Void}, methods such as {@code join}
+ * always return {@code null} upon completion.
+ *
+ * <p><b>Sample Usages.</b> Here is a simple but complete ForkJoin
+ * sort that sorts a given {@code long[]} array:
+ *
+ * <pre> {@code
+ * static class SortTask extends RecursiveAction {
+ * final long[] array; final int lo, hi;
+ * SortTask(long[] array, int lo, int hi) {
+ * this.array = array; this.lo = lo; this.hi = hi;
+ * }
+ * SortTask(long[] array) { this(array, 0, array.length); }
+ * protected void compute() {
+ * if (hi - lo < THRESHOLD)
+ * sortSequentially(lo, hi);
+ * else {
+ * int mid = (lo + hi) >>> 1;
+ * invokeAll(new SortTask(array, lo, mid),
+ * new SortTask(array, mid, hi));
+ * merge(lo, mid, hi);
+ * }
+ * }
+ * // implementation details follow:
+ * static final int THRESHOLD = 1000;
+ * void sortSequentially(int lo, int hi) {
+ * Arrays.sort(array, lo, hi);
+ * }
+ * void merge(int lo, int mid, int hi) {
+ * long[] buf = Arrays.copyOfRange(array, lo, mid);
+ * for (int i = 0, j = lo, k = mid; i < buf.length; j++)
+ * array[j] = (k == hi || buf[i] < array[k]) ?
+ * buf[i++] : array[k++];
+ * }
+ * }}</pre>
+ *
+ * You could then sort {@code anArray} by creating {@code new
+ * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more
+ * concrete simple example, the following task increments each element
+ * of an array:
+ * <pre> {@code
+ * class IncrementTask extends RecursiveAction {
+ * final long[] array; final int lo, hi;
+ * IncrementTask(long[] array, int lo, int hi) {
+ * this.array = array; this.lo = lo; this.hi = hi;
+ * }
+ * protected void compute() {
+ * if (hi - lo < THRESHOLD) {
+ * for (int i = lo; i < hi; ++i)
+ * array[i]++;
+ * }
+ * else {
+ * int mid = (lo + hi) >>> 1;
+ * invokeAll(new IncrementTask(array, lo, mid),
+ * new IncrementTask(array, mid, hi));
+ * }
+ * }
+ * }}</pre>
+ *
+ * <p>The following example illustrates some refinements and idioms
+ * that may lead to better performance: RecursiveActions need not be
+ * fully recursive, so long as they maintain the basic
+ * divide-and-conquer approach. Here is a class that sums the squares
+ * of each element of a double array, by subdividing out only the
+ * right-hand-sides of repeated divisions by two, and keeping track of
+ * them with a chain of {@code next} references. It uses a dynamic
+ * threshold based on method {@code getSurplusQueuedTaskCount}, but
+ * counterbalances potential excess partitioning by directly
+ * performing leaf actions on unstolen tasks rather than further
+ * subdividing.
+ *
+ * <pre> {@code
+ * double sumOfSquares(ForkJoinPool pool, double[] array) {
+ * int n = array.length;
+ * Applyer a = new Applyer(array, 0, n, null);
+ * pool.invoke(a);
+ * return a.result;
+ * }
+ *
+ * class Applyer extends RecursiveAction {
+ * final double[] array;
+ * final int lo, hi;
+ * double result;
+ * Applyer next; // keeps track of right-hand-side tasks
+ * Applyer(double[] array, int lo, int hi, Applyer next) {
+ * this.array = array; this.lo = lo; this.hi = hi;
+ * this.next = next;
+ * }
+ *
+ * double atLeaf(int l, int h) {
+ * double sum = 0;
+ * for (int i = l; i < h; ++i) // perform leftmost base step
+ * sum += array[i] * array[i];
+ * return sum;
+ * }
+ *
+ * protected void compute() {
+ * int l = lo;
+ * int h = hi;
+ * Applyer right = null;
+ * while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
+ * int mid = (l + h) >>> 1;
+ * right = new Applyer(array, mid, h, right);
+ * right.fork();
+ * h = mid;
+ * }
+ * double sum = atLeaf(l, h);
+ * while (right != null) {
+ * if (right.tryUnfork()) // directly calculate if not stolen
+ * sum += right.atLeaf(right.lo, right.hi);
+ * else {
+ * right.join();
+ * sum += right.result;
+ * }
+ * right = right.next;
+ * }
+ * result = sum;
+ * }
+ * }}</pre>
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class RecursiveAction extends ForkJoinTask<Void> {
+ private static final long serialVersionUID = 5232453952276485070L;
+
+ /**
+ * The main computation performed by this task.
+ */
+ protected abstract void compute();
+
+ /**
+ * Always returns {@code null}.
+ *
+ * @return {@code null} always
+ */
+ public final Void getRawResult() { return null; }
+
+ /**
+ * Requires null completion value.
+ */
+ protected final void setRawResult(Void mustBeNull) { }
+
+ /**
+ * Implements execution conventions for RecursiveActions.
+ */
+ protected final boolean exec() {
+ compute();
+ return true;
+ }
+
+}
diff --git a/src/main/java/jsr166y/RecursiveTask.java b/src/main/java/jsr166y/RecursiveTask.java
new file mode 100644
index 0000000..0192966
--- /dev/null
+++ b/src/main/java/jsr166y/RecursiveTask.java
@@ -0,0 +1,68 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+/**
+ * A recursive result-bearing {@link ForkJoinTask}.
+ *
+ * <p>For a classic example, here is a task computing Fibonacci numbers:
+ *
+ * <pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
+ * final int n;
+ * Fibonacci(int n) { this.n = n; }
+ * protected Integer compute() {
+ * if (n <= 1)
+ * return n;
+ * Fibonacci f1 = new Fibonacci(n - 1);
+ * f1.fork();
+ * Fibonacci f2 = new Fibonacci(n - 2);
+ * return f2.compute() + f1.join();
+ * }
+ * }}</pre>
+ *
+ * However, besides being a dumb way to compute Fibonacci functions
+ * (there is a simple fast linear algorithm that you'd use in
+ * practice), this is likely to perform poorly because the smallest
+ * subtasks are too small to be worthwhile splitting up. Instead, as
+ * is the case for nearly all fork/join applications, you'd pick some
+ * minimum granularity size (for example 10 here) for which you always
+ * sequentially solve rather than subdividing.
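+ *
+ * <p>One sketch of such a cutoff (the threshold of 10 is arbitrary):
+ *
+ * <pre> {@code
+ * protected Integer compute() {
+ *   if (n <= 10)
+ *     return seqFib(n);      // solve small cases sequentially
+ *   Fibonacci f1 = new Fibonacci(n - 1);
+ *   f1.fork();
+ *   Fibonacci f2 = new Fibonacci(n - 2);
+ *   return f2.compute() + f1.join();
+ * }
+ *
+ * int seqFib(int n) {        // simple linear iteration
+ *   int a = 0, b = 1;
+ *   for (int i = 0; i < n; ++i) { int t = a + b; a = b; b = t; }
+ *   return a;
+ * }}</pre>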
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
+ private static final long serialVersionUID = 5232453952276485270L;
+
+ /**
+ * The result of the computation.
+ */
+ V result;
+
+ /**
+ * The main computation performed by this task.
+ */
+ protected abstract V compute();
+
+ public final V getRawResult() {
+ return result;
+ }
+
+ protected final void setRawResult(V value) {
+ result = value;
+ }
+
+ /**
+ * Implements execution conventions for RecursiveTask.
+ */
+ protected final boolean exec() {
+ result = compute();
+ return true;
+ }
+
+}
diff --git a/src/main/java/jsr166y/ThreadLocalRandom.java b/src/main/java/jsr166y/ThreadLocalRandom.java
new file mode 100644
index 0000000..fe6fc8e
--- /dev/null
+++ b/src/main/java/jsr166y/ThreadLocalRandom.java
@@ -0,0 +1,197 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+
+import java.util.Random;
+
+/**
+ * A random number generator isolated to the current thread. Like the
+ * global {@link java.util.Random} generator used by the {@link
+ * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
+ * with an internally generated seed that may not otherwise be
+ * modified. When applicable, use of {@code ThreadLocalRandom} rather
+ * than shared {@code Random} objects in concurrent programs will
+ * typically encounter much less overhead and contention. Use of
+ * {@code ThreadLocalRandom} is particularly appropriate when multiple
+ * tasks (for example, each a {@link ForkJoinTask}) use random numbers
+ * in parallel in thread pools.
+ *
+ * <p>Usages of this class should typically be of the form:
+ * {@code ThreadLocalRandom.current().nextX(...)} (where
+ * {@code X} is {@code Int}, {@code Long}, etc).
+ * When all usages are of this form, it is never possible to
+ * accidentally share a {@code ThreadLocalRandom} across multiple threads.
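+ *
+ * <p>For example, to simulate a die roll:
+ *
+ * <pre> {@code
+ * int roll = ThreadLocalRandom.current().nextInt(1, 7); // 1..6 inclusive
+ * }</pre>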
+ *
+ * <p>This class also provides additional commonly used bounded random
+ * generation methods.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public class ThreadLocalRandom extends Random {
+ // same constants as Random, but must be redeclared because private
+ private static final long multiplier = 0x5DEECE66DL;
+ private static final long addend = 0xBL;
+ private static final long mask = (1L << 48) - 1;
+
+ /**
+ * The random seed. We can't use super.seed.
+ */
+ private long rnd;
+
+ /**
+ * Initialization flag to permit calls to setSeed to succeed only
+ * while executing the Random constructor. We can't allow others
+ * since it would cause setting seed in one part of a program to
+ * unintentionally impact other usages by the thread.
+ */
+ boolean initialized;
+
+ // Padding to help avoid memory contention among seed updates in
+ // different TLRs in the common case that they are located near
+ // each other.
+ private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
+
+ /**
+ * The actual ThreadLocal
+ */
+ private static final ThreadLocal<ThreadLocalRandom> localRandom =
+ new ThreadLocal<ThreadLocalRandom>() {
+ protected ThreadLocalRandom initialValue() {
+ return new ThreadLocalRandom();
+ }
+ };
+
+
+ /**
+ * Constructor called only by localRandom.initialValue.
+ */
+ ThreadLocalRandom() {
+ super();
+ initialized = true;
+ }
+
+ /**
+ * Returns the current thread's {@code ThreadLocalRandom}.
+ *
+ * @return the current thread's {@code ThreadLocalRandom}
+ */
+ public static ThreadLocalRandom current() {
+ return localRandom.get();
+ }
+
+ /**
+ * Throws {@code UnsupportedOperationException}. Setting seeds in
+ * this generator is not supported.
+ *
+ * @throws UnsupportedOperationException always
+ */
+ public void setSeed(long seed) {
+ if (initialized)
+ throw new UnsupportedOperationException();
+ rnd = (seed ^ multiplier) & mask;
+ }
+
+ protected int next(int bits) {
+ rnd = (rnd * multiplier + addend) & mask;
+ return (int) (rnd >>> (48-bits));
+ }
+
+ /**
+ * Returns a pseudorandom, uniformly distributed value between the
+ * given least value (inclusive) and bound (exclusive).
+ *
+ * @param least the least value returned
+ * @param bound the upper bound (exclusive)
+ * @return the next value
+ * @throws IllegalArgumentException if least greater than or equal
+ * to bound
+ */
+ public int nextInt(int least, int bound) {
+ if (least >= bound)
+ throw new IllegalArgumentException();
+ return nextInt(bound - least) + least;
+ }
+
+ /**
+ * Returns a pseudorandom, uniformly distributed value
+ * between 0 (inclusive) and the specified value (exclusive).
+ *
+ * @param n the bound on the random number to be returned. Must be
+ * positive.
+ * @return the next value
+ * @throws IllegalArgumentException if n is not positive
+ */
+ public long nextLong(long n) {
+ if (n <= 0)
+ throw new IllegalArgumentException("n must be positive");
+ // Divide n by two until small enough for nextInt. On each
+ // iteration (at most 31 of them but usually much less),
+ // randomly choose both whether to include high bit in result
+ // (offset) and whether to continue with the lower vs upper
+ // half (which makes a difference only if odd).
+ long offset = 0;
+ while (n >= Integer.MAX_VALUE) {
+ int bits = next(2);
+ long half = n >>> 1;
+ long nextn = ((bits & 2) == 0) ? half : n - half;
+ if ((bits & 1) == 0)
+ offset += n - nextn;
+ n = nextn;
+ }
+ return offset + nextInt((int) n);
+ }
+
+ /**
+ * Returns a pseudorandom, uniformly distributed value between the
+ * given least value (inclusive) and bound (exclusive).
+ *
+ * @param least the least value returned
+ * @param bound the upper bound (exclusive)
+ * @return the next value
+ * @throws IllegalArgumentException if least greater than or equal
+ * to bound
+ */
+ public long nextLong(long least, long bound) {
+ if (least >= bound)
+ throw new IllegalArgumentException();
+ return nextLong(bound - least) + least;
+ }
+
+ /**
+ * Returns a pseudorandom, uniformly distributed {@code double} value
+ * between 0 (inclusive) and the specified value (exclusive).
+ *
+ * @param n the bound on the random number to be returned. Must be
+ * positive.
+ * @return the next value
+ * @throws IllegalArgumentException if n is not positive
+ */
+ public double nextDouble(double n) {
+ if (n <= 0)
+ throw new IllegalArgumentException("n must be positive");
+ return nextDouble() * n;
+ }
+
+ /**
+ * Returns a pseudorandom, uniformly distributed value between the
+ * given least value (inclusive) and bound (exclusive).
+ *
+ * @param least the least value returned
+ * @param bound the upper bound (exclusive)
+ * @return the next value
+ * @throws IllegalArgumentException if least greater than or equal
+ * to bound
+ */
+ public double nextDouble(double least, double bound) {
+ if (least >= bound)
+ throw new IllegalArgumentException();
+ return nextDouble() * (bound - least) + least;
+ }
+
+ private static final long serialVersionUID = -5851777807851030925L;
+}
diff --git a/src/main/java/jsr166y/TransferQueue.java b/src/main/java/jsr166y/TransferQueue.java
new file mode 100644
index 0000000..3aeb69e
--- /dev/null
+++ b/src/main/java/jsr166y/TransferQueue.java
@@ -0,0 +1,133 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package jsr166y;
+import java.util.concurrent.*;
+
+/**
+ * A {@link BlockingQueue} in which producers may wait for consumers
+ * to receive elements. A {@code TransferQueue} may be useful for
+ * example in message passing applications in which producers
+ * sometimes (using method {@link #transfer}) await receipt of
+ * elements by consumers invoking {@code take} or {@code poll}, while
+ * at other times enqueue elements (via method {@code put}) without
+ * waiting for receipt.
+ * {@linkplain #tryTransfer(Object) Non-blocking} and
+ * {@linkplain #tryTransfer(Object,long,TimeUnit) time-out} versions of
+ * {@code tryTransfer} are also available.
+ * A {@code TransferQueue} may also be queried, via {@link
+ * #hasWaitingConsumer}, whether there are any threads waiting for
+ * items, which is a converse analogy to a {@code peek} operation.
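+ *
+ * <p>For example, a producer might hand an element directly to a
+ * waiting consumer when one is ready, and otherwise enqueue it
+ * without waiting for receipt (a sketch):
+ *
+ * <pre> {@code
+ * if (!queue.tryTransfer(e))  // no consumer currently waiting
+ *     queue.put(e);           // fall back to ordinary enqueue
+ * }</pre>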
+ *
+ * <p>Like other blocking queues, a {@code TransferQueue} may be
+ * capacity bounded. If so, an attempted transfer operation may
+ * initially block waiting for available space, and/or subsequently
+ * block waiting for reception by a consumer. Note that in a queue
+ * with zero capacity, such as {@link SynchronousQueue}, {@code put}
+ * and {@code transfer} are effectively synonymous.
+ *
+ * <p>This interface is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ * @param <E> the type of elements held in this collection
+ */
+public interface TransferQueue<E> extends BlockingQueue<E> {
+ /**
+ * Transfers the element to a waiting consumer immediately, if possible.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * otherwise returning {@code false} without enqueuing the element.
+ *
+ * @param e the element to transfer
+ * @return {@code true} if the element was transferred, else
+ * {@code false}
+ * @throws ClassCastException if the class of the specified element
+ * prevents it from being added to this queue
+ * @throws NullPointerException if the specified element is null
+ * @throws IllegalArgumentException if some property of the specified
+ * element prevents it from being added to this queue
+ */
+ boolean tryTransfer(E e);
+
+ /**
+ * Transfers the element to a consumer, waiting if necessary to do so.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else waits until the element is received by a consumer.
+ *
+ * @param e the element to transfer
+ * @throws InterruptedException if interrupted while waiting,
+ * in which case the element is not left enqueued
+ * @throws ClassCastException if the class of the specified element
+ * prevents it from being added to this queue
+ * @throws NullPointerException if the specified element is null
+ * @throws IllegalArgumentException if some property of the specified
+ * element prevents it from being added to this queue
+ */
+ void transfer(E e) throws InterruptedException;
+
+ /**
+ * Transfers the element to a consumer if it is possible to do so
+ * before the timeout elapses.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else waits until the element is received by a consumer,
+ * returning {@code false} if the specified wait time elapses
+ * before the element can be transferred.
+ *
+ * @param e the element to transfer
+ * @param timeout how long to wait before giving up, in units of
+ * {@code unit}
+ * @param unit a {@code TimeUnit} determining how to interpret the
+ * {@code timeout} parameter
+ * @return {@code true} if successful, or {@code false} if
+ * the specified waiting time elapses before completion,
+ * in which case the element is not left enqueued
+ * @throws InterruptedException if interrupted while waiting,
+ * in which case the element is not left enqueued
+ * @throws ClassCastException if the class of the specified element
+ * prevents it from being added to this queue
+ * @throws NullPointerException if the specified element is null
+ * @throws IllegalArgumentException if some property of the specified
+ * element prevents it from being added to this queue
+ */
+ boolean tryTransfer(E e, long timeout, TimeUnit unit)
+ throws InterruptedException;
+
+ /**
+ * Returns {@code true} if there is at least one consumer waiting
+ * to receive an element via {@link #take} or
+ * timed {@link #poll(long,TimeUnit) poll}.
+ * The return value represents a momentary state of affairs.
+ *
+ * @return {@code true} if there is at least one waiting consumer
+ */
+ boolean hasWaitingConsumer();
+
+ /**
+ * Returns an estimate of the number of consumers waiting to
+ * receive elements via {@link #take} or timed
+ * {@link #poll(long,TimeUnit) poll}. The return value is an
+ * approximation of a momentary state of affairs, that may be
+ * inaccurate if consumers have completed or given up waiting.
+ * The value may be useful for monitoring and heuristics, but
+ * not for synchronization control. Implementations of this
+ * method are likely to be noticeably slower than those for
+ * {@link #hasWaitingConsumer}.
+ *
+ * @return the number of consumers waiting to receive elements
+ */
+ int getWaitingConsumerCount();
+}
diff --git a/src/main/java/jsr166y/package-info.java b/src/main/java/jsr166y/package-info.java
new file mode 100644
index 0000000..9802803
--- /dev/null
+++ b/src/main/java/jsr166y/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+
+/**
+ * Preview versions of classes targeted for Java 7. Includes a
+ * fine-grained parallel computation framework: ForkJoinTasks and
+ * their related support classes provide a very efficient basis for
+ * obtaining platform-independent parallel speed-ups of
+ * computation-intensive operations. They are not a full substitute
+ * for the kinds of arbitrary processing supported by Executors or
+ * Threads. However, when applicable, they typically provide
+ * significantly greater performance on multiprocessor platforms.
+ *
+ * <p>Candidates for fork/join processing mainly include those that
+ * can be expressed using parallel divide-and-conquer techniques: To
+ * solve a problem, break it in two (or more) parts, and then solve
+ * those parts in parallel, continuing on in this way until the
+ * problem is too small to be broken up, so is solved directly. The
+ * underlying <em>work-stealing</em> framework makes subtasks
+ * available to other threads (normally one per CPU), that help
+ * complete the tasks. In general, the most efficient ForkJoinTasks
+ * are those that directly implement this algorithmic design pattern.
+ */
+package jsr166y;
diff --git a/src/main/java/org/apache/lucene/analysis/CustomAnalyzerWrapper.java b/src/main/java/org/apache/lucene/analysis/CustomAnalyzerWrapper.java
new file mode 100644
index 0000000..6ea95e4
--- /dev/null
+++ b/src/main/java/org/apache/lucene/analysis/CustomAnalyzerWrapper.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis;
+
+import java.io.Reader;
+
+/**
+ * Similar to Lucene's {@link AnalyzerWrapper}, but additionally allows setting the reuse strategy.
+ * // TODO: add to Lucene the ability to set it
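+ *
+ * <p>For example, a subclass might wrap every token stream with the
+ * {@link org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter}
+ * (a sketch; {@code delegate} is a placeholder for the wrapped analyzer):
+ *
+ * <pre> {@code
+ * protected Analyzer getWrappedAnalyzer(String fieldName) {
+ *   return delegate;
+ * }
+ *
+ * protected TokenStreamComponents wrapComponents(String fieldName,
+ *                                                TokenStreamComponents components) {
+ *   return new TokenStreamComponents(components.getTokenizer(),
+ *       new TruncateTokenFilter(components.getTokenStream(), 10));
+ * }}</pre>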
+ */
+public abstract class CustomAnalyzerWrapper extends Analyzer {
+
+ /**
+ * Creates a new CustomAnalyzerWrapper. Since the {@link Analyzer.ReuseStrategy} of
+ * the wrapped Analyzers is unknown, {@link Analyzer.PerFieldReuseStrategy} is assumed.
+ */
+ protected CustomAnalyzerWrapper(ReuseStrategy reuseStrategy) {
+ super(reuseStrategy);
+ }
+
+ /**
+ * Retrieves the wrapped Analyzer appropriate for analyzing the field with
+ * the given name
+ *
+ * @param fieldName Name of the field which is to be analyzed
+ * @return Analyzer for the field with the given name. Assumed to be non-null
+ */
+ protected abstract Analyzer getWrappedAnalyzer(String fieldName);
+
+ /**
+ * Wraps / alters the given TokenStreamComponents, taken from the wrapped
+ * Analyzer, to form new components. It is through this method that new
+ * TokenFilters can be added by AnalyzerWrappers.
+ *
+ * @param fieldName Name of the field which is to be analyzed
+ * @param components TokenStreamComponents taken from the wrapped Analyzer
+ * @return Wrapped / altered TokenStreamComponents.
+ */
+ protected abstract TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components);
+
+ @Override
+ protected final TokenStreamComponents createComponents(String fieldName, Reader aReader) {
+ return wrapComponents(fieldName, getWrappedAnalyzer(fieldName).createComponents(fieldName, aReader));
+ }
+
+ @Override
+ public int getPositionIncrementGap(String fieldName) {
+ return getWrappedAnalyzer(fieldName).getPositionIncrementGap(fieldName);
+ }
+
+ @Override
+ public int getOffsetGap(String fieldName) {
+ return getWrappedAnalyzer(fieldName).getOffsetGap(fieldName);
+ }
+
+ @Override
+ public final Reader initReader(String fieldName, Reader reader) {
+ return getWrappedAnalyzer(fieldName).initReader(fieldName, reader);
+ }
+}
diff --git a/src/main/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.java b/src/main/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.java
new file mode 100644
index 0000000..ea93850
--- /dev/null
+++ b/src/main/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
+import java.io.IOException;
+
+/**
+ * A token filter that truncates tokens.
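+ * For example, with {@code size = 5} the token {@code "elasticsearch"}
+ * becomes {@code "elast"}:
+ * <pre> {@code
+ * TokenStream filtered = new TruncateTokenFilter(input, 5);
+ * }</pre>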
+ */
+public class TruncateTokenFilter extends TokenFilter {
+
+ private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+
+ private final int size;
+
+ public TruncateTokenFilter(TokenStream in, int size) {
+ super(in);
+ this.size = size;
+ }
+
+ @Override
+ public final boolean incrementToken() throws IOException {
+ if (input.incrementToken()) {
+ final int length = termAttribute.length();
+ if (length > size) {
+ termAttribute.setLength(size);
+ }
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
+
+
diff --git a/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java b/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java
new file mode 100644
index 0000000..4ccad58
--- /dev/null
+++ b/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.Version;
+
+import java.io.IOException;
+
+/**
+ * A token filter that removes duplicate tokens from the stream. Can optionally be
+ * restricted to removing duplicates only among tokens at the same position.
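+ * For example:
+ * <pre> {@code
+ * // drop every token already seen earlier in the stream
+ * TokenStream deduped = new UniqueTokenFilter(input);
+ * // drop duplicates only among tokens at the same position
+ * TokenStream posDeduped = new UniqueTokenFilter(input, true);
+ * }</pre>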
+ */
+public class UniqueTokenFilter extends TokenFilter {
+
+ private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class);
+
+ // a fixed version is fine here: the version only matters for case-insensitive matching, which is disabled.
+ private final CharArraySet previous = new CharArraySet(Version.LUCENE_31, 8, false);
+ private final boolean onlyOnSamePosition;
+
+ public UniqueTokenFilter(TokenStream in) {
+ this(in, false);
+ }
+
+ public UniqueTokenFilter(TokenStream in, boolean onlyOnSamePosition) {
+ super(in);
+ this.onlyOnSamePosition = onlyOnSamePosition;
+ }
+
+ @Override
+ public final boolean incrementToken() throws IOException {
+ while (input.incrementToken()) {
+ final char term[] = termAttribute.buffer();
+ final int length = termAttribute.length();
+
+ boolean duplicate;
+ if (onlyOnSamePosition) {
+ final int posIncrement = posIncAttribute.getPositionIncrement();
+ if (posIncrement > 0) {
+ previous.clear();
+ }
+
+ duplicate = (posIncrement == 0 && previous.contains(term, 0, length));
+ } else {
+ duplicate = previous.contains(term, 0, length);
+ }
+
+ // clone the term, and add to the set of seen terms.
+ char saved[] = new char[length];
+ System.arraycopy(term, 0, saved, 0, length);
+ previous.add(saved);
+
+ if (!duplicate) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public final void reset() throws IOException {
+ super.reset();
+ previous.clear();
+ }
+}
+
+
diff --git a/src/main/java/org/apache/lucene/index/TrackingConcurrentMergeScheduler.java b/src/main/java/org/apache/lucene/index/TrackingConcurrentMergeScheduler.java
new file mode 100644
index 0000000..66b3328
--- /dev/null
+++ b/src/main/java/org/apache/lucene/index/TrackingConcurrentMergeScheduler.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.index;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.merge.OnGoingMerge;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+
+/**
+ * An extension to the {@link ConcurrentMergeScheduler} that tracks merge times as well as
+ * total and currently running merges.
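+ * <p>Subclasses may hook the merge lifecycle, for example (a sketch; {@code logger}
+ * is a placeholder):
+ * <pre> {@code
+ * MergeScheduler scheduler = new TrackingConcurrentMergeScheduler(logger) {
+ *     protected void beforeMerge(OnGoingMerge merge) { logger.trace("merge starting"); }
+ *     protected void afterMerge(OnGoingMerge merge)  { logger.trace("merge done"); }
+ * };}</pre>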
+ */
+public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
+
+ protected final ESLogger logger;
+
+ private final MeanMetric totalMerges = new MeanMetric();
+ private final CounterMetric totalMergesNumDocs = new CounterMetric();
+ private final CounterMetric totalMergesSizeInBytes = new CounterMetric();
+ private final CounterMetric currentMerges = new CounterMetric();
+ private final CounterMetric currentMergesNumDocs = new CounterMetric();
+ private final CounterMetric currentMergesSizeInBytes = new CounterMetric();
+
+ private final Set<OnGoingMerge> onGoingMerges = ConcurrentCollections.newConcurrentSet();
+ private final Set<OnGoingMerge> readOnlyOnGoingMerges = Collections.unmodifiableSet(onGoingMerges);
+
+ public TrackingConcurrentMergeScheduler(ESLogger logger) {
+ super();
+ this.logger = logger;
+ }
+
+ public long totalMerges() {
+ return totalMerges.count();
+ }
+
+ public long totalMergeTime() {
+ return totalMerges.sum();
+ }
+
+ public long totalMergeNumDocs() {
+ return totalMergesNumDocs.count();
+ }
+
+ public long totalMergeSizeInBytes() {
+ return totalMergesSizeInBytes.count();
+ }
+
+ public long currentMerges() {
+ return currentMerges.count();
+ }
+
+ public long currentMergesNumDocs() {
+ return currentMergesNumDocs.count();
+ }
+
+ public long currentMergesSizeInBytes() {
+ return currentMergesSizeInBytes.count();
+ }
+
+ public Set<OnGoingMerge> onGoingMerges() {
+ return readOnlyOnGoingMerges;
+ }
+
+ @Override
+ protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
+ int totalNumDocs = merge.totalNumDocs();
+ // don't use #totalBytesSize() since it needs to be executed under the IW lock; might be fixed in a future Lucene version
+ long totalSizeInBytes = merge.estimatedMergeBytes;
+ long time = System.currentTimeMillis();
+ currentMerges.inc();
+ currentMergesNumDocs.inc(totalNumDocs);
+ currentMergesSizeInBytes.inc(totalSizeInBytes);
+
+ OnGoingMerge onGoingMerge = new OnGoingMerge(merge);
+ onGoingMerges.add(onGoingMerge);
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", merge.info == null ? "_na_" : merge.info.info.name, merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes));
+ }
+ try {
+ beforeMerge(onGoingMerge);
+ super.doMerge(merge);
+ } finally {
+ long took = System.currentTimeMillis() - time;
+
+ onGoingMerges.remove(onGoingMerge);
+ afterMerge(onGoingMerge);
+
+ currentMerges.dec();
+ currentMergesNumDocs.dec(totalNumDocs);
+ currentMergesSizeInBytes.dec(totalSizeInBytes);
+
+ totalMergesNumDocs.inc(totalNumDocs);
+ totalMergesSizeInBytes.inc(totalSizeInBytes);
+ totalMerges.inc(took);
+ if (took > 20000) { // if more than 20 seconds, DEBUG log it
+ logger.debug("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
+ } else if (logger.isTraceEnabled()) {
+ logger.trace("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
+ }
+ }
+ }
+
+ /**
+ * A callback allowing for custom logic before an actual merge starts.
+ */
+ protected void beforeMerge(OnGoingMerge merge) {
+
+ }
+
+ /**
+ * A callback allowing for custom logic after an actual merge finishes.
+ */
+ protected void afterMerge(OnGoingMerge merge) {
+
+ }
+
+ @Override
+ public MergeScheduler clone() {
+ // Lucene IW makes a clone internally but since we hold on to this instance
+ // the clone will just be the identity.
+ return this;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/apache/lucene/index/TrackingSerialMergeScheduler.java b/src/main/java/org/apache/lucene/index/TrackingSerialMergeScheduler.java
new file mode 100644
index 0000000..bd3ed9d
--- /dev/null
+++ b/src/main/java/org/apache/lucene/index/TrackingSerialMergeScheduler.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.index;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.merge.OnGoingMerge;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+
+// LUCENE MONITOR - Copied from SerialMergeScheduler
+public class TrackingSerialMergeScheduler extends MergeScheduler {
+
+ protected final ESLogger logger;
+
+ private final MeanMetric totalMerges = new MeanMetric();
+ private final CounterMetric totalMergesNumDocs = new CounterMetric();
+ private final CounterMetric totalMergesSizeInBytes = new CounterMetric();
+ private final CounterMetric currentMerges = new CounterMetric();
+ private final CounterMetric currentMergesNumDocs = new CounterMetric();
+ private final CounterMetric currentMergesSizeInBytes = new CounterMetric();
+
+ private final Set<OnGoingMerge> onGoingMerges = ConcurrentCollections.newConcurrentSet();
+ private final Set<OnGoingMerge> readOnlyOnGoingMerges = Collections.unmodifiableSet(onGoingMerges);
+
+ public TrackingSerialMergeScheduler(ESLogger logger) {
+ this.logger = logger;
+ }
+
+ public long totalMerges() {
+ return totalMerges.count();
+ }
+
+ public long totalMergeTime() {
+ return totalMerges.sum();
+ }
+
+ public long totalMergeNumDocs() {
+ return totalMergesNumDocs.count();
+ }
+
+ public long totalMergeSizeInBytes() {
+ return totalMergesSizeInBytes.count();
+ }
+
+ public long currentMerges() {
+ return currentMerges.count();
+ }
+
+ public long currentMergesNumDocs() {
+ return currentMergesNumDocs.count();
+ }
+
+ public long currentMergesSizeInBytes() {
+ return currentMergesSizeInBytes.count();
+ }
+
+ public Set<OnGoingMerge> onGoingMerges() {
+ return readOnlyOnGoingMerges;
+ }
+
+ /**
+ * Just do the merges in sequence. We do this
+ * "synchronized" so that even if the application is using
+ * multiple threads, only one merge may run at a time.
+ */
+ @Override
+ synchronized public void merge(IndexWriter writer) throws CorruptIndexException, IOException {
+ while (true) {
+ MergePolicy.OneMerge merge = writer.getNextMerge();
+ if (merge == null)
+ break;
+
+ // different from serial merge, call mergeInit here so we get the correct stats
+            // mergeInit can be called several times without side effects (checks on merge.info not being null)
+ writer.mergeInit(merge);
+
+ int totalNumDocs = merge.totalNumDocs();
+            // don't use #totalBytesSize() since it needs to be executed under the IW lock; might be fixed in a future Lucene version
+ long totalSizeInBytes = merge.estimatedMergeBytes;
+ long time = System.currentTimeMillis();
+ currentMerges.inc();
+ currentMergesNumDocs.inc(totalNumDocs);
+ currentMergesSizeInBytes.inc(totalSizeInBytes);
+
+ OnGoingMerge onGoingMerge = new OnGoingMerge(merge);
+ onGoingMerges.add(onGoingMerge);
+
+ // sadly, segment name is not available since mergeInit is called from merge itself...
+ if (logger.isTraceEnabled()) {
+ logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", merge.info == null ? "_na_" : merge.info.info.name, merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes));
+ }
+ try {
+ beforeMerge(onGoingMerge);
+ writer.merge(merge);
+ } finally {
+ long took = System.currentTimeMillis() - time;
+
+ onGoingMerges.remove(onGoingMerge);
+ afterMerge(onGoingMerge);
+
+ currentMerges.dec();
+ currentMergesNumDocs.dec(totalNumDocs);
+ currentMergesSizeInBytes.dec(totalSizeInBytes);
+
+ totalMergesNumDocs.inc(totalNumDocs);
+ totalMergesSizeInBytes.inc(totalSizeInBytes);
+ totalMerges.inc(took);
+ if (took > 20000) { // if more than 20 seconds, DEBUG log it
+ logger.debug("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
+ } else if (logger.isTraceEnabled()) {
+ logger.trace("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
+ }
+ }
+ }
+ }
+
+ /**
+ * A callback allowing for custom logic before an actual merge starts.
+ */
+ protected void beforeMerge(OnGoingMerge merge) {
+
+ }
+
+ /**
+     * A callback allowing for custom logic after an actual merge has finished.
+ */
+ protected void afterMerge(OnGoingMerge merge) {
+
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public MergeScheduler clone() {
+ // Lucene IW makes a clone internally but since we hold on to this instance
+ // the clone will just be the identity.
+ return this;
+ }
+}
\ No newline at end of file
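
Both tracking schedulers expose the same counters, so the totals can be turned into a simple stats snapshot. A sketch (assuming only the public accessors above; the MergeStats holder class is illustrative):

    import org.elasticsearch.common.unit.ByteSizeValue;
    import org.elasticsearch.common.unit.TimeValue;

    class MergeStats {
        static String snapshot(TrackingSerialMergeScheduler scheduler) {
            long merges = scheduler.totalMerges();    // number of completed merges
            long millis = scheduler.totalMergeTime(); // total wall-clock merge time
            long avg = merges == 0 ? 0 : millis / merges;
            return "merges=" + merges
                    + ", total_time=" + TimeValue.timeValueMillis(millis)
                    + ", avg_time=" + TimeValue.timeValueMillis(avg)
                    + ", total_size=" + new ByteSizeValue(scheduler.totalMergeSizeInBytes());
        }
    }
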
diff --git a/src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java b/src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java
new file mode 100644
index 0000000..5f99fce
--- /dev/null
+++ b/src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.index.memory;
+
+/**
+ * This class extends {@link MemoryIndex} to make the reuse constructor visible.
+ */
+public final class ExtendedMemoryIndex extends MemoryIndex {
+
+ public ExtendedMemoryIndex(boolean storeOffsets, long maxReusedBytes) {
+ super(storeOffsets, maxReusedBytes);
+ }
+
+}
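
With the reuse constructor visible, a single index instance can be recycled across documents via reset(). A minimal usage sketch (the 1 MB reuse budget, field name, and Lucene 4.6 StandardAnalyzer are illustrative assumptions):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.memory.ExtendedMemoryIndex;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.util.Version;

    class MemoryIndexExample {
        static float scoreOnce() {
            // 1 MB reuse budget is an arbitrary illustrative value
            ExtendedMemoryIndex index = new ExtendedMemoryIndex(true, 1024 * 1024);
            index.addField("body", "quick brown fox", new StandardAnalyzer(Version.LUCENE_46));
            float score = index.search(new TermQuery(new Term("body", "fox")));
            index.reset(); // recycle the allocated buffers for the next document
            return score;
        }
    }
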
diff --git a/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java b/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java
new file mode 100644
index 0000000..e1dcf58
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.queries;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.mapper.FieldMapper;
+
+import java.io.IOException;
+
+/**
+ * Extended version of {@link CommonTermsQuery} that allows passing in a
+ * <tt>minimumNumberShouldMatch</tt> specification that uses the actual number of high-frequency terms
+ * to calculate the minimum number of matching terms.
+ */
+public class ExtendedCommonTermsQuery extends CommonTermsQuery {
+
+ private final FieldMapper<?> mapper;
+
+ public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency, boolean disableCoord, FieldMapper<?> mapper) {
+ super(highFreqOccur, lowFreqOccur, maxTermFrequency, disableCoord);
+ this.mapper = mapper;
+ }
+
+ private String lowFreqMinNumShouldMatchSpec;
+ private String highFreqMinNumShouldMatchSpec;
+
+ @Override
+ protected int calcLowFreqMinimumNumberShouldMatch(int numOptional) {
+ return calcMinimumNumberShouldMatch(lowFreqMinNumShouldMatchSpec, numOptional);
+ }
+
+ protected int calcMinimumNumberShouldMatch(String spec, int numOptional) {
+ if (spec == null) {
+ return 0;
+ }
+ return Queries.calculateMinShouldMatch(numOptional, spec);
+ }
+
+ @Override
+ protected int calcHighFreqMinimumNumberShouldMatch(int numOptional) {
+ return calcMinimumNumberShouldMatch(highFreqMinNumShouldMatchSpec, numOptional);
+ }
+
+ public void setHighFreqMinimumNumberShouldMatch(String spec) {
+ this.highFreqMinNumShouldMatchSpec = spec;
+ }
+
+ public String getHighFreqMinimumNumberShouldMatchSpec() {
+ return highFreqMinNumShouldMatchSpec;
+ }
+
+ public void setLowFreqMinimumNumberShouldMatch(String spec) {
+ this.lowFreqMinNumShouldMatchSpec = spec;
+ }
+
+ public String getLowFreqMinimumNumberShouldMatchSpec() {
+ return lowFreqMinNumShouldMatchSpec;
+ }
+
+ // LUCENE-UPGRADE: remove this method if on 4.8
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (this.terms.isEmpty()) {
+ return new BooleanQuery();
+ } else if (this.terms.size() == 1) {
+ final Query tq = newTermQuery(this.terms.get(0), null);
+ tq.setBoost(getBoost());
+ return tq;
+ }
+ return super.rewrite(reader);
+ }
+
+ // LUCENE-UPGRADE: remove this method if on 4.8
+ @Override
+ protected Query buildQuery(final int maxDoc,
+ final TermContext[] contextArray, final Term[] queryTerms) {
+ BooleanQuery lowFreq = new BooleanQuery(disableCoord);
+ BooleanQuery highFreq = new BooleanQuery(disableCoord);
+ highFreq.setBoost(highFreqBoost);
+ lowFreq.setBoost(lowFreqBoost);
+ BooleanQuery query = new BooleanQuery(true);
+ for (int i = 0; i < queryTerms.length; i++) {
+ TermContext termContext = contextArray[i];
+ if (termContext == null) {
+ lowFreq.add(newTermQuery(queryTerms[i], null), lowFreqOccur);
+ } else {
+ if ((maxTermFrequency >= 1f && termContext.docFreq() > maxTermFrequency)
+ || (termContext.docFreq() > (int) Math.ceil(maxTermFrequency * (float) maxDoc))) {
+ highFreq.add(newTermQuery(queryTerms[i], termContext), highFreqOccur);
+ } else {
+ lowFreq.add(newTermQuery(queryTerms[i], termContext), lowFreqOccur);
+ }
+ }
+
+ }
+ final int numLowFreqClauses = lowFreq.clauses().size();
+ final int numHighFreqClauses = highFreq.clauses().size();
+ if (lowFreqOccur == Occur.SHOULD && numLowFreqClauses > 0) {
+ int minMustMatch = calcLowFreqMinimumNumberShouldMatch(numLowFreqClauses);
+ lowFreq.setMinimumNumberShouldMatch(minMustMatch);
+ }
+ if (highFreqOccur == Occur.SHOULD && numHighFreqClauses > 0) {
+ int minMustMatch = calcHighFreqMinimumNumberShouldMatch(numHighFreqClauses);
+ highFreq.setMinimumNumberShouldMatch(minMustMatch);
+ }
+ if (lowFreq.clauses().isEmpty()) {
+ /*
+ * if lowFreq is empty we rewrite the high freq terms in a conjunction to
+ * prevent slow queries.
+ */
+ if (highFreq.getMinimumNumberShouldMatch() == 0 && highFreqOccur != Occur.MUST) {
+ for (BooleanClause booleanClause : highFreq) {
+ booleanClause.setOccur(Occur.MUST);
+ }
+ }
+ highFreq.setBoost(getBoost());
+ return highFreq;
+ } else if (highFreq.clauses().isEmpty()) {
+ // only do low freq terms - we don't have high freq terms
+ lowFreq.setBoost(getBoost());
+ return lowFreq;
+ } else {
+ query.add(highFreq, Occur.SHOULD);
+ query.add(lowFreq, Occur.MUST);
+ query.setBoost(getBoost());
+ return query;
+ }
+ }
+
+ //@Override
+ // LUCENE-UPGRADE: remove this method if on 4.8
+ protected Query newTermQuery(Term term, TermContext context) {
+ if (mapper == null) {
+ // this should be super.newTermQuery(term, context) once it's available in the super class
+ return context == null ? new TermQuery(term) : new TermQuery(term, context);
+ }
+ final Query query = mapper.queryStringTermQuery(term);
+ if (query == null) {
+ // this should be super.newTermQuery(term, context) once it's available in the super class
+ return context == null ? new TermQuery(term) : new TermQuery(term, context);
+ } else {
+ return query;
+ }
+ }
+}
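
A sketch of how the min-should-match specs might be applied (a null mapper falls back to plain TermQuery creation, per newTermQuery above; the terms, frequency cutoff, and spec strings are illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.queries.ExtendedCommonTermsQuery;
    import org.apache.lucene.search.BooleanClause.Occur;

    class CommonTermsExample {
        static ExtendedCommonTermsQuery build() {
            // terms above a 1% doc-frequency cutoff are treated as high frequency
            ExtendedCommonTermsQuery query = new ExtendedCommonTermsQuery(
                    Occur.SHOULD, Occur.SHOULD, 0.01f, true, null);
            query.add(new Term("body", "the"));
            query.add(new Term("body", "fox"));
            query.setLowFreqMinimumNumberShouldMatch("2");    // absolute count
            query.setHighFreqMinimumNumberShouldMatch("50%"); // percentage of optional clauses
            return query;
        }
    }
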
diff --git a/src/main/java/org/apache/lucene/queries/XTermsFilter.java b/src/main/java/org/apache/lucene/queries/XTermsFilter.java
new file mode 100644
index 0000000..ea11f03
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queries/XTermsFilter.java
@@ -0,0 +1,328 @@
+package org.apache.lucene.queries;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Constructs a filter for docs matching any of the terms added to this class.
+ * Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in
+ * a sequence. An example might be a collection of primary keys from a database query result or perhaps
+ * a choice of "category" labels picked by the end user. As a filter, this is much faster than the
+ * equivalent query (a BooleanQuery with many "should" TermQueries)
+ */
+public final class XTermsFilter extends Filter {
+
+ static {
+ assert Version.LUCENE_46 == org.elasticsearch.Version.CURRENT.luceneVersion : "Remove this once we are on LUCENE_48 - see LUCENE-5502";
+ }
+
+ /*
+     * this class is often used for a large number of terms in a single field.
+     * to optimize for this case and to be filter-cache friendly we
+     * serialize all terms into a single byte array and store offsets
+     * in a parallel array to keep the # of objects constant and speed up
+ * equals / hashcode.
+ *
+ * This adds quite a bit of complexity but allows large term filters to
+ * be efficient for GC and cache-lookups
+ */
+ private final int[] offsets;
+ private final byte[] termsBytes;
+ private final TermsAndField[] termsAndFields;
+ private final int hashCode; // cached hashcode for fast cache lookups
+ private static final int PRIME = 31;
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given list. The list
+ * can contain duplicate terms and multiple fields.
+ */
+ public XTermsFilter(final List<Term> terms) {
+ this(new FieldAndTermEnum() {
+ // we need to sort for deduplication and to have a common cache key
+ final Iterator<Term> iter = sort(terms).iterator();
+ @Override
+ public BytesRef next() {
+ if (iter.hasNext()) {
+ Term next = iter.next();
+ field = next.field();
+ return next.bytes();
+ }
+ return null;
+ }}, terms.size());
+ }
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given {@link BytesRef} list for
+ * a single field.
+ */
+ public XTermsFilter(final String field, final List<BytesRef> terms) {
+ this(new FieldAndTermEnum(field) {
+ // we need to sort for deduplication and to have a common cache key
+ final Iterator<BytesRef> iter = sort(terms).iterator();
+ @Override
+ public BytesRef next() {
+ if (iter.hasNext()) {
+ return iter.next();
+ }
+ return null;
+ }
+ }, terms.size());
+ }
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given {@link BytesRef} array for
+ * a single field.
+ */
+    public XTermsFilter(final String field, final BytesRef... terms) {
+ // this ctor prevents unnecessary Term creations
+ this(field, Arrays.asList(terms));
+ }
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given array. The array can
+ * contain duplicate terms and multiple fields.
+ */
+ public XTermsFilter(final Term... terms) {
+ this(Arrays.asList(terms));
+ }
+
+
+ private XTermsFilter(FieldAndTermEnum iter, int length) {
+ // TODO: maybe use oal.index.PrefixCodedTerms instead?
+ // If number of terms is more than a few hundred it
+ // should be a win
+
+ // TODO: we also pack terms in FieldCache/DocValues
+ // ... maybe we can refactor to share that code
+
+ // TODO: yet another option is to build the union of the terms in
+        // an automaton and call intersect on the termsenum if the density is high
+
+ int hash = 9;
+ byte[] serializedTerms = new byte[0];
+ this.offsets = new int[length+1];
+ int lastEndOffset = 0;
+ int index = 0;
+ ArrayList<TermsAndField> termsAndFields = new ArrayList<TermsAndField>();
+ TermsAndField lastTermsAndField = null;
+ BytesRef previousTerm = null;
+ String previousField = null;
+ BytesRef currentTerm;
+ String currentField;
+ while((currentTerm = iter.next()) != null) {
+ currentField = iter.field();
+ if (currentField == null) {
+ throw new IllegalArgumentException("Field must not be null");
+ }
+ if (previousField != null) {
+ // deduplicate
+ if (previousField.equals(currentField)) {
+ if (previousTerm.bytesEquals(currentTerm)){
+ continue;
+ }
+ } else {
+ final int start = lastTermsAndField == null ? 0 : lastTermsAndField.end;
+ lastTermsAndField = new TermsAndField(start, index, previousField);
+ termsAndFields.add(lastTermsAndField);
+ }
+ }
+ hash = PRIME * hash + currentField.hashCode();
+ hash = PRIME * hash + currentTerm.hashCode();
+ if (serializedTerms.length < lastEndOffset+currentTerm.length) {
+ serializedTerms = ArrayUtil.grow(serializedTerms, lastEndOffset+currentTerm.length);
+ }
+ System.arraycopy(currentTerm.bytes, currentTerm.offset, serializedTerms, lastEndOffset, currentTerm.length);
+ offsets[index] = lastEndOffset;
+ lastEndOffset += currentTerm.length;
+ index++;
+ previousTerm = currentTerm;
+ previousField = currentField;
+ }
+ offsets[index] = lastEndOffset;
+ final int start = lastTermsAndField == null ? 0 : lastTermsAndField.end;
+ lastTermsAndField = new TermsAndField(start, index, previousField);
+ termsAndFields.add(lastTermsAndField);
+ this.termsBytes = ArrayUtil.shrink(serializedTerms, lastEndOffset);
+ this.termsAndFields = termsAndFields.toArray(new TermsAndField[termsAndFields.size()]);
+ this.hashCode = hash;
+
+ }
+
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ final AtomicReader reader = context.reader();
+ FixedBitSet result = null; // lazy init if needed - no need to create a big bitset ahead of time
+ final Fields fields = reader.fields();
+ final BytesRef spare = new BytesRef(this.termsBytes);
+ if (fields == null) {
+ return result;
+ }
+ Terms terms = null;
+ TermsEnum termsEnum = null;
+ DocsEnum docs = null;
+ for (TermsAndField termsAndField : this.termsAndFields) {
+ if ((terms = fields.terms(termsAndField.field)) != null) {
+ termsEnum = terms.iterator(termsEnum); // this won't return null
+ for (int i = termsAndField.start; i < termsAndField.end; i++) {
+ spare.offset = offsets[i];
+ spare.length = offsets[i+1] - offsets[i];
+ if (termsEnum.seekExact(spare)) {
+ docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE); // no freq since we don't need them
+ if (result == null) {
+ if (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ result = new FixedBitSet(reader.maxDoc());
+ // lazy init but don't do it in the hot loop since we could read many docs
+ result.set(docs.docID());
+ }
+ }
+ while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ result.set(docs.docID());
+ }
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if ((obj == null) || (obj.getClass() != this.getClass())) {
+ return false;
+ }
+
+ XTermsFilter test = (XTermsFilter) obj;
+ // first check the fields before even comparing the bytes
+ if (test.hashCode == hashCode && Arrays.equals(termsAndFields, test.termsAndFields)) {
+ int lastOffset = termsAndFields[termsAndFields.length - 1].end;
+            // compare offsets; since we sort, they must be identical
+ if (ArrayUtil.equals(offsets, 0, test.offsets, 0, lastOffset + 1)) {
+                // straight byte comparison; since we sort, they must be identical
+ return ArrayUtil.equals(termsBytes, 0, test.termsBytes, 0, offsets[lastOffset]);
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ BytesRef spare = new BytesRef(termsBytes);
+ boolean first = true;
+ for (int i = 0; i < termsAndFields.length; i++) {
+ TermsAndField current = termsAndFields[i];
+ for (int j = current.start; j < current.end; j++) {
+ spare.offset = offsets[j];
+ spare.length = offsets[j+1] - offsets[j];
+ if (!first) {
+ builder.append(' ');
+ }
+ first = false;
+ builder.append(current.field).append(':');
+ builder.append(spare.utf8ToString());
+ }
+ }
+
+ return builder.toString();
+ }
+
+ private static final class TermsAndField {
+ final int start;
+ final int end;
+ final String field;
+
+
+ TermsAndField(int start, int end, String field) {
+ super();
+ this.start = start;
+ this.end = end;
+ this.field = field;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((field == null) ? 0 : field.hashCode());
+ result = prime * result + end;
+ result = prime * result + start;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
+ TermsAndField other = (TermsAndField) obj;
+ if (field == null) {
+ if (other.field != null) return false;
+ } else if (!field.equals(other.field)) return false;
+ if (end != other.end) return false;
+ if (start != other.start) return false;
+ return true;
+ }
+
+ }
+
+ private static abstract class FieldAndTermEnum {
+ protected String field;
+
+ public abstract BytesRef next();
+
+ public FieldAndTermEnum() {}
+
+ public FieldAndTermEnum(String field) { this.field = field; }
+
+ public String field() {
+ return field;
+ }
+ }
+
+ /*
+ * simple utility that returns the in-place sorted list
+ */
+ private static <T extends Comparable<? super T>> List<T> sort(List<T> toSort) {
+ if (toSort.isEmpty()) {
+ throw new IllegalArgumentException("no terms provided");
+ }
+ Collections.sort(toSort);
+ return toSort;
+ }
+}
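
A sketch of the primary-key use case from the class javadoc (field name and id values are illustrative):

    import org.apache.lucene.queries.XTermsFilter;
    import org.apache.lucene.util.BytesRef;

    import java.util.Arrays;
    import java.util.List;

    class TermsFilterExample {
        static XTermsFilter primaryKeyFilter() {
            List<BytesRef> ids = Arrays.asList(new BytesRef("1"), new BytesRef("7"), new BytesRef("42"));
            // terms are sorted and deduplicated internally, so order and duplicates don't matter
            return new XTermsFilter("id", ids);
        }
    }
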
diff --git a/src/main/java/org/apache/lucene/queryparser/XSimpleQueryParser.java b/src/main/java/org/apache/lucene/queryparser/XSimpleQueryParser.java
new file mode 100644
index 0000000..91bc21d
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queryparser/XSimpleQueryParser.java
@@ -0,0 +1,521 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.queryparser;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.QueryBuilder;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.lucene.Lucene;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * XSimpleQueryParser is used to parse human readable query syntax.
+ * <p>
+ * The main idea behind this parser is that a person should be able to type
+ * whatever they want to represent a query, and this parser will do its best
+ * to interpret what to search for no matter how poorly composed the request
+ * may be. Tokens are considered to be any of a term, phrase, or subquery for the
+ * operations described below. Whitespace including ' ' '\n' '\r' and '\t'
+ * and certain operators may be used to delimit tokens ( ) + | " .
+ * <p>
+ * Any errors in query syntax will be ignored and the parser will attempt
+ * to decipher what it can; however, this may mean odd or unexpected results.
+ * <h4>Query Operators</h4>
+ * <ul>
+ * <li>'{@code +}' specifies {@code AND} operation: <tt>token1+token2</tt>
+ * <li>'{@code |}' specifies {@code OR} operation: <tt>token1|token2</tt>
+ * <li>'{@code -}' negates a single token: <tt>-token0</tt>
+ * <li>'{@code "}' creates phrases of terms: <tt>"term1 term2 ..."</tt>
+ * <li>'{@code *}' at the end of terms specifies prefix query: <tt>term*</tt>
+ * <li>'{@code (}' and '{@code )}' specify precedence: <tt>token1 + (token2 | token3)</tt>
+ * </ul>
+ * <p>
+ * The {@link #setDefaultOperator default operator} is {@code OR} if no other operator is specified.
+ * For example, the following will {@code OR} {@code token1} and {@code token2} together:
+ * <tt>token1 token2</tt>
+ * <p>
+ * Normal operator precedence will be simple order from right to left.
+ * For example, the following will evaluate {@code token1 OR token2} first,
+ * then {@code AND} with {@code token3}:
+ * <blockquote>token1 | token2 + token3</blockquote>
+ * <h4>Escaping</h4>
+ * <p>
+ * An individual term may contain any possible character with certain characters
+ * requiring escaping using a '{@code \}'. The following characters will need to be escaped in
+ * terms and phrases:
+ * {@code + | " ( ) ' \}
+ * <p>
+ * The '{@code -}' operator is a special case. On individual terms (not phrases) the first
+ * character of a term that is {@code -} must be escaped; however, any '{@code -}' characters
+ * beyond the first character do not need to be escaped.
+ * For example:
+ * <ul>
+ * <li>{@code -term1} -- Specifies {@code NOT} operation against {@code term1}
+ * <li>{@code \-term1} -- Searches for the term {@code -term1}.
+ * <li>{@code term-1} -- Searches for the term {@code term-1}.
+ * <li>{@code term\-1} -- Searches for the term {@code term-1}.
+ * </ul>
+ * <p>
+ * The '{@code *}' operator is a special case. On individual terms (not phrases) the last
+ * character of a term that is '{@code *}' must be escaped; however, any '{@code *}' characters
+ * before the last character do not need to be escaped:
+ * <ul>
+ * <li>{@code term1*} -- Searches for the prefix {@code term1}
+ * <li>{@code term1\*} -- Searches for the term {@code term1*}
+ * <li>{@code term*1} -- Searches for the term {@code term*1}
+ * <li>{@code term\*1} -- Searches for the term {@code term*1}
+ * </ul>
+ * <p>
+ * Note that above examples consider the terms before text processing.
+ */
+public class XSimpleQueryParser extends QueryBuilder {
+
+ static {
+ assert Version.LUCENE_46.onOrAfter(Lucene.VERSION) : "Lucene 4.7 adds SimpleQueryParser, remove me!";
+ }
+
+ /** Map of fields to query against with their weights */
+ protected final Map<String,Float> weights;
+ /** flags to the parser (to turn features on/off) */
+ protected final int flags;
+
+ /** Enables {@code AND} operator (+) */
+ public static final int AND_OPERATOR = 1<<0;
+ /** Enables {@code NOT} operator (-) */
+ public static final int NOT_OPERATOR = 1<<1;
+ /** Enables {@code OR} operator (|) */
+ public static final int OR_OPERATOR = 1<<2;
+ /** Enables {@code PREFIX} operator (*) */
+ public static final int PREFIX_OPERATOR = 1<<3;
+ /** Enables {@code PHRASE} operator (") */
+ public static final int PHRASE_OPERATOR = 1<<4;
+ /** Enables {@code PRECEDENCE} operators: {@code (} and {@code )} */
+ public static final int PRECEDENCE_OPERATORS = 1<<5;
+ /** Enables {@code ESCAPE} operator (\) */
+ public static final int ESCAPE_OPERATOR = 1<<6;
+ /** Enables {@code WHITESPACE} operators: ' ' '\n' '\r' '\t' */
+ public static final int WHITESPACE_OPERATOR = 1<<7;
+
+ private BooleanClause.Occur defaultOperator = BooleanClause.Occur.SHOULD;
+
+ /** Creates a new parser searching over a single field. */
+ public XSimpleQueryParser(Analyzer analyzer, String field) {
+ this(analyzer, Collections.singletonMap(field, 1.0F));
+ }
+
+ /** Creates a new parser searching over multiple fields with different weights. */
+ public XSimpleQueryParser(Analyzer analyzer, Map<String, Float> weights) {
+ this(analyzer, weights, -1);
+ }
+
+ /** Creates a new parser with custom flags used to enable/disable certain features. */
+ public XSimpleQueryParser(Analyzer analyzer, Map<String, Float> weights, int flags) {
+ super(analyzer);
+ this.weights = weights;
+ this.flags = flags;
+ }
+
+    /** Parses the query text and returns the parsed query (or null if empty) */
+ public Query parse(String queryText) {
+ char data[] = queryText.toCharArray();
+ char buffer[] = new char[data.length];
+
+ State state = new State(data, buffer, 0, data.length);
+ parseSubQuery(state);
+ return state.top;
+ }
+
+ private void parseSubQuery(State state) {
+ while (state.index < state.length) {
+ if (state.data[state.index] == '(' && (flags & PRECEDENCE_OPERATORS) != 0) {
+ // the beginning of a subquery has been found
+ consumeSubQuery(state);
+ } else if (state.data[state.index] == ')' && (flags & PRECEDENCE_OPERATORS) != 0) {
+ // this is an extraneous character so it is ignored
+ ++state.index;
+ } else if (state.data[state.index] == '"' && (flags & PHRASE_OPERATOR) != 0) {
+ // the beginning of a phrase has been found
+ consumePhrase(state);
+ } else if (state.data[state.index] == '+' && (flags & AND_OPERATOR) != 0) {
+ // an and operation has been explicitly set
+ // if an operation has already been set this one is ignored
+ // if a term (or phrase or subquery) has not been found yet the
+ // operation is also ignored since there is no previous
+ // term (or phrase or subquery) to and with
+ if (state.currentOperation == null && state.top != null) {
+ state.currentOperation = BooleanClause.Occur.MUST;
+ }
+
+ ++state.index;
+ } else if (state.data[state.index] == '|' && (flags & OR_OPERATOR) != 0) {
+ // an or operation has been explicitly set
+ // if an operation has already been set this one is ignored
+ // if a term (or phrase or subquery) has not been found yet the
+ // operation is also ignored since there is no previous
+ // term (or phrase or subquery) to or with
+ if (state.currentOperation == null && state.top != null) {
+ state.currentOperation = BooleanClause.Occur.SHOULD;
+ }
+
+ ++state.index;
+ } else if (state.data[state.index] == '-' && (flags & NOT_OPERATOR) != 0) {
+ // a not operator has been found, so increase the not count
+ // two not operators in a row negate each other
+ ++state.not;
+ ++state.index;
+
+ // continue so the not operator is not reset
+ // before the next character is determined
+ continue;
+ } else if ((state.data[state.index] == ' '
+ || state.data[state.index] == '\t'
+ || state.data[state.index] == '\n'
+ || state.data[state.index] == '\r') && (flags & WHITESPACE_OPERATOR) != 0) {
+ // ignore any whitespace found as it may have already been
+                // used as a delimiter across a term (or phrase or subquery)
+ // or is simply extraneous
+ ++state.index;
+ } else {
+ // the beginning of a token has been found
+ consumeToken(state);
+ }
+
+ // reset the not operator as even whitespace is not allowed when
+ // specifying the not operation for a term (or phrase or subquery)
+ state.not = 0;
+ }
+ }
+
+ private void consumeSubQuery(State state) {
+ assert (flags & PRECEDENCE_OPERATORS) != 0;
+ int start = ++state.index;
+ int precedence = 1;
+ boolean escaped = false;
+
+ while (state.index < state.length) {
+ if (!escaped) {
+ if (state.data[state.index] == '\\' && (flags & ESCAPE_OPERATOR) != 0) {
+ // an escape character has been found so
+ // whatever character is next will become
+ // part of the subquery unless the escape
+ // character is the last one in the data
+ escaped = true;
+ ++state.index;
+
+ continue;
+ } else if (state.data[state.index] == '(') {
+ // increase the precedence as there is a
+ // subquery in the current subquery
+ ++precedence;
+ } else if (state.data[state.index] == ')') {
+ --precedence;
+
+ if (precedence == 0) {
+ // this should be the end of the subquery
+                        // all characters found will be used for
+ // creating the subquery
+ break;
+ }
+ }
+ }
+
+ escaped = false;
+ ++state.index;
+ }
+
+ if (state.index == state.length) {
+ // a closing parenthesis was never found so the opening
+ // parenthesis is considered extraneous and will be ignored
+ state.index = start;
+ } else if (state.index == start) {
+ // a closing parenthesis was found immediately after the opening
+ // parenthesis so the current operation is reset since it would
+ // have been applied to this subquery
+ state.currentOperation = null;
+
+ ++state.index;
+ } else {
+ // a complete subquery has been found and is recursively parsed by
+ // starting over with a new state object
+ State subState = new State(state.data, state.buffer, start, state.index);
+ parseSubQuery(subState);
+ buildQueryTree(state, subState.top);
+
+ ++state.index;
+ }
+ }
+
+ private void consumePhrase(State state) {
+ assert (flags & PHRASE_OPERATOR) != 0;
+ int start = ++state.index;
+ int copied = 0;
+ boolean escaped = false;
+
+ while (state.index < state.length) {
+ if (!escaped) {
+ if (state.data[state.index] == '\\' && (flags & ESCAPE_OPERATOR) != 0) {
+ // an escape character has been found so
+ // whatever character is next will become
+ // part of the phrase unless the escape
+ // character is the last one in the data
+ escaped = true;
+ ++state.index;
+
+ continue;
+ } else if (state.data[state.index] == '"') {
+ // this should be the end of the phrase
+                    // all characters found will be used for
+ // creating the phrase query
+ break;
+ }
+ }
+
+ escaped = false;
+ state.buffer[copied++] = state.data[state.index++];
+ }
+
+ if (state.index == state.length) {
+ // a closing double quote was never found so the opening
+ // double quote is considered extraneous and will be ignored
+ state.index = start;
+ } else if (state.index == start) {
+ // a closing double quote was found immediately after the opening
+ // double quote so the current operation is reset since it would
+ // have been applied to this phrase
+ state.currentOperation = null;
+
+ ++state.index;
+ } else {
+            // a complete phrase has been found and is parsed through
+            // the analyzer for the given field
+ String phrase = new String(state.buffer, 0, copied);
+ Query branch = newPhraseQuery(phrase);
+ buildQueryTree(state, branch);
+
+ ++state.index;
+ }
+ }
+
+ private void consumeToken(State state) {
+ int copied = 0;
+ boolean escaped = false;
+ boolean prefix = false;
+
+ while (state.index < state.length) {
+ if (!escaped) {
+ if (state.data[state.index] == '\\' && (flags & ESCAPE_OPERATOR) != 0) {
+ // an escape character has been found so
+ // whatever character is next will become
+ // part of the term unless the escape
+ // character is the last one in the data
+ escaped = true;
+ prefix = false;
+ ++state.index;
+
+ continue;
+ } else if ((state.data[state.index] == '"' && (flags & PHRASE_OPERATOR) != 0)
+ || (state.data[state.index] == '|' && (flags & OR_OPERATOR) != 0)
+ || (state.data[state.index] == '+' && (flags & AND_OPERATOR) != 0)
+ || (state.data[state.index] == '(' && (flags & PRECEDENCE_OPERATORS) != 0)
+ || (state.data[state.index] == ')' && (flags & PRECEDENCE_OPERATORS) != 0)
+ || ((state.data[state.index] == ' '
+ || state.data[state.index] == '\t'
+ || state.data[state.index] == '\n'
+ || state.data[state.index] == '\r') && (flags & WHITESPACE_OPERATOR) != 0)) {
+ // this should be the end of the term
+                    // all characters found will be used for
+ // creating the term query
+ break;
+ }
+
+ // wildcard tracks whether or not the last character
+ // was a '*' operator that hasn't been escaped
+ // there must be at least one valid character before
+ // searching for a prefixed set of terms
+ prefix = copied > 0 && state.data[state.index] == '*' && (flags & PREFIX_OPERATOR) != 0;
+ }
+
+ escaped = false;
+ state.buffer[copied++] = state.data[state.index++];
+ }
+
+ if (copied > 0) {
+ final Query branch;
+
+ if (prefix) {
+ // if a term is found with a closing '*' it is considered to be a prefix query
+ // and will have prefix added as an option
+ String token = new String(state.buffer, 0, copied - 1);
+ branch = newPrefixQuery(token);
+ } else {
+ // a standard term has been found so it will be run through
+ // the entire analysis chain from the specified schema field
+ String token = new String(state.buffer, 0, copied);
+ branch = newDefaultQuery(token);
+ }
+
+ buildQueryTree(state, branch);
+ }
+ }
+
+ // buildQueryTree should be called after a term, phrase, or subquery
+ // is consumed to be added to our existing query tree
+ // this method will only add to the existing tree if the branch contained in state is not null
+ private void buildQueryTree(State state, Query branch) {
+ if (branch != null) {
+ // modify our branch to a BooleanQuery wrapper for not
+ // this is necessary any time a term, phrase, or subquery is negated
+ if (state.not % 2 == 1) {
+ BooleanQuery nq = new BooleanQuery();
+ nq.add(branch, BooleanClause.Occur.MUST_NOT);
+ nq.add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD);
+ branch = nq;
+ }
+
+ // first term (or phrase or subquery) found and will begin our query tree
+ if (state.top == null) {
+ state.top = branch;
+ } else {
+ // more than one term (or phrase or subquery) found
+ // set currentOperation to the default if no other operation is explicitly set
+ if (state.currentOperation == null) {
+ state.currentOperation = defaultOperator;
+ }
+
+ // operational change requiring a new parent node
+ // this occurs if the previous operation is not the same as current operation
+ // because the previous operation must be evaluated separately to preserve
+ // the proper precedence and the current operation will take over as the top of the tree
+ if (state.previousOperation != state.currentOperation) {
+ BooleanQuery bq = new BooleanQuery();
+ bq.add(state.top, state.currentOperation);
+ state.top = bq;
+ }
+
+ // reset all of the state for reuse
+ ((BooleanQuery)state.top).add(branch, state.currentOperation);
+ state.previousOperation = state.currentOperation;
+ }
+
+ // reset the current operation as it was intended to be applied to
+ // the incoming term (or phrase or subquery) even if branch was null
+ // due to other possible errors
+ state.currentOperation = null;
+ }
+ }
+
+ /**
+ * Factory method to generate a standard query (no phrase or prefix operators).
+ */
+ protected Query newDefaultQuery(String text) {
+ BooleanQuery bq = new BooleanQuery(true);
+ for (Map.Entry<String,Float> entry : weights.entrySet()) {
+ Query q = createBooleanQuery(entry.getKey(), text, defaultOperator);
+ if (q != null) {
+ q.setBoost(entry.getValue());
+ bq.add(q, BooleanClause.Occur.SHOULD);
+ }
+ }
+ return simplify(bq);
+ }
+
+ /**
+ * Factory method to generate a phrase query.
+ */
+ protected Query newPhraseQuery(String text) {
+ BooleanQuery bq = new BooleanQuery(true);
+ for (Map.Entry<String,Float> entry : weights.entrySet()) {
+ Query q = createPhraseQuery(entry.getKey(), text);
+ if (q != null) {
+ q.setBoost(entry.getValue());
+ bq.add(q, BooleanClause.Occur.SHOULD);
+ }
+ }
+ return simplify(bq);
+ }
+
+ /**
+ * Factory method to generate a prefix query.
+ */
+ protected Query newPrefixQuery(String text) {
+ BooleanQuery bq = new BooleanQuery(true);
+ for (Map.Entry<String,Float> entry : weights.entrySet()) {
+ PrefixQuery prefix = new PrefixQuery(new Term(entry.getKey(), text));
+ prefix.setBoost(entry.getValue());
+ bq.add(prefix, BooleanClause.Occur.SHOULD);
+ }
+ return simplify(bq);
+ }
+
+ /**
+ * Helper to simplify boolean queries with 0 or 1 clause
+ */
+ protected Query simplify(BooleanQuery bq) {
+ if (bq.clauses().isEmpty()) {
+ return null;
+ } else if (bq.clauses().size() == 1) {
+ return bq.clauses().get(0).getQuery();
+ } else {
+ return bq;
+ }
+ }
+
+ /**
+ * Returns the implicit operator setting, which will be
+ * either {@code SHOULD} or {@code MUST}.
+ */
+ public BooleanClause.Occur getDefaultOperator() {
+ return defaultOperator;
+ }
+
+ /**
+ * Sets the implicit operator setting, which must be
+ * either {@code SHOULD} or {@code MUST}.
+ */
+ public void setDefaultOperator(BooleanClause.Occur operator) {
+ if (operator != BooleanClause.Occur.SHOULD && operator != BooleanClause.Occur.MUST) {
+ throw new IllegalArgumentException("invalid operator: only SHOULD or MUST are allowed");
+ }
+ this.defaultOperator = operator;
+ }
+
+ static class State {
+ final char[] data; // the characters in the query string
+ final char[] buffer; // a temporary buffer used to reduce necessary allocations
+ int index;
+ int length;
+
+ BooleanClause.Occur currentOperation;
+ BooleanClause.Occur previousOperation;
+ int not;
+
+ Query top;
+
+ State(char[] data, char[] buffer, int index, int length) {
+ this.data = data;
+ this.buffer = buffer;
+ this.index = index;
+ this.length = length;
+ }
+ }
+}
+
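
A sketch wiring the parser over two weighted fields with a restricted feature set (field names, boosts, the query string, and the Lucene 4.6 StandardAnalyzer are illustrative assumptions):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.XSimpleQueryParser;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    import java.util.HashMap;
    import java.util.Map;

    class SimpleParserExample {
        static Query parse() {
            Map<String, Float> weights = new HashMap<String, Float>();
            weights.put("title", 2.0f);
            weights.put("body", 1.0f);
            // enable only AND, OR, phrase, and whitespace handling
            int flags = XSimpleQueryParser.AND_OPERATOR
                    | XSimpleQueryParser.OR_OPERATOR
                    | XSimpleQueryParser.PHRASE_OPERATOR
                    | XSimpleQueryParser.WHITESPACE_OPERATOR;
            XSimpleQueryParser parser = new XSimpleQueryParser(
                    new StandardAnalyzer(Version.LUCENE_46), weights, flags);
            parser.setDefaultOperator(BooleanClause.Occur.MUST); // implicit AND between tokens
            return parser.parse("\"quick fox\" | lazy+dog");
        }
    }
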
diff --git a/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java b/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java
new file mode 100644
index 0000000..470f7b8
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.queryparser.classic;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.index.query.ExistsFilterParser;
+import org.elasticsearch.index.query.QueryParseContext;
+
+/**
+ *
+ */
+public class ExistsFieldQueryExtension implements FieldQueryExtension {
+
+ public static final String NAME = "_exists_";
+
+ @Override
+ public Query query(QueryParseContext parseContext, String queryText) {
+ return new XConstantScoreQuery(ExistsFilterParser.newFilter(parseContext, queryText, null));
+ }
+}
diff --git a/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java b/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java
new file mode 100644
index 0000000..003ff18
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.queryparser.classic;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.index.query.QueryParseContext;
+
+/**
+ *
+ */
+public interface FieldQueryExtension {
+
+ Query query(QueryParseContext parseContext, String queryText);
+}
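
The interface has a single factory method; extensions are looked up by pseudo-field name in MapperQueryParser's static fieldQueryExtensions map (see below), so wiring a new one in means extending that map. A hypothetical implementation sketch in the spirit of ExistsFieldQueryExtension above (the "_match_none_" name and the MatchNoDocsQuery body are illustrative, not part of this patch):

    import org.apache.lucene.queryparser.classic.FieldQueryExtension;
    import org.apache.lucene.search.Query;
    import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
    import org.elasticsearch.index.query.QueryParseContext;

    public class MatchNoneFieldQueryExtension implements FieldQueryExtension {

        // hypothetical pseudo-field name
        public static final String NAME = "_match_none_";

        @Override
        public Query query(QueryParseContext parseContext, String queryText) {
            // queryText is the value to the right of the ':' in the query string
            return new MatchNoDocsQuery();
        }
    }
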
diff --git a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
new file mode 100644
index 0000000..eceada9
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
@@ -0,0 +1,884 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.queryparser.classic;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.automaton.RegExp;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
+import static org.elasticsearch.common.lucene.search.Queries.optimizeQuery;
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * A query parser that uses the {@link MapperService} in order to build smarter
+ * queries based on the mapping information.
+ * <p/>
+ * <p>Also breaks fields with [type].[name] into a boolean query that must include the type
+ * as well as the query on the name.
+ */
+public class MapperQueryParser extends QueryParser {
+
+ public static final ImmutableMap<String, FieldQueryExtension> fieldQueryExtensions;
+
+ static {
+ fieldQueryExtensions = ImmutableMap.<String, FieldQueryExtension>builder()
+ .put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension())
+ .put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension())
+ .build();
+ }
+
+ private final QueryParseContext parseContext;
+
+ private QueryParserSettings settings;
+
+ private Analyzer quoteAnalyzer;
+
+ private boolean forcedAnalyzer;
+ private boolean forcedQuoteAnalyzer;
+
+ private FieldMapper currentMapper;
+
+ private boolean analyzeWildcard;
+
+ private String quoteFieldSuffix;
+
+ public MapperQueryParser(QueryParseContext parseContext) {
+ super(Lucene.QUERYPARSER_VERSION, null, null);
+ this.parseContext = parseContext;
+ }
+
+ public MapperQueryParser(QueryParserSettings settings, QueryParseContext parseContext) {
+ super(Lucene.QUERYPARSER_VERSION, settings.defaultField(), settings.defaultAnalyzer());
+ this.parseContext = parseContext;
+ reset(settings);
+ }
+
+ public void reset(QueryParserSettings settings) {
+ this.settings = settings;
+ this.field = settings.defaultField();
+
+ if (settings.fields() != null) {
+ if (settings.fields.size() == 1) {
+ // just mark it as the default field
+ this.field = settings.fields().get(0);
+ } else {
+ // otherwise, we need to have the default field being null...
+ this.field = null;
+ }
+ }
+
+ this.forcedAnalyzer = settings.forcedAnalyzer() != null;
+ this.setAnalyzer(forcedAnalyzer ? settings.forcedAnalyzer() : settings.defaultAnalyzer());
+ if (settings.forcedQuoteAnalyzer() != null) {
+ this.forcedQuoteAnalyzer = true;
+ this.quoteAnalyzer = settings.forcedQuoteAnalyzer();
+ } else if (forcedAnalyzer) {
+ this.forcedQuoteAnalyzer = true;
+ this.quoteAnalyzer = settings.forcedAnalyzer();
+ } else {
+ this.forcedAnalyzer = false;
+ this.quoteAnalyzer = settings.defaultQuoteAnalyzer();
+ }
+ this.quoteFieldSuffix = settings.quoteFieldSuffix();
+ setMultiTermRewriteMethod(settings.rewriteMethod());
+ setEnablePositionIncrements(settings.enablePositionIncrements());
+ setAutoGeneratePhraseQueries(settings.autoGeneratePhraseQueries());
+ setAllowLeadingWildcard(settings.allowLeadingWildcard());
+ setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
+ setPhraseSlop(settings.phraseSlop());
+ setDefaultOperator(settings.defaultOperator());
+ setFuzzyMinSim(settings.fuzzyMinSim());
+ setFuzzyPrefixLength(settings.fuzzyPrefixLength());
+ this.analyzeWildcard = settings.analyzeWildcard();
+ }
+
+ /**
+     * We override this one so we can get the fuzzy part to be treated as a string, so people can do: "age:10~5" or "timestamp:2012-10-10~5d"
+ */
+ @Override
+ Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
+ if (fuzzySlop.image.length() == 1) {
+ return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim));
+ }
+ return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1));
+ }
+
+ @Override
+ protected Query newTermQuery(Term term) {
+ if (currentMapper != null) {
+ Query termQuery = currentMapper.queryStringTermQuery(term);
+ if (termQuery != null) {
+ return termQuery;
+ }
+ }
+ return super.newTermQuery(term);
+ }
+
+ @Override
+ protected Query newMatchAllDocsQuery() {
+ return Queries.newMatchAllQuery();
+ }
+
+ @Override
+ public Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
+ FieldQueryExtension fieldQueryExtension = fieldQueryExtensions.get(field);
+ if (fieldQueryExtension != null) {
+ return fieldQueryExtension.query(parseContext, queryText);
+ }
+ Collection<String> fields = extractMultiFields(field);
+ if (fields != null) {
+ if (fields.size() == 1) {
+ return getFieldQuerySingle(fields.iterator().next(), queryText, quoted);
+ }
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = getFieldQuerySingle(mField, queryText, quoted);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = getFieldQuerySingle(mField, queryText, quoted);
+ if (q != null) {
+ applyBoost(mField, q);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ } else {
+ return getFieldQuerySingle(field, queryText, quoted);
+ }
+ }
+
+ private Query getFieldQuerySingle(String field, String queryText, boolean quoted) throws ParseException {
+ if (!quoted && queryText.length() > 1) {
+ if (queryText.charAt(0) == '>') {
+ if (queryText.length() > 2) {
+ if (queryText.charAt(1) == '=') {
+ return getRangeQuerySingle(field, queryText.substring(2), null, true, true);
+ }
+ }
+ return getRangeQuerySingle(field, queryText.substring(1), null, false, true);
+ } else if (queryText.charAt(0) == '<') {
+ if (queryText.length() > 2) {
+ if (queryText.charAt(1) == '=') {
+ return getRangeQuerySingle(field, null, queryText.substring(2), true, true);
+ }
+ }
+ return getRangeQuerySingle(field, null, queryText.substring(1), true, false);
+ }
+ }
+ currentMapper = null;
+ Analyzer oldAnalyzer = getAnalyzer();
+ try {
+ MapperService.SmartNameFieldMappers fieldMappers = null;
+ if (quoted) {
+ setAnalyzer(quoteAnalyzer);
+ if (quoteFieldSuffix != null) {
+ fieldMappers = parseContext.smartFieldMappers(field + quoteFieldSuffix);
+ }
+ }
+ if (fieldMappers == null) {
+ fieldMappers = parseContext.smartFieldMappers(field);
+ }
+ if (fieldMappers != null) {
+ if (quoted) {
+ if (!forcedQuoteAnalyzer) {
+ setAnalyzer(fieldMappers.searchQuoteAnalyzer());
+ }
+ } else {
+ if (!forcedAnalyzer) {
+ setAnalyzer(fieldMappers.searchAnalyzer());
+ }
+ }
+ currentMapper = fieldMappers.fieldMappers().mapper();
+ if (currentMapper != null) {
+ Query query = null;
+ if (currentMapper.useTermQueryWithQueryString()) {
+ try {
+ if (fieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{fieldMappers.docMapper().type()});
+ try {
+ query = currentMapper.termQuery(queryText, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ query = currentMapper.termQuery(queryText, parseContext);
+ }
+ } catch (RuntimeException e) {
+ if (settings.lenient()) {
+ return null;
+ } else {
+ throw e;
+ }
+ }
+ }
+ if (query == null) {
+ query = super.getFieldQuery(currentMapper.names().indexName(), queryText, quoted);
+ }
+ return wrapSmartNameQuery(query, fieldMappers, parseContext);
+ }
+ }
+ return super.getFieldQuery(field, queryText, quoted);
+ } finally {
+ setAnalyzer(oldAnalyzer);
+ }
+ }
+
+ @Override
+ protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
+ Collection<String> fields = extractMultiFields(field);
+ if (fields != null) {
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = super.getFieldQuery(mField, queryText, slop);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ applySlop(q, slop);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = super.getFieldQuery(mField, queryText, slop);
+ if (q != null) {
+ applyBoost(mField, q);
+ applySlop(q, slop);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ } else {
+ return super.getFieldQuery(field, queryText, slop);
+ }
+ }
+
+ @Override
+ protected Query getRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) throws ParseException {
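+ // a "*" bound means the range is unbounded on that side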
+ if ("*".equals(part1)) {
+ part1 = null;
+ }
+ if ("*".equals(part2)) {
+ part2 = null;
+ }
+
+ Collection<String> fields = extractMultiFields(field);
+
+ if (fields == null) {
+ return getRangeQuerySingle(field, part1, part2, startInclusive, endInclusive);
+ }
+
+
+ if (fields.size() == 1) {
+ return getRangeQuerySingle(fields.iterator().next(), part1, part2, startInclusive, endInclusive);
+ }
+
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
+ if (q != null) {
+ applyBoost(mField, q);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ }
+
+ private Query getRangeQuerySingle(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) {
+ currentMapper = null;
+ MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
+ if (fieldMappers != null) {
+ currentMapper = fieldMappers.fieldMappers().mapper();
+ if (currentMapper != null) {
+
+ if (lowercaseExpandedTerms && !currentMapper.isNumeric()) {
+ part1 = part1 == null ? null : part1.toLowerCase(locale);
+ part2 = part2 == null ? null : part2.toLowerCase(locale);
+ }
+
+ try {
+ Query rangeQuery = currentMapper.rangeQuery(part1, part2, startInclusive, endInclusive, parseContext);
+ return wrapSmartNameQuery(rangeQuery, fieldMappers, parseContext);
+ } catch (RuntimeException e) {
+ if (settings.lenient()) {
+ return null;
+ }
+ throw e;
+ }
+ }
+ }
+ return newRangeQuery(field, part1, part2, startInclusive, endInclusive);
+ }
+
+ protected Query getFuzzyQuery(String field, String termStr, String minSimilarity) throws ParseException {
+ if (lowercaseExpandedTerms) {
+ termStr = termStr.toLowerCase(locale);
+ }
+ Collection<String> fields = extractMultiFields(field);
+ if (fields != null) {
+ if (fields.size() == 1) {
+ return getFuzzyQuerySingle(fields.iterator().next(), termStr, minSimilarity);
+ }
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
+ if (q != null) {
+ applyBoost(mField, q);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ } else {
+ return getFuzzyQuerySingle(field, termStr, minSimilarity);
+ }
+ }
+
+ private Query getFuzzyQuerySingle(String field, String termStr, String minSimilarity) throws ParseException {
+ currentMapper = null;
+ MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
+ if (fieldMappers != null) {
+ currentMapper = fieldMappers.fieldMappers().mapper();
+ if (currentMapper != null) {
+ try {
+ //LUCENE 4 UPGRADE I disabled transpositions here by default - maybe this needs to be changed
+ Query fuzzyQuery = currentMapper.fuzzyQuery(termStr, Fuzziness.build(minSimilarity), fuzzyPrefixLength, settings.fuzzyMaxExpansions(), false);
+ return wrapSmartNameQuery(fuzzyQuery, fieldMappers, parseContext);
+ } catch (RuntimeException e) {
+ if (settings.lenient()) {
+ return null;
+ }
+ throw e;
+ }
+ }
+ }
+ return super.getFuzzyQuery(field, termStr, Float.parseFloat(minSimilarity));
+ }
+
+ @Override
+ protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) {
+ String text = term.text();
+ int numEdits = FuzzyQuery.floatToEdits(minimumSimilarity, text.codePointCount(0, text.length()));
+ //LUCENE 4 UPGRADE I disabled transpositions here by default - maybe this needs to be changed
+ FuzzyQuery query = new FuzzyQuery(term, numEdits, prefixLength, settings.fuzzyMaxExpansions(), false);
+ QueryParsers.setRewriteMethod(query, settings.fuzzyRewriteMethod());
+ return query;
+ }
+
+ @Override
+ protected Query getPrefixQuery(String field, String termStr) throws ParseException {
+ if (lowercaseExpandedTerms) {
+ termStr = termStr.toLowerCase(locale);
+ }
+ Collection<String> fields = extractMultiFields(field);
+ if (fields != null) {
+ if (fields.size() == 1) {
+ return getPrefixQuerySingle(fields.iterator().next(), termStr);
+ }
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = getPrefixQuerySingle(mField, termStr);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = getPrefixQuerySingle(mField, termStr);
+ if (q != null) {
+ applyBoost(mField, q);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ } else {
+ return getPrefixQuerySingle(field, termStr);
+ }
+ }
+
+ private Query getPrefixQuerySingle(String field, String termStr) throws ParseException {
+ currentMapper = null;
+ Analyzer oldAnalyzer = getAnalyzer();
+ try {
+ MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
+ if (fieldMappers != null) {
+ if (!forcedAnalyzer) {
+ setAnalyzer(fieldMappers.searchAnalyzer());
+ }
+ currentMapper = fieldMappers.fieldMappers().mapper();
+ if (currentMapper != null) {
+ Query query = null;
+ if (currentMapper.useTermQueryWithQueryString()) {
+ if (fieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{fieldMappers.docMapper().type()});
+ try {
+ query = currentMapper.prefixQuery(termStr, multiTermRewriteMethod, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ query = currentMapper.prefixQuery(termStr, multiTermRewriteMethod, parseContext);
+ }
+ }
+ if (query == null) {
+ query = getPossiblyAnalyzedPrefixQuery(currentMapper.names().indexName(), termStr);
+ }
+ return wrapSmartNameQuery(query, fieldMappers, parseContext);
+ }
+ }
+ return getPossiblyAnalyzedPrefixQuery(field, termStr);
+ } catch (RuntimeException e) {
+ if (settings.lenient()) {
+ return null;
+ }
+ throw e;
+ } finally {
+ setAnalyzer(oldAnalyzer);
+ }
+ }
+
+ private Query getPossiblyAnalyzedPrefixQuery(String field, String termStr) throws ParseException {
+ if (!analyzeWildcard) {
+ return super.getPrefixQuery(field, termStr);
+ }
+ // get Analyzer from superclass and tokenize the term
+ TokenStream source;
+ try {
+ source = getAnalyzer().tokenStream(field, termStr);
+ source.reset();
+ } catch (IOException e) {
+ return super.getPrefixQuery(field, termStr);
+ }
+ List<String> tlist = new ArrayList<String>();
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+
+ while (true) {
+ try {
+ if (!source.incrementToken()) break;
+ } catch (IOException e) {
+ break;
+ }
+ tlist.add(termAtt.toString());
+ }
+
+ try {
+ source.close();
+ } catch (IOException e) {
+ // ignore
+ }
+
+ if (tlist.size() == 1) {
+ return super.getPrefixQuery(field, tlist.get(0));
+ } else {
+ // build a boolean query with prefix on each one...
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String token : tlist) {
+ clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
+ }
+ return getBooleanQuery(clauses, true);
+
+ //return super.getPrefixQuery(field, termStr);
+
+ /* this means that the analyzer used either added or consumed
+ * (common for a stemmer) tokens, and we can't build a PrefixQuery */
+ // throw new ParseException("Cannot build PrefixQuery with analyzer "
+ //         + getAnalyzer().getClass()
+ //         + (tlist.size() > 1 ? " - token(s) added" : " - token consumed"));
+ }
+
+ }
+
+ @Override
+ protected Query getWildcardQuery(String field, String termStr) throws ParseException {
+ if (termStr.equals("*")) {
+ // we want to optimize for match all query for the "*:*", and "*" cases
+ if ("*".equals(field) || Objects.equal(field, this.field)) {
+ String actualField = field;
+ if (actualField == null) {
+ actualField = this.field;
+ }
+ if (actualField == null) {
+ return newMatchAllDocsQuery();
+ }
+ if ("*".equals(actualField) || "_all".equals(actualField)) {
+ return newMatchAllDocsQuery();
+ }
+ // effectively, we check if a field exists or not
+ return fieldQueryExtensions.get(ExistsFieldQueryExtension.NAME).query(parseContext, actualField);
+ }
+ }
+ if (lowercaseExpandedTerms) {
+ termStr = termStr.toLowerCase(locale);
+ }
+ Collection<String> fields = extractMultiFields(field);
+ if (fields != null) {
+ if (fields.size() == 1) {
+ return getWildcardQuerySingle(fields.iterator().next(), termStr);
+ }
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = getWildcardQuerySingle(mField, termStr);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = getWildcardQuerySingle(mField, termStr);
+ if (q != null) {
+ applyBoost(mField, q);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ } else {
+ return getWildcardQuerySingle(field, termStr);
+ }
+ }
+
+ private Query getWildcardQuerySingle(String field, String termStr) throws ParseException {
+ String indexedNameField = field;
+ currentMapper = null;
+ Analyzer oldAnalyzer = getAnalyzer();
+ try {
+ MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
+ if (fieldMappers != null) {
+ if (!forcedAnalyzer) {
+ setAnalyzer(fieldMappers.searchAnalyzer());
+ }
+ currentMapper = fieldMappers.fieldMappers().mapper();
+ if (currentMapper != null) {
+ indexedNameField = currentMapper.names().indexName();
+ }
+ return wrapSmartNameQuery(getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr), fieldMappers, parseContext);
+ }
+ return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
+ } catch (RuntimeException e) {
+ if (settings.lenient()) {
+ return null;
+ }
+ throw e;
+ } finally {
+ setAnalyzer(oldAnalyzer);
+ }
+ }
+
+ private Query getPossiblyAnalyzedWildcardQuery(String field, String termStr) throws ParseException {
+ if (!analyzeWildcard) {
+ return super.getWildcardQuery(field, termStr);
+ }
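+ // walk the wildcard expression: analyze every contiguous run of plain
+ // characters and stitch the analyzed tokens back together with the
+ // wildcard characters ('?' and '*') kept in place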
+ boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*"));
+ StringBuilder aggStr = new StringBuilder();
+ StringBuilder tmp = new StringBuilder();
+ for (int i = 0; i < termStr.length(); i++) {
+ char c = termStr.charAt(i);
+ if (c == '?' || c == '*') {
+ if (isWithinToken) {
+ try {
+ TokenStream source = getAnalyzer().tokenStream(field, tmp.toString());
+ source.reset();
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+ if (source.incrementToken()) {
+ String term = termAtt.toString();
+ if (term.length() == 0) {
+ // no tokens, just use what we have now
+ aggStr.append(tmp);
+ } else {
+ aggStr.append(term);
+ }
+ } else {
+ // no tokens, just use what we have now
+ aggStr.append(tmp);
+ }
+ source.close();
+ } catch (IOException e) {
+ aggStr.append(tmp);
+ }
+ tmp.setLength(0);
+ }
+ isWithinToken = false;
+ aggStr.append(c);
+ } else {
+ tmp.append(c);
+ isWithinToken = true;
+ }
+ }
+ if (isWithinToken) {
+ try {
+ TokenStream source = getAnalyzer().tokenStream(field, tmp.toString());
+ source.reset();
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+ if (source.incrementToken()) {
+ String term = termAtt.toString();
+ if (term.length() == 0) {
+ // no tokens, just use what we have now
+ aggStr.append(tmp);
+ } else {
+ aggStr.append(term);
+ }
+ } else {
+ // no tokens, just use what we have now
+ aggStr.append(tmp);
+ }
+ source.close();
+ } catch (IOException e) {
+ aggStr.append(tmp);
+ }
+ }
+
+ return super.getWildcardQuery(field, aggStr.toString());
+ }
+
+ @Override
+ protected Query getRegexpQuery(String field, String termStr) throws ParseException {
+ if (lowercaseExpandedTerms) {
+ termStr = termStr.toLowerCase(locale);
+ }
+ Collection<String> fields = extractMultiFields(field);
+ if (fields != null) {
+ if (fields.size() == 1) {
+ return getRegexpQuerySingle(fields.iterator().next(), termStr);
+ }
+ if (settings.useDisMax()) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+ boolean added = false;
+ for (String mField : fields) {
+ Query q = getRegexpQuerySingle(mField, termStr);
+ if (q != null) {
+ added = true;
+ applyBoost(mField, q);
+ disMaxQuery.add(q);
+ }
+ }
+ if (!added) {
+ return null;
+ }
+ return disMaxQuery;
+ } else {
+ List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+ for (String mField : fields) {
+ Query q = getRegexpQuerySingle(mField, termStr);
+ if (q != null) {
+ applyBoost(mField, q);
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (clauses.size() == 0) // happens for stopwords
+ return null;
+ return getBooleanQuery(clauses, true);
+ }
+ } else {
+ return getRegexpQuerySingle(field, termStr);
+ }
+ }
+
+ private Query getRegexpQuerySingle(String field, String termStr) throws ParseException {
+ currentMapper = null;
+ Analyzer oldAnalyzer = getAnalyzer();
+ try {
+ MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
+ if (fieldMappers != null) {
+ if (!forcedAnalyzer) {
+ setAnalyzer(fieldMappers.searchAnalyzer());
+ }
+ currentMapper = fieldMappers.fieldMappers().mapper();
+ if (currentMapper != null) {
+ Query query = null;
+ if (currentMapper.useTermQueryWithQueryString()) {
+ if (fieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{fieldMappers.docMapper().type()});
+ try {
+ query = currentMapper.regexpQuery(termStr, RegExp.ALL, multiTermRewriteMethod, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ query = currentMapper.regexpQuery(termStr, RegExp.ALL, multiTermRewriteMethod, parseContext);
+ }
+ }
+ if (query == null) {
+ query = super.getRegexpQuery(field, termStr);
+ }
+ return wrapSmartNameQuery(query, fieldMappers, parseContext);
+ }
+ }
+ return super.getRegexpQuery(field, termStr);
+ } catch (RuntimeException e) {
+ if (settings.lenient()) {
+ return null;
+ }
+ throw e;
+ } finally {
+ setAnalyzer(oldAnalyzer);
+ }
+ }
+
+ @Override
+ protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
+ Query q = super.getBooleanQuery(clauses, disableCoord);
+ if (q == null) {
+ return null;
+ }
+ return optimizeQuery(fixNegativeQueryIfNeeded(q));
+ }
+
+ private void applyBoost(String field, Query q) {
+ if (settings.boosts() != null) {
+ float boost = 1f;
+ if (settings.boosts().containsKey(field)) {
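+ // hppc idiom: lget() returns the value for the key that the
+ // preceding containsKey() call just looked up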
+ boost = settings.boosts().lget();
+ }
+ q.setBoost(boost);
+ }
+ }
+
+ private void applySlop(Query q, int slop) {
+ if (q instanceof XFilteredQuery) {
+ applySlop(((XFilteredQuery)q).getQuery(), slop);
+ }
+ if (q instanceof PhraseQuery) {
+ ((PhraseQuery) q).setSlop(slop);
+ } else if (q instanceof MultiPhraseQuery) {
+ ((MultiPhraseQuery) q).setSlop(slop);
+ }
+ }
+
+ private Collection<String> extractMultiFields(String field) {
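+ // an explicit field name may be a pattern that expands to several index
+ // names; a null field falls back to the fields configured in the settings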
+ Collection<String> fields = null;
+ if (field != null) {
+ fields = parseContext.simpleMatchToIndexNames(field);
+ } else {
+ fields = settings.fields();
+ }
+ return fields;
+ }
+
+ @Override
+ public Query parse(String query) throws ParseException {
+ if (query.trim().isEmpty()) {
+ // if the query string is empty we return no docs / an empty result;
+ // this is simple to change on the client side if matching all docs
+ // or some default query is required instead
+ return new MatchNoDocsQuery();
+ }
+ return super.parse(query);
+ }
+}
diff --git a/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java b/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java
new file mode 100644
index 0000000..ad200d4
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.queryparser.classic;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.index.query.MissingFilterParser;
+import org.elasticsearch.index.query.QueryParseContext;
+
+/**
+ * Query string extension that matches documents which have no value for the
+ * given field, e.g. {@code _missing_:user}.
+ */
+public class MissingFieldQueryExtension implements FieldQueryExtension {
+
+ public static final String NAME = "_missing_";
+
+ @Override
+ public Query query(QueryParseContext parseContext, String queryText) {
+ return new XConstantScoreQuery(MissingFilterParser.newFilter(parseContext, queryText,
+ MissingFilterParser.DEFAULT_EXISTENCE_VALUE, MissingFilterParser.DEFAULT_NULL_VALUE, null));
+ }
+}
diff --git a/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java b/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java
new file mode 100644
index 0000000..9b8f9aa
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java
@@ -0,0 +1,376 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.queryparser.classic;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.MultiTermQuery;
+
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Holds all the settings used to configure a {@link MapperQueryParser};
+ * {@link #equals(Object)} and {@link #hashCode()} allow instances to be used
+ * as cache keys for parsed queries.
+ */
+public class QueryParserSettings {
+
+ public static final boolean DEFAULT_ALLOW_LEADING_WILDCARD = true;
+ public static final boolean DEFAULT_ANALYZE_WILDCARD = false;
+ public static final float DEFAULT_BOOST = 1.f;
+
+ private String queryString;
+ private String defaultField;
+ private float boost = DEFAULT_BOOST;
+ private QueryParser.Operator defaultOperator = QueryParser.Operator.OR;
+ private boolean autoGeneratePhraseQueries = false;
+ private boolean allowLeadingWildcard = DEFAULT_ALLOW_LEADING_WILDCARD;
+ private boolean lowercaseExpandedTerms = true;
+ private boolean enablePositionIncrements = true;
+ private int phraseSlop = 0;
+ private float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity;
+ private int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
+ private int fuzzyMaxExpansions = FuzzyQuery.defaultMaxExpansions;
+ private MultiTermQuery.RewriteMethod fuzzyRewriteMethod = null;
+ private boolean analyzeWildcard = DEFAULT_ANALYZE_WILDCARD;
+ private boolean escape = false;
+ private Analyzer defaultAnalyzer = null;
+ private Analyzer defaultQuoteAnalyzer = null;
+ private Analyzer forcedAnalyzer = null;
+ private Analyzer forcedQuoteAnalyzer = null;
+ private String quoteFieldSuffix = null;
+ private MultiTermQuery.RewriteMethod rewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+ private String minimumShouldMatch;
+ private boolean lenient;
+
+
+ List<String> fields = null;
+ Collection<String> queryTypes = null;
+ ObjectFloatOpenHashMap<String> boosts = null;
+ float tieBreaker = 0.0f;
+ boolean useDisMax = true;
+
+ public boolean isCacheable() {
+ // a hack for now :) to determine if a query string is cacheable:
+ // anything containing "now" is assumed to use date math relative to
+ // the current time, so its result can't be reused
+ return !queryString.contains("now");
+ }
+
+ public String queryString() {
+ return queryString;
+ }
+
+ public void queryString(String queryString) {
+ this.queryString = queryString;
+ }
+
+ public String defaultField() {
+ return defaultField;
+ }
+
+ public void defaultField(String defaultField) {
+ this.defaultField = defaultField;
+ }
+
+ public float boost() {
+ return boost;
+ }
+
+ public void boost(float boost) {
+ this.boost = boost;
+ }
+
+ public QueryParser.Operator defaultOperator() {
+ return defaultOperator;
+ }
+
+ public void defaultOperator(QueryParser.Operator defaultOperator) {
+ this.defaultOperator = defaultOperator;
+ }
+
+ public boolean autoGeneratePhraseQueries() {
+ return autoGeneratePhraseQueries;
+ }
+
+ public void autoGeneratePhraseQueries(boolean autoGeneratePhraseQueries) {
+ this.autoGeneratePhraseQueries = autoGeneratePhraseQueries;
+ }
+
+ public boolean allowLeadingWildcard() {
+ return allowLeadingWildcard;
+ }
+
+ public void allowLeadingWildcard(boolean allowLeadingWildcard) {
+ this.allowLeadingWildcard = allowLeadingWildcard;
+ }
+
+ public boolean lowercaseExpandedTerms() {
+ return lowercaseExpandedTerms;
+ }
+
+ public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
+ this.lowercaseExpandedTerms = lowercaseExpandedTerms;
+ }
+
+ public boolean enablePositionIncrements() {
+ return enablePositionIncrements;
+ }
+
+ public void enablePositionIncrements(boolean enablePositionIncrements) {
+ this.enablePositionIncrements = enablePositionIncrements;
+ }
+
+ public int phraseSlop() {
+ return phraseSlop;
+ }
+
+ public void phraseSlop(int phraseSlop) {
+ this.phraseSlop = phraseSlop;
+ }
+
+ public float fuzzyMinSim() {
+ return fuzzyMinSim;
+ }
+
+ public void fuzzyMinSim(float fuzzyMinSim) {
+ this.fuzzyMinSim = fuzzyMinSim;
+ }
+
+ public int fuzzyPrefixLength() {
+ return fuzzyPrefixLength;
+ }
+
+ public void fuzzyPrefixLength(int fuzzyPrefixLength) {
+ this.fuzzyPrefixLength = fuzzyPrefixLength;
+ }
+
+ public int fuzzyMaxExpansions() {
+ return fuzzyMaxExpansions;
+ }
+
+ public void fuzzyMaxExpansions(int fuzzyMaxExpansions) {
+ this.fuzzyMaxExpansions = fuzzyMaxExpansions;
+ }
+
+ public MultiTermQuery.RewriteMethod fuzzyRewriteMethod() {
+ return fuzzyRewriteMethod;
+ }
+
+ public void fuzzyRewriteMethod(MultiTermQuery.RewriteMethod fuzzyRewriteMethod) {
+ this.fuzzyRewriteMethod = fuzzyRewriteMethod;
+ }
+
+ public boolean escape() {
+ return escape;
+ }
+
+ public void escape(boolean escape) {
+ this.escape = escape;
+ }
+
+ public Analyzer defaultAnalyzer() {
+ return defaultAnalyzer;
+ }
+
+ public void defaultAnalyzer(Analyzer defaultAnalyzer) {
+ this.defaultAnalyzer = defaultAnalyzer;
+ }
+
+ public Analyzer defaultQuoteAnalyzer() {
+ return defaultQuoteAnalyzer;
+ }
+
+ public void defaultQuoteAnalyzer(Analyzer defaultQuoteAnalyzer) {
+ this.defaultQuoteAnalyzer = defaultQuoteAnalyzer;
+ }
+
+ public Analyzer forcedAnalyzer() {
+ return forcedAnalyzer;
+ }
+
+ public void forcedAnalyzer(Analyzer forcedAnalyzer) {
+ this.forcedAnalyzer = forcedAnalyzer;
+ }
+
+ public Analyzer forcedQuoteAnalyzer() {
+ return forcedQuoteAnalyzer;
+ }
+
+ public void forcedQuoteAnalyzer(Analyzer forcedQuoteAnalyzer) {
+ this.forcedQuoteAnalyzer = forcedQuoteAnalyzer;
+ }
+
+ public boolean analyzeWildcard() {
+ return this.analyzeWildcard;
+ }
+
+ public void analyzeWildcard(boolean analyzeWildcard) {
+ this.analyzeWildcard = analyzeWildcard;
+ }
+
+ public MultiTermQuery.RewriteMethod rewriteMethod() {
+ return this.rewriteMethod;
+ }
+
+ public void rewriteMethod(MultiTermQuery.RewriteMethod rewriteMethod) {
+ this.rewriteMethod = rewriteMethod;
+ }
+
+ public String minimumShouldMatch() {
+ return this.minimumShouldMatch;
+ }
+
+ public void minimumShouldMatch(String minimumShouldMatch) {
+ this.minimumShouldMatch = minimumShouldMatch;
+ }
+
+ public void quoteFieldSuffix(String quoteFieldSuffix) {
+ this.quoteFieldSuffix = quoteFieldSuffix;
+ }
+
+ public String quoteFieldSuffix() {
+ return this.quoteFieldSuffix;
+ }
+
+ public void lenient(boolean lenient) {
+ this.lenient = lenient;
+ }
+
+ public boolean lenient() {
+ return this.lenient;
+ }
+
+ public List<String> fields() {
+ return fields;
+ }
+
+ public void fields(List<String> fields) {
+ this.fields = fields;
+ }
+
+ public Collection<String> queryTypes() {
+ return queryTypes;
+ }
+
+ public void queryTypes(Collection<String> queryTypes) {
+ this.queryTypes = queryTypes;
+ }
+
+ public ObjectFloatOpenHashMap<String> boosts() {
+ return boosts;
+ }
+
+ public void boosts(ObjectFloatOpenHashMap<String> boosts) {
+ this.boosts = boosts;
+ }
+
+ public float tieBreaker() {
+ return tieBreaker;
+ }
+
+ public void tieBreaker(float tieBreaker) {
+ this.tieBreaker = tieBreaker;
+ }
+
+ public boolean useDisMax() {
+ return useDisMax;
+ }
+
+ public void useDisMax(boolean useDisMax) {
+ this.useDisMax = useDisMax;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ QueryParserSettings that = (QueryParserSettings) o;
+
+ if (autoGeneratePhraseQueries != that.autoGeneratePhraseQueries) return false;
+ if (allowLeadingWildcard != that.allowLeadingWildcard) return false;
+ if (Float.compare(that.boost, boost) != 0) return false;
+ if (enablePositionIncrements != that.enablePositionIncrements) return false;
+ if (escape != that.escape) return false;
+ if (analyzeWildcard != that.analyzeWildcard) return false;
+ if (Float.compare(that.fuzzyMinSim, fuzzyMinSim) != 0) return false;
+ if (fuzzyPrefixLength != that.fuzzyPrefixLength) return false;
+ if (fuzzyMaxExpansions != that.fuzzyMaxExpansions) return false;
+ if (fuzzyRewriteMethod != null ? !fuzzyRewriteMethod.equals(that.fuzzyRewriteMethod) : that.fuzzyRewriteMethod != null)
+ return false;
+ if (lowercaseExpandedTerms != that.lowercaseExpandedTerms) return false;
+ if (phraseSlop != that.phraseSlop) return false;
+ if (defaultAnalyzer != null ? !defaultAnalyzer.equals(that.defaultAnalyzer) : that.defaultAnalyzer != null)
+ return false;
+ if (defaultQuoteAnalyzer != null ? !defaultQuoteAnalyzer.equals(that.defaultQuoteAnalyzer) : that.defaultQuoteAnalyzer != null)
+ return false;
+ if (forcedAnalyzer != null ? !forcedAnalyzer.equals(that.forcedAnalyzer) : that.forcedAnalyzer != null)
+ return false;
+ if (forcedQuoteAnalyzer != null ? !forcedQuoteAnalyzer.equals(that.forcedQuoteAnalyzer) : that.forcedQuoteAnalyzer != null)
+ return false;
+ if (defaultField != null ? !defaultField.equals(that.defaultField) : that.defaultField != null) return false;
+ if (defaultOperator != that.defaultOperator) return false;
+ if (queryString != null ? !queryString.equals(that.queryString) : that.queryString != null) return false;
+ if (rewriteMethod != null ? !rewriteMethod.equals(that.rewriteMethod) : that.rewriteMethod != null)
+ return false;
+ if (minimumShouldMatch != null ? !minimumShouldMatch.equals(that.minimumShouldMatch) : that.minimumShouldMatch != null)
+ return false;
+ if (quoteFieldSuffix != null ? !quoteFieldSuffix.equals(that.quoteFieldSuffix) : that.quoteFieldSuffix != null)
+ return false;
+ if (lenient != that.lenient) {
+ return false;
+ }
+
+ if (Float.compare(that.tieBreaker, tieBreaker) != 0) return false;
+ if (useDisMax != that.useDisMax) return false;
+ if (boosts != null ? !boosts.equals(that.boosts) : that.boosts != null) return false;
+ if (fields != null ? !fields.equals(that.fields) : that.fields != null) return false;
+ if (queryTypes != null ? !queryTypes.equals(that.queryTypes) : that.queryTypes != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = queryString != null ? queryString.hashCode() : 0;
+ result = 31 * result + (defaultField != null ? defaultField.hashCode() : 0);
+ result = 31 * result + (boost != +0.0f ? Float.floatToIntBits(boost) : 0);
+ result = 31 * result + (defaultOperator != null ? defaultOperator.hashCode() : 0);
+ result = 31 * result + (autoGeneratePhraseQueries ? 1 : 0);
+ result = 31 * result + (allowLeadingWildcard ? 1 : 0);
+ result = 31 * result + (lowercaseExpandedTerms ? 1 : 0);
+ result = 31 * result + (enablePositionIncrements ? 1 : 0);
+ result = 31 * result + phraseSlop;
+ result = 31 * result + (fuzzyMinSim != +0.0f ? Float.floatToIntBits(fuzzyMinSim) : 0);
+ result = 31 * result + fuzzyPrefixLength;
+ result = 31 * result + (escape ? 1 : 0);
+ result = 31 * result + (defaultAnalyzer != null ? defaultAnalyzer.hashCode() : 0);
+ result = 31 * result + (defaultQuoteAnalyzer != null ? defaultQuoteAnalyzer.hashCode() : 0);
+ result = 31 * result + (forcedAnalyzer != null ? forcedAnalyzer.hashCode() : 0);
+ result = 31 * result + (forcedQuoteAnalyzer != null ? forcedQuoteAnalyzer.hashCode() : 0);
+ result = 31 * result + (analyzeWildcard ? 1 : 0);
+ // keep in sync with the fields compared in equals()
+ result = 31 * result + fuzzyMaxExpansions;
+ result = 31 * result + (fuzzyRewriteMethod != null ? fuzzyRewriteMethod.hashCode() : 0);
+ result = 31 * result + (rewriteMethod != null ? rewriteMethod.hashCode() : 0);
+ result = 31 * result + (minimumShouldMatch != null ? minimumShouldMatch.hashCode() : 0);
+ result = 31 * result + (quoteFieldSuffix != null ? quoteFieldSuffix.hashCode() : 0);
+ result = 31 * result + (lenient ? 1 : 0);
+
+ result = 31 * result + (fields != null ? fields.hashCode() : 0);
+ result = 31 * result + (queryTypes != null ? queryTypes.hashCode() : 0);
+ result = 31 * result + (boosts != null ? boosts.hashCode() : 0);
+ result = 31 * result + (tieBreaker != +0.0f ? Float.floatToIntBits(tieBreaker) : 0);
+ result = 31 * result + (useDisMax ? 1 : 0);
+ return result;
+ }
+}
diff --git a/src/main/java/org/apache/lucene/search/XReferenceManager.java b/src/main/java/org/apache/lucene/search/XReferenceManager.java
new file mode 100644
index 0000000..07fb066
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/XReferenceManager.java
@@ -0,0 +1,326 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.util.Version;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Utility class to safely share instances of a certain type across multiple
+ * threads, while periodically refreshing them. This class ensures each
+ * reference is closed only once all threads have finished using it. It is
+ * recommended to consult the documentation of {@link org.apache.lucene.search.XReferenceManager}
+ * implementations for their {@link #maybeRefresh()} semantics.
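+ * <p>
+ * The intended usage pattern (a sketch only; acquire and release must always
+ * be paired, and any concrete subclass such as {@link XSearcherManager}
+ * works the same way):
+ *
+ * <pre class="prettyprint">
+ * G ref = manager.acquire();
+ * try {
+ *   // use ref here, it cannot be closed while we hold it
+ * } finally {
+ *   manager.release(ref);
+ * }
+ * </pre>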
+ *
+ * @param <G>
+ * the concrete type that will be {@link #acquire() acquired} and
+ * {@link #release(Object) released}.
+ *
+ * @lucene.experimental
+ */
+public abstract class XReferenceManager<G> implements Closeable {
+ static {
+ assert Version.LUCENE_46 == org.elasticsearch.Version.CURRENT.luceneVersion : "Remove this once we are on LUCENE_47 - see LUCENE-5436";
+ }
+
+ private static final String REFERENCE_MANAGER_IS_CLOSED_MSG = "this ReferenceManager is closed";
+
+ protected volatile G current;
+
+ private final Lock refreshLock = new ReentrantLock();
+
+ private final List<RefreshListener> refreshListeners = new CopyOnWriteArrayList<RefreshListener>();
+
+ private void ensureOpen() {
+ if (current == null) {
+ throw new AlreadyClosedException(REFERENCE_MANAGER_IS_CLOSED_MSG);
+ }
+ }
+
+ private synchronized void swapReference(G newReference) throws IOException {
+ ensureOpen();
+ final G oldReference = current;
+ current = newReference;
+ release(oldReference);
+ }
+
+ /**
+ * Decrement reference counting on the given reference.
+ * @throws java.io.IOException if reference decrement on the given resource failed.
+ * */
+ protected abstract void decRef(G reference) throws IOException;
+
+ /**
+ * Refresh the given reference if needed. Returns {@code null} if no refresh
+ * was needed, otherwise a new refreshed reference.
+ * @throws org.apache.lucene.store.AlreadyClosedException if the reference manager has been {@link #close() closed}.
+ * @throws java.io.IOException if the refresh operation failed
+ */
+ protected abstract G refreshIfNeeded(G referenceToRefresh) throws IOException;
+
+ /**
+ * Try to increment reference counting on the given reference. Return true if
+ * the operation was successful.
+ * @throws org.apache.lucene.store.AlreadyClosedException if the reference manager has been {@link #close() closed}.
+ */
+ protected abstract boolean tryIncRef(G reference) throws IOException;
+
+ /**
+ * Obtain the current reference. You must match every call to acquire with one
+ * call to {@link #release}; it's best to do so in a finally clause, and set
+ * the reference to {@code null} to prevent accidental usage after it has been
+ * released.
+ * @throws org.apache.lucene.store.AlreadyClosedException if the reference manager has been {@link #close() closed}.
+ */
+ public final G acquire() throws IOException {
+ G ref;
+
+ do {
+ if ((ref = current) == null) {
+ throw new AlreadyClosedException(REFERENCE_MANAGER_IS_CLOSED_MSG);
+ }
+ if (tryIncRef(ref)) {
+ return ref;
+ }
+ if (getRefCount(ref) == 0 && current == ref) {
+ assert ref != null;
+ /* if we can't increment the reader but we are
+ still the current reference the RM is in an
+ illegal state since we can't make any progress
+ anymore. The reference is closed but the RM still
+ holds on to it as the actual instance.
+ This can only happen if somebody outside of the RM
+ decrements the refcount without a corresponding increment
+ since the RM assigns the new reference before counting down
+ the reference. */
+ throw new IllegalStateException("The managed reference has already closed - this is likely a bug when the reference count is modified outside of the ReferenceManager");
+ }
+ } while (true);
+ }
+
+ /**
+ * <p>
+ * Closes this ReferenceManager to prevent future {@link #acquire() acquiring}. A
+ * reference manager should be closed if the reference to the managed resource
+ * should be disposed or the application using the {@link org.apache.lucene.search.XReferenceManager}
+ * is shutting down. The managed resource might not be released immediately,
+ * if the {@link org.apache.lucene.search.XReferenceManager} user is holding on to a previously
+ * {@link #acquire() acquired} reference. The resource will be released once
+ * when the last reference is {@link #release(Object) released}. Those
+ * references can still be used as if the manager was still active.
+ * </p>
+ * <p>
+ * Applications should not {@link #acquire() acquire} new references from this
+ * manager once this method has been called. {@link #acquire() Acquiring} a
+ * resource on a closed {@link org.apache.lucene.search.XReferenceManager} will throw an
+ * {@link org.apache.lucene.store.AlreadyClosedException}.
+ * </p>
+ *
+ * @throws java.io.IOException
+ * if the underlying reader of the current reference could not be closed
+ */
+ @Override
+ public final synchronized void close() throws IOException {
+ if (current != null) {
+ // make sure we can call this more than once
+ // closeable javadoc says:
+ // if this is already closed then invoking this method has no effect.
+ swapReference(null);
+ afterClose();
+ }
+ }
+
+ /**
+ * Returns the current reference count of the given reference.
+ */
+ protected abstract int getRefCount(G reference);
+
+ /**
+ * Called after close(), so subclass can free any resources.
+ * @throws java.io.IOException if the after close operation in a sub-class throws an {@link java.io.IOException}
+ * */
+ protected void afterClose() throws IOException {
+ }
+
+ private void doMaybeRefresh() throws IOException {
+ // it's ok to call lock() here (blocking) because we're supposed to get here
+ // from either maybeRefresh() or maybeRefreshBlocking(), after the lock has
+ // already been obtained. Doing that protects us from an accidental bug
+ // where this method will be called outside the scope of refreshLock.
+ // Per ReentrantLock's javadoc, calling lock() by the same thread more than
+ // once is ok, as long as unlock() is called a matching number of times.
+ refreshLock.lock();
+ boolean refreshed = false;
+ try {
+ final G reference = acquire();
+ try {
+ notifyRefreshListenersBefore();
+ G newReference = refreshIfNeeded(reference);
+ if (newReference != null) {
+ assert newReference != reference : "refreshIfNeeded should return null if refresh wasn't needed";
+ try {
+ swapReference(newReference);
+ refreshed = true;
+ } finally {
+ if (!refreshed) {
+ release(newReference);
+ }
+ }
+ }
+ } finally {
+ release(reference);
+ notifyRefreshListenersRefreshed(refreshed);
+ }
+ afterMaybeRefresh();
+ } finally {
+ refreshLock.unlock();
+ }
+ }
+
+ /**
+ * You must call this (or {@link #maybeRefreshBlocking()}) periodically, if
+ * you want {@link #acquire()} to return refreshed instances.
+ *
+ * <p>
+ * <b>Threads</b>: it's fine for more than one thread to call this at once.
+ * Only the first thread will attempt the refresh; subsequent threads will see
+ * that another thread is already handling refresh and will return
+ * immediately. Note that this means if another thread is already refreshing
+ * then subsequent threads will return right away without waiting for the
+ * refresh to complete.
+ *
+ * <p>
+ * If this method returns true it means the calling thread either refreshed or
+ * that there were no changes to refresh. If it returns false it means another
+ * thread is currently refreshing.
+ * </p>
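+ * <p>
+ * A sketch of typical periodic use (scheduling is left to the caller):
+ * <pre class="prettyprint">
+ * // e.g. from a background thread every few seconds:
+ * boolean refreshed = manager.maybeRefresh();
+ * // false means another thread was already refreshing
+ * </pre>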
+ * @throws java.io.IOException if refreshing the resource causes an {@link java.io.IOException}
+ * @throws org.apache.lucene.store.AlreadyClosedException if the reference manager has been {@link #close() closed}.
+ */
+ public final boolean maybeRefresh() throws IOException {
+ ensureOpen();
+
+ // Ensure only 1 thread does refresh at once; other threads just return immediately:
+ final boolean doTryRefresh = refreshLock.tryLock();
+ if (doTryRefresh) {
+ try {
+ doMaybeRefresh();
+ } finally {
+ refreshLock.unlock();
+ }
+ }
+
+ return doTryRefresh;
+ }
+
+ /**
+ * You must call this (or {@link #maybeRefresh()}) periodically, if you want
+ * {@link #acquire()} to return refreshed instances.
+ *
+ * <p>
+ * <b>Threads</b>: unlike {@link #maybeRefresh()}, if another thread is
+ * currently refreshing, this method blocks until that thread completes. It is
+ * useful if you want to guarantee that the next call to {@link #acquire()}
+ * will return a refreshed instance. Otherwise, consider using the
+ * non-blocking {@link #maybeRefresh()}.
+ * @throws java.io.IOException if refreshing the resource causes an {@link java.io.IOException}
+ * @throws org.apache.lucene.store.AlreadyClosedException if the reference manager has been {@link #close() closed}.
+ */
+ public final void maybeRefreshBlocking() throws IOException {
+ ensureOpen();
+
+ // Ensure only 1 thread does refresh at once
+ refreshLock.lock();
+ try {
+ doMaybeRefresh();
+ } finally {
+ refreshLock.unlock();
+ }
+ }
+
+ /** Called after a refresh was attempted, regardless of
+ * whether a new reference was in fact created.
+ * @throws java.io.IOException if a low level I/O exception occurs
+ **/
+ protected void afterMaybeRefresh() throws IOException {
+ }
+
+ /**
+ * Release the reference previously obtained via {@link #acquire()}.
+ * <p>
+ * <b>NOTE:</b> it's safe to call this after {@link #close()}.
+ * @throws java.io.IOException if the release operation on the given resource throws an {@link java.io.IOException}
+ */
+ public final void release(G reference) throws IOException {
+ assert reference != null;
+ decRef(reference);
+ }
+
+ private void notifyRefreshListenersBefore() throws IOException {
+ for (RefreshListener refreshListener : refreshListeners) {
+ refreshListener.beforeRefresh();
+ }
+ }
+
+ private void notifyRefreshListenersRefreshed(boolean didRefresh) throws IOException {
+ for (RefreshListener refreshListener : refreshListeners) {
+ refreshListener.afterRefresh(didRefresh);
+ }
+ }
+
+ /**
+ * Adds a listener, to be notified when a reference is refreshed/swapped.
+ */
+ public void addListener(RefreshListener listener) {
+ if (listener == null) {
+ throw new NullPointerException("Listener cannot be null");
+ }
+ refreshListeners.add(listener);
+ }
+
+ /**
+ * Remove a listener added with {@link #addListener(RefreshListener)}.
+ */
+ public void removeListener(RefreshListener listener) {
+ if (listener == null) {
+ throw new NullPointerException("Listener cannot be null");
+ }
+ refreshListeners.remove(listener);
+ }
+
+ /** Use to receive notification when a refresh has
+ * finished. See {@link #addListener}. */
+ public interface RefreshListener {
+
+ /** Called right before a refresh attempt starts. */
+ void beforeRefresh() throws IOException;
+
+ /** Called after the attempted refresh; if the refresh
+ * did open a new reference then didRefresh will be true
+ * and {@link #acquire()} is guaranteed to return the new
+ * reference. */
+ void afterRefresh(boolean didRefresh) throws IOException;
+ }
+}
diff --git a/src/main/java/org/apache/lucene/search/XSearcherManager.java b/src/main/java/org/apache/lucene/search/XSearcherManager.java
new file mode 100644
index 0000000..36c74a0
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/XSearcherManager.java
@@ -0,0 +1,177 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
+
+/**
+ * Utility class to safely share {@link IndexSearcher} instances across multiple
+ * threads, while periodically reopening. This class ensures each searcher is
+ * closed only once all threads have finished using it.
+ *
+ * <p>
+ * Use {@link #acquire} to obtain the current searcher, and {@link #release} to
+ * release it, like this:
+ *
+ * <pre class="prettyprint">
+ * IndexSearcher s = manager.acquire();
+ * try {
+ * // Do searching, doc retrieval, etc. with s
+ * } finally {
+ * manager.release(s);
+ * }
+ * // Do not use s after this!
+ * s = null;
+ * </pre>
+ *
+ * <p>
+ * In addition you should periodically call {@link #maybeRefresh}. While it's
+ * possible to call this just before running each query, this is discouraged
+ * since it penalizes the unlucky queries that do the reopen. It's better to use
+ * a separate background thread that periodically calls {@link #maybeRefresh}. Finally,
+ * be sure to call {@link #close} once you are done.
+ *
+ * @see SearcherFactory
+ *
+ * @lucene.experimental
+ */
+public final class XSearcherManager extends XReferenceManager<IndexSearcher> {
+
+ static {
+ assert Version.LUCENE_46 == org.elasticsearch.Version.CURRENT.luceneVersion : "Remove this once we are on LUCENE_47 - see LUCENE-5436";
+ }
+
+ private final SearcherFactory searcherFactory;
+
+ /**
+ * Creates and returns a new XSearcherManager from the given
+ * {@link IndexWriter}.
+ *
+ * @param writer
+ * the IndexWriter to open the IndexReader from.
+ * @param applyAllDeletes
+ * If <code>true</code>, all buffered deletes will be applied (made
+ * visible) in the {@link IndexSearcher} / {@link DirectoryReader}.
+ * If <code>false</code>, the deletes may or may not be applied, but
+ * remain buffered (in IndexWriter) so that they will be applied in
+ * the future. Applying deletes can be costly, so if your app can
+ * tolerate deleted documents being returned you might gain some
+ * performance by passing <code>false</code>. See
+ * {@link DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)}.
+ * @param searcherFactory
+ * An optional {@link SearcherFactory}. Pass <code>null</code> if you
+ * don't require the searcher to be warmed before going live or other
+ * custom behavior.
+ *
+ * @throws IOException if there is a low-level I/O error
+ */
+ public XSearcherManager(IndexWriter writer, boolean applyAllDeletes, SearcherFactory searcherFactory) throws IOException {
+ if (searcherFactory == null) {
+ searcherFactory = new SearcherFactory();
+ }
+ this.searcherFactory = searcherFactory;
+ current = getSearcher(searcherFactory, DirectoryReader.open(writer, applyAllDeletes));
+ }
+
+ /**
+ * Creates and returns a new XSearcherManager from the given {@link Directory}.
+ * @param dir the directory to open the DirectoryReader on.
+ * @param searcherFactory An optional {@link SearcherFactory}. Pass
+ * <code>null</code> if you don't require the searcher to be warmed
+ * before going live or other custom behavior.
+ *
+ * @throws IOException if there is a low-level I/O error
+ */
+ public XSearcherManager(Directory dir, SearcherFactory searcherFactory) throws IOException {
+ if (searcherFactory == null) {
+ searcherFactory = new SearcherFactory();
+ }
+ this.searcherFactory = searcherFactory;
+ current = getSearcher(searcherFactory, DirectoryReader.open(dir));
+ }
+
+ @Override
+ protected void decRef(IndexSearcher reference) throws IOException {
+ reference.getIndexReader().decRef();
+ }
+
+ @Override
+ protected IndexSearcher refreshIfNeeded(IndexSearcher referenceToRefresh) throws IOException {
+ final IndexReader r = referenceToRefresh.getIndexReader();
+ assert r instanceof DirectoryReader: "searcher's IndexReader should be a DirectoryReader, but got " + r;
+ final IndexReader newReader = DirectoryReader.openIfChanged((DirectoryReader) r);
+ if (newReader == null) {
+ return null;
+ } else {
+ return getSearcher(searcherFactory, newReader);
+ }
+ }
+
+ @Override
+ protected boolean tryIncRef(IndexSearcher reference) {
+ return reference.getIndexReader().tryIncRef();
+ }
+
+ @Override
+ protected int getRefCount(IndexSearcher reference) {
+ return reference.getIndexReader().getRefCount();
+ }
+
+ /**
+ * Returns <code>true</code> if no changes have occurred since this searcher
+ * (i.e. its reader) was opened, otherwise <code>false</code>.
+ * @see DirectoryReader#isCurrent()
+ */
+ public boolean isSearcherCurrent() throws IOException {
+ final IndexSearcher searcher = acquire();
+ try {
+ final IndexReader r = searcher.getIndexReader();
+ assert r instanceof DirectoryReader: "searcher's IndexReader should be a DirectoryReader, but got " + r;
+ return ((DirectoryReader) r).isCurrent();
+ } finally {
+ release(searcher);
+ }
+ }
+
+ /** Expert: creates a searcher from the provided {@link
+ * IndexReader} using the provided {@link
+ * SearcherFactory}. NOTE: this decRefs incoming reader
+ * on throwing an exception. */
+ public static IndexSearcher getSearcher(SearcherFactory searcherFactory, IndexReader reader) throws IOException {
+ boolean success = false;
+ final IndexSearcher searcher;
+ try {
+ searcher = searcherFactory.newSearcher(reader);
+ if (searcher.getIndexReader() != reader) {
+ throw new IllegalStateException("SearcherFactory must wrap exactly the provided reader (got " + searcher.getIndexReader() + " but expected " + reader + ")");
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ reader.decRef();
+ }
+ }
+ return searcher;
+ }
+}
diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java
new file mode 100644
index 0000000..59ef784
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.search.highlight.Encoder;
+import org.elasticsearch.search.highlight.HighlightUtils;
+
+/**
+ * Custom passage formatter that allows us to:
+ * 1) extract different snippets (instead of a single big string) together with their scores ({@link Snippet})
+ * 2) use the {@link Encoder} implementations that are already used with the other highlighters
+ */
+public class CustomPassageFormatter extends PassageFormatter {
+
+ private final String preTag;
+ private final String postTag;
+ private final Encoder encoder;
+
+ public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) {
+ this.preTag = preTag;
+ this.postTag = postTag;
+ this.encoder = encoder;
+ }
+
+ @Override
+ public Snippet[] format(Passage[] passages, String content) {
+ Snippet[] snippets = new Snippet[passages.length];
+ int pos;
+ for (int j = 0; j < passages.length; j++) {
+ Passage passage = passages[j];
+ StringBuilder sb = new StringBuilder();
+ pos = passage.startOffset;
+ for (int i = 0; i < passage.numMatches; i++) {
+ int start = passage.matchStarts[i];
+ int end = passage.matchEnds[i];
+ // it's possible to have overlapping terms
+ if (start > pos) {
+ append(sb, content, pos, start);
+ }
+ if (end > pos) {
+ sb.append(preTag);
+ append(sb, content, Math.max(pos, start), end);
+ sb.append(postTag);
+ pos = end;
+ }
+ }
+ // its possible a "term" from the analyzer could span a sentence boundary.
+ append(sb, content, pos, Math.max(pos, passage.endOffset));
+ //we remove the paragraph separator if present at the end of the snippet (we used it as separator between values)
+ if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) {
+ sb.deleteCharAt(sb.length() - 1);
+ }
+ //and we trim the snippets too
+ snippets[j] = new Snippet(sb.toString().trim(), passage.score, passage.numMatches > 0);
+ }
+ return snippets;
+ }
+
+ protected void append(StringBuilder dest, String content, int start, int end) {
+ dest.append(encoder.encodeText(content.substring(start, end)));
+ }
+}
diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java
new file mode 100644
index 0000000..be5ad66
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.search.highlight.HighlightUtils;
+
+import java.io.IOException;
+import java.text.BreakIterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Subclass of the {@link XPostingsHighlighter} that works on a single field in a single document.
+ * It receives the field values as input and performs discrete highlighting on each value by
+ * calling the highlightDoc method multiple times.
+ * The query terms can be passed in, so that they don't have to be extracted multiple times.
+ *
+ * The use that we make of the postings highlighter is not optimal. It would be much better to
+ * highlight multiple docs in a single call, as we currently lose its sequential IO. But that would require:
+ * 1) making our fork more complex and harder to maintain, just to perform discrete highlighting (needed to return
+ * a different snippet per value when number_of_fragments=0 and the field has multiple values)
+ * 2) refactoring the elasticsearch highlight api, which currently works per hit
+ *
+ */
+public final class CustomPostingsHighlighter extends XPostingsHighlighter {
+
+ private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
+ private static final Passage[] EMPTY_PASSAGE = new Passage[0];
+
+ private final CustomPassageFormatter passageFormatter;
+ private final int noMatchSize;
+ private final int totalContentLength;
+ private final String[] fieldValues;
+ private final int[] fieldValuesOffsets;
+ private int currentValueIndex = 0;
+
+ private BreakIterator breakIterator;
+
+ public CustomPostingsHighlighter(CustomPassageFormatter passageFormatter, List<Object> fieldValues, boolean mergeValues, int maxLength, int noMatchSize) {
+ super(maxLength);
+ this.passageFormatter = passageFormatter;
+ this.noMatchSize = noMatchSize;
+
+ if (mergeValues) {
+ String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(getMultiValuedSeparator("")));
+ String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
+ this.fieldValues = new String[]{fieldValue};
+ this.fieldValuesOffsets = new int[]{0};
+ this.totalContentLength = fieldValue.length();
+ } else {
+ this.fieldValues = new String[fieldValues.size()];
+ this.fieldValuesOffsets = new int[fieldValues.size()];
+ int contentLength = 0;
+ int offset = 0;
+ int previousLength = -1;
+ for (int i = 0; i < fieldValues.size(); i++) {
+ String rawValue = fieldValues.get(i).toString();
+ String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
+ this.fieldValues[i] = fieldValue;
+ contentLength += fieldValue.length();
+ offset += previousLength + 1;
+ this.fieldValuesOffsets[i] = offset;
+ previousLength = fieldValue.length();
+ }
+ this.totalContentLength = contentLength;
+ }
+ }
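+
+    /*
+    For example, with mergeValues=false and values ["foo", "barbaz"], the loop above yields
+    fieldValuesOffsets = {0, 4}: each value starts right after the previous value plus the
+    one-character separator, so totalContentLength becomes 9.
+    */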
+
+ /*
+    Our own api to highlight a single field of a single document, passing in the query terms and getting back our own Snippet objects
+ */
+ public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
+ IndexReader reader = searcher.getIndexReader();
+ IndexReaderContext readerContext = reader.getContext();
+ List<AtomicReaderContext> leaves = readerContext.leaves();
+
+ String[] contents = new String[]{loadCurrentFieldValue()};
+ Map<Integer, Object> snippetsMap = highlightField(field, contents, getBreakIterator(field), terms, new int[]{docId}, leaves, maxPassages);
+
+ //increment the current value index so that next time we'll highlight the next value if available
+ currentValueIndex++;
+
+ Object snippetObject = snippetsMap.get(docId);
+ if (snippetObject != null && snippetObject instanceof Snippet[]) {
+ return (Snippet[]) snippetObject;
+ }
+ return EMPTY_SNIPPET;
+ }
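+
+    /*
+    A minimal usage sketch (hypothetical: formatter, terms, searcher, docId and field are assumed
+    to come from the calling highlighter code); it highlights each value of a multi-valued field
+    by calling highlightDoc once per value:
+
+    CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(formatter, fieldValues, false, 10000, 0);
+    List<Snippet> collected = new ArrayList<Snippet>();
+    for (int i = 0; i < fieldValues.size(); i++) {
+        collected.addAll(Arrays.asList(highlighter.highlightDoc(field, terms, searcher, docId, 1)));
+    }
+    */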
+
+ /*
+    Method provided through our own fork: allows proper scoring when performing per-value discrete highlighting.
+    Used to provide the total length of the field (all values), which the scoring depends on.
+ */
+ @Override
+ protected int getContentLength(String field, int docId) {
+ return totalContentLength;
+ }
+
+ /*
+    Method provided through our own fork: allows proper per-value discrete highlighting.
+    Used to provide the offset of the current value within the whole field.
+ */
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ if (currentValueIndex < fieldValuesOffsets.length) {
+ return fieldValuesOffsets[currentValueIndex];
+ }
+ throw new IllegalArgumentException("No more values offsets to return");
+ }
+
+ public void setBreakIterator(BreakIterator breakIterator) {
+ this.breakIterator = breakIterator;
+ }
+
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return passageFormatter;
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ if (breakIterator == null) {
+ return super.getBreakIterator(field);
+ }
+ return breakIterator;
+ }
+
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return HighlightUtils.PARAGRAPH_SEPARATOR;
+ }
+
+ /*
+    By default the postings highlighter returns a non-highlighted snippet when there are no matches.
+    We want to return no snippets by default, unless no_match_size is greater than 0.
+ */
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ if (noMatchSize > 0) {
+ //we want to return the first sentence of the first snippet only
+ return super.getEmptyHighlight(fieldName, bi, 1);
+ }
+ return EMPTY_PASSAGE;
+ }
+
+ /*
+ Not needed since we call our own loadCurrentFieldValue explicitly, but we override it anyway for consistency.
+ */
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{loadCurrentFieldValue()}};
+ }
+
+ /*
+    Our own method that returns the current field value, relying on the content that was provided when creating the highlighter.
+    Supports per-value discrete highlighting, where the highlightDoc method is called multiple times, once per value.
+ */
+ protected String loadCurrentFieldValue() {
+ if (currentValueIndex < fieldValues.length) {
+ return fieldValues[currentValueIndex];
+ }
+ throw new IllegalArgumentException("No more values to return");
+ }
+}
diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java b/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java
new file mode 100644
index 0000000..bf68020
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+/**
+ * Represents a scored highlighted snippet.
+ * It's our own arbitrary object that we get back from the postings highlighter when highlighting a document.
+ * Every snippet contains its formatted text and its score.
+ * The score is needed since we highlight every single value separately, and we might want to return snippets sorted by score.
+ */
+public class Snippet {
+
+ private final String text;
+ private final float score;
+ private final boolean isHighlighted;
+
+ public Snippet(String text, float score, boolean isHighlighted) {
+ this.text = text;
+ this.score = score;
+ this.isHighlighted = isHighlighted;
+ }
+
+ public String getText() {
+ return text;
+ }
+
+ public float getScore() {
+ return score;
+ }
+
+ public boolean isHighlighted() {
+ return isHighlighted;
+ }
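+
+    /*
+    The score makes it possible, for instance, to sort collected snippets from best to worst.
+    A sketch of our own (not an API this class provides):
+
+    Collections.sort(snippets, new Comparator<Snippet>() {
+        public int compare(Snippet a, Snippet b) {
+            return Float.compare(b.getScore(), a.getScore()); //descending by score
+        }
+    });
+    */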
+}
diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighter.java b/src/main/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighter.java
new file mode 100644
index 0000000..869b559
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighter.java
@@ -0,0 +1,777 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.InPlaceMergeSorter;
+import org.apache.lucene.util.UnicodeUtil;
+
+import java.io.IOException;
+import java.text.BreakIterator;
+import java.util.*;
+
+/*
+FORKED from Lucene 4.5 to be able to:
+1) support discrete highlighting for multiple values, so that we can return a different snippet per value when highlighting the whole text
+2) call the highlightField method directly from subclasses and provide the terms ourselves
+3) apply LUCENE-4906 so that the PassageFormatter can return arbitrary objects (LUCENE 4.6)
+
+All our changes start with //BEGIN EDIT
+ */
+public class XPostingsHighlighter {
+
+ //BEGIN EDIT added method to override offset for current value (default 0)
+ //we need this to perform discrete highlighting per field
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return 0;
+ }
+ //END EDIT
+
+ //BEGIN EDIT
+ //we need this to fix scoring when highlighting every single value separately, since the score depends on the total length of the field (all values rather than only the current one)
+ protected int getContentLength(String field, int docId) {
+ return -1;
+ }
+ //END EDIT
+
+
+ // TODO: maybe allow re-analysis for tiny fields? currently we require offsets,
+ // but if the analyzer is really fast and the field is tiny, this might really be
+ // unnecessary.
+
+ /** for rewriting: we don't want slow processing from MTQs */
+ private static final IndexReader EMPTY_INDEXREADER = new MultiReader();
+
+ /** Default maximum content size to process. Typically snippets
+ * closer to the beginning of the document better summarize its content */
+ public static final int DEFAULT_MAX_LENGTH = 10000;
+
+ private final int maxLength;
+
+ /** Set the first time {@link #getFormatter} is called,
+ * and then reused. */
+ private PassageFormatter defaultFormatter;
+
+ /** Set the first time {@link #getScorer} is called,
+ * and then reused. */
+ private PassageScorer defaultScorer;
+
+ /**
+ * Creates a new highlighter with default parameters.
+ */
+ public XPostingsHighlighter() {
+ this(DEFAULT_MAX_LENGTH);
+ }
+
+ /**
+ * Creates a new highlighter, specifying maximum content length.
+ * @param maxLength maximum content size to process.
+ * @throws IllegalArgumentException if <code>maxLength</code> is negative or <code>Integer.MAX_VALUE</code>
+ */
+ public XPostingsHighlighter(int maxLength) {
+ if (maxLength < 0 || maxLength == Integer.MAX_VALUE) {
+ // two reasons: no overflow problems in BreakIterator.preceding(offset+1),
+ // our sentinel in the offsets queue uses this value to terminate.
+ throw new IllegalArgumentException("maxLength must be < Integer.MAX_VALUE");
+ }
+ this.maxLength = maxLength;
+ }
+
+ /** Returns the {@link java.text.BreakIterator} to use for
+ * dividing text into passages. This returns
+ * {@link java.text.BreakIterator#getSentenceInstance(java.util.Locale)} by default;
+ * subclasses can override to customize. */
+ protected BreakIterator getBreakIterator(String field) {
+ return BreakIterator.getSentenceInstance(Locale.ROOT);
+ }
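+
+    /*
+    For example (a sketch, not something this class ships), a subclass could break passages on
+    words rather than sentences:
+
+    @Override
+    protected BreakIterator getBreakIterator(String field) {
+        return BreakIterator.getWordInstance(Locale.ROOT);
+    }
+    */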
+
+ /** Returns the {@link PassageFormatter} to use for
+ * formatting passages into highlighted snippets. This
+ * returns a new {@code PassageFormatter} by default;
+ * subclasses can override to customize. */
+ protected PassageFormatter getFormatter(String field) {
+ if (defaultFormatter == null) {
+ defaultFormatter = new DefaultPassageFormatter();
+ }
+ return defaultFormatter;
+ }
+
+ /** Returns the {@link PassageScorer} to use for
+ * ranking passages. This
+ * returns a new {@code PassageScorer} by default;
+ * subclasses can override to customize. */
+ protected PassageScorer getScorer(String field) {
+ if (defaultScorer == null) {
+ defaultScorer = new PassageScorer();
+ }
+ return defaultScorer;
+ }
+
+ /**
+ * Highlights the top passages from a single field.
+ *
+ * @param field field name to highlight.
+ * Must have a stored string value and also be indexed with offsets.
+ * @param query query to highlight.
+ * @param searcher searcher that was previously used to execute the query.
+ * @param topDocs TopDocs containing the summary result documents to highlight.
+ * @return Array of formatted snippets corresponding to the documents in <code>topDocs</code>.
+ * If no highlights were found for a document, the
+ * first sentence for the field will be returned.
+ * @throws java.io.IOException if an I/O error occurred during processing
+ * @throws IllegalArgumentException if <code>field</code> was indexed without
+ * {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
+ */
+ public String[] highlight(String field, Query query, IndexSearcher searcher, TopDocs topDocs) throws IOException {
+ return highlight(field, query, searcher, topDocs, 1);
+ }
+
+ /**
+ * Highlights the top-N passages from a single field.
+ *
+ * @param field field name to highlight.
+ * Must have a stored string value and also be indexed with offsets.
+ * @param query query to highlight.
+ * @param searcher searcher that was previously used to execute the query.
+ * @param topDocs TopDocs containing the summary result documents to highlight.
+ * @param maxPassages The maximum number of top-N ranked passages used to
+ * form the highlighted snippets.
+ * @return Array of formatted snippets corresponding to the documents in <code>topDocs</code>.
+ * If no highlights were found for a document, the
+ * first {@code maxPassages} sentences from the
+ * field will be returned.
+ * @throws IOException if an I/O error occurred during processing
+ * @throws IllegalArgumentException if <code>field</code> was indexed without
+ * {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
+ */
+ public String[] highlight(String field, Query query, IndexSearcher searcher, TopDocs topDocs, int maxPassages) throws IOException {
+ Map<String,String[]> res = highlightFields(new String[] { field }, query, searcher, topDocs, new int[] { maxPassages });
+ return res.get(field);
+ }
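+
+    /*
+    Typical usage (a sketch; the query and searcher are assumed to exist, and the field must be
+    indexed with offsets):
+
+    TopDocs topDocs = searcher.search(query, 10);
+    XPostingsHighlighter highlighter = new XPostingsHighlighter();
+    String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+    */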
+
+ /**
+ * Highlights the top passages from multiple fields.
+ * <p>
+ * Conceptually, this behaves as a more efficient form of:
+ * <pre class="prettyprint">
+ * Map m = new HashMap();
+ * for (String field : fields) {
+ * m.put(field, highlight(field, query, searcher, topDocs));
+ * }
+ * return m;
+ * </pre>
+ *
+ * @param fields field names to highlight.
+ * Must have a stored string value and also be indexed with offsets.
+ * @param query query to highlight.
+ * @param searcher searcher that was previously used to execute the query.
+ * @param topDocs TopDocs containing the summary result documents to highlight.
+ * @return Map keyed on field name, containing the array of formatted snippets
+ * corresponding to the documents in <code>topDocs</code>.
+ * If no highlights were found for a document, the
+ * first sentence from the field will be returned.
+ * @throws IOException if an I/O error occurred during processing
+ * @throws IllegalArgumentException if <code>field</code> was indexed without
+ * {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
+ */
+ public Map<String,String[]> highlightFields(String fields[], Query query, IndexSearcher searcher, TopDocs topDocs) throws IOException {
+ int maxPassages[] = new int[fields.length];
+ Arrays.fill(maxPassages, 1);
+ return highlightFields(fields, query, searcher, topDocs, maxPassages);
+ }
+
+ /**
+ * Highlights the top-N passages from multiple fields.
+ * <p>
+ * Conceptually, this behaves as a more efficient form of:
+ * <pre class="prettyprint">
+ * Map m = new HashMap();
+ * for (String field : fields) {
+ * m.put(field, highlight(field, query, searcher, topDocs, maxPassages));
+ * }
+ * return m;
+ * </pre>
+ *
+ * @param fields field names to highlight.
+ * Must have a stored string value and also be indexed with offsets.
+ * @param query query to highlight.
+ * @param searcher searcher that was previously used to execute the query.
+ * @param topDocs TopDocs containing the summary result documents to highlight.
+ * @param maxPassages The maximum number of top-N ranked passages per-field used to
+ * form the highlighted snippets.
+ * @return Map keyed on field name, containing the array of formatted snippets
+ * corresponding to the documents in <code>topDocs</code>.
+ * If no highlights were found for a document, the
+ * first {@code maxPassages} sentences from the
+ * field will be returned.
+ * @throws IOException if an I/O error occurred during processing
+ * @throws IllegalArgumentException if <code>field</code> was indexed without
+ * {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
+ */
+ public Map<String,String[]> highlightFields(String fields[], Query query, IndexSearcher searcher, TopDocs topDocs, int maxPassages[]) throws IOException {
+ final ScoreDoc scoreDocs[] = topDocs.scoreDocs;
+ int docids[] = new int[scoreDocs.length];
+ for (int i = 0; i < docids.length; i++) {
+ docids[i] = scoreDocs[i].doc;
+ }
+
+ return highlightFields(fields, query, searcher, docids, maxPassages);
+ }
+
+ /**
+ * Highlights the top-N passages from multiple fields,
+ * for the provided int[] docids.
+ *
+ * @param fieldsIn field names to highlight.
+ * Must have a stored string value and also be indexed with offsets.
+ * @param query query to highlight.
+ * @param searcher searcher that was previously used to execute the query.
+     * @param docidsIn the document IDs to highlight.
+ * @param maxPassagesIn The maximum number of top-N ranked passages per-field used to
+ * form the highlighted snippets.
+ * @return Map keyed on field name, containing the array of formatted snippets
+ *         corresponding to the provided <code>docidsIn</code>.
+ *         If no highlights were found for a document, the
+ *         first {@code maxPassages} sentences from the
+ *         field will be returned.
+ * @throws IOException if an I/O error occurred during processing
+ * @throws IllegalArgumentException if <code>field</code> was indexed without
+ * {@link org.apache.lucene.index.FieldInfo.IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
+ */
+ public Map<String,String[]> highlightFields(String fieldsIn[], Query query, IndexSearcher searcher, int[] docidsIn, int maxPassagesIn[]) throws IOException {
+ Map<String,String[]> snippets = new HashMap<String,String[]>();
+ for(Map.Entry<String,Object[]> ent : highlightFieldsAsObjects(fieldsIn, query, searcher, docidsIn, maxPassagesIn).entrySet()) {
+ Object[] snippetObjects = ent.getValue();
+ String[] snippetStrings = new String[snippetObjects.length];
+ snippets.put(ent.getKey(), snippetStrings);
+ for(int i=0;i<snippetObjects.length;i++) {
+ Object snippet = snippetObjects[i];
+ if (snippet != null) {
+ snippetStrings[i] = snippet.toString();
+ }
+ }
+ }
+
+ return snippets;
+ }
+
+ public Map<String,Object[]> highlightFieldsAsObjects(String fieldsIn[], Query query, IndexSearcher searcher, int[] docidsIn, int maxPassagesIn[]) throws IOException {
+ if (fieldsIn.length < 1) {
+ throw new IllegalArgumentException("fieldsIn must not be empty");
+ }
+ if (fieldsIn.length != maxPassagesIn.length) {
+ throw new IllegalArgumentException("invalid number of maxPassagesIn");
+ }
+ final IndexReader reader = searcher.getIndexReader();
+ query = rewrite(query);
+ SortedSet<Term> queryTerms = new TreeSet<Term>();
+ query.extractTerms(queryTerms);
+
+ IndexReaderContext readerContext = reader.getContext();
+ List<AtomicReaderContext> leaves = readerContext.leaves();
+
+ // Make our own copies because we sort in-place:
+ int[] docids = new int[docidsIn.length];
+ System.arraycopy(docidsIn, 0, docids, 0, docidsIn.length);
+ final String fields[] = new String[fieldsIn.length];
+ System.arraycopy(fieldsIn, 0, fields, 0, fieldsIn.length);
+ final int maxPassages[] = new int[maxPassagesIn.length];
+ System.arraycopy(maxPassagesIn, 0, maxPassages, 0, maxPassagesIn.length);
+
+ // sort for sequential io
+ Arrays.sort(docids);
+ new InPlaceMergeSorter() {
+
+ @Override
+ protected void swap(int i, int j) {
+ String tmp = fields[i];
+ fields[i] = fields[j];
+ fields[j] = tmp;
+ int tmp2 = maxPassages[i];
+ maxPassages[i] = maxPassages[j];
+ maxPassages[j] = tmp2;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return fields[i].compareTo(fields[j]);
+ }
+
+ }.sort(0, fields.length);
+
+ // pull stored data:
+ String[][] contents = loadFieldValues(searcher, fields, docids, maxLength);
+
+ Map<String,Object[]> highlights = new HashMap<String,Object[]>();
+ for (int i = 0; i < fields.length; i++) {
+ String field = fields[i];
+ int numPassages = maxPassages[i];
+
+ Term floor = new Term(field, "");
+ Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
+ SortedSet<Term> fieldTerms = queryTerms.subSet(floor, ceiling);
+ // TODO: should we have some reasonable defaults for term pruning? (e.g. stopwords)
+
+ // Strip off the redundant field:
+ BytesRef terms[] = new BytesRef[fieldTerms.size()];
+ int termUpto = 0;
+ for(Term term : fieldTerms) {
+ terms[termUpto++] = term.bytes();
+ }
+ Map<Integer,Object> fieldHighlights = highlightField(field, contents[i], getBreakIterator(field), terms, docids, leaves, numPassages);
+
+ Object[] result = new Object[docids.length];
+ for (int j = 0; j < docidsIn.length; j++) {
+ result[j] = fieldHighlights.get(docidsIn[j]);
+ }
+ highlights.put(field, result);
+ }
+ return highlights;
+ }
+
+ /** Loads the String values for each field X docID to be
+ * highlighted. By default this loads from stored
+ * fields, but a subclass can change the source. This
+ * method should allocate the String[fields.length][docids.length]
+ * and fill all values. The returned Strings must be
+ * identical to what was indexed. */
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ String contents[][] = new String[fields.length][docids.length];
+ char valueSeparators[] = new char[fields.length];
+ for (int i = 0; i < fields.length; i++) {
+ valueSeparators[i] = getMultiValuedSeparator(fields[i]);
+ }
+ LimitedStoredFieldVisitor visitor = new LimitedStoredFieldVisitor(fields, valueSeparators, maxLength);
+ for (int i = 0; i < docids.length; i++) {
+ searcher.doc(docids[i], visitor);
+ for (int j = 0; j < fields.length; j++) {
+ contents[j][i] = visitor.getValue(j);
+ }
+ visitor.reset();
+ }
+ return contents;
+ }
+
+ /**
+ * Returns the logical separator between values for multi-valued fields.
+ * The default value is a space character, which means passages can span across values,
+ * but a subclass can override, for example with {@code U+2029 PARAGRAPH SEPARATOR (PS)}
+ * if each value holds a discrete passage for highlighting.
+ */
+ protected char getMultiValuedSeparator(String field) {
+ return ' ';
+ }
+
+ //BEGIN EDIT: made protected so that we can call from our subclass and pass in the terms by ourselves
+ protected Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<AtomicReaderContext> leaves, int maxPassages) throws IOException {
+ //private Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<AtomicReaderContext > leaves, int maxPassages) throws IOException {
+ //END EDIT
+
+ Map<Integer,Object> highlights = new HashMap<Integer,Object>();
+
+ // reuse in the real sense... for docs in same segment we just advance our old enum
+ DocsAndPositionsEnum postings[] = null;
+ TermsEnum termsEnum = null;
+ int lastLeaf = -1;
+
+ PassageFormatter fieldFormatter = getFormatter(field);
+ if (fieldFormatter == null) {
+ throw new NullPointerException("PassageFormatter cannot be null");
+ }
+
+ for (int i = 0; i < docids.length; i++) {
+ String content = contents[i];
+ if (content.length() == 0) {
+ continue; // nothing to do
+ }
+ bi.setText(content);
+ int doc = docids[i];
+ int leaf = ReaderUtil.subIndex(doc, leaves);
+ AtomicReaderContext subContext = leaves.get(leaf);
+ AtomicReader r = subContext.reader();
+ Terms t = r.terms(field);
+ if (t == null) {
+ continue; // nothing to do
+ }
+ if (leaf != lastLeaf) {
+ termsEnum = t.iterator(null);
+ postings = new DocsAndPositionsEnum[terms.length];
+ }
+ Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
+ if (passages.length == 0) {
+ passages = getEmptyHighlight(field, bi, maxPassages);
+ }
+ if (passages.length > 0) {
+ // otherwise a null snippet (eg if field is missing
+ // entirely from the doc)
+ highlights.put(doc, fieldFormatter.format(passages, content));
+ }
+ lastLeaf = leaf;
+ }
+
+ return highlights;
+ }
+
+ // algorithm: treat sentence snippets as miniature documents
+    // we can intersect these with the postings lists via BreakIterator.preceding(offset)
+ // score each sentence as norm(sentenceStartOffset) * sum(weight * tf(freq))
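+    // e.g. a term with weight weights[i] occurring tf times in a passage of length len contributes
+    // weights[i] * scorer.tf(tf, len) to the passage score, and the final score is multiplied by
+    // scorer.norm(passageStartOffset), which favors passages closer to the start of the content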
+ private Passage[] highlightDoc(String field, BytesRef terms[], int contentLength, BreakIterator bi, int doc,
+ TermsEnum termsEnum, DocsAndPositionsEnum[] postings, int n) throws IOException {
+
+ //BEGIN EDIT added call to method that returns the offset for the current value (discrete highlighting)
+ int valueOffset = getOffsetForCurrentValue(field, doc);
+ //END EDIT
+
+ PassageScorer scorer = getScorer(field);
+ if (scorer == null) {
+ throw new NullPointerException("PassageScorer cannot be null");
+ }
+
+
+ //BEGIN EDIT discrete highlighting
+ // the scoring needs to be based on the length of the whole field (all values rather than only the current one)
+ int totalContentLength = getContentLength(field, doc);
+ if (totalContentLength == -1) {
+ totalContentLength = contentLength;
+ }
+ //END EDIT
+
+
+ PriorityQueue<OffsetsEnum> pq = new PriorityQueue<OffsetsEnum>();
+ float weights[] = new float[terms.length];
+ // initialize postings
+ for (int i = 0; i < terms.length; i++) {
+ DocsAndPositionsEnum de = postings[i];
+ int pDoc;
+ if (de == EMPTY) {
+ continue;
+ } else if (de == null) {
+ postings[i] = EMPTY; // initially
+ if (!termsEnum.seekExact(terms[i])) {
+ continue; // term not found
+ }
+ de = postings[i] = termsEnum.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+ if (de == null) {
+ // no positions available
+ throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
+ }
+ pDoc = de.advance(doc);
+ } else {
+ pDoc = de.docID();
+ if (pDoc < doc) {
+ pDoc = de.advance(doc);
+ }
+ }
+
+ if (doc == pDoc) {
+ //BEGIN EDIT we take into account the length of the whole field (all values) to properly score the snippets
+ weights[i] = scorer.weight(totalContentLength, de.freq());
+ //weights[i] = scorer.weight(contentLength, de.freq());
+ //END EDIT
+ de.nextPosition();
+ pq.add(new OffsetsEnum(de, i));
+ }
+ }
+
+ pq.add(new OffsetsEnum(EMPTY, Integer.MAX_VALUE)); // a sentinel for termination
+
+ PriorityQueue<Passage> passageQueue = new PriorityQueue<Passage>(n, new Comparator<Passage>() {
+ @Override
+ public int compare(Passage left, Passage right) {
+ if (left.score < right.score) {
+ return -1;
+ } else if (left.score > right.score) {
+ return 1;
+ } else {
+ return left.startOffset - right.startOffset;
+ }
+ }
+ });
+ Passage current = new Passage();
+
+ OffsetsEnum off;
+ while ((off = pq.poll()) != null) {
+ final DocsAndPositionsEnum dp = off.dp;
+
+ int start = dp.startOffset();
+ if (start == -1) {
+ throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
+ }
+ int end = dp.endOffset();
+ // LUCENE-5166: this hit would span the content limit... however more valid
+ // hits may exist (they are sorted by start). so we pretend like we never
+ // saw this term, it won't cause a passage to be added to passageQueue or anything.
+ assert EMPTY.startOffset() == Integer.MAX_VALUE;
+ if (start < contentLength && end > contentLength) {
+ continue;
+ }
+
+
+ //BEGIN EDIT support for discrete highlighting (added block code)
+ //switch to the first match in the current value if there is one
+ boolean seenEnough = false;
+ while (start < valueOffset) {
+ if (off.pos == dp.freq()) {
+ seenEnough = true;
+ break;
+ } else {
+ off.pos++;
+ dp.nextPosition();
+ start = dp.startOffset();
+ end = dp.endOffset();
+ }
+ }
+
+ //continue with next term if we've already seen the current one all the times it appears
+ //that means that the current value doesn't hold matches for the current term
+ if (seenEnough) {
+ continue;
+ }
+
+            //we now subtract the offset of the current value from both start and end
+ start -= valueOffset;
+ end -= valueOffset;
+ //END EDIT
+
+
+ if (start >= current.endOffset) {
+ if (current.startOffset >= 0) {
+ // finalize current
+ //BEGIN EDIT we take into account the value offset when scoring the snippet based on its position
+ current.score *= scorer.norm(current.startOffset + valueOffset);
+ //current.score *= scorer.norm(current.startOffset);
+ //END EDIT
+ // new sentence: first add 'current' to queue
+ if (passageQueue.size() == n && current.score < passageQueue.peek().score) {
+ current.reset(); // can't compete, just reset it
+ } else {
+ passageQueue.offer(current);
+ if (passageQueue.size() > n) {
+ current = passageQueue.poll();
+ current.reset();
+ } else {
+ current = new Passage();
+ }
+ }
+ }
+ // if we exceed limit, we are done
+ if (start >= contentLength) {
+ Passage passages[] = new Passage[passageQueue.size()];
+ passageQueue.toArray(passages);
+ for (Passage p : passages) {
+ p.sort();
+ }
+ // sort in ascending order
+ Arrays.sort(passages, new Comparator<Passage>() {
+ @Override
+ public int compare(Passage left, Passage right) {
+ return left.startOffset - right.startOffset;
+ }
+ });
+ return passages;
+ }
+ // advance breakiterator
+ assert BreakIterator.DONE < 0;
+ current.startOffset = Math.max(bi.preceding(start+1), 0);
+ current.endOffset = Math.min(bi.next(), contentLength);
+ }
+ int tf = 0;
+ while (true) {
+ tf++;
+ current.addMatch(start, end, terms[off.id]);
+ if (off.pos == dp.freq()) {
+ break; // removed from pq
+ } else {
+ off.pos++;
+ dp.nextPosition();
+ //BEGIN EDIT support for discrete highlighting
+ start = dp.startOffset() - valueOffset;
+ end = dp.endOffset() - valueOffset;
+ //start = dp.startOffset();
+ //end = dp.endOffset();
+ //END EDIT
+ }
+ if (start >= current.endOffset || end > contentLength) {
+ pq.offer(off);
+ break;
+ }
+ }
+ current.score += weights[off.id] * scorer.tf(tf, current.endOffset - current.startOffset);
+ }
+
+ // Dead code but compiler disagrees:
+ assert false;
+ return null;
+ }
+
+ /** Called to summarize a document when no hits were
+ * found. By default this just returns the first
+ * {@code maxPassages} sentences; subclasses can override
+ * to customize. */
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ // BreakIterator should be un-next'd:
+ List<Passage> passages = new ArrayList<Passage>();
+ int pos = bi.current();
+ assert pos == 0;
+ while (passages.size() < maxPassages) {
+ int next = bi.next();
+ if (next == BreakIterator.DONE) {
+ break;
+ }
+ Passage passage = new Passage();
+ passage.score = Float.NaN;
+ passage.startOffset = pos;
+ passage.endOffset = next;
+ passages.add(passage);
+ pos = next;
+ }
+
+ return passages.toArray(new Passage[passages.size()]);
+ }
+
+ private static class OffsetsEnum implements Comparable<OffsetsEnum> {
+ DocsAndPositionsEnum dp;
+ int pos;
+ int id;
+
+ OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
+ this.dp = dp;
+ this.id = id;
+ this.pos = 1;
+ }
+
+ @Override
+ public int compareTo(OffsetsEnum other) {
+ try {
+ int off = dp.startOffset();
+ int otherOff = other.dp.startOffset();
+ if (off == otherOff) {
+ return id - other.id;
+ } else {
+ return Long.signum(((long)off) - otherOff);
+ }
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ private static final DocsAndPositionsEnum EMPTY = new DocsAndPositionsEnum() {
+
+ @Override
+ public int nextPosition() throws IOException { return 0; }
+
+ @Override
+ public int startOffset() throws IOException { return Integer.MAX_VALUE; }
+
+ @Override
+ public int endOffset() throws IOException { return Integer.MAX_VALUE; }
+
+ @Override
+ public BytesRef getPayload() throws IOException { return null; }
+
+ @Override
+ public int freq() throws IOException { return 0; }
+
+ @Override
+ public int docID() { return NO_MORE_DOCS; }
+
+ @Override
+ public int nextDoc() throws IOException { return NO_MORE_DOCS; }
+
+ @Override
+ public int advance(int target) throws IOException { return NO_MORE_DOCS; }
+
+ @Override
+ public long cost() { return 0; }
+ };
+
+ /**
+     * we rewrite against an empty indexreader: we don't want things like
+     * rangeQueries that don't summarize the document
+ */
+ private static Query rewrite(Query original) throws IOException {
+ Query query = original;
+ for (Query rewrittenQuery = query.rewrite(EMPTY_INDEXREADER); rewrittenQuery != query;
+ rewrittenQuery = query.rewrite(EMPTY_INDEXREADER)) {
+ query = rewrittenQuery;
+ }
+ return query;
+ }
+
+ private static class LimitedStoredFieldVisitor extends StoredFieldVisitor {
+ private final String fields[];
+ private final char valueSeparators[];
+ private final int maxLength;
+ private final StringBuilder builders[];
+ private int currentField = -1;
+
+ public LimitedStoredFieldVisitor(String fields[], char valueSeparators[], int maxLength) {
+ assert fields.length == valueSeparators.length;
+ this.fields = fields;
+ this.valueSeparators = valueSeparators;
+ this.maxLength = maxLength;
+ builders = new StringBuilder[fields.length];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = new StringBuilder();
+ }
+ }
+
+ @Override
+ public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+ assert currentField >= 0;
+ StringBuilder builder = builders[currentField];
+ if (builder.length() > 0 && builder.length() < maxLength) {
+ builder.append(valueSeparators[currentField]);
+ }
+ if (builder.length() + value.length() > maxLength) {
+ builder.append(value, 0, maxLength - builder.length());
+ } else {
+ builder.append(value);
+ }
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ currentField = Arrays.binarySearch(fields, fieldInfo.name);
+ if (currentField < 0) {
+ return Status.NO;
+ } else if (builders[currentField].length() > maxLength) {
+ return fields.length == 1 ? Status.STOP : Status.NO;
+ }
+ return Status.YES;
+ }
+
+ String getValue(int i) {
+ return builders[i].toString();
+ }
+
+ void reset() {
+ currentField = -1;
+ for (int i = 0; i < fields.length; i++) {
+ builders[i].setLength(0);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
new file mode 100644
index 0000000..6e8b0a1
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
@@ -0,0 +1,1052 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.search.suggest.analyzing;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.TokenStreamToAutomaton;
+import org.apache.lucene.search.suggest.InputIterator;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.Sort;
+import org.apache.lucene.store.*;
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.*;
+import org.apache.lucene.util.automaton.*;
+import org.apache.lucene.util.fst.*;
+import org.apache.lucene.util.fst.FST.BytesReader;
+import org.apache.lucene.util.fst.PairOutputs.Pair;
+import org.apache.lucene.util.fst.Util.MinResult;
+import org.elasticsearch.common.collect.HppcMaps;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ * Suggester that first analyzes the surface form, adds the
+ * analyzed form to a weighted FST, and then does the same
+ * thing at lookup time. This means lookup is based on the
+ * analyzed form while suggestions are still the surface
+ * form(s).
+ *
+ * <p>
+ * This can result in powerful suggester functionality. For
+ * example, if you use an analyzer removing stop words,
+ * then the partial text "ghost chr..." could see the
+ * suggestion "The Ghost of Christmas Past". Note that
+ * position increments MUST NOT be preserved for this example
+ * to work, so you should call the constructor with
+ * <code>preservePositionIncrements</code> parameter set to
+ * false
+ *
+ * <p>
+ * If SynonymFilter is used to map wifi and wireless network to
+ * hotspot then the partial text "wirele..." could suggest
+ * "wifi router". Token normalization like stemmers, accent
+ * removal, etc., would allow suggestions to ignore such
+ * variations.
+ *
+ * <p>
+ * When two matching suggestions have the same weight, they
+ * are tie-broken by the analyzed form. If their analyzed
+ * form is the same then the order is undefined.
+ *
+ * <p>
+ * There are some limitations:
+ * <ul>
+ *
+ * <li> A lookup from a query like "net" in English won't
+ * be any different than "net " (ie, user added a
+ * trailing space) because analyzers don't reflect
+ * when they've seen a token separator and when they
+ * haven't.
+ *
+ * <li> If you're using {@code StopFilter}, and the user will
+ * type "fast apple", but so far all they've typed is
+ * "fast a", again because the analyzer doesn't convey whether
+ * it's seen a token separator after the "a",
+ * {@code StopFilter} will remove that "a" causing
+ * far more matches than you'd expect.
+ *
+ * <li> Lookups with the empty string return no results
+ * instead of all results.
+ * </ul>
+ *
+ * @lucene.experimental
+ */
+public class XAnalyzingSuggester extends Lookup {
+
+ /**
+ * FST<Weight,Surface>:
+ * input is the analyzed form, with a null byte between terms
+ * weights are encoded as costs: (Integer.MAX_VALUE-weight)
+ * surface is the original, unanalyzed form.
+ */
+ private FST<Pair<Long,BytesRef>> fst = null;
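+  // e.g. a suggestion with weight 7 is stored with cost Integer.MAX_VALUE - 7, so the
+  // min-cost FST search visits the highest-weight suggestions first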
+
+ /**
+ * Analyzer that will be used for analyzing suggestions at
+ * index time.
+ */
+ private final Analyzer indexAnalyzer;
+
+ /**
+ * Analyzer that will be used for analyzing suggestions at
+ * query time.
+ */
+ private final Analyzer queryAnalyzer;
+
+ /**
+ * True if exact match suggestions should always be returned first.
+ */
+ private final boolean exactFirst;
+
+ /**
+ * True if separator between tokens should be preserved.
+ */
+ private final boolean preserveSep;
+
+ /** Include this flag in the options parameter to {@link
+ * #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)} to always
+ * return the exact match first, regardless of score. This
+ * has no performance impact but could result in
+ * low-quality suggestions. */
+ public static final int EXACT_FIRST = 1;
+
+ /** Include this flag in the options parameter to {@link
+ * #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)} to preserve
+ * token separators when matching. */
+ public static final int PRESERVE_SEP = 2;
+
+ /** Represents the separation between tokens, if
+ * PRESERVE_SEP was specified */
+ public static final int SEP_LABEL = '\u001F';
+
+ /** Marks end of the analyzed input and start of dedup
+ * byte. */
+ public static final int END_BYTE = 0x0;
+
+ /** Maximum number of dup surface forms (different surface
+ * forms for the same analyzed form). */
+ private final int maxSurfaceFormsPerAnalyzedForm;
+
+ /** Maximum graph paths to index for a single analyzed
+ * surface form. This only matters if your analyzer
+ * makes lots of alternate paths (e.g. contains
+ * SynonymFilter). */
+ private final int maxGraphExpansions;
+
+ /** Highest number of analyzed paths we saw for any single
+ * input surface form. For analyzers that never create
+ * graphs this will always be 1. */
+ private int maxAnalyzedPathsForOneInput;
+
+ private boolean hasPayloads;
+
+ private final int sepLabel;
+ private final int payloadSep;
+ private final int endByte;
+ private final int holeCharacter;
+
+ public static final int PAYLOAD_SEP = '\u001F';
+ public static final int HOLE_CHARACTER = '\u001E';
+
+ /** Whether position holes should appear in the automaton. */
+ private boolean preservePositionIncrements;
+
+ /**
+ * Calls {@link #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)
+ * AnalyzingSuggester(analyzer, analyzer, EXACT_FIRST |
+ * PRESERVE_SEP, 256, -1)}
+ */
+ public XAnalyzingSuggester(Analyzer analyzer) {
+ this(analyzer, analyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
+ }
+
+ /**
+ * Calls {@link #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)
+ * AnalyzingSuggester(indexAnalyzer, queryAnalyzer, EXACT_FIRST |
+ * PRESERVE_SEP, 256, -1)}
+ */
+ public XAnalyzingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
+ this(indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
+ }
+
+ /**
+ * Creates a new suggester.
+ *
+ * @param indexAnalyzer Analyzer that will be used for
+ * analyzing suggestions while building the index.
+ * @param queryAnalyzer Analyzer that will be used for
+ * analyzing query text during lookup
+ * @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP}
+ * @param maxSurfaceFormsPerAnalyzedForm Maximum number of
+ * surface forms to keep for a single analyzed form.
+ * When there are too many surface forms we discard the
+ * lowest weighted ones.
+ * @param maxGraphExpansions Maximum number of graph paths
+ * to expand from the analyzed form. Set this to -1 for
+ * no limit.
+ */
+ public XAnalyzingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer, int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+ boolean preservePositionIncrements, FST<Pair<Long, BytesRef>> fst, boolean hasPayloads, int maxAnalyzedPathsForOneInput,
+ int sepLabel, int payloadSep, int endByte, int holeCharacter) {
+ // SIMON EDIT: I added fst, hasPayloads and maxAnalyzedPathsForOneInput
+ this.indexAnalyzer = indexAnalyzer;
+ this.queryAnalyzer = queryAnalyzer;
+ this.fst = fst;
+ this.hasPayloads = hasPayloads;
+ if ((options & ~(EXACT_FIRST | PRESERVE_SEP)) != 0) {
+ throw new IllegalArgumentException("options should only contain EXACT_FIRST and PRESERVE_SEP; got " + options);
+ }
+ this.exactFirst = (options & EXACT_FIRST) != 0;
+ this.preserveSep = (options & PRESERVE_SEP) != 0;
+
+ // NOTE: this is just an implementation limitation; if
+ // somehow this is a problem we could fix it by using
+ // more than one byte to disambiguate ... but 256 seems
+    // like it should be way more than enough.
+ if (maxSurfaceFormsPerAnalyzedForm <= 0 || maxSurfaceFormsPerAnalyzedForm > 256) {
+ throw new IllegalArgumentException("maxSurfaceFormsPerAnalyzedForm must be > 0 and < 256 (got: " + maxSurfaceFormsPerAnalyzedForm + ")");
+ }
+ this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
+
+ if (maxGraphExpansions < 1 && maxGraphExpansions != -1) {
+ throw new IllegalArgumentException("maxGraphExpansions must -1 (no limit) or > 0 (got: " + maxGraphExpansions + ")");
+ }
+ this.maxGraphExpansions = maxGraphExpansions;
+ this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.sepLabel = sepLabel;
+ this.payloadSep = payloadSep;
+ this.endByte = endByte;
+ this.holeCharacter = holeCharacter;
+ }
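+
+  /*
+  A usage sketch (hypothetical; "inputs" stands for any InputIterator over surface forms and weights):
+
+  XAnalyzingSuggester suggester = new XAnalyzingSuggester(new StandardAnalyzer(Version.LUCENE_45));
+  suggester.build(inputs);
+  List<LookupResult> results = suggester.lookup("ghost chr", false, 5);
+  */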
+
+ /** Returns byte size of the underlying FST. */
+ public long sizeInBytes() {
+ return fst == null ? 0 : fst.sizeInBytes();
+ }
+
+ private static void copyDestTransitions(State from, State to, List<Transition> transitions) {
+ if (to.isAccept()) {
+ from.setAccept(true);
+ }
+ for(Transition t : to.getTransitions()) {
+ transitions.add(t);
+ }
+ }
+
+  // Replaces SEP with epsilon, or remaps it if
+  // we were asked to preserve it:
+ private static void replaceSep(Automaton a, boolean preserveSep, int replaceSep) {
+
+ State[] states = a.getNumberedStates();
+
+ // Go in reverse topo sort so we know we only have to
+ // make one pass:
+ for(int stateNumber=states.length-1;stateNumber >=0;stateNumber--) {
+ final State state = states[stateNumber];
+ List<Transition> newTransitions = new ArrayList<Transition>();
+ for(Transition t : state.getTransitions()) {
+ assert t.getMin() == t.getMax();
+ if (t.getMin() == TokenStreamToAutomaton.POS_SEP) {
+ if (preserveSep) {
+ // Remap to SEP_LABEL:
+ newTransitions.add(new Transition(replaceSep, t.getDest()));
+ } else {
+ copyDestTransitions(state, t.getDest(), newTransitions);
+ a.setDeterministic(false);
+ }
+ } else if (t.getMin() == TokenStreamToAutomaton.HOLE) {
+
+ // Just remove the hole: there will then be two
+ // SEP tokens next to each other, which will only
+ // match another hole at search time. Note that
+ // it will also match an empty-string token ... if
+ // that's somehow a problem we can always map HOLE
+ // to a dedicated byte (and escape it in the
+ // input).
+ copyDestTransitions(state, t.getDest(), newTransitions);
+ a.setDeterministic(false);
+ } else {
+ newTransitions.add(t);
+ }
+ }
+ state.setTransitions(newTransitions.toArray(new Transition[newTransitions.size()]));
+ }
+ }
+
+ protected Automaton convertAutomaton(Automaton a) {
+ return a;
+ }
+
+  /** Just escapes the separator byte (which we still use for SEP). */
+ private static final class EscapingTokenStreamToAutomaton extends TokenStreamToAutomaton {
+
+ final BytesRef spare = new BytesRef();
+ private char sepLabel;
+
+ public EscapingTokenStreamToAutomaton(char sepLabel) {
+ this.sepLabel = sepLabel;
+ }
+
+ @Override
+ protected BytesRef changeToken(BytesRef in) {
+ int upto = 0;
+ for(int i=0;i<in.length;i++) {
+ byte b = in.bytes[in.offset+i];
+ if (b == (byte) sepLabel) {
+ if (spare.bytes.length == upto) {
+ spare.grow(upto+2);
+ }
+ spare.bytes[upto++] = (byte) sepLabel;
+ spare.bytes[upto++] = b;
+ } else {
+ if (spare.bytes.length == upto) {
+ spare.grow(upto+1);
+ }
+ spare.bytes[upto++] = b;
+ }
+ }
+ spare.offset = 0;
+ spare.length = upto;
+ return spare;
+ }
+ }
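+
+  // e.g. with sepLabel = 0x1f, the token bytes {'a', 0x1f, 'b'} become {'a', 0x1f, 0x1f, 'b'}:
+  // the separator byte is doubled so a literal occurrence can be told apart from a real separator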
+
+ public TokenStreamToAutomaton getTokenStreamToAutomaton() {
+ final TokenStreamToAutomaton tsta;
+ if (preserveSep) {
+ tsta = new EscapingTokenStreamToAutomaton((char) sepLabel);
+ } else {
+      // When we're not preserving sep, we don't steal the separator
+      // byte, so we don't need to do any escaping:
+ tsta = new TokenStreamToAutomaton();
+ }
+ tsta.setPreservePositionIncrements(preservePositionIncrements);
+ return tsta;
+ }
+
+ private static class AnalyzingComparator implements Comparator<BytesRef> {
+
+ private final boolean hasPayloads;
+
+ public AnalyzingComparator(boolean hasPayloads) {
+ this.hasPayloads = hasPayloads;
+ }
+
+ private final ByteArrayDataInput readerA = new ByteArrayDataInput();
+ private final ByteArrayDataInput readerB = new ByteArrayDataInput();
+ private final BytesRef scratchA = new BytesRef();
+ private final BytesRef scratchB = new BytesRef();
+
+ @Override
+ public int compare(BytesRef a, BytesRef b) {
+
+ // First by analyzed form:
+ readerA.reset(a.bytes, a.offset, a.length);
+ scratchA.length = readerA.readShort();
+ scratchA.bytes = a.bytes;
+ scratchA.offset = readerA.getPosition();
+
+ readerB.reset(b.bytes, b.offset, b.length);
+ scratchB.bytes = b.bytes;
+ scratchB.length = readerB.readShort();
+ scratchB.offset = readerB.getPosition();
+
+ int cmp = scratchA.compareTo(scratchB);
+ if (cmp != 0) {
+ return cmp;
+ }
+ readerA.skipBytes(scratchA.length);
+ readerB.skipBytes(scratchB.length);
+ // Next by cost:
+ long aCost = readerA.readInt();
+ long bCost = readerB.readInt();
+ if (aCost < bCost) {
+ return -1;
+ } else if (aCost > bCost) {
+ return 1;
+ }
+
+ // Finally by surface form:
+ if (hasPayloads) {
+ scratchA.length = readerA.readShort();
+ scratchA.offset = readerA.getPosition();
+ scratchB.length = readerB.readShort();
+ scratchB.offset = readerB.getPosition();
+ } else {
+ scratchA.offset = readerA.getPosition();
+ scratchA.length = a.length - scratchA.offset;
+ scratchB.offset = readerB.getPosition();
+ scratchB.length = b.length - scratchB.offset;
+ }
+ return scratchA.compareTo(scratchB);
+ }
+ }
+
+ @Override
+ public void build(InputIterator iterator) throws IOException {
+ String prefix = getClass().getSimpleName();
+ File directory = Sort.defaultTempDir();
+ File tempInput = File.createTempFile(prefix, ".input", directory);
+ File tempSorted = File.createTempFile(prefix, ".sorted", directory);
+
+ hasPayloads = iterator.hasPayloads();
+
+ Sort.ByteSequencesWriter writer = new Sort.ByteSequencesWriter(tempInput);
+ Sort.ByteSequencesReader reader = null;
+ BytesRef scratch = new BytesRef();
+
+ TokenStreamToAutomaton ts2a = getTokenStreamToAutomaton();
+
+ boolean success = false;
+ byte buffer[] = new byte[8];
+ try {
+ ByteArrayDataOutput output = new ByteArrayDataOutput(buffer);
+ BytesRef surfaceForm;
+
+ while ((surfaceForm = iterator.next()) != null) {
+ Set<IntsRef> paths = toFiniteStrings(surfaceForm, ts2a);
+
+ maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, paths.size());
+
+ for (IntsRef path : paths) {
+
+ Util.toBytesRef(path, scratch);
+
+ // length of the analyzed text (FST input)
+ if (scratch.length > Short.MAX_VALUE-2) {
+ throw new IllegalArgumentException("cannot handle analyzed forms > " + (Short.MAX_VALUE-2) + " in length (got " + scratch.length + ")");
+ }
+ short analyzedLength = (short) scratch.length;
+
+ // compute the required length:
+ // analyzed sequence + weight (4) + surface + analyzedLength (short)
+ int requiredLength = analyzedLength + 4 + surfaceForm.length + 2;
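+
+          // e.g. analyzedLength=5 and surfaceForm.length=8 with no payload gives
+          // requiredLength = 2 + 5 + 4 + 8 = 19 bytes: short length prefix, analyzed bytes,
+          // encoded weight, then surface bytes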
+
+ BytesRef payload;
+
+ if (hasPayloads) {
+ if (surfaceForm.length > (Short.MAX_VALUE-2)) {
+ throw new IllegalArgumentException("cannot handle surface form > " + (Short.MAX_VALUE-2) + " in length (got " + surfaceForm.length + ")");
+ }
+ payload = iterator.payload();
+ // payload + surfaceLength (short)
+ requiredLength += payload.length + 2;
+ } else {
+ payload = null;
+ }
+
+ buffer = ArrayUtil.grow(buffer, requiredLength);
+
+ output.reset(buffer);
+
+ output.writeShort(analyzedLength);
+
+ output.writeBytes(scratch.bytes, scratch.offset, scratch.length);
+
+ output.writeInt(encodeWeight(iterator.weight()));
+
+ if (hasPayloads) {
+ for(int i=0;i<surfaceForm.length;i++) {
+ if (surfaceForm.bytes[i] == payloadSep) {
+ throw new IllegalArgumentException("surface form cannot contain unit separator character U+001F; this character is reserved");
+ }
+ }
+ output.writeShort((short) surfaceForm.length);
+ output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
+ output.writeBytes(payload.bytes, payload.offset, payload.length);
+ } else {
+ output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
+ }
+
+ assert output.getPosition() == requiredLength: output.getPosition() + " vs " + requiredLength;
+
+ writer.write(buffer, 0, output.getPosition());
+ }
+ }
+ writer.close();
+
+ // Sort all input/output pairs (required by FST.Builder):
+ new Sort(new AnalyzingComparator(hasPayloads)).sort(tempInput, tempSorted);
+
+ // Free disk space:
+ tempInput.delete();
+
+ reader = new Sort.ByteSequencesReader(tempSorted);
+
+ PairOutputs<Long,BytesRef> outputs = new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
+ Builder<Pair<Long,BytesRef>> builder = new Builder<Pair<Long,BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
+
+ // Build FST:
+ BytesRef previousAnalyzed = null;
+ BytesRef analyzed = new BytesRef();
+ BytesRef surface = new BytesRef();
+ IntsRef scratchInts = new IntsRef();
+ ByteArrayDataInput input = new ByteArrayDataInput();
+
+ // Used to remove duplicate surface forms (but we
+      // still index the highest-weight one). We clear
+ // this when we see a new analyzed form, so it cannot
+ // grow unbounded (at most 256 entries):
+ Set<BytesRef> seenSurfaceForms = new HashSet<BytesRef>();
+
+ int dedup = 0;
+ while (reader.read(scratch)) {
+ input.reset(scratch.bytes, scratch.offset, scratch.length);
+ short analyzedLength = input.readShort();
+ analyzed.grow(analyzedLength+2);
+ input.readBytes(analyzed.bytes, 0, analyzedLength);
+ analyzed.length = analyzedLength;
+
+ long cost = input.readInt();
+
+ surface.bytes = scratch.bytes;
+ if (hasPayloads) {
+ surface.length = input.readShort();
+ surface.offset = input.getPosition();
+ } else {
+ surface.offset = input.getPosition();
+ surface.length = scratch.length - surface.offset;
+ }
+
+ if (previousAnalyzed == null) {
+ previousAnalyzed = new BytesRef();
+ previousAnalyzed.copyBytes(analyzed);
+ seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
+ } else if (analyzed.equals(previousAnalyzed)) {
+ dedup++;
+ if (dedup >= maxSurfaceFormsPerAnalyzedForm) {
+ // More than maxSurfaceFormsPerAnalyzedForm
+ // dups: skip the rest:
+ continue;
+ }
+ if (seenSurfaceForms.contains(surface)) {
+ continue;
+ }
+ seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
+ } else {
+ dedup = 0;
+ previousAnalyzed.copyBytes(analyzed);
+ seenSurfaceForms.clear();
+ seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
+ }
+
+ // TODO: I think we can avoid the extra 2 bytes when
+ // there is no dup (dedup==0), but we'd have to fix
+ // the exactFirst logic ... which would be sort of
+ // hairy because we'd need to special case the two
+ // (dup/not dup)...
+
+ // NOTE: must be byte 0 so we sort before whatever
+ // is next
+ analyzed.bytes[analyzed.offset+analyzed.length] = 0;
+ analyzed.bytes[analyzed.offset+analyzed.length+1] = (byte) dedup;
+ analyzed.length += 2;
+
+ Util.toIntsRef(analyzed, scratchInts);
+ //System.out.println("ADD: " + scratchInts + " -> " + cost + ": " + surface.utf8ToString());
+ if (!hasPayloads) {
+ builder.add(scratchInts, outputs.newPair(cost, BytesRef.deepCopyOf(surface)));
+ } else {
+ int payloadOffset = input.getPosition() + surface.length;
+ int payloadLength = scratch.length - payloadOffset;
+ BytesRef br = new BytesRef(surface.length + 1 + payloadLength);
+ System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
+ br.bytes[surface.length] = (byte) payloadSep;
+ System.arraycopy(scratch.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength);
+ br.length = br.bytes.length;
+ builder.add(scratchInts, outputs.newPair(cost, br));
+ }
+ }
+ fst = builder.finish();
+
+ //PrintWriter pw = new PrintWriter("/tmp/out.dot");
+ //Util.toDot(fst, pw, true, true);
+ //pw.close();
+
+ success = true;
+ } finally {
+ if (success) {
+ IOUtils.close(reader, writer);
+ } else {
+ IOUtils.closeWhileHandlingException(reader, writer);
+ }
+
+ tempInput.delete();
+ tempSorted.delete();
+ }
+ }
+
+ @Override
+ public boolean store(OutputStream output) throws IOException {
+ DataOutput dataOut = new OutputStreamDataOutput(output);
+ try {
+ if (fst == null) {
+ return false;
+ }
+
+ fst.save(dataOut);
+ dataOut.writeVInt(maxAnalyzedPathsForOneInput);
+ dataOut.writeByte((byte) (hasPayloads ? 1 : 0));
+ } finally {
+ IOUtils.close(output);
+ }
+ return true;
+ }
+
+ @Override
+ public boolean load(InputStream input) throws IOException {
+ DataInput dataIn = new InputStreamDataInput(input);
+ try {
+ this.fst = new FST<Pair<Long,BytesRef>>(dataIn, new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
+ maxAnalyzedPathsForOneInput = dataIn.readVInt();
+ hasPayloads = dataIn.readByte() == 1;
+ } finally {
+ IOUtils.close(input);
+ }
+ return true;
+ }
+
+ private LookupResult getLookupResult(Long output1, BytesRef output2, CharsRef spare) {
+ LookupResult result;
+ if (hasPayloads) {
+ int sepIndex = -1;
+ for(int i=0;i<output2.length;i++) {
+ if (output2.bytes[output2.offset+i] == payloadSep) {
+ sepIndex = i;
+ break;
+ }
+ }
+ assert sepIndex != -1;
+ spare.grow(sepIndex);
+ final int payloadLen = output2.length - sepIndex - 1;
+ UnicodeUtil.UTF8toUTF16(output2.bytes, output2.offset, sepIndex, spare);
+ BytesRef payload = new BytesRef(payloadLen);
+ System.arraycopy(output2.bytes, sepIndex+1, payload.bytes, 0, payloadLen);
+ payload.length = payloadLen;
+ result = new LookupResult(spare.toString(), decodeWeight(output1), payload);
+ } else {
+ spare.grow(output2.length);
+ UnicodeUtil.UTF8toUTF16(output2, spare);
+ result = new LookupResult(spare.toString(), decodeWeight(output1));
+ }
+
+ return result;
+ }
+
+ private boolean sameSurfaceForm(BytesRef key, BytesRef output2) {
+ if (hasPayloads) {
+ // output2 has at least PAYLOAD_SEP byte:
+ if (key.length >= output2.length) {
+ return false;
+ }
+ for(int i=0;i<key.length;i++) {
+ if (key.bytes[key.offset+i] != output2.bytes[output2.offset+i]) {
+ return false;
+ }
+ }
+ return output2.bytes[output2.offset + key.length] == payloadSep;
+ } else {
+ return key.bytesEquals(output2);
+ }
+ }
+
+ @Override
+ public List<LookupResult> lookup(final CharSequence key, boolean onlyMorePopular, int num) {
+ assert num > 0;
+
+ if (onlyMorePopular) {
+ throw new IllegalArgumentException("this suggester only works with onlyMorePopular=false");
+ }
+ if (fst == null) {
+ return Collections.emptyList();
+ }
+
+ //System.out.println("lookup key=" + key + " num=" + num);
+ for (int i = 0; i < key.length(); i++) {
+ if (key.charAt(i) == holeCharacter) {
+ throw new IllegalArgumentException("lookup key cannot contain HOLE character U+001E; this character is reserved");
+ }
+ if (key.charAt(i) == sepLabel) {
+ throw new IllegalArgumentException("lookup key cannot contain unit separator character U+001F; this character is reserved");
+ }
+ }
+ final BytesRef utf8Key = new BytesRef(key);
+ try {
+
+ Automaton lookupAutomaton = toLookupAutomaton(key);
+
+ final CharsRef spare = new CharsRef();
+
+ //System.out.println(" now intersect exactFirst=" + exactFirst);
+
+ // Intersect automaton w/ suggest wFST and get all
+ // prefix starting nodes & their outputs:
+ //final PathIntersector intersector = getPathIntersector(lookupAutomaton, fst);
+
+ //System.out.println(" prefixPaths: " + prefixPaths.size());
+
+ BytesReader bytesReader = fst.getBytesReader();
+
+ FST.Arc<Pair<Long,BytesRef>> scratchArc = new FST.Arc<Pair<Long,BytesRef>>();
+
+ final List<LookupResult> results = new ArrayList<LookupResult>();
+
+ List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst);
+
+ if (exactFirst) {
+
+ int count = 0;
+ for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
+ if (fst.findTargetArc(endByte, path.fstNode, scratchArc, bytesReader) != null) {
+ // This node has END_BYTE arc leaving, meaning it's an
+ // "exact" match:
+ count++;
+ }
+ }
+
+ // Searcher just to find the single exact only
+ // match, if present:
+ Util.TopNSearcher<Pair<Long,BytesRef>> searcher;
+ searcher = new Util.TopNSearcher<Pair<Long,BytesRef>>(fst, count * maxSurfaceFormsPerAnalyzedForm, count * maxSurfaceFormsPerAnalyzedForm, weightComparator);
+
+ // NOTE: we could almost get away with only using
+ // the first start node. The only catch is if
+ // maxSurfaceFormsPerAnalyzedForm had kicked in and
+ // pruned our exact match from one of these nodes
+ // ...:
+ for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
+ if (fst.findTargetArc(endByte, path.fstNode, scratchArc, bytesReader) != null) {
+ // This node has END_BYTE arc leaving, meaning it's an
+ // "exact" match:
+ searcher.addStartPaths(scratchArc, fst.outputs.add(path.output, scratchArc.output), false, path.input);
+ }
+ }
+
+ MinResult<Pair<Long,BytesRef>> completions[] = searcher.search();
+
+ // NOTE: this is rather inefficient: we enumerate
+ // every matching "exactly the same analyzed form"
+ // path, and then do linear scan to see if one of
+ // these exactly matches the input. It should be
+ // possible (though hairy) to do something similar
+ // to getByOutput, since the surface form is encoded
+ // into the FST output, so we more efficiently hone
+ // in on the exact surface-form match. Still, I
+ // suspect very little time is spent in this linear
+ // search: it's bounded by how many prefix start
+ // nodes we have and the
+ // maxSurfaceFormsPerAnalyzedForm:
+ for(MinResult<Pair<Long,BytesRef>> completion : completions) {
+ BytesRef output2 = completion.output.output2;
+ if (sameSurfaceForm(utf8Key, output2)) {
+ results.add(getLookupResult(completion.output.output1, output2, spare));
+ break;
+ }
+ }
+
+ if (results.size() == num) {
+ // That was quick:
+ return results;
+ }
+ }
+
+ Util.TopNSearcher<Pair<Long,BytesRef>> searcher;
+ searcher = new Util.TopNSearcher<Pair<Long,BytesRef>>(fst,
+ num - results.size(),
+ num * maxAnalyzedPathsForOneInput,
+ weightComparator) {
+ private final Set<BytesRef> seen = new HashSet<BytesRef>();
+
+ @Override
+ protected boolean acceptResult(IntsRef input, Pair<Long,BytesRef> output) {
+
+ // Dedup: when the input analyzes to a graph we
+ // can get duplicate surface forms:
+ if (seen.contains(output.output2)) {
+ return false;
+ }
+ seen.add(output.output2);
+
+ if (!exactFirst) {
+ return true;
+ } else {
+ // In exactFirst mode, don't accept any paths
+ // matching the surface form since that will
+ // create duplicate results:
+ if (sameSurfaceForm(utf8Key, output.output2)) {
+ // We found exact match, which means we should
+ // have already found it in the first search:
+ assert results.size() == 1;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ }
+ };
+
+ prefixPaths = getFullPrefixPaths(prefixPaths, lookupAutomaton, fst);
+
+ for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
+ searcher.addStartPaths(path.fstNode, path.output, true, path.input);
+ }
+
+ MinResult<Pair<Long,BytesRef>> completions[] = searcher.search();
+
+ for(MinResult<Pair<Long,BytesRef>> completion : completions) {
+
+ LookupResult result = getLookupResult(completion.output.output1, completion.output.output2, spare);
+
+ // TODO: for fuzzy case would be nice to return
+ // how many edits were required
+
+ //System.out.println(" result=" + result);
+ results.add(result);
+
+ if (results.size() == num) {
+ // In the exactFirst=true case the search may
+ // produce one extra path
+ break;
+ }
+ }
+
+ return results;
+ } catch (IOException bogus) {
+ throw new RuntimeException(bogus);
+ }
+ }
+
+ /** Returns all completion paths to initialize the search. */
+ protected List<FSTUtil.Path<Pair<Long,BytesRef>>> getFullPrefixPaths(List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths,
+ Automaton lookupAutomaton,
+ FST<Pair<Long,BytesRef>> fst)
+ throws IOException {
+ return prefixPaths;
+ }
+
+ public final Set<IntsRef> toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
+ // Analyze surface form:
+ TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString());
+ return toFiniteStrings(ts2a, ts);
+ }
+
+ public final Set<IntsRef> toFiniteStrings(final TokenStreamToAutomaton ts2a, TokenStream ts) throws IOException {
+
+ // Create corresponding automaton: labels are bytes
+ // from each analyzed token, with byte 0 used as
+ // separator between tokens:
+ Automaton automaton = ts2a.toAutomaton(ts);
+ ts.close();
+
+ replaceSep(automaton, preserveSep, sepLabel);
+
+ assert SpecialOperations.isFinite(automaton);
+
+ // Get all paths from the automaton (there can be
+ // more than one path, eg if the analyzer created a
+ // graph using SynFilter or WDF):
+
+ // TODO: we could walk & add simultaneously, so we
+ // don't have to alloc [possibly biggish]
+ // intermediate HashSet in RAM:
+ return SpecialOperations.getFiniteStrings(automaton, maxGraphExpansions);
+ }
+
+ final Automaton toLookupAutomaton(final CharSequence key) throws IOException {
+ // Turn tokenstream into automaton:
+ TokenStream ts = queryAnalyzer.tokenStream("", key.toString());
+ Automaton automaton = (getTokenStreamToAutomaton()).toAutomaton(ts);
+ ts.close();
+
+ // TODO: we could use the end offset to "guess"
+ // whether the final token was a partial token; this
+ // would only be a heuristic ... but maybe an OK one.
+ // This way we could eg differentiate "net" from "net ",
+ // which we can't today...
+
+ replaceSep(automaton, preserveSep, sepLabel);
+
+ // TODO: we can optimize this somewhat by determinizing
+ // while we convert
+ BasicOperations.determinize(automaton);
+ return automaton;
+ }
+
+ /**
+ * Returns the weight associated with an input string.
+ * This operation is not supported by this suggester.
+ */
+ public Object get(CharSequence key) {
+ throw new UnsupportedOperationException();
+ }
+
+ /** cost -> weight */
+ public static int decodeWeight(long encoded) {
+ return (int)(Integer.MAX_VALUE - encoded);
+ }
+
+ /** weight -> cost */
+ public static int encodeWeight(long value) {
+ if (value < 0 || value > Integer.MAX_VALUE) {
+ throw new UnsupportedOperationException("cannot encode value: " + value);
+ }
+ return Integer.MAX_VALUE - (int)value;
+ }
+
+ static final Comparator<Pair<Long,BytesRef>> weightComparator = new Comparator<Pair<Long,BytesRef>> () {
+ @Override
+ public int compare(Pair<Long,BytesRef> left, Pair<Long,BytesRef> right) {
+ return left.output1.compareTo(right.output1);
+ }
+ };
+
+
+ public static class XBuilder {
+ private Builder<Pair<Long, BytesRef>> builder;
+ private int maxSurfaceFormsPerAnalyzedForm;
+ private IntsRef scratchInts = new IntsRef();
+ private final PairOutputs<Long, BytesRef> outputs;
+ private boolean hasPayloads;
+ private BytesRef analyzed = new BytesRef();
+ private final SurfaceFormAndPayload[] surfaceFormsAndPayload;
+ private int count;
+ private ObjectIntOpenHashMap<BytesRef> seenSurfaceForms = HppcMaps.Object.Integer.ensureNoNullKeys(256, 0.75f);
+ private int payloadSep;
+
+ public XBuilder(int maxSurfaceFormsPerAnalyzedForm, boolean hasPayloads, int payloadSep) {
+ this.payloadSep = payloadSep;
+ this.outputs = new PairOutputs<Long, BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
+ this.builder = new Builder<Pair<Long, BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
+ this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
+ this.hasPayloads = hasPayloads;
+ surfaceFormsAndPayload = new SurfaceFormAndPayload[maxSurfaceFormsPerAnalyzedForm];
+
+ }
+ public void startTerm(BytesRef analyzed) {
+ this.analyzed.copyBytes(analyzed);
+ this.analyzed.grow(analyzed.length+2);
+ }
+
+ private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
+ BytesRef payload;
+ long weight;
+
+ public SurfaceFormAndPayload(BytesRef payload, long cost) {
+ super();
+ this.payload = payload;
+ this.weight = cost;
+ }
+
+ @Override
+ public int compareTo(SurfaceFormAndPayload o) {
+ int res = compare(weight, o.weight);
+ if (res == 0 ){
+ return payload.compareTo(o.payload);
+ }
+ return res;
+ }
+ public static int compare(long x, long y) {
+ return (x < y) ? -1 : ((x == y) ? 0 : 1);
+ }
+ }
+
+ public void addSurface(BytesRef surface, BytesRef payload, long cost) throws IOException {
+ int surfaceIndex = -1;
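+ // cost == -1 means "no explicit weight"; it is resolved against the
+ // defaultWeight passed to finishTerm()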
+ long encodedWeight = cost == -1 ? cost : encodeWeight(cost);
+ /*
+ * we need to check if we have seen this surface form; if so, only keep
+ * the surface form with the highest weight and drop the rest, no matter
+ * whether the payloads differ.
+ */
+ if (count >= maxSurfaceFormsPerAnalyzedForm) {
+ // More than maxSurfaceFormsPerAnalyzedForm
+ // dups: skip the rest:
+ return;
+ }
+ BytesRef surfaceCopy;
+ if (count > 0 && seenSurfaceForms.containsKey(surface)) {
+ surfaceIndex = seenSurfaceForms.lget();
+ SurfaceFormAndPayload surfaceFormAndPayload = surfaceFormsAndPayload[surfaceIndex];
+ if (encodedWeight >= surfaceFormAndPayload.weight) {
+ return;
+ }
+ surfaceCopy = BytesRef.deepCopyOf(surface);
+ } else {
+ surfaceIndex = count++;
+ surfaceCopy = BytesRef.deepCopyOf(surface);
+ seenSurfaceForms.put(surfaceCopy, surfaceIndex);
+ }
+
+ BytesRef payloadRef;
+ if (!hasPayloads) {
+ payloadRef = surfaceCopy;
+ } else {
+ int len = surface.length + 1 + payload.length;
+ final BytesRef br = new BytesRef(len);
+ System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
+ br.bytes[surface.length] = (byte) payloadSep;
+ System.arraycopy(payload.bytes, payload.offset, br.bytes, surface.length + 1, payload.length);
+ br.length = len;
+ payloadRef = br;
+ }
+ if (surfaceFormsAndPayload[surfaceIndex] == null) {
+ surfaceFormsAndPayload[surfaceIndex] = new SurfaceFormAndPayload(payloadRef, encodedWeight);
+ } else {
+ surfaceFormsAndPayload[surfaceIndex].payload = payloadRef;
+ surfaceFormsAndPayload[surfaceIndex].weight = encodedWeight;
+ }
+ }
+
+ public void finishTerm(long defaultWeight) throws IOException {
+ ArrayUtil.timSort(surfaceFormsAndPayload, 0, count);
+ int deduplicator = 0;
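+ // extend the analyzed form by two bytes: a 0 separator (which sorts
+ // before any other byte) and a per-surface-form dedup counter filled in
+ // inside the loop, so equal analyzed forms remain distinct FST inputs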
+ analyzed.bytes[analyzed.offset + analyzed.length] = 0;
+ analyzed.length += 2;
+ for (int i = 0; i < count; i++) {
+ analyzed.bytes[analyzed.offset + analyzed.length - 1 ] = (byte) deduplicator++;
+ Util.toIntsRef(analyzed, scratchInts);
+ SurfaceFormAndPayload candidate = surfaceFormsAndPayload[i];
+ long cost = candidate.weight == -1 ? encodeWeight(Math.min(Integer.MAX_VALUE, defaultWeight)) : candidate.weight;
+ builder.add(scratchInts, outputs.newPair(cost, candidate.payload));
+ }
+ seenSurfaceForms.clear();
+ count = 0;
+ }
+
+ public FST<Pair<Long, BytesRef>> build() throws IOException {
+ return builder.finish();
+ }
+
+ public boolean hasPayloads() {
+ return hasPayloads;
+ }
+
+ public int maxSurfaceFormsPerAnalyzedForm() {
+ return maxSurfaceFormsPerAnalyzedForm;
+ }
+
+ }
+}
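For reference, a minimal sketch of how the XBuilder above is meant to be driven. The grouping of inputs by analyzed form is the caller's responsibility, and the visibility of the PAYLOAD_SEP constant is assumed from its use in XFuzzySuggester below; none of this code is part of the patch.

```java
import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PairOutputs;

public class XBuilderSketch {
    public static void main(String[] args) throws Exception {
        // 256 surface forms per analyzed form, no payloads, standard payload separator.
        XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(
                256, false, XAnalyzingSuggester.PAYLOAD_SEP);

        // One analyzed form with two surface forms; addSurface keeps only the
        // highest-weighted duplicate, and cost == -1 defers to finishTerm's default.
        builder.startTerm(new BytesRef("munchen"));
        builder.addSurface(new BytesRef("München"), null, 10);
        builder.addSurface(new BytesRef("Muenchen"), null, 7);
        builder.finishTerm(1); // defaultWeight for cost == -1 entries

        FST<PairOutputs.Pair<Long, BytesRef>> fst = builder.build();
        System.out.println("FST built: " + (fst != null));
    }
}
```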
diff --git a/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java b/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java
new file mode 100644
index 0000000..4379cda
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.search.suggest.analyzing;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStreamToAutomaton;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.automaton.*;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.PairOutputs;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is
+ * based on the Damerau-Levenshtein (optimal string alignment) algorithm, though
+ * you can explicitly choose classic Levenshtein by passing <code>false</code>
+ * for the <code>transpositions</code> parameter.
+ * <p>
+ * At most, this query will match terms up to
+ * {@value org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE}
+ * edits. Higher distances are not supported. Note that the
+ * fuzzy distance is measured in "byte space" on the bytes
+ * returned by the {@link org.apache.lucene.analysis.TokenStream}'s {@link
+ * org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute}, usually UTF8. By default
+ * the analyzed bytes must be at least 3 {@link
+ * #DEFAULT_MIN_FUZZY_LENGTH} bytes before any edits are
+ * considered. Furthermore, the first 1 {@link
+ * #DEFAULT_NON_FUZZY_PREFIX} byte is not allowed to be
+ * edited. We allow up to 1 {@link
+ * #DEFAULT_MAX_EDITS} edit.
+ * If the {@link #unicodeAware} parameter in the constructor is set to true, maxEdits,
+ * minFuzzyLength, transpositions and nonFuzzyPrefix are measured in Unicode code
+ * points (actual letters) instead of bytes.
+ *
+ * <p>
+ * NOTE: This suggester does not boost suggestions that
+ * required no edits over suggestions that did require
+ * edits. This is a known limitation.
+ *
+ * <p>
+ * Note: complex query analyzers can have a significant impact on the lookup
+ * performance. It's recommended to not use analyzers that drop or inject terms
+ * like synonyms to keep the complexity of the prefix intersection low for good
+ * lookup performance. At index time, complex analyzers can safely be used.
+ * </p>
+ *
+ * @lucene.experimental
+ */
+public final class XFuzzySuggester extends XAnalyzingSuggester {
+ private final int maxEdits;
+ private final boolean transpositions;
+ private final int nonFuzzyPrefix;
+ private final int minFuzzyLength;
+ private final boolean unicodeAware;
+
+ /**
+ * Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix
+ * parameters in Unicode code points (actual letters)
+ * instead of bytes.
+ */
+ public static final boolean DEFAULT_UNICODE_AWARE = false;
+
+ /**
+ * The default minimum length of the key passed to {@link
+ * #lookup} before any edits are allowed.
+ */
+ public static final int DEFAULT_MIN_FUZZY_LENGTH = 3;
+
+ /**
+ * The default prefix length where edits are not allowed.
+ */
+ public static final int DEFAULT_NON_FUZZY_PREFIX = 1;
+
+ /**
+ * The default maximum number of edits for fuzzy
+ * suggestions.
+ */
+ public static final int DEFAULT_MAX_EDITS = 1;
+
+ /**
+ * The default transposition value passed to {@link org.apache.lucene.util.automaton.LevenshteinAutomata}
+ */
+ public static final boolean DEFAULT_TRANSPOSITIONS = true;
+
+ /**
+ * Creates an {@link XFuzzySuggester} instance initialized with default values.
+ *
+ * @param analyzer the analyzer used for this suggester
+ */
+ public XFuzzySuggester(Analyzer analyzer) {
+ this(analyzer, analyzer);
+ }
+
+ /**
+ * Creates an {@link XFuzzySuggester} instance with an index and a query analyzer, initialized with default values.
+ *
+ * @param indexAnalyzer
+ * Analyzer that will be used for analyzing suggestions while building the index.
+ * @param queryAnalyzer
+ * Analyzer that will be used for analyzing query text during lookup
+ */
+ public XFuzzySuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
+ this(indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, DEFAULT_MAX_EDITS, DEFAULT_TRANSPOSITIONS,
+ DEFAULT_NON_FUZZY_PREFIX, DEFAULT_MIN_FUZZY_LENGTH, DEFAULT_UNICODE_AWARE, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
+
+ }
+
+ /**
+ * Creates an {@link XFuzzySuggester} instance.
+ *
+ * @param indexAnalyzer Analyzer that will be used for
+ * analyzing suggestions while building the index.
+ * @param queryAnalyzer Analyzer that will be used for
+ * analyzing query text during lookup
+ * @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP}
+ * @param maxSurfaceFormsPerAnalyzedForm Maximum number of
+ * surface forms to keep for a single analyzed form.
+ * When there are too many surface forms we discard the
+ * lowest weighted ones.
+ * @param maxGraphExpansions Maximum number of graph paths
+ * to expand from the analyzed form. Set this to -1 for
+ * no limit.
+ * @param maxEdits must be >= 0 and <= {@link org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} .
+ * @param transpositions <code>true</code> if transpositions should be treated as a primitive
+ * edit operation. If this is false, comparisons will implement the classic
+ * Levenshtein algorithm.
+ * @param nonFuzzyPrefix length of common (non-fuzzy) prefix (see default {@link #DEFAULT_NON_FUZZY_PREFIX})
+ * @param minFuzzyLength minimum length of lookup key before any edits are allowed (see default {@link #DEFAULT_MIN_FUZZY_LENGTH})
+ * @param sepLabel separation label
+ * @param payloadSep payload separator byte
+ * @param endByte end byte marker byte
+ */
+ public XFuzzySuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer, int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+ int maxEdits, boolean transpositions, int nonFuzzyPrefix, int minFuzzyLength, boolean unicodeAware,
+ FST<PairOutputs.Pair<Long, BytesRef>> fst, boolean hasPayloads, int maxAnalyzedPathsForOneInput,
+ int sepLabel, int payloadSep, int endByte, int holeCharacter) {
+ super(indexAnalyzer, queryAnalyzer, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, true, fst, hasPayloads, maxAnalyzedPathsForOneInput, sepLabel, payloadSep, endByte, holeCharacter);
+ if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
+ throw new IllegalArgumentException("maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
+ }
+ if (nonFuzzyPrefix < 0) {
+ throw new IllegalArgumentException("nonFuzzyPrefix must not be >= 0 (got " + nonFuzzyPrefix + ")");
+ }
+ if (minFuzzyLength < 0) {
+ throw new IllegalArgumentException("minFuzzyLength must not be >= 0 (got " + minFuzzyLength + ")");
+ }
+
+ this.maxEdits = maxEdits;
+ this.transpositions = transpositions;
+ this.nonFuzzyPrefix = nonFuzzyPrefix;
+ this.minFuzzyLength = minFuzzyLength;
+ this.unicodeAware = unicodeAware;
+ }
+
+ @Override
+ protected List<FSTUtil.Path<PairOutputs.Pair<Long,BytesRef>>> getFullPrefixPaths(List<FSTUtil.Path<PairOutputs.Pair<Long,BytesRef>>> prefixPaths,
+ Automaton lookupAutomaton,
+ FST<PairOutputs.Pair<Long,BytesRef>> fst)
+ throws IOException {
+
+ // TODO: right now there's no penalty for fuzzy/edits,
+ // ie a completion whose prefix matched exactly what the
+ // user typed gets no boost over completions that
+ // required an edit, which get no boost over completions
+ // requiring two edits. I suspect a multiplicative
+ // factor is appropriate (eg, say a fuzzy match must be at
+ // least 2X better weight than the non-fuzzy match to
+ // "compete") ... in which case I think the wFST needs
+ // to be log weights or something ...
+
+ Automaton levA = convertAutomaton(toLevenshteinAutomata(lookupAutomaton));
+ /*
+ Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"), "UTF-8");
+ w.write(levA.toDot());
+ w.close();
+ System.out.println("Wrote LevA to out.dot");
+ */
+ return FSTUtil.intersectPrefixPaths(levA, fst);
+ }
+
+ @Override
+ protected Automaton convertAutomaton(Automaton a) {
+ if (unicodeAware) {
+ Automaton utf8automaton = new UTF32ToUTF8().convert(a);
+ BasicOperations.determinize(utf8automaton);
+ return utf8automaton;
+ } else {
+ return a;
+ }
+ }
+
+ @Override
+ public TokenStreamToAutomaton getTokenStreamToAutomaton() {
+ final TokenStreamToAutomaton tsta = super.getTokenStreamToAutomaton();
+ tsta.setUnicodeArcs(unicodeAware);
+ return tsta;
+ }
+
+ Automaton toLevenshteinAutomata(Automaton automaton) {
+ final Set<IntsRef> ref = SpecialOperations.getFiniteStrings(automaton, -1);
+ Automaton subs[] = new Automaton[ref.size()];
+ int upto = 0;
+ for (IntsRef path : ref) {
+ if (path.length <= nonFuzzyPrefix || path.length < minFuzzyLength) {
+ subs[upto] = BasicAutomata.makeString(path.ints, path.offset, path.length);
+ upto++;
+ } else {
+ Automaton prefix = BasicAutomata.makeString(path.ints, path.offset, nonFuzzyPrefix);
+ int ints[] = new int[path.length-nonFuzzyPrefix];
+ System.arraycopy(path.ints, path.offset+nonFuzzyPrefix, ints, 0, ints.length);
+ // TODO: maybe add alphaMin to LevenshteinAutomata,
+ // and pass 1 instead of 0? We probably don't want
+ // to allow the trailing dedup bytes to be
+ // edited... but then 0 byte is "in general" allowed
+ // on input (but not in UTF8).
+ LevenshteinAutomata lev = new LevenshteinAutomata(ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions);
+ Automaton levAutomaton = lev.toAutomaton(maxEdits);
+ Automaton combined = BasicOperations.concatenate(Arrays.asList(prefix, levAutomaton));
+ combined.setDeterministic(true); // it's like the special case in concatenate itself, except we cloneExpanded already
+ subs[upto] = combined;
+ upto++;
+ }
+ }
+
+ if (subs.length == 0) {
+ // automaton is empty, there are no accepted paths through it
+ return BasicAutomata.makeEmpty(); // matches nothing
+ } else if (subs.length == 1) {
+ // no synonyms or anything: just a single path through the tokenstream
+ return subs[0];
+ } else {
+ // multiple paths: this is really scary! is it slow?
+ // maybe we should not do this and throw UOE?
+ Automaton a = BasicOperations.union(Arrays.asList(subs));
+ // TODO: we could call toLevenshteinAutomata() before det?
+ // this only happens if you have multiple paths anyway (e.g. synonyms)
+ BasicOperations.determinize(a);
+
+ return a;
+ }
+ }
+}
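A hedged usage sketch for the fuzzy suggester: it assumes a suggester previously persisted via store() is reloaded with load(), that the bundled Lucene matches the Version constant used for the analyzer, and that the file name is illustrative.

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.suggest.Lookup;
import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
import org.apache.lucene.util.Version;

import java.io.FileInputStream;
import java.util.List;

public class FuzzyLookupSketch {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_46); // version is an assumption
        XFuzzySuggester suggester = new XFuzzySuggester(analyzer);

        // Load a previously stored FST (see store()/load() above); path is illustrative.
        suggester.load(new FileInputStream("suggest.fst"));

        // onlyMorePopular must be false for this suggester (see lookup()).
        List<Lookup.LookupResult> hits = suggester.lookup("munchen", false, 5);
        for (Lookup.LookupResult hit : hits) {
            System.out.println(hit.key + " -> " + hit.value);
        }
    }
}
```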
diff --git a/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java
new file mode 100644
index 0000000..8512e5c
--- /dev/null
+++ b/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.search.vectorhighlight;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ *
+ */
+// LUCENE MONITOR
+public class CustomFieldQuery extends FieldQuery {
+
+ private static Field multiTermQueryWrapperFilterQueryField;
+
+ static {
+ try {
+ multiTermQueryWrapperFilterQueryField = MultiTermQueryWrapperFilter.class.getDeclaredField("query");
+ multiTermQueryWrapperFilterQueryField.setAccessible(true);
+ } catch (NoSuchFieldException e) {
+ // ignore
+ }
+ }
+
+ public static final ThreadLocal<Boolean> highlightFilters = new ThreadLocal<Boolean>();
+
+ public CustomFieldQuery(Query query, IndexReader reader, FastVectorHighlighter highlighter) throws IOException {
+ this(query, reader, highlighter.isPhraseHighlight(), highlighter.isFieldMatch());
+ }
+
+ public CustomFieldQuery(Query query, IndexReader reader, boolean phraseHighlight, boolean fieldMatch) throws IOException {
+ super(query, reader, phraseHighlight, fieldMatch);
+ highlightFilters.remove();
+ }
+
+ @Override
+ void flatten(Query sourceQuery, IndexReader reader, Collection<Query> flatQueries) throws IOException {
+ if (sourceQuery instanceof SpanTermQuery) {
+ super.flatten(new TermQuery(((SpanTermQuery) sourceQuery).getTerm()), reader, flatQueries);
+ } else if (sourceQuery instanceof ConstantScoreQuery) {
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) sourceQuery;
+ if (constantScoreQuery.getFilter() != null) {
+ flatten(constantScoreQuery.getFilter(), reader, flatQueries);
+ } else {
+ flatten(constantScoreQuery.getQuery(), reader, flatQueries);
+ }
+ } else if (sourceQuery instanceof FunctionScoreQuery) {
+ flatten(((FunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries);
+ } else if (sourceQuery instanceof FilteredQuery) {
+ flatten(((FilteredQuery) sourceQuery).getQuery(), reader, flatQueries);
+ flatten(((FilteredQuery) sourceQuery).getFilter(), reader, flatQueries);
+ } else if (sourceQuery instanceof XFilteredQuery) {
+ flatten(((XFilteredQuery) sourceQuery).getQuery(), reader, flatQueries);
+ flatten(((XFilteredQuery) sourceQuery).getFilter(), reader, flatQueries);
+ } else if (sourceQuery instanceof MultiPhrasePrefixQuery) {
+ flatten(sourceQuery.rewrite(reader), reader, flatQueries);
+ } else if (sourceQuery instanceof FiltersFunctionScoreQuery) {
+ flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries);
+ } else if (sourceQuery instanceof MultiPhraseQuery) {
+ MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
+ convertMultiPhraseQuery(0, new int[q.getTermArrays().size()] , q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
+ } else {
+ super.flatten(sourceQuery, reader, flatQueries);
+ }
+ }
+
+ private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List<Term[]> terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
+ if (currentPos == 0) {
+ // if we have more than 16 terms in total, flatten each term individually instead of expanding every path
+ int numTerms = 0;
+ for (Term[] currentPosTerm : terms) {
+ numTerms += currentPosTerm.length;
+ }
+ if (numTerms > 16) {
+ for (Term[] currentPosTerm : terms) {
+ for (Term term : currentPosTerm) {
+ super.flatten(new TermQuery(term), reader, flatQueries);
+ }
+ }
+ return;
+ }
+ }
+ /*
+ * we walk all possible paths, and for each path down the MPQ we create a PhraseQuery; this is what FieldQuery supports.
+ * It seems expensive, but most queries will be pretty small.
+ */
+ if (currentPos == terms.size()) {
+ PhraseQuery query = new PhraseQuery();
+ query.setBoost(orig.getBoost());
+ query.setSlop(orig.getSlop());
+ for (int i = 0; i < termsIdx.length; i++) {
+ query.add(terms.get(i)[termsIdx[i]], pos[i]);
+ }
+ this.flatten(query, reader, flatQueries);
+ } else {
+ Term[] t = terms.get(currentPos);
+ for (int i = 0; i < t.length; i++) {
+ termsIdx[currentPos] = i;
+ convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries);
+ }
+ }
+ }
+
+ void flatten(Filter sourceFilter, IndexReader reader, Collection<Query> flatQueries) throws IOException {
+ Boolean highlight = highlightFilters.get();
+ if (highlight == null || highlight.equals(Boolean.FALSE)) {
+ return;
+ }
+ if (sourceFilter instanceof TermFilter) {
+ flatten(new TermQuery(((TermFilter) sourceFilter).getTerm()), reader, flatQueries);
+ } else if (sourceFilter instanceof MultiTermQueryWrapperFilter) {
+ if (multiTermQueryWrapperFilterQueryField != null) {
+ try {
+ flatten((Query) multiTermQueryWrapperFilterQueryField.get(sourceFilter), reader, flatQueries);
+ } catch (IllegalAccessException e) {
+ // ignore
+ }
+ }
+ } else if (sourceFilter instanceof XBooleanFilter) {
+ XBooleanFilter booleanFilter = (XBooleanFilter) sourceFilter;
+ for (FilterClause clause : booleanFilter.clauses()) {
+ if (clause.getOccur() == BooleanClause.Occur.MUST || clause.getOccur() == BooleanClause.Occur.SHOULD) {
+ flatten(clause.getFilter(), reader, flatQueries);
+ }
+ }
+ }
+ }
+}
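To make the path enumeration in convertMultiPhraseQuery concrete, a small illustration (the field name and terms are hypothetical): a MultiPhraseQuery with two alternatives in the first position flattens into one PhraseQuery per path.

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiPhraseQuery;

public class MpqExpansionSketch {
    public static void main(String[] args) {
        // "fast" OR "quick", followed by "fox": convertMultiPhraseQuery above
        // walks both paths and emits two PhraseQueries:
        //   "fast fox" and "quick fox"
        MultiPhraseQuery mpq = new MultiPhraseQuery();
        mpq.add(new Term[] { new Term("body", "fast"), new Term("body", "quick") });
        mpq.add(new Term("body", "fox"));
        // With more than 16 terms in total, the code falls back to flattening
        // each term as an individual TermQuery instead of enumerating paths.
        System.out.println(mpq);
    }
}
```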
diff --git a/src/main/java/org/apache/lucene/store/BufferedChecksumIndexOutput.java b/src/main/java/org/apache/lucene/store/BufferedChecksumIndexOutput.java
new file mode 100644
index 0000000..0c64a93
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/BufferedChecksumIndexOutput.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.store;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+
+/**
+ */
+public class BufferedChecksumIndexOutput extends BufferedIndexOutput {
+
+ private final IndexOutput delegate;
+ private final BufferedIndexOutput bufferedDelegate;
+ private final Checksum digest;
+
+ public BufferedChecksumIndexOutput(IndexOutput delegate, Checksum digest) {
+ super(delegate instanceof BufferedIndexOutput ? ((BufferedIndexOutput) delegate).getBufferSize() : BufferedIndexOutput.DEFAULT_BUFFER_SIZE);
+ if (delegate instanceof BufferedIndexOutput) {
+ bufferedDelegate = (BufferedIndexOutput) delegate;
+ this.delegate = delegate;
+ } else {
+ this.delegate = delegate;
+ bufferedDelegate = null;
+ }
+ this.digest = digest;
+ }
+
+ public Checksum digest() {
+ return digest;
+ }
+
+ public IndexOutput underlying() {
+ return this.delegate;
+ }
+
+ // don't override it, the base class method simply reads from the input and writes to this output
+// @Override public void copyBytes(IndexInput input, long numBytes) throws IOException {
+// delegate.copyBytes(input, numBytes);
+// }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ super.close();
+ } finally {
+ delegate.close();
+ }
+
+ }
+
+ @Override
+ protected void flushBuffer(byte[] b, int offset, int len) throws IOException {
+ if (bufferedDelegate != null) {
+ bufferedDelegate.flushBuffer(b, offset, len);
+ } else {
+ delegate.writeBytes(b, offset, len);
+ }
+ digest.update(b, offset, len);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ try {
+ super.flush();
+ } finally {
+ delegate.flush();
+ }
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ // seek might be called on this output, which means the digest is not a checksum
+ // of the final file contents but of the bytes written to this stream; that is
+ // the same for each type of file in lucene
+ super.seek(pos);
+ delegate.seek(pos);
+ }
+
+ @Override
+ public long length() throws IOException {
+ return delegate.length();
+ }
+
+ @Override
+ public void setLength(long length) throws IOException {
+ delegate.setLength(length);
+ }
+
+ @Override
+ public String toString() {
+ return delegate.toString();
+ }
+}
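A short sketch of the intended use: wrap any IndexOutput with a java.util.zip.CRC32 digest and read the running checksum after flushing. The directory and file name are illustrative.

```java
import org.apache.lucene.store.BufferedChecksumIndexOutput;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.RAMDirectory;

import java.util.zip.CRC32;

public class ChecksumOutputSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        BufferedChecksumIndexOutput out = new BufferedChecksumIndexOutput(
                dir.createOutput("_0.dat", IOContext.DEFAULT), new CRC32());

        byte[] payload = "hello checksum".getBytes("UTF-8");
        out.writeBytes(payload, payload.length);
        out.flush();

        // digest() exposes the running checksum over everything flushed so far.
        System.out.println("crc32=" + out.digest().getValue());
        out.close();
        dir.close();
    }
}
```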
diff --git a/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java b/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java
new file mode 100644
index 0000000..814a75f
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.store;
+
+import org.apache.lucene.store.IOContext.Context;
+
+import java.io.IOException;
+
+public final class RateLimitedFSDirectory extends FilterDirectory {
+
+ private final StoreRateLimiting.Provider rateLimitingProvider;
+
+ private final StoreRateLimiting.Listener rateListener;
+
+ public RateLimitedFSDirectory(FSDirectory wrapped, StoreRateLimiting.Provider rateLimitingProvider,
+ StoreRateLimiting.Listener rateListener) {
+ super(wrapped);
+ this.rateLimitingProvider = rateLimitingProvider;
+ this.rateListener = rateListener;
+ }
+
+ @Override
+ public IndexOutput createOutput(String name, IOContext context) throws IOException {
+ final IndexOutput output = in.createOutput(name, context);
+
+ StoreRateLimiting rateLimiting = rateLimitingProvider.rateLimiting();
+ StoreRateLimiting.Type type = rateLimiting.getType();
+ RateLimiter limiter = rateLimiting.getRateLimiter();
+ if (type == StoreRateLimiting.Type.NONE || limiter == null) {
+ return output;
+ }
+ if (context.context == Context.MERGE) {
+ // we are merging, and the type is either MERGE or ALL, so rate limit...
+ return new RateLimitedIndexOutput(limiter, rateListener, output);
+ }
+ if (type == StoreRateLimiting.Type.ALL) {
+ return new RateLimitedIndexOutput(limiter, rateListener, output);
+ }
+ // we shouldn't really get here...
+ return output;
+ }
+
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+
+ @Override
+ public String toString() {
+ StoreRateLimiting rateLimiting = rateLimitingProvider.rateLimiting();
+ StoreRateLimiting.Type type = rateLimiting.getType();
+ RateLimiter limiter = rateLimiting.getRateLimiter();
+ if (type == StoreRateLimiting.Type.NONE || limiter == null) {
+ return StoreUtils.toString(in);
+ } else {
+ return "rate_limited(" + StoreUtils.toString(in) + ", type=" + type.name() + ", rate=" + limiter.getMbPerSec() + ")";
+ }
+ }
+
+
+ static final class RateLimitedIndexOutput extends BufferedIndexOutput {
+
+ private final IndexOutput delegate;
+ private final BufferedIndexOutput bufferedDelegate;
+ private final RateLimiter rateLimiter;
+ private final StoreRateLimiting.Listener rateListener;
+
+ RateLimitedIndexOutput(final RateLimiter rateLimiter, final StoreRateLimiting.Listener rateListener, final IndexOutput delegate) {
+ super(delegate instanceof BufferedIndexOutput ? ((BufferedIndexOutput) delegate).getBufferSize() : BufferedIndexOutput.DEFAULT_BUFFER_SIZE);
+ if (delegate instanceof BufferedIndexOutput) {
+ bufferedDelegate = (BufferedIndexOutput) delegate;
+ this.delegate = delegate;
+ } else {
+ this.delegate = delegate;
+ bufferedDelegate = null;
+ }
+ this.rateLimiter = rateLimiter;
+ this.rateListener = rateListener;
+ }
+
+ @Override
+ protected void flushBuffer(byte[] b, int offset, int len) throws IOException {
+ rateListener.onPause(rateLimiter.pause(len));
+ if (bufferedDelegate != null) {
+ bufferedDelegate.flushBuffer(b, offset, len);
+ } else {
+ delegate.writeBytes(b, offset, len);
+ }
+
+ }
+
+ @Override
+ public long length() throws IOException {
+ return delegate.length();
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ flush();
+ delegate.seek(pos);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ try {
+ super.flush();
+ } finally {
+ delegate.flush();
+ }
+ }
+
+ @Override
+ public void setLength(long length) throws IOException {
+ delegate.setLength(length);
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ super.close();
+ } finally {
+ delegate.close();
+ }
+ }
+ }
+}
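A wiring sketch for the wrapper, using the StoreRateLimiting class added below; the index path is illustrative and the listener simply accumulates pause time.

```java
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RateLimitedFSDirectory;
import org.apache.lucene.store.StoreRateLimiting;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

import java.io.File;
import java.util.concurrent.atomic.AtomicLong;

public class RateLimitedDirSketch {
    public static void main(String[] args) throws Exception {
        final StoreRateLimiting rateLimiting = new StoreRateLimiting();
        rateLimiting.setType(StoreRateLimiting.Type.MERGE);
        rateLimiting.setMaxRate(new ByteSizeValue(20, ByteSizeUnit.MB));

        final AtomicLong totalPauseNanos = new AtomicLong();

        RateLimitedFSDirectory dir = new RateLimitedFSDirectory(
                FSDirectory.open(new File("/tmp/index")),   // illustrative path
                new StoreRateLimiting.Provider() {
                    @Override
                    public StoreRateLimiting rateLimiting() {
                        return rateLimiting;
                    }
                },
                new StoreRateLimiting.Listener() {
                    @Override
                    public void onPause(long nanos) {
                        totalPauseNanos.addAndGet(nanos);
                    }
                });
        // Only outputs opened with a MERGE IOContext are throttled for Type.MERGE.
        System.out.println(dir);
        dir.close();
    }
}
```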
diff --git a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
new file mode 100644
index 0000000..8b745ef
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.store;
+
+import org.apache.lucene.store.RateLimiter.SimpleRateLimiter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+/**
+ */
+public class StoreRateLimiting {
+
+ public static interface Provider {
+
+ StoreRateLimiting rateLimiting();
+ }
+
+ public interface Listener {
+
+ void onPause(long nanos);
+ }
+
+ public static enum Type {
+ NONE,
+ MERGE,
+ ALL;
+
+ public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
+ if ("none".equalsIgnoreCase(type)) {
+ return NONE;
+ } else if ("merge".equalsIgnoreCase(type)) {
+ return MERGE;
+ } else if ("all".equalsIgnoreCase(type)) {
+ return ALL;
+ }
+ throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
+ }
+ }
+
+ private final SimpleRateLimiter rateLimiter = new SimpleRateLimiter(0);
+ private volatile SimpleRateLimiter actualRateLimiter;
+
+ private volatile Type type;
+
+ public StoreRateLimiting() {
+
+ }
+
+ @Nullable
+ public RateLimiter getRateLimiter() {
+ return actualRateLimiter;
+ }
+
+ public void setMaxRate(ByteSizeValue rate) {
+ if (rate.bytes() <= 0) {
+ actualRateLimiter = null;
+ } else if (actualRateLimiter == null) {
+ actualRateLimiter = rateLimiter;
+ actualRateLimiter.setMbPerSec(rate.mbFrac());
+ } else {
+ assert rateLimiter == actualRateLimiter;
+ rateLimiter.setMbPerSec(rate.mbFrac());
+ }
+ }
+
+ public Type getType() {
+ return type;
+ }
+
+ public void setType(Type type) {
+ this.type = type;
+ }
+
+ public void setType(String type) throws ElasticsearchIllegalArgumentException {
+ this.type = Type.fromString(type);
+ }
+}
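The configuration semantics are worth spelling out: a rate of zero or less removes the limiter entirely, and the type string is parsed through Type.fromString. A brief sketch:

```java
import org.apache.lucene.store.StoreRateLimiting;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class RateLimitingConfigSketch {
    public static void main(String[] args) {
        StoreRateLimiting limiting = new StoreRateLimiting();
        limiting.setType("merge"); // parsed via Type.fromString; unknown strings throw

        // A rate of zero or less removes the limiter entirely:
        limiting.setMaxRate(new ByteSizeValue(0));
        assert limiting.getRateLimiter() == null;

        // A positive rate installs the shared SimpleRateLimiter at that rate:
        limiting.setMaxRate(new ByteSizeValue(50, ByteSizeUnit.MB));
        assert limiting.getRateLimiter() != null;
    }
}
```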
diff --git a/src/main/java/org/apache/lucene/store/StoreUtils.java b/src/main/java/org/apache/lucene/store/StoreUtils.java
new file mode 100644
index 0000000..5364970
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/StoreUtils.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.store;
+
+/**
+ */
+public final class StoreUtils {
+
+ private StoreUtils() {
+
+ }
+
+ public static String toString(Directory directory) {
+ if (directory instanceof NIOFSDirectory) {
+ NIOFSDirectory niofsDirectory = (NIOFSDirectory)directory;
+ return "niofs(" + niofsDirectory.getDirectory() + ")";
+ }
+ if (directory instanceof MMapDirectory) {
+ MMapDirectory mMapDirectory = (MMapDirectory)directory;
+ return "mmapfs(" + mMapDirectory.getDirectory() + ")";
+ }
+ if (directory instanceof SimpleFSDirectory) {
+ SimpleFSDirectory simpleFSDirectory = (SimpleFSDirectory)directory;
+ return "simplefs(" + simpleFSDirectory.getDirectory() + ")";
+ }
+ return directory.toString();
+ }
+}
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferAllocator.java b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferAllocator.java
new file mode 100644
index 0000000..27b4a1a
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferAllocator.java
@@ -0,0 +1,97 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+
+/**
+ * A byte buffer allocator simply allocates byte buffers, and handles releasing
+ * them. Implementations can include special direct buffer cleaning when releasing
+ * a buffer, as well as caching of byte buffers.
+ * <p/>
+ * <p>There are two types of buffers that can be allocated, small and big. This
+ * comes in handy when knowing in advance (more or less) the size of the buffers
+ * needed (large files or small), as well as in caching implementations.
+ */
+public interface ByteBufferAllocator {
+
+ /**
+ * Helper class for allocator implementations, allowing them to clean direct buffers.
+ */
+ public static class Cleaner {
+ public static final boolean CLEAN_SUPPORTED;
+ private static final Method directBufferCleaner;
+ private static final Method directBufferCleanerClean;
+
+ static {
+ Method directBufferCleanerX = null;
+ Method directBufferCleanerCleanX = null;
+ boolean v;
+ try {
+ directBufferCleanerX = Class.forName("java.nio.DirectByteBuffer").getMethod("cleaner");
+ directBufferCleanerX.setAccessible(true);
+ directBufferCleanerCleanX = Class.forName("sun.misc.Cleaner").getMethod("clean");
+ directBufferCleanerCleanX.setAccessible(true);
+ v = true;
+ } catch (Exception e) {
+ v = false;
+ }
+ CLEAN_SUPPORTED = v;
+ directBufferCleaner = directBufferCleanerX;
+ directBufferCleanerClean = directBufferCleanerCleanX;
+ }
+
+ public static void clean(ByteBuffer buffer) {
+ if (CLEAN_SUPPORTED && buffer.isDirect()) {
+ try {
+ Object cleaner = directBufferCleaner.invoke(buffer);
+ directBufferCleanerClean.invoke(cleaner);
+ } catch (Exception e) {
+ // silently ignore exception
+ }
+ }
+ }
+ }
+
+ public static enum Type {
+ SMALL,
+ LARGE
+ }
+
+ /**
+ * The size (in bytes) that is allocated for the provided type.
+ */
+ int sizeInBytes(Type type);
+
+ /**
+ * Allocate a byte buffer for the specific type.
+ */
+ ByteBuffer allocate(Type type) throws IOException;
+
+ /**
+ * Release the buffer.
+ */
+ void release(ByteBuffer buffer);
+
+ /**
+ * Close the allocator, releasing any cached buffers for example.
+ */
+ void close();
+}
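The patch references a PlainByteBufferAllocator that is not part of this hunk; a minimal non-caching implementation of the interface might look as follows (the class, its sizes, and its name are illustrative assumptions).

```java
import org.apache.lucene.store.bytebuffer.ByteBufferAllocator;

import java.nio.ByteBuffer;

// A minimal, non-caching allocator; roughly the shape PlainByteBufferAllocator
// (referenced by ByteBufferDirectory below) would take.
public class SimpleByteBufferAllocator implements ByteBufferAllocator {
    private final boolean direct;
    private final int smallSize;
    private final int largeSize;

    public SimpleByteBufferAllocator(boolean direct, int smallSize, int largeSize) {
        this.direct = direct;
        this.smallSize = smallSize;
        this.largeSize = largeSize;
    }

    @Override
    public int sizeInBytes(Type type) {
        return type == Type.SMALL ? smallSize : largeSize;
    }

    @Override
    public ByteBuffer allocate(Type type) {
        int size = sizeInBytes(type);
        return direct ? ByteBuffer.allocateDirect(size) : ByteBuffer.allocate(size);
    }

    @Override
    public void release(ByteBuffer buffer) {
        // Unmap direct buffers eagerly instead of waiting for GC.
        Cleaner.clean(buffer);
    }

    @Override
    public void close() {
        // nothing cached, nothing to do
    }
}
```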
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferDirectory.java b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferDirectory.java
new file mode 100644
index 0000000..9403085
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferDirectory.java
@@ -0,0 +1,182 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.store.*;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * A memory based directory that uses {@link java.nio.ByteBuffer} in order to store the directory content.
+ * <p/>
+ * <p>The benefit of using {@link java.nio.ByteBuffer} is the fact that it can be stored in "native" memory
+ * outside of the JVM heap, thus not incurring the GC overhead of a large in-memory index.
+ * <p/>
+ * <p>Each "file" is segmented into one or more byte buffers.
+ * <p/>
+ * <p>If constructed with a {@link ByteBufferAllocator}, it allows control over the allocation and release of
+ * byte buffers. For example, custom implementations can include caching of byte buffers.
+ */
+public class ByteBufferDirectory extends BaseDirectory {
+
+ protected final Map<String, ByteBufferFile> files = new ConcurrentHashMap<String, ByteBufferFile>();
+
+ private final ByteBufferAllocator allocator;
+
+ private final boolean internalAllocator;
+
+ final AtomicLong sizeInBytes = new AtomicLong();
+
+
+ /**
+ * Constructs a new directory using {@link PlainByteBufferAllocator}.
+ */
+ public ByteBufferDirectory() {
+ this.allocator = new PlainByteBufferAllocator(false, 1024, 1024 * 10);
+ this.internalAllocator = true;
+ try {
+ setLockFactory(new SingleInstanceLockFactory());
+ } catch (IOException e) {
+ // will not happen
+ }
+ }
+
+ /**
+ * Constructs a new byte buffer directory with a custom allocator.
+ */
+ public ByteBufferDirectory(ByteBufferAllocator allocator) {
+ this.allocator = allocator;
+ this.internalAllocator = false;
+ try {
+ setLockFactory(new SingleInstanceLockFactory());
+ } catch (IOException e) {
+ // will not happen
+ }
+ }
+
+ /**
+ * Returns the size in bytes of the directory, chunked by buffer size.
+ */
+ public long sizeInBytes() {
+ return sizeInBytes.get();
+ }
+
+ public void sync(Collection<String> names) throws IOException {
+ // nothing to do here
+ }
+
+ @Override
+ public String[] listAll() throws IOException {
+ return files.keySet().toArray(new String[0]);
+ }
+
+ @Override
+ public boolean fileExists(String name) throws IOException {
+ return files.containsKey(name);
+ }
+
+ @Override
+ public void deleteFile(String name) throws IOException {
+ ByteBufferFile file = files.remove(name);
+ if (file == null)
+ throw new FileNotFoundException(name);
+ sizeInBytes.addAndGet(-file.sizeInBytes());
+ file.delete();
+ }
+
+ @Override
+ public long fileLength(String name) throws IOException {
+ ByteBufferFile file = files.get(name);
+ if (file == null)
+ throw new FileNotFoundException(name);
+ return file.getLength();
+ }
+
+ private final static ImmutableSet<String> SMALL_FILES_SUFFIXES = ImmutableSet.of(
+ "del", // 1 bit per doc
+ "cfe", // compound file metadata
+ "si", // segment info
+ "fnm" // field info (metadata like omit norms etc)
+ );
+
+ private static boolean isSmallFile(String fileName) {
+ if (fileName.startsWith("segments")) {
+ return true;
+ }
+ if (fileName.lastIndexOf('.') > 0) {
+ String suffix = fileName.substring(fileName.lastIndexOf('.') + 1);
+ return SMALL_FILES_SUFFIXES.contains(suffix);
+ }
+ return false;
+ }
+
+ @Override
+ public IndexOutput createOutput(String name, IOContext context) throws IOException {
+ ByteBufferAllocator.Type allocatorType = ByteBufferAllocator.Type.LARGE;
+ if (isSmallFile(name)) {
+ allocatorType = ByteBufferAllocator.Type.SMALL;
+ }
+ ByteBufferFileOutput file = new ByteBufferFileOutput(this, allocator.sizeInBytes(allocatorType));
+ ByteBufferFile existing = files.put(name, file);
+ if (existing != null) {
+ sizeInBytes.addAndGet(-existing.sizeInBytes());
+ existing.delete();
+ }
+ return new ByteBufferIndexOutput(this, name, allocator, allocatorType, file);
+ }
+
+ void closeOutput(String name, ByteBufferFileOutput file) {
+ // we replace the output file with a read only file, with no sync
+ files.put(name, new ByteBufferFile(file));
+ }
+
+ @Override
+ public IndexInput openInput(String name, IOContext context) throws IOException {
+ ByteBufferFile file = files.get(name);
+ if (file == null)
+ throw new FileNotFoundException(name);
+ return new ByteBufferIndexInput(name, file);
+ }
+
+ @Override
+ public void close() throws IOException {
+ String[] files = listAll();
+ for (String file : files) {
+ deleteFile(file);
+ }
+ if (internalAllocator) {
+ allocator.close();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "byte_buffer";
+ }
+
+ void releaseBuffer(ByteBuffer byteBuffer) {
+ allocator.release(byteBuffer);
+ }
+}
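A round-trip sketch through the directory, assuming ByteBufferIndexOutput registers the finished file via closeOutput() when closed (which the wiring above suggests); the file name is illustrative.

```java
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.bytebuffer.ByteBufferDirectory;

public class ByteBufferDirSketch {
    public static void main(String[] args) throws Exception {
        ByteBufferDirectory dir = new ByteBufferDirectory();

        IndexOutput out = dir.createOutput("_0.dat", IOContext.DEFAULT);
        out.writeLong(42L);
        out.close();   // closeOutput() swaps in a read-only ByteBufferFile

        IndexInput in = dir.openInput("_0.dat", IOContext.DEFAULT);
        System.out.println(in.readLong());      // 42
        System.out.println(dir.sizeInBytes());  // total buffer bytes held
        in.close();
        dir.close();   // deletes all files and closes the internal allocator
    }
}
```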
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFile.java b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFile.java
new file mode 100644
index 0000000..b726d77
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFile.java
@@ -0,0 +1,102 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ */
+public class ByteBufferFile {
+
+ final ByteBufferDirectory dir;
+
+ final int bufferSize;
+
+ final List<ByteBuffer> buffers;
+
+ long length;
+
+ volatile long lastModified = System.currentTimeMillis();
+
+ final AtomicInteger refCount;
+
+ long sizeInBytes;
+
+ public ByteBufferFile(ByteBufferDirectory dir, int bufferSize) {
+ this.dir = dir;
+ this.bufferSize = bufferSize;
+ this.buffers = new ArrayList<ByteBuffer>();
+ this.refCount = new AtomicInteger(1);
+ }
+
+ ByteBufferFile(ByteBufferFile file) {
+ this.dir = file.dir;
+ this.bufferSize = file.bufferSize;
+ this.buffers = file.buffers;
+ this.length = file.length;
+ this.lastModified = file.lastModified;
+ this.refCount = file.refCount;
+ this.sizeInBytes = file.sizeInBytes;
+ }
+
+ public long getLength() {
+ return length;
+ }
+
+ public long getLastModified() {
+ return lastModified;
+ }
+
+ void setLastModified(long lastModified) {
+ this.lastModified = lastModified;
+ }
+
+ long sizeInBytes() {
+ return sizeInBytes;
+ }
+
+ ByteBuffer getBuffer(int index) {
+ return buffers.get(index);
+ }
+
+ int numBuffers() {
+ return buffers.size();
+ }
+
+ void delete() {
+ decRef();
+ }
+
+ void incRef() {
+ refCount.incrementAndGet();
+ }
+
+ void decRef() {
+ if (refCount.decrementAndGet() == 0) {
+ length = 0;
+ for (ByteBuffer buffer : buffers) {
+ dir.releaseBuffer(buffer);
+ }
+ buffers.clear();
+ sizeInBytes = 0;
+ }
+ }
+}
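The reference count above is what lets a reader outlive a delete: the buffers are handed back to the directory's allocator only when the last reference is dropped. A sketch of the life cycle (incRef/decRef are package-private, so this is assumed to run inside org.apache.lucene.store.bytebuffer):

    package org.apache.lucene.store.bytebuffer;

    class RefCountSketch {
        static void lifecycle(ByteBufferDirectory dir) {
            ByteBufferFile file = new ByteBufferFile(dir, 1024); // refCount starts at 1
            file.incRef(); // a reader (e.g. ByteBufferIndexInput) takes a reference -> 2
            file.delete(); // the directory deletes the file -> 1; buffers stay valid
            file.decRef(); // the reader closes -> 0: buffers released back to dir
        }
    }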
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFileOutput.java b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFileOutput.java
new file mode 100644
index 0000000..7de55bf
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferFileOutput.java
@@ -0,0 +1,65 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.nio.ByteBuffer;
+
+/**
+ * The writable variant of {@link ByteBufferFile}: accessors are synchronized so
+ * readers see a consistent length and buffer list while output is in progress.
+ */
+public class ByteBufferFileOutput extends ByteBufferFile {
+
+ public ByteBufferFileOutput(ByteBufferDirectory dir, int bufferSize) {
+ super(dir, bufferSize);
+ }
+
+ @Override
+ public synchronized long getLength() {
+ return super.getLength();
+ }
+
+ @Override
+ public synchronized long getLastModified() {
+ return super.getLastModified();
+ }
+
+ synchronized void setLength(long length) {
+ this.length = length;
+ }
+
+ synchronized final void addBuffer(ByteBuffer buffer) {
+ buffers.add(buffer);
+ sizeInBytes += buffer.remaining();
+ dir.sizeInBytes.addAndGet(buffer.remaining());
+ }
+
+ @Override
+ synchronized ByteBuffer getBuffer(int index) {
+ return super.getBuffer(index);
+ }
+
+ @Override
+ synchronized int numBuffers() {
+ return super.numBuffers();
+ }
+
+ @Override
+ synchronized long sizeInBytes() {
+ return super.sizeInBytes();
+ }
+}
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexInput.java b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexInput.java
new file mode 100644
index 0000000..aeba553
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexInput.java
@@ -0,0 +1,198 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.IndexInput;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.BufferUnderflowException;
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link IndexInput} reading from a {@link ByteBufferFile}, switching
+ * between the file's buffers lazily as reads cross buffer boundaries.
+ */
+public class ByteBufferIndexInput extends IndexInput {
+
+ private final static ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0).asReadOnlyBuffer();
+
+ private final ByteBufferFile file;
+ private final long length;
+
+ private ByteBuffer currentBuffer;
+ private int currentBufferIndex;
+
+ private long bufferStart;
+ private final int BUFFER_SIZE;
+
+ private volatile boolean closed = false;
+
+ public ByteBufferIndexInput(String name, ByteBufferFile file) throws IOException {
+ super("BBIndexInput(name=" + name + ")");
+ this.file = file;
+ this.file.incRef();
+ this.length = file.getLength();
+ this.BUFFER_SIZE = file.bufferSize;
+
+ // make sure that we switch to the
+ // first needed buffer lazily
+ currentBufferIndex = -1;
+ currentBuffer = EMPTY_BUFFER;
+ }
+
+ @Override
+ public void close() {
+ // we protect against double-closing the index input since
+ // some tests do that...
+ if (closed) {
+ return;
+ }
+ closed = true;
+ file.decRef();
+ }
+
+ @Override
+ public long length() {
+ return length;
+ }
+
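+ // readShort/readInt/readLong below share a fast path: mark the current
+ // buffer, try the multi-byte read directly, and on BufferUnderflowException
+ // (the value straddles a buffer boundary) reset and fall back to the
+ // byte-by-byte default implementations in IndexInput.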
+ @Override
+ public short readShort() throws IOException {
+ try {
+ currentBuffer.mark();
+ return currentBuffer.getShort();
+ } catch (BufferUnderflowException e) {
+ currentBuffer.reset();
+ return super.readShort();
+ }
+ }
+
+ @Override
+ public int readInt() throws IOException {
+ try {
+ currentBuffer.mark();
+ return currentBuffer.getInt();
+ } catch (BufferUnderflowException e) {
+ currentBuffer.reset();
+ return super.readInt();
+ }
+ }
+
+ @Override
+ public long readLong() throws IOException {
+ try {
+ currentBuffer.mark();
+ return currentBuffer.getLong();
+ } catch (BufferUnderflowException e) {
+ currentBuffer.reset();
+ return super.readLong();
+ }
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ if (!currentBuffer.hasRemaining()) {
+ currentBufferIndex++;
+ switchCurrentBuffer(true);
+ }
+ return currentBuffer.get();
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ while (len > 0) {
+ if (!currentBuffer.hasRemaining()) {
+ currentBufferIndex++;
+ switchCurrentBuffer(true);
+ }
+
+ int remainInBuffer = currentBuffer.remaining();
+ int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
+ currentBuffer.get(b, offset, bytesToCopy);
+ offset += bytesToCopy;
+ len -= bytesToCopy;
+ }
+ }
+
+ @Override
+ public long getFilePointer() {
+ return currentBufferIndex < 0 ? 0 : bufferStart + currentBuffer.position();
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ if (currentBuffer == EMPTY_BUFFER || pos < bufferStart || pos >= bufferStart + BUFFER_SIZE) {
+ currentBufferIndex = (int) (pos / BUFFER_SIZE);
+ switchCurrentBuffer(false);
+ }
+ try {
+ currentBuffer.position((int) (pos % BUFFER_SIZE));
+ // Grrr, need to wrap the IllegalArgumentException in an IOException since
+ // tests (if not other places) expect an IOException...
+ } catch (IllegalArgumentException e) {
+ IOException ioException = new IOException("seeking past position");
+ ioException.initCause(e);
+ throw ioException;
+ }
+ }
+
+ private void switchCurrentBuffer(boolean enforceEOF) throws IOException {
+ if (currentBufferIndex >= file.numBuffers()) {
+ // end of file reached, no more buffers left
+ if (enforceEOF) {
+ throw new EOFException("Read past EOF (resource: " + this + ")");
+ } else {
+ // Force EOF if a read takes place at this position
+ currentBufferIndex--;
+ currentBuffer.position(currentBuffer.limit());
+ }
+ } else {
+ ByteBuffer buffer = file.getBuffer(currentBufferIndex);
+ // we must duplicate (and make it read only while we are at it) since we need position and such to be independent
+ currentBuffer = buffer.asReadOnlyBuffer();
+ currentBuffer.position(0);
+ bufferStart = (long) BUFFER_SIZE * (long) currentBufferIndex;
+ // if we are at the tip, limit the current buffer to only what's available to read
+ long buflen = length - bufferStart;
+ if (buflen < BUFFER_SIZE) {
+ currentBuffer.limit((int) buflen);
+ }
+
+ // we need to enforce EOF here as well...
+ if (!currentBuffer.hasRemaining()) {
+ if (enforceEOF) {
+ throw new EOFException("Read past EOF (resource: " + this + ")");
+ } else {
+ // Force EOF if a read takes place at this position
+ currentBufferIndex--;
+ currentBuffer.position(currentBuffer.limit());
+ }
+ }
+ }
+ }
+
+ @Override
+ public IndexInput clone() {
+ ByteBufferIndexInput cloned = (ByteBufferIndexInput) super.clone();
+ cloned.file.incRef(); // inc ref on cloned one
+ if (currentBuffer != EMPTY_BUFFER) {
+ cloned.currentBuffer = currentBuffer.asReadOnlyBuffer();
+ cloned.currentBuffer.position(currentBuffer.position());
+ }
+ return cloned;
+ }
+}
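A sketch of the clone semantics above: a clone shares the file's (read-only) buffers but keeps an independent position, and takes its own reference so that closing it is balanced in this implementation. Assumes a directory already holding a file named "_test.bin":

    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.store.bytebuffer.ByteBufferDirectory;

    class CloneSketch {
        static void independentPositions(ByteBufferDirectory dir) throws java.io.IOException {
            IndexInput in = dir.openInput("_test.bin", IOContext.DEFAULT);
            in.seek(2);                  // position the original
            IndexInput dup = in.clone(); // shares buffers, independent position
            dup.seek(0);
            byte b0 = dup.readByte();    // reads offset 0
            byte b2 = in.readByte();     // the original still reads offset 2
            System.out.println(b0 + " " + b2);
            dup.close(); // decRef on the shared file (the clone incRef'ed above)
            in.close();
        }
    }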
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexOutput.java b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexOutput.java
new file mode 100644
index 0000000..a667bab
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/ByteBufferIndexOutput.java
@@ -0,0 +1,132 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import org.apache.lucene.store.IndexOutput;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link IndexOutput} writing into a {@link ByteBufferFileOutput},
+ * allocating new buffers from the directory's allocator as the file grows.
+ */
+public class ByteBufferIndexOutput extends IndexOutput {
+
+ private final static ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0).asReadOnlyBuffer();
+
+ private final ByteBufferDirectory dir;
+ private final String name;
+ private final ByteBufferAllocator allocator;
+ private final ByteBufferAllocator.Type allocatorType;
+ private final int BUFFER_SIZE;
+ private final ByteBufferFileOutput file;
+
+ private ByteBuffer currentBuffer;
+ private int currentBufferIndex;
+
+ private long bufferStart;
+
+ public ByteBufferIndexOutput(ByteBufferDirectory dir, String name, ByteBufferAllocator allocator, ByteBufferAllocator.Type allocatorType, ByteBufferFileOutput file) throws IOException {
+ this.dir = dir;
+ this.name = name;
+ this.allocator = allocator;
+ this.allocatorType = allocatorType;
+ this.BUFFER_SIZE = file.bufferSize;
+ this.file = file;
+
+ currentBufferIndex = -1;
+ currentBuffer = EMPTY_BUFFER;
+ }
+
+ @Override
+ public void close() throws IOException {
+ flush();
+ dir.closeOutput(name, file);
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ // set the file length in case we seek back
+ // and flush() has not been called yet
+ setFileLength();
+ if (pos < bufferStart || pos >= bufferStart + BUFFER_SIZE) {
+ currentBufferIndex = (int) (pos / BUFFER_SIZE);
+ switchCurrentBuffer();
+ }
+ currentBuffer.position((int) (pos % BUFFER_SIZE));
+ }
+
+ @Override
+ public long length() {
+ return file.getLength();
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ if (!currentBuffer.hasRemaining()) {
+ currentBufferIndex++;
+ switchCurrentBuffer();
+ }
+ currentBuffer.put(b);
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int len) throws IOException {
+ while (len > 0) {
+ if (!currentBuffer.hasRemaining()) {
+ currentBufferIndex++;
+ switchCurrentBuffer();
+ }
+
+ int remainInBuffer = currentBuffer.remaining();
+ int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
+ currentBuffer.put(b, offset, bytesToCopy);
+ offset += bytesToCopy;
+ len -= bytesToCopy;
+ }
+ }
+
+ private void switchCurrentBuffer() throws IOException {
+ if (currentBufferIndex == file.numBuffers()) {
+ currentBuffer = allocator.allocate(allocatorType);
+ file.addBuffer(currentBuffer);
+ } else {
+ currentBuffer = file.getBuffer(currentBufferIndex);
+ }
+ currentBuffer.position(0);
+ bufferStart = (long) BUFFER_SIZE * (long) currentBufferIndex;
+ }
+
+ private void setFileLength() {
+ long pointer = bufferStart + currentBuffer.position();
+ if (pointer > file.getLength()) {
+ file.setLength(pointer);
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ file.setLastModified(System.currentTimeMillis());
+ setFileLength();
+ }
+
+ @Override
+ public long getFilePointer() {
+ return currentBufferIndex < 0 ? 0 : bufferStart + currentBuffer.position();
+ }
+}
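The pointer arithmetic shared by seek() and getFilePointer() above, made concrete with hypothetical numbers (BUFFER_SIZE = 1024, absolute position 3000):

    public class PointerArithmeticSketch {
        public static void main(String[] args) {
            int bufferSize = 1024; // BUFFER_SIZE in the classes above
            long pos = 3000;       // absolute file position

            int bufferIndex = (int) (pos / bufferSize);         // 2 -> the third buffer
            int inBuffer = (int) (pos % bufferSize);            // 952 -> offset within it
            long bufferStart = (long) bufferSize * bufferIndex; // 2048

            // getFilePointer() recombines the two halves
            System.out.println(bufferStart + inBuffer == pos);  // true: 2048 + 952 == 3000
        }
    }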
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/CachingByteBufferAllocator.java b/src/main/java/org/apache/lucene/store/bytebuffer/CachingByteBufferAllocator.java
new file mode 100644
index 0000000..925a76c
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/CachingByteBufferAllocator.java
@@ -0,0 +1,82 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+/**
+ * A byte buffer allocator that keeps bounded caches of small and large buffers
+ * and reuses released buffers for later allocations when possible.
+ */
+public class CachingByteBufferAllocator extends PlainByteBufferAllocator {
+
+ private final BlockingQueue<ByteBuffer> smallCache;
+ private final BlockingQueue<ByteBuffer> largeCache;
+
+ /**
+ * @param direct If set to true, will allocate direct buffers (off heap).
+ * @param smallBufferSizeInBytes The size (in bytes) of the small buffer allocation.
+ * @param largeBufferSizeInBytes The size (in bytes) of the large buffer allocation.
+ * @param smallCacheSizeInBytes The total size (in bytes) of the cache of small buffers.
+ * @param largeCacheSizeInBytes The total size (in bytes) of the cache of large buffers.
+ */
+ public CachingByteBufferAllocator(boolean direct, int smallBufferSizeInBytes, int largeBufferSizeInBytes,
+ int smallCacheSizeInBytes, int largeCacheSizeInBytes) {
+ super(direct, smallBufferSizeInBytes, largeBufferSizeInBytes);
+ this.smallCache = new LinkedBlockingQueue<ByteBuffer>(smallCacheSizeInBytes / smallBufferSizeInBytes);
+ this.largeCache = new LinkedBlockingQueue<ByteBuffer>(largeCacheSizeInBytes / largeBufferSizeInBytes);
+ }
+
+
+ public ByteBuffer allocate(Type type) throws IOException {
+ ByteBuffer buffer = type == Type.SMALL ? smallCache.poll() : largeCache.poll();
+ if (buffer == null) {
+ buffer = super.allocate(type);
+ }
+ return buffer;
+ }
+
+ public void release(ByteBuffer buffer) {
+ if (buffer.capacity() == smallBufferSizeInBytes) {
+ boolean success = smallCache.offer(buffer);
+ if (!success) {
+ super.release(buffer);
+ }
+ } else if (buffer.capacity() == largeBufferSizeInBytes) {
+ boolean success = largeCache.offer(buffer);
+ if (!success) {
+ super.release(buffer);
+ }
+ }
+ // otherwise, just ignore it; the buffer did not come from this allocator
+ }
+
+ public void close() {
+ for (ByteBuffer buffer : smallCache) {
+ super.release(buffer);
+ }
+ smallCache.clear();
+ for (ByteBuffer buffer : largeCache) {
+ super.release(buffer);
+ }
+ largeCache.clear();
+ }
+}
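A usage sketch of the cache above; note that each queue's capacity is the cache size divided by the buffer size, and buffers that do not fit fall through to the superclass release. The sizes here are illustrative:

    import java.nio.ByteBuffer;
    import org.apache.lucene.store.bytebuffer.ByteBufferAllocator;
    import org.apache.lucene.store.bytebuffer.CachingByteBufferAllocator;

    public class CachingAllocatorSketch {
        public static void main(String[] args) throws Exception {
            // heap buffers: 1 KB small / 64 KB large, 1 MB small cache
            // (1024 entries), 16 MB large cache (256 entries)
            CachingByteBufferAllocator allocator = new CachingByteBufferAllocator(
                    false, 1024, 65536, 1024 * 1024, 16 * 1024 * 1024);

            ByteBuffer buffer = allocator.allocate(ByteBufferAllocator.Type.SMALL);
            allocator.release(buffer); // parked in the small cache for reuse

            ByteBuffer reused = allocator.allocate(ByteBufferAllocator.Type.SMALL);
            System.out.println(reused == buffer); // true: the cached buffer came back

            allocator.close(); // drains both caches, releasing the buffers for real
        }
    }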
diff --git a/src/main/java/org/apache/lucene/store/bytebuffer/PlainByteBufferAllocator.java b/src/main/java/org/apache/lucene/store/bytebuffer/PlainByteBufferAllocator.java
new file mode 100644
index 0000000..e800b61
--- /dev/null
+++ b/src/main/java/org/apache/lucene/store/bytebuffer/PlainByteBufferAllocator.java
@@ -0,0 +1,67 @@
+package org.apache.lucene.store.bytebuffer;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * A simple byte buffer allocator that does no caching. The direct flag
+ * controls whether the byte buffers are allocated off heap or on heap.
+ */
+public class PlainByteBufferAllocator implements ByteBufferAllocator {
+
+ protected final boolean direct;
+
+ protected final int smallBufferSizeInBytes;
+
+ protected final int largeBufferSizeInBytes;
+
+ /**
+ * Constructs a new plain byte buffer allocator that does no caching.
+ *
+ * @param direct If set to true, will allocate direct buffers (off heap).
+ * @param smallBufferSizeInBytes The size (in bytes) of the small buffer allocation.
+ * @param largeBufferSizeInBytes The size (in bytes) of the large buffer allocation.
+ */
+ public PlainByteBufferAllocator(boolean direct, int smallBufferSizeInBytes, int largeBufferSizeInBytes) {
+ this.direct = direct;
+ this.smallBufferSizeInBytes = smallBufferSizeInBytes;
+ this.largeBufferSizeInBytes = largeBufferSizeInBytes;
+ }
+
+ public int sizeInBytes(Type type) {
+ return type == Type.SMALL ? smallBufferSizeInBytes : largeBufferSizeInBytes;
+ }
+
+ public ByteBuffer allocate(Type type) throws IOException {
+ int sizeToAllocate = type == Type.SMALL ? smallBufferSizeInBytes : largeBufferSizeInBytes;
+ if (direct) {
+ return ByteBuffer.allocateDirect(sizeToAllocate);
+ }
+ return ByteBuffer.allocate(sizeToAllocate);
+ }
+
+ public void release(ByteBuffer buffer) {
+ Cleaner.clean(buffer);
+ }
+
+ public void close() {
+ // nothing to do here...
+ }
+}
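A minimal sketch of the direct flag above, which is the allocator's only behavioural switch (it selects ByteBuffer.allocateDirect over ByteBuffer.allocate):

    import java.nio.ByteBuffer;
    import org.apache.lucene.store.bytebuffer.ByteBufferAllocator;
    import org.apache.lucene.store.bytebuffer.PlainByteBufferAllocator;

    public class PlainAllocatorSketch {
        public static void main(String[] args) throws Exception {
            PlainByteBufferAllocator heap = new PlainByteBufferAllocator(false, 1024, 65536);
            PlainByteBufferAllocator offHeap = new PlainByteBufferAllocator(true, 1024, 65536);

            ByteBuffer a = heap.allocate(ByteBufferAllocator.Type.SMALL);
            ByteBuffer b = offHeap.allocate(ByteBufferAllocator.Type.LARGE);

            System.out.println(a.isDirect() + " " + a.capacity()); // false 1024
            System.out.println(b.isDirect() + " " + b.capacity()); // true 65536

            heap.release(a);    // no caching here: delegates straight to Cleaner.clean()
            offHeap.release(b);
        }
    }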
diff --git a/src/main/java/org/elasticsearch/Build.java b/src/main/java/org/elasticsearch/Build.java
new file mode 100644
index 0000000..d7e6b81
--- /dev/null
+++ b/src/main/java/org/elasticsearch/Build.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * The build (git hash and timestamp) that this distribution was created from,
+ * read from the es-build.properties resource on the classpath.
+ */
+public class Build {
+
+ public static final Build CURRENT;
+
+ static {
+ String hash = "NA";
+ String hashShort = "NA";
+ String timestamp = "NA";
+
+ try {
+ String properties = Streams.copyToStringFromClasspath("/es-build.properties");
+ Properties props = new Properties();
+ props.load(new FastStringReader(properties));
+ hash = props.getProperty("hash", hash);
+ if (!hash.equals("NA")) {
+ hashShort = hash.substring(0, 7);
+ }
+ String gitTimestampRaw = props.getProperty("timestamp");
+ if (gitTimestampRaw != null) {
+ timestamp = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC).print(Long.parseLong(gitTimestampRaw));
+ }
+ } catch (Exception e) {
+ // just ignore...
+ }
+
+ CURRENT = new Build(hash, hashShort, timestamp);
+ }
+
+ private String hash;
+ private String hashShort;
+ private String timestamp;
+
+ Build(String hash, String hashShort, String timestamp) {
+ this.hash = hash;
+ this.hashShort = hashShort;
+ this.timestamp = timestamp;
+ }
+
+ public String hash() {
+ return hash;
+ }
+
+ public String hashShort() {
+ return hashShort;
+ }
+
+ public String timestamp() {
+ return timestamp;
+ }
+
+ public static Build readBuild(StreamInput in) throws IOException {
+ String hash = in.readString();
+ String hashShort = in.readString();
+ String timestamp = in.readString();
+ return new Build(hash, hashShort, timestamp);
+ }
+
+ public static void writeBuild(Build build, StreamOutput out) throws IOException {
+ out.writeString(build.hash());
+ out.writeString(build.hashShort());
+ out.writeString(build.timestamp());
+ }
+}
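For reference, the static initializer above looks for an es-build.properties resource on the classpath with hash and timestamp entries; a sketch of what it yields (the property values below are invented for illustration):

    import org.elasticsearch.Build;

    public class BuildSketch {
        public static void main(String[] args) {
            // hypothetical /es-build.properties on the classpath:
            //   hash=0123456789abcdef0123456789abcdef01234567
            //   timestamp=1396297019000
            Build build = Build.CURRENT;
            System.out.println(build.hash());      // the full commit hash, or "NA"
            System.out.println(build.hashShort()); // the first 7 characters of the hash
            System.out.println(build.timestamp()); // e.g. "2014-03-31T20:16:59Z" (UTC, no millis)
        }
    }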
diff --git a/src/main/java/org/elasticsearch/ElasticsearchException.java b/src/main/java/org/elasticsearch/ElasticsearchException.java
new file mode 100644
index 0000000..dc1ab99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * A base class for all elasticsearch exceptions.
+ */
+public class ElasticsearchException extends RuntimeException {
+
+ /**
+ * Constructs an <code>ElasticsearchException</code> with the specified detail message.
+ *
+ * @param msg the detail message
+ */
+ public ElasticsearchException(String msg) {
+ super(msg);
+ }
+
+ /**
+ * Constructs an <code>ElasticsearchException</code> with the specified detail message
+ * and nested exception.
+ *
+ * @param msg the detail message
+ * @param cause the nested exception
+ */
+ public ElasticsearchException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ /**
+ * Returns the REST status code associated with this exception.
+ */
+ public RestStatus status() {
+ Throwable cause = unwrapCause();
+ if (cause == this) {
+ return RestStatus.INTERNAL_SERVER_ERROR;
+ } else if (cause instanceof ElasticsearchException) {
+ return ((ElasticsearchException) cause).status();
+ } else if (cause instanceof IllegalArgumentException) {
+ return RestStatus.BAD_REQUEST;
+ } else {
+ return RestStatus.INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ /**
+ * Unwraps the actual cause from the exception for cases when the exception is a
+ * {@link ElasticsearchWrapperException}.
+ *
+ * @see org.elasticsearch.ExceptionsHelper#unwrapCause(Throwable)
+ */
+ public Throwable unwrapCause() {
+ return ExceptionsHelper.unwrapCause(this);
+ }
+
+ /**
+ * Returns the detail message, including the message from the nested exception
+ * if there is one.
+ */
+ public String getDetailedMessage() {
+ if (getCause() != null) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(toString()).append("; ");
+ if (getCause() instanceof ElasticsearchException) {
+ sb.append(((ElasticsearchException) getCause()).getDetailedMessage());
+ } else {
+ sb.append(getCause());
+ }
+ return sb.toString();
+ } else {
+ return super.toString();
+ }
+ }
+
+
+ /**
+ * Retrieves the innermost cause of this exception; if there is none, returns this exception.
+ */
+ public Throwable getRootCause() {
+ Throwable rootCause = this;
+ Throwable cause = getCause();
+ while (cause != null && cause != rootCause) {
+ rootCause = cause;
+ cause = cause.getCause();
+ }
+ return rootCause;
+ }
+
+ /**
+ * Retrieves the most specific cause of this exception, that is,
+ * either the innermost cause (root cause) or this exception itself.
+ * <p>Differs from {@link #getRootCause()} in that it falls back
+ * to the present exception if there is no root cause.
+ *
+ * @return the most specific cause (never <code>null</code>)
+ */
+ public Throwable getMostSpecificCause() {
+ Throwable rootCause = getRootCause();
+ return (rootCause != null ? rootCause : this);
+ }
+
+ /**
+ * Checks whether this exception contains an exception of the given type:
+ * either it is of the given class itself or it contains a nested cause
+ * of the given type.
+ *
+ * @param exType the exception type to look for
+ * @return whether there is a nested exception of the specified type
+ */
+ public boolean contains(Class<?> exType) {
+ if (exType == null) {
+ return false;
+ }
+ if (exType.isInstance(this)) {
+ return true;
+ }
+ Throwable cause = getCause();
+ if (cause == this) {
+ return false;
+ }
+ if (cause instanceof ElasticsearchException) {
+ return ((ElasticsearchException) cause).contains(exType);
+ } else {
+ while (cause != null) {
+ if (exType.isInstance(cause)) {
+ return true;
+ }
+ if (cause.getCause() == cause) {
+ break;
+ }
+ cause = cause.getCause();
+ }
+ return false;
+ }
+ }
+}
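A sketch of how the cause-chain helpers above behave; the exception types and messages are illustrative:

    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.rest.RestStatus;

    public class ExceptionSketch {
        public static void main(String[] args) {
            ElasticsearchException e = new ElasticsearchException("outer",
                    new ElasticsearchException("inner", new IllegalArgumentException("bad arg")));

            System.out.println(e.contains(IllegalArgumentException.class)); // true: found in the chain
            System.out.println(e.getRootCause().getMessage());              // "bad arg"

            // e is not an ElasticsearchWrapperException, so unwrapCause() returns
            // e itself and the default 500 applies
            System.out.println(e.status() == RestStatus.INTERNAL_SERVER_ERROR); // true
        }
    }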
diff --git a/src/main/java/org/elasticsearch/ElasticsearchGenerationException.java b/src/main/java/org/elasticsearch/ElasticsearchGenerationException.java
new file mode 100644
index 0000000..0325b12
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchGenerationException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+/**
+ * A generic exception indicating a failure to generate content.
+ */
+public class ElasticsearchGenerationException extends ElasticsearchException {
+
+ public ElasticsearchGenerationException(String msg) {
+ super(msg);
+ }
+
+ public ElasticsearchGenerationException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java b/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java
new file mode 100644
index 0000000..fc6d110
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class ElasticsearchIllegalArgumentException extends ElasticsearchException {
+
+ public ElasticsearchIllegalArgumentException() {
+ super(null);
+ }
+
+ public ElasticsearchIllegalArgumentException(String msg) {
+ super(msg);
+ }
+
+ public ElasticsearchIllegalArgumentException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java b/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java
new file mode 100644
index 0000000..d837d69
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+/**
+ *
+ */
+public class ElasticsearchIllegalStateException extends ElasticsearchException {
+
+ public ElasticsearchIllegalStateException() {
+ super(null);
+ }
+
+ public ElasticsearchIllegalStateException(String msg) {
+ super(msg);
+ }
+
+ public ElasticsearchIllegalStateException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/ElasticsearchNullPointerException.java b/src/main/java/org/elasticsearch/ElasticsearchNullPointerException.java
new file mode 100644
index 0000000..721bf05
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchNullPointerException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+/**
+ *
+ */
+public class ElasticsearchNullPointerException extends ElasticsearchException {
+
+ public ElasticsearchNullPointerException() {
+ super(null);
+ }
+
+ public ElasticsearchNullPointerException(String msg) {
+ super(msg);
+ }
+
+ public ElasticsearchNullPointerException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/ElasticsearchParseException.java b/src/main/java/org/elasticsearch/ElasticsearchParseException.java
new file mode 100644
index 0000000..cc6d907
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchParseException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class ElasticsearchParseException extends ElasticsearchException {
+
+ public ElasticsearchParseException(String msg) {
+ super(msg);
+ }
+
+ public ElasticsearchParseException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java b/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java
new file mode 100644
index 0000000..a5e8189
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+/**
+ * The same as {@link java.util.concurrent.TimeoutException}, only as a runtime exception.
+ */
+public class ElasticsearchTimeoutException extends ElasticsearchException {
+
+ public ElasticsearchTimeoutException(String message) {
+ super(message);
+ }
+
+ public ElasticsearchTimeoutException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java b/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java
new file mode 100644
index 0000000..79e501b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+/**
+ *
+ */
+public interface ElasticsearchWrapperException {
+
+ Throwable getCause();
+}
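This is a pure marker interface: ExceptionsHelper.unwrapCause() (added below) steps over any throwable that implements it when looking for the real cause. A hypothetical wrapper type, invented for illustration:

    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.ElasticsearchWrapperException;
    import org.elasticsearch.ExceptionsHelper;

    // hypothetical wrapper, not part of this patch
    class RemoteFailureException extends ElasticsearchException implements ElasticsearchWrapperException {
        RemoteFailureException(Throwable cause) {
            super("remote failure", cause);
        }
    }

    class WrapperSketch {
        public static void main(String[] args) {
            Throwable real = new IllegalArgumentException("bad request");
            Throwable wrapped = new RemoteFailureException(real);
            // the wrapper is transparent to cause unwrapping
            System.out.println(ExceptionsHelper.unwrapCause(wrapped) == real); // true
        }
    }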
diff --git a/src/main/java/org/elasticsearch/ExceptionsHelper.java b/src/main/java/org/elasticsearch/ExceptionsHelper.java
new file mode 100644
index 0000000..2f075a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public final class ExceptionsHelper {
+
+ private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
+
+ public static RuntimeException convertToRuntime(Throwable t) {
+ if (t instanceof RuntimeException) {
+ return (RuntimeException) t;
+ }
+ return new ElasticsearchException(t.getMessage(), t);
+ }
+
+ public static ElasticsearchException convertToElastic(Throwable t) {
+ if (t instanceof ElasticsearchException) {
+ return (ElasticsearchException) t;
+ }
+ return new ElasticsearchException(t.getMessage(), t);
+ }
+
+ public static RestStatus status(Throwable t) {
+ if (t instanceof ElasticsearchException) {
+ return ((ElasticsearchException) t).status();
+ }
+ return RestStatus.INTERNAL_SERVER_ERROR;
+ }
+
+ public static Throwable unwrapCause(Throwable t) {
+ int counter = 0;
+ Throwable result = t;
+ while (result instanceof ElasticsearchWrapperException) {
+ if (result.getCause() == null) {
+ return result;
+ }
+ if (result.getCause() == result) {
+ return result;
+ }
+ if (counter++ > 10) {
+ // if we have unwrapped more than 10 levels of causes, something is wrong; just bail
+ logger.warn("Exception cause unwrapping ran for 10 levels...", t);
+ return result;
+ }
+ result = result.getCause();
+ }
+ return result;
+ }
+
+ public static String detailedMessage(Throwable t) {
+ return detailedMessage(t, false, 0);
+ }
+
+ public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) {
+ if (t == null) {
+ return "Unknown";
+ }
+ int counter = initialCounter + 1;
+ if (t.getCause() != null) {
+ StringBuilder sb = new StringBuilder();
+ while (t != null) {
+ sb.append(t.getClass().getSimpleName());
+ if (t.getMessage() != null) {
+ sb.append("[");
+ sb.append(t.getMessage());
+ sb.append("]");
+ }
+ if (!newLines) {
+ sb.append("; ");
+ }
+ t = t.getCause();
+ if (t != null) {
+ if (newLines) {
+ sb.append("\n");
+ for (int i = 0; i < counter; i++) {
+ sb.append("\t");
+ }
+ } else {
+ sb.append("nested: ");
+ }
+ }
+ counter++;
+ }
+ return sb.toString();
+ } else {
+ return t.getClass().getSimpleName() + "[" + t.getMessage() + "]";
+ }
+ }
+}
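For reference, the compact format produced by detailedMessage() above, with an illustrative two-level cause chain:

    import org.elasticsearch.ExceptionsHelper;

    public class DetailedMessageSketch {
        public static void main(String[] args) {
            Exception e = new RuntimeException("top level",
                    new IllegalStateException("root problem"));

            // prints: RuntimeException[top level]; nested: IllegalStateException[root problem];
            System.out.println(ExceptionsHelper.detailedMessage(e));
        }
    }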
diff --git a/src/main/java/org/elasticsearch/Version.java b/src/main/java/org/elasticsearch/Version.java
new file mode 100644
index 0000000..7c935d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/Version.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ */
+@SuppressWarnings("deprecation")
+public class Version implements Serializable {
+
+ // The logic for the ID is: XXYYZZAA, where XX is the major version, YY is the minor version, ZZ is the revision, and AA is the Beta/RC indicator
+ // AA values below 50 are beta builds, values below 99 are RC builds, and 99 indicates a GA release
+ // the (internal) format of the id exists so we can easily do after/before checks on it (a worked decoding example follows at the end of this file)
+
+ public static final int V_0_18_0_ID = /*00*/180099;
+ public static final Version V_0_18_0 = new Version(V_0_18_0_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_1_ID = /*00*/180199;
+ public static final Version V_0_18_1 = new Version(V_0_18_1_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_2_ID = /*00*/180299;
+ public static final Version V_0_18_2 = new Version(V_0_18_2_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_3_ID = /*00*/180399;
+ public static final Version V_0_18_3 = new Version(V_0_18_3_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_4_ID = /*00*/180499;
+ public static final Version V_0_18_4 = new Version(V_0_18_4_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_5_ID = /*00*/180599;
+ public static final Version V_0_18_5 = new Version(V_0_18_5_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_6_ID = /*00*/180699;
+ public static final Version V_0_18_6 = new Version(V_0_18_6_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_7_ID = /*00*/180799;
+ public static final Version V_0_18_7 = new Version(V_0_18_7_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_18_8_ID = /*00*/180899;
+ public static final Version V_0_18_8 = new Version(V_0_18_8_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+
+ public static final int V_0_19_0_RC1_ID = /*00*/190051;
+ public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+
+ public static final int V_0_19_0_RC2_ID = /*00*/190052;
+ public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+
+ public static final int V_0_19_0_RC3_ID = /*00*/190053;
+ public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+
+ public static final int V_0_19_0_ID = /*00*/190099;
+ public static final Version V_0_19_0 = new Version(V_0_19_0_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_1_ID = /*00*/190199;
+ public static final Version V_0_19_1 = new Version(V_0_19_1_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_2_ID = /*00*/190299;
+ public static final Version V_0_19_2 = new Version(V_0_19_2_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_3_ID = /*00*/190399;
+ public static final Version V_0_19_3 = new Version(V_0_19_3_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_4_ID = /*00*/190499;
+ public static final Version V_0_19_4 = new Version(V_0_19_4_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_5_ID = /*00*/190599;
+ public static final Version V_0_19_5 = new Version(V_0_19_5_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_6_ID = /*00*/190699;
+ public static final Version V_0_19_6 = new Version(V_0_19_6_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_7_ID = /*00*/190799;
+ public static final Version V_0_19_7 = new Version(V_0_19_7_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_8_ID = /*00*/190899;
+ public static final Version V_0_19_8 = new Version(V_0_19_8_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_9_ID = /*00*/190999;
+ public static final Version V_0_19_9 = new Version(V_0_19_9_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_10_ID = /*00*/191099;
+ public static final Version V_0_19_10 = new Version(V_0_19_10_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_11_ID = /*00*/191199;
+ public static final Version V_0_19_11 = new Version(V_0_19_11_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_12_ID = /*00*/191299;
+ public static final Version V_0_19_12 = new Version(V_0_19_12_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_19_13_ID = /*00*/191399;
+ public static final Version V_0_19_13 = new Version(V_0_19_13_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+
+ public static final int V_0_20_0_RC1_ID = /*00*/200051;
+ public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_0_ID = /*00*/200099;
+ public static final Version V_0_20_0 = new Version(V_0_20_0_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_1_ID = /*00*/200199;
+ public static final Version V_0_20_1 = new Version(V_0_20_1_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_2_ID = /*00*/200299;
+ public static final Version V_0_20_2 = new Version(V_0_20_2_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_3_ID = /*00*/200399;
+ public static final Version V_0_20_3 = new Version(V_0_20_3_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_4_ID = /*00*/200499;
+ public static final Version V_0_20_4 = new Version(V_0_20_4_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_5_ID = /*00*/200599;
+ public static final Version V_0_20_5 = new Version(V_0_20_5_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_6_ID = /*00*/200699;
+ public static final Version V_0_20_6 = new Version(V_0_20_6_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+ public static final int V_0_20_7_ID = /*00*/200799;
+ public static final Version V_0_20_7 = new Version(V_0_20_7_ID, false, org.apache.lucene.util.Version.LUCENE_36);
+
+ public static final int V_0_90_0_Beta1_ID = /*00*/900001;
+ public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_41);
+ public static final int V_0_90_0_RC1_ID = /*00*/900051;
+ public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_41);
+ public static final int V_0_90_0_RC2_ID = /*00*/900052;
+ public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_42);
+ public static final int V_0_90_0_ID = /*00*/900099;
+ public static final Version V_0_90_0 = new Version(V_0_90_0_ID, false, org.apache.lucene.util.Version.LUCENE_42);
+ public static final int V_0_90_1_ID = /*00*/900199;
+ public static final Version V_0_90_1 = new Version(V_0_90_1_ID, false, org.apache.lucene.util.Version.LUCENE_43);
+ public static final int V_0_90_2_ID = /*00*/900299;
+ public static final Version V_0_90_2 = new Version(V_0_90_2_ID, false, org.apache.lucene.util.Version.LUCENE_43);
+ public static final int V_0_90_3_ID = /*00*/900399;
+ public static final Version V_0_90_3 = new Version(V_0_90_3_ID, false, org.apache.lucene.util.Version.LUCENE_44);
+ public static final int V_0_90_4_ID = /*00*/900499;
+ public static final Version V_0_90_4 = new Version(V_0_90_4_ID, false, org.apache.lucene.util.Version.LUCENE_44);
+ public static final int V_0_90_5_ID = /*00*/900599;
+ public static final Version V_0_90_5 = new Version(V_0_90_5_ID, false, org.apache.lucene.util.Version.LUCENE_44);
+ public static final int V_0_90_6_ID = /*00*/900699;
+ public static final Version V_0_90_6 = new Version(V_0_90_6_ID, false, org.apache.lucene.util.Version.LUCENE_45);
+ public static final int V_0_90_7_ID = /*00*/900799;
+ public static final Version V_0_90_7 = new Version(V_0_90_7_ID, false, org.apache.lucene.util.Version.LUCENE_45);
+ public static final int V_0_90_8_ID = /*00*/900899;
+ public static final Version V_0_90_8 = new Version(V_0_90_8_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_0_90_9_ID = /*00*/900999;
+ public static final Version V_0_90_9 = new Version(V_0_90_9_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_0_90_10_ID = /*00*/901099;
+ public static final Version V_0_90_10 = new Version(V_0_90_10_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_0_90_11_ID = /*00*/901199;
+ public static final Version V_0_90_11 = new Version(V_0_90_11_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_0_90_12_ID = /*00*/901299;
+ public static final Version V_0_90_12 = new Version(V_0_90_12_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_0_90_13_ID = /*00*/901399;
+ public static final Version V_0_90_13 = new Version(V_0_90_13_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_0_90_14_ID = /*00*/901499;
+ public static final Version V_0_90_14 = new Version(V_0_90_14_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+
+ public static final int V_1_0_0_Beta1_ID = /*00*/1000001;
+ public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_45);
+ public static final int V_1_0_0_Beta2_ID = /*00*/1000002;
+ public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_1_0_0_RC1_ID = /*00*/1000051;
+ public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_1_0_0_RC2_ID = /*00*/1000052;
+ public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_1_0_0_ID = /*00*/1000099;
+ public static final Version V_1_0_0 = new Version(V_1_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_1_0_1_ID = /*00*/1000199;
+ public static final Version V_1_0_1 = new Version(V_1_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_1_0_2_ID = /*00*/1000299;
+ public static final Version V_1_0_2 = new Version(V_1_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+ public static final int V_1_0_3_ID = /*00*/1000399;
+ public static final Version V_1_0_3 = new Version(V_1_0_3_ID, false, org.apache.lucene.util.Version.LUCENE_46);
+
+ public static final Version CURRENT = V_1_0_3;
+
+ static {
+ assert CURRENT.luceneVersion == Lucene.VERSION : "Version must be upgraded to [" + Lucene.VERSION + "] but is still set to [" + CURRENT.luceneVersion + "]";
+ }
+
+ public static Version readVersion(StreamInput in) throws IOException {
+ return fromId(in.readVInt());
+ }
+
+ public static Version fromId(int id) {
+ switch (id) {
+ case V_1_0_3_ID:
+ return V_1_0_3;
+ case V_1_0_2_ID:
+ return V_1_0_2;
+ case V_1_0_1_ID:
+ return V_1_0_1;
+ case V_1_0_0_ID:
+ return V_1_0_0;
+ case V_1_0_0_RC2_ID:
+ return V_1_0_0_RC2;
+ case V_1_0_0_RC1_ID:
+ return V_1_0_0_RC1;
+ case V_1_0_0_Beta2_ID:
+ return V_1_0_0_Beta2;
+ case V_1_0_0_Beta1_ID:
+ return V_1_0_0_Beta1;
+ case V_0_90_14_ID:
+ return V_0_90_14;
+ case V_0_90_13_ID:
+ return V_0_90_13;
+ case V_0_90_12_ID:
+ return V_0_90_12;
+ case V_0_90_11_ID:
+ return V_0_90_11;
+ case V_0_90_10_ID:
+ return V_0_90_10;
+ case V_0_90_9_ID:
+ return V_0_90_9;
+ case V_0_90_8_ID:
+ return V_0_90_8;
+ case V_0_90_7_ID:
+ return V_0_90_7;
+ case V_0_90_6_ID:
+ return V_0_90_6;
+ case V_0_90_5_ID:
+ return V_0_90_5;
+ case V_0_90_4_ID:
+ return V_0_90_4;
+ case V_0_90_3_ID:
+ return V_0_90_3;
+ case V_0_90_2_ID:
+ return V_0_90_2;
+ case V_0_90_1_ID:
+ return V_0_90_1;
+ case V_0_90_0_ID:
+ return V_0_90_0;
+ case V_0_90_0_RC2_ID:
+ return V_0_90_0_RC2;
+ case V_0_90_0_RC1_ID:
+ return V_0_90_0_RC1;
+ case V_0_90_0_Beta1_ID:
+ return V_0_90_0_Beta1;
+
+ case V_0_20_7_ID:
+ return V_0_20_7;
+ case V_0_20_6_ID:
+ return V_0_20_6;
+ case V_0_20_5_ID:
+ return V_0_20_5;
+ case V_0_20_4_ID:
+ return V_0_20_4;
+ case V_0_20_3_ID:
+ return V_0_20_3;
+ case V_0_20_2_ID:
+ return V_0_20_2;
+ case V_0_20_1_ID:
+ return V_0_20_1;
+ case V_0_20_0_ID:
+ return V_0_20_0;
+ case V_0_20_0_RC1_ID:
+ return V_0_20_0_RC1;
+
+ case V_0_19_0_RC1_ID:
+ return V_0_19_0_RC1;
+ case V_0_19_0_RC2_ID:
+ return V_0_19_0_RC2;
+ case V_0_19_0_RC3_ID:
+ return V_0_19_0_RC3;
+ case V_0_19_0_ID:
+ return V_0_19_0;
+ case V_0_19_1_ID:
+ return V_0_19_1;
+ case V_0_19_2_ID:
+ return V_0_19_2;
+ case V_0_19_3_ID:
+ return V_0_19_3;
+ case V_0_19_4_ID:
+ return V_0_19_4;
+ case V_0_19_5_ID:
+ return V_0_19_5;
+ case V_0_19_6_ID:
+ return V_0_19_6;
+ case V_0_19_7_ID:
+ return V_0_19_7;
+ case V_0_19_8_ID:
+ return V_0_19_8;
+ case V_0_19_9_ID:
+ return V_0_19_9;
+ case V_0_19_10_ID:
+ return V_0_19_10;
+ case V_0_19_11_ID:
+ return V_0_19_11;
+ case V_0_19_12_ID:
+ return V_0_19_12;
+ case V_0_19_13_ID:
+ return V_0_19_13;
+
+ case V_0_18_0_ID:
+ return V_0_18_0;
+ case V_0_18_1_ID:
+ return V_0_18_1;
+ case V_0_18_2_ID:
+ return V_0_18_2;
+ case V_0_18_3_ID:
+ return V_0_18_3;
+ case V_0_18_4_ID:
+ return V_0_18_4;
+ case V_0_18_5_ID:
+ return V_0_18_5;
+ case V_0_18_6_ID:
+ return V_0_18_6;
+ case V_0_18_7_ID:
+ return V_0_18_7;
+ case V_0_18_8_ID:
+ return V_0_18_8;
+
+ default:
+ return new Version(id, null, Lucene.VERSION);
+ }
+ }
+
+ public static void writeVersion(Version version, StreamOutput out) throws IOException {
+ out.writeVInt(version.id);
+ }
+
+ /**
+ * Returns the smaller of the two versions.
+ */
+ public static Version smallest(Version version1, Version version2) {
+ return version1.id < version2.id ? version1 : version2;
+ }
+
+ public final int id;
+ public final byte major;
+ public final byte minor;
+ public final byte revision;
+ public final byte build;
+ public final Boolean snapshot;
+ public final org.apache.lucene.util.Version luceneVersion;
+
+ Version(int id, @Nullable Boolean snapshot, org.apache.lucene.util.Version luceneVersion) {
+ this.id = id;
+ this.major = (byte) ((id / 1000000) % 100);
+ this.minor = (byte) ((id / 10000) % 100);
+ this.revision = (byte) ((id / 100) % 100);
+ this.build = (byte) (id % 100);
+ this.snapshot = snapshot;
+ this.luceneVersion = luceneVersion;
+ }
+
+ public boolean snapshot() {
+ return snapshot != null && snapshot;
+ }
+
+ public boolean after(Version version) {
+ return version.id < id;
+ }
+
+ public boolean onOrAfter(Version version) {
+ return version.id <= id;
+ }
+
+ public boolean before(Version version) {
+ return version.id > id;
+ }
+
+ public boolean onOrBefore(Version version) {
+ return version.id >= id;
+ }
+
+ /**
+ * Just the version number, without the -SNAPSHOT qualifier even for snapshot builds.
+ */
+ public String number() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(major).append('.').append(minor).append('.').append(revision);
+ if (build < 50) {
+ sb.append(".Beta").append(build);
+ } else if (build < 99) {
+ sb.append(".RC").append(build - 50);
+ }
+ return sb.toString();
+ }
+
+ public static void main(String[] args) {
+ System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.hashShort() + "/" + Build.CURRENT.timestamp() + ", JVM: " + JvmInfo.jvmInfo().version());
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(number());
+ if (snapshot()) {
+ sb.append("-SNAPSHOT");
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+        return id == ((Version) o).id;
+ }
+
+ @Override
+ public int hashCode() {
+ return id;
+ }
+
+ public static class Module extends AbstractModule {
+
+ private final Version version;
+
+ public Module(Version version) {
+ this.version = version;
+ }
+
+ @Override
+ protected void configure() {
+ bind(Version.class).toInstance(version);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/Action.java b/src/main/java/org/elasticsearch/action/Action.java
new file mode 100644
index 0000000..30482cb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/Action.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.client.Client;
+
+/**
+ * Main action (used with the {@link Client} API).
+ */
+public abstract class Action<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
+ extends GenericAction<Request, Response> {
+
+ protected Action(String name) {
+ super(name);
+ }
+
+ public abstract RequestBuilder newRequestBuilder(Client client);
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionFuture.java b/src/main/java/org/elasticsearch/action/ActionFuture.java
new file mode 100644
index 0000000..3848536
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionFuture.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An extension to {@link Future} allowing for simplified "get" operations.
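+ * <p/>
+ * <p>A minimal usage sketch, assuming a {@code client} handle and a prepared
+ * index request (illustrative, not part of this interface):
+ * <pre>
+ *     IndexResponse response = client.index(request).actionGet("5s");
+ * </pre>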
+ */
+public interface ActionFuture<T> extends Future<T> {
+
+ /**
+     * Similar to {@link #get()}, but catches any {@link InterruptedException}, restoring the
+     * interrupted state on the thread and throwing an {@link org.elasticsearch.ElasticsearchIllegalStateException}
+     * instead, and throws the actual cause of any {@link java.util.concurrent.ExecutionException}.
+     * <p/>
+     * <p>Note, the cause is unwrapped to the actual failure (for example, unwrapped
+     * from {@link org.elasticsearch.transport.RemoteTransportException}). The root failure is
+     * still accessible using {@link #getRootFailure()}.
+ */
+ T actionGet() throws ElasticsearchException;
+
+ /**
+     * Similar to {@link #actionGet()}, but waits at most the given timeout for the
+     * response (a time value string such as "5s" or "500ms").
+ */
+ T actionGet(String timeout) throws ElasticsearchException;
+
+ /**
+     * Similar to {@link #actionGet()}, but waits at most the given timeout for the
+     * response.
+     *
+     * @param timeoutMillis Timeout in milliseconds
+ */
+ T actionGet(long timeoutMillis) throws ElasticsearchException;
+
+ /**
+     * Similar to {@link #actionGet()}, but waits at most the given timeout for the
+     * response.
+ */
+ T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException;
+
+ /**
+     * Similar to {@link #actionGet()}, but waits at most the given timeout for the
+     * response.
+ */
+ T actionGet(TimeValue timeout) throws ElasticsearchException;
+
+ /**
+     * The root (possibly wrapped) failure.
+ */
+ @Nullable
+ Throwable getRootFailure();
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionListener.java b/src/main/java/org/elasticsearch/action/ActionListener.java
new file mode 100644
index 0000000..0b3a69b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionListener.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+/**
+ * A listener for action responses or failures.
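+ * <p/>
+ * <p>A minimal asynchronous sketch, assuming a {@code client} handle and a prepared
+ * search request (names here are illustrative, not part of this interface):
+ * <pre>
+ *     client.search(request, new ActionListener&lt;SearchResponse&gt;() {
+ *         public void onResponse(SearchResponse response) { handleHits(response); }
+ *         public void onFailure(Throwable e) { handleFailure(e); }
+ *     });
+ * </pre>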
+ */
+public interface ActionListener<Response> {
+
+ /**
+ * A response handler.
+ */
+ void onResponse(Response response);
+
+ /**
+ * A failure handler.
+ */
+ void onFailure(Throwable e);
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java
new file mode 100644
index 0000000..8ef29d6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
+import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartAction;
+import org.elasticsearch.action.admin.cluster.node.restart.TransportNodesRestartAction;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction;
+import org.elasticsearch.action.admin.cluster.node.shutdown.TransportNodesShutdownAction;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
+import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
+import org.elasticsearch.action.admin.cluster.repositories.get.TransportGetRepositoriesAction;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction;
+import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction;
+import org.elasticsearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction;
+import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction;
+import org.elasticsearch.action.admin.cluster.snapshots.get.TransportGetSnapshotsAction;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.TransportRestoreSnapshotAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
+import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
+import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction;
+import org.elasticsearch.action.admin.indices.alias.exists.TransportAliasesExistAction;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.get.TransportGetAliasesAction;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction;
+import org.elasticsearch.action.admin.indices.analyze.TransportAnalyzeAction;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
+import org.elasticsearch.action.admin.indices.cache.clear.TransportClearIndicesCacheAction;
+import org.elasticsearch.action.admin.indices.close.CloseIndexAction;
+import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
+import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
+import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
+import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction;
+import org.elasticsearch.action.admin.indices.exists.indices.TransportIndicesExistsAction;
+import org.elasticsearch.action.admin.indices.exists.types.TransportTypesExistsAction;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
+import org.elasticsearch.action.admin.indices.flush.FlushAction;
+import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotAction;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.TransportGatewaySnapshotAction;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingAction;
+import org.elasticsearch.action.admin.indices.mapping.delete.TransportDeleteMappingAction;
+import org.elasticsearch.action.admin.indices.mapping.get.*;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
+import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
+import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
+import org.elasticsearch.action.admin.indices.open.TransportOpenIndexAction;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeAction;
+import org.elasticsearch.action.admin.indices.optimize.TransportOptimizeAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction;
+import org.elasticsearch.action.admin.indices.segments.TransportIndicesSegmentsAction;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.get.TransportGetSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusAction;
+import org.elasticsearch.action.admin.indices.status.TransportIndicesStatusAction;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
+import org.elasticsearch.action.admin.indices.template.get.TransportGetIndexTemplatesAction;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
+import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
+import org.elasticsearch.action.admin.indices.warmer.get.TransportGetWarmersAction;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
+import org.elasticsearch.action.admin.indices.warmer.put.TransportPutWarmerAction;
+import org.elasticsearch.action.bulk.BulkAction;
+import org.elasticsearch.action.bulk.TransportBulkAction;
+import org.elasticsearch.action.bulk.TransportShardBulkAction;
+import org.elasticsearch.action.count.CountAction;
+import org.elasticsearch.action.count.TransportCountAction;
+import org.elasticsearch.action.delete.DeleteAction;
+import org.elasticsearch.action.delete.TransportDeleteAction;
+import org.elasticsearch.action.delete.index.TransportIndexDeleteAction;
+import org.elasticsearch.action.delete.index.TransportShardDeleteAction;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryAction;
+import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction;
+import org.elasticsearch.action.deletebyquery.TransportIndexDeleteByQueryAction;
+import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction;
+import org.elasticsearch.action.explain.ExplainAction;
+import org.elasticsearch.action.explain.TransportExplainAction;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.TransportIndexAction;
+import org.elasticsearch.action.mlt.MoreLikeThisAction;
+import org.elasticsearch.action.mlt.TransportMoreLikeThisAction;
+import org.elasticsearch.action.percolate.*;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.search.type.*;
+import org.elasticsearch.action.suggest.SuggestAction;
+import org.elasticsearch.action.suggest.TransportSuggestAction;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.termvector.*;
+import org.elasticsearch.action.update.TransportUpdateAction;
+import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class ActionModule extends AbstractModule {
+
+ private final Map<String, ActionEntry> actions = Maps.newHashMap();
+
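+    // Couples a GenericAction with the TransportAction class that executes it,
+    // plus any supporting transport action classes that must also be bound.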
+ static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
+ public final GenericAction<Request, Response> action;
+ public final Class<? extends TransportAction<Request, Response>> transportAction;
+ public final Class[] supportTransportActions;
+
+ ActionEntry(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
+ this.action = action;
+ this.transportAction = transportAction;
+ this.supportTransportActions = supportTransportActions;
+ }
+
+ }
+
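+    // When true (for example, for a client module that proxies requests to remote
+    // nodes), configure() binds only the name -> action map and skips binding the
+    // local transport action implementations.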
+ private final boolean proxy;
+
+ public ActionModule(boolean proxy) {
+ this.proxy = proxy;
+ }
+
+ /**
+ * Registers an action.
+ *
+ * @param action The action type.
+ * @param transportAction The transport action implementing the actual action.
+ * @param supportTransportActions Any support actions that are needed by the transport action.
+ * @param <Request> The request type.
+ * @param <Response> The response type.
+ */
+ public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
+ actions.put(action.name(), new ActionEntry<Request, Response>(action, transportAction, supportTransportActions));
+ }
+
+ @Override
+ protected void configure() {
+
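+        // Node-level admin actions (info, stats, shutdown, restart, hot threads).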
+ registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
+ registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
+ registerAction(NodesShutdownAction.INSTANCE, TransportNodesShutdownAction.class);
+ registerAction(NodesRestartAction.INSTANCE, TransportNodesRestartAction.class);
+ registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
+
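+        // Cluster-level admin actions (health, state, settings, reroute, snapshots, ...).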
+ registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
+ registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
+ registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
+ registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);
+ registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class);
+ registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class);
+ registerAction(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class);
+ registerAction(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class);
+ registerAction(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
+ registerAction(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
+ registerAction(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
+ registerAction(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
+ registerAction(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);
+ registerAction(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class);
+
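+        // Index-level admin actions (create/delete, mappings, settings, templates, aliases, warmers, ...).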
+ registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class);
+ registerAction(IndicesStatusAction.INSTANCE, TransportIndicesStatusAction.class);
+ registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
+ registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
+ registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
+ registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
+ registerAction(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class);
+ registerAction(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class);
+ registerAction(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class);
+ registerAction(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class);
+ registerAction(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, TransportGetFieldMappingsIndexAction.class);
+ registerAction(PutMappingAction.INSTANCE, TransportPutMappingAction.class);
+ registerAction(DeleteMappingAction.INSTANCE, TransportDeleteMappingAction.class);
+ registerAction(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class);
+ registerAction(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class);
+ registerAction(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class);
+ registerAction(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class);
+ registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
+ registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
+ registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
+ registerAction(GatewaySnapshotAction.INSTANCE, TransportGatewaySnapshotAction.class);
+ registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
+ registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
+ registerAction(OptimizeAction.INSTANCE, TransportOptimizeAction.class);
+ registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
+ registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
+ registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);
+ registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class);
+ registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
+ registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
+ registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);
+
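+        // Document, search and percolation actions (index, get, delete, bulk, search, ...).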
+ registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
+ registerAction(GetAction.INSTANCE, TransportGetAction.class);
+ registerAction(TermVectorAction.INSTANCE, TransportSingleShardTermVectorAction.class);
+ registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
+ TransportSingleShardMultiTermsVectorAction.class);
+ registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class,
+ TransportIndexDeleteAction.class, TransportShardDeleteAction.class);
+ registerAction(CountAction.INSTANCE, TransportCountAction.class);
+ registerAction(SuggestAction.INSTANCE, TransportSuggestAction.class);
+ registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class);
+ registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
+ TransportShardMultiGetAction.class);
+ registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
+ TransportShardBulkAction.class);
+ registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class,
+ TransportIndexDeleteByQueryAction.class, TransportShardDeleteByQueryAction.class);
+ registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
+ TransportSearchDfsQueryThenFetchAction.class,
+ TransportSearchQueryThenFetchAction.class,
+ TransportSearchDfsQueryAndFetchAction.class,
+ TransportSearchQueryAndFetchAction.class,
+ TransportSearchScanAction.class
+ );
+ registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class,
+ TransportSearchScrollScanAction.class,
+ TransportSearchScrollQueryThenFetchAction.class,
+ TransportSearchScrollQueryAndFetchAction.class
+ );
+ registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
+ registerAction(MoreLikeThisAction.INSTANCE, TransportMoreLikeThisAction.class);
+ registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
+ registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);
+ registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
+ registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
+
+ // register Name -> GenericAction Map that can be injected to instances.
+ MapBinder<String, GenericAction> actionsBinder
+ = MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
+
+ for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
+ actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().action);
+ }
+
+ // register GenericAction -> transportAction Map that can be injected to instances.
+ // also register any supporting classes
+ if (!proxy) {
+ MapBinder<GenericAction, TransportAction> transportActionsBinder
+ = MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class);
+ for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
+ // bind the action as eager singleton, so the map binder one will reuse it
+ bind(entry.getValue().transportAction).asEagerSingleton();
+ transportActionsBinder.addBinding(entry.getValue().action).to(entry.getValue().transportAction).asEagerSingleton();
+ for (Class supportAction : entry.getValue().supportTransportActions) {
+ bind(supportAction).asEagerSingleton();
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionRequest.java b/src/main/java/org/elasticsearch/action/ActionRequest.java
new file mode 100644
index 0000000..98eb6f9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionRequest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class ActionRequest<T extends ActionRequest> extends TransportRequest {
+
+ private boolean listenerThreaded = false;
+
+ protected ActionRequest() {
+ super();
+ }
+
+ protected ActionRequest(ActionRequest request) {
+ super(request);
+        // this does not copy the listenerThreaded flag; if needed, it's up to the caller to set it,
+        // since most of the time we actually want it not to be threaded...
+ //this.listenerThreaded = request.listenerThreaded();
+ }
+
+ /**
+     * Whether the response listener should be executed on a separate thread.
+     * <p/>
+     * <p>When not executed on a separate thread, it will run either on the calling thread or
+     * on an expensive, IO-based thread.
+ */
+ public final boolean listenerThreaded() {
+ return this.listenerThreaded;
+ }
+
+ /**
+     * Sets whether the response listener should be executed on a separate thread.
+ */
+ @SuppressWarnings("unchecked")
+ public final T listenerThreaded(boolean listenerThreaded) {
+ this.listenerThreaded = listenerThreaded;
+ return (T) this;
+ }
+
+ public abstract ActionRequestValidationException validate();
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java
new file mode 100644
index 0000000..ad64733
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.support.PlainListenableActionFuture;
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
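+ * Base class for fluent request builders: configure the request, then call
+ * {@link #execute()} for a future or {@link #get()} to block for the response.
+ * <p/>
+ * <p>A minimal sketch, assuming a {@code client} handle (illustrative):
+ * <pre>
+ *     SearchResponse response = client.prepareSearch("index").get();
+ * </pre>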
+ *
+ */
+public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder> {
+
+ protected final Request request;
+
+ protected final InternalGenericClient client;
+
+ protected ActionRequestBuilder(InternalGenericClient client, Request request) {
+ this.client = client;
+ this.request = request;
+ }
+
+ public Request request() {
+ return this.request;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setListenerThreaded(boolean listenerThreaded) {
+ request.listenerThreaded(listenerThreaded);
+ return (RequestBuilder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder putHeader(String key, Object value) {
+ request.putHeader(key, value);
+ return (RequestBuilder) this;
+ }
+
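+    // Asynchronous entry point: wraps the request in a listenable future and
+    // hands it to doExecute(), which subclasses wire to the actual client call.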
+ public ListenableActionFuture<Response> execute() {
+ PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<Response>(request.listenerThreaded(), client.threadPool());
+ execute(future);
+ return future;
+ }
+
+ /**
+ * Short version of execute().actionGet().
+ */
+ public Response get() throws ElasticsearchException {
+ return execute().actionGet();
+ }
+
+ /**
+ * Short version of execute().actionGet().
+ */
+ public Response get(TimeValue timeout) throws ElasticsearchException {
+ return execute().actionGet(timeout);
+ }
+
+ /**
+ * Short version of execute().actionGet().
+ */
+ public Response get(String timeout) throws ElasticsearchException {
+ return execute().actionGet(timeout);
+ }
+
+ public void execute(ActionListener<Response> listener) {
+ doExecute(listener);
+ }
+
+ protected abstract void doExecute(ActionListener<Response> listener);
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java b/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java
new file mode 100644
index 0000000..ab51c14
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ *
+ */
+public class ActionRequestValidationException extends ElasticsearchException {
+
+ private final List<String> validationErrors = new ArrayList<String>();
+
+ public ActionRequestValidationException() {
+ super(null);
+ }
+
+ public void addValidationError(String error) {
+ validationErrors.add(error);
+ }
+
+ public void addValidationErrors(Iterable<String> errors) {
+ for (String error : errors) {
+ validationErrors.add(error);
+ }
+ }
+
+ public List<String> validationErrors() {
+ return validationErrors;
+ }
+
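+    // Renders as: "Validation Failed: 1: <first error>;2: <second error>;..."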
+ @Override
+ public String getMessage() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Validation Failed: ");
+ int index = 0;
+ for (String error : validationErrors) {
+ sb.append(++index).append(": ").append(error).append(";");
+ }
+ return sb.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/ActionResponse.java b/src/main/java/org/elasticsearch/action/ActionResponse.java
new file mode 100644
index 0000000..104c3b4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ActionResponse.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class ActionResponse extends TransportResponse {
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/FailedNodeException.java b/src/main/java/org/elasticsearch/action/FailedNodeException.java
new file mode 100644
index 0000000..7e9857f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/FailedNodeException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class FailedNodeException extends ElasticsearchException {
+
+ private final String nodeId;
+
+ public FailedNodeException(String nodeId, String msg, Throwable cause) {
+ super(msg, cause);
+ this.nodeId = nodeId;
+ }
+
+ public String nodeId() {
+ return this.nodeId;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/GenericAction.java b/src/main/java/org/elasticsearch/action/GenericAction.java
new file mode 100644
index 0000000..7b54f2f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/GenericAction.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.TransportRequestOptions;
+
+/**
+ * A generic action. Implementations should strive to be singletons.
+ */
+public abstract class GenericAction<Request extends ActionRequest, Response extends ActionResponse> {
+
+ private final String name;
+
+ /**
+ * @param name The name of the action, must be unique across actions.
+ */
+ protected GenericAction(String name) {
+ this.name = name;
+ }
+
+ /**
+ * The name of the action. Must be unique across actions.
+ */
+ public String name() {
+ return this.name;
+ }
+
+ /**
+ * Creates a new response instance.
+ */
+ public abstract Response newResponse();
+
+ /**
+ * Optional request options for the action.
+ */
+ public TransportRequestOptions transportOptions(Settings settings) {
+ return TransportRequestOptions.EMPTY;
+ }
+
+ @Override
+    public boolean equals(Object o) {
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        return name.equals(((GenericAction) o).name());
+    }
+
+ @Override
+ public int hashCode() {
+ return name.hashCode();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/ListenableActionFuture.java b/src/main/java/org/elasticsearch/action/ListenableActionFuture.java
new file mode 100644
index 0000000..582d2ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ListenableActionFuture.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+/**
+ * An {@link ActionFuture} that listeners can be added to.
+ */
+public interface ListenableActionFuture<T> extends ActionFuture<T> {
+
+ /**
+     * Add an action listener to be invoked when a response has been received.
+ */
+ void addListener(final ActionListener<T> listener);
+
+ /**
+     * Add an action listener (runnable) to be invoked when a response has been received.
+ */
+ void addListener(final Runnable listener);
+}
diff --git a/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
new file mode 100644
index 0000000..5c06cff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class NoShardAvailableActionException extends IndexShardException {
+
+ public NoShardAvailableActionException(ShardId shardId) {
+ super(shardId, null);
+ }
+
+ public NoShardAvailableActionException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/NoSuchNodeException.java b/src/main/java/org/elasticsearch/action/NoSuchNodeException.java
new file mode 100644
index 0000000..fb5699b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/NoSuchNodeException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+/**
+ *
+ */
+public class NoSuchNodeException extends FailedNodeException {
+
+ public NoSuchNodeException(String nodeId) {
+ super(nodeId, "No such node [" + nodeId + "]", null);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java b/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java
new file mode 100644
index 0000000..bc75ce3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class PrimaryMissingActionException extends ElasticsearchException {
+
+ public PrimaryMissingActionException(String message) {
+ super(message);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/src/main/java/org/elasticsearch/action/RoutingMissingException.java
new file mode 100644
index 0000000..58cd7e9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/RoutingMissingException.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class RoutingMissingException extends ElasticsearchException {
+
+ private final String index;
+
+ private final String type;
+
+ private final String id;
+
+ public RoutingMissingException(String index, String type, String id) {
+ super("routing is required for [" + index + "]/[" + type + "]/[" + id + "]");
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public String id() {
+ return id;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java
new file mode 100644
index 0000000..49b3d01
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.Serializable;
+
+/**
+ * An exception indicating that a failure occurred performing an operation on the shard.
+ */
+public interface ShardOperationFailedException extends Streamable, Serializable {
+
+ /**
+ * The index the operation failed on. Might return <tt>null</tt> if it can't be derived.
+ */
+ String index();
+
+ /**
+     * The shard id the operation failed on. Might return <tt>-1</tt> if it can't be derived.
+ */
+ int shardId();
+
+ /**
+ * The reason of the failure.
+ */
+ String reason();
+
+ /**
+ * The status of the failure.
+ */
+ RestStatus status();
+}
diff --git a/src/main/java/org/elasticsearch/action/ThreadingModel.java b/src/main/java/org/elasticsearch/action/ThreadingModel.java
new file mode 100644
index 0000000..5f87d82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ThreadingModel.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ *
+ */
+public enum ThreadingModel {
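+    // Ids act as a two-bit flag set: bit 0 = threaded operation, bit 1 = threaded
+    // listener; OPERATION_LISTENER (3) sets both. The add*/remove* helpers below
+    // toggle these flags without bit arithmetic.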
+ NONE((byte) 0),
+ OPERATION((byte) 1),
+ LISTENER((byte) 2),
+ OPERATION_LISTENER((byte) 3);
+
+    private final byte id;
+
+ ThreadingModel(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ /**
+ * <tt>true</tt> if the actual operation the action represents will be executed
+ * on a different thread than the calling thread (assuming it will be executed
+ * on the same node).
+ */
+ public boolean threadedOperation() {
+ return this == OPERATION || this == OPERATION_LISTENER;
+ }
+
+ /**
+     * <tt>true</tt> if the invocation of the action result listener will be executed
+     * on a different thread than the calling thread (or than an "expensive" thread,
+     * such as the IO thread).
+ */
+ public boolean threadedListener() {
+ return this == LISTENER || this == OPERATION_LISTENER;
+ }
+
+ public ThreadingModel addListener() {
+ if (this == NONE) {
+ return LISTENER;
+ }
+ if (this == OPERATION) {
+ return OPERATION_LISTENER;
+ }
+ return this;
+ }
+
+ public ThreadingModel removeListener() {
+ if (this == LISTENER) {
+ return NONE;
+ }
+ if (this == OPERATION_LISTENER) {
+ return OPERATION;
+ }
+ return this;
+ }
+
+ public ThreadingModel addOperation() {
+ if (this == NONE) {
+ return OPERATION;
+ }
+ if (this == LISTENER) {
+ return OPERATION_LISTENER;
+ }
+ return this;
+ }
+
+ public ThreadingModel removeOperation() {
+ if (this == OPERATION) {
+ return NONE;
+ }
+ if (this == OPERATION_LISTENER) {
+ return LISTENER;
+ }
+ return this;
+ }
+
+ public static ThreadingModel fromId(byte id) {
+ if (id == 0) {
+ return NONE;
+ } else if (id == 1) {
+ return OPERATION;
+ } else if (id == 2) {
+ return LISTENER;
+ } else if (id == 3) {
+ return OPERATION_LISTENER;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No threading model for [" + id + "]");
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/TimestampParsingException.java b/src/main/java/org/elasticsearch/action/TimestampParsingException.java
new file mode 100644
index 0000000..634a0bf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/TimestampParsingException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ */
+public class TimestampParsingException extends ElasticsearchException {
+
+ private final String timestamp;
+
+ public TimestampParsingException(String timestamp) {
+ super("failed to parse timestamp [" + timestamp + "]");
+ this.timestamp = timestamp;
+ }
+
+ public String timestamp() {
+ return timestamp;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
new file mode 100644
index 0000000..461ef1a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportService;
+
+import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
+
+/**
+ * A generic proxy that will execute the given action against a specific node.
+ */
+public class TransportActionNodeProxy<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
+
+ protected final TransportService transportService;
+
+ private final GenericAction<Request, Response> action;
+
+ private final TransportRequestOptions transportOptions;
+
+ @Inject
+ public TransportActionNodeProxy(Settings settings, GenericAction<Request, Response> action, TransportService transportService) {
+ super(settings);
+ this.action = action;
+ this.transportService = transportService;
+ this.transportOptions = action.transportOptions(settings);
+ }
+
+ public ActionFuture<Response> execute(DiscoveryNode node, Request request) throws ElasticsearchException {
+ PlainActionFuture<Response> future = newFuture();
+ request.listenerThreaded(false);
+ execute(node, request, future);
+ return future;
+ }
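+
+    // Illustrative usage (an assumption, not part of this file): callers may block
+    // on the returned future, e.g.
+    //   Response response = proxy.execute(node, request).actionGet();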
+
+ public void execute(DiscoveryNode node, final Request request, final ActionListener<Response> listener) {
+ ActionRequestValidationException validationException = request.validate();
+ if (validationException != null) {
+ listener.onFailure(validationException);
+ return;
+ }
+ transportService.sendRequest(node, action.name(), request, transportOptions, new BaseTransportResponseHandler<Response>() {
+ @Override
+ public Response newInstance() {
+ return action.newResponse();
+ }
+
+ @Override
+ public String executor() {
+ if (request.listenerThreaded()) {
+ return ThreadPool.Names.GENERIC;
+ }
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+ });
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/UnavailableShardsException.java b/src/main/java/org/elasticsearch/action/UnavailableShardsException.java
new file mode 100644
index 0000000..dcc6f04
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/UnavailableShardsException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class UnavailableShardsException extends ElasticsearchException {
+
+ public UnavailableShardsException(@Nullable ShardId shardId, String message) {
+ super(buildMessage(shardId, message));
+ }
+
+ private static String buildMessage(ShardId shardId, String message) {
+ if (shardId == null) {
+ return message;
+ }
+ return "[" + shardId.index().name() + "][" + shardId.id() + "] " + message;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/ValidateActions.java b/src/main/java/org/elasticsearch/action/ValidateActions.java
new file mode 100644
index 0000000..d0ade06
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/ValidateActions.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+/**
+ *
+ */
+public class ValidateActions {
+
+ public static ActionRequestValidationException addValidationError(String error, ActionRequestValidationException validationException) {
+ if (validationException == null) {
+ validationException = new ActionRequestValidationException();
+ }
+ validationException.addValidationError(error);
+ return validationException;
+ }
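+
+    // Illustrative usage (a sketch of the common validate() pattern, not part of this file):
+    //   ActionRequestValidationException validationException = null;
+    //   if (index == null) {
+    //       validationException = addValidationError("index is missing", validationException);
+    //   }
+    //   return validationException;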
+}
diff --git a/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java b/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java
new file mode 100644
index 0000000..eeabdb9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * Write consistency level controls how many replicas must be active for a write
+ * operation (index or delete) to take place.
+ */
+public enum WriteConsistencyLevel {
+ DEFAULT((byte) 0),
+ ONE((byte) 1),
+ QUORUM((byte) 2),
+ ALL((byte) 3);
+
+ private final byte id;
+
+ WriteConsistencyLevel(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return id;
+ }
+
+ public static WriteConsistencyLevel fromId(byte value) {
+ if (value == 0) {
+ return DEFAULT;
+ } else if (value == 1) {
+ return ONE;
+ } else if (value == 2) {
+ return QUORUM;
+ } else if (value == 3) {
+ return ALL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
+ }
+
+ public static WriteConsistencyLevel fromString(String value) {
+ if (value.equals("default")) {
+ return DEFAULT;
+ } else if (value.equals("one")) {
+ return ONE;
+ } else if (value.equals("quorum")) {
+ return QUORUM;
+ } else if (value.equals("all")) {
+ return ALL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
+ }
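+
+    // Illustrative round-trips (derived from the methods above):
+    //   fromString("quorum") == QUORUM
+    //   fromId(QUORUM.id()) == QUORUM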
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java
new file mode 100644
index 0000000..3f8986d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Cluster action (used with the {@link ClusterAdminClient} API).
+ */
+public abstract class ClusterAction<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
+ extends GenericAction<Request, Response> {
+
+ protected ClusterAction(String name) {
+ super(name);
+ }
+
+ public abstract RequestBuilder newRequestBuilder(ClusterAdminClient client);
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java
new file mode 100644
index 0000000..50c73be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class ClusterHealthAction extends ClusterAction<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
+
+ public static final ClusterHealthAction INSTANCE = new ClusterHealthAction();
+ public static final String NAME = "cluster/health";
+
+ private ClusterHealthAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterHealthResponse newResponse() {
+ return new ClusterHealthResponse();
+ }
+
+ @Override
+ public ClusterHealthRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new ClusterHealthRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
new file mode 100644
index 0000000..3fe6efd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+
+/**
+ *
+ */
+public class ClusterHealthRequest extends MasterNodeReadOperationRequest<ClusterHealthRequest> {
+
+ private String[] indices;
+ private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
+ private ClusterHealthStatus waitForStatus;
+ private int waitForRelocatingShards = -1;
+ private int waitForActiveShards = -1;
+ private String waitForNodes = "";
+ private Priority waitForEvents = null;
+
+ ClusterHealthRequest() {
+ }
+
+ public ClusterHealthRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public ClusterHealthRequest indices(String[] indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ public ClusterHealthRequest timeout(TimeValue timeout) {
+ this.timeout = timeout;
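+        // if the master node timeout is still at its default, keep it in line with this timeout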
+ if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) {
+ masterNodeTimeout = timeout;
+ }
+ return this;
+ }
+
+ public ClusterHealthRequest timeout(String timeout) {
+ return this.timeout(TimeValue.parseTimeValue(timeout, null));
+ }
+
+ public ClusterHealthStatus waitForStatus() {
+ return waitForStatus;
+ }
+
+ public ClusterHealthRequest waitForStatus(ClusterHealthStatus waitForStatus) {
+ this.waitForStatus = waitForStatus;
+ return this;
+ }
+
+ public ClusterHealthRequest waitForGreenStatus() {
+ return waitForStatus(ClusterHealthStatus.GREEN);
+ }
+
+ public ClusterHealthRequest waitForYellowStatus() {
+ return waitForStatus(ClusterHealthStatus.YELLOW);
+ }
+
+ public int waitForRelocatingShards() {
+ return waitForRelocatingShards;
+ }
+
+ public ClusterHealthRequest waitForRelocatingShards(int waitForRelocatingShards) {
+ this.waitForRelocatingShards = waitForRelocatingShards;
+ return this;
+ }
+
+ public int waitForActiveShards() {
+ return waitForActiveShards;
+ }
+
+ public ClusterHealthRequest waitForActiveShards(int waitForActiveShards) {
+ this.waitForActiveShards = waitForActiveShards;
+ return this;
+ }
+
+ public String waitForNodes() {
+ return waitForNodes;
+ }
+
+ /**
+     * Waits for N nodes. Use "12" for an exact match, or a comparison such as ">12",
+     * "<12", ">=12" or "<=12" for a range.
+ */
+ public ClusterHealthRequest waitForNodes(String waitForNodes) {
+ this.waitForNodes = waitForNodes;
+ return this;
+ }
+
+ public ClusterHealthRequest waitForEvents(Priority waitForEvents) {
+ this.waitForEvents = waitForEvents;
+ return this;
+ }
+
+ public Priority waitForEvents() {
+ return this.waitForEvents;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ if (size == 0) {
+ indices = Strings.EMPTY_ARRAY;
+ } else {
+ indices = new String[size];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = in.readString();
+ }
+ }
+ timeout = readTimeValue(in);
+ if (in.readBoolean()) {
+ waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
+ }
+ waitForRelocatingShards = in.readInt();
+ waitForActiveShards = in.readInt();
+ waitForNodes = in.readString();
+ readLocal(in);
+ if (in.readBoolean()) {
+ waitForEvents = Priority.fromByte(in.readByte());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (indices == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(indices.length);
+ for (String index : indices) {
+ out.writeString(index);
+ }
+ }
+ timeout.writeTo(out);
+ if (waitForStatus == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(waitForStatus.value());
+ }
+ out.writeInt(waitForRelocatingShards);
+ out.writeInt(waitForActiveShards);
+ out.writeString(waitForNodes);
+ writeLocal(out);
+ if (waitForEvents == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(waitForEvents.value());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
new file mode 100644
index 0000000..8a459cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ *
+ */
+public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
+
+ public ClusterHealthRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new ClusterHealthRequest());
+ }
+
+ public ClusterHealthRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setWaitForStatus(ClusterHealthStatus waitForStatus) {
+ request.waitForStatus(waitForStatus);
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setWaitForGreenStatus() {
+ request.waitForGreenStatus();
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setWaitForYellowStatus() {
+ request.waitForYellowStatus();
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setWaitForRelocatingShards(int waitForRelocatingShards) {
+ request.waitForRelocatingShards(waitForRelocatingShards);
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setWaitForActiveShards(int waitForActiveShards) {
+ request.waitForActiveShards(waitForActiveShards);
+ return this;
+ }
+
+ /**
+     * Waits for N nodes. Use "12" for an exact match, or a comparison such as ">12",
+     * "<12", ">=12" or "<=12" for a range.
+ */
+ public ClusterHealthRequestBuilder setWaitForNodes(String waitForNodes) {
+ request.waitForNodes(waitForNodes);
+ return this;
+ }
+
+ public ClusterHealthRequestBuilder setWaitForEvents(Priority waitForEvents) {
+ request.waitForEvents(waitForEvents);
+ return this;
+ }
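+
+    // Illustrative usage (an assumption, not part of this file):
+    //   ClusterHealthResponse health = client.admin().cluster().prepareHealth("index1")
+    //           .setWaitForGreenStatus().setTimeout("30s").execute().actionGet();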
+
+ @Override
+ protected void doExecute(ActionListener<ClusterHealthResponse> listener) {
+ ((ClusterAdminClient) client).health(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
new file mode 100644
index 0000000..56f594d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTableValidation;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth.readClusterIndexHealth;
+
+/**
+ *
+ */
+public class ClusterHealthResponse extends ActionResponse implements Iterable<ClusterIndexHealth>, ToXContent {
+
+ private String clusterName;
+ int numberOfNodes = 0;
+ int numberOfDataNodes = 0;
+ int activeShards = 0;
+ int relocatingShards = 0;
+ int activePrimaryShards = 0;
+ int initializingShards = 0;
+ int unassignedShards = 0;
+ boolean timedOut = false;
+ ClusterHealthStatus status = ClusterHealthStatus.RED;
+ private List<String> validationFailures;
+ Map<String, ClusterIndexHealth> indices = Maps.newHashMap();
+
+ ClusterHealthResponse() {
+ }
+
+ public ClusterHealthResponse(String clusterName, List<String> validationFailures) {
+ this.clusterName = clusterName;
+ this.validationFailures = validationFailures;
+ }
+
+ public ClusterHealthResponse(String clusterName, String[] concreteIndices, ClusterState clusterState) {
+ this.clusterName = clusterName;
+ RoutingTableValidation validation = clusterState.routingTable().validate(clusterState.metaData());
+ validationFailures = validation.failures();
+ numberOfNodes = clusterState.nodes().size();
+ numberOfDataNodes = clusterState.nodes().dataNodes().size();
+
+ for (String index : concreteIndices) {
+ IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index);
+ IndexMetaData indexMetaData = clusterState.metaData().index(index);
+ if (indexRoutingTable == null) {
+ continue;
+ }
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+
+ indices.put(indexHealth.getIndex(), indexHealth);
+ }
+
+ status = ClusterHealthStatus.GREEN;
+
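+        // start from GREEN and let the worst index status win; YELLOW never overrides RED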
+ for (ClusterIndexHealth indexHealth : indices.values()) {
+ activePrimaryShards += indexHealth.activePrimaryShards;
+ activeShards += indexHealth.activeShards;
+ relocatingShards += indexHealth.relocatingShards;
+ initializingShards += indexHealth.initializingShards;
+ unassignedShards += indexHealth.unassignedShards;
+ if (indexHealth.getStatus() == ClusterHealthStatus.RED) {
+ status = ClusterHealthStatus.RED;
+ } else if (indexHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) {
+ status = ClusterHealthStatus.YELLOW;
+ }
+ }
+
+ if (!validationFailures.isEmpty()) {
+ status = ClusterHealthStatus.RED;
+ } else if (clusterState.blocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE)) {
+ status = ClusterHealthStatus.RED;
+ }
+ }
+
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ /**
+ * The validation failures on the cluster level (without index validation failures).
+ */
+ public List<String> getValidationFailures() {
+ return this.validationFailures;
+ }
+
+ /**
+ * All the validation failures, including index level validation failures.
+ */
+ public List<String> getAllValidationFailures() {
+ List<String> allFailures = newArrayList(getValidationFailures());
+ for (ClusterIndexHealth indexHealth : indices.values()) {
+ allFailures.addAll(indexHealth.getValidationFailures());
+ }
+ return allFailures;
+ }
+
+ public int getActiveShards() {
+ return activeShards;
+ }
+
+ public int getRelocatingShards() {
+ return relocatingShards;
+ }
+
+ public int getActivePrimaryShards() {
+ return activePrimaryShards;
+ }
+
+ public int getInitializingShards() {
+ return initializingShards;
+ }
+
+ public int getUnassignedShards() {
+ return unassignedShards;
+ }
+
+ public int getNumberOfNodes() {
+ return this.numberOfNodes;
+ }
+
+ public int getNumberOfDataNodes() {
+ return this.numberOfDataNodes;
+ }
+
+ /**
+     * <tt>true</tt> if one of the waitForXXX conditions timed out before it was matched.
+ */
+ public boolean isTimedOut() {
+ return this.timedOut;
+ }
+
+ public ClusterHealthStatus getStatus() {
+ return status;
+ }
+
+ public Map<String, ClusterIndexHealth> getIndices() {
+ return indices;
+ }
+
+ @Override
+ public Iterator<ClusterIndexHealth> iterator() {
+ return indices.values().iterator();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterName = in.readString();
+ activePrimaryShards = in.readVInt();
+ activeShards = in.readVInt();
+ relocatingShards = in.readVInt();
+ initializingShards = in.readVInt();
+ unassignedShards = in.readVInt();
+ numberOfNodes = in.readVInt();
+ numberOfDataNodes = in.readVInt();
+ status = ClusterHealthStatus.fromValue(in.readByte());
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ ClusterIndexHealth indexHealth = readClusterIndexHealth(in);
+ indices.put(indexHealth.getIndex(), indexHealth);
+ }
+ timedOut = in.readBoolean();
+        size = in.readVInt();
+        if (size == 0) {
+            validationFailures = ImmutableList.of();
+        } else {
+            // initialize the list before filling it; on the deserialization path this field is otherwise null
+            validationFailures = newArrayList();
+            for (int i = 0; i < size; i++) {
+                validationFailures.add(in.readString());
+            }
+        }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(clusterName);
+ out.writeVInt(activePrimaryShards);
+ out.writeVInt(activeShards);
+ out.writeVInt(relocatingShards);
+ out.writeVInt(initializingShards);
+ out.writeVInt(unassignedShards);
+ out.writeVInt(numberOfNodes);
+ out.writeVInt(numberOfDataNodes);
+ out.writeByte(status.value());
+ out.writeVInt(indices.size());
+ for (ClusterIndexHealth indexHealth : this) {
+ indexHealth.writeTo(out);
+ }
+ out.writeBoolean(timedOut);
+
+ out.writeVInt(validationFailures.size());
+ for (String failure : validationFailures) {
+ out.writeString(failure);
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder("ClusterHealthResponse - status [").append(status).append("]")
+ .append("\ntimedOut [").append(timedOut).append("]")
+ .append("\nclustername [").append(clusterName).append("]")
+ .append("\nnumberOfNodes [").append(numberOfNodes).append("]")
+ .append("\nnumberOfDataNodes [").append(numberOfDataNodes).append("]")
+ .append("\nactiveShards [").append(activeShards).append("]")
+ .append("\nrelocatingShards [").append(relocatingShards).append("]")
+ .append("\nactivePrimaryShards [").append(activePrimaryShards).append("]")
+ .append("\ninitializingShards [").append(initializingShards).append("]")
+ .append("\nvalidationFailures ").append(validationFailures)
+ .append("\nindices:");
+
+ for (Map.Entry<String, ClusterIndexHealth> indexEntry : indices.entrySet()) {
+ builder.append(" [").append(indexEntry.getKey()).append("][").append(indexEntry.getValue().status).append("]");
+ }
+ return builder.toString();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
+ static final XContentBuilderString STATUS = new XContentBuilderString("status");
+ static final XContentBuilderString TIMED_OUT = new XContentBuilderString("timed_out");
+ static final XContentBuilderString NUMBER_OF_NODES = new XContentBuilderString("number_of_nodes");
+ static final XContentBuilderString NUMBER_OF_DATA_NODES = new XContentBuilderString("number_of_data_nodes");
+ static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards");
+ static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards");
+ static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards");
+ static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards");
+ static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards");
+ static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures");
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.CLUSTER_NAME, getClusterName());
+ builder.field(Fields.STATUS, getStatus().name().toLowerCase(Locale.ROOT));
+ builder.field(Fields.TIMED_OUT, isTimedOut());
+ builder.field(Fields.NUMBER_OF_NODES, getNumberOfNodes());
+ builder.field(Fields.NUMBER_OF_DATA_NODES, getNumberOfDataNodes());
+ builder.field(Fields.ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards());
+ builder.field(Fields.ACTIVE_SHARDS, getActiveShards());
+ builder.field(Fields.RELOCATING_SHARDS, getRelocatingShards());
+ builder.field(Fields.INITIALIZING_SHARDS, getInitializingShards());
+ builder.field(Fields.UNASSIGNED_SHARDS, getUnassignedShards());
+
+ String level = params.param("level", "cluster");
+ boolean outputIndices = "indices".equals(level) || "shards".equals(level);
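+        // level controls verbosity: "cluster" (the default) emits only cluster-wide
+        // fields, while "indices" and "shards" add per-index and per-shard sections
+        // (assumption: fed by the REST ?level= parameter)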
+
+ if (!getValidationFailures().isEmpty()) {
+ builder.startArray(Fields.VALIDATION_FAILURES);
+ for (String validationFailure : getValidationFailures()) {
+ builder.value(validationFailure);
+ }
+ // if we don't print index level information, still print the index validation failures
+ // so we know why the status is red
+ if (!outputIndices) {
+ for (ClusterIndexHealth indexHealth : indices.values()) {
+ builder.startObject(indexHealth.getIndex());
+
+ if (!indexHealth.getValidationFailures().isEmpty()) {
+ builder.startArray(Fields.VALIDATION_FAILURES);
+ for (String validationFailure : indexHealth.getValidationFailures()) {
+ builder.value(validationFailure);
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+ }
+ }
+ builder.endArray();
+ }
+
+ if (outputIndices) {
+ builder.startObject(Fields.INDICES);
+ for (ClusterIndexHealth indexHealth : indices.values()) {
+ builder.startObject(indexHealth.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+ indexHealth.toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java
new file mode 100644
index 0000000..50479ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ *
+ */
+public enum ClusterHealthStatus {
+ GREEN((byte) 0),
+ YELLOW((byte) 1),
+ RED((byte) 2);
+
+ private byte value;
+
+ ClusterHealthStatus(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return value;
+ }
+
+ public static ClusterHealthStatus fromValue(byte value) {
+ switch (value) {
+ case 0:
+ return GREEN;
+ case 1:
+ return YELLOW;
+ case 2:
+ return RED;
+ default:
+ throw new ElasticsearchIllegalArgumentException("No cluster health status for value [" + value + "]");
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java
new file mode 100644
index 0000000..8dcc67e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java
@@ -0,0 +1,293 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.action.admin.cluster.health.ClusterShardHealth.readClusterShardHealth;
+
+/**
+ *
+ */
+public class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streamable, ToXContent {
+
+ private String index;
+
+ private int numberOfShards;
+
+ private int numberOfReplicas;
+
+ int activeShards = 0;
+
+ int relocatingShards = 0;
+
+ int initializingShards = 0;
+
+ int unassignedShards = 0;
+
+ int activePrimaryShards = 0;
+
+ ClusterHealthStatus status = ClusterHealthStatus.RED;
+
+ final Map<Integer, ClusterShardHealth> shards = Maps.newHashMap();
+
+ List<String> validationFailures;
+
+ private ClusterIndexHealth() {
+ }
+
+ public ClusterIndexHealth(IndexMetaData indexMetaData, IndexRoutingTable indexRoutingTable) {
+ this.index = indexMetaData.index();
+ this.numberOfShards = indexMetaData.getNumberOfShards();
+ this.numberOfReplicas = indexMetaData.getNumberOfReplicas();
+ this.validationFailures = indexRoutingTable.validate(indexMetaData);
+
+ for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
+ ClusterShardHealth shardHealth = new ClusterShardHealth(shardRoutingTable.shardId().id());
+ for (ShardRouting shardRouting : shardRoutingTable) {
+ if (shardRouting.active()) {
+ shardHealth.activeShards++;
+ if (shardRouting.relocating()) {
+ // the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it
+ shardHealth.relocatingShards++;
+ }
+ if (shardRouting.primary()) {
+ shardHealth.primaryActive = true;
+ }
+ } else if (shardRouting.initializing()) {
+ shardHealth.initializingShards++;
+ } else if (shardRouting.unassigned()) {
+ shardHealth.unassignedShards++;
+ }
+ }
+ if (shardHealth.primaryActive) {
+ if (shardHealth.activeShards == shardRoutingTable.size()) {
+ shardHealth.status = ClusterHealthStatus.GREEN;
+ } else {
+ shardHealth.status = ClusterHealthStatus.YELLOW;
+ }
+ } else {
+ shardHealth.status = ClusterHealthStatus.RED;
+ }
+ shards.put(shardHealth.getId(), shardHealth);
+ }
+
+ // update the index status
+ status = ClusterHealthStatus.GREEN;
+
+ for (ClusterShardHealth shardHealth : shards.values()) {
+ if (shardHealth.isPrimaryActive()) {
+ activePrimaryShards++;
+ }
+ activeShards += shardHealth.activeShards;
+ relocatingShards += shardHealth.relocatingShards;
+ initializingShards += shardHealth.initializingShards;
+ unassignedShards += shardHealth.unassignedShards;
+
+ if (shardHealth.getStatus() == ClusterHealthStatus.RED) {
+ status = ClusterHealthStatus.RED;
+ } else if (shardHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) {
+ // do not override an existing red
+ status = ClusterHealthStatus.YELLOW;
+ }
+ }
+ if (!validationFailures.isEmpty()) {
+ status = ClusterHealthStatus.RED;
+        } else if (shards.isEmpty()) { // may happen when no shards have been created yet (two-phase index creation)
+ status = ClusterHealthStatus.RED;
+ }
+ }
+
+ public String getIndex() {
+ return index;
+ }
+
+ public List<String> getValidationFailures() {
+ return this.validationFailures;
+ }
+
+ public int getNumberOfShards() {
+ return numberOfShards;
+ }
+
+ public int getNumberOfReplicas() {
+ return numberOfReplicas;
+ }
+
+ public int getActiveShards() {
+ return activeShards;
+ }
+
+ public int getRelocatingShards() {
+ return relocatingShards;
+ }
+
+ public int getActivePrimaryShards() {
+ return activePrimaryShards;
+ }
+
+ public int getInitializingShards() {
+ return initializingShards;
+ }
+
+ public int getUnassignedShards() {
+ return unassignedShards;
+ }
+
+ public ClusterHealthStatus getStatus() {
+ return status;
+ }
+
+ public Map<Integer, ClusterShardHealth> getShards() {
+ return this.shards;
+ }
+
+ @Override
+ public Iterator<ClusterShardHealth> iterator() {
+ return shards.values().iterator();
+ }
+
+ public static ClusterIndexHealth readClusterIndexHealth(StreamInput in) throws IOException {
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth();
+ indexHealth.readFrom(in);
+ return indexHealth;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readString();
+ numberOfShards = in.readVInt();
+ numberOfReplicas = in.readVInt();
+ activePrimaryShards = in.readVInt();
+ activeShards = in.readVInt();
+ relocatingShards = in.readVInt();
+ initializingShards = in.readVInt();
+ unassignedShards = in.readVInt();
+ status = ClusterHealthStatus.fromValue(in.readByte());
+
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ ClusterShardHealth shardHealth = readClusterShardHealth(in);
+ shards.put(shardHealth.getId(), shardHealth);
+ }
+        size = in.readVInt();
+        if (size == 0) {
+            validationFailures = ImmutableList.of();
+        } else {
+            // initialize the list before filling it; on the deserialization path this field is otherwise null
+            validationFailures = Lists.newArrayList();
+            for (int i = 0; i < size; i++) {
+                validationFailures.add(in.readString());
+            }
+        }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeVInt(numberOfShards);
+ out.writeVInt(numberOfReplicas);
+ out.writeVInt(activePrimaryShards);
+ out.writeVInt(activeShards);
+ out.writeVInt(relocatingShards);
+ out.writeVInt(initializingShards);
+ out.writeVInt(unassignedShards);
+ out.writeByte(status.value());
+
+ out.writeVInt(shards.size());
+ for (ClusterShardHealth shardHealth : this) {
+ shardHealth.writeTo(out);
+ }
+
+ out.writeVInt(validationFailures.size());
+ for (String failure : validationFailures) {
+ out.writeString(failure);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString STATUS = new XContentBuilderString("status");
+ static final XContentBuilderString NUMBER_OF_SHARDS = new XContentBuilderString("number_of_shards");
+ static final XContentBuilderString NUMBER_OF_REPLICAS = new XContentBuilderString("number_of_replicas");
+ static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards");
+ static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards");
+ static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards");
+ static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards");
+ static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards");
+ static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures");
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ static final XContentBuilderString PRIMARY_ACTIVE = new XContentBuilderString("primary_active");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.STATUS, getStatus().name().toLowerCase(Locale.ROOT));
+ builder.field(Fields.NUMBER_OF_SHARDS, getNumberOfShards());
+ builder.field(Fields.NUMBER_OF_REPLICAS, getNumberOfReplicas());
+ builder.field(Fields.ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards());
+ builder.field(Fields.ACTIVE_SHARDS, getActiveShards());
+ builder.field(Fields.RELOCATING_SHARDS, getRelocatingShards());
+ builder.field(Fields.INITIALIZING_SHARDS, getInitializingShards());
+ builder.field(Fields.UNASSIGNED_SHARDS, getUnassignedShards());
+
+ if (!getValidationFailures().isEmpty()) {
+ builder.startArray(Fields.VALIDATION_FAILURES);
+ for (String validationFailure : getValidationFailures()) {
+ builder.value(validationFailure);
+ }
+ builder.endArray();
+ }
+
+ if ("shards".equals(params.param("level", "indices"))) {
+ builder.startObject(Fields.SHARDS);
+
+ for (ClusterShardHealth shardHealth : shards.values()) {
+ builder.startObject(Integer.toString(shardHealth.getId()));
+
+ builder.field(Fields.STATUS, shardHealth.getStatus().name().toLowerCase(Locale.ROOT));
+ builder.field(Fields.PRIMARY_ACTIVE, shardHealth.isPrimaryActive());
+ builder.field(Fields.ACTIVE_SHARDS, shardHealth.getActiveShards());
+ builder.field(Fields.RELOCATING_SHARDS, shardHealth.getRelocatingShards());
+ builder.field(Fields.INITIALIZING_SHARDS, shardHealth.getInitializingShards());
+ builder.field(Fields.UNASSIGNED_SHARDS, shardHealth.getUnassignedShards());
+
+ builder.endObject();
+ }
+
+ builder.endObject();
+ }
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java
new file mode 100644
index 0000000..5625ba3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ClusterShardHealth implements Streamable {
+
+ private int shardId;
+
+ ClusterHealthStatus status = ClusterHealthStatus.RED;
+
+ int activeShards = 0;
+
+ int relocatingShards = 0;
+
+ int initializingShards = 0;
+
+ int unassignedShards = 0;
+
+ boolean primaryActive = false;
+
+ private ClusterShardHealth() {
+    }
+
+ ClusterShardHealth(int shardId) {
+ this.shardId = shardId;
+ }
+
+ public int getId() {
+ return shardId;
+ }
+
+ public ClusterHealthStatus getStatus() {
+ return status;
+ }
+
+ public int getRelocatingShards() {
+ return relocatingShards;
+ }
+
+ public int getActiveShards() {
+ return activeShards;
+ }
+
+ public boolean isPrimaryActive() {
+ return primaryActive;
+ }
+
+ public int getInitializingShards() {
+ return initializingShards;
+ }
+
+ public int getUnassignedShards() {
+ return unassignedShards;
+ }
+
+ static ClusterShardHealth readClusterShardHealth(StreamInput in) throws IOException {
+ ClusterShardHealth ret = new ClusterShardHealth();
+ ret.readFrom(in);
+ return ret;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ shardId = in.readVInt();
+ status = ClusterHealthStatus.fromValue(in.readByte());
+ activeShards = in.readVInt();
+ relocatingShards = in.readVInt();
+ initializingShards = in.readVInt();
+ unassignedShards = in.readVInt();
+ primaryActive = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(shardId);
+ out.writeByte(status.value());
+ out.writeVInt(activeShards);
+ out.writeVInt(relocatingShards);
+ out.writeVInt(initializingShards);
+ out.writeVInt(unassignedShards);
+ out.writeBoolean(primaryActive);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
new file mode 100644
index 0000000..c5a6857
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.health;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public class TransportClusterHealthAction extends TransportMasterNodeReadOperationAction<ClusterHealthRequest, ClusterHealthResponse> {
+
+ private final ClusterName clusterName;
+
+ @Inject
+ public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ ClusterName clusterName) {
+ super(settings, transportService, clusterService, threadPool);
+ this.clusterName = clusterName;
+ }
+
+ @Override
+ protected String executor() {
+ // we block here...
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClusterHealthAction.NAME;
+ }
+
+ @Override
+ protected ClusterHealthRequest newRequest() {
+ return new ClusterHealthRequest();
+ }
+
+ @Override
+ protected ClusterHealthResponse newResponse() {
+ return new ClusterHealthResponse();
+ }
+
+ @Override
+ protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) throws ElasticsearchException {
+ long endTime = System.currentTimeMillis() + request.timeout().millis();
+
+ if (request.waitForEvents() != null) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ return currentState;
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+
+ try {
+ latch.await(request.timeout().millis(), TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+
+ int waitFor = 5;
+ if (request.waitForStatus() == null) {
+ waitFor--;
+ }
+ if (request.waitForRelocatingShards() == -1) {
+ waitFor--;
+ }
+ if (request.waitForActiveShards() == -1) {
+ waitFor--;
+ }
+ if (request.waitForNodes().isEmpty()) {
+ waitFor--;
+ }
+        if (request.indices().length == 0) { // check that they actually exist in the metadata
+ waitFor--;
+ }
+ if (waitFor == 0) {
+ // no need to wait for anything
+ ClusterState clusterState = clusterService.state();
+ listener.onResponse(clusterHealth(request, clusterState));
+ return;
+ }
+ while (true) {
+ int waitForCounter = 0;
+ ClusterState clusterState = clusterService.state();
+ ClusterHealthResponse response = clusterHealth(request, clusterState);
+ if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) {
+ waitForCounter++;
+ }
+ if (request.waitForRelocatingShards() != -1 && response.getRelocatingShards() <= request.waitForRelocatingShards()) {
+ waitForCounter++;
+ }
+ if (request.waitForActiveShards() != -1 && response.getActiveShards() >= request.waitForActiveShards()) {
+ waitForCounter++;
+ }
+ if (request.indices().length > 0) {
+ try {
+ clusterState.metaData().concreteIndices(request.indices());
+ waitForCounter++;
+ } catch (IndexMissingException e) {
+                    response.status = ClusterHealthStatus.RED; // no indices, make sure it is RED
+ // missing indices, wait a bit more...
+ }
+ }
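+            // waitForNodes accepts an exact count ("3") or a comparison: ">=N", "<=N",
+            // ">N", "<N", and the equivalent function forms ge(N), le(N), gt(N), lt(N)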
+ if (!request.waitForNodes().isEmpty()) {
+ if (request.waitForNodes().startsWith(">=")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(2));
+ if (response.getNumberOfNodes() >= expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith("ge(")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1));
+ if (response.getNumberOfNodes() >= expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith("<=")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(2));
+ if (response.getNumberOfNodes() <= expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith("le(")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1));
+ if (response.getNumberOfNodes() <= expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith(">")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(1));
+ if (response.getNumberOfNodes() > expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith("gt(")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1));
+ if (response.getNumberOfNodes() > expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith("<")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(1));
+ if (response.getNumberOfNodes() < expected) {
+ waitForCounter++;
+ }
+ } else if (request.waitForNodes().startsWith("lt(")) {
+ int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1));
+ if (response.getNumberOfNodes() < expected) {
+ waitForCounter++;
+ }
+ } else {
+ int expected = Integer.parseInt(request.waitForNodes());
+ if (response.getNumberOfNodes() == expected) {
+ waitForCounter++;
+ }
+ }
+ }
+ if (waitForCounter == waitFor) {
+ listener.onResponse(response);
+ return;
+ }
+ if (System.currentTimeMillis() > endTime) {
+ response.timedOut = true;
+ listener.onResponse(response);
+ return;
+ }
+ try {
+ Thread.sleep(200);
+ } catch (InterruptedException e) {
+ response.timedOut = true;
+ listener.onResponse(response);
+ return;
+ }
+ }
+ }
+
+ private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Calculating health based on state version [{}]", clusterState.version());
+ }
+ String[] concreteIndices;
+ try {
+ concreteIndices = clusterState.metaData().concreteIndicesIgnoreMissing(request.indices());
+ } catch (IndexMissingException e) {
+ // one of the specified indices is not there - treat it as RED.
+ ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState);
+ response.status = ClusterHealthStatus.RED;
+ return response;
+ }
+
+ return new ClusterHealthResponse(clusterName.value(), concreteIndices, clusterState);
+ }
+}
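The wait loop above re-reads the cluster state every 200 ms, bumps waitForCounter once per satisfied condition, and answers as soon as every requested condition holds or the timeout passes. The waitForNodes expression accepts the prefixes ">=", "<=", ">" and "<", their function forms "ge(n)", "le(n)", "gt(n)" and "lt(n)", or a bare number for an exact node count. A minimal caller-side sketch against the 1.x Java client (the client instance, timeout and node count are illustrative, not part of this patch):

    ClusterHealthResponse health = client.admin().cluster()
            .prepareHealth()                              // all indices
            .setWaitForGreenStatus()                      // waitForStatus(GREEN)
            .setWaitForNodes(">=3")                       // parsed by the loop above
            .setTimeout(TimeValue.timeValueSeconds(30))
            .execute().actionGet();
    if (health.isTimedOut()) {
        // conditions were not met before the timeout; status may still be RED or YELLOW
    }
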
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java
new file mode 100644
index 0000000..9485395
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.hotthreads;
+
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public class NodeHotThreads extends NodeOperationResponse {
+
+ private String hotThreads;
+
+ NodeHotThreads() {
+ }
+
+ public NodeHotThreads(DiscoveryNode node, String hotThreads) {
+ super(node);
+ this.hotThreads = hotThreads;
+ }
+
+ public String getHotThreads() {
+ return this.hotThreads;
+ }
+
+ public static NodeHotThreads readNodeHotThreads(StreamInput in) throws IOException {
+ NodeHotThreads node = new NodeHotThreads();
+ node.readFrom(in);
+ return node;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ hotThreads = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(hotThreads);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java
new file mode 100644
index 0000000..6c496f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.hotthreads;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class NodesHotThreadsAction extends ClusterAction<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
+
+ public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction();
+ public static final String NAME = "cluster/nodes/hot_threads";
+
+ private NodesHotThreadsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public NodesHotThreadsResponse newResponse() {
+ return new NodesHotThreadsResponse();
+ }
+
+ @Override
+ public NodesHotThreadsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new NodesHotThreadsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java
new file mode 100644
index 0000000..faa0320
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.hotthreads;
+
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public class NodesHotThreadsRequest extends NodesOperationRequest<NodesHotThreadsRequest> {
+
+ int threads = 3;
+ String type = "cpu";
+ TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS);
+ int snapshots = 10;
+
+ /**
+ * Get hot threads from nodes based on the node ids specified. If none are passed, hot
+ * threads for all nodes are returned.
+ */
+ public NodesHotThreadsRequest(String... nodesIds) {
+ super(nodesIds);
+ }
+
+ public int threads() {
+ return this.threads;
+ }
+
+ public NodesHotThreadsRequest threads(int threads) {
+ this.threads = threads;
+ return this;
+ }
+
+ public NodesHotThreadsRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public NodesHotThreadsRequest interval(TimeValue interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ public TimeValue interval() {
+ return this.interval;
+ }
+
+ public int snapshots() {
+ return this.snapshots;
+ }
+
+ public NodesHotThreadsRequest snapshots(int snapshots) {
+ this.snapshots = snapshots;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ threads = in.readInt();
+ type = in.readString();
+ interval = TimeValue.readTimeValue(in);
+ snapshots = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(threads);
+ out.writeString(type);
+ interval.writeTo(out);
+ out.writeInt(snapshots);
+ }
+}
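Each setter above returns this, so a request is normally built fluently. A configuration sketch (node ids and values illustrative; the type values understood by HotThreads in this tree are "cpu", "wait" and "block"):

    NodesHotThreadsRequest request = new NodesHotThreadsRequest("node1", "node2")
            .threads(5)                               // report the 5 busiest threads
            .type("wait")                             // sample wait time instead of cpu time
            .interval(TimeValue.timeValueSeconds(1))  // sampling interval per snapshot
            .snapshots(10);                           // stack snapshots taken per thread
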
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java
new file mode 100644
index 0000000..6e2065b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.hotthreads;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ */
+public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
+
+ public NodesHotThreadsRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new NodesHotThreadsRequest());
+ }
+
+ public NodesHotThreadsRequestBuilder setThreads(int threads) {
+ request.threads(threads);
+ return this;
+ }
+
+ public NodesHotThreadsRequestBuilder setType(String type) {
+ request.type(type);
+ return this;
+ }
+
+ public NodesHotThreadsRequestBuilder setInterval(TimeValue interval) {
+ request.interval(interval);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<NodesHotThreadsResponse> listener) {
+ ((ClusterAdminClient) client).nodesHotThreads(request, listener);
+ }
+}
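Most callers go through this builder rather than constructing the request by hand; doExecute() above hands the request to ClusterAdminClient.nodesHotThreads(). A sketch assuming a connected 1.x client and its prepareNodesHotThreads() entry point:

    NodesHotThreadsResponse response = client.admin().cluster()
            .prepareNodesHotThreads()                     // no ids: all nodes
            .setThreads(3)
            .setInterval(TimeValue.timeValueMillis(500))
            .execute().actionGet();
    for (NodeHotThreads node : response.getNodes()) {
        System.out.println(node.getNode().name());        // which node the report came from
        System.out.println(node.getHotThreads());         // the formatted thread report
    }
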
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java
new file mode 100644
index 0000000..2b04435
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.hotthreads;
+
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public class NodesHotThreadsResponse extends NodesOperationResponse<NodeHotThreads> {
+
+ NodesHotThreadsResponse() {
+ }
+
+ public NodesHotThreadsResponse(ClusterName clusterName, NodeHotThreads[] nodes) {
+ super(clusterName, nodes);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeHotThreads[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeHotThreads.readNodeHotThreads(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeHotThreads node : nodes) {
+ node.writeTo(out);
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
new file mode 100644
index 0000000..184d3d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.hotthreads;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.jvm.HotThreads;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ *
+ */
+public class TransportNodesHotThreadsAction extends TransportNodesOperationAction<NodesHotThreadsRequest, NodesHotThreadsResponse, TransportNodesHotThreadsAction.NodeRequest, NodeHotThreads> {
+
+ @Inject
+ public TransportNodesHotThreadsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ ClusterService clusterService, TransportService transportService) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return NodesHotThreadsAction.NAME;
+ }
+
+ @Override
+ protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, AtomicReferenceArray responses) {
+ final List<NodeHotThreads> nodes = Lists.newArrayList();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodeHotThreads) {
+ nodes.add((NodeHotThreads) resp);
+ }
+ }
+ return new NodesHotThreadsResponse(clusterName, nodes.toArray(new NodeHotThreads[nodes.size()]));
+ }
+
+ @Override
+ protected NodesHotThreadsRequest newRequest() {
+ return new NodesHotThreadsRequest();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest() {
+ return new NodeRequest();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest(String nodeId, NodesHotThreadsRequest request) {
+ return new NodeRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodeHotThreads newNodeResponse() {
+ return new NodeHotThreads();
+ }
+
+ @Override
+ protected NodeHotThreads nodeOperation(NodeRequest request) throws ElasticsearchException {
+ HotThreads hotThreads = new HotThreads()
+ .busiestThreads(request.request.threads)
+ .type(request.request.type)
+ .interval(request.request.interval)
+ .threadElementsSnapshotCount(request.request.snapshots);
+ try {
+ return new NodeHotThreads(clusterService.localNode(), hotThreads.detect());
+ } catch (Exception e) {
+ throw new ElasticsearchException("failed to detect hot threads", e);
+ }
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ static class NodeRequest extends NodeOperationRequest {
+
+ NodesHotThreadsRequest request;
+
+ NodeRequest() {
+ }
+
+ NodeRequest(String nodeId, NodesHotThreadsRequest request) {
+ super(request, nodeId);
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = new NodesHotThreadsRequest();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ }
+ }
+} \ No newline at end of file
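nodeOperation() above is the per-node worker: the coordinating node fans a NodeRequest out to each target, every node runs its own sampler, and newResponse() keeps whatever came back (failures are dropped silently because accumulateExceptions() returns false). The sampler can be exercised the same way in isolation; a sketch mirroring the parameters used above:

    HotThreads hotThreads = new HotThreads()
            .busiestThreads(3)
            .type("cpu")
            .interval(TimeValue.timeValueMillis(500))
            .threadElementsSnapshotCount(10);
    try {
        String report = hotThreads.detect();              // blocks while sampling runs
        System.out.println(report);
    } catch (Exception e) {
        // detect() declares a checked Exception in this tree
    }
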
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
new file mode 100644
index 0000000..462f51f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Build;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpInfo;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.monitor.network.NetworkInfo;
+import org.elasticsearch.monitor.os.OsInfo;
+import org.elasticsearch.monitor.process.ProcessInfo;
+import org.elasticsearch.threadpool.ThreadPoolInfo;
+import org.elasticsearch.transport.TransportInfo;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Node information (static, does not change over time).
+ */
+public class NodeInfo extends NodeOperationResponse {
+
+ @Nullable
+ private ImmutableMap<String, String> serviceAttributes;
+
+ private Version version;
+ private Build build;
+
+ @Nullable
+ private Settings settings;
+
+ @Nullable
+ private OsInfo os;
+
+ @Nullable
+ private ProcessInfo process;
+
+ @Nullable
+ private JvmInfo jvm;
+
+ @Nullable
+ private ThreadPoolInfo threadPool;
+
+ @Nullable
+ private NetworkInfo network;
+
+ @Nullable
+ private TransportInfo transport;
+
+ @Nullable
+ private HttpInfo http;
+
+ @Nullable
+ private PluginsInfo plugins;
+
+ NodeInfo() {
+ }
+
+ public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable ImmutableMap<String, String> serviceAttributes, @Nullable Settings settings,
+ @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool, @Nullable NetworkInfo network,
+ @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsInfo plugins) {
+ super(node);
+ this.version = version;
+ this.build = build;
+ this.serviceAttributes = serviceAttributes;
+ this.settings = settings;
+ this.os = os;
+ this.process = process;
+ this.jvm = jvm;
+ this.threadPool = threadPool;
+ this.network = network;
+ this.transport = transport;
+ this.http = http;
+ this.plugins = plugins;
+ }
+
+ /**
+ * The system's hostname; <code>null</code> if resolution failed with an UnknownHostException.
+ */
+ @Nullable
+ public String getHostname() {
+ return getNode().getHostName();
+ }
+
+ /**
+ * The current ES version
+ */
+ public Version getVersion() {
+ return version;
+ }
+
+ /**
+ * The build version of the node.
+ */
+ public Build getBuild() {
+ return this.build;
+ }
+
+ /**
+ * The service attributes of the node.
+ */
+ @Nullable
+ public ImmutableMap<String, String> getServiceAttributes() {
+ return this.serviceAttributes;
+ }
+
+ /**
+ * The settings of the node.
+ */
+ @Nullable
+ public Settings getSettings() {
+ return this.settings;
+ }
+
+ /**
+ * Operating System level information.
+ */
+ @Nullable
+ public OsInfo getOs() {
+ return this.os;
+ }
+
+ /**
+ * Process level information.
+ */
+ @Nullable
+ public ProcessInfo getProcess() {
+ return process;
+ }
+
+ /**
+ * JVM level information.
+ */
+ @Nullable
+ public JvmInfo getJvm() {
+ return jvm;
+ }
+
+ @Nullable
+ public ThreadPoolInfo getThreadPool() {
+ return this.threadPool;
+ }
+
+ /**
+ * Network level information.
+ */
+ @Nullable
+ public NetworkInfo getNetwork() {
+ return network;
+ }
+
+ @Nullable
+ public TransportInfo getTransport() {
+ return transport;
+ }
+
+ @Nullable
+ public HttpInfo getHttp() {
+ return http;
+ }
+
+ @Nullable
+ public PluginsInfo getPlugins() {
+ return this.plugins;
+ }
+
+ public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
+ NodeInfo nodeInfo = new NodeInfo();
+ nodeInfo.readFrom(in);
+ return nodeInfo;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ version = Version.readVersion(in);
+ build = Build.readBuild(in);
+ if (in.readBoolean()) {
+ ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ builder.put(in.readString(), in.readString());
+ }
+ serviceAttributes = builder.build();
+ }
+ if (in.readBoolean()) {
+ settings = ImmutableSettings.readSettingsFromStream(in);
+ }
+ if (in.readBoolean()) {
+ os = OsInfo.readOsInfo(in);
+ }
+ if (in.readBoolean()) {
+ process = ProcessInfo.readProcessInfo(in);
+ }
+ if (in.readBoolean()) {
+ jvm = JvmInfo.readJvmInfo(in);
+ }
+ if (in.readBoolean()) {
+ threadPool = ThreadPoolInfo.readThreadPoolInfo(in);
+ }
+ if (in.readBoolean()) {
+ network = NetworkInfo.readNetworkInfo(in);
+ }
+ if (in.readBoolean()) {
+ transport = TransportInfo.readTransportInfo(in);
+ }
+ if (in.readBoolean()) {
+ http = HttpInfo.readHttpInfo(in);
+ }
+ if (in.readBoolean()) {
+ plugins = PluginsInfo.readPluginsInfo(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(version.id);
+ Build.writeBuild(build, out);
+ if (getServiceAttributes() == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(serviceAttributes.size());
+ for (Map.Entry<String, String> entry : serviceAttributes.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ }
+ if (settings == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ ImmutableSettings.writeSettingsToStream(settings, out);
+ }
+ if (os == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ os.writeTo(out);
+ }
+ if (process == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ process.writeTo(out);
+ }
+ if (jvm == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ jvm.writeTo(out);
+ }
+ if (threadPool == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ threadPool.writeTo(out);
+ }
+ if (network == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ network.writeTo(out);
+ }
+ if (transport == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ transport.writeTo(out);
+ }
+ if (http == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ http.writeTo(out);
+ }
+ if (plugins == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ plugins.writeTo(out);
+ }
+ }
+
+}
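Every nullable section in writeTo() above is preceded by a boolean presence flag, and readFrom() consumes the flags in exactly the same order, so the two methods have to remain field-for-field mirrors. The pattern in miniature (writeOptional is a hypothetical helper for illustration, not part of this patch):

    static void writeOptional(StreamOutput out, @Nullable Streamable part) throws IOException {
        if (part == null) {
            out.writeBoolean(false);      // the reader skips this section
        } else {
            out.writeBoolean(true);       // the reader deserializes this section
            part.writeTo(out);
        }
    }
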
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java
new file mode 100644
index 0000000..8c187fe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class NodesInfoAction extends ClusterAction<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
+
+ public static final NodesInfoAction INSTANCE = new NodesInfoAction();
+ public static final String NAME = "cluster/nodes/info";
+
+ private NodesInfoAction() {
+ super(NAME);
+ }
+
+ @Override
+ public NodesInfoResponse newResponse() {
+ return new NodesInfoResponse();
+ }
+
+ @Override
+ public NodesInfoRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new NodesInfoRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
new file mode 100644
index 0000000..b9ab8d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to get node (cluster) level information.
+ */
+public class NodesInfoRequest extends NodesOperationRequest<NodesInfoRequest> {
+
+ private boolean settings = true;
+ private boolean os = true;
+ private boolean process = true;
+ private boolean jvm = true;
+ private boolean threadPool = true;
+ private boolean network = true;
+ private boolean transport = true;
+ private boolean http = true;
+ private boolean plugin = true;
+
+ public NodesInfoRequest() {
+ }
+
+ /**
+ * Get information from nodes based on the node ids specified. If none are passed, information
+ * for all nodes will be returned.
+ */
+ public NodesInfoRequest(String... nodesIds) {
+ super(nodesIds);
+ }
+
+ /**
+ * Clears all info flags.
+ */
+ public NodesInfoRequest clear() {
+ settings = false;
+ os = false;
+ process = false;
+ jvm = false;
+ threadPool = false;
+ network = false;
+ transport = false;
+ http = false;
+ plugin = false;
+ return this;
+ }
+
+ /**
+ * Sets the request to return all the data.
+ */
+ public NodesInfoRequest all() {
+ settings = true;
+ os = true;
+ process = true;
+ jvm = true;
+ threadPool = true;
+ network = true;
+ transport = true;
+ http = true;
+ plugin = true;
+ return this;
+ }
+
+ /**
+ * Should the node settings be returned.
+ */
+ public boolean settings() {
+ return this.settings;
+ }
+
+ /**
+ * Should the node settings be returned.
+ */
+ public NodesInfoRequest settings(boolean settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Should the node OS be returned.
+ */
+ public boolean os() {
+ return this.os;
+ }
+
+ /**
+ * Should the node OS be returned.
+ */
+ public NodesInfoRequest os(boolean os) {
+ this.os = os;
+ return this;
+ }
+
+ /**
+ * Should the node Process be returned.
+ */
+ public boolean process() {
+ return this.process;
+ }
+
+ /**
+ * Should the node Process be returned.
+ */
+ public NodesInfoRequest process(boolean process) {
+ this.process = process;
+ return this;
+ }
+
+ /**
+ * Should the node JVM be returned.
+ */
+ public boolean jvm() {
+ return this.jvm;
+ }
+
+ /**
+ * Should the node JVM be returned.
+ */
+ public NodesInfoRequest jvm(boolean jvm) {
+ this.jvm = jvm;
+ return this;
+ }
+
+ /**
+ * Should the node Thread Pool info be returned.
+ */
+ public boolean threadPool() {
+ return this.threadPool;
+ }
+
+ /**
+ * Should the node Thread Pool info be returned.
+ */
+ public NodesInfoRequest threadPool(boolean threadPool) {
+ this.threadPool = threadPool;
+ return this;
+ }
+
+ /**
+ * Should the node Network be returned.
+ */
+ public boolean network() {
+ return this.network;
+ }
+
+ /**
+ * Should the node Network be returned.
+ */
+ public NodesInfoRequest network(boolean network) {
+ this.network = network;
+ return this;
+ }
+
+ /**
+ * Should the node Transport be returned.
+ */
+ public boolean transport() {
+ return this.transport;
+ }
+
+ /**
+ * Should the node Transport be returned.
+ */
+ public NodesInfoRequest transport(boolean transport) {
+ this.transport = transport;
+ return this;
+ }
+
+ /**
+ * Should the node HTTP be returned.
+ */
+ public boolean http() {
+ return this.http;
+ }
+
+ /**
+ * Should the node HTTP be returned.
+ */
+ public NodesInfoRequest http(boolean http) {
+ this.http = http;
+ return this;
+ }
+
+ /**
+ * Should information about plugins be returned.
+ * @param plugin true if plugin information should be returned
+ * @return The request
+ */
+ public NodesInfoRequest plugin(boolean plugin) {
+ this.plugin = plugin;
+ return this;
+ }
+
+ /**
+ * @return true if information about plugins is requested
+ */
+ public boolean plugin() {
+ return plugin;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ settings = in.readBoolean();
+ os = in.readBoolean();
+ process = in.readBoolean();
+ jvm = in.readBoolean();
+ threadPool = in.readBoolean();
+ network = in.readBoolean();
+ transport = in.readBoolean();
+ http = in.readBoolean();
+ plugin = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(settings);
+ out.writeBoolean(os);
+ out.writeBoolean(process);
+ out.writeBoolean(jvm);
+ out.writeBoolean(threadPool);
+ out.writeBoolean(network);
+ out.writeBoolean(transport);
+ out.writeBoolean(http);
+ out.writeBoolean(plugin);
+ }
+}
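All flags default to true, so clear() exists to switch everything off before opting back in selectively. A narrow-request sketch (node ids illustrative):

    NodesInfoRequest request = new NodesInfoRequest("node1", "node2")
            .clear()       // drop the all-true defaults
            .jvm(true)     // then ask only for JVM info
            .os(true);     // and OS info
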
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
new file mode 100644
index 0000000..f119522
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ *
+ */
+public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
+
+ public NodesInfoRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new NodesInfoRequest());
+ }
+
+ /**
+ * Clears all info flags.
+ */
+ public NodesInfoRequestBuilder clear() {
+ request.clear();
+ return this;
+ }
+
+ /**
+ * Sets the request to return all the data.
+ */
+ public NodesInfoRequestBuilder all() {
+ request.all();
+ return this;
+ }
+
+ /**
+ * Should the node settings be returned.
+ */
+ public NodesInfoRequestBuilder setSettings(boolean settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Should the node OS info be returned.
+ */
+ public NodesInfoRequestBuilder setOs(boolean os) {
+ request.os(os);
+ return this;
+ }
+
+ /**
+ * Should the node OS process be returned.
+ */
+ public NodesInfoRequestBuilder setProcess(boolean process) {
+ request.process(process);
+ return this;
+ }
+
+ /**
+ * Should the node JVM info be returned.
+ */
+ public NodesInfoRequestBuilder setJvm(boolean jvm) {
+ request.jvm(jvm);
+ return this;
+ }
+
+ /**
+ * Should the node thread pool info be returned.
+ */
+ public NodesInfoRequestBuilder setThreadPool(boolean threadPool) {
+ request.threadPool(threadPool);
+ return this;
+ }
+
+ /**
+ * Should the node Network info be returned.
+ */
+ public NodesInfoRequestBuilder setNetwork(boolean network) {
+ request.network(network);
+ return this;
+ }
+
+ /**
+ * Should the node Transport info be returned.
+ */
+ public NodesInfoRequestBuilder setTransport(boolean transport) {
+ request.transport(transport);
+ return this;
+ }
+
+ /**
+ * Should the node HTTP info be returned.
+ */
+ public NodesInfoRequestBuilder setHttp(boolean http) {
+ request.http(http);
+ return this;
+ }
+
+ public NodesInfoRequestBuilder setPlugin(boolean plugin) {
+ request().plugin(plugin);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<NodesInfoResponse> listener) {
+ ((ClusterAdminClient) client).nodesInfo(request, listener);
+ }
+}
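As with hot threads, doExecute() above delegates to the cluster admin client, which also offers a prepareNodesInfo() entry point in this version. A sketch (client instance illustrative):

    NodesInfoResponse info = client.admin().cluster()
            .prepareNodesInfo()           // no ids: all nodes
            .clear()
            .setJvm(true)
            .setPlugin(true)
            .execute().actionGet();
    for (NodeInfo node : info.getNodes()) {
        System.out.println(node.getNode().name() + " -> " + node.getVersion());
    }
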
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java
new file mode 100644
index 0000000..0eb8ac2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class NodesInfoResponse extends NodesOperationResponse<NodeInfo> implements ToXContent {
+
+ private SettingsFilter settingsFilter;
+
+ public NodesInfoResponse() {
+ }
+
+ public NodesInfoResponse(ClusterName clusterName, NodeInfo[] nodes) {
+ super(clusterName, nodes);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeInfo[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeInfo.readNodeInfo(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeInfo node : nodes) {
+ node.writeTo(out);
+ }
+ }
+
+ public NodesInfoResponse settingsFilter(SettingsFilter settingsFilter) {
+ this.settingsFilter = settingsFilter;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("cluster_name", getClusterName().value(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.startObject("nodes");
+ for (NodeInfo nodeInfo : this) {
+ builder.startObject(nodeInfo.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.field("name", nodeInfo.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("transport_address", nodeInfo.getNode().address().toString());
+ builder.field("host", nodeInfo.getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("ip", nodeInfo.getNode().getHostAddress(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.field("version", nodeInfo.getVersion());
+ builder.field("build", nodeInfo.getBuild().hashShort());
+
+ if (nodeInfo.getServiceAttributes() != null) {
+ for (Map.Entry<String, String> nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) {
+ builder.field(nodeAttribute.getKey(), nodeAttribute.getValue(), XContentBuilder.FieldCaseConversion.NONE);
+ }
+ }
+
+ if (!nodeInfo.getNode().attributes().isEmpty()) {
+ builder.startObject("attributes");
+ for (Map.Entry<String, String> attr : nodeInfo.getNode().attributes().entrySet()) {
+ builder.field(attr.getKey(), attr.getValue(), XContentBuilder.FieldCaseConversion.NONE);
+ }
+ builder.endObject();
+ }
+
+
+ if (nodeInfo.getSettings() != null) {
+ builder.startObject("settings");
+ Settings settings = settingsFilter != null ? settingsFilter.filterSettings(nodeInfo.getSettings()) : nodeInfo.getSettings();
+ settings.toXContent(builder, params);
+ builder.endObject();
+ }
+
+ if (nodeInfo.getOs() != null) {
+ nodeInfo.getOs().toXContent(builder, params);
+ }
+ if (nodeInfo.getProcess() != null) {
+ nodeInfo.getProcess().toXContent(builder, params);
+ }
+ if (nodeInfo.getJvm() != null) {
+ nodeInfo.getJvm().toXContent(builder, params);
+ }
+ if (nodeInfo.getThreadPool() != null) {
+ nodeInfo.getThreadPool().toXContent(builder, params);
+ }
+ if (nodeInfo.getNetwork() != null) {
+ nodeInfo.getNetwork().toXContent(builder, params);
+ }
+ if (nodeInfo.getTransport() != null) {
+ nodeInfo.getTransport().toXContent(builder, params);
+ }
+ if (nodeInfo.getHttp() != null) {
+ nodeInfo.getHttp().toXContent(builder, params);
+ }
+ if (nodeInfo.getPlugins() != null) {
+ nodeInfo.getPlugins().toXContent(builder, params);
+ }
+
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java
new file mode 100644
index 0000000..b1b2fc2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+public class PluginInfo implements Streamable, Serializable, ToXContent {
+ public static final String DESCRIPTION_NOT_AVAILABLE = "No description found.";
+ public static final String VERSION_NOT_AVAILABLE = "NA";
+
+ static final class Fields {
+ static final XContentBuilderString NAME = new XContentBuilderString("name");
+ static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
+ static final XContentBuilderString URL = new XContentBuilderString("url");
+ static final XContentBuilderString JVM = new XContentBuilderString("jvm");
+ static final XContentBuilderString SITE = new XContentBuilderString("site");
+ static final XContentBuilderString VERSION = new XContentBuilderString("version");
+ }
+
+ private String name;
+ private String description;
+ private boolean site;
+ private boolean jvm;
+ private String version;
+
+ public PluginInfo() {
+ }
+
+ /**
+ * Information about plugins
+ *
+ * @param name Its name
+ * @param description Its description
+ * @param site true if it's a site plugin
+ * @param jvm true if it's a jvm plugin
+ * @param version Version number if applicable (NA otherwise)
+ */
+ public PluginInfo(String name, String description, boolean site, boolean jvm, String version) {
+ this.name = name;
+ this.description = description;
+ this.site = site;
+ this.jvm = jvm;
+ if (Strings.hasText(version)) {
+ this.version = version;
+ } else {
+ this.version = VERSION_NOT_AVAILABLE;
+ }
+ }
+
+ /**
+ * @return Plugin's name
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return Plugin's description if any
+ */
+ public String getDescription() {
+ return description;
+ }
+
+ /**
+ * @return true if it's a site plugin
+ */
+ public boolean isSite() {
+ return site;
+ }
+
+ /**
+ * @return true if it's a plugin running in the jvm
+ */
+ public boolean isJvm() {
+ return jvm;
+ }
+
+ /**
+ * We compute the URL for sites: "/_plugin/" + name + "/"
+ *
+ * @return relative URL for site plugin
+ */
+ public String getUrl() {
+ if (site) {
+ return ("/_plugin/" + name + "/");
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * @return Version number for the plugin
+ */
+ public String getVersion() {
+ return version;
+ }
+
+ public static PluginInfo readPluginInfo(StreamInput in) throws IOException {
+ PluginInfo info = new PluginInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.description = in.readString();
+ this.site = in.readBoolean();
+ this.jvm = in.readBoolean();
+ if (in.getVersion().onOrAfter(Version.V_1_0_0_RC2)) {
+ this.version = in.readString();
+ } else {
+ this.version = VERSION_NOT_AVAILABLE;
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(description);
+ out.writeBoolean(site);
+ out.writeBoolean(jvm);
+ if (out.getVersion().onOrAfter(Version.V_1_0_0_RC2)) {
+ out.writeString(version);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(Fields.NAME, name);
+ builder.field(Fields.VERSION, version);
+ builder.field(Fields.DESCRIPTION, description);
+ if (site) {
+ builder.field(Fields.URL, getUrl());
+ }
+ builder.field(Fields.JVM, jvm);
+ builder.field(Fields.SITE, site);
+ builder.endObject();
+
+ return builder;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ PluginInfo that = (PluginInfo) o;
+
+ if (!name.equals(that.name)) return false;
+ if (version != null ? !version.equals(that.version) : that.version != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return name.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ final StringBuffer sb = new StringBuffer("PluginInfo{");
+ sb.append("name='").append(name).append('\'');
+ sb.append(", description='").append(description).append('\'');
+ sb.append(", site=").append(site);
+ sb.append(", jvm=").append(jvm);
+ sb.append(", version='").append(version).append('\'');
+ sb.append('}');
+ return sb.toString();
+ }
+}
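Two behaviors above are easy to miss: a blank or null version collapses to the "NA" sentinel, and a URL exists only for site plugins (computed from the name, never stored). A sketch with illustrative plugin names:

    PluginInfo site = new PluginInfo("kopf", "cluster admin UI", true, false, "1.0");
    assert "/_plugin/kopf/".equals(site.getUrl());   // site plugins are served under /_plugin/<name>/
    PluginInfo jvm = new PluginInfo("analysis-icu", "ICU analysis", false, true, null);
    assert "NA".equals(jvm.getVersion());            // falls back to VERSION_NOT_AVAILABLE
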
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfo.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfo.java
new file mode 100644
index 0000000..c83e94a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfo.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+public class PluginsInfo implements Streamable, Serializable, ToXContent {
+ static final class Fields {
+ static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
+ }
+
+ private List<PluginInfo> infos;
+
+ public PluginsInfo() {
+ infos = new ArrayList<PluginInfo>();
+ }
+
+ public PluginsInfo(int size) {
+ infos = new ArrayList<PluginInfo>(size);
+ }
+
+ public List<PluginInfo> getInfos() {
+ return infos;
+ }
+
+ public void add(PluginInfo info) {
+ infos.add(info);
+ }
+
+ public static PluginsInfo readPluginsInfo(StreamInput in) throws IOException {
+ PluginsInfo infos = new PluginsInfo();
+ infos.readFrom(in);
+ return infos;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int pluginsSize = in.readInt();
+ for (int i = 0; i < pluginsSize; i++) {
+ infos.add(PluginInfo.readPluginInfo(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeInt(infos.size());
+ for (PluginInfo plugin : infos) {
+ plugin.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray(Fields.PLUGINS);
+ for (PluginInfo pluginInfo : infos) {
+ pluginInfo.toXContent(builder, params);
+ }
+ builder.endArray();
+
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
new file mode 100644
index 0000000..735ea41
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.info;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ *
+ */
+public class TransportNodesInfoAction extends TransportNodesOperationAction<NodesInfoRequest, NodesInfoResponse, TransportNodesInfoAction.NodeInfoRequest, NodeInfo> {
+
+ private final NodeService nodeService;
+
+ @Inject
+ public TransportNodesInfoAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ ClusterService clusterService, TransportService transportService,
+ NodeService nodeService) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ this.nodeService = nodeService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return NodesInfoAction.NAME;
+ }
+
+ @Override
+ protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, AtomicReferenceArray responses) {
+ final List<NodeInfo> nodesInfos = new ArrayList<NodeInfo>();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodeInfo) {
+ nodesInfos.add((NodeInfo) resp);
+ }
+ }
+ return new NodesInfoResponse(clusterName, nodesInfos.toArray(new NodeInfo[nodesInfos.size()]));
+ }
+
+ @Override
+ protected NodesInfoRequest newRequest() {
+ return new NodesInfoRequest();
+ }
+
+ @Override
+ protected NodeInfoRequest newNodeRequest() {
+ return new NodeInfoRequest();
+ }
+
+ @Override
+ protected NodeInfoRequest newNodeRequest(String nodeId, NodesInfoRequest request) {
+ return new NodeInfoRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodeInfo newNodeResponse() {
+ return new NodeInfo();
+ }
+
+ @Override
+ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) throws ElasticsearchException {
+ NodesInfoRequest request = nodeRequest.request;
+ return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
+ request.network(), request.transport(), request.http(), request.plugin());
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ static class NodeInfoRequest extends NodeOperationRequest {
+
+ NodesInfoRequest request;
+
+ NodeInfoRequest() {
+ }
+
+ NodeInfoRequest(String nodeId, NodesInfoRequest request) {
+ super(request, nodeId);
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = new NodesInfoRequest();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartAction.java
new file mode 100644
index 0000000..9846fcb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.restart;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class NodesRestartAction extends ClusterAction<NodesRestartRequest, NodesRestartResponse, NodesRestartRequestBuilder> {
+
+ public static final NodesRestartAction INSTANCE = new NodesRestartAction();
+ public static final String NAME = "cluster/nodes/restart";
+
+ private NodesRestartAction() {
+ super(NAME);
+ }
+
+ @Override
+ public NodesRestartResponse newResponse() {
+ return new NodesRestartResponse();
+ }
+
+ @Override
+ public NodesRestartRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new NodesRestartRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequest.java
new file mode 100644
index 0000000..f72e404
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.restart;
+
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+
+/**
+ * A request to restart one or more nodes (or the whole cluster).
+ */
+public class NodesRestartRequest extends NodesOperationRequest<NodesRestartRequest> {
+
+ TimeValue delay = TimeValue.timeValueSeconds(1);
+
+ protected NodesRestartRequest() {
+ }
+
+ /**
+ * Restarts the nodes with the ids specified. If none are passed, <b>all</b>
+ * nodes will be restarted.
+ */
+ public NodesRestartRequest(String... nodesIds) {
+ super(nodesIds);
+ }
+
+ /**
+ * The delay for the restart to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesRestartRequest delay(TimeValue delay) {
+ this.delay = delay;
+ return this;
+ }
+
+ /**
+ * The delay for the restart to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesRestartRequest delay(String delay) {
+ return delay(TimeValue.parseTimeValue(delay, null));
+ }
+
+ public TimeValue delay() {
+ return this.delay;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ delay = readTimeValue(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ delay.writeTo(out);
+ }
+}
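
A minimal usage sketch for the request API above. The node ids are hypothetical; the methods used are exactly the ones defined in this file:

import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest;
import org.elasticsearch.common.unit.TimeValue;

public class RestartRequestSketch {

    static NodesRestartRequest build() {
        // Restart two (hypothetical) nodes after a 5 second delay;
        // .delay("5s") would parse to the same TimeValue.
        return new NodesRestartRequest("node-1", "node-2")
                .delay(TimeValue.timeValueSeconds(5));
    }
}
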
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequestBuilder.java
new file mode 100644
index 0000000..d51691d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartRequestBuilder.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.restart;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ *
+ */
+public class NodesRestartRequestBuilder extends NodesOperationRequestBuilder<NodesRestartRequest, NodesRestartResponse, NodesRestartRequestBuilder> {
+
+ public NodesRestartRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new NodesRestartRequest());
+ }
+
+ /**
+ * The delay for the restart to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesRestartRequestBuilder setDelay(TimeValue delay) {
+ request.delay(delay);
+ return this;
+ }
+
+ /**
+ * The delay for the restart to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesRestartRequestBuilder setDelay(String delay) {
+ request.delay(delay);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<NodesRestartResponse> listener) {
+ ((ClusterAdminClient) client).nodesRestart(request, listener);
+ }
+}
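
A minimal usage sketch for the builder above, assuming a ClusterAdminClient obtained from an already-built client (the constructor casts it to the internal implementation); execute(ActionListener) is inherited from the request-builder base class:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartResponse;
import org.elasticsearch.client.ClusterAdminClient;

public class RestartBuilderSketch {

    // Passing no node ids means the whole cluster, per NodesRestartRequest.
    static void restartAll(ClusterAdminClient cluster) {
        new NodesRestartRequestBuilder(cluster)
                .setDelay("5s")
                .execute(new ActionListener<NodesRestartResponse>() {
                    public void onResponse(NodesRestartResponse response) {
                        System.out.println("restart requested");
                    }

                    public void onFailure(Throwable e) {
                        e.printStackTrace();
                    }
                });
    }
}
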
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartResponse.java
new file mode 100644
index 0000000..27d8686
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/NodesRestartResponse.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.restart;
+
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NodesRestartResponse extends NodesOperationResponse<NodesRestartResponse.NodeRestartResponse> {
+
+ NodesRestartResponse() {
+ }
+
+ public NodesRestartResponse(ClusterName clusterName, NodeRestartResponse[] nodes) {
+ super(clusterName, nodes);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeRestartResponse[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeRestartResponse.readNodeRestartResponse(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeRestartResponse node : nodes) {
+ node.writeTo(out);
+ }
+ }
+
+ public static class NodeRestartResponse extends NodeOperationResponse {
+
+ NodeRestartResponse() {
+ }
+
+ public NodeRestartResponse(DiscoveryNode node) {
+ super(node);
+ }
+
+ public static NodeRestartResponse readNodeRestartResponse(StreamInput in) throws IOException {
+ NodeRestartResponse res = new NodeRestartResponse();
+ res.readFrom(in);
+ return res;
+ }
+ }
+} \ No newline at end of file
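
The readFrom/writeTo pair above uses a length-prefixed array encoding together with the static-reader idiom (readNodeRestartResponse). A JDK-only sketch of the same round-trip, with DataInputStream/DataOutputStream standing in for StreamInput/StreamOutput and a fixed-width writeInt standing in for the variable-length writeVInt:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ArrayStreamSketch {

    static void write(DataOutputStream out, String[] nodes) throws IOException {
        out.writeInt(nodes.length);      // the role of writeVInt(nodes.length)
        for (String node : nodes) {
            out.writeUTF(node);          // the role of node.writeTo(out)
        }
    }

    static String[] read(DataInputStream in) throws IOException {
        String[] nodes = new String[in.readInt()];
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = in.readUTF();     // the role of readNodeRestartResponse(in)
        }
        return nodes;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), new String[]{"node-1", "node-2"});
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(read(in).length); // 2
    }
}
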
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/TransportNodesRestartAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/TransportNodesRestartAction.java
new file mode 100644
index 0000000..1572abf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/restart/TransportNodesRestartAction.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.restart;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+
+/**
+ *
+ */
+public class TransportNodesRestartAction extends TransportNodesOperationAction<NodesRestartRequest, NodesRestartResponse, TransportNodesRestartAction.NodeRestartRequest, NodesRestartResponse.NodeRestartResponse> {
+
+ private final Node node;
+
+ private final boolean disabled;
+
+ private AtomicBoolean restartRequested = new AtomicBoolean();
+
+ @Inject
+ public TransportNodesRestartAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ ClusterService clusterService, TransportService transportService,
+ Node node) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ this.node = node;
+ disabled = componentSettings.getAsBoolean("disabled", false);
+ }
+
+ @Override
+ protected void doExecute(NodesRestartRequest nodesRestartRequest, ActionListener<NodesRestartResponse> listener) {
+ listener.onFailure(new ElasticsearchIllegalStateException("restart is disabled (for now) ...."));
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return NodesRestartAction.NAME;
+ }
+
+ @Override
+ protected NodesRestartResponse newResponse(NodesRestartRequest nodesRestartRequest, AtomicReferenceArray responses) {
+ final List<NodesRestartResponse.NodeRestartResponse> nodeRestartResponses = newArrayList();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodesRestartResponse.NodeRestartResponse) {
+ nodeRestartResponses.add((NodesRestartResponse.NodeRestartResponse) resp);
+ }
+ }
+ return new NodesRestartResponse(clusterName, nodeRestartResponses.toArray(new NodesRestartResponse.NodeRestartResponse[nodeRestartResponses.size()]));
+ }
+
+ @Override
+ protected NodesRestartRequest newRequest() {
+ return new NodesRestartRequest();
+ }
+
+ @Override
+ protected NodeRestartRequest newNodeRequest() {
+ return new NodeRestartRequest();
+ }
+
+ @Override
+ protected NodeRestartRequest newNodeRequest(String nodeId, NodesRestartRequest request) {
+ return new NodeRestartRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodesRestartResponse.NodeRestartResponse newNodeResponse() {
+ return new NodesRestartResponse.NodeRestartResponse();
+ }
+
+ @Override
+ protected NodesRestartResponse.NodeRestartResponse nodeOperation(NodeRestartRequest request) throws ElasticsearchException {
+ if (disabled) {
+ throw new ElasticsearchIllegalStateException("Restart is disabled");
+ }
+ if (!restartRequested.compareAndSet(false, true)) {
+ return new NodesRestartResponse.NodeRestartResponse(clusterService.localNode());
+ }
+ logger.info("Restarting in [{}]", request.delay);
+ threadPool.schedule(request.delay, ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ boolean restartWithWrapper = false;
+ if (System.getProperty("elasticsearch-service") != null) {
+ try {
+ Class wrapperManager = settings.getClassLoader().loadClass("org.tanukisoftware.wrapper.WrapperManager");
+ logger.info("Initiating requested restart (using service)");
+ wrapperManager.getMethod("restartAndReturn").invoke(null);
+ restartWithWrapper = true;
+ } catch (Throwable e) {
+ logger.error("failed to initial restart on service wrapper", e);
+ }
+ }
+ if (!restartWithWrapper) {
+ logger.info("Initiating requested restart");
+ try {
+ node.stop();
+ node.start();
+ } catch (Exception e) {
+ logger.warn("Failed to restart", e);
+ } finally {
+ restartRequested.set(false);
+ }
+ }
+ }
+ });
+ return new NodesRestartResponse.NodeRestartResponse(clusterService.localNode());
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ protected static class NodeRestartRequest extends NodeOperationRequest {
+
+ TimeValue delay;
+
+ private NodeRestartRequest() {
+ }
+
+ private NodeRestartRequest(String nodeId, NodesRestartRequest request) {
+ super(request, nodeId);
+ this.delay = request.delay;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ delay = readTimeValue(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ delay.writeTo(out);
+ }
+ }
+} \ No newline at end of file
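
nodeOperation(...) above uses restartRequested.compareAndSet(false, true) as a single-flight guard, so concurrent restart requests schedule only one actual restart. A JDK-only sketch of that guard; the synchronous run() stands in for the scheduled restart, and the guard is re-armed in a finally block as in the non-wrapper path above:

import java.util.concurrent.atomic.AtomicBoolean;

public class RestartGuardSketch {

    private final AtomicBoolean restartRequested = new AtomicBoolean();

    // Returns false if a restart is already pending; otherwise runs the
    // restart and re-arms the guard.
    boolean requestRestart(Runnable restart) {
        if (!restartRequested.compareAndSet(false, true)) {
            return false;
        }
        try {
            restart.run();
        } finally {
            restartRequested.set(false);
        }
        return true;
    }

    public static void main(String[] args) {
        RestartGuardSketch guard = new RestartGuardSketch();
        boolean scheduled = guard.requestRestart(new Runnable() {
            public void run() {
                System.out.println("restarting");
            }
        });
        System.out.println(scheduled); // true
    }
}
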
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java
new file mode 100644
index 0000000..89bce5e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.shutdown;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class NodesShutdownAction extends ClusterAction<NodesShutdownRequest, NodesShutdownResponse, NodesShutdownRequestBuilder> {
+
+ public static final NodesShutdownAction INSTANCE = new NodesShutdownAction();
+ public static final String NAME = "cluster/nodes/shutdown";
+
+ private NodesShutdownAction() {
+ super(NAME);
+ }
+
+ @Override
+ public NodesShutdownResponse newResponse() {
+ return new NodesShutdownResponse();
+ }
+
+ @Override
+ public NodesShutdownRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new NodesShutdownRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java
new file mode 100644
index 0000000..aeaf575
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.shutdown;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+
+/**
+ *
+ */
+public class NodesShutdownRequest extends MasterNodeOperationRequest<NodesShutdownRequest> {
+
+ String[] nodesIds = Strings.EMPTY_ARRAY;
+
+ TimeValue delay = TimeValue.timeValueSeconds(1);
+
+ boolean exit = true;
+
+ NodesShutdownRequest() {
+ }
+
+ public NodesShutdownRequest(String... nodesIds) {
+ this.nodesIds = nodesIds;
+ }
+
+ public NodesShutdownRequest nodesIds(String... nodesIds) {
+ this.nodesIds = nodesIds;
+ return this;
+ }
+
+ /**
+ * The delay for the shutdown to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesShutdownRequest delay(TimeValue delay) {
+ this.delay = delay;
+ return this;
+ }
+
+ public TimeValue delay() {
+ return this.delay;
+ }
+
+ /**
+ * The delay for the shutdown to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesShutdownRequest delay(String delay) {
+ return delay(TimeValue.parseTimeValue(delay, null));
+ }
+
+ /**
+ * Whether the JVM should be exited as well. Defaults to <tt>true</tt>.
+ */
+ public NodesShutdownRequest exit(boolean exit) {
+ this.exit = exit;
+ return this;
+ }
+
+ /**
+ * Whether the JVM should be exited as well. Defaults to <tt>true</tt>.
+ */
+ public boolean exit() {
+ return exit;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ delay = readTimeValue(in);
+ nodesIds = in.readStringArray();
+ exit = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ delay.writeTo(out);
+ out.writeStringArrayNullable(nodesIds);
+ out.writeBoolean(exit);
+ }
+}
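
A minimal usage sketch for the request above. The node id is hypothetical; exit(false) asks the node to close without calling System.exit:

import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest;

public class ShutdownRequestSketch {

    static NodesShutdownRequest build() {
        return new NodesShutdownRequest("node-1") // hypothetical node id
                .delay("5s")                      // wait before shutting down
                .exit(false);                     // close the node, keep the JVM
    }
}
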
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java
new file mode 100644
index 0000000..5967d57
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.shutdown;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ *
+ */
+public class NodesShutdownRequestBuilder extends MasterNodeOperationRequestBuilder<NodesShutdownRequest, NodesShutdownResponse, NodesShutdownRequestBuilder> {
+
+ public NodesShutdownRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new NodesShutdownRequest());
+ }
+
+ /**
+ * The nodes ids to shut down.
+ */
+ public NodesShutdownRequestBuilder setNodesIds(String... nodesIds) {
+ request.nodesIds(nodesIds);
+ return this;
+ }
+
+ /**
+ * The delay for the shutdown to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesShutdownRequestBuilder setDelay(TimeValue delay) {
+ request.delay(delay);
+ return this;
+ }
+
+ /**
+ * The delay for the shutdown to occur. Defaults to <tt>1s</tt>.
+ */
+ public NodesShutdownRequestBuilder setDelay(String delay) {
+ request.delay(delay);
+ return this;
+ }
+
+ /**
+ * Whether the JVM should be exited as well. Defaults to <tt>true</tt>.
+ */
+ public NodesShutdownRequestBuilder setExit(boolean exit) {
+ request.exit(exit);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<NodesShutdownResponse> listener) {
+ ((ClusterAdminClient) client).nodesShutdown(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java
new file mode 100644
index 0000000..7375038
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.shutdown;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NodesShutdownResponse extends ActionResponse {
+
+ private ClusterName clusterName;
+ private DiscoveryNode[] nodes;
+
+ NodesShutdownResponse() {
+ }
+
+ public NodesShutdownResponse(ClusterName clusterName, DiscoveryNode[] nodes) {
+ this.clusterName = clusterName;
+ this.nodes = nodes;
+ }
+
+ public ClusterName getClusterName() {
+ return this.clusterName;
+ }
+
+ public DiscoveryNode[] getNodes() {
+ return this.nodes;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterName = ClusterName.readClusterName(in);
+ nodes = new DiscoveryNode[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = DiscoveryNode.readNode(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ clusterName.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (DiscoveryNode node : nodes) {
+ node.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java
new file mode 100644
index 0000000..5d12cb7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.shutdown;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ *
+ */
+public class TransportNodesShutdownAction extends TransportMasterNodeOperationAction<NodesShutdownRequest, NodesShutdownResponse> {
+
+ private final Node node;
+ private final ClusterName clusterName;
+ private final boolean disabled;
+ private final TimeValue delay;
+
+ @Inject
+ public TransportNodesShutdownAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ Node node, ClusterName clusterName) {
+ super(settings, transportService, clusterService, threadPool);
+ this.node = node;
+ this.clusterName = clusterName;
+ this.disabled = settings.getAsBoolean("action.disable_shutdown", componentSettings.getAsBoolean("disabled", false));
+ this.delay = componentSettings.getAsTime("delay", TimeValue.timeValueMillis(200));
+
+ this.transportService.registerHandler(NodeShutdownRequestHandler.ACTION, new NodeShutdownRequestHandler());
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return NodesShutdownAction.NAME;
+ }
+
+ @Override
+ protected NodesShutdownRequest newRequest() {
+ return new NodesShutdownRequest();
+ }
+
+ @Override
+ protected NodesShutdownResponse newResponse() {
+ return new NodesShutdownResponse();
+ }
+
+ @Override
+ protected void processBeforeDelegationToMaster(NodesShutdownRequest request, ClusterState state) {
+ String[] nodesIds = request.nodesIds;
+ if (nodesIds != null) {
+ for (int i = 0; i < nodesIds.length; i++) {
+ // replace the _local one, since it loses its meaning when going over to the master...
+ if ("_local".equals(nodesIds[i])) {
+ nodesIds[i] = state.nodes().localNodeId();
+ }
+ }
+ }
+ }
+
+ @Override
+ protected void masterOperation(final NodesShutdownRequest request, final ClusterState state, final ActionListener<NodesShutdownResponse> listener) throws ElasticsearchException {
+ if (disabled) {
+ throw new ElasticsearchIllegalStateException("Shutdown is disabled");
+ }
+ final ObjectOpenHashSet<DiscoveryNode> nodes = new ObjectOpenHashSet<DiscoveryNode>();
+ if (state.nodes().isAllNodes(request.nodesIds)) {
+ logger.info("[cluster_shutdown]: requested, shutting down in [{}]", request.delay);
+ nodes.addAll(state.nodes().dataNodes().values());
+ nodes.addAll(state.nodes().masterNodes().values());
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(request.delay.millis());
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ // first, stop the cluster service
+ logger.trace("[cluster_shutdown]: stopping the cluster service so no re-routing will occur");
+ clusterService.stop();
+
+ final CountDownLatch latch = new CountDownLatch(nodes.size());
+ for (ObjectCursor<DiscoveryNode> cursor : nodes) {
+ final DiscoveryNode node = cursor.value;
+ if (node.id().equals(state.nodes().masterNodeId())) {
+ // don't shutdown the master yet...
+ latch.countDown();
+ } else {
+ logger.trace("[cluster_shutdown]: sending shutdown request to [{}]", node);
+ transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node);
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
+ latch.countDown();
+ }
+ });
+ }
+ }
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ logger.info("[cluster_shutdown]: done shutting down all nodes except master, proceeding to master");
+
+ // now, kill the master
+ logger.trace("[cluster_shutdown]: shutting down the master [{}]", state.nodes().masterNode());
+ transportService.sendRequest(state.nodes().masterNode(), NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ logger.trace("[cluster_shutdown]: received shutdown response from master");
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("[cluster_shutdown]: received failed shutdown response master", exp);
+ }
+ });
+ }
+ });
+ t.start();
+ } else {
+ final String[] nodesIds = state.nodes().resolveNodesIds(request.nodesIds);
+ logger.info("[partial_cluster_shutdown]: requested, shutting down [{}] in [{}]", nodesIds, request.delay);
+
+ for (String nodeId : nodesIds) {
+ final DiscoveryNode node = state.nodes().get(nodeId);
+ if (node != null) {
+ nodes.add(node);
+ }
+ }
+
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(request.delay.millis());
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ final CountDownLatch latch = new CountDownLatch(nodesIds.length);
+ for (String nodeId : nodesIds) {
+ final DiscoveryNode node = state.nodes().get(nodeId);
+ if (node == null) {
+ logger.warn("[partial_cluster_shutdown]: no node to shutdown for node_id [{}]", nodeId);
+ latch.countDown();
+ continue;
+ }
+
+ logger.trace("[partial_cluster_shutdown]: sending shutdown request to [{}]", node);
+ transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ logger.trace("[partial_cluster_shutdown]: received shutdown response from [{}]", node);
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("[partial_cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
+ latch.countDown();
+ }
+ });
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ logger.info("[partial_cluster_shutdown]: done shutting down [{}]", ((Object) nodesIds));
+ }
+ });
+ t.start();
+ }
+ listener.onResponse(new NodesShutdownResponse(clusterName, nodes.toArray(DiscoveryNode.class)));
+ }
+
+ private class NodeShutdownRequestHandler extends BaseTransportRequestHandler<NodeShutdownRequest> {
+
+ static final String ACTION = "/cluster/nodes/shutdown/node";
+
+ @Override
+ public NodeShutdownRequest newInstance() {
+ return new NodeShutdownRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(final NodeShutdownRequest request, TransportChannel channel) throws Exception {
+ if (disabled) {
+ throw new ElasticsearchIllegalStateException("Shutdown is disabled");
+ }
+ logger.info("shutting down in [{}]", delay);
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(delay.millis());
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ if (!request.exit) {
+ logger.info("initiating requested shutdown (no exit)...");
+ try {
+ node.close();
+ } catch (Exception e) {
+ logger.warn("Failed to shutdown", e);
+ }
+ return;
+ }
+ boolean shutdownWithWrapper = false;
+ if (System.getProperty("elasticsearch-service") != null) {
+ try {
+ Class wrapperManager = settings.getClassLoader().loadClass("org.tanukisoftware.wrapper.WrapperManager");
+ logger.info("initiating requested shutdown (using service)");
+ wrapperManager.getMethod("stopAndReturn", int.class).invoke(null, 0);
+ shutdownWithWrapper = true;
+ } catch (Throwable e) {
+ logger.error("failed to initial shutdown on service wrapper", e);
+ }
+ }
+ if (!shutdownWithWrapper) {
+ logger.info("initiating requested shutdown...");
+ try {
+ node.close();
+ } catch (Exception e) {
+ logger.warn("Failed to shutdown", e);
+ } finally {
+ // make sure we initiate the shutdown hooks, so the Bootstrap#main thread will exit
+ System.exit(0);
+ }
+ }
+ }
+ });
+ t.start();
+
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ static class NodeShutdownRequest extends TransportRequest {
+
+ boolean exit;
+
+ NodeShutdownRequest() {
+ }
+
+ NodeShutdownRequest(NodesShutdownRequest request) {
+ super(request);
+ this.exit = request.exit();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ exit = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(exit);
+ }
+ }
+}
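
The full-cluster branch of masterOperation(...) above fans the shutdown out to every node except the master, counts both acks and failures down on a shared latch, and only then shuts the master down so it can coordinate until the end. A JDK-only sketch of that master-last orchestration (node names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;

public class MasterLastShutdownSketch {

    public static void main(String[] args) throws InterruptedException {
        final String master = "master";
        List<String> nodes = Arrays.asList("node-1", "node-2", master);
        final CountDownLatch latch = new CountDownLatch(nodes.size());
        for (final String node : nodes) {
            if (node.equals(master)) {
                latch.countDown(); // don't shut the master down yet
                continue;
            }
            new Thread(new Runnable() {
                public void run() {
                    System.out.println("shutting down " + node);
                    latch.countDown(); // ack and failure both count down
                }
            }).start();
        }
        latch.await(); // every non-master node has answered
        System.out.println("shutting down " + master); // master goes last
    }
}
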
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
new file mode 100644
index 0000000..0910628
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.http.HttpStats;
+import org.elasticsearch.indices.NodeIndicesStats;
+import org.elasticsearch.indices.fielddata.breaker.FieldDataBreakerStats;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.monitor.jvm.JvmStats;
+import org.elasticsearch.monitor.network.NetworkStats;
+import org.elasticsearch.monitor.os.OsStats;
+import org.elasticsearch.monitor.process.ProcessStats;
+import org.elasticsearch.threadpool.ThreadPoolStats;
+import org.elasticsearch.transport.TransportStats;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Node statistics (dynamic: the values change depending on when they are sampled).
+ */
+public class NodeStats extends NodeOperationResponse implements ToXContent {
+
+ private long timestamp;
+
+ @Nullable
+ private NodeIndicesStats indices;
+
+ @Nullable
+ private OsStats os;
+
+ @Nullable
+ private ProcessStats process;
+
+ @Nullable
+ private JvmStats jvm;
+
+ @Nullable
+ private ThreadPoolStats threadPool;
+
+ @Nullable
+ private NetworkStats network;
+
+ @Nullable
+ private FsStats fs;
+
+ @Nullable
+ private TransportStats transport;
+
+ @Nullable
+ private HttpStats http;
+
+ @Nullable
+ private FieldDataBreakerStats breaker;
+
+ NodeStats() {
+ }
+
+ public NodeStats(DiscoveryNode node, long timestamp, @Nullable NodeIndicesStats indices,
+ @Nullable OsStats os, @Nullable ProcessStats process, @Nullable JvmStats jvm, @Nullable ThreadPoolStats threadPool,
+ @Nullable NetworkStats network, @Nullable FsStats fs, @Nullable TransportStats transport, @Nullable HttpStats http,
+ @Nullable FieldDataBreakerStats breaker) {
+ super(node);
+ this.timestamp = timestamp;
+ this.indices = indices;
+ this.os = os;
+ this.process = process;
+ this.jvm = jvm;
+ this.threadPool = threadPool;
+ this.network = network;
+ this.fs = fs;
+ this.transport = transport;
+ this.http = http;
+ this.breaker = breaker;
+ }
+
+ public long getTimestamp() {
+ return this.timestamp;
+ }
+
+ @Nullable
+ public String getHostname() {
+ return getNode().getHostName();
+ }
+
+ /**
+ * Indices level stats.
+ */
+ @Nullable
+ public NodeIndicesStats getIndices() {
+ return this.indices;
+ }
+
+ /**
+ * Operating System level statistics.
+ */
+ @Nullable
+ public OsStats getOs() {
+ return this.os;
+ }
+
+ /**
+ * Process level statistics.
+ */
+ @Nullable
+ public ProcessStats getProcess() {
+ return process;
+ }
+
+ /**
+ * JVM level statistics.
+ */
+ @Nullable
+ public JvmStats getJvm() {
+ return jvm;
+ }
+
+ /**
+ * Thread Pool level statistics.
+ */
+ @Nullable
+ public ThreadPoolStats getThreadPool() {
+ return this.threadPool;
+ }
+
+ /**
+ * Network level statistics.
+ */
+ @Nullable
+ public NetworkStats getNetwork() {
+ return network;
+ }
+
+ /**
+ * File system level stats.
+ */
+ @Nullable
+ public FsStats getFs() {
+ return fs;
+ }
+
+ @Nullable
+ public TransportStats getTransport() {
+ return this.transport;
+ }
+
+ @Nullable
+ public HttpStats getHttp() {
+ return this.http;
+ }
+
+ @Nullable
+ public FieldDataBreakerStats getBreaker() {
+ return this.breaker;
+ }
+
+ public static NodeStats readNodeStats(StreamInput in) throws IOException {
+ NodeStats nodeStats = new NodeStats();
+ nodeStats.readFrom(in);
+ return nodeStats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ timestamp = in.readVLong();
+ if (in.readBoolean()) {
+ indices = NodeIndicesStats.readIndicesStats(in);
+ }
+ if (in.readBoolean()) {
+ os = OsStats.readOsStats(in);
+ }
+ if (in.readBoolean()) {
+ process = ProcessStats.readProcessStats(in);
+ }
+ if (in.readBoolean()) {
+ jvm = JvmStats.readJvmStats(in);
+ }
+ if (in.readBoolean()) {
+ threadPool = ThreadPoolStats.readThreadPoolStats(in);
+ }
+ if (in.readBoolean()) {
+ network = NetworkStats.readNetworkStats(in);
+ }
+ if (in.readBoolean()) {
+ fs = FsStats.readFsStats(in);
+ }
+ if (in.readBoolean()) {
+ transport = TransportStats.readTransportStats(in);
+ }
+ if (in.readBoolean()) {
+ http = HttpStats.readHttpStats(in);
+ }
+ breaker = FieldDataBreakerStats.readOptionalCircuitBreakerStats(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(timestamp);
+ if (indices == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ indices.writeTo(out);
+ }
+ if (os == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ os.writeTo(out);
+ }
+ if (process == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ process.writeTo(out);
+ }
+ if (jvm == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ jvm.writeTo(out);
+ }
+ if (threadPool == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ threadPool.writeTo(out);
+ }
+ if (network == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ network.writeTo(out);
+ }
+ if (fs == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ fs.writeTo(out);
+ }
+ if (transport == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ transport.writeTo(out);
+ }
+ if (http == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ http.writeTo(out);
+ }
+ out.writeOptionalStreamable(breaker);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (!params.param("node_info_format", "default").equals("none")) {
+ builder.field("name", getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("transport_address", getNode().address().toString(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("host", getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("ip", getNode().getAddress(), XContentBuilder.FieldCaseConversion.NONE);
+
+ if (!getNode().attributes().isEmpty()) {
+ builder.startObject("attributes");
+ for (Map.Entry<String, String> attr : getNode().attributes().entrySet()) {
+ builder.field(attr.getKey(), attr.getValue(), XContentBuilder.FieldCaseConversion.NONE);
+ }
+ builder.endObject();
+ }
+ }
+
+ if (getIndices() != null) {
+ getIndices().toXContent(builder, params);
+ }
+
+ if (getOs() != null) {
+ getOs().toXContent(builder, params);
+ }
+ if (getProcess() != null) {
+ getProcess().toXContent(builder, params);
+ }
+ if (getJvm() != null) {
+ getJvm().toXContent(builder, params);
+ }
+ if (getThreadPool() != null) {
+ getThreadPool().toXContent(builder, params);
+ }
+ if (getNetwork() != null) {
+ getNetwork().toXContent(builder, params);
+ }
+ if (getFs() != null) {
+ getFs().toXContent(builder, params);
+ }
+ if (getTransport() != null) {
+ getTransport().toXContent(builder, params);
+ }
+ if (getHttp() != null) {
+ getHttp().toXContent(builder, params);
+ }
+ if (getBreaker() != null) {
+ getBreaker().toXContent(builder, params);
+ }
+
+ return builder;
+ }
+} \ No newline at end of file
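
Each nullable stats section above is written behind a presence boolean, so a reader can skip sections the sender did not populate. A JDK-only sketch of that optional-field wire format, with DataStreams standing in for StreamInput/StreamOutput and a String standing in for a stats block:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalFieldSketch {

    static void writeOptional(DataOutputStream out, String section) throws IOException {
        if (section == null) {
            out.writeBoolean(false);     // absent: nothing else is written
        } else {
            out.writeBoolean(true);      // present: payload follows
            out.writeUTF(section);
        }
    }

    static String readOptional(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        writeOptional(out, "os-stats");
        writeOptional(out, null); // e.g. a section that was not requested
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(readOptional(in)); // os-stats
        System.out.println(readOptional(in)); // null
    }
}
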
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java
new file mode 100644
index 0000000..6d2910a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class NodesStatsAction extends ClusterAction<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
+
+ public static final NodesStatsAction INSTANCE = new NodesStatsAction();
+ public static final String NAME = "cluster/nodes/stats";
+
+ private NodesStatsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public NodesStatsResponse newResponse() {
+ return new NodesStatsResponse();
+ }
+
+ @Override
+ public NodesStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new NodesStatsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
new file mode 100644
index 0000000..f37208d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to get node (cluster) level stats.
+ */
+public class NodesStatsRequest extends NodesOperationRequest<NodesStatsRequest> {
+
+ private CommonStatsFlags indices = new CommonStatsFlags();
+ private boolean os;
+ private boolean process;
+ private boolean jvm;
+ private boolean threadPool;
+ private boolean network;
+ private boolean fs;
+ private boolean transport;
+ private boolean http;
+ private boolean breaker;
+
+ protected NodesStatsRequest() {
+ }
+
+ /**
+ * Get stats from nodes based on the nodes ids specified. If none are passed, stats
+ * for all nodes will be returned.
+ */
+ public NodesStatsRequest(String... nodesIds) {
+ super(nodesIds);
+ }
+
+ /**
+ * Sets all the request flags.
+ */
+ public NodesStatsRequest all() {
+ this.indices.all();
+ this.os = true;
+ this.process = true;
+ this.jvm = true;
+ this.threadPool = true;
+ this.network = true;
+ this.fs = true;
+ this.transport = true;
+ this.http = true;
+ this.breaker = true;
+ return this;
+ }
+
+ /**
+ * Clears all the request flags.
+ */
+ public NodesStatsRequest clear() {
+ this.indices.clear();
+ this.os = false;
+ this.process = false;
+ this.jvm = false;
+ this.threadPool = false;
+ this.network = false;
+ this.fs = false;
+ this.transport = false;
+ this.http = false;
+ this.breaker = false;
+ return this;
+ }
+
+ public CommonStatsFlags indices() {
+ return indices;
+ }
+
+ public NodesStatsRequest indices(CommonStatsFlags indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Should indices stats be returned.
+ */
+ public NodesStatsRequest indices(boolean indices) {
+ if (indices) {
+ this.indices.all();
+ } else {
+ this.indices.clear();
+ }
+ return this;
+ }
+
+ /**
+ * Should the node OS stats be returned.
+ */
+ public boolean os() {
+ return this.os;
+ }
+
+ /**
+ * Should the node OS stats be returned.
+ */
+ public NodesStatsRequest os(boolean os) {
+ this.os = os;
+ return this;
+ }
+
+ /**
+ * Should the node process stats be returned.
+ */
+ public boolean process() {
+ return this.process;
+ }
+
+ /**
+ * Should the node process stats be returned.
+ */
+ public NodesStatsRequest process(boolean process) {
+ this.process = process;
+ return this;
+ }
+
+ /**
+ * Should the node JVM stats be returned.
+ */
+ public boolean jvm() {
+ return this.jvm;
+ }
+
+ /**
+ * Should the node JVM stats be returned.
+ */
+ public NodesStatsRequest jvm(boolean jvm) {
+ this.jvm = jvm;
+ return this;
+ }
+
+ /**
+ * Should the node thread pool stats be returned.
+ */
+ public boolean threadPool() {
+ return this.threadPool;
+ }
+
+ /**
+ * Should the node thread pool stats be returned.
+ */
+ public NodesStatsRequest threadPool(boolean threadPool) {
+ this.threadPool = threadPool;
+ return this;
+ }
+
+ /**
+ * Should the node network stats be returned.
+ */
+ public boolean network() {
+ return this.network;
+ }
+
+ /**
+ * Should the node network stats be returned.
+ */
+ public NodesStatsRequest network(boolean network) {
+ this.network = network;
+ return this;
+ }
+
+ /**
+ * Should the node file system stats be returned.
+ */
+ public boolean fs() {
+ return this.fs;
+ }
+
+ /**
+ * Should the node file system stats be returned.
+ */
+ public NodesStatsRequest fs(boolean fs) {
+ this.fs = fs;
+ return this;
+ }
+
+ /**
+ * Should the node transport stats be returned.
+ */
+ public boolean transport() {
+ return this.transport;
+ }
+
+ /**
+ * Should the node transport stats be returned.
+ */
+ public NodesStatsRequest transport(boolean transport) {
+ this.transport = transport;
+ return this;
+ }
+
+ /**
+ * Should the node HTTP stats be returned.
+ */
+ public boolean http() {
+ return this.http;
+ }
+
+ /**
+ * Should the node HTTP stats be returned.
+ */
+ public NodesStatsRequest http(boolean http) {
+ this.http = http;
+ return this;
+ }
+
+ public boolean breaker() {
+ return this.breaker;
+ }
+
+ /**
+ * Should the node's circuit breaker stats be returned.
+ */
+ public NodesStatsRequest breaker(boolean breaker) {
+ this.breaker = breaker;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = CommonStatsFlags.readCommonStatsFlags(in);
+ os = in.readBoolean();
+ process = in.readBoolean();
+ jvm = in.readBoolean();
+ threadPool = in.readBoolean();
+ network = in.readBoolean();
+ fs = in.readBoolean();
+ transport = in.readBoolean();
+ http = in.readBoolean();
+ breaker = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ indices.writeTo(out);
+ out.writeBoolean(os);
+ out.writeBoolean(process);
+ out.writeBoolean(jvm);
+ out.writeBoolean(threadPool);
+ out.writeBoolean(network);
+ out.writeBoolean(fs);
+ out.writeBoolean(transport);
+ out.writeBoolean(http);
+ out.writeBoolean(breaker);
+ }
+
+}
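
A minimal usage sketch for the flags API above. The node id is hypothetical; starting from clear() and opting back in keeps the response to just the requested sections:

import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;

public class StatsRequestSketch {

    static NodesStatsRequest build() {
        return new NodesStatsRequest("node-1") // hypothetical node id
                .clear()                       // drop every section first
                .jvm(true)                     // then opt back in
                .os(true);
    }
}
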
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
new file mode 100644
index 0000000..c3bcd6d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ *
+ */
+public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
+
+ public NodesStatsRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new NodesStatsRequest());
+ }
+
+ /**
+ * Sets all the request flags.
+ */
+ public NodesStatsRequestBuilder all() {
+ request.all();
+ return this;
+ }
+
+ /**
+ * Clears all stats flags.
+ */
+ public NodesStatsRequestBuilder clear() {
+ request.clear();
+ return this;
+ }
+
+ /**
+ * Should the node indices stats be returned.
+ */
+ public NodesStatsRequestBuilder setIndices(boolean indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ public NodesStatsRequestBuilder setBreaker(boolean breaker) {
+ request.breaker(breaker);
+ return this;
+ }
+
+ /**
+ * Should the node indices stats be returned.
+ */
+ public NodesStatsRequestBuilder setIndices(CommonStatsFlags indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Should the node OS stats be returned.
+ */
+ public NodesStatsRequestBuilder setOs(boolean os) {
+ request.os(os);
+ return this;
+ }
+
+ /**
+ * Should the node process stats be returned.
+ */
+ public NodesStatsRequestBuilder setProcess(boolean process) {
+ request.process(process);
+ return this;
+ }
+
+ /**
+ * Should the node JVM stats be returned.
+ */
+ public NodesStatsRequestBuilder setJvm(boolean jvm) {
+ request.jvm(jvm);
+ return this;
+ }
+
+ /**
+ * Should the node thread pool stats be returned.
+ */
+ public NodesStatsRequestBuilder setThreadPool(boolean threadPool) {
+ request.threadPool(threadPool);
+ return this;
+ }
+
+ /**
+ * Should the node Network stats be returned.
+ */
+ public NodesStatsRequestBuilder setNetwork(boolean network) {
+ request.network(network);
+ return this;
+ }
+
+ /**
+ * Should the node file system stats be returned.
+ */
+ public NodesStatsRequestBuilder setFs(boolean fs) {
+ request.fs(fs);
+ return this;
+ }
+
+ /**
+ * Should the node Transport stats be returned.
+ */
+ public NodesStatsRequestBuilder setTransport(boolean transport) {
+ request.transport(transport);
+ return this;
+ }
+
+ /**
+ * Should the node HTTP stats be returned.
+ */
+ public NodesStatsRequestBuilder setHttp(boolean http) {
+ request.http(http);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<NodesStatsResponse> listener) {
+ ((ClusterAdminClient) client).nodesStats(request, listener);
+ }
+}
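
A minimal usage sketch for this builder (illustrative only; it assumes an already-connected Client named client and the prepareNodesStats() convenience method that ClusterAdminClient provides elsewhere in this tree):

    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
    import org.elasticsearch.client.Client;

    public class NodesStatsExample {
        // Clear the default flags, then opt back in to just JVM and OS stats.
        static NodesStatsResponse jvmAndOsStats(Client client) {
            return client.admin().cluster().prepareNodesStats()
                    .clear()
                    .setJvm(true)
                    .setOs(true)
                    .execute().actionGet();
        }
    }
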
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java
new file mode 100644
index 0000000..27affe2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+
+/**
+ * Response to a nodes stats request, holding one {@link NodeStats} entry per responding node.
+ */
+public class NodesStatsResponse extends NodesOperationResponse<NodeStats> implements ToXContent {
+
+ NodesStatsResponse() {
+ }
+
+ public NodesStatsResponse(ClusterName clusterName, NodeStats[] nodes) {
+ super(clusterName, nodes);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeStats[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeStats.readNodeStats(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeStats node : nodes) {
+ node.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("cluster_name", getClusterName().value());
+
+ builder.startObject("nodes");
+ for (NodeStats nodeStats : this) {
+ builder.startObject(nodeStats.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("timestamp", nodeStats.getTimestamp());
+ nodeStats.toXContent(builder, params);
+
+ builder.endObject();
+ }
+ builder.endObject();
+
+ return builder;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
\ No newline at end of file
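
NodesOperationResponse makes this response iterable over its per-node entries, and toString() above renders the same content as pretty-printed JSON. A short sketch of consuming it (the helper below is hypothetical, not part of this patch):

    import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;

    public class PrintNodeStats {
        // Walk the per-node entries; getNode() and getTimestamp() are the same
        // accessors used by toXContent() above.
        static void print(NodesStatsResponse response) {
            for (NodeStats node : response) {
                System.out.println(node.getNode().name() + " sampled at " + node.getTimestamp());
            }
        }
    }
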
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
new file mode 100644
index 0000000..52c16a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.stats;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * Transport action that fans a nodes stats request out to the selected nodes and collects the per-node results.
+ */
+public class TransportNodesStatsAction extends TransportNodesOperationAction<NodesStatsRequest, NodesStatsResponse, TransportNodesStatsAction.NodeStatsRequest, NodeStats> {
+
+ private final NodeService nodeService;
+
+ @Inject
+ public TransportNodesStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ ClusterService clusterService, TransportService transportService,
+ NodeService nodeService) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ this.nodeService = nodeService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return NodesStatsAction.NAME;
+ }
+
+ @Override
+ protected NodesStatsResponse newResponse(NodesStatsRequest nodesStatsRequest, AtomicReferenceArray responses) {
+ final List<NodeStats> nodeStats = Lists.newArrayList();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodeStats) {
+ nodeStats.add((NodeStats) resp);
+ }
+ }
+ return new NodesStatsResponse(clusterName, nodeStats.toArray(new NodeStats[nodeStats.size()]));
+ }
+
+ @Override
+ protected NodesStatsRequest newRequest() {
+ return new NodesStatsRequest();
+ }
+
+ @Override
+ protected NodeStatsRequest newNodeRequest() {
+ return new NodeStatsRequest();
+ }
+
+ @Override
+ protected NodeStatsRequest newNodeRequest(String nodeId, NodesStatsRequest request) {
+ return new NodeStatsRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodeStats newNodeResponse() {
+ return new NodeStats();
+ }
+
+ @Override
+ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) throws ElasticsearchException {
+ NodesStatsRequest request = nodeStatsRequest.request;
+ return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(), request.network(),
+ request.fs(), request.transport(), request.http(), request.breaker());
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ static class NodeStatsRequest extends NodeOperationRequest {
+
+ NodesStatsRequest request;
+
+ NodeStatsRequest() {
+ }
+
+ NodeStatsRequest(String nodeId, NodesStatsRequest request) {
+ super(request, nodeId);
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = new NodesStatsRequest();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ }
+ }
+}
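
The per-node NodeStatsRequest simply wraps and re-serializes the top-level request, so the selected flags survive the hop to each node. A round-trip sketch of that Streamable contract (assuming the BytesStreamOutput/BytesStreamInput helpers from org.elasticsearch.common.io.stream; the helper method is hypothetical):

    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
    import org.elasticsearch.common.io.stream.BytesStreamInput;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;

    import java.io.IOException;

    public class RoundTripExample {
        static NodesStatsRequest roundTrip(NodesStatsRequest original) throws IOException {
            BytesStreamOutput out = new BytesStreamOutput();
            original.writeTo(out);                            // serialize the flag set
            NodesStatsRequest copy = new NodesStatsRequest();
            copy.readFrom(new BytesStreamInput(out.bytes())); // rebuild it on the receiving side
            return copy;
        }
    }
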
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java
new file mode 100644
index 0000000..e4f5b99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Unregister repository action
+ */
+public class DeleteRepositoryAction extends ClusterAction<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
+
+ public static final DeleteRepositoryAction INSTANCE = new DeleteRepositoryAction();
+ public static final String NAME = "cluster/repository/delete";
+
+ private DeleteRepositoryAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteRepositoryResponse newResponse() {
+ return new DeleteRepositoryResponse();
+ }
+
+ @Override
+ public DeleteRepositoryRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new DeleteRepositoryRequestBuilder(client);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java
new file mode 100644
index 0000000..d99a666
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Unregister repository request.
+ * <p/>
+ * Unregistering a repository only removes it from the cluster; no data is deleted from the repository itself.
+ */
+public class DeleteRepositoryRequest extends AcknowledgedRequest<DeleteRepositoryRequest> {
+
+ private String name;
+
+ DeleteRepositoryRequest() {
+ }
+
+ /**
+ * Constructs a new unregister repository request with the provided name.
+ *
+ * @param name name of the repository
+ */
+ public DeleteRepositoryRequest(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (name == null) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the name of the repository to unregister.
+ *
+ * @param name name of the repository
+ */
+ public DeleteRepositoryRequest name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ /**
+ * The name of the repository.
+ *
+ * @return the name of the repository
+ */
+ public String name() {
+ return this.name;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ name = in.readString();
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(name);
+ writeTimeout(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java
new file mode 100644
index 0000000..3b0e1d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ * Builder for unregister repository request
+ */
+public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
+
+ /**
+ * Constructs unregister repository request builder
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest());
+ }
+
+ /**
+ * Constructs unregister repository request builder with the specified repository name
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param name               repository name
+ */
+ public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
+ super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest(name));
+ }
+
+ /**
+ * Sets the repository name
+ *
+ * @param name the repository name
+ */
+ public DeleteRepositoryRequestBuilder setName(String name) {
+ request.name(name);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteRepositoryResponse> listener) {
+ ((ClusterAdminClient) client).deleteRepository(request, listener);
+ }
+}
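
An illustrative call through this builder ("my_backup" is a placeholder repository name; client is an assumed connected Client):

    import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
    import org.elasticsearch.client.Client;

    public class DeleteRepositoryExample {
        // Unregisters the repository; snapshot data in the backing store is left untouched.
        static boolean unregister(Client client) {
            DeleteRepositoryResponse response = client.admin().cluster()
                    .prepareDeleteRepository("my_backup")
                    .execute().actionGet();
            return response.isAcknowledged();
        }
    }
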
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java
new file mode 100644
index 0000000..b83c815
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Unregister repository response
+ */
+public class DeleteRepositoryResponse extends AcknowledgedResponse {
+
+ DeleteRepositoryResponse() {
+ }
+
+ DeleteRepositoryResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
new file mode 100644
index 0000000..a61498d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for unregister repository operation
+ */
+public class TransportDeleteRepositoryAction extends TransportMasterNodeOperationAction<DeleteRepositoryRequest, DeleteRepositoryResponse> {
+
+ private final RepositoriesService repositoriesService;
+
+ @Inject
+ public TransportDeleteRepositoryAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ RepositoriesService repositoriesService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ this.repositoriesService = repositoriesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteRepositoryAction.NAME;
+ }
+
+ @Override
+ protected DeleteRepositoryRequest newRequest() {
+ return new DeleteRepositoryRequest();
+ }
+
+ @Override
+ protected DeleteRepositoryResponse newResponse() {
+ return new DeleteRepositoryResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(DeleteRepositoryRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener<DeleteRepositoryResponse> listener) throws ElasticsearchException {
+ repositoriesService.unregisterRepository(
+ new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name())
+ .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()),
+ new ActionListener<RepositoriesService.UnregisterRepositoryResponse>() {
+
+ @Override
+ public void onResponse(RepositoriesService.UnregisterRepositoryResponse unregisterRepositoryResponse) {
+ listener.onResponse(new DeleteRepositoryResponse(unregisterRepositoryResponse.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java
new file mode 100644
index 0000000..240ea4d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.get;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Get repositories action
+ */
+public class GetRepositoriesAction extends ClusterAction<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
+
+ public static final GetRepositoriesAction INSTANCE = new GetRepositoriesAction();
+ public static final String NAME = "cluster/repository/get";
+
+ private GetRepositoriesAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetRepositoriesResponse newResponse() {
+ return new GetRepositoriesResponse();
+ }
+
+ @Override
+ public GetRepositoriesRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new GetRepositoriesRequestBuilder(client);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java
new file mode 100644
index 0000000..1ddc4c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.get;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Get repositories request
+ */
+public class GetRepositoriesRequest extends MasterNodeReadOperationRequest<GetRepositoriesRequest> {
+
+ private String[] repositories = Strings.EMPTY_ARRAY;
+
+ GetRepositoriesRequest() {
+ }
+
+ /**
+ * Constructs a new get repositories request with a list of repositories.
+ * <p/>
+ * If the list of repositories is empty or it contains a single element "_all", all registered repositories
+ * are returned.
+ *
+ * @param repositories list of repositories
+ */
+ public GetRepositoriesRequest(String[] repositories) {
+ this.repositories = repositories;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (repositories == null) {
+ validationException = addValidationError("repositories is null", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The names of the repositories.
+ *
+ * @return list of repositories
+ */
+ public String[] repositories() {
+ return this.repositories;
+ }
+
+ /**
+ * Sets the list of repositories.
+ * <p/>
+ * If the list of repositories is empty or it contains a single element "_all", all registered repositories
+ * are returned.
+ *
+ * @param repositories list of repositories
+ * @return this request
+ */
+ public GetRepositoriesRequest repositories(String[] repositories) {
+ this.repositories = repositories;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ repositories = in.readStringArray();
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(repositories);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java
new file mode 100644
index 0000000..83d7875
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.get;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ * Get repositories request builder
+ */
+public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
+
+ /**
+ * Creates a new get repositories request builder
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public GetRepositoriesRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new GetRepositoriesRequest());
+ }
+
+ /**
+ * Creates a new get repositories request builder
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param repositories list of repositories to get
+ */
+ public GetRepositoriesRequestBuilder(ClusterAdminClient clusterAdminClient, String... repositories) {
+ super((InternalClusterAdminClient) clusterAdminClient, new GetRepositoriesRequest(repositories));
+ }
+
+ /**
+ * Sets list of repositories to get
+ *
+ * @param repositories list of repositories
+ * @return builder
+ */
+ public GetRepositoriesRequestBuilder setRepositories(String... repositories) {
+ request.repositories(repositories);
+ return this;
+ }
+
+ /**
+ * Adds repositories to the list of repositories to get
+ *
+ * @param repositories list of repositories
+ * @return builder
+ */
+ public GetRepositoriesRequestBuilder addRepositories(String... repositories) {
+ request.repositories(ObjectArrays.concat(request.repositories(), repositories, String.class));
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetRepositoriesResponse> listener) {
+ ((ClusterAdminClient) client).getRepositories(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java
new file mode 100644
index 0000000..3cbe044
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.get;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Get repositories response
+ */
+public class GetRepositoriesResponse extends ActionResponse implements Iterable<RepositoryMetaData> {
+
+ private ImmutableList<RepositoryMetaData> repositories = ImmutableList.of();
+
+ GetRepositoriesResponse() {
+ }
+
+ GetRepositoriesResponse(ImmutableList<RepositoryMetaData> repositories) {
+ this.repositories = repositories;
+ }
+
+ /**
+ * List of repositories to return
+ *
+ * @return list of repositories
+ */
+ public ImmutableList<RepositoryMetaData> repositories() {
+ return repositories;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
+ for (int j = 0; j < size; j++) {
+ repositoryListBuilder.add(new RepositoryMetaData(
+ in.readString(),
+ in.readString(),
+ ImmutableSettings.readSettingsFromStream(in))
+ );
+ }
+ repositories = repositoryListBuilder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(repositories.size());
+ for (RepositoryMetaData repository : repositories) {
+ out.writeString(repository.name());
+ out.writeString(repository.type());
+ ImmutableSettings.writeSettingsToStream(repository.settings(), out);
+ }
+ }
+
+ /**
+ * Iterator over the repositories data
+ *
+ * @return iterator over the repositories data
+ */
+ @Override
+ public Iterator<RepositoryMetaData> iterator() {
+ return repositories.iterator();
+ }
+}
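
Since the response is Iterable over RepositoryMetaData, listing the registered repositories is a short loop. A sketch (assumes the prepareGetRepositories() convenience method on ClusterAdminClient):

    import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.metadata.RepositoryMetaData;

    public class ListRepositoriesExample {
        // An empty repository list (or the single element "_all") returns every registered repository.
        static void listAll(Client client) {
            GetRepositoriesResponse response = client.admin().cluster()
                    .prepareGetRepositories()
                    .execute().actionGet();
            for (RepositoryMetaData repository : response) {
                System.out.println(repository.name() + " [" + repository.type() + "]");
            }
        }
    }
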
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
new file mode 100644
index 0000000..c677d93
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.get;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for get repositories operation
+ */
+public class TransportGetRepositoriesAction extends TransportMasterNodeReadOperationAction<GetRepositoriesRequest, GetRepositoriesResponse> {
+
+ @Inject
+ public TransportGetRepositoriesAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetRepositoriesAction.NAME;
+ }
+
+ @Override
+ protected GetRepositoriesRequest newRequest() {
+ return new GetRepositoriesRequest();
+ }
+
+ @Override
+ protected GetRepositoriesResponse newResponse() {
+ return new GetRepositoriesResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener<GetRepositoriesResponse> listener) throws ElasticsearchException {
+ MetaData metaData = state.metaData();
+ RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
+ if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) {
+ if (repositories != null) {
+ listener.onResponse(new GetRepositoriesResponse(repositories.repositories()));
+ } else {
+ listener.onResponse(new GetRepositoriesResponse(ImmutableList.<RepositoryMetaData>of()));
+ }
+ } else {
+ if (repositories != null) {
+ ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
+ for (String repository : request.repositories()) {
+ RepositoryMetaData repositoryMetaData = repositories.repository(repository);
+ if (repositoryMetaData == null) {
+ listener.onFailure(new RepositoryMissingException(repository));
+ return;
+ }
+ repositoryListBuilder.add(repositoryMetaData);
+ }
+ listener.onResponse(new GetRepositoriesResponse(repositoryListBuilder.build()));
+ } else {
+ listener.onFailure(new RepositoryMissingException(request.repositories()[0]));
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java
new file mode 100644
index 0000000..ab782fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Register repository action
+ */
+public class PutRepositoryAction extends ClusterAction<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
+
+ public static final PutRepositoryAction INSTANCE = new PutRepositoryAction();
+ public static final String NAME = "cluster/repository/put";
+
+ private PutRepositoryAction() {
+ super(NAME);
+ }
+
+ @Override
+ public PutRepositoryResponse newResponse() {
+ return new PutRepositoryResponse();
+ }
+
+ @Override
+ public PutRepositoryRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new PutRepositoryRequestBuilder(client);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java
new file mode 100644
index 0000000..59b2e7c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+
+/**
+ * Register repository request.
+ * <p/>
+ * Registers a repository with the given name, type and settings. If a repository with the same name is already
+ * registered in the cluster, the new repository definition replaces the existing one.
+ */
+public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryRequest> {
+
+ private String name;
+
+ private String type;
+
+ private Settings settings = EMPTY_SETTINGS;
+
+ PutRepositoryRequest() {
+ }
+
+ /**
+ * Constructs a new put repository request with the provided name.
+ */
+ public PutRepositoryRequest(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (name == null) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ if (type == null) {
+ validationException = addValidationError("type is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the name of the repository.
+ *
+ * @param name repository name
+ */
+ public PutRepositoryRequest name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ /**
+ * The name of the repository.
+ *
+ * @return repository name
+ */
+ public String name() {
+ return this.name;
+ }
+
+ /**
+ * The type of the repository
+ * <p/>
+ * <ul>
+ * <li>"fs" - shared filesystem repository</li>
+ * </ul>
+ *
+ * @param type repository type
+ * @return this request
+ */
+ public PutRepositoryRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * Returns repository type
+ *
+ * @return repository type
+ */
+ public String type() {
+ return this.type;
+ }
+
+ /**
+ * Sets the repository settings
+ *
+ * @param settings repository settings
+ * @return this request
+ */
+ public PutRepositoryRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Sets the repository settings
+ *
+ * @param settings repository settings
+ * @return this request
+ */
+ public PutRepositoryRequest settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ /**
+ * Sets the repository settings.
+ *
+ * @param source repository settings in JSON, YAML or properties format
+ * @return this request
+ */
+ public PutRepositoryRequest settings(String source) {
+ this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+ * Sets the repository settings.
+ *
+ * @param source repository settings
+ * @return this request
+ */
+ public PutRepositoryRequest settings(Map<String, Object> source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ /**
+ * Returns repository settings
+ *
+ * @return repository settings
+ */
+ public Settings settings() {
+ return this.settings;
+ }
+
+ /**
+ * Parses repository definition.
+ *
+ * @param repositoryDefinition repository definition
+ */
+ public PutRepositoryRequest source(XContentBuilder repositoryDefinition) {
+ return source(repositoryDefinition.bytes());
+ }
+
+ /**
+ * Parses repository definition.
+ *
+ * @param repositoryDefinition repository definition
+ */
+ public PutRepositoryRequest source(Map repositoryDefinition) {
+ Map<String, Object> source = repositoryDefinition;
+ for (Map.Entry<String, Object> entry : source.entrySet()) {
+ String name = entry.getKey();
+ if (name.equals("type")) {
+ type(entry.getValue().toString());
+ } else if (name.equals("settings")) {
+ if (!(entry.getValue() instanceof Map)) {
+ throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
+ }
+ settings((Map<String, Object>) entry.getValue());
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Parses repository definition.
+ * JSON, Smile and YAML formats are supported
+ *
+ * @param repositoryDefinition repository definition
+ */
+ public PutRepositoryRequest source(String repositoryDefinition) {
+ try {
+ return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
+ }
+ }
+
+ /**
+ * Parses repository definition.
+ * JSON, Smile and YAML formats are supported
+ *
+ * @param repositoryDefinition repository definition
+ */
+ public PutRepositoryRequest source(byte[] repositoryDefinition) {
+ return source(repositoryDefinition, 0, repositoryDefinition.length);
+ }
+
+ /**
+ * Parses repository definition.
+ * JSON, Smile and YAML formats are supported
+ *
+ * @param repositoryDefinition repository definition
+ */
+ public PutRepositoryRequest source(byte[] repositoryDefinition, int offset, int length) {
+ try {
+ return source(XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
+ }
+ }
+
+ /**
+ * Parses repository definition.
+ * JSON, Smile and YAML formats are supported
+ *
+ * @param repositoryDefinition repository definition
+ */
+ public PutRepositoryRequest source(BytesReference repositoryDefinition) {
+ try {
+ return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ name = in.readString();
+ type = in.readString();
+ settings = readSettingsFromStream(in);
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(name);
+ out.writeString(type);
+ writeSettingsToStream(settings, out);
+ writeTimeout(out);
+ }
+}
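
The source(...) overloads above accept a complete repository definition in one document. A sketch using the JSON form (the repository name and filesystem path are placeholders):

    import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;

    public class PutRepositoryFromSource {
        // Equivalent to calling type("fs") and settings(...) separately.
        static PutRepositoryRequest fsRepository() {
            return new PutRepositoryRequest("my_backup")
                    .source("{\"type\": \"fs\", \"settings\": {\"location\": \"/mount/backups/my_backup\"}}");
        }
    }
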
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java
new file mode 100644
index 0000000..d527d0c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * Register repository request builder
+ */
+public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
+
+ /**
+ * Constructs register repository request
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new PutRepositoryRequest());
+ }
+
+ /**
+ * Constructs register repository request for the repository with a given name
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param name repository name
+ */
+ public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
+ super((InternalClusterAdminClient) clusterAdminClient, new PutRepositoryRequest(name));
+ }
+
+ /**
+ * Sets the repository name
+ *
+ * @param name repository name
+ * @return this builder
+ */
+ public PutRepositoryRequestBuilder setName(String name) {
+ request.name(name);
+ return this;
+ }
+
+ /**
+ * Sets the repository type
+ *
+ * @param type repository type
+ * @return this builder
+ */
+ public PutRepositoryRequestBuilder setType(String type) {
+ request.type(type);
+ return this;
+ }
+
+ /**
+ * Sets the repository settings
+ *
+ * @param settings repository settings
+ * @return this builder
+ */
+ public PutRepositoryRequestBuilder setSettings(Settings settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the repository settings
+ *
+ * @param settings repository settings builder
+ * @return this builder
+ */
+ public PutRepositoryRequestBuilder setSettings(Settings.Builder settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the repository settings in JSON, YAML or properties format
+ *
+ * @param source repository settings
+ * @return this builder
+ */
+ public PutRepositoryRequestBuilder setSettings(String source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * Sets the repository settings
+ *
+ * @param source repository settings
+ * @return this builder
+ */
+ public PutRepositoryRequestBuilder setSettings(Map<String, Object> source) {
+ request.settings(source);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<PutRepositoryResponse> listener) {
+ ((ClusterAdminClient) client).putRepository(request, listener);
+ }
+}
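
A fluent registration sketch through this builder ("location" and "compress" are standard "fs" repository settings; the name and path are placeholders):

    import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.ImmutableSettings;

    public class RegisterRepositoryExample {
        // Registers a shared-filesystem repository; "location" must point at the shared mount.
        static boolean register(Client client) {
            PutRepositoryResponse response = client.admin().cluster()
                    .preparePutRepository("my_backup")
                    .setType("fs")
                    .setSettings(ImmutableSettings.settingsBuilder()
                            .put("location", "/mount/backups/my_backup")
                            .put("compress", true))
                    .execute().actionGet();
            return response.isAcknowledged();
        }
    }
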
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java
new file mode 100644
index 0000000..c2b4574
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Register repository response
+ */
+public class PutRepositoryResponse extends AcknowledgedResponse {
+
+ PutRepositoryResponse() {
+ }
+
+ PutRepositoryResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
new file mode 100644
index 0000000..ed59359
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for register repository operation
+ */
+public class TransportPutRepositoryAction extends TransportMasterNodeOperationAction<PutRepositoryRequest, PutRepositoryResponse> {
+
+ private final RepositoriesService repositoriesService;
+
+ @Inject
+ public TransportPutRepositoryAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ RepositoriesService repositoriesService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ this.repositoriesService = repositoriesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return PutRepositoryAction.NAME;
+ }
+
+ @Override
+ protected PutRepositoryRequest newRequest() {
+ return new PutRepositoryRequest();
+ }
+
+ @Override
+ protected PutRepositoryResponse newResponse() {
+ return new PutRepositoryResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(PutRepositoryRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener<PutRepositoryResponse> listener) throws ElasticsearchException {
+
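+        // delegate the registration to the repositories service; its cluster state update
+        // ack is translated below into the transport-level acknowledged flag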
+ repositoriesService.registerRepository(new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]", request.name(), request.type())
+ .settings(request.settings())
+ .masterNodeTimeout(request.masterNodeTimeout())
+ .ackTimeout(request.timeout()), new ActionListener<RepositoriesService.RegisterRepositoryResponse>() {
+
+ @Override
+ public void onResponse(RepositoriesService.RegisterRepositoryResponse response) {
+ listener.onResponse(new PutRepositoryResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java
new file mode 100644
index 0000000..286146a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.reroute;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Action definition for the cluster reroute operation
+ */
+public class ClusterRerouteAction extends ClusterAction<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
+
+ public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction();
+ public static final String NAME = "cluster/reroute";
+
+ private ClusterRerouteAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterRerouteResponse newResponse() {
+ return new ClusterRerouteResponse();
+ }
+
+ @Override
+ public ClusterRerouteRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new ClusterRerouteRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
new file mode 100644
index 0000000..a0a7939
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.reroute;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Request to submit cluster reroute allocation commands
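+ * <p>
+ * A minimal usage sketch; the command, index, and node names below are illustrative:
+ * <pre>
+ * ClusterRerouteRequest request = new ClusterRerouteRequest()
+ *         .add(new MoveAllocationCommand(new ShardId("index1", 0), "node1", "node2"))
+ *         .dryRun(true);
+ * </pre>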
+ */
+public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
+
+ AllocationCommands commands = new AllocationCommands();
+ boolean dryRun;
+
+ public ClusterRerouteRequest() {
+ }
+
+ /**
+     * Adds allocation commands to be applied to the cluster. Note, the list can be empty,
+     * in which case a plain "reroute" is performed.
+ */
+ public ClusterRerouteRequest add(AllocationCommand... commands) {
+ this.commands.add(commands);
+ return this;
+ }
+
+ /**
+     * Sets the dry run flag (defaults to <tt>false</tt>), which allows running the commands without
+     * actually applying them to the cluster state, while still getting the resulting cluster state back.
+ */
+ public ClusterRerouteRequest dryRun(boolean dryRun) {
+ this.dryRun = dryRun;
+ return this;
+ }
+
+ /**
+     * Returns the current dry run flag, which allows running the commands without actually applying them,
+     * just to get the resulting cluster state back.
+ */
+ public boolean dryRun() {
+ return this.dryRun;
+ }
+
+ /**
+ * Sets the source for the request.
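+     * <p>
+     * Judging from the parser below, the body is expected to look like (illustrative values):
+     * <pre>
+     * {
+     *     "commands" : [ ... allocation commands ... ],
+     *     "dry_run" : true
+     * }
+     * </pre>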
+ */
+ public ClusterRerouteRequest source(BytesReference source) throws Exception {
+ XContentParser parser = XContentHelper.createParser(source);
+ try {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("commands".equals(currentFieldName)) {
+ this.commands = AllocationCommands.fromXContent(parser);
+ } else {
+ throw new ElasticsearchParseException("failed to parse reroute request, got start array with wrong field name [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("dry_run".equals(currentFieldName) || "dryRun".equals(currentFieldName)) {
+ dryRun = parser.booleanValue();
+ } else {
+ throw new ElasticsearchParseException("failed to parse reroute request, got value with wrong field name [" + currentFieldName + "]");
+ }
+ }
+ }
+ } finally {
+ parser.close();
+ }
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ commands = AllocationCommands.readFrom(in);
+ dryRun = in.readBoolean();
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ AllocationCommands.writeTo(commands, out);
+ out.writeBoolean(dryRun);
+ writeTimeout(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java
new file mode 100644
index 0000000..6e6064d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.reroute;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
+import org.elasticsearch.common.bytes.BytesReference;
+
+/**
+ * Builder for a cluster reroute request
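+ * <p>
+ * A hedged usage sketch, assuming a {@code Client} instance named {@code client}:
+ * <pre>
+ * ClusterRerouteResponse response = client.admin().cluster().prepareReroute()
+ *         .setDryRun(true)
+ *         .execute().actionGet();
+ * </pre>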
+ */
+public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
+
+ public ClusterRerouteRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new ClusterRerouteRequest());
+ }
+
+ /**
+     * Adds allocation commands to be applied to the cluster. Note, the list can be empty,
+     * in which case a plain "reroute" is performed.
+ */
+ public ClusterRerouteRequestBuilder add(AllocationCommand... commands) {
+ request.add(commands);
+ return this;
+ }
+
+ /**
+     * Sets the dry run flag (defaults to <tt>false</tt>), which allows running the commands without
+     * actually applying them to the cluster state, while still getting the resulting cluster state back.
+ */
+ public ClusterRerouteRequestBuilder setDryRun(boolean dryRun) {
+ request.dryRun(dryRun);
+ return this;
+ }
+
+ /**
+ * Sets the source for the request
+ */
+ public ClusterRerouteRequestBuilder setSource(BytesReference source) throws Exception {
+ request.source(source);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClusterRerouteResponse> listener) {
+ ((ClusterAdminClient) client).reroute(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java
new file mode 100644
index 0000000..a30d0f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.reroute;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Response returned after a cluster reroute request
+ */
+public class ClusterRerouteResponse extends AcknowledgedResponse {
+
+ private ClusterState state;
+
+ ClusterRerouteResponse() {
+
+ }
+
+ ClusterRerouteResponse(boolean acknowledged, ClusterState state) {
+ super(acknowledged);
+ this.state = state;
+ }
+
+ /**
+     * Returns the cluster state resulting from the cluster reroute request execution
+ */
+ public ClusterState getState() {
+ return this.state;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
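+        // the received cluster state is deserialized without marking a local node (hence the null)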
+ state = ClusterState.Builder.readFrom(in, null);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ ClusterState.Builder.writeTo(state, out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
new file mode 100644
index 0000000..850aeff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.reroute;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for the cluster reroute operation
+ */
+public class TransportClusterRerouteAction extends TransportMasterNodeOperationAction<ClusterRerouteRequest, ClusterRerouteResponse> {
+
+ private final AllocationService allocationService;
+
+ @Inject
+ public TransportClusterRerouteAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ AllocationService allocationService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.allocationService = allocationService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClusterRerouteAction.NAME;
+ }
+
+ @Override
+ protected ClusterRerouteRequest newRequest() {
+ return new ClusterRerouteRequest();
+ }
+
+ @Override
+ protected ClusterRerouteResponse newResponse() {
+ return new ClusterRerouteResponse();
+ }
+
+ @Override
+ protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) throws ElasticsearchException {
+ clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ private volatile ClusterState clusterStateToSend;
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterRerouteResponse(true, clusterStateToSend));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.timeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.debug("failed to perform [{}]", t, source);
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, true);
+ ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
+ clusterStateToSend = newState;
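+                // for a dry run the prospective state is only captured for the response;
+                // the current cluster state is returned unchanged below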
+ if (request.dryRun) {
+ return currentState;
+ }
+ return newState;
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java
new file mode 100644
index 0000000..69bc9e5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Action definition for the cluster update settings operation
+ */
+public class ClusterUpdateSettingsAction extends ClusterAction<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
+
+ public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction();
+ public static final String NAME = "cluster/settings/update";
+
+ private ClusterUpdateSettingsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterUpdateSettingsResponse newResponse() {
+ return new ClusterUpdateSettingsResponse();
+ }
+
+ @Override
+ public ClusterUpdateSettingsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new ClusterUpdateSettingsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java
new file mode 100644
index 0000000..3332553
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+
+/**
+ * Request for an update cluster settings action
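+ * <p>
+ * A minimal usage sketch; the setting key and value are illustrative:
+ * <pre>
+ * ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest()
+ *         .transientSettings(ImmutableSettings.settingsBuilder()
+ *                 .put("indices.recovery.max_bytes_per_sec", "40mb"));
+ * </pre>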
+ */
+public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpdateSettingsRequest> {
+
+ private Settings transientSettings = EMPTY_SETTINGS;
+ private Settings persistentSettings = EMPTY_SETTINGS;
+
+ public ClusterUpdateSettingsRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (transientSettings.getAsMap().isEmpty() && persistentSettings.getAsMap().isEmpty()) {
+ validationException = addValidationError("no settings to update", validationException);
+ }
+ return validationException;
+ }
+
+ Settings transientSettings() {
+ return transientSettings;
+ }
+
+ Settings persistentSettings() {
+ return persistentSettings;
+ }
+
+ /**
+ * Sets the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequest transientSettings(Settings settings) {
+ this.transientSettings = settings;
+ return this;
+ }
+
+ /**
+ * Sets the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequest transientSettings(Settings.Builder settings) {
+ this.transientSettings = settings.build();
+ return this;
+ }
+
+ /**
+ * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequest transientSettings(String source) {
+ this.transientSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+ * Sets the transient settings to be updated. They will not survive a full cluster restart
+ */
+ @SuppressWarnings("unchecked")
+ public ClusterUpdateSettingsRequest transientSettings(Map source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ transientSettings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ /**
+     * Sets the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequest persistentSettings(Settings settings) {
+ this.persistentSettings = settings;
+ return this;
+ }
+
+ /**
+     * Sets the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequest persistentSettings(Settings.Builder settings) {
+ this.persistentSettings = settings.build();
+ return this;
+ }
+
+ /**
+     * Sets the source containing the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequest persistentSettings(String source) {
+ this.persistentSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+     * Sets the persistent settings to be updated. They will get applied across restarts
+ */
+ @SuppressWarnings("unchecked")
+ public ClusterUpdateSettingsRequest persistentSettings(Map source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ persistentSettings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ transientSettings = readSettingsFromStream(in);
+ persistentSettings = readSettingsFromStream(in);
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeSettingsToStream(transientSettings, out);
+ writeSettingsToStream(persistentSettings, out);
+ writeTimeout(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java
new file mode 100644
index 0000000..b53ef88
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * Builder for a cluster update settings request
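+ * <p>
+ * A hedged usage sketch, assuming a {@code Client} instance named {@code client};
+ * the setting key and value are illustrative:
+ * <pre>
+ * client.admin().cluster().prepareUpdateSettings()
+ *         .setTransientSettings(ImmutableSettings.settingsBuilder()
+ *                 .put("cluster.routing.allocation.node_concurrent_recoveries", 4))
+ *         .execute().actionGet();
+ * </pre>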
+ */
+public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
+
+ public ClusterUpdateSettingsRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new ClusterUpdateSettingsRequest());
+ }
+
+ /**
+ * Sets the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequestBuilder setTransientSettings(Settings settings) {
+ request.transientSettings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequestBuilder setTransientSettings(Settings.Builder settings) {
+ request.transientSettings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequestBuilder setTransientSettings(String settings) {
+ request.transientSettings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the transient settings to be updated. They will not survive a full cluster restart
+ */
+ public ClusterUpdateSettingsRequestBuilder setTransientSettings(Map settings) {
+ request.transientSettings(settings);
+ return this;
+ }
+
+ /**
+     * Sets the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Settings settings) {
+ request.persistentSettings(settings);
+ return this;
+ }
+
+ /**
+     * Sets the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Settings.Builder settings) {
+ request.persistentSettings(settings);
+ return this;
+ }
+
+ /**
+     * Sets the source containing the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(String settings) {
+ request.persistentSettings(settings);
+ return this;
+ }
+
+ /**
+     * Sets the persistent settings to be updated. They will get applied across restarts
+ */
+ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Map settings) {
+ request.persistentSettings(settings);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClusterUpdateSettingsResponse> listener) {
+ ((ClusterAdminClient) client).updateSettings(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java
new file mode 100644
index 0000000..f155763
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.IOException;
+
+/**
+ * A response for a cluster update settings action.
+ */
+public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
+
+ Settings transientSettings;
+ Settings persistentSettings;
+
+ ClusterUpdateSettingsResponse() {
+ this.persistentSettings = ImmutableSettings.EMPTY;
+ this.transientSettings = ImmutableSettings.EMPTY;
+ }
+
+ ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) {
+ super(acknowledged);
+ this.persistentSettings = persistentSettings;
+ this.transientSettings = transientSettings;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ transientSettings = ImmutableSettings.readSettingsFromStream(in);
+ persistentSettings = ImmutableSettings.readSettingsFromStream(in);
+ readAcknowledged(in);
+ }
+
+ public Settings getTransientSettings() {
+ return transientSettings;
+ }
+
+ public Settings getPersistentSettings() {
+ return persistentSettings;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ ImmutableSettings.writeSettingsToStream(transientSettings, out);
+ ImmutableSettings.writeSettingsToStream(persistentSettings, out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
new file mode 100644
index 0000000..9ce5f54
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
+import org.elasticsearch.cluster.settings.DynamicSettings;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+
+import static org.elasticsearch.cluster.ClusterState.builder;
+
+/**
+ * Transport action for the cluster update settings operation
+ */
+public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOperationAction<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse> {
+
+ private final AllocationService allocationService;
+
+ private final DynamicSettings dynamicSettings;
+
+ @Inject
+ public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings) {
+ super(settings, transportService, clusterService, threadPool);
+ this.allocationService = allocationService;
+ this.dynamicSettings = dynamicSettings;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClusterUpdateSettingsAction.NAME;
+ }
+
+ @Override
+ protected ClusterUpdateSettingsRequest newRequest() {
+ return new ClusterUpdateSettingsRequest();
+ }
+
+ @Override
+ protected ClusterUpdateSettingsResponse newResponse() {
+ return new ClusterUpdateSettingsResponse();
+ }
+
+ @Override
+ protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) throws ElasticsearchException {
+ final ImmutableSettings.Builder transientUpdates = ImmutableSettings.settingsBuilder();
+ final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder();
+
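+        // two-phase flow: first apply the settings update; if anything changed, a follow-up
+        // reroute task is submitted, and the listener is notified only once that task acks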
+ clusterService.submitStateUpdateTask("cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ private volatile boolean changed = false;
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ if (changed) {
+ reroute(true);
+ } else {
+ listener.onResponse(new ClusterUpdateSettingsResponse(true, transientUpdates.build(), persistentUpdates.build()));
+ }
+
+ }
+
+ @Override
+ public void onAckTimeout() {
+ if (changed) {
+ reroute(false);
+ } else {
+ listener.onResponse(new ClusterUpdateSettingsResponse(false, transientUpdates.build(), persistentUpdates.build()));
+ }
+ }
+
+ private void reroute(final boolean updateSettingsAcked) {
+ clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+                    //we wait for the reroute ack only if the settings update was acknowledged
+ return updateSettingsAcked;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+                    //we return when the cluster reroute is acked (the acknowledged flag depends on whether the settings update was acknowledged)
+ listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ //we return when the cluster reroute ack times out (acknowledged false)
+ listener.onResponse(new ClusterUpdateSettingsResponse(false, transientUpdates.build(), persistentUpdates.build()));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.timeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ //if the reroute fails we only log
+ logger.debug("failed to perform [{}]", t, source);
+ }
+
+ @Override
+ public ClusterState execute(final ClusterState currentState) {
+ // now, reroute in case things that require it changed (e.g. number of replicas)
+ RoutingAllocation.Result routingResult = allocationService.reroute(currentState);
+ if (!routingResult.changed()) {
+ return currentState;
+ }
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.timeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.debug("failed to perform [{}]", t, source);
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(final ClusterState currentState) {
+ ImmutableSettings.Builder transientSettings = ImmutableSettings.settingsBuilder();
+ transientSettings.put(currentState.metaData().transientSettings());
+ for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
+ if (dynamicSettings.hasDynamicSetting(entry.getKey()) || entry.getKey().startsWith("logger.")) {
+ String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue());
+ if (error == null) {
+ transientSettings.put(entry.getKey(), entry.getValue());
+ transientUpdates.put(entry.getKey(), entry.getValue());
+ changed = true;
+ } else {
+ logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error);
+ }
+ } else {
+ logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey());
+ }
+ }
+
+ ImmutableSettings.Builder persistentSettings = ImmutableSettings.settingsBuilder();
+ persistentSettings.put(currentState.metaData().persistentSettings());
+ for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
+ if (dynamicSettings.hasDynamicSetting(entry.getKey()) || entry.getKey().startsWith("logger.")) {
+ String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue());
+ if (error == null) {
+ persistentSettings.put(entry.getKey(), entry.getValue());
+ persistentUpdates.put(entry.getKey(), entry.getValue());
+ changed = true;
+ } else {
+ logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
+ }
+ } else {
+ logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
+ }
+ }
+
+ if (!changed) {
+ return currentState;
+ }
+
+ MetaData.Builder metaData = MetaData.builder(currentState.metaData())
+ .persistentSettings(persistentSettings.build())
+ .transientSettings(transientSettings.build());
+
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false);
+ if (updatedReadOnly) {
+ blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+ } else {
+ blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+ }
+
+ return builder(currentState).metaData(metaData).blocks(blocks).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
new file mode 100644
index 0000000..3d935e9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.shards;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Action definition for the cluster search shards operation
+ */
+public class ClusterSearchShardsAction extends ClusterAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
+
+ public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction();
+ public static final String NAME = "cluster/shards/search_shards";
+
+ private ClusterSearchShardsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterSearchShardsResponse newResponse() {
+ return new ClusterSearchShardsResponse();
+ }
+
+ @Override
+ public ClusterSearchShardsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new ClusterSearchShardsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java
new file mode 100644
index 0000000..bd82404
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.shards;
+
+import org.elasticsearch.cluster.routing.ImmutableShardRouting;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Represents the routing entries of a single shard group (an index shard and its replicas)
+ * as returned by the cluster search shards API
+ */
+public class ClusterSearchShardsGroup implements Streamable, ToXContent {
+
+ private String index;
+ private int shardId;
+ ShardRouting[] shards;
+
+ ClusterSearchShardsGroup() {
+
+ }
+
+ public ClusterSearchShardsGroup(String index, int shardId, ShardRouting[] shards) {
+ this.index = index;
+ this.shardId = shardId;
+ this.shards = shards;
+ }
+
+ public static ClusterSearchShardsGroup readSearchShardsGroupResponse(StreamInput in) throws IOException {
+ ClusterSearchShardsGroup response = new ClusterSearchShardsGroup();
+ response.readFrom(in);
+ return response;
+ }
+
+ public String getIndex() {
+ return index;
+ }
+
+ public int getShardId() {
+ return shardId;
+ }
+
+ public ShardRouting[] getShards() {
+ return shards;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readString();
+ shardId = in.readVInt();
+ shards = new ShardRouting[in.readVInt()];
+ for (int i = 0; i < shards.length; i++) {
+ shards[i] = ImmutableShardRouting.readShardRoutingEntry(in, index, shardId);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeVInt(shardId);
+ out.writeVInt(shards.length);
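+        // index and shard id are written once for the whole group above, so each routing
+        // entry is serialized in its "thin" form (and read back with readShardRoutingEntry)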
+ for (ShardRouting shardRouting : shards) {
+ shardRouting.writeToThin(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray();
+ for (ShardRouting shard : getShards()) {
+ shard.toXContent(builder, params);
+ }
+ builder.endArray();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
new file mode 100644
index 0000000..e1251ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.shards;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Request to determine the shards a search request would be executed against
+ */
+public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest<ClusterSearchShardsRequest> {
+ private String[] indices;
+ @Nullable
+ private String routing;
+ @Nullable
+ private String preference;
+ private String[] types = Strings.EMPTY_ARRAY;
+ private IndicesOptions indicesOptions = IndicesOptions.lenient();
+
+
+ public ClusterSearchShardsRequest() {
+ }
+
+ public ClusterSearchShardsRequest(String... indices) {
+ indices(indices);
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ /**
+ * Sets the indices the search will be executed on.
+ */
+ public ClusterSearchShardsRequest indices(String... indices) {
+ if (indices == null) {
+ throw new ElasticsearchIllegalArgumentException("indices must not be null");
+ } else {
+ for (int i = 0; i < indices.length; i++) {
+ if (indices[i] == null) {
+ throw new ElasticsearchIllegalArgumentException("indices[" + i + "] must not be null");
+ }
+ }
+ }
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * The indices
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public ClusterSearchShardsRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+     * The document types to execute the search against. Defaults to executing against
+     * all types.
+ */
+ public String[] types() {
+ return types;
+ }
+
+ /**
+     * The document types to execute the search against. Defaults to executing against
+     * all types.
+ */
+ public ClusterSearchShardsRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ /**
+     * A comma-separated list of routing values to control the shards the search will be executed on.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+     * A comma-separated list of routing values to control the shards the search will be executed on.
+ */
+ public ClusterSearchShardsRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public ClusterSearchShardsRequest routing(String... routings) {
+ this.routing = Strings.arrayToCommaDelimitedString(routings);
+ return this;
+ }
+
+ /**
+     * Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public ClusterSearchShardsRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+
+ indices = new String[in.readVInt()];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = in.readString();
+ }
+
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+
+ types = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ readLocal(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+
+ out.writeVInt(indices.length);
+ for (String index : indices) {
+ out.writeString(index);
+ }
+
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+
+ out.writeStringArray(types);
+ indicesOptions.writeIndicesOptions(out);
+ writeLocal(out);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java
new file mode 100644
index 0000000..c6b0ab9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.shards;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
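+ * Builder for a cluster search shards request.
+ * <p>
+ * A hedged usage sketch, assuming a {@code Client} instance named {@code client}:
+ * <pre>
+ * ClusterSearchShardsResponse response = client.admin().cluster()
+ *         .prepareSearchShards("index1")
+ *         .setRouting("user1")
+ *         .execute().actionGet();
+ * </pre>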
+ */
+public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
+
+ public ClusterSearchShardsRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new ClusterSearchShardsRequest());
+ }
+
+ /**
+ * Sets the indices the search will be executed on.
+ */
+ public ClusterSearchShardsRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+     * The document types to execute the search against. Defaults to executing against
+     * all types.
+ */
+ public ClusterSearchShardsRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+     * A comma-separated list of routing values to control the shards the search will be executed on.
+ */
+ public ClusterSearchShardsRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public ClusterSearchShardsRequestBuilder setRouting(String... routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+     * Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public ClusterSearchShardsRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+     * Specifies what types of requested indices to ignore and how to deal with indices wildcard
+     * expressions, for example indices that don't exist.
+ */
+ public ClusterSearchShardsRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClusterSearchShardsResponse> listener) {
+ ((ClusterAdminClient) client).searchShards(request, listener);
+ }
+
+}
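+// Editor's note: an illustrative usage sketch, not part of the original patch.
+// It assumes a connected `Client client`; `prepareSearchShards` is the builder
+// entry point this class backs on ClusterAdminClient.
+//
+// ClusterSearchShardsResponse resp = client.admin().cluster()
+//         .prepareSearchShards("test-index")
+//         .setRouting("user_1")
+//         .setPreference("_local")
+//         .execute().actionGet();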
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
new file mode 100644
index 0000000..75468c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.shards;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Cluster search shards response
+ */
+public class ClusterSearchShardsResponse extends ActionResponse implements ToXContent {
+
+ private ClusterSearchShardsGroup[] groups;
+ private DiscoveryNode[] nodes;
+
+ ClusterSearchShardsResponse() {
+ }
+
+ public ClusterSearchShardsGroup[] getGroups() {
+ return groups;
+ }
+
+ public DiscoveryNode[] getNodes() {
+ return nodes;
+ }
+
+ public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes) {
+ this.groups = groups;
+ this.nodes = nodes;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ groups = new ClusterSearchShardsGroup[in.readVInt()];
+ for (int i = 0; i < groups.length; i++) {
+ groups[i] = ClusterSearchShardsGroup.readSearchShardsGroupResponse(in);
+ }
+ nodes = new DiscoveryNode[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = DiscoveryNode.readNode(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(groups.length);
+ for (ClusterSearchShardsGroup response : groups) {
+ response.writeTo(out);
+ }
+ out.writeVInt(nodes.length);
+ for (DiscoveryNode node : nodes) {
+ node.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("nodes");
+ for (DiscoveryNode node : nodes) {
+ builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("name", node.name());
+ builder.field("transport_address", node.getAddress());
+ if (!node.attributes().isEmpty()) {
+ builder.startObject("attributes");
+ for (Map.Entry<String, String> attr : node.attributes().entrySet()) {
+ builder.field(attr.getKey(), attr.getValue());
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ builder.startArray("shards");
+ for (ClusterSearchShardsGroup group : groups) {
+ group.toXContent(builder, params);
+ }
+ builder.endArray();
+ return builder;
+ }
+}
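+// Editor's note: a hedged sketch of consuming the response above, not part of the
+// original patch; the group accessors (getIndex, getShardId, getShards) are assumed
+// from ClusterSearchShardsGroup.
+//
+// for (ClusterSearchShardsGroup group : resp.getGroups()) {
+//     for (ShardRouting shard : group.getShards()) {
+//         // each routing names the node id currently hosting a copy of the shard
+//         System.out.println(group.getIndex() + "[" + group.getShardId() + "] -> " + shard.currentNodeId());
+//     }
+// }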
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
new file mode 100644
index 0000000..529cce4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.shards;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Sets.newHashSet;
+
+/**
+ * Transport action for the cluster search shards operation
+ */
+public class TransportClusterSearchShardsAction extends TransportMasterNodeReadOperationAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse> {
+
+ @Inject
+ public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClusterSearchShardsAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ // all in memory work here...
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected ClusterSearchShardsRequest newRequest() {
+ return new ClusterSearchShardsRequest();
+ }
+
+ @Override
+ protected ClusterSearchShardsResponse newResponse() {
+ return new ClusterSearchShardsResponse();
+ }
+
+ @Override
+ protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) throws ElasticsearchException {
+ ClusterState clusterState = clusterService.state();
+ String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
+ Set<String> nodeIds = newHashSet();
+ GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
+ ShardRouting shard;
+ ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
+ int currentGroup = 0;
+ for (ShardIterator shardIt : groupShardsIterator) {
+ String index = shardIt.shardId().getIndex();
+ int shardId = shardIt.shardId().getId();
+ ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()];
+ int currentShard = 0;
+ shardIt.reset();
+ while ((shard = shardIt.nextOrNull()) != null) {
+ shardRoutings[currentShard++] = shard;
+ nodeIds.add(shard.currentNodeId());
+ }
+ groupResponses[currentGroup++] = new ClusterSearchShardsGroup(index, shardId, shardRoutings);
+ }
+ DiscoveryNode[] nodes = new DiscoveryNode[nodeIds.size()];
+ int currentNode = 0;
+ for (String nodeId : nodeIds) {
+ nodes[currentNode++] = clusterState.getNodes().get(nodeId);
+ }
+ listener.onResponse(new ClusterSearchShardsResponse(groupResponses, nodes));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java
new file mode 100644
index 0000000..761c891
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.create;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Create snapshot action
+ */
+public class CreateSnapshotAction extends ClusterAction<CreateSnapshotRequest, CreateSnapshotResponse, CreateSnapshotRequestBuilder> {
+
+ public static final CreateSnapshotAction INSTANCE = new CreateSnapshotAction();
+ public static final String NAME = "cluster/snapshot/create";
+
+ private CreateSnapshotAction() {
+ super(NAME);
+ }
+
+ @Override
+ public CreateSnapshotResponse newResponse() {
+ return new CreateSnapshotResponse();
+ }
+
+ @Override
+ public CreateSnapshotRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new CreateSnapshotRequestBuilder(client);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
new file mode 100644
index 0000000..b1730d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
@@ -0,0 +1,488 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.create;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.Strings.EMPTY_ARRAY;
+import static org.elasticsearch.common.Strings.hasLength;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+
+/**
+ * Create snapshot request
+ * <p/>
+ * The only mandatory parameters are the repository and snapshot names. The snapshot name has to satisfy the following requirements
+ * <ul>
+ * <li>be a non-empty string</li>
+ * <li>must not contain whitespace (tabs or spaces)</li>
+ * <li>must not contain comma (',')</li>
+ * <li>must not contain hash sign ('#')</li>
+ * <li>must not start with underscore ('_')</li>
+ * <li>must be lowercase</li>
+ * <li>must not contain invalid file name characters {@link org.elasticsearch.common.Strings#INVALID_FILENAME_CHARS} </li>
+ * </ul>
+ */
+public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnapshotRequest> {
+
+ private String snapshot;
+
+ private String repository;
+
+ private String[] indices = EMPTY_ARRAY;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ private boolean partial = false;
+
+ private Settings settings = EMPTY_SETTINGS;
+
+ private boolean includeGlobalState = true;
+
+ private boolean waitForCompletion;
+
+ CreateSnapshotRequest() {
+ }
+
+ /**
+ * Constructs a new create snapshot request with the provided snapshot and repository names
+ *
+ * @param repository repository name
+ * @param snapshot snapshot name
+ */
+ public CreateSnapshotRequest(String repository, String snapshot) {
+ this.snapshot = snapshot;
+ this.repository = repository;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (snapshot == null) {
+ validationException = addValidationError("snapshot is missing", validationException);
+ }
+ if (repository == null) {
+ validationException = addValidationError("repository is missing", validationException);
+ }
+ if (indices == null) {
+ validationException = addValidationError("indices is null", validationException);
+ } else {
+ for (String index : indices) {
+ if (index == null) {
+ validationException = addValidationError("index is null", validationException);
+ break;
+ }
+ }
+ }
+ if (indicesOptions == null) {
+ validationException = addValidationError("indicesOptions is null", validationException);
+ }
+ if (settings == null) {
+ validationException = addValidationError("settings is null", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the snapshot name
+ *
+ * @param snapshot snapshot name
+ */
+ public CreateSnapshotRequest snapshot(String snapshot) {
+ this.snapshot = snapshot;
+ return this;
+ }
+
+ /**
+ * The snapshot name
+ *
+ * @return snapshot name
+ */
+ public String snapshot() {
+ return this.snapshot;
+ }
+
+ /**
+ * Sets repository name
+ *
+ * @param repository repository name
+ * @return this request
+ */
+ public CreateSnapshotRequest repository(String repository) {
+ this.repository = repository;
+ return this;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String repository() {
+ return this.repository;
+ }
+
+ /**
+ * Sets a list of indices that should be included into the snapshot
+ * <p/>
+ * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will snapshot all indices with
+ * prefix "test" except index "test42". Aliases are supported. An empty list or {"_all"} will snapshot all open
+ * indices in the cluster.
+ *
+ * @param indices list of indices
+ * @return this request
+ */
+ public CreateSnapshotRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Sets a list of indices that should be included into the snapshot
+ * <p/>
+ * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will snapshot all indices with
+ * prefix "test" except index "test42". Aliases are supported. An empty list or {"_all"} will snapshot all open
+ * indices in the cluster.
+ *
+ * @param indices list of indices
+ * @return this request
+ */
+ public CreateSnapshotRequest indices(List<String> indices) {
+ this.indices = indices.toArray(new String[indices.size()]);
+ return this;
+ }
+
+ /**
+ * Returns a list of indices that should be included into the snapshot
+ *
+ * @return list of indices
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Returns the indices options, i.e. what types of requested indices to ignore, for example indices that don't exist.
+ *
+ * @return the desired behaviour regarding indices options
+ */
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /**
+ * Specifies the indices options, i.e. what types of requested indices to ignore, for example indices that don't exist.
+ *
+ * @param indicesOptions the desired behaviour regarding indices options
+ * @return this request
+ */
+ public CreateSnapshotRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * Returns true if indices with unavailable shards should be partially snapshotted.
+ *
+ * @return true if the snapshot should be partial
+ */
+ public boolean partial() {
+ return partial;
+ }
+
+ /**
+ * Set to true to allow indices with unavailable shards to be partially snapshotted.
+ *
+ * @param partial true if indices with unavailable shards should be partially snapshotted
+ * @return this request
+ */
+ public CreateSnapshotRequest partial(boolean partial) {
+ this.partial = partial;
+ return this;
+ }
+
+ /**
+ * If set to true, the request will wait for the snapshot completion before returning.
+ *
+ * @param waitForCompletion true if the request should wait for the snapshot completion before returning
+ * @return this request
+ */
+ public CreateSnapshotRequest waitForCompletion(boolean waitForCompletion) {
+ this.waitForCompletion = waitForCompletion;
+ return this;
+ }
+
+ /**
+ * Returns true if the request should wait for the snapshot completion before returning
+ *
+ * @return true if the request should wait for completion
+ */
+ public boolean waitForCompletion() {
+ return waitForCompletion;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this request
+ */
+ public CreateSnapshotRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this request
+ */
+ public CreateSnapshotRequest settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings in JSON, YAML or properties format
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this request
+ */
+ public CreateSnapshotRequest settings(String source) {
+ this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this request
+ */
+ public CreateSnapshotRequest settings(Map<String, Object> source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ /**
+ * Returns repository-specific snapshot settings
+ *
+ * @return repository-specific snapshot settings
+ */
+ public Settings settings() {
+ return this.settings;
+ }
+
+ /**
+ * Set to true if global state should be stored as part of the snapshot
+ *
+ * @param includeGlobalState true if global state should be stored
+ * @return this request
+ */
+ public CreateSnapshotRequest includeGlobalState(boolean includeGlobalState) {
+ this.includeGlobalState = includeGlobalState;
+ return this;
+ }
+
+ /**
+ * Returns true if global state should be stored as part of the snapshot
+ *
+ * @return true if global state should be stored as part of the snapshot
+ */
+ public boolean includeGlobalState() {
+ return includeGlobalState;
+ }
+
+ /**
+ * Parses snapshot definition.
+ *
+ * @param source snapshot definition
+ * @return this request
+ */
+ public CreateSnapshotRequest source(XContentBuilder source) {
+ return source(source.bytes());
+ }
+
+ /**
+ * Parses snapshot definition.
+ *
+ * @param source snapshot definition
+ * @return this request
+ */
+ public CreateSnapshotRequest source(Map source) {
+ boolean ignoreUnavailable = IndicesOptions.lenient().ignoreUnavailable();
+ boolean allowNoIndices = IndicesOptions.lenient().allowNoIndices();
+ boolean expandWildcardsOpen = IndicesOptions.lenient().expandWildcardsOpen();
+ boolean expandWildcardsClosed = IndicesOptions.lenient().expandWildcardsClosed();
+ for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
+ String name = entry.getKey();
+ if (name.equals("indices")) {
+ if (entry.getValue() instanceof String) {
+ indices(Strings.splitStringByCommaToArray((String) entry.getValue()));
+ } else if (entry.getValue() instanceof ArrayList) {
+ indices((ArrayList<String>) entry.getValue());
+ } else {
+ throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
+ }
+ } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) {
+ ignoreUnavailable = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) {
+ allowNoIndices = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) {
+ expandWildcardsOpen = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) {
+ expandWildcardsClosed = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("partial")) {
+ partial(nodeBooleanValue(entry.getValue()));
+ } else if (name.equals("settings")) {
+ if (!(entry.getValue() instanceof Map)) {
+ throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");
+ }
+ settings((Map<String, Object>) entry.getValue());
+ } else if (name.equals("include_global_state")) {
+ includeGlobalState = nodeBooleanValue(entry.getValue());
+ }
+ }
+ indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
+ return this;
+ }
+
+ /**
+ * Parses snapshot definition. JSON, YAML and properties formats are supported
+ *
+ * @param source snapshot definition
+ * @return this request
+ */
+ public CreateSnapshotRequest source(String source) {
+ if (hasLength(source)) {
+ try {
+ return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Parses snapshot definition. JSON, YAML and properties formats are supported
+ *
+ * @param source snapshot definition
+ * @return this request
+ */
+ public CreateSnapshotRequest source(byte[] source) {
+ return source(source, 0, source.length);
+ }
+
+ /**
+ * Parses snapshot definition. JSON, YAML and properties formats are supported
+ *
+ * @param source snapshot definition
+ * @param offset offset
+ * @param length length
+ * @return this request
+ */
+ public CreateSnapshotRequest source(byte[] source, int offset, int length) {
+ if (length > 0) {
+ try {
+ return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Parses snapshot definition. JSON, YAML and properties formats are supported
+ *
+ * @param source snapshot definition
+ * @return this request
+ */
+ public CreateSnapshotRequest source(BytesReference source) {
+ try {
+ return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse snapshot source", e);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ snapshot = in.readString();
+ repository = in.readString();
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ settings = readSettingsFromStream(in);
+ includeGlobalState = in.readBoolean();
+ waitForCompletion = in.readBoolean();
+ partial = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(snapshot);
+ out.writeString(repository);
+ out.writeStringArray(indices);
+ indicesOptions.writeIndicesOptions(out);
+ writeSettingsToStream(settings, out);
+ out.writeBoolean(includeGlobalState);
+ out.writeBoolean(waitForCompletion);
+ out.writeBoolean(partial);
+ }
+}
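+// Editor's note: a sketch of the JSON body accepted by source(...) above, not part
+// of the original patch; the keys mirror exactly those parsed in source(Map):
+//
+// {
+//   "indices": "index_1,index_2",
+//   "ignore_unavailable": true,
+//   "partial": false,
+//   "settings": { },
+//   "include_global_state": false
+// }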
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java
new file mode 100644
index 0000000..81dae3b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.create;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * Create snapshot request builder
+ */
+public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<CreateSnapshotRequest, CreateSnapshotResponse, CreateSnapshotRequestBuilder> {
+
+ /**
+ * Constructs a new create snapshot request builder
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public CreateSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new CreateSnapshotRequest());
+ }
+
+ /**
+ * Constructs a new create snapshot request builder with specified repository and snapshot names
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param repository repository name
+ * @param snapshot snapshot name
+ */
+ public CreateSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String snapshot) {
+ super((InternalClusterAdminClient) clusterAdminClient, new CreateSnapshotRequest(repository, snapshot));
+ }
+
+ /**
+ * Sets the snapshot name
+ *
+ * @param snapshot snapshot name
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setSnapshot(String snapshot) {
+ request.snapshot(snapshot);
+ return this;
+ }
+
+ /**
+ * Sets the repository name
+ *
+ * @param repository repository name
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setRepository(String repository) {
+ request.repository(repository);
+ return this;
+ }
+
+ /**
+ * Sets a list of indices that should be included into the snapshot
+ * <p/>
+ * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will snapshot all indices with
+ * prefix "test" except index "test42". Aliases are supported. An empty list or {"_all"} will snapshot all open
+ * indices in the cluster.
+ *
+ * @param indices list of indices
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Specifies the indices options, i.e. what types of requested indices to ignore, for example indices that don't exist.
+ *
+ * @param indicesOptions the desired behaviour regarding indices options
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+ /**
+ * If set to true, the request will wait for the snapshot completion before returning.
+ *
+ * @param waitForCompletion true if the request should wait for the snapshot completion before returning
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
+ request.waitForCompletion(waitForCompletion);
+ return this;
+ }
+
+ /**
+ * If set to true, the request will snapshot indices with unavailable shards.
+ *
+ * @param partial true if the request should snapshot indices with unavailable shards
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setPartial(boolean partial) {
+ request.partial(partial);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setSettings(Settings settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setSettings(Settings.Builder settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings in YAML, JSON or properties format
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setSettings(String source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setSettings(Map<String, Object> settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Set to true if snapshot should include global cluster state
+ *
+ * @param includeGlobalState true if snapshot should include global cluster state
+ * @return this builder
+ */
+ public CreateSnapshotRequestBuilder setIncludeGlobalState(boolean includeGlobalState) {
+ request.includeGlobalState(includeGlobalState);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<CreateSnapshotResponse> listener) {
+ ((ClusterAdminClient) client).createSnapshot(request, listener);
+ }
+}
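+// Editor's note: illustrative builder usage, not part of the original patch. It
+// assumes a connected `Client client` and an already registered repository "my_repo";
+// `prepareCreateSnapshot` is the builder entry point on ClusterAdminClient.
+//
+// CreateSnapshotResponse resp = client.admin().cluster()
+//         .prepareCreateSnapshot("my_repo", "snapshot_1")
+//         .setIndices("index_1", "index_2")
+//         .setWaitForCompletion(true)
+//         .execute().actionGet();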
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java
new file mode 100644
index 0000000..c718abc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.create;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.snapshots.SnapshotInfo;
+
+import java.io.IOException;
+
+/**
+ * Create snapshot response
+ */
+public class CreateSnapshotResponse extends ActionResponse implements ToXContent {
+
+ @Nullable
+ private SnapshotInfo snapshotInfo;
+
+ CreateSnapshotResponse(@Nullable SnapshotInfo snapshotInfo) {
+ this.snapshotInfo = snapshotInfo;
+ }
+
+ CreateSnapshotResponse() {
+ }
+
+ /**
+ * Returns snapshot information if the snapshot was completed by the time this method returns, or null otherwise.
+ *
+ * @return snapshot information or null
+ */
+ public SnapshotInfo getSnapshotInfo() {
+ return snapshotInfo;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ snapshotInfo = SnapshotInfo.readOptionalSnapshotInfo(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalStreamable(snapshotInfo);
+ }
+
+ /**
+ * Returns HTTP status
+ * <p/>
+ * <ul>
+ * <li>{@link RestStatus#ACCEPTED} if snapshot is still in progress</li>
+ * <li>{@link RestStatus#OK} if snapshot was successful or partially successful</li>
+ * <li>{@link RestStatus#INTERNAL_SERVER_ERROR} if snapshot failed completely</li>
+ * </ul>
+ *
+ * @return HTTP status corresponding to the snapshot completion state
+ */
+ public RestStatus status() {
+ if (snapshotInfo == null) {
+ return RestStatus.ACCEPTED;
+ }
+ return snapshotInfo.status();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
+ static final XContentBuilderString ACCEPTED = new XContentBuilderString("accepted");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (snapshotInfo != null) {
+ builder.field(Fields.SNAPSHOT);
+ snapshotInfo.toXContent(builder, params);
+ } else {
+ builder.field(Fields.ACCEPTED, true);
+ }
+ return builder;
+ }
+}
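+// Editor's note: a hedged sketch of inspecting the response, not part of the
+// original patch. getSnapshotInfo() is null unless waitForCompletion was set and
+// the snapshot finished before the call returned:
+//
+// if (resp.getSnapshotInfo() != null) {
+//     System.out.println("snapshot finished with status " + resp.status());
+// } else {
+//     System.out.println("snapshot accepted; running in the background");
+// }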
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java
new file mode 100644
index 0000000..5b54e41
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.create;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for create snapshot operation
+ */
+public class TransportCreateSnapshotAction extends TransportMasterNodeOperationAction<CreateSnapshotRequest, CreateSnapshotResponse> {
+ private final SnapshotsService snapshotsService;
+
+ @Inject
+ public TransportCreateSnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, SnapshotsService snapshotsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.snapshotsService = snapshotsService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SNAPSHOT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return CreateSnapshotAction.NAME;
+ }
+
+ @Override
+ protected CreateSnapshotRequest newRequest() {
+ return new CreateSnapshotRequest();
+ }
+
+ @Override
+ protected CreateSnapshotResponse newResponse() {
+ return new CreateSnapshotResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(CreateSnapshotRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) throws ElasticsearchException {
+ SnapshotsService.SnapshotRequest snapshotRequest =
+ new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository())
+ .indices(request.indices())
+ .indicesOptions(request.indicesOptions())
+ .partial(request.partial())
+ .settings(request.settings())
+ .includeGlobalState(request.includeGlobalState())
+ .masterNodeTimeout(request.masterNodeTimeout());
+ snapshotsService.createSnapshot(snapshotRequest, new SnapshotsService.CreateSnapshotListener() {
+ @Override
+ public void onResponse() {
+ if (request.waitForCompletion()) {
+ snapshotsService.addListener(new SnapshotsService.SnapshotCompletionListener() {
+ SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
+
+ @Override
+ public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
+ if (this.snapshotId.equals(snapshotId)) {
+ listener.onResponse(new CreateSnapshotResponse(snapshot));
+ snapshotsService.removeListener(this);
+ }
+ }
+
+ @Override
+ public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
+ if (this.snapshotId.equals(snapshotId)) {
+ listener.onFailure(t);
+ snapshotsService.removeListener(this);
+ }
+ }
+ });
+ } else {
+ listener.onResponse(new CreateSnapshotResponse());
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java
new file mode 100644
index 0000000..2589dee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.delete;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Delete snapshot action
+ */
+public class DeleteSnapshotAction extends ClusterAction<DeleteSnapshotRequest, DeleteSnapshotResponse, DeleteSnapshotRequestBuilder> {
+
+ public static final DeleteSnapshotAction INSTANCE = new DeleteSnapshotAction();
+ public static final String NAME = "cluster/snapshot/delete";
+
+ private DeleteSnapshotAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteSnapshotResponse newResponse() {
+ return new DeleteSnapshotResponse();
+ }
+
+ @Override
+ public DeleteSnapshotRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new DeleteSnapshotRequestBuilder(client);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java
new file mode 100644
index 0000000..5d6c554
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Delete snapshot request
+ * <p/>
+ * Delete snapshot request removes the snapshot record from the repository and cleans up all
+ * files that are associated with this particular snapshot. All files that are shared with
+ * at least one other existing snapshot are left intact.
+ */
+public class DeleteSnapshotRequest extends MasterNodeOperationRequest<DeleteSnapshotRequest> {
+
+ private String repository;
+
+ private String snapshot;
+
+ /**
+ * Constructs a new delete snapshot request
+ */
+ public DeleteSnapshotRequest() {
+ }
+
+ /**
+ * Constructs a new delete snapshot request with repository and snapshot names
+ *
+ * @param repository repository name
+ * @param snapshot snapshot name
+ */
+ public DeleteSnapshotRequest(String repository, String snapshot) {
+ this.repository = repository;
+ this.snapshot = snapshot;
+ }
+
+ /**
+ * Constructs a new delete snapshot request with repository name
+ *
+ * @param repository repository name
+ */
+ public DeleteSnapshotRequest(String repository) {
+ this.repository = repository;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (repository == null) {
+ validationException = addValidationError("repository is missing", validationException);
+ }
+ if (snapshot == null) {
+ validationException = addValidationError("snapshot is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets repository name
+ *
+ * @param repository repository name
+ * @return this request
+ */
+ public DeleteSnapshotRequest repository(String repository) {
+ this.repository = repository;
+ return this;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String repository() {
+ return this.repository;
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String snapshot() {
+ return this.snapshot;
+ }
+
+ /**
+ * Sets snapshot name
+ *
+ * @param snapshot snapshot name
+ * @return this request
+ */
+ public DeleteSnapshotRequest snapshot(String snapshot) {
+ this.snapshot = snapshot;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ repository = in.readString();
+ snapshot = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(repository);
+ out.writeString(snapshot);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java
new file mode 100644
index 0000000..96de7ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ * Delete snapshot request builder
+ */
+public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteSnapshotRequest, DeleteSnapshotResponse, DeleteSnapshotRequestBuilder> {
+
+ /**
+ * Constructs delete snapshot request builder
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public DeleteSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new DeleteSnapshotRequest());
+ }
+
+ /**
+ * Constructs delete snapshot request builder with specified repository and snapshot names
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param repository repository name
+ * @param snapshot snapshot name
+ */
+ public DeleteSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String snapshot) {
+ super((InternalClusterAdminClient) clusterAdminClient, new DeleteSnapshotRequest(repository, snapshot));
+ }
+
+ /**
+ * Sets the repository name
+ *
+ * @param repository repository name
+ * @return this builder
+ */
+ public DeleteSnapshotRequestBuilder setRepository(String repository) {
+ request.repository(repository);
+ return this;
+ }
+
+ /**
+ * Sets the snapshot name
+ *
+ * @param snapshot snapshot name
+ * @return this builder
+ */
+ public DeleteSnapshotRequestBuilder setSnapshot(String snapshot) {
+ request.snapshot(snapshot);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteSnapshotResponse> listener) {
+ ((ClusterAdminClient) client).deleteSnapshot(request, listener);
+ }
+}
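+// Editor's note: illustrative usage, not part of the original patch, assuming a
+// connected `Client client`; `prepareDeleteSnapshot` is the builder entry point
+// on ClusterAdminClient.
+//
+// DeleteSnapshotResponse resp = client.admin().cluster()
+//         .prepareDeleteSnapshot("my_repo", "snapshot_1")
+//         .execute().actionGet();
+// boolean acknowledged = resp.isAcknowledged();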
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java
new file mode 100644
index 0000000..0783f30
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.delete;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Delete snapshot response
+ */
+public class DeleteSnapshotResponse extends AcknowledgedResponse {
+
+ DeleteSnapshotResponse() {
+ }
+
+ DeleteSnapshotResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java
new file mode 100644
index 0000000..07bfb4d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.delete;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.SnapshotsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for delete snapshot operation
+ */
+public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationAction<DeleteSnapshotRequest, DeleteSnapshotResponse> {
+ private final SnapshotsService snapshotsService;
+
+ @Inject
+ public TransportDeleteSnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, SnapshotsService snapshotsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.snapshotsService = snapshotsService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteSnapshotAction.NAME;
+ }
+
+ @Override
+ protected DeleteSnapshotRequest newRequest() {
+ return new DeleteSnapshotRequest();
+ }
+
+ @Override
+ protected DeleteSnapshotResponse newResponse() {
+ return new DeleteSnapshotResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(DeleteSnapshotRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener<DeleteSnapshotResponse> listener) throws ElasticsearchException {
+ SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
+ snapshotsService.deleteSnapshot(snapshotId, new SnapshotsService.DeleteSnapshotListener() {
+ @Override
+ public void onResponse() {
+ listener.onResponse(new DeleteSnapshotResponse(true));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java
new file mode 100644
index 0000000..6b5fb7e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.get;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Get snapshots action
+ */
+public class GetSnapshotsAction extends ClusterAction<GetSnapshotsRequest, GetSnapshotsResponse, GetSnapshotsRequestBuilder> {
+
+ public static final GetSnapshotsAction INSTANCE = new GetSnapshotsAction();
+ public static final String NAME = "cluster/snapshot/get";
+
+ private GetSnapshotsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetSnapshotsResponse newResponse() {
+ return new GetSnapshotsResponse();
+ }
+
+ @Override
+ public GetSnapshotsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new GetSnapshotsRequestBuilder(client);
+ }
+}
+
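The INSTANCE/NAME pair follows the generic action pattern used throughout the client layer: besides the convenience methods, a request can be executed directly through the action constant. A sketch, assuming a ClusterAdminClient named clusterClient:

    GetSnapshotsRequest request = new GetSnapshotsRequest("my_repository");
    // Execute via the action singleton; equivalent to the getSnapshots(...) convenience method.
    GetSnapshotsResponse response =
            clusterClient.execute(GetSnapshotsAction.INSTANCE, request).actionGet();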
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java
new file mode 100644
index 0000000..a2c3fec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.get;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Get snapshots request
+ */
+public class GetSnapshotsRequest extends MasterNodeOperationRequest<GetSnapshotsRequest> {
+
+ private String repository;
+
+ private String[] snapshots = Strings.EMPTY_ARRAY;
+
+ GetSnapshotsRequest() {
+ }
+
+ /**
+ * Constructs a new get snapshots request with given repository name and list of snapshots
+ *
+ * @param repository repository name
+ * @param snapshots list of snapshots
+ */
+ public GetSnapshotsRequest(String repository, String[] snapshots) {
+ this.repository = repository;
+ this.snapshots = snapshots;
+ }
+
+ /**
+ * Constructs a new get snapshots request with given repository name
+ *
+ * @param repository repository name
+ */
+ public GetSnapshotsRequest(String repository) {
+ this.repository = repository;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (repository == null) {
+ validationException = addValidationError("repository is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets repository name
+ *
+ * @param repository repository name
+ * @return this request
+ */
+ public GetSnapshotsRequest repository(String repository) {
+ this.repository = repository;
+ return this;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String repository() {
+ return this.repository;
+ }
+
+ /**
+ * Returns the names of the snapshots.
+ *
+ * @return the names of snapshots
+ */
+ public String[] snapshots() {
+ return this.snapshots;
+ }
+
+ /**
+ * Sets the list of snapshots to be returned
+ *
+ * @param snapshots list of snapshots
+ * @return this request
+ */
+ public GetSnapshotsRequest snapshots(String[] snapshots) {
+ this.snapshots = snapshots;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ repository = in.readString();
+ snapshots = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(repository);
+ out.writeStringArray(snapshots);
+ }
+}
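A sketch of driving this request asynchronously with an ActionListener instead of a builder (assumes a ClusterAdminClient named clusterClient):

    GetSnapshotsRequest request =
            new GetSnapshotsRequest("my_repository", new String[]{"snapshot_1", "snapshot_2"});
    clusterClient.getSnapshots(request, new ActionListener<GetSnapshotsResponse>() {
        @Override
        public void onResponse(GetSnapshotsResponse response) {
            // Inspect response.getSnapshots() here.
        }

        @Override
        public void onFailure(Throwable t) {
            // Validation failures ("repository is missing") also arrive here.
        }
    });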
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java
new file mode 100644
index 0000000..f3dc2ab
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.get;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ * Get snapshots request builder
+ */
+public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder<GetSnapshotsRequest, GetSnapshotsResponse, GetSnapshotsRequestBuilder> {
+
+ /**
+ * Constructs a new get snapshots request
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public GetSnapshotsRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new GetSnapshotsRequest());
+ }
+
+ /**
+ * Constructs a new get snapshots request for the specified repository
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param repository repository name
+ */
+ public GetSnapshotsRequestBuilder(ClusterAdminClient clusterAdminClient, String repository) {
+ super((InternalClusterAdminClient) clusterAdminClient, new GetSnapshotsRequest(repository));
+ }
+
+ /**
+ * Sets the repository name
+ *
+ * @param repository repository name
+ * @return this builder
+ */
+ public GetSnapshotsRequestBuilder setRepository(String repository) {
+ request.repository(repository);
+ return this;
+ }
+
+ /**
+ * Sets list of snapshots to return
+ *
+ * @param snapshots list of snapshots
+ * @return this builder
+ */
+ public GetSnapshotsRequestBuilder setSnapshots(String... snapshots) {
+ request.snapshots(snapshots);
+ return this;
+ }
+
+ /**
+ * Adds additional snapshots to the list of snapshots to return
+ *
+ * @param snapshots additional snapshots
+ * @return this builder
+ */
+ public GetSnapshotsRequestBuilder addSnapshots(String... snapshots) {
+ request.snapshots(ObjectArrays.concat(request.snapshots(), snapshots, String.class));
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetSnapshotsResponse> listener) {
+ ((ClusterAdminClient) client).getSnapshots(request, listener);
+ }
+}
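Builder usage in a nutshell (sketch; note that leaving the snapshots list empty asks for every snapshot in the repository, as TransportGetSnapshotsAction below makes explicit):

    // All snapshots in the repository.
    GetSnapshotsResponse all = client.admin().cluster()
            .prepareGetSnapshots("my_repository")
            .execute().actionGet();

    // Specific snapshots, accumulated with addSnapshots().
    GetSnapshotsResponse some = client.admin().cluster()
            .prepareGetSnapshots("my_repository")
            .setSnapshots("snapshot_1")
            .addSnapshots("snapshot_2")
            .execute().actionGet();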
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
new file mode 100644
index 0000000..f0cedd9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.get;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.snapshots.SnapshotInfo;
+
+import java.io.IOException;
+
+/**
+ * Get snapshots response
+ */
+public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
+
+ private ImmutableList<SnapshotInfo> snapshots = ImmutableList.of();
+
+ GetSnapshotsResponse() {
+ }
+
+ GetSnapshotsResponse(ImmutableList<SnapshotInfo> snapshots) {
+ this.snapshots = snapshots;
+ }
+
+ /**
+ * Returns the list of snapshots
+ *
+ * @return the list of snapshots
+ */
+ public ImmutableList<SnapshotInfo> getSnapshots() {
+ return snapshots;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ ImmutableList.Builder<SnapshotInfo> builder = ImmutableList.builder();
+ for (int i = 0; i < size; i++) {
+ builder.add(SnapshotInfo.readSnapshotInfo(in));
+ }
+ snapshots = builder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(snapshots.size());
+ for (SnapshotInfo snapshotInfo : snapshots) {
+ snapshotInfo.writeTo(out);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startArray(Fields.SNAPSHOTS);
+ for (SnapshotInfo snapshotInfo : snapshots) {
+ snapshotInfo.toXContent(builder, params);
+ }
+ builder.endArray();
+ return builder;
+ }
+
+}
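Consuming the response is a matter of iterating the SnapshotInfo list (sketch; the name() and state() accessors are assumed from the org.elasticsearch.snapshots.SnapshotInfo class added elsewhere in this patch):

    for (SnapshotInfo info : response.getSnapshots()) {
        // One entry per snapshot, in the order returned by the snapshots service.
        System.out.println(info.name() + " -> " + info.state());
    }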
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java
new file mode 100644
index 0000000..425d545
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.get;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for get snapshots operation
+ */
+public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAction<GetSnapshotsRequest, GetSnapshotsResponse> {
+ private final SnapshotsService snapshotsService;
+
+ @Inject
+ public TransportGetSnapshotsAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, SnapshotsService snapshotsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.snapshotsService = snapshotsService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SNAPSHOT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetSnapshotsAction.NAME;
+ }
+
+ @Override
+ protected GetSnapshotsRequest newRequest() {
+ return new GetSnapshotsRequest();
+ }
+
+ @Override
+ protected GetSnapshotsResponse newResponse() {
+ return new GetSnapshotsResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(GetSnapshotsRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) throws ElasticsearchException {
+ SnapshotId[] snapshotIds = new SnapshotId[request.snapshots().length];
+ for (int i = 0; i < snapshotIds.length; i++) {
+ snapshotIds[i] = new SnapshotId(request.repository(), request.snapshots()[i]);
+ }
+
+ try {
+ ImmutableList.Builder<SnapshotInfo> snapshotInfoBuilder = ImmutableList.builder();
+ if (snapshotIds.length > 0) {
+ for (SnapshotId snapshotId : snapshotIds) {
+ snapshotInfoBuilder.add(new SnapshotInfo(snapshotsService.snapshot(snapshotId)));
+ }
+ } else {
+ ImmutableList<Snapshot> snapshots = snapshotsService.snapshots(request.repository());
+ for (Snapshot snapshot : snapshots) {
+ snapshotInfoBuilder.add(new SnapshotInfo(snapshot));
+ }
+ }
+ listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder.build()));
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java
new file mode 100644
index 0000000..859a22c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.restore;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Restore snapshot action
+ */
+public class RestoreSnapshotAction extends ClusterAction<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
+
+ public static final RestoreSnapshotAction INSTANCE = new RestoreSnapshotAction();
+ public static final String NAME = "cluster/snapshot/restore";
+
+ private RestoreSnapshotAction() {
+ super(NAME);
+ }
+
+ @Override
+ public RestoreSnapshotResponse newResponse() {
+ return new RestoreSnapshotResponse();
+ }
+
+ @Override
+ public RestoreSnapshotRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new RestoreSnapshotRequestBuilder(client);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java
new file mode 100644
index 0000000..39aa0bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java
@@ -0,0 +1,530 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.restore;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.Strings.hasLength;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+
+/**
+ * Restore snapshot request
+ */
+public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSnapshotRequest> {
+
+ private String snapshot;
+
+ private String repository;
+
+ private String[] indices = Strings.EMPTY_ARRAY;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ private String renamePattern;
+
+ private String renameReplacement;
+
+ private boolean waitForCompletion;
+
+ private boolean includeGlobalState = true;
+
+ private Settings settings = EMPTY_SETTINGS;
+
+ RestoreSnapshotRequest() {
+ }
+
+ /**
+ * Constructs a new restore snapshot request with the provided repository and snapshot names.
+ *
+ * @param repository repository name
+ * @param snapshot snapshot name
+ */
+ public RestoreSnapshotRequest(String repository, String snapshot) {
+ this.snapshot = snapshot;
+ this.repository = repository;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (snapshot == null) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ if (repository == null) {
+ validationException = addValidationError("repository is missing", validationException);
+ }
+ if (indices == null) {
+ validationException = addValidationError("indices are missing", validationException);
+ }
+ if (indicesOptions == null) {
+ validationException = addValidationError("indicesOptions is missing", validationException);
+ }
+ if (settings == null) {
+ validationException = addValidationError("settings are missing", validationException);
+ }
+
+ return validationException;
+ }
+
+ /**
+ * Sets the name of the snapshot.
+ *
+ * @param snapshot snapshot name
+ * @return this request
+ */
+ public RestoreSnapshotRequest snapshot(String snapshot) {
+ this.snapshot = snapshot;
+ return this;
+ }
+
+ /**
+ * Returns the name of the snapshot.
+ *
+ * @return snapshot name
+ */
+ public String snapshot() {
+ return this.snapshot;
+ }
+
+ /**
+ * Sets repository name
+ *
+ * @param repository repository name
+ * @return this request
+ */
+ public RestoreSnapshotRequest repository(String repository) {
+ this.repository = repository;
+ return this;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String repository() {
+ return this.repository;
+ }
+
+ /**
+ * Sets the list of indices that should be restored from snapshot
+ * <p/>
+ * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
+ * prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
+ * indices in the snapshot.
+ *
+ * @param indices list of indices
+ * @return this request
+ */
+ public RestoreSnapshotRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Sets the list of indices that should be restored from snapshot
+ * <p/>
+ * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
+ * prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
+ * indices in the snapshot.
+ *
+ * @param indices list of indices
+ * @return this request
+ */
+ public RestoreSnapshotRequest indices(List<String> indices) {
+ this.indices = indices.toArray(new String[indices.size()]);
+ return this;
+ }
+
+ /**
+ * Returns list of indices that should be restored from snapshot
+ *
+ * @return list of indices
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Returns the options that control how unavailable requested indices (for example, indices
+ * that don't exist) are ignored and how wildcard expressions are expanded.
+ *
+ * @return the desired behaviour regarding unavailable indices and wildcard expressions
+ */
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /**
+ * Sets the options that control how unavailable requested indices (for example, indices
+ * that don't exist) are ignored and how wildcard expressions are expanded.
+ *
+ * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
+ * @return this request
+ */
+ public RestoreSnapshotRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * Sets rename pattern that should be applied to restored indices.
+ * <p/>
+ * Indices that match the rename pattern will be renamed according to {@link #renameReplacement(String)}. The
+ * rename pattern is applied according to {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}.
+ * The request will fail if two or more indices would be renamed to the same name.
+ *
+ * @param renamePattern rename pattern
+ * @return this request
+ */
+ public RestoreSnapshotRequest renamePattern(String renamePattern) {
+ this.renamePattern = renamePattern;
+ return this;
+ }
+
+ /**
+ * Returns rename pattern
+ *
+ * @return rename pattern
+ */
+ public String renamePattern() {
+ return renamePattern;
+ }
+
+ /**
+ * Sets rename replacement
+ * <p/>
+ * See {@link #renamePattern(String)} for more information.
+ *
+ * @param renameReplacement rename replacement
+ * @return this request
+ */
+ public RestoreSnapshotRequest renameReplacement(String renameReplacement) {
+ this.renameReplacement = renameReplacement;
+ return this;
+ }
+
+ /**
+ * Returns rename replacement
+ *
+ * @return rename replacement
+ */
+ public String renameReplacement() {
+ return renameReplacement;
+ }
+
+ /**
+ * If this parameter is set to true, the operation will wait for the restore process to complete before returning.
+ *
+ * @param waitForCompletion if true the operation will wait for completion
+ * @return this request
+ */
+ public RestoreSnapshotRequest waitForCompletion(boolean waitForCompletion) {
+ this.waitForCompletion = waitForCompletion;
+ return this;
+ }
+
+ /**
+ * Returns wait for completion setting
+ *
+ * @return true if the operation will wait for completion
+ */
+ public boolean waitForCompletion() {
+ return waitForCompletion;
+ }
+
+ /**
+ * Sets repository-specific restore settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this request
+ */
+ public RestoreSnapshotRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this request
+ */
+ public RestoreSnapshotRequest settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings in JSON, YAML or properties format
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this request
+ */
+ public RestoreSnapshotRequest settings(String source) {
+ this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this request
+ */
+ public RestoreSnapshotRequest settings(Map<String, Object> source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ /**
+ * Returns repository-specific restore settings
+ *
+ * @return restore settings
+ */
+ public Settings settings() {
+ return this.settings;
+ }
+
+ /**
+ * If set to true the restore procedure will restore global cluster state.
+ * <p/>
+ * The global cluster state includes persistent settings and index template definitions.
+ *
+ * @param includeGlobalState true if global state should be restored from the snapshot
+ * @return this request
+ */
+ public RestoreSnapshotRequest includeGlobalState(boolean includeGlobalState) {
+ this.includeGlobalState = includeGlobalState;
+ return this;
+ }
+
+ /**
+ * Returns true if global state should be restored from this snapshot
+ *
+ * @return true if global state should be restored
+ */
+ public boolean includeGlobalState() {
+ return includeGlobalState;
+ }
+
+ /**
+ * Parses restore definition
+ *
+ * @param source restore definition
+ * @return this request
+ */
+ public RestoreSnapshotRequest source(XContentBuilder source) {
+ try {
+ return source(source.bytes());
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to build json for repository request", e);
+ }
+ }
+
+ /**
+ * Parses restore definition
+ *
+ * @param source restore definition
+ * @return this request
+ */
+ public RestoreSnapshotRequest source(Map source) {
+ boolean ignoreUnavailable = IndicesOptions.lenient().ignoreUnavailable();
+ boolean allowNoIndices = IndicesOptions.lenient().allowNoIndices();
+ boolean expandWildcardsOpen = IndicesOptions.lenient().expandWildcardsOpen();
+ boolean expandWildcardsClosed = IndicesOptions.lenient().expandWildcardsClosed();
+
+ for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
+ String name = entry.getKey();
+ if (name.equals("indices")) {
+ if (entry.getValue() instanceof String) {
+ indices(Strings.splitStringByCommaToArray((String) entry.getValue()));
+ } else if (entry.getValue() instanceof ArrayList) {
+ indices((ArrayList<String>) entry.getValue());
+ } else {
+ throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
+ }
+ } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) {
+ ignoreUnavailable = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) {
+ allowNoIndices = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) {
+ expandWildcardsOpen = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) {
+ expandWildcardsClosed = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("settings")) {
+ if (!(entry.getValue() instanceof Map)) {
+ throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");
+ }
+ settings((Map<String, Object>) entry.getValue());
+ } else if (name.equals("include_global_state")) {
+ includeGlobalState = nodeBooleanValue(entry.getValue());
+ } else if (name.equals("rename_pattern")) {
+ if (entry.getValue() instanceof String) {
+ renamePattern((String) entry.getValue());
+ } else {
+ throw new ElasticsearchIllegalArgumentException("malformed rename_pattern");
+ }
+ } else if (name.equals("rename_replacement")) {
+ if (entry.getValue() instanceof String) {
+ renameReplacement((String) entry.getValue());
+ } else {
+ throw new ElasticsearchIllegalArgumentException("malformed rename_replacement");
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name);
+ }
+ }
+ indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
+ return this;
+ }
+
+ /**
+ * Parses restore definition
+ * <p/>
+ * JSON, YAML and properties formats are supported
+ *
+ * @param source restore definition
+ * @return this request
+ */
+ public RestoreSnapshotRequest source(String source) {
+ if (hasLength(source)) {
+ try {
+ return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Parses restore definition
+ * <p/>
+ * JSON, YAML and properties formats are supported
+ *
+ * @param source restore definition
+ * @return this request
+ */
+ public RestoreSnapshotRequest source(byte[] source) {
+ return source(source, 0, source.length);
+ }
+
+ /**
+ * Parses restore definition
+ * <p/>
+ * JSON, YAML and properties formats are supported
+ *
+ * @param source restore definition
+ * @param offset offset
+ * @param length length
+ * @return this request
+ */
+ public RestoreSnapshotRequest source(byte[] source, int offset, int length) {
+ if (length > 0) {
+ try {
+ return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Parses restore definition
+ * <p/>
+ * JSON, YAML and properties formats are supported
+ *
+ * @param source restore definition
+ * @return this request
+ */
+ public RestoreSnapshotRequest source(BytesReference source) {
+ try {
+ return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ snapshot = in.readString();
+ repository = in.readString();
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ renamePattern = in.readOptionalString();
+ renameReplacement = in.readOptionalString();
+ waitForCompletion = in.readBoolean();
+ includeGlobalState = in.readBoolean();
+ settings = readSettingsFromStream(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(snapshot);
+ out.writeString(repository);
+ out.writeStringArray(indices);
+ indicesOptions.writeIndicesOptions(out);
+ out.writeOptionalString(renamePattern);
+ out.writeOptionalString(renameReplacement);
+ out.writeBoolean(waitForCompletion);
+ out.writeBoolean(includeGlobalState);
+ writeSettingsToStream(settings, out);
+ }
+}
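The source(...) overloads accept the same body as the REST restore endpoint; a sketch of the JSON form dispatched through source(Map) above (the keys match the names handled in that method):

    RestoreSnapshotRequest request = new RestoreSnapshotRequest("my_repository", "my_snapshot")
            .source("{"
                    + " \"indices\": \"index_1,index_2\","
                    + " \"ignore_unavailable\": true,"
                    + " \"rename_pattern\": \"index_(.+)\","
                    + " \"rename_replacement\": \"restored_index_$1\","
                    + " \"include_global_state\": false"
                    + "}");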
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java
new file mode 100644
index 0000000..bc3ec41
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.restore;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * Restore snapshot request builder
+ */
+public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
+
+ /**
+ * Constructs a new restore snapshot request builder
+ *
+ * @param clusterAdminClient cluster admin client
+ */
+ public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
+ super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest());
+ }
+
+ /**
+ * Constructs a new restore snapshot request builder with the specified repository and snapshot names
+ *
+ * @param clusterAdminClient cluster admin client
+ * @param repository repository name
+ * @param name snapshot name
+ */
+ public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String name) {
+ super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest(repository, name));
+ }
+
+
+ /**
+ * Sets snapshot name
+ *
+ * @param snapshot snapshot name
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setSnapshot(String snapshot) {
+ request.snapshot(snapshot);
+ return this;
+ }
+
+ /**
+ * Sets repository name
+ *
+ * @param repository repository name
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setRepository(String repository) {
+ request.repository(repository);
+ return this;
+ }
+
+ /**
+ * Sets the list of indices that should be restored from snapshot
+ * <p/>
+ * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
+ * prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
+ * indices in the snapshot.
+ *
+ * @param indices list of indices
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Sets the options that control how unavailable requested indices (for example, indices
+ * that don't exist) are ignored and how wildcard expressions are expanded.
+ *
+ * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
+ * @return this request
+ */
+ public RestoreSnapshotRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+
+ /**
+ * Sets rename pattern that should be applied to restored indices.
+ * <p/>
+ * Indices that match the rename pattern will be renamed according to {@link #setRenameReplacement(String)}. The
+ * rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}
+ * The request will fail if two or more indices will be renamed into the same name.
+ *
+ * @param renamePattern rename pattern
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setRenamePattern(String renamePattern) {
+ request.renamePattern(renamePattern);
+ return this;
+ }
+
+ /**
+ * Sets rename replacement
+ * <p/>
+ * See {@link #setRenamePattern(String)} for more information.
+ *
+ * @param renameReplacement rename replacement
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setRenameReplacement(String renameReplacement) {
+ request.renameReplacement(renameReplacement);
+ return this;
+ }
+
+
+ /**
+ * Sets repository-specific restore settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setSettings(Settings settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings.
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param settings repository-specific snapshot settings
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setSettings(Settings.Builder settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings in JSON, YAML or properties format
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setSettings(String source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings
+ * <p/>
+ * See repository documentation for more information.
+ *
+ * @param source repository-specific snapshot settings
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setSettings(Map<String, Object> source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * If this parameter is set to true, the operation will wait for the restore process to complete before returning.
+ *
+ * @param waitForCompletion if true the operation will wait for completion
+ * @return this builder
+ */
+ public RestoreSnapshotRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
+ request.waitForCompletion(waitForCompletion);
+ return this;
+ }
+
+ /**
+ * If set to true the restore procedure will restore global cluster state.
+ * <p/>
+ * The global cluster state includes persistent settings and index template definitions.
+ *
+ * @param restoreGlobalState true if global state should be restored from the snapshot
+ * @return this request
+ */
+ public RestoreSnapshotRequestBuilder setRestoreGlobalState(boolean restoreGlobalState) {
+ request.includeGlobalState(restoreGlobalState);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<RestoreSnapshotResponse> listener) {
+ ((ClusterAdminClient) client).restoreSnapshot(request, listener);
+ }
+}
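Put together, a typical restore through the builder looks like this (illustrative sketch, assuming a Client named client):

    RestoreSnapshotResponse response = client.admin().cluster()
            .prepareRestoreSnapshot("my_repository", "my_snapshot")
            .setIndices("index_1")
            .setRenamePattern("index_(.+)")
            .setRenameReplacement("restored_index_$1")
            .setWaitForCompletion(true)
            .execute().actionGet();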
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
new file mode 100644
index 0000000..0101b95
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.restore;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.snapshots.RestoreInfo;
+
+import java.io.IOException;
+
+/**
+ * Contains information about a restored snapshot
+ */
+public class RestoreSnapshotResponse extends ActionResponse implements ToXContent {
+
+ @Nullable
+ private RestoreInfo restoreInfo;
+
+ RestoreSnapshotResponse(@Nullable RestoreInfo restoreInfo) {
+ this.restoreInfo = restoreInfo;
+ }
+
+ RestoreSnapshotResponse() {
+ }
+
+ /**
+ * Returns restore information if the restore was completed before this method returned; null otherwise
+ *
+ * @return restore information or null
+ */
+ public RestoreInfo getRestoreInfo() {
+ return restoreInfo;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ restoreInfo = RestoreInfo.readOptionalRestoreInfo(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalStreamable(restoreInfo);
+ }
+
+ public RestStatus status() {
+ if (restoreInfo == null) {
+ return RestStatus.ACCEPTED;
+ }
+ return restoreInfo.status();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
+ static final XContentBuilderString ACCEPTED = new XContentBuilderString("accepted");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ if (restoreInfo != null) {
+ builder.field(Fields.SNAPSHOT);
+ restoreInfo.toXContent(builder, params);
+ } else {
+ builder.field(Fields.ACCEPTED, true);
+ }
+ return builder;
+ }
+}
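Since the restore runs in the background unless waitForCompletion is set, callers should branch on whether restore info is present (sketch; the failedShards() accessor is assumed from the RestoreInfo class):

    RestoreInfo info = response.getRestoreInfo();
    if (info == null) {
        // Restore was merely accepted; status() reports RestStatus.ACCEPTED.
    } else {
        // Restore finished before the response was built.
        boolean clean = info.failedShards() == 0;
    }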
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
new file mode 100644
index 0000000..e3fc5b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.snapshots.restore;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.RestoreInfo;
+import org.elasticsearch.snapshots.RestoreService;
+import org.elasticsearch.snapshots.RestoreService.RestoreSnapshotListener;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action for restore snapshot operation
+ */
+public class TransportRestoreSnapshotAction extends TransportMasterNodeOperationAction<RestoreSnapshotRequest, RestoreSnapshotResponse> {
+ private final RestoreService restoreService;
+
+ @Inject
+ public TransportRestoreSnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, RestoreService restoreService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.restoreService = restoreService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SNAPSHOT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return RestoreSnapshotAction.NAME;
+ }
+
+ @Override
+ protected RestoreSnapshotRequest newRequest() {
+ return new RestoreSnapshotRequest();
+ }
+
+ @Override
+ protected RestoreSnapshotResponse newResponse() {
+ return new RestoreSnapshotResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(RestoreSnapshotRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) throws ElasticsearchException {
+ RestoreService.RestoreRequest restoreRequest =
+ new RestoreService.RestoreRequest("restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot())
+ .indices(request.indices())
+ .indicesOptions(request.indicesOptions())
+ .renamePattern(request.renamePattern())
+ .renameReplacement(request.renameReplacement())
+ .includeGlobalState(request.includeGlobalState())
+ .settings(request.settings())
+ .masterNodeTimeout(request.masterNodeTimeout());
+ restoreService.restoreSnapshot(restoreRequest, new RestoreSnapshotListener() {
+ @Override
+ public void onResponse(RestoreInfo restoreInfo) {
+ if (restoreInfo == null) {
+ if (request.waitForCompletion()) {
+ restoreService.addListener(new RestoreService.RestoreCompletionListener() {
+ SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
+
+ @Override
+ public void onRestoreCompletion(SnapshotId snapshotId, RestoreInfo snapshot) {
+ if (this.snapshotId.equals(snapshotId)) {
+ listener.onResponse(new RestoreSnapshotResponse(snapshot));
+ restoreService.removeListener(this);
+ }
+ }
+ });
+ } else {
+ listener.onResponse(new RestoreSnapshotResponse(null));
+ }
+ } else {
+ listener.onResponse(new RestoreSnapshotResponse(restoreInfo));
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java
new file mode 100644
index 0000000..d55647b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.state;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class ClusterStateAction extends ClusterAction<ClusterStateRequest, ClusterStateResponse, ClusterStateRequestBuilder> {
+
+ public static final ClusterStateAction INSTANCE = new ClusterStateAction();
+ public static final String NAME = "cluster/state";
+
+ private ClusterStateAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterStateResponse newResponse() {
+ return new ClusterStateResponse();
+ }
+
+ @Override
+ public ClusterStateRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new ClusterStateRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java
new file mode 100644
index 0000000..9332b46
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.state;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ClusterStateRequest extends MasterNodeReadOperationRequest<ClusterStateRequest> {
+
+ private boolean routingTable = true;
+ private boolean nodes = true;
+ private boolean metaData = true;
+ private boolean blocks = true;
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private String[] indexTemplates = Strings.EMPTY_ARRAY;
+
+ public ClusterStateRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ public ClusterStateRequest all() {
+ routingTable = true;
+ nodes = true;
+ metaData = true;
+ blocks = true;
+ indices = Strings.EMPTY_ARRAY;
+ indexTemplates = Strings.EMPTY_ARRAY;
+ return this;
+ }
+
+ public ClusterStateRequest clear() {
+ routingTable = false;
+ nodes = false;
+ metaData = false;
+ blocks = false;
+ indices = Strings.EMPTY_ARRAY;
+ indexTemplates = Strings.EMPTY_ARRAY;
+ return this;
+ }
+
+ public boolean routingTable() {
+ return routingTable;
+ }
+
+ public ClusterStateRequest routingTable(boolean routingTable) {
+ this.routingTable = routingTable;
+ return this;
+ }
+
+ public boolean nodes() {
+ return nodes;
+ }
+
+ public ClusterStateRequest nodes(boolean nodes) {
+ this.nodes = nodes;
+ return this;
+ }
+
+ public boolean metaData() {
+ return metaData;
+ }
+
+ public ClusterStateRequest metaData(boolean metaData) {
+ this.metaData = metaData;
+ return this;
+ }
+
+ public boolean blocks() {
+ return blocks;
+ }
+
+ public ClusterStateRequest blocks(boolean blocks) {
+ this.blocks = blocks;
+ return this;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public ClusterStateRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public String[] indexTemplates() {
+ return this.indexTemplates;
+ }
+
+ public ClusterStateRequest indexTemplates(String... indexTemplates) {
+ this.indexTemplates = indexTemplates;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ routingTable = in.readBoolean();
+ nodes = in.readBoolean();
+ metaData = in.readBoolean();
+ blocks = in.readBoolean();
+ indices = in.readStringArray();
+ indexTemplates = in.readStringArray();
+ readLocal(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(routingTable);
+ out.writeBoolean(nodes);
+ out.writeBoolean(metaData);
+ out.writeBoolean(blocks);
+ out.writeStringArray(indices);
+ out.writeStringArray(indexTemplates);
+ writeLocal(out);
+ }
+}
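A sketch of a narrowed request: clear() drops everything, then the parts of interest are opted back in.

    // Only the metadata of two indices, nothing else.
    ClusterStateRequest request = new ClusterStateRequest()
            .clear()
            .metaData(true)
            .indices("index_1", "index_2");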
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java
new file mode 100644
index 0000000..e47dbdc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.state;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ *
+ */
+public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterStateRequest, ClusterStateResponse, ClusterStateRequestBuilder> {
+
+ public ClusterStateRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new ClusterStateRequest());
+ }
+
+ /**
+ * Include all data
+ */
+ public ClusterStateRequestBuilder all() {
+ request.all();
+ return this;
+ }
+
+ /**
+ * Do not include any data
+ */
+ public ClusterStateRequestBuilder clear() {
+ request.clear();
+ return this;
+ }
+
+    /**
+     * Should the cluster state result include the cluster level blocks. Defaults
+     * to <tt>true</tt>.
+     */
+    public ClusterStateRequestBuilder setBlocks(boolean filter) {
+ request.blocks(filter);
+ return this;
+ }
+
+ /**
+ * Should the cluster state result include the {@link org.elasticsearch.cluster.metadata.MetaData}. Defaults
+ * to <tt>true</tt>.
+ */
+ public ClusterStateRequestBuilder setMetaData(boolean filter) {
+ request.metaData(filter);
+ return this;
+ }
+
+ /**
+ * Should the cluster state result include the {@link org.elasticsearch.cluster.node.DiscoveryNodes}. Defaults
+ * to <tt>true</tt>.
+ */
+ public ClusterStateRequestBuilder setNodes(boolean filter) {
+ request.nodes(filter);
+ return this;
+ }
+
+ /**
+     * Should the cluster state result include the {@link org.elasticsearch.cluster.routing.RoutingTable}. Defaults
+ * to <tt>true</tt>.
+ */
+ public ClusterStateRequestBuilder setRoutingTable(boolean filter) {
+ request.routingTable(filter);
+ return this;
+ }
+
+ /**
+ * When {@link #setMetaData(boolean)} is set, which indices to return the {@link org.elasticsearch.cluster.metadata.IndexMetaData}
+ * for. Defaults to all indices.
+ */
+ public ClusterStateRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+    /**
+     * When {@link #setMetaData(boolean)} is set, which index templates to return the
+     * {@link org.elasticsearch.cluster.metadata.IndexTemplateMetaData} for.
+     */
+    public ClusterStateRequestBuilder setIndexTemplates(String... templates) {
+ request.indexTemplates(templates);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClusterStateResponse> listener) {
+ ((ClusterAdminClient) client).state(request, listener);
+ }
+}
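A usage sketch for the builder, assuming a connected ClusterAdminClient named cluster (for example the one returned by client.admin().cluster()); the fluent setters mirror the request methods above:

    ClusterStateResponse response = new ClusterStateRequestBuilder(cluster)
            .clear()
            .setNodes(true)         // fetch only the discovery nodes
            .execute().actionGet();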
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java
new file mode 100644
index 0000000..9ada271
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.state;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ClusterStateResponse extends ActionResponse {
+
+ private ClusterName clusterName;
+ private ClusterState clusterState;
+
+ public ClusterStateResponse() {
+ }
+
+ ClusterStateResponse(ClusterName clusterName, ClusterState clusterState) {
+ this.clusterName = clusterName;
+ this.clusterState = clusterState;
+ }
+
+ public ClusterState getState() {
+ return this.clusterState;
+ }
+
+ public ClusterName getClusterName() {
+ return this.clusterName;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterName = ClusterName.readClusterName(in);
+ clusterState = ClusterState.Builder.readFrom(in, null);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ clusterName.writeTo(out);
+ ClusterState.Builder.writeTo(clusterState, out);
+ }
+}
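Reading the response comes down to two getters; a sketch, assuming response was obtained as above:

    ClusterState state = response.getState();
    String name = response.getClusterName().value();  // human-readable cluster name
    long version = state.version();                   // version of the served state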
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java
new file mode 100644
index 0000000..f9c38af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.state;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ *
+ */
+public class TransportClusterStateAction extends TransportMasterNodeReadOperationAction<ClusterStateRequest, ClusterStateResponse> {
+
+ private final ClusterName clusterName;
+
+ @Inject
+ public TransportClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ ClusterName clusterName) {
+ super(settings, transportService, clusterService, threadPool);
+ this.clusterName = clusterName;
+ }
+
+ @Override
+ protected String executor() {
+ // very lightweight operation in memory, no need to fork to a thread
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClusterStateAction.NAME;
+ }
+
+ @Override
+ protected ClusterStateRequest newRequest() {
+ return new ClusterStateRequest();
+ }
+
+ @Override
+ protected ClusterStateResponse newResponse() {
+ return new ClusterStateResponse();
+ }
+
+ @Override
+ protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) throws ElasticsearchException {
+ ClusterState currentState = clusterService.state();
+ logger.trace("Serving cluster state request using version {}", currentState.version());
+ ClusterState.Builder builder = ClusterState.builder();
+ builder.version(currentState.version());
+ if (request.nodes()) {
+ builder.nodes(currentState.nodes());
+ }
+ if (request.routingTable()) {
+ if (request.indices().length > 0) {
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (String filteredIndex : request.indices()) {
+ if (currentState.routingTable().getIndicesRouting().containsKey(filteredIndex)) {
+ routingTableBuilder.add(currentState.routingTable().getIndicesRouting().get(filteredIndex));
+ }
+ }
+ builder.routingTable(routingTableBuilder);
+ } else {
+ builder.routingTable(currentState.routingTable());
+ }
+ builder.allocationExplanation(currentState.allocationExplanation());
+ }
+ if (request.blocks()) {
+ builder.blocks(currentState.blocks());
+ }
+ if (request.metaData()) {
+ MetaData.Builder mdBuilder;
+ if (request.indices().length == 0 && request.indexTemplates().length == 0) {
+ mdBuilder = MetaData.builder(currentState.metaData());
+ } else {
+ mdBuilder = MetaData.builder();
+ }
+
+ if (request.indices().length > 0) {
+ String[] indices = currentState.metaData().concreteIndicesIgnoreMissing(request.indices());
+ for (String filteredIndex : indices) {
+ IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex);
+ if (indexMetaData != null) {
+ mdBuilder.put(indexMetaData, false);
+ }
+ }
+ }
+
+ if (request.indexTemplates().length > 0) {
+ for (String templateName : request.indexTemplates()) {
+ IndexTemplateMetaData indexTemplateMetaData = currentState.metaData().templates().get(templateName);
+ if (indexTemplateMetaData != null) {
+ mdBuilder.put(indexTemplateMetaData);
+ }
+ }
+ }
+
+ builder.metaData(mdBuilder);
+ }
+ listener.onResponse(new ClusterStateResponse(clusterName, builder.build()));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java
new file mode 100644
index 0000000..ae2463f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ */
+public class ClusterStatsAction extends ClusterAction<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
+
+ public static final ClusterStatsAction INSTANCE = new ClusterStatsAction();
+ public static final String NAME = "cluster/stats";
+
+ private ClusterStatsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterStatsResponse newResponse() {
+ return new ClusterStatsResponse();
+ }
+
+ @Override
+ public ClusterStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new ClusterStatsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java
new file mode 100644
index 0000000..898419a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.cache.filter.FilterCacheStats;
+import org.elasticsearch.index.cache.id.IdCacheStats;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.fielddata.FieldDataStats;
+import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+
+import java.io.IOException;
+
+public class ClusterStatsIndices implements ToXContent, Streamable {
+
+ private int indexCount;
+ private ShardStats shards;
+ private DocsStats docs;
+ private StoreStats store;
+ private FieldDataStats fieldData;
+ private FilterCacheStats filterCache;
+ private IdCacheStats idCache;
+ private CompletionStats completion;
+ private SegmentsStats segments;
+ private PercolateStats percolate;
+
+ private ClusterStatsIndices() {
+ }
+
+ public ClusterStatsIndices(ClusterStatsNodeResponse[] nodeResponses) {
+ ObjectObjectOpenHashMap<String, ShardStats> countsPerIndex = new ObjectObjectOpenHashMap<String, ShardStats>();
+
+ this.docs = new DocsStats();
+ this.store = new StoreStats();
+ this.fieldData = new FieldDataStats();
+ this.filterCache = new FilterCacheStats();
+ this.idCache = new IdCacheStats();
+ this.completion = new CompletionStats();
+ this.segments = new SegmentsStats();
+ this.percolate = new PercolateStats();
+
+ for (ClusterStatsNodeResponse r : nodeResponses) {
+ for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
+ ShardStats indexShardStats = countsPerIndex.get(shardStats.getIndex());
+ if (indexShardStats == null) {
+ indexShardStats = new ShardStats();
+ countsPerIndex.put(shardStats.getIndex(), indexShardStats);
+ }
+
+ indexShardStats.total++;
+
+ CommonStats shardCommonStats = shardStats.getStats();
+
+ if (shardStats.getShardRouting().primary()) {
+ indexShardStats.primaries++;
+ docs.add(shardCommonStats.docs);
+ }
+ store.add(shardCommonStats.store);
+ fieldData.add(shardCommonStats.fieldData);
+ filterCache.add(shardCommonStats.filterCache);
+ idCache.add(shardCommonStats.idCache);
+ completion.add(shardCommonStats.completion);
+ segments.add(shardCommonStats.segments);
+ percolate.add(shardCommonStats.percolate);
+ }
+ }
+
+ shards = new ShardStats();
+ indexCount = countsPerIndex.size();
+ for (ObjectObjectCursor<String, ShardStats> indexCountsCursor : countsPerIndex) {
+ shards.addIndexShardCount(indexCountsCursor.value);
+ }
+ }
+
+ public int getIndexCount() {
+ return indexCount;
+ }
+
+ public ShardStats getShards() {
+ return this.shards;
+ }
+
+ public DocsStats getDocs() {
+ return docs;
+ }
+
+ public StoreStats getStore() {
+ return store;
+ }
+
+ public FieldDataStats getFieldData() {
+ return fieldData;
+ }
+
+ public FilterCacheStats getFilterCache() {
+ return filterCache;
+ }
+
+ public IdCacheStats getIdCache() {
+ return idCache;
+ }
+
+ public CompletionStats getCompletion() {
+ return completion;
+ }
+
+ public SegmentsStats getSegments() {
+ return segments;
+ }
+
+ public PercolateStats getPercolate() {
+ return percolate;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ indexCount = in.readVInt();
+ shards = ShardStats.readShardStats(in);
+ docs = DocsStats.readDocStats(in);
+ store = StoreStats.readStoreStats(in);
+ fieldData = FieldDataStats.readFieldDataStats(in);
+ filterCache = FilterCacheStats.readFilterCacheStats(in);
+ idCache = IdCacheStats.readIdCacheStats(in);
+ completion = CompletionStats.readCompletionStats(in);
+ segments = SegmentsStats.readSegmentsStats(in);
+ percolate = PercolateStats.readPercolateStats(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(indexCount);
+ shards.writeTo(out);
+ docs.writeTo(out);
+ store.writeTo(out);
+ fieldData.writeTo(out);
+ filterCache.writeTo(out);
+ idCache.writeTo(out);
+ completion.writeTo(out);
+ segments.writeTo(out);
+ percolate.writeTo(out);
+ }
+
+ public static ClusterStatsIndices readIndicesStats(StreamInput in) throws IOException {
+ ClusterStatsIndices indicesStats = new ClusterStatsIndices();
+ indicesStats.readFrom(in);
+ return indicesStats;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.COUNT, indexCount);
+ shards.toXContent(builder, params);
+ docs.toXContent(builder, params);
+ store.toXContent(builder, params);
+ fieldData.toXContent(builder, params);
+ filterCache.toXContent(builder, params);
+ idCache.toXContent(builder, params);
+ completion.toXContent(builder, params);
+ segments.toXContent(builder, params);
+ percolate.toXContent(builder, params);
+ return builder;
+ }
+
+ public static class ShardStats implements ToXContent, Streamable {
+
+ int indices;
+ int total;
+ int primaries;
+
+ // min/max
+ int minIndexShards = -1;
+ int maxIndexShards = -1;
+ int minIndexPrimaryShards = -1;
+ int maxIndexPrimaryShards = -1;
+ double minIndexReplication = -1;
+ double totalIndexReplication = 0;
+ double maxIndexReplication = -1;
+
+ public ShardStats() {
+ }
+
+ /**
+ * number of indices in the cluster
+ */
+ public int getIndices() {
+ return this.indices;
+ }
+
+ /**
+ * total number of shards in the cluster
+ */
+ public int getTotal() {
+ return this.total;
+ }
+
+ /**
+ * total number of primary shards in the cluster
+ */
+ public int getPrimaries() {
+ return this.primaries;
+ }
+
+ /**
+ * returns how many *redundant* copies of the data the cluster holds - running with no replicas will return 0
+ */
+ public double getReplication() {
+ if (primaries == 0) {
+ return 0;
+ }
+ return (((double) (total - primaries)) / primaries);
+ }
+
+ /**
+ * the maximum number of shards (primary+replicas) an index has
+ */
+ public int getMaxIndexShards() {
+ return this.maxIndexShards;
+ }
+
+ /**
+ * the minimum number of shards (primary+replicas) an index has
+ */
+ public int getMinIndexShards() {
+ return this.minIndexShards;
+ }
+
+ /**
+ * average number of shards (primary+replicas) across the indices
+ */
+ public double getAvgIndexShards() {
+ if (this.indices == 0) {
+ return -1;
+ }
+ return ((double) this.total) / this.indices;
+ }
+
+ /**
+ * the maximum number of primary shards an index has
+ */
+ public int getMaxIndexPrimaryShards() {
+ return this.maxIndexPrimaryShards;
+ }
+
+ /**
+ * the minimum number of primary shards an index has
+ */
+ public int getMinIndexPrimaryShards() {
+ return this.minIndexPrimaryShards;
+ }
+
+ /**
+         * the average number of primary shards across the indices
+ */
+ public double getAvgIndexPrimaryShards() {
+ if (this.indices == 0) {
+ return -1;
+ }
+ return ((double) this.primaries) / this.indices;
+ }
+
+ /**
+ * minimum replication factor across the indices. See {@link #getReplication}
+ */
+ public double getMinIndexReplication() {
+ return this.minIndexReplication;
+ }
+
+ /**
+ * average replication factor across the indices. See {@link #getReplication}
+ */
+ public double getAvgIndexReplication() {
+ if (indices == 0) {
+ return -1;
+ }
+ return this.totalIndexReplication / this.indices;
+ }
+
+ /**
+         * maximum replication factor across the indices. See {@link #getReplication}
+ */
+ public double getMaxIndexReplication() {
+ return this.maxIndexReplication;
+ }
+
+ public void addIndexShardCount(ShardStats indexShardCount) {
+ this.indices++;
+ this.primaries += indexShardCount.primaries;
+ this.total += indexShardCount.total;
+ this.totalIndexReplication += indexShardCount.getReplication();
+ if (this.indices == 1) {
+ // first index, uninitialized.
+ minIndexPrimaryShards = indexShardCount.primaries;
+ maxIndexPrimaryShards = indexShardCount.primaries;
+ minIndexShards = indexShardCount.total;
+ maxIndexShards = indexShardCount.total;
+ minIndexReplication = indexShardCount.getReplication();
+ maxIndexReplication = minIndexReplication;
+ } else {
+ minIndexShards = Math.min(minIndexShards, indexShardCount.total);
+ minIndexPrimaryShards = Math.min(minIndexPrimaryShards, indexShardCount.primaries);
+ minIndexReplication = Math.min(minIndexReplication, indexShardCount.getReplication());
+
+ maxIndexShards = Math.max(maxIndexShards, indexShardCount.total);
+ maxIndexPrimaryShards = Math.max(maxIndexPrimaryShards, indexShardCount.primaries);
+ maxIndexReplication = Math.max(maxIndexReplication, indexShardCount.getReplication());
+ }
+ }
+
+ public static ShardStats readShardStats(StreamInput in) throws IOException {
+ ShardStats c = new ShardStats();
+ c.readFrom(in);
+ return c;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ indices = in.readVInt();
+ total = in.readVInt();
+ primaries = in.readVInt();
+ minIndexShards = in.readVInt();
+ maxIndexShards = in.readVInt();
+ minIndexPrimaryShards = in.readVInt();
+ maxIndexPrimaryShards = in.readVInt();
+ minIndexReplication = in.readDouble();
+ totalIndexReplication = in.readDouble();
+ maxIndexReplication = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(indices);
+ out.writeVInt(total);
+ out.writeVInt(primaries);
+ out.writeVInt(minIndexShards);
+ out.writeVInt(maxIndexShards);
+ out.writeVInt(minIndexPrimaryShards);
+ out.writeVInt(maxIndexPrimaryShards);
+ out.writeDouble(minIndexReplication);
+ out.writeDouble(totalIndexReplication);
+ out.writeDouble(maxIndexReplication);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString PRIMARIES = new XContentBuilderString("primaries");
+ static final XContentBuilderString REPLICATION = new XContentBuilderString("replication");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString AVG = new XContentBuilderString("avg");
+ static final XContentBuilderString INDEX = new XContentBuilderString("index");
+ }
+
+ private void addIntMinMax(XContentBuilderString field, int min, int max, double avg, XContentBuilder builder) throws IOException {
+ builder.startObject(field);
+ builder.field(Fields.MIN, min);
+ builder.field(Fields.MAX, max);
+ builder.field(Fields.AVG, avg);
+ builder.endObject();
+ }
+
+ private void addDoubleMinMax(XContentBuilderString field, double min, double max, double avg, XContentBuilder builder) throws IOException {
+ builder.startObject(field);
+ builder.field(Fields.MIN, min);
+ builder.field(Fields.MAX, max);
+ builder.field(Fields.AVG, avg);
+ builder.endObject();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.SHARDS);
+ if (indices > 0) {
+
+ builder.field(Fields.TOTAL, total);
+ builder.field(Fields.PRIMARIES, primaries);
+ builder.field(Fields.REPLICATION, getReplication());
+
+ builder.startObject(Fields.INDEX);
+ addIntMinMax(Fields.SHARDS, minIndexShards, maxIndexShards, getAvgIndexShards(), builder);
+ addIntMinMax(Fields.PRIMARIES, minIndexPrimaryShards, maxIndexPrimaryShards, getAvgIndexPrimaryShards(), builder);
+ addDoubleMinMax(Fields.REPLICATION, minIndexReplication, maxIndexReplication, getAvgIndexReplication(), builder);
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public String toString() {
+ return "total [" + total + "] primaries [" + primaries + "]";
+ }
+ }
+}
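The replication figure is (total - primaries) / primaries, i.e. the number of redundant copies per primary shard. A worked sketch with illustrative numbers (it assumes same-package access, since the counters are package-private):

    ClusterStatsIndices.ShardStats stats = new ClusterStatsIndices.ShardStats();

    ClusterStatsIndices.ShardStats index = new ClusterStatsIndices.ShardStats();
    index.primaries = 5;    // five primary shards...
    index.total = 10;       // ...each with exactly one replica
    stats.addIndexShardCount(index);

    // replication = (10 - 5) / 5 = 1.0 redundant copies
    assert stats.getReplication() == 1.0;
    assert stats.getAvgIndexShards() == 10.0;  // a single index with ten shards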
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
new file mode 100644
index 0000000..24222da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+public class ClusterStatsNodeResponse extends NodeOperationResponse {
+
+ private NodeInfo nodeInfo;
+ private NodeStats nodeStats;
+ private ShardStats[] shardsStats;
+ private ClusterHealthStatus clusterStatus;
+
+ ClusterStatsNodeResponse() {
+ }
+
+ public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) {
+ super(node);
+ this.nodeInfo = nodeInfo;
+ this.nodeStats = nodeStats;
+ this.shardsStats = shardsStats;
+ this.clusterStatus = clusterStatus;
+ }
+
+ public NodeInfo nodeInfo() {
+ return this.nodeInfo;
+ }
+
+ public NodeStats nodeStats() {
+ return this.nodeStats;
+ }
+
+ /**
+ * Cluster Health Status, only populated on master nodes.
+ */
+ @Nullable
+ public ClusterHealthStatus clusterStatus() {
+ return clusterStatus;
+ }
+
+ public ShardStats[] shardsStats() {
+ return this.shardsStats;
+ }
+
+ public static ClusterStatsNodeResponse readNodeResponse(StreamInput in) throws IOException {
+ ClusterStatsNodeResponse nodeResponse = new ClusterStatsNodeResponse();
+ nodeResponse.readFrom(in);
+ return nodeResponse;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterStatus = null;
+ if (in.readBoolean()) {
+ clusterStatus = ClusterHealthStatus.fromValue(in.readByte());
+ }
+ this.nodeInfo = NodeInfo.readNodeInfo(in);
+ this.nodeStats = NodeStats.readNodeStats(in);
+ int size = in.readVInt();
+ shardsStats = new ShardStats[size];
+ for (size--; size >= 0; size--) {
+ shardsStats[size] = ShardStats.readShardStats(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (clusterStatus == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(clusterStatus.value());
+ }
+ nodeInfo.writeTo(out);
+ nodeStats.writeTo(out);
+ out.writeVInt(shardsStats.length);
+ for (ShardStats ss : shardsStats) {
+ ss.writeTo(out);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
new file mode 100644
index 0000000..6a79d3b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
@@ -0,0 +1,694 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectIntCursor;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.monitor.os.OsInfo;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.HashSet;
+import java.util.Set;
+
+public class ClusterStatsNodes implements ToXContent, Streamable {
+
+ private Counts counts;
+ private Set<Version> versions;
+ private OsStats os;
+ private ProcessStats process;
+ private JvmStats jvm;
+ private FsStats.Info fs;
+ private Set<PluginInfo> plugins;
+
+ private ClusterStatsNodes() {
+ }
+
+ public ClusterStatsNodes(ClusterStatsNodeResponse[] nodeResponses) {
+ this.counts = new Counts();
+ this.versions = new HashSet<Version>();
+ this.os = new OsStats();
+ this.jvm = new JvmStats();
+ this.fs = new FsStats.Info();
+ this.plugins = new HashSet<PluginInfo>();
+ this.process = new ProcessStats();
+
+ Set<InetAddress> seenAddresses = new HashSet<InetAddress>(nodeResponses.length);
+
+ for (ClusterStatsNodeResponse nodeResponse : nodeResponses) {
+
+ counts.addNodeInfo(nodeResponse.nodeInfo());
+ versions.add(nodeResponse.nodeInfo().getVersion());
+ process.addNodeStats(nodeResponse.nodeStats());
+ jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats());
+ plugins.addAll(nodeResponse.nodeInfo().getPlugins().getInfos());
+
+ // now do the stats that should be deduped by hardware (implemented by ip deduping)
+ TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
+ InetAddress inetAddress = null;
+ if (publishAddress.uniqueAddressTypeId() == 1) {
+ inetAddress = ((InetSocketTransportAddress) publishAddress).address().getAddress();
+ }
+
+ if (!seenAddresses.add(inetAddress)) {
+ continue;
+ }
+
+ os.addNodeInfo(nodeResponse.nodeInfo());
+ if (nodeResponse.nodeStats().getFs() != null) {
+ fs.add(nodeResponse.nodeStats().getFs().total());
+ }
+ }
+ }
+
+
+ public Counts getCounts() {
+ return this.counts;
+ }
+
+ public Set<Version> getVersions() {
+ return versions;
+ }
+
+ public OsStats getOs() {
+ return os;
+ }
+
+ public ProcessStats getProcess() {
+ return process;
+ }
+
+ public JvmStats getJvm() {
+ return jvm;
+ }
+
+ public FsStats.Info getFs() {
+ return fs;
+ }
+
+ public Set<PluginInfo> getPlugins() {
+ return plugins;
+ }
+
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ counts = Counts.readCounts(in);
+
+ int size = in.readVInt();
+ versions = new HashSet<Version>(size);
+ for (; size > 0; size--) {
+ versions.add(Version.readVersion(in));
+ }
+
+ os = OsStats.readOsStats(in);
+ process = ProcessStats.readStats(in);
+ jvm = JvmStats.readJvmStats(in);
+ fs = FsStats.Info.readInfoFrom(in);
+
+ size = in.readVInt();
+ plugins = new HashSet<PluginInfo>(size);
+ for (; size > 0; size--) {
+ plugins.add(PluginInfo.readPluginInfo(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ counts.writeTo(out);
+ out.writeVInt(versions.size());
+ for (Version v : versions) Version.writeVersion(v, out);
+ os.writeTo(out);
+ process.writeTo(out);
+ jvm.writeTo(out);
+ fs.writeTo(out);
+ out.writeVInt(plugins.size());
+ for (PluginInfo p : plugins) {
+ p.writeTo(out);
+ }
+ }
+
+ public static ClusterStatsNodes readNodeStats(StreamInput in) throws IOException {
+ ClusterStatsNodes nodeStats = new ClusterStatsNodes();
+ nodeStats.readFrom(in);
+ return nodeStats;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
+ static final XContentBuilderString OS = new XContentBuilderString("os");
+ static final XContentBuilderString PROCESS = new XContentBuilderString("process");
+ static final XContentBuilderString JVM = new XContentBuilderString("jvm");
+ static final XContentBuilderString FS = new XContentBuilderString("fs");
+ static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.COUNT);
+ counts.toXContent(builder, params);
+ builder.endObject();
+
+ builder.startArray(Fields.VERSIONS);
+ for (Version v : versions) {
+ builder.value(v.toString());
+ }
+ builder.endArray();
+
+ builder.startObject(Fields.OS);
+ os.toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject(Fields.PROCESS);
+ process.toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject(Fields.JVM);
+ jvm.toXContent(builder, params);
+ builder.endObject();
+
+ builder.field(Fields.FS);
+ fs.toXContent(builder, params);
+
+ builder.startArray(Fields.PLUGINS);
+ for (PluginInfo pluginInfo : plugins) {
+ pluginInfo.toXContent(builder, params);
+ }
+ builder.endArray();
+ return builder;
+ }
+
+ public static class Counts implements Streamable, ToXContent {
+ int total;
+ int masterOnly;
+ int dataOnly;
+ int masterData;
+ int client;
+
+ public void addNodeInfo(NodeInfo nodeInfo) {
+ total++;
+ DiscoveryNode node = nodeInfo.getNode();
+ if (node.masterNode()) {
+ if (node.dataNode()) {
+ masterData++;
+ } else {
+ masterOnly++;
+ }
+ } else if (node.dataNode()) {
+ dataOnly++;
+ } else if (node.clientNode()) {
+ client++;
+ }
+ }
+
+ public int getTotal() {
+ return total;
+ }
+
+ public int getMasterOnly() {
+ return masterOnly;
+ }
+
+ public int getDataOnly() {
+ return dataOnly;
+ }
+
+ public int getMasterData() {
+ return masterData;
+ }
+
+ public int getClient() {
+ return client;
+ }
+
+ public static Counts readCounts(StreamInput in) throws IOException {
+ Counts c = new Counts();
+ c.readFrom(in);
+ return c;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readVInt();
+ masterOnly = in.readVInt();
+ dataOnly = in.readVInt();
+ masterData = in.readVInt();
+ client = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(total);
+ out.writeVInt(masterOnly);
+ out.writeVInt(dataOnly);
+ out.writeVInt(masterData);
+ out.writeVInt(client);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MASTER_ONLY = new XContentBuilderString("master_only");
+ static final XContentBuilderString DATA_ONLY = new XContentBuilderString("data_only");
+ static final XContentBuilderString MASTER_DATA = new XContentBuilderString("master_data");
+ static final XContentBuilderString CLIENT = new XContentBuilderString("client");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.TOTAL, total);
+ builder.field(Fields.MASTER_ONLY, masterOnly);
+ builder.field(Fields.DATA_ONLY, dataOnly);
+ builder.field(Fields.MASTER_DATA, masterData);
+ builder.field(Fields.CLIENT, client);
+ return builder;
+ }
+ }
+
+ public static class OsStats implements ToXContent, Streamable {
+
+ int availableProcessors;
+ long availableMemory;
+ ObjectIntOpenHashMap<OsInfo.Cpu> cpus;
+
+ public OsStats() {
+            cpus = new ObjectIntOpenHashMap<OsInfo.Cpu>();
+ }
+
+ public void addNodeInfo(NodeInfo nodeInfo) {
+            // check for missing OS info before dereferencing it
+            if (nodeInfo.getOs() == null) {
+                return;
+            }
+            availableProcessors += nodeInfo.getOs().availableProcessors();
+ if (nodeInfo.getOs().cpu() != null) {
+ cpus.addTo(nodeInfo.getOs().cpu(), 1);
+ }
+ if (nodeInfo.getOs().getMem() != null && nodeInfo.getOs().getMem().getTotal().bytes() != -1) {
+ availableMemory += nodeInfo.getOs().getMem().getTotal().bytes();
+ }
+ }
+
+ public int getAvailableProcessors() {
+ return availableProcessors;
+ }
+
+ public ByteSizeValue getAvailableMemory() {
+ return new ByteSizeValue(availableMemory);
+ }
+
+ public ObjectIntOpenHashMap<OsInfo.Cpu> getCpus() {
+ return cpus;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ availableProcessors = in.readVInt();
+ availableMemory = in.readLong();
+ int size = in.readVInt();
+ cpus = new ObjectIntOpenHashMap<OsInfo.Cpu>(size);
+ for (; size > 0; size--) {
+ cpus.addTo(OsInfo.Cpu.readCpu(in), in.readVInt());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(availableProcessors);
+ out.writeLong(availableMemory);
+ out.writeVInt(cpus.size());
+ for (ObjectIntCursor<OsInfo.Cpu> c : cpus) {
+ c.key.writeTo(out);
+ out.writeVInt(c.value);
+ }
+
+ }
+
+ public static OsStats readOsStats(StreamInput in) throws IOException {
+ OsStats os = new OsStats();
+ os.readFrom(in);
+ return os;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString AVAILABLE_PROCESSORS = new XContentBuilderString("available_processors");
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
+ static final XContentBuilderString CPU = new XContentBuilderString("cpu");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, availableMemory);
+ builder.endObject();
+
+ builder.startArray(Fields.CPU);
+ for (ObjectIntCursor<OsInfo.Cpu> cpu : cpus) {
+ builder.startObject();
+ cpu.key.toXContent(builder, params);
+ builder.field(Fields.COUNT, cpu.value);
+ builder.endObject();
+ }
+ builder.endArray();
+
+ return builder;
+ }
+ }
+
+ public static class ProcessStats implements ToXContent, Streamable {
+
+ int count;
+ int cpuPercent;
+ long totalOpenFileDescriptors;
+ long minOpenFileDescriptors = Long.MAX_VALUE;
+ long maxOpenFileDescriptors = Long.MIN_VALUE;
+
+ public void addNodeStats(NodeStats nodeStats) {
+ if (nodeStats.getProcess() == null) {
+ return;
+ }
+ count++;
+ if (nodeStats.getProcess().cpu() != null) {
+                // without Sigar, process CPU stats may not be available
+ cpuPercent += nodeStats.getProcess().cpu().getPercent();
+ }
+ long fd = nodeStats.getProcess().openFileDescriptors();
+ if (fd > 0) {
+ // fd can be -1 if not supported on platform
+ totalOpenFileDescriptors += fd;
+ }
+            // we still include -1 in the min/max calculation, so the result signals that at least one node does not support this
+ minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd);
+ maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd);
+ }
+
+ /**
+         * CPU usage in percent; 100 means one fully used core.
+ */
+ public int getCpuPercent() {
+ return cpuPercent;
+ }
+
+ public long getAvgOpenFileDescriptors() {
+ if (count == 0) {
+ return -1;
+ }
+ return totalOpenFileDescriptors / count;
+ }
+
+ public long getMaxOpenFileDescriptors() {
+ if (count == 0) {
+ return -1;
+ }
+ return maxOpenFileDescriptors;
+ }
+
+ public long getMinOpenFileDescriptors() {
+ if (count == 0) {
+ return -1;
+ }
+ return minOpenFileDescriptors;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ count = in.readVInt();
+ cpuPercent = in.readVInt();
+ totalOpenFileDescriptors = in.readVLong();
+ minOpenFileDescriptors = in.readLong();
+ maxOpenFileDescriptors = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(count);
+ out.writeVInt(cpuPercent);
+ out.writeVLong(totalOpenFileDescriptors);
+ out.writeLong(minOpenFileDescriptors);
+ out.writeLong(maxOpenFileDescriptors);
+ }
+
+ public static ProcessStats readStats(StreamInput in) throws IOException {
+ ProcessStats cpu = new ProcessStats();
+ cpu.readFrom(in);
+ return cpu;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString CPU = new XContentBuilderString("cpu");
+ static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
+ static final XContentBuilderString OPEN_FILE_DESCRIPTORS = new XContentBuilderString("open_file_descriptors");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString AVG = new XContentBuilderString("avg");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject();
+ if (count > 0) {
+ builder.startObject(Fields.OPEN_FILE_DESCRIPTORS);
+ builder.field(Fields.MIN, getMinOpenFileDescriptors());
+ builder.field(Fields.MAX, getMaxOpenFileDescriptors());
+ builder.field(Fields.AVG, getAvgOpenFileDescriptors());
+ builder.endObject();
+ }
+ return builder;
+ }
+ }
+
+ public static class JvmStats implements Streamable, ToXContent {
+
+ ObjectIntOpenHashMap<JvmVersion> versions;
+ long threads;
+ long maxUptime;
+ long heapUsed;
+ long heapMax;
+
+ JvmStats() {
+ versions = new ObjectIntOpenHashMap<JvmVersion>();
+ threads = 0;
+ maxUptime = 0;
+ heapMax = 0;
+ heapUsed = 0;
+ }
+
+ public ObjectIntOpenHashMap<JvmVersion> getVersions() {
+ return versions;
+ }
+
+ /**
+ * The total number of threads in the cluster
+ */
+ public long getThreads() {
+ return threads;
+ }
+
+ /**
+ * The maximum uptime of a node in the cluster
+ */
+ public TimeValue getMaxUpTime() {
+ return new TimeValue(maxUptime);
+ }
+
+ /**
+ * Total heap used in the cluster
+ */
+ public ByteSizeValue getHeapUsed() {
+ return new ByteSizeValue(heapUsed);
+ }
+
+ /**
+ * Maximum total heap available to the cluster
+ */
+ public ByteSizeValue getHeapMax() {
+ return new ByteSizeValue(heapMax);
+ }
+
+ public void addNodeInfoStats(NodeInfo nodeInfo, NodeStats nodeStats) {
+ versions.addTo(new JvmVersion(nodeInfo.getJvm()), 1);
+ org.elasticsearch.monitor.jvm.JvmStats js = nodeStats.getJvm();
+ if (js == null) {
+ return;
+ }
+ if (js.threads() != null) {
+ threads += js.threads().count();
+ }
+ maxUptime = Math.max(maxUptime, js.uptime().millis());
+ if (js.mem() != null) {
+ heapUsed += js.mem().getHeapUsed().bytes();
+ heapMax += js.mem().getHeapMax().bytes();
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ versions = new ObjectIntOpenHashMap<JvmVersion>(size);
+ for (; size > 0; size--) {
+ versions.addTo(JvmVersion.readJvmVersion(in), in.readVInt());
+ }
+ threads = in.readVLong();
+ maxUptime = in.readVLong();
+ heapUsed = in.readVLong();
+ heapMax = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(versions.size());
+ for (ObjectIntCursor<JvmVersion> v : versions) {
+ v.key.writeTo(out);
+ out.writeVInt(v.value);
+ }
+
+ out.writeVLong(threads);
+ out.writeVLong(maxUptime);
+ out.writeVLong(heapUsed);
+ out.writeVLong(heapMax);
+ }
+
+ public static JvmStats readJvmStats(StreamInput in) throws IOException {
+ JvmStats jvmStats = new JvmStats();
+ jvmStats.readFrom(in);
+ return jvmStats;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
+ static final XContentBuilderString VERSION = new XContentBuilderString("version");
+ static final XContentBuilderString VM_NAME = new XContentBuilderString("vm_name");
+ static final XContentBuilderString VM_VERSION = new XContentBuilderString("vm_version");
+ static final XContentBuilderString VM_VENDOR = new XContentBuilderString("vm_vendor");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString THREADS = new XContentBuilderString("threads");
+ static final XContentBuilderString MAX_UPTIME = new XContentBuilderString("max_uptime");
+ static final XContentBuilderString MAX_UPTIME_IN_MILLIS = new XContentBuilderString("max_uptime_in_millis");
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString HEAP_USED = new XContentBuilderString("heap_used");
+ static final XContentBuilderString HEAP_USED_IN_BYTES = new XContentBuilderString("heap_used_in_bytes");
+ static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
+ static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime);
+ builder.startArray(Fields.VERSIONS);
+ for (ObjectIntCursor<JvmVersion> v : versions) {
+ builder.startObject();
+ builder.field(Fields.VERSION, v.key.version);
+ builder.field(Fields.VM_NAME, v.key.vmName);
+ builder.field(Fields.VM_VERSION, v.key.vmVersion);
+ builder.field(Fields.VM_VENDOR, v.key.vmVendor);
+ builder.field(Fields.COUNT, v.value);
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, heapUsed);
+ builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, heapMax);
+ builder.endObject();
+
+ builder.field(Fields.THREADS, threads);
+ return builder;
+ }
+ }
+
+ public static class JvmVersion implements Streamable {
+ String version;
+ String vmName;
+ String vmVersion;
+ String vmVendor;
+
+ JvmVersion(JvmInfo jvmInfo) {
+ version = jvmInfo.version();
+ vmName = jvmInfo.vmName();
+ vmVersion = jvmInfo.vmVersion();
+ vmVendor = jvmInfo.vmVendor();
+ }
+
+ JvmVersion() {
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ JvmVersion jvm = (JvmVersion) o;
+
+ return vmVersion.equals(jvm.vmVersion) && vmVendor.equals(jvm.vmVendor);
+ }
+
+ @Override
+ public int hashCode() {
+ return vmVersion.hashCode();
+ }
+
+ public static JvmVersion readJvmVersion(StreamInput in) throws IOException {
+ JvmVersion jvm = new JvmVersion();
+ jvm.readFrom(in);
+ return jvm;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ version = in.readString();
+ vmName = in.readString();
+ vmVersion = in.readString();
+ vmVendor = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(version);
+ out.writeString(vmName);
+ out.writeString(vmVersion);
+ out.writeString(vmVendor);
+ }
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java
new file mode 100644
index 0000000..287ca72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to get cluster level stats.
+ */
+public class ClusterStatsRequest extends NodesOperationRequest<ClusterStatsRequest> {
+
+ /**
+     * Get stats from the nodes whose ids are specified. If none are passed, stats
+     * for all nodes are returned.
+ */
+ public ClusterStatsRequest(String... nodesIds) {
+ super(nodesIds);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+
+}
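A sketch of scoping the request (the node ids are illustrative; passing none targets every node):

    ClusterStatsRequest scoped = new ClusterStatsRequest("node-1", "node-2");
    ClusterStatsRequest all = new ClusterStatsRequest();  // all nodes in the cluster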
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
new file mode 100644
index 0000000..b664ca9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ *
+ */
+public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
+
+ public ClusterStatsRequestBuilder(ClusterAdminClient clusterClient) {
+ super((InternalClusterAdminClient) clusterClient, new ClusterStatsRequest());
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClusterStatsResponse> listener) {
+ ((ClusterAdminClient) client).clusterStats(request, listener);
+ }
+}
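A usage sketch, again assuming a connected ClusterAdminClient named cluster:

    ClusterStatsResponse stats = new ClusterStatsRequestBuilder(cluster)
            .execute().actionGet();
    System.out.println(stats.getStatus());                             // e.g. GREEN
    System.out.println(stats.getNodesStats().getCounts().getTotal()); // node count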
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
new file mode 100644
index 0000000..3b84e86
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ *
+ */
+public class ClusterStatsResponse extends NodesOperationResponse<ClusterStatsNodeResponse> implements ToXContent {
+
+ ClusterStatsNodes nodesStats;
+ ClusterStatsIndices indicesStats;
+ String clusterUUID;
+ ClusterHealthStatus status;
+ long timestamp;
+
+
+ ClusterStatsResponse() {
+ }
+
+ public ClusterStatsResponse(long timestamp, ClusterName clusterName, String clusterUUID, ClusterStatsNodeResponse[] nodes) {
+ super(clusterName, null);
+ this.timestamp = timestamp;
+ this.clusterUUID = clusterUUID;
+ nodesStats = new ClusterStatsNodes(nodes);
+ indicesStats = new ClusterStatsIndices(nodes);
+ for (ClusterStatsNodeResponse response : nodes) {
+ // only the master node populates the status
+ if (response.clusterStatus() != null) {
+ status = response.clusterStatus();
+ break;
+ }
+ }
+ }
+
+ public long getTimestamp() {
+ return this.timestamp;
+ }
+
+ public ClusterHealthStatus getStatus() {
+ return this.status;
+ }
+
+ public ClusterStatsNodes getNodesStats() {
+ return nodesStats;
+ }
+
+ public ClusterStatsIndices getIndicesStats() {
+ return indicesStats;
+ }
+
+ @Override
+ public ClusterStatsNodeResponse[] getNodes() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Map<String, ClusterStatsNodeResponse> getNodesMap() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterStatsNodeResponse getAt(int position) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Iterator<ClusterStatsNodeResponse> iterator() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ timestamp = in.readVLong();
+ status = null;
+ if (in.readBoolean()) {
+ // it may be that the master switched on us while doing the operation. In this case the status may be null.
+ status = ClusterHealthStatus.fromValue(in.readByte());
+ }
+ clusterUUID = in.readString();
+ nodesStats = ClusterStatsNodes.readNodeStats(in);
+ indicesStats = ClusterStatsIndices.readIndicesStats(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(timestamp);
+ if (status == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(status.value());
+ }
+ out.writeString(clusterUUID);
+ nodesStats.writeTo(out);
+ indicesStats.writeTo(out);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString NODES = new XContentBuilderString("nodes");
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString UUID = new XContentBuilderString("uuid");
+ static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
+ static final XContentBuilderString STATUS = new XContentBuilderString("status");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("timestamp", getTimestamp());
+ builder.field(Fields.CLUSTER_NAME, getClusterName().value());
+ if (params.paramAsBoolean("output_uuid", false)) {
+ builder.field(Fields.UUID, clusterUUID);
+ }
+ if (status != null) {
+ builder.field(Fields.STATUS, status.name().toLowerCase(Locale.ROOT));
+ }
+ builder.startObject(Fields.INDICES);
+ indicesStats.toXContent(builder, params);
+ builder.endObject();
+ builder.startObject(Fields.NODES);
+ nodesStats.toXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
\ No newline at end of file
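The uuid field above is only rendered when the caller passes output_uuid=true. A sketch of serializing a response with that flag set, using the same XContent API as the patch ("response" is an assumed ClusterStatsResponse instance):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import java.util.Collections;

    XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
    builder.startObject();
    // paramAsBoolean("output_uuid", false) in toXContent() picks this up.
    response.toXContent(builder,
            new ToXContent.MapParams(Collections.singletonMap("output_uuid", "true")));
    builder.endObject();
    String json = builder.string();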
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
new file mode 100644
index 0000000..8870f9e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * Transport action that gathers node-level info, stats and shard stats on every
+ * node, and lets the master node contribute the overall cluster health status.
+ */
+public class TransportClusterStatsAction extends TransportNodesOperationAction<ClusterStatsRequest, ClusterStatsResponse,
+ TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {
+
+ private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
+ CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.IdCache,
+ CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, CommonStatsFlags.Flag.Percolate);
+
+ private final NodeService nodeService;
+ private final IndicesService indicesService;
+
+
+ @Inject
+ public TransportClusterStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ ClusterService clusterService, TransportService transportService,
+ NodeService nodeService, IndicesService indicesService) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ this.nodeService = nodeService;
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClusterStatsAction.NAME;
+ }
+
+ @Override
+ protected ClusterStatsResponse newResponse(ClusterStatsRequest clusterStatsRequest, AtomicReferenceArray responses) {
+ final List<ClusterStatsNodeResponse> nodeStats = new ArrayList<ClusterStatsNodeResponse>(responses.length());
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof ClusterStatsNodeResponse) {
+ nodeStats.add((ClusterStatsNodeResponse) resp);
+ }
+ }
+ return new ClusterStatsResponse(System.currentTimeMillis(), clusterName,
+ clusterService.state().metaData().uuid(), nodeStats.toArray(new ClusterStatsNodeResponse[nodeStats.size()]));
+ }
+
+ @Override
+ protected ClusterStatsRequest newRequest() {
+ return new ClusterStatsRequest();
+ }
+
+ @Override
+ protected ClusterStatsNodeRequest newNodeRequest() {
+ return new ClusterStatsNodeRequest();
+ }
+
+ @Override
+ protected ClusterStatsNodeRequest newNodeRequest(String nodeId, ClusterStatsRequest request) {
+ return new ClusterStatsNodeRequest(nodeId, request);
+ }
+
+ @Override
+ protected ClusterStatsNodeResponse newNodeResponse() {
+ return new ClusterStatsNodeResponse();
+ }
+
+ @Override
+ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) throws ElasticsearchException {
+ NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, false, true, false, true);
+ NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, false, true, false, false, false);
+ List<ShardStats> shardsStats = new ArrayList<ShardStats>();
+ for (String index : indicesService.indices()) {
+ IndexService indexService = indicesService.indexService(index);
+ if (indexService == null) {
+ continue;
+ }
+ for (IndexShard indexShard : indexService) {
+ if (indexShard.routingEntry().active()) {
+ // only report on fully started shards
+ shardsStats.add(new ShardStats(indexShard, SHARD_STATS_FLAGS));
+ }
+ }
+
+ }
+
+ ClusterHealthStatus clusterStatus = null;
+ if (clusterService.state().nodes().localNodeMaster()) {
+ // populate cluster status
+ clusterStatus = ClusterHealthStatus.GREEN;
+ for (IndexRoutingTable indexRoutingTable : clusterService.state().routingTable()) {
+ IndexMetaData indexMetaData = clusterService.state().metaData().index(indexRoutingTable.index());
+ // the loop variable can never be null here; guard the metadata lookup instead
+ if (indexMetaData == null) {
+ continue;
+ }
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ switch (indexHealth.getStatus()) {
+ case RED:
+ clusterStatus = ClusterHealthStatus.RED;
+ break;
+ case YELLOW:
+ if (clusterStatus != ClusterHealthStatus.RED) {
+ clusterStatus = ClusterHealthStatus.YELLOW;
+ }
+ break;
+ }
+ }
+ }
+
+ return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]));
+
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ static class ClusterStatsNodeRequest extends NodeOperationRequest {
+
+ ClusterStatsRequest request;
+
+ ClusterStatsNodeRequest() {
+ }
+
+ ClusterStatsNodeRequest(String nodeId, ClusterStatsRequest request) {
+ super(request, nodeId);
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = new ClusterStatsRequest();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ }
+ }
+}
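The status derivation in nodeOperation() folds per-index health into a single cluster value, with RED taking precedence over YELLOW and YELLOW over GREEN. A standalone sketch of the same fold (the statuses in the array are illustrative):

    import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;

    ClusterHealthStatus cluster = ClusterHealthStatus.GREEN;
    for (ClusterHealthStatus index : new ClusterHealthStatus[]{
            ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW}) {
        if (index == ClusterHealthStatus.RED) {
            cluster = ClusterHealthStatus.RED;
        } else if (index == ClusterHealthStatus.YELLOW && cluster != ClusterHealthStatus.RED) {
            cluster = ClusterHealthStatus.YELLOW;
        }
    }
    // cluster is YELLOW here: a single yellow index downgrades an otherwise green cluster.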
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java
new file mode 100644
index 0000000..e96752c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ * Action for retrieving the cluster-level tasks that are queued on the master
+ * but have not yet been executed.
+ */
+public class PendingClusterTasksAction extends ClusterAction<PendingClusterTasksRequest, PendingClusterTasksResponse, PendingClusterTasksRequestBuilder> {
+
+ public static final PendingClusterTasksAction INSTANCE = new PendingClusterTasksAction();
+ public static final String NAME = "cluster/task";
+
+ private PendingClusterTasksAction() {
+ super(NAME);
+ }
+
+ @Override
+ public PendingClusterTasksResponse newResponse() {
+ return new PendingClusterTasksResponse();
+ }
+
+ @Override
+ public PendingClusterTasksRequestBuilder newRequestBuilder(ClusterAdminClient client) {
+ return new PendingClusterTasksRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
new file mode 100644
index 0000000..f3704bb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Request to retrieve the pending cluster tasks from the master node.
+ */
+public class PendingClusterTasksRequest extends MasterNodeReadOperationRequest<PendingClusterTasksRequest> {
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
new file mode 100644
index 0000000..197b8f4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ * Builder for a {@link PendingClusterTasksRequest}.
+ */
+public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder<PendingClusterTasksRequest, PendingClusterTasksResponse, PendingClusterTasksRequestBuilder> {
+
+ public PendingClusterTasksRequestBuilder(ClusterAdminClient client) {
+ super((InternalClusterAdminClient) client, new PendingClusterTasksRequest());
+ }
+
+ @Override
+ protected void doExecute(ActionListener<PendingClusterTasksResponse> listener) {
+ ((InternalClusterAdminClient) client).pendingClusterTasks(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
new file mode 100644
index 0000000..92d134a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Response carrying the list of pending cluster tasks.
+ */
+public class PendingClusterTasksResponse extends ActionResponse implements Iterable<PendingClusterTask>, ToXContent {
+
+ private List<PendingClusterTask> pendingTasks;
+
+ PendingClusterTasksResponse() {
+ }
+
+ PendingClusterTasksResponse(List<PendingClusterTask> pendingTasks) {
+ this.pendingTasks = pendingTasks;
+ }
+
+ public List<PendingClusterTask> pendingTasks() {
+ return pendingTasks;
+ }
+
+ /**
+ * The pending cluster tasks
+ */
+ public List<PendingClusterTask> getPendingTasks() {
+ return pendingTasks();
+ }
+
+ @Override
+ public Iterator<PendingClusterTask> iterator() {
+ return pendingTasks.iterator();
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("tasks: (").append(pendingTasks.size()).append("):\n");
+ for (PendingClusterTask pendingClusterTask : this) {
+ sb.append(pendingClusterTask.getInsertOrder()).append("/").append(pendingClusterTask.getPriority()).append("/").append(pendingClusterTask.getSource()).append("/").append(pendingClusterTask.getTimeInQueue()).append("\n");
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray(Fields.TASKS);
+ for (PendingClusterTask pendingClusterTask : this) {
+ builder.startObject();
+ builder.field(Fields.INSERT_ORDER, pendingClusterTask.insertOrder());
+ builder.field(Fields.PRIORITY, pendingClusterTask.priority());
+ builder.field(Fields.SOURCE, pendingClusterTask.source());
+ builder.field(Fields.TIME_IN_QUEUE_MILLIS, pendingClusterTask.timeInQueueInMillis());
+ builder.field(Fields.TIME_IN_QUEUE, pendingClusterTask.getTimeInQueue());
+ builder.endObject();
+ }
+ builder.endArray();
+ return builder;
+ }
+
+ static final class Fields {
+
+ static final XContentBuilderString TASKS = new XContentBuilderString("tasks");
+ static final XContentBuilderString INSERT_ORDER = new XContentBuilderString("insert_order");
+ static final XContentBuilderString PRIORITY = new XContentBuilderString("priority");
+ static final XContentBuilderString SOURCE = new XContentBuilderString("source");
+ static final XContentBuilderString TIME_IN_QUEUE_MILLIS = new XContentBuilderString("time_in_queue_millis");
+ static final XContentBuilderString TIME_IN_QUEUE = new XContentBuilderString("time_in_queue");
+
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ pendingTasks = new ArrayList<PendingClusterTask>(size);
+ for (int i = 0; i < size; i++) {
+ PendingClusterTask task = new PendingClusterTask();
+ task.readFrom(in);
+ pendingTasks.add(task);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(pendingTasks.size());
+ for (PendingClusterTask task : pendingTasks) {
+ task.writeTo(out);
+ }
+ }
+
+}
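A usage sketch for the response above, iterating the tasks the same way prettyPrint() does. Not part of the patch; "client" is an assumed connected Client:

    import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
    import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
    import org.elasticsearch.cluster.service.PendingClusterTask;

    PendingClusterTasksResponse tasks = new PendingClusterTasksRequestBuilder(client.admin().cluster())
            .execute().actionGet();
    for (PendingClusterTask task : tasks) {
        // insert order, priority, source and time in queue, as in prettyPrint()
        System.out.println(task.getInsertOrder() + "/" + task.getPriority()
                + "/" + task.getSource() + "/" + task.getTimeInQueue());
    }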
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
new file mode 100644
index 0000000..9a23681
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Transport action that answers pending-tasks requests straight from the
+ * master's cluster service, without forking to another thread.
+ */
+public class TransportPendingClusterTasksAction extends TransportMasterNodeReadOperationAction<PendingClusterTasksRequest, PendingClusterTasksResponse> {
+
+ private final ClusterService clusterService;
+
+ @Inject
+ public TransportPendingClusterTasksAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ this.clusterService = clusterService;
+ }
+
+ @Override
+ protected String transportAction() {
+ return PendingClusterTasksAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ // very lightweight operation in memory, no need to fork to a thread
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected PendingClusterTasksRequest newRequest() {
+ return new PendingClusterTasksRequest();
+ }
+
+ @Override
+ protected PendingClusterTasksResponse newResponse() {
+ return new PendingClusterTasksResponse();
+ }
+
+ @Override
+ protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) throws ElasticsearchException {
+ listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks()));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java
new file mode 100644
index 0000000..71429a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Indices action (used with the {@link IndicesAdminClient} API).
+ */
+public abstract class IndicesAction<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
+ extends GenericAction<Request, Response> {
+
+ protected IndicesAction(String name) {
+ super(name);
+ }
+
+ public abstract RequestBuilder newRequestBuilder(IndicesAdminClient client);
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
new file mode 100644
index 0000000..d34b259
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action for adding and removing index aliases.
+ */
+public class IndicesAliasesAction extends IndicesAction<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {
+
+ public static final IndicesAliasesAction INSTANCE = new IndicesAliasesAction();
+ public static final String NAME = "indices/aliases";
+
+ private IndicesAliasesAction() {
+ super(NAME);
+ }
+
+ @Override
+ public IndicesAliasesResponse newResponse() {
+ return new IndicesAliasesResponse();
+ }
+
+ @Override
+ public IndicesAliasesRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new IndicesAliasesRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java
new file mode 100644
index 0000000..30cbd6d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.alias;
+
+import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
+import org.elasticsearch.cluster.metadata.AliasAction;
+
+/**
+ * Cluster state update request for adding or removing aliases.
+ */
+public class IndicesAliasesClusterStateUpdateRequest extends ClusterStateUpdateRequest<IndicesAliasesClusterStateUpdateRequest> {
+
+ AliasAction[] actions;
+
+ IndicesAliasesClusterStateUpdateRequest() {
+
+ }
+
+ /**
+ * Returns the alias actions to be performed
+ */
+ public AliasAction[] actions() {
+ return actions;
+ }
+
+ /**
+ * Sets the alias actions to be executed
+ */
+ public IndicesAliasesClusterStateUpdateRequest actions(AliasAction[] actions) {
+ this.actions = actions;
+ return this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
new file mode 100644
index 0000000..af6d193
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.cluster.metadata.AliasAction.Type;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.index.query.FilterBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.cluster.metadata.AliasAction.readAliasAction;
+
+/**
+ * A request to add/remove aliases for one or more indices.
+ */
+public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> {
+
+ private List<AliasActions> allAliasActions = Lists.newArrayList();
+
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
+
+ public IndicesAliasesRequest() {
+
+ }
+
+ /*
+ * Aliases can be added by passing multiple indices to the Request and
+ * deleted by passing multiple indices and aliases. They are expanded into
+ * distinct AliasAction instances when the request is processed. This class
+ * holds the AliasAction and, in addition, the arrays of alias names and
+ * indices that are later used to create the final AliasAction instances.
+ */
+ public static class AliasActions {
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private String[] aliases = Strings.EMPTY_ARRAY;
+ private AliasAction aliasAction;
+
+ public AliasActions(AliasAction.Type type, String[] indices, String[] aliases) {
+ aliasAction = new AliasAction(type);
+ indices(indices);
+ aliases(aliases);
+ }
+
+ public AliasActions(AliasAction.Type type, String index, String alias) {
+ aliasAction = new AliasAction(type);
+ indices(index);
+ aliases(alias);
+ }
+
+ AliasActions(AliasAction.Type type, String[] index, String alias) {
+ aliasAction = new AliasAction(type);
+ indices(index);
+ aliases(alias);
+ }
+
+ public AliasActions(AliasAction action) {
+ this.aliasAction = action;
+ indices(action.index());
+ aliases(action.alias());
+ }
+
+ public AliasActions(Type type, String index, String[] aliases) {
+ aliasAction = new AliasAction(type);
+ indices(index);
+ aliases(aliases);
+ }
+
+ public AliasActions() {
+ }
+
+ public AliasActions filter(Map<String, Object> filter) {
+ aliasAction.filter(filter);
+ return this;
+ }
+
+ public AliasActions filter(FilterBuilder filter) {
+ aliasAction.filter(filter);
+ return this;
+ }
+
+ public Type actionType() {
+ return aliasAction.actionType();
+ }
+
+ public void routing(String routing) {
+ aliasAction.routing(routing);
+ }
+
+ public void searchRouting(String searchRouting) {
+ aliasAction.searchRouting(searchRouting);
+ }
+
+ public void indexRouting(String indexRouting) {
+ aliasAction.indexRouting(indexRouting);
+ }
+
+ public AliasActions filter(String filter) {
+ aliasAction.filter(filter);
+ return this;
+ }
+
+ public void indices(String... indices) {
+ List<String> finalIndices = new ArrayList<String>();
+ for (String index : indices) {
+ if (index != null) {
+ finalIndices.add(index);
+ }
+ }
+ this.indices = finalIndices.toArray(new String[finalIndices.size()]);
+ }
+
+ public void aliases(String... aliases) {
+ this.aliases = aliases;
+ }
+
+ public String[] aliases() {
+ return aliases;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public AliasAction aliasAction() {
+ return aliasAction;
+ }
+
+ public String[] concreteAliases(MetaData metaData, String concreteIndex) {
+ if (aliasAction.actionType() == Type.REMOVE) {
+ //for DELETE we expand the aliases
+ String[] indexAsArray = {concreteIndex};
+ ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasMetaData = metaData.findAliases(aliases, indexAsArray);
+ List<String> finalAliases = new ArrayList<String> ();
+ for (ObjectCursor<ImmutableList<AliasMetaData>> curAliases : aliasMetaData.values()) {
+ for (AliasMetaData aliasMeta: curAliases.value) {
+ finalAliases.add(aliasMeta.alias());
+ }
+ }
+ return finalAliases.toArray(new String[finalAliases.size()]);
+ } else {
+ //for add we just return the current aliases
+ return aliases;
+ }
+ }
+
+ public AliasActions readFrom(StreamInput in) throws IOException {
+ indices = in.readStringArray();
+ aliases = in.readStringArray();
+ aliasAction = readAliasAction(in);
+ return this;
+ }
+
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeStringArray(indices);
+ out.writeStringArray(aliases);
+ this.aliasAction.writeTo(out);
+ }
+ }
+
+ /**
+ * Adds an alias to the index.
+ * @param alias The alias
+ * @param indices The indices
+ */
+ public IndicesAliasesRequest addAlias(String alias, String... indices) {
+ addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias));
+ return this;
+ }
+
+
+ public void addAliasAction(AliasActions aliasAction) {
+ allAliasActions.add(aliasAction);
+ }
+
+
+ public IndicesAliasesRequest addAliasAction(AliasAction action) {
+ addAliasAction(new AliasActions(action));
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ * @param alias The alias
+ * @param filter The filter
+ * @param indices The indices
+ */
+ public IndicesAliasesRequest addAlias(String alias, Map<String, Object> filter, String... indices) {
+ addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter));
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ * @param alias The alias
+ * @param filterBuilder The filter
+ * @param indices The indices
+ */
+ public IndicesAliasesRequest addAlias(String alias, FilterBuilder filterBuilder, String... indices) {
+ addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filterBuilder));
+ return this;
+ }
+
+
+ /**
+ * Removes one or more aliases from one or more indices.
+ *
+ * @param indices The indices
+ * @param aliases The aliases
+ */
+ public IndicesAliasesRequest removeAlias(String[] indices, String... aliases) {
+ addAliasAction(new AliasActions(AliasAction.Type.REMOVE, indices, aliases));
+ return this;
+ }
+
+ /**
+ * Removes one or more aliases from the given index.
+ *
+ * @param index The index
+ * @param aliases The aliases
+ */
+ public IndicesAliasesRequest removeAlias(String index, String... aliases) {
+ addAliasAction(new AliasActions(AliasAction.Type.REMOVE, index, aliases));
+ return this;
+ }
+
+ List<AliasActions> aliasActions() {
+ return this.allAliasActions;
+ }
+
+ public List<AliasActions> getAliasActions() {
+ return aliasActions();
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (allAliasActions.isEmpty()) {
+ return addValidationError("Must specify at least one alias action", validationException);
+ }
+ for (AliasActions aliasAction : allAliasActions) {
+ if (aliasAction.actionType() == AliasAction.Type.ADD) {
+ if (aliasAction.aliases.length != 1) {
+ validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ + "] requires exactly one [alias] to be set", validationException);
+ } else if (!Strings.hasText(aliasAction.aliases[0])) {
+ // else-if guards the aliases[0] access, which would throw on an empty array
+ validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ + "] requires an [alias] to be set", validationException);
+ }
+ } else {
+ if (aliasAction.aliases.length == 0) {
+ validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ + "]: aliases may not be empty", validationException);
+ }
+ for (String alias : aliasAction.aliases) {
+ if (!Strings.hasText(alias)) {
+ validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ + "]: [alias] may not be empty string", validationException);
+ }
+ }
+ if (CollectionUtils.isEmpty(aliasAction.indices)) {
+ validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ + "]: indices may not be empty", validationException);
+ }
+ }
+ if (!CollectionUtils.isEmpty(aliasAction.indices)) {
+ for (String index : aliasAction.indices) {
+ if (!Strings.hasText(index)) {
+ validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ + "]: [index] may not be empty string", validationException);
+ }
+ }
+ }
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ allAliasActions.add(readAliasActions(in));
+ }
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(allAliasActions.size());
+ for (AliasActions aliasAction : allAliasActions) {
+ aliasAction.writeTo(out);
+ }
+ writeTimeout(out);
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ private AliasActions readAliasActions(StreamInput in) throws IOException {
+ AliasActions actions = new AliasActions();
+ return actions.readFrom(in);
+ }
+
+}
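A sketch of building the request above directly; the index and alias names are illustrative, and only methods defined in this file are used:

    import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;

    // One request can batch adds and removes; each call queues an AliasActions entry.
    IndicesAliasesRequest request = new IndicesAliasesRequest();
    request.addAlias("logs", "logs-2014-01", "logs-2014-02"); // alias first, then indices
    request.removeAlias("logs-2013-12", "logs");              // index first, then aliases
    // validate() returns null when every action names at least one alias and index.
    assert request.validate() == null;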
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
new file mode 100644
index 0000000..9041372
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.index.query.FilterBuilder;
+
+import java.util.Map;
+
+/**
+ * Builder for an {@link IndicesAliasesRequest}.
+ */
+public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {
+
+ public IndicesAliasesRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new IndicesAliasesRequest());
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param index The index
+ * @param alias The alias
+ */
+ public IndicesAliasesRequestBuilder addAlias(String index, String alias) {
+ request.addAlias(alias, index);
+ return this;
+ }
+
+ /**
+ * Adds an alias to one or more indices.
+ *
+ * @param indices The indices
+ * @param alias The alias
+ */
+ public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias) {
+ request.addAlias(alias, indices);
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param index The index
+ * @param alias The alias
+ * @param filter The filter
+ */
+ public IndicesAliasesRequestBuilder addAlias(String index, String alias, String filter) {
+ AliasActions action = new AliasActions(AliasAction.Type.ADD, index, alias).filter(filter);
+ request.addAliasAction(action);
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param indices The indices
+ * @param alias The alias
+ * @param filter The filter
+ */
+ public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, String filter) {
+ AliasActions action = new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter);
+ request.addAliasAction(action);
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param indices The indices
+ * @param alias The alias
+ * @param filter The filter
+ */
+ public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, Map<String, Object> filter) {
+ request.addAlias(alias, filter, indices);
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param index The index
+ * @param alias The alias
+ * @param filter The filter
+ */
+ public IndicesAliasesRequestBuilder addAlias(String index, String alias, Map<String, Object> filter) {
+ request.addAlias(alias, filter, index);
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param indices The indices
+ * @param alias The alias
+ * @param filterBuilder The filter
+ */
+ public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, FilterBuilder filterBuilder) {
+ request.addAlias(alias, filterBuilder, indices);
+ return this;
+ }
+
+ /**
+ * Adds an alias to the index.
+ *
+ * @param index The index
+ * @param alias The alias
+ * @param filterBuilder The filter
+ */
+ public IndicesAliasesRequestBuilder addAlias(String index, String alias, FilterBuilder filterBuilder) {
+ request.addAlias(alias, filterBuilder, index);
+ return this;
+ }
+
+ /**
+ * Removes an alias from the index.
+ *
+ * @param index The index
+ * @param alias The alias
+ */
+ public IndicesAliasesRequestBuilder removeAlias(String index, String alias) {
+ request.removeAlias(index, alias);
+ return this;
+ }
+
+ /**
+ * Removes aliases from one or more indices.
+ *
+ * @param indices The indices
+ * @param aliases The aliases
+ */
+ public IndicesAliasesRequestBuilder removeAlias(String[] indices, String... aliases) {
+ request.removeAlias(indices, aliases);
+ return this;
+ }
+
+ /**
+ * Removes aliases from the index.
+ *
+ * @param index The index
+ * @param aliases The aliases
+ */
+ public IndicesAliasesRequestBuilder removeAlias(String index, String[] aliases) {
+ request.removeAlias(index, aliases);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<IndicesAliasesResponse> listener) {
+ ((IndicesAdminClient) client).aliases(request, listener);
+ }
+
+ /**
+ * Adds an alias action to the request.
+ *
+ * @param aliasAction The alias action
+ */
+ public IndicesAliasesRequestBuilder addAliasAction(AliasAction aliasAction) {
+ request.addAliasAction(aliasAction);
+ return this;
+ }
+
+ /**
+ * Adds an alias action to the request.
+ *
+ * @param action The alias action
+ */
+ public IndicesAliasesRequestBuilder addAliasAction(AliasActions action) {
+ request.addAliasAction(action);
+ return this;
+ }
+
+
+}
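A usage sketch for the builder above; "client" is an assumed connected Client, and the index, alias, and field names are illustrative:

    import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
    import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
    import org.elasticsearch.index.query.FilterBuilders;

    IndicesAliasesResponse resp = new IndicesAliasesRequestBuilder(client.admin().indices())
            .addAlias("users", "active-users", FilterBuilders.termFilter("active", true))
            .removeAlias("users", "stale-users")
            .execute().actionGet();
    // The response only reports whether the cluster state change was acknowledged.
    boolean acked = resp.isAcknowledged();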
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java
new file mode 100644
index 0000000..81fb1c1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for an add/remove alias action.
+ */
+public class IndicesAliasesResponse extends AcknowledgedResponse {
+
+ IndicesAliasesResponse() {
+
+ }
+
+ IndicesAliasesResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
new file mode 100644
index 0000000..64da64e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Add/remove aliases action
+ */
+public class TransportIndicesAliasesAction extends TransportMasterNodeOperationAction<IndicesAliasesRequest, IndicesAliasesResponse> {
+
+ private final MetaDataIndexAliasesService indexAliasesService;
+
+ @Inject
+ public TransportIndicesAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataIndexAliasesService indexAliasesService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.indexAliasesService = indexAliasesService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away...
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return IndicesAliasesAction.NAME;
+ }
+
+ @Override
+ protected IndicesAliasesRequest newRequest() {
+ return new IndicesAliasesRequest();
+ }
+
+ @Override
+ protected IndicesAliasesResponse newResponse() {
+ return new IndicesAliasesResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(IndicesAliasesRequest request, ClusterState state) {
+ Set<String> indices = Sets.newHashSet();
+ for (AliasActions aliasAction : request.aliasActions()) {
+ for (String index : aliasAction.indices()) {
+ indices.add(index);
+ }
+ }
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, indices.toArray(new String[indices.size()]));
+ }
+
+ @Override
+ protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) throws ElasticsearchException {
+
+ //Expand the indices names
+ List<AliasActions> actions = request.aliasActions();
+ List<AliasAction> finalActions = new ArrayList<AliasAction>();
+ boolean hasOnlyDeletesButNoneCanBeDone = true;
+ Set<String> aliases = new HashSet<String>();
+ for (AliasActions action : actions) {
+ //expand indices
+ String[] concreteIndices = state.metaData().concreteIndices(action.indices(), request.indicesOptions());
+ //collect the aliases
+ for (String alias : action.aliases()) {
+ aliases.add(alias);
+ }
+ for (String index : concreteIndices) {
+ for (String alias : action.concreteAliases(state.metaData(), index)) {
+ AliasAction finalAction = new AliasAction(action.aliasAction());
+ finalAction.index(index);
+ finalAction.alias(alias);
+ finalActions.add(finalAction);
+ //if there is only delete requests, none will be added if the types do not map to any existing type
+ hasOnlyDeletesButNoneCanBeDone = false;
+ }
+ }
+ }
+ if (hasOnlyDeletesButNoneCanBeDone && actions.size() != 0) {
+ throw new AliasesMissingException(aliases.toArray(new String[aliases.size()]));
+ }
+ request.aliasActions().clear();
+ IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
+ .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
+ .actions(finalActions.toArray(new AliasAction[finalActions.size()]));
+
+ indexAliasesService.indicesAliases(updateRequest, new ClusterStateUpdateListener() {
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new IndicesAliasesResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to perform aliases", t);
+ listener.onFailure(t);
+ }
+ });
+ }
+}
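For REMOVE actions, concreteAliases() in IndicesAliasesRequest expands alias names against the existing alias metadata of each concrete index, so removal by pattern works; when nothing matches anywhere, masterOperation() above throws AliasesMissingException. A sketch (names illustrative):

    import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;

    // "logs-*" is expanded per index against its current aliases before removal.
    IndicesAliasesRequest request = new IndicesAliasesRequest();
    request.removeAlias("logs-2014-01", "logs-*");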
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
new file mode 100644
index 0000000..4009ec1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.exists;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action for checking whether one or more aliases exist.
+ */
+public class AliasesExistAction extends IndicesAction<GetAliasesRequest, AliasesExistResponse, AliasesExistRequestBuilder> {
+
+ public static final AliasesExistAction INSTANCE = new AliasesExistAction();
+ public static final String NAME = "indices/exists/aliases";
+
+ private AliasesExistAction() {
+ super(NAME);
+ }
+
+ @Override
+ public AliasesExistRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new AliasesExistRequestBuilder(client);
+ }
+
+ @Override
+ public AliasesExistResponse newResponse() {
+ return new AliasesExistResponse();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java
new file mode 100644
index 0000000..57be7b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.exists;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.get.BaseAliasesRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class AliasesExistRequestBuilder extends BaseAliasesRequestBuilder<AliasesExistResponse, AliasesExistRequestBuilder> {
+
+ public AliasesExistRequestBuilder(IndicesAdminClient client, String... aliases) {
+ super(client, aliases);
+ }
+
+ @Override
+ protected void doExecute(ActionListener<AliasesExistResponse> listener) {
+ ((IndicesAdminClient) client).aliasesExist(request, listener);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java
new file mode 100644
index 0000000..65474c8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.exists;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public class AliasesExistResponse extends ActionResponse {
+
+ private boolean exists;
+
+ public AliasesExistResponse(boolean exists) {
+ this.exists = exists;
+ }
+
+ AliasesExistResponse() {
+ }
+
+ public boolean exists() {
+ return exists;
+ }
+
+ public boolean isExists() {
+ return exists();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ exists = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(exists);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java
new file mode 100644
index 0000000..29f344c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.alias.exists;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ */
+public class TransportAliasesExistAction extends TransportMasterNodeReadOperationAction<GetAliasesRequest, AliasesExistResponse> {
+
+ @Inject
+ public TransportAliasesExistAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String transportAction() {
+ return AliasesExistAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ // very lightweight operation, no need to fork
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected GetAliasesRequest newRequest() {
+ return new GetAliasesRequest();
+ }
+
+ @Override
+ protected AliasesExistResponse newResponse() {
+ return new AliasesExistResponse();
+ }
+
+ @Override
+ protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) throws ElasticsearchException {
+ String[] concreteIndices = state.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ request.indices(concreteIndices);
+
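+ // answered directly from the cluster state metadata; the matching aliases themselves are not fetched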
+ boolean result = state.metaData().hasAliases(request.aliases(), request.indices());
+ listener.onResponse(new AliasesExistResponse(result));
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
new file mode 100644
index 0000000..0682529
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.get;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ */
+public abstract class BaseAliasesRequestBuilder<Response extends ActionResponse, Builder extends BaseAliasesRequestBuilder<Response, Builder>> extends MasterNodeReadOperationRequestBuilder<GetAliasesRequest, Response, Builder> {
+
+ public BaseAliasesRequestBuilder(IndicesAdminClient client, String... aliases) {
+ super((InternalIndicesAdminClient) client, new GetAliasesRequest(aliases));
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder setAliases(String... aliases) {
+ request.aliases(aliases);
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder addAliases(String... aliases) {
+ request.aliases(ObjectArrays.concat(request.aliases(), aliases, String.class));
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder setIndices(String... indices) {
+ request.indices(indices);
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder addIndices(String... indices) {
+ request.indices(ObjectArrays.concat(request.indices(), indices, String.class));
+ return (Builder) this;
+ }
+
+ /**
+ * Specifies what type of requested indices to ignore and how wildcard index
+ * expressions are expanded, for example whether indices that don't exist
+ * should be ignored.
+ */
+ @SuppressWarnings("unchecked")
+ public Builder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return (Builder) this;
+ }
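+
+ // For example (sketch, assuming IndicesOptions.lenient() from this version of
+ // the API): setIndicesOptions(IndicesOptions.lenient()) ignores unavailable
+ // indices instead of failing the request.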
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
new file mode 100644
index 0000000..85d2a59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.get;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class GetAliasesAction extends IndicesAction<GetAliasesRequest, GetAliasesResponse, GetAliasesRequestBuilder> {
+
+ public static final GetAliasesAction INSTANCE = new GetAliasesAction();
+ public static final String NAME = "indices/get/aliases";
+
+ private GetAliasesAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetAliasesRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GetAliasesRequestBuilder(client);
+ }
+
+ @Override
+ public GetAliasesResponse newResponse() {
+ return new GetAliasesResponse();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java
new file mode 100644
index 0000000..cc0b9a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.alias.get;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public class GetAliasesRequest extends MasterNodeReadOperationRequest<GetAliasesRequest> {
+
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private String[] aliases = Strings.EMPTY_ARRAY;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ public GetAliasesRequest(String[] aliases) {
+ this.aliases = aliases;
+ }
+
+ public GetAliasesRequest(String alias) {
+ this.aliases = new String[]{alias};
+ }
+
+ public GetAliasesRequest() {
+ }
+
+ public GetAliasesRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public GetAliasesRequest aliases(String... aliases) {
+ this.aliases = aliases;
+ return this;
+ }
+
+ public GetAliasesRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public String[] aliases() {
+ return aliases;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ aliases = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
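+ // the local flag is only (de)serialized when talking to a node on or after 1.0.0.RC2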
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ out.writeStringArray(aliases);
+ indicesOptions.writeIndicesOptions(out);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
new file mode 100644
index 0000000..776eae4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder<GetAliasesResponse, GetAliasesRequestBuilder> {
+
+ public GetAliasesRequestBuilder(IndicesAdminClient client, String... aliases) {
+ super(client, aliases);
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetAliasesResponse> listener) {
+ ((IndicesAdminClient) client).getAliases(request, listener);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java
new file mode 100644
index 0000000..87dcfc2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ */
+public class GetAliasesResponse extends ActionResponse {
+
+ private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of();
+
+ public GetAliasesResponse(ImmutableOpenMap<String, List<AliasMetaData>> aliases) {
+ this.aliases = aliases;
+ }
+
+ GetAliasesResponse() {
+ }
+
+
+ public ImmutableOpenMap<String, List<AliasMetaData>> getAliases() {
+ return aliases;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
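+ // wire format: number of indices, then for each index its name, the alias count and each AliasMetaData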
+ int size = in.readVInt();
+ ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliasesBuilder = ImmutableOpenMap.builder();
+ for (int i = 0; i < size; i++) {
+ String key = in.readString();
+ int valueSize = in.readVInt();
+ List<AliasMetaData> value = new ArrayList<AliasMetaData>(valueSize);
+ for (int j = 0; j < valueSize; j++) {
+ value.add(AliasMetaData.Builder.readFrom(in));
+ }
+ aliasesBuilder.put(key, ImmutableList.copyOf(value));
+ }
+ aliases = aliasesBuilder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(aliases.size());
+ for (ObjectObjectCursor<String, List<AliasMetaData>> entry : aliases) {
+ out.writeString(entry.key);
+ out.writeVInt(entry.value.size());
+ for (AliasMetaData aliasMetaData : entry.value) {
+ AliasMetaData.Builder.writeTo(aliasMetaData, out);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
new file mode 100644
index 0000000..9909af4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.alias.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+
+/**
+ */
+public class TransportGetAliasesAction extends TransportMasterNodeReadOperationAction<GetAliasesRequest, GetAliasesResponse> {
+
+ @Inject
+ public TransportGetAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetAliasesAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ // very lightweight operation, all in memory, no need to fork to a thread pool
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected GetAliasesRequest newRequest() {
+ return new GetAliasesRequest();
+ }
+
+ @Override
+ protected GetAliasesResponse newResponse() {
+ return new GetAliasesResponse();
+ }
+
+ @Override
+ protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) throws ElasticsearchException {
+ String[] concreteIndices = state.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ request.indices(concreteIndices);
+
+ @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type
+ ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), request.indices());
+ listener.onResponse(new GetAliasesResponse(result));
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
new file mode 100644
index 0000000..a8c2183
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.analyze;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class AnalyzeAction extends IndicesAction<AnalyzeRequest, AnalyzeResponse, AnalyzeRequestBuilder> {
+
+ public static final AnalyzeAction INSTANCE = new AnalyzeAction();
+ public static final String NAME = "indices/analyze";
+
+ private AnalyzeAction() {
+ super(NAME);
+ }
+
+ @Override
+ public AnalyzeResponse newResponse() {
+ return new AnalyzeResponse();
+ }
+
+ @Override
+ public AnalyzeRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new AnalyzeRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java
new file mode 100644
index 0000000..fb7366c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.analyze;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.single.custom.SingleCustomOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to analyze text, optionally against a specific index. Allows providing
+ * the name of the analyzer to perform the analysis with.
+ */
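+// Usage sketch (hypothetical index name and text):
+//   new AnalyzeRequest("test", "this is a test").analyzer("standard");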
+public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest> {
+
+ private String index;
+
+ private String text;
+
+ private String analyzer;
+
+ private String tokenizer;
+
+ private String[] tokenFilters;
+
+ private String field;
+
+ AnalyzeRequest() {
+
+ }
+
+ /**
+ * Constructs a new analyzer request for the provided text.
+ *
+ * @param text The text to analyze
+ */
+ public AnalyzeRequest(String text) {
+ this.text = text;
+ }
+
+ /**
+ * Constructs a new analyzer request for the provided index and text.
+ *
+ * @param index The index name
+ * @param text The text to analyze
+ */
+ public AnalyzeRequest(@Nullable String index, String text) {
+ this.index = index;
+ this.text = text;
+ }
+
+ public String text() {
+ return this.text;
+ }
+
+ public AnalyzeRequest index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ public String index() {
+ return this.index;
+ }
+
+ public AnalyzeRequest analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ public String analyzer() {
+ return this.analyzer;
+ }
+
+ public AnalyzeRequest tokenizer(String tokenizer) {
+ this.tokenizer = tokenizer;
+ return this;
+ }
+
+ public String tokenizer() {
+ return this.tokenizer;
+ }
+
+ public AnalyzeRequest tokenFilters(String... tokenFilters) {
+ this.tokenFilters = tokenFilters;
+ return this;
+ }
+
+ public String[] tokenFilters() {
+ return this.tokenFilters;
+ }
+
+ public AnalyzeRequest field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ public String field() {
+ return this.field;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (text == null) {
+ validationException = addValidationError("text is missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readOptionalString();
+ text = in.readString();
+ analyzer = in.readOptionalString();
+ tokenizer = in.readOptionalString();
+ int size = in.readVInt();
+ if (size > 0) {
+ tokenFilters = new String[size];
+ for (int i = 0; i < size; i++) {
+ tokenFilters[i] = in.readString();
+ }
+ }
+ field = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalString(index);
+ out.writeString(text);
+ out.writeOptionalString(analyzer);
+ out.writeOptionalString(tokenizer);
+ if (tokenFilters == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(tokenFilters.length);
+ for (String tokenFilter : tokenFilters) {
+ out.writeString(tokenFilter);
+ }
+ }
+ out.writeOptionalString(field);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
new file mode 100644
index 0000000..8e42ce0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.analyze;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.single.custom.SingleCustomOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ *
+ */
+public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder<AnalyzeRequest, AnalyzeResponse, AnalyzeRequestBuilder> {
+
+ public AnalyzeRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new AnalyzeRequest());
+ }
+
+ public AnalyzeRequestBuilder(IndicesAdminClient indicesClient, String index, String text) {
+ super((InternalIndicesAdminClient) indicesClient, new AnalyzeRequest(index, text));
+ }
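+
+ // Usage sketch (hypothetical index name and analysis chain; prepareAnalyze is
+ // assumed from the IndicesAdminClient API):
+ // client.admin().indices().prepareAnalyze("test", "quick brown fox")
+ // .setTokenizer("whitespace").setTokenFilters("lowercase")
+ // .execute().actionGet();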
+
+ /**
+ * Sets the index to use when analyzing the text (for example, when the index
+ * has specific analyzers registered).
+ */
+ public AnalyzeRequestBuilder setIndex(String index) {
+ request.index(index);
+ return this;
+ }
+
+ /**
+ * Sets the analyzer name to use in order to analyze the text.
+ *
+ * @param analyzer The analyzer name.
+ */
+ public AnalyzeRequestBuilder setAnalyzer(String analyzer) {
+ request.analyzer(analyzer);
+ return this;
+ }
+
+ /**
+ * Sets the field whose analyzer will be used to analyze the text. Note that
+ * this requires an index to be set.
+ */
+ public AnalyzeRequestBuilder setField(String field) {
+ request.field(field);
+ return this;
+ }
+
+ /**
+ * Instead of setting the analyzer, sets the tokenizer that will be used as part of a custom
+ * analyzer.
+ */
+ public AnalyzeRequestBuilder setTokenizer(String tokenizer) {
+ request.tokenizer(tokenizer);
+ return this;
+ }
+
+ /**
+ * Sets token filters that will be used on top of a tokenizer provided.
+ */
+ public AnalyzeRequestBuilder setTokenFilters(String... tokenFilters) {
+ request.tokenFilters(tokenFilters);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<AnalyzeResponse> listener) {
+ ((IndicesAdminClient) client).analyze(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
new file mode 100644
index 0000000..31fe5c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.analyze;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ *
+ */
+public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContent {
+
+ public static class AnalyzeToken implements Streamable {
+ private String term;
+ private int startOffset;
+ private int endOffset;
+ private int position;
+ private String type;
+
+ AnalyzeToken() {
+ }
+
+ public AnalyzeToken(String term, int position, int startOffset, int endOffset, String type) {
+ this.term = term;
+ this.position = position;
+ this.startOffset = startOffset;
+ this.endOffset = endOffset;
+ this.type = type;
+ }
+
+ public String getTerm() {
+ return this.term;
+ }
+
+ public int getStartOffset() {
+ return this.startOffset;
+ }
+
+ public int getEndOffset() {
+ return this.endOffset;
+ }
+
+ public int getPosition() {
+ return this.position;
+ }
+
+ public String getType() {
+ return this.type;
+ }
+
+ public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException {
+ AnalyzeToken analyzeToken = new AnalyzeToken();
+ analyzeToken.readFrom(in);
+ return analyzeToken;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ term = in.readString();
+ startOffset = in.readInt();
+ endOffset = in.readInt();
+ position = in.readVInt();
+ type = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(term);
+ out.writeInt(startOffset);
+ out.writeInt(endOffset);
+ out.writeVInt(position);
+ out.writeOptionalString(type);
+ }
+ }
+
+ private List<AnalyzeToken> tokens;
+
+ AnalyzeResponse() {
+ }
+
+ public AnalyzeResponse(List<AnalyzeToken> tokens) {
+ this.tokens = tokens;
+ }
+
+ public List<AnalyzeToken> getTokens() {
+ return this.tokens;
+ }
+
+ @Override
+ public Iterator<AnalyzeToken> iterator() {
+ return tokens.iterator();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
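+ // rendered form: "tokens" : [ { "token", "start_offset", "end_offset", "type", "position" }, ... ]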
+ builder.startArray(Fields.TOKENS);
+ for (AnalyzeToken token : tokens) {
+ builder.startObject();
+ builder.field(Fields.TOKEN, token.getTerm());
+ builder.field(Fields.START_OFFSET, token.getStartOffset());
+ builder.field(Fields.END_OFFSET, token.getEndOffset());
+ builder.field(Fields.TYPE, token.getType());
+ builder.field(Fields.POSITION, token.getPosition());
+ builder.endObject();
+ }
+ builder.endArray();
+ return builder;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ tokens = new ArrayList<AnalyzeToken>(size);
+ for (int i = 0; i < size; i++) {
+ tokens.add(AnalyzeToken.readAnalyzeToken(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(tokens.size());
+ for (AnalyzeToken token : tokens) {
+ token.writeTo(out);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString TOKENS = new XContentBuilderString("tokens");
+ static final XContentBuilderString TOKEN = new XContentBuilderString("token");
+ static final XContentBuilderString START_OFFSET = new XContentBuilderString("start_offset");
+ static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
+ static final XContentBuilderString TYPE = new XContentBuilderString("type");
+ static final XContentBuilderString POSITION = new XContentBuilderString("position");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java
new file mode 100644
index 0000000..3676fc1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.analyze;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.*;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class TransportAnalyzeAction extends TransportSingleCustomOperationAction<AnalyzeRequest, AnalyzeResponse> {
+
+ private final IndicesService indicesService;
+
+ private final IndicesAnalysisService indicesAnalysisService;
+
+ @Inject
+ public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, IndicesAnalysisService indicesAnalysisService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.indicesAnalysisService = indicesAnalysisService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.INDEX;
+ }
+
+ @Override
+ protected AnalyzeRequest newRequest() {
+ return new AnalyzeRequest();
+ }
+
+ @Override
+ protected AnalyzeResponse newResponse() {
+ return new AnalyzeResponse();
+ }
+
+ @Override
+ protected String transportAction() {
+ return AnalyzeAction.NAME;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, AnalyzeRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, AnalyzeRequest request) {
+ if (request.index() != null) {
+ request.index(state.metaData().concreteIndex(request.index()));
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+ return null;
+ }
+
+ @Override
+ protected ShardsIterator shards(ClusterState state, AnalyzeRequest request) {
+ if (request.index() == null) {
+ // just execute locally...
+ return null;
+ }
+ return state.routingTable().index(request.index()).randomAllActiveShardsIt();
+ }
+
+ @Override
+ protected AnalyzeResponse shardOperation(AnalyzeRequest request, int shardId) throws ElasticsearchException {
+ IndexService indexService = null;
+ if (request.index() != null) {
+ indexService = indicesService.indexServiceSafe(request.index());
+ }
+ Analyzer analyzer = null;
+ boolean closeAnalyzer = false;
+ String field = null;
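+ // resolve the analyzer in order: field mapping, explicit analyzer name, custom
+ // tokenizer chain, then the index default (or Lucene's standard analyzer)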
+ if (request.field() != null) {
+ if (indexService == null) {
+ throw new ElasticsearchIllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
+ }
+ FieldMapper<?> fieldMapper = indexService.mapperService().smartNameFieldMapper(request.field());
+ if (fieldMapper != null) {
+ if (fieldMapper.isNumeric()) {
+ throw new ElasticsearchIllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
+ }
+ analyzer = fieldMapper.indexAnalyzer();
+ field = fieldMapper.names().indexName();
+
+ }
+ }
+ if (field == null) {
+ if (indexService != null) {
+ field = indexService.queryParserService().defaultField();
+ } else {
+ field = AllFieldMapper.NAME;
+ }
+ }
+ if (analyzer == null && request.analyzer() != null) {
+ if (indexService == null) {
+ analyzer = indicesAnalysisService.analyzer(request.analyzer());
+ } else {
+ analyzer = indexService.analysisService().analyzer(request.analyzer());
+ }
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
+ }
+ } else if (request.tokenizer() != null) {
+ TokenizerFactory tokenizerFactory;
+ if (indexService == null) {
+ TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer());
+ if (tokenizerFactoryFactory == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
+ }
+ tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), ImmutableSettings.Builder.EMPTY_SETTINGS);
+ } else {
+ tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer());
+ if (tokenizerFactory == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
+ }
+ }
+ TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
+ if (request.tokenFilters() != null && request.tokenFilters().length > 0) {
+ tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
+ for (int i = 0; i < request.tokenFilters().length; i++) {
+ String tokenFilterName = request.tokenFilters()[i];
+ if (indexService == null) {
+ TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName);
+ if (tokenFilterFactoryFactory == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find global token filter under [" + request.tokenizer() + "]");
+ }
+ tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ } else {
+ tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName);
+ if (tokenFilterFactories[i] == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + request.tokenizer() + "]");
+ }
+ }
+ if (tokenFilterFactories[i] == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + request.tokenizer() + "]");
+ }
+ }
+ }
+ analyzer = new CustomAnalyzer(tokenizerFactory, new CharFilterFactory[0], tokenFilterFactories);
+ closeAnalyzer = true;
+ } else if (analyzer == null) {
+ if (indexService == null) {
+ analyzer = Lucene.STANDARD_ANALYZER;
+ } else {
+ analyzer = indexService.analysisService().defaultIndexAnalyzer();
+ }
+ }
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find analyzer");
+ }
+
+ List<AnalyzeResponse.AnalyzeToken> tokens = Lists.newArrayList();
+ TokenStream stream = null;
+ try {
+ stream = analyzer.tokenStream(field, request.text());
+ stream.reset();
+ CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
+ PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
+ OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
+ TypeAttribute type = stream.addAttribute(TypeAttribute.class);
+
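+ // rebuild absolute positions from position increments; an increment of zero
+ // (for example a synonym) keeps the token at the previous position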
+ int position = 0;
+ while (stream.incrementToken()) {
+ int increment = posIncr.getPositionIncrement();
+ if (increment > 0) {
+ position = position + increment;
+ }
+ tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), position, offset.startOffset(), offset.endOffset(), type.type()));
+ }
+ stream.end();
+ } catch (IOException e) {
+ throw new ElasticsearchException("failed to analyze", e);
+ } finally {
+ if (stream != null) {
+ try {
+ stream.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ if (closeAnalyzer) {
+ analyzer.close();
+ }
+ }
+
+ return new AnalyzeResponse(tokens);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
new file mode 100644
index 0000000..244ca6c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class ClearIndicesCacheAction extends IndicesAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ClearIndicesCacheRequestBuilder> {
+
+ public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction();
+ public static final String NAME = "indices/cache/clear";
+
+ private ClearIndicesCacheAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClearIndicesCacheResponse newResponse() {
+ return new ClearIndicesCacheResponse();
+ }
+
+ @Override
+ public ClearIndicesCacheRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new ClearIndicesCacheRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
new file mode 100644
index 0000000..3aa7ddb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearIndicesCacheRequest> {
+
+ private boolean filterCache = false;
+ private boolean fieldDataCache = false;
+ private boolean idCache = false;
+ private boolean recycler = false;
+ private String[] fields = null;
+ private String[] filterKeys = null;
+
+
+ ClearIndicesCacheRequest() {
+ }
+
+ public ClearIndicesCacheRequest(String... indices) {
+ super(indices);
+ }
+
+ public boolean filterCache() {
+ return filterCache;
+ }
+
+ public ClearIndicesCacheRequest filterCache(boolean filterCache) {
+ this.filterCache = filterCache;
+ return this;
+ }
+
+ public boolean fieldDataCache() {
+ return this.fieldDataCache;
+ }
+
+ public ClearIndicesCacheRequest fieldDataCache(boolean fieldDataCache) {
+ this.fieldDataCache = fieldDataCache;
+ return this;
+ }
+
+ public ClearIndicesCacheRequest fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ public String[] fields() {
+ return this.fields;
+ }
+
+ public ClearIndicesCacheRequest filterKeys(String... filterKeys) {
+ this.filterKeys = filterKeys;
+ return this;
+ }
+
+ public String[] filterKeys() {
+ return this.filterKeys;
+ }
+
+ public boolean idCache() {
+ return this.idCache;
+ }
+
+ public ClearIndicesCacheRequest recycler(boolean recycler) {
+ this.recycler = recycler;
+ return this;
+ }
+
+ public boolean recycler() {
+ return this.recycler;
+ }
+
+ public ClearIndicesCacheRequest idCache(boolean idCache) {
+ this.idCache = idCache;
+ return this;
+ }
+
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ filterCache = in.readBoolean();
+ fieldDataCache = in.readBoolean();
+ idCache = in.readBoolean();
+ recycler = in.readBoolean();
+ fields = in.readStringArray();
+ filterKeys = in.readStringArray();
+ }
+
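+ // fields and filterKeys may be null; writeStringArrayNullable below encodes
+ // null as a zero-length array, so readStringArray above never yields null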
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(filterCache);
+ out.writeBoolean(fieldDataCache);
+ out.writeBoolean(idCache);
+ out.writeBoolean(recycler);
+ out.writeStringArrayNullable(fields);
+ out.writeStringArrayNullable(filterKeys);
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
new file mode 100644
index 0000000..91b7630
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ *
+ */
+public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ClearIndicesCacheRequestBuilder> {
+
+ public ClearIndicesCacheRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new ClearIndicesCacheRequest());
+ }
+
+ public ClearIndicesCacheRequestBuilder setFilterCache(boolean filterCache) {
+ request.filterCache(filterCache);
+ return this;
+ }
+
+ public ClearIndicesCacheRequestBuilder setFieldDataCache(boolean fieldDataCache) {
+ request.fieldDataCache(fieldDataCache);
+ return this;
+ }
+
+ public ClearIndicesCacheRequestBuilder setFields(String... fields) {
+ request.fields(fields);
+ return this;
+ }
+
+ public ClearIndicesCacheRequestBuilder setFilterKeys(String... filterKeys) {
+ request.filterKeys(filterKeys);
+ return this;
+ }
+
+ public ClearIndicesCacheRequestBuilder setIdCache(boolean idCache) {
+ request.idCache(idCache);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClearIndicesCacheResponse> listener) {
+ ((IndicesAdminClient) client).clearCache(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java
new file mode 100644
index 0000000..a9f0948
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A response for a clear indices cache action.
+ */
+public class ClearIndicesCacheResponse extends BroadcastOperationResponse {
+
+ ClearIndicesCacheResponse() {
+
+ }
+
+ ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheRequest.java
new file mode 100644
index 0000000..e0e07e8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheRequest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * An internal, per-shard request used by the clear cache broadcast action.
+ */
+class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
+
+ private boolean filterCache = false;
+ private boolean fieldDataCache = false;
+ private boolean idCache = false;
+ private boolean recycler;
+
+ private String[] fields = null;
+ private String[] filterKeys = null;
+
+ ShardClearIndicesCacheRequest() {
+ }
+
+ public ShardClearIndicesCacheRequest(String index, int shardId, ClearIndicesCacheRequest request) {
+ super(index, shardId, request);
+ filterCache = request.filterCache();
+ fieldDataCache = request.fieldDataCache();
+ idCache = request.idCache();
+ fields = request.fields();
+ filterKeys = request.filterKeys();
+ recycler = request.recycler();
+ }
+
+ public boolean filterCache() {
+ return filterCache;
+ }
+
+ public boolean fieldDataCache() {
+ return this.fieldDataCache;
+ }
+
+ public boolean idCache() {
+ return this.idCache;
+ }
+
+ public boolean recycler() {
+ return this.recycler;
+ }
+
+ public String[] fields() {
+ return this.fields;
+ }
+
+ public String[] filterKeys() {
+ return this.filterKeys;
+ }
+
+    public ShardClearIndicesCacheRequest filterCache(boolean filterCache) {
+        this.filterCache = filterCache;
+        return this;
+    }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ filterCache = in.readBoolean();
+ fieldDataCache = in.readBoolean();
+ idCache = in.readBoolean();
+ recycler = in.readBoolean();
+ fields = in.readStringArray();
+ filterKeys = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(filterCache);
+ out.writeBoolean(fieldDataCache);
+ out.writeBoolean(idCache);
+ out.writeBoolean(recycler);
+ out.writeStringArrayNullable(fields);
+ out.writeStringArrayNullable(filterKeys);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheResponse.java
new file mode 100644
index 0000000..137b928
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ShardClearIndicesCacheResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * An internal, per-shard response for the clear cache broadcast action.
+ */
+class ShardClearIndicesCacheResponse extends BroadcastShardOperationResponse {
+
+ ShardClearIndicesCacheResponse() {
+ }
+
+ public ShardClearIndicesCacheResponse(String index, int shardId) {
+ super(index, shardId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
new file mode 100644
index 0000000..859a9d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.cache.filter.terms.IndicesTermsFilterCache;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Indices clear cache action.
+ */
+public class TransportClearIndicesCacheAction extends TransportBroadcastOperationAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ShardClearIndicesCacheRequest, ShardClearIndicesCacheResponse> {
+
+ private final IndicesService indicesService;
+ private final IndicesTermsFilterCache termsFilterCache;
+ private final CacheRecycler cacheRecycler;
+
+ @Inject
+ public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ TransportService transportService, IndicesService indicesService, IndicesTermsFilterCache termsFilterCache,
+ CacheRecycler cacheRecycler) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.termsFilterCache = termsFilterCache;
+ this.cacheRecycler = cacheRecycler;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ClearIndicesCacheAction.NAME;
+ }
+
+ @Override
+ protected ClearIndicesCacheRequest newRequest() {
+ return new ClearIndicesCacheRequest();
+ }
+
+ @Override
+ protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+                // simply ignore non-active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ successfulShards++;
+ }
+ }
+ return new ClearIndicesCacheResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardClearIndicesCacheRequest newShardRequest() {
+ return new ShardClearIndicesCacheRequest();
+ }
+
+ @Override
+ protected ShardClearIndicesCacheRequest newShardRequest(ShardRouting shard, ClearIndicesCacheRequest request) {
+ return new ShardClearIndicesCacheRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardClearIndicesCacheResponse newShardResponse() {
+ return new ShardClearIndicesCacheResponse();
+ }
+
+ @Override
+ protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) throws ElasticsearchException {
+ IndexService service = indicesService.indexService(request.index());
+ if (service != null) {
+ // we always clear the query cache
+ service.cache().queryParserCache().clear();
+ boolean clearedAtLeastOne = false;
+ if (request.filterCache()) {
+ clearedAtLeastOne = true;
+ service.cache().filter().clear("api");
+ termsFilterCache.clear("api");
+ }
+ if (request.filterKeys() != null && request.filterKeys().length > 0) {
+ clearedAtLeastOne = true;
+ service.cache().filter().clear("api", request.filterKeys());
+ termsFilterCache.clear("api", request.filterKeys());
+ }
+ if (request.fieldDataCache()) {
+ clearedAtLeastOne = true;
+ if (request.fields() == null || request.fields().length == 0) {
+ service.fieldData().clear();
+ } else {
+ for (String field : request.fields()) {
+ service.fieldData().clearField(field);
+ }
+ }
+ }
+ if (request.recycler()) {
+ logger.info("Clear CacheRecycler on index [{}]", service.index());
+ clearedAtLeastOne = true;
+                // cacheRecycler.clear(); // clearing the recycler is disabled here, so this branch only logs
+ }
+ if (request.idCache()) {
+ clearedAtLeastOne = true;
+ service.cache().idCache().clear();
+ }
+ if (!clearedAtLeastOne) {
+ if (request.fields() != null && request.fields().length > 0) {
+ // only clear caches relating to the specified fields
+ for (String field : request.fields()) {
+ service.fieldData().clearField(field);
+ }
+ } else {
+ service.cache().clear("api");
+ service.fieldData().clear();
+ termsFilterCache.clear("api");
+ }
+ }
+ }
+ return new ShardClearIndicesCacheResponse(request.index(), request.shardId());
+ }
+
+ /**
+     * The clear cache request works against *all* shards.
+ */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, ClearIndicesCacheRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, ClearIndicesCacheRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, ClearIndicesCacheRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+
+}
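
Worth noting from shardOperation above: when none of the specific flags are set, the `!clearedAtLeastOne` branch clears everything, or only the named fields if fields were given. A sketch of both request shapes, reusing the `client` assumption from the earlier example:

    import org.elasticsearch.client.Client;

    public class ClearCacheDefaults {
        // With no flags set, the shard operation falls through to the
        // clearedAtLeastOne == false branch and clears the filter cache,
        // field data and terms filter cache in one go.
        static void clearEverything(Client client, String index) {
            client.admin().indices().prepareClearCache(index).execute().actionGet();
        }

        // With only fields set (and no fieldDataCache flag), the same branch
        // clears field data for just those fields.
        static void clearFieldsOnly(Client client, String index) {
            client.admin().indices().prepareClearCache(index)
                    .setFields("title")
                    .execute().actionGet();
        }
    }
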
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
new file mode 100644
index 0000000..4956d17
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.close;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class CloseIndexAction extends IndicesAction<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> {
+
+ public static final CloseIndexAction INSTANCE = new CloseIndexAction();
+ public static final String NAME = "indices/close";
+
+ private CloseIndexAction() {
+ super(NAME);
+ }
+
+ @Override
+ public CloseIndexResponse newResponse() {
+ return new CloseIndexResponse();
+ }
+
+ @Override
+ public CloseIndexRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new CloseIndexRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java
new file mode 100644
index 0000000..ba5cc2a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.close;
+
+import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
+
+/**
+ * Cluster state update request for closing one or more indices
+ */
+public class CloseIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<CloseIndexClusterStateUpdateRequest> {
+
+ CloseIndexClusterStateUpdateRequest() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java
new file mode 100644
index 0000000..6141d6e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.close;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to close an index.
+ */
+public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> {
+
+ private String[] indices;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
+
+ CloseIndexRequest() {
+ }
+
+ /**
+ * Constructs a new close index request for the specified index.
+ */
+ public CloseIndexRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (indices == null || indices.length == 0) {
+ validationException = addValidationError("index is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The indices to be closed
+ * @return the indices to be closed
+ */
+ String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Sets the indices to be closed
+ * @param indices the indices to be closed
+ * @return the request itself
+ */
+ public CloseIndexRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
+ * For example indices that don't exist.
+ *
+ * @return the desired behaviour regarding indices to ignore and wildcard indices expressions
+ */
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /**
+     * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
+ * For example indices that don't exist.
+ *
+ * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
+ * @return the request itself
+ */
+ public CloseIndexRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ readTimeout(in);
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ writeTimeout(out);
+ indicesOptions.writeIndicesOptions(out);
+ }
+}
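
For illustration, a sketch of constructing this request directly. The `fromOptions` arguments mirror the default above (ignore-unavailable, allow-no-indices, expand-open-wildcards, expand-closed-wildcards), here loosened so wildcard patterns are tolerated:

    import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
    import org.elasticsearch.action.support.IndicesOptions;

    public class CloseIndexExample {
        // Close every open index matching the pattern, tolerating missing
        // names and empty wildcard matches.
        static CloseIndexRequest lenientClose(String pattern) {
            return new CloseIndexRequest(pattern)
                    .indicesOptions(IndicesOptions.fromOptions(true, true, true, false));
        }
    }
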
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java
new file mode 100644
index 0000000..d5a3fe7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.close;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * Builder for a close index request
+ */
+public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> {
+
+ public CloseIndexRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new CloseIndexRequest());
+ }
+
+ public CloseIndexRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
+ super((InternalIndicesAdminClient) indicesClient, new CloseIndexRequest(indices));
+ }
+
+ /**
+ * Sets the indices to be closed
+ * @param indices the indices to be closed
+ * @return the request itself
+ */
+ public CloseIndexRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+     * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
+     * For example indices that don't exist.
+ *
+ * @param indicesOptions the desired behaviour regarding indices to ignore and indices wildcard expressions
+ * @return the request itself
+ */
+ public CloseIndexRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<CloseIndexResponse> listener) {
+ ((IndicesAdminClient) client).close(request, listener);
+ }
+}
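
And the builder route, assuming the same connected `client` (`prepareClose` is the matching `IndicesAdminClient` entry point):

    import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
    import org.elasticsearch.client.Client;

    public class CloseViaBuilder {
        static boolean close(Client client, String index) {
            CloseIndexResponse response = client.admin().indices()
                    .prepareClose(index)
                    .execute().actionGet();
            // acknowledged is true once the master has applied the cluster
            // state change within the ack timeout
            return response.isAcknowledged();
        }
    }
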
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java
new file mode 100644
index 0000000..b85962c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.close;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for a close index action.
+ */
+public class CloseIndexResponse extends AcknowledgedResponse {
+
+ CloseIndexResponse() {
+ }
+
+ CloseIndexResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
new file mode 100644
index 0000000..f57ff8a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.close;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Close index action
+ */
+public class TransportCloseIndexAction extends TransportMasterNodeOperationAction<CloseIndexRequest, CloseIndexResponse> {
+
+ private final MetaDataIndexStateService indexStateService;
+ private final DestructiveOperations destructiveOperations;
+
+ @Inject
+ public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataIndexStateService indexStateService, NodeSettingsService nodeSettingsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.indexStateService = indexStateService;
+ this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+ }
+
+ @Override
+ protected String executor() {
+ // no need to use a thread pool, we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return CloseIndexAction.NAME;
+ }
+
+ @Override
+ protected CloseIndexRequest newRequest() {
+ return new CloseIndexRequest();
+ }
+
+ @Override
+ protected CloseIndexResponse newResponse() {
+ return new CloseIndexResponse();
+ }
+
+ @Override
+ protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
+ destructiveOperations.failDestructive(request.indices());
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) throws ElasticsearchException {
+ request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
+ .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
+ .indices(request.indices());
+
+ indexStateService.closeIndex(updateRequest, new ClusterStateUpdateListener() {
+
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new CloseIndexResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to close indices [{}]", t, request.indices());
+ listener.onFailure(t);
+ }
+ });
+ }
+}
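
One behavioral note on the transport action: `DestructiveOperations.failDestructive` rejects wildcard or `_all` closes when the node setting `action.destructive_requires_name` is enabled. A sketch of setting it programmatically (node configuration would work equally well):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class DestructiveGuard {
        // With this setting, close (and delete) requests must name indices
        // explicitly; patterns such as "logs-*" or "_all" are rejected up front.
        static Settings requireExplicitNames() {
            return ImmutableSettings.settingsBuilder()
                    .put("action.destructive_requires_name", true)
                    .build();
        }
    }
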
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
new file mode 100644
index 0000000..705f7c1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class CreateIndexAction extends IndicesAction<CreateIndexRequest, CreateIndexResponse, CreateIndexRequestBuilder> {
+
+ public static final CreateIndexAction INSTANCE = new CreateIndexAction();
+ public static final String NAME = "indices/create";
+
+ private CreateIndexAction() {
+ super(NAME);
+ }
+
+ @Override
+ public CreateIndexResponse newResponse() {
+ return new CreateIndexResponse();
+ }
+
+ @Override
+ public CreateIndexRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new CreateIndexRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
new file mode 100644
index 0000000..99d3d10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Cluster state update request for creating an index
+ */
+public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequest<CreateIndexClusterStateUpdateRequest> {
+
+ final String cause;
+ final String index;
+
+ private IndexMetaData.State state = IndexMetaData.State.OPEN;
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private Map<String, String> mappings = Maps.newHashMap();
+
+ private Map<String, IndexMetaData.Custom> customs = newHashMap();
+
+ private Set<ClusterBlock> blocks = Sets.newHashSet();
+
+
+ CreateIndexClusterStateUpdateRequest(String cause, String index) {
+ this.cause = cause;
+ this.index = index;
+ }
+
+ public CreateIndexClusterStateUpdateRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public CreateIndexClusterStateUpdateRequest mappings(Map<String, String> mappings) {
+ this.mappings.putAll(mappings);
+ return this;
+ }
+
+ public CreateIndexClusterStateUpdateRequest customs(Map<String, IndexMetaData.Custom> customs) {
+ this.customs.putAll(customs);
+ return this;
+ }
+
+ public CreateIndexClusterStateUpdateRequest blocks(Set<ClusterBlock> blocks) {
+ this.blocks.addAll(blocks);
+ return this;
+ }
+
+ public CreateIndexClusterStateUpdateRequest state(IndexMetaData.State state) {
+ this.state = state;
+ return this;
+ }
+
+ public String cause() {
+ return cause;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public IndexMetaData.State state() {
+ return state;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+
+ public Map<String, String> mappings() {
+ return mappings;
+ }
+
+ public Map<String, IndexMetaData.Custom> customs() {
+ return customs;
+ }
+
+ public Set<ClusterBlock> blocks() {
+ return blocks;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java
new file mode 100644
index 0000000..665dff3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+
+/**
+ * A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}.
+ * <p/>
+ * <p>The index can optionally be created with specific settings using {@link #settings(org.elasticsearch.common.settings.Settings)}.
+ *
+ * @see org.elasticsearch.client.IndicesAdminClient#create(CreateIndexRequest)
+ * @see org.elasticsearch.client.Requests#createIndexRequest(String)
+ * @see CreateIndexResponse
+ */
+public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> {
+
+ private String cause = "";
+
+ private String index;
+
+ private Settings settings = EMPTY_SETTINGS;
+
+ private Map<String, String> mappings = newHashMap();
+
+ private Map<String, IndexMetaData.Custom> customs = newHashMap();
+
+ CreateIndexRequest() {
+ }
+
+ /**
+ * Constructs a new request to create an index with the specified name.
+ */
+ public CreateIndexRequest(String index) {
+ this(index, EMPTY_SETTINGS);
+ }
+
+ /**
+ * Constructs a new request to create an index with the specified name and settings.
+ */
+ public CreateIndexRequest(String index, Settings settings) {
+ this.index = index;
+ this.settings = settings;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = addValidationError("index is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The index name to create.
+ */
+ String index() {
+ return index;
+ }
+
+ public CreateIndexRequest index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ /**
+ * The settings to create the index with.
+ */
+ Settings settings() {
+ return settings;
+ }
+
+ /**
+ * The cause for this index creation.
+ */
+ String cause() {
+ return cause;
+ }
+
+ /**
+     * A simplified version of settings that takes settings as key value pairs.
+ */
+ public CreateIndexRequest settings(Object... settings) {
+ this.settings = ImmutableSettings.builder().put(settings).build();
+ return this;
+ }
+
+ /**
+ * The settings to create the index with.
+ */
+ public CreateIndexRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * The settings to create the index with.
+ */
+ public CreateIndexRequest settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ /**
+ * The settings to create the index with (either json/yaml/properties format)
+ */
+ public CreateIndexRequest settings(String source) {
+ this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+     * Allows setting the settings using a JSON builder.
+ */
+ public CreateIndexRequest settings(XContentBuilder builder) {
+ try {
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate json settings from builder", e);
+ }
+ return this;
+ }
+
+ /**
+ * The settings to create the index with (either json/yaml/properties format)
+ */
+ @SuppressWarnings("unchecked")
+ public CreateIndexRequest settings(Map source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public CreateIndexRequest mapping(String type, String source) {
+ mappings.put(type, source);
+ return this;
+ }
+
+ /**
+ * The cause for this index creation.
+ */
+ public CreateIndexRequest cause(String cause) {
+ this.cause = cause;
+ return this;
+ }
+
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public CreateIndexRequest mapping(String type, XContentBuilder source) {
+ try {
+ mappings.put(type, source.string());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
+ }
+ return this;
+ }
+
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ @SuppressWarnings("unchecked")
+ public CreateIndexRequest mapping(String type, Map source) {
+        // wrap the source in a map keyed by type if it is not already
+ if (source.size() != 1 || !source.containsKey(type)) {
+ source = MapBuilder.<String, Object>newMapBuilder().put(type, source).map();
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ return mapping(type, builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ /**
+     * A specialized, simplified mapping source method that takes the form of a simple properties definition:
+ * ("field1", "type=string,store=true").
+ */
+ public CreateIndexRequest mapping(String type, Object... source) {
+ mapping(type, PutMappingRequest.buildFromSimplifiedDef(type, source));
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequest source(String source) {
+ return source(source.getBytes(Charsets.UTF_8));
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequest source(XContentBuilder source) {
+ return source(source.bytes());
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequest source(byte[] source) {
+ return source(source, 0, source.length);
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequest source(byte[] source, int offset, int length) {
+ return source(new BytesArray(source, offset, length));
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequest source(BytesReference source) {
+ XContentType xContentType = XContentFactory.xContentType(source);
+ if (xContentType != null) {
+ try {
+ source(XContentFactory.xContent(xContentType).createParser(source).mapAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to parse source for create index", e);
+ }
+ } else {
+ settings(new String(source.toBytes(), Charsets.UTF_8));
+ }
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ @SuppressWarnings("unchecked")
+ public CreateIndexRequest source(Map<String, Object> source) {
+ boolean found = false;
+ for (Map.Entry<String, Object> entry : source.entrySet()) {
+ String name = entry.getKey();
+ if (name.equals("settings")) {
+ found = true;
+ settings((Map<String, Object>) entry.getValue());
+ } else if (name.equals("mappings")) {
+ found = true;
+ Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
+ for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
+ mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
+ }
+ } else {
+ // maybe custom?
+ IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name);
+ if (factory != null) {
+ found = true;
+ try {
+ customs.put(name, factory.fromMap((Map<String, Object>) entry.getValue()));
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]");
+ }
+ }
+ }
+ }
+ if (!found) {
+ // the top level are settings, use them
+ settings(source);
+ }
+ return this;
+ }
+
+ Map<String, String> mappings() {
+ return this.mappings;
+ }
+
+ /**
+ * Adds custom metadata to the index to be created.
+ */
+ public CreateIndexRequest custom(IndexMetaData.Custom custom) {
+ customs.put(custom.type(), custom);
+ return this;
+ }
+
+ Map<String, IndexMetaData.Custom> customs() {
+ return this.customs;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ cause = in.readString();
+ index = in.readString();
+ settings = readSettingsFromStream(in);
+ readTimeout(in);
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ mappings.put(in.readString(), in.readString());
+ }
+ int customSize = in.readVInt();
+ for (int i = 0; i < customSize; i++) {
+ String type = in.readString();
+ IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
+ customs.put(type, customIndexMetaData);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(cause);
+ out.writeString(index);
+ writeSettingsToStream(settings, out);
+ writeTimeout(out);
+ out.writeVInt(mappings.size());
+ for (Map.Entry<String, String> entry : mappings.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ out.writeVInt(customs.size());
+ for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
+ out.writeString(entry.getKey());
+ IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out);
+ }
+ }
+}
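
For illustration, a sketch of driving the combined `source(...)` path above, where settings and mappings travel in one JSON document (the index, type, and field names here are only examples):

    import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;

    public class CreateViaSource {
        // "settings" and "mappings" keys are routed by source(Map) above;
        // anything else is tried as custom index metadata.
        static CreateIndexRequest newIndex() {
            return new CreateIndexRequest("my_index").source(
                    "{"
                  + "  \"settings\": { \"number_of_shards\": 1 },"
                  + "  \"mappings\": {"
                  + "    \"doc\": { \"properties\": { \"title\": { \"type\": \"string\" } } }"
                  + "  }"
                  + "}");
        }
    }
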
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java
new file mode 100644
index 0000000..d3a9ae8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.util.Map;
+
+/**
+ * Builder for a create index request
+ */
+public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<CreateIndexRequest, CreateIndexResponse, CreateIndexRequestBuilder> {
+
+ public CreateIndexRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new CreateIndexRequest());
+ }
+
+ public CreateIndexRequestBuilder(IndicesAdminClient indicesClient, String index) {
+ super((InternalIndicesAdminClient) indicesClient, new CreateIndexRequest(index));
+ }
+
+ /**
+ * Sets the name of the index to be created
+ */
+ public CreateIndexRequestBuilder setIndex(String index) {
+ request.index(index);
+ return this;
+ }
+
+ /**
+ * The settings to create the index with.
+ */
+ public CreateIndexRequestBuilder setSettings(Settings settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * The settings to create the index with.
+ */
+ public CreateIndexRequestBuilder setSettings(Settings.Builder settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+     * Allows setting the settings using a JSON builder.
+ */
+ public CreateIndexRequestBuilder setSettings(XContentBuilder builder) {
+ request.settings(builder);
+ return this;
+ }
+
+ /**
+ * The settings to create the index with (either json/yaml/properties format)
+ */
+ public CreateIndexRequestBuilder setSettings(String source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+     * A simplified version of settings that takes settings as key value pairs.
+ */
+ public CreateIndexRequestBuilder setSettings(Object... settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * The settings to create the index with (either json/yaml/properties format)
+ */
+ public CreateIndexRequestBuilder setSettings(Map<String, Object> source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public CreateIndexRequestBuilder addMapping(String type, String source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+ * The cause for this index creation.
+ */
+ public CreateIndexRequestBuilder setCause(String cause) {
+ request.cause(cause);
+ return this;
+ }
+
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public CreateIndexRequestBuilder addMapping(String type, XContentBuilder source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public CreateIndexRequestBuilder addMapping(String type, Map<String, Object> source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+     * A specialized, simplified mapping source method that takes the form of a simple properties definition:
+ * ("field1", "type=string,store=true").
+ */
+ public CreateIndexRequestBuilder addMapping(String type, Object... source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequestBuilder setSource(String source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequestBuilder setSource(BytesReference source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequestBuilder setSource(byte[] source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequestBuilder setSource(byte[] source, int offset, int length) {
+ request.source(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequestBuilder setSource(Map<String, Object> source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Adds custom metadata to the index to be created.
+ */
+ public CreateIndexRequestBuilder addCustom(IndexMetaData.Custom custom) {
+ request.custom(custom);
+ return this;
+ }
+
+ /**
+ * Sets the settings and mappings as a single source.
+ */
+ public CreateIndexRequestBuilder setSource(XContentBuilder source) {
+ request.source(source);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<CreateIndexResponse> listener) {
+ ((IndicesAdminClient) client).create(request, listener);
+ }
+}
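
And the equivalent through the builder, assuming the same connected `client`; `addMapping(type, Object...)` uses the simplified `field, "type=..."` form handled by `PutMappingRequest.buildFromSimplifiedDef`:

    import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.ImmutableSettings;

    public class CreateViaBuilder {
        static CreateIndexResponse create(Client client) {
            return client.admin().indices()
                    .prepareCreate("my_index")
                    .setSettings(ImmutableSettings.settingsBuilder()
                            .put("index.number_of_shards", 1)
                            .put("index.number_of_replicas", 0))
                    .addMapping("doc", "title", "type=string,store=true")
                    .execute().actionGet();
        }
    }
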
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java
new file mode 100644
index 0000000..485f0a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for a create index action.
+ */
+public class CreateIndexResponse extends AcknowledgedResponse {
+
+ CreateIndexResponse() {
+ }
+
+ CreateIndexResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
new file mode 100644
index 0000000..3b013b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Create index action.
+ */
+public class TransportCreateIndexAction extends TransportMasterNodeOperationAction<CreateIndexRequest, CreateIndexResponse> {
+
+ private final MetaDataCreateIndexService createIndexService;
+
+ @Inject
+ public TransportCreateIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataCreateIndexService createIndexService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.createIndexService = createIndexService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return CreateIndexAction.NAME;
+ }
+
+ @Override
+ protected CreateIndexRequest newRequest() {
+ return new CreateIndexRequest();
+ }
+
+ @Override
+ protected CreateIndexResponse newResponse() {
+ return new CreateIndexResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, request.index());
+ }
+
+ @Override
+ protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener<CreateIndexResponse> listener) throws ElasticsearchException {
+ String cause = request.cause();
+ if (cause.length() == 0) {
+ cause = "api";
+ }
+
+ CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(cause, request.index())
+ .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
+ .settings(request.settings()).mappings(request.mappings())
+ .customs(request.customs());
+
+ createIndexService.createIndex(updateRequest, new ClusterStateUpdateListener() {
+
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new CreateIndexResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ if (t instanceof IndexAlreadyExistsException) {
+ logger.trace("[{}] failed to create", t, request.index());
+ } else {
+ logger.debug("[{}] failed to create", t, request.index());
+ }
+ listener.onFailure(t);
+ }
+ });
+ }
+}
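
Since the transport action reports IndexAlreadyExistsException through onFailure (and logs it at trace rather than debug, as above), callers that want create-if-absent semantics can branch on the failure type. A hedged sketch reusing the illustrative names from the previous examples:

    indicesClient.create(new CreateIndexRequest("logs-2014"), new ActionListener<CreateIndexResponse>() {
        @Override
        public void onResponse(CreateIndexResponse response) {
            // index was created
        }

        @Override
        public void onFailure(Throwable t) {
            if (t instanceof IndexAlreadyExistsException) {
                // benign for idempotent setup code
            } else {
                // a genuine failure
            }
        }
    });
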
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/create/package-info.java
new file mode 100644
index 0000000..7114dc6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/create/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Create index action.
+ */
+package org.elasticsearch.action.admin.indices.create; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java
new file mode 100644
index 0000000..7d35afb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.delete;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class DeleteIndexAction extends IndicesAction<DeleteIndexRequest, DeleteIndexResponse, DeleteIndexRequestBuilder> {
+
+ public static final DeleteIndexAction INSTANCE = new DeleteIndexAction();
+ public static final String NAME = "indices/delete";
+
+ private DeleteIndexAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteIndexResponse newResponse() {
+ return new DeleteIndexResponse();
+ }
+
+ @Override
+ public DeleteIndexRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new DeleteIndexRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java
new file mode 100644
index 0000000..6164aae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+
+/**
+ * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}.
+ */
+public class DeleteIndexRequest extends MasterNodeOperationRequest<DeleteIndexRequest> {
+
+ private String[] indices;
+ // Delete index should work by default on both open and closed indices.
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
+ private TimeValue timeout = AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
+
+ DeleteIndexRequest() {
+ }
+
+ /**
+ * Constructs a new delete index request for the specified index.
+ */
+ public DeleteIndexRequest(String index) {
+ this.indices = new String[]{index};
+ }
+
+ public DeleteIndexRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public DeleteIndexRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (indices == null || indices.length == 0) {
+ validationException = addValidationError("index / indices is missing", validationException);
+ }
+ return validationException;
+ }
+
+ public DeleteIndexRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * The indices to delete.
+ */
+ String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
+ * to <tt>10s</tt>.
+ */
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ /**
+ * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
+ * to <tt>10s</tt>.
+ */
+ public DeleteIndexRequest timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return this;
+ }
+
+ /**
+ * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
+ * to <tt>10s</tt>.
+ */
+ public DeleteIndexRequest timeout(String timeout) {
+ return timeout(TimeValue.parseTimeValue(timeout, null));
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ timeout = readTimeValue(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ indicesOptions.writeIndicesOptions(out);
+ timeout.writeTo(out);
+ }
+}
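
A small sketch of assembling this request through the Requests#deleteIndexRequest helper named in the javadoc above; the index name is illustrative, and the chained setter returns the request itself:

    DeleteIndexRequest request = Requests.deleteIndexRequest("old-logs")
            .timeout("10s"); // parsed via TimeValue.parseTimeValue; matches the documented default
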
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
new file mode 100644
index 0000000..338f2fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ *
+ */
+public class DeleteIndexRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteIndexRequest, DeleteIndexResponse, DeleteIndexRequestBuilder> {
+
+ public DeleteIndexRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
+ super((InternalIndicesAdminClient) indicesClient, new DeleteIndexRequest(indices));
+ }
+
+ /**
+ * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
+ * to <tt>10s</tt>.
+ */
+ public DeleteIndexRequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ /**
+ * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
+ * to <tt>10s</tt>.
+ */
+ public DeleteIndexRequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ /**
+ * Specifies what types of requested indices to ignore and how to handle wildcard indices expressions,
+ * for example whether indices that don't exist should be ignored.
+ */
+ public DeleteIndexRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteIndexResponse> listener) {
+ ((IndicesAdminClient) client).delete(request, listener);
+ }
+}
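
The builder form of the same delete, assuming execute(ActionListener) inherited from the 1.x ActionRequestBuilder base class and, again, an existing IndicesAdminClient named indicesClient:

    new DeleteIndexRequestBuilder(indicesClient, "old-logs")
            .setTimeout("10s")
            .execute(new ActionListener<DeleteIndexResponse>() {
                @Override
                public void onResponse(DeleteIndexResponse response) {
                    System.out.println("acknowledged: " + response.isAcknowledged());
                }

                @Override
                public void onFailure(Throwable t) {
                    t.printStackTrace();
                }
            });
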
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java
new file mode 100644
index 0000000..509686d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.delete;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for a delete index action.
+ */
+public class DeleteIndexResponse extends AcknowledgedResponse {
+
+ DeleteIndexResponse() {
+ }
+
+ DeleteIndexResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
new file mode 100644
index 0000000..032171c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.delete;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Delete index action.
+ */
+public class TransportDeleteIndexAction extends TransportMasterNodeOperationAction<DeleteIndexRequest, DeleteIndexResponse> {
+
+ private final MetaDataDeleteIndexService deleteIndexService;
+ private final DestructiveOperations destructiveOperations;
+
+ @Inject
+ public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService,
+ NodeSettingsService nodeSettingsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.deleteIndexService = deleteIndexService;
+ this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteIndexAction.NAME;
+ }
+
+ @Override
+ protected DeleteIndexRequest newRequest() {
+ return new DeleteIndexRequest();
+ }
+
+ @Override
+ protected DeleteIndexResponse newResponse() {
+ return new DeleteIndexResponse();
+ }
+
+ @Override
+ protected void doExecute(DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener) {
+ destructiveOperations.failDestructive(request.indices());
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) throws ElasticsearchException {
+ request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ if (request.indices().length == 0) {
+ listener.onResponse(new DeleteIndexResponse(true));
+ return;
+ }
+ // TODO: improve this API: currently, if deleting any one index fails we send back a single failure; we should instead send a response array that includes all the indices that were actually deleted
+ final CountDown count = new CountDown(request.indices().length);
+ for (final String index : request.indices()) {
+ deleteIndexService.deleteIndex(new MetaDataDeleteIndexService.Request(index).timeout(request.timeout()).masterTimeout(request.masterNodeTimeout()), new MetaDataDeleteIndexService.Listener() {
+
+ // note: these fields live on the per-index listener instance, so they track
+ // this index's outcome only; whichever response completes the CountDown
+ // determines what the caller ultimately sees
+ private volatile Throwable lastFailure;
+ private volatile boolean ack = true;
+
+ @Override
+ public void onResponse(MetaDataDeleteIndexService.Response response) {
+ if (!response.acknowledged()) {
+ ack = false;
+ }
+ if (count.countDown()) {
+ if (lastFailure != null) {
+ listener.onFailure(lastFailure);
+ } else {
+ listener.onResponse(new DeleteIndexResponse(ack));
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("[{}] failed to delete index", t, index);
+ lastFailure = t;
+ if (count.countDown()) {
+ listener.onFailure(t);
+ }
+ }
+ });
+ }
+ }
+}
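
The fan-out in masterOperation rests on the CountDown idiom: every per-index callback decrements a shared counter, and only the callback that moves it to zero reports the aggregate result. A standalone sketch of just that idiom:

    final CountDown countDown = new CountDown(3); // one slot per pending index
    for (int i = 0; i < 3; i++) {
        // ... kick off asynchronous work; then, inside its completion callback:
        if (countDown.countDown()) {
            // reached exactly once, by whichever callback finishes last
        }
    }
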
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/package-info.java
new file mode 100644
index 0000000..94ac828
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Delete index action.
+ */
+package org.elasticsearch.action.admin.indices.delete; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
new file mode 100644
index 0000000..e4a107f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.exists.indices;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class IndicesExistsAction extends IndicesAction<IndicesExistsRequest, IndicesExistsResponse, IndicesExistsRequestBuilder> {
+
+ public static final IndicesExistsAction INSTANCE = new IndicesExistsAction();
+ public static final String NAME = "indices/exists";
+
+ private IndicesExistsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public IndicesExistsResponse newResponse() {
+ return new IndicesExistsResponse();
+ }
+
+ @Override
+ public IndicesExistsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new IndicesExistsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
new file mode 100644
index 0000000..07d5a2e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.exists.indices;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+public class IndicesExistsRequest extends MasterNodeReadOperationRequest<IndicesExistsRequest> {
+
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
+
+ public IndicesExistsRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public IndicesExistsRequest indices(String[] indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public IndicesExistsRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (indices == null || indices.length == 0) {
+ validationException = addValidationError("index/indices is missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ indicesOptions.writeIndicesOptions(out);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+}
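
The readLocal/writeLocal calls gate the inherited local flag on the peer's wire version (V_1_0_0_RC2 here), so older nodes never see the extra byte. A hedged sketch of the same version-gating idiom for an arbitrary new field; someNewFlag is hypothetical, and StreamOutput#getVersion is assumed from the 1.x stream API:

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        if (out.getVersion().onOrAfter(Version.V_1_0_0_RC2)) {
            out.writeBoolean(someNewFlag); // peers older than 1.0.0.RC2 never receive it
        }
    }
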
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java
new file mode 100644
index 0000000..d75bad1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.exists.indices;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ *
+ */
+public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder<IndicesExistsRequest, IndicesExistsResponse, IndicesExistsRequestBuilder> {
+
+ public IndicesExistsRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
+ super((InternalIndicesAdminClient) indicesClient, new IndicesExistsRequest(indices));
+ }
+
+ public IndicesExistsRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Specifies what types of requested indices to ignore and how to handle wildcard indices expressions,
+ * for example whether indices that don't exist should be ignored.
+ */
+ public IndicesExistsRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<IndicesExistsResponse> listener) {
+ ((IndicesAdminClient) client).exists(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsResponse.java
new file mode 100644
index 0000000..82d9828
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.exists.indices;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+public class IndicesExistsResponse extends ActionResponse {
+
+ private boolean exists;
+
+ IndicesExistsResponse() {
+ }
+
+ public IndicesExistsResponse(boolean exists) {
+ this.exists = exists;
+ }
+
+ public boolean isExists() {
+ return this.exists;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ exists = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(exists);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java
new file mode 100644
index 0000000..d06a77a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.exists.indices;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Indices exists action.
+ */
+public class TransportIndicesExistsAction extends TransportMasterNodeReadOperationAction<IndicesExistsRequest, IndicesExistsResponse> {
+
+ @Inject
+ public TransportIndicesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String executor() {
+ // lightweight in-memory check
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return IndicesExistsAction.NAME;
+ }
+
+ @Override
+ protected IndicesExistsRequest newRequest() {
+ return new IndicesExistsRequest();
+ }
+
+ @Override
+ protected IndicesExistsResponse newResponse() {
+ return new IndicesExistsResponse();
+ }
+
+ @Override
+ protected void doExecute(IndicesExistsRequest request, ActionListener<IndicesExistsResponse> listener) {
+ // don't call this since it will throw IndexMissingException
+ //request.indices(clusterService.state().metaData().concreteIndices(request.indices()));
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener<IndicesExistsResponse> listener) throws ElasticsearchException {
+ boolean exists;
+ try {
+ // Similar to the previous behaviour, but aliases and wildcard expressions are now supported as well.
+ clusterService.state().metaData().concreteIndices(request.indices(), request.indicesOptions());
+ exists = true;
+ } catch (IndexMissingException e) {
+ exists = false;
+ }
+ listener.onResponse(new IndicesExistsResponse(exists));
+ }
+}
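
An illustrative client-side check (the index pattern is hypothetical); as the comment in masterOperation notes, aliases and wildcard expressions resolve through the same concreteIndices call:

    indicesClient.exists(new IndicesExistsRequest("logs-*"), new ActionListener<IndicesExistsResponse>() {
        @Override
        public void onResponse(IndicesExistsResponse response) {
            System.out.println("exists: " + response.isExists());
        }

        @Override
        public void onFailure(Throwable t) {
            t.printStackTrace();
        }
    });
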
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java
new file mode 100644
index 0000000..b172e9b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.exists.types;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Types exists transport action.
+ */
+public class TransportTypesExistsAction extends TransportMasterNodeReadOperationAction<TypesExistsRequest, TypesExistsResponse> {
+
+ @Inject
+ public TransportTypesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String executor() {
+ // lightweight check
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return TypesExistsAction.NAME;
+ }
+
+ @Override
+ protected TypesExistsRequest newRequest() {
+ return new TypesExistsRequest();
+ }
+
+ @Override
+ protected TypesExistsResponse newResponse() {
+ return new TypesExistsResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) throws ElasticsearchException {
+ String[] concreteIndices = state.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ if (concreteIndices.length == 0) {
+ listener.onResponse(new TypesExistsResponse(false));
+ return;
+ }
+
+ for (String concreteIndex : concreteIndices) {
+ if (!state.metaData().hasConcreteIndex(concreteIndex)) {
+ listener.onResponse(new TypesExistsResponse(false));
+ return;
+ }
+
+ ImmutableOpenMap<String, MappingMetaData> mappings = state.metaData().getIndices().get(concreteIndex).mappings();
+ if (mappings.isEmpty()) {
+ listener.onResponse(new TypesExistsResponse(false));
+ return;
+ }
+
+ for (String type : request.types()) {
+ if (!mappings.containsKey(type)) {
+ listener.onResponse(new TypesExistsResponse(false));
+ return;
+ }
+ }
+ }
+
+ listener.onResponse(new TypesExistsResponse(true));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
new file mode 100644
index 0000000..7150c22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.exists.types;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class TypesExistsAction extends IndicesAction<TypesExistsRequest, TypesExistsResponse, TypesExistsRequestBuilder> {
+
+ public static final TypesExistsAction INSTANCE = new TypesExistsAction();
+ public static final String NAME = "indices/types/exists";
+
+ private TypesExistsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public TypesExistsResponse newResponse() {
+ return new TypesExistsResponse();
+ }
+
+ @Override
+ public TypesExistsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new TypesExistsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java
new file mode 100644
index 0000000..027ef5d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.exists.types;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ */
+public class TypesExistsRequest extends MasterNodeReadOperationRequest<TypesExistsRequest> {
+
+ private String[] indices;
+ private String[] types;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ TypesExistsRequest() {
+ }
+
+ public TypesExistsRequest(String[] indices, String... types) {
+ this.indices = indices;
+ this.types = types;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public void indices(String[] indices) {
+ this.indices = indices;
+ }
+
+ public String[] types() {
+ return types;
+ }
+
+ public void types(String[] types) {
+ this.types = types;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public TypesExistsRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (indices == null) { // Specifying '*' via rest api results in an empty array
+ validationException = addValidationError("index/indices is missing", validationException);
+ }
+ if (types == null || types.length == 0) {
+ validationException = addValidationError("type/types is missing", validationException);
+ }
+
+ return validationException;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ out.writeStringArray(types);
+ indicesOptions.writeIndicesOptions(out);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ types = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+}
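
A sketch pairing the two-argument constructor with the typesExists client call used by the builder below; index and type names are illustrative:

    TypesExistsRequest request = new TypesExistsRequest(new String[]{"twitter"}, "tweet");
    indicesClient.typesExists(request, new ActionListener<TypesExistsResponse>() {
        @Override
        public void onResponse(TypesExistsResponse response) {
            System.out.println("type exists: " + response.isExists());
        }

        @Override
        public void onFailure(Throwable t) {
            t.printStackTrace();
        }
    });
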
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java
new file mode 100644
index 0000000..89d48e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.exists.types;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.Strings;
+
+/**
+ * A builder for {@link TypesExistsRequest}.
+ */
+public class TypesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder<TypesExistsRequest, TypesExistsResponse, TypesExistsRequestBuilder> {
+
+ /**
+ * @param indices What indices to check for types
+ */
+ public TypesExistsRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
+ super((InternalIndicesAdminClient) indicesClient, new TypesExistsRequest(indices, Strings.EMPTY_ARRAY));
+ }
+
+ TypesExistsRequestBuilder(IndicesAdminClient client) {
+ super((InternalIndicesAdminClient) client, new TypesExistsRequest());
+ }
+
+ /**
+ * @param indices What indices to check for types
+ */
+ public TypesExistsRequestBuilder setIndices(String[] indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * @param types The types to check if they exist
+ */
+ public TypesExistsRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * @param indicesOptions Specifies how to resolve indices that aren't active / ready and how to expand indices wildcard expressions
+ */
+ public TypesExistsRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<TypesExistsResponse> listener) {
+ ((IndicesAdminClient) client).typesExists(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsResponse.java
new file mode 100644
index 0000000..44dd8d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.exists.types;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Whether all of the specified types exist in all of the specified indices.
+ */
+public class TypesExistsResponse extends ActionResponse {
+
+ private boolean exists;
+
+ TypesExistsResponse() {
+ }
+
+ public TypesExistsResponse(boolean exists) {
+ this.exists = exists;
+ }
+
+ public boolean isExists() {
+ return this.exists;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ exists = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(exists);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
new file mode 100644
index 0000000..c78fa59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class FlushAction extends IndicesAction<FlushRequest, FlushResponse, FlushRequestBuilder> {
+
+ public static final FlushAction INSTANCE = new FlushAction();
+ public static final String NAME = "indices/flush";
+
+ private FlushAction() {
+ super(NAME);
+ }
+
+ @Override
+ public FlushResponse newResponse() {
+ return new FlushResponse();
+ }
+
+ @Override
+ public FlushRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new FlushRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java
new file mode 100644
index 0000000..9deadc1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A flush request to flush one or more indices. Flushing an index frees memory by writing buffered data to the
+ * index storage and clearing the internal transaction log. By default, Elasticsearch uses memory heuristics to
+ * trigger flush operations automatically as required.
+ * <p/>
+ * <p>Best created with {@link org.elasticsearch.client.Requests#flushRequest(String...)}.
+ *
+ * @see org.elasticsearch.client.Requests#flushRequest(String...)
+ * @see org.elasticsearch.client.IndicesAdminClient#flush(FlushRequest)
+ * @see FlushResponse
+ */
+public class FlushRequest extends BroadcastOperationRequest<FlushRequest> {
+
+ private boolean force = false;
+ private boolean full = false;
+
+ FlushRequest() {
+ }
+
+ /**
+ * Constructs a new flush request against one or more indices. If nothing is provided, all indices will
+ * be flushed.
+ */
+ public FlushRequest(String... indices) {
+ super(indices);
+ }
+
+ /**
+ * Should a "full" flush be performed.
+ */
+ public boolean full() {
+ return this.full;
+ }
+
+ /**
+ * Should a "full" flush be performed.
+ */
+ public FlushRequest full(boolean full) {
+ this.full = full;
+ return this;
+ }
+
+ /**
+ * Force flushing, even if one is possibly not needed.
+ */
+ public boolean force() {
+ return force;
+ }
+
+ /**
+ * Force flushing, even if one is possibly not needed.
+ */
+ public FlushRequest force(boolean force) {
+ this.force = force;
+ return this;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(full);
+ out.writeBoolean(force);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ full = in.readBoolean();
+ force = in.readBoolean();
+ }
+}
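
A sketch using the Requests#flushRequest helper named in the javadoc above; the index name is illustrative, both flags default to false, and the shard counters come from the BroadcastOperationResponse base class (assumed here):

    FlushRequest request = Requests.flushRequest("twitter")
            .full(false)  // no "full" flush
            .force(true); // flush even if heuristics say none is needed
    indicesClient.flush(request, new ActionListener<FlushResponse>() {
        @Override
        public void onResponse(FlushResponse response) {
            System.out.println("flushed shards: " + response.getSuccessfulShards());
        }

        @Override
        public void onFailure(Throwable t) {
            t.printStackTrace();
        }
    });
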
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java
new file mode 100644
index 0000000..cee4b46
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ *
+ */
+public class FlushRequestBuilder extends BroadcastOperationRequestBuilder<FlushRequest, FlushResponse, FlushRequestBuilder> {
+
+ public FlushRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new FlushRequest());
+ }
+
+ public FlushRequestBuilder setFull(boolean full) {
+ request.full(full);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<FlushResponse> listener) {
+ ((IndicesAdminClient) client).flush(request, listener);
+ }
+}
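The same operation expressed through this builder's fluent style; prepareFlush(...) on IndicesAdminClient is assumed from the 1.x client API, and the index name is illustrative:

    // builder style: configure and execute in one chain
    FlushResponse response = client.admin().indices()
            .prepareFlush("my-index")
            .setFull(true) // NEW_WRITER flush instead of the default COMMIT_TRANSLOG
            .execute()
            .actionGet();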
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java
new file mode 100644
index 0000000..c4c52e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A response to a flush action.
+ */
+public class FlushResponse extends BroadcastOperationResponse {
+
+ FlushResponse() {
+
+ }
+
+ FlushResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+}
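A sketch of inspecting the shard-level counters this response inherits from BroadcastOperationResponse; the getter names are assumed from the 1.x API:

    FlushResponse response = client.admin().indices().flush(Requests.flushRequest()).actionGet();
    if (response.getFailedShards() > 0) {
        // each failure carries the index, shard id and reason of the failed shard-level flush
        for (ShardOperationFailedException failure : response.getShardFailures()) {
            System.err.println(failure.index() + "[" + failure.shardId() + "]: " + failure.reason());
        }
    }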
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java
new file mode 100644
index 0000000..f98ea26
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+class ShardFlushRequest extends BroadcastShardOperationRequest {
+
+ private boolean full;
+ private boolean force;
+
+ ShardFlushRequest() {
+ }
+
+ public ShardFlushRequest(String index, int shardId, FlushRequest request) {
+ super(index, shardId, request);
+ this.full = request.full();
+ this.force = request.force();
+ }
+
+ public boolean full() {
+ return this.full;
+ }
+
+ public boolean force() {
+ return this.force;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ full = in.readBoolean();
+ force = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(full);
+ out.writeBoolean(force);
+ }
+}
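The broadcast fan-out copies the top-level flags into each shard-level request through the constructor above; a sketch (this class is package-private, so the snippet only compiles inside the same package):

    FlushRequest top = new FlushRequest("my-index").full(true).force(false);
    ShardFlushRequest perShard = new ShardFlushRequest("my-index", 0, top);
    assert perShard.full() && !perShard.force(); // the flags travel with the shard request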
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java
new file mode 100644
index 0000000..1f234ae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+class ShardFlushResponse extends BroadcastShardOperationResponse {
+
+ ShardFlushResponse() {
+
+ }
+
+ public ShardFlushResponse(String index, int shardId) {
+ super(index, shardId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java
new file mode 100644
index 0000000..02dc414
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Flush Action.
+ */
+public class TransportFlushAction extends TransportBroadcastOperationAction<FlushRequest, FlushResponse, ShardFlushRequest, ShardFlushResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportFlushAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.FLUSH;
+ }
+
+ @Override
+ protected String transportAction() {
+ return FlushAction.NAME;
+ }
+
+ @Override
+ protected FlushRequest newRequest() {
+ return new FlushRequest();
+ }
+
+ @Override
+ protected FlushResponse newResponse(FlushRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // a non active shard, ignore
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ successfulShards++;
+ }
+ }
+ return new FlushResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardFlushRequest newShardRequest() {
+ return new ShardFlushRequest();
+ }
+
+ @Override
+ protected ShardFlushRequest newShardRequest(ShardRouting shard, FlushRequest request) {
+ return new ShardFlushRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardFlushResponse newShardResponse() {
+ return new ShardFlushResponse();
+ }
+
+ @Override
+ protected ShardFlushResponse shardOperation(ShardFlushRequest request) throws ElasticsearchException {
+ IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
+ indexShard.flush(new Engine.Flush().type(request.full() ? Engine.Flush.Type.NEW_WRITER : Engine.Flush.Type.COMMIT_TRANSLOG).force(request.force()));
+ return new ShardFlushResponse(request.index(), request.shardId());
+ }
+
+ /**
+ * The flush request works against *all* shards.
+ */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, FlushRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, FlushRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, FlushRequest countRequest, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+}
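The newResponse(...) aggregation above follows a simple pattern: each slot of the AtomicReferenceArray holds null (inactive shard), a failure exception, or a shard response. A self-contained, pure-Java sketch of the same counting logic:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    public class AggregationSketch {
        public static void main(String[] args) {
            AtomicReferenceArray<Object> slots = new AtomicReferenceArray<Object>(3);
            slots.set(0, "shard-0-response");                      // success
            slots.set(1, new RuntimeException("shard 1 failed"));  // failure
            // slot 2 stays null: non-active shard, ignored
            int ok = 0, failed = 0;
            for (int i = 0; i < slots.length(); i++) {
                Object o = slots.get(i);
                if (o == null) continue;           // inactive shard
                if (o instanceof Exception) failed++; else ok++;
            }
            System.out.println(ok + " ok, " + failed + " failed, of " + slots.length());
        }
    }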
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/package-info.java
new file mode 100644
index 0000000..c398480
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Flush index/indices action.
+ */
+package org.elasticsearch.action.admin.indices.flush; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/package-info.java
new file mode 100644
index 0000000..aaa86ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Indices Gateway Administrative Actions.
+ */
+package org.elasticsearch.action.admin.indices.gateway; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotAction.java
new file mode 100644
index 0000000..ade6ecf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotAction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * @deprecated Use snapshot/restore API instead
+ */
+@Deprecated
+public class GatewaySnapshotAction extends IndicesAction<GatewaySnapshotRequest, GatewaySnapshotResponse, GatewaySnapshotRequestBuilder> {
+
+ public static final GatewaySnapshotAction INSTANCE = new GatewaySnapshotAction();
+ public static final String NAME = "indices/gateway/snapshot";
+
+ private GatewaySnapshotAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GatewaySnapshotResponse newResponse() {
+ return new GatewaySnapshotResponse();
+ }
+
+ @Override
+ public GatewaySnapshotRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GatewaySnapshotRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java
new file mode 100644
index 0000000..978f9fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+
+/**
+ * A gateway snapshot explicitly snapshots one or more indices through their gateway (backing them up).
+ * By default, each index gateway periodically snapshots changes, though this can be disabled and controlled
+ * completely through this API. Best created using {@link org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)}.
+ *
+ * @see org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)
+ * @see org.elasticsearch.client.IndicesAdminClient#gatewaySnapshot(GatewaySnapshotRequest)
+ * @see GatewaySnapshotResponse
+ * @deprecated Use snapshot/restore API instead
+ */
+@Deprecated
+public class GatewaySnapshotRequest extends BroadcastOperationRequest<GatewaySnapshotRequest> {
+
+ GatewaySnapshotRequest() {
+
+ }
+
+ /**
+ * Constructs a new gateway snapshot request against one or more indices. If no indices are provided,
+ * the gateway snapshot will be executed against all indices.
+ */
+ public GatewaySnapshotRequest(String... indices) {
+ this.indices = indices;
+ }
+} \ No newline at end of file
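A usage sketch for this deprecated request type, via the entry points its Javadoc names (Requests#gatewaySnapshotRequest and IndicesAdminClient#gatewaySnapshot); new code should use the snapshot/restore API instead:

    // deprecated API: explicit gateway snapshot of one index
    GatewaySnapshotResponse response = client.admin().indices()
            .gatewaySnapshot(Requests.gatewaySnapshotRequest("my-index"))
            .actionGet();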
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequestBuilder.java
new file mode 100644
index 0000000..1afc0e1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequestBuilder.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * @deprecated Use snapshot/restore API instead
+ */
+@Deprecated
+public class GatewaySnapshotRequestBuilder extends BroadcastOperationRequestBuilder<GatewaySnapshotRequest, GatewaySnapshotResponse, GatewaySnapshotRequestBuilder> {
+
+ public GatewaySnapshotRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new GatewaySnapshotRequest());
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GatewaySnapshotResponse> listener) {
+ ((IndicesAdminClient) client).gatewaySnapshot(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java
new file mode 100644
index 0000000..9e50645
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Response for the gateway snapshot action.
+ *
+ * @deprecated Use snapshot/restore API instead
+ */
+@Deprecated
+public class GatewaySnapshotResponse extends BroadcastOperationResponse {
+
+ GatewaySnapshotResponse() {
+
+ }
+
+ GatewaySnapshotResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java
new file mode 100644
index 0000000..dadd729
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+class ShardGatewaySnapshotRequest extends BroadcastShardOperationRequest {
+
+ ShardGatewaySnapshotRequest() {
+ }
+
+ public ShardGatewaySnapshotRequest(String index, int shardId, GatewaySnapshotRequest request) {
+ super(index, shardId, request);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java
new file mode 100644
index 0000000..21e21b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+class ShardGatewaySnapshotResponse extends BroadcastShardOperationResponse {
+
+ ShardGatewaySnapshotResponse() {
+ }
+
+ public ShardGatewaySnapshotResponse(String index, int shardId) {
+ super(index, shardId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportGatewaySnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportGatewaySnapshotAction.java
new file mode 100644
index 0000000..d9b101d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportGatewaySnapshotAction.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.gateway.snapshot;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.gateway.IndexShardGatewayService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * @deprecated Use snapshot/restore API instead
+ */
+@Deprecated
+public class TransportGatewaySnapshotAction extends TransportBroadcastOperationAction<GatewaySnapshotRequest, GatewaySnapshotResponse, ShardGatewaySnapshotRequest, ShardGatewaySnapshotResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportGatewaySnapshotAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ TransportService transportService, IndicesService indicesService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SNAPSHOT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return GatewaySnapshotAction.NAME;
+ }
+
+ @Override
+ protected GatewaySnapshotRequest newRequest() {
+ return new GatewaySnapshotRequest();
+ }
+
+ @Override
+ protected GatewaySnapshotResponse newResponse(GatewaySnapshotRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // non active shard, ignore
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = Lists.newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ successfulShards++;
+ }
+ }
+ return new GatewaySnapshotResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardGatewaySnapshotRequest newShardRequest() {
+ return new ShardGatewaySnapshotRequest();
+ }
+
+ @Override
+ protected ShardGatewaySnapshotRequest newShardRequest(ShardRouting shard, GatewaySnapshotRequest request) {
+ return new ShardGatewaySnapshotRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardGatewaySnapshotResponse newShardResponse() {
+ return new ShardGatewaySnapshotResponse();
+ }
+
+ @Override
+ protected ShardGatewaySnapshotResponse shardOperation(ShardGatewaySnapshotRequest request) throws ElasticsearchException {
+ IndexShardGatewayService shardGatewayService = indicesService.indexServiceSafe(request.index())
+ .shardInjectorSafe(request.shardId()).getInstance(IndexShardGatewayService.class);
+ shardGatewayService.snapshot("api");
+ return new ShardGatewaySnapshotResponse(request.index(), request.shardId());
+ }
+
+ /**
+ * The snapshot request works against all primary shards.
+ */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, GatewaySnapshotRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().activePrimaryShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, GatewaySnapshotRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, GatewaySnapshotRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/package-info.java
new file mode 100644
index 0000000..7d50ee3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Gateway Snapshot Action.
+ * @deprecated Use snapshot/restore API instead
+ */
+package org.elasticsearch.action.admin.indices.gateway.snapshot; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingAction.java
new file mode 100644
index 0000000..e6b8b8d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.delete;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class DeleteMappingAction extends IndicesAction<DeleteMappingRequest, DeleteMappingResponse, DeleteMappingRequestBuilder> {
+
+ public static final DeleteMappingAction INSTANCE = new DeleteMappingAction();
+ public static final String NAME = "indices/mapping/delete";
+
+ private DeleteMappingAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteMappingResponse newResponse() {
+ return new DeleteMappingResponse();
+ }
+
+ @Override
+ public DeleteMappingRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new DeleteMappingRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingClusterStateUpdateRequest.java
new file mode 100644
index 0000000..5a2ef82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingClusterStateUpdateRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.delete;
+
+import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
+
+/**
+ * Cluster state update request used to delete a mapping
+ */
+public class DeleteMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<DeleteMappingClusterStateUpdateRequest> {
+
+ private String[] types;
+
+ DeleteMappingClusterStateUpdateRequest() {
+
+ }
+
+ /**
+ * Returns the types to be removed
+ */
+ public String[] types() {
+ return types;
+ }
+
+ /**
+ * Sets the types to be removed
+ */
+ public DeleteMappingClusterStateUpdateRequest types(String[] types) {
+ this.types = types;
+ return this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequest.java
new file mode 100644
index 0000000..e023b88
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.CollectionUtils;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Represents a request to delete a mapping
+ */
+public class DeleteMappingRequest extends AcknowledgedRequest<DeleteMappingRequest> {
+
+ private String[] indices;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
+ private String[] types;
+
+ DeleteMappingRequest() {
+ }
+
+ /**
+ * Constructs a new delete mapping request against one or more indices. If nothing is set then
+ * it will be executed against all indices.
+ */
+ public DeleteMappingRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (CollectionUtils.isEmpty(types)) {
+ validationException = addValidationError("mapping type is missing", validationException);
+ } else {
+ validationException = checkForEmptyString(validationException, "types", types);
+ }
+ if (CollectionUtils.isEmpty(indices)) {
+ validationException = addValidationError("index is missing", validationException);
+ } else {
+ validationException = checkForEmptyString(validationException, "indices", indices);
+ }
+
+ return validationException;
+ }
+
+ private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String name, String[] strings) {
+ // report the error against whichever array (types or indices) contained the empty string
+ for (String string : strings) {
+ if (!Strings.hasText(string)) {
+ return addValidationError(name + " must not contain empty strings", validationException);
+ }
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the indices this delete mapping operation will execute on.
+ */
+ public DeleteMappingRequest indices(String[] indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * The indices the mappings will be removed from.
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public DeleteMappingRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * Returns the mapping types to remove.
+ */
+ public String[] types() {
+ return types;
+ }
+
+ /**
+ * Sets the types of the mappings to remove.
+ */
+ public DeleteMappingRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ types = in.readStringArray();
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(indices);
+ indicesOptions.writeIndicesOptions(out);
+ out.writeStringArrayNullable(types);
+ writeTimeout(out);
+ }
+}
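The validate() contract above can be exercised directly; a sketch using only the public surface shown in this file (index and type names are illustrative):

    DeleteMappingRequest missingTypes = new DeleteMappingRequest("my-index");
    assert missingTypes.validate() != null; // "mapping type is missing"

    DeleteMappingRequest request = new DeleteMappingRequest("my-index").types("tweet");
    assert request.validate() == null; // indices and types present and non-empty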
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequestBuilder.java
new file mode 100644
index 0000000..eb1abfb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingRequestBuilder.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * Builder for a delete mapping request
+ */
+public class DeleteMappingRequestBuilder extends AcknowledgedRequestBuilder<DeleteMappingRequest, DeleteMappingResponse, DeleteMappingRequestBuilder> {
+
+ public DeleteMappingRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new DeleteMappingRequest());
+ }
+
+ /**
+ * Sets the indices the delete mapping operation will execute on.
+ */
+ public DeleteMappingRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Sets the types of the mappings to remove.
+ */
+ public DeleteMappingRequestBuilder setType(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * Specifies what types of requested indices to ignore and how to handle wildcard indices expressions,
+ * for example whether indices that don't exist should be ignored.
+ */
+ public DeleteMappingRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteMappingResponse> listener) {
+ ((IndicesAdminClient) client).deleteMapping(request, listener);
+ }
+}
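A builder-style sketch; prepareDeleteMapping(...) on IndicesAdminClient is assumed from the 1.x client API, and index/type names are illustrative:

    DeleteMappingResponse response = client.admin().indices()
            .prepareDeleteMapping("my-index")
            .setType("tweet")
            .execute()
            .actionGet();
    if (!response.isAcknowledged()) {
        System.err.println("delete mapping not acknowledged within the timeout");
    }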
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingResponse.java
new file mode 100644
index 0000000..e1498ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/DeleteMappingResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.delete;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * The response of a delete mapping operation.
+ */
+public class DeleteMappingResponse extends AcknowledgedResponse {
+
+ DeleteMappingResponse() {
+
+ }
+
+ DeleteMappingResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/TransportDeleteMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/TransportDeleteMappingAction.java
new file mode 100644
index 0000000..f55e714
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/delete/TransportDeleteMappingAction.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.delete;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaDataMappingService;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.BoolFilterBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.TypeFilterBuilder;
+import org.elasticsearch.indices.TypeMissingException;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Delete mapping action.
+ */
+public class TransportDeleteMappingAction extends TransportMasterNodeOperationAction<DeleteMappingRequest, DeleteMappingResponse> {
+
+ private final MetaDataMappingService metaDataMappingService;
+ private final TransportFlushAction flushAction;
+ private final TransportDeleteByQueryAction deleteByQueryAction;
+ private final TransportRefreshAction refreshAction;
+ private final DestructiveOperations destructiveOperations;
+
+ @Inject
+ public TransportDeleteMappingAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataMappingService metaDataMappingService,
+ TransportDeleteByQueryAction deleteByQueryAction, TransportRefreshAction refreshAction,
+ TransportFlushAction flushAction, NodeSettingsService nodeSettingsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.metaDataMappingService = metaDataMappingService;
+ this.deleteByQueryAction = deleteByQueryAction;
+ this.refreshAction = refreshAction;
+ this.flushAction = flushAction;
+ this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+ }
+
+ @Override
+ protected String executor() {
+ // no need for fork on another thread pool, we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteMappingAction.NAME;
+ }
+
+ @Override
+ protected DeleteMappingRequest newRequest() {
+ return new DeleteMappingRequest();
+ }
+
+ @Override
+ protected DeleteMappingResponse newResponse() {
+ return new DeleteMappingResponse();
+ }
+
+ @Override
+ protected void doExecute(DeleteMappingRequest request, ActionListener<DeleteMappingResponse> listener) {
+ destructiveOperations.failDestructive(request.indices());
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(DeleteMappingRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final DeleteMappingRequest request, final ClusterState state, final ActionListener<DeleteMappingResponse> listener) throws ElasticsearchException {
+ request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ flushAction.execute(Requests.flushRequest(request.indices()), new ActionListener<FlushResponse>() {
+ @Override
+ public void onResponse(FlushResponse flushResponse) {
+
+ // get all types that need to be deleted.
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = clusterService.state().metaData().findMappings(
+ request.indices(), request.types()
+ );
+ // build a bool filter with one should clause (type filter) per type, matching documents of any of the types
+ BoolFilterBuilder filterBuilder = new BoolFilterBuilder();
+ Set<String> types = new HashSet<String>();
+ for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> typesMeta : result) {
+ for (ObjectObjectCursor<String, MappingMetaData> type : typesMeta.value) {
+ filterBuilder.should(new TypeFilterBuilder(type.key));
+ types.add(type.key);
+ }
+ }
+ if (types.size() == 0) {
+ throw new TypeMissingException(new Index("_all"), request.types(), "No index has the type.");
+ }
+ request.types(types.toArray(new String[types.size()]));
+ QuerySourceBuilder querySourceBuilder = new QuerySourceBuilder()
+ .setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), filterBuilder));
+ deleteByQueryAction.execute(Requests.deleteByQueryRequest(request.indices()).source(querySourceBuilder), new ActionListener<DeleteByQueryResponse>() {
+ @Override
+ public void onResponse(DeleteByQueryResponse deleteByQueryResponse) {
+ refreshAction.execute(Requests.refreshRequest(request.indices()), new ActionListener<RefreshResponse>() {
+ @Override
+ public void onResponse(RefreshResponse refreshResponse) {
+ removeMapping();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ removeMapping();
+ }
+
+ protected void removeMapping() {
+ DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest()
+ .indices(request.indices()).types(request.types())
+ .ackTimeout(request.timeout())
+ .masterNodeTimeout(request.masterNodeTimeout());
+
+ metaDataMappingService.removeMapping(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new DeleteMappingResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ listener.onFailure(t);
+ }
+ });
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ listener.onFailure(t);
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ listener.onFailure(t);
+ }
+ });
+ }
+}
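masterOperation(...) above chains flush, delete-by-query, refresh, and the cluster-state mapping removal through nested ActionListeners. A sketch of the same callback style from client code, showing only the first link of the chain (names are illustrative):

    client.admin().indices().flush(Requests.flushRequest("my-index"), new ActionListener<FlushResponse>() {
        @Override
        public void onResponse(FlushResponse flushResponse) {
            // the next step of the chain (e.g. a delete-by-query) would be issued here
        }

        @Override
        public void onFailure(Throwable t) {
            t.printStackTrace(); // every step funnels errors into one callback
        }
    });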
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
new file mode 100644
index 0000000..721b1de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public class GetFieldMappingsAction extends IndicesAction<GetFieldMappingsRequest, GetFieldMappingsResponse, GetFieldMappingsRequestBuilder> {
+
+ public static final GetFieldMappingsAction INSTANCE = new GetFieldMappingsAction();
+ public static final String NAME = "mappings/fields/get";
+
+ private GetFieldMappingsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetFieldMappingsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GetFieldMappingsRequestBuilder((InternalGenericClient) client);
+ }
+
+ @Override
+ public GetFieldMappingsResponse newResponse() {
+ return new GetFieldMappingsResponse();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java
new file mode 100644
index 0000000..d9974f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.single.custom.SingleCustomOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+class GetFieldMappingsIndexRequest extends SingleCustomOperationRequest<GetFieldMappingsIndexRequest> {
+
+ private String index;
+
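+ // set when exactly one concrete index, type and field were requested; a missing field
+ // is then reported back explicitly as a NULL placeholder (see
+ // TransportGetFieldMappingsIndexAction#findFieldMappingsByType)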
+ private boolean probablySingleFieldRequest;
+ private boolean includeDefaults;
+ private String[] fields = Strings.EMPTY_ARRAY;
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ GetFieldMappingsIndexRequest() {
+ }
+
+ GetFieldMappingsIndexRequest(GetFieldMappingsRequest other, String index, boolean probablySingleFieldRequest) {
+ this.preferLocal(other.local);
+ this.probablySingleFieldRequest = probablySingleFieldRequest;
+ this.includeDefaults = other.includeDefaults();
+ this.types = other.types();
+ this.fields = other.fields();
+ this.index = index;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public String[] types() {
+ return types;
+ }
+
+ public String[] fields() {
+ return fields;
+ }
+
+ public boolean probablySingleFieldRequest() {
+ return probablySingleFieldRequest;
+ }
+
+ public boolean includeDefaults() {
+ return includeDefaults;
+ }
+
+ /** Indicates whether default mapping settings should be returned */
+ public GetFieldMappingsIndexRequest includeDefaults(boolean includeDefaults) {
+ this.includeDefaults = includeDefaults;
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeStringArray(types);
+ out.writeStringArray(fields);
+ out.writeBoolean(includeDefaults);
+ out.writeBoolean(probablySingleFieldRequest);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ types = in.readStringArray();
+ fields = in.readStringArray();
+ includeDefaults = in.readBoolean();
+ probablySingleFieldRequest = in.readBoolean();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
new file mode 100644
index 0000000..647752f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+/** Request the mappings of specific fields */
+public class GetFieldMappingsRequest extends ActionRequest<GetFieldMappingsRequest> {
+
+ protected boolean local = false;
+
+ private String[] fields = Strings.EMPTY_ARRAY;
+
+ private boolean includeDefaults = false;
+
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ public GetFieldMappingsRequest() {
+
+ }
+
+ public GetFieldMappingsRequest(GetFieldMappingsRequest other) {
+ this.local = other.local;
+ this.includeDefaults = other.includeDefaults;
+ this.indices = other.indices;
+ this.types = other.types;
+ this.indicesOptions = other.indicesOptions;
+ this.fields = other.fields;
+ }
+
+ /**
+ * Indicates whether the receiving node should operate on its local index information or forward the request,
+ * where needed, to other nodes. When run locally, the request does not raise errors for missing indices.
+ */
+ public GetFieldMappingsRequest local(boolean local) {
+ this.local = local;
+ return this;
+ }
+
+ public boolean local() {
+ return local;
+ }
+
+ public GetFieldMappingsRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public GetFieldMappingsRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ public GetFieldMappingsRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public String[] types() {
+ return types;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /** @param fields a list of fields to retrieve the mapping for */
+ public GetFieldMappingsRequest fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ public String[] fields() {
+ return fields;
+ }
+
+ public boolean includeDefaults() {
+ return includeDefaults;
+ }
+
+ /** Indicates whether default mapping settings should be returned */
+ public GetFieldMappingsRequest includeDefaults(boolean includeDefaults) {
+ this.includeDefaults = includeDefaults;
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ // This request used to inherit from MasterNodeOperationRequest, so for bwc we need to keep serializing it.
+ MasterNodeOperationRequest.DEFAULT_MASTER_NODE_TIMEOUT.writeTo(out);
+ out.writeStringArray(indices);
+ out.writeStringArray(types);
+ indicesOptions.writeIndicesOptions(out);
+ out.writeBoolean(local);
+ out.writeStringArray(fields);
+ out.writeBoolean(includeDefaults);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ // This request used to inherit from MasterNodeOperationRequest, so for bwc we need to keep serializing it.
+ TimeValue.readTimeValue(in);
+ indices = in.readStringArray();
+ types = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ local = in.readBoolean();
+ fields = in.readStringArray();
+ includeDefaults = in.readBoolean();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
new file mode 100644
index 0000000..955a984
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/** A helper class to build {@link GetFieldMappingsRequest} objects */
+public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder<GetFieldMappingsRequest, GetFieldMappingsResponse, GetFieldMappingsRequestBuilder> {
+
+ public GetFieldMappingsRequestBuilder(InternalGenericClient client, String... indices) {
+ super(client, new GetFieldMappingsRequest().indices(indices));
+ }
+
+ public GetFieldMappingsRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ public GetFieldMappingsRequestBuilder addIndices(String... indices) {
+ request.indices(ObjectArrays.concat(request.indices(), indices, String.class));
+ return this;
+ }
+
+ public GetFieldMappingsRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ public GetFieldMappingsRequestBuilder addTypes(String... types) {
+ request.types(ObjectArrays.concat(request.types(), types, String.class));
+ return this;
+ }
+
+ public GetFieldMappingsRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+
+ /** Sets the fields to retrieve. */
+ public GetFieldMappingsRequestBuilder setFields(String... fields) {
+ request.fields(fields);
+ return this;
+ }
+
+ /** Indicates whether default mapping settings should be returned */
+ public GetFieldMappingsRequestBuilder includeDefaults(boolean includeDefaults) {
+ request.includeDefaults(includeDefaults);
+ return this;
+ }
+
+
+ @Override
+ protected void doExecute(ActionListener<GetFieldMappingsResponse> listener) {
+ ((IndicesAdminClient) client).getFieldMappings(request, listener);
+ }
+}
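+
+ // A minimal usage sketch of this builder (hypothetical names; assumes a connected
+ // Client "client" and an index "my_index" containing a type "my_type"):
+ //
+ // GetFieldMappingsResponse response = client.admin().indices()
+ // .prepareGetFieldMappings("my_index")
+ // .setTypes("my_type")
+ // .setFields("title", "user.*")
+ // .execute().actionGet();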
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
new file mode 100644
index 0000000..bd55901
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.mapper.Mapper;
+
+import java.io.IOException;
+import java.util.Map;
+
+/** Response object for {@link GetFieldMappingsRequest} API */
+public class GetFieldMappingsResponse extends ActionResponse implements ToXContent {
+
+ private ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings = ImmutableMap.of();
+
+ GetFieldMappingsResponse(ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings) {
+ this.mappings = mappings;
+ }
+
+ GetFieldMappingsResponse() {
+ }
+
+ /** Returns the retrieved field mappings. The keys of the returned map are index, type and field (as specified in the request). */
+ public ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings() {
+ return mappings;
+ }
+
+ /**
+ * Returns the mappings of a specific field.
+ *
+ * @param field field name as specified in the {@link GetFieldMappingsRequest}
+ * @return FieldMappingMetaData for the requested field or null if not found.
+ */
+ public FieldMappingMetaData fieldMappings(String index, String type, String field) {
+ ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>> indexMapping = mappings.get(index);
+ if (indexMapping == null) {
+ return null;
+ }
+ ImmutableMap<String, FieldMappingMetaData> typeMapping = indexMapping.get(type);
+ if (typeMapping == null) {
+ return null;
+ }
+ return typeMapping.get(field);
+ }
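+
+ // A minimal lookup sketch (hypothetical names): for a response covering index
+ // "my_index", type "my_type" and field "title", the raw mapping can be read as a map:
+ //
+ // FieldMappingMetaData meta = response.fieldMappings("my_index", "my_type", "title");
+ // if (meta != null && !meta.isNull()) {
+ // Map<String, Object> mapping = meta.sourceAsMap();
+ // }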
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
+ builder.startObject(indexEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject("mappings");
+ for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
+ builder.startObject(typeEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
+ for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {
+ builder.startObject(fieldEntry.getKey());
+ fieldEntry.getValue().toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ public static class FieldMappingMetaData implements ToXContent {
+ public static final FieldMappingMetaData NULL = new FieldMappingMetaData("", BytesArray.EMPTY);
+
+ private String fullName;
+ private BytesReference source;
+
+ public FieldMappingMetaData(String fullName, BytesReference source) {
+ this.fullName = fullName;
+ this.source = source;
+ }
+
+ public String fullName() {
+ return fullName;
+ }
+
+ /** Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. */
+ public Map<String, Object> sourceAsMap() {
+ return XContentHelper.convertToMap(source.array(), source.arrayOffset(), source.length(), true).v2();
+ }
+
+ public boolean isNull() {
+ return NULL.fullName().equals(fullName) && NULL.source.length() == source.length();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("full_name", fullName);
+ XContentHelper.writeRawField("mapping", source, builder, params);
+ return builder;
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ ImmutableMap.Builder<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexMapBuilder = ImmutableMap.builder();
+ for (int i = 0; i < size; i++) {
+ String index = in.readString();
+ int typesSize = in.readVInt();
+ ImmutableMap.Builder<String, ImmutableMap<String, FieldMappingMetaData>> typeMapBuilder = ImmutableMap.builder();
+ for (int j = 0; j < typesSize; j++) {
+ String type = in.readString();
+ ImmutableMap.Builder<String, FieldMappingMetaData> fieldMapBuilder = ImmutableMap.builder();
+ int fieldSize = in.readVInt();
+ for (int k = 0; k < fieldSize; k++) {
+ fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference()));
+ }
+ typeMapBuilder.put(type, fieldMapBuilder.build());
+ }
+ indexMapBuilder.put(index, typeMapBuilder.build());
+ }
+ mappings = indexMapBuilder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(mappings.size());
+ for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
+ out.writeString(indexEntry.getKey());
+ out.writeVInt(indexEntry.getValue().size());
+ for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
+ out.writeString(typeEntry.getKey());
+ out.writeVInt(typeEntry.getValue().size());
+ for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {
+ out.writeString(fieldEntry.getKey());
+ FieldMappingMetaData fieldMapping = fieldEntry.getValue();
+ out.writeString(fieldMapping.fullName());
+ out.writeBytesReference(fieldMapping.source);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
new file mode 100644
index 0000000..f0c25ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public class GetMappingsAction extends IndicesAction<GetMappingsRequest, GetMappingsResponse, GetMappingsRequestBuilder> {
+
+ public static final GetMappingsAction INSTANCE = new GetMappingsAction();
+ public static final String NAME = "mappings/get";
+
+ private GetMappingsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetMappingsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GetMappingsRequestBuilder((InternalGenericClient) client);
+ }
+
+ @Override
+ public GetMappingsResponse newResponse() {
+ return new GetMappingsResponse();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
new file mode 100644
index 0000000..15222cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
+
+/**
+ */
+public class GetMappingsRequest extends ClusterInfoRequest<GetMappingsRequest> {
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
new file mode 100644
index 0000000..dc2eab2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder<GetMappingsRequest, GetMappingsResponse, GetMappingsRequestBuilder> {
+
+ public GetMappingsRequestBuilder(InternalGenericClient client, String... indices) {
+ super(client, new GetMappingsRequest().indices(indices));
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetMappingsResponse> listener) {
+ ((IndicesAdminClient) client).getMappings(request, listener);
+ }
+}
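+
+ // A minimal usage sketch (hypothetical names; assumes a connected Client "client"):
+ //
+ // GetMappingsResponse response = client.admin().indices()
+ // .prepareGetMappings("my_index")
+ // .setTypes("my_type")
+ // .execute().actionGet();
+ // MappingMetaData typeMapping = response.mappings().get("my_index").get("my_type");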
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java
new file mode 100644
index 0000000..b27577f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public class GetMappingsResponse extends ActionResponse {
+
+ private ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.of();
+
+ GetMappingsResponse(ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings) {
+ this.mappings = mappings;
+ }
+
+ GetMappingsResponse() {
+ }
+
+ public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() {
+ return mappings;
+ }
+
+ public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> getMappings() {
+ return mappings();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder();
+ for (int i = 0; i < size; i++) {
+ String key = in.readString();
+ int valueSize = in.readVInt();
+ ImmutableOpenMap.Builder<String, MappingMetaData> typeMapBuilder = ImmutableOpenMap.builder();
+ for (int j = 0; j < valueSize; j++) {
+ typeMapBuilder.put(in.readString(), MappingMetaData.readFrom(in));
+ }
+ indexMapBuilder.put(key, typeMapBuilder.build());
+ }
+ mappings = indexMapBuilder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(mappings.size());
+ for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappings) {
+ out.writeString(indexEntry.key);
+ out.writeVInt(indexEntry.value.size());
+ for (ObjectObjectCursor<String, MappingMetaData> typeEntry : indexEntry.value) {
+ out.writeString(typeEntry.key);
+ MappingMetaData.writeTo(typeEntry.value, out);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java
new file mode 100644
index 0000000..a0d3079
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ */
+public class TransportGetFieldMappingsAction extends TransportAction<GetFieldMappingsRequest, GetFieldMappingsResponse> {
+
+ private final ClusterService clusterService;
+ private final TransportGetFieldMappingsIndexAction shardAction;
+ private final String transportAction;
+
+ @Inject
+ public TransportGetFieldMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, TransportGetFieldMappingsIndexAction shardAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.shardAction = shardAction;
+ this.transportAction = GetFieldMappingsAction.NAME;
+ transportService.registerHandler(transportAction, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(GetFieldMappingsRequest request, final ActionListener<GetFieldMappingsResponse> listener) {
+ ClusterState clusterState = clusterService.state();
+ String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ if (concreteIndices == null || concreteIndices.length == 0) {
+ listener.onResponse(new GetFieldMappingsResponse());
+ } else {
+ // only touch concreteIndices.length once the null/empty check above has passed
+ final AtomicInteger indexCounter = new AtomicInteger();
+ final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
+ final AtomicReferenceArray<Object> indexResponses = new AtomicReferenceArray<Object>(concreteIndices.length);
+ boolean probablySingleFieldRequest = concreteIndices.length == 1 && request.types().length == 1 && request.fields().length == 1;
+ for (final String index : concreteIndices) {
+ GetFieldMappingsIndexRequest shardRequest = new GetFieldMappingsIndexRequest(request, index, probablySingleFieldRequest);
+ // no extra listener threading needed; per-index responses are handled on the calling thread
+ shardRequest.listenerThreaded(false);
+ shardAction.execute(shardRequest, new ActionListener<GetFieldMappingsResponse>() {
+ @Override
+ public void onResponse(GetFieldMappingsResponse result) {
+ indexResponses.set(indexCounter.getAndIncrement(), result);
+ if (completionCounter.decrementAndGet() == 0) {
+ listener.onResponse(merge(indexResponses));
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ int index = indexCounter.getAndIncrement();
+ indexResponses.set(index, e);
+ if (completionCounter.decrementAndGet() == 0) {
+ listener.onResponse(merge(indexResponses));
+ }
+ }
+ });
+ }
+ }
+ }
+
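+ // merges the per-index responses into a single response; note that per-index failures
+ // (stored in the array as Throwables) are silently skipped here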
+ private GetFieldMappingsResponse merge(AtomicReferenceArray<Object> indexResponses) {
+ MapBuilder<String, ImmutableMap<String, ImmutableMap<String, GetFieldMappingsResponse.FieldMappingMetaData>>> mergedResponses = MapBuilder.newMapBuilder();
+ for (int i = 0; i < indexResponses.length(); i++) {
+ Object element = indexResponses.get(i);
+ if (element instanceof GetFieldMappingsResponse) {
+ GetFieldMappingsResponse response = (GetFieldMappingsResponse) element;
+ mergedResponses.putAll(response.mappings());
+ }
+ }
+ return new GetFieldMappingsResponse(mergedResponses.immutableMap());
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<GetFieldMappingsRequest> {
+
+ @Override
+ public GetFieldMappingsRequest newInstance() {
+ return new GetFieldMappingsRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(final GetFieldMappingsRequest request, final TransportChannel channel) throws Exception {
+ // no need for a threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<GetFieldMappingsResponse>() {
+ @Override
+ public void onResponse(GetFieldMappingsResponse result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [" + transportAction + "] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
new file mode 100644
index 0000000..4ad7522
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
+import org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.TypeMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ */
+public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomOperationAction<GetFieldMappingsIndexRequest, GetFieldMappingsResponse> {
+
+ protected final ClusterService clusterService;
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportGetFieldMappingsIndexAction(Settings settings, ClusterService clusterService,
+ TransportService transportService,
+ IndicesService indicesService,
+ ThreadPool threadPool) {
+ super(settings, threadPool, clusterService, transportService);
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetFieldMappingsAction.NAME + "/index";
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected ShardsIterator shards(ClusterState state, GetFieldMappingsIndexRequest request) {
+ // Will balance requests between shards
+ return state.routingTable().index(request.index()).randomAllActiveShardsIt();
+ }
+
+ @Override
+ protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, int shardId) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ Collection<String> typeIntersection;
+ if (request.types().length == 0) {
+ typeIntersection = indexService.mapperService().types();
+
+ } else {
+ typeIntersection = Collections2.filter(indexService.mapperService().types(), new Predicate<String>() {
+
+ @Override
+ public boolean apply(String type) {
+ return Regex.simpleMatch(request.types(), type);
+ }
+
+ });
+ if (typeIntersection.isEmpty()) {
+ throw new TypeMissingException(new Index(request.index()), request.types());
+ }
+ }
+
+ MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>> typeMappings = new MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>>();
+ for (String type : typeIntersection) {
+ DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
+ ImmutableMap<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(documentMapper, request);
+ if (!fieldMapping.isEmpty()) {
+ typeMappings.put(type, fieldMapping);
+ }
+ }
+
+ return new GetFieldMappingsResponse(ImmutableMap.of(request.index(), typeMappings.immutableMap()));
+ }
+
+ @Override
+ protected GetFieldMappingsIndexRequest newRequest() {
+ return new GetFieldMappingsIndexRequest();
+ }
+
+ @Override
+ protected GetFieldMappingsResponse newResponse() {
+ return new GetFieldMappingsResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, GetFieldMappingsIndexRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, GetFieldMappingsIndexRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ private static final ToXContent.Params includeDefaultsParams = new ToXContent.Params() {
+
+ final static String INCLUDE_DEFAULTS = "include_defaults";
+
+ @Override
+ public String param(String key) {
+ if (INCLUDE_DEFAULTS.equals(key)) {
+ return "true";
+ }
+ return null;
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ if (INCLUDE_DEFAULTS.equals(key)) {
+ return "true";
+ }
+ return defaultValue;
+ }
+
+ @Override
+ public boolean paramAsBoolean(String key, boolean defaultValue) {
+ if (INCLUDE_DEFAULTS.equals(key)) {
+ return true;
+ }
+ return defaultValue;
+ }
+
+ @Override
+ public Boolean paramAsBoolean(String key, Boolean defaultValue) {
+ if (INCLUDE_DEFAULTS.equals(key)) {
+ return true;
+ }
+ return defaultValue;
+ }
+
+ @Override
+ @Deprecated
+ public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) {
+ return paramAsBoolean(key, defaultValue);
+ }
+ };
+
+ private ImmutableMap<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) throws ElasticsearchException {
+ MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<String, FieldMappingMetaData>();
+ ImmutableList<FieldMapper> allFieldMappers = documentMapper.mappers().mappers();
+ for (String field : request.fields()) {
+ if (Regex.isMatchAllPattern(field)) {
+ for (FieldMapper fieldMapper : allFieldMappers) {
+ addFieldMapper(fieldMapper.names().fullName(), fieldMapper, fieldMappings, request.includeDefaults());
+ }
+ } else if (Regex.isSimpleMatchPattern(field)) {
+ // go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name.
+ // also make sure we only store each mapper once.
+ boolean[] resolved = new boolean[allFieldMappers.size()];
+ for (int i = 0; i < allFieldMappers.size(); i++) {
+ FieldMapper fieldMapper = allFieldMappers.get(i);
+ if (Regex.simpleMatch(field, fieldMapper.names().fullName())) {
+ addFieldMapper(fieldMapper.names().fullName(), fieldMapper, fieldMappings, request.includeDefaults());
+ resolved[i] = true;
+ }
+ }
+ for (int i = 0; i < allFieldMappers.size(); i++) {
+ if (resolved[i]) {
+ continue;
+ }
+ FieldMapper fieldMapper = allFieldMappers.get(i);
+ if (Regex.simpleMatch(field, fieldMapper.names().indexName())) {
+ addFieldMapper(fieldMapper.names().indexName(), fieldMapper, fieldMappings, request.includeDefaults());
+ resolved[i] = true;
+ }
+ }
+ for (int i = 0; i < allFieldMappers.size(); i++) {
+ if (resolved[i]) {
+ continue;
+ }
+ FieldMapper fieldMapper = allFieldMappers.get(i);
+ if (Regex.simpleMatch(field, fieldMapper.names().name())) {
+ addFieldMapper(fieldMapper.names().name(), fieldMapper, fieldMappings, request.includeDefaults());
+ resolved[i] = true;
+ }
+ }
+
+ } else {
+ // not a pattern
+ FieldMapper fieldMapper = documentMapper.mappers().smartNameFieldMapper(field);
+ if (fieldMapper != null) {
+ addFieldMapper(field, fieldMapper, fieldMappings, request.includeDefaults());
+ } else if (request.probablySingleFieldRequest()) {
+ fieldMappings.put(field, FieldMappingMetaData.NULL);
+ }
+ }
+ }
+ return fieldMappings.immutableMap();
+ }
+
+ private void addFieldMapper(String field, FieldMapper fieldMapper, MapBuilder<String, FieldMappingMetaData> fieldMappings, boolean includeDefaults) {
+ if (fieldMappings.containsKey(field)) {
+ return;
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject();
+ fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.names().fullName(), builder.bytes()));
+ } catch (IOException e) {
+ throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
+ }
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
new file mode 100644
index 0000000..0a32432
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ */
+public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMappingsRequest, GetMappingsResponse> {
+
+ @Inject
+ public TransportGetMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetMappingsAction.NAME;
+ }
+
+ @Override
+ protected GetMappingsRequest newRequest() {
+ return new GetMappingsRequest();
+ }
+
+ @Override
+ protected GetMappingsResponse newResponse() {
+ return new GetMappingsResponse();
+ }
+
+ @Override
+ protected void doMasterOperation(final GetMappingsRequest request, final ClusterState state, final ActionListener<GetMappingsResponse> listener) throws ElasticsearchException {
+ logger.trace("serving getMapping request based on version {}", state.version());
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = state.metaData().findMappings(
+ request.indices(), request.types()
+ );
+ listener.onResponse(new GetMappingsResponse(result));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/package-info.java
new file mode 100644
index 0000000..3ccc5ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Indices Mapping Administrative Actions.
+ */
+package org.elasticsearch.action.admin.indices.mapping; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java
new file mode 100644
index 0000000..2631247
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class PutMappingAction extends IndicesAction<PutMappingRequest, PutMappingResponse, PutMappingRequestBuilder> {
+
+ public static final PutMappingAction INSTANCE = new PutMappingAction();
+ public static final String NAME = "indices/mapping/put";
+
+ private PutMappingAction() {
+ super(NAME);
+ }
+
+ @Override
+ public PutMappingResponse newResponse() {
+ return new PutMappingResponse();
+ }
+
+ @Override
+ public PutMappingRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new PutMappingRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
new file mode 100644
index 0000000..f0299a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
+
+/**
+ * Cluster state update request that allows to put a mapping
+ */
+public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<PutMappingClusterStateUpdateRequest> {
+
+ private String type;
+
+ private String source;
+
+ private boolean ignoreConflicts = false;
+
+ PutMappingClusterStateUpdateRequest() {
+
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public PutMappingClusterStateUpdateRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ public String source() {
+ return source;
+ }
+
+ public PutMappingClusterStateUpdateRequest source(String source) {
+ this.source = source;
+ return this;
+ }
+
+ public boolean ignoreConflicts() {
+ return ignoreConflicts;
+ }
+
+ public PutMappingClusterStateUpdateRequest ignoreConflicts(boolean ignoreConflicts) {
+ this.ignoreConflicts = ignoreConflicts;
+ return this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java
new file mode 100644
index 0000000..4826698
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Puts a mapping definition, registered under a specific type, into one or more indices. Best created with
+ * {@link org.elasticsearch.client.Requests#putMappingRequest(String...)}.
+ * <p/>
+ * <p>If a mapping already exists, the new mapping will be merged with the existing one. If elements that
+ * cannot be merged are detected, the request is rejected unless {@link #ignoreConflicts(boolean)} is set,
+ * in which case the conflicting parts of the new mapping are ignored.
+ *
+ * @see org.elasticsearch.client.Requests#putMappingRequest(String...)
+ * @see org.elasticsearch.client.IndicesAdminClient#putMapping(PutMappingRequest)
+ * @see PutMappingResponse
+ */
+public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> {
+
+ private static final ObjectOpenHashSet<String> RESERVED_FIELDS = ObjectOpenHashSet.from(
+ "_uid", "_id", "_type", "_source", "_all", "_analyzer", "_boost", "_parent", "_routing", "_index",
+ "_size", "_timestamp", "_ttl"
+ );
+
+ private String[] indices;
+
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
+
+ private String type;
+
+ private String source;
+
+ private boolean ignoreConflicts = false;
+
+ PutMappingRequest() {
+ }
+
+ /**
+ * Constructs a new put mapping request against one or more indices. If nothing is set then
+ * it will be executed against all indices.
+ */
+ public PutMappingRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (type == null) {
+ validationException = addValidationError("mapping type is missing", validationException);
+ }
+ if (source == null) {
+ validationException = addValidationError("mapping source is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the indices this put mapping operation will execute on.
+ */
+ public PutMappingRequest indices(String[] indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * The indices the mapping will be put on.
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public PutMappingRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * The mapping type.
+ */
+ public String type() {
+ return type;
+ }
+
+ /**
+ * The type of the mappings.
+ */
+ public PutMappingRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ public String source() {
+ return source;
+ }
+
+ /**
+ * A specialized, simplified mapping source method that takes the form of paired property definitions:
+ * ("field1", "type=string,store=true").
+ *
+ * Also supports metadata mapping fields such as `_all` and `_parent` as property definitions; these metadata
+ * mapping fields are automatically put on the top level of the mapping object.
+ */
+ public PutMappingRequest source(Object... source) {
+ return source(buildFromSimplifiedDef(type, source));
+ }
+
+ public static XContentBuilder buildFromSimplifiedDef(String type, Object... source) {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ if (type != null) {
+ builder.startObject(type);
+ }
+
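+ // first pass: reserved metadata fields (e.g. _all, _parent) are emitted on the top level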
+ for (int i = 0; i < source.length; i++) {
+ String fieldName = source[i++].toString();
+ if (RESERVED_FIELDS.contains(fieldName)) {
+ builder.startObject(fieldName);
+ String[] s1 = Strings.splitStringByCommaToArray(source[i].toString());
+ for (String s : s1) {
+ String[] s2 = Strings.split(s, "=");
+ if (s2.length != 2) {
+ throw new ElasticsearchIllegalArgumentException("malformed " + s);
+ }
+ builder.field(s2[0], s2[1]);
+ }
+ builder.endObject();
+ }
+ }
+
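+ // second pass: all remaining fields go under "properties"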
+ builder.startObject("properties");
+ for (int i = 0; i < source.length; i++) {
+ String fieldName = source[i++].toString();
+ if (RESERVED_FIELDS.contains(fieldName)) {
+ continue;
+ }
+
+ builder.startObject(fieldName);
+ String[] s1 = Strings.splitStringByCommaToArray(source[i].toString());
+ for (String s : s1) {
+ String[] s2 = Strings.split(s, "=");
+ if (s2.length != 2) {
+ throw new ElasticsearchIllegalArgumentException("malformed " + s);
+ }
+ builder.field(s2[0], s2[1]);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ if (type != null) {
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("failed to generate simplified mapping definition", e);
+ }
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ public PutMappingRequest source(XContentBuilder mappingBuilder) {
+ try {
+ return source(mappingBuilder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
+ }
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ @SuppressWarnings("unchecked")
+ public PutMappingRequest source(Map mappingSource) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(mappingSource);
+ return source(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + mappingSource + "]", e);
+ }
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ public PutMappingRequest source(String mappingSource) {
+ this.source = mappingSource;
+ return this;
+ }
+
+ /**
+ * If a mapping definition is already registered against the type, it will be merged. If elements
+ * that can't be merged are detected, the request will be rejected unless
+ * {@link #ignoreConflicts(boolean)} is set, in which case the conflicting mapping elements are ignored.
+ */
+ public boolean ignoreConflicts() {
+ return ignoreConflicts;
+ }
+
+ /**
+ * If a mapping definition is already registered against the type, it will be merged. If elements
+ * that can't be merged are detected, the request will be rejected unless
+ * {@link #ignoreConflicts(boolean)} is set, in which case the conflicting mapping elements are ignored.
+ */
+ public PutMappingRequest ignoreConflicts(boolean ignoreConflicts) {
+ this.ignoreConflicts = ignoreConflicts;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ type = in.readOptionalString();
+ source = in.readString();
+ readTimeout(in);
+ ignoreConflicts = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(indices);
+ indicesOptions.writeIndicesOptions(out);
+ out.writeOptionalString(type);
+ out.writeString(source);
+ writeTimeout(out);
+ out.writeBoolean(ignoreConflicts);
+ }
+}
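The simplified pair form accepted by source(Object...) can be previewed without a cluster, since buildFromSimplifiedDef is static. A minimal sketch (not part of the patch; the "tweet" type and field names are made up for illustration):

    import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
    import org.elasticsearch.common.xcontent.XContentBuilder;

    public class SimplifiedMappingDemo {
        public static void main(String[] args) throws Exception {
            // "_parent" is in RESERVED_FIELDS, so it is hoisted to the top level;
            // "message" ends up under "properties"
            XContentBuilder builder = PutMappingRequest.buildFromSimplifiedDef(
                    "tweet",
                    "_parent", "type=user",
                    "message", "type=string,store=true");
            System.out.println(builder.string());
            // {"tweet":{"_parent":{"type":"user"},
            //           "properties":{"message":{"type":"string","store":"true"}}}}
        }
    }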
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
new file mode 100644
index 0000000..8f555fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.util.Map;
+
+/**
+ * Builder for a put mapping request
+ */
+public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMappingRequest, PutMappingResponse, PutMappingRequestBuilder> {
+
+ public PutMappingRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new PutMappingRequest());
+ }
+
+ public PutMappingRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Specifies what type of requested indices to ignore and how to deal with wildcard indices expressions,
+ * for example indices that don't exist.
+ */
+ public PutMappingRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ /**
+ * The type of the mappings.
+ */
+ public PutMappingRequestBuilder setType(String type) {
+ request.type(type);
+ return this;
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ public PutMappingRequestBuilder setSource(XContentBuilder mappingBuilder) {
+ request.source(mappingBuilder);
+ return this;
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ public PutMappingRequestBuilder setSource(Map mappingSource) {
+ request.source(mappingSource);
+ return this;
+ }
+
+ /**
+ * The mapping source definition.
+ */
+ public PutMappingRequestBuilder setSource(String mappingSource) {
+ request.source(mappingSource);
+ return this;
+ }
+
+ /**
+ * A specialized simplified mapping source method, takes the form of simple properties definition:
+ * ("field1", "type=string,store=true").
+ */
+ public PutMappingRequestBuilder setSource(Object... source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * If a mapping definition is already registered against the type, it will be merged. If elements
+ * that can't be merged are detected, the request will be rejected unless
+ * {@link #setIgnoreConflicts(boolean)} is set, in which case the conflicting mapping elements are ignored.
+ */
+ public PutMappingRequestBuilder setIgnoreConflicts(boolean ignoreConflicts) {
+ request.ignoreConflicts(ignoreConflicts);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<PutMappingResponse> listener) {
+ ((IndicesAdminClient) client).putMapping(request, listener);
+ }
+}
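A hedged usage sketch for this builder, assuming an already-connected Client named client (obtained elsewhere) and a hypothetical twitter index with a tweet type:

    import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
    import org.elasticsearch.client.Client;

    public final class PutMappingDemo {
        public static void putTweetMapping(Client client) {
            PutMappingResponse response = client.admin().indices()
                    .preparePutMapping("twitter")        // hypothetical index
                    .setType("tweet")                    // hypothetical type
                    .setSource("message", "type=string") // simplified pair form
                    .execute().actionGet();
            if (!response.isAcknowledged()) {
                throw new IllegalStateException("put mapping not acknowledged");
            }
        }
    }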
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java
new file mode 100644
index 0000000..8ddec5e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * The response of put mapping operation.
+ */
+public class PutMappingResponse extends AcknowledgedResponse {
+
+ PutMappingResponse() {
+
+ }
+
+ PutMappingResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
new file mode 100644
index 0000000..7fed67a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataMappingService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Put mapping action.
+ */
+public class TransportPutMappingAction extends TransportMasterNodeOperationAction<PutMappingRequest, PutMappingResponse> {
+
+ private final MetaDataMappingService metaDataMappingService;
+
+ @Inject
+ public TransportPutMappingAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataMappingService metaDataMappingService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.metaDataMappingService = metaDataMappingService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return PutMappingAction.NAME;
+ }
+
+ @Override
+ protected PutMappingRequest newRequest() {
+ return new PutMappingRequest();
+ }
+
+ @Override
+ protected PutMappingResponse newResponse() {
+ return new PutMappingResponse();
+ }
+
+ @Override
+ protected void doExecute(PutMappingRequest request, ActionListener<PutMappingResponse> listener) {
+ request.indices(clusterService.state().metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) throws ElasticsearchException {
+ PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
+ .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
+ .indices(request.indices()).type(request.type())
+ .source(request.source()).ignoreConflicts(request.ignoreConflicts());
+
+ metaDataMappingService.putMapping(updateRequest, new ClusterStateUpdateListener() {
+
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new PutMappingResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to put mappings on indices [{}], type [{}]", t, request.indices(), request.type());
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/package-info.java
new file mode 100644
index 0000000..4c67368
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Put Mapping Action.
+ */
+package org.elasticsearch.action.admin.indices.mapping.put; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java
new file mode 100644
index 0000000..c0b4541
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.open;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class OpenIndexAction extends IndicesAction<OpenIndexRequest, OpenIndexResponse, OpenIndexRequestBuilder> {
+
+ public static final OpenIndexAction INSTANCE = new OpenIndexAction();
+ public static final String NAME = "indices/open";
+
+ private OpenIndexAction() {
+ super(NAME);
+ }
+
+ @Override
+ public OpenIndexResponse newResponse() {
+ return new OpenIndexResponse();
+ }
+
+ @Override
+ public OpenIndexRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new OpenIndexRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java
new file mode 100644
index 0000000..a6da3d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.open;
+
+import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
+
+/**
+ * Cluster state update request to open one or more indices
+ */
+public class OpenIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<OpenIndexClusterStateUpdateRequest> {
+
+ OpenIndexClusterStateUpdateRequest() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java
new file mode 100644
index 0000000..77bd59f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.open;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to open an index.
+ */
+public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> {
+
+ private String[] indices;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, false, true);
+
+ OpenIndexRequest() {
+ }
+
+ /**
+ * Constructs a new open index request for the specified indices.
+ */
+ public OpenIndexRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (indices == null || indices.length == 0) {
+ validationException = addValidationError("index is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The indices to be opened
+ * @return the indices to be opened
+ */
+ String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Sets the indices to be opened
+ * @param indices the indices to be opened
+ * @return the request itself
+ */
+ public OpenIndexRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Specifies what type of requested indices to ignore and how to deal with wildcard expressions,
+ * for example indices that don't exist.
+ *
+ * @return the current behaviour when it comes to index names and wildcard indices expressions
+ */
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /**
+ * Specifies what type of requested indices to ignore and how to deal with wildcard expressions,
+ * for example indices that don't exist.
+ *
+ * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
+ * @return the request itself
+ */
+ public OpenIndexRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ readTimeout(in);
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ writeTimeout(out);
+ indicesOptions.writeIndicesOptions(out);
+ }
+}
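A small sketch of building and validating an open-index request offline; the index name is hypothetical and the flag comments follow the four-argument IndicesOptions.fromOptions overload used above:

    import org.elasticsearch.action.ActionRequestValidationException;
    import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
    import org.elasticsearch.action.support.IndicesOptions;

    public class OpenIndexRequestDemo {
        public static void main(String[] args) {
            OpenIndexRequest request = new OpenIndexRequest("logs-2014")
                    .indicesOptions(IndicesOptions.fromOptions(
                            true,   // ignore unavailable indices
                            true,   // allow wildcards that match nothing
                            true,   // expand wildcards to open indices
                            true)); // expand wildcards to closed indices
            ActionRequestValidationException e = request.validate();
            System.out.println(e == null ? "request is valid" : e.getMessage());
        }
    }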
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java
new file mode 100644
index 0000000..aecd446
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.open;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * Builder for an open index request
+ */
+public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder<OpenIndexRequest, OpenIndexResponse, OpenIndexRequestBuilder> {
+
+ public OpenIndexRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new OpenIndexRequest());
+ }
+
+ public OpenIndexRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
+ super((InternalIndicesAdminClient) indicesClient, new OpenIndexRequest(indices));
+ }
+
+ /**
+ * Sets the indices to be opened
+ * @param indices the indices to be opened
+ * @return the request itself
+ */
+ public OpenIndexRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Specifies what type of requested indices to ignore and how to deal with wildcard indices expressions,
+ * for example indices that don't exist.
+ *
+ * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
+ * @return the request itself
+ */
+ public OpenIndexRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<OpenIndexResponse> listener) {
+ ((IndicesAdminClient) client).open(request, listener);
+ }
+}
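For completeness, a hedged sketch of driving the builder through the admin client; it assumes a Client named client and the hypothetical logs-2014 index:

    import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
    import org.elasticsearch.client.Client;

    public final class OpenIndexDemo {
        public static void openIndex(Client client) {
            OpenIndexResponse response = client.admin().indices()
                    .prepareOpen("logs-2014") // hypothetical index name
                    .execute().actionGet();
            System.out.println("acknowledged: " + response.isAcknowledged());
        }
    }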
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java
new file mode 100644
index 0000000..f1efb30
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.open;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for an open index action.
+ */
+public class OpenIndexResponse extends AcknowledgedResponse {
+
+ OpenIndexResponse() {
+ }
+
+ OpenIndexResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
new file mode 100644
index 0000000..4afd5a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.open;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Open index action
+ */
+public class TransportOpenIndexAction extends TransportMasterNodeOperationAction<OpenIndexRequest, OpenIndexResponse> {
+
+ private final MetaDataIndexStateService indexStateService;
+ private final DestructiveOperations destructiveOperations;
+
+ @Inject
+ public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataIndexStateService indexStateService, NodeSettingsService nodeSettingsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.indexStateService = indexStateService;
+ this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away...
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return OpenIndexAction.NAME;
+ }
+
+ @Override
+ protected OpenIndexRequest newRequest() {
+ return new OpenIndexRequest();
+ }
+
+ @Override
+ protected OpenIndexResponse newResponse() {
+ return new OpenIndexResponse();
+ }
+
+ @Override
+ protected void doExecute(OpenIndexRequest request, ActionListener<OpenIndexResponse> listener) {
+ destructiveOperations.failDestructive(request.indices());
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(OpenIndexRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) throws ElasticsearchException {
+ request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
+ .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
+ .indices(request.indices());
+
+ indexStateService.openIndex(updateRequest, new ClusterStateUpdateListener() {
+
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new OpenIndexResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to open indices [{}]", t, request.indices());
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java
new file mode 100644
index 0000000..6a3dc66
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ */
+public class OptimizeAction extends IndicesAction<OptimizeRequest, OptimizeResponse, OptimizeRequestBuilder> {
+
+ public static final OptimizeAction INSTANCE = new OptimizeAction();
+ public static final String NAME = "indices/optimize";
+
+ private OptimizeAction() {
+ super(NAME);
+ }
+
+ @Override
+ public OptimizeResponse newResponse() {
+ return new OptimizeResponse();
+ }
+
+ @Override
+ public OptimizeRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new OptimizeRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java
new file mode 100644
index 0000000..eda0aa4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to optimize one or more indices. In order to optimize all indices, pass an empty array or
+ * <tt>null</tt> for the indices.
+ * <p/>
+ * <p>{@link #waitForMerge(boolean)} controls whether the call blocks until the optimize completes;
+ * defaults to <tt>true</tt>.
+ * <p/>
+ * <p>{@link #maxNumSegments(int)} controls the number of segments to optimize down to. By default, the
+ * optimize process will optimize down to half the configured number of segments.
+ *
+ * @see org.elasticsearch.client.Requests#optimizeRequest(String...)
+ * @see org.elasticsearch.client.IndicesAdminClient#optimize(OptimizeRequest)
+ * @see OptimizeResponse
+ */
+public class OptimizeRequest extends BroadcastOperationRequest<OptimizeRequest> {
+
+ public static final class Defaults {
+ public static final boolean WAIT_FOR_MERGE = true;
+ public static final int MAX_NUM_SEGMENTS = -1;
+ public static final boolean ONLY_EXPUNGE_DELETES = false;
+ public static final boolean FLUSH = true;
+ }
+
+ private boolean waitForMerge = Defaults.WAIT_FOR_MERGE;
+ private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS;
+ private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES;
+ private boolean flush = Defaults.FLUSH;
+
+ /**
+ * Constructs an optimization request over one or more indices.
+ *
+ * @param indices The indices to optimize; if none are passed, all indices will be optimized.
+ */
+ public OptimizeRequest(String... indices) {
+ super(indices);
+ }
+
+ public OptimizeRequest() {
+
+ }
+
+ /**
+ * Should the call block until the optimize completes. Defaults to <tt>true</tt>.
+ */
+ public boolean waitForMerge() {
+ return waitForMerge;
+ }
+
+ /**
+ * Should the call block until the optimize completes. Defaults to <tt>true</tt>.
+ */
+ public OptimizeRequest waitForMerge(boolean waitForMerge) {
+ this.waitForMerge = waitForMerge;
+ return this;
+ }
+
+ /**
+ * Will optimize the index down to &lt;= maxNumSegments. By default, will cause the optimize
+ * process to optimize down to half the configured number of segments.
+ */
+ public int maxNumSegments() {
+ return maxNumSegments;
+ }
+
+ /**
+ * Will optimize the index down to &lt;= maxNumSegments. By default, will cause the optimize
+ * process to optimize down to half the configured number of segments.
+ */
+ public OptimizeRequest maxNumSegments(int maxNumSegments) {
+ this.maxNumSegments = maxNumSegments;
+ return this;
+ }
+
+ /**
+ * Should the optimization only expunge deletes from the index, without full optimization.
+ * Defaults to full optimization (<tt>false</tt>).
+ */
+ public boolean onlyExpungeDeletes() {
+ return onlyExpungeDeletes;
+ }
+
+ /**
+ * Should the optimization only expunge deletes from the index, without full optimization.
+ * Defaults to full optimization (<tt>false</tt>).
+ */
+ public OptimizeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) {
+ this.onlyExpungeDeletes = onlyExpungeDeletes;
+ return this;
+ }
+
+ /**
+ * Should flush be performed after the optimization. Defaults to <tt>true</tt>.
+ */
+ public boolean flush() {
+ return flush;
+ }
+
+ /**
+ * Should flush be performed after the optimization. Defaults to <tt>true</tt>.
+ */
+ public OptimizeRequest flush(boolean flush) {
+ this.flush = flush;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ waitForMerge = in.readBoolean();
+ maxNumSegments = in.readInt();
+ onlyExpungeDeletes = in.readBoolean();
+ flush = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(waitForMerge);
+ out.writeInt(maxNumSegments);
+ out.writeBoolean(onlyExpungeDeletes);
+ out.writeBoolean(flush);
+ }
+}
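A minimal sketch of the request defaults described above; the index name is hypothetical:

    import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;

    public class OptimizeRequestDemo {
        public static void main(String[] args) {
            // expunge deleted documents without forcing a full merge,
            // and skip the trailing flush
            OptimizeRequest request = new OptimizeRequest("logs-2014")
                    .onlyExpungeDeletes(true)
                    .flush(false);
            // maxNumSegments stays at Defaults.MAX_NUM_SEGMENTS (-1)
            System.out.println("maxNumSegments=" + request.maxNumSegments());
        }
    }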
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequestBuilder.java
new file mode 100644
index 0000000..ac99dd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequestBuilder.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A request to optimize one or more indices. In order to optimize all indices, pass an empty array or
+ * <tt>null</tt> for the indices.
+ * <p/>
+ * <p>{@link #setWaitForMerge(boolean)} controls whether the call blocks until the optimize completes;
+ * defaults to <tt>true</tt>.
+ * <p/>
+ * <p>{@link #setMaxNumSegments(int)} controls the number of segments to optimize down to. By default, the
+ * optimize process will optimize down to half the configured number of segments.
+ */
+public class OptimizeRequestBuilder extends BroadcastOperationRequestBuilder<OptimizeRequest, OptimizeResponse, OptimizeRequestBuilder> {
+
+ public OptimizeRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new OptimizeRequest());
+ }
+
+ /**
+ * Should the call block until the optimize completes. Defaults to <tt>true</tt>.
+ */
+ public OptimizeRequestBuilder setWaitForMerge(boolean waitForMerge) {
+ request.waitForMerge(waitForMerge);
+ return this;
+ }
+
+ /**
+ * Will optimize the index down to &lt;= maxNumSegments. By default, will cause the optimize
+ * process to optimize down to half the configured number of segments.
+ */
+ public OptimizeRequestBuilder setMaxNumSegments(int maxNumSegments) {
+ request.maxNumSegments(maxNumSegments);
+ return this;
+ }
+
+ /**
+ * Should the optimization only expunge deletes from the index, without full optimization.
+ * Defaults to full optimization (<tt>false</tt>).
+ */
+ public OptimizeRequestBuilder setOnlyExpungeDeletes(boolean onlyExpungeDeletes) {
+ request.onlyExpungeDeletes(onlyExpungeDeletes);
+ return this;
+ }
+
+ /**
+ * Should flush be performed after the optimization. Defaults to <tt>true</tt>.
+ */
+ public OptimizeRequestBuilder setFlush(boolean flush) {
+ request.flush(flush);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<OptimizeResponse> listener) {
+ ((IndicesAdminClient) client).optimize(request, listener);
+ }
+}
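A hedged end-to-end sketch for this builder, assuming a Client named client and the hypothetical logs-2014 index; it forces each shard down to a single segment:

    import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
    import org.elasticsearch.client.Client;

    public final class OptimizeDemo {
        public static void mergeToOneSegment(Client client) {
            OptimizeResponse response = client.admin().indices()
                    .prepareOptimize("logs-2014") // hypothetical index name
                    .setMaxNumSegments(1)         // merge each shard to one segment
                    .setWaitForMerge(true)        // block until merging completes
                    .execute().actionGet();
            System.out.println(response.getSuccessfulShards() + "/"
                    + response.getTotalShards() + " shards optimized");
        }
    }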
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeResponse.java
new file mode 100644
index 0000000..d4a189e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A response for an optimize action.
+ */
+public class OptimizeResponse extends BroadcastOperationResponse {
+
+ OptimizeResponse() {
+
+ }
+
+ OptimizeResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeRequest.java
new file mode 100644
index 0000000..4cd6630
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeRequest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+class ShardOptimizeRequest extends BroadcastShardOperationRequest {
+
+ private boolean waitForMerge = OptimizeRequest.Defaults.WAIT_FOR_MERGE;
+ private int maxNumSegments = OptimizeRequest.Defaults.MAX_NUM_SEGMENTS;
+ private boolean onlyExpungeDeletes = OptimizeRequest.Defaults.ONLY_EXPUNGE_DELETES;
+ private boolean flush = OptimizeRequest.Defaults.FLUSH;
+
+ ShardOptimizeRequest() {
+ }
+
+ public ShardOptimizeRequest(String index, int shardId, OptimizeRequest request) {
+ super(index, shardId, request);
+ waitForMerge = request.waitForMerge();
+ maxNumSegments = request.maxNumSegments();
+ onlyExpungeDeletes = request.onlyExpungeDeletes();
+ flush = request.flush();
+ }
+
+ boolean waitForMerge() {
+ return waitForMerge;
+ }
+
+ int maxNumSegments() {
+ return maxNumSegments;
+ }
+
+ public boolean onlyExpungeDeletes() {
+ return onlyExpungeDeletes;
+ }
+
+ public boolean flush() {
+ return flush;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ waitForMerge = in.readBoolean();
+ maxNumSegments = in.readInt();
+ onlyExpungeDeletes = in.readBoolean();
+ flush = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(waitForMerge);
+ out.writeInt(maxNumSegments);
+ out.writeBoolean(onlyExpungeDeletes);
+ out.writeBoolean(flush);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeResponse.java
new file mode 100644
index 0000000..26055fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/ShardOptimizeResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+class ShardOptimizeResponse extends BroadcastShardOperationResponse {
+
+ ShardOptimizeResponse() {
+ }
+
+ public ShardOptimizeResponse(String index, int shardId) {
+ super(index, shardId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java
new file mode 100644
index 0000000..ad87911
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Optimize index/indices action.
+ */
+public class TransportOptimizeAction extends TransportBroadcastOperationAction<OptimizeRequest, OptimizeResponse, ShardOptimizeRequest, ShardOptimizeResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportOptimizeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ TransportService transportService, IndicesService indicesService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.OPTIMIZE;
+ }
+
+ @Override
+ protected String transportAction() {
+ return OptimizeAction.NAME;
+ }
+
+ @Override
+ protected OptimizeRequest newRequest() {
+ return new OptimizeRequest();
+ }
+
+ @Override
+ protected OptimizeResponse newResponse(OptimizeRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // a non active shard, ignore...
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ successfulShards++;
+ }
+ }
+ return new OptimizeResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardOptimizeRequest newShardRequest() {
+ return new ShardOptimizeRequest();
+ }
+
+ @Override
+ protected ShardOptimizeRequest newShardRequest(ShardRouting shard, OptimizeRequest request) {
+ return new ShardOptimizeRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardOptimizeResponse newShardResponse() {
+ return new ShardOptimizeResponse();
+ }
+
+ @Override
+ protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) throws ElasticsearchException {
+ IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
+ indexShard.optimize(new Engine.Optimize()
+ .waitForMerge(request.waitForMerge())
+ .maxNumSegments(request.maxNumSegments())
+ .onlyExpungeDeletes(request.onlyExpungeDeletes())
+ .flush(request.flush())
+ );
+ return new ShardOptimizeResponse(request.index(), request.shardId());
+ }
+
+ /**
+ * The optimize request works against *all* shards.
+ */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, OptimizeRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, OptimizeRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, OptimizeRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/package-info.java
new file mode 100644
index 0000000..e54ff9e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Optimize index/indices action.
+ */
+package org.elasticsearch.action.admin.indices.optimize; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/package-info.java
new file mode 100644
index 0000000..86854ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Indices Administrative Actions.
+ */
+package org.elasticsearch.action.admin.indices; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java
new file mode 100644
index 0000000..e4a5524
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Refresh index/indices action.
+ */
+public class RefreshAction extends IndicesAction<RefreshRequest, RefreshResponse, RefreshRequestBuilder> {
+
+ public static final RefreshAction INSTANCE = new RefreshAction();
+ public static final String NAME = "indices/refresh";
+
+ private RefreshAction() {
+ super(NAME);
+ }
+
+ @Override
+ public RefreshResponse newResponse() {
+ return new RefreshResponse();
+ }
+
+ @Override
+ public RefreshRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new RefreshRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java
new file mode 100644
index 0000000..58532e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A refresh request that makes all operations performed since the last refresh available for search. The (near)
+ * real-time capability depends on the index engine used. For example, the internal engine requires refresh to be
+ * called, but by default a refresh is scheduled periodically.
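+ * <p>
+ * An illustrative usage sketch (assumes a connected {@code Client}):
+ * <pre>
+ * RefreshResponse response = client.admin().indices()
+ *         .refresh(new RefreshRequest("index1"))
+ *         .actionGet();
+ * </pre>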
+ *
+ * @see org.elasticsearch.client.Requests#refreshRequest(String...)
+ * @see org.elasticsearch.client.IndicesAdminClient#refresh(RefreshRequest)
+ * @see RefreshResponse
+ */
+public class RefreshRequest extends BroadcastOperationRequest<RefreshRequest> {
+
+ private boolean force = true;
+
+ RefreshRequest() {
+ }
+
+ public RefreshRequest(String... indices) {
+ super(indices);
+ }
+
+ public boolean force() {
+ return force;
+ }
+
+ /**
+ * Forces calling refresh, overriding the check that dirty operations even happened. Defaults
+ * to true (note, still lightweight if no refresh is needed).
+ */
+ public RefreshRequest force(boolean force) {
+ this.force = force;
+ return this;
+ }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ force = in.readBoolean();
+ }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(force);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java
new file mode 100644
index 0000000..7bf15c4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A refresh request that makes all operations performed since the last refresh available for search. The (near)
+ * real-time capability depends on the index engine used. For example, the internal engine requires refresh to be
+ * called, but by default a refresh is scheduled periodically.
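+ * <p>
+ * An illustrative usage sketch (assumes a connected {@code Client}; {@code prepareRefresh} is the
+ * usual entry point on the indices admin client):
+ * <pre>
+ * RefreshResponse response = client.admin().indices().prepareRefresh("index1")
+ *         .setForce(false)
+ *         .execute().actionGet();
+ * </pre>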
+ */
+public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder<RefreshRequest, RefreshResponse, RefreshRequestBuilder> {
+
+ public RefreshRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new RefreshRequest());
+ }
+
+ /**
+ * Forces calling refresh, overriding the check that dirty operations even happened. Defaults
+ * to true (note, still lightweight if no refresh is needed).
+ */
+ public RefreshRequestBuilder setForce(boolean force) {
+ request.force(force);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<RefreshResponse> listener) {
+ ((IndicesAdminClient) client).refresh(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
new file mode 100644
index 0000000..3130b07
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The response of a refresh action.
+ */
+public class RefreshResponse extends BroadcastOperationResponse {
+
+    RefreshResponse() {
+    }
+
+ RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java
new file mode 100644
index 0000000..7e39713
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A refresh request for a single shard copy.
+ */
+class ShardRefreshRequest extends BroadcastShardOperationRequest {
+
+ private boolean force = true;
+
+ ShardRefreshRequest() {
+ }
+
+ public ShardRefreshRequest(String index, int shardId, RefreshRequest request) {
+ super(index, shardId, request);
+ force = request.force();
+ }
+
+ public boolean force() {
+ return force;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ force = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(force);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java
new file mode 100644
index 0000000..83c1ec5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * The refresh response of a single shard copy.
+ */
+class ShardRefreshResponse extends BroadcastShardOperationResponse {
+
+ ShardRefreshResponse() {
+ }
+
+ public ShardRefreshResponse(String index, int shardId) {
+ super(index, shardId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
new file mode 100644
index 0000000..452b0aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Refresh action.
+ */
+public class TransportRefreshAction extends TransportBroadcastOperationAction<RefreshRequest, RefreshResponse, ShardRefreshRequest, ShardRefreshResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ TransportService transportService, IndicesService indicesService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.REFRESH;
+ }
+
+ @Override
+ protected String transportAction() {
+ return RefreshAction.NAME;
+ }
+
+ @Override
+ protected RefreshRequest newRequest() {
+ return new RefreshRequest();
+ }
+
+ @Override
+ protected RefreshResponse newResponse(RefreshRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // non active shard, ignore
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ successfulShards++;
+ }
+ }
+ return new RefreshResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardRefreshRequest newShardRequest() {
+ return new ShardRefreshRequest();
+ }
+
+ @Override
+ protected ShardRefreshRequest newShardRequest(ShardRouting shard, RefreshRequest request) {
+ return new ShardRefreshRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardRefreshResponse newShardResponse() {
+ return new ShardRefreshResponse();
+ }
+
+ @Override
+ protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) throws ElasticsearchException {
+ IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
+ indexShard.refresh(new Engine.Refresh("api").force(request.force()));
+ logger.trace("{} refresh request executed, force: [{}]", indexShard.shardId(), request.force());
+ return new ShardRefreshResponse(request.index(), request.shardId());
+ }
+
+    /**
+     * The refresh request works against *all* assigned shards.
+     */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, RefreshRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, RefreshRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+    protected ClusterBlockException checkRequestBlock(ClusterState state, RefreshRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/package-info.java
new file mode 100644
index 0000000..ff0706b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Refresh index/indices action.
+ */
+package org.elasticsearch.action.admin.indices.refresh; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java
new file mode 100644
index 0000000..453a6f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
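+/**
+ * The segment information of a single index, grouped by shard id.
+ */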
+public class IndexSegments implements Iterable<IndexShardSegments> {
+
+ private final String index;
+
+ private final Map<Integer, IndexShardSegments> indexShards;
+
+ IndexSegments(String index, ShardSegments[] shards) {
+ this.index = index;
+
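+        // group the per-shard-copy responses by shard id, then wrap each group in an IndexShardSegments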
+ Map<Integer, List<ShardSegments>> tmpIndexShards = Maps.newHashMap();
+ for (ShardSegments shard : shards) {
+ List<ShardSegments> lst = tmpIndexShards.get(shard.getShardRouting().id());
+ if (lst == null) {
+ lst = Lists.newArrayList();
+ tmpIndexShards.put(shard.getShardRouting().id(), lst);
+ }
+ lst.add(shard);
+ }
+ indexShards = Maps.newHashMap();
+ for (Map.Entry<Integer, List<ShardSegments>> entry : tmpIndexShards.entrySet()) {
+ indexShards.put(entry.getKey(), new IndexShardSegments(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardSegments[entry.getValue().size()])));
+ }
+ }
+
+ public String getIndex() {
+ return this.index;
+ }
+
+    /**
+     * Returns a map from shard id to {@link IndexShardSegments}, where each value covers the whole
+     * replication group (primary and replicas) for that shard id.
+     */
+ public Map<Integer, IndexShardSegments> getShards() {
+ return this.indexShards;
+ }
+
+ @Override
+ public Iterator<IndexShardSegments> iterator() {
+ return indexShards.values().iterator();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java
new file mode 100644
index 0000000..4132c7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.Iterator;
+
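+/**
+ * The segment information for all copies (primary and replicas) of a single shard.
+ */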
+public class IndexShardSegments implements Iterable<ShardSegments> {
+
+ private final ShardId shardId;
+
+ private final ShardSegments[] shards;
+
+ IndexShardSegments(ShardId shardId, ShardSegments[] shards) {
+ this.shardId = shardId;
+ this.shards = shards;
+ }
+
+ public ShardId getShardId() {
+ return this.shardId;
+ }
+
+ public ShardSegments getAt(int i) {
+ return shards[i];
+ }
+
+ public ShardSegments[] getShards() {
+ return this.shards;
+ }
+
+ @Override
+ public Iterator<ShardSegments> iterator() {
+ return Iterators.forArray(shards);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java
new file mode 100644
index 0000000..c4d0663
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.engine.Segment;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
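+/**
+ * The response of an indices segments request, holding per-shard segment information and able to
+ * render it as XContent.
+ */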
+public class IndicesSegmentResponse extends BroadcastOperationResponse implements ToXContent {
+
+ private ShardSegments[] shards;
+
+ private Map<String, IndexSegments> indicesSegments;
+
+ IndicesSegmentResponse() {
+
+ }
+
+ IndicesSegmentResponse(ShardSegments[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.shards = shards;
+ }
+
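+    /**
+     * Returns a map of index name to {@link IndexSegments}, built lazily from the per-shard
+     * responses on first access and cached for subsequent calls.
+     */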
+ public Map<String, IndexSegments> getIndices() {
+ if (indicesSegments != null) {
+ return indicesSegments;
+ }
+ Map<String, IndexSegments> indicesSegments = Maps.newHashMap();
+
+ Set<String> indices = Sets.newHashSet();
+ for (ShardSegments shard : shards) {
+ indices.add(shard.getIndex());
+ }
+
+ for (String index : indices) {
+ List<ShardSegments> shards = Lists.newArrayList();
+ for (ShardSegments shard : this.shards) {
+ if (shard.getShardRouting().index().equals(index)) {
+ shards.add(shard);
+ }
+ }
+ indicesSegments.put(index, new IndexSegments(index, shards.toArray(new ShardSegments[shards.size()])));
+ }
+ this.indicesSegments = indicesSegments;
+ return indicesSegments;
+ }
+
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shards = new ShardSegments[in.readVInt()];
+ for (int i = 0; i < shards.length; i++) {
+ shards[i] = ShardSegments.readShardSegments(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shards.length);
+ for (ShardSegments shard : shards) {
+ shard.writeTo(out);
+ }
+ }
+
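+    /**
+     * A sketch of the rendered shape (field names from {@link Fields}; values illustrative):
+     * <pre>
+     * "indices" : {
+     *   "index1" : {
+     *     "shards" : {
+     *       "0" : [ { "routing" : { ... }, "num_committed_segments" : 1, "num_search_segments" : 1,
+     *                 "segments" : { "_0" : { "generation" : 0, "num_docs" : 5, ... } } } ]
+     *     }
+     *   }
+     * }
+     * </pre>
+     */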
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.INDICES);
+
+ for (IndexSegments indexSegments : getIndices().values()) {
+ builder.startObject(indexSegments.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.startObject(Fields.SHARDS);
+ for (IndexShardSegments indexSegment : indexSegments) {
+ builder.startArray(Integer.toString(indexSegment.getShardId().id()));
+ for (ShardSegments shardSegments : indexSegment) {
+ builder.startObject();
+
+ builder.startObject(Fields.ROUTING);
+ builder.field(Fields.STATE, shardSegments.getShardRouting().state());
+ builder.field(Fields.PRIMARY, shardSegments.getShardRouting().primary());
+ builder.field(Fields.NODE, shardSegments.getShardRouting().currentNodeId());
+ if (shardSegments.getShardRouting().relocatingNodeId() != null) {
+ builder.field(Fields.RELOCATING_NODE, shardSegments.getShardRouting().relocatingNodeId());
+ }
+ builder.endObject();
+
+ builder.field(Fields.NUM_COMMITTED_SEGMENTS, shardSegments.getNumberOfCommitted());
+ builder.field(Fields.NUM_SEARCH_SEGMENTS, shardSegments.getNumberOfSearch());
+
+ builder.startObject(Fields.SEGMENTS);
+ for (Segment segment : shardSegments) {
+ builder.startObject(segment.getName());
+ builder.field(Fields.GENERATION, segment.getGeneration());
+ builder.field(Fields.NUM_DOCS, segment.getNumDocs());
+ builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs());
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSizeInBytes());
+ builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getMemoryInBytes());
+ builder.field(Fields.COMMITTED, segment.isCommitted());
+ builder.field(Fields.SEARCH, segment.isSearch());
+ if (segment.getVersion() != null) {
+ builder.field(Fields.VERSION, segment.getVersion());
+ }
+ if (segment.isCompound() != null) {
+ builder.field(Fields.COMPOUND, segment.isCompound());
+ }
+ if (segment.getMergeId() != null) {
+ builder.field(Fields.MERGE_ID, segment.getMergeId());
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
+ static final XContentBuilderString STATE = new XContentBuilderString("state");
+ static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
+ static final XContentBuilderString NODE = new XContentBuilderString("node");
+ static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
+
+ static final XContentBuilderString SEGMENTS = new XContentBuilderString("segments");
+ static final XContentBuilderString GENERATION = new XContentBuilderString("generation");
+ static final XContentBuilderString NUM_COMMITTED_SEGMENTS = new XContentBuilderString("num_committed_segments");
+ static final XContentBuilderString NUM_SEARCH_SEGMENTS = new XContentBuilderString("num_search_segments");
+ static final XContentBuilderString NUM_DOCS = new XContentBuilderString("num_docs");
+ static final XContentBuilderString DELETED_DOCS = new XContentBuilderString("deleted_docs");
+ static final XContentBuilderString SIZE = new XContentBuilderString("size");
+ static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
+ static final XContentBuilderString COMMITTED = new XContentBuilderString("committed");
+ static final XContentBuilderString SEARCH = new XContentBuilderString("search");
+ static final XContentBuilderString VERSION = new XContentBuilderString("version");
+ static final XContentBuilderString COMPOUND = new XContentBuilderString("compound");
+ static final XContentBuilderString MERGE_ID = new XContentBuilderString("merge_id");
+ static final XContentBuilderString MEMORY = new XContentBuilderString("memory");
+ static final XContentBuilderString MEMORY_IN_BYTES = new XContentBuilderString("memory_in_bytes");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java
new file mode 100644
index 0000000..dd46a03
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Indices segments action, exposing low-level Lucene segment information per shard.
+ */
+public class IndicesSegmentsAction extends IndicesAction<IndicesSegmentsRequest, IndicesSegmentResponse, IndicesSegmentsRequestBuilder> {
+
+ public static final IndicesSegmentsAction INSTANCE = new IndicesSegmentsAction();
+ public static final String NAME = "indices/segments";
+
+ private IndicesSegmentsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public IndicesSegmentResponse newResponse() {
+ return new IndicesSegmentResponse();
+ }
+
+ @Override
+ public IndicesSegmentsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new IndicesSegmentsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java
new file mode 100644
index 0000000..71ece1f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.Strings;
+
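+/**
+ * A request to get the segments of one or more indices. Defaults to all indices, expanding
+ * wildcards to open indices only.
+ */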
+public class IndicesSegmentsRequest extends BroadcastOperationRequest<IndicesSegmentsRequest> {
+
+ public IndicesSegmentsRequest() {
+ this(Strings.EMPTY_ARRAY);
+ }
+
+ public IndicesSegmentsRequest(String... indices) {
+ super(indices);
+ indicesOptions(IndicesOptions.fromOptions(false, false, true, false));
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
new file mode 100644
index 0000000..5bf8377
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A request builder to get the segments of one or more indices.
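+ * <p>
+ * An illustrative usage sketch (assumes a connected {@code Client}; {@code prepareSegments} is the
+ * usual entry point on the indices admin client):
+ * <pre>
+ * IndicesSegmentResponse response = client.admin().indices()
+ *         .prepareSegments("index1").execute().actionGet();
+ * </pre>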
+ */
+public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder<IndicesSegmentsRequest, IndicesSegmentResponse, IndicesSegmentsRequestBuilder> {
+
+ public IndicesSegmentsRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new IndicesSegmentsRequest());
+ }
+
+ @Override
+ protected void doExecute(ActionListener<IndicesSegmentResponse> listener) {
+ ((IndicesAdminClient) client).segments(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java
new file mode 100644
index 0000000..e8eafaf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.engine.Segment;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
+
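+/**
+ * The segments of a single shard copy, along with the routing entry they were gathered from.
+ */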
+public class ShardSegments extends BroadcastShardOperationResponse implements Iterable<Segment> {
+
+ private ShardRouting shardRouting;
+
+ private List<Segment> segments;
+
+ ShardSegments() {
+ }
+
+ public ShardSegments(ShardRouting shardRouting, List<Segment> segments) {
+ super(shardRouting.index(), shardRouting.id());
+ this.shardRouting = shardRouting;
+ this.segments = segments;
+ }
+
+ @Override
+ public Iterator<Segment> iterator() {
+ return segments.iterator();
+ }
+
+ public ShardRouting getShardRouting() {
+ return this.shardRouting;
+ }
+
+ public List<Segment> getSegments() {
+ return this.segments;
+ }
+
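+    /**
+     * The number of segments that are committed (part of the last Lucene commit point).
+     */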
+ public int getNumberOfCommitted() {
+ int count = 0;
+ for (Segment segment : segments) {
+ if (segment.isCommitted()) {
+ count++;
+ }
+ }
+ return count;
+ }
+
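+    /**
+     * The number of segments that are visible to search.
+     */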
+ public int getNumberOfSearch() {
+ int count = 0;
+ for (Segment segment : segments) {
+ if (segment.isSearch()) {
+ count++;
+ }
+ }
+ return count;
+ }
+
+ public static ShardSegments readShardSegments(StreamInput in) throws IOException {
+ ShardSegments shard = new ShardSegments();
+ shard.readFrom(in);
+ return shard;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardRouting = readShardRoutingEntry(in);
+ int size = in.readVInt();
+ if (size == 0) {
+ segments = ImmutableList.of();
+ } else {
+ segments = new ArrayList<Segment>(size);
+ for (int i = 0; i < size; i++) {
+ segments.add(Segment.readSegment(in));
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardRouting.writeTo(out);
+ out.writeVInt(segments.size());
+ for (Segment segment : segments) {
+ segment.writeTo(out);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
new file mode 100644
index 0000000..36e0a24
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.InternalIndexService;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Transport action that gathers Lucene segment information from all active shards.
+ */
+public class TransportIndicesSegmentsAction extends TransportBroadcastOperationAction<IndicesSegmentsRequest, IndicesSegmentResponse, TransportIndicesSegmentsAction.IndexShardSegmentRequest, ShardSegments> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportIndicesSegmentsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return IndicesSegmentsAction.NAME;
+ }
+
+ @Override
+ protected IndicesSegmentsRequest newRequest() {
+ return new IndicesSegmentsRequest();
+ }
+
+    /**
+     * The segments request goes across *all* active shards.
+     */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, IndicesSegmentsRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, IndicesSegmentsRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+    protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesSegmentsRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+
+ @Override
+ protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ final List<ShardSegments> shards = newArrayList();
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // simply ignore non active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ shards.add((ShardSegments) shardResponse);
+ successfulShards++;
+ }
+ }
+ return new IndicesSegmentResponse(shards.toArray(new ShardSegments[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected IndexShardSegmentRequest newShardRequest() {
+ return new IndexShardSegmentRequest();
+ }
+
+ @Override
+ protected IndexShardSegmentRequest newShardRequest(ShardRouting shard, IndicesSegmentsRequest request) {
+ return new IndexShardSegmentRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardSegments newShardResponse() {
+ return new ShardSegments();
+ }
+
+ @Override
+ protected ShardSegments shardOperation(IndexShardSegmentRequest request) throws ElasticsearchException {
+ InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.index());
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
+ return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments());
+ }
+
+ public static class IndexShardSegmentRequest extends BroadcastShardOperationRequest {
+
+ IndexShardSegmentRequest() {
+ }
+
+ IndexShardSegmentRequest(String index, int shardId, IndicesSegmentsRequest request) {
+ super(index, shardId, request);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
new file mode 100644
index 0000000..4e3d5f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.get;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ * Get settings action, retrieving the settings of one or more indices.
+ */
+public class GetSettingsAction extends IndicesAction<GetSettingsRequest, GetSettingsResponse, GetSettingsRequestBuilder> {
+
+ public static final GetSettingsAction INSTANCE = new GetSettingsAction();
+ public static final String NAME = "indices/settings/get";
+
+    private GetSettingsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetSettingsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GetSettingsRequestBuilder((InternalGenericClient) client);
+ }
+
+ @Override
+ public GetSettingsResponse newResponse() {
+ return new GetSettingsResponse();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java
new file mode 100644
index 0000000..dc1f6a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.get;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to retrieve the settings of one or more indices, optionally filtered by setting name.
+ */
+public class GetSettingsRequest extends MasterNodeReadOperationRequest<GetSettingsRequest> {
+
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
+ private String[] names = Strings.EMPTY_ARRAY;
+
+ public GetSettingsRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public GetSettingsRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public String[] names() {
+ return names;
+ }
+
+ public GetSettingsRequest names(String... names) {
+ this.names = names;
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (names == null) {
+ validationException = ValidateActions.addValidationError("names may not be null", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ names = in.readStringArray();
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ indicesOptions.writeIndicesOptions(out);
+ out.writeStringArray(names);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
new file mode 100644
index 0000000..e5445f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.get;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
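+ * Builder for a get index settings request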
+ */
+public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetSettingsRequest, GetSettingsResponse, GetSettingsRequestBuilder> {
+
+ public GetSettingsRequestBuilder(InternalGenericClient client, String... indices) {
+ super(client, new GetSettingsRequest().indices(indices));
+ }
+
+ public GetSettingsRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ public GetSettingsRequestBuilder addIndices(String... indices) {
+ request.indices(ObjectArrays.concat(request.indices(), indices, String.class));
+ return this;
+ }
+
+ /**
+ * Specifies how wildcard indices expressions are resolved and how unavailable
+ * requested indices are handled (for example, indices that don't exist).
+ */
+ public GetSettingsRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ public GetSettingsRequestBuilder setNames(String... names) {
+ request.names(names);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetSettingsResponse> listener) {
+ ((IndicesAdminClient) client).getSettings(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
new file mode 100644
index 0000000..6043e7c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.IOException;
+
+/**
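+ * A response for a get index settings action, mapping each concrete index to its
+ * (possibly filtered) {@link Settings}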
+ */
+public class GetSettingsResponse extends ActionResponse {
+
+ private ImmutableOpenMap<String, Settings> indexToSettings = ImmutableOpenMap.of();
+
+ public GetSettingsResponse(ImmutableOpenMap<String, Settings> indexToSettings) {
+ this.indexToSettings = indexToSettings;
+ }
+
+ GetSettingsResponse() {
+ }
+
+ public ImmutableOpenMap<String, Settings> getIndexToSettings() {
+ return indexToSettings;
+ }
+
+ public String getSetting(String index, String setting) {
+ Settings settings = indexToSettings.get(index);
+ if (settings != null && setting != null) {
+ return settings.get(setting);
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ ImmutableOpenMap.Builder<String, Settings> builder = ImmutableOpenMap.builder();
+ for (int i = 0; i < size; i++) {
+ builder.put(in.readString(), ImmutableSettings.readSettingsFromStream(in));
+ }
+ indexToSettings = builder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(indexToSettings.size());
+ for (ObjectObjectCursor<String, Settings> cursor : indexToSettings) {
+ out.writeString(cursor.key);
+ ImmutableSettings.writeSettingsToStream(cursor.value, out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
new file mode 100644
index 0000000..872ffbd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+
+/**
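+ * Transport action behind {@link GetSettingsAction}: resolves the index expressions
+ * against the cluster state on the master, applies the node's settings filter, and
+ * optionally narrows each index's settings to the requested names.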
+ */
+public class TransportGetSettingsAction extends TransportMasterNodeReadOperationAction<GetSettingsRequest, GetSettingsResponse> {
+
+ private final SettingsFilter settingsFilter;
+
+ @Inject
+ public TransportGetSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, SettingsFilter settingsFilter) {
+ super(settings, transportService, clusterService, threadPool);
+ this.settingsFilter = settingsFilter;
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetSettingsAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ // Very lightweight operation
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected GetSettingsRequest newRequest() {
+ return new GetSettingsRequest();
+ }
+
+ @Override
+ protected GetSettingsResponse newResponse() {
+ return new GetSettingsResponse();
+ }
+
+ @Override
+ protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) throws ElasticsearchException {
+ request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ ImmutableOpenMap.Builder<String, Settings> indexToSettingsBuilder = ImmutableOpenMap.builder();
+ for (String concreteIndex : request.indices()) {
+ IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex);
+ if (indexMetaData == null) {
+ continue;
+ }
+
+ Settings settings = settingsFilter.filterSettings(indexMetaData.settings());
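+ // when specific setting names (optionally with simple wildcards) were requested,
+ // keep only the matching settings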
+ if (!CollectionUtils.isEmpty(request.names())) {
+ ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder();
+ for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+ if (Regex.simpleMatch(request.names(), entry.getKey())) {
+ settingsBuilder.put(entry.getKey(), entry.getValue());
+ }
+ }
+ settings = settingsBuilder.build();
+ }
+ indexToSettingsBuilder.put(concreteIndex, settings);
+ }
+ listener.onResponse(new GetSettingsResponse(indexToSettingsBuilder.build()));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
new file mode 100644
index 0000000..16f5986
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ *
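+ * Transport action behind {@link UpdateSettingsAction}: resolves the index expressions
+ * up front and delegates the actual update to {@link MetaDataUpdateSettingsService}.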
+ */
+public class TransportUpdateSettingsAction extends TransportMasterNodeOperationAction<UpdateSettingsRequest, UpdateSettingsResponse> {
+
+ private final MetaDataUpdateSettingsService updateSettingsService;
+
+ @Inject
+ public TransportUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ MetaDataUpdateSettingsService updateSettingsService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.updateSettingsService = updateSettingsService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away....
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return UpdateSettingsAction.NAME;
+ }
+
+ @Override
+ protected UpdateSettingsRequest newRequest() {
+ return new UpdateSettingsRequest();
+ }
+
+ @Override
+ protected UpdateSettingsResponse newResponse() {
+ return new UpdateSettingsResponse();
+ }
+
+ @Override
+ protected void doExecute(UpdateSettingsRequest request, ActionListener<UpdateSettingsResponse> listener) {
+ request.indices(clusterService.state().metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) throws ElasticsearchException {
+ UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
+ .indices(request.indices())
+ .settings(request.settings())
+ .ackTimeout(request.timeout())
+ .masterNodeTimeout(request.masterNodeTimeout());
+
+ updateSettingsService.updateSettings(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new UpdateSettingsResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to update settings on indices [{}]", t, request.indices());
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
new file mode 100644
index 0000000..98a45b2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
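+ * Action for updating the settings of one or more indices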
+ */
+public class UpdateSettingsAction extends IndicesAction<UpdateSettingsRequest, UpdateSettingsResponse, UpdateSettingsRequestBuilder> {
+
+ public static final UpdateSettingsAction INSTANCE = new UpdateSettingsAction();
+ public static final String NAME = "indices/settings/update";
+
+ private UpdateSettingsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public UpdateSettingsResponse newResponse() {
+ return new UpdateSettingsResponse();
+ }
+
+ @Override
+ public UpdateSettingsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new UpdateSettingsRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java
new file mode 100644
index 0000000..ac6f6f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Cluster state update request that updates the settings of one or more indices
+ */
+public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<UpdateSettingsClusterStateUpdateRequest> {
+
+ private Settings settings;
+
+ public UpdateSettingsClusterStateUpdateRequest() {
+ }
+
+ /**
+ * Returns the {@link Settings} to update
+ */
+ public Settings settings() {
+ return settings;
+ }
+
+ /**
+ * Sets the {@link Settings} to update
+ */
+ public UpdateSettingsClusterStateUpdateRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
new file mode 100644
index 0000000..9be19a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+
+/**
+ * Request for an update index settings action
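+ * <p>
+ * Illustrative usage (a sketch; it assumes the usual {@code prepareUpdateSettings}
+ * helper on the indices admin client):
+ * <pre>
+ * client.admin().indices().prepareUpdateSettings("my_index")
+ *         .setSettings(ImmutableSettings.builder()
+ *                 .put("index.number_of_replicas", 2))
+ *         .execute().actionGet();
+ * </pre>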
+ */
+public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest> {
+
+ private String[] indices;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
+ private Settings settings = EMPTY_SETTINGS;
+
+ UpdateSettingsRequest() {
+ }
+
+ /**
+ * Constructs a new request to update settings for one or more indices
+ */
+ public UpdateSettingsRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ /**
+ * Constructs a new request to update settings for one or more indices
+ */
+ public UpdateSettingsRequest(Settings settings, String... indices) {
+ this.indices = indices;
+ this.settings = settings;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (settings.getAsMap().isEmpty()) {
+ validationException = addValidationError("no settings to update", validationException);
+ }
+ return validationException;
+ }
+
+ String[] indices() {
+ return indices;
+ }
+
+ Settings settings() {
+ return settings;
+ }
+
+ /**
+ * Sets the indices to apply the settings update to
+ */
+ public UpdateSettingsRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public UpdateSettingsRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated
+ */
+ public UpdateSettingsRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated
+ */
+ public UpdateSettingsRequest settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated (in JSON, YAML, or properties format)
+ */
+ public UpdateSettingsRequest settings(String source) {
+ this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated (in JSON, YAML, or properties format)
+ */
+ @SuppressWarnings("unchecked")
+ public UpdateSettingsRequest settings(Map source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ settings = readSettingsFromStream(in);
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(indices);
+ indicesOptions.writeIndicesOptions(out);
+ writeSettingsToStream(settings, out);
+ writeTimeout(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
new file mode 100644
index 0000000..fb73373
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * Builder for an update index settings request
+ */
+public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<UpdateSettingsRequest, UpdateSettingsResponse, UpdateSettingsRequestBuilder> {
+
+ public UpdateSettingsRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
+ super((InternalIndicesAdminClient) indicesClient, new UpdateSettingsRequest(indices));
+ }
+
+ /**
+ * Sets the indices the update settings will execute on
+ */
+ public UpdateSettingsRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * Specifies how wildcard indices expressions are resolved and how unavailable
+ * requested indices are handled (for example, indices that don't exist).
+ */
+ public UpdateSettingsRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated
+ */
+ public UpdateSettingsRequestBuilder setSettings(Settings settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated
+ */
+ public UpdateSettingsRequestBuilder setSettings(Settings.Builder settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated (in JSON, YAML, or properties format)
+ */
+ public UpdateSettingsRequestBuilder setSettings(String source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * Sets the settings to be updated (in JSON, YAML, or properties format)
+ */
+ public UpdateSettingsRequestBuilder setSettings(Map<String, Object> source) {
+ request.settings(source);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<UpdateSettingsResponse> listener) {
+ ((IndicesAdminClient) client).updateSettings(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java
new file mode 100644
index 0000000..b147584
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for an update index settings action
+ */
+public class UpdateSettingsResponse extends AcknowledgedResponse {
+
+ UpdateSettingsResponse() {
+ }
+
+ UpdateSettingsResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
new file mode 100644
index 0000000..7bb490b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
@@ -0,0 +1,638 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.cache.filter.FilterCacheStats;
+import org.elasticsearch.index.cache.id.IdCacheStats;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.fielddata.FieldDataStats;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.get.GetStats;
+import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.stats.SearchStats;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.index.warmer.WarmerStats;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+
+import java.io.IOException;
+
+/**
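+ * Container for the per-shard statistics sections (docs, store, indexing, get, search,
+ * merge, refresh, flush, ...); a section is only populated when the corresponding
+ * {@link CommonStatsFlags.Flag} is set.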
+ */
+public class CommonStats implements Streamable, ToXContent {
+
+ public CommonStats() {
+ this(CommonStatsFlags.NONE);
+ }
+
+ public CommonStats(CommonStatsFlags flags) {
+ CommonStatsFlags.Flag[] setFlags = flags.getFlags();
+
+ for (CommonStatsFlags.Flag flag : setFlags) {
+ switch (flag) {
+ case Docs:
+ docs = new DocsStats();
+ break;
+ case Store:
+ store = new StoreStats();
+ break;
+ case Indexing:
+ indexing = new IndexingStats();
+ break;
+ case Get:
+ get = new GetStats();
+ break;
+ case Search:
+ search = new SearchStats();
+ break;
+ case Merge:
+ merge = new MergeStats();
+ break;
+ case Refresh:
+ refresh = new RefreshStats();
+ break;
+ case Flush:
+ flush = new FlushStats();
+ break;
+ case Warmer:
+ warmer = new WarmerStats();
+ break;
+ case FilterCache:
+ filterCache = new FilterCacheStats();
+ break;
+ case IdCache:
+ idCache = new IdCacheStats();
+ break;
+ case FieldData:
+ fieldData = new FieldDataStats();
+ break;
+ case Completion:
+ completion = new CompletionStats();
+ break;
+ case Segments:
+ segments = new SegmentsStats();
+ break;
+ case Percolate:
+ percolate = new PercolateStats();
+ break;
+ case Translog:
+ translog = new TranslogStats();
+ break;
+ default:
+ throw new IllegalStateException("Unknown Flag: " + flag);
+ }
+ }
+ }
+
+ public CommonStats(IndexShard indexShard, CommonStatsFlags flags) {
+ CommonStatsFlags.Flag[] setFlags = flags.getFlags();
+
+ for (CommonStatsFlags.Flag flag : setFlags) {
+ switch (flag) {
+ case Docs:
+ docs = indexShard.docStats();
+ break;
+ case Store:
+ store = indexShard.storeStats();
+ break;
+ case Indexing:
+ indexing = indexShard.indexingStats(flags.types());
+ break;
+ case Get:
+ get = indexShard.getStats();
+ break;
+ case Search:
+ search = indexShard.searchStats(flags.groups());
+ break;
+ case Merge:
+ merge = indexShard.mergeStats();
+ break;
+ case Refresh:
+ refresh = indexShard.refreshStats();
+ break;
+ case Flush:
+ flush = indexShard.flushStats();
+ break;
+ case Warmer:
+ warmer = indexShard.warmerStats();
+ break;
+ case FilterCache:
+ filterCache = indexShard.filterCacheStats();
+ break;
+ case IdCache:
+ idCache = indexShard.idCacheStats();
+ break;
+ case FieldData:
+ fieldData = indexShard.fieldDataStats(flags.fieldDataFields());
+ break;
+ case Completion:
+ completion = indexShard.completionStats(flags.completionDataFields());
+ break;
+ case Segments:
+ segments = indexShard.segmentStats();
+ break;
+ case Percolate:
+ percolate = indexShard.shardPercolateService().stats();
+ break;
+ case Translog:
+ translog = indexShard.translogStats();
+ break;
+ default:
+ throw new IllegalStateException("Unknown Flag: " + flag);
+ }
+ }
+ }
+
+ @Nullable
+ public DocsStats docs;
+
+ @Nullable
+ public StoreStats store;
+
+ @Nullable
+ public IndexingStats indexing;
+
+ @Nullable
+ public GetStats get;
+
+ @Nullable
+ public SearchStats search;
+
+ @Nullable
+ public MergeStats merge;
+
+ @Nullable
+ public RefreshStats refresh;
+
+ @Nullable
+ public FlushStats flush;
+
+ @Nullable
+ public WarmerStats warmer;
+
+ @Nullable
+ public FilterCacheStats filterCache;
+
+ @Nullable
+ public IdCacheStats idCache;
+
+ @Nullable
+ public FieldDataStats fieldData;
+
+ @Nullable
+ public PercolateStats percolate;
+
+ @Nullable
+ public CompletionStats completion;
+
+ @Nullable
+ public SegmentsStats segments;
+
+ @Nullable
+ public TranslogStats translog;
+
+ public void add(CommonStats stats) {
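+ // for each section: if this side has no stats yet, adopt the other side's section
+ // when present; otherwise merge (the per-section add() methods ignore null arguments)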
+ if (docs == null) {
+ if (stats.getDocs() != null) {
+ docs = new DocsStats();
+ docs.add(stats.getDocs());
+ }
+ } else {
+ docs.add(stats.getDocs());
+ }
+ if (store == null) {
+ if (stats.getStore() != null) {
+ store = new StoreStats();
+ store.add(stats.getStore());
+ }
+ } else {
+ store.add(stats.getStore());
+ }
+ if (indexing == null) {
+ if (stats.getIndexing() != null) {
+ indexing = new IndexingStats();
+ indexing.add(stats.getIndexing());
+ }
+ } else {
+ indexing.add(stats.getIndexing());
+ }
+ if (get == null) {
+ if (stats.getGet() != null) {
+ get = new GetStats();
+ get.add(stats.getGet());
+ }
+ } else {
+ get.add(stats.getGet());
+ }
+ if (search == null) {
+ if (stats.getSearch() != null) {
+ search = new SearchStats();
+ search.add(stats.getSearch());
+ }
+ } else {
+ search.add(stats.getSearch());
+ }
+ if (merge == null) {
+ if (stats.getMerge() != null) {
+ merge = new MergeStats();
+ merge.add(stats.getMerge());
+ }
+ } else {
+ merge.add(stats.getMerge());
+ }
+ if (refresh == null) {
+ if (stats.getRefresh() != null) {
+ refresh = new RefreshStats();
+ refresh.add(stats.getRefresh());
+ }
+ } else {
+ refresh.add(stats.getRefresh());
+ }
+ if (flush == null) {
+ if (stats.getFlush() != null) {
+ flush = new FlushStats();
+ flush.add(stats.getFlush());
+ }
+ } else {
+ flush.add(stats.getFlush());
+ }
+ if (warmer == null) {
+ if (stats.getWarmer() != null) {
+ warmer = new WarmerStats();
+ warmer.add(stats.getWarmer());
+ }
+ } else {
+ warmer.add(stats.getWarmer());
+ }
+ if (filterCache == null) {
+ if (stats.getFilterCache() != null) {
+ filterCache = new FilterCacheStats();
+ filterCache.add(stats.getFilterCache());
+ }
+ } else {
+ filterCache.add(stats.getFilterCache());
+ }
+
+ if (idCache == null) {
+ if (stats.getIdCache() != null) {
+ idCache = new IdCacheStats();
+ idCache.add(stats.getIdCache());
+ }
+ } else {
+ idCache.add(stats.getIdCache());
+ }
+
+ if (fieldData == null) {
+ if (stats.getFieldData() != null) {
+ fieldData = new FieldDataStats();
+ fieldData.add(stats.getFieldData());
+ }
+ } else {
+ fieldData.add(stats.getFieldData());
+ }
+ if (percolate == null) {
+ if (stats.getPercolate() != null) {
+ percolate = new PercolateStats();
+ percolate.add(stats.getPercolate());
+ }
+ } else {
+ percolate.add(stats.getPercolate());
+ }
+ if (completion == null) {
+ if (stats.getCompletion() != null) {
+ completion = new CompletionStats();
+ completion.add(stats.getCompletion());
+ }
+ } else {
+ completion.add(stats.getCompletion());
+ }
+ if (segments == null) {
+ if (stats.getSegments() != null) {
+ segments = new SegmentsStats();
+ segments.add(stats.getSegments());
+ }
+ } else {
+ segments.add(stats.getSegments());
+ }
+ if (translog == null) {
+ if (stats.getTranslog() != null) {
+ translog = new TranslogStats();
+ translog.add(stats.getTranslog());
+ }
+ } else {
+ translog.add(stats.getTranslog());
+ }
+ }
+
+ @Nullable
+ public DocsStats getDocs() {
+ return this.docs;
+ }
+
+ @Nullable
+ public StoreStats getStore() {
+ return store;
+ }
+
+ @Nullable
+ public IndexingStats getIndexing() {
+ return indexing;
+ }
+
+ @Nullable
+ public GetStats getGet() {
+ return get;
+ }
+
+ @Nullable
+ public SearchStats getSearch() {
+ return search;
+ }
+
+ @Nullable
+ public MergeStats getMerge() {
+ return merge;
+ }
+
+ @Nullable
+ public RefreshStats getRefresh() {
+ return refresh;
+ }
+
+ @Nullable
+ public FlushStats getFlush() {
+ return flush;
+ }
+
+ @Nullable
+ public WarmerStats getWarmer() {
+ return this.warmer;
+ }
+
+ @Nullable
+ public FilterCacheStats getFilterCache() {
+ return this.filterCache;
+ }
+
+ @Nullable
+ public IdCacheStats getIdCache() {
+ return this.idCache;
+ }
+
+ @Nullable
+ public FieldDataStats getFieldData() {
+ return this.fieldData;
+ }
+
+ @Nullable
+ public PercolateStats getPercolate() {
+ return percolate;
+ }
+
+ @Nullable
+ public CompletionStats getCompletion() {
+ return completion;
+ }
+
+ @Nullable
+ public SegmentsStats getSegments() {
+ return segments;
+ }
+
+ @Nullable
+ public TranslogStats getTranslog() {
+ return translog;
+ }
+
+ public static CommonStats readCommonStats(StreamInput in) throws IOException {
+ CommonStats stats = new CommonStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ docs = DocsStats.readDocStats(in);
+ }
+ if (in.readBoolean()) {
+ store = StoreStats.readStoreStats(in);
+ }
+ if (in.readBoolean()) {
+ indexing = IndexingStats.readIndexingStats(in);
+ }
+ if (in.readBoolean()) {
+ get = GetStats.readGetStats(in);
+ }
+ if (in.readBoolean()) {
+ search = SearchStats.readSearchStats(in);
+ }
+ if (in.readBoolean()) {
+ merge = MergeStats.readMergeStats(in);
+ }
+ if (in.readBoolean()) {
+ refresh = RefreshStats.readRefreshStats(in);
+ }
+ if (in.readBoolean()) {
+ flush = FlushStats.readFlushStats(in);
+ }
+ if (in.readBoolean()) {
+ warmer = WarmerStats.readWarmerStats(in);
+ }
+ if (in.readBoolean()) {
+ filterCache = FilterCacheStats.readFilterCacheStats(in);
+ }
+ if (in.readBoolean()) {
+ idCache = IdCacheStats.readIdCacheStats(in);
+ }
+ if (in.readBoolean()) {
+ fieldData = FieldDataStats.readFieldDataStats(in);
+ }
+ if (in.readBoolean()) {
+ percolate = PercolateStats.readPercolateStats(in);
+ }
+ if (in.readBoolean()) {
+ completion = CompletionStats.readCompletionStats(in);
+ }
+ if (in.readBoolean()) {
+ segments = SegmentsStats.readSegmentsStats(in);
+ }
+ translog = in.readOptionalStreamable(new TranslogStats());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (docs == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ docs.writeTo(out);
+ }
+ if (store == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ store.writeTo(out);
+ }
+ if (indexing == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ indexing.writeTo(out);
+ }
+ if (get == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ get.writeTo(out);
+ }
+ if (search == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ search.writeTo(out);
+ }
+ if (merge == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ merge.writeTo(out);
+ }
+ if (refresh == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ refresh.writeTo(out);
+ }
+ if (flush == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ flush.writeTo(out);
+ }
+ if (warmer == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ warmer.writeTo(out);
+ }
+ if (filterCache == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ filterCache.writeTo(out);
+ }
+ if (idCache == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ idCache.writeTo(out);
+ }
+ if (fieldData == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ fieldData.writeTo(out);
+ }
+ if (percolate == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ percolate.writeTo(out);
+ }
+ if (completion == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ completion.writeTo(out);
+ }
+ if (segments == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ segments.writeTo(out);
+ }
+ out.writeOptionalStreamable(translog);
+ }
+
+ // note: this assumes the caller has already started a wrapping object
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (docs != null) {
+ docs.toXContent(builder, params);
+ }
+ if (store != null) {
+ store.toXContent(builder, params);
+ }
+ if (indexing != null) {
+ indexing.toXContent(builder, params);
+ }
+ if (get != null) {
+ get.toXContent(builder, params);
+ }
+ if (search != null) {
+ search.toXContent(builder, params);
+ }
+ if (merge != null) {
+ merge.toXContent(builder, params);
+ }
+ if (refresh != null) {
+ refresh.toXContent(builder, params);
+ }
+ if (flush != null) {
+ flush.toXContent(builder, params);
+ }
+ if (warmer != null) {
+ warmer.toXContent(builder, params);
+ }
+ if (filterCache != null) {
+ filterCache.toXContent(builder, params);
+ }
+ if (idCache != null) {
+ idCache.toXContent(builder, params);
+ }
+ if (fieldData != null) {
+ fieldData.toXContent(builder, params);
+ }
+ if (percolate != null) {
+ percolate.toXContent(builder, params);
+ }
+ if (completion != null) {
+ completion.toXContent(builder, params);
+ }
+ if (segments != null) {
+ segments.toXContent(builder, params);
+ }
+ if (translog != null) {
+ translog.toXContent(builder, params);
+ }
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
new file mode 100644
index 0000000..52cb8b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+/**
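+ * Flags selecting which {@link CommonStats} sections to populate, plus the optional
+ * type, group and field filters that refine some of those sections.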
+ */
+public class CommonStatsFlags implements Streamable, Cloneable {
+
+ public final static CommonStatsFlags ALL = new CommonStatsFlags().all();
+ public final static CommonStatsFlags NONE = new CommonStatsFlags().clear();
+
+ private EnumSet<Flag> flags = EnumSet.allOf(Flag.class);
+ private String[] types = null;
+ private String[] groups = null;
+ private String[] fieldDataFields = null;
+ private String[] completionDataFields = null;
+
+ /**
+ * @param flags flags to set. If no flags are supplied, default flags will be set.
+ */
+ public CommonStatsFlags(Flag... flags) {
+ if (flags.length > 0) {
+ clear();
+ for (Flag f : flags) {
+ this.flags.add(f);
+ }
+ }
+ }
+
+ /**
+ * Sets all flags to return all stats.
+ */
+ public CommonStatsFlags all() {
+ flags = EnumSet.allOf(Flag.class);
+ types = null;
+ groups = null;
+ fieldDataFields = null;
+ completionDataFields = null;
+ return this;
+ }
+
+ /**
+ * Clears all stats.
+ */
+ public CommonStatsFlags clear() {
+ flags = EnumSet.noneOf(Flag.class);
+ types = null;
+ groups = null;
+ fieldDataFields = null;
+ completionDataFields = null;
+ return this;
+ }
+
+ public boolean anySet() {
+ return !flags.isEmpty();
+ }
+
+ public Flag[] getFlags() {
+ return flags.toArray(new Flag[flags.size()]);
+ }
+
+ /**
+ * Document types to return stats for. Mainly affects {@link Flag#Indexing} when
+ * enabled, returning specific indexing stats for those types.
+ */
+ public CommonStatsFlags types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ /**
+ * Document types to return stats for. Mainly affects {@link Flag#Indexing} when
+ * enabled, returning specific indexing stats for those types.
+ */
+ public String[] types() {
+ return this.types;
+ }
+
+ /**
+ * Sets specific search group stats to retrieve the stats for. Mainly affects search
+ * when enabled.
+ */
+ public CommonStatsFlags groups(String... groups) {
+ this.groups = groups;
+ return this;
+ }
+
+ public String[] groups() {
+ return this.groups;
+ }
+
+ /**
+ * Sets the fields to retrieve field data stats for. Mainly affects
+ * {@link Flag#FieldData} when enabled.
+ */
+ public CommonStatsFlags fieldDataFields(String... fieldDataFields) {
+ this.fieldDataFields = fieldDataFields;
+ return this;
+ }
+
+ public String[] fieldDataFields() {
+ return this.fieldDataFields;
+ }
+
+ public CommonStatsFlags completionDataFields(String... completionDataFields) {
+ this.completionDataFields = completionDataFields;
+ return this;
+ }
+
+ public String[] completionDataFields() {
+ return this.completionDataFields;
+ }
+
+ public boolean isSet(Flag flag) {
+ return flags.contains(flag);
+ }
+
+ boolean unSet(Flag flag) {
+ return flags.remove(flag);
+ }
+
+ void set(Flag flag) {
+ flags.add(flag);
+ }
+
+ public CommonStatsFlags set(Flag flag, boolean add) {
+ if (add) {
+ set(flag);
+ } else {
+ unSet(flag);
+ }
+ return this;
+ }
+
+ public static CommonStatsFlags readCommonStatsFlags(StreamInput in) throws IOException {
+ CommonStatsFlags flags = new CommonStatsFlags();
+ flags.readFrom(in);
+ return flags;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ long longFlags = 0;
+ for (Flag flag : flags) {
+ longFlags |= (1L << flag.ordinal());
+ }
+ out.writeLong(longFlags);
+
+ out.writeStringArrayNullable(types);
+ out.writeStringArrayNullable(groups);
+ out.writeStringArrayNullable(fieldDataFields);
+ out.writeStringArrayNullable(completionDataFields);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ final long longFlags = in.readLong();
+ flags.clear();
+ for (Flag flag : Flag.values()) {
+ if ((longFlags & (1L << flag.ordinal())) != 0) {
+ flags.add(flag);
+ }
+ }
+ types = in.readStringArray();
+ groups = in.readStringArray();
+ fieldDataFields = in.readStringArray();
+ completionDataFields = in.readStringArray();
+ }
+
+ @Override
+ public CommonStatsFlags clone() {
+ try {
+ CommonStatsFlags cloned = (CommonStatsFlags) super.clone();
+ cloned.flags = flags.clone();
+ return cloned;
+ } catch (CloneNotSupportedException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ public static enum Flag {
+ // Do not change the order of these flags; we use
+ // the ordinal for wire encoding. Only append to the end!
+ Store("store"),
+ Indexing("indexing"),
+ Get("get"),
+ Search("search"),
+ Merge("merge"),
+ Flush("flush"),
+ Refresh("refresh"),
+ FilterCache("filter_cache"),
+ IdCache("id_cache"),
+ FieldData("fielddata"),
+ Docs("docs"),
+ Warmer("warmer"),
+ Percolate("percolate"),
+ Completion("completion"),
+ Segments("segments"),
+ Translog("translog");
+
+ private final String restName;
+
+ Flag(String restName) {
+ this.restName = restName;
+ }
+
+ public String getRestName() {
+ return restName;
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java
new file mode 100644
index 0000000..e599468
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
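+ * Stats for all copies of a single shard (primary and replicas), with lazily
+ * computed total and primary-only aggregates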
+ */
+public class IndexShardStats implements Iterable<ShardStats>, Streamable {
+
+ private ShardId shardId;
+
+ private ShardStats[] shards;
+
+ private IndexShardStats() {}
+
+ public IndexShardStats(ShardId shardId, ShardStats[] shards) {
+ this.shardId = shardId;
+ this.shards = shards;
+ }
+
+ public ShardId getShardId() {
+ return this.shardId;
+ }
+
+ public ShardStats[] getShards() {
+ return shards;
+ }
+
+ public ShardStats getAt(int position) {
+ return shards[position];
+ }
+
+ @Override
+ public Iterator<ShardStats> iterator() {
+ return Iterators.forArray(shards);
+ }
+
+ private CommonStats total = null;
+
+ public CommonStats getTotal() {
+ if (total != null) {
+ return total;
+ }
+ CommonStats stats = new CommonStats();
+ for (ShardStats shard : shards) {
+ stats.add(shard.getStats());
+ }
+ total = stats;
+ return stats;
+ }
+
+ private CommonStats primary = null;
+
+ public CommonStats getPrimary() {
+ if (primary != null) {
+ return primary;
+ }
+ CommonStats stats = new CommonStats();
+ for (ShardStats shard : shards) {
+ if (shard.getShardRouting().primary()) {
+ stats.add(shard.getStats());
+ }
+ }
+ primary = stats;
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ shardId = ShardId.readShardId(in);
+ int shardSize = in.readVInt();
+ shards = new ShardStats[shardSize];
+ for (int i = 0; i < shardSize; i++) {
+ shards[i] = ShardStats.readShardStats(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ shardId.writeTo(out);
+ out.writeVInt(shards.length);
+ for (ShardStats stats : shards) {
+ stats.writeTo(out);
+ }
+ }
+
+ public static IndexShardStats readIndexShardStats(StreamInput in) throws IOException {
+ IndexShardStats indexShardStats = new IndexShardStats();
+ indexShardStats.readFrom(in);
+ return indexShardStats;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
new file mode 100644
index 0000000..7cda8d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
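+ * Stats for a single index: the flat array of shard-copy stats, grouped on demand
+ * by shard id, with lazily computed total and primary-only aggregates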
+ */
+public class IndexStats implements Iterable<IndexShardStats> {
+
+ private final String index;
+
+ private final ShardStats shards[];
+
+ public IndexStats(String index, ShardStats[] shards) {
+ this.index = index;
+ this.shards = shards;
+ }
+
+ public String getIndex() {
+ return this.index;
+ }
+
+ public ShardStats[] getShards() {
+ return this.shards;
+ }
+
+ private Map<Integer, IndexShardStats> indexShards;
+
+ public Map<Integer, IndexShardStats> getIndexShards() {
+ if (indexShards != null) {
+ return indexShards;
+ }
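+ // group the flat shard-copy stats by shard id, then wrap each group in an IndexShardStats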
+ Map<Integer, List<ShardStats>> tmpIndexShards = Maps.newHashMap();
+ for (ShardStats shard : shards) {
+ List<ShardStats> lst = tmpIndexShards.get(shard.getShardRouting().id());
+ if (lst == null) {
+ lst = Lists.newArrayList();
+ tmpIndexShards.put(shard.getShardRouting().id(), lst);
+ }
+ lst.add(shard);
+ }
+ indexShards = Maps.newHashMap();
+ for (Map.Entry<Integer, List<ShardStats>> entry : tmpIndexShards.entrySet()) {
+ indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStats[entry.getValue().size()])));
+ }
+ return indexShards;
+ }
+
+ @Override
+ public Iterator<IndexShardStats> iterator() {
+ return getIndexShards().values().iterator();
+ }
+
+ private CommonStats total = null;
+
+ public CommonStats getTotal() {
+ if (total != null) {
+ return total;
+ }
+ CommonStats stats = new CommonStats();
+ for (ShardStats shard : shards) {
+ stats.add(shard.getStats());
+ }
+ total = stats;
+ return stats;
+ }
+
+ private CommonStats primary = null;
+
+ public CommonStats getPrimaries() {
+ if (primary != null) {
+ return primary;
+ }
+ CommonStats stats = new CommonStats();
+ for (ShardStats shard : shards) {
+ if (shard.getShardRouting().primary()) {
+ stats.add(shard.getStats());
+ }
+ }
+ primary = stats;
+ return stats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java
new file mode 100644
index 0000000..327c605
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action definition for retrieving index-level stats.
+ */
+public class IndicesStatsAction extends IndicesAction<IndicesStatsRequest, IndicesStatsResponse, IndicesStatsRequestBuilder> {
+
+ public static final IndicesStatsAction INSTANCE = new IndicesStatsAction();
+ public static final String NAME = "indices/stats";
+
+ private IndicesStatsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public IndicesStatsResponse newResponse() {
+ return new IndicesStatsResponse();
+ }
+
+ @Override
+ public IndicesStatsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new IndicesStatsRequestBuilder(client);
+ }
+}
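
The singleton above is how the action is addressed programmatically. A hedged sketch, assuming the generic execute(action, request) entry point that the admin client interfaces expose in this codebase (indicesAdminClient is assumed to be in scope):

    // Execute the stats action directly via its singleton; the typed
    // convenience method stats(request, listener) would work as well.
    IndicesStatsRequest request = new IndicesStatsRequest();
    IndicesStatsResponse response =
            indicesAdminClient.execute(IndicesStatsAction.INSTANCE, request).actionGet();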
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java
new file mode 100644
index 0000000..13a6269
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request for index-level stats. Individual statistics can be enabled or
+ * disabled per request.
+ * <p>
+ * By default, all statistics are enabled.
+ * <p>
+ * All stats can be cleared using {@link #clear()}, after which specific
+ * stats can be re-enabled.
+ */
+public class IndicesStatsRequest extends BroadcastOperationRequest<IndicesStatsRequest> {
+
+ private CommonStatsFlags flags = new CommonStatsFlags();
+
+ /**
+ * Sets all flags to return all stats.
+ */
+ public IndicesStatsRequest all() {
+ flags.all();
+ return this;
+ }
+
+ /**
+ * Clears all stats.
+ */
+ public IndicesStatsRequest clear() {
+ flags.clear();
+ return this;
+ }
+
+ /**
+ * Sets the document types to return stats for. Mainly affects
+ * {@link #indexing(boolean)}: when indexing stats are enabled, per-type
+ * indexing stats are returned for these types.
+ */
+ public IndicesStatsRequest types(String... types) {
+ flags.types(types);
+ return this;
+ }
+
+ /**
+ * The document types to return stats for.
+ */
+ public String[] types() {
+ return this.flags.types();
+ }
+
+ /**
+ * Sets the search groups to retrieve stats for. Only meaningful when
+ * search stats are enabled.
+ */
+ public IndicesStatsRequest groups(String... groups) {
+ flags.groups(groups);
+ return this;
+ }
+
+ public String[] groups() {
+ return this.flags.groups();
+ }
+
+ public IndicesStatsRequest docs(boolean docs) {
+ flags.set(Flag.Docs, docs);
+ return this;
+ }
+
+ public boolean docs() {
+ return flags.isSet(Flag.Docs);
+ }
+
+ public IndicesStatsRequest store(boolean store) {
+ flags.set(Flag.Store, store);
+ return this;
+ }
+
+ public boolean store() {
+ return flags.isSet(Flag.Store);
+ }
+
+ public IndicesStatsRequest indexing(boolean indexing) {
+ flags.set(Flag.Indexing, indexing);
+ return this;
+ }
+
+ public boolean indexing() {
+ return flags.isSet(Flag.Indexing);
+ }
+
+ public IndicesStatsRequest get(boolean get) {
+ flags.set(Flag.Get, get);
+ return this;
+ }
+
+ public boolean get() {
+ return flags.isSet(Flag.Get);
+ }
+
+ public IndicesStatsRequest search(boolean search) {
+ flags.set(Flag.Search, search);
+ return this;
+ }
+
+ public boolean search() {
+ return flags.isSet(Flag.Search);
+ }
+
+ public IndicesStatsRequest merge(boolean merge) {
+ flags.set(Flag.Merge, merge);
+ return this;
+ }
+
+ public boolean merge() {
+ return flags.isSet(Flag.Merge);
+ }
+
+ public IndicesStatsRequest refresh(boolean refresh) {
+ flags.set(Flag.Refresh, refresh);
+ return this;
+ }
+
+ public boolean refresh() {
+ return flags.isSet(Flag.Refresh);
+ }
+
+ public IndicesStatsRequest flush(boolean flush) {
+ flags.set(Flag.Flush, flush);
+ return this;
+ }
+
+ public boolean flush() {
+ return flags.isSet(Flag.Flush);
+ }
+
+ public IndicesStatsRequest warmer(boolean warmer) {
+ flags.set(Flag.Warmer, warmer);
+ return this;
+ }
+
+ public boolean warmer() {
+ return flags.isSet(Flag.Warmer);
+ }
+
+ public IndicesStatsRequest filterCache(boolean filterCache) {
+ flags.set(Flag.FilterCache, filterCache);
+ return this;
+ }
+
+ public boolean filterCache() {
+ return flags.isSet(Flag.FilterCache);
+ }
+
+ public IndicesStatsRequest idCache(boolean idCache) {
+ flags.set(Flag.IdCache, idCache);
+ return this;
+ }
+
+ public boolean idCache() {
+ return flags.isSet(Flag.IdCache);
+ }
+
+ public IndicesStatsRequest fieldData(boolean fieldData) {
+ flags.set(Flag.FieldData, fieldData);
+ return this;
+ }
+
+ public boolean fieldData() {
+ return flags.isSet(Flag.FieldData);
+ }
+
+ public IndicesStatsRequest percolate(boolean percolate) {
+ flags.set(Flag.Percolate, percolate);
+ return this;
+ }
+
+ public boolean percolate() {
+ return flags.isSet(Flag.Percolate);
+ }
+
+ public IndicesStatsRequest segments(boolean segments) {
+ flags.set(Flag.Segments, segments);
+ return this;
+ }
+
+ public boolean segments() {
+ return flags.isSet(Flag.Segments);
+ }
+
+ public IndicesStatsRequest fieldDataFields(String... fieldDataFields) {
+ flags.fieldDataFields(fieldDataFields);
+ return this;
+ }
+
+ public String[] fieldDataFields() {
+ return flags.fieldDataFields();
+ }
+
+ public IndicesStatsRequest completion(boolean completion) {
+ flags.set(Flag.Completion, completion);
+ return this;
+ }
+
+ public boolean completion() {
+ return flags.isSet(Flag.Completion);
+ }
+
+ public IndicesStatsRequest completionFields(String... completionDataFields) {
+ flags.completionDataFields(completionDataFields);
+ return this;
+ }
+
+ public String[] completionFields() {
+ return flags.completionDataFields();
+ }
+
+ public IndicesStatsRequest translog(boolean translog) {
+ flags.set(Flag.Translog, translog);
+ return this;
+ }
+
+ public boolean translog() {
+ return flags.isSet(Flag.Translog);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ flags.writeTo(out);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ flags = CommonStatsFlags.readCommonStatsFlags(in);
+ }
+}
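
Since every flag defaults to enabled, the usual narrowing pattern is to clear first and opt back in. A minimal sketch using only the fluent methods defined above:

    // Request only doc counts and store size instead of the all-on default.
    IndicesStatsRequest request = new IndicesStatsRequest()
            .clear()       // disable everything
            .docs(true)    // re-enable document counts
            .store(true);  // re-enable on-disk store size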
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
new file mode 100644
index 0000000..2740779
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A request builder for index-level stats. Individual statistics can be
+ * enabled or disabled per request.
+ * <p>
+ * By default, {@link #setDocs(boolean)}, {@link #setStore(boolean)} and
+ * {@link #setIndexing(boolean)} are enabled; other stats can be enabled as
+ * well.
+ * <p>
+ * All stats can be cleared using {@link #clear()}, after which specific
+ * stats can be re-enabled.
+ */
+public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder<IndicesStatsRequest, IndicesStatsResponse, IndicesStatsRequestBuilder> {
+
+ public IndicesStatsRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new IndicesStatsRequest());
+ }
+
+ /**
+ * Sets all flags to return all stats.
+ */
+ public IndicesStatsRequestBuilder all() {
+ request.all();
+ return this;
+ }
+
+ /**
+ * Clears all stats.
+ */
+ public IndicesStatsRequestBuilder clear() {
+ request.clear();
+ return this;
+ }
+
+ /**
+ * Sets the document types to return stats for. Mainly affects
+ * {@link #setIndexing(boolean)}: when indexing stats are enabled, per-type
+ * indexing stats are returned for these types.
+ */
+ public IndicesStatsRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setGroups(String... groups) {
+ request.groups(groups);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setDocs(boolean docs) {
+ request.docs(docs);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setStore(boolean store) {
+ request.store(store);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setIndexing(boolean indexing) {
+ request.indexing(indexing);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setGet(boolean get) {
+ request.get(get);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setSearch(boolean search) {
+ request.search(search);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setMerge(boolean merge) {
+ request.merge(merge);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setFlush(boolean flush) {
+ request.flush(flush);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setWarmer(boolean warmer) {
+ request.warmer(warmer);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setFilterCache(boolean filterCache) {
+ request.filterCache(filterCache);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setIdCache(boolean idCache) {
+ request.idCache(idCache);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setFieldData(boolean fieldData) {
+ request.fieldData(fieldData);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setFieldDataFields(String... fields) {
+ request.fieldDataFields(fields);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
+ request.percolate(percolate);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setSegments(boolean segments) {
+ request.segments(segments);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setCompletion(boolean completion) {
+ request.completion(completion);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setCompletionFields(String... fields) {
+ request.completionFields(fields);
+ return this;
+ }
+
+ public IndicesStatsRequestBuilder setTranslog(boolean translog) {
+ request.translog(translog);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<IndicesStatsResponse> listener) {
+ ((IndicesAdminClient) client).stats(request, listener);
+ }
+}
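
The same narrowing pattern through the builder, as a hedged sketch: the client variable is assumed to be an already-connected Client, and prepareStats is assumed to be the conventional factory on the admin client (not part of this diff) that hands back the builder above:

    // Fetch only search stats for one index, broken out per stats group.
    IndicesStatsResponse response = client.admin().indices()
            .prepareStats("my_index")      // index name is illustrative
            .clear()
            .setSearch(true)
            .setGroups("my_group")         // group name is illustrative
            .execute()
            .actionGet();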
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
new file mode 100644
index 0000000..f2f10c4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Response to an indices stats request, holding the raw per-shard stats plus
+ * lazily computed per-index and cluster-wide aggregations.
+ */
+public class IndicesStatsResponse extends BroadcastOperationResponse implements ToXContent {
+
+ private ShardStats[] shards;
+
+ private ImmutableMap<ShardRouting, CommonStats> shardStatsMap;
+
+ IndicesStatsResponse() {
+ }
+
+ IndicesStatsResponse(ShardStats[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.shards = shards;
+ }
+
+ public ImmutableMap<ShardRouting, CommonStats> asMap() {
+ if (shardStatsMap == null) {
+ ImmutableMap.Builder<ShardRouting, CommonStats> mb = ImmutableMap.builder();
+ for (ShardStats ss : shards) {
+ mb.put(ss.getShardRouting(), ss.getStats());
+ }
+
+ shardStatsMap = mb.build();
+ }
+ return shardStatsMap;
+ }
+
+ public ShardStats[] getShards() {
+ return this.shards;
+ }
+
+ public ShardStats getAt(int position) {
+ return shards[position];
+ }
+
+ public IndexStats getIndex(String index) {
+ return getIndices().get(index);
+ }
+
+ private Map<String, IndexStats> indicesStats;
+
+ public Map<String, IndexStats> getIndices() {
+ if (indicesStats != null) {
+ return indicesStats;
+ }
+ Map<String, IndexStats> indicesStats = Maps.newHashMap();
+
+ Set<String> indices = Sets.newHashSet();
+ for (ShardStats shard : shards) {
+ indices.add(shard.getIndex());
+ }
+
+ for (String index : indices) {
+ List<ShardStats> shards = Lists.newArrayList();
+ for (ShardStats shard : this.shards) {
+ if (shard.getShardRouting().index().equals(index)) {
+ shards.add(shard);
+ }
+ }
+ indicesStats.put(index, new IndexStats(index, shards.toArray(new ShardStats[shards.size()])));
+ }
+ this.indicesStats = indicesStats;
+ return indicesStats;
+ }
+
+ private CommonStats total = null;
+
+ public CommonStats getTotal() {
+ if (total != null) {
+ return total;
+ }
+ CommonStats stats = new CommonStats();
+ for (ShardStats shard : shards) {
+ stats.add(shard.getStats());
+ }
+ total = stats;
+ return stats;
+ }
+
+ private CommonStats primary = null;
+
+ public CommonStats getPrimaries() {
+ if (primary != null) {
+ return primary;
+ }
+ CommonStats stats = new CommonStats();
+ for (ShardStats shard : shards) {
+ if (shard.getShardRouting().primary()) {
+ stats.add(shard.getStats());
+ }
+ }
+ primary = stats;
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shards = new ShardStats[in.readVInt()];
+ for (int i = 0; i < shards.length; i++) {
+ shards[i] = ShardStats.readShardStats(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shards.length);
+ for (ShardStats shard : shards) {
+ shard.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ String level = params.param("level", "indices");
+ boolean isLevelValid = "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level) || "cluster".equalsIgnoreCase(level);
+ if (!isLevelValid) {
+ return builder;
+ }
+
+ builder.startObject("_all");
+
+ builder.startObject("primaries");
+ getPrimaries().toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject("total");
+ getTotal().toXContent(builder, params);
+ builder.endObject();
+
+ builder.endObject();
+
+ if ("indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level)) {
+ builder.startObject(Fields.INDICES);
+ for (IndexStats indexStats : getIndices().values()) {
+ builder.startObject(indexStats.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.startObject("primaries");
+ indexStats.getPrimaries().toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject("total");
+ indexStats.getTotal().toXContent(builder, params);
+ builder.endObject();
+
+ if ("shards".equalsIgnoreCase(level)) {
+ builder.startObject(Fields.SHARDS);
+ for (IndexShardStats indexShardStats : indexStats) {
+ builder.startArray(Integer.toString(indexShardStats.getShardId().id()));
+ for (ShardStats shardStats : indexShardStats) {
+ builder.startObject();
+ shardStats.toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
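
toXContent above branches on a level parameter (cluster, indices, or shards). A sketch of rendering at shard granularity, mirroring the toString() helper but with explicit params; ToXContent.MapParams is assumed to be the stock Params wrapper in this codebase, and response is assumed to be in scope:

    // Serialize the response at level=shards (throws IOException).
    Map<String, String> params = new HashMap<String, String>();
    params.put("level", "shards");
    XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
    builder.startObject();
    response.toXContent(builder, new ToXContent.MapParams(params));
    builder.endObject();
    System.out.println(builder.string());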
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
new file mode 100644
index 0000000..02fc179
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.shard.service.IndexShard;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
+
+/**
+ * Stats for a single shard copy, together with its routing information.
+ */
+public class ShardStats extends BroadcastShardOperationResponse implements ToXContent {
+
+ private ShardRouting shardRouting;
+
+ CommonStats stats;
+
+ ShardStats() {
+ }
+
+ public ShardStats(IndexShard indexShard, CommonStatsFlags flags) {
+ super(indexShard.routingEntry().index(), indexShard.routingEntry().id());
+ this.shardRouting = indexShard.routingEntry();
+ this.stats = new CommonStats(indexShard, flags);
+ }
+
+ /**
+ * The shard routing information (cluster wide shard state).
+ */
+ public ShardRouting getShardRouting() {
+ return this.shardRouting;
+ }
+
+ public CommonStats getStats() {
+ return this.stats;
+ }
+
+ public static ShardStats readShardStats(StreamInput in) throws IOException {
+ ShardStats stats = new ShardStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardRouting = readShardRoutingEntry(in);
+ stats = CommonStats.readCommonStats(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardRouting.writeTo(out);
+ stats.writeTo(out);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.ROUTING)
+ .field(Fields.STATE, shardRouting.state())
+ .field(Fields.PRIMARY, shardRouting.primary())
+ .field(Fields.NODE, shardRouting.currentNodeId())
+ .field(Fields.RELOCATING_NODE, shardRouting.relocatingNodeId())
+ .endObject();
+
+ stats.toXContent(builder, params);
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
+ static final XContentBuilderString STATE = new XContentBuilderString("state");
+ static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
+ static final XContentBuilderString NODE = new XContentBuilderString("node");
+ static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
new file mode 100644
index 0000000..26511e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.InternalIndexService;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Transport action that broadcasts a stats request to all assigned shards and
+ * aggregates the per-shard responses.
+ */
+public class TransportIndicesStatsAction extends TransportBroadcastOperationAction<IndicesStatsRequest, IndicesStatsResponse, TransportIndicesStatsAction.IndexShardStatsRequest, ShardStats> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportIndicesStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return IndicesStatsAction.NAME;
+ }
+
+ @Override
+ protected IndicesStatsRequest newRequest() {
+ return new IndicesStatsRequest();
+ }
+
+ /**
+ * Stats are gathered across *all* assigned shards, primaries and replicas.
+ */
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, IndicesStatsRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, IndicesStatsRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesStatsRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+
+
+ @Override
+ protected IndicesStatsResponse newResponse(IndicesStatsRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ final List<ShardStats> shards = Lists.newArrayList();
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // simply ignore non-active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ shards.add((ShardStats) shardResponse);
+ successfulShards++;
+ }
+ }
+ return new IndicesStatsResponse(shards.toArray(new ShardStats[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected IndexShardStatsRequest newShardRequest() {
+ return new IndexShardStatsRequest();
+ }
+
+ @Override
+ protected IndexShardStatsRequest newShardRequest(ShardRouting shard, IndicesStatsRequest request) {
+ return new IndexShardStatsRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardStats newShardResponse() {
+ return new ShardStats();
+ }
+
+ @Override
+ protected ShardStats shardOperation(IndexShardStatsRequest request) throws ElasticsearchException {
+ InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.index());
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
+
+ CommonStatsFlags flags = new CommonStatsFlags().clear();
+
+ if (request.request.docs()) {
+ flags.set(CommonStatsFlags.Flag.Docs);
+ }
+ if (request.request.store()) {
+ flags.set(CommonStatsFlags.Flag.Store);
+ }
+ if (request.request.indexing()) {
+ flags.set(CommonStatsFlags.Flag.Indexing);
+ flags.types(request.request.types());
+ }
+ if (request.request.get()) {
+ flags.set(CommonStatsFlags.Flag.Get);
+ }
+ if (request.request.search()) {
+ flags.set(CommonStatsFlags.Flag.Search);
+ flags.groups(request.request.groups());
+ }
+ if (request.request.merge()) {
+ flags.set(CommonStatsFlags.Flag.Merge);
+ }
+ if (request.request.refresh()) {
+ flags.set(CommonStatsFlags.Flag.Refresh);
+ }
+ if (request.request.flush()) {
+ flags.set(CommonStatsFlags.Flag.Flush);
+ }
+ if (request.request.warmer()) {
+ flags.set(CommonStatsFlags.Flag.Warmer);
+ }
+ if (request.request.filterCache()) {
+ flags.set(CommonStatsFlags.Flag.FilterCache);
+ }
+ if (request.request.idCache()) {
+ flags.set(CommonStatsFlags.Flag.IdCache);
+ }
+ if (request.request.fieldData()) {
+ flags.set(CommonStatsFlags.Flag.FieldData);
+ flags.fieldDataFields(request.request.fieldDataFields());
+ }
+ if (request.request.percolate()) {
+ flags.set(CommonStatsFlags.Flag.Percolate);
+ }
+ if (request.request.segments()) {
+ flags.set(CommonStatsFlags.Flag.Segments);
+ }
+ if (request.request.completion()) {
+ flags.set(CommonStatsFlags.Flag.Completion);
+ flags.completionDataFields(request.request.completionFields());
+ }
+ if (request.request.translog()) {
+ flags.set(CommonStatsFlags.Flag.Translog);
+ }
+
+ return new ShardStats(indexShard, flags);
+ }
+
+ public static class IndexShardStatsRequest extends BroadcastShardOperationRequest {
+
+ // TODO if there are many indices, the request might hold a large indices array..., we don't really need to serialize it
+ IndicesStatsRequest request;
+
+ IndexShardStatsRequest() {
+ }
+
+ IndexShardStatsRequest(String index, int shardId, IndicesStatsRequest request) {
+ super(index, shardId, request);
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = new IndicesStatsRequest();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/DocsStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/DocsStatus.java
new file mode 100644
index 0000000..541ace0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/DocsStatus.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+/**
+ * Document counts for a shard or an index.
+ */
+public class DocsStatus {
+
+ long numDocs = 0;
+ long maxDoc = 0;
+ long deletedDocs = 0;
+
+ /**
+ * The number of live (not deleted) docs.
+ */
+ public long getNumDocs() {
+ return numDocs;
+ }
+
+ /**
+ * The max doc, i.e. the total number of docs including deleted ones.
+ */
+ public long getMaxDoc() {
+ return maxDoc;
+ }
+
+ /**
+ * The number of deleted docs in the index.
+ */
+ public long getDeletedDocs() {
+ return deletedDocs;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/GatewayRecoveryStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/GatewayRecoveryStatus.java
new file mode 100644
index 0000000..6c6c18f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/GatewayRecoveryStatus.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Progress information for a shard recovery from the gateway.
+ */
+public class GatewayRecoveryStatus {
+
+ public enum Stage {
+ INIT((byte) 0),
+ INDEX((byte) 1),
+ TRANSLOG((byte) 2),
+ FINALIZE((byte) 3),
+ DONE((byte) 4);
+
+ private final byte value;
+
+ Stage(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return value;
+ }
+
+ public static Stage fromValue(byte value) {
+ if (value == 0) {
+ return INIT;
+ } else if (value == 1) {
+ return INDEX;
+ } else if (value == 2) {
+ return TRANSLOG;
+ } else if (value == 3) {
+ return FINALIZE;
+ } else if (value == 4) {
+ return DONE;
+ }
+ throw new ElasticsearchIllegalArgumentException("No stage found for [" + value + ']');
+ }
+ }
+
+ final Stage stage;
+
+ final long startTime;
+
+ final long time;
+
+ final long indexSize;
+
+ final long reusedIndexSize;
+
+ final long recoveredIndexSize;
+
+ final long recoveredTranslogOperations;
+
+ public GatewayRecoveryStatus(Stage stage, long startTime, long time, long indexSize, long reusedIndexSize,
+ long recoveredIndexSize, long recoveredTranslogOperations) {
+ this.stage = stage;
+ this.startTime = startTime;
+ this.time = time;
+ this.indexSize = indexSize;
+ this.reusedIndexSize = reusedIndexSize;
+ this.recoveredIndexSize = recoveredIndexSize;
+ this.recoveredTranslogOperations = recoveredTranslogOperations;
+ }
+
+ public Stage getStage() {
+ return this.stage;
+ }
+
+ public long getStartTime() {
+ return this.startTime;
+ }
+
+ public TimeValue getTime() {
+ return TimeValue.timeValueMillis(time);
+ }
+
+ public ByteSizeValue getIndexSize() {
+ return new ByteSizeValue(indexSize);
+ }
+
+ public ByteSizeValue getReusedIndexSize() {
+ return new ByteSizeValue(reusedIndexSize);
+ }
+
+ public ByteSizeValue getExpectedRecoveredIndexSize() {
+ return new ByteSizeValue(indexSize - reusedIndexSize);
+ }
+
+ /**
+ * How much of the index has been recovered.
+ */
+ public ByteSizeValue getRecoveredIndexSize() {
+ return new ByteSizeValue(recoveredIndexSize);
+ }
+
+ public int getIndexRecoveryProgress() {
+ if (recoveredIndexSize == 0) {
+ if (indexSize != 0 && indexSize == reusedIndexSize) {
+ return 100;
+ }
+ return 0;
+ }
+ return (int) (((double) recoveredIndexSize) / getExpectedRecoveredIndexSize().bytes() * 100);
+ }
+
+ public long getRecoveredTranslogOperations() {
+ return recoveredTranslogOperations;
+ }
+}
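
A worked example of the progress computation above: the bytes expected to be recovered are indexSize minus reusedIndexSize, and progress is recovered bytes over that expectation. All values below are illustrative:

    // expected = 100 MB - 60 MB = 40 MB; progress = 20 MB / 40 MB = 50%
    long mb = 1024L * 1024L;
    GatewayRecoveryStatus status = new GatewayRecoveryStatus(
            GatewayRecoveryStatus.Stage.INDEX,
            System.currentTimeMillis(), // startTime
            5000,                       // time spent so far, in millis
            100 * mb,                   // indexSize
            60 * mb,                    // reusedIndexSize
            20 * mb,                    // recoveredIndexSize
            0);                         // recoveredTranslogOperations
    assert status.getIndexRecoveryProgress() == 50;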
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/GatewaySnapshotStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/GatewaySnapshotStatus.java
new file mode 100644
index 0000000..1bcd48b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/GatewaySnapshotStatus.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Progress information for a gateway snapshot of a shard.
+ */
+public class GatewaySnapshotStatus {
+
+ public enum Stage {
+ NONE((byte) 0),
+ INDEX((byte) 1),
+ TRANSLOG((byte) 2),
+ FINALIZE((byte) 3),
+ DONE((byte) 4),
+ FAILURE((byte) 5);
+
+ private final byte value;
+
+ Stage(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return this.value;
+ }
+
+ public static Stage fromValue(byte value) {
+ if (value == 0) {
+ return Stage.NONE;
+ } else if (value == 1) {
+ return Stage.INDEX;
+ } else if (value == 2) {
+ return Stage.TRANSLOG;
+ } else if (value == 3) {
+ return Stage.FINALIZE;
+ } else if (value == 4) {
+ return Stage.DONE;
+ } else if (value == 5) {
+ return Stage.FAILURE;
+ }
+ throw new ElasticsearchIllegalArgumentException("No stage found for [" + value + "]");
+ }
+ }
+
+ final Stage stage;
+
+ final long startTime;
+
+ final long time;
+
+ final long indexSize;
+
+ final int expectedNumberOfOperations;
+
+ public GatewaySnapshotStatus(Stage stage, long startTime, long time, long indexSize, int expectedNumberOfOperations) {
+ this.stage = stage;
+ this.startTime = startTime;
+ this.time = time;
+ this.indexSize = indexSize;
+ this.expectedNumberOfOperations = expectedNumberOfOperations;
+ }
+
+ public Stage getStage() {
+ return this.stage;
+ }
+
+ public long getStartTime() {
+ return this.startTime;
+ }
+
+ public TimeValue getTime() {
+ return TimeValue.timeValueMillis(time);
+ }
+
+ public ByteSizeValue getIndexSize() {
+ return new ByteSizeValue(indexSize);
+ }
+
+ public int getExpectedNumberOfOperations() {
+ return expectedNumberOfOperations;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java
new file mode 100644
index 0000000..5ad644b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/IndexShardStatus.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.Iterator;
+
+/**
+ * Status of a single shard id, aggregated over all copies (the replication
+ * group) of that shard.
+ */
+public class IndexShardStatus implements Iterable<ShardStatus> {
+
+ private final ShardId shardId;
+
+ private final ShardStatus[] shards;
+
+ IndexShardStatus(ShardId shardId, ShardStatus[] shards) {
+ this.shardId = shardId;
+ this.shards = shards;
+ }
+
+ public ShardId getShardId() {
+ return this.shardId;
+ }
+
+ public ShardStatus[] getShards() {
+ return this.shards;
+ }
+
+ public ShardStatus getAt(int position) {
+ return shards[position];
+ }
+
+ /**
+ * Returns the store size, in bytes, of the primary shards only.
+ */
+ public ByteSizeValue getPrimaryStoreSize() {
+ long bytes = -1;
+ for (ShardStatus shard : getShards()) {
+ if (!shard.getShardRouting().primary()) {
+ // only sum store size for the primaries
+ continue;
+ }
+ if (shard.getStoreSize() != null) {
+ if (bytes == -1) {
+ bytes = 0;
+ }
+ bytes += shard.getStoreSize().bytes();
+ }
+ }
+ if (bytes == -1) {
+ return null;
+ }
+ return new ByteSizeValue(bytes);
+ }
+
+ /**
+ * Returns the total store size, in bytes, of both primaries and replicas.
+ */
+ public ByteSizeValue getStoreSize() {
+ long bytes = -1;
+ for (ShardStatus shard : getShards()) {
+ if (shard.getStoreSize() != null) {
+ if (bytes == -1) {
+ bytes = 0;
+ }
+ bytes += shard.getStoreSize().bytes();
+ }
+ }
+ if (bytes == -1) {
+ return null;
+ }
+ return new ByteSizeValue(bytes);
+ }
+
+ public long getTranslogOperations() {
+ long translogOperations = -1;
+ for (ShardStatus shard : getShards()) {
+ if (shard.getTranslogOperations() != -1) {
+ if (translogOperations == -1) {
+ translogOperations = 0;
+ }
+ translogOperations += shard.getTranslogOperations();
+ }
+ }
+ return translogOperations;
+ }
+
+ private transient DocsStatus docs;
+
+ public DocsStatus getDocs() {
+ if (docs != null) {
+ return docs;
+ }
+ DocsStatus docs = null;
+ for (ShardStatus shard : getShards()) {
+ if (!shard.getShardRouting().primary()) {
+ // only sum docs for the primaries
+ continue;
+ }
+ if (shard.getDocs() == null) {
+ continue;
+ }
+ if (docs == null) {
+ docs = new DocsStatus();
+ }
+ docs.numDocs += shard.getDocs().getNumDocs();
+ docs.maxDoc += shard.getDocs().getMaxDoc();
+ docs.deletedDocs += shard.getDocs().getDeletedDocs();
+ }
+ this.docs = docs;
+ return this.docs;
+ }
+
+ /**
+ * Total merges of this shard replication group.
+ */
+ public MergeStats getMergeStats() {
+ MergeStats mergeStats = new MergeStats();
+ for (ShardStatus shard : shards) {
+ mergeStats.add(shard.getMergeStats());
+ }
+ return mergeStats;
+ }
+
+ public RefreshStats getRefreshStats() {
+ RefreshStats refreshStats = new RefreshStats();
+ for (ShardStatus shard : shards) {
+ refreshStats.add(shard.getRefreshStats());
+ }
+ return refreshStats;
+ }
+
+ public FlushStats getFlushStats() {
+ FlushStats flushStats = new FlushStats();
+ for (ShardStatus shard : shards) {
+ flushStats.add(shard.flushStats);
+ }
+ return flushStats;
+ }
+
+ @Override
+ public Iterator<ShardStatus> iterator() {
+ return Iterators.forArray(shards);
+ }
+
+} \ No newline at end of file
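
The aggregation methods above use -1 as a "nothing reported" sentinel and surface it as a null ByteSizeValue (or -1 for translog operations), so callers must null-check. A small sketch, with indexShardStatus assumed to be in scope:

    ByteSizeValue size = indexShardStatus.getStoreSize();
    if (size == null) {
        System.out.println("no shard in this group reported a store size");
    } else {
        System.out.println("replication group store size: " + size);
    }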
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/IndexStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/IndexStatus.java
new file mode 100644
index 0000000..334d907
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/IndexStatus.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.refresh.RefreshStats;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Status of a single index, aggregated over its shards.
+ */
+public class IndexStatus implements Iterable<IndexShardStatus> {
+
+ private final String index;
+
+ private final Map<Integer, IndexShardStatus> indexShards;
+
+ IndexStatus(String index, ShardStatus[] shards) {
+ this.index = index;
+
+ Map<Integer, List<ShardStatus>> tmpIndexShards = Maps.newHashMap();
+ for (ShardStatus shard : shards) {
+ List<ShardStatus> lst = tmpIndexShards.get(shard.getShardRouting().id());
+ if (lst == null) {
+ lst = newArrayList();
+ tmpIndexShards.put(shard.getShardRouting().id(), lst);
+ }
+ lst.add(shard);
+ }
+ indexShards = Maps.newHashMap();
+ for (Map.Entry<Integer, List<ShardStatus>> entry : tmpIndexShards.entrySet()) {
+ indexShards.put(entry.getKey(), new IndexShardStatus(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStatus[entry.getValue().size()])));
+ }
+ }
+
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * A map from shard id to index shard status. An index shard status represents
+ * the replication group of copies that share that shard id.
+ */
+ public Map<Integer, IndexShardStatus> getShards() {
+ return this.indexShards;
+ }
+
+ /**
+ * Returns the store size, in bytes, of the primary shards only.
+ */
+ public ByteSizeValue getPrimaryStoreSize() {
+ long bytes = -1;
+ for (IndexShardStatus shard : this) {
+ if (shard.getPrimaryStoreSize() != null) {
+ if (bytes == -1) {
+ bytes = 0;
+ }
+ bytes += shard.getPrimaryStoreSize().bytes();
+ }
+ }
+ if (bytes == -1) {
+ return null;
+ }
+ return new ByteSizeValue(bytes);
+ }
+
+ /**
+ * Returns the total store size, in bytes, of both primaries and replicas.
+ */
+ public ByteSizeValue getStoreSize() {
+ long bytes = -1;
+ for (IndexShardStatus shard : this) {
+ if (shard.getStoreSize() != null) {
+ if (bytes == -1) {
+ bytes = 0;
+ }
+ bytes += shard.getStoreSize().bytes();
+ }
+ }
+ if (bytes == -1) {
+ return null;
+ }
+ return new ByteSizeValue(bytes);
+ }
+
+ public long getTranslogOperations() {
+ long translogOperations = -1;
+ for (IndexShardStatus shard : this) {
+ if (shard.getTranslogOperations() != -1) {
+ if (translogOperations == -1) {
+ translogOperations = 0;
+ }
+ translogOperations += shard.getTranslogOperations();
+ }
+ }
+ return translogOperations;
+ }
+
+ private transient DocsStatus docs;
+
+ public DocsStatus getDocs() {
+ if (docs != null) {
+ return docs;
+ }
+ DocsStatus docs = null;
+ for (IndexShardStatus shard : this) {
+ if (shard.getDocs() == null) {
+ continue;
+ }
+ if (docs == null) {
+ docs = new DocsStatus();
+ }
+ docs.numDocs += shard.getDocs().getNumDocs();
+ docs.maxDoc += shard.getDocs().getMaxDoc();
+ docs.deletedDocs += shard.getDocs().getDeletedDocs();
+ }
+ this.docs = docs;
+ return docs;
+ }
+
+ /**
+ * Total merges of this index.
+ */
+ public MergeStats getMergeStats() {
+ MergeStats mergeStats = new MergeStats();
+ for (IndexShardStatus shard : this) {
+ mergeStats.add(shard.getMergeStats());
+ }
+ return mergeStats;
+ }
+
+ public RefreshStats getRefreshStats() {
+ RefreshStats refreshStats = new RefreshStats();
+ for (IndexShardStatus shard : this) {
+ refreshStats.add(shard.getRefreshStats());
+ }
+ return refreshStats;
+ }
+
+ public FlushStats getFlushStats() {
+ FlushStats flushStats = new FlushStats();
+ for (IndexShardStatus shard : this) {
+ flushStats.add(shard.getFlushStats());
+ }
+ return flushStats;
+ }
+
+ @Override
+ public Iterator<IndexShardStatus> iterator() {
+ return indexShards.values().iterator();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusAction.java b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusAction.java
new file mode 100644
index 0000000..8c394fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action definition for retrieving indices status.
+ */
+public class IndicesStatusAction extends IndicesAction<IndicesStatusRequest, IndicesStatusResponse, IndicesStatusRequestBuilder> {
+
+ public static final IndicesStatusAction INSTANCE = new IndicesStatusAction();
+ public static final String NAME = "indices/status";
+
+ private IndicesStatusAction() {
+ super(NAME);
+ }
+
+ @Override
+ public IndicesStatusResponse newResponse() {
+ return new IndicesStatusResponse();
+ }
+
+ @Override
+ public IndicesStatusRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new IndicesStatusRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequest.java
new file mode 100644
index 0000000..ff2045d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request for the status of one or more indices, optionally including
+ * recovery and gateway snapshot details.
+ */
+public class IndicesStatusRequest extends BroadcastOperationRequest<IndicesStatusRequest> {
+
+ private boolean recovery = false;
+
+ private boolean snapshot = false;
+
+ public IndicesStatusRequest() {
+ this(Strings.EMPTY_ARRAY);
+ }
+
+ public IndicesStatusRequest(String... indices) {
+ super(indices);
+ }
+
+ /**
+ * Should the status include recovery information. Defaults to <tt>false</tt>.
+ */
+ public IndicesStatusRequest recovery(boolean recovery) {
+ this.recovery = recovery;
+ return this;
+ }
+
+ public boolean recovery() {
+ return this.recovery;
+ }
+
+ /**
+ * Should the status include gateway snapshot information. Defaults to <tt>false</tt>.
+ */
+ public IndicesStatusRequest snapshot(boolean snapshot) {
+ this.snapshot = snapshot;
+ return this;
+ }
+
+ public boolean snapshot() {
+ return this.snapshot;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(recovery);
+ out.writeBoolean(snapshot);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recovery = in.readBoolean();
+ snapshot = in.readBoolean();
+ }
+}
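
A minimal sketch of the request above, asking for the status of two indices including recovery and gateway snapshot details (both default to false); the index names are illustrative:

    IndicesStatusRequest request = new IndicesStatusRequest("logs", "metrics")
            .recovery(true)
            .snapshot(true);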
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequestBuilder.java
new file mode 100644
index 0000000..b93d6fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusRequestBuilder.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A request builder for indices status.
+ */
+public class IndicesStatusRequestBuilder extends BroadcastOperationRequestBuilder<IndicesStatusRequest, IndicesStatusResponse, IndicesStatusRequestBuilder> {
+
+ public IndicesStatusRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new IndicesStatusRequest());
+ }
+
+ /**
+ * Should the status include recovery information. Defaults to <tt>false</tt>.
+ */
+ public IndicesStatusRequestBuilder setRecovery(boolean recovery) {
+ request.recovery(recovery);
+ return this;
+ }
+
+ /**
+ * Should the status include gateway snapshot information. Defaults to <tt>false</tt>.
+ */
+ public IndicesStatusRequestBuilder setSnapshot(boolean snapshot) {
+ request.snapshot(snapshot);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<IndicesStatusResponse> listener) {
+ ((IndicesAdminClient) client).status(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java
new file mode 100644
index 0000000..79e6092
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/IndicesStatusResponse.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.refresh.RefreshStats;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.action.admin.indices.status.ShardStatus.readIndexShardStatus;
+
+/**
+ * The response of an indices status request, exposing the collected shard
+ * statuses both as a flat array and grouped by index.
+ */
+public class IndicesStatusResponse extends BroadcastOperationResponse implements ToXContent {
+
+ protected ShardStatus[] shards;
+
+ private Map<String, IndexStatus> indicesStatus;
+
+ IndicesStatusResponse() {
+ }
+
+ IndicesStatusResponse(ShardStatus[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.shards = shards;
+ }
+
+ public ShardStatus[] getShards() {
+ return this.shards;
+ }
+
+ public ShardStatus getAt(int position) {
+ return shards[position];
+ }
+
+ public IndexStatus getIndex(String index) {
+ return getIndices().get(index);
+ }
+
+ public Map<String, IndexStatus> getIndices() {
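+ // Lazily group the flat shard array by index name and cache the result;
+ // later calls return the cached map.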
+ if (indicesStatus != null) {
+ return indicesStatus;
+ }
+ Map<String, IndexStatus> indicesStatus = newHashMap();
+
+ Set<String> indices = Sets.newHashSet();
+ for (ShardStatus shard : shards) {
+ indices.add(shard.getIndex());
+ }
+
+ for (String index : indices) {
+ List<ShardStatus> shards = newArrayList();
+ for (ShardStatus shard : this.shards) {
+ if (shard.getShardRouting().index().equals(index)) {
+ shards.add(shard);
+ }
+ }
+ indicesStatus.put(index, new IndexStatus(index, shards.toArray(new ShardStatus[shards.size()])));
+ }
+ this.indicesStatus = indicesStatus;
+ return indicesStatus;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(getShards().length);
+ for (ShardStatus status : getShards()) {
+ status.writeTo(out);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shards = new ShardStatus[in.readVInt()];
+ for (int i = 0; i < shards.length; i++) {
+ shards[i] = readIndexShardStatus(in);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return toXContent(builder, params, null);
+ }
+
+ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable SettingsFilter settingsFilter) throws IOException {
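+ // Renders a per-index summary (sizes, translog, docs, merge/refresh/flush
+ // stats) followed by a per-shard breakdown keyed by shard id.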
+ builder.startObject(Fields.INDICES);
+ for (IndexStatus indexStatus : getIndices().values()) {
+ builder.startObject(indexStatus.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.startObject(Fields.INDEX);
+ if (indexStatus.getStoreSize() != null) {
+ builder.byteSizeField(Fields.PRIMARY_SIZE_IN_BYTES, Fields.PRIMARY_SIZE, indexStatus.getPrimaryStoreSize());
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, indexStatus.getStoreSize());
+ }
+ builder.endObject();
+ if (indexStatus.getTranslogOperations() != -1) {
+ builder.startObject(Fields.TRANSLOG);
+ builder.field(Fields.OPERATIONS, indexStatus.getTranslogOperations());
+ builder.endObject();
+ }
+
+ if (indexStatus.getDocs() != null) {
+ builder.startObject(Fields.DOCS);
+ builder.field(Fields.NUM_DOCS, indexStatus.getDocs().getNumDocs());
+ builder.field(Fields.MAX_DOC, indexStatus.getDocs().getMaxDoc());
+ builder.field(Fields.DELETED_DOCS, indexStatus.getDocs().getDeletedDocs());
+ builder.endObject();
+ }
+
+ MergeStats mergeStats = indexStatus.getMergeStats();
+ if (mergeStats != null) {
+ mergeStats.toXContent(builder, params);
+ }
+ RefreshStats refreshStats = indexStatus.getRefreshStats();
+ if (refreshStats != null) {
+ refreshStats.toXContent(builder, params);
+ }
+ FlushStats flushStats = indexStatus.getFlushStats();
+ if (flushStats != null) {
+ flushStats.toXContent(builder, params);
+ }
+
+ builder.startObject(Fields.SHARDS);
+ for (IndexShardStatus indexShardStatus : indexStatus) {
+ builder.startArray(Integer.toString(indexShardStatus.getShardId().id()));
+ for (ShardStatus shardStatus : indexShardStatus) {
+ builder.startObject();
+
+ builder.startObject(Fields.ROUTING)
+ .field(Fields.STATE, shardStatus.getShardRouting().state())
+ .field(Fields.PRIMARY, shardStatus.getShardRouting().primary())
+ .field(Fields.NODE, shardStatus.getShardRouting().currentNodeId())
+ .field(Fields.RELOCATING_NODE, shardStatus.getShardRouting().relocatingNodeId())
+ .field(Fields.SHARD, shardStatus.getShardRouting().shardId().id())
+ .field(Fields.INDEX, shardStatus.getShardRouting().shardId().index().name())
+ .endObject();
+
+ builder.field(Fields.STATE, shardStatus.getState());
+ if (shardStatus.getStoreSize() != null) {
+ builder.startObject(Fields.INDEX);
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, shardStatus.getStoreSize());
+ builder.endObject();
+ }
+ if (shardStatus.getTranslogId() != -1) {
+ builder.startObject(Fields.TRANSLOG);
+ builder.field(Fields.ID, shardStatus.getTranslogId());
+ builder.field(Fields.OPERATIONS, shardStatus.getTranslogOperations());
+ builder.endObject();
+ }
+
+ if (shardStatus.getDocs() != null) {
+ builder.startObject(Fields.DOCS);
+ builder.field(Fields.NUM_DOCS, shardStatus.getDocs().getNumDocs());
+ builder.field(Fields.MAX_DOC, shardStatus.getDocs().getMaxDoc());
+ builder.field(Fields.DELETED_DOCS, shardStatus.getDocs().getDeletedDocs());
+ builder.endObject();
+ }
+
+ mergeStats = shardStatus.getMergeStats();
+ if (mergeStats != null) {
+ mergeStats.toXContent(builder, params);
+ }
+
+ refreshStats = shardStatus.getRefreshStats();
+ if (refreshStats != null) {
+ refreshStats.toXContent(builder, params);
+ }
+ flushStats = shardStatus.getFlushStats();
+ if (flushStats != null) {
+ flushStats.toXContent(builder, params);
+ }
+
+ if (shardStatus.getPeerRecoveryStatus() != null) {
+ PeerRecoveryStatus peerRecoveryStatus = shardStatus.getPeerRecoveryStatus();
+ builder.startObject(Fields.PEER_RECOVERY);
+ builder.field(Fields.STAGE, peerRecoveryStatus.getStage());
+ builder.field(Fields.START_TIME_IN_MILLIS, peerRecoveryStatus.getStartTime());
+ builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, peerRecoveryStatus.getTime());
+
+ builder.startObject(Fields.INDEX);
+ builder.field(Fields.PROGRESS, peerRecoveryStatus.getIndexRecoveryProgress());
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, peerRecoveryStatus.getIndexSize());
+ builder.byteSizeField(Fields.REUSED_SIZE_IN_BYTES, Fields.REUSED_SIZE, peerRecoveryStatus.getReusedIndexSize());
+ builder.byteSizeField(Fields.EXPECTED_RECOVERED_SIZE_IN_BYTES, Fields.EXPECTED_RECOVERED_SIZE, peerRecoveryStatus.getExpectedRecoveredIndexSize());
+ builder.byteSizeField(Fields.RECOVERED_SIZE_IN_BYTES, Fields.RECOVERED_SIZE, peerRecoveryStatus.getRecoveredIndexSize());
+ builder.endObject();
+
+ builder.startObject(Fields.TRANSLOG);
+ builder.field(Fields.RECOVERED, peerRecoveryStatus.getRecoveredTranslogOperations());
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ if (shardStatus.getGatewayRecoveryStatus() != null) {
+ GatewayRecoveryStatus gatewayRecoveryStatus = shardStatus.getGatewayRecoveryStatus();
+ builder.startObject(Fields.GATEWAY_RECOVERY);
+ builder.field(Fields.STAGE, gatewayRecoveryStatus.getStage());
+ builder.field(Fields.START_TIME_IN_MILLIS, gatewayRecoveryStatus.getStartTime());
+ builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, gatewayRecoveryStatus.getTime());
+
+ builder.startObject(Fields.INDEX);
+ builder.field(Fields.PROGRESS, gatewayRecoveryStatus.getIndexRecoveryProgress());
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, gatewayRecoveryStatus.getIndexSize());
+ builder.byteSizeField(Fields.REUSED_SIZE_IN_BYTES, Fields.REUSED_SIZE, gatewayRecoveryStatus.getReusedIndexSize());
+ builder.byteSizeField(Fields.EXPECTED_RECOVERED_SIZE_IN_BYTES, Fields.EXPECTED_RECOVERED_SIZE, gatewayRecoveryStatus.getExpectedRecoveredIndexSize());
+ builder.byteSizeField(Fields.RECOVERED_SIZE_IN_BYTES, Fields.RECOVERED_SIZE, gatewayRecoveryStatus.getRecoveredIndexSize());
+ builder.endObject();
+
+ builder.startObject(Fields.TRANSLOG);
+ builder.field(Fields.RECOVERED, gatewayRecoveryStatus.getRecoveredTranslogOperations());
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ if (shardStatus.getGatewaySnapshotStatus() != null) {
+ GatewaySnapshotStatus gatewaySnapshotStatus = shardStatus.getGatewaySnapshotStatus();
+ builder.startObject(Fields.GATEWAY_SNAPSHOT);
+ builder.field(Fields.STAGE, gatewaySnapshotStatus.getStage());
+ builder.field(Fields.START_TIME_IN_MILLIS, gatewaySnapshotStatus.getStartTime());
+ builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, gatewaySnapshotStatus.getTime());
+
+ builder.startObject(Fields.INDEX);
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, gatewaySnapshotStatus.getIndexSize());
+ builder.endObject();
+
+ builder.startObject(Fields.TRANSLOG);
+ builder.field(Fields.EXPECTED_OPERATIONS, gatewaySnapshotStatus.getExpectedNumberOfOperations());
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString INDEX = new XContentBuilderString("index");
+ static final XContentBuilderString PRIMARY_SIZE = new XContentBuilderString("primary_size");
+ static final XContentBuilderString PRIMARY_SIZE_IN_BYTES = new XContentBuilderString("primary_size_in_bytes");
+ static final XContentBuilderString SIZE = new XContentBuilderString("size");
+ static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
+ static final XContentBuilderString TRANSLOG = new XContentBuilderString("translog");
+ static final XContentBuilderString OPERATIONS = new XContentBuilderString("operations");
+ static final XContentBuilderString DOCS = new XContentBuilderString("docs");
+ static final XContentBuilderString NUM_DOCS = new XContentBuilderString("num_docs");
+ static final XContentBuilderString MAX_DOC = new XContentBuilderString("max_doc");
+ static final XContentBuilderString DELETED_DOCS = new XContentBuilderString("deleted_docs");
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
+ static final XContentBuilderString STATE = new XContentBuilderString("state");
+ static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
+ static final XContentBuilderString NODE = new XContentBuilderString("node");
+ static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
+ static final XContentBuilderString SHARD = new XContentBuilderString("shard");
+ static final XContentBuilderString ID = new XContentBuilderString("id");
+ static final XContentBuilderString PEER_RECOVERY = new XContentBuilderString("peer_recovery");
+ static final XContentBuilderString STAGE = new XContentBuilderString("stage");
+ static final XContentBuilderString START_TIME_IN_MILLIS = new XContentBuilderString("start_time_in_millis");
+ static final XContentBuilderString TIME = new XContentBuilderString("time");
+ static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis");
+ static final XContentBuilderString PROGRESS = new XContentBuilderString("progress");
+ static final XContentBuilderString REUSED_SIZE = new XContentBuilderString("reused_size");
+ static final XContentBuilderString REUSED_SIZE_IN_BYTES = new XContentBuilderString("reused_size_in_bytes");
+ static final XContentBuilderString EXPECTED_RECOVERED_SIZE = new XContentBuilderString("expected_recovered_size");
+ static final XContentBuilderString EXPECTED_RECOVERED_SIZE_IN_BYTES = new XContentBuilderString("expected_recovered_size_in_bytes");
+ static final XContentBuilderString RECOVERED_SIZE = new XContentBuilderString("recovered_size");
+ static final XContentBuilderString RECOVERED_SIZE_IN_BYTES = new XContentBuilderString("recovered_size_in_bytes");
+ static final XContentBuilderString RECOVERED = new XContentBuilderString("recovered");
+ static final XContentBuilderString GATEWAY_RECOVERY = new XContentBuilderString("gateway_recovery");
+ static final XContentBuilderString GATEWAY_SNAPSHOT = new XContentBuilderString("gateway_snapshot");
+ static final XContentBuilderString EXPECTED_OPERATIONS = new XContentBuilderString("expected_operations");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/PeerRecoveryStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/PeerRecoveryStatus.java
new file mode 100644
index 0000000..1fe9cc0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/PeerRecoveryStatus.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Status of an ongoing or completed peer (shard-to-shard) recovery.
+ */
+public class PeerRecoveryStatus {
+
+ public enum Stage {
+ INIT((byte) 0),
+ INDEX((byte) 1),
+ TRANSLOG((byte) 2),
+ FINALIZE((byte) 3),
+ DONE((byte) 4);
+
+ private final byte value;
+
+ Stage(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return value;
+ }
+
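+ // Decodes a byte previously written by value(); an unknown value indicates a
+ // corrupt or incompatible stream and raises an exception.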
+ public static Stage fromValue(byte value) {
+ if (value == 0) {
+ return INIT;
+ } else if (value == 1) {
+ return INDEX;
+ } else if (value == 2) {
+ return TRANSLOG;
+ } else if (value == 3) {
+ return FINALIZE;
+ } else if (value == 4) {
+ return DONE;
+ }
+ throw new ElasticsearchIllegalArgumentException("No stage found for [" + value + ']');
+ }
+ }
+
+ final Stage stage;
+
+ final long startTime;
+
+ final long time;
+
+ final long indexSize;
+
+ final long reusedIndexSize;
+
+ final long recoveredIndexSize;
+
+ final long recoveredTranslogOperations;
+
+ public PeerRecoveryStatus(Stage stage, long startTime, long time, long indexSize, long reusedIndexSize,
+ long recoveredIndexSize, long recoveredTranslogOperations) {
+ this.stage = stage;
+ this.startTime = startTime;
+ this.time = time;
+ this.indexSize = indexSize;
+ this.reusedIndexSize = reusedIndexSize;
+ this.recoveredIndexSize = recoveredIndexSize;
+ this.recoveredTranslogOperations = recoveredTranslogOperations;
+ }
+
+ public Stage getStage() {
+ return this.stage;
+ }
+
+ public long getStartTime() {
+ return this.startTime;
+ }
+
+ public TimeValue getTime() {
+ return TimeValue.timeValueMillis(time);
+ }
+
+ public ByteSizeValue getIndexSize() {
+ return new ByteSizeValue(indexSize);
+ }
+
+ public ByteSizeValue getReusedIndexSize() {
+ return new ByteSizeValue(reusedIndexSize);
+ }
+
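+ /**
+ * The size expected to be recovered: the total index size minus the bytes reused from local disk.
+ */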
+ public ByteSizeValue getExpectedRecoveredIndexSize() {
+ return new ByteSizeValue(indexSize - reusedIndexSize);
+ }
+
+ /**
+ * How much of the index has been recovered.
+ */
+ public ByteSizeValue getRecoveredIndexSize() {
+ return new ByteSizeValue(recoveredIndexSize);
+ }
+
+ public int getIndexRecoveryProgress() {
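+ // Progress is the recovered byte count as a percentage of the bytes expected
+ // to be recovered; if nothing has been recovered because the whole index
+ // could be reused from disk, that counts as 100%.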
+ if (recoveredIndexSize == 0) {
+ if (indexSize != 0 && indexSize == reusedIndexSize) {
+ return 100;
+ }
+ return 0;
+ }
+ return (int) (((double) recoveredIndexSize) / getExpectedRecoveredIndexSize().bytes() * 100);
+ }
+
+ public long getRecoveredTranslogOperations() {
+ return recoveredTranslogOperations;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/ShardStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/status/ShardStatus.java
new file mode 100644
index 0000000..6ad2035
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/ShardStatus.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.shard.IndexShardState;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
+import static org.elasticsearch.common.unit.ByteSizeValue.readBytesSizeValue;
+
+/**
+ * Shard instance (actual allocated shard) status.
+ */
+public class ShardStatus extends BroadcastShardOperationResponse {
+
+ private ShardRouting shardRouting;
+
+ IndexShardState state;
+
+ ByteSizeValue storeSize;
+
+ long translogId = -1;
+
+ long translogOperations = -1;
+
+ DocsStatus docs;
+
+ MergeStats mergeStats;
+
+ RefreshStats refreshStats;
+
+ FlushStats flushStats;
+
+ PeerRecoveryStatus peerRecoveryStatus;
+
+ GatewayRecoveryStatus gatewayRecoveryStatus;
+
+ GatewaySnapshotStatus gatewaySnapshotStatus;
+
+ ShardStatus() {
+ }
+
+ ShardStatus(ShardRouting shardRouting) {
+ super(shardRouting.index(), shardRouting.id());
+ this.shardRouting = shardRouting;
+ }
+
+ /**
+ * The shard routing information (cluster wide shard state).
+ */
+ public ShardRouting getShardRouting() {
+ return this.shardRouting;
+ }
+
+ /**
+ * The shard state (index/local state).
+ */
+ public IndexShardState getState() {
+ return state;
+ }
+
+ /**
+ * The current size of the shard index storage.
+ */
+ public ByteSizeValue getStoreSize() {
+ return storeSize;
+ }
+
+ /**
+ * The transaction log id.
+ */
+ public long getTranslogId() {
+ return translogId;
+ }
+
+ /**
+ * The number of transaction operations in the transaction log.
+ */
+ public long getTranslogOperations() {
+ return translogOperations;
+ }
+
+ /**
+ * Docs level information for the shard index, <tt>null</tt> if not applicable.
+ */
+ public DocsStatus getDocs() {
+ return docs;
+ }
+
+ /**
+ * Index merge statistics.
+ */
+ public MergeStats getMergeStats() {
+ return this.mergeStats;
+ }
+
+ /**
+ * Refresh stats.
+ */
+ public RefreshStats getRefreshStats() {
+ return this.refreshStats;
+ }
+
+ public FlushStats getFlushStats() {
+ return this.flushStats;
+ }
+
+ /**
+ * Peer recovery status (<tt>null</tt> if not applicable). Reported in real time while a recovery
+ * is in progress, and as a summary once it is done.
+ */
+ public PeerRecoveryStatus getPeerRecoveryStatus() {
+ return peerRecoveryStatus;
+ }
+
+ /**
+ * Gateway recovery status (<tt>null</tt> if not applicable). Reported in real time while a recovery
+ * is in progress, and as a summary once it is done.
+ */
+ public GatewayRecoveryStatus getGatewayRecoveryStatus() {
+ return gatewayRecoveryStatus;
+ }
+
+ /**
+ * The current ongoing snapshot to the gateway, or the last one if no snapshot is in progress.
+ */
+ public GatewaySnapshotStatus getGatewaySnapshotStatus() {
+ return gatewaySnapshotStatus;
+ }
+
+ public static ShardStatus readIndexShardStatus(StreamInput in) throws IOException {
+ ShardStatus shardStatus = new ShardStatus();
+ shardStatus.readFrom(in);
+ return shardStatus;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardRouting.writeTo(out);
+ out.writeByte(state.id());
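+ // Each optional section below is written with a leading boolean presence
+ // marker, mirrored by readFrom(StreamInput) when deserializing.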
+ if (storeSize == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ storeSize.writeTo(out);
+ }
+ out.writeLong(translogId);
+ out.writeLong(translogOperations);
+ if (docs == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeLong(docs.getNumDocs());
+ out.writeLong(docs.getMaxDoc());
+ out.writeLong(docs.getDeletedDocs());
+ }
+ if (peerRecoveryStatus == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(peerRecoveryStatus.stage.value());
+ out.writeVLong(peerRecoveryStatus.startTime);
+ out.writeVLong(peerRecoveryStatus.time);
+ out.writeVLong(peerRecoveryStatus.indexSize);
+ out.writeVLong(peerRecoveryStatus.reusedIndexSize);
+ out.writeVLong(peerRecoveryStatus.recoveredIndexSize);
+ out.writeVLong(peerRecoveryStatus.recoveredTranslogOperations);
+ }
+
+ if (gatewayRecoveryStatus == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(gatewayRecoveryStatus.stage.value());
+ out.writeVLong(gatewayRecoveryStatus.startTime);
+ out.writeVLong(gatewayRecoveryStatus.time);
+ out.writeVLong(gatewayRecoveryStatus.indexSize);
+ out.writeVLong(gatewayRecoveryStatus.reusedIndexSize);
+ out.writeVLong(gatewayRecoveryStatus.recoveredIndexSize);
+ out.writeVLong(gatewayRecoveryStatus.recoveredTranslogOperations);
+ }
+
+ if (gatewaySnapshotStatus == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeByte(gatewaySnapshotStatus.stage.value());
+ out.writeVLong(gatewaySnapshotStatus.startTime);
+ out.writeVLong(gatewaySnapshotStatus.time);
+ out.writeVLong(gatewaySnapshotStatus.indexSize);
+ out.writeVInt(gatewaySnapshotStatus.getExpectedNumberOfOperations());
+ }
+
+ if (mergeStats == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ mergeStats.writeTo(out);
+ }
+ if (refreshStats == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ refreshStats.writeTo(out);
+ }
+ if (flushStats == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ flushStats.writeTo(out);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardRouting = readShardRoutingEntry(in);
+ state = IndexShardState.fromId(in.readByte());
+ if (in.readBoolean()) {
+ storeSize = readBytesSizeValue(in);
+ }
+ translogId = in.readLong();
+ translogOperations = in.readLong();
+ if (in.readBoolean()) {
+ docs = new DocsStatus();
+ docs.numDocs = in.readLong();
+ docs.maxDoc = in.readLong();
+ docs.deletedDocs = in.readLong();
+ }
+ if (in.readBoolean()) {
+ peerRecoveryStatus = new PeerRecoveryStatus(PeerRecoveryStatus.Stage.fromValue(in.readByte()),
+ in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
+ }
+
+ if (in.readBoolean()) {
+ gatewayRecoveryStatus = new GatewayRecoveryStatus(GatewayRecoveryStatus.Stage.fromValue(in.readByte()),
+ in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
+ }
+
+ if (in.readBoolean()) {
+ gatewaySnapshotStatus = new GatewaySnapshotStatus(GatewaySnapshotStatus.Stage.fromValue(in.readByte()),
+ in.readVLong(), in.readVLong(), in.readVLong(), in.readVInt());
+ }
+
+ if (in.readBoolean()) {
+ mergeStats = MergeStats.readMergeStats(in);
+ }
+ if (in.readBoolean()) {
+ refreshStats = RefreshStats.readRefreshStats(in);
+ }
+ if (in.readBoolean()) {
+ flushStats = FlushStats.readFlushStats(in);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/status/TransportIndicesStatusAction.java b/src/main/java/org/elasticsearch/action/admin/indices/status/TransportIndicesStatusAction.java
new file mode 100644
index 0000000..9082835
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/status/TransportIndicesStatusAction.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.status;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.gateway.IndexShardGatewayService;
+import org.elasticsearch.index.gateway.SnapshotStatus;
+import org.elasticsearch.index.service.InternalIndexService;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryStatus;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Transport action that broadcasts an indices status request to all assigned
+ * shards and aggregates the per-shard results.
+ */
+public class TransportIndicesStatusAction extends TransportBroadcastOperationAction<IndicesStatusRequest, IndicesStatusResponse, TransportIndicesStatusAction.IndexShardStatusRequest, ShardStatus> {
+
+ private final IndicesService indicesService;
+
+ private final RecoveryTarget peerRecoveryTarget;
+
+ @Inject
+ public TransportIndicesStatusAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, RecoveryTarget peerRecoveryTarget) {
+ super(settings, threadPool, clusterService, transportService);
+ this.peerRecoveryTarget = peerRecoveryTarget;
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected String transportAction() {
+ return IndicesStatusAction.NAME;
+ }
+
+ @Override
+ protected IndicesStatusRequest newRequest() {
+ return new IndicesStatusRequest();
+ }
+
+ /**
+ * Status goes across *all* shards.
+ */
+ @Override
+ protected GroupShardsIterator shards(ClusterState state, IndicesStatusRequest request, String[] concreteIndices) {
+ return state.routingTable().allAssignedShardsGrouped(concreteIndices, true);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, IndicesStatusRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesStatusRequest countRequest, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+
+ @Override
+ protected IndicesStatusResponse newResponse(IndicesStatusRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
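+ // Partition the per-shard responses into successes, failures, and inactive
+ // shards (null entries), then aggregate them into a single response.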
+ int successfulShards = 0;
+ int failedShards = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ final List<ShardStatus> shards = newArrayList();
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // a null response corresponds to a non-active shard; simply ignore it
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ shards.add((ShardStatus) shardResponse);
+ successfulShards++;
+ }
+ }
+ return new IndicesStatusResponse(shards.toArray(new ShardStatus[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected IndexShardStatusRequest newShardRequest() {
+ return new IndexShardStatusRequest();
+ }
+
+ @Override
+ protected IndexShardStatusRequest newShardRequest(ShardRouting shard, IndicesStatusRequest request) {
+ return new IndexShardStatusRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardStatus newShardResponse() {
+ return new ShardStatus();
+ }
+
+ @Override
+ protected ShardStatus shardOperation(IndexShardStatusRequest request) throws ElasticsearchException {
+ InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.index());
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
+ ShardStatus shardStatus = new ShardStatus(indexShard.routingEntry());
+ shardStatus.state = indexShard.state();
+ try {
+ shardStatus.storeSize = indexShard.store().estimateSize();
+ } catch (IOException e) {
+ // failing to read the store size is not fatal; storeSize simply stays unset
+ }
+ if (indexShard.state() == IndexShardState.STARTED) {
+// shardStatus.estimatedFlushableMemorySize = indexShard.estimateFlushableMemorySize();
+ shardStatus.translogId = indexShard.translog().currentId();
+ shardStatus.translogOperations = indexShard.translog().estimatedNumberOfOperations();
+ Engine.Searcher searcher = indexShard.acquireSearcher("indices_status");
+ try {
+ shardStatus.docs = new DocsStatus();
+ shardStatus.docs.numDocs = searcher.reader().numDocs();
+ shardStatus.docs.maxDoc = searcher.reader().maxDoc();
+ shardStatus.docs.deletedDocs = searcher.reader().numDeletedDocs();
+ } finally {
+ searcher.release();
+ }
+
+ shardStatus.mergeStats = indexShard.mergeScheduler().stats();
+ shardStatus.refreshStats = indexShard.refreshStats();
+ shardStatus.flushStats = indexShard.flushStats();
+ }
+
+ if (request.recovery) {
+ // check on going recovery (from peer or gateway)
+ RecoveryStatus peerRecoveryStatus = indexShard.peerRecoveryStatus();
+ if (peerRecoveryStatus == null) {
+ peerRecoveryStatus = peerRecoveryTarget.peerRecoveryStatus(indexShard.shardId());
+ }
+ if (peerRecoveryStatus != null) {
+ PeerRecoveryStatus.Stage stage;
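+ // Map the internal recovery stage onto the API-level enum; unknown values
+ // fall back to INIT.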
+ switch (peerRecoveryStatus.stage()) {
+ case INIT:
+ stage = PeerRecoveryStatus.Stage.INIT;
+ break;
+ case INDEX:
+ stage = PeerRecoveryStatus.Stage.INDEX;
+ break;
+ case TRANSLOG:
+ stage = PeerRecoveryStatus.Stage.TRANSLOG;
+ break;
+ case FINALIZE:
+ stage = PeerRecoveryStatus.Stage.FINALIZE;
+ break;
+ case DONE:
+ stage = PeerRecoveryStatus.Stage.DONE;
+ break;
+ default:
+ stage = PeerRecoveryStatus.Stage.INIT;
+ }
+ shardStatus.peerRecoveryStatus = new PeerRecoveryStatus(stage, peerRecoveryStatus.startTime(), peerRecoveryStatus.time(),
+ peerRecoveryStatus.phase1TotalSize(), peerRecoveryStatus.phase1ExistingTotalSize(),
+ peerRecoveryStatus.currentFilesSize(), peerRecoveryStatus.currentTranslogOperations());
+ }
+
+ IndexShardGatewayService gatewayService = indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
+ org.elasticsearch.index.gateway.RecoveryStatus gatewayRecoveryStatus = gatewayService.recoveryStatus();
+ if (gatewayRecoveryStatus != null) {
+ GatewayRecoveryStatus.Stage stage;
+ switch (gatewayRecoveryStatus.stage()) {
+ case INIT:
+ stage = GatewayRecoveryStatus.Stage.INIT;
+ break;
+ case INDEX:
+ stage = GatewayRecoveryStatus.Stage.INDEX;
+ break;
+ case TRANSLOG:
+ stage = GatewayRecoveryStatus.Stage.TRANSLOG;
+ break;
+ case DONE:
+ stage = GatewayRecoveryStatus.Stage.DONE;
+ break;
+ default:
+ stage = GatewayRecoveryStatus.Stage.INIT;
+ }
+ shardStatus.gatewayRecoveryStatus = new GatewayRecoveryStatus(stage, gatewayRecoveryStatus.startTime(), gatewayRecoveryStatus.time(),
+ gatewayRecoveryStatus.index().totalSize(), gatewayRecoveryStatus.index().reusedTotalSize(), gatewayRecoveryStatus.index().currentFilesSize(), gatewayRecoveryStatus.translog().currentTranslogOperations());
+ }
+ }
+
+ if (request.snapshot) {
+ IndexShardGatewayService gatewayService = indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
+ SnapshotStatus snapshotStatus = gatewayService.snapshotStatus();
+ if (snapshotStatus != null) {
+ GatewaySnapshotStatus.Stage stage;
+ switch (snapshotStatus.stage()) {
+ case DONE:
+ stage = GatewaySnapshotStatus.Stage.DONE;
+ break;
+ case FAILURE:
+ stage = GatewaySnapshotStatus.Stage.FAILURE;
+ break;
+ case TRANSLOG:
+ stage = GatewaySnapshotStatus.Stage.TRANSLOG;
+ break;
+ case FINALIZE:
+ stage = GatewaySnapshotStatus.Stage.FINALIZE;
+ break;
+ case INDEX:
+ stage = GatewaySnapshotStatus.Stage.INDEX;
+ break;
+ default:
+ stage = GatewaySnapshotStatus.Stage.NONE;
+ break;
+ }
+ shardStatus.gatewaySnapshotStatus = new GatewaySnapshotStatus(stage, snapshotStatus.startTime(), snapshotStatus.time(),
+ snapshotStatus.index().totalSize(), snapshotStatus.translog().expectedNumberOfOperations());
+ }
+ }
+
+ return shardStatus;
+ }
+
+ public static class IndexShardStatusRequest extends BroadcastShardOperationRequest {
+
+ boolean recovery;
+
+ boolean snapshot;
+
+ IndexShardStatusRequest() {
+ }
+
+ IndexShardStatusRequest(String index, int shardId, IndicesStatusRequest request) {
+ super(index, shardId, request);
+ recovery = request.recovery();
+ snapshot = request.snapshot();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recovery = in.readBoolean();
+ snapshot = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(recovery);
+ out.writeBoolean(snapshot);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
new file mode 100644
index 0000000..2d604b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.template.delete;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action definition for deleting an index template.
+ */
+public class DeleteIndexTemplateAction extends IndicesAction<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, DeleteIndexTemplateRequestBuilder> {
+
+ public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction();
+ public static final String NAME = "indices/template/delete";
+
+ private DeleteIndexTemplateAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteIndexTemplateResponse newResponse() {
+ return new DeleteIndexTemplateResponse();
+ }
+
+ @Override
+ public DeleteIndexTemplateRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new DeleteIndexTemplateRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java
new file mode 100644
index 0000000..43fe223
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to delete an index template.
+ */
+public class DeleteIndexTemplateRequest extends MasterNodeOperationRequest<DeleteIndexTemplateRequest> {
+
+ private String name;
+
+ DeleteIndexTemplateRequest() {
+ }
+
+ /**
+ * Constructs a new delete index template request for the specified name.
+ */
+ public DeleteIndexTemplateRequest(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (name == null) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The index template name to delete.
+ */
+ String name() {
+ return name;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ name = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(name);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
new file mode 100644
index 0000000..33fe431
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A request builder for deleting an index template by name.
+ */
+public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, DeleteIndexTemplateRequestBuilder> {
+
+ public DeleteIndexTemplateRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new DeleteIndexTemplateRequest());
+ }
+
+ public DeleteIndexTemplateRequestBuilder(IndicesAdminClient indicesClient, String name) {
+ super((InternalIndicesAdminClient) indicesClient, new DeleteIndexTemplateRequest(name));
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteIndexTemplateResponse> listener) {
+ ((IndicesAdminClient) client).deleteTemplate(request, listener);
+ }
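+
+ // Usage sketch (assumes a connected Client named `client`; the template name
+ // is illustrative):
+ //
+ // client.admin().indices().prepareDeleteTemplate("template_1")
+ // .execute().actionGet();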
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java
new file mode 100644
index 0000000..5c2a2b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.delete;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for a delete index template request.
+ */
+public class DeleteIndexTemplateResponse extends AcknowledgedResponse {
+
+ DeleteIndexTemplateResponse() {
+ }
+
+ DeleteIndexTemplateResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
new file mode 100644
index 0000000..57fa92b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.delete;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Delete index template action.
+ */
+public class TransportDeleteIndexTemplateAction extends TransportMasterNodeOperationAction<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse> {
+
+ private final MetaDataIndexTemplateService indexTemplateService;
+
+ @Inject
+ public TransportDeleteIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.indexTemplateService = indexTemplateService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteIndexTemplateAction.NAME;
+ }
+
+ @Override
+ protected DeleteIndexTemplateRequest newRequest() {
+ return new DeleteIndexTemplateRequest();
+ }
+
+ @Override
+ protected DeleteIndexTemplateResponse newResponse() {
+ return new DeleteIndexTemplateResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(DeleteIndexTemplateRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener<DeleteIndexTemplateResponse> listener) throws ElasticsearchException {
+ indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() {
+ @Override
+ public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
+ listener.onResponse(new DeleteIndexTemplateResponse(response.acknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to delete templates [{}]", t, request.name());
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java
new file mode 100644
index 0000000..f0a445c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.get;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action definition for retrieving index templates.
+ */
+public class GetIndexTemplatesAction extends IndicesAction<GetIndexTemplatesRequest, GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> {
+
+ public static final GetIndexTemplatesAction INSTANCE = new GetIndexTemplatesAction();
+ public static final String NAME = "indices/template/get";
+
+ protected GetIndexTemplatesAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetIndexTemplatesResponse newResponse() {
+ return new GetIndexTemplatesResponse();
+ }
+
+ @Override
+ public GetIndexTemplatesRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GetIndexTemplatesRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java
new file mode 100644
index 0000000..0d04cbe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.get;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to retrieve one or more index templates.
+ */
+public class GetIndexTemplatesRequest extends MasterNodeReadOperationRequest<GetIndexTemplatesRequest> {
+
+ private String[] names;
+
+ public GetIndexTemplatesRequest() {
+ }
+
+ public GetIndexTemplatesRequest(String... names) {
+ this.names = names;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (names == null) {
+ validationException = addValidationError("names is null or empty", validationException);
+ } else {
+ for (String name : names) {
+ if (name == null || !Strings.hasText(name)) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ }
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the names of the index templates.
+ */
+ public GetIndexTemplatesRequest names(String... names) {
+ this.names = names;
+ return this;
+ }
+
+ /**
+ * The names of the index templates.
+ */
+ public String[] names() {
+ return this.names;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ names = in.readStringArray();
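+ // readLocal/writeLocal gate the local flag on the stream version (1.0.0.RC2+).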
+ readLocal(in, Version.V_1_0_0_RC2);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(names);
+ writeLocal(out, Version.V_1_0_0_RC2);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
new file mode 100644
index 0000000..89d0c59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A request builder for retrieving one or more index templates.
+ */
+public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetIndexTemplatesRequest, GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> {
+
+ public GetIndexTemplatesRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new GetIndexTemplatesRequest());
+ }
+
+ public GetIndexTemplatesRequestBuilder(IndicesAdminClient indicesClient, String... names) {
+ super((InternalIndicesAdminClient) indicesClient, new GetIndexTemplatesRequest(names));
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetIndexTemplatesResponse> listener) {
+ ((IndicesAdminClient) client).getTemplates(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
new file mode 100644
index 0000000..56de198
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.get;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A response holding the index templates that matched a get index templates request.
+ */
+public class GetIndexTemplatesResponse extends ActionResponse {
+
+ private List<IndexTemplateMetaData> indexTemplates;
+
+ GetIndexTemplatesResponse() {
+ }
+
+ GetIndexTemplatesResponse(List<IndexTemplateMetaData> indexTemplates) {
+ this.indexTemplates = indexTemplates;
+ }
+
+ public List<IndexTemplateMetaData> getIndexTemplates() {
+ return indexTemplates;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ indexTemplates = Lists.newArrayListWithExpectedSize(size);
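+ // note: each template is inserted at the head of the list, so the deserialized
+ // list ends up in the reverse of the order in which writeTo serialized it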
+ for (int i = 0 ; i < size ; i++) {
+ indexTemplates.add(0, IndexTemplateMetaData.Builder.readFrom(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(indexTemplates.size());
+ for (IndexTemplateMetaData indexTemplate : indexTemplates) {
+ IndexTemplateMetaData.Builder.writeTo(indexTemplate, out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java
new file mode 100644
index 0000000..9eaa26b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+
+/**
+ * Transport action that resolves the requested template names, including wildcard
+ * patterns, against the templates stored in the cluster state.
+ */
+public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOperationAction<GetIndexTemplatesRequest, GetIndexTemplatesResponse> {
+
+ @Inject
+ public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetIndexTemplatesAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected GetIndexTemplatesRequest newRequest() {
+ return new GetIndexTemplatesRequest();
+ }
+
+ @Override
+ protected GetIndexTemplatesResponse newResponse() {
+ return new GetIndexTemplatesResponse();
+ }
+
+ @Override
+ protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) throws ElasticsearchException {
+ List<IndexTemplateMetaData> results;
+
+ // If we did not ask for a specific name, then we return all templates
+ if (request.names().length == 0) {
+ results = Lists.newArrayList(state.metaData().templates().values().toArray(IndexTemplateMetaData.class));
+ } else {
+ results = Lists.newArrayList();
+ }
+
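+ // resolve each requested name: wildcard patterns are matched against every
+ // template in the cluster state, exact names are looked up directly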
+ for (String name : request.names()) {
+ if (Regex.isSimpleMatchPattern(name)) {
+ for (ObjectObjectCursor<String, IndexTemplateMetaData> entry : state.metaData().templates()) {
+ if (Regex.simpleMatch(name, entry.key)) {
+ results.add(entry.value);
+ }
+ }
+ } else if (state.metaData().templates().containsKey(name)) {
+ results.add(state.metaData().templates().get(name));
+ }
+ }
+
+ listener.onResponse(new GetIndexTemplatesResponse(results));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java
new file mode 100644
index 0000000..0435ba9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.template.put;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * The action type for putting (creating or updating) an index template.
+ */
+public class PutIndexTemplateAction extends IndicesAction<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {
+
+ public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction();
+ public static final String NAME = "indices/template/put";
+
+ private PutIndexTemplateAction() {
+ super(NAME);
+ }
+
+ @Override
+ public PutIndexTemplateResponse newResponse() {
+ return new PutIndexTemplateResponse();
+ }
+
+ @Override
+ public PutIndexTemplateRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new PutIndexTemplateRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
new file mode 100644
index 0000000..7a54984
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.put;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
+import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
+
+/**
+ * A request to create an index template.
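+ *
+ * <p>A minimal usage sketch; the template name, pattern, and mapping below are
+ * illustrative only:
+ * <pre>
+ * PutIndexTemplateRequest request = new PutIndexTemplateRequest("my_template")
+ *         .template("logs-*")
+ *         .order(1)
+ *         .settings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ *         .mapping("log", "{\"log\":{\"properties\":{\"message\":{\"type\":\"string\"}}}}");
+ * </pre>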
+ */
+public class PutIndexTemplateRequest extends MasterNodeOperationRequest<PutIndexTemplateRequest> {
+
+ private String name;
+
+ private String cause = "";
+
+ private String template;
+
+ private int order;
+
+ private boolean create;
+
+ private Settings settings = EMPTY_SETTINGS;
+
+ private Map<String, String> mappings = newHashMap();
+
+ private Map<String, IndexMetaData.Custom> customs = newHashMap();
+
+ PutIndexTemplateRequest() {
+ }
+
+ /**
+ * Constructs a new put index template request with the provided name.
+ */
+ public PutIndexTemplateRequest(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (name == null) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ if (template == null) {
+ validationException = addValidationError("template is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the name of the index template.
+ */
+ public PutIndexTemplateRequest name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ /**
+ * The name of the index template.
+ */
+ public String name() {
+ return this.name;
+ }
+
+ public PutIndexTemplateRequest template(String template) {
+ this.template = template;
+ return this;
+ }
+
+ public String template() {
+ return this.template;
+ }
+
+ public PutIndexTemplateRequest order(int order) {
+ this.order = order;
+ return this;
+ }
+
+ public int order() {
+ return this.order;
+ }
+
+ /**
+ * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
+ * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
+ */
+ public PutIndexTemplateRequest create(boolean create) {
+ this.create = create;
+ return this;
+ }
+
+ public boolean create() {
+ return create;
+ }
+
+ /**
+ * The settings to create the index template with.
+ */
+ public PutIndexTemplateRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with.
+ */
+ public PutIndexTemplateRequest settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with (either json/yaml/properties format).
+ */
+ public PutIndexTemplateRequest settings(String source) {
+ this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with (either json/yaml/properties format).
+ */
+ public PutIndexTemplateRequest settings(Map<String, Object> source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ settings(builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ return this;
+ }
+
+ Settings settings() {
+ return this.settings;
+ }
+
+ /**
+ * Adds a mapping that will be applied when a matching index is created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public PutIndexTemplateRequest mapping(String type, String source) {
+ mappings.put(type, source);
+ return this;
+ }
+
+ /**
+ * The cause for this index template creation.
+ */
+ public PutIndexTemplateRequest cause(String cause) {
+ this.cause = cause;
+ return this;
+ }
+
+ public String cause() {
+ return this.cause;
+ }
+
+ /**
+ * Adds a mapping that will be applied when a matching index is created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public PutIndexTemplateRequest mapping(String type, XContentBuilder source) {
+ try {
+ mappings.put(type, source.string());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
+ }
+ return this;
+ }
+
+ /**
+ * Adds a mapping that will be applied when a matching index is created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public PutIndexTemplateRequest mapping(String type, Map<String, Object> source) {
+ // wrap the source in a type map if it is not already wrapped in one
+ if (source.size() != 1 || !source.containsKey(type)) {
+ source = MapBuilder.<String, Object>newMapBuilder().put(type, source).map();
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(source);
+ return mapping(type, builder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ Map<String, String> mappings() {
+ return this.mappings;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequest source(XContentBuilder templateBuilder) {
+ try {
+ return source(templateBuilder.bytes());
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to build json for template request", e);
+ }
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequest source(Map templateSource) {
+ Map<String, Object> source = templateSource;
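+ // known top-level keys (template/order/settings/mappings) are applied directly;
+ // anything else is tried as custom index metadata below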
+ for (Map.Entry<String, Object> entry : source.entrySet()) {
+ String name = entry.getKey();
+ if (name.equals("template")) {
+ template(entry.getValue().toString());
+ } else if (name.equals("order")) {
+ order(XContentMapValues.nodeIntegerValue(entry.getValue(), order()));
+ } else if (name.equals("settings")) {
+ if (!(entry.getValue() instanceof Map)) {
+ throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
+ }
+ settings((Map<String, Object>) entry.getValue());
+ } else if (name.equals("mappings")) {
+ Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
+ for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
+ if (!(entry1.getValue() instanceof Map)) {
+ throw new ElasticsearchIllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping");
+ }
+ mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
+ }
+ } else {
+ // maybe custom?
+ IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name);
+ if (factory != null) {
+ try {
+ customs.put(name, factory.fromMap((Map<String, Object>) entry.getValue()));
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]");
+ }
+ }
+ }
+ }
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequest source(String templateSource) {
+ try {
+ return source(XContentFactory.xContent(templateSource).createParser(templateSource).mapOrderedAndClose());
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse template source [" + templateSource + "]", e);
+ }
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequest source(byte[] source) {
+ return source(source, 0, source.length);
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequest source(byte[] source, int offset, int length) {
+ try {
+ return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
+ }
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequest source(BytesReference source) {
+ try {
+ return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
+ }
+ }
+
+ public PutIndexTemplateRequest custom(IndexMetaData.Custom custom) {
+ customs.put(custom.type(), custom);
+ return this;
+ }
+
+ Map<String, IndexMetaData.Custom> customs() {
+ return this.customs;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ cause = in.readString();
+ name = in.readString();
+ template = in.readString();
+ order = in.readInt();
+ create = in.readBoolean();
+ settings = readSettingsFromStream(in);
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ mappings.put(in.readString(), in.readString());
+ }
+ int customSize = in.readVInt();
+ for (int i = 0; i < customSize; i++) {
+ String type = in.readString();
+ IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
+ customs.put(type, customIndexMetaData);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(cause);
+ out.writeString(name);
+ out.writeString(template);
+ out.writeInt(order);
+ out.writeBoolean(create);
+ writeSettingsToStream(settings, out);
+ out.writeVInt(mappings.size());
+ for (Map.Entry<String, String> entry : mappings.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ out.writeVInt(customs.size());
+ for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
+ out.writeString(entry.getKey());
+ IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
new file mode 100644
index 0000000..fc52e2a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.put;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.util.Map;
+
+/**
+ * A request builder for putting (creating or updating) an index template.
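+ *
+ * <p>A minimal usage sketch; the {@code client} and the template pattern below are
+ * assumed for illustration:
+ * <pre>
+ * PutIndexTemplateResponse response =
+ *         new PutIndexTemplateRequestBuilder(client.admin().indices(), "my_template")
+ *                 .setTemplate("logs-*")
+ *                 .setOrder(1)
+ *                 .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ *                 .execute().actionGet();
+ * </pre>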
+ */
+public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {
+
+ public PutIndexTemplateRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new PutIndexTemplateRequest());
+ }
+
+ public PutIndexTemplateRequestBuilder(IndicesAdminClient indicesClient, String name) {
+ super((InternalIndicesAdminClient) indicesClient, new PutIndexTemplateRequest(name));
+ }
+
+ /**
+ * Sets the index name pattern that newly created indices are matched against.
+ */
+ public PutIndexTemplateRequestBuilder setTemplate(String template) {
+ request.template(template);
+ return this;
+ }
+
+ /**
+ * Sets the order of this template if more than one template matches.
+ */
+ public PutIndexTemplateRequestBuilder setOrder(int order) {
+ request.order(order);
+ return this;
+ }
+
+ /**
+ * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
+ * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
+ */
+ public PutIndexTemplateRequestBuilder setCreate(boolean create) {
+ request.create(create);
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with.
+ */
+ public PutIndexTemplateRequestBuilder setSettings(Settings settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with.
+ */
+ public PutIndexTemplateRequestBuilder setSettings(Settings.Builder settings) {
+ request.settings(settings);
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with (either json/yaml/properties format).
+ */
+ public PutIndexTemplateRequestBuilder setSettings(String source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * The settings to create the index template with (either json/yaml/properties format).
+ */
+ public PutIndexTemplateRequestBuilder setSettings(Map<String, Object> source) {
+ request.settings(source);
+ return this;
+ }
+
+ /**
+ * Adds a mapping that will be applied when an index matching the template is created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public PutIndexTemplateRequestBuilder addMapping(String type, String source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+ * The cause for this index template creation.
+ */
+ public PutIndexTemplateRequestBuilder cause(String cause) {
+ request.cause(cause);
+ return this;
+ }
+
+ /**
+ * Adds a mapping that will be applied when an index matching the template is created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public PutIndexTemplateRequestBuilder addMapping(String type, XContentBuilder source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+ * Adds a mapping that will be applied when a matching index is created.
+ *
+ * @param type The mapping type
+ * @param source The mapping source
+ */
+ public PutIndexTemplateRequestBuilder addMapping(String type, Map<String, Object> source) {
+ request.mapping(type, source);
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequestBuilder setSource(XContentBuilder templateBuilder) {
+ request.source(templateBuilder);
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequestBuilder setSource(Map templateSource) {
+ request.source(templateSource);
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequestBuilder setSource(String templateSource) {
+ request.source(templateSource);
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource) {
+ request.source(templateSource);
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequestBuilder setSource(byte[] templateSource) {
+ request.source(templateSource);
+ return this;
+ }
+
+ /**
+ * The template source definition.
+ */
+ public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, int offset, int length) {
+ request.source(templateSource, offset, length);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<PutIndexTemplateResponse> listener) {
+ ((IndicesAdminClient) client).putTemplate(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java
new file mode 100644
index 0000000..5953642
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.put;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for a put index template action.
+ */
+public class PutIndexTemplateResponse extends AcknowledgedResponse {
+
+ PutIndexTemplateResponse() {
+ }
+
+ PutIndexTemplateResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
new file mode 100644
index 0000000..9762094
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.template.put;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Put index template action.
+ */
+public class TransportPutIndexTemplateAction extends TransportMasterNodeOperationAction<PutIndexTemplateRequest, PutIndexTemplateResponse> {
+
+ private final MetaDataIndexTemplateService indexTemplateService;
+
+ @Inject
+ public TransportPutIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.indexTemplateService = indexTemplateService;
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away...
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return PutIndexTemplateAction.NAME;
+ }
+
+ @Override
+ protected PutIndexTemplateRequest newRequest() {
+ return new PutIndexTemplateRequest();
+ }
+
+ @Override
+ protected PutIndexTemplateResponse newResponse() {
+ return new PutIndexTemplateResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(PutIndexTemplateRequest request, ClusterState state) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
+ }
+
+ @Override
+ protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, final ActionListener<PutIndexTemplateResponse> listener) throws ElasticsearchException {
+ String cause = request.cause();
+ if (cause.length() == 0) {
+ cause = "api";
+ }
+
+ indexTemplateService.putTemplate(new MetaDataIndexTemplateService.PutRequest(cause, request.name())
+ .template(request.template())
+ .order(request.order())
+ .settings(request.settings())
+ .mappings(request.mappings())
+ .customs(request.customs())
+ .create(request.create())
+ .masterTimeout(request.masterNodeTimeout()),
+
+ new MetaDataIndexTemplateService.PutListener() {
+ @Override
+ public void onResponse(MetaDataIndexTemplateService.PutResponse response) {
+ listener.onResponse(new PutIndexTemplateResponse(response.acknowledged()));
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.debug("failed to delete template [{}]", t, request.name());
+ listener.onFailure(t);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java
new file mode 100644
index 0000000..ea145ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ * The validation outcome for a single index: whether the query was valid, plus an
+ * optional explanation or error message.
+ */
+public class QueryExplanation implements Streamable {
+
+ private String index;
+
+ private boolean valid;
+
+ private String explanation;
+
+ private String error;
+
+ QueryExplanation() {
+
+ }
+
+ public QueryExplanation(String index, boolean valid, String explanation, String error) {
+ this.index = index;
+ this.valid = valid;
+ this.explanation = explanation;
+ this.error = error;
+ }
+
+ public String getIndex() {
+ return this.index;
+ }
+
+ public boolean isValid() {
+ return this.valid;
+ }
+
+ public String getError() {
+ return this.error;
+ }
+
+ public String getExplanation() {
+ return this.explanation;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readString();
+ valid = in.readBoolean();
+ explanation = in.readOptionalString();
+ error = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeBoolean(valid);
+ out.writeOptionalString(explanation);
+ out.writeOptionalString(error);
+ }
+
+ public static QueryExplanation readQueryExplanation(StreamInput in) throws IOException {
+ QueryExplanation exp = new QueryExplanation();
+ exp.readFrom(in);
+ return exp;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java
new file mode 100644
index 0000000..d4d13ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Internal validate request executed directly against a specific index shard.
+ */
+class ShardValidateQueryRequest extends BroadcastShardOperationRequest {
+
+ private BytesReference source;
+ private String[] types = Strings.EMPTY_ARRAY;
+ private boolean explain;
+ private long nowInMillis;
+
+ @Nullable
+ private String[] filteringAliases;
+
+ ShardValidateQueryRequest() {
+
+ }
+
+ public ShardValidateQueryRequest(String index, int shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) {
+ super(index, shardId, request);
+ this.source = request.source();
+ this.types = request.types();
+ this.explain = request.explain();
+ this.filteringAliases = filteringAliases;
+ this.nowInMillis = request.nowInMillis;
+ }
+
+ public BytesReference source() {
+ return source;
+ }
+
+ public String[] types() {
+ return this.types;
+ }
+
+ public boolean explain() {
+ return this.explain;
+ }
+
+ public String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ public long nowInMillis() {
+ return this.nowInMillis;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ source = in.readBytesReference();
+
+ int typesSize = in.readVInt();
+ if (typesSize > 0) {
+ types = new String[typesSize];
+ for (int i = 0; i < typesSize; i++) {
+ types[i] = in.readString();
+ }
+ }
+ int aliasesSize = in.readVInt();
+ if (aliasesSize > 0) {
+ filteringAliases = new String[aliasesSize];
+ for (int i = 0; i < aliasesSize; i++) {
+ filteringAliases[i] = in.readString();
+ }
+ }
+
+ explain = in.readBoolean();
+ nowInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(source);
+
+ out.writeVInt(types.length);
+ for (String type : types) {
+ out.writeString(type);
+ }
+ if (filteringAliases != null) {
+ out.writeVInt(filteringAliases.length);
+ for (String alias : filteringAliases) {
+ out.writeString(alias);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+
+ out.writeBoolean(explain);
+ out.writeVLong(nowInMillis);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java
new file mode 100644
index 0000000..e3522f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Internal validate response of a shard validate request executed directly against a specific shard.
+ */
+class ShardValidateQueryResponse extends BroadcastShardOperationResponse {
+
+ private boolean valid;
+
+ private String explanation;
+
+ private String error;
+
+ ShardValidateQueryResponse() {
+
+ }
+
+ public ShardValidateQueryResponse(String index, int shardId, boolean valid, String explanation, String error) {
+ super(index, shardId);
+ this.valid = valid;
+ this.explanation = explanation;
+ this.error = error;
+ }
+
+ public boolean isValid() {
+ return this.valid;
+ }
+
+ public String getExplanation() {
+ return explanation;
+ }
+
+ public String getError() {
+ return error;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ valid = in.readBoolean();
+ explanation = in.readOptionalString();
+ error = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(valid);
+ out.writeOptionalString(explanation);
+ out.writeOptionalString(error);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
new file mode 100644
index 0000000..57fc187
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.internal.DefaultSearchContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Transport action that broadcasts a validate query request to one shard copy per
+ * index and merges the per-shard results into a single response.
+ */
+public class TransportValidateQueryAction extends TransportBroadcastOperationAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {
+
+ private final IndicesService indicesService;
+
+ private final ScriptService scriptService;
+
+ private final CacheRecycler cacheRecycler;
+
+ private final PageCacheRecycler pageCacheRecycler;
+
+ @Inject
+ public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.scriptService = scriptService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ }
+
+ @Override
+ protected void doExecute(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
+ request.nowInMillis = System.currentTimeMillis();
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+
+ @Override
+ protected String transportAction() {
+ return ValidateQueryAction.NAME;
+ }
+
+ @Override
+ protected ValidateQueryRequest newRequest() {
+ return new ValidateQueryRequest();
+ }
+
+ @Override
+ protected ShardValidateQueryRequest newShardRequest() {
+ return new ShardValidateQueryRequest();
+ }
+
+ @Override
+ protected ShardValidateQueryRequest newShardRequest(ShardRouting shard, ValidateQueryRequest request) {
+ String[] filteringAliases = clusterService.state().metaData().filteringAliases(shard.index(), request.indices());
+ return new ShardValidateQueryRequest(shard.index(), shard.id(), filteringAliases, request);
+ }
+
+ @Override
+ protected ShardValidateQueryResponse newShardResponse() {
+ return new ShardValidateQueryResponse();
+ }
+
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) {
+ // Use a random routing value to limit the request to a single shard copy per index, chosen at random
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(Integer.toString(ThreadLocalRandom.current().nextInt(1000)), request.indices());
+ return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, "_local");
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, ValidateQueryRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, ValidateQueryRequest countRequest, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
+ }
+
+ @Override
+ protected ValidateQueryResponse newResponse(ValidateQueryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ boolean valid = true;
+ List<ShardOperationFailedException> shardFailures = null;
+ List<QueryExplanation> queryExplanations = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // simply ignore non-active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ ShardValidateQueryResponse validateQueryResponse = (ShardValidateQueryResponse) shardResponse;
+ valid = valid && validateQueryResponse.isValid();
+ if (request.explain()) {
+ if (queryExplanations == null) {
+ queryExplanations = newArrayList();
+ }
+ queryExplanations.add(new QueryExplanation(
+ validateQueryResponse.getIndex(),
+ validateQueryResponse.isValid(),
+ validateQueryResponse.getExplanation(),
+ validateQueryResponse.getError()
+ ));
+ }
+ successfulShards++;
+ }
+ }
+ return new ValidateQueryResponse(valid, queryExplanations, shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) throws ElasticsearchException {
+ IndexQueryParserService queryParserService = indicesService.indexServiceSafe(request.index()).queryParserService();
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(request.shardId());
+
+ boolean valid;
+ String explanation = null;
+ String error = null;
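+ // an empty source is trivially valid; otherwise the query is parsed inside a
+ // temporary search context so parsing failures can be reported per shard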
+ if (request.source().length() == 0) {
+ valid = true;
+ } else {
+ SearchContext.setCurrent(new DefaultSearchContext(0,
+ new ShardSearchRequest().types(request.types()).nowInMillis(request.nowInMillis()),
+ null, indexShard.acquireSearcher("validate_query"), indexService, indexShard,
+ scriptService, cacheRecycler, pageCacheRecycler));
+ try {
+ ParsedQuery parsedQuery = queryParserService.parseQuery(request.source());
+ valid = true;
+ if (request.explain()) {
+ explanation = parsedQuery.query().toString();
+ }
+ } catch (QueryParsingException e) {
+ valid = false;
+ error = e.getDetailedMessage();
+ } catch (AssertionError e) {
+ valid = false;
+ error = e.getMessage();
+ } finally {
+ SearchContext.current().release();
+ SearchContext.removeCurrent();
+ }
+ }
+ return new ShardValidateQueryResponse(request.index(), request.shardId(), valid, explanation, error);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java
new file mode 100644
index 0000000..8bfcc3e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * The action type for validating a query against one or more indices.
+ */
+public class ValidateQueryAction extends IndicesAction<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {
+
+ public static final ValidateQueryAction INSTANCE = new ValidateQueryAction();
+ public static final String NAME = "indices/validate/query";
+
+ private ValidateQueryAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ValidateQueryResponse newResponse() {
+ return new ValidateQueryResponse();
+ }
+
+ @Override
+ public ValidateQueryRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new ValidateQueryRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
new file mode 100644
index 0000000..161898f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+/**
+ * A request to validate a specific query.
+ * <p>The request requires the query source to be set either using {@link #source(QuerySourceBuilder)},
+ * or {@link #source(byte[])}.
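+ *
+ * <p>A minimal usage sketch; {@code QueryBuilders.termQuery} is the standard query
+ * builder helper, assumed here for illustration:
+ * <pre>
+ * ValidateQueryRequest request = new ValidateQueryRequest("logs")
+ *         .source(new QuerySourceBuilder().setQuery(QueryBuilders.termQuery("user", "kimchy")));
+ * request.explain(true);
+ * </pre>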
+ */
+public class ValidateQueryRequest extends BroadcastOperationRequest<ValidateQueryRequest> {
+
+ private static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ private BytesReference source;
+ private boolean sourceUnsafe;
+
+ private boolean explain;
+
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ long nowInMillis;
+
+ ValidateQueryRequest() {
+ this(Strings.EMPTY_ARRAY);
+ }
+
+ /**
+ * Constructs a new validate request against the provided indices. No indices provided means it will
+ * run against all indices.
+ */
+ public ValidateQueryRequest(String... indices) {
+ super(indices);
+ indicesOptions(IndicesOptions.fromOptions(false, false, true, false));
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return super.validate();
+ }
+
+ @Override
+ protected void beforeStart() {
+ if (sourceUnsafe) {
+ source = source.copyBytesArray();
+ sourceUnsafe = false;
+ }
+ }
+
+ /**
+ * The source to execute.
+ */
+ BytesReference source() {
+ return source;
+ }
+
+ public ValidateQueryRequest source(QuerySourceBuilder sourceBuilder) {
+ this.source = sourceBuilder.buildAsBytes(contentType);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute in the form of a map.
+ */
+ public ValidateQueryRequest source(Map source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ public ValidateQueryRequest source(XContentBuilder builder) {
+ this.source = builder.bytes();
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The query source to validate. It is preferable to use either {@link #source(byte[])}
+ * or {@link #source(QuerySourceBuilder)}.
+ */
+ public ValidateQueryRequest source(String source) {
+ this.source = new BytesArray(source);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to validate.
+ */
+ public ValidateQueryRequest source(byte[] source) {
+ return source(source, 0, source.length, false);
+ }
+
+ /**
+ * The source to validate.
+ */
+ public ValidateQueryRequest source(byte[] source, int offset, int length, boolean unsafe) {
+ return source(new BytesArray(source, offset, length), unsafe);
+ }
+
+ /**
+ * The source to validate.
+ */
+ public ValidateQueryRequest source(BytesReference source, boolean unsafe) {
+ this.source = source;
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public String[] types() {
+ return this.types;
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public ValidateQueryRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ /**
+     * Sets whether detailed information about the query should be returned.
+ */
+ public void explain(boolean explain) {
+ this.explain = explain;
+ }
+
+ /**
+     * Indicates whether detailed information about the query is requested.
+ */
+ public boolean explain() {
+ return explain;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+
+ sourceUnsafe = false;
+ source = in.readBytesReference();
+
+ int typesSize = in.readVInt();
+ if (typesSize > 0) {
+ types = new String[typesSize];
+ for (int i = 0; i < typesSize; i++) {
+ types[i] = in.readString();
+ }
+ }
+
+ explain = in.readBoolean();
+
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+
+ out.writeBytesReference(source);
+
+ out.writeVInt(types.length);
+ for (String type : types) {
+ out.writeString(type);
+ }
+
+ out.writeBoolean(explain);
+ }
+
+ @Override
+ public String toString() {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(source, false);
+ } catch (Exception e) {
+ // ignore
+ }
+ return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", source[" + sSource + "], explain:" + explain;
+ }
+}
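
A minimal usage sketch for the request class above (not part of the patch itself), assuming an existing Client named `client` and placeholder index, type, and query values:

    // Build and execute a validate request directly (hypothetical values).
    ValidateQueryRequest request = new ValidateQueryRequest("index1", "index2");
    request.types("doc");      // restrict validation to the "doc" type
    request.explain(true);     // ask for per-index explanations
    request.source(new QuerySourceBuilder().setQuery(QueryBuilders.termQuery("user", "kimchy")));
    ValidateQueryResponse response = client.admin().indices().validateQuery(request).actionGet();
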
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
new file mode 100644
index 0000000..ec909dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.query.QueryBuilder;
+
+/**
+ * A builder for {@link ValidateQueryRequest}.
+ */
+public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {
+
+ private QuerySourceBuilder sourceBuilder;
+
+ public ValidateQueryRequestBuilder(IndicesAdminClient client) {
+ super((InternalIndicesAdminClient) client, new ValidateQueryRequest());
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public ValidateQueryRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * The query source to validate.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public ValidateQueryRequestBuilder setQuery(QueryBuilder queryBuilder) {
+ sourceBuilder().setQuery(queryBuilder);
+ return this;
+ }
+
+ /**
+ * The source to validate.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public ValidateQueryRequestBuilder setSource(BytesReference source) {
+ request().source(source, false);
+ return this;
+ }
+
+ /**
+ * The source to validate.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public ValidateQueryRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request().source(source, unsafe);
+ return this;
+ }
+
+ /**
+ * The source to validate.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public ValidateQueryRequestBuilder setSource(byte[] source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Indicates if detailed information about the query should be returned.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public ValidateQueryRequestBuilder setExplain(boolean explain) {
+ request.explain(explain);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ValidateQueryResponse> listener) {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder);
+ }
+
+ ((IndicesAdminClient) client).validateQuery(request, listener);
+ }
+
+ private QuerySourceBuilder sourceBuilder() {
+ if (sourceBuilder == null) {
+ sourceBuilder = new QuerySourceBuilder();
+ }
+ return sourceBuilder;
+ }
+}
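
The fluent equivalent via the builder above, again a sketch with placeholder values; prepareValidateQuery is the IndicesAdminClient entry point that constructs this builder:

    // Validate a deliberately malformed query_string query and request explanations.
    ValidateQueryResponse response = client.admin().indices()
            .prepareValidateQuery("index1")
            .setTypes("doc")
            .setQuery(QueryBuilders.queryString("user:kimchy AND"))  // trailing AND -> invalid
            .setExplain(true)
            .execute().actionGet();
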
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java
new file mode 100644
index 0000000..2996834
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.validate.query;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.action.admin.indices.validate.query.QueryExplanation.readQueryExplanation;
+
+/**
+ * The response of the validate action.
+ */
+public class ValidateQueryResponse extends BroadcastOperationResponse {
+
+ private boolean valid;
+
+ private List<QueryExplanation> queryExplanations;
+
+ ValidateQueryResponse() {
+
+ }
+
+ ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.valid = valid;
+ this.queryExplanations = queryExplanations;
+ if (queryExplanations == null) {
+ this.queryExplanations = ImmutableList.of();
+ }
+ }
+
+ /**
+ * A boolean denoting whether the query is valid.
+ */
+ public boolean isValid() {
+ return valid;
+ }
+
+ /**
+ * The list of query explanations.
+ */
+ public List<? extends QueryExplanation> getQueryExplanation() {
+ if (queryExplanations == null) {
+ return ImmutableList.of();
+ }
+ return queryExplanations;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ valid = in.readBoolean();
+ int size = in.readVInt();
+ if (size > 0) {
+ queryExplanations = new ArrayList<QueryExplanation>(size);
+ for (int i = 0; i < size; i++) {
+ queryExplanations.add(readQueryExplanation(in));
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(valid);
+ out.writeVInt(queryExplanations.size());
+ for (QueryExplanation exp : queryExplanations) {
+ exp.writeTo(out);
+ }
+
+ }
+}
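
Consuming the response (sketch): the explanations list is only populated when explain(true) was set on the request, and QueryExplanation's getIndex()/getError() accessors are assumed here:

    if (!response.isValid()) {
        for (QueryExplanation explanation : response.getQueryExplanation()) {
            // one entry per index; getError() carries the parse failure, if any
            System.out.println(explanation.getIndex() + ": " + explanation.getError());
        }
    }
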
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/package-info.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/package-info.java
new file mode 100644
index 0000000..118e31c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Validate action.
+ */
+package org.elasticsearch.action.admin.indices.validate.query; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java
new file mode 100644
index 0000000..8473129
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.delete;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action definition for deleting index warmers.
+ */
+public class DeleteWarmerAction extends IndicesAction<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
+
+ public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction();
+ public static final String NAME = "indices/warmer/delete";
+
+ private DeleteWarmerAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteWarmerResponse newResponse() {
+ return new DeleteWarmerResponse();
+ }
+
+ @Override
+ public DeleteWarmerRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new DeleteWarmerRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java
new file mode 100644
index 0000000..5e336b4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.warmer.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.CollectionUtils;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to delete an index warmer.
+ */
+public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> {
+
+ private String[] names = Strings.EMPTY_ARRAY;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
+ private String[] indices = Strings.EMPTY_ARRAY;
+
+ DeleteWarmerRequest() {
+ }
+
+ /**
+     * Constructs a new delete warmer request for the specified names.
+     *
+     * @param names the names (or wildcard expressions) of the warmers to match; null matches all warmers
+ */
+ public DeleteWarmerRequest(String... names) {
+ names(names);
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (CollectionUtils.isEmpty(names)) {
+ validationException = addValidationError("warmer names are missing", validationException);
+ } else {
+ validationException = checkForEmptyString(validationException, names);
+ }
+ if (CollectionUtils.isEmpty(indices)) {
+ validationException = addValidationError("indices are missing", validationException);
+ } else {
+ validationException = checkForEmptyString(validationException, indices);
+ }
+ return validationException;
+ }
+
+ private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
+ boolean containsEmptyString = false;
+ for (String string : strings) {
+ if (!Strings.hasText(string)) {
+ containsEmptyString = true;
+ }
+ }
+ if (containsEmptyString) {
+ validationException = addValidationError("types must not contain empty strings", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+     * The names of the warmers to delete.
+ */
+ @Nullable
+ String[] names() {
+ return names;
+ }
+
+ /**
+ * The name (or wildcard expression) of the index warmer to delete, or null
+ * to delete all warmers.
+ */
+ public DeleteWarmerRequest names(@Nullable String... names) {
+ this.names = names;
+ return this;
+ }
+
+ /**
+     * Sets the indices this delete warmer operation will execute on.
+ */
+ public DeleteWarmerRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+     * The indices the warmers will be deleted from.
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public DeleteWarmerRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ names = in.readStringArray();
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(names);
+ out.writeStringArrayNullable(indices);
+ indicesOptions.writeIndicesOptions(out);
+ writeTimeout(out);
+ }
+}
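
A sketch of driving this request directly, assuming an existing `client` and placeholder names: delete every warmer whose name starts with "warm" on two indices:

    DeleteWarmerRequest request = new DeleteWarmerRequest("warm*")
            .indices("index1", "index2");
    DeleteWarmerResponse response = client.admin().indices().deleteWarmer(request).actionGet();
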
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java
new file mode 100644
index 0000000..01428fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A builder for {@link DeleteWarmerRequest}.
+ */
+public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
+
+ public DeleteWarmerRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new DeleteWarmerRequest());
+ }
+
+ public DeleteWarmerRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * The name (or wildcard expression) of the index warmer to delete, or null
+ * to delete all warmers.
+ */
+ public DeleteWarmerRequestBuilder setNames(String... names) {
+ request.names(names);
+ return this;
+ }
+
+ /**
+     * Specifies how wildcard indices expressions are resolved and which types of
+     * unavailable indices (for example, indices that don't exist) are ignored.
+ */
+ public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) {
+ request.indicesOptions(options);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteWarmerResponse> listener) {
+ ((IndicesAdminClient) client).deleteWarmer(request, listener);
+ }
+}
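
The builder form of the same operation (sketch); isAcknowledged() on the response reports whether all nodes acknowledged the cluster-state change within the ack timeout:

    DeleteWarmerResponse response = client.admin().indices()
            .prepareDeleteWarmer()
            .setIndices("index1")
            .setNames("warm*")
            .execute().actionGet();
    boolean acked = response.isAcknowledged();
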
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java
new file mode 100644
index 0000000..22b5f39
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.warmer.delete;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response for a delete warmer.
+ */
+public class DeleteWarmerResponse extends AcknowledgedResponse {
+
+ DeleteWarmerResponse() {
+ super();
+ }
+
+ DeleteWarmerResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java
new file mode 100644
index 0000000..92dcd07
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.warmer.delete;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.search.warmer.IndexWarmerMissingException;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Delete index warmer.
+ */
+public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAction<DeleteWarmerRequest, DeleteWarmerResponse> {
+
+ @Inject
+ public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteWarmerAction.NAME;
+ }
+
+ @Override
+ protected DeleteWarmerRequest newRequest() {
+ return new DeleteWarmerRequest();
+ }
+
+ @Override
+ protected DeleteWarmerResponse newResponse() {
+ return new DeleteWarmerResponse();
+ }
+
+ @Override
+ protected void doExecute(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener) {
+ // update to concrete indices
+ request.indices(clusterService.state().metaData().concreteIndices(request.indices(), request.indicesOptions()));
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(DeleteWarmerRequest request, ClusterState state) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
+ }
+
+ @Override
+ protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener<DeleteWarmerResponse> listener) throws ElasticsearchException {
+ clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new DeleteWarmerResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new DeleteWarmerResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.timeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.debug("failed to delete warmer [{}] on indices [{}]", t, Arrays.toString(request.names()), request.indices());
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+
+ boolean globalFoundAtLeastOne = false;
+ for (String index : request.indices()) {
+ IndexMetaData indexMetaData = currentState.metaData().index(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
+ if (warmers != null) {
+ List<IndexWarmersMetaData.Entry> entries = Lists.newArrayList();
+ for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
+ boolean keepWarmer = true;
+ for (String warmer : request.names()) {
+ if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals("_all")) {
+ globalFoundAtLeastOne = true;
+ keepWarmer = false;
+ // don't add it...
+ break;
+ }
+ }
+ if (keepWarmer) {
+ entries.add(entry);
+ }
+ }
+ // a change, update it...
+ if (entries.size() != warmers.entries().size()) {
+ warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
+ IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
+ mdBuilder.put(indexBuilder);
+ }
+ }
+ }
+
+ if (!globalFoundAtLeastOne) {
+ throw new IndexWarmerMissingException(request.names());
+ }
+
+ if (logger.isInfoEnabled()) {
+ for (String index : request.indices()) {
+ IndexMetaData indexMetaData = currentState.metaData().index(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
+ if (warmers != null) {
+ for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
+ for (String warmer : request.names()) {
+ if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals("_all")) {
+ logger.info("[{}] delete warmer [{}]", index, entry.name());
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+}
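
The removal test in the cluster-state update above treats each requested name as a simple wildcard pattern, with the literal "_all" as a catch-all. A tiny sketch of the rule, using the same Regex utility and placeholder names:

    String requested = "warm*";
    String entryName = "warmer_1";
    // an entry is dropped when any requested name wildcard-matches it, or equals "_all"
    boolean removeEntry = Regex.simpleMatch(requested, entryName) || requested.equals("_all");
    // removeEntry == true here: "warm*" matches "warmer_1"
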
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java
new file mode 100644
index 0000000..ee8574a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.get;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ * Action definition for retrieving index warmers.
+ */
+public class GetWarmersAction extends IndicesAction<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
+
+ public static final GetWarmersAction INSTANCE = new GetWarmersAction();
+ public static final String NAME = "warmers/get";
+
+ private GetWarmersAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetWarmersRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new GetWarmersRequestBuilder((InternalGenericClient) client);
+ }
+
+ @Override
+ public GetWarmersResponse newResponse() {
+ return new GetWarmersResponse();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java
new file mode 100644
index 0000000..cb31dfc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.get;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to retrieve the warmers registered for one or more indices.
+ */
+public class GetWarmersRequest extends ClusterInfoRequest<GetWarmersRequest> {
+
+ private String[] warmers = Strings.EMPTY_ARRAY;
+
+ public GetWarmersRequest warmers(String[] warmers) {
+ this.warmers = warmers;
+ return this;
+ }
+
+ public String[] warmers() {
+ return warmers;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ warmers = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(warmers);
+ }
+
+}
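
A sketch of using this request directly, with placeholder names and an assumed `client`:

    GetWarmersRequest request = new GetWarmersRequest()
            .warmers(new String[]{"warm*"})   // warmer name patterns
            .indices("index1");               // inherited from ClusterInfoRequest
    GetWarmersResponse response = client.admin().indices().getWarmers(request).actionGet();
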
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java
new file mode 100644
index 0000000..e8ec2cd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.get;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ * A builder for {@link GetWarmersRequest}.
+ */
+public class GetWarmersRequestBuilder extends ClusterInfoRequestBuilder<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
+
+ public GetWarmersRequestBuilder(InternalGenericClient client, String... indices) {
+ super(client, new GetWarmersRequest().indices(indices));
+ }
+
+ public GetWarmersRequestBuilder setWarmers(String... warmers) {
+ request.warmers(warmers);
+ return this;
+ }
+
+ public GetWarmersRequestBuilder addWarmers(String... warmers) {
+ request.warmers(ObjectArrays.concat(request.warmers(), warmers, String.class));
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetWarmersResponse> listener) {
+ ((IndicesAdminClient) client).getWarmers(request, listener);
+ }
+}
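
The builder form (sketch); addWarmers appends to any patterns already set, while setWarmers replaces them:

    GetWarmersResponse response = client.admin().indices()
            .prepareGetWarmers("index1")
            .setWarmers("warm*")
            .execute().actionGet();
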
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java
new file mode 100644
index 0000000..026380a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+
+import java.io.IOException;
+
+/**
+ * Holds the warmers registered per index, as returned by the get warmers action.
+ */
+public class GetWarmersResponse extends ActionResponse {
+
+ private ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
+
+ GetWarmersResponse(ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers) {
+ this.warmers = warmers;
+ }
+
+ GetWarmersResponse() {
+ }
+
+ public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers() {
+ return warmers;
+ }
+
+ public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> getWarmers() {
+ return warmers();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder();
+ for (int i = 0; i < size; i++) {
+ String key = in.readString();
+ int valueSize = in.readVInt();
+ ImmutableList.Builder<IndexWarmersMetaData.Entry> warmerEntryBuilder = ImmutableList.builder();
+ for (int j = 0; j < valueSize; j++) {
+ warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
+ in.readString(),
+ in.readStringArray(),
+ in.readBytesReference())
+ );
+ }
+ indexMapBuilder.put(key, warmerEntryBuilder.build());
+ }
+ warmers = indexMapBuilder.build();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(warmers.size());
+ for (ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
+ out.writeString(indexEntry.key);
+ out.writeVInt(indexEntry.value.size());
+ for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
+ out.writeString(warmerEntry.name());
+ out.writeStringArray(warmerEntry.types());
+ out.writeBytesReference(warmerEntry.source());
+ }
+ }
+ }
+}
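
Iterating the per-index warmer map in the response (sketch): ImmutableOpenMap is traversed through ObjectObjectCursor rather than Map.Entry:

    for (ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> index : response.getWarmers()) {
        for (IndexWarmersMetaData.Entry warmer : index.value) {
            System.out.println(index.key + " -> " + warmer.name());
        }
    }
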
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java
new file mode 100644
index 0000000..716f93b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.get;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Get warmers action: reads the registered warmers from the cluster state metadata.
+ */
+public class TransportGetWarmersAction extends TransportClusterInfoAction<GetWarmersRequest, GetWarmersResponse> {
+
+ @Inject
+ public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetWarmersAction.NAME;
+ }
+
+ @Override
+ protected GetWarmersRequest newRequest() {
+ return new GetWarmersRequest();
+ }
+
+ @Override
+ protected GetWarmersResponse newResponse() {
+ return new GetWarmersResponse();
+ }
+
+ @Override
+ protected void doMasterOperation(final GetWarmersRequest request, final ClusterState state, final ActionListener<GetWarmersResponse> listener) throws ElasticsearchException {
+ ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers(
+ request.indices(), request.types(), request.warmers()
+ );
+ listener.onResponse(new GetWarmersResponse(result));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java
new file mode 100644
index 0000000..ee799c8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ * Action definition for registering an index warmer.
+ */
+public class PutWarmerAction extends IndicesAction<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
+
+ public static final PutWarmerAction INSTANCE = new PutWarmerAction();
+ public static final String NAME = "indices/warmer/put";
+
+ private PutWarmerAction() {
+ super(NAME);
+ }
+
+ @Override
+ public PutWarmerResponse newResponse() {
+ return new PutWarmerResponse();
+ }
+
+ @Override
+ public PutWarmerRequestBuilder newRequestBuilder(IndicesAdminClient client) {
+ return new PutWarmerRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java
new file mode 100644
index 0000000..bf11004
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to put a search warmer.
+ */
+public class PutWarmerRequest extends AcknowledgedRequest<PutWarmerRequest> {
+
+ private String name;
+
+ private SearchRequest searchRequest;
+
+ PutWarmerRequest() {
+ }
+
+ /**
+     * Constructs a new put warmer request.
+     *
+     * @param name the name of the warmer
+ */
+ public PutWarmerRequest(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Sets the name of the warmer.
+ */
+ public PutWarmerRequest name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ String name() {
+ return this.name;
+ }
+
+ /**
+ * Sets the search request to warm.
+ */
+ public PutWarmerRequest searchRequest(SearchRequest searchRequest) {
+ this.searchRequest = searchRequest;
+ return this;
+ }
+
+ /**
+ * Sets the search request to warm.
+ */
+ public PutWarmerRequest searchRequest(SearchRequestBuilder searchRequest) {
+ this.searchRequest = searchRequest.request();
+ return this;
+ }
+
+ SearchRequest searchRequest() {
+ return this.searchRequest;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (searchRequest == null) {
+ validationException = addValidationError("search request is missing", validationException);
+ } else {
+ validationException = searchRequest.validate();
+ }
+ if (name == null) {
+ validationException = addValidationError("name is missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ name = in.readString();
+ if (in.readBoolean()) {
+ searchRequest = new SearchRequest();
+ searchRequest.readFrom(in);
+ }
+ readTimeout(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(name);
+ if (searchRequest == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ searchRequest.writeTo(out);
+ }
+ writeTimeout(out);
+ }
+}
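
A sketch of registering a warmer through this request, assuming an existing `client` and placeholder names:

    PutWarmerRequest request = new PutWarmerRequest("warmer_1")
            .searchRequest(client.prepareSearch("index1")
                    .setQuery(QueryBuilders.matchAllQuery()));
    PutWarmerResponse response = client.admin().indices().putWarmer(request).actionGet();
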
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java
new file mode 100644
index 0000000..030f695
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+
+/**
+ * A builder for {@link PutWarmerRequest}.
+ */
+public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
+
+ public PutWarmerRequestBuilder(IndicesAdminClient indicesClient, String name) {
+ super((InternalIndicesAdminClient) indicesClient, new PutWarmerRequest().name(name));
+ }
+
+ public PutWarmerRequestBuilder(IndicesAdminClient indicesClient) {
+ super((InternalIndicesAdminClient) indicesClient, new PutWarmerRequest());
+ }
+
+ /**
+ * Sets the name of the warmer.
+ */
+ public PutWarmerRequestBuilder setName(String name) {
+ request.name(name);
+ return this;
+ }
+
+ /**
+ * Sets the search request to use to warm the index when applicable.
+ */
+ public PutWarmerRequestBuilder setSearchRequest(SearchRequest searchRequest) {
+ request.searchRequest(searchRequest);
+ return this;
+ }
+
+ /**
+ * Sets the search request to use to warm the index when applicable.
+ */
+ public PutWarmerRequestBuilder setSearchRequest(SearchRequestBuilder searchRequest) {
+ request.searchRequest(searchRequest);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<PutWarmerResponse> listener) {
+ ((IndicesAdminClient) client).putWarmer(request, listener);
+ }
+}
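
The builder form (sketch). As the transport action below shows, the search request is executed once up front to verify it runs before the warmer is stored in the cluster state:

    PutWarmerResponse response = client.admin().indices()
            .preparePutWarmer("warmer_1")
            .setSearchRequest(client.prepareSearch("index1")
                    .setQuery(QueryBuilders.termQuery("user", "kimchy")))
            .execute().actionGet();
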
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java
new file mode 100644
index 0000000..d656761
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * The response of put warmer operation.
+ */
+public class PutWarmerResponse extends AcknowledgedResponse {
+
+ PutWarmerResponse() {
+ super();
+ }
+
+ PutWarmerResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ readAcknowledged(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ writeAcknowledged(out);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java
new file mode 100644
index 0000000..db1fb74
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.TransportSearchAction;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Put warmer action.
+ */
+public class TransportPutWarmerAction extends TransportMasterNodeOperationAction<PutWarmerRequest, PutWarmerResponse> {
+
+ private final TransportSearchAction searchAction;
+
+ @Inject
+ public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ TransportSearchAction searchAction) {
+ super(settings, transportService, clusterService, threadPool);
+ this.searchAction = searchAction;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected String transportAction() {
+ return PutWarmerAction.NAME;
+ }
+
+ @Override
+ protected PutWarmerRequest newRequest() {
+ return new PutWarmerRequest();
+ }
+
+ @Override
+ protected PutWarmerResponse newResponse() {
+ return new PutWarmerResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) {
+        String[] concreteIndices = state.metaData().concreteIndices(request.searchRequest().indices(), request.searchRequest().indicesOptions());
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
+ }
+
+ @Override
+ protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener<PutWarmerResponse> listener) throws ElasticsearchException {
+        // first execute the search request to verify that it is ok...
+ searchAction.execute(request.searchRequest(), new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse searchResponse) {
+ if (searchResponse.getFailedShards() > 0) {
+ listener.onFailure(new ElasticsearchException("search failed with failed shards: " + Arrays.toString(searchResponse.getShardFailures())));
+ return;
+ }
+
+ clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask() {
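+                // Acknowledgement flow: every node must ack (mustAck returns true for all);
+                // if all nodes ack within ackTimeout() the listener gets acknowledged=true,
+                // otherwise onAckTimeout() answers with acknowledged=false.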
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new PutWarmerResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new PutWarmerResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.timeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.debug("failed to put warmer [{}] on indices [{}]", t, request.name(), request.searchRequest().indices());
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData metaData = currentState.metaData();
+ String[] concreteIndices = metaData.concreteIndices(request.searchRequest().indices(), request.searchRequest().indicesOptions());
+
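+                            // prefer the explicit search source; fall back to extraSource (additional
+                            // source, e.g. supplied via URI parameters -- an assumption about typical callers)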
+ BytesReference source = null;
+ if (request.searchRequest().source() != null && request.searchRequest().source().length() > 0) {
+ source = request.searchRequest().source();
+ } else if (request.searchRequest().extraSource() != null && request.searchRequest().extraSource().length() > 0) {
+ source = request.searchRequest().extraSource();
+ }
+
+ // now replace it on the metadata
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+
+ for (String index : concreteIndices) {
+ IndexMetaData indexMetaData = metaData.index(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
+ if (warmers == null) {
+ logger.info("[{}] putting warmer [{}]", index, request.name());
+ warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), source));
+ } else {
+ boolean found = false;
+ List<IndexWarmersMetaData.Entry> entries = new ArrayList<IndexWarmersMetaData.Entry>(warmers.entries().size() + 1);
+ for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
+ if (entry.name().equals(request.name())) {
+ found = true;
+ entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), source));
+ } else {
+ entries.add(entry);
+ }
+ }
+ if (!found) {
+ logger.info("[{}] put warmer [{}]", index, request.name());
+ entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), source));
+ } else {
+ logger.info("[{}] update warmer [{}]", index, request.name());
+ }
+ warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
+ }
+ IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
+ mdBuilder.put(indexBuilder);
+ }
+
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/package-info.java b/src/main/java/org/elasticsearch/action/admin/package-info.java
new file mode 100644
index 0000000..ea226b5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Administrative Actions.
+ */
+package org.elasticsearch.action.admin; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkAction.java b/src/main/java/org/elasticsearch/action/bulk/BulkAction.java
new file mode 100644
index 0000000..59e2826
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkAction.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.TransportRequestOptions;
+
+/**
+ */
+public class BulkAction extends Action<BulkRequest, BulkResponse, BulkRequestBuilder> {
+
+ public static final BulkAction INSTANCE = new BulkAction();
+ public static final String NAME = "bulk";
+
+ private BulkAction() {
+ super(NAME);
+ }
+
+ @Override
+ public BulkResponse newResponse() {
+ return new BulkResponse();
+ }
+
+ @Override
+ public BulkRequestBuilder newRequestBuilder(Client client) {
+ return new BulkRequestBuilder(client);
+ }
+
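+    // Note: compression of bulk transport requests can be disabled via configuration,
+    // e.g. "action.bulk.compress: false" in elasticsearch.yml (the default is true,
+    // as read below).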
+ @Override
+ public TransportRequestOptions transportOptions(Settings settings) {
+ return TransportRequestOptions.options()
+ .withType(TransportRequestOptions.Type.BULK)
+ .withCompress(settings.getAsBoolean("action.bulk.compress", true)
+ );
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java
new file mode 100644
index 0000000..02709e8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class BulkItemRequest implements Streamable {
+
+ private int id;
+
+ private ActionRequest request;
+
+ BulkItemRequest() {
+
+ }
+
+ public BulkItemRequest(int id, ActionRequest request) {
+ this.id = id;
+ this.request = request;
+ }
+
+ public int id() {
+ return id;
+ }
+
+ public ActionRequest request() {
+ return request;
+ }
+
+ public static BulkItemRequest readBulkItem(StreamInput in) throws IOException {
+ BulkItemRequest item = new BulkItemRequest();
+ item.readFrom(in);
+ return item;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ id = in.readVInt();
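+        // type discriminator byte: 0 = index, 1 = delete, 2 = update (mirrored in writeTo)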
+ byte type = in.readByte();
+ if (type == 0) {
+ request = new IndexRequest();
+ } else if (type == 1) {
+ request = new DeleteRequest();
+ } else if (type == 2) {
+ request = new UpdateRequest();
+ }
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(id);
+ if (request instanceof IndexRequest) {
+ out.writeByte((byte) 0);
+ } else if (request instanceof DeleteRequest) {
+ out.writeByte((byte) 1);
+ } else if (request instanceof UpdateRequest) {
+ out.writeByte((byte) 2);
+ }
+ request.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
new file mode 100644
index 0000000..7e813c3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
@@ -0,0 +1,302 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+
+/**
+ * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
+ * of the relevant action, and whether it failed (with the failure message in case it did).
+ */
+public class BulkItemResponse implements Streamable {
+
+ /**
+ * Represents a failure.
+ */
+ public static class Failure {
+ private final String index;
+ private final String type;
+ private final String id;
+ private final String message;
+ private final RestStatus status;
+
+ public Failure(String index, String type, String id, Throwable t) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.message = ExceptionsHelper.detailedMessage(t);
+ this.status = ExceptionsHelper.status(t);
+ }
+
+
+ public Failure(String index, String type, String id, String message, RestStatus status) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.message = message;
+ this.status = status;
+ }
+
+ /**
+ * The index name of the action.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The type of the action.
+ */
+ public String getType() {
+ return type;
+ }
+
+ /**
+ * The id of the action.
+ */
+ public String getId() {
+ return id;
+ }
+
+ /**
+ * The failure message.
+ */
+ public String getMessage() {
+ return this.message;
+ }
+
+ /**
+ * The rest status.
+ */
+ public RestStatus getStatus() {
+ return this.status;
+ }
+ }
+
+ private int id;
+
+ private String opType;
+
+ private ActionResponse response;
+
+ private Failure failure;
+
+ BulkItemResponse() {
+
+ }
+
+ public BulkItemResponse(int id, String opType, ActionResponse response) {
+ this.id = id;
+ this.opType = opType;
+ this.response = response;
+ }
+
+ public BulkItemResponse(int id, String opType, Failure failure) {
+ this.id = id;
+ this.opType = opType;
+ this.failure = failure;
+ }
+
+ /**
+     * The numeric order of the item, matching the order of the corresponding request in the bulk request.
+ */
+ public int getItemId() {
+ return id;
+ }
+
+ /**
+     * The operation type ("index", "create", "update" or "delete").
+ */
+ public String getOpType() {
+ return this.opType;
+ }
+
+ /**
+ * The index name of the action.
+ */
+ public String getIndex() {
+ if (failure != null) {
+ return failure.getIndex();
+ }
+ if (response instanceof IndexResponse) {
+ return ((IndexResponse) response).getIndex();
+ } else if (response instanceof DeleteResponse) {
+ return ((DeleteResponse) response).getIndex();
+ } else if (response instanceof UpdateResponse) {
+ return ((UpdateResponse) response).getIndex();
+ }
+ return null;
+ }
+
+ /**
+ * The type of the action.
+ */
+ public String getType() {
+ if (failure != null) {
+ return failure.getType();
+ }
+ if (response instanceof IndexResponse) {
+ return ((IndexResponse) response).getType();
+ } else if (response instanceof DeleteResponse) {
+ return ((DeleteResponse) response).getType();
+ } else if (response instanceof UpdateResponse) {
+ return ((UpdateResponse) response).getType();
+ }
+ return null;
+ }
+
+ /**
+ * The id of the action.
+ */
+ public String getId() {
+ if (failure != null) {
+ return failure.getId();
+ }
+ if (response instanceof IndexResponse) {
+ return ((IndexResponse) response).getId();
+ } else if (response instanceof DeleteResponse) {
+ return ((DeleteResponse) response).getId();
+ } else if (response instanceof UpdateResponse) {
+ return ((UpdateResponse) response).getId();
+ }
+ return null;
+ }
+
+ /**
+ * The version of the action.
+ */
+ public long getVersion() {
+ if (failure != null) {
+ return -1;
+ }
+ if (response instanceof IndexResponse) {
+ return ((IndexResponse) response).getVersion();
+ } else if (response instanceof DeleteResponse) {
+ return ((DeleteResponse) response).getVersion();
+ } else if (response instanceof UpdateResponse) {
+ return ((UpdateResponse) response).getVersion();
+ }
+ return -1;
+ }
+
+ /**
+     * The actual response ({@link IndexResponse}, {@link DeleteResponse} or {@link UpdateResponse}). <tt>null</tt> in
+ * case of failure.
+ */
+ public <T extends ActionResponse> T getResponse() {
+ return (T) response;
+ }
+
+ /**
+ * Is this a failed execution of an operation.
+ */
+ public boolean isFailed() {
+ return failure != null;
+ }
+
+ /**
+ * The failure message, <tt>null</tt> if it did not fail.
+ */
+ public String getFailureMessage() {
+ if (failure != null) {
+ return failure.getMessage();
+ }
+ return null;
+ }
+
+ /**
+ * The actual failure object if there was a failure.
+ */
+ public Failure getFailure() {
+ return this.failure;
+ }
+
+ public static BulkItemResponse readBulkItem(StreamInput in) throws IOException {
+ BulkItemResponse response = new BulkItemResponse();
+ response.readFrom(in);
+ return response;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ id = in.readVInt();
+ opType = in.readSharedString();
+
+ byte type = in.readByte();
+ if (type == 0) {
+ response = new IndexResponse();
+ response.readFrom(in);
+ } else if (type == 1) {
+ response = new DeleteResponse();
+ response.readFrom(in);
+        } else if (type == 3) { // 3 rather than 2, because 2 is already in use for 'no response'
+ response = new UpdateResponse();
+ response.readFrom(in);
+ }
+
+ if (in.readBoolean()) {
+ String fIndex = in.readSharedString();
+ String fType = in.readSharedString();
+ String fId = in.readOptionalString();
+ String fMessage = in.readString();
+ RestStatus status = RestStatus.readFrom(in);
+ failure = new Failure(fIndex, fType, fId, fMessage, status);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(id);
+ out.writeSharedString(opType);
+
+ if (response == null) {
+ out.writeByte((byte) 2);
+ } else {
+ if (response instanceof IndexResponse) {
+ out.writeByte((byte) 0);
+ } else if (response instanceof DeleteResponse) {
+ out.writeByte((byte) 1);
+ } else if (response instanceof UpdateResponse) {
+                out.writeByte((byte) 3); // 3 rather than 2, because 2 is already in use for 'no response'
+ }
+ response.writeTo(out);
+ }
+ if (failure == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeSharedString(failure.getIndex());
+ out.writeSharedString(failure.getType());
+ out.writeOptionalString(failure.getId());
+ out.writeString(failure.getMessage());
+ RestStatus.writeTo(out, failure.getStatus());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
new file mode 100644
index 0000000..ffb69de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * A bulk processor is a thread safe bulk processing class, allowing to easily set when to "flush" a new bulk request
+ * (based on the number of actions, the request size, or a time interval), and to easily control the number of
+ * concurrent bulk requests allowed to be executed in parallel.
+ * <p/>
+ * In order to create a new bulk processor, use the {@link Builder}.
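+ * <p/>
+ * A minimal usage sketch (the listener bodies, index names and thresholds below are
+ * illustrative assumptions, not prescriptions):
+ * <pre>
+ * BulkProcessor processor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
+ *     public void beforeBulk(long executionId, BulkRequest request) {}
+ *     public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}
+ *     public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
+ * }).setBulkActions(1000)
+ *   .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB))
+ *   .setFlushInterval(TimeValue.timeValueSeconds(5))
+ *   .build();
+ *
+ * processor.add(new IndexRequest("my-index", "my-type", "1").source("{\"field\":\"value\"}"));
+ * processor.close(); // flushes any remaining actions
+ * </pre>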
+ */
+public class BulkProcessor {
+
+ /**
+ * A listener for the execution.
+ */
+ public static interface Listener {
+
+ /**
+ * Callback before the bulk is executed.
+ */
+ void beforeBulk(long executionId, BulkRequest request);
+
+ /**
+ * Callback after a successful execution of bulk request.
+ */
+ void afterBulk(long executionId, BulkRequest request, BulkResponse response);
+
+ /**
+ * Callback after a failed execution of bulk request.
+ */
+ void afterBulk(long executionId, BulkRequest request, Throwable failure);
+ }
+
+ /**
+     * A builder used to build an instance of a bulk processor.
+ */
+ public static class Builder {
+
+ private final Client client;
+ private final Listener listener;
+
+ private String name;
+ private int concurrentRequests = 1;
+ private int bulkActions = 1000;
+ private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+ private TimeValue flushInterval = null;
+
+ /**
+ * Creates a builder of bulk processor with the client to use and the listener that will be used
+ * to be notified on the completion of bulk requests.
+ */
+ public Builder(Client client, Listener listener) {
+ this.client = client;
+ this.listener = listener;
+ }
+
+ /**
+ * Sets an optional name to identify this bulk processor.
+ */
+ public Builder setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ /**
+         * Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single
+         * request will be allowed to be executed, i.e. bulks are executed synchronously. A value of 1 means one
+         * concurrent request is allowed to be executed while new bulk requests are accumulated. Defaults to <tt>1</tt>.
+ */
+ public Builder setConcurrentRequests(int concurrentRequests) {
+ this.concurrentRequests = concurrentRequests;
+ return this;
+ }
+
+ /**
+ * Sets when to flush a new bulk request based on the number of actions currently added. Defaults to
+ * <tt>1000</tt>. Can be set to <tt>-1</tt> to disable it.
+ */
+ public Builder setBulkActions(int bulkActions) {
+ this.bulkActions = bulkActions;
+ return this;
+ }
+
+ /**
+ * Sets when to flush a new bulk request based on the size of actions currently added. Defaults to
+ * <tt>5mb</tt>. Can be set to <tt>-1</tt> to disable it.
+ */
+ public Builder setBulkSize(ByteSizeValue bulkSize) {
+ this.bulkSize = bulkSize;
+ return this;
+ }
+
+ /**
+         * Sets a flush interval that flushes *any* pending bulk actions once the interval elapses. Defaults to not set.
+ * <p/>
+ * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)}
+ * can be set to <tt>-1</tt> with the flush interval set allowing for complete async processing of bulk actions.
+ */
+ public Builder setFlushInterval(TimeValue flushInterval) {
+ this.flushInterval = flushInterval;
+ return this;
+ }
+
+ /**
+ * Builds a new bulk processor.
+ */
+ public BulkProcessor build() {
+ return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
+ }
+ }
+
+ public static Builder builder(Client client, Listener listener) {
+ return new Builder(client, listener);
+ }
+
+ private final Client client;
+ private final Listener listener;
+
+ private final String name;
+
+ private final int concurrentRequests;
+ private final int bulkActions;
+ private final int bulkSize;
+ private final TimeValue flushInterval;
+
+ private final Semaphore semaphore;
+ private final ScheduledThreadPoolExecutor scheduler;
+ private final ScheduledFuture scheduledFuture;
+
+ private final AtomicLong executionIdGen = new AtomicLong();
+
+ private BulkRequest bulkRequest;
+
+ private volatile boolean closed = false;
+
+ BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
+ this.client = client;
+ this.listener = listener;
+ this.name = name;
+ this.concurrentRequests = concurrentRequests;
+ this.bulkActions = bulkActions;
+ this.bulkSize = bulkSize.bytesAsInt();
+
+ this.semaphore = new Semaphore(concurrentRequests);
+ this.bulkRequest = new BulkRequest();
+
+ this.flushInterval = flushInterval;
+ if (flushInterval != null) {
+ this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(((InternalClient) client).settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
+ this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
+ this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
+ this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(), flushInterval.millis(), TimeUnit.MILLISECONDS);
+ } else {
+ this.scheduler = null;
+ this.scheduledFuture = null;
+ }
+ }
+
+ /**
+     * Closes the processor. If flushing by time is enabled, the scheduler is shut down. Any remaining bulk actions are flushed.
+ */
+ public synchronized void close() {
+ if (closed) {
+ return;
+ }
+ closed = true;
+ if (this.scheduledFuture != null) {
+ this.scheduledFuture.cancel(false);
+ this.scheduler.shutdown();
+ }
+ if (bulkRequest.numberOfActions() > 0) {
+ execute();
+ }
+ }
+
+ /**
+     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
+     * (for example, if no id is provided one will be generated, and the create flag is honored).
+ */
+ public BulkProcessor add(IndexRequest request) {
+ return add((ActionRequest) request);
+ }
+
+ /**
+     * Adds a {@link DeleteRequest} to the list of actions to execute.
+ */
+ public BulkProcessor add(DeleteRequest request) {
+ return add((ActionRequest) request);
+ }
+
+ /**
+ * Adds either a delete or an index request.
+ */
+ public BulkProcessor add(ActionRequest request) {
+ return add(request, null);
+ }
+
+ public BulkProcessor add(ActionRequest request, @Nullable Object payload) {
+ internalAdd(request, payload);
+ return this;
+ }
+
+ private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) {
+ bulkRequest.add(request, payload);
+ executeIfNeeded();
+ }
+
+ public BulkProcessor add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
+ return add(data, contentUnsafe, defaultIndex, defaultType, null);
+ }
+
+ public synchronized BulkProcessor add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable Object payload) throws Exception {
+ bulkRequest.add(data, contentUnsafe, defaultIndex, defaultType, null, payload, true);
+ executeIfNeeded();
+ return this;
+ }
+
+ private void executeIfNeeded() {
+ if (closed) {
+            throw new ElasticsearchIllegalStateException("bulk processor already closed");
+ }
+ if (!isOverTheLimit()) {
+ return;
+ }
+ execute();
+ }
+
+ // (currently) needs to be executed under a lock
+ private void execute() {
+ final BulkRequest bulkRequest = this.bulkRequest;
+ final long executionId = executionIdGen.incrementAndGet();
+
+ this.bulkRequest = new BulkRequest();
+
+ if (concurrentRequests == 0) {
+ // execute in a blocking fashion...
+ try {
+ listener.beforeBulk(executionId, bulkRequest);
+ listener.afterBulk(executionId, bulkRequest, client.bulk(bulkRequest).actionGet());
+ } catch (Exception e) {
+ listener.afterBulk(executionId, bulkRequest, e);
+ }
+ } else {
+ boolean success = false;
+ try {
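+                // may block until fewer than concurrentRequests bulks are in flight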
+ semaphore.acquire();
+ listener.beforeBulk(executionId, bulkRequest);
+ client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse response) {
+ try {
+ listener.afterBulk(executionId, bulkRequest, response);
+ } finally {
+ semaphore.release();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ listener.afterBulk(executionId, bulkRequest, e);
+ } finally {
+ semaphore.release();
+ }
+ }
+ });
+ success = true;
+ } catch (InterruptedException e) {
+            Thread.currentThread().interrupt(); // restore the interrupt status; Thread.interrupted() would clear it
+ listener.afterBulk(executionId, bulkRequest, e);
+ } finally {
+ if (!success) { // if we fail on client.bulk() release the semaphore
+ semaphore.release();
+ }
+ }
+
+ }
+ }
+
+ private boolean isOverTheLimit() {
+ if (bulkActions != -1 && bulkRequest.numberOfActions() > bulkActions) {
+ return true;
+ }
+ if (bulkSize != -1 && bulkRequest.estimatedSizeInBytes() > bulkSize) {
+ return true;
+ }
+ return false;
+ }
+
+ class Flush implements Runnable {
+
+ @Override
+ public void run() {
+ synchronized (BulkProcessor.this) {
+ if (closed) {
+ return;
+ }
+ if (bulkRequest.numberOfActions() == 0) {
+ return;
+ }
+ execute();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
new file mode 100644
index 0000000..4c9504a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -0,0 +1,519 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.VersionType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A bulk request holds an ordered list of {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
+ * and allows executing them in a single batch.
+ *
+ * @see org.elasticsearch.client.Client#bulk(BulkRequest)
+ */
+public class BulkRequest extends ActionRequest<BulkRequest> {
+
+ private static final int REQUEST_OVERHEAD = 50;
+
+ final List<ActionRequest> requests = Lists.newArrayList();
+ List<Object> payloads = null;
+
+ protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
+ private ReplicationType replicationType = ReplicationType.DEFAULT;
+ private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
+ private boolean refresh = false;
+
+ private long sizeInBytes = 0;
+
+ /**
+     * Adds a list of requests to be executed. Index, delete and update requests are supported.
+ */
+ public BulkRequest add(ActionRequest... requests) {
+ for (ActionRequest request : requests) {
+ add(request, null);
+ }
+ return this;
+ }
+
+ public BulkRequest add(ActionRequest request) {
+ return add(request, null);
+ }
+
+ public BulkRequest add(ActionRequest request, @Nullable Object payload) {
+ if (request instanceof IndexRequest) {
+ add((IndexRequest) request, payload);
+ } else if (request instanceof DeleteRequest) {
+ add((DeleteRequest) request, payload);
+ } else if (request instanceof UpdateRequest) {
+ add((UpdateRequest) request, payload);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No support for request [" + request + "]");
+ }
+ return this;
+ }
+
+ /**
+ * Adds a list of requests to be executed. Either index or delete requests.
+ */
+ public BulkRequest add(Iterable<ActionRequest> requests) {
+ for (ActionRequest request : requests) {
+ if (request instanceof IndexRequest) {
+ add((IndexRequest) request);
+ } else if (request instanceof DeleteRequest) {
+ add((DeleteRequest) request);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No support for request [" + request + "]");
+ }
+ }
+ return this;
+ }
+
+ /**
+     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
+     * (for example, if no id is provided one will be generated, and the create flag is honored).
+ */
+ public BulkRequest add(IndexRequest request) {
+ request.beforeLocalFork();
+ return internalAdd(request, null);
+ }
+
+ public BulkRequest add(IndexRequest request, @Nullable Object payload) {
+ request.beforeLocalFork();
+ return internalAdd(request, payload);
+ }
+
+ BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) {
+ requests.add(request);
+ addPayload(payload);
+ sizeInBytes += request.source().length() + REQUEST_OVERHEAD;
+ return this;
+ }
+
+ /**
+ * Adds an {@link UpdateRequest} to the list of actions to execute.
+ */
+ public BulkRequest add(UpdateRequest request) {
+ request.beforeLocalFork();
+ return internalAdd(request, null);
+ }
+
+ public BulkRequest add(UpdateRequest request, @Nullable Object payload) {
+ request.beforeLocalFork();
+ return internalAdd(request, payload);
+ }
+
+ BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) {
+ requests.add(request);
+ addPayload(payload);
+ if (request.doc() != null) {
+ sizeInBytes += request.doc().source().length();
+ }
+ if (request.upsertRequest() != null) {
+ sizeInBytes += request.upsertRequest().source().length();
+ }
+ if (request.script() != null) {
+ sizeInBytes += request.script().length() * 2;
+ }
+ return this;
+ }
+
+ /**
+     * Adds a {@link DeleteRequest} to the list of actions to execute.
+ */
+ public BulkRequest add(DeleteRequest request) {
+ return add(request, null);
+ }
+
+ public BulkRequest add(DeleteRequest request, @Nullable Object payload) {
+ requests.add(request);
+ addPayload(payload);
+ sizeInBytes += REQUEST_OVERHEAD;
+ return this;
+ }
+
+ private void addPayload(Object payload) {
+ if (payloads == null) {
+ if (payload == null) {
+ return;
+ }
+ payloads = new ArrayList<Object>(requests.size() + 10);
+            // back-fill a null payload for each previously added request (requests.size() - 1 of them), since the current request is already in the list
+ for (int i = 1; i < requests.size(); i++) {
+ payloads.add(null);
+ }
+ }
+ payloads.add(payload);
+ }
+
+ /**
+ * The list of requests in this bulk request.
+ */
+ public List<ActionRequest> requests() {
+ return this.requests;
+ }
+
+ /**
+ * The list of optional payloads associated with requests in the same order as the requests. Note, elements within
+ * it might be null if no payload has been provided.
+ * <p/>
+ * Note, if no payloads have been provided, this method will return null (as to conserve memory overhead).
+ */
+ @Nullable
+ public List<Object> payloads() {
+ return this.payloads;
+ }
+
+ /**
+ * The number of actions in the bulk request.
+ */
+ public int numberOfActions() {
+ return requests.size();
+ }
+
+ /**
+ * The estimated size in bytes of the bulk request.
+ */
+ public long estimatedSizeInBytes() {
+ return sizeInBytes;
+ }
+
+ /**
+     * Adds framed data in binary format.
+ */
+ public BulkRequest add(byte[] data, int from, int length, boolean contentUnsafe) throws Exception {
+ return add(data, from, length, contentUnsafe, null, null);
+ }
+
+ /**
+     * Adds framed data in binary format.
+ */
+ public BulkRequest add(byte[] data, int from, int length, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
+ return add(new BytesArray(data, from, length), contentUnsafe, defaultIndex, defaultType);
+ }
+
+ /**
+     * Adds framed data in binary format.
+ */
+ public BulkRequest add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
+ return add(data, contentUnsafe, defaultIndex, defaultType, null, null, true);
+ }
+
+ /**
+     * Adds framed data in binary format.
+ */
+ public BulkRequest add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
+ return add(data, contentUnsafe, defaultIndex, defaultType, null, null, allowExplicitIndex);
+ }
+
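+    // The framed binary data parsed below is the bulk API's line-delimited format:
+    // an action line, optionally followed by a source line, e.g. (illustrative):
+    //   { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+    //   { "field1" : "value1" }
+    //   { "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }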
+ public BulkRequest add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
+ XContent xContent = XContentFactory.xContent(data);
+ int from = 0;
+ int length = data.length();
+ byte marker = xContent.streamSeparator();
+ while (true) {
+ int nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+ // now parse the action
+ XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from));
+
+ try {
+ // move pointers
+ from = nextMarker + 1;
+
+ // Move to START_OBJECT
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ continue;
+ }
+ assert token == XContentParser.Token.START_OBJECT;
+ // Move to FIELD_NAME, that's the action
+ token = parser.nextToken();
+ assert token == XContentParser.Token.FIELD_NAME;
+ String action = parser.currentName();
+
+ String index = defaultIndex;
+ String type = defaultType;
+ String id = null;
+ String routing = defaultRouting;
+ String parent = null;
+ String timestamp = null;
+ Long ttl = null;
+ String opType = null;
+ long version = Versions.MATCH_ANY;
+ VersionType versionType = VersionType.INTERNAL;
+ int retryOnConflict = 0;
+
+ // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id)
+ // or START_OBJECT which will have another set of parameters
+
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("_index".equals(currentFieldName)) {
+ if (!allowExplicitIndex) {
+ throw new ElasticsearchIllegalArgumentException("explicit index in bulk is not allowed");
+ }
+ index = parser.text();
+ } else if ("_type".equals(currentFieldName)) {
+ type = parser.text();
+ } else if ("_id".equals(currentFieldName)) {
+ id = parser.text();
+ } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
+ routing = parser.text();
+ } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
+ parent = parser.text();
+ } else if ("_timestamp".equals(currentFieldName) || "timestamp".equals(currentFieldName)) {
+ timestamp = parser.text();
+ } else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
+ if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
+ ttl = TimeValue.parseTimeValue(parser.text(), null).millis();
+ } else {
+ ttl = parser.longValue();
+ }
+ } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
+ opType = parser.text();
+ } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
+ version = parser.longValue();
+ } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
+ versionType = VersionType.fromString(parser.text());
+ } else if ("_retry_on_conflict".equals(currentFieldName) || "_retryOnConflict".equals(currentFieldName)) {
+ retryOnConflict = parser.intValue();
+ }
+ }
+ }
+
+ if ("delete".equals(action)) {
+ add(new DeleteRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType), payload);
+ } else {
+ nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+ // order is important, we set parent after routing, so routing will be set to parent if not set explicitly
+ // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks
+ // of index request. All index requests are still unsafe if applicable.
+ if ("index".equals(action)) {
+ if (opType == null) {
+ internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
+ .source(data.slice(from, nextMarker - from), contentUnsafe), payload);
+ } else {
+ internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
+ .create("create".equals(opType))
+ .source(data.slice(from, nextMarker - from), contentUnsafe), payload);
+ }
+ } else if ("create".equals(action)) {
+ internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
+ .create(true)
+ .source(data.slice(from, nextMarker - from), contentUnsafe), payload);
+ } else if ("update".equals(action)) {
+ UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict)
+ .version(version).versionType(versionType)
+ .source(data.slice(from, nextMarker - from));
+
+ IndexRequest upsertRequest = updateRequest.upsertRequest();
+ if (upsertRequest != null) {
+ upsertRequest.routing(routing);
+ upsertRequest.parent(parent); // order is important, set it after routing, so it will set the routing
+ upsertRequest.timestamp(timestamp);
+ upsertRequest.ttl(ttl);
+ upsertRequest.version(version);
+ upsertRequest.versionType(versionType);
+ }
+ IndexRequest doc = updateRequest.doc();
+ if (doc != null) {
+ doc.routing(routing);
+ doc.parent(parent); // order is important, set it after routing, so it will set the routing
+ doc.timestamp(timestamp);
+ doc.ttl(ttl);
+ doc.version(version);
+ doc.versionType(versionType);
+ }
+
+ internalAdd(updateRequest, payload);
+ }
+ // move pointers
+ from = nextMarker + 1;
+ }
+ } finally {
+ parser.close();
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
+ */
+ public BulkRequest consistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ this.consistencyLevel = consistencyLevel;
+ return this;
+ }
+
+ public WriteConsistencyLevel consistencyLevel() {
+ return this.consistencyLevel;
+ }
+
+ /**
+     * Should a refresh be executed after this bulk operation, causing the operations to
+     * become searchable immediately. Note, heavy indexing should not set this to <tt>true</tt>.
+     * Defaults to <tt>false</tt>.
+ */
+ public BulkRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ /**
+ * Set the replication type for this operation.
+ */
+ public BulkRequest replicationType(ReplicationType replicationType) {
+ this.replicationType = replicationType;
+ return this;
+ }
+
+ public ReplicationType replicationType() {
+ return this.replicationType;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final BulkRequest timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final BulkRequest timeout(String timeout) {
+ return timeout(TimeValue.parseTimeValue(timeout, null));
+ }
+
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ private int findNextMarker(byte marker, int from, BytesReference data, int length) {
+ for (int i = from; i < length; i++) {
+ if (data.get(i) == marker) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (requests.isEmpty()) {
+ validationException = addValidationError("no requests added", validationException);
+ }
+ for (int i = 0; i < requests.size(); i++) {
+ ActionRequestValidationException ex = requests.get(i).validate();
+ if (ex != null) {
+ if (validationException == null) {
+ validationException = new ActionRequestValidationException();
+ }
+ validationException.addValidationErrors(ex.validationErrors());
+ }
+ }
+
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ replicationType = ReplicationType.fromId(in.readByte());
+ consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
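+            // same type discriminator as elsewhere in the bulk API: 0 = index, 1 = delete, 2 = update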
+ byte type = in.readByte();
+ if (type == 0) {
+ IndexRequest request = new IndexRequest();
+ request.readFrom(in);
+ requests.add(request);
+ } else if (type == 1) {
+ DeleteRequest request = new DeleteRequest();
+ request.readFrom(in);
+ requests.add(request);
+ } else if (type == 2) {
+ UpdateRequest request = new UpdateRequest();
+ request.readFrom(in);
+ requests.add(request);
+ }
+ }
+ refresh = in.readBoolean();
+ timeout = TimeValue.readTimeValue(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(replicationType.id());
+ out.writeByte(consistencyLevel.id());
+ out.writeVInt(requests.size());
+ for (ActionRequest request : requests) {
+ if (request instanceof IndexRequest) {
+ out.writeByte((byte) 0);
+ } else if (request instanceof DeleteRequest) {
+ out.writeByte((byte) 1);
+ } else if (request instanceof UpdateRequest) {
+ out.writeByte((byte) 2);
+ }
+ request.writeTo(out);
+ }
+ out.writeBoolean(refresh);
+ timeout.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java
new file mode 100644
index 0000000..8ef7116
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteRequestBuilder;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * A bulk request holds an ordered list of {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
+ * and allows executing them in a single batch.
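+ * <p/>
+ * A minimal usage sketch (index/type/id values are illustrative assumptions):
+ * <pre>
+ * BulkResponse response = client.prepareBulk()
+ *         .add(client.prepareIndex("test", "type1", "1").setSource("{\"field\":\"value\"}"))
+ *         .add(client.prepareDelete("test", "type1", "2"))
+ *         .execute().actionGet();
+ * </pre>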
+ */
+public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, BulkRequestBuilder> {
+
+ public BulkRequestBuilder(Client client) {
+ super((InternalClient) client, new BulkRequest());
+ }
+
+ /**
+     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
+     * (for example, if no id is provided one will be generated, and the create flag is honored).
+ */
+ public BulkRequestBuilder add(IndexRequest request) {
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
+     * (for example, if no id is provided one will be generated, and the create flag is honored).
+ */
+ public BulkRequestBuilder add(IndexRequestBuilder request) {
+ super.request.add(request.request());
+ return this;
+ }
+
+ /**
+     * Adds a {@link DeleteRequest} to the list of actions to execute.
+ */
+ public BulkRequestBuilder add(DeleteRequest request) {
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+     * Adds a {@link DeleteRequest} to the list of actions to execute.
+ */
+ public BulkRequestBuilder add(DeleteRequestBuilder request) {
+ super.request.add(request.request());
+ return this;
+ }
+
+
+ /**
+     * Adds an {@link UpdateRequest} to the list of actions to execute.
+ */
+ public BulkRequestBuilder add(UpdateRequest request) {
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+     * Adds an {@link UpdateRequest} to the list of actions to execute.
+ */
+ public BulkRequestBuilder add(UpdateRequestBuilder request) {
+ super.request.add(request.request());
+ return this;
+ }
+
+ /**
+     * Adds framed data in binary format.
+ */
+ public BulkRequestBuilder add(byte[] data, int from, int length, boolean contentUnsafe) throws Exception {
+ request.add(data, from, length, contentUnsafe, null, null);
+ return this;
+ }
+
+ /**
+     * Adds framed data in binary format.
+ */
+ public BulkRequestBuilder add(byte[] data, int from, int length, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
+ request.add(data, from, length, contentUnsafe, defaultIndex, defaultType);
+ return this;
+ }
+
+ /**
+ * Set the replication type for this operation.
+ */
+ public BulkRequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ /**
+ * Sets the consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ public BulkRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return this;
+ }
+
+ /**
+     * Should a refresh be executed after this bulk operation, causing the operations to
+     * become searchable immediately. Note, heavy indexing should not set this to <tt>true</tt>.
+     * Defaults to <tt>false</tt>.
+ */
+ public BulkRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final BulkRequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final BulkRequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return this;
+ }
+
+ /**
+ * The number of actions currently in the bulk.
+ */
+ public int numberOfActions() {
+ return request.numberOfActions();
+ }
+
+ @Override
+ protected void doExecute(ActionListener<BulkResponse> listener) {
+ ((Client) client).bulk(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
new file mode 100644
index 0000000..50525fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * A response of a bulk execution, holding a response for each item of the bulk request (in the same
+ * order). Each item holds the index/type/id it operated on, and whether it failed (with the
+ * failure message).
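+ * <p/>
+ * A typical consumption sketch (illustrative):
+ * <pre>
+ * if (response.hasFailures()) {
+ *     // log or otherwise handle the per-item failures
+ *     System.err.println(response.buildFailureMessage());
+ * }
+ * </pre>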
+ */
+public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {
+
+ private BulkItemResponse[] responses;
+ private long tookInMillis;
+
+ BulkResponse() {
+ }
+
+ public BulkResponse(BulkItemResponse[] responses, long tookInMillis) {
+ this.responses = responses;
+ this.tookInMillis = tookInMillis;
+ }
+
+ /**
+ * How long the bulk execution took.
+ */
+ public TimeValue getTook() {
+ return new TimeValue(tookInMillis);
+ }
+
+ /**
+ * How long the bulk execution took in milliseconds.
+ */
+ public long getTookInMillis() {
+ return tookInMillis;
+ }
+
+ /**
+ * Has anything failed with the execution.
+ */
+ public boolean hasFailures() {
+ for (BulkItemResponse response : responses) {
+ if (response.isFailed()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public String buildFailureMessage() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("failure in bulk execution:");
+ for (int i = 0; i < responses.length; i++) {
+ BulkItemResponse response = responses[i];
+ if (response.isFailed()) {
+ sb.append("\n[").append(i)
+ .append("]: index [").append(response.getIndex()).append("], type [").append(response.getType()).append("], id [").append(response.getId())
+ .append("], message [").append(response.getFailureMessage()).append("]");
+ }
+ }
+ return sb.toString();
+ }
+
+ /**
+ * The items representing each action performed in the bulk operation (in the same order!).
+ */
+ public BulkItemResponse[] getItems() {
+ return responses;
+ }
+
+ @Override
+ public Iterator<BulkItemResponse> iterator() {
+ return Iterators.forArray(responses);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ responses = new BulkItemResponse[in.readVInt()];
+ for (int i = 0; i < responses.length; i++) {
+ responses[i] = BulkItemResponse.readBulkItem(in);
+ }
+ tookInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(responses.length);
+ for (BulkItemResponse response : responses) {
+ response.writeTo(out);
+ }
+ out.writeVLong(tookInMillis);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
new file mode 100644
index 0000000..f791288
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
+import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A bulk request scoped to a single shard, carrying the subset of bulk items that route to it.
+ */
+public class BulkShardRequest extends ShardReplicationOperationRequest<BulkShardRequest> {
+
+ private int shardId;
+
+ private BulkItemRequest[] items;
+
+ private boolean refresh;
+
+ BulkShardRequest() {
+ }
+
+ BulkShardRequest(String index, int shardId, boolean refresh, BulkItemRequest[] items) {
+ this.index = index;
+ this.shardId = shardId;
+ this.items = items;
+ this.refresh = refresh;
+ }
+
+ boolean refresh() {
+ return this.refresh;
+ }
+
+ int shardId() {
+ return shardId;
+ }
+
+ BulkItemRequest[] items() {
+ return items;
+ }
+
+ /**
+ * Before we fork on a local thread, make sure we copy over the bytes if they are unsafe
+ */
+ @Override
+ public void beforeLocalFork() {
+ for (BulkItemRequest item : items) {
+ if (item.request() instanceof InstanceShardOperationRequest) {
+ ((InstanceShardOperationRequest) item.request()).beforeLocalFork();
+ } else {
+ ((ShardReplicationOperationRequest) item.request()).beforeLocalFork();
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shardId);
+ out.writeVInt(items.length);
+ for (BulkItemRequest item : items) {
+ if (item != null) {
+ out.writeBoolean(true);
+ item.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ }
+ }
+ out.writeBoolean(refresh);
+ }
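+
+    // Wire layout written above (mirrored by readFrom below): the shard id as a vint, the item
+    // count as a vint, then for each slot a presence boolean followed by the item itself (slots
+    // nulled out on the primary are written as "false" so replicas skip them), and finally the
+    // refresh flag.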
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = in.readVInt();
+ items = new BulkItemRequest[in.readVInt()];
+ for (int i = 0; i < items.length; i++) {
+ if (in.readBoolean()) {
+ items[i] = BulkItemRequest.readBulkItem(in);
+ }
+ }
+ refresh = in.readBoolean();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java
new file mode 100644
index 0000000..83d5b65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+
+/**
+ * The per-shard response to a {@link BulkShardRequest}, holding the item responses for that shard.
+ */
+public class BulkShardResponse extends ActionResponse {
+
+ private ShardId shardId;
+ private BulkItemResponse[] responses;
+
+ BulkShardResponse() {
+ }
+
+ BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) {
+ this.shardId = shardId;
+ this.responses = responses;
+ }
+
+ public ShardId getShardId() {
+ return shardId;
+ }
+
+ public BulkItemResponse[] getResponses() {
+ return responses;
+ }
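+
+    // Each BulkItemResponse carries the slot of its original item in the overall BulkRequest
+    // (BulkItemResponse#getItemId()), which is what lets TransportBulkAction reassemble the
+    // per-shard results back into request order.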
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = ShardId.readShardId(in);
+ responses = new BulkItemResponse[in.readVInt()];
+ for (int i = 0; i < responses.length; i++) {
+ responses[i] = BulkItemResponse.readBulkItem(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardId.writeTo(out);
+ out.writeVInt(responses.length);
+ for (BulkItemResponse response : responses) {
+ response.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
new file mode 100644
index 0000000..c2e61ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.AutoCreateIndex;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Groups the items of a {@link BulkRequest} by target shard and dispatches them as
+ * {@link BulkShardRequest}s, creating missing indices first when auto-create is enabled.
+ */
+public class TransportBulkAction extends TransportAction<BulkRequest, BulkResponse> {
+
+ private final AutoCreateIndex autoCreateIndex;
+
+ private final boolean allowIdGeneration;
+
+ private final ClusterService clusterService;
+
+ private final TransportShardBulkAction shardBulkAction;
+
+ private final TransportCreateIndexAction createIndexAction;
+
+ @Inject
+ public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
+ TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.shardBulkAction = shardBulkAction;
+ this.createIndexAction = createIndexAction;
+
+ this.autoCreateIndex = new AutoCreateIndex(settings);
+ this.allowIdGeneration = componentSettings.getAsBoolean("action.allow_id_generation", true);
+
+ transportService.registerHandler(BulkAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
+ final long startTime = System.currentTimeMillis();
+ Set<String> indices = Sets.newHashSet();
+        for (ActionRequest request : bulkRequest.requests) {
+            // Set.add is already a no-op for duplicates, so no contains() check is needed
+            if (request instanceof IndexRequest) {
+                indices.add(((IndexRequest) request).index());
+            } else if (request instanceof DeleteRequest) {
+                indices.add(((DeleteRequest) request).index());
+            } else if (request instanceof UpdateRequest) {
+                indices.add(((UpdateRequest) request).index());
+            }
+        }
+
+ if (autoCreateIndex.needToCheck()) {
+ final AtomicInteger counter = new AtomicInteger(indices.size());
+ final AtomicBoolean failed = new AtomicBoolean();
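+            // countdown pattern: every index, whether auto-created or already present, decrements
+            // the counter, and the callback that brings it to zero runs the bulk; "failed" makes
+            // sure the listener sees at most one failure even if several creations fail concurrently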
+ ClusterState state = clusterService.state();
+ for (String index : indices) {
+ if (autoCreateIndex.shouldAutoCreate(index, state)) {
+ createIndexAction.execute(new CreateIndexRequest(index).cause("auto(bulk api)"), new ActionListener<CreateIndexResponse>() {
+ @Override
+ public void onResponse(CreateIndexResponse result) {
+ if (counter.decrementAndGet() == 0) {
+ executeBulk(bulkRequest, startTime, listener);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
+ // we have the index, do it
+ if (counter.decrementAndGet() == 0) {
+ executeBulk(bulkRequest, startTime, listener);
+ }
+ } else if (failed.compareAndSet(false, true)) {
+ listener.onFailure(e);
+ }
+ }
+ });
+ } else {
+ if (counter.decrementAndGet() == 0) {
+ executeBulk(bulkRequest, startTime, listener);
+ }
+ }
+ }
+ } else {
+ executeBulk(bulkRequest, startTime, listener);
+ }
+ }
+
+ /**
+     * This method executes the {@link BulkRequest} and calls the given listener when the request completes.
+ * This method will not create any indices even if auto-create indices is enabled.
+ *
+ * @see #doExecute(BulkRequest, org.elasticsearch.action.ActionListener)
+ */
+ public void executeBulk(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
+ final long startTime = System.currentTimeMillis();
+ executeBulk(bulkRequest, startTime, listener);
+ }
+
+ private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener) {
+ ClusterState clusterState = clusterService.state();
+        // TODO use timeout to wait here if it's blocked...
+ clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
+
+ MetaData metaData = clusterState.metaData();
+ final AtomicArray<BulkItemResponse> responses = new AtomicArray<BulkItemResponse>(bulkRequest.requests.size());
+
+ for (int i = 0; i < bulkRequest.requests.size(); i++) {
+ ActionRequest request = bulkRequest.requests.get(i);
+ if (request instanceof IndexRequest) {
+ IndexRequest indexRequest = (IndexRequest) request;
+ String aliasOrIndex = indexRequest.index();
+ indexRequest.index(clusterState.metaData().concreteIndex(indexRequest.index()));
+
+ MappingMetaData mappingMd = null;
+ if (metaData.hasIndex(indexRequest.index())) {
+ mappingMd = metaData.index(indexRequest.index()).mappingOrDefault(indexRequest.type());
+ }
+ try {
+ indexRequest.process(metaData, aliasOrIndex, mappingMd, allowIdGeneration);
+ } catch (ElasticsearchParseException e) {
+ BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e);
+ BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
+ responses.set(i, bulkItemResponse);
+                    // make sure the request never gets processed again
+ bulkRequest.requests.set(i, null);
+ }
+ } else if (request instanceof DeleteRequest) {
+ DeleteRequest deleteRequest = (DeleteRequest) request;
+ deleteRequest.routing(clusterState.metaData().resolveIndexRouting(deleteRequest.routing(), deleteRequest.index()));
+ deleteRequest.index(clusterState.metaData().concreteIndex(deleteRequest.index()));
+ } else if (request instanceof UpdateRequest) {
+ UpdateRequest updateRequest = (UpdateRequest) request;
+ updateRequest.routing(clusterState.metaData().resolveIndexRouting(updateRequest.routing(), updateRequest.index()));
+ updateRequest.index(clusterState.metaData().concreteIndex(updateRequest.index()));
+ }
+ }
+
+ // first, go over all the requests and create a ShardId -> Operations mapping
+ Map<ShardId, List<BulkItemRequest>> requestsByShard = Maps.newHashMap();
+
+ for (int i = 0; i < bulkRequest.requests.size(); i++) {
+ ActionRequest request = bulkRequest.requests.get(i);
+ if (request instanceof IndexRequest) {
+ IndexRequest indexRequest = (IndexRequest) request;
+ ShardId shardId = clusterService.operationRouting().indexShards(clusterState, indexRequest.index(), indexRequest.type(), indexRequest.id(), indexRequest.routing()).shardId();
+ List<BulkItemRequest> list = requestsByShard.get(shardId);
+ if (list == null) {
+ list = Lists.newArrayList();
+ requestsByShard.put(shardId, list);
+ }
+ list.add(new BulkItemRequest(i, request));
+ } else if (request instanceof DeleteRequest) {
+ DeleteRequest deleteRequest = (DeleteRequest) request;
+ MappingMetaData mappingMd = clusterState.metaData().index(deleteRequest.index()).mappingOrDefault(deleteRequest.type());
+ if (mappingMd != null && mappingMd.routing().required() && deleteRequest.routing() == null) {
+                    // if routing is required but the delete request has none, we have to broadcast it to all shards of the index
+ GroupShardsIterator groupShards = clusterService.operationRouting().broadcastDeleteShards(clusterState, deleteRequest.index());
+ for (ShardIterator shardIt : groupShards) {
+ List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
+ if (list == null) {
+ list = Lists.newArrayList();
+ requestsByShard.put(shardIt.shardId(), list);
+ }
+ list.add(new BulkItemRequest(i, new DeleteRequest(deleteRequest)));
+ }
+ } else {
+ ShardId shardId = clusterService.operationRouting().deleteShards(clusterState, deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
+ List<BulkItemRequest> list = requestsByShard.get(shardId);
+ if (list == null) {
+ list = Lists.newArrayList();
+ requestsByShard.put(shardId, list);
+ }
+ list.add(new BulkItemRequest(i, request));
+ }
+ } else if (request instanceof UpdateRequest) {
+ UpdateRequest updateRequest = (UpdateRequest) request;
+ MappingMetaData mappingMd = clusterState.metaData().index(updateRequest.index()).mappingOrDefault(updateRequest.type());
+ if (mappingMd != null && mappingMd.routing().required() && updateRequest.routing() == null) {
+ continue; // What to do?
+ }
+ ShardId shardId = clusterService.operationRouting().indexShards(clusterState, updateRequest.index(), updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
+ List<BulkItemRequest> list = requestsByShard.get(shardId);
+ if (list == null) {
+ list = Lists.newArrayList();
+ requestsByShard.put(shardId, list);
+ }
+ list.add(new BulkItemRequest(i, request));
+ }
+ }
+
+ if (requestsByShard.isEmpty()) {
+ listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), System.currentTimeMillis() - startTime));
+ return;
+ }
+
+ final AtomicInteger counter = new AtomicInteger(requestsByShard.size());
+ for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
+ final ShardId shardId = entry.getKey();
+ final List<BulkItemRequest> requests = entry.getValue();
+ BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId.index().name(), shardId.id(), bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()]));
+ bulkShardRequest.replicationType(bulkRequest.replicationType());
+ bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
+ bulkShardRequest.timeout(bulkRequest.timeout());
+ shardBulkAction.execute(bulkShardRequest, new ActionListener<BulkShardResponse>() {
+ @Override
+ public void onResponse(BulkShardResponse bulkShardResponse) {
+ for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) {
+ responses.set(bulkItemResponse.getItemId(), bulkItemResponse);
+ }
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ // create failures for all relevant requests
+ String message = ExceptionsHelper.detailedMessage(e);
+ RestStatus status = ExceptionsHelper.status(e);
+ for (BulkItemRequest request : requests) {
+ if (request.request() instanceof IndexRequest) {
+ IndexRequest indexRequest = (IndexRequest) request.request();
+ responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
+ new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), message, status)));
+ } else if (request.request() instanceof DeleteRequest) {
+ DeleteRequest deleteRequest = (DeleteRequest) request.request();
+ responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
+ new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), message, status)));
+ } else if (request.request() instanceof UpdateRequest) {
+ UpdateRequest updateRequest = (UpdateRequest) request.request();
+ responses.set(request.id(), new BulkItemResponse(request.id(), "update",
+ new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), message, status)));
+ }
+ }
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), System.currentTimeMillis() - startTime));
+ }
+ });
+ }
+ }
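+
+    // Scatter/gather: each shard-level callback writes its item responses into the shared
+    // AtomicArray slot given by the item id, and whichever callback decrements the counter to
+    // zero sends the assembled BulkResponse. A whole-shard failure is fanned out as an
+    // item-level failure for every request that was routed to that shard.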
+
+ class TransportHandler extends BaseTransportRequestHandler<BulkRequest> {
+
+ @Override
+ public BulkRequest newInstance() {
+ return new BulkRequest();
+ }
+
+ @Override
+ public void messageReceived(final BulkRequest request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [" + BulkAction.NAME + "] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
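+
+// Illustrative only, assuming the ES 1.x client API (index/type/id values are placeholders):
+// the action above is what ultimately services a client-side bulk call, e.g.
+//
+//   BulkRequestBuilder bulk = client.prepareBulk();
+//   bulk.add(new IndexRequest("orders", "order", "1").source("{\"total\":10}"));
+//   bulk.add(new DeleteRequest("orders", "order", "2"));
+//   BulkResponse response = bulk.execute().actionGet();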
diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
new file mode 100644
index 0000000..f8f3373
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -0,0 +1,631 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.action.update.UpdateHelper;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Performs a bulk of index, delete and update operations on a single shard, first on the
+ * primary and then on its replicas.
+ */
+public class TransportShardBulkAction extends TransportShardReplicationOperationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
+
+ private final MappingUpdatedAction mappingUpdatedAction;
+ private final UpdateHelper updateHelper;
+ private final boolean allowIdGeneration;
+
+ @Inject
+ public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
+ MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper) {
+ super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
+ this.mappingUpdatedAction = mappingUpdatedAction;
+ this.updateHelper = updateHelper;
+ this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.BULK;
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return true;
+ }
+
+ @Override
+ protected TransportRequestOptions transportOptions() {
+ return BulkAction.INSTANCE.transportOptions(settings);
+ }
+
+ @Override
+ protected BulkShardRequest newRequestInstance() {
+ return new BulkShardRequest();
+ }
+
+ @Override
+ protected BulkShardRequest newReplicaRequestInstance() {
+ return new BulkShardRequest();
+ }
+
+ @Override
+ protected BulkShardResponse newResponseInstance() {
+ return new BulkShardResponse();
+ }
+
+ @Override
+ protected String transportAction() {
+ return BulkAction.NAME + "/shard";
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, BulkShardRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, BulkShardRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, BulkShardRequest request) {
+ return clusterState.routingTable().index(request.index()).shard(request.shardId()).shardsIt();
+ }
+
+ @Override
+ protected PrimaryResponse<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
+ final BulkShardRequest request = shardRequest.request;
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ Engine.IndexingOperation[] ops = null;
+ final Set<Tuple<String, String>> mappingsToUpdate = Sets.newHashSet();
+
+ BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
+ long[] preVersions = new long[request.items().length];
+ for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
+ BulkItemRequest item = request.items()[requestIndex];
+ if (item.request() instanceof IndexRequest) {
+ IndexRequest indexRequest = (IndexRequest) item.request();
+ try {
+
+ try {
+ WriteResult result = shardIndexOperation(request, indexRequest, clusterState, indexShard, true);
+ // add the response
+ IndexResponse indexResponse = result.response();
+ responses[requestIndex] = new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse);
+ preVersions[requestIndex] = result.preVersion;
+ if (result.mappingToUpdate != null) {
+ mappingsToUpdate.add(result.mappingToUpdate);
+ }
+ if (result.op != null) {
+ if (ops == null) {
+ ops = new Engine.IndexingOperation[request.items().length];
+ }
+ ops[requestIndex] = result.op;
+ }
+                } catch (WriteFailure e) {
+ Tuple<String, String> mappingsToUpdateOnFailure = e.mappingsToUpdate;
+ if (mappingsToUpdateOnFailure != null) {
+ mappingsToUpdate.add(mappingsToUpdateOnFailure);
+ }
+ throw e.getCause();
+ }
+ } catch (Throwable e) {
+                // rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
+ if (retryPrimaryException(e)) {
+ // restore updated versions...
+ for (int j = 0; j < requestIndex; j++) {
+ applyVersion(request.items()[j], preVersions[j]);
+ }
+ for (Tuple<String, String> mappingToUpdate : mappingsToUpdate) {
+ updateMappingOnMaster(mappingToUpdate.v1(), mappingToUpdate.v2());
+ }
+ throw (ElasticsearchException) e;
+ }
+ if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
+ logger.trace("[{}][{}] failed to execute bulk item (index) {}", e, shardRequest.request.index(), shardRequest.shardId, indexRequest);
+ } else {
+ logger.debug("[{}][{}] failed to execute bulk item (index) {}", e, shardRequest.request.index(), shardRequest.shardId, indexRequest);
+ }
+ responses[requestIndex] = new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
+ new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e));
+ // nullify the request so it won't execute on the replicas
+ request.items()[requestIndex] = null;
+ }
+ } else if (item.request() instanceof DeleteRequest) {
+ DeleteRequest deleteRequest = (DeleteRequest) item.request();
+ try {
+ // add the response
+ DeleteResponse deleteResponse = shardDeleteOperation(deleteRequest, indexShard).response();
+ responses[requestIndex] = new BulkItemResponse(item.id(), "delete", deleteResponse);
+ } catch (Throwable e) {
+                    // rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
+ if (retryPrimaryException(e)) {
+ // restore updated versions...
+ for (int j = 0; j < requestIndex; j++) {
+ applyVersion(request.items()[j], preVersions[j]);
+ }
+ throw (ElasticsearchException) e;
+ }
+ if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
+ logger.trace("[{}][{}] failed to execute bulk item (delete) {}", e, shardRequest.request.index(), shardRequest.shardId, deleteRequest);
+ } else {
+ logger.debug("[{}][{}] failed to execute bulk item (delete) {}", e, shardRequest.request.index(), shardRequest.shardId, deleteRequest);
+ }
+ responses[requestIndex] = new BulkItemResponse(item.id(), "delete",
+ new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), e));
+ // nullify the request so it won't execute on the replicas
+ request.items()[requestIndex] = null;
+ }
+ } else if (item.request() instanceof UpdateRequest) {
+ UpdateRequest updateRequest = (UpdateRequest) item.request();
+ // We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE
+ for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
+ UpdateResult updateResult;
+ try {
+ updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard);
+ } catch (Throwable t) {
+ updateResult = new UpdateResult(null, null, false, t, null);
+ }
+ if (updateResult.success()) {
+
+ switch (updateResult.result.operation()) {
+ case UPSERT:
+ case INDEX:
+ WriteResult result = updateResult.writeResult;
+ IndexRequest indexRequest = updateResult.request();
+ BytesReference indexSourceAsBytes = indexRequest.source();
+ // add the response
+ IndexResponse indexResponse = result.response();
+ UpdateResponse updateResponse = new UpdateResponse(indexResponse.getIndex(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated());
+ if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
+ Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
+ updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
+ }
+ responses[requestIndex] = new BulkItemResponse(item.id(), "update", updateResponse);
+ preVersions[requestIndex] = result.preVersion;
+ if (result.mappingToUpdate != null) {
+ mappingsToUpdate.add(result.mappingToUpdate);
+ }
+ if (result.op != null) {
+ if (ops == null) {
+ ops = new Engine.IndexingOperation[request.items().length];
+ }
+ ops[requestIndex] = result.op;
+ }
+ // Replace the update request to the translated index request to execute on the replica.
+ request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
+ break;
+ case DELETE:
+ DeleteResponse response = updateResult.writeResult.response();
+ DeleteRequest deleteRequest = updateResult.request();
+ updateResponse = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), false);
+ updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
+ responses[requestIndex] = new BulkItemResponse(item.id(), "update", updateResponse);
+ // Replace the update request to the translated delete request to execute on the replica.
+ request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
+ break;
+ case NONE:
+ responses[requestIndex] = new BulkItemResponse(item.id(), "update", updateResult.noopResult);
+ request.items()[requestIndex] = null; // No need to go to the replica
+ break;
+ }
+ // NOTE: Breaking out of the retry_on_conflict loop!
+ break;
+ } else if (updateResult.failure()) {
+ Throwable t = updateResult.error;
+ if (updateResult.retry) {
+                            // updateAttemptsCount is zero-based and marks the current attempt; once it equals retryOnConflict we are out of retries
+ if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
+ // we can't try any more
+ responses[requestIndex] = new BulkItemResponse(item.id(), "update",
+ new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), t));
+ request.items()[requestIndex] = null; // do not send to replicas
+ }
+ } else {
+                            // rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
+ if (retryPrimaryException(t)) {
+ // restore updated versions...
+ for (int j = 0; j < requestIndex; j++) {
+ applyVersion(request.items()[j], preVersions[j]);
+ }
+ throw (ElasticsearchException) t;
+ }
+ if (updateResult.result == null) {
+ responses[requestIndex] = new BulkItemResponse(item.id(), "update", new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), t));
+ } else {
+ switch (updateResult.result.operation()) {
+ case UPSERT:
+ case INDEX:
+ IndexRequest indexRequest = updateResult.request();
+ if (t instanceof ElasticsearchException && ((ElasticsearchException) t).status() == RestStatus.CONFLICT) {
+ logger.trace("[{}][{}] failed to execute bulk item (index) {}", t, shardRequest.request.index(), shardRequest.shardId, indexRequest);
+ } else {
+ logger.debug("[{}][{}] failed to execute bulk item (index) {}", t, shardRequest.request.index(), shardRequest.shardId, indexRequest);
+ }
+ responses[requestIndex] = new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
+ new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), t));
+ break;
+ case DELETE:
+ DeleteRequest deleteRequest = updateResult.request();
+ if (t instanceof ElasticsearchException && ((ElasticsearchException) t).status() == RestStatus.CONFLICT) {
+ logger.trace("[{}][{}] failed to execute bulk item (delete) {}", t, shardRequest.request.index(), shardRequest.shardId, deleteRequest);
+ } else {
+ logger.debug("[{}][{}] failed to execute bulk item (delete) {}", t, shardRequest.request.index(), shardRequest.shardId, deleteRequest);
+ }
+ responses[requestIndex] = new BulkItemResponse(item.id(), "delete",
+ new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), t));
+ break;
+ }
+ }
+ // nullify the request so it won't execute on the replicas
+ request.items()[requestIndex] = null;
+ // NOTE: Breaking out of the retry_on_conflict loop!
+ break;
+ }
+
+ }
+ }
+ }
+
+ assert responses[requestIndex] != null; // we must have set a response somewhere.
+
+ }
+
+ for (Tuple<String, String> mappingToUpdate : mappingsToUpdate) {
+ updateMappingOnMaster(mappingToUpdate.v1(), mappingToUpdate.v2());
+ }
+
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_bulk").force(false));
+ } catch (Throwable e) {
+ // ignore
+ }
+ }
+ BulkShardResponse response = new BulkShardResponse(new ShardId(request.index(), request.shardId()), responses);
+ return new PrimaryResponse<BulkShardResponse, BulkShardRequest>(shardRequest.request, response, ops);
+ }
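+
+    // Items that fail on the primary (other than retryable primary-not-available failures) are
+    // nulled out above, so the replica phase below only replays operations that succeeded on
+    // the primary.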
+
+ static class WriteResult {
+
+ final Object response;
+ final long preVersion;
+ final Tuple<String, String> mappingToUpdate;
+ final Engine.IndexingOperation op;
+
+ WriteResult(Object response, long preVersion, Tuple<String, String> mappingToUpdate, Engine.IndexingOperation op) {
+ this.response = response;
+ this.preVersion = preVersion;
+ this.mappingToUpdate = mappingToUpdate;
+ this.op = op;
+ }
+
+ @SuppressWarnings("unchecked")
+ <T> T response() {
+ return (T) response;
+ }
+
+ }
+
+ static class WriteFailure extends ElasticsearchException implements ElasticsearchWrapperException {
+ @Nullable
+ final Tuple<String, String> mappingsToUpdate;
+
+ WriteFailure(Throwable cause, Tuple<String, String> mappingsToUpdate) {
+ super(null, cause);
+ assert cause != null;
+ this.mappingsToUpdate = mappingsToUpdate;
+ }
+ }
+
+ private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState,
+ IndexShard indexShard, boolean processed) {
+
+ // validate, if routing is required, that we got routing
+ MappingMetaData mappingMd = clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
+ if (mappingMd != null && mappingMd.routing().required()) {
+ if (indexRequest.routing() == null) {
+ throw new RoutingMissingException(indexRequest.index(), indexRequest.type(), indexRequest.id());
+ }
+ }
+
+ if (!processed) {
+ indexRequest.process(clusterState.metaData(), indexRequest.index(), mappingMd, allowIdGeneration);
+ }
+
+ SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
+ .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl());
+
+        // update the mapping on the master if needed; once this type's mapping has changed, parsing further docs of the same type won't add new mappers, so a single update suffices
+ Tuple<String, String> mappingsToUpdate = null;
+
+ long version;
+ boolean created;
+ Engine.IndexingOperation op;
+ long preVersion = indexRequest.version();
+ try {
+ if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
+ Engine.Index index = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).versionType(indexRequest.versionType()).origin(Engine.Operation.Origin.PRIMARY);
+ if (index.parsedDoc().mappingsModified()) {
+ mappingsToUpdate = Tuple.tuple(indexRequest.index(), indexRequest.type());
+ }
+ indexShard.index(index);
+ version = index.version();
+ op = index;
+ created = index.created();
+ } else {
+ Engine.Create create = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).versionType(indexRequest.versionType()).origin(Engine.Operation.Origin.PRIMARY);
+ if (create.parsedDoc().mappingsModified()) {
+ mappingsToUpdate = Tuple.tuple(indexRequest.index(), indexRequest.type());
+ }
+ indexShard.create(create);
+ version = create.version();
+ op = create;
+ created = true;
+ }
+            // update the version on the request so the same version is used on the replicas
+            indexRequest.version(version);
+ } catch (Throwable t) {
+ throw new WriteFailure(t, mappingsToUpdate);
+ }
+
+ IndexResponse indexResponse = new IndexResponse(indexRequest.index(), indexRequest.type(), indexRequest.id(), version, created);
+ return new WriteResult(indexResponse, preVersion, mappingsToUpdate, op);
+ }
+
+ private WriteResult shardDeleteOperation(DeleteRequest deleteRequest, IndexShard indexShard) {
+ Engine.Delete delete = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version()).versionType(deleteRequest.versionType()).origin(Engine.Operation.Origin.PRIMARY);
+ indexShard.delete(delete);
+ // update the request with the version so it will go to the replicas
+ deleteRequest.version(delete.version());
+ DeleteResponse deleteResponse = new DeleteResponse(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), delete.version(), delete.found());
+ return new WriteResult(deleteResponse, deleteRequest.version(), null, null);
+ }
+
+ static class UpdateResult {
+
+ final UpdateHelper.Result result;
+ final ActionRequest actionRequest;
+ final boolean retry;
+ final Throwable error;
+ final WriteResult writeResult;
+ final UpdateResponse noopResult;
+
+ UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) {
+ this.result = result;
+ this.actionRequest = actionRequest;
+ this.retry = retry;
+ this.error = error;
+ this.writeResult = writeResult;
+ this.noopResult = null;
+ }
+
+ UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) {
+ this.result = result;
+ this.actionRequest = actionRequest;
+ this.writeResult = writeResult;
+ this.retry = false;
+ this.error = null;
+ this.noopResult = null;
+ }
+
+ public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) {
+ this.result = result;
+ this.noopResult = updateResponse;
+ this.actionRequest = null;
+ this.writeResult = null;
+ this.retry = false;
+ this.error = null;
+ }
+
+ boolean failure() {
+ return error != null;
+ }
+
+ boolean success() {
+ return noopResult != null || writeResult != null;
+ }
+
+ @SuppressWarnings("unchecked")
+ <T extends ActionRequest> T request() {
+ return (T) actionRequest;
+ }
+
+ }
+
+ private UpdateResult shardUpdateOperation(ClusterState clusterState, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) {
+ UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard);
+ switch (translate.operation()) {
+ case UPSERT:
+ case INDEX:
+ IndexRequest indexRequest = translate.action();
+ try {
+ WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, false);
+ return new UpdateResult(translate, indexRequest, result);
+ } catch (Throwable t) {
+ t = ExceptionsHelper.unwrapCause(t);
+ boolean retry = false;
+ if (t instanceof VersionConflictEngineException || (t instanceof DocumentAlreadyExistsException && translate.operation() == UpdateHelper.Operation.UPSERT)) {
+ retry = true;
+ }
+ return new UpdateResult(translate, indexRequest, retry, t, null);
+ }
+ case DELETE:
+ DeleteRequest deleteRequest = translate.action();
+ try {
+ WriteResult result = shardDeleteOperation(deleteRequest, indexShard);
+ return new UpdateResult(translate, deleteRequest, result);
+ } catch (Throwable t) {
+ t = ExceptionsHelper.unwrapCause(t);
+ boolean retry = false;
+ if (t instanceof VersionConflictEngineException) {
+ retry = true;
+ }
+ return new UpdateResult(translate, deleteRequest, retry, t, null);
+ }
+ case NONE:
+ UpdateResponse updateResponse = translate.action();
+ return new UpdateResult(translate, updateResponse);
+ default:
+ throw new ElasticsearchIllegalStateException("Illegal update operation " + translate.operation());
+ }
+ }
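+
+    // The update path is translate-then-delegate: UpdateHelper.prepare() rewrites the update as
+    // an UPSERT/INDEX, DELETE or NONE operation, and only version conflicts (plus
+    // DocumentAlreadyExistsException for upserts) are flagged as retryable for the
+    // retry_on_conflict loop in shardOperationOnPrimary().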
+
+ protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ final BulkShardRequest request = shardRequest.request;
+ for (int i = 0; i < request.items().length; i++) {
+ BulkItemRequest item = request.items()[i];
+ if (item == null) {
+ continue;
+ }
+ if (item.request() instanceof IndexRequest) {
+ IndexRequest indexRequest = (IndexRequest) item.request();
+ try {
+ SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
+ .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl());
+
+ if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
+ Engine.Index index = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+ indexShard.index(index);
+ } else {
+ Engine.Create create = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+ indexShard.create(create);
+ }
+ } catch (Throwable e) {
+                    // ignore, we are on the replica
+ }
+ } else if (item.request() instanceof DeleteRequest) {
+ DeleteRequest deleteRequest = (DeleteRequest) item.request();
+ try {
+ Engine.Delete delete = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+ indexShard.delete(delete);
+ } catch (Throwable e) {
+                    // ignore, we are on the replica
+ }
+ }
+ }
+
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_bulk").force(false));
+ } catch (Throwable e) {
+ // ignore
+ }
+ }
+ }
+
+ private void updateMappingOnMaster(final String index, final String type) {
+ try {
+ MapperService mapperService = indicesService.indexServiceSafe(index).mapperService();
+ final DocumentMapper documentMapper = mapperService.documentMapper(type);
+ if (documentMapper == null) { // should not happen
+ return;
+ }
+ IndexMetaData metaData = clusterService.state().metaData().index(index);
+ if (metaData == null) {
+ return;
+ }
+
+ // we generate the order id before we get the mapping to send and refresh the source, so
+            // if two updates happen concurrently, we know that the later order includes the earlier one
+ long orderId = mappingUpdatedAction.generateNextMappingUpdateOrder();
+ documentMapper.refreshSource();
+
+ DiscoveryNode node = clusterService.localNode();
+ final MappingUpdatedAction.MappingUpdatedRequest request = new MappingUpdatedAction.MappingUpdatedRequest(index, metaData.uuid(), type, documentMapper.mappingSource(), orderId, node != null ? node.id() : null);
+ mappingUpdatedAction.execute(request, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {
+ @Override
+ public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
+ // all is well
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.warn("failed to update master on updated mapping for {}", e, request);
+ }
+ });
+ } catch (Throwable e) {
+ logger.warn("failed to update master on updated mapping for index [{}], type [{}]", e, index, type);
+ }
+ }
+
+ private void applyVersion(BulkItemRequest item, long version) {
+ if (item.request() instanceof IndexRequest) {
+ ((IndexRequest) item.request()).version(version);
+ } else if (item.request() instanceof DeleteRequest) {
+ ((DeleteRequest) item.request()).version(version);
+ } else {
+ // log?
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/count/CountAction.java b/src/main/java/org/elasticsearch/action/count/CountAction.java
new file mode 100644
index 0000000..3f8079d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/CountAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * The count action, counting the documents matching a query.
+ */
+public class CountAction extends Action<CountRequest, CountResponse, CountRequestBuilder> {
+
+ public static final CountAction INSTANCE = new CountAction();
+ public static final String NAME = "count";
+
+ private CountAction() {
+ super(NAME);
+ }
+
+ @Override
+ public CountResponse newResponse() {
+ return new CountResponse();
+ }
+
+ @Override
+ public CountRequestBuilder newRequestBuilder(Client client) {
+ return new CountRequestBuilder(client);
+ }
+}
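+
+// CountAction follows the usual Action singleton pattern: NAME is the key the transport layer
+// registers its handler under, and INSTANCE can be passed to the generic client entry point.
+// Illustrative only ("tweets" and "listener" are placeholders):
+//
+//   client.execute(CountAction.INSTANCE, new CountRequest("tweets"), listener);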
diff --git a/src/main/java/org/elasticsearch/action/count/CountRequest.java b/src/main/java/org/elasticsearch/action/count/CountRequest.java
new file mode 100644
index 0000000..adcc007
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/CountRequest.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+/**
+ * A request to count the number of documents matching a specific query. Best created with
+ * {@link org.elasticsearch.client.Requests#countRequest(String...)}.
+ * <p/>
+ * <p>The request requires the query source to be set either using {@link #source(QuerySourceBuilder)},
+ * or {@link #source(byte[])}.
+ *
+ * @see CountResponse
+ * @see org.elasticsearch.client.Client#count(CountRequest)
+ * @see org.elasticsearch.client.Requests#countRequest(String...)
+ */
+public class CountRequest extends BroadcastOperationRequest<CountRequest> {
+
+ private static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ public static final float DEFAULT_MIN_SCORE = -1f;
+
+ private float minScore = DEFAULT_MIN_SCORE;
+
+ @Nullable
+ protected String routing;
+
+ @Nullable
+ private String preference;
+
+ private BytesReference source;
+ private boolean sourceUnsafe;
+
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ long nowInMillis;
+
+ CountRequest() {
+ }
+
+ /**
+ * Constructs a new count request against the provided indices. No indices provided means it will
+ * run against all indices.
+ */
+ public CountRequest(String... indices) {
+ super(indices);
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ return validationException;
+ }
+
+ @Override
+ protected void beforeStart() {
+ if (sourceUnsafe) {
+ source = source.copyBytesArray();
+ sourceUnsafe = false;
+ }
+ }
+
+ /**
+ * The minimum score of the documents to include in the count.
+ */
+ float minScore() {
+ return minScore;
+ }
+
+ /**
+ * The minimum score of the documents to include in the count. Defaults to <tt>-1</tt> which means all
+ * documents will be included in the count.
+ */
+ public CountRequest minScore(float minScore) {
+ this.minScore = minScore;
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ BytesReference source() {
+ return source;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public CountRequest source(QuerySourceBuilder sourceBuilder) {
+ this.source = sourceBuilder.buildAsBytes(contentType);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute in the form of a map.
+ */
+ public CountRequest source(Map querySource) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(querySource);
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + querySource + "]", e);
+ }
+ }
+
+ public CountRequest source(XContentBuilder builder) {
+ this.source = builder.bytes();
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute. It is preferable to use either {@link #source(byte[])}
+ * or {@link #source(QuerySourceBuilder)}.
+ */
+ public CountRequest source(String querySource) {
+ this.source = new BytesArray(querySource);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public CountRequest source(byte[] querySource) {
+ return source(querySource, 0, querySource.length, false);
+ }
+
+ /**
+ * The source to execute.
+ */
+ public CountRequest source(byte[] querySource, int offset, int length, boolean unsafe) {
+ return source(new BytesArray(querySource, offset, length), unsafe);
+ }
+
+ public CountRequest source(BytesReference querySource, boolean unsafe) {
+ this.source = querySource;
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
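+
+    // "unsafe" means the reference may point into a shared, reusable buffer (a network buffer,
+    // for instance); beforeStart() above copies such sources into an owned byte array before the
+    // request can outlive that buffer.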
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public String[] types() {
+ return this.types;
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public CountRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public CountRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public CountRequest routing(String... routings) {
+ this.routing = Strings.arrayToCommaDelimitedString(routings);
+ return this;
+ }
+
+ public CountRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ minScore = in.readFloat();
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+ sourceUnsafe = false;
+ source = in.readBytesReference();
+ types = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeFloat(minScore);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+ out.writeBytesReference(source);
+ out.writeStringArray(types);
+ }
+
+ @Override
+ public String toString() {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(source, false);
+ } catch (Exception e) {
+ // ignore
+ }
+ return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", source[" + sSource + "]";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java b/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java
new file mode 100644
index 0000000..aa7ff2a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.query.QueryBuilder;
+
+/**
+ * A count action request builder.
+ */
+public class CountRequestBuilder extends BroadcastOperationRequestBuilder<CountRequest, CountResponse, CountRequestBuilder> {
+
+ private QuerySourceBuilder sourceBuilder;
+
+ public CountRequestBuilder(Client client) {
+ super((InternalClient) client, new CountRequest());
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public CountRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * The minimum score of the documents to include in the count. Defaults to <tt>-1</tt>, which
+ * means all documents will be included in the count.
+ */
+ public CountRequestBuilder setMinScore(float minScore) {
+ request.minScore(minScore);
+ return this;
+ }
+
+ /**
+ * A comma-separated list of routing values to control the shards the search will be executed on.
+ */
+ public CountRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search on. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * <tt>_shards:x,y</tt> to operate on shards x and y. A custom value guarantees that the same order
+ * will be used across different requests.
+ */
+ public CountRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public CountRequestBuilder setRouting(String... routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The query source to execute.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public CountRequestBuilder setQuery(QueryBuilder queryBuilder) {
+ sourceBuilder().setQuery(queryBuilder);
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public CountRequestBuilder setSource(BytesReference source) {
+ request().source(source, false);
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public CountRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request().source(source, unsafe);
+ return this;
+ }
+
+ /**
+ * The query source to execute.
+ */
+ public CountRequestBuilder setSource(byte[] querySource) {
+ request.source(querySource);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<CountResponse> listener) {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder);
+ }
+
+ ((InternalClient) client).count(request, listener);
+ }
+
+ private QuerySourceBuilder sourceBuilder() {
+ if (sourceBuilder == null) {
+ sourceBuilder = new QuerySourceBuilder();
+ }
+ return sourceBuilder;
+ }
+}
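In practice this builder is obtained from the client rather than constructed directly; a short sketch, assuming the same hypothetical "client" and index, with Client#prepareCount as the entry point:

    // Builder-style count; names and field values are assumptions for illustration.
    CountResponse response = client.prepareCount("test")
            .setTypes("tweet")
            .setQuery(QueryBuilders.termQuery("user", "kimchy"))
            .setMinScore(1.0f)   // ignore matches scoring below 1.0
            .execute()
            .actionGet();
    System.out.println("matched " + response.getCount() + " documents");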
diff --git a/src/main/java/org/elasticsearch/action/count/CountResponse.java b/src/main/java/org/elasticsearch/action/count/CountResponse.java
new file mode 100644
index 0000000..e6e9860
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/CountResponse.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The response of the count action.
+ */
+public class CountResponse extends BroadcastOperationResponse {
+
+ private long count;
+
+ CountResponse() {
+
+ }
+
+ CountResponse(long count, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.count = count;
+ }
+
+ /**
+ * The count of documents matching the query provided.
+ */
+ public long getCount() {
+ return count;
+ }
+
+ public RestStatus status() {
+ if (getFailedShards() == 0) {
+ if (getSuccessfulShards() == 0 && getTotalShards() > 0) {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+ return RestStatus.OK;
+ }
+ // if total failure, bubble up the status code to the response level
+ if (getSuccessfulShards() == 0 && getTotalShards() > 0) {
+ RestStatus status = RestStatus.OK;
+ for (ShardOperationFailedException shardFailure : getShardFailures()) {
+ RestStatus shardStatus = shardFailure.status();
+ if (shardStatus.getStatus() >= status.getStatus()) {
+ status = shardStatus;
+ }
+ }
+ return status;
+ }
+ return RestStatus.OK;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ count = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(count);
+ }
+}
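The status() logic above returns OK as soon as any shard succeeded and only escalates to the worst per-shard status on total failure. A sketch of inspecting partial failures, reusing the hypothetical response from the count sketches above:

    // Report shard-level failures; the overall request can still succeed.
    if (response.getFailedShards() > 0) {
        for (ShardOperationFailedException failure : response.getShardFailures()) {
            System.err.println("shard [" + failure.shardId() + "] failed: " + failure.reason());
        }
    }
    RestStatus status = response.status(); // OK unless every shard failed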
diff --git a/src/main/java/org/elasticsearch/action/count/ShardCountRequest.java b/src/main/java/org/elasticsearch/action/count/ShardCountRequest.java
new file mode 100644
index 0000000..f0e0afa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/ShardCountRequest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Internal count request executed directly against a specific index shard.
+ */
+class ShardCountRequest extends BroadcastShardOperationRequest {
+
+ private float minScore;
+
+ private BytesReference querySource;
+
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ private long nowInMillis;
+
+ @Nullable
+ private String[] filteringAliases;
+
+ ShardCountRequest() {
+
+ }
+
+ public ShardCountRequest(String index, int shardId, @Nullable String[] filteringAliases, CountRequest request) {
+ super(index, shardId, request);
+ this.minScore = request.minScore();
+ this.querySource = request.source();
+ this.types = request.types();
+ this.filteringAliases = filteringAliases;
+ this.nowInMillis = request.nowInMillis;
+ }
+
+ public float minScore() {
+ return minScore;
+ }
+
+ public BytesReference querySource() {
+ return querySource;
+ }
+
+ public String[] types() {
+ return this.types;
+ }
+
+ public String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ public long nowInMillis() {
+ return this.nowInMillis;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ minScore = in.readFloat();
+
+ querySource = in.readBytesReference();
+
+ int typesSize = in.readVInt();
+ if (typesSize > 0) {
+ types = new String[typesSize];
+ for (int i = 0; i < typesSize; i++) {
+ types[i] = in.readString();
+ }
+ }
+ int aliasesSize = in.readVInt();
+ if (aliasesSize > 0) {
+ filteringAliases = new String[aliasesSize];
+ for (int i = 0; i < aliasesSize; i++) {
+ filteringAliases[i] = in.readString();
+ }
+ }
+ nowInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeFloat(minScore);
+
+ out.writeBytesReference(querySource);
+
+ out.writeVInt(types.length);
+ for (String type : types) {
+ out.writeString(type);
+ }
+ if (filteringAliases != null) {
+ out.writeVInt(filteringAliases.length);
+ for (String alias : filteringAliases) {
+ out.writeString(alias);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ out.writeVLong(nowInMillis);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/count/ShardCountResponse.java b/src/main/java/org/elasticsearch/action/count/ShardCountResponse.java
new file mode 100644
index 0000000..a67c6ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/ShardCountResponse.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Internal count response of a shard count request executed directly against a specific shard.
+ */
+class ShardCountResponse extends BroadcastShardOperationResponse {
+
+ private long count;
+
+ ShardCountResponse() {
+
+ }
+
+ public ShardCountResponse(String index, int shardId, long count) {
+ super(index, shardId);
+ this.count = count;
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ count = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(count);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/count/TransportCountAction.java b/src/main/java/org/elasticsearch/action/count/TransportCountAction.java
new file mode 100644
index 0000000..3d06a38
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/TransportCountAction.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.internal.DefaultSearchContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QueryPhaseExecutionException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Transport action for the count API: broadcasts the request to the relevant shards
+ * and sums the per-shard counts into a single {@link CountResponse}.
+ */
+public class TransportCountAction extends TransportBroadcastOperationAction<CountRequest, CountResponse, ShardCountRequest, ShardCountResponse> {
+
+ private final IndicesService indicesService;
+
+ private final ScriptService scriptService;
+
+ private final CacheRecycler cacheRecycler;
+
+ private final PageCacheRecycler pageCacheRecycler;
+
+ @Inject
+ public TransportCountAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.scriptService = scriptService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ }
+
+ @Override
+ protected void doExecute(CountRequest request, ActionListener<CountResponse> listener) {
+ request.nowInMillis = System.currentTimeMillis();
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+
+ @Override
+ protected String transportAction() {
+ return CountAction.NAME;
+ }
+
+ @Override
+ protected CountRequest newRequest() {
+ return new CountRequest();
+ }
+
+ @Override
+ protected ShardCountRequest newShardRequest() {
+ return new ShardCountRequest();
+ }
+
+ @Override
+ protected ShardCountRequest newShardRequest(ShardRouting shard, CountRequest request) {
+ String[] filteringAliases = clusterService.state().metaData().filteringAliases(shard.index(), request.indices());
+ return new ShardCountRequest(shard.index(), shard.id(), filteringAliases, request);
+ }
+
+ @Override
+ protected ShardCountResponse newShardResponse() {
+ return new ShardCountResponse();
+ }
+
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, CountRequest request, String[] concreteIndices) {
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
+ return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, CountRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, CountRequest countRequest, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
+ }
+
+ @Override
+ protected CountResponse newResponse(CountRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+ long count = 0;
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // simply ignore non-active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ count += ((ShardCountResponse) shardResponse).getCount();
+ successfulShards++;
+ }
+ }
+ return new CountResponse(count, shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardCountResponse shardOperation(ShardCountRequest request) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(request.shardId());
+
+ SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
+ SearchContext context = new DefaultSearchContext(0,
+ new ShardSearchRequest().types(request.types())
+ .filteringAliases(request.filteringAliases())
+ .nowInMillis(request.nowInMillis()),
+ shardTarget, indexShard.acquireSearcher("count"), indexService, indexShard,
+ scriptService, cacheRecycler, pageCacheRecycler);
+ SearchContext.setCurrent(context);
+
+ try {
+ // TODO: min score should move to be "null" as a value that is not initialized...
+ if (request.minScore() != -1) {
+ context.minimumScore(request.minScore());
+ }
+ BytesReference source = request.querySource();
+ if (source != null && source.length() > 0) {
+ try {
+ QueryParseContext.setTypes(request.types());
+ context.parsedQuery(indexService.queryParserService().parseQuery(source));
+ } finally {
+ QueryParseContext.removeTypes();
+ }
+ }
+ context.preProcess();
+ try {
+ long count = Lucene.count(context.searcher(), context.query());
+ return new ShardCountResponse(request.index(), request.shardId(), count);
+ } catch (Exception e) {
+ throw new QueryPhaseExecutionException(context, "failed to execute count", e);
+ }
+ } finally {
+ // this will also release the index searcher
+ context.release();
+ SearchContext.removeCurrent();
+ }
+ }
+}
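Two details worth noting from the shard operation above: an absent or empty query source leaves no parsed query, so the count falls back to matching all documents, and min_score is only applied when it differs from the -1 sentinel. A sketch with the same assumed client:

    // Count everything in the index: no query source means match-all per shard.
    long total = client.prepareCount("test").execute().actionGet().getCount();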
diff --git a/src/main/java/org/elasticsearch/action/count/package-info.java b/src/main/java/org/elasticsearch/action/count/package-info.java
new file mode 100644
index 0000000..2d4945d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/count/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Count action.
+ */
+package org.elasticsearch.action.count;
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/delete/DeleteAction.java b/src/main/java/org/elasticsearch/action/delete/DeleteAction.java
new file mode 100644
index 0000000..9643e3c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/DeleteAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * The delete action, mapping {@link DeleteRequest} to {@link DeleteResponse}.
+ */
+public class DeleteAction extends Action<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {
+
+ public static final DeleteAction INSTANCE = new DeleteAction();
+ public static final String NAME = "delete";
+
+ private DeleteAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteResponse newResponse() {
+ return new DeleteResponse();
+ }
+
+ @Override
+ public DeleteRequestBuilder newRequestBuilder(Client client) {
+ return new DeleteRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java
new file mode 100644
index 0000000..326f3f5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.VersionType;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to delete a document from an index based on its type and id. Best created using
+ * {@link org.elasticsearch.client.Requests#deleteRequest(String)}.
+ * <p/>
+ * <p>The operation requires the {@link #index()}, {@link #type(String)} and {@link #id(String)} to
+ * be set.
+ *
+ * @see DeleteResponse
+ * @see org.elasticsearch.client.Client#delete(DeleteRequest)
+ * @see org.elasticsearch.client.Requests#deleteRequest(String)
+ */
+public class DeleteRequest extends ShardReplicationOperationRequest<DeleteRequest> {
+
+ private String type;
+ private String id;
+ @Nullable
+ private String routing;
+ private boolean refresh;
+ private long version;
+ private VersionType versionType = VersionType.INTERNAL;
+
+ /**
+ * Constructs a new delete request against the specified index. The {@link #type(String)} and {@link #id(String)}
+ * must be set.
+ */
+ public DeleteRequest(String index) {
+ this.index = index;
+ }
+
+ /**
+ * Constructs a new delete request against the specified index with the type and id.
+ *
+ * @param index The index to get the document from
+ * @param type The type of the document
+ * @param id The id of the document
+ */
+ public DeleteRequest(String index, String type, String id) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ public DeleteRequest(DeleteRequest request) {
+ super(request);
+ this.type = request.type();
+ this.id = request.id();
+ this.routing = request.routing();
+ this.refresh = request.refresh();
+ this.version = request.version();
+ this.versionType = request.versionType();
+ }
+
+ public DeleteRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (type == null) {
+ validationException = addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ validationException = addValidationError("id is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The type of the document to delete.
+ */
+ public String type() {
+ return type;
+ }
+
+ /**
+ * Sets the type of the document to delete.
+ */
+ public DeleteRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * The id of the document to delete.
+ */
+ public String id() {
+ return id;
+ }
+
+ /**
+ * Sets the id of the document to delete.
+ */
+ public DeleteRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Will simply set the routing to this value, as it is only
+ * used for routing with delete requests.
+ */
+ public DeleteRequest parent(String parent) {
+ if (routing == null) {
+ routing = parent;
+ }
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. When set, this value is hashed to
+ * determine the shard instead of the id.
+ */
+ public DeleteRequest routing(String routing) {
+ if (routing != null && routing.length() == 0) {
+ this.routing = null;
+ } else {
+ this.routing = routing;
+ }
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the delete request. When set, this value is hashed to
+ * determine the shard instead of the id.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+ * Should a refresh be executed after this delete operation, making the change
+ * immediately visible to search. Note, heavy indexing should not set this to
+ * <tt>true</tt>. Defaults to <tt>false</tt>.
+ */
+ public DeleteRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ /**
+ * Sets the version, which will cause the delete operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public DeleteRequest version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public DeleteRequest versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ type = in.readSharedString();
+ id = in.readString();
+ routing = in.readOptionalString();
+ refresh = in.readBoolean();
+ version = in.readLong();
+ versionType = VersionType.fromValue(in.readByte());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeSharedString(type);
+ out.writeString(id);
+ out.writeOptionalString(routing());
+ out.writeBoolean(refresh);
+ out.writeLong(version);
+ out.writeByte(versionType.getValue());
+ }
+
+ @Override
+ public String toString() {
+ return "delete {[" + index + "][" + type + "][" + id + "]}";
+ }
+}
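A minimal sketch of using DeleteRequest directly, assuming the same hypothetical Client named "client" and illustrative index/type/id values:

    // Sketch only: all names and values are assumptions.
    DeleteRequest request = new DeleteRequest("test", "tweet", "1")
            .routing("kimchy")  // hash this value, not the id, to pick the shard
            .version(2)         // only delete if the current version is still 2
            .refresh(true);     // make the deletion visible to search immediately
    DeleteResponse response = client.delete(request).actionGet();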
diff --git a/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java
new file mode 100644
index 0000000..1d7a4b2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.VersionType;
+
+/**
+ * A delete document action request builder.
+ */
+public class DeleteRequestBuilder extends ShardReplicationOperationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {
+
+ public DeleteRequestBuilder(Client client) {
+ super((InternalClient) client, new DeleteRequest());
+ }
+
+ public DeleteRequestBuilder(Client client, @Nullable String index) {
+ super((InternalClient) client, new DeleteRequest(index));
+ }
+
+ /**
+ * Sets the type of the document to delete.
+ */
+ public DeleteRequestBuilder setType(String type) {
+ request.type(type);
+ return this;
+ }
+
+ /**
+ * Sets the id of the document to delete.
+ */
+ public DeleteRequestBuilder setId(String id) {
+ request.id(id);
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Will simply set the routing to this value, as it is only
+ * used for routing with delete requests.
+ */
+ public DeleteRequestBuilder setParent(String parent) {
+ request.parent(parent);
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the delete request. When set, this value is hashed to
+ * determine the shard instead of the id.
+ */
+ public DeleteRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Should a refresh be executed after this delete operation, making the change
+ * immediately visible to search. Note, heavy indexing should not set this to
+ * <tt>true</tt>. Defaults to <tt>false</tt>.
+ */
+ public DeleteRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ /**
+ * Sets the version, which will cause the delete operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public DeleteRequestBuilder setVersion(long version) {
+ request.version(version);
+ return this;
+ }
+
+ /**
+ * Sets the type of versioning to use. Defaults to {@link VersionType#INTERNAL}.
+ */
+ public DeleteRequestBuilder setVersionType(VersionType versionType) {
+ request.versionType(versionType);
+ return this;
+ }
+
+ /**
+ * Set the replication type for this operation.
+ */
+ public DeleteRequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ /**
+ * Sets the consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ public DeleteRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteResponse> listener) {
+ ((Client) client).delete(request, listener);
+ }
+}
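The equivalent through the builder, again with hypothetical names. A version conflict surfaces as an exception, while a missing document is reported via isFound():

    DeleteResponse response = client.prepareDelete("test", "tweet", "1")
            .setVersionType(VersionType.INTERNAL)
            .setVersion(2)
            .execute()
            .actionGet();
    if (!response.isFound()) {
        // no document with that id existed in the first place
    }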
diff --git a/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java
new file mode 100644
index 0000000..40645ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * The response of the delete action.
+ *
+ * @see org.elasticsearch.action.delete.DeleteRequest
+ * @see org.elasticsearch.client.Client#delete(DeleteRequest)
+ */
+public class DeleteResponse extends ActionResponse {
+
+ private String index;
+ private String id;
+ private String type;
+ private long version;
+ private boolean found;
+
+ public DeleteResponse() {
+
+ }
+
+ public DeleteResponse(String index, String type, String id, long version, boolean found) {
+ this.index = index;
+ this.id = id;
+ this.type = type;
+ this.version = version;
+ this.found = found;
+ }
+
+ /**
+ * The index the document was deleted from.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The type of the document deleted.
+ */
+ public String getType() {
+ return this.type;
+ }
+
+ /**
+ * The id of the document deleted.
+ */
+ public String getId() {
+ return this.id;
+ }
+
+ /**
+ * The version of the delete operation.
+ */
+ public long getVersion() {
+ return this.version;
+ }
+
+ /**
+ * Returns <tt>true</tt> if a doc was found to delete.
+ */
+ public boolean isFound() {
+ return found;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readSharedString();
+ type = in.readSharedString();
+ id = in.readString();
+ version = in.readLong();
+ found = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeSharedString(index);
+ out.writeSharedString(type);
+ out.writeString(id);
+ out.writeLong(version);
+ out.writeBoolean(found);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java
new file mode 100644
index 0000000..d5fcdc0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
+import org.elasticsearch.action.delete.index.IndexDeleteRequest;
+import org.elasticsearch.action.delete.index.IndexDeleteResponse;
+import org.elasticsearch.action.delete.index.ShardDeleteResponse;
+import org.elasticsearch.action.delete.index.TransportIndexDeleteAction;
+import org.elasticsearch.action.support.AutoCreateIndex;
+import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Performs the delete operation.
+ */
+public class TransportDeleteAction extends TransportShardReplicationOperationAction<DeleteRequest, DeleteRequest, DeleteResponse> {
+
+ private final AutoCreateIndex autoCreateIndex;
+
+ private final TransportCreateIndexAction createIndexAction;
+
+ private final TransportIndexDeleteAction indexDeleteAction;
+
+ @Inject
+ public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
+ TransportCreateIndexAction createIndexAction, TransportIndexDeleteAction indexDeleteAction) {
+ super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
+ this.createIndexAction = createIndexAction;
+ this.indexDeleteAction = indexDeleteAction;
+ this.autoCreateIndex = new AutoCreateIndex(settings);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.INDEX;
+ }
+
+ @Override
+ protected void doExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+ if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
+ request.beforeLocalFork();
+ createIndexAction.execute(new CreateIndexRequest(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+ @Override
+ public void onResponse(CreateIndexResponse result) {
+ innerExecute(request, listener);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
+ // we have the index, do it
+ innerExecute(request, listener);
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ });
+ } else {
+ innerExecute(request, listener);
+ }
+ }
+
+ @Override
+ protected boolean resolveRequest(final ClusterState state, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+ request.routing(state.metaData().resolveIndexRouting(request.routing(), request.index()));
+ request.index(state.metaData().concreteIndex(request.index()));
+ if (state.metaData().hasIndex(request.index())) {
+ // check if routing is required, if so, do a broadcast delete
+ MappingMetaData mappingMd = state.metaData().index(request.index()).mappingOrDefault(request.type());
+ if (mappingMd != null && mappingMd.routing().required()) {
+ if (request.routing() == null) {
+ indexDeleteAction.execute(new IndexDeleteRequest(request), new ActionListener<IndexDeleteResponse>() {
+ @Override
+ public void onResponse(IndexDeleteResponse indexDeleteResponse) {
+ // go over the responses, see if the document was found on any shard, and pick up its version if so
+ long version = Versions.MATCH_ANY;
+ boolean found = false;
+ for (ShardDeleteResponse deleteResponse : indexDeleteResponse.getResponses()) {
+ if (deleteResponse.isFound()) {
+ version = deleteResponse.getVersion();
+ found = true;
+ break;
+ }
+ }
+ listener.onResponse(new DeleteResponse(request.index(), request.type(), request.id(), version, found));
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ private void innerExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return true;
+ }
+
+ @Override
+ protected DeleteRequest newRequestInstance() {
+ return new DeleteRequest();
+ }
+
+ @Override
+ protected DeleteRequest newReplicaRequestInstance() {
+ return new DeleteRequest();
+ }
+
+ @Override
+ protected DeleteResponse newResponseInstance() {
+ return new DeleteResponse();
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteAction.NAME;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, DeleteRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, DeleteRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected PrimaryResponse<DeleteResponse, DeleteRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
+ DeleteRequest request = shardRequest.request;
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version())
+ .versionType(request.versionType())
+ .origin(Engine.Operation.Origin.PRIMARY);
+ indexShard.delete(delete);
+ // update the request with the version so it will go to the replicas
+ request.version(delete.version());
+
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ DeleteResponse response = new DeleteResponse(request.index(), request.type(), request.id(), delete.version(), delete.found());
+ return new PrimaryResponse<DeleteResponse, DeleteRequest>(shardRequest.request, response, null);
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
+ DeleteRequest request = shardRequest.request;
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version())
+ .origin(Engine.Operation.Origin.REPLICA);
+
+ indexShard.delete(delete);
+
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, DeleteRequest request) {
+ return clusterService.operationRouting()
+ .deleteShards(clusterService.state(), request.index(), request.type(), request.id(), request.routing());
+ }
+}
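As resolveRequest() above shows, a delete against a type whose mapping requires routing falls back to a per-shard broadcast (via IndexDeleteRequest) when no routing value is given; supplying the routing keeps it a single-shard operation. A sketch with a hypothetical parent/child-style setup:

    // "blog-7" stands in for a routing value the (hypothetical) mapping requires;
    // providing it avoids the broadcast path in TransportDeleteAction#resolveRequest.
    client.prepareDelete("test", "comment", "42")
            .setRouting("blog-7")
            .execute()
            .actionGet();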
diff --git a/src/main/java/org/elasticsearch/action/delete/index/IndexDeleteRequest.java b/src/main/java/org/elasticsearch/action/delete/index/IndexDeleteRequest.java
new file mode 100644
index 0000000..c7d4c7a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/index/IndexDeleteRequest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete.index;
+
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Internal request to execute a delete on every shard of an index, used when the
+ * target shard cannot be resolved (for example, required routing is missing).
+ */
+public class IndexDeleteRequest extends IndexReplicationOperationRequest<IndexDeleteRequest> {
+
+ private String type;
+ private String id;
+ private boolean refresh = false;
+ private long version;
+
+ IndexDeleteRequest() {
+ }
+
+ public IndexDeleteRequest(DeleteRequest request) {
+ this.timeout = request.timeout();
+ this.consistencyLevel = request.consistencyLevel();
+ this.replicationType = request.replicationType();
+ this.index = request.index();
+ this.type = request.type();
+ this.id = request.id();
+ this.refresh = request.refresh();
+ this.version = request.version();
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ type = in.readString();
+ id = in.readString();
+ refresh = in.readBoolean();
+ version = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(type);
+ out.writeString(id);
+ out.writeBoolean(refresh);
+ out.writeLong(version);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/delete/index/IndexDeleteResponse.java b/src/main/java/org/elasticsearch/action/delete/index/IndexDeleteResponse.java
new file mode 100644
index 0000000..ffea979
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/index/IndexDeleteResponse.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete.index;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Delete response aggregated over the shards of a specific index.
+ */
+public class IndexDeleteResponse extends ActionResponse {
+
+ private String index;
+ private int successfulShards;
+ private int failedShards;
+ private ShardDeleteResponse[] deleteResponses;
+
+ IndexDeleteResponse(String index, int failedShards, ShardDeleteResponse[] deleteResponses) {
+ this.index = index;
+ this.successfulShards = deleteResponses.length;
+ this.failedShards = failedShards;
+ this.deleteResponses = deleteResponses;
+ }
+
+ IndexDeleteResponse() {
+
+ }
+
+ /**
+ * The index the delete operation was executed against.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The total number of shards the delete operation was executed on.
+ */
+ public int getTotalShards() {
+ return failedShards + successfulShards;
+ }
+
+ /**
+ * The number of shards the delete operation executed successfully on.
+ */
+ public int getSuccessfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * The number of shards the delete operation failed on.
+ */
+ public int getFailedShards() {
+ return failedShards;
+ }
+
+ public ShardDeleteResponse[] getResponses() {
+ return this.deleteResponses;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ successfulShards = in.readVInt();
+ failedShards = in.readVInt();
+ deleteResponses = new ShardDeleteResponse[in.readVInt()];
+ for (int i = 0; i < deleteResponses.length; i++) {
+ deleteResponses[i] = new ShardDeleteResponse();
+ deleteResponses[i].readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeVInt(successfulShards);
+ out.writeVInt(failedShards);
+ out.writeVInt(deleteResponses.length);
+ for (ShardDeleteResponse deleteResponse : deleteResponses) {
+ deleteResponse.writeTo(out);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/delete/index/ShardDeleteRequest.java b/src/main/java/org/elasticsearch/action/delete/index/ShardDeleteRequest.java
new file mode 100644
index 0000000..82a77c1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/index/ShardDeleteRequest.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete.index;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Delete request to execute on a specific shard.
+ */
+public class ShardDeleteRequest extends ShardReplicationOperationRequest<ShardDeleteRequest> {
+
+ private int shardId;
+ private String type;
+ private String id;
+ private boolean refresh = false;
+ private long version;
+
+ ShardDeleteRequest(IndexDeleteRequest request, int shardId) {
+ super(request);
+ this.index = request.index();
+ this.shardId = shardId;
+ this.type = request.type();
+ this.id = request.id();
+ replicationType(request.replicationType());
+ consistencyLevel(request.consistencyLevel());
+ timeout = request.timeout();
+ this.refresh = request.refresh();
+ this.version = request.version();
+ }
+
+ ShardDeleteRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (type == null) {
+ addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ addValidationError("id is missing", validationException);
+ }
+ return validationException;
+ }
+
+ public int shardId() {
+ return this.shardId;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ public void version(long version) {
+ this.version = version;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = in.readVInt();
+ type = in.readString();
+ id = in.readString();
+ refresh = in.readBoolean();
+ version = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shardId);
+ out.writeString(type);
+ out.writeString(id);
+ out.writeBoolean(refresh);
+ out.writeLong(version);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/delete/index/ShardDeleteResponse.java b/src/main/java/org/elasticsearch/action/delete/index/ShardDeleteResponse.java
new file mode 100644
index 0000000..1ad326e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/index/ShardDeleteResponse.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete.index;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Delete response executed on a specific shard.
+ */
+public class ShardDeleteResponse extends ActionResponse {
+
+ private long version;
+ private boolean found;
+
+ public ShardDeleteResponse() {
+ }
+
+ public ShardDeleteResponse(long version, boolean found) {
+ this.version = version;
+ this.found = found;
+ }
+
+ public long getVersion() {
+ return version;
+ }
+
+ public boolean isFound() {
+ return found;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ version = in.readLong();
+ found = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(version);
+ out.writeBoolean(found);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/delete/index/TransportIndexDeleteAction.java b/src/main/java/org/elasticsearch/action/delete/index/TransportIndexDeleteAction.java
new file mode 100644
index 0000000..4ec0cd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/index/TransportIndexDeleteAction.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete.index;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+
+/**
+ * Performs a delete request on all shards of an index by delegating to the shard-level delete action.
+ */
+public class TransportIndexDeleteAction extends TransportIndexReplicationOperationAction<IndexDeleteRequest, IndexDeleteResponse, ShardDeleteRequest, ShardDeleteRequest, ShardDeleteResponse> {
+
+ @Inject
+ public TransportIndexDeleteAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ ThreadPool threadPool, TransportShardDeleteAction deleteAction) {
+ super(settings, transportService, clusterService, threadPool, deleteAction);
+ }
+
+ @Override
+ protected IndexDeleteRequest newRequestInstance() {
+ return new IndexDeleteRequest();
+ }
+
+ @Override
+ protected IndexDeleteResponse newResponseInstance(IndexDeleteRequest request, List<ShardDeleteResponse> shardDeleteResponses, int failuresCount, List<ShardOperationFailedException> shardFailures) {
+ return new IndexDeleteResponse(request.index(), failuresCount, shardDeleteResponses.toArray(new ShardDeleteResponse[shardDeleteResponses.size()]));
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ @Override
+ protected String transportAction() {
+ return "indices/index/delete";
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, IndexDeleteRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, IndexDeleteRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected GroupShardsIterator shards(IndexDeleteRequest request) {
+ return clusterService.operationRouting().broadcastDeleteShards(clusterService.state(), request.index());
+ }
+
+ @Override
+ protected ShardDeleteRequest newShardRequestInstance(IndexDeleteRequest request, int shardId) {
+ return new ShardDeleteRequest(request, shardId);
+ }
+}
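The index-level action above fans one ShardDeleteRequest out to every shard of the index and, since accumulateExceptions() returns false, only counts failures instead of collecting them. A rough stand-alone analogy of that fan-out-and-gather shape (plain java.util.concurrent, not the Elasticsearch replication machinery):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    class BroadcastSketch {
        // Runs one task per shard, keeps the successful responses, and only
        // tallies the failures, mirroring accumulateExceptions() == false.
        static <R> List<R> toAllShards(List<Callable<R>> shardTasks) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            try {
                List<R> responses = new ArrayList<R>();
                int failuresCount = 0;
                for (Future<R> future : pool.invokeAll(shardTasks)) {
                    try {
                        responses.add(future.get());
                    } catch (ExecutionException e) {
                        failuresCount++; // the failure is counted, its cause is dropped
                    }
                }
                System.out.println(failuresCount + " shard(s) failed");
                return responses;
            } finally {
                pool.shutdown();
            }
        }
    }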
diff --git a/src/main/java/org/elasticsearch/action/delete/index/TransportShardDeleteAction.java b/src/main/java/org/elasticsearch/action/delete/index/TransportShardDeleteAction.java
new file mode 100644
index 0000000..d7c53dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/index/TransportShardDeleteAction.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.delete.index;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Performs a delete request on a single shard, on the primary first and then on its replicas.
+ */
+public class TransportShardDeleteAction extends TransportShardReplicationOperationAction<ShardDeleteRequest, ShardDeleteRequest, ShardDeleteResponse> {
+
+ @Inject
+ public TransportShardDeleteAction(Settings settings, TransportService transportService,
+ ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool,
+ ShardStateAction shardStateAction) {
+ super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return true;
+ }
+
+ @Override
+ protected ShardDeleteRequest newRequestInstance() {
+ return new ShardDeleteRequest();
+ }
+
+ @Override
+ protected ShardDeleteRequest newReplicaRequestInstance() {
+ return new ShardDeleteRequest();
+ }
+
+ @Override
+ protected ShardDeleteResponse newResponseInstance() {
+ return new ShardDeleteResponse();
+ }
+
+ @Override
+ protected String transportAction() {
+ return "indices/index/b_shard/delete";
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.INDEX;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, ShardDeleteRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, ShardDeleteRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
+ ShardDeleteRequest request = shardRequest.request;
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version())
+ .origin(Engine.Operation.Origin.PRIMARY);
+ indexShard.delete(delete);
+ // update the version to happen on the replicas
+ request.version(delete.version());
+
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ ShardDeleteResponse response = new ShardDeleteResponse(delete.version(), delete.found());
+ return new PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest>(shardRequest.request, response, null);
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
+ ShardDeleteRequest request = shardRequest.request;
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version())
+ .origin(Engine.Operation.Origin.REPLICA);
+ indexShard.delete(delete);
+
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, ShardDeleteRequest request) {
+ GroupShardsIterator group = clusterService.operationRouting().broadcastDeleteShards(clusterService.state(), request.index());
+ for (ShardIterator shardIt : group) {
+ if (shardIt.shardId().id() == request.shardId()) {
+ return shardIt;
+ }
+ }
+ throw new ElasticsearchIllegalStateException("No shards iterator found for shard [" + request.shardId() + "]");
+ }
+}
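One detail worth noting in shardOperationOnPrimary above: the primary writes the version it resolved back into the request (request.version(delete.version())) before the request is replicated, so every replica applies the exact same version rather than deriving its own. A toy illustration of that write-back, under the simplifying assumption that a delete bumps the document's version by one (plain Java, not the Engine API):

    import java.util.HashMap;
    import java.util.Map;

    class VersionWriteBackSketch {
        static class ShardDelete {
            final String id;
            long version; // unresolved when sent by the client
            ShardDelete(String id) { this.id = id; }
        }

        public static void main(String[] args) {
            Map<String, Long> primaryVersions = new HashMap<String, Long>();
            primaryVersions.put("doc1", 3L);

            ShardDelete request = new ShardDelete("doc1");
            // The primary resolves the operation's version and writes it
            // back into the request before forwarding it to the replicas.
            long resolved = primaryVersions.remove(request.id) + 1;
            request.version = resolved;

            // Each replica now applies the delete at the identical version.
            System.out.println("replicas apply delete of doc1 at version " + request.version);
        }
    }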
diff --git a/src/main/java/org/elasticsearch/action/delete/package-info.java b/src/main/java/org/elasticsearch/action/delete/package-info.java
new file mode 100644
index 0000000..03698aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/delete/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Delete action.
+ */
+package org.elasticsearch.action.delete;
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java
new file mode 100644
index 0000000..a3888cc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * The delete-by-query {@link Action}, wiring the request, response, and builder types together.
+ */
+public class DeleteByQueryAction extends Action<DeleteByQueryRequest, DeleteByQueryResponse, DeleteByQueryRequestBuilder> {
+
+ public static final DeleteByQueryAction INSTANCE = new DeleteByQueryAction();
+ public static final String NAME = "deleteByQuery";
+
+ private DeleteByQueryAction() {
+ super(NAME);
+ }
+
+ @Override
+ public DeleteByQueryResponse newResponse() {
+ return new DeleteByQueryResponse();
+ }
+
+ @Override
+ public DeleteByQueryRequestBuilder newRequestBuilder(Client client) {
+ return new DeleteByQueryRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java
new file mode 100644
index 0000000..f80df26
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to delete all documents that match a specific query. Best created with
+ * {@link org.elasticsearch.client.Requests#deleteByQueryRequest(String...)}.
+ * <p/>
+ * <p>The request requires the source to be set using either {@link #source(QuerySourceBuilder)}
+ * or {@link #source(byte[])}.
+ *
+ * @see DeleteByQueryResponse
+ * @see org.elasticsearch.client.Requests#deleteByQueryRequest(String...)
+ * @see org.elasticsearch.client.Client#deleteByQuery(DeleteByQueryRequest)
+ */
+public class DeleteByQueryRequest extends IndicesReplicationOperationRequest<DeleteByQueryRequest> {
+
+ private static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ private BytesReference source;
+ private boolean sourceUnsafe;
+
+ private String[] types = Strings.EMPTY_ARRAY;
+ @Nullable
+ private String routing;
+
+ /**
+ * Constructs a new delete by query request to run against the provided indices. No indices means
+ * it will run against all indices.
+ */
+ public DeleteByQueryRequest(String... indices) {
+ this.indices = indices;
+ }
+
+ public DeleteByQueryRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (source == null) {
+ validationException = addValidationError("source is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The source to execute.
+ */
+ BytesReference source() {
+ if (sourceUnsafe) {
+ source = source.copyBytesArray();
+ }
+ return source;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequest source(QuerySourceBuilder sourceBuilder) {
+ this.source = sourceBuilder.buildAsBytes(contentType);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute. It is preferable to use either {@link #source(byte[])}
+ * or {@link #source(QuerySourceBuilder)}.
+ */
+ public DeleteByQueryRequest source(String query) {
+ this.source = new BytesArray(query.getBytes(Charsets.UTF_8));
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute in the form of a map.
+ */
+ public DeleteByQueryRequest source(Map<String, Object> source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ public DeleteByQueryRequest source(XContentBuilder builder) {
+ this.source = builder.bytes();
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequest source(byte[] source) {
+ return source(source, 0, source.length, false);
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequest source(byte[] source, int offset, int length, boolean unsafe) {
+ this.source = new BytesArray(source, offset, length);
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ public DeleteByQueryRequest source(BytesReference source, boolean unsafe) {
+ this.source = source;
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ String[] types() {
+ return this.types;
+ }
+
+ /**
+ * A comma-separated list of routing values to control the shards the search will be executed on.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+ * A comma-separated list of routing values to control the shards the search will be executed on.
+ */
+ public DeleteByQueryRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public DeleteByQueryRequest routing(String... routings) {
+ this.routing = Strings.arrayToCommaDelimitedString(routings);
+ return this;
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public DeleteByQueryRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ sourceUnsafe = false;
+ source = in.readBytesReference();
+ routing = in.readOptionalString();
+ types = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(source);
+ out.writeOptionalString(routing);
+ out.writeStringArray(types);
+ }
+
+ @Override
+ public String toString() {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(source, false);
+ } catch (Exception e) {
+ // ignore
+ }
+ return "[" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "], source[" + sSource + "]";
+ }
+}
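A hedged usage sketch for the request above, along the lines the class Javadoc suggests (assumes a connected ES 1.x Client; the index, type, and field names are illustrative):

    import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
    import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
    import org.elasticsearch.action.support.QuerySourceBuilder;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.client.Requests;
    import org.elasticsearch.index.query.QueryBuilders;

    class DeleteByQueryRequestSketch {
        // Deletes every document in "my_index" whose "user" field is "kimchy".
        static DeleteByQueryResponse deleteUserDocs(Client client) {
            DeleteByQueryRequest request = Requests.deleteByQueryRequest("my_index")
                    .types("my_type")
                    .source(new QuerySourceBuilder().setQuery(QueryBuilders.termQuery("user", "kimchy")));
            return client.deleteByQuery(request).actionGet();
        }
    }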
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java
new file mode 100644
index 0000000..7bbbce1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequestBuilder;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+
+import java.util.Map;
+
+/**
+ * A fluent builder for {@link DeleteByQueryRequest}.
+ */
+public class DeleteByQueryRequestBuilder extends IndicesReplicationOperationRequestBuilder<DeleteByQueryRequest, DeleteByQueryResponse, DeleteByQueryRequestBuilder> {
+
+ private QuerySourceBuilder sourceBuilder;
+
+ public DeleteByQueryRequestBuilder(Client client) {
+ super((InternalClient) client, new DeleteByQueryRequest());
+ }
+
+ /**
+ * The types of documents the query will run against. Defaults to all types.
+ */
+ public DeleteByQueryRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * A comma-separated list of routing values to control the shards the action will be executed on.
+ */
+ public DeleteByQueryRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the action will be executed on.
+ */
+ public DeleteByQueryRequestBuilder setRouting(String... routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The query to delete documents for.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public DeleteByQueryRequestBuilder setQuery(QueryBuilder queryBuilder) {
+ sourceBuilder().setQuery(queryBuilder);
+ return this;
+ }
+
+ /**
+ * The source to execute. It is preferable to use either {@link #setSource(byte[])}
+ * or {@link #setQuery(QueryBuilder)}.
+ */
+ public DeleteByQueryRequestBuilder setSource(String source) {
+ request().source(source);
+ return this;
+ }
+
+ /**
+ * The source to execute in the form of a map.
+ */
+ public DeleteByQueryRequestBuilder setSource(Map<String, Object> source) {
+ request().source(source);
+ return this;
+ }
+
+ /**
+ * The source to execute in the form of a builder.
+ */
+ public DeleteByQueryRequestBuilder setSource(XContentBuilder builder) {
+ request().source(builder);
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequestBuilder setSource(byte[] source) {
+ request().source(source);
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequestBuilder setSource(BytesReference source) {
+ request().source(source, false);
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request().source(source, unsafe);
+ return this;
+ }
+
+ /**
+ * The source to execute.
+ */
+ public DeleteByQueryRequestBuilder setSource(byte[] source, int offset, int length, boolean unsafe) {
+ request().source(source, offset, length, unsafe);
+ return this;
+ }
+
+ /**
+ * The replication type to use with this operation.
+ */
+ public DeleteByQueryRequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ /**
+ * The replication type to use with this operation.
+ */
+ public DeleteByQueryRequestBuilder setReplicationType(String replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ public DeleteByQueryRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<DeleteByQueryResponse> listener) {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder);
+ }
+
+ ((Client) client).deleteByQuery(request, listener);
+ }
+
+ private QuerySourceBuilder sourceBuilder() {
+ if (sourceBuilder == null) {
+ sourceBuilder = new QuerySourceBuilder();
+ }
+ return sourceBuilder;
+ }
+}
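The builder exposes the same request fluently; a short sketch (again assuming a connected ES 1.x Client, with illustrative index and field names):

    import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilders;

    class DeleteByQueryBuilderSketch {
        // Deletes sessions not seen for 30 days, using the builder API.
        static DeleteByQueryResponse deleteStaleSessions(Client client) {
            return client.prepareDeleteByQuery("sessions")
                    .setTypes("session")
                    .setQuery(QueryBuilders.rangeQuery("last_seen").lt("now-30d"))
                    .execute()
                    .actionGet();
        }
    }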
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java
new file mode 100644
index 0000000..92f98cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * The response of delete by query action. Holds the {@link IndexDeleteByQueryResponse}s from all the
+ * different indices.
+ */
+public class DeleteByQueryResponse extends ActionResponse implements Iterable<IndexDeleteByQueryResponse> {
+
+ private Map<String, IndexDeleteByQueryResponse> indices = newHashMap();
+
+ DeleteByQueryResponse() {
+ }
+
+ @Override
+ public Iterator<IndexDeleteByQueryResponse> iterator() {
+ return indices.values().iterator();
+ }
+
+ /**
+ * The responses from all the different indices.
+ */
+ public Map<String, IndexDeleteByQueryResponse> getIndices() {
+ return indices;
+ }
+
+ /**
+ * The response of a specific index.
+ */
+ public IndexDeleteByQueryResponse getIndex(String index) {
+ return indices.get(index);
+ }
+
+ public RestStatus status() {
+ RestStatus status = RestStatus.OK;
+ for (IndexDeleteByQueryResponse indexResponse : indices.values()) {
+ if (indexResponse.getFailedShards() > 0) {
+ RestStatus indexStatus = indexResponse.getFailures()[0].status();
+ if (indexResponse.getFailures().length > 1) {
+ for (int i = 1; i < indexResponse.getFailures().length; i++) {
+ if (indexResponse.getFailures()[i].status().getStatus() >= 500) {
+ indexStatus = indexResponse.getFailures()[i].status();
+ }
+ }
+ }
+ if (status.getStatus() < indexStatus.getStatus()) {
+ status = indexStatus;
+ }
+ }
+ }
+ return status;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ IndexDeleteByQueryResponse response = new IndexDeleteByQueryResponse();
+ response.readFrom(in);
+ indices.put(response.getIndex(), response);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(indices.size());
+ for (IndexDeleteByQueryResponse indexResponse : indices.values()) {
+ indexResponse.writeTo(out);
+ }
+ }
+}
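Since the response is iterable over the per-index results and status() reports the most severe shard failure, callers can inspect it along these lines (a sketch; names are illustrative):

    import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
    import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse;

    class DeleteByQueryResponseSketch {
        static void report(DeleteByQueryResponse response) {
            for (IndexDeleteByQueryResponse indexResponse : response) {
                System.out.println(indexResponse.getIndex() + ": "
                        + indexResponse.getSuccessfulShards() + "/"
                        + indexResponse.getTotalShards() + " shards ok");
            }
            // OK unless some shard failed; then the most severe failure wins,
            // with 5xx statuses taking precedence over 4xx ones.
            System.out.println("overall status: " + response.status());
        }
    }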
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java
new file mode 100644
index 0000000..76fee0e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Delete by query request to execute on a specific index.
+ */
+public class IndexDeleteByQueryRequest extends IndexReplicationOperationRequest<IndexDeleteByQueryRequest> {
+
+ private BytesReference source;
+ private String[] types = Strings.EMPTY_ARRAY;
+ @Nullable
+ private Set<String> routing;
+ @Nullable
+ private String[] filteringAliases;
+
+ IndexDeleteByQueryRequest(DeleteByQueryRequest request, String index, @Nullable Set<String> routing, @Nullable String[] filteringAliases) {
+ this.index = index;
+ this.timeout = request.timeout();
+ this.source = request.source();
+ this.types = request.types();
+ this.replicationType = request.replicationType();
+ this.consistencyLevel = request.consistencyLevel();
+ this.routing = routing;
+ this.filteringAliases = filteringAliases;
+ }
+
+ IndexDeleteByQueryRequest() {
+ }
+
+ BytesReference source() {
+ return source;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (source == null) {
+ validationException = addValidationError("source is missing", validationException);
+ }
+ return validationException;
+ }
+
+ Set<String> routing() {
+ return this.routing;
+ }
+
+ String[] types() {
+ return this.types;
+ }
+
+ String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ public IndexDeleteByQueryRequest timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ source = in.readBytesReference();
+ int typesSize = in.readVInt();
+ if (typesSize > 0) {
+ types = new String[typesSize];
+ for (int i = 0; i < typesSize; i++) {
+ types[i] = in.readString();
+ }
+ }
+ int routingSize = in.readVInt();
+ if (routingSize > 0) {
+ routing = new HashSet<String>(routingSize);
+ for (int i = 0; i < routingSize; i++) {
+ routing.add(in.readString());
+ }
+ }
+ int aliasesSize = in.readVInt();
+ if (aliasesSize > 0) {
+ filteringAliases = new String[aliasesSize];
+ for (int i = 0; i < aliasesSize; i++) {
+ filteringAliases[i] = in.readString();
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(source);
+ out.writeVInt(types.length);
+ for (String type : types) {
+ out.writeString(type);
+ }
+ if (routing != null) {
+ out.writeVInt(routing.size());
+ for (String r : routing) {
+ out.writeString(r);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ if (filteringAliases != null) {
+ out.writeVInt(filteringAliases.length);
+ for (String alias : filteringAliases) {
+ out.writeString(alias);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ }
+}
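Note the wire convention in writeTo above: a null routing set or alias array is written as a zero count, so the reader needs no separate null marker, at the cost of the read side mapping an empty collection back to null. A stand-alone sketch of that convention using plain java.io streams as stand-ins for StreamOutput/StreamInput:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    class NullableSetCodecSketch {
        // null is encoded as a zero count...
        static void write(DataOutputStream out, Set<String> values) throws IOException {
            if (values == null) {
                out.writeInt(0);
                return;
            }
            out.writeInt(values.size());
            for (String value : values) {
                out.writeUTF(value);
            }
        }

        // ...and a zero count is decoded back to null, so "empty" and
        // "absent" deliberately collapse into the same wire form.
        static Set<String> read(DataInputStream in) throws IOException {
            int size = in.readInt();
            if (size == 0) {
                return null;
            }
            Set<String> values = new HashSet<String>(size);
            for (int i = 0; i < size; i++) {
                values.add(in.readUTF());
            }
            return values;
        }
    }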
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java
new file mode 100644
index 0000000..be3e5d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Delete by query response executed on a specific index.
+ */
+public class IndexDeleteByQueryResponse extends ActionResponse {
+
+ private String index;
+ private int successfulShards;
+ private int failedShards;
+ private ShardOperationFailedException[] failures;
+
+ IndexDeleteByQueryResponse(String index, int successfulShards, int failedShards, List<ShardOperationFailedException> failures) {
+ this.index = index;
+ this.successfulShards = successfulShards;
+ this.failedShards = failedShards;
+ if (failures == null || failures.isEmpty()) {
+ this.failures = new DefaultShardOperationFailedException[0];
+ } else {
+ this.failures = failures.toArray(new ShardOperationFailedException[failures.size()]);
+ }
+ }
+
+ IndexDeleteByQueryResponse() {
+ }
+
+ /**
+ * The index the delete by query operation was executed against.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The total number of shards the delete by query was executed on.
+ */
+ public int getTotalShards() {
+ return failedShards + successfulShards;
+ }
+
+ /**
+ * The successful number of shards the delete by query was executed on.
+ */
+ public int getSuccessfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * The failed number of shards the delete by query was executed on.
+ */
+ public int getFailedShards() {
+ return failedShards;
+ }
+
+ public ShardOperationFailedException[] getFailures() {
+ return failures;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ successfulShards = in.readVInt();
+ failedShards = in.readVInt();
+ int size = in.readVInt();
+ failures = new ShardOperationFailedException[size];
+ for (int i = 0; i < size; i++) {
+ failures[i] = DefaultShardOperationFailedException.readShardOperationFailed(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeVInt(successfulShards);
+ out.writeVInt(failedShards);
+ out.writeVInt(failures.length);
+ for (ShardOperationFailedException failure : failures) {
+ failure.writeTo(out);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java
new file mode 100644
index 0000000..7d42bc5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentHelper;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Delete by query request to execute on a specific shard.
+ */
+public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest<ShardDeleteByQueryRequest> {
+
+ private int shardId;
+ private BytesReference source;
+ private String[] types = Strings.EMPTY_ARRAY;
+ @Nullable
+ private Set<String> routing;
+ @Nullable
+ private String[] filteringAliases;
+
+ ShardDeleteByQueryRequest(IndexDeleteByQueryRequest request, int shardId) {
+ super(request);
+ this.index = request.index();
+ this.source = request.source();
+ this.types = request.types();
+ this.shardId = shardId;
+ replicationType(request.replicationType());
+ consistencyLevel(request.consistencyLevel());
+ timeout = request.timeout();
+ this.routing = request.routing();
+ filteringAliases = request.filteringAliases();
+ }
+
+ ShardDeleteByQueryRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (source == null) {
+ addValidationError("source is missing", validationException);
+ }
+ return validationException;
+ }
+
+ public int shardId() {
+ return this.shardId;
+ }
+
+ BytesReference source() {
+ return source;
+ }
+
+ public String[] types() {
+ return this.types;
+ }
+
+ public Set<String> routing() {
+ return this.routing;
+ }
+
+ public String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ source = in.readBytesReference();
+ shardId = in.readVInt();
+ types = in.readStringArray();
+ int routingSize = in.readVInt();
+ if (routingSize > 0) {
+ routing = new HashSet<String>(routingSize);
+ for (int i = 0; i < routingSize; i++) {
+ routing.add(in.readString());
+ }
+ }
+ int aliasesSize = in.readVInt();
+ if (aliasesSize > 0) {
+ filteringAliases = new String[aliasesSize];
+ for (int i = 0; i < aliasesSize; i++) {
+ filteringAliases[i] = in.readString();
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(source);
+ out.writeVInt(shardId);
+ out.writeStringArray(types);
+ if (routing != null) {
+ out.writeVInt(routing.size());
+ for (String r : routing) {
+ out.writeString(r);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ if (filteringAliases != null) {
+ out.writeVInt(filteringAliases.length);
+ for (String alias : filteringAliases) {
+ out.writeString(alias);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ }
+
+ @Override
+ public String toString() {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(source, false);
+ } catch (Exception e) {
+ // ignore
+ }
+ return "delete_by_query {[" + index + "]" + Arrays.toString(types) + ", query [" + sSource + "]}";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java
new file mode 100644
index 0000000..5dee7c6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Delete by query response executed on a specific shard.
+ */
+public class ShardDeleteByQueryResponse extends ActionResponse {
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java
new file mode 100644
index 0000000..9be61c5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * Performs a delete-by-query request across all target indices by delegating to the index-level action.
+ */
+public class TransportDeleteByQueryAction extends TransportIndicesReplicationOperationAction<DeleteByQueryRequest, DeleteByQueryResponse, IndexDeleteByQueryRequest, IndexDeleteByQueryResponse, ShardDeleteByQueryRequest, ShardDeleteByQueryRequest, ShardDeleteByQueryResponse> {
+
+ private final DestructiveOperations destructiveOperations;
+
+ @Inject
+ public TransportDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ ThreadPool threadPool, TransportIndexDeleteByQueryAction indexDeleteByQueryAction,
+ NodeSettingsService nodeSettingsService) {
+ super(settings, transportService, clusterService, threadPool, indexDeleteByQueryAction);
+ this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+ }
+
+ @Override
+ protected void doExecute(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) {
+ destructiveOperations.failDestructive(request.indices());
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected Map<String, Set<String>> resolveRouting(ClusterState clusterState, DeleteByQueryRequest request) throws ElasticsearchException {
+ return clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
+ }
+
+ @Override
+ protected DeleteByQueryRequest newRequestInstance() {
+ return new DeleteByQueryRequest();
+ }
+
+ @Override
+ protected DeleteByQueryResponse newResponseInstance(DeleteByQueryRequest request, AtomicReferenceArray indexResponses) {
+ DeleteByQueryResponse response = new DeleteByQueryResponse();
+ for (int i = 0; i < indexResponses.length(); i++) {
+ IndexDeleteByQueryResponse indexResponse = (IndexDeleteByQueryResponse) indexResponses.get(i);
+ if (indexResponse != null) {
+ response.getIndices().put(indexResponse.getIndex(), indexResponse);
+ }
+ }
+ return response;
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return false;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteByQueryAction.NAME;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, DeleteByQueryRequest replicationPingRequest) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, DeleteByQueryRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.WRITE, concreteIndices);
+ }
+
+ @Override
+ protected IndexDeleteByQueryRequest newIndexRequestInstance(DeleteByQueryRequest request, String index, Set<String> routing) {
+ String[] filteringAliases = clusterService.state().metaData().filteringAliases(index, request.indices());
+ return new IndexDeleteByQueryRequest(request, index, routing, filteringAliases);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java
new file mode 100644
index 0000000..8d3806b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+
+/**
+ * Performs a delete-by-query request on all shards of an index by delegating to the shard-level action.
+ */
+public class TransportIndexDeleteByQueryAction extends TransportIndexReplicationOperationAction<IndexDeleteByQueryRequest, IndexDeleteByQueryResponse, ShardDeleteByQueryRequest, ShardDeleteByQueryRequest, ShardDeleteByQueryResponse> {
+
+ @Inject
+ public TransportIndexDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ ThreadPool threadPool, TransportShardDeleteByQueryAction shardDeleteByQueryAction) {
+ super(settings, transportService, clusterService, threadPool, shardDeleteByQueryAction);
+ }
+
+ @Override
+ protected IndexDeleteByQueryRequest newRequestInstance() {
+ return new IndexDeleteByQueryRequest();
+ }
+
+ @Override
+ protected IndexDeleteByQueryResponse newResponseInstance(IndexDeleteByQueryRequest request, List<ShardDeleteByQueryResponse> shardDeleteByQueryResponses, int failuresCount, List<ShardOperationFailedException> shardFailures) {
+ return new IndexDeleteByQueryResponse(request.index(), shardDeleteByQueryResponses.size(), failuresCount, shardFailures);
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return true;
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteByQueryAction.NAME + "/index";
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, IndexDeleteByQueryRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, IndexDeleteByQueryRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected GroupShardsIterator shards(IndexDeleteByQueryRequest request) {
+ return clusterService.operationRouting().deleteByQueryShards(clusterService.state(), request.index(), request.routing());
+ }
+
+ @Override
+ protected ShardDeleteByQueryRequest newShardRequestInstance(IndexDeleteByQueryRequest request, int shardId) {
+ return new ShardDeleteByQueryRequest(request, shardId);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java
new file mode 100644
index 0000000..04e2c96
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.deletebyquery;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.internal.DefaultSearchContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Performs a delete-by-query request on a single shard, on the primary first and then on its replicas.
+ */
+public class TransportShardDeleteByQueryAction extends TransportShardReplicationOperationAction<ShardDeleteByQueryRequest, ShardDeleteByQueryRequest, ShardDeleteByQueryResponse> {
+
+ private final ScriptService scriptService;
+ private final CacheRecycler cacheRecycler;
+ private final PageCacheRecycler pageCacheRecycler;
+
+ @Inject
+ public TransportShardDeleteByQueryAction(Settings settings, TransportService transportService,
+ ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool,
+ ShardStateAction shardStateAction, ScriptService scriptService, CacheRecycler cacheRecycler,
+ PageCacheRecycler pageCacheRecycler) {
+ super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
+ this.scriptService = scriptService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return true;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.INDEX;
+ }
+
+ @Override
+ protected ShardDeleteByQueryRequest newRequestInstance() {
+ return new ShardDeleteByQueryRequest();
+ }
+
+ @Override
+ protected ShardDeleteByQueryRequest newReplicaRequestInstance() {
+ return new ShardDeleteByQueryRequest();
+ }
+
+ @Override
+ protected ShardDeleteByQueryResponse newResponseInstance() {
+ return new ShardDeleteByQueryResponse();
+ }
+
+ @Override
+ protected String transportAction() {
+ return DeleteByQueryAction.NAME + "/shard";
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, ShardDeleteByQueryRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, ShardDeleteByQueryRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected PrimaryResponse<ShardDeleteByQueryResponse, ShardDeleteByQueryRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
+ ShardDeleteByQueryRequest request = shardRequest.request;
+ IndexService indexService = indicesService.indexServiceSafe(shardRequest.request.index());
+ IndexShard indexShard = indexService.shardSafe(shardRequest.shardId);
+
+ SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchRequest().types(request.types()), null,
+ indexShard.acquireSearcher("delete_by_query"), indexService, indexShard, scriptService, cacheRecycler, pageCacheRecycler));
+ try {
+ Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(request.source(), request.filteringAliases(), request.types())
+ .origin(Engine.Operation.Origin.PRIMARY);
+ SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query(), ImmutableMap.<String, Filter>of()));
+ indexShard.deleteByQuery(deleteByQuery);
+ } finally {
+ SearchContext searchContext = SearchContext.current();
+ searchContext.clearAndRelease();
+ SearchContext.removeCurrent();
+ }
+ return new PrimaryResponse<ShardDeleteByQueryResponse, ShardDeleteByQueryRequest>(shardRequest.request, new ShardDeleteByQueryResponse(), null);
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
+ ShardDeleteByQueryRequest request = shardRequest.request;
+ IndexService indexService = indicesService.indexServiceSafe(shardRequest.request.index());
+ IndexShard indexShard = indexService.shardSafe(shardRequest.shardId);
+
+ SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchRequest().types(request.types()), null,
+ indexShard.acquireSearcher("delete_by_query", IndexShard.Mode.WRITE), indexService, indexShard, scriptService,
+ cacheRecycler, pageCacheRecycler));
+ try {
+ Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(request.source(), request.filteringAliases(), request.types())
+ .origin(Engine.Operation.Origin.REPLICA);
+ SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query(), ImmutableMap.<String, Filter>of()));
+ indexShard.deleteByQuery(deleteByQuery);
+ } finally {
+ SearchContext searchContext = SearchContext.current();
+ searchContext.clearAndRelease();
+ SearchContext.removeCurrent();
+ }
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, ShardDeleteByQueryRequest request) {
+ GroupShardsIterator group = clusterService.operationRouting().deleteByQueryShards(clusterService.state(), request.index(), request.routing());
+ for (ShardIterator shardIt : group) {
+ if (shardIt.shardId().id() == request.shardId()) {
+ return shardIt;
+ }
+ }
+ throw new ElasticsearchIllegalStateException("No shards iterator found for shard [" + request.shardId() + "]");
+ }
+}
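
For context, a minimal usage sketch of the delete-by-query API that this shard-level action backs, assuming a connected 1.x Client instance named client (index, type, field, and value are placeholders):

    DeleteByQueryResponse response = client.prepareDeleteByQuery("twitter")
            .setTypes("tweet")
            .setQuery(QueryBuilders.termQuery("user", "kimchy"))
            .execute().actionGet();
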
diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java b/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java
new file mode 100644
index 0000000..a4bb682
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Delete by query action.
+ */
+package org.elasticsearch.action.deletebyquery; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/src/main/java/org/elasticsearch/action/explain/ExplainAction.java
new file mode 100644
index 0000000..a8e7b9b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/explain/ExplainAction.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.explain;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * Entry point for the explain feature.
+ */
+public class ExplainAction extends Action<ExplainRequest, ExplainResponse, ExplainRequestBuilder> {
+
+ public static final ExplainAction INSTANCE = new ExplainAction();
+ public static final String NAME = "explain";
+
+ private ExplainAction() {
+ super(NAME);
+ }
+
+ public ExplainRequestBuilder newRequestBuilder(Client client) {
+ return new ExplainRequestBuilder(client);
+ }
+
+ public ExplainResponse newResponse() {
+ return new ExplainResponse();
+ }
+}
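
The Action singleton above is the generic entry point: it hands out a typed request builder for a given Client. A minimal sketch, assuming a connected 1.x Client (index, type, id, and query are placeholders):

    ExplainRequestBuilder builder = ExplainAction.INSTANCE.newRequestBuilder(client);
    ExplainResponse response = builder
            .setIndex("twitter").setType("tweet").setId("1")
            .setQuery(QueryBuilders.termQuery("message", "search"))
            .execute().actionGet();

setIndex comes from the SingleShardOperationRequestBuilder base class; most callers reach the same path through Client#prepareExplain instead.
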
diff --git a/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java
new file mode 100644
index 0000000..7898e47
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.explain;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+
+/**
+ * Explain request encapsulating the query and the document identifier to compute a score explanation for.
+ */
+public class ExplainRequest extends SingleShardOperationRequest<ExplainRequest> {
+
+ private static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ private String type = "_all";
+ private String id;
+ private String routing;
+ private String preference;
+ private BytesReference source;
+ private boolean sourceUnsafe;
+ private String[] fields;
+ private FetchSourceContext fetchSourceContext;
+
+ private String[] filteringAlias = Strings.EMPTY_ARRAY;
+
+ long nowInMillis;
+
+ ExplainRequest() {
+ }
+
+ public ExplainRequest(String index, String type, String id) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public ExplainRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ public String id() {
+ return id;
+ }
+
+ public ExplainRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ public String routing() {
+ return routing;
+ }
+
+ public ExplainRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * Simply sets the routing, since the parent is only used to route to the right shard.
+ */
+ public ExplainRequest parent(String parent) {
+ this.routing = parent;
+ return this;
+ }
+
+ public String preference() {
+ return preference;
+ }
+
+ public ExplainRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public BytesReference source() {
+ return source;
+ }
+
+ public boolean sourceUnsafe() {
+ return sourceUnsafe;
+ }
+
+ public ExplainRequest source(QuerySourceBuilder sourceBuilder) {
+ this.source = sourceBuilder.buildAsBytes(contentType);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ public ExplainRequest source(BytesReference source, boolean unsafe) {
+ this.source = source;
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned.
+ */
+ public ExplainRequest fetchSourceContext(FetchSourceContext context) {
+ this.fetchSourceContext = context;
+ return this;
+ }
+
+ public FetchSourceContext fetchSourceContext() {
+ return fetchSourceContext;
+ }
+
+ public String[] fields() {
+ return fields;
+ }
+
+ public ExplainRequest fields(String[] fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ public String[] filteringAlias() {
+ return filteringAlias;
+ }
+
+ public ExplainRequest filteringAlias(String[] filteringAlias) {
+ if (filteringAlias != null) {
+ this.filteringAlias = filteringAlias;
+ }
+
+ return this;
+ }
+
+ @Override
+ protected void beforeLocalFork() {
+ if (sourceUnsafe) {
+ source = source.copyBytesArray();
+ sourceUnsafe = false;
+ }
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (type == null) {
+ validationException = ValidateActions.addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ validationException = ValidateActions.addValidationError("id is missing", validationException);
+ }
+ if (source == null) {
+ validationException = ValidateActions.addValidationError("source is missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ type = in.readString();
+ id = in.readString();
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+ source = in.readBytesReference();
+ sourceUnsafe = false;
+ filteringAlias = in.readStringArray();
+ if (in.readBoolean()) {
+ fields = in.readStringArray();
+ }
+
+ fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
+ nowInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(type);
+ out.writeString(id);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+ out.writeBytesReference(source);
+ out.writeStringArray(filteringAlias);
+ if (fields != null) {
+ out.writeBoolean(true);
+ out.writeStringArray(fields);
+ } else {
+ out.writeBoolean(false);
+ }
+
+ FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
+ out.writeVLong(nowInMillis);
+ }
+}
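
Note the discipline in readFrom/writeTo above: optional values are prefixed with a presence flag (or a -1 length), and the read side must consume fields in exactly the order the write side wrote them. A stand-in sketch of the same flag-prefixed encoding, using plain java.io streams instead of Elasticsearch's StreamInput/StreamOutput:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class OptionalArrayCodec {
        static void writeOptionalStrings(DataOutputStream out, String[] values) throws IOException {
            out.writeBoolean(values != null);      // presence flag first
            if (values != null) {
                out.writeInt(values.length);       // then the length
                for (String v : values) {
                    out.writeUTF(v);               // then each element, in order
                }
            }
        }

        static String[] readOptionalStrings(DataInputStream in) throws IOException {
            if (!in.readBoolean()) {               // must mirror the write order exactly
                return null;
            }
            String[] values = new String[in.readInt()];
            for (int i = 0; i < values.length; i++) {
                values[i] = in.readUTF();
            }
            return values;
        }
    }
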
diff --git a/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java b/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java
new file mode 100644
index 0000000..b1926c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.explain;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+/**
+ * A builder for {@link ExplainRequest}.
+ */
+public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<ExplainRequest, ExplainResponse, ExplainRequestBuilder> {
+
+ private QuerySourceBuilder sourceBuilder;
+
+ ExplainRequestBuilder(Client client) {
+ super((InternalClient) client, new ExplainRequest());
+ }
+
+ public ExplainRequestBuilder(Client client, String index, String type, String id) {
+ super((InternalClient) client, new ExplainRequest().index(index).type(type).id(id));
+ }
+
+ /**
+ * Sets the type to get a score explanation for.
+ */
+ public ExplainRequestBuilder setType(String type) {
+ request().type(type);
+ return this;
+ }
+
+ /**
+ * Sets the id to get a score explanation for.
+ */
+ public ExplainRequestBuilder setId(String id) {
+ request().id(id);
+ return this;
+ }
+
+ /**
+ * Sets the routing for sharding.
+ */
+ public ExplainRequestBuilder setRouting(String routing) {
+ request().routing(routing);
+ return this;
+ }
+
+ /**
+ * Simply sets the routing, since the parent is only used to route to the right shard.
+ */
+ public ExplainRequestBuilder setParent(String parent) {
+ request().parent(parent);
+ return this;
+ }
+
+ /**
+ * Sets the shard preference.
+ */
+ public ExplainRequestBuilder setPreference(String preference) {
+ request().preference(preference);
+ return this;
+ }
+
+ /**
+ * Sets the query to get a score explanation for.
+ */
+ public ExplainRequestBuilder setQuery(QueryBuilder query) {
+ sourceBuilder().setQuery(query);
+ return this;
+ }
+
+ /**
+ * Sets the query to get a score explanation for.
+ */
+ public ExplainRequestBuilder setQuery(BytesReference query) {
+ sourceBuilder().setQuery(query);
+ return this;
+ }
+
+ /**
+ * Explicitly specify the fields that will be returned for the explained document. By default, nothing is returned.
+ */
+ public ExplainRequestBuilder setFields(String... fields) {
+ request.fields(fields);
+ return this;
+ }
+
+ /**
+ * Indicates whether the response should contain the stored _source.
+ *
+ * @param fetch <tt>true</tt> if the stored _source should be returned
+ * @return this builder
+ */
+ public ExplainRequestBuilder setFetchSource(boolean fetch) {
+ FetchSourceContext context = request.fetchSourceContext();
+ if (context == null) {
+ request.fetchSourceContext(new FetchSourceContext(fetch));
+ }
+ else {
+ context.fetchSource(fetch);
+ }
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include An optional include (optionally wildcarded) pattern to filter the returned _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public ExplainRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
+ return setFetchSource(
+ include == null ? Strings.EMPTY_ARRAY : new String[] {include},
+ exclude == null ? Strings.EMPTY_ARRAY : new String[] {exclude});
+ }
+
+ /**
+ * Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
+ * @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public ExplainRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ FetchSourceContext context = request.fetchSourceContext();
+ if (context == null) {
+ request.fetchSourceContext(new FetchSourceContext(includes, excludes));
+ }
+ else {
+ context.fetchSource(true);
+ context.includes(includes);
+ context.excludes(excludes);
+ }
+ return this;
+ }
+
+ /**
+ * Sets the full source of the explain request (for example, wrapping an actual query).
+ */
+ public ExplainRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request().source(source, unsafe);
+ return this;
+ }
+
+ /**
+ * Sets whether the actual explain action should occur in a different thread if executed locally.
+ */
+ public ExplainRequestBuilder operationThreaded(boolean threadedOperation) {
+ request().operationThreaded(threadedOperation);
+ return this;
+ }
+
+ protected void doExecute(ActionListener<ExplainResponse> listener) {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder);
+ }
+
+ ((Client) client).explain(request, listener);
+ }
+
+ private QuerySourceBuilder sourceBuilder() {
+ if (sourceBuilder == null) {
+ sourceBuilder = new QuerySourceBuilder();
+ }
+ return sourceBuilder;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java
new file mode 100644
index 0000000..51a9360
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.explain;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.get.GetResult;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.lucene.Lucene.readExplanation;
+import static org.elasticsearch.common.lucene.Lucene.writeExplanation;
+
+/**
+ * Response containing the score explanation.
+ */
+public class ExplainResponse extends ActionResponse {
+
+ private boolean exists;
+ private Explanation explanation;
+ private GetResult getResult;
+
+ ExplainResponse() {
+ }
+
+ public ExplainResponse(boolean exists) {
+ this.exists = exists;
+ }
+
+ public ExplainResponse(boolean exists, Explanation explanation) {
+ this.exists = exists;
+ this.explanation = explanation;
+ }
+
+ public ExplainResponse(boolean exists, Explanation explanation, GetResult getResult) {
+ this.exists = exists;
+ this.explanation = explanation;
+ this.getResult = getResult;
+ }
+
+ public Explanation getExplanation() {
+ return explanation;
+ }
+
+ public boolean isMatch() {
+ return explanation != null && explanation.isMatch();
+ }
+
+ public boolean hasExplanation() {
+ return explanation != null;
+ }
+
+ public boolean isExists() {
+ return exists;
+ }
+
+ public GetResult getGetResult() {
+ return getResult;
+ }
+
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ exists = in.readBoolean();
+ if (in.readBoolean()) {
+ explanation = readExplanation(in);
+ }
+ if (in.readBoolean()) {
+ getResult = GetResult.readGetResult(in);
+ }
+ }
+
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(exists);
+ if (explanation == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ writeExplanation(out, explanation);
+ }
+ if (getResult == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ getResult.writeTo(out);
+ }
+ }
+}
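
Putting request, builder, and response together, a minimal end-to-end sketch, assuming a connected 1.x Client (names and the query are placeholders):

    ExplainResponse response = client.prepareExplain("twitter", "tweet", "1")
            .setQuery(QueryBuilders.termQuery("message", "search"))
            .execute().actionGet();
    if (response.isExists() && response.isMatch()) {
        // Lucene's Explanation pretty-prints the full score breakdown.
        System.out.println(response.getExplanation());
    }
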
diff --git a/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java
new file mode 100644
index 0000000..59ddff5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.explain;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.internal.DefaultSearchContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.rescore.Rescorer;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Explain transport action. Computes the score explanation on the targeted shard.
+ */
+// TODO: AggregatedDfs. Currently the idf can be different than when executing a normal search with explain.
+public class TransportExplainAction extends TransportShardSingleOperationAction<ExplainRequest, ExplainResponse> {
+
+ private final IndicesService indicesService;
+
+ private final ScriptService scriptService;
+
+ private final CacheRecycler cacheRecycler;
+
+ private final PageCacheRecycler pageCacheRecycler;
+
+ @Inject
+ public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ TransportService transportService, IndicesService indicesService,
+ ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.scriptService = scriptService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ }
+
+ @Override
+ protected void doExecute(ExplainRequest request, ActionListener<ExplainResponse> listener) {
+ request.nowInMillis = System.currentTimeMillis();
+ super.doExecute(request, listener);
+ }
+
+ protected String transportAction() {
+ return ExplainAction.NAME;
+ }
+
+ protected String executor() {
+ return ThreadPool.Names.GET; // Or use Names.SEARCH?
+ }
+
+ @Override
+ protected void resolveRequest(ClusterState state, ExplainRequest request) {
+ String concreteIndex = state.metaData().concreteIndex(request.index());
+ request.filteringAlias(state.metaData().filteringAliases(concreteIndex, request.index()));
+ request.index(state.metaData().concreteIndex(request.index()));
+
+ // Fail fast on the node that received the request.
+ if (request.routing() == null && state.getMetaData().routingRequired(request.index(), request.type())) {
+ throw new RoutingMissingException(request.index(), request.type(), request.id());
+ }
+ }
+
+ protected ExplainResponse shardOperation(ExplainRequest request, int shardId) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexService(request.index());
+ IndexShard indexShard = indexService.shardSafe(shardId);
+ Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
+ Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
+ if (!result.exists()) {
+ return new ExplainResponse(false);
+ }
+
+ SearchContext context = new DefaultSearchContext(
+ 0,
+ new ShardSearchRequest().types(new String[]{request.type()})
+ .filteringAliases(request.filteringAlias())
+ .nowInMillis(request.nowInMillis),
+ null, result.searcher(), indexService, indexShard,
+ scriptService, cacheRecycler, pageCacheRecycler
+ );
+ SearchContext.setCurrent(context);
+
+ try {
+ context.parsedQuery(indexService.queryParserService().parseQuery(request.source()));
+ context.preProcess();
+ int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
+ Explanation explanation;
+ if (context.rescore() != null) {
+ RescoreSearchContext ctx = context.rescore();
+ Rescorer rescorer = ctx.rescorer();
+ explanation = rescorer.explain(topLevelDocId, context, ctx);
+ } else {
+ explanation = context.searcher().explain(context.query(), topLevelDocId);
+ }
+ if (request.fields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
+ // The advantage is that we're not opening a second searcher to retrieve the _source. Also,
+ // because we are working with the same searcher that produced the engine get result, we can
+ // be sure that the doc isn't deleted between the initial get and this call.
+ GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
+ return new ExplainResponse(true, explanation, getResult);
+ } else {
+ return new ExplainResponse(true, explanation);
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchException("Could not explain", e);
+ } finally {
+ context.release();
+ SearchContext.removeCurrent();
+ }
+ }
+
+ protected ExplainRequest newRequest() {
+ return new ExplainRequest();
+ }
+
+ protected ExplainResponse newResponse() {
+ return new ExplainResponse();
+ }
+
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, ExplainRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ protected ClusterBlockException checkRequestBlock(ClusterState state, ExplainRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ protected ShardIterator shards(ClusterState state, ExplainRequest request) throws ElasticsearchException {
+ return clusterService.operationRouting().getShards(
+ clusterService.state(), request.index(), request.type(), request.id(), request.routing(), request.preference()
+ );
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/explain/package-info.java b/src/main/java/org/elasticsearch/action/explain/package-info.java
new file mode 100644
index 0000000..a38b269
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/explain/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Explain action.
+ */
+package org.elasticsearch.action.explain;
diff --git a/src/main/java/org/elasticsearch/action/get/GetAction.java b/src/main/java/org/elasticsearch/action/get/GetAction.java
new file mode 100644
index 0000000..2234905
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/GetAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * Get action.
+ */
+public class GetAction extends Action<GetRequest, GetResponse, GetRequestBuilder> {
+
+ public static final GetAction INSTANCE = new GetAction();
+ public static final String NAME = "get";
+
+ private GetAction() {
+ super(NAME);
+ }
+
+ @Override
+ public GetResponse newResponse() {
+ return new GetResponse();
+ }
+
+ @Override
+ public GetRequestBuilder newRequestBuilder(Client client) {
+ return new GetRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/get/GetRequest.java b/src/main/java/org/elasticsearch/action/get/GetRequest.java
new file mode 100644
index 0000000..f8b06a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/GetRequest.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+
+/**
+ * A request to get a document (its source) from an index based on its type (optional) and id. Best created using
+ * {@link org.elasticsearch.client.Requests#getRequest(String)}.
+ * <p>
+ * The operation requires the {@link #index()}, {@link #type(String)} and {@link #id(String)}
+ * to be set.
+ *
+ * @see org.elasticsearch.action.get.GetResponse
+ * @see org.elasticsearch.client.Requests#getRequest(String)
+ * @see org.elasticsearch.client.Client#get(GetRequest)
+ */
+public class GetRequest extends SingleShardOperationRequest<GetRequest> {
+
+ protected String type;
+ protected String id;
+ protected String routing;
+ protected String preference;
+
+ private String[] fields;
+
+ private FetchSourceContext fetchSourceContext;
+
+ private boolean refresh = false;
+
+ Boolean realtime;
+
+ private VersionType versionType = VersionType.INTERNAL;
+ private long version = Versions.MATCH_ANY;
+
+ GetRequest() {
+ type = "_all";
+ }
+
+ /**
+ * Constructs a new get request against the specified index. The {@link #type(String)} and {@link #id(String)}
+ * must be set.
+ */
+ public GetRequest(String index) {
+ super(index);
+ this.type = "_all";
+ }
+
+ /**
+ * Constructs a new get request against the specified index with the type and id.
+ *
+ * @param index The index to get the document from
+ * @param type The type of the document
+ * @param id The id of the document
+ */
+ public GetRequest(String index, String type, String id) {
+ super(index);
+ this.type = type;
+ this.id = id;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (type == null) {
+ validationException = ValidateActions.addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ validationException = ValidateActions.addValidationError("id is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the type of the document to fetch.
+ */
+ public GetRequest type(@Nullable String type) {
+ if (type == null) {
+ type = "_all";
+ }
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * Sets the id of the document to fetch.
+ */
+ public GetRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Will simply set the routing to this value, as it is only
+ * used for routing with get requests.
+ */
+ public GetRequest parent(String parent) {
+ if (routing == null) {
+ routing = parent;
+ }
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. The supplied value is used to compute the target
+ * shard instead of the id.
+ */
+ public GetRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public GetRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public String id() {
+ return id;
+ }
+
+ public String routing() {
+ return this.routing;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ /**
+ * Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned.
+ */
+ public GetRequest fetchSourceContext(FetchSourceContext context) {
+ this.fetchSourceContext = context;
+ return this;
+ }
+
+ public FetchSourceContext fetchSourceContext() {
+ return fetchSourceContext;
+ }
+
+ /**
+ * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
+ * field will be returned.
+ */
+ public GetRequest fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ /**
+ * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
+ * field will be returned.
+ */
+ public String[] fields() {
+ return this.fields;
+ }
+
+ /**
+ * Whether a refresh should be executed before this get operation, causing it to return the
+ * latest value. Note, heavy get traffic should not set this to <tt>true</tt>. Defaults
+ * to <tt>false</tt>.
+ */
+ public GetRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ public boolean realtime() {
+ return this.realtime == null ? true : this.realtime;
+ }
+
+ public GetRequest realtime(Boolean realtime) {
+ this.realtime = realtime;
+ return this;
+ }
+
+ public long version() {
+ return version;
+ }
+
+ /**
+ * Sets the version, which will cause the get operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public GetRequest version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ /**
+ * Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
+ */
+ public GetRequest versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ type = in.readSharedString();
+ id = in.readString();
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+ refresh = in.readBoolean();
+ int size = in.readInt();
+ if (size >= 0) {
+ fields = new String[size];
+ for (int i = 0; i < size; i++) {
+ fields[i] = in.readString();
+ }
+ }
+ byte realtime = in.readByte();
+ if (realtime == 0) {
+ this.realtime = false;
+ } else if (realtime == 1) {
+ this.realtime = true;
+ }
+
+ this.versionType = VersionType.fromValue(in.readByte());
+ this.version = in.readVLong();
+
+ fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeSharedString(type);
+ out.writeString(id);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+
+ out.writeBoolean(refresh);
+ if (fields == null) {
+ out.writeInt(-1);
+ } else {
+ out.writeInt(fields.length);
+ for (String field : fields) {
+ out.writeString(field);
+ }
+ }
+ if (realtime == null) {
+ out.writeByte((byte) -1);
+ } else if (!realtime) {
+ out.writeByte((byte) 0);
+ } else {
+ out.writeByte((byte) 1);
+ }
+
+ out.writeByte(versionType.getValue());
+ out.writeVLong(version);
+
+ FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
+ }
+
+ @Override
+ public String toString() {
+ return "[" + index + "][" + type + "][" + id + "]: routing [" + routing + "]";
+ }
+}
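
A minimal sketch of driving this request directly, assuming a connected 1.x Client (index, type, id, field names, and the expected version are placeholders):

    GetRequest request = new GetRequest("twitter", "tweet", "1")
            .fields("user", "message") // return only these stored fields
            .realtime(false)           // skip the realtime path and read as of the last refresh
            .version(2);               // fail if the document is no longer at version 2
    GetResponse response = client.get(request).actionGet();
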
diff --git a/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java
new file mode 100644
index 0000000..850df1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+/**
+ * A get document action request builder.
+ */
+public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetRequest, GetResponse, GetRequestBuilder> {
+
+ public GetRequestBuilder(Client client) {
+ super((InternalClient) client, new GetRequest());
+ }
+
+ public GetRequestBuilder(Client client, @Nullable String index) {
+ super((InternalClient) client, new GetRequest(index));
+ }
+
+ /**
+ * Sets the type of the document to fetch. If set to <tt>null</tt>, will use just the id to fetch the
+ * first document matching it.
+ */
+ public GetRequestBuilder setType(@Nullable String type) {
+ request.type(type);
+ return this;
+ }
+
+ /**
+ * Sets the id of the document to fetch.
+ */
+ public GetRequestBuilder setId(String id) {
+ request.id(id);
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Will simply set the routing to this value, as it is only
+ * used for routing with get requests.
+ */
+ public GetRequestBuilder setParent(String parent) {
+ request.parent(parent);
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. The supplied value is used to compute the target
+ * shard instead of the id.
+ */
+ public GetRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public GetRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
+ * field will be returned.
+ */
+ public GetRequestBuilder setFields(String... fields) {
+ request.fields(fields);
+ return this;
+ }
+
+ /**
+ * Indicates whether the response should contain the stored _source.
+ *
+ * @param fetch <tt>true</tt> if the stored _source should be returned
+ * @return this builder
+ */
+ public GetRequestBuilder setFetchSource(boolean fetch) {
+ FetchSourceContext context = request.fetchSourceContext();
+ if (context == null) {
+ request.fetchSourceContext(new FetchSourceContext(fetch));
+ }
+ else {
+ context.fetchSource(fetch);
+ }
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include An optional include (optionally wildcarded) pattern to filter the returned _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public GetRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
+ return setFetchSource(
+ include == null ? Strings.EMPTY_ARRAY : new String[] {include},
+ exclude == null ? Strings.EMPTY_ARRAY : new String[] {exclude});
+ }
+
+ /**
+ * Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
+ * @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public GetRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ FetchSourceContext context = request.fetchSourceContext();
+ if (context == null) {
+ request.fetchSourceContext(new FetchSourceContext(includes, excludes));
+ }
+ else {
+ context.fetchSource(true);
+ context.includes(includes);
+ context.excludes(excludes);
+ }
+ return this;
+ }
+
+ /**
+ * Whether a refresh should be executed before this get operation, causing it to return the
+ * latest value. Note, heavy get traffic should not set this to <tt>true</tt>. Defaults
+ * to <tt>false</tt>.
+ */
+ public GetRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ public GetRequestBuilder setRealtime(Boolean realtime) {
+ request.realtime(realtime);
+ return this;
+ }
+
+ /**
+ * Sets the version, which will cause the get operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public GetRequestBuilder setVersion(long version) {
+ request.version(version);
+ return this;
+ }
+
+ /**
+ * Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
+ */
+ public GetRequestBuilder setVersionType(VersionType versionType) {
+ request.versionType(versionType);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<GetResponse> listener) {
+ ((Client) client).get(request, listener);
+ }
+}
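
The setFetchSource variants above filter the returned _source on the shard side. A short sketch, assuming a connected 1.x Client (the include and exclude patterns are placeholders):

    GetResponse response = client.prepareGet("twitter", "tweet", "1")
            .setFetchSource("user.*", "user.secret") // keep user.* but drop user.secret
            .execute().actionGet();
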
diff --git a/src/main/java/org/elasticsearch/action/get/GetResponse.java b/src/main/java/org/elasticsearch/action/get/GetResponse.java
new file mode 100644
index 0000000..31de65d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/GetResponse.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.get.GetField;
+import org.elasticsearch.index.get.GetResult;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * The response of a get action.
+ *
+ * @see GetRequest
+ * @see org.elasticsearch.client.Client#get(GetRequest)
+ */
+public class GetResponse extends ActionResponse implements Iterable<GetField>, ToXContent {
+
+ private GetResult getResult;
+
+ GetResponse() {
+ }
+
+ GetResponse(GetResult getResult) {
+ this.getResult = getResult;
+ }
+
+ /**
+ * Does the document exist.
+ */
+ public boolean isExists() {
+ return getResult.isExists();
+ }
+
+ /**
+ * The index the document was fetched from.
+ */
+ public String getIndex() {
+ return getResult.getIndex();
+ }
+
+ /**
+ * The type of the document.
+ */
+ public String getType() {
+ return getResult.getType();
+ }
+
+ /**
+ * The id of the document.
+ */
+ public String getId() {
+ return getResult.getId();
+ }
+
+ /**
+ * The version of the doc.
+ */
+ public long getVersion() {
+ return getResult.getVersion();
+ }
+
+ /**
+ * The source of the document, if it exists.
+ */
+ public byte[] getSourceAsBytes() {
+ return getResult.source();
+ }
+
+ /**
+ * Returns the internal source bytes, as they are returned without munging (for example,
+ * might still be compressed).
+ */
+ public BytesReference getSourceInternal() {
+ return getResult.internalSourceRef();
+ }
+
+ /**
+ * Returns the bytes reference, uncompressing the source if needed.
+ */
+ public BytesReference getSourceAsBytesRef() {
+ return getResult.sourceRef();
+ }
+
+ /**
+ * Is the source empty (not available) or not.
+ */
+ public boolean isSourceEmpty() {
+ return getResult.isSourceEmpty();
+ }
+
+ /**
+ * The source of the document (as a string).
+ */
+ public String getSourceAsString() {
+ return getResult.sourceAsString();
+ }
+
+ /**
+ * The source of the document (as a map).
+ */
+ @SuppressWarnings({"unchecked"})
+ public Map<String, Object> getSourceAsMap() throws ElasticsearchParseException {
+ return getResult.sourceAsMap();
+ }
+
+ public Map<String, Object> getSource() {
+ return getResult.getSource();
+ }
+
+ public Map<String, GetField> getFields() {
+ return getResult.getFields();
+ }
+
+ public GetField getField(String name) {
+ return getResult.field(name);
+ }
+
+ @Override
+ public Iterator<GetField> iterator() {
+ return getResult.iterator();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return getResult.toXContent(builder, params);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ getResult = GetResult.readGetResult(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ getResult.writeTo(out);
+ }
+}
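
A short sketch of consuming the response, covering the exists check and both the parsed _source and stored-field accessors (field names are placeholders; java.util.Map is assumed imported):

    if (response.isExists()) {
        Map<String, Object> source = response.getSourceAsMap(); // parsed _source
        GetField user = response.getField("user");              // a stored field, if requested
        if (user != null) {
            System.out.println(source.get("message") + " by " + user.getValue());
        }
    }
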
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetAction.java b/src/main/java/org/elasticsearch/action/get/MultiGetAction.java
new file mode 100644
index 0000000..3134bfa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * Multi get action.
+ */
+public class MultiGetAction extends Action<MultiGetRequest, MultiGetResponse, MultiGetRequestBuilder> {
+
+ public static final MultiGetAction INSTANCE = new MultiGetAction();
+ public static final String NAME = "mget";
+
+ private MultiGetAction() {
+ super(NAME);
+ }
+
+ @Override
+ public MultiGetResponse newResponse() {
+ return new MultiGetResponse();
+ }
+
+ @Override
+ public MultiGetRequestBuilder newRequestBuilder(Client client) {
+ return new MultiGetRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java b/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java
new file mode 100644
index 0000000..bdaf8cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ * A single item of a multi get response.
+ */
+public class MultiGetItemResponse implements Streamable {
+
+ private GetResponse response;
+ private MultiGetResponse.Failure failure;
+
+ MultiGetItemResponse() {
+ }
+
+ public MultiGetItemResponse(GetResponse response, MultiGetResponse.Failure failure) {
+ this.response = response;
+ this.failure = failure;
+ }
+
+ /**
+ * The index name of the document.
+ */
+ public String getIndex() {
+ if (failure != null) {
+ return failure.getIndex();
+ }
+ return response.getIndex();
+ }
+
+ /**
+ * The type of the document.
+ */
+ public String getType() {
+ if (failure != null) {
+ return failure.getType();
+ }
+ return response.getType();
+ }
+
+ /**
+ * The id of the document.
+ */
+ public String getId() {
+ if (failure != null) {
+ return failure.getId();
+ }
+ return response.getId();
+ }
+
+ /**
+ * Is this a failed execution?
+ */
+ public boolean isFailed() {
+ return failure != null;
+ }
+
+ /**
+ * The actual get response, <tt>null</tt> if it is a failure.
+ */
+ public GetResponse getResponse() {
+ return this.response;
+ }
+
+ /**
+ * The failure if relevant.
+ */
+ public MultiGetResponse.Failure getFailure() {
+ return this.failure;
+ }
+
+ public static MultiGetItemResponse readItemResponse(StreamInput in) throws IOException {
+ MultiGetItemResponse response = new MultiGetItemResponse();
+ response.readFrom(in);
+ return response;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ failure = MultiGetResponse.Failure.readFailure(in);
+ } else {
+ response = new GetResponse();
+ response.readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (failure != null) {
+ out.writeBoolean(true);
+ failure.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ response.writeTo(out);
+ }
+ }
+} \ No newline at end of file
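
A minimal sketch of issuing a multi get and walking the per-item responses, assuming a connected 1.x Client (index, type, and ids are placeholders):

    MultiGetResponse multi = client.prepareMultiGet()
            .add("twitter", "tweet", "1", "2", "3")
            .execute().actionGet();
    for (MultiGetItemResponse item : multi) {
        if (item.isFailed()) {
            System.err.println(item.getId() + " failed: " + item.getFailure().getMessage());
        } else if (item.getResponse().isExists()) {
            System.out.println(item.getResponse().getSourceAsString());
        }
    }
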
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
new file mode 100644
index 0000000..18a5b07
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MultiGetRequest extends ActionRequest<MultiGetRequest> {
+
+ /**
+ * A single get item.
+ */
+ public static class Item implements Streamable {
+ private String index;
+ private String type;
+ private String id;
+ private String routing;
+ private String[] fields;
+ private long version = Versions.MATCH_ANY;
+ private VersionType versionType = VersionType.INTERNAL;
+ private FetchSourceContext fetchSourceContext;
+
+ Item() {
+
+ }
+
+ /**
+ * Constructs a single get item.
+ *
+ * @param index The index name
+ * @param type The type (can be null)
+ * @param id The id
+ */
+ public Item(String index, @Nullable String type, String id) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ public String index() {
+ return this.index;
+ }
+
+ public Item index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ /**
+ * The routing associated with this document.
+ */
+ public Item routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ public String routing() {
+ return this.routing;
+ }
+
+ public Item parent(String parent) {
+ if (routing == null) {
+ this.routing = parent;
+ }
+ return this;
+ }
+
+ public Item fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ public String[] fields() {
+ return this.fields;
+ }
+
+ public long version() {
+ return version;
+ }
+
+ public Item version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return versionType;
+ }
+
+ public Item versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public FetchSourceContext fetchSourceContext() {
+ return this.fetchSourceContext;
+ }
+
+ /**
+ * Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned.
+ */
+ public Item fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ this.fetchSourceContext = fetchSourceContext;
+ return this;
+ }
+
+ public static Item readItem(StreamInput in) throws IOException {
+ Item item = new Item();
+ item.readFrom(in);
+ return item;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readSharedString();
+ type = in.readOptionalSharedString();
+ id = in.readString();
+ routing = in.readOptionalString();
+ int size = in.readVInt();
+ if (size > 0) {
+ fields = new String[size];
+ for (int i = 0; i < size; i++) {
+ fields[i] = in.readString();
+ }
+ }
+ version = in.readVLong();
+ versionType = VersionType.fromValue(in.readByte());
+
+ fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeSharedString(index);
+ out.writeOptionalSharedString(type);
+ out.writeString(id);
+ out.writeOptionalString(routing);
+ if (fields == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(fields.length);
+ for (String field : fields) {
+ out.writeString(field);
+ }
+ }
+
+ out.writeVLong(version);
+ out.writeByte(versionType.getValue());
+
+ FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
+ }
+ }
+
+ private boolean listenerThreaded = false;
+
+ String preference;
+ Boolean realtime;
+ boolean refresh;
+
+ List<Item> items = new ArrayList<Item>();
+
+ public MultiGetRequest add(Item item) {
+ items.add(item);
+ return this;
+ }
+
+ public MultiGetRequest add(String index, @Nullable String type, String id) {
+ items.add(new Item(index, type, id));
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (items.isEmpty()) {
+ validationException = ValidateActions.addValidationError("no documents to get", validationException);
+ } else {
+ for (int i = 0; i < items.size(); i++) {
+ Item item = items.get(i);
+ if (item.index() == null) {
+ validationException = ValidateActions.addValidationError("index is missing for doc " + i, validationException);
+ }
+ if (item.id() == null) {
+ validationException = ValidateActions.addValidationError("id is missing for doc " + i, validationException);
+ }
+ }
+ }
+ return validationException;
+ }
+
+ /**
+ * Sets the preference to execute the request. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public MultiGetRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ public boolean realtime() {
+ return this.realtime == null ? true : this.realtime;
+ }
+
+ public MultiGetRequest realtime(Boolean realtime) {
+ this.realtime = realtime;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ public MultiGetRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
+ return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
+ }
+
+ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data) throws Exception {
+ return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, data, true);
+ }
+
+ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data, boolean allowExplicitIndex) throws Exception {
+ return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, null, data, allowExplicitIndex);
+ }
+
+ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, BytesReference data, boolean allowExplicitIndex) throws Exception {
+ XContentParser parser = XContentFactory.xContent(data).createParser(data);
+ try {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("docs".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchIllegalArgumentException("docs array element should include an object");
+ }
+ String index = defaultIndex;
+ String type = defaultType;
+ String id = null;
+ String routing = defaultRouting;
+ String parent = null;
+ List<String> fields = null;
+ long version = Versions.MATCH_ANY;
+ VersionType versionType = VersionType.INTERNAL;
+
+ FetchSourceContext fetchSourceContext = null;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("_index".equals(currentFieldName)) {
+ if (!allowExplicitIndex) {
+ throw new ElasticsearchIllegalArgumentException("explicit index in multi get is not allowed");
+ }
+ index = parser.text();
+ } else if ("_type".equals(currentFieldName)) {
+ type = parser.text();
+ } else if ("_id".equals(currentFieldName)) {
+ id = parser.text();
+ } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
+ routing = parser.text();
+ } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
+ parent = parser.text();
+ } else if ("fields".equals(currentFieldName)) {
+ fields = new ArrayList<String>();
+ fields.add(parser.text());
+ } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
+ version = parser.longValue();
+ } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
+ versionType = VersionType.fromString(parser.text());
+ } else if ("_source".equals(currentFieldName)) {
+ if (parser.isBooleanValue()) {
+ fetchSourceContext = new FetchSourceContext(parser.booleanValue());
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ fetchSourceContext = new FetchSourceContext(new String[]{parser.text()});
+ } else {
+ throw new ElasticsearchParseException("illegal type for _source: [" + token + "]");
+ }
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("fields".equals(currentFieldName)) {
+ fields = new ArrayList<String>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ fields.add(parser.text());
+ }
+ } else if ("_source".equals(currentFieldName)) {
+ ArrayList<String> includes = new ArrayList<String>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ includes.add(parser.text());
+ }
+ fetchSourceContext = new FetchSourceContext(includes.toArray(Strings.EMPTY_ARRAY));
+ }
+
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("_source".equals(currentFieldName)) {
+ List<String> currentList = null, includes = null, excludes = null;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ if ("includes".equals(currentFieldName) || "include".equals(currentFieldName)) {
+ currentList = includes != null ? includes : (includes = new ArrayList<String>(2));
+ } else if ("excludes".equals(currentFieldName) || "exclude".equals(currentFieldName)) {
+ currentList = excludes != null ? excludes : (excludes = new ArrayList<String>(2));
+ } else {
+ throw new ElasticsearchParseException("Source definition may not contain " + parser.text());
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ currentList.add(parser.text());
+ }
+ } else if (token.isValue()) {
+ currentList.add(parser.text());
+ } else {
+ throw new ElasticsearchParseException("unexpected token while parsing source settings");
+ }
+ }
+
+ fetchSourceContext = new FetchSourceContext(
+ includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[includes.size()]),
+ excludes == null ? Strings.EMPTY_ARRAY : excludes.toArray(new String[excludes.size()]));
+ }
+ }
+ }
+ String[] aFields;
+ if (fields != null) {
+ aFields = fields.toArray(new String[fields.size()]);
+ } else {
+ aFields = defaultFields;
+ }
+ add(new Item(index, type, id).routing(routing).fields(aFields).parent(parent).version(version).versionType(versionType)
+ .fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext));
+ }
+ } else if ("ids".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (!token.isValue()) {
+ throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids");
+ }
+ add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
+ }
+ }
+ }
+ }
+ } finally {
+ parser.close();
+ }
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ preference = in.readOptionalString();
+ refresh = in.readBoolean();
+ byte realtime = in.readByte();
+ if (realtime == 0) {
+ this.realtime = false;
+ } else if (realtime == 1) {
+ this.realtime = true;
+ }
+
+ int size = in.readVInt();
+ items = new ArrayList<Item>(size);
+ for (int i = 0; i < size; i++) {
+ items.add(Item.readItem(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalString(preference);
+ out.writeBoolean(refresh);
+ if (realtime == null) {
+ out.writeByte((byte) -1);
+ } else if (realtime == false) {
+ out.writeByte((byte) 0);
+ } else {
+ out.writeByte((byte) 1);
+ }
+
+ out.writeVInt(items.size());
+ for (Item item : items) {
+ item.writeTo(out);
+ }
+ }
+}
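
For orientation, a sketch of building the same request programmatically; the Item setters mirror the _index/_type/_id/_routing/fields/_version/_source keys that the body parser above accepts:

    import org.elasticsearch.action.get.MultiGetRequest;
    import org.elasticsearch.index.VersionType;
    import org.elasticsearch.search.fetch.source.FetchSourceContext;

    MultiGetRequest request = new MultiGetRequest();
    request.add("website", "blog", "1");                      // bare coordinates; type may be null
    request.add(new MultiGetRequest.Item("website", "blog", "2")
            .routing("user7")                                 // custom shard routing
            .fields("title", "tags")                          // stored fields to load
            .version(3).versionType(VersionType.EXTERNAL)     // version check on the get
            .fetchSourceContext(new FetchSourceContext(new String[]{"title"})));
    assert request.validate() == null;                        // index and id are present
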
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java b/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java
new file mode 100644
index 0000000..546d481
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * A multi get document action request builder.
+ */
+public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest, MultiGetResponse, MultiGetRequestBuilder> {
+
+ public MultiGetRequestBuilder(Client client) {
+ super((InternalClient) client, new MultiGetRequest());
+ }
+
+ public MultiGetRequestBuilder add(String index, @Nullable String type, String id) {
+ request.add(index, type, id);
+ return this;
+ }
+
+ public MultiGetRequestBuilder add(String index, @Nullable String type, Iterable<String> ids) {
+ for (String id : ids) {
+ request.add(index, type, id);
+ }
+ return this;
+ }
+
+ public MultiGetRequestBuilder add(String index, @Nullable String type, String... ids) {
+ for (String id : ids) {
+ request.add(index, type, id);
+ }
+ return this;
+ }
+
+ public MultiGetRequestBuilder add(MultiGetRequest.Item item) {
+ request.add(item);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the request. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public MultiGetRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * Should a refresh be executed before this get operation, causing the operation to
+ * return the latest value. Note, heavy get workloads should not set this to <tt>true</tt>. Defaults
+ * to <tt>false</tt>.
+ */
+ public MultiGetRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ public MultiGetRequestBuilder setRealtime(Boolean realtime) {
+ request.realtime(realtime);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<MultiGetResponse> listener) {
+ ((Client) client).multiGet(request, listener);
+ }
+}
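
A hedged usage sketch for this builder; prepareMultiGet() on the client is assumed as the conventional entry point (it is not part of this file):

    // Sketch, assuming "client" is an org.elasticsearch.client.Client.
    MultiGetResponse response = client.prepareMultiGet()
            .add("website", "blog", "1", "2", "3")  // varargs ids overload above
            .setPreference("_local")
            .setRealtime(true)
            .execute()
            .actionGet();
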
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java
new file mode 100644
index 0000000..0c367ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContent {
+
+ /**
+ * Represents a failure.
+ */
+ public static class Failure implements Streamable {
+ private String index;
+ private String type;
+ private String id;
+ private String message;
+
+ Failure() {
+
+ }
+
+ public Failure(String index, String type, String id, String message) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.message = message;
+ }
+
+ /**
+ * The index name of the failed document.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The type of the failed document.
+ */
+ public String getType() {
+ return type;
+ }
+
+ /**
+ * The id of the failed document.
+ */
+ public String getId() {
+ return id;
+ }
+
+ /**
+ * The failure message.
+ */
+ public String getMessage() {
+ return this.message;
+ }
+
+ public static Failure readFailure(StreamInput in) throws IOException {
+ Failure failure = new Failure();
+ failure.readFrom(in);
+ return failure;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readString();
+ type = in.readOptionalString();
+ id = in.readString();
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeOptionalString(type);
+ out.writeString(id);
+ out.writeString(message);
+ }
+ }
+
+ private MultiGetItemResponse[] responses;
+
+ MultiGetResponse() {
+ }
+
+ public MultiGetResponse(MultiGetItemResponse[] responses) {
+ this.responses = responses;
+ }
+
+ public MultiGetItemResponse[] getResponses() {
+ return this.responses;
+ }
+
+ @Override
+ public Iterator<MultiGetItemResponse> iterator() {
+ return Iterators.forArray(responses);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.startArray(Fields.DOCS);
+ for (MultiGetItemResponse response : responses) {
+ if (response.isFailed()) {
+ builder.startObject();
+ Failure failure = response.getFailure();
+ builder.field(Fields._INDEX, failure.getIndex());
+ builder.field(Fields._TYPE, failure.getType());
+ builder.field(Fields._ID, failure.getId());
+ builder.field(Fields.ERROR, failure.getMessage());
+ builder.endObject();
+ } else {
+ GetResponse getResponse = response.getResponse();
+ getResponse.toXContent(builder, params);
+ }
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString DOCS = new XContentBuilderString("docs");
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString ERROR = new XContentBuilderString("error");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ responses = new MultiGetItemResponse[in.readVInt()];
+ for (int i = 0; i < responses.length; i++) {
+ responses[i] = MultiGetItemResponse.readItemResponse(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(responses.length);
+ for (MultiGetItemResponse response : responses) {
+ response.writeTo(out);
+ }
+ }
+} \ No newline at end of file
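
Given the toXContent implementation above, a sketch of rendering the response the way the REST layer would; the output is a single object with a "docs" array where failed items carry _index/_type/_id/error and successful items are full get responses:

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // Sketch only: serialize a MultiGetResponse to a JSON string.
    XContentBuilder builder = XContentFactory.jsonBuilder();
    response.toXContent(builder, ToXContent.EMPTY_PARAMS);
    System.out.println(builder.string());
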
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java b/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java
new file mode 100644
index 0000000..51e229d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import com.carrotsearch.hppc.IntArrayList;
+import com.carrotsearch.hppc.LongArrayList;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetShardRequest> {
+
+ private int shardId;
+ private String preference;
+ Boolean realtime;
+ boolean refresh;
+
+ IntArrayList locations;
+ List<String> types;
+ List<String> ids;
+ List<String[]> fields;
+ LongArrayList versions;
+ List<VersionType> versionTypes;
+ List<FetchSourceContext> fetchSourceContexts;
+
+ MultiGetShardRequest() {
+
+ }
+
+ MultiGetShardRequest(String index, int shardId) {
+ super(index);
+ this.shardId = shardId;
+ locations = new IntArrayList();
+ types = new ArrayList<String>();
+ ids = new ArrayList<String>();
+ fields = new ArrayList<String[]>();
+ versions = new LongArrayList();
+ versionTypes = new ArrayList<VersionType>();
+ fetchSourceContexts = new ArrayList<FetchSourceContext>();
+ }
+
+ public int shardId() {
+ return this.shardId;
+ }
+
+ /**
+ * Sets the preference to execute the request. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public MultiGetShardRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ public boolean realtime() {
+ return this.realtime == null ? true : this.realtime;
+ }
+
+ public MultiGetShardRequest realtime(Boolean realtime) {
+ this.realtime = realtime;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ public MultiGetShardRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public void add(int location, @Nullable String type, String id, String[] fields, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
+ this.locations.add(location);
+ this.types.add(type);
+ this.ids.add(id);
+ this.fields.add(fields);
+ this.versions.add(version);
+ this.versionTypes.add(versionType);
+ this.fetchSourceContexts.add(fetchSourceContext);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ locations = new IntArrayList(size);
+ types = new ArrayList<String>(size);
+ ids = new ArrayList<String>(size);
+ fields = new ArrayList<String[]>(size);
+ versions = new LongArrayList(size);
+ versionTypes = new ArrayList<VersionType>(size);
+ fetchSourceContexts = new ArrayList<FetchSourceContext>(size);
+ for (int i = 0; i < size; i++) {
+ locations.add(in.readVInt());
+ if (in.readBoolean()) {
+ types.add(in.readSharedString());
+ } else {
+ types.add(null);
+ }
+ ids.add(in.readString());
+ int size1 = in.readVInt();
+ if (size1 > 0) {
+ String[] fields = new String[size1];
+ for (int j = 0; j < size1; j++) {
+ fields[j] = in.readString();
+ }
+ this.fields.add(fields);
+ } else {
+ fields.add(null);
+ }
+ versions.add(in.readVLong());
+ versionTypes.add(VersionType.fromValue(in.readByte()));
+
+ fetchSourceContexts.add(FetchSourceContext.optionalReadFromStream(in));
+ }
+
+ preference = in.readOptionalString();
+ refresh = in.readBoolean();
+ byte realtime = in.readByte();
+ if (realtime == 0) {
+ this.realtime = false;
+ } else if (realtime == 1) {
+ this.realtime = true;
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(types.size());
+ for (int i = 0; i < types.size(); i++) {
+ out.writeVInt(locations.get(i));
+ if (types.get(i) == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeSharedString(types.get(i));
+ }
+ out.writeString(ids.get(i));
+ if (fields.get(i) == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(fields.get(i).length);
+ for (String field : fields.get(i)) {
+ out.writeString(field);
+ }
+ }
+ out.writeVLong(versions.get(i));
+ out.writeByte(versionTypes.get(i).getValue());
+ FetchSourceContext fetchSourceContext = fetchSourceContexts.get(i);
+ FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
+ }
+
+ out.writeOptionalString(preference);
+ out.writeBoolean(refresh);
+ if (realtime == null) {
+ out.writeByte((byte) -1);
+ } else if (realtime == false) {
+ out.writeByte((byte) 0);
+ } else {
+ out.writeByte((byte) 1);
+ }
+
+
+ }
+}
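
The request above stores one entry per parallel, per-item list, and every optional value on the wire is prefixed with a boolean presence flag; a reduced, generic sketch of that pattern (the helper names here are hypothetical):

    // Hypothetical helpers illustrating the boolean-prefixed optional encoding.
    static void writeOptionalType(StreamOutput out, String type) throws IOException {
        if (type == null) {
            out.writeBoolean(false);          // absent marker, nothing follows
        } else {
            out.writeBoolean(true);           // present marker
            out.writeSharedString(type);      // payload
        }
    }

    static String readOptionalType(StreamInput in) throws IOException {
        return in.readBoolean() ? in.readSharedString() : null;
    }
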
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetShardResponse.java b/src/main/java/org/elasticsearch/action/get/MultiGetShardResponse.java
new file mode 100644
index 0000000..c0765ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetShardResponse.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MultiGetShardResponse extends ActionResponse {
+
+ IntArrayList locations;
+ List<GetResponse> responses;
+ List<MultiGetResponse.Failure> failures;
+
+ MultiGetShardResponse() {
+ locations = new IntArrayList();
+ responses = new ArrayList<GetResponse>();
+ failures = new ArrayList<MultiGetResponse.Failure>();
+ }
+
+ public void add(int location, GetResponse response) {
+ locations.add(location);
+ responses.add(response);
+ failures.add(null);
+ }
+
+ public void add(int location, MultiGetResponse.Failure failure) {
+ locations.add(location);
+ responses.add(null);
+ failures.add(failure);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ locations = new IntArrayList(size);
+ responses = new ArrayList<GetResponse>(size);
+ failures = new ArrayList<MultiGetResponse.Failure>(size);
+ for (int i = 0; i < size; i++) {
+ locations.add(in.readVInt());
+ if (in.readBoolean()) {
+ GetResponse response = new GetResponse();
+ response.readFrom(in);
+ responses.add(response);
+ } else {
+ responses.add(null);
+ }
+ if (in.readBoolean()) {
+ failures.add(MultiGetResponse.Failure.readFailure(in));
+ } else {
+ failures.add(null);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(locations.size());
+ for (int i = 0; i < locations.size(); i++) {
+ out.writeVInt(locations.get(i));
+ if (responses.get(i) == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ responses.get(i).writeTo(out);
+ }
+ if (failures.get(i) == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ failures.get(i).writeTo(out);
+ }
+ }
+ }
+} \ No newline at end of file
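
The locations list records each item's position in the original MultiGetRequest, which is what lets the coordinating node write shard results back into the correct slots; a sketch of that merge (mirroring the listener in TransportMultiGetAction later in this patch):

    // "responses" is an AtomicArray<MultiGetItemResponse> sized to the whole request.
    for (int i = 0; i < shardResponse.locations.size(); i++) {
        int slot = shardResponse.locations.get(i);
        responses.set(slot, new MultiGetItemResponse(
                shardResponse.responses.get(i),   // null when this item failed
                shardResponse.failures.get(i)));  // null when this item succeeded
    }
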
diff --git a/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/src/main/java/org/elasticsearch/action/get/TransportGetAction.java
new file mode 100644
index 0000000..cfa9edc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/TransportGetAction.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Performs the get operation.
+ */
+public class TransportGetAction extends TransportShardSingleOperationAction<GetRequest, GetResponse> {
+
+ public static boolean REFRESH_FORCE = false;
+
+ private final IndicesService indicesService;
+ private final boolean realtime;
+
+ @Inject
+ public TransportGetAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, ThreadPool threadPool) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+
+ this.realtime = settings.getAsBoolean("action.get.realtime", true);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GET;
+ }
+
+ @Override
+ protected String transportAction() {
+ return GetAction.NAME;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, GetRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, GetRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState state, GetRequest request) {
+ return clusterService.operationRouting()
+ .getShards(clusterService.state(), request.index(), request.type(), request.id(), request.routing(), request.preference());
+ }
+
+ @Override
+ protected void resolveRequest(ClusterState state, GetRequest request) {
+ if (request.realtime == null) {
+ request.realtime = this.realtime;
+ }
+ // update the routing (request#index here is possibly an alias)
+ request.routing(state.metaData().resolveIndexRouting(request.routing(), request.index()));
+ request.index(state.metaData().concreteIndex(request.index()));
+
+ // Fail fast on the node that received the request.
+ if (request.routing() == null && state.getMetaData().routingRequired(request.index(), request.type())) {
+ throw new RoutingMissingException(request.index(), request.type(), request.id());
+ }
+ }
+
+ @Override
+ protected GetResponse shardOperation(GetRequest request, int shardId) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(shardId);
+
+ if (request.refresh() && !request.realtime()) {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_get").force(REFRESH_FORCE));
+ }
+
+ GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
+ request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
+ return new GetResponse(result);
+ }
+
+ @Override
+ protected GetRequest newRequest() {
+ return new GetRequest();
+ }
+
+ @Override
+ protected GetResponse newResponse() {
+ return new GetResponse();
+ }
+}
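
Realtime get is on by default and can be disabled node-wide via the action.get.realtime setting read above; per request, a non-realtime get may force a refresh first, as shardOperation shows. A request-side sketch (GetRequest's fluent realtime/refresh setters are assumed from the same API generation):

    // Sketch: a non-realtime get that refreshes the shard first,
    // trading latency for a fully searchable view of the document.
    GetRequest request = new GetRequest("website", "blog", "1")
            .realtime(false)
            .refresh(true);
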
diff --git a/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java
new file mode 100644
index 0000000..339f3ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class TransportMultiGetAction extends TransportAction<MultiGetRequest, MultiGetResponse> {
+
+ private final ClusterService clusterService;
+
+ private final TransportShardMultiGetAction shardAction;
+
+ @Inject
+ public TransportMultiGetAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, TransportShardMultiGetAction shardAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.shardAction = shardAction;
+
+ transportService.registerHandler(MultiGetAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final MultiGetRequest request, final ActionListener<MultiGetResponse> listener) {
+ ClusterState clusterState = clusterService.state();
+
+ clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
+
+ final AtomicArray<MultiGetItemResponse> responses = new AtomicArray<MultiGetItemResponse>(request.items.size());
+
+ Map<ShardId, MultiGetShardRequest> shardRequests = new HashMap<ShardId, MultiGetShardRequest>();
+ for (int i = 0; i < request.items.size(); i++) {
+ MultiGetRequest.Item item = request.items.get(i);
+ if (!clusterState.metaData().hasConcreteIndex(item.index())) {
+ responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(item.index(), item.type(), item.id(), "[" + item.index() + "] missing")));
+ continue;
+ }
+ if (item.routing() == null && clusterState.getMetaData().routingRequired(item.index(), item.type())) {
+ responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(item.index(), item.type(), item.id(), "routing is required, but hasn't been specified")));
+ continue;
+ }
+
+ item.routing(clusterState.metaData().resolveIndexRouting(item.routing(), item.index()));
+ item.index(clusterState.metaData().concreteIndex(item.index()));
+ ShardId shardId = clusterService.operationRouting()
+ .getShards(clusterState, item.index(), item.type(), item.id(), item.routing(), null).shardId();
+ MultiGetShardRequest shardRequest = shardRequests.get(shardId);
+ if (shardRequest == null) {
+ shardRequest = new MultiGetShardRequest(shardId.index().name(), shardId.id());
+ shardRequest.preference(request.preference);
+ shardRequest.realtime(request.realtime);
+ shardRequest.refresh(request.refresh);
+
+ shardRequests.put(shardId, shardRequest);
+ }
+ shardRequest.add(i, item.type(), item.id(), item.fields(), item.version(), item.versionType(), item.fetchSourceContext());
+ }
+
+ if (shardRequests.size() == 0) {
+ // every item failed validation; respond immediately
+ listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()])));
+ }
+
+ final AtomicInteger counter = new AtomicInteger(shardRequests.size());
+
+ for (final MultiGetShardRequest shardRequest : shardRequests.values()) {
+ shardAction.execute(shardRequest, new ActionListener<MultiGetShardResponse>() {
+ @Override
+ public void onResponse(MultiGetShardResponse response) {
+ for (int i = 0; i < response.locations.size(); i++) {
+ responses.set(response.locations.get(i), new MultiGetItemResponse(response.responses.get(i), response.failures.get(i)));
+ }
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ // create failures for all relevant requests
+ String message = ExceptionsHelper.detailedMessage(e);
+ for (int i = 0; i < shardRequest.locations.size(); i++) {
+ responses.set(shardRequest.locations.get(i), new MultiGetItemResponse(null,
+ new MultiGetResponse.Failure(shardRequest.index(), shardRequest.types.get(i), shardRequest.ids.get(i), message)));
+ }
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()])));
+ }
+ });
+ }
+ }
+
+ class TransportHandler extends BaseTransportRequestHandler<MultiGetRequest> {
+
+ @Override
+ public MultiGetRequest newInstance() {
+ return new MultiGetRequest();
+ }
+
+ @Override
+ public void messageReceived(final MultiGetRequest request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<MultiGetResponse>() {
+ @Override
+ public void onResponse(MultiGetResponse response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [" + MultiGetAction.NAME + "] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
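
Two details worth noting in doExecute above: when every item fails validation, shardRequests is empty and the listener is answered immediately (the fan-out loop then never runs); otherwise completion is joined with an atomic countdown rather than a latch. A self-contained sketch of that join:

    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch of the countdown join: each shard callback, success or failure,
    // decrements once; the last decrement sends the merged response exactly once.
    final AtomicInteger counter = new AtomicInteger(shardRequestCount);
    // ... inside every shard callback:
    if (counter.decrementAndGet() == 0) {
        finishHim();   // hypothetical helper that builds and sends the response
    }
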
diff --git a/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java
new file mode 100644
index 0000000..ab20403
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+public class TransportShardMultiGetAction extends TransportShardSingleOperationAction<MultiGetShardRequest, MultiGetShardResponse> {
+
+ private final IndicesService indicesService;
+
+ private final boolean realtime;
+
+ @Inject
+ public TransportShardMultiGetAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, ThreadPool threadPool) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+
+ this.realtime = settings.getAsBoolean("action.get.realtime", true);
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GET;
+ }
+
+ @Override
+ protected String transportAction() {
+ return MultiGetAction.NAME + "/shard";
+ }
+
+ @Override
+ protected MultiGetShardRequest newRequest() {
+ return new MultiGetShardRequest();
+ }
+
+ @Override
+ protected MultiGetShardResponse newResponse() {
+ return new MultiGetShardResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, MultiGetShardRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, MultiGetShardRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState state, MultiGetShardRequest request) {
+ return clusterService.operationRouting()
+ .getShards(clusterService.state(), request.index(), request.shardId(), request.preference());
+ }
+
+ @Override
+ protected void resolveRequest(ClusterState state, MultiGetShardRequest request) {
+ if (request.realtime == null) {
+ request.realtime = this.realtime;
+ }
+ // no need to set concrete index and routing here, it has already been set by the multi get action on the item
+ //request.index(state.metaData().concreteIndex(request.index()));
+ }
+
+ @Override
+ protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, int shardId) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(shardId);
+
+ if (request.refresh() && !request.realtime()) {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_mget").force(TransportGetAction.REFRESH_FORCE));
+ }
+
+ MultiGetShardResponse response = new MultiGetShardResponse();
+ for (int i = 0; i < request.locations.size(); i++) {
+ String type = request.types.get(i);
+ String id = request.ids.get(i);
+ String[] fields = request.fields.get(i);
+
+ long version = request.versions.get(i);
+ VersionType versionType = request.versionTypes.get(i);
+ if (versionType == null) {
+ versionType = VersionType.INTERNAL;
+ }
+
+ FetchSourceContext fetchSourceContext = request.fetchSourceContexts.get(i);
+ try {
+ GetResult getResult = indexShard.getService().get(type, id, fields, request.realtime(), version, versionType, fetchSourceContext);
+ response.add(request.locations.get(i), new GetResponse(getResult));
+ } catch (Throwable t) {
+ if (TransportActions.isShardNotAvailableException(t)) {
+ throw (ElasticsearchException) t;
+ } else {
+ logger.debug("[{}][{}] failed to execute multi_get for [{}]/[{}]", t, request.index(), shardId, type, id);
+ response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), type, id, ExceptionsHelper.detailedMessage(t)));
+ }
+ }
+ }
+
+ return response;
+ }
+
+}
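
The error policy in shardOperation above is worth spelling out: a shard-not-available exception is rethrown so the caller can fail every location of the shard request at once, while any other per-document throwable is downgraded to a per-item Failure and processing continues. A reduced sketch of that policy around a hypothetical per-item fetch:

    try {
        GetResult result = doGet(type, id);    // hypothetical per-item fetch
        response.add(location, new GetResponse(result));
    } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
            throw (ElasticsearchException) t;  // whole-shard failure: rethrow
        }
        // isolated failure: record it and continue with the remaining items
        response.add(location,
                new MultiGetResponse.Failure(index, type, id, ExceptionsHelper.detailedMessage(t)));
    }
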
diff --git a/src/main/java/org/elasticsearch/action/get/package-info.java b/src/main/java/org/elasticsearch/action/get/package-info.java
new file mode 100644
index 0000000..bc9ab80
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/get/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Get action.
+ */
+package org.elasticsearch.action.get; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/index/IndexAction.java b/src/main/java/org/elasticsearch/action/index/IndexAction.java
new file mode 100644
index 0000000..e00625f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/index/IndexAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class IndexAction extends Action<IndexRequest, IndexResponse, IndexRequestBuilder> {
+
+ public static final IndexAction INSTANCE = new IndexAction();
+ public static final String NAME = "index";
+
+ private IndexAction() {
+ super(NAME);
+ }
+
+ @Override
+ public IndexResponse newResponse() {
+ return new IndexResponse();
+ }
+
+ @Override
+ public IndexRequestBuilder newRequestBuilder(Client client) {
+ return new IndexRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/src/main/java/org/elasticsearch/action/index/IndexRequest.java
new file mode 100644
index 0000000..a2e66a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -0,0 +1,650 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.index;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Index request to index a typed JSON document into a specific index and make it searchable. Best
+ * created using {@link org.elasticsearch.client.Requests#indexRequest(String)}.
+ * <p/>
+ * <p>The index operation requires the {@link #index()}, {@link #type(String)} and
+ * {@link #source(byte[])} to be set.
+ * <p/>
+ * <p>The source (content to index) can be set in its bytes form using ({@link #source(byte[])}),
+ * its string form ({@link #source(String)}) or using a {@link org.elasticsearch.common.xcontent.XContentBuilder}
+ * ({@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}).
+ * <p/>
+ * <p>If the {@link #id(String)} is not set, it will be automatically generated.
+ *
+ * @see IndexResponse
+ * @see org.elasticsearch.client.Requests#indexRequest(String)
+ * @see org.elasticsearch.client.Client#index(IndexRequest)
+ */
+public class IndexRequest extends ShardReplicationOperationRequest<IndexRequest> {
+
+ /**
+ * Operation type controls the type of the index operation.
+ */
+ public static enum OpType {
+ /**
+ * Index the source. If there is an existing document with the id, it will
+ * be replaced.
+ */
+ INDEX((byte) 0),
+ /**
+ * Creates the resource. Simply adds it to the index; if there is an existing
+ * document with the id, it will not be replaced.
+ */
+ CREATE((byte) 1);
+
+ private final byte id;
+ private final String lowercase;
+
+ OpType(byte id) {
+ this.id = id;
+ this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);
+ }
+
+ /**
+ * The internal representation of the operation type.
+ */
+ public byte id() {
+ return id;
+ }
+
+ public String lowercase() {
+ return this.lowercase;
+ }
+
+ /**
+ * Constructs the operation type from its internal representation.
+ */
+ public static OpType fromId(byte id) {
+ if (id == 0) {
+ return INDEX;
+ } else if (id == 1) {
+ return CREATE;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No type match for [" + id + "]");
+ }
+ }
+ }
+
+ private String type;
+ private String id;
+ @Nullable
+ private String routing;
+ @Nullable
+ private String parent;
+ @Nullable
+ private String timestamp;
+ private long ttl = -1;
+
+ private BytesReference source;
+ private boolean sourceUnsafe;
+
+ private OpType opType = OpType.INDEX;
+
+ private boolean refresh = false;
+ private long version = Versions.MATCH_ANY;
+ private VersionType versionType = VersionType.INTERNAL;
+
+ private XContentType contentType = Requests.INDEX_CONTENT_TYPE;
+
+ public IndexRequest() {
+ }
+
+ /**
+ * Constructs a new index request against the specific index. The {@link #type(String)}
+ * and {@link #source(byte[])} must be set.
+ */
+ public IndexRequest(String index) {
+ this.index = index;
+ }
+
+ /**
+ * Constructs a new index request against the specific index and type. The
+ * {@link #source(byte[])} must be set.
+ */
+ public IndexRequest(String index, String type) {
+ this.index = index;
+ this.type = type;
+ }
+
+ /**
+ * Constructs a new index request against the index, type and id. The {@link #source(byte[])} must still be set.
+ *
+ * @param index The index to index into
+ * @param type The type to index into
+ * @param id The id of the document
+ */
+ public IndexRequest(String index, String type, String id) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (type == null) {
+ validationException = addValidationError("type is missing", validationException);
+ }
+ if (source == null) {
+ validationException = addValidationError("source is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Before we fork on a local thread, make sure we copy over the bytes if they are unsafe
+ */
+ @Override
+ public void beforeLocalFork() {
+ // only copy the source over if it is unsafe
+ safeSource();
+ }
+
+ /**
+ * Sets the content type that will be used when generating a document from user provided objects (like Map).
+ */
+ public IndexRequest contentType(XContentType contentType) {
+ this.contentType = contentType;
+ return this;
+ }
+
+ /**
+ * The type of the indexed document.
+ */
+ public String type() {
+ return type;
+ }
+
+ /**
+ * Sets the type of the indexed document.
+ */
+ public IndexRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * The id of the indexed document. If not set, will be automatically generated.
+ */
+ public String id() {
+ return id;
+ }
+
+ /**
+ * Sets the id of the indexed document. If not set, will be automatically generated.
+ */
+ public IndexRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. This value is hashed to determine
+ * the target shard, instead of the id.
+ */
+ public IndexRequest routing(String routing) {
+ if (routing != null && routing.length() == 0) {
+ this.routing = null;
+ } else {
+ this.routing = routing;
+ }
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. This value is hashed to determine
+ * the target shard, instead of the id.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+ * Sets the parent id of this document. If routing is not set, the parent id is
+ * automatically used as the routing as well.
+ */
+ public IndexRequest parent(String parent) {
+ this.parent = parent;
+ if (routing == null) {
+ routing = parent;
+ }
+ return this;
+ }
+
+ public String parent() {
+ return this.parent;
+ }
+
+ /**
+ * Sets the timestamp, either as millis since the epoch or in the configured date format.
+ */
+ public IndexRequest timestamp(String timestamp) {
+ this.timestamp = timestamp;
+ return this;
+ }
+
+ public String timestamp() {
+ return this.timestamp;
+ }
+
+ /**
+ * Sets the relative ttl value. It must be > 0, as it makes little sense otherwise. Setting it
+ * to <tt>null</tt> will reset it to have no ttl.
+ */
+ public IndexRequest ttl(Long ttl) throws ElasticsearchGenerationException {
+ if (ttl == null) {
+ this.ttl = -1;
+ return this;
+ }
+ if (ttl <= 0) {
+ throw new ElasticsearchIllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
+ }
+ this.ttl = ttl;
+ return this;
+ }
+
+ public long ttl() {
+ return this.ttl;
+ }
+
+ /**
+ * The source of the document to index, recopied to a new array if it is unsafe.
+ */
+ public BytesReference source() {
+ return source;
+ }
+
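+ /**
+ * Returns the source, first copying it over to a new (safe) byte array if it
+ * was flagged as unsafe to use across threads.
+ */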
+ public BytesReference safeSource() {
+ if (sourceUnsafe) {
+ source = source.copyBytesArray();
+ sourceUnsafe = false;
+ }
+ return source;
+ }
+
+ public Map<String, Object> sourceAsMap() {
+ return XContentHelper.convertToMap(source, false).v2();
+ }
+
+ /**
+ * Index the Map as a {@link org.elasticsearch.client.Requests#INDEX_CONTENT_TYPE}.
+ *
+ * @param source The map to index
+ */
+ public IndexRequest source(Map source) throws ElasticsearchGenerationException {
+ return source(source, contentType);
+ }
+
+ /**
+ * Index the Map as the provided content type.
+ *
+ * @param source The map to index
+ */
+ public IndexRequest source(Map source, XContentType contentType) throws ElasticsearchGenerationException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ /**
+ * Sets the document source to index.
+ * <p/>
+ * <p>Note, it is preferable to set it either using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}
+ * or using {@link #source(byte[])}.
+ */
+ public IndexRequest source(String source) {
+ this.source = new BytesArray(source.getBytes(Charsets.UTF_8));
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * Sets the content source to index.
+ */
+ public IndexRequest source(XContentBuilder sourceBuilder) {
+ source = sourceBuilder.bytes();
+ sourceUnsafe = false;
+ return this;
+ }
+
+ public IndexRequest source(String field1, Object value1) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.startObject().field(field1, value1).endObject();
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
+ }
+
+ public IndexRequest source(String field1, Object value1, String field2, Object value2) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.startObject().field(field1, value1).field(field2, value2).endObject();
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
+ }
+
+ public IndexRequest source(String field1, Object value1, String field2, Object value2, String field3, Object value3) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.startObject().field(field1, value1).field(field2, value2).field(field3, value3).endObject();
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
+ }
+
+ public IndexRequest source(String field1, Object value1, String field2, Object value2, String field3, Object value3, String field4, Object value4) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.startObject().field(field1, value1).field(field2, value2).field(field3, value3).field(field4, value4).endObject();
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
+ }
+
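+ /**
+ * Constructs a simple document from alternating field name and value arguments;
+ * the number of arguments must therefore be even. For example (a sketch with
+ * hypothetical field names):
+ * <pre>
+ * request.source("user", "kimchy", "age", 42);
+ * </pre>
+ */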
+ public IndexRequest source(Object... source) {
+ if (source.length % 2 != 0) {
+ throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]");
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.startObject();
+ for (int i = 0; i < source.length; i++) {
+ builder.field(source[i++].toString(), source[i]);
+ }
+ builder.endObject();
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate", e);
+ }
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ */
+ public IndexRequest source(BytesReference source, boolean unsafe) {
+ this.source = source;
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ */
+ public IndexRequest source(byte[] source) {
+ return source(source, 0, source.length);
+ }
+
+ /**
+ * Sets the document to index in bytes form (assumed to be safe to be used from different
+ * threads).
+ *
+ * @param source The source to index
+ * @param offset The offset in the byte array
+ * @param length The length of the data
+ */
+ public IndexRequest source(byte[] source, int offset, int length) {
+ return source(source, offset, length, false);
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ *
+ * @param source The source to index
+ * @param offset The offset in the byte array
+ * @param length The length of the data
+ * @param unsafe Is the byte array safe to be used from a different thread
+ */
+ public IndexRequest source(byte[] source, int offset, int length, boolean unsafe) {
+ this.source = new BytesArray(source, offset, length);
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * Sets the type of operation to perform.
+ */
+ public IndexRequest opType(OpType opType) {
+ this.opType = opType;
+ return this;
+ }
+
+ /**
+ * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
+ * be either "index" or "create".
+ */
+ public IndexRequest opType(String opType) throws ElasticsearchIllegalArgumentException {
+ if ("create".equals(opType)) {
+ return opType(OpType.CREATE);
+ } else if ("index".equals(opType)) {
+ return opType(OpType.INDEX);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No index opType matching [" + opType + "]");
+ }
+ }
+
+ /**
+ * Set to <tt>true</tt> to force this index to use {@link OpType#CREATE}.
+ */
+ public IndexRequest create(boolean create) {
+ if (create) {
+ return opType(OpType.CREATE);
+ } else {
+ return opType(OpType.INDEX);
+ }
+ }
+
+ /**
+ * The type of operation to perform.
+ */
+ public OpType opType() {
+ return this.opType;
+ }
+
+ /**
+ * Should a refresh be executed after this index operation, making the document
+ * immediately searchable? Note, heavy indexing should not set this to <tt>true</tt>.
+ * Defaults to <tt>false</tt>.
+ */
+ public IndexRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ /**
+ * Sets the version, which will cause the index operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public IndexRequest version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ /**
+ * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
+ */
+ public IndexRequest versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
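+ /**
+ * Resolves this request against the cluster metadata: resolves alias routing,
+ * parses the source to extract id/routing/timestamp when the mapping requires it,
+ * generates a missing id when allowed (switching the op type to CREATE), and
+ * falls back to the current time when no timestamp was provided.
+ */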
+ public void process(MetaData metaData, String aliasOrIndex, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration) throws ElasticsearchException {
+ // resolve the routing if needed
+ routing(metaData.resolveIndexRouting(routing, aliasOrIndex));
+ // resolve timestamp if provided externally
+ if (timestamp != null) {
+ timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp,
+ mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
+ }
+ // extract values if needed
+ if (mappingMd != null) {
+ MappingMetaData.ParseContext parseContext = mappingMd.createParseContext(id, routing, timestamp);
+
+ if (parseContext.shouldParse()) {
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(source);
+ mappingMd.parse(parser, parseContext);
+ if (parseContext.shouldParseId()) {
+ id = parseContext.id();
+ }
+ if (parseContext.shouldParseRouting()) {
+ routing = parseContext.routing();
+ }
+ if (parseContext.shouldParseTimestamp()) {
+ timestamp = parseContext.timestamp();
+ timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp, mappingMd.timestamp().dateTimeFormatter());
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchParseException("failed to parse doc to extract routing/timestamp/id", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ // might as well check for routing here
+ if (mappingMd.routing().required() && routing == null) {
+ throw new RoutingMissingException(index, type, id);
+ }
+
+ if (parent != null && !mappingMd.hasParentField()) {
+ throw new ElasticsearchIllegalArgumentException("Can't specify parent if no parent field has been configured");
+ }
+ } else {
+ if (parent != null) {
+ throw new ElasticsearchIllegalArgumentException("Can't specify parent if no parent field has been configured");
+ }
+ }
+
+ // generate id if not already provided and id generation is allowed
+ if (allowIdGeneration) {
+ if (id == null) {
+ id(Strings.randomBase64UUID());
+ // since we generate the id, change it to CREATE
+ opType(IndexRequest.OpType.CREATE);
+ }
+ }
+
+ // generate timestamp if not provided, we always have one post this stage...
+ if (timestamp == null) {
+ timestamp = Long.toString(System.currentTimeMillis());
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ type = in.readSharedString();
+ id = in.readOptionalString();
+ routing = in.readOptionalString();
+ parent = in.readOptionalString();
+ timestamp = in.readOptionalString();
+ ttl = in.readLong();
+ source = in.readBytesReference();
+ sourceUnsafe = false;
+
+ opType = OpType.fromId(in.readByte());
+ refresh = in.readBoolean();
+ version = in.readLong();
+ versionType = VersionType.fromValue(in.readByte());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeSharedString(type);
+ out.writeOptionalString(id);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(parent);
+ out.writeOptionalString(timestamp);
+ out.writeLong(ttl);
+ out.writeBytesReference(source);
+ out.writeByte(opType.id());
+ out.writeBoolean(refresh);
+ out.writeLong(version);
+ out.writeByte(versionType.getValue());
+ }
+
+ @Override
+ public String toString() {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(source, false);
+ } catch (Exception e) {
+ // ignore
+ }
+ return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
new file mode 100644
index 0000000..27fc7de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.support.replication.ShardReplicationOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.VersionType;
+
+import java.util.Map;
+
+/**
+ * An index document action request builder.
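+ * <p>A minimal usage sketch (hypothetical index, type and field names, assuming a
+ * connected {@link org.elasticsearch.client.Client} named <tt>client</tt>):
+ * <pre>
+ * IndexResponse response = client.prepareIndex("twitter", "tweet", "1")
+ *         .setSource("user", "kimchy")
+ *         .execute()
+ *         .actionGet();
+ * </pre>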
+ */
+public class IndexRequestBuilder extends ShardReplicationOperationRequestBuilder<IndexRequest, IndexResponse, IndexRequestBuilder> {
+
+ public IndexRequestBuilder(Client client) {
+ super((InternalClient) client, new IndexRequest());
+ }
+
+ public IndexRequestBuilder(Client client, @Nullable String index) {
+ super((InternalClient) client, new IndexRequest(index));
+ }
+
+ /**
+ * Sets the type to index the document to.
+ */
+ public IndexRequestBuilder setType(String type) {
+ request.type(type);
+ return this;
+ }
+
+ /**
+ * Sets the id to index the document under. Optional, and if not set, one will be automatically
+ * generated.
+ */
+ public IndexRequestBuilder setId(String id) {
+ request.id(id);
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. This value is hashed to determine
+ * the target shard, instead of the id.
+ */
+ public IndexRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. If routing is not set, the parent id is
+ * automatically used as the routing as well.
+ */
+ public IndexRequestBuilder setParent(String parent) {
+ request.parent(parent);
+ return this;
+ }
+
+ /**
+ * Sets the source.
+ */
+ public IndexRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request.source(source, unsafe);
+ return this;
+ }
+
+ /**
+ * Sets the source.
+ */
+ public IndexRequestBuilder setSource(BytesReference source) {
+ request.source(source, false);
+ return this;
+ }
+
+ /**
+ * Index the Map as JSON.
+ *
+ * @param source The map to index
+ */
+ public IndexRequestBuilder setSource(Map<String, Object> source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Index the Map as the provided content type.
+ *
+ * @param source The map to index
+ */
+ public IndexRequestBuilder setSource(Map<String, Object> source, XContentType contentType) {
+ request.source(source, contentType);
+ return this;
+ }
+
+ /**
+ * Sets the document source to index.
+ * <p/>
+ * <p>Note, it is preferable to set it either using {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)}
+ * or using {@link #setSource(byte[])}.
+ */
+ public IndexRequestBuilder setSource(String source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets the content source to index.
+ */
+ public IndexRequestBuilder setSource(XContentBuilder sourceBuilder) {
+ request.source(sourceBuilder);
+ return this;
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ */
+ public IndexRequestBuilder setSource(byte[] source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets the document to index in bytes form (assumed to be safe to be used from different
+ * threads).
+ *
+ * @param source The source to index
+ * @param offset The offset in the byte array
+ * @param length The length of the data
+ */
+ public IndexRequestBuilder setSource(byte[] source, int offset, int length) {
+ request.source(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the document to index in bytes form.
+ *
+ * @param source The source to index
+ * @param offset The offset in the byte array
+ * @param length The length of the data
+ * @param unsafe Is the byte array safe to be used from a different thread
+ */
+ public IndexRequestBuilder setSource(byte[] source, int offset, int length, boolean unsafe) {
+ request.source(source, offset, length, unsafe);
+ return this;
+ }
+
+ /**
+ * Constructs a simple document with a field and a value.
+ */
+ public IndexRequestBuilder setSource(String field1, Object value1) {
+ request.source(field1, value1);
+ return this;
+ }
+
+ /**
+ * Constructs a simple document with field and value pairs.
+ */
+ public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2) {
+ request.source(field1, value1, field2, value2);
+ return this;
+ }
+
+ /**
+ * Constructs a simple document with field and value pairs.
+ */
+ public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2, String field3, Object value3) {
+ request.source(field1, value1, field2, value2, field3, value3);
+ return this;
+ }
+
+ /**
+ * Constructs a simple document with field and value pairs.
+ */
+ public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2, String field3, Object value3, String field4, Object value4) {
+ request.source(field1, value1, field2, value2, field3, value3, field4, value4);
+ return this;
+ }
+
+ /**
+ * Constructs a simple document from field name and value pairs.
+ * <b>Note: the number of objects passed to this method must be an even number.</b>
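+ * <p>For example (a sketch with hypothetical field names):
+ * <pre>
+ * builder.setSource("user", "kimchy", "age", 42);
+ * </pre>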
+ */
+ public IndexRequestBuilder setSource(Object... source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * The content type that will be used to generate a document from user provided objects (like Map).
+ */
+ public IndexRequestBuilder setContentType(XContentType contentType) {
+ request.contentType(contentType);
+ return this;
+ }
+
+ /**
+ * Sets the type of operation to perform.
+ */
+ public IndexRequestBuilder setOpType(IndexRequest.OpType opType) {
+ request.opType(opType);
+ return this;
+ }
+
+ /**
+ * Sets a string representation of the {@link #setOpType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
+ * be either "index" or "create".
+ */
+ public IndexRequestBuilder setOpType(String opType) {
+ request.opType(opType);
+ return this;
+ }
+
+ /**
+ * Set to <tt>true</tt> to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
+ */
+ public IndexRequestBuilder setCreate(boolean create) {
+ request.create(create);
+ return this;
+ }
+
+ /**
+ * Should a refresh be executed after this index operation, making the document
+ * immediately searchable? Note, heavy indexing should not set this to <tt>true</tt>.
+ * Defaults to <tt>false</tt>.
+ */
+ public IndexRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ /**
+ * Set the replication type for this operation.
+ */
+ public IndexRequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ /**
+ * Sets the consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ public IndexRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return this;
+ }
+
+ /**
+ * Set the replication type for this operation.
+ */
+ public IndexRequestBuilder setReplicationType(String replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ /**
+ * Sets the version, which will cause the index operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public IndexRequestBuilder setVersion(long version) {
+ request.version(version);
+ return this;
+ }
+
+ /**
+ * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
+ */
+ public IndexRequestBuilder setVersionType(VersionType versionType) {
+ request.versionType(versionType);
+ return this;
+ }
+
+ /**
+ * Sets the timestamp, either as millis since the epoch or in the configured date format.
+ */
+ public IndexRequestBuilder setTimestamp(String timestamp) {
+ request.timestamp(timestamp);
+ return this;
+ }
+
+ /**
+ * Sets the relative ttl value. It must be > 0, as it makes little sense otherwise.
+ */
+ public IndexRequestBuilder setTTL(long ttl) {
+ request.ttl(ttl);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<IndexResponse> listener) {
+ ((Client) client).index(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/src/main/java/org/elasticsearch/action/index/IndexResponse.java
new file mode 100644
index 0000000..c64d391
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/index/IndexResponse.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A response of an index operation.
+ *
+ * @see org.elasticsearch.action.index.IndexRequest
+ * @see org.elasticsearch.client.Client#index(IndexRequest)
+ */
+public class IndexResponse extends ActionResponse {
+
+ private String index;
+ private String id;
+ private String type;
+ private long version;
+ private boolean created;
+
+ public IndexResponse() {
+
+ }
+
+ public IndexResponse(String index, String type, String id, long version, boolean created) {
+ this.index = index;
+ this.id = id;
+ this.type = type;
+ this.version = version;
+ this.created = created;
+ }
+
+ /**
+ * The index the document was indexed into.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The type of the document indexed.
+ */
+ public String getType() {
+ return this.type;
+ }
+
+ /**
+ * The id of the document indexed.
+ */
+ public String getId() {
+ return this.id;
+ }
+
+ /**
+ * Returns the current version of the doc indexed.
+ */
+ public long getVersion() {
+ return this.version;
+ }
+
+ /**
+ * Returns true if the document was created, false if updated.
+ */
+ public boolean isCreated() {
+ return this.created;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readSharedString();
+ type = in.readSharedString();
+ id = in.readString();
+ version = in.readLong();
+ created = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeSharedString(index);
+ out.writeSharedString(type);
+ out.writeString(id);
+ out.writeLong(version);
+ out.writeBoolean(created);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
new file mode 100644
index 0000000..d771393
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
+import org.elasticsearch.action.support.AutoCreateIndex;
+import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Performs the index operation.
+ * <p/>
+ * <p>Allows for the following settings:
+ * <ul>
+ * <li><b>autoCreateIndex</b>: When set to <tt>true</tt>, will automatically create an index if one does not exist.
+ * Defaults to <tt>true</tt>.
+ * <li><b>allowIdGeneration</b>: If the id is not set, should it be generated. Defaults to <tt>true</tt>.
+ * </ul>
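+ * <p>For example, id generation could be disabled through node settings (a sketch
+ * using {@link org.elasticsearch.common.settings.ImmutableSettings}; the key matches
+ * the one read in the constructor below):
+ * <pre>
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("action.allow_id_generation", false)
+ *         .build();
+ * </pre>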
+ */
+public class TransportIndexAction extends TransportShardReplicationOperationAction<IndexRequest, IndexRequest, IndexResponse> {
+
+ private final AutoCreateIndex autoCreateIndex;
+
+ private final boolean allowIdGeneration;
+
+ private final TransportCreateIndexAction createIndexAction;
+
+ private final MappingUpdatedAction mappingUpdatedAction;
+
+ private final boolean waitForMappingChange;
+
+ @Inject
+ public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
+ TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction) {
+ super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
+ this.createIndexAction = createIndexAction;
+ this.mappingUpdatedAction = mappingUpdatedAction;
+ this.autoCreateIndex = new AutoCreateIndex(settings);
+ this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
+ this.waitForMappingChange = settings.getAsBoolean("action.wait_on_mapping_change", false);
+ }
+
+ @Override
+ protected void doExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
+ // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
+ if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
+ request.beforeLocalFork(); // we fork on another thread...
+ createIndexAction.execute(new CreateIndexRequest(request.index()).cause("auto(index api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+ @Override
+ public void onResponse(CreateIndexResponse result) {
+ innerExecute(request, listener);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
+ // we have the index, do it
+ try {
+ innerExecute(request, listener);
+ } catch (Throwable e1) {
+ listener.onFailure(e1);
+ }
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ });
+ } else {
+ innerExecute(request, listener);
+ }
+ }
+
+ @Override
+ protected boolean resolveRequest(ClusterState state, IndexRequest request, ActionListener<IndexResponse> indexResponseActionListener) {
+ MetaData metaData = clusterService.state().metaData();
+ String aliasOrIndex = request.index();
+ request.index(metaData.concreteIndex(request.index()));
+ MappingMetaData mappingMd = null;
+ if (metaData.hasIndex(request.index())) {
+ mappingMd = metaData.index(request.index()).mappingOrDefault(request.type());
+ }
+ request.process(metaData, aliasOrIndex, mappingMd, allowIdGeneration);
+ return true;
+ }
+
+ private void innerExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return true;
+ }
+
+ @Override
+ protected IndexRequest newRequestInstance() {
+ return new IndexRequest();
+ }
+
+ @Override
+ protected IndexRequest newReplicaRequestInstance() {
+ return new IndexRequest();
+ }
+
+ @Override
+ protected IndexResponse newResponseInstance() {
+ return new IndexResponse();
+ }
+
+ @Override
+ protected String transportAction() {
+ return IndexAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.INDEX;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, IndexRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, IndexRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, IndexRequest request) {
+ return clusterService.operationRouting()
+ .indexShards(clusterService.state(), request.index(), request.type(), request.id(), request.routing());
+ }
+
+ @Override
+ protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
+ final IndexRequest request = shardRequest.request;
+
+ // validate, if routing is required, that we got routing
+ IndexMetaData indexMetaData = clusterState.metaData().index(request.index());
+ MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
+ if (mappingMd != null && mappingMd.routing().required()) {
+ if (request.routing() == null) {
+ throw new RoutingMissingException(request.index(), request.type(), request.id());
+ }
+ }
+
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).type(request.type()).id(request.id())
+ .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
+ long version;
+ boolean created;
+ Engine.IndexingOperation op;
+ if (request.opType() == IndexRequest.OpType.INDEX) {
+ Engine.Index index = indexShard.prepareIndex(sourceToParse)
+ .version(request.version())
+ .versionType(request.versionType())
+ .origin(Engine.Operation.Origin.PRIMARY);
+ if (index.parsedDoc().mappingsModified()) {
+ updateMappingOnMaster(request, indexMetaData);
+ }
+ indexShard.index(index);
+ version = index.version();
+ op = index;
+ created = index.created();
+ } else {
+ Engine.Create create = indexShard.prepareCreate(sourceToParse)
+ .version(request.version())
+ .versionType(request.versionType())
+ .origin(Engine.Operation.Origin.PRIMARY);
+ if (create.parsedDoc().mappingsModified()) {
+ updateMappingOnMaster(request, indexMetaData);
+ }
+ indexShard.create(create);
+ version = create.version();
+ op = create;
+ created = true;
+ }
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
+ } catch (Throwable e) {
+ // ignore
+ }
+ }
+
+ // update the version on the request, so it will be used for the replicas
+ request.version(version);
+
+ IndexResponse response = new IndexResponse(request.index(), request.type(), request.id(), version, created);
+ return new PrimaryResponse<IndexResponse, IndexRequest>(shardRequest.request, response, op);
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
+ IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
+ IndexRequest request = shardRequest.request;
+ SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id())
+ .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
+ if (request.opType() == IndexRequest.OpType.INDEX) {
+ Engine.Index index = indexShard.prepareIndex(sourceToParse)
+ .version(request.version())
+ .origin(Engine.Operation.Origin.REPLICA);
+ indexShard.index(index);
+ } else {
+ Engine.Create create = indexShard.prepareCreate(sourceToParse)
+ .version(request.version())
+ .origin(Engine.Operation.Origin.REPLICA);
+ indexShard.create(create);
+ }
+ if (request.refresh()) {
+ try {
+ indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+
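+ /**
+ * Sends the dynamically updated mapping for the request's type to the master
+ * node, optionally waiting up to 5 seconds for the update when
+ * <tt>action.wait_on_mapping_change</tt> is enabled.
+ */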
+ private void updateMappingOnMaster(final IndexRequest request, IndexMetaData indexMetaData) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ try {
+ final MapperService mapperService = indicesService.indexServiceSafe(request.index()).mapperService();
+ final DocumentMapper documentMapper = mapperService.documentMapper(request.type());
+ if (documentMapper == null) { // should not happen
+ return;
+ }
+ // we generate the order id before we get the mapping to send and refresh the source, so
+ // if two updates happen concurrently, we know that the later order will include the previous one
+ long orderId = mappingUpdatedAction.generateNextMappingUpdateOrder();
+ documentMapper.refreshSource();
+ DiscoveryNode node = clusterService.localNode();
+ final MappingUpdatedAction.MappingUpdatedRequest mappingRequest =
+ new MappingUpdatedAction.MappingUpdatedRequest(request.index(), indexMetaData.uuid(), request.type(), documentMapper.mappingSource(), orderId, node != null ? node.id() : null);
+ logger.trace("Sending mapping updated to master: {}", mappingRequest);
+ mappingUpdatedAction.execute(mappingRequest, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {
+ @Override
+ public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
+ // all is well
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ latch.countDown();
+ logger.warn("Failed to update master on updated mapping for {}", e, mappingRequest);
+ }
+ });
+ } catch (Exception e) {
+ latch.countDown();
+ logger.warn("Failed to update master on updated mapping for index [" + request.index() + "], type [" + request.type() + "]", e);
+ }
+
+ if (waitForMappingChange) {
+ try {
+ latch.await(5, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/index/package-info.java b/src/main/java/org/elasticsearch/action/index/package-info.java
new file mode 100644
index 0000000..842b8fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/index/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Index action.
+ */
+package org.elasticsearch.action.index; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java
new file mode 100644
index 0000000..2628888
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.mlt;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class MoreLikeThisAction extends Action<MoreLikeThisRequest, SearchResponse, MoreLikeThisRequestBuilder> {
+
+ public static final MoreLikeThisAction INSTANCE = new MoreLikeThisAction();
+ public static final String NAME = "mlt";
+
+ private MoreLikeThisAction() {
+ super(NAME);
+ }
+
+ @Override
+ public SearchResponse newResponse() {
+ return new SearchResponse();
+ }
+
+ @Override
+ public MoreLikeThisRequestBuilder newRequestBuilder(Client client) {
+ return new MoreLikeThisRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java
new file mode 100644
index 0000000..09d8f82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java
@@ -0,0 +1,664 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.mlt;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.search.Scroll.readScroll;
+
+/**
+ * A more like this request allowing to search for documents that are "like" the provided document. The document
+ * to check against is fetched based on the index, type and id provided. Best created with {@link org.elasticsearch.client.Requests#moreLikeThisRequest(String)}.
+ * <p/>
+ * <p>Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are required.
+ *
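+ * <p>A minimal usage sketch (hypothetical index, type and field names, assuming a
+ * connected {@link org.elasticsearch.client.Client} named <tt>client</tt>):
+ * <pre>
+ * MoreLikeThisRequest request = Requests.moreLikeThisRequest("twitter")
+ *         .type("tweet")
+ *         .id("1")
+ *         .fields("message");
+ * SearchResponse response = client.moreLikeThis(request).actionGet();
+ * </pre>
+ *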
+ * @see org.elasticsearch.client.Client#moreLikeThis(MoreLikeThisRequest)
+ * @see org.elasticsearch.client.Requests#moreLikeThisRequest(String)
+ * @see org.elasticsearch.action.search.SearchResponse
+ */
+public class MoreLikeThisRequest extends ActionRequest<MoreLikeThisRequest> {
+
+ private static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ private String index;
+
+ private String type;
+
+ private String id;
+
+ private String routing;
+
+ private String[] fields;
+
+ private float percentTermsToMatch = -1;
+ private int minTermFreq = -1;
+ private int maxQueryTerms = -1;
+ private String[] stopWords = null;
+ private int minDocFreq = -1;
+ private int maxDocFreq = -1;
+ private int minWordLength = -1;
+ private int maxWordLength = -1;
+ private float boostTerms = -1;
+
+ private SearchType searchType = SearchType.DEFAULT;
+ private int searchSize = 0;
+ private int searchFrom = 0;
+ private String searchQueryHint;
+ private String[] searchIndices;
+ private String[] searchTypes;
+ private Scroll searchScroll;
+
+ private BytesReference searchSource;
+ private boolean searchSourceUnsafe;
+
+ MoreLikeThisRequest() {
+ }
+
+ /**
+ * Constructs a new more like this request for a document that will be fetched from the provided index.
+ * Use {@link #type(String)} and {@link #id(String)} to specify the document to load.
+ */
+ public MoreLikeThisRequest(String index) {
+ this.index = index;
+ }
+
+ /**
+ * The index to load the document from, which the "like" query will run against.
+ */
+ public String index() {
+ return index;
+ }
+
+ /**
+ * The type of the document to load, from which the "like" query will be built.
+ */
+ public String type() {
+ return type;
+ }
+
+ void index(String index) {
+ this.index = index;
+ }
+
+ /**
+ * The type of the document to load, from which the "like" query will be built.
+ */
+ public MoreLikeThisRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * The id of the document to load, from which the "like" query will be built.
+ */
+ public String id() {
+ return id;
+ }
+
+ /**
+ * The id of the document to load, from which the "like" query will be built.
+ */
+ public MoreLikeThisRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ /**
+ * @return The routing for this request. This is used for the `get` part of the mlt request.
+ */
+ public String routing() {
+ return routing;
+ }
+
+ public void routing(String routing) {
+ this.routing = routing;
+ }
+
+ /**
+ * The fields of the document to use in order to find documents "like" this one. Defaults to running
+ * against all the document fields.
+ */
+ public String[] fields() {
+ return this.fields;
+ }
+
+ /**
+ * The fields of the document to use in order to find documents "like" this one. Defaults to running
+ * against all the document fields.
+ */
+ public MoreLikeThisRequest fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ /**
+ * The percent of the terms to match for each field. Defaults to <tt>0.3f</tt>.
+ */
+ public MoreLikeThisRequest percentTermsToMatch(float percentTermsToMatch) {
+ this.percentTermsToMatch = percentTermsToMatch;
+ return this;
+ }
+
+ /**
+ * The percent of the terms to match for each field. Defaults to <tt>0.3f</tt>.
+ */
+ public float percentTermsToMatch() {
+ return this.percentTermsToMatch;
+ }
+
+ /**
+ * The frequency below which terms will be ignored in the source doc. Defaults to <tt>2</tt>.
+ */
+ public MoreLikeThisRequest minTermFreq(int minTermFreq) {
+ this.minTermFreq = minTermFreq;
+ return this;
+ }
+
+ /**
+ * The frequency below which terms will be ignored in the source doc. Defaults to <tt>2</tt>.
+ */
+ public int minTermFreq() {
+ return this.minTermFreq;
+ }
+
+ /**
+ * The maximum number of query terms that will be included in any generated query. Defaults to <tt>25</tt>.
+ */
+ public MoreLikeThisRequest maxQueryTerms(int maxQueryTerms) {
+ this.maxQueryTerms = maxQueryTerms;
+ return this;
+ }
+
+ /**
+ * The maximum number of query terms that will be included in any generated query. Defaults to <tt>25</tt>.
+ */
+ public int maxQueryTerms() {
+ return this.maxQueryTerms;
+ }
+
+ /**
+ * Any word in this set is considered "uninteresting" and ignored.
+ * <p/>
+ * <p>Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
+ * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
+ * <p/>
+ * <p>Defaults to no stop words.
+ */
+ public MoreLikeThisRequest stopWords(String... stopWords) {
+ this.stopWords = stopWords;
+ return this;
+ }
+
+ /**
+ * Any word in this set is considered "uninteresting" and ignored.
+ * <p/>
+ * <p>Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
+ * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
+ * <p/>
+ * <p>Defaults to no stop words.
+ */
+ public String[] stopWords() {
+ return this.stopWords;
+ }
+
+ /**
+ * Words that do not occur in at least this many docs will be ignored. Defaults
+ * to <tt>5</tt>.
+ */
+ public MoreLikeThisRequest minDocFreq(int minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ return this;
+ }
+
+ /**
+ * Words that do not occur in at least this many docs will be ignored. Defaults
+ * to <tt>5</tt>.
+ */
+ public int minDocFreq() {
+ return this.minDocFreq;
+ }
+
+ /**
+ * The maximum frequency in which words may still appear. Words that appear
+ * in more than this many docs will be ignored. Defaults to unbounded.
+ */
+ public MoreLikeThisRequest maxDocFreq(int maxDocFreq) {
+ this.maxDocFreq = maxDocFreq;
+ return this;
+ }
+
+ /**
+ * The maximum frequency in which words may still appear. Words that appear
+ * in more than this many docs will be ignored. Defaults to unbounded.
+ */
+ public int maxDocFreq() {
+ return this.maxDocFreq;
+ }
+
+ /**
+ * The minimum word length below which words will be ignored. Defaults to <tt>0</tt>.
+ */
+ public MoreLikeThisRequest minWordLength(int minWordLength) {
+ this.minWordLength = minWordLength;
+ return this;
+ }
+
+ /**
+ * The minimum word length below which words will be ignored. Defaults to <tt>0</tt>.
+ */
+ public int minWordLength() {
+ return this.minWordLength;
+ }
+
+ /**
+ * The maximum word length above which words will be ignored. Defaults to unbounded.
+ */
+ public MoreLikeThisRequest maxWordLength(int maxWordLength) {
+ this.maxWordLength = maxWordLength;
+ return this;
+ }
+
+ /**
+ * The maximum word length above which words will be ignored. Defaults to unbounded.
+ */
+ public int maxWordLength() {
+ return this.maxWordLength;
+ }
+
+ /**
+ * The boost factor to use when boosting terms. Defaults to <tt>1</tt>.
+ */
+ public MoreLikeThisRequest boostTerms(float boostTerms) {
+ this.boostTerms = boostTerms;
+ return this;
+ }
+
+ /**
+ * The boost factor to use when boosting terms. Defaults to <tt>1</tt>.
+ */
+ public float boostTerms() {
+ return this.boostTerms;
+ }
+
+ void beforeLocalFork() {
+ if (searchSourceUnsafe) {
+ searchSource = searchSource.copyBytesArray();
+ searchSourceUnsafe = false;
+ }
+ }
+
+ /**
+ * An optional search source request allowing to control the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequest searchSource(SearchSourceBuilder sourceBuilder) {
+ this.searchSource = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE);
+ this.searchSourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing to control the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequest searchSource(String searchSource) {
+ this.searchSource = new BytesArray(searchSource);
+ this.searchSourceUnsafe = false;
+ return this;
+ }
+
+ public MoreLikeThisRequest searchSource(Map searchSource) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(searchSource);
+ return searchSource(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + searchSource + "]", e);
+ }
+ }
+
+ public MoreLikeThisRequest searchSource(XContentBuilder builder) {
+ this.searchSource = builder.bytes();
+ this.searchSourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing to control the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequest searchSource(byte[] searchSource) {
+ return searchSource(searchSource, 0, searchSource.length, false);
+ }
+
+ /**
+ * An optional search source request allowing to control the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequest searchSource(byte[] searchSource, int offset, int length, boolean unsafe) {
+ return searchSource(new BytesArray(searchSource, offset, length), unsafe);
+ }
+
+ /**
+ * An optional search source request allowing to control the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequest searchSource(BytesReference searchSource, boolean unsafe) {
+ this.searchSource = searchSource;
+ this.searchSourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing to control the search request for the
+ * more like this documents.
+ */
+ public BytesReference searchSource() {
+ return this.searchSource;
+ }
+
+ public boolean searchSourceUnsafe() {
+ return searchSourceUnsafe;
+ }
+
+ /**
+ * The search type of the mlt search query.
+ */
+ public MoreLikeThisRequest searchType(SearchType searchType) {
+ this.searchType = searchType;
+ return this;
+ }
+
+ /**
+ * The search type of the mlt search query.
+ */
+ public MoreLikeThisRequest searchType(String searchType) throws ElasticsearchIllegalArgumentException {
+ return searchType(SearchType.fromString(searchType));
+ }
+
+ /**
+ * The search type of the mlt search query.
+ */
+ public SearchType searchType() {
+ return this.searchType;
+ }
+
+ /**
+ * The indices the resulting mlt query will run against. If not set, will run
+ * against the index the document was fetched from.
+ */
+ public MoreLikeThisRequest searchIndices(String... searchIndices) {
+ this.searchIndices = searchIndices;
+ return this;
+ }
+
+ /**
+ * The indices the resulting mlt query will run against. If not set, will run
+ * against the index the document was fetched from.
+ */
+ public String[] searchIndices() {
+ return this.searchIndices;
+ }
+
+ /**
+ * The types the resulting mlt query will run against. If not set, will run
+ * against the type of the document fetched.
+ */
+ public MoreLikeThisRequest searchTypes(String... searchTypes) {
+ this.searchTypes = searchTypes;
+ return this;
+ }
+
+ /**
+ * The types the resulting mlt query will run against. If not set, will run
+ * against the type of the document fetched.
+ */
+ public String[] searchTypes() {
+ return this.searchTypes;
+ }
+
+ /**
+ * Optional search query hint.
+ */
+ public MoreLikeThisRequest searchQueryHint(String searchQueryHint) {
+ this.searchQueryHint = searchQueryHint;
+ return this;
+ }
+
+ /**
+ * Optional search query hint.
+ */
+ public String searchQueryHint() {
+ return this.searchQueryHint;
+ }
+
+ /**
+ * An optional search scroll request to be able to continue and scroll the search
+ * operation.
+ */
+ public MoreLikeThisRequest searchScroll(Scroll searchScroll) {
+ this.searchScroll = searchScroll;
+ return this;
+ }
+
+ /**
+ * An optional search scroll request to be able to continue and scroll the search
+ * operation.
+ */
+ public Scroll searchScroll() {
+ return this.searchScroll;
+ }
+
+ /**
+ * The number of documents to return, defaults to 10.
+ */
+ public MoreLikeThisRequest searchSize(int size) {
+ this.searchSize = size;
+ return this;
+ }
+
+ public int searchSize() {
+ return this.searchSize;
+ }
+
+ /**
+ * The offset into the search result set from which to start returning documents.
+ */
+ public MoreLikeThisRequest searchFrom(int from) {
+ this.searchFrom = from;
+ return this;
+ }
+
+ public int searchFrom() {
+ return this.searchFrom;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = ValidateActions.addValidationError("index is missing", validationException);
+ }
+ if (type == null) {
+ validationException = ValidateActions.addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ validationException = ValidateActions.addValidationError("id is missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ type = in.readString();
+ id = in.readString();
+ // no need to pass threading over the network, they are always false when coming through a thread pool
+ int size = in.readVInt();
+ if (size == 0) {
+ fields = Strings.EMPTY_ARRAY;
+ } else {
+ fields = new String[size];
+ for (int i = 0; i < size; i++) {
+ fields[i] = in.readString();
+ }
+ }
+
+ percentTermsToMatch = in.readFloat();
+ minTermFreq = in.readVInt();
+ maxQueryTerms = in.readVInt();
+ size = in.readVInt();
+ if (size > 0) {
+ stopWords = new String[size];
+ for (int i = 0; i < size; i++) {
+ stopWords[i] = in.readString();
+ }
+ }
+ minDocFreq = in.readVInt();
+ maxDocFreq = in.readVInt();
+ minWordLength = in.readVInt();
+ maxWordLength = in.readVInt();
+ boostTerms = in.readFloat();
+ searchType = SearchType.fromId(in.readByte());
+ if (in.readBoolean()) {
+ searchQueryHint = in.readString();
+ }
+ size = in.readVInt();
+ if (size == 0) {
+ searchIndices = null;
+ } else if (size == 1) {
+ searchIndices = Strings.EMPTY_ARRAY;
+ } else {
+ searchIndices = new String[size - 1];
+ for (int i = 0; i < searchIndices.length; i++) {
+ searchIndices[i] = in.readString();
+ }
+ }
+ size = in.readVInt();
+ if (size == 0) {
+ searchTypes = null;
+ } else if (size == 1) {
+ searchTypes = Strings.EMPTY_ARRAY;
+ } else {
+ searchTypes = new String[size - 1];
+ for (int i = 0; i < searchTypes.length; i++) {
+ searchTypes[i] = in.readString();
+ }
+ }
+ if (in.readBoolean()) {
+ searchScroll = readScroll(in);
+ }
+
+ searchSourceUnsafe = false;
+ searchSource = in.readBytesReference();
+
+ searchSize = in.readVInt();
+ searchFrom = in.readVInt();
+ routing = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeString(type);
+ out.writeString(id);
+ if (fields == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(fields.length);
+ for (String field : fields) {
+ out.writeString(field);
+ }
+ }
+
+ out.writeFloat(percentTermsToMatch);
+ out.writeVInt(minTermFreq);
+ out.writeVInt(maxQueryTerms);
+ if (stopWords == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(stopWords.length);
+ for (String stopWord : stopWords) {
+ out.writeString(stopWord);
+ }
+ }
+ out.writeVInt(minDocFreq);
+ out.writeVInt(maxDocFreq);
+ out.writeVInt(minWordLength);
+ out.writeVInt(maxWordLength);
+ out.writeFloat(boostTerms);
+
+ out.writeByte(searchType.id());
+ if (searchQueryHint == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(searchQueryHint);
+ }
+ if (searchIndices == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(searchIndices.length + 1);
+ for (String index : searchIndices) {
+ out.writeString(index);
+ }
+ }
+ if (searchTypes == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(searchTypes.length + 1);
+ for (String type : searchTypes) {
+ out.writeString(type);
+ }
+ }
+ if (searchScroll == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ searchScroll.writeTo(out);
+ }
+ out.writeBytesReference(searchSource);
+
+ out.writeVInt(searchSize);
+ out.writeVInt(searchFrom);
+ out.writeOptionalString(routing);
+ }
+}
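
The stream format above deliberately shifts the array length by one when writing searchIndices and searchTypes: 0 encodes null (fall back to the document's own index/type), 1 encodes an explicitly empty array, and n encodes n - 1 elements, so null and empty stay distinguishable on the wire. A minimal standalone sketch of the same convention; the class name NullableArrayCodec is illustrative:

    import java.util.Arrays;

    public class NullableArrayCodec {

        // Mirrors MoreLikeThisRequest.writeTo: null -> 0, array -> length + 1.
        static int headerFor(String[] array) {
            return array == null ? 0 : array.length + 1;
        }

        // Mirrors MoreLikeThisRequest.readFrom: 0 -> null, n -> n - 1 slots to read.
        static String[] arrayFor(int header) {
            if (header == 0) {
                return null;
            }
            return new String[header - 1];
        }

        public static void main(String[] args) {
            System.out.println(headerFor(null));                   // 0
            System.out.println(headerFor(new String[0]));          // 1
            System.out.println(headerFor(new String[]{"a", "b"})); // 3
            System.out.println(Arrays.toString(arrayFor(3)));      // [null, null]
        }
    }
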
diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java
new file mode 100644
index 0000000..cbeeb38
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.mlt;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import java.util.Map;
+
+/**
+ */
+public class MoreLikeThisRequestBuilder extends ActionRequestBuilder<MoreLikeThisRequest, SearchResponse, MoreLikeThisRequestBuilder> {
+
+ public MoreLikeThisRequestBuilder(Client client) {
+ super((InternalClient) client, new MoreLikeThisRequest());
+ }
+
+ public MoreLikeThisRequestBuilder(Client client, String index, String type, String id) {
+ super((InternalClient) client, new MoreLikeThisRequest(index).type(type).id(id));
+ }
+
+ /**
+ * The fields of the document to use in order to find documents "like" this one. Defaults to
+ * running against all the document fields.
+ */
+ public MoreLikeThisRequestBuilder setField(String... fields) {
+ request.fields(fields);
+ return this;
+ }
+
+ /**
+ * Sets the routing. Required if routing isn't id based.
+ */
+ public MoreLikeThisRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The percent of the terms to match for each field. Defaults to <tt>0.3f</tt>.
+ */
+ public MoreLikeThisRequestBuilder setPercentTermsToMatch(float percentTermsToMatch) {
+ request.percentTermsToMatch(percentTermsToMatch);
+ return this;
+ }
+
+ /**
+ * The frequency below which terms will be ignored in the source doc. Defaults to <tt>2</tt>.
+ */
+ public MoreLikeThisRequestBuilder setMinTermFreq(int minTermFreq) {
+ request.minTermFreq(minTermFreq);
+ return this;
+ }
+
+ /**
+ * The maximum number of query terms that will be included in any generated query. Defaults to <tt>25</tt>.
+ */
+ public MoreLikeThisRequestBuilder maxQueryTerms(int maxQueryTerms) {
+ request.maxQueryTerms(maxQueryTerms);
+ return this;
+ }
+
+ /**
+ * Any word in this set is considered "uninteresting" and ignored.
+ * <p>
+ * Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
+ * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
+ * <p>
+ * Defaults to no stop words.
+ */
+ public MoreLikeThisRequestBuilder setStopWords(String... stopWords) {
+ request.stopWords(stopWords);
+ return this;
+ }
+
+ /**
+ * Words that do not occur in at least this many docs will be ignored.
+ * Defaults to <tt>5</tt>.
+ */
+ public MoreLikeThisRequestBuilder setMinDocFreq(int minDocFreq) {
+ request.minDocFreq(minDocFreq);
+ return this;
+ }
+
+ /**
+ * The maximum document frequency at which words may still appear. Words that appear
+ * in more than this many docs will be ignored. Defaults to unbounded.
+ */
+ public MoreLikeThisRequestBuilder setMaxDocFreq(int maxDocFreq) {
+ request.maxDocFreq(maxDocFreq);
+ return this;
+ }
+
+ /**
+ * The minimum word length below which words will be ignored. Defaults to <tt>0</tt>.
+ */
+ public MoreLikeThisRequestBuilder setMinWordLen(int minWordLen) {
+ request.minWordLength(minWordLen);
+ return this;
+ }
+
+ /**
+ * The maximum word length above which words will be ignored. Defaults to unbounded.
+ */
+ public MoreLikeThisRequestBuilder setMaxWordLen(int maxWordLen) {
+ request().maxWordLength(maxWordLen);
+ return this;
+ }
+
+ /**
+ * The boost factor to use when boosting terms. Defaults to <tt>1</tt>.
+ */
+ public MoreLikeThisRequestBuilder setBoostTerms(float boostTerms) {
+ request.boostTerms(boostTerms);
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing control over the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequestBuilder setSearchSource(SearchSourceBuilder sourceBuilder) {
+ request.searchSource(sourceBuilder);
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing control over the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequestBuilder setSearchSource(String searchSource) {
+ request.searchSource(searchSource);
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing control over the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequestBuilder setSearchSource(Map searchSource) {
+ request.searchSource(searchSource);
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing control over the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequestBuilder setSearchSource(XContentBuilder builder) {
+ request.searchSource(builder);
+ return this;
+ }
+
+ /**
+ * An optional search source request allowing control over the search request for the
+ * more like this documents.
+ */
+ public MoreLikeThisRequestBuilder setSearchSource(byte[] searchSource) {
+ request.searchSource(searchSource);
+ return this;
+ }
+
+ /**
+ * The search type of the mlt search query.
+ */
+ public MoreLikeThisRequestBuilder setSearchType(SearchType searchType) {
+ request.searchType(searchType);
+ return this;
+ }
+
+ /**
+ * The search type of the mlt search query.
+ */
+ public MoreLikeThisRequestBuilder setSearchType(String searchType) throws ElasticsearchIllegalArgumentException {
+ request.searchType(searchType);
+ return this;
+ }
+
+ /**
+ * The indices the resulting mlt query will run against. If not set, will run
+ * against the index the document was fetched from.
+ */
+ public MoreLikeThisRequestBuilder setSearchIndices(String... searchIndices) {
+ request.searchIndices(searchIndices);
+ return this;
+ }
+
+ /**
+ * The types the resulting mlt query will run against. If not set, will run
+ * against the type of the document fetched.
+ */
+ public MoreLikeThisRequestBuilder setSearchTypes(String... searchTypes) {
+ request.searchTypes(searchTypes);
+ return this;
+ }
+
+ /**
+ * An optional search scroll request to be able to continue and scroll the search
+ * operation.
+ */
+ public MoreLikeThisRequestBuilder setSearchScroll(Scroll searchScroll) {
+ request.searchScroll(searchScroll);
+ return this;
+ }
+
+ /**
+ * The number of documents to return, defaults to 10.
+ */
+ public MoreLikeThisRequestBuilder setSearchSize(int size) {
+ request.searchSize(size);
+ return this;
+ }
+
+ /**
+ * The offset into the search result set from which to start returning documents.
+ */
+ public MoreLikeThisRequestBuilder setSearchFrom(int from) {
+ request.searchFrom(from);
+ return this;
+ }
+
+
+ @Override
+ protected void doExecute(ActionListener<SearchResponse> listener) {
+ ((Client) client).moreLikeThis(request, listener);
+ }
+}
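
Taken together, the builder reads naturally from client code. A minimal usage sketch, assuming an already-connected Client named "client" and a "twitter" index holding a "tweet" document with id "1":

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;

    public class MltExample {
        public static SearchResponse findSimilar(Client client) {
            return client.prepareMoreLikeThis("twitter", "tweet", "1")
                    .setField("message")   // extract interesting terms from this field only
                    .setMinTermFreq(1)     // keep terms that occur just once in the source doc
                    .setMinDocFreq(1)      // keep terms that occur in just one doc
                    .setSearchSize(5)      // return at most five similar documents
                    .execute()
                    .actionGet();
        }
    }
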
diff --git a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java
new file mode 100644
index 0000000..0b0912b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java
@@ -0,0 +1,363 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.mlt;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.TransportGetAction;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.TransportSearchAction;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.get.GetField;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.MoreLikeThisFieldQueryBuilder;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Set;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.client.Requests.getRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+/**
+ * The more like this action.
+ */
+public class TransportMoreLikeThisAction extends TransportAction<MoreLikeThisRequest, SearchResponse> {
+
+ private final TransportSearchAction searchAction;
+
+ private final TransportGetAction getAction;
+
+ private final IndicesService indicesService;
+
+ private final ClusterService clusterService;
+
+ private final TransportService transportService;
+
+ @Inject
+ public TransportMoreLikeThisAction(Settings settings, ThreadPool threadPool, TransportSearchAction searchAction, TransportGetAction getAction,
+ ClusterService clusterService, IndicesService indicesService, TransportService transportService) {
+ super(settings, threadPool);
+ this.searchAction = searchAction;
+ this.getAction = getAction;
+ this.indicesService = indicesService;
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+
+ transportService.registerHandler(MoreLikeThisAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final MoreLikeThisRequest request, final ActionListener<SearchResponse> listener) {
+ ClusterState clusterState = clusterService.state();
+ // resolve the request's index to the concrete index name
+ final String concreteIndex = clusterState.metaData().concreteIndex(request.index());
+
+ Iterable<MutableShardRouting> routingNode = clusterState.getRoutingNodes().routingNodeIter(clusterService.localNode().getId());
+ if (routingNode == null) {
+ redirect(request, concreteIndex, listener, clusterState);
+ return;
+ }
+ boolean hasIndexLocally = false;
+ for (MutableShardRouting shardRouting : routingNode) {
+ if (concreteIndex.equals(shardRouting.index())) {
+ hasIndexLocally = true;
+ break;
+ }
+ }
+ if (!hasIndexLocally) {
+ redirect(request, concreteIndex, listener, clusterState);
+ return;
+ }
+ Set<String> getFields = newHashSet();
+ if (request.fields() != null) {
+ Collections.addAll(getFields, request.fields());
+ }
+ // add the source, in case we need to parse it to get fields
+ getFields.add(SourceFieldMapper.NAME);
+
+ GetRequest getRequest = getRequest(concreteIndex)
+ .fields(getFields.toArray(new String[getFields.size()]))
+ .type(request.type())
+ .id(request.id())
+ .routing(request.routing())
+ .listenerThreaded(true)
+ .operationThreaded(true);
+
+ request.beforeLocalFork();
+ getAction.execute(getRequest, new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse getResponse) {
+ if (!getResponse.isExists()) {
+ listener.onFailure(new DocumentMissingException(null, request.type(), request.id()));
+ return;
+ }
+ final BoolQueryBuilder boolBuilder = boolQuery();
+ try {
+ final DocumentMapper docMapper = indicesService.indexServiceSafe(concreteIndex).mapperService().documentMapper(request.type());
+ if (docMapper == null) {
+ throw new ElasticsearchException("No DocumentMapper found for type [" + request.type() + "]");
+ }
+ final Set<String> fields = newHashSet();
+ if (request.fields() != null) {
+ for (String field : request.fields()) {
+ FieldMappers fieldMappers = docMapper.mappers().smartName(field);
+ if (fieldMappers != null) {
+ fields.add(fieldMappers.mapper().names().indexName());
+ } else {
+ fields.add(field);
+ }
+ }
+ }
+
+ if (!fields.isEmpty()) {
+ // if fields are not empty, see if we got them in the response
+ for (Iterator<String> it = fields.iterator(); it.hasNext(); ) {
+ String field = it.next();
+ GetField getField = getResponse.getField(field);
+ if (getField != null) {
+ for (Object value : getField.getValues()) {
+ addMoreLikeThis(request, boolBuilder, getField.getName(), value.toString(), true);
+ }
+ it.remove();
+ }
+ }
+ if (!fields.isEmpty()) {
+ // if we don't get all the fields in the get response, see if we can parse the source
+ parseSource(getResponse, boolBuilder, docMapper, fields, request);
+ }
+ } else {
+ // we did not ask for any fields, try and get it from the source
+ parseSource(getResponse, boolBuilder, docMapper, fields, request);
+ }
+
+ if (!boolBuilder.hasClauses()) {
+ // no field added, fail
+ listener.onFailure(new ElasticsearchException("No fields found to fetch the 'likeText' from"));
+ return;
+ }
+
+ // exclude myself
+ Term uidTerm = docMapper.uidMapper().term(request.type(), request.id());
+ boolBuilder.mustNot(termQuery(uidTerm.field(), uidTerm.text()));
+ boolBuilder.adjustPureNegative(false);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ return;
+ }
+
+ String[] searchIndices = request.searchIndices();
+ if (searchIndices == null) {
+ searchIndices = new String[]{request.index()};
+ }
+ String[] searchTypes = request.searchTypes();
+ if (searchTypes == null) {
+ searchTypes = new String[]{request.type()};
+ }
+ int size = request.searchSize() != 0 ? request.searchSize() : 10;
+ int from = request.searchFrom();
+ SearchRequest searchRequest = searchRequest(searchIndices)
+ .types(searchTypes)
+ .searchType(request.searchType())
+ .scroll(request.searchScroll())
+ .extraSource(searchSource()
+ .query(boolBuilder)
+ .from(from)
+ .size(size)
+ )
+ .listenerThreaded(request.listenerThreaded());
+
+ if (request.searchSource() != null) {
+ searchRequest.source(request.searchSource(), request.searchSourceUnsafe());
+ }
+ searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ }
+
+ // Redirects the request to a data node that has the index metadata locally available.
+ private void redirect(MoreLikeThisRequest request, String concreteIndex, final ActionListener<SearchResponse> listener, ClusterState clusterState) {
+ ShardIterator shardIterator = clusterService.operationRouting().getShards(clusterState, concreteIndex, request.type(), request.id(), request.routing(), null);
+ ShardRouting shardRouting = shardIterator.firstOrNull();
+ if (shardRouting == null) {
+ throw new ElasticsearchException("No shards for index " + request.index());
+ }
+ String nodeId = shardRouting.currentNodeId();
+ DiscoveryNode discoveryNode = clusterState.nodes().get(nodeId);
+ transportService.sendRequest(discoveryNode, MoreLikeThisAction.NAME, request, new TransportResponseHandler<SearchResponse>() {
+
+ @Override
+ public SearchResponse newInstance() {
+ return new SearchResponse();
+ }
+
+ @Override
+ public void handleResponse(SearchResponse response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+
+ private void parseSource(GetResponse getResponse, final BoolQueryBuilder boolBuilder, DocumentMapper docMapper, final Set<String> fields, final MoreLikeThisRequest request) {
+ if (getResponse.isSourceEmpty()) {
+ return;
+ }
+ docMapper.parse(SourceToParse.source(getResponse.getSourceAsBytesRef()).type(request.type()).id(request.id()), new DocumentMapper.ParseListenerAdapter() {
+ @Override
+ public boolean beforeFieldAdded(FieldMapper fieldMapper, Field field, Object parseContext) {
+ if (!field.fieldType().indexed()) {
+ return false;
+ }
+ if (fieldMapper instanceof InternalMapper) {
+ return true;
+ }
+ String value = fieldMapper.value(convertField(field)).toString();
+ if (value == null) {
+ return false;
+ }
+
+ if (fields.isEmpty() || fields.contains(field.name())) {
+ addMoreLikeThis(request, boolBuilder, fieldMapper, field, !fields.isEmpty());
+ }
+
+ return false;
+ }
+ });
+ }
+
+ private Object convertField(Field field) {
+ if (field.stringValue() != null) {
+ return field.stringValue();
+ } else if (field.binaryValue() != null) {
+ return BytesRef.deepCopyOf(field.binaryValue()).bytes;
+ } else if (field.numericValue() != null) {
+ return field.numericValue();
+ } else {
+ throw new ElasticsearchIllegalStateException("Field should have either a string, numeric or binary value");
+ }
+ }
+
+ private void addMoreLikeThis(MoreLikeThisRequest request, BoolQueryBuilder boolBuilder, FieldMapper fieldMapper, Field field, boolean failOnUnsupportedField) {
+ addMoreLikeThis(request, boolBuilder, field.name(), fieldMapper.value(convertField(field)).toString(), failOnUnsupportedField);
+ }
+
+ private void addMoreLikeThis(MoreLikeThisRequest request, BoolQueryBuilder boolBuilder, String fieldName, String likeText, boolean failOnUnsupportedField) {
+ MoreLikeThisFieldQueryBuilder mlt = moreLikeThisFieldQuery(fieldName)
+ .likeText(likeText)
+ .percentTermsToMatch(request.percentTermsToMatch())
+ .boostTerms(request.boostTerms())
+ .minDocFreq(request.minDocFreq())
+ .maxDocFreq(request.maxDocFreq())
+ .minWordLength(request.minWordLength())
+ .maxWordLen(request.maxWordLength())
+ .minTermFreq(request.minTermFreq())
+ .maxQueryTerms(request.maxQueryTerms())
+ .stopWords(request.stopWords())
+ .failOnUnsupportedField(failOnUnsupportedField);
+ boolBuilder.should(mlt);
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<MoreLikeThisRequest> {
+
+ @Override
+ public MoreLikeThisRequest newInstance() {
+ return new MoreLikeThisRequest();
+ }
+
+ @Override
+ public void messageReceived(MoreLikeThisRequest request, final TransportChannel channel) throws Exception {
+ // no need to have a threaded listener since we just send back a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response for mlt", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
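
Once the source document's terms are in hand, the action reduces to a plain search. A rough hand-rolled sketch of the kind of query it assembles, assuming a single fetched field "message"; the likeText and the "tweet#1" uid are illustrative:

    import org.elasticsearch.index.query.BoolQueryBuilder;

    import static org.elasticsearch.index.query.QueryBuilders.*;

    public class MltQuerySketch {
        public static BoolQueryBuilder buildQuery(String likeText) {
            return boolQuery()
                    // one more_like_this_field clause per fetched field value
                    .should(moreLikeThisFieldQuery("message")
                            .likeText(likeText)
                            .minTermFreq(2)
                            .maxQueryTerms(25))
                    // exclude the source document itself via its _uid ("type#id")
                    .mustNot(termQuery("_uid", "tweet#1"))
                    .adjustPureNegative(false);
        }
    }
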
diff --git a/src/main/java/org/elasticsearch/action/mlt/package-info.java b/src/main/java/org/elasticsearch/action/mlt/package-info.java
new file mode 100644
index 0000000..8c02dde
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/mlt/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * More Like This action.
+ */
+package org.elasticsearch.action.mlt; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateAction.java
new file mode 100644
index 0000000..d32eac8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class MultiPercolateAction extends Action<MultiPercolateRequest, MultiPercolateResponse, MultiPercolateRequestBuilder> {
+
+ public static final MultiPercolateAction INSTANCE = new MultiPercolateAction();
+ public static final String NAME = "mpercolate";
+
+ private MultiPercolateAction() {
+ super(NAME);
+ }
+
+ @Override
+ public MultiPercolateResponse newResponse() {
+ return new MultiPercolateResponse();
+ }
+
+ @Override
+ public MultiPercolateRequestBuilder newRequestBuilder(Client client) {
+ return new MultiPercolateRequestBuilder(client);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java
new file mode 100644
index 0000000..535ee00
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ */
+public class MultiPercolateRequest extends ActionRequest<MultiPercolateRequest> {
+
+ private String[] indices;
+ private String documentType;
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+ private List<PercolateRequest> requests = Lists.newArrayList();
+
+ public MultiPercolateRequest add(PercolateRequestBuilder requestBuilder) {
+ return add(requestBuilder.request());
+ }
+
+ public MultiPercolateRequest add(PercolateRequest request) {
+ if (request.indices() == null && indices != null) {
+ request.indices(indices);
+ }
+ if (request.documentType() == null && documentType != null) {
+ request.documentType(documentType);
+ }
+ if (request.indicesOptions() == IndicesOptions.strict() && indicesOptions != IndicesOptions.strict()) {
+ request.indicesOptions(indicesOptions);
+ }
+ requests.add(request);
+ return this;
+ }
+
+ public MultiPercolateRequest add(byte[] data, int from, int length, boolean contentUnsafe) throws Exception {
+ return add(new BytesArray(data, from, length), contentUnsafe, true);
+ }
+
+ public MultiPercolateRequest add(BytesReference data, boolean contentUnsafe, boolean allowExplicitIndex) throws Exception {
+ XContent xContent = XContentFactory.xContent(data);
+ int from = 0;
+ int length = data.length();
+ byte marker = xContent.streamSeparator();
+ while (true) {
+ int nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+ // support first line with \n
+ if (nextMarker == 0) {
+ from = nextMarker + 1;
+ continue;
+ }
+
+ PercolateRequest percolateRequest = new PercolateRequest();
+ if (indices != null) {
+ percolateRequest.indices(indices);
+ }
+ if (documentType != null) {
+ percolateRequest.documentType(documentType);
+ }
+ if (indicesOptions != IndicesOptions.strict()) {
+ percolateRequest.indicesOptions(indicesOptions);
+ }
+
+ // now parse the action
+ if (nextMarker - from > 0) {
+ XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from));
+ try {
+ // Move to START_OBJECT; if the token is null, the data is empty
+ XContentParser.Token token = parser.nextToken();
+ if (token != null) {
+ // Top-level JSON object
+ assert token == XContentParser.Token.START_OBJECT;
+ token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new ElasticsearchParseException("Expected field");
+ }
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchParseException("expected start object");
+ }
+ String percolateAction = parser.currentName();
+ if ("percolate".equals(percolateAction)) {
+ parsePercolateAction(parser, percolateRequest, allowExplicitIndex);
+ } else if ("count".equals(percolateAction)) {
+ percolateRequest.onlyCount(true);
+ parsePercolateAction(parser, percolateRequest, allowExplicitIndex);
+ } else {
+ throw new ElasticsearchParseException(percolateAction + " isn't a supported percolate operation");
+ }
+ }
+ } finally {
+ parser.close();
+ }
+ }
+
+ // move pointers
+ from = nextMarker + 1;
+
+ // now for the body
+ nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+
+ percolateRequest.source(data.slice(from, nextMarker - from), contentUnsafe);
+ // move pointers
+ from = nextMarker + 1;
+
+ add(percolateRequest);
+ }
+
+ return this;
+ }
+
+ private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException {
+ String globalIndex = indices != null && indices.length > 0 ? indices[0] : null;
+
+ Map<String, Object> header = new HashMap<String, Object>();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ header.put(currentFieldName, parser.text());
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ header.put(currentFieldName, parseArray(parser));
+ }
+ }
+
+ boolean ignoreUnavailable = IndicesOptions.strict().ignoreUnavailable();
+ boolean allowNoIndices = IndicesOptions.strict().allowNoIndices();
+ boolean expandWildcardsOpen = IndicesOptions.strict().expandWildcardsOpen();
+ boolean expandWildcardsClosed = IndicesOptions.strict().expandWildcardsClosed();
+
+ if (header.containsKey("id")) {
+ GetRequest getRequest = new GetRequest(globalIndex);
+ percolateRequest.getRequest(getRequest);
+ for (Map.Entry<String, Object> entry : header.entrySet()) {
+ Object value = entry.getValue();
+ if ("id".equals(entry.getKey())) {
+ getRequest.id((String) value);
+ header.put("id", entry.getValue());
+ } else if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) {
+ if (!allowExplicitIndex) {
+ throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed");
+ }
+ getRequest.index((String) value);
+ } else if ("type".equals(entry.getKey())) {
+ getRequest.type((String) value);
+ } else if ("preference".equals(entry.getKey())) {
+ getRequest.preference((String) value);
+ } else if ("routing".equals(entry.getKey())) {
+ getRequest.routing((String) value);
+ } else if ("percolate_index".equals(entry.getKey()) || "percolate_indices".equals(entry.getKey()) || "percolateIndex".equals(entry.getKey()) || "percolateIndices".equals(entry.getKey())) {
+ if (value instanceof String[]) {
+ percolateRequest.indices((String[]) value);
+ } else {
+ percolateRequest.indices(Strings.splitStringByCommaToArray((String) value));
+ }
+ } else if ("percolate_type".equals(entry.getKey()) || "percolateType".equals(entry.getKey())) {
+ percolateRequest.documentType((String) value);
+ } else if ("percolate_preference".equals(entry.getKey()) || "percolatePreference".equals(entry.getKey())) {
+ percolateRequest.preference((String) value);
+ } else if ("percolate_routing".equals(entry.getKey()) || "percolateRouting".equals(entry.getKey())) {
+ percolateRequest.routing((String) value);
+ } else if ("ignore_unavailable".equals(currentFieldName) || "ignoreUnavailable".equals(currentFieldName)) {
+ ignoreUnavailable = Boolean.valueOf((String) value);
+ } else if ("allow_no_indices".equals(currentFieldName) || "allowNoIndices".equals(currentFieldName)) {
+ allowNoIndices = Boolean.valueOf((String) value);
+ } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) {
+ String[] wildcards;
+ if (value instanceof String[]) {
+ wildcards = (String[]) value;
+ } else {
+ wildcards = Strings.splitStringByCommaToArray((String) value);
+ }
+
+ for (String wildcard : wildcards) {
+ if ("open".equals(wildcard)) {
+ expandWildcardsOpen = true;
+ } else if ("closed".equals(wildcard)) {
+ expandWildcardsClosed = true;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+ }
+ }
+ }
+ }
+
+ // Setting values based on get request, if needed...
+ if ((percolateRequest.indices() == null || percolateRequest.indices().length == 0) && getRequest.index() != null) {
+ percolateRequest.indices(getRequest.index());
+ }
+ if (percolateRequest.documentType() == null && getRequest.type() != null) {
+ percolateRequest.documentType(getRequest.type());
+ }
+ if (percolateRequest.routing() == null && getRequest.routing() != null) {
+ percolateRequest.routing(getRequest.routing());
+ }
+ if (percolateRequest.preference() == null && getRequest.preference() != null) {
+ percolateRequest.preference(getRequest.preference());
+ }
+ } else {
+ for (Map.Entry<String, Object> entry : header.entrySet()) {
+ Object value = entry.getValue();
+ if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) {
+ if (!allowExplicitIndex) {
+ throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed");
+ }
+ if (value instanceof String[]) {
+ percolateRequest.indices((String[]) value);
+ } else {
+ percolateRequest.indices(Strings.splitStringByCommaToArray((String) value));
+ }
+ } else if ("type".equals(entry.getKey())) {
+ percolateRequest.documentType((String) value);
+ } else if ("preference".equals(entry.getKey())) {
+ percolateRequest.preference((String) value);
+ } else if ("routing".equals(entry.getKey())) {
+ percolateRequest.routing((String) value);
+ } else if ("ignore_unavailable".equals(currentFieldName) || "ignoreUnavailable".equals(currentFieldName)) {
+ ignoreUnavailable = Boolean.valueOf((String) value);
+ } else if ("allow_no_indices".equals(currentFieldName) || "allowNoIndices".equals(currentFieldName)) {
+ allowNoIndices = Boolean.valueOf((String) value);
+ } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) {
+ String[] wildcards;
+ if (value instanceof String[]) {
+ wildcards = (String[]) value;
+ } else {
+ wildcards = Strings.splitStringByCommaToArray((String) value);
+ }
+
+ for (String wildcard : wildcards) {
+ if ("open".equals(wildcard)) {
+ expandWildcardsOpen = true;
+ } else if ("closed".equals(wildcard)) {
+ expandWildcardsClosed = true;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+ }
+ }
+ }
+ }
+ }
+ percolateRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
+ }
+
+ private String[] parseArray(XContentParser parser) throws IOException {
+ final List<String> list = new ArrayList<String>();
+ assert parser.currentToken() == XContentParser.Token.START_ARRAY;
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ list.add(parser.text());
+ }
+ return list.toArray(new String[list.size()]);
+ }
+
+ private int findNextMarker(byte marker, int from, BytesReference data, int length) {
+ for (int i = from; i < length; i++) {
+ if (data.get(i) == marker) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ public List<PercolateRequest> requests() {
+ return this.requests;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public MultiPercolateRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public MultiPercolateRequest indices(String... indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ public String documentType() {
+ return documentType;
+ }
+
+ public MultiPercolateRequest documentType(String type) {
+ this.documentType = type;
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (requests.isEmpty()) {
+ validationException = addValidationError("no requests added", validationException);
+ }
+ for (int i = 0; i < requests.size(); i++) {
+ ActionRequestValidationException ex = requests.get(i).validate();
+ if (ex != null) {
+ if (validationException == null) {
+ validationException = new ActionRequestValidationException();
+ }
+ validationException.addValidationErrors(ex.validationErrors());
+ }
+ }
+
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ documentType = in.readOptionalString();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ PercolateRequest request = new PercolateRequest();
+ request.readFrom(in);
+ requests.add(request);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(indices);
+ out.writeOptionalString(documentType);
+ indicesOptions.writeIndicesOptions(out);
+ out.writeVInt(requests.size());
+ for (PercolateRequest request : requests) {
+ request.writeTo(out);
+ }
+ }
+}
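
The add(BytesReference, ...) parser above consumes a newline-delimited body in the style of the bulk API: each action line ("percolate" or "count") is followed by a source line, and the final line must end with a newline so the marker scan can find it. A minimal sketch of feeding such a body; index and type names are illustrative:

    import org.elasticsearch.action.percolate.MultiPercolateRequest;
    import org.elasticsearch.common.bytes.BytesArray;

    public class MultiPercolateBodySketch {
        public static MultiPercolateRequest build() throws Exception {
            String body =
                    "{\"percolate\": {\"index\": \"twitter\", \"type\": \"tweet\"}}\n" +
                    "{\"doc\": {\"message\": \"some text\"}}\n" +
                    "{\"count\": {\"index\": \"twitter\", \"type\": \"tweet\"}}\n" +
                    "{\"doc\": {\"message\": \"some other text\"}}\n";
            // contentUnsafe = false: bytes are not shared; allowExplicitIndex = true
            return new MultiPercolateRequest().add(new BytesArray(body), false, true);
        }
    }
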
diff --git a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequestBuilder.java b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequestBuilder.java
new file mode 100644
index 0000000..64d79d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequestBuilder.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+
+/**
+ */
+public class MultiPercolateRequestBuilder extends ActionRequestBuilder<MultiPercolateRequest, MultiPercolateResponse, MultiPercolateRequestBuilder> {
+
+ public MultiPercolateRequestBuilder(Client client) {
+ super((InternalClient) client, new MultiPercolateRequest());
+ }
+
+ /**
+ * Adds the specified percolate request to the multi percolate request.
+ */
+ public MultiPercolateRequestBuilder add(PercolateRequest percolateRequest) {
+ request.add(percolateRequest);
+ return this;
+ }
+
+ /**
+ * Adds the specified percolate request builder to the multi percolate request.
+ */
+ public MultiPercolateRequestBuilder add(PercolateRequestBuilder percolateRequestBuilder) {
+ request.add(percolateRequestBuilder);
+ return this;
+ }
+
+ /**
+ * Specifies how to globally ignore indices that are not available and how to deal with wildcard indices expressions.
+ *
+ * Invoke this method before invoking {@link #add(PercolateRequestBuilder)}.
+ */
+ public MultiPercolateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<MultiPercolateResponse> listener) {
+ ((Client) client).multiPercolate(request, listener);
+ }
+}
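
A minimal usage sketch, assuming an already-connected Client named "client" and queries registered under the ".percolator" type of a "twitter" index; setSource(XContentBuilder) on the percolate builder is assumed to mirror PercolateRequest.source(XContentBuilder):

    import org.elasticsearch.action.percolate.MultiPercolateResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class MultiPercolateExample {
        public static MultiPercolateResponse run(Client client) throws Exception {
            return client.prepareMultiPercolate()
                    .add(client.preparePercolate()
                            .setIndices("twitter")
                            .setDocumentType("tweet")
                            .setSource(XContentFactory.jsonBuilder()
                                    .startObject()
                                        .startObject("doc")
                                            .field("message", "some text")
                                        .endObject()
                                    .endObject()))
                    .execute()
                    .actionGet();
        }
    }
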
diff --git a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java
new file mode 100644
index 0000000..87e17eb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ */
+public class MultiPercolateResponse extends ActionResponse implements Iterable<MultiPercolateResponse.Item>, ToXContent {
+
+ private Item[] items;
+
+ public MultiPercolateResponse(Item[] items) {
+ this.items = items;
+ }
+
+ public MultiPercolateResponse() {
+ this.items = new Item[0];
+ }
+
+ @Override
+ public Iterator<Item> iterator() {
+ return Iterators.forArray(items);
+ }
+
+ public Item[] items() {
+ return items;
+ }
+
+ public Item[] getItems() {
+ return items;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.startArray(Fields.RESPONSES);
+ for (MultiPercolateResponse.Item item : items) {
+ if (item.isFailure()) {
+ builder.startObject();
+ builder.field(Fields.ERROR, item.getErrorMessage());
+ builder.endObject();
+ } else {
+ item.getResponse().toXContent(builder, params);
+ }
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(items.length);
+ for (Item item : items) {
+ item.writeTo(out);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ items = new Item[size];
+ for (int i = 0; i < items.length; i++) {
+ items[i] = new Item();
+ items[i].readFrom(in);
+ }
+ }
+
+ public static class Item implements Streamable {
+
+ private PercolateResponse response;
+ private String errorMessage;
+
+ public Item(PercolateResponse response) {
+ this.response = response;
+ }
+
+ public Item(String errorMessage) {
+ this.errorMessage = errorMessage;
+ }
+
+ public Item() {
+ }
+
+ public PercolateResponse response() {
+ return response;
+ }
+
+ public String errorMessage() {
+ return errorMessage;
+ }
+
+ public PercolateResponse getResponse() {
+ return response;
+ }
+
+ public String getErrorMessage() {
+ return errorMessage;
+ }
+
+ public boolean isFailure() {
+ return errorMessage != null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ response = new PercolateResponse();
+ response.readFrom(in);
+ } else {
+ errorMessage = in.readString();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (response != null) {
+ out.writeBoolean(true);
+ response.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ out.writeString(errorMessage);
+ }
+ }
+
+ }
+
+ static final class Fields {
+ static final XContentBuilderString RESPONSES = new XContentBuilderString("responses");
+ static final XContentBuilderString ERROR = new XContentBuilderString("error");
+ }
+
+}
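
Items come back in request order, and each one is either a wrapped PercolateResponse or an error message, so consuming the response is a straightforward loop. A short sketch, assuming PercolateResponse exposes getCount() for the number of matching queries:

    import org.elasticsearch.action.percolate.MultiPercolateResponse;

    public class MultiPercolateResponseSketch {
        public static void printResults(MultiPercolateResponse response) {
            for (MultiPercolateResponse.Item item : response) {
                if (item.isFailure()) {
                    System.err.println("percolate failed: " + item.getErrorMessage());
                } else {
                    System.out.println("matched " + item.getResponse().getCount() + " queries");
                }
            }
        }
    }
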
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java
new file mode 100644
index 0000000..249f213
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class PercolateAction extends Action<PercolateRequest, PercolateResponse, PercolateRequestBuilder> {
+
+ public static final PercolateAction INSTANCE = new PercolateAction();
+ public static final String NAME = "percolate";
+
+ private PercolateAction() {
+ super(NAME);
+ }
+
+ @Override
+ public PercolateResponse newResponse() {
+ return new PercolateResponse();
+ }
+
+ @Override
+ public PercolateRequestBuilder newRequestBuilder(Client client) {
+ return new PercolateRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java b/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java
new file mode 100644
index 0000000..548fe23
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ *
+ */
+public class PercolateRequest extends BroadcastOperationRequest<PercolateRequest> {
+
+ public static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ private String documentType;
+ private String routing;
+ private String preference;
+ private GetRequest getRequest;
+ private boolean onlyCount;
+
+ private BytesReference source;
+ private boolean unsafe;
+
+ private BytesReference docSource;
+
+ // Used internally to compute tookInMillis, since TransportBroadcastOperationAction itself doesn't offer
+ // an easy way to hold it temporarily
+ long startTime;
+
+ public PercolateRequest() {
+ }
+
+ public PercolateRequest(PercolateRequest request, BytesReference docSource) {
+ super(request.indices());
+ operationThreading(request.operationThreading());
+ this.documentType = request.documentType();
+ this.routing = request.routing();
+ this.preference = request.preference();
+ this.source = request.source;
+ this.docSource = docSource;
+ this.onlyCount = request.onlyCount;
+ this.startTime = request.startTime;
+ }
+
+ public String documentType() {
+ return documentType;
+ }
+
+ public void documentType(String type) {
+ this.documentType = type;
+ }
+
+ public String routing() {
+ return routing;
+ }
+
+ public PercolateRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ public String preference() {
+ return preference;
+ }
+
+ public PercolateRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public GetRequest getRequest() {
+ return getRequest;
+ }
+
+ public void getRequest(GetRequest getRequest) {
+ this.getRequest = getRequest;
+ }
+
+ /**
+ * Before we fork on a local thread, make sure we copy over the bytes if they are unsafe
+ */
+ @Override
+ public void beforeLocalFork() {
+ if (unsafe) {
+ source = source.copyBytesArray();
+ unsafe = false;
+ }
+ }
+
+ public BytesReference source() {
+ return source;
+ }
+
+ public PercolateRequest source(Map document) throws ElasticsearchGenerationException {
+ return source(document, contentType);
+ }
+
+ public PercolateRequest source(Map document, XContentType contentType) throws ElasticsearchGenerationException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(document);
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + document + "]", e);
+ }
+ }
+
+ public PercolateRequest source(String document) {
+ this.source = new BytesArray(document);
+ this.unsafe = false;
+ return this;
+ }
+
+ public PercolateRequest source(XContentBuilder documentBuilder) {
+ source = documentBuilder.bytes();
+ unsafe = false;
+ return this;
+ }
+
+ public PercolateRequest source(byte[] document) {
+ return source(document, 0, document.length);
+ }
+
+ public PercolateRequest source(byte[] source, int offset, int length) {
+ return source(source, offset, length, false);
+ }
+
+ public PercolateRequest source(byte[] source, int offset, int length, boolean unsafe) {
+ return source(new BytesArray(source, offset, length), unsafe);
+ }
+
+ public PercolateRequest source(BytesReference source, boolean unsafe) {
+ this.source = source;
+ this.unsafe = unsafe;
+ return this;
+ }
+
+ public PercolateRequest source(PercolateSourceBuilder sourceBuilder) {
+ this.source = sourceBuilder.buildAsBytes(contentType);
+ this.unsafe = false;
+ return this;
+ }
+
+ public boolean onlyCount() {
+ return onlyCount;
+ }
+
+ public void onlyCount(boolean onlyCount) {
+ this.onlyCount = onlyCount;
+ }
+
+ BytesReference docSource() {
+ return docSource;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (documentType == null) {
+ validationException = addValidationError("type is missing", validationException);
+ }
+ if (source == null && getRequest == null) {
+ validationException = addValidationError("source or get is missing", validationException);
+ }
+ if (getRequest != null && getRequest.fields() != null) {
+ validationException = addValidationError("get fields option isn't supported via percolate request", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ startTime = in.readVLong();
+ documentType = in.readString();
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+ unsafe = false;
+ source = in.readBytesReference();
+ docSource = in.readBytesReference();
+ if (in.readBoolean()) {
+ getRequest = new GetRequest(null);
+ getRequest.readFrom(in);
+ }
+ onlyCount = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(startTime);
+ out.writeString(documentType);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+ out.writeBytesReference(source);
+ out.writeBytesReference(docSource);
+ if (getRequest != null) {
+ out.writeBoolean(true);
+ getRequest.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ }
+ out.writeBoolean(onlyCount);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java b/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java
new file mode 100644
index 0000000..83fbd22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.facet.FacetBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
+
+import java.util.Map;
+
+/**
+ * A builder for {@link PercolateRequest}s, exposing the percolate options and the different
+ * ways to set the request source.
+ */
+public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder<PercolateRequest, PercolateResponse, PercolateRequestBuilder> {
+
+ private PercolateSourceBuilder sourceBuilder;
+
+ public PercolateRequestBuilder(Client client) {
+ super((InternalClient) client, new PercolateRequest());
+ }
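+
+ // A minimal usage sketch; "my-index", "my-type" and the document are illustrative, and
+ // "client" is any org.elasticsearch.client.Client instance:
+ //
+ // PercolateResponse response = new PercolateRequestBuilder(client)
+ // .setIndices("my-index")
+ // .setDocumentType("my-type")
+ // .setPercolateDoc(PercolateSourceBuilder.docBuilder().setDoc("message", "fox"))
+ // .execute().actionGet();
+ // for (PercolateResponse.Match match : response) {
+ // // each match identifies a registered percolator query that matched the document
+ // }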
+
+ /**
+ * Sets the type of the document to percolate.
+ */
+ public PercolateRequestBuilder setDocumentType(String type) {
+ request.documentType(type);
+ return this;
+ }
+
+ /**
+ * A comma-separated list of routing values to control the shards the percolate request will
+ * be executed on.
+ */
+ public PercolateRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * List of routing values to control the shards the search will be executed on.
+ */
+ public PercolateRequestBuilder setRouting(String... routings) {
+ request.routing(Strings.arrayToCommaDelimitedString(routings));
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public PercolateRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * Enables percolating an existing document. Instead of specifying the source of the document to percolate, define
+ * a get request that will fetch a document and use its source.
+ */
+ public PercolateRequestBuilder setGetRequest(GetRequest getRequest) {
+ request.getRequest(getRequest);
+ return this;
+ }
+
+ /**
+ * Whether to only return the total count of matches, without keeping track of the matches
+ * themselves (count percolation).
+ */
+ public PercolateRequestBuilder setOnlyCount(boolean onlyCount) {
+ request.onlyCount(onlyCount);
+ return this;
+ }
+
+ /**
+ * Limits the maximum number of percolate query matches to be returned.
+ */
+ public PercolateRequestBuilder setSize(int size) {
+ sourceBuilder().setSize(size);
+ return this;
+ }
+
+ /**
+ * Similar to {@link #setScore(boolean)}, but additionally sorts the matches by score
+ * descending.
+ */
+ public PercolateRequestBuilder setSortByScore(boolean sort) {
+ sourceBuilder().setSort(sort);
+ return this;
+ }
+
+ /**
+ * Adds a sort builder. Only sorting by score descending is currently supported.
+ */
+ public PercolateRequestBuilder addSort(SortBuilder sort) {
+ sourceBuilder().addSort(sort);
+ return this;
+ }
+
+ /**
+ * Whether to compute a score for each match and include it in the response. The score is based
+ * on {@link #setPercolateQuery(QueryBuilder)}.
+ */
+ public PercolateRequestBuilder setScore(boolean score) {
+ sourceBuilder().setTrackScores(score);
+ return this;
+ }
+
+ /**
+ * Sets the document to be percolated.
+ */
+ public PercolateRequestBuilder setPercolateDoc(PercolateSourceBuilder.DocBuilder docBuilder) {
+ sourceBuilder().setDoc(docBuilder);
+ return this;
+ }
+
+ /**
+ * Sets a query to reduce the number of percolate queries to be evaluated and score the queries that match based
+ * on this query.
+ */
+ public PercolateRequestBuilder setPercolateQuery(QueryBuilder queryBuilder) {
+ sourceBuilder().setQueryBuilder(queryBuilder);
+ return this;
+ }
+
+ /**
+ * Sets a filter to reduce the number of percolate queries to be evaluated.
+ */
+ public PercolateRequestBuilder setPercolateFilter(FilterBuilder filterBuilder) {
+ sourceBuilder().setFilterBuilder(filterBuilder);
+ return this;
+ }
+
+ /**
+ * Enables highlighting for the percolated document. For each matched percolate query, the
+ * percolated document is highlighted.
+ */
+ public PercolateRequestBuilder setHighlightBuilder(HighlightBuilder highlightBuilder) {
+ sourceBuilder().setHighlightBuilder(highlightBuilder);
+ return this;
+ }
+
+ /**
+ * Add a facet definition.
+ */
+ public PercolateRequestBuilder addFacet(FacetBuilder facetBuilder) {
+ sourceBuilder().addFacet(facetBuilder);
+ return this;
+ }
+
+ /**
+ * Add an aggregation definition.
+ */
+ public PercolateRequestBuilder addAggregation(AggregationBuilder aggregationBuilder) {
+ sourceBuilder().addAggregation(aggregationBuilder);
+ return this;
+ }
+
+ /**
+ * Sets the percolate request body via a {@link PercolateSourceBuilder}.
+ */
+ public PercolateRequestBuilder setSource(PercolateSourceBuilder source) {
+ sourceBuilder = source;
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(Map<String, Object> source) {
+ request.source(source);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(Map<String, Object> source, XContentType contentType) {
+ request.source(source, contentType);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(String source) {
+ request.source(source);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(XContentBuilder sourceBuilder) {
+ request.source(sourceBuilder);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(BytesReference source) {
+ request.source(source, false);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request.source(source, unsafe);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(byte[] source) {
+ request.source(source);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(byte[] source, int offset, int length) {
+ request.source(source, offset, length);
+ return this;
+ }
+
+ public PercolateRequestBuilder setSource(byte[] source, int offset, int length, boolean unsafe) {
+ request.source(source, offset, length, unsafe);
+ return this;
+ }
+
+ private PercolateSourceBuilder sourceBuilder() {
+ if (sourceBuilder == null) {
+ sourceBuilder = new PercolateSourceBuilder();
+ }
+ return sourceBuilder;
+ }
+
+ @Override
+ public PercolateRequest request() {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder);
+ }
+ return request;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<PercolateResponse> listener) {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder);
+ }
+ ((Client) client).percolate(request, listener);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java b/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java
new file mode 100644
index 0000000..c6ae1d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.facet.InternalFacets;
+import org.elasticsearch.search.highlight.HighlightField;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Encapsulates the response of a percolate request: the matched queries, the total count of
+ * matches, and any facets and aggregations computed over the query metadata.
+ */
+public class PercolateResponse extends BroadcastOperationResponse implements Iterable<PercolateResponse.Match>, ToXContent {
+
+ public static final Match[] EMPTY = new Match[0];
+
+ private long tookInMillis;
+ private Match[] matches;
+ private long count;
+ private InternalFacets facets;
+ private InternalAggregations aggregations;
+
+ public PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures,
+ Match[] matches, long count, long tookInMillis, InternalFacets facets, InternalAggregations aggregations) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.tookInMillis = tookInMillis;
+ this.matches = matches;
+ this.count = count;
+ this.facets = facets;
+ this.aggregations = aggregations;
+ }
+
+ public PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures, long tookInMillis, Match[] matches) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.tookInMillis = tookInMillis;
+ this.matches = matches;
+ }
+
+ PercolateResponse() {
+ }
+
+ public PercolateResponse(Match[] matches) {
+ this.matches = matches;
+ }
+
+ /**
+ * How long the percolate took.
+ */
+ public TimeValue getTook() {
+ return new TimeValue(tookInMillis);
+ }
+
+ /**
+ * How long the percolate took in milliseconds.
+ */
+ public long getTookInMillis() {
+ return tookInMillis;
+ }
+
+ /**
+ * @return The queries that match with the document being percolated. This can return
+ * <code>null</code> if the request was a count percolate request.
+ */
+ public Match[] getMatches() {
+ return this.matches;
+ }
+
+ /**
+ * @return The total number of queries that have matched with the document being percolated.
+ */
+ public long getCount() {
+ return count;
+ }
+
+ /**
+ * @return Any facets that have been executed on the query metadata. This can return <code>null</code>.
+ */
+ public InternalFacets getFacets() {
+ return facets;
+ }
+
+ /**
+ * @return Any aggregations that have been executed on the query metadata. This can return <code>null</code>.
+ */
+ public InternalAggregations getAggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public Iterator<Match> iterator() {
+ return Arrays.asList(matches).iterator();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+
+ builder.field(Fields.TOOK, tookInMillis);
+ RestActions.buildBroadcastShardsHeader(builder, this);
+
+ builder.field(Fields.TOTAL, count);
+ if (matches != null) {
+ builder.startArray(Fields.MATCHES);
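+ // With the "percolate_format=ids" request parameter the matches array holds bare ids;
+ // otherwise each match is rendered as an object with _index, _id and, when available,
+ // _score and highlight fields.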
+ boolean justIds = "ids".equals(params.param("percolate_format"));
+ if (justIds) {
+ for (PercolateResponse.Match match : matches) {
+ builder.value(match.getId());
+ }
+ } else {
+ for (PercolateResponse.Match match : matches) {
+ builder.startObject();
+ builder.field(Fields._INDEX, match.getIndex());
+ builder.field(Fields._ID, match.getId());
+ float score = match.getScore();
+ if (score != PercolatorService.NO_SCORE) {
+ builder.field(Fields._SCORE, match.getScore());
+ }
+ if (match.getHighlightFields() != null) {
+ builder.startObject(Fields.HIGHLIGHT);
+ for (HighlightField field : match.getHighlightFields().values()) {
+ builder.field(field.name());
+ if (field.fragments() == null) {
+ builder.nullValue();
+ } else {
+ builder.startArray();
+ for (Text fragment : field.fragments()) {
+ builder.value(fragment);
+ }
+ builder.endArray();
+ }
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ }
+ builder.endArray();
+ }
+ if (facets != null) {
+ facets.toXContent(builder, params);
+ }
+
+ if (aggregations != null) {
+ aggregations.toXContent(builder, params);
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ tookInMillis = in.readVLong();
+ count = in.readVLong();
+ int size = in.readVInt();
+ if (size != -1) {
+ matches = new Match[size];
+ for (int i = 0; i < size; i++) {
+ matches[i] = new Match();
+ matches[i].readFrom(in);
+ }
+ }
+ facets = InternalFacets.readOptionalFacets(in);
+ aggregations = InternalAggregations.readOptionalAggregations(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(tookInMillis);
+ out.writeVLong(count);
+ if (matches == null) {
+ out.writeVInt(-1);
+ } else {
+ out.writeVInt(matches.length);
+ for (Match match : matches) {
+ match.writeTo(out);
+ }
+ }
+ out.writeOptionalStreamable(facets);
+ out.writeOptionalStreamable(aggregations);
+ }
+
+ public static class Match implements Streamable {
+
+ private Text index;
+ private Text id;
+ private float score;
+ private Map<String, HighlightField> hl;
+
+ public Match(Text index, Text id, float score, Map<String, HighlightField> hl) {
+ this.id = id;
+ this.score = score;
+ this.index = index;
+ this.hl = hl;
+ }
+
+ public Match(Text index, Text id, float score) {
+ this.id = id;
+ this.score = score;
+ this.index = index;
+ }
+
+ Match() {
+ }
+
+ public Text getIndex() {
+ return index;
+ }
+
+ public Text getId() {
+ return id;
+ }
+
+ public float getScore() {
+ return score;
+ }
+
+ public Map<String, HighlightField> getHighlightFields() {
+ return hl;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ id = in.readText();
+ index = in.readText();
+ score = in.readFloat();
+ int size = in.readVInt();
+ if (size > 0) {
+ hl = new HashMap<String, HighlightField>(size);
+ for (int j = 0; j < size; j++) {
+ hl.put(in.readString(), HighlightField.readHighlightField(in));
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeText(id);
+ out.writeText(index);
+ out.writeFloat(score);
+ if (hl != null) {
+ out.writeVInt(hl.size());
+ for (Map.Entry<String, HighlightField> entry : hl.entrySet()) {
+ out.writeString(entry.getKey());
+ entry.getValue().writeTo(out);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString TOOK = new XContentBuilderString("took");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MATCHES = new XContentBuilderString("matches");
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString _SCORE = new XContentBuilderString("_score");
+ static final XContentBuilderString HIGHLIGHT = new XContentBuilderString("highlight");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java b/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java
new file mode 100644
index 0000000..4b65515
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+
+/**
+ * A shard-level percolate request, carrying the document type, source and options for a
+ * single shard.
+ */
+public class PercolateShardRequest extends BroadcastShardOperationRequest {
+
+ private String documentType;
+ private BytesReference source;
+ private BytesReference docSource;
+ private boolean onlyCount;
+
+ public PercolateShardRequest() {
+ }
+
+ public PercolateShardRequest(String index, int shardId) {
+ super(index, shardId);
+ }
+
+ public PercolateShardRequest(String index, int shardId, PercolateRequest request) {
+ super(index, shardId, request);
+ this.documentType = request.documentType();
+ this.source = request.source();
+ this.docSource = request.docSource();
+ this.onlyCount = request.onlyCount();
+ }
+
+ public PercolateShardRequest(ShardId shardId, PercolateRequest request) {
+ super(shardId.index().name(), shardId.id());
+ this.documentType = request.documentType();
+ this.source = request.source();
+ this.docSource = request.docSource();
+ this.onlyCount = request.onlyCount();
+ }
+
+ public String documentType() {
+ return documentType;
+ }
+
+ public BytesReference source() {
+ return source;
+ }
+
+ public BytesReference docSource() {
+ return docSource;
+ }
+
+ public boolean onlyCount() {
+ return onlyCount;
+ }
+
+ void documentType(String documentType) {
+ this.documentType = documentType;
+ }
+
+ void source(BytesReference source) {
+ this.source = source;
+ }
+
+ void docSource(BytesReference docSource) {
+ this.docSource = docSource;
+ }
+
+ void onlyCount(boolean onlyCount) {
+ this.onlyCount = onlyCount;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ documentType = in.readString();
+ source = in.readBytesReference();
+ docSource = in.readBytesReference();
+ onlyCount = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(documentType);
+ out.writeBytesReference(source);
+ out.writeBytesReference(docSource);
+ out.writeBoolean(onlyCount);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java b/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java
new file mode 100644
index 0000000..e644685
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.percolator.PercolateContext;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.facet.InternalFacets;
+import org.elasticsearch.search.highlight.HighlightField;
+import org.elasticsearch.search.query.QuerySearchResult;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A shard-level percolate response, holding the matches, scores, highlighting and any facets
+ * and aggregations produced by a single shard.
+ */
+public class PercolateShardResponse extends BroadcastShardOperationResponse {
+
+ private static final BytesRef[] EMPTY_MATCHES = new BytesRef[0];
+ private static final float[] EMPTY_SCORES = new float[0];
+ private static final List<Map<String, HighlightField>> EMPTY_HL = ImmutableList.of();
+
+ private long count;
+ private float[] scores;
+ private BytesRef[] matches;
+ private List<Map<String, HighlightField>> hls;
+ private byte percolatorTypeId;
+ private int requestedSize;
+
+ private InternalFacets facets;
+ private InternalAggregations aggregations;
+
+ PercolateShardResponse() {
+ hls = new ArrayList<Map<String, HighlightField>>();
+ }
+
+ public PercolateShardResponse(BytesRef[] matches, List<Map<String, HighlightField>> hls, long count, float[] scores, PercolateContext context, String index, int shardId) {
+ super(index, shardId);
+ this.matches = matches;
+ this.hls = hls;
+ this.count = count;
+ this.scores = scores;
+ this.percolatorTypeId = context.percolatorTypeId;
+ this.requestedSize = context.size();
+ QuerySearchResult result = context.queryResult();
+ if (result != null) {
+ if (result.facets() != null) {
+ this.facets = new InternalFacets(result.facets().facets());
+ }
+ if (result.aggregations() != null) {
+ this.aggregations = (InternalAggregations) result.aggregations();
+ }
+ }
+ }
+
+ public PercolateShardResponse(BytesRef[] matches, long count, float[] scores, PercolateContext context, String index, int shardId) {
+ this(matches, EMPTY_HL, count, scores, context, index, shardId);
+ }
+
+ public PercolateShardResponse(BytesRef[] matches, List<Map<String, HighlightField>> hls, long count, PercolateContext context, String index, int shardId) {
+ this(matches, hls, count, EMPTY_SCORES, context, index, shardId);
+ }
+
+ public PercolateShardResponse(long count, PercolateContext context, String index, int shardId) {
+ this(EMPTY_MATCHES, EMPTY_HL, count, EMPTY_SCORES, context, index, shardId);
+ }
+
+ public PercolateShardResponse(PercolateContext context, String index, int shardId) {
+ this(EMPTY_MATCHES, EMPTY_HL, 0, EMPTY_SCORES, context, index, shardId);
+ }
+
+ public BytesRef[] matches() {
+ return matches;
+ }
+
+ public float[] scores() {
+ return scores;
+ }
+
+ public long count() {
+ return count;
+ }
+
+ public int requestedSize() {
+ return requestedSize;
+ }
+
+ public List<Map<String, HighlightField>> hls() {
+ return hls;
+ }
+
+ public InternalFacets facets() {
+ return facets;
+ }
+
+ public InternalAggregations aggregations() {
+ return aggregations;
+ }
+
+ public byte percolatorTypeId() {
+ return percolatorTypeId;
+ }
+
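+ /**
+ * @return Whether this shard produced any result; a percolator type id of 0x00 (the unset
+ * default) means no percolator was executed for this shard.
+ */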
+ public boolean isEmpty() {
+ return percolatorTypeId == 0x00;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ percolatorTypeId = in.readByte();
+ requestedSize = in.readVInt();
+ count = in.readVLong();
+ matches = new BytesRef[in.readVInt()];
+ for (int i = 0; i < matches.length; i++) {
+ matches[i] = in.readBytesRef();
+ }
+ scores = new float[in.readVInt()];
+ for (int i = 0; i < scores.length; i++) {
+ scores[i] = in.readFloat();
+ }
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ int mSize = in.readVInt();
+ Map<String, HighlightField> fields = new HashMap<String, HighlightField>();
+ for (int j = 0; j < mSize; j++) {
+ fields.put(in.readString(), HighlightField.readHighlightField(in));
+ }
+ hls.add(fields);
+ }
+ facets = InternalFacets.readOptionalFacets(in);
+ aggregations = InternalAggregations.readOptionalAggregations(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(percolatorTypeId);
+ out.writeVInt(requestedSize); // symmetric with readVInt() in readFrom
+ out.writeVLong(count);
+ out.writeVInt(matches.length);
+ for (BytesRef match : matches) {
+ out.writeBytesRef(match);
+ }
+ out.writeVInt(scores.length); // symmetric with readVInt() in readFrom
+ for (float score : scores) {
+ out.writeFloat(score);
+ }
+ out.writeVInt(hls.size());
+ for (Map<String, HighlightField> hl : hls) {
+ out.writeVInt(hl.size());
+ for (Map.Entry<String, HighlightField> entry : hl.entrySet()) {
+ out.writeString(entry.getKey());
+ entry.getValue().writeTo(out);
+ }
+ }
+ out.writeOptionalStreamable(facets);
+ out.writeOptionalStreamable(aggregations);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java b/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java
new file mode 100644
index 0000000..772b35d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.percolate;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.sort.ScoreSortBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Builder to create the percolate request body.
+ */
+public class PercolateSourceBuilder implements ToXContent {
+
+ private DocBuilder docBuilder;
+ private QueryBuilder queryBuilder;
+ private FilterBuilder filterBuilder;
+ private Integer size;
+ private Boolean sort;
+ private List<SortBuilder> sorts;
+ private Boolean trackScores;
+ private HighlightBuilder highlightBuilder;
+ private List<FacetBuilder> facets;
+ private List<AggregationBuilder> aggregations;
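+
+ // A minimal sketch of building a percolate source; the field name and query are
+ // illustrative (QueryBuilders is org.elasticsearch.index.query.QueryBuilders):
+ //
+ // PercolateSourceBuilder source = new PercolateSourceBuilder()
+ // .setDoc(docBuilder().setDoc("message", "the quick brown fox"))
+ // .setQueryBuilder(QueryBuilders.termQuery("tag", "urgent"))
+ // .setSize(10);
+ // BytesReference bytes = source.buildAsBytes(PercolateRequest.contentType);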
+
+ public DocBuilder percolateDocument() {
+ if (docBuilder == null) {
+ docBuilder = new DocBuilder();
+ }
+ return docBuilder;
+ }
+
+ public DocBuilder getDoc() {
+ return docBuilder;
+ }
+
+ /**
+ * Sets the document to run the percolate queries against.
+ */
+ public PercolateSourceBuilder setDoc(DocBuilder docBuilder) {
+ this.docBuilder = docBuilder;
+ return this;
+ }
+
+ public QueryBuilder getQueryBuilder() {
+ return queryBuilder;
+ }
+
+ /**
+ * Sets a query to reduce the number of percolate queries to be evaluated and score the queries that match based
+ * on this query.
+ */
+ public PercolateSourceBuilder setQueryBuilder(QueryBuilder queryBuilder) {
+ this.queryBuilder = queryBuilder;
+ return this;
+ }
+
+ public FilterBuilder getFilterBuilder() {
+ return filterBuilder;
+ }
+
+ /**
+ * Sets a filter to reduce the number of percolate queries to be evaluated.
+ */
+ public PercolateSourceBuilder setFilterBuilder(FilterBuilder filterBuilder) {
+ this.filterBuilder = filterBuilder;
+ return this;
+ }
+
+ /**
+ * Limits the maximum number of percolate query matches to be returned.
+ */
+ public PercolateSourceBuilder setSize(int size) {
+ this.size = size;
+ return this;
+ }
+
+ /**
+ * Similar to {@link #setTrackScores(boolean)}, but additionally sorts the matches by score
+ * descending.
+ */
+ public PercolateSourceBuilder setSort(boolean sort) {
+ if (sort) {
+ addSort(new ScoreSortBuilder());
+ } else {
+ this.sorts = null;
+ }
+ return this;
+ }
+
+ /**
+ * Adds a sort builder. Only sorting by score descending is currently supported.
+ */
+ public PercolateSourceBuilder addSort(SortBuilder sort) {
+ if (sorts == null) {
+ sorts = Lists.newArrayList();
+ }
+ sorts.add(sort);
+ return this;
+ }
+
+ /**
+ * Whether to compute a score for each match and include it in the response. The score is based on
+ * {@link #setQueryBuilder(QueryBuilder)}.
+ */
+ public PercolateSourceBuilder setTrackScores(boolean trackScores) {
+ this.trackScores = trackScores;
+ return this;
+ }
+
+ /**
+ * Enables highlighting for the percolated document. For each matched percolate query, the
+ * percolated document is highlighted.
+ */
+ public PercolateSourceBuilder setHighlightBuilder(HighlightBuilder highlightBuilder) {
+ this.highlightBuilder = highlightBuilder;
+ return this;
+ }
+
+ /**
+ * Add a facet definition.
+ */
+ public PercolateSourceBuilder addFacet(FacetBuilder facetBuilder) {
+ if (facets == null) {
+ facets = Lists.newArrayList();
+ }
+ facets.add(facetBuilder);
+ return this;
+ }
+
+ /**
+ * Add an aggregation definition.
+ */
+ public PercolateSourceBuilder addAggregation(AggregationBuilder aggregationBuilder) {
+ if (aggregations == null) {
+ aggregations = Lists.newArrayList();
+ }
+ aggregations.add(aggregationBuilder);
+ return this;
+ }
+
+ public BytesReference buildAsBytes(XContentType contentType) throws SearchSourceBuilderException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ toXContent(builder, ToXContent.EMPTY_PARAMS);
+ return builder.bytes();
+ } catch (Exception e) {
+ throw new SearchSourceBuilderException("Failed to build search source", e);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (docBuilder != null) {
+ docBuilder.toXContent(builder, params);
+ }
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ }
+ if (filterBuilder != null) {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ if (size != null) {
+ builder.field("size", size);
+ }
+ if (sorts != null) {
+ builder.startArray("sort");
+ for (SortBuilder sort : sorts) {
+ builder.startObject();
+ sort.toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ if (trackScores != null) {
+ builder.field("track_scores", trackScores);
+ }
+ if (highlightBuilder != null) {
+ highlightBuilder.toXContent(builder, params);
+ }
+ if (facets != null) {
+ builder.field("facets");
+ builder.startObject();
+ for (FacetBuilder facet : facets) {
+ facet.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+ if (aggregations != null) {
+ builder.field("aggregations");
+ builder.startObject();
+ for (AbstractAggregationBuilder aggregation : aggregations) {
+ aggregation.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static DocBuilder docBuilder() {
+ return new DocBuilder();
+ }
+
+ public static class DocBuilder implements ToXContent {
+
+ private BytesReference doc;
+
+ public DocBuilder setDoc(BytesReference doc) {
+ this.doc = doc;
+ return this;
+ }
+
+ public DocBuilder setDoc(String field, Object value) {
+ Map<String, Object> values = new HashMap<String, Object>(2);
+ values.put(field, value);
+ setDoc(values);
+ return this;
+ }
+
+ public DocBuilder setDoc(String doc) {
+ this.doc = new BytesArray(doc);
+ return this;
+ }
+
+ public DocBuilder setDoc(XContentBuilder doc) {
+ this.doc = doc.bytes();
+ return this;
+ }
+
+ public DocBuilder setDoc(Map doc) {
+ return setDoc(doc, PercolateRequest.contentType);
+ }
+
+ public DocBuilder setDoc(Map doc, XContentType contentType) {
+ try {
+ return setDoc(XContentFactory.contentBuilder(contentType).map(doc));
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + doc + "]", e);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
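+ // If the stored doc bytes already match the builder's content type they can be embedded
+ // raw; otherwise they are re-encoded by streaming them through a parser.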
+ XContentType contentType = XContentFactory.xContentType(doc);
+ if (contentType == builder.contentType()) {
+ builder.rawField("doc", doc);
+ } else {
+ XContentParser parser = XContentFactory.xContent(contentType).createParser(doc);
+ try {
+ parser.nextToken();
+ builder.field("doc");
+ builder.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+ return builder;
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java
new file mode 100644
index 0000000..e58e5f4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java
@@ -0,0 +1,363 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.percolate;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * Transport action for multi percolate requests: existing documents are first fetched via a
+ * multi get request, then all percolate requests are grouped by shard and executed.
+ */
+public class TransportMultiPercolateAction extends TransportAction<MultiPercolateRequest, MultiPercolateResponse> {
+
+ private final ClusterService clusterService;
+ private final PercolatorService percolatorService;
+
+ private final TransportMultiGetAction multiGetAction;
+ private final TransportShardMultiPercolateAction shardMultiPercolateAction;
+
+ @Inject
+ public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportShardMultiPercolateAction shardMultiPercolateAction,
+ ClusterService clusterService, TransportService transportService, PercolatorService percolatorService,
+ TransportMultiGetAction multiGetAction) {
+ super(settings, threadPool);
+ this.shardMultiPercolateAction = shardMultiPercolateAction;
+ this.clusterService = clusterService;
+ this.percolatorService = percolatorService;
+ this.multiGetAction = multiGetAction;
+
+ transportService.registerHandler(MultiPercolateAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final MultiPercolateRequest request, final ActionListener<MultiPercolateResponse> listener) {
+ final ClusterState clusterState = clusterService.state();
+ clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
+
+ final List<Object> percolateRequests = new ArrayList<Object>(request.requests().size());
+ // The request can contain a mixture of normal percolate requests and
+ // percolate-existing-document requests, so keep track of which percolate request had a
+ // get request.
+ final IntArrayList getRequestSlots = new IntArrayList();
+ List<GetRequest> existingDocsRequests = new ArrayList<GetRequest>();
+ for (int slot = 0; slot < request.requests().size(); slot++) {
+ PercolateRequest percolateRequest = request.requests().get(slot);
+ percolateRequest.startTime = System.currentTimeMillis();
+ percolateRequests.add(percolateRequest);
+ if (percolateRequest.getRequest() != null) {
+ existingDocsRequests.add(percolateRequest.getRequest());
+ getRequestSlots.add(slot);
+ }
+ }
+
+ if (!existingDocsRequests.isEmpty()) {
+ final MultiGetRequest multiGetRequest = new MultiGetRequest();
+ for (GetRequest getRequest : existingDocsRequests) {
+ multiGetRequest.add(
+ new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id())
+ .routing(getRequest.routing())
+ );
+ }
+
+ multiGetAction.execute(multiGetRequest, new ActionListener<MultiGetResponse>() {
+
+ @Override
+ public void onResponse(MultiGetResponse multiGetItemResponses) {
+ for (int i = 0; i < multiGetItemResponses.getResponses().length; i++) {
+ MultiGetItemResponse itemResponse = multiGetItemResponses.getResponses()[i];
+ int slot = getRequestSlots.get(i);
+ if (!itemResponse.isFailed()) {
+ GetResponse getResponse = itemResponse.getResponse();
+ if (getResponse.isExists()) {
+ PercolateRequest originalRequest = (PercolateRequest) percolateRequests.get(slot);
+ percolateRequests.set(slot, new PercolateRequest(originalRequest, getResponse.getSourceAsBytesRef()));
+ } else {
+ logger.trace("mpercolate existing doc, item[{}] doesn't exist", slot);
+ percolateRequests.set(slot, new DocumentMissingException(null, getResponse.getType(), getResponse.getId()));
+ }
+ } else {
+ logger.trace("mpercolate existing doc, item[{}] failure {}", slot, itemResponse.getFailure());
+ percolateRequests.set(slot, itemResponse.getFailure());
+ }
+ }
+ new ASyncAction(percolateRequests, listener, clusterState).run();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ } else {
+ new ASyncAction(percolateRequests, listener, clusterState).run();
+ }
+
+ }
+
+
+ private class ASyncAction {
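+
+ // Bookkeeping: one slot per percolate item. Each slot collects one response (or failure)
+ // per shard; when a slot's per-item counter reaches zero the slot is reduced, and when all
+ // slots have been reduced the final multi percolate response is sent.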
+
+ final ActionListener<MultiPercolateResponse> finalListener;
+ final Map<ShardId, TransportShardMultiPercolateAction.Request> requestsByShard;
+ final List<Object> percolateRequests;
+
+ final Map<ShardId, IntArrayList> shardToSlots;
+ final AtomicInteger expectedOperations;
+ final AtomicArray<Object> reducedResponses;
+ final AtomicReferenceArray<AtomicInteger> expectedOperationsPerItem;
+ final AtomicReferenceArray<AtomicReferenceArray> responsesByItemAndShard;
+
+ ASyncAction(List<Object> percolateRequests, ActionListener<MultiPercolateResponse> finalListener, ClusterState clusterState) {
+ this.finalListener = finalListener;
+ this.percolateRequests = percolateRequests;
+ responsesByItemAndShard = new AtomicReferenceArray<AtomicReferenceArray>(percolateRequests.size());
+ expectedOperationsPerItem = new AtomicReferenceArray<AtomicInteger>(percolateRequests.size());
+ reducedResponses = new AtomicArray<Object>(percolateRequests.size());
+
+ // Resolving concrete indices and routing and grouping the requests by shard
+ requestsByShard = new HashMap<ShardId, TransportShardMultiPercolateAction.Request>();
+ // Keep track of which slots belong to which shard, in case a request to a shard fails on all copies
+ shardToSlots = new HashMap<ShardId, IntArrayList>();
+ int expectedResults = 0;
+ for (int slot = 0; slot < percolateRequests.size(); slot++) {
+ Object element = percolateRequests.get(slot);
+ assert element != null;
+ if (element instanceof PercolateRequest) {
+ PercolateRequest percolateRequest = (PercolateRequest) element;
+ String[] concreteIndices;
+ try {
+ concreteIndices = clusterState.metaData().concreteIndices(percolateRequest.indices(), percolateRequest.indicesOptions());
+ } catch (IndexMissingException e) {
+ reducedResponses.set(slot, e);
+ responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
+ expectedOperationsPerItem.set(slot, new AtomicInteger(0));
+ continue;
+ }
+ Map<String, Set<String>> routing = clusterState.metaData().resolveSearchRouting(percolateRequest.routing(), percolateRequest.indices());
+ // TODO: I only need shardIds, ShardIterator(ShardRouting) is only needed in TransportShardMultiPercolateAction
+ GroupShardsIterator shards = clusterService.operationRouting().searchShards(
+ clusterState, percolateRequest.indices(), concreteIndices, routing, percolateRequest.preference()
+ );
+ if (shards.size() == 0) {
+ reducedResponses.set(slot, new UnavailableShardsException(null, "No shards available"));
+ responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
+ expectedOperationsPerItem.set(slot, new AtomicInteger(0));
+ continue;
+ }
+
+ responsesByItemAndShard.set(slot, new AtomicReferenceArray(shards.size()));
+ expectedOperationsPerItem.set(slot, new AtomicInteger(shards.size()));
+ for (ShardIterator shard : shards) {
+ ShardId shardId = shard.shardId();
+ TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId);
+ if (requests == null) {
+ requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(shard.shardId().getIndex(), shardId.id(), percolateRequest.preference()));
+ }
+ logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot);
+ requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest)));
+
+ IntArrayList items = shardToSlots.get(shardId);
+ if (items == null) {
+ shardToSlots.put(shardId, items = new IntArrayList());
+ }
+ items.add(slot);
+ }
+ expectedResults++;
+ } else if (element instanceof Throwable || element instanceof MultiGetResponse.Failure) {
+ logger.trace("item[{}] won't be executed, reason: {}", slot, element);
+ reducedResponses.set(slot, element);
+ responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
+ expectedOperationsPerItem.set(slot, new AtomicInteger(0));
+ }
+ }
+ expectedOperations = new AtomicInteger(expectedResults);
+ }
+
+ void run() {
+ if (expectedOperations.get() == 0) {
+ finish();
+ return;
+ }
+
+ logger.trace("mpercolate executing for shards {}", requestsByShard.keySet());
+ for (Map.Entry<ShardId, TransportShardMultiPercolateAction.Request> entry : requestsByShard.entrySet()) {
+ final ShardId shardId = entry.getKey();
+ TransportShardMultiPercolateAction.Request shardRequest = entry.getValue();
+ shardMultiPercolateAction.execute(shardRequest, new ActionListener<TransportShardMultiPercolateAction.Response>() {
+
+ @Override
+ public void onResponse(TransportShardMultiPercolateAction.Response response) {
+ onShardResponse(shardId, response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ onShardFailure(shardId, e);
+ }
+
+ });
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) {
+ logger.debug("{} Percolate shard response", shardId);
+ try {
+ for (TransportShardMultiPercolateAction.Response.Item item : response.items()) {
+ AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot());
+ if (shardResults == null) {
+ assert false : "shardResults can't be null";
+ continue;
+ }
+
+ if (item.failed()) {
+ shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, item.error().string()));
+ } else {
+ shardResults.set(shardId.id(), item.response());
+ }
+
+ assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower than one";
+ if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) {
+ // If reduce fails, the whole request is failed via the catch clause below, so
+ // expectedOperationsPerItem will not be decremented twice.
+ reduce(item.slot());
+ }
+ }
+ } catch (Throwable e) {
+ logger.error("{} Percolate original reduce error", e, shardId);
+ finalListener.onFailure(e);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ void onShardFailure(ShardId shardId, Throwable e) {
+ logger.debug("{} Shard multi percolate failure", e, shardId);
+ try {
+ IntArrayList slots = shardToSlots.get(shardId);
+ for (int i = 0; i < slots.size(); i++) {
+ int slot = slots.get(i);
+ AtomicReferenceArray shardResults = responsesByItemAndShard.get(slot);
+ if (shardResults == null) {
+ continue;
+ }
+
+ shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, e));
+ assert expectedOperationsPerItem.get(slot).get() >= 1 : "slot[" + slot + "] can't be lower than one. Caused by: " + e.getMessage();
+ if (expectedOperationsPerItem.get(slot).decrementAndGet() == 0) {
+ reduce(slot);
+ }
+ }
+ } catch (Throwable t) {
+ logger.error("{} Percolate original reduce error, original error {}", t, shardId, e);
+ finalListener.onFailure(t);
+ }
+ }
+
+ void reduce(int slot) {
+ AtomicReferenceArray shardResponses = responsesByItemAndShard.get(slot);
+ PercolateResponse reducedResponse = TransportPercolateAction.reduce((PercolateRequest) percolateRequests.get(slot), shardResponses, percolatorService);
+ reducedResponses.set(slot, reducedResponse);
+ assert expectedOperations.get() >= 1 : "slot[" + slot + "] expected operations should be >= 1 but is " + expectedOperations.get();
+ if (expectedOperations.decrementAndGet() == 0) {
+ finish();
+ }
+ }
+
+ void finish() {
+ MultiPercolateResponse.Item[] finalResponse = new MultiPercolateResponse.Item[reducedResponses.length()];
+ for (int slot = 0; slot < reducedResponses.length(); slot++) {
+ Object element = reducedResponses.get(slot);
+ assert element != null : "Element[" + slot + "] shouldn't be null";
+ if (element instanceof PercolateResponse) {
+ finalResponse[slot] = new MultiPercolateResponse.Item((PercolateResponse) element);
+ } else if (element instanceof Throwable) {
+ finalResponse[slot] = new MultiPercolateResponse.Item(ExceptionsHelper.detailedMessage((Throwable) element));
+ } else if (element instanceof MultiGetResponse.Failure) {
+ finalResponse[slot] = new MultiPercolateResponse.Item(((MultiGetResponse.Failure)element).getMessage());
+ }
+ }
+ finalListener.onResponse(new MultiPercolateResponse(finalResponse));
+ }
+
+ }
+
+
+ class TransportHandler extends BaseTransportRequestHandler<MultiPercolateRequest> {
+
+ @Override
+ public MultiPercolateRequest newInstance() {
+ return new MultiPercolateRequest();
+ }
+
+ @Override
+ public void messageReceived(final MultiPercolateRequest request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<MultiPercolateResponse>() {
+ @Override
+ public void onResponse(MultiPercolateResponse response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [mpercolate] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java
new file mode 100644
index 0000000..f8bc377
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.TransportGetAction;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.percolator.PercolateException;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Transport action for percolate requests: an existing document is optionally fetched first,
+ * then the request is broadcast to the relevant shards and the shard responses are reduced.
+ */
+public class TransportPercolateAction extends TransportBroadcastOperationAction<PercolateRequest, PercolateResponse, PercolateShardRequest, PercolateShardResponse> {
+
+ private final PercolatorService percolatorService;
+ private final TransportGetAction getAction;
+
+ @Inject
+ public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ TransportService transportService, PercolatorService percolatorService,
+ TransportGetAction getAction) {
+ super(settings, threadPool, clusterService, transportService);
+ this.percolatorService = percolatorService;
+ this.getAction = getAction;
+ }
+
+ @Override
+ protected void doExecute(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
+ request.startTime = System.currentTimeMillis();
+ if (request.getRequest() != null) {
+ getAction.execute(request.getRequest(), new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse getResponse) {
+ if (!getResponse.isExists()) {
+ onFailure(new DocumentMissingException(null, request.getRequest().type(), request.getRequest().id()));
+ return;
+ }
+
+ BytesReference docSource = getResponse.getSourceAsBytesRef();
+ TransportPercolateAction.super.doExecute(new PercolateRequest(request, docSource), listener);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ listener.onFailure(e);
+ }
+ });
+ } else {
+ super.doExecute(request, listener);
+ }
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.PERCOLATE;
+ }
+
+ @Override
+ protected PercolateRequest newRequest() {
+ return new PercolateRequest();
+ }
+
+ @Override
+ protected String transportAction() {
+ return PercolateAction.NAME;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, PercolateRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, PercolateRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
+ }
+
+ @Override
+ protected PercolateResponse newResponse(PercolateRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ return reduce(request, shardsResponses, percolatorService);
+ }
+
+ public static PercolateResponse reduce(PercolateRequest request, AtomicReferenceArray shardsResponses, PercolatorService percolatorService) {
+ int successfulShards = 0;
+ int failedShards = 0;
+
+ List<PercolateShardResponse> shardResults = null;
+ List<ShardOperationFailedException> shardFailures = null;
+
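+        // the percolator type id of the first non-empty shard response drives the reduce step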
+ byte percolatorTypeId = 0x00;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+                // simply ignore non-active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ PercolateShardResponse percolateShardResponse = (PercolateShardResponse) shardResponse;
+ successfulShards++;
+ if (!percolateShardResponse.isEmpty()) {
+ if (shardResults == null) {
+ percolatorTypeId = percolateShardResponse.percolatorTypeId();
+ shardResults = newArrayList();
+ }
+ shardResults.add(percolateShardResponse);
+ }
+ }
+ }
+
+ if (shardResults == null) {
+ long tookInMillis = System.currentTimeMillis() - request.startTime;
+ PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;
+ return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);
+ } else {
+ PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults);
+ long tookInMillis = System.currentTimeMillis() - request.startTime;
+ return new PercolateResponse(
+ shardsResponses.length(), successfulShards, failedShards, shardFailures,
+ result.matches(), result.count(), tookInMillis, result.reducedFacets(), result.reducedAggregations()
+ );
+ }
+ }
+
+ @Override
+ protected PercolateShardRequest newShardRequest() {
+ return new PercolateShardRequest();
+ }
+
+ @Override
+ protected PercolateShardRequest newShardRequest(ShardRouting shard, PercolateRequest request) {
+ return new PercolateShardRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected PercolateShardResponse newShardResponse() {
+ return new PercolateShardResponse();
+ }
+
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, PercolateRequest request, String[] concreteIndices) {
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
+ return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
+ }
+
+ @Override
+ protected PercolateShardResponse shardOperation(PercolateShardRequest request) throws ElasticsearchException {
+ try {
+ return percolatorService.percolate(request);
+ } catch (Throwable e) {
+ logger.trace("[{}][{}] failed to percolate", e, request.index(), request.shardId());
+ ShardId shardId = new ShardId(request.index(), request.shardId());
+ throw new PercolateException(shardId, "failed to percolate", e);
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
new file mode 100644
index 0000000..4243862
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
+import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ */
+public class TransportShardMultiPercolateAction extends TransportShardSingleOperationAction<TransportShardMultiPercolateAction.Request, TransportShardMultiPercolateAction.Response> {
+
+ private final PercolatorService percolatorService;
+
+ @Inject
+ public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, PercolatorService percolatorService) {
+ super(settings, threadPool, clusterService, transportService);
+ this.percolatorService = percolatorService;
+ }
+
+ @Override
+ protected String transportAction() {
+ return "mpercolate/shard";
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.PERCOLATE;
+ }
+
+ @Override
+ protected Request newRequest() {
+ return new Request();
+ }
+
+ @Override
+ protected Response newResponse() {
+ return new Response();
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, Request request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState state, Request request) throws ElasticsearchException {
+ return clusterService.operationRouting().getShards(
+ clusterService.state(), request.index(), request.shardId(), request.preference
+ );
+ }
+
+ @Override
+ protected Response shardOperation(Request request, int shardId) throws ElasticsearchException {
+        // TODO: Look into combining the shard request's docs into one in-memory index.
+ Response response = new Response();
+ response.items = new ArrayList<Response.Item>(request.items.size());
+ for (Request.Item item : request.items) {
+ Response.Item responseItem;
+ int slot = item.slot;
+ try {
+ responseItem = new Response.Item(slot, percolatorService.percolate(item.request));
+ } catch (Throwable t) {
+ if (TransportActions.isShardNotAvailableException(t)) {
+ throw (ElasticsearchException) t;
+ } else {
+ logger.debug("[{}][{}] failed to multi percolate", t, request.index(), request.shardId());
+ responseItem = new Response.Item(slot, new StringText(ExceptionsHelper.detailedMessage(t)));
+ }
+ }
+ response.items.add(responseItem);
+ }
+ return response;
+ }
+
+
+ public static class Request extends SingleShardOperationRequest {
+
+ private int shardId;
+ private String preference;
+ private List<Item> items;
+
+ public Request() {
+ }
+
+ public Request(String concreteIndex, int shardId, String preference) {
+ this.index = concreteIndex;
+ this.shardId = shardId;
+ this.preference = preference;
+ this.items = new ArrayList<Item>();
+ }
+
+ public int shardId() {
+ return shardId;
+ }
+
+ public void add(Item item) {
+ items.add(item);
+ }
+
+ public List<Item> items() {
+ return items;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = in.readVInt();
+ preference = in.readOptionalString();
+ int size = in.readVInt();
+ items = new ArrayList<Item>(size);
+ for (int i = 0; i < size; i++) {
+ int slot = in.readVInt();
+ PercolateShardRequest shardRequest = new PercolateShardRequest(index(), shardId);
+ shardRequest.documentType(in.readString());
+ shardRequest.source(in.readBytesReference());
+ shardRequest.docSource(in.readBytesReference());
+ shardRequest.onlyCount(in.readBoolean());
+ Item item = new Item(slot, shardRequest);
+ items.add(item);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shardId);
+ out.writeOptionalString(preference);
+ out.writeVInt(items.size());
+ for (Item item : items) {
+ out.writeVInt(item.slot);
+ out.writeString(item.request.documentType());
+ out.writeBytesReference(item.request.source());
+ out.writeBytesReference(item.request.docSource());
+ out.writeBoolean(item.request.onlyCount());
+ }
+ }
+
+ public static class Item {
+
+ private final int slot;
+ private final PercolateShardRequest request;
+
+ public Item(int slot, PercolateShardRequest request) {
+ this.slot = slot;
+ this.request = request;
+ }
+
+ public int slot() {
+ return slot;
+ }
+
+ public PercolateShardRequest request() {
+ return request;
+ }
+
+ }
+
+ }
+
+ public static class Response extends ActionResponse {
+
+ private List<Item> items;
+
+ public List<Item> items() {
+ return items;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(items.size());
+ for (Item item : items) {
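+                // a boolean marker distinguishes a successful shard response from a serialized error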
+ out.writeVInt(item.slot);
+ if (item.response != null) {
+ out.writeBoolean(true);
+ item.response.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ out.writeText(item.error);
+ }
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ items = new ArrayList<Item>(size);
+ for (int i = 0; i < size; i++) {
+ int slot = in.readVInt();
+ if (in.readBoolean()) {
+ PercolateShardResponse shardResponse = new PercolateShardResponse();
+ shardResponse.readFrom(in);
+ items.add(new Item(slot, shardResponse));
+ } else {
+ items.add(new Item(slot, in.readText()));
+ }
+ }
+ }
+
+ public static class Item {
+
+ private final int slot;
+ private final PercolateShardResponse response;
+ private final Text error;
+
+ public Item(Integer slot, PercolateShardResponse response) {
+ this.slot = slot;
+ this.response = response;
+ this.error = null;
+ }
+
+ public Item(Integer slot, Text error) {
+ this.slot = slot;
+ this.error = error;
+ this.response = null;
+ }
+
+ public int slot() {
+ return slot;
+ }
+
+ public PercolateShardResponse response() {
+ return response;
+ }
+
+ public Text error() {
+ return error;
+ }
+
+ public boolean failed() {
+ return error != null;
+ }
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java
new file mode 100644
index 0000000..762447e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class ClearScrollAction extends Action<ClearScrollRequest, ClearScrollResponse, ClearScrollRequestBuilder> {
+
+ public static final ClearScrollAction INSTANCE = new ClearScrollAction();
+ public static final String NAME = "clear_sc";
+
+ private ClearScrollAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClearScrollResponse newResponse() {
+ return new ClearScrollResponse();
+ }
+
+ @Override
+ public ClearScrollRequestBuilder newRequestBuilder(Client client) {
+ return new ClearScrollRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java b/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java
new file mode 100644
index 0000000..c6b6b31
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
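+ * A request to explicitly clear search scroll contexts that are no longer needed.
+ * <p>
+ * A minimal usage sketch (illustrative; {@code scrollId} is assumed to come from a prior scroll search):
+ * <pre>
+ * ClearScrollRequest request = new ClearScrollRequest();
+ * request.addScrollId(scrollId);
+ * </pre>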
+ */
+public class ClearScrollRequest extends ActionRequest<ClearScrollRequest> {
+
+ private List<String> scrollIds;
+
+ public List<String> getScrollIds() {
+ return scrollIds;
+ }
+
+ public void setScrollIds(List<String> scrollIds) {
+ this.scrollIds = scrollIds;
+ }
+
+ public void addScrollId(String scrollId) {
+ if (scrollIds == null) {
+ scrollIds = newArrayList();
+ }
+ scrollIds.add(scrollId);
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (scrollIds == null || scrollIds.isEmpty()) {
+ validationException = addValidationError("no scroll ids specified", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ scrollIds = Arrays.asList(in.readStringArray());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (scrollIds == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeStringArray(scrollIds.toArray(new String[scrollIds.size()]));
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java
new file mode 100644
index 0000000..35dfbe2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+
+import java.util.List;
+
+/**
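+ * A request builder for clearing search scroll contexts.
+ * <p>
+ * A minimal usage sketch (illustrative; assumes an existing {@code Client} named {@code client} and a
+ * {@code scrollId} from a prior scroll search):
+ * <pre>
+ * ClearScrollResponse response = new ClearScrollRequestBuilder(client)
+ *         .addScrollId(scrollId)
+ *         .execute().actionGet();
+ * </pre>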
+ */
+public class ClearScrollRequestBuilder extends ActionRequestBuilder<ClearScrollRequest, ClearScrollResponse, ClearScrollRequestBuilder> {
+
+ public ClearScrollRequestBuilder(Client client) {
+ super((InternalClient) client, new ClearScrollRequest());
+ }
+
+ public ClearScrollRequestBuilder setScrollIds(List<String> cursorIds) {
+ request.setScrollIds(cursorIds);
+ return this;
+ }
+
+ public ClearScrollRequestBuilder addScrollId(String cursorId) {
+ request.addScrollId(cursorId);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<ClearScrollResponse> listener) {
+ ((Client) client).clearScroll(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
new file mode 100644
index 0000000..f3ec450
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public class ClearScrollResponse extends ActionResponse {
+
+ private boolean succeeded;
+
+ public ClearScrollResponse(boolean succeeded) {
+ this.succeeded = succeeded;
+ }
+
+ ClearScrollResponse() {
+ }
+
+ public boolean isSucceeded() {
+ return succeeded;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ succeeded = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(succeeded);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java
new file mode 100644
index 0000000..007cc6f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class MultiSearchAction extends Action<MultiSearchRequest, MultiSearchResponse, MultiSearchRequestBuilder> {
+
+ public static final MultiSearchAction INSTANCE = new MultiSearchAction();
+ public static final String NAME = "msearch";
+
+ private MultiSearchAction() {
+ super(NAME);
+ }
+
+ @Override
+ public MultiSearchResponse newResponse() {
+ return new MultiSearchResponse();
+ }
+
+ @Override
+ public MultiSearchRequestBuilder newRequestBuilder(Client client) {
+ return new MultiSearchRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java
new file mode 100644
index 0000000..7c79117
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A multi search API request.
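+ * <p>
+ * A minimal usage sketch (illustrative; assumes an existing {@code Client} named {@code client}):
+ * <pre>
+ * MultiSearchRequest request = new MultiSearchRequest()
+ *         .add(new SearchRequest("index1"))
+ *         .add(new SearchRequest("index2"));
+ * MultiSearchResponse response = client.multiSearch(request).actionGet();
+ * </pre>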
+ */
+public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> {
+
+ private List<SearchRequest> requests = Lists.newArrayList();
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ /**
+     * Add a search request to execute. Note, the order is important: the search responses will be returned in the
+     * same order as the search requests.
+ */
+ public MultiSearchRequest add(SearchRequestBuilder request) {
+ requests.add(request.request());
+ return this;
+ }
+
+ /**
+     * Add a search request to execute. Note, the order is important: the search responses will be returned in the
+     * same order as the search requests.
+ */
+ public MultiSearchRequest add(SearchRequest request) {
+ requests.add(request);
+ return this;
+ }
+
+ public MultiSearchRequest add(byte[] data, int from, int length, boolean contentUnsafe,
+ @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception {
+ return add(new BytesArray(data, from, length), contentUnsafe, indices, types, searchType, null, IndicesOptions.strict(), true);
+ }
+
+ public MultiSearchRequest add(BytesReference data, boolean contentUnsafe, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception {
+ return add(data, contentUnsafe, indices, types, searchType, null, indicesOptions, true);
+ }
+
+ public MultiSearchRequest add(BytesReference data, boolean contentUnsafe, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception {
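+        // the body uses the same line-delimited format as the REST _msearch endpoint: a header object
+        // (index, type, search_type, preference, routing, ...) followed by a search body, each terminated
+        // by the xContent stream separator (a newline for JSON), for example:
+        //   {"index": "test"}
+        //   {"query": {"match_all": {}}}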
+ XContent xContent = XContentFactory.xContent(data);
+ int from = 0;
+ int length = data.length();
+ byte marker = xContent.streamSeparator();
+ while (true) {
+ int nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+            // support a leading \n (an empty first line)
+ if (nextMarker == 0) {
+ from = nextMarker + 1;
+ continue;
+ }
+
+ SearchRequest searchRequest = new SearchRequest();
+ if (indices != null) {
+ searchRequest.indices(indices);
+ }
+ if (indicesOptions != null) {
+ searchRequest.indicesOptions(indicesOptions);
+ }
+ if (types != null && types.length > 0) {
+ searchRequest.types(types);
+ }
+ if (routing != null) {
+ searchRequest.routing(routing);
+ }
+ searchRequest.searchType(searchType);
+
+ boolean ignoreUnavailable = IndicesOptions.strict().ignoreUnavailable();
+ boolean allowNoIndices = IndicesOptions.strict().allowNoIndices();
+ boolean expandWildcardsOpen = IndicesOptions.strict().expandWildcardsOpen();
+ boolean expandWildcardsClosed = IndicesOptions.strict().expandWildcardsClosed();
+
+ // now parse the action
+ if (nextMarker - from > 0) {
+ XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from));
+ try {
+                    // Move to START_OBJECT; if token is null, the data is empty
+ XContentParser.Token token = parser.nextToken();
+ if (token != null) {
+ assert token == XContentParser.Token.START_OBJECT;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) {
+ if (!allowExplicitIndex) {
+ throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed");
+ }
+ searchRequest.indices(Strings.splitStringByCommaToArray(parser.text()));
+ } else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) {
+ searchRequest.types(Strings.splitStringByCommaToArray(parser.text()));
+ } else if ("search_type".equals(currentFieldName) || "searchType".equals(currentFieldName)) {
+ searchRequest.searchType(parser.text());
+ } else if ("preference".equals(currentFieldName)) {
+ searchRequest.preference(parser.text());
+ } else if ("routing".equals(currentFieldName)) {
+ searchRequest.routing(parser.text());
+ } else if ("ignore_unavailable".equals(currentFieldName) || "ignoreUnavailable".equals(currentFieldName)) {
+ ignoreUnavailable = parser.booleanValue();
+ } else if ("allow_no_indices".equals(currentFieldName) || "allowNoIndices".equals(currentFieldName)) {
+ allowNoIndices = parser.booleanValue();
+ } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) {
+ String[] wildcards = Strings.splitStringByCommaToArray(parser.text());
+ for (String wildcard : wildcards) {
+ if ("open".equals(wildcard)) {
+ expandWildcardsOpen = true;
+ } else if ("closed".equals(wildcard)) {
+ expandWildcardsClosed = true;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+ }
+ }
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) {
+ if (!allowExplicitIndex) {
+ throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed");
+ }
+ searchRequest.indices(parseArray(parser));
+ } else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) {
+ searchRequest.types(parseArray(parser));
+ } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) {
+ String[] wildcards = parseArray(parser);
+ for (String wildcard : wildcards) {
+ if ("open".equals(wildcard)) {
+ expandWildcardsOpen = true;
+ } else if ("closed".equals(wildcard)) {
+ expandWildcardsClosed = true;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+ }
+ }
+ } else {
+ throw new ElasticsearchParseException(currentFieldName + " doesn't support arrays");
+ }
+ }
+ }
+ }
+ } finally {
+ parser.close();
+ }
+ }
+ searchRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
+
+ // move pointers
+ from = nextMarker + 1;
+ // now for the body
+ nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+
+ searchRequest.source(data.slice(from, nextMarker - from), contentUnsafe);
+ // move pointers
+ from = nextMarker + 1;
+
+ add(searchRequest);
+ }
+
+ return this;
+ }
+
+ private String[] parseArray(XContentParser parser) throws IOException {
+ final List<String> list = new ArrayList<String>();
+ assert parser.currentToken() == XContentParser.Token.START_ARRAY;
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ list.add(parser.text());
+ }
+ return list.toArray(new String[list.size()]);
+ }
+
+ private int findNextMarker(byte marker, int from, BytesReference data, int length) {
+ for (int i = from; i < length; i++) {
+ if (data.get(i) == marker) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ public List<SearchRequest> requests() {
+ return this.requests;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (requests.isEmpty()) {
+ validationException = addValidationError("no requests added", validationException);
+ }
+ for (int i = 0; i < requests.size(); i++) {
+ ActionRequestValidationException ex = requests.get(i).validate();
+ if (ex != null) {
+ if (validationException == null) {
+ validationException = new ActionRequestValidationException();
+ }
+ validationException.addValidationErrors(ex.validationErrors());
+ }
+ }
+
+ return validationException;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public MultiSearchRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ SearchRequest request = new SearchRequest();
+ request.readFrom(in);
+ requests.add(request);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(requests.size());
+ for (SearchRequest request : requests) {
+ request.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java
new file mode 100644
index 0000000..cb54edf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+
+/**
+ * A request builder for multiple search requests.
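+ * <p>
+ * A minimal usage sketch (illustrative; assumes an existing {@code Client} named {@code client}):
+ * <pre>
+ * MultiSearchResponse response = client.prepareMultiSearch()
+ *         .add(client.prepareSearch("index1"))
+ *         .add(client.prepareSearch("index2"))
+ *         .execute().actionGet();
+ * </pre>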
+ */
+public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchRequest, MultiSearchResponse, MultiSearchRequestBuilder> {
+
+ public MultiSearchRequestBuilder(Client client) {
+ super((InternalClient) client, new MultiSearchRequest());
+ }
+
+ /**
+     * Add a search request to execute. Note, the order is important: the search responses will be returned in the
+     * same order as the search requests.
+     * <p/>
+     * If indices options have already been set on the search request, then the indicesOptions of the multi search
+     * request (if set) will not be applied to it.
+ */
+ public MultiSearchRequestBuilder add(SearchRequest request) {
+ if (request.indicesOptions() == IndicesOptions.strict() && request().indicesOptions() != IndicesOptions.strict()) {
+ request.indicesOptions(request().indicesOptions());
+ }
+
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+     * Add a search request to execute. Note, the order is important: the search responses will be returned in the
+     * same order as the search requests.
+ */
+ public MultiSearchRequestBuilder add(SearchRequestBuilder request) {
+ if (request.request().indicesOptions() == IndicesOptions.strict() && request().indicesOptions() != IndicesOptions.strict()) {
+ request.request().indicesOptions(request().indicesOptions());
+ }
+
+ super.request.add(request);
+ return this;
+ }
+
+ /**
+     * Specifies what types of requested indices to ignore (for example, indices that don't exist)
+     * and how to deal with wildcard indices expressions.
+ *
+ * Invoke this method before invoking {@link #add(SearchRequestBuilder)}.
+ */
+ public MultiSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request().indicesOptions(indicesOptions);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<MultiSearchResponse> listener) {
+ ((Client) client).multiSearch(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
new file mode 100644
index 0000000..56e36e3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * A multi search response.
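+ * <p>
+ * The response items can be iterated in request order; each {@link Item} holds either the actual
+ * {@link SearchResponse} or a failure message. For example (illustrative):
+ * <pre>
+ * for (MultiSearchResponse.Item item : multiSearchResponse) {
+ *     if (item.isFailure()) {
+ *         // handle item.getFailureMessage()
+ *     } else {
+ *         SearchResponse searchResponse = item.getResponse();
+ *     }
+ * }
+ * </pre>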
+ */
+public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContent {
+
+ /**
+ * A search response item, holding the actual search response, or an error message if it failed.
+ */
+ public static class Item implements Streamable {
+ private SearchResponse response;
+ private String failureMessage;
+
+ Item() {
+
+ }
+
+ public Item(SearchResponse response, String failureMessage) {
+ this.response = response;
+ this.failureMessage = failureMessage;
+ }
+
+ /**
+ * Is it a failed search?
+ */
+ public boolean isFailure() {
+ return failureMessage != null;
+ }
+
+ /**
+         * The actual failure message; null if it's not a failure.
+ */
+ @Nullable
+ public String getFailureMessage() {
+ return failureMessage;
+ }
+
+ /**
+         * The actual search response; null if it's a failure.
+ */
+ @Nullable
+ public SearchResponse getResponse() {
+ return this.response;
+ }
+
+ public static Item readItem(StreamInput in) throws IOException {
+ Item item = new Item();
+ item.readFrom(in);
+ return item;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ this.response = new SearchResponse();
+ response.readFrom(in);
+ } else {
+ failureMessage = in.readString();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (response != null) {
+ out.writeBoolean(true);
+ response.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ out.writeString(failureMessage);
+ }
+ }
+ }
+
+ private Item[] items;
+
+ MultiSearchResponse() {
+ }
+
+ public MultiSearchResponse(Item[] items) {
+ this.items = items;
+ }
+
+ @Override
+ public Iterator<Item> iterator() {
+ return Iterators.forArray(items);
+ }
+
+ /**
+     * The list of responses; the order is the same as the one provided in the request.
+ */
+ public Item[] getResponses() {
+ return this.items;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ items = new Item[in.readVInt()];
+ for (int i = 0; i < items.length; i++) {
+ items[i] = Item.readItem(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(items.length);
+ for (Item item : items) {
+ item.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray(Fields.RESPONSES);
+ for (Item item : items) {
+ if (item.isFailure()) {
+ builder.startObject();
+ builder.field(Fields.ERROR, item.getFailureMessage());
+ builder.endObject();
+ } else {
+ builder.startObject();
+ item.getResponse().toXContent(builder, params);
+ builder.endObject();
+ }
+ }
+ builder.endArray();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString RESPONSES = new XContentBuilderString("responses");
+ static final XContentBuilderString ERROR = new XContentBuilderString("error");
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/ReduceSearchPhaseException.java b/src/main/java/org/elasticsearch/action/search/ReduceSearchPhaseException.java
new file mode 100644
index 0000000..84398ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/ReduceSearchPhaseException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+/**
+ * A failure during a reduce phase (when receiving results from several shards, and reducing them
+ * into one or more results and possible actions).
+ */
+public class ReduceSearchPhaseException extends SearchPhaseExecutionException {
+
+ public ReduceSearchPhaseException(String phaseName, String msg, ShardSearchFailure[] shardFailures) {
+ super(phaseName, "[reduce] " + msg, shardFailures);
+ }
+
+ public ReduceSearchPhaseException(String phaseName, String msg, Throwable cause, ShardSearchFailure[] shardFailures) {
+ super(phaseName, "[reduce] " + msg, cause, shardFailures);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchAction.java b/src/main/java/org/elasticsearch/action/search/SearchAction.java
new file mode 100644
index 0000000..19180ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class SearchAction extends Action<SearchRequest, SearchResponse, SearchRequestBuilder> {
+
+ public static final SearchAction INSTANCE = new SearchAction();
+ public static final String NAME = "search";
+
+ private SearchAction() {
+ super(NAME);
+ }
+
+ @Override
+ public SearchResponse newResponse() {
+ return new SearchResponse();
+ }
+
+ @Override
+ public SearchRequestBuilder newRequestBuilder(Client client) {
+ return new SearchRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java b/src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java
new file mode 100644
index 0000000..2338038
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchOperationThreading.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * Controls the operation threading model for search operations that are performed
+ * locally on the executing node.
+ *
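+ * For example (illustrative), {@code SearchOperationThreading.fromString("thread_per_shard", null)}
+ * resolves to {@link #THREAD_PER_SHARD}.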
+ */
+public enum SearchOperationThreading {
+ /**
+     * No threads are used; all the local shard operations will be performed on the calling
+     * thread.
+ */
+ NO_THREADS((byte) 0),
+ /**
+     * The local shard operations will be performed in a serial manner on a single forked thread.
+ */
+ SINGLE_THREAD((byte) 1),
+ /**
+ * Each local shard operation will execute on its own thread.
+ */
+ THREAD_PER_SHARD((byte) 2);
+
+ private final byte id;
+
+ SearchOperationThreading(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public static SearchOperationThreading fromId(byte id) {
+ if (id == 0) {
+ return NO_THREADS;
+ }
+ if (id == 1) {
+ return SINGLE_THREAD;
+ }
+ if (id == 2) {
+ return THREAD_PER_SHARD;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type matching id [" + id + "]");
+ }
+
+ public static SearchOperationThreading fromString(String value, @Nullable SearchOperationThreading defaultValue) {
+ if (value == null) {
+ return defaultValue;
+ }
+ if ("no_threads".equals(value) || "noThreads".equals(value)) {
+ return NO_THREADS;
+ } else if ("single_thread".equals(value) || "singleThread".equals(value)) {
+ return SINGLE_THREAD;
+ } else if ("thread_per_shard".equals(value) || "threadPerShard".equals(value)) {
+ return THREAD_PER_SHARD;
+ }
+ throw new ElasticsearchIllegalArgumentException("No value for search operation threading matching [" + value + "]");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java
new file mode 100644
index 0000000..9e3de98
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class SearchPhaseExecutionException extends ElasticsearchException {
+
+ private final String phaseName;
+
+ private ShardSearchFailure[] shardFailures;
+
+ public SearchPhaseExecutionException(String phaseName, String msg, ShardSearchFailure[] shardFailures) {
+ super(buildMessage(phaseName, msg, shardFailures));
+ this.phaseName = phaseName;
+ this.shardFailures = shardFailures;
+ }
+
+ public SearchPhaseExecutionException(String phaseName, String msg, Throwable cause, ShardSearchFailure[] shardFailures) {
+ super(buildMessage(phaseName, msg, shardFailures), cause);
+ this.phaseName = phaseName;
+ this.shardFailures = shardFailures;
+ }
+
+ @Override
+ public RestStatus status() {
+ if (shardFailures.length == 0) {
+ // if no successful shards, it means no active shards, so just return SERVICE_UNAVAILABLE
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+ RestStatus status = shardFailures[0].status();
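+        // prefer a 5xx status reported by any later shard failure over the first shard's status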
+ if (shardFailures.length > 1) {
+ for (int i = 1; i < shardFailures.length; i++) {
+ if (shardFailures[i].status().getStatus() >= 500) {
+ status = shardFailures[i].status();
+ }
+ }
+ }
+ return status;
+ }
+
+ public String phaseName() {
+ return phaseName;
+ }
+
+ public ShardSearchFailure[] shardFailures() {
+ return shardFailures;
+ }
+
+ private static String buildMessage(String phaseName, String msg, ShardSearchFailure[] shardFailures) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Failed to execute phase [").append(phaseName).append("], ").append(msg);
+ if (shardFailures != null && shardFailures.length > 0) {
+ sb.append("; shardFailures ");
+ for (ShardSearchFailure shardFailure : shardFailures) {
+ if (shardFailure.shard() != null) {
+ sb.append("{").append(shardFailure.shard()).append(": ").append(shardFailure.reason()).append("}");
+ } else {
+ sb.append("{").append(shardFailure.reason()).append("}");
+ }
+ }
+ }
+ return sb.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/src/main/java/org/elasticsearch/action/search/SearchRequest.java
new file mode 100644
index 0000000..a214b0b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchRequest.java
@@ -0,0 +1,501 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.search.Scroll.readScroll;
+
+/**
+ * A request to execute search against one or more indices (or all). Best created using
+ * {@link org.elasticsearch.client.Requests#searchRequest(String...)}.
+ * <p/>
+ * <p>Note, the search {@link #source(org.elasticsearch.search.builder.SearchSourceBuilder)}
+ * is required. The search source holds the various search options, including facets and such.
+ * <p/>
+ * <p>There is an option to specify an additional search source using the {@link #extraSource(org.elasticsearch.search.builder.SearchSourceBuilder)}.
+ *
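+ * <p>A minimal usage sketch (illustrative; assumes an existing {@code Client} named {@code client}):
+ * <pre>
+ * SearchRequest request = Requests.searchRequest("index")
+ *         .types("type")
+ *         .source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
+ * SearchResponse response = client.search(request).actionGet();
+ * </pre>
+ *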
+ * @see org.elasticsearch.client.Requests#searchRequest(String...)
+ * @see org.elasticsearch.client.Client#search(SearchRequest)
+ * @see SearchResponse
+ */
+public class SearchRequest extends ActionRequest<SearchRequest> {
+
+ private static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ private SearchType searchType = SearchType.DEFAULT;
+
+ private String[] indices;
+
+ @Nullable
+ private String routing;
+ @Nullable
+ private String preference;
+
+ private BytesReference source;
+ private boolean sourceUnsafe;
+
+ private BytesReference extraSource;
+ private boolean extraSourceUnsafe;
+
+ private Scroll scroll;
+
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ private SearchOperationThreading operationThreading = SearchOperationThreading.THREAD_PER_SHARD;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ public SearchRequest() {
+ }
+
+ /**
+ * Constructs a new search request against the indices. No indices provided here means that search
+ * will run against all indices.
+ */
+ public SearchRequest(String... indices) {
+ indices(indices);
+ }
+
+ /**
+ * Constructs a new search request against the provided indices with the given search source.
+ */
+ public SearchRequest(String[] indices, byte[] source) {
+ indices(indices);
+ this.source = new BytesArray(source);
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ // no need to check, we resolve to match all query
+// if (source == null && extraSource == null) {
+// validationException = addValidationError("search source is missing", validationException);
+// }
+ return validationException;
+ }
+
+ public void beforeStart() {
+        // we always copy over if needed, the reason is that a request might fail while being searched remotely
+ // and then we need to keep the buffer around
+ if (source != null && sourceUnsafe) {
+ source = source.copyBytesArray();
+ sourceUnsafe = false;
+ }
+ if (extraSource != null && extraSourceUnsafe) {
+ extraSource = extraSource.copyBytesArray();
+ extraSourceUnsafe = false;
+ }
+ }
+
+ /**
+ * Internal.
+ */
+ public void beforeLocalFork() {
+ }
+
+ /**
+ * Sets the indices the search will be executed on.
+ */
+ public SearchRequest indices(String... indices) {
+ if (indices == null) {
+ throw new ElasticsearchIllegalArgumentException("indices must not be null");
+ } else {
+ for (int i = 0; i < indices.length; i++) {
+ if (indices[i] == null) {
+ throw new ElasticsearchIllegalArgumentException("indices[" + i +"] must not be null");
+ }
+ }
+ }
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+     * Controls the search operation threading model.
+ */
+ public SearchOperationThreading operationThreading() {
+ return this.operationThreading;
+ }
+
+ /**
+     * Controls the search operation threading model.
+ */
+ public SearchRequest operationThreading(SearchOperationThreading operationThreading) {
+ this.operationThreading = operationThreading;
+ return this;
+ }
+
+ /**
+ * Sets the string representation of the operation threading model. Can be one of
+ * "no_threads", "single_thread" and "thread_per_shard".
+ */
+ public SearchRequest operationThreading(String operationThreading) {
+ return operationThreading(SearchOperationThreading.fromString(operationThreading, this.operationThreading));
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ public SearchRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * The document types to execute the search against. Defaults to executing against
+ * all types.
+ */
+ public String[] types() {
+ return types;
+ }
+
+ /**
+ * The document types to execute the search against. Defaults to executing against
+ * all types.
+ */
+ public SearchRequest types(String... types) {
+ this.types = types;
+ return this;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public SearchRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public SearchRequest routing(String... routings) {
+ this.routing = Strings.arrayToCommaDelimitedString(routings);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public SearchRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ /**
+ * The search type to execute, defaults to {@link SearchType#DEFAULT}.
+ */
+ public SearchRequest searchType(SearchType searchType) {
+ this.searchType = searchType;
+ return this;
+ }
+
+ /**
+ * A string representation of the search type to execute, defaults to {@link SearchType#DEFAULT}. Can be
+ * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
+ * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
+ */
+ public SearchRequest searchType(String searchType) throws ElasticsearchIllegalArgumentException {
+ return searchType(SearchType.fromString(searchType));
+ }
+
+ /**
+ * The source of the search request.
+ */
+ public SearchRequest source(SearchSourceBuilder sourceBuilder) {
+ this.source = sourceBuilder.buildAsBytes(contentType);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source of the search request. Consider using either {@link #source(byte[])} or
+ * {@link #source(org.elasticsearch.search.builder.SearchSourceBuilder)}.
+ */
+ public SearchRequest source(String source) {
+ this.source = new BytesArray(source);
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The source of the search request in the form of a map.
+ */
+ public SearchRequest source(Map source) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(source);
+ return source(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
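+
+ // Sketch of the map form, which mirrors the JSON structure of a search body
+ // (java.util.HashMap/Collections assumed imported; "request" is illustrative):
+ //
+ //   Map<String, Object> body = new HashMap<String, Object>();
+ //   body.put("query", Collections.singletonMap("match_all", Collections.emptyMap()));
+ //   request.source(body);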
+
+ public SearchRequest source(XContentBuilder builder) {
+ this.source = builder.bytes();
+ this.sourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * The search source to execute.
+ */
+ public SearchRequest source(byte[] source) {
+ return source(source, 0, source.length, false);
+ }
+
+
+ /**
+ * The search source to execute.
+ */
+ public SearchRequest source(byte[] source, int offset, int length) {
+ return source(source, offset, length, false);
+ }
+
+ /**
+ * The search source to execute.
+ */
+ public SearchRequest source(byte[] source, int offset, int length, boolean unsafe) {
+ return source(new BytesArray(source, offset, length), unsafe);
+ }
+
+ /**
+ * The search source to execute.
+ */
+ public SearchRequest source(BytesReference source, boolean unsafe) {
+ this.source = source;
+ this.sourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * The search source to execute.
+ */
+ public BytesReference source() {
+ return source;
+ }
+
+ /**
+ * Allows providing an additional source that will be used as well.
+ */
+ public SearchRequest extraSource(SearchSourceBuilder sourceBuilder) {
+ if (sourceBuilder == null) {
+ extraSource = null;
+ return this;
+ }
+ this.extraSource = sourceBuilder.buildAsBytes(contentType);
+ this.extraSourceUnsafe = false;
+ return this;
+ }
+
+ public SearchRequest extraSource(Map extraSource) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.map(extraSource);
+ return extraSource(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
+ }
+ }
+
+ public SearchRequest extraSource(XContentBuilder builder) {
+ this.extraSource = builder.bytes();
+ this.extraSourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * Allows providing an additional source that will be used as well.
+ */
+ public SearchRequest extraSource(String source) {
+ this.extraSource = new BytesArray(source);
+ this.extraSourceUnsafe = false;
+ return this;
+ }
+
+ /**
+ * Allows providing an additional source that will be used as well.
+ */
+ public SearchRequest extraSource(byte[] source) {
+ return extraSource(source, 0, source.length, false);
+ }
+
+ /**
+ * Allows providing an additional source that will be used as well.
+ */
+ public SearchRequest extraSource(byte[] source, int offset, int length) {
+ return extraSource(source, offset, length, false);
+ }
+
+ /**
+ * Allows providing an additional source that will be used as well.
+ */
+ public SearchRequest extraSource(byte[] source, int offset, int length, boolean unsafe) {
+ return extraSource(new BytesArray(source, offset, length), unsafe);
+ }
+
+ /**
+ * Allows providing an additional source that will be used as well.
+ */
+ public SearchRequest extraSource(BytesReference source, boolean unsafe) {
+ this.extraSource = source;
+ this.extraSourceUnsafe = unsafe;
+ return this;
+ }
+
+ /**
+ * Additional search source to execute.
+ */
+ public BytesReference extraSource() {
+ return this.extraSource;
+ }
+
+ /**
+ * The type of search to execute.
+ */
+ public SearchType searchType() {
+ return searchType;
+ }
+
+ /**
+ * The indices the search will be executed on.
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public Scroll scroll() {
+ return scroll;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public SearchRequest scroll(Scroll scroll) {
+ this.scroll = scroll;
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchRequest scroll(TimeValue keepAlive) {
+ return scroll(new Scroll(keepAlive));
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchRequest scroll(String keepAlive) {
+ return scroll(new Scroll(TimeValue.parseTimeValue(keepAlive, null)));
+ }
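+
+ // Sketch: the keep-alive controls how long the scroll context stays open
+ // between calls (the "5m" value is illustrative):
+ //
+ //   request.scroll("5m");                            // time string form
+ //   request.scroll(TimeValue.timeValueMinutes(5));   // equivalent TimeValue form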
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ operationThreading = SearchOperationThreading.fromId(in.readByte());
+ searchType = SearchType.fromId(in.readByte());
+
+ indices = new String[in.readVInt()];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = in.readString();
+ }
+
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+
+ if (in.readBoolean()) {
+ scroll = readScroll(in);
+ }
+
+ sourceUnsafe = false;
+ source = in.readBytesReference();
+
+ extraSourceUnsafe = false;
+ extraSource = in.readBytesReference();
+
+ types = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(operationThreading.id());
+ out.writeByte(searchType.id());
+
+ out.writeVInt(indices.length);
+ for (String index : indices) {
+ out.writeString(index);
+ }
+
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+
+ if (scroll == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ scroll.writeTo(out);
+ }
+ out.writeBytesReference(source);
+ out.writeBytesReference(extraSource);
+ out.writeStringArray(types);
+ indicesOptions.writeIndicesOptions(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
new file mode 100644
index 0000000..8fe4c2d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
@@ -0,0 +1,988 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.facet.FacetBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.rescore.RescoreBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.util.Map;
+
+/**
+ * A search action request builder.
+ */
+public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, SearchRequestBuilder> {
+
+ private SearchSourceBuilder sourceBuilder;
+
+ public SearchRequestBuilder(Client client) {
+ super((InternalClient) client, new SearchRequest());
+ }
+
+ /**
+ * Sets the indices the search will be executed on.
+ */
+ public SearchRequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return this;
+ }
+
+ /**
+ * The document types to execute the search against. Defaults to be executed against
+ * all types.
+ */
+ public SearchRequestBuilder setTypes(String... types) {
+ request.types(types);
+ return this;
+ }
+
+ /**
+ * The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
+ */
+ public SearchRequestBuilder setSearchType(SearchType searchType) {
+ request.searchType(searchType);
+ return this;
+ }
+
+ /**
+ * The a string representation search type to execute, defaults to {@link SearchType#DEFAULT}. Can be
+ * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
+ * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
+ */
+ public SearchRequestBuilder setSearchType(String searchType) throws ElasticsearchIllegalArgumentException {
+ request.searchType(searchType);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public SearchRequestBuilder setScroll(Scroll scroll) {
+ request.scroll(scroll);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchRequestBuilder setScroll(TimeValue keepAlive) {
+ request.scroll(keepAlive);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchRequestBuilder setScroll(String keepAlive) {
+ request.scroll(keepAlive);
+ return this;
+ }
+
+ /**
+ * An optional timeout to control how long search is allowed to take.
+ */
+ public SearchRequestBuilder setTimeout(TimeValue timeout) {
+ sourceBuilder().timeout(timeout);
+ return this;
+ }
+
+ /**
+ * An optional timeout to control how long search is allowed to take.
+ */
+ public SearchRequestBuilder setTimeout(String timeout) {
+ sourceBuilder().timeout(timeout);
+ return this;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public SearchRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public SearchRequestBuilder setRouting(String... routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public SearchRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * Controls the search operation threading model.
+ */
+ public SearchRequestBuilder setOperationThreading(SearchOperationThreading operationThreading) {
+ request.operationThreading(operationThreading);
+ return this;
+ }
+
+ /**
+ * Sets the string representation of the operation threading model. Can be one of
+ * "no_threads", "single_thread" and "thread_per_shard".
+ */
+ public SearchRequestBuilder setOperationThreading(String operationThreading) {
+ request.operationThreading(operationThreading);
+ return this;
+ }
+
+ /**
+ * Specifies what types of requested indices to ignore and how to handle wildcard indices expressions,
+ * for example whether indices that don't exist should be ignored.
+ */
+ public SearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request().indicesOptions(indicesOptions);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a search query.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public SearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
+ sourceBuilder().query(queryBuilder);
+ return this;
+ }
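+
+ // Sketch: typed queries are built with the QueryBuilders factory (assuming
+ // org.elasticsearch.index.query.QueryBuilders is imported; field names illustrative):
+ //
+ //   builder.setQuery(QueryBuilders.boolQuery()
+ //           .must(QueryBuilders.termQuery("user", "kimchy"))
+ //           .must(QueryBuilders.rangeQuery("age").from(18)));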
+
+ /**
+ * Constructs a new search source builder with a raw search query.
+ */
+ public SearchRequestBuilder setQuery(String query) {
+ sourceBuilder().query(query);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a raw search query.
+ */
+ public SearchRequestBuilder setQuery(BytesReference queryBinary) {
+ sourceBuilder().query(queryBinary);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a raw search query.
+ */
+ public SearchRequestBuilder setQuery(byte[] queryBinary) {
+ sourceBuilder().query(queryBinary);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a raw search query.
+ */
+ public SearchRequestBuilder setQuery(byte[] queryBinary, int queryBinaryOffset, int queryBinaryLength) {
+ sourceBuilder().query(queryBinary, queryBinaryOffset, queryBinaryLength);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a raw search query.
+ */
+ public SearchRequestBuilder setQuery(XContentBuilder query) {
+ sourceBuilder().query(query);
+ return this;
+ }
+
+ /**
+ * Constructs a new search source builder with a raw search query.
+ */
+ public SearchRequestBuilder setQuery(Map query) {
+ sourceBuilder().query(query);
+ return this;
+ }
+
+ /**
+ * Sets a filter that will be executed after the query has been executed and only affects the search hits
+ * (not aggregations or facets). This filter is always executed as the last filtering mechanism.
+ */
+ public SearchRequestBuilder setPostFilter(FilterBuilder postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchRequestBuilder setPostFilter(String postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchRequestBuilder setPostFilter(BytesReference postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchRequestBuilder setPostFilter(byte[] postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchRequestBuilder setPostFilter(byte[] postFilter, int postFilterOffset, int postFilterLength) {
+ sourceBuilder().postFilter(postFilter, postFilterOffset, postFilterLength);
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchRequestBuilder setPostFilter(XContentBuilder postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchRequestBuilder setPostFilter(Map postFilter) {
+ sourceBuilder().postFilter(postFilter);
+ return this;
+ }
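+
+ // Sketch: a post filter narrows the returned hits without affecting facet or
+ // aggregation counts (FilterBuilders assumed imported; names illustrative):
+ //
+ //   builder.setQuery(QueryBuilders.matchAllQuery())
+ //          .setPostFilter(FilterBuilders.termFilter("status", "published"));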
+
+ /**
+ * Sets the minimum score below which docs will be filtered out.
+ */
+ public SearchRequestBuilder setMinScore(float minScore) {
+ sourceBuilder().minScore(minScore);
+ return this;
+ }
+
+ /**
+ * From index to start the search from. Defaults to <tt>0</tt>.
+ */
+ public SearchRequestBuilder setFrom(int from) {
+ sourceBuilder().from(from);
+ return this;
+ }
+
+ /**
+ * The number of search hits to return. Defaults to <tt>10</tt>.
+ */
+ public SearchRequestBuilder setSize(int size) {
+ sourceBuilder().size(size);
+ return this;
+ }
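+
+ // Sketch: from/size paging, e.g. the second page of ten hits (assuming a
+ // connected Client named "client"; index and query are illustrative):
+ //
+ //   SearchResponse page2 = client.prepareSearch("twitter")
+ //           .setQuery(QueryBuilders.matchAllQuery())
+ //           .setFrom(10).setSize(10)
+ //           .execute().actionGet();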
+
+ /**
+ * Should each {@link org.elasticsearch.search.SearchHit} be returned with an
+ * explanation of the hit (ranking).
+ */
+ public SearchRequestBuilder setExplain(boolean explain) {
+ sourceBuilder().explain(explain);
+ return this;
+ }
+
+ /**
+ * Should each {@link org.elasticsearch.search.SearchHit} be returned with its
+ * version.
+ */
+ public SearchRequestBuilder setVersion(boolean version) {
+ sourceBuilder().version(version);
+ return this;
+ }
+
+ /**
+ * Sets the boost a specific index will receive when the query is executed against it.
+ *
+ * @param index The index to apply the boost against
+ * @param indexBoost The boost to apply to the index
+ */
+ public SearchRequestBuilder addIndexBoost(String index, float indexBoost) {
+ sourceBuilder().indexBoost(index, indexBoost);
+ return this;
+ }
+
+ /**
+ * The stats groups this request will be aggregated under.
+ */
+ public SearchRequestBuilder setStats(String... statsGroups) {
+ sourceBuilder().stats(statsGroups);
+ return this;
+ }
+
+ /**
+ * Sets no fields to be loaded, resulting in only the id and type being returned per hit.
+ */
+ public SearchRequestBuilder setNoFields() {
+ sourceBuilder().noFields();
+ return this;
+ }
+
+ /**
+ * Indicates whether the response should contain the stored _source for every hit.
+ *
+ * @param fetch whether the _source should be fetched and returned
+ * @return this builder for chaining
+ */
+ public SearchRequestBuilder setFetchSource(boolean fetch) {
+ sourceBuilder().fetchSource(fetch);
+ return this;
+ }
+
+ /**
+ * Indicates that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include An optional include (optionally wildcarded) pattern to filter the returned _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public SearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
+ sourceBuilder().fetchSource(include, exclude);
+ return this;
+ }
+
+ /**
+ * Indicates that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes An optional list of include (optionally wildcarded) patterns to filter the returned _source
+ * @param excludes An optional list of exclude (optionally wildcarded) patterns to filter the returned _source
+ */
+ public SearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ sourceBuilder().fetchSource(includes, excludes);
+ return this;
+ }
+
+
+ /**
+ * Adds a field to load and return (note, it must be stored) as part of the search request.
+ * If none are specified, the source of the document will be returned.
+ */
+ public SearchRequestBuilder addField(String field) {
+ sourceBuilder().field(field);
+ return this;
+ }
+
+ /**
+ * Adds a field data based field to load and return. The field does not have to be stored,
+ * but it's recommended to use non-analyzed or numeric fields.
+ *
+ * @param name The field to get from the field data cache
+ */
+ public SearchRequestBuilder addFieldDataField(String name) {
+ sourceBuilder().fieldDataField(name);
+ return this;
+ }
+
+ /**
+ * Adds a script based field to load and return. The field does not have to be stored,
+ * but it's recommended to use non-analyzed or numeric fields.
+ *
+ * @param name The name that will represent this value in the return hit
+ * @param script The script to use
+ */
+ public SearchRequestBuilder addScriptField(String name, String script) {
+ sourceBuilder().scriptField(name, script);
+ return this;
+ }
+
+ /**
+ * Adds a script based field to load and return. The field does not have to be stored,
+ * but it's recommended to use non-analyzed or numeric fields.
+ *
+ * @param name The name that will represent this value in the return hit
+ * @param script The script to use
+ * @param params Parameters that the script can use.
+ */
+ public SearchRequestBuilder addScriptField(String name, String script, Map<String, Object> params) {
+ sourceBuilder().scriptField(name, script, params);
+ return this;
+ }
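+
+ // Sketch: a script field computed from a numeric field, using the default
+ // script language (the "price" field and the parameter are illustrative):
+ //
+ //   Map<String, Object> params = new HashMap<String, Object>();
+ //   params.put("factor", 1.2);
+ //   builder.addScriptField("price_with_tax", "doc['price'].value * factor", params);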
+
+ /**
+ * Adds a partial field based on _source, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @deprecated since 1.0.0
+ * use {@link org.elasticsearch.action.search.SearchRequestBuilder#setFetchSource(String, String)} instead
+ *
+ * @param name The name of the field
+ * @param include An optional include (optionally wildcarded) pattern from _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern from _source
+ */
+ @Deprecated
+ public SearchRequestBuilder addPartialField(String name, @Nullable String include, @Nullable String exclude) {
+ sourceBuilder().partialField(name, include, exclude);
+ return this;
+ }
+
+ /**
+ * Adds a partial field based on _source, with an "includes" and/or "excludes set which can include simple wildcard
+ * elements.
+ *
+ * @deprecated since 1.0.0
+ * use {@link org.elasticsearch.action.search.SearchRequestBuilder#setFetchSource(String[], String[])} instead
+ *
+ * @param name The name of the field
+ * @param includes An optional list of includes (optionally wildcarded) patterns from _source
+ * @param excludes An optional list of excludes (optionally wildcarded) patterns from _source
+ */
+ @Deprecated
+ public SearchRequestBuilder addPartialField(String name, @Nullable String[] includes, @Nullable String[] excludes) {
+ sourceBuilder().partialField(name, includes, excludes);
+ return this;
+ }
+
+ /**
+ * Adds a script based field to load and return. The field does not have to be stored,
+ * but it's recommended to use non-analyzed or numeric fields.
+ *
+ * @param name The name that will represent this value in the return hit
+ * @param lang The language of the script
+ * @param script The script to use
+ * @param params Parameters that the script can use (can be <tt>null</tt>).
+ */
+ public SearchRequestBuilder addScriptField(String name, String lang, String script, Map<String, Object> params) {
+ sourceBuilder().scriptField(name, lang, script, params);
+ return this;
+ }
+
+ /**
+ * Adds a sort against the given field name and the sort ordering.
+ *
+ * @param field The name of the field
+ * @param order The sort ordering
+ */
+ public SearchRequestBuilder addSort(String field, SortOrder order) {
+ sourceBuilder().sort(field, order);
+ return this;
+ }
+
+ /**
+ * Adds a generic sort builder.
+ *
+ * @see org.elasticsearch.search.sort.SortBuilders
+ */
+ public SearchRequestBuilder addSort(SortBuilder sort) {
+ sourceBuilder().sort(sort);
+ return this;
+ }
+
+ /**
+ * Applies when sorting, and controls if scores will be tracked as well. Defaults to
+ * <tt>false</tt>.
+ */
+ public SearchRequestBuilder setTrackScores(boolean trackScores) {
+ sourceBuilder().trackScores(trackScores);
+ return this;
+ }
+
+ /**
+ * Adds the fields to load and return as part of the search request. If none are specified,
+ * the source of the document will be returned.
+ */
+ public SearchRequestBuilder addFields(String... fields) {
+ sourceBuilder().fields(fields);
+ return this;
+ }
+
+ /**
+ * Adds a facet to the search operation.
+ */
+ public SearchRequestBuilder addFacet(FacetBuilder facet) {
+ sourceBuilder().facet(facet);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of facets to use.
+ */
+ public SearchRequestBuilder setFacets(BytesReference facets) {
+ sourceBuilder().facets(facets);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of facets to use.
+ */
+ public SearchRequestBuilder setFacets(byte[] facets) {
+ sourceBuilder().facets(facets);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of facets to use.
+ */
+ public SearchRequestBuilder setFacets(byte[] facets, int facetsOffset, int facetsLength) {
+ sourceBuilder().facets(facets, facetsOffset, facetsLength);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of facets to use.
+ */
+ public SearchRequestBuilder setFacets(XContentBuilder facets) {
+ sourceBuilder().facets(facets);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of facets to use.
+ */
+ public SearchRequestBuilder setFacets(Map facets) {
+ sourceBuilder().facets(facets);
+ return this;
+ }
+
+ /**
+ * Adds an aggregation to the search operation.
+ */
+ public SearchRequestBuilder addAggregation(AbstractAggregationBuilder aggregation) {
+ sourceBuilder().aggregation(aggregation);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of aggregations to use.
+ */
+ public SearchRequestBuilder setAggregations(BytesReference aggregations) {
+ sourceBuilder().aggregations(aggregations);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of aggregations to use.
+ */
+ public SearchRequestBuilder setAggregations(byte[] aggregations) {
+ sourceBuilder().aggregations(aggregations);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of aggregations to use.
+ */
+ public SearchRequestBuilder setAggregations(byte[] aggregations, int aggregationsOffset, int aggregationsLength) {
+ sourceBuilder().aggregations(aggregations, aggregationsOffset, aggregationsLength);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of aggregations to use.
+ */
+ public SearchRequestBuilder setAggregations(XContentBuilder aggregations) {
+ sourceBuilder().aggregations(aggregations);
+ return this;
+ }
+
+ /**
+ * Sets a raw (xcontent) binary representation of aggregations to use.
+ */
+ public SearchRequestBuilder setAggregations(Map aggregations) {
+ sourceBuilder().aggregations(aggregations);
+ return this;
+ }
+
+ /**
+ * Adds a field to be highlighted with a default fragment size of 100 characters, and
+ * a default number of fragments of 5.
+ *
+ * @param name The field to highlight
+ */
+ public SearchRequestBuilder addHighlightedField(String name) {
+ highlightBuilder().field(name);
+ return this;
+ }
+
+
+ /**
+ * Adds a field to be highlighted with a provided fragment size (in characters), and
+ * a default number of fragments of 5.
+ *
+ * @param name The field to highlight
+ * @param fragmentSize The size of a fragment in characters
+ */
+ public SearchRequestBuilder addHighlightedField(String name, int fragmentSize) {
+ highlightBuilder().field(name, fragmentSize);
+ return this;
+ }
+
+ /**
+ * Adds a field to be highlighted with a provided fragment size (in characters), and
+ * a provided (maximum) number of fragments.
+ *
+ * @param name The field to highlight
+ * @param fragmentSize The size of a fragment in characters
+ * @param numberOfFragments The (maximum) number of fragments
+ */
+ public SearchRequestBuilder addHighlightedField(String name, int fragmentSize, int numberOfFragments) {
+ highlightBuilder().field(name, fragmentSize, numberOfFragments);
+ return this;
+ }
+
+ /**
+ * Adds a field to be highlighted with a provided fragment size (in characters),
+ * a provided (maximum) number of fragments and an offset for the highlight.
+ *
+ * @param name The field to highlight
+ * @param fragmentSize The size of a fragment in characters
+ * @param numberOfFragments The (maximum) number of fragments
+ * @param fragmentOffset The offset to start highlighting from
+ */
+ public SearchRequestBuilder addHighlightedField(String name, int fragmentSize, int numberOfFragments,
+ int fragmentOffset) {
+ highlightBuilder().field(name, fragmentSize, numberOfFragments, fragmentOffset);
+ return this;
+ }
+
+ /**
+ * Adds a highlighted field.
+ */
+ public SearchRequestBuilder addHighlightedField(HighlightBuilder.Field field) {
+ highlightBuilder().field(field);
+ return this;
+ }
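+
+ // Sketch: highlighting the "body" field with 150-character fragments, at most
+ // three per hit (field name and fragment settings are illustrative):
+ //
+ //   builder.setQuery(QueryBuilders.matchQuery("body", "elasticsearch"))
+ //          .addHighlightedField("body", 150, 3);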
+
+ /**
+ * Sets a tag scheme that encapsulates built-in pre and post tags. The allowed schemes
+ * are <tt>styled</tt> and <tt>default</tt>.
+ *
+ * @param schemaName The tag scheme name
+ */
+ public SearchRequestBuilder setHighlighterTagsSchema(String schemaName) {
+ highlightBuilder().tagsSchema(schemaName);
+ return this;
+ }
+
+ /**
+ * Explicitly set the pre tags that will be used for highlighting.
+ */
+ public SearchRequestBuilder setHighlighterPreTags(String... preTags) {
+ highlightBuilder().preTags(preTags);
+ return this;
+ }
+
+ /**
+ * Explicitly set the post tags that will be used for highlighting.
+ */
+ public SearchRequestBuilder setHighlighterPostTags(String... postTags) {
+ highlightBuilder().postTags(postTags);
+ return this;
+ }
+
+ /**
+ * The order of fragments per field. By default, ordered by the order in the
+ * highlighted text. Can be <tt>score</tt>, in which case the fragments will be
+ * ordered by their score.
+ */
+ public SearchRequestBuilder setHighlighterOrder(String order) {
+ highlightBuilder().order(order);
+ return this;
+ }
+
+
+ /**
+ * The encoder to use for highlighting.
+ */
+ public SearchRequestBuilder setHighlighterEncoder(String encoder) {
+ highlightBuilder().encoder(encoder);
+ return this;
+ }
+
+ /**
+ * Sets a query to be used for highlighting all fields instead of the search query.
+ */
+ public SearchRequestBuilder setHighlighterQuery(QueryBuilder highlightQuery) {
+ highlightBuilder().highlightQuery(highlightQuery);
+ return this;
+ }
+
+ public SearchRequestBuilder setHighlighterRequireFieldMatch(boolean requireFieldMatch) {
+ highlightBuilder().requireFieldMatch(requireFieldMatch);
+ return this;
+ }
+
+ /**
+ * The highlighter type to use.
+ */
+ public SearchRequestBuilder setHighlighterType(String type) {
+ highlightBuilder().highlighterType(type);
+ return this;
+ }
+
+ /**
+ * Sets the size of the fragment to return from the beginning of the field if there are no matches to
+ * highlight and the field doesn't also define noMatchSize.
+ * @param noMatchSize Integer to set, or null to leave out of the request. The default is null.
+ * @return this builder for chaining
+ */
+ public SearchRequestBuilder setHighlighterNoMatchSize(Integer noMatchSize) {
+ highlightBuilder().noMatchSize(noMatchSize);
+ return this;
+ }
+
+ public SearchRequestBuilder setHighlighterOptions(Map<String, Object> options) {
+ highlightBuilder().options(options);
+ return this;
+ }
+
+ /**
+ * Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#setText(String)}.
+ */
+ public SearchRequestBuilder setSuggestText(String globalText) {
+ suggestBuilder().setText(globalText);
+ return this;
+ }
+
+ /**
+ * Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#addSuggestion(org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder)}.
+ */
+ public SearchRequestBuilder addSuggestion(SuggestBuilder.SuggestionBuilder<?> suggestion) {
+ suggestBuilder().addSuggestion(suggestion);
+ return this;
+ }
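+
+ // Sketch: attaching a term suggester (SuggestBuilders assumed imported; the
+ // suggestion name and field are illustrative):
+ //
+ //   builder.setSuggestText("elastisearch")
+ //          .addSuggestion(SuggestBuilders.termSuggestion("fix-spelling").field("body"));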
+
+ public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer) {
+ rescoreBuilder().rescorer(rescorer);
+ return this;
+ }
+
+ public SearchRequestBuilder setRescoreWindow(int window) {
+ rescoreBuilder().windowSize(window);
+ return this;
+ }
+
+ /**
+ * Sets the source of the request as a json string. Note, setting anything other
+ * than the search type will cause this source to be overridden; consider using
+ * {@link #setExtraSource(String)}.
+ */
+ public SearchRequestBuilder setSource(String source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets an extra source for the request as a json string, allowing other parameters to still be set.
+ */
+ public SearchRequestBuilder setExtraSource(String source) {
+ request.extraSource(source);
+ return this;
+ }
+
+ /**
+ * Sets the source of the request as a raw bytes reference. Note, setting anything other
+ * than the search type will cause this source to be overridden; consider using
+ * {@link #setExtraSource(BytesReference)}.
+ */
+ public SearchRequestBuilder setSource(BytesReference source) {
+ request.source(source, false);
+ return this;
+ }
+
+ /**
+ * Sets the source of the request as a raw bytes reference. Note, setting anything other
+ * than the search type will cause this source to be overridden; consider using
+ * {@link #setExtraSource(BytesReference)}.
+ */
+ public SearchRequestBuilder setSource(BytesReference source, boolean unsafe) {
+ request.source(source, unsafe);
+ return this;
+ }
+
+
+ /**
+ * Sets the source of the request as raw bytes. Note, setting anything other
+ * than the search type will cause this source to be overridden; consider using
+ * {@link #setExtraSource(byte[])}.
+ */
+ public SearchRequestBuilder setSource(byte[] source) {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets an extra source for the request as a raw bytes reference, allowing other parameters to still be set.
+ */
+ public SearchRequestBuilder setExtraSource(BytesReference source) {
+ request.extraSource(source, false);
+ return this;
+ }
+
+ /**
+ * Sets an extra source for the request as a raw bytes reference, allowing other parameters to still be set.
+ */
+ public SearchRequestBuilder setExtraSource(BytesReference source, boolean unsafe) {
+ request.extraSource(source, unsafe);
+ return this;
+ }
+
+ /**
+ * Sets an extra source for the request as raw bytes, allowing other parameters to still be set.
+ */
+ public SearchRequestBuilder setExtraSource(byte[] source) {
+ request.extraSource(source);
+ return this;
+ }
+
+ /**
+ * Sets the source of the request as raw bytes. Note, setting anything other
+ * than the search type will cause this source to be overridden; consider using
+ * {@link #setExtraSource(byte[])}.
+ */
+ public SearchRequestBuilder setSource(byte[] source, int offset, int length) {
+ request.source(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets an extra source for the request as raw bytes, allowing other parameters to still be set.
+ */
+ public SearchRequestBuilder setExtraSource(byte[] source, int offset, int length) {
+ request.extraSource(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the source of the request using the given content builder. Note, setting anything other
+ * than the search type will cause this source to be overridden; consider using
+ * {@link #setExtraSource(org.elasticsearch.common.xcontent.XContentBuilder)}.
+ */
+ public SearchRequestBuilder setSource(XContentBuilder builder) {
+ request.source(builder);
+ return this;
+ }
+
+ /**
+ * Sets an extra source for the request using the given content builder, allowing other parameters to still be set.
+ */
+ public SearchRequestBuilder setExtraSource(XContentBuilder builder) {
+ request.extraSource(builder);
+ return this;
+ }
+
+ /**
+ * Sets the source of the request as a map. Note, setting anything other than the
+ * search type will cause this source to be overridden, consider using
+ * {@link #setExtraSource(java.util.Map)}.
+ */
+ public SearchRequestBuilder setSource(Map source) {
+ request.source(source);
+ return this;
+ }
+
+ public SearchRequestBuilder setExtraSource(Map source) {
+ request.extraSource(source);
+ return this;
+ }
+
+ /**
+ * Sets the source builder to be used with this request. Note, any operations done
+ * on this request builder beforehand are discarded, as this source builder replaces
+ * what has been built up until this point.
+ */
+ public SearchRequestBuilder internalBuilder(SearchSourceBuilder sourceBuilder) {
+ this.sourceBuilder = sourceBuilder;
+ return this;
+ }
+
+ /**
+ * Returns the internal search source builder used to construct the request.
+ */
+ public SearchSourceBuilder internalBuilder() {
+ return sourceBuilder();
+ }
+
+ @Override
+ public String toString() {
+ return internalBuilder().toString();
+ }
+
+ @Override
+ public SearchRequest request() {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder());
+ }
+ return request;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<SearchResponse> listener) {
+ if (sourceBuilder != null) {
+ request.source(sourceBuilder());
+ }
+ ((Client) client).search(request, listener);
+ }
+
+ private SearchSourceBuilder sourceBuilder() {
+ if (sourceBuilder == null) {
+ sourceBuilder = new SearchSourceBuilder();
+ }
+ return sourceBuilder;
+ }
+
+ private HighlightBuilder highlightBuilder() {
+ return sourceBuilder().highlighter();
+ }
+
+ private SuggestBuilder suggestBuilder() {
+ return sourceBuilder().suggest();
+ }
+
+ private RescoreBuilder rescoreBuilder() {
+ return sourceBuilder().rescore();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/src/main/java/org/elasticsearch/action/search/SearchResponse.java
new file mode 100644
index 0000000..08ad7a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
+import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;
+
+/**
+ * A response of a search request.
+ */
+public class SearchResponse extends ActionResponse implements ToXContent {
+
+ private InternalSearchResponse internalResponse;
+
+ private String scrollId;
+
+ private int totalShards;
+
+ private int successfulShards;
+
+ private ShardSearchFailure[] shardFailures;
+
+ private long tookInMillis;
+
+ public SearchResponse() {
+ }
+
+ public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards, long tookInMillis, ShardSearchFailure[] shardFailures) {
+ this.internalResponse = internalResponse;
+ this.scrollId = scrollId;
+ this.totalShards = totalShards;
+ this.successfulShards = successfulShards;
+ this.tookInMillis = tookInMillis;
+ this.shardFailures = shardFailures;
+ }
+
+ public RestStatus status() {
+ if (shardFailures.length == 0) {
+ if (successfulShards == 0 && totalShards > 0) {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+ return RestStatus.OK;
+ }
+ // if total failure, bubble up the status code to the response level
+ if (successfulShards == 0 && totalShards > 0) {
+ RestStatus status = RestStatus.OK;
+ for (int i = 0; i < shardFailures.length; i++) {
+ RestStatus shardStatus = shardFailures[i].status();
+ if (shardStatus.getStatus() >= status.getStatus()) {
+ status = shardFailures[i].status();
+ }
+ }
+ return status;
+ }
+ return RestStatus.OK;
+ }
+
+ /**
+ * The search hits.
+ */
+ public SearchHits getHits() {
+ return internalResponse.hits();
+ }
+
+ /**
+ * The search facets.
+ */
+ public Facets getFacets() {
+ return internalResponse.facets();
+ }
+
+ public Aggregations getAggregations() {
+ return internalResponse.aggregations();
+ }
+
+
+ public Suggest getSuggest() {
+ return internalResponse.suggest();
+ }
+
+ /**
+ * Has the search operation timed out.
+ */
+ public boolean isTimedOut() {
+ return internalResponse.timedOut();
+ }
+
+ /**
+ * How long the search took.
+ */
+ public TimeValue getTook() {
+ return new TimeValue(tookInMillis);
+ }
+
+ /**
+ * How long the search took in milliseconds.
+ */
+ public long getTookInMillis() {
+ return tookInMillis;
+ }
+
+ /**
+ * The total number of shards the search was executed on.
+ */
+ public int getTotalShards() {
+ return totalShards;
+ }
+
+ /**
+ * The successful number of shards the search was executed on.
+ */
+ public int getSuccessfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * The failed number of shards the search was executed on.
+ */
+ public int getFailedShards() {
+ // we don't return totalShards - successfulShards: "no shards available" is not counted as a
+ // failed shard, it is simply not counted in the successful counter
+ return shardFailures.length;
+ }
+
+ /**
+ * The failures that occurred during the search.
+ */
+ public ShardSearchFailure[] getShardFailures() {
+ return this.shardFailures;
+ }
+
+ /**
+ * If scrolling was enabled ({@link SearchRequest#scroll(org.elasticsearch.search.Scroll)}), the
+ * scroll id that can be used to continue scrolling.
+ */
+ public String getScrollId() {
+ return scrollId;
+ }
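+
+ // Sketch: continuing a scroll with the returned id until no hits remain
+ // (assuming a connected Client named "client" and an initial scan/scroll
+ // response named "response"):
+ //
+ //   while (true) {
+ //       response = client.prepareSearchScroll(response.getScrollId())
+ //               .setScroll("1m").execute().actionGet();
+ //       if (response.getHits().getHits().length == 0) {
+ //           break; // all hits consumed
+ //       }
+ //       // process response.getHits() ...
+ //   }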
+
+ static final class Fields {
+ static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
+ static final XContentBuilderString _SHARDS = new XContentBuilderString("_shards");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful");
+ static final XContentBuilderString FAILED = new XContentBuilderString("failed");
+ static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
+ static final XContentBuilderString STATUS = new XContentBuilderString("status");
+ static final XContentBuilderString INDEX = new XContentBuilderString("index");
+ static final XContentBuilderString SHARD = new XContentBuilderString("shard");
+ static final XContentBuilderString REASON = new XContentBuilderString("reason");
+ static final XContentBuilderString TOOK = new XContentBuilderString("took");
+ static final XContentBuilderString TIMED_OUT = new XContentBuilderString("timed_out");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (scrollId != null) {
+ builder.field(Fields._SCROLL_ID, scrollId);
+ }
+ builder.field(Fields.TOOK, tookInMillis);
+ builder.field(Fields.TIMED_OUT, isTimedOut());
+ builder.startObject(Fields._SHARDS);
+ builder.field(Fields.TOTAL, getTotalShards());
+ builder.field(Fields.SUCCESSFUL, getSuccessfulShards());
+ builder.field(Fields.FAILED, getFailedShards());
+
+ if (shardFailures.length > 0) {
+ builder.startArray(Fields.FAILURES);
+ for (ShardSearchFailure shardFailure : shardFailures) {
+ builder.startObject();
+ if (shardFailure.shard() != null) {
+ builder.field(Fields.INDEX, shardFailure.shard().index());
+ builder.field(Fields.SHARD, shardFailure.shard().shardId());
+ }
+ builder.field(Fields.STATUS, shardFailure.status().getStatus());
+ builder.field(Fields.REASON, shardFailure.reason());
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+ internalResponse.toXContent(builder, params);
+ return builder;
+ }
+
+ public static SearchResponse readSearchResponse(StreamInput in) throws IOException {
+ SearchResponse response = new SearchResponse();
+ response.readFrom(in);
+ return response;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ internalResponse = readInternalSearchResponse(in);
+ totalShards = in.readVInt();
+ successfulShards = in.readVInt();
+ int size = in.readVInt();
+ if (size == 0) {
+ shardFailures = ShardSearchFailure.EMPTY_ARRAY;
+ } else {
+ shardFailures = new ShardSearchFailure[size];
+ for (int i = 0; i < shardFailures.length; i++) {
+ shardFailures[i] = readShardSearchFailure(in);
+ }
+ }
+ scrollId = in.readOptionalString();
+ tookInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ internalResponse.writeTo(out);
+ out.writeVInt(totalShards);
+ out.writeVInt(successfulShards);
+
+ out.writeVInt(shardFailures.length);
+ for (ShardSearchFailure shardSearchFailure : shardFailures) {
+ shardSearchFailure.writeTo(out);
+ }
+
+ out.writeOptionalString(scrollId);
+ out.writeVLong(tookInMillis);
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java
new file mode 100644
index 0000000..1d2f963
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ * The search scroll action, used to continue retrieving hits from an existing scroll.
+ */
+public class SearchScrollAction extends Action<SearchScrollRequest, SearchResponse, SearchScrollRequestBuilder> {
+
+ public static final SearchScrollAction INSTANCE = new SearchScrollAction();
+ public static final String NAME = "search/scroll";
+
+ private SearchScrollAction() {
+ super(NAME);
+ }
+
+ @Override
+ public SearchResponse newResponse() {
+ return new SearchResponse();
+ }
+
+ @Override
+ public SearchScrollRequestBuilder newRequestBuilder(Client client) {
+ return new SearchScrollRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java
new file mode 100644
index 0000000..deea529
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.Scroll;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.search.Scroll.readScroll;
+
+/**
+ * A request to continue scrolling a search, using the scroll id returned from a previous search request.
+ */
+public class SearchScrollRequest extends ActionRequest<SearchScrollRequest> {
+
+ private String scrollId;
+
+ private Scroll scroll;
+
+ private SearchOperationThreading operationThreading = SearchOperationThreading.THREAD_PER_SHARD;
+
+ public SearchScrollRequest() {
+ }
+
+ public SearchScrollRequest(String scrollId) {
+ this.scrollId = scrollId;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (scrollId == null) {
+ validationException = addValidationError("scrollId is missing", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * Returns the search operation threading model.
+ */
+ public SearchOperationThreading operationThreading() {
+ return this.operationThreading;
+ }
+
+ /**
+ * Controls the search operation threading model.
+ */
+ public SearchScrollRequest operationThreading(SearchOperationThreading operationThreading) {
+ this.operationThreading = operationThreading;
+ return this;
+ }
+
+ /**
+ * The scroll id used to scroll the search.
+ */
+ public String scrollId() {
+ return scrollId;
+ }
+
+ public SearchScrollRequest scrollId(String scrollId) {
+ this.scrollId = scrollId;
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public Scroll scroll() {
+ return scroll;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public SearchScrollRequest scroll(Scroll scroll) {
+ this.scroll = scroll;
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchScrollRequest scroll(TimeValue keepAlive) {
+ return scroll(new Scroll(keepAlive));
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchScrollRequest scroll(String keepAlive) {
+ return scroll(new Scroll(TimeValue.parseTimeValue(keepAlive, null)));
+ }
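+
+ // Sketch: issuing a scroll continuation through the request-level API
+ // (assuming a connected Client named "client" and a scroll id from a prior search):
+ //
+ //   SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId).scroll("2m");
+ //   SearchResponse page = client.searchScroll(scrollRequest).actionGet();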
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ operationThreading = SearchOperationThreading.fromId(in.readByte());
+ scrollId = in.readString();
+ if (in.readBoolean()) {
+ scroll = readScroll(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(operationThreading.id());
+ out.writeString(scrollId);
+ if (scroll == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ scroll.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java
new file mode 100644
index 0000000..fa75149
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.Scroll;
+
+/**
+ * A search scroll action request builder.
+ */
+public class SearchScrollRequestBuilder extends ActionRequestBuilder<SearchScrollRequest, SearchResponse, SearchScrollRequestBuilder> {
+
+ public SearchScrollRequestBuilder(Client client) {
+ super((InternalClient) client, new SearchScrollRequest());
+ }
+
+ public SearchScrollRequestBuilder(Client client, String scrollId) {
+ super((InternalClient) client, new SearchScrollRequest(scrollId));
+ }
+
+ /**
+ * Controls the search operation threading model.
+ */
+ public SearchScrollRequestBuilder setOperationThreading(SearchOperationThreading operationThreading) {
+ request.operationThreading(operationThreading);
+ return this;
+ }
+
+ /**
+ * Should the listener be called on a separate thread if needed.
+ */
+ public SearchScrollRequestBuilder listenerThreaded(boolean threadedListener) {
+ request.listenerThreaded(threadedListener);
+ return this;
+ }
+
+ /**
+ * The scroll id to use to continue scrolling.
+ */
+ public SearchScrollRequestBuilder setScrollId(String scrollId) {
+ request.scrollId(scrollId);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public SearchScrollRequestBuilder setScroll(Scroll scroll) {
+ request.scroll(scroll);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchScrollRequestBuilder setScroll(TimeValue keepAlive) {
+ request.scroll(keepAlive);
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public SearchScrollRequestBuilder setScroll(String keepAlive) {
+ request.scroll(keepAlive);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<SearchResponse> listener) {
+ ((Client) client).searchScroll(request, listener);
+ }
+}
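
A minimal usage sketch for the builder above, assuming a connected Client and a scrollId taken from an earlier search response:

    SearchResponse nextPage = client.prepareSearchScroll(scrollId)
            .setScroll(TimeValue.timeValueMinutes(1)) // keep the scroll context alive for the next round
            .execute().actionGet();
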
diff --git a/src/main/java/org/elasticsearch/action/search/SearchType.java b/src/main/java/org/elasticsearch/action/search/SearchType.java
new file mode 100644
index 0000000..0cfc126
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/SearchType.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * Search type represents the manner in which the search operation is executed.
+ */
+public enum SearchType {
+ /**
+     * Same as {@link #QUERY_THEN_FETCH}, except for an initial scatter phase that computes the distributed
+     * term frequencies for more accurate scoring.
+ */
+ DFS_QUERY_THEN_FETCH((byte) 0),
+ /**
+     * The query is executed against all shards, but only enough information is returned (not the document content).
+     * The results are then sorted and ranked, and based on that, only the relevant shards are asked for the actual
+     * document content. The returned number of hits is exactly as specified in size, since they are the only ones
+     * that are fetched. This is very handy when the index has a lot of shards (not replicas, shard id groups).
+ */
+ QUERY_THEN_FETCH((byte) 1),
+ /**
+     * Same as {@link #QUERY_AND_FETCH}, except for an initial scatter phase that computes the distributed
+     * term frequencies for more accurate scoring.
+ */
+ DFS_QUERY_AND_FETCH((byte) 2),
+ /**
+     * The most naive (and possibly fastest) implementation is to simply execute the query on all relevant shards
+     * and return the results. Each shard returns size results. Since each shard already returns size hits, this
+     * type actually returns size times the number of shards results back to the caller.
+ */
+ QUERY_AND_FETCH((byte) 3),
+ /**
+     * Performs scanning of the results, executing the search without any sorting.
+ * It will automatically start scrolling the result set.
+ */
+ SCAN((byte) 4),
+ /**
+     * Only counts the results; facets and the like are still executed.
+ */
+ COUNT((byte) 5);
+
+ /**
+     * The default search type ({@link #QUERY_THEN_FETCH}).
+ */
+ public static final SearchType DEFAULT = QUERY_THEN_FETCH;
+
+ private byte id;
+
+ SearchType(byte id) {
+ this.id = id;
+ }
+
+ /**
+ * The internal id of the type.
+ */
+ public byte id() {
+ return this.id;
+ }
+
+ /**
+ * Constructs search type based on the internal id.
+ */
+ public static SearchType fromId(byte id) {
+ if (id == 0) {
+ return DFS_QUERY_THEN_FETCH;
+ } else if (id == 1) {
+ return QUERY_THEN_FETCH;
+ } else if (id == 2) {
+ return DFS_QUERY_AND_FETCH;
+ } else if (id == 3) {
+ return QUERY_AND_FETCH;
+ } else if (id == 4) {
+ return SCAN;
+ } else if (id == 5) {
+ return COUNT;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No search type for [" + id + "]");
+ }
+ }
+
+ /**
+     * Parses the string representation of a search type to execute, defaulting to {@link SearchType#DEFAULT}
+     * when null. Can be one of "dfs_query_then_fetch", "dfs_query_and_fetch", "query_then_fetch",
+     * "query_and_fetch", "scan" and "count".
+ */
+ public static SearchType fromString(String searchType) throws ElasticsearchIllegalArgumentException {
+ if (searchType == null) {
+ return SearchType.DEFAULT;
+ }
+ if ("dfs_query_then_fetch".equals(searchType)) {
+ return SearchType.DFS_QUERY_THEN_FETCH;
+ } else if ("dfs_query_and_fetch".equals(searchType)) {
+ return SearchType.DFS_QUERY_AND_FETCH;
+ } else if ("query_then_fetch".equals(searchType)) {
+ return SearchType.QUERY_THEN_FETCH;
+ } else if ("query_and_fetch".equals(searchType)) {
+ return SearchType.QUERY_AND_FETCH;
+ } else if ("scan".equals(searchType)) {
+ return SearchType.SCAN;
+ } else if ("count".equals(searchType)) {
+ return SearchType.COUNT;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No search type for [" + searchType + "]");
+ }
+ }
+}
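
A quick sketch of the mappings defined above; note that fromString only accepts the underscore-separated names actually handled in the code:

    SearchType type = SearchType.fromString("dfs_query_then_fetch"); // DFS_QUERY_THEN_FETCH
    assert SearchType.fromId(type.id()) == type;                     // the byte id round-trips
    assert SearchType.fromString(null) == SearchType.DEFAULT;        // null falls back to QUERY_THEN_FETCH
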
diff --git a/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
new file mode 100644
index 0000000..d461ee8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchException;
+import org.elasticsearch.search.SearchShardTarget;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
+
+/**
+ * Represents a failure to search on a specific shard.
+ */
+public class ShardSearchFailure implements ShardOperationFailedException {
+
+ public static final ShardSearchFailure[] EMPTY_ARRAY = new ShardSearchFailure[0];
+
+ private SearchShardTarget shardTarget;
+ private String reason;
+ private RestStatus status;
+ private transient Throwable failure;
+
+ private ShardSearchFailure() {
+
+ }
+
+ @Nullable
+ public Throwable failure() {
+ return failure;
+ }
+
+ public ShardSearchFailure(Throwable t) {
+ this(t, null);
+ }
+
+ public ShardSearchFailure(Throwable t, @Nullable SearchShardTarget shardTarget) {
+ this.failure = t;
+ Throwable actual = ExceptionsHelper.unwrapCause(t);
+        if (actual instanceof SearchException) {
+ this.shardTarget = ((SearchException) actual).shard();
+ } else if (shardTarget != null) {
+ this.shardTarget = shardTarget;
+ }
+        if (actual instanceof ElasticsearchException) {
+ status = ((ElasticsearchException) actual).status();
+ } else {
+ status = RestStatus.INTERNAL_SERVER_ERROR;
+ }
+ this.reason = ExceptionsHelper.detailedMessage(t);
+ }
+
+ public ShardSearchFailure(String reason, SearchShardTarget shardTarget) {
+ this(reason, shardTarget, RestStatus.INTERNAL_SERVER_ERROR);
+ }
+
+ public ShardSearchFailure(String reason, SearchShardTarget shardTarget, RestStatus status) {
+ this.shardTarget = shardTarget;
+ this.reason = reason;
+ this.status = status;
+ }
+
+ /**
+ * The search shard target the failure occurred on.
+ */
+ @Nullable
+ public SearchShardTarget shard() {
+ return this.shardTarget;
+ }
+
+ public RestStatus status() {
+ return this.status;
+ }
+
+ /**
+ * The index the search failed on.
+ */
+ @Override
+ public String index() {
+ if (shardTarget != null) {
+ return shardTarget.index();
+ }
+ return null;
+ }
+
+ /**
+ * The shard id the search failed on.
+ */
+ @Override
+ public int shardId() {
+ if (shardTarget != null) {
+ return shardTarget.shardId();
+ }
+ return -1;
+ }
+
+ /**
+     * The reason for the failure.
+ */
+ public String reason() {
+ return this.reason;
+ }
+
+ @Override
+ public String toString() {
+ return "shard [" + (shardTarget == null ? "_na" : shardTarget) + "], reason [" + reason + "]";
+ }
+
+ public static ShardSearchFailure readShardSearchFailure(StreamInput in) throws IOException {
+ ShardSearchFailure shardSearchFailure = new ShardSearchFailure();
+ shardSearchFailure.readFrom(in);
+ return shardSearchFailure;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ shardTarget = readSearchShardTarget(in);
+ }
+ reason = in.readString();
+ status = RestStatus.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (shardTarget == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ shardTarget.writeTo(out);
+ }
+ out.writeString(reason);
+ RestStatus.writeTo(out, status);
+ }
+}
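
The readFrom/writeTo pair above encodes the nullable shard target with a boolean presence flag; a minimal sketch of the reader side of that protocol, using the same stream types:

    static SearchShardTarget readOptionalTarget(StreamInput in) throws IOException {
        // a leading true means a serialized target follows; false means it was null
        return in.readBoolean() ? SearchShardTarget.readSearchShardTarget(in) : null;
    }
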
diff --git a/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
new file mode 100644
index 0000000..c510371
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
+
+/**
+ * Transport action that frees scroll search contexts, either for explicit scroll ids or for all contexts via "_all".
+ */
+public class TransportClearScrollAction extends TransportAction<ClearScrollRequest, ClearScrollResponse> {
+
+ private final ClusterService clusterService;
+ private final SearchServiceTransportAction searchServiceTransportAction;
+
+ @Inject
+ public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool, ClusterService clusterService, SearchServiceTransportAction searchServiceTransportAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.searchServiceTransportAction = searchServiceTransportAction;
+ transportService.registerHandler(ClearScrollAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(ClearScrollRequest request, final ActionListener<ClearScrollResponse> listener) {
+ new Async(request, listener, clusterService.state()).run();
+ }
+
+ private class Async {
+
+ final DiscoveryNodes nodes;
+ final CountDown expectedOps;
+ final ClearScrollRequest request;
+ final List<Tuple<String, Long>[]> contexts = new ArrayList<Tuple<String, Long>[]>();
+ final AtomicReference<Throwable> expHolder;
+ final ActionListener<ClearScrollResponse> listener;
+
+ private Async(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener, ClusterState clusterState) {
+ int expectedOps = 0;
+ this.nodes = clusterState.nodes();
+ if (request.getScrollIds().size() == 1 && "_all".equals(request.getScrollIds().get(0))) {
+ expectedOps = nodes.size();
+ } else {
+ for (String parsedScrollId : request.getScrollIds()) {
+ Tuple<String, Long>[] context = parseScrollId(parsedScrollId).getContext();
+ expectedOps += context.length;
+ this.contexts.add(context);
+ }
+ }
+
+ this.request = request;
+ this.listener = listener;
+ this.expHolder = new AtomicReference<Throwable>();
+ this.expectedOps = new CountDown(expectedOps);
+ }
+
+ public void run() {
+ if (expectedOps.isCountedDown()) {
+ listener.onResponse(new ClearScrollResponse(true));
+ return;
+ }
+
+ if (contexts.isEmpty()) {
+ for (final DiscoveryNode node : nodes) {
+ searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<Boolean>() {
+ @Override
+ public void onResponse(Boolean success) {
+ onFreedContext();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ onFailedFreedContext(e, node);
+ }
+ });
+ }
+ } else {
+ for (Tuple<String, Long>[] context : contexts) {
+ for (Tuple<String, Long> target : context) {
+ final DiscoveryNode node = nodes.get(target.v1());
+ if (node == null) {
+ onFreedContext();
+ continue;
+ }
+
+ searchServiceTransportAction.sendFreeContext(node, target.v2(), request, new ActionListener<Boolean>() {
+ @Override
+ public void onResponse(Boolean success) {
+ onFreedContext();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ onFailedFreedContext(e, node);
+ }
+ });
+ }
+ }
+ }
+ }
+
+ void onFreedContext() {
+ if (expectedOps.countDown()) {
+ boolean succeeded = expHolder.get() == null;
+ listener.onResponse(new ClearScrollResponse(succeeded));
+ }
+ }
+
+ void onFailedFreedContext(Throwable e, DiscoveryNode node) {
+ logger.warn("Clear SC failed on node[{}]", e, node);
+ if (expectedOps.countDown()) {
+ listener.onResponse(new ClearScrollResponse(false));
+ } else {
+ expHolder.set(e);
+ }
+ }
+
+ }
+
+ class TransportHandler extends BaseTransportRequestHandler<ClearScrollRequest> {
+
+ @Override
+ public ClearScrollRequest newInstance() {
+ return new ClearScrollRequest();
+ }
+
+ @Override
+ public void messageReceived(final ClearScrollRequest request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<ClearScrollResponse>() {
+ @Override
+ public void onResponse(ClearScrollResponse response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [clear_sc] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+}
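
A client-side sketch of driving the action above; isSucceeded() is assumed here to expose the success flag that the listener sets:

    ClearScrollResponse cleared = client.prepareClearScroll()
            .addScrollId(scrollId)   // or "_all" to free every scroll context on every node
            .execute().actionGet();
    boolean allFreed = cleared.isSucceeded();
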
diff --git a/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
new file mode 100644
index 0000000..0a0c5d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Transport action that fans a multi search request out as individual search requests and collects per-request results or failures.
+ */
+public class TransportMultiSearchAction extends TransportAction<MultiSearchRequest, MultiSearchResponse> {
+
+ private final ClusterService clusterService;
+
+ private final TransportSearchAction searchAction;
+
+ @Inject
+ public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, TransportSearchAction searchAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.searchAction = searchAction;
+
+ transportService.registerHandler(MultiSearchAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final MultiSearchRequest request, final ActionListener<MultiSearchResponse> listener) {
+ ClusterState clusterState = clusterService.state();
+ clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
+
+ final AtomicArray<MultiSearchResponse.Item> responses = new AtomicArray<MultiSearchResponse.Item>(request.requests().size());
+ final AtomicInteger counter = new AtomicInteger(responses.length());
+ for (int i = 0; i < responses.length(); i++) {
+ final int index = i;
+ searchAction.execute(request.requests().get(i), new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse searchResponse) {
+ responses.set(index, new MultiSearchResponse.Item(searchResponse, null));
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ responses.set(index, new MultiSearchResponse.Item(null, ExceptionsHelper.detailedMessage(e)));
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()])));
+ }
+ });
+ }
+ }
+
+ class TransportHandler extends BaseTransportRequestHandler<MultiSearchRequest> {
+
+ @Override
+ public MultiSearchRequest newInstance() {
+ return new MultiSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(final MultiSearchRequest request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<MultiSearchResponse>() {
+ @Override
+ public void onResponse(MultiSearchResponse response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [msearch] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
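
A sketch of exercising the action above through the client API, assuming the standard MultiSearchResponse.Item accessors:

    MultiSearchResponse msearch = client.prepareMultiSearch()
            .add(client.prepareSearch("index-a").setQuery(QueryBuilders.matchAllQuery()))
            .add(client.prepareSearch("index-b").setQuery(QueryBuilders.termQuery("field", "value")))
            .execute().actionGet();
    for (MultiSearchResponse.Item item : msearch.getResponses()) {
        if (item.isFailure()) {
            String reason = item.getFailureMessage(); // the detailed message captured by the failure listener
        } else {
            SearchResponse ok = item.getResponse();
        }
    }
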
diff --git a/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
new file mode 100644
index 0000000..ed6ec8c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.type.*;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.action.search.SearchType.*;
+
+/**
+ * Entry point transport action for search requests; selects and delegates to the concrete search type implementation.
+ */
+public class TransportSearchAction extends TransportAction<SearchRequest, SearchResponse> {
+
+ private final ClusterService clusterService;
+
+ private final TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction;
+
+ private final TransportSearchQueryThenFetchAction queryThenFetchAction;
+
+ private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
+
+ private final TransportSearchQueryAndFetchAction queryAndFetchAction;
+
+ private final TransportSearchScanAction scanAction;
+
+ private final TransportSearchCountAction countAction;
+
+ private final boolean optimizeSingleShard;
+
+ @Inject
+ public TransportSearchAction(Settings settings, ThreadPool threadPool,
+ TransportService transportService, ClusterService clusterService,
+ TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction,
+ TransportSearchQueryThenFetchAction queryThenFetchAction,
+ TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction,
+ TransportSearchQueryAndFetchAction queryAndFetchAction,
+ TransportSearchScanAction scanAction,
+ TransportSearchCountAction countAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.dfsQueryThenFetchAction = dfsQueryThenFetchAction;
+ this.queryThenFetchAction = queryThenFetchAction;
+ this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
+ this.queryAndFetchAction = queryAndFetchAction;
+ this.scanAction = scanAction;
+ this.countAction = countAction;
+
+ this.optimizeSingleShard = componentSettings.getAsBoolean("optimize_single_shard", true);
+
+ transportService.registerHandler(SearchAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ // optimize search type for cases where there is only one shard group to search on
+ if (optimizeSingleShard && searchRequest.searchType() != SCAN && searchRequest.searchType() != COUNT) {
+ try {
+ ClusterState clusterState = clusterService.state();
+ String[] concreteIndices = clusterState.metaData().concreteIndices(searchRequest.indices(), searchRequest.indicesOptions());
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(searchRequest.routing(), searchRequest.indices());
+ int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, searchRequest.indices(), concreteIndices, routingMap, searchRequest.preference());
+ if (shardCount == 1) {
+                    // if we only have one shard group, we can always use QUERY_AND_FETCH: DFS adds nothing, and a separate fetch step is unnecessary since we hit a single shard
+ searchRequest.searchType(QUERY_AND_FETCH);
+ }
+ } catch (IndexMissingException e) {
+                // ignore this; if the index is really missing, the actual search
+                // action will report it in the search response
+ } catch (Exception e) {
+ logger.debug("failed to optimize search type, continue as normal", e);
+ }
+ }
+
+ if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
+ dfsQueryThenFetchAction.execute(searchRequest, listener);
+ } else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) {
+ queryThenFetchAction.execute(searchRequest, listener);
+ } else if (searchRequest.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
+ dfsQueryAndFetchAction.execute(searchRequest, listener);
+ } else if (searchRequest.searchType() == SearchType.QUERY_AND_FETCH) {
+ queryAndFetchAction.execute(searchRequest, listener);
+ } else if (searchRequest.searchType() == SearchType.SCAN) {
+ scanAction.execute(searchRequest, listener);
+ } else if (searchRequest.searchType() == SearchType.COUNT) {
+ countAction.execute(searchRequest, listener);
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<SearchRequest> {
+
+ @Override
+ public SearchRequest newInstance() {
+ return new SearchRequest();
+ }
+
+ @Override
+ public void messageReceived(SearchRequest request, final TransportChannel channel) throws Exception {
+ // no need for a threaded listener
+ request.listenerThreaded(false);
+ // we don't spawn, so if we get a request with no threading, change it to single threaded
+ if (request.operationThreading() == SearchOperationThreading.NO_THREADS) {
+ request.operationThreading(SearchOperationThreading.SINGLE_THREAD);
+ }
+ execute(request, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response for search", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
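
A client-side sketch of the dispatch above; with the single-shard optimization enabled, the requested type may be silently rewritten to QUERY_AND_FETCH when routing resolves to a single shard group:

    SearchResponse response = client.prepareSearch("index")
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) // may become QUERY_AND_FETCH on a single shard
            .setQuery(QueryBuilders.termQuery("field", "value"))
            .execute().actionGet();
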
diff --git a/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
new file mode 100644
index 0000000..1e5380f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.type.ParsedScrollId;
+import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchScrollScanAction;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import static org.elasticsearch.action.search.type.ParsedScrollId.*;
+import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
+
+/**
+ * Transport action for scroll requests; dispatches on the search type encoded in the parsed scroll id.
+ */
+public class TransportSearchScrollAction extends TransportAction<SearchScrollRequest, SearchResponse> {
+
+ private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction;
+
+ private final TransportSearchScrollQueryAndFetchAction queryAndFetchAction;
+
+ private final TransportSearchScrollScanAction scanAction;
+
+ @Inject
+ public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+ TransportSearchScrollQueryThenFetchAction queryThenFetchAction,
+ TransportSearchScrollQueryAndFetchAction queryAndFetchAction,
+ TransportSearchScrollScanAction scanAction) {
+ super(settings, threadPool);
+ this.queryThenFetchAction = queryThenFetchAction;
+ this.queryAndFetchAction = queryAndFetchAction;
+ this.scanAction = scanAction;
+
+ transportService.registerHandler(SearchScrollAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
+ try {
+ ParsedScrollId scrollId = parseScrollId(request.scrollId());
+ if (scrollId.getType().equals(QUERY_THEN_FETCH_TYPE)) {
+ queryThenFetchAction.execute(request, scrollId, listener);
+ } else if (scrollId.getType().equals(QUERY_AND_FETCH_TYPE)) {
+ queryAndFetchAction.execute(request, scrollId, listener);
+ } else if (scrollId.getType().equals(SCAN)) {
+ scanAction.execute(request, scrollId, listener);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
+ }
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<SearchScrollRequest> {
+
+ @Override
+ public SearchScrollRequest newInstance() {
+ return new SearchScrollRequest();
+ }
+
+ @Override
+ public void messageReceived(SearchScrollRequest request, final TransportChannel channel) throws Exception {
+ // no need for a threaded listener
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response for search", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
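
A typical scan-and-scroll loop against the action above; the scroll id returned with each page encodes the search type used for dispatch:

    SearchResponse page = client.prepareSearch("index")
            .setSearchType(SearchType.SCAN)
            .setScroll("1m")
            .setSize(100) // per shard when scanning
            .execute().actionGet();
    while (true) {
        page = client.prepareSearchScroll(page.getScrollId()).setScroll("1m").execute().actionGet();
        if (page.getHits().getHits().length == 0) {
            break; // done; the remaining context expires after the keep-alive
        }
        // process page.getHits() ...
    }
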
diff --git a/src/main/java/org/elasticsearch/action/search/package-info.java b/src/main/java/org/elasticsearch/action/search/package-info.java
new file mode 100644
index 0000000..a20d03d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Search action.
+ */
+package org.elasticsearch.action.search;
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java b/src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java
new file mode 100644
index 0000000..f54bacc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/ParsedScrollId.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import org.elasticsearch.common.collect.Tuple;
+
+import java.util.Map;
+
+/**
+ * A decoded scroll id: the search type it was created for, the (node id, search context id) pairs it addresses, and any extra attributes.
+ */
+public class ParsedScrollId {
+
+ public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch";
+
+ public static final String QUERY_AND_FETCH_TYPE = "queryAndFetch";
+
+ public static final String SCAN = "scan";
+
+ private final String source;
+
+ private final String type;
+
+ private final Tuple<String, Long>[] context;
+
+ private final Map<String, String> attributes;
+
+ public ParsedScrollId(String source, String type, Tuple<String, Long>[] context, Map<String, String> attributes) {
+ this.source = source;
+ this.type = type;
+ this.context = context;
+ this.attributes = attributes;
+ }
+
+ public String getSource() {
+ return source;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public Tuple<String, Long>[] getContext() {
+ return context;
+ }
+
+ public Map<String, String> getAttributes() {
+ return this.attributes;
+ }
+}
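
A small sketch of consuming a decoded scroll id via the accessors above, using Tuple's v1()/v2():

    ParsedScrollId parsed = TransportSearchHelper.parseScrollId(scrollId);
    for (Tuple<String, Long> target : parsed.getContext()) {
        String nodeId = target.v1();        // the node holding the search context
        long searchContextId = target.v2(); // the context id on that node
    }
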
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java
new file mode 100644
index 0000000..96c2d24
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.FetchSearchResultProvider;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
+
+/**
+ * Search type implementation for COUNT: runs only the query phase and merges a response with no hits.
+ */
+public class TransportSearchCountAction extends TransportSearchTypeAction {
+
+ @Inject
+ public TransportSearchCountAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool, clusterService, searchService, searchPhaseController);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ new AsyncAction(searchRequest, listener).start();
+ }
+
+ private class AsyncAction extends BaseAsyncAction<QuerySearchResult> {
+
+ private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ super(request, listener);
+ }
+
+ @Override
+ protected String firstPhaseName() {
+ return "query";
+ }
+
+ @Override
+ protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QuerySearchResult> listener) {
+ searchService.sendExecuteQuery(node, request, listener);
+ }
+
+ @Override
+ protected void moveToSecondPhase() throws Exception {
+ // no need to sort, since we know we have no hits back
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray<? extends FetchSearchResultProvider>) AtomicArray.empty());
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = buildScrollId(request.searchType(), firstResults, null);
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ }
+ }
+}
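
A usage sketch for the count search type handled above; only the query phase runs, so no documents are fetched:

    SearchResponse counted = client.prepareSearch("index")
            .setSearchType(SearchType.COUNT)
            .setQuery(QueryBuilders.termQuery("field", "value"))
            .execute().actionGet();
    long matching = counted.getHits().getTotalHits(); // the hits array itself stays empty
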
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java
new file mode 100644
index 0000000..05705fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchOperationThreading;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.fetch.QueryFetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchRequest;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Search type implementation for DFS_QUERY_AND_FETCH: an initial dfs phase aggregates term statistics, then a combined query and fetch phase runs per shard.
+ */
+public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAction {
+
+ @Inject
+ public TransportSearchDfsQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool, clusterService, searchService, searchPhaseController);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ new AsyncAction(searchRequest, listener).start();
+ }
+
+ private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
+
+ private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
+
+ private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ super(request, listener);
+ queryFetchResults = new AtomicArray<QueryFetchSearchResult>(firstResults.length());
+ }
+
+ @Override
+ protected String firstPhaseName() {
+ return "dfs";
+ }
+
+ @Override
+ protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<DfsSearchResult> listener) {
+ searchService.sendExecuteDfs(node, request, listener);
+ }
+
+ @Override
+ protected void moveToSecondPhase() {
+ final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
+ final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
+
+ int localOperations = 0;
+ for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
+ DfsSearchResult dfsResult = entry.value;
+ DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ localOperations++;
+ } else {
+ QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
+ executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
+ }
+ }
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
+ DfsSearchResult dfsResult = entry.value;
+ DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
+ executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
+ final DfsSearchResult dfsResult = entry.value;
+ final DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ final QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
+ }
+ });
+ } else {
+ executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
+ }
+ } catch (Throwable t) {
+ onSecondPhaseFailure(t, querySearchRequest, entry.index, dfsResult, counter);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
+ searchService.sendExecuteFetch(node, querySearchRequest, new SearchServiceListener<QueryFetchSearchResult>() {
+ @Override
+ public void onResult(QueryFetchSearchResult result) {
+ result.shardTarget(dfsResult.shardTarget());
+ queryFetchResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
+ }
+ });
+ }
+
+ void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
+ }
+ this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
+ successulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ void finishHim() {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", e, buildShardFailures());
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to reduce search", failure);
+ }
+ listener.onFailure(failure);
+            }
+ }
+
+ void innerFinishHim() throws Exception {
+ sortedShardList = searchPhaseController.sortDocs(queryFetchResults);
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults);
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ }
+ }
+}
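
The local-shard branching in moveToSecondPhase above follows the SearchOperationThreading mode; a hypothetical helper distilling that policy (SINGLE_THREAD is handled earlier by submitting one pool task that iterates all local shards):

    void dispatchLocal(SearchOperationThreading mode, Runnable shardTask, Executor searchPool) {
        if (mode == SearchOperationThreading.THREAD_PER_SHARD) {
            searchPool.execute(shardTask); // fork one SEARCH-pool task per local shard
        } else {
            shardTask.run();               // NO_THREADS: run inline on the calling thread
        }
    }
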
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java
new file mode 100644
index 0000000..62565bf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchOperationThreading;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.fetch.FetchSearchRequest;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchRequest;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Search type implementation for DFS_QUERY_THEN_FETCH: dfs, then query, then fetch of only the selected documents.
+ */
+public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeAction {
+
+ @Inject
+ public TransportSearchDfsQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool, clusterService, searchService, searchPhaseController);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ new AsyncAction(searchRequest, listener).start();
+ }
+
+ private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
+
+ final AtomicArray<QuerySearchResult> queryResults;
+ final AtomicArray<FetchSearchResult> fetchResults;
+ final AtomicArray<IntArrayList> docIdsToLoad;
+
+ private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ super(request, listener);
+ queryResults = new AtomicArray<QuerySearchResult>(firstResults.length());
+ fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length());
+ docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length());
+ }
+
+ @Override
+ protected String firstPhaseName() {
+ return "dfs";
+ }
+
+ @Override
+ protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<DfsSearchResult> listener) {
+ searchService.sendExecuteDfs(node, request, listener);
+ }
+
+ @Override
+ protected void moveToSecondPhase() {
+ final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
+ final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
+
+ int localOperations = 0;
+ for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
+ DfsSearchResult dfsResult = entry.value;
+ DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ localOperations++;
+ } else {
+ QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
+ executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
+ }
+ }
+
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
+ DfsSearchResult dfsResult = entry.value;
+ DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
+ executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
+ final DfsSearchResult dfsResult = entry.value;
+ final DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ final QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
+ }
+ });
+ } else {
+ executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
+ }
+ } catch (Throwable t) {
+ onQueryFailure(t, querySearchRequest, entry.index, dfsResult, counter);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final QuerySearchRequest querySearchRequest, DiscoveryNode node) {
+ searchService.sendExecuteQuery(node, querySearchRequest, new SearchServiceListener<QuerySearchResult>() {
+ @Override
+ public void onResult(QuerySearchResult result) {
+ result.shardTarget(dfsResult.shardTarget());
+ queryResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ executeFetchPhase();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
+ }
+ });
+ }
+
+ void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
+ }
+ this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
+ successulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ executeFetchPhase();
+ }
+ }
+
+ void executeFetchPhase() {
+ try {
+ innerExecuteFetchPhase();
+ } catch (Throwable e) {
+ listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
+ }
+ }
+
+ void innerExecuteFetchPhase() {
+ sortedShardList = searchPhaseController.sortDocs(queryResults);
+ searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
+
+ if (docIdsToLoad.asList().isEmpty()) {
+ finishHim();
+ return;
+ }
+
+ final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
+ int localOperations = 0;
+ for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ QuerySearchResult queryResult = queryResults.get(entry.index);
+ DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ localOperations++;
+ } else {
+ FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ }
+
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ QuerySearchResult queryResult = queryResults.get(entry.index);
+ DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ final QuerySearchResult queryResult = queryResults.get(entry.index);
+ final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ });
+ } else {
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ } catch (Throwable t) {
+ onFetchFailure(t, fetchSearchRequest, entry.index, queryResult.shardTarget(), counter);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final FetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
+ searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
+ @Override
+ public void onResult(FetchSearchResult result) {
+ result.shardTarget(shardTarget);
+ fetchResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
+ }
+ });
+ }
+
+ void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
+ }
+ this.addShardFailure(shardIndex, shardTarget, t);
+ successulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ void finishHim() {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", e, buildShardFailures());
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to reduce search", failure);
+ }
+ listener.onFailure(failure);
+ } finally {
+ releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
+ }
+ }
+
+ void innerFinishHim() throws Exception {
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java
new file mode 100644
index 0000000..1006b0b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchHelper.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.SearchPhaseResult;
+import org.elasticsearch.search.internal.InternalScrollSearchRequest;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Helper for building shard-level search requests and for encoding and decoding scroll ids.
+ */
+public abstract class TransportSearchHelper {
+
+ public static ShardSearchRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request, String[] filteringAliases, long nowInMillis) {
+ ShardSearchRequest shardRequest = new ShardSearchRequest(request, shardRouting, numberOfShards);
+ shardRequest.filteringAliases(filteringAliases);
+ shardRequest.nowInMillis(nowInMillis);
+ return shardRequest;
+ }
+
+ public static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
+ return new InternalScrollSearchRequest(request, id);
+ }
+
+ public static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
+ if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
+ return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
+ } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
+ return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults, attributes);
+ } else if (searchType == SearchType.SCAN) {
+ return buildScrollId(ParsedScrollId.SCAN, searchPhaseResults, attributes);
+ } else {
+            throw new ElasticsearchIllegalStateException("search_type [" + searchType + "] not supported");
+ }
+ }
+
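+    // The scroll id written below is, before Base64(URL_SAFE) encoding, a ';'-separated string:
+    //   type ; contextSize ; (searchContextId ':' nodeId ';')* attributeCount ; (key ':' value ';')*
+    // e.g. "queryThenFetch;2;101:node_1;102:node_2;1;total_hits:42;" (the ids here are made up).
+    // parseScrollId() below reverses this encoding.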
+ public static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
+ StringBuilder sb = new StringBuilder().append(type).append(';');
+ sb.append(searchPhaseResults.asList().size()).append(';');
+ for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
+ SearchPhaseResult searchPhaseResult = entry.value;
+ sb.append(searchPhaseResult.id()).append(':').append(searchPhaseResult.shardTarget().nodeId()).append(';');
+ }
+ if (attributes == null) {
+ sb.append("0;");
+ } else {
+ sb.append(attributes.size()).append(";");
+ for (Map.Entry<String, String> entry : attributes.entrySet()) {
+ sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';');
+ }
+ }
+ BytesRef bytesRef = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(sb, 0, sb.length(), bytesRef);
+
+ return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
+ }
+
+ public static ParsedScrollId parseScrollId(String scrollId) {
+ CharsRef spare = new CharsRef();
+ try {
+ byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
+ UnicodeUtil.UTF8toUTF16(decode, 0, decode.length, spare);
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to decode scrollId", e);
+ }
+ String[] elements = Strings.splitStringToArray(spare, ';');
+ if (elements.length < 2) {
+ throw new ElasticsearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]");
+ }
+
+ int index = 0;
+ String type = elements[index++];
+ int contextSize = Integer.parseInt(elements[index++]);
+ if (elements.length < contextSize + 2) {
+ throw new ElasticsearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]");
+ }
+
+ @SuppressWarnings({"unchecked"}) Tuple<String, Long>[] context = new Tuple[contextSize];
+ for (int i = 0; i < contextSize; i++) {
+ String element = elements[index++];
+ int sep = element.indexOf(':');
+ if (sep == -1) {
+ throw new ElasticsearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]");
+ }
+ context[i] = new Tuple<String, Long>(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
+ }
+ Map<String, String> attributes;
+ int attributesSize = Integer.parseInt(elements[index++]);
+ if (attributesSize == 0) {
+ attributes = ImmutableMap.of();
+ } else {
+ attributes = Maps.newHashMapWithExpectedSize(attributesSize);
+            for (int i = 0; i < attributesSize; i++) {
+                String element = elements[index++];
+                int sep = element.indexOf(':');
+                if (sep == -1) {
+                    throw new ElasticsearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]");
+                }
+                attributes.put(element.substring(0, sep), element.substring(sep + 1));
+            }
+ }
+ return new ParsedScrollId(scrollId, type, context, attributes);
+ }
+
+    private TransportSearchHelper() {
+        // utility class, not meant to be instantiated
+    }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java
new file mode 100644
index 0000000..6c6e8d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.QueryFetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
+
+/**
+ * Implements the QUERY_AND_FETCH search type, where the query and fetch phases are combined
+ * into a single round trip per shard.
+ */
+public class TransportSearchQueryAndFetchAction extends TransportSearchTypeAction {
+
+ @Inject
+ public TransportSearchQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool, clusterService, searchService, searchPhaseController);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ new AsyncAction(searchRequest, listener).start();
+ }
+
+ private class AsyncAction extends BaseAsyncAction<QueryFetchSearchResult> {
+
+ private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ super(request, listener);
+ }
+
+ @Override
+ protected String firstPhaseName() {
+ return "query_fetch";
+ }
+
+ @Override
+ protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QueryFetchSearchResult> listener) {
+ searchService.sendExecuteFetch(node, request, listener);
+ }
+
+ @Override
+ protected void moveToSecondPhase() throws Exception {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", e, buildShardFailures());
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to reduce search", failure);
+ }
+ listener.onFailure(failure);
+ }
+ }
+
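+        // QUERY_AND_FETCH completes in one round trip: the first-phase results already contain
+        // both query and fetch data, which is why merge() receives firstResults twice.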
+ private void innerFinishHim() throws IOException {
+ sortedShardList = searchPhaseController.sortDocs(firstResults);
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, firstResults);
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = buildScrollId(request.searchType(), firstResults, null);
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java
new file mode 100644
index 0000000..cdfe566
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchOperationThreading;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.FetchSearchRequest;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Implements the QUERY_THEN_FETCH search type: query every shard first, reduce the results,
+ * then fetch only the documents selected for the final response.
+ */
+public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction {
+
+ @Inject
+ public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool, clusterService, searchService, searchPhaseController);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ new AsyncAction(searchRequest, listener).start();
+ }
+
+ private class AsyncAction extends BaseAsyncAction<QuerySearchResult> {
+
+ final AtomicArray<FetchSearchResult> fetchResults;
+ final AtomicArray<IntArrayList> docIdsToLoad;
+
+ private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ super(request, listener);
+ fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length());
+ docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length());
+ }
+
+ @Override
+ protected String firstPhaseName() {
+ return "query";
+ }
+
+ @Override
+ protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QuerySearchResult> listener) {
+ searchService.sendExecuteQuery(node, request, listener);
+ }
+
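+        // sortDocs() reduces the per-shard query results into the globally sorted top docs, and
+        // fillDocIdsToLoad() records, per shard, which local doc ids made that cut; only those
+        // documents are fetched in the second phase.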
+ @Override
+ protected void moveToSecondPhase() {
+ sortedShardList = searchPhaseController.sortDocs(firstResults);
+ searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
+
+ if (docIdsToLoad.asList().isEmpty()) {
+ finishHim();
+ return;
+ }
+
+ final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
+
+ int localOperations = 0;
+ for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ QuerySearchResult queryResult = firstResults.get(entry.index);
+ DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ localOperations++;
+ } else {
+ FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ }
+
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ QuerySearchResult queryResult = firstResults.get(entry.index);
+ DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ final QuerySearchResult queryResult = firstResults.get(entry.index);
+ final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
+ if (node.id().equals(nodes.localNodeId())) {
+ final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ });
+ } else {
+ executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
+ }
+ } catch (Throwable t) {
+ onFetchFailure(t, fetchSearchRequest, entry.index, queryResult.shardTarget(), counter);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final FetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
+ searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
+ @Override
+ public void onResult(FetchSearchResult result) {
+ result.shardTarget(shardTarget);
+ fetchResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
+ }
+ });
+ }
+
+ void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
+ }
+ this.addShardFailure(shardIndex, shardTarget, t);
+ successulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ void finishHim() {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures());
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to reduce search", failure);
+ }
+ listener.onFailure(failure);
+ } finally {
+ releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
+ }
+ }
+
+ void innerFinishHim() throws Exception {
+ InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, fetchResults);
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java
new file mode 100644
index 0000000..b7e5c10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.FetchSearchResultProvider;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
+
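+/**
+ * Implements the SCAN search type: the initial request only opens per-shard search contexts
+ * and reports the total hit count; documents are returned by subsequent scroll requests.
+ */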
+public class TransportSearchScanAction extends TransportSearchTypeAction {
+
+ @Inject
+ public TransportSearchScanAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool, clusterService, searchService, searchPhaseController);
+ }
+
+ @Override
+ protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
+ new AsyncAction(searchRequest, listener).start();
+ }
+
+ private class AsyncAction extends BaseAsyncAction<QuerySearchResult> {
+
+ private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ super(request, listener);
+ }
+
+ @Override
+ protected String firstPhaseName() {
+ return "init_scan";
+ }
+
+ @Override
+ protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QuerySearchResult> listener) {
+ searchService.sendExecuteScan(node, request, listener);
+ }
+
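+        // The initial scan request matches documents but returns no hits; it only opens a search
+        // context per shard. The total hit count is carried in the scroll id's "total_hits"
+        // attribute so that later scroll rounds can keep reporting it.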
+ @Override
+ protected void moveToSecondPhase() throws Exception {
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray<? extends FetchSearchResultProvider>) AtomicArray.empty());
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = buildScrollId(request.searchType(), firstResults, ImmutableMap.of("total_hits", Long.toString(internalResponse.hits().totalHits())));
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java
new file mode 100644
index 0000000..f2e5c1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import org.apache.lucene.search.ScoreDoc;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.QueryFetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;
+
+/**
+ * Executes scroll continuation requests for QUERY_AND_FETCH style scrolls.
+ */
+public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final SearchServiceTransportAction searchService;
+
+ private final SearchPhaseController searchPhaseController;
+
+ @Inject
+ public TransportSearchScrollQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.searchService = searchService;
+ this.searchPhaseController = searchPhaseController;
+ }
+
+ public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
+ new AsyncAction(request, scrollId, listener).start();
+ }
+
+ private class AsyncAction {
+
+ private final SearchScrollRequest request;
+
+ private final ActionListener<SearchResponse> listener;
+
+ private final ParsedScrollId scrollId;
+
+ private final DiscoveryNodes nodes;
+
+ private volatile AtomicArray<ShardSearchFailure> shardFailures;
+ private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
+
+ private final AtomicInteger successfulOps;
+ private final AtomicInteger counter;
+
+ private final long startTime = System.currentTimeMillis();
+
+ private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
+ this.request = request;
+ this.listener = listener;
+ this.scrollId = scrollId;
+ this.nodes = clusterService.state().nodes();
+ this.successfulOps = new AtomicInteger(scrollId.getContext().length);
+ this.counter = new AtomicInteger(scrollId.getContext().length);
+
+ this.queryFetchResults = new AtomicArray<QueryFetchSearchResult>(scrollId.getContext().length);
+ }
+
+ protected final ShardSearchFailure[] buildShardFailures() {
+ if (shardFailures == null) {
+ return ShardSearchFailure.EMPTY_ARRAY;
+ }
+ List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
+ ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
+ for (int i = 0; i < failures.length; i++) {
+ failures[i] = entries.get(i).value;
+ }
+ return failures;
+ }
+
+        // we do our best to return the shard failures, but it's ok if it's not fully concurrently safe
+        // we simply try and return as much as possible
+ protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
+ if (shardFailures == null) {
+ shardFailures = new AtomicArray<ShardSearchFailure>(scrollId.getContext().length);
+ }
+ shardFailures.set(shardIndex, failure);
+ }
+
+ public void start() {
+ if (scrollId.getContext().length == 0) {
+ listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", null));
+ return;
+ }
+
+ int localOperations = 0;
+ Tuple<String, Long>[] context = scrollId.getContext();
+ for (int i = 0; i < context.length; i++) {
+ Tuple<String, Long> target = context[i];
+ DiscoveryNode node = nodes.get(target.v1());
+ if (node != null) {
+ if (nodes.localNodeId().equals(node.id())) {
+ localOperations++;
+ } else {
+ executePhase(i, node, target.v2());
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
+ }
+ successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+ }
+
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ Tuple<String, Long>[] context1 = scrollId.getContext();
+ for (int i = 0; i < context1.length; i++) {
+ Tuple<String, Long> target = context1[i];
+ DiscoveryNode node = nodes.get(target.v1());
+ if (node != null && nodes.localNodeId().equals(node.id())) {
+ executePhase(i, node, target.v2());
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ Tuple<String, Long>[] context1 = scrollId.getContext();
+ for (int i = 0; i < context1.length; i++) {
+ final Tuple<String, Long> target = context1[i];
+ final int shardIndex = i;
+ final DiscoveryNode node = nodes.get(target.v1());
+ if (node != null && nodes.localNodeId().equals(node.id())) {
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executePhase(shardIndex, node, target.v2());
+ }
+ });
+ } else {
+ executePhase(shardIndex, node, target.v2());
+ }
+ } catch (Throwable t) {
+ onPhaseFailure(t, target.v2(), shardIndex);
+ }
+ }
+ }
+ }
+ }
+ }
+
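+        // Runs one combined query+fetch round against the search context identified by searchId,
+        // which the scroll id recorded for this node.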
+ void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
+ searchService.sendExecuteFetch(node, internalScrollSearchRequest(searchId, request), new SearchServiceListener<QueryFetchSearchResult>() {
+ @Override
+ public void onResult(QueryFetchSearchResult result) {
+ queryFetchResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onPhaseFailure(t, searchId, shardIndex);
+ }
+ });
+ }
+
+ private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
+ if (logger.isDebugEnabled()) {
+                logger.debug("[{}] Failed to execute query+fetch phase", t, searchId);
+ }
+ addShardFailure(shardIndex, new ShardSearchFailure(t));
+ successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
+ }
+ }
+
+ private void innerFinishHim() {
+ ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(queryFetchResults);
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults);
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = request.scrollId();
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
+ System.currentTimeMillis() - startTime, buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java
new file mode 100644
index 0000000..4a5681e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.apache.lucene.search.ScoreDoc;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.FetchSearchRequest;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;
+
+/**
+ * Executes scroll continuation requests for QUERY_THEN_FETCH style scrolls: a query phase
+ * against each live search context, followed by a fetch of the selected documents.
+ */
+public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final SearchServiceTransportAction searchService;
+
+ private final SearchPhaseController searchPhaseController;
+
+ @Inject
+ public TransportSearchScrollQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.searchService = searchService;
+ this.searchPhaseController = searchPhaseController;
+ }
+
+ public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
+ new AsyncAction(request, scrollId, listener).start();
+ }
+
+ private class AsyncAction {
+
+ private final SearchScrollRequest request;
+
+ private final ActionListener<SearchResponse> listener;
+
+ private final ParsedScrollId scrollId;
+
+ private final DiscoveryNodes nodes;
+
+ private volatile AtomicArray<ShardSearchFailure> shardFailures;
+ final AtomicArray<QuerySearchResult> queryResults;
+ final AtomicArray<FetchSearchResult> fetchResults;
+
+ private volatile ScoreDoc[] sortedShardList;
+
+ private final AtomicInteger successfulOps;
+
+ private final long startTime = System.currentTimeMillis();
+
+ private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
+ this.request = request;
+ this.listener = listener;
+ this.scrollId = scrollId;
+ this.nodes = clusterService.state().nodes();
+ this.successfulOps = new AtomicInteger(scrollId.getContext().length);
+
+ this.queryResults = new AtomicArray<QuerySearchResult>(scrollId.getContext().length);
+ this.fetchResults = new AtomicArray<FetchSearchResult>(scrollId.getContext().length);
+ }
+
+ protected final ShardSearchFailure[] buildShardFailures() {
+ if (shardFailures == null) {
+ return ShardSearchFailure.EMPTY_ARRAY;
+ }
+ List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
+ ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
+ for (int i = 0; i < failures.length; i++) {
+ failures[i] = entries.get(i).value;
+ }
+ return failures;
+ }
+
+        // we do our best to return the shard failures, but it's ok if it's not fully concurrently safe
+        // we simply try and return as much as possible
+ protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
+ if (shardFailures == null) {
+ shardFailures = new AtomicArray<ShardSearchFailure>(scrollId.getContext().length);
+ }
+ shardFailures.set(shardIndex, failure);
+ }
+
+ public void start() {
+ if (scrollId.getContext().length == 0) {
+ listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", null));
+ return;
+ }
+ final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);
+
+ int localOperations = 0;
+ Tuple<String, Long>[] context = scrollId.getContext();
+ for (int i = 0; i < context.length; i++) {
+ Tuple<String, Long> target = context[i];
+ DiscoveryNode node = nodes.get(target.v1());
+ if (node != null) {
+ if (nodes.localNodeId().equals(node.id())) {
+ localOperations++;
+ } else {
+ executeQueryPhase(i, counter, node, target.v2());
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
+ }
+ successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ executeFetchPhase();
+ }
+ }
+ }
+
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ Tuple<String, Long>[] context1 = scrollId.getContext();
+ for (int i = 0; i < context1.length; i++) {
+ Tuple<String, Long> target = context1[i];
+ DiscoveryNode node = nodes.get(target.v1());
+ if (node != null && nodes.localNodeId().equals(node.id())) {
+ executeQueryPhase(i, counter, node, target.v2());
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ Tuple<String, Long>[] context1 = scrollId.getContext();
+ for (int i = 0; i < context1.length; i++) {
+ final Tuple<String, Long> target = context1[i];
+ final int shardIndex = i;
+ final DiscoveryNode node = nodes.get(target.v1());
+ if (node != null && nodes.localNodeId().equals(node.id())) {
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executeQueryPhase(shardIndex, counter, node, target.v2());
+ }
+ });
+ } else {
+ executeQueryPhase(shardIndex, counter, node, target.v2());
+ }
+ } catch (Throwable t) {
+ onQueryPhaseFailure(shardIndex, counter, target.v2(), t);
+ }
+ }
+ }
+ }
+ }
+ }
+
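+        // Query phase of one scroll round, addressed to the shard's open search context; the
+        // fetch phase starts once the counter accounts for every context.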
+ private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
+ searchService.sendExecuteQuery(node, internalScrollSearchRequest(searchId, request), new SearchServiceListener<QuerySearchResult>() {
+ @Override
+ public void onResult(QuerySearchResult result) {
+ queryResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ executeFetchPhase();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onQueryPhaseFailure(shardIndex, counter, searchId, t);
+ }
+ });
+ }
+
+ void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] Failed to execute query phase", t, searchId);
+ }
+ addShardFailure(shardIndex, new ShardSearchFailure(t));
+ successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ executeFetchPhase();
+ }
+ }
+
+ private void executeFetchPhase() {
+ sortedShardList = searchPhaseController.sortDocs(queryResults);
+ AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<IntArrayList>(queryResults.length());
+ searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
+
+            if (docIdsToLoad.asList().isEmpty()) {
+                finishHim();
+                return;
+            }
+
+ final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
+
+ for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
+ IntArrayList docIds = entry.value;
+ final QuerySearchResult querySearchResult = queryResults.get(entry.index);
+ FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, querySearchResult.id(), docIds);
+ DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
+ searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
+ @Override
+ public void onResult(FetchSearchResult result) {
+ result.shardTarget(querySearchResult.shardTarget());
+ fetchResults.set(entry.index, result);
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Failed to execute fetch phase", t);
+ }
+                        addShardFailure(entry.index, new ShardSearchFailure(t));
+                        successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+ });
+ }
+ }
+
+ private void finishHim() {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
+ }
+ }
+
+ private void innerFinishHim() {
+ InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
+ String scrollId = null;
+ if (request.scroll() != null) {
+ scrollId = request.scrollId();
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
+ System.currentTimeMillis() - startTime, buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java
new file mode 100644
index 0000000..8a4fd2d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import org.apache.lucene.search.ScoreDoc;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.fetch.QueryFetchSearchResult;
+import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;
+
+/**
+ * Executes scroll continuation requests for SCAN style scrolls.
+ */
+public class TransportSearchScrollScanAction extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final SearchServiceTransportAction searchService;
+
+ private final SearchPhaseController searchPhaseController;
+
+ @Inject
+ public TransportSearchScrollScanAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.searchService = searchService;
+ this.searchPhaseController = searchPhaseController;
+ }
+
+ public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
+ new AsyncAction(request, scrollId, listener).start();
+ }
+
+ private class AsyncAction {
+
+ private final SearchScrollRequest request;
+
+ private final ActionListener<SearchResponse> listener;
+
+ private final ParsedScrollId scrollId;
+
+ private final DiscoveryNodes nodes;
+
+ private volatile AtomicArray<ShardSearchFailure> shardFailures;
+ private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
+
+ private final AtomicInteger successfulOps;
+ private final AtomicInteger counter;
+ private final long startTime = System.currentTimeMillis();
+
+ private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
+ this.request = request;
+ this.listener = listener;
+ this.scrollId = scrollId;
+ this.nodes = clusterService.state().nodes();
+ this.successfulOps = new AtomicInteger(scrollId.getContext().length);
+ this.counter = new AtomicInteger(scrollId.getContext().length);
+
+ this.queryFetchResults = new AtomicArray<QueryFetchSearchResult>(scrollId.getContext().length);
+ }
+
+ protected final ShardSearchFailure[] buildShardFailures() {
+ if (shardFailures == null) {
+ return ShardSearchFailure.EMPTY_ARRAY;
+ }
+ List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
+ ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
+ for (int i = 0; i < failures.length; i++) {
+ failures[i] = entries.get(i).value;
+ }
+ return failures;
+ }
+
+        // we do our best to return the shard failures, but it's ok if it's not fully concurrently safe
+        // we simply try and return as much as possible
+ protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
+ if (shardFailures == null) {
+ shardFailures = new AtomicArray<ShardSearchFailure>(scrollId.getContext().length);
+ }
+ shardFailures.set(shardIndex, failure);
+ }
+
+ public void start() {
+            if (scrollId.getContext().length == 0) {
+                long totalHits = Long.parseLong(this.scrollId.getAttributes().get("total_hits"));
+                final InternalSearchResponse internalResponse = new InternalSearchResponse(
+                        new InternalSearchHits(InternalSearchHits.EMPTY, totalHits, 0.0f), null, null, null, false);
+                listener.onResponse(new SearchResponse(internalResponse, request.scrollId(), 0, 0, 0L, buildShardFailures()));
+                return;
+            }
+
+ int localOperations = 0;
+ Tuple<String, Long>[] context = scrollId.getContext();
+ for (int i = 0; i < context.length; i++) {
+ Tuple<String, Long> target = context[i];
+ DiscoveryNode node = nodes.get(target.v1());
+ if (node != null) {
+ if (nodes.localNodeId().equals(node.id())) {
+ localOperations++;
+ } else {
+ executePhase(i, node, target.v2());
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
+ }
+ successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+ }
+
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ Tuple<String, Long>[] context1 = scrollId.getContext();
+ for (int i = 0; i < context1.length; i++) {
+ Tuple<String, Long> target = context1[i];
+ DiscoveryNode node = nodes.get(target.v1());
+ if (node != null && nodes.localNodeId().equals(node.id())) {
+ executePhase(i, node, target.v2());
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ Tuple<String, Long>[] context1 = scrollId.getContext();
+ for (int i = 0; i < context1.length; i++) {
+ final Tuple<String, Long> target = context1[i];
+ final int shardIndex = i;
+ final DiscoveryNode node = nodes.get(target.v1());
+ if (node != null && nodes.localNodeId().equals(node.id())) {
+ try {
+ if (localAsync) {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ executePhase(shardIndex, node, target.v2());
+ }
+ });
+ } else {
+ executePhase(shardIndex, node, target.v2());
+ }
+ } catch (Throwable t) {
+ onPhaseFailure(t, target.v2(), shardIndex);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
+ searchService.sendExecuteScan(node, internalScrollSearchRequest(searchId, request), new SearchServiceListener<QueryFetchSearchResult>() {
+ @Override
+ public void onResult(QueryFetchSearchResult result) {
+ queryFetchResults.set(shardIndex, result);
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onPhaseFailure(t, searchId, shardIndex);
+ }
+ });
+ }
+
+ void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
+ if (logger.isDebugEnabled()) {
+                logger.debug("[{}] Failed to execute scan phase", t, searchId);
+ }
+ addShardFailure(shardIndex, new ShardSearchFailure(t));
+ successfulOps.decrementAndGet();
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ try {
+ innerFinishHim();
+ } catch (Throwable e) {
+ ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures());
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to reduce search", failure);
+ }
+ listener.onFailure(failure);
+ }
+ }
+
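+        // Scan returns hits in shard order without global sorting: the per-shard score docs are
+        // concatenated (tagged with their shard index) and handed to merge() as-is.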
+ private void innerFinishHim() throws IOException {
+ int numberOfHits = 0;
+ for (AtomicArray.Entry<QueryFetchSearchResult> entry : queryFetchResults.asList()) {
+ numberOfHits += entry.value.queryResult().topDocs().scoreDocs.length;
+ }
+ ScoreDoc[] docs = new ScoreDoc[numberOfHits];
+ int counter = 0;
+ for (AtomicArray.Entry<QueryFetchSearchResult> entry : queryFetchResults.asList()) {
+ ScoreDoc[] scoreDocs = entry.value.queryResult().topDocs().scoreDocs;
+ for (ScoreDoc scoreDoc : scoreDocs) {
+ scoreDoc.shardIndex = entry.index;
+ docs[counter++] = scoreDoc;
+ }
+ }
+ final InternalSearchResponse internalResponse = searchPhaseController.merge(docs, queryFetchResults, queryFetchResults);
+ ((InternalSearchHits) internalResponse.hits()).totalHits = Long.parseLong(this.scrollId.getAttributes().get("total_hits"));
+
+ for (AtomicArray.Entry<QueryFetchSearchResult> entry : queryFetchResults.asList()) {
+                if (entry.value.queryResult().topDocs().scoreDocs.length < entry.value.queryResult().size()) {
+                    // this shard returned fewer hits than requested, so it is exhausted; remove it from further scrolling
+                    queryFetchResults.set(entry.index, null);
+                }
+ }
+
+ String scrollId = null;
+ if (request.scroll() != null) {
+ // we rebuild the scroll id since we remove shards that we finished scrolling on
+ scrollId = TransportSearchHelper.buildScrollId(this.scrollId.getType(), queryFetchResults, this.scrollId.getAttributes()); // continue moving the total_hits
+ }
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
+ System.currentTimeMillis() - startTime, buildShardFailures()));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java
new file mode 100644
index 0000000..de61c72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search.type;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.apache.lucene.search.ScoreDoc;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.SearchPhaseResult;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.action.SearchServiceListener;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchResultProvider;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.action.search.type.TransportSearchHelper.internalSearchRequest;
+
+/**
+ * Base class for the search type transport actions. It drives a {@link BaseAsyncAction} that
+ * executes the first search phase across the relevant shards and then hands over to the
+ * second (reduce) phase.
+ */
+public abstract class TransportSearchTypeAction extends TransportAction<SearchRequest, SearchResponse> {
+
+ protected final ClusterService clusterService;
+
+ protected final SearchServiceTransportAction searchService;
+
+ protected final SearchPhaseController searchPhaseController;
+
+ public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.searchService = searchService;
+ this.searchPhaseController = searchPhaseController;
+ }
+
+ protected abstract class BaseAsyncAction<FirstResult extends SearchPhaseResult> {
+
+ protected final ActionListener<SearchResponse> listener;
+
+ protected final GroupShardsIterator shardsIts;
+
+ protected final SearchRequest request;
+
+ protected final ClusterState clusterState;
+ protected final DiscoveryNodes nodes;
+
+ protected final int expectedSuccessfulOps;
+ private final int expectedTotalOps;
+
+ protected final AtomicInteger successfulOps = new AtomicInteger();
+ private final AtomicInteger totalOps = new AtomicInteger();
+
+ protected final AtomicArray<FirstResult> firstResults;
+ private volatile AtomicArray<ShardSearchFailure> shardFailures;
+ private final Object shardFailuresMutex = new Object();
+ protected volatile ScoreDoc[] sortedShardList;
+
+ protected final long startTime = System.currentTimeMillis();
+
+ protected BaseAsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
+ this.request = request;
+ this.listener = listener;
+
+ this.clusterState = clusterService.state();
+ nodes = clusterState.nodes();
+
+ clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
+
+ String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
+
+ for (String index : concreteIndices) {
+ clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
+ }
+
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
+
+ shardsIts = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
+ expectedSuccessfulOps = shardsIts.size();
+ // shard groups without an active shard still count as 1 towards the expected total
+ expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
+
+ firstResults = new AtomicArray<FirstResult>(shardsIts.size());
+ }
+
+ public void start() {
+ if (expectedSuccessfulOps == 0) {
+ // no search shards to search on, bail with an empty response (this happens when searching
+ // across _all with no indices around, and is consistent with broadcast operations)
+ listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, System.currentTimeMillis() - startTime, ShardSearchFailure.EMPTY_ARRAY));
+ return;
+ }
+ request.beforeStart();
+ // count the local operations, and perform the non local ones
+ int localOperations = 0;
+ int shardIndex = -1;
+ for (final ShardIterator shardIt : shardsIts) {
+ shardIndex++;
+ final ShardRouting shard = shardIt.firstOrNull();
+ if (shard != null) {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ localOperations++;
+ } else {
+ // do the remote operation here, the localAsync flag is not relevant
+ performFirstPhase(shardIndex, shardIt);
+ }
+ } else {
+ // really, no shards active in this group
+ onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
+ }
+ }
+ // we have local operations, perform them now
+ if (localOperations > 0) {
+ if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
+ request.beforeLocalFork();
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ int shardIndex = -1;
+ for (final ShardIterator shardIt : shardsIts) {
+ shardIndex++;
+ final ShardRouting shard = shardIt.firstOrNull();
+ if (shard != null) {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ performFirstPhase(shardIndex, shardIt);
+ }
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
+ if (localAsync) {
+ request.beforeLocalFork();
+ }
+ shardIndex = -1;
+ for (final ShardIterator shardIt : shardsIts) {
+ shardIndex++;
+ final int fShardIndex = shardIndex;
+ final ShardRouting shard = shardIt.firstOrNull();
+ if (shard != null) {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ if (localAsync) {
+ try {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ performFirstPhase(fShardIndex, shardIt);
+ }
+ });
+ } catch (Throwable t) {
+ onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t);
+ }
+ } else {
+ performFirstPhase(fShardIndex, shardIt);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void performFirstPhase(final int shardIndex, final ShardIterator shardIt) {
+ performFirstPhase(shardIndex, shardIt, shardIt.nextOrNull());
+ }
+
+ void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
+ if (shard == null) {
+ // no more active shards... (we should not really get here, but just for safety)
+ onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
+ } else {
+ final DiscoveryNode node = nodes.get(shard.currentNodeId());
+ if (node == null) {
+ onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
+ } else {
+ String[] filteringAliases = clusterState.metaData().filteringAliases(shard.index(), request.indices());
+ sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime), new SearchServiceListener<FirstResult>() {
+ @Override
+ public void onResult(FirstResult result) {
+ onFirstPhaseResult(shardIndex, shard, result, shardIt);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
+ }
+ });
+ }
+ }
+ }
+
+ void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
+ result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
+ processFirstPhaseResult(shardIndex, shard, result);
+ // we need to increment successful ops before we compare against the exit condition; otherwise,
+ // if we are fast, we could update totalOps concurrently but get preempted, causing another
+ // thread to read a stale value from successfulOps if the second phase is very fast, e.g. count
+ successfulOps.incrementAndGet();
+ // also count the remaining shards of this group towards the total ops, since some may work
+ // and some may not; when that happens we break on total ops, so the count must stay accurate
+ final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
+ if (xTotalOps == expectedTotalOps) {
+ try {
+ innerMoveToSecondPhase();
+ } catch (Throwable e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
+ }
+ listener.onFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
+ }
+ }
+ }
+
+ void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, final ShardIterator shardIt, Throwable t) {
+ // we always add the shard failure for a specific shard instance
+ // we do make sure to clean it on a successful response from a shard
+ SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
+ addShardFailure(shardIndex, shardTarget, t);
+
+ if (totalOps.incrementAndGet() == expectedTotalOps) {
+ if (logger.isDebugEnabled()) {
+ if (t != null && !TransportActions.isShardNotAvailableException(t)) {
+ if (shard != null) {
+ logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
+ } else {
+ logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
+ }
+ }
+ }
+ if (successfulOps.get() == 0) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("All shards failed for phase: [{}]", firstPhaseName(), t);
+ }
+ // no successful ops, raise an exception
+ listener.onFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", buildShardFailures()));
+ } else {
+ try {
+ innerMoveToSecondPhase();
+ } catch (Throwable e) {
+ listener.onFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
+ }
+ }
+ } else {
+ final ShardRouting nextShard = shardIt.nextOrNull();
+ final boolean lastShard = nextShard == null;
+ // trace log this exception
+ if (logger.isTraceEnabled() && t != null) {
+ logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
+ }
+ if (!lastShard) {
+ try {
+ threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ performFirstPhase(shardIndex, shardIt, nextShard);
+ }
+ });
+ } catch (Throwable t1) {
+ onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
+ }
+ } else {
+ // no more shards active, add a failure
+ if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
+ if (t != null && !TransportActions.isShardNotAvailableException(t)) {
+ logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
+ }
+ }
+ }
+ }
+ }
+
+ private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, boolean lastShard) {
+ if (shard != null) {
+ return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
+ } else {
+ return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
+ }
+ }
+
+ /**
+ * Returns how long it took to execute the search, in milliseconds.
+ */
+ protected final long buildTookInMillis() {
+ return System.currentTimeMillis() - startTime;
+ }
+
+ protected final ShardSearchFailure[] buildShardFailures() {
+ AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
+ if (shardFailures == null) {
+ return ShardSearchFailure.EMPTY_ARRAY;
+ }
+ List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
+ ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
+ for (int i = 0; i < failures.length; i++) {
+ failures[i] = entries.get(i).value;
+ }
+ return failures;
+ }
+
+ protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
+ // we don't aggregate shard failures on non active shards (but do keep the header counts right)
+ if (TransportActions.isShardNotAvailableException(t)) {
+ return;
+ }
+
+ // lazily create the shard failures array, so in the common case of no failures we can
+ // cheaply build the empty shard failure list
+ if (shardFailures == null) {
+ synchronized (shardFailuresMutex) {
+ if (shardFailures == null) {
+ shardFailures = new AtomicArray<ShardSearchFailure>(shardsIts.size());
+ }
+ }
+ }
+ ShardSearchFailure failure = shardFailures.get(shardIndex);
+ if (failure == null) {
+ shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
+ } else {
+ // a failure is already present; try not to override it with an exception that is less
+ // meaningful, for example an illegal shard state exception
+ if (TransportActions.isReadOverrideException(t)) {
+ shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
+ }
+ }
+ }
+
+ /**
+ * Releases the search contexts of shards whose results are not referenced in docIdsToLoad.
+ */
+ protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
+ AtomicArray<IntArrayList> docIdsToLoad) {
+ if (docIdsToLoad == null) {
+ return;
+ }
+ // we only release search context that we did not fetch from if we are not scrolling
+ if (request.scroll() == null) {
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
+ if (docIdsToLoad.get(entry.index) == null) {
+ DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
+ if (node != null) { // should not happen (==null) but safeguard anyhow
+ searchService.sendFreeContext(node, entry.value.queryResult().id(), request);
+ }
+ }
+ }
+ }
+ }
+
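+ // hook for the concrete search type action to send the first phase request to a shard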
+ protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<FirstResult> listener);
+
+ protected final void processFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result) {
+ firstResults.set(shardIndex, result);
+
+ // clear a previous error on this shard group (note, this code is serialized per shardIndex
+ // value, so concurrency-wise it is ok to miss shard failures being created by another
+ // failure in #addShardFailure, because by definition that happens on *another* shardIndex)
+ AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
+ if (shardFailures != null) {
+ shardFailures.set(shardIndex, null);
+ }
+ }
+
+ final void innerMoveToSecondPhase() throws Exception {
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ boolean hadOne = false;
+ for (int i = 0; i < firstResults.length(); i++) {
+ FirstResult result = firstResults.get(i);
+ if (result == null) {
+ continue; // failure
+ }
+ if (hadOne) {
+ sb.append(",");
+ } else {
+ hadOne = true;
+ }
+ sb.append(result.shardTarget());
+ }
+
+ logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
+ }
+ moveToSecondPhase();
+ }
+
+ protected abstract void moveToSecondPhase() throws Exception;
+
+ protected abstract String firstPhaseName();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java
new file mode 100644
index 0000000..cbc1114
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import java.io.IOException;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+/**
+ * Internal suggest request executed directly against a specific index shard.
+ */
+final class ShardSuggestRequest extends BroadcastShardOperationRequest {
+
+ private BytesReference suggestSource;
+
+ ShardSuggestRequest() {
+ }
+
+ public ShardSuggestRequest(String index, int shardId, SuggestRequest request) {
+ super(index, shardId, request);
+ this.suggestSource = request.suggest();
+ }
+
+ public BytesReference suggest() {
+ return suggestSource;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ suggestSource = in.readBytesReference();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(suggestSource);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java
new file mode 100644
index 0000000..bb7731e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+
+/**
+ * Internal suggest response of a shard suggest request executed directly against a specific shard.
+ */
+class ShardSuggestResponse extends BroadcastShardOperationResponse {
+
+ private final Suggest suggest;
+
+ ShardSuggestResponse() {
+ this.suggest = new Suggest();
+ }
+
+ public ShardSuggestResponse(String index, int shardId, Suggest suggest) {
+ super(index, shardId);
+ this.suggest = suggest;
+ }
+
+ public Suggest getSuggest() {
+ return this.suggest;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ suggest.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ suggest.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java b/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java
new file mode 100644
index 0000000..b4ab65e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.search.suggest.Suggest;
+
+/**
+ * The suggest action, exposing the suggest API to client code.
+ */
+public class SuggestAction extends Action<SuggestRequest, SuggestResponse, SuggestRequestBuilder> {
+
+ public static final SuggestAction INSTANCE = new SuggestAction();
+ public static final String NAME = "suggest";
+
+ private SuggestAction() {
+ super(NAME);
+ }
+
+ @Override
+ public SuggestResponse newResponse() {
+ return new SuggestResponse(new Suggest());
+ }
+
+ @Override
+ public SuggestRequestBuilder newRequestBuilder(Client client) {
+ return new SuggestRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java b/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java
new file mode 100644
index 0000000..0ec09fe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+
+/**
+ * A request to get suggestions for corrections of phrases. Best created with
+ * {@link org.elasticsearch.client.Requests#suggestRequest(String...)}.
+ * <p>
+ * The request requires the suggest source to be set either using
+ * {@link #suggest(org.elasticsearch.common.bytes.BytesReference)} or {@link #suggest(String)}.
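+ * <p>
+ * A minimal usage sketch (the index name and suggest body are illustrative only):
+ * <pre>
+ * SuggestRequest request = new SuggestRequest("my_index");
+ * request.suggest(new BytesArray(
+ *         "{\"my-suggestion\": {\"text\": \"some tetx\", \"term\": {\"field\": \"body\"}}}"));
+ * </pre>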
+ *
+ * @see SuggestResponse
+ * @see org.elasticsearch.client.Client#suggest(SuggestRequest)
+ * @see org.elasticsearch.client.Requests#suggestRequest(String...)
+ */
+public final class SuggestRequest extends BroadcastOperationRequest<SuggestRequest> {
+
+ static final XContentType contentType = Requests.CONTENT_TYPE;
+
+ @Nullable
+ private String routing;
+
+ @Nullable
+ private String preference;
+
+ private BytesReference suggestSource;
+ private boolean suggestSourceUnsafe;
+
+ SuggestRequest() {
+ }
+
+ /**
+ * Constructs a new suggest request against the provided indices. No indices provided means it will
+ * run against all indices.
+ */
+ public SuggestRequest(String... indices) {
+ super(indices);
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ return validationException;
+ }
+
+ @Override
+ protected void beforeStart() {
+ if (suggestSourceUnsafe) {
+ suggest(suggestSource.copyBytesArray(), false);
+ }
+ }
+
+ /**
+ * The phrase to get correction suggestions for.
+ */
+ BytesReference suggest() {
+ return suggestSource;
+ }
+
+ /**
+ * Sets a new source for the suggest query.
+ */
+ public SuggestRequest suggest(BytesReference suggestSource) {
+ return suggest(suggestSource, false);
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public SuggestRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public SuggestRequest routing(String... routings) {
+ this.routing = Strings.arrayToCommaDelimitedString(routings);
+ return this;
+ }
+
+ public SuggestRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+ suggest(in.readBytesReference());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
+ out.writeBytesReference(suggestSource);
+ }
+
+ @Override
+ public String toString() {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(suggestSource, false);
+ } catch (Exception e) {
+ // ignore
+ }
+ return "[" + Arrays.toString(indices) + "]" + ", suggestSource[" + sSource + "]";
+ }
+
+ public SuggestRequest suggest(BytesReference suggestSource, boolean contentUnsafe) {
+ this.suggestSource = suggestSource;
+ this.suggestSourceUnsafe = contentUnsafe;
+ return this;
+ }
+
+ public SuggestRequest suggest(String source) {
+ return suggest(new BytesArray(source));
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java b/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java
new file mode 100644
index 0000000..fc798f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+
+import java.io.IOException;
+
+/**
+ * A suggest action request builder.
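+ * <p>
+ * A minimal usage sketch (index, suggestion and field names are illustrative only):
+ * <pre>
+ * SuggestResponse response = client.prepareSuggest("my_index")
+ *         .setSuggestText("some tetx")
+ *         .addSuggestion(SuggestBuilders.termSuggestion("my-suggestion").field("body"))
+ *         .execute().actionGet();
+ * </pre>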
+ */
+public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder<SuggestRequest, SuggestResponse, SuggestRequestBuilder> {
+
+ final SuggestBuilder suggest = new SuggestBuilder();
+
+ public SuggestRequestBuilder(Client client) {
+ super((InternalClient) client, new SuggestRequest());
+ }
+
+ /**
+ * Add a definition for suggestions to the request
+ */
+ public <T> SuggestRequestBuilder addSuggestion(SuggestionBuilder<T> suggestion) {
+ suggest.addSuggestion(suggestion);
+ return this;
+ }
+
+ /**
+ * A comma separated list of routing values to control the shards the search will be executed on.
+ */
+ public SuggestRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ public SuggestRequestBuilder setSuggestText(String globalText) {
+ this.suggest.setText(globalText);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomizing across shards. Can be set
+ * to <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards,
+ * or <tt>_shards:x,y</tt> to operate on shards x and y. A custom value guarantees that the same
+ * order will be used across different requests.
+ */
+ public SuggestRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ /**
+ * The routing values to control the shards that the search will be executed on.
+ */
+ public SuggestRequestBuilder setRouting(String... routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<SuggestResponse> listener) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(SuggestRequest.contentType);
+ suggest.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ request.suggest(builder.bytes());
+ } catch (IOException e) {
+ throw new ElasticsearchException("Unable to build suggestion request", e);
+ }
+
+ ((InternalClient) client).suggest(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java b/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java
new file mode 100644
index 0000000..24a8922
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
+
+/**
+ * The response of the suggest action.
+ */
+public final class SuggestResponse extends BroadcastOperationResponse {
+
+ private final Suggest suggest;
+
+ SuggestResponse(Suggest suggest) {
+ this.suggest = suggest;
+ }
+
+ SuggestResponse(Suggest suggest, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.suggest = suggest;
+ }
+
+ /**
+ * The suggestions of the phrase.
+ */
+ public Suggest getSuggest() {
+ return suggest;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ this.suggest.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ this.suggest.writeTo(out);
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ suggest.toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java
new file mode 100644
index 0000000..303915e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.SuggestPhase;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Defines the transport of a suggestion request across the cluster
+ */
+public class TransportSuggestAction extends TransportBroadcastOperationAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> {
+
+ private final IndicesService indicesService;
+
+ private final SuggestPhase suggestPhase;
+
+ @Inject
+ public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, SuggestPhase suggestPhase) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.suggestPhase = suggestPhase;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.SUGGEST;
+ }
+
+ @Override
+ protected String transportAction() {
+ return SuggestAction.NAME;
+ }
+
+ @Override
+ protected SuggestRequest newRequest() {
+ return new SuggestRequest();
+ }
+
+ @Override
+ protected ShardSuggestRequest newShardRequest() {
+ return new ShardSuggestRequest();
+ }
+
+ @Override
+ protected ShardSuggestRequest newShardRequest(ShardRouting shard, SuggestRequest request) {
+ return new ShardSuggestRequest(shard.index(), shard.id(), request);
+ }
+
+ @Override
+ protected ShardSuggestResponse newShardResponse() {
+ return new ShardSuggestResponse();
+ }
+
+ @Override
+ protected GroupShardsIterator shards(ClusterState clusterState, SuggestRequest request, String[] concreteIndices) {
+ Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
+ return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, SuggestRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, SuggestRequest request, String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
+ }
+
+ @Override
+ protected SuggestResponse newResponse(SuggestRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+ int successfulShards = 0;
+ int failedShards = 0;
+
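+ // group the per shard suggestions by suggestion name so they can be reduced into a single result below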
+ final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<String, List<Suggest.Suggestion>>();
+
+ List<ShardOperationFailedException> shardFailures = null;
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ Object shardResponse = shardsResponses.get(i);
+ if (shardResponse == null) {
+ // simply ignore non-active shards
+ } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
+ failedShards++;
+ if (shardFailures == null) {
+ shardFailures = newArrayList();
+ }
+ shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
+ } else {
+ Suggest suggest = ((ShardSuggestResponse) shardResponse).getSuggest();
+ Suggest.group(groupedSuggestions, suggest);
+ successfulShards++;
+ }
+ }
+
+ return new SuggestResponse(new Suggest(Suggest.reduce(groupedSuggestions)), shardsResponses.length(), successfulShards, failedShards, shardFailures);
+ }
+
+ @Override
+ protected ShardSuggestResponse shardOperation(ShardSuggestRequest request) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(request.shardId());
+ final Engine.Searcher searcher = indexShard.acquireSearcher("suggest");
+ XContentParser parser = null;
+ try {
+ BytesReference suggest = request.suggest();
+ if (suggest != null && suggest.length() > 0) {
+ parser = XContentFactory.xContent(suggest).createParser(suggest);
+ if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchIllegalArgumentException("suggest content missing");
+ }
+ final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(), request.index(), request.shardId());
+ final Suggest result = suggestPhase.execute(context, searcher.reader());
+ return new ShardSuggestResponse(request.index(), request.shardId(), result);
+ }
+ return new ShardSuggestResponse(request.index(), request.shardId(), new Suggest());
+ } catch (Throwable ex) {
+ throw new ElasticsearchException("failed to execute suggest", ex);
+ } finally {
+ searcher.release();
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/suggest/package-info.java b/src/main/java/org/elasticsearch/action/suggest/package-info.java
new file mode 100644
index 0000000..a2c0f48
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/suggest/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Suggest action.
+ */
+package org.elasticsearch.action.suggest; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java b/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java
new file mode 100644
index 0000000..8072e19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+
+/**
+ * An {@link AdapterActionFuture} that also keeps track of listeners to notify when the
+ * future completes.
+ */
+public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {
+
+ final boolean listenerThreaded;
+
+ final ThreadPool threadPool;
+
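+ // either a single listener (an ActionListener or a Runnable) or a List of them; kept as a
+ // plain Object to avoid allocating a list in the common single listener case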
+ volatile Object listeners;
+
+ boolean executedListeners = false;
+
+ protected AbstractListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) {
+ this.listenerThreaded = listenerThreaded;
+ this.threadPool = threadPool;
+ }
+
+ public boolean listenerThreaded() {
+ return false; // we control execution of the listener
+ }
+
+ public ThreadPool threadPool() {
+ return threadPool;
+ }
+
+ public void addListener(final ActionListener<T> listener) {
+ internalAddListener(listener);
+ }
+
+ public void addListener(final Runnable listener) {
+ internalAddListener(listener);
+ }
+
+ public void internalAddListener(Object listener) {
+ boolean executeImmediate = false;
+ synchronized (this) {
+ if (executedListeners) {
+ executeImmediate = true;
+ } else {
+ Object listeners = this.listeners;
+ if (listeners == null) {
+ listeners = listener;
+ } else if (listeners instanceof List) {
+ ((List) this.listeners).add(listener);
+ } else {
+ Object orig = listeners;
+ listeners = Lists.newArrayListWithCapacity(2);
+ ((List) listeners).add(orig);
+ ((List) listeners).add(listener);
+ }
+ this.listeners = listeners;
+ }
+ }
+ if (executeImmediate) {
+ executeListener(listener);
+ }
+ }
+
+ @Override
+ protected void done() {
+ super.done();
+ synchronized (this) {
+ executedListeners = true;
+ }
+ Object listeners = this.listeners;
+ if (listeners != null) {
+ if (listeners instanceof List) {
+ List list = (List) listeners;
+ for (Object listener : list) {
+ executeListener(listener);
+ }
+ } else {
+ executeListener(listeners);
+ }
+ }
+ }
+
+ private void executeListener(final Object listener) {
+ if (listenerThreaded) {
+ if (listener instanceof Runnable) {
+ threadPool.generic().execute((Runnable) listener);
+ } else {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ ActionListener<T> lst = (ActionListener<T>) listener;
+ try {
+ lst.onResponse(actionGet());
+ } catch (ElasticsearchException e) {
+ lst.onFailure(e);
+ }
+ }
+ });
+ }
+ } else {
+ if (listener instanceof Runnable) {
+ ((Runnable) listener).run();
+ } else {
+ ActionListener<T> lst = (ActionListener<T>) listener;
+ try {
+ lst.onResponse(actionGet());
+ } catch (ElasticsearchException e) {
+ lst.onFailure(e);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java b/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java
new file mode 100644
index 0000000..f5acc4f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.BaseFuture;
+import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * A {@link BaseFuture} based {@link ActionFuture} implementation that adapts a listener
+ * response of type L into a future result of type T.
+ */
+public abstract class AdapterActionFuture<T, L> extends BaseFuture<T> implements ActionFuture<T>, ActionListener<L> {
+
+ private Throwable rootFailure;
+
+ @Override
+ public T actionGet() throws ElasticsearchException {
+ try {
+ return get();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ElasticsearchIllegalStateException("Future got interrupted", e);
+ } catch (ExecutionException e) {
+ throw rethrowExecutionException(e);
+ }
+ }
+
+ @Override
+ public T actionGet(String timeout) throws ElasticsearchException {
+ return actionGet(TimeValue.parseTimeValue(timeout, null));
+ }
+
+ @Override
+ public T actionGet(long timeoutMillis) throws ElasticsearchException {
+ return actionGet(timeoutMillis, TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public T actionGet(TimeValue timeout) throws ElasticsearchException {
+ return actionGet(timeout.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException {
+ try {
+ return get(timeout, unit);
+ } catch (TimeoutException e) {
+ throw new ElasticsearchTimeoutException(e.getMessage());
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ElasticsearchIllegalStateException("Future got interrupted", e);
+ } catch (ExecutionException e) {
+ throw rethrowExecutionException(e);
+ }
+ }
+
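+ // unwrap the execution exception: surface a root ElasticsearchException directly, otherwise
+ // wrap the cause in an UncategorizedExecutionException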
+ static ElasticsearchException rethrowExecutionException(ExecutionException e) {
+ if (e.getCause() instanceof ElasticsearchException) {
+ ElasticsearchException esEx = (ElasticsearchException) e.getCause();
+ Throwable root = esEx.unwrapCause();
+ if (root instanceof ElasticsearchException) {
+ return (ElasticsearchException) root;
+ }
+ return new UncategorizedExecutionException("Failed execution", root);
+ } else {
+ return new UncategorizedExecutionException("Failed execution", e);
+ }
+ }
+
+ @Override
+ public void onResponse(L result) {
+ set(convert(result));
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ setException(e);
+ }
+
+ protected abstract T convert(L listenerResponse);
+
+ @Override
+ public Throwable getRootFailure() {
+ return rootFailure;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
new file mode 100644
index 0000000..c97d726
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Encapsulates the logic of whether an index should be automatically created when a request
+ * references it, driven by the <tt>action.auto_create_index</tt> setting. The setting accepts
+ * <tt>true</tt>, <tt>false</tt>, or a comma separated list of index patterns, each optionally
+ * prefixed with <tt>+</tt> (allow) or <tt>-</tt> (deny), evaluated in order.
+ */
+public class AutoCreateIndex {
+
+ private final boolean needToCheck;
+ private final boolean globallyDisabled;
+ private final String[] matches;
+ private final String[] matches2;
+
+ public AutoCreateIndex(Settings settings) {
+ String value = settings.get("action.auto_create_index");
+ if (value == null || Booleans.isExplicitTrue(value)) {
+ needToCheck = true;
+ globallyDisabled = false;
+ matches = null;
+ matches2 = null;
+ } else if (Booleans.isExplicitFalse(value)) {
+ needToCheck = false;
+ globallyDisabled = true;
+ matches = null;
+ matches2 = null;
+ } else {
+ needToCheck = true;
+ globallyDisabled = false;
+ matches = Strings.commaDelimitedListToStringArray(value);
+ matches2 = new String[matches.length];
+ for (int i = 0; i < matches.length; i++) {
+ matches2[i] = matches[i].substring(1);
+ }
+ }
+ }
+
+ /**
+ * Do we really need to check if an index should be auto created?
+ */
+ public boolean needToCheck() {
+ return this.needToCheck;
+ }
+
+ /**
+ * Should the index be auto created?
+ */
+ public boolean shouldAutoCreate(String index, ClusterState state) {
+ if (!needToCheck) {
+ return false;
+ }
+ if (state.metaData().hasConcreteIndex(index)) {
+ return false;
+ }
+ if (globallyDisabled) {
+ return false;
+ }
+ // matches not set, default value of "true"
+ if (matches == null) {
+ return true;
+ }
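+ // evaluate the patterns in order; e.g. with the hypothetical value "+aaa*,-bbb*" the index
+ // "aaa1" would be auto created while "bbb1" would not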
+ for (int i = 0; i < matches.length; i++) {
+ char c = matches[i].charAt(0);
+ if (c == '-') {
+ if (Regex.simpleMatch(matches2[i], index)) {
+ return false;
+ }
+ } else if (c == '+') {
+ if (Regex.simpleMatch(matches2[i], index)) {
+ return true;
+ }
+ } else {
+ if (Regex.simpleMatch(matches[i], index)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
new file mode 100644
index 0000000..3a7e900
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+
+import static org.elasticsearch.ExceptionsHelper.detailedMessage;
+
+/**
+ * The default implementation of {@link ShardOperationFailedException}, carrying the index,
+ * shard id, failure reason and rest status of a failed shard level operation.
+ */
+public class DefaultShardOperationFailedException implements ShardOperationFailedException {
+
+ private String index;
+
+ private int shardId;
+
+ private String reason;
+
+ private RestStatus status;
+
+ private DefaultShardOperationFailedException() {
+
+ }
+
+ public DefaultShardOperationFailedException(IndexShardException e) {
+ this.index = e.shardId().index().name();
+ this.shardId = e.shardId().id();
+ this.reason = detailedMessage(e);
+ this.status = e.status();
+ }
+
+ public DefaultShardOperationFailedException(String index, int shardId, Throwable t) {
+ this.index = index;
+ this.shardId = shardId;
+ this.reason = detailedMessage(t);
+ if (t instanceof ElasticsearchException) {
+ status = ((ElasticsearchException) t).status();
+ } else {
+ status = RestStatus.INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ @Override
+ public String index() {
+ return this.index;
+ }
+
+ @Override
+ public int shardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public String reason() {
+ return this.reason;
+ }
+
+ @Override
+ public RestStatus status() {
+ return status;
+ }
+
+ public static DefaultShardOperationFailedException readShardOperationFailed(StreamInput in) throws IOException {
+ DefaultShardOperationFailedException exp = new DefaultShardOperationFailedException();
+ exp.readFrom(in);
+ return exp;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ index = in.readString();
+ }
+ shardId = in.readVInt();
+ reason = in.readString();
+ status = RestStatus.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (index == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(index);
+ }
+ out.writeVInt(shardId);
+ out.writeString(reason);
+ RestStatus.writeTo(out, status);
+ }
+
+ @Override
+ public String toString() {
+ return "[" + index + "][" + shardId + "] failed, reason [" + reason + "]";
+ }
+}
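Illustrative usage sketch (outside the patch; names are hypothetical): a failure built from a plain Throwable falls back to INTERNAL_SERVER_ERROR, while an ElasticsearchException cause contributes its own status.

    DefaultShardOperationFailedException failure =
            new DefaultShardOperationFailedException("my_index", 0, new RuntimeException("boom"));
    assert failure.status() == RestStatus.INTERNAL_SERVER_ERROR; // plain Throwable, no status of its own
    System.out.println(failure); // prints something like: [my_index][0] failed, reason [...]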
diff --git a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
new file mode 100644
index 0000000..0324114
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ * Helper for dealing with destructive operations and wildcard usage.
+ */
+public final class DestructiveOperations implements NodeSettingsService.Listener {
+
+ /**
+ * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
+ */
+ public static final String REQUIRES_NAME = "action.destructive_requires_name";
+
+ private final ESLogger logger;
+ private volatile boolean destructiveRequiresName;
+
+ // TODO: Turn into a component that can be reused and wired up into all the transport actions where
+    // this helper logic is required. Note: the logger is passed in as an argument, otherwise the same log
+    // statement is printed several times; this can be removed once this becomes a component.
+ public DestructiveOperations(ESLogger logger, Settings settings, NodeSettingsService nodeSettingsService) {
+ this.logger = logger;
+ destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false);
+ nodeSettingsService.addListener(this);
+ }
+
+ /**
+     * Fail if there is wildcard usage in the indices and explicit names are required for destructive operations.
+ */
+ public void failDestructive(String[] aliasesOrIndices) {
+ if (!destructiveRequiresName) {
+ return;
+ }
+
+ if (aliasesOrIndices == null || aliasesOrIndices.length == 0) {
+ throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed");
+ } else if (aliasesOrIndices.length == 1) {
+ if (hasWildcardUsage(aliasesOrIndices[0])) {
+ throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed");
+ }
+ } else {
+ for (String aliasesOrIndex : aliasesOrIndices) {
+ if (hasWildcardUsage(aliasesOrIndex)) {
+ throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed");
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onRefreshSettings(Settings settings) {
+        boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName);
+        if (destructiveRequiresName != newValue) {
+            logger.info("updating [{}] from [{}] to [{}]", DestructiveOperations.REQUIRES_NAME, destructiveRequiresName, newValue);
+ this.destructiveRequiresName = newValue;
+ }
+ }
+
+ private static boolean hasWildcardUsage(String aliasOrIndex) {
+ return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1;
+ }
+
+}
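Illustrative sketch (outside the patch; assumes a NodeSettingsService built from the same settings): with the setting enabled, destructive requests naming indices explicitly pass while wildcards and _all are rejected.

    Settings settings = ImmutableSettings.settingsBuilder()
            .put(DestructiveOperations.REQUIRES_NAME, true)
            .build();
    DestructiveOperations ops = new DestructiveOperations(
            ESLoggerFactory.getLogger("destructive"), settings, new NodeSettingsService(settings));
    ops.failDestructive(new String[]{"logs-2014-06"}); // explicit name: returns normally
    ops.failDestructive(new String[]{"logs-*"});       // wildcard: throws ElasticsearchIllegalArgumentException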
diff --git a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
new file mode 100644
index 0000000..b05f774
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.rest.RestRequest;
+
+import java.io.IOException;
+
+/**
+ * Controls how to deal with concrete indices that are unavailable (closed or missing), how wildcard expressions
+ * are expanded (to open, closed or all indices) and how to deal with wildcard expressions that resolve into no
+ * concrete indices.
+ */
+public class IndicesOptions {
+
+ private static final IndicesOptions[] VALUES;
+
+ static {
+ byte max = 1 << 4;
+ VALUES = new IndicesOptions[max];
+ for (byte id = 0; id < max; id++) {
+ VALUES[id] = new IndicesOptions(id);
+ }
+ }
+
+ private final byte id;
+
+ private IndicesOptions(byte id) {
+ this.id = id;
+ }
+
+ /**
+ * @return Whether specified concrete indices should be ignored when unavailable (missing or closed)
+ */
+ public boolean ignoreUnavailable() {
+ return (id & 1) != 0;
+ }
+
+ /**
+ * @return Whether to ignore if a wildcard indices expression resolves into no concrete indices.
+     * The `_all` string and an empty list of indices also count as wildcard expressions.
+ */
+ public boolean allowNoIndices() {
+ return (id & 2) != 0;
+ }
+
+ /**
+     * @return Whether wildcard indices expressions should be expanded into open indices
+ */
+ public boolean expandWildcardsOpen() {
+ return (id & 4) != 0;
+ }
+
+ /**
+     * @return Whether wildcard indices expressions should be expanded into closed indices
+ */
+ public boolean expandWildcardsClosed() {
+ return (id & 8) != 0;
+ }
+
+ public void writeIndicesOptions(StreamOutput out) throws IOException {
+ out.write(id);
+ }
+
+ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException {
+ byte id = in.readByte();
+        if (id < 0 || id >= VALUES.length) {
+            throw new ElasticsearchIllegalArgumentException("No valid indices options id: " + id);
+ }
+ return VALUES[id];
+ }
+
+ public static IndicesOptions fromOptions(boolean ignoreUnavailable, boolean allowNoIndices, boolean expandToOpenIndices, boolean expandToClosedIndices) {
+ byte id = toByte(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices);
+ return VALUES[id];
+ }
+
+ public static IndicesOptions fromRequest(RestRequest request, IndicesOptions defaultSettings) {
+ String sWildcards = request.param("expand_wildcards");
+ String sIgnoreUnavailable = request.param("ignore_unavailable");
+ String sAllowNoIndices = request.param("allow_no_indices");
+ if (sWildcards == null && sIgnoreUnavailable == null && sAllowNoIndices == null) {
+ return defaultSettings;
+ }
+
+ boolean expandWildcardsOpen = defaultSettings.expandWildcardsOpen();
+ boolean expandWildcardsClosed = defaultSettings.expandWildcardsClosed();
+ if (sWildcards != null) {
+ String[] wildcards = Strings.splitStringByCommaToArray(sWildcards);
+ for (String wildcard : wildcards) {
+ if ("open".equals(wildcard)) {
+ expandWildcardsOpen = true;
+ } else if ("closed".equals(wildcard)) {
+ expandWildcardsClosed = true;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+ }
+ }
+ }
+
+ return fromOptions(
+ toBool(sIgnoreUnavailable, defaultSettings.ignoreUnavailable()),
+ toBool(sAllowNoIndices, defaultSettings.allowNoIndices()),
+ expandWildcardsOpen,
+ expandWildcardsClosed
+ );
+ }
+
+ /**
+     * @return indices options that require every specified index to exist, expand wildcards only to open indices and
+     * allow wildcard expressions to resolve into no indices (without returning an error).
+ */
+ public static IndicesOptions strict() {
+ return VALUES[6];
+ }
+
+ /**
+ * @return indices options that ignore unavailable indices, expand wildcards only to open indices and
+     * allow wildcard expressions to resolve into no indices (without returning an error).
+ */
+ public static IndicesOptions lenient() {
+ return VALUES[7];
+ }
+
+ private static byte toByte(boolean ignoreUnavailable, boolean allowNoIndices, boolean wildcardExpandToOpen, boolean wildcardExpandToClosed) {
+ byte id = 0;
+ if (ignoreUnavailable) {
+ id |= 1;
+ }
+ if (allowNoIndices) {
+ id |= 2;
+ }
+ if (wildcardExpandToOpen) {
+ id |= 4;
+ }
+ if (wildcardExpandToClosed) {
+ id |= 8;
+ }
+ return id;
+ }
+
+ private static boolean toBool(String sValue, boolean defaultValue) {
+ if (sValue == null) {
+ return defaultValue;
+ }
+ return !(sValue.equals("false") || sValue.equals("0") || sValue.equals("off"));
+ }
+
+}
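Because all sixteen flag combinations are pre-built into VALUES, fromOptions returns interned instances and reference equality holds. A small sketch (outside the patch):

    IndicesOptions strict = IndicesOptions.fromOptions(false, true, true, false);
    assert strict == IndicesOptions.strict();   // allowNoIndices | expandWildcardsOpen = 2 | 4 = 6
    IndicesOptions lenient = IndicesOptions.fromOptions(true, true, true, false);
    assert lenient == IndicesOptions.lenient(); // adds the ignoreUnavailable bit: 1 | 2 | 4 = 7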
diff --git a/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java
new file mode 100644
index 0000000..8473bcf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+/**
+ *
+ */
+public class PlainActionFuture<T> extends AdapterActionFuture<T, T> {
+
+ public static <T> PlainActionFuture<T> newFuture() {
+ return new PlainActionFuture<T>();
+ }
+
+ @Override
+ protected T convert(T listenerResponse) {
+ return listenerResponse;
+ }
+}
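Since the adapter base class is both an ActionListener and an ActionFuture, a PlainActionFuture can be handed to an asynchronous action and then blocked on. Sketch (outside the patch; someAction, request and SomeResponse are hypothetical):

    PlainActionFuture<SomeResponse> future = PlainActionFuture.newFuture();
    someAction.execute(request, future);        // the future receives onResponse/onFailure
    SomeResponse response = future.actionGet(); // blocks until a response or failure arrives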
diff --git a/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java b/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java
new file mode 100644
index 0000000..3d6cb28
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ *
+ */
+public class PlainListenableActionFuture<T> extends AbstractListenableActionFuture<T, T> {
+
+ public PlainListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) {
+ super(listenerThreaded, threadPool);
+ }
+
+ @Override
+ protected T convert(T response) {
+ return response;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java b/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java
new file mode 100644
index 0000000..464ad36
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+
+public class QuerySourceBuilder implements ToXContent {
+
+ private QueryBuilder queryBuilder;
+
+ private BytesReference queryBinary;
+
+ public QuerySourceBuilder setQuery(QueryBuilder query) {
+ this.queryBuilder = query;
+ return this;
+ }
+
+ public QuerySourceBuilder setQuery(BytesReference queryBinary) {
+ this.queryBinary = queryBinary;
+ return this;
+ }
+
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ }
+
+ if (queryBinary != null) {
+ if (XContentFactory.xContentType(queryBinary) == builder.contentType()) {
+ builder.rawField("query", queryBinary);
+ } else {
+ builder.field("query_binary", queryBinary);
+ }
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ public BytesReference buildAsBytes(XContentType contentType) throws SearchSourceBuilderException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ toXContent(builder, ToXContent.EMPTY_PARAMS);
+ return builder.bytes();
+ } catch (Exception e) {
+ throw new SearchSourceBuilderException("Failed to build search source", e);
+ }
+ }
+}
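Illustrative sketch (outside the patch), building a JSON query source with the standard QueryBuilders helper from org.elasticsearch.index.query:

    BytesReference source = new QuerySourceBuilder()
            .setQuery(QueryBuilders.termQuery("user", "kimchy"))
            .buildAsBytes(XContentType.JSON);
    // source now holds: {"query":{"term":{"user":"kimchy"}}}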
diff --git a/src/main/java/org/elasticsearch/action/support/TransportAction.java b/src/main/java/org/elasticsearch/action/support/TransportAction.java
new file mode 100644
index 0000000..6bbc1a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/TransportAction.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.*;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
+
+/**
+ *
+ */
+public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
+
+ protected final ThreadPool threadPool;
+
+ protected TransportAction(Settings settings, ThreadPool threadPool) {
+ super(settings);
+ this.threadPool = threadPool;
+ }
+
+ public ActionFuture<Response> execute(Request request) throws ElasticsearchException {
+ PlainActionFuture<Response> future = newFuture();
+        // since we don't have a listener, and we release a possible lock with the future,
+        // there is no need to execute it under a listener thread
+ request.listenerThreaded(false);
+ execute(request, future);
+ return future;
+ }
+
+ public void execute(Request request, ActionListener<Response> listener) {
+ if (request.listenerThreaded()) {
+ listener = new ThreadedActionListener<Response>(threadPool, listener, logger);
+ }
+ ActionRequestValidationException validationException = request.validate();
+ if (validationException != null) {
+ listener.onFailure(validationException);
+ return;
+ }
+ try {
+ doExecute(request, listener);
+ } catch (Throwable e) {
+ logger.trace("Error during transport action execution.", e);
+ listener.onFailure(e);
+ }
+ }
+
+ protected abstract void doExecute(Request request, ActionListener<Response> listener);
+
+ static final class ThreadedActionListener<Response> implements ActionListener<Response> {
+
+ private final ThreadPool threadPool;
+
+ private final ActionListener<Response> listener;
+
+ private final ESLogger logger;
+
+ ThreadedActionListener(ThreadPool threadPool, ActionListener<Response> listener, ESLogger logger) {
+ this.threadPool = threadPool;
+ this.listener = listener;
+ this.logger = logger;
+ }
+
+ @Override
+ public void onResponse(final Response response) {
+ try {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ listener.onResponse(response);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ }
+ });
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Can not run threaded action, exectuion rejected [{}] running on current thread", listener);
+ /* we don't care if that takes long since we are shutting down. But if we not respond somebody could wait
+ * for the response on the listener side which could be a remote machine so make sure we push it out there.*/
+ try {
+ listener.onResponse(response);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(final Throwable e) {
+ try {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ listener.onFailure(e);
+ }
+ });
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Can not run threaded action, exectuion rejected for listener [{}] running on current thread", listener);
+ /* we don't care if that takes long since we are shutting down. But if we not respond somebody could wait
+ * for the response on the listener side which could be a remote machine so make sure we push it out there.*/
+ listener.onFailure(e);
+ }
+ }
+ }
+}
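The two entry points, sketched with hypothetical names (outside the patch): the future-based variant blocks the caller, while the listener variant routes validation failures and execution errors to onFailure.

    // blocking
    SomeResponse response = action.execute(request).actionGet();

    // non-blocking
    action.execute(request, new ActionListener<SomeResponse>() {
        @Override
        public void onResponse(SomeResponse response) {
            // handle the response
        }

        @Override
        public void onFailure(Throwable t) {
            // also invoked for ActionRequestValidationException
        }
    });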
diff --git a/src/main/java/org/elasticsearch/action/support/TransportActions.java b/src/main/java/org/elasticsearch/action/support/TransportActions.java
new file mode 100644
index 0000000..0b4b72b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/TransportActions.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.shard.IllegalIndexShardStateException;
+import org.elasticsearch.indices.IndexMissingException;
+
+/**
+ */
+public class TransportActions {
+
+ public static boolean isShardNotAvailableException(Throwable t) {
+ Throwable actual = ExceptionsHelper.unwrapCause(t);
+ if (actual instanceof IllegalIndexShardStateException) {
+ return true;
+ }
+ if (actual instanceof IndexMissingException) {
+ return true;
+ }
+ if (actual instanceof IndexShardMissingException) {
+ return true;
+ }
+ if (actual instanceof NoShardAvailableActionException) {
+ return true;
+ }
+ return false;
+ }
+
+ /**
+     * For read operations: whether this failure should override an already recorded failure.
+ */
+ public static boolean isReadOverrideException(Throwable t) {
+ if (isShardNotAvailableException(t)) {
+ return false;
+ }
+ return true;
+ }
+}
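Typical caller-side sketch (outside the patch; shardOperation and shardRequest are hypothetical): shard-not-available failures are treated as retryable rather than recorded.

    try {
        shardOperation(shardRequest);
    } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
            // shard is missing, closed or in an illegal state: try the next shard copy
        } else {
            // a real failure: record it against the shard
        }
    }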
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java
new file mode 100644
index 0000000..a0d17f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class BroadcastOperationRequest<T extends BroadcastOperationRequest> extends ActionRequest<T> {
+
+ protected String[] indices;
+
+ private BroadcastOperationThreading operationThreading = BroadcastOperationThreading.THREAD_PER_SHARD;
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ protected BroadcastOperationRequest() {
+
+ }
+
+ protected BroadcastOperationRequest(String[] indices) {
+ this.indices = indices;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T indices(String... indices) {
+ this.indices = indices;
+ return (T) this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ /**
+ * Controls the operation threading model.
+ */
+ public BroadcastOperationThreading operationThreading() {
+ return operationThreading;
+ }
+
+ /**
+ * Controls the operation threading model.
+ */
+ @SuppressWarnings("unchecked")
+ public final T operationThreading(BroadcastOperationThreading operationThreading) {
+ this.operationThreading = operationThreading;
+ return (T) this;
+ }
+
+ /**
+ * Controls the operation threading model.
+ */
+ public T operationThreading(String operationThreading) {
+ return operationThreading(BroadcastOperationThreading.fromString(operationThreading, this.operationThreading));
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return (T) this;
+ }
+
+ protected void beforeStart() {
+
+ }
+
+ protected void beforeLocalFork() {
+
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(indices);
+ out.writeByte(operationThreading.id());
+ indicesOptions.writeIndicesOptions(out);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ operationThreading = BroadcastOperationThreading.fromId(in.readByte());
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ }
+}
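A hypothetical concrete request (outside the patch) showing the serialization contract: subclasses call super first, so indices, threading model and indices options are streamed before their own fields, and readFrom must mirror the writeTo order.

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    public class ShardStatusRequest extends BroadcastOperationRequest<ShardStatusRequest> {

        private boolean detailed;

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out); // indices, operation threading, indices options
            out.writeBoolean(detailed);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            detailed = in.readBoolean();
        }
    }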
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java
new file mode 100644
index 0000000..df8738e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public abstract class BroadcastOperationRequestBuilder<Request extends BroadcastOperationRequest<Request>, Response extends BroadcastOperationResponse, RequestBuilder extends BroadcastOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected BroadcastOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Controls the operation threading model.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setOperationThreading(BroadcastOperationThreading operationThreading) {
+ request.operationThreading(operationThreading);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Controls the operation threading model.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setOperationThreading(String operationThreading) {
+ request.operationThreading(operationThreading);
+ return (RequestBuilder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return (RequestBuilder) this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java
new file mode 100644
index 0000000..2fd4f97
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import static org.elasticsearch.action.support.DefaultShardOperationFailedException.readShardOperationFailed;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+/**
+ * Base class for all broadcast operation based responses.
+ */
+public abstract class BroadcastOperationResponse extends ActionResponse {
+ private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
+ private int totalShards;
+ private int successfulShards;
+ private int failedShards;
+ private ShardOperationFailedException[] shardFailures = EMPTY;
+
+ protected BroadcastOperationResponse() {
+ }
+
+ protected BroadcastOperationResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ this.totalShards = totalShards;
+ this.successfulShards = successfulShards;
+ this.failedShards = failedShards;
+ this.shardFailures = shardFailures == null ? EMPTY : shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]);
+ }
+
+ /**
+ * The total shards this request ran against.
+ */
+ public int getTotalShards() {
+ return totalShards;
+ }
+
+ /**
+ * The successful shards this request was executed on.
+ */
+ public int getSuccessfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * The failed shards this request was executed on.
+ */
+ public int getFailedShards() {
+ return failedShards;
+ }
+
+ /**
+     * The list of shard failure exceptions.
+ */
+ public ShardOperationFailedException[] getShardFailures() {
+ return shardFailures;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ totalShards = in.readVInt();
+ successfulShards = in.readVInt();
+ failedShards = in.readVInt();
+ int size = in.readVInt();
+ if (size > 0) {
+ shardFailures = new ShardOperationFailedException[size];
+ for (int i = 0; i < size; i++) {
+ shardFailures[i] = readShardOperationFailed(in);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(totalShards);
+ out.writeVInt(successfulShards);
+ out.writeVInt(failedShards);
+ out.writeVInt(shardFailures.length);
+ for (ShardOperationFailedException exp : shardFailures) {
+ exp.writeTo(out);
+ }
+ }
+}
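Consumers usually check the header counts before iterating the per-shard failures. Sketch (outside the patch; response and logger are hypothetical):

    if (response.getFailedShards() > 0) {
        for (ShardOperationFailedException failure : response.getShardFailures()) {
            logger.warn("[{}][{}] failed: {}", failure.index(), failure.shardId(), failure.reason());
        }
    }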
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationThreading.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationThreading.java
new file mode 100644
index 0000000..aeedde7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationThreading.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import java.util.Locale;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * Controls the operation threading model for broadcast operations that are performed
+ * locally on the executing node.
+ */
+public enum BroadcastOperationThreading {
+ /**
+     * No threads are used, all the local shard operations will be performed on the calling
+ * thread.
+ */
+ NO_THREADS((byte) 0),
+ /**
+     * The local shard operations will be performed serially on a single forked thread.
+ */
+ SINGLE_THREAD((byte) 1),
+ /**
+ * Each local shard operation will execute on its own thread.
+ */
+ THREAD_PER_SHARD((byte) 2);
+
+ private final byte id;
+
+ BroadcastOperationThreading(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public static BroadcastOperationThreading fromId(byte id) {
+ if (id == 0) {
+ return NO_THREADS;
+ }
+ if (id == 1) {
+ return SINGLE_THREAD;
+ }
+ if (id == 2) {
+ return THREAD_PER_SHARD;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type matching id [" + id + "]");
+ }
+
+ public static BroadcastOperationThreading fromString(String value, BroadcastOperationThreading defaultValue) {
+ if (value == null) {
+ return defaultValue;
+ }
+ return BroadcastOperationThreading.valueOf(value.toUpperCase(Locale.ROOT));
+ }
+}
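Round-trip sketch (outside the patch): fromString upper-cases with the root locale, so REST-style lowercase values map onto the enum constants, and fromId inverts id().

    BroadcastOperationThreading threading =
            BroadcastOperationThreading.fromString("thread_per_shard", BroadcastOperationThreading.SINGLE_THREAD);
    assert threading == BroadcastOperationThreading.THREAD_PER_SHARD;
    assert BroadcastOperationThreading.fromId(threading.id()) == threading;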
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
new file mode 100644
index 0000000..b07d178
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that a failure occurred while performing an operation on the shard.
+ */
+public class BroadcastShardOperationFailedException extends IndexShardException implements ElasticsearchWrapperException {
+
+ public BroadcastShardOperationFailedException(ShardId shardId, String msg) {
+ super(shardId, msg, null);
+ }
+
+ public BroadcastShardOperationFailedException(ShardId shardId, Throwable cause) {
+ super(shardId, "", cause);
+ }
+
+ public BroadcastShardOperationFailedException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationRequest.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationRequest.java
new file mode 100644
index 0000000..6a43a8c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationRequest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class BroadcastShardOperationRequest extends TransportRequest {
+
+ private String index;
+ private int shardId;
+
+ protected BroadcastShardOperationRequest() {
+ }
+
+ protected BroadcastShardOperationRequest(String index, int shardId, BroadcastOperationRequest request) {
+ super(request);
+ this.index = index;
+ this.shardId = shardId;
+ }
+
+ public BroadcastShardOperationRequest(String index, int shardId) {
+ this.index = index;
+ this.shardId = shardId;
+ }
+
+ public String index() {
+ return this.index;
+ }
+
+ public int shardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ shardId = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeVInt(shardId);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationResponse.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationResponse.java
new file mode 100644
index 0000000..194a77a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationResponse.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class BroadcastShardOperationResponse extends TransportResponse {
+
+ String index;
+ int shardId;
+
+ protected BroadcastShardOperationResponse() {
+
+ }
+
+ protected BroadcastShardOperationResponse(String index, int shardId) {
+ this.index = index;
+ this.shardId = shardId;
+ }
+
+ public String getIndex() {
+ return this.index;
+ }
+
+ public int getShardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ shardId = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeVInt(shardId);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java
new file mode 100644
index 0000000..7765a8d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.broadcast;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ *
+ */
+public abstract class TransportBroadcastOperationAction<Request extends BroadcastOperationRequest, Response extends BroadcastOperationResponse, ShardRequest extends BroadcastShardOperationRequest, ShardResponse extends BroadcastShardOperationResponse>
+ extends TransportAction<Request, Response> {
+
+ protected final ClusterService clusterService;
+
+ protected final TransportService transportService;
+
+ protected final ThreadPool threadPool;
+
+ final String transportAction;
+ final String transportShardAction;
+ final String executor;
+
+ protected TransportBroadcastOperationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+ this.threadPool = threadPool;
+
+ this.transportAction = transportAction();
+ this.transportShardAction = transportAction() + "/s";
+ this.executor = executor();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ transportService.registerHandler(transportShardAction, new ShardTransportHandler());
+ }
+
+ @Override
+ protected void doExecute(Request request, ActionListener<Response> listener) {
+ new AsyncBroadcastAction(request, listener).start();
+ }
+
+ protected abstract String transportAction();
+
+ protected abstract String executor();
+
+ protected abstract Request newRequest();
+
+ protected abstract Response newResponse(Request request, AtomicReferenceArray shardsResponses, ClusterState clusterState);
+
+ protected abstract ShardRequest newShardRequest();
+
+ protected abstract ShardRequest newShardRequest(ShardRouting shard, Request request);
+
+ protected abstract ShardResponse newShardResponse();
+
+ protected abstract ShardResponse shardOperation(ShardRequest request) throws ElasticsearchException;
+
+ protected abstract GroupShardsIterator shards(ClusterState clusterState, Request request, String[] concreteIndices);
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices);
+
+ class AsyncBroadcastAction {
+
+ private final Request request;
+
+ private final ActionListener<Response> listener;
+
+ private final ClusterState clusterState;
+
+ private final DiscoveryNodes nodes;
+
+ private final GroupShardsIterator shardsIts;
+
+ private final int expectedOps;
+
+ private final AtomicInteger counterOps = new AtomicInteger();
+
+ private final AtomicReferenceArray shardsResponses;
+
+ AsyncBroadcastAction(Request request, ActionListener<Response> listener) {
+ this.request = request;
+ this.listener = listener;
+
+ clusterState = clusterService.state();
+
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+ // update to concrete indices
+ String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ blockException = checkRequestBlock(clusterState, request, concreteIndices);
+ if (blockException != null) {
+ throw blockException;
+ }
+
+ nodes = clusterState.nodes();
+ shardsIts = shards(clusterState, request, concreteIndices);
+ expectedOps = shardsIts.size();
+
+ shardsResponses = new AtomicReferenceArray<Object>(expectedOps);
+ }
+
+ public void start() {
+ if (shardsIts.size() == 0) {
+ // no shards
+ try {
+ listener.onResponse(newResponse(request, new AtomicReferenceArray(0), clusterState));
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ return;
+ }
+ request.beforeStart();
+ // count the local operations, and perform the non local ones
+ int localOperations = 0;
+ int shardIndex = -1;
+ for (final ShardIterator shardIt : shardsIts) {
+ shardIndex++;
+ final ShardRouting shard = shardIt.firstOrNull();
+ if (shard != null) {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ localOperations++;
+ } else {
+ // do the remote operation here, the localAsync flag is not relevant
+ performOperation(shardIt, shardIndex, true);
+ }
+ } else {
+ // really, no shards active in this group
+ onOperation(null, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
+ }
+ }
+ // we have local operations, perform them now
+ if (localOperations > 0) {
+ if (request.operationThreading() == BroadcastOperationThreading.SINGLE_THREAD) {
+ request.beforeLocalFork();
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ int shardIndex = -1;
+ for (final ShardIterator shardIt : shardsIts) {
+ shardIndex++;
+ final ShardRouting shard = shardIt.firstOrNull();
+ if (shard != null) {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ performOperation(shardIt, shardIndex, false);
+ }
+ }
+ }
+ }
+ });
+ } else {
+ boolean localAsync = request.operationThreading() == BroadcastOperationThreading.THREAD_PER_SHARD;
+ if (localAsync) {
+ request.beforeLocalFork();
+ }
+ shardIndex = -1;
+ for (final ShardIterator shardIt : shardsIts) {
+ shardIndex++;
+ final ShardRouting shard = shardIt.firstOrNull();
+ if (shard != null) {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ performOperation(shardIt, shardIndex, localAsync);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void performOperation(final ShardIterator shardIt, int shardIndex, boolean localAsync) {
+ performOperation(shardIt, shardIt.nextOrNull(), shardIndex, localAsync);
+ }
+
+ void performOperation(final ShardIterator shardIt, final ShardRouting shard, final int shardIndex, boolean localAsync) {
+ if (shard == null) {
+ // no more active shards... (we should not really get here, just safety)
+ onOperation(null, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
+ } else {
+ try {
+ final ShardRequest shardRequest = newShardRequest(shard, request);
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ if (localAsync) {
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ onOperation(shard, shardIndex, shardOperation(shardRequest));
+ } catch (Throwable e) {
+ onOperation(shard, shardIt, shardIndex, e);
+ }
+ }
+ });
+ } else {
+ onOperation(shard, shardIndex, shardOperation(shardRequest));
+ }
+ } else {
+ DiscoveryNode node = nodes.get(shard.currentNodeId());
+ if (node == null) {
+ // no node connected, act as failure
+ onOperation(shard, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
+ } else {
+ transportService.sendRequest(node, transportShardAction, shardRequest, new BaseTransportResponseHandler<ShardResponse>() {
+ @Override
+ public ShardResponse newInstance() {
+ return newShardResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(ShardResponse response) {
+ onOperation(shard, shardIndex, response);
+ }
+
+ @Override
+ public void handleException(TransportException e) {
+ onOperation(shard, shardIt, shardIndex, e);
+ }
+ });
+ }
+ }
+ } catch (Throwable e) {
+ onOperation(shard, shardIt, shardIndex, e);
+ }
+ }
+ }
+
+ @SuppressWarnings({"unchecked"})
+ void onOperation(ShardRouting shard, int shardIndex, ShardResponse response) {
+ shardsResponses.set(shardIndex, response);
+ if (expectedOps == counterOps.incrementAndGet()) {
+ finishHim();
+ }
+ }
+
+ @SuppressWarnings({"unchecked"})
+ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int shardIndex, Throwable t) {
+            // we always set the shard failure, even if it is the first in the replication group, and the next one
+ // will work (it will just override it...)
+ setFailure(shardIt, shardIndex, t);
+
+ ShardRouting nextShard = shardIt.nextOrNull();
+ if (nextShard != null) {
+ if (t != null) {
+ if (logger.isTraceEnabled()) {
+ if (!TransportActions.isShardNotAvailableException(t)) {
+ if (shard != null) {
+ logger.trace(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
+ } else {
+ logger.trace(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
+ }
+ }
+ }
+ }
+                // we are not threaded here if we got here from the transport,
+                // and we are possibly threaded if we got here from a local threaded execution,
+                // in which case the next shard in the partition will not be a local one,
+                // so the flag has no meaning
+ performOperation(shardIt, nextShard, shardIndex, true);
+ } else {
+ if (logger.isDebugEnabled()) {
+ if (t != null) {
+ if (!TransportActions.isShardNotAvailableException(t)) {
+ if (shard != null) {
+ logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
+ } else {
+ logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
+ }
+ }
+ }
+ }
+ if (expectedOps == counterOps.incrementAndGet()) {
+ finishHim();
+ }
+ }
+ }
+
+ void finishHim() {
+ try {
+ listener.onResponse(newResponse(request, shardsResponses, clusterState));
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ }
+
+ void setFailure(ShardIterator shardIt, int shardIndex, Throwable t) {
+ // we don't aggregate shard failures on non active shards (but do keep the header counts right)
+ if (TransportActions.isShardNotAvailableException(t)) {
+ return;
+ }
+
+ if (!(t instanceof BroadcastShardOperationFailedException)) {
+ t = new BroadcastShardOperationFailedException(shardIt.shardId(), t);
+ }
+
+ Object response = shardsResponses.get(shardIndex);
+ if (response == null) {
+                // nothing recorded for this shard yet, just set the failure and return
+                shardsResponses.set(shardIndex, t);
+                return;
+            }
+
+ if (!(response instanceof Throwable)) {
+ // we should never really get here...
+ return;
+ }
+
+ // the failure is already present, try and not override it with an exception that is less meaningless
+ // for example, getting illegal shard state
+ if (TransportActions.isReadOverrideException(t)) {
+ shardsResponses.set(shardIndex, t);
+ }
+ }
+ }
+
+ class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(Request request, final TransportChannel channel) throws Exception {
+ // we just send back a response, no need to fork a listener
+ request.listenerThreaded(false);
+ // we don't spawn, so if we get a request with no threading, change it to single threaded
+ if (request.operationThreading() == BroadcastOperationThreading.NO_THREADS) {
+ request.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
+ }
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ }
+ });
+ }
+ }
+
+ class ShardTransportHandler extends BaseTransportRequestHandler<ShardRequest> {
+
+ @Override
+ public ShardRequest newInstance() {
+ return newShardRequest();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception {
+ channel.sendResponse(shardOperation(request));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java
new file mode 100644
index 0000000..56ce2b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+
+/**
+ * Abstract base class for action requests that support acknowledgements.
+ * Facilitates consistency across the different APIs.
+ */
+public abstract class AcknowledgedRequest<T extends MasterNodeOperationRequest> extends MasterNodeOperationRequest<T> {
+
+ public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30);
+
+ protected TimeValue timeout = DEFAULT_ACK_TIMEOUT;
+
+ protected AcknowledgedRequest() {
+ }
+
+ /**
+ * Sets the timeout.
+ * @param timeout timeout as a string (e.g. 1s)
+ * @return the request itself
+ */
+ @SuppressWarnings("unchecked")
+ public final T timeout(String timeout) {
+ this.timeout = TimeValue.parseTimeValue(timeout, this.timeout);
+ return (T)this;
+ }
+
+ /**
+ * Sets the timeout.
+ * @param timeout timeout as a {@link TimeValue}
+ * @return the request itself
+ */
+ @SuppressWarnings("unchecked")
+ public final T timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return (T) this;
+ }
+
+ /**
+ * Returns the current timeout
+ * @return the current timeout as a {@link TimeValue}
+ */
+ public final TimeValue timeout() {
+ return timeout;
+ }
+
+ /**
+ * Reads the timeout value
+ */
+ protected void readTimeout(StreamInput in) throws IOException {
+ timeout = readTimeValue(in);
+ }
+
+ /**
+ * Writes the timeout value
+ */
+ protected void writeTimeout(StreamOutput out) throws IOException {
+ timeout.writeTo(out);
+ }
+}
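
For orientation, here is a minimal sketch of how a concrete request is expected to plug the readTimeout/writeTimeout helpers into its own serialization. The DeleteWidgetRequest name and its field are hypothetical, not part of this patch.

    import org.elasticsearch.action.ActionRequestValidationException;
    import org.elasticsearch.action.support.master.AcknowledgedRequest;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    public class DeleteWidgetRequest extends AcknowledgedRequest<DeleteWidgetRequest> {

        private String name;

        public DeleteWidgetRequest name(String name) {
            this.name = name;
            return this;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null; // nothing to validate in this sketch
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);  // reads the master node timeout
            name = in.readString();
            readTimeout(in);     // helper from AcknowledgedRequest
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);  // writes the master node timeout
            out.writeString(name);
            writeTimeout(out);   // helper from AcknowledgedRequest
        }
    }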
diff --git a/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java
new file mode 100644
index 0000000..42eac4c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Base request builder for master node operations that support acknowledgements
+ */
+public abstract class AcknowledgedRequestBuilder<Request extends AcknowledgedRequest<Request>, Response extends AcknowledgedResponse, RequestBuilder extends AcknowledgedRequestBuilder<Request, Response, RequestBuilder>>
+ extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected AcknowledgedRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * Sets the maximum wait for acknowledgement from other nodes
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder)this;
+ }
+
+ /**
+ * Timeout to wait for the operation to be acknowledged by the current cluster nodes. Defaults
+ * to <tt>30s</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder)this;
+ }
+}
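
The two timeout knobs inherited by this builder are easy to conflate; a small illustrative helper (class and values assumed, not from this patch) shows which setter controls what.

    import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;

    final class AckTimeoutDefaults {
        // Applies illustrative defaults to any acknowledged builder.
        static void apply(AcknowledgedRequestBuilder<?, ?, ?> builder) {
            builder.setTimeout("30s");           // how long to wait for node acknowledgements
            builder.setMasterNodeTimeout("20s"); // how long to wait for a master to be discovered
        }
    }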
diff --git a/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java
new file mode 100644
index 0000000..cdac96a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Abstract base class for action responses that support acknowledgements.
+ * Facilitates consistency across the different APIs.
+ */
+public abstract class AcknowledgedResponse extends ActionResponse {
+
+ private boolean acknowledged;
+
+ protected AcknowledgedResponse() {
+
+ }
+
+ protected AcknowledgedResponse(boolean acknowledged) {
+ this.acknowledged = acknowledged;
+ }
+
+ /**
+ * Returns whether the response is acknowledged or not
+ * @return true if the response is acknowledged, false otherwise
+ */
+ public final boolean isAcknowledged() {
+ return acknowledged;
+ }
+
+ /**
+ * Reads the acknowledged flag
+ */
+ protected void readAcknowledged(StreamInput in) throws IOException {
+ acknowledged = in.readBoolean();
+ }
+
+ /**
+ * Writes the acknowledged flag
+ */
+ protected void writeAcknowledged(StreamOutput out) throws IOException {
+ out.writeBoolean(acknowledged);
+ }
+}
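
And the matching response side, again with hypothetical names: readAcknowledged/writeAcknowledged are meant to be called from the concrete class's readFrom/writeTo.

    import org.elasticsearch.action.support.master.AcknowledgedResponse;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    public class DeleteWidgetResponse extends AcknowledgedResponse {

        public DeleteWidgetResponse() {
        }

        public DeleteWidgetResponse(boolean acknowledged) {
            super(acknowledged);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            readAcknowledged(in);   // reads the acknowledged flag
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            writeAcknowledged(out); // writes the acknowledged flag
        }
    }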
diff --git a/src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequest.java b/src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequest.java
new file mode 100644
index 0000000..7c6a8b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequest.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+/**
+ * A base request for master-based operations.
+ */
+public abstract class MasterNodeOperationRequest<T extends MasterNodeOperationRequest> extends ActionRequest<T> {
+
+ public static TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30);
+
+ protected TimeValue masterNodeTimeout = DEFAULT_MASTER_NODE_TIMEOUT;
+
+ /**
+ * A timeout value in case the master has not been discovered yet or disconnected.
+ */
+ @SuppressWarnings("unchecked")
+ public final T masterNodeTimeout(TimeValue timeout) {
+ this.masterNodeTimeout = timeout;
+ return (T) this;
+ }
+
+ /**
+ * A timeout value in case the master has not been discovered yet or disconnected.
+ */
+ public final T masterNodeTimeout(String timeout) {
+ return masterNodeTimeout(TimeValue.parseTimeValue(timeout, null));
+ }
+
+ public final TimeValue masterNodeTimeout() {
+ return this.masterNodeTimeout;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ masterNodeTimeout = TimeValue.readTimeValue(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ masterNodeTimeout.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequestBuilder.java
new file mode 100644
index 0000000..3455a61
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/MasterNodeOperationRequestBuilder.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Base request builder for master node operations
+ */
+public abstract class MasterNodeOperationRequestBuilder<Request extends MasterNodeOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected MasterNodeOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * Sets the master node timeout in case the master has not yet been discovered.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) {
+ request.masterNodeTimeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the master node timeout in case the master has not yet been discovered.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setMasterNodeTimeout(String timeout) {
+ request.masterNodeTimeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequest.java b/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequest.java
new file mode 100644
index 0000000..e6d4a23
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Base request for master-based read operations that allows reading the cluster state from the local node when needed
+ */
+public abstract class MasterNodeReadOperationRequest<T extends MasterNodeReadOperationRequest> extends MasterNodeOperationRequest<T> {
+
+ protected boolean local = false;
+
+ @SuppressWarnings("unchecked")
+ public final T local(boolean local) {
+ this.local = local;
+ return (T) this;
+ }
+
+ public final boolean local() {
+ return local;
+ }
+
+ /**
+ * Reads the local flag
+ */
+ protected void readLocal(StreamInput in) throws IOException {
+ readLocal(in, null);
+ }
+
+ /**
+ * Reads the local flag if on or after the specified min version or if the version is <code>null</code>.
+ */
+ protected void readLocal(StreamInput in, Version minVersion) throws IOException {
+ if (minVersion == null || in.getVersion().onOrAfter(minVersion)) {
+ local = in.readBoolean();
+ }
+ }
+
+ /**
+ * Writes the local flag
+ */
+ protected void writeLocal(StreamOutput out) throws IOException {
+ writeLocal(out, null);
+ }
+
+ /**
+ * Writes the local flag if on or after the specified min version or if the version is <code>null</code>.
+ */
+ protected void writeLocal(StreamOutput out, Version minVersion) throws IOException {
+ if (minVersion == null || out.getVersion().onOrAfter(minVersion)) {
+ out.writeBoolean(local);
+ }
+ }
+}
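
A sketch of the version-gated variant in use; the request name and the minimum version are illustrative. Passing a minimum version keeps the wire format compatible with peers that predate the local flag.

    import org.elasticsearch.Version;
    import org.elasticsearch.action.ActionRequestValidationException;
    import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    public class WidgetInfoRequest extends MasterNodeReadOperationRequest<WidgetInfoRequest> {

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            readLocal(in, Version.V_1_0_0);  // peers below the given version never sent the flag
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            writeLocal(out, Version.V_1_0_0);
        }
    }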
diff --git a/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java
new file mode 100644
index 0000000..5d636c6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ * Base request builder for master node read operations that can be executed on the local node as well
+ */
+public abstract class MasterNodeReadOperationRequestBuilder<Request extends MasterNodeReadOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends MasterNodeReadOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected MasterNodeReadOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * Specifies whether the request should be executed on the local node rather than on the master
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setLocal(boolean local) {
+ request.local(local);
+ return (RequestBuilder) this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java
new file mode 100644
index 0000000..4e8d9f9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.TimeoutClusterStateListener;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.node.NodeClosedException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+/**
+ * A base class for operations that need to be performed on the master node.
+ */
+public abstract class TransportMasterNodeOperationAction<Request extends MasterNodeOperationRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
+
+ protected final TransportService transportService;
+
+ protected final ClusterService clusterService;
+
+ final String transportAction;
+ final String executor;
+
+ protected TransportMasterNodeOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, threadPool);
+ this.transportService = transportService;
+ this.clusterService = clusterService;
+
+ this.transportAction = transportAction();
+ this.executor = executor();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ }
+
+ protected abstract String transportAction();
+
+ protected abstract String executor();
+
+ protected abstract Request newRequest();
+
+ protected abstract Response newResponse();
+
+ protected abstract void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws ElasticsearchException;
+
+ protected boolean localExecute(Request request) {
+ return false;
+ }
+
+ protected ClusterBlockException checkBlock(Request request, ClusterState state) {
+ return null;
+ }
+
+ protected void processBeforeDelegationToMaster(Request request, ClusterState state) {
+
+ }
+
+ @Override
+ public void execute(Request request, ActionListener<Response> listener) {
+ // since the callback is async, it may be invoked from within a cluster service event
+ // (or similar), so make sure the listener is forked and does not block that thread.
+ request.listenerThreaded(true);
+ super.execute(request, listener);
+ }
+
+ @Override
+ protected void doExecute(final Request request, final ActionListener<Response> listener) {
+ innerExecute(request, listener, false);
+ }
+
+ private void innerExecute(final Request request, final ActionListener<Response> listener, final boolean retrying) {
+ final ClusterState clusterState = clusterService.state();
+ final DiscoveryNodes nodes = clusterState.nodes();
+ if (nodes.localNodeMaster() || localExecute(request)) {
+ // check for block, if blocked, retry, else, execute locally
+ final ClusterBlockException blockException = checkBlock(request, clusterState);
+ if (blockException != null) {
+ if (!blockException.retryable()) {
+ listener.onFailure(blockException);
+ return;
+ }
+ clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
+ @Override
+ public void postAdded() {
+ ClusterBlockException blockException = checkBlock(request, clusterService.state());
+ if (blockException == null || !blockException.retryable()) {
+ clusterService.remove(this);
+ innerExecute(request, listener, false);
+ }
+ }
+
+ @Override
+ public void onClose() {
+ clusterService.remove(this);
+ listener.onFailure(blockException);
+ }
+
+ @Override
+ public void onTimeout(TimeValue timeout) {
+ clusterService.remove(this);
+ listener.onFailure(blockException);
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ ClusterBlockException blockException = checkBlock(request, event.state());
+ if (blockException == null || !blockException.retryable()) {
+ clusterService.remove(this);
+ innerExecute(request, listener, false);
+ }
+ }
+ });
+ } else {
+ try {
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ masterOperation(request, clusterService.state(), listener);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ }
+ });
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ }
+ } else {
+ if (nodes.masterNode() == null) {
+ if (retrying) {
+ listener.onFailure(new MasterNotDiscoveredException());
+ } else {
+ clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
+ @Override
+ public void postAdded() {
+ ClusterState clusterStateV2 = clusterService.state();
+ if (clusterStateV2.nodes().masterNodeId() != null) {
+ // now we have a master, try to execute the request...
+ clusterService.remove(this);
+ innerExecute(request, listener, true);
+ }
+ }
+
+ @Override
+ public void onClose() {
+ clusterService.remove(this);
+ listener.onFailure(new NodeClosedException(clusterService.localNode()));
+ }
+
+ @Override
+ public void onTimeout(TimeValue timeout) {
+ clusterService.remove(this);
+ listener.onFailure(new MasterNotDiscoveredException("waited for [" + timeout + "]"));
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.nodesDelta().masterNodeChanged()) {
+ clusterService.remove(this);
+ innerExecute(request, listener, true);
+ }
+ }
+ });
+ }
+ return;
+ }
+ processBeforeDelegationToMaster(request, clusterState);
+ transportService.sendRequest(nodes.masterNode(), transportAction, request, new BaseTransportResponseHandler<Response>() {
+ @Override
+ public Response newInstance() {
+ return newResponse();
+ }
+
+ @Override
+ public void handleResponse(Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleException(final TransportException exp) {
+ if (exp.unwrapCause() instanceof ConnectTransportException) {
+ // we want to retry here a bit to see if a new master is elected
+ clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
+ @Override
+ public void postAdded() {
+ ClusterState clusterStateV2 = clusterService.state();
+ if (!clusterState.nodes().masterNodeId().equals(clusterStateV2.nodes().masterNodeId())) {
+ // the master changed while we were adding the listener, retry now
+ clusterService.remove(this);
+ innerExecute(request, listener, false);
+ }
+ }
+
+ @Override
+ public void onClose() {
+ clusterService.remove(this);
+ listener.onFailure(new NodeClosedException(clusterService.localNode()));
+ }
+
+ @Override
+ public void onTimeout(TimeValue timeout) {
+ clusterService.remove(this);
+ listener.onFailure(new MasterNotDiscoveredException());
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.nodesDelta().masterNodeChanged()) {
+ clusterService.remove(this);
+ innerExecute(request, listener, false);
+ }
+ }
+ });
+ } else {
+ listener.onFailure(exp);
+ }
+ }
+ });
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+ // we just send back a response, no need to fork a listener
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ }
+ });
+ }
+ }
+}
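
A concrete action only has to supply the factory methods and the master-side operation; the retry and delegation machinery above takes care of the rest. A sketch, reusing the hypothetical request/response classes from earlier (the action name is illustrative too):

    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
    import org.elasticsearch.cluster.ClusterService;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.TransportService;

    public class TransportDeleteWidgetAction
            extends TransportMasterNodeOperationAction<DeleteWidgetRequest, DeleteWidgetResponse> {

        @Inject
        public TransportDeleteWidgetAction(Settings settings, TransportService transportService,
                                           ClusterService clusterService, ThreadPool threadPool) {
            super(settings, transportService, clusterService, threadPool);
        }

        @Override
        protected String transportAction() {
            return "indices/widget/delete"; // illustrative action name
        }

        @Override
        protected String executor() {
            return ThreadPool.Names.MANAGEMENT;
        }

        @Override
        protected DeleteWidgetRequest newRequest() {
            return new DeleteWidgetRequest();
        }

        @Override
        protected DeleteWidgetResponse newResponse() {
            return new DeleteWidgetResponse();
        }

        @Override
        protected void masterOperation(DeleteWidgetRequest request, ClusterState state,
                                       ActionListener<DeleteWidgetResponse> listener) throws ElasticsearchException {
            // a real action would submit a cluster state update here, then acknowledge
            listener.onResponse(new DeleteWidgetResponse(true));
        }
    }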
diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java
new file mode 100644
index 0000000..c4d9683
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * A base class for read operations that need to be performed on the master node.
+ * Can also be executed on the local node if needed.
+ */
+public abstract class TransportMasterNodeReadOperationAction<Request extends MasterNodeReadOperationRequest, Response extends ActionResponse> extends TransportMasterNodeOperationAction<Request, Response> {
+
+ public static final String FORCE_LOCAL_SETTING = "action.master.force_local";
+
+ private Boolean forceLocal;
+
+ protected TransportMasterNodeReadOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ this.forceLocal = settings.getAsBoolean(FORCE_LOCAL_SETTING, null);
+ }
+
+ protected final boolean localExecute(Request request) {
+ if (forceLocal != null) {
+ return forceLocal;
+ }
+ return request.local();
+ }
+}
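
Note that FORCE_LOCAL_SETTING is a node-level override: once action.master.force_local is set, it wins over the per-request local flag. An illustrative way to build such settings with the 1.x settings API:

    import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    final class ForceLocalExample {
        // Builds settings that force all master-read actions to execute locally.
        static Settings forceLocalReads() {
            return ImmutableSettings.settingsBuilder()
                    .put(TransportMasterNodeReadOperationAction.FORCE_LOCAL_SETTING, true)
                    .build();
        }
    }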
diff --git a/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
new file mode 100644
index 0000000..3502481
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master.info;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ */
+public abstract class ClusterInfoRequest<T extends ClusterInfoRequest> extends MasterNodeReadOperationRequest<T> {
+
+ private String[] indices = Strings.EMPTY_ARRAY;
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ @SuppressWarnings("unchecked")
+ public T indices(String... indices) {
+ this.indices = indices;
+ return (T) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public T types(String... types) {
+ this.types = types;
+ return (T) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public T indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return (T) this;
+ }
+
+ public String[] indices() {
+ return indices;
+ }
+
+ public String[] types() {
+ return types;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ indices = in.readStringArray();
+ types = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ readLocal(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(indices);
+ out.writeStringArray(types);
+ indicesOptions.writeIndicesOptions(out);
+ writeLocal(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java
new file mode 100644
index 0000000..f8834d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.master.info;
+
+import com.google.common.collect.ObjectArrays;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public abstract class ClusterInfoRequestBuilder<Request extends ClusterInfoRequest<Request>, Response extends ActionResponse, Builder extends ClusterInfoRequestBuilder<Request, Response, Builder>> extends MasterNodeReadOperationRequestBuilder<Request, Response, Builder> {
+
+ protected ClusterInfoRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder setIndices(String... indices) {
+ request.indices(indices);
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder addIndices(String... indices) {
+ request.indices(ObjectArrays.concat(request.indices(), indices, String.class));
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder setTypes(String... types) {
+ request.types(types);
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder addTypes(String... types) {
+ request.types(ObjectArrays.concat(request.types(), types, String.class));
+ return (Builder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public Builder setIndicesOptions(IndicesOptions indicesOptions) {
+ request.indicesOptions(indicesOptions);
+ return (Builder) this;
+ }
+}
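
The set*/add* pairs above differ only in whether they replace or append the current selection (appending goes through ObjectArrays.concat). A small illustrative helper, with hypothetical index and type names:

    import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;

    final class ClusterInfoExample {
        static void narrow(ClusterInfoRequestBuilder<?, ?, ?> builder) {
            builder.setIndices("logs-2014");  // replaces any previous selection
            builder.addIndices("metrics");    // appends to the current selection
            builder.setTypes("event");
        }
    }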
diff --git a/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java b/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java
new file mode 100644
index 0000000..cc94840
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.master.info;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ */
+public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest, Response extends ActionResponse> extends TransportMasterNodeReadOperationAction<Request, Response> {
+
+ public TransportClusterInfoAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected String executor() {
+ // read operation, lightweight...
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected final void masterOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) throws ElasticsearchException {
+ String[] concreteIndices = state.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ request.indices(concreteIndices);
+ doMasterOperation(request, state, listener);
+ }
+
+ protected abstract void doMasterOperation(Request request, ClusterState state, final ActionListener<Response> listener) throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java b/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java
new file mode 100644
index 0000000..4d8a426
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationRequest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.nodes;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class NodeOperationRequest extends TransportRequest {
+
+ private String nodeId;
+
+ protected NodeOperationRequest() {
+
+ }
+
+ protected NodeOperationRequest(NodesOperationRequest request, String nodeId) {
+ super(request);
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodeId = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(nodeId);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java b/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java
new file mode 100644
index 0000000..3415a07
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/nodes/NodeOperationResponse.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.nodes;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ * A base class for node-level operation responses.
+ */
+public abstract class NodeOperationResponse extends TransportResponse {
+
+ private DiscoveryNode node;
+
+ protected NodeOperationResponse() {
+ }
+
+ protected NodeOperationResponse(DiscoveryNode node) {
+ assert node != null;
+ this.node = node;
+ }
+
+ /**
+ * The node this information relates to.
+ */
+ public DiscoveryNode getNode() {
+ return node;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ node = DiscoveryNode.readNode(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ node.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java
new file mode 100644
index 0000000..f92f7c2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.nodes;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class NodesOperationRequest<T extends NodesOperationRequest> extends ActionRequest<T> {
+
+ public static String[] ALL_NODES = Strings.EMPTY_ARRAY;
+
+ private String[] nodesIds;
+
+ private TimeValue timeout;
+
+ protected NodesOperationRequest() {
+
+ }
+
+ protected NodesOperationRequest(String... nodesIds) {
+ this.nodesIds = nodesIds;
+ }
+
+ public final String[] nodesIds() {
+ return nodesIds;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T nodesIds(String... nodesIds) {
+ this.nodesIds = nodesIds;
+ return (T) this;
+ }
+
+ public TimeValue timeout() {
+ return this.timeout;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return (T) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T timeout(String timeout) {
+ this.timeout = TimeValue.parseTimeValue(timeout, null);
+ return (T) this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodesIds = in.readStringArray();
+ if (in.readBoolean()) {
+ timeout = TimeValue.readTimeValue(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArrayNullable(nodesIds);
+ if (timeout == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ timeout.writeTo(out);
+ }
+ }
+}
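
A minimal concrete request (hypothetical name): an empty nodesIds array (ALL_NODES) targets every node, and the ids are later run through resolveNodesIds, which also understands aliases such as "_local" and "_master" (see the transport action below).

    import org.elasticsearch.action.support.nodes.NodesOperationRequest;

    public class WidgetStatsRequest extends NodesOperationRequest<WidgetStatsRequest> {

        public WidgetStatsRequest() {
            super(ALL_NODES); // every node
        }

        public WidgetStatsRequest(String... nodesIds) {
            super(nodesIds);  // e.g. "node1", "_local", "_master"
        }
    }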
diff --git a/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java
new file mode 100644
index 0000000..41d386b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.nodes;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ */
+public abstract class NodesOperationRequestBuilder<Request extends NodesOperationRequest<Request>, Response extends NodesOperationResponse, RequestBuilder extends NodesOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected NodesOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setNodesIds(String... nodesIds) {
+ request.nodesIds(nodesIds);
+ return (RequestBuilder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java
new file mode 100644
index 0000000..c77d801
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationResponse.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.nodes;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ *
+ */
+public abstract class NodesOperationResponse<NodeResponse extends NodeOperationResponse> extends ActionResponse implements Iterable<NodeResponse> {
+
+ private ClusterName clusterName;
+ protected NodeResponse[] nodes;
+ private Map<String, NodeResponse> nodesMap;
+
+ protected NodesOperationResponse() {
+ }
+
+ protected NodesOperationResponse(ClusterName clusterName, NodeResponse[] nodes) {
+ this.clusterName = clusterName;
+ this.nodes = nodes;
+ }
+
+ public ClusterName getClusterName() {
+ return this.clusterName;
+ }
+
+ public String getClusterNameAsString() {
+ return this.clusterName.value();
+ }
+
+ public NodeResponse[] getNodes() {
+ return nodes;
+ }
+
+ public NodeResponse getAt(int position) {
+ return nodes[position];
+ }
+
+ @Override
+ public Iterator<NodeResponse> iterator() {
+ return getNodesMap().values().iterator();
+ }
+
+ public Map<String, NodeResponse> getNodesMap() {
+ if (nodesMap == null) {
+ nodesMap = Maps.newHashMap();
+ for (NodeResponse nodeResponse : nodes) {
+ nodesMap.put(nodeResponse.getNode().id(), nodeResponse);
+ }
+ }
+ return nodesMap;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterName = ClusterName.readClusterName(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ clusterName.writeTo(out);
+ }
+}
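
Iteration is backed by getNodesMap(), which is built lazily on first access and keyed by node id; a usage sketch (helper name assumed):

    import org.elasticsearch.action.support.nodes.NodeOperationResponse;
    import org.elasticsearch.action.support.nodes.NodesOperationResponse;

    final class NodesResponseExample {
        // Prints the id of every node that contributed a response.
        static void printNodeIds(NodesOperationResponse<? extends NodeOperationResponse> response) {
            for (NodeOperationResponse node : response) {
                System.out.println(node.getNode().id());
            }
        }
    }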
diff --git a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java
new file mode 100644
index 0000000..3565d90
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java
@@ -0,0 +1,296 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.nodes;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.NoSuchNodeException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ *
+ */
+public abstract class TransportNodesOperationAction<Request extends NodesOperationRequest, Response extends NodesOperationResponse, NodeRequest extends NodeOperationRequest, NodeResponse extends NodeOperationResponse> extends TransportAction<Request, Response> {
+
+ protected final ClusterName clusterName;
+
+ protected final ClusterService clusterService;
+
+ protected final TransportService transportService;
+
+ final String transportAction;
+ final String transportNodeAction;
+ final String executor;
+
+ @Inject
+ public TransportNodesOperationAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ ClusterService clusterService, TransportService transportService) {
+ super(settings, threadPool);
+ this.clusterName = clusterName;
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+
+ this.transportAction = transportAction();
+ this.transportNodeAction = transportAction() + "/n";
+ this.executor = executor();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ transportService.registerHandler(transportNodeAction, new NodeTransportHandler());
+ }
+
+ @Override
+ protected void doExecute(Request request, ActionListener<Response> listener) {
+ new AsyncAction(request, listener).start();
+ }
+
+ protected abstract String transportAction();
+
+ protected boolean transportCompress() {
+ return false;
+ }
+
+ protected abstract String executor();
+
+ protected abstract Request newRequest();
+
+ protected abstract Response newResponse(Request request, AtomicReferenceArray nodesResponses);
+
+ protected abstract NodeRequest newNodeRequest();
+
+ protected abstract NodeRequest newNodeRequest(String nodeId, Request request);
+
+ protected abstract NodeResponse newNodeResponse();
+
+ protected abstract NodeResponse nodeOperation(NodeRequest request) throws ElasticsearchException;
+
+ protected abstract boolean accumulateExceptions();
+
+ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
+ return nodesIds;
+ }
+
+ private class AsyncAction {
+
+ private final Request request;
+ private final String[] nodesIds;
+ private final ActionListener<Response> listener;
+ private final ClusterState clusterState;
+ private final AtomicReferenceArray<Object> responses;
+ private final AtomicInteger counter = new AtomicInteger();
+
+ private AsyncAction(Request request, ActionListener<Response> listener) {
+ this.request = request;
+ this.listener = listener;
+ clusterState = clusterService.state();
+ String[] nodesIds = clusterState.nodes().resolveNodesIds(request.nodesIds());
+ this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
+ this.responses = new AtomicReferenceArray<Object>(this.nodesIds.length);
+ }
+
+ private void start() {
+ if (nodesIds.length == 0) {
+ // no nodes to operate on, notify the listener right away (off the calling thread)
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ listener.onResponse(newResponse(request, responses));
+ }
+ });
+ return;
+ }
+ TransportRequestOptions transportRequestOptions = TransportRequestOptions.options();
+ if (request.timeout() != null) {
+ transportRequestOptions.withTimeout(request.timeout());
+ }
+ transportRequestOptions.withCompress(transportCompress());
+ for (int i = 0; i < nodesIds.length; i++) {
+ final String nodeId = nodesIds[i];
+ final int idx = i;
+ final DiscoveryNode node = clusterState.nodes().nodes().get(nodeId);
+ try {
+ if (nodeId.equals("_local") || nodeId.equals(clusterState.nodes().localNodeId())) {
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ onOperation(idx, nodeOperation(newNodeRequest(clusterState.nodes().localNodeId(), request)));
+ } catch (Throwable e) {
+ onFailure(idx, clusterState.nodes().localNodeId(), e);
+ }
+ }
+ });
+ } else if (nodeId.equals("_master")) {
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ onOperation(idx, nodeOperation(newNodeRequest(clusterState.nodes().masterNodeId(), request)));
+ } catch (Throwable e) {
+ onFailure(idx, clusterState.nodes().masterNodeId(), e);
+ }
+ }
+ });
+ } else {
+ if (node == null) {
+ onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
+ } else if (!clusterService.localNode().shouldConnectTo(node)) {
+ onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
+ } else {
+ NodeRequest nodeRequest = newNodeRequest(nodeId, request);
+ transportService.sendRequest(node, transportNodeAction, nodeRequest, transportRequestOptions, new BaseTransportResponseHandler<NodeResponse>() {
+ @Override
+ public NodeResponse newInstance() {
+ return newNodeResponse();
+ }
+
+ @Override
+ public void handleResponse(NodeResponse response) {
+ onOperation(idx, response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ onFailure(idx, node.id(), exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+ } catch (Throwable t) {
+ onFailure(idx, nodeId, t);
+ }
+ }
+ }
+
+ private void onOperation(int idx, NodeResponse nodeResponse) {
+ responses.set(idx, nodeResponse);
+ if (counter.incrementAndGet() == responses.length()) {
+ finishHim();
+ }
+ }
+
+ private void onFailure(int idx, String nodeId, Throwable t) {
+ if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
+ logger.debug("failed to execute on node [{}]", t, nodeId);
+ }
+ if (accumulateExceptions()) {
+ responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
+ }
+ if (counter.incrementAndGet() == responses.length()) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ Response finalResponse;
+ try {
+ finalResponse = newResponse(request, responses);
+ } catch (Throwable t) {
+ logger.debug("failed to combine responses from nodes", t);
+ listener.onFailure(t);
+ return;
+ }
+ listener.onResponse(finalResponse);
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequest();
+ }
+
+ @Override
+ public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response response) {
+ TransportResponseOptions options = TransportResponseOptions.options().withCompress(transportCompress());
+ try {
+ channel.sendResponse(response, options);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response", e);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public String toString() {
+ return transportAction;
+ }
+ }
+
+ private class NodeTransportHandler extends BaseTransportRequestHandler<NodeRequest> {
+
+ @Override
+ public NodeRequest newInstance() {
+ return newNodeRequest();
+ }
+
+ @Override
+ public void messageReceived(final NodeRequest request, final TransportChannel channel) throws Exception {
+ channel.sendResponse(nodeOperation(request));
+ }
+
+ @Override
+ public String toString() {
+ return transportNodeAction;
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java
new file mode 100644
index 0000000..f15cc79
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ *
+ */
+public class IndexReplicationOperationRequest<T extends IndexReplicationOperationRequest> extends ActionRequest<T> {
+
+ protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT;
+
+ protected String index;
+
+ protected ReplicationType replicationType = ReplicationType.DEFAULT;
+ protected WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
+
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ public String index() {
+ return this.index;
+ }
+
+ @SuppressWarnings("unchecked")
+ public T index(String index) {
+ this.index = index;
+ return (T) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public T replicationType(ReplicationType replicationType) {
+ this.replicationType = replicationType;
+ return (T) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ public T replicationType(String replicationType) {
+ return replicationType(ReplicationType.fromString(replicationType));
+ }
+
+ public ReplicationType replicationType() {
+ return this.replicationType;
+ }
+
+ public WriteConsistencyLevel consistencyLevel() {
+ return this.consistencyLevel;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ @SuppressWarnings("unchecked")
+ public T consistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ this.consistencyLevel = consistencyLevel;
+ return (T) this;
+ }
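+
+ // Example (hypothetical usage): a concrete subclass request is typically configured
+ // fluently via the setters above, e.g.
+ //   request.index("my-index").replicationType("async").consistencyLevel(WriteConsistencyLevel.QUORUM);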
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = addValidationError("index name missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ replicationType = ReplicationType.fromId(in.readByte());
+ consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
+ timeout = TimeValue.readTimeValue(in);
+ index = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(replicationType.id());
+ out.writeByte(consistencyLevel.id());
+ timeout.writeTo(out);
+ out.writeString(index);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java
new file mode 100644
index 0000000..583059f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class IndicesReplicationOperationRequest<T extends IndicesReplicationOperationRequest> extends ActionRequest<T> {
+
+ protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT;
+ protected String[] indices;
+ private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
+
+ protected ReplicationType replicationType = ReplicationType.DEFAULT;
+ protected WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
+
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ /**
+ * A timeout to wait if the operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final T timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return (T) this;
+ }
+
+ /**
+ * A timeout to wait if the operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public T timeout(String timeout) {
+ this.timeout = TimeValue.parseTimeValue(timeout, null);
+ return (T) this;
+ }
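+
+ // e.g. timeout("30s") is parsed by TimeValue.parseTimeValue into a 30-second timeout.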
+
+ public String[] indices() {
+ return this.indices;
+ }
+
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ @SuppressWarnings("unchecked")
+ public T indicesOptions(IndicesOptions indicesOptions) {
+ if (indicesOptions == null) {
+ throw new IllegalArgumentException("IndicesOptions must not be null");
+ }
+ this.indicesOptions = indicesOptions;
+ return (T) this;
+ }
+
+ /**
+ * The indices the request will execute against.
+ */
+ @SuppressWarnings("unchecked")
+ public final T indices(String[] indices) {
+ this.indices = indices;
+ return (T) this;
+ }
+
+ public ReplicationType replicationType() {
+ return this.replicationType;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public final T replicationType(ReplicationType replicationType) {
+ if (replicationType == null) {
+ throw new IllegalArgumentException("ReplicationType must not be null");
+ }
+ this.replicationType = replicationType;
+ return (T) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ public final T replicationType(String replicationType) {
+ return replicationType(ReplicationType.fromString(replicationType));
+ }
+
+ public WriteConsistencyLevel consistencyLevel() {
+ return this.consistencyLevel;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ @SuppressWarnings("unchecked")
+ public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ if (consistencyLevel == null) {
+ throw new IllegalArgumentException("WriteConsistencyLevel must not be null");
+ }
+ this.consistencyLevel = consistencyLevel;
+ return (T) this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ replicationType = ReplicationType.fromId(in.readByte());
+ consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
+ timeout = TimeValue.readTimeValue(in);
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(replicationType.id());
+ out.writeByte(consistencyLevel.id());
+ timeout.writeTo(out);
+ out.writeStringArrayNullable(indices);
+ indicesOptions.writeIndicesOptions(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java
new file mode 100644
index 0000000..e33e3bc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ */
+public abstract class IndicesReplicationOperationRequestBuilder<Request extends IndicesReplicationOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends IndicesReplicationOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected IndicesReplicationOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setIndices(String... indices) {
+ request.indices(indices);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Specifies what types of requested indices to ignore (for example, indices that don't exist)
+ * and how to deal with wildcard indices expressions.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ request().indicesOptions(indicesOptions);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setReplicationType(String replicationType) {
+ request.replicationType(replicationType);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return (RequestBuilder) this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/ReplicationShardOperationFailedException.java b/src/main/java/org/elasticsearch/action/support/replication/ReplicationShardOperationFailedException.java
new file mode 100644
index 0000000..5aa1475
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/ReplicationShardOperationFailedException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that a failure occurred performing an operation on the shard.
+ *
+ */
+public class ReplicationShardOperationFailedException extends IndexShardException implements ElasticsearchWrapperException {
+
+ public ReplicationShardOperationFailedException(ShardId shardId, String msg) {
+ super(shardId, msg, null);
+ }
+
+ public ReplicationShardOperationFailedException(ShardId shardId, Throwable cause) {
+ super(shardId, "", cause);
+ }
+
+ public ReplicationShardOperationFailedException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/support/replication/ReplicationType.java b/src/main/java/org/elasticsearch/action/support/replication/ReplicationType.java
new file mode 100644
index 0000000..9ee279d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/ReplicationType.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * The type of replication to perform.
+ */
+public enum ReplicationType {
+ /**
+ * Synchronous replication: wait until all replicas have performed the operation.
+ */
+ SYNC((byte) 0),
+ /**
+ * Asynchronous replication: send the request to the replicas, but do not wait for them to complete.
+ */
+ ASYNC((byte) 1),
+ /**
+ * Use the default replication type configured for this node.
+ */
+ DEFAULT((byte) 2);
+
+ private byte id;
+
+ ReplicationType(byte id) {
+ this.id = id;
+ }
+
+ /**
+ * The internal representation of the replication type.
+ */
+ public byte id() {
+ return id;
+ }
+
+ /**
+ * Constructs the replication type from its internal representation.
+ */
+ public static ReplicationType fromId(byte id) {
+ if (id == 0) {
+ return SYNC;
+ } else if (id == 1) {
+ return ASYNC;
+ } else if (id == 2) {
+ return DEFAULT;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No type match for [" + id + "]");
+ }
+ }
+
+ /**
+ * Parse the replication type from string.
+ */
+ public static ReplicationType fromString(String type) {
+ if ("async".equals(type)) {
+ return ASYNC;
+ } else if ("sync".equals(type)) {
+ return SYNC;
+ } else if ("default".equals(type)) {
+ return DEFAULT;
+ }
+ throw new ElasticsearchIllegalArgumentException("No replication type match for [" + type + "], should be either `async`, or `sync`");
+ }
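+
+ // Example (hypothetical usage): the two factory methods round-trip the
+ // representations defined above, e.g.
+ //   ReplicationType.fromId(ReplicationType.ASYNC.id()) == ReplicationType.ASYNC
+ //   ReplicationType.fromString("sync") == ReplicationType.SYNC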
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java
new file mode 100644
index 0000000..7bcafbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ *
+ */
+public abstract class ShardReplicationOperationRequest<T extends ShardReplicationOperationRequest> extends ActionRequest<T> {
+
+ public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);
+
+ protected TimeValue timeout = DEFAULT_TIMEOUT;
+
+ protected String index;
+
+ private boolean threadedOperation = true;
+ private ReplicationType replicationType = ReplicationType.DEFAULT;
+ private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
+
+ protected ShardReplicationOperationRequest() {
+
+ }
+
+ public ShardReplicationOperationRequest(ActionRequest request) {
+ super(request);
+ }
+
+ public ShardReplicationOperationRequest(T request) {
+ super(request);
+ this.timeout = request.timeout();
+ this.index = request.index();
+ this.threadedOperation = request.operationThreaded();
+ this.replicationType = request.replicationType();
+ this.consistencyLevel = request.consistencyLevel();
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ public final boolean operationThreaded() {
+ return threadedOperation;
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally. Defaults
+ * to <tt>true</tt> when running in embedded mode.
+ */
+ @SuppressWarnings("unchecked")
+ public final T operationThreaded(boolean threadedOperation) {
+ this.threadedOperation = threadedOperation;
+ return (T) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final T timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return (T) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final T timeout(String timeout) {
+ return timeout(TimeValue.parseTimeValue(timeout, null));
+ }
+
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ public String index() {
+ return this.index;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T index(String index) {
+ this.index = index;
+ return (T) this;
+ }
+
+ /**
+ * The replication type.
+ */
+ public ReplicationType replicationType() {
+ return this.replicationType;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public final T replicationType(ReplicationType replicationType) {
+ this.replicationType = replicationType;
+ return (T) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ public final T replicationType(String replicationType) {
+ return replicationType(ReplicationType.fromString(replicationType));
+ }
+
+ public WriteConsistencyLevel consistencyLevel() {
+ return this.consistencyLevel;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ @SuppressWarnings("unchecked")
+ public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ this.consistencyLevel = consistencyLevel;
+ return (T) this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = addValidationError("index is missing", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ replicationType = ReplicationType.fromId(in.readByte());
+ consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
+ timeout = TimeValue.readTimeValue(in);
+ index = in.readSharedString();
+ // no need to serialize threaded* parameters, since they only matter locally
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(replicationType.id());
+ out.writeByte(consistencyLevel.id());
+ timeout.writeTo(out);
+ out.writeSharedString(index);
+ }
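+
+ // Wire-format note: readFrom must consume fields in exactly the order writeTo emits
+ // them (replication type byte, consistency level byte, timeout, then the index name).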
+
+ /**
+ * Called before the request gets forked into a local thread.
+ */
+ public void beforeLocalFork() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java
new file mode 100644
index 0000000..b03b93d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ */
+public abstract class ShardReplicationOperationRequestBuilder<Request extends ShardReplicationOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends ShardReplicationOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected ShardReplicationOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally. Defaults
+ * to <tt>true</tt> when running in embedded mode.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setOperationThreaded(boolean threadedOperation) {
+ request.operationThreaded(threadedOperation);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setIndex(String index) {
+ request.index(index);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setReplicationType(String replicationType) {
+ request.replicationType(replicationType);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
+ */
+ @SuppressWarnings("unchecked")
+ public RequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return (RequestBuilder) this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java
new file mode 100644
index 0000000..6eba612
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ *
+ */
+public abstract class TransportIndexReplicationOperationAction<Request extends IndexReplicationOperationRequest, Response extends ActionResponse, ShardRequest extends ShardReplicationOperationRequest, ShardReplicaRequest extends ActionRequest, ShardResponse extends ActionResponse>
+ extends TransportAction<Request, Response> {
+
+ protected final ClusterService clusterService;
+
+ protected final TransportShardReplicationOperationAction<ShardRequest, ShardReplicaRequest, ShardResponse> shardAction;
+
+ @Inject
+ public TransportIndexReplicationOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ TransportShardReplicationOperationAction<ShardRequest, ShardReplicaRequest, ShardResponse> shardAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.shardAction = shardAction;
+
+ transportService.registerHandler(transportAction(), new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final Request request, final ActionListener<Response> listener) {
+ ClusterState clusterState = clusterService.state();
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+ // update to concrete index
+ request.index(clusterState.metaData().concreteIndex(request.index()));
+ blockException = checkRequestBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+
+ GroupShardsIterator groups;
+ try {
+ groups = shards(request);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ return;
+ }
+ final AtomicInteger indexCounter = new AtomicInteger();
+ final AtomicInteger failureCounter = new AtomicInteger();
+ final AtomicInteger completionCounter = new AtomicInteger(groups.size());
+ final AtomicReferenceArray<ShardActionResult> shardsResponses = new AtomicReferenceArray<ShardActionResult>(groups.size());
+
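+ // One shard-level request is executed per shard group below; results and failures are
+ // collected in shardsResponses and combined once completionCounter reaches zero.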
+ for (final ShardIterator shardIt : groups) {
+ ShardRequest shardRequest = newShardRequestInstance(request, shardIt.shardId().id());
+
+ // TODO for now, we fork operations on the shards of the index
+ shardRequest.beforeLocalFork(); // optimize for local fork
+ shardRequest.operationThreaded(true);
+
+ // no need for a threaded listener, we will fork when it's done based on the index request
+ shardRequest.listenerThreaded(false);
+ shardAction.execute(shardRequest, new ActionListener<ShardResponse>() {
+ @Override
+ public void onResponse(ShardResponse result) {
+ shardsResponses.set(indexCounter.getAndIncrement(), new ShardActionResult(result));
+ returnIfNeeded();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ failureCounter.getAndIncrement();
+ int index = indexCounter.getAndIncrement();
+ if (accumulateExceptions()) {
+ shardsResponses.set(index, new ShardActionResult(
+ new DefaultShardOperationFailedException(request.index, shardIt.shardId().id(), e)));
+ }
+ returnIfNeeded();
+ }
+
+ private void returnIfNeeded() {
+ if (completionCounter.decrementAndGet() == 0) {
+ List<ShardResponse> responses = Lists.newArrayList();
+ List<ShardOperationFailedException> failures = Lists.newArrayList();
+ for (int i = 0; i < shardsResponses.length(); i++) {
+ ShardActionResult shardActionResult = shardsResponses.get(i);
+ if (shardActionResult == null) {
+ assert !accumulateExceptions();
+ continue;
+ }
+ if (shardActionResult.isFailure()) {
+ assert accumulateExceptions() && shardActionResult.shardFailure != null;
+ failures.add(shardActionResult.shardFailure);
+ } else {
+ responses.add(shardActionResult.shardResponse);
+ }
+ }
+
+ assert failures.size() == 0 || failures.size() == failureCounter.get();
+ listener.onResponse(newResponseInstance(request, responses, failureCounter.get(), failures));
+ }
+ }
+ });
+ }
+ }
+
+ protected abstract Request newRequestInstance();
+
+ protected abstract Response newResponseInstance(Request request, List<ShardResponse> shardResponses, int failuresCount, List<ShardOperationFailedException> shardFailures);
+
+ protected abstract String transportAction();
+
+ protected abstract GroupShardsIterator shards(Request request) throws ElasticsearchException;
+
+ protected abstract ShardRequest newShardRequestInstance(Request request, int shardId);
+
+ protected abstract boolean accumulateExceptions();
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request);
+
+ private class ShardActionResult {
+
+ private final ShardResponse shardResponse;
+ private final ShardOperationFailedException shardFailure;
+
+ private ShardActionResult(ShardResponse shardResponse) {
+ assert shardResponse != null;
+ this.shardResponse = shardResponse;
+ this.shardFailure = null;
+ }
+
+ private ShardActionResult(ShardOperationFailedException shardOperationFailedException) {
+ assert shardOperationFailedException != null;
+ this.shardFailure = shardOperationFailedException;
+ this.shardResponse = null;
+ }
+
+ boolean isFailure() {
+ return shardFailure != null;
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequestInstance();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [" + transportAction() + "] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java
new file mode 100644
index 0000000..5c0dd26
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ */
+public abstract class TransportIndicesReplicationOperationAction<Request extends IndicesReplicationOperationRequest, Response extends ActionResponse, IndexRequest extends IndexReplicationOperationRequest, IndexResponse extends ActionResponse,
+ ShardRequest extends ShardReplicationOperationRequest, ShardReplicaRequest extends ActionRequest, ShardResponse extends ActionResponse>
+ extends TransportAction<Request, Response> {
+
+ protected final ClusterService clusterService;
+
+ protected final TransportIndexReplicationOperationAction<IndexRequest, IndexResponse, ShardRequest, ShardReplicaRequest, ShardResponse> indexAction;
+
+
+ final String transportAction;
+
+ @Inject
+ public TransportIndicesReplicationOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ TransportIndexReplicationOperationAction<IndexRequest, IndexResponse, ShardRequest, ShardReplicaRequest, ShardResponse> indexAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.indexAction = indexAction;
+
+ this.transportAction = transportAction();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ }
+
+
+ protected abstract Map<String, Set<String>> resolveRouting(ClusterState clusterState, Request request) throws ElasticsearchException;
+
+ @Override
+ protected void doExecute(final Request request, final ActionListener<Response> listener) {
+ ClusterState clusterState = clusterService.state();
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+ // get actual indices
+ String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
+ blockException = checkRequestBlock(clusterState, request, concreteIndices);
+ if (blockException != null) {
+ throw blockException;
+ }
+
+ final AtomicInteger indexCounter = new AtomicInteger();
+ final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
+ final AtomicReferenceArray<Object> indexResponses = new AtomicReferenceArray<Object>(concreteIndices.length);
+
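+ // Fan out one index-level request per concrete index; completionCounter gates the
+ // single response that is sent once every per-index action has returned.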
+ Map<String, Set<String>> routingMap = resolveRouting(clusterState, request);
+ if (concreteIndices == null || concreteIndices.length == 0) {
+ listener.onResponse(newResponseInstance(request, indexResponses));
+ } else {
+ for (final String index : concreteIndices) {
+ Set<String> routing = null;
+ if (routingMap != null) {
+ routing = routingMap.get(index);
+ }
+ IndexRequest indexRequest = newIndexRequestInstance(request, index, routing);
+ // no threading needed, all is done on the index replication one
+ indexRequest.listenerThreaded(false);
+ indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse result) {
+ indexResponses.set(indexCounter.getAndIncrement(), result);
+ if (completionCounter.decrementAndGet() == 0) {
+ listener.onResponse(newResponseInstance(request, indexResponses));
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ int index = indexCounter.getAndIncrement();
+ if (accumulateExceptions()) {
+ indexResponses.set(index, e);
+ }
+ if (completionCounter.decrementAndGet() == 0) {
+ listener.onResponse(newResponseInstance(request, indexResponses));
+ }
+ }
+ });
+ }
+ }
+ }
+
+ protected abstract Request newRequestInstance();
+
+ protected abstract Response newResponseInstance(Request request, AtomicReferenceArray indexResponses);
+
+ protected abstract String transportAction();
+
+ protected abstract IndexRequest newIndexRequestInstance(Request request, String index, Set<String> routing);
+
+ protected abstract boolean accumulateExceptions();
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices);
+
+ private class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequestInstance();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+ // no need for a threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send error response for action [" + transportAction + "] and request [" + request + "]", e1);
+ }
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java
new file mode 100644
index 0000000..caffb6b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java
@@ -0,0 +1,806 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.TimeoutClusterStateListener;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.NodeClosedException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.ExceptionsHelper.detailedMessage;
+
+/**
+ */
+public abstract class TransportShardReplicationOperationAction<Request extends ShardReplicationOperationRequest, ReplicaRequest extends ActionRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
+
+ protected final TransportService transportService;
+ protected final ClusterService clusterService;
+ protected final IndicesService indicesService;
+ protected final ShardStateAction shardStateAction;
+ protected final ReplicationType defaultReplicationType;
+ protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
+ protected final TransportRequestOptions transportOptions;
+
+ final String transportAction;
+ final String transportReplicaAction;
+ final String executor;
+ final boolean checkWriteConsistency;
+
+ protected TransportShardReplicationOperationAction(Settings settings, TransportService transportService,
+ ClusterService clusterService, IndicesService indicesService,
+ ThreadPool threadPool, ShardStateAction shardStateAction) {
+ super(settings, threadPool);
+ this.transportService = transportService;
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ this.shardStateAction = shardStateAction;
+
+ this.transportAction = transportAction();
+ this.transportReplicaAction = transportReplicaAction();
+ this.executor = executor();
+ this.checkWriteConsistency = checkWriteConsistency();
+
+ transportService.registerHandler(transportAction, new OperationTransportHandler());
+ transportService.registerHandler(transportReplicaAction, new ReplicaOperationTransportHandler());
+
+ this.transportOptions = transportOptions();
+
+ this.defaultReplicationType = ReplicationType.fromString(settings.get("action.replication_type", "sync"));
+ this.defaultWriteConsistencyLevel = WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum"));
+ }
+
+ @Override
+ protected void doExecute(Request request, ActionListener<Response> listener) {
+ new AsyncShardOperationAction(request, listener).start();
+ }
+
+ protected abstract Request newRequestInstance();
+
+ protected abstract ReplicaRequest newReplicaRequestInstance();
+
+ protected abstract Response newResponseInstance();
+
+ protected abstract String transportAction();
+
+ protected abstract String executor();
+
+ protected abstract PrimaryResponse<Response, ReplicaRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest);
+
+ protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest);
+
+ /**
+ * Called once replica operations have been dispatched from the primary.
+ */
+ protected void postPrimaryOperation(Request request, PrimaryResponse<Response, ReplicaRequest> response) {
+
+ }
+
+ protected abstract ShardIterator shards(ClusterState clusterState, Request request) throws ElasticsearchException;
+
+ protected abstract boolean checkWriteConsistency();
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request);
+
+ /**
+ * Resolves the request. By default, this simply sets the concrete index (if the given one is an alias).
+ * If resolving implies a different execution path, return <tt>false</tt> here to indicate that this
+ * request should not continue to be executed.
+ */
+ protected boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener) {
+ request.index(state.metaData().concreteIndex(request.index()));
+ return true;
+ }
+
+ protected TransportRequestOptions transportOptions() {
+ return TransportRequestOptions.EMPTY;
+ }
+
+ /**
+ * Whether operations on the replicas should be skipped. Defaults to <tt>false</tt>, meaning operations
+ * will also be executed on the replicas.
+ */
+ protected boolean ignoreReplicas() {
+ return false;
+ }
+
+ private String transportReplicaAction() {
+ return transportAction() + "/replica";
+ }
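+
+ // e.g. an action whose transportAction() is "index" would register its replica
+ // handler under "index/replica" (illustrative name; subclasses define the real one).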
+
+ protected boolean retryPrimaryException(Throwable e) {
+ return TransportActions.isShardNotAvailableException(e);
+ }
+
+ /**
+ * Whether an exception should be ignored when the operation is performed on the replica.
+ */
+ boolean ignoreReplicaException(Throwable e) {
+ if (TransportActions.isShardNotAvailableException(e)) {
+ return true;
+ }
+ Throwable cause = ExceptionsHelper.unwrapCause(e);
+ if (cause instanceof ConnectTransportException) {
+ return true;
+ }
+ // on version conflict or document missing, it means
+ // that a newer change has crept into the replica, and it's fine
+ if (cause instanceof VersionConflictEngineException) {
+ return true;
+ }
+ // same here
+ if (cause instanceof DocumentAlreadyExistsException) {
+ return true;
+ }
+ return false;
+ }
+
+ class OperationTransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequestInstance();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
+ // no need to have a threaded listener since we just send back a response
+ request.listenerThreaded(false);
+ // if we have a local operation, execute it on a separate thread since this handler runs on the network thread (executor is SAME)
+ request.operationThreaded(true);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Throwable e1) {
+ logger.warn("Failed to send response for " + transportAction, e1);
+ }
+ }
+ });
+ }
+ }
+
+ class ReplicaOperationTransportHandler extends BaseTransportRequestHandler<ReplicaOperationRequest> {
+
+ @Override
+ public ReplicaOperationRequest newInstance() {
+ return new ReplicaOperationRequest();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ // we must never reject a replica operation because of thread pool capacity
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+
+ @Override
+ public void messageReceived(final ReplicaOperationRequest request, final TransportChannel channel) throws Exception {
+ shardOperationOnReplica(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ protected class PrimaryOperationRequest implements Streamable {
+
+ public int shardId;
+
+ public Request request;
+
+ public PrimaryOperationRequest() {
+ }
+
+ public PrimaryOperationRequest(int shardId, Request request) {
+ this.shardId = shardId;
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ shardId = in.readVInt();
+ request = newRequestInstance();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(shardId);
+ request.writeTo(out);
+ }
+ }
+
+ protected class ReplicaOperationRequest extends TransportRequest {
+
+ public int shardId;
+ public ReplicaRequest request;
+
+ public ReplicaOperationRequest() {
+ }
+
+ public ReplicaOperationRequest(int shardId, ReplicaRequest request) {
+ super(request);
+ this.shardId = shardId;
+ this.request = request;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = in.readVInt();
+ request = newReplicaRequestInstance();
+ request.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shardId);
+ request.writeTo(out);
+ }
+ }
+
+ protected class AsyncShardOperationAction {
+
+ private final ActionListener<Response> listener;
+ private final Request request;
+ private volatile ClusterState clusterState;
+ private volatile ShardIterator shardIt;
+ private final AtomicBoolean primaryOperationStarted = new AtomicBoolean();
+ private final ReplicationType replicationType;
+ protected final long startTime = System.currentTimeMillis();
+
+ AsyncShardOperationAction(Request request, ActionListener<Response> listener) {
+ this.request = request;
+ this.listener = listener;
+
+ if (request.replicationType() != ReplicationType.DEFAULT) {
+ replicationType = request.replicationType();
+ } else {
+ replicationType = defaultReplicationType;
+ }
+ }
+
+ public void start() {
+ start(false);
+ }
+
+ /**
+ * Returns <tt>true</tt> if the action has started to be performed on the primary (or is done).
+ */
+ public boolean start(final boolean fromClusterEvent) throws ElasticsearchException {
+ this.clusterState = clusterService.state();
+ try {
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ if (blockException.retryable()) {
+ logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
+ retry(fromClusterEvent, blockException);
+ return false;
+ } else {
+ throw blockException;
+ }
+ }
+ // check if we need to execute, and if not, return
+ if (!resolveRequest(clusterState, request, listener)) {
+ return true;
+ }
+ blockException = checkRequestBlock(clusterState, request);
+ if (blockException != null) {
+ if (blockException.retryable()) {
+ logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
+ retry(fromClusterEvent, blockException);
+ return false;
+ } else {
+ throw blockException;
+ }
+ }
+ shardIt = shards(clusterState, request);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ return true;
+ }
+
+ // no shards: we might be in the window between index gateway recovery and shard routing initialization
+ if (shardIt.size() == 0) {
+ logger.trace("no shard instances known for shard [{}], scheduling a retry", shardIt.shardId());
+
+ retry(fromClusterEvent, null);
+ return false;
+ }
+
+ boolean foundPrimary = false;
+ ShardRouting shardX;
+ while ((shardX = shardIt.nextOrNull()) != null) {
+ final ShardRouting shard = shardX;
+ // we only deal with the primary shard here...
+ if (!shard.primary()) {
+ continue;
+ }
+ if (!shard.active() || !clusterState.nodes().nodeExists(shard.currentNodeId())) {
+ logger.trace("primary shard [{}] is not yet active or we do not know the node it is assigned to [{}], scheduling a retry.", shard.shardId(), shard.currentNodeId());
+ retry(fromClusterEvent, null);
+ return false;
+ }
+
+ // check here for consistency
+ if (checkWriteConsistency) {
+ WriteConsistencyLevel consistencyLevel = defaultWriteConsistencyLevel;
+ if (request.consistencyLevel() != WriteConsistencyLevel.DEFAULT) {
+ consistencyLevel = request.consistencyLevel();
+ }
+ int requiredNumber = 1;
+ if (consistencyLevel == WriteConsistencyLevel.QUORUM && shardIt.size() > 2) {
+ // a quorum only makes sense with more than 2 shard copies; otherwise it is 1 shard with 1 replica, and the quorum is 1 (which is what requiredNumber is initialized to)
+ requiredNumber = (shardIt.size() / 2) + 1;
+ } else if (consistencyLevel == WriteConsistencyLevel.ALL) {
+ requiredNumber = shardIt.size();
+ }
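+ // Worked example (editor's note, not in the original source): with 1 primary and 2 replicas,
+ // shardIt.size() == 3, so QUORUM requires (3 / 2) + 1 == 2 active copies; with 1 primary and
+ // 1 replica, size == 2, so the quorum stays at the initialized value of 1, while ALL would
+ // require both copies to be active.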
+
+ if (shardIt.sizeActive() < requiredNumber) {
+ logger.trace("not enough active copies of shard [{}] to meet write consistency of [{}] (have {}, needed {}), scheduling a retry.",
+ shard.shardId(), consistencyLevel, shardIt.sizeActive(), requiredNumber);
+ retry(fromClusterEvent, null);
+ return false;
+ }
+ }
+
+ if (!primaryOperationStarted.compareAndSet(false, true)) {
+ return true;
+ }
+
+ foundPrimary = true;
+ if (shard.currentNodeId().equals(clusterState.nodes().localNodeId())) {
+ try {
+ if (request.operationThreaded()) {
+ request.beforeLocalFork();
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ performOnPrimary(shard.id(), shard, clusterState);
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ }
+ });
+ } else {
+ performOnPrimary(shard.id(), shard, clusterState);
+ }
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ } else {
+ DiscoveryNode node = clusterState.nodes().get(shard.currentNodeId());
+ transportService.sendRequest(node, transportAction, request, transportOptions, new BaseTransportResponseHandler<Response>() {
+
+ @Override
+ public Response newInstance() {
+ return newResponseInstance();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
+ if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
+ retryPrimaryException(exp)) {
+ primaryOperationStarted.set(false);
+ // we already marked the operation as started when we executed it (and removed the listener),
+ // so pass false to re-add the cluster state listener
+ logger.trace("received an error from the node the primary was assigned to ({}), scheduling a retry", exp.getMessage());
+ retry(false, null);
+ } else {
+ listener.onFailure(exp);
+ }
+ }
+ });
+ }
+ break;
+ }
+ // we won't find a primary if there are no shards in the shard iterator, retry...
+ if (!foundPrimary) {
+ logger.trace("couldn't find a eligible primary shard, scheduling for retry.");
+ retry(fromClusterEvent, null);
+ return false;
+ }
+ return true;
+ }
+
+ void retry(boolean fromClusterEvent, @Nullable final Throwable failure) {
+ if (fromClusterEvent) {
+ logger.trace("retry scheduling ignored as it as we already have a listener in place");
+ return;
+ }
+
+ // make it a threaded operation, so that when the retry fires on the discovery listener thread we fork off it
+ request.beforeLocalFork();
+ request.operationThreaded(true);
+
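+ // the remaining budget is the request timeout minus the time already spent since this action started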
+ TimeValue timeout = new TimeValue(request.timeout().millis() - (System.currentTimeMillis() - startTime));
+ if (timeout.millis() <= 0) {
+ raiseTimeoutFailure(timeout, failure);
+ return;
+ }
+
+ clusterService.add(timeout, new TimeoutClusterStateListener() {
+ @Override
+ public void postAdded() {
+ // check if state version changed while we were adding this listener
+ long sampledVersion = clusterState.version();
+ long currentVersion = clusterService.state().version();
+ if (sampledVersion != currentVersion) {
+ logger.trace("state change while we were trying to add listener, trying to start again, sampled_version [{}], current_version [{}]", sampledVersion, currentVersion);
+ if (start(true)) {
+ // if we managed to start and perform the operation on the primary, we can remove this listener
+ clusterService.remove(this);
+ }
+ }
+ }
+
+ @Override
+ public void onClose() {
+ clusterService.remove(this);
+ listener.onFailure(new NodeClosedException(clusterService.localNode()));
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ logger.trace("cluster changed (version {}), trying to start again", event.state().version());
+ if (start(true)) {
+ // if we managed to start and perform the operation on the primary, we can remove this listener
+ clusterService.remove(this);
+ }
+ }
+
+ @Override
+ public void onTimeout(TimeValue timeValue) {
+ // just to be on the safe side, see if we can start it now?
+ if (start(true)) {
+ clusterService.remove(this);
+ return;
+ }
+ clusterService.remove(this);
+ raiseTimeoutFailure(timeValue, failure);
+ }
+ });
+ }
+
+ void raiseTimeoutFailure(TimeValue timeout, @Nullable Throwable failure) {
+ if (failure == null) {
+ if (shardIt == null) {
+ failure = new UnavailableShardsException(null, "no available shards: Timeout waiting for [" + timeout + "], request: " + request.toString());
+ } else {
+ failure = new UnavailableShardsException(shardIt.shardId(), "[" + shardIt.size() + "] shardIt, [" + shardIt.sizeActive() + "] active : Timeout waiting for [" + timeout + "], request: " + request.toString());
+ }
+ }
+ listener.onFailure(failure);
+ }
+
+ void performOnPrimary(int primaryShardId, final ShardRouting shard, ClusterState clusterState) {
+ try {
+ PrimaryResponse<Response, ReplicaRequest> response = shardOperationOnPrimary(clusterState, new PrimaryOperationRequest(primaryShardId, request));
+ performReplicas(response);
+ } catch (Throwable e) {
+ // shard has not been allocated yet, retry it here
+ if (retryPrimaryException(e)) {
+ primaryOperationStarted.set(false);
+ logger.trace("had an error while performing operation on primary ({}), scheduling a retry.", e.getMessage());
+ retry(false, e);
+ return;
+ }
+ if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(shard.shortSummary() + ": Failed to execute [" + request + "]", e);
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", e);
+ }
+ }
+ listener.onFailure(e);
+ }
+ }
+
+ void performReplicas(final PrimaryResponse<Response, ReplicaRequest> response) {
+ if (ignoreReplicas()) {
+ postPrimaryOperation(request, response);
+ listener.onResponse(response.response());
+ return;
+ }
+
+ ShardRouting shard;
+
+ // we double check the cluster state; if it changed, we need to make sure we take the latest one, because
+ // a replica shard may have started its recovery process and we need to apply the operation there as well...
+
+ // we also need to check whether the new state has a new primary shard (replacing the one we indexed to) that
+ // started and was assigned to another node while the indexing happened. In that case, we want to apply the
+ // operation on the new primary shard as well...
+ ClusterState newState = clusterService.state();
+ ShardRouting newPrimaryShard = null;
+ if (clusterState != newState) {
+ shardIt.reset();
+ ShardRouting originalPrimaryShard = null;
+ while ((shard = shardIt.nextOrNull()) != null) {
+ if (shard.primary()) {
+ originalPrimaryShard = shard;
+ break;
+ }
+ }
+ if (originalPrimaryShard == null || !originalPrimaryShard.active()) {
+ throw new ElasticsearchIllegalStateException("unexpected state, failed to find primary shard on an index operation that succeeded");
+ }
+
+ clusterState = newState;
+ shardIt = shards(newState, request);
+ while ((shard = shardIt.nextOrNull()) != null) {
+ if (shard.primary()) {
+ if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId())) {
+ newPrimaryShard = null;
+ } else {
+ newPrimaryShard = shard;
+ }
+ break;
+ }
+ }
+ }
+
+ // initialize the counter
+ int replicaCounter = shardIt.assignedReplicasIncludingRelocating();
+
+ if (newPrimaryShard != null) {
+ replicaCounter++;
+ }
+
+ if (replicaCounter == 0) {
+ postPrimaryOperation(request, response);
+ listener.onResponse(response.response());
+ return;
+ }
+
+ if (replicationType == ReplicationType.ASYNC) {
+ postPrimaryOperation(request, response);
+ // async replication, notify the listener
+ listener.onResponse(response.response());
+ // now, trick the counter so it can never decrease to 0 and notify the listener a second time
+ replicaCounter = Integer.MIN_VALUE;
+ }
+
+ // we add one to the replica count to do the postPrimaryOperation
+ replicaCounter++;
+ AtomicInteger counter = new AtomicInteger(replicaCounter);
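+ // Editor's note: the counter now equals (replica operations to dispatch) + 1; each replica
+ // operation decrements it once, and the extra unit is consumed after postPrimaryOperation
+ // below, so whichever decrement reaches zero notifies the listener exactly once. In the
+ // ASYNC case, the Integer.MIN_VALUE base above keeps the counter from ever reaching zero.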
+
+
+ IndexMetaData indexMetaData = clusterState.metaData().index(request.index());
+
+ if (newPrimaryShard != null) {
+ performOnReplica(response, counter, newPrimaryShard, newPrimaryShard.currentNodeId(), indexMetaData);
+ }
+
+ shardIt.reset(); // reset the iterator
+ while ((shard = shardIt.nextOrNull()) != null) {
+ // if it's unassigned, nothing to do here...
+ if (shard.unassigned()) {
+ continue;
+ }
+
+ // if the shard is primary and relocating, add one to the counter since we perform it on the replica as well
+ // (and we already did it on the primary)
+ boolean doOnlyOnRelocating = false;
+ if (shard.primary()) {
+ if (shard.relocating()) {
+ doOnlyOnRelocating = true;
+ } else {
+ continue;
+ }
+ }
+ // we index on a replica that is initializing as well, since we might not have got the event
+ // yet that it was started. We will get an IllegalShardState exception if it is not started,
+ // and that's fine, we will ignore it
+ if (!doOnlyOnRelocating) {
+ performOnReplica(response, counter, shard, shard.currentNodeId(), indexMetaData);
+ }
+ if (shard.relocating()) {
+ performOnReplica(response, counter, shard, shard.relocatingNodeId(), indexMetaData);
+ }
+ }
+
+ // now do the postPrimary operation, and check if the listener needs to be invoked
+ postPrimaryOperation(request, response);
+ // we also invoke here in case replicas finish before postPrimaryAction does
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(response.response());
+ }
+ }
+
+ void performOnReplica(final PrimaryResponse<Response, ReplicaRequest> response, final AtomicInteger counter, final ShardRouting shard, String nodeId, final IndexMetaData indexMetaData) {
+ // if we don't have that node, it may have failed and will be created again; in
+ // that case, we don't have to perform the operation, and just let it fail over
+ if (!clusterState.nodes().nodeExists(nodeId)) {
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(response.response());
+ }
+ return;
+ }
+
+ final ReplicaOperationRequest shardRequest = new ReplicaOperationRequest(shardIt.shardId().id(), response.replicaRequest());
+ if (!nodeId.equals(clusterState.nodes().localNodeId())) {
+ DiscoveryNode node = clusterState.nodes().get(nodeId);
+ transportService.sendRequest(node, transportReplicaAction, shardRequest, transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleResponse(TransportResponse.Empty vResponse) {
+ finishIfPossible();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ if (!ignoreReplicaException(exp.unwrapCause())) {
+ logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), exp);
+ shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
+ "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(exp) + "]");
+ }
+ finishIfPossible();
+ }
+
+ private void finishIfPossible() {
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(response.response());
+ }
+ }
+ });
+ } else {
+ if (request.operationThreaded()) {
+ request.beforeLocalFork();
+ try {
+ threadPool.executor(executor).execute(new AbstractRunnable() {
+ @Override
+ public void run() {
+ try {
+ shardOperationOnReplica(shardRequest);
+ } catch (Throwable e) {
+ if (!ignoreReplicaException(e)) {
+ logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
+ shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
+ "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
+ }
+ }
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(response.response());
+ }
+ }
+
+ // we must never reject execution because of thread pool capacity on replicas
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+ });
+ } catch (Throwable e) {
+ if (!ignoreReplicaException(e)) {
+ logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
+ shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
+ "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
+ }
+ // we want to decrement the counter here, in the failure handling, because we got rejected
+ // from executing on the thread pool
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(response.response());
+ }
+ }
+ } else {
+ try {
+ shardOperationOnReplica(shardRequest);
+ } catch (Throwable e) {
+ if (!ignoreReplicaException(e)) {
+ logger.warn("Failed to perform " + transportAction + " on replica" + shardIt.shardId(), e);
+ shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
+ "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
+ }
+ }
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(response.response());
+ }
+ }
+ }
+ }
+ }
+
+ public static class PrimaryResponse<Response, ReplicaRequest> {
+ private final ReplicaRequest replicaRequest;
+ private final Response response;
+ private final Object payload;
+
+ public PrimaryResponse(ReplicaRequest replicaRequest, Response response, Object payload) {
+ this.replicaRequest = replicaRequest;
+ this.response = response;
+ this.payload = payload;
+ }
+
+ public ReplicaRequest replicaRequest() {
+ return this.replicaRequest;
+ }
+
+ public Response response() {
+ return response;
+ }
+
+ public Object payload() {
+ return payload;
+ }
+ }
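+
+ // Hypothetical usage sketch (editor's addition, not part of the original source): in an action
+ // where the replica request is the original request (as in indexing), a subclass would build
+ // the PrimaryResponse from its shard-level result roughly as follows, with executeOnShard
+ // standing in for the concrete shard operation:
+ //
+ //     protected PrimaryResponse<Response, ReplicaRequest> shardOperationOnPrimary(
+ //             ClusterState clusterState, PrimaryOperationRequest shardRequest) {
+ //         Response response = executeOnShard(shardRequest.request, shardRequest.shardId);
+ //         return new PrimaryResponse<Response, ReplicaRequest>(shardRequest.request, response, null);
+ //     }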
+}
diff --git a/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java b/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java
new file mode 100644
index 0000000..9b20f45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.single.custom;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class SingleCustomOperationRequest<T extends SingleCustomOperationRequest> extends ActionRequest<T> {
+
+ private boolean threadedOperation = true;
+ private boolean preferLocal = true;
+
+ protected SingleCustomOperationRequest() {
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ public boolean operationThreaded() {
+ return threadedOperation;
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ @SuppressWarnings("unchecked")
+ public final T operationThreaded(boolean threadedOperation) {
+ this.threadedOperation = threadedOperation;
+ return (T) this;
+ }
+
+ /**
+ * If this operation hits a node holding a relevant shard locally, should execution prefer
+ * that local shard, or just do plain round robin? Defaults to <tt>true</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final T preferLocal(boolean preferLocal) {
+ this.preferLocal = preferLocal;
+ return (T) this;
+ }
+
+ /**
+ * If this operation hits a node holding a relevant shard locally, should execution prefer
+ * that local shard, or just do plain round robin? Defaults to <tt>true</tt>.
+ */
+ public boolean preferLocalShard() {
+ return this.preferLocal;
+ }
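+
+ // Example (editor's sketch): a caller that wants plain round-robin routing instead of
+ // favouring a local shard copy, and no extra thread for local execution, would configure:
+ //
+ //     request.preferLocal(false).operationThreaded(false);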
+
+ public void beforeLocalFork() {
+
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ preferLocal = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(preferLocal);
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequestBuilder.java
new file mode 100644
index 0000000..ab4b48d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequestBuilder.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.single.custom;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public abstract class SingleCustomOperationRequestBuilder<Request extends SingleCustomOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends SingleCustomOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected SingleCustomOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setOperationThreaded(boolean threadedOperation) {
+ request.operationThreaded(threadedOperation);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * If this operation hits a node holding a relevant shard locally, should execution prefer
+ * that local shard, or just do plain round robin? Defaults to <tt>true</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setPreferLocal(boolean preferLocal) {
+ request.preferLocal(preferLocal);
+ return (RequestBuilder) this;
+ }
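+
+ // Hypothetical builder usage (editor's addition): a concrete builder extending this class,
+ // such as an analyze-style request builder, would typically be driven as:
+ //
+ //     builder.setPreferLocal(false).setOperationThreaded(true).execute(listener);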
+}
diff --git a/src/main/java/org/elasticsearch/action/support/single/custom/TransportSingleCustomOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/custom/TransportSingleCustomOperationAction.java
new file mode 100644
index 0000000..da2e1d4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/custom/TransportSingleCustomOperationAction.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.single.custom;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class TransportSingleCustomOperationAction<Request extends SingleCustomOperationRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
+
+ protected final ClusterService clusterService;
+
+ protected final TransportService transportService;
+
+ final String transportAction;
+ final String transportShardAction;
+ final String executor;
+
+ protected TransportSingleCustomOperationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+
+ this.transportAction = transportAction();
+ this.transportShardAction = transportAction() + "/s";
+ this.executor = executor();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ transportService.registerHandler(transportShardAction, new ShardTransportHandler());
+ }
+
+ @Override
+ protected void doExecute(Request request, ActionListener<Response> listener) {
+ new AsyncSingleAction(request, listener).start();
+ }
+
+ protected abstract String transportAction();
+
+ protected abstract String executor();
+
+ /**
+ * Can return null to execute on this local node.
+ */
+ protected abstract ShardsIterator shards(ClusterState state, Request request);
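+
+ // Editor's note: returning null here short-circuits shard routing entirely; performFirst()
+ // below then invokes shardOperation(request, -1) directly on the local node, which suits
+ // operations that are not tied to any index shard.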
+
+ protected abstract Response shardOperation(Request request, int shardId) throws ElasticsearchException;
+
+ protected abstract Request newRequest();
+
+ protected abstract Response newResponse();
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request);
+
+ private class AsyncSingleAction {
+
+ private final ActionListener<Response> listener;
+
+ private final ShardsIterator shardsIt;
+
+ private final Request request;
+
+ private final DiscoveryNodes nodes;
+
+ private AsyncSingleAction(Request request, ActionListener<Response> listener) {
+ this.request = request;
+ this.listener = listener;
+
+ ClusterState clusterState = clusterService.state();
+ nodes = clusterState.nodes();
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+ blockException = checkRequestBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+ this.shardsIt = shards(clusterState, request);
+ }
+
+ public void start() {
+ performFirst();
+ }
+
+ private void onFailure(ShardRouting shardRouting, Throwable e) {
+ if (logger.isTraceEnabled() && e != null) {
+ logger.trace(shardRouting.shortSummary() + ": Failed to execute [" + request + "]", e);
+ }
+ perform(e);
+ }
+
+ /**
+ * First get should try and use a shard that exists on a local node for better performance
+ */
+ private void performFirst() {
+ if (shardsIt == null) {
+ // just execute it on the local node
+ if (request.operationThreaded()) {
+ request.beforeLocalFork();
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Response response = shardOperation(request, -1);
+ listener.onResponse(response);
+ } catch (Throwable e) {
+ onFailure(null, e);
+ }
+ }
+ });
+ return;
+ } else {
+ try {
+ final Response response = shardOperation(request, -1);
+ listener.onResponse(response);
+ return;
+ } catch (Throwable e) {
+ onFailure(null, e);
+ }
+ }
+ return;
+ }
+
+ if (request.preferLocalShard()) {
+ boolean foundLocal = false;
+ ShardRouting shardX;
+ while ((shardX = shardsIt.nextOrNull()) != null) {
+ final ShardRouting shard = shardX;
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ foundLocal = true;
+ if (request.operationThreaded()) {
+ request.beforeLocalFork();
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Response response = shardOperation(request, shard.id());
+ listener.onResponse(response);
+ } catch (Throwable e) {
+ shardsIt.reset();
+ onFailure(shard, e);
+ }
+ }
+ });
+ return;
+ } else {
+ try {
+ final Response response = shardOperation(request, shard.id());
+ listener.onResponse(response);
+ return;
+ } catch (Throwable e) {
+ shardsIt.reset();
+ onFailure(shard, e);
+ }
+ }
+ }
+ }
+ if (!foundLocal) {
+ // no shard found on the local node, go remote
+ shardsIt.reset();
+ perform(null);
+ }
+ } else {
+ perform(null);
+ }
+ }
+
+ private void perform(final Throwable lastException) {
+ final ShardRouting shard = shardsIt == null ? null : shardsIt.nextOrNull();
+ if (shard == null) {
+ Throwable failure = lastException;
+ if (failure == null) {
+ failure = new NoShardAvailableActionException(null, "No shard available for [" + request + "]");
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to execute [" + request + "]", failure);
+ }
+ }
+ listener.onFailure(failure);
+ } else {
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ // we don't prefer local shard, so try and do it here
+ if (!request.preferLocalShard()) {
+ try {
+ if (request.operationThreaded()) {
+ request.beforeLocalFork();
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Response response = shardOperation(request, shard.id());
+ listener.onResponse(response);
+ } catch (Throwable e) {
+ onFailure(shard, e);
+ }
+ }
+ });
+ } else {
+ final Response response = shardOperation(request, shard.id());
+ listener.onResponse(response);
+ }
+ } catch (Throwable e) {
+ onFailure(shard, e);
+ }
+ } else {
+ perform(lastException);
+ }
+ } else {
+ DiscoveryNode node = nodes.get(shard.currentNodeId());
+ transportService.sendRequest(node, transportShardAction, new ShardSingleOperationRequest(request, shard.id()), new BaseTransportResponseHandler<Response>() {
+ @Override
+ public Response newInstance() {
+ return newResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(final Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ onFailure(shard, exp);
+ }
+ });
+ }
+ }
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequest();
+ }
+
+ @Override
+ public void messageReceived(Request request, final TransportChannel channel) throws Exception {
+ // no need to have a threaded listener since we just send back a response
+ request.listenerThreaded(false);
+ // if we have a local operation, execute it on a separate thread since we don't spawn one here
+ request.operationThreaded(true);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response for get", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ private class ShardTransportHandler extends BaseTransportRequestHandler<ShardSingleOperationRequest> {
+
+ @Override
+ public ShardSingleOperationRequest newInstance() {
+ return new ShardSingleOperationRequest();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception {
+ Response response = shardOperation(request.request(), request.shardId());
+ channel.sendResponse(response);
+ }
+ }
+
+ protected class ShardSingleOperationRequest extends TransportRequest {
+
+ private Request request;
+ private int shardId;
+
+ ShardSingleOperationRequest() {
+ }
+
+ public ShardSingleOperationRequest(Request request, int shardId) {
+ super(request);
+ this.request = request;
+ this.shardId = shardId;
+ }
+
+ public Request request() {
+ return request;
+ }
+
+ public int shardId() {
+ return shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = newRequest();
+ request.readFrom(in);
+ shardId = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ out.writeVInt(shardId);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java
new file mode 100644
index 0000000..a8dbaf4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.single.instance;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public abstract class InstanceShardOperationRequest<T extends InstanceShardOperationRequest> extends ActionRequest<T> {
+
+ public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);
+
+ protected TimeValue timeout = DEFAULT_TIMEOUT;
+
+ protected String index;
+ // -1 means it is not set; setting it allows a request to be directed explicitly to a specific shard
+ protected int shardId = -1;
+
+ protected InstanceShardOperationRequest() {
+ }
+
+ public InstanceShardOperationRequest(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = ValidateActions.addValidationError("index is missing", validationException);
+ }
+ return validationException;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final T index(String index) {
+ this.index = index;
+ return (T) this;
+ }
+
+ public TimeValue timeout() {
+ return timeout;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final T timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return (T) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ public final T timeout(String timeout) {
+ return timeout(TimeValue.parseTimeValue(timeout, null));
+ }
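+
+ // Example (editor's note): request.timeout("5s") parses to a five-second TimeValue,
+ // equivalent to request.timeout(new TimeValue(5, TimeUnit.SECONDS)).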
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ shardId = in.readInt();
+ timeout = TimeValue.readTimeValue(in);
+ // no need to pass threading flags over the network; they are always false when coming through a thread pool
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeInt(shardId);
+ timeout.writeTo(out);
+ }
+
+ public void beforeLocalFork() {
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
new file mode 100644
index 0000000..f2c2822
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.single.instance;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.internal.InternalGenericClient;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ */
+public abstract class InstanceShardOperationRequestBuilder<Request extends InstanceShardOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends InstanceShardOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected InstanceShardOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setIndex(String index) {
+ request.index(index);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(TimeValue timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setTimeout(String timeout) {
+ request.timeout(timeout);
+ return (RequestBuilder) this;
+ }
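+
+ // Hypothetical usage (editor's sketch): a concrete builder extending this class, such as an
+ // update-style request builder, would typically be configured as:
+ //
+ //     builder.setIndex("my-index").setTimeout("30s").execute(listener);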
+}
diff --git a/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java
new file mode 100644
index 0000000..8da89b2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java
@@ -0,0 +1,336 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.single.instance;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.TimeoutClusterStateListener;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.node.NodeClosedException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ *
+ */
+public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
+
+ protected final ClusterService clusterService;
+
+ protected final TransportService transportService;
+
+ final String transportAction;
+ final String executor;
+
+ protected TransportInstanceSingleOperationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+
+ this.transportAction = transportAction();
+ this.executor = executor();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(Request request, ActionListener<Response> listener) {
+ new AsyncSingleAction(request, listener).start();
+ }
+
+ protected abstract String executor();
+
+ protected abstract String transportAction();
+
+ protected abstract void shardOperation(Request request, ActionListener<Response> listener) throws ElasticsearchException;
+
+ protected abstract Request newRequest();
+
+ protected abstract Response newResponse();
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request);
+
+ /**
+ * Resolves the request. By default, simply sets the concrete index (if the given one is an alias). If resolving
+ * implies a different execution path, return false here to indicate that this request should not continue to execute.
+ */
+ protected boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener) {
+ request.index(state.metaData().concreteIndex(request.index()));
+ return true;
+ }
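+
+ // Hedged override sketch (editor's addition; alreadySatisfied and respondDirectly are
+ // hypothetical helpers, not part of this codebase): an action that can answer from the
+ // request alone could resolve and finish early:
+ //
+ //     protected boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener) {
+ //         request.index(state.metaData().concreteIndex(request.index()));
+ //         if (alreadySatisfied(request)) {
+ //             respondDirectly(listener); // complete the listener here...
+ //             return false;              // ...and skip routing to a shard
+ //         }
+ //         return true;
+ //     }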
+
+ protected boolean retryOnFailure(Throwable e) {
+ return false;
+ }
+
+ protected TransportRequestOptions transportOptions() {
+ return TransportRequestOptions.EMPTY;
+ }
+
+ /**
+ * Should return an iterator with a single shard!
+ */
+ protected abstract ShardIterator shards(ClusterState clusterState, Request request) throws ElasticsearchException;
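+
+ // Editor's note: start() below asserts shardIt.size() == 1, so an implementation typically
+ // resolves exactly the primary shard routing for the request, e.g. (hedged sketch):
+ //
+ //     return clusterState.routingTable().index(request.index()).shard(request.shardId).primaryShardIt();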
+
+ class AsyncSingleAction {
+
+ private final ActionListener<Response> listener;
+
+ private final Request request;
+
+ private ShardIterator shardIt;
+
+ private DiscoveryNodes nodes;
+
+ private final AtomicBoolean operationStarted = new AtomicBoolean();
+
+ private AsyncSingleAction(Request request, ActionListener<Response> listener) {
+ this.request = request;
+ this.listener = listener;
+ }
+
+ public void start() {
+ start(false);
+ }
+
+ public boolean start(final boolean fromClusterEvent) throws ElasticsearchException {
+ final ClusterState clusterState = clusterService.state();
+ nodes = clusterState.nodes();
+ try {
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ if (blockException.retryable()) {
+ retry(fromClusterEvent, blockException);
+ return false;
+ } else {
+ throw blockException;
+ }
+ }
+ // check if we need to execute, and if not, return
+ if (!resolveRequest(clusterState, request, listener)) {
+ return true;
+ }
+ blockException = checkRequestBlock(clusterState, request);
+ if (blockException != null) {
+ if (blockException.retryable()) {
+ retry(fromClusterEvent, blockException);
+ return false;
+ } else {
+ throw blockException;
+ }
+ }
+ shardIt = shards(clusterState, request);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ return true;
+ }
+
+ // empty shard iterator; we might be in the window between index gateway recovery and shard routing initialization
+ if (shardIt.size() == 0) {
+ retry(fromClusterEvent, null);
+ return false;
+ }
+
+ // this transport only makes sense with an iterator that returns a single shard routing (like the primary)
+ assert shardIt.size() == 1;
+
+ ShardRouting shard = shardIt.nextOrNull();
+ assert shard != null;
+
+ if (!shard.active()) {
+ retry(fromClusterEvent, null);
+ return false;
+ }
+
+ if (!operationStarted.compareAndSet(false, true)) {
+ return true;
+ }
+
+ request.shardId = shardIt.shardId().id();
+ if (shard.currentNodeId().equals(nodes.localNodeId())) {
+ request.beforeLocalFork();
+ try {
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ shardOperation(request, listener);
+ } catch (Throwable e) {
+ if (retryOnFailure(e)) {
+ operationStarted.set(false);
+ // we already marked the operation as started when we executed it (and removed the listener),
+ // so pass false to re-add the cluster state listener
+ retry(false, null);
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ }
+ });
+ } catch (Throwable e) {
+ if (retryOnFailure(e)) {
+ retry(fromClusterEvent, null);
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ } else {
+ DiscoveryNode node = nodes.get(shard.currentNodeId());
+ transportService.sendRequest(node, transportAction, request, transportOptions(), new BaseTransportResponseHandler<Response>() {
+
+ @Override
+ public Response newInstance() {
+ return newResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
+ if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
+ retryOnFailure(exp)) {
+ operationStarted.set(false);
+ // we already marked the operation as started when we executed it (and removed the listener),
+ // so pass false to re-add the cluster state listener
+ retry(false, null);
+ } else {
+ listener.onFailure(exp);
+ }
+ }
+ });
+ }
+ return true;
+ }
+
+ void retry(final boolean fromClusterEvent, final @Nullable Throwable failure) {
+ if (!fromClusterEvent) {
+ // make it a threaded operation, so that when the retry fires on the discovery listener thread we fork off it
+ request.beforeLocalFork();
+ clusterService.add(request.timeout(), new TimeoutClusterStateListener() {
+ @Override
+ public void postAdded() {
+ if (start(true)) {
+ // if we managed to start and perform the operation on the primary, we can remove this listener
+ clusterService.remove(this);
+ }
+ }
+
+ @Override
+ public void onClose() {
+ clusterService.remove(this);
+ listener.onFailure(new NodeClosedException(nodes.localNode()));
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (start(true)) {
+ // if we managed to start and perform the operation on the primary, we can remove this listener
+ clusterService.remove(this);
+ }
+ }
+
+ @Override
+ public void onTimeout(TimeValue timeValue) {
+ // just to be on the safe side, see if we can start it now?
+ if (start(true)) {
+ clusterService.remove(this);
+ return;
+ }
+ clusterService.remove(this);
+ Throwable listenFailure = failure;
+ if (listenFailure == null) {
+ if (shardIt == null) {
+ listenFailure = new UnavailableShardsException(new ShardId(request.index(), -1), "Timeout waiting for [" + timeValue + "], request: " + request.toString());
+ } else {
+ listenFailure = new UnavailableShardsException(shardIt.shardId(), "[" + shardIt.size() + "] shardIt, [" + shardIt.sizeActive() + "] active : Timeout waiting for [" + timeValue + "], request: " + request.toString());
+ }
+ }
+ listener.onFailure(listenFailure);
+ }
+ });
+ }
+ }
+ }
+
+ class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(Request request, final TransportChannel channel) throws Exception {
+ // no need to have a threaded listener since we just send back a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("Failed to send response for get", e1);
+ }
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java
new file mode 100644
index 0000000..60fbeb2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.single.shard;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class SingleShardOperationRequest<T extends SingleShardOperationRequest> extends ActionRequest<T> {
+
+ protected String index;
+
+ private boolean threadedOperation = true;
+
+ protected SingleShardOperationRequest() {
+ }
+
+ public SingleShardOperationRequest(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = ValidateActions.addValidationError("index is missing", validationException);
+ }
+ return validationException;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ /**
+ * Sets the index.
+ */
+ @SuppressWarnings("unchecked")
+ public final T index(String index) {
+ this.index = index;
+ return (T) this;
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ public boolean operationThreaded() {
+ return threadedOperation;
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ @SuppressWarnings("unchecked")
+ public final T operationThreaded(boolean threadedOperation) {
+ this.threadedOperation = threadedOperation;
+ return (T) this;
+ }
+
+ protected void beforeLocalFork() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ // no need to pass threading flags over the network; they are always false when coming through a thread pool
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ }
+
+}
+
diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java
new file mode 100644
index 0000000..86a98a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.single.shard;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.internal.InternalGenericClient;
+
+/**
+ */
+public abstract class SingleShardOperationRequestBuilder<Request extends SingleShardOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends SingleShardOperationRequestBuilder<Request, Response, RequestBuilder>>
+ extends ActionRequestBuilder<Request, Response, RequestBuilder> {
+
+ protected SingleShardOperationRequestBuilder(InternalGenericClient client, Request request) {
+ super(client, request);
+ }
+
+ /**
+ * Sets the index.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setIndex(String index) {
+ request.index(index);
+ return (RequestBuilder) this;
+ }
+
+ /**
+ * Controls if the operation will be executed on a separate thread when executed locally.
+ */
+ @SuppressWarnings("unchecked")
+ public final RequestBuilder setOperationThreaded(boolean threadedOperation) {
+ request.operationThreaded(threadedOperation);
+ return (RequestBuilder) this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java
new file mode 100644
index 0000000..fbb8c1e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.single.shard;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
+
+/**
+ * A base class for single shard read operations.
+ */
+public abstract class TransportShardSingleOperationAction<Request extends SingleShardOperationRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
+
+ protected final ClusterService clusterService;
+
+ protected final TransportService transportService;
+
+ final String transportAction;
+ final String transportShardAction;
+ final String executor;
+
+ protected TransportShardSingleOperationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+
+ this.transportAction = transportAction();
+ this.transportShardAction = transportAction() + "/s";
+ this.executor = executor();
+
+ transportService.registerHandler(transportAction, new TransportHandler());
+ transportService.registerHandler(transportShardAction, new ShardTransportHandler());
+ }
+
+ @Override
+ protected void doExecute(Request request, ActionListener<Response> listener) {
+ new AsyncSingleAction(request, listener).start();
+ }
+
+ protected abstract String transportAction();
+
+ protected abstract String executor();
+
+ protected abstract Response shardOperation(Request request, int shardId) throws ElasticsearchException;
+
+ protected abstract Request newRequest();
+
+ protected abstract Response newResponse();
+
+ protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
+
+ protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request);
+
+ protected void resolveRequest(ClusterState state, Request request) {
+ request.index(state.metaData().concreteIndex(request.index()));
+ }
+
+ protected abstract ShardIterator shards(ClusterState state, Request request) throws ElasticsearchException;
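+
+ // Sketch of what a concrete subclass supplies (illustrative, loosely after
+ // the get action): shards() selects the candidate shard copies and
+ // shardOperation() performs the read on one of them, e.g.:
+ //
+ // protected ShardIterator shards(ClusterState state, GetRequest request) {
+ // return clusterService.operationRouting().getShards(state, request.index(),
+ // request.type(), request.id(), request.routing(), request.preference());
+ // }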
+
+ class AsyncSingleAction {
+
+ private final ActionListener<Response> listener;
+ private final ShardIterator shardIt;
+ private final Request request;
+ private final DiscoveryNodes nodes;
+ private volatile Throwable lastFailure;
+
+ private AsyncSingleAction(Request request, ActionListener<Response> listener) {
+ this.request = request;
+ this.listener = listener;
+
+ ClusterState clusterState = clusterService.state();
+ nodes = clusterState.nodes();
+ ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+ resolveRequest(clusterState, request);
+ blockException = checkRequestBlock(clusterState, request);
+ if (blockException != null) {
+ throw blockException;
+ }
+
+ this.shardIt = shards(clusterState, request);
+ }
+
+ public void start() {
+ perform(null);
+ }
+
+ private void onFailure(ShardRouting shardRouting, Throwable e) {
+ if (logger.isTraceEnabled() && e != null) {
+ logger.trace("{}: failed to execute [{}]", e, shardRouting, request);
+ }
+ perform(e);
+ }
+
+ private void perform(@Nullable final Throwable currentFailure) {
+ Throwable lastFailure = this.lastFailure;
+ if (lastFailure == null || TransportActions.isReadOverrideException(currentFailure)) {
+ lastFailure = currentFailure;
+ this.lastFailure = currentFailure;
+ }
+ final ShardRouting shardRouting = shardIt.nextOrNull();
+ if (shardRouting == null) {
+ Throwable failure = lastFailure;
+ if (failure == null || isShardNotAvailableException(failure)) {
+ failure = new NoShardAvailableActionException(shardIt.shardId());
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("{}: failed to execute [{}]", failure, shardIt.shardId(), request);
+ }
+ }
+ listener.onFailure(failure);
+ return;
+ }
+
+ if (shardRouting.currentNodeId().equals(nodes.localNodeId())) {
+ try {
+ if (request.operationThreaded()) {
+ request.beforeLocalFork();
+ threadPool.executor(executor).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Response response = shardOperation(request, shardRouting.id());
+ listener.onResponse(response);
+ } catch (Throwable e) {
+ onFailure(shardRouting, e);
+ }
+ }
+ });
+ } else {
+ final Response response = shardOperation(request, shardRouting.id());
+ listener.onResponse(response);
+ }
+ } catch (Throwable e) {
+ onFailure(shardRouting, e);
+ }
+ } else {
+ DiscoveryNode node = nodes.get(shardRouting.currentNodeId());
+ if (node == null) {
+ onFailure(shardRouting, new NoShardAvailableActionException(shardIt.shardId()));
+ } else {
+ transportService.sendRequest(node, transportShardAction, new ShardSingleOperationRequest(request, shardRouting.id()), new BaseTransportResponseHandler<Response>() {
+
+ @Override
+ public Response newInstance() {
+ return newResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(final Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ onFailure(shardRouting, exp);
+ }
+ });
+ }
+ }
+ }
+ }
+
+ private class TransportHandler extends BaseTransportRequestHandler<Request> {
+
+ @Override
+ public Request newInstance() {
+ return newRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(Request request, final TransportChannel channel) throws Exception {
+ // no need to have a threaded listener since we just send back a response
+ request.listenerThreaded(false);
+ // if the operation ends up executing locally, run it on a separate thread since we don't spawn one here
+ request.operationThreaded(true);
+ execute(request, new ActionListener<Response>() {
+ @Override
+ public void onResponse(Response result) {
+ try {
+ channel.sendResponse(result);
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Exception e1) {
+ logger.warn("failed to send response for get", e1);
+ }
+ }
+ });
+ }
+ }
+
+ private class ShardTransportHandler extends BaseTransportRequestHandler<ShardSingleOperationRequest> {
+
+ @Override
+ public ShardSingleOperationRequest newInstance() {
+ return new ShardSingleOperationRequest();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception {
+ Response response = shardOperation(request.request(), request.shardId());
+ channel.sendResponse(response);
+ }
+ }
+
+ class ShardSingleOperationRequest extends TransportRequest {
+
+ private Request request;
+
+ private int shardId;
+
+ ShardSingleOperationRequest() {
+ }
+
+ public ShardSingleOperationRequest(Request request, int shardId) {
+ super(request);
+ this.request = request;
+ this.shardId = shardId;
+ }
+
+ public Request request() {
+ return request;
+ }
+
+ public int shardId() {
+ return shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ request = newRequest();
+ request.readFrom(in);
+ shardId = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ request.writeTo(out);
+ out.writeVInt(shardId);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsAction.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsAction.java
new file mode 100644
index 0000000..a6de3d4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class MultiTermVectorsAction extends Action<MultiTermVectorsRequest, MultiTermVectorsResponse, MultiTermVectorsRequestBuilder> {
+
+ public static final MultiTermVectorsAction INSTANCE = new MultiTermVectorsAction();
+ public static final String NAME = "mtv";
+
+ private MultiTermVectorsAction() {
+ super(NAME);
+ }
+
+ @Override
+ public MultiTermVectorsResponse newResponse() {
+ return new MultiTermVectorsResponse();
+ }
+
+ @Override
+ public MultiTermVectorsRequestBuilder newRequestBuilder(Client client) {
+ return new MultiTermVectorsRequestBuilder(client);
+ }
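+
+ // Illustrative use (not part of this class): the INSTANCE singleton ties
+ // the request, response and builder types together, so callers can execute
+ // it generically, e.g.:
+ //
+ // client.execute(MultiTermVectorsAction.INSTANCE, request, listener);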
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsItemResponse.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsItemResponse.java
new file mode 100644
index 0000000..1dd6cca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsItemResponse.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ * A single item of a multi term vectors response.
+ */
+public class MultiTermVectorsItemResponse implements Streamable {
+
+ private TermVectorResponse response;
+ private MultiTermVectorsResponse.Failure failure;
+
+ MultiTermVectorsItemResponse() {
+
+ }
+
+ public MultiTermVectorsItemResponse(TermVectorResponse response, MultiTermVectorsResponse.Failure failure) {
+ assert (((response == null) && (failure != null)) || ((response != null) && (failure == null)));
+ this.response = response;
+ this.failure = failure;
+ }
+
+ /**
+ * The index name of the document.
+ */
+ public String getIndex() {
+ if (failure != null) {
+ return failure.getIndex();
+ }
+ return response.getIndex();
+ }
+
+ /**
+ * The type of the document.
+ */
+ public String getType() {
+ if (failure != null) {
+ return failure.getType();
+ }
+ return response.getType();
+ }
+
+ /**
+ * The id of the document.
+ */
+ public String getId() {
+ if (failure != null) {
+ return failure.getId();
+ }
+ return response.getId();
+ }
+
+ /**
+ * Is this a failed execution?
+ */
+ public boolean isFailed() {
+ return failure != null;
+ }
+
+ /**
+ * The actual term vector response, <tt>null</tt> if it's a failure.
+ */
+ public TermVectorResponse getResponse() {
+ return this.response;
+ }
+
+ /**
+ * The failure if relevant.
+ */
+ public MultiTermVectorsResponse.Failure getFailure() {
+ return this.failure;
+ }
+
+ public static MultiTermVectorsItemResponse readItemResponse(StreamInput in) throws IOException {
+ MultiTermVectorsItemResponse response = new MultiTermVectorsItemResponse();
+ response.readFrom(in);
+ return response;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ failure = MultiTermVectorsResponse.Failure.readFailure(in);
+ } else {
+ response = new TermVectorResponse();
+ response.readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (failure != null) {
+ out.writeBoolean(true);
+ failure.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ response.writeTo(out);
+ }
+ }
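+
+ // Wire format note: the single leading boolean written above is the
+ // discriminator for this either/or type - true means a Failure follows,
+ // false means a TermVectorResponse follows - so after readFrom() exactly
+ // one of the two fields is non-null, matching the constructor's assertion.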
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequest.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequest.java
new file mode 100644
index 0000000..c6af5fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequest.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.RestRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+public class MultiTermVectorsRequest extends ActionRequest<MultiTermVectorsRequest> {
+
+ String preference;
+ List<TermVectorRequest> requests = new ArrayList<TermVectorRequest>();
+
+ final Set<String> ids = new HashSet<String>();
+
+ public MultiTermVectorsRequest add(TermVectorRequest termVectorRequest) {
+ requests.add(termVectorRequest);
+ return this;
+ }
+
+ public MultiTermVectorsRequest add(String index, @Nullable String type, String id) {
+ requests.add(new TermVectorRequest(index, type, id));
+ return this;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (requests.isEmpty()) {
+ validationException = ValidateActions.addValidationError("multi term vectors: no documents requested", validationException);
+ } else {
+ for (int i = 0; i < requests.size(); i++) {
+ TermVectorRequest termVectorRequest = requests.get(i);
+ ActionRequestValidationException validationExceptionForDoc = termVectorRequest.validate();
+ if (validationExceptionForDoc != null) {
+ validationException = ValidateActions.addValidationError("at multi term vectors for doc " + i,
+ validationExceptionForDoc);
+ }
+ }
+ }
+ return validationException;
+ }
+
+ public void add(TermVectorRequest template, BytesReference data)
+ throws Exception {
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ if (data.length() > 0) {
+ XContentParser parser = XContentFactory.xContent(data).createParser(data);
+ try {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+
+ if ("docs".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchIllegalArgumentException("docs array element should include an object");
+ }
+ TermVectorRequest termVectorRequest = new TermVectorRequest(template);
+ TermVectorRequest.parseRequest(termVectorRequest, parser);
+ add(termVectorRequest);
+ }
+ } else if ("ids".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (!token.isValue()) {
+ throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids");
+ }
+ ids.add(parser.text());
+ }
+ } else {
+ throw new ElasticsearchParseException(
+ "No parameter named " + currentFieldName + "and type ARRAY");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
+ if ("parameters".equals(currentFieldName)) {
+ TermVectorRequest.parseRequest(template, parser);
+ } else {
+ throw new ElasticsearchParseException(
+ "No parameter named " + currentFieldName + "and type OBJECT");
+ }
+ } else if (currentFieldName != null) {
+ throw new ElasticsearchParseException("_mtermvectors: Parameter " + currentFieldName + "not supported");
+ }
+ }
+ }
+ finally {
+ parser.close();
+ }
+ }
+ for (String id : ids) {
+ TermVectorRequest curRequest = new TermVectorRequest(template);
+ curRequest.id(id);
+ requests.add(curRequest);
+ }
+ }
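+
+ // Illustrative request body accepted by the parser above:
+ //
+ // { "parameters": { "fields": ["text"] },
+ // "docs": [ { ...one term vector request per entry... } ],
+ // "ids": ["2", "3"] }
+ //
+ // "docs" entries become individual TermVectorRequests, while each id in
+ // "ids" is combined with the template (index, type, parameters) into a
+ // request of its own.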
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ preference = in.readOptionalString();
+ int size = in.readVInt();
+ requests = new ArrayList<TermVectorRequest>(size);
+ for (int i = 0; i < size; i++) {
+ requests.add(TermVectorRequest.readTermVectorRequest(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalString(preference);
+ out.writeVInt(requests.size());
+ for (TermVectorRequest termVectorRequest : requests) {
+ termVectorRequest.writeTo(out);
+ }
+ }
+
+ public void ids(String[] ids) {
+ for (String id : ids) {
+ this.ids.add(id.replaceAll("\\s", ""));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequestBuilder.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequestBuilder.java
new file mode 100644
index 0000000..877283c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsRequestBuilder.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+
+public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder<MultiTermVectorsRequest, MultiTermVectorsResponse, MultiTermVectorsRequestBuilder> {
+ public MultiTermVectorsRequestBuilder(Client client) {
+ super((InternalClient) client, new MultiTermVectorsRequest());
+ }
+
+ public MultiTermVectorsRequestBuilder add(String index, @Nullable String type, Iterable<String> ids) {
+ for (String id : ids) {
+ request.add(index, type, id);
+ }
+ return this;
+ }
+
+ public MultiTermVectorsRequestBuilder add(String index, @Nullable String type, String... ids) {
+ for (String id : ids) {
+ request.add(index, type, id);
+ }
+ return this;
+ }
+
+ public MultiTermVectorsRequestBuilder add(TermVectorRequest termVectorRequest) {
+ request.add(termVectorRequest);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<MultiTermVectorsResponse> listener) {
+ ((Client) client).multiTermVectors(request, listener);
+ }
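+
+ // Illustrative use (assuming the standard client API):
+ //
+ // MultiTermVectorsResponse response = new MultiTermVectorsRequestBuilder(client)
+ // .add("idx", "doc", "1", "2")
+ // .execute().actionGet();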
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java
new file mode 100644
index 0000000..e4814d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContent {
+
+ /**
+ * Represents a failure.
+ */
+ public static class Failure implements Streamable {
+ private String index;
+ private String type;
+ private String id;
+ private String message;
+
+ Failure() {
+
+ }
+
+ public Failure(String index, String type, String id, String message) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.message = message;
+ }
+
+ /**
+ * The index name of the action.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The type of the action.
+ */
+ public String getType() {
+ return type;
+ }
+
+ /**
+ * The id of the action.
+ */
+ public String getId() {
+ return id;
+ }
+
+ /**
+ * The failure message.
+ */
+ public String getMessage() {
+ return this.message;
+ }
+
+ public static Failure readFailure(StreamInput in) throws IOException {
+ Failure failure = new Failure();
+ failure.readFrom(in);
+ return failure;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readString();
+ type = in.readOptionalString();
+ id = in.readString();
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeOptionalString(type);
+ out.writeString(id);
+ out.writeString(message);
+ }
+ }
+
+ private MultiTermVectorsItemResponse[] responses;
+
+ MultiTermVectorsResponse() {
+ }
+
+ public MultiTermVectorsResponse(MultiTermVectorsItemResponse[] responses) {
+ this.responses = responses;
+ }
+
+ public MultiTermVectorsItemResponse[] getResponses() {
+ return this.responses;
+ }
+
+ @Override
+ public Iterator<MultiTermVectorsItemResponse> iterator() {
+ return Iterators.forArray(responses);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.startArray(Fields.DOCS);
+ for (MultiTermVectorsItemResponse response : responses) {
+ if (response.isFailed()) {
+ builder.startObject();
+ Failure failure = response.getFailure();
+ builder.field(Fields._INDEX, failure.getIndex());
+ builder.field(Fields._TYPE, failure.getType());
+ builder.field(Fields._ID, failure.getId());
+ builder.field(Fields.ERROR, failure.getMessage());
+ builder.endObject();
+ } else {
+ TermVectorResponse getResponse = response.getResponse();
+ getResponse.toXContent(builder, params);
+ }
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
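+
+ // Illustrative shape of the rendered response (one entry per requested
+ // document, failures reported inline):
+ //
+ // { "docs": [ { ...term vector fields... },
+ // { "_index": "idx", "_type": "doc", "_id": "2", "error": "..." } ] }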
+
+ static final class Fields {
+ static final XContentBuilderString DOCS = new XContentBuilderString("docs");
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString ERROR = new XContentBuilderString("error");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ responses = new MultiTermVectorsItemResponse[in.readVInt()];
+ for (int i = 0; i < responses.length; i++) {
+ responses[i] = MultiTermVectorsItemResponse.readItemResponse(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(responses.length);
+ for (MultiTermVectorsItemResponse response : responses) {
+ response.writeTo(out);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardRequest.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardRequest.java
new file mode 100644
index 0000000..cf122cd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardRequest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MultiTermVectorsShardRequest extends SingleShardOperationRequest<MultiTermVectorsShardRequest> {
+
+ private int shardId;
+ private String preference;
+
+ IntArrayList locations;
+ List<TermVectorRequest> requests;
+
+ MultiTermVectorsShardRequest() {
+
+ }
+
+ MultiTermVectorsShardRequest(String index, int shardId) {
+ super(index);
+ this.shardId = shardId;
+ locations = new IntArrayList();
+ requests = new ArrayList<TermVectorRequest>();
+ }
+
+ public int shardId() {
+ return this.shardId;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
+ * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
+ * a custom value, which guarantees that the same order will be used across different requests.
+ */
+ public MultiTermVectorsShardRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+
+ public void add(int location, TermVectorRequest request) {
+ this.locations.add(location);
+ this.requests.add(request);
+ }
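+
+ // locations remembers each sub-request's position in the original multi
+ // request, so shard-level results can be slotted back in request order;
+ // e.g. add(3, request) marks request as item 3 of the MultiTermVectorsRequest.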
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ locations = new IntArrayList(size);
+ requests = new ArrayList<TermVectorRequest>(size);
+ for (int i = 0; i < size; i++) {
+ locations.add(in.readVInt());
+ requests.add(TermVectorRequest.readTermVectorRequest(in));
+ }
+
+ preference = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(locations.size());
+ for (int i = 0; i < locations.size(); i++) {
+ out.writeVInt(locations.get(i));
+ requests.get(i).writeTo(out);
+ }
+
+ out.writeOptionalString(preference);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardResponse.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardResponse.java
new file mode 100644
index 0000000..446bb13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsShardResponse.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MultiTermVectorsShardResponse extends ActionResponse {
+
+ IntArrayList locations;
+ List<TermVectorResponse> responses;
+ List<MultiTermVectorsResponse.Failure> failures;
+
+ MultiTermVectorsShardResponse() {
+ locations = new IntArrayList();
+ responses = new ArrayList<TermVectorResponse>();
+ failures = new ArrayList<MultiTermVectorsResponse.Failure>();
+ }
+
+ public void add(int location, TermVectorResponse response) {
+ locations.add(location);
+ responses.add(response);
+ failures.add(null);
+ }
+
+ public void add(int location, MultiTermVectorsResponse.Failure failure) {
+ locations.add(location);
+ responses.add(null);
+ failures.add(failure);
+ }
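+
+ // responses and failures are parallel lists with null holes: for every
+ // location exactly one of responses.get(i) / failures.get(i) is non-null,
+ // which readFrom/writeTo below encode with one boolean per entry.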
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ locations = new IntArrayList(size);
+ responses = new ArrayList<TermVectorResponse>(size);
+ failures = new ArrayList<MultiTermVectorsResponse.Failure>(size);
+ for (int i = 0; i < size; i++) {
+ locations.add(in.readVInt());
+ if (in.readBoolean()) {
+ TermVectorResponse response = new TermVectorResponse();
+ response.readFrom(in);
+ responses.add(response);
+ } else {
+ responses.add(null);
+ }
+ if (in.readBoolean()) {
+ failures.add(MultiTermVectorsResponse.Failure.readFailure(in));
+ } else {
+ failures.add(null);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(locations.size());
+ for (int i = 0; i < locations.size(); i++) {
+ out.writeVInt(locations.get(i));
+ if (responses.get(i) == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ responses.get(i).writeTo(out);
+ }
+ if (failures.get(i) == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ failures.get(i).writeTo(out);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorAction.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorAction.java
new file mode 100644
index 0000000..1f51c45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.action.Action;
+
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class TermVectorAction extends Action<TermVectorRequest, TermVectorResponse, TermVectorRequestBuilder> {
+
+ public static final TermVectorAction INSTANCE = new TermVectorAction();
+ public static final String NAME = "tv";
+
+ private TermVectorAction() {
+ super(NAME);
+ }
+
+ @Override
+ public TermVectorResponse newResponse() {
+ return new TermVectorResponse();
+ }
+
+ @Override
+ public TermVectorRequestBuilder newRequestBuilder(Client client) {
+ return new TermVectorRequestBuilder(client);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorFields.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorFields.java
new file mode 100644
index 0000000..10c684f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorFields.java
@@ -0,0 +1,484 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectLongCursor;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
+
+import static org.apache.lucene.util.ArrayUtil.grow;
+
+/**
+ * This class represents the result of a {@link TermVectorRequest}. It works
+ * exactly like the {@link Fields} class except for one thing: it can return
+ * offsets and payloads even if positions are not present. You must still call
+ * nextPosition() to advance the counter, although this method only returns
+ * <tt>-1</tt> if no positions were returned by the {@link TermVectorRequest}.
+ * <p/>
+ * The data is stored in two byte arrays ({@code headerRef} and
+ * {@code termVectors}, both {@link BytesRef}) that have the following format:
+ * <p/>
+ * {@code headerRef}: Stores offsets per field in the {@code termVectors} array
+ * and some header information as {@link BytesRef}. Format is
+ * <ul>
+ * <li>String : "TV"</li>
+ * <li>vint: version (=-1)</li>
+ * <li>boolean: hasTermStatistics (are the term statistics stored?)</li>
+ * <li>boolean: hasFieldStatistics (are the field statistics stored?)</li>
+ * <li>vint: number of fields</li>
+ * <ul>
+ * <li>String: field name 1</li>
+ * <li>vint: offset in {@code termVectors} for field 1</li>
+ * <li>...</li>
+ * <li>String: field name of last field</li>
+ * <li>vint: offset in {@code termVectors} for last field</li>
+ * </ul>
+ * </ul>
+ * <p/>
+ * termVectors: Stores the actual term vectors as a {@link BytesRef}.
+ * <p/>
+ * Term vectors for each field are stored in blocks, one for each field. The
+ * offsets in {@code headerRef} are used to find where the block for a field
+ * starts. Each block begins with a
+ * <ul>
+ * <li>vint: number of terms</li>
+ * <li>boolean: positions (are positions stored?)</li>
+ * <li>boolean: offsets (are offsets stored?)</li>
+ * <li>boolean: payloads (are payloads stored?)</li>
+ * </ul>
+ * If the field statistics were requested ({@code hasFieldStatistics} is true,
+ * see {@code headerRef}), the following numbers are stored:
+ * <ul>
+ * <li>vlong: sum of total term frequencies of the field (sumTotalTermFreq)</li>
+ * <li>vlong: sum of document frequencies for each term (sumDocFreq)</li>
+ * <li>vint: number of documents in the shard that have an entry for this field
+ * (docCount)</li>
+ * </ul>
+ * <p/>
+ * After that, for each term it stores
+ * <ul>
+ * <li>vint: term length</li>
+ * <li>BytesRef: term name</li>
+ * </ul>
+ * <p/>
+ * If term statistics are requested ({@code hasTermStatistics} is true, see
+ * {@code headerRef}):
+ * <ul>
+ * <li>vint: document frequency (how many documents this term appears in)</li>
+ * <li>vlong: total term frequency (how often this term occurs in this field
+ * across all documents)</li>
+ * </ul>
+ * After that
+ * <ul>
+ * <li>vint: frequency (always returned)</li>
+ * <ul>
+ * <li>vint: position_1 (if positions == true)</li>
+ * <li>vint: startOffset_1 (if offsets == true)</li>
+ * <li>vint: endOffset_1 (if offsets == true)</li>
+ * <li>BytesRef: payload_1 (if payloads == true)</li>
+ * <li>...</li>
+ * <li>vint: endOffset_frequency (if offsets == true)</li>
+ * <li>BytesRef: payload_frequency (if payloads == true)</li>
+ * </ul>
+ * </ul>
+ */
+
+public final class TermVectorFields extends Fields {
+
+ private final ObjectLongOpenHashMap<String> fieldMap;
+ private final BytesReference termVectors;
+ final boolean hasTermStatistic;
+ final boolean hasFieldStatistic;
+
+ /**
+ * @param headerRef Stores offsets per field in the {@code termVectors} and some
+ * header information as {@link BytesRef}.
+ * @param termVectors Stores the actual term vectors as a {@link BytesRef}.
+ */
+ public TermVectorFields(BytesReference headerRef, BytesReference termVectors) throws IOException {
+ BytesStreamInput header = new BytesStreamInput(headerRef);
+ fieldMap = new ObjectLongOpenHashMap<String>();
+
+ // here we read the header to fill the field offset map
+ String headerString = header.readString();
+ assert headerString.equals("TV");
+ int version = header.readInt();
+ assert version == -1;
+ hasTermStatistic = header.readBoolean();
+ hasFieldStatistic = header.readBoolean();
+ final int numFields = header.readVInt();
+ for (int i = 0; i < numFields; i++) {
+ fieldMap.put((header.readString()), header.readVLong());
+ }
+ header.close();
+ // reference to the term vector data
+ this.termVectors = termVectors;
+ }
+
+ @Override
+ public Iterator<String> iterator() {
+ final Iterator<ObjectLongCursor<String>> iterator = fieldMap.iterator();
+ return new Iterator<String>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public String next() {
+ return iterator.next().key;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ // first, find where in the termVectors bytes the actual term vector for
+ // this field is stored
+ if (!fieldMap.containsKey(field)) {
+ return null; // we don't have it.
+ }
+ long offset = fieldMap.lget();
+ final BytesStreamInput perFieldTermVectorInput = new BytesStreamInput(this.termVectors);
+ perFieldTermVectorInput.reset();
+ perFieldTermVectorInput.skip(offset);
+
+ // read how many terms....
+ final long numTerms = perFieldTermVectorInput.readVLong();
+ // ...if positions etc. were stored....
+ final boolean hasPositions = perFieldTermVectorInput.readBoolean();
+ final boolean hasOffsets = perFieldTermVectorInput.readBoolean();
+ final boolean hasPayloads = perFieldTermVectorInput.readBoolean();
+ // read the field statistics
+ final long sumTotalTermFreq = hasFieldStatistic ? readPotentiallyNegativeVLong(perFieldTermVectorInput) : -1;
+ final long sumDocFreq = hasFieldStatistic ? readPotentiallyNegativeVLong(perFieldTermVectorInput) : -1;
+ final int docCount = hasFieldStatistic ? readPotentiallyNegativeVInt(perFieldTermVectorInput) : -1;
+
+ return new Terms() {
+
+ @Override
+ public TermsEnum iterator(TermsEnum reuse) throws IOException {
+ // convert bytes ref for the terms to actual data
+ return new TermsEnum() {
+ int currentTerm = 0;
+ int freq = 0;
+ int docFreq = -1;
+ long totalTermFrequency = -1;
+ int[] positions = new int[1];
+ int[] startOffsets = new int[1];
+ int[] endOffsets = new int[1];
+ BytesRef[] payloads = new BytesRef[1];
+ final BytesRef spare = new BytesRef();
+
+ @Override
+ public BytesRef next() throws IOException {
+ if (currentTerm++ < numTerms) {
+ // term string. first the size...
+ int termVectorSize = perFieldTermVectorInput.readVInt();
+ spare.grow(termVectorSize);
+ // ...then the value.
+ perFieldTermVectorInput.readBytes(spare.bytes, 0, termVectorSize);
+ spare.length = termVectorSize;
+ if (hasTermStatistic) {
+ docFreq = readPotentiallyNegativeVInt(perFieldTermVectorInput);
+ totalTermFrequency = readPotentiallyNegativeVLong(perFieldTermVectorInput);
+
+ }
+
+ freq = readPotentiallyNegativeVInt(perFieldTermVectorInput);
+ // grow the arrays to read the values. this is just
+ // for performance reasons. Re-use memory instead of
+ // realloc.
+ growBuffers();
+ // finally, read the values (positions, offsets,
+ // payloads) into the arrays so that we can simply
+ // iterate over them later
+ writeInfos(perFieldTermVectorInput);
+ return spare;
+
+ } else {
+ return null;
+ }
+
+ }
+
+ private void writeInfos(final BytesStreamInput input) throws IOException {
+ for (int i = 0; i < freq; i++) {
+ if (hasPositions) {
+ positions[i] = input.readVInt();
+ }
+ if (hasOffsets) {
+ startOffsets[i] = input.readVInt();
+ endOffsets[i] = input.readVInt();
+ }
+ if (hasPayloads) {
+ int payloadLength = input.readVInt();
+ if (payloads[i] == null) {
+ payloads[i] = new BytesRef(payloadLength);
+ } else {
+ payloads[i].grow(payloadLength);
+ }
+ input.readBytes(payloads[i].bytes, 0, payloadLength);
+ payloads[i].length = payloadLength;
+ payloads[i].offset = 0;
+ }
+ }
+ }
+
+ private void growBuffers() {
+
+ if (hasPositions) {
+ positions = grow(positions, freq);
+ }
+ if (hasOffsets) {
+ startOffsets = grow(startOffsets, freq);
+ endOffsets = grow(endOffsets, freq);
+ }
+ if (hasPayloads) {
+ if (payloads.length < freq) {
+ final BytesRef[] newArray = new BytesRef[ArrayUtil.oversize(freq, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+ System.arraycopy(payloads, 0, newArray, 0, payloads.length);
+ payloads = newArray;
+ }
+ }
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return BytesRef.getUTF8SortedAsUnicodeComparator();
+ }
+
+ @Override
+ public SeekStatus seekCeil(BytesRef text) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void seekExact(long ord) throws IOException {
+ throw new UnsupportedOperationException("Seek is not supported");
+ }
+
+ @Override
+ public BytesRef term() throws IOException {
+ return spare;
+ }
+
+ @Override
+ public long ord() throws IOException {
+ throw new UnsupportedOperationException("ordinals are not supported");
+ }
+
+ @Override
+ public int docFreq() throws IOException {
+ return docFreq;
+ }
+
+ @Override
+ public long totalTermFreq() throws IOException {
+ return totalTermFrequency;
+ }
+
+ @Override
+ public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ return docsAndPositions(liveDocs, reuse instanceof DocsAndPositionsEnum ? (DocsAndPositionsEnum) reuse : null, 0);
+ }
+
+ @Override
+ public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ final TermVectorsDocsAndPosEnum retVal = (reuse instanceof TermVectorsDocsAndPosEnum ? (TermVectorsDocsAndPosEnum) reuse
+ : new TermVectorsDocsAndPosEnum());
+ return retVal.reset(hasPositions ? positions : null, hasOffsets ? startOffsets : null, hasOffsets ? endOffsets
+ : null, hasPayloads ? payloads : null, freq);
+ }
+
+ };
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return BytesRef.getUTF8SortedAsUnicodeComparator();
+ }
+
+ @Override
+ public long size() throws IOException {
+ return numTerms;
+ }
+
+ @Override
+ public long getSumTotalTermFreq() throws IOException {
+ return sumTotalTermFreq;
+ }
+
+ @Override
+ public long getSumDocFreq() throws IOException {
+ return sumDocFreq;
+ }
+
+ @Override
+ public int getDocCount() throws IOException {
+ return docCount;
+ }
+
+ @Override
+ public boolean hasFreqs() {
+ return true;
+ }
+
+ @Override
+ public boolean hasOffsets() {
+ return hasOffsets;
+ }
+
+ @Override
+ public boolean hasPositions() {
+ return hasPositions;
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return hasPayloads;
+ }
+
+ };
+ }
+
+ @Override
+ public int size() {
+ return fieldMap.size();
+ }
+
+ private final class TermVectorsDocsAndPosEnum extends DocsAndPositionsEnum {
+ private boolean hasPositions;
+ private boolean hasOffsets;
+ private boolean hasPayloads;
+ int curPos = -1;
+ int doc = -1;
+ private int freq;
+ private int[] startOffsets;
+ private int[] positions;
+ private BytesRef[] payloads;
+ private int[] endOffsets;
+
+ private DocsAndPositionsEnum reset(int[] positions, int[] startOffsets, int[] endOffsets, BytesRef[] payloads, int freq) {
+ curPos = -1;
+ doc = -1;
+ this.hasPositions = positions != null;
+ this.hasOffsets = startOffsets != null;
+ this.hasPayloads = payloads != null;
+ this.freq = freq;
+ this.startOffsets = startOffsets;
+ this.endOffsets = endOffsets;
+ this.payloads = payloads;
+ this.positions = positions;
+ return this;
+ }
+
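+ // this enum is backed by exactly one document: the first call to
+ // nextDoc() returns doc 0, every subsequent call returns NO_MORE_DOCS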
+ @Override
+ public int nextDoc() throws IOException {
+ return doc = (doc == -1 ? 0 : NO_MORE_DOCS);
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ while (nextDoc() < target && doc != NO_MORE_DOCS) {
+ }
+ return doc;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return freq;
+ }
+
+ // call nextPosition once before calling this one,
+ // because otherwise the counter is not advanced
+ @Override
+ public int startOffset() throws IOException {
+ assert curPos < freq && curPos >= 0;
+ return hasOffsets ? startOffsets[curPos] : -1;
+
+ }
+
+ @Override
+ // can return -1 if positions were not requested or
+ // not stored, but offsets were stored and requested
+ public int nextPosition() throws IOException {
+ assert curPos + 1 < freq;
+ ++curPos;
+ // this is kind of cheating, but if you don't need positions
+ // we save a lot of space on the wire
+ return hasPositions ? positions[curPos] : -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ assert curPos < freq && curPos >= 0;
+ return hasPayloads ? payloads[curPos] : null;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ assert curPos < freq && curPos >= 0;
+ return hasOffsets ? endOffsets[curPos] : -1;
+ }
+
+ @Override
+ public long cost() {
+ return 1;
+ }
+ }
+
+ // read a vInt. this is used if the integer might be negative: the
+ // writer adds one and stores -1 as 0, so we have to subtract 1 again
+ // when reading. The +1 offset allows encoding a "not present" term
+ // frequency of -1 as an unsigned vInt.
+ int readPotentiallyNegativeVInt(BytesStreamInput stream) throws IOException {
+ return stream.readVInt() - 1;
+ }
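+
+ // e.g. a document frequency of -1 ("not stored") goes over the wire as
+ // writeVInt(0) and is decoded back to -1 by the method above.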
+
+ // read a vLong. this is used if the value might be negative: the
+ // writer adds one and stores -1 as 0, so we have to subtract 1 again
+ // when reading. The +1 offset allows encoding a "not present"
+ // statistic of -1 as an unsigned vLong.
+ long readPotentiallyNegativeVLong(BytesStreamInput stream) throws IOException {
+ return stream.readVLong() - 1;
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorRequest.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorRequest.java
new file mode 100644
index 0000000..81fba7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorRequest.java
@@ -0,0 +1,400 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ValidateActions;
+import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Request returning the term vector (doc frequency, positions, offsets) for a
+ * document.
+ * <p/>
+ * Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are
+ * required.
+ */
+public class TermVectorRequest extends SingleShardOperationRequest<TermVectorRequest> {
+
+ private String type;
+
+ private String id;
+
+ private String routing;
+
+ protected String preference;
+
+ // TODO: change to String[]
+ private Set<String> selectedFields;
+
+ private EnumSet<Flag> flagsEnum = EnumSet.of(Flag.Positions, Flag.Offsets, Flag.Payloads,
+ Flag.FieldStatistics);
+
+ public TermVectorRequest() {
+ }
+
+ /**
+ * Constructs a new term vector request for a document that will be fetched
+ * from the provided index. Use {@link #type(String)} and
+ * {@link #id(String)} to specify the document to load.
+ */
+ public TermVectorRequest(String index, String type, String id) {
+ super(index);
+ this.id = id;
+ this.type = type;
+ }
+
+ /**
+ * Constructs a new term vector request as a copy of the given request,
+ * duplicating its index, type, id, flags, preference, routing and
+ * selected fields.
+ */
+ public TermVectorRequest(TermVectorRequest other) {
+ super(other.index());
+ this.id = other.id();
+ this.type = other.type();
+ this.flagsEnum = other.getFlags().clone();
+ this.preference = other.preference();
+ this.routing = other.routing();
+ if (other.selectedFields != null) {
+ this.selectedFields = new HashSet<String>(other.selectedFields);
+ }
+ }
+
+ public EnumSet<Flag> getFlags() {
+ return flagsEnum;
+ }
+
+ /**
+ * Sets the type of document to get the term vector for.
+ */
+ public TermVectorRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * Returns the type of document to get the term vector for.
+ */
+ public String type() {
+ return type;
+ }
+
+ /**
+ * Returns the id of document the term vector is requested for.
+ */
+ public String id() {
+ return id;
+ }
+
+ /**
+ * Sets the id of document the term vector is requested for.
+ */
+ public TermVectorRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ /**
+ * @return The routing for this request.
+ */
+ public String routing() {
+ return routing;
+ }
+
+ public TermVectorRequest routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Simply sets the routing to this
+ * value, as the parent is only used for routing in this request.
+ */
+ public TermVectorRequest parent(String parent) {
+ if (routing == null) {
+ routing = parent;
+ }
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomizing across
+ * shards. Can be set to <tt>_local</tt> to prefer local shards,
+ * <tt>_primary</tt> to execute only on primary shards, or a custom value,
+ * which guarantees that the same order will be used across different
+ * requests.
+ */
+ public TermVectorRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ /**
+ * Return the start and end offsets for each term if they were stored,
+ * or skip them otherwise.
+ */
+ public TermVectorRequest offsets(boolean offsets) {
+ setFlag(Flag.Offsets, offsets);
+ return this;
+ }
+
+ /**
+ * @return <code>true</code> if term offsets should be returned. Otherwise
+ * <code>false</code>
+ */
+ public boolean offsets() {
+ return flagsEnum.contains(Flag.Offsets);
+ }
+
+ /**
+ * Return the positions for each term if stored or skip.
+ */
+ public TermVectorRequest positions(boolean positions) {
+ setFlag(Flag.Positions, positions);
+ return this;
+ }
+
+ /**
+ * @return <code>true</code> if the positions for each term should be
+ * returned, otherwise <code>false</code>
+ */
+ public boolean positions() {
+ return flagsEnum.contains(Flag.Positions);
+ }
+
+ /**
+ * @return <code>true</code> if term payloads should be returned. Otherwise
+ * <code>false</code>
+ */
+ public boolean payloads() {
+ return flagsEnum.contains(Flag.Payloads);
+ }
+
+ /**
+ * Return the payloads for each term or skip.
+ */
+ public TermVectorRequest payloads(boolean payloads) {
+ setFlag(Flag.Payloads, payloads);
+ return this;
+ }
+
+ /**
+ * @return <code>true</code> if term statistics should be returned.
+ * Otherwise <code>false</code>
+ */
+ public boolean termStatistics() {
+ return flagsEnum.contains(Flag.TermStatistics);
+ }
+
+ /**
+ * Return the term statistics for each term in the shard or skip.
+ */
+ public TermVectorRequest termStatistics(boolean termStatistics) {
+ setFlag(Flag.TermStatistics, termStatistics);
+ return this;
+ }
+
+ /**
+ * @return <code>true</code> if field statistics should be returned.
+ * Otherwise <code>false</code>
+ */
+ public boolean fieldStatistics() {
+ return flagsEnum.contains(Flag.FieldStatistics);
+ }
+
+ /**
+ * Return the field statistics for each term in the shard or skip.
+ */
+ public TermVectorRequest fieldStatistics(boolean fieldStatistics) {
+ setFlag(Flag.FieldStatistics, fieldStatistics);
+ return this;
+ }
+
+ /**
+ * Return only term vectors for explicitly selected fields. Returns the term
+ * vectors for all fields if selectedFields == null
+ */
+ public Set<String> selectedFields() {
+ return selectedFields;
+ }
+
+ /**
+ * Return only term vectors for explicitly selected fields. Returns the term
+ * vectors for all fields if selectedFields == null
+ */
+ public TermVectorRequest selectedFields(String[] fields) {
+ selectedFields = fields != null && fields.length != 0 ? Sets.newHashSet(fields) : null;
+ return this;
+ }
+
+ private void setFlag(Flag flag, boolean set) {
+ if (set && !flagsEnum.contains(flag)) {
+ flagsEnum.add(flag);
+ } else if (!set) {
+ flagsEnum.remove(flag);
+ assert (!flagsEnum.contains(flag));
+ }
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (index == null) {
+ validationException = ValidateActions.addValidationError("index is missing", validationException);
+ }
+ if (type == null) {
+ validationException = ValidateActions.addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ validationException = ValidateActions.addValidationError("id is missing", validationException);
+ }
+ return validationException;
+ }
+
+ public static TermVectorRequest readTermVectorRequest(StreamInput in) throws IOException {
+ TermVectorRequest termVectorRequest = new TermVectorRequest();
+ termVectorRequest.readFrom(in);
+ return termVectorRequest;
+ }
+
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ type = in.readString();
+ id = in.readString();
+ routing = in.readOptionalString();
+ preference = in.readOptionalString();
+ long flags = in.readVLong();
+
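+ // decode the bitmask written by writeTo(): bit i set means the flag with ordinal i is present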
+ flagsEnum.clear();
+ for (Flag flag : Flag.values()) {
+ if ((flags & (1 << flag.ordinal())) != 0) {
+ flagsEnum.add(flag);
+ }
+ }
+ int numSelectedFields = in.readVInt();
+ if (numSelectedFields > 0) {
+ selectedFields = new HashSet<String>();
+ for (int i = 0; i < numSelectedFields; i++) {
+ selectedFields.add(in.readString());
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeString(type);
+ out.writeString(id);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(preference);
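+ // encode the flag set as a bitmask over the flags' ordinals; for example
+ // EnumSet.of(Positions, Payloads) (ordinals 0 and 2) becomes 0b101 = 5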
+ long longFlags = 0;
+ for (Flag flag : flagsEnum) {
+ longFlags |= (1 << flag.ordinal());
+ }
+ out.writeVLong(longFlags);
+ if (selectedFields != null) {
+ out.writeVInt(selectedFields.size());
+ for (String selectedField : selectedFields) {
+ out.writeString(selectedField);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+
+ }
+
+ public static enum Flag {
+ // Do not change the order of these flags: we use
+ // the ordinal for encoding! Only append to the end!
+ Positions, Offsets, Payloads, FieldStatistics, TermStatistics;
+ }
+
+ /**
+ * Populates a request object (pre-populated with defaults) based on a parser.
+ *
+ * @param termVectorRequest the request to populate
+ * @param parser the parser, positioned at the start of the request body
+ * @throws IOException
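+ *
+ * An example body, as accepted by the parsing logic below (all keys are optional):
+ * <pre>
+ * { "fields" : ["field1", "field2"], "offsets" : true, "positions" : true,
+ * "payloads" : true, "term_statistics" : true, "field_statistics" : true }
+ * </pre>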
+ */
+ public static void parseRequest(TermVectorRequest termVectorRequest, XContentParser parser) throws IOException {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ List<String> fields = new ArrayList<String>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (currentFieldName != null) {
+ if (currentFieldName.equals("fields")) {
+
+ if (token == XContentParser.Token.START_ARRAY) {
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ fields.add(parser.text());
+ }
+ } else {
+ throw new ElasticsearchParseException(
+ "The parameter fields must be given as an array! Use syntax : \"fields\" : [\"field1\", \"field2\",...]");
+ }
+ } else if (currentFieldName.equals("offsets")) {
+ termVectorRequest.offsets(parser.booleanValue());
+ } else if (currentFieldName.equals("positions")) {
+ termVectorRequest.positions(parser.booleanValue());
+ } else if (currentFieldName.equals("payloads")) {
+ termVectorRequest.payloads(parser.booleanValue());
+ } else if (currentFieldName.equals("term_statistics") || currentFieldName.equals("termStatistics")) {
+ termVectorRequest.termStatistics(parser.booleanValue());
+ } else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) {
+ termVectorRequest.fieldStatistics(parser.booleanValue());
+ } else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing.
+ termVectorRequest.index = parser.text();
+ } else if ("_type".equals(currentFieldName)) {
+ termVectorRequest.type = parser.text();
+ } else if ("_id".equals(currentFieldName)) {
+ termVectorRequest.id = parser.text();
+ } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
+ termVectorRequest.routing = parser.text();
+ } else {
+ throw new ElasticsearchParseException("The parameter " + currentFieldName
+ + " is not valid for term vector request!");
+ }
+ }
+ }
+
+ if (fields.size() > 0) {
+ String[] fieldsAsArray = new String[fields.size()];
+ termVectorRequest.selectedFields(fields.toArray(fieldsAsArray));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorRequestBuilder.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorRequestBuilder.java
new file mode 100644
index 0000000..1f8069a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorRequestBuilder.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+
+/**
+ * A builder for {@link TermVectorRequest}.
+ */
+public class TermVectorRequestBuilder extends ActionRequestBuilder<TermVectorRequest, TermVectorResponse, TermVectorRequestBuilder> {
+
+ public TermVectorRequestBuilder(Client client) {
+ super((InternalClient) client, new TermVectorRequest());
+ }
+
+ public TermVectorRequestBuilder(Client client, String index, String type, String id) {
+ super((InternalClient) client, new TermVectorRequest(index, type, id));
+ }
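+
+ // A usage sketch (assuming the Client interface gains a prepareTermVector()
+ // method elsewhere in this patch; the index/type/id values are illustrative):
+ //
+ // TermVectorResponse resp = client.prepareTermVector("index", "type", "1")
+ // .setPositions(true).setOffsets(true).setTermStatistics(true)
+ // .execute().actionGet();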
+
+ /**
+ * Sets the routing. Required if routing isn't id based.
+ */
+ public TermVectorRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Since the parent id is only used for routing here,
+ * this simply sets the routing to the parent value, unless a routing has already been set.
+ */
+ public TermVectorRequestBuilder setParent(String parent) {
+ request.parent(parent);
+ return this;
+ }
+
+ /**
+ * Sets the preference to use when executing the request. Defaults to randomizing across shards.
+ * Can be set to <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on
+ * primary shards, or a custom value, which guarantees that the same order will be used across
+ * different requests.
+ */
+ public TermVectorRequestBuilder setPreference(String preference) {
+ request.preference(preference);
+ return this;
+ }
+
+ public TermVectorRequestBuilder setOffsets(boolean offsets) {
+ request.offsets(offsets);
+ return this;
+ }
+
+ public TermVectorRequestBuilder setPositions(boolean positions) {
+ request.positions(positions);
+ return this;
+ }
+
+ public TermVectorRequestBuilder setPayloads(boolean payloads) {
+ request.payloads(payloads);
+ return this;
+ }
+
+ public TermVectorRequestBuilder setTermStatistics(boolean termStatistics) {
+ request.termStatistics(termStatistics);
+ return this;
+ }
+
+ public TermVectorRequestBuilder setFieldStatistics(boolean fieldStatistics) {
+ request.fieldStatistics(fieldStatistics);
+ return this;
+ }
+
+ public TermVectorRequestBuilder setSelectedFields(String... fields) {
+ request.selectedFields(fields);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<TermVectorResponse> listener) {
+ ((Client) client).termVector(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java
new file mode 100644
index 0000000..f662cbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.google.common.collect.Iterators;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.termvector.TermVectorRequest.Flag;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Set;
+
+public class TermVectorResponse extends ActionResponse implements ToXContent {
+
+ private static class FieldStrings {
+ // term statistics strings
+ public static final XContentBuilderString TTF = new XContentBuilderString("ttf");
+ public static final XContentBuilderString DOC_FREQ = new XContentBuilderString("doc_freq");
+ public static final XContentBuilderString TERM_FREQ = new XContentBuilderString("term_freq");
+
+ // field statistics strings
+ public static final XContentBuilderString FIELD_STATISTICS = new XContentBuilderString("field_statistics");
+ public static final XContentBuilderString DOC_COUNT = new XContentBuilderString("doc_count");
+ public static final XContentBuilderString SUM_DOC_FREQ = new XContentBuilderString("sum_doc_freq");
+ public static final XContentBuilderString SUM_TTF = new XContentBuilderString("sum_ttf");
+
+ public static final XContentBuilderString TOKENS = new XContentBuilderString("tokens");
+ public static final XContentBuilderString POS = new XContentBuilderString("position");
+ public static final XContentBuilderString START_OFFSET = new XContentBuilderString("start_offset");
+ public static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
+ public static final XContentBuilderString PAYLOAD = new XContentBuilderString("payload");
+ public static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ public static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ public static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ public static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ public static final XContentBuilderString FOUND = new XContentBuilderString("found");
+ public static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ public static final XContentBuilderString TERM_VECTORS = new XContentBuilderString("term_vectors");
+
+ }
+
+ private BytesReference termVectors;
+ private BytesReference headerRef;
+ private String index;
+ private String type;
+ private String id;
+ private long docVersion;
+ private boolean exists = false;
+
+ private boolean sourceCopied = false;
+
+ int[] currentPositions = new int[0];
+ int[] currentStartOffset = new int[0];
+ int[] currentEndOffset = new int[0];
+ BytesReference[] currentPayloads = new BytesReference[0];
+
+ public TermVectorResponse(String index, String type, String id) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ TermVectorResponse() {
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeString(type);
+ out.writeString(id);
+ out.writeVLong(docVersion);
+ final boolean docExists = isExists();
+ out.writeBoolean(docExists);
+ out.writeBoolean(hasTermVectors());
+ if (hasTermVectors()) {
+ out.writeBytesReference(headerRef);
+ out.writeBytesReference(termVectors);
+ }
+ }
+
+ private boolean hasTermVectors() {
+ assert (headerRef == null && termVectors == null) || (headerRef != null && termVectors != null);
+ return headerRef != null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readString();
+ type = in.readString();
+ id = in.readString();
+ docVersion = in.readVLong();
+ exists = in.readBoolean();
+ if (in.readBoolean()) {
+ headerRef = in.readBytesReference();
+ termVectors = in.readBytesReference();
+ }
+ }
+
+ public Fields getFields() throws IOException {
+ if (hasTermVectors() && isExists()) {
+ if (!sourceCopied) { // make the bytes safe
+ headerRef = headerRef.copyBytesArray();
+ termVectors = termVectors.copyBytesArray();
+ }
+ return new TermVectorFields(headerRef, termVectors);
+ } else {
+ return new Fields() {
+ @Override
+ public Iterator<String> iterator() {
+ return Iterators.emptyIterator();
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return 0;
+ }
+ };
+ }
+
+ }
+
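+ // A sketch of the JSON produced below (shape derived from the builder calls):
+ // { "_index": ..., "_type": ..., "_id": ..., "_version": ..., "found": true,
+ // "term_vectors": { "<field>": { "field_statistics": { ... }, "terms": {
+ // "<term>": { "doc_freq": ..., "ttf": ..., "term_freq": ...,
+ // "tokens": [ { "position": ..., "start_offset": ..., "end_offset": ..., "payload": ... } ] } } } } }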
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ assert index != null;
+ assert type != null;
+ assert id != null;
+ builder.startObject();
+ builder.field(FieldStrings._INDEX, index);
+ builder.field(FieldStrings._TYPE, type);
+ builder.field(FieldStrings._ID, id);
+ builder.field(FieldStrings._VERSION, docVersion);
+ builder.field(FieldStrings.FOUND, isExists());
+ if (!isExists()) {
+ builder.endObject();
+ return builder;
+ }
+ builder.startObject(FieldStrings.TERM_VECTORS);
+ final CharsRef spare = new CharsRef();
+ Fields theFields = getFields();
+ Iterator<String> fieldIter = theFields.iterator();
+ while (fieldIter.hasNext()) {
+ buildField(builder, spare, theFields, fieldIter);
+ }
+ builder.endObject();
+ builder.endObject();
+ return builder;
+
+ }
+
+ private void buildField(XContentBuilder builder, final CharsRef spare, Fields theFields, Iterator<String> fieldIter) throws IOException {
+ String fieldName = fieldIter.next();
+ builder.startObject(fieldName);
+ Terms curTerms = theFields.terms(fieldName);
+ // write field statistics
+ buildFieldStatistics(builder, curTerms);
+ builder.startObject(FieldStrings.TERMS);
+ TermsEnum termIter = curTerms.iterator(null);
+ for (int i = 0; i < curTerms.size(); i++) {
+ buildTerm(builder, spare, curTerms, termIter);
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+
+ private void buildTerm(XContentBuilder builder, final CharsRef spare, Terms curTerms, TermsEnum termIter) throws IOException {
+ // start term, optimized writing
+ BytesRef term = termIter.next();
+ UnicodeUtil.UTF8toUTF16(term, spare);
+ builder.startObject(spare.toString());
+ buildTermStatistics(builder, termIter);
+ // finally write the term vectors
+ DocsAndPositionsEnum posEnum = termIter.docsAndPositions(null, null);
+ int termFreq = posEnum.freq();
+ builder.field(FieldStrings.TERM_FREQ, termFreq);
+ initMemory(curTerms, termFreq);
+ initValues(curTerms, posEnum, termFreq);
+ buildValues(builder, curTerms, termFreq);
+ builder.endObject();
+ }
+
+ private void buildTermStatistics(XContentBuilder builder, TermsEnum termIter) throws IOException {
+ // write term statistics. At this point we do not naturally have a
+ // boolean that says if these values actually were requested.
+ // However, we can assume that they were not if the statistic values are
+ // <= 0.
+ assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1)));
+ int docFreq = termIter.docFreq();
+ if (docFreq > 0) {
+ builder.field(FieldStrings.DOC_FREQ, docFreq);
+ builder.field(FieldStrings.TTF, termIter.totalTermFreq());
+ }
+ }
+
+ private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
+ if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
+ return;
+ }
+
+ builder.startArray(FieldStrings.TOKENS);
+ for (int i = 0; i < termFreq; i++) {
+ builder.startObject();
+ if (curTerms.hasPositions()) {
+ builder.field(FieldStrings.POS, currentPositions[i]);
+ }
+ if (curTerms.hasOffsets()) {
+ builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
+ builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
+ }
+ if (curTerms.hasPayloads() && (currentPayloads[i].length() > 0)) {
+ builder.field(FieldStrings.PAYLOAD, currentPayloads[i]);
+ }
+ builder.endObject();
+ }
+ builder.endArray();
+
+ }
+
+ private void initValues(Terms curTerms, DocsAndPositionsEnum posEnum, int termFreq) throws IOException {
+ for (int j = 0; j < termFreq; j++) {
+ int nextPos = posEnum.nextPosition();
+ if (curTerms.hasPositions()) {
+ currentPositions[j] = nextPos;
+ }
+ if (curTerms.hasOffsets()) {
+ currentStartOffset[j] = posEnum.startOffset();
+ currentEndOffset[j] = posEnum.endOffset();
+ }
+ if (curTerms.hasPayloads()) {
+ BytesRef curPayload = posEnum.getPayload();
+ if (curPayload != null) {
+ currentPayloads[j] = new BytesArray(curPayload.bytes, 0, curPayload.length);
+ } else {
+ currentPayloads[j] = null;
+ }
+
+ }
+ }
+ }
+
+ private void initMemory(Terms curTerms, int termFreq) {
+ // init memory for performance reasons
+ if (curTerms.hasPositions()) {
+ currentPositions = ArrayUtil.grow(currentPositions, termFreq);
+ }
+ if (curTerms.hasOffsets()) {
+ currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq);
+ currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq);
+ }
+ if (curTerms.hasPayloads()) {
+ currentPayloads = new BytesArray[termFreq];
+ }
+ }
+
+ private void buildFieldStatistics(XContentBuilder builder, Terms curTerms) throws IOException {
+ long sumDocFreq = curTerms.getSumDocFreq();
+ int docCount = curTerms.getDocCount();
+ long sumTotalTermFrequencies = curTerms.getSumTotalTermFreq();
+ if (docCount > 0) {
+ assert ((sumDocFreq > 0)) : "docCount >= 0 but sumDocFreq ain't!";
+ assert ((sumTotalTermFrequencies > 0)) : "docCount >= 0 but sumTotalTermFrequencies ain't!";
+ builder.startObject(FieldStrings.FIELD_STATISTICS);
+ builder.field(FieldStrings.SUM_DOC_FREQ, sumDocFreq);
+ builder.field(FieldStrings.DOC_COUNT, docCount);
+ builder.field(FieldStrings.SUM_TTF, sumTotalTermFrequencies);
+ builder.endObject();
+ } else if (docCount == -1) { // this should only be -1 if the field
+ // statistics were not requested at all. In
+ // this case all 3 values should be -1
+ assert ((sumDocFreq == -1)) : "docCount was -1 but sumDocFreq ain't!";
+ assert ((sumTotalTermFrequencies == -1)) : "docCount was -1 but sumTotalTermFrequencies ain't!";
+ } else {
+ throw new ElasticsearchIllegalStateException(
+ "Something is wrong with the field statistics of the term vector request: Values are " + "\n"
+ + FieldStrings.SUM_DOC_FREQ + " " + sumDocFreq + "\n" + FieldStrings.DOC_COUNT + " " + docCount + "\n"
+ + FieldStrings.SUM_TTF + " " + sumTotalTermFrequencies);
+ }
+ }
+
+ public boolean isExists() {
+ return exists;
+ }
+
+ public void setExists(boolean exists) {
+ this.exists = exists;
+ }
+
+ public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields) throws IOException {
+ TermVectorWriter tvw = new TermVectorWriter(this);
+
+ if (termVectorsByField != null) {
+ tvw.setFields(termVectorsByField, selectedFields, flags, topLevelFields);
+ }
+
+ }
+
+ public void setTermVectorField(BytesStreamOutput output) {
+ termVectors = output.bytes();
+ }
+
+ public void setHeader(BytesReference header) {
+ headerRef = header;
+
+ }
+
+ public void setDocVersion(long version) {
+ this.docVersion = version;
+
+ }
+
+ public String getIndex() {
+ return index;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorWriter.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorWriter.java
new file mode 100644
index 0000000..ba30319
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorWriter.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.termvector.TermVectorRequest.Flag;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+
+// package only - this is an internal class!
+final class TermVectorWriter {
+ final List<String> fields = new ArrayList<String>();
+ final List<Long> fieldOffset = new ArrayList<Long>();
+ final BytesStreamOutput output = new BytesStreamOutput(1); // can we somehow predict the size here?
+ private static final String HEADER = "TV";
+ private static final int CURRENT_VERSION = -1;
+ TermVectorResponse response = null;
+
+ TermVectorWriter(TermVectorResponse termVectorResponse) throws IOException {
+ response = termVectorResponse;
+ }
+
+ void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields) throws IOException {
+
+ int numFieldsWritten = 0;
+ TermsEnum iterator = null;
+ DocsAndPositionsEnum docsAndPosEnum = null;
+ DocsEnum docsEnum = null;
+ TermsEnum topLevelIterator = null;
+ for (String field : termVectorsByField) {
+ if ((selectedFields != null) && (!selectedFields.contains(field))) {
+ continue;
+ }
+
+ Terms fieldTermVector = termVectorsByField.terms(field);
+ Terms topLevelTerms = topLevelFields.terms(field);
+
+ topLevelIterator = topLevelTerms.iterator(topLevelIterator);
+ boolean positions = flags.contains(Flag.Positions) && fieldTermVector.hasPositions();
+ boolean offsets = flags.contains(Flag.Offsets) && fieldTermVector.hasOffsets();
+ boolean payloads = flags.contains(Flag.Payloads) && fieldTermVector.hasPayloads();
+ startField(field, fieldTermVector.size(), positions, offsets, payloads);
+ if (flags.contains(Flag.FieldStatistics)) {
+ writeFieldStatistics(topLevelTerms);
+ }
+ iterator = fieldTermVector.iterator(iterator);
+ final boolean useDocsAndPos = positions || offsets || payloads;
+ while (iterator.next() != null) { // iterate over all terms of the current field
+ BytesRef term = iterator.term();
+ // seek the term in the top-level terms so its statistics (e.g. doc frequency) are accessible
+ boolean foundTerm = topLevelIterator.seekExact(term);
+ assert (foundTerm);
+ startTerm(term);
+ if (flags.contains(Flag.TermStatistics)) {
+ writeTermStatistics(topLevelIterator);
+ }
+ if (useDocsAndPos) {
+ // given we have pos or offsets
+ docsAndPosEnum = writeTermWithDocsAndPos(iterator, docsAndPosEnum, positions, offsets, payloads);
+ } else {
+ // if we do not have the positions stored, we need to
+ // get the frequency from a DocsEnum.
+ docsEnum = writeTermWithDocsOnly(iterator, docsEnum);
+ }
+ }
+ numFieldsWritten++;
+ }
+ response.setTermVectorField(output);
+ response.setHeader(writeHeader(numFieldsWritten, flags.contains(Flag.TermStatistics), flags.contains(Flag.FieldStatistics)));
+ }
+
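+ // Header layout (as written below): the "TV" marker string, a version int, two
+ // booleans recording whether term/field statistics are present, the number of
+ // fields as a vint, then for each field its name and the byte offset of its
+ // terms in the termVectors output.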
+ private BytesReference writeHeader(int numFieldsWritten, boolean getTermStatistics, boolean getFieldStatistics) throws IOException {
+ // now, write the information about offset of the terms in the
+ // termVectors field
+ BytesStreamOutput header = new BytesStreamOutput();
+ header.writeString(HEADER);
+ header.writeInt(CURRENT_VERSION);
+ header.writeBoolean(getTermStatistics);
+ header.writeBoolean(getFieldStatistics);
+ header.writeVInt(numFieldsWritten);
+ for (int i = 0; i < fields.size(); i++) {
+ header.writeString(fields.get(i));
+ header.writeVLong(fieldOffset.get(i).longValue());
+ }
+ header.close();
+ return header.bytes();
+ }
+
+ private DocsEnum writeTermWithDocsOnly(TermsEnum iterator, DocsEnum docsEnum) throws IOException {
+ docsEnum = iterator.docs(null, docsEnum);
+ int nextDoc = docsEnum.nextDoc();
+ assert nextDoc != DocsEnum.NO_MORE_DOCS;
+ writeFreq(docsEnum.freq());
+ nextDoc = docsEnum.nextDoc();
+ assert nextDoc == DocsEnum.NO_MORE_DOCS;
+ return docsEnum;
+ }
+
+ private DocsAndPositionsEnum writeTermWithDocsAndPos(TermsEnum iterator, DocsAndPositionsEnum docsAndPosEnum, boolean positions,
+ boolean offsets, boolean payloads) throws IOException {
+ docsAndPosEnum = iterator.docsAndPositions(null, docsAndPosEnum);
+ // for each term (iterator next) in this field (field)
+ // iterate over the docs (should only be one)
+ int nextDoc = docsAndPosEnum.nextDoc();
+ assert nextDoc != DocsEnum.NO_MORE_DOCS;
+ final int freq = docsAndPosEnum.freq();
+ writeFreq(freq);
+ for (int j = 0; j < freq; j++) {
+ int curPos = docsAndPosEnum.nextPosition();
+ if (positions) {
+ writePosition(curPos);
+ }
+ if (offsets) {
+ writeOffsets(docsAndPosEnum.startOffset(), docsAndPosEnum.endOffset());
+ }
+ if (payloads) {
+ writePayload(docsAndPosEnum.getPayload());
+ }
+ }
+ nextDoc = docsAndPosEnum.nextDoc();
+ assert nextDoc == DocsEnum.NO_MORE_DOCS;
+ return docsAndPosEnum;
+ }
+
+ private void writePayload(BytesRef payload) throws IOException {
+ if (payload != null) {
+ output.writeVInt(payload.length);
+ output.writeBytes(payload.bytes, payload.offset, payload.length);
+ } else {
+ output.writeVInt(0);
+ }
+ }
+
+ private void writeFreq(int termFreq) throws IOException {
+
+ writePotentiallyNegativeVInt(termFreq);
+ }
+
+ private void writeOffsets(int startOffset, int endOffset) throws IOException {
+ assert (startOffset >= 0);
+ assert (endOffset >= 0);
+ if ((startOffset >= 0) && (endOffset >= 0)) {
+ output.writeVInt(startOffset);
+ output.writeVInt(endOffset);
+ }
+ }
+
+ private void writePosition(int pos) throws IOException {
+ assert (pos >= 0);
+ if (pos >= 0) {
+ output.writeVInt(pos);
+ }
+ }
+
+ private void startField(String fieldName, long termsSize, boolean writePositions, boolean writeOffsets, boolean writePayloads)
+ throws IOException {
+ fields.add(fieldName);
+ fieldOffset.add(output.position());
+ output.writeVLong(termsSize);
+ // record whether positions, offsets and payloads are written
+ output.writeBoolean(writePositions);
+ output.writeBoolean(writeOffsets);
+ output.writeBoolean(writePayloads);
+ }
+
+ private void startTerm(BytesRef term) throws IOException {
+ output.writeVInt(term.length);
+ output.writeBytes(term.bytes, term.offset, term.length);
+
+ }
+
+ private void writeTermStatistics(TermsEnum topLevelIterator) throws IOException {
+ int docFreq = topLevelIterator.docFreq();
+ assert (docFreq >= 0);
+ writePotentiallyNegativeVInt(docFreq);
+ long ttf = topLevelIterator.totalTermFreq();
+ assert (ttf >= 0);
+ writePotentiallyNegativeVLong(ttf);
+
+ }
+
+ private void writeFieldStatistics(Terms topLevelTerms) throws IOException {
+ long sttf = topLevelTerms.getSumTotalTermFreq();
+ assert (sttf >= 0);
+ writePotentiallyNegativeVLong(sttf);
+ long sdf = topLevelTerms.getSumDocFreq();
+ assert (sdf >= 0);
+ writePotentiallyNegativeVLong(sdf);
+ int dc = topLevelTerms.getDocCount();
+ assert (dc >= 0);
+ writePotentiallyNegativeVInt(dc);
+
+ }
+
+ private void writePotentiallyNegativeVInt(int value) throws IOException {
+ // term freq etc. can be -1 if not present; shift by one so the value fits into an
+ // unsigned VInt (e.g. -1 -> 0, 0 -> 1, 5 -> 6) and 0 encodes "not present"
+ output.writeVInt(Math.max(0, value + 1));
+ }
+
+ private void writePotentiallyNegativeVLong(long value) throws IOException {
+ // same shift-by-one encoding as in writePotentiallyNegativeVInt, for long values
+ output.writeVLong(Math.max(0, value + 1));
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/termvector/TransportMultiTermVectorsAction.java b/src/main/java/org/elasticsearch/action/termvector/TransportMultiTermVectorsAction.java
new file mode 100644
index 0000000..b0c5b59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TransportMultiTermVectorsAction.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportRequestHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class TransportMultiTermVectorsAction extends TransportAction<MultiTermVectorsRequest, MultiTermVectorsResponse> {
+
+ private final ClusterService clusterService;
+
+ private final TransportSingleShardMultiTermsVectorAction shardAction;
+
+ @Inject
+ public TransportMultiTermVectorsAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+ ClusterService clusterService, TransportSingleShardMultiTermsVectorAction shardAction) {
+ super(settings, threadPool);
+ this.clusterService = clusterService;
+ this.shardAction = shardAction;
+
+ transportService.registerHandler(MultiTermVectorsAction.NAME, new TransportHandler());
+ }
+
+ @Override
+ protected void doExecute(final MultiTermVectorsRequest request, final ActionListener<MultiTermVectorsResponse> listener) {
+ ClusterState clusterState = clusterService.state();
+
+ clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
+
+ final AtomicArray<MultiTermVectorsItemResponse> responses = new AtomicArray<MultiTermVectorsItemResponse>(request.requests.size());
+
+ Map<ShardId, MultiTermVectorsShardRequest> shardRequests = new HashMap<ShardId, MultiTermVectorsShardRequest>();
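+ // group the individual term vector requests by the shard they route to,
+ // so that each shard is contacted only once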
+ for (int i = 0; i < request.requests.size(); i++) {
+ TermVectorRequest termVectorRequest = request.requests.get(i);
+ termVectorRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorRequest.routing(), termVectorRequest.index()));
+ if (!clusterState.metaData().hasConcreteIndex(termVectorRequest.index())) {
+ responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorRequest.index(),
+ termVectorRequest.type(), termVectorRequest.id(), "[" + termVectorRequest.index() + "] missing")));
+ continue;
+ }
+ if (termVectorRequest.routing() == null && clusterState.getMetaData().routingRequired(termVectorRequest.index(), termVectorRequest.type())) {
+ responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorRequest.index(),
+ termVectorRequest.type(), termVectorRequest.id(), "routing is required, but hasn't been specified")));
+ continue;
+ }
+ termVectorRequest.index(clusterState.metaData().concreteIndex(termVectorRequest.index()));
+ ShardId shardId = clusterService
+ .operationRouting()
+ .getShards(clusterState, termVectorRequest.index(), termVectorRequest.type(), termVectorRequest.id(),
+ termVectorRequest.routing(), null).shardId();
+ MultiTermVectorsShardRequest shardRequest = shardRequests.get(shardId);
+ if (shardRequest == null) {
+ shardRequest = new MultiTermVectorsShardRequest(shardId.index().name(), shardId.id());
+ shardRequest.preference(request.preference);
+
+ shardRequests.put(shardId, shardRequest);
+ }
+ shardRequest.add(i, termVectorRequest);
+ }
+
+ if (shardRequests.size() == 0) {
+ // only failures, so we can respond right away
+ listener.onResponse(new MultiTermVectorsResponse(responses.toArray(new MultiTermVectorsItemResponse[responses.length()])));
+ }
+
+ final AtomicInteger counter = new AtomicInteger(shardRequests.size());
+ for (final MultiTermVectorsShardRequest shardRequest : shardRequests.values()) {
+ shardAction.execute(shardRequest, new ActionListener<MultiTermVectorsShardResponse>() {
+ @Override
+ public void onResponse(MultiTermVectorsShardResponse response) {
+ for (int i = 0; i < response.locations.size(); i++) {
+ responses.set(response.locations.get(i), new MultiTermVectorsItemResponse(response.responses.get(i),
+ response.failures.get(i)));
+ }
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ // create failures for all relevant requests
+ String message = ExceptionsHelper.detailedMessage(e);
+ for (int i = 0; i < shardRequest.locations.size(); i++) {
+ TermVectorRequest termVectorRequest = shardRequest.requests.get(i);
+ responses.set(shardRequest.locations.get(i), new MultiTermVectorsItemResponse(null,
+ new MultiTermVectorsResponse.Failure(shardRequest.index(), termVectorRequest.type(),
+ termVectorRequest.id(), message)));
+ }
+ if (counter.decrementAndGet() == 0) {
+ finishHim();
+ }
+ }
+
+ private void finishHim() {
+ listener.onResponse(new MultiTermVectorsResponse(
+ responses.toArray(new MultiTermVectorsItemResponse[responses.length()])));
+ }
+ });
+ }
+ }
+
+ class TransportHandler extends BaseTransportRequestHandler<MultiTermVectorsRequest> {
+
+ @Override
+ public MultiTermVectorsRequest newInstance() {
+ return new MultiTermVectorsRequest();
+ }
+
+ @Override
+ public void messageReceived(final MultiTermVectorsRequest request, final TransportChannel channel) throws Exception {
+ // no need to use threaded listener, since we just send a response
+ request.listenerThreaded(false);
+ execute(request, new ActionListener<MultiTermVectorsResponse>() {
+ @Override
+ public void onResponse(MultiTermVectorsResponse response) {
+ try {
+ channel.sendResponse(response);
+ } catch (Throwable t) {
+ onFailure(t);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(e);
+ } catch (Throwable t) {
+ logger.warn("Failed to send error response for action [" + MultiTermVectorsAction.NAME + "] and request ["
+ + request + "]", t);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TransportSingleShardMultiTermsVectorAction.java b/src/main/java/org/elasticsearch/action/termvector/TransportSingleShardMultiTermsVectorAction.java
new file mode 100644
index 0000000..9b7069e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TransportSingleShardMultiTermsVectorAction.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+public class TransportSingleShardMultiTermsVectorAction extends TransportShardSingleOperationAction<MultiTermVectorsShardRequest, MultiTermVectorsShardResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportSingleShardMultiTermsVectorAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, ThreadPool threadPool) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GET;
+ }
+
+ @Override
+ protected String transportAction() {
+ return MultiTermVectorsAction.NAME + "/shard";
+ }
+
+ @Override
+ protected MultiTermVectorsShardRequest newRequest() {
+ return new MultiTermVectorsShardRequest();
+ }
+
+ @Override
+ protected MultiTermVectorsShardResponse newResponse() {
+ return new MultiTermVectorsShardResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, MultiTermVectorsShardRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, MultiTermVectorsShardRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState state, MultiTermVectorsShardRequest request) {
+ return clusterService.operationRouting()
+ .getShards(clusterService.state(), request.index(), request.shardId(), request.preference());
+ }
+
+ @Override
+ protected void resolveRequest(ClusterState state, MultiTermVectorsShardRequest request) {
+ // no need to set concrete index and routing here, it has already been set by the multi term vectors action on the item
+ // request.index(state.metaData().concreteIndex(request.index()));
+ }
+
+ @Override
+ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, int shardId) throws ElasticsearchException {
+
+ MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
+ for (int i = 0; i < request.locations.size(); i++) {
+ TermVectorRequest termVectorRequest = request.requests.get(i);
+
+ try {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(shardId);
+ TermVectorResponse termVectorResponse = indexShard.termVectorService().getTermVector(termVectorRequest);
+ response.add(request.locations.get(i), termVectorResponse);
+ } catch (Throwable t) {
+ if (TransportActions.isShardNotAvailableException(t)) {
+ throw (ElasticsearchException) t;
+ } else {
+ logger.debug("[{}][{}] failed to execute multi term vectors for [{}]/[{}]", t, request.index(), shardId, termVectorRequest.type(), termVectorRequest.id());
+ response.add(request.locations.get(i),
+ new MultiTermVectorsResponse.Failure(request.index(), termVectorRequest.type(), termVectorRequest.id(), ExceptionsHelper.detailedMessage(t)));
+ }
+ }
+ }
+
+ return response;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TransportSingleShardTermVectorAction.java b/src/main/java/org/elasticsearch/action/termvector/TransportSingleShardTermVectorAction.java
new file mode 100644
index 0000000..1a56f04
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/TransportSingleShardTermVectorAction.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Performs the term vector operation on a single shard.
+ */
+public class TransportSingleShardTermVectorAction extends TransportShardSingleOperationAction<TermVectorRequest, TermVectorResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportSingleShardTermVectorAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, ThreadPool threadPool) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ protected String executor() {
+ // TODO: Is this the right pool to execute this on?
+ return ThreadPool.Names.GET;
+ }
+
+ @Override
+ protected String transportAction() {
+ return TermVectorAction.NAME;
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, TermVectorRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, TermVectorRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState state, TermVectorRequest request) {
+ return clusterService.operationRouting().getShards(clusterService.state(), request.index(), request.type(), request.id(),
+ request.routing(), request.preference());
+ }
+
+ @Override
+ protected void resolveRequest(ClusterState state, TermVectorRequest request) {
+ // update the routing (request#index here is possibly an alias)
+ request.routing(state.metaData().resolveIndexRouting(request.routing(), request.index()));
+ request.index(state.metaData().concreteIndex(request.index()));
+
+ // Fail fast on the node that received the request.
+ if (request.routing() == null && state.getMetaData().routingRequired(request.index(), request.type())) {
+ throw new RoutingMissingException(request.index(), request.type(), request.id());
+ }
+ }
+
+ @Override
+ protected TermVectorResponse shardOperation(TermVectorRequest request, int shardId) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(shardId);
+ return indexShard.termVectorService().getTermVector(request);
+ }
+
+ @Override
+ protected TermVectorRequest newRequest() {
+ return new TermVectorRequest();
+ }
+
+ @Override
+ protected TermVectorResponse newResponse() {
+ return new TermVectorResponse();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/action/termvector/package-info.java b/src/main/java/org/elasticsearch/action/termvector/package-info.java
new file mode 100644
index 0000000..83ecc11
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/termvector/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Get the term vector for a specific document.
+ */
+package org.elasticsearch.action.termvector; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
new file mode 100644
index 0000000..5dec4a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.delete.TransportDeleteAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.index.TransportIndexAction;
+import org.elasticsearch.action.support.AutoCreateIndex;
+import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.action.support.single.instance.TransportInstanceSingleOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.PlainShardIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+
+/**
+ */
+public class TransportUpdateAction extends TransportInstanceSingleOperationAction<UpdateRequest, UpdateResponse> {
+
+ private final TransportDeleteAction deleteAction;
+ private final TransportIndexAction indexAction;
+ private final AutoCreateIndex autoCreateIndex;
+ private final TransportCreateIndexAction createIndexAction;
+ private final UpdateHelper updateHelper;
+
+ @Inject
+ public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ TransportIndexAction indexAction, TransportDeleteAction deleteAction, TransportCreateIndexAction createIndexAction,
+ UpdateHelper updateHelper) {
+ super(settings, threadPool, clusterService, transportService);
+ this.indexAction = indexAction;
+ this.deleteAction = deleteAction;
+ this.createIndexAction = createIndexAction;
+ this.updateHelper = updateHelper;
+ this.autoCreateIndex = new AutoCreateIndex(settings);
+ }
+
+ @Override
+ protected String transportAction() {
+ return UpdateAction.NAME;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.INDEX;
+ }
+
+ @Override
+ protected UpdateRequest newRequest() {
+ return new UpdateRequest();
+ }
+
+ @Override
+ protected UpdateResponse newResponse() {
+ return new UpdateResponse();
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, UpdateRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, UpdateRequest request) {
+ return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
+ }
+
+ @Override
+ protected boolean retryOnFailure(Throwable e) {
+ return TransportActions.isShardNotAvailableException(e);
+ }
+
+ @Override
+ protected boolean resolveRequest(ClusterState state, UpdateRequest request, ActionListener<UpdateResponse> listener) {
+ MetaData metaData = clusterService.state().metaData();
+ String aliasOrIndex = request.index();
+ request.routing((metaData.resolveIndexRouting(request.routing(), aliasOrIndex)));
+ request.index(metaData.concreteIndex(request.index()));
+
+ // Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
+ if (request.routing() == null && state.getMetaData().routingRequired(request.index(), request.type())) {
+ throw new RoutingMissingException(request.index(), request.type(), request.id());
+ }
+ return true;
+ }
+
+ @Override
+ protected void doExecute(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
+ // if we don't have a master, we don't have metadata; that's fine, let the create index API find a master
+ if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
+ request.beforeLocalFork(); // we fork on another thread...
+ createIndexAction.execute(new CreateIndexRequest(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+ @Override
+ public void onResponse(CreateIndexResponse result) {
+ innerExecute(request, listener);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
+ // we have the index, do it
+ try {
+ innerExecute(request, listener);
+ } catch (Throwable e1) {
+ listener.onFailure(e1);
+ }
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ });
+ } else {
+ innerExecute(request, listener);
+ }
+ }
+
+ private void innerExecute(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
+ super.doExecute(request, listener);
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) throws ElasticsearchException {
+ if (request.shardId() != -1) {
+ return clusterState.routingTable().index(request.index()).shard(request.shardId()).primaryShardIt();
+ }
+ ShardIterator shardIterator = clusterService.operationRouting()
+ .indexShards(clusterService.state(), request.index(), request.type(), request.id(), request.routing());
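+ // updates must be executed on the primary shard, so narrow the iterator down to the primary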
+ ShardRouting shard;
+ while ((shard = shardIterator.nextOrNull()) != null) {
+ if (shard.primary()) {
+ return new PlainShardIterator(shardIterator.shardId(), ImmutableList.of(shard));
+ }
+ }
+ return new PlainShardIterator(shardIterator.shardId(), ImmutableList.<ShardRouting>of());
+ }
+
+ @Override
+ protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener) throws ElasticsearchException {
+ shardOperation(request, listener, 0);
+ }
+
+ protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) throws ElasticsearchException {
+ final UpdateHelper.Result result = updateHelper.prepare(request);
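+ // the helper translates the update into an upsert, index or delete operation;
+ // on version conflicts, each branch below retries up to request.retryOnConflict() times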
+ switch (result.operation()) {
+ case UPSERT:
+ IndexRequest upsertRequest = result.action();
+ // we fetch the source from the index request so we don't generate the bytes twice; it's already done in the index request
+ final BytesReference upsertSourceBytes = upsertRequest.source();
+ indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), response.isCreated());
+ if (request.fields() != null && request.fields().length > 0) {
+ Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
+ update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
+ } else {
+ update.setGetResult(null);
+ }
+ listener.onResponse(update);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ e = ExceptionsHelper.unwrapCause(e);
+ if (e instanceof VersionConflictEngineException || e instanceof DocumentAlreadyExistsException) {
+ if (retryCount < request.retryOnConflict()) {
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ shardOperation(request, listener, retryCount + 1);
+ }
+ });
+ return;
+ }
+ }
+ listener.onFailure(e);
+ }
+ });
+ break;
+ case INDEX:
+ IndexRequest indexRequest = result.action();
+ // we fetch the source from the index request so we don't generate the bytes twice; it's already done in the index request
+ final BytesReference indexSourceBytes = indexRequest.source();
+ indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), response.isCreated());
+ update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
+ listener.onResponse(update);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ e = ExceptionsHelper.unwrapCause(e);
+ if (e instanceof VersionConflictEngineException) {
+ if (retryCount < request.retryOnConflict()) {
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ shardOperation(request, listener, retryCount + 1);
+ }
+ });
+ return;
+ }
+ }
+ listener.onFailure(e);
+ }
+ });
+ break;
+ case DELETE:
+ DeleteRequest deleteRequest = result.action();
+ deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
+ @Override
+ public void onResponse(DeleteResponse response) {
+ UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), false);
+ update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
+ listener.onResponse(update);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ e = ExceptionsHelper.unwrapCause(e);
+ if (e instanceof VersionConflictEngineException) {
+ if (retryCount < request.retryOnConflict()) {
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ shardOperation(request, listener, retryCount + 1);
+ }
+ });
+ return;
+ }
+ }
+ listener.onFailure(e);
+ }
+ });
+ break;
+ case NONE:
+ UpdateResponse update = result.action();
+ listener.onResponse(update);
+ break;
+ default:
+ throw new ElasticsearchIllegalStateException("Illegal operation " + result.operation());
+ }
+ }
+}
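The shardOperation() retry path above re-runs the whole prepare-and-index cycle when a version conflict surfaces, up to retryOnConflict times. A minimal usage sketch, assuming a connected 1.x Client; index, type, and field names are illustrative:

    // bump a counter, letting the transport action retry on concurrent updates
    UpdateResponse response = client.prepareUpdate("orders", "order", "1")
            .setScript("ctx._source.counter += 1")
            .setRetryOnConflict(3)
            .execute()
            .actionGet();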
diff --git a/src/main/java/org/elasticsearch/action/update/UpdateAction.java b/src/main/java/org/elasticsearch/action/update/UpdateAction.java
new file mode 100644
index 0000000..773b23f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/update/UpdateAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.Client;
+
+/**
+ */
+public class UpdateAction extends Action<UpdateRequest, UpdateResponse, UpdateRequestBuilder> {
+
+ public static final UpdateAction INSTANCE = new UpdateAction();
+ public static final String NAME = "update";
+
+ private UpdateAction() {
+ super(NAME);
+ }
+
+ @Override
+ public UpdateResponse newResponse() {
+ return new UpdateResponse();
+ }
+
+ @Override
+ public UpdateRequestBuilder newRequestBuilder(Client client) {
+ return new UpdateRequestBuilder(client);
+ }
+}
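UpdateAction is the singleton handle that ties UpdateRequest, UpdateResponse, and UpdateRequestBuilder together. A sketch of the client entry points, assuming a connected Client; names are illustrative:

    UpdateRequest request = new UpdateRequest("orders", "order", "1")
            .doc("status", "shipped");
    // client.update(request) is shorthand for the generic form below
    UpdateResponse response = client.execute(UpdateAction.INSTANCE, request).actionGet();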
diff --git a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
new file mode 100644
index 0000000..5d7dcac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.DocumentSourceMissingException;
+import org.elasticsearch.index.get.GetField;
+import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.lookup.SourceLookup;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMapWithExpectedSize;
+
+/**
+ * Helper for translating an update request into an index request, a delete request, or an update response.
+ */
+public class UpdateHelper extends AbstractComponent {
+
+ private final IndicesService indicesService;
+ private final ScriptService scriptService;
+
+ @Inject
+ public UpdateHelper(Settings settings, IndicesService indicesService, ScriptService scriptService) {
+ super(settings);
+ this.indicesService = indicesService;
+ this.scriptService = scriptService;
+ }
+
+ /**
+ * Prepares an update request by converting it into an index request, a delete request, or an update response (no action).
+ */
+ public Result prepare(UpdateRequest request) {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(request.shardId());
+ return prepare(request, indexShard);
+ }
+
+ public Result prepare(UpdateRequest request, IndexShard indexShard) {
+ long getDate = System.currentTimeMillis();
+ final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
+ new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME},
+ true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
+
+ if (!getResult.isExists()) {
+ if (request.upsertRequest() == null && !request.docAsUpsert()) {
+ throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
+ }
+ IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
+ indexRequest.index(request.index()).type(request.type()).id(request.id())
+ // it has to be a "create!"
+ .create(true)
+ .routing(request.routing())
+ .refresh(request.refresh())
+ .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel());
+ indexRequest.operationThreaded(false);
+ if (request.versionType() == VersionType.EXTERNAL) {
+ // in external versioning mode, we want to create the new document using the given version.
+ indexRequest.version(request.version()).versionType(VersionType.EXTERNAL);
+ }
+ return new Result(indexRequest, Operation.UPSERT, null, null);
+ }
+
+ long updateVersion = getResult.getVersion();
+ if (request.versionType() == VersionType.EXTERNAL) {
+ updateVersion = request.version(); // remember, match_any is excluded by the conflict test
+ }
+
+ if (getResult.internalSourceRef() == null) {
+ // no source, we can't do anything, throw a failure...
+ throw new DocumentSourceMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
+ }
+
+ Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
+ String operation = null;
+ String timestamp = null;
+ Long ttl = null;
+ Object fetchedTTL = null;
+ final Map<String, Object> updatedSourceAsMap;
+ final XContentType updateSourceContentType = sourceAndContent.v1();
+ String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
+ String parent = getResult.getFields().containsKey(ParentFieldMapper.NAME) ? getResult.field(ParentFieldMapper.NAME).getValue().toString() : null;
+
+ if (request.script() == null && request.doc() != null) {
+ IndexRequest indexRequest = request.doc();
+ updatedSourceAsMap = sourceAndContent.v2();
+ if (indexRequest.ttl() > 0) {
+ ttl = indexRequest.ttl();
+ }
+ timestamp = indexRequest.timestamp();
+ if (indexRequest.routing() != null) {
+ routing = indexRequest.routing();
+ }
+ if (indexRequest.parent() != null) {
+ parent = indexRequest.parent();
+ }
+ XContentHelper.update(updatedSourceAsMap, indexRequest.sourceAsMap());
+ } else {
+ Map<String, Object> ctx = new HashMap<String, Object>(2);
+ ctx.put("_source", sourceAndContent.v2());
+
+ try {
+ ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptParams);
+ script.setNextVar("ctx", ctx);
+ script.run();
+ // we need to unwrap the ctx...
+ ctx = (Map<String, Object>) script.unwrap(ctx);
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("failed to execute script", e);
+ }
+
+ operation = (String) ctx.get("op");
+ timestamp = (String) ctx.get("_timestamp");
+
+ fetchedTTL = ctx.get("_ttl");
+ if (fetchedTTL != null) {
+ if (fetchedTTL instanceof Number) {
+ ttl = ((Number) fetchedTTL).longValue();
+ } else {
+ ttl = TimeValue.parseTimeValue((String) fetchedTTL, null).millis();
+ }
+ }
+
+ updatedSourceAsMap = (Map<String, Object>) ctx.get("_source");
+ }
+
+ // no TTL has been given in the update script,
+ // so we keep the previous TTL value if there is one
+ if (ttl == null) {
+ ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
+ if (ttl != null) {
+ ttl = ttl - (System.currentTimeMillis() - getDate); // an approximation of the exact TTL value; could be improved
+ }
+ }
+
+ if (operation == null || "index".equals(operation)) {
+ final IndexRequest indexRequest = Requests.indexRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
+ .source(updatedSourceAsMap, updateSourceContentType)
+ .version(updateVersion).versionType(request.versionType())
+ .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel())
+ .timestamp(timestamp).ttl(ttl)
+ .refresh(request.refresh());
+ indexRequest.operationThreaded(false);
+ return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType);
+ } else if ("delete".equals(operation)) {
+ DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
+ .version(updateVersion).versionType(request.versionType())
+ .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel());
+ deleteRequest.operationThreaded(false);
+ return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);
+ } else if ("none".equals(operation)) {
+ UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
+ update.setGetResult(extractGetResult(request, getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, null));
+ return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
+ } else {
+ logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script);
+ UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
+ return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
+ }
+ }
+
+ /**
+ * Extracts the fields from the updated document to be returned in an update response.
+ */
+ public GetResult extractGetResult(final UpdateRequest request, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
+ if (request.fields() == null || request.fields().length == 0) {
+ return null;
+ }
+ boolean sourceRequested = false;
+ Map<String, GetField> fields = null;
+ if (request.fields() != null && request.fields().length > 0) {
+ SourceLookup sourceLookup = new SourceLookup();
+ sourceLookup.setNextSource(source);
+ for (String field : request.fields()) {
+ if (field.equals("_source")) {
+ sourceRequested = true;
+ continue;
+ }
+ Object value = sourceLookup.extractValue(field);
+ if (value != null) {
+ if (fields == null) {
+ fields = newHashMapWithExpectedSize(2);
+ }
+ GetField getField = fields.get(field);
+ if (getField == null) {
+ getField = new GetField(field, new ArrayList<Object>(2));
+ fields.put(field, getField);
+ }
+ getField.getValues().add(value);
+ }
+ }
+ }
+
+ // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType)
+ return new GetResult(request.index(), request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields);
+ }
+
+ public static class Result {
+
+ private final Streamable action;
+ private final Operation operation;
+ private final Map<String, Object> updatedSourceAsMap;
+ private final XContentType updateSourceContentType;
+
+ public Result(Streamable action, Operation operation, Map<String, Object> updatedSourceAsMap, XContentType updateSourceContentType) {
+ this.action = action;
+ this.operation = operation;
+ this.updatedSourceAsMap = updatedSourceAsMap;
+ this.updateSourceContentType = updateSourceContentType;
+ }
+
+ @SuppressWarnings("unchecked")
+ public <T extends Streamable> T action() {
+ return (T) action;
+ }
+
+ public Operation operation() {
+ return operation;
+ }
+
+ public Map<String, Object> updatedSourceAsMap() {
+ return updatedSourceAsMap;
+ }
+
+ public XContentType updateSourceContentType() {
+ return updateSourceContentType;
+ }
+ }
+
+ public enum Operation {
+
+ UPSERT,
+ INDEX,
+ DELETE,
+ NONE
+
+ }
+
+}
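The operation strings matched in prepare() ("index", "delete", "none") correspond to whatever an update script assigns to ctx.op. A script-side sketch, assuming the default 1.x script language; names are illustrative:

    // expire a session in place: delete the document when a flag is set, otherwise no-op
    client.prepareUpdate("sessions", "session", "42")
            .setScript("if (ctx._source.expired) { ctx.op = \"delete\" } else { ctx.op = \"none\" }")
            .execute()
            .actionGet();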
diff --git a/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
new file mode 100644
index 0000000..9765043
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -0,0 +1,656 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.VersionType;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ */
+public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest> {
+
+ private String type;
+ private String id;
+ @Nullable
+ private String routing;
+
+ @Nullable
+ String script;
+ @Nullable
+ String scriptLang;
+ @Nullable
+ Map<String, Object> scriptParams;
+
+ private String[] fields;
+
+ private long version = Versions.MATCH_ANY;
+ private VersionType versionType = VersionType.INTERNAL;
+ private int retryOnConflict = 0;
+
+ private boolean refresh = false;
+
+ private ReplicationType replicationType = ReplicationType.DEFAULT;
+ private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
+
+ private IndexRequest upsertRequest;
+
+ private boolean docAsUpsert = false;
+
+ @Nullable
+ private IndexRequest doc;
+
+ public UpdateRequest() {
+
+ }
+
+ public UpdateRequest(String index, String type, String id) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = super.validate();
+ if (type == null) {
+ validationException = addValidationError("type is missing", validationException);
+ }
+ if (id == null) {
+ validationException = addValidationError("id is missing", validationException);
+ }
+
+ if (version != Versions.MATCH_ANY && retryOnConflict > 0) {
+ validationException = addValidationError("can't provide both retry_on_conflict and a specific version", validationException);
+ }
+
+ if (script == null && doc == null) {
+ validationException = addValidationError("script or doc is missing", validationException);
+ }
+ if (script != null && doc != null) {
+ validationException = addValidationError("can't provide both script and doc", validationException);
+ }
+ if (doc == null && docAsUpsert) {
+ validationException = addValidationError("doc must be specified if doc_as_upsert is enabled", validationException);
+ }
+ return validationException;
+ }
+
+ /**
+ * The type of the indexed document.
+ */
+ public String type() {
+ return type;
+ }
+
+ /**
+ * Sets the type of the indexed document.
+ */
+ public UpdateRequest type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * The id of the indexed document.
+ */
+ public String id() {
+ return id;
+ }
+
+ /**
+ * Sets the id of the indexed document.
+ */
+ public UpdateRequest id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. The request is routed to the shard
+ * determined by hashing this value instead of the id.
+ */
+ public UpdateRequest routing(String routing) {
+ if (routing != null && routing.length() == 0) {
+ this.routing = null;
+ } else {
+ this.routing = routing;
+ }
+ return this;
+ }
+
+ /**
+ * Sets the parent id of this document. Will simply set the routing to this value, as the parent
+ * is only used for routing in the update request.
+ */
+ public UpdateRequest parent(String parent) {
+ if (routing == null) {
+ routing = parent;
+ }
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. The request is routed to the shard
+ * determined by hashing this value instead of the id.
+ */
+ public String routing() {
+ return this.routing;
+ }
+
+ int shardId() {
+ return this.shardId;
+ }
+
+ public String script() {
+ return this.script;
+ }
+
+ public Map<String, Object> scriptParams() {
+ return this.scriptParams;
+ }
+
+ /**
+ * The script to execute. Note, make sure not to send a different script each time; instead,
+ * use script params if possible with the same (automatically compiled) script.
+ */
+ public UpdateRequest script(String script) {
+ this.script = script;
+ return this;
+ }
+
+ /**
+ * The language of the script to execute.
+ */
+ public UpdateRequest scriptLang(String scriptLang) {
+ this.scriptLang = scriptLang;
+ return this;
+ }
+
+ public String scriptLang() {
+ return scriptLang;
+ }
+
+ /**
+ * Add a script parameter.
+ */
+ public UpdateRequest addScriptParam(String name, Object value) {
+ if (scriptParams == null) {
+ scriptParams = Maps.newHashMap();
+ }
+ scriptParams.put(name, value);
+ return this;
+ }
+
+ /**
+ * Sets the script parameters to use with the script.
+ */
+ public UpdateRequest scriptParams(Map<String, Object> scriptParams) {
+ if (this.scriptParams == null) {
+ this.scriptParams = scriptParams;
+ } else {
+ this.scriptParams.putAll(scriptParams);
+ }
+ return this;
+ }
+
+ /**
+ * The script to execute. Note, make sure not to send a different script each time; instead,
+ * use script params if possible with the same (automatically compiled) script.
+ */
+ public UpdateRequest script(String script, @Nullable Map<String, Object> scriptParams) {
+ this.script = script;
+ if (this.scriptParams != null) {
+ this.scriptParams.putAll(scriptParams);
+ } else {
+ this.scriptParams = scriptParams;
+ }
+ return this;
+ }
+
+ /**
+ * The script to execute. Note, make sure not to send a different script each time; instead,
+ * use script params if possible with the same (automatically compiled) script.
+ *
+ * @param script The script to execute
+ * @param scriptLang The script language
+ * @param scriptParams The script parameters
+ */
+ public UpdateRequest script(String script, @Nullable String scriptLang, @Nullable Map<String, Object> scriptParams) {
+ this.script = script;
+ this.scriptLang = scriptLang;
+ if (this.scriptParams != null) {
+ this.scriptParams.putAll(scriptParams);
+ } else {
+ this.scriptParams = scriptParams;
+ }
+ return this;
+ }
+
+ /**
+ * Explicitly specify the fields that will be returned. By default, nothing is returned.
+ */
+ public UpdateRequest fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ /**
+ * Get the fields to be returned.
+ */
+ public String[] fields() {
+ return this.fields;
+ }
+
+ /**
+ * Sets the number of retries to attempt if a version conflict occurs because the document
+ * was updated between getting it and updating it. Defaults to 0.
+ */
+ public UpdateRequest retryOnConflict(int retryOnConflict) {
+ this.retryOnConflict = retryOnConflict;
+ return this;
+ }
+
+ public int retryOnConflict() {
+ return this.retryOnConflict;
+ }
+
+ /**
+ * Sets the version, which will cause the index operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public UpdateRequest version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ /**
+ * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
+ */
+ public UpdateRequest versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
+ /**
+ * Should a refresh be executed after this update operation, making the change
+ * searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
+ * to <tt>false</tt>.
+ */
+ public UpdateRequest refresh(boolean refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public boolean refresh() {
+ return this.refresh;
+ }
+
+ /**
+ * The replication type.
+ */
+ public ReplicationType replicationType() {
+ return this.replicationType;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ public UpdateRequest replicationType(ReplicationType replicationType) {
+ this.replicationType = replicationType;
+ return this;
+ }
+
+ public WriteConsistencyLevel consistencyLevel() {
+ return this.consistencyLevel;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
+ */
+ public UpdateRequest consistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ this.consistencyLevel = consistencyLevel;
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(IndexRequest doc) {
+ this.doc = doc;
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(XContentBuilder source) {
+ safeDoc().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(Map source) {
+ safeDoc().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(Map source, XContentType contentType) {
+ safeDoc().source(source, contentType);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(String source) {
+ safeDoc().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(byte[] source) {
+ safeDoc().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(byte[] source, int offset, int length) {
+ safeDoc().source(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified. The doc is provided
+ * as field and value pairs.
+ */
+ public UpdateRequest doc(Object... source) {
+ safeDoc().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequest doc(String field, Object value) {
+ safeDoc().source(field, value);
+ return this;
+ }
+
+ public IndexRequest doc() {
+ return this.doc;
+ }
+
+ private IndexRequest safeDoc() {
+ if (doc == null) {
+ doc = new IndexRequest();
+ }
+ return doc;
+ }
+
+ /**
+ * Sets the index request to be used if the document does not exist. Without it, a {@link org.elasticsearch.index.engine.DocumentMissingException}
+ * is thrown when the document is missing.
+ */
+ public UpdateRequest upsert(IndexRequest upsertRequest) {
+ this.upsertRequest = upsertRequest;
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(XContentBuilder source) {
+ safeUpsertRequest().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(Map source) {
+ safeUpsertRequest().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(Map source, XContentType contentType) {
+ safeUpsertRequest().source(source, contentType);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(String source) {
+ safeUpsertRequest().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(byte[] source) {
+ safeUpsertRequest().source(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequest upsert(byte[] source, int offset, int length) {
+ safeUpsertRequest().source(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist. The doc
+ * includes field and value pairs.
+ */
+ public UpdateRequest upsert(Object... source) {
+ safeUpsertRequest().source(source);
+ return this;
+ }
+
+ public IndexRequest upsertRequest() {
+ return this.upsertRequest;
+ }
+
+ private IndexRequest safeUpsertRequest() {
+ if (upsertRequest == null) {
+ upsertRequest = new IndexRequest();
+ }
+ return upsertRequest;
+ }
+
+ public UpdateRequest source(XContentBuilder source) throws Exception {
+ return source(source.bytes());
+ }
+
+ public UpdateRequest source(byte[] source) throws Exception {
+ return source(source, 0, source.length);
+ }
+
+ public UpdateRequest source(byte[] source, int offset, int length) throws Exception {
+ return source(new BytesArray(source, offset, length));
+ }
+
+ public UpdateRequest source(BytesReference source) throws Exception {
+ XContentType xContentType = XContentFactory.xContentType(source);
+ XContentParser parser = XContentFactory.xContent(xContentType).createParser(source);
+ try {
+ XContentParser.Token t = parser.nextToken();
+ if (t == null) {
+ return this;
+ }
+ String currentFieldName = null;
+ while ((t = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (t == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.textOrNull();
+ } else if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("upsert".equals(currentFieldName)) {
+ XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
+ builder.copyCurrentStructure(parser);
+ safeUpsertRequest().source(builder);
+ } else if ("doc".equals(currentFieldName)) {
+ XContentBuilder docBuilder = XContentFactory.contentBuilder(xContentType);
+ docBuilder.copyCurrentStructure(parser);
+ safeDoc().source(docBuilder);
+ } else if ("doc_as_upsert".equals(currentFieldName)) {
+ docAsUpsert(parser.booleanValue());
+ }
+ }
+ } finally {
+ parser.close();
+ }
+ return this;
+ }
+
+ public boolean docAsUpsert() {
+ return this.docAsUpsert;
+ }
+
+ public void docAsUpsert(boolean shouldUpsertDoc) {
+ this.docAsUpsert = shouldUpsertDoc;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ replicationType = ReplicationType.fromId(in.readByte());
+ consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
+ type = in.readSharedString();
+ id = in.readString();
+ routing = in.readOptionalString();
+ script = in.readOptionalString();
+ scriptLang = in.readOptionalString();
+ scriptParams = in.readMap();
+ retryOnConflict = in.readVInt();
+ refresh = in.readBoolean();
+ if (in.readBoolean()) {
+ doc = new IndexRequest();
+ doc.readFrom(in);
+ }
+ int size = in.readInt();
+ if (size >= 0) {
+ fields = new String[size];
+ for (int i = 0; i < size; i++) {
+ fields[i] = in.readString();
+ }
+ }
+ if (in.readBoolean()) {
+ upsertRequest = new IndexRequest();
+ upsertRequest.readFrom(in);
+ }
+ docAsUpsert = in.readBoolean();
+ version = in.readLong();
+ versionType = VersionType.fromValue(in.readByte());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(replicationType.id());
+ out.writeByte(consistencyLevel.id());
+ out.writeSharedString(type);
+ out.writeString(id);
+ out.writeOptionalString(routing);
+ out.writeOptionalString(script);
+ out.writeOptionalString(scriptLang);
+ out.writeMap(scriptParams);
+ out.writeVInt(retryOnConflict);
+ out.writeBoolean(refresh);
+ if (doc == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ // make sure the basics are set
+ doc.index(index);
+ doc.type(type);
+ doc.id(id);
+ doc.writeTo(out);
+ }
+ if (fields == null) {
+ out.writeInt(-1);
+ } else {
+ out.writeInt(fields.length);
+ for (String field : fields) {
+ out.writeString(field);
+ }
+ }
+ if (upsertRequest == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ // make sure the basics are set
+ upsertRequest.index(index);
+ upsertRequest.type(type);
+ upsertRequest.id(id);
+ upsertRequest.writeTo(out);
+ }
+ out.writeBoolean(docAsUpsert);
+ out.writeLong(version);
+ out.writeByte(versionType.getValue());
+ }
+
+}
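Putting the request pieces together: a script with params plus an upsert document for the missing-document path handled by UpdateHelper. A sketch, assuming a connected Client inside a method that declares IOException; names are illustrative:

    UpdateRequest request = new UpdateRequest("metrics", "counter", "page-1")
            .script("ctx._source.views += inc")
            .addScriptParam("inc", 1)
            // indexed as a "create" when the document is missing
            .upsert(XContentFactory.jsonBuilder()
                    .startObject().field("views", 1).endObject());
    UpdateResponse response = client.update(request).actionGet();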
diff --git a/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
new file mode 100644
index 0000000..eea372d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -0,0 +1,343 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.VersionType;
+
+import java.util.Map;
+
+/**
+ */
+public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder> {
+
+ public UpdateRequestBuilder(Client client) {
+ super((InternalClient) client, new UpdateRequest());
+ }
+
+ public UpdateRequestBuilder(Client client, String index, String type, String id) {
+ super((InternalClient) client, new UpdateRequest(index, type, id));
+ }
+
+ /**
+ * Sets the type of the indexed document.
+ */
+ public UpdateRequestBuilder setType(String type) {
+ request.type(type);
+ return this;
+ }
+
+ /**
+ * Sets the id of the indexed document.
+ */
+ public UpdateRequestBuilder setId(String id) {
+ request.id(id);
+ return this;
+ }
+
+ /**
+ * Controls the shard routing of the request. The request is routed to the shard
+ * determined by hashing this value instead of the id.
+ */
+ public UpdateRequestBuilder setRouting(String routing) {
+ request.routing(routing);
+ return this;
+ }
+
+ public UpdateRequestBuilder setParent(String parent) {
+ request.parent(parent);
+ return this;
+ }
+
+ /**
+ * The script to execute. Note, make sure not to send a different script each time; instead,
+ * use script params if possible with the same (automatically compiled) script.
+ */
+ public UpdateRequestBuilder setScript(String script) {
+ request.script(script);
+ return this;
+ }
+
+ /**
+ * The language of the script to execute.
+ */
+ public UpdateRequestBuilder setScriptLang(String scriptLang) {
+ request.scriptLang(scriptLang);
+ return this;
+ }
+
+ /**
+ * Sets the script parameters to use with the script.
+ */
+ public UpdateRequestBuilder setScriptParams(Map<String, Object> scriptParams) {
+ request.scriptParams(scriptParams);
+ return this;
+ }
+
+ /**
+ * Add a script parameter.
+ */
+ public UpdateRequestBuilder addScriptParam(String name, Object value) {
+ request.addScriptParam(name, value);
+ return this;
+ }
+
+ /**
+ * Explicitly specify the fields that will be returned. By default, nothing is returned.
+ */
+ public UpdateRequestBuilder setFields(String... fields) {
+ request.fields(fields);
+ return this;
+ }
+
+ /**
+ * Sets the number of retries to attempt if a version conflict occurs because the document
+ * was updated between getting it and updating it. Defaults to 0.
+ */
+ public UpdateRequestBuilder setRetryOnConflict(int retryOnConflict) {
+ request.retryOnConflict(retryOnConflict);
+ return this;
+ }
+
+ /**
+ * Sets the version, which will cause the index operation to only be performed if a matching
+ * version exists and no changes happened on the doc since then.
+ */
+ public UpdateRequestBuilder setVersion(long version) {
+ request.version(version);
+ return this;
+ }
+
+ /**
+ * Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
+ */
+ public UpdateRequestBuilder setVersionType(VersionType versionType) {
+ request.versionType(versionType);
+ return this;
+ }
+
+
+ /**
+ * Should a refresh be executed after this update operation, making the change
+ * searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
+ * to <tt>false</tt>.
+ */
+ public UpdateRequestBuilder setRefresh(boolean refresh) {
+ request.refresh(refresh);
+ return this;
+ }
+
+ /**
+ * Sets the replication type.
+ */
+ public UpdateRequestBuilder setReplicationType(ReplicationType replicationType) {
+ request.replicationType(replicationType);
+ return this;
+ }
+
+ /**
+ * Sets the write consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
+ */
+ public UpdateRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
+ request.consistencyLevel(consistencyLevel);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(IndexRequest indexRequest) {
+ request.doc(indexRequest);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(XContentBuilder source) {
+ request.doc(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(Map source) {
+ request.doc(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(Map source, XContentType contentType) {
+ request.doc(source, contentType);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(String source) {
+ request.doc(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(byte[] source) {
+ request.doc(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(byte[] source, int offset, int length) {
+ request.doc(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified.
+ */
+ public UpdateRequestBuilder setDoc(String field, Object value) {
+ request.doc(field, value);
+ return this;
+ }
+
+ /**
+ * Sets the doc to use for updates when a script is not specified. The doc is provided
+ * as field and value pairs.
+ */
+ public UpdateRequestBuilder setDoc(Object... source) {
+ request.doc(source);
+ return this;
+ }
+
+ /**
+ * Sets the index request to be used if the document does not exist. Without it, a {@link org.elasticsearch.index.engine.DocumentMissingException}
+ * is thrown when the document is missing.
+ */
+ public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
+ request.upsert(indexRequest);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequestBuilder setUpsert(XContentBuilder source) {
+ request.upsert(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequestBuilder setUpsert(Map source) {
+ request.upsert(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequestBuilder setUpsert(Map source, XContentType contentType) {
+ request.upsert(source, contentType);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequestBuilder setUpsert(String source) {
+ request.upsert(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequestBuilder setUpsert(byte[] source) {
+ request.upsert(source);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist.
+ */
+ public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length) {
+ request.upsert(source, offset, length);
+ return this;
+ }
+
+ /**
+ * Sets the doc source of the update request to be used when the document does not exist. The doc
+ * includes field and value pairs.
+ */
+ public UpdateRequestBuilder setUpsert(Object... source) {
+ request.upsert(source);
+ return this;
+ }
+
+ public UpdateRequestBuilder setSource(XContentBuilder source) throws Exception {
+ request.source(source);
+ return this;
+ }
+
+ public UpdateRequestBuilder setSource(byte[] source) throws Exception {
+ request.source(source);
+ return this;
+ }
+
+ public UpdateRequestBuilder setSource(byte[] source, int offset, int length) throws Exception {
+ request.source(source, offset, length);
+ return this;
+ }
+
+ public UpdateRequestBuilder setSource(BytesReference source) throws Exception {
+ request.source(source);
+ return this;
+ }
+
+ /**
+ * Sets whether the specified doc parameter should be used as the upsert document.
+ */
+ public UpdateRequestBuilder setDocAsUpsert(boolean shouldUpsertDoc) {
+ request.docAsUpsert(shouldUpsertDoc);
+ return this;
+ }
+
+ @Override
+ protected void doExecute(ActionListener<UpdateResponse> listener) {
+ ((Client) client).update(request, listener);
+ }
+}
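The builder mirrors UpdateRequest one-to-one; setDocAsUpsert(true) lets the partial doc double as the upsert document, so no separate setUpsert() call is needed. A sketch; names are illustrative:

    client.prepareUpdate("users", "user", "alice")
            .setDoc("last_seen", System.currentTimeMillis())
            .setDocAsUpsert(true) // the same doc is indexed if "alice" does not exist yet
            .execute()
            .actionGet();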
diff --git a/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/src/main/java/org/elasticsearch/action/update/UpdateResponse.java
new file mode 100644
index 0000000..e5e075e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/update/UpdateResponse.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.get.GetResult;
+
+import java.io.IOException;
+
+/**
+ */
+public class UpdateResponse extends ActionResponse {
+
+ private String index;
+ private String id;
+ private String type;
+ private long version;
+ private boolean created;
+ private GetResult getResult;
+
+ public UpdateResponse() {
+
+ }
+
+ public UpdateResponse(String index, String type, String id, long version, boolean created) {
+ this.index = index;
+ this.id = id;
+ this.type = type;
+ this.version = version;
+ this.created = created;
+ }
+
+ /**
+ * The index the document was indexed into.
+ */
+ public String getIndex() {
+ return this.index;
+ }
+
+ /**
+ * The type of the document indexed.
+ */
+ public String getType() {
+ return this.type;
+ }
+
+ /**
+ * The id of the document indexed.
+ */
+ public String getId() {
+ return this.id;
+ }
+
+ /**
+ * Returns the current version of the doc indexed.
+ */
+ public long getVersion() {
+ return this.version;
+ }
+
+ public void setGetResult(GetResult getResult) {
+ this.getResult = getResult;
+ }
+
+ public GetResult getGetResult() {
+ return this.getResult;
+ }
+
+ /**
+ * Returns true if the document was created as a result of an UPSERT operation.
+ */
+ public boolean isCreated() {
+ return this.created;
+
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readSharedString();
+ type = in.readSharedString();
+ id = in.readString();
+ version = in.readLong();
+ created = in.readBoolean();
+ if (in.readBoolean()) {
+ getResult = GetResult.readGetResult(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeSharedString(index);
+ out.writeSharedString(type);
+ out.writeString(id);
+ out.writeLong(version);
+ out.writeBoolean(created);
+ if (getResult == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ getResult.writeTo(out);
+ }
+ }
+}
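getGetResult() is only populated when fields were requested on the update, and isCreated() distinguishes upsert-created documents from in-place updates. A sketch; names are illustrative:

    UpdateResponse response = client.prepareUpdate("users", "user", "alice")
            .setDoc("active", true)
            .setFields("_source") // ask for the updated source back
            .execute()
            .actionGet();
    boolean created = response.isCreated();      // true only when the upsert path created the doc
    GetResult updated = response.getGetResult(); // non-null because fields were requested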
diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
new file mode 100644
index 0000000..28aa938
--- /dev/null
+++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.inject.CreationException;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.jna.Natives;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.log4j.LogConfigurator;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.Locale;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ * A main entry point when starting from the command line.
+ */
+public class Bootstrap {
+
+ private Node node;
+
+ private static volatile Thread keepAliveThread;
+ private static volatile CountDownLatch keepAliveLatch;
+
+ private static Bootstrap bootstrap;
+
+ private void setup(boolean addShutdownHook, Tuple<Settings, Environment> tuple) throws Exception {
+// Loggers.getLogger(Bootstrap.class, tuple.v1().get("name")).info("heap_size {}/{}", JvmStats.jvmStats().mem().heapCommitted(), JvmInfo.jvmInfo().mem().heapMax());
+ if (tuple.v1().getAsBoolean("bootstrap.mlockall", false)) {
+ Natives.tryMlockall();
+ }
+ tuple = setupJmx(tuple);
+
+ NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder().settings(tuple.v1()).loadConfigSettings(false);
+ node = nodeBuilder.build();
+ if (addShutdownHook) {
+ Runtime.getRuntime().addShutdownHook(new Thread() {
+ @Override
+ public void run() {
+ node.close();
+ }
+ });
+ }
+ }
+
+ private static Tuple<Settings, Environment> setupJmx(Tuple<Settings, Environment> tuple) {
+ // We disable JMX by default, since we don't really want the overhead of RMI (and RMI GC...)
+// if (tuple.v1().get(JmxService.SettingsConstants.CREATE_CONNECTOR) == null) {
+// // automatically create the connector if we are bootstrapping
+// Settings updated = settingsBuilder().put(tuple.v1()).put(JmxService.SettingsConstants.CREATE_CONNECTOR, true).build();
+// tuple = new Tuple<Settings, Environment>(updated, tuple.v2());
+// }
+ return tuple;
+ }
+
+ private static void setupLogging(Tuple<Settings, Environment> tuple) {
+ try {
+ tuple.v1().getClassLoader().loadClass("org.apache.log4j.Logger");
+ LogConfigurator.configure(tuple.v1());
+ } catch (ClassNotFoundException e) {
+ // no log4j
+ } catch (NoClassDefFoundError e) {
+ // no log4j
+ } catch (Exception e) {
+ System.err.println("Failed to configure logging...");
+ e.printStackTrace();
+ }
+ }
+
+ private static Tuple<Settings, Environment> initialSettings() {
+ return InternalSettingsPreparer.prepareSettings(EMPTY_SETTINGS, true);
+ }
+
+ /**
+ * hook for JSVC
+ */
+ public void init(String[] args) throws Exception {
+ Tuple<Settings, Environment> tuple = initialSettings();
+ setupLogging(tuple);
+ setup(true, tuple);
+ }
+
+ /**
+ * hook for JSVC
+ */
+ public void start() {
+ node.start();
+ }
+
+ /**
+ * hook for JSVC
+ */
+ public void stop() {
+ node.stop();
+ }
+
+
+ /**
+ * hook for JSVC
+ */
+ public void destroy() {
+ node.close();
+ }
+
+ public static void close(String[] args) {
+ bootstrap.destroy();
+ keepAliveLatch.countDown();
+ }
+
+ public static void main(String[] args) {
+ System.setProperty("es.logger.prefix", "");
+ bootstrap = new Bootstrap();
+ final String pidFile = System.getProperty("es.pidfile", System.getProperty("es-pidfile"));
+
+ if (pidFile != null) {
+ try {
+ File fPidFile = new File(pidFile);
+ if (fPidFile.getParentFile() != null) {
+ FileSystemUtils.mkdirs(fPidFile.getParentFile());
+ }
+ FileOutputStream outputStream = new FileOutputStream(fPidFile);
+ outputStream.write(Long.toString(JvmInfo.jvmInfo().pid()).getBytes());
+ outputStream.close();
+
+ fPidFile.deleteOnExit();
+ } catch (Exception e) {
+ String errorMessage = buildErrorMessage("pid", e);
+ System.err.println(errorMessage);
+ System.err.flush();
+ System.exit(3);
+ }
+ }
+
+ boolean foreground = System.getProperty("es.foreground", System.getProperty("es-foreground")) != null;
+ // handle the wrapper system property; if it's run as a service, don't run in the foreground
+ if (System.getProperty("wrapper.service", "XXX").equalsIgnoreCase("true")) {
+ foreground = false;
+ }
+
+ Tuple<Settings, Environment> tuple = null;
+ try {
+ tuple = initialSettings();
+ setupLogging(tuple);
+ } catch (Exception e) {
+ String errorMessage = buildErrorMessage("Setup", e);
+ System.err.println(errorMessage);
+ System.err.flush();
+ System.exit(3);
+ }
+
+ if (System.getProperty("es.max-open-files", "false").equals("true")) {
+ ESLogger logger = Loggers.getLogger(Bootstrap.class);
+ logger.info("max_open_files [{}]", FileSystemUtils.maxOpenFiles(new File(tuple.v2().workFile(), "open_files")));
+ }
+
+ // warn if running using the client VM
+ if (JvmInfo.jvmInfo().vmName().toLowerCase(Locale.ROOT).contains("client")) {
+ ESLogger logger = Loggers.getLogger(Bootstrap.class);
+ logger.warn("jvm uses the client vm, make sure to run `java` with the server vm for best performance by adding `-server` to the command line");
+ }
+
+ String stage = "Initialization";
+ try {
+ if (!foreground) {
+ Loggers.disableConsoleLogging();
+ System.out.close();
+ }
+ bootstrap.setup(true, tuple);
+
+ stage = "Startup";
+ bootstrap.start();
+
+ if (!foreground) {
+ System.err.close();
+ }
+
+ keepAliveLatch = new CountDownLatch(1);
+ // keep this thread alive (non daemon thread) until we shutdown
+ Runtime.getRuntime().addShutdownHook(new Thread() {
+ @Override
+ public void run() {
+ keepAliveLatch.countDown();
+ }
+ });
+
+ keepAliveThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ keepAliveLatch.await();
+ } catch (InterruptedException e) {
+ // bail out
+ }
+ }
+ }, "elasticsearch[keepAlive/" + Version.CURRENT + "]");
+ keepAliveThread.setDaemon(false);
+ keepAliveThread.start();
+ } catch (Throwable e) {
+ ESLogger logger = Loggers.getLogger(Bootstrap.class);
+ if (bootstrap.node != null) {
+ logger = Loggers.getLogger(Bootstrap.class, bootstrap.node.settings().get("name"));
+ }
+ String errorMessage = buildErrorMessage(stage, e);
+ if (foreground) {
+ System.err.println(errorMessage);
+ System.err.flush();
+ } else {
+ logger.error(errorMessage);
+ }
+ Loggers.disableConsoleLogging();
+ if (logger.isDebugEnabled()) {
+ logger.debug("Exception", e);
+ }
+ System.exit(3);
+ }
+ }
+
+ private static String buildErrorMessage(String stage, Throwable e) {
+ StringBuilder errorMessage = new StringBuilder("{").append(Version.CURRENT).append("}: ");
+ errorMessage.append(stage).append(" Failed ...\n");
+ if (e instanceof CreationException) {
+ CreationException createException = (CreationException) e;
+ Set<String> seenMessages = newHashSet();
+ int counter = 1;
+ for (Message message : createException.getErrorMessages()) {
+ String detailedMessage;
+ if (message.getCause() == null) {
+ detailedMessage = message.getMessage();
+ } else {
+ detailedMessage = ExceptionsHelper.detailedMessage(message.getCause(), true, 0);
+ }
+ if (detailedMessage == null) {
+ detailedMessage = message.getMessage();
+ }
+ if (seenMessages.contains(detailedMessage)) {
+ continue;
+ }
+ seenMessages.add(detailedMessage);
+ errorMessage.append("").append(counter++).append(") ").append(detailedMessage);
+ }
+ } else {
+ errorMessage.append("- ").append(ExceptionsHelper.detailedMessage(e, true, 0));
+ }
+ return errorMessage.toString();
+ }
+}
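
The tail of Bootstrap.main() above uses a small but easy-to-miss trick to keep the process alive: the only remaining non-daemon thread blocks on a CountDownLatch, and a JVM shutdown hook counts that latch down. The non-daemon thread keeps the JVM running after main() returns; the hook releases it on SIGTERM/Ctrl-C so the process can exit cleanly. A stripped-down sketch of the same pattern (class and thread names are illustrative, not from this patch):

import java.util.concurrent.CountDownLatch;

public class KeepAliveDemo {
    public static void main(String[] args) {
        final CountDownLatch keepAliveLatch = new CountDownLatch(1);
        // The shutdown hook counts the latch down when the JVM is asked to
        // exit (e.g. on SIGTERM), releasing the keep-alive thread below.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                keepAliveLatch.countDown();
            }
        });
        // A single non-daemon thread blocked on the latch is enough to keep
        // the JVM alive after main() returns; everything else can be a daemon.
        Thread keepAlive = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    keepAliveLatch.await();
                } catch (InterruptedException e) {
                    // bail out, same as the original
                }
            }
        }, "demo[keepAlive]");
        keepAlive.setDaemon(false);
        keepAlive.start();
    }
}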
diff --git a/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
new file mode 100644
index 0000000..cb35d33
--- /dev/null
+++ b/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+/**
+ * A wrapper around {@link Bootstrap}, just so the process shows up nicely in tools like jps.
+ */
+public class Elasticsearch extends Bootstrap {
+
+ public static void close(String[] args) {
+ Bootstrap.close(args);
+ }
+
+ public static void main(String[] args) {
+ Bootstrap.main(args);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java b/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java
new file mode 100644
index 0000000..a07ec8c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+/**
+ * Same as {@link Elasticsearch}, but runs in the foreground by default (does not close
+ * stdout and stderr).
+ */
+public class ElasticsearchF {
+
+ public static void close(String[] args) {
+ Bootstrap.close(args);
+ }
+
+ public static void main(String[] args) {
+ System.setProperty("es.foreground", "yes");
+ Bootstrap.main(args);
+ }
+}
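
Bootstrap, Elasticsearch and ElasticsearchF thus give three entry points that differ only in how the foreground flag ends up being derived. The decision itself lives in Bootstrap.main() (the es.foreground/es-foreground and wrapper.service handling earlier in this patch); a self-contained sketch of that logic, under an illustrative class name:

public class ForegroundDecisionDemo {

    // Mirrors the decision in Bootstrap.main(): either spelling of the
    // property turns the foreground on, and running under the service
    // wrapper turns it back off regardless.
    static boolean foreground() {
        boolean foreground = System.getProperty("es.foreground",
                System.getProperty("es-foreground")) != null;
        if (System.getProperty("wrapper.service", "XXX").equalsIgnoreCase("true")) {
            foreground = false;
        }
        return foreground;
    }

    public static void main(String[] args) {
        System.setProperty("es.foreground", "yes"); // what ElasticsearchF does
        System.out.println("foreground: " + foreground());
    }
}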
diff --git a/src/main/java/org/elasticsearch/bulk/udp/BulkUdpModule.java b/src/main/java/org/elasticsearch/bulk/udp/BulkUdpModule.java
new file mode 100644
index 0000000..d195b56
--- /dev/null
+++ b/src/main/java/org/elasticsearch/bulk/udp/BulkUdpModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bulk.udp;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ */
+public class BulkUdpModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(BulkUdpService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/bulk/udp/BulkUdpService.java b/src/main/java/org/elasticsearch/bulk/udp/BulkUdpService.java
new file mode 100644
index 0000000..b299c15
--- /dev/null
+++ b/src/main/java/org/elasticsearch/bulk/udp/BulkUdpService.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bulk.udp;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkProcessor;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.bytes.ChannelBufferBytesReference;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.PortsRange;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory;
+
+import java.io.IOException;
+import java.net.BindException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ */
+public class BulkUdpService extends AbstractLifecycleComponent<BulkUdpService> {
+
+ private final Client client;
+ private final NetworkService networkService;
+
+ private final boolean enabled;
+
+ final String host;
+ final String port;
+
+ final ByteSizeValue receiveBufferSize;
+ final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory;
+ final int bulkActions;
+ final ByteSizeValue bulkSize;
+ final TimeValue flushInterval;
+ final int concurrentRequests;
+
+ private BulkProcessor bulkProcessor;
+ private ConnectionlessBootstrap bootstrap;
+ private Channel channel;
+
+ @Inject
+ public BulkUdpService(Settings settings, Client client, NetworkService networkService) {
+ super(settings);
+ this.client = client;
+ this.networkService = networkService;
+
+ this.host = componentSettings.get("host");
+ this.port = componentSettings.get("port", "9700-9800");
+
+ this.bulkActions = componentSettings.getAsInt("bulk_actions", 1000);
+ this.bulkSize = componentSettings.getAsBytesSize("bulk_size", new ByteSizeValue(5, ByteSizeUnit.MB));
+ this.flushInterval = componentSettings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
+ this.concurrentRequests = componentSettings.getAsInt("concurrent_requests", 4);
+
+ this.receiveBufferSize = componentSettings.getAsBytesSize("receive_buffer_size", new ByteSizeValue(10, ByteSizeUnit.MB));
+ this.receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory(componentSettings.getAsBytesSize("receive_predictor_size", receiveBufferSize).bytesAsInt());
+
+ this.enabled = componentSettings.getAsBoolean("enabled", false);
+
+ logger.debug("using enabled [{}], host [{}], port [{}], bulk_actions [{}], bulk_size [{}], flush_interval [{}], concurrent_requests [{}]",
+ enabled, host, port, bulkActions, bulkSize, flushInterval, concurrentRequests);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ if (!enabled) {
+ return;
+ }
+ bulkProcessor = BulkProcessor.builder(client, new BulkListener())
+ .setBulkActions(bulkActions)
+ .setBulkSize(bulkSize)
+ .setFlushInterval(flushInterval)
+ .setConcurrentRequests(concurrentRequests)
+ .build();
+
+ bootstrap = new ConnectionlessBootstrap(new NioDatagramChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, "bulk_udp_worker"))));
+
+ bootstrap.setOption("receiveBufferSize", receiveBufferSize.bytesAsInt());
+ bootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+
+ // Disable broadcast
+ bootstrap.setOption("broadcast", "false");
+
+ bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(new Handler());
+ }
+ });
+
+ InetAddress hostAddressX;
+ try {
+ hostAddressX = networkService.resolveBindHostAddress(host);
+ } catch (IOException e) {
+ logger.warn("failed to resolve host {}", e, host);
+ return;
+ }
+ final InetAddress hostAddress = hostAddressX;
+
+ PortsRange portsRange = new PortsRange(port);
+ final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
+ boolean success = portsRange.iterate(new PortsRange.PortCallback() {
+ @Override
+ public boolean onPortNumber(int portNumber) {
+ try {
+ channel = bootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
+ } catch (Exception e) {
+ lastException.set(e);
+ return false;
+ }
+ return true;
+ }
+ });
+ if (!success) {
+ logger.warn("failed to bind to {}/{}", lastException.get(), hostAddress, port);
+ return;
+ }
+
+ logger.info("address {}", channel.getLocalAddress());
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ if (!enabled) {
+ return;
+ }
+ if (channel != null) {
+ channel.close().awaitUninterruptibly();
+ }
+ if (bootstrap != null) {
+ bootstrap.releaseExternalResources();
+ }
+ bulkProcessor.close();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ class Handler extends SimpleChannelUpstreamHandler {
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+ ChannelBuffer buffer = (ChannelBuffer) e.getMessage();
+ logger.trace("received message size [{}]", buffer.readableBytes());
+ try {
+ bulkProcessor.add(new ChannelBufferBytesReference(buffer), false, null, null);
+ } catch (Exception e1) {
+ logger.warn("failed to execute bulk request", e1);
+ }
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+ if (e.getCause() instanceof BindException) {
+ // ignore, this happens when we retry binding to several ports; it's fine if we fail...
+ return;
+ }
+ logger.warn("failure caught", e.getCause());
+ }
+ }
+
+ class BulkListener implements BulkProcessor.Listener {
+
+ @Override
+ public void beforeBulk(long executionId, BulkRequest request) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] executing [{}]/[{}]", executionId, request.numberOfActions(), new ByteSizeValue(request.estimatedSizeInBytes()));
+ }
+ }
+
+ @Override
+ public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] executed [{}]/[{}], took [{}]", executionId, request.numberOfActions(), new ByteSizeValue(request.estimatedSizeInBytes()), response.getTook());
+ }
+ if (response.hasFailures()) {
+ logger.warn("[{}] failed to execute bulk request: {}", executionId, response.buildFailureMessage());
+ }
+ }
+
+ @Override
+ public void afterBulk(long executionId, BulkRequest request, Throwable e) {
+ logger.warn("[{}] failed to execute bulk request", e, executionId);
+ }
+ }
+}
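
BulkUdpService is off by default ("enabled" defaults to false) and, when enabled, binds the first free UDP port in the 9700-9800 range and feeds each received datagram into a BulkProcessor as raw bulk-format bytes. Given the component's package, the settings above should resolve under the bulk.udp.* prefix (e.g. bulk.udp.enabled: true), though the exact keys are an assumption here. A minimal sender sketch in plain JDK code; the index and type names are made up:

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.nio.charset.Charset;

public class BulkUdpSenderDemo {
    public static void main(String[] args) throws Exception {
        // Standard bulk format: an action line followed by a source line,
        // each terminated by '\n'.
        String payload =
                "{\"index\":{\"_index\":\"test\",\"_type\":\"doc\"}}\n" +
                "{\"field\":\"value\"}\n";
        byte[] bytes = payload.getBytes(Charset.forName("UTF-8"));
        DatagramSocket socket = new DatagramSocket();
        try {
            // 9700 is the first port in the service's default 9700-9800 range.
            socket.send(new DatagramPacket(bytes, bytes.length,
                    InetAddress.getByName("localhost"), 9700));
        } finally {
            socket.close();
        }
    }
}

Note that UDP is fire-and-forget: the BulkListener above logs failures on the node, but the sender gets no acknowledgment.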
diff --git a/src/main/java/org/elasticsearch/cache/NodeCache.java b/src/main/java/org/elasticsearch/cache/NodeCache.java
new file mode 100644
index 0000000..e6a6e13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/NodeCache.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache;
+
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class NodeCache extends AbstractComponent implements ClusterStateListener {
+
+ private final ClusterService clusterService;
+
+ private final ByteBufferCache byteBufferCache;
+
+ @Inject
+ public NodeCache(Settings settings, ByteBufferCache byteBufferCache, ClusterService clusterService) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.byteBufferCache = byteBufferCache;
+ clusterService.add(this);
+ }
+
+ public void close() {
+ clusterService.remove(this);
+ byteBufferCache.close();
+ }
+
+ public ByteBufferCache byteBuffer() {
+ return byteBufferCache;
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cache/NodeCacheModule.java b/src/main/java/org/elasticsearch/cache/NodeCacheModule.java
new file mode 100644
index 0000000..36363ab
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/NodeCacheModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache;
+
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class NodeCacheModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public NodeCacheModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(NodeCache.class).asEagerSingleton();
+ bind(ByteBufferCache.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cache/memory/ByteBufferCache.java b/src/main/java/org/elasticsearch/cache/memory/ByteBufferCache.java
new file mode 100644
index 0000000..02f0926
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/memory/ByteBufferCache.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.memory;
+
+import org.apache.lucene.store.bytebuffer.ByteBufferAllocator;
+import org.apache.lucene.store.bytebuffer.CachingByteBufferAllocator;
+import org.apache.lucene.store.bytebuffer.PlainByteBufferAllocator;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ *
+ */
+public class ByteBufferCache extends AbstractComponent implements ByteBufferAllocator {
+
+ private final boolean direct;
+
+ private final ByteSizeValue smallBufferSize;
+ private final ByteSizeValue largeBufferSize;
+
+ private final ByteSizeValue smallCacheSize;
+ private final ByteSizeValue largeCacheSize;
+
+ private final ByteBufferAllocator allocator;
+
+ public ByteBufferCache() {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ // really, for testing...
+ public ByteBufferCache(int bufferSizeInBytes, int cacheSizeInBytes, boolean direct) {
+ this(ImmutableSettings.settingsBuilder()
+ .put("cache.memory.small_buffer_size", bufferSizeInBytes)
+ .put("cache.memory.small_cache_size", cacheSizeInBytes)
+ .put("cache.memory.large_buffer_size", bufferSizeInBytes)
+ .put("cache.memory.large_cache_size", cacheSizeInBytes)
+ .put("cache.memory.direct", direct).build());
+ }
+
+ @Inject
+ public ByteBufferCache(Settings settings) {
+ super(settings);
+
+ this.direct = componentSettings.getAsBoolean("direct", true);
+ this.smallBufferSize = componentSettings.getAsBytesSize("small_buffer_size", new ByteSizeValue(1, ByteSizeUnit.KB));
+ this.largeBufferSize = componentSettings.getAsBytesSize("large_buffer_size", new ByteSizeValue(1, ByteSizeUnit.MB));
+ this.smallCacheSize = componentSettings.getAsBytesSize("small_cache_size", new ByteSizeValue(10, ByteSizeUnit.MB));
+ this.largeCacheSize = componentSettings.getAsBytesSize("large_cache_size", new ByteSizeValue(500, ByteSizeUnit.MB));
+
+ if (smallCacheSize.bytes() == 0 || largeCacheSize.bytes() == 0) {
+ this.allocator = new PlainByteBufferAllocator(direct, (int) smallBufferSize.bytes(), (int) largeBufferSize.bytes());
+ } else {
+ this.allocator = new CachingByteBufferAllocator(direct, (int) smallBufferSize.bytes(), (int) largeBufferSize.bytes(), (int) smallCacheSize.bytes(), (int) largeCacheSize.bytes());
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("using bytebuffer cache with small_buffer_size [{}], large_buffer_size [{}], small_cache_size [{}], large_cache_size [{}], direct [{}]",
+ smallBufferSize, largeBufferSize, smallCacheSize, largeCacheSize, direct);
+ }
+ }
+
+ public boolean direct() {
+ return this.direct;
+ }
+
+ public void close() {
+ allocator.close();
+ }
+
+ @Override
+ public int sizeInBytes(Type type) {
+ return allocator.sizeInBytes(type);
+ }
+
+ @Override
+ public ByteBuffer allocate(Type type) throws IOException {
+ return allocator.allocate(type);
+ }
+
+ @Override
+ public void release(ByteBuffer buffer) {
+ allocator.release(buffer);
+ }
+}
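
The constructor logic above selects a PlainByteBufferAllocator (no pooling) when either cache size is zero, and a CachingByteBufferAllocator otherwise. A small usage sketch built on the testing constructor; it assumes the Type enum of the underlying Lucene ByteBufferAllocator exposes SMALL and LARGE:

import org.apache.lucene.store.bytebuffer.ByteBufferAllocator;
import org.elasticsearch.cache.memory.ByteBufferCache;

import java.nio.ByteBuffer;

public class ByteBufferCacheDemo {
    public static void main(String[] args) throws Exception {
        // 1 KB buffers, 16 KB cache, heap (non-direct) buffers. Because the
        // cache size is non-zero, the caching allocator is selected.
        ByteBufferCache cache = new ByteBufferCache(1024, 16 * 1024, false);
        ByteBuffer buffer = cache.allocate(ByteBufferAllocator.Type.SMALL);
        try {
            buffer.put((byte) 42);
        } finally {
            // Releasing hands the buffer back to the allocator so a later
            // allocate() can reuse it instead of allocating fresh memory.
            cache.release(buffer);
        }
        cache.close();
    }
}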
diff --git a/src/main/java/org/elasticsearch/cache/recycler/CacheRecycler.java b/src/main/java/org/elasticsearch/cache/recycler/CacheRecycler.java
new file mode 100644
index 0000000..4eb12a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/recycler/CacheRecycler.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import com.carrotsearch.hppc.*;
+import com.google.common.base.Strings;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+
+import java.util.Locale;
+
+import static org.elasticsearch.common.recycler.Recyclers.*;
+
+@SuppressWarnings("unchecked")
+public class CacheRecycler extends AbstractComponent {
+
+ public final Recycler<ObjectObjectOpenHashMap> hashMap;
+ public final Recycler<ObjectOpenHashSet> hashSet;
+ public final Recycler<DoubleObjectOpenHashMap> doubleObjectMap;
+ public final Recycler<LongObjectOpenHashMap> longObjectMap;
+ public final Recycler<LongLongOpenHashMap> longLongMap;
+ public final Recycler<IntIntOpenHashMap> intIntMap;
+ public final Recycler<FloatIntOpenHashMap> floatIntMap;
+ public final Recycler<DoubleIntOpenHashMap> doubleIntMap;
+ public final Recycler<LongIntOpenHashMap> longIntMap;
+ public final Recycler<ObjectIntOpenHashMap> objectIntMap;
+ public final Recycler<IntObjectOpenHashMap> intObjectMap;
+ public final Recycler<ObjectFloatOpenHashMap> objectFloatMap;
+
+ public void close() {
+ hashMap.close();
+ hashSet.close();
+ doubleObjectMap.close();
+ longObjectMap.close();
+ longLongMap.close();
+ intIntMap.close();
+ floatIntMap.close();
+ doubleIntMap.close();
+ longIntMap.close();
+ objectIntMap.close();
+ intObjectMap.close();
+ objectFloatMap.close();
+ }
+
+ @Inject
+ public CacheRecycler(Settings settings) {
+ super(settings);
+ final Type type = Type.parse(settings.get("type"));
+ int limit = settings.getAsInt("limit", 10);
+ int smartSize = settings.getAsInt("smart_size", 1024);
+ final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
+
+ hashMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectObjectOpenHashMap>() {
+ @Override
+ public ObjectObjectOpenHashMap newInstance(int sizing) {
+ return new ObjectObjectOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(ObjectObjectOpenHashMap value) {
+ value.clear();
+ }
+ });
+ hashSet = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectOpenHashSet>() {
+ @Override
+ public ObjectOpenHashSet newInstance(int sizing) {
+ return new ObjectOpenHashSet(size(sizing), 0.5f);
+ }
+
+ @Override
+ public void clear(ObjectOpenHashSet value) {
+ value.clear();
+ }
+ });
+ doubleObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleObjectOpenHashMap>() {
+ @Override
+ public DoubleObjectOpenHashMap newInstance(int sizing) {
+ return new DoubleObjectOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(DoubleObjectOpenHashMap value) {
+ value.clear();
+ }
+ });
+ longObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongObjectOpenHashMap>() {
+ @Override
+ public LongObjectOpenHashMap newInstance(int sizing) {
+ return new LongObjectOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(LongObjectOpenHashMap value) {
+ value.clear();
+ }
+ });
+ longLongMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongLongOpenHashMap>() {
+ @Override
+ public LongLongOpenHashMap newInstance(int sizing) {
+ return new LongLongOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(LongLongOpenHashMap value) {
+ value.clear();
+ }
+ });
+ intIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntIntOpenHashMap>() {
+ @Override
+ public IntIntOpenHashMap newInstance(int sizing) {
+ return new IntIntOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(IntIntOpenHashMap value) {
+ value.clear();
+ }
+ });
+ floatIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<FloatIntOpenHashMap>() {
+ @Override
+ public FloatIntOpenHashMap newInstance(int sizing) {
+ return new FloatIntOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(FloatIntOpenHashMap value) {
+ value.clear();
+ }
+ });
+ doubleIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleIntOpenHashMap>() {
+ @Override
+ public DoubleIntOpenHashMap newInstance(int sizing) {
+ return new DoubleIntOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(DoubleIntOpenHashMap value) {
+ value.clear();
+ }
+ });
+ longIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongIntOpenHashMap>() {
+ @Override
+ public LongIntOpenHashMap newInstance(int sizing) {
+ return new LongIntOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(LongIntOpenHashMap value) {
+ value.clear();
+ }
+ });
+ objectIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectIntOpenHashMap>() {
+ @Override
+ public ObjectIntOpenHashMap newInstance(int sizing) {
+ return new ObjectIntOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(ObjectIntOpenHashMap value) {
+ value.clear();
+ }
+ });
+ intObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntObjectOpenHashMap>() {
+ @Override
+ public IntObjectOpenHashMap newInstance(int sizing) {
+ return new IntObjectOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(IntObjectOpenHashMap value) {
+ value.clear();
+ }
+ });
+ objectFloatMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectFloatOpenHashMap>() {
+ @Override
+ public ObjectFloatOpenHashMap newInstance(int sizing) {
+ return new ObjectFloatOpenHashMap(size(sizing));
+ }
+
+ @Override
+ public void clear(ObjectFloatOpenHashMap value) {
+ value.clear();
+ }
+ });
+ }
+
+ public <K, V> Recycler.V<ObjectObjectOpenHashMap<K, V>> hashMap(int sizing) {
+ return (Recycler.V) hashMap.obtain(sizing);
+ }
+
+ public <T> Recycler.V<ObjectOpenHashSet<T>> hashSet(int sizing) {
+ return (Recycler.V) hashSet.obtain(sizing);
+ }
+
+ public <T> Recycler.V<DoubleObjectOpenHashMap<T>> doubleObjectMap(int sizing) {
+ return (Recycler.V) doubleObjectMap.obtain(sizing);
+ }
+
+ public <T> Recycler.V<LongObjectOpenHashMap<T>> longObjectMap(int sizing) {
+ return (Recycler.V) longObjectMap.obtain(sizing);
+ }
+
+ public Recycler.V<LongLongOpenHashMap> longLongMap(int sizing) {
+ return longLongMap.obtain(sizing);
+ }
+
+ public Recycler.V<IntIntOpenHashMap> intIntMap(int sizing) {
+ return intIntMap.obtain(sizing);
+ }
+
+ public Recycler.V<FloatIntOpenHashMap> floatIntMap(int sizing) {
+ return floatIntMap.obtain(sizing);
+ }
+
+ public Recycler.V<DoubleIntOpenHashMap> doubleIntMap(int sizing) {
+ return doubleIntMap.obtain(sizing);
+ }
+
+ public Recycler.V<LongIntOpenHashMap> longIntMap(int sizing) {
+ return longIntMap.obtain(sizing);
+ }
+
+ public <T> Recycler.V<ObjectIntOpenHashMap<T>> objectIntMap(int sizing) {
+ return (Recycler.V) objectIntMap.obtain(sizing);
+ }
+
+ public <T> Recycler.V<IntObjectOpenHashMap<T>> intObjectMap(int sizing) {
+ return (Recycler.V) intObjectMap.obtain(sizing);
+ }
+
+ public <T> Recycler.V<ObjectFloatOpenHashMap<T>> objectFloatMap(int sizing) {
+ return (Recycler.V) objectFloatMap.obtain(sizing);
+ }
+
+ static int size(int sizing) {
+ return sizing > 0 ? sizing : 256;
+ }
+
+ private <T> Recycler<T> build(Type type, int limit, int smartSize, int availableProcessors, Recycler.C<T> c) {
+ Recycler<T> recycler;
+ try {
+ recycler = type.build(c, limit, availableProcessors);
+ if (smartSize > 0) {
+ recycler = sizing(recycler, none(c), smartSize);
+ }
+ } catch (IllegalArgumentException ex) {
+ throw new ElasticsearchIllegalArgumentException("no type support [" + type + "] for recycler");
+ }
+
+ return recycler;
+ }
+
+ public static enum Type {
+ SOFT_THREAD_LOCAL {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+ return threadLocal(softFactory(dequeFactory(c, limit)));
+ }
+ },
+ THREAD_LOCAL {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+ return threadLocal(dequeFactory(c, limit));
+ }
+ },
+ QUEUE {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+ return concurrentDeque(c, limit);
+ }
+ },
+ SOFT_CONCURRENT {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+ return concurrent(softFactory(dequeFactory(c, limit)), availableProcessors);
+ }
+ },
+ CONCURRENT {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+ return concurrent(dequeFactory(c, limit), availableProcessors);
+ }
+ },
+ NONE {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+ return none(c);
+ }
+ };
+
+ public static Type parse(String type) {
+ if (Strings.isNullOrEmpty(type)) {
+ return SOFT_CONCURRENT;
+ }
+ try {
+ return Type.valueOf(type.toUpperCase(Locale.ROOT));
+ } catch (IllegalArgumentException e) {
+ throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]");
+ }
+ }
+
+ abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors);
+ }
+}
\ No newline at end of file
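
Every CacheRecycler accessor follows the same obtain/use/release cycle: obtain(sizing) hands out a Recycler.V wrapper, v() exposes the pooled collection, and returning the wrapper runs the matching Recycler.C.clear() so the next caller starts from an empty structure. A minimal sketch; the release() call is an assumption based on this version's Releasable contract:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.ImmutableSettings;

public class CacheRecyclerDemo {
    public static void main(String[] args) {
        // With empty settings, Type.parse(null) falls back to SOFT_CONCURRENT.
        CacheRecycler recycler = new CacheRecycler(ImmutableSettings.Builder.EMPTY_SETTINGS);
        // A sizing <= 0 falls back to the default capacity of 256
        // (see the size(int) helper above).
        Recycler.V<ObjectIntOpenHashMap<String>> v = recycler.objectIntMap(-1);
        try {
            ObjectIntOpenHashMap<String> counts = v.v();
            counts.addTo("hits", 1);
        } finally {
            v.release(); // clears the map and returns it to the pool
        }
        recycler.close();
    }
}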
diff --git a/src/main/java/org/elasticsearch/cache/recycler/CacheRecyclerModule.java b/src/main/java/org/elasticsearch/cache/recycler/CacheRecyclerModule.java
new file mode 100644
index 0000000..2c3049f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/recycler/CacheRecyclerModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ */
+public class CacheRecyclerModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public CacheRecyclerModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(CacheRecycler.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cache/recycler/DefaultPageCacheRecyclerModule.java b/src/main/java/org/elasticsearch/cache/recycler/DefaultPageCacheRecyclerModule.java
new file mode 100644
index 0000000..1eafcc7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/recycler/DefaultPageCacheRecyclerModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ */
+public class DefaultPageCacheRecyclerModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public DefaultPageCacheRecyclerModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(PageCacheRecycler.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java
new file mode 100644
index 0000000..12cef79
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import com.google.common.base.Strings;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+import java.util.Locale;
+
+import static org.elasticsearch.common.recycler.Recyclers.*;
+
+/** A recycler of fixed-size pages. */
+public class PageCacheRecycler extends AbstractComponent {
+
+ public static final String TYPE = "page.type";
+ public static final String LIMIT_HEAP = "page.limit.heap";
+ public static final String LIMIT_PER_THREAD = "page.limit.per_thread";
+ public static final String WEIGHT = "page.weight";
+
+ private final Recycler<byte[]> bytePage;
+ private final Recycler<int[]> intPage;
+ private final Recycler<long[]> longPage;
+ private final Recycler<double[]> doublePage;
+ private final Recycler<Object[]> objectPage;
+
+ public void close() {
+ bytePage.close();
+ intPage.close();
+ longPage.close();
+ doublePage.close();
+ objectPage.close();
+ }
+
+ private static int maximumSearchThreadPoolSize(ThreadPool threadPool, Settings settings) {
+ ThreadPool.Info searchThreadPool = threadPool.info(ThreadPool.Names.SEARCH);
+ assert searchThreadPool != null;
+ final int maxSize = searchThreadPool.getMax();
+ if (maxSize <= 0) {
+ // happens with cached thread pools, let's assume there are at most 3x ${number of processors} threads
+ return 3 * EsExecutors.boundedNumberOfProcessors(settings);
+ } else {
+ return maxSize;
+ }
+ }
+
+ // return the maximum number of pages that may be cached depending on
+ // - limit: the total amount of memory available
+ // - pageSize: the size of a single page
+ // - weight: the weight for this data type
+ // - totalWeight: the sum of all weights
+ private static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
+ return (int) (weight / totalWeight * limit / pageSize);
+ }
+
+ @Inject
+ public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
+ super(settings);
+ final Type type = Type.parse(componentSettings.get(TYPE));
+ final long limit = componentSettings.getAsMemory(LIMIT_HEAP, "10%").bytes();
+ final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
+ final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
+
+ // We have a global amount of memory that we need to divide across data types.
+ // Since some types are more useful than other ones we give them different weights.
+ // Trying to store all of them in a single stack would be problematic because, e.g.,
+ // a workload could fill the recycler with only byte[] pages and then another
+ // workload that would work with double[] pages couldn't recycle them because there
+ // is no space left in the stack/queue. LRU/LFU policies are not an option either
+ // because they would make obtain/release too costly: we really need constant-time
+ // operations.
+ // Ultimately a better solution would be to only store one kind of data and have the
+ // ability to interpret it either as a source of bytes, doubles, longs, etc., e.g. thanks
+ // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
+ // that would need to be addressed such as garbage collection of native memory or safety
+ // of Unsafe writes.
+ final double bytesWeight = componentSettings.getAsDouble(WEIGHT + ".bytes", 1d);
+ final double intsWeight = componentSettings.getAsDouble(WEIGHT + ".ints", 1d);
+ final double longsWeight = componentSettings.getAsDouble(WEIGHT + ".longs", 1d);
+ final double doublesWeight = componentSettings.getAsDouble(WEIGHT + ".doubles", 1d);
+ // object pages are less useful to us so we give them a lower weight by default
+ final double objectsWeight = componentSettings.getAsDouble(WEIGHT + ".objects", 0.1d);
+
+ final double totalWeight = bytesWeight + intsWeight + longsWeight + doublesWeight + objectsWeight;
+
+ bytePage = build(type, maxCount(limit, BigArrays.BYTE_PAGE_SIZE, bytesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<byte[]>() {
+ @Override
+ public byte[] newInstance(int sizing) {
+ return new byte[BigArrays.BYTE_PAGE_SIZE];
+ }
+ @Override
+ public void clear(byte[] value) {}
+ });
+ intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<int[]>() {
+ @Override
+ public int[] newInstance(int sizing) {
+ return new int[BigArrays.INT_PAGE_SIZE];
+ }
+ @Override
+ public void clear(int[] value) {}
+ });
+ longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<long[]>() {
+ @Override
+ public long[] newInstance(int sizing) {
+ return new long[BigArrays.LONG_PAGE_SIZE];
+ }
+ @Override
+ public void clear(long[] value) {}
+ });
+ doublePage = build(type, maxCount(limit, BigArrays.DOUBLE_PAGE_SIZE, doublesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<double[]>() {
+ @Override
+ public double[] newInstance(int sizing) {
+ return new double[BigArrays.DOUBLE_PAGE_SIZE];
+ }
+ @Override
+ public void clear(double[] value) {}
+ });
+ objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<Object[]>() {
+ @Override
+ public Object[] newInstance(int sizing) {
+ return new Object[BigArrays.OBJECT_PAGE_SIZE];
+ }
+ @Override
+ public void clear(Object[] value) {
+ Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array
+ }
+ });
+ }
+
+ public Recycler.V<byte[]> bytePage(boolean clear) {
+ final Recycler.V<byte[]> v = bytePage.obtain();
+ if (v.isRecycled() && clear) {
+ Arrays.fill(v.v(), (byte) 0);
+ }
+ return v;
+ }
+
+ public Recycler.V<int[]> intPage(boolean clear) {
+ final Recycler.V<int[]> v = intPage.obtain();
+ if (v.isRecycled() && clear) {
+ Arrays.fill(v.v(), 0);
+ }
+ return v;
+ }
+
+ public Recycler.V<long[]> longPage(boolean clear) {
+ final Recycler.V<long[]> v = longPage.obtain();
+ if (v.isRecycled() && clear) {
+ Arrays.fill(v.v(), 0L);
+ }
+ return v;
+ }
+
+ public Recycler.V<double[]> doublePage(boolean clear) {
+ final Recycler.V<double[]> v = doublePage.obtain();
+ if (v.isRecycled() && clear) {
+ Arrays.fill(v.v(), 0d);
+ }
+ return v;
+ }
+
+ public Recycler.V<Object[]> objectPage() {
+ // object pages are cleared on release anyway
+ return objectPage.obtain();
+ }
+
+ private static <T> Recycler<T> build(Type type, int limit, int estimatedThreadPoolSize, int availableProcessors, Recycler.C<T> c) {
+ final Recycler<T> recycler;
+ if (limit == 0) {
+ recycler = none(c);
+ } else {
+ recycler = type.build(c, limit, estimatedThreadPoolSize, availableProcessors);
+ }
+ return recycler;
+ }
+
+ public static enum Type {
+ SOFT_THREAD_LOCAL {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+ return threadLocal(softFactory(dequeFactory(c, limit / estimatedThreadPoolSize)));
+ }
+ },
+ THREAD_LOCAL {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+ return threadLocal(dequeFactory(c, limit / estimatedThreadPoolSize));
+ }
+ },
+ QUEUE {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+ return concurrentDeque(c, limit);
+ }
+ },
+ SOFT_CONCURRENT {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+ return concurrent(softFactory(dequeFactory(c, limit / availableProcessors)), availableProcessors);
+ }
+ },
+ CONCURRENT {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+ return concurrent(dequeFactory(c, limit / availableProcessors), availableProcessors);
+ }
+ },
+ NONE {
+ @Override
+ <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+ return none(c);
+ }
+ };
+
+ public static Type parse(String type) {
+ if (Strings.isNullOrEmpty(type)) {
+ return SOFT_CONCURRENT;
+ }
+ try {
+ return Type.valueOf(type.toUpperCase(Locale.ROOT));
+ } catch (IllegalArgumentException e) {
+ throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]");
+ }
+ }
+
+ abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors);
+ }
+}
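
The budgeting in maxCount() is plain proportional division: each data type receives weight/totalWeight of the byte limit, converted into a page count. A worked sketch with the default weights; the 100 MB limit and the 16 KB byte-page size are illustrative assumptions (the real values come from page.limit.heap and the BigArrays page-size constants):

public class PageBudgetDemo {
    // Same formula as PageCacheRecycler.maxCount(): a type's share of the
    // byte limit, divided by its page size, gives the number of cacheable pages.
    static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
        return (int) (weight / totalWeight * limit / pageSize);
    }

    public static void main(String[] args) {
        long limit = 100 * 1024 * 1024; // assume page.limit.heap resolved to 100 MB
        // Default weights from the constructor above:
        // bytes/ints/longs/doubles = 1.0 each, objects = 0.1.
        double totalWeight = 1d + 1d + 1d + 1d + 0.1d;
        System.out.println("byte[] pages:   " + maxCount(limit, 16 * 1024, 1d, totalWeight));
        System.out.println("Object[] pages: " + maxCount(limit, 2 * 1024, 0.1d, totalWeight));
    }
}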
diff --git a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecyclerModule.java b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecyclerModule.java
new file mode 100644
index 0000000..4e42c48
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecyclerModule.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+import static org.elasticsearch.common.inject.Modules.createModule;
+
+/**
+ */
+public class PageCacheRecyclerModule extends AbstractModule implements SpawnModules {
+
+ public static final String CACHE_IMPL = "cache.recycler.page_cache_impl";
+
+ private final Settings settings;
+
+ public PageCacheRecyclerModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(createModule(settings.getAsClass(CACHE_IMPL, DefaultPageCacheRecyclerModule.class), settings));
+ }
+}
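
Because PageCacheRecyclerModule implements SpawnModules and reads the implementation class from settings, the page-cache wiring is pluggable: pointing cache.recycler.page_cache_impl at another module class replaces DefaultPageCacheRecyclerModule wholesale. A sketch of the override; MyPageCacheRecyclerModule is hypothetical:

import org.elasticsearch.cache.recycler.PageCacheRecyclerModule;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class CustomRecyclerWiringDemo {
    public static void main(String[] args) {
        // spawnModules() will instantiate the configured class instead of
        // DefaultPageCacheRecyclerModule when the node builds its injector.
        Settings settings = ImmutableSettings.settingsBuilder()
                .put(PageCacheRecyclerModule.CACHE_IMPL,
                        "org.example.MyPageCacheRecyclerModule") // hypothetical
                .build();
        new PageCacheRecyclerModule(settings);
    }
}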
diff --git a/src/main/java/org/elasticsearch/client/AdminClient.java b/src/main/java/org/elasticsearch/client/AdminClient.java
new file mode 100644
index 0000000..1fc3261
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/AdminClient.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+/**
+ * Administrative actions/operations against the cluster or the indices.
+ *
+ * @see org.elasticsearch.client.Client#admin()
+ */
+public interface AdminClient {
+
+ /**
+ * A client for performing actions/operations against the cluster.
+ */
+ ClusterAdminClient cluster();
+
+ /**
+ * A client for performing actions/operations against the indices.
+ */
+ IndicesAdminClient indices();
+}
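
In practice the split means cluster-level operations (health, state, node stats) hang off cluster(), while index-level operations (create, mapping, exists) hang off indices(). A short sketch against the 1.0-era builder API, assuming a Client obtained elsewhere (e.g. from node.client() or a TransportClient):

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;

public class AdminClientDemo {
    static void printStatus(Client client) {
        // Cluster branch: ask for overall cluster health.
        ClusterHealthResponse health = client.admin().cluster()
                .prepareHealth()
                .execute().actionGet();
        System.out.println("cluster status: " + health.getStatus());

        // Indices branch: check whether an index exists.
        boolean exists = client.admin().indices()
                .prepareExists("test")
                .execute().actionGet()
                .isExists();
        System.out.println("index 'test' exists: " + exists);
    }
}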
diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java
new file mode 100644
index 0000000..b108db8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/Client.java
@@ -0,0 +1,558 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteRequestBuilder;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.explain.ExplainRequestBuilder;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder;
+import org.elasticsearch.action.percolate.*;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.termvector.*;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * A client provides a one-stop interface for performing actions/operations against the cluster.
+ * <p/>
+ * <p>All operations performed are asynchronous by nature. Each action/operation has two flavors, the first
+ * simply returns an {@link org.elasticsearch.action.ActionFuture}, while the second accepts an
+ * {@link org.elasticsearch.action.ActionListener}.
+ * <p/>
+ * <p>A client can either be retrieved from a started {@link org.elasticsearch.node.Node}, or connected remotely
+ * to one or more nodes using {@link org.elasticsearch.client.transport.TransportClient}.
+ *
+ * @see org.elasticsearch.node.Node#client()
+ * @see org.elasticsearch.client.transport.TransportClient
+ */
+public interface Client {
+
+ /**
+ * Closes the client.
+ */
+ void close();
+
+ /**
+ * The admin client that can be used to perform administrative operations.
+ */
+ AdminClient admin();
+
+ /**
+ * Executes a generic action, denoted by an {@link Action}.
+ *
+ * @param action The action type to execute.
+ * @param request The action request.
+ * @param <Request> The request type.
+ * @param <Response> the response type.
+ * @param <RequestBuilder> The request builder type.
+ * @return A future allowing to get back the response.
+ */
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final Action<Request, Response, RequestBuilder> action, final Request request);
+
+ /**
+ * Executes a generic action, denoted by an {@link Action}.
+ *
+ * @param action The action type to execute.
+ * @param request The action request.
+ * @param listener The listener to receive the response back.
+ * @param <Request> The request type.
+ * @param <Response> The response type.
+ * @param <RequestBuilder> The request builder type.
+ */
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
+
+ /**
+ * Prepares a request builder to execute, specified by {@link Action}.
+ *
+ * @param action The action type to execute.
+ * @param <Request> The request type.
+ * @param <Response> The response type.
+ * @param <RequestBuilder> The request builder.
+ * @return The request builder, that can, at a later stage, execute the request.
+ */
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action);
+
+ /**
+ * Index a JSON source associated with a given index and type.
+ * <p/>
+ * <p>The id is optional; if it is not provided, one will be generated automatically.
+ *
+ * @param request The index request
+ * @return The result future
+ * @see Requests#indexRequest(String)
+ */
+ ActionFuture<IndexResponse> index(IndexRequest request);
+
+ /**
+ * Index a document associated with a given index and type.
+ * <p/>
+ * <p>The id is optional; if it is not provided, one will be generated automatically.
+ *
+ * @param request The index request
+ * @param listener A listener to be notified with a result
+ * @see Requests#indexRequest(String)
+ */
+ void index(IndexRequest request, ActionListener<IndexResponse> listener);
+
+ /**
+ * Index a document associated with a given index and type.
+ * <p/>
+ * <p>The id is optional; if it is not provided, one will be generated automatically.
+ */
+ IndexRequestBuilder prepareIndex();
+
+ /**
+ * Updates a document based on a script.
+ *
+ * @param request The update request
+ * @return The result future
+ */
+ ActionFuture<UpdateResponse> update(UpdateRequest request);
+
+ /**
+ * Updates a document based on a script.
+ *
+ * @param request The update request
+ * @param listener A listener to be notified with a result
+ */
+ void update(UpdateRequest request, ActionListener<UpdateResponse> listener);
+
+ /**
+ * Updates a document based on a script.
+ */
+ UpdateRequestBuilder prepareUpdate();
+
+ /**
+ * Updates a document based on a script.
+ */
+ UpdateRequestBuilder prepareUpdate(String index, String type, String id);
+
+ /**
+ * Index a document associated with a given index and type.
+ * <p/>
+ * <p>The id is optional; if it is not provided, one will be generated automatically.
+ *
+ * @param index The index to index the document to
+ * @param type The type to index the document to
+ */
+ IndexRequestBuilder prepareIndex(String index, String type);
+
+ /**
+ * Index a document associated with a given index and type.
+ * <p/>
+ * <p>The id is optional; if it is not provided, one will be generated automatically.
+ *
+ * @param index The index to index the document to
+ * @param type The type to index the document to
+ * @param id The id of the document
+ */
+ IndexRequestBuilder prepareIndex(String index, String type, @Nullable String id);
+
+ /**
+ * Deletes a document from the index based on the index, type and id.
+ *
+ * @param request The delete request
+ * @return The result future
+ * @see Requests#deleteRequest(String)
+ */
+ ActionFuture<DeleteResponse> delete(DeleteRequest request);
+
+ /**
+ * Deletes a document from the index based on the index, type and id.
+ *
+ * @param request The delete request
+ * @param listener A listener to be notified with a result
+ * @see Requests#deleteRequest(String)
+ */
+ void delete(DeleteRequest request, ActionListener<DeleteResponse> listener);
+
+ /**
+ * Deletes a document from the index based on the index, type and id.
+ */
+ DeleteRequestBuilder prepareDelete();
+
+ /**
+ * Deletes a document from the index based on the index, type and id.
+ *
+ * @param index The index to delete the document from
+ * @param type The type of the document to delete
+ * @param id The id of the document to delete
+ */
+ DeleteRequestBuilder prepareDelete(String index, String type, String id);
+
+ /**
+ * Executes a bulk of index / delete operations.
+ *
+ * @param request The bulk request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#bulkRequest()
+ */
+ ActionFuture<BulkResponse> bulk(BulkRequest request);
+
+ /**
+ * Executes a bulk of index / delete operations.
+ *
+ * @param request The bulk request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#bulkRequest()
+ */
+ void bulk(BulkRequest request, ActionListener<BulkResponse> listener);
+
+ /**
+ * Executes a bulk of index / delete operations.
+ */
+ BulkRequestBuilder prepareBulk();
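+
+ /*
+ * Usage sketch: batch an index and a delete operation into one round trip.
+ * Identifiers and source are illustrative; {@code client} is assumed:
+ *
+ * BulkResponse bulkResponse = client.prepareBulk()
+ *     .add(client.prepareIndex("twitter", "tweet", "1").setSource("{\"user\":\"kimchy\"}"))
+ *     .add(client.prepareDelete("twitter", "tweet", "2"))
+ *     .execute()
+ *     .actionGet();
+ * if (bulkResponse.hasFailures()) {
+ *     // inspect bulkResponse.buildFailureMessage()
+ * }
+ */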
+
+ /**
+ * Deletes all documents from one or more indices based on a query.
+ *
+ * @param request The delete by query request
+ * @return The result future
+ * @see Requests#deleteByQueryRequest(String...)
+ */
+ ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request);
+
+ /**
+ * Deletes all documents from one or more indices based on a query.
+ *
+ * @param request The delete by query request
+ * @param listener A listener to be notified with a result
+ * @see Requests#deleteByQueryRequest(String...)
+ */
+ void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener);
+
+ /**
+ * Deletes all documents from one or more indices based on a query.
+ */
+ DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices);
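+
+ /*
+ * Usage sketch (illustrative index/field/value; {@code QueryBuilders} is
+ * org.elasticsearch.index.query.QueryBuilders and {@code client} is assumed):
+ *
+ * DeleteByQueryResponse response = client.prepareDeleteByQuery("twitter")
+ *     .setQuery(QueryBuilders.termQuery("user", "kimchy"))
+ *     .execute()
+ *     .actionGet();
+ */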
+
+ /**
+ * Gets the document that was indexed from an index with a type and id.
+ *
+ * @param request The get request
+ * @return The result future
+ * @see Requests#getRequest(String)
+ */
+ ActionFuture<GetResponse> get(GetRequest request);
+
+ /**
+ * Gets the document that was indexed from an index with a type and id.
+ *
+ * @param request The get request
+ * @param listener A listener to be notified with a result
+ * @see Requests#getRequest(String)
+ */
+ void get(GetRequest request, ActionListener<GetResponse> listener);
+
+ /**
+ * Gets the document that was indexed from an index with a type and id.
+ */
+ GetRequestBuilder prepareGet();
+
+ /**
+ * Gets the document that was indexed from an index with a type (optional) and id.
+ */
+ GetRequestBuilder prepareGet(String index, @Nullable String type, String id);
+
+ /**
+ * Multi get documents.
+ */
+ ActionFuture<MultiGetResponse> multiGet(MultiGetRequest request);
+
+ /**
+ * Multi get documents.
+ */
+ void multiGet(MultiGetRequest request, ActionListener<MultiGetResponse> listener);
+
+ /**
+ * Multi get documents.
+ */
+ MultiGetRequestBuilder prepareMultiGet();
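+
+ /*
+ * Usage sketch for single and multi get (illustrative identifiers;
+ * {@code client} is assumed):
+ *
+ * GetResponse get = client.prepareGet("twitter", "tweet", "1")
+ *     .execute()
+ *     .actionGet();
+ * String source = get.getSourceAsString();
+ *
+ * MultiGetResponse multi = client.prepareMultiGet()
+ *     .add("twitter", "tweet", "1", "2")
+ *     .execute()
+ *     .actionGet();
+ */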
+
+ /**
+ * A count of all the documents matching a specific query.
+ *
+ * @param request The count request
+ * @return The result future
+ * @see Requests#countRequest(String...)
+ */
+ ActionFuture<CountResponse> count(CountRequest request);
+
+ /**
+ * A count of all the documents matching a specific query.
+ *
+ * @param request The count request
+ * @param listener A listener to be notified of the result
+ * @see Requests#countRequest(String...)
+ */
+ void count(CountRequest request, ActionListener<CountResponse> listener);
+
+ /**
+ * A count of all the documents matching a specific query.
+ */
+ CountRequestBuilder prepareCount(String... indices);
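+
+ /*
+ * Usage sketch (illustrative index name; {@code client} is assumed):
+ *
+ * CountResponse response = client.prepareCount("twitter")
+ *     .setQuery(QueryBuilders.matchAllQuery())
+ *     .execute()
+ *     .actionGet();
+ * long hits = response.getCount();
+ */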
+
+ /**
+ * Suggestions matching a specific phrase.
+ *
+ * @param request The suggest request
+ * @return The result future
+ * @see Requests#suggestRequest(String...)
+ */
+ ActionFuture<SuggestResponse> suggest(SuggestRequest request);
+
+ /**
+ * Suggestions matching a specific phrase.
+ *
+ * @param request The suggest request
+ * @param listener A listener to be notified of the result
+ * @see Requests#suggestRequest(String...)
+ */
+ void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener);
+
+ /**
+ * Suggestions matching a specific phrase.
+ */
+ SuggestRequestBuilder prepareSuggest(String... indices);
+
+ /**
+ * Search across one or more indices and one or more types with a query.
+ *
+ * @param request The search request
+ * @return The result future
+ * @see Requests#searchRequest(String...)
+ */
+ ActionFuture<SearchResponse> search(SearchRequest request);
+
+ /**
+ * Search across one or more indices and one or more types with a query.
+ *
+ * @param request The search request
+ * @param listener A listener to be notified of the result
+ * @see Requests#searchRequest(String...)
+ */
+ void search(SearchRequest request, ActionListener<SearchResponse> listener);
+
+ /**
+ * Search across one or more indices and one or more types with a query.
+ */
+ SearchRequestBuilder prepareSearch(String... indices);
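+
+ /*
+ * Usage sketch for a basic search (illustrative index/field/terms;
+ * {@code client} is assumed):
+ *
+ * SearchResponse response = client.prepareSearch("twitter")
+ *     .setQuery(QueryBuilders.matchQuery("message", "elasticsearch"))
+ *     .setFrom(0).setSize(10)
+ *     .execute()
+ *     .actionGet();
+ * for (SearchHit hit : response.getHits()) {
+ *     // hit.getSourceAsString()
+ * }
+ */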
+
+ /**
+ * A search scroll request to continue searching a previous scrollable search request.
+ *
+ * @param request The search scroll request
+ * @return The result future
+ * @see Requests#searchScrollRequest(String)
+ */
+ ActionFuture<SearchResponse> searchScroll(SearchScrollRequest request);
+
+ /**
+ * A search scroll request to continue searching a previous scrollable search request.
+ *
+ * @param request The search scroll request
+ * @param listener A listener to be notified of the result
+ * @see Requests#searchScrollRequest(String)
+ */
+ void searchScroll(SearchScrollRequest request, ActionListener<SearchResponse> listener);
+
+ /**
+ * A search scroll request to continue searching a previous scrollable search request.
+ */
+ SearchScrollRequestBuilder prepareSearchScroll(String scrollId);
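+
+ /*
+ * Usage sketch: page through a large result set with the scroll API
+ * (illustrative index name and timeout; {@code TimeValue} is
+ * org.elasticsearch.common.unit.TimeValue and {@code client} is assumed):
+ *
+ * SearchResponse scrollResp = client.prepareSearch("twitter")
+ *     .setScroll(new TimeValue(60000))
+ *     .setSize(100)
+ *     .execute().actionGet();
+ * while (true) {
+ *     scrollResp = client.prepareSearchScroll(scrollResp.getScrollId())
+ *         .setScroll(new TimeValue(60000))
+ *         .execute().actionGet();
+ *     if (scrollResp.getHits().getHits().length == 0) {
+ *         break; // no more hits to pull
+ *     }
+ * }
+ */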
+
+ /**
+ * Performs multiple search requests.
+ */
+ ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request);
+
+ /**
+ * Performs multiple search requests.
+ */
+ void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener);
+
+ /**
+ * Performs multiple search requests.
+ */
+ MultiSearchRequestBuilder prepareMultiSearch();
+
+ /**
+ * A more like this action to search for documents that are "like" a specific document.
+ *
+ * @param request The more like this request
+ * @return The response future
+ */
+ ActionFuture<SearchResponse> moreLikeThis(MoreLikeThisRequest request);
+
+ /**
+ * A more like this action to search for documents that are "like" a specific document.
+ *
+ * @param request The more like this request
+ * @param listener A listener to be notified of the result
+ */
+ void moreLikeThis(MoreLikeThisRequest request, ActionListener<SearchResponse> listener);
+
+ /**
+ * A more like this action to search for documents that are "like" a specific document.
+ *
+ * @param index The index to load the document from
+ * @param type The type of the document
+ * @param id The id of the document
+ */
+ MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id);
+
+
+ /**
+ * An action that returns the term vectors for a specific document.
+ *
+ * @param request The term vector request
+ * @return The response future
+ */
+ ActionFuture<TermVectorResponse> termVector(TermVectorRequest request);
+
+ /**
+ * An action that returns the term vectors for a specific document.
+ *
+ * @param request The term vector request
+ * @param listener A listener to be notified with the result
+ */
+ void termVector(TermVectorRequest request, ActionListener<TermVectorResponse> listener);
+
+
+ /**
+ * Builder for the term vector request.
+ *
+ * @param index The index to load the document from
+ * @param type The type of the document
+ * @param id The id of the document
+ */
+ TermVectorRequestBuilder prepareTermVector(String index, String type, String id);
+
+
+ /**
+ * Multi get term vectors.
+ */
+ ActionFuture<MultiTermVectorsResponse> multiTermVectors(MultiTermVectorsRequest request);
+
+ /**
+ * Multi get term vectors.
+ */
+ void multiTermVectors(MultiTermVectorsRequest request, ActionListener<MultiTermVectorsResponse> listener);
+
+ /**
+ * Multi get term vectors.
+ */
+ MultiTermVectorsRequestBuilder prepareMultiTermVectors();
+
+
+ /**
+ * Percolates a request returning the matched documents.
+ */
+ ActionFuture<PercolateResponse> percolate(PercolateRequest request);
+
+ /**
+ * Percolates a request returning the matched documents.
+ */
+ void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener);
+
+ /**
+ * Percolates a request returning the matched documents.
+ */
+ PercolateRequestBuilder preparePercolate();
+
+ /**
+ * Performs multiple percolate requests.
+ */
+ ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request);
+
+ /**
+ * Performs multiple percolate requests.
+ */
+ void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener);
+
+ /**
+ * Performs multiple percolate requests.
+ */
+ MultiPercolateRequestBuilder prepareMultiPercolate();
+
+ /**
+ * Computes a score explanation for the specified request.
+ *
+ * @param index The index this explain is targeted for
+ * @param type The type this explain is targeted for
+ * @param id The document identifier this explain is targeted for
+ */
+ ExplainRequestBuilder prepareExplain(String index, String type, String id);
+
+ /**
+ * Computes a score explanation for the specified request.
+ *
+ * @param request The request encapsulating the query and document identifier to compute a score explanation for
+ */
+ ActionFuture<ExplainResponse> explain(ExplainRequest request);
+
+ /**
+ * Computes a score explanation for the specified request.
+ *
+ * @param request The request encapsulating the query and document identifier to compute a score explanation for
+ * @param listener A listener to be notified of the result
+ */
+ void explain(ExplainRequest request, ActionListener<ExplainResponse> listener);
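+
+ /*
+ * Usage sketch (illustrative identifiers and query; {@code client} is assumed):
+ *
+ * ExplainResponse response = client.prepareExplain("twitter", "tweet", "1")
+ *     .setQuery(QueryBuilders.termQuery("message", "search"))
+ *     .execute()
+ *     .actionGet();
+ */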
+
+ /**
+ * Clears the search contexts associated with specified scroll ids.
+ */
+ ClearScrollRequestBuilder prepareClearScroll();
+
+ /**
+ * Clears the search contexts associated with specified scroll ids.
+ */
+ ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request);
+
+ /**
+ * Clears the search contexts associated with specified scroll ids.
+ */
+ void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener);
+
+}
diff --git a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
new file mode 100644
index 0000000..cefe335
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
@@ -0,0 +1,432 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartResponse;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+
+/**
+ * Administrative actions/operations against the cluster.
+ *
+ * @see AdminClient#cluster()
+ */
+public interface ClusterAdminClient {
+
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final ClusterAction<Request, Response, RequestBuilder> action, final Request request);
+
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final ClusterAction<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
+
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final ClusterAction<Request, Response, RequestBuilder> action);
+
+ /**
+ * The health of the cluster.
+ *
+ * @param request The cluster health request
+ * @return The result future
+ * @see Requests#clusterHealthRequest(String...)
+ */
+ ActionFuture<ClusterHealthResponse> health(ClusterHealthRequest request);
+
+ /**
+ * The health of the cluster.
+ *
+ * @param request The cluster health request
+ * @param listener A listener to be notified with a result
+ * @see Requests#clusterHealthRequest(String...)
+ */
+ void health(ClusterHealthRequest request, ActionListener<ClusterHealthResponse> listener);
+
+ /**
+ * The health of the cluster.
+ */
+ ClusterHealthRequestBuilder prepareHealth(String... indices);
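+
+ /*
+ * Usage sketch: block until the cluster reaches at least yellow status
+ * (illustrative; {@code client} is assumed to be a connected Client):
+ *
+ * ClusterHealthResponse health = client.admin().cluster().prepareHealth()
+ *     .setWaitForYellowStatus()
+ *     .execute()
+ *     .actionGet();
+ */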
+
+ /**
+ * The state of the cluster.
+ *
+ * @param request The cluster state request.
+ * @return The result future
+ * @see Requests#clusterStateRequest()
+ */
+ ActionFuture<ClusterStateResponse> state(ClusterStateRequest request);
+
+ /**
+ * The state of the cluster.
+ *
+ * @param request The cluster state request.
+ * @param listener A listener to be notified with a result
+ * @see Requests#clusterStateRequest()
+ */
+ void state(ClusterStateRequest request, ActionListener<ClusterStateResponse> listener);
+
+ /**
+ * The state of the cluster.
+ */
+ ClusterStateRequestBuilder prepareState();
+
+ /**
+ * Updates settings in the cluster.
+ */
+ ActionFuture<ClusterUpdateSettingsResponse> updateSettings(ClusterUpdateSettingsRequest request);
+
+ /**
+ * Updates settings in the cluster.
+ */
+ void updateSettings(ClusterUpdateSettingsRequest request, ActionListener<ClusterUpdateSettingsResponse> listener);
+
+ /**
+ * Updates settings in the cluster.
+ */
+ ClusterUpdateSettingsRequestBuilder prepareUpdateSettings();
+
+ /**
+ * Reroutes allocation of shards. Advanced API.
+ */
+ ActionFuture<ClusterRerouteResponse> reroute(ClusterRerouteRequest request);
+
+ /**
+ * Reroutes allocation of shards. Advanced API.
+ */
+ void reroute(ClusterRerouteRequest request, ActionListener<ClusterRerouteResponse> listener);
+
+ /**
+ * Reroutes allocation of shards. Advanced API.
+ */
+ ClusterRerouteRequestBuilder prepareReroute();
+
+ /**
+ * Nodes info of the cluster.
+ *
+ * @param request The nodes info request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#nodesInfoRequest(String...)
+ */
+ ActionFuture<NodesInfoResponse> nodesInfo(NodesInfoRequest request);
+
+ /**
+ * Nodes info of the cluster.
+ *
+ * @param request The nodes info request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#nodesInfoRequest(String...)
+ */
+ void nodesInfo(NodesInfoRequest request, ActionListener<NodesInfoResponse> listener);
+
+ /**
+ * Nodes info of the cluster.
+ */
+ NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds);
+
+ /**
+ * Cluster wide aggregated stats.
+ *
+ * @param request The cluster stats request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#clusterStatsRequest
+ */
+ ActionFuture<ClusterStatsResponse> clusterStats(ClusterStatsRequest request);
+
+ /**
+ * Cluster wide aggregated stats.
+ *
+ * @param request The cluster stats request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#clusterStatsRequest()
+ */
+ void clusterStats(ClusterStatsRequest request, ActionListener<ClusterStatsResponse> listener);
+
+ /**
+ * Cluster wide aggregated stats.
+ */
+ ClusterStatsRequestBuilder prepareClusterStats();
+
+ /**
+ * Nodes stats of the cluster.
+ *
+ * @param request The nodes stats request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#nodesStatsRequest(String...)
+ */
+ ActionFuture<NodesStatsResponse> nodesStats(NodesStatsRequest request);
+
+ /**
+ * Nodes stats of the cluster.
+ *
+ * @param request The nodes stats request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#nodesStatsRequest(String...)
+ */
+ void nodesStats(NodesStatsRequest request, ActionListener<NodesStatsResponse> listener);
+
+ /**
+ * Nodes stats of the cluster.
+ */
+ NodesStatsRequestBuilder prepareNodesStats(String... nodesIds);
+
+ /**
+ * The hot threads on nodes in the cluster.
+ */
+ ActionFuture<NodesHotThreadsResponse> nodesHotThreads(NodesHotThreadsRequest request);
+
+ void nodesHotThreads(NodesHotThreadsRequest request, ActionListener<NodesHotThreadsResponse> listener);
+
+ NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds);
+
+ /**
+ * Shutdown nodes in the cluster.
+ *
+ * @param request The nodes shutdown request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#nodesShutdownRequest(String...)
+ */
+ ActionFuture<NodesShutdownResponse> nodesShutdown(NodesShutdownRequest request);
+
+ /**
+ * Shutdown nodes in the cluster.
+ *
+ * @param request The nodes shutdown request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#nodesShutdownRequest(String...)
+ */
+ void nodesShutdown(NodesShutdownRequest request, ActionListener<NodesShutdownResponse> listener);
+
+ /**
+ * Shutdown nodes in the cluster.
+ */
+ NodesShutdownRequestBuilder prepareNodesShutdown(String... nodesIds);
+
+ /**
+ * Restarts nodes in the cluster.
+ *
+ * @param request The nodes restart request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#nodesRestartRequest(String...)
+ */
+ ActionFuture<NodesRestartResponse> nodesRestart(NodesRestartRequest request);
+
+ /**
+ * Restarts nodes in the cluster.
+ *
+ * @param request The nodes restart request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#nodesRestartRequest(String...)
+ */
+ void nodesRestart(NodesRestartRequest request, ActionListener<NodesRestartResponse> listener);
+
+ /**
+ * Restarts nodes in the cluster.
+ */
+ NodesRestartRequestBuilder prepareNodesRestart(String... nodesIds);
+
+ /**
+ * Returns the list of shards the given search would be executed on.
+ */
+ ActionFuture<ClusterSearchShardsResponse> searchShards(ClusterSearchShardsRequest request);
+
+ /**
+ * Returns the list of shards the given search would be executed on.
+ */
+ void searchShards(ClusterSearchShardsRequest request, ActionListener<ClusterSearchShardsResponse> listener);
+
+ /**
+ * Returns the list of shards the given search would be executed on.
+ */
+ ClusterSearchShardsRequestBuilder prepareSearchShards();
+
+ /**
+ * Returns the list of shards the given search would be executed on.
+ */
+ ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices);
+
+ /**
+ * Registers a snapshot repository.
+ */
+ ActionFuture<PutRepositoryResponse> putRepository(PutRepositoryRequest request);
+
+ /**
+ * Registers a snapshot repository.
+ */
+ void putRepository(PutRepositoryRequest request, ActionListener<PutRepositoryResponse> listener);
+
+ /**
+ * Registers a snapshot repository.
+ */
+ PutRepositoryRequestBuilder preparePutRepository(String name);
+
+ /**
+ * Unregisters a repository.
+ */
+ ActionFuture<DeleteRepositoryResponse> deleteRepository(DeleteRepositoryRequest request);
+
+ /**
+ * Unregisters a repository.
+ */
+ void deleteRepository(DeleteRepositoryRequest request, ActionListener<DeleteRepositoryResponse> listener);
+
+ /**
+ * Unregisters a repository.
+ */
+ DeleteRepositoryRequestBuilder prepareDeleteRepository(String name);
+
+ /**
+ * Gets repositories.
+ */
+ ActionFuture<GetRepositoriesResponse> getRepositories(GetRepositoriesRequest request);
+
+ /**
+ * Gets repositories.
+ */
+ void getRepositories(GetRepositoriesRequest request, ActionListener<GetRepositoriesResponse> listener);
+
+ /**
+ * Gets repositories.
+ */
+ GetRepositoriesRequestBuilder prepareGetRepositories(String... name);
+
+ /**
+ * Creates a new snapshot.
+ */
+ ActionFuture<CreateSnapshotResponse> createSnapshot(CreateSnapshotRequest request);
+
+ /**
+ * Creates a new snapshot.
+ */
+ void createSnapshot(CreateSnapshotRequest request, ActionListener<CreateSnapshotResponse> listener);
+
+ /**
+ * Creates a new snapshot.
+ */
+ CreateSnapshotRequestBuilder prepareCreateSnapshot(String repository, String name);
+
+ /**
+ * Gets snapshots.
+ */
+ ActionFuture<GetSnapshotsResponse> getSnapshots(GetSnapshotsRequest request);
+
+ /**
+ * Gets snapshots.
+ */
+ void getSnapshots(GetSnapshotsRequest request, ActionListener<GetSnapshotsResponse> listener);
+
+ /**
+ * Gets snapshots.
+ */
+ GetSnapshotsRequestBuilder prepareGetSnapshots(String repository);
+
+ /**
+ * Deletes a snapshot.
+ */
+ ActionFuture<DeleteSnapshotResponse> deleteSnapshot(DeleteSnapshotRequest request);
+
+ /**
+ * Deletes a snapshot.
+ */
+ void deleteSnapshot(DeleteSnapshotRequest request, ActionListener<DeleteSnapshotResponse> listener);
+
+ /**
+ * Deletes a snapshot.
+ */
+ DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String snapshot);
+
+ /**
+ * Restores a snapshot.
+ */
+ ActionFuture<RestoreSnapshotResponse> restoreSnapshot(RestoreSnapshotRequest request);
+
+ /**
+ * Restores a snapshot.
+ */
+ void restoreSnapshot(RestoreSnapshotRequest request, ActionListener<RestoreSnapshotResponse> listener);
+
+ /**
+ * Restores a snapshot.
+ */
+ RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot);
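+
+ /*
+ * Usage sketch for the snapshot workflow: register a filesystem repository,
+ * take a snapshot, then restore it. Repository/snapshot names, the "fs"
+ * repository type and the location setting are illustrative; {@code client}
+ * and {@code ImmutableSettings} (org.elasticsearch.common.settings) are assumed:
+ *
+ * client.admin().cluster().preparePutRepository("my_backup")
+ *     .setType("fs")
+ *     .setSettings(ImmutableSettings.settingsBuilder()
+ *         .put("location", "/mount/backups/my_backup").build())
+ *     .execute().actionGet();
+ * client.admin().cluster().prepareCreateSnapshot("my_backup", "snapshot_1")
+ *     .setWaitForCompletion(true)
+ *     .execute().actionGet();
+ * client.admin().cluster().prepareRestoreSnapshot("my_backup", "snapshot_1")
+ *     .execute().actionGet();
+ */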
+
+ /**
+ * Returns a list of the pending cluster tasks that are scheduled to be executed. This includes operations
+ * that update the cluster state (for example, a create index operation).
+ */
+ void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener<PendingClusterTasksResponse> listener);
+
+ /**
+ * Returns a list of the pending cluster tasks that are scheduled to be executed. This includes operations
+ * that update the cluster state (for example, a create index operation).
+ */
+ ActionFuture<PendingClusterTasksResponse> pendingClusterTasks(PendingClusterTasksRequest request);
+
+ /**
+ * Returns a list of the pending cluster tasks that are scheduled to be executed. This includes operations
+ * that update the cluster state (for example, a create index operation).
+ */
+ PendingClusterTasksRequestBuilder preparePendingClusterTasks();
+
+}
diff --git a/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
new file mode 100644
index 0000000..8a728ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
@@ -0,0 +1,753 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.*;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequestBuilder;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * Administrative actions/operations against indices.
+ *
+ * @see AdminClient#indices()
+ */
+public interface IndicesAdminClient {
+
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final IndicesAction<Request, Response, RequestBuilder> action, final Request request);
+
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final IndicesAction<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
+
+ <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final IndicesAction<Request, Response, RequestBuilder> action);
+
+
+ /**
+ * Indices exists.
+ *
+ * @param request The indices exists request
+ * @return The result future
+ * @see Requests#indicesExistsRequest(String...)
+ */
+ ActionFuture<IndicesExistsResponse> exists(IndicesExistsRequest request);
+
+ /**
+ * Indices exists.
+ *
+ * @param request The indices exists request
+ * @param listener A listener to be notified with a result
+ * @see Requests#indicesExistsRequest(String...)
+ */
+ void exists(IndicesExistsRequest request, ActionListener<IndicesExistsResponse> listener);
+
+ /**
+ * Indices exists.
+ */
+ IndicesExistsRequestBuilder prepareExists(String... indices);
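+
+ /*
+ * Usage sketch (illustrative index name; {@code client} is assumed to be
+ * a connected Client):
+ *
+ * boolean exists = client.admin().indices().prepareExists("twitter")
+ *     .execute()
+ *     .actionGet()
+ *     .isExists();
+ */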
+
+
+ /**
+ * Types exists.
+ *
+ * @param request The types exists request
+ * @return The result future
+ */
+ ActionFuture<TypesExistsResponse> typesExists(TypesExistsRequest request);
+
+ /**
+ * Types exists.
+ *
+ * @param request The types exists request
+ * @param listener A listener to be notified with a result
+ */
+ void typesExists(TypesExistsRequest request, ActionListener<TypesExistsResponse> listener);
+
+ /**
+ * Types exists.
+ */
+ TypesExistsRequestBuilder prepareTypesExists(String... index);
+
+ /**
+ * Indices stats.
+ */
+ ActionFuture<IndicesStatsResponse> stats(IndicesStatsRequest request);
+
+ /**
+ * Indices stats.
+ */
+ void stats(IndicesStatsRequest request, ActionListener<IndicesStatsResponse> listener);
+
+ /**
+ * Indices stats.
+ */
+ IndicesStatsRequestBuilder prepareStats(String... indices);
+
+ /**
+ * The status of one or more indices.
+ *
+ * @param request The indices status request
+ * @return The result future
+ * @see Requests#indicesStatusRequest(String...)
+ */
+ ActionFuture<IndicesStatusResponse> status(IndicesStatusRequest request);
+
+ /**
+ * The status of one or more indices.
+ *
+ * @param request The indices status request
+ * @param listener A listener to be notified with a result
+ * @see Requests#indicesStatusRequest(String...)
+ */
+ void status(IndicesStatusRequest request, ActionListener<IndicesStatusResponse> listener);
+
+ /**
+ * The status of one or more indices.
+ */
+ IndicesStatusRequestBuilder prepareStatus(String... indices);
+
+ /**
+ * The segments of one or more indices.
+ *
+ * @param request The indices segments request
+ * @return The result future
+ * @see Requests#indicesSegmentsRequest(String...)
+ */
+ ActionFuture<IndicesSegmentResponse> segments(IndicesSegmentsRequest request);
+
+ /**
+ * The segments of one or more indices.
+ *
+ * @param request The indices segments request
+ * @param listener A listener to be notified with a result
+ * @see Requests#indicesSegmentsRequest(String...)
+ */
+ void segments(IndicesSegmentsRequest request, ActionListener<IndicesSegmentResponse> listener);
+
+ /**
+ * The segments of one or more indices.
+ */
+ IndicesSegmentsRequestBuilder prepareSegments(String... indices);
+
+ /**
+ * Creates an index using an explicit request that allows specifying the settings of the index.
+ *
+ * @param request The create index request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#createIndexRequest(String)
+ */
+ ActionFuture<CreateIndexResponse> create(CreateIndexRequest request);
+
+ /**
+ * Creates an index using an explicit request that allows specifying the settings of the index.
+ *
+ * @param request The create index request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#createIndexRequest(String)
+ */
+ void create(CreateIndexRequest request, ActionListener<CreateIndexResponse> listener);
+
+ /**
+ * Creates an index using an explicit request that allows specifying the settings of the index.
+ *
+ * @param index The index name to create
+ */
+ CreateIndexRequestBuilder prepareCreate(String index);
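+
+ /*
+ * Usage sketch (illustrative index name and settings; {@code client} is assumed):
+ *
+ * CreateIndexResponse response = client.admin().indices().prepareCreate("twitter")
+ *     .setSettings(ImmutableSettings.settingsBuilder()
+ *         .put("number_of_shards", 3)
+ *         .put("number_of_replicas", 1)
+ *         .build())
+ *     .execute().actionGet();
+ */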
+
+ /**
+ * Deletes an index based on the index name.
+ *
+ * @param request The delete index request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#deleteIndexRequest(String)
+ */
+ ActionFuture<DeleteIndexResponse> delete(DeleteIndexRequest request);
+
+ /**
+ * Deletes an index based on the index name.
+ *
+ * @param request The delete index request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#deleteIndexRequest(String)
+ */
+ void delete(DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener);
+
+ /**
+ * Deletes an index based on the index name.
+ *
+ * @param indices The indices to delete. Empty array to delete all indices.
+ */
+ DeleteIndexRequestBuilder prepareDelete(String... indices);
+
+ /**
+ * Closes an index based on the index name.
+ *
+ * @param request The close index request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#closeIndexRequest(String)
+ */
+ ActionFuture<CloseIndexResponse> close(CloseIndexRequest request);
+
+ /**
+ * Closes an index based on the index name.
+ *
+ * @param request The close index request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#closeIndexRequest(String)
+ */
+ void close(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener);
+
+ /**
+ * Closes one or more indices based on their index name.
+ *
+ * @param indices The name of the indices to close
+ */
+ CloseIndexRequestBuilder prepareClose(String... indices);
+
+ /**
+ * Opens an index based on the index name.
+ *
+ * @param request The open index request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#openIndexRequest(String)
+ */
+ ActionFuture<OpenIndexResponse> open(OpenIndexRequest request);
+
+ /**
+ * Opens an index based on the index name.
+ *
+ * @param request The open index request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#openIndexRequest(String)
+ */
+ void open(OpenIndexRequest request, ActionListener<OpenIndexResponse> listener);
+
+ /**
+ * Opens one or more indices based on their index name.
+ *
+ * @param indices The name of the indices to open
+ */
+ OpenIndexRequestBuilder prepareOpen(String... indices);
+
+ /**
+ * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable).
+ *
+ * @param request The refresh request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#refreshRequest(String...)
+ */
+ ActionFuture<RefreshResponse> refresh(RefreshRequest request);
+
+ /**
+ * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable).
+ *
+ * @param request The refresh request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#refreshRequest(String...)
+ */
+ void refresh(RefreshRequest request, ActionListener<RefreshResponse> listener);
+
+ /**
+ * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable).
+ */
+ RefreshRequestBuilder prepareRefresh(String... indices);
+
+ /**
+ * Explicitly flush one or more indices (releasing memory from the node).
+ *
+ * @param request The flush request
+ * @return A result future
+ * @see org.elasticsearch.client.Requests#flushRequest(String...)
+ */
+ ActionFuture<FlushResponse> flush(FlushRequest request);
+
+ /**
+ * Explicitly flush one or more indices (releasing memory from the node).
+ *
+ * @param request The flush request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#flushRequest(String...)
+ */
+ void flush(FlushRequest request, ActionListener<FlushResponse> listener);
+
+ /**
+ * Explicitly flush one or more indices (releasing memory from the node).
+ */
+ FlushRequestBuilder prepareFlush(String... indices);
+
+ /**
+ * Explicitly optimize one or more indices down to the requested number of segments.
+ *
+ * @param request The optimize request
+ * @return A result future
+ * @see org.elasticsearch.client.Requests#optimizeRequest(String...)
+ */
+ ActionFuture<OptimizeResponse> optimize(OptimizeRequest request);
+
+ /**
+ * Explicitly optimize one or more indices down to the requested number of segments.
+ *
+ * @param request The optimize request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#optimizeRequest(String...)
+ */
+ void optimize(OptimizeRequest request, ActionListener<OptimizeResponse> listener);
+
+ /**
+ * Explicitly optimize one or more indices down to the requested number of segments.
+ */
+ OptimizeRequestBuilder prepareOptimize(String... indices);
+
+ /**
+ * Get the complete mappings of one or more types.
+ */
+ void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener);
+
+ /**
+ * Get the complete mappings of one or more types.
+ */
+ ActionFuture<GetMappingsResponse> getMappings(GetMappingsRequest request);
+
+ /**
+ * Get the complete mappings of one or more types.
+ */
+ GetMappingsRequestBuilder prepareGetMappings(String... indices);
+
+ /**
+ * Get the mappings of specific fields.
+ */
+ void getFieldMappings(GetFieldMappingsRequest request, ActionListener<GetFieldMappingsResponse> listener);
+
+ /**
+ * Get the mappings of specific fields.
+ */
+ GetFieldMappingsRequestBuilder prepareGetFieldMappings(String... indices);
+
+ /**
+ * Get the mappings of specific fields.
+ */
+ ActionFuture<GetFieldMappingsResponse> getFieldMappings(GetFieldMappingsRequest request);
+
+ /**
+ * Adds a mapping definition for a type to one or more indices.
+ *
+ * @param request The create mapping request
+ * @return A result future
+ * @see org.elasticsearch.client.Requests#putMappingRequest(String...)
+ */
+ ActionFuture<PutMappingResponse> putMapping(PutMappingRequest request);
+
+ /**
+ * Adds a mapping definition for a type to one or more indices.
+ *
+ * @param request The create mapping request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#putMappingRequest(String...)
+ */
+ void putMapping(PutMappingRequest request, ActionListener<PutMappingResponse> listener);
+
+ /**
+ * Adds a mapping definition for a type to one or more indices.
+ */
+ PutMappingRequestBuilder preparePutMapping(String... indices);
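+
+ /*
+ * Usage sketch (illustrative type name and mapping JSON, passed as a string
+ * source; {@code client} is assumed):
+ *
+ * client.admin().indices().preparePutMapping("twitter")
+ *     .setType("tweet")
+ *     .setSource("{\"tweet\":{\"properties\":{\"message\":{\"type\":\"string\"}}}}")
+ *     .execute().actionGet();
+ */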
+
+ /**
+ * Deletes mapping (and all its data) from one or more indices.
+ *
+ * @param request The delete mapping request
+ * @return A result future
+ * @see org.elasticsearch.client.Requests#deleteMappingRequest(String...)
+ */
+ ActionFuture<DeleteMappingResponse> deleteMapping(DeleteMappingRequest request);
+
+ /**
+ * Deletes the mapping definition for a type from one or more indices.
+ *
+ * @param request The delete mapping request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#deleteMappingRequest(String...)
+ */
+ void deleteMapping(DeleteMappingRequest request, ActionListener<DeleteMappingResponse> listener);
+
+ /**
+ * Deletes the mapping definition for a type from one or more indices.
+ */
+ DeleteMappingRequestBuilder prepareDeleteMapping(String... indices);
+
+ /**
+ * Explicitly perform gateway snapshot for one or more indices.
+ *
+ * @param request The gateway snapshot request
+ * @return The result future
+ * @see org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)
+ * @deprecated Use snapshot/restore API instead
+ */
+ @Deprecated
+ ActionFuture<GatewaySnapshotResponse> gatewaySnapshot(GatewaySnapshotRequest request);
+
+ /**
+ * Explicitly perform gateway snapshot for one or more indices.
+ *
+ * @param request The gateway snapshot request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)
+ * @deprecated Use snapshot/restore API instead
+ */
+ @Deprecated
+ void gatewaySnapshot(GatewaySnapshotRequest request, ActionListener<GatewaySnapshotResponse> listener);
+
+ /**
+ * Explicitly perform gateway snapshot for one or more indices.
+ *
+ * @deprecated Use snapshot/restore API instead
+ */
+ @Deprecated
+ GatewaySnapshotRequestBuilder prepareGatewaySnapshot(String... indices);
+
+ /**
+ * Allows adding/removing aliases on indices.
+ *
+ * @param request The index aliases request
+ * @return The result future
+ * @see Requests#indexAliasesRequest()
+ */
+ ActionFuture<IndicesAliasesResponse> aliases(IndicesAliasesRequest request);
+
+ /**
+ * Allows adding/removing aliases on indices.
+ *
+ * @param request The index aliases request
+ * @param listener A listener to be notified with a result
+ * @see Requests#indexAliasesRequest()
+ */
+ void aliases(IndicesAliasesRequest request, ActionListener<IndicesAliasesResponse> listener);
+
+ /**
+ * Allows adding/removing aliases on indices.
+ */
+ IndicesAliasesRequestBuilder prepareAliases();
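+
+ /*
+ * Usage sketch: atomically switch an alias from an old index to a new one
+ * (illustrative names; {@code client} is assumed):
+ *
+ * client.admin().indices().prepareAliases()
+ *     .removeAlias("twitter_v1", "twitter")
+ *     .addAlias("twitter_v2", "twitter")
+ *     .execute().actionGet();
+ */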
+
+ /**
+ * Gets specific index aliases that exist in particular indices and/or by name.
+ *
+ * @param request The get aliases request
+ * @return The result future
+ */
+ ActionFuture<GetAliasesResponse> getAliases(GetAliasesRequest request);
+
+ /**
+ * Gets specific index aliases that exist in particular indices and/or by name.
+ *
+ * @param request The index aliases request
+ * @param listener A listener to be notified with a result
+ */
+ void getAliases(GetAliasesRequest request, ActionListener<GetAliasesResponse> listener);
+
+ /**
+ * Gets specific index aliases that exist in particular indices and/or by name.
+ */
+ GetAliasesRequestBuilder prepareGetAliases(String... aliases);
+
+ /**
+ * Allows checking the existence of aliases on indices.
+ */
+ AliasesExistRequestBuilder prepareAliasesExist(String... aliases);
+
+ /**
+ * Checks the existence of index aliases.
+ *
+ * @param request The get aliases request
+ * @return The result future
+ */
+ ActionFuture<AliasesExistResponse> aliasesExist(GetAliasesRequest request);
+
+ /**
+ * Checks the existence of the specified index aliases.
+ *
+ * @param request The index aliases request
+ * @param listener A listener to be notified with a result
+ */
+ void aliasesExist(GetAliasesRequest request, ActionListener<AliasesExistResponse> listener);
+
+ /**
+ * Clear indices cache.
+ *
+ * @param request The clear indices cache request
+ * @return The result future
+ * @see Requests#clearIndicesCacheRequest(String...)
+ */
+ ActionFuture<ClearIndicesCacheResponse> clearCache(ClearIndicesCacheRequest request);
+
+ /**
+ * Clear indices cache.
+ *
+ * @param request The clear indices cache request
+ * @param listener A listener to be notified with a result
+ * @see Requests#clearIndicesCacheRequest(String...)
+ */
+ void clearCache(ClearIndicesCacheRequest request, ActionListener<ClearIndicesCacheResponse> listener);
+
+ /**
+ * Clear indices cache.
+ */
+ ClearIndicesCacheRequestBuilder prepareClearCache(String... indices);
+
+ /**
+ * Updates settings of one or more indices.
+ *
+ * @param request the update settings request
+ * @return The result future
+ */
+ ActionFuture<UpdateSettingsResponse> updateSettings(UpdateSettingsRequest request);
+
+ /**
+ * Updates settings of one or more indices.
+ *
+ * @param request the update settings request
+ * @param listener A listener to be notified with the response
+ */
+ void updateSettings(UpdateSettingsRequest request, ActionListener<UpdateSettingsResponse> listener);
+
+ /**
+ * Updates settings of one or more indices.
+ */
+ UpdateSettingsRequestBuilder prepareUpdateSettings(String... indices);
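+
+ /*
+ * Usage sketch: change a dynamic index setting (illustrative index name and
+ * setting; {@code client} is assumed):
+ *
+ * client.admin().indices().prepareUpdateSettings("twitter")
+ *     .setSettings(ImmutableSettings.settingsBuilder()
+ *         .put("index.number_of_replicas", 2)
+ *         .build())
+ *     .execute().actionGet();
+ */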
+
+ /**
+ * Analyze text under the provided index.
+ */
+ ActionFuture<AnalyzeResponse> analyze(AnalyzeRequest request);
+
+ /**
+ * Analyze text under the provided index.
+ */
+ void analyze(AnalyzeRequest request, ActionListener<AnalyzeResponse> listener);
+
+ /**
+ * Analyze text under the provided index.
+ *
+ * @param index The index name
+ * @param text The text to analyze
+ */
+ AnalyzeRequestBuilder prepareAnalyze(@Nullable String index, String text);
+
+ /**
+ * Analyze text.
+ *
+ * @param text The text to analyze
+ */
+ AnalyzeRequestBuilder prepareAnalyze(String text);
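+
+ /*
+ * Usage sketch (illustrative index, text and analyzer; {@code client} is assumed):
+ *
+ * AnalyzeResponse response = client.admin().indices()
+ *     .prepareAnalyze("twitter", "Trying out Elasticsearch")
+ *     .setAnalyzer("standard")
+ *     .execute().actionGet();
+ * for (AnalyzeResponse.AnalyzeToken token : response.getTokens()) {
+ *     // token.getTerm()
+ * }
+ */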
+
+ /**
+ * Puts an index template.
+ */
+ ActionFuture<PutIndexTemplateResponse> putTemplate(PutIndexTemplateRequest request);
+
+ /**
+ * Puts an index template.
+ */
+ void putTemplate(PutIndexTemplateRequest request, ActionListener<PutIndexTemplateResponse> listener);
+
+ /**
+ * Puts an index template.
+ *
+ * @param name The name of the template.
+ */
+ PutIndexTemplateRequestBuilder preparePutTemplate(String name);
+
+ /**
+ * Deletes index template.
+ */
+ ActionFuture<DeleteIndexTemplateResponse> deleteTemplate(DeleteIndexTemplateRequest request);
+
+ /**
+ * Deletes an index template.
+ */
+ void deleteTemplate(DeleteIndexTemplateRequest request, ActionListener<DeleteIndexTemplateResponse> listener);
+
+ /**
+ * Deletes an index template.
+ *
+ * @param name The name of the template.
+ */
+ DeleteIndexTemplateRequestBuilder prepareDeleteTemplate(String name);
+
+ /**
+ * Gets index templates.
+ */
+ ActionFuture<GetIndexTemplatesResponse> getTemplates(GetIndexTemplatesRequest request);
+
+ /**
+ * Gets index templates.
+ */
+ void getTemplates(GetIndexTemplatesRequest request, ActionListener<GetIndexTemplatesResponse> listener);
+
+ /**
+ * Gets index templates (template names are optional).
+ */
+ GetIndexTemplatesRequestBuilder prepareGetTemplates(String... name);
+
+ /**
+ * Validate a query for correctness.
+ *
+ * @param request The validate query request
+ * @return The result future
+ */
+ ActionFuture<ValidateQueryResponse> validateQuery(ValidateQueryRequest request);
+
+ /**
+ * Validate a query for correctness.
+ *
+ * @param request The validate query request
+ * @param listener A listener to be notified of the result
+ */
+ void validateQuery(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener);
+
+ /**
+ * Validate a query for correctness.
+ */
+ ValidateQueryRequestBuilder prepareValidateQuery(String... indices);
+
+ /**
+ * Puts an index search warmer to be applied when applicable.
+ */
+ ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request);
+
+ /**
+ * Puts an index search warmer to be applied when applicable.
+ */
+ void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener);
+
+ /**
+ * Puts an index search warmer to be applied when applicable.
+ */
+ PutWarmerRequestBuilder preparePutWarmer(String name);
+
+ /**
+ * Deletes an index warmer.
+ */
+ ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request);
+
+ /**
+ * Deletes an index warmer.
+ */
+ void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener);
+
+ /**
+ * Deletes an index warmer.
+ */
+ DeleteWarmerRequestBuilder prepareDeleteWarmer();
+
+ /**
+ * Returns the warmers of one or more indices.
+ */
+ void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener);
+
+ ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request);
+
+ GetWarmersRequestBuilder prepareGetWarmers(String... indices);
+
+ /**
+ * Returns the settings of one or more indices.
+ */
+ void getSettings(GetSettingsRequest request, ActionListener<GetSettingsResponse> listener);
+
+ ActionFuture<GetSettingsResponse> getSettings(GetSettingsRequest request);
+
+ GetSettingsRequestBuilder prepareGetSettings(String... indices);
+}
diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java
new file mode 100644
index 0000000..8d5d3f9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/Requests.java
@@ -0,0 +1,548 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.common.xcontent.XContentType;
+
+/**
+ * A handy one-stop shop for creating requests (make sure to statically import this class).
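+ *
+ * <p>A minimal usage sketch (the index name is hypothetical, and an existing {@link Client} instance is assumed):
+ * <pre>{@code
+ * import static org.elasticsearch.client.Requests.*;
+ *
+ * SearchResponse response = client.search(searchRequest("twitter")).actionGet();
+ * }</pre>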
+ */
+public class Requests {
+
+ /**
+ * The content type used to generate request builders (query / search).
+ */
+ public static XContentType CONTENT_TYPE = XContentType.SMILE;
+
+ /**
+ * The default content type to use to generate source documents when indexing.
+ */
+ public static XContentType INDEX_CONTENT_TYPE = XContentType.JSON;
+
+ public static IndexRequest indexRequest() {
+ return new IndexRequest();
+ }
+
+ /**
+     * Creates an index request against a specific index. Note the {@link IndexRequest#type(String)} must be
+ * set as well and optionally the {@link IndexRequest#id(String)}.
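+     *
+     * <p>For example (a sketch; index, type, id, and source are hypothetical):
+     * <pre>{@code
+     * IndexResponse response = client.index(
+     *         indexRequest("twitter").type("tweet").id("1")
+     *                 .source("{\"user\":\"kimchy\"}")).actionGet();
+     * }</pre>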
+ *
+ * @param index The index name to index the request against
+ * @return The index request
+ * @see org.elasticsearch.client.Client#index(org.elasticsearch.action.index.IndexRequest)
+ */
+ public static IndexRequest indexRequest(String index) {
+ return new IndexRequest(index);
+ }
+
+ /**
+ * Creates a delete request against a specific index. Note the {@link DeleteRequest#type(String)} and
+ * {@link DeleteRequest#id(String)} must be set.
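+     *
+     * <p>For example (a sketch; index, type, and id are hypothetical):
+     * <pre>{@code
+     * DeleteResponse response = client.delete(
+     *         deleteRequest("twitter").type("tweet").id("1")).actionGet();
+     * }</pre>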
+ *
+ * @param index The index name to delete from
+ * @return The delete request
+ * @see org.elasticsearch.client.Client#delete(org.elasticsearch.action.delete.DeleteRequest)
+ */
+ public static DeleteRequest deleteRequest(String index) {
+ return new DeleteRequest(index);
+ }
+
+ /**
+     * Creates a new bulk request.
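+     *
+     * <p>For example (a sketch; the individual requests are hypothetical):
+     * <pre>{@code
+     * BulkResponse response = client.bulk(bulkRequest()
+     *         .add(indexRequest("twitter").type("tweet").id("1").source("{\"user\":\"kimchy\"}"))
+     *         .add(deleteRequest("twitter").type("tweet").id("2"))).actionGet();
+     * }</pre>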
+ */
+ public static BulkRequest bulkRequest() {
+ return new BulkRequest();
+ }
+
+ /**
+ * Creates a delete by query request. Note, the query itself must be set either by setting the JSON source
+ * of the query, or by using a {@link org.elasticsearch.index.query.QueryBuilder} (using {@link org.elasticsearch.index.query.QueryBuilders}).
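+     *
+     * <p>A sketch using the request builder instead (index name and query are hypothetical):
+     * <pre>{@code
+     * DeleteByQueryResponse response = client.prepareDeleteByQuery("twitter")
+     *         .setQuery(QueryBuilders.termQuery("user", "kimchy"))
+     *         .execute().actionGet();
+     * }</pre>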
+ *
+     * @param indices The indices to execute the delete by query against. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The delete by query request
+ * @see org.elasticsearch.client.Client#deleteByQuery(org.elasticsearch.action.deletebyquery.DeleteByQueryRequest)
+ */
+ public static DeleteByQueryRequest deleteByQueryRequest(String... indices) {
+ return new DeleteByQueryRequest(indices);
+ }
+
+ /**
+ * Creates a get request to get the JSON source from an index based on a type and id. Note, the
+ * {@link GetRequest#type(String)} and {@link GetRequest#id(String)} must be set.
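+     *
+     * <p>For example (a sketch; index, type, and id are hypothetical):
+     * <pre>{@code
+     * GetResponse response = client.get(
+     *         getRequest("twitter").type("tweet").id("1")).actionGet();
+     * }</pre>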
+ *
+ * @param index The index to get the JSON source from
+ * @return The get request
+ * @see org.elasticsearch.client.Client#get(org.elasticsearch.action.get.GetRequest)
+ */
+ public static GetRequest getRequest(String index) {
+ return new GetRequest(index);
+ }
+
+ /**
+ * Creates a count request which counts the hits matched against a query. Note, the query itself must be set
+ * either using the JSON source of the query, or using a {@link org.elasticsearch.index.query.QueryBuilder} (using {@link org.elasticsearch.index.query.QueryBuilders}).
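+     *
+     * <p>A sketch using the request builder instead (index name and query are hypothetical):
+     * <pre>{@code
+     * CountResponse response = client.prepareCount("twitter")
+     *         .setQuery(QueryBuilders.termQuery("user", "kimchy"))
+     *         .execute().actionGet();
+     * }</pre>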
+ *
+ * @param indices The indices to count matched documents against a query. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The count request
+ * @see org.elasticsearch.client.Client#count(org.elasticsearch.action.count.CountRequest)
+ */
+ public static CountRequest countRequest(String... indices) {
+ return new CountRequest(indices);
+ }
+
+ /**
+     * Creates a more like this request, used to search for documents that are "like" the provided (fetched)
+     * document.
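+     *
+     * <p>For example (a sketch; index, type, and id are hypothetical):
+     * <pre>{@code
+     * SearchResponse response = client.moreLikeThis(
+     *         moreLikeThisRequest("twitter").type("tweet").id("1")).actionGet();
+     * }</pre>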
+ *
+ * @param index The index to load the document from
+ * @return The more like this request
+ * @see org.elasticsearch.client.Client#moreLikeThis(org.elasticsearch.action.mlt.MoreLikeThisRequest)
+ */
+ public static MoreLikeThisRequest moreLikeThisRequest(String index) {
+ return new MoreLikeThisRequest(index);
+ }
+
+ /**
+ * Creates a search request against one or more indices. Note, the search source must be set either using the
+ * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.
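+     *
+     * <p>For example (a sketch; the index name and query are hypothetical):
+     * <pre>{@code
+     * SearchResponse response = client.search(
+     *         searchRequest("twitter")
+     *                 .source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))).actionGet();
+     * }</pre>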
+ *
+ * @param indices The indices to search against. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The search request
+ * @see org.elasticsearch.client.Client#search(org.elasticsearch.action.search.SearchRequest)
+ */
+ public static SearchRequest searchRequest(String... indices) {
+ return new SearchRequest(indices);
+ }
+
+ /**
+     * Creates a search scroll request that allows continuing a previously started scrollable search.
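+     *
+     * <p>For example (a sketch; {@code scrollId} comes from a previous search response):
+     * <pre>{@code
+     * SearchResponse nextPage = client.searchScroll(
+     *         searchScrollRequest(scrollId).scroll("1m")).actionGet();
+     * }</pre>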
+ *
+ * @param scrollId The scroll id representing the scrollable search
+ * @return The search scroll request
+ * @see org.elasticsearch.client.Client#searchScroll(org.elasticsearch.action.search.SearchScrollRequest)
+ */
+ public static SearchScrollRequest searchScrollRequest(String scrollId) {
+ return new SearchScrollRequest(scrollId);
+ }
+
+ /**
+ * Creates an indices status request.
+ *
+ * @param indices The indices to query status about. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The indices status request
+ * @see org.elasticsearch.client.IndicesAdminClient#status(org.elasticsearch.action.admin.indices.status.IndicesStatusRequest)
+ */
+ public static IndicesStatusRequest indicesStatusRequest(String... indices) {
+ return new IndicesStatusRequest(indices);
+ }
+
+ public static IndicesSegmentsRequest indicesSegmentsRequest(String... indices) {
+ return new IndicesSegmentsRequest(indices);
+ }
+
+ /**
+ * Creates an indices exists request.
+ *
+     * @param indices The indices to check whether they exist or not.
+ * @return The indices exists request
+ * @see org.elasticsearch.client.IndicesAdminClient#exists(org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest)
+ */
+ public static IndicesExistsRequest indicesExistsRequest(String... indices) {
+ return new IndicesExistsRequest(indices);
+ }
+
+ /**
+ * Creates a create index request.
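+     *
+     * <p>For example (a sketch; the index name is hypothetical):
+     * <pre>{@code
+     * CreateIndexResponse response = client.admin().indices().create(
+     *         createIndexRequest("twitter")).actionGet();
+     * }</pre>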
+ *
+ * @param index The index to create
+ * @return The index create request
+ * @see org.elasticsearch.client.IndicesAdminClient#create(org.elasticsearch.action.admin.indices.create.CreateIndexRequest)
+ */
+ public static CreateIndexRequest createIndexRequest(String index) {
+ return new CreateIndexRequest(index);
+ }
+
+ /**
+ * Creates a delete index request.
+ *
+ * @param index The index to delete
+ * @return The delete index request
+ * @see org.elasticsearch.client.IndicesAdminClient#delete(org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest)
+ */
+ public static DeleteIndexRequest deleteIndexRequest(String index) {
+ return new DeleteIndexRequest(index);
+ }
+
+ /**
+ * Creates a close index request.
+ *
+ * @param index The index to close
+     * @return The close index request
+ * @see org.elasticsearch.client.IndicesAdminClient#close(org.elasticsearch.action.admin.indices.close.CloseIndexRequest)
+ */
+ public static CloseIndexRequest closeIndexRequest(String index) {
+ return new CloseIndexRequest(index);
+ }
+
+ /**
+ * Creates an open index request.
+ *
+ * @param index The index to open
+     * @return The open index request
+ * @see org.elasticsearch.client.IndicesAdminClient#open(org.elasticsearch.action.admin.indices.open.OpenIndexRequest)
+ */
+ public static OpenIndexRequest openIndexRequest(String index) {
+ return new OpenIndexRequest(index);
+ }
+
+ /**
+     * Creates a put mapping request against one or more indices.
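+     *
+     * <p>For example (a sketch; index, type, and mapping source are hypothetical):
+     * <pre>{@code
+     * PutMappingResponse response = client.admin().indices().putMapping(
+     *         putMappingRequest("twitter").type("tweet")
+     *                 .source("{\"tweet\":{\"properties\":{\"user\":{\"type\":\"string\"}}}}")).actionGet();
+     * }</pre>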
+ *
+     * @param indices The indices the mapping will be put on. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+     * @return The put mapping request
+ * @see org.elasticsearch.client.IndicesAdminClient#putMapping(org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest)
+ */
+ public static PutMappingRequest putMappingRequest(String... indices) {
+ return new PutMappingRequest(indices);
+ }
+
+ /**
+ * Deletes mapping (and all its data) from one or more indices.
+ *
+ * @param indices The indices the mapping will be deleted from. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+     * @return The delete mapping request
+ * @see org.elasticsearch.client.IndicesAdminClient#deleteMapping(org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest)
+ */
+ public static DeleteMappingRequest deleteMappingRequest(String... indices) {
+ return new DeleteMappingRequest(indices);
+ }
+
+ /**
+ * Creates an index aliases request allowing to add and remove aliases.
+ *
+ * @return The index aliases request
+ */
+ public static IndicesAliasesRequest indexAliasesRequest() {
+ return new IndicesAliasesRequest();
+ }
+
+ /**
+ * Creates a refresh indices request.
+ *
+ * @param indices The indices to refresh. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The refresh request
+ * @see org.elasticsearch.client.IndicesAdminClient#refresh(org.elasticsearch.action.admin.indices.refresh.RefreshRequest)
+ */
+ public static RefreshRequest refreshRequest(String... indices) {
+ return new RefreshRequest(indices);
+ }
+
+ /**
+ * Creates a flush indices request.
+ *
+ * @param indices The indices to flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The flush request
+ * @see org.elasticsearch.client.IndicesAdminClient#flush(org.elasticsearch.action.admin.indices.flush.FlushRequest)
+ */
+ public static FlushRequest flushRequest(String... indices) {
+ return new FlushRequest(indices);
+ }
+
+ /**
+ * Creates an optimize request.
+ *
+ * @param indices The indices to optimize. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The optimize request
+ * @see org.elasticsearch.client.IndicesAdminClient#optimize(org.elasticsearch.action.admin.indices.optimize.OptimizeRequest)
+ */
+ public static OptimizeRequest optimizeRequest(String... indices) {
+ return new OptimizeRequest(indices);
+ }
+
+ /**
+ * Creates a gateway snapshot indices request.
+ *
+ * @param indices The indices the gateway snapshot will be performed on. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The gateway snapshot request
+ * @see org.elasticsearch.client.IndicesAdminClient#gatewaySnapshot(org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest)
+ * @deprecated Use snapshot/restore API instead
+ */
+ @Deprecated
+ public static GatewaySnapshotRequest gatewaySnapshotRequest(String... indices) {
+ return new GatewaySnapshotRequest(indices);
+ }
+
+ /**
+     * Creates a clear indices cache request.
+     *
+     * @param indices The indices whose caches will be cleared. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The request
+ */
+ public static ClearIndicesCacheRequest clearIndicesCacheRequest(String... indices) {
+ return new ClearIndicesCacheRequest(indices);
+ }
+
+ /**
+ * A request to update indices settings.
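+     *
+     * <p>For example (a sketch; the index name and setting are hypothetical):
+     * <pre>{@code
+     * client.admin().indices().updateSettings(
+     *         updateSettingsRequest("twitter")
+     *                 .settings(ImmutableSettings.settingsBuilder()
+     *                         .put("index.number_of_replicas", 2)
+     *                         .build())).actionGet();
+     * }</pre>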
+ *
+     * @param indices The indices to update the settings for. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices.
+ * @return The request
+ */
+ public static UpdateSettingsRequest updateSettingsRequest(String... indices) {
+ return new UpdateSettingsRequest(indices);
+ }
+
+ /**
+ * Creates a cluster state request.
+ *
+ * @return The cluster state request.
+ * @see org.elasticsearch.client.ClusterAdminClient#state(org.elasticsearch.action.admin.cluster.state.ClusterStateRequest)
+ */
+ public static ClusterStateRequest clusterStateRequest() {
+ return new ClusterStateRequest();
+ }
+
+ public static ClusterRerouteRequest clusterRerouteRequest() {
+ return new ClusterRerouteRequest();
+ }
+
+ public static ClusterUpdateSettingsRequest clusterUpdateSettingsRequest() {
+ return new ClusterUpdateSettingsRequest();
+ }
+
+ /**
+ * Creates a cluster health request.
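+     *
+     * <p>For example (a sketch, assuming an existing {@code client}):
+     * <pre>{@code
+     * ClusterHealthResponse health = client.admin().cluster().health(
+     *         clusterHealthRequest()).actionGet();
+     * }</pre>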
+ *
+ * @param indices The indices to provide additional cluster health information for. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The cluster health request
+ * @see org.elasticsearch.client.ClusterAdminClient#health(org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest)
+ */
+ public static ClusterHealthRequest clusterHealthRequest(String... indices) {
+ return new ClusterHealthRequest(indices);
+ }
+
+ /**
+     * Lists all shards for the given search
+ */
+ public static ClusterSearchShardsRequest clusterSearchShardsRequest() {
+ return new ClusterSearchShardsRequest();
+ }
+
+ /**
+     * Lists all shards for the given search
+ */
+ public static ClusterSearchShardsRequest clusterSearchShardsRequest(String... indices) {
+ return new ClusterSearchShardsRequest(indices);
+ }
+
+ /**
+ * Creates a nodes info request against all the nodes.
+ *
+ * @return The nodes info request
+ * @see org.elasticsearch.client.ClusterAdminClient#nodesInfo(org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest)
+ */
+ public static NodesInfoRequest nodesInfoRequest() {
+ return new NodesInfoRequest();
+ }
+
+ /**
+ * Creates a nodes info request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
+ *
+     * @param nodesIds The nodes ids to get the info for
+     * @return The nodes info request
+     * @see org.elasticsearch.client.ClusterAdminClient#nodesInfo(org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest)
+ */
+ public static NodesInfoRequest nodesInfoRequest(String... nodesIds) {
+ return new NodesInfoRequest(nodesIds);
+ }
+
+ /**
+ * Creates a nodes stats request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
+ *
+ * @param nodesIds The nodes ids to get the stats for
+     * @return The nodes stats request
+ * @see org.elasticsearch.client.ClusterAdminClient#nodesStats(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest)
+ */
+ public static NodesStatsRequest nodesStatsRequest(String... nodesIds) {
+ return new NodesStatsRequest(nodesIds);
+ }
+
+ /**
+ * Creates a cluster stats request.
+ *
+ * @return The cluster stats request
+ * @see org.elasticsearch.client.ClusterAdminClient#clusterStats(org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest)
+ */
+ public static ClusterStatsRequest clusterStatsRequest() {
+ return new ClusterStatsRequest();
+ }
+
+ /**
+ * Shuts down all nodes in the cluster.
+ */
+ public static NodesShutdownRequest nodesShutdownRequest() {
+ return new NodesShutdownRequest();
+ }
+
+ /**
+ * Shuts down the specified nodes in the cluster.
+ *
+     * @param nodesIds The nodes ids to shut down
+     * @return The nodes shutdown request
+ * @see org.elasticsearch.client.ClusterAdminClient#nodesShutdown(org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest)
+ */
+ public static NodesShutdownRequest nodesShutdownRequest(String... nodesIds) {
+ return new NodesShutdownRequest(nodesIds);
+ }
+
+ /**
+ * Restarts all nodes in the cluster.
+ */
+ public static NodesRestartRequest nodesRestartRequest() {
+ return new NodesRestartRequest();
+ }
+
+ /**
+ * Restarts specific nodes in the cluster.
+ *
+ * @param nodesIds The nodes ids to restart
+     * @return The nodes restart request
+ * @see org.elasticsearch.client.ClusterAdminClient#nodesRestart(org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest)
+ */
+ public static NodesRestartRequest nodesRestartRequest(String... nodesIds) {
+ return new NodesRestartRequest(nodesIds);
+ }
+
+ /**
+     * Registers a snapshot repository
+ *
+ * @param name repository name
+ * @return repository registration request
+ */
+ public static PutRepositoryRequest putRepositoryRequest(String name) {
+ return new PutRepositoryRequest(name);
+ }
+
+ /**
+     * Gets snapshot repositories
+ *
+ * @param repositories names of repositories
+ * @return get repository request
+ */
+ public static GetRepositoriesRequest getRepositoryRequest(String... repositories) {
+ return new GetRepositoriesRequest(repositories);
+ }
+
+ /**
+     * Deletes the registration of a snapshot repository
+ *
+ * @param name repository name
+ * @return delete repository request
+ */
+ public static DeleteRepositoryRequest deleteRepositoryRequest(String name) {
+ return new DeleteRepositoryRequest(name);
+ }
+
+ /**
+     * Creates a new snapshot
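+     *
+     * <p>For example (a sketch; repository, snapshot, and index names are hypothetical):
+     * <pre>{@code
+     * CreateSnapshotResponse response = client.admin().cluster().createSnapshot(
+     *         createSnapshotRequest("my_repo", "snapshot_1")
+     *                 .indices("twitter")
+     *                 .waitForCompletion(true)).actionGet();
+     * }</pre>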
+ *
+ * @param repository repository name
+ * @param snapshot snapshot name
+ * @return create snapshot request
+ */
+ public static CreateSnapshotRequest createSnapshotRequest(String repository, String snapshot) {
+ return new CreateSnapshotRequest(repository, snapshot);
+ }
+
+ /**
+     * Gets snapshots from a repository
+ *
+ * @param repository repository name
+ * @return get snapshot request
+ */
+ public static GetSnapshotsRequest getSnapshotsRequest(String repository) {
+ return new GetSnapshotsRequest(repository);
+ }
+
+ /**
+     * Restores a snapshot
+     *
+     * @param repository repository name
+     * @param snapshot   snapshot name
+     * @return restore snapshot request
+ */
+ public static RestoreSnapshotRequest restoreSnapshotRequest(String repository, String snapshot) {
+ return new RestoreSnapshotRequest(repository, snapshot);
+ }
+
+ /**
+     * Deletes a snapshot
+     *
+     * @param repository repository name
+     * @param snapshot   snapshot name
+     * @return delete snapshot request
+ */
+ public static DeleteSnapshotRequest deleteSnapshotRequest(String repository, String snapshot) {
+ return new DeleteSnapshotRequest(repository, snapshot);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/internal/InternalClient.java b/src/main/java/org/elasticsearch/client/internal/InternalClient.java
new file mode 100644
index 0000000..c46da90
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/internal/InternalClient.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.internal;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public interface InternalClient extends Client, InternalGenericClient {
+
+ Settings settings();
+}
diff --git a/src/main/java/org/elasticsearch/client/internal/InternalClusterAdminClient.java b/src/main/java/org/elasticsearch/client/internal/InternalClusterAdminClient.java
new file mode 100644
index 0000000..bc44fc3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/internal/InternalClusterAdminClient.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.internal;
+
+import org.elasticsearch.client.ClusterAdminClient;
+
+/**
+ *
+ */
+public interface InternalClusterAdminClient extends ClusterAdminClient, InternalGenericClient {
+}
diff --git a/src/main/java/org/elasticsearch/client/internal/InternalGenericClient.java b/src/main/java/org/elasticsearch/client/internal/InternalGenericClient.java
new file mode 100644
index 0000000..44d5856
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/internal/InternalGenericClient.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.internal;
+
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ */
+public interface InternalGenericClient {
+
+ ThreadPool threadPool();
+}
diff --git a/src/main/java/org/elasticsearch/client/internal/InternalIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/internal/InternalIndicesAdminClient.java
new file mode 100644
index 0000000..90f85d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/internal/InternalIndicesAdminClient.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.internal;
+
+import org.elasticsearch.client.IndicesAdminClient;
+
+/**
+ *
+ */
+public interface InternalIndicesAdminClient extends IndicesAdminClient, InternalGenericClient {
+
+}
diff --git a/src/main/java/org/elasticsearch/client/node/NodeAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeAdminClient.java
new file mode 100644
index 0000000..40e2517
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/node/NodeAdminClient.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.node;
+
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class NodeAdminClient extends AbstractComponent implements AdminClient {
+
+ private final NodeIndicesAdminClient indicesAdminClient;
+
+ private final NodeClusterAdminClient clusterAdminClient;
+
+ @Inject
+ public NodeAdminClient(Settings settings, NodeClusterAdminClient clusterAdminClient, NodeIndicesAdminClient indicesAdminClient) {
+ super(settings);
+ this.indicesAdminClient = indicesAdminClient;
+ this.clusterAdminClient = clusterAdminClient;
+ }
+
+ @Override
+ public IndicesAdminClient indices() {
+ return indicesAdminClient;
+ }
+
+ @Override
+ public ClusterAdminClient cluster() {
+ return this.clusterAdminClient;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/node/NodeClient.java b/src/main/java/org/elasticsearch/client/node/NodeClient.java
new file mode 100644
index 0000000..5df16a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/node/NodeClient.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.node;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class NodeClient extends AbstractClient implements InternalClient {
+
+ private final Settings settings;
+ private final ThreadPool threadPool;
+
+ private final NodeAdminClient admin;
+
+ private final ImmutableMap<Action, TransportAction> actions;
+
+ @Inject
+ public NodeClient(Settings settings, ThreadPool threadPool, NodeAdminClient admin, Map<GenericAction, TransportAction> actions) {
+ this.settings = settings;
+ this.threadPool = threadPool;
+ this.admin = admin;
+ MapBuilder<Action, TransportAction> actionsBuilder = new MapBuilder<Action, TransportAction>();
+ for (Map.Entry<GenericAction, TransportAction> entry : actions.entrySet()) {
+ if (entry.getKey() instanceof Action) {
+ actionsBuilder.put((Action) entry.getKey(), entry.getValue());
+ }
+ }
+ this.actions = actionsBuilder.immutableMap();
+ }
+
+ @Override
+ public Settings settings() {
+ return this.settings;
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ @Override
+ public void close() {
+ // nothing really to do
+ }
+
+ @Override
+ public AdminClient admin() {
+ return this.admin;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
+ TransportAction<Request, Response> transportAction = actions.get(action);
+ return transportAction.execute(request);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ TransportAction<Request, Response> transportAction = actions.get(action);
+ transportAction.execute(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/node/NodeClientModule.java b/src/main/java/org/elasticsearch/client/node/NodeClientModule.java
new file mode 100644
index 0000000..6805d26
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/node/NodeClientModule.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.node;
+
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class NodeClientModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ClusterAdminClient.class).to(NodeClusterAdminClient.class).asEagerSingleton();
+ bind(IndicesAdminClient.class).to(NodeIndicesAdminClient.class).asEagerSingleton();
+ bind(AdminClient.class).to(NodeAdminClient.class).asEagerSingleton();
+ bind(Client.class).to(NodeClient.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java
new file mode 100644
index 0000000..9023098
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.node;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.client.support.AbstractClusterAdminClient;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class NodeClusterAdminClient extends AbstractClusterAdminClient implements InternalClusterAdminClient {
+
+ private final ThreadPool threadPool;
+
+ private final ImmutableMap<ClusterAction, TransportAction> actions;
+
+ @Inject
+ public NodeClusterAdminClient(Settings settings, ThreadPool threadPool, Map<GenericAction, TransportAction> actions) {
+ this.threadPool = threadPool;
+ MapBuilder<ClusterAction, TransportAction> actionsBuilder = new MapBuilder<ClusterAction, TransportAction>();
+ for (Map.Entry<GenericAction, TransportAction> entry : actions.entrySet()) {
+ if (entry.getKey() instanceof ClusterAction) {
+ actionsBuilder.put((ClusterAction) entry.getKey(), entry.getValue());
+ }
+ }
+ this.actions = actionsBuilder.immutableMap();
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(ClusterAction<Request, Response, RequestBuilder> action, Request request) {
+ TransportAction<Request, Response> transportAction = actions.get(action);
+ return transportAction.execute(request);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(ClusterAction<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ TransportAction<Request, Response> transportAction = actions.get(action);
+ transportAction.execute(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java
new file mode 100644
index 0000000..dc22149
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.node;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.support.AbstractIndicesAdminClient;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class NodeIndicesAdminClient extends AbstractIndicesAdminClient implements IndicesAdminClient {
+
+ private final ThreadPool threadPool;
+
+ private final ImmutableMap<IndicesAction, TransportAction> actions;
+
+ @Inject
+ public NodeIndicesAdminClient(Settings settings, ThreadPool threadPool, Map<GenericAction, TransportAction> actions) {
+ this.threadPool = threadPool;
+ MapBuilder<IndicesAction, TransportAction> actionsBuilder = new MapBuilder<IndicesAction, TransportAction>();
+ for (Map.Entry<GenericAction, TransportAction> entry : actions.entrySet()) {
+ if (entry.getKey() instanceof IndicesAction) {
+ actionsBuilder.put((IndicesAction) entry.getKey(), entry.getValue());
+ }
+ }
+ this.actions = actionsBuilder.immutableMap();
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(IndicesAction<Request, Response, RequestBuilder> action, Request request) {
+ TransportAction<Request, Response> transportAction = actions.get(action);
+ return transportAction.execute(request);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(IndicesAction<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ TransportAction<Request, Response> transportAction = actions.get(action);
+ transportAction.execute(request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/package-info.java b/src/main/java/org/elasticsearch/client/package-info.java
new file mode 100644
index 0000000..cd35002
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * The client module, which allows actions/operations to be performed easily.
+ */
+package org.elasticsearch.client;
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java
new file mode 100644
index 0000000..53e108e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.support;
+
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.bulk.BulkAction;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountAction;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteAction;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteRequestBuilder;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryAction;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.explain.ExplainAction;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.explain.ExplainRequestBuilder;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.mlt.MoreLikeThisAction;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder;
+import org.elasticsearch.action.percolate.*;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.suggest.SuggestAction;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.termvector.*;
+import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.Nullable;
+
+/**
+ *
+ */
+public abstract class AbstractClient implements InternalClient {
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action) {
+ return action.newRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<IndexResponse> index(final IndexRequest request) {
+ return execute(IndexAction.INSTANCE, request);
+ }
+
+ @Override
+ public void index(final IndexRequest request, final ActionListener<IndexResponse> listener) {
+ execute(IndexAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex() {
+ return new IndexRequestBuilder(this, null);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex(String index, String type) {
+ return prepareIndex(index, type, null);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex(String index, String type, @Nullable String id) {
+ return prepareIndex().setIndex(index).setType(type).setId(id);
+ }
+
+ @Override
+ public ActionFuture<UpdateResponse> update(final UpdateRequest request) {
+ return execute(UpdateAction.INSTANCE, request);
+ }
+
+ @Override
+ public void update(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
+ execute(UpdateAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public UpdateRequestBuilder prepareUpdate() {
+ return new UpdateRequestBuilder(this, null, null, null);
+ }
+
+ @Override
+ public UpdateRequestBuilder prepareUpdate(String index, String type, String id) {
+ return new UpdateRequestBuilder(this, index, type, id);
+ }
+
+ @Override
+ public ActionFuture<DeleteResponse> delete(final DeleteRequest request) {
+ return execute(DeleteAction.INSTANCE, request);
+ }
+
+ @Override
+ public void delete(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+ execute(DeleteAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteRequestBuilder prepareDelete() {
+ return new DeleteRequestBuilder(this, null);
+ }
+
+ @Override
+ public DeleteRequestBuilder prepareDelete(String index, String type, String id) {
+ return prepareDelete().setIndex(index).setType(type).setId(id);
+ }
+
+ @Override
+ public ActionFuture<BulkResponse> bulk(final BulkRequest request) {
+ return execute(BulkAction.INSTANCE, request);
+ }
+
+ @Override
+ public void bulk(final BulkRequest request, final ActionListener<BulkResponse> listener) {
+ execute(BulkAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public BulkRequestBuilder prepareBulk() {
+ return new BulkRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<DeleteByQueryResponse> deleteByQuery(final DeleteByQueryRequest request) {
+ return execute(DeleteByQueryAction.INSTANCE, request);
+ }
+
+ @Override
+ public void deleteByQuery(final DeleteByQueryRequest request, final ActionListener<DeleteByQueryResponse> listener) {
+ execute(DeleteByQueryAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices) {
+ return new DeleteByQueryRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<GetResponse> get(final GetRequest request) {
+ return execute(GetAction.INSTANCE, request);
+ }
+
+ @Override
+ public void get(final GetRequest request, final ActionListener<GetResponse> listener) {
+ execute(GetAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetRequestBuilder prepareGet() {
+ return new GetRequestBuilder(this, null);
+ }
+
+ @Override
+ public GetRequestBuilder prepareGet(String index, String type, String id) {
+ return prepareGet().setIndex(index).setType(type).setId(id);
+ }
+
+ @Override
+ public ActionFuture<MultiGetResponse> multiGet(final MultiGetRequest request) {
+ return execute(MultiGetAction.INSTANCE, request);
+ }
+
+ @Override
+ public void multiGet(final MultiGetRequest request, final ActionListener<MultiGetResponse> listener) {
+ execute(MultiGetAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public MultiGetRequestBuilder prepareMultiGet() {
+ return new MultiGetRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> search(final SearchRequest request) {
+ return execute(SearchAction.INSTANCE, request);
+ }
+
+ @Override
+ public void search(final SearchRequest request, final ActionListener<SearchResponse> listener) {
+ execute(SearchAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public SearchRequestBuilder prepareSearch(String... indices) {
+ return new SearchRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> searchScroll(final SearchScrollRequest request) {
+ return execute(SearchScrollAction.INSTANCE, request);
+ }
+
+ @Override
+ public void searchScroll(final SearchScrollRequest request, final ActionListener<SearchResponse> listener) {
+ execute(SearchScrollAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) {
+ return new SearchScrollRequestBuilder(this, scrollId);
+ }
+
+ @Override
+ public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
+ return execute(MultiSearchAction.INSTANCE, request);
+ }
+
+ @Override
+ public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
+ execute(MultiSearchAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public MultiSearchRequestBuilder prepareMultiSearch() {
+ return new MultiSearchRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<CountResponse> count(final CountRequest request) {
+ return execute(CountAction.INSTANCE, request);
+ }
+
+ @Override
+ public void count(final CountRequest request, final ActionListener<CountResponse> listener) {
+ execute(CountAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public CountRequestBuilder prepareCount(String... indices) {
+ return new CountRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<SuggestResponse> suggest(final SuggestRequest request) {
+ return execute(SuggestAction.INSTANCE, request);
+ }
+
+ @Override
+ public void suggest(final SuggestRequest request, final ActionListener<SuggestResponse> listener) {
+ execute(SuggestAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public SuggestRequestBuilder prepareSuggest(String... indices) {
+ return new SuggestRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> moreLikeThis(final MoreLikeThisRequest request) {
+ return execute(MoreLikeThisAction.INSTANCE, request);
+ }
+
+ @Override
+ public void moreLikeThis(final MoreLikeThisRequest request, final ActionListener<SearchResponse> listener) {
+ execute(MoreLikeThisAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) {
+ return new MoreLikeThisRequestBuilder(this, index, type, id);
+ }
+
+ @Override
+ public ActionFuture<TermVectorResponse> termVector(final TermVectorRequest request) {
+ return execute(TermVectorAction.INSTANCE, request);
+ }
+
+ @Override
+ public void termVector(final TermVectorRequest request, final ActionListener<TermVectorResponse> listener) {
+ execute(TermVectorAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public TermVectorRequestBuilder prepareTermVector(String index, String type, String id) {
+ return new TermVectorRequestBuilder(this, index, type, id);
+ }
+
+ @Override
+ public ActionFuture<MultiTermVectorsResponse> multiTermVectors(final MultiTermVectorsRequest request) {
+ return execute(MultiTermVectorsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void multiTermVectors(final MultiTermVectorsRequest request, final ActionListener<MultiTermVectorsResponse> listener) {
+ execute(MultiTermVectorsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public MultiTermVectorsRequestBuilder prepareMultiTermVectors() {
+ return new MultiTermVectorsRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<PercolateResponse> percolate(final PercolateRequest request) {
+ return execute(PercolateAction.INSTANCE, request);
+ }
+
+ @Override
+ public void percolate(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
+ execute(PercolateAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public PercolateRequestBuilder preparePercolate() {
+ return new PercolateRequestBuilder(this);
+ }
+
+ @Override
+ public MultiPercolateRequestBuilder prepareMultiPercolate() {
+ return new MultiPercolateRequestBuilder(this);
+ }
+
+ @Override
+ public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
+ execute(MultiPercolateAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
+ return execute(MultiPercolateAction.INSTANCE, request);
+ }
+
+ @Override
+ public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
+ return new ExplainRequestBuilder(this, index, type, id);
+ }
+
+ @Override
+ public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
+ return execute(ExplainAction.INSTANCE, request);
+ }
+
+ @Override
+ public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
+ execute(ExplainAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) {
+ execute(ClearScrollAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
+ return execute(ClearScrollAction.INSTANCE, request);
+ }
+
+ @Override
+ public ClearScrollRequestBuilder prepareClearScroll() {
+ return new ClearScrollRequestBuilder(this);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java
new file mode 100644
index 0000000..6b6944e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.support;
+
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartAction;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartResponse;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+
+/**
+ * Base class for cluster-level admin clients. Every cluster API is implemented
+ * as a thin delegation to {@code execute} with the matching {@code Action.INSTANCE},
+ * in three flavors: a future-returning call, a listener-based call, and a
+ * {@code prepareXxx} request-builder factory.
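+ *
+ * <p>A typical call through a concrete subclass follows the sketch below (the
+ * index name and wait condition are illustrative, not part of this class):
+ * <pre>
+ * ClusterHealthResponse health = client.admin().cluster()
+ *         .prepareHealth("my-index")
+ *         .setWaitForYellowStatus()
+ *         .execute().actionGet();
+ * </pre>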
+ */
+public abstract class AbstractClusterAdminClient implements InternalClusterAdminClient {
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(ClusterAction<Request, Response, RequestBuilder> action) {
+ return action.newRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<ClusterHealthResponse> health(final ClusterHealthRequest request) {
+ return execute(ClusterHealthAction.INSTANCE, request);
+ }
+
+ @Override
+ public void health(final ClusterHealthRequest request, final ActionListener<ClusterHealthResponse> listener) {
+ execute(ClusterHealthAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClusterHealthRequestBuilder prepareHealth(String... indices) {
+ return new ClusterHealthRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<ClusterStateResponse> state(final ClusterStateRequest request) {
+ return execute(ClusterStateAction.INSTANCE, request);
+ }
+
+ @Override
+ public void state(final ClusterStateRequest request, final ActionListener<ClusterStateResponse> listener) {
+ execute(ClusterStateAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClusterStateRequestBuilder prepareState() {
+ return new ClusterStateRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<ClusterRerouteResponse> reroute(final ClusterRerouteRequest request) {
+ return execute(ClusterRerouteAction.INSTANCE, request);
+ }
+
+ @Override
+ public void reroute(final ClusterRerouteRequest request, final ActionListener<ClusterRerouteResponse> listener) {
+ execute(ClusterRerouteAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClusterRerouteRequestBuilder prepareReroute() {
+ return new ClusterRerouteRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<ClusterUpdateSettingsResponse> updateSettings(final ClusterUpdateSettingsRequest request) {
+ return execute(ClusterUpdateSettingsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void updateSettings(final ClusterUpdateSettingsRequest request, final ActionListener<ClusterUpdateSettingsResponse> listener) {
+ execute(ClusterUpdateSettingsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() {
+ return new ClusterUpdateSettingsRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<NodesInfoResponse> nodesInfo(final NodesInfoRequest request) {
+ return execute(NodesInfoAction.INSTANCE, request);
+ }
+
+ @Override
+ public void nodesInfo(final NodesInfoRequest request, final ActionListener<NodesInfoResponse> listener) {
+ execute(NodesInfoAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds) {
+ return new NodesInfoRequestBuilder(this).setNodesIds(nodesIds);
+ }
+
+ @Override
+ public ActionFuture<NodesStatsResponse> nodesStats(final NodesStatsRequest request) {
+ return execute(NodesStatsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void nodesStats(final NodesStatsRequest request, final ActionListener<NodesStatsResponse> listener) {
+ execute(NodesStatsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public NodesStatsRequestBuilder prepareNodesStats(String... nodesIds) {
+ return new NodesStatsRequestBuilder(this).setNodesIds(nodesIds);
+ }
+
+ @Override
+ public ActionFuture<ClusterStatsResponse> clusterStats(ClusterStatsRequest request) {
+ return execute(ClusterStatsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void clusterStats(ClusterStatsRequest request, ActionListener<ClusterStatsResponse> listener) {
+ execute(ClusterStatsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClusterStatsRequestBuilder prepareClusterStats() {
+ return new ClusterStatsRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<NodesHotThreadsResponse> nodesHotThreads(NodesHotThreadsRequest request) {
+ return execute(NodesHotThreadsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void nodesHotThreads(NodesHotThreadsRequest request, ActionListener<NodesHotThreadsResponse> listener) {
+ execute(NodesHotThreadsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds) {
+ return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds);
+ }
+
+ @Override
+ public ActionFuture<NodesRestartResponse> nodesRestart(final NodesRestartRequest request) {
+ return execute(NodesRestartAction.INSTANCE, request);
+ }
+
+ @Override
+ public void nodesRestart(final NodesRestartRequest request, final ActionListener<NodesRestartResponse> listener) {
+ execute(NodesRestartAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public NodesRestartRequestBuilder prepareNodesRestart(String... nodesIds) {
+ return new NodesRestartRequestBuilder(this).setNodesIds(nodesIds);
+ }
+
+ @Override
+ public ActionFuture<NodesShutdownResponse> nodesShutdown(final NodesShutdownRequest request) {
+ return execute(NodesShutdownAction.INSTANCE, request);
+ }
+
+ @Override
+ public void nodesShutdown(final NodesShutdownRequest request, final ActionListener<NodesShutdownResponse> listener) {
+ execute(NodesShutdownAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public NodesShutdownRequestBuilder prepareNodesShutdown(String... nodesIds) {
+ return new NodesShutdownRequestBuilder(this).setNodesIds(nodesIds);
+ }
+
+ @Override
+ public ActionFuture<ClusterSearchShardsResponse> searchShards(final ClusterSearchShardsRequest request) {
+ return execute(ClusterSearchShardsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void searchShards(final ClusterSearchShardsRequest request, final ActionListener<ClusterSearchShardsResponse> listener) {
+ execute(ClusterSearchShardsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClusterSearchShardsRequestBuilder prepareSearchShards() {
+ return new ClusterSearchShardsRequestBuilder(this);
+ }
+
+ @Override
+ public ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices) {
+ return new ClusterSearchShardsRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public PendingClusterTasksRequestBuilder preparePendingClusterTasks() {
+ return new PendingClusterTasksRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<PendingClusterTasksResponse> pendingClusterTasks(PendingClusterTasksRequest request) {
+ return execute(PendingClusterTasksAction.INSTANCE, request);
+ }
+
+ @Override
+ public void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener<PendingClusterTasksResponse> listener) {
+ execute(PendingClusterTasksAction.INSTANCE, request, listener);
+ }
+
+    @Override
+    public ActionFuture<PutRepositoryResponse> putRepository(PutRepositoryRequest request) {
+ return execute(PutRepositoryAction.INSTANCE, request);
+ }
+
+ @Override
+ public void putRepository(PutRepositoryRequest request, ActionListener<PutRepositoryResponse> listener) {
+ execute(PutRepositoryAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public PutRepositoryRequestBuilder preparePutRepository(String name) {
+ return new PutRepositoryRequestBuilder(this, name);
+ }
+
+ @Override
+ public ActionFuture<CreateSnapshotResponse> createSnapshot(CreateSnapshotRequest request) {
+ return execute(CreateSnapshotAction.INSTANCE, request);
+ }
+
+ @Override
+ public void createSnapshot(CreateSnapshotRequest request, ActionListener<CreateSnapshotResponse> listener) {
+ execute(CreateSnapshotAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public CreateSnapshotRequestBuilder prepareCreateSnapshot(String repository, String name) {
+ return new CreateSnapshotRequestBuilder(this, repository, name);
+ }
+
+ @Override
+ public ActionFuture<GetSnapshotsResponse> getSnapshots(GetSnapshotsRequest request) {
+ return execute(GetSnapshotsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void getSnapshots(GetSnapshotsRequest request, ActionListener<GetSnapshotsResponse> listener) {
+ execute(GetSnapshotsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetSnapshotsRequestBuilder prepareGetSnapshots(String repository) {
+ return new GetSnapshotsRequestBuilder(this, repository);
+ }
+
+ @Override
+ public ActionFuture<DeleteSnapshotResponse> deleteSnapshot(DeleteSnapshotRequest request) {
+ return execute(DeleteSnapshotAction.INSTANCE, request);
+ }
+
+ @Override
+ public void deleteSnapshot(DeleteSnapshotRequest request, ActionListener<DeleteSnapshotResponse> listener) {
+ execute(DeleteSnapshotAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String name) {
+ return new DeleteSnapshotRequestBuilder(this, repository, name);
+ }
+
+ @Override
+ public ActionFuture<DeleteRepositoryResponse> deleteRepository(DeleteRepositoryRequest request) {
+ return execute(DeleteRepositoryAction.INSTANCE, request);
+ }
+
+ @Override
+ public void deleteRepository(DeleteRepositoryRequest request, ActionListener<DeleteRepositoryResponse> listener) {
+ execute(DeleteRepositoryAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) {
+ return new DeleteRepositoryRequestBuilder(this, name);
+ }
+
+ @Override
+ public ActionFuture<GetRepositoriesResponse> getRepositories(GetRepositoriesRequest request) {
+ return execute(GetRepositoriesAction.INSTANCE, request);
+ }
+
+ @Override
+ public void getRepositories(GetRepositoriesRequest request, ActionListener<GetRepositoriesResponse> listener) {
+ execute(GetRepositoriesAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetRepositoriesRequestBuilder prepareGetRepositories(String... name) {
+ return new GetRepositoriesRequestBuilder(this, name);
+ }
+
+ @Override
+ public ActionFuture<RestoreSnapshotResponse> restoreSnapshot(RestoreSnapshotRequest request) {
+ return execute(RestoreSnapshotAction.INSTANCE, request);
+ }
+
+ @Override
+ public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener<RestoreSnapshotResponse> listener) {
+ execute(RestoreSnapshotAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot) {
+ return new RestoreSnapshotRequestBuilder(this, repository, snapshot);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java
new file mode 100644
index 0000000..d880b7d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java
@@ -0,0 +1,622 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.support;
+
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexAction;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushAction;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotAction;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingAction;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.*;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeAction;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusAction;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequestBuilder;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.internal.InternalIndicesAdminClient;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * Base class for indices-level admin clients. Like its cluster-level counterpart,
+ * it maps every indices API onto {@code execute} with the matching
+ * {@code Action.INSTANCE} and exposes {@code prepareXxx} builder factories.
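+ *
+ * <p>For example, creating an index through a concrete subclass (the index name
+ * and shard count below are illustrative):
+ * <pre>
+ * CreateIndexResponse created = client.admin().indices()
+ *         .prepareCreate("my-index")
+ *         .setSettings(ImmutableSettings.settingsBuilder()
+ *                 .put("index.number_of_shards", 1)
+ *                 .build())
+ *         .execute().actionGet();
+ * </pre>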
+ */
+public abstract class AbstractIndicesAdminClient implements InternalIndicesAdminClient {
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final IndicesAction<Request, Response, RequestBuilder> action) {
+ return action.newRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<IndicesExistsResponse> exists(final IndicesExistsRequest request) {
+ return execute(IndicesExistsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void exists(final IndicesExistsRequest request, final ActionListener<IndicesExistsResponse> listener) {
+ execute(IndicesExistsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public IndicesExistsRequestBuilder prepareExists(String... indices) {
+ return new IndicesExistsRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<TypesExistsResponse> typesExists(TypesExistsRequest request) {
+ return execute(TypesExistsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void typesExists(TypesExistsRequest request, ActionListener<TypesExistsResponse> listener) {
+ execute(TypesExistsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public TypesExistsRequestBuilder prepareTypesExists(String... index) {
+ return new TypesExistsRequestBuilder(this, index);
+ }
+
+ @Override
+ public ActionFuture<IndicesAliasesResponse> aliases(final IndicesAliasesRequest request) {
+ return execute(IndicesAliasesAction.INSTANCE, request);
+ }
+
+ @Override
+ public void aliases(final IndicesAliasesRequest request, final ActionListener<IndicesAliasesResponse> listener) {
+ execute(IndicesAliasesAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public IndicesAliasesRequestBuilder prepareAliases() {
+ return new IndicesAliasesRequestBuilder(this);
+ }
+
+ @Override
+ public ActionFuture<GetAliasesResponse> getAliases(GetAliasesRequest request) {
+ return execute(GetAliasesAction.INSTANCE, request);
+ }
+
+ @Override
+ public void getAliases(GetAliasesRequest request, ActionListener<GetAliasesResponse> listener) {
+ execute(GetAliasesAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetAliasesRequestBuilder prepareGetAliases(String... aliases) {
+ return new GetAliasesRequestBuilder(this, aliases);
+ }
+
+ @Override
+ public ActionFuture<ClearIndicesCacheResponse> clearCache(final ClearIndicesCacheRequest request) {
+ return execute(ClearIndicesCacheAction.INSTANCE, request);
+ }
+
+ @Override
+ public void aliasesExist(GetAliasesRequest request, ActionListener<AliasesExistResponse> listener) {
+ execute(AliasesExistAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ActionFuture<AliasesExistResponse> aliasesExist(GetAliasesRequest request) {
+ return execute(AliasesExistAction.INSTANCE, request);
+ }
+
+ @Override
+ public AliasesExistRequestBuilder prepareAliasesExist(String... aliases) {
+ return new AliasesExistRequestBuilder(this, aliases);
+ }
+
+ @Override
+ public void clearCache(final ClearIndicesCacheRequest request, final ActionListener<ClearIndicesCacheResponse> listener) {
+ execute(ClearIndicesCacheAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ClearIndicesCacheRequestBuilder prepareClearCache(String... indices) {
+ return new ClearIndicesCacheRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<CreateIndexResponse> create(final CreateIndexRequest request) {
+ return execute(CreateIndexAction.INSTANCE, request);
+ }
+
+ @Override
+ public void create(final CreateIndexRequest request, final ActionListener<CreateIndexResponse> listener) {
+ execute(CreateIndexAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public CreateIndexRequestBuilder prepareCreate(String index) {
+ return new CreateIndexRequestBuilder(this, index);
+ }
+
+ @Override
+ public ActionFuture<DeleteIndexResponse> delete(final DeleteIndexRequest request) {
+ return execute(DeleteIndexAction.INSTANCE, request);
+ }
+
+ @Override
+ public void delete(final DeleteIndexRequest request, final ActionListener<DeleteIndexResponse> listener) {
+ execute(DeleteIndexAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteIndexRequestBuilder prepareDelete(String... indices) {
+ return new DeleteIndexRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<CloseIndexResponse> close(final CloseIndexRequest request) {
+ return execute(CloseIndexAction.INSTANCE, request);
+ }
+
+ @Override
+ public void close(final CloseIndexRequest request, final ActionListener<CloseIndexResponse> listener) {
+ execute(CloseIndexAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public CloseIndexRequestBuilder prepareClose(String... indices) {
+ return new CloseIndexRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<OpenIndexResponse> open(final OpenIndexRequest request) {
+ return execute(OpenIndexAction.INSTANCE, request);
+ }
+
+ @Override
+ public void open(final OpenIndexRequest request, final ActionListener<OpenIndexResponse> listener) {
+ execute(OpenIndexAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public OpenIndexRequestBuilder prepareOpen(String... indices) {
+ return new OpenIndexRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<FlushResponse> flush(final FlushRequest request) {
+ return execute(FlushAction.INSTANCE, request);
+ }
+
+ @Override
+ public void flush(final FlushRequest request, final ActionListener<FlushResponse> listener) {
+ execute(FlushAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public FlushRequestBuilder prepareFlush(String... indices) {
+ return new FlushRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<GatewaySnapshotResponse> gatewaySnapshot(final GatewaySnapshotRequest request) {
+ return execute(GatewaySnapshotAction.INSTANCE, request);
+ }
+
+ @Override
+ public void gatewaySnapshot(final GatewaySnapshotRequest request, final ActionListener<GatewaySnapshotResponse> listener) {
+ execute(GatewaySnapshotAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GatewaySnapshotRequestBuilder prepareGatewaySnapshot(String... indices) {
+ return new GatewaySnapshotRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
+ execute(GetMappingsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public void getFieldMappings(GetFieldMappingsRequest request, ActionListener<GetFieldMappingsResponse> listener) {
+ execute(GetFieldMappingsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetMappingsRequestBuilder prepareGetMappings(String... indices) {
+ return new GetMappingsRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<GetMappingsResponse> getMappings(GetMappingsRequest request) {
+ return execute(GetMappingsAction.INSTANCE, request);
+ }
+
+ @Override
+ public GetFieldMappingsRequestBuilder prepareGetFieldMappings(String... indices) {
+ return new GetFieldMappingsRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<GetFieldMappingsResponse> getFieldMappings(GetFieldMappingsRequest request) {
+ return execute(GetFieldMappingsAction.INSTANCE, request);
+ }
+
+ @Override
+ public ActionFuture<PutMappingResponse> putMapping(final PutMappingRequest request) {
+ return execute(PutMappingAction.INSTANCE, request);
+ }
+
+ @Override
+ public void putMapping(final PutMappingRequest request, final ActionListener<PutMappingResponse> listener) {
+ execute(PutMappingAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public PutMappingRequestBuilder preparePutMapping(String... indices) {
+ return new PutMappingRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<DeleteMappingResponse> deleteMapping(final DeleteMappingRequest request) {
+ return execute(DeleteMappingAction.INSTANCE, request);
+ }
+
+ @Override
+ public void deleteMapping(final DeleteMappingRequest request, final ActionListener<DeleteMappingResponse> listener) {
+ execute(DeleteMappingAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteMappingRequestBuilder prepareDeleteMapping(String... indices) {
+ return new DeleteMappingRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<OptimizeResponse> optimize(final OptimizeRequest request) {
+ return execute(OptimizeAction.INSTANCE, request);
+ }
+
+ @Override
+ public void optimize(final OptimizeRequest request, final ActionListener<OptimizeResponse> listener) {
+ execute(OptimizeAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public OptimizeRequestBuilder prepareOptimize(String... indices) {
+ return new OptimizeRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<RefreshResponse> refresh(final RefreshRequest request) {
+ return execute(RefreshAction.INSTANCE, request);
+ }
+
+ @Override
+ public void refresh(final RefreshRequest request, final ActionListener<RefreshResponse> listener) {
+ execute(RefreshAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public RefreshRequestBuilder prepareRefresh(String... indices) {
+ return new RefreshRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<IndicesStatsResponse> stats(final IndicesStatsRequest request) {
+ return execute(IndicesStatsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void stats(final IndicesStatsRequest request, final ActionListener<IndicesStatsResponse> listener) {
+ execute(IndicesStatsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public IndicesStatsRequestBuilder prepareStats(String... indices) {
+ return new IndicesStatsRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<IndicesStatusResponse> status(final IndicesStatusRequest request) {
+ return execute(IndicesStatusAction.INSTANCE, request);
+ }
+
+ @Override
+ public void status(final IndicesStatusRequest request, final ActionListener<IndicesStatusResponse> listener) {
+ execute(IndicesStatusAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public IndicesStatusRequestBuilder prepareStatus(String... indices) {
+ return new IndicesStatusRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<IndicesSegmentResponse> segments(final IndicesSegmentsRequest request) {
+ return execute(IndicesSegmentsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void segments(final IndicesSegmentsRequest request, final ActionListener<IndicesSegmentResponse> listener) {
+ execute(IndicesSegmentsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public IndicesSegmentsRequestBuilder prepareSegments(String... indices) {
+ return new IndicesSegmentsRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<UpdateSettingsResponse> updateSettings(final UpdateSettingsRequest request) {
+ return execute(UpdateSettingsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void updateSettings(final UpdateSettingsRequest request, final ActionListener<UpdateSettingsResponse> listener) {
+ execute(UpdateSettingsAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public UpdateSettingsRequestBuilder prepareUpdateSettings(String... indices) {
+ return new UpdateSettingsRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<AnalyzeResponse> analyze(final AnalyzeRequest request) {
+ return execute(AnalyzeAction.INSTANCE, request);
+ }
+
+ @Override
+ public void analyze(final AnalyzeRequest request, final ActionListener<AnalyzeResponse> listener) {
+ execute(AnalyzeAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public AnalyzeRequestBuilder prepareAnalyze(@Nullable String index, String text) {
+ return new AnalyzeRequestBuilder(this, index, text);
+ }
+
+ @Override
+ public AnalyzeRequestBuilder prepareAnalyze(String text) {
+ return new AnalyzeRequestBuilder(this, null, text);
+ }
+
+ @Override
+ public ActionFuture<PutIndexTemplateResponse> putTemplate(final PutIndexTemplateRequest request) {
+ return execute(PutIndexTemplateAction.INSTANCE, request);
+ }
+
+ @Override
+ public void putTemplate(final PutIndexTemplateRequest request, final ActionListener<PutIndexTemplateResponse> listener) {
+ execute(PutIndexTemplateAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public PutIndexTemplateRequestBuilder preparePutTemplate(String name) {
+ return new PutIndexTemplateRequestBuilder(this, name);
+ }
+
+ @Override
+ public ActionFuture<GetIndexTemplatesResponse> getTemplates(final GetIndexTemplatesRequest request) {
+ return execute(GetIndexTemplatesAction.INSTANCE, request);
+ }
+
+ @Override
+ public void getTemplates(final GetIndexTemplatesRequest request, final ActionListener<GetIndexTemplatesResponse> listener) {
+ execute(GetIndexTemplatesAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetIndexTemplatesRequestBuilder prepareGetTemplates(String... names) {
+ return new GetIndexTemplatesRequestBuilder(this, names);
+ }
+
+ @Override
+ public ActionFuture<DeleteIndexTemplateResponse> deleteTemplate(final DeleteIndexTemplateRequest request) {
+ return execute(DeleteIndexTemplateAction.INSTANCE, request);
+ }
+
+ @Override
+ public void deleteTemplate(final DeleteIndexTemplateRequest request, final ActionListener<DeleteIndexTemplateResponse> listener) {
+ execute(DeleteIndexTemplateAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteIndexTemplateRequestBuilder prepareDeleteTemplate(String name) {
+ return new DeleteIndexTemplateRequestBuilder(this, name);
+ }
+
+ @Override
+ public ActionFuture<ValidateQueryResponse> validateQuery(final ValidateQueryRequest request) {
+ return execute(ValidateQueryAction.INSTANCE, request);
+ }
+
+ @Override
+ public void validateQuery(final ValidateQueryRequest request, final ActionListener<ValidateQueryResponse> listener) {
+ execute(ValidateQueryAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ValidateQueryRequestBuilder prepareValidateQuery(String... indices) {
+ return new ValidateQueryRequestBuilder(this).setIndices(indices);
+ }
+
+ @Override
+ public ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request) {
+ return execute(PutWarmerAction.INSTANCE, request);
+ }
+
+ @Override
+ public void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener) {
+ execute(PutWarmerAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public PutWarmerRequestBuilder preparePutWarmer(String name) {
+ return new PutWarmerRequestBuilder(this, name);
+ }
+
+ @Override
+ public ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request) {
+ return execute(DeleteWarmerAction.INSTANCE, request);
+ }
+
+ @Override
+ public void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener) {
+ execute(DeleteWarmerAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public DeleteWarmerRequestBuilder prepareDeleteWarmer() {
+ return new DeleteWarmerRequestBuilder(this);
+ }
+
+ @Override
+ public GetWarmersRequestBuilder prepareGetWarmers(String... indices) {
+ return new GetWarmersRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request) {
+ return execute(GetWarmersAction.INSTANCE, request);
+ }
+
+ @Override
+ public void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener) {
+ execute(GetWarmersAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public GetSettingsRequestBuilder prepareGetSettings(String... indices) {
+ return new GetSettingsRequestBuilder(this, indices);
+ }
+
+ @Override
+ public ActionFuture<GetSettingsResponse> getSettings(GetSettingsRequest request) {
+ return execute(GetSettingsAction.INSTANCE, request);
+ }
+
+ @Override
+ public void getSettings(GetSettingsRequest request, ActionListener<GetSettingsResponse> listener) {
+ execute(GetSettingsAction.INSTANCE, request, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java
new file mode 100644
index 0000000..ccdfaf8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.client.transport.support.InternalTransportAdminClient;
+import org.elasticsearch.client.transport.support.InternalTransportClient;
+import org.elasticsearch.client.transport.support.InternalTransportClusterAdminClient;
+import org.elasticsearch.client.transport.support.InternalTransportIndicesAdminClient;
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * Guice module that wires the transport client internals: the internal client
+ * facades and the {@link TransportClientNodesService} that tracks the listed
+ * nodes, all bound as eager singletons.
+ */
+public class ClientTransportModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(InternalTransportClient.class).asEagerSingleton();
+ bind(InternalTransportAdminClient.class).asEagerSingleton();
+ bind(InternalTransportIndicesAdminClient.class).asEagerSingleton();
+ bind(InternalTransportClusterAdminClient.class).asEagerSingleton();
+ bind(TransportClientNodesService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java b/src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java
new file mode 100644
index 0000000..28617d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/NoNodeAvailableException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * An exception indicating no node is available to perform the operation.
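+ * <p>It is typically thrown by the transport client when none of its listed
+ * nodes is reachable. Callers often catch it to retry or fail over; a small
+ * illustrative sketch (index, type and id are placeholders):
+ * <pre>
+ * try {
+ *     client.prepareGet("my-index", "doc", "1").execute().actionGet();
+ * } catch (NoNodeAvailableException e) {
+ *     // every configured node is down or unreachable
+ * }
+ * </pre>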
+ */
+public class NoNodeAvailableException extends ElasticsearchException {
+
+ public NoNodeAvailableException() {
+ super("No node available");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java
new file mode 100644
index 0000000..efaea63
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -0,0 +1,492 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.percolate.PercolateRequest;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.termvector.MultiTermVectorsRequest;
+import org.elasticsearch.action.termvector.MultiTermVectorsResponse;
+import org.elasticsearch.action.termvector.TermVectorRequest;
+import org.elasticsearch.action.termvector.TermVectorResponse;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.client.transport.support.InternalTransportClient;
+import org.elasticsearch.cluster.ClusterNameModule;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.io.CachedStreams;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.monitor.MonitorService;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.PluginsModule;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.search.TransportSearchModule;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ * The transport client is a client that is not part of the cluster; it simply connects
+ * to one or more nodes directly, their addresses being added via
+ * {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
+ * <p/>
+ * <p>The most important module the transport client uses is the
+ * {@link org.elasticsearch.transport.TransportModule}, which is started in client
+ * mode (it only connects, it does not bind).
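+ * <p/>
+ * <p>A minimal usage sketch (the cluster name and address are illustrative):
+ * <pre>
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("cluster.name", "my-cluster")
+ *         .build();
+ * TransportClient client = new TransportClient(settings)
+ *         .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
+ * try {
+ *     // issue requests through the regular Client API ...
+ * } finally {
+ *     client.close();
+ * }
+ * </pre>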
+ */
+public class TransportClient extends AbstractClient {
+
+ private final Injector injector;
+
+ private final Settings settings;
+
+ private final Environment environment;
+
+ private final PluginsService pluginsService;
+
+ private final TransportClientNodesService nodesService;
+
+ private final InternalTransportClient internalClient;
+
+ /**
+ * Constructs a new transport client with settings loaded either from the classpath or the file system (the
+ * <tt>elasticsearch.(yml|json)</tt> files optionally prefixed with <tt>config/</tt>).
+ */
+ public TransportClient() throws ElasticsearchException {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS, true);
+ }
+
+ /**
+ * Constructs a new transport client with explicit settings and settings loaded either from the classpath or the file
+ * system (the <tt>elasticsearch.(yml|json)</tt> files optionally prefixed with <tt>config/</tt>).
+ */
+ public TransportClient(Settings settings) {
+ this(settings, true);
+ }
+
+ /**
+ * Constructs a new transport client with explicit settings and settings loaded either from the classpath or the file
+ * system (the <tt>elasticsearch.(yml|json)</tt> files optionally prefixed with <tt>config/</tt>).
+ */
+ public TransportClient(Settings.Builder settings) {
+ this(settings.build(), true);
+ }
+
+ /**
+ * Constructs a new transport client with the provided settings and the ability to control if settings will
+ * be loaded from the classpath / file system (the <tt>elasticsearch.(yml|json)</tt> files optionally prefixed with
+ * <tt>config/</tt>).
+ *
+ * @param settings The explicit settings.
+ * @param loadConfigSettings <tt>true</tt> if settings should be loaded from the classpath/file system.
+ * @throws org.elasticsearch.ElasticsearchException
+ */
+ public TransportClient(Settings.Builder settings, boolean loadConfigSettings) throws ElasticsearchException {
+ this(settings.build(), loadConfigSettings);
+ }
+
+ /**
+ * Constructs a new transport client with the provided settings and the ability to control if settings will
+ * be loaded from the classpath / file system (the <tt>elasticsearch.(yml|json)</tt> files optionally prefixed with
+ * <tt>config/</tt>).
+ *
+ * @param pSettings The explicit settings.
+ * @param loadConfigSettings <tt>true</tt> if settings should be loaded from the classpath/file system.
+ * @throws org.elasticsearch.ElasticsearchException
+ */
+ public TransportClient(Settings pSettings, boolean loadConfigSettings) throws ElasticsearchException {
+ Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings);
+ Settings settings = settingsBuilder().put(tuple.v1())
+ .put("network.server", false)
+ .put("node.client", true)
+ .build();
+ this.environment = tuple.v2();
+
+ this.pluginsService = new PluginsService(settings, tuple.v2());
+ this.settings = pluginsService.updatedSettings();
+
+ Version version = Version.CURRENT;
+
+ CompressorFactory.configure(this.settings);
+
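+        // Assemble a client-only module set: ActionModule(true) registers actions in
+        // proxy (client) mode, and ClientTransportModule binds the transport-client
+        // internals, including the TransportClientNodesService used below.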
+ ModulesBuilder modules = new ModulesBuilder();
+ modules.add(new Version.Module(version));
+ modules.add(new CacheRecyclerModule(settings));
+ modules.add(new PluginsModule(this.settings, pluginsService));
+ modules.add(new EnvironmentModule(environment));
+ modules.add(new SettingsModule(this.settings));
+ modules.add(new NetworkModule());
+ modules.add(new ClusterNameModule(this.settings));
+ modules.add(new ThreadPoolModule(this.settings));
+ modules.add(new TransportSearchModule());
+ modules.add(new TransportModule(this.settings));
+ modules.add(new ActionModule(true));
+ modules.add(new ClientTransportModule());
+
+ injector = modules.createInjector();
+
+ injector.getInstance(TransportService.class).start();
+
+ nodesService = injector.getInstance(TransportClientNodesService.class);
+ internalClient = injector.getInstance(InternalTransportClient.class);
+ }
+
+ /**
+ * Returns the currently registered transport addresses (added using
+ * {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}).
+ */
+ public ImmutableList<TransportAddress> transportAddresses() {
+ return nodesService.transportAddresses();
+ }
+
+ /**
+ * Returns the transport nodes this client is currently connected to.
+ * <p/>
+ * <p>These are the nodes, among the provided transport addresses, that are
+ * currently alive.
+ */
+ public ImmutableList<DiscoveryNode> connectedNodes() {
+ return nodesService.connectedNodes();
+ }
+
+ /**
+ * Returns the nodes that were filtered out and not connected to, for example
+ * because of a cluster-name mismatch.
+ */
+ public ImmutableList<DiscoveryNode> filteredNodes() {
+ return nodesService.filteredNodes();
+ }
+
+ /**
+ * Returns the listed nodes in the transport client (ones added to it).
+ */
+ public ImmutableList<DiscoveryNode> listedNodes() {
+ return nodesService.listedNodes();
+ }
+
+ /**
+ * Adds a transport address that this client will use to connect to a node.
+ * <p/>
+ * <p>The node this transport address represents will be used if it is possible to
+ * connect to it. If it is currently unavailable, it will be connected to
+ * automatically once it comes up.
+ * <p/>
+ * <p>To get the list of all currently connected nodes, see {@link #connectedNodes()}.
+ */
+ public TransportClient addTransportAddress(TransportAddress transportAddress) {
+ nodesService.addTransportAddresses(transportAddress);
+ return this;
+ }
+
+ /**
+ * Adds a list of transport addresses that this client will use to connect to nodes.
+ * <p/>
+ * <p>The nodes these addresses represent will be used if it is possible to connect
+ * to them. Any that are currently unavailable will be connected to automatically
+ * once they come up.
+ * <p/>
+ * <p>To get the list of all currently connected nodes, see {@link #connectedNodes()}.
+ */
+ public TransportClient addTransportAddresses(TransportAddress... transportAddress) {
+ nodesService.addTransportAddresses(transportAddress);
+ return this;
+ }
+
+ /**
+ * Removes a transport address from the list of transport addresses that are used to connect to the cluster.
+ */
+ public TransportClient removeTransportAddress(TransportAddress transportAddress) {
+ nodesService.removeTransportAddress(transportAddress);
+ return this;
+ }
+
+ /**
+ * Closes the client.
+ */
+ @Override
+ public void close() {
+ injector.getInstance(TransportClientNodesService.class).close();
+ injector.getInstance(TransportService.class).close();
+ try {
+ injector.getInstance(MonitorService.class).close();
+ } catch (Exception e) {
+ // ignore, the monitor service might not be bound
+ }
+
+ for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
+ injector.getInstance(plugin).close();
+ }
+
+ injector.getInstance(ThreadPool.class).shutdown();
+ try {
+ injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ Thread.currentThread().interrupt();
+ }
+ try {
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ injector.getInstance(CacheRecycler.class).close();
+ injector.getInstance(PageCacheRecycler.class).close();
+
+ CachedStreams.clear();
+ }
+
+ @Override
+ public Settings settings() {
+ return this.settings;
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return internalClient.threadPool();
+ }
+
+ @Override
+ public AdminClient admin() {
+ return internalClient.admin();
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
+ return internalClient.execute(action, request);
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ internalClient.execute(action, request, listener);
+ }
+
+ @Override
+ public ActionFuture<IndexResponse> index(IndexRequest request) {
+ return internalClient.index(request);
+ }
+
+ @Override
+ public void index(IndexRequest request, ActionListener<IndexResponse> listener) {
+ internalClient.index(request, listener);
+ }
+
+ @Override
+ public ActionFuture<UpdateResponse> update(UpdateRequest request) {
+ return internalClient.update(request);
+ }
+
+ @Override
+ public void update(UpdateRequest request, ActionListener<UpdateResponse> listener) {
+ internalClient.update(request, listener);
+ }
+
+ @Override
+ public ActionFuture<DeleteResponse> delete(DeleteRequest request) {
+ return internalClient.delete(request);
+ }
+
+ @Override
+ public void delete(DeleteRequest request, ActionListener<DeleteResponse> listener) {
+ internalClient.delete(request, listener);
+ }
+
+ @Override
+ public ActionFuture<BulkResponse> bulk(BulkRequest request) {
+ return internalClient.bulk(request);
+ }
+
+ @Override
+ public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) {
+ internalClient.bulk(request, listener);
+ }
+
+ @Override
+ public ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) {
+ return internalClient.deleteByQuery(request);
+ }
+
+ @Override
+ public void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) {
+ internalClient.deleteByQuery(request, listener);
+ }
+
+ @Override
+ public ActionFuture<GetResponse> get(GetRequest request) {
+ return internalClient.get(request);
+ }
+
+ @Override
+ public void get(GetRequest request, ActionListener<GetResponse> listener) {
+ internalClient.get(request, listener);
+ }
+
+ @Override
+ public ActionFuture<MultiGetResponse> multiGet(MultiGetRequest request) {
+ return internalClient.multiGet(request);
+ }
+
+ @Override
+ public void multiGet(MultiGetRequest request, ActionListener<MultiGetResponse> listener) {
+ internalClient.multiGet(request, listener);
+ }
+
+ @Override
+ public ActionFuture<CountResponse> count(CountRequest request) {
+ return internalClient.count(request);
+ }
+
+ @Override
+ public void count(CountRequest request, ActionListener<CountResponse> listener) {
+ internalClient.count(request, listener);
+ }
+
+ @Override
+ public ActionFuture<SuggestResponse> suggest(SuggestRequest request) {
+ return internalClient.suggest(request);
+ }
+
+ @Override
+ public void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener) {
+ internalClient.suggest(request, listener);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> search(SearchRequest request) {
+ return internalClient.search(request);
+ }
+
+ @Override
+ public void search(SearchRequest request, ActionListener<SearchResponse> listener) {
+ internalClient.search(request, listener);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> searchScroll(SearchScrollRequest request) {
+ return internalClient.searchScroll(request);
+ }
+
+ @Override
+ public void searchScroll(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
+ internalClient.searchScroll(request, listener);
+ }
+
+ @Override
+ public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
+ return internalClient.multiSearch(request);
+ }
+
+ @Override
+ public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
+ internalClient.multiSearch(request, listener);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> moreLikeThis(MoreLikeThisRequest request) {
+ return internalClient.moreLikeThis(request);
+ }
+
+ @Override
+ public void moreLikeThis(MoreLikeThisRequest request, ActionListener<SearchResponse> listener) {
+ internalClient.moreLikeThis(request, listener);
+ }
+
+ @Override
+ public ActionFuture<TermVectorResponse> termVector(TermVectorRequest request) {
+ return internalClient.termVector(request);
+ }
+
+ @Override
+ public void termVector(TermVectorRequest request, ActionListener<TermVectorResponse> listener) {
+ internalClient.termVector(request, listener);
+ }
+
+ @Override
+ public ActionFuture<MultiTermVectorsResponse> multiTermVectors(final MultiTermVectorsRequest request) {
+ return internalClient.multiTermVectors(request);
+ }
+
+ @Override
+ public void multiTermVectors(final MultiTermVectorsRequest request, final ActionListener<MultiTermVectorsResponse> listener) {
+ internalClient.multiTermVectors(request, listener);
+ }
+
+ @Override
+ public ActionFuture<PercolateResponse> percolate(PercolateRequest request) {
+ return internalClient.percolate(request);
+ }
+
+ @Override
+ public void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener) {
+ internalClient.percolate(request, listener);
+ }
+
+ @Override
+ public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
+ return internalClient.explain(request);
+ }
+
+ @Override
+ public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
+ internalClient.explain(request, listener);
+ }
+}
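A minimal usage sketch of the client added above; the cluster name, host, port, and document values are illustrative assumptions, not values taken from this patch:

    import org.elasticsearch.action.index.IndexResponse;
    import org.elasticsearch.client.Requests;
    import org.elasticsearch.client.transport.TransportClient;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.transport.InetSocketTransportAddress;

    public class TransportClientUsageSketch {
        public static void main(String[] args) {
            // assumption: "my-cluster" and localhost:9300 are illustrative values
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("cluster.name", "my-cluster")
                    .build();
            TransportClient client = new TransportClient(settings);
            try {
                // seed one known node; the node samplers discover the rest
                client.addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
                IndexResponse response = client.index(
                        Requests.indexRequest("twitter").type("tweet").id("1")
                                .source("{\"user\":\"kimchy\"}")).actionGet();
            } finally {
                client.close(); // disconnects from all nodes and shuts down the thread pool
            }
        }
    }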
diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
new file mode 100644
index 0000000..195b1c7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
@@ -0,0 +1,504 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+
+/**
+ *
+ */
+public class TransportClientNodesService extends AbstractComponent {
+
+ private final TimeValue nodesSamplerInterval;
+
+ private final long pingTimeout;
+
+ private final ClusterName clusterName;
+
+ private final TransportService transportService;
+
+ private final ThreadPool threadPool;
+
+ private final Version version;
+
+ // nodes that are added to be discovered
+ private volatile ImmutableList<DiscoveryNode> listedNodes = ImmutableList.of();
+
+ private final Object mutex = new Object();
+
+ private volatile ImmutableList<DiscoveryNode> nodes = ImmutableList.of();
+ private volatile ImmutableList<DiscoveryNode> filteredNodes = ImmutableList.of();
+
+ private final AtomicInteger tempNodeIdGenerator = new AtomicInteger();
+
+ private final NodeSampler nodesSampler;
+
+ private volatile ScheduledFuture nodesSamplerFuture;
+
+ private final AtomicInteger randomNodeGenerator = new AtomicInteger();
+
+ private final boolean ignoreClusterName;
+
+ private volatile boolean closed;
+
+ @Inject
+ public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService, ThreadPool threadPool, Version version) {
+ super(settings);
+ this.clusterName = clusterName;
+ this.transportService = transportService;
+ this.threadPool = threadPool;
+ this.version = version;
+
+ this.nodesSamplerInterval = componentSettings.getAsTime("nodes_sampler_interval", timeValueSeconds(5));
+ this.pingTimeout = componentSettings.getAsTime("ping_timeout", timeValueSeconds(5)).millis();
+ this.ignoreClusterName = componentSettings.getAsBoolean("ignore_cluster_name", false);
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
+ }
+
+ if (componentSettings.getAsBoolean("sniff", false)) {
+ this.nodesSampler = new SniffNodesSampler();
+ } else {
+ this.nodesSampler = new SimpleNodeSampler();
+ }
+ this.nodesSamplerFuture = threadPool.schedule(nodesSamplerInterval, ThreadPool.Names.GENERIC, new ScheduledNodeSampler());
+
+ // we want the transport service to throw connect exceptions, so we can retry
+ transportService.throwConnectException(true);
+ }
+
+ public ImmutableList<TransportAddress> transportAddresses() {
+ ImmutableList.Builder<TransportAddress> lstBuilder = ImmutableList.builder();
+ for (DiscoveryNode listedNode : listedNodes) {
+ lstBuilder.add(listedNode.address());
+ }
+ return lstBuilder.build();
+ }
+
+ public ImmutableList<DiscoveryNode> connectedNodes() {
+ return this.nodes;
+ }
+
+ public ImmutableList<DiscoveryNode> filteredNodes() {
+ return this.filteredNodes;
+ }
+
+ public ImmutableList<DiscoveryNode> listedNodes() {
+ return this.listedNodes;
+ }
+
+ public TransportClientNodesService addTransportAddresses(TransportAddress... transportAddresses) {
+ synchronized (mutex) {
+ if (closed) {
+ throw new ElasticsearchIllegalStateException("transport client is closed, can't add an address");
+ }
+ List<TransportAddress> filtered = Lists.newArrayListWithExpectedSize(transportAddresses.length);
+ for (TransportAddress transportAddress : transportAddresses) {
+ boolean found = false;
+ for (DiscoveryNode otherNode : listedNodes) {
+ if (otherNode.address().equals(transportAddress)) {
+ found = true;
+ logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode);
+ break;
+ }
+ }
+ if (!found) {
+ filtered.add(transportAddress);
+ }
+ }
+ if (filtered.isEmpty()) {
+ return this;
+ }
+ ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder();
+ builder.addAll(listedNodes());
+ for (TransportAddress transportAddress : filtered) {
+ DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(), transportAddress, version);
+ logger.debug("adding address [{}]", node);
+ builder.add(node);
+ }
+ listedNodes = builder.build();
+ nodesSampler.sample();
+ }
+ return this;
+ }
+
+ public TransportClientNodesService removeTransportAddress(TransportAddress transportAddress) {
+ synchronized (mutex) {
+ if (closed) {
+ throw new ElasticsearchIllegalStateException("transport client is closed, can't remove an address");
+ }
+ ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder();
+ for (DiscoveryNode otherNode : listedNodes) {
+ if (!otherNode.address().equals(transportAddress)) {
+ builder.add(otherNode);
+ } else {
+ logger.debug("removing address [{}]", otherNode);
+ }
+ }
+ listedNodes = builder.build();
+ nodesSampler.sample();
+ }
+ return this;
+ }
+
+ public <T> T execute(NodeCallback<T> callback) throws ElasticsearchException {
+ ImmutableList<DiscoveryNode> nodes = this.nodes;
+ if (nodes.isEmpty()) {
+ throw new NoNodeAvailableException();
+ }
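+ // pick a starting node round-robin; the counter can overflow to a negative
+ // value, in which case it is reset so the modulo arithmetic below stays valid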
+ int index = randomNodeGenerator.incrementAndGet();
+ if (index < 0) {
+ index = 0;
+ randomNodeGenerator.set(0);
+ }
+ for (int i = 0; i < nodes.size(); i++) {
+ DiscoveryNode node = nodes.get((index + i) % nodes.size());
+ try {
+ return callback.doWithNode(node);
+ } catch (ElasticsearchException e) {
+ if (!(e.unwrapCause() instanceof ConnectTransportException)) {
+ throw e;
+ }
+ }
+ }
+ throw new NoNodeAvailableException();
+ }
+
+ public <Response> void execute(NodeListenerCallback<Response> callback, ActionListener<Response> listener) throws ElasticsearchException {
+ ImmutableList<DiscoveryNode> nodes = this.nodes;
+ if (nodes.isEmpty()) {
+ throw new NoNodeAvailableException();
+ }
+ int index = randomNodeGenerator.incrementAndGet();
+ if (index < 0) {
+ index = 0;
+ randomNodeGenerator.set(0);
+ }
+ RetryListener<Response> retryListener = new RetryListener<Response>(callback, listener, nodes, index);
+ try {
+ callback.doWithNode(nodes.get((index) % nodes.size()), retryListener);
+ } catch (ElasticsearchException e) {
+ if (e.unwrapCause() instanceof ConnectTransportException) {
+ retryListener.onFailure(e);
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ public static class RetryListener<Response> implements ActionListener<Response> {
+ private final NodeListenerCallback<Response> callback;
+ private final ActionListener<Response> listener;
+ private final ImmutableList<DiscoveryNode> nodes;
+ private final int index;
+
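+ // number of failed attempts so far; advances the round-robin cursor on each retry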
+ private volatile int i;
+
+ public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener, ImmutableList<DiscoveryNode> nodes, int index) {
+ this.callback = callback;
+ this.listener = listener;
+ this.nodes = nodes;
+ this.index = index;
+ }
+
+ @Override
+ public void onResponse(Response response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) {
+ int i = ++this.i;
+ if (i >= nodes.size()) {
+ listener.onFailure(new NoNodeAvailableException());
+ } else {
+ try {
+ callback.doWithNode(nodes.get((index + i) % nodes.size()), this);
+ } catch (Throwable e1) {
+ // retry the next one...
+ onFailure(e);
+ }
+ }
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ }
+
+ public void close() {
+ synchronized (mutex) {
+ if (closed) {
+ return;
+ }
+ closed = true;
+ nodesSamplerFuture.cancel(true);
+ for (DiscoveryNode node : nodes) {
+ transportService.disconnectFromNode(node);
+ }
+ for (DiscoveryNode listedNode : listedNodes) {
+ transportService.disconnectFromNode(listedNode);
+ }
+ nodes = ImmutableList.of();
+ }
+ }
+
+ abstract class NodeSampler {
+ public void sample() {
+ synchronized (mutex) {
+ if (closed) {
+ return;
+ }
+ doSample();
+ }
+ }
+
+ protected abstract void doSample();
+
+ /**
+ * Validates a set of potentially newly discovered nodes and returns an immutable
+ * list of the nodes that passed validation.
+ */
+ protected ImmutableList<DiscoveryNode> validateNewNodes(Set<DiscoveryNode> nodes) {
+ for (Iterator<DiscoveryNode> it = nodes.iterator(); it.hasNext(); ) {
+ DiscoveryNode node = it.next();
+ if (!transportService.nodeConnected(node)) {
+ try {
+ logger.trace("connecting to node [{}]", node);
+ transportService.connectToNode(node);
+ } catch (Throwable e) {
+ it.remove();
+ logger.debug("failed to connect to discovered node [" + node + "]", e);
+ }
+ }
+ }
+
+ return new ImmutableList.Builder<DiscoveryNode>().addAll(nodes).build();
+ }
+
+ }
+
+ class ScheduledNodeSampler implements Runnable {
+ @Override
+ public void run() {
+ try {
+ nodesSampler.sample();
+ if (!closed) {
+ nodesSamplerFuture = threadPool.schedule(nodesSamplerInterval, ThreadPool.Names.GENERIC, this);
+ }
+ } catch (Exception e) {
+ logger.warn("failed to sample", e);
+ }
+ }
+ }
+
+ class SimpleNodeSampler extends NodeSampler {
+
+ @Override
+ protected void doSample() {
+ HashSet<DiscoveryNode> newNodes = new HashSet<DiscoveryNode>();
+ HashSet<DiscoveryNode> newFilteredNodes = new HashSet<DiscoveryNode>();
+ for (DiscoveryNode listedNode : listedNodes) {
+ if (!transportService.nodeConnected(listedNode)) {
+ try {
+ // it's a listed node, so only establish a light connection to it...
+ logger.trace("connecting to listed node (light) [{}]", listedNode);
+ transportService.connectToNodeLight(listedNode);
+ } catch (Throwable e) {
+ logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
+ continue;
+ }
+ }
+ try {
+ NodesInfoResponse nodeInfo = transportService.submitRequest(listedNode, NodesInfoAction.NAME,
+ Requests.nodesInfoRequest("_local"),
+ TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withTimeout(pingTimeout),
+ new FutureTransportResponseHandler<NodesInfoResponse>() {
+ @Override
+ public NodesInfoResponse newInstance() {
+ return new NodesInfoResponse();
+ }
+ }).txGet();
+ if (!ignoreClusterName && !clusterName.equals(nodeInfo.getClusterName())) {
+ logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
+ newFilteredNodes.add(listedNode);
+ } else if (nodeInfo.getNodes().length != 0) {
+ // use the discovered information but keep the original transport address, so callers can control exactly which address is used.
+ DiscoveryNode nodeWithInfo = nodeInfo.getNodes()[0].getNode();
+ newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(), nodeWithInfo.getHostAddress(), listedNode.address(), nodeWithInfo.attributes(), nodeWithInfo.version()));
+ } else {
+ // although we asked for one node, our target may not have completed initialization yet and so reports no cluster nodes
+ logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", listedNode);
+ newNodes.add(listedNode);
+ }
+ } catch (Throwable e) {
+ logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
+ transportService.disconnectFromNode(listedNode);
+ }
+ }
+
+ nodes = validateNewNodes(newNodes);
+ filteredNodes = ImmutableList.copyOf(newFilteredNodes);
+ }
+ }
+
+ class SniffNodesSampler extends NodeSampler {
+
+ @Override
+ protected void doSample() {
+ // the nodes we are going to ping include the core listed nodes that were added
+ // and the last round of discovered nodes
+ Set<DiscoveryNode> nodesToPing = Sets.newHashSet();
+ for (DiscoveryNode node : listedNodes) {
+ nodesToPing.add(node);
+ }
+ for (DiscoveryNode node : nodes) {
+ nodesToPing.add(node);
+ }
+
+ final CountDownLatch latch = new CountDownLatch(nodesToPing.size());
+ final ConcurrentMap<DiscoveryNode, ClusterStateResponse> clusterStateResponses = ConcurrentCollections.newConcurrentMap();
+ for (final DiscoveryNode listedNode : nodesToPing) {
+ threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ if (!transportService.nodeConnected(listedNode)) {
+ try {
+ // if it's one of the actual nodes we talk to, not just a listed node, fully connect
+ if (nodes.contains(listedNode)) {
+ logger.trace("connecting to cluster node [{}]", listedNode);
+ transportService.connectToNode(listedNode);
+ } else {
+ // it's a listed node, so only establish a light connection to it...
+ logger.trace("connecting to listed node (light) [{}]", listedNode);
+ transportService.connectToNodeLight(listedNode);
+ }
+ } catch (Exception e) {
+ logger.debug("failed to connect to node [{}], ignoring...", e, listedNode);
+ latch.countDown();
+ return;
+ }
+ }
+ transportService.sendRequest(listedNode, ClusterStateAction.NAME,
+ Requests.clusterStateRequest()
+ .clear().nodes(true).local(true),
+ TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withTimeout(pingTimeout),
+ new BaseTransportResponseHandler<ClusterStateResponse>() {
+
+ @Override
+ public ClusterStateResponse newInstance() {
+ return new ClusterStateResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(ClusterStateResponse response) {
+ clusterStateResponses.put(listedNode, response);
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException e) {
+ logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode);
+ transportService.disconnectFromNode(listedNode);
+ latch.countDown();
+ }
+ });
+ } catch (Throwable e) {
+ logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode);
+ transportService.disconnectFromNode(listedNode);
+ latch.countDown();
+ }
+ }
+ });
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ return;
+ }
+
+ HashSet<DiscoveryNode> newNodes = new HashSet<DiscoveryNode>(listedNodes);
+ HashSet<DiscoveryNode> newFilteredNodes = new HashSet<DiscoveryNode>();
+ for (Map.Entry<DiscoveryNode, ClusterStateResponse> entry : clusterStateResponses.entrySet()) {
+ if (!ignoreClusterName && !clusterName.equals(entry.getValue().getClusterName())) {
+ logger.warn("node {} not part of the cluster {}, ignoring...", entry.getValue().getState().nodes().localNode(), clusterName);
+ newFilteredNodes.add(entry.getKey());
+ continue;
+ }
+ for (ObjectCursor<DiscoveryNode> cursor : entry.getValue().getState().nodes().dataNodes().values()) {
+ newNodes.add(cursor.value);
+ }
+ }
+
+ nodes = validateNewNodes(newNodes);
+ filteredNodes = ImmutableList.copyOf(newFilteredNodes);
+ }
+ }
+
+ public interface NodeCallback<T> {
+
+ T doWithNode(DiscoveryNode node) throws ElasticsearchException;
+ }
+
+ public interface NodeListenerCallback<Response> {
+
+ void doWithNode(DiscoveryNode node, ActionListener<Response> listener) throws ElasticsearchException;
+ }
+}
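The sampler choice and timing above are read from component settings, which resolve under the client.transport. prefix in this version; a hedged fragment showing how a caller might enable the sniffing sampler (the cluster name is an illustrative assumption):

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("cluster.name", "my-cluster")            // assumption: illustrative name
            .put("client.transport.sniff", true)          // SniffNodesSampler instead of SimpleNodeSampler
            .put("client.transport.ping_timeout", "10s")  // raise the 5s default ping timeout
            .build();
    TransportClient client = new TransportClient(settings);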
diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java
new file mode 100644
index 0000000..be7d8d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport.support;
+
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.transport.TransportClientNodesService;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class InternalTransportAdminClient extends AbstractComponent implements AdminClient {
+
+ private final TransportClientNodesService nodesService;
+
+ private final InternalTransportIndicesAdminClient indicesAdminClient;
+
+ private final InternalTransportClusterAdminClient clusterAdminClient;
+
+ @Inject
+ public InternalTransportAdminClient(Settings settings, TransportClientNodesService nodesService,
+ InternalTransportIndicesAdminClient indicesAdminClient, InternalTransportClusterAdminClient clusterAdminClient) {
+ super(settings);
+ this.nodesService = nodesService;
+ this.indicesAdminClient = indicesAdminClient;
+ this.clusterAdminClient = clusterAdminClient;
+ }
+
+ @Override
+ public IndicesAdminClient indices() {
+ return indicesAdminClient;
+ }
+
+ @Override
+ public ClusterAdminClient cluster() {
+ return clusterAdminClient;
+ }
+}
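The admin facade above only fans out to the indices and cluster admin clients; a hedged fragment (client is a TransportClient as sketched earlier, and waiting for yellow status is an illustrative choice):

    ClusterHealthResponse health = client.admin().cluster()
            .prepareHealth()
            .setWaitForYellowStatus()
            .execute().actionGet();
    System.out.println("cluster status: " + health.getStatus());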
diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java
new file mode 100644
index 0000000..e43a5b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport.support;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.*;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.client.transport.TransportClientNodesService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class InternalTransportClient extends AbstractClient implements InternalClient {
+
+ private final Settings settings;
+ private final ThreadPool threadPool;
+
+ private final TransportClientNodesService nodesService;
+
+ private final InternalTransportAdminClient adminClient;
+
+ private final ImmutableMap<Action, TransportActionNodeProxy> actions;
+
+ @Inject
+ public InternalTransportClient(Settings settings, ThreadPool threadPool, TransportService transportService,
+ TransportClientNodesService nodesService, InternalTransportAdminClient adminClient,
+ Map<String, GenericAction> actions) {
+ this.settings = settings;
+ this.threadPool = threadPool;
+ this.nodesService = nodesService;
+ this.adminClient = adminClient;
+
+ MapBuilder<Action, TransportActionNodeProxy> actionsBuilder = new MapBuilder<Action, TransportActionNodeProxy>();
+ for (GenericAction action : actions.values()) {
+ if (action instanceof Action) {
+ actionsBuilder.put((Action) action, new TransportActionNodeProxy(settings, action, transportService));
+ }
+ }
+ this.actions = actionsBuilder.immutableMap();
+ }
+
+ @Override
+ public void close() {
+ // nothing to do here
+ }
+
+ @Override
+ public Settings settings() {
+ return this.settings;
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ @Override
+ public AdminClient admin() {
+ return adminClient;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final Action<Request, Response, RequestBuilder> action, final Request request) {
+ final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
+ return nodesService.execute(new TransportClientNodesService.NodeCallback<ActionFuture<Response>>() {
+ @Override
+ public ActionFuture<Response> doWithNode(DiscoveryNode node) throws ElasticsearchException {
+ return proxy.execute(node, request);
+ }
+ });
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener) {
+ final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
+ nodesService.execute(new TransportClientNodesService.NodeListenerCallback<Response>() {
+ @Override
+ public void doWithNode(DiscoveryNode node, ActionListener<Response> listener) throws ElasticsearchException {
+ proxy.execute(node, request, listener);
+ }
+ }, listener);
+ }
+}
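Every client-level Action registered at injection time is wrapped in a TransportActionNodeProxy, so the typed convenience methods and the generic execute entry point converge on the same per-node dispatch; a hedged fragment of the generic path (index name and document are illustrative):

    ActionFuture<IndexResponse> future = client.execute(
            org.elasticsearch.action.index.IndexAction.INSTANCE,
            Requests.indexRequest("twitter").type("tweet").id("2")
                    .source("{\"user\":\"kimchy\"}"));
    IndexResponse response = future.actionGet();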
diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java
new file mode 100644
index 0000000..4087bf2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport.support;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.cluster.ClusterAction;
+import org.elasticsearch.client.internal.InternalClusterAdminClient;
+import org.elasticsearch.client.support.AbstractClusterAdminClient;
+import org.elasticsearch.client.transport.TransportClientNodesService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+
+/**
+ *
+ */
+@SuppressWarnings("unchecked")
+public class InternalTransportClusterAdminClient extends AbstractClusterAdminClient implements InternalClusterAdminClient {
+
+ private final TransportClientNodesService nodesService;
+
+ private final ThreadPool threadPool;
+
+ private final ImmutableMap<ClusterAction, TransportActionNodeProxy> actions;
+
+ @Inject
+ public InternalTransportClusterAdminClient(Settings settings, TransportClientNodesService nodesService, ThreadPool threadPool, TransportService transportService,
+ Map<String, GenericAction> actions) {
+ this.nodesService = nodesService;
+ this.threadPool = threadPool;
+ MapBuilder<ClusterAction, TransportActionNodeProxy> actionsBuilder = new MapBuilder<ClusterAction, TransportActionNodeProxy>();
+ for (GenericAction action : actions.values()) {
+ if (action instanceof ClusterAction) {
+ actionsBuilder.put((ClusterAction) action, new TransportActionNodeProxy(settings, action, transportService));
+ }
+ }
+ this.actions = actionsBuilder.immutableMap();
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final ClusterAction<Request, Response, RequestBuilder> action, final Request request) {
+ final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
+ return nodesService.execute(new TransportClientNodesService.NodeCallback<ActionFuture<Response>>() {
+ @Override
+ public ActionFuture<Response> doWithNode(DiscoveryNode node) throws ElasticsearchException {
+ return proxy.execute(node, request);
+ }
+ });
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final ClusterAction<Request, Response, RequestBuilder> action, final Request request, final ActionListener<Response> listener) {
+ final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
+ nodesService.execute(new TransportClientNodesService.NodeListenerCallback<Response>() {
+ @Override
+ public void doWithNode(DiscoveryNode node, ActionListener<Response> listener) throws ElasticsearchException {
+ proxy.execute(node, request, listener);
+ }
+ }, listener);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java
new file mode 100644
index 0000000..9f046a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport.support;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.indices.IndicesAction;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.client.support.AbstractIndicesAdminClient;
+import org.elasticsearch.client.transport.TransportClientNodesService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Map;
+
+/**
+ *
+ */
+@SuppressWarnings("unchecked")
+public class InternalTransportIndicesAdminClient extends AbstractIndicesAdminClient implements IndicesAdminClient {
+
+ private final TransportClientNodesService nodesService;
+
+ private final ThreadPool threadPool;
+
+ private final ImmutableMap<IndicesAction, TransportActionNodeProxy> actions;
+
+ @Inject
+ public InternalTransportIndicesAdminClient(Settings settings, TransportClientNodesService nodesService, TransportService transportService, ThreadPool threadPool,
+ Map<String, GenericAction> actions) {
+ this.nodesService = nodesService;
+ this.threadPool = threadPool;
+ MapBuilder<IndicesAction, TransportActionNodeProxy> actionsBuilder = new MapBuilder<IndicesAction, TransportActionNodeProxy>();
+ for (GenericAction action : actions.values()) {
+ if (action instanceof IndicesAction) {
+ actionsBuilder.put((IndicesAction) action, new TransportActionNodeProxy(settings, action, transportService));
+ }
+ }
+ this.actions = actionsBuilder.immutableMap();
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final IndicesAction<Request, Response, RequestBuilder> action, final Request request) {
+ final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
+ return nodesService.execute(new TransportClientNodesService.NodeCallback<ActionFuture<Response>>() {
+ @Override
+ public ActionFuture<Response> doWithNode(DiscoveryNode node) throws ElasticsearchException {
+ return proxy.execute(node, request);
+ }
+ });
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final IndicesAction<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener) {
+ final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
+ nodesService.execute(new TransportClientNodesService.NodeListenerCallback<Response>() {
+ @Override
+ public void doWithNode(DiscoveryNode node, ActionListener<Response> listener) throws ElasticsearchException {
+ proxy.execute(node, request, listener);
+ }
+ }, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java b/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java
new file mode 100644
index 0000000..b354e4d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * An extension interface to {@link ClusterStateUpdateTask} that allows the submitter to be notified when
+ * all the nodes have acknowledged a cluster state update request.
+ */
+public interface AckedClusterStateUpdateTask extends TimeoutClusterStateUpdateTask {
+
+ /**
+ * Called to determine which nodes the acknowledgement is expected from.
+ * @param discoveryNode a node that is part of the current cluster state
+ * @return true if the node is expected to send an ack back, false otherwise
+ */
+ boolean mustAck(DiscoveryNode discoveryNode);
+
+ /**
+ * Called once all the nodes have acknowledged the cluster state update request. Implementations
+ * must be very lightweight, since this gets executed on the cluster service thread.
+ * @param t optional error that might have been thrown
+ */
+ void onAllNodesAcked(@Nullable Throwable t);
+
+ /**
+ * Called once the acknowledgement timeout defined by
+ * {@link AckedClusterStateUpdateTask#ackTimeout()} has expired
+ */
+ void onAckTimeout();
+
+ /**
+ * The acknowledgement timeout: the maximum time interval to wait for acknowledgements.
+ */
+ TimeValue ackTimeout();
+}
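A hedged sketch of an implementation of this interface; the class is kept abstract because the methods inherited from TimeoutClusterStateUpdateTask and ClusterStateUpdateTask (timeout, execute, onFailure) are elided, and the timeout value is an illustrative assumption:

    abstract class SampleAckedTask implements AckedClusterStateUpdateTask {
        @Override
        public boolean mustAck(DiscoveryNode discoveryNode) {
            return true; // expect an ack from every node in the cluster state
        }

        @Override
        public void onAllNodesAcked(@Nullable Throwable t) {
            // keep this very lightweight: it runs on the cluster service thread
        }

        @Override
        public void onAckTimeout() {
            // not every node acked within ackTimeout()
        }

        @Override
        public TimeValue ackTimeout() {
            return TimeValue.timeValueSeconds(30); // assumption: illustrative timeout
        }
    }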
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
new file mode 100644
index 0000000..858108c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ *
+ */
+public class ClusterChangedEvent {
+
+ private final String source;
+
+ private final ClusterState previousState;
+
+ private final ClusterState state;
+
+ private final DiscoveryNodes.Delta nodesDelta;
+
+ public ClusterChangedEvent(String source, ClusterState state, ClusterState previousState) {
+ this.source = source;
+ this.state = state;
+ this.previousState = previousState;
+ this.nodesDelta = state.nodes().delta(previousState.nodes());
+ }
+
+ /**
+ * The source that caused this cluster event to be raised.
+ */
+ public String source() {
+ return this.source;
+ }
+
+ public ClusterState state() {
+ return this.state;
+ }
+
+ public ClusterState previousState() {
+ return this.previousState;
+ }
+
+ public boolean routingTableChanged() {
+ return state.routingTable() != previousState.routingTable();
+ }
+
+ public boolean indexRoutingTableChanged(String index) {
+ if (!state.routingTable().hasIndex(index) && !previousState.routingTable().hasIndex(index)) {
+ return false;
+ }
+ if (state.routingTable().hasIndex(index) && previousState.routingTable().hasIndex(index)) {
+ return state.routingTable().index(index) != previousState.routingTable().index(index);
+ }
+ return true;
+ }
+
+ /**
+ * Returns the indices created in this event
+ */
+ public List<String> indicesCreated() {
+ if (previousState == null) {
+ return Arrays.asList(state.metaData().indices().keys().toArray(String.class));
+ }
+ if (!metaDataChanged()) {
+ return ImmutableList.of();
+ }
+ List<String> created = null;
+ for (ObjectCursor<String> cursor : state.metaData().indices().keys()) {
+ String index = cursor.value;
+ if (!previousState.metaData().hasIndex(index)) {
+ if (created == null) {
+ created = Lists.newArrayList();
+ }
+ created.add(index);
+ }
+ }
+ return created == null ? ImmutableList.<String>of() : created;
+ }
+
+ /**
+ * Returns the indices deleted in this event
+ */
+ public List<String> indicesDeleted() {
+ if (previousState == null) {
+ return ImmutableList.of();
+ }
+ if (!metaDataChanged()) {
+ return ImmutableList.of();
+ }
+ List<String> deleted = null;
+ for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) {
+ String index = cursor.value;
+ if (!state.metaData().hasIndex(index)) {
+ if (deleted == null) {
+ deleted = Lists.newArrayList();
+ }
+ deleted.add(index);
+ }
+ }
+ return deleted == null ? ImmutableList.<String>of() : deleted;
+ }
+
+ public boolean metaDataChanged() {
+ return state.metaData() != previousState.metaData();
+ }
+
+ public boolean indexMetaDataChanged(IndexMetaData current) {
+ MetaData previousMetaData = previousState.metaData();
+ if (previousMetaData == null) {
+ return true;
+ }
+ IndexMetaData previousIndexMetaData = previousMetaData.index(current.index());
+ // no need to check the version: the discovery modules make sure to reuse the
+ // same instance when the versions match
+ if (previousIndexMetaData == current) {
+ return false;
+ }
+ return true;
+ }
+
+ public boolean blocksChanged() {
+ return state.blocks() != previousState.blocks();
+ }
+
+ public boolean localNodeMaster() {
+ return state.nodes().localNodeMaster();
+ }
+
+ public DiscoveryNodes.Delta nodesDelta() {
+ return this.nodesDelta;
+ }
+
+ public boolean nodesRemoved() {
+ return nodesDelta.removed();
+ }
+
+ public boolean nodesAdded() {
+ return nodesDelta.added();
+ }
+
+ public boolean nodesChanged() {
+ return nodesRemoved() || nodesAdded();
+ }
+} \ No newline at end of file
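A hedged sketch of a consumer of these events; it assumes the ClusterStateListener interface (a single clusterChanged(ClusterChangedEvent) callback) that this import defines elsewhere:

    class IndexLifecycleLogger implements ClusterStateListener {
        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.metaDataChanged()) {
                for (String index : event.indicesCreated()) {
                    System.out.println("index created: " + index);
                }
                for (String index : event.indicesDeleted()) {
                    System.out.println("index deleted: " + index);
                }
            }
            if (event.nodesChanged()) {
                System.out.println("nodes delta: " + event.nodesDelta().shortSummary());
            }
        }
    }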
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
new file mode 100644
index 0000000..5c010c6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+/**
+ * ClusterInfo is an object representing a map of nodes to {@link DiskUsage}
+ * and a map of shard ids to shard sizes; see
+ * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
+ * for the key format used in the shardSizes map.
+ */
+public class ClusterInfo {
+
+ private final ImmutableMap<String, DiskUsage> usages;
+ private final ImmutableMap<String, Long> shardSizes;
+
+ public ClusterInfo(ImmutableMap<String, DiskUsage> usages, ImmutableMap<String, Long> shardSizes) {
+ this.usages = usages;
+ this.shardSizes = shardSizes;
+ }
+
+ public Map<String, DiskUsage> getNodeDiskUsages() {
+ return this.usages;
+ }
+
+ public Map<String, Long> getShardSizes() {
+ return this.shardSizes;
+ }
+}
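A hedged construction fragment; the empty maps mirror what the EMPTY ClusterInfoService (declared next) could plausibly expose:

    ClusterInfo info = new ClusterInfo(
            ImmutableMap.<String, DiskUsage>of(),
            ImmutableMap.<String, Long>of());
    assert info.getNodeDiskUsages().isEmpty() && info.getShardSizes().isEmpty();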
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java b/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java
new file mode 100644
index 0000000..568a558
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+public interface ClusterInfoService {
+
+ public static ClusterInfoService EMPTY = EmptyClusterInfoService.getInstance();
+
+ public ClusterInfo getClusterInfo();
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/src/main/java/org/elasticsearch/cluster/ClusterModule.java
new file mode 100644
index 0000000..1983be0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterModule.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.action.index.*;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.metadata.*;
+import org.elasticsearch.cluster.node.DiscoveryNodeService;
+import org.elasticsearch.cluster.routing.RoutingService;
+import org.elasticsearch.cluster.routing.allocation.AllocationModule;
+import org.elasticsearch.cluster.routing.operation.OperationRoutingModule;
+import org.elasticsearch.cluster.service.InternalClusterService;
+import org.elasticsearch.cluster.settings.ClusterDynamicSettingsModule;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexDynamicSettingsModule;
+
+/**
+ *
+ */
+public class ClusterModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public ClusterModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(new AllocationModule(settings),
+ new OperationRoutingModule(settings),
+ new ClusterDynamicSettingsModule(),
+ new IndexDynamicSettingsModule());
+ }
+
+ @Override
+ protected void configure() {
+ bind(DiscoveryNodeService.class).asEagerSingleton();
+ bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton();
+
+ bind(MetaDataService.class).asEagerSingleton();
+ bind(MetaDataCreateIndexService.class).asEagerSingleton();
+ bind(MetaDataDeleteIndexService.class).asEagerSingleton();
+ bind(MetaDataIndexStateService.class).asEagerSingleton();
+ bind(MetaDataMappingService.class).asEagerSingleton();
+ bind(MetaDataIndexAliasesService.class).asEagerSingleton();
+ bind(MetaDataUpdateSettingsService.class).asEagerSingleton();
+ bind(MetaDataIndexTemplateService.class).asEagerSingleton();
+
+ bind(RoutingService.class).asEagerSingleton();
+
+ bind(ShardStateAction.class).asEagerSingleton();
+ bind(NodeIndexDeletedAction.class).asEagerSingleton();
+ bind(NodeMappingRefreshAction.class).asEagerSingleton();
+ bind(MappingUpdatedAction.class).asEagerSingleton();
+
+ bind(ClusterInfoService.class).to(InternalClusterInfoService.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterName.java b/src/main/java/org/elasticsearch/cluster/ClusterName.java
new file mode 100644
index 0000000..3a9dd82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterName.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ClusterName implements Streamable {
+
+ public static final String SETTING = "cluster.name";
+
+ public static final ClusterName DEFAULT = new ClusterName("elasticsearch");
+
+ private String value;
+
+ public static ClusterName clusterNameFromSettings(Settings settings) {
+ return new ClusterName(settings.get("cluster.name", ClusterName.DEFAULT.value()));
+ }
+
+ private ClusterName() {
+ }
+
+ public ClusterName(String value) {
+ this.value = value.intern();
+ }
+
+ public String value() {
+ return this.value;
+ }
+
+ public static ClusterName readClusterName(StreamInput in) throws IOException {
+ ClusterName clusterName = new ClusterName();
+ clusterName.readFrom(in);
+ return clusterName;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ value = in.readString().intern();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(value);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ClusterName that = (ClusterName) o;
+
+ if (value != null ? !value.equals(that.value) : that.value != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return value != null ? value.hashCode() : 0;
+ }
+
+ @Override
+ public String toString() {
+ return "Cluster [" + value + "]";
+ }
+}
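+
+// A minimal usage sketch: reading the cluster name from settings. ImmutableSettings
+// (org.elasticsearch.common.settings) is assumed available, as elsewhere in this patch.
+//
+//   Settings settings = ImmutableSettings.settingsBuilder()
+//       .put(ClusterName.SETTING, "my-cluster")
+//       .build();
+//   ClusterName name = ClusterName.clusterNameFromSettings(settings);
+//   assert "my-cluster".equals(name.value());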
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterNameModule.java b/src/main/java/org/elasticsearch/cluster/ClusterNameModule.java
new file mode 100644
index 0000000..55513c3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterNameModule.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class ClusterNameModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public ClusterNameModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(ClusterName.class).toInstance(ClusterName.clusterNameFromSettings(settings));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterService.java b/src/main/java/org/elasticsearch/cluster/ClusterService.java
new file mode 100644
index 0000000..6204599
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterService.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.operation.OperationRouting;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.List;
+
+/**
+ * The cluster service allows both registering for cluster state events ({@link ClusterStateListener})
+ * and submitting state update tasks ({@link ClusterStateUpdateTask}).
+ */
+public interface ClusterService extends LifecycleComponent<ClusterService> {
+
+ /**
+ * The local node.
+ */
+ DiscoveryNode localNode();
+
+ /**
+ * The current state.
+ */
+ ClusterState state();
+
+ /**
+ * Adds an initial block to be set on the first cluster state created.
+ */
+ void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException;
+
+ /**
+ * Removes an initial block that would otherwise be set on the first cluster state created.
+ */
+ void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException;
+
+ /**
+ * The operation routing.
+ */
+ OperationRouting operationRouting();
+
+ /**
+ * Adds a priority listener for updated cluster states.
+ */
+ void addFirst(ClusterStateListener listener);
+
+ /**
+ * Adds a listener for updated cluster states that is notified last.
+ */
+ void addLast(ClusterStateListener listener);
+
+ /**
+ * Adds a listener for updated cluster states.
+ */
+ void add(ClusterStateListener listener);
+
+ /**
+ * Removes a listener for updated cluster states.
+ */
+ void remove(ClusterStateListener listener);
+
+ /**
+ * Add a listener for on/off local node master events
+ */
+ void add(LocalNodeMasterListener listener);
+
+ /**
+ * Remove the given listener for on/off local master events
+ */
+ void remove(LocalNodeMasterListener listener);
+
+ /**
+ * Adds a cluster state listener that will timeout after the provided timeout.
+ */
+ void add(TimeValue timeout, TimeoutClusterStateListener listener);
+
+ /**
+ * Submits a task that will update the cluster state.
+ */
+ void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask);
+
+ /**
+ * Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}).
+ */
+ void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
+
+ /**
+ * Returns the tasks that are pending.
+ */
+ List<PendingClusterTask> pendingTasks();
+}
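+
+// A minimal sketch of using the interface above (the `clusterService` instance is
+// assumed to be injected): register a listener and submit a no-op update task.
+//
+//   clusterService.add(new ClusterStateListener() {
+//       @Override
+//       public void clusterChanged(ClusterChangedEvent event) {
+//           // inspect event.state(), e.g. the current nodes and routing table
+//       }
+//   });
+//
+//   clusterService.submitStateUpdateTask("example-source", new ClusterStateUpdateTask() {
+//       @Override
+//       public ClusterState execute(ClusterState currentState) {
+//           return currentState; // same instance => no state change is published
+//       }
+//       @Override
+//       public void onFailure(String source, Throwable t) {
+//           // handle the failed update
+//       }
+//   });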
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java
new file mode 100644
index 0000000..6b08af0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -0,0 +1,596 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationExplanation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * The current state of the cluster: discovery nodes, routing table, meta data,
+ * cluster blocks, allocation explanation and any registered custom sections.
+ */
+public class ClusterState implements ToXContent {
+
+ public interface Custom {
+
+ interface Factory<T extends Custom> {
+
+ String type();
+
+ T readFrom(StreamInput in) throws IOException;
+
+ void writeTo(T customState, StreamOutput out) throws IOException;
+
+ void toXContent(T customState, XContentBuilder builder, ToXContent.Params params);
+ }
+ }
+
+ public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>();
+
+ /**
+ * Registers a custom cluster state factory. Make sure to call it from a static block.
+ */
+ public static void registerFactory(String type, Custom.Factory factory) {
+ customFactories.put(type, factory);
+ }
+
+ @Nullable
+ public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) {
+ return customFactories.get(type);
+ }
+
+ public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException {
+ Custom.Factory<T> factory = customFactories.get(type);
+ if (factory == null) {
+ throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]");
+ }
+ return factory;
+ }
+
+
+ private final long version;
+
+ private final RoutingTable routingTable;
+
+ private final DiscoveryNodes nodes;
+
+ private final MetaData metaData;
+
+ private final ClusterBlocks blocks;
+
+ private final AllocationExplanation allocationExplanation;
+
+ private final ImmutableOpenMap<String, Custom> customs;
+
+ // built on demand
+ private volatile RoutingNodes routingNodes;
+
+ private SettingsFilter settingsFilter;
+
+ public ClusterState(long version, ClusterState state) {
+ this(version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.allocationExplanation(), state.customs());
+ }
+
+ public ClusterState(long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, AllocationExplanation allocationExplanation, ImmutableOpenMap<String, Custom> customs) {
+ this.version = version;
+ this.metaData = metaData;
+ this.routingTable = routingTable;
+ this.nodes = nodes;
+ this.blocks = blocks;
+ this.allocationExplanation = allocationExplanation;
+ this.customs = customs;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public long getVersion() {
+ return version();
+ }
+
+ public DiscoveryNodes nodes() {
+ return this.nodes;
+ }
+
+ public DiscoveryNodes getNodes() {
+ return nodes();
+ }
+
+ public MetaData metaData() {
+ return this.metaData;
+ }
+
+ public MetaData getMetaData() {
+ return metaData();
+ }
+
+ public RoutingTable routingTable() {
+ return routingTable;
+ }
+
+ public RoutingTable getRoutingTable() {
+ return routingTable();
+ }
+
+ public RoutingNodes routingNodes() {
+ return routingTable.routingNodes(this);
+ }
+
+ public RoutingNodes getRoutingNodes() {
+ return readOnlyRoutingNodes();
+ }
+
+ public ClusterBlocks blocks() {
+ return this.blocks;
+ }
+
+ public ClusterBlocks getBlocks() {
+ return blocks;
+ }
+
+ public AllocationExplanation allocationExplanation() {
+ return this.allocationExplanation;
+ }
+
+ public AllocationExplanation getAllocationExplanation() {
+ return allocationExplanation();
+ }
+
+ public ImmutableOpenMap<String, Custom> customs() {
+ return this.customs;
+ }
+
+ public ImmutableOpenMap<String, Custom> getCustoms() {
+ return this.customs;
+ }
+
+ /**
+ * Returns a built (on demand) routing nodes view of the routing table. <b>NOTE: the routing
+ * nodes are mutable; use them for read operations only.</b>
+ */
+ public RoutingNodes readOnlyRoutingNodes() {
+ if (routingNodes != null) {
+ return routingNodes;
+ }
+ routingNodes = routingTable.routingNodes(this);
+ return routingNodes;
+ }
+
+ public ClusterState settingsFilter(SettingsFilter settingsFilter) {
+ this.settingsFilter = settingsFilter;
+ return this;
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(nodes().prettyPrint());
+ sb.append(routingTable().prettyPrint());
+ sb.append(readOnlyRoutingNodes().prettyPrint());
+ return sb.toString();
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+ builder.startObject();
+ toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ } catch (IOException e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ Set<String> metrics = Strings.splitStringByCommaToSet(params.param("metric", "_all"));
+ boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
+
+ if (isAllMetricsOnly || metrics.contains("version")) {
+ builder.field("version", version);
+ }
+
+ if (isAllMetricsOnly || metrics.contains("master_node")) {
+ builder.field("master_node", nodes().masterNodeId());
+ }
+
+ if (isAllMetricsOnly || metrics.contains("blocks")) {
+ builder.startObject("blocks");
+
+ if (!blocks().global().isEmpty()) {
+ builder.startObject("global");
+ for (ClusterBlock block : blocks().global()) {
+ block.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+
+ if (!blocks().indices().isEmpty()) {
+ builder.startObject("indices");
+ for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks().indices().entrySet()) {
+ builder.startObject(entry.getKey());
+ for (ClusterBlock block : entry.getValue()) {
+ block.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ builder.endObject();
+ }
+
+ // nodes
+ if (isAllMetricsOnly || metrics.contains("nodes")) {
+ builder.startObject("nodes");
+ for (DiscoveryNode node : nodes()) {
+ builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("name", node.name());
+ builder.field("transport_address", node.address().toString());
+
+ builder.startObject("attributes");
+ for (Map.Entry<String, String> attr : node.attributes().entrySet()) {
+ builder.field(attr.getKey(), attr.getValue());
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ // meta data
+ if (isAllMetricsOnly || metrics.contains("metadata")) {
+ builder.startObject("metadata");
+
+ builder.startObject("templates");
+ for (ObjectCursor<IndexTemplateMetaData> cursor : metaData().templates().values()) {
+ IndexTemplateMetaData templateMetaData = cursor.value;
+ builder.startObject(templateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.field("template", templateMetaData.template());
+ builder.field("order", templateMetaData.order());
+
+ builder.startObject("settings");
+ Settings settings = templateMetaData.settings();
+ if (settingsFilter != null) {
+ settings = settingsFilter.filterSettings(settings);
+ }
+ settings.toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject("mappings");
+ for (ObjectObjectCursor<String, CompressedString> cursor1 : templateMetaData.mappings()) {
+ byte[] mappingSource = cursor1.value.uncompressed();
+ XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
+ Map<String, Object> mapping = parser.map();
+ if (mapping.size() == 1 && mapping.containsKey(cursor1.key)) {
+ // the type name is the root value, reduce it
+ mapping = (Map<String, Object>) mapping.get(cursor1.key);
+ }
+ builder.field(cursor1.key);
+ builder.map(mapping);
+ }
+ builder.endObject();
+
+
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.startObject("indices");
+ for (IndexMetaData indexMetaData : metaData()) {
+ builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
+
+ builder.startObject("settings");
+ Settings settings = indexMetaData.settings();
+ if (settingsFilter != null) {
+ settings = settingsFilter.filterSettings(settings);
+ }
+ settings.toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject("mappings");
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
+ byte[] mappingSource = cursor.value.source().uncompressed();
+ XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
+ Map<String, Object> mapping = parser.map();
+ if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
+ // the type name is the root value, reduce it
+ mapping = (Map<String, Object>) mapping.get(cursor.key);
+ }
+ builder.field(cursor.key);
+ builder.map(mapping);
+ }
+ builder.endObject();
+
+ builder.startArray("aliases");
+ for (ObjectCursor<String> cursor : indexMetaData.aliases().keys()) {
+ builder.value(cursor.value);
+ }
+ builder.endArray();
+
+ builder.endObject();
+ }
+ builder.endObject();
+
+ for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
+ builder.startObject(cursor.key);
+ MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
+ builder.endObject();
+ }
+
+ builder.endObject();
+ }
+
+ // routing table
+ if (isAllMetricsOnly || metrics.contains("routing_table")) {
+ builder.startObject("routing_table");
+ builder.startObject("indices");
+ for (IndexRoutingTable indexRoutingTable : routingTable()) {
+ builder.startObject(indexRoutingTable.index(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject("shards");
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id()));
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ shardRouting.toXContent(builder, params);
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+
+ // routing nodes
+ if (isAllMetricsOnly || metrics.contains("routing_table")) {
+ builder.startObject("routing_nodes");
+ builder.startArray("unassigned");
+ for (ShardRouting shardRouting : readOnlyRoutingNodes().unassigned()) {
+ shardRouting.toXContent(builder, params);
+ }
+ builder.endArray();
+
+ builder.startObject("nodes");
+ for (RoutingNode routingNode : readOnlyRoutingNodes()) {
+ builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE);
+ for (ShardRouting shardRouting : routingNode) {
+ shardRouting.toXContent(builder, params);
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ if (isAllMetricsOnly || metrics.contains("routing_table")) {
+ builder.startArray("allocations");
+ for (Map.Entry<ShardId, List<AllocationExplanation.NodeExplanation>> entry : allocationExplanation().explanations().entrySet()) {
+ builder.startObject();
+ builder.field("index", entry.getKey().index().name());
+ builder.field("shard", entry.getKey().id());
+ builder.startArray("explanations");
+ for (AllocationExplanation.NodeExplanation nodeExplanation : entry.getValue()) {
+ builder.field("desc", nodeExplanation.description());
+ if (nodeExplanation.node() != null) {
+ builder.startObject("node");
+ builder.field("id", nodeExplanation.node().id());
+ builder.field("name", nodeExplanation.node().name());
+ builder.endObject();
+ }
+ }
+ builder.endArray();
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+
+ if (isAllMetricsOnly || metrics.contains("customs")) {
+ for (ObjectObjectCursor<String, Custom> cursor : customs) {
+ builder.startObject(cursor.key);
+ lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
+ builder.endObject();
+ }
+ }
+
+ return builder;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static Builder builder(ClusterState state) {
+ return new Builder(state);
+ }
+
+ public static class Builder {
+
+ private long version = 0;
+ private MetaData metaData = MetaData.EMPTY_META_DATA;
+ private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;
+ private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES;
+ private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK;
+ private AllocationExplanation allocationExplanation = AllocationExplanation.EMPTY;
+ private final ImmutableOpenMap.Builder<String, Custom> customs;
+
+ public Builder() {
+ customs = ImmutableOpenMap.builder();
+ }
+
+ public Builder(ClusterState state) {
+ this.version = state.version();
+ this.nodes = state.nodes();
+ this.routingTable = state.routingTable();
+ this.metaData = state.metaData();
+ this.blocks = state.blocks();
+ this.allocationExplanation = state.allocationExplanation();
+ this.customs = ImmutableOpenMap.builder(state.customs());
+ }
+
+ public Builder nodes(DiscoveryNodes.Builder nodesBuilder) {
+ return nodes(nodesBuilder.build());
+ }
+
+ public Builder nodes(DiscoveryNodes nodes) {
+ this.nodes = nodes;
+ return this;
+ }
+
+ public Builder routingTable(RoutingTable.Builder routingTable) {
+ return routingTable(routingTable.build());
+ }
+
+ public Builder routingResult(RoutingAllocation.Result routingResult) {
+ this.routingTable = routingResult.routingTable();
+ this.allocationExplanation = routingResult.explanation();
+ return this;
+ }
+
+ public Builder routingTable(RoutingTable routingTable) {
+ this.routingTable = routingTable;
+ return this;
+ }
+
+ public Builder metaData(MetaData.Builder metaDataBuilder) {
+ return metaData(metaDataBuilder.build());
+ }
+
+ public Builder metaData(MetaData metaData) {
+ this.metaData = metaData;
+ return this;
+ }
+
+ public Builder blocks(ClusterBlocks.Builder blocksBuilder) {
+ return blocks(blocksBuilder.build());
+ }
+
+ public Builder blocks(ClusterBlocks block) {
+ this.blocks = block;
+ return this;
+ }
+
+ public Builder allocationExplanation(AllocationExplanation allocationExplanation) {
+ this.allocationExplanation = allocationExplanation;
+ return this;
+ }
+
+ public Builder version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public Custom getCustom(String type) {
+ return customs.get(type);
+ }
+
+ public Builder putCustom(String type, Custom custom) {
+ customs.put(type, custom);
+ return this;
+ }
+
+ public Builder removeCustom(String type) {
+ customs.remove(type);
+ return this;
+ }
+
+ public ClusterState build() {
+ return new ClusterState(version, metaData, routingTable, nodes, blocks, allocationExplanation, customs.build());
+ }
+
+ public static byte[] toBytes(ClusterState state) throws IOException {
+ BytesStreamOutput os = new BytesStreamOutput();
+ writeTo(state, os);
+ return os.bytes().toBytes();
+ }
+
+ public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
+ return readFrom(new BytesStreamInput(data, false), localNode);
+ }
+
+ public static void writeTo(ClusterState state, StreamOutput out) throws IOException {
+ out.writeLong(state.version());
+ MetaData.Builder.writeTo(state.metaData(), out);
+ RoutingTable.Builder.writeTo(state.routingTable(), out);
+ DiscoveryNodes.Builder.writeTo(state.nodes(), out);
+ ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out);
+ state.allocationExplanation().writeTo(out);
+ out.writeVInt(state.customs().size());
+ for (ObjectObjectCursor<String, Custom> cursor : state.customs()) {
+ out.writeString(cursor.key);
+ lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
+ }
+ }
+
+ public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
+ Builder builder = new Builder();
+ builder.version = in.readLong();
+ builder.metaData = MetaData.Builder.readFrom(in);
+ builder.routingTable = RoutingTable.Builder.readFrom(in);
+ builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
+ builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
+ builder.allocationExplanation = AllocationExplanation.readAllocationExplanation(in);
+ int customSize = in.readVInt();
+ for (int i = 0; i < customSize; i++) {
+ String type = in.readString();
+ Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
+ builder.putCustom(type, customIndexMetaData);
+ }
+ return builder.build();
+ }
+ }
+}
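+
+// A minimal sketch of deriving a new state with the Builder above, bumping the
+// version and reusing everything else (`currentState` and `localNode` assumed in scope):
+//
+//   ClusterState newState = ClusterState.builder(currentState)
+//       .version(currentState.version() + 1)
+//       .build();
+//
+//   byte[] bytes = ClusterState.Builder.toBytes(newState);               // serialize
+//   ClusterState copy = ClusterState.Builder.fromBytes(bytes, localNode); // deserialize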
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java b/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java
new file mode 100644
index 0000000..64e1c03
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+/**
+ * A listener to be notified when a cluster state changes.
+ */
+public interface ClusterStateListener {
+
+ /**
+ * Called when cluster state changes.
+ */
+ void clusterChanged(ClusterChangedEvent event);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
new file mode 100644
index 0000000..490a556
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+/**
+ * A task that can update the cluster state.
+ */
+public interface ClusterStateUpdateTask {
+
+ /**
+ * Update the cluster state based on the current state. Return the *same instance* if no state
+ * should be changed.
+ */
+ ClusterState execute(ClusterState currentState) throws Exception;
+
+ /**
+ * A callback called when execute fails.
+ */
+ void onFailure(String source, Throwable t);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/src/main/java/org/elasticsearch/cluster/DiskUsage.java
new file mode 100644
index 0000000..2751c61
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/DiskUsage.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+/**
+ * Encapsulation class used to represent the amount of disk used on a node.
+ */
+public class DiskUsage {
+ final String nodeId;
+ final long totalBytes;
+ final long freeBytes;
+
+ public DiskUsage(String nodeId, long totalBytes, long freeBytes) {
+ if ((totalBytes < freeBytes) || (totalBytes < 0)) {
+ throw new IllegalStateException("Free bytes [" + freeBytes +
+ "] cannot be less than 0 or greater than total bytes [" + totalBytes + "]");
+ }
+ this.nodeId = nodeId;
+ this.totalBytes = totalBytes;
+ this.freeBytes = freeBytes;
+ }
+
+ public double getFreeDiskAsPercentage() {
+ double freePct = 100.0 * ((double)freeBytes / totalBytes);
+ return freePct;
+ }
+
+ public long getFreeBytes() {
+ return freeBytes;
+ }
+
+ public long getTotalBytes() {
+ return totalBytes;
+ }
+
+ public long getUsedBytes() {
+ return getTotalBytes() - getFreeBytes();
+ }
+
+ public String toString() {
+ return "[" + nodeId + "] free: " + getFreeBytes() + "[" + getFreeDiskAsPercentage() + "]";
+ }
+}
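+
+// A short sketch of the arithmetic above: 40 of 100 bytes free leaves 60 used
+// and a free percentage of 40.0.
+//
+//   DiskUsage usage = new DiskUsage("node-1", 100, 40);
+//   usage.getFreeDiskAsPercentage(); // 40.0
+//   usage.getUsedBytes();            // 60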
diff --git a/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java b/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java
new file mode 100644
index 0000000..92a2b0a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.ImmutableSettings;
+
+/**
+ * ClusterInfoService that provides empty maps for disk usage and shard sizes
+ */
+public class EmptyClusterInfoService extends AbstractComponent implements ClusterInfoService {
+
+ private final static class Holder {
+ private final static EmptyClusterInfoService instance = new EmptyClusterInfoService();
+ }
+ private final ClusterInfo emptyClusterInfo;
+
+ private EmptyClusterInfoService() {
+ super(ImmutableSettings.EMPTY);
+ emptyClusterInfo = new ClusterInfo(ImmutableMap.<String, DiskUsage>of(), ImmutableMap.<String, Long>of());
+ }
+
+ public static EmptyClusterInfoService getInstance() {
+ return Holder.instance;
+ }
+
+ @Override
+ public ClusterInfo getClusterInfo() {
+ return emptyClusterInfo;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
new file mode 100644
index 0000000..5b7fa8d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * InternalClusterInfoService implements the ClusterInfoService interface, with
+ * the cluster info routinely updated on a timer. The timer interval can be
+ * changed dynamically via the <code>cluster.info.update.interval</code> setting
+ * (defaulting to 30 seconds). The InternalClusterInfoService only runs on the
+ * master node; it also listens for changes in the number of data nodes and
+ * immediately submits a ClusterInfoUpdateJob when a data node has been added.
+ *
+ * Every time the timer fires, it gathers information about the disk usage and
+ * shard sizes across the cluster.
+ */
+public final class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
+
+ public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval";
+
+ private volatile TimeValue updateFrequency;
+
+ private volatile ImmutableMap<String, DiskUsage> usages;
+ private volatile ImmutableMap<String, Long> shardSizes;
+ private volatile boolean isMaster = false;
+ private volatile boolean enabled;
+ private final TransportNodesStatsAction transportNodesStatsAction;
+ private final TransportIndicesStatsAction transportIndicesStatsAction;
+ private final ClusterService clusterService;
+ private final ThreadPool threadPool;
+
+ @Inject
+ public InternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
+ TransportNodesStatsAction transportNodesStatsAction,
+ TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService,
+ ThreadPool threadPool) {
+ super(settings);
+ this.usages = ImmutableMap.of();
+ this.shardSizes = ImmutableMap.of();
+ this.transportNodesStatsAction = transportNodesStatsAction;
+ this.transportIndicesStatsAction = transportIndicesStatsAction;
+ this.clusterService = clusterService;
+ this.threadPool = threadPool;
+ this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30));
+ this.enabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false);
+ nodeSettingsService.addListener(new ApplySettings());
+
+ // Add InternalClusterInfoService to listen for Master changes
+ this.clusterService.add((LocalNodeMasterListener)this);
+ // Add to listen for state changes (when nodes are added)
+ this.clusterService.add((ClusterStateListener)this);
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null);
+ // ClusterInfoService is only enabled if the DiskThresholdDecider is enabled
+ Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
+
+ if (newUpdateFrequency != null) {
+ if (newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) {
+ logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency);
+ throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds");
+ } else {
+ logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency);
+ InternalClusterInfoService.this.updateFrequency = newUpdateFrequency;
+ }
+ }
+
+ // We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable
+ if (newEnabled != null) {
+ InternalClusterInfoService.this.enabled = newEnabled;
+ }
+ }
+ }
+
+ @Override
+ public void onMaster() {
+ this.isMaster = true;
+ if (logger.isTraceEnabled()) {
+ logger.trace("I have been elected master, scheduling a ClusterInfoUpdateJob");
+ }
+ try {
+ // Submit a job that will start after updateFrequency, and reschedule itself after running
+ threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob());
+ if (clusterService.state().getNodes().getDataNodes().size() > 1) {
+ // Submit an info update job to be run immediately
+ threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(false));
+ }
+ } catch (EsRejectedExecutionException ex) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex);
+ }
+ }
+ }
+
+ @Override
+ public void offMaster() {
+ this.isMaster = false;
+ }
+
+ @Override
+ public String executorName() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (!this.enabled) {
+ return;
+ }
+
+ // Check whether it was a data node that was added
+ boolean dataNodeAdded = false;
+ for (DiscoveryNode addedNode : event.nodesDelta().addedNodes()) {
+ if (addedNode.dataNode()) {
+ dataNodeAdded = true;
+ break;
+ }
+ }
+
+ if (this.isMaster && dataNodeAdded && clusterService.state().getNodes().getDataNodes().size() > 1) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("data node was added, retrieving new cluster info");
+ }
+ threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(false));
+ }
+
+ if (this.isMaster && event.nodesRemoved()) {
+ for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) {
+ if (removedNode.dataNode()) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Removing node from cluster info: {}", removedNode.getId());
+ }
+ Map<String, DiskUsage> newUsages = new HashMap<String, DiskUsage>(usages);
+ newUsages.remove(removedNode.getId());
+ usages = ImmutableMap.copyOf(newUsages);
+ }
+ }
+ }
+ }
+
+ @Override
+ public ClusterInfo getClusterInfo() {
+ return new ClusterInfo(usages, shardSizes);
+ }
+
+ /**
+ * Class used to submit {@link ClusterInfoUpdateJob}s on the
+ * {@link InternalClusterInfoService} threadpool, these jobs will
+ * reschedule themselves by placing a new instance of this class onto the
+ * scheduled threadpool.
+ */
+ public class SubmitReschedulingClusterInfoUpdatedJob implements Runnable {
+ @Override
+ public void run() {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Submitting new rescheduling cluster info update job");
+ }
+ try {
+ threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(true));
+ } catch (EsRejectedExecutionException ex) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Couldn't re-schedule cluster info update task - node might be shutting down", ex);
+ }
+ }
+ }
+ }
+
+
+ /**
+ * Runnable class that performs a {@link NodesStatsRequest} to retrieve
+ * disk usages for nodes in the cluster and an {@link IndicesStatsRequest}
+ * to retrieve the sizes of all shards to ensure they can fit on nodes
+ * during shard balancing.
+ */
+ public class ClusterInfoUpdateJob implements Runnable {
+
+ // This boolean is used to signal to the ClusterInfoUpdateJob that it
+ // needs to reschedule itself to run again at a later time. It can be
+ // set to false to only run once
+ private final boolean reschedule;
+
+ public ClusterInfoUpdateJob(boolean reschedule) {
+ this.reschedule = reschedule;
+ }
+
+ @Override
+ public void run() {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Performing ClusterInfoUpdateJob");
+ }
+
+ if (isMaster && this.reschedule) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Scheduling next run for updating cluster info in: {}", updateFrequency.toString());
+ }
+ try {
+ threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob());
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Reschedule cluster info service was rejected", ex);
+ }
+ }
+ if (!enabled) {
+ // Short-circuit if not enabled
+ if (logger.isTraceEnabled()) {
+ logger.trace("Skipping ClusterInfoUpdatedJob since it is disabled");
+ }
+ return;
+ }
+
+ NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true");
+ nodesStatsRequest.clear();
+ nodesStatsRequest.fs(true);
+ nodesStatsRequest.timeout(TimeValue.timeValueSeconds(15));
+
+ transportNodesStatsAction.execute(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
+ @Override
+ public void onResponse(NodesStatsResponse nodeStatses) {
+ Map<String, DiskUsage> newUsages = new HashMap<String, DiskUsage>();
+ for (NodeStats nodeStats : nodeStatses.getNodes()) {
+ if (nodeStats.getFs() == null) {
+ logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name());
+ } else {
+ long available = 0;
+ long total = 0;
+
+ for (FsStats.Info info : nodeStats.getFs()) {
+ available += info.getAvailable().bytes();
+ total += info.getTotal().bytes();
+ }
+ String nodeId = nodeStats.getNode().id();
+ if (logger.isTraceEnabled()) {
+ logger.trace("node: [{}], total disk: {}, available disk: {}", nodeId, total, available);
+ }
+ newUsages.put(nodeId, new DiskUsage(nodeId, total, available));
+ }
+ }
+ usages = ImmutableMap.copyOf(newUsages);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.error("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e);
+ }
+ });
+
+ IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
+ indicesStatsRequest.clear();
+ indicesStatsRequest.store(true);
+ transportIndicesStatsAction.execute(indicesStatsRequest, new ActionListener<IndicesStatsResponse>() {
+ @Override
+ public void onResponse(IndicesStatsResponse indicesStatsResponse) {
+ ShardStats[] stats = indicesStatsResponse.getShards();
+ HashMap<String, Long> newShardSizes = new HashMap<String, Long>();
+ for (ShardStats s : stats) {
+ long size = s.getStats().getStore().sizeInBytes();
+ String sid = shardIdentifierFromRouting(s.getShardRouting());
+ if (logger.isTraceEnabled()) {
+ logger.trace("shard: {} size: {}", sid, size);
+ }
+ newShardSizes.put(sid, size);
+ }
+ shardSizes = ImmutableMap.copyOf(newShardSizes);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.error("Failed to execute IndicesStatsAction for ClusterInfoUpdateJob", e);
+ }
+ });
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("Finished ClusterInfoUpdateJob");
+ }
+ }
+ }
+
+ /**
+ * Method that incorporates the ShardId for the shard into a string that
+ * includes a 'p' or 'r' depending on whether the shard is a primary.
+ */
+ public static String shardIdentifierFromRouting(ShardRouting shardRouting) {
+ return shardRouting.shardId().toString() + "[" + (shardRouting.primary() ? "p" : "r") + "]";
+ }
+}
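+
+// The identifier produced by shardIdentifierFromRouting, assuming the usual
+// "[index][shardId]" rendering of ShardId: a primary of shard 0 in index "test"
+// would map to "[test][0][p]", a replica to "[test][0][r]".
+//
+// The update interval can be configured in elasticsearch.yml, e.g.:
+//
+//   cluster.info.update.interval: 60s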
diff --git a/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java b/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java
new file mode 100644
index 0000000..ebea8b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+/**
+ * Enables listening to master change events of the local node (when the local node becomes the master, and when the local
+ * node ceases to be the master).
+ */
+public interface LocalNodeMasterListener {
+
+ /**
+ * Called when local node is elected to be the master
+ */
+ void onMaster();
+
+ /**
+ * Called when the local node used to be the master, a new master was elected and it's no longer the local node.
+ */
+ void offMaster();
+
+ /**
+ * The name of the executor that the callbacks of this listener should be executed on. The thread
+ * that is responsible for managing instances of this listener is the same thread handling the cluster state events. If
+ * the work done in the callbacks above is inexpensive, this value may be {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME}
+ * (indicating that the callbacks will run on the same thread the cluster state events are fired on). On the other hand,
+ * if the logic in the callbacks is heavier and takes longer to process (or perhaps involves blocking due to IO operations),
+ * prefer to execute it on a separate, more appropriate executor (e.g. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
+ * or {@link org.elasticsearch.threadpool.ThreadPool.Names#MANAGEMENT MANAGEMENT}).
+ *
+ * @return The name of the executor that will run the callbacks of this listener.
+ */
+ String executorName();
+
+}
+
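+// A minimal registration sketch for the listener above (the `clusterService`
+// instance is assumed injected); SAME keeps the callbacks on the cluster state thread.
+//
+//   clusterService.add(new LocalNodeMasterListener() {
+//       @Override
+//       public void onMaster() { /* start master-only work */ }
+//       @Override
+//       public void offMaster() { /* stop master-only work */ }
+//       @Override
+//       public String executorName() { return ThreadPool.Names.SAME; }
+//   });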
diff --git a/src/main/java/org/elasticsearch/cluster/ProcessedClusterStateUpdateTask.java b/src/main/java/org/elasticsearch/cluster/ProcessedClusterStateUpdateTask.java
new file mode 100644
index 0000000..7207496
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ProcessedClusterStateUpdateTask.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+/**
+ * An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when
+ * the cluster state update has been processed.
+ */
+public interface ProcessedClusterStateUpdateTask extends ClusterStateUpdateTask {
+
+ /**
+ * Called when the result of the {@link #execute(ClusterState)} call has been processed
+ * properly by all listeners.
+ */
+ void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/TimeoutClusterStateListener.java b/src/main/java/org/elasticsearch/cluster/TimeoutClusterStateListener.java
new file mode 100644
index 0000000..da38257
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/TimeoutClusterStateListener.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * An extension to the cluster state listener that allows for timeouts and for post-added notifications.
+ */
+public interface TimeoutClusterStateListener extends ClusterStateListener {
+
+ void postAdded();
+
+ void onClose();
+
+ void onTimeout(TimeValue timeout);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/TimeoutClusterStateUpdateTask.java b/src/main/java/org/elasticsearch/cluster/TimeoutClusterStateUpdateTask.java
new file mode 100644
index 0000000..1083e1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/TimeoutClusterStateUpdateTask.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * An extension interface to {@link org.elasticsearch.cluster.ClusterStateUpdateTask} that allows to associate
+ * a timeout.
+ */
+public interface TimeoutClusterStateUpdateTask extends ProcessedClusterStateUpdateTask {
+
+ /**
+ * The timeout for this cluster state update task; if the task is not processed within
+ * this time, {@link #onFailure(String, Throwable)} is called.
+ */
+ TimeValue timeout();
+}
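+
+// A minimal sketch combining the extension chain above: a task with a timeout that
+// is also notified once its result has been processed (`clusterService` assumed injected).
+//
+//   clusterService.submitStateUpdateTask("timed-update", new TimeoutClusterStateUpdateTask() {
+//       @Override
+//       public TimeValue timeout() { return TimeValue.timeValueSeconds(30); }
+//       @Override
+//       public ClusterState execute(ClusterState currentState) { return currentState; }
+//       @Override
+//       public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+//           // the update has been applied and seen by all listeners
+//       }
+//       @Override
+//       public void onFailure(String source, Throwable t) {
+//           // also called when the timeout above elapses first
+//       }
+//   });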
diff --git a/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateListener.java b/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateListener.java
new file mode 100644
index 0000000..1ed1926
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateListener.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+/**
+ * Listener used for cluster state update processing.
+ * Supports acknowledgement logic.
+ */
+public interface ClusterStateUpdateListener {
+
+ /**
+ * Called when the cluster state update is acknowledged
+ */
+ void onResponse(ClusterStateUpdateResponse response);
+
+ /**
+ * Called when any error is thrown during the cluster state update processing
+ */
+ void onFailure(Throwable t);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateRequest.java
new file mode 100644
index 0000000..c15310b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateRequest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Base class to be used when a cluster state update is needed.
+ * Contains the basic fields that are always needed.
+ */
+public abstract class ClusterStateUpdateRequest<T extends ClusterStateUpdateRequest<T>> {
+
+ private TimeValue ackTimeout;
+ private TimeValue masterNodeTimeout;
+
+ /**
+ * Returns the maximum time interval to wait for acknowledgements
+ */
+ public TimeValue ackTimeout() {
+ return ackTimeout;
+ }
+
+ /**
+ * Sets the acknowledgement timeout
+ */
+ @SuppressWarnings("unchecked")
+ public T ackTimeout(TimeValue ackTimeout) {
+ this.ackTimeout = ackTimeout;
+ return (T) this;
+ }
+
+ /**
+ * Returns the maximum time interval to wait for the request to
+ * be completed on the master node
+ */
+ public TimeValue masterNodeTimeout() {
+ return masterNodeTimeout;
+ }
+
+ /**
+ * Sets the master node timeout
+ */
+ @SuppressWarnings("unchecked")
+ public T masterNodeTimeout(TimeValue masterNodeTimeout) {
+ this.masterNodeTimeout = masterNodeTimeout;
+ return (T) this;
+ }
+}
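+
+// A minimal sketch of the self-typed generic pattern above, which lets the setters
+// chain on the concrete subclass; FooRequest is hypothetical, for illustration only.
+//
+//   class FooRequest extends ClusterStateUpdateRequest<FooRequest> {}
+//
+//   FooRequest request = new FooRequest()
+//       .ackTimeout(TimeValue.timeValueSeconds(30))
+//       .masterNodeTimeout(TimeValue.timeValueSeconds(10));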
diff --git a/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateResponse.java b/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateResponse.java
new file mode 100644
index 0000000..42db2e2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ack/ClusterStateUpdateResponse.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+/**
+ * Base response returned after a cluster state update
+ */
+public class ClusterStateUpdateResponse {
+
+ private final boolean acknowledged;
+
+ public ClusterStateUpdateResponse(boolean acknowledged) {
+ this.acknowledged = acknowledged;
+ }
+
+ /**
+ * Whether the cluster state update was acknowledged or not
+ */
+ public boolean isAcknowledged() {
+ return acknowledged;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java
new file mode 100644
index 0000000..c691abe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.ack;
+
+/**
+ * Base cluster state update request for updates that execute against multiple indices
+ */
+public abstract class IndicesClusterStateUpdateRequest<T extends IndicesClusterStateUpdateRequest<T>> extends ClusterStateUpdateRequest<T> {
+
+ private String[] indices;
+
+ /**
+ * Returns the indices the operation needs to be executed on
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Sets the indices the operation needs to be executed on
+ */
+ @SuppressWarnings("unchecked")
+ public T indices(String[] indices) {
+ this.indices = indices;
+ return (T) this;
+ }
+}
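Concrete subclasses get the indices setter and the inherited timeouts in one fluent chain; a hedged sketch with an invented subclass name:

    // Hypothetical subclass of the indices-aware base request.
    class CloseIndicesClusterStateUpdateRequest
            extends IndicesClusterStateUpdateRequest<CloseIndicesClusterStateUpdateRequest> {
    }

    CloseIndicesClusterStateUpdateRequest request = new CloseIndicesClusterStateUpdateRequest()
            .indices(new String[]{"logs-2014-05", "logs-2014-06"})
            .ackTimeout(TimeValue.timeValueMinutes(1));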
diff --git a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
new file mode 100644
index 0000000..a7df8e9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.action.index;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaDataMappingService;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Called by shards in the cluster when their mapping was dynamically updated, so that the new mapping
+ * can be reflected in the cluster state meta data (and broadcast to all members).
+ */
+public class MappingUpdatedAction extends TransportMasterNodeOperationAction<MappingUpdatedAction.MappingUpdatedRequest, MappingUpdatedAction.MappingUpdatedResponse> {
+
+ private final AtomicLong mappingUpdateOrderGen = new AtomicLong();
+ private final MetaDataMappingService metaDataMappingService;
+
+ @Inject
+ public MappingUpdatedAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+ MetaDataMappingService metaDataMappingService) {
+ super(settings, transportService, clusterService, threadPool);
+ this.metaDataMappingService = metaDataMappingService;
+ }
+
+ public long generateNextMappingUpdateOrder() {
+ return mappingUpdateOrderGen.incrementAndGet();
+ }
+
+ @Override
+ protected String transportAction() {
+ return "cluster/mappingUpdated";
+ }
+
+ @Override
+ protected String executor() {
+ // we go async right away
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ protected MappingUpdatedRequest newRequest() {
+ return new MappingUpdatedRequest();
+ }
+
+ @Override
+ protected MappingUpdatedResponse newResponse() {
+ return new MappingUpdatedResponse();
+ }
+
+ @Override
+ protected void masterOperation(final MappingUpdatedRequest request, final ClusterState state, final ActionListener<MappingUpdatedResponse> listener) throws ElasticsearchException {
+ metaDataMappingService.updateMapping(request.index(), request.indexUUID(), request.type(), request.mappingSource(), request.order, request.nodeId, new ClusterStateUpdateListener() {
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new MappingUpdatedResponse());
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.warn("[{}] update-mapping [{}] failed to dynamically update the mapping in cluster_state from shard", t, request.index(), request.type());
+ listener.onFailure(t);
+ }
+ });
+ }
+
+ public static class MappingUpdatedResponse extends ActionResponse {
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+ }
+
+ public static class MappingUpdatedRequest extends MasterNodeOperationRequest<MappingUpdatedRequest> {
+
+ private String index;
+ private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
+ private String type;
+ private CompressedString mappingSource;
+ private long order = -1; // -1 means not set...
+ private String nodeId = null; // null means not set
+
+ MappingUpdatedRequest() {
+ }
+
+ public MappingUpdatedRequest(String index, String indexUUID, String type, CompressedString mappingSource, long order, String nodeId) {
+ this.index = index;
+ this.indexUUID = indexUUID;
+ this.type = type;
+ this.mappingSource = mappingSource;
+ this.order = order;
+ this.nodeId = nodeId;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public String indexUUID() {
+ return indexUUID;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public CompressedString mappingSource() {
+ return mappingSource;
+ }
+
+ /**
+ * Returns the update order, or -1 if not set.
+ */
+ public long order() {
+ return this.order;
+ }
+
+ /**
+ * Returns the node id, or null if not set.
+ */
+ public String nodeId() {
+ return this.nodeId;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ type = in.readString();
+ mappingSource = CompressedString.readCompressedString(in);
+ indexUUID = in.readString();
+ order = in.readLong();
+ nodeId = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeString(type);
+ mappingSource.writeTo(out);
+ out.writeString(indexUUID);
+ out.writeLong(order);
+ out.writeOptionalString(nodeId);
+ }
+
+ @Override
+ public String toString() {
+ return "index [" + index + "], indexUUID [" + indexUUID + "], type [" + type + "] and source [" + mappingSource + "]";
+ }
+ }
+} \ No newline at end of file
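The request's wire format is positional: readFrom must consume exactly the fields writeTo produced, in the same order (index, type, mappingSource, indexUUID, order, nodeId). A round-trip sketch, assuming the 1.x BytesStreamOutput helper and BytesReference.streamInput(); it has to live in the same package, since the no-arg request constructor is package-private:

    static MappingUpdatedAction.MappingUpdatedRequest roundTrip(
            MappingUpdatedAction.MappingUpdatedRequest original) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);  // index, type, mappingSource, indexUUID, order, nodeId
        MappingUpdatedAction.MappingUpdatedRequest copy =
                new MappingUpdatedAction.MappingUpdatedRequest();
        copy.readFrom(out.bytes().streamInput());  // same fields, same order
        return copy;
    }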
diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java
new file mode 100644
index 0000000..7a3ed3b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.action.index;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * Notifies the master node that an index (or its on-disk store) has been deleted on a node,
+ * and invokes the registered listeners on the master once the event arrives.
+ */
+public class NodeIndexDeletedAction extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+ private final TransportService transportService;
+ private final List<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ @Inject
+ public NodeIndexDeletedAction(Settings settings, ThreadPool threadPool, TransportService transportService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.transportService = transportService;
+ transportService.registerHandler(NodeIndexDeletedTransportHandler.ACTION, new NodeIndexDeletedTransportHandler());
+ transportService.registerHandler(NodeIndexStoreDeletedTransportHandler.ACTION, new NodeIndexStoreDeletedTransportHandler());
+ }
+
+ public void add(Listener listener) {
+ listeners.add(listener);
+ }
+
+ public void remove(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ public void nodeIndexDeleted(final ClusterState clusterState, final String index, final String nodeId) throws ElasticsearchException {
+ DiscoveryNodes nodes = clusterState.nodes();
+ if (nodes.localNodeMaster()) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ innerNodeIndexDeleted(index, nodeId);
+ }
+ });
+ } else {
+ transportService.sendRequest(clusterState.nodes().masterNode(),
+ NodeIndexDeletedTransportHandler.ACTION, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+ }
+
+ public void nodeIndexStoreDeleted(final ClusterState clusterState, final String index, final String nodeId) throws ElasticsearchException {
+ DiscoveryNodes nodes = clusterState.nodes();
+ if (nodes.localNodeMaster()) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ innerNodeIndexStoreDeleted(index, nodeId);
+ }
+ });
+ } else {
+ transportService.sendRequest(clusterState.nodes().masterNode(),
+ NodeIndexStoreDeletedTransportHandler.ACTION, new NodeIndexStoreDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+ }
+
+ private void innerNodeIndexDeleted(String index, String nodeId) {
+ for (Listener listener : listeners) {
+ listener.onNodeIndexDeleted(index, nodeId);
+ }
+ }
+
+ private void innerNodeIndexStoreDeleted(String index, String nodeId) {
+ for (Listener listener : listeners) {
+ listener.onNodeIndexStoreDeleted(index, nodeId);
+ }
+ }
+
+ public interface Listener {
+ void onNodeIndexDeleted(String index, String nodeId);
+
+ void onNodeIndexStoreDeleted(String index, String nodeId);
+ }
+
+ private class NodeIndexDeletedTransportHandler extends BaseTransportRequestHandler<NodeIndexDeletedMessage> {
+
+ static final String ACTION = "cluster/nodeIndexDeleted";
+
+ @Override
+ public NodeIndexDeletedMessage newInstance() {
+ return new NodeIndexDeletedMessage();
+ }
+
+ @Override
+ public void messageReceived(NodeIndexDeletedMessage message, TransportChannel channel) throws Exception {
+ innerNodeIndexDeleted(message.index, message.nodeId);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ private class NodeIndexStoreDeletedTransportHandler extends BaseTransportRequestHandler<NodeIndexStoreDeletedMessage> {
+
+ static final String ACTION = "cluster/nodeIndexStoreDeleted";
+
+ @Override
+ public NodeIndexStoreDeletedMessage newInstance() {
+ return new NodeIndexStoreDeletedMessage();
+ }
+
+ @Override
+ public void messageReceived(NodeIndexStoreDeletedMessage message, TransportChannel channel) throws Exception {
+ innerNodeIndexStoreDeleted(message.index, message.nodeId);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ static class NodeIndexDeletedMessage extends TransportRequest {
+
+ String index;
+ String nodeId;
+
+ NodeIndexDeletedMessage() {
+ }
+
+ NodeIndexDeletedMessage(String index, String nodeId) {
+ this.index = index;
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeString(nodeId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ nodeId = in.readString();
+ }
+ }
+
+ static class NodeIndexStoreDeletedMessage extends TransportRequest {
+
+ String index;
+ String nodeId;
+
+ NodeIndexStoreDeletedMessage() {
+ }
+
+ NodeIndexStoreDeletedMessage(String index, String nodeId) {
+ this.index = index;
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeString(nodeId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ nodeId = in.readString();
+ }
+ }
+} \ No newline at end of file
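A hedged sketch of registering a listener; how the action instance is obtained is assumed wiring, not part of this diff:

    // Listeners run on the master once a node reports the deletion.
    nodeIndexDeletedAction.add(new NodeIndexDeletedAction.Listener() {
        @Override
        public void onNodeIndexDeleted(String index, String nodeId) {
            // e.g. count per-node acks until every data node confirmed the delete
        }

        @Override
        public void onNodeIndexStoreDeleted(String index, String nodeId) {
            // e.g. additionally track that the on-disk store is gone
        }
    });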
diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java
new file mode 100644
index 0000000..db88fe1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.action.index;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaDataMappingService;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+
+/**
+ * Asks the master node to refresh the mapping of an index for the given types,
+ * based on the current cluster state meta data.
+ */
+public class NodeMappingRefreshAction extends AbstractComponent {
+
+ private final TransportService transportService;
+ private final MetaDataMappingService metaDataMappingService;
+
+ @Inject
+ public NodeMappingRefreshAction(Settings settings, TransportService transportService, MetaDataMappingService metaDataMappingService) {
+ super(settings);
+ this.transportService = transportService;
+ this.metaDataMappingService = metaDataMappingService;
+ transportService.registerHandler(NodeMappingRefreshTransportHandler.ACTION, new NodeMappingRefreshTransportHandler());
+ }
+
+ public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) throws ElasticsearchException {
+ DiscoveryNodes nodes = state.nodes();
+ if (nodes.localNodeMaster()) {
+ innerMappingRefresh(request);
+ } else {
+ transportService.sendRequest(state.nodes().masterNode(),
+ NodeMappingRefreshTransportHandler.ACTION, request, EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+ }
+
+ private void innerMappingRefresh(NodeMappingRefreshRequest request) {
+ metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types());
+ }
+
+ private class NodeMappingRefreshTransportHandler extends BaseTransportRequestHandler<NodeMappingRefreshRequest> {
+
+ static final String ACTION = "cluster/nodeMappingRefresh";
+
+ @Override
+ public NodeMappingRefreshRequest newInstance() {
+ return new NodeMappingRefreshRequest();
+ }
+
+ @Override
+ public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
+ innerMappingRefresh(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ public static class NodeMappingRefreshRequest extends TransportRequest {
+
+ private String index;
+ private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
+ private String[] types;
+ private String nodeId;
+
+ NodeMappingRefreshRequest() {
+ }
+
+ public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) {
+ this.index = index;
+ this.indexUUID = indexUUID;
+ this.types = types;
+ this.nodeId = nodeId;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public String indexUUID() {
+ return indexUUID;
+ }
+
+
+ public String[] types() {
+ return types;
+ }
+
+ public String nodeId() {
+ return nodeId;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeStringArray(types);
+ out.writeString(nodeId);
+ out.writeString(indexUUID);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ types = in.readStringArray();
+ nodeId = in.readString();
+ indexUUID = in.readString();
+ }
+ }
+}
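A sketch of the call site on a data node; clusterState, indexMetaData and localNodeId are assumed to come from the surrounding component:

    // If the local node is the master this runs inline; otherwise the request
    // is forwarded to the master over the transport layer.
    nodeMappingRefreshAction.nodeMappingRefresh(clusterState,
            new NodeMappingRefreshAction.NodeMappingRefreshRequest(
                    "my_index", indexMetaData.getUUID(), new String[]{"my_type"}, localNodeId));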
diff --git a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
new file mode 100644
index 0000000..842936a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -0,0 +1,349 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.action.shard;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+
+import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
+
+/**
+ * Notifies the master node about started and failed shards, batching the events
+ * and applying them to the routing table via the allocation service.
+ */
+public class ShardStateAction extends AbstractComponent {
+
+ private final TransportService transportService;
+ private final ClusterService clusterService;
+ private final AllocationService allocationService;
+ private final ThreadPool threadPool;
+
+ private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
+ private final BlockingQueue<ShardRoutingEntry> failedShardQueue = ConcurrentCollections.newBlockingQueue();
+
+ @Inject
+ public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
+ AllocationService allocationService, ThreadPool threadPool) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+ this.allocationService = allocationService;
+ this.threadPool = threadPool;
+
+ transportService.registerHandler(ShardStartedTransportHandler.ACTION, new ShardStartedTransportHandler());
+ transportService.registerHandler(ShardFailedTransportHandler.ACTION, new ShardFailedTransportHandler());
+ }
+
+ public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) throws ElasticsearchException {
+ ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason);
+ logger.warn("{} sending failed shard for {}", shardRouting.shardId(), shardRoutingEntry);
+ DiscoveryNodes nodes = clusterService.state().nodes();
+ if (nodes.localNodeMaster()) {
+ innerShardFailed(shardRoutingEntry);
+ } else {
+ transportService.sendRequest(clusterService.state().nodes().masterNode(),
+ ShardFailedTransportHandler.ACTION, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("failed to send failed shard to [{}]", exp, clusterService.state().nodes().masterNode());
+ }
+ });
+ }
+ }
+
+ public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) throws ElasticsearchException {
+
+ ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason);
+
+ logger.debug("sending shard started for {}", shardRoutingEntry);
+
+ DiscoveryNodes nodes = clusterService.state().nodes();
+ if (nodes.localNodeMaster()) {
+ innerShardStarted(shardRoutingEntry);
+ } else {
+ transportService.sendRequest(clusterService.state().nodes().masterNode(),
+ ShardStartedTransportHandler.ACTION, new ShardRoutingEntry(shardRouting, indexUUID, reason), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("failed to send shard started to [{}]", exp, clusterService.state().nodes().masterNode());
+ }
+ });
+ }
+ }
+
+ private void innerShardFailed(final ShardRoutingEntry shardRoutingEntry) {
+ logger.warn("{} received shard failed for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
+ failedShardQueue.add(shardRoutingEntry);
+ clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.reason + "]", Priority.HIGH, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (shardRoutingEntry.processed) {
+ return currentState;
+ }
+
+ List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<ShardRoutingEntry>();
+ failedShardQueue.drainTo(shardRoutingEntries);
+
+ // nothing to process (a previous event has processed it already)
+ if (shardRoutingEntries.isEmpty()) {
+ return currentState;
+ }
+
+ MetaData metaData = currentState.getMetaData();
+
+ List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<ShardRouting>(shardRoutingEntries.size());
+ for (int i = 0; i < shardRoutingEntries.size(); i++) {
+ ShardRoutingEntry shardRoutingEntry = shardRoutingEntries.get(i);
+ shardRoutingEntry.processed = true;
+ ShardRouting shardRouting = shardRoutingEntry.shardRouting;
+ IndexMetaData indexMetaData = metaData.index(shardRouting.index());
+ // if there is no metadata or the current index is not of the right uuid, the index has been deleted while it was being allocated
+ // which is fine, we should just ignore this
+ if (indexMetaData == null) {
+ continue;
+ }
+ if (!indexMetaData.isSameUUID(shardRoutingEntry.indexUUID)) {
+ logger.debug("{} ignoring shard failed, different index uuid, current {}, got {}", shardRouting.shardId(), indexMetaData.getUUID(), shardRoutingEntry);
+ continue;
+ }
+
+ logger.debug("{} will apply shard failed {}", shardRouting.shardId(), shardRoutingEntry);
+ shardRoutingsToBeApplied.add(shardRouting);
+ }
+
+ RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
+ if (!routingResult.changed()) {
+ return currentState;
+ }
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ }
+
+ private void innerShardStarted(final ShardRoutingEntry shardRoutingEntry) {
+ logger.debug("received shard started for {}", shardRoutingEntry);
+ // buffer shard started requests, and the state update tasks will simply drain it
+ // this is to optimize the number of "started" events we generate, and batch them
+ // possibly, we can do time based batching as well, but usually, we would want to
+ // process started events as fast as possible, to make shards available
+ startedShardsQueue.add(shardRoutingEntry);
+
+ clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.reason + "]", Priority.URGENT,
+ new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+
+ if (shardRoutingEntry.processed) {
+ return currentState;
+ }
+
+ List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<ShardRoutingEntry>();
+ startedShardsQueue.drainTo(shardRoutingEntries);
+
+ // nothing to process (a previous event has processed it already)
+ if (shardRoutingEntries.isEmpty()) {
+ return currentState;
+ }
+
+ RoutingTable routingTable = currentState.routingTable();
+ MetaData metaData = currentState.getMetaData();
+
+ List<ShardRouting> shardRoutingToBeApplied = new ArrayList<ShardRouting>(shardRoutingEntries.size());
+
+ for (int i = 0; i < shardRoutingEntries.size(); i++) {
+ ShardRoutingEntry shardRoutingEntry = shardRoutingEntries.get(i);
+ shardRoutingEntry.processed = true;
+ ShardRouting shardRouting = shardRoutingEntry.shardRouting;
+ try {
+ IndexMetaData indexMetaData = metaData.index(shardRouting.index());
+ IndexRoutingTable indexRoutingTable = routingTable.index(shardRouting.index());
+ // if there is no metadata, no routing table or the current index is not of the right uuid, the index has been deleted while it was being allocated
+ // which is fine, we should just ignore this
+ if (indexMetaData == null) {
+ continue;
+ }
+ if (indexRoutingTable == null) {
+ continue;
+ }
+
+ if (!indexMetaData.isSameUUID(shardRoutingEntry.indexUUID)) {
+ logger.debug("{} ignoring shard started, different index uuid, current {}, got {}", shardRouting.shardId(), indexMetaData.getUUID(), shardRoutingEntry);
+ continue;
+ }
+
+ // find the one that maps to us, if it's already started, no need to do anything...
+ // the shard might already be started since the node that is starting the shard might get cluster events
+ // with the shard still initializing, and it will try to start it again (until the verification comes)
+
+ IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardRouting.id());
+
+ boolean applyShardEvent = true;
+
+ for (ShardRouting entry : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId().equals(entry.currentNodeId())) {
+ // we found the same shard that exists on the same node id
+ if (!entry.initializing()) {
+ // shard is past the initializing state, skipping event (probably already started)
+ logger.debug("{} ignoring shard started event for {}, current state: {}", shardRouting.shardId(), shardRoutingEntry, entry.state());
+ applyShardEvent = false;
+ }
+ }
+ }
+
+ if (applyShardEvent) {
+ shardRoutingToBeApplied.add(shardRouting);
+ logger.debug("{} will apply shard started {}", shardRouting.shardId(), shardRoutingEntry);
+ }
+
+ } catch (Throwable t) {
+ logger.error("{} unexpected failure while processing shard started [{}]", t, shardRouting.shardId(), shardRouting);
+ }
+ }
+
+ if (shardRoutingToBeApplied.isEmpty()) {
+ return currentState;
+ }
+
+ RoutingAllocation.Result routingResult = allocationService.applyStartedShards(currentState, shardRoutingToBeApplied, true);
+ if (!routingResult.changed()) {
+ return currentState;
+ }
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ }
+
+ private class ShardFailedTransportHandler extends BaseTransportRequestHandler<ShardRoutingEntry> {
+
+ static final String ACTION = "cluster/shardFailure";
+
+ @Override
+ public ShardRoutingEntry newInstance() {
+ return new ShardRoutingEntry();
+ }
+
+ @Override
+ public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+ innerShardFailed(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ class ShardStartedTransportHandler extends BaseTransportRequestHandler<ShardRoutingEntry> {
+
+ static final String ACTION = "cluster/shardStarted";
+
+ @Override
+ public ShardRoutingEntry newInstance() {
+ return new ShardRoutingEntry();
+ }
+
+ @Override
+ public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+ innerShardStarted(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ static class ShardRoutingEntry extends TransportRequest {
+
+ private ShardRouting shardRouting;
+
+ private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
+
+ private String reason;
+
+ volatile boolean processed; // state field, no need to serialize
+
+ private ShardRoutingEntry() {
+ }
+
+ private ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String reason) {
+ this.shardRouting = shardRouting;
+ this.reason = reason;
+ this.indexUUID = indexUUID;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardRouting = readShardRoutingEntry(in);
+ reason = in.readString();
+ indexUUID = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardRouting.writeTo(out);
+ out.writeString(reason);
+ out.writeString(indexUUID);
+ }
+
+ @Override
+ public String toString() {
+ return "" + shardRouting + ", indexUUID [" + indexUUID + "], reason [" + reason + "]";
+ }
+ }
+}
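Both handlers above share one batching idiom: every event is enqueued and a cluster state update task is submitted, but whichever task runs first drains the queue and applies the whole accumulated batch, so tasks submitted for already-drained entries return the current state untouched. A distilled, hypothetical sketch of that idiom (Event, submitTask and apply are stand-ins, not from this diff):

    final BlockingQueue<Event> queue = ConcurrentCollections.newBlockingQueue();

    void onEvent(final Event event) {
        queue.add(event);
        submitTask(new Runnable() {
            @Override
            public void run() {
                if (event.processed) {
                    return;            // an earlier task already drained this entry
                }
                List<Event> batch = new ArrayList<Event>();
                queue.drainTo(batch);  // everything queued so far, in one shot
                for (Event e : batch) {
                    e.processed = true;
                }
                apply(batch);          // a single pass over the whole batch
            }
        });
    }

    static class Event {
        volatile boolean processed;    // mirrors ShardRoutingEntry.processed
    }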
diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
new file mode 100644
index 0000000..7e76344
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.block;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Locale;
+
+/**
+ * A cluster block: an operation restriction identified by an id, with a description,
+ * the levels (read, write, metadata) it applies to, and an associated REST status.
+ */
+public class ClusterBlock implements Serializable, Streamable, ToXContent {
+
+ private int id;
+
+ private String description;
+
+ private ClusterBlockLevel[] levels;
+
+ private boolean retryable;
+
+ private boolean disableStatePersistence = false;
+
+ private RestStatus status;
+
+ ClusterBlock() {
+ }
+
+ public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, RestStatus status, ClusterBlockLevel... levels) {
+ this.id = id;
+ this.description = description;
+ this.retryable = retryable;
+ this.disableStatePersistence = disableStatePersistence;
+ this.status = status;
+ this.levels = levels;
+ }
+
+ public int id() {
+ return this.id;
+ }
+
+ public String description() {
+ return this.description;
+ }
+
+ public RestStatus status() {
+ return this.status;
+ }
+
+ public ClusterBlockLevel[] levels() {
+ return this.levels;
+ }
+
+ public boolean contains(ClusterBlockLevel level) {
+ for (ClusterBlockLevel testLevel : levels) {
+ if (testLevel == level) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns whether operations should move into a retry state while this block is present.
+ */
+ public boolean retryable() {
+ return this.retryable;
+ }
+
+ /**
+ * Returns whether global state persistence should be disabled while this block is present.
+ * Note that this is only relevant for global blocks.
+ */
+ public boolean disableStatePersistence() {
+ return this.disableStatePersistence;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Integer.toString(id));
+ builder.field("description", description);
+ builder.field("retryable", retryable);
+ if (disableStatePersistence) {
+ builder.field("disable_state_persistence", disableStatePersistence);
+ }
+ builder.startArray("levels");
+ for (ClusterBlockLevel level : levels) {
+ builder.value(level.name().toLowerCase(Locale.ROOT));
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static ClusterBlock readClusterBlock(StreamInput in) throws IOException {
+ ClusterBlock block = new ClusterBlock();
+ block.readFrom(in);
+ return block;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ id = in.readVInt();
+ description = in.readString();
+ levels = new ClusterBlockLevel[in.readVInt()];
+ for (int i = 0; i < levels.length; i++) {
+ levels[i] = ClusterBlockLevel.fromId(in.readVInt());
+ }
+ retryable = in.readBoolean();
+ disableStatePersistence = in.readBoolean();
+ status = RestStatus.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(id);
+ out.writeString(description);
+ out.writeVInt(levels.length);
+ for (ClusterBlockLevel level : levels) {
+ out.writeVInt(level.id());
+ }
+ out.writeBoolean(retryable);
+ out.writeBoolean(disableStatePersistence);
+ RestStatus.writeTo(out, status);
+ }
+
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(id).append(",").append(description).append(", blocks ");
+ for (ClusterBlockLevel level : levels) {
+ sb.append(level.name()).append(",");
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ClusterBlock that = (ClusterBlock) o;
+
+ if (id != that.id) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return id;
+ }
+}
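An illustrative construction (the id, description and status are invented for the example):

    ClusterBlock readOnlyExample = new ClusterBlock(123, "index is read-only (example)",
            false /* not retryable */, false /* keep state persistence */,
            RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA);

    // contains() linearly scans the configured levels:
    assert readOnlyExample.contains(ClusterBlockLevel.WRITE);
    assert !readOnlyExample.contains(ClusterBlockLevel.READ);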
diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java
new file mode 100644
index 0000000..5627353
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.block;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Exception thrown when an operation is rejected due to one or more active cluster blocks.
+ */
+public class ClusterBlockException extends ElasticsearchException {
+
+ private final ImmutableSet<ClusterBlock> blocks;
+
+ public ClusterBlockException(ImmutableSet<ClusterBlock> blocks) {
+ super(buildMessage(blocks));
+ this.blocks = blocks;
+ }
+
+ public boolean retryable() {
+ for (ClusterBlock block : blocks) {
+ if (!block.retryable()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public ImmutableSet<ClusterBlock> blocks() {
+ return blocks;
+ }
+
+ private static String buildMessage(ImmutableSet<ClusterBlock> blocks) {
+ StringBuilder sb = new StringBuilder("blocked by: ");
+ for (ClusterBlock block : blocks) {
+ sb.append("[").append(block.status()).append("/").append(block.id()).append("/").append(block.description()).append("];");
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public RestStatus status() {
+ RestStatus status = null;
+ for (ClusterBlock block : blocks) {
+ if (status == null) {
+ status = block.status();
+ } else if (status.getStatus() < block.status().getStatus()) {
+ status = block.status();
+ }
+ }
+ return status;
+ }
+}
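status() keeps the numerically highest code among the blocks, and retryable() is true only if every block is retryable. A hedged example with two invented blocks:

    ClusterBlock noMaster = new ClusterBlock(201, "no master (example)", true, true,
            RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
    ClusterBlock readOnly = new ClusterBlock(202, "read-only (example)", false, false,
            RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE);

    ClusterBlockException e = new ClusterBlockException(ImmutableSet.of(noMaster, readOnly));
    assert e.status() == RestStatus.SERVICE_UNAVAILABLE;  // 503 outranks 403
    assert !e.retryable();                                // readOnly is not retryable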
diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
new file mode 100644
index 0000000..b5c426e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.block;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * The level at which a {@link ClusterBlock} applies: read, write or metadata operations.
+ */
+public enum ClusterBlockLevel {
+ READ(0),
+ WRITE(1),
+ METADATA(2);
+
+ public static final ClusterBlockLevel[] ALL = new ClusterBlockLevel[]{READ, WRITE, METADATA};
+ public static final ClusterBlockLevel[] READ_WRITE = new ClusterBlockLevel[]{READ, WRITE};
+
+ private final int id;
+
+ ClusterBlockLevel(int id) {
+ this.id = id;
+ }
+
+ public int id() {
+ return this.id;
+ }
+
+ public static ClusterBlockLevel fromId(int id) {
+ if (id == 0) {
+ return READ;
+ } else if (id == 1) {
+ return WRITE;
+ } else if (id == 2) {
+ return METADATA;
+ }
+ throw new ElasticsearchIllegalArgumentException("No cluster block level matching [" + id + "]");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
new file mode 100644
index 0000000..957bd40
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.block;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Represents the current set of cluster-wide and per-index blocks that prevent dirty operations from being executed against the cluster.
+ */
+public class ClusterBlocks {
+
+ public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(ImmutableSet.<ClusterBlock>of(), ImmutableMap.<String, ImmutableSet<ClusterBlock>>of());
+
+ private final ImmutableSet<ClusterBlock> global;
+
+ private final ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks;
+
+ private final ImmutableLevelHolder[] levelHolders;
+
+ ClusterBlocks(ImmutableSet<ClusterBlock> global, ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks) {
+ this.global = global;
+ this.indicesBlocks = indicesBlocks;
+
+ levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
+ for (ClusterBlockLevel level : ClusterBlockLevel.values()) {
+ ImmutableSet.Builder<ClusterBlock> globalBuilder = ImmutableSet.builder();
+ for (ClusterBlock block : global) {
+ if (block.contains(level)) {
+ globalBuilder.add(block);
+ }
+ }
+
+
+ ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
+ for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : indicesBlocks.entrySet()) {
+ ImmutableSet.Builder<ClusterBlock> indexBuilder = ImmutableSet.builder();
+ for (ClusterBlock block : entry.getValue()) {
+ if (block.contains(level)) {
+ indexBuilder.add(block);
+ }
+ }
+
+ indicesBuilder.put(entry.getKey(), indexBuilder.build());
+ }
+
+ levelHolders[level.id()] = new ImmutableLevelHolder(globalBuilder.build(), indicesBuilder.build());
+ }
+ }
+
+ public ImmutableSet<ClusterBlock> global() {
+ return global;
+ }
+
+ public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices() {
+ return indicesBlocks;
+ }
+
+ public ImmutableSet<ClusterBlock> global(ClusterBlockLevel level) {
+ return levelHolders[level.id()].global();
+ }
+
+ public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices(ClusterBlockLevel level) {
+ return levelHolders[level.id()].indices();
+ }
+
+ /**
+ * Returns <tt>true</tt> if one of the global blocks has its disable state persistence flag set.
+ */
+ public boolean disableStatePersistence() {
+ for (ClusterBlock clusterBlock : global) {
+ if (clusterBlock.disableStatePersistence()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public boolean hasGlobalBlock(ClusterBlock block) {
+ return global.contains(block);
+ }
+
+ /**
+ * Is there a global block with the provided status?
+ */
+ public boolean hasGlobalBlock(RestStatus status) {
+ for (ClusterBlock clusterBlock : global) {
+ if (clusterBlock.status().equals(status)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public boolean hasIndexBlock(String index, ClusterBlock block) {
+ return indicesBlocks.containsKey(index) && indicesBlocks.get(index).contains(block);
+ }
+
+ public void globalBlockedRaiseException(ClusterBlockLevel level) throws ClusterBlockException {
+ ClusterBlockException blockException = globalBlockedException(level);
+ if (blockException != null) {
+ throw blockException;
+ }
+ }
+
+ public ClusterBlockException globalBlockedException(ClusterBlockLevel level) {
+ if (global(level).isEmpty()) {
+ return null;
+ }
+ return new ClusterBlockException(ImmutableSet.copyOf(global(level)));
+ }
+
+ public void indexBlockedRaiseException(ClusterBlockLevel level, String index) throws ClusterBlockException {
+ ClusterBlockException blockException = indexBlockedException(level, index);
+ if (blockException != null) {
+ throw blockException;
+ }
+ }
+
+ public ClusterBlockException indexBlockedException(ClusterBlockLevel level, String index) {
+ if (!indexBlocked(level, index)) {
+ return null;
+ }
+ ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
+ builder.addAll(global(level));
+ ImmutableSet<ClusterBlock> indexBlocks = indices(level).get(index);
+ if (indexBlocks != null) {
+ builder.addAll(indexBlocks);
+ }
+ return new ClusterBlockException(builder.build());
+ }
+
+ public boolean indexBlocked(ClusterBlockLevel level, String index) {
+ if (!global(level).isEmpty()) {
+ return true;
+ }
+ ImmutableSet<ClusterBlock> indexBlocks = indices(level).get(index);
+ if (indexBlocks != null && !indexBlocks.isEmpty()) {
+ return true;
+ }
+ return false;
+ }
+
+ public ClusterBlockException indicesBlockedException(ClusterBlockLevel level, String[] indices) {
+ boolean indexIsBlocked = false;
+ for (String index : indices) {
+ if (indexBlocked(level, index)) {
+ indexIsBlocked = true;
+ }
+ }
+ if (!indexIsBlocked) {
+ return null;
+ }
+ ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
+ builder.addAll(global(level));
+ for (String index : indices) {
+ ImmutableSet<ClusterBlock> indexBlocks = indices(level).get(index);
+ if (indexBlocks != null) {
+ builder.addAll(indexBlocks);
+ }
+ }
+ return new ClusterBlockException(builder.build());
+ }
+
+ static class ImmutableLevelHolder {
+
+ static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(ImmutableSet.<ClusterBlock>of(), ImmutableMap.<String, ImmutableSet<ClusterBlock>>of());
+
+ private final ImmutableSet<ClusterBlock> global;
+ private final ImmutableMap<String, ImmutableSet<ClusterBlock>> indices;
+
+ ImmutableLevelHolder(ImmutableSet<ClusterBlock> global, ImmutableMap<String, ImmutableSet<ClusterBlock>> indices) {
+ this.global = global;
+ this.indices = indices;
+ }
+
+ public ImmutableSet<ClusterBlock> global() {
+ return global;
+ }
+
+ public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices() {
+ return indices;
+ }
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static class Builder {
+
+ private Set<ClusterBlock> global = Sets.newHashSet();
+
+ private Map<String, Set<ClusterBlock>> indices = Maps.newHashMap();
+
+ public Builder() {
+ }
+
+ public Builder blocks(ClusterBlocks blocks) {
+ global.addAll(blocks.global());
+ for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) {
+ if (!indices.containsKey(entry.getKey())) {
+ indices.put(entry.getKey(), Sets.<ClusterBlock>newHashSet());
+ }
+ indices.get(entry.getKey()).addAll(entry.getValue());
+ }
+ return this;
+ }
+
+ public Builder addBlocks(IndexMetaData indexMetaData) {
+ if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
+ addIndexBlock(indexMetaData.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+ }
+ if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
+ addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
+ }
+ if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
+ addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_BLOCK);
+ }
+ if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
+ addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_WRITE_BLOCK);
+ }
+ if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
+ addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_METADATA_BLOCK);
+ }
+ return this;
+ }
+
+ public Builder addGlobalBlock(ClusterBlock block) {
+ global.add(block);
+ return this;
+ }
+
+ public Builder removeGlobalBlock(ClusterBlock block) {
+ global.remove(block);
+ return this;
+ }
+
+ public Builder addIndexBlock(String index, ClusterBlock block) {
+ if (!indices.containsKey(index)) {
+ indices.put(index, Sets.<ClusterBlock>newHashSet());
+ }
+ indices.get(index).add(block);
+ return this;
+ }
+
+ public Builder removeIndexBlocks(String index) {
+ if (!indices.containsKey(index)) {
+ return this;
+ }
+ indices.remove(index);
+ return this;
+ }
+
+ public Builder removeIndexBlock(String index, ClusterBlock block) {
+ if (!indices.containsKey(index)) {
+ return this;
+ }
+ indices.get(index).remove(block);
+ if (indices.get(index).isEmpty()) {
+ indices.remove(index);
+ }
+ return this;
+ }
+
+ public ClusterBlocks build() {
+ ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
+ for (Map.Entry<String, Set<ClusterBlock>> entry : indices.entrySet()) {
+ indicesBuilder.put(entry.getKey(), ImmutableSet.copyOf(entry.getValue()));
+ }
+ return new ClusterBlocks(ImmutableSet.copyOf(global), indicesBuilder.build());
+ }
+
+ public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException {
+ ImmutableSet<ClusterBlock> global = readBlockSet(in);
+ ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
+ int size = in.readVInt();
+ for (int j = 0; j < size; j++) {
+ indicesBuilder.put(in.readString().intern(), readBlockSet(in));
+ }
+ return new ClusterBlocks(global, indicesBuilder.build());
+ }
+
+ public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException {
+ writeBlockSet(blocks.global(), out);
+ out.writeVInt(blocks.indices().size());
+ for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) {
+ out.writeString(entry.getKey());
+ writeBlockSet(entry.getValue(), out);
+ }
+ }
+
+ private static void writeBlockSet(ImmutableSet<ClusterBlock> blocks, StreamOutput out) throws IOException {
+ out.writeVInt(blocks.size());
+ for (ClusterBlock block : blocks) {
+ block.writeTo(out);
+ }
+ }
+
+ private static ImmutableSet<ClusterBlock> readBlockSet(StreamInput in) throws IOException {
+ ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ builder.add(ClusterBlock.readClusterBlock(in));
+ }
+ return builder.build();
+ }
+ }
+}
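A hedged sketch of the Builder and the per-level lookups it feeds (block values invented):

    ClusterBlock readOnly = new ClusterBlock(202, "read-only (example)", false, false,
            RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE);

    ClusterBlocks blocks = ClusterBlocks.builder()
            .addIndexBlock("logs", readOnly)
            .build();

    assert blocks.hasIndexBlock("logs", readOnly);
    assert blocks.indexBlocked(ClusterBlockLevel.WRITE, "logs");   // blocked at WRITE
    assert !blocks.indexBlocked(ClusterBlockLevel.READ, "logs");   // but still readable
    ClusterBlockException e = blocks.indexBlockedException(ClusterBlockLevel.WRITE, "logs");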
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
new file mode 100644
index 0000000..29bcba3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.FilterBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class AliasAction implements Streamable {
+
+ public static enum Type {
+ ADD((byte) 0),
+ REMOVE((byte) 1);
+
+ private final byte value;
+
+ Type(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return value;
+ }
+
+ public static Type fromValue(byte value) {
+ if (value == 0) {
+ return ADD;
+ } else if (value == 1) {
+ return REMOVE;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("No type for action [" + value + "]");
+ }
+ }
+ }
+
+ private Type actionType;
+
+ private String index;
+
+ private String alias;
+
+ @Nullable
+ private String filter;
+
+ @Nullable
+ private String indexRouting;
+
+ @Nullable
+ private String searchRouting;
+
+ private AliasAction() {
+
+ }
+
+ public AliasAction(AliasAction other) {
+ this.actionType = other.actionType;
+ this.index = other.index;
+ this.alias = other.alias;
+ this.filter = other.filter;
+ this.indexRouting = other.indexRouting;
+ this.searchRouting = other.searchRouting;
+ }
+
+ public AliasAction(Type actionType) {
+ this.actionType = actionType;
+ }
+
+ public AliasAction(Type actionType, String index, String alias) {
+ this.actionType = actionType;
+ this.index = index;
+ this.alias = alias;
+ }
+
+ public AliasAction(Type actionType, String index, String alias, String filter) {
+ this.actionType = actionType;
+ this.index = index;
+ this.alias = alias;
+ this.filter = filter;
+ }
+
+ public Type actionType() {
+ return actionType;
+ }
+
+ public AliasAction index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public AliasAction alias(String alias) {
+ this.alias = alias;
+ return this;
+ }
+
+ public String alias() {
+ return alias;
+ }
+
+ public String filter() {
+ return filter;
+ }
+
+ public AliasAction filter(String filter) {
+ this.filter = filter;
+ return this;
+ }
+
+ public AliasAction filter(Map<String, Object> filter) {
+ if (filter == null || filter.isEmpty()) {
+ this.filter = null;
+ return this;
+ }
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.map(filter);
+ this.filter = builder.string();
+ return this;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
+ }
+ }
+
+ public AliasAction filter(FilterBuilder filterBuilder) {
+ if (filterBuilder == null) {
+ this.filter = null;
+ return this;
+ }
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.close();
+ this.filter = builder.string();
+ return this;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
+ }
+ }
+
+ public AliasAction routing(String routing) {
+ this.indexRouting = routing;
+ this.searchRouting = routing;
+ return this;
+ }
+
+ public String indexRouting() {
+ return indexRouting;
+ }
+
+ public AliasAction indexRouting(String indexRouting) {
+ this.indexRouting = indexRouting;
+ return this;
+ }
+
+ public String searchRouting() {
+ return searchRouting;
+ }
+
+ public AliasAction searchRouting(String searchRouting) {
+ this.searchRouting = searchRouting;
+ return this;
+ }
+
+ public static AliasAction readAliasAction(StreamInput in) throws IOException {
+ AliasAction aliasAction = new AliasAction();
+ aliasAction.readFrom(in);
+ return aliasAction;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ actionType = Type.fromValue(in.readByte());
+ index = in.readOptionalString();
+ alias = in.readOptionalString();
+ filter = in.readOptionalString();
+ indexRouting = in.readOptionalString();
+ searchRouting = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeByte(actionType.value());
+ out.writeOptionalString(index);
+ out.writeOptionalString(alias);
+ out.writeOptionalString(filter);
+ out.writeOptionalString(indexRouting);
+ out.writeOptionalString(searchRouting);
+ }
+
+ public static AliasAction newAddAliasAction(String index, String alias) {
+ return new AliasAction(Type.ADD, index, alias);
+ }
+
+ public static AliasAction newRemoveAliasAction(String index, String alias) {
+ return new AliasAction(Type.REMOVE, index, alias);
+ }
+
+}
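
AliasAction is a small fluent carrier for one add/remove alias operation; the optional
filter and routing values travel as writeOptionalString on the wire. A usage sketch
built only from the API above (index/alias names and the filter JSON are illustrative):

    AliasAction add = AliasAction.newAddAliasAction("logs-2014-06", "logs")
            .filter("{\"term\":{\"level\":\"error\"}}")   // kept as a raw JSON string
            .indexRouting("2014-06")
            .searchRouting("2014-06");
    AliasAction remove = AliasAction.newRemoveAliasAction("logs-2014-05", "logs");
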
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
new file mode 100644
index 0000000..cc13cd8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class AliasMetaData {
+
+ private final String alias;
+
+ private final CompressedString filter;
+
+ private final String indexRouting;
+
+ private final String searchRouting;
+
+ private final Set<String> searchRoutingValues;
+
+ private AliasMetaData(String alias, CompressedString filter, String indexRouting, String searchRouting) {
+ this.alias = alias;
+ this.filter = filter;
+ this.indexRouting = indexRouting;
+ this.searchRouting = searchRouting;
+ if (searchRouting != null) {
+ searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
+ } else {
+ searchRoutingValues = ImmutableSet.of();
+ }
+ }
+
+ public String alias() {
+ return alias;
+ }
+
+ public String getAlias() {
+ return alias();
+ }
+
+ public CompressedString filter() {
+ return filter;
+ }
+
+ public CompressedString getFilter() {
+ return filter();
+ }
+
+ public boolean filteringRequired() {
+ return filter != null;
+ }
+
+ public String getSearchRouting() {
+ return searchRouting();
+ }
+
+ public String searchRouting() {
+ return searchRouting;
+ }
+
+ public String getIndexRouting() {
+ return indexRouting();
+ }
+
+ public String indexRouting() {
+ return indexRouting;
+ }
+
+ public Set<String> searchRoutingValues() {
+ return searchRoutingValues;
+ }
+
+ public static Builder builder(String alias) {
+ return new Builder(alias);
+ }
+
+ public static Builder newAliasMetaDataBuilder(String alias) {
+ return new Builder(alias);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ AliasMetaData that = (AliasMetaData) o;
+
+ if (alias != null ? !alias.equals(that.alias) : that.alias != null) return false;
+ if (filter != null ? !filter.equals(that.filter) : that.filter != null) return false;
+ if (indexRouting != null ? !indexRouting.equals(that.indexRouting) : that.indexRouting != null) return false;
+ if (searchRouting != null ? !searchRouting.equals(that.searchRouting) : that.searchRouting != null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = alias != null ? alias.hashCode() : 0;
+ result = 31 * result + (filter != null ? filter.hashCode() : 0);
+ result = 31 * result + (indexRouting != null ? indexRouting.hashCode() : 0);
+ result = 31 * result + (searchRouting != null ? searchRouting.hashCode() : 0);
+ return result;
+ }
+
+ public static class Builder {
+
+ private String alias;
+
+ private CompressedString filter;
+
+ private String indexRouting;
+
+ private String searchRouting;
+
+
+ public Builder(String alias) {
+ this.alias = alias;
+ }
+
+ public Builder(AliasMetaData aliasMetaData) {
+ this(aliasMetaData.alias());
+ filter = aliasMetaData.filter();
+ indexRouting = aliasMetaData.indexRouting();
+ searchRouting = aliasMetaData.searchRouting();
+ }
+
+ public String alias() {
+ return alias;
+ }
+
+ public Builder filter(CompressedString filter) {
+ this.filter = filter;
+ return this;
+ }
+
+ public Builder filter(String filter) {
+ if (!Strings.hasLength(filter)) {
+ this.filter = null;
+ return this;
+ }
+ try {
+ XContentParser parser = XContentFactory.xContent(filter).createParser(filter);
+ try {
+ filter(parser.mapOrdered());
+ } finally {
+ parser.close();
+ }
+ return this;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
+ }
+ }
+
+ public Builder filter(Map<String, Object> filter) {
+ if (filter == null || filter.isEmpty()) {
+ this.filter = null;
+ return this;
+ }
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().map(filter);
+ this.filter = new CompressedString(builder.bytes());
+ return this;
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
+ }
+ }
+
+ public Builder filter(XContentBuilder filterBuilder) {
+ try {
+ return filter(filterBuilder.string());
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
+ }
+ }
+
+ public Builder routing(String routing) {
+ this.indexRouting = routing;
+ this.searchRouting = routing;
+ return this;
+ }
+
+ public Builder indexRouting(String indexRouting) {
+ this.indexRouting = indexRouting;
+ return this;
+ }
+
+ public Builder searchRouting(String searchRouting) {
+ this.searchRouting = searchRouting;
+ return this;
+ }
+
+ public AliasMetaData build() {
+ return new AliasMetaData(alias, filter, indexRouting, searchRouting);
+ }
+
+ public static void toXContent(AliasMetaData aliasMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(aliasMetaData.alias(), XContentBuilder.FieldCaseConversion.NONE);
+
+ boolean binary = params.paramAsBoolean("binary", false);
+
+ if (aliasMetaData.filter() != null) {
+ if (binary) {
+ builder.field("filter", aliasMetaData.filter.compressed());
+ } else {
+ byte[] data = aliasMetaData.filter().uncompressed();
+ XContentParser parser = XContentFactory.xContent(data).createParser(data);
+ Map<String, Object> filter = parser.mapOrdered();
+ parser.close();
+ builder.field("filter", filter);
+ }
+ }
+ if (aliasMetaData.indexRouting() != null) {
+ builder.field("index_routing", aliasMetaData.indexRouting());
+ }
+ if (aliasMetaData.searchRouting() != null) {
+ builder.field("search_routing", aliasMetaData.searchRouting());
+ }
+
+ builder.endObject();
+ }
+
+ public static AliasMetaData fromXContent(XContentParser parser) throws IOException {
+ Builder builder = new Builder(parser.currentName());
+
+ String currentFieldName = null;
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ // no data...
+ return builder.build();
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ Map<String, Object> filter = parser.mapOrdered();
+ builder.filter(filter);
+ }
+ } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ builder.filter(new CompressedString(parser.binaryValue()));
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("routing".equals(currentFieldName)) {
+ builder.routing(parser.text());
+ } else if ("index_routing".equals(currentFieldName) || "indexRouting".equals(currentFieldName)) {
+ builder.indexRouting(parser.text());
+ } else if ("search_routing".equals(currentFieldName) || "searchRouting".equals(currentFieldName)) {
+ builder.searchRouting(parser.text());
+ }
+ }
+ }
+ return builder.build();
+ }
+
+ public static void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException {
+ out.writeString(aliasMetaData.alias());
+ if (aliasMetaData.filter() != null) {
+ out.writeBoolean(true);
+ aliasMetaData.filter.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ }
+ if (aliasMetaData.indexRouting() != null) {
+ out.writeBoolean(true);
+ out.writeString(aliasMetaData.indexRouting());
+ } else {
+ out.writeBoolean(false);
+ }
+ if (aliasMetaData.searchRouting() != null) {
+ out.writeBoolean(true);
+ out.writeString(aliasMetaData.searchRouting());
+ } else {
+ out.writeBoolean(false);
+ }
+
+ }
+
+ public static AliasMetaData readFrom(StreamInput in) throws IOException {
+ String alias = in.readString();
+ CompressedString filter = null;
+ if (in.readBoolean()) {
+ filter = CompressedString.readCompressedString(in);
+ }
+ String indexRouting = null;
+ if (in.readBoolean()) {
+ indexRouting = in.readString();
+ }
+ String searchRouting = null;
+ if (in.readBoolean()) {
+ searchRouting = in.readString();
+ }
+ return new AliasMetaData(alias, filter, indexRouting, searchRouting);
+ }
+ }
+
+}
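
AliasMetaData is the immutable, cluster-state-resident counterpart: the filter is held
as a CompressedString, and a comma-separated search routing is pre-split into
searchRoutingValues() at construction time. A builder sketch (alias name, filter, and
routing values are illustrative):

    AliasMetaData aliasMd = AliasMetaData.builder("2014")
            .filter("{\"term\":{\"year\":2014}}")   // parsed, then stored compressed
            .indexRouting("2014")
            .searchRouting("a,b")                   // searchRoutingValues() -> {"a", "b"}
            .build();
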
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
new file mode 100644
index 0000000..9faf007
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -0,0 +1,715 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.loader.SettingsLoader;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
+import static org.elasticsearch.common.settings.ImmutableSettings.*;
+
+/**
+ *
+ */
+public class IndexMetaData {
+
+
+ public interface Custom {
+
+ String type();
+
+ interface Factory<T extends Custom> {
+
+ String type();
+
+ T readFrom(StreamInput in) throws IOException;
+
+ void writeTo(T customIndexMetaData, StreamOutput out) throws IOException;
+
+ T fromMap(Map<String, Object> map) throws IOException;
+
+ T fromXContent(XContentParser parser) throws IOException;
+
+ void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException;
+
+ /**
+ * Merges first and second, with first taking precedence: if an entry exists in both
+ * first and second, the value from first prevails.
+ */
+ T merge(T first, T second);
+ }
+ }
+
+ public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>();
+
+ static {
+ // register non plugin custom metadata
+ registerFactory(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.FACTORY);
+ }
+
+ /**
+ * Register a custom index meta data factory. Make sure to call it from a static block.
+ */
+ public static void registerFactory(String type, Custom.Factory factory) {
+ customFactories.put(type, factory);
+ }
+
+ @Nullable
+ public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) {
+ return customFactories.get(type);
+ }
+
+ public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException {
+ Custom.Factory<T> factory = customFactories.get(type);
+ if (factory == null) {
+ throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]");
+ }
+ return factory;
+ }
+
+ public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA);
+ public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ);
+ public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE);
+ public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.METADATA);
+
+ public static enum State {
+ OPEN((byte) 0),
+ CLOSE((byte) 1);
+
+ private final byte id;
+
+ State(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public static State fromId(byte id) {
+ if (id == 0) {
+ return OPEN;
+ } else if (id == 1) {
+ return CLOSE;
+ }
+ throw new ElasticsearchIllegalStateException("No state match for id [" + id + "]");
+ }
+
+ public static State fromString(String state) {
+ if ("open".equals(state)) {
+ return OPEN;
+ } else if ("close".equals(state)) {
+ return CLOSE;
+ }
+ throw new ElasticsearchIllegalStateException("No state match for [" + state + "]");
+ }
+ }
+
+ public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
+ public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
+ public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
+ public static final String SETTING_READ_ONLY = "index.blocks.read_only";
+ public static final String SETTING_BLOCKS_READ = "index.blocks.read";
+ public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
+ public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
+ public static final String SETTING_VERSION_CREATED = "index.version.created";
+ public static final String SETTING_UUID = "index.uuid";
+ public static final String INDEX_UUID_NA_VALUE = "_na_";
+
+ private final String index;
+ private final long version;
+
+ private final State state;
+
+ private final ImmutableOpenMap<String, AliasMetaData> aliases;
+
+ private final Settings settings;
+
+ private final ImmutableOpenMap<String, MappingMetaData> mappings;
+
+ private final ImmutableOpenMap<String, Custom> customs;
+
+ private transient final int totalNumberOfShards;
+
+ private final DiscoveryNodeFilters requireFilters;
+ private final DiscoveryNodeFilters includeFilters;
+ private final DiscoveryNodeFilters excludeFilters;
+
+ private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
+ Preconditions.checkArgument(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1) != -1, "must specify numberOfShards for index [" + index + "]");
+ Preconditions.checkArgument(settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1) != -1, "must specify numberOfReplicas for index [" + index + "]");
+ this.index = index;
+ this.version = version;
+ this.state = state;
+ this.settings = settings;
+ this.mappings = mappings;
+ this.customs = customs;
+ this.totalNumberOfShards = numberOfShards() * (numberOfReplicas() + 1);
+
+ this.aliases = aliases;
+
+ ImmutableMap<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
+ if (requireMap.isEmpty()) {
+ requireFilters = null;
+ } else {
+ requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
+ }
+ ImmutableMap<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
+ if (includeMap.isEmpty()) {
+ includeFilters = null;
+ } else {
+ includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
+ }
+ ImmutableMap<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
+ if (excludeMap.isEmpty()) {
+ excludeFilters = null;
+ } else {
+ excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
+ }
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public String getIndex() {
+ return index();
+ }
+
+ public String uuid() {
+ return settings.get(SETTING_UUID, INDEX_UUID_NA_VALUE);
+ }
+
+ public String getUUID() {
+ return uuid();
+ }
+
+ /**
+ * Tests whether the current index UUID is the same as the given one. Returns true if either is _na_.
+ */
+ public boolean isSameUUID(String otherUUID) {
+ assert otherUUID != null;
+ assert uuid() != null;
+ if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(uuid())) {
+ return true;
+ }
+ return otherUUID.equals(getUUID());
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public long getVersion() {
+ return this.version;
+ }
+
+ public State state() {
+ return this.state;
+ }
+
+ public State getState() {
+ return state();
+ }
+
+ public int numberOfShards() {
+ return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
+ }
+
+ public int getNumberOfShards() {
+ return numberOfShards();
+ }
+
+ public int numberOfReplicas() {
+ return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
+ }
+
+ public int getNumberOfReplicas() {
+ return numberOfReplicas();
+ }
+
+ public int totalNumberOfShards() {
+ return totalNumberOfShards;
+ }
+
+ public int getTotalNumberOfShards() {
+ return totalNumberOfShards();
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+
+ public Settings getSettings() {
+ return settings();
+ }
+
+ public ImmutableOpenMap<String, AliasMetaData> aliases() {
+ return this.aliases;
+ }
+
+ public ImmutableOpenMap<String, AliasMetaData> getAliases() {
+ return aliases();
+ }
+
+ public ImmutableOpenMap<String, MappingMetaData> mappings() {
+ return mappings;
+ }
+
+ public ImmutableOpenMap<String, MappingMetaData> getMappings() {
+ return mappings();
+ }
+
+ @Nullable
+ public MappingMetaData mapping(String mappingType) {
+ return mappings.get(mappingType);
+ }
+
+ /**
+ * Sometimes the default mapping exists while an actual mapping for a type has not been created (introduced) yet;
+ * in that case we return the default mapping, since it may carry default mapping definitions.
+ * <p/>
+ * Note, once the mapping type is introduced, the default mapping is applied to the actual typed MappingMetaData,
+ * setting its routing, timestamp, and so on if needed.
+ */
+ @Nullable
+ public MappingMetaData mappingOrDefault(String mappingType) {
+ MappingMetaData mapping = mappings.get(mappingType);
+ if (mapping != null) {
+ return mapping;
+ }
+ return mappings.get(MapperService.DEFAULT_MAPPING);
+ }
+
+ public ImmutableOpenMap<String, Custom> customs() {
+ return this.customs;
+ }
+
+ public ImmutableOpenMap<String, Custom> getCustoms() {
+ return this.customs;
+ }
+
+ public <T extends Custom> T custom(String type) {
+ return (T) customs.get(type);
+ }
+
+ @Nullable
+ public DiscoveryNodeFilters requireFilters() {
+ return requireFilters;
+ }
+
+ @Nullable
+ public DiscoveryNodeFilters includeFilters() {
+ return includeFilters;
+ }
+
+ @Nullable
+ public DiscoveryNodeFilters excludeFilters() {
+ return excludeFilters;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ IndexMetaData that = (IndexMetaData) o;
+
+ if (!aliases.equals(that.aliases)) {
+ return false;
+ }
+ if (!index.equals(that.index)) {
+ return false;
+ }
+ if (!mappings.equals(that.mappings)) {
+ return false;
+ }
+ if (!settings.equals(that.settings)) {
+ return false;
+ }
+ if (state != that.state) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = index.hashCode();
+ result = 31 * result + state.hashCode();
+ result = 31 * result + aliases.hashCode();
+ result = 31 * result + settings.hashCode();
+ result = 31 * result + mappings.hashCode();
+ return result;
+ }
+
+ public static Builder builder(String index) {
+ return new Builder(index);
+ }
+
+ public static Builder builder(IndexMetaData indexMetaData) {
+ return new Builder(indexMetaData);
+ }
+
+ public static class Builder {
+
+ private String index;
+ private State state = State.OPEN;
+ private long version = 1;
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
+ private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
+ private final ImmutableOpenMap.Builder<String, Custom> customs;
+
+ public Builder(String index) {
+ this.index = index;
+ this.mappings = ImmutableOpenMap.builder();
+ this.aliases = ImmutableOpenMap.builder();
+ this.customs = ImmutableOpenMap.builder();
+ }
+
+ public Builder(IndexMetaData indexMetaData) {
+ this.index = indexMetaData.index();
+ this.state = indexMetaData.state;
+ this.version = indexMetaData.version;
+ this.settings = indexMetaData.settings();
+ this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
+ this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
+ this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public Builder index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ public Builder numberOfShards(int numberOfShards) {
+ settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
+ return this;
+ }
+
+ public int numberOfShards() {
+ return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
+ }
+
+ public Builder numberOfReplicas(int numberOfReplicas) {
+ settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
+ return this;
+ }
+
+ public int numberOfReplicas() {
+ return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
+ }
+
+ public Builder settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ public Builder settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public MappingMetaData mapping(String type) {
+ return mappings.get(type);
+ }
+
+ public Builder removeMapping(String mappingType) {
+ mappings.remove(mappingType);
+ return this;
+ }
+
+ public Builder putMapping(String type, String source) throws IOException {
+ XContentParser parser = XContentFactory.xContent(source).createParser(source);
+ try {
+ putMapping(new MappingMetaData(type, parser.mapOrdered()));
+ } finally {
+ parser.close();
+ }
+ return this;
+ }
+
+ public Builder putMapping(MappingMetaData mappingMd) {
+ mappings.put(mappingMd.type(), mappingMd);
+ return this;
+ }
+
+ public Builder state(State state) {
+ this.state = state;
+ return this;
+ }
+
+ public Builder putAlias(AliasMetaData aliasMetaData) {
+ aliases.put(aliasMetaData.alias(), aliasMetaData);
+ return this;
+ }
+
+ public Builder putAlias(AliasMetaData.Builder aliasMetaData) {
+ aliases.put(aliasMetaData.alias(), aliasMetaData.build());
+ return this;
+ }
+
+ public Builder removerAlias(String alias) {
+ aliases.remove(alias);
+ return this;
+ }
+
+ public Builder putCustom(String type, Custom customIndexMetaData) {
+ this.customs.put(type, customIndexMetaData);
+ return this;
+ }
+
+ public Builder removeCustom(String type) {
+ this.customs.remove(type);
+ return this;
+ }
+
+ public Custom getCustom(String type) {
+ return this.customs.get(type);
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public Builder version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public IndexMetaData build() {
+ ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
+ Settings tmpSettings = settings;
+
+ // For backward compatibility
+ String[] legacyAliases = settings.getAsArray("index.aliases");
+ if (legacyAliases.length > 0) {
+ tmpAliases = ImmutableOpenMap.builder();
+ for (String alias : legacyAliases) {
+ AliasMetaData aliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).build();
+ tmpAliases.put(alias, aliasMd);
+ }
+ tmpAliases.putAll(aliases);
+ // Remove index.aliases from settings once they are migrated to the new data structure
+ tmpSettings = ImmutableSettings.settingsBuilder().put(settings).putArray("index.aliases").build();
+ }
+
+ // update default mapping on the MappingMetaData
+ if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
+ MappingMetaData defaultMapping = mappings.get(MapperService.DEFAULT_MAPPING);
+ for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
+ cursor.value.updateDefaultMapping(defaultMapping);
+ }
+ }
+
+ return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
+ }
+
+ public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.field("version", indexMetaData.version());
+ builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
+
+ boolean binary = params.paramAsBoolean("binary", false);
+
+ builder.startObject("settings");
+ for (Map.Entry<String, String> entry : indexMetaData.settings().getAsMap().entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ builder.endObject();
+
+ builder.startArray("mappings");
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
+ if (binary) {
+ builder.value(cursor.value.source().compressed());
+ } else {
+ byte[] data = cursor.value.source().uncompressed();
+ XContentParser parser = XContentFactory.xContent(data).createParser(data);
+ Map<String, Object> mapping = parser.mapOrdered();
+ parser.close();
+ builder.map(mapping);
+ }
+ }
+ builder.endArray();
+
+ for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
+ builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
+ lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
+ builder.endObject();
+ }
+
+ builder.startObject("aliases");
+ for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
+ AliasMetaData.Builder.toXContent(cursor.value, builder, params);
+ }
+ builder.endObject();
+
+
+ builder.endObject();
+ }
+
+ public static IndexMetaData fromXContent(XContentParser parser) throws IOException {
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ parser.nextToken();
+ }
+ Builder builder = new Builder(parser.currentName());
+
+ String currentFieldName = null;
+ XContentParser.Token token = parser.nextToken();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("settings".equals(currentFieldName)) {
+ builder.settings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
+ } else if ("mappings".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String mappingType = currentFieldName;
+ Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
+ builder.putMapping(new MappingMetaData(mappingType, mappingSource));
+ }
+ }
+ } else if ("aliases".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
+ }
+ } else {
+ // check if it's a custom index metadata section
+ Custom.Factory<Custom> factory = lookupFactory(currentFieldName);
+ if (factory == null) {
+ //TODO warn
+ parser.skipChildren();
+ } else {
+ builder.putCustom(factory.type(), factory.fromXContent(parser));
+ }
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("mappings".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
+ builder.putMapping(new MappingMetaData(new CompressedString(parser.binaryValue())));
+ } else {
+ Map<String, Object> mapping = parser.mapOrdered();
+ if (mapping.size() == 1) {
+ String mappingType = mapping.keySet().iterator().next();
+ builder.putMapping(new MappingMetaData(mappingType, mapping));
+ }
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("state".equals(currentFieldName)) {
+ builder.state(State.fromString(parser.text()));
+ } else if ("version".equals(currentFieldName)) {
+ builder.version(parser.longValue());
+ }
+ }
+ }
+ return builder.build();
+ }
+
+ public static IndexMetaData readFrom(StreamInput in) throws IOException {
+ Builder builder = new Builder(in.readString());
+ builder.version(in.readLong());
+ builder.state(State.fromId(in.readByte()));
+ builder.settings(readSettingsFromStream(in));
+ int mappingsSize = in.readVInt();
+ for (int i = 0; i < mappingsSize; i++) {
+ MappingMetaData mappingMd = MappingMetaData.readFrom(in);
+ builder.putMapping(mappingMd);
+ }
+ int aliasesSize = in.readVInt();
+ for (int i = 0; i < aliasesSize; i++) {
+ AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
+ builder.putAlias(aliasMd);
+ }
+ int customSize = in.readVInt();
+ for (int i = 0; i < customSize; i++) {
+ String type = in.readString();
+ Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
+ builder.putCustom(type, customIndexMetaData);
+ }
+ return builder.build();
+ }
+
+ public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException {
+ out.writeString(indexMetaData.index());
+ out.writeLong(indexMetaData.version());
+ out.writeByte(indexMetaData.state().id());
+ writeSettingsToStream(indexMetaData.settings(), out);
+ out.writeVInt(indexMetaData.mappings().size());
+ for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
+ MappingMetaData.writeTo(cursor.value, out);
+ }
+ out.writeVInt(indexMetaData.aliases().size());
+ for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
+ AliasMetaData.Builder.writeTo(cursor.value, out);
+ }
+ out.writeVInt(indexMetaData.customs().size());
+ for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
+ out.writeString(cursor.key);
+ lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
+ }
+ }
+ }
+}
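
IndexMetaData's private constructor insists that both index.number_of_shards and
index.number_of_replicas are set, and derives totalNumberOfShards as
numberOfShards * (numberOfReplicas + 1). A builder sketch using only the API above
(index and alias names are illustrative; ImmutableSettings.Builder.put(String, int)
is assumed from the settings API used elsewhere in this diff):

    IndexMetaData indexMd = IndexMetaData.builder("twitter")
            .settings(ImmutableSettings.settingsBuilder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))
            .putAlias(AliasMetaData.builder("tweets").build())
            .build();
    assert indexMd.totalNumberOfShards() == 10;   // 5 primaries * (1 replica + 1)
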
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
new file mode 100644
index 0000000..ad41b87
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.metadata;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.loader.SettingsLoader;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class IndexTemplateMetaData {
+
+ private final String name;
+
+ private final int order;
+
+ private final String template;
+
+ private final Settings settings;
+
+ // the mapping source should always include the type as its top-level key
+ private final ImmutableOpenMap<String, CompressedString> mappings;
+
+ private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;
+
+ public IndexTemplateMetaData(String name, int order, String template, Settings settings, ImmutableOpenMap<String, CompressedString> mappings, ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
+ this.name = name;
+ this.order = order;
+ this.template = template;
+ this.settings = settings;
+ this.mappings = mappings;
+ this.customs = customs;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public int order() {
+ return this.order;
+ }
+
+ public int getOrder() {
+ return order();
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public String template() {
+ return this.template;
+ }
+
+ public String getTemplate() {
+ return this.template;
+ }
+
+ public Settings settings() {
+ return this.settings;
+ }
+
+ public Settings getSettings() {
+ return settings();
+ }
+
+ public ImmutableOpenMap<String, CompressedString> mappings() {
+ return this.mappings;
+ }
+
+ public ImmutableOpenMap<String, CompressedString> getMappings() {
+ return this.mappings;
+ }
+
+ public ImmutableOpenMap<String, IndexMetaData.Custom> customs() {
+ return this.customs;
+ }
+
+ public ImmutableOpenMap<String, IndexMetaData.Custom> getCustoms() {
+ return this.customs;
+ }
+
+ public <T extends IndexMetaData.Custom> T custom(String type) {
+ return (T) customs.get(type);
+ }
+
+ public static Builder builder(String name) {
+ return new Builder(name);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ IndexTemplateMetaData that = (IndexTemplateMetaData) o;
+
+ if (order != that.order) return false;
+ if (!mappings.equals(that.mappings)) return false;
+ if (!name.equals(that.name)) return false;
+ if (!settings.equals(that.settings)) return false;
+ if (!template.equals(that.template)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = name.hashCode();
+ result = 31 * result + order;
+ result = 31 * result + template.hashCode();
+ result = 31 * result + settings.hashCode();
+ result = 31 * result + mappings.hashCode();
+ return result;
+ }
+
+ public static class Builder {
+
+ private static final Set<String> VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings");
+ static {
+ VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet());
+ }
+
+ private String name;
+
+ private int order;
+
+ private String template;
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private final ImmutableOpenMap.Builder<String, CompressedString> mappings;
+
+ private final ImmutableOpenMap.Builder<String, IndexMetaData.Custom> customs;
+
+ public Builder(String name) {
+ this.name = name;
+ mappings = ImmutableOpenMap.builder();
+ customs = ImmutableOpenMap.builder();
+ }
+
+ public Builder(IndexTemplateMetaData indexTemplateMetaData) {
+ this.name = indexTemplateMetaData.name();
+ order(indexTemplateMetaData.order());
+ template(indexTemplateMetaData.template());
+ settings(indexTemplateMetaData.settings());
+
+ mappings = ImmutableOpenMap.builder(indexTemplateMetaData.mappings());
+ customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs());
+ }
+
+ public Builder order(int order) {
+ this.order = order;
+ return this;
+ }
+
+ public Builder template(String template) {
+ this.template = template;
+ return this;
+ }
+
+ public String template() {
+ return template;
+ }
+
+ public Builder settings(Settings.Builder settings) {
+ this.settings = settings.build();
+ return this;
+ }
+
+ public Builder settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public Builder removeMapping(String mappingType) {
+ mappings.remove(mappingType);
+ return this;
+ }
+
+ public Builder putMapping(String mappingType, CompressedString mappingSource) throws IOException {
+ mappings.put(mappingType, mappingSource);
+ return this;
+ }
+
+ public Builder putMapping(String mappingType, String mappingSource) throws IOException {
+ mappings.put(mappingType, new CompressedString(mappingSource));
+ return this;
+ }
+
+ public Builder putCustom(String type, IndexMetaData.Custom customIndexMetaData) {
+ this.customs.put(type, customIndexMetaData);
+ return this;
+ }
+
+ public Builder removeCustom(String type) {
+ this.customs.remove(type);
+ return this;
+ }
+
+ public IndexMetaData.Custom getCustom(String type) {
+ return this.customs.get(type);
+ }
+
+ public IndexTemplateMetaData build() {
+ return new IndexTemplateMetaData(name, order, template, settings, mappings.build(), customs.build());
+ }
+
+ public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(indexTemplateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.field("order", indexTemplateMetaData.order());
+ builder.field("template", indexTemplateMetaData.template());
+
+ builder.startObject("settings");
+ for (Map.Entry<String, String> entry : indexTemplateMetaData.settings().getAsMap().entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ builder.endObject();
+
+ if (params.paramAsBoolean("reduce_mappings", false)) {
+ builder.startObject("mappings");
+ for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
+ byte[] mappingSource = cursor.value.uncompressed();
+ XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
+ Map<String, Object> mapping = parser.map();
+ if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
+ // the type name is the root key; reduce the mapping to its value
+ mapping = (Map<String, Object>) mapping.get(cursor.key);
+ }
+ builder.field(cursor.key);
+ builder.map(mapping);
+ }
+ builder.endObject();
+ } else {
+ builder.startArray("mappings");
+ for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
+ byte[] data = cursor.value.uncompressed();
+ XContentParser parser = XContentFactory.xContent(data).createParser(data);
+ Map<String, Object> mapping = parser.mapOrderedAndClose();
+ builder.map(mapping);
+ }
+ builder.endArray();
+ }
+
+ for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
+ builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
+ IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
+ builder.endObject();
+ }
+
+ builder.endObject();
+ }
+
+ public static IndexTemplateMetaData fromXContentStandalone(XContentParser parser) throws IOException {
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ throw new IOException("no data");
+ }
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new IOException("should start object");
+ }
+ token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new IOException("the first field should be the template name");
+ }
+ return fromXContent(parser);
+ }
+
+ public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {
+ Builder builder = new Builder(parser.currentName());
+
+ String currentFieldName = skipTemplateName(parser);
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("settings".equals(currentFieldName)) {
+ ImmutableSettings.Builder templateSettingsBuilder = ImmutableSettings.settingsBuilder();
+ for (Map.Entry<String, String> entry : SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()).entrySet()) {
+ if (!entry.getKey().startsWith("index.")) {
+ templateSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
+ } else {
+ templateSettingsBuilder.put(entry.getKey(), entry.getValue());
+ }
+ }
+ builder.settings(templateSettingsBuilder.build());
+ } else if ("mappings".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String mappingType = currentFieldName;
+ Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
+ builder.putMapping(mappingType, XContentFactory.jsonBuilder().map(mappingSource).string());
+ }
+ }
+ } else {
+ // check if it's a custom index metadata section
+ IndexMetaData.Custom.Factory<IndexMetaData.Custom> factory = IndexMetaData.lookupFactory(currentFieldName);
+ if (factory == null) {
+ //TODO warn
+ parser.skipChildren();
+ } else {
+ builder.putCustom(factory.type(), factory.fromXContent(parser));
+ }
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("mappings".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Map<String, Object> mapping = parser.mapOrdered();
+ if (mapping.size() == 1) {
+ String mappingType = mapping.keySet().iterator().next();
+ String mappingSource = XContentFactory.jsonBuilder().map(mapping).string();
+
+ if (mappingSource == null) {
+ // no mapping source available; should this warn?
+ } else {
+ builder.putMapping(mappingType, mappingSource);
+ }
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("template".equals(currentFieldName)) {
+ builder.template(parser.text());
+ } else if ("order".equals(currentFieldName)) {
+ builder.order(parser.intValue());
+ }
+ }
+ }
+ return builder.build();
+ }
+
+ private static String skipTemplateName(XContentParser parser) throws IOException {
+ XContentParser.Token token = parser.nextToken();
+ if (token != null && token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ if (VALID_FIELDS.contains(currentFieldName)) {
+ return currentFieldName;
+ } else {
+ // we just hit the template name, which should be ignored; move on
+ parser.nextToken();
+ }
+ }
+ }
+
+ return null;
+ }
+
+ public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
+ Builder builder = new Builder(in.readString());
+ builder.order(in.readInt());
+ builder.template(in.readString());
+ builder.settings(ImmutableSettings.readSettingsFromStream(in));
+ int mappingsSize = in.readVInt();
+ for (int i = 0; i < mappingsSize; i++) {
+ builder.putMapping(in.readString(), CompressedString.readCompressedString(in));
+ }
+ int customSize = in.readVInt();
+ for (int i = 0; i < customSize; i++) {
+ String type = in.readString();
+ IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
+ builder.putCustom(type, customIndexMetaData);
+ }
+ return builder.build();
+ }
+
+ public static void writeTo(IndexTemplateMetaData indexTemplateMetaData, StreamOutput out) throws IOException {
+ out.writeString(indexTemplateMetaData.name());
+ out.writeInt(indexTemplateMetaData.order());
+ out.writeString(indexTemplateMetaData.template());
+ ImmutableSettings.writeSettingsToStream(indexTemplateMetaData.settings(), out);
+ out.writeVInt(indexTemplateMetaData.mappings().size());
+ for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
+ out.writeString(cursor.key);
+ cursor.value.writeTo(out);
+ }
+ out.writeVInt(indexTemplateMetaData.customs().size());
+ for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
+ out.writeString(cursor.key);
+ IndexMetaData.lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
+ }
+ }
+ }
+
+}
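
IndexTemplateMetaData pairs a name pattern (template) with settings and per-type
mapping sources held as CompressedString; order decides precedence when several
templates match an index name. A builder sketch (template name, pattern, and mapping
JSON are illustrative; the chained putMapping(String, String) can throw IOException,
so callers handle or declare it):

    IndexTemplateMetaData template = IndexTemplateMetaData.builder("logs_template")
            .order(0)
            .template("logs-*")
            .settings(ImmutableSettings.settingsBuilder()
                    .put("index.number_of_shards", 1))
            .putMapping("log", "{\"log\":{\"properties\":{\"message\":{\"type\":\"string\"}}}}")
            .build();
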
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
new file mode 100644
index 0000000..1f7cbc1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
@@ -0,0 +1,692 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.TimestampParsingException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+
+/**
+ *
+ */
+public class MappingMetaData {
+
+ public static class Id {
+
+ public static final Id EMPTY = new Id(null);
+
+ private final String path;
+
+ private final String[] pathElements;
+
+ public Id(String path) {
+ this.path = path;
+ if (path == null) {
+ pathElements = Strings.EMPTY_ARRAY;
+ } else {
+ pathElements = Strings.delimitedListToStringArray(path, ".");
+ }
+ }
+
+ public boolean hasPath() {
+ return path != null;
+ }
+
+ public String path() {
+ return this.path;
+ }
+
+ public String[] pathElements() {
+ return this.pathElements;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Id id = (Id) o;
+
+ if (path != null ? !path.equals(id.path) : id.path != null) return false;
+ if (!Arrays.equals(pathElements, id.pathElements)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = path != null ? path.hashCode() : 0;
+ result = 31 * result + (pathElements != null ? Arrays.hashCode(pathElements) : 0);
+ return result;
+ }
+ }
+
+ public static class Routing {
+
+ public static final Routing EMPTY = new Routing(false, null);
+
+ private final boolean required;
+
+ private final String path;
+
+ private final String[] pathElements;
+
+ public Routing(boolean required, String path) {
+ this.required = required;
+ this.path = path;
+ if (path == null) {
+ pathElements = Strings.EMPTY_ARRAY;
+ } else {
+ pathElements = Strings.delimitedListToStringArray(path, ".");
+ }
+ }
+
+ public boolean required() {
+ return required;
+ }
+
+ public boolean hasPath() {
+ return path != null;
+ }
+
+ public String path() {
+ return this.path;
+ }
+
+ public String[] pathElements() {
+ return this.pathElements;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Routing routing = (Routing) o;
+
+ if (required != routing.required) return false;
+ if (path != null ? !path.equals(routing.path) : routing.path != null) return false;
+ if (!Arrays.equals(pathElements, routing.pathElements)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (required ? 1 : 0);
+ result = 31 * result + (path != null ? path.hashCode() : 0);
+ result = 31 * result + (pathElements != null ? Arrays.hashCode(pathElements) : 0);
+ return result;
+ }
+ }
+
+ public static class Timestamp {
+
+ public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException {
+ long ts;
+ try {
+ // if we manage to parse it, it's a millisecond timestamp; just return the string as-is
+ ts = Long.parseLong(timestampAsString);
+ return timestampAsString;
+ } catch (NumberFormatException e) {
+ try {
+ ts = dateTimeFormatter.parser().parseMillis(timestampAsString);
+ } catch (RuntimeException e1) {
+ throw new TimestampParsingException(timestampAsString);
+ }
+ }
+ return Long.toString(ts);
+ }
+
+
+ public static final Timestamp EMPTY = new Timestamp(false, null, TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT);
+
+ private final boolean enabled;
+
+ private final String path;
+
+ private final String format;
+
+ private final String[] pathElements;
+
+ private final FormatDateTimeFormatter dateTimeFormatter;
+
+ public Timestamp(boolean enabled, String path, String format) {
+ this.enabled = enabled;
+ this.path = path;
+ if (path == null) {
+ pathElements = Strings.EMPTY_ARRAY;
+ } else {
+ pathElements = Strings.delimitedListToStringArray(path, ".");
+ }
+ this.format = format;
+ this.dateTimeFormatter = Joda.forPattern(format);
+ }
+
+ public boolean enabled() {
+ return enabled;
+ }
+
+ public boolean hasPath() {
+ return path != null;
+ }
+
+ public String path() {
+ return this.path;
+ }
+
+ public String[] pathElements() {
+ return this.pathElements;
+ }
+
+ public String format() {
+ return this.format;
+ }
+
+ public FormatDateTimeFormatter dateTimeFormatter() {
+ return this.dateTimeFormatter;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Timestamp timestamp = (Timestamp) o;
+
+ if (enabled != timestamp.enabled) return false;
+ if (dateTimeFormatter != null ? !dateTimeFormatter.equals(timestamp.dateTimeFormatter) : timestamp.dateTimeFormatter != null)
+ return false;
+ if (format != null ? !format.equals(timestamp.format) : timestamp.format != null) return false;
+ if (path != null ? !path.equals(timestamp.path) : timestamp.path != null) return false;
+ if (!Arrays.equals(pathElements, timestamp.pathElements)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (enabled ? 1 : 0);
+ result = 31 * result + (path != null ? path.hashCode() : 0);
+ result = 31 * result + (format != null ? format.hashCode() : 0);
+ result = 31 * result + (pathElements != null ? Arrays.hashCode(pathElements) : 0);
+ result = 31 * result + (dateTimeFormatter != null ? dateTimeFormatter.hashCode() : 0);
+ return result;
+ }
+ }
+
+ private final String type;
+
+ private final CompressedString source;
+
+ private Id id;
+ private Routing routing;
+ private Timestamp timestamp;
+ private boolean hasParentField;
+
+ public MappingMetaData(DocumentMapper docMapper) {
+ this.type = docMapper.type();
+ this.source = docMapper.mappingSource();
+ this.id = new Id(docMapper.idFieldMapper().path());
+ this.routing = new Routing(docMapper.routingFieldMapper().required(), docMapper.routingFieldMapper().path());
+ this.timestamp = new Timestamp(docMapper.timestampFieldMapper().enabled(), docMapper.timestampFieldMapper().path(), docMapper.timestampFieldMapper().dateTimeFormatter().format());
+ this.hasParentField = docMapper.parentFieldMapper().active();
+ }
+
+ public MappingMetaData(CompressedString mapping) throws IOException {
+ this.source = mapping;
+ Map<String, Object> mappingMap = XContentHelper.createParser(mapping.compressed(), 0, mapping.compressed().length).mapOrderedAndClose();
+ if (mappingMap.size() != 1) {
+ throw new ElasticsearchIllegalStateException("Can't derive type from mapping, no root type: " + mapping.string());
+ }
+ this.type = mappingMap.keySet().iterator().next();
+ initMappers((Map<String, Object>) mappingMap.get(this.type));
+ }
+
+ public MappingMetaData(Map<String, Object> mapping) throws IOException {
+ this(mapping.keySet().iterator().next(), mapping);
+ }
+
+ public MappingMetaData(String type, Map<String, Object> mapping) throws IOException {
+ this.type = type;
+ XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().map(mapping);
+ this.source = new CompressedString(mappingBuilder.bytes());
+ Map<String, Object> withoutType = mapping;
+ if (mapping.size() == 1 && mapping.containsKey(type)) {
+ withoutType = (Map<String, Object>) mapping.get(type);
+ }
+ initMappers(withoutType);
+ }
+
+ private void initMappers(Map<String, Object> withoutType) {
+ if (withoutType.containsKey("_id")) {
+ String path = null;
+ Map<String, Object> routingNode = (Map<String, Object>) withoutType.get("_id");
+ for (Map.Entry<String, Object> entry : routingNode.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("path")) {
+ path = fieldNode.toString();
+ }
+ }
+ this.id = new Id(path);
+ } else {
+ this.id = Id.EMPTY;
+ }
+ if (withoutType.containsKey("_routing")) {
+ boolean required = false;
+ String path = null;
+ Map<String, Object> routingNode = (Map<String, Object>) withoutType.get("_routing");
+ for (Map.Entry<String, Object> entry : routingNode.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("required")) {
+ required = nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("path")) {
+ path = fieldNode.toString();
+ }
+ }
+ this.routing = new Routing(required, path);
+ } else {
+ this.routing = Routing.EMPTY;
+ }
+ if (withoutType.containsKey("_timestamp")) {
+ boolean enabled = false;
+ String path = null;
+ String format = TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT;
+ Map<String, Object> timestampNode = (Map<String, Object>) withoutType.get("_timestamp");
+ for (Map.Entry<String, Object> entry : timestampNode.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ enabled = nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("path")) {
+ path = fieldNode.toString();
+ } else if (fieldName.equals("format")) {
+ format = fieldNode.toString();
+ }
+ }
+ this.timestamp = new Timestamp(enabled, path, format);
+ } else {
+ this.timestamp = Timestamp.EMPTY;
+ }
+ if (withoutType.containsKey("_parent")) {
+ this.hasParentField = true;
+ } else {
+ this.hasParentField = false;
+ }
+ }
+
+ public MappingMetaData(String type, CompressedString source, Id id, Routing routing, Timestamp timestamp, boolean hasParentField) {
+ this.type = type;
+ this.source = source;
+ this.id = id;
+ this.routing = routing;
+ this.timestamp = timestamp;
+ this.hasParentField = hasParentField;
+ }
+
+ void updateDefaultMapping(MappingMetaData defaultMapping) {
+ if (id == Id.EMPTY) {
+ id = defaultMapping.id();
+ }
+ if (routing == Routing.EMPTY) {
+ routing = defaultMapping.routing();
+ }
+ if (timestamp == Timestamp.EMPTY) {
+ timestamp = defaultMapping.timestamp();
+ }
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public CompressedString source() {
+ return this.source;
+ }
+
+ public boolean hasParentField() {
+ return hasParentField;
+ }
+
+ /**
+ * Converts the serialized compressed form of the mappings into a parsed map.
+ */
+ public Map<String, Object> sourceAsMap() throws IOException {
+ Map<String, Object> mapping = XContentHelper.convertToMap(source.compressed(), 0, source.compressed().length, true).v2();
+ if (mapping.size() == 1 && mapping.containsKey(type())) {
+ // the type name is the root value, reduce it
+ mapping = (Map<String, Object>) mapping.get(type());
+ }
+ return mapping;
+ }
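+
+ // Example sketch (illustration): for a mapping stored as {"tweet": {"properties": {...}}},
+ // sourceAsMap() strips the root "tweet" key and returns only {"properties": {...}}.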
+
+ /**
+ * Converts the serialized compressed form of the mappings into a parsed map.
+ */
+ public Map<String, Object> getSourceAsMap() throws IOException {
+ return sourceAsMap();
+ }
+
+ public Id id() {
+ return this.id;
+ }
+
+ public Routing routing() {
+ return this.routing;
+ }
+
+ public Timestamp timestamp() {
+ return this.timestamp;
+ }
+
+ public ParseContext createParseContext(@Nullable String id, @Nullable String routing, @Nullable String timestamp) {
+ return new ParseContext(
+ id == null && id().hasPath(),
+ routing == null && routing().hasPath(),
+ timestamp == null && timestamp().hasPath()
+ );
+ }
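+
+ // Usage sketch (illustration; mappingMd is a hypothetical instance): the context only asks
+ // for values that were not supplied externally and that have a path configured, e.g.
+ //   ParseContext ctx = mappingMd.createParseContext("1", null, null);
+ //   ctx.shouldParseId(); // false - an id was already provided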
+
+ public void parse(XContentParser parser, ParseContext parseContext) throws IOException {
+ innerParse(parser, parseContext);
+ }
+
+ private void innerParse(XContentParser parser, ParseContext context) throws IOException {
+ if (!context.parsingStillNeeded()) {
+ return;
+ }
+
+ XContentParser.Token t = parser.currentToken();
+ if (t == null) {
+ t = parser.nextToken();
+ }
+ if (t == XContentParser.Token.START_OBJECT) {
+ t = parser.nextToken();
+ }
+ String idPart = context.idParsingStillNeeded() ? id().pathElements()[context.locationId] : null;
+ String routingPart = context.routingParsingStillNeeded() ? routing().pathElements()[context.locationRouting] : null;
+ String timestampPart = context.timestampParsingStillNeeded() ? timestamp().pathElements()[context.locationTimestamp] : null;
+
+ for (; t == XContentParser.Token.FIELD_NAME; t = parser.nextToken()) {
+ // Must point to field name
+ String fieldName = parser.currentName();
+ // And then the value...
+ t = parser.nextToken();
+ boolean incLocationId = false;
+ boolean incLocationRouting = false;
+ boolean incLocationTimestamp = false;
+ if (context.idParsingStillNeeded() && fieldName.equals(idPart)) {
+ if (context.locationId + 1 == id.pathElements().length) {
+ if (!t.isValue()) {
+ throw new MapperParsingException("id field must be a value but was either an object or an array");
+ }
+ context.id = parser.textOrNull();
+ context.idResolved = true;
+ } else {
+ incLocationId = true;
+ }
+ }
+ if (context.routingParsingStillNeeded() && fieldName.equals(routingPart)) {
+ if (context.locationRouting + 1 == routing.pathElements().length) {
+ context.routing = parser.textOrNull();
+ context.routingResolved = true;
+ } else {
+ incLocationRouting = true;
+ }
+ }
+ if (context.timestampParsingStillNeeded() && fieldName.equals(timestampPart)) {
+ if (context.locationTimestamp + 1 == timestamp.pathElements().length) {
+ context.timestamp = parser.textOrNull();
+ context.timestampResolved = true;
+ } else {
+ incLocationTimestamp = true;
+ }
+ }
+
+ if (incLocationId || incLocationRouting || incLocationTimestamp) {
+ if (t == XContentParser.Token.START_OBJECT) {
+ context.locationId += incLocationId ? 1 : 0;
+ context.locationRouting += incLocationRouting ? 1 : 0;
+ context.locationTimestamp += incLocationTimestamp ? 1 : 0;
+ innerParse(parser, context);
+ context.locationId -= incLocationId ? 1 : 0;
+ context.locationRouting -= incLocationRouting ? 1 : 0;
+ context.locationTimestamp -= incLocationTimestamp ? 1 : 0;
+ }
+ } else {
+ parser.skipChildren();
+ }
+
+ if (!context.parsingStillNeeded()) {
+ return;
+ }
+ }
+ }
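+
+ // Walkthrough sketch (illustration): with an id path of "obj1.id", parsing {"obj1": {"id": "1"}}
+ // first matches "obj1" (not the last path element, so locationId is incremented and the parser
+ // recurses into the object), then matches "id" at the final element and resolves context.id to "1".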
+
+ public static void writeTo(MappingMetaData mappingMd, StreamOutput out) throws IOException {
+ out.writeString(mappingMd.type());
+ mappingMd.source().writeTo(out);
+ // id
+ if (mappingMd.id().hasPath()) {
+ out.writeBoolean(true);
+ out.writeString(mappingMd.id().path());
+ } else {
+ out.writeBoolean(false);
+ }
+ // routing
+ out.writeBoolean(mappingMd.routing().required());
+ if (mappingMd.routing().hasPath()) {
+ out.writeBoolean(true);
+ out.writeString(mappingMd.routing().path());
+ } else {
+ out.writeBoolean(false);
+ }
+ // timestamp
+ out.writeBoolean(mappingMd.timestamp().enabled());
+ if (mappingMd.timestamp().hasPath()) {
+ out.writeBoolean(true);
+ out.writeString(mappingMd.timestamp().path());
+ } else {
+ out.writeBoolean(false);
+ }
+ out.writeString(mappingMd.timestamp().format());
+ out.writeBoolean(mappingMd.hasParentField());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ MappingMetaData that = (MappingMetaData) o;
+
+ if (!id.equals(that.id)) return false;
+ if (!routing.equals(that.routing)) return false;
+ if (!source.equals(that.source)) return false;
+ if (!timestamp.equals(that.timestamp)) return false;
+ if (!type.equals(that.type)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = type.hashCode();
+ result = 31 * result + source.hashCode();
+ result = 31 * result + id.hashCode();
+ result = 31 * result + routing.hashCode();
+ result = 31 * result + timestamp.hashCode();
+ return result;
+ }
+
+ public static MappingMetaData readFrom(StreamInput in) throws IOException {
+ String type = in.readString();
+ CompressedString source = CompressedString.readCompressedString(in);
+ // id
+ Id id = new Id(in.readBoolean() ? in.readString() : null);
+ // routing
+ Routing routing = new Routing(in.readBoolean(), in.readBoolean() ? in.readString() : null);
+ // timestamp
+ Timestamp timestamp = new Timestamp(in.readBoolean(), in.readBoolean() ? in.readString() : null, in.readString());
+ final boolean hasParentField = in.readBoolean();
+ return new MappingMetaData(type, source, id, routing, timestamp, hasParentField);
+ }
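+
+ // Note: the read order above must mirror writeTo exactly - type, source, optional id path,
+ // routing required flag plus optional path, timestamp enabled flag plus optional path and
+ // format, and finally the parent-field flag.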
+
+ public static class ParseContext {
+ final boolean shouldParseId;
+ final boolean shouldParseRouting;
+ final boolean shouldParseTimestamp;
+
+ int locationId = 0;
+ int locationRouting = 0;
+ int locationTimestamp = 0;
+ boolean idResolved;
+ boolean routingResolved;
+ boolean timestampResolved;
+ String id;
+ String routing;
+ String timestamp;
+
+ public ParseContext(boolean shouldParseId, boolean shouldParseRouting, boolean shouldParseTimestamp) {
+ this.shouldParseId = shouldParseId;
+ this.shouldParseRouting = shouldParseRouting;
+ this.shouldParseTimestamp = shouldParseTimestamp;
+ }
+
+ /**
+ * The id value parsed, or <tt>null</tt> if parsing was not required or the value was not resolved.
+ */
+ public String id() {
+ return id;
+ }
+
+ /**
+ * Is id parsing needed at all?
+ */
+ public boolean shouldParseId() {
+ return shouldParseId;
+ }
+
+ /**
+ * Has the id been resolved during the parsing phase?
+ */
+ public boolean idResolved() {
+ return idResolved;
+ }
+
+ /**
+ * Is id parsing still needed?
+ */
+ public boolean idParsingStillNeeded() {
+ return shouldParseId && !idResolved;
+ }
+
+ /**
+ * The routing value parsed, or <tt>null</tt> if parsing was not required or the value was not resolved.
+ */
+ public String routing() {
+ return routing;
+ }
+
+ /**
+ * Is routing parsing needed at all?
+ */
+ public boolean shouldParseRouting() {
+ return shouldParseRouting;
+ }
+
+ /**
+ * Has the routing been resolved during the parsing phase?
+ */
+ public boolean routingResolved() {
+ return routingResolved;
+ }
+
+ /**
+ * Is routing parsing still needed?
+ */
+ public boolean routingParsingStillNeeded() {
+ return shouldParseRouting && !routingResolved;
+ }
+
+ /**
+ * The timestamp value parsed, or <tt>null</tt> if parsing was not required or the value was not resolved.
+ */
+ public String timestamp() {
+ return timestamp;
+ }
+
+ /**
+ * Is timestamp parsing needed at all?
+ */
+ public boolean shouldParseTimestamp() {
+ return shouldParseTimestamp;
+ }
+
+ /**
+ * Has the timestamp been resolved during the parsing phase?
+ */
+ public boolean timestampResolved() {
+ return timestampResolved;
+ }
+
+ /**
+ * Is timestamp parsing still needed?
+ */
+ public boolean timestampParsingStillNeeded() {
+ return shouldParseTimestamp && !timestampResolved;
+ }
+
+ /**
+ * Do we really need parsing?
+ */
+ public boolean shouldParse() {
+ return shouldParseId || shouldParseRouting || shouldParseTimestamp;
+ }
+
+ /**
+ * Is parsing still needed?
+ */
+ public boolean parsingStillNeeded() {
+ return idParsingStillNeeded() || routingParsingStillNeeded() || timestampParsingStillNeeded();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
new file mode 100644
index 0000000..232485d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -0,0 +1,1380 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Predicate;
+import com.google.common.collect.*;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.loader.SettingsLoader;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.settings.ImmutableSettings.*;
+
+/**
+ *
+ */
+public class MetaData implements Iterable<IndexMetaData> {
+
+ public interface Custom {
+
+ interface Factory<T extends Custom> {
+
+ String type();
+
+ T readFrom(StreamInput in) throws IOException;
+
+ void writeTo(T customIndexMetaData, StreamOutput out) throws IOException;
+
+ T fromXContent(XContentParser parser) throws IOException;
+
+ void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException;
+
+ /**
+ * Returns true if this custom metadata should be persisted as part of global cluster state
+ */
+ boolean isPersistent();
+ }
+ }
+
+ public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>();
+
+ static {
+ // register non plugin custom metadata
+ registerFactory(RepositoriesMetaData.TYPE, RepositoriesMetaData.FACTORY);
+ registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY);
+ registerFactory(RestoreMetaData.TYPE, RestoreMetaData.FACTORY);
+ }
+
+ /**
+ * Register a custom index meta data factory. Make sure to call it from a static block.
+ */
+ public static void registerFactory(String type, Custom.Factory factory) {
+ customFactories.put(type, factory);
+ }
+
+ @Nullable
+ public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) {
+ return customFactories.get(type);
+ }
+
+ public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException {
+ Custom.Factory<T> factory = customFactories.get(type);
+ if (factory == null) {
+ throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]");
+ }
+ return factory;
+ }
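+
+ // Usage sketch (illustration; MyMetaData is hypothetical): a plugin registers its custom
+ // metadata from a static block, mirroring the built-in registrations above:
+ //   static {
+ //       MetaData.registerFactory(MyMetaData.TYPE, MyMetaData.FACTORY);
+ //   }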
+
+
+ public static final String SETTING_READ_ONLY = "cluster.blocks.read_only";
+
+ public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA);
+
+ public static final MetaData EMPTY_META_DATA = builder().build();
+
+ public static final String GLOBAL_PERSISTENT_ONLY_PARAM = "global_persistent_only";
+
+ private final String uuid;
+ private final long version;
+
+ private final Settings transientSettings;
+ private final Settings persistentSettings;
+ private final Settings settings;
+ private final ImmutableOpenMap<String, IndexMetaData> indices;
+ private final ImmutableOpenMap<String, IndexTemplateMetaData> templates;
+ private final ImmutableOpenMap<String, Custom> customs;
+
+ private final transient int totalNumberOfShards; // Transient ? not serializable anyway?
+ private final int numberOfShards;
+
+
+ private final String[] allIndices;
+ private final String[] allOpenIndices;
+ private final String[] allClosedIndices;
+
+ private final ImmutableOpenMap<String, ImmutableOpenMap<String, AliasMetaData>> aliases;
+ private final ImmutableOpenMap<String, String[]> aliasAndIndexToIndexMap;
+
+ @SuppressWarnings("unchecked")
+ MetaData(String uuid, long version, Settings transientSettings, Settings persistentSettings, ImmutableOpenMap<String, IndexMetaData> indices, ImmutableOpenMap<String, IndexTemplateMetaData> templates, ImmutableOpenMap<String, Custom> customs) {
+ this.uuid = uuid;
+ this.version = version;
+ this.transientSettings = transientSettings;
+ this.persistentSettings = persistentSettings;
+ this.settings = ImmutableSettings.settingsBuilder().put(persistentSettings).put(transientSettings).build();
+ this.indices = indices;
+ this.customs = customs;
+ this.templates = templates;
+ int totalNumberOfShards = 0;
+ int numberOfShards = 0;
+ int numAliases = 0;
+ for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
+ totalNumberOfShards += cursor.value.totalNumberOfShards();
+ numberOfShards += cursor.value.numberOfShards();
+ numAliases += cursor.value.aliases().size();
+ }
+ this.totalNumberOfShards = totalNumberOfShards;
+ this.numberOfShards = numberOfShards;
+
+ // build all indices map
+ List<String> allIndicesLst = Lists.newArrayList();
+ for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
+ allIndicesLst.add(cursor.value.index());
+ }
+ allIndices = allIndicesLst.toArray(new String[allIndicesLst.size()]);
+ int numIndices = allIndicesLst.size();
+
+ List<String> allOpenIndices = Lists.newArrayList();
+ List<String> allClosedIndices = Lists.newArrayList();
+ for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
+ IndexMetaData indexMetaData = cursor.value;
+ if (indexMetaData.state() == IndexMetaData.State.OPEN) {
+ allOpenIndices.add(indexMetaData.index());
+ } else if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
+ allClosedIndices.add(indexMetaData.index());
+ }
+ }
+ this.allOpenIndices = allOpenIndices.toArray(new String[allOpenIndices.size()]);
+ this.allClosedIndices = allClosedIndices.toArray(new String[allClosedIndices.size()]);
+
+ // build aliases map
+ ImmutableOpenMap.Builder<String, Object> tmpAliases = ImmutableOpenMap.builder(numAliases);
+ for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
+ IndexMetaData indexMetaData = cursor.value;
+ String index = indexMetaData.index();
+ for (ObjectCursor<AliasMetaData> aliasCursor : indexMetaData.aliases().values()) {
+ AliasMetaData aliasMd = aliasCursor.value;
+ ImmutableOpenMap.Builder<String, AliasMetaData> indexAliasMap = (ImmutableOpenMap.Builder<String, AliasMetaData>) tmpAliases.get(aliasMd.alias());
+ if (indexAliasMap == null) {
+ indexAliasMap = ImmutableOpenMap.builder(indices.size());
+ tmpAliases.put(aliasMd.alias(), indexAliasMap);
+ }
+ indexAliasMap.put(index, aliasMd);
+ }
+ }
+
+ for (ObjectCursor<String> cursor : tmpAliases.keys()) {
+ String alias = cursor.value;
+ // if there were access to the raw values buffer of the map that the immutable map wraps, we wouldn't need to use put and could just set array slots
+ ImmutableOpenMap<String, AliasMetaData> map = ((ImmutableOpenMap.Builder) tmpAliases.get(alias)).cast().build();
+ tmpAliases.put(alias, map);
+ }
+
+ this.aliases = tmpAliases.<String, ImmutableOpenMap<String, AliasMetaData>>cast().build();
+ ImmutableOpenMap.Builder<String, Object> aliasAndIndexToIndexMap = ImmutableOpenMap.builder(numAliases + numIndices);
+ for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
+ IndexMetaData indexMetaData = cursor.value;
+ ObjectArrayList<String> indicesLst = (ObjectArrayList<String>) aliasAndIndexToIndexMap.get(indexMetaData.index());
+ if (indicesLst == null) {
+ indicesLst = new ObjectArrayList<String>();
+ aliasAndIndexToIndexMap.put(indexMetaData.index(), indicesLst);
+ }
+ indicesLst.add(indexMetaData.index());
+
+ for (ObjectCursor<String> cursor1 : indexMetaData.aliases().keys()) {
+ String alias = cursor1.value;
+ indicesLst = (ObjectArrayList<String>) aliasAndIndexToIndexMap.get(alias);
+ if (indicesLst == null) {
+ indicesLst = new ObjectArrayList<String>();
+ aliasAndIndexToIndexMap.put(alias, indicesLst);
+ }
+ indicesLst.add(indexMetaData.index());
+ }
+ }
+
+ for (ObjectObjectCursor<String, Object> cursor : aliasAndIndexToIndexMap) {
+ String[] indicesLst = ((ObjectArrayList<String>) cursor.value).toArray(String.class);
+ aliasAndIndexToIndexMap.put(cursor.key, indicesLst);
+ }
+
+ this.aliasAndIndexToIndexMap = aliasAndIndexToIndexMap.<String, String[]>cast().build();
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public String uuid() {
+ return this.uuid;
+ }
+
+ /**
+ * Returns the merged transient and persistent settings.
+ */
+ public Settings settings() {
+ return this.settings;
+ }
+
+ public Settings transientSettings() {
+ return this.transientSettings;
+ }
+
+ public Settings persistentSettings() {
+ return this.persistentSettings;
+ }
+
+ public ImmutableOpenMap<String, ImmutableOpenMap<String, AliasMetaData>> aliases() {
+ return this.aliases;
+ }
+
+ public ImmutableOpenMap<String, ImmutableOpenMap<String, AliasMetaData>> getAliases() {
+ return aliases();
+ }
+
+ /**
+ * Finds the index aliases that match the specified alias names, either exactly or partially via wildcards,
+ * and that point to the specified concrete indices.
+ *
+ * @param aliases The names of the index aliases to find
+ * @param concreteIndices The concrete indices the index aliases must point to in order to be returned.
+ * @return the found index aliases grouped by index
+ */
+ public ImmutableOpenMap<String, ImmutableList<AliasMetaData>> findAliases(final String[] aliases, String[] concreteIndices) {
+ assert aliases != null;
+ assert concreteIndices != null;
+ if (concreteIndices.length == 0) {
+ return ImmutableOpenMap.of();
+ }
+
+ boolean matchAllAliases = matchAllAliases(aliases);
+ ImmutableOpenMap.Builder<String, ImmutableList<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder();
+ Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys());
+ for (String index : intersection) {
+ IndexMetaData indexMetaData = indices.get(index);
+ List<AliasMetaData> filteredValues = Lists.newArrayList();
+ for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
+ AliasMetaData value = cursor.value;
+ if (matchAllAliases || Regex.simpleMatch(aliases, value.alias())) {
+ filteredValues.add(value);
+ }
+ }
+
+ if (!filteredValues.isEmpty()) {
+ mapBuilder.put(index, ImmutableList.copyOf(filteredValues));
+ }
+ }
+ return mapBuilder.build();
+ }
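+
+ // Example sketch (illustration): if index "idx1" carries aliases "alias1" and "alias2", then
+ // findAliases(new String[]{"alias*"}, new String[]{"idx1"}) returns {"idx1" -> [alias1, alias2]};
+ // passing {"_all"} or an empty alias array matches every alias on the given indices.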
+
+ private boolean matchAllAliases(final String[] aliases) {
+ for (String alias : aliases) {
+ if (alias.equals("_all")) {
+ return true;
+ }
+ }
+ return aliases.length == 0;
+ }
+
+ /**
+ * Checks if at least one of the specified aliases exists in the specified concrete indices. Wildcards are supported in the
+ * alias names for partial matches.
+ *
+ * @param aliases The names of the index aliases to find
+ * @param concreteIndices The concrete indices the index aliases must point to in order to be returned.
+ * @return whether at least one of the specified aliases exists in one of the specified concrete indices.
+ */
+ public boolean hasAliases(final String[] aliases, String[] concreteIndices) {
+ assert aliases != null;
+ assert concreteIndices != null;
+ if (concreteIndices.length == 0) {
+ return false;
+ }
+
+ Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys());
+ for (String index : intersection) {
+ IndexMetaData indexMetaData = indices.get(index);
+ List<AliasMetaData> filteredValues = Lists.newArrayList();
+ for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
+ AliasMetaData value = cursor.value;
+ if (Regex.simpleMatch(aliases, value.alias())) {
+ filteredValues.add(value);
+ }
+ }
+ if (!filteredValues.isEmpty()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Finds all mappings for the given types and concrete indices. Types are expanded to
+ * include all types that match the glob patterns in the types array. An empty or null
+ * types array, or {"_all"}, will be expanded to all types available for
+ * the given indices.
+ */
+ public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) {
+ assert types != null;
+ assert concreteIndices != null;
+ if (concreteIndices.length == 0) {
+ return ImmutableOpenMap.of();
+ }
+
+ ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder();
+ Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys());
+ for (String index : intersection) {
+ IndexMetaData indexMetaData = indices.get(index);
+ ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings;
+ if (isAllTypes(types)) {
+ indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all
+
+ } else {
+ filteredMappings = ImmutableOpenMap.builder();
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
+ if (Regex.simpleMatch(types, cursor.key)) {
+ filteredMappings.put(cursor.key, cursor.value);
+ }
+ }
+ if (!filteredMappings.isEmpty()) {
+ indexMapBuilder.put(index, filteredMappings.build());
+ }
+ }
+ }
+ return indexMapBuilder.build();
+ }
+
+ public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
+ assert uncheckedWarmers != null;
+ assert concreteIndices != null;
+ if (concreteIndices.length == 0) {
+ return ImmutableOpenMap.of();
+ }
+ // special _all check to behave the same as not specifying anything for the warmers (not for the indices)
+ final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers;
+
+ ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
+ Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys());
+ for (String index : intersection) {
+ IndexMetaData indexMetaData = indices.get(index);
+ IndexWarmersMetaData indexWarmersMetaData = indexMetaData.custom(IndexWarmersMetaData.TYPE);
+ if (indexWarmersMetaData == null || indexWarmersMetaData.entries().isEmpty()) {
+ continue;
+ }
+
+ Collection<IndexWarmersMetaData.Entry> filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate<IndexWarmersMetaData.Entry>() {
+
+ @Override
+ public boolean apply(IndexWarmersMetaData.Entry warmer) {
+ if (warmers.length != 0 && types.length != 0) {
+ return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types());
+ } else if (warmers.length != 0) {
+ return Regex.simpleMatch(warmers, warmer.name());
+ } else if (types.length != 0) {
+ return Regex.simpleMatch(types, warmer.types());
+ } else {
+ return true;
+ }
+ }
+
+ });
+ if (!filteredWarmers.isEmpty()) {
+ mapBuilder.put(index, ImmutableList.copyOf(filteredWarmers));
+ }
+ }
+ return mapBuilder.build();
+ }
+
+ /**
+ * Returns all the concrete indices.
+ */
+ public String[] concreteAllIndices() {
+ return allIndices;
+ }
+
+ public String[] getConcreteAllIndices() {
+ return concreteAllIndices();
+ }
+
+ public String[] concreteAllOpenIndices() {
+ return allOpenIndices;
+ }
+
+ public String[] getConcreteAllOpenIndices() {
+ return allOpenIndices;
+ }
+
+ public String[] concreteAllClosedIndices() {
+ return allClosedIndices;
+ }
+
+ public String[] getConcreteAllClosedIndices() {
+ return allClosedIndices;
+ }
+
+ /**
+ * Returns the index routing for the given index or alias.
+ */
+ public String resolveIndexRouting(@Nullable String routing, String aliasOrIndex) {
+ // Check if index is specified by an alias
+ ImmutableOpenMap<String, AliasMetaData> indexAliases = aliases.get(aliasOrIndex);
+ if (indexAliases == null || indexAliases.isEmpty()) {
+ return routing;
+ }
+ if (indexAliases.size() > 1) {
+ throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexAliases.keys().toArray(String.class)) + "], can't execute a single index op");
+ }
+ AliasMetaData aliasMd = indexAliases.values().iterator().next().value;
+ if (aliasMd.indexRouting() != null) {
+ if (routing != null) {
+ if (!routing.equals(aliasMd.indexRouting())) {
+ throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation");
+ }
+ }
+ routing = aliasMd.indexRouting();
+ }
+ if (routing != null) {
+ if (routing.indexOf(',') != -1) {
+ throw new ElasticsearchIllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + routing + "] that resolved to several routing values, rejecting operation");
+ }
+ }
+ return routing;
+ }
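+
+ // Example sketch (illustration): if alias "a1" points to "idx1" with index routing "r1", then
+ // resolveIndexRouting(null, "a1") returns "r1", while resolveIndexRouting("r2", "a1") throws
+ // because the explicit routing conflicts with the alias routing; for a plain index name the
+ // provided routing is passed through unchanged.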
+
+ public Map<String, Set<String>> resolveSearchRouting(@Nullable String routing, String aliasOrIndex) {
+ return resolveSearchRouting(routing, convertFromWildcards(new String[]{aliasOrIndex}, IndicesOptions.lenient()));
+ }
+
+ public Map<String, Set<String>> resolveSearchRouting(@Nullable String routing, String[] aliasesOrIndices) {
+ if (isAllIndices(aliasesOrIndices)) {
+ return resolveSearchRoutingAllIndices(routing);
+ }
+
+ aliasesOrIndices = convertFromWildcards(aliasesOrIndices, IndicesOptions.lenient());
+
+ if (aliasesOrIndices.length == 1) {
+ return resolveSearchRoutingSingleValue(routing, aliasesOrIndices[0]);
+ }
+
+ Map<String, Set<String>> routings = null;
+ Set<String> paramRouting = null;
+ // List of indices that don't require any routing
+ Set<String> norouting = new HashSet<String>();
+ if (routing != null) {
+ paramRouting = Strings.splitStringByCommaToSet(routing);
+ }
+
+ for (String aliasOrIndex : aliasesOrIndices) {
+ ImmutableOpenMap<String, AliasMetaData> indexToRoutingMap = aliases.get(aliasOrIndex);
+ if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) {
+ for (ObjectObjectCursor<String, AliasMetaData> indexRouting : indexToRoutingMap) {
+ if (!norouting.contains(indexRouting.key)) {
+ if (!indexRouting.value.searchRoutingValues().isEmpty()) {
+ // Routing alias
+ if (routings == null) {
+ routings = newHashMap();
+ }
+ Set<String> r = routings.get(indexRouting.key);
+ if (r == null) {
+ r = new HashSet<String>();
+ routings.put(indexRouting.key, r);
+ }
+ r.addAll(indexRouting.value.searchRoutingValues());
+ if (paramRouting != null) {
+ r.retainAll(paramRouting);
+ }
+ if (r.isEmpty()) {
+ routings.remove(indexRouting.key);
+ }
+ } else {
+ // Non-routing alias
+ if (!norouting.contains(indexRouting.key)) {
+ norouting.add(indexRouting.key);
+ if (paramRouting != null) {
+ Set<String> r = new HashSet<String>(paramRouting);
+ if (routings == null) {
+ routings = newHashMap();
+ }
+ routings.put(indexRouting.key, r);
+ } else {
+ if (routings != null) {
+ routings.remove(indexRouting.key);
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ // Index
+ if (!norouting.contains(aliasOrIndex)) {
+ norouting.add(aliasOrIndex);
+ if (paramRouting != null) {
+ Set<String> r = new HashSet<String>(paramRouting);
+ if (routings == null) {
+ routings = newHashMap();
+ }
+ routings.put(aliasOrIndex, r);
+ } else {
+ if (routings != null) {
+ routings.remove(aliasOrIndex);
+ }
+ }
+ }
+ }
+
+ }
+ if (routings == null || routings.isEmpty()) {
+ return null;
+ }
+ return routings;
+ }
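+
+ // Example sketch (illustration): with alias "a1" -> "idx1" (search routing "1,2") and plain
+ // index "idx2", resolveSearchRouting("2,3", new String[]{"a1", "idx2"}) yields
+ // {idx1=[2], idx2=[2, 3]} - alias routing is intersected with the request routing, while a
+ // plain index keeps the full request routing.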
+
+ private Map<String, Set<String>> resolveSearchRoutingSingleValue(@Nullable String routing, String aliasOrIndex) {
+ Map<String, Set<String>> routings = null;
+ Set<String> paramRouting = null;
+ if (routing != null) {
+ paramRouting = Strings.splitStringByCommaToSet(routing);
+ }
+
+ ImmutableOpenMap<String, AliasMetaData> indexToRoutingMap = aliases.get(aliasOrIndex);
+ if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) {
+ // It's an alias
+ for (ObjectObjectCursor<String, AliasMetaData> indexRouting : indexToRoutingMap) {
+ if (!indexRouting.value.searchRoutingValues().isEmpty()) {
+ // Routing alias
+ Set<String> r = new HashSet<String>(indexRouting.value.searchRoutingValues());
+ if (paramRouting != null) {
+ r.retainAll(paramRouting);
+ }
+ if (!r.isEmpty()) {
+ if (routings == null) {
+ routings = newHashMap();
+ }
+ routings.put(indexRouting.key, r);
+ }
+ } else {
+ // Non-routing alias
+ if (paramRouting != null) {
+ Set<String> r = new HashSet<String>(paramRouting);
+ if (routings == null) {
+ routings = newHashMap();
+ }
+ routings.put(indexRouting.key, r);
+ }
+ }
+ }
+ } else {
+ // It's an index
+ if (paramRouting != null) {
+ routings = ImmutableMap.of(aliasOrIndex, paramRouting);
+ }
+ }
+ return routings;
+ }
+
+ /**
+ * Sets the same routing for all indices
+ */
+ private Map<String, Set<String>> resolveSearchRoutingAllIndices(String routing) {
+ if (routing != null) {
+ Set<String> r = Strings.splitStringByCommaToSet(routing);
+ Map<String, Set<String>> routings = newHashMap();
+ String[] concreteIndices = concreteAllIndices();
+ for (String index : concreteIndices) {
+ routings.put(index, r);
+ }
+ return routings;
+ }
+ return null;
+ }
+
+ /**
+ * Translates the provided indices (possibly aliased) into actual indices.
+ */
+ public String[] concreteIndices(String[] indices) throws IndexMissingException {
+ return concreteIndices(indices, IndicesOptions.fromOptions(false, true, true, true));
+ }
+
+ /**
+ * Translates the provided indices (possibly aliased) into actual indices.
+ */
+ public String[] concreteIndicesIgnoreMissing(String[] indices) {
+ return concreteIndices(indices, IndicesOptions.fromOptions(true, true, true, false));
+ }
+
+ /**
+ * Translates the provided indices (possibly aliased) into actual indices.
+ */
+ public String[] concreteIndices(String[] aliasesOrIndices, IndicesOptions indicesOptions) throws IndexMissingException {
+ if (isAllIndices(aliasesOrIndices)) {
+ String[] concreteIndices;
+ if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) {
+ concreteIndices = concreteAllIndices();
+ } else if (indicesOptions.expandWildcardsOpen()) {
+ concreteIndices = concreteAllOpenIndices();
+ } else if (indicesOptions.expandWildcardsClosed()) {
+ concreteIndices = concreteAllClosedIndices();
+ } else {
+ assert false : "Shouldn't end up here";
+ concreteIndices = Strings.EMPTY_ARRAY;
+ }
+
+ if (!indicesOptions.allowNoIndices() && concreteIndices.length == 0) {
+ throw new IndexMissingException(new Index("_all"));
+ }
+ return concreteIndices;
+ }
+ aliasesOrIndices = convertFromWildcards(aliasesOrIndices, indicesOptions);
+ // optimize for single element index (common case)
+ if (aliasesOrIndices.length == 1) {
+ String aliasOrIndex = aliasesOrIndices[0];
+ // if a direct index name, just return the array provided
+ if (this.indices.containsKey(aliasOrIndex)) {
+ return aliasesOrIndices;
+ }
+ String[] actualLst = aliasAndIndexToIndexMap.getOrDefault(aliasOrIndex, Strings.EMPTY_ARRAY);
+ if (!indicesOptions.allowNoIndices() && actualLst.length == 0) {
+ throw new IndexMissingException(new Index(aliasOrIndex));
+ } else {
+ return actualLst;
+ }
+ }
+
+ // check if it's a possibly aliased index; if not, just return the
+ // passed array
+ boolean possiblyAliased = false;
+ for (String index : aliasesOrIndices) {
+ if (!this.indices.containsKey(index)) {
+ possiblyAliased = true;
+ break;
+ }
+ }
+ if (!possiblyAliased) {
+ return aliasesOrIndices;
+ }
+
+ Set<String> actualIndices = new HashSet<String>();
+ for (String index : aliasesOrIndices) {
+ String[] actualLst = aliasAndIndexToIndexMap.get(index);
+ if (actualLst == null) {
+ if (!indicesOptions.ignoreUnavailable()) {
+ throw new IndexMissingException(new Index(index));
+ }
+ } else {
+ for (String x : actualLst) {
+ actualIndices.add(x);
+ }
+ }
+ }
+
+ if (!indicesOptions.allowNoIndices() && actualIndices.isEmpty()) {
+ throw new IndexMissingException(new Index(Arrays.toString(aliasesOrIndices)));
+ }
+ return actualIndices.toArray(new String[actualIndices.size()]);
+ }
+
+ public String concreteIndex(String index) throws IndexMissingException, ElasticsearchIllegalArgumentException {
+ // a quick check: if this is an actual index, return it
+ if (indices.containsKey(index)) {
+ return index;
+ }
+ // not an actual index, fetch from an alias
+ String[] lst = aliasAndIndexToIndexMap.get(index);
+ if (lst == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ if (lst.length > 1) {
+ throw new ElasticsearchIllegalArgumentException("Alias [" + index + "] has more than one index associated with it [" + Arrays.toString(lst) + "], can't execute a single index op");
+ }
+ return lst[0];
+ }
+
+ /**
+ * Converts a list of index or alias wildcards, together with special +/- signs, into their respective full matches. The
+ * expansion covers not only indices but also aliases. For example, alias_* will expand to alias_1 and alias_2, not
+ * to the respective indices those aliases point to.
+ */
+ public String[] convertFromWildcards(String[] aliasesOrIndices, IndicesOptions indicesOptions) {
+ if (aliasesOrIndices == null) {
+ return null;
+ }
+ Set<String> result = null;
+ for (int i = 0; i < aliasesOrIndices.length; i++) {
+ String aliasOrIndex = aliasesOrIndices[i];
+ if (aliasAndIndexToIndexMap.containsKey(aliasOrIndex)) {
+ if (result != null) {
+ result.add(aliasOrIndex);
+ }
+ continue;
+ }
+ boolean add = true;
+ if (aliasOrIndex.charAt(0) == '+') {
+ // if it's the first, add an empty result set
+ if (i == 0) {
+ result = new HashSet<String>();
+ }
+ add = true;
+ aliasOrIndex = aliasOrIndex.substring(1);
+ } else if (aliasOrIndex.charAt(0) == '-') {
+ // if it's the first, fill it with all the indices...
+ if (i == 0) {
+ String[] concreteIndices;
+ if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) {
+ concreteIndices = concreteAllIndices();
+ } else if (indicesOptions.expandWildcardsOpen()) {
+ concreteIndices = concreteAllOpenIndices();
+ } else if (indicesOptions.expandWildcardsClosed()) {
+ concreteIndices = concreteAllClosedIndices();
+ } else {
+ assert false : "Shouldn't end up here";
+ concreteIndices = Strings.EMPTY_ARRAY;
+ }
+ result = new HashSet<String>(Arrays.asList(concreteIndices));
+ }
+ add = false;
+ aliasOrIndex = aliasOrIndex.substring(1);
+ }
+ if (!Regex.isSimpleMatchPattern(aliasOrIndex)) {
+ if (!indicesOptions.ignoreUnavailable() && !aliasAndIndexToIndexMap.containsKey(aliasOrIndex)) {
+ throw new IndexMissingException(new Index(aliasOrIndex));
+ }
+ if (result != null) {
+ if (add) {
+ result.add(aliasOrIndex);
+ } else {
+ result.remove(aliasOrIndex);
+ }
+ }
+ continue;
+ }
+ if (result == null) {
+ // add all the previous ones...
+ result = new HashSet<String>();
+ result.addAll(Arrays.asList(aliasesOrIndices).subList(0, i));
+ }
+ String[] indices;
+ if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) {
+ indices = concreteAllIndices();
+ } else if (indicesOptions.expandWildcardsOpen()) {
+ indices = concreteAllOpenIndices();
+ } else if (indicesOptions.expandWildcardsClosed()) {
+ indices = concreteAllClosedIndices();
+ } else {
+ assert false : "Shouldn't end up here";
+ indices = Strings.EMPTY_ARRAY;
+ }
+ boolean found = false;
+ // iterate over all concrete indices and see if there is a wildcard match
+ for (String index : indices) {
+ if (Regex.simpleMatch(aliasOrIndex, index)) {
+ found = true;
+ if (add) {
+ result.add(index);
+ } else {
+ result.remove(index);
+ }
+ }
+ }
+ // iterate over all aliases and see if there is a wildcard match
+ for (ObjectCursor<String> cursor : aliases.keys()) {
+ String alias = cursor.value;
+ if (Regex.simpleMatch(aliasOrIndex, alias)) {
+ found = true;
+ if (add) {
+ result.add(alias);
+ } else {
+ result.remove(alias);
+ }
+ }
+ }
+ if (!found && !indicesOptions.allowNoIndices()) {
+ throw new IndexMissingException(new Index(aliasOrIndex));
+ }
+ }
+ if (result == null) {
+ return aliasesOrIndices;
+ }
+ if (result.isEmpty() && !indicesOptions.allowNoIndices()) {
+ throw new IndexMissingException(new Index(Arrays.toString(aliasesOrIndices)));
+ }
+ return result.toArray(new String[result.size()]);
+ }
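+
+ // Example sketch (illustration): given indices "idx1", "idx2" and aliases "alias_1", "alias_2",
+ // convertFromWildcards(new String[]{"alias_*"}, ...) expands to {alias_1, alias_2}, while
+ // {"-idx1"} as the first element seeds the result with all concrete indices and then removes "idx1".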
+
+ public boolean hasIndex(String index) {
+ return indices.containsKey(index);
+ }
+
+ public boolean hasConcreteIndex(String index) {
+ return aliasAndIndexToIndexMap.containsKey(index);
+ }
+
+ public IndexMetaData index(String index) {
+ return indices.get(index);
+ }
+
+ public ImmutableOpenMap<String, IndexMetaData> indices() {
+ return this.indices;
+ }
+
+ public ImmutableOpenMap<String, IndexMetaData> getIndices() {
+ return indices();
+ }
+
+ public ImmutableOpenMap<String, IndexTemplateMetaData> templates() {
+ return this.templates;
+ }
+
+ public ImmutableOpenMap<String, IndexTemplateMetaData> getTemplates() {
+ return this.templates;
+ }
+
+ public ImmutableOpenMap<String, Custom> customs() {
+ return this.customs;
+ }
+
+ public ImmutableOpenMap<String, Custom> getCustoms() {
+ return this.customs;
+ }
+
+ public <T extends Custom> T custom(String type) {
+ return (T) customs.get(type);
+ }
+
+ public int totalNumberOfShards() {
+ return this.totalNumberOfShards;
+ }
+
+ public int getTotalNumberOfShards() {
+ return totalNumberOfShards();
+ }
+
+ public int numberOfShards() {
+ return this.numberOfShards;
+ }
+
+ public int getNumberOfShards() {
+ return numberOfShards();
+ }
+
+
+ /**
+ * Iterates through the list of indices and selects the effective list of filtering aliases for the
+ * given index.
+ * <p/>
+ * <p>Only aliases with filters are returned. If the indices list contains a non-filtering reference to
+ * the index itself, or if no filtering is required, <tt>null</tt> is returned.</p>
+ */
+ public String[] filteringAliases(String index, String... indicesOrAliases) {
+ // expand the aliases wildcard
+ indicesOrAliases = convertFromWildcards(indicesOrAliases, IndicesOptions.lenient());
+
+ if (isAllIndices(indicesOrAliases)) {
+ return null;
+ }
+ // optimize for the most common single index/alias scenario
+ if (indicesOrAliases.length == 1) {
+ String alias = indicesOrAliases[0];
+ IndexMetaData indexMetaData = this.indices.get(index);
+ if (indexMetaData == null) {
+ // Shouldn't happen
+ throw new IndexMissingException(new Index(index));
+ }
+ AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias);
+ boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired();
+ if (!filteringRequired) {
+ return null;
+ }
+ return new String[]{alias};
+ }
+ List<String> filteringAliases = null;
+ for (String alias : indicesOrAliases) {
+ if (alias.equals(index)) {
+ return null;
+ }
+
+ IndexMetaData indexMetaData = this.indices.get(index);
+ if (indexMetaData == null) {
+ // Shouldn't happen
+ throw new IndexMissingException(new Index(index));
+ }
+
+ AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias);
+ // Check that this is an alias for the current index
+ // Otherwise - skip it
+ if (aliasMetaData != null) {
+ boolean filteringRequired = aliasMetaData.filteringRequired();
+ if (filteringRequired) {
+ // If filtering required - add it to the list of filters
+ if (filteringAliases == null) {
+ filteringAliases = newArrayList();
+ }
+ filteringAliases.add(alias);
+ } else {
+ // If not, we have a non-filtering alias for this index - no filtering needed
+ return null;
+ }
+ }
+ }
+ if (filteringAliases == null) {
+ return null;
+ }
+ return filteringAliases.toArray(new String[filteringAliases.size()]);
+ }
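+
+ // Example sketch (illustration): filteringAliases("idx1", "fa1", "fa2"), where both are
+ // filtered aliases of "idx1", yields {"fa1", "fa2"}; if the arguments also name "idx1" itself
+ // (a non-filtering reference), null is returned and no alias filter is applied.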
+
+ /**
+ * Identifies whether the array containing index names given as argument refers to all indices.
+ * An empty or null array identifies all indices.
+ *
+ * @param aliasesOrIndices the array containing index names
+ * @return true if the provided array maps to all indices, false otherwise
+ */
+ public boolean isAllIndices(String[] aliasesOrIndices) {
+ return aliasesOrIndices == null || aliasesOrIndices.length == 0 || isExplicitAllPattern(aliasesOrIndices);
+ }
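+
+ // Example sketch (illustration): isAllIndices(null), isAllIndices(new String[0]) and
+ // isAllIndices(new String[]{"_all"}) all return true, while new String[]{"idx1"} returns false.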
+
+ /**
+ * Identifies whether the array containing type names given as argument refers to all types.
+ * An empty or null array identifies all types.
+ *
+ * @param types the array containing type names
+ * @return true if the provided array maps to all types, false otherwise
+ */
+ public boolean isAllTypes(String[] types) {
+ return types == null || types.length == 0 || isExplicitAllPattern(types);
+ }
+
+ /**
+ * Identifies whether the array containing index names given as argument explicitly refers to all indices.
+ * An empty or null array doesn't explicitly map to all indices.
+ *
+ * @param aliasesOrIndices the array containing index names
+ * @return true if the provided array explicitly maps to all indices, false otherwise
+ */
+ public boolean isExplicitAllPattern(String[] aliasesOrIndices) {
+ return aliasesOrIndices != null && aliasesOrIndices.length == 1 && "_all".equals(aliasesOrIndices[0]);
+ }
+
+ /**
+ * Identifies whether the first argument (an array containing index names) is a pattern that matches all indices
+ *
+ * @param indicesOrAliases the array containing index names
+ * @param concreteIndices array containing the concrete indices that the first argument refers to
+ * @return true if the first argument is a pattern that maps to all available indices, false otherwise
+ */
+ public boolean isPatternMatchingAllIndices(String[] indicesOrAliases, String[] concreteIndices) {
+ // if we end up matching on all indices, check if it's a wildcard parameter or a "-something" structure
+ if (concreteIndices.length == concreteAllIndices().length && indicesOrAliases.length > 0) {
+
+ //we might have something like /-test1,+test1 that would identify all indices
+ //or something like /-test1 with test1 index missing and IndicesOptions.lenient()
+ if (indicesOrAliases[0].charAt(0) == '-') {
+ return true;
+ }
+
+ //otherwise we check if there's any simple regex
+ for (String indexOrAlias : indicesOrAliases) {
+ if (Regex.isSimpleMatchPattern(indexOrAlias)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * @param concreteIndex The concrete index to check if routing is required
+ * @param type The type to check if routing is required
+ * @return Whether routing is required according to the mapping for the specified index and type
+ */
+ public boolean routingRequired(String concreteIndex, String type) {
+ IndexMetaData indexMetaData = indices.get(concreteIndex);
+ if (indexMetaData != null) {
+ MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type);
+ if (mappingMetaData != null) {
+ return mappingMetaData.routing().required();
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public UnmodifiableIterator<IndexMetaData> iterator() {
+ return indices.valuesIt();
+ }
+
+ public static boolean isGlobalStateEquals(MetaData metaData1, MetaData metaData2) {
+ if (!metaData1.persistentSettings.equals(metaData2.persistentSettings)) {
+ return false;
+ }
+ if (!metaData1.templates.equals(metaData2.templates())) {
+ return false;
+ }
+ // Check if any persistent metadata needs to be saved
+ int customCount1 = 0;
+ for (ObjectObjectCursor<String, Custom> cursor : metaData1.customs) {
+ if (customFactories.get(cursor.key).isPersistent()) {
+ if (!cursor.value.equals(metaData2.custom(cursor.key))) return false;
+ customCount1++;
+ }
+ }
+ int customCount2 = 0;
+ for (ObjectObjectCursor<String, Custom> cursor : metaData2.customs) {
+ if (customFactories.get(cursor.key).isPersistent()) {
+ customCount2++;
+ }
+ }
+ if (customCount1 != customCount2) return false;
+ return true;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static Builder builder(MetaData metaData) {
+ return new Builder(metaData);
+ }
+
+ public static class Builder {
+
+ private String uuid;
+ private long version;
+
+ private Settings transientSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ private Settings persistentSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private final ImmutableOpenMap.Builder<String, IndexMetaData> indices;
+ private final ImmutableOpenMap.Builder<String, IndexTemplateMetaData> templates;
+ private final ImmutableOpenMap.Builder<String, Custom> customs;
+
+ public Builder() {
+ uuid = "_na_";
+ indices = ImmutableOpenMap.builder();
+ templates = ImmutableOpenMap.builder();
+ customs = ImmutableOpenMap.builder();
+ }
+
+ public Builder(MetaData metaData) {
+ this.uuid = metaData.uuid;
+ this.transientSettings = metaData.transientSettings;
+ this.persistentSettings = metaData.persistentSettings;
+ this.version = metaData.version;
+ this.indices = ImmutableOpenMap.builder(metaData.indices);
+ this.templates = ImmutableOpenMap.builder(metaData.templates);
+ this.customs = ImmutableOpenMap.builder(metaData.customs);
+ }
+
+ public Builder put(IndexMetaData.Builder indexMetaDataBuilder) {
+ // we know it's a new one; increment the version and store
+ indexMetaDataBuilder.version(indexMetaDataBuilder.version() + 1);
+ IndexMetaData indexMetaData = indexMetaDataBuilder.build();
+ indices.put(indexMetaData.index(), indexMetaData);
+ return this;
+ }
+
+ public Builder put(IndexMetaData indexMetaData, boolean incrementVersion) {
+ if (indices.get(indexMetaData.index()) == indexMetaData) {
+ return this;
+ }
+ // if we put a new index metadata, increment its version
+ if (incrementVersion) {
+ indexMetaData = IndexMetaData.builder(indexMetaData).version(indexMetaData.version() + 1).build();
+ }
+ indices.put(indexMetaData.index(), indexMetaData);
+ return this;
+ }
+
+ public IndexMetaData get(String index) {
+ return indices.get(index);
+ }
+
+ public Builder remove(String index) {
+ indices.remove(index);
+ return this;
+ }
+
+ public Builder removeAllIndices() {
+ indices.clear();
+ return this;
+ }
+
+ public Builder put(IndexTemplateMetaData.Builder template) {
+ return put(template.build());
+ }
+
+ public Builder put(IndexTemplateMetaData template) {
+ templates.put(template.name(), template);
+ return this;
+ }
+
+ public Builder removeTemplate(String templateName) {
+ templates.remove(templateName);
+ return this;
+ }
+
+ public Custom getCustom(String type) {
+ return customs.get(type);
+ }
+
+ public Builder putCustom(String type, Custom custom) {
+ customs.put(type, custom);
+ return this;
+ }
+
+ public Builder removeCustom(String type) {
+ customs.remove(type);
+ return this;
+ }
+
+ public Builder updateSettings(Settings settings, String... indices) {
+ if (indices == null || indices.length == 0) {
+ indices = this.indices.keys().toArray(String.class);
+ }
+ for (String index : indices) {
+ IndexMetaData indexMetaData = this.indices.get(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ put(IndexMetaData.builder(indexMetaData)
+ .settings(settingsBuilder().put(indexMetaData.settings()).put(settings)));
+ }
+ return this;
+ }
+
+ public Builder updateNumberOfReplicas(int numberOfReplicas, String... indices) {
+ if (indices == null || indices.length == 0) {
+ indices = this.indices.keys().toArray(String.class);
+ }
+ for (String index : indices) {
+ IndexMetaData indexMetaData = this.indices.get(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ put(IndexMetaData.builder(indexMetaData).numberOfReplicas(numberOfReplicas));
+ }
+ return this;
+ }
+
+ public Settings transientSettings() {
+ return this.transientSettings;
+ }
+
+ public Builder transientSettings(Settings settings) {
+ this.transientSettings = settings;
+ return this;
+ }
+
+ public Settings persistentSettings() {
+ return this.persistentSettings;
+ }
+
+ public Builder persistentSettings(Settings settings) {
+ this.persistentSettings = settings;
+ return this;
+ }
+
+ public Builder version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public Builder generateUuidIfNeeded() {
+ if (uuid.equals("_na_")) {
+ uuid = Strings.randomBase64UUID();
+ }
+ return this;
+ }
+
+ public MetaData build() {
+ return new MetaData(uuid, version, transientSettings, persistentSettings, indices.build(), templates.build(), customs.build());
+ }
+
+ public static String toXContent(MetaData metaData) throws IOException {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject();
+ toXContent(metaData, builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ return builder.string();
+ }
+
+ public static void toXContent(MetaData metaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ boolean globalPersistentOnly = params.paramAsBoolean(GLOBAL_PERSISTENT_ONLY_PARAM, false);
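+ // when set, skip transient settings, index metadata and non-persistent customs, presumably for persisting only the global state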
+ builder.startObject("meta-data");
+
+ builder.field("version", metaData.version());
+ builder.field("uuid", metaData.uuid);
+
+ if (!metaData.persistentSettings().getAsMap().isEmpty()) {
+ builder.startObject("settings");
+ for (Map.Entry<String, String> entry : metaData.persistentSettings().getAsMap().entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ builder.endObject();
+ }
+
+ if (!globalPersistentOnly && !metaData.transientSettings().getAsMap().isEmpty()) {
+ builder.startObject("transient_settings");
+ for (Map.Entry<String, String> entry : metaData.transientSettings().getAsMap().entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ builder.endObject();
+ }
+
+ builder.startObject("templates");
+ for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
+ IndexTemplateMetaData.Builder.toXContent(cursor.value, builder, params);
+ }
+ builder.endObject();
+
+ if (!globalPersistentOnly && !metaData.indices().isEmpty()) {
+ builder.startObject("indices");
+ for (IndexMetaData indexMetaData : metaData) {
+ IndexMetaData.Builder.toXContent(indexMetaData, builder, params);
+ }
+ builder.endObject();
+ }
+
+ for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) {
+ Custom.Factory factory = lookupFactorySafe(cursor.key);
+ if (!globalPersistentOnly || factory.isPersistent()) {
+ builder.startObject(cursor.key);
+ factory.toXContent(cursor.value, builder, params);
+ builder.endObject();
+ }
+ }
+
+ builder.endObject();
+ }
+
+ public static MetaData fromXContent(XContentParser parser) throws IOException {
+ Builder builder = new Builder();
+
+ // we might get here after the meta-data element, or on a fresh parser
+ XContentParser.Token token = parser.currentToken();
+ String currentFieldName = parser.currentName();
+ if (!"meta-data".equals(currentFieldName)) {
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ // move to the field name (meta-data)
+ token = parser.nextToken();
+ // move to the next object
+ token = parser.nextToken();
+ }
+ currentFieldName = parser.currentName();
+ if (token == null) {
+ // no data...
+ return builder.build();
+ }
+ }
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("settings".equals(currentFieldName)) {
+ builder.persistentSettings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build());
+ } else if ("indices".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ builder.put(IndexMetaData.Builder.fromXContent(parser), false);
+ }
+ } else if ("templates".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ builder.put(IndexTemplateMetaData.Builder.fromXContent(parser));
+ }
+ } else {
+ // check whether it's a registered custom metadata type
+ Custom.Factory<Custom> factory = lookupFactory(currentFieldName);
+ if (factory == null) {
+ // TODO: warn about the unknown custom type instead of silently skipping it
+ parser.skipChildren();
+ } else {
+ builder.putCustom(factory.type(), factory.fromXContent(parser));
+ }
+ }
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ builder.version = parser.longValue();
+ } else if ("uuid".equals(currentFieldName)) {
+ builder.uuid = parser.text();
+ }
+ }
+ }
+ return builder.build();
+ }
+
+ public static MetaData readFrom(StreamInput in) throws IOException {
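+ // wire format: version, uuid, transient settings, persistent settings, index metadata, templates, customs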
+ Builder builder = new Builder();
+ builder.version = in.readLong();
+ builder.uuid = in.readString();
+ builder.transientSettings(readSettingsFromStream(in));
+ builder.persistentSettings(readSettingsFromStream(in));
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ builder.put(IndexMetaData.Builder.readFrom(in), false);
+ }
+ size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ builder.put(IndexTemplateMetaData.Builder.readFrom(in));
+ }
+ int customSize = in.readVInt();
+ for (int i = 0; i < customSize; i++) {
+ String type = in.readString();
+ Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
+ builder.putCustom(type, customIndexMetaData);
+ }
+ return builder.build();
+ }
+
+ public static void writeTo(MetaData metaData, StreamOutput out) throws IOException {
+ out.writeLong(metaData.version);
+ out.writeString(metaData.uuid);
+ writeSettingsToStream(metaData.transientSettings(), out);
+ writeSettingsToStream(metaData.persistentSettings(), out);
+ out.writeVInt(metaData.indices.size());
+ for (IndexMetaData indexMetaData : metaData) {
+ IndexMetaData.Builder.writeTo(indexMetaData, out);
+ }
+ out.writeVInt(metaData.templates.size());
+ for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates.values()) {
+ IndexTemplateMetaData.Builder.writeTo(cursor.value, out);
+ }
+ out.writeVInt(metaData.customs().size());
+ for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) {
+ out.writeString(cursor.key);
+ lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
new file mode 100644
index 0000000..812bdb2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -0,0 +1,467 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.lucene.util.CollectionUtil;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.river.RiverIndexName;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ * Service responsible for submitting create index requests
+ */
+public class MetaDataCreateIndexService extends AbstractComponent {
+
+ private final Environment environment;
+ private final ThreadPool threadPool;
+ private final ClusterService clusterService;
+ private final IndicesService indicesService;
+ private final AllocationService allocationService;
+ private final MetaDataService metaDataService;
+ private final Version version;
+ private final String riverIndexName;
+
+ @Inject
+ public MetaDataCreateIndexService(Settings settings, Environment environment, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService,
+ AllocationService allocationService, MetaDataService metaDataService, Version version, @RiverIndexName String riverIndexName) {
+ super(settings);
+ this.environment = environment;
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ this.allocationService = allocationService;
+ this.metaDataService = metaDataService;
+ this.version = version;
+ this.riverIndexName = riverIndexName;
+ }
+
+ public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+ ImmutableSettings.Builder updatedSettingsBuilder = ImmutableSettings.settingsBuilder();
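+ // normalize the request settings so that every key carries the "index." prefix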
+ for (Map.Entry<String, String> entry : request.settings().getAsMap().entrySet()) {
+ if (!entry.getKey().startsWith("index.")) {
+ updatedSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
+ } else {
+ updatedSettingsBuilder.put(entry.getKey(), entry.getValue());
+ }
+ }
+ request.settings(updatedSettingsBuilder.build());
+
+ // we lock here, and not within the cluster service callback since we don't want to
+ // block the whole cluster state handling
+ final Semaphore mdLock = metaDataService.indexMetaDataLock(request.index());
+
+ // quick check to see if we can acquire a lock, otherwise spawn to a thread pool
+ if (mdLock.tryAcquire()) {
+ createIndex(request, listener, mdLock);
+ return;
+ }
+
+ threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ if (!mdLock.tryAcquire(request.masterNodeTimeout().nanos(), TimeUnit.NANOSECONDS)) {
+ listener.onFailure(new ProcessClusterEventTimeoutException(request.masterNodeTimeout(), "acquire index lock"));
+ return;
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt(); // restore the interrupt status
+ listener.onFailure(e);
+ return;
+ }
+
+ createIndex(request, listener, mdLock);
+ }
+ });
+ }
+
+ public void validateIndexName(String index, ClusterState state) throws ElasticsearchException {
+ if (state.routingTable().hasIndex(index)) {
+ throw new IndexAlreadyExistsException(new Index(index));
+ }
+ if (state.metaData().hasIndex(index)) {
+ throw new IndexAlreadyExistsException(new Index(index));
+ }
+ if (!Strings.validFileName(index)) {
+ throw new InvalidIndexNameException(new Index(index), index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
+ }
+ if (index.contains("#")) {
+ throw new InvalidIndexNameException(new Index(index), index, "must not contain '#'");
+ }
+ if (!index.equals(riverIndexName) && index.charAt(0) == '_') {
+ throw new InvalidIndexNameException(new Index(index), index, "must not start with '_'");
+ }
+ if (!index.toLowerCase(Locale.ROOT).equals(index)) {
+ throw new InvalidIndexNameException(new Index(index), index, "must be lowercase");
+ }
+ if (state.metaData().aliases().containsKey(index)) {
+ throw new InvalidIndexNameException(new Index(index), index, "already exists as alias");
+ }
+ }
+
+ private void createIndex(final CreateIndexClusterStateUpdateRequest request, final ClusterStateUpdateListener listener, final Semaphore mdLock) {
+ clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ mdLock.release();
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ mdLock.release();
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ mdLock.release();
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ boolean indexCreated = false;
+ String failureReason = null;
+ try {
+ validate(request, currentState);
+
+ // templates are only looked up when it's an API call (a new index)
+ // find matching templates; higher-order templates match best
+ List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
+
+ Map<String, Custom> customs = Maps.newHashMap();
+
+ // add the request mapping
+ Map<String, Map<String, Object>> mappings = Maps.newHashMap();
+
+ for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
+ mappings.put(entry.getKey(), parseMapping(entry.getValue()));
+ }
+
+ for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
+ customs.put(entry.getKey(), entry.getValue());
+ }
+
+ // apply templates, merging their mappings into the request mappings where they exist
+ for (IndexTemplateMetaData template : templates) {
+ for (ObjectObjectCursor<String, CompressedString> cursor : template.mappings()) {
+ if (mappings.containsKey(cursor.key)) {
+ XContentHelper.mergeDefaults(mappings.get(cursor.key), parseMapping(cursor.value.string()));
+ } else {
+ mappings.put(cursor.key, parseMapping(cursor.value.string()));
+ }
+ }
+ // handle custom metadata carried by the template
+ for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
+ String type = cursor.key;
+ IndexMetaData.Custom custom = cursor.value;
+ IndexMetaData.Custom existing = customs.get(type);
+ if (existing == null) {
+ customs.put(type, custom);
+ } else {
+ IndexMetaData.Custom merged = IndexMetaData.lookupFactorySafe(type).merge(existing, custom);
+ customs.put(type, merged);
+ }
+ }
+ }
+
+ // now add config level mappings
+ File mappingsDir = new File(environment.configFile(), "mappings");
+ if (mappingsDir.exists() && mappingsDir.isDirectory()) {
+ // first index level
+ File indexMappingsDir = new File(mappingsDir, request.index());
+ if (indexMappingsDir.exists() && indexMappingsDir.isDirectory()) {
+ addMappings(mappings, indexMappingsDir);
+ }
+
+ // second is the _default mapping
+ File defaultMappingsDir = new File(mappingsDir, "_default");
+ if (defaultMappingsDir.exists() && defaultMappingsDir.isDirectory()) {
+ addMappings(mappings, defaultMappingsDir);
+ }
+ }
+
+ ImmutableSettings.Builder indexSettingsBuilder = settingsBuilder();
+ // apply template settings in reverse order, so the first (best matching) templates are applied last and win
+ for (int i = templates.size() - 1; i >= 0; i--) {
+ indexSettingsBuilder.put(templates.get(i).settings());
+ }
+ // now, put the request settings, so they override templates
+ indexSettingsBuilder.put(request.settings());
+
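+ // default shard count: 1 for the river index, 5 for regular indices (overridable via node settings)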
+ if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
+ if (request.index().equals(riverIndexName)) {
+ indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 1));
+ } else {
+ indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
+ }
+ }
+ if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
+ // both the river index and regular indices default to 1 replica
+ indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
+ }
+
+ if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
+ indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
+ }
+
+ if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
+ indexSettingsBuilder.put(SETTING_VERSION_CREATED, version);
+ }
+ indexSettingsBuilder.put(SETTING_UUID, Strings.randomBase64UUID());
+
+ Settings actualIndexSettings = indexSettingsBuilder.build();
+
+ // everything is set up; now create the index locally to verify that it is valid, then apply it
+
+ // create the index here (on the master) to validate that it can be created, and to add the mappings
+ indicesService.createIndex(request.index(), actualIndexSettings, clusterService.localNode().id());
+ indexCreated = true;
+ // now add the mappings
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ MapperService mapperService = indexService.mapperService();
+ // first, add the default mapping
+ if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
+ try {
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false);
+ } catch (Exception e) {
+ failureReason = "failed on parsing default mapping on index creation";
+ throw new MapperParsingException("mapping [" + MapperService.DEFAULT_MAPPING + "]", e);
+ }
+ }
+ for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
+ if (entry.getKey().equals(MapperService.DEFAULT_MAPPING)) {
+ continue;
+ }
+ try {
+ // apply the default mapping here; it's the first time we parse it
+ mapperService.merge(entry.getKey(), new CompressedString(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true);
+ } catch (Exception e) {
+ failureReason = "failed on parsing mappings on index creation";
+ throw new MapperParsingException("mapping [" + entry.getKey() + "]", e);
+ }
+ }
+ // now, update the mappings with the actual source
+ Map<String, MappingMetaData> mappingsMetaData = Maps.newHashMap();
+ for (DocumentMapper mapper : mapperService) {
+ MappingMetaData mappingMd = new MappingMetaData(mapper);
+ mappingsMetaData.put(mapper.type(), mappingMd);
+ }
+
+ final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings);
+ for (MappingMetaData mappingMd : mappingsMetaData.values()) {
+ indexMetaDataBuilder.putMapping(mappingMd);
+ }
+ for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
+ indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
+ }
+ indexMetaDataBuilder.state(request.state());
+ final IndexMetaData indexMetaData;
+ try {
+ indexMetaData = indexMetaDataBuilder.build();
+ } catch (Exception e) {
+ failureReason = "failed to build index metadata";
+ throw e;
+ }
+
+ MetaData newMetaData = MetaData.builder(currentState.metaData())
+ .put(indexMetaData, false)
+ .build();
+
+ logger.info("[{}] creating index, cause [{}], shards [{}]/[{}], mappings {}", request.index(), request.cause(), indexMetaData.numberOfShards(), indexMetaData.numberOfReplicas(), mappings.keySet());
+
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ if (!request.blocks().isEmpty()) {
+ for (ClusterBlock block : request.blocks()) {
+ blocks.addIndexBlock(request.index(), block);
+ }
+ }
+ if (request.state() == State.CLOSE) {
+ blocks.addIndexBlock(request.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+ }
+
+ ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
+
+ if (request.state() == State.OPEN) {
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable())
+ .addAsNew(updatedState.metaData().index(request.index()));
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
+ updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+ return updatedState;
+ } finally {
+ if (indexCreated) {
+ // Index was already partially created - need to clean up
+ indicesService.removeIndex(request.index(), failureReason != null ? failureReason : "failed to create index");
+ }
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+ }
+
+ private Map<String, Object> parseMapping(String mappingSource) throws Exception {
+ return XContentFactory.xContent(mappingSource).createParser(mappingSource).mapAndClose();
+ }
+
+ private void addMappings(Map<String, Map<String, Object>> mappings, File mappingsDir) {
+ File[] mappingsFiles = mappingsDir.listFiles();
+ if (mappingsFiles == null) {
+ return;
+ }
+ for (File mappingFile : mappingsFiles) {
+ if (mappingFile.isHidden()) {
+ continue;
+ }
+ int lastDotIndex = mappingFile.getName().lastIndexOf('.');
+ String mappingType = lastDotIndex != -1 ? mappingFile.getName().substring(0, lastDotIndex) : mappingFile.getName();
+ try {
+ String mappingSource = Streams.copyToString(new InputStreamReader(new FileInputStream(mappingFile), Charsets.UTF_8));
+ if (mappings.containsKey(mappingType)) {
+ XContentHelper.mergeDefaults(mappings.get(mappingType), parseMapping(mappingSource));
+ } else {
+ mappings.put(mappingType, parseMapping(mappingSource));
+ }
+ } catch (Exception e) {
+ logger.warn("failed to read / parse mapping [" + mappingType + "] from location [" + mappingFile + "], ignoring...", e);
+ }
+ }
+ }
+
+ private List<IndexTemplateMetaData> findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state) {
+ List<IndexTemplateMetaData> templates = Lists.newArrayList();
+ for (ObjectCursor<IndexTemplateMetaData> cursor : state.metaData().templates().values()) {
+ IndexTemplateMetaData template = cursor.value;
+ if (Regex.simpleMatch(template.template(), request.index())) {
+ templates.add(template);
+ }
+ }
+
+ // see if we have templates defined under config
+ File templatesDir = new File(environment.configFile(), "templates");
+ if (templatesDir.exists() && templatesDir.isDirectory()) {
+ File[] templatesFiles = templatesDir.listFiles();
+ if (templatesFiles != null) {
+ for (File templatesFile : templatesFiles) {
+ XContentParser parser = null;
+ try {
+ byte[] templatesData = Streams.copyToByteArray(templatesFile);
+ parser = XContentHelper.createParser(templatesData, 0, templatesData.length);
+ IndexTemplateMetaData template = IndexTemplateMetaData.Builder.fromXContent(parser);
+ if (Regex.simpleMatch(template.template(), request.index())) {
+ templates.add(template);
+ }
+ } catch (Exception e) {
+ logger.warn("[{}] failed to read template [{}] from config", e, request.index(), templatesFile.getAbsolutePath());
+ } finally {
+ IOUtils.closeWhileHandlingException(parser);
+ }
+ }
+ }
+ }
+
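+ // sort by descending template order, so that higher-order (better matching) templates come first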
+ CollectionUtil.timSort(templates, new Comparator<IndexTemplateMetaData>() {
+ @Override
+ public int compare(IndexTemplateMetaData o1, IndexTemplateMetaData o2) {
+ return o2.order() - o1.order();
+ }
+ });
+ return templates;
+ }
+
+ private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws ElasticsearchException {
+ validateIndexName(request.index(), state);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
new file mode 100644
index 0000000..cdeac11
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
+import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Service responsible for submitting delete index requests
+ */
+public class MetaDataDeleteIndexService extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final AllocationService allocationService;
+
+ private final NodeIndexDeletedAction nodeIndexDeletedAction;
+
+ private final MetaDataService metaDataService;
+
+ @Inject
+ public MetaDataDeleteIndexService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService,
+ NodeIndexDeletedAction nodeIndexDeletedAction, MetaDataService metaDataService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.allocationService = allocationService;
+ this.nodeIndexDeletedAction = nodeIndexDeletedAction;
+ this.metaDataService = metaDataService;
+ }
+
+ public void deleteIndex(final Request request, final Listener userListener) {
+ // we lock here, and not within the cluster service callback since we don't want to
+ // block the whole cluster state handling
+ final Semaphore mdLock = metaDataService.indexMetaDataLock(request.index);
+
+ // quick check to see if we can acquire a lock, otherwise spawn to a thread pool
+ if (mdLock.tryAcquire()) {
+ deleteIndex(request, userListener, mdLock);
+ return;
+ }
+
+ threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ if (!mdLock.tryAcquire(request.masterTimeout.nanos(), TimeUnit.NANOSECONDS)) {
+ userListener.onFailure(new ProcessClusterEventTimeoutException(request.masterTimeout, "acquire index lock"));
+ return;
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt(); // restore the interrupt status
+ userListener.onFailure(e);
+ return;
+ }
+
+ deleteIndex(request, userListener, mdLock);
+ }
+ });
+ }
+
+ private void deleteIndex(final Request request, final Listener userListener, Semaphore mdLock) {
+ final DeleteIndexListener listener = new DeleteIndexListener(mdLock, userListener);
+ clusterService.submitStateUpdateTask("delete-index [" + request.index + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterTimeout;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(final ClusterState currentState) {
+ if (!currentState.metaData().hasConcreteIndex(request.index)) {
+ throw new IndexMissingException(new Index(request.index));
+ }
+
+ logger.info("[{}] deleting index", request.index);
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
+ routingTableBuilder.remove(request.index);
+
+ MetaData newMetaData = MetaData.builder(currentState.metaData())
+ .remove(request.index)
+ .build();
+
+ RoutingAllocation.Result routingResult = allocationService.reroute(
+ ClusterState.builder(currentState).routingTable(routingTableBuilder).metaData(newMetaData).build());
+
+ ClusterBlocks blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeIndexBlocks(request.index).build();
+
+ // wait for an event from every node confirming the index has been removed from its metadata...
+ int count = currentState.nodes().size();
+ // ...plus a notification from every *data* node that the index store was deleted
+ count += currentState.nodes().dataNodes().size();
+ final AtomicInteger counter = new AtomicInteger(count);
+ // this listener will be notified once we get back a notification based on the cluster state change below.
+ final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() {
+ @Override
+ public void onNodeIndexDeleted(String index, String nodeId) {
+ if (index.equals(request.index)) {
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(new Response(true));
+ nodeIndexDeletedAction.remove(this);
+ }
+ }
+ }
+
+ @Override
+ public void onNodeIndexStoreDeleted(String index, String nodeId) {
+ if (index.equals(request.index)) {
+ if (counter.decrementAndGet() == 0) {
+ listener.onResponse(new Response(true));
+ nodeIndexDeletedAction.remove(this);
+ }
+ }
+ }
+ };
+ nodeIndexDeletedAction.add(nodeIndexDeleteListener);
+
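+ // if not all deletion notifications arrive within the request timeout, respond with acknowledged=false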
+ listener.future = threadPool.schedule(request.timeout, ThreadPool.Names.SAME, new Runnable() {
+ @Override
+ public void run() {
+ listener.onResponse(new Response(false));
+ nodeIndexDeletedAction.remove(nodeIndexDeleteListener);
+ }
+ });
+
+ return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+ }
+
+ class DeleteIndexListener implements Listener {
+
+ private final AtomicBoolean notified = new AtomicBoolean();
+ private final Semaphore mdLock;
+ private final Listener listener;
+ volatile ScheduledFuture<?> future;
+
+ private DeleteIndexListener(Semaphore mdLock, Listener listener) {
+ this.mdLock = mdLock;
+ this.listener = listener;
+ }
+
+ @Override
+ public void onResponse(final Response response) {
+ if (notified.compareAndSet(false, true)) {
+ mdLock.release();
+ if (future != null) {
+ future.cancel(false);
+ }
+ listener.onResponse(response);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ if (notified.compareAndSet(false, true)) {
+ mdLock.release();
+ if (future != null) {
+ future.cancel(false);
+ }
+ listener.onFailure(t);
+ }
+ }
+ }
+
+
+ public static interface Listener {
+
+ void onResponse(Response response);
+
+ void onFailure(Throwable t);
+ }
+
+ public static class Request {
+
+ final String index;
+
+ TimeValue timeout = TimeValue.timeValueSeconds(10);
+ TimeValue masterTimeout = MasterNodeOperationRequest.DEFAULT_MASTER_NODE_TIMEOUT;
+
+ public Request(String index) {
+ this.index = index;
+ }
+
+ public Request timeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return this;
+ }
+
+ public Request masterTimeout(TimeValue masterTimeout) {
+ this.masterTimeout = masterTimeout;
+ return this;
+ }
+ }
+
+ public static class Response {
+ private final boolean acknowledged;
+
+ public Response(boolean acknowledged) {
+ this.acknowledged = acknowledged;
+ }
+
+ public boolean acknowledged() {
+ return acknowledged;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
new file mode 100644
index 0000000..b8169ed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.InvalidAliasNameException;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Service responsible for submitting add and remove alias requests
+ */
+public class MetaDataIndexAliasesService extends AbstractComponent {
+
+ private final ClusterService clusterService;
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ }
+
+ public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+ clusterService.submitStateUpdateTask("index-aliases", Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(final ClusterState currentState) {
+ List<String> indicesToClose = Lists.newArrayList();
+ Map<String, IndexService> indices = Maps.newHashMap();
+ try {
+ for (AliasAction aliasAction : request.actions()) {
+ if (!Strings.hasText(aliasAction.alias()) || !Strings.hasText(aliasAction.index())) {
+ throw new ElasticsearchIllegalArgumentException("Index name and alias name are required");
+ }
+ if (!currentState.metaData().hasIndex(aliasAction.index())) {
+ throw new IndexMissingException(new Index(aliasAction.index()));
+ }
+ if (currentState.metaData().hasIndex(aliasAction.alias())) {
+ throw new InvalidAliasNameException(new Index(aliasAction.index()), aliasAction.alias(), "an index exists with the same name as the alias");
+ }
+ if (aliasAction.indexRouting() != null && aliasAction.indexRouting().indexOf(',') != -1) {
+ throw new ElasticsearchIllegalArgumentException("alias [" + aliasAction.alias() + "] has several routing values associated with it");
+ }
+ }
+
+ boolean changed = false;
+ MetaData.Builder builder = MetaData.builder(currentState.metaData());
+ for (AliasAction aliasAction : request.actions()) {
+ IndexMetaData indexMetaData = builder.get(aliasAction.index());
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(aliasAction.index()));
+ }
+ // TODO: not copy (putAll)
+ IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
+ if (aliasAction.actionType() == AliasAction.Type.ADD) {
+ String filter = aliasAction.filter();
+ if (Strings.hasLength(filter)) {
+ // parse the filter, in order to validate it
+ IndexService indexService = indices.get(indexMetaData.index());
+ if (indexService == null) {
+ indexService = indicesService.indexService(indexMetaData.index());
+ if (indexService == null) {
+ // temporarily create the index and add its mappings so we can parse the filter
+ try {
+ indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), clusterService.localNode().id());
+ if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
+ indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false);
+ }
+ for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
+ MappingMetaData mappingMetaData = cursor.value;
+ indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false);
+ }
+ } catch (Exception e) {
+ logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.index());
+ continue;
+ }
+ indicesToClose.add(indexMetaData.index());
+ }
+ indices.put(indexMetaData.index(), indexService);
+ }
+
+ // now, parse the filter
+ IndexQueryParserService indexQueryParser = indexService.queryParserService();
+ try {
+ XContentParser parser = XContentFactory.xContent(filter).createParser(filter);
+ try {
+ indexQueryParser.parseInnerFilter(parser);
+ } finally {
+ parser.close();
+ }
+ } catch (Throwable e) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse filter for [" + aliasAction.alias() + "]", e);
+ }
+ }
+ AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(
+ aliasAction.alias())
+ .filter(filter)
+ .indexRouting(aliasAction.indexRouting())
+ .searchRouting(aliasAction.searchRouting())
+ .build();
+ // Check if this alias already exists
+ AliasMetaData aliasMd = indexMetaData.aliases().get(aliasAction.alias());
+ if (aliasMd != null && aliasMd.equals(newAliasMd)) {
+ // It's the same alias - ignore it
+ continue;
+ }
+ indexMetaDataBuilder.putAlias(newAliasMd);
+ } else if (aliasAction.actionType() == AliasAction.Type.REMOVE) {
+ if (!indexMetaData.aliases().containsKey(aliasAction.alias())) {
+ // This alias doesn't exist - ignore
+ continue;
+ }
+ indexMetaDataBuilder.removerAlias(aliasAction.alias());
+ }
+ changed = true;
+ builder.put(indexMetaDataBuilder);
+ }
+
+ if (changed) {
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(builder).build();
+ // even though actions were applied, they may have resulted in no actual change to the metadata,
+ // e.g. removing and re-adding the same alias on the same index
+ if (!updatedState.metaData().aliases().equals(currentState.metaData().aliases())) {
+ return updatedState;
+ }
+ }
+ return currentState;
+ } finally {
+ for (String index : indicesToClose) {
+ indicesService.removeIndex(index, "created for alias processing");
+ }
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
new file mode 100644
index 0000000..fc7299d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException;
+import org.elasticsearch.rest.RestStatus;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Service responsible for submitting open/close index requests
+ */
+public class MetaDataIndexStateService extends AbstractComponent {
+
+ public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE);
+
+ private final ClusterService clusterService;
+
+ private final AllocationService allocationService;
+
+ @Inject
+ public MetaDataIndexStateService(Settings settings, ClusterService clusterService, AllocationService allocationService) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.allocationService = allocationService;
+ }
+
+ public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+ if (request.indices() == null || request.indices().length == 0) {
+ throw new ElasticsearchIllegalArgumentException("Index name is required");
+ }
+
+ final String indicesAsString = Arrays.toString(request.indices());
+ clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ List<String> indicesToClose = new ArrayList<String>();
+ for (String index : request.indices()) {
+ IndexMetaData indexMetaData = currentState.metaData().index(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+
+ if (indexMetaData.state() != IndexMetaData.State.CLOSE) {
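+ // only allow closing an index whose primaries have all been allocated at least once (post API)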
+ IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
+ for (IndexShardRoutingTable shard : indexRoutingTable) {
+ if (!shard.primaryAllocatedPostApi()) {
+ throw new IndexPrimaryShardNotAllocatedException(new Index(index));
+ }
+ }
+ indicesToClose.add(index);
+ }
+ }
+
+ if (indicesToClose.isEmpty()) {
+ return currentState;
+ }
+
+ logger.info("closing indices [{}]", indicesAsString);
+
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
+ .blocks(currentState.blocks());
+ for (String index : indicesToClose) {
+ mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE));
+ blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK);
+ }
+
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
+
+ RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
+ for (String index : indicesToClose) {
+ rtBuilder.remove(index);
+ }
+
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build());
+ //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
+ return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+
+ public void openIndex(final OpenIndexClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+ if (request.indices() == null || request.indices().length == 0) {
+ throw new ElasticsearchIllegalArgumentException("Index name is required");
+ }
+
+ final String indicesAsString = Arrays.toString(request.indices());
+ clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ List<String> indicesToOpen = new ArrayList<String>();
+ for (String index : request.indices()) {
+ IndexMetaData indexMetaData = currentState.metaData().index(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ if (indexMetaData.state() != IndexMetaData.State.OPEN) {
+ indicesToOpen.add(index);
+ }
+ }
+
+ if (indicesToOpen.isEmpty()) {
+ return currentState;
+ }
+
+ logger.info("opening indices [{}]", indicesAsString);
+
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
+ .blocks(currentState.blocks());
+ for (String index : indicesToOpen) {
+ mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN));
+ blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK);
+ }
+
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
+
+ RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable());
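+ // re-opened indices are added back to the routing table so their shards get recovered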
+ for (String index : indicesToOpen) {
+ rtBuilder.addAsRecovery(updatedState.metaData().index(index));
+ }
+
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build());
+ //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
+ return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
new file mode 100644
index 0000000..b49bc27
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.metadata;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
+import org.elasticsearch.indices.IndexTemplateMissingException;
+import org.elasticsearch.indices.InvalidIndexTemplateException;
+
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Service responsible for submitting index template creation and removal requests
+ */
+public class MetaDataIndexTemplateService extends AbstractComponent {
+
+ private final ClusterService clusterService;
+
+ @Inject
+ public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService) {
+ super(settings);
+ this.clusterService = clusterService;
+ }
+
+ public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
+ clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterTimeout;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ Set<String> templateNames = Sets.newHashSet();
+ for (ObjectCursor<String> cursor : currentState.metaData().templates().keys()) {
+ String templateName = cursor.value;
+ if (Regex.simpleMatch(request.name, templateName)) {
+ templateNames.add(templateName);
+ }
+ }
+ if (templateNames.isEmpty()) {
+ // if it's a match-all pattern and no templates are found (we have none), don't
+ // fail with a template-missing error...
+ if (Regex.isMatchAllPattern(request.name)) {
+ return currentState;
+ }
+ throw new IndexTemplateMissingException(request.name);
+ }
+ MetaData.Builder metaData = MetaData.builder(currentState.metaData());
+ for (String templateName : templateNames) {
+ metaData.removeTemplate(templateName);
+ }
+ return ClusterState.builder(currentState).metaData(metaData).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ listener.onResponse(new RemoveResponse(true));
+ }
+ });
+ }
+
+ public void putTemplate(final PutRequest request, final PutListener listener) {
+ ImmutableSettings.Builder updatedSettingsBuilder = ImmutableSettings.settingsBuilder();
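+ // normalize the template settings so that every key carries the "index." prefix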
+ for (Map.Entry<String, String> entry : request.settings.getAsMap().entrySet()) {
+ if (!entry.getKey().startsWith("index.")) {
+ updatedSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
+ } else {
+ updatedSettingsBuilder.put(entry.getKey(), entry.getValue());
+ }
+ }
+ request.settings(updatedSettingsBuilder.build());
+
+ if (request.name == null) {
+ listener.onFailure(new ElasticsearchIllegalArgumentException("index_template must provide a name"));
+ return;
+ }
+ if (request.template == null) {
+ listener.onFailure(new ElasticsearchIllegalArgumentException("index_template must provide a template"));
+ return;
+ }
+
+ try {
+ validate(request);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ return;
+ }
+
+ IndexTemplateMetaData.Builder templateBuilder;
+ try {
+ templateBuilder = IndexTemplateMetaData.builder(request.name);
+ templateBuilder.order(request.order);
+ templateBuilder.template(request.template);
+ templateBuilder.settings(request.settings);
+ for (Map.Entry<String, String> entry : request.mappings.entrySet()) {
+ templateBuilder.putMapping(entry.getKey(), entry.getValue());
+ }
+ for (Map.Entry<String, IndexMetaData.Custom> entry : request.customs.entrySet()) {
+ templateBuilder.putCustom(entry.getKey(), entry.getValue());
+ }
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ return;
+ }
+ final IndexTemplateMetaData template = templateBuilder.build();
+
+ clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterTimeout;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (request.create && currentState.metaData().templates().containsKey(request.name)) {
+ throw new IndexTemplateAlreadyExistsException(request.name);
+ }
+ MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template);
+
+ return ClusterState.builder(currentState).metaData(builder).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ listener.onResponse(new PutResponse(true, template));
+ }
+ });
+ }
+
+ private void validate(PutRequest request) throws ElasticsearchException {
+ if (request.name.contains(" ")) {
+ throw new InvalidIndexTemplateException(request.name, "name must not contain a space");
+ }
+ if (request.name.contains(",")) {
+ throw new InvalidIndexTemplateException(request.name, "name must not contain a ','");
+ }
+ if (request.name.contains("#")) {
+ throw new InvalidIndexTemplateException(request.name, "name must not contain a '#'");
+ }
+ if (request.name.startsWith("_")) {
+ throw new InvalidIndexTemplateException(request.name, "name must not start with '_'");
+ }
+ if (!request.name.toLowerCase(Locale.ROOT).equals(request.name)) {
+ throw new InvalidIndexTemplateException(request.name, "name must be lower cased");
+ }
+ if (request.template.contains(" ")) {
+ throw new InvalidIndexTemplateException(request.name, "template must not contain a space");
+ }
+ if (request.template.contains(",")) {
+ throw new InvalidIndexTemplateException(request.name, "template must not contain a ','");
+ }
+ if (request.template.contains("#")) {
+ throw new InvalidIndexTemplateException(request.name, "template must not contain a '#'");
+ }
+ if (request.template.startsWith("_")) {
+ throw new InvalidIndexTemplateException(request.name, "template must not start with '_'");
+ }
+ if (!Strings.validFileNameExcludingAstrix(request.template)) {
+            throw new InvalidIndexTemplateException(request.name, "template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
+ }
+ }
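+    // For illustration (hypothetical values): a name of "template_1" with template "te*" passes the
+    // checks above, while "Logs Template" (space, upper case) or "_internal" would be rejected.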
+
+    public interface PutListener {
+
+ void onResponse(PutResponse response);
+
+ void onFailure(Throwable t);
+ }
+
+ public static class PutRequest {
+ final String name;
+ final String cause;
+ boolean create;
+ int order;
+ String template;
+ Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ Map<String, String> mappings = Maps.newHashMap();
+ Map<String, IndexMetaData.Custom> customs = Maps.newHashMap();
+
+ TimeValue masterTimeout = MasterNodeOperationRequest.DEFAULT_MASTER_NODE_TIMEOUT;
+
+ public PutRequest(String cause, String name) {
+ this.cause = cause;
+ this.name = name;
+ }
+
+ public PutRequest order(int order) {
+ this.order = order;
+ return this;
+ }
+
+ public PutRequest template(String template) {
+ this.template = template;
+ return this;
+ }
+
+ public PutRequest create(boolean create) {
+ this.create = create;
+ return this;
+ }
+
+ public PutRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public PutRequest mappings(Map<String, String> mappings) {
+ this.mappings.putAll(mappings);
+ return this;
+ }
+
+ public PutRequest customs(Map<String, IndexMetaData.Custom> customs) {
+ this.customs.putAll(customs);
+ return this;
+ }
+
+ public PutRequest putMapping(String mappingType, String mappingSource) {
+ mappings.put(mappingType, mappingSource);
+ return this;
+ }
+
+ public PutRequest masterTimeout(TimeValue masterTimeout) {
+ this.masterTimeout = masterTimeout;
+ return this;
+ }
+ }
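+
+    // Usage sketch (hypothetical values), chaining the setters above:
+    //   new PutRequest("api", "template_1").template("te*").order(0).create(true)
+    //       .settings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).build());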
+
+ public static class PutResponse {
+ private final boolean acknowledged;
+ private final IndexTemplateMetaData template;
+
+ public PutResponse(boolean acknowledged, IndexTemplateMetaData template) {
+ this.acknowledged = acknowledged;
+ this.template = template;
+ }
+
+ public boolean acknowledged() {
+ return acknowledged;
+ }
+
+ public IndexTemplateMetaData template() {
+ return template;
+ }
+ }
+
+ public static class RemoveRequest {
+ final String name;
+ TimeValue masterTimeout = MasterNodeOperationRequest.DEFAULT_MASTER_NODE_TIMEOUT;
+
+ public RemoveRequest(String name) {
+ this.name = name;
+ }
+
+ public RemoveRequest masterTimeout(TimeValue masterTimeout) {
+ this.masterTimeout = masterTimeout;
+ return this;
+ }
+ }
+
+ public static class RemoveResponse {
+ private final boolean acknowledged;
+
+ public RemoveResponse(boolean acknowledged) {
+ this.acknowledged = acknowledged;
+ }
+
+ public boolean acknowledged() {
+ return acknowledged;
+ }
+ }
+
+    public interface RemoveListener {
+
+ void onResponse(RemoveResponse response);
+
+ void onFailure(Throwable t);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
new file mode 100644
index 0000000..414bc40
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -0,0 +1,637 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingClusterStateUpdateRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.InvalidTypeNameException;
+import org.elasticsearch.indices.TypeMissingException;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.*;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+
+/**
+ * Service responsible for submitting mapping changes
+ */
+public class MetaDataMappingService extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+ private final ClusterService clusterService;
+ private final IndicesService indicesService;
+
+    // the mutex protects all the refreshOrUpdate variables!
+ private final Object refreshOrUpdateMutex = new Object();
+ private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<MappingTask>();
+ private long refreshOrUpdateInsertOrder;
+ private long refreshOrUpdateProcessedInsertOrder;
+
+ @Inject
+ public MetaDataMappingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ }
+
+ static class MappingTask {
+ final String index;
+ final String indexUUID;
+
+ MappingTask(String index, final String indexUUID) {
+ this.index = index;
+ this.indexUUID = indexUUID;
+ }
+ }
+
+ static class RefreshTask extends MappingTask {
+ final String[] types;
+
+ RefreshTask(String index, final String indexUUID, String[] types) {
+ super(index, indexUUID);
+ this.types = types;
+ }
+ }
+
+ static class UpdateTask extends MappingTask {
+ final String type;
+ final CompressedString mappingSource;
+ final long order; // -1 for unknown
+        final String nodeId; // null for unknown
+ final ClusterStateUpdateListener listener;
+
+ UpdateTask(String index, String indexUUID, String type, CompressedString mappingSource, long order, String nodeId, ClusterStateUpdateListener listener) {
+ super(index, indexUUID);
+ this.type = type;
+ this.mappingSource = mappingSource;
+ this.order = order;
+ this.nodeId = nodeId;
+ this.listener = listener;
+ }
+ }
+
+ /**
+ * Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much
+ * as possible so we won't create the same index all the time for example for the updates on the same mapping
+ * and generate a single cluster change event out of all of those.
+ */
+ ClusterState executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception {
+ final List<MappingTask> allTasks = new ArrayList<MappingTask>();
+
+ synchronized (refreshOrUpdateMutex) {
+ if (refreshOrUpdateQueue.isEmpty()) {
+ return currentState;
+ }
+
+            // we already processed this task in a bulk manner in a previous cluster event, simply
+            // ignore it so we let other tasks get in and be processed; the queued ones will be
+            // handled later on in a subsequent cluster state event
+ if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
+ return currentState;
+ }
+
+ allTasks.addAll(refreshOrUpdateQueue);
+ refreshOrUpdateQueue.clear();
+
+ refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
+ }
+
+ if (allTasks.isEmpty()) {
+ return currentState;
+ }
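+        // at this point the queue has been drained atomically under the mutex; tasks submitted
+        // after the snapshot will be handled by a later cluster state update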
+
+ // break down to tasks per index, so we can optimize the on demand index service creation
+ // to only happen for the duration of a single index processing of its respective events
+ Map<String, List<MappingTask>> tasksPerIndex = Maps.newHashMap();
+ for (MappingTask task : allTasks) {
+            if (task.index == null) {
+                logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
+                continue;
+            }
+ List<MappingTask> indexTasks = tasksPerIndex.get(task.index);
+ if (indexTasks == null) {
+ indexTasks = new ArrayList<MappingTask>();
+ tasksPerIndex.put(task.index, indexTasks);
+ }
+ indexTasks.add(task);
+ }
+
+ boolean dirty = false;
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+
+ for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {
+ String index = entry.getKey();
+ IndexMetaData indexMetaData = mdBuilder.get(index);
+ if (indexMetaData == null) {
+ // index got deleted on us, ignore...
+ logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index);
+ continue;
+ }
+            // build the list of tasks to iterate over, keeping only the latest (based on order)
+            // update-mapping task per node and type
+ List<MappingTask> allIndexTasks = entry.getValue();
+ List<MappingTask> tasks = new ArrayList<MappingTask>();
+ for (MappingTask task : allIndexTasks) {
+ if (!indexMetaData.isSameUUID(task.indexUUID)) {
+ logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
+ continue;
+ }
+ boolean add = true;
+ // if its an update task, make sure we only process the latest ordered one per node
+ if (task instanceof UpdateTask) {
+ UpdateTask uTask = (UpdateTask) task;
+                    // we can only compare if we have both the order and the node id
+ if (uTask.order != -1 && uTask.nodeId != null) {
+ for (int i = 0; i < tasks.size(); i++) {
+ MappingTask existing = tasks.get(i);
+ if (existing instanceof UpdateTask) {
+ UpdateTask eTask = (UpdateTask) existing;
+ if (eTask.type.equals(uTask.type)) {
+ // if we have the order, and the node id, then we can compare, and replace if applicable
+ if (eTask.order != -1 && eTask.nodeId != null) {
+ if (eTask.nodeId.equals(uTask.nodeId) && uTask.order > eTask.order) {
+                                        // a newer update task for the same node and type, replace the existing one so we execute the latest
+ tasks.set(i, uTask);
+ add = false;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
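+                // e.g. (hypothetical values) two update tasks for the same type from node "n1" with
+                // orders 3 and 7: only the order-7 task survives in the list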
+
+ if (add) {
+ tasks.add(task);
+ }
+ }
+
+ // construct the actual index if needed, and make sure the relevant mappings are there
+ boolean removeIndex = false;
+ IndexService indexService = indicesService.indexService(index);
+ if (indexService == null) {
+ // we need to create the index here, and add the current mapping to it, so we can merge
+ indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), currentState.nodes().localNode().id());
+ removeIndex = true;
+ Set<String> typesToIntroduce = Sets.newHashSet();
+ for (MappingTask task : tasks) {
+ if (task instanceof UpdateTask) {
+ typesToIntroduce.add(((UpdateTask) task).type);
+ } else if (task instanceof RefreshTask) {
+ Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
+ }
+ }
+ for (String type : typesToIntroduce) {
+ // only add the current relevant mapping (if exists)
+ if (indexMetaData.mappings().containsKey(type)) {
+ // don't apply the default mapping, it has been applied when the mapping was created
+ indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source(), false);
+ }
+ }
+ }
+
+ IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
+ try {
+ boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder);
+ if (indexDirty) {
+ mdBuilder.put(builder);
+ dirty = true;
+ }
+ } finally {
+ if (removeIndex) {
+ indicesService.removeIndex(index, "created for mapping processing");
+ }
+ }
+ }
+
+        // fork sending back the updates, so we don't block the cluster state update while sending
+        // them; there might be a few of those...
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+                for (MappingTask task : allTasks) {
+ if (task instanceof UpdateTask) {
+ UpdateTask uTask = (UpdateTask) task;
+ ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true);
+ uTask.listener.onResponse(response);
+ }
+ }
+ }
+ });
+
+ if (!dirty) {
+ return currentState;
+ }
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
+ boolean dirty = false;
+ String index = indexService.index().name();
+ // keep track of what we already refreshed, no need to refresh it again...
+ Set<String> processedRefreshes = Sets.newHashSet();
+ for (MappingTask task : tasks) {
+ if (task instanceof RefreshTask) {
+ RefreshTask refreshTask = (RefreshTask) task;
+ try {
+ List<String> updatedTypes = Lists.newArrayList();
+ for (String type : refreshTask.types) {
+ if (processedRefreshes.contains(type)) {
+ continue;
+ }
+ DocumentMapper mapper = indexService.mapperService().documentMapper(type);
+ if (mapper == null) {
+ continue;
+ }
+ if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
+ updatedTypes.add(type);
+ builder.putMapping(new MappingMetaData(mapper));
+ }
+ processedRefreshes.add(type);
+ }
+
+ if (updatedTypes.isEmpty()) {
+ continue;
+ }
+
+ logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
+ dirty = true;
+ } catch (Throwable t) {
+                    logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", t, index, refreshTask.types);
+ }
+ } else if (task instanceof UpdateTask) {
+ UpdateTask updateTask = (UpdateTask) task;
+ try {
+ String type = updateTask.type;
+ CompressedString mappingSource = updateTask.mappingSource;
+
+ MappingMetaData mappingMetaData = builder.mapping(type);
+ if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
+ logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
+ continue;
+ }
+
+ DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false);
+ processedRefreshes.add(type);
+
+                    // if we end up with the same mapping as the original one, ignore
+ if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
+ logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
+ continue;
+ }
+
+ // build the updated mapping source
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
+ } else if (logger.isInfoEnabled()) {
+ logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
+ }
+
+ builder.putMapping(new MappingMetaData(updatedMapper));
+ dirty = true;
+ } catch (Throwable t) {
+                    logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", t, index, updateTask.type);
+ }
+ } else {
+ logger.warn("illegal state, got wrong mapping task type [{}]", task);
+ }
+ }
+ return dirty;
+ }
+
+ /**
+ * Refreshes mappings if they are not the same between original and parsed version
+ */
+ public void refreshMapping(final String index, final String indexUUID, final String... types) {
+ final long insertOrder;
+ synchronized (refreshOrUpdateMutex) {
+ insertOrder = ++refreshOrUpdateInsertOrder;
+ refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
+ }
+ clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("failure during [{}]", t, source);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ return executeRefreshOrUpdate(currentState, insertOrder);
+ }
+ });
+ }
+
+ public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final long order, final String nodeId, final ClusterStateUpdateListener listener) {
+ final long insertOrder;
+ synchronized (refreshOrUpdateMutex) {
+ insertOrder = ++refreshOrUpdateInsertOrder;
+ refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, order, nodeId, listener));
+ }
+ clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "], order [" + order + "]", Priority.HIGH, new ClusterStateUpdateTask() {
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(final ClusterState currentState) throws Exception {
+ return executeRefreshOrUpdate(currentState, insertOrder);
+ }
+ });
+ }
+
+ public void removeMapping(final DeleteMappingClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+ clusterService.submitStateUpdateTask("remove-mapping [" + Arrays.toString(request.types()) + "]", Priority.HIGH, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (request.indices().length == 0) {
+ throw new IndexMissingException(new Index("_all"));
+ }
+
+ MetaData.Builder builder = MetaData.builder(currentState.metaData());
+ boolean changed = false;
+ String latestIndexWithout = null;
+                for (String indexName : request.indices()) {
+                    IndexMetaData indexMetaData = currentState.metaData().index(indexName);
+                    // guard against indices deleted in the meantime; building from a null
+                    // IndexMetaData would fail
+                    if (indexMetaData != null) {
+                        IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData);
+                        boolean isLatestIndexWithout = true;
+                        for (String type : request.types()) {
+                            if (indexMetaData.mappings().containsKey(type)) {
+                                indexBuilder.removeMapping(type);
+                                changed = true;
+                                isLatestIndexWithout = false;
+                            }
+                        }
+                        if (isLatestIndexWithout) {
+                            latestIndexWithout = indexMetaData.index();
+                        }
+                        builder.put(indexBuilder);
+                    }
+                }
+
+ if (!changed) {
+ throw new TypeMissingException(new Index(latestIndexWithout), request.types());
+ }
+
+ logger.info("[{}] remove_mapping [{}]", request.indices(), request.types());
+
+ return ClusterState.builder(currentState).metaData(builder).build();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+
+ public void putMapping(final PutMappingClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+
+ clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(final ClusterState currentState) throws Exception {
+ List<String> indicesToClose = Lists.newArrayList();
+ try {
+ for (String index : request.indices()) {
+ if (!currentState.metaData().hasIndex(index)) {
+ throw new IndexMissingException(new Index(index));
+ }
+ }
+
+ // pre create indices here and add mappings to them so we can merge the mappings here if needed
+ for (String index : request.indices()) {
+ if (indicesService.hasIndex(index)) {
+ continue;
+ }
+ final IndexMetaData indexMetaData = currentState.metaData().index(index);
+ IndexService indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), clusterService.localNode().id());
+ indicesToClose.add(indexMetaData.index());
+ // make sure to add custom default mapping if exists
+ if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
+ indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false);
+ }
+ // only add the current relevant mapping (if exists)
+ if (indexMetaData.mappings().containsKey(request.type())) {
+ indexService.mapperService().merge(request.type(), indexMetaData.mappings().get(request.type()).source(), false);
+ }
+ }
+
+ Map<String, DocumentMapper> newMappers = newHashMap();
+ Map<String, DocumentMapper> existingMappers = newHashMap();
+ for (String index : request.indices()) {
+ IndexService indexService = indicesService.indexService(index);
+ if (indexService != null) {
+ // try and parse it (no need to add it here) so we can bail early in case of parsing exception
+ DocumentMapper newMapper;
+ DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
+ if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
+ // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
+ newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), false);
+ } else {
+ newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()));
+ if (existingMapper != null) {
+ // first, simulate
+ DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper, mergeFlags().simulate(true));
+ // if we have conflicts, and we are not supposed to ignore them, throw an exception
+ if (!request.ignoreConflicts() && mergeResult.hasConflicts()) {
+ throw new MergeMappingException(mergeResult.conflicts());
+ }
+ }
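+                        // the simulate(true) pass above only detects conflicts; the actual merge
+                        // happens later on the master (see the mergedMapper loop below)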
+ }
+
+ newMappers.put(index, newMapper);
+ if (existingMapper != null) {
+ existingMappers.put(index, existingMapper);
+ }
+
+ } else {
+ throw new IndexMissingException(new Index(index));
+ }
+ }
+
+ String mappingType = request.type();
+ if (mappingType == null) {
+ mappingType = newMappers.values().iterator().next().type();
+ } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
+ throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
+ }
+ if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
+ throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
+ }
+
+ final Map<String, MappingMetaData> mappings = newHashMap();
+ for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
+ String index = entry.getKey();
+ // do the actual merge here on the master, and update the mapping source
+ DocumentMapper newMapper = entry.getValue();
+ IndexService indexService = indicesService.indexService(index);
+ CompressedString existingSource = null;
+ if (existingMappers.containsKey(entry.getKey())) {
+ existingSource = existingMappers.get(entry.getKey()).mappingSource();
+ }
+ DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false);
+ CompressedString updatedSource = mergedMapper.mappingSource();
+
+ if (existingSource != null) {
+ if (existingSource.equals(updatedSource)) {
+ // same source, no changes, ignore it
+ } else {
+ // use the merged mapping source
+ mappings.put(index, new MappingMetaData(mergedMapper));
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
+ } else if (logger.isInfoEnabled()) {
+ logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
+ }
+ }
+ } else {
+ mappings.put(index, new MappingMetaData(mergedMapper));
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
+ } else if (logger.isInfoEnabled()) {
+ logger.info("[{}] create_mapping [{}]", index, newMapper.type());
+ }
+ }
+ }
+
+ if (mappings.isEmpty()) {
+ // no changes, return
+ return currentState;
+ }
+
+ MetaData.Builder builder = MetaData.builder(currentState.metaData());
+ for (String indexName : request.indices()) {
+ IndexMetaData indexMetaData = currentState.metaData().index(indexName);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(indexName));
+ }
+ MappingMetaData mappingMd = mappings.get(indexName);
+ if (mappingMd != null) {
+ builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
+ }
+ }
+
+ return ClusterState.builder(currentState).metaData(builder).build();
+ } finally {
+ for (String index : indicesToClose) {
+ indicesService.removeIndex(index, "created for mapping processing");
+ }
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java
new file mode 100644
index 0000000..27810d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.concurrent.Semaphore;
+
+/**
+ */
+public class MetaDataService extends AbstractComponent {
+
+ private final Semaphore[] indexMdLocks;
+
+ @Inject
+ public MetaDataService(Settings settings) {
+ super(settings);
+ indexMdLocks = new Semaphore[500];
+ for (int i = 0; i < indexMdLocks.length; i++) {
+ indexMdLocks[i] = new Semaphore(1);
+ }
+ }
+
+ public Semaphore indexMetaDataLock(String index) {
+ return indexMdLocks[Math.abs(DjbHashFunction.DJB_HASH(index) % indexMdLocks.length)];
+ }
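+
+    // Usage sketch (hypothetical caller): the same index name always hashes to the same semaphore,
+    // so concurrent metadata operations on one index serialize on it:
+    //   Semaphore lock = metaDataService.indexMetaDataLock("twitter");
+    //   lock.acquire();
+    //   try { /* mutate index metadata */ } finally { lock.release(); }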
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
new file mode 100644
index 0000000..6fdcaa1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
@@ -0,0 +1,331 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateListener;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.settings.DynamicSettings;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.settings.IndexDynamicSettings;
+
+import java.util.*;
+
+/**
+ * Service responsible for submitting update index settings requests
+ */
+public class MetaDataUpdateSettingsService extends AbstractComponent implements ClusterStateListener {
+
+ private final ClusterService clusterService;
+
+ private final AllocationService allocationService;
+
+ private final DynamicSettings dynamicSettings;
+
+ @Inject
+ public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.clusterService.add(this);
+ this.allocationService = allocationService;
+ this.dynamicSettings = dynamicSettings;
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ // update an index with number of replicas based on data nodes if possible
+ if (!event.state().nodes().localNodeMaster()) {
+ return;
+ }
+
+ Map<Integer, List<String>> nrReplicasChanged = new HashMap<Integer, List<String>>();
+
+ // we need to do this each time in case it was changed by update settings
+ for (final IndexMetaData indexMetaData : event.state().metaData()) {
+ String autoExpandReplicas = indexMetaData.settings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
+            if (autoExpandReplicas != null && Booleans.parseBoolean(autoExpandReplicas, true)) { // parseBoolean only returns false for explicit false values, which is what we want here
+ try {
+ int min;
+ int max;
+ try {
+ min = Integer.parseInt(autoExpandReplicas.substring(0, autoExpandReplicas.indexOf('-')));
+ String sMax = autoExpandReplicas.substring(autoExpandReplicas.indexOf('-') + 1);
+ if (sMax.equals("all")) {
+ max = event.state().nodes().dataNodes().size() - 1;
+ } else {
+ max = Integer.parseInt(sMax);
+ }
+ } catch (Exception e) {
+ logger.warn("failed to set [{}], wrong format [{}]", e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, autoExpandReplicas);
+ continue;
+ }
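+                    // e.g. (hypothetical values) "0-all" expands to (data nodes - 1) replicas,
+                    // while "0-5" clamps the replica count to the 0..5 range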
+
+ int numberOfReplicas = event.state().nodes().dataNodes().size() - 1;
+ if (numberOfReplicas < min) {
+ numberOfReplicas = min;
+ } else if (numberOfReplicas > max) {
+ numberOfReplicas = max;
+ }
+
+ // same value, nothing to do there
+ if (numberOfReplicas == indexMetaData.numberOfReplicas()) {
+ continue;
+ }
+
+ if (numberOfReplicas >= min && numberOfReplicas <= max) {
+
+ if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
+ nrReplicasChanged.put(numberOfReplicas, new ArrayList<String>());
+ }
+
+ nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.index());
+ }
+ } catch (Exception e) {
+ logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.index());
+ }
+ }
+ }
+
+ if (nrReplicasChanged.size() > 0) {
+ for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
+ Settings settings = ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
+ final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
+
+ UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest()
+ .indices(indices.toArray(new String[indices.size()])).settings(settings)
+ .ackTimeout(TimeValue.timeValueMillis(0)) //no need to wait for ack here
+ .masterNodeTimeout(TimeValue.timeValueMinutes(10));
+
+ updateSettings(updateRequest, new ClusterStateUpdateListener() {
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ for (String index : indices) {
+ logger.info("[{}] auto expanded replicas to [{}]", index, fNumberOfReplicas);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ for (String index : indices) {
+                            logger.warn("[{}] failed to auto expand replicas to [{}]", index, fNumberOfReplicas);
+ }
+ }
+ });
+ }
+ }
+ }
+
+ public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
+ ImmutableSettings.Builder updatedSettingsBuilder = ImmutableSettings.settingsBuilder();
+ for (Map.Entry<String, String> entry : request.settings().getAsMap().entrySet()) {
+ if (entry.getKey().equals("index")) {
+ continue;
+ }
+ if (!entry.getKey().startsWith("index.")) {
+ updatedSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
+ } else {
+ updatedSettingsBuilder.put(entry.getKey(), entry.getValue());
+ }
+ }
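+        // e.g. "refresh_interval" becomes "index.refresh_interval"; already-prefixed keys pass through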
+ // never allow to change the number of shards
+ for (String key : updatedSettingsBuilder.internalMap().keySet()) {
+ if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
+ listener.onFailure(new ElasticsearchIllegalArgumentException("can't change the number of shards for an index"));
+ return;
+ }
+ }
+
+ final Settings closeSettings = updatedSettingsBuilder.build();
+
+ final Set<String> removedSettings = Sets.newHashSet();
+ final Set<String> errors = Sets.newHashSet();
+ for (Map.Entry<String, String> setting : updatedSettingsBuilder.internalMap().entrySet()) {
+ if (!dynamicSettings.hasDynamicSetting(setting.getKey())) {
+ removedSettings.add(setting.getKey());
+ } else {
+ String error = dynamicSettings.validateDynamicSetting(setting.getKey(), setting.getValue());
+ if (error != null) {
+ errors.add("[" + setting.getKey() + "] - " + error);
+ }
+ }
+ }
+
+ if (!errors.isEmpty()) {
+ listener.onFailure(new ElasticsearchIllegalArgumentException("can't process the settings: " + errors.toString()));
+ return;
+ }
+
+ if (!removedSettings.isEmpty()) {
+ for (String removedSetting : removedSettings) {
+ updatedSettingsBuilder.remove(removedSetting);
+ }
+ }
+ final Settings openSettings = updatedSettingsBuilder.build();
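+        // note: closeSettings above still contains the non-dynamic keys (they may legitimately be
+        // applied to closed indices below), while openSettings has them stripped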
+
+ clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask() {
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new ClusterStateUpdateResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterStateUpdateResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ String[] actualIndices = currentState.metaData().concreteIndices(request.indices());
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
+ MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
+
+ // allow to change any settings to a close index, and only allow dynamic settings to be changed
+ // on an open index
+ Set<String> openIndices = Sets.newHashSet();
+ Set<String> closeIndices = Sets.newHashSet();
+ for (String index : actualIndices) {
+ if (currentState.metaData().index(index).state() == IndexMetaData.State.OPEN) {
+ openIndices.add(index);
+ } else {
+ closeIndices.add(index);
+ }
+ }
+
+ if (!removedSettings.isEmpty() && !openIndices.isEmpty()) {
+ throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT,
+                            "Can't update non-dynamic settings [%s] for open indices [%s]",
+ removedSettings,
+ openIndices
+ ));
+ }
+
+ int updatedNumberOfReplicas = openSettings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, -1);
+ if (updatedNumberOfReplicas != -1) {
+ routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
+ metaDataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
+ logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices);
+ }
+
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ Boolean updatedReadOnly = openSettings.getAsBoolean(IndexMetaData.SETTING_READ_ONLY, null);
+ if (updatedReadOnly != null) {
+ for (String index : actualIndices) {
+ if (updatedReadOnly) {
+ blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
+ } else {
+ blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
+ }
+ }
+ }
+ Boolean updateMetaDataBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, null);
+ if (updateMetaDataBlock != null) {
+ for (String index : actualIndices) {
+ if (updateMetaDataBlock) {
+ blocks.addIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
+ } else {
+ blocks.removeIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
+ }
+ }
+ }
+
+ Boolean updateWriteBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, null);
+ if (updateWriteBlock != null) {
+ for (String index : actualIndices) {
+ if (updateWriteBlock) {
+ blocks.addIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
+ } else {
+ blocks.removeIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
+ }
+ }
+ }
+
+ Boolean updateReadBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, null);
+ if (updateReadBlock != null) {
+ for (String index : actualIndices) {
+ if (updateReadBlock) {
+ blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
+ } else {
+ blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
+ }
+ }
+ }
+
+ if (!openIndices.isEmpty()) {
+ String[] indices = openIndices.toArray(new String[openIndices.size()]);
+ metaDataBuilder.updateSettings(openSettings, indices);
+ }
+
+ if (!closeIndices.isEmpty()) {
+ String[] indices = closeIndices.toArray(new String[closeIndices.size()]);
+ metaDataBuilder.updateSettings(closeSettings, indices);
+ }
+
+
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder).blocks(blocks).build();
+
+ // now, reroute in case things change that require it (like number of replicas)
+ RoutingAllocation.Result routingResult = allocationService.reroute(updatedState);
+ updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
+
+ return updatedState;
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java b/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java
new file mode 100644
index 0000000..38fc3f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ */
+public class ProcessClusterEventTimeoutException extends ElasticsearchException {
+
+ public ProcessClusterEventTimeoutException(TimeValue timeValue, String source) {
+ super("failed to process cluster event (" + source + ") within " + timeValue);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
new file mode 100644
index 0000000..0ca2792
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.loader.SettingsLoader;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Contains metadata about registered snapshot repositories
+ */
+public class RepositoriesMetaData implements MetaData.Custom {
+
+ public static final String TYPE = "repositories";
+
+ public static final Factory FACTORY = new Factory();
+
+ private final ImmutableList<RepositoryMetaData> repositories;
+
+ /**
+ * Constructs new repository metadata
+ *
+ * @param repositories list of repositories
+ */
+ public RepositoriesMetaData(RepositoryMetaData... repositories) {
+ this.repositories = ImmutableList.copyOf(repositories);
+ }
+
+ /**
+ * Returns list of currently registered repositories
+ *
+ * @return list of repositories
+ */
+ public ImmutableList<RepositoryMetaData> repositories() {
+ return this.repositories;
+ }
+
+ /**
+     * Returns the repository with the given name, or null if no such repository exists
+ *
+ * @param name name of repository
+ * @return repository metadata
+ */
+ public RepositoryMetaData repository(String name) {
+ for (RepositoryMetaData repository : repositories) {
+ if (name.equals(repository.name())) {
+ return repository;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Repository metadata factory
+ */
+ public static class Factory implements MetaData.Custom.Factory<RepositoriesMetaData> {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public RepositoriesMetaData readFrom(StreamInput in) throws IOException {
+ RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()];
+ for (int i = 0; i < repository.length; i++) {
+ repository[i] = RepositoryMetaData.readFrom(in);
+ }
+ return new RepositoriesMetaData(repository);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException {
+ out.writeVInt(repositories.repositories().size());
+ for (RepositoryMetaData repository : repositories.repositories()) {
+ repository.writeTo(out);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException {
+ XContentParser.Token token;
+ List<RepositoryMetaData> repository = new ArrayList<RepositoryMetaData>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String name = parser.currentName();
+ if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object");
+ }
+ String type = null;
+ Settings settings = ImmutableSettings.EMPTY;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ if ("type".equals(currentFieldName)) {
+ if (parser.nextToken() != XContentParser.Token.VALUE_STRING) {
+ throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type");
+ }
+ type = parser.text();
+ } else if ("settings".equals(currentFieldName)) {
+ if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params");
+ }
+ settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build();
+ } else {
+ throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("failed to parse repository [" + name + "]");
+ }
+ }
+ if (type == null) {
+ throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type");
+ }
+ repository.add(new RepositoryMetaData(name, type, settings));
+ } else {
+ throw new ElasticsearchParseException("failed to parse repositories");
+ }
+ }
+ return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()]));
+ }
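+
+        // fromXContent above accepts content of the form (hypothetical repository):
+        //   "my_backup": { "type": "fs", "settings": { "location": "/mnt/backups" } }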
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ for (RepositoryMetaData repository : customIndexMetaData.repositories()) {
+ toXContent(repository, builder, params);
+ }
+ }
+
+ /**
+ * Serializes information about a single repository
+ *
+ * @param repository repository metadata
+ * @param builder XContent builder
+ * @param params serialization parameters
+ * @throws IOException
+ */
+ public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("type", repository.type());
+ builder.startObject("settings");
+ for (Map.Entry<String, String> settingEntry : repository.settings().getAsMap().entrySet()) {
+ builder.field(settingEntry.getKey(), settingEntry.getValue());
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean isPersistent() {
+ return true;
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java
new file mode 100644
index 0000000..ea50b30
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.IOException;
+
+/**
+ * Metadata about registered repository
+ */
+public class RepositoryMetaData {
+ private final String name;
+ private final String type;
+ private final Settings settings;
+
+ /**
+ * Constructs new repository metadata
+ *
+ * @param name repository name
+ * @param type repository type
+ * @param settings repository settings
+ */
+ public RepositoryMetaData(String name, String type, Settings settings) {
+ this.name = name;
+ this.type = type;
+ this.settings = settings;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String name() {
+ return this.name;
+ }
+
+ /**
+ * Returns repository type
+ *
+ * @return repository type
+ */
+ public String type() {
+ return this.type;
+ }
+
+ /**
+ * Returns repository settings
+ *
+ * @return repository settings
+ */
+ public Settings settings() {
+ return this.settings;
+ }
+
+
+ /**
+ * Reads repository metadata from stream input
+ *
+ * @param in stream input
+ * @return repository metadata
+ * @throws IOException
+ */
+ public static RepositoryMetaData readFrom(StreamInput in) throws IOException {
+ String name = in.readString();
+ String type = in.readString();
+ Settings settings = ImmutableSettings.readSettingsFromStream(in);
+ return new RepositoryMetaData(name, type, settings);
+ }
+
+ /**
+ * Writes repository metadata to stream output
+ *
+ * @param out stream output
+ * @throws IOException
+ */
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(type);
+ ImmutableSettings.writeSettingsToStream(settings, out);
+ }
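+
+    // Wire format sketch: name and type as strings followed by the settings map, written in the
+    // same order that readFrom above consumes them.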
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java
new file mode 100644
index 0000000..bfcb91b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java
@@ -0,0 +1,527 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Meta data about restore processes that are currently executing
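+ * <p>
+ * A minimal construction sketch (snapshot and index names are illustrative; a null
+ * shard map defaults to empty, per the {@link Entry} constructor):
+ * <pre>{@code
+ * SnapshotId snapshotId = new SnapshotId("my_backup", "snapshot_1");
+ * RestoreMetaData.Entry entry = new RestoreMetaData.Entry(
+ *         snapshotId, RestoreMetaData.State.INIT,
+ *         ImmutableList.of("index_1"), null);
+ * RestoreMetaData restore = new RestoreMetaData(entry);
+ * assert restore.snapshot(snapshotId) == entry;
+ * }</pre>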
+ */
+public class RestoreMetaData implements MetaData.Custom {
+
+ public static final String TYPE = "restore";
+
+ public static final Factory FACTORY = new Factory();
+
+ private final ImmutableList<Entry> entries;
+
+ /**
+ * Constructs new restore metadata
+ *
+ * @param entries list of currently running restore processes
+ */
+ public RestoreMetaData(ImmutableList<Entry> entries) {
+ this.entries = entries;
+ }
+
+ /**
+ * Constructs new restore metadata
+ *
+ * @param entries list of currently running restore processes
+ */
+ public RestoreMetaData(Entry... entries) {
+ this.entries = ImmutableList.copyOf(entries);
+ }
+
+ /**
+ * Returns list of currently running restore processes
+ *
+ * @return list of currently running restore processes
+ */
+ public ImmutableList<Entry> entries() {
+ return this.entries;
+ }
+
+ /**
+ * Returns the currently running restore process with the given snapshot id, or null if this snapshot is not being
+ * restored
+ *
+ * @param snapshotId snapshot id
+ * @return restore metadata or null
+ */
+ public Entry snapshot(SnapshotId snapshotId) {
+ for (Entry entry : entries) {
+ if (snapshotId.equals(entry.snapshotId())) {
+ return entry;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RestoreMetaData that = (RestoreMetaData) o;
+
+ if (!entries.equals(that.entries)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return entries.hashCode();
+ }
+
+ /**
+ * Restore metadata
+ */
+ public static class Entry {
+ private final State state;
+ private final SnapshotId snapshotId;
+ private final ImmutableMap<ShardId, ShardRestoreStatus> shards;
+ private final ImmutableList<String> indices;
+
+ /**
+ * Creates new restore metadata
+ *
+ * @param snapshotId snapshot id
+ * @param state current state of the restore process
+ * @param indices list of indices being restored
+ * @param shards list of shards being restored and their current restore status
+ */
+ public Entry(SnapshotId snapshotId, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardRestoreStatus> shards) {
+ this.snapshotId = snapshotId;
+ this.state = state;
+ this.indices = indices;
+ if (shards == null) {
+ this.shards = ImmutableMap.of();
+ } else {
+ this.shards = shards;
+ }
+ }
+
+ /**
+ * Returns snapshot id
+ *
+ * @return snapshot id
+ */
+ public SnapshotId snapshotId() {
+ return this.snapshotId;
+ }
+
+ /**
+ * Returns list of shards that are being restored and their status
+ *
+ * @return list of shards
+ */
+ public ImmutableMap<ShardId, ShardRestoreStatus> shards() {
+ return this.shards;
+ }
+
+ /**
+ * Returns current restore state
+ *
+ * @return restore state
+ */
+ public State state() {
+ return state;
+ }
+
+ /**
+ * Returns list of indices
+ *
+ * @return list of indices
+ */
+ public ImmutableList<String> indices() {
+ return indices;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Entry entry = (Entry) o;
+
+ if (!indices.equals(entry.indices)) return false;
+ if (!snapshotId.equals(entry.snapshotId)) return false;
+ if (!shards.equals(entry.shards)) return false;
+ if (state != entry.state) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = state.hashCode();
+ result = 31 * result + snapshotId.hashCode();
+ result = 31 * result + shards.hashCode();
+ result = 31 * result + indices.hashCode();
+ return result;
+ }
+ }
+
+ /**
+ * Represents status of a restored shard
+ */
+ public static class ShardRestoreStatus {
+ private State state;
+ private String nodeId;
+ private String reason;
+
+ private ShardRestoreStatus() {
+ }
+
+ /**
+ * Constructs a new shard restore status in initializing state on the given node
+ *
+ * @param nodeId node id
+ */
+ public ShardRestoreStatus(String nodeId) {
+ this(nodeId, State.INIT);
+ }
+
+ /**
+ * Constructs a new shard restore status with the specified state on the given node
+ *
+ * @param nodeId node id
+ * @param state restore state
+ */
+ public ShardRestoreStatus(String nodeId, State state) {
+ this(nodeId, state, null);
+ }
+
+ /**
+ * Constructs a new shard restore status with the specified state and failure reason on the given node
+ *
+ * @param nodeId node id
+ * @param state restore state
+ * @param reason failure reason
+ */
+ public ShardRestoreStatus(String nodeId, State state, String reason) {
+ this.nodeId = nodeId;
+ this.state = state;
+ this.reason = reason;
+ }
+
+ /**
+ * Returns current state
+ *
+ * @return current state
+ */
+ public State state() {
+ return state;
+ }
+
+ /**
+ * Returns node id of the node where the shard is being restored
+ *
+ * @return node id
+ */
+ public String nodeId() {
+ return nodeId;
+ }
+
+ /**
+ * Returns failure reason
+ *
+ * @return failure reason
+ */
+ public String reason() {
+ return reason;
+ }
+
+ /**
+ * Reads restore status from stream input
+ *
+ * @param in stream input
+ * @return restore status
+ * @throws IOException
+ */
+ public static ShardRestoreStatus readShardRestoreStatus(StreamInput in) throws IOException {
+ ShardRestoreStatus shardSnapshotStatus = new ShardRestoreStatus();
+ shardSnapshotStatus.readFrom(in);
+ return shardSnapshotStatus;
+ }
+
+ /**
+ * Reads restore status from stream input
+ *
+ * @param in stream input
+ * @throws IOException
+ */
+ public void readFrom(StreamInput in) throws IOException {
+ nodeId = in.readOptionalString();
+ state = State.fromValue(in.readByte());
+ reason = in.readOptionalString();
+ }
+
+ /**
+ * Writes restore status to stream output
+ *
+ * @param out stream output
+ * @throws IOException
+ */
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalString(nodeId);
+ out.writeByte(state.value);
+ out.writeOptionalString(reason);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ShardRestoreStatus status = (ShardRestoreStatus) o;
+
+ if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false;
+ if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false;
+ if (state != status.state) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = state != null ? state.hashCode() : 0;
+ result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0);
+ result = 31 * result + (reason != null ? reason.hashCode() : 0);
+ return result;
+ }
+ }
+
+ /**
+ * State of the restore process, used for both the overall restore and individual shards
+ */
+ public static enum State {
+ /**
+ * Initializing state
+ */
+ INIT((byte) 0),
+ /**
+ * Started state
+ */
+ STARTED((byte) 1),
+ /**
+ * Restore finished successfully
+ */
+ SUCCESS((byte) 2),
+ /**
+ * Restore failed
+ */
+ FAILURE((byte) 3);
+
+ private byte value;
+
+ /**
+ * Constructs new state
+ *
+ * @param value state code
+ */
+ State(byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns state code
+ *
+ * @return state code
+ */
+ public byte value() {
+ return value;
+ }
+
+ /**
+ * Returns true if restore process completed (either successfully or with failure)
+ *
+ * @return true if restore process completed
+ */
+ public boolean completed() {
+ return this == SUCCESS || this == FAILURE;
+ }
+
+ /**
+ * Returns state corresponding to state code
+ *
+ * @param value state code
+ * @return state
+ */
+ public static State fromValue(byte value) {
+ switch (value) {
+ case 0:
+ return INIT;
+ case 1:
+ return STARTED;
+ case 2:
+ return SUCCESS;
+ case 3:
+ return FAILURE;
+ default:
+ throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]");
+ }
+ }
+ }
+
+ /**
+ * Restore metadata factory
+ */
+ public static class Factory implements MetaData.Custom.Factory<RestoreMetaData> {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public RestoreMetaData readFrom(StreamInput in) throws IOException {
+ Entry[] entries = new Entry[in.readVInt()];
+ for (int i = 0; i < entries.length; i++) {
+ SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
+ State state = State.fromValue(in.readByte());
+ int indices = in.readVInt();
+ ImmutableList.Builder<String> indexBuilder = ImmutableList.builder();
+ for (int j = 0; j < indices; j++) {
+ indexBuilder.add(in.readString());
+ }
+ ImmutableMap.Builder<ShardId, ShardRestoreStatus> builder = ImmutableMap.<ShardId, ShardRestoreStatus>builder();
+ int shards = in.readVInt();
+ for (int j = 0; j < shards; j++) {
+ ShardId shardId = ShardId.readShardId(in);
+ ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in);
+ builder.put(shardId, shardState);
+ }
+ entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build());
+ }
+ return new RestoreMetaData(entries);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void writeTo(RestoreMetaData repositories, StreamOutput out) throws IOException {
+ out.writeVInt(repositories.entries().size());
+ for (Entry entry : repositories.entries()) {
+ entry.snapshotId().writeTo(out);
+ out.writeByte(entry.state().value());
+ out.writeVInt(entry.indices().size());
+ for (String index : entry.indices()) {
+ out.writeString(index);
+ }
+ out.writeVInt(entry.shards().size());
+ for (Map.Entry<ShardId, ShardRestoreStatus> shardEntry : entry.shards().entrySet()) {
+ shardEntry.getKey().writeTo(out);
+ shardEntry.getValue().writeTo(out);
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public RestoreMetaData fromXContent(XContentParser parser) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void toXContent(RestoreMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startArray("snapshots");
+ for (Entry entry : customIndexMetaData.entries()) {
+ toXContent(entry, builder, params);
+ }
+ builder.endArray();
+ }
+
+ /**
+ * Serializes single restore operation
+ *
+ * @param entry restore operation metadata
+ * @param builder XContent builder
+ * @param params serialization parameters
+ * @throws IOException
+ */
+ public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("snapshot", entry.snapshotId().getSnapshot());
+ builder.field("repository", entry.snapshotId().getRepository());
+ builder.field("state", entry.state());
+ builder.startArray("indices");
+ {
+ for (String index : entry.indices()) {
+ builder.value(index);
+ }
+ }
+ builder.endArray();
+ builder.startArray("shards");
+ {
+ for (Map.Entry<ShardId, ShardRestoreStatus> shardEntry : entry.shards.entrySet()) {
+ ShardId shardId = shardEntry.getKey();
+ ShardRestoreStatus status = shardEntry.getValue();
+ builder.startObject();
+ {
+ builder.field("index", shardId.getIndex());
+ builder.field("shard", shardId.getId());
+ builder.field("state", status.state());
+ }
+ builder.endObject();
+ }
+ }
+
+ builder.endArray();
+ builder.endObject();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean isPersistent() {
+ return false;
+ }
+
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java
new file mode 100644
index 0000000..d8175c5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Snapshot ID - repository name + snapshot name
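+ * <p>
+ * Example (names are illustrative):
+ * <pre>{@code
+ * SnapshotId id = new SnapshotId("my_backup", "snapshot_1");
+ * assert "my_backup:snapshot_1".equals(id.toString());
+ * }</pre>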
+ */
+public class SnapshotId implements Serializable, Streamable {
+
+ private String repository;
+
+ private String snapshot;
+
+ // Caching hash code
+ private int hashCode;
+
+ private SnapshotId() {
+ }
+
+ /**
+ * Constructs new snapshot id
+ *
+ * @param repository repository name
+ * @param snapshot snapshot name
+ */
+ public SnapshotId(String repository, String snapshot) {
+ this.repository = repository;
+ this.snapshot = snapshot;
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String getRepository() {
+ return repository;
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String getSnapshot() {
+ return snapshot;
+ }
+
+ @Override
+ public String toString() {
+ return repository + ":" + snapshot;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null) return false;
+ SnapshotId snapshotId = (SnapshotId) o;
+ return snapshot.equals(snapshotId.snapshot) && repository.equals(snapshotId.repository);
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ private int computeHashCode() {
+ int result = repository != null ? repository.hashCode() : 0;
+ result = 31 * result + snapshot.hashCode();
+ return result;
+ }
+
+ /**
+ * Reads snapshot id from stream input
+ *
+ * @param in stream input
+ * @return snapshot id
+ * @throws IOException
+ */
+ public static SnapshotId readSnapshotId(StreamInput in) throws IOException {
+ SnapshotId snapshot = new SnapshotId();
+ snapshot.readFrom(in);
+ return snapshot;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ repository = in.readString();
+ snapshot = in.readString();
+ hashCode = computeHashCode();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(repository);
+ out.writeString(snapshot);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java
new file mode 100644
index 0000000..7235c85
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Meta data about snapshots that are currently executing
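+ * <p>
+ * A minimal construction sketch (names are illustrative; as with RestoreMetaData,
+ * a null shard map defaults to empty):
+ * <pre>{@code
+ * SnapshotMetaData.Entry entry = new SnapshotMetaData.Entry(
+ *         new SnapshotId("my_backup", "snapshot_1"),
+ *         true, SnapshotMetaData.State.INIT,
+ *         ImmutableList.of("index_1"), null);
+ * SnapshotMetaData snapshots = new SnapshotMetaData(entry);
+ * }</pre>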
+ */
+public class SnapshotMetaData implements MetaData.Custom {
+ public static final String TYPE = "snapshots";
+
+ public static final Factory FACTORY = new Factory();
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ SnapshotMetaData that = (SnapshotMetaData) o;
+
+ if (!entries.equals(that.entries)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return entries.hashCode();
+ }
+
+ public static class Entry {
+ private final State state;
+ private final SnapshotId snapshotId;
+ private final boolean includeGlobalState;
+ private final ImmutableMap<ShardId, ShardSnapshotStatus> shards;
+ private final ImmutableList<String> indices;
+
+ public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
+ this.state = state;
+ this.snapshotId = snapshotId;
+ this.includeGlobalState = includeGlobalState;
+ this.indices = indices;
+ if (shards == null) {
+ this.shards = ImmutableMap.of();
+ } else {
+ this.shards = shards;
+ }
+ }
+
+ public SnapshotId snapshotId() {
+ return this.snapshotId;
+ }
+
+ public ImmutableMap<ShardId, ShardSnapshotStatus> shards() {
+ return this.shards;
+ }
+
+ public State state() {
+ return state;
+ }
+
+ public ImmutableList<String> indices() {
+ return indices;
+ }
+
+ public boolean includeGlobalState() {
+ return includeGlobalState;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Entry entry = (Entry) o;
+
+ if (includeGlobalState != entry.includeGlobalState) return false;
+ if (!indices.equals(entry.indices)) return false;
+ if (!shards.equals(entry.shards)) return false;
+ if (!snapshotId.equals(entry.snapshotId)) return false;
+ if (state != entry.state) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = state.hashCode();
+ result = 31 * result + snapshotId.hashCode();
+ result = 31 * result + (includeGlobalState ? 1 : 0);
+ result = 31 * result + shards.hashCode();
+ result = 31 * result + indices.hashCode();
+ return result;
+ }
+ }
+
+ public static class ShardSnapshotStatus {
+ private State state;
+ private String nodeId;
+ private String reason;
+
+ private ShardSnapshotStatus() {
+ }
+
+ public ShardSnapshotStatus(String nodeId) {
+ this(nodeId, State.INIT);
+ }
+
+ public ShardSnapshotStatus(String nodeId, State state) {
+ this(nodeId, state, null);
+ }
+
+ public ShardSnapshotStatus(String nodeId, State state, String reason) {
+ this.nodeId = nodeId;
+ this.state = state;
+ this.reason = reason;
+ }
+
+ public State state() {
+ return state;
+ }
+
+ public String nodeId() {
+ return nodeId;
+ }
+
+ public String reason() {
+ return reason;
+ }
+
+ public static ShardSnapshotStatus readShardSnapshotStatus(StreamInput in) throws IOException {
+ ShardSnapshotStatus shardSnapshotStatus = new ShardSnapshotStatus();
+ shardSnapshotStatus.readFrom(in);
+ return shardSnapshotStatus;
+ }
+
+ public void readFrom(StreamInput in) throws IOException {
+ nodeId = in.readOptionalString();
+ state = State.fromValue(in.readByte());
+ reason = in.readOptionalString();
+ }
+
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalString(nodeId);
+ out.writeByte(state.value);
+ out.writeOptionalString(reason);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ShardSnapshotStatus status = (ShardSnapshotStatus) o;
+
+ if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false;
+ if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false;
+ if (state != status.state) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = state != null ? state.hashCode() : 0;
+ result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0);
+ result = 31 * result + (reason != null ? reason.hashCode() : 0);
+ return result;
+ }
+ }
+
+ public static enum State {
+ INIT((byte) 0), // snapshot is being initialized
+ STARTED((byte) 1), // snapshot is running
+ SUCCESS((byte) 2), // completed successfully
+ FAILED((byte) 3), // completed with failure
+ ABORTED((byte) 4), // aborted; failed but not yet completed
+ MISSING((byte) 5); // missing; treated as completed and failed below
+
+ private byte value;
+
+ State(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return value;
+ }
+
+ public boolean completed() {
+ switch (this) {
+ case INIT:
+ return false;
+ case STARTED:
+ return false;
+ case SUCCESS:
+ return true;
+ case FAILED:
+ return true;
+ case ABORTED:
+ return false;
+ case MISSING:
+ return true;
+ default:
+ assert false;
+ return true;
+ }
+ }
+
+ public boolean failed() {
+ switch (this) {
+ case INIT:
+ return false;
+ case STARTED:
+ return false;
+ case SUCCESS:
+ return false;
+ case FAILED:
+ return true;
+ case ABORTED:
+ return true;
+ case MISSING:
+ return true;
+ default:
+ assert false;
+ return false;
+ }
+ }
+
+ public static State fromValue(byte value) {
+ switch (value) {
+ case 0:
+ return INIT;
+ case 1:
+ return STARTED;
+ case 2:
+ return SUCCESS;
+ case 3:
+ return FAILED;
+ case 4:
+ return ABORTED;
+ case 5:
+ return MISSING;
+ default:
+ throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]");
+ }
+ }
+ }
+
+ private final ImmutableList<Entry> entries;
+
+
+ public SnapshotMetaData(ImmutableList<Entry> entries) {
+ this.entries = entries;
+ }
+
+ public SnapshotMetaData(Entry... entries) {
+ this.entries = ImmutableList.copyOf(entries);
+ }
+
+ public ImmutableList<Entry> entries() {
+ return this.entries;
+ }
+
+ public Entry snapshot(SnapshotId snapshotId) {
+ for (Entry entry : entries) {
+ if (snapshotId.equals(entry.snapshotId())) {
+ return entry;
+ }
+ }
+ return null;
+ }
+
+
+ public static class Factory implements MetaData.Custom.Factory<SnapshotMetaData> {
+
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ @Override
+ public SnapshotMetaData readFrom(StreamInput in) throws IOException {
+ Entry[] entries = new Entry[in.readVInt()];
+ for (int i = 0; i < entries.length; i++) {
+ SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
+ boolean includeGlobalState = in.readBoolean();
+ State state = State.fromValue(in.readByte());
+ int indices = in.readVInt();
+ ImmutableList.Builder<String> indexBuilder = ImmutableList.builder();
+ for (int j = 0; j < indices; j++) {
+ indexBuilder.add(in.readString());
+ }
+ ImmutableMap.Builder<ShardId, ShardSnapshotStatus> builder = ImmutableMap.<ShardId, ShardSnapshotStatus>builder();
+ int shards = in.readVInt();
+ for (int j = 0; j < shards; j++) {
+ ShardId shardId = ShardId.readShardId(in);
+ String nodeId = in.readOptionalString();
+ State shardState = State.fromValue(in.readByte());
+ builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
+ }
+ entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), builder.build());
+ }
+ return new SnapshotMetaData(entries);
+ }
+
+ @Override
+ public void writeTo(SnapshotMetaData repositories, StreamOutput out) throws IOException {
+ out.writeVInt(repositories.entries().size());
+ for (Entry entry : repositories.entries()) {
+ entry.snapshotId().writeTo(out);
+ out.writeBoolean(entry.includeGlobalState());
+ out.writeByte(entry.state().value());
+ out.writeVInt(entry.indices().size());
+ for (String index : entry.indices()) {
+ out.writeString(index);
+ }
+ out.writeVInt(entry.shards().size());
+ for (Map.Entry<ShardId, ShardSnapshotStatus> shardEntry : entry.shards().entrySet()) {
+ shardEntry.getKey().writeTo(out);
+ out.writeOptionalString(shardEntry.getValue().nodeId());
+ out.writeByte(shardEntry.getValue().state().value());
+ }
+ }
+ }
+
+ @Override
+ public SnapshotMetaData fromXContent(XContentParser parser) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void toXContent(SnapshotMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startArray("snapshots");
+ for (Entry entry : customIndexMetaData.entries()) {
+ toXContent(entry, builder, params);
+ }
+ builder.endArray();
+ }
+
+ public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("repository", entry.snapshotId().getRepository());
+ builder.field("snapshot", entry.snapshotId().getSnapshot());
+ builder.field("include_global_state", entry.includeGlobalState());
+ builder.field("state", entry.state());
+ builder.startArray("indices");
+ {
+ for (String index : entry.indices()) {
+ builder.value(index);
+ }
+ }
+ builder.endArray();
+ builder.startArray("shards");
+ {
+ for (Map.Entry<ShardId, ShardSnapshotStatus> shardEntry : entry.shards.entrySet()) {
+ ShardId shardId = shardEntry.getKey();
+ ShardSnapshotStatus status = shardEntry.getValue();
+ builder.startObject();
+ {
+ builder.field("index", shardId.getIndex());
+ builder.field("shard", shardId.getId());
+ builder.field("state", status.state());
+ builder.field("node", status.nodeId());
+ }
+ builder.endObject();
+ }
+ }
+
+ builder.endArray();
+ builder.endObject();
+ }
+
+ public boolean isPersistent() {
+ return false;
+ }
+
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
new file mode 100644
index 0000000..4db132c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.transport.TransportAddressSerializers;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Map;
+
+import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;
+
+/**
+ * A discovery node represents a node that is part of the cluster.
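+ * <p>
+ * A construction sketch (node id and port are illustrative; InetSocketTransportAddress
+ * is assumed available from org.elasticsearch.common.transport):
+ * <pre>{@code
+ * DiscoveryNode node = new DiscoveryNode("node_1",
+ *         new InetSocketTransportAddress("127.0.0.1", 9300), Version.CURRENT);
+ * }</pre>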
+ */
+public class DiscoveryNode implements Streamable, Serializable {
+
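+ /**
+ * Returns true when the node is configured to run locally (in-JVM), based on
+ * the "node.local" setting, or on "node.mode" being set to "local".
+ */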
+ public static boolean localNode(Settings settings) {
+ if (settings.get("node.local") != null) {
+ return settings.getAsBoolean("node.local", false);
+ }
+ if (settings.get("node.mode") != null) {
+ String nodeMode = settings.get("node.mode");
+ if ("local".equals(nodeMode)) {
+ return true;
+ } else if ("network".equals(nodeMode)) {
+ return false;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network].");
+ }
+ }
+ return false;
+ }
+
+ public static boolean nodeRequiresLocalStorage(Settings settings) {
+ return !(settings.getAsBoolean("node.client", false) || (!settings.getAsBoolean("node.data", true) && !settings.getAsBoolean("node.master", true)));
+ }
+
+ public static boolean clientNode(Settings settings) {
+ String client = settings.get("node.client");
+ return client != null && client.equals("true");
+ }
+
+ public static boolean masterNode(Settings settings) {
+ String master = settings.get("node.master");
+ if (master == null) {
+ return !clientNode(settings);
+ }
+ return master.equals("true");
+ }
+
+ public static boolean dataNode(Settings settings) {
+ String data = settings.get("node.data");
+ if (data == null) {
+ return !clientNode(settings);
+ }
+ return data.equals("true");
+ }
+
+ public static final ImmutableList<DiscoveryNode> EMPTY_LIST = ImmutableList.of();
+
+ private String nodeName = "";
+ private String nodeId;
+ private String hostName;
+ private String hostAddress;
+ private TransportAddress address;
+ private ImmutableMap<String, String> attributes;
+ private Version version = Version.CURRENT;
+
+ DiscoveryNode() {
+ }
+
+ public DiscoveryNode(String nodeId, TransportAddress address, Version version) {
+ this("", nodeId, address, ImmutableMap.<String, String>of(), version);
+ }
+
+ public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map<String, String> attributes, Version version) {
+ this(nodeName, nodeId, NetworkUtils.getLocalHostName(""), NetworkUtils.getLocalHostAddress(""), address, attributes, version);
+ }
+
+ public DiscoveryNode(String nodeName, String nodeId, String hostName, String hostAddress, TransportAddress address, Map<String, String> attributes, Version version) {
+ if (nodeName != null) {
+ this.nodeName = nodeName.intern();
+ }
+ ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
+ for (Map.Entry<String, String> entry : attributes.entrySet()) {
+ builder.put(entry.getKey().intern(), entry.getValue().intern());
+ }
+ this.attributes = builder.build();
+ this.nodeId = nodeId.intern();
+ this.hostName = hostName.intern();
+ this.hostAddress = hostAddress.intern();
+ this.address = address;
+ this.version = version;
+ }
+
+ /**
+ * Should this node form a connection to the provided node.
+ */
+ public boolean shouldConnectTo(DiscoveryNode otherNode) {
+ if (clientNode() && otherNode.clientNode()) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * The address that the node can be communicated with.
+ */
+ public TransportAddress address() {
+ return address;
+ }
+
+ /**
+ * The address that the node can be communicated with.
+ */
+ public TransportAddress getAddress() {
+ return address();
+ }
+
+ /**
+ * The unique id of the node.
+ */
+ public String id() {
+ return nodeId;
+ }
+
+ /**
+ * The unique id of the node.
+ */
+ public String getId() {
+ return id();
+ }
+
+ /**
+ * The name of the node.
+ */
+ public String name() {
+ return this.nodeName;
+ }
+
+ /**
+ * The name of the node.
+ */
+ public String getName() {
+ return name();
+ }
+
+ /**
+ * The node attributes.
+ */
+ public ImmutableMap<String, String> attributes() {
+ return this.attributes;
+ }
+
+ /**
+ * The node attributes.
+ */
+ public ImmutableMap<String, String> getAttributes() {
+ return attributes();
+ }
+
+ /**
+ * Should this node hold data (shards) or not.
+ */
+ public boolean dataNode() {
+ String data = attributes.get("data");
+ if (data == null) {
+ return !clientNode();
+ }
+ return data.equals("true");
+ }
+
+ /**
+ * Should this node hold data (shards) or not.
+ */
+ public boolean isDataNode() {
+ return dataNode();
+ }
+
+ /**
+ * Is the node a client node or not.
+ */
+ public boolean clientNode() {
+ String client = attributes.get("client");
+ return client != null && client.equals("true");
+ }
+
+ public boolean isClientNode() {
+ return clientNode();
+ }
+
+ /**
+ * Can this node become master or not.
+ */
+ public boolean masterNode() {
+ String master = attributes.get("master");
+ if (master == null) {
+ return !clientNode();
+ }
+ return master.equals("true");
+ }
+
+ /**
+ * Can this node become master or not.
+ */
+ public boolean isMasterNode() {
+ return masterNode();
+ }
+
+ public Version version() {
+ return this.version;
+ }
+
+ public String getHostName() {
+ return this.hostName;
+ }
+
+ public String getHostAddress() {
+ return this.hostAddress;
+ }
+
+ public Version getVersion() {
+ return this.version;
+ }
+
+ public static DiscoveryNode readNode(StreamInput in) throws IOException {
+ DiscoveryNode node = new DiscoveryNode();
+ node.readFrom(in);
+ return node;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ nodeName = in.readString().intern();
+ nodeId = in.readString().intern();
+ hostName = in.readString().intern();
+ hostAddress = in.readString().intern();
+ address = TransportAddressSerializers.addressFromStream(in);
+ int size = in.readVInt();
+ ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
+ for (int i = 0; i < size; i++) {
+ builder.put(in.readString().intern(), in.readString().intern());
+ }
+ attributes = builder.build();
+ version = Version.readVersion(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(nodeName);
+ out.writeString(nodeId);
+ out.writeString(hostName);
+ out.writeString(hostAddress);
+ addressToStream(out, address);
+ out.writeVInt(attributes.size());
+ for (Map.Entry<String, String> entry : attributes.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ Version.writeVersion(version, out);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof DiscoveryNode))
+ return false;
+
+ DiscoveryNode other = (DiscoveryNode) obj;
+ return this.nodeId.equals(other.nodeId);
+ }
+
+ @Override
+ public int hashCode() {
+ return nodeId.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ if (nodeName.length() > 0) {
+ sb.append('[').append(nodeName).append(']');
+ }
+ if (nodeId != null) {
+ sb.append('[').append(nodeId).append(']');
+ }
+ if (Strings.hasLength(hostName)) {
+ sb.append('[').append(hostName).append(']');
+ }
+ if (address != null) {
+ sb.append('[').append(address).append(']');
+ }
+ if (!attributes.isEmpty()) {
+ sb.append(attributes);
+ }
+ return sb.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
new file mode 100644
index 0000000..a0c7f08
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Filters for matching {@link DiscoveryNode}s by node id, name, host name, IP
+ * address, or custom attributes, combined across filter entries with AND or OR
+ * semantics.
+ */
+public class DiscoveryNodeFilters {
+
+ public static enum OpType {
+ AND,
+ OR
+ }
+
+ public static DiscoveryNodeFilters buildFromSettings(OpType opType, String prefix, Settings settings) {
+ return buildFromKeyValue(opType, settings.getByPrefix(prefix).getAsMap());
+ }
+
+ public static DiscoveryNodeFilters buildFromKeyValue(OpType opType, Map<String, String> filters) {
+ Map<String, String[]> bFilters = new HashMap<String, String[]>();
+ for (Map.Entry<String, String> entry : filters.entrySet()) {
+ String[] values = Strings.splitStringByCommaToArray(entry.getValue());
+ if (values.length > 0) {
+ bFilters.put(entry.getKey(), values);
+ }
+ }
+ if (bFilters.isEmpty()) {
+ return null;
+ }
+ return new DiscoveryNodeFilters(opType, bFilters);
+ }
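+
+ // A minimal usage sketch (the "_ip" pattern and the node variable are illustrative):
+ //
+ // Map<String, String> kv = new HashMap<String, String>();
+ // kv.put("_ip", "192.168.1.*");
+ // DiscoveryNodeFilters ipFilter = DiscoveryNodeFilters.buildFromKeyValue(OpType.AND, kv);
+ // boolean matched = ipFilter.match(node); // true when node's host address matches the pattern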
+
+ private final Map<String, String[]> filters;
+
+ private final OpType opType;
+
+ DiscoveryNodeFilters(OpType opType, Map<String, String[]> filters) {
+ this.opType = opType;
+ this.filters = filters;
+ }
+
+ public boolean match(DiscoveryNode node) {
+ for (Map.Entry<String, String[]> entry : filters.entrySet()) {
+ String attr = entry.getKey();
+ String[] values = entry.getValue();
+ if ("_ip".equals(attr)) {
+ for (String value : values) {
+ if (Regex.simpleMatch(value, node.getHostAddress())) {
+ if (opType == OpType.OR) {
+ return true;
+ }
+ } else {
+ if (opType == OpType.AND) {
+ return false;
+ }
+ }
+ }
+ } else if ("_host".equals(attr)) {
+ for (String value : values) {
+ if (Regex.simpleMatch(value, node.getHostName())) {
+ if (opType == OpType.OR) {
+ return true;
+ }
+ } else {
+ if (opType == OpType.AND) {
+ return false;
+ }
+ }
+ if (Regex.simpleMatch(value, node.getHostAddress())) {
+ if (opType == OpType.OR) {
+ return true;
+ }
+ } else {
+ if (opType == OpType.AND) {
+ return false;
+ }
+ }
+ }
+ } else if ("_id".equals(attr)) {
+ for (String value : values) {
+ if (node.id().equals(value)) {
+ if (opType == OpType.OR) {
+ return true;
+ }
+ } else {
+ if (opType == OpType.AND) {
+ return false;
+ }
+ }
+ }
+ } else if ("_name".equals(attr) || "name".equals(attr)) {
+ for (String value : values) {
+ if (Regex.simpleMatch(value, node.name())) {
+ if (opType == OpType.OR) {
+ return true;
+ }
+ } else {
+ if (opType == OpType.AND) {
+ return false;
+ }
+ }
+ }
+ } else {
+ String nodeAttributeValue = node.attributes().get(attr);
+ if (nodeAttributeValue == null) {
+ if (opType == OpType.AND) {
+ return false;
+ } else {
+ continue;
+ }
+ }
+ for (String value : values) {
+ if (Regex.simpleMatch(value, nodeAttributeValue)) {
+ if (opType == OpType.OR) {
+ return true;
+ }
+ } else {
+ if (opType == OpType.AND) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ if (opType == OpType.OR) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /**
+ * Generates a human-readable string for the DiscoveryNodeFilters.
+ * Example: {@code _id:"id1 OR blah",name:"blah OR name2"}
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ int entryCount = filters.size();
+ for (Map.Entry<String, String[]> entry : filters.entrySet()) {
+ String attr = entry.getKey();
+ String[] values = entry.getValue();
+ sb.append(attr);
+ sb.append(":\"");
+ int valueCount = values.length;
+ for (String value : values) {
+ sb.append(value);
+ if (valueCount > 1) {
+ sb.append(" " + opType.toString() + " ");
+ }
+ valueCount--;
+ }
+ sb.append("\"");
+ if (entryCount > 1) {
+ sb.append(",");
+ }
+ entryCount--;
+ }
+ return sb.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java
new file mode 100644
index 0000000..2deb9b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * Builds the attributes for the local discovery node from the "node." settings,
+ * augmented by any registered {@link CustomAttributesProvider}s.
+ */
+public class DiscoveryNodeService extends AbstractComponent {
+
+ private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<CustomAttributesProvider>();
+
+ @Inject
+ public DiscoveryNodeService(Settings settings) {
+ super(settings);
+ }
+
+ public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) {
+ customAttributesProviders.add(customAttributesProvider);
+ return this;
+ }
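+
+ // A hypothetical provider sketch: contributes a "rack" attribute, which
+ // buildAttributes() keeps only if the node settings did not already define it.
+ //
+ // service.addCustomAttributeProvider(new CustomAttributesProvider() {
+ // public Map<String, String> buildAttributes() {
+ // return Collections.singletonMap("rack", "r1");
+ // }
+ // });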
+
+ public Map<String, String> buildAttributes() {
+ Map<String, String> attributes = Maps.newHashMap(settings.getByPrefix("node.").getAsMap());
+ attributes.remove("name"); // name is extracted in other places
+ if (attributes.containsKey("client")) {
+ if (attributes.get("client").equals("false")) {
+ attributes.remove("client"); // this is the default
+ } else {
+ // if we are client node, don't store data ...
+ attributes.put("data", "false");
+ }
+ }
+ if (attributes.containsKey("data")) {
+ if (attributes.get("data").equals("true")) {
+ attributes.remove("data");
+ }
+ }
+
+ for (CustomAttributesProvider provider : customAttributesProviders) {
+ try {
+ Map<String, String> customAttributes = provider.buildAttributes();
+ if (customAttributes != null) {
+ for (Map.Entry<String, String> entry : customAttributes.entrySet()) {
+ if (!attributes.containsKey(entry.getKey())) {
+ attributes.put(entry.getKey(), entry.getValue());
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to build custom attributes from provider [{}]", e, provider);
+ }
+ }
+
+ return attributes;
+ }
+
+ public static interface CustomAttributesProvider {
+
+ Map<String, String> buildAttributes();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
new file mode 100644
index 0000000..95df914
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
@@ -0,0 +1,638 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.UnmodifiableIterator;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to
+ * access, modify, merge, and diff discovery nodes.
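+ * <p>
+ * A construction sketch ({@code localNode} and {@code masterNode} are illustrative
+ * DiscoveryNode instances):
+ * <pre>{@code
+ * DiscoveryNodes nodes = DiscoveryNodes.builder()
+ *         .put(localNode).put(masterNode)
+ *         .localNodeId(localNode.id())
+ *         .masterNodeId(masterNode.id())
+ *         .build();
+ * assert !nodes.localNodeMaster();
+ * }</pre>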
+ */
+public class DiscoveryNodes implements Iterable<DiscoveryNode> {
+
+ public static final DiscoveryNodes EMPTY_NODES = builder().build();
+
+ private final ImmutableOpenMap<String, DiscoveryNode> nodes;
+ private final ImmutableOpenMap<String, DiscoveryNode> dataNodes;
+ private final ImmutableOpenMap<String, DiscoveryNode> masterNodes;
+
+ private final String masterNodeId;
+ private final String localNodeId;
+
+ private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes, ImmutableOpenMap<String, DiscoveryNode> masterNodes, String masterNodeId, String localNodeId) {
+ this.nodes = nodes;
+ this.dataNodes = dataNodes;
+ this.masterNodes = masterNodes;
+ this.masterNodeId = masterNodeId;
+ this.localNodeId = localNodeId;
+ }
+
+ @Override
+ public UnmodifiableIterator<DiscoveryNode> iterator() {
+ return nodes.valuesIt();
+ }
+
+ /**
+ * Whether this is a valid set of nodes with the minimal information. The minimal set is defined
+ * by the localNodeId being set.
+ */
+ public boolean valid() {
+ return localNodeId != null;
+ }
+
+ /**
+ * Returns <tt>true</tt> if the local node is the master node.
+ */
+ public boolean localNodeMaster() {
+ if (localNodeId == null) {
+ // we don't know yet the local node id, return false
+ return false;
+ }
+ return localNodeId.equals(masterNodeId);
+ }
+
+ /**
+ * Get the number of known nodes
+ *
+ * @return number of nodes
+ */
+ public int size() {
+ return nodes.size();
+ }
+
+ /**
+ * Get the number of known nodes
+ *
+ * @return number of nodes
+ */
+ public int getSize() {
+ return size();
+ }
+
+ /**
+ * Get a {@link Map} of the discovered nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> nodes() {
+ return this.nodes;
+ }
+
+ /**
+ * Get a {@link Map} of the discovered nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> getNodes() {
+ return nodes();
+ }
+
+ /**
+ * Get a {@link Map} of the discovered data nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered data nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> dataNodes() {
+ return this.dataNodes;
+ }
+
+ /**
+ * Get a {@link Map} of the discovered data nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered data nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> getDataNodes() {
+ return dataNodes();
+ }
+
+ /**
+ * Get a {@link Map} of the discovered master nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered master nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> masterNodes() {
+ return this.masterNodes;
+ }
+
+ /**
+ * Get a {@link Map} of the discovered master nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered master nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> getMasterNodes() {
+ return masterNodes();
+ }
+
+ /**
+ * Get a {@link Map} of the discovered master and data nodes arranged by their ids
+ *
+ * @return {@link Map} of the discovered master and data nodes arranged by their ids
+ */
+ public ImmutableOpenMap<String, DiscoveryNode> masterAndDataNodes() {
+ ImmutableOpenMap.Builder<String, DiscoveryNode> nodes = ImmutableOpenMap.builder(dataNodes);
+ nodes.putAll(masterNodes);
+ return nodes.build();
+ }
+
+ /**
+ * Get a node by its id
+ *
+ * @param nodeId id of the wanted node
+ * @return wanted node if it exists. Otherwise <code>null</code>
+ */
+ public DiscoveryNode get(String nodeId) {
+ return nodes.get(nodeId);
+ }
+
+ /**
+ * Determine if a given node exists
+ *
+ * @param nodeId id of the node whose existence should be verified
+ * @return <code>true</code> if the node exists. Otherwise <code>false</code>
+ */
+ public boolean nodeExists(String nodeId) {
+ return nodes.containsKey(nodeId);
+ }
+
+ /**
+ * Get the id of the master node
+ *
+ * @return id of the master
+ */
+ public String masterNodeId() {
+ return this.masterNodeId;
+ }
+
+ /**
+ * Get the id of the master node
+ *
+ * @return id of the master
+ */
+ public String getMasterNodeId() {
+ return masterNodeId();
+ }
+
+ /**
+ * Get the id of the local node
+ *
+ * @return id of the local node
+ */
+ public String localNodeId() {
+ return this.localNodeId;
+ }
+
+ /**
+ * Get the id of the local node
+ *
+ * @return id of the local node
+ */
+ public String getLocalNodeId() {
+ return localNodeId();
+ }
+
+ /**
+ * Get the local node
+ *
+ * @return local node
+ */
+ public DiscoveryNode localNode() {
+ return nodes.get(localNodeId);
+ }
+
+ /**
+ * Get the local node
+ *
+ * @return local node
+ */
+ public DiscoveryNode getLocalNode() {
+ return localNode();
+ }
+
+ /**
+ * Get the master node
+ *
+ * @return master node
+ */
+ public DiscoveryNode masterNode() {
+ return nodes.get(masterNodeId);
+ }
+
+ /**
+ * Get the master node
+ *
+ * @return master node
+ */
+ public DiscoveryNode getMasterNode() {
+ return masterNode();
+ }
+
+ /**
+ * Get a node by its address
+ *
+ * @param address {@link TransportAddress} of the wanted node
+ * @return node identified by the given address or <code>null</code> if no such node exists
+ */
+ public DiscoveryNode findByAddress(TransportAddress address) {
+ for (ObjectCursor<DiscoveryNode> cursor : nodes.values()) {
+ DiscoveryNode node = cursor.value;
+ if (node.address().equals(address)) {
+ return node;
+ }
+ }
+ return null;
+ }
+
+ public boolean isAllNodes(String... nodesIds) {
+ return nodesIds == null || nodesIds.length == 0 || (nodesIds.length == 1 && nodesIds[0].equals("_all"));
+ }
+
+ /**
+ * Resolve a node with a given id
+ *
+ * @param node id of the node to discover
+ * @return discovered node matching the given id
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if more than one node matches the request or no nodes have been resolved
+ */
+ public DiscoveryNode resolveNode(String node) {
+ String[] resolvedNodeIds = resolveNodesIds(node);
+ if (resolvedNodeIds.length > 1) {
+ throw new ElasticsearchIllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node");
+ }
+ if (resolvedNodeIds.length == 0) {
+ throw new ElasticsearchIllegalArgumentException("failed to resolve [" + node + " ], no matching nodes");
+ }
+ return nodes.get(resolvedNodeIds[0]);
+ }
+
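+ /**
+ * Resolves node descriptions to concrete node ids. Per the implementation below, a
+ * description may be "_all" (or an empty array, matching every node), "_local",
+ * "_master", an exact node id, a wildcard matched against node names, host names,
+ * and host addresses, a "data:true|false" or "master:true|false" selector, or an
+ * "attribute:value" pair matched with wildcards against custom node attributes.
+ */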
+ public String[] resolveNodesIds(String... nodesIds) {
+ if (isAllNodes(nodesIds)) {
+ int index = 0;
+ nodesIds = new String[nodes.size()];
+ for (DiscoveryNode node : this) {
+ nodesIds[index++] = node.id();
+ }
+ return nodesIds;
+ } else {
+ ObjectOpenHashSet<String> resolvedNodesIds = new ObjectOpenHashSet<String>(nodesIds.length);
+ for (String nodeId : nodesIds) {
+ if (nodeId.equals("_local")) {
+ String localNodeId = localNodeId();
+ if (localNodeId != null) {
+ resolvedNodesIds.add(localNodeId);
+ }
+ } else if (nodeId.equals("_master")) {
+ String masterNodeId = masterNodeId();
+ if (masterNodeId != null) {
+ resolvedNodesIds.add(masterNodeId);
+ }
+ } else if (nodeExists(nodeId)) {
+ resolvedNodesIds.add(nodeId);
+ } else {
+ // not a node id, try and search by name
+ for (DiscoveryNode node : this) {
+ if (Regex.simpleMatch(nodeId, node.name())) {
+ resolvedNodesIds.add(node.id());
+ }
+ }
+ for (DiscoveryNode node : this) {
+ if (Regex.simpleMatch(nodeId, node.getHostAddress())) {
+ resolvedNodesIds.add(node.id());
+ } else if (Regex.simpleMatch(nodeId, node.getHostName())) {
+ resolvedNodesIds.add(node.id());
+ }
+ }
+ int index = nodeId.indexOf(':');
+ if (index != -1) {
+ String matchAttrName = nodeId.substring(0, index);
+ String matchAttrValue = nodeId.substring(index + 1);
+ if ("data".equals(matchAttrName)) {
+ if (Booleans.parseBoolean(matchAttrValue, true)) {
+ resolvedNodesIds.addAll(dataNodes.keys());
+ } else {
+ resolvedNodesIds.removeAll(dataNodes.keys());
+ }
+ } else if ("master".equals(matchAttrName)) {
+ if (Booleans.parseBoolean(matchAttrValue, true)) {
+ resolvedNodesIds.addAll(masterNodes.keys());
+ } else {
+ resolvedNodesIds.removeAll(masterNodes.keys());
+ }
+ } else {
+ for (DiscoveryNode node : this) {
+ for (Map.Entry<String, String> entry : node.attributes().entrySet()) {
+ String attrName = entry.getKey();
+ String attrValue = entry.getValue();
+ if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) {
+ resolvedNodesIds.add(node.id());
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return resolvedNodesIds.toArray(String.class);
+ }
+ }
+
+ public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
+ Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
+ for (DiscoveryNode node : this) {
+ if (newNodes.contains(node.id())) {
+ builder.put(node);
+ }
+ }
+ return builder.build();
+ }
+
+ public DiscoveryNodes newNode(DiscoveryNode node) {
+ return new Builder(this).put(node).build();
+ }
+
+ /**
+ * Returns the changes comparing this nodes to the provided nodes.
+ */
+ public Delta delta(DiscoveryNodes other) {
+ List<DiscoveryNode> removed = newArrayList();
+ List<DiscoveryNode> added = newArrayList();
+ for (DiscoveryNode node : other) {
+ if (!this.nodeExists(node.id())) {
+ removed.add(node);
+ }
+ }
+ for (DiscoveryNode node : this) {
+ if (!other.nodeExists(node.id())) {
+ added.add(node);
+ }
+ }
+ DiscoveryNode previousMasterNode = null;
+ DiscoveryNode newMasterNode = null;
+ if (masterNodeId != null) {
+ if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
+ previousMasterNode = other.masterNode();
+ newMasterNode = masterNode();
+ }
+ }
+ return new Delta(previousMasterNode, newMasterNode, localNodeId, ImmutableList.copyOf(removed), ImmutableList.copyOf(added));
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{");
+ for (DiscoveryNode node : this) {
+ sb.append(node).append(',');
+ }
+ sb.append("}");
+ return sb.toString();
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("nodes: \n");
+ for (DiscoveryNode node : this) {
+ sb.append(" ").append(node);
+ if (node == localNode()) {
+ sb.append(", local");
+ }
+ if (node == masterNode()) {
+ sb.append(", master");
+ }
+ sb.append("\n");
+ }
+ return sb.toString();
+ }
+
+ public Delta emptyDelta() {
+ return new Delta(null, null, localNodeId, DiscoveryNode.EMPTY_LIST, DiscoveryNode.EMPTY_LIST);
+ }
+
+ public static class Delta {
+
+ private final String localNodeId;
+ private final DiscoveryNode previousMasterNode;
+ private final DiscoveryNode newMasterNode;
+ private final ImmutableList<DiscoveryNode> removed;
+ private final ImmutableList<DiscoveryNode> added;
+
+ public Delta(String localNodeId, ImmutableList<DiscoveryNode> removed, ImmutableList<DiscoveryNode> added) {
+ this(null, null, localNodeId, removed, added);
+ }
+
+ public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, ImmutableList<DiscoveryNode> removed, ImmutableList<DiscoveryNode> added) {
+ this.previousMasterNode = previousMasterNode;
+ this.newMasterNode = newMasterNode;
+ this.localNodeId = localNodeId;
+ this.removed = removed;
+ this.added = added;
+ }
+
+ public boolean hasChanges() {
+ return masterNodeChanged() || !removed.isEmpty() || !added.isEmpty();
+ }
+
+ public boolean masterNodeChanged() {
+ return newMasterNode != null;
+ }
+
+ public DiscoveryNode previousMasterNode() {
+ return previousMasterNode;
+ }
+
+ public DiscoveryNode newMasterNode() {
+ return newMasterNode;
+ }
+
+ public boolean removed() {
+ return !removed.isEmpty();
+ }
+
+ public ImmutableList<DiscoveryNode> removedNodes() {
+ return removed;
+ }
+
+ public boolean added() {
+ return !added.isEmpty();
+ }
+
+ public ImmutableList<DiscoveryNode> addedNodes() {
+ return added;
+ }
+
+ public String shortSummary() {
+ StringBuilder sb = new StringBuilder();
+ if (!removed() && masterNodeChanged()) {
+ if (newMasterNode.id().equals(localNodeId)) {
+ // we are the master, no nodes were removed, we are actually the first master
+ sb.append("new_master ").append(newMasterNode());
+ } else {
+ // we are not the master, so we just got this event. No nodes were removed, so it's not a *new* master
+ sb.append("detected_master ").append(newMasterNode());
+ }
+ } else {
+ if (masterNodeChanged()) {
+ sb.append("master {new ").append(newMasterNode());
+ if (previousMasterNode() != null) {
+ sb.append(", previous ").append(previousMasterNode());
+ }
+ sb.append("}");
+ }
+ if (removed()) {
+ if (masterNodeChanged()) {
+ sb.append(", ");
+ }
+ sb.append("removed {");
+ for (DiscoveryNode node : removedNodes()) {
+ sb.append(node).append(',');
+ }
+ sb.append("}");
+ }
+ }
+ if (added()) {
+ // don't print if there is one added, and it is us
+ if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
+ if (removed() || masterNodeChanged()) {
+ sb.append(", ");
+ }
+ sb.append("added {");
+ for (DiscoveryNode node : addedNodes()) {
+ if (!node.id().equals(localNodeId)) {
+ // don't print ourself
+ sb.append(node).append(',');
+ }
+ }
+ sb.append("}");
+ }
+ }
+ return sb.toString();
+ }
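+
+ // Illustrative outputs of shortSummary(), derived from the branches above: a
+ // node that just formed a fresh cluster prints "new_master [<node>]", a node
+ // joining an established cluster prints "detected_master [<node>]", and
+ // membership changes print segments like "removed {[n3],}, added {[n4],}".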
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static Builder builder(DiscoveryNodes nodes) {
+ return new Builder(nodes);
+ }
+
+ public static class Builder {
+
+ private final ImmutableOpenMap.Builder<String, DiscoveryNode> nodes;
+ private String masterNodeId;
+ private String localNodeId;
+
+ public Builder() {
+ nodes = ImmutableOpenMap.builder();
+ }
+
+ public Builder(DiscoveryNodes nodes) {
+ this.masterNodeId = nodes.masterNodeId();
+ this.localNodeId = nodes.localNodeId();
+ this.nodes = ImmutableOpenMap.builder(nodes.nodes());
+ }
+
+ public Builder put(DiscoveryNode node) {
+ nodes.put(node.id(), node);
+ return this;
+ }
+
+ public Builder remove(String nodeId) {
+ nodes.remove(nodeId);
+ return this;
+ }
+
+ public Builder masterNodeId(String masterNodeId) {
+ this.masterNodeId = masterNodeId;
+ return this;
+ }
+
+ public Builder localNodeId(String localNodeId) {
+ this.localNodeId = localNodeId;
+ return this;
+ }
+
+ public DiscoveryNodes build() {
+ ImmutableOpenMap.Builder<String, DiscoveryNode> dataNodesBuilder = ImmutableOpenMap.builder();
+ ImmutableOpenMap.Builder<String, DiscoveryNode> masterNodesBuilder = ImmutableOpenMap.builder();
+ for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {
+ if (nodeEntry.value.dataNode()) {
+ dataNodesBuilder.put(nodeEntry.key, nodeEntry.value);
+ }
+ if (nodeEntry.value.masterNode()) {
+ masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
+ }
+ }
+ return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId);
+ }
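+
+ // Minimal builder sketch (illustrative only; assumes DiscoveryNode instances
+ // n1 and n2 were obtained elsewhere):
+ //
+ //   DiscoveryNodes nodes = DiscoveryNodes.builder()
+ //           .put(n1).put(n2)
+ //           .localNodeId(n1.id())
+ //           .masterNodeId(n2.id())
+ //           .build();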
+
+ public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException {
+ if (nodes.masterNodeId() == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(nodes.masterNodeId);
+ }
+ out.writeVInt(nodes.size());
+ for (DiscoveryNode node : nodes) {
+ node.writeTo(out);
+ }
+ }
+
+ public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
+ Builder builder = new Builder();
+ if (in.readBoolean()) {
+ builder.masterNodeId(in.readString());
+ }
+ if (localNode != null) {
+ builder.localNodeId(localNode.id());
+ }
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ DiscoveryNode node = DiscoveryNode.readNode(in);
+ if (localNode != null && node.id().equals(localNode.id())) {
+ // reuse the same instance of our address and local node id for faster equality
+ node = localNode;
+ }
+ builder.put(node);
+ }
+ return builder.build();
+ }
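+
+ // Wire format used by writeTo()/readFrom(), for reference: an optional master
+ // node id (boolean presence flag plus string), a vint node count, then each
+ // DiscoveryNode serialized in full. On the read side the caller's localNode
+ // instance is swapped in when the ids match, keeping identity comparisons
+ // against the local node cheap.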
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/GroupShardsIterator.java b/src/main/java/org/elasticsearch/cluster/routing/GroupShardsIterator.java
new file mode 100644
index 0000000..384f96b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/GroupShardsIterator.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import java.util.Collection;
+import java.util.Iterator;
+
+/**
+ * This class represents a collection of {@link ShardIterator}s. Each {@link ShardIterator}
+ * iterated by this {@link Iterable} represents a group of shards.
+ */
+public class GroupShardsIterator implements Iterable<ShardIterator> {
+
+ private final Collection<ShardIterator> iterators;
+
+ public GroupShardsIterator(Collection<ShardIterator> iterators) {
+ this.iterators = iterators;
+ }
+
+ /**
+ * Returns the total number of shards within all groups
+ * @return total number of shards
+ */
+ public int totalSize() {
+ int size = 0;
+ for (ShardIterator shard : iterators) {
+ size += shard.size();
+ }
+ return size;
+ }
+
+ /**
+ * Returns the total number of shards, counting each empty group as one
+ * @return the total number of shards plus the number of empty groups
+ */
+ public int totalSizeWith1ForEmpty() {
+ int size = 0;
+ for (ShardIterator shard : iterators) {
+ int sizeActive = shard.size();
+ if (sizeActive == 0) {
+ size += 1;
+ } else {
+ size += sizeActive;
+ }
+ }
+ return size;
+ }
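+
+ // Worked example (illustrative): for groups of sizes {2, 0, 3}, totalSize()
+ // returns 5 while totalSizeWith1ForEmpty() returns 6, because an empty group
+ // still counts as one slot (useful when callers must account for a response
+ // per group even if a group currently has no shards).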
+
+ /**
+ * Returns the number of groups
+ * @return number of groups
+ */
+ public int size() {
+ return iterators.size();
+ }
+
+ /**
+ * Returns all group iterators
+ * @return all group iterators
+ */
+ public Collection<ShardIterator> iterators() {
+ return iterators;
+ }
+
+ @Override
+ public Iterator<ShardIterator> iterator() {
+ return iterators.iterator();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java b/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java
new file mode 100644
index 0000000..d2b395b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+/**
+ * Thrown when a shard routing is in an illegal state.
+ */
+public class IllegalShardRoutingStateException extends RoutingException {
+
+ private final ShardRouting shard;
+
+ public IllegalShardRoutingStateException(ShardRouting shard, String message) {
+ this(shard, message, null);
+ }
+
+ public IllegalShardRoutingStateException(ShardRouting shard, String message, Throwable cause) {
+ super(shard.shortSummary() + ": " + message, cause);
+ this.shard = shard;
+ }
+
+ /**
+ * Returns the shard instance referenced by this exception
+ * @return shard instance referenced by this exception
+ */
+ public ShardRouting shard() {
+ return shard;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ImmutableShardRouting.java b/src/main/java/org/elasticsearch/cluster/routing/ImmutableShardRouting.java
new file mode 100644
index 0000000..d4616f9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/ImmutableShardRouting.java
@@ -0,0 +1,351 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * {@link ImmutableShardRouting} immutably encapsulates information about shard
+ * routings like id, state, version, etc.
+ */
+public class ImmutableShardRouting implements Streamable, Serializable, ShardRouting {
+
+ protected String index;
+
+ protected int shardId;
+
+ protected String currentNodeId;
+
+ protected String relocatingNodeId;
+
+ protected boolean primary;
+
+ protected ShardRoutingState state;
+
+ protected long version;
+
+ private transient ShardId shardIdentifier;
+
+ protected RestoreSource restoreSource;
+
+ private final transient ImmutableList<ShardRouting> asList;
+
+ ImmutableShardRouting() {
+ this.asList = ImmutableList.of((ShardRouting) this);
+ }
+
+ public ImmutableShardRouting(ShardRouting copy) {
+ this(copy.index(), copy.id(), copy.currentNodeId(), copy.primary(), copy.state(), copy.version());
+ this.relocatingNodeId = copy.relocatingNodeId();
+ this.restoreSource = copy.restoreSource();
+ if (copy instanceof ImmutableShardRouting) {
+ this.shardIdentifier = ((ImmutableShardRouting) copy).shardIdentifier;
+ }
+ }
+
+ public ImmutableShardRouting(ShardRouting copy, long version) {
+ this(copy.index(), copy.id(), copy.currentNodeId(), copy.primary(), copy.state(), copy.version());
+ this.relocatingNodeId = copy.relocatingNodeId();
+ this.restoreSource = copy.restoreSource();
+ this.version = version;
+ if (copy instanceof ImmutableShardRouting) {
+ this.shardIdentifier = ((ImmutableShardRouting) copy).shardIdentifier;
+ }
+ }
+
+ public ImmutableShardRouting(String index, int shardId, String currentNodeId,
+ String relocatingNodeId, boolean primary, ShardRoutingState state, long version) {
+ this(index, shardId, currentNodeId, primary, state, version);
+ this.relocatingNodeId = relocatingNodeId;
+ }
+
+ public ImmutableShardRouting(String index, int shardId, String currentNodeId,
+ String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) {
+ this(index, shardId, currentNodeId, relocatingNodeId, primary, state, version);
+ this.restoreSource = restoreSource;
+ }
+
+ public ImmutableShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state, long version) {
+ this.index = index;
+ this.shardId = shardId;
+ this.currentNodeId = currentNodeId;
+ this.primary = primary;
+ this.state = state;
+ this.asList = ImmutableList.of((ShardRouting) this);
+ this.version = version;
+ }
+
+ @Override
+ public String index() {
+ return this.index;
+ }
+
+ @Override
+ public String getIndex() {
+ return index();
+ }
+
+ @Override
+ public int id() {
+ return this.shardId;
+ }
+
+ @Override
+ public int getId() {
+ return id();
+ }
+
+ @Override
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public boolean unassigned() {
+ return state == ShardRoutingState.UNASSIGNED;
+ }
+
+ @Override
+ public boolean initializing() {
+ return state == ShardRoutingState.INITIALIZING;
+ }
+
+ @Override
+ public boolean active() {
+ return started() || relocating();
+ }
+
+ @Override
+ public boolean started() {
+ return state == ShardRoutingState.STARTED;
+ }
+
+ @Override
+ public boolean relocating() {
+ return state == ShardRoutingState.RELOCATING;
+ }
+
+ @Override
+ public boolean assignedToNode() {
+ return currentNodeId != null;
+ }
+
+ @Override
+ public String currentNodeId() {
+ return this.currentNodeId;
+ }
+
+ @Override
+ public String relocatingNodeId() {
+ return this.relocatingNodeId;
+ }
+
+ @Override
+ public RestoreSource restoreSource() {
+ return restoreSource;
+ }
+
+ @Override
+ public boolean primary() {
+ return this.primary;
+ }
+
+ @Override
+ public ShardRoutingState state() {
+ return this.state;
+ }
+
+ @Override
+ public ShardId shardId() {
+ if (shardIdentifier != null) {
+ return shardIdentifier;
+ }
+ shardIdentifier = new ShardId(index, shardId);
+ return shardIdentifier;
+ }
+
+ @Override
+ public ShardIterator shardsIt() {
+ return new PlainShardIterator(shardId(), asList);
+ }
+
+ public static ImmutableShardRouting readShardRoutingEntry(StreamInput in) throws IOException {
+ ImmutableShardRouting entry = new ImmutableShardRouting();
+ entry.readFrom(in);
+ return entry;
+ }
+
+ public static ImmutableShardRouting readShardRoutingEntry(StreamInput in, String index, int shardId) throws IOException {
+ ImmutableShardRouting entry = new ImmutableShardRouting();
+ entry.readFrom(in, index, shardId);
+ return entry;
+ }
+
+ public void readFrom(StreamInput in, String index, int shardId) throws IOException {
+ this.index = index;
+ this.shardId = shardId;
+ readFromThin(in);
+ }
+
+ @Override
+ public void readFromThin(StreamInput in) throws IOException {
+ version = in.readLong();
+ if (in.readBoolean()) {
+ currentNodeId = in.readString();
+ }
+
+ if (in.readBoolean()) {
+ relocatingNodeId = in.readString();
+ }
+
+ primary = in.readBoolean();
+ state = ShardRoutingState.fromValue(in.readByte());
+
+ restoreSource = RestoreSource.readOptionalRestoreSource(in);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ readFrom(in, in.readString(), in.readVInt());
+ }
+
+ /**
+ * Writes shard information to {@link StreamOutput} without writing index name and shard id
+ *
+ * @param out {@link StreamOutput} to write shard information to
+ * @throws IOException if an I/O error occurs while writing
+ */
+ public void writeToThin(StreamOutput out) throws IOException {
+ out.writeLong(version);
+ if (currentNodeId != null) {
+ out.writeBoolean(true);
+ out.writeString(currentNodeId);
+ } else {
+ out.writeBoolean(false);
+ }
+
+ if (relocatingNodeId != null) {
+ out.writeBoolean(true);
+ out.writeString(relocatingNodeId);
+ } else {
+ out.writeBoolean(false);
+ }
+
+ out.writeBoolean(primary);
+ out.writeByte(state.value());
+
+ if (restoreSource != null) {
+ out.writeBoolean(true);
+ restoreSource.writeTo(out);
+ } else {
+ out.writeBoolean(false);
+ }
+ }
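+
+ // Thin wire layout produced above, for reference: version (long), optional
+ // currentNodeId and relocatingNodeId (each a boolean presence flag followed
+ // by a string), primary (boolean), state (byte), and an optional
+ // RestoreSource. The full form in writeTo() merely prefixes the index name
+ // and shard id.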
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(index);
+ out.writeVInt(shardId);
+ writeToThin(out);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ // we check on instanceof so we also handle the MutableShardRouting case as well
+ if (o == null || !(o instanceof ImmutableShardRouting)) return false;
+
+ ImmutableShardRouting that = (ImmutableShardRouting) o;
+
+ if (primary != that.primary) return false;
+ if (shardId != that.shardId) return false;
+ if (currentNodeId != null ? !currentNodeId.equals(that.currentNodeId) : that.currentNodeId != null)
+ return false;
+ if (index != null ? !index.equals(that.index) : that.index != null) return false;
+ if (relocatingNodeId != null ? !relocatingNodeId.equals(that.relocatingNodeId) : that.relocatingNodeId != null)
+ return false;
+ if (state != that.state) return false;
+ if (restoreSource != null ? !restoreSource.equals(that.restoreSource) : that.restoreSource != null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = index != null ? index.hashCode() : 0;
+ result = 31 * result + shardId;
+ result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0);
+ result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
+ result = 31 * result + (primary ? 1 : 0);
+ result = 31 * result + (state != null ? state.hashCode() : 0);
+ result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return shortSummary();
+ }
+
+ @Override
+ public String shortSummary() {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[').append(index).append(']').append('[').append(shardId).append(']');
+ sb.append(", node[").append(currentNodeId).append("], ");
+ if (relocatingNodeId != null) {
+ sb.append("relocating [").append(relocatingNodeId).append("], ");
+ }
+ if (primary) {
+ sb.append("[P]");
+ } else {
+ sb.append("[R]");
+ }
+ if (this.restoreSource != null) {
+ sb.append(", restoring[" + restoreSource + "]");
+ }
+ sb.append(", s[").append(state).append("]");
+ return sb.toString();
+ }
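+
+ // Example output (illustrative, derived from the appends above): a started
+ // primary on node "node1" renders as
+ //   [my_index][0], node[node1], [P], s[STARTED]
+ // with ", relocating [<node>], " and ", restoring[<source>]" inserted when set.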
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject()
+ .field("state", state())
+ .field("primary", primary())
+ .field("node", currentNodeId())
+ .field("relocating_node", relocatingNodeId())
+ .field("shard", shardId().id())
+ .field("index", shardId().index().name());
+ if (restoreSource() != null) {
+ builder.field("restore_source");
+ restoreSource().toXContent(builder, params);
+ }
+ return builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
new file mode 100644
index 0000000..e18a350
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
@@ -0,0 +1,526 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.carrotsearch.hppc.cursors.IntCursor;
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Sets;
+import com.google.common.collect.UnmodifiableIterator;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.collect.ImmutableOpenIntMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * The {@link IndexRoutingTable} represents routing information for a single
+ * index. The routing table maintains a list of all shards in the index. A
+ * single shard in this context has one or more instances, namely exactly one
+ * {@link ShardRouting#primary() primary} and zero or more replicas. In other
+ * words, each instance of a shard is considered a replica while only one
+ * replica per shard is a <tt>primary</tt> replica. The <tt>primary</tt> replica
+ * can be seen as the "leader" of the shard acting as the primary entry point
+ * for operations on a specific shard.
+ * <p>
+ * Note: The term replica is not directly
+ * reflected in the routing table or in related classes; replicas are
+ * represented as {@link ShardRouting} instances.
+ * </p>
+ */
+public class IndexRoutingTable implements Iterable<IndexShardRoutingTable> {
+
+ private final String index;
+ private final ShardShuffler shuffler;
+
+ // note, we assume that when the index routing is created, ShardRoutings are created for all possible number of
+ // shards with state set to UNASSIGNED
+ private final ImmutableOpenIntMap<IndexShardRoutingTable> shards;
+
+ private final ImmutableList<ShardRouting> allShards;
+ private final ImmutableList<ShardRouting> allActiveShards;
+
+ IndexRoutingTable(String index, ImmutableOpenIntMap<IndexShardRoutingTable> shards) {
+ this.index = index;
+ this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
+ this.shards = shards;
+ ImmutableList.Builder<ShardRouting> allShards = ImmutableList.builder();
+ ImmutableList.Builder<ShardRouting> allActiveShards = ImmutableList.builder();
+ for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
+ for (ShardRouting shardRouting : cursor.value) {
+ allShards.add(shardRouting);
+ if (shardRouting.active()) {
+ allActiveShards.add(shardRouting);
+ }
+ }
+ }
+ this.allShards = allShards.build();
+ this.allActiveShards = allActiveShards.build();
+ }
+
+ /**
+ * Returns the index name
+ *
+ * @return name of the index
+ */
+ public String index() {
+ return this.index;
+ }
+
+
+ /**
+ * Returns the index name
+ *
+ * @return name of the index
+ */
+ public String getIndex() {
+ return index();
+ }
+
+ /**
+ * creates a new {@link IndexRoutingTable} with all shard versions normalized
+ *
+ * @return new {@link IndexRoutingTable}
+ */
+ public IndexRoutingTable normalizeVersions() {
+ IndexRoutingTable.Builder builder = new Builder(this.index);
+ for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
+ builder.addIndexShard(cursor.value.normalizeVersions());
+ }
+ return builder.build();
+ }
+
+ public void validate(RoutingTableValidation validation, MetaData metaData) {
+ if (!metaData.hasIndex(index())) {
+ validation.addIndexFailure(index(), "Exists in routing does not exists in metadata");
+ return;
+ }
+ IndexMetaData indexMetaData = metaData.index(index());
+ for (String failure : validate(indexMetaData)) {
+ validation.addIndexFailure(index, failure);
+ }
+
+ }
+
+ /**
+ * validate based on a meta data, returning failures found
+ */
+ public List<String> validate(IndexMetaData indexMetaData) {
+ ArrayList<String> failures = new ArrayList<String>();
+
+ // check the number of shards
+ if (indexMetaData.numberOfShards() != shards().size()) {
+ Set<Integer> expected = Sets.newHashSet();
+ for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
+ expected.add(i);
+ }
+ for (IndexShardRoutingTable indexShardRoutingTable : this) {
+ expected.remove(indexShardRoutingTable.shardId().id());
+ }
+ failures.add("Wrong number of shards in routing table, missing: " + expected);
+ }
+ // check the replicas
+ for (IndexShardRoutingTable indexShardRoutingTable : this) {
+ int routingNumberOfReplicas = indexShardRoutingTable.size() - 1;
+ if (routingNumberOfReplicas != indexMetaData.numberOfReplicas()) {
+ failures.add("Shard [" + indexShardRoutingTable.shardId().id()
+ + "] routing table has wrong number of replicas, expected [" + indexMetaData.numberOfReplicas() + "], got [" + routingNumberOfReplicas + "]");
+ }
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (!shardRouting.index().equals(index())) {
+ failures.add("shard routing has an index [" + shardRouting.index() + "] that is different than the routing table");
+ }
+ }
+ }
+ return failures;
+ }
+
+ @Override
+ public UnmodifiableIterator<IndexShardRoutingTable> iterator() {
+ return shards.valuesIt();
+ }
+
+ /**
+ * Calculates the number of nodes that hold one or more shards of this index
+ * {@link IndexRoutingTable} excluding the nodes with the node ids given as
+ * the <code>excludedNodes</code> parameter.
+ *
+ * @param excludedNodes id of nodes that will be excluded
+ * @return number of distinct nodes this index has at least one shard allocated on
+ */
+ public int numberOfNodesShardsAreAllocatedOn(String... excludedNodes) {
+ Set<String> nodes = Sets.newHashSet();
+ for (IndexShardRoutingTable shardRoutingTable : this) {
+ for (ShardRouting shardRouting : shardRoutingTable) {
+ if (shardRouting.assignedToNode()) {
+ String currentNodeId = shardRouting.currentNodeId();
+ boolean excluded = false;
+ if (excludedNodes != null) {
+ for (String excludedNode : excludedNodes) {
+ if (currentNodeId.equals(excludedNode)) {
+ excluded = true;
+ break;
+ }
+ }
+ }
+ if (!excluded) {
+ nodes.add(currentNodeId);
+ }
+ }
+ }
+ }
+ return nodes.size();
+ }
+
+ public ImmutableOpenIntMap<IndexShardRoutingTable> shards() {
+ return shards;
+ }
+
+ public ImmutableOpenIntMap<IndexShardRoutingTable> getShards() {
+ return shards();
+ }
+
+ public IndexShardRoutingTable shard(int shardId) {
+ return shards.get(shardId);
+ }
+
+ /**
+ * Returns <code>true</code> if all primary shards are active. Otherwise <code>false</code>.
+ */
+ public boolean allPrimaryShardsActive() {
+ return primaryShardsActive() == shards().size();
+ }
+
+ /**
+ * Calculates the number of primary shards in active state in routing table
+ *
+ * @return number of active primary shards
+ */
+ public int primaryShardsActive() {
+ int counter = 0;
+ for (IndexShardRoutingTable shardRoutingTable : this) {
+ if (shardRoutingTable.primaryShard().active()) {
+ counter++;
+ }
+ }
+ return counter;
+ }
+
+ /**
+ * Returns <code>true</code> if all primary shards are in
+ * {@link ShardRoutingState#UNASSIGNED} state. Otherwise <code>false</code>.
+ */
+ public boolean allPrimaryShardsUnassigned() {
+ return primaryShardsUnassigned() == shards.size();
+ }
+
+ /**
+ * Calculates the number of primary shards in the routing table that are in
+ * {@link ShardRoutingState#UNASSIGNED} state.
+ */
+ public int primaryShardsUnassigned() {
+ int counter = 0;
+ for (IndexShardRoutingTable shardRoutingTable : this) {
+ if (shardRoutingTable.primaryShard().unassigned()) {
+ counter++;
+ }
+ }
+ return counter;
+ }
+
+ /**
+ * Returns a {@link List} of shards that match one of the states listed in {@link ShardRoutingState states}
+ *
+ * @param states a set of {@link ShardRoutingState states}
+ * @return a {@link List} of shards that match one of the given {@link ShardRoutingState states}
+ */
+ public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
+ List<ShardRouting> shards = newArrayList();
+ for (IndexShardRoutingTable shardRoutingTable : this) {
+ shards.addAll(shardRoutingTable.shardsWithState(states));
+ }
+ return shards;
+ }
+
+ /**
+ * Returns an unordered iterator over all shards (including replicas).
+ */
+ public ShardsIterator randomAllShardsIt() {
+ return new PlainShardsIterator(shuffler.shuffle(allShards));
+ }
+
+ /**
+ * Returns an unordered iterator over all active shards (including replicas).
+ */
+ public ShardsIterator randomAllActiveShardsIt() {
+ return new PlainShardsIterator(shuffler.shuffle(allActiveShards));
+ }
+
+ /**
+ * A group shards iterator where each group ({@link ShardIterator})
+ * is an iterator across a shard replication group.
+ */
+ public GroupShardsIterator groupByShardsIt() {
+ // use list here since we need to maintain identity across shards
+ ArrayList<ShardIterator> set = new ArrayList<ShardIterator>(shards.size());
+ for (IndexShardRoutingTable indexShard : this) {
+ set.add(indexShard.shardsIt());
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ /**
+ * A group shards iterator where each group consists of a single {@link ShardRouting}; a group
+ * is created for each shard routing.
+ * <p/>
+ * <p>This basically means that components that use the {@link GroupShardsIterator} will iterate
+ * over *all* the shards (all the replicas) within the index.</p>
+ */
+ public GroupShardsIterator groupByAllIt() {
+ // use list here since we need to maintain identity across shards
+ ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
+ for (IndexShardRoutingTable indexShard : this) {
+ for (ShardRouting shardRouting : indexShard) {
+ set.add(shardRouting.shardsIt());
+ }
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ public void validate() throws RoutingValidationException {
+ }
+
+ public static Builder builder(String index) {
+ return new Builder(index);
+ }
+
+ public static class Builder {
+
+ private final String index;
+ private final ImmutableOpenIntMap.Builder<IndexShardRoutingTable> shards = ImmutableOpenIntMap.builder();
+
+ public Builder(String index) {
+ this.index = index;
+ }
+
+ /**
+ * Reads an {@link IndexRoutingTable} from a {@link StreamInput}
+ *
+ * @param in {@link StreamInput} to read the {@link IndexRoutingTable} from
+ * @return {@link IndexRoutingTable} read
+ * @throws IOException if an I/O error occurs while reading
+ */
+ public static IndexRoutingTable readFrom(StreamInput in) throws IOException {
+ String index = in.readString();
+ Builder builder = new Builder(index);
+
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index));
+ }
+
+ return builder.build();
+ }
+
+ /**
+ * Writes an {@link IndexRoutingTable} to a {@link StreamOutput}.
+ *
+ * @param index {@link IndexRoutingTable} to write
+ * @param out {@link StreamOutput} to write to
+ * @throws IOException if an I/O error occurs while writing
+ */
+ public static void writeTo(IndexRoutingTable index, StreamOutput out) throws IOException {
+ out.writeString(index.index());
+ out.writeVInt(index.shards.size());
+ for (IndexShardRoutingTable indexShard : index) {
+ IndexShardRoutingTable.Builder.writeToThin(indexShard, out);
+ }
+ }
+
+ /**
+ * Initializes a new empty index, as if it was created from an API.
+ */
+ public Builder initializeAsNew(IndexMetaData indexMetaData) {
+ return initializeEmpty(indexMetaData, true);
+ }
+
+ /**
+ * Initializes a new empty index, as if it was recovered (rather than created via the API).
+ */
+ public Builder initializeAsRecovery(IndexMetaData indexMetaData) {
+ return initializeEmpty(indexMetaData, false);
+ }
+
+ /**
+ * Initializes a new empty index, to be restored from a snapshot
+ */
+ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+ return initializeAsRestore(indexMetaData, restoreSource, true);
+ }
+
+ /**
+ * Initializes an existing index, to be restored from a snapshot
+ */
+ public Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+ return initializeAsRestore(indexMetaData, restoreSource, false);
+ }
+
+ /**
+ * Initializes an index, to be restored from a snapshot
+ */
+ private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, boolean asNew) {
+ if (!shards.isEmpty()) {
+ throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
+ }
+ for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
+ IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), !asNew);
+ for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
+ indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, null, i == 0 ? restoreSource : null, i == 0, ShardRoutingState.UNASSIGNED, 0));
+ }
+ shards.put(shardId, indexShardRoutingBuilder.build());
+ }
+ return this;
+ }
+
+ /**
+ * Initializes a new empty index, with an option to control if its from an API or not.
+ */
+ private Builder initializeEmpty(IndexMetaData indexMetaData, boolean asNew) {
+ if (!shards.isEmpty()) {
+ throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
+ }
+ for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
+ IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), !asNew);
+ for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
+ indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, i == 0, ShardRoutingState.UNASSIGNED, 0));
+ }
+ shards.put(shardId, indexShardRoutingBuilder.build());
+ }
+ return this;
+ }
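+
+ // Illustrative outcome: for an index with numberOfShards = 2 and
+ // numberOfReplicas = 1, this seeds two shard groups, each holding one
+ // UNASSIGNED primary (i == 0) and one UNASSIGNED replica, all at version 0.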
+
+ public Builder addReplica() {
+ for (IntCursor cursor : shards.keys()) {
+ int shardId = cursor.value;
+ // version 0, will get updated when reroute will happen
+ ImmutableShardRouting shard = new ImmutableShardRouting(index, shardId, null, false, ShardRoutingState.UNASSIGNED, 0);
+ shards.put(shardId,
+ new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
+ );
+ }
+ return this;
+ }
+
+ public Builder removeReplica() {
+ for (IntCursor cursor : shards.keys()) {
+ int shardId = cursor.value;
+ IndexShardRoutingTable indexShard = shards.get(shardId);
+ if (indexShard.replicaShards().isEmpty()) {
+ // nothing to do here!
+ return this;
+ }
+ // re-add all the current ones
+ IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(indexShard.shardId(), indexShard.primaryAllocatedPostApi());
+ for (ShardRouting shardRouting : indexShard) {
+ builder.addShard(new ImmutableShardRouting(shardRouting));
+ }
+ // first check if there is one that is not assigned to a node, and remove it
+ boolean removed = false;
+ for (ShardRouting shardRouting : indexShard) {
+ if (!shardRouting.primary() && !shardRouting.assignedToNode()) {
+ builder.removeShard(shardRouting);
+ removed = true;
+ break;
+ }
+ }
+ if (!removed) {
+ for (ShardRouting shardRouting : indexShard) {
+ if (!shardRouting.primary()) {
+ builder.removeShard(shardRouting);
+ removed = true;
+ break;
+ }
+ }
+ }
+ shards.put(shardId, builder.build());
+ }
+ return this;
+ }
+
+ public Builder addIndexShard(IndexShardRoutingTable indexShard) {
+ shards.put(indexShard.shardId().id(), indexShard);
+ return this;
+ }
+
+ /**
+ * Clears the post allocation flag for the specified shard
+ */
+ public Builder clearPostAllocationFlag(ShardId shardId) {
+ assert this.index.equals(shardId.index().name());
+ IndexShardRoutingTable indexShard = shards.get(shardId.id());
+ shards.put(indexShard.shardId().id(), new IndexShardRoutingTable(indexShard.shardId(), indexShard.shards(), false));
+ return this;
+ }
+
+ /**
+ * Adds a new shard routing (makes a copy of it), with reference data used from the index shard routing table
+ * if it needs to be created.
+ */
+ public Builder addShard(IndexShardRoutingTable refData, ShardRouting shard) {
+ IndexShardRoutingTable indexShard = shards.get(shard.id());
+ if (indexShard == null) {
+ indexShard = new IndexShardRoutingTable.Builder(refData.shardId(), refData.primaryAllocatedPostApi()).addShard(new ImmutableShardRouting(shard)).build();
+ } else {
+ indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(new ImmutableShardRouting(shard)).build();
+ }
+ shards.put(indexShard.shardId().id(), indexShard);
+ return this;
+ }
+
+ public IndexRoutingTable build() throws RoutingValidationException {
+ IndexRoutingTable indexRoutingTable = new IndexRoutingTable(index, shards.build());
+ indexRoutingTable.validate();
+ return indexRoutingTable;
+ }
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder("-- index [" + index + "]\n");
+ for (IndexShardRoutingTable indexShard : this) {
+ sb.append("----shard_id [").append(indexShard.shardId().index().name()).append("][").append(indexShard.shardId().id()).append("]\n");
+ for (ShardRouting shard : indexShard) {
+ sb.append("--------").append(shard.shortSummary()).append("\n");
+ }
+ }
+ return sb.toString();
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
new file mode 100644
index 0000000..3951fea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
@@ -0,0 +1,604 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.UnmodifiableIterator;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * {@link IndexShardRoutingTable} encapsulates all instances of a single shard.
+ * Each Elasticsearch index consists of multiple shards, each shard encapsulates
+ * a disjoint set of the index data and each shard has one or more instances
+ * referred to as replicas of a shard. Given that, this class encapsulates all
+ * replicas (instances) for a single index shard.
+ */
+public class IndexShardRoutingTable implements Iterable<ShardRouting> {
+
+ final ShardShuffler shuffler;
+ final ShardId shardId;
+
+ final ShardRouting primary;
+ final ImmutableList<ShardRouting> primaryAsList;
+ final ImmutableList<ShardRouting> replicas;
+ final ImmutableList<ShardRouting> shards;
+ final ImmutableList<ShardRouting> activeShards;
+ final ImmutableList<ShardRouting> assignedShards;
+
+ /**
+ * The initializing list, including ones that are initializing on a target node because of relocation.
+ * If we can come up with a better variable name, it would be nice...
+ */
+ final ImmutableList<ShardRouting> allInitializingShards;
+
+ final boolean primaryAllocatedPostApi;
+
+ IndexShardRoutingTable(ShardId shardId, ImmutableList<ShardRouting> shards, boolean primaryAllocatedPostApi) {
+ this.shardId = shardId;
+ this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
+ this.shards = shards;
+ this.primaryAllocatedPostApi = primaryAllocatedPostApi;
+
+ ShardRouting primary = null;
+ ImmutableList.Builder<ShardRouting> replicas = ImmutableList.builder();
+ ImmutableList.Builder<ShardRouting> activeShards = ImmutableList.builder();
+ ImmutableList.Builder<ShardRouting> assignedShards = ImmutableList.builder();
+ ImmutableList.Builder<ShardRouting> allInitializingShards = ImmutableList.builder();
+
+ for (ShardRouting shard : shards) {
+ if (shard.primary()) {
+ primary = shard;
+ } else {
+ replicas.add(shard);
+ }
+ if (shard.active()) {
+ activeShards.add(shard);
+ }
+ if (shard.initializing()) {
+ allInitializingShards.add(shard);
+ }
+ if (shard.relocating()) {
+ // create the target initializing shard routing on the node the shard is relocating to
+ allInitializingShards.add(new ImmutableShardRouting(shard.index(), shard.id(), shard.relocatingNodeId(), shard.currentNodeId(), shard.primary(), ShardRoutingState.INITIALIZING, shard.version()));
+ }
+ if (shard.assignedToNode()) {
+ assignedShards.add(shard);
+ }
+ }
+
+ this.primary = primary;
+ if (primary != null) {
+ this.primaryAsList = ImmutableList.of(primary);
+ } else {
+ this.primaryAsList = ImmutableList.of();
+ }
+ this.replicas = replicas.build();
+ this.activeShards = activeShards.build();
+ this.assignedShards = assignedShards.build();
+ this.allInitializingShards = allInitializingShards.build();
+ }
+
+ /**
+ * Normalizes all shard routings to the same version.
+ */
+ public IndexShardRoutingTable normalizeVersions() {
+ if (shards.isEmpty()) {
+ return this;
+ }
+ if (shards.size() == 1) {
+ return this;
+ }
+ long highestVersion = shards.get(0).version();
+ boolean requiresNormalization = false;
+ for (int i = 1; i < shards.size(); i++) {
+ if (shards.get(i).version() != highestVersion) {
+ requiresNormalization = true;
+ }
+ if (shards.get(i).version() > highestVersion) {
+ highestVersion = shards.get(i).version();
+ }
+ }
+ if (!requiresNormalization) {
+ return this;
+ }
+ List<ShardRouting> shardRoutings = new ArrayList<ShardRouting>(shards.size());
+ for (int i = 0; i < shards.size(); i++) {
+ if (shards.get(i).version() == highestVersion) {
+ shardRoutings.add(shards.get(i));
+ } else {
+ shardRoutings.add(new ImmutableShardRouting(shards.get(i), highestVersion));
+ }
+ }
+ return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shardRoutings), primaryAllocatedPostApi);
+ }
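+
+ // Worked example (illustrative): shard copies at versions [3, 2, 3] come back
+ // normalized to [3, 3, 3]; copies already at the highest version are reused
+ // as-is, and the table itself is returned unchanged when no copy lags behind.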
+
+ /**
+ * Has the primary shard of this group been allocated after the index was created via the API?
+ * Will be set to <code>true</code> if the group was created because of a recovery action.
+ */
+ public boolean primaryAllocatedPostApi() {
+ return primaryAllocatedPostApi;
+ }
+
+ /**
+ * Returns the shard's id
+ *
+ * @return id of the shard
+ */
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ /**
+ * Returns the shard's id
+ *
+ * @return id of the shard
+ */
+ public ShardId getShardId() {
+ return shardId();
+ }
+
+ @Override
+ public UnmodifiableIterator<ShardRouting> iterator() {
+ return shards.iterator();
+ }
+
+ /**
+ * Returns the number of this shard's instances.
+ */
+ public int size() {
+ return shards.size();
+ }
+
+ /**
+ * Returns the number of this shard's instances.
+ */
+ public int getSize() {
+ return size();
+ }
+
+ /**
+ * Returns an {@link ImmutableList} of shards
+ *
+ * @return an {@link ImmutableList} of shards
+ */
+ public ImmutableList<ShardRouting> shards() {
+ return this.shards;
+ }
+
+ /**
+ * Returns an {@link ImmutableList} of shards
+ *
+ * @return an {@link ImmutableList} of shards
+ */
+ public ImmutableList<ShardRouting> getShards() {
+ return shards();
+ }
+
+ /**
+ * Returns an {@link ImmutableList} of active shards
+ *
+ * @return an {@link ImmutableList} of active shards
+ */
+ public ImmutableList<ShardRouting> activeShards() {
+ return this.activeShards;
+ }
+
+ /**
+ * Returns an {@link ImmutableList} of active shards
+ *
+ * @return an {@link ImmutableList} of active shards
+ */
+ public ImmutableList<ShardRouting> getActiveShards() {
+ return activeShards();
+ }
+
+ /**
+ * Returns an {@link ImmutableList} of assigned shards
+ *
+ * @return an {@link ImmutableList} of assigned shards
+ */
+ public ImmutableList<ShardRouting> assignedShards() {
+ return this.assignedShards;
+ }
+
+ /**
+ * Returns an {@link ImmutableList} of assigned shards
+ *
+ * @return an {@link ImmutableList} of assigned shards
+ */
+ public ImmutableList<ShardRouting> getAssignedShards() {
+ return this.assignedShards;
+ }
+
+ /**
+ * Returns the number of shards in a specific state
+ *
+ * @param state state of the shards to count
+ * @return number of shards in <code>state</code>
+ */
+ public int countWithState(ShardRoutingState state) {
+ int count = 0;
+ for (ShardRouting shard : this) {
+ if (state == shard.state()) {
+ count++;
+ }
+ }
+ return count;
+ }
+
+ public ShardIterator shardsRandomIt() {
+ return new PlainShardIterator(shardId, shuffler.shuffle(shards));
+ }
+
+ public ShardIterator shardsIt() {
+ return new PlainShardIterator(shardId, shards);
+ }
+
+ public ShardIterator shardsIt(int seed) {
+ return new PlainShardIterator(shardId, shuffler.shuffle(shards, seed));
+ }
+
+ public ShardIterator activeShardsRandomIt() {
+ return new PlainShardIterator(shardId, shuffler.shuffle(activeShards));
+ }
+
+ public ShardIterator activeShardsIt() {
+ return new PlainShardIterator(shardId, activeShards);
+ }
+
+ public ShardIterator activeShardsIt(int seed) {
+ return new PlainShardIterator(shardId, shuffler.shuffle(activeShards, seed));
+ }
+
+ /**
+ * Returns an iterator over active and initializing shards, ensuring that the order
+ * is random within the active shards while initializing shards are iterated last.
+ */
+ public ShardIterator activeInitializingShardsRandomIt() {
+ return activeInitializingShardsIt(shuffler.nextSeed());
+ }
+
+ /**
+ * Returns an iterator over active and initializing shards, ensuring that the order
+ * is random within the active shards while initializing shards are iterated last.
+ */
+ public ShardIterator activeInitializingShardsIt(int seed) {
+ if (allInitializingShards.isEmpty()) {
+ return new PlainShardIterator(shardId, shuffler.shuffle(activeShards, seed));
+ }
+ ArrayList<ShardRouting> ordered = new ArrayList<ShardRouting>(activeShards.size() + allInitializingShards.size());
+ ordered.addAll(shuffler.shuffle(activeShards, seed));
+ ordered.addAll(allInitializingShards);
+ return new PlainShardIterator(shardId, ordered);
+ }
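+
+ // Illustrative ordering: with active shards {a, b, c} and initializing shards
+ // {i1}, a given seed might yield [b, c, a, i1] -- the active shards are
+ // rotated/shuffled by the seed while initializing shards always trail the
+ // iteration.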
+
+ public ShardIterator assignedShardsRandomIt() {
+ return new PlainShardIterator(shardId, shuffler.shuffle(assignedShards));
+ }
+
+ public ShardIterator assignedShardsIt() {
+ return new PlainShardIterator(shardId, assignedShards);
+ }
+
+ public ShardIterator assignedShardsIt(int seed) {
+ return new PlainShardIterator(shardId, shuffler.shuffle(assignedShards, seed));
+ }
+
+ /**
+ * Returns an iterator only on the primary shard.
+ */
+ public ShardIterator primaryShardIt() {
+ return new PlainShardIterator(shardId, primaryAsList);
+ }
+
+ public ShardIterator primaryActiveInitializingShardIt() {
+ if (!primaryAsList.isEmpty() && !primaryAsList.get(0).active() && !primaryAsList.get(0).initializing()) {
+ List<ShardRouting> primaryList = ImmutableList.of();
+ return new PlainShardIterator(shardId, primaryList);
+ }
+ return primaryShardIt();
+ }
+
+ public ShardIterator primaryFirstActiveInitializingShardsIt() {
+ ArrayList<ShardRouting> ordered = new ArrayList<ShardRouting>(activeShards.size() + allInitializingShards.size());
+ // fill it in a randomized fashion
+ for (ShardRouting shardRouting : shuffler.shuffle(activeShards)) {
+ ordered.add(shardRouting);
+ if (shardRouting.primary()) {
+ // swap: move the primary shard to the front of the list
+ ordered.set(ordered.size() - 1, ordered.get(0));
+ ordered.set(0, shardRouting);
+ }
+ }
+ // no need to worry about primary-first ordering for the initializing shards; it's temporary
+ if (!allInitializingShards.isEmpty()) {
+ ordered.addAll(allInitializingShards);
+ }
+ return new PlainShardIterator(shardId, ordered);
+ }
+
+ public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) {
+ ArrayList<ShardRouting> ordered = new ArrayList<ShardRouting>(activeShards.size() + allInitializingShards.size());
+ // fill it in a randomized fashion
+ for (int i = 0; i < activeShards.size(); i++) {
+ ShardRouting shardRouting = activeShards.get(i);
+ if (nodeId.equals(shardRouting.currentNodeId())) {
+ ordered.add(shardRouting);
+ }
+ }
+ for (int i = 0; i < allInitializingShards.size(); i++) {
+ ShardRouting shardRouting = allInitializingShards.get(i);
+ if (nodeId.equals(shardRouting.currentNodeId())) {
+ ordered.add(shardRouting);
+ }
+ }
+ return new PlainShardIterator(shardId, ordered);
+ }
+
+ public ShardIterator preferNodeActiveInitializingShardsIt(String nodeId) {
+ ArrayList<ShardRouting> ordered = new ArrayList<ShardRouting>(activeShards.size() + allInitializingShards.size());
+ // fill it in a randomized fashion
+ for (ShardRouting shardRouting : shuffler.shuffle(activeShards)) {
+ ordered.add(shardRouting);
+ if (nodeId.equals(shardRouting.currentNodeId())) {
+ // swap: it's the matching node id, move it to the front
+ ordered.set(ordered.size() - 1, ordered.get(0));
+ ordered.set(0, shardRouting);
+ }
+ }
+ if (!allInitializingShards.isEmpty()) {
+ ordered.addAll(allInitializingShards);
+ }
+ return new PlainShardIterator(shardId, ordered);
+ }
+
+ static class AttributesKey {
+
+ final String[] attributes;
+
+ AttributesKey(String[] attributes) {
+ this.attributes = attributes;
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(attributes);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return Arrays.equals(attributes, ((AttributesKey) obj).attributes);
+ }
+ }
+
+ static class AttributesRoutings {
+
+ public final ImmutableList<ShardRouting> withSameAttribute;
+ public final ImmutableList<ShardRouting> withoutSameAttribute;
+ public final int totalSize;
+
+ AttributesRoutings(ImmutableList<ShardRouting> withSameAttribute, ImmutableList<ShardRouting> withoutSameAttribute) {
+ this.withSameAttribute = withSameAttribute;
+ this.withoutSameAttribute = withoutSameAttribute;
+ this.totalSize = withoutSameAttribute.size() + withSameAttribute.size();
+ }
+ }
+
+ private volatile Map<AttributesKey, AttributesRoutings> activeShardsByAttributes = ImmutableMap.of();
+ private volatile Map<AttributesKey, AttributesRoutings> initializingShardsByAttributes = ImmutableMap.of();
+ private final Object shardsByAttributeMutex = new Object();
+
+ private AttributesRoutings getActiveAttribute(AttributesKey key, DiscoveryNodes nodes) {
+ AttributesRoutings shardRoutings = activeShardsByAttributes.get(key);
+ if (shardRoutings == null) {
+ synchronized (shardsByAttributeMutex) {
+ ArrayList<ShardRouting> from = new ArrayList<ShardRouting>(activeShards);
+ ImmutableList<ShardRouting> to = collectAttributeShards(key, nodes, from);
+
+ shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from));
+ activeShardsByAttributes = MapBuilder.newMapBuilder(activeShardsByAttributes).put(key, shardRoutings).immutableMap();
+ }
+ }
+ return shardRoutings;
+ }
+
+ private AttributesRoutings getInitializingAttribute(AttributesKey key, DiscoveryNodes nodes) {
+ AttributesRoutings shardRoutings = initializingShardsByAttributes.get(key);
+ if (shardRoutings == null) {
+ synchronized (shardsByAttributeMutex) {
+ ArrayList<ShardRouting> from = new ArrayList<ShardRouting>(allInitializingShards);
+ ImmutableList<ShardRouting> to = collectAttributeShards(key, nodes, from);
+ shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from));
+ initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap();
+ }
+ }
+ return shardRoutings;
+ }
+
+ private static ImmutableList<ShardRouting> collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {
+ final ArrayList<ShardRouting> to = new ArrayList<ShardRouting>();
+ for (final String attribute : key.attributes) {
+ final String localAttributeValue = nodes.localNode().attributes().get(attribute);
+ if (localAttributeValue != null) {
+ for (Iterator<ShardRouting> iterator = from.iterator(); iterator.hasNext(); ) {
+ ShardRouting fromShard = iterator.next();
+ final DiscoveryNode discoveryNode = nodes.get(fromShard.currentNodeId());
+ if (discoveryNode == null) {
+ iterator.remove(); // node is not present anymore - ignore shard
+ } else if (localAttributeValue.equals(discoveryNode.attributes().get(attribute))) {
+ iterator.remove();
+ to.add(fromShard);
+ }
+ }
+ }
+ }
+ return ImmutableList.copyOf(to);
+ }
+
+ public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes) {
+ return preferAttributesActiveInitializingShardsIt(attributes, nodes, shuffler.nextSeed());
+ }
+
+ public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes, int seed) {
+ AttributesKey key = new AttributesKey(attributes);
+ AttributesRoutings activeRoutings = getActiveAttribute(key, nodes);
+ AttributesRoutings initializingRoutings = getInitializingAttribute(key, nodes);
+
+ // we now randomize, once between the ones that have the same attributes, and once for the ones that don't
+ // we don't want to mix between the two!
+ ArrayList<ShardRouting> ordered = new ArrayList<ShardRouting>(activeRoutings.totalSize + initializingRoutings.totalSize);
+ ordered.addAll(shuffler.shuffle(activeRoutings.withSameAttribute, seed));
+ ordered.addAll(shuffler.shuffle(activeRoutings.withoutSameAttribute, seed));
+ ordered.addAll(shuffler.shuffle(initializingRoutings.withSameAttribute, seed));
+ ordered.addAll(shuffler.shuffle(initializingRoutings.withoutSameAttribute, seed));
+ return new PlainShardIterator(shardId, ordered);
+ }
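+
+ // Illustrative sketch (hypothetical names): with awareness attributes
+ // {"rack_id"} and the local node on rack "r1", active shards on r1 nodes are
+ // shuffled to the front, followed by the remaining active shards, then the
+ // initializing shards in the same same-attribute-first order.
+ //
+ //   ShardIterator it = table.preferAttributesActiveInitializingShardsIt(
+ //           new String[] { "rack_id" }, clusterState.nodes());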
+
+ public ShardRouting primaryShard() {
+ return primary;
+ }
+
+ public List<ShardRouting> replicaShards() {
+ return this.replicas;
+ }
+
+ public List<ShardRouting> replicaShardsWithState(ShardRoutingState... states) {
+ List<ShardRouting> shards = newArrayList();
+ for (ShardRouting shardEntry : replicas) {
+ for (ShardRoutingState state : states) {
+ if (shardEntry.state() == state) {
+ shards.add(shardEntry);
+ }
+ }
+ }
+ return shards;
+ }
+
+ public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
+ List<ShardRouting> shards = newArrayList();
+ for (ShardRouting shardEntry : this) {
+ for (ShardRoutingState state : states) {
+ if (shardEntry.state() == state) {
+ shards.add(shardEntry);
+ }
+ }
+ }
+ return shards;
+ }
+
+ public static class Builder {
+
+ private ShardId shardId;
+
+ private final List<ShardRouting> shards;
+
+ private boolean primaryAllocatedPostApi;
+
+ public Builder(IndexShardRoutingTable indexShard) {
+ this.shardId = indexShard.shardId;
+ this.shards = newArrayList(indexShard.shards);
+ this.primaryAllocatedPostApi = indexShard.primaryAllocatedPostApi();
+ }
+
+ public Builder(ShardId shardId, boolean primaryAllocatedPostApi) {
+ this.shardId = shardId;
+ this.shards = newArrayList();
+ this.primaryAllocatedPostApi = primaryAllocatedPostApi;
+ }
+
+ public Builder addShard(ImmutableShardRouting shardEntry) {
+ for (ShardRouting shard : shards) {
+ // don't add two that map to the same node id
+ // we rely on the fact that a node never holds both a primary and a replica of the same shard
+ if (shard.assignedToNode() && shardEntry.assignedToNode()
+ && shard.currentNodeId().equals(shardEntry.currentNodeId())) {
+ return this;
+ }
+ }
+ shards.add(shardEntry);
+ return this;
+ }
+
+ public Builder removeShard(ShardRouting shardEntry) {
+ shards.remove(shardEntry);
+ return this;
+ }
+
+ public IndexShardRoutingTable build() {
+ // we can automatically set allocatedPostApi to true if the primary is active
+ if (!primaryAllocatedPostApi) {
+ for (ShardRouting shardRouting : shards) {
+ if (shardRouting.primary() && shardRouting.active()) {
+ primaryAllocatedPostApi = true;
+ }
+ }
+ }
+ return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shards), primaryAllocatedPostApi);
+ }
+
+ public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException {
+ String index = in.readString();
+ return readFromThin(in, index);
+ }
+
+ public static IndexShardRoutingTable readFromThin(StreamInput in, String index) throws IOException {
+ int iShardId = in.readVInt();
+ boolean allocatedPostApi = in.readBoolean();
+ Builder builder = new Builder(new ShardId(index, iShardId), allocatedPostApi);
+
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ ImmutableShardRouting shard = ImmutableShardRouting.readShardRoutingEntry(in, index, iShardId);
+ builder.addShard(shard);
+ }
+
+ return builder.build();
+ }
+
+ public static void writeTo(IndexShardRoutingTable indexShard, StreamOutput out) throws IOException {
+ out.writeString(indexShard.shardId().index().name());
+ writeToThin(indexShard, out);
+ }
+
+ public static void writeToThin(IndexShardRoutingTable indexShard, StreamOutput out) throws IOException {
+ out.writeVInt(indexShard.shardId.id());
+ out.writeBoolean(indexShard.primaryAllocatedPostApi());
+
+ out.writeVInt(indexShard.shards.size());
+ for (ShardRouting entry : indexShard) {
+ entry.writeToThin(out);
+ }
+ }
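+
+ // Wire format summary (descriptive note, not part of the original patch):
+ // writeTo prepends the index name as a string; writeToThin then writes the
+ // shard id (vint), the primaryAllocatedPostApi flag (boolean), the number of
+ // shards (vint), and finally each shard routing entry in "thin" form.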
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/MutableShardRouting.java b/src/main/java/org/elasticsearch/cluster/routing/MutableShardRouting.java
new file mode 100644
index 0000000..875db3d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/MutableShardRouting.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+/**
+ * Similar to {@link ImmutableShardRouting}, this class keeps the metadata of the current shard. But unlike
+ * {@link ImmutableShardRouting}, the information kept in this class can be modified.
+ * These modifications include changing the primary state, and relocating or assigning the shard
+ * represented by this class.
+ */
+public class MutableShardRouting extends ImmutableShardRouting {
+
+ public MutableShardRouting(ShardRouting copy) {
+ super(copy);
+ }
+
+ public MutableShardRouting(ShardRouting copy, long version) {
+ super(copy);
+ this.version = version;
+ }
+
+ public MutableShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state, long version) {
+ super(index, shardId, currentNodeId, primary, state, version);
+ }
+
+ public MutableShardRouting(String index, int shardId, String currentNodeId,
+ String relocatingNodeId, boolean primary, ShardRoutingState state, long version) {
+ super(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version);
+ }
+
+ public MutableShardRouting(String index, int shardId, String currentNodeId,
+ String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) {
+ super(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version);
+ }
+
+ /**
+ * Assign this shard to a node.
+ *
+ * @param nodeId id of the node to assign this shard to
+ */
+ void assignToNode(String nodeId) {
+ version++;
+
+ if (currentNodeId == null) {
+ assert state == ShardRoutingState.UNASSIGNED;
+
+ state = ShardRoutingState.INITIALIZING;
+ currentNodeId = nodeId;
+ relocatingNodeId = null;
+ } else if (state == ShardRoutingState.STARTED) {
+ state = ShardRoutingState.RELOCATING;
+ relocatingNodeId = nodeId;
+ } else if (state == ShardRoutingState.RELOCATING) {
+ assert nodeId.equals(relocatingNodeId);
+ }
+ }
+
+ /**
+ * Relocate the shard to another node.
+ *
+ * @param relocatingNodeId id of the node to relocate the shard to
+ */
+ void relocate(String relocatingNodeId) {
+ version++;
+ assert state == ShardRoutingState.STARTED;
+ state = ShardRoutingState.RELOCATING;
+ this.relocatingNodeId = relocatingNodeId;
+ }
+
+ /**
+ * Cancel the relocation of a shard. The shard's state must be
+ * <code>RELOCATING</code>.
+ */
+ void cancelRelocation() {
+ version++;
+ assert state == ShardRoutingState.RELOCATING;
+ assert assignedToNode();
+ assert relocatingNodeId != null;
+
+ state = ShardRoutingState.STARTED;
+ relocatingNodeId = null;
+ }
+
+ /**
+ * Set the shard's state to <code>UNASSIGNED</code> and clear its current and
+ * relocating node ids. The shard must currently be assigned to a node.
+ */
+ void deassignNode() {
+ version++;
+ assert state != ShardRoutingState.UNASSIGNED;
+
+ state = ShardRoutingState.UNASSIGNED;
+ this.currentNodeId = null;
+ this.relocatingNodeId = null;
+ }
+
+ /**
+ * Set the shard's state to <code>STARTED</code>. The shard's state must be
+ * <code>INITIALIZING</code> or <code>RELOCATING</code>. Any relocation will be
+ * canceled.
+ */
+ void moveToStarted() {
+ version++;
+ assert state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING;
+ relocatingNodeId = null;
+ restoreSource = null;
+ state = ShardRoutingState.STARTED;
+ }
+
+ /**
+ * Make the shard primary. Throws an {@link IllegalShardRoutingStateException}
+ * if the shard is already the primary.
+ */
+ void moveToPrimary() {
+ version++;
+ if (primary) {
+ throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
+ }
+ primary = true;
+ }
+
+ /**
+ * Make the shard a replica. Throws an {@link IllegalShardRoutingStateException}
+ * if the shard is not currently the primary.
+ */
+ void moveFromPrimary() {
+ version++;
+ if (!primary) {
+ throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
+ }
+ primary = false;
+ }
+
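+ // Cache the hash code per routing version: super.hashCode() is only recomputed
+ // when the version has changed since the last call (descriptive note, not part
+ // of the original patch).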
+ private long hashVersion = version - 1;
+ private int hashCode = 0;
+
+ @Override
+ public int hashCode() {
+ hashCode = (hashVersion != version ? super.hashCode() : hashCode);
+ hashVersion = version;
+ return hashCode;
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java b/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java
new file mode 100644
index 0000000..4d5434f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.List;
+
+/**
+ * The {@link PlainShardIterator} is a {@link ShardsIterator} which iterates over all
+ * shards of a given {@link ShardId shard id}.
+ */
+public class PlainShardIterator extends PlainShardsIterator implements ShardIterator {
+
+ private final ShardId shardId;
+
+ /**
+ * Creates a {@link PlainShardIterator} instance that iterates over the given shards,
+ * all of which belong to the given <code>shardId</code>.
+ *
+ * @param shardId shard id of the group
+ * @param shards shards to iterate
+ */
+ public PlainShardIterator(ShardId shardId, List<ShardRouting> shards) {
+ super(shards);
+ this.shardId = shardId;
+ }
+
+ @Override
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof ShardIterator)) return false; // also guards against null
+ ShardIterator that = (ShardIterator) o;
+ return shardId.equals(that.shardId());
+ }
+
+ @Override
+ public int hashCode() {
+ return shardId.hashCode();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java b/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java
new file mode 100644
index 0000000..b3dc44c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing;
+
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ * A simple {@link ShardsIterator} that iterates a list or sub-list of
+ * {@link ShardRouting shard routings}.
+ */
+public class PlainShardsIterator implements ShardsIterator {
+
+ private final List<ShardRouting> shards;
+
+ private ListIterator<ShardRouting> iterator;
+
+ public PlainShardsIterator(List<ShardRouting> shards) {
+ this.shards = shards;
+ this.iterator = shards.listIterator();
+ }
+
+ @Override
+ public void reset() {
+ iterator = shards.listIterator();
+ }
+
+ @Override
+ public int remaining() {
+ return shards.size() - iterator.nextIndex();
+ }
+
+ @Override
+ public ShardRouting firstOrNull() {
+ if (shards.isEmpty()) {
+ return null;
+ }
+ return shards.get(0);
+ }
+
+ @Override
+ public ShardRouting nextOrNull() {
+ if (iterator.hasNext()) {
+ return iterator.next();
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public int size() {
+ return shards.size();
+ }
+
+ @Override
+ public int sizeActive() {
+ int count = 0;
+ for (ShardRouting shard : shards) {
+ if (shard.active()) {
+ count++;
+ }
+ }
+ return count;
+ }
+
+ @Override
+ public int assignedReplicasIncludingRelocating() {
+ int count = 0;
+ for (ShardRouting shard : shards) {
+ if (shard.unassigned()) {
+ continue;
+ }
+ // if the shard is primary and relocating, add one to the counter since we perform it on the replica as well
+ // (and we already did it on the primary)
+ if (shard.primary()) {
+ if (shard.relocating()) {
+ count++;
+ }
+ } else {
+ count++;
+ // if we are relocating the replica, we want to perform the index operation on both the relocating
+ // shard and the target shard. This means that we won't lose index operations between the end of recovery
+ // and reassignment of the shard by the master node
+ if (shard.relocating()) {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
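+
+ // Worked example (not part of the original patch): a started primary with one
+ // relocating replica yields 2 here: one for the replica itself and one for its
+ // relocation target. A relocating primary contributes a single count instead.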
+
+ @Override
+ public Iterable<ShardRouting> asUnordered() {
+ return shards;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java b/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
new file mode 100644
index 0000000..d36be1b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Represents the snapshot and index from which a recovering index should be restored
+ */
+public class RestoreSource implements Streamable, ToXContent {
+
+ private SnapshotId snapshotId;
+
+ private String index;
+
+ RestoreSource() {
+ }
+
+ public RestoreSource(SnapshotId snapshotId, String index) {
+ this.snapshotId = snapshotId;
+ this.index = index;
+ }
+
+ public SnapshotId snapshotId() {
+ return snapshotId;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public static RestoreSource readRestoreSource(StreamInput in) throws IOException {
+ RestoreSource restoreSource = new RestoreSource();
+ restoreSource.readFrom(in);
+ return restoreSource;
+ }
+
+ public static RestoreSource readOptionalRestoreSource(StreamInput in) throws IOException {
+ return in.readOptionalStreamable(new RestoreSource());
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ snapshotId = SnapshotId.readSnapshotId(in);
+ index = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ snapshotId.writeTo(out);
+ out.writeString(index);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject()
+ .field("repository", snapshotId.getRepository())
+ .field("snapshot", snapshotId.getSnapshot())
+ .field("index", index)
+ .endObject();
+ }
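+
+ // Example rendered form (hypothetical values, not part of the original patch):
+ // { "repository" : "my_repo", "snapshot" : "snap_1", "index" : "my_index" }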
+
+ @Override
+ public String toString() {
+ return snapshotId.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RestoreSource that = (RestoreSource) o;
+
+ if (!index.equals(that.index)) return false;
+ if (!snapshotId.equals(that.snapshotId)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = snapshotId.hashCode();
+ result = 31 * result + index.hashCode();
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RotationShardShuffler.java b/src/main/java/org/elasticsearch/cluster/routing/RotationShardShuffler.java
new file mode 100644
index 0000000..3e45ff0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RotationShardShuffler.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.common.util.CollectionUtils;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Basic {@link ShardShuffler} implementation that uses an {@link AtomicInteger} to generate seeds and uses a rotation to permute shards.
+ */
+public class RotationShardShuffler extends ShardShuffler {
+
+ private final AtomicInteger seed;
+
+ public RotationShardShuffler(int seed) {
+ this.seed = new AtomicInteger(seed);
+ }
+
+ @Override
+ public int nextSeed() {
+ return seed.getAndIncrement();
+ }
+
+ @Override
+ public List<ShardRouting> shuffle(List<ShardRouting> shards, int seed) {
+ return CollectionUtils.rotate(shards, seed);
+ }
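+
+ // Illustrative effect (assuming rotation by the seed, not part of the original
+ // patch): shuffle([s0, s1, s2], 1) -> [s1, s2, s0]. Since nextSeed() increments
+ // by one per call, successive iterators start at successive shard copies,
+ // spreading requests round-robin across the copies.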
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingException.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingException.java
new file mode 100644
index 0000000..c5c7652
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * The base {@link Exception} for all exceptions thrown by routing related operations.
+ */
+public class RoutingException extends ElasticsearchException {
+
+ public RoutingException(String message) {
+ super(message);
+ }
+
+ public RoutingException(String message, Throwable cause) {
+ super(message, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java
new file mode 100644
index 0000000..aa69a23
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards
+ * that are hosted on that node. Each {@link RoutingNode} has a unique node id that can be used to identify the node.
+ */
+public class RoutingNode implements Iterable<MutableShardRouting> {
+
+ private final String nodeId;
+
+ private final DiscoveryNode node;
+
+ private final List<MutableShardRouting> shards;
+
+ public RoutingNode(String nodeId, DiscoveryNode node) {
+ this(nodeId, node, new ArrayList<MutableShardRouting>());
+ }
+
+ public RoutingNode(String nodeId, DiscoveryNode node, List<MutableShardRouting> shards) {
+ this.nodeId = nodeId;
+ this.node = node;
+ this.shards = shards;
+ }
+
+ @Override
+ public Iterator<MutableShardRouting> iterator() {
+ return Iterators.unmodifiableIterator(shards.iterator());
+ }
+
+ Iterator<MutableShardRouting> mutableIterator() {
+ return shards.iterator();
+ }
+
+ /**
+ * Returns the nodes {@link DiscoveryNode}.
+ *
+ * @return discoveryNode of this node
+ */
+ public DiscoveryNode node() {
+ return this.node;
+ }
+
+ /**
+ * Get the id of this node
+ * @return id of the node
+ */
+ public String nodeId() {
+ return this.nodeId;
+ }
+
+ public int size() {
+ return shards.size();
+ }
+
+ /**
+ * Add a new shard to this node
+ * @param shard the shard to create on this node
+ */
+ void add(MutableShardRouting shard) {
+ // TODO use Set with ShardIds for faster lookup.
+ for (MutableShardRouting shardRouting : shards) {
+ if (shardRouting.shardId().equals(shard.shardId())) {
+ throw new ElasticsearchIllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists");
+ }
+ }
+ shards.add(shard);
+ }
+
+ /**
+ * Determine the number of shards with a specific state
+ * @param states set of states which should be counted
+ * @return number of shards
+ */
+ public int numberOfShardsWithState(ShardRoutingState... states) {
+ int count = 0;
+ for (MutableShardRouting shardEntry : this) {
+ for (ShardRoutingState state : states) {
+ if (shardEntry.state() == state) {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Determine the shards with a specific state
+ * @param states set of states which should be listed
+ * @return List of shards
+ */
+ public List<MutableShardRouting> shardsWithState(ShardRoutingState... states) {
+ List<MutableShardRouting> shards = newArrayList();
+ for (MutableShardRouting shardEntry : this) {
+ for (ShardRoutingState state : states) {
+ if (shardEntry.state() == state) {
+ shards.add(shardEntry);
+ }
+ }
+ }
+ return shards;
+ }
+
+ /**
+ * Determine the shards of an index with a specific state
+ * @param index id of the index
+ * @param states set of states which should be listed
+ * @return a list of shards
+ */
+ public List<MutableShardRouting> shardsWithState(String index, ShardRoutingState... states) {
+ List<MutableShardRouting> shards = newArrayList();
+
+ for (MutableShardRouting shardEntry : this) {
+ if (!shardEntry.index().equals(index)) {
+ continue;
+ }
+ for (ShardRoutingState state : states) {
+ if (shardEntry.state() == state) {
+ shards.add(shardEntry);
+ }
+ }
+ }
+ return shards;
+ }
+
+ /**
+ * The number of shards on this node that will not eventually be relocated away (i.e. shards not in state <code>RELOCATING</code>).
+ */
+ public int numberOfOwningShards() {
+ int count = 0;
+ for (MutableShardRouting shardEntry : this) {
+ if (shardEntry.state() != ShardRoutingState.RELOCATING) {
+ count++;
+ }
+ }
+
+ return count;
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("-----node_id[").append(nodeId).append("][" + (node == null ? "X" : "V") + "]\n");
+ for (MutableShardRouting entry : shards) {
+ sb.append("--------").append(entry.shortSummary()).append('\n');
+ }
+ return sb.toString();
+ }
+
+ public MutableShardRouting get(int i) {
+ return shards.get(i);
+ }
+
+ public Collection<MutableShardRouting> copyShards() {
+ return new ArrayList<MutableShardRouting>(shards);
+ }
+
+ public boolean isEmpty() {
+ return shards.isEmpty();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
new file mode 100644
index 0000000..2012cbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
@@ -0,0 +1,767 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.base.Predicate;
+import com.google.common.collect.*;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static com.google.common.collect.Sets.newHashSet;
+
+/**
+ * {@link RoutingNodes} represents a copy of the routing information contained in
+ * the {@link ClusterState cluster state}.
+ */
+public class RoutingNodes implements Iterable<RoutingNode> {
+
+ private final MetaData metaData;
+
+ private final ClusterBlocks blocks;
+
+ private final RoutingTable routingTable;
+
+ private final Map<String, RoutingNode> nodesToShards = newHashMap();
+
+ private final UnassignedShards unassignedShards = new UnassignedShards();
+
+ private final List<MutableShardRouting> ignoredUnassignedShards = newArrayList();
+
+ private final Map<ShardId, List<MutableShardRouting>> assignedShards = newHashMap();
+
+ private int inactivePrimaryCount = 0;
+
+ private int inactiveShardCount = 0;
+
+ private int relocatingShards = 0;
+
+ private Set<ShardId> clearPostAllocationFlag;
+
+ private final Map<String, ObjectIntOpenHashMap<String>> nodesPerAttributeNames = new HashMap<String, ObjectIntOpenHashMap<String>>();
+
+ public RoutingNodes(ClusterState clusterState) {
+ this.metaData = clusterState.metaData();
+ this.blocks = clusterState.blocks();
+ this.routingTable = clusterState.routingTable();
+
+ Map<String, List<MutableShardRouting>> nodesToShards = newHashMap();
+ // fill in the nodeToShards with the "live" nodes
+ for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
+ nodesToShards.put(cursor.value.id(), new ArrayList<MutableShardRouting>());
+ }
+
+ // fill in the inverse of node -> shards allocated
+ // also fill replicaSet information
+ for (IndexRoutingTable indexRoutingTable : routingTable.indicesRouting().values()) {
+ for (IndexShardRoutingTable indexShard : indexRoutingTable) {
+ for (ShardRouting shard : indexShard) {
+ // to get all the shards belonging to an index, including the replicas,
+ // we define a replica set and keep track of it. A replica set is identified
+ // by the ShardId, as this is common for primary and replicas.
+ // A replica set may contain at most one replica in the state RELOCATING.
+ if (shard.assignedToNode()) {
+ List<MutableShardRouting> entries = nodesToShards.get(shard.currentNodeId());
+ if (entries == null) {
+ entries = newArrayList();
+ nodesToShards.put(shard.currentNodeId(), entries);
+ }
+ MutableShardRouting sr = new MutableShardRouting(shard);
+ entries.add(sr);
+ assignedShardsAdd(sr);
+ if (shard.relocating()) {
+ entries = nodesToShards.get(shard.relocatingNodeId());
+ relocatingShards++;
+ if (entries == null) {
+ entries = newArrayList();
+ nodesToShards.put(shard.relocatingNodeId(), entries);
+ }
+ // add the counterpart shard with relocatingNodeId reflecting the source
+ // from which it is relocating
+ sr = new MutableShardRouting(shard.index(), shard.id(), shard.relocatingNodeId(),
+ shard.currentNodeId(), shard.primary(), ShardRoutingState.INITIALIZING, shard.version());
+ entries.add(sr);
+ assignedShardsAdd(sr);
+ } else if (!shard.active()) { // shards that are initializing without being relocated
+ if (shard.primary()) {
+ inactivePrimaryCount++;
+ }
+ inactiveShardCount++;
+ }
+ } else {
+ MutableShardRouting sr = new MutableShardRouting(shard);
+ assignedShardsAdd(sr);
+ unassignedShards.add(sr);
+ }
+ }
+ }
+ }
+ for (Map.Entry<String, List<MutableShardRouting>> entry : nodesToShards.entrySet()) {
+ String nodeId = entry.getKey();
+ this.nodesToShards.put(nodeId, new RoutingNode(nodeId, clusterState.nodes().get(nodeId), entry.getValue()));
+ }
+ }
+
+ @Override
+ public Iterator<RoutingNode> iterator() {
+ return Iterators.unmodifiableIterator(nodesToShards.values().iterator());
+ }
+
+ public RoutingTable routingTable() {
+ return routingTable;
+ }
+
+ public RoutingTable getRoutingTable() {
+ return routingTable();
+ }
+
+ public MetaData metaData() {
+ return this.metaData;
+ }
+
+ public MetaData getMetaData() {
+ return metaData();
+ }
+
+ public ClusterBlocks blocks() {
+ return this.blocks;
+ }
+
+ public ClusterBlocks getBlocks() {
+ return this.blocks;
+ }
+
+ public int requiredAverageNumberOfShardsPerNode() {
+ int totalNumberOfShards = 0;
+ // we need to recompute to take closed shards into account
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ IndexMetaData indexMetaData = cursor.value;
+ if (indexMetaData.state() == IndexMetaData.State.OPEN) {
+ totalNumberOfShards += indexMetaData.totalNumberOfShards();
+ }
+ }
+ return totalNumberOfShards / nodesToShards.size();
+ }
+
+ public boolean hasUnassigned() {
+ return !unassignedShards.isEmpty();
+ }
+
+ public List<MutableShardRouting> ignoredUnassigned() {
+ return this.ignoredUnassignedShards;
+ }
+
+ public UnassignedShards unassigned() {
+ return this.unassignedShards;
+ }
+
+ public RoutingNodesIterator nodes() {
+ return new RoutingNodesIterator(nodesToShards.values().iterator());
+ }
+
+ /**
+ * Clears the post allocation flag for the provided shard id. NOTE: this should be used cautiously
+ * since it will lead to data loss if the primary shard is not allocated, as it will allocate
+ * the primary shard on a node and *not* expect it to have an existing valid index there.
+ */
+ public void addClearPostAllocationFlag(ShardId shardId) {
+ if (clearPostAllocationFlag == null) {
+ clearPostAllocationFlag = Sets.newHashSet();
+ }
+ clearPostAllocationFlag.add(shardId);
+ }
+
+ public Iterable<ShardId> getShardsToClearPostAllocationFlag() {
+ if (clearPostAllocationFlag == null) {
+ return ImmutableSet.of();
+ }
+ return clearPostAllocationFlag;
+ }
+
+ public RoutingNode node(String nodeId) {
+ return nodesToShards.get(nodeId);
+ }
+
+ public ObjectIntOpenHashMap<String> nodesPerAttributesCounts(String attributeName) {
+ ObjectIntOpenHashMap<String> nodesPerAttributesCounts = nodesPerAttributeNames.get(attributeName);
+ if (nodesPerAttributesCounts != null) {
+ return nodesPerAttributesCounts;
+ }
+ nodesPerAttributesCounts = new ObjectIntOpenHashMap<String>();
+ for (RoutingNode routingNode : this) {
+ String attrValue = routingNode.node().attributes().get(attributeName);
+ nodesPerAttributesCounts.addTo(attrValue, 1);
+ }
+ nodesPerAttributeNames.put(attributeName, nodesPerAttributesCounts);
+ return nodesPerAttributesCounts;
+ }
+
+ public boolean hasUnassignedPrimaries() {
+ return unassignedShards.numPrimaries() > 0;
+ }
+
+ public boolean hasUnassignedShards() {
+ return !unassignedShards.isEmpty();
+ }
+
+ public boolean hasInactivePrimaries() {
+ return inactivePrimaryCount > 0;
+ }
+
+ public boolean hasInactiveShards() {
+ return inactiveShardCount > 0;
+ }
+
+ public int getRelocatingShardCount() {
+ return relocatingShards;
+ }
+
+ /**
+ * Returns the active primary shard for the given ShardRouting or <code>null</code> if
+ * no primary is found or the primary is not active.
+ */
+ public MutableShardRouting activePrimary(ShardRouting shard) {
+ assert !shard.primary();
+ for (MutableShardRouting shardRouting : assignedShards(shard.shardId())) {
+ if (shardRouting.primary() && shardRouting.active()) {
+ return shardRouting;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns one active replica shard for the given ShardRouting shard ID or <code>null</code> if
+ * no active replica is found.
+ */
+ public MutableShardRouting activeReplica(ShardRouting shard) {
+ for (MutableShardRouting shardRouting : assignedShards(shard.shardId())) {
+ if (!shardRouting.primary() && shardRouting.active()) {
+ return shardRouting;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns all shards that are not in the state UNASSIGNED with the same shard
+ * ID as the given shard.
+ */
+ public Iterable<MutableShardRouting> assignedShards(ShardRouting shard) {
+ return assignedShards(shard.shardId());
+ }
+
+ /**
+ * Returns <code>true</code> iff all replicas are active for the given shard routing, otherwise <code>false</code>.
+ */
+ public boolean allReplicasActive(ShardRouting shardRouting) {
+ final List<MutableShardRouting> shards = assignedShards(shardRouting.shardId());
+ if (shards.isEmpty() || shards.size() < this.routingTable.index(shardRouting.index()).shard(shardRouting.id()).size()) {
+ return false; // if the list is empty, nothing is active; if we have fewer than the total, at least one shard is unassigned
+ }
+ for (MutableShardRouting shard : shards) {
+ if (!shard.active()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public List<MutableShardRouting> shards(Predicate<MutableShardRouting> predicate) {
+ List<MutableShardRouting> shards = newArrayList();
+ for (RoutingNode routingNode : this) {
+ for (MutableShardRouting shardRouting : routingNode) {
+ if (predicate.apply(shardRouting)) {
+ shards.add(shardRouting);
+ }
+ }
+ }
+ return shards;
+ }
+
+ public List<MutableShardRouting> shardsWithState(ShardRoutingState... state) {
+ // TODO these are used on tests only - move into utils class
+ List<MutableShardRouting> shards = newArrayList();
+ for (RoutingNode routingNode : this) {
+ shards.addAll(routingNode.shardsWithState(state));
+ }
+ return shards;
+ }
+
+ public List<MutableShardRouting> shardsWithState(String index, ShardRoutingState... state) {
+ // TODO these are used on tests only - move into utils class
+ List<MutableShardRouting> shards = newArrayList();
+ for (RoutingNode routingNode : this) {
+ shards.addAll(routingNode.shardsWithState(index, state));
+ }
+ return shards;
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder("routing_nodes:\n");
+ for (RoutingNode routingNode : this) {
+ sb.append(routingNode.prettyPrint());
+ }
+ sb.append("---- unassigned\n");
+ for (MutableShardRouting shardEntry : unassignedShards) {
+ sb.append("--------").append(shardEntry.shortSummary()).append('\n');
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Assign a shard to a node. This will increment the inactiveShardCount counter
+ * and the inactivePrimaryCount counter if the shard is the primary.
+ * In case the shard is already assigned and started, it will be marked as
+ * relocating, which is accounted for, too, so the number of concurrent relocations
+ * can be retrieved easily.
+ * This method can be called several times for the same shard; only the first
+ * call will change the state.
+ *
+ * INITIALIZING => INITIALIZING
+ * UNASSIGNED => INITIALIZING
+ * STARTED => RELOCATING
+ * RELOCATING => RELOCATING
+ *
+ * @param shard the shard to be assigned
+ * @param nodeId the nodeId this shard should initialize on or relocate from
+ */
+ public void assign(MutableShardRouting shard, String nodeId) {
+ // state will not change if the shard is already initializing.
+ ShardRoutingState oldState = shard.state();
+ shard.assignToNode(nodeId);
+ node(nodeId).add(shard);
+ if (oldState == ShardRoutingState.UNASSIGNED) {
+ inactiveShardCount++;
+ if (shard.primary()) {
+ inactivePrimaryCount++;
+ }
+ }
+
+ if (shard.state() == ShardRoutingState.RELOCATING) {
+ relocatingShards++;
+ }
+ assignedShardsAdd(shard);
+ }
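+
+ // Illustrative usage (hypothetical node ids, not part of the original patch):
+ // routingNodes.assign(shard, "node_1"); // UNASSIGNED -> INITIALIZING
+ // routingNodes.started(shard); // INITIALIZING -> STARTED
+ // routingNodes.assign(shard, "node_2"); // STARTED -> RELOCATING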
+
+ /**
+ * Relocate a shard to another node.
+ */
+ public void relocate(MutableShardRouting shard, String nodeId) {
+ relocatingShards++;
+ shard.relocate(nodeId);
+ }
+
+ /**
+ * Mark a shard as started and adjusts internal statistics.
+ */
+ public void started(MutableShardRouting shard) {
+ if (!shard.active() && shard.relocatingNodeId() == null) {
+ inactiveShardCount--;
+ if (shard.primary()) {
+ inactivePrimaryCount--;
+ }
+ } else if (shard.relocating()) {
+ relocatingShards--;
+ }
+ assert !shard.started();
+ shard.moveToStarted();
+ }
+
+ /**
+ * Cancels the relocation of a shard. The shard must be in state <code>RELOCATING</code>.
+ */
+ public void cancelRelocation(MutableShardRouting shard) {
+ relocatingShards--;
+ shard.cancelRelocation();
+ }
+
+ /**
+ * Swaps the status of the given shards, making primaries replicas and vice versa.
+ *
+ * @param shards the shards to have their primary status swapped
+ */
+ public void swapPrimaryFlag(MutableShardRouting... shards) {
+ for (MutableShardRouting shard : shards) {
+ if (shard.primary()) {
+ shard.moveFromPrimary();
+ if (shard.unassigned()) {
+ unassignedShards.primaries--;
+ }
+ } else {
+ shard.moveToPrimary();
+ if (shard.unassigned()) {
+ unassignedShards.primaries++;
+ }
+ }
+ }
+ }
+
+ private static final List<MutableShardRouting> EMPTY = Collections.emptyList();
+
+ private List<MutableShardRouting> assignedShards(ShardId shardId) {
+ final List<MutableShardRouting> replicaSet = assignedShards.get(shardId);
+ return replicaSet == null ? EMPTY : Collections.unmodifiableList(replicaSet);
+ }
+
+ /**
+ * Removes the given shard from these routing nodes' internal statistics and cancels
+ * its relocation if the shard is relocating.
+ * @param shard the shard to remove
+ */
+ private void remove(MutableShardRouting shard) {
+ if (!shard.active() && shard.relocatingNodeId() == null) {
+ inactiveShardCount--;
+ assert inactiveShardCount >= 0;
+ if (shard.primary()) {
+ inactivePrimaryCount--;
+ }
+ } else if (shard.relocating()) {
+ cancelRelocation(shard);
+ }
+ assignedShardsRemove(shard);
+ }
+
+ private void assignedShardsAdd(MutableShardRouting shard) {
+ if (shard.unassigned()) {
+ // unassigned shards are not tracked in the assigned-shards map
+ return;
+ }
+ List<MutableShardRouting> shards = assignedShards.get(shard.shardId());
+ if (shards == null) {
+ shards = Lists.newArrayList();
+ assignedShards.put(shard.shardId(), shards);
+ }
+ assert assertInstanceNotInList(shard, shards);
+ shards.add(shard);
+ }
+
+ private boolean assertInstanceNotInList(MutableShardRouting shard, List<MutableShardRouting> shards) {
+ for (MutableShardRouting s : shards) {
+ assert s != shard;
+ }
+ return true;
+ }
+
+ private void assignedShardsRemove(MutableShardRouting shard) {
+ final List<MutableShardRouting> replicaSet = assignedShards.get(shard.shardId());
+ if (replicaSet != null) {
+ final Iterator<MutableShardRouting> iterator = replicaSet.iterator();
+ while(iterator.hasNext()) {
+ // yes we check identity here
+ if (shard == iterator.next()) {
+ iterator.remove();
+ return;
+ }
+ }
+ assert false : "Illegal state";
+ }
+ }
+
+ public boolean isKnown(DiscoveryNode node) {
+ return nodesToShards.containsKey(node.getId());
+ }
+
+ public void addNode(DiscoveryNode node) {
+ RoutingNode routingNode = new RoutingNode(node.id(), node);
+ nodesToShards.put(routingNode.nodeId(), routingNode);
+ }
+
+ public RoutingNodeIterator routingNodeIter(String nodeId) {
+ final RoutingNode routingNode = nodesToShards.get(nodeId);
+ if (routingNode == null) {
+ return null;
+ }
+ assert assertShardStats(this);
+ return new RoutingNodeIterator(routingNode);
+ }
+
+ public RoutingNode[] toArray() {
+ return nodesToShards.values().toArray(new RoutingNode[nodesToShards.size()]);
+ }
+
+ public final static class UnassignedShards implements Iterable<MutableShardRouting> {
+
+ private final List<MutableShardRouting> unassigned;
+
+ private int primaries = 0;
+ private long transactionId = 0;
+ private final UnassignedShards source;
+ private final long sourceTransactionId;
+
+ public UnassignedShards(UnassignedShards other) {
+ source = other;
+ sourceTransactionId = other.transactionId;
+ unassigned = new ArrayList<MutableShardRouting>(other.unassigned);
+ primaries = other.primaries;
+ }
+
+ public UnassignedShards() {
+ unassigned = new ArrayList<MutableShardRouting>();
+ source = null;
+ sourceTransactionId = -1;
+ }
+
+ public void add(MutableShardRouting mutableShardRouting) {
+ if(mutableShardRouting.primary()) {
+ primaries++;
+ }
+ unassigned.add(mutableShardRouting);
+ transactionId++;
+ }
+
+ public void addAll(Collection<MutableShardRouting> mutableShardRoutings) {
+ for (MutableShardRouting r : mutableShardRoutings) {
+ add(r);
+ }
+ }
+
+ public int size() {
+ return unassigned.size();
+ }
+
+ public int numPrimaries() {
+ return primaries;
+ }
+
+ @Override
+ public Iterator<MutableShardRouting> iterator() {
+ final Iterator<MutableShardRouting> iterator = unassigned.iterator();
+ return new Iterator<MutableShardRouting>() {
+ private MutableShardRouting current;
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public MutableShardRouting next() {
+ return current = iterator.next();
+ }
+
+ @Override
+ public void remove() {
+ iterator.remove();
+ if (current.primary()) {
+ primaries--;
+ }
+ transactionId++;
+ }
+ };
+ }
+
+ public boolean isEmpty() {
+ return unassigned.isEmpty();
+ }
+
+ public void shuffle() {
+ Collections.shuffle(unassigned);
+ }
+
+ public void clear() {
+ transactionId++;
+ unassigned.clear();
+ primaries = 0;
+ }
+
+ public void transactionEnd(UnassignedShards shards) {
+ assert shards.source == this && shards.sourceTransactionId == transactionId :
+ "Expected ID: " + shards.sourceTransactionId + " actual: " + transactionId + " Expected Source: " + shards.source + " actual: " + this;
+ transactionId++;
+ this.unassigned.clear();
+ this.unassigned.addAll(shards.unassigned);
+ this.primaries = shards.primaries;
+ }
+
+ public UnassignedShards transactionBegin() {
+ return new UnassignedShards(this);
+ }
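+
+ // Illustrative transaction pattern (not part of the original patch):
+ // UnassignedShards copy = unassigned.transactionBegin();
+ // /* mutate 'copy' while processing */
+ // unassigned.transactionEnd(copy); // asserts 'unassigned' was not modified in between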
+
+ public MutableShardRouting[] drain() {
+ MutableShardRouting[] mutableShardRoutings = unassigned.toArray(new MutableShardRouting[unassigned.size()]);
+ unassigned.clear();
+ primaries = 0;
+ transactionId++;
+ return mutableShardRoutings;
+ }
+ }
+
+ /**
+ * Calculates RoutingNodes statistics by iterating over all {@link MutableShardRouting}s
+ * in the cluster to ensure the book-keeping is correct.
+ * For performance reasons, this should only be called from asserts
+ *
+ * @return this method always returns <code>true</code> or throws an assertion error. If assertions are not enabled
+ * this method does nothing.
+ */
+ public static boolean assertShardStats(RoutingNodes routingNodes) {
+ boolean run = false;
+ assert (run = true); // only run if assertions are enabled!
+ if (!run) {
+ return true;
+ }
+ int unassignedPrimaryCount = 0;
+ int inactivePrimaryCount = 0;
+ int inactiveShardCount = 0;
+ int relocating = 0;
+ final Set<ShardId> seenShards = newHashSet();
+ Map<String, Integer> indicesAndShards = new HashMap<String, Integer>();
+ for (RoutingNode node : routingNodes) {
+ for (MutableShardRouting shard : node) {
+ if (!shard.active() && shard.relocatingNodeId() == null) {
+ if (!shard.relocating()) {
+ inactiveShardCount++;
+ if (shard.primary()) {
+ inactivePrimaryCount++;
+ }
+ }
+ }
+ if (shard.relocating()) {
+ relocating++;
+ }
+ seenShards.add(shard.shardId());
+ Integer i = indicesAndShards.get(shard.index());
+ if (i == null) {
+ i = shard.id();
+ }
+ indicesAndShards.put(shard.index(), Math.max(i, shard.id()));
+ }
+ }
+ // Assert that the assigned shard routings are consistent with the shards found on the nodes.
+ Set<Map.Entry<String, Integer>> entries = indicesAndShards.entrySet();
+ final List<MutableShardRouting> shards = newArrayList();
+ for (Map.Entry<String, Integer> e : entries) {
+ String index = e.getKey();
+ for (int i = 0; i < e.getValue(); i++) {
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting shardRouting : routingNode) {
+ if (shardRouting.index().equals(index) && shardRouting.id() == i) {
+ shards.add(shardRouting);
+ }
+ }
+ }
+ List<MutableShardRouting> mutableShardRoutings = routingNodes.assignedShards(new ShardId(index, i));
+ assert mutableShardRoutings.size() == shards.size();
+ for (MutableShardRouting r : mutableShardRoutings) {
+ assert shards.contains(r);
+ shards.remove(r);
+ }
+ assert shards.isEmpty();
+ }
+ }
+
+ for (MutableShardRouting shard : routingNodes.unassigned()) {
+ if (shard.primary()) {
+ unassignedPrimaryCount++;
+ }
+ seenShards.add(shard.shardId());
+ }
+
+ assert unassignedPrimaryCount == routingNodes.unassignedShards.numPrimaries() :
+ "Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().numPrimaries() + "]";
+ assert inactivePrimaryCount == routingNodes.inactivePrimaryCount :
+ "Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + routingNodes.inactivePrimaryCount + "]";
+ assert inactiveShardCount == routingNodes.inactiveShardCount :
+ "Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + routingNodes.inactiveShardCount + "]";
+ assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]";
+ return true;
+ }
+
+ public class RoutingNodesIterator implements Iterator<RoutingNode>, Iterable<MutableShardRouting> {
+ private RoutingNode current;
+ private final Iterator<RoutingNode> delegate;
+
+ public RoutingNodesIterator(Iterator<RoutingNode> iterator) {
+ delegate = iterator;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return delegate.hasNext();
+ }
+
+ @Override
+ public RoutingNode next() {
+ return current = delegate.next();
+ }
+
+ public RoutingNodeIterator nodeShards() {
+ return new RoutingNodeIterator(current);
+ }
+
+ @Override
+ public void remove() {
+ delegate.remove();
+ }
+
+ @Override
+ public Iterator<MutableShardRouting> iterator() {
+ return nodeShards();
+ }
+ }
+
+ public final class RoutingNodeIterator implements Iterator<MutableShardRouting>, Iterable<MutableShardRouting> {
+ private final RoutingNode iterable;
+ private MutableShardRouting shard;
+ private final Iterator<MutableShardRouting> delegate;
+
+ public RoutingNodeIterator(RoutingNode iterable) {
+ this.delegate = iterable.mutableIterator();
+ this.iterable = iterable;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return delegate.hasNext();
+ }
+
+ @Override
+ public MutableShardRouting next() {
+ return shard = delegate.next();
+ }
+
+ public void remove() {
+ delegate.remove();
+ RoutingNodes.this.remove(shard);
+ }
+
+ @Override
+ public Iterator<MutableShardRouting> iterator() {
+ return iterable.iterator();
+ }
+
+ public void moveToUnassigned() {
+ iterator().remove();
+ unassigned().add(new MutableShardRouting(shard.index(), shard.id(),
+ null, shard.primary(), ShardRoutingState.UNASSIGNED, shard.version() + 1));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
new file mode 100644
index 0000000..eb22095
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.Future;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+
+/**
+ * A {@link RoutingService} listens to cluster state changes. When this service
+ * receives a {@link ClusterChangedEvent} the cluster state will be verified and
+ * the routing tables might be updated.
+ * <p>
+ * Note: The {@link RoutingService} is responsible for cluster wide operations
+ * that include modifications to the cluster state. Such an operation can only
+ * be performed on the cluster's master node. Unless the local node this service
+ * is running on is the cluster's master node, this service will not perform any
+ * actions.
+ * </p>
+ */
+public class RoutingService extends AbstractLifecycleComponent<RoutingService> implements ClusterStateListener {
+
+ private static final String CLUSTER_UPDATE_TASK_SOURCE = "routing-table-updater";
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final AllocationService allocationService;
+
+ private final TimeValue schedule;
+
+ private volatile boolean routingTableDirty = false;
+
+ private volatile Future scheduledRoutingTableFuture;
+
+ @Inject
+ public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.allocationService = allocationService;
+ this.schedule = componentSettings.getAsTime("schedule", timeValueSeconds(10));
+ clusterService.addFirst(this);
+ }
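+
+ // Note (not part of the original patch): the periodic reroute interval defaults
+ // to 10 seconds and is read from this component's "schedule" setting above.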
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ if (scheduledRoutingTableFuture != null) {
+ scheduledRoutingTableFuture.cancel(true);
+ scheduledRoutingTableFuture = null;
+ }
+ clusterService.remove(this);
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.source().equals(CLUSTER_UPDATE_TASK_SOURCE)) {
+ // that's us, ignore this event
+ return;
+ }
+ if (event.state().nodes().localNodeMaster()) {
+ // we are master, schedule the routing table updater
+ if (scheduledRoutingTableFuture == null) {
+ // a new master (us), make sure we reroute shards
+ routingTableDirty = true;
+ scheduledRoutingTableFuture = threadPool.scheduleWithFixedDelay(new RoutingTableUpdater(), schedule);
+ }
+ if (event.nodesRemoved()) {
+ // if nodes were removed, we don't want to wait for the scheduled task
+ // since we want to get primary election as fast as possible
+ routingTableDirty = true;
+ reroute();
+ // Commented out since we make sure to reroute whenever shards changes state or metadata changes state
+// } else if (event.routingTableChanged()) {
+// routingTableDirty = true;
+// reroute();
+ } else {
+ if (event.nodesAdded()) {
+ for (DiscoveryNode node : event.nodesDelta().addedNodes()) {
+ if (node.dataNode()) {
+ routingTableDirty = true;
+ break;
+ }
+ }
+ }
+ }
+ } else {
+ if (scheduledRoutingTableFuture != null) {
+ scheduledRoutingTableFuture.cancel(true);
+ scheduledRoutingTableFuture = null;
+ }
+ }
+ }
+
+ private void reroute() {
+ try {
+ if (!routingTableDirty) {
+ return;
+ }
+ if (lifecycle.stopped()) {
+ return;
+ }
+ clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, Priority.HIGH, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ RoutingAllocation.Result routingResult = allocationService.reroute(currentState);
+ if (!routingResult.changed()) {
+ // no state changed
+ return currentState;
+ }
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ routingTableDirty = false;
+ } catch (Exception e) {
+ logger.warn("Failed to reroute routing table", e);
+ }
+ }
+
+ private class RoutingTableUpdater implements Runnable {
+
+ @Override
+ public void run() {
+ reroute();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
new file mode 100644
index 0000000..efca8ae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -0,0 +1,458 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.google.common.collect.*;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndexMissingException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Represents a global cluster-wide routing table for all indices including the
+ * version of the current routing state.
+ *
+ * @see IndexRoutingTable
+ */
+public class RoutingTable implements Iterable<IndexRoutingTable> {
+
+ public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build();
+
+ private final long version;
+
+ // index to IndexRoutingTable map
+ private final ImmutableMap<String, IndexRoutingTable> indicesRouting;
+
+ RoutingTable(long version, Map<String, IndexRoutingTable> indicesRouting) {
+ this.version = version;
+ this.indicesRouting = ImmutableMap.copyOf(indicesRouting);
+ }
+
+ /**
+ * Returns the version of the {@link RoutingTable}.
+ *
+ * @return version of the {@link RoutingTable}
+ */
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public UnmodifiableIterator<IndexRoutingTable> iterator() {
+ return indicesRouting.values().iterator();
+ }
+
+ public boolean hasIndex(String index) {
+ return indicesRouting.containsKey(index);
+ }
+
+ public IndexRoutingTable index(String index) {
+ return indicesRouting.get(index);
+ }
+
+ public Map<String, IndexRoutingTable> indicesRouting() {
+ return indicesRouting;
+ }
+
+ public Map<String, IndexRoutingTable> getIndicesRouting() {
+ return indicesRouting();
+ }
+
+ public RoutingNodes routingNodes(ClusterState state) {
+ return new RoutingNodes(state);
+ }
+
+ public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
+ RoutingTableValidation validation = validate(metaData);
+ if (!validation.valid()) {
+ throw new RoutingValidationException(validation);
+ }
+ return this;
+ }
+
+ public RoutingTableValidation validate(MetaData metaData) {
+ RoutingTableValidation validation = new RoutingTableValidation();
+ for (IndexRoutingTable indexRoutingTable : this) {
+ indexRoutingTable.validate(validation, metaData);
+ }
+ return validation;
+ }
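+
+ // Usage sketch (assumes a populated MetaData instance named "metaData"):
+ //
+ //   RoutingTableValidation validation = routingTable.validate(metaData);
+ //   if (!validation.valid()) {
+ //       throw new RoutingValidationException(validation);
+ //   }
+ //
+ // validateRaiseException(metaData) performs exactly this check in one call.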
+
+ public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
+ List<ShardRouting> shards = newArrayList();
+ for (IndexRoutingTable indexRoutingTable : this) {
+ shards.addAll(indexRoutingTable.shardsWithState(states));
+ }
+ return shards;
+ }
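+
+ // Usage sketch: collecting every shard that is currently started or
+ // relocating (the states are varargs):
+ //
+ //   List<ShardRouting> active = routingTable.shardsWithState(
+ //           ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);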
+
+ /**
+ * Returns all the shards (primaries and replicas) for the provided indices.
+ *
+ * @param indices The indices to return the shards for; can be <tt>null</tt> or an empty array to indicate all indices
+ * @return All the shards of the matching indices
+ * @throws IndexMissingException If one of the provided indices does not exist
+ */
+ public List<ShardRouting> allShards(String... indices) throws IndexMissingException {
+ List<ShardRouting> shards = Lists.newArrayList();
+ if (indices == null || indices.length == 0) {
+ indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
+ }
+ for (String index : indices) {
+ IndexRoutingTable indexRoutingTable = index(index);
+ if (indexRoutingTable == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ shards.add(shardRouting);
+ }
+ }
+ }
+ return shards;
+ }
+
+ /**
+ * Returns all the shards (primary + replicas) for the provided indices, grouped (each group is a single element,
+ * consisting of one shard). This is handy for components that expect to get group iterators, but still want in some
+ * cases to iterate over all the shards (and not just one shard per replication group).
+ *
+ * @param indices The indices to return the shards for; can be <tt>null</tt> or an empty array to indicate all indices
+ * @return All the shards, each grouped into its own single-element group
+ * @throws IndexMissingException Declared for consistency, but missing indices are currently ignored rather than thrown on
+ * @see IndexRoutingTable#groupByAllIt()
+ */
+ public GroupShardsIterator allShardsGrouped(String... indices) throws IndexMissingException {
+ // use list here since we need to maintain identity across shards
+ ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
+ if (indices == null || indices.length == 0) {
+ indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
+ }
+ for (String index : indices) {
+ IndexRoutingTable indexRoutingTable = index(index);
+ if (indexRoutingTable == null) {
+ continue;
+ // we simply ignore indices that don't exist (makes sense for the operations that currently use this)
+// throw new IndexMissingException(new Index(index));
+ }
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ set.add(shardRouting.shardsIt());
+ }
+ }
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ public GroupShardsIterator allActiveShardsGrouped(String[] indices, boolean includeEmpty) throws IndexMissingException {
+ // use list here since we need to maintain identity across shards
+ ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
+ if (indices == null || indices.length == 0) {
+ indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
+ }
+ for (String index : indices) {
+ IndexRoutingTable indexRoutingTable = index(index);
+ if (indexRoutingTable == null) {
+ continue;
+ // we simply ignore indices that don't exist (makes sense for the operations that currently use this)
+// throw new IndexMissingException(new Index(index));
+ }
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.active()) {
+ set.add(shardRouting.shardsIt());
+ } else if (includeEmpty) { // we need this for counting properly, just make it an empty one
+ set.add(new PlainShardIterator(shardRouting.shardId(), ImmutableList.<ShardRouting>of()));
+ }
+ }
+ }
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty) throws IndexMissingException {
+ // use list here since we need to maintain identity across shards
+ ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
+ if (indices == null || indices.length == 0) {
+ indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
+ }
+ for (String index : indices) {
+ IndexRoutingTable indexRoutingTable = index(index);
+ if (indexRoutingTable == null) {
+ continue;
+ // we simply ignore indices that don't exist (makes sense for the operations that currently use this)
+// throw new IndexMissingException(new Index(index));
+ }
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.assignedToNode()) {
+ set.add(shardRouting.shardsIt());
+ } else if (includeEmpty) { // we need this for counting properly, just make it an empty one
+ set.add(new PlainShardIterator(shardRouting.shardId(), ImmutableList.<ShardRouting>of()));
+ }
+ }
+ }
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ /**
+ * Returns all the *active* primary shards for the provided indices, grouped (each group is a single element,
+ * consisting of the primary shard). This is handy for components that expect to get group iterators, but still
+ * want in some cases to iterate over all primary shards (and not just one shard per replication group).
+ *
+ * @param indices The indices to return the primary shards for; can be <tt>null</tt> or an empty array to indicate all indices
+ * @param includeEmpty if <tt>true</tt>, inactive primaries are included as empty groups (needed for counting properly)
+ * @return All the active primary shards, each grouped into its own single-element group
+ * @throws IndexMissingException If one of the provided indices does not exist
+ * @see IndexRoutingTable#groupByAllIt()
+ */
+ public GroupShardsIterator activePrimaryShardsGrouped(String[] indices, boolean includeEmpty) throws IndexMissingException {
+ // use list here since we need to maintain identity across shards
+ ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
+ if (indices == null || indices.length == 0) {
+ indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
+ }
+ for (String index : indices) {
+ IndexRoutingTable indexRoutingTable = index(index);
+ if (indexRoutingTable == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ ShardRouting primary = indexShardRoutingTable.primaryShard();
+ if (primary.active()) {
+ set.add(primary.shardsIt());
+ } else if (includeEmpty) { // we need this for counting properly, just make it an empty one
+ set.add(new PlainShardIterator(primary.shardId(), ImmutableList.<ShardRouting>of()));
+ }
+ }
+ }
+ return new GroupShardsIterator(set);
+ }
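+
+ // Usage sketch: consuming the GroupShardsIterator returned by the *Grouped
+ // methods; each group is a single-shard iterator (assumes "routingTable"
+ // and an index named "index1" exist):
+ //
+ //   GroupShardsIterator groups = routingTable.activePrimaryShardsGrouped(
+ //           new String[]{"index1"}, true);
+ //   for (ShardIterator group : groups) {
+ //       ShardRouting primary = group.nextOrNull(); // null for the "empty" groups
+ //   }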
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static Builder builder(RoutingTable routingTable) {
+ return new Builder(routingTable);
+ }
+
+ public static class Builder {
+
+ private long version;
+ private final Map<String, IndexRoutingTable> indicesRouting = newHashMap();
+
+ public Builder() {
+
+ }
+
+ public Builder(RoutingTable routingTable) {
+ version = routingTable.version;
+ for (IndexRoutingTable indexRoutingTable : routingTable) {
+ indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
+ }
+ }
+
+ public Builder updateNodes(RoutingNodes routingNodes) {
+ // this is being called without pre-initializing the routing table, so we must copy over the version as well
+ this.version = routingNodes.routingTable().version();
+
+ Map<String, IndexRoutingTable.Builder> indexRoutingTableBuilders = newHashMap();
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting shardRoutingEntry : routingNode) {
+ // every relocating shard has a double entry, ignore the target one.
+ if (shardRoutingEntry.state() == ShardRoutingState.INITIALIZING && shardRoutingEntry.relocatingNodeId() != null)
+ continue;
+
+ String index = shardRoutingEntry.index();
+ IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
+ if (indexBuilder == null) {
+ indexBuilder = new IndexRoutingTable.Builder(index);
+ indexRoutingTableBuilders.put(index, indexBuilder);
+ }
+
+ IndexShardRoutingTable refData = routingNodes.routingTable().index(shardRoutingEntry.index()).shard(shardRoutingEntry.id());
+ indexBuilder.addShard(refData, shardRoutingEntry);
+ }
+ }
+ for (MutableShardRouting shardRoutingEntry : Iterables.concat(routingNodes.unassigned(), routingNodes.ignoredUnassigned())) {
+ String index = shardRoutingEntry.index();
+ IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
+ if (indexBuilder == null) {
+ indexBuilder = new IndexRoutingTable.Builder(index);
+ indexRoutingTableBuilders.put(index, indexBuilder);
+ }
+ IndexShardRoutingTable refData = routingNodes.routingTable().index(shardRoutingEntry.index()).shard(shardRoutingEntry.id());
+ indexBuilder.addShard(refData, shardRoutingEntry);
+ }
+
+ for (ShardId shardId : routingNodes.getShardsToClearPostAllocationFlag()) {
+ IndexRoutingTable.Builder indexRoutingBuilder = indexRoutingTableBuilders.get(shardId.index().name());
+ if (indexRoutingBuilder != null) {
+ indexRoutingBuilder.clearPostAllocationFlag(shardId);
+ }
+ }
+
+ for (IndexRoutingTable.Builder indexBuilder : indexRoutingTableBuilders.values()) {
+ add(indexBuilder);
+ }
+ return this;
+ }
+
+ public Builder updateNumberOfReplicas(int numberOfReplicas, String... indices) throws IndexMissingException {
+ if (indices == null || indices.length == 0) {
+ indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
+ }
+ for (String index : indices) {
+ IndexRoutingTable indexRoutingTable = indicesRouting.get(index);
+ if (indexRoutingTable == null) {
+ // ignore a missing index here; it is closed...
+ continue;
+ }
+ int currentNumberOfReplicas = indexRoutingTable.shards().get(0).size() - 1; // remove the required primary
+ IndexRoutingTable.Builder builder = new IndexRoutingTable.Builder(index);
+ // re-add all the shards
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ builder.addIndexShard(indexShardRoutingTable);
+ }
+ if (currentNumberOfReplicas < numberOfReplicas) {
+ // now, add "empty" ones
+ for (int i = 0; i < (numberOfReplicas - currentNumberOfReplicas); i++) {
+ builder.addReplica();
+ }
+ } else if (currentNumberOfReplicas > numberOfReplicas) {
+ // delta is strictly positive in this branch, so simply remove the surplus replicas
+ int delta = currentNumberOfReplicas - numberOfReplicas;
+ for (int i = 0; i < delta; i++) {
+ builder.removeReplica();
+ }
+ }
+ indicesRouting.put(index, builder.build());
+ }
+ return this;
+ }
+
+ public Builder addAsNew(IndexMetaData indexMetaData) {
+ if (indexMetaData.state() == IndexMetaData.State.OPEN) {
+ IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
+ .initializeAsNew(indexMetaData);
+ add(indexRoutingBuilder);
+ }
+ return this;
+ }
+
+ public Builder addAsRecovery(IndexMetaData indexMetaData) {
+ if (indexMetaData.state() == IndexMetaData.State.OPEN) {
+ IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
+ .initializeAsRecovery(indexMetaData);
+ add(indexRoutingBuilder);
+ }
+ return this;
+ }
+
+ public Builder addAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+ IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
+ .initializeAsRestore(indexMetaData, restoreSource);
+ add(indexRoutingBuilder);
+ return this;
+ }
+
+ public Builder addAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+ IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
+ .initializeAsNewRestore(indexMetaData, restoreSource);
+ add(indexRoutingBuilder);
+ return this;
+ }
+
+ public Builder add(IndexRoutingTable indexRoutingTable) {
+ indexRoutingTable.validate();
+ indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
+ return this;
+ }
+
+ public Builder add(IndexRoutingTable.Builder indexRoutingTableBuilder) {
+ add(indexRoutingTableBuilder.build());
+ return this;
+ }
+
+ public Builder remove(String index) {
+ indicesRouting.remove(index);
+ return this;
+ }
+
+ public Builder version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public RoutingTable build() {
+ // normalize the versions right before we build it...
+ for (IndexRoutingTable indexRoutingTable : indicesRouting.values()) {
+ indicesRouting.put(indexRoutingTable.index(), indexRoutingTable.normalizeVersions());
+ }
+ return new RoutingTable(version, indicesRouting);
+ }
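+
+ // Usage sketch: assembling a routing table from index metadata (assumes an
+ // IndexMetaData instance named "indexMetaData"):
+ //
+ //   RoutingTable routingTable = RoutingTable.builder()
+ //           .addAsNew(indexMetaData)
+ //           .version(0)
+ //           .build();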
+
+ public static RoutingTable readFrom(StreamInput in) throws IOException {
+ Builder builder = new Builder();
+ builder.version = in.readLong();
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
+ builder.add(index);
+ }
+
+ return builder.build();
+ }
+
+ public static void writeTo(RoutingTable table, StreamOutput out) throws IOException {
+ out.writeLong(table.version);
+ out.writeVInt(table.indicesRouting.size());
+ for (IndexRoutingTable index : table.indicesRouting.values()) {
+ IndexRoutingTable.Builder.writeTo(index, out);
+ }
+ }
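+
+ // Usage sketch: readFrom/writeTo round-trip a routing table over the wire
+ // (assumes a paired StreamOutput "out" and StreamInput "in" backed by the
+ // same bytes):
+ //
+ //   RoutingTable.Builder.writeTo(routingTable, out);
+ //   RoutingTable copy = RoutingTable.Builder.readFrom(in);
+ //   assert copy.version() == routingTable.version();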
+ }
+
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder("routing_table:\n");
+ for (Map.Entry<String, IndexRoutingTable> entry : indicesRouting.entrySet()) {
+ sb.append(entry.getValue().prettyPrint()).append('\n');
+ }
+ return sb.toString();
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java
new file mode 100644
index 0000000..6a0eac2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Lists.newArrayListWithCapacity;
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Encapsulates the result of a routing table validation and provides access to
+ * validation failures.
+ */
+public class RoutingTableValidation implements Serializable, Streamable {
+
+ private boolean valid = true;
+
+ private List<String> failures;
+
+ private Map<String, List<String>> indicesFailures;
+
+ public RoutingTableValidation() {
+ }
+
+ public boolean valid() {
+ return valid;
+ }
+
+ public List<String> allFailures() {
+ if (failures().isEmpty() && indicesFailures().isEmpty()) {
+ return ImmutableList.of();
+ }
+ List<String> allFailures = newArrayList(failures());
+ for (Map.Entry<String, List<String>> entry : indicesFailures().entrySet()) {
+ for (String failure : entry.getValue()) {
+ allFailures.add("Index [" + entry.getKey() + "]: " + failure);
+ }
+ }
+ return allFailures;
+ }
+
+ public List<String> failures() {
+ if (failures == null) {
+ return ImmutableList.of();
+ }
+ return failures;
+ }
+
+ public Map<String, List<String>> indicesFailures() {
+ if (indicesFailures == null) {
+ return ImmutableMap.of();
+ }
+ return indicesFailures;
+ }
+
+ public List<String> indexFailures(String index) {
+ if (indicesFailures == null) {
+ return ImmutableList.of();
+ }
+ List<String> indexFailures = indicesFailures.get(index);
+ if (indexFailures == null) {
+ return ImmutableList.of();
+ }
+ return indexFailures;
+ }
+
+ public void addFailure(String failure) {
+ valid = false;
+ if (failures == null) {
+ failures = newArrayList();
+ }
+ failures.add(failure);
+ }
+
+ public void addIndexFailure(String index, String failure) {
+ valid = false;
+ if (indicesFailures == null) {
+ indicesFailures = newHashMap();
+ }
+ List<String> indexFailures = indicesFailures.get(index);
+ if (indexFailures == null) {
+ indexFailures = Lists.newArrayList();
+ indicesFailures.put(index, indexFailures);
+ }
+ indexFailures.add(failure);
+ }
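+
+ // Usage sketch: recording failures during validation and reading them back:
+ //
+ //   RoutingTableValidation validation = new RoutingTableValidation();
+ //   validation.addFailure("routing table version mismatch");
+ //   validation.addIndexFailure("index1", "wrong number of shards");
+ //   assert !validation.valid();
+ //   // allFailures() prefixes index failures, e.g. "Index [index1]: ..."
+ //   for (String failure : validation.allFailures()) {
+ //       // handle failure
+ //   }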
+
+ @Override
+ public String toString() {
+ return allFailures().toString();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ valid = in.readBoolean();
+ int size = in.readVInt();
+ if (size == 0) {
+ failures = ImmutableList.of();
+ } else {
+ failures = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ failures.add(in.readString());
+ }
+ }
+ size = in.readVInt();
+ if (size == 0) {
+ indicesFailures = ImmutableMap.of();
+ } else {
+ indicesFailures = newHashMap();
+ for (int i = 0; i < size; i++) {
+ String index = in.readString();
+ int size2 = in.readVInt();
+ List<String> indexFailures = newArrayListWithCapacity(size2);
+ for (int j = 0; j < size2; j++) {
+ indexFailures.add(in.readString());
+ }
+ indicesFailures.put(index, indexFailures);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(valid);
+ if (failures == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(failures.size());
+ for (String failure : failures) {
+ out.writeString(failure);
+ }
+ }
+ if (indicesFailures == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(indicesFailures.size());
+ for (Map.Entry<String, List<String>> entry : indicesFailures.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeVInt(entry.getValue().size());
+ for (String failure : entry.getValue()) {
+ out.writeString(failure);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingValidationException.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingValidationException.java
new file mode 100644
index 0000000..691b4aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingValidationException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+/**
+ * A {@link RoutingException} thrown when the routing table
+ * fails validation
+ */
+public class RoutingValidationException extends RoutingException {
+
+ private final RoutingTableValidation validation;
+
+ public RoutingValidationException(RoutingTableValidation validation) {
+ super(validation.toString());
+ this.validation = validation;
+ }
+
+ public RoutingTableValidation validation() {
+ return this.validation;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardIterator.java b/src/main/java/org/elasticsearch/cluster/routing/ShardIterator.java
new file mode 100644
index 0000000..ceeb651
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/ShardIterator.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Allows iteration over the set of shard instances (routings) that belong to a single shard id group.
+ */
+public interface ShardIterator extends ShardsIterator {
+
+ /**
+ * The shard id this group relates to.
+ */
+ ShardId shardId();
+
+ /**
+ * Resets the iterator.
+ */
+ void reset();
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
new file mode 100644
index 0000000..33b299f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Shard routing represents the state of a shard instance allocated in the cluster.
+ */
+public interface ShardRouting extends Streamable, Serializable, ToXContent {
+
+ /**
+ * The shard id.
+ */
+ ShardId shardId();
+
+ /**
+ * The index name.
+ */
+ String index();
+
+ /**
+ * The index name.
+ */
+ String getIndex();
+
+ /**
+ * The shard id number within the index.
+ */
+ int id();
+
+ /**
+ * The shard id number within the index.
+ */
+ int getId();
+
+ /**
+ * The routing version associated with the shard.
+ */
+ long version();
+
+ /**
+ * The shard state.
+ */
+ ShardRoutingState state();
+
+ /**
+ * The shard is unassigned (not allocated to any node).
+ */
+ boolean unassigned();
+
+ /**
+ * The shard is initializing (usually recovering either from peer shard
+ * or from gateway).
+ */
+ boolean initializing();
+
+ /**
+ * The shard is in started mode.
+ */
+ boolean started();
+
+ /**
+ * Returns <code>true</code> iff this shard is currently relocating to
+ * another node; otherwise <code>false</code>.
+ *
+ * @see ShardRoutingState#RELOCATING
+ */
+ boolean relocating();
+
+ /**
+ * Returns <code>true</code> iff this shard is currently
+ * {@link ShardRoutingState#STARTED started} or
+ * {@link ShardRoutingState#RELOCATING relocating} to another node;
+ * otherwise <code>false</code>.
+ */
+ boolean active();
+
+ /**
+ * Returns <code>true</code> iff this shard is assigned to a node, i.e. not
+ * {@link ShardRoutingState#UNASSIGNED unassigned}; otherwise <code>false</code>.
+ */
+ boolean assignedToNode();
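+
+ // Usage sketch: interrogating a shard's lifecycle; active() is equivalent
+ // to started() || relocating(), and assignedToNode() to !unassigned()
+ // (assumes a ShardRouting instance named "shard"):
+ //
+ //   if (shard.assignedToNode() && shard.active()) {
+ //       String nodeId = shard.currentNodeId();
+ //   }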
+
+ /**
+ * The current node id the shard is allocated on.
+ */
+ String currentNodeId();
+
+ /**
+ * The relocating node id the shard is either relocating to or relocating from.
+ */
+ String relocatingNodeId();
+
+ /**
+ * Snapshot id and repository where this shard is being restored from
+ */
+ RestoreSource restoreSource();
+
+ /**
+ * Returns <code>true</code> iff this shard is a primary.
+ */
+ boolean primary();
+
+ /**
+ * A short description of the shard.
+ */
+ String shortSummary();
+
+ /**
+ * A shard iterator with just this shard in it.
+ */
+ ShardIterator shardsIt();
+
+ /**
+ * Writes this shard routing to the stream, omitting the index name and shard id
+ */
+ void writeToThin(StreamOutput out) throws IOException;
+
+ void readFromThin(StreamInput in) throws ClassNotFoundException, IOException;
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java
new file mode 100644
index 0000000..7bea17c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+/**
+ * Represents the current state of a {@link ShardRouting} as defined by the
+ * cluster.
+ */
+public enum ShardRoutingState {
+ /**
+ * The shard is not assigned to any node.
+ */
+ UNASSIGNED((byte) 1),
+ /**
+ * The shard is initializing (probably recovering from either a peer shard
+ * or gateway).
+ */
+ INITIALIZING((byte) 2),
+ /**
+ * The shard is started.
+ */
+ STARTED((byte) 3),
+ /**
+ * The shard is in the process of being relocated.
+ */
+ RELOCATING((byte) 4);
+
+ private byte value;
+
+ ShardRoutingState(byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Byte value of this {@link ShardRoutingState}
+ * @return Byte value of this {@link ShardRoutingState}
+ */
+ public byte value() {
+ return this.value;
+ }
+
+ public static ShardRoutingState fromValue(byte value) {
+ switch (value) {
+ case 1:
+ return UNASSIGNED;
+ case 2:
+ return INITIALIZING;
+ case 3:
+ return STARTED;
+ case 4:
+ return RELOCATING;
+ default:
+ throw new ElasticsearchIllegalStateException("No routing state mapped for [" + value + "]");
+ }
+ }
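+
+ // Usage sketch: value() and fromValue(byte) round-trip, which is how the
+ // state travels over the wire:
+ //
+ //   byte b = ShardRoutingState.STARTED.value();
+ //   assert ShardRoutingState.fromValue(b) == ShardRoutingState.STARTED;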
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardShuffler.java b/src/main/java/org/elasticsearch/cluster/routing/ShardShuffler.java
new file mode 100644
index 0000000..35ea17e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/ShardShuffler.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import java.util.List;
+
+/**
+ * A shuffler for shards whose primary goal is to balance load.
+ */
+public abstract class ShardShuffler {
+
+ /**
+ * Return a new seed.
+ */
+ public abstract int nextSeed();
+
+ /**
+ * Return a shuffled view over the list of shards. The behavior of this method must be deterministic: if the same list and the same seed
+ * are provided twice, then the result needs to be the same.
+ */
+ public abstract List<ShardRouting> shuffle(List<ShardRouting> shards, int seed);
+
+ /**
+ * Equivalent to calling <code>shuffle(shards, nextSeed())</code>.
+ */
+ public List<ShardRouting> shuffle(List<ShardRouting> shards) {
+ return shuffle(shards, nextSeed());
+ }
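+
+ // Usage sketch: the determinism contract in practice (assumes a concrete
+ // ShardShuffler instance named "shuffler" and a List<ShardRouting> "shards"):
+ //
+ //   int seed = shuffler.nextSeed();
+ //   List<ShardRouting> first = shuffler.shuffle(shards, seed);
+ //   List<ShardRouting> second = shuffler.shuffle(shards, seed);
+ //   assert first.equals(second); // same list + same seed => same order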
+
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java b/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java
new file mode 100644
index 0000000..57db824
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing;
+
+/**
+ * Allows iteration over a set of possibly unrelated shards.
+ */
+public interface ShardsIterator {
+
+ /**
+ * Resets the iterator to its initial state.
+ */
+ void reset();
+
+ /**
+ * The number of shard routing instances.
+ *
+ * @return number of shard routing instances in this iterator
+ */
+ int size();
+
+ /**
+ * The number of active shard routing instances
+ *
+ * @return number of active shard routing instances
+ */
+ int sizeActive();
+
+ /**
+ * Returns the number of replicas in this iterator that are not in the
+ * {@link ShardRoutingState#UNASSIGNED} state. The returned count double-counts
+ * replicas that are in the {@link ShardRoutingState#RELOCATING} state
+ */
+ int assignedReplicasIncludingRelocating();
+
+ /**
+ * Returns the next shard, or <tt>null</tt> if none available.
+ */
+ ShardRouting nextOrNull();
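+
+ // Usage sketch: the typical consumption loop for a ShardsIterator "it":
+ //
+ //   ShardRouting shard;
+ //   while ((shard = it.nextOrNull()) != null) {
+ //       // process shard
+ //   }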
+
+ /**
+ * Returns the first shard, or <tt>null</tt>, without
+ * advancing the iterator.
+ *
+ * @see ShardRouting#assignedToNode()
+ */
+ ShardRouting firstOrNull();
+
+ /**
+ * Return the number of shards remaining in this {@link ShardsIterator}
+ *
+ * @return number of shards remaining
+ */
+ int remaining();
+
+ @Override
+ int hashCode();
+
+ @Override
+ boolean equals(Object other);
+
+ Iterable<ShardRouting> asUnordered();
+}
+
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationExplanation.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationExplanation.java
new file mode 100644
index 0000000..4bd19ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationExplanation.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Instances of this class keep explanations of decisions that have been made by the allocation process.
+ * An {@link AllocationExplanation} consists of a set of per-node explanations.
+ * Since {@link NodeExplanation}s relate to shards, an {@link AllocationExplanation} maps
+ * a shard id to a list of {@link NodeExplanation}s.
+ */
+public class AllocationExplanation implements Streamable {
+
+ public static final AllocationExplanation EMPTY = new AllocationExplanation();
+
+ /**
+ * Instances of this class keep a message and information about a node involved in an allocation decision
+ */
+ public static class NodeExplanation {
+ private final DiscoveryNode node;
+
+ private final String description;
+
+ /**
+ * Creates a new {@link NodeExplanation}
+ *
+ * @param node node referenced by this {@link NodeExplanation}
+ * @param description a message associated with the given node
+ */
+ public NodeExplanation(DiscoveryNode node, String description) {
+ this.node = node;
+ this.description = description;
+ }
+
+ /**
+ * The node referenced by the explanation
+ * @return referenced node
+ */
+ public DiscoveryNode node() {
+ return node;
+ }
+
+ /**
+ * Get the explanation for the node
+ * @return explanation for the node
+ */
+ public String description() {
+ return description;
+ }
+ }
+
+ private final Map<ShardId, List<NodeExplanation>> explanations = Maps.newHashMap();
+
+ /**
+ * Create and add a node explanation to this explanation referencing a shard
+ * @param shardId id of the referenced shard
+ * @param nodeExplanation the explanation itself
+ * @return this {@link AllocationExplanation} with the explanation added
+ */
+ public AllocationExplanation add(ShardId shardId, NodeExplanation nodeExplanation) {
+ List<NodeExplanation> list = explanations.get(shardId);
+ if (list == null) {
+ list = Lists.newArrayList();
+ explanations.put(shardId, list);
+ }
+ list.add(nodeExplanation);
+ return this;
+ }
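+
+ // Usage sketch: attaching a per-node explanation to a shard's allocation
+ // (assumes a DiscoveryNode instance named "node" and a ShardId "shardId";
+ // the message text is illustrative):
+ //
+ //   explanation.add(shardId, new NodeExplanation(node, "no free disk"));
+ //   List<NodeExplanation> forShard = explanation.explanations().get(shardId);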
+
+ /**
+ * All explanations contained in this {@link AllocationExplanation}
+ * @return Map of shard ids and corresponding explanations
+ */
+ public Map<ShardId, List<NodeExplanation>> explanations() {
+ return this.explanations;
+ }
+
+ /**
+ * Reads an {@link AllocationExplanation} from a {@link StreamInput}
+ * @param in {@link StreamInput} to read from
+ * @return a new {@link AllocationExplanation} read from the stream
+ * @throws IOException if an error occurs while reading from the stream
+ */
+ public static AllocationExplanation readAllocationExplanation(StreamInput in) throws IOException {
+ AllocationExplanation e = new AllocationExplanation();
+ e.readFrom(in);
+ return e;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ ShardId shardId = ShardId.readShardId(in);
+ int size2 = in.readVInt();
+ List<NodeExplanation> ne = Lists.newArrayListWithCapacity(size2);
+ for (int j = 0; j < size2; j++) {
+ DiscoveryNode node = null;
+ if (in.readBoolean()) {
+ node = DiscoveryNode.readNode(in);
+ }
+ ne.add(new NodeExplanation(node, in.readString()));
+ }
+ explanations.put(shardId, ne);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(explanations.size());
+ for (Map.Entry<ShardId, List<NodeExplanation>> entry : explanations.entrySet()) {
+ entry.getKey().writeTo(out);
+ out.writeVInt(entry.getValue().size());
+ for (NodeExplanation nodeExplanation : entry.getValue()) {
+ if (nodeExplanation.node() == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ nodeExplanation.node().writeTo(out);
+ }
+ out.writeString(nodeExplanation.description());
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationModule.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationModule.java
new file mode 100644
index 0000000..d79ed7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationModule.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecidersModule;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * The {@link AllocationModule} wires up the modules involved in the
+ * allocation process: a {@link ShardsAllocatorModule} and an
+ * {@link AllocationDecidersModule}.
+ */
+public class AllocationModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ /**
+ * Creates a new {@link AllocationModule} for the given settings.
+ */
+ public AllocationModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(new ShardsAllocatorModule(settings), new AllocationDecidersModule(settings));
+ }
+
+ @Override
+ protected void configure() {
+ bind(AllocationService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
new file mode 100644
index 0000000..d6c1902
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -0,0 +1,539 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
+
+/**
+ * This service manages shard allocation within a cluster. The
+ * {@link AllocationService} consults its {@link AllocationDeciders} to choose
+ * the nodes shards are allocated to. It also handles nodes joining or leaving
+ * the cluster and the resulting rerouting of shards.
+ */
+public class AllocationService extends AbstractComponent {
+
+ private final AllocationDeciders allocationDeciders;
+ private final ClusterInfoService clusterInfoService;
+ private final ShardsAllocators shardsAllocators;
+
+ @Inject
+ public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) {
+ super(settings);
+ this.allocationDeciders = allocationDeciders;
+ this.shardsAllocators = shardsAllocators;
+ this.clusterInfoService = clusterInfoService;
+ }
+
+ /**
+ * Applies the started shards. Note, this method may be called several times with the same shard; repeated calls are ignored.
+ * <p/>
+ * <p>If the same instance of the routing table is returned, then no change has been made.</p>
+ */
+ public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards) {
+ return applyStartedShards(clusterState, startedShards, true);
+ }
+
+ public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards, boolean withReroute) {
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ // shuffle the unassigned shards, just so repeatedly failing ("poison") shards don't always sort first
+ routingNodes.unassigned().shuffle();
+ StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
+ boolean changed = applyStartedShards(routingNodes, startedShards);
+ if (!changed) {
+ return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
+ }
+ shardsAllocators.applyStartedShards(allocation);
+ if (withReroute) {
+ reroute(allocation);
+ }
+ return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
+ }
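+
+ // Usage sketch: feeding started shards back into the cluster state (assumes
+ // this service is named "allocationService"; Result#changed() reports the
+ // boolean passed to the Result constructor above):
+ //
+ //   RoutingAllocation.Result result =
+ //           allocationService.applyStartedShards(clusterState, startedShards);
+ //   if (result.changed()) {
+ //       // publish a new ClusterState built from result.routingTable()
+ //   }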
+
+ public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
+ return applyFailedShards(clusterState, ImmutableList.of(failedShard));
+ }
+
+ /**
+ * Applies the failed shards. Note, this method may be called several times with the same shard; repeated calls are ignored.
+ * <p/>
+ * <p>If the same instance of the routing table is returned, then no change has been made.</p>
+ */
+ public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List<ShardRouting> failedShards) {
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ // shuffle the unassigned shards, just so repeatedly failing ("poison") shards don't always sort first
+ routingNodes.unassigned().shuffle();
+ FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
+ boolean changed = false;
+ for (ShardRouting failedShard : failedShards) {
+ changed |= applyFailedShard(allocation, failedShard, true);
+ }
+ if (!changed) {
+ return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
+ }
+ shardsAllocators.applyFailedShards(allocation);
+ reroute(allocation);
+ return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
+ }
+
+ public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
+ return reroute(clusterState, commands, false);
+ }
+
+ public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean debug) throws ElasticsearchException {
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ // we don't shuffle the unassigned shards here, to try and get as close as possible to
+ // a consistent result of the effect the commands have on the routing
+ // this allows systems to dry run the commands, see the resulting cluster state, and act on it
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
+ allocation.debugDecision(debug);
+ // we ignore disable allocation, because commands are explicit
+ allocation.ignoreDisable(true);
+ commands.execute(allocation);
+ // we revert the ignore disable flag, since when rerouting, we want the original setting to take place
+ allocation.ignoreDisable(false);
+ // the assumption is that commands will move / act on shards (or fail through exceptions)
+ // so, there will always be shard "movements", so no need to check on reroute
+ reroute(allocation);
+ return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
+ }
+
+ /**
+ * Reroutes the routing table based on the live nodes.
+ * <p/>
+ * <p>If the same instance of the routing table is returned, then no change has been made.
+ */
+ public RoutingAllocation.Result reroute(ClusterState clusterState) {
+ return reroute(clusterState, false);
+ }
+
+ /**
+ * Reroutes the routing table based on the live nodes.
+ * <p/>
+ * <p>If the same instance of the routing table is returned, then no change has been made.
+ */
+ public RoutingAllocation.Result reroute(ClusterState clusterState, boolean debug) {
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ // shuffle the unassigned shards, just so repeatedly failing ("poison") shards don't always sort first
+ routingNodes.unassigned().shuffle();
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
+ allocation.debugDecision(debug);
+ if (!reroute(allocation)) {
+ return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
+ }
+ return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
+ }
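+
+ // Usage sketch: a routine reroute; only when the result reports a change
+ // does a new routing table need to be published:
+ //
+ //   RoutingAllocation.Result result = allocationService.reroute(clusterState);
+ //   if (result.changed()) {
+ //       // build a new ClusterState carrying result.routingTable() and publish it
+ //   }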
+
+ /**
+ * Handles a reroute *without* any reassignment of unassigned shards or rebalancing. Removed
+ * nodes are handled, but their shards are only moved to UNASSIGNED and are not reassigned.
+ */
+ public RoutingAllocation.Result rerouteWithNoReassign(ClusterState clusterState) {
+ return rerouteWithNoReassign(clusterState, false);
+ }
+
+ /**
+ * Handles a reroute *without* any reassignment of unassigned shards or rebalancing. Removed
+ * nodes are handled, but their shards are only moved to UNASSIGNED and are not reassigned.
+ */
+ public RoutingAllocation.Result rerouteWithNoReassign(ClusterState clusterState, boolean debug) {
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ // shuffle the unassigned shards, just so repeatedly failing ("poison") shards don't always sort first
+ routingNodes.unassigned().shuffle();
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
+ allocation.debugDecision(debug);
+ boolean changed = false;
+ // first, clear from the shards any node id they used to belong to that is now dead
+ changed |= deassociateDeadNodes(allocation);
+
+ // make sure all newly joined data nodes are registered with the routing nodes
+ applyNewNodes(allocation);
+
+ // elect primaries *before* allocating unassigned, so replicas of failed primaries
+ // are promoted to primary instead of waiting for the primaries to be allocated and recovered (*from gateway*)
+ changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
+
+ if (!changed) {
+ return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
+ }
+ return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
+ }
+
+ private boolean reroute(RoutingAllocation allocation) {
+ boolean changed = false;
+ // first, clear from the shards any node id they used to belong to that is now dead
+ changed |= deassociateDeadNodes(allocation);
+
+ // make sure all newly joined data nodes are registered with the routing nodes
+ applyNewNodes(allocation);
+
+ // elect primaries *before* allocating unassigned, so replicas of failed primaries
+ // are promoted to primary instead of waiting for the primaries to be allocated and recovered (*from gateway*)
+ changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
+
+ // now allocate all the unassigned to available nodes
+ if (allocation.routingNodes().hasUnassigned()) {
+ changed |= shardsAllocators.allocateUnassigned(allocation);
+ // elect primaries again, in case this is needed with unassigned allocation
+ changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
+ }
+
+ // move shards that no longer can be allocated
+ changed |= moveShards(allocation);
+
+ // rebalance
+ changed |= shardsAllocators.rebalance(allocation);
+ assert RoutingNodes.assertShardStats(allocation.routingNodes());
+ return changed;
+ }
+
+ private boolean moveShards(RoutingAllocation allocation) {
+ boolean changed = false;
+
+ // create a copy of the shards interleaving between nodes, and check if they can remain
+ List<MutableShardRouting> shards = new ArrayList<MutableShardRouting>();
+ int index = 0;
+ boolean found = true;
+ final RoutingNodes routingNodes = allocation.routingNodes();
+ while (found) {
+ found = false;
+ for (RoutingNode routingNode : routingNodes) {
+ if (index >= routingNode.size()) {
+ continue;
+ }
+ found = true;
+ shards.add(routingNode.get(index));
+ }
+ index++;
+ }
+ for (int i = 0; i < shards.size(); i++) {
+ MutableShardRouting shardRouting = shards.get(i);
+ // we can only move started shards...
+ if (!shardRouting.started()) {
+ continue;
+ }
+ final RoutingNode routingNode = routingNodes.node(shardRouting.currentNodeId());
+ Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
+ if (decision.type() == Decision.Type.NO) {
+ logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
+ boolean moved = shardsAllocators.move(shardRouting, routingNode, allocation);
+ if (!moved) {
+ logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
+ } else {
+ assert RoutingNodes.assertShardStats(allocation.routingNodes());
+ changed = true;
+ }
+ }
+ }
+ return changed;
+ }
+
+ private boolean electPrimariesAndUnassignDanglingReplicas(RoutingAllocation allocation) {
+ boolean changed = false;
+ RoutingNodes routingNodes = allocation.routingNodes();
+ if (!routingNodes.hasUnassignedPrimaries()) {
+ // move out if we don't have unassigned primaries
+ return changed;
+ }
+ for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
+ if (shardEntry.primary()) {
+ MutableShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
+ if (candidate != null) {
+ routingNodes.swapPrimaryFlag(shardEntry, candidate);
+ if (candidate.relocatingNodeId() != null) {
+ changed = true;
+ // its also relocating, make sure to move the other routing to primary
+ RoutingNode node = routingNodes.node(candidate.relocatingNodeId());
+ if (node != null) {
+ for (MutableShardRouting shardRouting : node) {
+ if (shardRouting.shardId().equals(candidate.shardId()) && !shardRouting.primary()) {
+ routingNodes.swapPrimaryFlag(shardRouting);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // go over and remove dangling replicas that are initializing, but we couldn't elect primary ones...
+ List<ShardRouting> shardsToFail = null;
+ if (routingNodes.hasUnassignedPrimaries()) {
+ for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
+ if (shardEntry.primary()) {
+ for(MutableShardRouting routing : routingNodes.assignedShards(shardEntry)) {
+ if (!routing.primary()) {
+ changed = true;
+ if (shardsToFail == null) {
+ shardsToFail = new ArrayList<ShardRouting>();
+ }
+ shardsToFail.add(routing);
+ }
+ }
+ }
+ }
+ if (shardsToFail != null) {
+ for (ShardRouting shardToFail : shardsToFail) {
+ applyFailedShard(allocation, shardToFail, false);
+ }
+ }
+ }
+ return changed;
+ }
+
+ /**
+ * Ensures that every data node in the cluster is known to the routing
+ * nodes, registering any newly joined nodes.
+ */
+ private void applyNewNodes(RoutingAllocation allocation) {
+ final RoutingNodes routingNodes = allocation.routingNodes();
+ for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
+ DiscoveryNode node = cursor.value;
+ if (!routingNodes.isKnown(node)) {
+ routingNodes.addNode(node);
+ }
+ }
+ }
+
+ private boolean deassociateDeadNodes(RoutingAllocation allocation) {
+ boolean changed = false;
+ for (RoutingNodes.RoutingNodesIterator it = allocation.routingNodes().nodes(); it.hasNext(); ) {
+ RoutingNode node = it.next();
+ if (allocation.nodes().dataNodes().containsKey(node.nodeId())) {
+ // it's a live node, continue
+ continue;
+ }
+ changed = true;
+ // now, go over all the shards routing on the node, and fail them
+ for (MutableShardRouting shardRouting : node.copyShards()) {
+ applyFailedShard(allocation, shardRouting, false);
+ }
+ // it's a dead node, remove it; note, it's important to remove it *after* we apply the failed shards,
+ // since that relies on the fact that the RoutingNode still exists in the list of nodes
+ it.remove();
+ }
+ return changed;
+ }
+
+ private boolean applyStartedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> startedShardEntries) {
+ boolean dirty = false;
+ // this may be called several times with the same shard; duplicates are ignored
+ for (ShardRouting startedShard : startedShardEntries) {
+ assert startedShard.state() == INITIALIZING;
+
+ // retrieve the relocating node id before calling startedShard().
+ String relocatingNodeId = null;
+
+ RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId());
+ if (currentRoutingNode != null) {
+ for (MutableShardRouting shard : currentRoutingNode) {
+ if (shard.shardId().equals(startedShard.shardId())) {
+ relocatingNodeId = shard.relocatingNodeId();
+ if (!shard.started()) {
+ dirty = true;
+ routingNodes.started(shard);
+ }
+ break;
+ }
+ }
+ }
+
+ // startedShard is the current state of the shard (post relocation for example);
+ // this means that after relocation the state will be started and the currentNodeId will be
+ // the node we relocated to
+
+ if (relocatingNodeId == null) {
+ continue;
+ }
+
+ RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(relocatingNodeId);
+ if (sourceRoutingNode != null) {
+ while (sourceRoutingNode.hasNext()) {
+ MutableShardRouting shard = sourceRoutingNode.next();
+ if (shard.shardId().equals(startedShard.shardId())) {
+ if (shard.relocating()) {
+ dirty = true;
+ sourceRoutingNode.remove();
+ break;
+ }
+ }
+ }
+ }
+ }
+ return dirty;
+ }
+
+ /**
+ * Applies the relevant logic to handle a failed shard. Returns <tt>true</tt> if changes happened that
+ * require relocation.
+ */
+ private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList) {
+ // create a copy of the failed shard, since we assume we can change possible references to it without
+ // changing the state of failed shard
+ failedShard = new ImmutableShardRouting(failedShard);
+
+ IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index());
+ if (indexRoutingTable == null) {
+ return false;
+ }
+
+ RoutingNodes routingNodes = allocation.routingNodes();
+ if (failedShard.relocatingNodeId() != null) {
+ // the shard is relocating, either initializing (recovering from another node) or relocating (moving to another node)
+ if (failedShard.state() == INITIALIZING) {
+ // the shard is initializing and recovering from another node
+ boolean dirty = false;
+ // first, we need to cancel the current node that is being initialized
+ RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
+ if (initializingNode != null) {
+ while(initializingNode.hasNext()) {
+ MutableShardRouting shardRouting = initializingNode.next();
+ if (shardRouting.equals(failedShard)) {
+ dirty = true;
+ initializingNode.remove();
+ if (addToIgnoreList) {
+ // make sure we ignore this shard on the relevant node
+ allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
+ }
+
+ break;
+ }
+ }
+ }
+ if (dirty) {
+ // now, find the node that we are relocating *from*, and cancel its relocation
+ RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId());
+ if (relocatingFromNode != null) {
+ for (MutableShardRouting shardRouting : relocatingFromNode) {
+ if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.relocating()) {
+ dirty = true;
+ routingNodes.cancelRelocation(shardRouting);
+ break;
+ }
+ }
+ }
+ }
+ return dirty;
+ } else if (failedShard.state() == RELOCATING) {
+ boolean dirty = false;
+ // the shard is relocating, meaning it's the source node the shard is relocating from
+ // first, we need to cancel the current relocation from the current node
+ // now, find the node that we are recovering from, cancel the relocation, remove it from the node
+ // and add it to the unassigned shards list...
+ RoutingNodes.RoutingNodeIterator relocatingFromNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
+ if (relocatingFromNode != null) {
+ while(relocatingFromNode.hasNext()) {
+ MutableShardRouting shardRouting = relocatingFromNode.next();
+ if (shardRouting.equals(failedShard)) {
+ dirty = true;
+ relocatingFromNode.remove();
+ if (addToIgnoreList) {
+ // make sure we ignore this shard on the relevant node
+ allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
+ }
+
+ routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(),
+ null, failedShard.primary(), ShardRoutingState.UNASSIGNED, failedShard.version() + 1));
+ break;
+ }
+ }
+ }
+ if (dirty) {
+ // next, we need to find the target initializing shard that is recovering from this one, and remove it...
+ RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId());
+ if (initializingNode != null) {
+ while (initializingNode.hasNext()) {
+ MutableShardRouting shardRouting = initializingNode.next();
+ if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.state() == INITIALIZING) {
+ dirty = true;
+ initializingNode.remove();
+ }
+ }
+ }
+ }
+ return dirty;
+ } else {
+ throw new ElasticsearchIllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard);
+ }
+ } else {
+ // the shard is not relocating, it's either started or initializing, just cancel it and move on...
+ boolean dirty = false;
+ RoutingNodes.RoutingNodeIterator node = routingNodes.routingNodeIter(failedShard.currentNodeId());
+ if (node != null) {
+ while(node.hasNext()) {
+ MutableShardRouting shardRouting = node.next();
+ if (shardRouting.equals(failedShard)) {
+ dirty = true;
+ if (addToIgnoreList) {
+ // make sure we ignore this shard on the relevant node
+ allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
+ }
+ node.remove();
+ // move all the shards matching the failed shard to the end of the unassigned list
+ // so we give other shards a chance to be allocated and won't create poisoned failed allocations
+ // that can keep other shards from being allocated (because of limits applied on how many
+ // shards we can start per node)
+ List<MutableShardRouting> shardsToMove = Lists.newArrayList();
+ for (Iterator<MutableShardRouting> unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) {
+ MutableShardRouting unassignedShardRouting = unassignedIt.next();
+ if (unassignedShardRouting.shardId().equals(failedShard.shardId())) {
+ unassignedIt.remove();
+ shardsToMove.add(unassignedShardRouting);
+ }
+ }
+ if (!shardsToMove.isEmpty()) {
+ routingNodes.unassigned().addAll(shardsToMove);
+ }
+
+ routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(), null,
+ null, failedShard.restoreSource(), failedShard.primary(), ShardRoutingState.UNASSIGNED, failedShard.version() + 1));
+
+ break;
+ }
+ }
+ }
+ return dirty;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
new file mode 100644
index 0000000..721514f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+
+import java.util.List;
+
+/**
+ * This {@link RoutingAllocation} holds the list of shards whose routing
+ * allocation has failed
+ */
+public class FailedRerouteAllocation extends RoutingAllocation {
+
+ private final List<ShardRouting> failedShards;
+
+ public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<ShardRouting> failedShards, ClusterInfo clusterInfo) {
+ super(deciders, routingNodes, nodes, clusterInfo);
+ this.failedShards = failedShards;
+ }
+
+ public List<ShardRouting> failedShards() {
+ return failedShards;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
new file mode 100644
index 0000000..f667855
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * The {@link RoutingAllocation} keeps the state of the current allocation
+ * of shards and holds the {@link AllocationDeciders} which are responsible
+ * for making decisions about the current routing state.
+ */
+public class RoutingAllocation {
+
+ /**
+ * This class is used to describe the result of a {@link RoutingAllocation}.
+ */
+ public static class Result {
+
+ private final boolean changed;
+
+ private final RoutingTable routingTable;
+
+ private final AllocationExplanation explanation;
+
+ /**
+ * Creates a new {@link RoutingAllocation.Result}
+ *
+ * @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
+ * @param routingTable the {@link RoutingTable} this Result references
+ * @param explanation Explanation of the Result
+ */
+ public Result(boolean changed, RoutingTable routingTable, AllocationExplanation explanation) {
+ this.changed = changed;
+ this.routingTable = routingTable;
+ this.explanation = explanation;
+ }
+
+ /**
+ * Determines whether the actual {@link RoutingTable} has been changed.
+ * @return <code>true</code> if the {@link RoutingTable} has been changed by the allocation, otherwise <code>false</code>
+ */
+ public boolean changed() {
+ return this.changed;
+ }
+
+ /**
+ * Get the {@link RoutingTable} referenced by this result
+ * @return referenced {@link RoutingTable}
+ */
+ public RoutingTable routingTable() {
+ return routingTable;
+ }
+
+ /**
+ * Get the explanation of this result
+ * @return explanation
+ */
+ public AllocationExplanation explanation() {
+ return explanation;
+ }
+ }
+
+ private final AllocationDeciders deciders;
+
+ private final RoutingNodes routingNodes;
+
+ private final DiscoveryNodes nodes;
+
+ private final AllocationExplanation explanation = new AllocationExplanation();
+
+ private final ClusterInfo clusterInfo;
+
+ private Map<ShardId, Set<String>> ignoredShardToNodes = null;
+
+ private boolean ignoreDisable = false;
+
+ private boolean debugDecision = false;
+
+ /**
+ * Creates a new {@link RoutingAllocation}
+ *
+ * @param deciders {@link AllocationDeciders} used to make decisions for routing allocations
+ * @param routingNodes Routing nodes in the current cluster
+ * @param nodes The discovery nodes in the current cluster
+ * @param clusterInfo Cluster-level information about the nodes, used by the deciders
+ */
+ public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo) {
+ this.deciders = deciders;
+ this.routingNodes = routingNodes;
+ this.nodes = nodes;
+ this.clusterInfo = clusterInfo;
+ }
+
+ /**
+ * Get {@link AllocationDeciders} used for allocation
+ * @return {@link AllocationDeciders} used for allocation
+ */
+ public AllocationDeciders deciders() {
+ return this.deciders;
+ }
+
+ /**
+ * Get routing table of current nodes
+ * @return current routing table
+ */
+ public RoutingTable routingTable() {
+ return routingNodes.routingTable();
+ }
+
+ /**
+ * Get current routing nodes
+ * @return routing nodes
+ */
+ public RoutingNodes routingNodes() {
+ return routingNodes;
+ }
+
+ /**
+ * Get metadata of routing nodes
+ * @return Metadata of routing nodes
+ */
+ public MetaData metaData() {
+ return routingNodes.metaData();
+ }
+
+ /**
+ * Get discovery nodes in current routing
+ * @return discovery nodes
+ */
+ public DiscoveryNodes nodes() {
+ return nodes;
+ }
+
+ public ClusterInfo clusterInfo() {
+ return clusterInfo;
+ }
+
+ /**
+ * Get explanations of current routing
+ * @return explanation of routing
+ */
+ public AllocationExplanation explanation() {
+ return explanation;
+ }
+
+ public void ignoreDisable(boolean ignoreDisable) {
+ this.ignoreDisable = ignoreDisable;
+ }
+
+ public boolean ignoreDisable() {
+ return this.ignoreDisable;
+ }
+
+ public void debugDecision(boolean debug) {
+ this.debugDecision = debug;
+ }
+
+ public boolean debugDecision() {
+ return this.debugDecision;
+ }
+
+ public void addIgnoreShardForNode(ShardId shardId, String nodeId) {
+ if (ignoredShardToNodes == null) {
+ ignoredShardToNodes = new HashMap<ShardId, Set<String>>();
+ }
+ Set<String> nodes = ignoredShardToNodes.get(shardId);
+ if (nodes == null) {
+ nodes = new HashSet<String>();
+ ignoredShardToNodes.put(shardId, nodes);
+ }
+ nodes.add(nodeId);
+ }
+
+ public boolean shouldIgnoreShardForNode(ShardId shardId, String nodeId) {
+ if (ignoredShardToNodes == null) {
+ return false;
+ }
+ Set<String> nodes = ignoredShardToNodes.get(shardId);
+ return nodes != null && nodes.contains(nodeId);
+ }
+
+ /**
+ * Create a routing decision, including the reason if the debug flag is
+ * turned on
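+ * <p>
+ * Hypothetical usage from within an allocation decider (illustrative only, not from the source):
+ * <code>return allocation.decision(Decision.NO, "too many shards on node [%s]", node.nodeId());</code>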
+ */
+ public Decision decision(Decision decision, String reason, Object... params) {
+ if (debugDecision()) {
+ return Decision.single(decision.type(), reason, params);
+ } else {
+ return decision;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
new file mode 100644
index 0000000..da69419
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+
+import java.util.List;
+
+/**
+ * This {@link RoutingAllocation} holds a list of started shards within a
+ * cluster
+ */
+public class StartedRerouteAllocation extends RoutingAllocation {
+
+ private final List<? extends ShardRouting> startedShards;
+
+ public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
+ super(deciders, routingNodes, nodes, clusterInfo);
+ this.startedShards = startedShards;
+ }
+
+ /**
+ * Get started shards
+ * @return list of started shards
+ */
+ public List<? extends ShardRouting> startedShards() {
+ return startedShards;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
new file mode 100644
index 0000000..4e55d56
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -0,0 +1,1096 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.allocator;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.IntroSorter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.common.collect.IdentityHashSet;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.util.*;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
+
+/**
+ * The {@link BalancedShardsAllocator} re-balances the shard allocations of the nodes
+ * within a cluster based on a {@link WeightFunction}. The cluster's balance is defined by four parameters which can be set
+ * via the cluster update settings API, allowing changes in real-time:
+ * <p/>
+ * <ul><li><code>cluster.routing.allocation.balance.shard</code> - The <b>shard balance</b> defines the weight factor
+ * for shards allocated on a {@link RoutingNode}</li>
+ * <li><code>cluster.routing.allocation.balance.index</code> - The <b>index balance</b> defines a factor to the number
+ * of {@link org.elasticsearch.cluster.routing.ShardRouting}s per index allocated on a specific node</li>
+ * <li><code>cluster.routing.allocation.balance.primary</code> - the <b>primary balance</b> defines a weight factor for
+ * the number of primaries of a specific index allocated on a node</li>
+ * <li><code>cluster.routing.allocation.balance.threshold</code> - A <b>threshold</b> defining the minimal weight delta
+ * that must be exceeded before a balancing operation is performed</li>
+ * </ul>
+ * <p/>
+ * These parameters are combined in a {@link WeightFunction} that allows calculation of node weights which
+ * are used to re-balance shards based on global as well as per-index factors.
+ */
+public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
+
+ public static final String SETTING_THRESHOLD = "cluster.routing.allocation.balance.threshold";
+ public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index";
+ public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard";
+ public static final String SETTING_PRIMARY_BALANCE_FACTOR = "cluster.routing.allocation.balance.primary";
+
+ private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.5f;
+ private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f;
+ private static final float DEFAULT_PRIMARY_BALANCE_FACTOR = 0.05f;
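+
+ // Illustrative note (an assumption, not part of the original source): these factors
+ // can be changed at runtime through the cluster update settings API, e.g.
+ //
+ // PUT /_cluster/settings
+ // { "transient": { "cluster.routing.allocation.balance.shard": 0.6 } }
+ //
+ // ApplySettings below picks such changes up via the NodeSettingsService.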
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance);
+ final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance);
+ final float primaryBalance = settings.getAsFloat(SETTING_PRIMARY_BALANCE_FACTOR, weightFunction.primaryBalance);
+ float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold);
+ if (threshold <= 0.0f) {
+ throw new ElasticsearchIllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold);
+ }
+ BalancedShardsAllocator.this.threshold = threshold;
+ BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance, primaryBalance);
+ }
+ }
+
+ private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR, DEFAULT_PRIMARY_BALANCE_FACTOR);
+
+ private volatile float threshold = 1.0f;
+
+
+ public BalancedShardsAllocator(Settings settings) {
+ this(settings, new NodeSettingsService(settings));
+ }
+
+ @Inject
+ public BalancedShardsAllocator(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ ApplySettings applySettings = new ApplySettings();
+ applySettings.onRefreshSettings(settings);
+ nodeSettingsService.addListener(applySettings);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ return rebalance(allocation);
+ }
+
+ @Override
+ public boolean rebalance(RoutingAllocation allocation) {
+ final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
+ return balancer.balance();
+ }
+
+ @Override
+ public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
+ return balancer.move(shardRouting, node);
+ }
+
+ /**
+ * Returns the currently configured delta threshold
+ */
+ public float getThreshold() {
+ return threshold;
+ }
+
+ /**
+ * Returns the index related weight factor.
+ */
+ public float getIndexBalance() {
+ return weightFunction.indexBalance;
+ }
+
+ /**
+ * Returns the primary related weight factor.
+ */
+ public float getPrimaryBalance() {
+ return weightFunction.primaryBalance;
+ }
+
+ /**
+ * Returns the shard related weight factor.
+ */
+ public float getShardBalance() {
+ return weightFunction.shardBalance;
+ }
+
+
+ /**
+ * This class is the primary weight function used to create a balanced allocation over the nodes and shards in the cluster.
+ * Currently this function has 3 properties:
+ * <ul>
+ * <li><code>index balance</code> - balance property over shards per index</li>
+ * <li><code>shard balance</code> - balance property over shards per cluster</li>
+ * <li><code>primary balance</code> - balance property over primaries per cluster</li>
+ * </ul>
+ * <p>
+ * Each of these properties is expressed as a factor such that the factor defines the relative importance of the property for the
+ * weight function. For example, if the weight function should calculate the weights based only on the global (shard) balance, the index and primary balance
+ * can be set to <tt>0.0</tt> and will in turn have no effect on the distribution.
+ * </p>
+ * The weight per index is calculated based on the following formula:
+ * <ul>
+ * <li>
+ * <code>weight<sub>index</sub>(node, index) = indexBalance * (node.numShards(index) - avgShardsPerNode(index))</code>
+ * </li>
+ * <li>
+ * <code>weight<sub>node</sub>(node, index) = shardBalance * (node.numShards() - avgShardsPerNode)</code>
+ * </li>
+ * <li>
+ * <code>weight<sub>primary</sub>(node, index) = primaryBalance * (node.numPrimaries() - avgPrimariesPerNode)</code>
+ * </li>
+ * </ul>
+ * <code>weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index) + weight<sub>primary</sub>(node, index)</code>
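+ * <p>
+ * A worked example with illustrative numbers (not taken from the source): with the default
+ * factors (index 0.5, shard 0.45, primary 0.05, summing to 1.0) the normalized theta vector
+ * is {0.45, 0.5, 0.05}. For a node holding 4 shards in total (cluster average 3), 2 shards
+ * of the index (average 1.5) and 2 primaries (average 1.5), the weight is
+ * <code>0.45 * (4 - 3) + 0.5 * (2 - 1.5) + 0.05 * (2 - 1.5) = 0.725</code>.
+ * </p>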
+ */
+ public static class WeightFunction {
+
+ private final float indexBalance;
+ private final float shardBalance;
+ private final float primaryBalance;
+ private final EnumMap<Operation, float[]> thetaMap = new EnumMap<BalancedShardsAllocator.Operation, float[]>(Operation.class);
+
+ public WeightFunction(float indexBalance, float shardBalance, float primaryBalance) {
+ float sum = indexBalance + shardBalance + primaryBalance;
+ if (sum <= 0.0f) {
+ throw new ElasticsearchIllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum);
+ }
+ final float[] defaultTheta = new float[]{shardBalance / sum, indexBalance / sum, primaryBalance / sum};
+ for (Operation operation : Operation.values()) {
+ switch (operation) {
+ case THRESHOLD_CHECK:
+ sum = indexBalance + shardBalance;
+ if (sum <= 0.0f) {
+ thetaMap.put(operation, defaultTheta);
+ } else {
+ thetaMap.put(operation, new float[]{shardBalance / sum, indexBalance / sum, 0});
+ }
+ break;
+ case BALANCE:
+ case ALLOCATE:
+ case MOVE:
+ thetaMap.put(operation, defaultTheta);
+ break;
+ default:
+ assert false;
+ }
+ }
+ this.indexBalance = indexBalance;
+ this.shardBalance = shardBalance;
+ this.primaryBalance = primaryBalance;
+ }
+
+ public float weight(Operation operation, Balancer balancer, ModelNode node, String index) {
+ final float weightShard = (node.numShards() - balancer.avgShardsPerNode());
+ final float weightIndex = (node.numShards(index) - balancer.avgShardsPerNode(index));
+ final float weightPrimary = (node.numPrimaries() - balancer.avgPrimariesPerNode());
+ final float[] theta = thetaMap.get(operation);
+ assert theta != null;
+ return theta[0] * weightShard + theta[1] * weightIndex + theta[2] * weightPrimary;
+ }
+
+ }
+
+ /**
+ * An enum that denotes the actual operation the {@link WeightFunction} is
+ * applied to.
+ */
+ public static enum Operation {
+ /**
+ * Provided during balance operations.
+ */
+ BALANCE,
+ /**
+ * Provided during initial allocation operation for unassigned shards.
+ */
+ ALLOCATE,
+ /**
+ * Provided during move operation.
+ */
+ MOVE,
+ /**
+ * Provided when the weight delta is checked against the configured threshold.
+ * This can be used to ignore tie-breaking weight factors that should not
+ * solely trigger a relocation unless the delta is above the threshold.
+ */
+ THRESHOLD_CHECK
+ }
+
+ /**
+ * A {@link Balancer} computes a balanced distribution of shards within the
+ * cluster according to the given {@link WeightFunction}.
+ */
+ public static class Balancer {
+
+ private final ESLogger logger;
+ private final Map<String, ModelNode> nodes = new HashMap<String, ModelNode>();
+ private final HashSet<String> indices = new HashSet<String>();
+ private final RoutingAllocation allocation;
+ private final RoutingNodes routingNodes;
+ private final WeightFunction weight;
+
+ private final float threshold;
+ private final MetaData metaData;
+
+ private final Predicate<MutableShardRouting> assignedFilter = new Predicate<MutableShardRouting>() {
+ @Override
+ public boolean apply(MutableShardRouting input) {
+ return input.assignedToNode();
+ }
+ };
+
+
+ public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
+ this.logger = logger;
+ this.allocation = allocation;
+ this.weight = weight;
+ this.threshold = threshold;
+ this.routingNodes = allocation.routingNodes();
+ for (RoutingNode node : routingNodes) {
+ nodes.put(node.nodeId(), new ModelNode(node.nodeId()));
+ }
+ metaData = routingNodes.metaData();
+ }
+
+ /**
+ * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list.
+ */
+ private ModelNode[] nodesArray() {
+ return nodes.values().toArray(new ModelNode[nodes.size()]);
+ }
+
+ /**
+ * Returns the average of shards per node for the given index
+ */
+ public float avgShardsPerNode(String index) {
+ return ((float) metaData.index(index).totalNumberOfShards()) / nodes.size();
+ }
+
+ /**
+ * Returns the global average of shards per node
+ */
+ public float avgShardsPerNode() {
+ return ((float) metaData.totalNumberOfShards()) / nodes.size();
+ }
+
+ /**
+ * Returns the global average of primaries per node
+ */
+ public float avgPrimariesPerNode() {
+ return ((float) metaData.numberOfShards()) / nodes.size();
+ }
+
+ /**
+ * Returns the average of primaries per node for the given index
+ */
+ public float avgPrimariesPerNode(String index) {
+ return ((float) metaData.index(index).numberOfShards()) / nodes.size();
+ }
+
+ /**
+ * Returns a new {@link NodeSorter} that sorts the nodes based on their
+ * current weight with respect to the index passed to the sorter. The
+ * returned sorter is not sorted. Use {@link NodeSorter#reset(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Operation, String)}
+ * to sort based on an index.
+ */
+ private NodeSorter newNodeSorter() {
+ return new NodeSorter(nodesArray(), weight, this);
+ }
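+
+ // Usage sketch (illustrative; mirrors how balance() uses the sorter below):
+ // NodeSorter sorter = newNodeSorter();
+ // sorter.reset(Operation.BALANCE, index); // sorts nodes by weight, lightest first
+ // ModelNode lightest = sorter.modelNodes[0];
+ // ModelNode heaviest = sorter.modelNodes[sorter.modelNodes.length - 1];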
+
+ private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Start distributing Shards");
+ }
+ indices.addAll(allocation.routingTable().indicesRouting().keySet());
+ buildModelFromAssigned(routing.shards(assignedFilter));
+ return allocateUnassigned(unassigned, routing.ignoredUnassigned());
+ }
+
+ private static boolean lessThan(float delta, float threshold) {
+ /* deltas close to the threshold are "rounded" to the threshold manually
+ to prevent floating point problems if the delta is very close to the
+ threshold, i.e. 1.000000002, which can trigger unnecessary balance actions */
+ return delta <= threshold + 0.001f;
+ }
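+
+ // e.g. (illustrative): lessThan(1.0005f, 1.0f) == true, since the 0.001f epsilon
+ // absorbs floating point noise just above the threshold.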
+
+ /**
+ * Balances the nodes on the cluster model according to the weight
+ * function. The configured threshold is the minimum delta between the
+ * weight of the maximum node and the minimum node according to the
+ * {@link WeightFunction}. This weight is calculated per index to
+ * distribute shards evenly per index. The balancer tries to relocate
+ * shards only if the delta exceeds the threshold. In the default case
+ * the threshold is set to <tt>1.0</tt> to enforce gaining relocations
+ * only, or in other words relocations that move the weight delta closer
+ * to <tt>0.0</tt>
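+ * (for example, with the default threshold of <tt>1.0</tt>, a relocation is only
+ * attempted while the weight delta between the heaviest and the lightest node
+ * for an index exceeds <tt>1.0</tt>)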
+ *
+ * @return <code>true</code> if the current configuration has been
+ * changed, otherwise <code>false</code>
+ */
+ public boolean balance() {
+ if (this.nodes.isEmpty()) {
+ /* with no nodes this is pointless */
+ return false;
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("Start balancing cluster");
+ }
+ final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
+ boolean changed = initialize(routingNodes, unassigned);
+ if (!changed) {
+ NodeSorter sorter = newNodeSorter();
+ if (nodes.size() > 1) { /* skip if we only have one node */
+ for (String index : buildWeightOrderedIndices(Operation.BALANCE, sorter)) {
+ sorter.reset(Operation.BALANCE, index);
+ final float[] weights = sorter.weights;
+ final ModelNode[] modelNodes = sorter.modelNodes;
+ int lowIdx = 0;
+ int highIdx = weights.length - 1;
+ while (true) {
+ final ModelNode minNode = modelNodes[lowIdx];
+ final ModelNode maxNode = modelNodes[highIdx];
+ advance_range:
+ if (maxNode.numShards(index) > 0) {
+ float delta = weights[highIdx] - weights[lowIdx];
+ delta = lessThan(delta, threshold) ? delta : sorter.weight(Operation.THRESHOLD_CHECK, maxNode) - sorter.weight(Operation.THRESHOLD_CHECK, minNode);
+ if (lessThan(delta, threshold)) {
+ if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta?
+ && (weights[highIdx-1] - weights[0] > threshold) // check if we need to break at all
+ ) {
+ /* This is a special case where allocation from the "heaviest" to the "lighter" nodes is not possible
+ * due to some allocation decider restrictions like zone awareness, for instance if one zone has
+ * fewer nodes than another zone. One zone may then be horribly overloaded from a balance perspective, but we
+ * can't move shards to the "lighter" nodes since otherwise the zone would go over capacity.
+ *
+ * This break jumps straight to the condition below where we start moving from the high index towards
+ * the low index to shrink the window we are considering for balance from the other direction.
+ * (check shrinking the window from MAX to MIN)
+ * See #3580
+ */
+ break advance_range;
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]",
+ index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
+ }
+ break;
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]",
+ maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
+ }
+ /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
+ * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */
+ if (tryRelocateShard(Operation.BALANCE, minNode, maxNode, index, delta)) {
+ /*
+ * TODO we could be a bit smarter here, we don't need to fully sort necessarily
+ * we could just find the place to insert linearly but the win might be minor
+ * compared to the added complexity
+ */
+ weights[lowIdx] = sorter.weight(Operation.BALANCE, modelNodes[lowIdx]);
+ weights[highIdx] = sorter.weight(Operation.BALANCE, modelNodes[highIdx]);
+ sorter.sort(0, weights.length);
+ lowIdx = 0;
+ highIdx = weights.length - 1;
+ changed = true;
+ continue;
+ }
+ }
+ if (lowIdx < highIdx - 1) {
+ /* Shrinking the window from MIN to MAX
+ * we can't move any shard to the min node, let's move on to the next node
+ * and see if the threshold still holds. We either don't have any shard of this
+ * index on this node, or the allocation deciders prevent any relocation.*/
+ lowIdx++;
+ } else if (lowIdx > 0) {
+ /* Shrinking the window from MAX to MIN
+ * now we go max to min since obviously we can't move anything to the max node
+ * lets pick the next highest */
+ lowIdx = 0;
+ highIdx--;
+ } else {
+ /* we are done here, we either can't relocate anymore or we are balanced */
+ break;
+ }
+ }
+ }
+ }
+ }
+ routingNodes.unassigned().transactionEnd(unassigned);
+ return changed;
+ }
+
+ /**
+ * This builds an initial index ordering where the most unbalanced indices
+ * are returned first. We need this in order to prevent over-allocation
+ * on newly added nodes from one index when the weight parameters
+ * for global balance overrule the index balance at an intermediate
+ * state. For example, this can happen if we have 3 nodes and 3 indices
+ * with 3 shards and 1 replica each. At the first stage all three nodes hold
+ * 2 shards for each index. Now we add another node and the first index
+ * is balanced by moving three shards from two of the nodes over to the new node, since it
+ * has no shards yet and the global balance for the node is way below
+ * average. To re-balance we eventually need to move shards back, likely
+ * to the nodes we relocated them from.
+ */
+ private String[] buildWeightOrderedIndices(Operation operation, NodeSorter sorter) {
+ final String[] indices = this.indices.toArray(new String[this.indices.size()]);
+ final float[] deltas = new float[indices.length];
+ for (int i = 0; i < deltas.length; i++) {
+ sorter.reset(operation, indices[i]);
+ deltas[i] = sorter.delta();
+ }
+ new IntroSorter() {
+
+ float pivotWeight;
+
+ @Override
+ protected void swap(int i, int j) {
+ final String tmpIdx = indices[i];
+ indices[i] = indices[j];
+ indices[j] = tmpIdx;
+ final float tmpDelta = deltas[i];
+ deltas[i] = deltas[j];
+ deltas[j] = tmpDelta;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return Float.compare(deltas[j], deltas[i]);
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivotWeight = deltas[i];
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return Float.compare(deltas[j], pivotWeight);
+ }
+ }.sort(0, deltas.length);
+
+ return indices;
+ }
+
+ /**
+ * This function executes a move operation moving the given shard from
+ * the given node to the minimal eligible node with respect to the
+ * weight function. Iff the shard is moved the shard will be set to
+ * {@link ShardRoutingState#RELOCATING} and a shadow instance of this
+ * shard is created with an incremented version in the state
+ * {@link ShardRoutingState#INITIALIZING}.
+ *
+ * @return <code>true</code> iff the shard has successfully been moved.
+ */
+ public boolean move(MutableShardRouting shard, RoutingNode node) {
+ if (nodes.isEmpty() || !shard.started()) {
+ /* with no nodes or a not started shard this is pointless */
+ return false;
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("Try moving shard [{}] from [{}]", shard, node);
+ }
+ final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
+ boolean changed = initialize(routingNodes, unassigned);
+ if (!changed) {
+ final ModelNode sourceNode = nodes.get(node.nodeId());
+ assert sourceNode != null;
+ final NodeSorter sorter = newNodeSorter();
+ sorter.reset(Operation.MOVE, shard.getIndex());
+ final ModelNode[] nodes = sorter.modelNodes;
+ assert sourceNode.containsShard(shard);
+ /*
+ * the sorter holds the minimum weight node first for the shard's index.
+ * We now walk through the nodes until we find a node to allocate the shard.
+ * The cluster is not guaranteed to be balanced after this operation; we still make a best effort to
+ * allocate on the minimal eligible node.
+ */
+
+ for (ModelNode currentNode : nodes) {
+ if (currentNode.getNodeId().equals(node.nodeId())) {
+ continue;
+ }
+ RoutingNode target = routingNodes.node(currentNode.getNodeId());
+ Decision decision = allocation.deciders().canAllocate(shard, target, allocation);
+ if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
+ sourceNode.removeShard(shard);
+ final MutableShardRouting initializingShard = new MutableShardRouting(shard.index(), shard.id(), currentNode.getNodeId(),
+ shard.currentNodeId(), shard.restoreSource(), shard.primary(), INITIALIZING, shard.version() + 1);
+ currentNode.addShard(initializingShard, decision);
+ routingNodes.assign(initializingShard, target.nodeId());
+ routingNodes.relocate(shard, target.nodeId()); // set the node to relocate after we added the initializing shard
+ if (logger.isTraceEnabled()) {
+ logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
+ }
+ changed = true;
+ break;
+ }
+ }
+ }
+ routingNodes.unassigned().transactionEnd(unassigned);
+ return changed;
+ }
+
+ /**
+ * Builds the internal model from all shards in the given
+ * {@link Iterable}. All shards in the {@link Iterable} must be assigned
+ * to a node. This method will skip shards in the state
+ * {@link ShardRoutingState#RELOCATING} since each relocating shard has
+ * a shadow shard in the state {@link ShardRoutingState#INITIALIZING}
+ * on the target node which we respect during the allocation / balancing
+ * process. In short, this method recreates the status-quo in the cluster.
+ */
+ private void buildModelFromAssigned(Iterable<MutableShardRouting> shards) {
+ for (MutableShardRouting shard : shards) {
+ assert shard.assignedToNode();
+ /* we skip relocating shards here since we expect an initializing shard with the same id coming in */
+ if (shard.state() == RELOCATING) {
+ continue;
+ }
+ ModelNode node = nodes.get(shard.currentNodeId());
+ assert node != null;
+ node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId()));
+ if (logger.isTraceEnabled()) {
+ logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
+ }
+ }
+ }
+
+ /**
+ * Allocates all given shards on the minimal eligible node for the shard's index
+ * with respect to the weight function. All given shards must be unassigned.
+ */
+ private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned, List<MutableShardRouting> ignoredUnassigned) {
+ assert !nodes.isEmpty();
+ if (logger.isTraceEnabled()) {
+ logger.trace("Start allocating unassigned shards");
+ }
+ if (unassigned.isEmpty()) {
+ return false;
+ }
+ boolean changed = false;
+
+ /*
+ * TODO: We could be smarter here and group the shards by index and then
+ * use the sorter to save some iterations.
+ */
+ final AllocationDeciders deciders = allocation.deciders();
+ final Comparator<MutableShardRouting> comparator = new Comparator<MutableShardRouting>() {
+ @Override
+ public int compare(MutableShardRouting o1,
+ MutableShardRouting o2) {
+ if (o1.primary() ^ o2.primary()) {
+ return o1.primary() ? -1 : o2.primary() ? 1 : 0;
+ }
+ final int indexCmp;
+ if ((indexCmp = o1.index().compareTo(o2.index())) == 0) {
+ return o1.getId() - o2.getId();
+ }
+ return indexCmp;
+ }
+ };
+ /*
+ * we use 2 arrays and move replicas to the second array once we allocated an identical
+ * replica in the current iteration to make sure all indices get allocated in the same manner.
+ * The arrays are sorted by primaries first and then by index and shard ID, so 2 indices with 1 shard and 2 replicas each would look like:
+ * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
+ * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
+ * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all its replicas to ignoredUnassigned.
+ */
+ MutableShardRouting[] primary = unassigned.drain();
+ MutableShardRouting[] secondary = new MutableShardRouting[primary.length];
+ int secondaryLength = 0;
+ int primaryLength = primary.length;
+ ArrayUtil.timSort(primary, comparator);
+ final Set<ModelNode> throttledNodes = new IdentityHashSet<ModelNode>();
+ do {
+ for (int i = 0; i < primaryLength; i++) {
+ MutableShardRouting shard = primary[i];
+ if (!shard.primary()) {
+ boolean drop = deciders.canAllocate(shard, allocation).type() == Type.NO;
+ if (drop) {
+ ignoredUnassigned.add(shard);
+ while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
+ ignoredUnassigned.add(primary[++i]);
+ }
+ continue;
+ } else {
+ while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
+ secondary[secondaryLength++] = primary[++i];
+ }
+ }
+ }
+ assert !shard.assignedToNode() : shard;
+ /* find a node with minimal weight we can allocate on */
+ float minWeight = Float.POSITIVE_INFINITY;
+ ModelNode minNode = null;
+ Decision decision = null;
+ if (throttledNodes.size() < nodes.size()) {
+ /* Don't iterate over an identity hashset here: the
+ * iteration order is different for each run and makes testing hard */
+ for (ModelNode node : nodes.values()) {
+ if (throttledNodes.contains(node)) {
+ continue;
+ }
+ /*
+ * The shard we add is removed below to simulate the
+ * addition only for the weight calculation; we use Decision.ALWAYS to
+ * not violate the non-null condition.
+ */
+ if (!node.containsShard(shard)) {
+ node.addShard(shard, Decision.ALWAYS);
+ float currentWeight = weight.weight(Operation.ALLOCATE, this, node, shard.index());
+ /*
+ * Remove the shard from the node again this is only a
+ * simulation
+ */
+ Decision removed = node.removeShard(shard);
+ assert removed != null;
+ /*
+ * We only check the deciders if the weight is actually
+ * an improvement (or a tie) over the current minimum
+ */
+ if (currentWeight <= minWeight) {
+ Decision currentDecision = deciders.canAllocate(shard, routingNodes.node(node.getNodeId()), allocation);
+ NOUPDATE:
+ if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
+ if (currentWeight == minWeight) {
+ /* we have an equal weight tie breaking:
+ * 1. if one decision is YES prefer it
+ * 2. prefer the node that holds the primary for this index with the next id in the ring ie.
+ * for the 3 shards 2 replica case we try to build up:
+ * 1 2 0
+ * 2 0 1
+ * 0 1 2
+ * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater
+ * than the id of the shard we need to assign. This works fine when new indices are created since
+ * primaries are added first and we only add one shard set at a time in this algorithm.
+ */
+ if (currentDecision.type() == decision.type()) {
+ final int repId = shard.id();
+ final int nodeHigh = node.highestPrimary(shard.index());
+ final int minNodeHigh = minNode.highestPrimary(shard.index());
+ if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh))
+ || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) {
+ minNode = node;
+ minWeight = currentWeight;
+ decision = currentDecision;
+ } else {
+ break NOUPDATE;
+ }
+ } else if (currentDecision.type() != Type.YES) {
+ break NOUPDATE;
+ }
+ }
+ minNode = node;
+ minWeight = currentWeight;
+ decision = currentDecision;
+ }
+ }
+ }
+ }
+ }
+ assert decision != null && minNode != null || decision == null && minNode == null;
+ if (minNode != null) {
+ minNode.addShard(shard, decision);
+ if (decision.type() == Type.YES) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
+ }
+ routingNodes.assign(shard, routingNodes.node(minNode.getNodeId()).nodeId());
+ changed = true;
+ continue; // don't add to ignoredUnassigned
+ } else {
+ final RoutingNode node = routingNodes.node(minNode.getNodeId());
+ if (deciders.canAllocate(node, allocation).type() != Type.YES) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Can not allocate on node [{}] remove from round decisin [{}]", node, decision.type());
+ }
+ throttledNodes.add(minNode);
+ }
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("No eligable node found to assign shard [{}] decision [{}]", shard, decision.type());
+ }
+ } else if (logger.isTraceEnabled()) {
+ logger.trace("No Node found to assign shard [{}]", shard);
+ }
+ ignoredUnassigned.add(shard);
+ if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
+ while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) {
+ ignoredUnassigned.add(secondary[--secondaryLength]);
+ }
+ }
+ }
+ primaryLength = secondaryLength;
+ MutableShardRouting[] tmp = primary;
+ primary = secondary;
+ secondary = tmp;
+ secondaryLength = 0;
+ } while (primaryLength > 0);
+ // nothing left to clear: every shard has either been assigned or moved to ignoredUnassigned
+ return changed;
+ }
+
+ /**
+ * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the
+ * balance model. Iff this method returns a <code>true</code> the relocation has already been executed on the
+ * simulation model as well as on the cluster.
+ */
+ private boolean tryRelocateShard(Operation operation, ModelNode minNode, ModelNode maxNode, String idx, float minCost) {
+ final ModelIndex index = maxNode.getIndex(idx);
+ Decision decision = null;
+ if (index != null) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Try relocating shard for index index [{}] from node [{}] to node [{}]", idx, maxNode.getNodeId(),
+ minNode.getNodeId());
+ }
+ final RoutingNode node = routingNodes.node(minNode.getNodeId());
+ MutableShardRouting candidate = null;
+ final AllocationDeciders deciders = allocation.deciders();
+ /* make a copy since we modify this list in the loop */
+ final ArrayList<MutableShardRouting> shards = new ArrayList<MutableShardRouting>(index.getAllShards());
+ for (MutableShardRouting shard : shards) {
+ if (shard.started()) {
+ // skip initializing, unassigned and relocating shards; we can't relocate them anyway
+ Decision allocationDecision = deciders.canAllocate(shard, node, allocation);
+ Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
+ if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE))
+ && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) {
+ Decision srcDecision;
+ if ((srcDecision = maxNode.removeShard(shard)) != null) {
+ minNode.addShard(shard, srcDecision);
+ final float delta = weight.weight(operation, this, minNode, idx) - weight.weight(operation, this, maxNode, idx);
+ if (delta < minCost ||
+ (candidate != null && delta == minCost && candidate.id() > shard.id())) {
+ /* this last line is a tie-breaker to make the shard allocation algorithm deterministic;
+ * otherwise we rely on the iteration order of index.getAllShards() which is a set.*/
+ minCost = delta;
+ candidate = shard;
+ decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
+ }
+ minNode.removeShard(shard);
+ maxNode.addShard(shard, srcDecision);
+ }
+ }
+ }
+ }
+
+ if (candidate != null) {
+
+ /* allocate on the model even if not throttled */
+ maxNode.removeShard(candidate);
+ minNode.addShard(candidate, decision);
+ if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
+ if (logger.isTraceEnabled()) {
+ logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
+ minNode.getNodeId());
+ }
+ /* now allocate on the cluster - if we are started we need to relocate the shard */
+ if (candidate.started()) {
+ RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
+ routingNodes.assign(new MutableShardRouting(candidate.index(), candidate.id(), lowRoutingNode.nodeId(), candidate
+ .currentNodeId(), candidate.restoreSource(), candidate.primary(), INITIALIZING, candidate.version() + 1), lowRoutingNode.nodeId());
+ routingNodes.relocate(candidate, lowRoutingNode.nodeId());
+
+ } else {
+ assert candidate.unassigned();
+ routingNodes.assign(candidate, routingNodes.node(minNode.getNodeId()).nodeId());
+ }
+ return true;
+
+ }
+ }
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", maxNode.getNodeId(),
+ minNode.getNodeId(), decision == null ? "NO" : decision.type().name());
+ }
+ return false;
+ }
+
+ }
+
+ static class ModelNode implements Iterable<ModelIndex> {
+ private final String id;
+ private final Map<String, ModelIndex> indices = new HashMap<String, ModelIndex>();
+ /* cached stats - invalidated on add/remove and lazily calculated */
+ private int numShards = -1;
+ private int numPrimaries = -1;
+
+ public ModelNode(String id) {
+ this.id = id;
+ }
+
+ public ModelIndex getIndex(String indexId) {
+ return indices.get(indexId);
+ }
+
+ public String getNodeId() {
+ return id;
+ }
+
+ public int numShards() {
+ if (numShards == -1) {
+ int sum = 0;
+ for (ModelIndex index : indices.values()) {
+ sum += index.numShards();
+ }
+ numShards = sum;
+ }
+ return numShards;
+ }
+
+ public int numShards(String idx) {
+ ModelIndex index = indices.get(idx);
+ return index == null ? 0 : index.numShards();
+ }
+
+ public int numPrimaries(String idx) {
+ ModelIndex index = indices.get(idx);
+ return index == null ? 0 : index.numPrimaries();
+ }
+
+ public int numPrimaries() {
+ if (numPrimaries == -1) {
+ int sum = 0;
+ for (ModelIndex index : indices.values()) {
+ sum += index.numPrimaries();
+ }
+ numPrimaries = sum;
+ }
+ return numPrimaries;
+ }
+
+ public Collection<MutableShardRouting> shards() {
+ Collection<MutableShardRouting> result = new ArrayList<MutableShardRouting>();
+ for (ModelIndex index : indices.values()) {
+ result.addAll(index.getAllShards());
+ }
+ return result;
+ }
+
+ public int highestPrimary(String index) {
+ ModelIndex idx = indices.get(index);
+ if (idx != null) {
+ return idx.highestPrimary();
+ }
+ return -1;
+ }
+
+ public void addShard(MutableShardRouting shard, Decision decision) {
+ numPrimaries = numShards = -1;
+ ModelIndex index = indices.get(shard.index());
+ if (index == null) {
+ index = new ModelIndex(shard.index());
+ indices.put(index.getIndexId(), index);
+ }
+ index.addShard(shard, decision);
+ }
+
+ public Decision removeShard(MutableShardRouting shard) {
+ numPrimaries = numShards = -1;
+ ModelIndex index = indices.get(shard.index());
+ Decision removed = null;
+ if (index != null) {
+ removed = index.removeShard(shard);
+ if (removed != null && index.numShards() == 0) {
+ indices.remove(shard.index());
+ }
+ }
+ return removed;
+ }
+
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Node(").append(id).append(")");
+ return sb.toString();
+ }
+
+ @Override
+ public Iterator<ModelIndex> iterator() {
+ return indices.values().iterator();
+ }
+
+ public boolean containsShard(MutableShardRouting shard) {
+ ModelIndex index = getIndex(shard.getIndex());
+ return index == null ? false : index.containsShard(shard);
+ }
+
+ }
+
+ static final class ModelIndex {
+ private final String id;
+ private final Map<MutableShardRouting, Decision> shards = new HashMap<MutableShardRouting, Decision>();
+ private int numPrimaries = -1;
+ private int highestPrimary = -1;
+
+ public ModelIndex(String id) {
+ this.id = id;
+ }
+
+ public int highestPrimary() {
+ if (highestPrimary == -1) {
+ int maxId = -1;
+ for (MutableShardRouting shard : shards.keySet()) {
+ if (shard.primary()) {
+ maxId = Math.max(maxId, shard.id());
+ }
+ }
+ return highestPrimary = maxId;
+ }
+ return highestPrimary;
+ }
+
+ public String getIndexId() {
+ return id;
+ }
+
+ public Decision getDecision(MutableShardRouting shard) {
+ return shards.get(shard);
+ }
+
+ public int numShards() {
+ return shards.size();
+ }
+
+ public Collection<MutableShardRouting> getAllShards() {
+ return shards.keySet();
+ }
+
+ public int numPrimaries() {
+ if (numPrimaries == -1) {
+ int num = 0;
+ for (MutableShardRouting shard : shards.keySet()) {
+ if (shard.primary()) {
+ num++;
+ }
+ }
+ return numPrimaries = num;
+ }
+ return numPrimaries;
+ }
+
+ public Decision removeShard(MutableShardRouting shard) {
+ highestPrimary = numPrimaries = -1;
+ return shards.remove(shard);
+ }
+
+ public void addShard(MutableShardRouting shard, Decision decision) {
+ highestPrimary = numPrimaries = -1;
+ assert decision != null;
+ assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
+ shards.put(shard, decision);
+ }
+
+ public boolean containsShard(MutableShardRouting shard) {
+ return shards.containsKey(shard);
+ }
+ }
+
+ static final class NodeSorter extends IntroSorter {
+
+ final ModelNode[] modelNodes;
+ /* the nodes weights with respect to the current weight function / index */
+ final float[] weights;
+ private final WeightFunction function;
+ private String index;
+ private final Balancer balancer;
+ private float pivotWeight;
+
+ public NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) {
+ this.function = function;
+ this.balancer = balancer;
+ this.modelNodes = modelNodes;
+ weights = new float[modelNodes.length];
+ }
+
+ /**
+ * Resets the sorter, recalculates the weights per node and sorts the
+ * nodes by weight, with minimal weight first.
+ */
+ public void reset(Operation operation, String index) {
+ this.index = index;
+ for (int i = 0; i < weights.length; i++) {
+ weights[i] = weight(operation, modelNodes[i]);
+ }
+ sort(0, modelNodes.length);
+ }
+
+ public float weight(Operation operation, ModelNode node) {
+ return function.weight(operation, balancer, node, index);
+ }
+
+ @Override
+ protected void swap(int i, int j) {
+ final ModelNode tmpNode = modelNodes[i];
+ modelNodes[i] = modelNodes[j];
+ modelNodes[j] = tmpNode;
+ final float tmpWeight = weights[i];
+ weights[i] = weights[j];
+ weights[j] = tmpWeight;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return Float.compare(weights[i], weights[j]);
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivotWeight = weights[i];
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return Float.compare(pivotWeight, weights[j]);
+ }
+
+ public float delta() {
+ return weights[weights.length - 1] - weights[0];
+ }
+ }
+}
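The NodeSorter above keeps the model nodes and their weights in two parallel arrays and sorts both in lock-step through Lucene's IntroSorter hooks, so that after reset() the lightest node sits at index 0 and delta() reports the spread between the heaviest and lightest node. A minimal standalone sketch of the same pattern, assuming only a Lucene dependency (the ids and weights are illustrative, not part of the patch):

    import org.apache.lucene.util.IntroSorter;

    // Sorts ids[] and weights[] together, lightest weight first -- the same
    // swap/compare/setPivot/comparePivot contract that NodeSorter implements.
    final class ParallelWeightSorter extends IntroSorter {
        final String[] ids;      // stands in for ModelNode[]
        final float[] weights;
        private float pivot;

        ParallelWeightSorter(String[] ids, float[] weights) {
            this.ids = ids;
            this.weights = weights;
        }

        @Override
        protected void swap(int i, int j) {
            String tmpId = ids[i]; ids[i] = ids[j]; ids[j] = tmpId;
            float tmpWeight = weights[i]; weights[i] = weights[j]; weights[j] = tmpWeight;
        }

        @Override
        protected int compare(int i, int j) {
            return Float.compare(weights[i], weights[j]);
        }

        @Override
        protected void setPivot(int i) {
            pivot = weights[i];
        }

        @Override
        protected int comparePivot(int j) {
            return Float.compare(pivot, weights[j]);
        }

        public static void main(String[] args) {
            String[] ids = {"node1", "node2", "node3"};
            float[] weights = {2.5f, 0.5f, 1.0f};
            new ParallelWeightSorter(ids, weights).sort(0, ids.length);
            // ids is now [node2, node3, node1]; weights[2] - weights[0] == 2.0f,
            // the spread that NodeSorter.delta() would report
        }
    }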
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java
new file mode 100644
index 0000000..48f39f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.allocator;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+
+/**
+ * A {@link ShardsAllocator} that tries to balance shards across nodes in the
+ * cluster such that each node holds approximately the same number of shards. The
+ * allocation algorithm operates on the cluster as a whole, i.e. it is index-agnostic.
+ * While the number of shards per node may be balanced across the cluster, a single
+ * node can hold multiple shards of the same index, so the shards of an index are
+ * not necessarily balanced across nodes. However, due to high-level
+ * {@link AllocationDecider decisions}, multiple instances of the same shard
+ * won't be allocated on the same node.
+ * <p>
+ * During {@link #rebalance(RoutingAllocation) re-balancing} the allocator takes
+ * shards from the <tt>most busy</tt> nodes and tries to relocate them to the
+ * least busy node until the number of shards per node is equal for all nodes
+ * in the cluster or until no shards can be relocated anymore.
+ * </p>
+ */
+public class EvenShardsCountAllocator extends AbstractComponent implements ShardsAllocator {
+
+ @Inject
+ public EvenShardsCountAllocator(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ boolean changed = false;
+ RoutingNodes routingNodes = allocation.routingNodes();
+ /*
+ * 1. order nodes by the number of shards allocated on them, least loaded first (this takes relocation into account:
+ * if a shard is relocating, the target node's shard count is incremented).
+ * 2. iterate over the unassigned shards
+ * 2a. find the least busy node in the cluster that allows allocation for the current unassigned shard
+ * 2b. if a node is found, add the shard to the node and remove it from the unassigned shards
+ * 3. iterate over the remaining unassigned shards and try to allocate them on the next possible node
+ */
+ // order nodes by number of shards (asc)
+ RoutingNode[] nodes = sortedNodesLeastToHigh(allocation);
+
+ Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
+ int lastNode = 0;
+
+ while (unassignedIterator.hasNext()) {
+ MutableShardRouting shard = unassignedIterator.next();
+ // do the allocation, finding the least "busy" node
+ for (int i = 0; i < nodes.length; i++) {
+ RoutingNode node = nodes[lastNode];
+ lastNode++;
+ if (lastNode == nodes.length) {
+ lastNode = 0;
+ }
+
+ Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (decision.type() == Decision.Type.YES) {
+ int numberOfShardsToAllocate = routingNodes.requiredAverageNumberOfShardsPerNode() - node.size();
+ if (numberOfShardsToAllocate <= 0) {
+ continue;
+ }
+
+ changed = true;
+ allocation.routingNodes().assign(shard, node.nodeId());
+ unassignedIterator.remove();
+ break;
+ }
+ }
+ }
+
+ // allocate all the unassigned shards above the average per node.
+ for (Iterator<MutableShardRouting> it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
+ MutableShardRouting shard = it.next();
+ // go over the nodes and try and allocate the remaining ones
+ for (RoutingNode routingNode : sortedNodesLeastToHigh(allocation)) {
+ Decision decision = allocation.deciders().canAllocate(shard, routingNode, allocation);
+ if (decision.type() == Decision.Type.YES) {
+ changed = true;
+ allocation.routingNodes().assign(shard, routingNode.nodeId());
+ it.remove();
+ break;
+ }
+ }
+ }
+ return changed;
+ }
+
+ @Override
+ public boolean rebalance(RoutingAllocation allocation) {
+ // take shards from busy nodes and move them to less busy ones
+ boolean changed = false;
+ RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation);
+ if (sortedNodesLeastToHigh.length == 0) {
+ return false;
+ }
+ int lowIndex = 0;
+ int highIndex = sortedNodesLeastToHigh.length - 1;
+ boolean relocationPerformed;
+ do {
+ relocationPerformed = false;
+ while (lowIndex != highIndex) {
+ RoutingNode lowRoutingNode = sortedNodesLeastToHigh[lowIndex];
+ RoutingNode highRoutingNode = sortedNodesLeastToHigh[highIndex];
+ int averageNumOfShards = allocation.routingNodes().requiredAverageNumberOfShardsPerNode();
+
+ // only active shards can be removed so must count only active ones.
+ if (highRoutingNode.numberOfOwningShards() <= averageNumOfShards) {
+ highIndex--;
+ continue;
+ }
+
+ if (lowRoutingNode.size() >= averageNumOfShards) {
+ lowIndex++;
+ continue;
+ }
+
+ // Take a started shard from a "busy" node and move it to less busy node and go on
+ boolean relocated = false;
+ List<MutableShardRouting> startedShards = highRoutingNode.shardsWithState(STARTED);
+ for (MutableShardRouting startedShard : startedShards) {
+ Decision rebalanceDecision = allocation.deciders().canRebalance(startedShard, allocation);
+ if (rebalanceDecision.type() == Decision.Type.NO) {
+ continue;
+ }
+
+ Decision allocateDecision = allocation.deciders().canAllocate(startedShard, lowRoutingNode, allocation);
+ if (allocateDecision.type() == Decision.Type.YES) {
+ changed = true;
+ allocation.routingNodes().assign(new MutableShardRouting(startedShard.index(), startedShard.id(),
+ lowRoutingNode.nodeId(), startedShard.currentNodeId(), startedShard.restoreSource(),
+ startedShard.primary(), INITIALIZING, startedShard.version() + 1), lowRoutingNode.nodeId());
+
+ allocation.routingNodes().relocate(startedShard, lowRoutingNode.nodeId());
+ relocated = true;
+ relocationPerformed = true;
+ break;
+ }
+ }
+
+ if (!relocated) {
+ highIndex--;
+ }
+ }
+ } while (relocationPerformed);
+ return changed;
+ }
+
+ @Override
+ public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (!shardRouting.started()) {
+ return false;
+ }
+ boolean changed = false;
+ RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation);
+ if (sortedNodesLeastToHigh.length == 0) {
+ return false;
+ }
+
+ for (RoutingNode nodeToCheck : sortedNodesLeastToHigh) {
+ // skip the node we are moving from; no sense checking it
+ if (nodeToCheck.nodeId().equals(node.nodeId())) {
+ continue;
+ }
+ Decision decision = allocation.deciders().canAllocate(shardRouting, nodeToCheck, allocation);
+ if (decision.type() == Decision.Type.YES) {
+ allocation.routingNodes().assign(new MutableShardRouting(shardRouting.index(), shardRouting.id(),
+ nodeToCheck.nodeId(), shardRouting.currentNodeId(), shardRouting.restoreSource(),
+ shardRouting.primary(), INITIALIZING, shardRouting.version() + 1), nodeToCheck.nodeId());
+
+ allocation.routingNodes().relocate(shardRouting, nodeToCheck.nodeId());
+ changed = true;
+ break;
+ }
+ }
+
+ return changed;
+ }
+
+ private RoutingNode[] sortedNodesLeastToHigh(RoutingAllocation allocation) {
+ // create count per node id, taking into account relocations
+ final ObjectIntOpenHashMap<String> nodeCounts = new ObjectIntOpenHashMap<String>();
+ for (RoutingNode node : allocation.routingNodes()) {
+ for (int i = 0; i < node.size(); i++) {
+ ShardRouting shardRouting = node.get(i);
+ String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
+ nodeCounts.addTo(nodeId, 1);
+ }
+ }
+ RoutingNode[] nodes = allocation.routingNodes().toArray();
+ Arrays.sort(nodes, new Comparator<RoutingNode>() {
+ @Override
+ public int compare(RoutingNode o1, RoutingNode o2) {
+ return nodeCounts.get(o1.nodeId()) - nodeCounts.get(o2.nodeId());
+ }
+ });
+ return nodes;
+ }
+}
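The sortedNodesLeastToHigh(...) helper above drives both allocation and rebalancing: it counts shards per node, charging a relocating shard to its target node, then sorts the nodes by that count in ascending order. A standalone sketch of that counting-and-sorting step, using the same hppc map type the patch imports (the shard tuples and node ids are made up for illustration):

    import com.carrotsearch.hppc.ObjectIntOpenHashMap;

    import java.util.Arrays;
    import java.util.Comparator;

    public class LeastToHighDemo {
        public static void main(String[] args) {
            // {currentNodeId, relocatingNodeId} pairs; null means not relocating
            String[][] shards = {
                    {"node1", null}, {"node1", null}, {"node1", "node3"},
                    {"node2", null},
            };
            final ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
            for (String[] shard : shards) {
                // a relocating shard counts against its target node, as in the patch
                String nodeId = shard[1] != null ? shard[1] : shard[0];
                counts.addTo(nodeId, 1);
            }
            String[] nodes = {"node1", "node2", "node3"};
            Arrays.sort(nodes, new Comparator<String>() {
                @Override
                public int compare(String n1, String n2) {
                    return counts.get(n1) - counts.get(n2);
                }
            });
            System.out.println(Arrays.toString(nodes)); // [node2, node3, node1]
        }
    }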
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/GatewayAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/GatewayAllocator.java
new file mode 100644
index 0000000..81b5192
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/GatewayAllocator.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.allocator;
+
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+
+/**
+ * The gateway allocator provides pluggable control over how the gateway allocates unassigned shards.
+ */
+public interface GatewayAllocator {
+
+ /**
+ * Applies changes for shards that have been started.
+ * @param allocation the started-shards allocation to apply
+ */
+ void applyStartedShards(StartedRerouteAllocation allocation);
+
+ void applyFailedShards(FailedRerouteAllocation allocation);
+
+ boolean allocateUnassigned(RoutingAllocation allocation);
+}
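The interface is small enough that a do-nothing implementation, in the spirit of the NoneGatewayAllocator that ShardsAllocatorModule binds by default, needs only three methods. A sketch, assuming it lives in the same package (this is not the actual NoneGatewayAllocator source):

    import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;

    // Ignores started and failed shards and never assigns anything, so
    // allocateUnassigned always reports that nothing changed.
    public class NoOpGatewayAllocator implements GatewayAllocator {

        @Override
        public void applyStartedShards(StartedRerouteAllocation allocation) {
        }

        @Override
        public void applyFailedShards(FailedRerouteAllocation allocation) {
        }

        @Override
        public boolean allocateUnassigned(RoutingAllocation allocation) {
            return false;
        }
    }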
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
new file mode 100644
index 0000000..effcb36
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.allocator;
+
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+
+/**
+ * <p>
+ * A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
+ * The allocator makes the basic decisions of where a shard instance will be allocated and whether
+ * already allocated instances need to relocate to other nodes due to node failures or rebalancing decisions.
+ * </p>
+ */
+public interface ShardsAllocator {
+
+ /**
+ * Applies changes for started shards based on the implemented algorithm. For example, if a
+ * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
+ * this allocator might apply some cleanups on the node that used to hold the shard.
+ * @param allocation all started {@link ShardRouting shards}
+ */
+ void applyStartedShards(StartedRerouteAllocation allocation);
+
+ /**
+ * Applies changes for failed shards based on the implemented algorithm.
+ * @param allocation all failed {@link ShardRouting shards}
+ */
+ void applyFailedShards(FailedRerouteAllocation allocation);
+
+ /**
+ * Assigns all unassigned shards to nodes
+ *
+ * @param allocation current node allocation
+ * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
+ */
+ boolean allocateUnassigned(RoutingAllocation allocation);
+
+ /**
+ * Rebalances the number of shards across all nodes
+ *
+ * @param allocation current node allocation
+ * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
+ */
+ boolean rebalance(RoutingAllocation allocation);
+
+ /**
+ * Moves a shard from the given node to another node.
+ *
+ * @param shardRouting the shard to move
+ * @param node A node containing the shard
+ * @param allocation current node allocation
+ * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
+ */
+ boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java
new file mode 100644
index 0000000..fe11197
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.allocator;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.none.NoneGatewayAllocator;
+
+/**
+ */
+public class ShardsAllocatorModule extends AbstractModule {
+
+ public static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard";
+
+ public static final String BALANCED_ALLOCATOR_KEY = "balanced"; // default
+
+ public static final String TYPE_KEY = "cluster.routing.allocation.type";
+
+ private Settings settings;
+
+ private Class<? extends ShardsAllocator> shardsAllocator;
+
+ private Class<? extends GatewayAllocator> gatewayAllocator = NoneGatewayAllocator.class;
+
+ public ShardsAllocatorModule(Settings settings) {
+ this.settings = settings;
+ shardsAllocator = loadShardsAllocator(settings);
+ }
+
+ public void setGatewayAllocator(Class<? extends GatewayAllocator> gatewayAllocator) {
+ this.gatewayAllocator = gatewayAllocator;
+ }
+
+ public void setShardsAllocator(Class<? extends ShardsAllocator> shardsAllocator) {
+ this.shardsAllocator = shardsAllocator;
+ }
+
+ @Override
+ protected void configure() {
+ if (shardsAllocator == null) {
+ shardsAllocator = loadShardsAllocator(settings);
+ }
+ bind(GatewayAllocator.class).to(gatewayAllocator).asEagerSingleton();
+ bind(ShardsAllocator.class).to(shardsAllocator).asEagerSingleton();
+ }
+
+ private Class<? extends ShardsAllocator> loadShardsAllocator(Settings settings) {
+ final Class<? extends ShardsAllocator> shardsAllocator;
+ final String type = settings.get(TYPE_KEY, BALANCED_ALLOCATOR_KEY);
+ if (BALANCED_ALLOCATOR_KEY.equals(type)) {
+ shardsAllocator = BalancedShardsAllocator.class;
+ } else if (EVEN_SHARD_COUNT_ALLOCATOR_KEY.equals(type)) {
+ shardsAllocator = EvenShardsCountAllocator.class;
+ } else {
+ shardsAllocator = settings.getAsClass(TYPE_KEY, BalancedShardsAllocator.class,
+ "org.elasticsearch.cluster.routing.allocation.allocator.", "Allocator");
+ }
+ return shardsAllocator;
+ }
+}
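Which allocator the module binds is driven entirely by the cluster.routing.allocation.type setting, resolved in loadShardsAllocator above. A sketch of the three branches, assuming the usual ImmutableSettings builder from this codebase (the custom class name is hypothetical):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class AllocatorTypeDemo {
        public static void main(String[] args) {
            // no type set -> the default, BalancedShardsAllocator
            Settings byDefault = ImmutableSettings.settingsBuilder().build();

            // built-in key -> EvenShardsCountAllocator
            Settings evenShard = ImmutableSettings.settingsBuilder()
                    .put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY)
                    .build();

            // any other value is resolved as a class name via settings.getAsClass,
            // which tries the allocator package prefix and an "Allocator" suffix
            Settings custom = ImmutableSettings.settingsBuilder()
                    .put(ShardsAllocatorModule.TYPE_KEY, "org.example.MyShardsAllocator") // hypothetical
                    .build();

            new ShardsAllocatorModule(byDefault);  // would bind BalancedShardsAllocator
            new ShardsAllocatorModule(evenShard);  // would bind EvenShardsCountAllocator
            new ShardsAllocatorModule(custom);     // would bind the custom class; fails if it cannot be loaded
        }
    }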
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java
new file mode 100644
index 0000000..8cd4c08
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.allocator;
+
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.none.NoneGatewayAllocator;
+
+/**
+ * The {@link ShardsAllocators} class offers methods for allocating shards within a cluster.
+ * These methods include moving shards and re-balancing the cluster. It also allows management
+ * of shards by their state.
+ */
+public class ShardsAllocators extends AbstractComponent implements ShardsAllocator {
+
+ private final GatewayAllocator gatewayAllocator;
+ private final ShardsAllocator allocator;
+
+ public ShardsAllocators() {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public ShardsAllocators(Settings settings) {
+ this(settings, new NoneGatewayAllocator(), new BalancedShardsAllocator(settings));
+ }
+
+ @Inject
+ public ShardsAllocators(Settings settings, GatewayAllocator gatewayAllocator, ShardsAllocator allocator) {
+ super(settings);
+ this.gatewayAllocator = gatewayAllocator;
+ this.allocator = allocator;
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ gatewayAllocator.applyStartedShards(allocation);
+ allocator.applyStartedShards(allocation);
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ gatewayAllocator.applyFailedShards(allocation);
+ allocator.applyFailedShards(allocation);
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ boolean changed = false;
+ changed |= gatewayAllocator.allocateUnassigned(allocation);
+ changed |= allocator.allocateUnassigned(allocation);
+ return changed;
+ }
+
+ @Override
+ public boolean rebalance(RoutingAllocation allocation) {
+ return allocator.rebalance(allocation);
+ }
+
+ @Override
+ public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return allocator.move(shardRouting, node, allocation);
+ }
+}
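Note the delegation order: for unassigned shards the gateway allocator gets the first pass and the bound ShardsAllocator handles the rest, while rebalance and move bypass the gateway entirely. A usage sketch that spells out what the one-argument convenience constructor expands to:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.gateway.none.NoneGatewayAllocator;

    public class ShardsAllocatorsDemo {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
            // equivalent to new ShardsAllocators(settings)
            ShardsAllocators allocators = new ShardsAllocators(settings,
                    new NoneGatewayAllocator(), new BalancedShardsAllocator(settings));
        }
    }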
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java
new file mode 100644
index 0000000..58301a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.command;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Allocates an unassigned shard to a specific node. Note, primary allocation will "force"
+ * the allocation, which can mean losing data when using the local gateway; use the
+ * <tt>allowPrimary</tt> flag with care.
+ */
+public class AllocateAllocationCommand implements AllocationCommand {
+
+ public static final String NAME = "allocate";
+
+ public static class Factory implements AllocationCommand.Factory<AllocateAllocationCommand> {
+
+ @Override
+ public AllocateAllocationCommand readFrom(StreamInput in) throws IOException {
+ return new AllocateAllocationCommand(ShardId.readShardId(in), in.readString(), in.readBoolean());
+ }
+
+ @Override
+ public void writeTo(AllocateAllocationCommand command, StreamOutput out) throws IOException {
+ command.shardId().writeTo(out);
+ out.writeString(command.node());
+ out.writeBoolean(command.allowPrimary());
+ }
+
+ @Override
+ public AllocateAllocationCommand fromXContent(XContentParser parser) throws IOException {
+ String index = null;
+ int shardId = -1;
+ String nodeId = null;
+ boolean allowPrimary = false;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("shard".equals(currentFieldName)) {
+ shardId = parser.intValue();
+ } else if ("node".equals(currentFieldName)) {
+ nodeId = parser.text();
+ } else if ("allow_primary".equals(currentFieldName) || "allowPrimary".equals(currentFieldName)) {
+ allowPrimary = parser.booleanValue();
+ } else {
+ throw new ElasticsearchParseException("[allocate] command does not support field [" + currentFieldName + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("[allocate] command does not support complex json tokens [" + token + "]");
+ }
+ }
+ if (index == null) {
+ throw new ElasticsearchParseException("[allocate] command missing the index parameter");
+ }
+ if (shardId == -1) {
+ throw new ElasticsearchParseException("[allocate] command missing the shard parameter");
+ }
+ if (nodeId == null) {
+ throw new ElasticsearchParseException("[allocate] command missing the node parameter");
+ }
+ return new AllocateAllocationCommand(new ShardId(index, shardId), nodeId, allowPrimary);
+ }
+
+ @Override
+ public void toXContent(AllocateAllocationCommand command, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("index", command.shardId().index());
+ builder.field("shard", command.shardId().id());
+ builder.field("node", command.node());
+ builder.field("allow_primary", command.allowPrimary());
+ builder.endObject();
+ }
+ }
+
+ private final ShardId shardId;
+ private final String node;
+ private final boolean allowPrimary;
+
+ /**
+ * Create a new {@link AllocateAllocationCommand}
+ *
+ * @param shardId {@link ShardId} of the shard to assign
+ * @param node Node to assign the shard to
+ * @param allowPrimary whether the node is allowed to allocate the shard as primary
+ */
+ public AllocateAllocationCommand(ShardId shardId, String node, boolean allowPrimary) {
+ this.shardId = shardId;
+ this.node = node;
+ this.allowPrimary = allowPrimary;
+ }
+
+ @Override
+ public String name() {
+ return NAME;
+ }
+
+ /**
+ * Get the shard's id
+ *
+ * @return id of the shard
+ */
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ /**
+ * Get the id of the Node
+ *
+ * @return id of the Node
+ */
+ public String node() {
+ return this.node;
+ }
+
+ /**
+ * Determine if primary allocation is allowed
+ *
+ * @return <code>true</code> if primary allocation is allowed. Otherwise <code>false</code>
+ */
+ public boolean allowPrimary() {
+ return this.allowPrimary;
+ }
+
+ @Override
+ public void execute(RoutingAllocation allocation) throws ElasticsearchException {
+ DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
+
+ MutableShardRouting shardRouting = null;
+ for (MutableShardRouting routing : allocation.routingNodes().unassigned()) {
+ if (routing.shardId().equals(shardId)) {
+ // prefer primaries first to allocate
+ if (shardRouting == null || routing.primary()) {
+ shardRouting = routing;
+ }
+ }
+ }
+
+ if (shardRouting == null) {
+ throw new ElasticsearchIllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards");
+ }
+
+ if (shardRouting.primary() && !allowPrimary) {
+ throw new ElasticsearchIllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + "], which is disabled");
+ }
+
+ RoutingNode routingNode = allocation.routingNodes().node(discoNode.id());
+ if (routingNode == null) {
+ if (!discoNode.dataNode()) {
+ throw new ElasticsearchIllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]");
+ } else {
+ throw new ElasticsearchIllegalStateException("Could not find [" + node + "] among the routing nodes");
+ }
+ }
+
+ Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
+ if (decision.type() == Decision.Type.NO) {
+ throw new ElasticsearchIllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision);
+ }
+ // go over and remove it from the unassigned
+ for (Iterator<MutableShardRouting> it = allocation.routingNodes().unassigned().iterator(); it.hasNext(); ) {
+ if (it.next() != shardRouting) {
+ continue;
+ }
+ it.remove();
+ allocation.routingNodes().assign(shardRouting, routingNode.nodeId());
+ if (shardRouting.primary()) {
+ // we need to clear the post allocation flag, since its an explicit allocation of the primary shard
+ // and we want to force allocate it (and create a new index for it)
+ allocation.routingNodes().addClearPostAllocationFlag(shardRouting.shardId());
+ }
+ break;
+ }
+ }
+}
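Since the command is a plain value object, its Factory can round it through XContent outside any cluster. A sketch, assuming the codebase's usual XContentFactory.jsonBuilder() helper (the index, shard and node values are illustrative):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.shard.ShardId;

    public class AllocateCommandDemo {
        public static void main(String[] args) throws Exception {
            AllocateAllocationCommand command =
                    new AllocateAllocationCommand(new ShardId("test", 0), "node1", false);

            XContentBuilder builder = XContentFactory.jsonBuilder();
            new AllocateAllocationCommand.Factory().toXContent(command, builder, ToXContent.EMPTY_PARAMS);
            // prints something like:
            // {"index":"test","shard":0,"node":"node1","allow_primary":false}
            System.out.println(builder.string());
        }
    }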
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
new file mode 100644
index 0000000..5145152
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.command;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * This interface defines the basic methods of allocation commands
+ */
+public interface AllocationCommand {
+
+ /**
+ * Factory to create {@link AllocationCommand}s
+ * @param <T> Type of {@link AllocationCommand}s created by this {@link Factory}
+ */
+ interface Factory<T extends AllocationCommand> {
+
+ /**
+ * Reads an {@link AllocationCommand} of type <code>T</code> from a {@link StreamInput}
+ * @param in {@link StreamInput} to read the {@link AllocationCommand} from
+ * @return {@link AllocationCommand} read from the {@link StreamInput}
+ * @throws IOException if something happens during reading
+ */
+ T readFrom(StreamInput in) throws IOException;
+
+ /**
+ * Writes an {@link AllocationCommand} to a {@link StreamOutput}
+ * @param command {@link AllocationCommand} to write
+ * @param out {@link StreamOutput} to write the {@link AllocationCommand} to
+ * @throws IOException if something happens during writing the command
+ */
+ void writeTo(T command, StreamOutput out) throws IOException;
+
+ /**
+ * Reads an {@link AllocationCommand} of type <code>T</code> from a {@link XContentParser}
+ * @param parser {@link XContentParser} to use
+ * @return {@link AllocationCommand} read
+ * @throws IOException if something happens during reading
+ */
+ T fromXContent(XContentParser parser) throws IOException;
+
+ /**
+ * Writes an {@link AllocationCommand} using an {@link XContentBuilder}
+ * @param command {@link AllocationCommand} to write
+ * @param builder {@link XContentBuilder} to use
+ * @param params parameters to use when writing the command
+ * @throws IOException if something happens during writing the command
+ */
+ void toXContent(T command, XContentBuilder builder, ToXContent.Params params) throws IOException;
+ }
+
+ /**
+ * Get the name of the command
+ * @return name of the command
+ */
+ String name();
+
+ /**
+ * Executes the command on a {@link RoutingAllocation} setup
+ * @param allocation {@link RoutingAllocation} to modify
+ * @throws org.elasticsearch.ElasticsearchException if something happens during reconfiguration
+ */
+ void execute(RoutingAllocation allocation) throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
new file mode 100644
index 0000000..97ede6e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.command;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A simple {@link AllocationCommand} composite managing several
+ * {@link AllocationCommand} implementations
+ */
+public class AllocationCommands {
+
+ private static Map<String, AllocationCommand.Factory> factories = new HashMap<String, AllocationCommand.Factory>();
+
+ /**
+ * Registers a custom allocation command factory. Make sure to call it from a static block.
+ */
+ public static void registerFactory(String type, AllocationCommand.Factory factory) {
+ factories.put(type, factory);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Nullable
+ public static <T extends AllocationCommand> AllocationCommand.Factory<T> lookupFactory(String name) {
+ return factories.get(name);
+ }
+
+ @SuppressWarnings("unchecked")
+ public static <T extends AllocationCommand> AllocationCommand.Factory<T> lookupFactorySafe(String name) throws ElasticsearchIllegalArgumentException {
+ AllocationCommand.Factory<T> factory = factories.get(name);
+ if (factory == null) {
+ throw new ElasticsearchIllegalArgumentException("No allocation command factory registered for name [" + name + "]");
+ }
+ return factory;
+ }
+
+ static {
+ registerFactory(AllocateAllocationCommand.NAME, new AllocateAllocationCommand.Factory());
+ registerFactory(CancelAllocationCommand.NAME, new CancelAllocationCommand.Factory());
+ registerFactory(MoveAllocationCommand.NAME, new MoveAllocationCommand.Factory());
+ }
+
+ private final List<AllocationCommand> commands = Lists.newArrayList();
+
+ /**
+ * Creates a new set of {@link AllocationCommands}
+ *
+ * @param commands {@link AllocationCommand}s that are wrapped by this instance
+ */
+ public AllocationCommands(AllocationCommand... commands) {
+ if (commands != null) {
+ this.commands.addAll(Arrays.asList(commands));
+ }
+ }
+
+ /**
+ * Adds a set of commands to this collection
+ * @param commands Array of commands to add to this instance
+ * @return {@link AllocationCommands} with the given commands added
+ */
+ public AllocationCommands add(AllocationCommand... commands) {
+ if (commands != null) {
+ this.commands.addAll(Arrays.asList(commands));
+ }
+ return this;
+ }
+
+ /**
+ * Get the commands wrapped by this instance
+ * @return {@link List} of commands
+ */
+ public List<AllocationCommand> commands() {
+ return this.commands;
+ }
+
+ /**
+ * Executes all wrapped commands on a given {@link RoutingAllocation}
+ * @param allocation {@link RoutingAllocation} to apply this command to
+ * @throws org.elasticsearch.ElasticsearchException if something happens during execution
+ */
+ public void execute(RoutingAllocation allocation) throws ElasticsearchException {
+ for (AllocationCommand command : commands) {
+ command.execute(allocation);
+ }
+ }
+
+ /**
+ * Reads an {@link AllocationCommands} object from a {@link StreamInput}
+ * @param in {@link StreamInput} to read from
+ * @return {@link AllocationCommands} read
+ *
+ * @throws IOException if something happens during read
+ */
+ public static AllocationCommands readFrom(StreamInput in) throws IOException {
+ AllocationCommands commands = new AllocationCommands();
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ String name = in.readString();
+ commands.add(lookupFactorySafe(name).readFrom(in));
+ }
+ return commands;
+ }
+
+ /**
+ * Writes {@link AllocationCommands} to a {@link StreamOutput}
+ *
+ * @param commands Commands to write
+ * @param out {@link StreamOutput} to write the commands to
+ * @throws IOException if something happens during write
+ */
+ public static void writeTo(AllocationCommands commands, StreamOutput out) throws IOException {
+ out.writeVInt(commands.commands.size());
+ for (AllocationCommand command : commands.commands) {
+ out.writeString(command.name());
+ lookupFactorySafe(command.name()).writeTo(command, out);
+ }
+ }
+
+ /**
+ * Reads {@link AllocationCommands} from a {@link XContentParser}
+ * <pre>
+ * {
+ * "commands" : [
+ * {"allocate" : {"index" : "test", "shard" : 0, "node" : "test"}}
+ * ]
+ * }
+ * </pre>
+ * @param parser {@link XContentParser} to read the commands from
+ * @return {@link AllocationCommands} read
+ * @throws IOException if something bad happens while reading the stream
+ */
+ public static AllocationCommands fromXContent(XContentParser parser) throws IOException {
+ AllocationCommands commands = new AllocationCommands();
+
+ XContentParser.Token token = parser.currentToken();
+ if (token == null) {
+ throw new ElasticsearchParseException("No commands");
+ }
+ if (token == XContentParser.Token.FIELD_NAME) {
+ if (!parser.currentName().equals("commands")) {
+ throw new ElasticsearchParseException("expected field name to be named `commands`, got " + parser.currentName());
+ }
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_ARRAY) {
+ throw new ElasticsearchParseException("commands should follow with an array element");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ // ok...
+ } else {
+ throw new ElasticsearchParseException("expected either field name commands, or start array, got " + token);
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ // move to the command name
+ token = parser.nextToken();
+ String commandName = parser.currentName();
+ token = parser.nextToken();
+ commands.add(AllocationCommands.lookupFactorySafe(commandName).fromXContent(parser));
+ // move to the end object one
+ if ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ throw new ElasticsearchParseException("allocation command is malformed, done parsing a command, but didn't get END_OBJECT, got " + token);
+ }
+ } else {
+ throw new ElasticsearchParseException("allocation command is malformed, got token " + token);
+ }
+ }
+ return commands;
+ }
+
+ /**
+ * Writes {@link AllocationCommands} to a {@link XContentBuilder}
+ *
+ * @param commands {@link AllocationCommands} to write
+ * @param builder {@link XContentBuilder} to use
+ * @param params Parameters to use for building
+ * @throws IOException if something bad happens while building the content
+ */
+ public static void toXContent(AllocationCommands commands, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startArray("commands");
+ for (AllocationCommand command : commands.commands) {
+ builder.startObject();
+ builder.field(command.name());
+ AllocationCommands.lookupFactorySafe(command.name()).toXContent(command, builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+}
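Round-tripping a set of commands through XContent follows directly from the methods above. Note that fromXContent expects the parser to be positioned on a token already, so the caller advances it first. A sketch, assuming JsonXContent as the JSON implementation (the command values are illustrative):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;
    import org.elasticsearch.index.shard.ShardId;

    public class AllocationCommandsDemo {
        public static void main(String[] args) throws Exception {
            AllocationCommands commands = new AllocationCommands(
                    new MoveAllocationCommand(new ShardId("test", 0), "node1", "node2"),
                    new CancelAllocationCommand(new ShardId("test", 1), "node3", false));

            // serialize: toXContent writes the "commands" array into an open object
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();
            AllocationCommands.toXContent(commands, builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            String json = builder.string();

            // parse it back: advance to the "commands" field name before handing over
            XContentParser parser = JsonXContent.jsonXContent.createParser(json);
            parser.nextToken(); // START_OBJECT
            parser.nextToken(); // FIELD_NAME "commands"
            AllocationCommands parsed = AllocationCommands.fromXContent(parser);
            System.out.println(parsed.commands().size()); // 2
        }
    }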
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
new file mode 100644
index 0000000..497e6ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.command;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
+
+/**
+ * A command that cancels the relocation or recovery of a given shard on a node.
+ */
+public class CancelAllocationCommand implements AllocationCommand {
+
+ public static final String NAME = "cancel";
+
+ /**
+ * Factory creating {@link CancelAllocationCommand}s
+ */
+ public static class Factory implements AllocationCommand.Factory<CancelAllocationCommand> {
+
+ @Override
+ public CancelAllocationCommand readFrom(StreamInput in) throws IOException {
+ return new CancelAllocationCommand(ShardId.readShardId(in), in.readString(), in.readBoolean());
+ }
+
+ @Override
+ public void writeTo(CancelAllocationCommand command, StreamOutput out) throws IOException {
+ command.shardId().writeTo(out);
+ out.writeString(command.node());
+ out.writeBoolean(command.allowPrimary());
+ }
+
+ @Override
+ public CancelAllocationCommand fromXContent(XContentParser parser) throws IOException {
+ String index = null;
+ int shardId = -1;
+ String nodeId = null;
+ boolean allowPrimary = false;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("shard".equals(currentFieldName)) {
+ shardId = parser.intValue();
+ } else if ("node".equals(currentFieldName)) {
+ nodeId = parser.text();
+ } else if ("allow_primary".equals(currentFieldName) || "allowPrimary".equals(currentFieldName)) {
+ allowPrimary = parser.booleanValue();
+ } else {
+ throw new ElasticsearchParseException("[cancel] command does not support field [" + currentFieldName + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("[cancel] command does not support complex json tokens [" + token + "]");
+ }
+ }
+ if (index == null) {
+ throw new ElasticsearchParseException("[cancel] command missing the index parameter");
+ }
+ if (shardId == -1) {
+ throw new ElasticsearchParseException("[cancel] command missing the shard parameter");
+ }
+ if (nodeId == null) {
+ throw new ElasticsearchParseException("[cancel] command missing the node parameter");
+ }
+ return new CancelAllocationCommand(new ShardId(index, shardId), nodeId, allowPrimary);
+ }
+
+ @Override
+ public void toXContent(CancelAllocationCommand command, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("index", command.shardId().index());
+ builder.field("shard", command.shardId().id());
+ builder.field("node", command.node());
+ builder.field("allow_primary", command.allowPrimary());
+ builder.endObject();
+ }
+ }
+
+
+ private final ShardId shardId;
+ private final String node;
+ private final boolean allowPrimary;
+
+ /**
+ * Creates a new {@link CancelAllocationCommand}
+ *
+ * @param shardId id of the shard whose allocation should be canceled
+ * @param node id of the node that manages the shard whose allocation should be canceled
+ * @param allowPrimary whether canceling the allocation of a primary shard is allowed
+ */
+ public CancelAllocationCommand(ShardId shardId, String node, boolean allowPrimary) {
+ this.shardId = shardId;
+ this.node = node;
+ this.allowPrimary = allowPrimary;
+ }
+
+ @Override
+ public String name() {
+ return NAME;
+ }
+
+ /**
+ * Get the id of the shard whose allocation should be canceled
+ * @return id of the shard whose allocation should be canceled
+ */
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ /**
+ * Get the id of the node that manages the shard whose allocation should be canceled
+ * @return id of the node that manages the shard whose allocation should be canceled
+ */
+ public String node() {
+ return this.node;
+ }
+
+ public boolean allowPrimary() {
+ return this.allowPrimary;
+ }
+
+ @Override
+ public void execute(RoutingAllocation allocation) throws ElasticsearchException {
+ DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
+ boolean found = false;
+ for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.id()); it.hasNext(); ) {
+ MutableShardRouting shardRouting = it.next();
+ if (!shardRouting.shardId().equals(shardId)) {
+ continue;
+ }
+ found = true;
+ if (shardRouting.relocatingNodeId() != null) {
+ if (shardRouting.initializing()) {
+ // the shard is initializing and recovering from another node, simply cancel the recovery
+ it.remove();
+ // and cancel the relocating state on the node it is being relocated from
+ RoutingNode relocatingFromNode = allocation.routingNodes().node(shardRouting.relocatingNodeId());
+ if (relocatingFromNode != null) {
+ for (MutableShardRouting fromShardRouting : relocatingFromNode) {
+ if (fromShardRouting.shardId().equals(shardRouting.shardId()) && fromShardRouting.state() == RELOCATING) {
+ allocation.routingNodes().cancelRelocation(fromShardRouting);
+ break;
+ }
+ }
+ }
+ } else if (shardRouting.relocating()) {
+
+ // the shard is relocating to another node, cancel the recovery on the other node, and deallocate this one
+ if (!allowPrimary && shardRouting.primary()) {
+ // can't cancel a primary shard being initialized
+ throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + discoNode + ", shard is primary and initializing its state");
+ }
+ it.moveToUnassigned();
+ // now, go and find the shard that is initializing on the target node, and cancel it as well...
+ RoutingNodes.RoutingNodeIterator initializingNode = allocation.routingNodes().routingNodeIter(shardRouting.relocatingNodeId());
+ if (initializingNode != null) {
+ while (initializingNode.hasNext()) {
+ MutableShardRouting initializingShardRouting = initializingNode.next();
+ if (initializingShardRouting.shardId().equals(shardRouting.shardId()) && initializingShardRouting.state() == INITIALIZING) {
+ initializingNode.remove();
+ }
+ }
+ }
+ }
+ } else {
+ // the shard is not relocating; it's either started or initializing, just cancel it and move on...
+ if (!allowPrimary && shardRouting.primary()) {
+ // can't cancel a primary shard being initialized
+ throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + discoNode + ", shard is primary and started");
+ }
+ it.remove();
+ allocation.routingNodes().unassigned().add(new MutableShardRouting(shardRouting.index(), shardRouting.id(),
+ null, shardRouting.primary(), ShardRoutingState.UNASSIGNED, shardRouting.version() + 1));
+ }
+ }
+ if (!found) {
+ throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode);
+ }
+ }
+}
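For reference, the fields parsed by the Factory above give the cancel command the following shape inside a commands array, mirroring the allocate example in the AllocationCommands javadoc (the values are illustrative):

    {
      "commands" : [
        {"cancel" : {"index" : "test", "shard" : 0, "node" : "node1", "allow_primary" : false}}
      ]
    }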
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
new file mode 100644
index 0000000..1fc21e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.command;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+
+/**
+ * A command that moves a shard from a specific node to another node.<br />
+ * <b>Note:</b> The shard needs to be in the state
+ * {@link ShardRoutingState#STARTED} in order to be moved.
+ */
+public class MoveAllocationCommand implements AllocationCommand {
+
+ public static final String NAME = "move";
+
+ public static class Factory implements AllocationCommand.Factory<MoveAllocationCommand> {
+
+ @Override
+ public MoveAllocationCommand readFrom(StreamInput in) throws IOException {
+ return new MoveAllocationCommand(ShardId.readShardId(in), in.readString(), in.readString());
+ }
+
+ @Override
+ public void writeTo(MoveAllocationCommand command, StreamOutput out) throws IOException {
+ command.shardId().writeTo(out);
+ out.writeString(command.fromNode());
+ out.writeString(command.toNode());
+ }
+
+ @Override
+ public MoveAllocationCommand fromXContent(XContentParser parser) throws IOException {
+ String index = null;
+ int shardId = -1;
+ String fromNode = null;
+ String toNode = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("shard".equals(currentFieldName)) {
+ shardId = parser.intValue();
+ } else if ("from_node".equals(currentFieldName) || "fromNode".equals(currentFieldName)) {
+ fromNode = parser.text();
+ } else if ("to_node".equals(currentFieldName) || "toNode".equals(currentFieldName)) {
+ toNode = parser.text();
+ } else {
+ throw new ElasticsearchParseException("[move] command does not support field [" + currentFieldName + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("[move] command does not support complex json tokens [" + token + "]");
+ }
+ }
+ if (index == null) {
+ throw new ElasticsearchParseException("[move] command missing the index parameter");
+ }
+ if (shardId == -1) {
+ throw new ElasticsearchParseException("[move] command missing the shard parameter");
+ }
+ if (fromNode == null) {
+ throw new ElasticsearchParseException("[move] command missing the from_node parameter");
+ }
+ if (toNode == null) {
+ throw new ElasticsearchParseException("[move] command missing the to_node parameter");
+ }
+ return new MoveAllocationCommand(new ShardId(index, shardId), fromNode, toNode);
+ }
+
+ @Override
+ public void toXContent(MoveAllocationCommand command, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("index", command.shardId().index());
+ builder.field("shard", command.shardId().id());
+ builder.field("from_node", command.fromNode());
+ builder.field("to_node", command.toNode());
+ builder.endObject();
+ }
+ }
+
+ private final ShardId shardId;
+ private final String fromNode;
+ private final String toNode;
+
+ public MoveAllocationCommand(ShardId shardId, String fromNode, String toNode) {
+ this.shardId = shardId;
+ this.fromNode = fromNode;
+ this.toNode = toNode;
+ }
+
+ @Override
+ public String name() {
+ return NAME;
+ }
+
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ public String fromNode() {
+ return this.fromNode;
+ }
+
+ public String toNode() {
+ return this.toNode;
+ }
+
+ @Override
+ public void execute(RoutingAllocation allocation) throws ElasticsearchException {
+ DiscoveryNode fromDiscoNode = allocation.nodes().resolveNode(fromNode);
+ DiscoveryNode toDiscoNode = allocation.nodes().resolveNode(toNode);
+
+ boolean found = false;
+ for (MutableShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.id())) {
+ if (!shardRouting.shardId().equals(shardId)) {
+ continue;
+ }
+ found = true;
+
+            // TODO we could also support relocating shards, by cancelling the relocation and then moving...
+            if (!shardRouting.started()) {
+                throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", shard is not started (state = " + shardRouting.state() + ")");
+ }
+
+ RoutingNode toRoutingNode = allocation.routingNodes().node(toDiscoNode.id());
+ Decision decision = allocation.deciders().canAllocate(shardRouting, toRoutingNode, allocation);
+ if (decision.type() == Decision.Type.NO) {
+                throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since it's not allowed, reason: " + decision);
+ }
+ if (decision.type() == Decision.Type.THROTTLE) {
+                // it's being throttled; we could add a flag to take this into account and fail, but for now just proceed, since the "user" explicitly asked for it...
+ }
+
+ allocation.routingNodes().assign(new MutableShardRouting(shardRouting.index(), shardRouting.id(),
+ toRoutingNode.nodeId(), shardRouting.currentNodeId(), shardRouting.restoreSource(),
+ shardRouting.primary(), ShardRoutingState.INITIALIZING, shardRouting.version() + 1), toRoutingNode.nodeId());
+
+ allocation.routingNodes().relocate(shardRouting, toRoutingNode.nodeId());
+ }
+
+ if (!found) {
+ throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", failed to find it on node " + fromDiscoNode);
+ }
+ }
+}
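+
+// Usage sketch (index and node names are illustrative): the fields parsed by
+// Factory#fromXContent above correspond to a JSON body of the form
+//
+//   { "index" : "test", "shard" : 0, "from_node" : "node1", "to_node" : "node2" }
+//
+// and the equivalent command can be constructed directly:
+//
+//   MoveAllocationCommand command =
+//           new MoveAllocationCommand(new ShardId("test", 0), "node1", "node2");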
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java
new file mode 100644
index 0000000..a99e1b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * {@link AllocationDecider} is an abstract base class for making dynamic,
+ * cluster- or index-wide shard allocation decisions on a per-node
+ * basis.
+ */
+public abstract class AllocationDecider extends AbstractComponent {
+
+ /**
+ * Initializes a new {@link AllocationDecider}
+ * @param settings {@link Settings} used by this {@link AllocationDecider}
+ */
+ protected AllocationDecider(Settings settings) {
+ super(settings);
+ }
+
+ /**
+ * Returns a {@link Decision} whether the given shard routing can be
+     * re-balanced within the given allocation. The default is
+ * {@link Decision#ALWAYS}.
+ */
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return Decision.ALWAYS;
+ }
+
+ /**
+ * Returns a {@link Decision} whether the given shard routing can be
+ * allocated on the given node. The default is {@link Decision#ALWAYS}.
+ */
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return Decision.ALWAYS;
+ }
+
+ /**
+     * Returns a {@link Decision} whether the given shard routing can remain
+ * on the given node. The default is {@link Decision#ALWAYS}.
+ */
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return Decision.ALWAYS;
+ }
+
+ /**
+     * Returns a {@link Decision} whether the given shard routing can be allocated at all in the current state of the
+     * {@link RoutingAllocation}. The default is {@link Decision#ALWAYS}.
+ */
+ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return Decision.ALWAYS;
+ }
+
+ /**
+     * Returns a {@link Decision} whether the given node can accept any allocation at all in the current state of the
+     * {@link RoutingAllocation}. The default is {@link Decision#ALWAYS}.
+ */
+ public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
+ return Decision.ALWAYS;
+ }
+}
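+
+// Usage sketch: a minimal custom decider, assuming a hypothetical rule that
+// keeps shards off nodes whose id starts with "standby". Only the relevant
+// callback needs to be overridden; the others default to Decision.ALWAYS:
+//
+//   public class StandbyNodeDecider extends AllocationDecider {
+//       public StandbyNodeDecider(Settings settings) {
+//           super(settings);
+//       }
+//
+//       @Override
+//       public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+//           return node.nodeId().startsWith("standby") ? Decision.NO : Decision.YES;
+//       }
+//   }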
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
new file mode 100644
index 0000000..a74fdc7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Set;
+
+/**
+ * A composite {@link AllocationDecider} combining the "decision" of multiple
+ * {@link AllocationDecider} implementations into a single allocation decision.
+ */
+public class AllocationDeciders extends AllocationDecider {
+
+ private final AllocationDecider[] allocations;
+
+ public AllocationDeciders(Settings settings, AllocationDecider[] allocations) {
+ super(settings);
+ this.allocations = allocations;
+ }
+
+ @Inject
+ public AllocationDeciders(Settings settings, Set<AllocationDecider> allocations) {
+ this(settings, allocations.toArray(new AllocationDecider[allocations.size()]));
+ }
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ Decision.Multi ret = new Decision.Multi();
+ for (AllocationDecider allocationDecider : allocations) {
+ Decision decision = allocationDecider.canRebalance(shardRouting, allocation);
+            // short-circuit if a NO is returned.
+ if (decision == Decision.NO) {
+ if (!allocation.debugDecision()) {
+ return decision;
+ } else {
+ ret.add(decision);
+ }
+ } else if (decision != Decision.ALWAYS) {
+ ret.add(decision);
+ }
+ }
+ return ret;
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (allocation.shouldIgnoreShardForNode(shardRouting.shardId(), node.nodeId())) {
+ return Decision.NO;
+ }
+ Decision.Multi ret = new Decision.Multi();
+ for (AllocationDecider allocationDecider : allocations) {
+ Decision decision = allocationDecider.canAllocate(shardRouting, node, allocation);
+            // short-circuit if a NO is returned.
+ if (decision == Decision.NO) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
+ }
+ // short circuit only if debugging is not enabled
+ if (!allocation.debugDecision()) {
+ return decision;
+ } else {
+ ret.add(decision);
+ }
+ } else if (decision != Decision.ALWAYS) {
+                // the assumption is that a decider that returns the static instance Decision#ALWAYS
+                // does not actually implement canAllocate
+ ret.add(decision);
+ }
+ }
+ return ret;
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (allocation.shouldIgnoreShardForNode(shardRouting.shardId(), node.nodeId())) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Shard [{}] should be ignored for node [{}]", shardRouting, node.nodeId());
+ }
+ return Decision.NO;
+ }
+ Decision.Multi ret = new Decision.Multi();
+ for (AllocationDecider allocationDecider : allocations) {
+ Decision decision = allocationDecider.canRemain(shardRouting, node, allocation);
+            // short-circuit if a NO is returned.
+ if (decision == Decision.NO) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Shard [{}] can not remain on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
+ }
+ if (!allocation.debugDecision()) {
+ return decision;
+ } else {
+ ret.add(decision);
+ }
+ } else if (decision != Decision.ALWAYS) {
+ ret.add(decision);
+ }
+ }
+ return ret;
+ }
+
+ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+ Decision.Multi ret = new Decision.Multi();
+ for (AllocationDecider allocationDecider : allocations) {
+ Decision decision = allocationDecider.canAllocate(shardRouting, allocation);
+            // short-circuit if a NO is returned.
+ if (decision == Decision.NO) {
+ if (!allocation.debugDecision()) {
+ return decision;
+ } else {
+ ret.add(decision);
+ }
+ } else if (decision != Decision.ALWAYS) {
+ ret.add(decision);
+ }
+ }
+ return ret;
+ }
+
+ public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
+ Decision.Multi ret = new Decision.Multi();
+ for (AllocationDecider allocationDecider : allocations) {
+ Decision decision = allocationDecider.canAllocate(node, allocation);
+            // short-circuit if a NO is returned.
+ if (decision == Decision.NO) {
+ if (!allocation.debugDecision()) {
+ return decision;
+ } else {
+ ret.add(decision);
+ }
+ } else if (decision != Decision.ALWAYS) {
+ ret.add(decision);
+ }
+ }
+ return ret;
+ }
+}
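+
+// Usage sketch, assuming a shardRouting, routingNode and allocation in scope:
+// the composite short-circuits to NO on the first veto unless debugDecision()
+// is enabled on the allocation, in which case all decisions are collected.
+//
+//   AllocationDeciders deciders = new AllocationDeciders(settings,
+//           new AllocationDecider[] { new ClusterRebalanceAllocationDecider(settings),
+//                                     new DiskThresholdDecider(settings) });
+//   Decision decision = deciders.canAllocate(shardRouting, routingNode, allocation);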
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersModule.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersModule.java
new file mode 100644
index 0000000..6f19e95
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersModule.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.List;
+
+/**
+ * This module configures several {@link AllocationDecider}s
+ * that make configuration-specific decisions about whether shards can be allocated on certain nodes.
+ *
+ * @see Decision
+ * @see AllocationDecider
+ */
+public class AllocationDecidersModule extends AbstractModule {
+
+ private final Settings settings;
+
+ private List<Class<? extends AllocationDecider>> allocations = Lists.newArrayList();
+
+ public AllocationDecidersModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ public AllocationDecidersModule add(Class<? extends AllocationDecider> allocationDecider) {
+ this.allocations.add(allocationDecider);
+ return this;
+ }
+
+ @Override
+ protected void configure() {
+ Multibinder<AllocationDecider> allocationMultibinder = Multibinder.newSetBinder(binder(), AllocationDecider.class);
+ for (Class<? extends AllocationDecider> deciderClass : DEFAULT_ALLOCATION_DECIDERS) {
+ allocationMultibinder.addBinding().to(deciderClass);
+ }
+ for (Class<? extends AllocationDecider> allocation : allocations) {
+ allocationMultibinder.addBinding().to(allocation);
+ }
+
+ bind(AllocationDeciders.class).asEagerSingleton();
+ }
+
+ public static final ImmutableSet<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS = ImmutableSet.<Class<? extends AllocationDecider>>builder().
+ add(SameShardAllocationDecider.class).
+ add(FilterAllocationDecider.class).
+ add(ReplicaAfterPrimaryActiveAllocationDecider.class).
+ add(ThrottlingAllocationDecider.class).
+ add(RebalanceOnlyWhenActiveAllocationDecider.class).
+ add(ClusterRebalanceAllocationDecider.class).
+ add(ConcurrentRebalanceAllocationDecider.class).
+            add(EnableAllocationDecider.class). // new enable allocation logic should precede the old disable allocation logic
+ add(DisableAllocationDecider.class).
+ add(AwarenessAllocationDecider.class).
+ add(ShardsLimitAllocationDecider.class).
+ add(NodeVersionAllocationDecider.class).
+ add(DiskThresholdDecider.class).
+ add(SnapshotInProgressAllocationDecider.class).build();
+}
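+
+// Usage sketch: registering an additional decider next to the defaults
+// (StandbyNodeDecider is a hypothetical custom AllocationDecider):
+//
+//   AllocationDecidersModule module = new AllocationDecidersModule(settings)
+//           .add(StandbyNodeDecider.class);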
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
new file mode 100644
index 0000000..8e51027
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.google.common.collect.Maps;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * This {@link AllocationDecider} controls shard allocation based on
+ * <tt>awareness</tt> key-value pairs defined in the node configuration.
+ * Awareness explicitly controls where replicas should be allocated based on
+ * attributes like node or physical rack locations. Awareness attributes accept
+ * arbitrary configuration keys like a rack or data-center identifier. For example,
+ * the setting:
+ * <p/>
+ * <pre>
+ * cluster.routing.allocation.awareness.attributes: rack_id
+ * </pre>
+ * <p/>
+ * will cause allocations to be distributed over different racks such that,
+ * ideally, the copies of each shard are spread across different racks.
+ * To enable allocation awareness in this example, each node should provide a
+ * value for the <tt>rack_id</tt> key, for example:
+ * <p/>
+ * <pre>
+ * node.rack_id:1
+ * </pre>
+ * <p/>
+ * Awareness can also be used to prevent over-allocation in the case of node or
+ * even "zone" failure. For example in cloud-computing infrastructures like
+ * Amazone AWS a cluster might span over multiple "zones". Awareness can be used
+ * to distribute replicas to individual zones by setting:
+ * <p/>
+ * <pre>
+ * cluster.routing.allocation.awareness.attributes: zone
+ * </pre>
+ * <p/>
+ * and forcing allocation to be aware of the full set of zones the data may reside in:
+ * <p/>
+ * <pre>
+ * cluster.routing.allocation.awareness.force.zone.values: zone1,zone2
+ * </pre>
+ * <p/>
+ * In contrast to regular awareness, this setting will prevent over-allocation of
+ * <tt>zone1</tt> even if <tt>zone2</tt> fails partially or becomes entirely
+ * unavailable. Nodes that belong to a certain zone / group should be started
+ * with the zone id configured in the node-level settings, for example:
+ * <p/>
+ * <pre>
+ * node.zone: zone1
+ * </pre>
+ */
+public class AwarenessAllocationDecider extends AllocationDecider {
+
+ public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES = "cluster.routing.allocation.awareness.attributes";
+ public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP = "cluster.routing.allocation.awareness.force.";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null);
+ if (awarenessAttributes == null && "".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) {
+ awarenessAttributes = Strings.EMPTY_ARRAY; // the empty string resets this
+ }
+ if (awarenessAttributes != null) {
+ logger.info("updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes);
+ AwarenessAllocationDecider.this.awarenessAttributes = awarenessAttributes;
+ }
+ Map<String, String[]> forcedAwarenessAttributes = new HashMap<String, String[]>(AwarenessAllocationDecider.this.forcedAwarenessAttributes);
+ Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP);
+ if (!forceGroups.isEmpty()) {
+ for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
+ String[] aValues = entry.getValue().getAsArray("values");
+ if (aValues.length > 0) {
+ forcedAwarenessAttributes.put(entry.getKey(), aValues);
+ }
+ }
+ }
+ AwarenessAllocationDecider.this.forcedAwarenessAttributes = forcedAwarenessAttributes;
+ }
+ }
+
+ private String[] awarenessAttributes;
+
+ private Map<String, String[]> forcedAwarenessAttributes;
+
+ /**
+ * Creates a new {@link AwarenessAllocationDecider} instance
+ */
+ public AwarenessAllocationDecider() {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ /**
+ * Creates a new {@link AwarenessAllocationDecider} instance from given settings
+ *
+ * @param settings {@link Settings} to use
+ */
+ public AwarenessAllocationDecider(Settings settings) {
+ this(settings, new NodeSettingsService(settings));
+ }
+
+ @Inject
+ public AwarenessAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ this.awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES);
+
+ forcedAwarenessAttributes = Maps.newHashMap();
+ Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP);
+ for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
+ String[] aValues = entry.getValue().getAsArray("values");
+ if (aValues.length > 0) {
+ forcedAwarenessAttributes.put(entry.getKey(), aValues);
+ }
+ }
+
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ /**
+ * Get the attributes defined by this instance
+ *
+ * @return attributes defined by this instance
+ */
+ public String[] awarenessAttributes() {
+ return this.awarenessAttributes;
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return underCapacity(shardRouting, node, allocation, true);
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return underCapacity(shardRouting, node, allocation, false);
+ }
+
+ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) {
+ if (awarenessAttributes.length == 0) {
+ return allocation.decision(Decision.YES, "no allocation awareness enabled");
+ }
+
+ IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.index());
+ int shardCount = indexMetaData.numberOfReplicas() + 1; // 1 for primary
+ for (String awarenessAttribute : awarenessAttributes) {
+ // the node the shard exists on must be associated with an awareness attribute
+ if (!node.node().attributes().containsKey(awarenessAttribute)) {
+ return allocation.decision(Decision.NO, "node does not contain awareness attribute: [%s]", awarenessAttribute);
+ }
+
+ // build attr_value -> nodes map
+ ObjectIntOpenHashMap<String> nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute);
+
+ // build the count of shards per attribute value
+ ObjectIntOpenHashMap<String> shardPerAttribute = new ObjectIntOpenHashMap<String>();
+ for (MutableShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting)) {
+ // if the shard is relocating, then make sure we count it as part of the node it is relocating to
+ if (assignedShard.relocating()) {
+ RoutingNode relocationNode = allocation.routingNodes().node(assignedShard.relocatingNodeId());
+ shardPerAttribute.addTo(relocationNode.node().attributes().get(awarenessAttribute), 1);
+ } else if (assignedShard.started()) {
+ RoutingNode routingNode = allocation.routingNodes().node(assignedShard.currentNodeId());
+ shardPerAttribute.addTo(routingNode.node().attributes().get(awarenessAttribute), 1);
+ }
+ }
+
+ if (moveToNode) {
+ if (shardRouting.assignedToNode()) {
+ String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
+ if (!node.nodeId().equals(nodeId)) {
+ // we work on different nodes, move counts around
+ shardPerAttribute.putOrAdd(allocation.routingNodes().node(nodeId).node().attributes().get(awarenessAttribute), 0, -1);
+ shardPerAttribute.addTo(node.node().attributes().get(awarenessAttribute), 1);
+ }
+ } else {
+ shardPerAttribute.addTo(node.node().attributes().get(awarenessAttribute), 1);
+ }
+ }
+
+ int numberOfAttributes = nodesPerAttribute.size();
+ String[] fullValues = forcedAwarenessAttributes.get(awarenessAttribute);
+ if (fullValues != null) {
+ for (String fullValue : fullValues) {
+ if (!shardPerAttribute.containsKey(fullValue)) {
+ numberOfAttributes++;
+ }
+ }
+ }
+ // TODO should we remove ones that are not part of full list?
+
+ int averagePerAttribute = shardCount / numberOfAttributes;
+ int totalLeftover = shardCount % numberOfAttributes;
+ int requiredCountPerAttribute;
+ if (averagePerAttribute == 0) {
+                // if we have more attribute values than shards, there is no leftover
+ totalLeftover = 0;
+ requiredCountPerAttribute = 1;
+ } else {
+ requiredCountPerAttribute = averagePerAttribute;
+ }
+ int leftoverPerAttribute = totalLeftover == 0 ? 0 : 1;
+
+ int currentNodeCount = shardPerAttribute.get(node.node().attributes().get(awarenessAttribute));
+            // if we are above the required count even with the leftover, we know we are over capacity
+ if (currentNodeCount > (requiredCountPerAttribute + leftoverPerAttribute)) {
+ return allocation.decision(Decision.NO, "too many shards on nodes for attribute: [%s]", awarenessAttribute);
+ }
+            // all is well, we are at or below the average
+ if (currentNodeCount <= requiredCountPerAttribute) {
+ continue;
+ }
+ }
+
+ return allocation.decision(Decision.YES, "node meets awareness requirements");
+ }
+}
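+
+// Configuration sketch, restating the settings documented above for a
+// two-zone deployment (zone names are illustrative):
+//
+//   # elasticsearch.yml on every node
+//   cluster.routing.allocation.awareness.attributes: zone
+//   cluster.routing.allocation.awareness.force.zone.values: zone1,zone2
+//   node.zone: zone1        # use zone2 on nodes in the other zone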
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
new file mode 100644
index 0000000..0919f26
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Locale;
+
+/**
+ * This {@link AllocationDecider} controls re-balancing operations based on the
+ * cluster-wide active shard state. This decider cannot be configured at
+ * runtime and should be set before cluster start via
+ * <tt>cluster.routing.allocation.allow_rebalance</tt>. This setting accepts the following
+ * values:
+ * <ul>
+ * <li><tt>indices_primaries_active</tt> - Re-balancing is allowed only once all
+ * primary shards on all indices are active.</li>
+ *
+ * <li><tt>indices_all_active</tt> - Re-balancing is allowed only once all
+ * shards on all indices are active.</li>
+ *
+ * <li><tt>always</tt> - Re-balancing is allowed once a shard replication group
+ * is active</li>
+ * </ul>
+ */
+public class ClusterRebalanceAllocationDecider extends AllocationDecider {
+
+ /**
+ * An enum representation for the configured re-balance type.
+ */
+ public static enum ClusterRebalanceType {
+ /**
+ * Re-balancing is allowed once a shard replication group is active
+ */
+ ALWAYS,
+ /**
+ * Re-balancing is allowed only once all primary shards on all indices are active.
+ */
+ INDICES_PRIMARIES_ACTIVE,
+ /**
+ * Re-balancing is allowed only once all shards on all indices are active.
+ */
+ INDICES_ALL_ACTIVE
+ }
+
+ private final ClusterRebalanceType type;
+
+ @Inject
+ public ClusterRebalanceAllocationDecider(Settings settings) {
+ super(settings);
+ String allowRebalance = settings.get("cluster.routing.allocation.allow_rebalance", "indices_all_active");
+ if ("always".equalsIgnoreCase(allowRebalance)) {
+ type = ClusterRebalanceType.ALWAYS;
+ } else if ("indices_primaries_active".equalsIgnoreCase(allowRebalance) || "indicesPrimariesActive".equalsIgnoreCase(allowRebalance)) {
+ type = ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE;
+ } else if ("indices_all_active".equalsIgnoreCase(allowRebalance) || "indicesAllActive".equalsIgnoreCase(allowRebalance)) {
+ type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
+ } else {
+ logger.warn("[cluster.routing.allocation.allow_rebalance] has a wrong value {}, defaulting to 'indices_all_active'", allowRebalance);
+ type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
+ }
+ logger.debug("using [cluster.routing.allocation.allow_rebalance] with [{}]", type.toString().toLowerCase(Locale.ROOT));
+ }
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ if (type == ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE) {
+ // check if there are unassigned primaries.
+            if (allocation.routingNodes().hasUnassignedPrimaries()) {
+ return allocation.decision(Decision.NO, "cluster has unassigned primary shards");
+ }
+ // check if there are initializing primaries that don't have a relocatingNodeId entry.
+            if (allocation.routingNodes().hasInactivePrimaries()) {
+ return allocation.decision(Decision.NO, "cluster has inactive primary shards");
+ }
+
+ return allocation.decision(Decision.YES, "all primary shards are active");
+ }
+ if (type == ClusterRebalanceType.INDICES_ALL_ACTIVE) {
+ // check if there are unassigned shards.
+            if (allocation.routingNodes().hasUnassignedShards()) {
+ return allocation.decision(Decision.NO, "cluster has unassigned shards");
+ }
+ // in case all indices are assigned, are there initializing shards which
+ // are not relocating?
+            if (allocation.routingNodes().hasInactiveShards()) {
+ return allocation.decision(Decision.NO, "cluster has inactive shards");
+ }
+ }
+ // type == Type.ALWAYS
+ return allocation.decision(Decision.YES, "all shards are active");
+ }
+}
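+
+// Configuration sketch: this setting is read once at node startup, e.g. from
+// elasticsearch.yml, and must be one of the three documented values:
+//
+//   cluster.routing.allocation.allow_rebalance: indices_primaries_active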
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
new file mode 100644
index 0000000..3ce0566
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ * Similar to the {@link ClusterRebalanceAllocationDecider}, this
+ * {@link AllocationDecider} controls the number of currently in-progress
+ * re-balance (relocation) operations and restricts node allocations if the
+ * configured threshold is reached. The default number of concurrent rebalance
+ * operations is set to <tt>2</tt>.
+ * <p/>
+ * Re-balance operations can be controlled in real-time via the cluster update API using
+ * <tt>cluster.routing.allocation.cluster_concurrent_rebalance</tt>. If this
+ * setting is set to <tt>-1</tt> the number of concurrent re-balance operations
+ * is unlimited.
+ */
+public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
+
+ public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance);
+ if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) {
+ logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance);
+ ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance;
+ }
+ }
+ }
+
+ private volatile int clusterConcurrentRebalance;
+
+ @Inject
+ public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2);
+ logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ if (clusterConcurrentRebalance == -1) {
+ return allocation.decision(Decision.YES, "all concurrent rebalances are allowed");
+ }
+ if (allocation.routingNodes().getRelocatingShardCount() >= clusterConcurrentRebalance) {
+ return allocation.decision(Decision.NO, "too man concurrent rebalances [%d], limit: [%d]",
+ allocation.routingNodes().getRelocatingShardCount(), clusterConcurrentRebalance);
+ }
+ return allocation.decision(Decision.YES, "below threshold [%d] for concurrent rebalances", clusterConcurrentRebalance);
+ }
+}
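+
+// Usage sketch: the limit is dynamic, so it can be changed through the cluster
+// settings update API; the value of 4 below is illustrative:
+//
+//   curl -XPUT localhost:9200/_cluster/settings -d '{
+//     "transient" : { "cluster.routing.allocation.cluster_concurrent_rebalance" : 4 }
+//   }'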
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
new file mode 100644
index 0000000..5d5fba5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * This abstract class defines the basic {@link Decision} types used during the
+ * shard allocation process.
+ *
+ * @see AllocationDecider
+ */
+public abstract class Decision {
+
+ public static final Decision ALWAYS = new Single(Type.YES);
+ public static final Decision YES = new Single(Type.YES);
+ public static final Decision NO = new Single(Type.NO);
+ public static final Decision THROTTLE = new Single(Type.THROTTLE);
+
+ /**
+ * Creates a simple decision
+ * @param type {@link Type} of the decision
+ * @param explanation explanation of the decision
+ * @param explanationParams additional parameters for the decision
+ * @return new {@link Decision} instance
+ */
+ public static Decision single(Type type, String explanation, Object... explanationParams) {
+ return new Single(type, explanation, explanationParams);
+ }
+
+ /**
+ * This enumeration defines the
+ * possible types of decisions
+ */
+ public static enum Type {
+ YES,
+ NO,
+ THROTTLE
+ }
+
+ /**
+ * Get the {@link Type} of this decision
+ * @return {@link Type} of this decision
+ */
+ public abstract Type type();
+
+ /**
+ * Simple class representing a single decision
+ */
+ public static class Single extends Decision {
+ private final Type type;
+ private final String explanation;
+ private final Object[] explanationParams;
+
+ /**
+ * Creates a new {@link Single} decision of a given type
+ * @param type {@link Type} of the decision
+ */
+ public Single(Type type) {
+ this(type, null, (Object[]) null);
+ }
+
+ /**
+ * Creates a new {@link Single} decision of a given type
+ *
+ * @param type {@link Type} of the decision
+ * @param explanation An explanation of this {@link Decision}
+ * @param explanationParams A set of additional parameters
+ */
+ public Single(Type type, String explanation, Object... explanationParams) {
+ this.type = type;
+ this.explanation = explanation;
+ this.explanationParams = explanationParams;
+ }
+
+ @Override
+ public Type type() {
+ return this.type;
+ }
+
+ @Override
+ public String toString() {
+ if (explanation == null) {
+ return type + "()";
+ }
+ return type + "(" + String.format(Locale.ROOT, explanation, explanationParams) + ")";
+ }
+ }
+
+ /**
+ * Simple class representing a list of decisions
+ */
+ public static class Multi extends Decision {
+
+ private final List<Decision> decisions = Lists.newArrayList();
+
+ /**
+         * Adds a decision to this {@link Multi} decision instance
+         * @param decision {@link Decision} to add
+         * @return this {@link Multi} decision instance with the given decision added
+ */
+ public Multi add(Decision decision) {
+ decisions.add(decision);
+ return this;
+ }
+
+ @Override
+ public Type type() {
+ Type ret = Type.YES;
+ for (int i = 0; i < decisions.size(); i++) {
+ Type type = decisions.get(i).type();
+ if (type == Type.NO) {
+ return type;
+ } else if (type == Type.THROTTLE) {
+ ret = type;
+ }
+ }
+ return ret;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (Decision decision : decisions) {
+ sb.append("[").append(decision.toString()).append("]");
+ }
+ return sb.toString();
+ }
+ }
+}
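+
+// Usage sketch: a Multi decision is NO if any part is NO, THROTTLE if any part
+// throttles (and none is NO), and YES otherwise. Explanations are illustrative:
+//
+//   Decision.Multi multi = new Decision.Multi()
+//           .add(Decision.single(Decision.Type.YES, "enough disk"))
+//           .add(Decision.single(Decision.Type.THROTTLE, "recovery limit reached"));
+//   assert multi.type() == Decision.Type.THROTTLE;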
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DisableAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DisableAllocationDecider.java
new file mode 100644
index 0000000..8382626
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DisableAllocationDecider.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ * This {@link AllocationDecider} prevents cluster-wide shard allocations. The
+ * behavior of this {@link AllocationDecider} can be changed in real-time via
+ * the cluster settings API. It respects the following settings:
+ * <ul>
+ * <li><tt>cluster.routing.allocation.disable_new_allocation</tt> - if set to
+ * <code>true</code> no new shard allocations are allowed. Note: this setting is
+ * only applied if the allocated shard is a primary and it has not been
+ * allocated before this setting was applied.</li>
+ * <p/>
+ * <li><tt>cluster.routing.allocation.disable_allocation</tt> - if set to
+ * <code>true</code> cluster-wide allocations are disabled</li>
+ * <p/>
+ * <li><tt>cluster.routing.allocation.disable_replica_allocation</tt> - if set
+ * to <code>true</code> cluster-wide replica allocations are disabled while
+ * primary shards can still be allocated</li>
+ * </ul>
+ * <p/>
+ * <p>
+ * Note: all of the above settings might be ignored if the allocation happens on
+ * a shard that explicitly ignores disabled allocations via
+ * {@link RoutingAllocation#ignoreDisable()}, which is set if the allocation is
+ * explicit.
+ * </p>
+ *
+ * @deprecated In favour of {@link EnableAllocationDecider}.
+ */
+@Deprecated
+public class DisableAllocationDecider extends AllocationDecider {
+
+ public static final String CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION = "cluster.routing.allocation.disable_new_allocation";
+ public static final String CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION = "cluster.routing.allocation.disable_allocation";
+ public static final String CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION = "cluster.routing.allocation.disable_replica_allocation";
+
+ public static final String INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION = "index.routing.allocation.disable_new_allocation";
+ public static final String INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION = "index.routing.allocation.disable_allocation";
+ public static final String INDEX_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION = "index.routing.allocation.disable_replica_allocation";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ boolean disableNewAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, DisableAllocationDecider.this.disableNewAllocation);
+ if (disableNewAllocation != DisableAllocationDecider.this.disableNewAllocation) {
+ logger.info("updating [cluster.routing.allocation.disable_new_allocation] from [{}] to [{}]", DisableAllocationDecider.this.disableNewAllocation, disableNewAllocation);
+ DisableAllocationDecider.this.disableNewAllocation = disableNewAllocation;
+ }
+
+ boolean disableAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, DisableAllocationDecider.this.disableAllocation);
+ if (disableAllocation != DisableAllocationDecider.this.disableAllocation) {
+ logger.info("updating [cluster.routing.allocation.disable_allocation] from [{}] to [{}]", DisableAllocationDecider.this.disableAllocation, disableAllocation);
+ DisableAllocationDecider.this.disableAllocation = disableAllocation;
+ }
+
+ boolean disableReplicaAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION, DisableAllocationDecider.this.disableReplicaAllocation);
+ if (disableReplicaAllocation != DisableAllocationDecider.this.disableReplicaAllocation) {
+ logger.info("updating [cluster.routing.allocation.disable_replica_allocation] from [{}] to [{}]", DisableAllocationDecider.this.disableReplicaAllocation, disableReplicaAllocation);
+ DisableAllocationDecider.this.disableReplicaAllocation = disableReplicaAllocation;
+ }
+ }
+ }
+
+ private volatile boolean disableNewAllocation;
+ private volatile boolean disableAllocation;
+ private volatile boolean disableReplicaAllocation;
+
+ @Inject
+ public DisableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ this.disableNewAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, false);
+ this.disableAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, false);
+ this.disableReplicaAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION, false);
+
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (allocation.ignoreDisable()) {
+ return allocation.decision(Decision.YES, "allocation disabling is ignored");
+ }
+ Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).settings();
+ if (shardRouting.primary() && !allocation.routingNodes().routingTable().index(shardRouting.index()).shard(shardRouting.id()).primaryAllocatedPostApi()) {
+            // if it's a primary and it hasn't been allocated post-API (meaning it's a freshly created shard), only disable allocation
+            // based on the dedicated disable-new-allocation flag
+ if (indexSettings.getAsBoolean(INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, disableNewAllocation)) {
+ return allocation.decision(Decision.NO, "new primary allocation is disabled");
+ } else {
+ return allocation.decision(Decision.YES, "new primary allocation is enabled");
+ }
+ }
+ if (indexSettings.getAsBoolean(INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION, disableAllocation)) {
+ return allocation.decision(Decision.NO, "all allocation is disabled");
+ }
+ if (indexSettings.getAsBoolean(INDEX_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION, disableReplicaAllocation)) {
+ if (shardRouting.primary()) {
+ return allocation.decision(Decision.YES, "primary allocation is enabled");
+ } else {
+ return allocation.decision(Decision.NO, "replica allocation is disabled");
+ }
+ }
+ return allocation.decision(Decision.YES, "all allocation is enabled");
+ }
+}
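+
+// Configuration sketch for the deprecated cluster-wide switch above; the
+// index.* variants apply the same flags per index:
+//
+//   curl -XPUT localhost:9200/_cluster/settings -d '{
+//     "transient" : { "cluster.routing.allocation.disable_allocation" : true }
+//   }'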
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
new file mode 100644
index 0000000..109c912
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.util.Map;
+
+import static org.elasticsearch.cluster.InternalClusterInfoService.shardIdentifierFromRouting;
+
+/**
+ * The {@link DiskThresholdDecider} checks that the node a shard is potentially
+ * being allocated to has enough disk space.
+ *
+ * It has three configurable settings, all of which can be changed dynamically:
+ *
+ * <code>cluster.routing.allocation.disk.watermark.low</code> is the low disk
+ * watermark. New shards will not be allocated to a node with usage higher than this,
+ * although this watermark may be exceeded by allocating a shard. It defaults to
+ * 0.70 (70.0%).
+ *
+ * <code>cluster.routing.allocation.disk.watermark.high</code> is the high disk
+ * watermark. If a node has usage higher than this, shards are not allowed to
+ * remain on the node. In addition, if allocating a shard to a node causes the
+ * node to pass this watermark, it will not be allowed. It defaults to
+ * 0.85 (85.0%).
+ *
+ * Both watermark settings are expressed in terms of used disk percentage, or
+ * exact byte values for free space (like "500mb").
+ *
+ * <code>cluster.routing.allocation.disk.threshold_enabled</code> is used to
+ * enable or disable this decider. It defaults to false (disabled).
+ */
+public class DiskThresholdDecider extends AllocationDecider {
+
+ private volatile Double freeDiskThresholdLow;
+ private volatile Double freeDiskThresholdHigh;
+ private volatile ByteSizeValue freeBytesThresholdLow;
+ private volatile ByteSizeValue freeBytesThresholdHigh;
+ private volatile boolean enabled;
+
+ public static final String CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED = "cluster.routing.allocation.disk.threshold_enabled";
+ public static final String CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.low";
+ public static final String CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.high";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null);
+ String newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null);
+ Boolean newEnableSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
+
+ if (newEnableSetting != null) {
+ logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED,
+ DiskThresholdDecider.this.enabled, newEnableSetting);
+ DiskThresholdDecider.this.enabled = newEnableSetting;
+ }
+ if (newLowWatermark != null) {
+ if (!validWatermarkSetting(newLowWatermark)) {
+ throw new ElasticsearchParseException("Unable to parse low watermark: [" + newLowWatermark + "]");
+ }
+ logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark);
+ DiskThresholdDecider.this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(newLowWatermark);
+ DiskThresholdDecider.this.freeBytesThresholdLow = thresholdBytesFromWatermark(newLowWatermark);
+ }
+ if (newHighWatermark != null) {
+ if (!validWatermarkSetting(newHighWatermark)) {
+ throw new ElasticsearchParseException("Unable to parse high watermark: [" + newHighWatermark + "]");
+ }
+ logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark);
+ DiskThresholdDecider.this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(newHighWatermark);
+ DiskThresholdDecider.this.freeBytesThresholdHigh = thresholdBytesFromWatermark(newHighWatermark);
+ }
+ }
+ }
+
+ public DiskThresholdDecider(Settings settings) {
+ this(settings, new NodeSettingsService(settings));
+ }
+
+ @Inject
+ public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "0.7");
+ String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "0.85");
+
+ if (!validWatermarkSetting(lowWatermark)) {
+ throw new ElasticsearchParseException("Unable to parse low watermark: [" + lowWatermark + "]");
+ }
+ if (!validWatermarkSetting(highWatermark)) {
+ throw new ElasticsearchParseException("Unable to parse high watermark: [" + highWatermark + "]");
+ }
+ // Watermark is expressed in terms of used data, but we need "free" data watermark
+ this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
+ this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
+
+ this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark);
+ this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark);
+
+ this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false);
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
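+    // Usage sketch: both watermarks accept either a used-disk percentage or an
+    // absolute free-bytes value, and all three settings are dynamic; the values
+    // below are illustrative:
+    //
+    //   curl -XPUT localhost:9200/_cluster/settings -d '{
+    //     "transient" : {
+    //       "cluster.routing.allocation.disk.threshold_enabled" : true,
+    //       "cluster.routing.allocation.disk.watermark.low" : "0.80",
+    //       "cluster.routing.allocation.disk.watermark.high" : "500mb"
+    //     }
+    //   }'
+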
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (!enabled) {
+ return allocation.decision(Decision.YES, "disk threshold decider disabled");
+ }
+        // Allow allocation regardless of usage if only a single node is available
+ if (allocation.nodes().size() <= 1) {
+ return allocation.decision(Decision.YES, "only a single node is present");
+ }
+
+ ClusterInfo clusterInfo = allocation.clusterInfo();
+ if (clusterInfo == null) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Cluster info unavailable for disk threshold decider, allowing allocation.");
+ }
+ return allocation.decision(Decision.YES, "cluster info unavailable");
+ }
+
+ Map<String, DiskUsage> usages = clusterInfo.getNodeDiskUsages();
+ Map<String, Long> shardSizes = clusterInfo.getShardSizes();
+ if (usages.isEmpty()) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Unable to determine disk usages for disk-aware allocation, allowing allocation");
+ }
+ return allocation.decision(Decision.YES, "disk usages unavailable");
+ }
+
+ DiskUsage usage = usages.get(node.nodeId());
+ if (usage == null) {
+ // If there is no usage, and we have other nodes in the cluster,
+ // use the average usage for all nodes as the usage for this node
+ usage = averageUsage(node, usages);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Unable to determine disk usage for [{}], defaulting to average across nodes [{} total] [{} free] [{}% free]",
+ node.nodeId(), usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeDiskAsPercentage());
+ }
+ }
+
+ // First, check whether the node is currently over the low watermark
+ double freeDiskPercentage = usage.getFreeDiskAsPercentage();
+ long freeBytes = usage.getFreeBytes();
+ if (logger.isDebugEnabled()) {
+ logger.debug("Node [{}] has {}% free disk", node.nodeId(), freeDiskPercentage);
+ }
+ if (freeBytes < freeBytesThresholdLow.bytes()) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation",
+ freeBytesThresholdLow, freeBytes, node.nodeId());
+ }
+ return allocation.decision(Decision.NO, "less than required [%s] free on node, free: [%s]",
+ freeBytesThresholdLow, new ByteSizeValue(freeBytes));
+ }
+ if (freeDiskPercentage < freeDiskThresholdLow) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Less than the required {}% free disk threshold ({}% free) on node [{}], preventing allocation",
+ freeDiskThresholdLow, freeDiskPercentage, node.nodeId());
+ }
+ return allocation.decision(Decision.NO, "less than required [%d%%] free disk on node, free: [%d%%]",
+ freeDiskThresholdLow, freeDiskPercentage);
+ }
+
+ // Secondly, check that allocating the shard to this node doesn't put it above the high watermark
+ Long shardSize = shardSizes.get(shardIdentifierFromRouting(shardRouting));
+ shardSize = shardSize == null ? 0 : shardSize;
+ double freeSpaceAfterShard = this.freeDiskPercentageAfterShardAssigned(usage, shardSize);
+ long freeBytesAfterShard = freeBytes - shardSize;
+ if (freeBytesAfterShard < freeBytesThresholdHigh.bytes()) {
+ logger.warn("After allocating, node [{}] would have less than the required {} free bytes threshold ({} bytes free), preventing allocation",
+ node.nodeId(), freeBytesThresholdHigh, freeBytesAfterShard);
+ return allocation.decision(Decision.NO, "after allocation less than required [%s] free on node, free: [%s]",
+ freeBytesThresholdHigh, new ByteSizeValue(freeBytesAfterShard));
+ }
+ if (freeSpaceAfterShard < freeDiskThresholdHigh) {
+ logger.warn("After allocating, node [{}] would have less than the required {}% free disk threshold ({}% free), preventing allocation",
+ node.nodeId(), freeDiskThresholdHigh, freeSpaceAfterShard);
+ return allocation.decision(Decision.NO, "after allocation less than required [%d%%] free disk on node, free: [%d%%]",
+ freeDiskThresholdHigh, freeSpaceAfterShard);
+ }
+
+ return allocation.decision(Decision.YES, "enough disk for shard on node, free: [%s]", new ByteSizeValue(freeBytes));
+ }
+
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (!enabled) {
+ return allocation.decision(Decision.YES, "disk threshold decider disabled");
+ }
+ // Allow allocation regardless of disk usage if only a single node is available
+ if (allocation.nodes().size() <= 1) {
+ return allocation.decision(Decision.YES, "only a single node is present");
+ }
+
+ ClusterInfo clusterInfo = allocation.clusterInfo();
+ if (clusterInfo == null) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Cluster info unavailable for disk threshold decider, allowing allocation.");
+ }
+ return allocation.decision(Decision.YES, "cluster info unavailable");
+ }
+
+ Map<String, DiskUsage> usages = clusterInfo.getNodeDiskUsages();
+ if (usages.isEmpty()) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Unable to determine disk usages for disk-aware allocation, allowing allocation");
+ }
+ return allocation.decision(Decision.YES, "disk usages unavailable");
+ }
+
+ DiskUsage usage = usages.get(node.nodeId());
+ if (usage == null) {
+ // If there is no usage, and we have other nodes in the cluster,
+ // use the average usage for all nodes as the usage for this node
+ usage = averageUsage(node, usages);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]",
+ node.nodeId(), usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeDiskAsPercentage());
+ }
+ }
+
+ // If this node is already above the high threshold, the shard cannot remain (get it off!)
+ double freeDiskPercentage = usage.getFreeDiskAsPercentage();
+ long freeBytes = usage.getFreeBytes();
+ if (logger.isDebugEnabled()) {
+ logger.debug("Node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
+ }
+ if (freeBytes < freeBytesThresholdHigh.bytes()) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain",
+ freeBytesThresholdHigh, freeBytes, node.nodeId());
+ }
+ return allocation.decision(Decision.NO, "after allocation less than required [%s] free on node, free: [%s]",
+ freeBytesThresholdHigh, new ByteSizeValue(freeBytes));
+ }
+ if (freeDiskPercentage < freeDiskThresholdHigh) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain",
+ freeDiskThresholdHigh, freeDiskPercentage, node.nodeId());
+ }
+ return allocation.decision(Decision.NO, "after allocation less than required [%d%%] free disk on node, free: [%d%%]",
+ freeDiskThresholdHigh, freeDiskPercentage);
+ }
+
+ return allocation.decision(Decision.YES, "enough disk for shard to remain on node, free: [%s]", new ByteSizeValue(freeBytes));
+ }
+
+ /**
+ * Returns a {@link DiskUsage} for the {@link RoutingNode} using the
+ * average usage of other nodes in the disk usage map.
+ * @param node Node to return an averaged DiskUsage object for
+ * @param usages Map of nodeId to DiskUsage for all known nodes
+ * @return DiskUsage representing given node using the average disk usage
+ */
+ public DiskUsage averageUsage(RoutingNode node, Map<String, DiskUsage> usages) {
+ long totalBytes = 0;
+ long freeBytes = 0;
+ for (DiskUsage du : usages.values()) {
+ totalBytes += du.getTotalBytes();
+ freeBytes += du.getFreeBytes();
+ }
+ return new DiskUsage(node.nodeId(), totalBytes / usages.size(), freeBytes / usages.size());
+ }
+
+ /**
+ * Given the DiskUsage for a node and the size of the shard, return the
+ * percentage of free disk if the shard were to be allocated to the node.
+ * @param usage A DiskUsage for the node to have space computed for
+ * @param shardSize Size in bytes of the shard
+ * @return Percentage of free space after the shard is assigned to the node
+ */
+ public double freeDiskPercentageAfterShardAssigned(DiskUsage usage, Long shardSize) {
+ shardSize = (shardSize == null) ? 0 : shardSize;
+ return 100.0 - (((double)(usage.getUsedBytes() + shardSize) / usage.getTotalBytes()) * 100.0);
+ }
+
+ /**
+ * Attempts to parse the watermark into a percentage, returning 100.0% if
+ * it cannot be parsed.
+ */
+ public double thresholdPercentageFromWatermark(String watermark) {
+ try {
+ return 100.0 * Double.parseDouble(watermark);
+ } catch (NumberFormatException ex) {
+ return 100.0;
+ }
+ }
+
+ /**
+ * Attempts to parse the watermark into a {@link ByteSizeValue}, returning
+ * a ByteSizeValue of 0 bytes if the value cannot be parsed.
+ */
+ public ByteSizeValue thresholdBytesFromWatermark(String watermark) {
+ try {
+ return ByteSizeValue.parseBytesSizeValue(watermark);
+ } catch (ElasticsearchParseException ex) {
+ return ByteSizeValue.parseBytesSizeValue("0b");
+ }
+ }
+
+ /**
+ * Checks if a watermark string is a valid percentage or byte size value,
+ * returning true if valid, false if invalid.
+ */
+ public boolean validWatermarkSetting(String watermark) {
+ try {
+ double w = Double.parseDouble(watermark);
+ return w >= 0 && w <= 1.0;
+ } catch (NumberFormatException e) {
+ try {
+ ByteSizeValue.parseBytesSizeValue(watermark);
+ return true;
+ } catch (ElasticsearchParseException ex) {
+ return false;
+ }
+ }
+ }
+}
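
The decider's arithmetic is easy to check in isolation. Below is a minimal, dependency-free sketch of the free-disk-after-shard computation used by canAllocate above; the class and method names are illustrative, not part of the Elasticsearch API.

    public final class DiskDeciderSketch {

        // Mirrors freeDiskPercentageAfterShardAssigned: percentage of free disk
        // remaining if a shard of the given size were assigned to the node.
        static double freeAfterShard(long totalBytes, long usedBytes, long shardSizeBytes) {
            return 100.0 - (((double) (usedBytes + shardSizeBytes) / totalBytes) * 100.0);
        }

        public static void main(String[] args) {
            long total = 100L * 1024 * 1024 * 1024; // 100 GB disk
            long used  =  80L * 1024 * 1024 * 1024; // 80 GB already used
            long shard =  10L * 1024 * 1024 * 1024; // a 10 GB shard
            // 90 GB used after assignment leaves 10% free; with a 0.85 high
            // watermark (15% free required) this allocation would be rejected.
            System.out.printf("free after shard: %.1f%%%n", freeAfterShard(total, used, shard));
        }
    }
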
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
new file mode 100644
index 0000000..c0871ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.util.Locale;
+
+/**
+ * This allocation decider allows shard allocations via the cluster wide setting {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE}
+ * and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE}. The per index setting overrides the cluster wide
+ * setting.
+ *
+ * Both settings can have the following values:
+ * <ul>
+ * <li> <code>NONE</code>, no shard allocation is allowed.
+ * <li> <code>NEW_PRIMARIES</code> only primary shards of new indices are allowed to be allocated
+ * <li> <code>PRIMARIES</code> only primary shards (of any index) are allowed to be allocated
+ * <li> <code>ALL</code> all shards are allowed to be allocated
+ * </ul>
+ */
+public class EnableAllocationDecider extends AllocationDecider implements NodeSettingsService.Listener {
+
+ public static final String CLUSTER_ROUTING_ALLOCATION_ENABLE = "cluster.routing.allocation.enable";
+ public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable";
+
+ private volatile Allocation enable;
+
+ @Inject
+ public EnableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ this.enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.ALL.name()));
+ nodeSettingsService.addListener(this);
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (allocation.ignoreDisable()) {
+ return allocation.decision(Decision.YES, "allocation disabling is ignored");
+ }
+
+ Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).settings();
+ String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE);
+ final Allocation enable;
+ if (enableIndexValue != null) {
+ enable = Allocation.parse(enableIndexValue);
+ } else {
+ enable = this.enable;
+ }
+ switch (enable) {
+ case ALL:
+ return allocation.decision(Decision.YES, "all allocations are allowed");
+ case NONE:
+ return allocation.decision(Decision.NO, "no allocations are allowed");
+ case NEW_PRIMARIES:
+ if (shardRouting.primary() && !allocation.routingNodes().routingTable().index(shardRouting.index()).shard(shardRouting.id()).primaryAllocatedPostApi()) {
+ return allocation.decision(Decision.YES, "new primary allocations are allowed");
+ } else {
+ return allocation.decision(Decision.NO, "non-new primary allocations are disallowed");
+ }
+ case PRIMARIES:
+ if (shardRouting.primary()) {
+ return allocation.decision(Decision.YES, "primary allocations are allowed");
+ } else {
+ return allocation.decision(Decision.NO, "replica allocations are disallowed");
+ }
+ default:
+ throw new ElasticsearchIllegalStateException("Unknown allocation option");
+ }
+ }
+
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ Allocation enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enable.name()));
+ if (enable != this.enable) {
+ logger.info("updating [cluster.routing.allocation.enable] from [{}] to [{}]", this.enable, enable);
+ EnableAllocationDecider.this.enable = enable;
+ }
+ }
+
+ public enum Allocation {
+
+ NONE,
+ NEW_PRIMARIES,
+ PRIMARIES,
+ ALL;
+
+ public static Allocation parse(String strValue) {
+ if (strValue == null) {
+ return null;
+ } else {
+ strValue = strValue.toUpperCase(Locale.ROOT);
+ try {
+ return Allocation.valueOf(strValue);
+ } catch (IllegalArgumentException e) {
+ throw new ElasticsearchIllegalArgumentException("Illegal allocation.enable value [" + strValue + "]");
+ }
+ }
+ }
+ }
+
+}
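
The Allocation enum's parse contract is small enough to exercise standalone. A sketch that mirrors (rather than reuses) the enum above:

    import java.util.Locale;

    public final class EnableParseSketch {

        enum Allocation {
            NONE, NEW_PRIMARIES, PRIMARIES, ALL;

            static Allocation parse(String strValue) {
                if (strValue == null) {
                    return null; // caller falls back to the cluster wide value
                }
                // valueOf throws IllegalArgumentException for unknown values, where
                // the real decider wraps it in ElasticsearchIllegalArgumentException
                return Allocation.valueOf(strValue.toUpperCase(Locale.ROOT));
            }
        }

        public static void main(String[] args) {
            System.out.println(Allocation.parse("new_primaries")); // NEW_PRIMARIES, case-insensitive
            System.out.println(Allocation.parse(null));            // null
        }
    }
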
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
new file mode 100644
index 0000000..dc1b983
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
+
+/**
+ * This {@link AllocationDecider} controls shard allocation through include and
+ * exclude filters set via dynamic cluster and index routing settings.
+ * <p>
+ * This filter is used to make explicit decisions on which nodes certain shards
+ * can or should be allocated. The decision whether a shard can be allocated, must not
+ * be allocated, or should be allocated is based on either cluster wide dynamic
+ * settings (<tt>cluster.routing.allocation.*</tt>) or index specific dynamic
+ * settings (<tt>index.routing.allocation.*</tt>). All of these settings can be
+ * changed at runtime via the cluster or the index update settings API.
+ * </p>
+ * Note: Cluster settings are applied first and override index specific
+ * settings, so even if a shard could be allocated according to the index routing
+ * settings, it won't be allocated on a node if the cluster wide settings
+ * would disallow the allocation. Filters are applied in the following order:
+ * <ol>
+ * <li><tt>required</tt> - filters required allocations.
+ * If any <tt>required</tt> filters are set, the allocation is denied unless the node matches all of them</li>
+ * <p/>
+ * <li><tt>include</tt> - filters "allowed" allocations.
+ * If any <tt>include</tt> filters are set, the allocation is denied unless the node matches at least one of them</li>
+ * <p/>
+ * <li><tt>exclude</tt> - filters "prohibited" allocations.
+ * If any <tt>exclude</tt> filters are set, the allocation is denied if the node matches any of them</li>
+ * </ol>
+ */
+public class FilterAllocationDecider extends AllocationDecider {
+
+ public static final String INDEX_ROUTING_REQUIRE_GROUP = "index.routing.allocation.require.";
+ public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include.";
+ public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude.";
+
+ public static final String CLUSTER_ROUTING_REQUIRE_GROUP = "cluster.routing.allocation.require.";
+ public static final String CLUSTER_ROUTING_INCLUDE_GROUP = "cluster.routing.allocation.include.";
+ public static final String CLUSTER_ROUTING_EXCLUDE_GROUP = "cluster.routing.allocation.exclude.";
+
+ private volatile DiscoveryNodeFilters clusterRequireFilters;
+ private volatile DiscoveryNodeFilters clusterIncludeFilters;
+ private volatile DiscoveryNodeFilters clusterExcludeFilters;
+
+ @Inject
+ public FilterAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ ImmutableMap<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap();
+ if (requireMap.isEmpty()) {
+ clusterRequireFilters = null;
+ } else {
+ clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
+ }
+ ImmutableMap<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap();
+ if (includeMap.isEmpty()) {
+ clusterIncludeFilters = null;
+ } else {
+ clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
+ }
+ ImmutableMap<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap();
+ if (excludeMap.isEmpty()) {
+ clusterExcludeFilters = null;
+ } else {
+ clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
+ }
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return shouldFilter(shardRouting, node, allocation);
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return shouldFilter(shardRouting, node, allocation);
+ }
+
+ private Decision shouldFilter(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (clusterRequireFilters != null) {
+ if (!clusterRequireFilters.match(node.node())) {
+ return allocation.decision(Decision.NO, "node does not match global required filters [%s]", clusterRequireFilters);
+ }
+ }
+ if (clusterIncludeFilters != null) {
+ if (!clusterIncludeFilters.match(node.node())) {
+ return allocation.decision(Decision.NO, "node does not match global include filters [%s]", clusterIncludeFilters);
+ }
+ }
+ if (clusterExcludeFilters != null) {
+ if (clusterExcludeFilters.match(node.node())) {
+ return allocation.decision(Decision.NO, "node matches global exclude filters [%s]", clusterExcludeFilters);
+ }
+ }
+
+ IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
+ if (indexMd.requireFilters() != null) {
+ if (!indexMd.requireFilters().match(node.node())) {
+ return allocation.decision(Decision.NO, "node does not match index required filters [%s]", indexMd.requireFilters());
+ }
+ }
+ if (indexMd.includeFilters() != null) {
+ if (!indexMd.includeFilters().match(node.node())) {
+ return allocation.decision(Decision.NO, "node does not match index include filters [%s]", indexMd.includeFilters());
+ }
+ }
+ if (indexMd.excludeFilters() != null) {
+ if (indexMd.excludeFilters().match(node.node())) {
+ return allocation.decision(Decision.NO, "node matches index exclude filters [%s]", indexMd.excludeFilters());
+ }
+ }
+
+ return allocation.decision(Decision.YES, "node passes include/exclude/require filters");
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ ImmutableMap<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap();
+ if (!requireMap.isEmpty()) {
+ clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
+ }
+ ImmutableMap<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap();
+ if (!includeMap.isEmpty()) {
+ clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
+ }
+ ImmutableMap<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap();
+ if (!excludeMap.isEmpty()) {
+ clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
+ }
+ }
+ }
+}
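
The require -> include -> exclude ordering in shouldFilter can be illustrated with plain predicates standing in for DiscoveryNodeFilters; a sketch under that simplification:

    import java.util.function.Predicate;

    public final class FilterOrderSketch {

        // Same short-circuit order as shouldFilter: require, then include, then exclude.
        static String decide(Predicate<String> require, Predicate<String> include,
                             Predicate<String> exclude, String nodeName) {
            if (require != null && !require.test(nodeName)) {
                return "NO: node does not match required filters";
            }
            if (include != null && !include.test(nodeName)) {
                return "NO: node does not match include filters";
            }
            if (exclude != null && exclude.test(nodeName)) {
                return "NO: node matches exclude filters";
            }
            return "YES";
        }

        public static void main(String[] args) {
            // e.g. a cluster.routing.allocation.exclude.name setting of "node2"
            Predicate<String> exclude = "node2"::equals;
            System.out.println(decide(null, null, exclude, "node1")); // YES
            System.out.println(decide(null, null, exclude, "node2")); // NO: node matches exclude filters
        }
    }
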
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
new file mode 100644
index 0000000..9723a6a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An allocation decider that prevents relocation or allocation from nodes
+ * that might not be version compatible. If we relocate from a node that runs
+ * a newer version than the node we relocate to this might cause {@link org.apache.lucene.index.IndexFormatTooNewException}
+ * on the lowest level since it might have already written segments that use a new postings format or codec that is not
+ * available on the target node.
+ */
+public class NodeVersionAllocationDecider extends AllocationDecider {
+
+ @Inject
+ public NodeVersionAllocationDecider(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ String sourceNodeId = shardRouting.currentNodeId();
+ /* if sourceNodeId is not null we are doing a relocation and just check the version of the node
+ * that we are currently allocated on. If it is null we are initializing and recover from the primary.*/
+ if (sourceNodeId == null) { // we allocate - check primary
+ if (shardRouting.primary()) {
+ // we are the primary - we can allocate wherever
+ return allocation.decision(Decision.YES, "primary shard can be allocated anywhere");
+ }
+ final MutableShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
+ if (primary == null) { // we have no active primary yet - it's a start ;)
+ return allocation.decision(Decision.YES, "no active primary shard yet");
+ }
+ sourceNodeId = primary.currentNodeId();
+ }
+ return isVersionCompatible(allocation.routingNodes(), sourceNodeId, node, allocation);
+
+ }
+
+ private Decision isVersionCompatible(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target, RoutingAllocation allocation) {
+ final RoutingNode source = routingNodes.node(sourceNodeId);
+ if (target.node().version().onOrAfter(source.node().version())) {
+ /* we can allocate if the target node's version is the same as or newer than the source's;
+ * if the primary is already running on a newer version, recovering onto an older node
+ * won't work due to possible differences in the lucene index format etc.*/
+ return allocation.decision(Decision.YES, "target node version [%s] is same or newer than source node version [%s]",
+ target.node().version(), source.node().version());
+ } else {
+ return allocation.decision(Decision.NO, "target node version [%s] is older than source node version [%s]",
+ target.node().version(), source.node().version());
+ }
+ }
+}
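
The onOrAfter check reduces to an ordinary version-tuple comparison. A dependency-free sketch of the rule (names and the major.minor.patch representation are illustrative):

    public final class VersionCheckSketch {

        // Recovery is allowed only when the target's version is on or after the source's.
        static boolean canRecover(int[] sourceVersion, int[] targetVersion) {
            for (int i = 0; i < 3; i++) {
                if (targetVersion[i] != sourceVersion[i]) {
                    return targetVersion[i] > sourceVersion[i];
                }
            }
            return true; // identical versions
        }

        public static void main(String[] args) {
            // 1.0.2 -> 1.0.3 is fine; 1.0.3 -> 1.0.2 risks IndexFormatTooNewException
            System.out.println(canRecover(new int[]{1, 0, 2}, new int[]{1, 0, 3})); // true
            System.out.println(canRecover(new int[]{1, 0, 3}, new int[]{1, 0, 2})); // false
        }
    }
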
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
new file mode 100644
index 0000000..750eb7e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Only allow rebalancing when all shards are active within the shard replication group.
+ */
+public class RebalanceOnlyWhenActiveAllocationDecider extends AllocationDecider {
+
+ @Inject
+ public RebalanceOnlyWhenActiveAllocationDecider(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ // it's ok to check for active here, since during relocation a shard is split into two in routing
+ // nodes, one relocating and one initializing
+ if (!allocation.routingNodes().allReplicasActive(shardRouting)) {
+ return allocation.decision(Decision.NO, "not all replicas are active in cluster");
+ }
+ return allocation.decision(Decision.YES, "all replicas are active in cluster");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
new file mode 100644
index 0000000..ce4a5fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An allocation strategy that only allows for a replica to be allocated when the primary is active.
+ */
+public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecider {
+
+ @Inject
+ public ReplicaAfterPrimaryActiveAllocationDecider(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return canAllocate(shardRouting, allocation);
+ }
+
+ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+ if (shardRouting.primary()) {
+ return allocation.decision(Decision.YES, "shard is primary");
+ }
+ MutableShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
+ if (primary == null) {
+ return allocation.decision(Decision.NO, "primary shard is not yet active");
+ }
+ return allocation.decision(Decision.YES, "primary is already active");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
new file mode 100644
index 0000000..d97be4c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An allocation decider that prevents multiple instances of the same shard from
+ * being allocated on the same <tt>node</tt>.
+ *
+ * The {@value #SAME_HOST_SETTING} setting enables a check that prevents
+ * allocation of multiple instances of the same shard on a single <tt>host</tt>,
+ * based on host name and host address. Defaults to `false`, meaning that no
+ * check is performed by default.
+ *
+ * <p>
+ * Note: this setting only applies if multiple nodes are started on the same
+ * <tt>host</tt>. Allocations of multiple copies of the same shard on the same
+ * <tt>node</tt> are not allowed independently of this setting.
+ * </p>
+ */
+public class SameShardAllocationDecider extends AllocationDecider {
+
+ public static final String SAME_HOST_SETTING = "cluster.routing.allocation.same_shard.host";
+
+ private final boolean sameHost;
+
+ @Inject
+ public SameShardAllocationDecider(Settings settings) {
+ super(settings);
+
+ this.sameHost = settings.getAsBoolean(SAME_HOST_SETTING, false);
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ Iterable<MutableShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting);
+ for (MutableShardRouting assignedShard : assignedShards) {
+ if (node.nodeId().equals(assignedShard.currentNodeId())) {
+ return allocation.decision(Decision.NO, "shard cannot be allocated on same node [%s] it already exists on", node.nodeId());
+ }
+ }
+ if (sameHost) {
+ if (node.node() != null) {
+ for (RoutingNode checkNode : allocation.routingNodes()) {
+ if (checkNode.node() == null) {
+ continue;
+ }
+ // check if it's on the same host as the one we want to allocate to
+ boolean checkNodeOnSameHost = false;
+ if (Strings.hasLength(checkNode.node().getHostAddress()) && Strings.hasLength(node.node().getHostAddress())) {
+ if (checkNode.node().getHostAddress().equals(node.node().getHostAddress())) {
+ checkNodeOnSameHost = true;
+ }
+ } else if (Strings.hasLength(checkNode.node().getHostName()) && Strings.hasLength(node.node().getHostName())) {
+ if (checkNode.node().getHostName().equals(node.node().getHostName())) {
+ checkNodeOnSameHost = true;
+ }
+ }
+ if (checkNodeOnSameHost) {
+ for (MutableShardRouting assignedShard : assignedShards) {
+ if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {
+ return allocation.decision(Decision.NO, "shard cannot be allocated on same host [%s] it already exists on",
+ node.nodeId());
+ }
+ }
+ }
+ }
+ }
+ }
+ return allocation.decision(Decision.YES, "shard is not allocated to same node or host");
+ }
+}
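
The host comparison above prefers the host address and falls back to the host name only when an address is missing on either side. A standalone sketch of that fallback (illustrative names):

    public final class SameHostSketch {

        static boolean hasLength(String s) {
            return s != null && s.length() > 0;
        }

        // Mirrors the checkNodeOnSameHost logic: address first, then host name.
        static boolean sameHost(String addressA, String nameA, String addressB, String nameB) {
            if (hasLength(addressA) && hasLength(addressB)) {
                return addressA.equals(addressB);
            }
            if (hasLength(nameA) && hasLength(nameB)) {
                return nameA.equals(nameB);
            }
            return false;
        }

        public static void main(String[] args) {
            System.out.println(sameHost("10.0.0.1", "es1", "10.0.0.1", "es2")); // true: address wins
            System.out.println(sameHost(null, "es1", null, "es1"));             // true: name fallback
        }
    }
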
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
new file mode 100644
index 0000000..779ca14
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * This {@link AllocationDecider} limits the number of shards per node on a per
+ * index basis. The allocator prevents a single node from holding more than
+ * {@value #INDEX_TOTAL_SHARDS_PER_NODE} shards per index during the allocation
+ * process. The limits of this decider can be changed in real-time via the
+ * index settings API.
+ * <p>
+ * If {@value #INDEX_TOTAL_SHARDS_PER_NODE} is reset to a negative value, shards
+ * per index are unlimited per node. Shards currently in the
+ * {@link ShardRoutingState#RELOCATING relocating} state are ignored by this
+ * {@link AllocationDecider} until the shard changes its state to either
+ * {@link ShardRoutingState#STARTED started},
+ * {@link ShardRoutingState#INITIALIZING initializing} or
+ * {@link ShardRoutingState#UNASSIGNED unassigned}.
+ * <p>
+ * Note: Reducing the number of shards per node via the index update API can
+ * trigger relocation and significant additional load on the cluster's nodes.
+ * </p>
+ */
+public class ShardsLimitAllocationDecider extends AllocationDecider {
+
+ /**
+ * Controls the maximum number of shards per index on a single Elasticsearch
+ * node. Negative values are interpreted as unlimited.
+ */
+ public static final String INDEX_TOTAL_SHARDS_PER_NODE = "index.routing.allocation.total_shards_per_node";
+
+ @Inject
+ public ShardsLimitAllocationDecider(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
+ int totalShardsPerNode = indexMd.settings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
+ if (totalShardsPerNode <= 0) {
+ return allocation.decision(Decision.YES, "total shard limit disabled: [%d] <= 0", totalShardsPerNode);
+ }
+
+ int nodeCount = 0;
+ for (MutableShardRouting nodeShard : node) {
+ if (!nodeShard.index().equals(shardRouting.index())) {
+ continue;
+ }
+ // don't count relocating shards...
+ if (nodeShard.relocating()) {
+ continue;
+ }
+ nodeCount++;
+ }
+ if (nodeCount >= totalShardsPerNode) {
+ return allocation.decision(Decision.NO, "too many shards for this index on node [%d], limit: [%d]",
+ nodeCount, totalShardsPerNode);
+ }
+ return allocation.decision(Decision.YES, "shard count under limit [%d] of total shards per node", totalShardsPerNode);
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
+ int totalShardsPerNode = indexMd.settings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
+ if (totalShardsPerNode <= 0) {
+ return allocation.decision(Decision.YES, "total shard limit disabled: [%d] <= 0", totalShardsPerNode);
+ }
+
+ int nodeCount = 0;
+ for (MutableShardRouting nodeShard : node) {
+ if (!nodeShard.index().equals(shardRouting.index())) {
+ continue;
+ }
+ // don't count relocating shards...
+ if (nodeShard.relocating()) {
+ continue;
+ }
+ nodeCount++;
+ }
+ if (nodeCount > totalShardsPerNode) {
+ return allocation.decision(Decision.NO, "too many shards for this index on node [%d], limit: [%d]",
+ nodeCount, totalShardsPerNode);
+ }
+ return allocation.decision(Decision.YES, "shard count under limit [%d] of total shards per node", totalShardsPerNode);
+ }
+}
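
Note that the count above skips shards in the RELOCATING state, so a shard that is being moved away frees its slot immediately. A sketch of that counting rule (simple stand-in types, not the Elasticsearch classes):

    import java.util.Arrays;
    import java.util.List;

    public final class ShardsLimitSketch {

        static final class Shard {
            final String index;
            final boolean relocating;
            Shard(String index, boolean relocating) {
                this.index = index;
                this.relocating = relocating;
            }
        }

        static boolean canAllocate(List<Shard> shardsOnNode, String index, int totalShardsPerNode) {
            if (totalShardsPerNode <= 0) {
                return true; // limit disabled
            }
            int count = 0;
            for (Shard shard : shardsOnNode) {
                if (shard.index.equals(index) && !shard.relocating) {
                    count++;
                }
            }
            return count < totalShardsPerNode;
        }

        public static void main(String[] args) {
            List<Shard> node = Arrays.asList(new Shard("logs", false), new Shard("logs", true));
            // The relocating shard is not counted, so a limit of 2 still admits one more.
            System.out.println(canAllocate(node, "logs", 2)); // true
        }
    }
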
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
new file mode 100644
index 0000000..76d1293
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.metadata.SnapshotMetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that
+ * are currently being snapshotted from being moved to other nodes.
+ */
+public class SnapshotInProgressAllocationDecider extends AllocationDecider {
+
+ /**
+ * Disables relocation of shards that are currently being snapshotted.
+ */
+ public static final String CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED = "cluster.routing.allocation.snapshot.relocation_enabled";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ boolean newEnableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation);
+ if (newEnableRelocation != enableRelocation) {
+ logger.info("updating [{}] from [{}], to [{}]", CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation, newEnableRelocation);
+ enableRelocation = newEnableRelocation;
+ }
+ }
+ }
+
+ private volatile boolean enableRelocation = false;
+
+ /**
+ * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance
+ */
+ public SnapshotInProgressAllocationDecider() {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ /**
+ * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance from given settings
+ *
+ * @param settings {@link org.elasticsearch.common.settings.Settings} to use
+ */
+ public SnapshotInProgressAllocationDecider(Settings settings) {
+ this(settings, new NodeSettingsService(settings));
+ }
+
+ @Inject
+ public SnapshotInProgressAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ enableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation);
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ /**
+ * Returns a {@link Decision} whether the given shard routing can be
+ * re-balanced within the given allocation. The default is
+ * {@link Decision#ALWAYS}.
+ */
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return canMove(shardRouting, allocation);
+ }
+
+ /**
+ * Returns a {@link Decision} whether the given shard routing can be
+ * allocated on the given node. The default is {@link Decision#ALWAYS}.
+ */
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return canMove(shardRouting, allocation);
+ }
+
+ private Decision canMove(ShardRouting shardRouting, RoutingAllocation allocation) {
+ if (!enableRelocation && shardRouting.primary()) {
+ // Only primary shards are snapshotted
+
+ SnapshotMetaData snapshotMetaData = allocation.metaData().custom(SnapshotMetaData.TYPE);
+ if (snapshotMetaData == null) {
+ // Snapshots are not running
+ return allocation.decision(Decision.YES, "no snapshots are currently running");
+ }
+
+ for (SnapshotMetaData.Entry snapshot : snapshotMetaData.entries()) {
+ SnapshotMetaData.ShardSnapshotStatus shardSnapshotStatus = snapshot.shards().get(shardRouting.shardId());
+ if (shardSnapshotStatus != null && !shardSnapshotStatus.state().completed() && shardSnapshotStatus.nodeId() != null && shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId())) {
+ logger.trace("Preventing snapshotted shard [{}] to be moved from node [{}]", shardRouting.shardId(), shardSnapshotStatus.nodeId());
+ return allocation.decision(Decision.NO, "snapshot for shard [%s] is currently running on node [%s]",
+ shardRouting.shardId(), shardSnapshotStatus.nodeId());
+ }
+ }
+ }
+ return allocation.decision(Decision.YES, "shard not primary or relocation disabled");
+ }
+
+}
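
In effect canMove blocks only one narrow case: a primary whose snapshot on its current node has not yet completed, while snapshot relocation is disabled. A boolean sketch of that rule (illustrative parameter names):

    public final class SnapshotMoveSketch {

        static boolean canMove(boolean enableRelocation, boolean primary,
                               boolean snapshotRunningOnCurrentNode) {
            if (!enableRelocation && primary && snapshotRunningOnCurrentNode) {
                return false; // moving the shard would interrupt the running snapshot
            }
            return true; // replicas and completed snapshots are never blocked
        }

        public static void main(String[] args) {
            System.out.println(canMove(false, true, true));  // false: snapshot in progress
            System.out.println(canMove(false, false, true)); // true: only primaries are snapshotted
        }
    }
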
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
new file mode 100644
index 0000000..7c2d26a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ * {@link ThrottlingAllocationDecider} controls the recovery process per node in
+ * the cluster. It exposes two settings via the cluster update API that allow
+ * changes in real-time:
+ * <p/>
+ * <ul>
+ * <li><tt>cluster.routing.allocation.node_initial_primaries_recoveries</tt> -
+ * restricts the number of initial primary shard recovery operations on a single
+ * node. The default is <tt>4</tt></li>
+ * <p/>
+ * <li><tt>cluster.routing.allocation.node_concurrent_recoveries</tt> -
+ * restricts the number of concurrent recovery operations on a single node. The
+ * default is <tt>2</tt></li>
+ * </ul>
+ * <p/>
+ * If one of the above thresholds is exceeded per node this allocation decider
+ * will return {@link Decision#THROTTLE} as a hint to upstream logic to throttle
+ * the allocation process to prevent overloading nodes due to too many concurrent recovery
+ * processes.
+ */
+public class ThrottlingAllocationDecider extends AllocationDecider {
+
+ public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries";
+ public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries";
+ public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
+ public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
+
+ private volatile int primariesInitialRecoveries;
+ private volatile int concurrentRecoveries;
+
+ @Inject
+ public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+
+ this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES);
+ this.concurrentRecoveries = settings.getAsInt("cluster.routing.allocation.concurrent_recoveries", settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES));
+ logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries);
+
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ if (shardRouting.primary()) {
+ assert shardRouting.unassigned() || shardRouting.active();
+ if (shardRouting.unassigned()) {
+ // primary is unassigned, means we are going to do recovery from gateway
+ // count *just the primary* currently doing recovery on the node and check against concurrent_recoveries
+ int primariesInRecovery = 0;
+ for (MutableShardRouting shard : node) {
+ // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*
+ // we only count initial recoveries here, so we need to make sure that relocating node is null
+ if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary() && shard.relocatingNodeId() == null) {
+ primariesInRecovery++;
+ }
+ }
+ if (primariesInRecovery >= primariesInitialRecoveries) {
+ return allocation.decision(Decision.THROTTLE, "too many primaries currently recovering [%d], limit: [%d]",
+ primariesInRecovery, primariesInitialRecoveries);
+ } else {
+ return allocation.decision(Decision.YES, "below primary recovery limit of [%d]", primariesInitialRecoveries);
+ }
+ }
+ }
+
+ // either primary or replica doing recovery (from peer shard)
+
+ // count the number of recoveries on the node, both target (INITIALIZING) and source (RELOCATING)
+ return canAllocate(node, allocation);
+ }
+
+ public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
+ int currentRecoveries = 0;
+ for (MutableShardRouting shard : node) {
+ if (shard.state() == ShardRoutingState.INITIALIZING || shard.state() == ShardRoutingState.RELOCATING) {
+ currentRecoveries++;
+ }
+ }
+ if (currentRecoveries >= concurrentRecoveries) {
+ return allocation.decision(Decision.THROTTLE, "too many shards currently recovering [%d], limit: [%d]",
+ currentRecoveries, concurrentRecoveries);
+ } else {
+ return allocation.decision(Decision.YES, "below shard recovery limit of [%d]", concurrentRecoveries);
+ }
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries);
+ if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) {
+ logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries);
+ ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries;
+ }
+
+ int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries);
+ if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) {
+ logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries);
+ ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries;
+ }
+ }
+ }
+}
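
The throttling itself is a simple count over a node's shard states. A dependency-free sketch of the concurrent-recoveries check (a simplified State enum stands in for ShardRoutingState):

    public final class ThrottleSketch {

        enum State { STARTED, INITIALIZING, RELOCATING }

        // Both recovery targets (INITIALIZING) and sources (RELOCATING) count.
        static String decide(State[] shardsOnNode, int concurrentRecoveries) {
            int current = 0;
            for (State state : shardsOnNode) {
                if (state == State.INITIALIZING || state == State.RELOCATING) {
                    current++;
                }
            }
            return current >= concurrentRecoveries ? "THROTTLE" : "YES";
        }

        public static void main(String[] args) {
            State[] node = { State.STARTED, State.INITIALIZING, State.RELOCATING };
            System.out.println(decide(node, 2)); // THROTTLE: two recoveries already running
        }
    }
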
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/OperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/operation/OperationRouting.java
new file mode 100644
index 0000000..f3b3fbf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/OperationRouting.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.indices.IndexMissingException;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public interface OperationRouting {
+
+ ShardIterator indexShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) throws IndexMissingException, IndexShardMissingException;
+
+ ShardIterator deleteShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) throws IndexMissingException, IndexShardMissingException;
+
+ GroupShardsIterator broadcastDeleteShards(ClusterState clusterState, String index) throws IndexMissingException, IndexShardMissingException;
+
+ ShardIterator getShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing, @Nullable String preference) throws IndexMissingException, IndexShardMissingException;
+
+ ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) throws IndexMissingException, IndexShardMissingException;
+
+ GroupShardsIterator deleteByQueryShards(ClusterState clusterState, String index, @Nullable Set<String> routing) throws IndexMissingException;
+
+ int searchShardsCount(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException;
+
+ GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException;
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/OperationRoutingModule.java b/src/main/java/org/elasticsearch/cluster/routing/operation/OperationRoutingModule.java
new file mode 100644
index 0000000..4ebd5b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/OperationRoutingModule.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
+import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
+import org.elasticsearch.cluster.routing.operation.plain.PlainOperationRoutingModule;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+import static org.elasticsearch.common.inject.Modules.createModule;
+
+/**
+ *
+ */
+public class OperationRoutingModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public OperationRoutingModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(createModule(settings.getAsClass("cluster.routing.operation.type", PlainOperationRoutingModule.class, "org.elasticsearch.cluster.routing.operation.", "OperationRoutingModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ bind(HashFunction.class).to(settings.getAsClass("cluster.routing.operation.hash.type", DjbHashFunction.class, "org.elasticsearch.cluster.routing.operation.hash.", "HashFunction")).asEagerSingleton();
+ }
+}
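
The module above selects both the routing module and the hash function from node
settings ("cluster.routing.operation.type" and "cluster.routing.operation.hash.type").
A minimal configuration sketch, assuming fully qualified class names in
elasticsearch.yml; the values below are illustrative, not defaults:

    # hypothetical elasticsearch.yml entries; PlainOperationRoutingModule and
    # DjbHashFunction are the defaults bound above
    cluster.routing.operation.type: org.elasticsearch.cluster.routing.operation.plain.PlainOperationRoutingModule
    cluster.routing.operation.hash.type: org.elasticsearch.cluster.routing.operation.hash.simple.SimpleHashFunction
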
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/hash/HashFunction.java b/src/main/java/org/elasticsearch/cluster/routing/operation/hash/HashFunction.java
new file mode 100644
index 0000000..09c6d6f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/hash/HashFunction.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation.hash;
+
+/**
+ * Simple hash function interface used for shard routing.
+ */
+public interface HashFunction {
+
+ /**
+ * Calculate a hash value for routing
+ * @param routing String to calculate the hash value from
+ * @return hash value of the given routing string
+ */
+ int hash(String routing);
+
+ /**
+     * Calculate a hash value for a document type and id
+     * @param type the type name
+     * @param id the document id to calculate the hash value from
+     * @return hash value of the given type and id
+ */
+ int hash(String type, String id);
+}
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/hash/djb/DjbHashFunction.java b/src/main/java/org/elasticsearch/cluster/routing/operation/hash/djb/DjbHashFunction.java
new file mode 100644
index 0000000..b36ea94
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/hash/djb/DjbHashFunction.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation.hash.djb;
+
+import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
+
+/**
+ * This class implements the DJB2 hash function
+ * developed by <i>Daniel J. Bernstein</i>.
+ */
+public class DjbHashFunction implements HashFunction {
+
+ public static int DJB_HASH(String value) {
+ long hash = 5381;
+
+ for (int i = 0; i < value.length(); i++) {
+ hash = ((hash << 5) + hash) + value.charAt(i);
+ }
+
+ return (int) hash;
+ }
+
+ public static int DJB_HASH(byte[] value, int offset, int length) {
+ long hash = 5381;
+
+ final int end = offset + length;
+ for (int i = offset; i < end; i++) {
+ hash = ((hash << 5) + hash) + value[i];
+ }
+
+ return (int) hash;
+ }
+
+ @Override
+ public int hash(String routing) {
+ return DJB_HASH(routing);
+ }
+
+ @Override
+ public int hash(String type, String id) {
+ long hash = 5381;
+
+ for (int i = 0; i < type.length(); i++) {
+ hash = ((hash << 5) + hash) + type.charAt(i);
+ }
+
+ for (int i = 0; i < id.length(); i++) {
+ hash = ((hash << 5) + hash) + id.charAt(i);
+ }
+
+ return (int) hash;
+ }
+}
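
Both static helpers above are the classic DJB2 recurrence: starting from the magic
constant 5381, each step computes hash * 33 + c, and ((hash << 5) + hash) is just
hash * 33 written with a shift. A minimal standalone sketch of that identity,
assuming nothing beyond the code above:

    // DJB2 with the multiplication spelled out; mirrors DJB_HASH(String).
    public class DjbIdentityCheck {
        static int djb2(String value) {
            long hash = 5381;
            for (int i = 0; i < value.length(); i++) {
                hash = hash * 33 + value.charAt(i); // same as ((hash << 5) + hash) + c
            }
            return (int) hash;
        }

        public static void main(String[] args) {
            // prints the same value DjbHashFunction.DJB_HASH("user-42") would return
            System.out.println(djb2("user-42"));
        }
    }
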
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/hash/simple/SimpleHashFunction.java b/src/main/java/org/elasticsearch/cluster/routing/operation/hash/simple/SimpleHashFunction.java
new file mode 100644
index 0000000..c66fd76
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/hash/simple/SimpleHashFunction.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation.hash.simple;
+
+import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
+
+/**
+ * This class implements a simple hash function based on the Java built-in {@link Object#hashCode()}.
+ */
+public class SimpleHashFunction implements HashFunction {
+
+ @Override
+ public int hash(String routing) {
+ return routing.hashCode();
+ }
+
+ @Override
+ public int hash(String type, String id) {
+ return type.hashCode() + 31 * id.hashCode();
+ }
+}
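
Routing is only correct if every node maps the same key to the same shard, so the
hash must be stable across JVMs. String's hashCode() qualifies: the JLS fixes it as
s[0]*31^(n-1) + ... + s[n-1]. A small self-contained check of that recurrence,
purely illustrative:

    public class StringHashCheck {
        public static void main(String[] args) {
            String key = "user-42"; // hypothetical routing key
            int expected = 0;
            for (int i = 0; i < key.length(); i++) {
                expected = 31 * expected + key.charAt(i); // the JLS-specified recurrence
            }
            System.out.println(expected == key.hashCode()); // true on any conforming JVM
        }
    }
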
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java
new file mode 100644
index 0000000..0da3c2d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation.plain;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
+import org.elasticsearch.cluster.routing.operation.OperationRouting;
+import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
+import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndexMissingException;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class PlainOperationRouting extends AbstractComponent implements OperationRouting {
+
+ private final HashFunction hashFunction;
+
+ private final boolean useType;
+
+ private final AwarenessAllocationDecider awarenessAllocationDecider;
+
+ @Inject
+ public PlainOperationRouting(Settings indexSettings, HashFunction hashFunction, AwarenessAllocationDecider awarenessAllocationDecider) {
+ super(indexSettings);
+ this.hashFunction = hashFunction;
+ this.useType = indexSettings.getAsBoolean("cluster.routing.operation.use_type", false);
+ this.awarenessAllocationDecider = awarenessAllocationDecider;
+ }
+
+ @Override
+ public ShardIterator indexShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) throws IndexMissingException, IndexShardMissingException {
+ return shards(clusterState, index, type, id, routing).shardsIt();
+ }
+
+ @Override
+ public ShardIterator deleteShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) throws IndexMissingException, IndexShardMissingException {
+ return shards(clusterState, index, type, id, routing).shardsIt();
+ }
+
+ @Override
+ public ShardIterator getShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing, @Nullable String preference) throws IndexMissingException, IndexShardMissingException {
+ return preferenceActiveShardIterator(shards(clusterState, index, type, id, routing), clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
+ }
+
+ @Override
+ public ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) throws IndexMissingException, IndexShardMissingException {
+ return preferenceActiveShardIterator(shards(clusterState, index, shardId), clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
+ }
+
+ @Override
+ public GroupShardsIterator broadcastDeleteShards(ClusterState clusterState, String index) throws IndexMissingException {
+ return indexRoutingTable(clusterState, index).groupByShardsIt();
+ }
+
+ @Override
+ public GroupShardsIterator deleteByQueryShards(ClusterState clusterState, String index, @Nullable Set<String> routing) throws IndexMissingException {
+ if (routing == null || routing.isEmpty()) {
+ return indexRoutingTable(clusterState, index).groupByShardsIt();
+ }
+
+        // a hash set (not an identity set) so duplicate routing values that resolve to the same shard collapse into one iterator
+ HashSet<ShardIterator> set = new HashSet<ShardIterator>();
+ IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index);
+ for (String r : routing) {
+ int shardId = shardId(clusterState, index, null, null, r);
+ IndexShardRoutingTable indexShard = indexRouting.shard(shardId);
+ if (indexShard == null) {
+ throw new IndexShardMissingException(new ShardId(index, shardId));
+ }
+ set.add(indexShard.shardsRandomIt());
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ @Override
+ public int searchShardsCount(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException {
+ final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
+ return shards.size();
+ }
+
+ @Override
+ public GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException {
+ final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
+ final Set<ShardIterator> set = new HashSet<ShardIterator>(shards.size());
+ for (IndexShardRoutingTable shard : shards) {
+ ShardIterator iterator = preferenceActiveShardIterator(shard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
+ if (iterator != null) {
+ set.add(iterator);
+ }
+ }
+ return new GroupShardsIterator(set);
+ }
+
+ private static final Map<String, Set<String>> EMPTY_ROUTING = Collections.emptyMap();
+
+ private Set<IndexShardRoutingTable> computeTargetedShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map<String, Set<String>> routing) throws IndexMissingException {
+ routing = routing == null ? EMPTY_ROUTING : routing; // just use an empty map
+ final Set<IndexShardRoutingTable> set = new HashSet<IndexShardRoutingTable>();
+        // a set rather than a list, since duplicate routing values may target the same shard
+ for (String index : concreteIndices) {
+ final IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index);
+ final Set<String> effectiveRouting = routing.get(index);
+ if (effectiveRouting != null) {
+ for (String r : effectiveRouting) {
+ int shardId = shardId(clusterState, index, null, null, r);
+ IndexShardRoutingTable indexShard = indexRouting.shard(shardId);
+ if (indexShard == null) {
+ throw new IndexShardMissingException(new ShardId(index, shardId));
+ }
+                    // duplicates are fine; the set collapses them into a single entry
+ set.add(indexShard);
+ }
+ } else {
+ for (IndexShardRoutingTable indexShard : indexRouting) {
+ set.add(indexShard);
+ }
+ }
+ }
+ return set;
+ }
+
+ private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable indexShard, String localNodeId, DiscoveryNodes nodes, @Nullable String preference) {
+ if (preference == null || preference.isEmpty()) {
+ String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes();
+ if (awarenessAttributes.length == 0) {
+ return indexShard.activeInitializingShardsRandomIt();
+ } else {
+ return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes);
+ }
+ }
+ if (preference.charAt(0) == '_') {
+ if (preference.startsWith("_shards:")) {
+ // starts with _shards, so execute on specific ones
+ int index = preference.indexOf(';');
+ String shards;
+ if (index == -1) {
+ shards = preference.substring("_shards:".length());
+ } else {
+ shards = preference.substring("_shards:".length(), index);
+ }
+ String[] ids = Strings.splitStringByCommaToArray(shards);
+ boolean found = false;
+ for (String id : ids) {
+ if (Integer.parseInt(id) == indexShard.shardId().id()) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return null;
+ }
+ // no more preference
+ if (index == -1 || index == preference.length() - 1) {
+ String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes();
+ if (awarenessAttributes.length == 0) {
+ return indexShard.activeInitializingShardsRandomIt();
+ } else {
+ return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes);
+ }
+ } else {
+ // update the preference and continue
+ preference = preference.substring(index + 1);
+ }
+ }
+ if (preference.startsWith("_prefer_node:")) {
+ return indexShard.preferNodeActiveInitializingShardsIt(preference.substring("_prefer_node:".length()));
+ }
+ if ("_local".equals(preference)) {
+ return indexShard.preferNodeActiveInitializingShardsIt(localNodeId);
+ }
+ if ("_primary".equals(preference)) {
+ return indexShard.primaryActiveInitializingShardIt();
+ }
+ if ("_primary_first".equals(preference) || "_primaryFirst".equals(preference)) {
+ return indexShard.primaryFirstActiveInitializingShardsIt();
+ }
+ if ("_only_local".equals(preference) || "_onlyLocal".equals(preference)) {
+ return indexShard.onlyNodeActiveInitializingShardsIt(localNodeId);
+ }
+ if (preference.startsWith("_only_node:")) {
+ String nodeId = preference.substring("_only_node:".length());
+ ensureNodeIdExists(nodes, nodeId);
+ return indexShard.onlyNodeActiveInitializingShardsIt(nodeId);
+ }
+ }
+        // otherwise use the preference value itself as a routing-like hash, so the same preference always selects the same shard copy
+ String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes();
+ if (awarenessAttributes.length == 0) {
+ return indexShard.activeInitializingShardsIt(DjbHashFunction.DJB_HASH(preference));
+ } else {
+ return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, DjbHashFunction.DJB_HASH(preference));
+ }
+ }
+
+ public IndexMetaData indexMetaData(ClusterState clusterState, String index) {
+ IndexMetaData indexMetaData = clusterState.metaData().index(index);
+ if (indexMetaData == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ return indexMetaData;
+ }
+
+ protected IndexRoutingTable indexRoutingTable(ClusterState clusterState, String index) {
+ IndexRoutingTable indexRouting = clusterState.routingTable().index(index);
+ if (indexRouting == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ return indexRouting;
+ }
+
+
+ // either routing is set, or type/id are set
+
+ protected IndexShardRoutingTable shards(ClusterState clusterState, String index, String type, String id, String routing) {
+ int shardId = shardId(clusterState, index, type, id, routing);
+ return shards(clusterState, index, shardId);
+ }
+
+ protected IndexShardRoutingTable shards(ClusterState clusterState, String index, int shardId) {
+ IndexShardRoutingTable indexShard = indexRoutingTable(clusterState, index).shard(shardId);
+ if (indexShard == null) {
+ throw new IndexShardMissingException(new ShardId(index, shardId));
+ }
+ return indexShard;
+ }
+
+ private int shardId(ClusterState clusterState, String index, String type, @Nullable String id, @Nullable String routing) {
+ if (routing == null) {
+ if (!useType) {
+ return Math.abs(hash(id) % indexMetaData(clusterState, index).numberOfShards());
+ } else {
+ return Math.abs(hash(type, id) % indexMetaData(clusterState, index).numberOfShards());
+ }
+ }
+ return Math.abs(hash(routing) % indexMetaData(clusterState, index).numberOfShards());
+ }
+
+ protected int hash(String routing) {
+ return hashFunction.hash(routing);
+ }
+
+ protected int hash(String type, String id) {
+ if (type == null || "_all".equals(type)) {
+ throw new ElasticsearchIllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)");
+ }
+ return hashFunction.hash(type, id);
+ }
+
+ private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) {
+ if (!nodes.dataNodes().keys().contains(nodeId)) {
+ throw new ElasticsearchIllegalArgumentException("No data node with id[" + nodeId + "] found");
+ }
+ }
+}
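
shardId() above reduces the hash of the routing value (or of type/id when no routing
is given) modulo the number of shards. A self-contained sketch of that reduction,
assuming a hypothetical index with 5 shards and the default DJB hash:

    public class ShardResolutionSketch {
        public static void main(String[] args) {
            int numberOfShards = 5;     // hypothetical index setting
            String routing = "user-42"; // routing key or document id

            long hash = 5381;           // same recurrence as DjbHashFunction.DJB_HASH
            for (int i = 0; i < routing.length(); i++) {
                hash = ((hash << 5) + hash) + routing.charAt(i);
            }

            // abs() is applied after the modulo, so the result always falls in
            // [0, numberOfShards), even if the hash is Integer.MIN_VALUE
            int shardId = Math.abs((int) hash % numberOfShards);
            System.out.println("routing '" + routing + "' -> shard " + shardId);
        }
    }
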
diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRoutingModule.java b/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRoutingModule.java
new file mode 100644
index 0000000..a76cacd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRoutingModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation.plain;
+
+import org.elasticsearch.cluster.routing.operation.OperationRouting;
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class PlainOperationRoutingModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(OperationRouting.class).to(PlainOperationRouting.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
new file mode 100644
index 0000000..a449141
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
@@ -0,0 +1,702 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.service;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ClusterState.Builder;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.operation.OperationRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.*;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.*;
+
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ *
+ */
+public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
+
+ private final ThreadPool threadPool;
+
+ private final DiscoveryService discoveryService;
+
+ private final OperationRouting operationRouting;
+
+ private final TransportService transportService;
+
+ private final NodeSettingsService nodeSettingsService;
+
+ private final TimeValue reconnectInterval;
+
+ private volatile PrioritizedEsThreadPoolExecutor updateTasksExecutor;
+
+ private final List<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<ClusterStateListener>();
+ private final List<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<ClusterStateListener>();
+ private final List<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<ClusterStateListener>();
+ private final LocalNodeMasterListeners localNodeMasterListeners;
+
+ private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
+
+ private volatile ClusterState clusterState = ClusterState.builder().build();
+
+ private final ClusterBlocks.Builder initialBlocks = ClusterBlocks.builder().addGlobalBlock(Discovery.NO_MASTER_BLOCK);
+
+ private volatile ScheduledFuture reconnectToNodes;
+
+ @Inject
+ public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService,
+ NodeSettingsService nodeSettingsService, ThreadPool threadPool) {
+ super(settings);
+ this.operationRouting = operationRouting;
+ this.transportService = transportService;
+ this.discoveryService = discoveryService;
+ this.threadPool = threadPool;
+ this.nodeSettingsService = nodeSettingsService;
+
+ this.nodeSettingsService.setClusterService(this);
+
+ this.reconnectInterval = componentSettings.getAsTime("reconnect_interval", TimeValue.timeValueSeconds(10));
+
+ localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
+ }
+
+ public NodeSettingsService settingsService() {
+ return this.nodeSettingsService;
+ }
+
+ public void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException {
+ if (lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("can't set initial block when started");
+ }
+ initialBlocks.addGlobalBlock(block);
+ }
+
+ @Override
+ public void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException {
+ if (lifecycle.started()) {
+            throw new ElasticsearchIllegalStateException("can't remove initial block when started");
+ }
+ initialBlocks.removeGlobalBlock(block);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ add(localNodeMasterListeners);
+ this.clusterState = ClusterState.builder().blocks(initialBlocks).build();
+ this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(daemonThreadFactory(settings, "clusterService#updateTask"));
+ this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ this.reconnectToNodes.cancel(true);
+ for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
+ onGoingTimeout.cancel();
+ onGoingTimeout.listener.onClose();
+ }
+ updateTasksExecutor.shutdown();
+ try {
+ updateTasksExecutor.awaitTermination(10, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ remove(localNodeMasterListeners);
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public DiscoveryNode localNode() {
+ return discoveryService.localNode();
+ }
+
+ @Override
+ public OperationRouting operationRouting() {
+ return operationRouting;
+ }
+
+ public ClusterState state() {
+ return this.clusterState;
+ }
+
+ public void addFirst(ClusterStateListener listener) {
+ priorityClusterStateListeners.add(listener);
+ }
+
+ public void addLast(ClusterStateListener listener) {
+ lastClusterStateListeners.add(listener);
+ }
+
+ public void add(ClusterStateListener listener) {
+ clusterStateListeners.add(listener);
+ }
+
+ public void remove(ClusterStateListener listener) {
+ clusterStateListeners.remove(listener);
+ priorityClusterStateListeners.remove(listener);
+ lastClusterStateListeners.remove(listener);
+ for (Iterator<NotifyTimeout> it = onGoingTimeouts.iterator(); it.hasNext(); ) {
+ NotifyTimeout timeout = it.next();
+ if (timeout.listener.equals(listener)) {
+ timeout.cancel();
+ it.remove();
+ }
+ }
+ }
+
+ @Override
+ public void add(LocalNodeMasterListener listener) {
+ localNodeMasterListeners.add(listener);
+ }
+
+ @Override
+ public void remove(LocalNodeMasterListener listener) {
+ localNodeMasterListeners.remove(listener);
+ }
+
+ public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) {
+ if (lifecycle.stoppedOrClosed()) {
+ listener.onClose();
+ return;
+ }
+ // call the post added notification on the same event thread
+ try {
+ updateTasksExecutor.execute(new PrioritizedRunnable(Priority.HIGH) {
+ @Override
+ public void run() {
+ NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
+ notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
+ onGoingTimeouts.add(notifyTimeout);
+ clusterStateListeners.add(listener);
+ listener.postAdded();
+ }
+ });
+ } catch (EsRejectedExecutionException e) {
+ if (lifecycle.stoppedOrClosed()) {
+ listener.onClose();
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
+ submitStateUpdateTask(source, Priority.NORMAL, updateTask);
+ }
+
+ public void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask) {
+ if (!lifecycle.started()) {
+ return;
+ }
+ try {
+ final UpdateTask task = new UpdateTask(source, priority, updateTask);
+ if (updateTask instanceof TimeoutClusterStateUpdateTask) {
+ final TimeoutClusterStateUpdateTask timeoutUpdateTask = (TimeoutClusterStateUpdateTask) updateTask;
+ updateTasksExecutor.execute(task, threadPool.scheduler(), timeoutUpdateTask.timeout(), new Runnable() {
+ @Override
+ public void run() {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ timeoutUpdateTask.onFailure(task.source, new ProcessClusterEventTimeoutException(timeoutUpdateTask.timeout(), task.source));
+ }
+ });
+ }
+ });
+ } else {
+ updateTasksExecutor.execute(task);
+ }
+ } catch (EsRejectedExecutionException e) {
+            // ignore rejections while we are shutting down; there is nothing useful to do here
+ if (!lifecycle.stoppedOrClosed()) {
+ throw e;
+ }
+ }
+ }
+
+ @Override
+ public List<PendingClusterTask> pendingTasks() {
+ long now = System.currentTimeMillis();
+ PrioritizedEsThreadPoolExecutor.Pending[] pendings = updateTasksExecutor.getPending();
+ List<PendingClusterTask> pendingClusterTasks = new ArrayList<PendingClusterTask>(pendings.length);
+ for (PrioritizedEsThreadPoolExecutor.Pending pending : pendings) {
+ final String source;
+ final long timeInQueue;
+ if (pending.task instanceof UpdateTask) {
+ UpdateTask updateTask = (UpdateTask) pending.task;
+ source = updateTask.source;
+ timeInQueue = now - updateTask.addedAt;
+ } else {
+ source = "unknown";
+ timeInQueue = -1;
+ }
+
+ pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue));
+ }
+ return pendingClusterTasks;
+ }
+
+ class UpdateTask extends PrioritizedRunnable {
+
+ public final String source;
+ public final ClusterStateUpdateTask updateTask;
+ public final long addedAt = System.currentTimeMillis();
+
+ UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
+ super(priority);
+ this.source = source;
+ this.updateTask = updateTask;
+ }
+
+ @Override
+ public void run() {
+ if (!lifecycle.started()) {
+ logger.debug("processing [{}]: ignoring, cluster_service not started", source);
+ return;
+ }
+ logger.debug("processing [{}]: execute", source);
+ ClusterState previousClusterState = clusterState;
+ ClusterState newClusterState;
+ try {
+ newClusterState = updateTask.execute(previousClusterState);
+ } catch (Throwable e) {
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
+ sb.append(previousClusterState.nodes().prettyPrint());
+ sb.append(previousClusterState.routingTable().prettyPrint());
+ sb.append(previousClusterState.readOnlyRoutingNodes().prettyPrint());
+ logger.trace(sb.toString(), e);
+ }
+ updateTask.onFailure(source, e);
+ return;
+ }
+
+ if (previousClusterState == newClusterState) {
+ logger.debug("processing [{}]: no change in cluster_state", source);
+ if (updateTask instanceof AckedClusterStateUpdateTask) {
+ //no need to wait for ack if nothing changed, the update can be counted as acknowledged
+ ((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
+ }
+ if (updateTask instanceof ProcessedClusterStateUpdateTask) {
+ ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
+ }
+ return;
+ }
+
+ try {
+ Discovery.AckListener ackListener = new NoOpAckListener();
+ if (newClusterState.nodes().localNodeMaster()) {
+ // only the master controls the version numbers
+ Builder builder = ClusterState.builder(newClusterState).version(newClusterState.version() + 1);
+ if (previousClusterState.routingTable() != newClusterState.routingTable()) {
+ builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1));
+ }
+ if (previousClusterState.metaData() != newClusterState.metaData()) {
+ builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
+ }
+ newClusterState = builder.build();
+
+ if (updateTask instanceof AckedClusterStateUpdateTask) {
+ final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
+ if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
+ ackedUpdateTask.onAckTimeout();
+ } else {
+ try {
+ ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
+ } catch (EsRejectedExecutionException ex) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
+ }
+ //timeout straightaway, otherwise we could wait forever as the timeout thread has not started
+ ackedUpdateTask.onAckTimeout();
+ }
+ }
+ }
+ } else {
+ if (previousClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK) && !newClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
+                        // force an update: this is a fresh state from the master as we transition from having no master to having one
+                        // build fresh routing table and metadata instances to remove the chance that the version stays the same
+ Builder builder = ClusterState.builder(newClusterState);
+ builder.routingTable(RoutingTable.builder(newClusterState.routingTable()));
+ builder.metaData(MetaData.builder(newClusterState.metaData()));
+ newClusterState = builder.build();
+ logger.debug("got first state from fresh master [{}]", newClusterState.nodes().masterNodeId());
+ } else if (newClusterState.version() < previousClusterState.version()) {
+                        // we got a cluster state with an older version; since we are *not* the master, let it in, as it might still be valid
+                        // versions are checked where applicable, e.g. at ZenDiscovery#handleNewClusterStateFromMaster
+ logger.debug("got smaller cluster state when not master [" + newClusterState.version() + "<" + previousClusterState.version() + "] from source [" + source + "]");
+ }
+ }
+
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder("cluster state updated:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
+ sb.append(newClusterState.nodes().prettyPrint());
+ sb.append(newClusterState.routingTable().prettyPrint());
+ sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
+ logger.trace(sb.toString());
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
+ }
+
+ ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
+ // new cluster state, notify all listeners
+ final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
+ if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
+ String summary = nodesDelta.shortSummary();
+ if (summary.length() > 0) {
+ logger.info("{}, reason: {}", summary, source);
+ }
+ }
+
+ // TODO, do this in parallel (and wait)
+ for (DiscoveryNode node : nodesDelta.addedNodes()) {
+ if (!nodeRequiresConnection(node)) {
+ continue;
+ }
+ try {
+ transportService.connectToNode(node);
+ } catch (Throwable e) {
+ // the fault detection will detect it as failed as well
+ logger.warn("failed to connect to node [" + node + "]", e);
+ }
+ }
+
+ // if we are the master, publish the new state to all nodes
+ // we publish here before we send a notification to all the listeners, since if it fails
+ // we don't want to notify
+ if (newClusterState.nodes().localNodeMaster()) {
+ logger.debug("publishing cluster state version {}", newClusterState.version());
+ discoveryService.publish(newClusterState, ackListener);
+ }
+
+ // update the current cluster state
+ clusterState = newClusterState;
+ logger.debug("set local cluster state to version {}", newClusterState.version());
+
+ for (ClusterStateListener listener : priorityClusterStateListeners) {
+ listener.clusterChanged(clusterChangedEvent);
+ }
+ for (ClusterStateListener listener : clusterStateListeners) {
+ listener.clusterChanged(clusterChangedEvent);
+ }
+ for (ClusterStateListener listener : lastClusterStateListeners) {
+ listener.clusterChanged(clusterChangedEvent);
+ }
+
+ if (!nodesDelta.removedNodes().isEmpty()) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ for (DiscoveryNode node : nodesDelta.removedNodes()) {
+ transportService.disconnectFromNode(node);
+ }
+ }
+ });
+ }
+
+                // the master acknowledges manually at the end of the publish
+ if (newClusterState.nodes().localNodeMaster()) {
+ try {
+ ackListener.onNodeAck(localNode(), null);
+ } catch (Throwable t) {
+ logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
+ }
+ }
+
+ if (updateTask instanceof ProcessedClusterStateUpdateTask) {
+ ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
+ }
+
+ logger.debug("processing [{}]: done applying updated cluster_state (version: {})", source, newClusterState.version());
+ } catch (Throwable t) {
+ StringBuilder sb = new StringBuilder("failed to apply updated cluster state:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
+ sb.append(newClusterState.nodes().prettyPrint());
+ sb.append(newClusterState.routingTable().prettyPrint());
+ sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
+ logger.warn(sb.toString(), t);
+ // TODO: do we want to call updateTask.onFailure here?
+ }
+ }
+ }
+
+ class NotifyTimeout implements Runnable {
+ final TimeoutClusterStateListener listener;
+ final TimeValue timeout;
+ ScheduledFuture future;
+
+ NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) {
+ this.listener = listener;
+ this.timeout = timeout;
+ }
+
+ public void cancel() {
+ future.cancel(false);
+ }
+
+ @Override
+ public void run() {
+ if (future.isCancelled()) {
+ return;
+ }
+ if (lifecycle.stoppedOrClosed()) {
+ listener.onClose();
+ } else {
+ listener.onTimeout(this.timeout);
+ }
+ // note, we rely on the listener to remove itself in case of timeout if needed
+ }
+ }
+
+ private class ReconnectToNodes implements Runnable {
+
+ private ConcurrentMap<DiscoveryNode, Integer> failureCount = ConcurrentCollections.newConcurrentMap();
+
+ @Override
+ public void run() {
+            // with certain discovery implementations the master checks whether each node is alive,
+            // but we can't rely on that, so we check here as well
+ for (DiscoveryNode node : clusterState.nodes()) {
+ if (lifecycle.stoppedOrClosed()) {
+ return;
+ }
+ if (!nodeRequiresConnection(node)) {
+ continue;
+ }
+ if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
+ if (!transportService.nodeConnected(node)) {
+ try {
+ transportService.connectToNode(node);
+ } catch (Exception e) {
+ if (lifecycle.stoppedOrClosed()) {
+ return;
+ }
+                            if (clusterState.nodes().nodeExists(node.id())) { // double-check here as well; the node may be gone by now
+ Integer nodeFailureCount = failureCount.get(node);
+ if (nodeFailureCount == null) {
+ nodeFailureCount = 1;
+ } else {
+ nodeFailureCount = nodeFailureCount + 1;
+ }
+ // log every 6th failure
+ if ((nodeFailureCount % 6) == 0) {
+ // reset the failure count...
+ nodeFailureCount = 0;
+ logger.warn("failed to reconnect to node {}", e, node);
+ }
+ failureCount.put(node, nodeFailureCount);
+ }
+ }
+ }
+ }
+ }
+            // drop failure counts for nodes that are no longer part of the cluster
+ DiscoveryNodes nodes = clusterState.nodes();
+ for (Iterator<DiscoveryNode> failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) {
+ DiscoveryNode failedNode = failedNodesIt.next();
+ if (!nodes.nodeExists(failedNode.id())) {
+ failedNodesIt.remove();
+ }
+ }
+ if (lifecycle.started()) {
+ reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
+ }
+ }
+ }
+
+ private boolean nodeRequiresConnection(DiscoveryNode node) {
+ return localNode().shouldConnectTo(node);
+ }
+
+ private static class LocalNodeMasterListeners implements ClusterStateListener {
+
+ private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<LocalNodeMasterListener>();
+ private final ThreadPool threadPool;
+ private volatile boolean master = false;
+
+ private LocalNodeMasterListeners(ThreadPool threadPool) {
+ this.threadPool = threadPool;
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (!master && event.localNodeMaster()) {
+ master = true;
+ for (LocalNodeMasterListener listener : listeners) {
+ Executor executor = threadPool.executor(listener.executorName());
+ executor.execute(new OnMasterRunnable(listener));
+ }
+ return;
+ }
+
+ if (master && !event.localNodeMaster()) {
+ master = false;
+ for (LocalNodeMasterListener listener : listeners) {
+ Executor executor = threadPool.executor(listener.executorName());
+ executor.execute(new OffMasterRunnable(listener));
+ }
+ }
+ }
+
+ private void add(LocalNodeMasterListener listener) {
+ listeners.add(listener);
+ }
+
+ private void remove(LocalNodeMasterListener listener) {
+ listeners.remove(listener);
+ }
+
+ private void clear() {
+ listeners.clear();
+ }
+ }
+
+ private static class OnMasterRunnable implements Runnable {
+
+ private final LocalNodeMasterListener listener;
+
+ private OnMasterRunnable(LocalNodeMasterListener listener) {
+ this.listener = listener;
+ }
+
+ @Override
+ public void run() {
+ listener.onMaster();
+ }
+ }
+
+ private static class OffMasterRunnable implements Runnable {
+
+ private final LocalNodeMasterListener listener;
+
+ private OffMasterRunnable(LocalNodeMasterListener listener) {
+ this.listener = listener;
+ }
+
+ @Override
+ public void run() {
+ listener.offMaster();
+ }
+ }
+
+ private static class NoOpAckListener implements Discovery.AckListener {
+ @Override
+ public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
+ }
+
+ @Override
+ public void onTimeout() {
+ }
+ }
+
+ private static class AckCountDownListener implements Discovery.AckListener {
+
+ private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
+
+ private final AckedClusterStateUpdateTask ackedUpdateTask;
+ private final CountDown countDown;
+ private final DiscoveryNodes nodes;
+ private final long clusterStateVersion;
+ private final Future<?> ackTimeoutCallback;
+ private Throwable lastFailure;
+
+ AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
+ this.ackedUpdateTask = ackedUpdateTask;
+ this.clusterStateVersion = clusterStateVersion;
+ this.nodes = nodes;
+ int countDown = 0;
+ for (DiscoveryNode node : nodes) {
+ if (ackedUpdateTask.mustAck(node)) {
+ countDown++;
+ }
+ }
+ //we always wait for at least 1 node (the master)
+ countDown = Math.max(1, countDown);
+ logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
+ this.countDown = new CountDown(countDown);
+ this.ackTimeoutCallback = threadPool.schedule(ackedUpdateTask.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ onTimeout();
+ }
+ });
+ }
+
+ @Override
+ public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
+ if (!ackedUpdateTask.mustAck(node)) {
+ //we always wait for the master ack anyway
+ if (!node.equals(nodes.masterNode())) {
+ return;
+ }
+ }
+ if (t == null) {
+ logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
+ } else {
+ this.lastFailure = t;
+ logger.debug("ack received from node [{}], cluster_state update (version: {})", t, node, clusterStateVersion);
+ }
+
+ if (countDown.countDown()) {
+ logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
+ ackTimeoutCallback.cancel(true);
+ ackedUpdateTask.onAllNodesAcked(lastFailure);
+ }
+ }
+
+ @Override
+ public void onTimeout() {
+ if (countDown.fastForward()) {
+ logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion);
+ ackedUpdateTask.onAckTimeout();
+ }
+ }
+ }
+
+} \ No newline at end of file
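
All cluster state changes funnel through submitStateUpdateTask() onto the single
prioritized executor above. A hedged sketch of a caller, inferred only from how
UpdateTask drives the task (execute() returns the new state, onFailure() is called
on error); the clusterService reference and the source string are assumptions:

    clusterService.submitStateUpdateTask("hypothetical-task", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            // returning the same instance signals "no change": the service then
            // skips the version bump, publishing, and listener notification
            return currentState;
        }

        @Override
        public void onFailure(String source, Throwable t) {
            // invoked if execute() throws; timeout-aware tasks are also failed
            // via the generic pool when they time out in the queue
        }
    });
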
diff --git a/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java b/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java
new file mode 100644
index 0000000..3efd396
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.service;
+
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+/**
+ */
+public class PendingClusterTask implements Streamable {
+
+ private long insertOrder;
+ private Priority priority;
+ private Text source;
+ private long timeInQueue;
+
+ public PendingClusterTask() {
+ }
+
+ public PendingClusterTask(long insertOrder, Priority priority, Text source, long timeInQueue) {
+ this.insertOrder = insertOrder;
+ this.priority = priority;
+ this.source = source;
+ this.timeInQueue = timeInQueue;
+ }
+
+ public long insertOrder() {
+ return insertOrder;
+ }
+
+ public long getInsertOrder() {
+ return insertOrder();
+ }
+
+ public Priority priority() {
+ return priority;
+ }
+
+ public Priority getPriority() {
+ return priority();
+ }
+
+ public Text source() {
+ return source;
+ }
+
+ public Text getSource() {
+ return source();
+ }
+
+ public long timeInQueueInMillis() {
+ return timeInQueue;
+ }
+
+ public long getTimeInQueueInMillis() {
+ return timeInQueueInMillis();
+ }
+
+ public TimeValue getTimeInQueue() {
+ return new TimeValue(getTimeInQueueInMillis());
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ insertOrder = in.readVLong();
+ priority = Priority.fromByte(in.readByte());
+ source = in.readText();
+ timeInQueue = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(insertOrder);
+ out.writeByte(priority.value());
+ out.writeText(source);
+ out.writeVLong(timeInQueue);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java
new file mode 100644
index 0000000..b537c44
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.ElementType.PARAMETER;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+
+@BindingAnnotation
+@Target({FIELD, PARAMETER})
+@Retention(RUNTIME)
+@Documented
+public @interface ClusterDynamicSettings {
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java
new file mode 100644
index 0000000..200855d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.cluster.InternalClusterInfoService;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.*;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import org.elasticsearch.indices.fielddata.breaker.InternalCircuitBreakerService;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.indices.ttl.IndicesTTLService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ */
+public class ClusterDynamicSettingsModule extends AbstractModule {
+
+ private final DynamicSettings clusterDynamicSettings;
+
+ public ClusterDynamicSettingsModule() {
+ clusterDynamicSettings = new DynamicSettings();
+ clusterDynamicSettings.addDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES);
+ clusterDynamicSettings.addDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*");
+ clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT);
+ clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, Validator.FLOAT);
+ clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT);
+ clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT);
+ clusterDynamicSettings.addDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER);
+ clusterDynamicSettings.addDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE);
+ clusterDynamicSettings.addDynamicSetting(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION);
+ clusterDynamicSettings.addDynamicSetting(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION);
+ clusterDynamicSettings.addDynamicSetting(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION);
+ clusterDynamicSettings.addDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, Validator.INTEGER);
+ clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*");
+ clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*");
+ clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*");
+ clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_SIZE);
+ clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_EXPIRE, Validator.TIME);
+ clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_TYPE);
+ clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
+ clusterDynamicSettings.addDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
+ clusterDynamicSettings.addDynamicSetting(MetaData.SETTING_READ_ONLY);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.BYTES_SIZE);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, Validator.BYTES_SIZE);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_COMPRESS);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
+ clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC, Validator.BYTES_SIZE);
+ clusterDynamicSettings.addDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*");
+ clusterDynamicSettings.addDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
+ clusterDynamicSettings.addDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);
+ clusterDynamicSettings.addDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
+ clusterDynamicSettings.addDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
+ clusterDynamicSettings.addDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED);
+ clusterDynamicSettings.addDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME);
+ clusterDynamicSettings.addDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED);
+ clusterDynamicSettings.addDynamicSetting(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, Validator.MEMORY_SIZE);
+ clusterDynamicSettings.addDynamicSetting(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
+ clusterDynamicSettings.addDynamicSetting(DestructiveOperations.REQUIRES_NAME);
+ }
+
+ public void addDynamicSettings(String... settings) {
+ clusterDynamicSettings.addDynamicSettings(settings);
+ }
+
+ public void addDynamicSetting(String setting, Validator validator) {
+ clusterDynamicSettings.addDynamicSetting(setting, validator);
+ }
+
+
+ @Override
+ protected void configure() {
+ bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings);
+ }
+}
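
Note: a plugin-style consumer can register its own runtime-updatable settings against this module before injection. A minimal sketch, assuming a hypothetical setting name (the module and Validator APIs are exactly those added in this patch):

    ClusterDynamicSettingsModule module = new ClusterDynamicSettingsModule();
    // Hypothetical plugin setting; validated as a bytes size (e.g. "20mb").
    module.addDynamicSetting("myplugin.throttle.max_bytes_per_sec", Validator.BYTES_SIZE);
    // Wildcard registration; Validator.EMPTY is applied, i.e. no checks.
    module.addDynamicSettings("myplugin.cache.*");
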
diff --git a/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java b/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java
new file mode 100644
index 0000000..1fb240f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.regex.Regex;
+
+import java.util.Map;
+
+/**
+ */
+public class DynamicSettings {
+
+ private ImmutableMap<String, Validator> dynamicSettings = ImmutableMap.of();
+
+ public boolean hasDynamicSetting(String key) {
+ for (String dynamicSetting : dynamicSettings.keySet()) {
+ if (Regex.simpleMatch(dynamicSetting, key)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public String validateDynamicSetting(String dynamicSetting, String value) {
+ for (Map.Entry<String, Validator> setting : dynamicSettings.entrySet()) {
+ if (Regex.simpleMatch(setting.getKey(), dynamicSetting)) { // registered key is the (possibly wildcard) pattern
+ return setting.getValue().validate(dynamicSetting, value);
+ }
+ }
+ return null;
+ }
+
+ public synchronized void addDynamicSetting(String setting, Validator validator) {
+ MapBuilder<String, Validator> updatedSettings = MapBuilder.newMapBuilder(dynamicSettings);
+ updatedSettings.put(setting, validator);
+ dynamicSettings = updatedSettings.immutableMap();
+ }
+
+ public synchronized void addDynamicSetting(String setting) {
+ addDynamicSetting(setting, Validator.EMPTY);
+ }
+
+
+ public synchronized void addDynamicSettings(String... settings) {
+ MapBuilder<String, Validator> updatedSettings = MapBuilder.newMapBuilder(dynamicSettings);
+ for (String setting : settings) {
+ updatedSettings.put(setting, Validator.EMPTY);
+ }
+ dynamicSettings = updatedSettings.immutableMap();
+ }
+
+}
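
A quick sketch of how registration and lookup interact, using only the methods defined above. A key registered with a trailing "*" goes through Regex.simpleMatch, so any key under that prefix counts as dynamic; validateDynamicSetting returns null for a valid value and a failure message otherwise:

    DynamicSettings settings = new DynamicSettings();
    settings.addDynamicSetting("indices.ttl.interval", Validator.TIME);
    settings.addDynamicSetting("threadpool.*");                     // Validator.EMPTY, no checks
    settings.hasDynamicSetting("threadpool.search.size");           // true, wildcard match
    settings.hasDynamicSetting("cluster.name");                     // false, never registered
    settings.validateDynamicSetting("indices.ttl.interval", "5x");  // non-null failure message from TIME
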
diff --git a/src/main/java/org/elasticsearch/cluster/settings/Validator.java b/src/main/java/org/elasticsearch/cluster/settings/Validator.java
new file mode 100644
index 0000000..ef37b21
--- /dev/null
+++ b/src/main/java/org/elasticsearch/cluster/settings/Validator.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.unit.TimeValue;
+
+import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
+import static org.elasticsearch.common.unit.MemorySizeValue.parseBytesSizeValueOrHeapRatio;
+
+
+/**
+ * Validates a setting, returning a failure message if applicable.
+ */
+public interface Validator {
+
+ String validate(String setting, String value);
+
+ public static final Validator EMPTY = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ return null;
+ }
+ };
+
+ public static final Validator TIME = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (TimeValue.parseTimeValue(value, null) == null) {
+ return "cannot parse value [" + value + "] as time";
+ }
+ } catch (ElasticsearchParseException ex) {
+ return "cannot parse value [" + value + "] as time";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator FLOAT = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ Float.parseFloat(value);
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as a float";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator NON_NEGATIVE_FLOAT = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (Float.parseFloat(value) < 0.0) {
+ return "the value of the setting " + setting + " must be a non negative float";
+ }
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as a double";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator DOUBLE = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ Double.parseDouble(value);
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as a double";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator NON_NEGATIVE_DOUBLE = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (Double.parseDouble(value) < 0.0) {
+ return "the value of the setting " + setting + " must be a non negative double";
+ }
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as a double";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator DOUBLE_GTE_2 = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (Double.parseDouble(value) < 2.0) {
+ return "the value of the setting " + setting + " must be >= 2.0";
+ }
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as a double";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator INTEGER = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ Integer.parseInt(value);
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as an integer";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator POSITIVE_INTEGER = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (Integer.parseInt(value) <= 0) {
+ return "the value of the setting " + setting + " must be a positive integer";
+ }
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as an integer";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator NON_NEGATIVE_INTEGER = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (Integer.parseInt(value) < 0) {
+ return "the value of the setting " + setting + " must be a non negative integer";
+ }
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as an integer";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator INTEGER_GTE_2 = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ if (Integer.parseInt(value) < 2) {
+ return "the value of the setting " + setting + " must be >= 2";
+ }
+ } catch (NumberFormatException ex) {
+ return "cannot parse value [" + value + "] as an integer";
+ }
+ return null;
+ }
+ };
+
+ public static final Validator BYTES_SIZE = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ parseBytesSizeValue(value);
+ } catch (ElasticsearchParseException ex) {
+ return ex.getMessage();
+ }
+ return null;
+ }
+ };
+
+ public static final Validator MEMORY_SIZE = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ try {
+ parseBytesSizeValueOrHeapRatio(value);
+ } catch (ElasticsearchParseException ex) {
+ return ex.getMessage();
+ }
+ return null;
+ }
+ };
+
+ public static final Validator BOOLEAN = new Validator() {
+ @Override
+ public String validate(String setting, String value) {
+ if (value != null && (Booleans.isExplicitFalse(value) || Booleans.isExplicitTrue(value))) {
+ return null;
+ }
+ return "cannot parse value [" + value + "] as a boolean";
+ }
+ };
+}
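
Because Validator is a one-method interface, call sites are not limited to the constants above. A sketch of a hypothetical port-range validator (not part of this patch) that could be registered via DynamicSettings.addDynamicSetting:

    Validator PORT = new Validator() {
        @Override
        public String validate(String setting, String value) {
            try {
                int port = Integer.parseInt(value);
                if (port < 1 || port > 65535) {
                    return "the value of the setting " + setting + " must be a port between 1 and 65535";
                }
            } catch (NumberFormatException ex) {
                return "cannot parse value [" + value + "] as an integer";
            }
            return null; // null signals a valid value
        }
    };
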
diff --git a/src/main/java/org/elasticsearch/common/Base64.java b/src/main/java/org/elasticsearch/common/Base64.java
new file mode 100644
index 0000000..7860279
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Base64.java
@@ -0,0 +1,2095 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import java.nio.charset.Charset;
+import java.util.Locale;
+
+/**
+ * <p>Encodes and decodes to and from Base64 notation.</p>
+ * <p>Homepage: <a href="http://iharder.net/base64">http://iharder.net/base64</a>.</p>
+ * <p/>
+ * <p>Example:</p>
+ * <p/>
+ * <code>String encoded = Base64.encode( myByteArray );</code>
+ * <br />
+ * <code>byte[] myByteArray = Base64.decode( encoded );</code>
+ * <p/>
+ * <p>The <tt>options</tt> parameter, which appears in a few places, is used to pass
+ * several pieces of information to the encoder. In the "higher level" methods such as
+ * encodeBytes( bytes, options ) the options parameter can be used to indicate such
+ * things as first gzipping the bytes before encoding them, not inserting linefeeds,
+ * and encoding using the URL-safe and Ordered dialects.</p>
+ * <p/>
+ * <p>Note, according to <a href="http://www.faqs.org/rfcs/rfc3548.html">RFC3548</a>,
+ * Section 2.1, implementations should not add line feeds unless explicitly told
+ * to do so. I've got Base64 set to this behavior now, although earlier versions
+ * broke lines by default.</p>
+ * <p/>
+ * <p>The constants defined in Base64 can be OR-ed together to combine options, so you
+ * might make a call like this:</p>
+ * <p/>
+ * <code>String encoded = Base64.encodeBytes( mybytes, Base64.GZIP | Base64.DO_BREAK_LINES );</code>
+ * <p>to compress the data before encoding it and then making the output have newline characters.</p>
+ * <p>Also...</p>
+ * <code>String encoded = Base64.encodeBytes( crazyString.getBytes() );</code>
+ * <p/>
+ * <p/>
+ * <p/>
+ * <p>
+ * Change Log:
+ * </p>
+ * <ul>
+ * <li>v2.3.7 - Fixed subtle bug when base 64 input stream contained the
+ * value 01111111, which is an invalid base 64 character but should not
+ * throw an ArrayIndexOutOfBoundsException either. Led to discovery of
+ * mishandling (or potential for better handling) of other bad input
+ * characters. You should now get an IOException if you try decoding
+ * something that has bad characters in it.</li>
+ * <li>v2.3.6 - Fixed bug when breaking lines and the final byte of the encoded
+ * string ended in the last column; the buffer was not properly shrunk and
+ * contained an extra (null) byte that made it into the string.</li>
+ * <li>v2.3.5 - Fixed bug in {@link #encodeFromFile} where estimated buffer size
+ * was wrong for files of size 31, 34, and 37 bytes.</li>
+ * <li>v2.3.4 - Fixed bug when working with gzipped streams whereby flushing
+ * the Base64.OutputStream closed the Base64 encoding (by padding with equals
+ * signs) too soon. Also added an option to suppress the automatic decoding
+ * of gzipped streams. Also added experimental support for specifying a
+ * class loader when using the
+ * {@link #decodeToObject(java.lang.String, int, java.lang.ClassLoader)}
+ * method.</li>
+ * <li>v2.3.3 - Changed default char encoding to US-ASCII which reduces the internal Java
+ * footprint with its CharEncoders and so forth. Fixed some javadocs that were
+ * inconsistent. Removed imports and specified things like java.io.IOException
+ * explicitly inline.</li>
+ * <li>v2.3.2 - Reduced memory footprint! Finally refined the "guessing" of how big the
+ * final encoded data will be so that the code doesn't have to create two output
+ * arrays: an oversized initial one and then a final, exact-sized one. Big win
+ * when using the {@link #encodeBytesToBytes(byte[])} family of methods (and not
+ * using the gzip options which uses a different mechanism with streams and stuff).</li>
+ * <li>v2.3.1 - Added {@link #encodeBytesToBytes(byte[], int, int, int)} and some
+ * similar helper methods to be more efficient with memory by not returning a
+ * String but just a byte array.</li>
+ * <li>v2.3 - <strong>This is not a drop-in replacement!</strong> This is two years of comments
+ * and bug fixes queued up and finally executed. Thanks to everyone who sent
+ * me stuff, and I'm sorry I wasn't able to distribute your fixes to everyone else.
+ * Much bad coding was cleaned up including throwing exceptions where necessary
+ * instead of returning null values or something similar. Here are some changes
+ * that may affect you:
+ * <ul>
+ * <li><em>Does not break lines, by default.</em> This is to keep in compliance with
+ * <a href="http://www.faqs.org/rfcs/rfc3548.html">RFC3548</a>.</li>
+ * <li><em>Throws exceptions instead of returning null values.</em> Because some operations
+ * (especially those that may permit the GZIP option) use IO streams, there
+ * is a possibility of a java.io.IOException being thrown. After some discussion and
+ * thought, I've changed the behavior of the methods to throw java.io.IOExceptions
+ * rather than return null if ever there's an error. I think this is more
+ * appropriate, though it will require some changes to your code. Sorry,
+ * it should have been done this way to begin with.</li>
+ * <li><em>Removed all references to System.out, System.err, and the like.</em>
+ * Shame on me. All I can say is sorry they were ever there.</li>
+ * <li><em>Throws NullPointerExceptions and IllegalArgumentExceptions</em> as needed
+ * such as when passed arrays are null or offsets are invalid.</li>
+ * <li>Cleaned up as much javadoc as I could to avoid any javadoc warnings.
+ * This was especially annoying before for people who were thorough in their
+ * own projects and then had gobs of javadoc warnings on this file.</li>
+ * </ul>
+ * <li>v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
+ * when using very small files (~&lt; 40 bytes).</li>
+ * <li>v2.2 - Added some helper methods for encoding/decoding directly from
+ * one file to the next. Also added a main() method to support command line
+ * encoding/decoding from one file to the next. Also added these Base64 dialects:
+ * <ol>
+ * <li>The default is RFC3548 format.</li>
+ * <li>Calling Base64.setFormat(Base64.BASE64_FORMAT.URLSAFE_FORMAT) generates
+ * URL and file name friendly format as described in Section 4 of RFC3548.
+ * http://www.faqs.org/rfcs/rfc3548.html</li>
+ * <li>Calling Base64.setFormat(Base64.BASE64_FORMAT.ORDERED_FORMAT) generates
+ * URL and file name friendly format that preserves lexical ordering as described
+ * in http://www.faqs.org/qa/rfcc-1940.html</li>
+ * </ol>
+ * Special thanks to Jim Kellerman at <a href="http://www.powerset.com/">http://www.powerset.com/</a>
+ * for contributing the new Base64 dialects.
+ * </li>
+ * <p/>
+ * <li>v2.1 - Cleaned up javadoc comments and unused variables and methods. Added
+ * some convenience methods for reading and writing to and from files.</li>
+ * <li>v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on systems
+ * with other encodings (like EBCDIC).</li>
+ * <li>v2.0.1 - Fixed an error when decoding a single byte, that is, when the
+ * encoded data was a single byte.</li>
+ * <li>v2.0 - I got rid of methods that used booleans to set options.
+ * Now everything is more consolidated and cleaner. The code now detects
+ * when data that's being decoded is gzip-compressed and will decompress it
+ * automatically. Generally things are cleaner. You'll probably have to
+ * change some method calls that you were making to support the new
+ * options format (<tt>int</tt>s that you "OR" together).</li>
+ * <li>v1.5.1 - Fixed bug when decompressing and decoding to a
+ * byte[] using <tt>decode( String s, boolean gzipCompressed )</tt>.
+ * Added the ability to "suspend" encoding in the Output Stream so
+ * you can turn on and off the encoding if you need to embed base64
+ * data in an otherwise "normal" stream (like an XML file).</li>
+ * <li>v1.5 - Output stream passes on the flush() command but doesn't do anything itself.
+ * This helps when using GZIP streams.
+ * Added the ability to GZip-compress objects before encoding them.</li>
+ * <li>v1.4 - Added helper methods to read/write files.</li>
+ * <li>v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.</li>
+ * <li>v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input stream
+ * where last buffer being read, if not completely full, was not returned.</li>
+ * <li>v1.3.4 - Fixed when "improperly padded stream" error was thrown at the wrong time.</li>
+ * <li>v1.3.3 - Fixed I/O streams which were totally messed up.</li>
+ * </ul>
+ * <p/>
+ * <p>
+ * I am placing this code in the Public Domain. Do with it as you will.
+ * This software comes with no guarantees or warranties but with
+ * plenty of well-wishing instead!
+ * Please visit <a href="http://iharder.net/base64">http://iharder.net/base64</a>
+ * periodically to check for updates or to contribute improvements.
+ * </p>
+ *
+ * @author Robert Harder
+ * @author rob@iharder.net
+ * @version 2.3.7
+ */
+public class Base64 {
+
+/* ******** P U B L I C F I E L D S ******** */
+
+
+ /**
+ * No options specified. Value is zero.
+ */
+ public final static int NO_OPTIONS = 0;
+
+ /**
+ * Specify encoding in first bit. Value is one.
+ */
+ public final static int ENCODE = 1;
+
+
+ /**
+ * Specify decoding in first bit. Value is zero.
+ */
+ public final static int DECODE = 0;
+
+
+ /**
+ * Specify that data should be gzip-compressed in second bit. Value is two.
+ */
+ public final static int GZIP = 2;
+
+ /**
+ * Specify that gzipped data should <em>not</em> be automatically gunzipped.
+ */
+ public final static int DONT_GUNZIP = 4;
+
+
+ /**
+ * Do break lines when encoding. Value is 8.
+ */
+ public final static int DO_BREAK_LINES = 8;
+
+ /**
+ * Encode using Base64-like encoding that is URL- and Filename-safe as described
+ * in Section 4 of RFC3548:
+ * <a href="http://www.faqs.org/rfcs/rfc3548.html">http://www.faqs.org/rfcs/rfc3548.html</a>.
+ * It is important to note that data encoded this way is <em>not</em> officially valid Base64,
+ * or at the very least should not be called Base64 without also specifying that it
+ * was encoded using the URL- and Filename-safe dialect.
+ */
+ public final static int URL_SAFE = 16;
+
+
+ /**
+ * Encode using the special "ordered" dialect of Base64 described here:
+ * <a href="http://www.faqs.org/qa/rfcc-1940.html">http://www.faqs.org/qa/rfcc-1940.html</a>.
+ */
+ public final static int ORDERED = 32;
+
+
+/* ******** P R I V A T E F I E L D S ******** */
+
+
+ /**
+ * Maximum line length (76) of Base64 output.
+ */
+ private final static int MAX_LINE_LENGTH = 76;
+
+
+ /**
+ * The equals sign (=) as a byte.
+ */
+ private final static byte EQUALS_SIGN = (byte) '=';
+
+
+ /**
+ * The new line character (\n) as a byte.
+ */
+ private final static byte NEW_LINE = (byte) '\n';
+
+
+ /**
+ * Preferred encoding.
+ */
+ public final static Charset PREFERRED_ENCODING = Charset.forName("US-ASCII");
+
+
+ private final static byte WHITE_SPACE_ENC = -5; // Indicates white space in encoding
+ private final static byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding
+
+
+/* ******** S T A N D A R D B A S E 6 4 A L P H A B E T ******** */
+
+ /**
+ * The 64 valid Base64 values.
+ */
+ /* Host platform may be something funny like EBCDIC, so we hardcode these values. */
+ private final static byte[] _STANDARD_ALPHABET = {
+ (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+ (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
+ (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
+ (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
+ (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
+ (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
+ (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
+ (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z',
+ (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5',
+ (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '+', (byte) '/'
+ };
+
+
+ /**
+ * Translates a Base64 value to either its 6-bit reconstruction value
+ * or a negative number indicating some other meaning.
+ */
+ private final static byte[] _STANDARD_DECODABET = {
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8
+ -5, -5, // Whitespace: Tab and Linefeed
+ -9, -9, // Decimal 11 - 12
+ -5, // Whitespace: Carriage Return
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26
+ -9, -9, -9, -9, -9, // Decimal 27 - 31
+ -5, // Whitespace: Space
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42
+ 62, // Plus sign at decimal 43
+ -9, -9, -9, // Decimal 44 - 46
+ 63, // Slash at decimal 47
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine
+ -9, -9, -9, // Decimal 58 - 60
+ -1, // Equals sign at decimal 61
+ -9, -9, -9, // Decimal 62 - 64
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N'
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z'
+ -9, -9, -9, -9, -9, -9, // Decimal 91 - 96
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm'
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z'
+ -9, -9, -9, -9, -9 // Decimal 123 - 127
+ , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255
+ };
+
+
+/* ******** U R L S A F E B A S E 6 4 A L P H A B E T ******** */
+
+ /**
+ * Used in the URL- and Filename-safe dialect described in Section 4 of RFC3548:
+ * <a href="http://www.faqs.org/rfcs/rfc3548.html">http://www.faqs.org/rfcs/rfc3548.html</a>.
+ * Notice that the last two bytes become "hyphen" and "underscore" instead of "plus" and "slash."
+ */
+ private final static byte[] _URL_SAFE_ALPHABET = {
+ (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+ (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
+ (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
+ (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
+ (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
+ (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
+ (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
+ (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z',
+ (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5',
+ (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '-', (byte) '_'
+ };
+
+ /**
+ * Used in decoding URL- and Filename-safe dialects of Base64.
+ */
+ private final static byte[] _URL_SAFE_DECODABET = {
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8
+ -5, -5, // Whitespace: Tab and Linefeed
+ -9, -9, // Decimal 11 - 12
+ -5, // Whitespace: Carriage Return
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26
+ -9, -9, -9, -9, -9, // Decimal 27 - 31
+ -5, // Whitespace: Space
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42
+ -9, // Plus sign at decimal 43
+ -9, // Decimal 44
+ 62, // Minus sign at decimal 45
+ -9, // Decimal 46
+ -9, // Slash at decimal 47
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine
+ -9, -9, -9, // Decimal 58 - 60
+ -1, // Equals sign at decimal 61
+ -9, -9, -9, // Decimal 62 - 64
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N'
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z'
+ -9, -9, -9, -9, // Decimal 91 - 94
+ 63, // Underscore at decimal 95
+ -9, // Decimal 96
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm'
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z'
+ -9, -9, -9, -9, -9 // Decimal 123 - 127
+ , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255
+ };
+
+
+/* ******** O R D E R E D B A S E 6 4 A L P H A B E T ******** */
+
+ /**
+ * I don't get the point of this technique, but someone requested it,
+ * and it is described here:
+ * <a href="http://www.faqs.org/qa/rfcc-1940.html">http://www.faqs.org/qa/rfcc-1940.html</a>.
+ */
+ private final static byte[] _ORDERED_ALPHABET = {
+ (byte) '-',
+ (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4',
+ (byte) '5', (byte) '6', (byte) '7', (byte) '8', (byte) '9',
+ (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
+ (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
+ (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
+ (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
+ (byte) '_',
+ (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
+ (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
+ (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
+ (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z'
+ };
+
+ /**
+ * Used in decoding the "ordered" dialect of Base64.
+ */
+ private final static byte[] _ORDERED_DECODABET = {
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8
+ -5, -5, // Whitespace: Tab and Linefeed
+ -9, -9, // Decimal 11 - 12
+ -5, // Whitespace: Carriage Return
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26
+ -9, -9, -9, -9, -9, // Decimal 27 - 31
+ -5, // Whitespace: Space
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42
+ -9, // Plus sign at decimal 43
+ -9, // Decimal 44
+ 0, // Minus sign at decimal 45
+ -9, // Decimal 46
+ -9, // Slash at decimal 47
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // Numbers zero through nine
+ -9, -9, -9, // Decimal 58 - 60
+ -1, // Equals sign at decimal 61
+ -9, -9, -9, // Decimal 62 - 64
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, // Letters 'A' through 'M'
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, // Letters 'N' through 'Z'
+ -9, -9, -9, -9, // Decimal 91 - 94
+ 37, // Underscore at decimal 95
+ -9, // Decimal 96
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, // Letters 'a' through 'm'
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, // Letters 'n' through 'z'
+ -9, -9, -9, -9, -9 // Decimal 123 - 127
+ , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243
+ -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255
+ };
+
+
+/* ******** D E T E R M I N E W H I C H A L P H A B E T ******** */
+
+
+ /**
+ * Returns one of the _SOMETHING_ALPHABET byte arrays depending on
+ * the options specified.
+ * It's possible, though silly, to specify ORDERED <b>and</b> URLSAFE
+ * in which case one of them will be picked, though there is
+ * no guarantee as to which one will be picked.
+ */
+ private final static byte[] getAlphabet(int options) {
+ if ((options & URL_SAFE) == URL_SAFE) {
+ return _URL_SAFE_ALPHABET;
+ } else if ((options & ORDERED) == ORDERED) {
+ return _ORDERED_ALPHABET;
+ } else {
+ return _STANDARD_ALPHABET;
+ }
+ } // end getAlphabet
+
+
+ /**
+ * Returns one of the _SOMETHING_DECODABET byte arrays depending on
+ * the options specified.
+ * It's possible, though silly, to specify ORDERED and URL_SAFE
+ * in which case one of them will be picked, though there is
+ * no guarantee as to which one will be picked.
+ */
+ private final static byte[] getDecodabet(int options) {
+ if ((options & URL_SAFE) == URL_SAFE) {
+ return _URL_SAFE_DECODABET;
+ } else if ((options & ORDERED) == ORDERED) {
+ return _ORDERED_DECODABET;
+ } else {
+ return _STANDARD_DECODABET;
+ }
+ } // end getDecodabet
+
+
+ /**
+ * Defeats instantiation.
+ */
+ private Base64() {
+ }
+
+
+/* ******** E N C O D I N G M E T H O D S ******** */
+
+
+ /**
+ * Encodes up to the first three bytes of array <var>threeBytes</var>
+ * and returns a four-byte array in Base64 notation.
+ * The actual number of significant bytes in your array is
+ * given by <var>numSigBytes</var>.
+ * The array <var>threeBytes</var> needs only be as big as
+ * <var>numSigBytes</var>.
+ * Code can reuse a byte array by passing a four-byte array as <var>b4</var>.
+ *
+ * @param b4 A reusable byte array to reduce array instantiation
+ * @param threeBytes the array to convert
+ * @param numSigBytes the number of significant bytes in your array
+ * @return four byte array in Base64 notation.
+ * @since 1.5.1
+ */
+ private static byte[] encode3to4(byte[] b4, byte[] threeBytes, int numSigBytes, int options) {
+ encode3to4(threeBytes, 0, numSigBytes, b4, 0, options);
+ return b4;
+ } // end encode3to4
+
+
+ /**
+ * <p>Encodes up to three bytes of the array <var>source</var>
+ * and writes the resulting four Base64 bytes to <var>destination</var>.
+ * The source and destination arrays can be manipulated
+ * anywhere along their length by specifying
+ * <var>srcOffset</var> and <var>destOffset</var>.
+ * This method does not check to make sure your arrays
+ * are large enough to accommodate <var>srcOffset</var> + 3 for
+ * the <var>source</var> array or <var>destOffset</var> + 4 for
+ * the <var>destination</var> array.
+ * The actual number of significant bytes in your array is
+ * given by <var>numSigBytes</var>.</p>
+ * <p>This is the lowest level of the encoding methods with
+ * all possible parameters.</p>
+ *
+ * @param source the array to convert
+ * @param srcOffset the index where conversion begins
+ * @param numSigBytes the number of significant bytes in your array
+ * @param destination the array to hold the conversion
+ * @param destOffset the index where output will be put
+ * @return the <var>destination</var> array
+ * @since 1.3
+ */
+ private static byte[] encode3to4(
+ byte[] source, int srcOffset, int numSigBytes,
+ byte[] destination, int destOffset, int options) {
+
+ byte[] ALPHABET = getAlphabet(options);
+
+ // 1 2 3
+ // 01234567890123456789012345678901 Bit position
+ // --------000000001111111122222222 Array position from threeBytes
+ // --------| || || || | Six bit groups to index ALPHABET
+ // >>18 >>12 >> 6 >> 0 Right shift necessary
+ // 0x3f 0x3f 0x3f Additional AND
+
+ // Create buffer with zero-padding if there are only one or two
+ // significant bytes passed in the array.
+ // We have to shift left 24 in order to flush out the 1's that appear
+ // when Java treats a value as negative that is cast from a byte to an int.
+ int inBuff = (numSigBytes > 0 ? ((source[srcOffset] << 24) >>> 8) : 0)
+ | (numSigBytes > 1 ? ((source[srcOffset + 1] << 24) >>> 16) : 0)
+ | (numSigBytes > 2 ? ((source[srcOffset + 2] << 24) >>> 24) : 0);
+
+ switch (numSigBytes) {
+ case 3:
+ destination[destOffset] = ALPHABET[(inBuff >>> 18)];
+ destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
+ destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f];
+ destination[destOffset + 3] = ALPHABET[(inBuff) & 0x3f];
+ return destination;
+
+ case 2:
+ destination[destOffset] = ALPHABET[(inBuff >>> 18)];
+ destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
+ destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f];
+ destination[destOffset + 3] = EQUALS_SIGN;
+ return destination;
+
+ case 1:
+ destination[destOffset] = ALPHABET[(inBuff >>> 18)];
+ destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
+ destination[destOffset + 2] = EQUALS_SIGN;
+ destination[destOffset + 3] = EQUALS_SIGN;
+ return destination;
+
+ default:
+ return destination;
+ } // end switch
+ } // end encode3to4
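+
+ // Worked example of the packing above (illustrative note, not upstream text):
+ // the three bytes "Man" (77, 97, 110) form the 24-bit buffer
+ // 010011 010110 000101 101110; its four 6-bit groups index the standard
+ // alphabet at 19, 22, 5 and 46, yielding the output quartet "TWFu".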
+
+
+ /**
+ * Performs Base64 encoding on the <code>raw</code> ByteBuffer,
+ * writing it to the <code>encoded</code> ByteBuffer.
+ * This is an experimental feature. Currently it does not
+ * pass along any options (such as {@link #DO_BREAK_LINES}
+ * or {@link #GZIP}).
+ *
+ * @param raw input buffer
+ * @param encoded output buffer
+ * @since 2.3
+ */
+ public static void encode(java.nio.ByteBuffer raw, java.nio.ByteBuffer encoded) {
+ byte[] raw3 = new byte[3];
+ byte[] enc4 = new byte[4];
+
+ while (raw.hasRemaining()) {
+ int rem = Math.min(3, raw.remaining());
+ raw.get(raw3, 0, rem);
+ Base64.encode3to4(enc4, raw3, rem, Base64.NO_OPTIONS);
+ encoded.put(enc4);
+ } // end input remaining
+ }
+
+
+ /**
+ * Performs Base64 encoding on the <code>raw</code> ByteBuffer,
+ * writing it to the <code>encoded</code> CharBuffer.
+ * This is an experimental feature. Currently it does not
+ * pass along any options (such as {@link #DO_BREAK_LINES}
+ * or {@link #GZIP}).
+ *
+ * @param raw input buffer
+ * @param encoded output buffer
+ * @since 2.3
+ */
+ public static void encode(java.nio.ByteBuffer raw, java.nio.CharBuffer encoded) {
+ byte[] raw3 = new byte[3];
+ byte[] enc4 = new byte[4];
+
+ while (raw.hasRemaining()) {
+ int rem = Math.min(3, raw.remaining());
+ raw.get(raw3, 0, rem);
+ Base64.encode3to4(enc4, raw3, rem, Base64.NO_OPTIONS);
+ for (int i = 0; i < 4; i++) {
+ encoded.put((char) (enc4[i] & 0xFF));
+ }
+ } // end input remaining
+ }
+
+
+ /**
+ * Serializes an object and returns the Base64-encoded
+ * version of that serialized object.
+ * <p/>
+ * <p>As of v 2.3, if the object
+ * cannot be serialized or there is another error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned a null value, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ * <p/>
+ * The object is not GZip-compressed before being encoded.
+ *
+ * @param serializableObject The object to encode
+ * @return The Base64-encoded object
+ * @throws java.io.IOException if there is an error
+ * @throws NullPointerException if serializableObject is null
+ * @since 1.4
+ */
+ public static String encodeObject(java.io.Serializable serializableObject)
+ throws java.io.IOException {
+ return encodeObject(serializableObject, NO_OPTIONS);
+ } // end encodeObject
+
+
+ /**
+ * Serializes an object and returns the Base64-encoded
+ * version of that serialized object.
+ * <p/>
+ * <p>As of v 2.3, if the object
+ * cannot be serialized or there is another error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned a null value, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ * <p/>
+ * The object is not GZip-compressed before being encoded.
+ * <p/>
+ * Example options:<pre>
+ * GZIP: gzip-compresses object before encoding it.
+ * DO_BREAK_LINES: break lines at 76 characters
+ * </pre>
+ * <p/>
+ * Example: <code>encodeObject( myObj, Base64.GZIP )</code> or
+ * <p/>
+ * Example: <code>encodeObject( myObj, Base64.GZIP | Base64.DO_BREAK_LINES )</code>
+ *
+ * @param serializableObject The object to encode
+ * @param options Specified options
+ * @return The Base64-encoded object
+ * @throws java.io.IOException if there is an error
+ * @see Base64#GZIP
+ * @see Base64#DO_BREAK_LINES
+ * @since 2.0
+ */
+ public static String encodeObject(java.io.Serializable serializableObject, int options)
+ throws java.io.IOException {
+
+ if (serializableObject == null) {
+ throw new NullPointerException("Cannot serialize a null object.");
+ } // end if: null
+
+ // Streams
+ java.io.ByteArrayOutputStream baos = null;
+ java.io.OutputStream b64os = null;
+ java.util.zip.GZIPOutputStream gzos = null;
+ java.io.ObjectOutputStream oos = null;
+
+
+ try {
+ // ObjectOutputStream -> (GZIP) -> Base64 -> ByteArrayOutputStream
+ baos = new java.io.ByteArrayOutputStream();
+ b64os = new Base64.OutputStream(baos, ENCODE | options);
+ if ((options & GZIP) != 0) {
+ // Gzip
+ gzos = new java.util.zip.GZIPOutputStream(b64os);
+ oos = new java.io.ObjectOutputStream(gzos);
+ } else {
+ // Not gzipped
+ oos = new java.io.ObjectOutputStream(b64os);
+ }
+ oos.writeObject(serializableObject);
+ } // end try
+ catch (java.io.IOException e) {
+ // Catch it and then throw it immediately so that
+ // the finally{} block is called for cleanup.
+ throw e;
+ } // end catch
+ finally {
+ try {
+ oos.close();
+ } catch (Exception e) {
+ }
+ try {
+ gzos.close();
+ } catch (Exception e) {
+ }
+ try {
+ b64os.close();
+ } catch (Exception e) {
+ }
+ try {
+ baos.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ // Return value according to relevant encoding.
+ return new String(baos.toByteArray(), PREFERRED_ENCODING);
+
+ } // end encodeObject
+
+
+ /**
+ * Encodes a byte array into Base64 notation.
+ * Does not GZip-compress data.
+ *
+ * @param source The data to convert
+ * @return The data in Base64-encoded form
+ * @throws NullPointerException if source array is null
+ * @since 1.4
+ */
+ public static String encodeBytes(byte[] source) {
+ // Since we're not going to have the GZIP encoding turned on,
+ // we're not going to have a java.io.IOException thrown, so
+ // we should not force the user to have to catch it.
+ String encoded = null;
+ try {
+ encoded = encodeBytes(source, 0, source.length, NO_OPTIONS);
+ } catch (java.io.IOException ex) {
+ assert false : ex.getMessage();
+ } // end catch
+ assert encoded != null;
+ return encoded;
+ } // end encodeBytes
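+
+ // Usage sketch (illustrative, not upstream text): round-trip a small payload.
+ //
+ //     byte[] data = "hello".getBytes(Base64.PREFERRED_ENCODING);
+ //     String b64 = Base64.encodeBytes(data); // "aGVsbG8="
+ //     byte[] back = Base64.decode(b64);      // the original five bytes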
+
+
+ /**
+ * Encodes a byte array into Base64 notation.
+ * <p>
+ * Example options:<pre>
+ * GZIP: gzip-compresses object before encoding it.
+ * DO_BREAK_LINES: break lines at 76 characters
+ * <i>Note: Technically, this makes your encoding non-compliant.</i>
+ * </pre>
+ * <p>
+ * Example: <code>encodeBytes( myData, Base64.GZIP )</code> or
+ * <p>
+ * Example: <code>encodeBytes( myData, Base64.GZIP | Base64.DO_BREAK_LINES )</code>
+ * <p/>
+ * <p/>
+ * <p>As of v 2.3, if there is an error with the GZIP stream,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned a null value, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param source The data to convert
+ * @param options Specified options
+ * @return The Base64-encoded data as a String
+ * @throws java.io.IOException if there is an error
+ * @throws NullPointerException if source array is null
+ * @see Base64#GZIP
+ * @see Base64#DO_BREAK_LINES
+ * @since 2.0
+ */
+ public static String encodeBytes(byte[] source, int options) throws java.io.IOException {
+ return encodeBytes(source, 0, source.length, options);
+ } // end encodeBytes
+
+
+ /**
+ * Encodes a byte array into Base64 notation.
+ * Does not GZip-compress data.
+ * <p/>
+ * <p>As of v 2.3, if there is an error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned a null value, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param source The data to convert
+ * @param off Offset in array where conversion should begin
+ * @param len Length of data to convert
+ * @return The Base64-encoded data as a String
+ * @throws NullPointerException if source array is null
+ * @throws IllegalArgumentException if source array, offset, or length are invalid
+ * @since 1.4
+ */
+ public static String encodeBytes(byte[] source, int off, int len) {
+ // Since we're not going to have the GZIP encoding turned on,
+ // we're not going to have a java.io.IOException thrown, so
+ // we should not force the user to have to catch it.
+ String encoded = null;
+ try {
+ encoded = encodeBytes(source, off, len, NO_OPTIONS);
+ } catch (java.io.IOException ex) {
+ assert false : ex.getMessage();
+ } // end catch
+ assert encoded != null;
+ return encoded;
+ } // end encodeBytes
+
+
+ /**
+ * Encodes a byte array into Base64 notation.
+ * <p>
+ * Example options:<pre>
+ * GZIP: gzip-compresses object before encoding it.
+ * DO_BREAK_LINES: break lines at 76 characters
+ * <i>Note: Technically, this makes your encoding non-compliant.</i>
+ * </pre>
+ * <p>
+ * Example: <code>encodeBytes( myData, Base64.GZIP )</code> or
+ * <p>
+ * Example: <code>encodeBytes( myData, Base64.GZIP | Base64.DO_BREAK_LINES )</code>
+ * <p/>
+ * <p/>
+ * <p>As of v 2.3, if there is an error with the GZIP stream,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned a null value, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param source The data to convert
+ * @param off Offset in array where conversion should begin
+ * @param len Length of data to convert
+ * @param options Specified options
+ * @return The Base64-encoded data as a String
+ * @throws java.io.IOException if there is an error
+ * @throws NullPointerException if source array is null
+ * @throws IllegalArgumentException if source array, offset, or length are invalid
+ * @see Base64#GZIP
+ * @see Base64#DO_BREAK_LINES
+ * @since 2.0
+ */
+ public static String encodeBytes(byte[] source, int off, int len, int options) throws java.io.IOException {
+ byte[] encoded = encodeBytesToBytes(source, off, len, options);
+
+ // Return value according to relevant encoding.
+ return new String(encoded, PREFERRED_ENCODING);
+
+ } // end encodeBytes
+
+
+ /**
+ * Similar to {@link #encodeBytes(byte[])} but returns
+ * a byte array instead of instantiating a String. This is more efficient
+ * if you're working with I/O streams and have large data sets to encode.
+ *
+ * @param source The data to convert
+ * @return The Base64-encoded data as a byte[] (of ASCII characters)
+ * @throws NullPointerException if source array is null
+ * @since 2.3.1
+ */
+ public static byte[] encodeBytesToBytes(byte[] source) {
+ byte[] encoded = null;
+ try {
+ encoded = encodeBytesToBytes(source, 0, source.length, Base64.NO_OPTIONS);
+ } catch (java.io.IOException ex) {
+ assert false : "IOExceptions only come from GZipping, which is turned off: " + ex.getMessage();
+ }
+ return encoded;
+ }
+
+
+ /**
+ * Similar to {@link #encodeBytes(byte[], int, int, int)} but returns
+ * a byte array instead of instantiating a String. This is more efficient
+ * if you're working with I/O streams and have large data sets to encode.
+ *
+ * @param source The data to convert
+ * @param off Offset in array where conversion should begin
+ * @param len Length of data to convert
+ * @param options Specified options
+ * @return The Base64-encoded data as a String
+ * @throws java.io.IOException if there is an error
+ * @throws NullPointerException if source array is null
+ * @throws IllegalArgumentException if source array, offset, or length are invalid
+ * @see Base64#GZIP
+ * @see Base64#DO_BREAK_LINES
+ * @since 2.3.1
+ */
+ public static byte[] encodeBytesToBytes(byte[] source, int off, int len, int options) throws java.io.IOException {
+
+ if (source == null) {
+ throw new NullPointerException("Cannot serialize a null array.");
+ } // end if: null
+
+ if (off < 0) {
+ throw new IllegalArgumentException("Cannot have negative offset: " + off);
+ } // end if: off < 0
+
+ if (len < 0) {
+ throw new IllegalArgumentException("Cannot have length offset: " + len);
+ } // end if: len < 0
+
+ if (off + len > source.length) {
+ throw new IllegalArgumentException(
+ String.format(Locale.ROOT, "Cannot have offset of %d and length of %d with array of length %d", off, len, source.length));
+ } // end if: off + len > source.length
+
+
+ // Compress?
+ if ((options & GZIP) != 0) {
+ java.io.ByteArrayOutputStream baos = null;
+ java.util.zip.GZIPOutputStream gzos = null;
+ Base64.OutputStream b64os = null;
+
+ try {
+ // GZip -> Base64 -> ByteArray
+ baos = new java.io.ByteArrayOutputStream();
+ b64os = new Base64.OutputStream(baos, ENCODE | options);
+ gzos = new java.util.zip.GZIPOutputStream(b64os);
+
+ gzos.write(source, off, len);
+ gzos.close();
+ } // end try
+ catch (java.io.IOException e) {
+ // Catch it and then throw it immediately so that
+ // the finally{} block is called for cleanup.
+ throw e;
+ } // end catch
+ finally {
+ try {
+ gzos.close();
+ } catch (Exception e) {
+ }
+ try {
+ b64os.close();
+ } catch (Exception e) {
+ }
+ try {
+ baos.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ return baos.toByteArray();
+ } // end if: compress
+
+ // Else, don't compress. Better not to use streams at all then.
+ else {
+ boolean breakLines = (options & DO_BREAK_LINES) != 0;
+
+ //int len43 = len * 4 / 3;
+ //byte[] outBuff = new byte[ ( len43 ) // Main 4:3
+ // + ( (len % 3) > 0 ? 4 : 0 ) // Account for padding
+ // + (breakLines ? ( len43 / MAX_LINE_LENGTH ) : 0) ]; // New lines
+ // Try to determine more precisely how big the array needs to be.
+ // If we get it right, we don't have to do an array copy, and
+ // we save a bunch of memory.
+ int encLen = (len / 3) * 4 + (len % 3 > 0 ? 4 : 0); // Bytes needed for actual encoding
+ if (breakLines) {
+ encLen += encLen / MAX_LINE_LENGTH; // Plus extra newline characters
+ }
+ byte[] outBuff = new byte[encLen];
+
+
+ int d = 0;
+ int e = 0;
+ int len2 = len - 2;
+ int lineLength = 0;
+ for (; d < len2; d += 3, e += 4) {
+ encode3to4(source, d + off, 3, outBuff, e, options);
+
+ lineLength += 4;
+ if (breakLines && lineLength >= MAX_LINE_LENGTH) {
+ outBuff[e + 4] = NEW_LINE;
+ e++;
+ lineLength = 0;
+ } // end if: end of line
+ } // end for: each piece of array
+
+ if (d < len) {
+ encode3to4(source, d + off, len - d, outBuff, e, options);
+ e += 4;
+ } // end if: some padding needed
+
+
+ // Only resize array if we didn't guess it right.
+ if (e <= outBuff.length - 1) {
+ // If breaking lines and the last byte falls right at
+ // the line length (76 bytes per line), there will be
+ // one extra byte, and the array will need to be resized.
+ // Not too bad of an estimate on array size, I'd say.
+ byte[] finalOut = new byte[e];
+ System.arraycopy(outBuff, 0, finalOut, 0, e);
+ //System.err.println("Having to resize array from " + outBuff.length + " to " + e );
+ return finalOut;
+ } else {
+ //System.err.println("No need to resize array.");
+ return outBuff;
+ }
+
+ } // end else: don't compress
+
+ } // end encodeBytesToBytes
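+
+ // Sizing check for the estimate above (illustrative): len = 5 gives
+ // (5 / 3) * 4 + 4 = 8 output bytes without line breaks, matching the
+ // eight characters of "aGVsbG8=" produced for a five-byte input.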
+
+
+/* ******** D E C O D I N G M E T H O D S ******** */
+
+
+ /**
+ * Decodes four bytes from array <var>source</var>
+ * and writes the resulting bytes (up to three of them)
+ * to <var>destination</var>.
+ * The source and destination arrays can be manipulated
+ * anywhere along their length by specifying
+ * <var>srcOffset</var> and <var>destOffset</var>.
+ * This method does not check to make sure your arrays
+ * are large enough to accommodate <var>srcOffset</var> + 4 for
+ * the <var>source</var> array or <var>destOffset</var> + 3 for
+ * the <var>destination</var> array.
+ * This method returns the actual number of bytes that
+ * were converted from the Base64 encoding.
+ * <p>This is the lowest level of the decoding methods with
+ * all possible parameters.</p>
+ *
+ * @param source the array to convert
+ * @param srcOffset the index where conversion begins
+ * @param destination the array to hold the conversion
+ * @param destOffset the index where output will be put
+ * @param options alphabet type is pulled from this (standard, url-safe, ordered)
+ * @return the number of decoded bytes converted
+ * @throws NullPointerException if source or destination arrays are null
+ * @throws IllegalArgumentException if srcOffset or destOffset are invalid
+ * or there is not enough room in the array.
+ * @since 1.3
+ */
+ private static int decode4to3(
+ byte[] source, int srcOffset,
+ byte[] destination, int destOffset, int options) {
+
+ // Lots of error checking and exception throwing
+ if (source == null) {
+ throw new NullPointerException("Source array was null.");
+ } // end if
+ if (destination == null) {
+ throw new NullPointerException("Destination array was null.");
+ } // end if
+ if (srcOffset < 0 || srcOffset + 3 >= source.length) {
+ throw new IllegalArgumentException(String.format(Locale.ROOT,
+ "Source array with length %d cannot have offset of %d and still process four bytes.", source.length, srcOffset));
+ } // end if
+ if (destOffset < 0 || destOffset + 2 >= destination.length) {
+ throw new IllegalArgumentException(String.format(Locale.ROOT,
+ "Destination array with length %d cannot have offset of %d and still store three bytes.", destination.length, destOffset));
+ } // end if
+
+
+ byte[] DECODABET = getDecodabet(options);
+
+ // Example: Dk==
+ if (source[srcOffset + 2] == EQUALS_SIGN) {
+ // Two ways to do the same thing. Don't know which way I like best.
+ //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
+ // | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 );
+ int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
+ | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12);
+
+ destination[destOffset] = (byte) (outBuff >>> 16);
+ return 1;
+ }
+
+ // Example: DkL=
+ else if (source[srcOffset + 3] == EQUALS_SIGN) {
+ // Two ways to do the same thing. Don't know which way I like best.
+ //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
+ // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 )
+ // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 );
+ int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
+ | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12)
+ | ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6);
+
+ destination[destOffset] = (byte) (outBuff >>> 16);
+ destination[destOffset + 1] = (byte) (outBuff >>> 8);
+ return 2;
+ }
+
+ // Example: DkLE
+ else {
+ // Two ways to do the same thing. Don't know which way I like best.
+ //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
+ // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 )
+ // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 )
+ // | ( ( DECODABET[ source[ srcOffset + 3 ] ] << 24 ) >>> 24 );
+ int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
+ | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12)
+ | ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6)
+ | ((DECODABET[source[srcOffset + 3]] & 0xFF));
+
+
+ destination[destOffset] = (byte) (outBuff >> 16);
+ destination[destOffset + 1] = (byte) (outBuff >> 8);
+ destination[destOffset + 2] = (byte) (outBuff);
+
+ return 3;
+ }
+ } // end decode4to3
+
+
+ /**
+ * Low-level access to decoding ASCII characters in
+ * the form of a byte array. <strong>Ignores GUNZIP option, if
+ * it's set.</strong> This is not generally a recommended method,
+ * although it is used internally as part of the decoding process.
+ * Special case: if len = 0, an empty array is returned. Still,
+ * if you need more speed and reduced memory footprint (and aren't
+ * gzipping), consider this method.
+ *
+ * @param source The Base64 encoded data
+ * @return decoded data
+ * @since 2.3.1
+ */
+ public static byte[] decode(byte[] source)
+ throws java.io.IOException {
+ byte[] decoded = null;
+// try {
+ decoded = decode(source, 0, source.length, Base64.NO_OPTIONS);
+// } catch( java.io.IOException ex ) {
+// assert false : "IOExceptions only come from GZipping, which is turned off: " + ex.getMessage();
+// }
+ return decoded;
+ }
+
+
+ /**
+ * Low-level access to decoding ASCII characters in
+ * the form of a byte array. <strong>Ignores GUNZIP option, if
+ * it's set.</strong> This is not generally a recommended method,
+ * although it is used internally as part of the decoding process.
+ * Special case: if len = 0, an empty array is returned. Still,
+ * if you need more speed and reduced memory footprint (and aren't
+ * gzipping), consider this method.
+ *
+ * @param source The Base64 encoded data
+ * @param off The offset of where to begin decoding
+ * @param len The length of characters to decode
+ * @param options Can specify options such as alphabet type to use
+ * @return decoded data
+ * @throws java.io.IOException If bogus characters exist in source data
+ * @since 1.3
+ */
+ public static byte[] decode(byte[] source, int off, int len, int options)
+ throws java.io.IOException {
+
+ // Lots of error checking and exception throwing
+ if (source == null) {
+ throw new NullPointerException("Cannot decode null source array.");
+ } // end if
+ if (off < 0 || off + len > source.length) {
+ throw new IllegalArgumentException(String.format(Locale.ROOT,
+ "Source array with length %d cannot have offset of %d and process %d bytes.", source.length, off, len));
+ } // end if
+
+ if (len == 0) {
+ return new byte[0];
+ } else if (len < 4) {
+ throw new IllegalArgumentException(
+ "Base64-encoded string must have at least four characters, but length specified was " + len);
+ } // end if
+
+ byte[] DECODABET = getDecodabet(options);
+
+ int len34 = len * 3 / 4; // Estimate on array size
+ byte[] outBuff = new byte[len34]; // Upper limit on size of output
+ int outBuffPosn = 0; // Keep track of where we're writing
+
+ byte[] b4 = new byte[4]; // Four byte buffer from source, eliminating white space
+ int b4Posn = 0; // Keep track of four byte input buffer
+ int i = 0; // Source array counter
+ byte sbiDecode = 0; // Special value from DECODABET
+
+ for (i = off; i < off + len; i++) { // Loop through source
+
+ sbiDecode = DECODABET[source[i] & 0xFF];
+
+ // White space, Equals sign, or legit Base64 character
+ // Note the values such as -5 and -9 in the
+ // DECODABETs at the top of the file.
+ if (sbiDecode >= WHITE_SPACE_ENC) {
+ if (sbiDecode >= EQUALS_SIGN_ENC) {
+ b4[b4Posn++] = source[i]; // Save non-whitespace
+ if (b4Posn > 3) { // Time to decode?
+ outBuffPosn += decode4to3(b4, 0, outBuff, outBuffPosn, options);
+ b4Posn = 0;
+
+ // If that was the equals sign, break out of 'for' loop
+ if (source[i] == EQUALS_SIGN) {
+ break;
+ } // end if: equals sign
+ } // end if: quartet built
+ } // end if: equals sign or better
+ } // end if: white space, equals sign or better
+ else {
+ // There's a bad input character in the Base64 stream.
+ throw new java.io.IOException(String.format(Locale.ROOT,
+ "Bad Base64 input character decimal %d in array position %d", ((int) source[i]) & 0xFF, i));
+ } // end else:
+ } // each input character
+
+ byte[] out = new byte[outBuffPosn];
+ System.arraycopy(outBuff, 0, out, 0, outBuffPosn);
+ return out;
+ } // end decode
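+
+ // A minimal usage sketch for the low-level decoder above (the literal input
+ // and the US-ASCII charset are illustrative assumptions):
+ //
+ // byte[] ascii = "SGVsbG8=".getBytes("US-ASCII");
+ // byte[] raw = Base64.decode(ascii, 0, ascii.length, Base64.NO_OPTIONS);
+ // // raw now holds the five bytes of "Hello"; no gunzipping is attempted here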
+
+
+ /**
+ * Decodes data from Base64 notation, automatically
+ * detecting gzip-compressed data and decompressing it.
+ *
+ * @param s the string to decode
+ * @return the decoded data
+ * @throws java.io.IOException If there is a problem
+ * @since 1.4
+ */
+ public static byte[] decode(String s) throws java.io.IOException {
+ return decode(s, NO_OPTIONS);
+ }
+
+
+ /**
+ * Decodes data from Base64 notation, automatically
+ * detecting gzip-compressed data and decompressing it.
+ *
+ * @param s the string to decode
+ * @param options encode options such as URL_SAFE
+ * @return the decoded data
+ * @throws java.io.IOException if there is an error
+ * @throws NullPointerException if <tt>s</tt> is null
+ * @since 1.4
+ */
+ public static byte[] decode(String s, int options) throws java.io.IOException {
+
+ if (s == null) {
+ throw new NullPointerException("Input string was null.");
+ } // end if
+
+ byte[] bytes = s.getBytes(PREFERRED_ENCODING);
+
+ // Decode
+ bytes = decode(bytes, 0, bytes.length, options);
+
+ // Check to see if it's gzip-compressed
+ // GZIP Magic Two-Byte Number: 0x8b1f (35615)
+ boolean dontGunzip = (options & DONT_GUNZIP) != 0;
+ if ((bytes != null) && (bytes.length >= 4) && (!dontGunzip)) {
+
+ int head = ((int) bytes[0] & 0xff) | ((bytes[1] << 8) & 0xff00);
+ if (java.util.zip.GZIPInputStream.GZIP_MAGIC == head) {
+ java.io.ByteArrayInputStream bais = null;
+ java.util.zip.GZIPInputStream gzis = null;
+ java.io.ByteArrayOutputStream baos = null;
+ byte[] buffer = new byte[2048];
+ int length = 0;
+
+ try {
+ baos = new java.io.ByteArrayOutputStream();
+ bais = new java.io.ByteArrayInputStream(bytes);
+ gzis = new java.util.zip.GZIPInputStream(bais);
+
+ while ((length = gzis.read(buffer)) >= 0) {
+ baos.write(buffer, 0, length);
+ } // end while: reading input
+
+ // No error? Get new bytes.
+ bytes = baos.toByteArray();
+
+ } // end try
+ catch (java.io.IOException e) {
+ // e.printStackTrace();
+ // Just return originally-decoded bytes
+ } // end catch
+ finally {
+ try {
+ baos.close();
+ } catch (Exception e) {
+ }
+ try {
+ gzis.close();
+ } catch (Exception e) {
+ }
+ try {
+ bais.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ } // end if: gzipped
+ } // end if: bytes.length >= 4
+
+ return bytes;
+ } // end decode
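+
+ // A round-trip sketch for the String decoder above, assuming the
+ // encodeBytes(byte[], int) helper and GZIP option defined earlier in this
+ // class ("data" is a hypothetical byte array):
+ //
+ // String s = Base64.encodeBytes(data, Base64.GZIP); // gzip, then encode
+ // byte[] back = Base64.decode(s); // magic number spotted, gunzipped for us
+ // // pass DONT_GUNZIP in the options overload to skip the decompression step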
+
+
+ /**
+ * Attempts to decode Base64 data and deserialize a Java
+ * Object within. Throws an exception if there is an error.
+ *
+ * @param encodedObject The Base64 data to decode
+ * @return The decoded and deserialized object
+ * @throws NullPointerException if encodedObject is null
+ * @throws java.io.IOException if there is a general error
+ * @throws ClassNotFoundException if the decoded object is of a
+ * class that cannot be found by the JVM
+ * @since 1.5
+ */
+ public static Object decodeToObject(String encodedObject)
+ throws java.io.IOException, java.lang.ClassNotFoundException {
+ return decodeToObject(encodedObject, NO_OPTIONS, null);
+ }
+
+
+ /**
+ * Attempts to decode Base64 data and deserialize a Java
+ * Object within. Throws an exception if there is an error.
+ * If <tt>loader</tt> is not null, it will be the class loader
+ * used when deserializing.
+ *
+ * @param encodedObject The Base64 data to decode
+ * @param options Various parameters related to decoding
+ * @param loader Optional class loader to use in deserializing classes.
+ * @return The decoded and deserialized object
+ * @throws NullPointerException if encodedObject is null
+ * @throws java.io.IOException if there is a general error
+ * @throws ClassNotFoundException if the decoded object is of a
+ * class that cannot be found by the JVM
+ * @since 2.3.4
+ */
+ public static Object decodeToObject(
+ String encodedObject, int options, final ClassLoader loader)
+ throws java.io.IOException, java.lang.ClassNotFoundException {
+
+ // Decode and gunzip if necessary
+ byte[] objBytes = decode(encodedObject, options);
+
+ java.io.ByteArrayInputStream bais = null;
+ java.io.ObjectInputStream ois = null;
+ Object obj = null;
+
+ try {
+ bais = new java.io.ByteArrayInputStream(objBytes);
+
+ // If no custom class loader is provided, use Java's builtin OIS.
+ if (loader == null) {
+ ois = new java.io.ObjectInputStream(bais);
+ } // end if: no loader provided
+
+ // Else make a customized object input stream that uses
+ // the provided class loader.
+ else {
+ ois = new java.io.ObjectInputStream(bais) {
+ @Override
+ public Class<?> resolveClass(java.io.ObjectStreamClass streamClass)
+ throws java.io.IOException, ClassNotFoundException {
+ Class<?> c = Class.forName(streamClass.getName(), false, loader);
+ if (c == null) {
+ return super.resolveClass(streamClass);
+ } else {
+ return c; // Class loader knows of this class.
+ } // end else: not null
+ } // end resolveClass
+ }; // end ois
+ } // end else: no custom class loader
+
+ obj = ois.readObject();
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and throw in order to execute finally{}
+ } // end catch
+ catch (java.lang.ClassNotFoundException e) {
+ throw e; // Catch and throw in order to execute finally{}
+ } // end catch
+ finally {
+ try {
+ bais.close();
+ } catch (Exception e) {
+ }
+ try {
+ ois.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ return obj;
+ } // end decodeObject
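+
+ // Serialization round-trip sketch, assuming the encodeObject(Serializable)
+ // helper defined earlier in this class (MyBean is a hypothetical Serializable):
+ //
+ // String blob = Base64.encodeObject(new MyBean());
+ // MyBean copy = (MyBean) Base64.decodeToObject(blob);
+ // // use the three-arg overload to supply a ClassLoader for plugin classes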
+
+
+ /**
+ * Convenience method for encoding data to a file.
+ * <p/>
+ * <p>As of v 2.3, if there is an error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned false, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param dataToEncode byte array of data to encode in base64 form
+ * @param filename Filename for saving encoded data
+ * @throws java.io.IOException if there is an error
+ * @throws NullPointerException if dataToEncode is null
+ * @since 2.1
+ */
+ public static void encodeToFile(byte[] dataToEncode, String filename)
+ throws java.io.IOException {
+
+ if (dataToEncode == null) {
+ throw new NullPointerException("Data to encode was null.");
+ } // end if
+
+ Base64.OutputStream bos = null;
+ try {
+ bos = new Base64.OutputStream(
+ new java.io.FileOutputStream(filename), Base64.ENCODE);
+ bos.write(dataToEncode);
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and throw to execute finally{} block
+ } // end catch: java.io.IOException
+ finally {
+ try {
+ bos.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ } // end encodeToFile
+
+
+ /**
+ * Convenience method for decoding data to a file.
+ * <p/>
+ * <p>As of v 2.3, if there is an error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned false, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param dataToDecode Base64-encoded data as a string
+ * @param filename Filename for saving decoded data
+ * @throws java.io.IOException if there is an error
+ * @since 2.1
+ */
+ public static void decodeToFile(String dataToDecode, String filename)
+ throws java.io.IOException {
+
+ Base64.OutputStream bos = null;
+ try {
+ bos = new Base64.OutputStream(
+ new java.io.FileOutputStream(filename), Base64.DECODE);
+ bos.write(dataToDecode.getBytes(PREFERRED_ENCODING));
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and throw to execute finally{} block
+ } // end catch: java.io.IOException
+ finally {
+ try {
+ bos.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ } // end decodeToFile
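+
+ // Usage sketch for the two file helpers above (paths and payload are
+ // hypothetical):
+ //
+ // byte[] payload = new byte[] { 1, 2, 3 };
+ // Base64.encodeToFile(payload, "/tmp/payload.b64"); // raw bytes -> Base64 text file
+ // Base64.decodeToFile("AQID", "/tmp/payload.bin"); // Base64 text -> raw bytes file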
+
+
+ /**
+ * Convenience method for reading a base64-encoded
+ * file and decoding it.
+ * <p/>
+ * <p>As of v 2.3, if there is an error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned false, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param filename Filename for reading encoded data
+ * @return decoded byte array
+ * @throws java.io.IOException if there is an error
+ * @since 2.1
+ */
+ public static byte[] decodeFromFile(String filename)
+ throws java.io.IOException {
+
+ byte[] decodedData = null;
+ Base64.InputStream bis = null;
+ try {
+ // Set up some useful variables
+ java.io.File file = new java.io.File(filename);
+ byte[] buffer = null;
+ int length = 0;
+ int numBytes = 0;
+
+ // Check for size of file
+ if (file.length() > Integer.MAX_VALUE) {
+ throw new java.io.IOException("File is too big for this convenience method (" + file.length() + " bytes).");
+ } // end if: file too big for int index
+ buffer = new byte[(int) file.length()];
+
+ // Open a stream
+ bis = new Base64.InputStream(
+ new java.io.BufferedInputStream(
+ new java.io.FileInputStream(file)), Base64.DECODE);
+
+ // Read until done
+ while ((numBytes = bis.read(buffer, length, 4096)) >= 0) {
+ length += numBytes;
+ } // end while
+
+ // Save in a variable to return
+ decodedData = new byte[length];
+ System.arraycopy(buffer, 0, decodedData, 0, length);
+
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and release to execute finally{}
+ } // end catch: java.io.IOException
+ finally {
+ try {
+ bis.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ return decodedData;
+ } // end decodeFromFile
+
+
+ /**
+ * Convenience method for reading a binary file
+ * and base64-encoding it.
+ * <p/>
+ * <p>As of v 2.3, if there is an error,
+ * the method will throw a java.io.IOException. <b>This is new to v2.3!</b>
+ * In earlier versions, it just returned false, but
+ * in retrospect that's a pretty poor way to handle it.</p>
+ *
+ * @param filename Filename for reading binary data
+ * @return base64-encoded string
+ * @throws java.io.IOException if there is an error
+ * @since 2.1
+ */
+ public static String encodeFromFile(String filename)
+ throws java.io.IOException {
+
+ String encodedData = null;
+ Base64.InputStream bis = null;
+ try {
+ // Set up some useful variables
+ java.io.File file = new java.io.File(filename);
+ byte[] buffer = new byte[Math.max((int) (file.length() * 1.4 + 1), 40)]; // Need max() for math on small files (v2.2.1); Need +1 for a few corner cases (v2.3.5)
+ int length = 0;
+ int numBytes = 0;
+
+ // Open a stream
+ bis = new Base64.InputStream(
+ new java.io.BufferedInputStream(
+ new java.io.FileInputStream(file)), Base64.ENCODE);
+
+ // Read until done
+ while ((numBytes = bis.read(buffer, length, 4096)) >= 0) {
+ length += numBytes;
+ } // end while
+
+ // Save in a variable to return
+ encodedData = new String(buffer, 0, length, Base64.PREFERRED_ENCODING);
+
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and release to execute finally{}
+ } // end catch: java.io.IOException
+ finally {
+ try {
+ bis.close();
+ } catch (Exception e) {
+ }
+ } // end finally
+
+ return encodedData;
+ } // end encodeFromFile
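+
+ // And the mirror-image readers above (path is hypothetical):
+ //
+ // String b64 = Base64.encodeFromFile("/tmp/payload.bin"); // binary file -> Base64 string
+ // byte[] raw = Base64.decodeFromFile("/tmp/payload.b64"); // Base64 file -> raw bytes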
+
+ /**
+ * Reads <tt>infile</tt> and encodes it to <tt>outfile</tt>.
+ *
+ * @param infile Input file
+ * @param outfile Output file
+ * @throws java.io.IOException if there is an error
+ * @since 2.2
+ */
+ public static void encodeFileToFile(String infile, String outfile)
+ throws java.io.IOException {
+
+ String encoded = Base64.encodeFromFile(infile);
+ java.io.OutputStream out = null;
+ try {
+ out = new java.io.BufferedOutputStream(
+ new java.io.FileOutputStream(outfile));
+ out.write(encoded.getBytes("US-ASCII")); // Strict, 7-bit output.
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and release to execute finally{}
+ } // end catch
+ finally {
+ try {
+ out.close();
+ } catch (Exception ex) {
+ }
+ } // end finally
+ } // end encodeFileToFile
+
+
+ /**
+ * Reads <tt>infile</tt> and decodes it to <tt>outfile</tt>.
+ *
+ * @param infile Input file
+ * @param outfile Output file
+ * @throws java.io.IOException if there is an error
+ * @since 2.2
+ */
+ public static void decodeFileToFile(String infile, String outfile)
+ throws java.io.IOException {
+
+ byte[] decoded = Base64.decodeFromFile(infile);
+ java.io.OutputStream out = null;
+ try {
+ out = new java.io.BufferedOutputStream(
+ new java.io.FileOutputStream(outfile));
+ out.write(decoded);
+ } // end try
+ catch (java.io.IOException e) {
+ throw e; // Catch and release to execute finally{}
+ } // end catch
+ finally {
+ try {
+ out.close();
+ } catch (Exception ex) {
+ }
+ } // end finally
+ } // end decodeFileToFile
+
+
+ /* ******** I N N E R C L A S S I N P U T S T R E A M ******** */
+
+
+ /**
+ * A {@link Base64.InputStream} will read data from another
+ * <tt>java.io.InputStream</tt>, given in the constructor,
+ * and encode/decode to/from Base64 notation on the fly.
+ *
+ * @see Base64
+ * @since 1.3
+ */
+ public static class InputStream extends java.io.FilterInputStream {
+
+ private boolean encode; // Encoding or decoding
+ private int position; // Current position in the buffer
+ private byte[] buffer; // Small buffer holding converted data
+ private int bufferLength; // Length of buffer (3 or 4)
+ private int numSigBytes; // Number of meaningful bytes in the buffer
+ private int lineLength;
+ private boolean breakLines; // Break lines at less than 80 characters
+ private int options; // Record options used to create the stream.
+ private byte[] decodabet; // Local copies to avoid extra method calls
+
+
+ /**
+ * Constructs a {@link Base64.InputStream} in DECODE mode.
+ *
+ * @param in the <tt>java.io.InputStream</tt> from which to read data.
+ * @since 1.3
+ */
+ public InputStream(java.io.InputStream in) {
+ this(in, DECODE);
+ } // end constructor
+
+
+ /**
+ * Constructs a {@link Base64.InputStream} in
+ * either ENCODE or DECODE mode.
+ * <p/>
+ * Valid options:<pre>
+ * ENCODE or DECODE: Encode or Decode as data is read.
+ * DO_BREAK_LINES: break lines at 76 characters
+ * (only meaningful when encoding)
+ * </pre>
+ * <p/>
+ * Example: <code>new Base64.InputStream( in, Base64.DECODE )</code>
+ *
+ * @param in the <tt>java.io.InputStream</tt> from which to read data.
+ * @param options Specified options
+ * @see Base64#ENCODE
+ * @see Base64#DECODE
+ * @see Base64#DO_BREAK_LINES
+ * @since 2.0
+ */
+ public InputStream(java.io.InputStream in, int options) {
+
+ super(in);
+ this.options = options; // Record for later
+ this.breakLines = (options & DO_BREAK_LINES) > 0;
+ this.encode = (options & ENCODE) > 0;
+ this.bufferLength = encode ? 4 : 3;
+ this.buffer = new byte[bufferLength];
+ this.position = -1;
+ this.lineLength = 0;
+ this.decodabet = getDecodabet(options);
+ } // end constructor
+
+ /**
+ * Reads enough of the input stream to convert
+ * to/from Base64 and returns the next byte.
+ *
+ * @return next byte
+ * @since 1.3
+ */
+ @Override
+ public int read() throws java.io.IOException {
+
+ // Do we need to get data?
+ if (position < 0) {
+ if (encode) {
+ byte[] b3 = new byte[3];
+ int numBinaryBytes = 0;
+ for (int i = 0; i < 3; i++) {
+ int b = in.read();
+
+ // If end of stream, b is -1.
+ if (b >= 0) {
+ b3[i] = (byte) b;
+ numBinaryBytes++;
+ } else {
+ break; // out of for loop
+ } // end else: end of stream
+
+ } // end for: each needed input byte
+
+ if (numBinaryBytes > 0) {
+ encode3to4(b3, 0, numBinaryBytes, buffer, 0, options);
+ position = 0;
+ numSigBytes = 4;
+ } // end if: got data
+ else {
+ return -1; // Must be end of stream
+ } // end else
+ } // end if: encoding
+
+ // Else decoding
+ else {
+ byte[] b4 = new byte[4];
+ int i = 0;
+ for (i = 0; i < 4; i++) {
+ // Read four "meaningful" bytes:
+ int b = 0;
+ do {
+ b = in.read();
+ }
+ while (b >= 0 && decodabet[b & 0x7f] <= WHITE_SPACE_ENC);
+
+ if (b < 0) {
+ break; // Reads a -1 if end of stream
+ } // end if: end of stream
+
+ b4[i] = (byte) b;
+ } // end for: each needed input byte
+
+ if (i == 4) {
+ numSigBytes = decode4to3(b4, 0, buffer, 0, options);
+ position = 0;
+ } // end if: got four characters
+ else if (i == 0) {
+ return -1;
+ } // end else if: also padded correctly
+ else {
+ // Must have broken out from above.
+ throw new java.io.IOException("Improperly padded Base64 input.");
+ } // end
+
+ } // end else: decode
+ } // end else: get data
+
+ // Got data?
+ if (position >= 0) {
+ // End of relevant data?
+ if ( /*!encode &&*/ position >= numSigBytes) {
+ return -1;
+ } // end if: got data
+
+ if (encode && breakLines && lineLength >= MAX_LINE_LENGTH) {
+ lineLength = 0;
+ return '\n';
+ } // end if
+ else {
+ lineLength++; // This isn't important when decoding
+ // but throwing an extra "if" seems
+ // just as wasteful.
+
+ int b = buffer[position++];
+
+ if (position >= bufferLength) {
+ position = -1;
+ } // end if: end
+
+ return b & 0xFF; // This is how you "cast" a byte that's
+ // intended to be unsigned.
+ } // end else
+ } // end if: position >= 0
+
+ // Else error
+ else {
+ throw new java.io.IOException("Error in Base64 code reading stream.");
+ } // end else
+ } // end read
+
+
+ /**
+ * Calls {@link #read()} repeatedly until the end of stream
+ * is reached or <var>len</var> bytes are read.
+ * Returns number of bytes read into array or -1 if
+ * end of stream is encountered.
+ *
+ * @param dest array to hold values
+ * @param off offset for array
+ * @param len max number of bytes to read into array
+ * @return bytes read into array or -1 if end of stream is encountered.
+ * @since 1.3
+ */
+ @Override
+ public int read(byte[] dest, int off, int len)
+ throws java.io.IOException {
+ int i;
+ int b;
+ for (i = 0; i < len; i++) {
+ b = read();
+
+ if (b >= 0) {
+ dest[off + i] = (byte) b;
+ } else if (i == 0) {
+ return -1;
+ } else {
+ break; // Out of 'for' loop
+ } // end else
+ } // end for: each byte read
+ return i;
+ } // end read
+
+ } // end inner class InputStream
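+
+ // Stream sketch: decoding a Base64 text file on the fly (path is hypothetical):
+ //
+ // java.io.InputStream b64in = new Base64.InputStream(
+ // new java.io.FileInputStream("/tmp/payload.b64"), Base64.DECODE);
+ // int b;
+ // while ((b = b64in.read()) >= 0) { /* each b is an already-decoded byte */ }
+ // b64in.close();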
+
+
+ /* ******** I N N E R C L A S S O U T P U T S T R E A M ******** */
+
+
+ /**
+ * A {@link Base64.OutputStream} will write data to another
+ * <tt>java.io.OutputStream</tt>, given in the constructor,
+ * and encode/decode to/from Base64 notation on the fly.
+ *
+ * @see Base64
+ * @since 1.3
+ */
+ public static class OutputStream extends java.io.FilterOutputStream {
+
+ private boolean encode;
+ private int position;
+ private byte[] buffer;
+ private int bufferLength;
+ private int lineLength;
+ private boolean breakLines;
+ private byte[] b4; // Scratch used in a few places
+ private boolean suspendEncoding;
+ private int options; // Record for later
+ private byte[] decodabet; // Local copies to avoid extra method calls
+
+ /**
+ * Constructs a {@link Base64.OutputStream} in ENCODE mode.
+ *
+ * @param out the <tt>java.io.OutputStream</tt> to which data will be written.
+ * @since 1.3
+ */
+ public OutputStream(java.io.OutputStream out) {
+ this(out, ENCODE);
+ } // end constructor
+
+
+ /**
+ * Constructs a {@link Base64.OutputStream} in
+ * either ENCODE or DECODE mode.
+ * <p/>
+ * Valid options:<pre>
+ * ENCODE or DECODE: Encode or Decode as data is written.
+ * DO_BREAK_LINES: break lines at 76 characters
+ * (only meaningful when encoding)
+ * </pre>
+ * <p/>
+ * Example: <code>new Base64.OutputStream( out, Base64.ENCODE )</code>
+ *
+ * @param out the <tt>java.io.OutputStream</tt> to which data will be written.
+ * @param options Specified options.
+ * @see Base64#ENCODE
+ * @see Base64#DECODE
+ * @see Base64#DO_BREAK_LINES
+ * @since 1.3
+ */
+ public OutputStream(java.io.OutputStream out, int options) {
+ super(out);
+ this.breakLines = (options & DO_BREAK_LINES) != 0;
+ this.encode = (options & ENCODE) != 0;
+ this.bufferLength = encode ? 3 : 4;
+ this.buffer = new byte[bufferLength];
+ this.position = 0;
+ this.lineLength = 0;
+ this.suspendEncoding = false;
+ this.b4 = new byte[4];
+ this.options = options;
+ this.decodabet = getDecodabet(options);
+ } // end constructor
+
+
+ /**
+ * Writes the byte to the output stream after
+ * converting to/from Base64 notation.
+ * When encoding, bytes are buffered three
+ * at a time before the output stream actually
+ * gets a write() call.
+ * When decoding, bytes are buffered four
+ * at a time.
+ *
+ * @param theByte the byte to write
+ * @since 1.3
+ */
+ @Override
+ public void write(int theByte)
+ throws java.io.IOException {
+ // Encoding suspended?
+ if (suspendEncoding) {
+ this.out.write(theByte);
+ return;
+ } // end if: suspended
+
+ // Encode?
+ if (encode) {
+ buffer[position++] = (byte) theByte;
+ if (position >= bufferLength) { // Enough to encode.
+
+ this.out.write(encode3to4(b4, buffer, bufferLength, options));
+
+ lineLength += 4;
+ if (breakLines && lineLength >= MAX_LINE_LENGTH) {
+ this.out.write(NEW_LINE);
+ lineLength = 0;
+ } // end if: end of line
+
+ position = 0;
+ } // end if: enough to output
+ } // end if: encoding
+
+ // Else, Decoding
+ else {
+ // Meaningful Base64 character?
+ if (decodabet[theByte & 0x7f] > WHITE_SPACE_ENC) {
+ buffer[position++] = (byte) theByte;
+ if (position >= bufferLength) { // Enough to output.
+
+ int len = Base64.decode4to3(buffer, 0, b4, 0, options);
+ out.write(b4, 0, len);
+ position = 0;
+ } // end if: enough to output
+ } // end if: meaningful base64 character
+ else if (decodabet[theByte & 0x7f] != WHITE_SPACE_ENC) {
+ throw new java.io.IOException("Invalid character in Base64 data.");
+ } // end else: not white space either
+ } // end else: decoding
+ } // end write
+
+
+ /**
+ * Calls {@link #write(int)} repeatedly until <var>len</var>
+ * bytes are written.
+ *
+ * @param theBytes array from which to read bytes
+ * @param off offset for array
+ * @param len max number of bytes to read into array
+ * @since 1.3
+ */
+ @Override
+ public void write(byte[] theBytes, int off, int len)
+ throws java.io.IOException {
+ // Encoding suspended?
+ if (suspendEncoding) {
+ this.out.write(theBytes, off, len);
+ return;
+ } // end if: suspended
+
+ for (int i = 0; i < len; i++) {
+ write(theBytes[off + i]);
+ } // end for: each byte written
+
+ } // end write
+
+
+ /**
+ * Method added by PHIL. [Thanks, PHIL. -Rob]
+ * This pads the buffer without closing the stream.
+ *
+ * @throws java.io.IOException if there's an error.
+ */
+ public void flushBase64() throws java.io.IOException {
+ if (position > 0) {
+ if (encode) {
+ out.write(encode3to4(b4, buffer, position, options));
+ position = 0;
+ } // end if: encoding
+ else {
+ throw new java.io.IOException("Base64 input not properly padded.");
+ } // end else: decoding
+ } // end if: buffer partially full
+
+ } // end flush
+
+
+ /**
+ * Flushes and closes the stream (the superclass performs the actual close).
+ *
+ * @since 1.3
+ */
+ @Override
+ public void close() throws java.io.IOException {
+ // 1. Ensure that pending characters are written
+ flushBase64();
+
+ // 2. Actually close the stream
+ // Base class both flushes and closes.
+ super.close();
+
+ buffer = null;
+ out = null;
+ } // end close
+
+
+ /**
+ * Suspends encoding of the stream.
+ * May be helpful if you need to embed a piece of
+ * base64-encoded data in a stream.
+ *
+ * @throws java.io.IOException if there's an error flushing
+ * @since 1.5.1
+ */
+ public void suspendEncoding() throws java.io.IOException {
+ flushBase64();
+ this.suspendEncoding = true;
+ } // end suspendEncoding
+
+
+ /**
+ * Resumes encoding of the stream.
+ * May be helpful if you need to embed a piece of
+ * base64-encoded data in a stream.
+ *
+ * @since 1.5.1
+ */
+ public void resumeEncoding() {
+ this.suspendEncoding = false;
+ } // end resumeEncoding
+
+
+ } // end inner class OutputStream
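+
+ // Stream sketch: suspendEncoding()/resumeEncoding() let a caller splice
+ // already-encoded text into an encoding stream (out, header and preEncoded
+ // are hypothetical):
+ //
+ // Base64.OutputStream b64out = new Base64.OutputStream(out, Base64.ENCODE);
+ // b64out.write(header); // encoded to Base64
+ // b64out.suspendEncoding(); // flushes any partial quantum first
+ // b64out.write(preEncoded); // passed through verbatim
+ // b64out.resumeEncoding(); // back to encoding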
+
+
+} // end class Base64
diff --git a/src/main/java/org/elasticsearch/common/Booleans.java b/src/main/java/org/elasticsearch/common/Booleans.java
new file mode 100644
index 0000000..830e10e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Booleans.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+/**
+ *
+ */
+public class Booleans {
+
+ public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) {
+ if (text == null || length == 0) {
+ return defaultValue;
+ }
+ if (length == 1) {
+ return text[offset] != '0';
+ }
+ if (length == 2) {
+ return !(text[offset] == 'n' && text[offset + 1] == 'o');
+ }
+ if (length == 3) {
+ return !(text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f');
+ }
+ if (length == 5) {
+ return !(text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && text[offset + 4] == 'e');
+ }
+ return true;
+ }
+
+ /**
+ * Returns true if a sequence of chars is one of "true", "false", "on", "off", "yes", "no", "0", "1".
+ *
+ * @param text sequence to check
+ * @param offset offset to start
+ * @param length length to check
+ */
+ public static boolean isBoolean(char[] text, int offset, int length) {
+ if (text == null || length == 0) {
+ return false;
+ }
+ if (length == 1) {
+ return text[offset] == '0' || text[offset] == '1';
+ }
+ if (length == 2) {
+ return (text[offset] == 'n' && text[offset + 1] == 'o') || (text[offset] == 'o' && text[offset + 1] == 'n');
+ }
+ if (length == 3) {
+ return (text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f') ||
+ (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's');
+ }
+ if (length == 4) {
+ return (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e');
+ }
+ if (length == 5) {
+ return (text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && text[offset + 4] == 'e');
+ }
+ return false;
+ }
+
+
+ public static boolean parseBoolean(String value, boolean defaultValue) {
+ if (value == null) {
+ return defaultValue;
+ }
+ return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
+ }
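+
+ // Illustrative values: parsing is permissive, so anything outside the
+ // explicit false-y set ("false", "0", "off", "no") counts as true:
+ //
+ // Booleans.parseBoolean("off", true); // false
+ // Booleans.parseBoolean(null, true); // true (default used)
+ // Booleans.parseBoolean("banana", false); // true, not a false-y token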
+
+ public static Boolean parseBoolean(String value, Boolean defaultValue) {
+ if (value == null) {
+ return defaultValue;
+ }
+ return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
+ }
+
+ public static boolean isExplicitFalse(String value) {
+ return (value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
+ }
+
+ public static boolean isExplicitTrue(String value) {
+ return (value.equals("true") || value.equals("1") || value.equals("on") || value.equals("yes"));
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/Classes.java b/src/main/java/org/elasticsearch/common/Classes.java
new file mode 100644
index 0000000..dee807a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Classes.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.NoClassSettingsException;
+
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Locale;
+
+import static org.elasticsearch.common.Strings.toCamelCase;
+
+/**
+ *
+ */
+public class Classes {
+
+ /**
+ * The package separator character '.'
+ */
+ private static final char PACKAGE_SEPARATOR = '.';
+
+ /**
+ * Return the default ClassLoader to use: typically the thread context
+ * ClassLoader, if available; the ClassLoader that loaded the ClassUtils
+ * class will be used as fallback.
+ * <p/>
+ * <p>Call this method if you intend to use the thread context ClassLoader
+ * in a scenario where you absolutely need a non-null ClassLoader reference:
+ * for example, for class path resource loading (but not necessarily for
+ * <code>Class.forName</code>, which accepts a <code>null</code> ClassLoader
+ * reference as well).
+ *
+ * @return the default ClassLoader (never <code>null</code>)
+ * @see java.lang.Thread#getContextClassLoader()
+ */
+ public static ClassLoader getDefaultClassLoader() {
+ ClassLoader cl = null;
+ try {
+ cl = Thread.currentThread().getContextClassLoader();
+ } catch (Throwable ex) {
+ // Cannot access thread context ClassLoader - falling back to system class loader...
+ }
+ if (cl == null) {
+ // No thread context class loader -> use class loader of this class.
+ cl = Classes.class.getClassLoader();
+ }
+ return cl;
+ }
+
+ /**
+ * Determine the name of the package of the given class:
+ * e.g. "java.lang" for the <code>java.lang.String</code> class.
+ *
+ * @param clazz the class
+ * @return the package name, or the empty String if the class
+ * is defined in the default package
+ */
+ public static String getPackageName(Class<?> clazz) {
+ String className = clazz.getName();
+ int lastDotIndex = className.lastIndexOf(PACKAGE_SEPARATOR);
+ return (lastDotIndex != -1 ? className.substring(0, lastDotIndex) : "");
+ }
+
+ public static String getPackageNameNoDomain(Class<?> clazz) {
+ String fullPackage = getPackageName(clazz);
+ if (fullPackage.startsWith("org.") || fullPackage.startsWith("com.") || fullPackage.startsWith("net.")) {
+ return fullPackage.substring(4);
+ }
+ return fullPackage;
+ }
+
+ public static boolean isInnerClass(Class<?> clazz) {
+ return !Modifier.isStatic(clazz.getModifiers())
+ && clazz.getEnclosingClass() != null;
+ }
+
+ public static boolean isConcrete(Class<?> clazz) {
+ int modifiers = clazz.getModifiers();
+ return !clazz.isInterface() && !Modifier.isAbstract(modifiers);
+ }
+
+ public static <T> Class<? extends T> loadClass(ClassLoader classLoader, String className, String prefixPackage, String suffixClassName) {
+ return loadClass(classLoader, className, prefixPackage, suffixClassName, null);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ public static <T> Class<? extends T> loadClass(ClassLoader classLoader, String className, String prefixPackage, String suffixClassName, String errorPrefix) {
+ Throwable t = null;
+ String[] classNames = classNames(className, prefixPackage, suffixClassName);
+ for (String fullClassName : classNames) {
+ try {
+ return (Class<? extends T>) classLoader.loadClass(fullClassName);
+ } catch (ClassNotFoundException ex) {
+ t = ex;
+ } catch (NoClassDefFoundError er) {
+ t = er;
+ }
+ }
+ if (errorPrefix == null) {
+ errorPrefix = "failed to load class";
+ }
+ throw new NoClassSettingsException(errorPrefix + " with value [" + className + "]; tried " + Arrays.toString(classNames), t);
+ }
+
+ private static String[] classNames(String className, String prefixPackage, String suffixClassName) {
+ String prefixValue = prefixPackage;
+ int packageSeparator = className.lastIndexOf('.');
+ String classNameValue = className;
+ // If class name contains package use it as package prefix instead of specified default one
+ if (packageSeparator > 0) {
+ prefixValue = className.substring(0, packageSeparator + 1);
+ classNameValue = className.substring(packageSeparator + 1);
+ }
+ return new String[]{
+ className,
+ prefixValue + Strings.capitalize(toCamelCase(classNameValue)) + suffixClassName,
+ prefixValue + toCamelCase(classNameValue) + "." + Strings.capitalize(toCamelCase(classNameValue)) + suffixClassName,
+ prefixValue + toCamelCase(classNameValue).toLowerCase(Locale.ROOT) + "." + Strings.capitalize(toCamelCase(classNameValue)) + suffixClassName,
+ };
+ }
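+
+ // Illustrative expansion with hypothetical inputs: classNames("snowball",
+ // "org.elasticsearch.index.analysis.", "AnalyzerProvider") produces, in order:
+ //
+ // snowball
+ // org.elasticsearch.index.analysis.SnowballAnalyzerProvider
+ // org.elasticsearch.index.analysis.snowball.SnowballAnalyzerProvider
+ // org.elasticsearch.index.analysis.snowball.SnowballAnalyzerProvider
+ //
+ // (the last two coincide because the name is already lower case), and
+ // loadClass(...) returns the first candidate the ClassLoader can resolve.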
+
+ private Classes() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/Explicit.java b/src/main/java/org/elasticsearch/common/Explicit.java
new file mode 100644
index 0000000..3132f22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Explicit.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+/**
+ * Holds a value that is either:
+ * a) set implicitly e.g. through some default value
+ * b) set explicitly e.g. from a user selection
+ *
+ * When merging conflicting configuration settings such as
+ * field mapping settings it is preferable to preserve an explicit
+ * choice rather than a choice made only implicitly by defaults.
+ *
+ */
+public class Explicit<T> {
+
+ private final T value;
+ private final boolean explicit;
+ /**
+ * Create a value with an indication if this was an explicit choice
+ * @param value a setting value
+ * @param explicit true if the value passed is a conscious decision, false if using some kind of default
+ */
+ public Explicit(T value, boolean explicit) {
+ this.value = value;
+ this.explicit = explicit;
+ }
+
+ public T value() {
+ return this.value;
+ }
+
+ /**
+ *
+ * @return true if the value passed is a conscious decision, false if using some kind of default
+ */
+ public boolean explicit() {
+ return this.explicit;
+ }
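+
+ // Merge sketch with hypothetical values: when combining two settings, prefer
+ // the explicit one:
+ //
+ // Explicit<Boolean> fromDefaults = new Explicit<Boolean>(true, false);
+ // Explicit<Boolean> fromUser = new Explicit<Boolean>(false, true);
+ // Explicit<Boolean> merged = fromUser.explicit() ? fromUser : fromDefaults;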
+}
diff --git a/src/main/java/org/elasticsearch/common/Names.java b/src/main/java/org/elasticsearch/common/Names.java
new file mode 100644
index 0000000..3d6d8fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Names.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import com.google.common.base.Charsets;
+import jsr166y.ThreadLocalRandom;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.util.Random;
+
+/**
+ *
+ */
+public abstract class Names {
+
+ public static String randomNodeName(URL nodeNames) {
+ BufferedReader reader = null;
+ try {
+ reader = new BufferedReader(new InputStreamReader(nodeNames.openStream(), Charsets.UTF_8));
+ int numberOfNames = 0;
+ while (reader.readLine() != null) {
+ numberOfNames++;
+ }
+ reader.close();
+ reader = new BufferedReader(new InputStreamReader(nodeNames.openStream(), Charsets.UTF_8));
+ int number = ((ThreadLocalRandom.current().nextInt(numberOfNames)) % numberOfNames);
+ for (int i = 0; i < number; i++) {
+ reader.readLine();
+ }
+ return reader.readLine();
+ } catch (IOException e) {
+ return null;
+ } finally {
+ try {
+ if (reader != null) {
+ reader.close();
+ }
+ } catch (IOException e) {
+ // ignore this exception
+ }
+ }
+ }
+
+ public static String randomNodeName(InputStream nodeNames) {
+ if (nodeNames == null) {
+ return null;
+ }
+ try {
+ BufferedReader reader = new BufferedReader(new InputStreamReader(nodeNames, Charsets.UTF_8));
+ int numberOfNames = Integer.parseInt(reader.readLine());
+ int number = ((new Random().nextInt(numberOfNames)) % numberOfNames) - 2; // remove 2 for last line and first line
+ for (int i = 0; i < number; i++) {
+ reader.readLine();
+ }
+ return reader.readLine();
+ } catch (Exception e) {
+ return null;
+ } finally {
+ try {
+ nodeNames.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+
+ private Names() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/Nullable.java b/src/main/java/org/elasticsearch/common/Nullable.java
new file mode 100644
index 0000000..4f017e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Nullable.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import java.lang.annotation.*;
+
+/**
+ * The presence of this annotation on a method parameter indicates that
+ * {@code null} is an acceptable value for that parameter. It should not be
+ * used for parameters of primitive types.
+ *
+ *
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.PARAMETER, ElementType.FIELD, ElementType.METHOD})
+public @interface Nullable {
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/Numbers.java b/src/main/java/org/elasticsearch/common/Numbers.java
new file mode 100644
index 0000000..df57c55
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Numbers.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A set of utilities for numbers.
+ */
+public final class Numbers {
+
+ private Numbers() {
+
+ }
+
+ /**
+ * Converts a byte array to a short.
+ *
+ * @param arr The byte array to convert to a short
+ * @return The short converted
+ */
+ public static short bytesToShort(byte[] arr) {
+ return (short) (((arr[0] & 0xff) << 8) | (arr[1] & 0xff));
+ }
+
+ public static short bytesToShort(BytesRef bytes) {
+ return (short) (((bytes.bytes[bytes.offset] & 0xff) << 8) | (bytes.bytes[bytes.offset + 1] & 0xff));
+ }
+
+ /**
+ * Converts a byte array to an int.
+ *
+ * @param arr The byte array to convert to an int
+ * @return The int converted
+ */
+ public static int bytesToInt(byte[] arr) {
+ return (arr[0] << 24) | ((arr[1] & 0xff) << 16) | ((arr[2] & 0xff) << 8) | (arr[3] & 0xff);
+ }
+
+ public static int bytesToInt(BytesRef bytes) {
+ return (bytes.bytes[bytes.offset] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff);
+ }
+
+ /**
+ * Converts a byte array to a long.
+ *
+ * @param arr The byte array to convert to a long
+ * @return The long converted
+ */
+ public static long bytesToLong(byte[] arr) {
+ int high = (arr[0] << 24) | ((arr[1] & 0xff) << 16) | ((arr[2] & 0xff) << 8) | (arr[3] & 0xff);
+ int low = (arr[4] << 24) | ((arr[5] & 0xff) << 16) | ((arr[6] & 0xff) << 8) | (arr[7] & 0xff);
+ return (((long) high) << 32) | (low & 0x0ffffffffL);
+ }
+
+ public static long bytesToLong(BytesRef bytes) {
+ int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff);
+ int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff);
+ return (((long) high) << 32) | (low & 0x0ffffffffL);
+ }
+
+ /**
+ * Converts a byte array to float.
+ *
+ * @param arr The byte array to convert to a float
+ * @return The float converted
+ */
+ public static float bytesToFloat(byte[] arr) {
+ return Float.intBitsToFloat(bytesToInt(arr));
+ }
+
+ public static float bytesToFloat(BytesRef bytes) {
+ return Float.intBitsToFloat(bytesToInt(bytes));
+ }
+
+ /**
+ * Converts a byte array to double.
+ *
+ * @param arr The byte array to convert to a double
+ * @return The double converted
+ */
+ public static double bytesToDouble(byte[] arr) {
+ return Double.longBitsToDouble(bytesToLong(arr));
+ }
+
+ public static double bytesToDouble(BytesRef bytes) {
+ return Double.longBitsToDouble(bytesToLong(bytes));
+ }
+
+ /**
+ * Converts an int to a byte array.
+ *
+ * @param val The int to convert to a byte array
+ * @return The byte array converted
+ */
+ public static byte[] intToBytes(int val) {
+ byte[] arr = new byte[4];
+ arr[0] = (byte) (val >>> 24);
+ arr[1] = (byte) (val >>> 16);
+ arr[2] = (byte) (val >>> 8);
+ arr[3] = (byte) (val);
+ return arr;
+ }
+
+ /**
+ * Converts a short (passed as an int) to a byte array.
+ *
+ * @param val The short value to convert to a byte array
+ * @return The byte array converted
+ */
+ public static byte[] shortToBytes(int val) {
+ byte[] arr = new byte[2];
+ arr[0] = (byte) (val >>> 8);
+ arr[1] = (byte) (val);
+ return arr;
+ }
+
+ /**
+ * Converts a long to a byte array.
+ *
+ * @param val The long to convert to a byte array
+ * @return The byte array converted
+ */
+ public static byte[] longToBytes(long val) {
+ byte[] arr = new byte[8];
+ arr[0] = (byte) (val >>> 56);
+ arr[1] = (byte) (val >>> 48);
+ arr[2] = (byte) (val >>> 40);
+ arr[3] = (byte) (val >>> 32);
+ arr[4] = (byte) (val >>> 24);
+ arr[5] = (byte) (val >>> 16);
+ arr[6] = (byte) (val >>> 8);
+ arr[7] = (byte) (val);
+ return arr;
+ }
+
+ /**
+ * Converts a float to a byte array.
+ *
+ * @param val The float to convert to a byte array
+ * @return The byte array converted
+ */
+ public static byte[] floatToBytes(float val) {
+ return intToBytes(Float.floatToRawIntBits(val));
+ }
+
+ /**
+ * Converts a double to a byte array.
+ *
+ * @param val The double to convert to a byte array
+ * @return The byte array converted
+ */
+ public static byte[] doubleToBytes(double val) {
+ return longToBytes(Double.doubleToRawLongBits(val));
+ }
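+
+ // Round-trip sketch: the encoders and decoders above are big-endian inverses
+ // (the value is hypothetical):
+ //
+ // long val = 0x0102030405060708L;
+ // byte[] arr = Numbers.longToBytes(val); // { 1, 2, 3, 4, 5, 6, 7, 8 }
+ // assert Numbers.bytesToLong(arr) == val;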
+
+}
diff --git a/src/main/java/org/elasticsearch/common/ParseField.java b/src/main/java/org/elasticsearch/common/ParseField.java
new file mode 100644
index 0000000..d459aed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/ParseField.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import java.util.EnumSet;
+import java.util.HashSet;
+
+/**
+ */
+public class ParseField {
+ private final String camelCaseName;
+ private final String underscoreName;
+ private final String[] deprecatedNames;
+
+ public static final EnumSet<Flag> EMPTY_FLAGS = EnumSet.noneOf(Flag.class);
+
+ public static enum Flag {
+ STRICT
+ }
+
+ public ParseField(String value, String... deprecatedNames) {
+ camelCaseName = Strings.toCamelCase(value);
+ underscoreName = Strings.toUnderscoreCase(value);
+ if (deprecatedNames == null || deprecatedNames.length == 0) {
+ this.deprecatedNames = Strings.EMPTY_ARRAY;
+ } else {
+ final HashSet<String> set = new HashSet<String>();
+ for (String depName : deprecatedNames) {
+ set.add(Strings.toCamelCase(depName));
+ set.add(Strings.toUnderscoreCase(depName));
+ }
+ this.deprecatedNames = set.toArray(new String[0]);
+ }
+ }
+
+ public String getPreferredName(){
+ return underscoreName;
+ }
+
+ public ParseField withDeprecation(String... deprecatedNames) {
+ return new ParseField(this.underscoreName, deprecatedNames);
+ }
+
+ public boolean match(String currentFieldName) {
+ return match(currentFieldName, EMPTY_FLAGS);
+ }
+
+ public boolean match(String currentFieldName, EnumSet<Flag> flags) {
+ if (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName)) {
+ return true;
+ }
+ for (String depName : deprecatedNames) {
+ if (currentFieldName.equals(depName)) {
+ if (flags.contains(Flag.STRICT)) {
+ throw new ElasticsearchIllegalArgumentException("Deprecated field [" + currentFieldName + "] used expected [" + underscoreName + "] instead");
+ }
+ return true;
+ }
+ }
+ return false;
+ }
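+
+ // Matching sketch with a hypothetical field: a ParseField built from
+ // "fuzzy_factor" answers to both naming styles, and STRICT turns deprecated
+ // hits into errors:
+ //
+ // ParseField f = new ParseField("fuzzy_factor").withDeprecation("fuzziness");
+ // f.match("fuzzyFactor"); // true (camel case)
+ // f.match("fuzzy_factor"); // true (underscore)
+ // f.match("fuzziness"); // true, but throws under Flag.STRICT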
+
+
+}
diff --git a/src/main/java/org/elasticsearch/common/Preconditions.java b/src/main/java/org/elasticsearch/common/Preconditions.java
new file mode 100644
index 0000000..bc6aef9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Preconditions.java
@@ -0,0 +1,490 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ElasticsearchNullPointerException;
+
+import java.util.Collection;
+import java.util.NoSuchElementException;
+
+/**
+ * Simple static methods to be called at the start of your own methods to verify
+ * correct arguments and state. This allows constructs such as
+ * <pre>
+ * if (count <= 0) {
+ * throw new ElasticsearchIllegalArgumentException("must be positive: " + count);
+ * }</pre>
+ *
+ * to be replaced with the more compact
+ * <pre>
+ * checkArgument(count > 0, "must be positive: %s", count);</pre>
+ *
+ * Note that the sense of the expression is inverted; with {@code Preconditions}
+ * you declare what you expect to be <i>true</i>, just as you do with an
+ * <a href="http://java.sun.com/j2se/1.5.0/docs/guide/language/assert.html">
+ * {@code assert}</a> or a JUnit {@code assertTrue()} call.
+ *
+ * <p>Take care not to confuse precondition checking with other similar types
+ * of checks! Precondition exceptions -- including those provided here, but also
+ * {@link IndexOutOfBoundsException}, {@link NoSuchElementException}, {@link
+ * UnsupportedOperationException} and others -- are used to signal that the
+ * <i>calling method</i> has made an error. This tells the caller that it should
+ * not have invoked the method when it did, with the arguments it did, or
+ * perhaps <i>ever</i>. Postcondition or other invariant failures should not
+ * throw these types of exceptions.
+ *
+ * <p><b>Note:</b> The methods of the {@code Preconditions} class are highly
+ * unusual in one way: they are <i>supposed to</i> throw exceptions, and promise
+ * in their specifications to do so even when given perfectly valid input. That
+ * is, {@code null} is a valid parameter to the method {@link
+ * #checkNotNull(Object)} -- and technically this parameter could even be marked
+ * as Nullable -- yet the method will still throw an exception anyway,
+ * because that's what its contract says to do.
+ *
+ *
+ */
+public final class Preconditions {
+ private Preconditions() {
+ }
+
+ /**
+ * Ensures the truth of an expression involving one or more parameters to the
+ * calling method.
+ *
+ * @param expression a boolean expression
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code expression} is false
+ */
+ public static void checkArgument(boolean expression) {
+ if (!expression) {
+ throw new ElasticsearchIllegalArgumentException();
+ }
+ }
+
+ /**
+ * Ensures the truth of an expression involving one or more parameters to the
+ * calling method.
+ *
+ * @param expression a boolean expression
+ * @param errorMessage the exception message to use if the check fails; will
+ * be converted to a string using {@link String#valueOf(Object)}
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code expression} is false
+ */
+ public static void checkArgument(boolean expression, Object errorMessage) {
+ if (!expression) {
+ throw new ElasticsearchIllegalArgumentException(String.valueOf(errorMessage));
+ }
+ }
+
+ /**
+ * Ensures the truth of an expression involving one or more parameters to the
+ * calling method.
+ *
+ * @param expression a boolean expression
+ * @param errorMessageTemplate a template for the exception message should the
+ * check fail. The message is formed by replacing each {@code %s}
+ * placeholder in the template with an argument. These are matched by
+ * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc.
+ * Unmatched arguments will be appended to the formatted message in square
+ * braces. Unmatched placeholders will be left as-is.
+ * @param errorMessageArgs the arguments to be substituted into the message
+ * template. Arguments are converted to strings using
+ * {@link String#valueOf(Object)}.
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code expression} is false
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if the check fails and either {@code
+ * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let
+ * this happen)
+ */
+ public static void checkArgument(boolean expression,
+ String errorMessageTemplate, Object... errorMessageArgs) {
+ if (!expression) {
+ throw new ElasticsearchIllegalArgumentException(
+ format(errorMessageTemplate, errorMessageArgs));
+ }
+ }
+
+ /**
+ * Ensures the truth of an expression involving the state of the calling
+ * instance, but not involving any parameters to the calling method.
+ *
+ * @param expression a boolean expression
+ * @throws org.elasticsearch.ElasticsearchIllegalStateException
+ * if {@code expression} is false
+ */
+ public static void checkState(boolean expression) {
+ if (!expression) {
+ throw new ElasticsearchIllegalStateException();
+ }
+ }
+
+ /**
+ * Ensures the truth of an expression involving the state of the calling
+ * instance, but not involving any parameters to the calling method.
+ *
+ * @param expression a boolean expression
+ * @param errorMessage the exception message to use if the check fails; will
+ * be converted to a string using {@link String#valueOf(Object)}
+ * @throws org.elasticsearch.ElasticsearchIllegalStateException
+ * if {@code expression} is false
+ */
+ public static void checkState(boolean expression, Object errorMessage) {
+ if (!expression) {
+ throw new ElasticsearchIllegalStateException(String.valueOf(errorMessage));
+ }
+ }
+
+ /**
+ * Ensures the truth of an expression involving the state of the calling
+ * instance, but not involving any parameters to the calling method.
+ *
+ * @param expression a boolean expression
+ * @param errorMessageTemplate a template for the exception message should the
+ * check fail. The message is formed by replacing each {@code %s}
+ * placeholder in the template with an argument. These are matched by
+ * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc.
+ * Unmatched arguments will be appended to the formatted message in square
+ * braces. Unmatched placeholders will be left as-is.
+ * @param errorMessageArgs the arguments to be substituted into the message
+ * template. Arguments are converted to strings using
+ * {@link String#valueOf(Object)}.
+ * @throws org.elasticsearch.ElasticsearchIllegalStateException
+ * if {@code expression} is false
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if the check fails and either {@code
+ * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let
+ * this happen)
+ */
+ public static void checkState(boolean expression,
+ String errorMessageTemplate, Object... errorMessageArgs) {
+ if (!expression) {
+ throw new ElasticsearchIllegalStateException(
+ format(errorMessageTemplate, errorMessageArgs));
+ }
+ }
+
+ /**
+ * Ensures that an object reference passed as a parameter to the calling
+ * method is not null.
+ *
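+ * <p>Illustrative usage ({@code id} is a hypothetical constructor argument;
+ * the method returns its argument, so the check can be inlined):
+ * <pre>{@code
+ * this.id = checkNotNull(id);
+ * }</pre>
+ *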
+ * @param reference an object reference
+ * @return the non-null reference that was validated
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if {@code reference} is null
+ */
+ public static <T> T checkNotNull(T reference) {
+ if (reference == null) {
+ throw new ElasticsearchNullPointerException();
+ }
+ return reference;
+ }
+
+ /**
+ * Ensures that an object reference passed as a parameter to the calling
+ * method is not null.
+ *
+ * @param reference an object reference
+ * @param errorMessage the exception message to use if the check fails; will
+ * be converted to a string using {@link String#valueOf(Object)}
+ * @return the non-null reference that was validated
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if {@code reference} is null
+ */
+ public static <T> T checkNotNull(T reference, Object errorMessage) {
+ if (reference == null) {
+ throw new ElasticsearchNullPointerException(String.valueOf(errorMessage));
+ }
+ return reference;
+ }
+
+ /**
+ * Ensures that an object reference passed as a parameter to the calling
+ * method is not null.
+ *
+ * @param reference an object reference
+ * @param errorMessageTemplate a template for the exception message should the
+ * check fail. The message is formed by replacing each {@code %s}
+ * placeholder in the template with an argument. These are matched by
+ * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc.
+ * Unmatched arguments will be appended to the formatted message in square
+ * braces. Unmatched placeholders will be left as-is.
+ * @param errorMessageArgs the arguments to be substituted into the message
+ * template. Arguments are converted to strings using
+ * {@link String#valueOf(Object)}.
+ * @return the non-null reference that was validated
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if {@code reference} is null
+ */
+ public static <T> T checkNotNull(T reference, String errorMessageTemplate,
+ Object... errorMessageArgs) {
+ if (reference == null) {
+ // If either of these parameters is null, the right thing happens anyway
+ throw new ElasticsearchNullPointerException(
+ format(errorMessageTemplate, errorMessageArgs));
+ }
+ return reference;
+ }
+
+ /**
+ * Ensures that an {@code Iterable} object passed as a parameter to the
+ * calling method is not null and contains no null elements.
+ *
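+ * <p>For example (using {@code java.util.Arrays}):
+ * {@code checkContentsNotNull(Arrays.asList("a", null))} throws, while a
+ * list without null elements is returned unchanged.
+ *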
+ * @param iterable the iterable to check the contents of
+ * @return the non-null {@code iterable} reference just validated
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if {@code iterable} is null or contains at
+ * least one null element
+ */
+ public static <T extends Iterable<?>> T checkContentsNotNull(T iterable) {
+ if (containsOrIsNull(iterable)) {
+ throw new ElasticsearchNullPointerException();
+ }
+ return iterable;
+ }
+
+ /**
+ * Ensures that an {@code Iterable} object passed as a parameter to the
+ * calling method is not null and contains no null elements.
+ *
+ * @param iterable the iterable to check the contents of
+ * @param errorMessage the exception message to use if the check fails; will
+ * be converted to a string using {@link String#valueOf(Object)}
+ * @return the non-null {@code iterable} reference just validated
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if {@code iterable} is null or contains at
+ * least one null element
+ */
+ public static <T extends Iterable<?>> T checkContentsNotNull(
+ T iterable, Object errorMessage) {
+ if (containsOrIsNull(iterable)) {
+ throw new ElasticsearchNullPointerException(String.valueOf(errorMessage));
+ }
+ return iterable;
+ }
+
+ /**
+ * Ensures that an {@code Iterable} object passed as a parameter to the
+ * calling method is not null and contains no null elements.
+ *
+ * @param iterable the iterable to check the contents of
+ * @param errorMessageTemplate a template for the exception message should the
+ * check fail. The message is formed by replacing each {@code %s}
+ * placeholder in the template with an argument. These are matched by
+ * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc.
+ * Unmatched arguments will be appended to the formatted message in square
+ * braces. Unmatched placeholders will be left as-is.
+ * @param errorMessageArgs the arguments to be substituted into the message
+ * template. Arguments are converted to strings using
+ * {@link String#valueOf(Object)}.
+ * @return the non-null {@code iterable} reference just validated
+ * @throws org.elasticsearch.ElasticsearchNullPointerException
+ * if {@code iterable} is null or contains at
+ * least one null element
+ */
+ public static <T extends Iterable<?>> T checkContentsNotNull(T iterable,
+ String errorMessageTemplate, Object... errorMessageArgs) {
+ if (containsOrIsNull(iterable)) {
+ throw new ElasticsearchNullPointerException(
+ format(errorMessageTemplate, errorMessageArgs));
+ }
+ return iterable;
+ }
+
+ private static boolean containsOrIsNull(Iterable<?> iterable) {
+ if (iterable == null) {
+ return true;
+ }
+
+ if (iterable instanceof Collection) {
+ Collection<?> collection = (Collection<?>) iterable;
+ try {
+ return collection.contains(null);
+ } catch (NullPointerException e) {
+ // An NPE from contains(null) implies that the collection doesn't contain null.
+ return false;
+ }
+ } else {
+ for (Object element : iterable) {
+ if (element == null) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ /**
+ * Ensures that {@code index} specifies a valid <i>element</i> in an array,
+ * list or string of size {@code size}. An element index may range from zero,
+ * inclusive, to {@code size}, exclusive.
+ *
+ * @param index a user-supplied index identifying an element of an array, list
+ * or string
+ * @param size the size of that array, list or string
+ * @throws IndexOutOfBoundsException if {@code index} is negative or is not
+ * less than {@code size}
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code size} is negative
+ */
+ public static void checkElementIndex(int index, int size) {
+ checkElementIndex(index, size, "index");
+ }
+
+ /**
+ * Ensures that {@code index} specifies a valid <i>element</i> in an array,
+ * list or string of size {@code size}. An element index may range from zero,
+ * inclusive, to {@code size}, exclusive.
+ *
+ * @param index a user-supplied index identifying an element of an array, list
+ * or string
+ * @param size the size of that array, list or string
+ * @param desc the text to use to describe this index in an error message
+ * @throws IndexOutOfBoundsException if {@code index} is negative or is not
+ * less than {@code size}
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code size} is negative
+ */
+ public static void checkElementIndex(int index, int size, String desc) {
+ checkArgument(size >= 0, "negative size: %s", size);
+ if (index < 0) {
+ throw new IndexOutOfBoundsException(
+ format("%s (%s) must not be negative", desc, index));
+ }
+ if (index >= size) {
+ throw new IndexOutOfBoundsException(
+ format("%s (%s) must be less than size (%s)", desc, index, size));
+ }
+ }
+
+ /**
+ * Ensures that {@code index} specifies a valid <i>position</i> in an array,
+ * list or string of size {@code size}. A position index may range from zero
+ * to {@code size}, inclusive.
+ *
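+ * <p>Contrast with {@link #checkElementIndex(int, int)}: for a size of 3,
+ * {@code checkPositionIndex(3, 3)} passes, while
+ * {@code checkElementIndex(3, 3)} throws.
+ *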
+ * @param index a user-supplied index identifying a position in an array, list
+ * or string
+ * @param size the size of that array, list or string
+ * @throws IndexOutOfBoundsException if {@code index} is negative or is
+ * greater than {@code size}
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code size} is negative
+ */
+ public static void checkPositionIndex(int index, int size) {
+ checkPositionIndex(index, size, "index");
+ }
+
+ /**
+ * Ensures that {@code index} specifies a valid <i>position</i> in an array,
+ * list or string of size {@code size}. A position index may range from zero
+ * to {@code size}, inclusive.
+ *
+ * @param index a user-supplied index identifying a position in an array, list
+ * or string
+ * @param size the size of that array, list or string
+ * @param desc the text to use to describe this index in an error message
+ * @throws IndexOutOfBoundsException if {@code index} is negative or is
+ * greater than {@code size}
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code size} is negative
+ */
+ public static void checkPositionIndex(int index, int size, String desc) {
+ checkArgument(size >= 0, "negative size: %s", size);
+ if (index < 0) {
+ throw new IndexOutOfBoundsException(format(
+ "%s (%s) must not be negative", desc, index));
+ }
+ if (index > size) {
+ throw new IndexOutOfBoundsException(format(
+ "%s (%s) must not be greater than size (%s)", desc, index, size));
+ }
+ }
+
+ /**
+ * Ensures that {@code start} and {@code end} specify valid <i>positions</i>
+ * in an array, list or string of size {@code size}, and are in order. A
+ * position index may range from zero to {@code size}, inclusive.
+ *
+ * @param start a user-supplied index identifying a starting position in an
+ * array, list or string
+ * @param end a user-supplied index identifying an ending position in an array,
+ * list or string
+ * @param size the size of that array, list or string
+ * @throws IndexOutOfBoundsException if either index is negative or is
+ * greater than {@code size}, or if {@code end} is less than {@code start}
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if {@code size} is negative
+ */
+ public static void checkPositionIndexes(int start, int end, int size) {
+ checkPositionIndex(start, size, "start index");
+ checkPositionIndex(end, size, "end index");
+ if (end < start) {
+ throw new IndexOutOfBoundsException(format(
+ "end index (%s) must not be less than start index (%s)", end, start));
+ }
+ }
+
+ /**
+ * Substitutes each {@code %s} in {@code template} with an argument. These
+ * are matched by position - the first {@code %s} gets {@code args[0]}, etc.
+ * If there are more arguments than placeholders, the unmatched arguments will
+ * be appended to the end of the formatted message in square braces.
+ *
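+ * <p>For example, {@code format("%s+%s", 1, 2, 3)} returns {@code "1+2 [3]"};
+ * the unmatched argument {@code 3} is appended in square braces.
+ *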
+ * @param template a non-null string containing 0 or more {@code %s}
+ * placeholders.
+ * @param args the arguments to be substituted into the message
+ * template. Arguments are converted to strings using
+ * {@link String#valueOf(Object)}. Arguments can be null.
+ */
+ // VisibleForTesting
+ static String format(String template, Object... args) {
+ // start substituting the arguments into the '%s' placeholders
+ StringBuilder builder = new StringBuilder(
+ template.length() + 16 * args.length);
+ int templateStart = 0;
+ int i = 0;
+ while (i < args.length) {
+ int placeholderStart = template.indexOf("%s", templateStart);
+ if (placeholderStart == -1) {
+ break;
+ }
+ builder.append(template.substring(templateStart, placeholderStart));
+ builder.append(args[i++]);
+ templateStart = placeholderStart + 2;
+ }
+ builder.append(template.substring(templateStart));
+
+ // if we run out of placeholders, append the extra args in square braces
+ if (i < args.length) {
+ builder.append(" [");
+ builder.append(args[i++]);
+ while (i < args.length) {
+ builder.append(", ");
+ builder.append(args[i++]);
+ }
+ builder.append("]");
+ }
+
+ return builder.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/Priority.java b/src/main/java/org/elasticsearch/common/Priority.java
new file mode 100644
index 0000000..6d7dd10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Priority.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ *
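+ * A fixed set of priority levels, backed by byte values 0 ({@code URGENT})
+ * through 4 ({@code LANGUID}). {@link #compareTo(Priority)} orders by the
+ * underlying byte, so {@code URGENT.compareTo(LANGUID) < 0}.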
+ */
+public final class Priority implements Comparable<Priority> {
+
+ public static Priority fromByte(byte b) {
+ switch (b) {
+ case 0:
+ return URGENT;
+ case 1:
+ return HIGH;
+ case 2:
+ return NORMAL;
+ case 3:
+ return LOW;
+ case 4:
+ return LANGUID;
+ default:
+ throw new ElasticsearchIllegalArgumentException("can't find priority for [" + b + "]");
+ }
+ }
+
+ public static final Priority URGENT = new Priority((byte) 0);
+ public static final Priority HIGH = new Priority((byte) 1);
+ public static final Priority NORMAL = new Priority((byte) 2);
+ public static final Priority LOW = new Priority((byte) 3);
+ public static final Priority LANGUID = new Priority((byte) 4);
+
+ private final byte value;
+
+ private Priority(byte value) {
+ this.value = value;
+ }
+
+ public byte value() {
+ return this.value;
+ }
+
+ public int compareTo(Priority p) {
+ return this.value - p.value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || Priority.class != o.getClass()) return false;
+
+ Priority priority = (Priority) o;
+
+ if (value != priority.value) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return (int) value;
+ }
+
+ @Override
+ public String toString() {
+ switch (value) {
+ case (byte) 0:
+ return "URGENT";
+ case (byte) 1:
+ return "HIGH";
+ case (byte) 2:
+ return "NORMAL";
+ case (byte) 3:
+ return "LOW";
+ default:
+ return "LANGUID";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/StopWatch.java b/src/main/java/org/elasticsearch/common/StopWatch.java
new file mode 100644
index 0000000..eec3712
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/StopWatch.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.text.NumberFormat;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Simple stop watch, allowing for timing of a number of tasks,
+ * exposing total running time and running time for each named task.
+ * <p/>
+ * <p>Conceals use of <code>System.currentTimeMillis()</code>, improving the
+ * readability of application code and reducing the likelihood of calculation errors.
+ * <p/>
+ * <p>Note that this object is not designed to be thread-safe and does not
+ * use synchronization.
+ * <p/>
+ * <p>This class is normally used to verify performance during proof-of-concepts
+ * and in development, rather than as part of production applications.
+ *
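+ * <p>A minimal usage sketch (the id and task name are illustrative):
+ * <pre>
+ * StopWatch watch = new StopWatch("demo");
+ * watch.start("load");
+ * // ... timed work ...
+ * watch.stop();
+ * System.out.println(watch.prettyPrint());
+ * </pre>
+ *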
+ *
+ */
+public class StopWatch {
+
+ /**
+ * Identifier of this stop watch.
+ * Handy when we have output from multiple stop watches
+ * and need to distinguish between them in log or console output.
+ */
+ private final String id;
+
+ private boolean keepTaskList = true;
+
+ private final List<TaskInfo> taskList = new LinkedList<TaskInfo>();
+
+ /**
+ * Start time of the current task
+ */
+ private long startTimeMillis;
+
+ /**
+ * Is the stop watch currently running?
+ */
+ private boolean running;
+
+ /**
+ * Name of the current task
+ */
+ private String currentTaskName;
+
+ private TaskInfo lastTaskInfo;
+
+ private int taskCount;
+
+ /**
+ * Total running time
+ */
+ private long totalTimeMillis;
+
+ /**
+ * Construct a new stop watch. Does not start any task.
+ */
+ public StopWatch() {
+ this.id = "";
+ }
+
+ /**
+ * Construct a new stop watch with the given id.
+ * Does not start any task.
+ *
+ * @param id identifier for this stop watch.
+ * Handy when we have output from multiple stop watches
+ * and need to distinguish between them.
+ */
+ public StopWatch(String id) {
+ this.id = id;
+ }
+
+ /**
+ * Determine whether the TaskInfo array is built over time. Set this to
+ * "false" when using a StopWatch for millions of intervals, or the task
+ * info structure will consume excessive memory. Default is "true".
+ */
+ public StopWatch keepTaskList(boolean keepTaskList) {
+ this.keepTaskList = keepTaskList;
+ return this;
+ }
+
+ /**
+ * Start an unnamed task. The results are undefined if {@link #stop()}
+ * or timing methods are called without invoking this method.
+ *
+ * @see #stop()
+ */
+ public StopWatch start() throws IllegalStateException {
+ return start("");
+ }
+
+ /**
+ * Start a named task. The results are undefined if {@link #stop()}
+ * or timing methods are called without invoking this method.
+ *
+ * @param taskName the name of the task to start
+ * @see #stop()
+ */
+ public StopWatch start(String taskName) throws IllegalStateException {
+ if (this.running) {
+ throw new IllegalStateException("Can't start StopWatch: it's already running");
+ }
+ this.startTimeMillis = System.currentTimeMillis();
+ this.running = true;
+ this.currentTaskName = taskName;
+ return this;
+ }
+
+ /**
+ * Stop the current task. The results are undefined if timing
+ * methods are called without invoking at least one pair
+ * {@link #start()} / {@link #stop()} methods.
+ *
+ * @see #start()
+ */
+ public StopWatch stop() throws IllegalStateException {
+ if (!this.running) {
+ throw new IllegalStateException("Can't stop StopWatch: it's not running");
+ }
+ long lastTime = System.currentTimeMillis() - this.startTimeMillis;
+ this.totalTimeMillis += lastTime;
+ this.lastTaskInfo = new TaskInfo(this.currentTaskName, lastTime);
+ if (this.keepTaskList) {
+ this.taskList.add(lastTaskInfo);
+ }
+ ++this.taskCount;
+ this.running = false;
+ this.currentTaskName = null;
+ return this;
+ }
+
+ /**
+ * Return whether the stop watch is currently running.
+ */
+ public boolean isRunning() {
+ return this.running;
+ }
+
+ /**
+ * Return the time taken by the last task.
+ */
+ public TimeValue lastTaskTime() throws IllegalStateException {
+ if (this.lastTaskInfo == null) {
+ throw new IllegalStateException("No tests run: can't get last interval");
+ }
+ return this.lastTaskInfo.getTime();
+ }
+
+ /**
+ * Return the name of the last task.
+ */
+ public String lastTaskName() throws IllegalStateException {
+ if (this.lastTaskInfo == null) {
+ throw new IllegalStateException("No tests run: can't get last interval");
+ }
+ return this.lastTaskInfo.getTaskName();
+ }
+
+ /**
+ * Return the total time for all tasks.
+ */
+ public TimeValue totalTime() {
+ return new TimeValue(totalTimeMillis, TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Return the number of tasks timed.
+ */
+ public int taskCount() {
+ return taskCount;
+ }
+
+ /**
+ * Return an array of the data for tasks performed.
+ */
+ public TaskInfo[] taskInfo() {
+ if (!this.keepTaskList) {
+ throw new UnsupportedOperationException("Task info is not being kept!");
+ }
+ return this.taskList.toArray(new TaskInfo[this.taskList.size()]);
+ }
+
+ /**
+ * Return a short description of the total running time.
+ */
+ public String shortSummary() {
+ return "StopWatch '" + this.id + "': running time = " + totalTime();
+ }
+
+ /**
+ * Return a string with a table describing all tasks performed.
+ * For custom reporting, call getTaskInfo() and use the task info directly.
+ */
+ public String prettyPrint() {
+ StringBuilder sb = new StringBuilder(shortSummary());
+ sb.append('\n');
+ if (!this.keepTaskList) {
+ sb.append("No task info kept");
+ } else {
+ sb.append("-----------------------------------------\n");
+ sb.append("ms % Task name\n");
+ sb.append("-----------------------------------------\n");
+ NumberFormat nf = NumberFormat.getNumberInstance(Locale.ROOT);
+ nf.setMinimumIntegerDigits(5);
+ nf.setGroupingUsed(false);
+ NumberFormat pf = NumberFormat.getPercentInstance(Locale.ROOT);
+ pf.setMinimumIntegerDigits(3);
+ pf.setGroupingUsed(false);
+ for (TaskInfo task : taskInfo()) {
+ sb.append(nf.format(task.getTime().millis())).append(" ");
+ sb.append(pf.format(task.getTime().secondsFrac() / totalTime().secondsFrac())).append(" ");
+ sb.append(task.getTaskName()).append("\n");
+ }
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Return an informative string describing all tasks performed.
+ * For custom reporting, call <code>getTaskInfo()</code> and use the task info directly.
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder(shortSummary());
+ if (this.keepTaskList) {
+ for (TaskInfo task : taskInfo()) {
+ sb.append("; [").append(task.getTaskName()).append("] took ").append(task.getTime());
+ long percent = Math.round((100.0f * task.getTime().millis()) / totalTime().millis());
+ sb.append(" = ").append(percent).append("%");
+ }
+ } else {
+ sb.append("; no task info kept");
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Inner class to hold data about one task executed within the stop watch.
+ */
+ public static class TaskInfo {
+
+ private final String taskName;
+
+ private final TimeValue timeValue;
+
+ private TaskInfo(String taskName, long timeMillis) {
+ this.taskName = taskName;
+ this.timeValue = new TimeValue(timeMillis, TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Return the name of this task.
+ */
+ public String getTaskName() {
+ return taskName;
+ }
+
+ /**
+ * Return the time this task took.
+ */
+ public TimeValue getTime() {
+ return timeValue;
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/Strings.java b/src/main/java/org/elasticsearch/common/Strings.java
new file mode 100644
index 0000000..c2bfbfd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Strings.java
@@ -0,0 +1,1581 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.util.CollectionUtils;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.*;
+
+/**
+ *
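+ * Utility methods for working with strings, paths, locales and string arrays.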
+ */
+public class Strings {
+
+ public static final String[] EMPTY_ARRAY = new String[0];
+
+ private static final String FOLDER_SEPARATOR = "/";
+
+ private static final String WINDOWS_FOLDER_SEPARATOR = "\\";
+
+ private static final String TOP_PATH = "..";
+
+ private static final String CURRENT_PATH = ".";
+
+ private static final char EXTENSION_SEPARATOR = '.';
+
+ public static void tabify(int tabs, String from, StringBuilder to) throws Exception {
+ final BufferedReader reader = new BufferedReader(new FastStringReader(from));
+ try {
+ String line;
+ while ((line = reader.readLine()) != null) {
+ for (int i = 0; i < tabs; i++) {
+ to.append('\t');
+ }
+ to.append(line).append('\n');
+ }
+ } finally {
+ reader.close();
+ }
+ }
+
+ public static void spaceify(int spaces, String from, StringBuilder to) throws Exception {
+ final BufferedReader reader = new BufferedReader(new FastStringReader(from));
+ try {
+ String line;
+ while ((line = reader.readLine()) != null) {
+ for (int i = 0; i < spaces; i++) {
+ to.append(' ');
+ }
+ to.append(line).append('\n');
+ }
+ } finally {
+ reader.close();
+ }
+ }
+
+ /**
+ * Splits a backslash escaped string on the separator.
+ * <p/>
+ * Current backslash escaping supported:
+ * <br> \n \t \r \b \f are escaped the same as a Java String
+ * <br> Other characters following a backslash are produced verbatim (\c => c)
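+ * <br> For example, {@code splitSmart("a\\,b,c", ",", true)} yields
+ * {@code ["a,b", "c"]}; the escaped comma is kept verbatim rather than
+ * treated as a separator.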
+ *
+ * @param s the string to split
+ * @param separator the separator to split on
+ * @param decode decode backslash escaping
+ */
+ public static List<String> splitSmart(String s, String separator, boolean decode) {
+ ArrayList<String> lst = new ArrayList<String>(2);
+ StringBuilder sb = new StringBuilder();
+ int pos = 0, end = s.length();
+ while (pos < end) {
+ if (s.startsWith(separator, pos)) {
+ if (sb.length() > 0) {
+ lst.add(sb.toString());
+ sb = new StringBuilder();
+ }
+ pos += separator.length();
+ continue;
+ }
+
+ char ch = s.charAt(pos++);
+ if (ch == '\\') {
+ if (!decode) sb.append(ch);
+ if (pos >= end) break; // ERROR, or let it go?
+ ch = s.charAt(pos++);
+ if (decode) {
+ switch (ch) {
+ case 'n':
+ ch = '\n';
+ break;
+ case 't':
+ ch = '\t';
+ break;
+ case 'r':
+ ch = '\r';
+ break;
+ case 'b':
+ ch = '\b';
+ break;
+ case 'f':
+ ch = '\f';
+ break;
+ }
+ }
+ }
+
+ sb.append(ch);
+ }
+
+ if (sb.length() > 0) {
+ lst.add(sb.toString());
+ }
+
+ return lst;
+ }
+
+
+ public static List<String> splitWS(String s, boolean decode) {
+ ArrayList<String> lst = new ArrayList<String>(2);
+ StringBuilder sb = new StringBuilder();
+ int pos = 0, end = s.length();
+ while (pos < end) {
+ char ch = s.charAt(pos++);
+ if (Character.isWhitespace(ch)) {
+ if (sb.length() > 0) {
+ lst.add(sb.toString());
+ sb = new StringBuilder();
+ }
+ continue;
+ }
+
+ if (ch == '\\') {
+ if (!decode) sb.append(ch);
+ if (pos >= end) break; // ERROR, or let it go?
+ ch = s.charAt(pos++);
+ if (decode) {
+ switch (ch) {
+ case 'n':
+ ch = '\n';
+ break;
+ case 't':
+ ch = '\t';
+ break;
+ case 'r':
+ ch = '\r';
+ break;
+ case 'b':
+ ch = '\b';
+ break;
+ case 'f':
+ ch = '\f';
+ break;
+ }
+ }
+ }
+
+ sb.append(ch);
+ }
+
+ if (sb.length() > 0) {
+ lst.add(sb.toString());
+ }
+
+ return lst;
+ }
+
+ //---------------------------------------------------------------------
+ // General convenience methods for working with Strings
+ //---------------------------------------------------------------------
+
+ /**
+ * Check that the given CharSequence is neither <code>null</code> nor of length 0.
+ * Note: Will return <code>true</code> for a CharSequence that purely consists of whitespace.
+ * <p><pre>
+ * StringUtils.hasLength(null) = false
+ * StringUtils.hasLength("") = false
+ * StringUtils.hasLength(" ") = true
+ * StringUtils.hasLength("Hello") = true
+ * </pre>
+ *
+ * @param str the CharSequence to check (may be <code>null</code>)
+ * @return <code>true</code> if the CharSequence is not null and has length
+ * @see #hasText(String)
+ */
+ public static boolean hasLength(CharSequence str) {
+ return (str != null && str.length() > 0);
+ }
+
+ /**
+ * Check that the given String is neither <code>null</code> nor of length 0.
+ * Note: Will return <code>true</code> for a String that purely consists of whitespace.
+ *
+ * @param str the String to check (may be <code>null</code>)
+ * @return <code>true</code> if the String is not null and has length
+ * @see #hasLength(CharSequence)
+ */
+ public static boolean hasLength(String str) {
+ return hasLength((CharSequence) str);
+ }
+
+ /**
+ * Check whether the given CharSequence has actual text.
+ * More specifically, returns <code>true</code> if the string is not <code>null</code>,
+ * its length is greater than 0, and it contains at least one non-whitespace character.
+ * <p><pre>
+ * StringUtils.hasText(null) = false
+ * StringUtils.hasText("") = false
+ * StringUtils.hasText(" ") = false
+ * StringUtils.hasText("12345") = true
+ * StringUtils.hasText(" 12345 ") = true
+ * </pre>
+ *
+ * @param str the CharSequence to check (may be <code>null</code>)
+ * @return <code>true</code> if the CharSequence is not <code>null</code>,
+ * its length is greater than 0, and it does not contain whitespace only
+ * @see java.lang.Character#isWhitespace
+ */
+ public static boolean hasText(CharSequence str) {
+ if (!hasLength(str)) {
+ return false;
+ }
+ int strLen = str.length();
+ for (int i = 0; i < strLen; i++) {
+ if (!Character.isWhitespace(str.charAt(i))) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Check whether the given String has actual text.
+ * More specifically, returns <code>true</code> if the string is not <code>null</code>,
+ * its length is greater than 0, and it contains at least one non-whitespace character.
+ *
+ * @param str the String to check (may be <code>null</code>)
+ * @return <code>true</code> if the String is not <code>null</code>, its length is
+ * greater than 0, and it does not contain whitespace only
+ * @see #hasText(CharSequence)
+ */
+ public static boolean hasText(String str) {
+ return hasText((CharSequence) str);
+ }
+
+ /**
+ * Check whether the given CharSequence contains any whitespace characters.
+ *
+ * @param str the CharSequence to check (may be <code>null</code>)
+ * @return <code>true</code> if the CharSequence is not empty and
+ * contains at least 1 whitespace character
+ * @see java.lang.Character#isWhitespace
+ */
+ public static boolean containsWhitespace(CharSequence str) {
+ if (!hasLength(str)) {
+ return false;
+ }
+ int strLen = str.length();
+ for (int i = 0; i < strLen; i++) {
+ if (Character.isWhitespace(str.charAt(i))) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Check whether the given String contains any whitespace characters.
+ *
+ * @param str the String to check (may be <code>null</code>)
+ * @return <code>true</code> if the String is not empty and
+ * contains at least 1 whitespace character
+ * @see #containsWhitespace(CharSequence)
+ */
+ public static boolean containsWhitespace(String str) {
+ return containsWhitespace((CharSequence) str);
+ }
+
+ /**
+ * Trim leading and trailing whitespace from the given String.
+ *
+ * @param str the String to check
+ * @return the trimmed String
+ * @see java.lang.Character#isWhitespace
+ */
+ public static String trimWhitespace(String str) {
+ if (!hasLength(str)) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str);
+ while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) {
+ sb.deleteCharAt(0);
+ }
+ while (sb.length() > 0 && Character.isWhitespace(sb.charAt(sb.length() - 1))) {
+ sb.deleteCharAt(sb.length() - 1);
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Trim <i>all</i> whitespace from the given String:
+ * leading, trailing, and in-between characters.
+ *
+ * @param str the String to check
+ * @return the trimmed String
+ * @see java.lang.Character#isWhitespace
+ */
+ public static String trimAllWhitespace(String str) {
+ if (!hasLength(str)) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str);
+ int index = 0;
+ while (sb.length() > index) {
+ if (Character.isWhitespace(sb.charAt(index))) {
+ sb.deleteCharAt(index);
+ } else {
+ index++;
+ }
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Trim leading whitespace from the given String.
+ *
+ * @param str the String to check
+ * @return the trimmed String
+ * @see java.lang.Character#isWhitespace
+ */
+ public static String trimLeadingWhitespace(String str) {
+ if (!hasLength(str)) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str);
+ while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) {
+ sb.deleteCharAt(0);
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Trim trailing whitespace from the given String.
+ *
+ * @param str the String to check
+ * @return the trimmed String
+ * @see java.lang.Character#isWhitespace
+ */
+ public static String trimTrailingWhitespace(String str) {
+ if (!hasLength(str)) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str);
+ while (sb.length() > 0 && Character.isWhitespace(sb.charAt(sb.length() - 1))) {
+ sb.deleteCharAt(sb.length() - 1);
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Trim all occurrences of the supplied leading character from the given String.
+ *
+ * @param str the String to check
+ * @param leadingCharacter the leading character to be trimmed
+ * @return the trimmed String
+ */
+ public static String trimLeadingCharacter(String str, char leadingCharacter) {
+ if (!hasLength(str)) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str);
+ while (sb.length() > 0 && sb.charAt(0) == leadingCharacter) {
+ sb.deleteCharAt(0);
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Trim all occurrences of the supplied trailing character from the given String.
+ *
+ * @param str the String to check
+ * @param trailingCharacter the trailing character to be trimmed
+ * @return the trimmed String
+ */
+ public static String trimTrailingCharacter(String str, char trailingCharacter) {
+ if (!hasLength(str)) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str);
+ while (sb.length() > 0 && sb.charAt(sb.length() - 1) == trailingCharacter) {
+ sb.deleteCharAt(sb.length() - 1);
+ }
+ return sb.toString();
+ }
+
+
+ /**
+ * Test if the given String starts with the specified prefix,
+ * ignoring upper/lower case.
+ *
+ * @param str the String to check
+ * @param prefix the prefix to look for
+ * @see java.lang.String#startsWith
+ */
+ public static boolean startsWithIgnoreCase(String str, String prefix) {
+ if (str == null || prefix == null) {
+ return false;
+ }
+ if (str.startsWith(prefix)) {
+ return true;
+ }
+ if (str.length() < prefix.length()) {
+ return false;
+ }
+ String lcStr = str.substring(0, prefix.length()).toLowerCase(Locale.ROOT);
+ String lcPrefix = prefix.toLowerCase(Locale.ROOT);
+ return lcStr.equals(lcPrefix);
+ }
+
+ /**
+ * Test if the given String ends with the specified suffix,
+ * ignoring upper/lower case.
+ *
+ * @param str the String to check
+ * @param suffix the suffix to look for
+ * @see java.lang.String#endsWith
+ */
+ public static boolean endsWithIgnoreCase(String str, String suffix) {
+ if (str == null || suffix == null) {
+ return false;
+ }
+ if (str.endsWith(suffix)) {
+ return true;
+ }
+ if (str.length() < suffix.length()) {
+ return false;
+ }
+
+ String lcStr = str.substring(str.length() - suffix.length()).toLowerCase(Locale.ROOT);
+ String lcSuffix = suffix.toLowerCase(Locale.ROOT);
+ return lcStr.equals(lcSuffix);
+ }
+
+ /**
+ * Test whether the given string matches the given substring
+ * at the given index.
+ *
+ * @param str the original string (or StringBuilder)
+ * @param index the index in the original string to start matching against
+ * @param substring the substring to match at the given index
+ */
+ public static boolean substringMatch(CharSequence str, int index, CharSequence substring) {
+ for (int j = 0; j < substring.length(); j++) {
+ int i = index + j;
+ if (i >= str.length() || str.charAt(i) != substring.charAt(j)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Count the occurrences of the substring in string s.
+ *
+ * @param str string to search in. Return 0 if this is null.
+ * @param sub string to search for. Return 0 if this is null.
+ */
+ public static int countOccurrencesOf(String str, String sub) {
+ if (str == null || sub == null || str.length() == 0 || sub.length() == 0) {
+ return 0;
+ }
+ int count = 0;
+ int pos = 0;
+ int idx;
+ while ((idx = str.indexOf(sub, pos)) != -1) {
+ ++count;
+ pos = idx + sub.length();
+ }
+ return count;
+ }
+
+ /**
+ * Replace all occurrences of a substring within a string with
+ * another string.
+ *
+ * @param inString String to examine
+ * @param oldPattern String to replace
+ * @param newPattern String to insert
+ * @return a String with the replacements
+ */
+ public static String replace(String inString, String oldPattern, String newPattern) {
+ if (!hasLength(inString) || !hasLength(oldPattern) || newPattern == null) {
+ return inString;
+ }
+ StringBuilder sb = new StringBuilder();
+ int pos = 0; // our position in the old string
+ int index = inString.indexOf(oldPattern);
+ // the index of an occurrence we've found, or -1
+ int patLen = oldPattern.length();
+ while (index >= 0) {
+ sb.append(inString.substring(pos, index));
+ sb.append(newPattern);
+ pos = index + patLen;
+ index = inString.indexOf(oldPattern, pos);
+ }
+ sb.append(inString.substring(pos));
+ // remember to append any characters to the right of a match
+ return sb.toString();
+ }
+
+ /**
+ * Delete all occurrences of the given substring.
+ *
+ * @param inString the original String
+ * @param pattern the pattern to delete all occurrences of
+ * @return the resulting String
+ */
+ public static String delete(String inString, String pattern) {
+ return replace(inString, pattern, "");
+ }
+
+ /**
+ * Delete any character in a given String.
+ *
+ * @param inString the original String
+ * @param charsToDelete a set of characters to delete.
+ * E.g. "az\n" will delete 'a's, 'z's and new lines.
+ * @return the resulting String
+ */
+ public static String deleteAny(String inString, String charsToDelete) {
+ if (!hasLength(inString) || !hasLength(charsToDelete)) {
+ return inString;
+ }
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < inString.length(); i++) {
+ char c = inString.charAt(i);
+ if (charsToDelete.indexOf(c) == -1) {
+ sb.append(c);
+ }
+ }
+ return sb.toString();
+ }
+
+
+ //---------------------------------------------------------------------
+ // Convenience methods for working with formatted Strings
+ //---------------------------------------------------------------------
+
+ /**
+ * Quote the given String with single quotes.
+ *
+ * @param str the input String (e.g. "myString")
+ * @return the quoted String (e.g. "'myString'"),
+ * or <code>null</code> if the input was <code>null</code>
+ */
+ public static String quote(String str) {
+ return (str != null ? "'" + str + "'" : null);
+ }
+
+ /**
+ * Turn the given Object into a String with single quotes
+ * if it is a String; otherwise keep the Object as-is.
+ *
+ * @param obj the input Object (e.g. "myString")
+ * @return the quoted String (e.g. "'myString'"),
+ * or the input object as-is if not a String
+ */
+ public static Object quoteIfString(Object obj) {
+ return (obj instanceof String ? quote((String) obj) : obj);
+ }
+
+ /**
+ * Unqualify a string qualified by a '.' dot character. For example,
+ * "this.name.is.qualified", returns "qualified".
+ *
+ * @param qualifiedName the qualified name
+ */
+ public static String unqualify(String qualifiedName) {
+ return unqualify(qualifiedName, '.');
+ }
+
+ /**
+ * Unqualify a string qualified by a separator character. For example,
+ * "this:name:is:qualified" returns "qualified" if using a ':' separator.
+ *
+ * @param qualifiedName the qualified name
+ * @param separator the separator
+ */
+ public static String unqualify(String qualifiedName, char separator) {
+ return qualifiedName.substring(qualifiedName.lastIndexOf(separator) + 1);
+ }
+
+ /**
+ * Capitalize a <code>String</code>, changing the first letter to
+ * upper case as per {@link Character#toUpperCase(char)}.
+ * No other letters are changed.
+ *
+ * @param str the String to capitalize, may be <code>null</code>
+ * @return the capitalized String, <code>null</code> if null
+ */
+ public static String capitalize(String str) {
+ return changeFirstCharacterCase(str, true);
+ }
+
+ /**
+ * Uncapitalize a <code>String</code>, changing the first letter to
+ * lower case as per {@link Character#toLowerCase(char)}.
+ * No other letters are changed.
+ *
+ * @param str the String to uncapitalize, may be <code>null</code>
+ * @return the uncapitalized String, <code>null</code> if null
+ */
+ public static String uncapitalize(String str) {
+ return changeFirstCharacterCase(str, false);
+ }
+
+ private static String changeFirstCharacterCase(String str, boolean capitalize) {
+ if (str == null || str.length() == 0) {
+ return str;
+ }
+ StringBuilder sb = new StringBuilder(str.length());
+ if (capitalize) {
+ sb.append(Character.toUpperCase(str.charAt(0)));
+ } else {
+ sb.append(Character.toLowerCase(str.charAt(0)));
+ }
+ sb.append(str.substring(1));
+ return sb.toString();
+ }
+
+ public static final ImmutableSet<Character> INVALID_FILENAME_CHARS = ImmutableSet.of('\\', '/', '*', '?', '"', '<', '>', '|', ' ', ',');
+
+ public static boolean validFileName(String fileName) {
+ for (int i = 0; i < fileName.length(); i++) {
+ char c = fileName.charAt(i);
+ if (INVALID_FILENAME_CHARS.contains(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public static boolean validFileNameExcludingAstrix(String fileName) {
+ for (int i = 0; i < fileName.length(); i++) {
+ char c = fileName.charAt(i);
+ if (c != '*' && INVALID_FILENAME_CHARS.contains(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Extract the filename from the given path,
+ * e.g. "mypath/myfile.txt" -> "myfile.txt".
+ *
+ * @param path the file path (may be <code>null</code>)
+ * @return the extracted filename, or <code>null</code> if none
+ */
+ public static String getFilename(String path) {
+ if (path == null) {
+ return null;
+ }
+ int separatorIndex = path.lastIndexOf(FOLDER_SEPARATOR);
+ return (separatorIndex != -1 ? path.substring(separatorIndex + 1) : path);
+ }
+
+ /**
+ * Extract the filename extension from the given path,
+ * e.g. "mypath/myfile.txt" -> "txt".
+ *
+ * @param path the file path (may be <code>null</code>)
+ * @return the extracted filename extension, or <code>null</code> if none
+ */
+ public static String getFilenameExtension(String path) {
+ if (path == null) {
+ return null;
+ }
+ int sepIndex = path.lastIndexOf(EXTENSION_SEPARATOR);
+ return (sepIndex != -1 ? path.substring(sepIndex + 1) : null);
+ }
+
+ /**
+ * Strip the filename extension from the given path,
+ * e.g. "mypath/myfile.txt" -> "mypath/myfile".
+ *
+ * @param path the file path (may be <code>null</code>)
+ * @return the path with stripped filename extension,
+ * or <code>null</code> if none
+ */
+ public static String stripFilenameExtension(String path) {
+ if (path == null) {
+ return null;
+ }
+ int sepIndex = path.lastIndexOf(EXTENSION_SEPARATOR);
+ return (sepIndex != -1 ? path.substring(0, sepIndex) : path);
+ }
+
+ /**
+ * Apply the given relative path to the given path,
+ * assuming standard Java folder separation (i.e. "/" separators).
+ *
+ * @param path the path to start from (usually a full file path)
+ * @param relativePath the relative path to apply
+ * (relative to the full file path above)
+ * @return the full file path that results from applying the relative path
+ */
+ public static String applyRelativePath(String path, String relativePath) {
+ int separatorIndex = path.lastIndexOf(FOLDER_SEPARATOR);
+ if (separatorIndex != -1) {
+ String newPath = path.substring(0, separatorIndex);
+ if (!relativePath.startsWith(FOLDER_SEPARATOR)) {
+ newPath += FOLDER_SEPARATOR;
+ }
+ return newPath + relativePath;
+ } else {
+ return relativePath;
+ }
+ }
+
+ /**
+ * Normalize the path by suppressing sequences like "path/.." and
+ * inner simple dots.
+ * <p>The result is convenient for path comparison. For other uses,
+ * notice that Windows separators ("\") are replaced by simple slashes.
+ *
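+ * <p>For example, {@code cleanPath("file:core/../core/io/Resource.class")}
+ * returns {@code "file:core/io/Resource.class"}.
+ *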
+ * @param path the original path
+ * @return the normalized path
+ */
+ public static String cleanPath(String path) {
+ if (path == null) {
+ return null;
+ }
+ String pathToUse = replace(path, WINDOWS_FOLDER_SEPARATOR, FOLDER_SEPARATOR);
+
+ // Strip prefix from path to analyze, to not treat it as part of the
+ // first path element. This is necessary to correctly parse paths like
+ // "file:core/../core/io/Resource.class", where the ".." should just
+ // strip the first "core" directory while keeping the "file:" prefix.
+ int prefixIndex = pathToUse.indexOf(":");
+ String prefix = "";
+ if (prefixIndex != -1) {
+ prefix = pathToUse.substring(0, prefixIndex + 1);
+ pathToUse = pathToUse.substring(prefixIndex + 1);
+ }
+ if (pathToUse.startsWith(FOLDER_SEPARATOR)) {
+ prefix = prefix + FOLDER_SEPARATOR;
+ pathToUse = pathToUse.substring(1);
+ }
+
+ String[] pathArray = delimitedListToStringArray(pathToUse, FOLDER_SEPARATOR);
+ List<String> pathElements = new LinkedList<String>();
+ int tops = 0;
+
+ for (int i = pathArray.length - 1; i >= 0; i--) {
+ String element = pathArray[i];
+ if (CURRENT_PATH.equals(element)) {
+ // Points to current directory - drop it.
+ } else if (TOP_PATH.equals(element)) {
+ // Registering top path found.
+ tops++;
+ } else {
+ if (tops > 0) {
+ // Merging path element with element corresponding to top path.
+ tops--;
+ } else {
+ // Normal path element found.
+ pathElements.add(0, element);
+ }
+ }
+ }
+
+ // Remaining top paths need to be retained.
+ for (int i = 0; i < tops; i++) {
+ pathElements.add(0, TOP_PATH);
+ }
+
+ return prefix + collectionToDelimitedString(pathElements, FOLDER_SEPARATOR);
+ }
+
+ /**
+ * Compare two paths after normalization of them.
+ *
+ * @param path1 first path for comparison
+ * @param path2 second path for comparison
+ * @return whether the two paths are equivalent after normalization
+ */
+ public static boolean pathEquals(String path1, String path2) {
+ return cleanPath(path1).equals(cleanPath(path2));
+ }
+
+ /**
+ * Parse the given <code>localeString</code> into a {@link Locale}.
+ * <p>This is the inverse operation of {@link Locale#toString Locale's toString}.
+ *
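+ * <p>For example, {@code parseLocaleString("en_GB")} yields a Locale with
+ * language "en" and country "GB" (and an empty variant).
+ *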
+ * @param localeString the locale string, following <code>Locale's</code>
+ * <code>toString()</code> format ("en", "en_UK", etc);
+ * also accepts spaces as separators, as an alternative to underscores
+ * @return a corresponding <code>Locale</code> instance
+ */
+ public static Locale parseLocaleString(String localeString) {
+ String[] parts = tokenizeToStringArray(localeString, "_ ", false, false);
+ String language = (parts.length != 0 ? parts[0] : "");
+ String country = (parts.length > 1 ? parts[1] : "");
+ String variant = "";
+ if (parts.length >= 2) {
+ // There is definitely a variant, and it is everything after the country
+ // code sans the separator between the country code and the variant.
+ int endIndexOfCountryCode = localeString.indexOf(country) + country.length();
+ // Strip off any leading '_' and whitespace, what's left is the variant.
+ variant = trimLeadingWhitespace(localeString.substring(endIndexOfCountryCode));
+ if (variant.startsWith("_")) {
+ variant = trimLeadingCharacter(variant, '_');
+ }
+ }
+ return (language.length() > 0 ? new Locale(language, country, variant) : null);
+ }
+
+ /**
+ * Determine the RFC 3066 compliant language tag,
+ * as used for the HTTP "Accept-Language" header.
+ *
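+ * <p>For example, {@code toLanguageTag(new Locale("en", "US"))} returns
+ * {@code "en-US"}.
+ *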
+ * @param locale the Locale to transform to a language tag
+ * @return the RFC 3066 compliant language tag as String
+ */
+ public static String toLanguageTag(Locale locale) {
+ return locale.getLanguage() + (hasText(locale.getCountry()) ? "-" + locale.getCountry() : "");
+ }
+
+
+ //---------------------------------------------------------------------
+ // Convenience methods for working with String arrays
+ //---------------------------------------------------------------------
+
+ /**
+ * Append the given String to the given String array, returning a new array
+ * consisting of the input array contents plus the given String.
+ *
+ * @param array the array to append to (can be <code>null</code>)
+ * @param str the String to append
+ * @return the new array (never <code>null</code>)
+ */
+ public static String[] addStringToArray(String[] array, String str) {
+ if (isEmpty(array)) {
+ return new String[]{str};
+ }
+ String[] newArr = new String[array.length + 1];
+ System.arraycopy(array, 0, newArr, 0, array.length);
+ newArr[array.length] = str;
+ return newArr;
+ }
+
+ /**
+ * Concatenate the given String arrays into one,
+ * with overlapping array elements included twice.
+ * <p>The order of elements in the original arrays is preserved.
+ *
+ * @param array1 the first array (can be <code>null</code>)
+ * @param array2 the second array (can be <code>null</code>)
+ * @return the new array (<code>null</code> if both given arrays were <code>null</code>)
+ */
+ public static String[] concatenateStringArrays(String[] array1, String[] array2) {
+ if (isEmpty(array1)) {
+ return array2;
+ }
+ if (isEmpty(array2)) {
+ return array1;
+ }
+ String[] newArr = new String[array1.length + array2.length];
+ System.arraycopy(array1, 0, newArr, 0, array1.length);
+ System.arraycopy(array2, 0, newArr, array1.length, array2.length);
+ return newArr;
+ }
+
+ /**
+ * Merge the given String arrays into one, with overlapping
+ * array elements only included once.
+ * <p>The order of elements in the original arrays is preserved
+ * (with the exception of overlapping elements, which are only
+ * included on their first occurrence).
+ *
+ * @param array1 the first array (can be <code>null</code>)
+ * @param array2 the second array (can be <code>null</code>)
+ * @return the new array (<code>null</code> if both given arrays were <code>null</code>)
+ */
+ public static String[] mergeStringArrays(String[] array1, String[] array2) {
+ if (isEmpty(array1)) {
+ return array2;
+ }
+ if (isEmpty(array2)) {
+ return array1;
+ }
+ List<String> result = new ArrayList<String>();
+ result.addAll(Arrays.asList(array1));
+ for (String str : array2) {
+ if (!result.contains(str)) {
+ result.add(str);
+ }
+ }
+ return toStringArray(result);
+ }
+
+ /**
+ * Turn given source String array into sorted array.
+ *
+ * @param array the source array
+ * @return the sorted array (never <code>null</code>)
+ */
+ public static String[] sortStringArray(String[] array) {
+ if (isEmpty(array)) {
+ return new String[0];
+ }
+ Arrays.sort(array);
+ return array;
+ }
+
+ /**
+ * Copy the given Collection into a String array.
+ * The Collection must contain String elements only.
+ *
+ * @param collection the Collection to copy
+ * @return the String array (<code>null</code> if the passed-in
+ * Collection was <code>null</code>)
+ */
+ public static String[] toStringArray(Collection<String> collection) {
+ if (collection == null) {
+ return null;
+ }
+ return collection.toArray(new String[collection.size()]);
+ }
+
+ /**
+ * Copy the given Enumeration into a String array.
+ * The Enumeration must contain String elements only.
+ *
+ * @param enumeration the Enumeration to copy
+ * @return the String array (<code>null</code> if the passed-in
+ * Enumeration was <code>null</code>)
+ */
+ public static String[] toStringArray(Enumeration<String> enumeration) {
+ if (enumeration == null) {
+ return null;
+ }
+ List<String> list = Collections.list(enumeration);
+ return list.toArray(new String[list.size()]);
+ }
+
+ /**
+ * Trim the elements of the given String array,
+ * calling <code>String.trim()</code> on each of them.
+ *
+ * @param array the original String array
+ * @return the resulting array (of the same size) with trimmed elements
+ */
+ public static String[] trimArrayElements(String[] array) {
+ if (isEmpty(array)) {
+ return new String[0];
+ }
+ String[] result = new String[array.length];
+ for (int i = 0; i < array.length; i++) {
+ String element = array[i];
+ result[i] = (element != null ? element.trim() : null);
+ }
+ return result;
+ }
+
+ /**
+ * Remove duplicate Strings from the given array.
+ * Also sorts the array, as it uses a TreeSet.
+ *
+ * @param array the String array
+ * @return an array without duplicates, in natural sort order
+ */
+ public static String[] removeDuplicateStrings(String[] array) {
+ if (isEmpty(array)) {
+ return array;
+ }
+ Set<String> set = new TreeSet<String>();
+ set.addAll(Arrays.asList(array));
+ return toStringArray(set);
+ }
+
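+ /**
+ * Split {@code s} on commas into a set, dropping empty segments. For
+ * example, {@code splitStringByCommaToSet("a,,b")} yields a set containing
+ * {@code "a"} and {@code "b"}.
+ */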
+ public static Set<String> splitStringByCommaToSet(final String s) {
+ return splitStringToSet(s, ',');
+ }
+
+ public static String[] splitStringByCommaToArray(final String s) {
+ return splitStringToArray(s, ',');
+ }
+
+ public static Set<String> splitStringToSet(final String s, final char c) {
+ final char[] chars = s.toCharArray();
+ int count = 1;
+ for (final char x : chars) {
+ if (x == c) {
+ count++;
+ }
+ }
+ // TODO (MvG): No push: hppc or jcf?
+ final Set<String> result = new HashSet<String>(count);
+ final int len = chars.length;
+ int start = 0; // starting index in chars of the current substring.
+ int pos = 0; // current index in chars.
+ for (; pos < len; pos++) {
+ if (chars[pos] == c) {
+ int size = pos - start;
+ if (size > 0) { // only add non empty strings
+ result.add(new String(chars, start, size));
+ }
+ start = pos + 1;
+ }
+ }
+ int size = pos - start;
+ if (size > 0) {
+ result.add(new String(chars, start, size));
+ }
+ return result;
+ }
+
+ public static String[] splitStringToArray(final CharSequence s, final char c) {
+ if (s == null || s.length() == 0) {
+ return Strings.EMPTY_ARRAY;
+ }
+ int count = 1;
+ for (int i = 0; i < s.length(); i++) {
+ if (s.charAt(i) == c) {
+ count++;
+ }
+ }
+ final String[] result = new String[count];
+ final StringBuilder builder = new StringBuilder();
+ int res = 0;
+ for (int i = 0; i < s.length(); i++) {
+ if (s.charAt(i) == c) {
+ if (builder.length() > 0) {
+ result[res++] = builder.toString();
+ builder.setLength(0);
+ }
+
+ } else {
+ builder.append(s.charAt(i));
+ }
+ }
+ if (builder.length() > 0) {
+ result[res++] = builder.toString();
+ }
+ if (res != count) {
+ // we have empty strings, copy over to a new array
+ String[] result1 = new String[res];
+ System.arraycopy(result, 0, result1, 0, res);
+ return result1;
+ }
+ return result;
+ }
+
+ /**
+ * Split a String at the first occurrence of the delimiter.
+ * Does not include the delimiter in the result.
+ *
+ * @param toSplit the string to split
+ * @param delimiter to split the string up with
+ * @return a two element array with index 0 being before the delimiter, and
+ * index 1 being after the delimiter (neither element includes the delimiter);
+ * or <code>null</code> if the delimiter wasn't found in the given input String
+ */
+ public static String[] split(String toSplit, String delimiter) {
+ if (!hasLength(toSplit) || !hasLength(delimiter)) {
+ return null;
+ }
+ int offset = toSplit.indexOf(delimiter);
+ if (offset < 0) {
+ return null;
+ }
+ String beforeDelimiter = toSplit.substring(0, offset);
+ String afterDelimiter = toSplit.substring(offset + delimiter.length());
+ return new String[]{beforeDelimiter, afterDelimiter};
+ }
+
+ /**
+ * Take an array of Strings and split each element based on the given delimiter.
+ * A <code>Properties</code> instance is then generated, with the left of the
+ * delimiter providing the key, and the right of the delimiter providing the value.
+ * <p>Will trim both the key and value before adding them to the
+ * <code>Properties</code> instance.
+ *
+ * @param array the array to process
+ * @param delimiter to split each element using (typically the equals symbol)
+ * @return a <code>Properties</code> instance representing the array contents,
+ * or <code>null</code> if the array to process was null or empty
+ */
+ public static Properties splitArrayElementsIntoProperties(String[] array, String delimiter) {
+ return splitArrayElementsIntoProperties(array, delimiter, null);
+ }
+
+ /**
+     * Take an array of Strings and split each element on the given delimiter.
+ * A <code>Properties</code> instance is then generated, with the left of the
+ * delimiter providing the key, and the right of the delimiter providing the value.
+ * <p>Will trim both the key and value before adding them to the
+ * <code>Properties</code> instance.
+ *
+ * @param array the array to process
+ * @param delimiter to split each element using (typically the equals symbol)
+ * @param charsToDelete one or more characters to remove from each element
+ * prior to attempting the split operation (typically the quotation mark
+ * symbol), or <code>null</code> if no removal should occur
+ * @return a <code>Properties</code> instance representing the array contents,
+ * or <code>null</code> if the array to process was <code>null</code> or empty
+ */
+ public static Properties splitArrayElementsIntoProperties(
+ String[] array, String delimiter, String charsToDelete) {
+
+ if (isEmpty(array)) {
+ return null;
+ }
+ Properties result = new Properties();
+ for (String element : array) {
+ if (charsToDelete != null) {
+ element = deleteAny(element, charsToDelete);
+ }
+            String[] splitElement = split(element, delimiter);
+            if (splitElement == null) {
+                continue;
+            }
+            result.setProperty(splitElement[0].trim(), splitElement[1].trim());
+ }
+ return result;
+ }
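+    // Usage sketch for splitArrayElementsIntoProperties (illustrative):
+    //   splitArrayElementsIntoProperties(new String[]{"\"a=1\"", "b = 2"}, "=", "\"")
+    // yields Properties {a=1, b=2}: quote chars are deleted first, keys and values trimmed.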
+
+ /**
+ * Tokenize the given String into a String array via a StringTokenizer.
+ * Trims tokens and omits empty tokens.
+ * <p>The given delimiters string is supposed to consist of any number of
+ * delimiter characters. Each of those characters can be used to separate
+ * tokens. A delimiter is always a single character; for multi-character
+     * delimiters, consider using <code>delimitedListToStringArray</code>.
+ *
+ * @param str the String to tokenize
+ * @param delimiters the delimiter characters, assembled as String
+ * (each of those characters is individually considered as delimiter).
+ * @return an array of the tokens
+ * @see java.util.StringTokenizer
+ * @see java.lang.String#trim()
+ * @see #delimitedListToStringArray
+ */
+ public static String[] tokenizeToStringArray(String str, String delimiters) {
+ return tokenizeToStringArray(str, delimiters, true, true);
+ }
+
+ /**
+ * Tokenize the given String into a String array via a StringTokenizer.
+ * <p>The given delimiters string is supposed to consist of any number of
+ * delimiter characters. Each of those characters can be used to separate
+ * tokens. A delimiter is always a single character; for multi-character
+     * delimiters, consider using <code>delimitedListToStringArray</code>.
+ *
+ * @param str the String to tokenize
+ * @param delimiters the delimiter characters, assembled as String
+ * (each of those characters is individually considered as delimiter)
+ * @param trimTokens trim the tokens via String's <code>trim</code>
+ * @param ignoreEmptyTokens omit empty tokens from the result array
+ * (only applies to tokens that are empty after trimming; StringTokenizer
+     * will not consider subsequent delimiters as tokens in the first place).
+ * @return an array of the tokens (<code>null</code> if the input String
+ * was <code>null</code>)
+ * @see java.util.StringTokenizer
+ * @see java.lang.String#trim()
+ * @see #delimitedListToStringArray
+ */
+ public static String[] tokenizeToStringArray(
+ String str, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) {
+
+ if (str == null) {
+ return null;
+ }
+ StringTokenizer st = new StringTokenizer(str, delimiters);
+ List<String> tokens = new ArrayList<String>();
+ while (st.hasMoreTokens()) {
+ String token = st.nextToken();
+ if (trimTokens) {
+ token = token.trim();
+ }
+ if (!ignoreEmptyTokens || token.length() > 0) {
+ tokens.add(token);
+ }
+ }
+ return toStringArray(tokens);
+ }
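+    // Usage sketch for tokenizeToStringArray (illustrative): every character of
+    // the delimiters string splits independently, e.g.
+    //   tokenizeToStringArray("a,b;c", ",;") -> ["a", "b", "c"]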
+
+ /**
+ * Take a String which is a delimited list and convert it to a String array.
+     * <p>A single delimiter can consist of more than one character: it will still
+     * be considered as a single delimiter string, rather than as a bunch of potential
+     * delimiter characters - in contrast to <code>tokenizeToStringArray</code>.
+ *
+ * @param str the input String
+ * @param delimiter the delimiter between elements (this is a single delimiter,
+     * rather than a bunch of individual delimiter characters)
+ * @return an array of the tokens in the list
+ * @see #tokenizeToStringArray
+ */
+ public static String[] delimitedListToStringArray(String str, String delimiter) {
+ return delimitedListToStringArray(str, delimiter, null);
+ }
+
+ /**
+ * Take a String which is a delimited list and convert it to a String array.
+     * <p>A single delimiter can consist of more than one character: it will still
+     * be considered as a single delimiter string, rather than as a bunch of potential
+     * delimiter characters - in contrast to <code>tokenizeToStringArray</code>.
+ *
+ * @param str the input String
+ * @param delimiter the delimiter between elements (this is a single delimiter,
+     * rather than a bunch of individual delimiter characters)
+ * @param charsToDelete a set of characters to delete. Useful for deleting unwanted
+     * line breaks: e.g. "\r\n\f" will delete all carriage returns, new lines and form feeds in a String.
+ * @return an array of the tokens in the list
+ * @see #tokenizeToStringArray
+ */
+ public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) {
+ if (str == null) {
+ return new String[0];
+ }
+ if (delimiter == null) {
+ return new String[]{str};
+ }
+ List<String> result = new ArrayList<String>();
+ if ("".equals(delimiter)) {
+ for (int i = 0; i < str.length(); i++) {
+ result.add(deleteAny(str.substring(i, i + 1), charsToDelete));
+ }
+ } else {
+ int pos = 0;
+ int delPos;
+ while ((delPos = str.indexOf(delimiter, pos)) != -1) {
+ result.add(deleteAny(str.substring(pos, delPos), charsToDelete));
+ pos = delPos + delimiter.length();
+ }
+ if (str.length() > 0 && pos <= str.length()) {
+ // Add rest of String, but not in case of empty input.
+ result.add(deleteAny(str.substring(pos), charsToDelete));
+ }
+ }
+ return toStringArray(result);
+ }
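+    // Usage sketch for delimitedListToStringArray (illustrative): the delimiter
+    // is matched as a whole string, e.g.
+    //   delimitedListToStringArray("a--b--c", "--") -> ["a", "b", "c"]
+    //   delimitedListToStringArray("abc", "")       -> ["a", "b", "c"]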
+
+ /**
+ * Convert a CSV list into an array of Strings.
+ *
+ * @param str the input String
+ * @return an array of Strings, or the empty array in case of empty input
+ */
+ public static String[] commaDelimitedListToStringArray(String str) {
+ return delimitedListToStringArray(str, ",");
+ }
+
+ /**
+ * Convenience method to convert a CSV string list to a set.
+ * Note that this will suppress duplicates.
+ *
+ * @param str the input String
+ * @return a Set of String entries in the list
+ */
+ public static Set<String> commaDelimitedListToSet(String str) {
+ Set<String> set = new TreeSet<String>();
+ String[] tokens = commaDelimitedListToStringArray(str);
+ set.addAll(Arrays.asList(tokens));
+ return set;
+ }
+
+ /**
+ * Convenience method to return a Collection as a delimited (e.g. CSV)
+ * String. E.g. useful for <code>toString()</code> implementations.
+ *
+ * @param coll the Collection to display
+ * @param delim the delimiter to use (probably a ",")
+ * @param prefix the String to start each element with
+ * @param suffix the String to end each element with
+ * @return the delimited String
+ */
+ public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix) {
+ return collectionToDelimitedString(coll, delim, prefix, suffix, new StringBuilder());
+ }
+
+ public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) {
+ if (Iterables.isEmpty(coll)) {
+ return "";
+ }
+ Iterator<?> it = coll.iterator();
+ while (it.hasNext()) {
+ sb.append(prefix).append(it.next()).append(suffix);
+ if (it.hasNext()) {
+ sb.append(delim);
+ }
+ }
+ return sb.toString();
+ }
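+    // Usage sketch for collectionToDelimitedString (illustrative):
+    //   collectionToDelimitedString(Arrays.asList("a", "b"), ",", "[", "]") -> "[a],[b]"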
+
+ /**
+ * Convenience method to return a Collection as a delimited (e.g. CSV)
+ * String. E.g. useful for <code>toString()</code> implementations.
+ *
+ * @param coll the Collection to display
+ * @param delim the delimiter to use (probably a ",")
+ * @return the delimited String
+ */
+ public static String collectionToDelimitedString(Iterable<?> coll, String delim) {
+ return collectionToDelimitedString(coll, delim, "", "");
+ }
+
+ /**
+ * Convenience method to return a Collection as a CSV String.
+ * E.g. useful for <code>toString()</code> implementations.
+ *
+ * @param coll the Collection to display
+ * @return the delimited String
+ */
+ public static String collectionToCommaDelimitedString(Iterable<?> coll) {
+ return collectionToDelimitedString(coll, ",");
+ }
+
+ /**
+ * Convenience method to return a String array as a delimited (e.g. CSV)
+ * String. E.g. useful for <code>toString()</code> implementations.
+ *
+ * @param arr the array to display
+ * @param delim the delimiter to use (probably a ",")
+ * @return the delimited String
+ */
+ public static String arrayToDelimitedString(Object[] arr, String delim) {
+ return arrayToDelimitedString(arr, delim, new StringBuilder());
+ }
+
+ public static String arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) {
+ if (isEmpty(arr)) {
+ return "";
+ }
+ for (int i = 0; i < arr.length; i++) {
+ if (i > 0) {
+ sb.append(delim);
+ }
+ sb.append(arr[i]);
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Convenience method to return a String array as a CSV String.
+ * E.g. useful for <code>toString()</code> implementations.
+ *
+ * @param arr the array to display
+ * @return the delimited String
+ */
+ public static String arrayToCommaDelimitedString(Object[] arr) {
+ return arrayToDelimitedString(arr, ",");
+ }
+
+ /**
+     * Format the double value with a single decimal point, trimming a trailing '.0'.
+ */
+ public static String format1Decimals(double value, String suffix) {
+ String p = String.valueOf(value);
+ int ix = p.indexOf('.') + 1;
+ int ex = p.indexOf('E');
+ char fraction = p.charAt(ix);
+ if (fraction == '0') {
+ if (ex != -1) {
+ return p.substring(0, ix - 1) + p.substring(ex) + suffix;
+ } else {
+ return p.substring(0, ix - 1) + suffix;
+ }
+ } else {
+ if (ex != -1) {
+ return p.substring(0, ix) + fraction + p.substring(ex) + suffix;
+ } else {
+ return p.substring(0, ix) + fraction + suffix;
+ }
+ }
+ }
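+    // Usage sketch for format1Decimals (illustrative): the first fraction digit
+    // is kept by truncation, not rounding, e.g.
+    //   format1Decimals(1.25, "kb") -> "1.2kb"; format1Decimals(10.0, "ms") -> "10ms"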
+
+ public static String toCamelCase(String value) {
+ return toCamelCase(value, null);
+ }
+
+ public static String toCamelCase(String value, StringBuilder sb) {
+ boolean changed = false;
+ for (int i = 0; i < value.length(); i++) {
+ char c = value.charAt(i);
+ if (c == '_') {
+ if (!changed) {
+ if (sb != null) {
+ sb.setLength(0);
+ } else {
+ sb = new StringBuilder();
+ }
+                    // copy over the unchanged prefix seen so far
+ for (int j = 0; j < i; j++) {
+ sb.append(value.charAt(j));
+ }
+ changed = true;
+ }
+ if (i < value.length() - 1) {
+ sb.append(Character.toUpperCase(value.charAt(++i)));
+ }
+ } else {
+ if (changed) {
+ sb.append(c);
+ }
+ }
+ }
+ if (!changed) {
+ return value;
+ }
+ return sb.toString();
+ }
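+    // Usage sketch for toCamelCase (illustrative):
+    //   toCamelCase("cluster_name") -> "clusterName"; input without '_' is returned as-is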
+
+ public static String toUnderscoreCase(String value) {
+ return toUnderscoreCase(value, null);
+ }
+
+ public static String toUnderscoreCase(String value, StringBuilder sb) {
+ boolean changed = false;
+ for (int i = 0; i < value.length(); i++) {
+ char c = value.charAt(i);
+ if (Character.isUpperCase(c)) {
+ if (!changed) {
+ if (sb != null) {
+ sb.setLength(0);
+ } else {
+ sb = new StringBuilder();
+ }
+                    // copy over the unchanged prefix seen so far
+ for (int j = 0; j < i; j++) {
+ sb.append(value.charAt(j));
+ }
+ changed = true;
+ if (i == 0) {
+ sb.append(Character.toLowerCase(c));
+ } else {
+ sb.append('_');
+ sb.append(Character.toLowerCase(c));
+ }
+ } else {
+ sb.append('_');
+ sb.append(Character.toLowerCase(c));
+ }
+ } else {
+ if (changed) {
+ sb.append(c);
+ }
+ }
+ }
+ if (!changed) {
+ return value;
+ }
+ return sb.toString();
+ }
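+    // Usage sketch for toUnderscoreCase (illustrative):
+    //   toUnderscoreCase("clusterName") -> "cluster_name"; toUnderscoreCase("Name") -> "name"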
+
+ /**
+ * Determine whether the given array is empty:
+ * i.e. <code>null</code> or of zero length.
+ *
+ * @param array the array to check
+ */
+ private static boolean isEmpty(Object[] array) {
+ return (array == null || array.length == 0);
+ }
+
+ private Strings() {
+ }
+
+ public static byte[] toUTF8Bytes(CharSequence charSequence) {
+ return toUTF8Bytes(charSequence, new BytesRef());
+ }
+
+ public static byte[] toUTF8Bytes(CharSequence charSequence, BytesRef spare) {
+ UnicodeUtil.UTF16toUTF8(charSequence, 0, charSequence.length(), spare);
+ final byte[] bytes = new byte[spare.length];
+ System.arraycopy(spare.bytes, spare.offset, bytes, 0, bytes.length);
+ return bytes;
+ }
+
+ private static class SecureRandomHolder {
+ // class loading is atomic - this is a lazy & safe singleton
+ private static final SecureRandom INSTANCE = new SecureRandom();
+ }
+
+ /**
+     * Returns a Base64 encoded version of a version 4 (random) UUID
+ * as defined here: http://www.ietf.org/rfc/rfc4122.txt
+ */
+ public static String randomBase64UUID() {
+ return randomBase64UUID(SecureRandomHolder.INSTANCE);
+ }
+
+ /**
+     * Returns a Base64 encoded version of a version 4 (random) UUID
+ * randomly initialized by the given {@link Random} instance
+ * as defined here: http://www.ietf.org/rfc/rfc4122.txt
+ */
+ public static String randomBase64UUID(Random random) {
+ final byte[] randomBytes = new byte[16];
+ random.nextBytes(randomBytes);
+
+ /* Set the version to version 4 (see http://www.ietf.org/rfc/rfc4122.txt)
+ * The randomly or pseudo-randomly generated version.
+ * The version number is in the most significant 4 bits of the time
+ * stamp (bits 4 through 7 of the time_hi_and_version field).*/
+ randomBytes[6] &= 0x0f; /* clear the 4 most significant bits for the version */
+ randomBytes[6] |= 0x40; /* set the version to 0100 / 0x40 */
+
+ /* Set the variant:
+         * The high field of the clock sequence multiplexed with the variant.
+         * We set only the MSB of the variant. */
+ randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
+ randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
+ try {
+ byte[] encoded = Base64.encodeBytesToBytes(randomBytes, 0, randomBytes.length, Base64.URL_SAFE);
+            // we know the bytes are 16, and not a multiple of 3, so remove the 2 padding chars that are added
+ assert encoded[encoded.length - 1] == '=';
+ assert encoded[encoded.length - 2] == '=';
+            // the padding is always two chars at the end, so build the string without them
+ return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING);
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("should not be thrown");
+ }
+ }
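+    // Note (illustrative): 16 random bytes encode to 24 URL-safe Base64 chars,
+    // of which the last two are always '=' padding, so the returned id is 22 chars.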
+
+ /**
+     * Return substring(beginIndex, endIndex), clamping endIndex so that an index past the end of the string never throws.
+ */
+ public static String substring(String s, int beginIndex, int endIndex) {
+ if (s == null) {
+ return s;
+ }
+
+ int realEndIndex = s.length() > 0 ? s.length() - 1 : 0;
+
+ if (endIndex > realEndIndex) {
+ return s.substring(beginIndex);
+ } else {
+ return s.substring(beginIndex, endIndex);
+ }
+ }
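+    // Usage sketch for substring (illustrative): an out-of-range endIndex is
+    // tolerated, e.g. substring("abc", 1, 100) -> "bc"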
+
+    /**
+     * Returns <code>true</code> if the given array is <code>null</code>, empty, or consists of
+     * a single element that is "_all" or "*", which is conventionally used to mean "match everything".
+     */
+ public static boolean isAllOrWildcard(String[] data) {
+ return CollectionUtils.isEmpty(data) ||
+ data.length == 1 && ("_all".equals(data[0]) || "*".equals(data[0]));
+ }
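+    // Usage sketch for isAllOrWildcard (illustrative):
+    //   isAllOrWildcard(null) -> true; isAllOrWildcard(new String[]{"_all"}) -> true;
+    //   isAllOrWildcard(new String[]{"idx"}) -> false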
+
+}
diff --git a/src/main/java/org/elasticsearch/common/Table.java b/src/main/java/org/elasticsearch/common/Table.java
new file mode 100644
index 0000000..dddc5d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/Table.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ */
+public class Table {
+
+ private List<Cell> headers = new ArrayList<Cell>();
+ private List<List<Cell>> rows = new ArrayList<List<Cell>>();
+ private Map<String, List<Cell>> map = Maps.newHashMap();
+ private Map<String, Cell> headerMap = Maps.newHashMap();
+ private List<Cell> currentCells;
+ private boolean inHeaders = false;
+
+ public Table startHeaders() {
+ inHeaders = true;
+ currentCells = new ArrayList<Cell>();
+ return this;
+ }
+
+ public Table endHeaders() {
+ if (currentCells == null || currentCells.isEmpty()) {
+ throw new ElasticsearchIllegalStateException("no headers added...");
+ }
+ inHeaders = false;
+ headers = currentCells;
+ currentCells = null;
+
+ /* Create associative structure for columns that
+ * contain the same cells as the rows:
+ *
+ * header1 => [Cell, Cell, ...]
+ * header2 => [Cell, Cell, ...]
+ * header3 => [Cell, Cell, ...]
+ *
+ * Also populate map to look up headers by name.
+ *
+ */
+ for (Cell header : headers) {
+ map.put(header.value.toString(), new ArrayList<Cell>());
+ headerMap.put(header.value.toString(), header);
+ }
+
+ return this;
+ }
+
+ public Table startRow() {
+ if (headers.isEmpty()) {
+ throw new ElasticsearchIllegalStateException("no headers added...");
+ }
+ currentCells = new ArrayList<Cell>(headers.size());
+ return this;
+ }
+
+ public Table endRow(boolean check) {
+ if (currentCells == null) {
+ throw new ElasticsearchIllegalStateException("no row started...");
+ }
+ if (check && (currentCells.size() != headers.size())) {
+ StringBuilder s = new StringBuilder();
+ s.append("mismatch on number of cells ");
+ s.append(currentCells.size());
+ s.append(" in a row compared to header ");
+ s.append(headers.size());
+ throw new ElasticsearchIllegalStateException(s.toString());
+ }
+ rows.add(currentCells);
+ currentCells = null;
+ return this;
+ }
+
+ public Table endRow() {
+ endRow(true);
+ return this;
+ }
+
+ public Table addCell(Object value) {
+ return addCell(value, "");
+ }
+
+ public Table addCell(Object value, String attributes) {
+ if (currentCells == null) {
+ throw new ElasticsearchIllegalStateException("no block started...");
+ }
+ if (!inHeaders) {
+ if (currentCells.size() == headers.size()) {
+ throw new ElasticsearchIllegalStateException("can't add more cells to a row than the header");
+ }
+ }
+ Map<String, String> mAttr;
+ if (attributes.length() == 0) {
+ if (inHeaders) {
+ mAttr = ImmutableMap.of();
+ } else {
+ // get the attributes of the header cell we are going to add to
+ mAttr = headers.get(currentCells.size()).attr;
+ }
+ } else {
+ mAttr = new HashMap<String, String>();
+ if (!inHeaders) {
+ // get the attributes of the header cell we are going to add
+ mAttr.putAll(headers.get(currentCells.size()).attr);
+ }
+ String[] sAttrs = Strings.splitStringToArray(attributes, ';');
+ for (String sAttr : sAttrs) {
+ if (sAttr.length() == 0) {
+ continue;
+ }
+ int idx = sAttr.indexOf(':');
+ mAttr.put(sAttr.substring(0, idx), sAttr.substring(idx + 1));
+ }
+ }
+
+ Cell cell = new Cell(value, mAttr);
+ int cellIndex = currentCells.size();
+ currentCells.add(cell);
+
+ // If we're in a value row, also populate the named column.
+ if (!inHeaders) {
+ String hdr = (String) headers.get(cellIndex).value;
+ map.get(hdr).add(cell);
+ }
+
+ return this;
+ }
+
+ public List<Cell> getHeaders() {
+ return this.headers;
+ }
+
+ public List<List<Cell>> getRows() {
+ return rows;
+ }
+
+ public Map<String, List<Cell>> getAsMap() {
+ return this.map;
+ }
+
+ public Map<String, Cell> getHeaderMap() {
+ return this.headerMap;
+ }
+
+ public Cell findHeaderByName(String header) {
+ for (Cell cell : headers) {
+ if (cell.value.toString().equals(header)) {
+ return cell;
+ }
+ }
+ return null;
+ }
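+    // Usage sketch for Table (illustrative):
+    //   Table t = new Table();
+    //   t.startHeaders().addCell("name").addCell("size").endHeaders();
+    //   t.startRow().addCell("foo").addCell(42).endRow();
+    //   t.getAsMap().get("size") then holds the single Cell wrapping 42.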
+
+ public static class Cell {
+ public final Object value;
+ public final Map<String, String> attr;
+
+ public Cell(Object value, Cell other) {
+ this.value = value;
+ this.attr = other.attr;
+ }
+
+ public Cell(Object value) {
+ this.value = value;
+ this.attr = new HashMap<String, String>();
+ }
+
+ public Cell(Object value, Map<String, String> attr) {
+ this.value = value;
+ this.attr = attr;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
new file mode 100644
index 0000000..9f94bbf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public interface BlobContainer {
+
+ interface BlobNameFilter {
+ /**
+         * Return <tt>true</tt> if the blob should be accepted by the operation, <tt>false</tt> to filter it out.
+ */
+ boolean accept(String blobName);
+ }
+
+ interface ReadBlobListener {
+
+ void onPartial(byte[] data, int offset, int size) throws IOException;
+
+ void onCompleted();
+
+ void onFailure(Throwable t);
+ }
+
+ BlobPath path();
+
+ boolean blobExists(String blobName);
+
+ void readBlob(String blobName, ReadBlobListener listener);
+
+ byte[] readBlobFully(String blobName) throws IOException;
+
+ boolean deleteBlob(String blobName) throws IOException;
+
+ void deleteBlobsByPrefix(String blobNamePrefix) throws IOException;
+
+ void deleteBlobsByFilter(BlobNameFilter filter) throws IOException;
+
+ ImmutableMap<String, BlobMetaData> listBlobs() throws IOException;
+
+ ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java b/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java
new file mode 100644
index 0000000..3f69e26
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore;
+
+/**
+ *
+ */
+public interface BlobMetaData {
+
+ String name();
+
+ long length();
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java
new file mode 100644
index 0000000..5c1ff00
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore;
+
+import com.google.common.collect.ImmutableList;
+
+import java.util.Iterator;
+
+/**
+ *
+ */
+public class BlobPath implements Iterable<String> {
+
+ private final ImmutableList<String> paths;
+
+ public BlobPath() {
+ this.paths = ImmutableList.of();
+ }
+
+ public static BlobPath cleanPath() {
+ return new BlobPath();
+ }
+
+ private BlobPath(ImmutableList<String> paths) {
+ this.paths = paths;
+ }
+
+ @Override
+ public Iterator<String> iterator() {
+ return paths.iterator();
+ }
+
+ public String[] toArray() {
+ return paths.toArray(new String[paths.size()]);
+ }
+
+ public BlobPath add(String path) {
+ ImmutableList.Builder<String> builder = ImmutableList.builder();
+ return new BlobPath(builder.addAll(paths).add(path).build());
+ }
+
+ public String buildAsString(String separator) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < paths.size(); i++) {
+ sb.append(paths.get(i));
+ if (i < (paths.size() - 1)) {
+ sb.append(separator);
+ }
+ }
+ return sb.toString();
+ }
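+    // Usage sketch for BlobPath (illustrative):
+    //   BlobPath.cleanPath().add("indices").add("0").buildAsString("/") -> "indices/0"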
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (String path : paths) {
+ sb.append('[').append(path).append(']');
+ }
+ return sb.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java
new file mode 100644
index 0000000..34844e2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.blobstore;
+
+/**
+ *
+ */
+public interface BlobStore {
+
+ ImmutableBlobContainer immutableBlobContainer(BlobPath path);
+
+ void delete(BlobPath path);
+
+ void close();
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java b/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java
new file mode 100644
index 0000000..6be2458
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class BlobStoreException extends ElasticsearchException {
+
+ public BlobStoreException(String msg) {
+ super(msg);
+ }
+
+ public BlobStoreException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/ImmutableBlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/ImmutableBlobContainer.java
new file mode 100644
index 0000000..f6d516f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/ImmutableBlobContainer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ *
+ */
+public interface ImmutableBlobContainer extends BlobContainer {
+
+ interface WriterListener {
+ void onCompleted();
+
+ void onFailure(Throwable t);
+ }
+
+ void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener);
+
+ void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/fs/AbstractFsBlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/fs/AbstractFsBlobContainer.java
new file mode 100644
index 0000000..e93c0f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/fs/AbstractFsBlobContainer.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.fs;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
+import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
+import org.elasticsearch.common.collect.MapBuilder;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class AbstractFsBlobContainer extends AbstractBlobContainer {
+
+ protected final FsBlobStore blobStore;
+
+ protected final File path;
+
+ public AbstractFsBlobContainer(FsBlobStore blobStore, BlobPath blobPath, File path) {
+ super(blobPath);
+ this.blobStore = blobStore;
+ this.path = path;
+ }
+
+ public File filePath() {
+ return this.path;
+ }
+
+ public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
+ File[] files = path.listFiles();
+ if (files == null || files.length == 0) {
+ return ImmutableMap.of();
+ }
+ // using MapBuilder and not ImmutableMap.Builder as it seems like File#listFiles might return duplicate files!
+ MapBuilder<String, BlobMetaData> builder = MapBuilder.newMapBuilder();
+ for (File file : files) {
+ if (file.isFile()) {
+ builder.put(file.getName(), new PlainBlobMetaData(file.getName(), file.length()));
+ }
+ }
+ return builder.immutableMap();
+ }
+
+ public boolean deleteBlob(String blobName) throws IOException {
+ return new File(path, blobName).delete();
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return new File(path, blobName).exists();
+ }
+
+ @Override
+ public void readBlob(final String blobName, final ReadBlobListener listener) {
+ blobStore.executor().execute(new Runnable() {
+ @Override
+ public void run() {
+ byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
+ FileInputStream is = null;
+ try {
+ is = new FileInputStream(new File(path, blobName));
+ int bytesRead;
+ while ((bytesRead = is.read(buffer)) != -1) {
+ listener.onPartial(buffer, 0, bytesRead);
+ }
+ } catch (Throwable t) {
+ IOUtils.closeWhileHandlingException(is);
+ listener.onFailure(t);
+ return;
+ }
+ try {
+ IOUtils.closeWhileHandlingException(is);
+ listener.onCompleted();
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
new file mode 100644
index 0000000..a2a99bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.fs;
+
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.BlobStoreException;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.io.File;
+import java.util.concurrent.Executor;
+
+/**
+ *
+ */
+public class FsBlobStore extends AbstractComponent implements BlobStore {
+
+ private final Executor executor;
+
+ private final File path;
+
+ private final int bufferSizeInBytes;
+
+ public FsBlobStore(Settings settings, Executor executor, File path) {
+ super(settings);
+ this.path = path;
+ if (!path.exists()) {
+ boolean b = FileSystemUtils.mkdirs(path);
+ if (!b) {
+ throw new BlobStoreException("Failed to create directory at [" + path + "]");
+ }
+ }
+ if (!path.isDirectory()) {
+ throw new BlobStoreException("Path is not a directory at [" + path + "]");
+ }
+ this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
+ this.executor = executor;
+ }
+
+ @Override
+ public String toString() {
+ return path.toString();
+ }
+
+ public File path() {
+ return path;
+ }
+
+ public int bufferSizeInBytes() {
+ return this.bufferSizeInBytes;
+ }
+
+ public Executor executor() {
+ return executor;
+ }
+
+ @Override
+ public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+ return new FsImmutableBlobContainer(this, path, buildAndCreate(path));
+ }
+
+ @Override
+ public void delete(BlobPath path) {
+ FileSystemUtils.deleteRecursively(buildPath(path));
+ }
+
+ @Override
+ public void close() {
+ // nothing to do here...
+ }
+
+ private synchronized File buildAndCreate(BlobPath path) {
+ File f = buildPath(path);
+ FileSystemUtils.mkdirs(f);
+ return f;
+ }
+
+ private File buildPath(BlobPath path) {
+ String[] paths = path.toArray();
+ if (paths.length == 0) {
+ return path();
+ }
+ File blobPath = new File(this.path, paths[0]);
+ if (paths.length > 1) {
+ for (int i = 1; i < paths.length; i++) {
+ blobPath = new File(blobPath, paths[i]);
+ }
+ }
+ return blobPath;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/fs/FsImmutableBlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/fs/FsImmutableBlobContainer.java
new file mode 100644
index 0000000..50b6823
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/fs/FsImmutableBlobContainer.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.fs;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.blobstore.support.BlobStores;
+import org.elasticsearch.common.io.FileSystemUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+
+/**
+ *
+ */
+public class FsImmutableBlobContainer extends AbstractFsBlobContainer implements ImmutableBlobContainer {
+
+ public FsImmutableBlobContainer(FsBlobStore blobStore, BlobPath blobPath, File path) {
+ super(blobStore, blobPath, path);
+ }
+
+ @Override
+ public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) {
+ blobStore.executor().execute(new Runnable() {
+ @Override
+ public void run() {
+ File file = new File(path, blobName);
+ RandomAccessFile raf;
+ try {
+ raf = new RandomAccessFile(file, "rw");
+ // clean the file if it exists
+ raf.setLength(0);
+ } catch (Exception e) {
+ listener.onFailure(e);
+ return;
+ }
+ try {
+ try {
+ long bytesWritten = 0;
+ byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
+ int bytesRead;
+ while ((bytesRead = is.read(buffer)) != -1) {
+ raf.write(buffer, 0, bytesRead);
+ bytesWritten += bytesRead;
+ }
+ if (bytesWritten != sizeInBytes) {
+ listener.onFailure(new ElasticsearchIllegalStateException("[" + blobName + "]: wrote [" + bytesWritten + "], expected to write [" + sizeInBytes + "]"));
+ return;
+ }
+ } finally {
+ try {
+ is.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ try {
+ raf.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ }
+ FileSystemUtils.syncFile(file);
+ listener.onCompleted();
+ } catch (Exception e) {
+                    // just to be on the safe side, try to delete the file on failure
+ try {
+ if (file.exists()) {
+ file.delete();
+ }
+ } catch (Exception e1) {
+ // ignore
+ }
+ listener.onFailure(e);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
+ BlobStores.syncWriteBlob(this, blobName, is, sizeInBytes);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java
new file mode 100644
index 0000000..1a8a3b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.support;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ *
+ */
+public abstract class AbstractBlobContainer implements BlobContainer {
+
+ private final BlobPath path;
+
+ protected AbstractBlobContainer(BlobPath path) {
+ this.path = path;
+ }
+
+ @Override
+ public BlobPath path() {
+ return this.path;
+ }
+
+ @Override
+ public byte[] readBlobFully(String blobName) throws IOException {
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
+ final ByteArrayOutputStream bos = new ByteArrayOutputStream();
+
+ readBlob(blobName, new ReadBlobListener() {
+ @Override
+ public void onPartial(byte[] data, int offset, int size) {
+ bos.write(data, offset, size);
+ }
+
+ @Override
+ public void onCompleted() {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ failure.set(t);
+ latch.countDown();
+ }
+ });
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted while waiting to read [" + blobName + "]");
+ }
+
+ if (failure.get() != null) {
+ if (failure.get() instanceof IOException) {
+ throw (IOException) failure.get();
+ } else {
+ throw new IOException("Failed to get [" + blobName + "]", failure.get());
+ }
+ }
+ return bos.toByteArray();
+ }
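+    // Note (illustrative): readBlobFully bridges the asynchronous readBlob API to
+    // a synchronous call by blocking on a CountDownLatch until the listener
+    // reports completion or failure.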
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ ImmutableMap<String, BlobMetaData> allBlobs = listBlobs();
+ ImmutableMap.Builder<String, BlobMetaData> blobs = ImmutableMap.builder();
+ for (BlobMetaData blob : allBlobs.values()) {
+ if (blob.name().startsWith(blobNamePrefix)) {
+ blobs.put(blob.name(), blob);
+ }
+ }
+ return blobs.build();
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(final String blobNamePrefix) throws IOException {
+ deleteBlobsByFilter(new BlobNameFilter() {
+ @Override
+ public boolean accept(String blobName) {
+ return blobName.startsWith(blobNamePrefix);
+ }
+ });
+ }
+
+ @Override
+ public void deleteBlobsByFilter(BlobNameFilter filter) throws IOException {
+ ImmutableMap<String, BlobMetaData> blobs = listBlobs();
+ for (BlobMetaData blob : blobs.values()) {
+ if (filter.accept(blob.name())) {
+ deleteBlob(blob.name());
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/support/BlobStores.java b/src/main/java/org/elasticsearch/common/blobstore/support/BlobStores.java
new file mode 100644
index 0000000..6a28e46
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/support/BlobStores.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.support;
+
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ *
+ */
+public class BlobStores {
+
+ public static void syncWriteBlob(ImmutableBlobContainer blobContainer, String blobName, InputStream is, long sizeInBytes) throws IOException {
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
+ blobContainer.writeBlob(blobName, is, sizeInBytes, new ImmutableBlobContainer.WriterListener() {
+ @Override
+ public void onCompleted() {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ failure.set(t);
+ latch.countDown();
+ }
+ });
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted while waiting to write [" + blobName + "]");
+ }
+
+ if (failure.get() != null) {
+ if (failure.get() instanceof IOException) {
+ throw (IOException) failure.get();
+ } else {
+ throw new IOException("Failed to get [" + blobName + "]", failure.get());
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java b/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java
new file mode 100644
index 0000000..ea93163
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.support;
+
+import org.elasticsearch.common.blobstore.BlobMetaData;
+
+/**
+ *
+ */
+public class PlainBlobMetaData implements BlobMetaData {
+
+ private final String name;
+
+ private final long length;
+
+ public PlainBlobMetaData(String name, long length) {
+ this.name = name;
+ this.length = length;
+ }
+
+ @Override
+ public String name() {
+ return this.name;
+ }
+
+ @Override
+ public long length() {
+ return this.length;
+ }
+
+ @Override
+ public String toString() {
+ return "name [" + name + "], length [" + length + "]";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobContainer.java
new file mode 100644
index 0000000..26f0ca0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobContainer.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.url;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+
+/**
+ * URL blob implementation of {@link org.elasticsearch.common.blobstore.BlobContainer}
+ */
+public abstract class AbstractURLBlobContainer extends AbstractBlobContainer {
+
+ protected final URLBlobStore blobStore;
+
+ protected final URL path;
+
+ /**
+ * Constructs new AbstractURLBlobContainer
+ *
+ * @param blobStore blob store
+ * @param blobPath blob path for this container
+ * @param path URL for this container
+ */
+ public AbstractURLBlobContainer(URLBlobStore blobStore, BlobPath blobPath, URL path) {
+ super(blobPath);
+ this.blobStore = blobStore;
+ this.path = path;
+ }
+
+ /**
+ * Returns URL for this container
+ *
+ * @return URL for this container
+ */
+ public URL url() {
+ return this.path;
+ }
+
+ /**
+ * This operation is not supported by AbstractURLBlobContainer
+ */
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
+ throw new UnsupportedOperationException("URL repository doesn't support this operation");
+ }
+
+ /**
+ * This operation is not supported by AbstractURLBlobContainer
+ */
+ @Override
+ public boolean deleteBlob(String blobName) throws IOException {
+ throw new UnsupportedOperationException("URL repository is read only");
+ }
+
+ /**
+ * This operation is not supported by AbstractURLBlobContainer
+ */
+ @Override
+ public boolean blobExists(String blobName) {
+ throw new UnsupportedOperationException("URL repository doesn't support this operation");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void readBlob(final String blobName, final ReadBlobListener listener) {
+ blobStore.executor().execute(new Runnable() {
+ @Override
+ public void run() {
+ byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
+ InputStream is = null;
+ try {
+ is = new URL(path, blobName).openStream();
+ int bytesRead;
+ while ((bytesRead = is.read(buffer)) != -1) {
+ listener.onPartial(buffer, 0, bytesRead);
+ }
+ } catch (Throwable t) {
+ IOUtils.closeWhileHandlingException(is);
+ listener.onFailure(t);
+ return;
+ }
+ try {
+ IOUtils.closeWhileHandlingException(is);
+ listener.onCompleted();
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java
new file mode 100644
index 0000000..42cdb9d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.url;
+
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.BlobStoreException;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.concurrent.Executor;
+
+/**
+ * Read-only URL-based blob store
+ */
+public class URLBlobStore extends AbstractComponent implements BlobStore {
+
+ private final Executor executor;
+
+ private final URL path;
+
+ private final int bufferSizeInBytes;
+
+ /**
+ * Constructs new read-only URL-based blob store
+ * <p/>
+ * The following settings are supported
+ * <dl>
+ * <dt>buffer_size</dt>
+ * <dd>- size of the read buffer, defaults to 100KB</dd>
+ * </dl>
+ *
+ * @param settings settings
+ * @param executor executor for read operations
+ * @param path base URL
+ */
+ public URLBlobStore(Settings settings, Executor executor, URL path) {
+ super(settings);
+ this.path = path;
+ this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
+ this.executor = executor;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String toString() {
+ return path.toString();
+ }
+
+ /**
+ * Returns base URL
+ *
+ * @return base URL
+ */
+ public URL path() {
+ return path;
+ }
+
+ /**
+ * Returns read buffer size
+ *
+ * @return read buffer size
+ */
+ public int bufferSizeInBytes() {
+ return this.bufferSizeInBytes;
+ }
+
+ /**
+ * Returns executor used for read operations
+ *
+ * @return executor
+ */
+ public Executor executor() {
+ return executor;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+ try {
+ return new URLImmutableBlobContainer(this, path, buildPath(path));
+ } catch (MalformedURLException ex) {
+ throw new BlobStoreException("malformed URL " + path, ex);
+ }
+ }
+
+ /**
+ * This operation is not supported by URL Blob Store
+ *
+     * @param path ignored; deletion is not supported
+ */
+ @Override
+ public void delete(BlobPath path) {
+ throw new UnsupportedOperationException("URL repository is read only");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void close() {
+ // nothing to do here...
+ }
+
+ /**
+     * Builds a URL from the base URL and the specified relative path.
+     *
+     * @param path relative path
+     * @return base URL + path
+     * @throws MalformedURLException if the resulting URL is malformed
+ */
+ private URL buildPath(BlobPath path) throws MalformedURLException {
+ String[] paths = path.toArray();
+ if (paths.length == 0) {
+ return path();
+ }
+ URL blobPath = new URL(this.path, paths[0] + "/");
+ if (paths.length > 1) {
+ for (int i = 1; i < paths.length; i++) {
+ blobPath = new URL(blobPath, paths[i] + "/");
+ }
+ }
+ return blobPath;
+ }
+}
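
For illustration, a minimal sketch of how this store might be wired up (a hypothetical harness; the class name, the URL, and the "200kb" override are invented for the example):

import java.net.URL;
import java.util.concurrent.Executors;

import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.url.URLBlobStore;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class URLBlobStoreSketch {
    public static void main(String[] args) throws Exception {
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("buffer_size", "200kb") // overrides the 100KB default read buffer
                .build();
        URLBlobStore store = new URLBlobStore(settings, Executors.newSingleThreadExecutor(),
                new URL("http://example.com/snapshots/"));
        // buildPath appends each path element with a trailing slash,
        // e.g. http://example.com/snapshots/indices/0/
        BlobPath path = BlobPath.cleanPath().add("indices").add("0");
        System.out.println(store); // toString() prints the base URL
        store.immutableBlobContainer(path); // read-only container for that URL
    }
}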
diff --git a/src/main/java/org/elasticsearch/common/blobstore/url/URLImmutableBlobContainer.java b/src/main/java/org/elasticsearch/common/blobstore/url/URLImmutableBlobContainer.java
new file mode 100644
index 0000000..af37ac4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/blobstore/url/URLImmutableBlobContainer.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore.url;
+
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+
+/**
+ * Read-only URL-based implementation of {@link ImmutableBlobContainer}
+ */
+public class URLImmutableBlobContainer extends AbstractURLBlobContainer implements ImmutableBlobContainer {
+
+ /**
+ * Constructs a new URLImmutableBlobContainer
+ *
+ * @param blobStore blob store
+ * @param blobPath blob path to this container
+ * @param path URL of this container
+ */
+ public URLImmutableBlobContainer(URLBlobStore blobStore, BlobPath blobPath, URL path) {
+ super(blobStore, blobPath, path);
+ }
+
+ /**
+ * This operation is not supported by URL Blob Container
+ */
+ @Override
+ public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) {
+ throw new UnsupportedOperationException("URL repository is read only");
+ }
+
+ /**
+ * This operation is not supported by URL Blob Container
+ */
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
+ throw new UnsupportedOperationException("URL repository is read only");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java b/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java
new file mode 100644
index 0000000..c2c2bf2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.breaker;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Exception thrown when the circuit breaker trips
+ */
+public class CircuitBreakingException extends ElasticsearchException {
+
+ // TODO: maybe add more neat metrics here?
+ public CircuitBreakingException(String message) {
+ super(message);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java b/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java
new file mode 100644
index 0000000..6605535
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.breaker;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * MemoryCircuitBreaker is a circuit breaker that breaks once a
+ * configurable memory limit has been reached.
+ */
+public class MemoryCircuitBreaker {
+
+ private final long memoryBytesLimit;
+ private final double overheadConstant;
+ private final AtomicLong used;
+ private final ESLogger logger;
+
+
+ /**
+ * Create a circuit breaker that will break if the number of estimated
+ * bytes grows above the limit. All estimations will be multiplied by
+ * the given overheadConstant. This breaker starts with 0 bytes used.
+ * @param limit circuit breaker limit
+ * @param overheadConstant constant multiplier for byte estimations
+ */
+ public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) {
+ this.memoryBytesLimit = limit.bytes();
+ this.overheadConstant = overheadConstant;
+ this.used = new AtomicLong(0);
+ this.logger = logger;
+ if (logger.isTraceEnabled()) {
+            logger.trace("Creating MemoryCircuitBreaker with a limit of {} bytes ({}) and an overhead constant of {}",
+ this.memoryBytesLimit, limit, this.overheadConstant);
+ }
+ }
+
+ /**
+ * Create a circuit breaker that will break if the number of estimated
+ * bytes grows above the limit. All estimations will be multiplied by
+ * the given overheadConstant. Uses the given oldBreaker to initialize
+ * the starting offset.
+ * @param limit circuit breaker limit
+ * @param overheadConstant constant multiplier for byte estimations
+ * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
+ */
+ public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) {
+ this.memoryBytesLimit = limit.bytes();
+ this.overheadConstant = overheadConstant;
+ if (oldBreaker == null) {
+ this.used = new AtomicLong(0);
+ } else {
+ this.used = oldBreaker.used;
+ }
+ this.logger = logger;
+ if (logger.isTraceEnabled()) {
+            logger.trace("Creating MemoryCircuitBreaker with a limit of {} bytes ({}) and an overhead constant of {}",
+ this.memoryBytesLimit, limit, this.overheadConstant);
+ }
+ }
+
+ /**
+     * Trips the breaker, unconditionally throwing a {@link CircuitBreakingException}
+     * @throws CircuitBreakingException always
+ */
+ public void circuitBreak() throws CircuitBreakingException {
+ throw new CircuitBreakingException("Data too large, data would be larger than limit of [" +
+ memoryBytesLimit + "] bytes");
+ }
+
+ /**
+ * Add a number of bytes, tripping the circuit breaker if the aggregated
+ * estimates are above the limit. Automatically trips the breaker if the
+ * memory limit is set to 0. Will never trip the breaker if the limit is
+ * set < 0, but can still be used to aggregate estimations.
+ * @param bytes number of bytes to add to the breaker
+ * @return number of "used" bytes so far
+     * @throws CircuitBreakingException if the breaker trips
+ */
+ public double addEstimateBytesAndMaybeBreak(long bytes) throws CircuitBreakingException {
+ // short-circuit on no data allowed, immediately throwing an exception
+ if (memoryBytesLimit == 0) {
+ circuitBreak();
+ }
+
+ long newUsed;
+ // If there is no limit (-1), we can optimize a bit by using
+ // .addAndGet() instead of looping (because we don't have to check a
+ // limit), which makes the RamAccountingTermsEnum case faster.
+ if (this.memoryBytesLimit == -1) {
+ newUsed = this.used.addAndGet(bytes);
+ if (logger.isTraceEnabled()) {
+ logger.trace("Adding [{}] to used bytes [new used: [{}], limit: [-1b]]",
+ new ByteSizeValue(bytes), new ByteSizeValue(newUsed));
+ }
+ return newUsed;
+ }
+
+        // Otherwise, check the limit and commit the addition, looping if
+        // there are conflicts. Retried attempts may emit extra trace logging,
+        // so the trace output cannot be used to count additions.
+ long currentUsed;
+ do {
+ currentUsed = this.used.get();
+ newUsed = currentUsed + bytes;
+ long newUsedWithOverhead = (long)(newUsed * overheadConstant);
+ if (logger.isTraceEnabled()) {
+ logger.trace("Adding [{}] to used bytes [new used: [{}], limit: {} [{}], estimate: {} [{}]]",
+ new ByteSizeValue(bytes), new ByteSizeValue(newUsed),
+ memoryBytesLimit, new ByteSizeValue(memoryBytesLimit),
+ newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead));
+ }
+ if (memoryBytesLimit > 0 && newUsedWithOverhead > memoryBytesLimit) {
+ logger.error("New used memory {} [{}] would be larger than configured breaker: {} [{}], breaking",
+ newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead),
+ memoryBytesLimit, new ByteSizeValue(memoryBytesLimit));
+ circuitBreak();
+ }
+ // Attempt to set the new used value, but make sure it hasn't changed
+ // underneath us, if it has, keep trying until we are able to set it
+ } while (!this.used.compareAndSet(currentUsed, newUsed));
+
+ return newUsed;
+ }
+
+ /**
+ * Add an <b>exact</b> number of bytes, not checking for tripping the
+ * circuit breaker. This bypasses the overheadConstant multiplication.
+ * @param bytes number of bytes to add to the breaker
+ * @return number of "used" bytes so far
+ */
+ public long addWithoutBreaking(long bytes) {
+ long u = used.addAndGet(bytes);
+ if (logger.isTraceEnabled()) {
+ logger.trace("Adjusted breaker by [{}] bytes, now [{}]", bytes, u);
+ }
+ assert u >= 0 : "Used bytes: [" + u + "] must be >= 0";
+ return u;
+ }
+
+ /**
+ * @return the number of aggregated "used" bytes so far
+ */
+ public long getUsed() {
+ return this.used.get();
+ }
+
+ /**
+ * @return the maximum number of bytes before the circuit breaker will trip
+ */
+ public long getMaximum() {
+ return this.memoryBytesLimit;
+ }
+
+ /**
+ * @return the constant multiplier the breaker uses for aggregations
+ */
+ public double getOverhead() {
+ return this.overheadConstant;
+ }
+}
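
A short usage sketch (illustrative; the harness class is hypothetical, and Loggers is the ESLogger factory from org.elasticsearch.common.logging): with a 1MB limit and a 1.03 overhead constant, the second addition below trips the breaker before its bytes are committed:

import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class BreakerSketch {
    public static void main(String[] args) {
        MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(
                new ByteSizeValue(1, ByteSizeUnit.MB), 1.03, Loggers.getLogger(BreakerSketch.class));
        try {
            breaker.addEstimateBytesAndMaybeBreak(512 * 1024); // 512KB * 1.03 fits under 1MB
            breaker.addEstimateBytesAndMaybeBreak(700 * 1024); // (512 + 700)KB * 1.03 > 1MB: trips
        } catch (CircuitBreakingException e) {
            // the failed addition was never committed; only the first 512KB remain accounted
        } finally {
            breaker.addWithoutBreaking(-breaker.getUsed()); // release what was accounted
        }
    }
}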
diff --git a/src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java
new file mode 100644
index 0000000..d6bc096
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CoderResult;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.util.CharsetUtil;
+
+import com.google.common.base.Charsets;
+
+/**
+ */
+public class ByteBufferBytesReference implements BytesReference {
+
+ private final ByteBuffer buffer;
+
+ public ByteBufferBytesReference(ByteBuffer buffer) {
+ this.buffer = buffer;
+ }
+
+ @Override
+ public byte get(int index) {
+ return buffer.get(buffer.position() + index);
+ }
+
+ @Override
+ public int length() {
+ return buffer.remaining();
+ }
+
+ @Override
+ public BytesReference slice(int from, int length) {
+ ByteBuffer dup = buffer.duplicate();
+ dup.position(buffer.position() + from);
+ dup.limit(buffer.position() + from + length);
+ return new ByteBufferBytesReference(dup);
+ }
+
+ @Override
+ public StreamInput streamInput() {
+ return new ByteBufferStreamInput(buffer);
+ }
+
+ @Override
+ public void writeTo(OutputStream os) throws IOException {
+ if (buffer.hasArray()) {
+ os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+ } else {
+            byte[] tmp = new byte[8192];
+            ByteBuffer buf = buffer.duplicate();
+            while (buf.hasRemaining()) {
+                int chunk = Math.min(tmp.length, buf.remaining());
+                buf.get(tmp, 0, chunk);
+                // write only the bytes just read; writing all of tmp would append stale bytes on a short final chunk
+                os.write(tmp, 0, chunk);
+ }
+ }
+ }
+
+ @Override
+ public byte[] toBytes() {
+ if (!buffer.hasRemaining()) {
+ return BytesRef.EMPTY_BYTES;
+ }
+ byte[] tmp = new byte[buffer.remaining()];
+ buffer.duplicate().get(tmp);
+ return tmp;
+ }
+
+ @Override
+ public BytesArray toBytesArray() {
+ if (buffer.hasArray()) {
+ return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+ }
+ return new BytesArray(toBytes());
+ }
+
+ @Override
+ public BytesArray copyBytesArray() {
+ return new BytesArray(toBytes());
+ }
+
+ @Override
+ public ChannelBuffer toChannelBuffer() {
+ return ChannelBuffers.wrappedBuffer(buffer);
+ }
+
+ @Override
+ public boolean hasArray() {
+ return buffer.hasArray();
+ }
+
+ @Override
+ public byte[] array() {
+ return buffer.array();
+ }
+
+ @Override
+ public int arrayOffset() {
+ return buffer.arrayOffset() + buffer.position();
+ }
+
+ @Override
+ public int hashCode() {
+ return Helper.bytesHashCode(this);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return Helper.bytesEqual(this, (BytesReference) obj);
+ }
+
+ @Override
+ public String toUtf8() {
+ if (!buffer.hasRemaining()) {
+ return "";
+ }
+        final CharsetDecoder decoder = CharsetUtil.getDecoder(Charsets.UTF_8);
+        final CharBuffer dst = CharBuffer.allocate(
+                (int) ((double) buffer.remaining() * decoder.maxCharsPerByte()));
+        // decode from a duplicate so this read-only view does not consume the underlying buffer's position
+        final ByteBuffer src = buffer.duplicate();
+        try {
+            CoderResult cr = decoder.decode(src, dst, true);
+ if (!cr.isUnderflow()) {
+ cr.throwException();
+ }
+ cr = decoder.flush(dst);
+ if (!cr.isUnderflow()) {
+ cr.throwException();
+ }
+ } catch (CharacterCodingException x) {
+ throw new IllegalStateException(x);
+ }
+ return dst.flip().toString();
+ }
+
+ @Override
+ public BytesRef toBytesRef() {
+ if (buffer.hasArray()) {
+ return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+ }
+ return new BytesRef(toBytes());
+ }
+
+ @Override
+ public BytesRef copyBytesRef() {
+ return new BytesRef(toBytes());
+ }
+}
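
For example (a sketch with an invented class name; note that slicing shares the underlying buffer rather than copying it):

import java.nio.ByteBuffer;

import com.google.common.base.Charsets;
import org.elasticsearch.common.bytes.ByteBufferBytesReference;
import org.elasticsearch.common.bytes.BytesReference;

public class ByteBufferRefSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("hello world".getBytes(Charsets.UTF_8));
        BytesReference ref = new ByteBufferBytesReference(buf);
        BytesReference slice = ref.slice(6, 5); // a view of "world"; no bytes are copied
        System.out.println(slice.toUtf8());     // prints: world
        System.out.println(ref.length());       // still 11; the view is not consumed
    }
}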
diff --git a/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/src/main/java/org/elasticsearch/common/bytes/BytesArray.java
new file mode 100644
index 0000000..33b0741
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/BytesArray.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+
+import com.google.common.base.Charsets;
+
+public class BytesArray implements BytesReference {
+
+ public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0);
+
+ private byte[] bytes;
+ private int offset;
+ private int length;
+
+ public BytesArray(String bytes) {
+ BytesRef bytesRef = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(bytes, 0, bytes.length(), bytesRef);
+ this.bytes = bytesRef.bytes;
+ this.offset = bytesRef.offset;
+ this.length = bytesRef.length;
+ }
+
+ public BytesArray(BytesRef bytesRef) {
+ this(bytesRef, false);
+ }
+
+ public BytesArray(BytesRef bytesRef, boolean deepCopy) {
+ if (deepCopy) {
+ BytesRef copy = BytesRef.deepCopyOf(bytesRef);
+ bytes = copy.bytes;
+ offset = copy.offset;
+ length = copy.length;
+ } else {
+ bytes = bytesRef.bytes;
+ offset = bytesRef.offset;
+ length = bytesRef.length;
+ }
+ }
+
+ public BytesArray(byte[] bytes) {
+ this.bytes = bytes;
+ this.offset = 0;
+ this.length = bytes.length;
+ }
+
+ public BytesArray(byte[] bytes, int offset, int length) {
+ this.bytes = bytes;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ public byte get(int index) {
+ return bytes[offset + index];
+ }
+
+ @Override
+ public int length() {
+ return length;
+ }
+
+ @Override
+ public BytesReference slice(int from, int length) {
+ if (from < 0 || (from + length) > this.length) {
+ throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]");
+ }
+ return new BytesArray(bytes, offset + from, length);
+ }
+
+ @Override
+ public StreamInput streamInput() {
+ return new BytesStreamInput(bytes, offset, length, false);
+ }
+
+ @Override
+ public void writeTo(OutputStream os) throws IOException {
+ os.write(bytes, offset, length);
+ }
+
+ @Override
+ public byte[] toBytes() {
+ if (offset == 0 && bytes.length == length) {
+ return bytes;
+ }
+ return Arrays.copyOfRange(bytes, offset, offset + length);
+ }
+
+ @Override
+ public BytesArray toBytesArray() {
+ return this;
+ }
+
+ @Override
+ public BytesArray copyBytesArray() {
+ return new BytesArray(Arrays.copyOfRange(bytes, offset, offset + length));
+ }
+
+ @Override
+ public ChannelBuffer toChannelBuffer() {
+ return ChannelBuffers.wrappedBuffer(bytes, offset, length);
+ }
+
+ @Override
+ public boolean hasArray() {
+ return true;
+ }
+
+ @Override
+ public byte[] array() {
+ return bytes;
+ }
+
+ @Override
+ public int arrayOffset() {
+ return offset;
+ }
+
+ @Override
+ public String toUtf8() {
+ if (length == 0) {
+ return "";
+ }
+ return new String(bytes, offset, length, Charsets.UTF_8);
+ }
+
+ @Override
+ public BytesRef toBytesRef() {
+ return new BytesRef(bytes, offset, length);
+ }
+
+ @Override
+ public BytesRef copyBytesRef() {
+ return new BytesRef(Arrays.copyOfRange(bytes, offset, offset + length));
+ }
+
+ @Override
+ public int hashCode() {
+ return Helper.bytesHashCode(this);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return Helper.bytesEqual(this, (BytesReference) obj);
+ }
+}
\ No newline at end of file
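
A small sketch of the sharing semantics (hypothetical harness): toBytes() hands back the backing array when the reference spans it exactly, while a sub-range or copyBytesArray() forces a copy:

import com.google.common.base.Charsets;
import org.elasticsearch.common.bytes.BytesArray;

public class BytesArraySketch {
    public static void main(String[] args) {
        byte[] backing = "elasticsearch".getBytes(Charsets.UTF_8);
        BytesArray whole = new BytesArray(backing);
        System.out.println(whole.toBytes() == backing); // true: spans the whole array, shared
        BytesArray part = new BytesArray(backing, 0, 7); // view of "elastic"
        System.out.println(part.toBytes() == backing);   // false: sub-range forces a copy
    }
}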
diff --git a/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
new file mode 100644
index 0000000..34072f0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.bytes;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A reference to bytes.
+ */
+public interface BytesReference {
+
+ public static class Helper {
+
+ public static boolean bytesEqual(BytesReference a, BytesReference b) {
+ if (a == b) {
+ return true;
+ }
+ if (a.length() != b.length()) {
+ return false;
+ }
+ if (!a.hasArray()) {
+ a = a.toBytesArray();
+ }
+ if (!b.hasArray()) {
+ b = b.toBytesArray();
+ }
+ int bUpTo = b.arrayOffset();
+ final byte[] aArray = a.array();
+ final byte[] bArray = b.array();
+ final int end = a.arrayOffset() + a.length();
+ for (int aUpTo = a.arrayOffset(); aUpTo < end; aUpTo++, bUpTo++) {
+ if (aArray[aUpTo] != bArray[bUpTo]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public static int bytesHashCode(BytesReference a) {
+ if (!a.hasArray()) {
+ a = a.toBytesArray();
+ }
+ int result = 0;
+ final int end = a.arrayOffset() + a.length();
+ for (int i = a.arrayOffset(); i < end; i++) {
+ result = 31 * result + a.array()[i];
+ }
+ return result;
+ }
+ }
+
+ /**
+     * Returns the byte at the specified index. The index needs to be between 0 (inclusive) and length (exclusive).
+ */
+ byte get(int index);
+
+ /**
+ * The length.
+ */
+ int length();
+
+ /**
+ * Slice the bytes from the <tt>from</tt> index up to <tt>length</tt>.
+ */
+ BytesReference slice(int from, int length);
+
+ /**
+ * A stream input of the bytes.
+ */
+ StreamInput streamInput();
+
+ /**
+ * Writes the bytes directly to the output stream.
+ */
+ void writeTo(OutputStream os) throws IOException;
+
+ /**
+ * Returns the bytes as a single byte array.
+ */
+ byte[] toBytes();
+
+ /**
+ * Returns the bytes as a byte array, possibly sharing the underlying byte buffer.
+ */
+ BytesArray toBytesArray();
+
+ /**
+ * Returns the bytes copied over as a byte array.
+ */
+ BytesArray copyBytesArray();
+
+ /**
+ * Returns the bytes as a channel buffer.
+ */
+ ChannelBuffer toChannelBuffer();
+
+ /**
+     * Returns <tt>true</tt> if there is an underlying byte array for this bytes reference.
+ */
+ boolean hasArray();
+
+ /**
+     * The underlying byte array (if one exists).
+ */
+ byte[] array();
+
+ /**
+ * The offset into the underlying byte array.
+ */
+ int arrayOffset();
+
+ /**
+ * Converts to a string based on utf8.
+ */
+ String toUtf8();
+
+ /**
+ * Converts to Lucene BytesRef.
+ */
+ BytesRef toBytesRef();
+
+ /**
+ * Converts to a copied Lucene BytesRef.
+ */
+ BytesRef copyBytesRef();
+}
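
The hasArray()/array()/arrayOffset() trio lets callers take a zero-copy path when a backing array exists; a minimal sketch of a consumer (the dump helper is invented for the example):

import java.io.IOException;
import java.io.OutputStream;

import org.elasticsearch.common.bytes.BytesReference;

public final class BytesReferenceSketch {
    // Writes a reference, using the backing array directly when one exists.
    static void dump(BytesReference ref, OutputStream os) throws IOException {
        if (ref.hasArray()) {
            os.write(ref.array(), ref.arrayOffset(), ref.length()); // zero-copy path
        } else {
            ref.writeTo(os); // falls back to the implementation's copy loop
        }
    }
}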
diff --git a/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java
new file mode 100644
index 0000000..2be2112
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.bytes;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.transport.netty.ChannelBufferStreamInputFactory;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ */
+public class ChannelBufferBytesReference implements BytesReference {
+
+ private final ChannelBuffer buffer;
+
+ public ChannelBufferBytesReference(ChannelBuffer buffer) {
+ this.buffer = buffer;
+ }
+
+ @Override
+ public byte get(int index) {
+ return buffer.getByte(buffer.readerIndex() + index);
+ }
+
+ @Override
+ public int length() {
+ return buffer.readableBytes();
+ }
+
+ @Override
+ public BytesReference slice(int from, int length) {
+        // offset by readerIndex() so slicing is consistent with get() and arrayOffset() above
+        return new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex() + from, length));
+ }
+
+ @Override
+ public StreamInput streamInput() {
+ return ChannelBufferStreamInputFactory.create(buffer.duplicate());
+ }
+
+ @Override
+ public void writeTo(OutputStream os) throws IOException {
+ buffer.getBytes(buffer.readerIndex(), os, length());
+ }
+
+ @Override
+ public byte[] toBytes() {
+ return copyBytesArray().toBytes();
+ }
+
+ @Override
+ public BytesArray toBytesArray() {
+ if (buffer.hasArray()) {
+ return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
+ }
+ return copyBytesArray();
+ }
+
+ @Override
+ public BytesArray copyBytesArray() {
+ byte[] copy = new byte[buffer.readableBytes()];
+ buffer.getBytes(buffer.readerIndex(), copy);
+ return new BytesArray(copy);
+ }
+
+ @Override
+ public ChannelBuffer toChannelBuffer() {
+ return buffer.duplicate();
+ }
+
+ @Override
+ public boolean hasArray() {
+ return buffer.hasArray();
+ }
+
+ @Override
+ public byte[] array() {
+ return buffer.array();
+ }
+
+ @Override
+ public int arrayOffset() {
+ return buffer.arrayOffset() + buffer.readerIndex();
+ }
+
+ @Override
+ public String toUtf8() {
+ return buffer.toString(Charsets.UTF_8);
+ }
+
+ @Override
+ public BytesRef toBytesRef() {
+ if (buffer.hasArray()) {
+ return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
+ }
+ byte[] copy = new byte[buffer.readableBytes()];
+ buffer.getBytes(buffer.readerIndex(), copy);
+ return new BytesRef(copy);
+ }
+
+ @Override
+ public BytesRef copyBytesRef() {
+ byte[] copy = new byte[buffer.readableBytes()];
+ buffer.getBytes(buffer.readerIndex(), copy);
+ return new BytesRef(copy);
+ }
+
+ @Override
+ public int hashCode() {
+ return Helper.bytesHashCode(this);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return Helper.bytesEqual(this, (BytesReference) obj);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/bytes/HashedBytesArray.java b/src/main/java/org/elasticsearch/common/bytes/HashedBytesArray.java
new file mode 100644
index 0000000..d29fc1f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/HashedBytesArray.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.bytes;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A bytes array reference that caches the hash code.
+ */
+public class HashedBytesArray implements BytesReference {
+
+ private final byte[] bytes;
+
+ /**
+     * Caches the hash code for the bytes
+ */
+ private int hash; // Defaults to 0
+
+ public HashedBytesArray(byte[] bytes) {
+ this.bytes = bytes;
+ }
+
+ @Override
+ public byte get(int index) {
+ return bytes[index];
+ }
+
+ @Override
+ public int length() {
+ return bytes.length;
+ }
+
+ @Override
+ public BytesReference slice(int from, int length) {
+ if (from < 0 || (from + length) > bytes.length) {
+ throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + bytes.length + "], with slice parameters from [" + from + "], length [" + length + "]");
+ }
+ return new BytesArray(bytes, from, length);
+ }
+
+ @Override
+ public StreamInput streamInput() {
+ return new BytesStreamInput(bytes, false);
+ }
+
+ @Override
+ public void writeTo(OutputStream os) throws IOException {
+ os.write(bytes);
+ }
+
+ @Override
+ public byte[] toBytes() {
+ return bytes;
+ }
+
+ @Override
+ public BytesArray toBytesArray() {
+ return new BytesArray(bytes);
+ }
+
+ @Override
+ public BytesArray copyBytesArray() {
+ byte[] copy = new byte[bytes.length];
+ System.arraycopy(bytes, 0, copy, 0, bytes.length);
+ return new BytesArray(copy);
+ }
+
+ @Override
+ public ChannelBuffer toChannelBuffer() {
+ return ChannelBuffers.wrappedBuffer(bytes, 0, bytes.length);
+ }
+
+ @Override
+ public boolean hasArray() {
+ return true;
+ }
+
+ @Override
+ public byte[] array() {
+ return bytes;
+ }
+
+ @Override
+ public int arrayOffset() {
+ return 0;
+ }
+
+ @Override
+ public String toUtf8() {
+ if (bytes.length == 0) {
+ return "";
+ }
+ return new String(bytes, Charsets.UTF_8);
+ }
+
+ @Override
+ public BytesRef toBytesRef() {
+ return new BytesRef(bytes);
+ }
+
+ @Override
+ public BytesRef copyBytesRef() {
+ byte[] copy = new byte[bytes.length];
+ System.arraycopy(bytes, 0, copy, 0, bytes.length);
+ return new BytesRef(copy);
+ }
+
+ @Override
+ public int hashCode() {
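+        // 0 doubles as "not yet computed"; like String.hashCode(), a genuine hash of 0 is recomputed on every call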
+ if (hash == 0) {
+ hash = Helper.bytesHashCode(this);
+ }
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return Helper.bytesEqual(this, (BytesReference) obj);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java b/src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java
new file mode 100644
index 0000000..2adec0f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.TreeSet;
+
+/**
+ * A {@link TreeSet} that is bounded in size: once the set grows past the
+ * configured size, the largest elements (per the set's ordering) are evicted.
+ */
+public class BoundedTreeSet<E> extends TreeSet<E> {
+
+ private final int size;
+
+ public BoundedTreeSet(int size) {
+ this.size = size;
+ }
+
+ public BoundedTreeSet(Comparator<? super E> comparator, int size) {
+ super(comparator);
+ this.size = size;
+ }
+
+ @Override
+ public boolean add(E e) {
+ boolean result = super.add(e);
+ rebound();
+ return result;
+ }
+
+ @Override
+ public boolean addAll(Collection<? extends E> c) {
+ boolean result = super.addAll(c);
+ rebound();
+ return result;
+ }
+
+ private void rebound() {
+ while (size() > size) {
+ remove(last());
+ }
+ }
+}
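
Because rebound() evicts from the tail, the set keeps the smallest elements under its ordering; pass a reversed comparator to keep the largest. A sketch (hypothetical harness):

import java.util.Arrays;

import org.elasticsearch.common.collect.BoundedTreeSet;

public class BoundedTreeSetSketch {
    public static void main(String[] args) {
        BoundedTreeSet<Integer> smallest = new BoundedTreeSet<Integer>(3);
        smallest.addAll(Arrays.asList(5, 1, 9, 7, 3));
        System.out.println(smallest); // [1, 3, 5]: 9 and 7 were evicted from the tail
    }
}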
diff --git a/src/main/java/org/elasticsearch/common/collect/HppcMaps.java b/src/main/java/org/elasticsearch/common/collect/HppcMaps.java
new file mode 100644
index 0000000..be6f1da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/HppcMaps.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectLookupContainer;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import java.util.Iterator;
+
+/**
+ */
+public final class HppcMaps {
+
+ private HppcMaps() {
+ }
+
+ /**
+ * Returns a new map with the given initial capacity
+ */
+ public static <K, V> ObjectObjectOpenHashMap<K, V> newMap(int capacity) {
+ return new ObjectObjectOpenHashMap<K, V>(capacity);
+ }
+
+ /**
+ * Returns a new map with a default initial capacity of
+ * {@value com.carrotsearch.hppc.HashContainerUtils#DEFAULT_CAPACITY}
+ */
+ public static <K, V> ObjectObjectOpenHashMap<K, V> newMap() {
+ return newMap(16);
+ }
+
+ /**
+ * Returns a map like {@link #newMap()} that does not accept <code>null</code> keys
+ */
+ public static <K, V> ObjectObjectOpenHashMap<K, V> newNoNullKeysMap() {
+ return ensureNoNullKeys(16);
+ }
+
+ /**
+ * Returns a map like {@link #newMap(int)} that does not accept <code>null</code> keys
+ */
+ public static <K, V> ObjectObjectOpenHashMap<K, V> newNoNullKeysMap(int capacity) {
+ return ensureNoNullKeys(capacity);
+ }
+
+ /**
+     * Returns a new map of the given capacity that rejects <code>null</code> keys.
+ */
+ public static <K, V> ObjectObjectOpenHashMap<K, V> ensureNoNullKeys(int capacity) {
+ return new ObjectObjectOpenHashMap<K, V>(capacity) {
+
+ @Override
+ public V put(K key, V value) {
+ if (key == null) {
+ throw new ElasticsearchIllegalArgumentException("Map key must not be null");
+ }
+ return super.put(key, value);
+ }
+
+ };
+ }
+
+ /**
+ * @return an intersection view over the two specified containers (which can be KeyContainer or ObjectOpenHashSet).
+ */
+    // HPPC has forEach, but using it here would require building an intermediate set;
+    // this method iterates over each unique value without creating a third set. The
+    // returned iterator is intended for single-pass for-each use, since hasNext() advances the cursor.
+ public static <T> Iterable<T> intersection(ObjectLookupContainer<T> container1, final ObjectLookupContainer<T> container2) {
+ assert container1 != null && container2 != null;
+ final Iterator<ObjectCursor<T>> iterator = container1.iterator();
+ final Iterator<T> intersection = new Iterator<T>() {
+
+ T current;
+
+ @Override
+ public boolean hasNext() {
+ if (iterator.hasNext()) {
+ do {
+ T next = iterator.next().value;
+ if (container2.contains(next)) {
+ current = next;
+ return true;
+ }
+ } while (iterator.hasNext());
+ }
+ return false;
+ }
+
+ @Override
+ public T next() {
+ return current;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ return new Iterable<T>() {
+ @Override
+ public Iterator<T> iterator() {
+ return intersection;
+ }
+ };
+ }
+
+ public final static class Object {
+
+ public final static class Integer {
+
+ public static <V> ObjectIntOpenHashMap<V> ensureNoNullKeys(int capacity, float loadFactor) {
+ return new ObjectIntOpenHashMap<V>(capacity, loadFactor) {
+
+ @Override
+ public int put(V key, int value) {
+ if (key == null) {
+ throw new ElasticsearchIllegalArgumentException("Map key must not be null");
+ }
+ return super.put(key, value);
+ }
+ };
+ }
+
+ }
+
+ }
+
+}
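
A sketch of the intersection view (hypothetical harness; assumes HPPC's ObjectOpenHashSet.from(...) varargs factory):

import com.carrotsearch.hppc.ObjectOpenHashSet;
import org.elasticsearch.common.collect.HppcMaps;

public class HppcMapsSketch {
    public static void main(String[] args) {
        ObjectOpenHashSet<String> left = ObjectOpenHashSet.from("x", "y", "z");
        ObjectOpenHashSet<String> right = ObjectOpenHashSet.from("y", "z", "w");
        for (String common : HppcMaps.intersection(left, right)) {
            System.out.println(common); // prints y and z, in hash order
        }
    }
}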
diff --git a/src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java b/src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java
new file mode 100644
index 0000000..20110eb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import java.util.*;
+
+/**
+ *
+ */
+public class IdentityHashSet<E>
+ extends AbstractSet<E>
+ implements Set<E>, Cloneable, java.io.Serializable {
+
+ static final long serialVersionUID = -5024744406713321677L;
+
+ private transient IdentityHashMap<E, Object> map;
+
+ // Dummy value to associate with an Object in the backing Map
+ private static final Object PRESENT = new Object();
+
+ public IdentityHashSet() {
+ map = new IdentityHashMap<E, Object>();
+ }
+
+ public IdentityHashSet(Collection<? extends E> c) {
+ map = new IdentityHashMap<E, Object>(Math.max((int) (c.size() / .75f) + 1, 16));
+ addAll(c);
+ }
+
+ public IdentityHashSet(int expectedSize) {
+ map = new IdentityHashMap<E, Object>(expectedSize);
+ }
+
+ /**
+ * Returns an iterator over the elements in this set. The elements
+ * are returned in no particular order.
+ *
+ * @return an Iterator over the elements in this set
+ * @see ConcurrentModificationException
+ */
+ public Iterator<E> iterator() {
+ return map.keySet().iterator();
+ }
+
+ /**
+ * Returns the number of elements in this set (its cardinality).
+ *
+ * @return the number of elements in this set (its cardinality)
+ */
+ public int size() {
+ return map.size();
+ }
+
+ /**
+ * Returns <tt>true</tt> if this set contains no elements.
+ *
+ * @return <tt>true</tt> if this set contains no elements
+ */
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ /**
+ * Returns <tt>true</tt> if this set contains the specified element.
+     * Unlike a regular {@link java.util.HashSet}, membership is based on
+     * reference equality: this set contains <tt>o</tt> if and only if it
+     * holds an element <tt>e</tt> such that <tt>(o&nbsp;==&nbsp;e)</tt>.
+ *
+ * @param o element whose presence in this set is to be tested
+ * @return <tt>true</tt> if this set contains the specified element
+ */
+ public boolean contains(Object o) {
+ return map.containsKey(o);
+ }
+
+ /**
+ * Adds the specified element to this set if it is not already present.
+     * More formally, adds the specified element <tt>e</tt> to this set if
+     * this set contains no element <tt>e2</tt> such that
+     * <tt>(e&nbsp;==&nbsp;e2)</tt> (reference equality).
+ * If this set already contains the element, the call leaves the set
+ * unchanged and returns <tt>false</tt>.
+ *
+ * @param e element to be added to this set
+ * @return <tt>true</tt> if this set did not already contain the specified
+ * element
+ */
+ public boolean add(E e) {
+ return map.put(e, PRESENT) == null;
+ }
+
+ /**
+ * Removes the specified element from this set if it is present.
+     * More formally, removes the element <tt>e</tt> such that
+     * <tt>(o&nbsp;==&nbsp;e)</tt> (reference equality),
+ * if this set contains such an element. Returns <tt>true</tt> if
+ * this set contained the element (or equivalently, if this set
+ * changed as a result of the call). (This set will not contain the
+ * element once the call returns.)
+ *
+ * @param o object to be removed from this set, if present
+ * @return <tt>true</tt> if the set contained the specified element
+ */
+ public boolean remove(Object o) {
+ return map.remove(o) == PRESENT;
+ }
+
+ /**
+ * Removes all of the elements from this set.
+ * The set will be empty after this call returns.
+ */
+ public void clear() {
+ map.clear();
+ }
+
+ /**
+     * Returns a shallow copy of this <tt>IdentityHashSet</tt> instance: the elements
+ * themselves are not cloned.
+ *
+ * @return a shallow copy of this set
+ */
+ public Object clone() {
+ try {
+ IdentityHashSet<E> newSet = (IdentityHashSet<E>) super.clone();
+ newSet.map = (IdentityHashMap<E, Object>) map.clone();
+ return newSet;
+ } catch (CloneNotSupportedException e) {
+ throw new InternalError();
+ }
+ }
+
+ /**
+     * Saves the state of this <tt>IdentityHashSet</tt> instance to a stream
+     * (that is, serializes it).
+     *
+     * @serialData The size of the set (the number of elements it contains)
+     *             (int), followed by all of its elements (each an Object) in
+     *             no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ // Write out any hidden serialization magic
+ s.defaultWriteObject();
+
+ // Write out size
+ s.writeInt(map.size());
+
+ // Write out all elements in the proper order.
+ for (Iterator i = map.keySet().iterator(); i.hasNext(); )
+ s.writeObject(i.next());
+ }
+
+ /**
+     * Reconstitutes the <tt>IdentityHashSet</tt> instance from a stream (that is,
+ * deserialize it).
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ // Read in any hidden serialization magic
+ s.defaultReadObject();
+
+ // Read in size
+ int size = s.readInt();
+
+ map = new IdentityHashMap<E, Object>(size);
+
+ // Read in all elements in the proper order.
+ for (int i = 0; i < size; i++) {
+ E e = (E) s.readObject();
+ map.put(e, PRESENT);
+ }
+ }
+}
+
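
Membership is by reference, not equals(); a sketch (hypothetical harness):

import org.elasticsearch.common.collect.IdentityHashSet;

public class IdentityHashSetSketch {
    public static void main(String[] args) {
        IdentityHashSet<String> set = new IdentityHashSet<String>();
        String a = new String("es");
        String b = new String("es"); // equal to a, but a distinct reference
        set.add(a);
        set.add(b);
        System.out.println(set.size()); // 2: both references are kept
        System.out.println(set.contains(new String("es"))); // false: no identical reference
    }
}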
diff --git a/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java
new file mode 100644
index 0000000..1b55ce5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.carrotsearch.hppc.*;
+import com.carrotsearch.hppc.cursors.IntCursor;
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.predicates.IntPredicate;
+import com.carrotsearch.hppc.procedures.IntObjectProcedure;
+import com.google.common.collect.UnmodifiableIterator;
+
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * An immutable map implementation based on open hash map.
+ * <p/>
+ * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenIntMap)} (which is an optimized
+ * option to copy over existing content and modify it).
+ */
+public final class ImmutableOpenIntMap<VType> implements Iterable<IntObjectCursor<VType>> {
+
+ private final IntObjectOpenHashMap<VType> map;
+
+ private ImmutableOpenIntMap(IntObjectOpenHashMap<VType> map) {
+ this.map = map;
+ }
+
+ /**
+ * @return Returns the value associated with the given key or the default value
+ * for the key type, if the key is not associated with any value.
+ * <p/>
+ * <b>Important note:</b> For primitive type values, the value returned for a non-existing
+ * key may not be the default value of the primitive type (it may be any value previously
+ * assigned to that slot).
+ */
+ public VType get(int key) {
+ return map.get(key);
+ }
+
+ /**
+ * Returns <code>true</code> if this container has an association to a value for
+ * the given key.
+ */
+ public boolean containsKey(int key) {
+ return map.containsKey(key);
+ }
+
+ /**
+ * @return Returns the current size (number of assigned keys) in the container.
+ */
+ public int size() {
+ return map.size();
+ }
+
+ /**
+     * @return <code>true</code> if this hash map contains no assigned keys.
+ */
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ /**
+ * Returns a cursor over the entries (key-value pairs) in this map. The iterator is
+ * implemented as a cursor and it returns <b>the same cursor instance</b> on every
+ * call to {@link java.util.Iterator#next()}. To read the current key and value use the cursor's
+ * public fields. An example is shown below.
+ * <pre>
+ * for (IntShortCursor c : intShortMap)
+ * {
+ * System.out.println(&quot;index=&quot; + c.index
+ * + &quot; key=&quot; + c.key
+ * + &quot; value=&quot; + c.value);
+ * }
+ * </pre>
+ * <p/>
+ * <p>The <code>index</code> field inside the cursor gives the internal index inside
+     * the container's implementation. The interpretation of this index depends
+     * on the container.
+ */
+ @Override
+ public Iterator<IntObjectCursor<VType>> iterator() {
+ return map.iterator();
+ }
+
+ /**
+ * Returns a specialized view of the keys of this associated container.
+ * The view additionally implements {@link com.carrotsearch.hppc.ObjectLookupContainer}.
+ */
+ public IntLookupContainer keys() {
+ return map.keys();
+ }
+
+ /**
+ * Returns a direct iterator over the keys.
+ */
+ public UnmodifiableIterator<Integer> keysIt() {
+ final Iterator<IntCursor> iterator = map.keys().iterator();
+ return new UnmodifiableIterator<Integer>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Integer next() {
+ return iterator.next().value;
+ }
+ };
+ }
+
+ /**
+ * @return Returns a container with all values stored in this map.
+ */
+ public ObjectContainer<VType> values() {
+ return map.values();
+ }
+
+ /**
+     * Returns a direct iterator over the values.
+ */
+ public UnmodifiableIterator<VType> valuesIt() {
+ final Iterator<ObjectCursor<VType>> iterator = map.values().iterator();
+ return new UnmodifiableIterator<VType>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public VType next() {
+ return iterator.next().value;
+ }
+ };
+ }
+
+ @Override
+ public String toString() {
+ return map.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ImmutableOpenIntMap that = (ImmutableOpenIntMap) o;
+
+ if (!map.equals(that.map)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return map.hashCode();
+ }
+
+ @SuppressWarnings("unchecked")
+ private static final ImmutableOpenIntMap EMPTY = new ImmutableOpenIntMap(new IntObjectOpenHashMap());
+
+ @SuppressWarnings("unchecked")
+ public static <VType> ImmutableOpenIntMap<VType> of() {
+ return EMPTY;
+ }
+
+ public static <VType> Builder<VType> builder() {
+ return new Builder<VType>();
+ }
+
+ public static <VType> Builder<VType> builder(int size) {
+ return new Builder<VType>(size);
+ }
+
+ public static <VType> Builder<VType> builder(ImmutableOpenIntMap<VType> map) {
+ return new Builder<VType>(map);
+ }
+
+ public static class Builder<VType> implements IntObjectMap<VType> {
+
+ private IntObjectOpenHashMap<VType> map;
+
+ public Builder() {
+ //noinspection unchecked
+ this(EMPTY);
+ }
+
+ public Builder(int size) {
+ this.map = new IntObjectOpenHashMap<VType>(size);
+ }
+
+ public Builder(ImmutableOpenIntMap<VType> map) {
+ this.map = map.map.clone();
+ }
+
+ /**
+         * Builds a new instance of the immutable map; this builder must not be used after the call.
+ */
+ public ImmutableOpenIntMap<VType> build() {
+ IntObjectOpenHashMap<VType> map = this.map;
+ this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest)
+ return new ImmutableOpenIntMap<VType>(map);
+ }
+
+ /**
+         * Puts all the entries of the given map into the builder.
+ */
+ public Builder<VType> putAll(Map<Integer, VType> map) {
+ for (Map.Entry<Integer, VType> entry : map.entrySet()) {
+ this.map.put(entry.getKey(), entry.getValue());
+ }
+ return this;
+ }
+
+ /**
+ * A put operation that can be used in the fluent pattern.
+ */
+ public Builder<VType> fPut(int key, VType value) {
+ map.put(key, value);
+ return this;
+ }
+
+ @Override
+ public VType put(int key, VType value) {
+ return map.put(key, value);
+ }
+
+ @Override
+ public VType get(int key) {
+ return map.get(key);
+ }
+
+ @Override
+ public VType getOrDefault(int kType, VType vType) {
+ return map.getOrDefault(kType, vType);
+ }
+
+ /**
+ * Remove that can be used in the fluent pattern.
+ */
+ public Builder<VType> fRemove(int key) {
+ map.remove(key);
+ return this;
+ }
+
+ @Override
+ public VType remove(int key) {
+ return map.remove(key);
+ }
+
+ @Override
+ public Iterator<IntObjectCursor<VType>> iterator() {
+ return map.iterator();
+ }
+
+ @Override
+ public boolean containsKey(int key) {
+ return map.containsKey(key);
+ }
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public int putAll(IntObjectAssociativeContainer<? extends VType> container) {
+ return map.putAll(container);
+ }
+
+ @Override
+ public int putAll(Iterable<? extends IntObjectCursor<? extends VType>> iterable) {
+ return map.putAll(iterable);
+ }
+
+ @Override
+ public int removeAll(IntContainer container) {
+ return map.removeAll(container);
+ }
+
+ @Override
+ public int removeAll(IntPredicate predicate) {
+ return map.removeAll(predicate);
+ }
+
+ @Override
+ public <T extends IntObjectProcedure<? super VType>> T forEach(T procedure) {
+ return map.forEach(procedure);
+ }
+
+ @Override
+ public IntCollection keys() {
+ return map.keys();
+ }
+
+ @Override
+ public ObjectContainer<VType> values() {
+ return map.values();
+ }
+ }
+}
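
The builder supports fluent construction via fPut(...).build() as well as copy-and-modify via builder(map); a sketch (hypothetical harness):

import org.elasticsearch.common.collect.ImmutableOpenIntMap;

public class ImmutableOpenIntMapSketch {
    public static void main(String[] args) {
        ImmutableOpenIntMap<String> shards = ImmutableOpenIntMap.<String>builder()
                .fPut(0, "primary")
                .fPut(1, "replica")
                .build();
        // Copy-and-modify: the source map is left untouched.
        ImmutableOpenIntMap<String> grown = ImmutableOpenIntMap.builder(shards)
                .fPut(2, "replica")
                .build();
        System.out.println(shards.size() + " -> " + grown.size()); // 2 -> 3
    }
}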
diff --git a/src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java
new file mode 100644
index 0000000..48b29c3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.carrotsearch.hppc.*;
+import com.carrotsearch.hppc.cursors.LongCursor;
+import com.carrotsearch.hppc.cursors.LongObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.predicates.LongPredicate;
+import com.carrotsearch.hppc.procedures.LongObjectProcedure;
+import com.google.common.collect.UnmodifiableIterator;
+
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * An immutable map implementation based on open hash map.
+ * <p/>
+ * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenLongMap)} (which is an optimized
+ * option to copy over existing content and modify it).
+ */
+public final class ImmutableOpenLongMap<VType> implements Iterable<LongObjectCursor<VType>> {
+
+ private final LongObjectOpenHashMap<VType> map;
+
+ private ImmutableOpenLongMap(LongObjectOpenHashMap<VType> map) {
+ this.map = map;
+ }
+
+ /**
+ * @return Returns the value associated with the given key or the default value
+ * for the key type, if the key is not associated with any value.
+ * <p/>
+ * <b>Important note:</b> For primitive type values, the value returned for a non-existing
+ * key may not be the default value of the primitive type (it may be any value previously
+ * assigned to that slot).
+ */
+ public VType get(long key) {
+ return map.get(key);
+ }
+
+ /**
+ * Returns <code>true</code> if this container has an association to a value for
+ * the given key.
+ */
+ public boolean containsKey(long key) {
+ return map.containsKey(key);
+ }
+
+ /**
+ * @return Returns the current size (number of assigned keys) in the container.
+ */
+ public int size() {
+ return map.size();
+ }
+
+ /**
+     * @return <code>true</code> if this hash map contains no assigned keys.
+ */
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ /**
+ * Returns a cursor over the entries (key-value pairs) in this map. The iterator is
+ * implemented as a cursor and it returns <b>the same cursor instance</b> on every
+ * call to {@link java.util.Iterator#next()}. To read the current key and value use the cursor's
+ * public fields. An example is shown below.
+ * <pre>
+ * for (IntShortCursor c : intShortMap)
+ * {
+ * System.out.println(&quot;index=&quot; + c.index
+ * + &quot; key=&quot; + c.key
+ * + &quot; value=&quot; + c.value);
+ * }
+ * </pre>
+ * <p/>
+ * <p>The <code>index</code> field inside the cursor gives the internal index inside
+ * the container's implementation. The interpretation of this index depends on
+ * the container.
+ */
+ @Override
+ public Iterator<LongObjectCursor<VType>> iterator() {
+ return map.iterator();
+ }
+
+ /**
+ * Returns a specialized view of the keys of this associated container.
+ * The view additionally implements {@link com.carrotsearch.hppc.ObjectLookupContainer}.
+ */
+ public LongLookupContainer keys() {
+ return map.keys();
+ }
+
+ /**
+ * Returns a direct iterator over the keys.
+ */
+ public UnmodifiableIterator<Long> keysIt() {
+ final Iterator<LongCursor> iterator = map.keys().iterator();
+ return new UnmodifiableIterator<Long>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Long next() {
+ return iterator.next().value;
+ }
+ };
+ }
+
+ /**
+ * @return Returns a container with all values stored in this map.
+ */
+ public ObjectContainer<VType> values() {
+ return map.values();
+ }
+
+ /**
+ * Returns a direct iterator over the values.
+ */
+ public UnmodifiableIterator<VType> valuesIt() {
+ final Iterator<ObjectCursor<VType>> iterator = map.values().iterator();
+ return new UnmodifiableIterator<VType>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public VType next() {
+ return iterator.next().value;
+ }
+ };
+ }
+
+ @Override
+ public String toString() {
+ return map.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ImmutableOpenLongMap that = (ImmutableOpenLongMap) o;
+
+ if (!map.equals(that.map)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return map.hashCode();
+ }
+
+ @SuppressWarnings("unchecked")
+ private static final ImmutableOpenLongMap EMPTY = new ImmutableOpenLongMap(new LongObjectOpenHashMap());
+
+ @SuppressWarnings("unchecked")
+ public static <VType> ImmutableOpenLongMap<VType> of() {
+ return EMPTY;
+ }
+
+ public static <VType> Builder<VType> builder() {
+ return new Builder<VType>();
+ }
+
+ public static <VType> Builder<VType> builder(int size) {
+ return new Builder<VType>(size);
+ }
+
+ public static <VType> Builder<VType> builder(ImmutableOpenLongMap<VType> map) {
+ return new Builder<VType>(map);
+ }
+
+ public static class Builder<VType> implements LongObjectMap<VType> {
+
+ private LongObjectOpenHashMap<VType> map;
+
+ public Builder() {
+ //noinspection unchecked
+ this(EMPTY);
+ }
+
+ public Builder(int size) {
+ this.map = new LongObjectOpenHashMap<VType>(size);
+ }
+
+ public Builder(ImmutableOpenLongMap<VType> map) {
+ this.map = map.map.clone();
+ }
+
+ /**
+ * Builds a new instance of the immutable map. The builder must not be used after this call.
+ */
+ public ImmutableOpenLongMap<VType> build() {
+ LongObjectOpenHashMap<VType> map = this.map;
+ this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest)
+ return new ImmutableOpenLongMap<VType>(map);
+ }
+
+ /**
+ * Puts all the entries of the given map into the builder.
+ */
+ public Builder<VType> putAll(Map<Long, VType> map) {
+ for (Map.Entry<Long, VType> entry : map.entrySet()) {
+ this.map.put(entry.getKey(), entry.getValue());
+ }
+ return this;
+ }
+
+ /**
+ * A put operation that can be used in the fluent pattern.
+ */
+ public Builder<VType> fPut(long key, VType value) {
+ map.put(key, value);
+ return this;
+ }
+
+ @Override
+ public VType put(long key, VType value) {
+ return map.put(key, value);
+ }
+
+ @Override
+ public VType get(long key) {
+ return map.get(key);
+ }
+
+ @Override
+ public VType getOrDefault(long kType, VType vType) {
+ return map.getOrDefault(kType, vType);
+ }
+
+ /**
+ * A remove operation that can be used in the fluent pattern.
+ */
+ public Builder<VType> fRemove(long key) {
+ map.remove(key);
+ return this;
+ }
+
+ @Override
+ public VType remove(long key) {
+ return map.remove(key);
+ }
+
+ @Override
+ public Iterator<LongObjectCursor<VType>> iterator() {
+ return map.iterator();
+ }
+
+ @Override
+ public boolean containsKey(long key) {
+ return map.containsKey(key);
+ }
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public int putAll(LongObjectAssociativeContainer<? extends VType> container) {
+ return map.putAll(container);
+ }
+
+ @Override
+ public int putAll(Iterable<? extends LongObjectCursor<? extends VType>> iterable) {
+ return map.putAll(iterable);
+ }
+
+ @Override
+ public int removeAll(LongContainer container) {
+ return map.removeAll(container);
+ }
+
+ @Override
+ public int removeAll(LongPredicate predicate) {
+ return map.removeAll(predicate);
+ }
+
+ @Override
+ public <T extends LongObjectProcedure<? super VType>> T forEach(T procedure) {
+ return map.forEach(procedure);
+ }
+
+ @Override
+ public LongCollection keys() {
+ return map.keys();
+ }
+
+ @Override
+ public ObjectContainer<VType> values() {
+ return map.values();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java
new file mode 100644
index 0000000..19c7431
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.carrotsearch.hppc.*;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.carrotsearch.hppc.predicates.ObjectPredicate;
+import com.carrotsearch.hppc.procedures.ObjectObjectProcedure;
+import com.google.common.collect.UnmodifiableIterator;
+
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * An immutable map implementation based on open hash map.
+ * <p/>
+ * Can be constructed using a {@link #builder()}, or using {@link #builder(ImmutableOpenMap)} (which is an optimized
+ * option to copy over existing content and modify it).
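+ * <p/>
+ * A minimal usage sketch:
+ * <pre>
+ * ImmutableOpenMap&lt;String, Integer&gt; map = ImmutableOpenMap.&lt;String, Integer&gt;builder()
+ *         .fPut("one", 1)
+ *         .fPut("two", 2)
+ *         .build();
+ * int one = map.get("one");
+ * </pre>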
+ */
+public final class ImmutableOpenMap<KType, VType> implements Iterable<ObjectObjectCursor<KType, VType>> {
+
+ private final ObjectObjectOpenHashMap<KType, VType> map;
+
+ private ImmutableOpenMap(ObjectObjectOpenHashMap<KType, VType> map) {
+ this.map = map;
+ }
+
+ /**
+ * @return Returns the value associated with the given key or the default value
+ * for the value type, if the key is not associated with any value.
+ * <p/>
+ * <b>Important note:</b> For primitive type values, the value returned for a non-existing
+ * key may not be the default value of the primitive type (it may be any value previously
+ * assigned to that slot).
+ */
+ public VType get(KType key) {
+ return map.get(key);
+ }
+
+ /**
+ * @return Returns the value associated with the given key or the provided default value if the
+ * key is not associated with any value.
+ */
+ public VType getOrDefault(KType key, VType defaultValue) {
+ return map.getOrDefault(key, defaultValue);
+ }
+
+ /**
+ * Returns <code>true</code> if this container has an association to a value for
+ * the given key.
+ */
+ public boolean containsKey(KType key) {
+ return map.containsKey(key);
+ }
+
+ /**
+ * @return Returns the current size (number of assigned keys) in the container.
+ */
+ public int size() {
+ return map.size();
+ }
+
+ /**
+ * @return Return <code>true</code> if this hash map contains no assigned keys.
+ */
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ /**
+ * Returns a cursor over the entries (key-value pairs) in this map. The iterator is
+ * implemented as a cursor and it returns <b>the same cursor instance</b> on every
+ * call to {@link Iterator#next()}. To read the current key and value use the cursor's
+ * public fields. An example is shown below.
+ * <pre>
+ * for (IntShortCursor c : intShortMap)
+ * {
+ * System.out.println(&quot;index=&quot; + c.index
+ * + &quot; key=&quot; + c.key
+ * + &quot; value=&quot; + c.value);
+ * }
+ * </pre>
+ * <p/>
+ * <p>The <code>index</code> field inside the cursor gives the internal index inside
+ * the container's implementation. The interpretation of this index depends on
+ * the container.
+ */
+ @Override
+ public Iterator<ObjectObjectCursor<KType, VType>> iterator() {
+ return map.iterator();
+ }
+
+ /**
+ * Returns a specialized view of the keys of this associated container.
+ * The view additionally implements {@link ObjectLookupContainer}.
+ */
+ public ObjectLookupContainer<KType> keys() {
+ return map.keys();
+ }
+
+ /**
+ * Returns a direct iterator over the keys.
+ */
+ public UnmodifiableIterator<KType> keysIt() {
+ final Iterator<ObjectCursor<KType>> iterator = map.keys().iterator();
+ return new UnmodifiableIterator<KType>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public KType next() {
+ return iterator.next().value;
+ }
+ };
+ }
+
+ /**
+ * @return Returns a container with all values stored in this map.
+ */
+ public ObjectContainer<VType> values() {
+ return map.values();
+ }
+
+ /**
+ * Returns a direct iterator over the values.
+ */
+ public UnmodifiableIterator<VType> valuesIt() {
+ final Iterator<ObjectCursor<VType>> iterator = map.values().iterator();
+ return new UnmodifiableIterator<VType>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public VType next() {
+ return iterator.next().value;
+ }
+ };
+ }
+
+ @Override
+ public String toString() {
+ return map.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ImmutableOpenMap that = (ImmutableOpenMap) o;
+
+ if (!map.equals(that.map)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return map.hashCode();
+ }
+
+ @SuppressWarnings("unchecked")
+ private static final ImmutableOpenMap EMPTY = new ImmutableOpenMap(new ObjectObjectOpenHashMap());
+
+ @SuppressWarnings("unchecked")
+ public static <KType, VType> ImmutableOpenMap<KType, VType> of() {
+ return EMPTY;
+ }
+
+ public static <KType, VType> Builder<KType, VType> builder() {
+ return new Builder<KType, VType>();
+ }
+
+ public static <KType, VType> Builder<KType, VType> builder(int size) {
+ return new Builder<KType, VType>(size);
+ }
+
+ public static <KType, VType> Builder<KType, VType> builder(ImmutableOpenMap<KType, VType> map) {
+ return new Builder<KType, VType>(map);
+ }
+
+ public static class Builder<KType, VType> implements ObjectObjectMap<KType, VType> {
+
+ private ObjectObjectOpenHashMap<KType, VType> map;
+
+ public Builder() {
+ //noinspection unchecked
+ this(EMPTY);
+ }
+
+ public Builder(int size) {
+ this.map = new ObjectObjectOpenHashMap<KType, VType>(size);
+ }
+
+ public Builder(ImmutableOpenMap<KType, VType> map) {
+ this.map = map.map.clone();
+ }
+
+ /**
+ * Builds a new instance of the immutable map. The builder must not be used after this call.
+ */
+ public ImmutableOpenMap<KType, VType> build() {
+ ObjectObjectOpenHashMap<KType, VType> map = this.map;
+ this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest)
+ return new ImmutableOpenMap<KType, VType>(map);
+ }
+
+ /**
+ * Puts all the entries of the given map into the builder.
+ */
+ public Builder<KType, VType> putAll(Map<KType, VType> map) {
+ for (Map.Entry<KType, VType> entry : map.entrySet()) {
+ this.map.put(entry.getKey(), entry.getValue());
+ }
+ return this;
+ }
+
+ /**
+ * A put operation that can be used in the fluent pattern.
+ */
+ public Builder<KType, VType> fPut(KType key, VType value) {
+ map.put(key, value);
+ return this;
+ }
+
+ @Override
+ public VType put(KType key, VType value) {
+ return map.put(key, value);
+ }
+
+ @Override
+ public VType get(KType key) {
+ return map.get(key);
+ }
+
+ @Override
+ public VType getOrDefault(KType kType, VType vType) {
+ return map.getOrDefault(kType, vType);
+ }
+
+ @Override
+ public int putAll(ObjectObjectAssociativeContainer<? extends KType, ? extends VType> container) {
+ return map.putAll(container);
+ }
+
+ @Override
+ public int putAll(Iterable<? extends ObjectObjectCursor<? extends KType, ? extends VType>> iterable) {
+ return map.putAll(iterable);
+ }
+
+ /**
+ * A remove operation that can be used in the fluent pattern.
+ */
+ public Builder<KType, VType> fRemove(KType key) {
+ map.remove(key);
+ return this;
+ }
+
+ @Override
+ public VType remove(KType key) {
+ return map.remove(key);
+ }
+
+ @Override
+ public Iterator<ObjectObjectCursor<KType, VType>> iterator() {
+ return map.iterator();
+ }
+
+ @Override
+ public boolean containsKey(KType key) {
+ return map.containsKey(key);
+ }
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ @Override
+ public int removeAll(ObjectContainer<? extends KType> container) {
+ return map.removeAll(container);
+ }
+
+ @Override
+ public int removeAll(ObjectPredicate<? super KType> predicate) {
+ return map.removeAll(predicate);
+ }
+
+ @Override
+ public <T extends ObjectObjectProcedure<? super KType, ? super VType>> T forEach(T procedure) {
+ return map.forEach(procedure);
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public ObjectCollection<KType> keys() {
+ return map.keys();
+ }
+
+ @Override
+ public ObjectContainer<VType> values() {
+ return map.values();
+ }
+
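+ /**
+ * An unchecked cast of this builder to different key and value types; this
+ * is only safe while the builder holds no entries of the old types (e.g.
+ * right after creation).
+ */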
+ @SuppressWarnings("unchecked")
+ public <K, V> Builder<K, V> cast() {
+ return (Builder) this;
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/collect/Iterators2.java b/src/main/java/org/elasticsearch/common/collect/Iterators2.java
new file mode 100644
index 0000000..b2ca021
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/Iterators2.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.PeekingIterator;
+import com.google.common.collect.UnmodifiableIterator;
+
+import java.util.Comparator;
+import java.util.Iterator;
+
+public enum Iterators2 {
+ ;
+
+ /** Remove duplicated elements from an iterator over sorted content. */
+ public static <T> Iterator<T> deduplicateSorted(Iterator<? extends T> iterator, final Comparator<? super T> comparator) {
+ final PeekingIterator<T> it = Iterators.peekingIterator(iterator);
+ return new UnmodifiableIterator<T>() {
+
+ @Override
+ public boolean hasNext() {
+ return it.hasNext();
+ }
+
+ @Override
+ public T next() {
+ final T ret = it.next();
+ while (it.hasNext() && comparator.compare(ret, it.peek()) == 0) {
+ it.next();
+ }
+ assert !it.hasNext() || comparator.compare(ret, it.peek()) < 0 : "iterator is not sorted: " + ret + " > " + it.peek();
+ return ret;
+ }
+
+ };
+ }
+
+ /** Return a merged view over several iterators, optionally deduplicating equivalent entries. */
+ public static <T> Iterator<T> mergeSorted(Iterable<Iterator<? extends T>> iterators, Comparator<? super T> comparator, boolean deduplicate) {
+ Iterator<T> it = Iterators.mergeSorted(iterators, comparator);
+ if (deduplicate) {
+ it = deduplicateSorted(it, comparator);
+ }
+ return it;
+ }
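+
+ /*
+ * A minimal usage sketch, merging two sorted iterators while deduplicating
+ * (Iterators.forArray comes from the already-imported Guava Iterators):
+ *
+ * Iterator<String> merged = Iterators2.mergeSorted(
+ *         Arrays.<Iterator<? extends String>>asList(
+ *                 Iterators.forArray("a", "b", "b"),
+ *                 Iterators.forArray("b", "c")),
+ *         String.CASE_INSENSITIVE_ORDER, true);
+ * // yields "a", "b", "c"
+ */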
+
+}
diff --git a/src/main/java/org/elasticsearch/common/collect/MapBuilder.java b/src/main/java/org/elasticsearch/common/collect/MapBuilder.java
new file mode 100644
index 0000000..cc65b7c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/MapBuilder.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * A fluent builder of {@link java.util.Map} instances, exposing the backing
+ * map via {@link #map()} and an immutable copy via {@link #immutableMap()}.
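+ * <p/>
+ * A minimal usage sketch:
+ * <pre>
+ * ImmutableMap&lt;String, Integer&gt; map = MapBuilder.&lt;String, Integer&gt;newMapBuilder()
+ *         .put("one", 1)
+ *         .put("two", 2)
+ *         .immutableMap();
+ * </pre>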
+ */
+public class MapBuilder<K, V> {
+
+ public static <K, V> MapBuilder<K, V> newMapBuilder() {
+ return new MapBuilder<K, V>();
+ }
+
+ public static <K, V> MapBuilder<K, V> newMapBuilder(Map<K, V> map) {
+ return new MapBuilder<K, V>(map);
+ }
+
+ private Map<K, V> map;
+
+ public MapBuilder() {
+ this.map = newHashMap();
+ }
+
+ public MapBuilder(Map<K, V> map) {
+ this.map = newHashMap(map);
+ }
+
+ public MapBuilder<K, V> putAll(Map<K, V> map) {
+ this.map.putAll(map);
+ return this;
+ }
+
+ public MapBuilder<K, V> put(K key, V value) {
+ this.map.put(key, value);
+ return this;
+ }
+
+ public MapBuilder<K, V> remove(K key) {
+ this.map.remove(key);
+ return this;
+ }
+
+ public MapBuilder<K, V> clear() {
+ this.map.clear();
+ return this;
+ }
+
+ public V get(K key) {
+ return map.get(key);
+ }
+
+ public boolean containsKey(K key) {
+ return map.containsKey(key);
+ }
+
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ public Map<K, V> map() {
+ return this.map;
+ }
+
+ public ImmutableMap<K, V> immutableMap() {
+ return ImmutableMap.copyOf(map);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/collect/Tuple.java b/src/main/java/org/elasticsearch/common/collect/Tuple.java
new file mode 100644
index 0000000..dac73d4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/collect/Tuple.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+/**
+ * An immutable pair of two generically typed values.
+ */
+public class Tuple<V1, V2> {
+
+ public static <V1, V2> Tuple<V1, V2> tuple(V1 v1, V2 v2) {
+ return new Tuple<V1, V2>(v1, v2);
+ }
+
+ private final V1 v1;
+ private final V2 v2;
+
+ public Tuple(V1 v1, V2 v2) {
+ this.v1 = v1;
+ this.v2 = v2;
+ }
+
+ public V1 v1() {
+ return v1;
+ }
+
+ public V2 v2() {
+ return v2;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Tuple tuple = (Tuple) o;
+
+ if (v1 != null ? !v1.equals(tuple.v1) : tuple.v1 != null) return false;
+ if (v2 != null ? !v2.equals(tuple.v2) : tuple.v2 != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = v1 != null ? v1.hashCode() : 0;
+ result = 31 * result + (v2 != null ? v2.hashCode() : 0);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "Tuple [v1=" + v1 + ", v2=" + v2 + "]";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/src/main/java/org/elasticsearch/common/component/AbstractComponent.java
new file mode 100644
index 0000000..c5cdcd2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/component/AbstractComponent.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.component;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Base class for components, providing a logger, the node settings and the
+ * component-specific settings.
+ */
+public class AbstractComponent {
+
+ protected final ESLogger logger;
+
+ protected final Settings settings;
+
+ protected final Settings componentSettings;
+
+ public AbstractComponent(Settings settings) {
+ this.logger = Loggers.getLogger(getClass(), settings);
+ this.settings = settings;
+ this.componentSettings = settings.getComponentSettings(getClass());
+ }
+
+ public AbstractComponent(Settings settings, String prefixSettings) {
+ this.logger = Loggers.getLogger(getClass(), settings);
+ this.settings = settings;
+ this.componentSettings = settings.getComponentSettings(prefixSettings, getClass());
+ }
+
+ public AbstractComponent(Settings settings, Class customClass) {
+ this.logger = Loggers.getLogger(customClass, settings);
+ this.settings = settings;
+ this.componentSettings = settings.getComponentSettings(customClass);
+ }
+
+ public AbstractComponent(Settings settings, String prefixSettings, Class customClass) {
+ this.logger = Loggers.getLogger(customClass, settings);
+ this.settings = settings;
+ this.componentSettings = settings.getComponentSettings(prefixSettings, customClass);
+ }
+
+ public AbstractComponent(Settings settings, Class loggerClass, Class componentClass) {
+ this.logger = Loggers.getLogger(loggerClass, settings);
+ this.settings = settings;
+ this.componentSettings = settings.getComponentSettings(componentClass);
+ }
+
+ public AbstractComponent(Settings settings, String prefixSettings, Class loggerClass, Class componentClass) {
+ this.logger = Loggers.getLogger(loggerClass, settings);
+ this.settings = settings;
+ this.componentSettings = settings.getComponentSettings(prefixSettings, componentClass);
+ }
+
+ public String nodeName() {
+ return settings.get("name", "");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
new file mode 100644
index 0000000..613a196
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.component;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * Base class for components with a start/stop/close lifecycle, notifying the
+ * registered {@link LifecycleListener}s around each transition.
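+ * <p/>
+ * A minimal subclass sketch (<code>MyService</code> is a hypothetical name):
+ * <pre>
+ * public class MyService extends AbstractLifecycleComponent&lt;MyService&gt; {
+ *     public MyService(Settings settings) {
+ *         super(settings);
+ *     }
+ *     protected void doStart() throws ElasticsearchException {}
+ *     protected void doStop() throws ElasticsearchException {}
+ *     protected void doClose() throws ElasticsearchException {}
+ * }
+ * </pre>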
+ */
+public abstract class AbstractLifecycleComponent<T> extends AbstractComponent implements LifecycleComponent<T> {
+
+ protected final Lifecycle lifecycle = new Lifecycle();
+
+ private final List<LifecycleListener> listeners = new CopyOnWriteArrayList<LifecycleListener>();
+
+ protected AbstractLifecycleComponent(Settings settings) {
+ super(settings);
+ }
+
+ protected AbstractLifecycleComponent(Settings settings, Class customClass) {
+ super(settings, customClass);
+ }
+
+ protected AbstractLifecycleComponent(Settings settings, Class loggerClass, Class componentClass) {
+ super(settings, loggerClass, componentClass);
+ }
+
+ protected AbstractLifecycleComponent(Settings settings, String prefixSettings) {
+ super(settings, prefixSettings);
+ }
+
+ protected AbstractLifecycleComponent(Settings settings, String prefixSettings, Class customClass) {
+ super(settings, prefixSettings, customClass);
+ }
+
+ protected AbstractLifecycleComponent(Settings settings, String prefixSettings, Class loggerClass, Class componentClass) {
+ super(settings, prefixSettings, loggerClass, componentClass);
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return this.lifecycle.state();
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+ listeners.remove(listener);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public T start() throws ElasticsearchException {
+ if (!lifecycle.canMoveToStarted()) {
+ return (T) this;
+ }
+ for (LifecycleListener listener : listeners) {
+ listener.beforeStart();
+ }
+ doStart();
+ lifecycle.moveToStarted();
+ for (LifecycleListener listener : listeners) {
+ listener.afterStart();
+ }
+ return (T) this;
+ }
+
+ protected abstract void doStart() throws ElasticsearchException;
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public T stop() throws ElasticsearchException {
+ if (!lifecycle.canMoveToStopped()) {
+ return (T) this;
+ }
+ for (LifecycleListener listener : listeners) {
+ listener.beforeStop();
+ }
+ lifecycle.moveToStopped();
+ doStop();
+ for (LifecycleListener listener : listeners) {
+ listener.afterStop();
+ }
+ return (T) this;
+ }
+
+ protected abstract void doStop() throws ElasticsearchException;
+
+ @Override
+ public void close() throws ElasticsearchException {
+ if (lifecycle.started()) {
+ stop();
+ }
+ if (!lifecycle.canMoveToClosed()) {
+ return;
+ }
+ for (LifecycleListener listener : listeners) {
+ listener.beforeClose();
+ }
+ lifecycle.moveToClosed();
+ doClose();
+ for (LifecycleListener listener : listeners) {
+ listener.afterClose();
+ }
+ }
+
+ protected abstract void doClose() throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/common/component/CloseableComponent.java b/src/main/java/org/elasticsearch/common/component/CloseableComponent.java
new file mode 100644
index 0000000..2ef75ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/component/CloseableComponent.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.component;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * A component that can release its resources through {@link #close()}.
+ */
+public interface CloseableComponent {
+
+ void close() throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/src/main/java/org/elasticsearch/common/component/Lifecycle.java
new file mode 100644
index 0000000..92bc7aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/component/Lifecycle.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.component;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+/**
+ * Lifecycle state. Allows the following transitions:
+ * <ul>
+ * <li>INITIALIZED -> STARTED, STOPPED, CLOSED</li>
+ * <li>STARTED -> STOPPED</li>
+ * <li>STOPPED -> STARTED, CLOSED</li>
+ * <li>CLOSED -> </li>
+ * </ul>
+ * <p/>
+ * <p>Also allows staying in the same state. For example, when calling stop on a component, the
+ * following logic can be applied:
+ * <p/>
+ * <pre>
+ * public void stop() {
+ * if (!lifecycleState.moveToStopped()) {
+ * return;
+ * }
+ * // continue with stop logic
+ * }
+ * </pre>
+ * <p/>
+ * <p>Note, close is only allowed to be called when stopped, so make sure to stop the component first.
+ * Here is how the logic can be applied:
+ * <p/>
+ * <pre>
+ * public void close() {
+ * if (lifecycleState.started()) {
+ * stop();
+ * }
+ * if (!lifecycleState.moveToClosed()) {
+ * return;
+ * }
+ * // perform close logic here
+ * }
+ * </pre>
+ */
+public class Lifecycle {
+
+ public static enum State {
+ INITIALIZED,
+ STOPPED,
+ STARTED,
+ CLOSED
+ }
+
+ private volatile State state = State.INITIALIZED;
+
+ public State state() {
+ return this.state;
+ }
+
+ /**
+ * Returns <tt>true</tt> if the state is initialized.
+ */
+ public boolean initialized() {
+ return state == State.INITIALIZED;
+ }
+
+ /**
+ * Returns <tt>true</tt> if the state is started.
+ */
+ public boolean started() {
+ return state == State.STARTED;
+ }
+
+ /**
+ * Returns <tt>true</tt> if the state is stopped.
+ */
+ public boolean stopped() {
+ return state == State.STOPPED;
+ }
+
+ /**
+ * Returns <tt>true</tt> if the state is closed.
+ */
+ public boolean closed() {
+ return state == State.CLOSED;
+ }
+
+ public boolean stoppedOrClosed() {
+ Lifecycle.State state = this.state;
+ return state == State.STOPPED || state == State.CLOSED;
+ }
+
+ public boolean canMoveToStarted() throws ElasticsearchIllegalStateException {
+ State localState = this.state;
+ if (localState == State.INITIALIZED || localState == State.STOPPED) {
+ return true;
+ }
+ if (localState == State.STARTED) {
+ return false;
+ }
+ if (localState == State.CLOSED) {
+ throw new ElasticsearchIllegalStateException("Can't move to started state when closed");
+ }
+ throw new ElasticsearchIllegalStateException("Can't move to started with unknown state");
+ }
+
+
+ public boolean moveToStarted() throws ElasticsearchIllegalStateException {
+ State localState = this.state;
+ if (localState == State.INITIALIZED || localState == State.STOPPED) {
+ state = State.STARTED;
+ return true;
+ }
+ if (localState == State.STARTED) {
+ return false;
+ }
+ if (localState == State.CLOSED) {
+ throw new ElasticsearchIllegalStateException("Can't move to started state when closed");
+ }
+ throw new ElasticsearchIllegalStateException("Can't move to started with unknown state");
+ }
+
+ public boolean canMoveToStopped() throws ElasticsearchIllegalStateException {
+ State localState = state;
+ if (localState == State.STARTED) {
+ return true;
+ }
+ if (localState == State.INITIALIZED || localState == State.STOPPED) {
+ return false;
+ }
+ if (localState == State.CLOSED) {
+ throw new ElasticsearchIllegalStateException("Can't move to stopped state when closed");
+ }
+ throw new ElasticsearchIllegalStateException("Can't move to stopped with unknown state");
+ }
+
+ public boolean moveToStopped() throws ElasticsearchIllegalStateException {
+ State localState = state;
+ if (localState == State.STARTED) {
+ state = State.STOPPED;
+ return true;
+ }
+ if (localState == State.INITIALIZED || localState == State.STOPPED) {
+ return false;
+ }
+ if (localState == State.CLOSED) {
+ throw new ElasticsearchIllegalStateException("Can't move to stopped state when closed");
+ }
+ throw new ElasticsearchIllegalStateException("Can't move to stopped with unknown state");
+ }
+
+ public boolean canMoveToClosed() throws ElasticsearchIllegalStateException {
+ State localState = state;
+ if (localState == State.CLOSED) {
+ return false;
+ }
+ if (localState == State.STARTED) {
+ throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode");
+ }
+ return true;
+ }
+
+
+ public boolean moveToClosed() throws ElasticsearchIllegalStateException {
+ State localState = state;
+ if (localState == State.CLOSED) {
+ return false;
+ }
+ if (localState == State.STARTED) {
+ throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode");
+ }
+ state = State.CLOSED;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return state.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java b/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java
new file mode 100644
index 0000000..9718ae1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.component;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * A component that exposes its {@link Lifecycle.State} and can be started and stopped.
+ */
+public interface LifecycleComponent<T> extends CloseableComponent {
+
+ Lifecycle.State lifecycleState();
+
+ void addLifecycleListener(LifecycleListener listener);
+
+ void removeLifecycleListener(LifecycleListener listener);
+
+ T start() throws ElasticsearchException;
+
+ T stop() throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/common/component/LifecycleListener.java b/src/main/java/org/elasticsearch/common/component/LifecycleListener.java
new file mode 100644
index 0000000..c553957
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/component/LifecycleListener.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.component;
+
+/**
+ * A listener that is notified before and after each lifecycle transition of a component.
+ */
+public interface LifecycleListener {
+
+ void beforeStart();
+
+ void afterStart();
+
+ void beforeStop();
+
+ void afterStop();
+
+ void beforeClose();
+
+ void afterClose();
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java b/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java
new file mode 100644
index 0000000..6b67836
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.apache.lucene.store.IndexInput;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+/**
+ * @deprecated Used only for backward compatibility, to read old compressed files, since we now use codec-based compression
+ */
+@Deprecated
+public abstract class CompressedIndexInput<T extends CompressorContext> extends IndexInput {
+
+ private IndexInput in;
+ protected final T context;
+
+ private int version;
+ private long totalUncompressedLength;
+ private LongArray offsets;
+
+ private boolean closed;
+
+ protected byte[] uncompressed;
+ protected int uncompressedLength;
+ private int position = 0;
+ private int valid = 0;
+ private int currentOffsetIdx;
+ private long currentUncompressedChunkPointer;
+
+ public CompressedIndexInput(IndexInput in, T context) throws IOException {
+ super("compressed(" + in.toString() + ")");
+ this.in = in;
+ this.context = context;
+ readHeader(in);
+ this.version = in.readInt();
+ long metaDataPosition = in.readLong();
+ long headerLength = in.getFilePointer();
+ in.seek(metaDataPosition);
+ this.totalUncompressedLength = in.readVLong();
+ int size = in.readVInt();
+ offsets = BigArrays.newLongArray(size);
+ for (int i = 0; i < size; i++) {
+ offsets.set(i, in.readVLong());
+ }
+ this.currentOffsetIdx = -1;
+ this.currentUncompressedChunkPointer = 0;
+ in.seek(headerLength);
+ }
+
+ /**
+ * Reports the number of bytes that can now be read
+ * from decoded data buffer, without reading bytes from the underlying
+ * stream.
+ * Never throws an exception; returns number of bytes available without
+ * further reads from underlying source; -1 if stream has been closed, or
+ * 0 if an actual read (and possible blocking) is needed to find out.
+ */
+ public int available() throws IOException {
+ // if closed, return -1;
+ if (closed) {
+ return -1;
+ }
+ int left = (valid - position);
+ return (left <= 0) ? 0 : left;
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ if (!readyBuffer()) {
+ throw new EOFException();
+ }
+ return uncompressed[position++];
+ }
+
+ public int read(byte[] buffer, int offset, int length, boolean fullRead) throws IOException {
+ if (length < 1) {
+ return 0;
+ }
+ if (!readyBuffer()) {
+ return -1;
+ }
+ // First let's read however much data we happen to have...
+ int chunkLength = Math.min(valid - position, length);
+ System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
+ position += chunkLength;
+
+ if (chunkLength == length || !fullRead) {
+ return chunkLength;
+ }
+ // Need more data, then
+ int totalRead = chunkLength;
+ do {
+ offset += chunkLength;
+ if (!readyBuffer()) {
+ break;
+ }
+ chunkLength = Math.min(valid - position, (length - totalRead));
+ System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
+ position += chunkLength;
+ totalRead += chunkLength;
+ } while (totalRead < length);
+
+ return totalRead;
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
+ if (result < len) {
+ throw new EOFException();
+ }
+ }
+
+ @Override
+ public long getFilePointer() {
+ return currentUncompressedChunkPointer + position;
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
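+ // each decompressed chunk holds uncompressedLength bytes, so the target
+ // chunk is pos / uncompressedLength and the offset within that chunk is
+ // pos % uncompressedLength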
+ int idx = (int) (pos / uncompressedLength);
+ if (idx >= offsets.size()) {
+ // set the next "readyBuffer" to EOF
+ currentOffsetIdx = idx;
+ position = 0;
+ valid = 0;
+ return;
+ }
+
+ // TODO: optimize so we won't have to readyBuffer on seek, can keep the position around, and set it on readyBuffer in this case
+ if (idx != currentOffsetIdx) {
+ long pointer = offsets.get(idx);
+ in.seek(pointer);
+ position = 0;
+ valid = 0;
+ currentOffsetIdx = idx - 1; // we are going to increase it in readyBuffer...
+ readyBuffer();
+ }
+ position = (int) (pos % uncompressedLength);
+ }
+
+ @Override
+ public long length() {
+ return totalUncompressedLength;
+ }
+
+ @Override
+ public void close() throws IOException {
+ position = valid = 0;
+ if (!closed) {
+ closed = true;
+ doClose();
+ in.close();
+ }
+ }
+
+ protected abstract void doClose() throws IOException;
+
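+ /**
+ * Fills the uncompressed buffer by decompressing the next chunk from the
+ * underlying input; returns <tt>false</tt> when no more data is available.
+ */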
+ protected boolean readyBuffer() throws IOException {
+ if (position < valid) {
+ return true;
+ }
+ if (closed) {
+ return false;
+ }
+ // we reached the end...
+ if (currentOffsetIdx + 1 >= offsets.size()) {
+ return false;
+ }
+ valid = uncompress(in, uncompressed);
+ if (valid < 0) {
+ return false;
+ }
+ currentOffsetIdx++;
+ currentUncompressedChunkPointer = ((long) currentOffsetIdx) * uncompressedLength;
+ position = 0;
+ return (position < valid);
+ }
+
+ protected abstract void readHeader(IndexInput in) throws IOException;
+
+ /**
+ * Uncompress the data into the out array, returning the size uncompressed
+ */
+ protected abstract int uncompress(IndexInput in, byte[] out) throws IOException;
+
+ @Override
+ public IndexInput clone() {
+ // we clone and we need to make sure we keep the same positions!
+ CompressedIndexInput cloned = (CompressedIndexInput) super.clone();
+ cloned.uncompressed = new byte[uncompressedLength];
+ System.arraycopy(uncompressed, 0, cloned.uncompressed, 0, uncompressedLength);
+ cloned.in = (IndexInput) cloned.in.clone();
+ return cloned;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java b/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java
new file mode 100644
index 0000000..3df98a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+/**
+ * A stream input that transparently decompresses chunks of data read from an
+ * underlying {@link StreamInput}.
+ */
+public abstract class CompressedStreamInput<T extends CompressorContext> extends StreamInput {
+
+ private final StreamInput in;
+ protected final CompressorContext context;
+
+ private boolean closed;
+
+ protected byte[] uncompressed;
+ private int position = 0;
+ private int valid = 0;
+
+ public CompressedStreamInput(StreamInput in, T context) throws IOException {
+ this.in = in;
+ this.context = context;
+ super.setVersion(in.getVersion());
+ readHeader(in);
+ }
+
+ @Override
+ public StreamInput setVersion(Version version) {
+ in.setVersion(version);
+ return super.setVersion(version);
+ }
+
+ /**
+ * Expert!, resets to buffer start, without the need to decompress it again.
+ */
+ public void resetToBufferStart() {
+ this.position = 0;
+ }
+
+ /**
+ * Method is overridden to report number of bytes that can now be read
+ * from decoded data buffer, without reading bytes from the underlying
+ * stream.
+ * Never throws an exception; returns number of bytes available without
+ * further reads from underlying source; -1 if stream has been closed, or
+ * 0 if an actual read (and possible blocking) is needed to find out.
+ */
+ @Override
+ public int available() throws IOException {
+ // if closed, return -1;
+ if (closed) {
+ return -1;
+ }
+ int left = (valid - position);
+ return (left <= 0) ? 0 : left;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (!readyBuffer()) {
+ return -1;
+ }
+ return uncompressed[position++] & 255;
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ if (!readyBuffer()) {
+ throw new EOFException();
+ }
+ return uncompressed[position++];
+ }
+
+ @Override
+ public int read(byte[] buffer, int offset, int length) throws IOException {
+ return read(buffer, offset, length, false);
+ }
+
+ public int read(byte[] buffer, int offset, int length, boolean fullRead) throws IOException {
+ if (length < 1) {
+ return 0;
+ }
+ if (!readyBuffer()) {
+ return -1;
+ }
+ // First let's read however much data we happen to have...
+ int chunkLength = Math.min(valid - position, length);
+ System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
+ position += chunkLength;
+
+ if (chunkLength == length || !fullRead) {
+ return chunkLength;
+ }
+ // Need more data, then
+ int totalRead = chunkLength;
+ do {
+ offset += chunkLength;
+ if (!readyBuffer()) {
+ break;
+ }
+ chunkLength = Math.min(valid - position, (length - totalRead));
+ System.arraycopy(uncompressed, position, buffer, offset, chunkLength);
+ position += chunkLength;
+ totalRead += chunkLength;
+ } while (totalRead < length);
+
+ return totalRead;
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
+ if (result < len) {
+ throw new EOFException();
+ }
+ }
+
+ @Override
+ public void reset() throws IOException {
+ this.position = 0;
+ this.valid = 0;
+ in.reset();
+ }
+
+ @Override
+ public void close() throws IOException {
+ position = valid = 0;
+ if (!closed) {
+ closed = true;
+ doClose();
+ in.close();
+ }
+ }
+
+ protected abstract void doClose() throws IOException;
+
+ /**
+ * Fill the uncompressed bytes buffer by reading the underlying inputStream.
+ */
+ protected boolean readyBuffer() throws IOException {
+ if (position < valid) {
+ return true;
+ }
+ if (closed) {
+ return false;
+ }
+ valid = uncompress(in, uncompressed);
+ if (valid < 0) {
+ return false;
+ }
+ position = 0;
+ return (position < valid);
+ }
+
+ protected abstract void readHeader(StreamInput in) throws IOException;
+
+ /**
+ * Uncompress the data into the out array, returning the size uncompressed
+ */
+ protected abstract int uncompress(StreamInput in, byte[] out) throws IOException;
+
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/CompressedStreamOutput.java b/src/main/java/org/elasticsearch/common/compress/CompressedStreamOutput.java
new file mode 100644
index 0000000..009fddc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/CompressedStreamOutput.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A stream output that buffers writes, compresses full chunks and writes them
+ * to an underlying {@link StreamOutput}.
+ */
+public abstract class CompressedStreamOutput<T extends CompressorContext> extends StreamOutput {
+
+ private final StreamOutput out;
+ protected final T context;
+
+ protected byte[] uncompressed;
+ protected int uncompressedLength;
+ private int position = 0;
+
+ private boolean closed;
+
+ public CompressedStreamOutput(StreamOutput out, T context) throws IOException {
+ this.out = out;
+ this.context = context;
+ super.setVersion(out.getVersion());
+ writeHeader(out);
+ }
+
+ @Override
+ public StreamOutput setVersion(Version version) {
+ out.setVersion(version);
+ return super.setVersion(version);
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ if (position >= uncompressedLength) {
+ flushBuffer();
+ }
+ uncompressed[position++] = (byte) b;
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ if (position >= uncompressedLength) {
+ flushBuffer();
+ }
+ uncompressed[position++] = b;
+ }
+
+ @Override
+ public void writeBytes(byte[] input, int offset, int length) throws IOException {
+ // ES modification: skip the write entirely when length is 0
+ if (length == 0) {
+ return;
+ }
+ final int BUFFER_LEN = uncompressedLength;
+
+ // simple case first: buffering only (for trivially short writes)
+ int free = BUFFER_LEN - position;
+ if (free >= length) {
+ System.arraycopy(input, offset, uncompressed, position, length);
+ position += length;
+ return;
+ }
+ // fill partial input as much as possible and flush
+ if (position > 0) {
+ System.arraycopy(input, offset, uncompressed, position, free);
+ position += free;
+ flushBuffer();
+ offset += free;
+ length -= free;
+ }
+
+ // then write intermediate full block, if any, without copying:
+ while (length >= BUFFER_LEN) {
+ compress(input, offset, BUFFER_LEN, out);
+ offset += BUFFER_LEN;
+ length -= BUFFER_LEN;
+ }
+
+ // and finally, copy leftovers in input, if any
+ if (length > 0) {
+ System.arraycopy(input, offset, uncompressed, 0, length);
+ }
+ position = length;
+ }
+
+ @Override
+ public void flush() throws IOException {
+ flushBuffer();
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!closed) {
+ flushBuffer();
+ closed = true;
+ doClose();
+ out.close();
+ }
+ }
+
+ protected abstract void doClose() throws IOException;
+
+ @Override
+ public void reset() throws IOException {
+ position = 0;
+ out.reset();
+ }
+
+ private void flushBuffer() throws IOException {
+ if (position > 0) {
+ compress(uncompressed, 0, position, out);
+ position = 0;
+ }
+ }
+
+ protected abstract void writeHeader(StreamOutput out) throws IOException;
+
+ /**
+ * Compresses the data into the output
+ */
+ protected abstract void compress(byte[] data, int offset, int len, StreamOutput out) throws IOException;
+
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/CompressedString.java b/src/main/java/org/elasticsearch/common/compress/CompressedString.java
new file mode 100644
index 0000000..2596a4d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/CompressedString.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * A string stored in compressed form, decompressed on demand.
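+ * <p/>
+ * A minimal round-trip sketch:
+ * <pre>
+ * CompressedString cs = new CompressedString("hello");
+ * byte[] stored = cs.compressed();
+ * String back = cs.string(); // "hello"
+ * </pre>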
+ */
+public class CompressedString implements Streamable {
+
+ private byte[] bytes;
+
+ CompressedString() {
+ }
+
+ /**
+ * Constructor assuming the data provided is compressed (UTF8). It uses the provided
+ * array without copying it.
+ */
+ public CompressedString(byte[] compressed) {
+ this.bytes = compressed;
+ }
+
+ public CompressedString(BytesReference data) throws IOException {
+ Compressor compressor = CompressorFactory.compressor(data);
+ if (compressor != null) {
+ // already compressed...
+ this.bytes = data.toBytes();
+ } else {
+ BytesArray bytesArray = data.toBytesArray();
+ this.bytes = CompressorFactory.defaultCompressor().compress(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
+ }
+ }
+
+ /**
+ * Constructs a new compressed string, assuming the bytes are UTF8, by copying them over.
+ *
+ * @param data The byte array
+ * @param offset Offset into the byte array
+ * @param length The length of the data
+ * @throws IOException
+ */
+ public CompressedString(byte[] data, int offset, int length) throws IOException {
+ Compressor compressor = CompressorFactory.compressor(data, offset, length);
+ if (compressor != null) {
+ // already compressed...
+ this.bytes = Arrays.copyOfRange(data, offset, offset + length);
+ } else {
+ // default to LZF
+ this.bytes = CompressorFactory.defaultCompressor().compress(data, offset, length);
+ }
+ }
+
+ public CompressedString(String str) throws IOException {
+ BytesRef result = new BytesRef(str);
+ this.bytes = CompressorFactory.defaultCompressor().compress(result.bytes, result.offset, result.length);
+ }
+
+ public byte[] compressed() {
+ return this.bytes;
+ }
+
+ public byte[] uncompressed() throws IOException {
+ Compressor compressor = CompressorFactory.compressor(bytes);
+ return compressor.uncompress(bytes, 0, bytes.length);
+ }
+
+ public String string() throws IOException {
+ return new BytesRef(uncompressed()).utf8ToString();
+ }
+
+ public static CompressedString readCompressedString(StreamInput in) throws IOException {
+ CompressedString compressedString = new CompressedString();
+ compressedString.readFrom(in);
+ return compressedString;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ bytes = new byte[in.readVInt()];
+ in.readBytes(bytes, 0, bytes.length);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(bytes.length);
+ out.writeBytes(bytes);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ CompressedString that = (CompressedString) o;
+
+ if (!Arrays.equals(bytes, that.bytes)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return bytes != null ? Arrays.hashCode(bytes) : 0;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ return string();
+ } catch (IOException e) {
+ return "_na_";
+ }
+ }
+}
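A sketch of the round-trip behavior (illustrative only): constructing from a String compresses eagerly with the default LZF compressor, while string() sniffs the compressor from the stored header and uncompresses on demand. The byte[] and BytesReference constructors likewise detect already-compressed input and skip recompressing it.

    CompressedString cs = new CompressedString("some long mapping source");
    byte[] stored = cs.compressed();   // LZF-framed bytes, safe to persist as-is
    String back = cs.string();         // header-sniffed, uncompressed, decoded as UTF-8
    assert back.equals("some long mapping source");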
diff --git a/src/main/java/org/elasticsearch/common/compress/Compressor.java b/src/main/java/org/elasticsearch/common/compress/Compressor.java
new file mode 100644
index 0000000..3f3ba4b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/Compressor.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.apache.lucene.store.IndexInput;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.IOException;
+
+/**
+ * A compression codec: detects its own header in the various byte containers used across
+ * the transport and storage layers, and compresses/uncompresses arrays and streams.
+ */
+public interface Compressor {
+
+ String type();
+
+ void configure(Settings settings);
+
+ boolean isCompressed(BytesReference bytes);
+
+ boolean isCompressed(byte[] data, int offset, int length);
+
+ boolean isCompressed(ChannelBuffer buffer);
+
+ boolean isCompressed(IndexInput in) throws IOException;
+
+    /**
+     * Uncompresses the provided data. Whether data is compressed can be detected using {@link #isCompressed(byte[], int, int)}.
+     */
+ byte[] uncompress(byte[] data, int offset, int length) throws IOException;
+
+    /**
+     * Compresses the provided data. The result can later be detected as compressed using {@link #isCompressed(byte[], int, int)}.
+     */
+ byte[] compress(byte[] data, int offset, int length) throws IOException;
+
+ CompressedStreamInput streamInput(StreamInput in) throws IOException;
+
+ CompressedStreamOutput streamOutput(StreamOutput out) throws IOException;
+
+    /**
+     * @deprecated Kept for backward compatibility; compressing index files is now handled by Lucene's compressed codec.
+     */
+ CompressedIndexInput indexInput(IndexInput in) throws IOException;
+}
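The interface is detection-oriented: each supported container (byte array, BytesReference, netty ChannelBuffer, Lucene IndexInput) pairs an isCompressed probe with a matching codec path. A sketch of the array path, assuming a hypothetical data array:

    Compressor lzf = CompressorFactory.compressor("lzf");
    byte[] packed = lzf.compress(data, 0, data.length);
    assert lzf.isCompressed(packed, 0, packed.length); // output starts with a sniffable header
    byte[] unpacked = lzf.uncompress(packed, 0, packed.length);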
diff --git a/src/main/java/org/elasticsearch/common/compress/CompressorContext.java b/src/main/java/org/elasticsearch/common/compress/CompressorContext.java
new file mode 100644
index 0000000..9ad7055
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/CompressorContext.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+/**
+ * Marker interface for per-compressor context objects.
+ */
+public interface CompressorContext {
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java b/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
new file mode 100644
index 0000000..9eb9c9d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.store.IndexInput;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.compress.lzf.LZFCompressor;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Registry of the available {@link Compressor} implementations, with header-based detection helpers.
+ */
+public class CompressorFactory {
+
+ private static final LZFCompressor LZF = new LZFCompressor();
+
+ private static final Compressor[] compressors;
+ private static final ImmutableMap<String, Compressor> compressorsByType;
+ private static Compressor defaultCompressor;
+
+ static {
+ List<Compressor> compressorsX = Lists.newArrayList();
+ compressorsX.add(LZF);
+
+ compressors = compressorsX.toArray(new Compressor[compressorsX.size()]);
+ MapBuilder<String, Compressor> compressorsByTypeX = MapBuilder.newMapBuilder();
+ for (Compressor compressor : compressors) {
+ compressorsByTypeX.put(compressor.type(), compressor);
+ }
+ compressorsByType = compressorsByTypeX.immutableMap();
+
+ defaultCompressor = LZF;
+ }
+
+ public static synchronized void configure(Settings settings) {
+ for (Compressor compressor : compressors) {
+ compressor.configure(settings);
+ }
+ String defaultType = settings.get("compress.default.type", "lzf").toLowerCase(Locale.ENGLISH);
+ boolean found = false;
+ for (Compressor compressor : compressors) {
+ if (defaultType.equalsIgnoreCase(compressor.type())) {
+ defaultCompressor = compressor;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ Loggers.getLogger(CompressorFactory.class).warn("failed to find default type [{}]", defaultType);
+ }
+ }
+
+ public static synchronized void setDefaultCompressor(Compressor defaultCompressor) {
+ CompressorFactory.defaultCompressor = defaultCompressor;
+ }
+
+ public static Compressor defaultCompressor() {
+ return defaultCompressor;
+ }
+
+ public static boolean isCompressed(BytesReference bytes) {
+ return compressor(bytes) != null;
+ }
+
+ public static boolean isCompressed(byte[] data) {
+ return compressor(data, 0, data.length) != null;
+ }
+
+ public static boolean isCompressed(byte[] data, int offset, int length) {
+ return compressor(data, offset, length) != null;
+ }
+
+ public static boolean isCompressed(IndexInput in) throws IOException {
+ return compressor(in) != null;
+ }
+
+ @Nullable
+ public static Compressor compressor(BytesReference bytes) {
+ for (Compressor compressor : compressors) {
+ if (compressor.isCompressed(bytes)) {
+ return compressor;
+ }
+ }
+ return null;
+ }
+
+ @Nullable
+ public static Compressor compressor(byte[] data) {
+ return compressor(data, 0, data.length);
+ }
+
+ @Nullable
+ public static Compressor compressor(byte[] data, int offset, int length) {
+ for (Compressor compressor : compressors) {
+ if (compressor.isCompressed(data, offset, length)) {
+ return compressor;
+ }
+ }
+ return null;
+ }
+
+ @Nullable
+ public static Compressor compressor(ChannelBuffer buffer) {
+ for (Compressor compressor : compressors) {
+ if (compressor.isCompressed(buffer)) {
+ return compressor;
+ }
+ }
+ return null;
+ }
+
+ @Nullable
+ public static Compressor compressor(IndexInput in) throws IOException {
+ for (Compressor compressor : compressors) {
+ if (compressor.isCompressed(in)) {
+ return compressor;
+ }
+ }
+ return null;
+ }
+
+ public static Compressor compressor(String type) {
+ return compressorsByType.get(type);
+ }
+
+    /**
+     * Uncompresses the provided data if it is detected as compressed; otherwise returns it unchanged.
+     */
+ public static BytesReference uncompressIfNeeded(BytesReference bytes) throws IOException {
+ Compressor compressor = compressor(bytes);
+ if (compressor != null) {
+ if (bytes.hasArray()) {
+ return new BytesArray(compressor.uncompress(bytes.array(), bytes.arrayOffset(), bytes.length()));
+ }
+ StreamInput compressed = compressor.streamInput(bytes.streamInput());
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ Streams.copy(compressed, bStream);
+ compressed.close();
+ return bStream.bytes();
+ }
+ return bytes;
+ }
+}
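Since detection is header-based, callers can hand arbitrary bytes to uncompressIfNeeded and receive plain bytes either way. A sketch, assuming maybeCompressed is any BytesReference:

    BytesReference plain = CompressorFactory.uncompressIfNeeded(maybeCompressed);
    // with no recognizable compressor header it is returned untouched; otherwise the
    // array fast path is used, or a stream copy for references not backed by one array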
diff --git a/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java
new file mode 100644
index 0000000..6196fbe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import com.ning.compress.lzf.ChunkDecoder;
+import com.ning.compress.lzf.LZFChunk;
+import org.apache.lucene.store.IndexInput;
+import org.elasticsearch.common.compress.CompressedIndexInput;
+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * LZF-compressed {@link IndexInput}, retained to read legacy compressed index files.
+ */
+@Deprecated
+public class LZFCompressedIndexInput extends CompressedIndexInput<LZFCompressorContext> {
+
+ private final ChunkDecoder decoder;
+ // scratch area buffer
+ private byte[] inputBuffer;
+
+ public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
+ super(in, LZFCompressorContext.INSTANCE);
+
+ this.decoder = decoder;
+ this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
+ this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
+ this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
+ }
+
+ @Override
+ protected void readHeader(IndexInput in) throws IOException {
+ byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
+ in.readBytes(header, 0, header.length, false);
+ if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
+ throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
+ }
+ }
+
+ @Override
+ protected int uncompress(IndexInput in, byte[] out) throws IOException {
+ return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ // nothing to do here...
+ }
+
+ @Override
+ public IndexInput clone() {
+ LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
+ cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
+ return cloned;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java
new file mode 100644
index 0000000..caaaadb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import com.ning.compress.BufferRecycler;
+import com.ning.compress.lzf.ChunkDecoder;
+import com.ning.compress.lzf.LZFChunk;
+import org.elasticsearch.common.compress.CompressedStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
+/**
+ * LZF implementation of the compressed stream input, decoding chunk-framed data into recycled buffers.
+ */
+public class LZFCompressedStreamInput extends CompressedStreamInput<LZFCompressorContext> {
+
+ private final BufferRecycler recycler;
+
+ private final ChunkDecoder decoder;
+
+ // scratch area buffer
+ private byte[] inputBuffer;
+
+ public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
+ super(in, LZFCompressorContext.INSTANCE);
+ this.recycler = BufferRecycler.instance();
+ this.decoder = decoder;
+
+ this.uncompressed = recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
+ this.inputBuffer = recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
+ }
+
+ @Override
+ public void readHeader(StreamInput in) throws IOException {
+ // nothing to do here, each chunk has a header
+ }
+
+ @Override
+ public int uncompress(StreamInput in, byte[] out) throws IOException {
+ return decoder.decodeChunk(in, inputBuffer, out);
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ byte[] buf = inputBuffer;
+ if (buf != null) {
+ inputBuffer = null;
+ recycler.releaseInputBuffer(buf);
+ }
+ buf = uncompressed;
+ if (buf != null) {
+ uncompressed = null;
+            recycler.releaseDecodeBuffer(buf); // release the saved buffer, not the (now null) field
+ }
+ }
+}
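The matching read path, sketched under the assumption that compressed holds LZF-framed bytes (for instance the output of the stream-output sketch above) and that expectedLength is known to the caller:

    BytesReference ref = new BytesArray(compressed);
    StreamInput plain = CompressorFactory.compressor(ref).streamInput(ref.streamInput());
    byte[] out = new byte[expectedLength];
    plain.readBytes(out, 0, expectedLength); // decoded chunk by chunk into recycled buffers
    plain.close();                           // doClose() returns the buffers to the recycler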
diff --git a/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
new file mode 100644
index 0000000..00899fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import com.ning.compress.BufferRecycler;
+import com.ning.compress.lzf.ChunkEncoder;
+import com.ning.compress.lzf.LZFChunk;
+import org.elasticsearch.common.compress.CompressedStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * LZF implementation of the compressed stream output; each chunk is written with its own LZF header.
+ */
+public class LZFCompressedStreamOutput extends CompressedStreamOutput<LZFCompressorContext> {
+
+ private final BufferRecycler recycler;
+ private final ChunkEncoder encoder;
+
+ public LZFCompressedStreamOutput(StreamOutput out) throws IOException {
+ super(out, LZFCompressorContext.INSTANCE);
+ this.recycler = BufferRecycler.instance();
+ this.uncompressed = this.recycler.allocOutputBuffer(LZFChunk.MAX_CHUNK_LEN);
+ this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
+ this.encoder = new ChunkEncoder(LZFChunk.MAX_CHUNK_LEN);
+ }
+
+ @Override
+ public void writeHeader(StreamOutput out) throws IOException {
+ // nothing to do here, each chunk has a header of its own
+ }
+
+ @Override
+ protected void compress(byte[] data, int offset, int len, StreamOutput out) throws IOException {
+ encoder.encodeAndWriteChunk(data, offset, len, out);
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ byte[] buf = uncompressed;
+ if (buf != null) {
+ uncompressed = null;
+ recycler.releaseOutputBuffer(buf);
+ }
+ encoder.close();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java
new file mode 100644
index 0000000..297d2ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import com.ning.compress.lzf.ChunkDecoder;
+import com.ning.compress.lzf.LZFChunk;
+import com.ning.compress.lzf.LZFEncoder;
+import com.ning.compress.lzf.util.ChunkDecoderFactory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressedIndexInput;
+import org.elasticsearch.common.compress.CompressedStreamInput;
+import org.elasticsearch.common.compress.CompressedStreamOutput;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.IOException;
+
+/**
+ * LZF implementation of {@link Compressor}, based on the chunk-framed encoding of the ning-compress library.
+ */
+public class LZFCompressor implements Compressor {
+
+ static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};
+
+ public static final String TYPE = "lzf";
+
+ private ChunkDecoder decoder;
+
+ public LZFCompressor() {
+ if (Constants.SUN_OS) {
+ this.decoder = ChunkDecoderFactory.safeInstance();
+ } else {
+ this.decoder = ChunkDecoderFactory.optimalInstance();
+ }
+ Loggers.getLogger(LZFCompressor.class).debug("using [{}] decoder", this.decoder.getClass().getSimpleName());
+ }
+
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ @Override
+ public void configure(Settings settings) {
+ String decoderType = settings.get("compress.lzf.decoder", null);
+ if (decoderType != null) {
+ if ("optimal".equalsIgnoreCase(decoderType)) {
+ this.decoder = ChunkDecoderFactory.optimalInstance();
+ Loggers.getLogger(LZFCompressor.class).debug("using [{}] decoder", this.decoder.getClass().getSimpleName());
+ } else if ("safe".equalsIgnoreCase(decoderType)) {
+ this.decoder = ChunkDecoderFactory.safeInstance();
+ Loggers.getLogger(LZFCompressor.class).debug("using [{}] decoder", this.decoder.getClass().getSimpleName());
+ } else {
+ Loggers.getLogger(LZFCompressor.class).warn("decoder type not recognized [{}], still using [{}]", decoderType, this.decoder.getClass().getSimpleName());
+ }
+ }
+ }
+
+ @Override
+ public boolean isCompressed(BytesReference bytes) {
+ return bytes.length() >= 3 &&
+ bytes.get(0) == LZFChunk.BYTE_Z &&
+ bytes.get(1) == LZFChunk.BYTE_V &&
+ (bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
+ }
+
+ @Override
+ public boolean isCompressed(byte[] data, int offset, int length) {
+ return length >= 3 &&
+ data[offset] == LZFChunk.BYTE_Z &&
+ data[offset + 1] == LZFChunk.BYTE_V &&
+ (data[offset + 2] == LZFChunk.BLOCK_TYPE_COMPRESSED || data[offset + 2] == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
+ }
+
+ @Override
+ public boolean isCompressed(ChannelBuffer buffer) {
+ int offset = buffer.readerIndex();
+ return buffer.readableBytes() >= 3 &&
+ buffer.getByte(offset) == LZFChunk.BYTE_Z &&
+ buffer.getByte(offset + 1) == LZFChunk.BYTE_V &&
+ (buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_COMPRESSED || buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
+ }
+
+ @Override
+ public boolean isCompressed(IndexInput in) throws IOException {
+ long currentPointer = in.getFilePointer();
+        // since we have some metadata before the first compressed header, we check for our specific header
+ if (in.length() - currentPointer < (LUCENE_HEADER.length)) {
+ return false;
+ }
+ for (int i = 0; i < LUCENE_HEADER.length; i++) {
+ if (in.readByte() != LUCENE_HEADER[i]) {
+ in.seek(currentPointer);
+ return false;
+ }
+ }
+ in.seek(currentPointer);
+ return true;
+ }
+
+ @Override
+ public byte[] uncompress(byte[] data, int offset, int length) throws IOException {
+ return decoder.decode(data, offset, length);
+ }
+
+ @Override
+ public byte[] compress(byte[] data, int offset, int length) throws IOException {
+ return LZFEncoder.encode(data, offset, length);
+ }
+
+ @Override
+ public CompressedStreamInput streamInput(StreamInput in) throws IOException {
+ return new LZFCompressedStreamInput(in, decoder);
+ }
+
+ @Override
+ public CompressedStreamOutput streamOutput(StreamOutput out) throws IOException {
+ return new LZFCompressedStreamOutput(out);
+ }
+
+ @Override
+ public CompressedIndexInput indexInput(IndexInput in) throws IOException {
+ return new LZFCompressedIndexInput(in, decoder);
+ }
+}
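Detection keys off the LZF chunk signature: every chunk starts with the magic bytes 'Z' and 'V' followed by a block-type byte, which is exactly what the isCompressed overloads above test; the IndexInput variant instead checks the dedicated {'L', 'Z', 'F', 0} header. A sketch, with raw as a hypothetical input array:

    byte[] encoded = new LZFCompressor().compress(raw, 0, raw.length);
    // encoded[0] == LZFChunk.BYTE_Z, encoded[1] == LZFChunk.BYTE_V, and encoded[2] is
    // BLOCK_TYPE_COMPRESSED (or BLOCK_TYPE_NON_COMPRESSED for inputs that do not shrink)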
diff --git a/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressorContext.java b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressorContext.java
new file mode 100644
index 0000000..89c7b18
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressorContext.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import org.elasticsearch.common.compress.CompressorContext;
+
+/**
+ * Singleton context for the LZF compressor; it carries no per-stream state.
+ */
+public class LZFCompressorContext implements CompressorContext {
+
+ public static final LZFCompressorContext INSTANCE = new LZFCompressorContext();
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java
new file mode 100644
index 0000000..6bac086
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java
@@ -0,0 +1,368 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import org.apache.lucene.util.SloppyMath;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.DistanceUnit;
+
+import java.util.Locale;
+
+/**
+ * Geo distance calculation.
+ */
+public enum GeoDistance {
+ /**
+ * Calculates distance as points on a plane. Faster, but less accurate than {@link #ARC}.
+ */
+ PLANE() {
+ @Override
+ public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
+ double px = targetLongitude - sourceLongitude;
+ double py = targetLatitude - sourceLatitude;
+ return Math.sqrt(px * px + py * py) * unit.getDistancePerDegree();
+ }
+
+ @Override
+ public double normalize(double distance, DistanceUnit unit) {
+ return distance;
+ }
+
+ @Override
+ public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ return new PlaneFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
+ }
+ },
+
+    /**
+     * Calculates a distance factor (the cosine of the angular distance between the points)
+     * instead of an actual distance. Cheaper to compute; compare against distances
+     * converted with {@link #normalize(double, DistanceUnit)}.
+     */
+ FACTOR() {
+ @Override
+ public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
+ double longitudeDifference = targetLongitude - sourceLongitude;
+ double a = Math.toRadians(90D - sourceLatitude);
+ double c = Math.toRadians(90D - targetLatitude);
+ return (Math.cos(a) * Math.cos(c)) + (Math.sin(a) * Math.sin(c) * Math.cos(Math.toRadians(longitudeDifference)));
+ }
+
+ @Override
+ public double normalize(double distance, DistanceUnit unit) {
+ return Math.cos(distance / unit.getEarthRadius());
+ }
+
+ @Override
+ public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ return new FactorFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
+ }
+ },
+ /**
+ * Calculates distance as points on a globe.
+ */
+ ARC() {
+ @Override
+ public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
+ double x1 = sourceLatitude * Math.PI / 180D;
+ double x2 = targetLatitude * Math.PI / 180D;
+ double h1 = (1D - Math.cos(x1 - x2)) / 2D;
+ double h2 = (1D - Math.cos((sourceLongitude - targetLongitude) * Math.PI / 180D)) / 2D;
+ double h = h1 + Math.cos(x1) * Math.cos(x2) * h2;
+ return unit.fromMeters(GeoUtils.EARTH_MEAN_RADIUS * 2D * Math.asin(Math.min(1, Math.sqrt(h))));
+ }
+
+ @Override
+ public double normalize(double distance, DistanceUnit unit) {
+ return distance;
+ }
+
+ @Override
+ public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ return new ArcFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
+ }
+ },
+    /**
+     * Calculates distance as points on a globe using a fast approximation
+     * ({@link SloppyMath#haversin}). Accuracy decreases close to the poles.
+     */
+ SLOPPY_ARC() {
+
+ @Override
+ public double normalize(double distance, DistanceUnit unit) {
+ return distance;
+ }
+
+ @Override
+ public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
+ return unit.fromMeters(SloppyMath.haversin(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude) * 1000.0);
+ }
+
+ @Override
+ public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ return new SloppyArcFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
+ }
+ };
+
+    /**
+     * Default {@link GeoDistance} function, to be used if no specific function has been
+     * selected. This is an alias for <code>SLOPPY_ARC</code>.
+     */
+ public static final GeoDistance DEFAULT = SLOPPY_ARC;
+
+ public abstract double normalize(double distance, DistanceUnit unit);
+
+ public abstract double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit);
+
+ public abstract FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit);
+
+ private static final double MIN_LAT = Math.toRadians(-90d); // -PI/2
+ private static final double MAX_LAT = Math.toRadians(90d); // PI/2
+ private static final double MIN_LON = Math.toRadians(-180d); // -PI
+ private static final double MAX_LON = Math.toRadians(180d); // PI
+
+ public static DistanceBoundingCheck distanceBoundingCheck(double sourceLatitude, double sourceLongitude, double distance, DistanceUnit unit) {
+ // angular distance in radians on a great circle
+ double radDist = distance / unit.getEarthRadius();
+
+ double radLat = Math.toRadians(sourceLatitude);
+ double radLon = Math.toRadians(sourceLongitude);
+
+ double minLat = radLat - radDist;
+ double maxLat = radLat + radDist;
+
+ double minLon, maxLon;
+ if (minLat > MIN_LAT && maxLat < MAX_LAT) {
+ double deltaLon = Math.asin(Math.sin(radDist) / Math.cos(radLat));
+ minLon = radLon - deltaLon;
+ if (minLon < MIN_LON) minLon += 2d * Math.PI;
+ maxLon = radLon + deltaLon;
+ if (maxLon > MAX_LON) maxLon -= 2d * Math.PI;
+ } else {
+ // a pole is within the distance
+ minLat = Math.max(minLat, MIN_LAT);
+ maxLat = Math.min(maxLat, MAX_LAT);
+ minLon = MIN_LON;
+ maxLon = MAX_LON;
+ }
+
+ GeoPoint topLeft = new GeoPoint(Math.toDegrees(maxLat), Math.toDegrees(minLon));
+ GeoPoint bottomRight = new GeoPoint(Math.toDegrees(minLat), Math.toDegrees(maxLon));
+ if (minLon > maxLon) {
+ return new Meridian180DistanceBoundingCheck(topLeft, bottomRight);
+ }
+ return new SimpleDistanceBoundingCheck(topLeft, bottomRight);
+ }
+
+ /**
+ * Get a {@link GeoDistance} according to a given name. Valid values are
+ *
+ * <ul>
+ * <li><b>plane</b> for <code>GeoDistance.PLANE</code></li>
+ * <li><b>sloppy_arc</b> for <code>GeoDistance.SLOPPY_ARC</code></li>
+ * <li><b>factor</b> for <code>GeoDistance.FACTOR</code></li>
+ * <li><b>arc</b> for <code>GeoDistance.ARC</code></li>
+ * </ul>
+ *
+ * @param name name of the {@link GeoDistance}
+ * @return a {@link GeoDistance}
+ */
+ public static GeoDistance fromString(String name) {
+ name = name.toLowerCase(Locale.ROOT);
+ if ("plane".equals(name)) {
+ return PLANE;
+ } else if ("arc".equals(name)) {
+ return ARC;
+ } else if ("sloppy_arc".equals(name)) {
+ return SLOPPY_ARC;
+ } else if ("factor".equals(name)) {
+ return FACTOR;
+ }
+ throw new ElasticsearchIllegalArgumentException("No geo distance for [" + name + "]");
+ }
+
+ public static interface FixedSourceDistance {
+
+ double calculate(double targetLatitude, double targetLongitude);
+ }
+
+ public static interface DistanceBoundingCheck {
+
+ boolean isWithin(double targetLatitude, double targetLongitude);
+
+ GeoPoint topLeft();
+
+ GeoPoint bottomRight();
+ }
+
+    public static final AlwaysDistanceBoundingCheck ALWAYS_INSTANCE = new AlwaysDistanceBoundingCheck();
+
+ private static class AlwaysDistanceBoundingCheck implements DistanceBoundingCheck {
+ @Override
+ public boolean isWithin(double targetLatitude, double targetLongitude) {
+ return true;
+ }
+
+ @Override
+ public GeoPoint topLeft() {
+ return null;
+ }
+
+ @Override
+ public GeoPoint bottomRight() {
+ return null;
+ }
+ }
+
+ public static class Meridian180DistanceBoundingCheck implements DistanceBoundingCheck {
+
+ private final GeoPoint topLeft;
+ private final GeoPoint bottomRight;
+
+ public Meridian180DistanceBoundingCheck(GeoPoint topLeft, GeoPoint bottomRight) {
+ this.topLeft = topLeft;
+ this.bottomRight = bottomRight;
+ }
+
+ @Override
+ public boolean isWithin(double targetLatitude, double targetLongitude) {
+ return (targetLatitude >= bottomRight.lat() && targetLatitude <= topLeft.lat()) &&
+ (targetLongitude >= topLeft.lon() || targetLongitude <= bottomRight.lon());
+ }
+
+ @Override
+ public GeoPoint topLeft() {
+ return topLeft;
+ }
+
+ @Override
+ public GeoPoint bottomRight() {
+ return bottomRight;
+ }
+ }
+
+ public static class SimpleDistanceBoundingCheck implements DistanceBoundingCheck {
+ private final GeoPoint topLeft;
+ private final GeoPoint bottomRight;
+
+ public SimpleDistanceBoundingCheck(GeoPoint topLeft, GeoPoint bottomRight) {
+ this.topLeft = topLeft;
+ this.bottomRight = bottomRight;
+ }
+
+ @Override
+ public boolean isWithin(double targetLatitude, double targetLongitude) {
+ return (targetLatitude >= bottomRight.lat() && targetLatitude <= topLeft.lat()) &&
+ (targetLongitude >= topLeft.lon() && targetLongitude <= bottomRight.lon());
+ }
+
+ @Override
+ public GeoPoint topLeft() {
+ return topLeft;
+ }
+
+ @Override
+ public GeoPoint bottomRight() {
+ return bottomRight;
+ }
+ }
+
+ public static class PlaneFixedSourceDistance implements FixedSourceDistance {
+
+ private final double sourceLatitude;
+ private final double sourceLongitude;
+ private final double distancePerDegree;
+
+ public PlaneFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ this.sourceLatitude = sourceLatitude;
+ this.sourceLongitude = sourceLongitude;
+ this.distancePerDegree = unit.getDistancePerDegree();
+ }
+
+ @Override
+ public double calculate(double targetLatitude, double targetLongitude) {
+ double px = targetLongitude - sourceLongitude;
+ double py = targetLatitude - sourceLatitude;
+ return Math.sqrt(px * px + py * py) * distancePerDegree;
+ }
+ }
+
+ public static class FactorFixedSourceDistance implements FixedSourceDistance {
+
+ private final double sourceLongitude;
+
+ private final double a;
+ private final double sinA;
+ private final double cosA;
+
+ public FactorFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ this.sourceLongitude = sourceLongitude;
+ this.a = Math.toRadians(90D - sourceLatitude);
+ this.sinA = Math.sin(a);
+ this.cosA = Math.cos(a);
+ }
+
+ @Override
+ public double calculate(double targetLatitude, double targetLongitude) {
+ double longitudeDifference = targetLongitude - sourceLongitude;
+ double c = Math.toRadians(90D - targetLatitude);
+ return (cosA * Math.cos(c)) + (sinA * Math.sin(c) * Math.cos(Math.toRadians(longitudeDifference)));
+ }
+ }
+
+    /**
+     * Base implementation of {@link FixedSourceDistance}. This class keeps the basic parameters
+     * of a distance function based on a fixed source: latitude, longitude and unit.
+     */
+ public static abstract class FixedSourceDistanceBase implements FixedSourceDistance {
+ protected final double sourceLatitude;
+ protected final double sourceLongitude;
+ protected final DistanceUnit unit;
+
+ public FixedSourceDistanceBase(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ this.sourceLatitude = sourceLatitude;
+ this.sourceLongitude = sourceLongitude;
+ this.unit = unit;
+ }
+ }
+
+ public static class ArcFixedSourceDistance extends FixedSourceDistanceBase {
+
+ public ArcFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ super(sourceLatitude, sourceLongitude, unit);
+ }
+
+ @Override
+ public double calculate(double targetLatitude, double targetLongitude) {
+ return ARC.calculate(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude, unit);
+ }
+
+ }
+
+ public static class SloppyArcFixedSourceDistance extends FixedSourceDistanceBase {
+
+ public SloppyArcFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+ super(sourceLatitude, sourceLongitude, unit);
+ }
+
+ @Override
+ public double calculate(double targetLatitude, double targetLongitude) {
+ return SLOPPY_ARC.calculate(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude, unit);
+ }
+ }
+}
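A sketch contrasting the variants on one pair of points (approximate Berlin and Munich coordinates, chosen for illustration): PLANE trades accuracy for speed, SLOPPY_ARC approximates ARC via SloppyMath.haversin, and FACTOR yields a comparison stand-in rather than a distance.

    double arc = GeoDistance.ARC.calculate(52.52, 13.40, 48.14, 11.58, DistanceUnit.KILOMETERS);
    double fast = GeoDistance.SLOPPY_ARC.calculate(52.52, 13.40, 48.14, 11.58, DistanceUnit.KILOMETERS);
    // for many distances from one origin, hoist the source into a FixedSourceDistance:
    GeoDistance.FixedSourceDistance fromBerlin =
            GeoDistance.DEFAULT.fixedSourceDistance(52.52, 13.40, DistanceUnit.KILOMETERS);
    double toMunich = fromBerlin.calculate(48.14, 11.58);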
diff --git a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java
new file mode 100644
index 0000000..d24cfbf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java
@@ -0,0 +1,487 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+/**
+ * Utilities for encoding and decoding geohashes. Based on
+ * http://en.wikipedia.org/wiki/Geohash.
+ */
+// LUCENE MONITOR: monitor against spatial package
+// replaced with native DECODE_MAP
+public class GeoHashUtils {
+
+ private static final char[] BASE_32 = {'0', '1', '2', '3', '4', '5', '6',
+ '7', '8', '9', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'm', 'n',
+ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
+
+ public static final int PRECISION = 12;
+ private static final int[] BITS = {16, 8, 4, 2, 1};
+
+ private GeoHashUtils() {
+ }
+
+ public static String encode(double latitude, double longitude) {
+ return encode(latitude, longitude, PRECISION);
+ }
+
+    /**
+     * Encodes the given latitude and longitude into a geohash
+     *
+     * @param latitude  Latitude to encode
+     * @param longitude Longitude to encode
+     * @param precision Length of the geohash in base-32 characters
+     * @return Geohash encoding of the longitude and latitude
+     */
+ public static String encode(double latitude, double longitude, int precision) {
+// double[] latInterval = {-90.0, 90.0};
+// double[] lngInterval = {-180.0, 180.0};
+ double latInterval0 = -90.0;
+ double latInterval1 = 90.0;
+ double lngInterval0 = -180.0;
+ double lngInterval1 = 180.0;
+
+ final StringBuilder geohash = new StringBuilder();
+ boolean isEven = true;
+
+ int bit = 0;
+ int ch = 0;
+
+ while (geohash.length() < precision) {
+ double mid = 0.0;
+ if (isEven) {
+// mid = (lngInterval[0] + lngInterval[1]) / 2D;
+ mid = (lngInterval0 + lngInterval1) / 2D;
+ if (longitude > mid) {
+ ch |= BITS[bit];
+// lngInterval[0] = mid;
+ lngInterval0 = mid;
+ } else {
+// lngInterval[1] = mid;
+ lngInterval1 = mid;
+ }
+ } else {
+// mid = (latInterval[0] + latInterval[1]) / 2D;
+ mid = (latInterval0 + latInterval1) / 2D;
+ if (latitude > mid) {
+ ch |= BITS[bit];
+// latInterval[0] = mid;
+ latInterval0 = mid;
+ } else {
+// latInterval[1] = mid;
+ latInterval1 = mid;
+ }
+ }
+
+ isEven = !isEven;
+
+ if (bit < 4) {
+ bit++;
+ } else {
+ geohash.append(BASE_32[ch]);
+ bit = 0;
+ ch = 0;
+ }
+ }
+
+ return geohash.toString();
+ }
+
+ private static final char encode(int x, int y) {
+ return BASE_32[((x & 1) + ((y & 1) * 2) + ((x & 2) * 2) + ((y & 2) * 4) + ((x & 4) * 4)) % 32];
+ }
+
+ /**
+ * Calculate all neighbors of a given geohash cell.
+ *
+     * @param geohash Geohash of the specified cell
+ * @return geohashes of all neighbor cells
+ */
+ public static List<String> neighbors(String geohash) {
+ return addNeighbors(geohash, geohash.length(), new ArrayList<String>(8));
+ }
+
+ /**
+ * Calculate the geohash of a neighbor of a geohash
+ *
+ * @param geohash the geohash of a cell
+ * @param level level of the geohash
+ * @param dx delta of the first grid coordinate (must be -1, 0 or +1)
+ * @param dy delta of the second grid coordinate (must be -1, 0 or +1)
+ * @return geohash of the defined cell
+ */
+ private final static String neighbor(String geohash, int level, int dx, int dy) {
+ int cell = decode(geohash.charAt(level - 1));
+
+ // Decoding the Geohash bit pattern to determine grid coordinates
+ int x0 = cell & 1; // first bit of x
+ int y0 = cell & 2; // first bit of y
+ int x1 = cell & 4; // second bit of x
+ int y1 = cell & 8; // second bit of y
+ int x2 = cell & 16; // third bit of x
+
+        // combine the bit pattern into grid coordinates.
+        // note that the semantics of x and y swap on each level
+ int x = x0 + (x1 / 2) + (x2 / 4);
+ int y = (y0 / 2) + (y1 / 4);
+
+ if (level == 1) {
+ // Root cells at north (namely "bcfguvyz") or at
+ // south (namely "0145hjnp") do not have neighbors
+ // in north/south direction
+ if ((dy < 0 && y == 0) || (dy > 0 && y == 3)) {
+ return null;
+ } else {
+ return Character.toString(encode(x + dx, y + dy));
+ }
+ } else {
+ // define grid coordinates for next level
+ final int nx = ((level % 2) == 1) ? (x + dx) : (x + dy);
+ final int ny = ((level % 2) == 1) ? (y + dy) : (y + dx);
+
+ // define grid limits for current level
+ final int xLimit = ((level % 2) == 0) ? 7 : 3;
+ final int yLimit = ((level % 2) == 0) ? 3 : 7;
+
+            // if the defined neighbor has the same parent as the current cell,
+            // encode the cell directly. Otherwise find the neighboring cell
+            // recursively. Since encoding wraps around within a cell,
+            // it can be encoded here.
+            if (nx >= 0 && nx <= xLimit && ny >= 0 && ny <= yLimit) {
+ return geohash.substring(0, level - 1) + encode(nx, ny);
+ } else {
+ String neighbor = neighbor(geohash, level - 1, dx, dy);
+ if(neighbor != null) {
+ return neighbor + encode(nx, ny);
+ } else {
+ return null;
+ }
+ }
+ }
+ }
+
+ /**
+ * Add all geohashes of the cells next to a given geohash to a list.
+ *
+ * @param geohash Geohash of a specified cell
+ * @param length level of the given geohash
+ * @param neighbors list to add the neighbors to
+ * @return the given list
+ */
+ private static final List<String> addNeighbors(String geohash, int length, List<String> neighbors) {
+ String south = neighbor(geohash, length, 0, -1);
+ String north = neighbor(geohash, length, 0, +1);
+
+ if (north != null) {
+ neighbors.add(neighbor(north, length, -1, 0));
+ neighbors.add(north);
+ neighbors.add(neighbor(north, length, +1, 0));
+ }
+
+ neighbors.add(neighbor(geohash, length, -1, 0));
+ neighbors.add(neighbor(geohash, length, +1, 0));
+
+ if (south != null) {
+ neighbors.add(neighbor(south, length, -1, 0));
+ neighbors.add(south);
+ neighbors.add(neighbor(south, length, +1, 0));
+ }
+
+ return neighbors;
+ }
+
+ private static final int decode(char geo) {
+ switch (geo) {
+ case '0':
+ return 0;
+ case '1':
+ return 1;
+ case '2':
+ return 2;
+ case '3':
+ return 3;
+ case '4':
+ return 4;
+ case '5':
+ return 5;
+ case '6':
+ return 6;
+ case '7':
+ return 7;
+ case '8':
+ return 8;
+ case '9':
+ return 9;
+ case 'b':
+ return 10;
+ case 'c':
+ return 11;
+ case 'd':
+ return 12;
+ case 'e':
+ return 13;
+ case 'f':
+ return 14;
+ case 'g':
+ return 15;
+ case 'h':
+ return 16;
+ case 'j':
+ return 17;
+ case 'k':
+ return 18;
+ case 'm':
+ return 19;
+ case 'n':
+ return 20;
+ case 'p':
+ return 21;
+ case 'q':
+ return 22;
+ case 'r':
+ return 23;
+ case 's':
+ return 24;
+ case 't':
+ return 25;
+ case 'u':
+ return 26;
+ case 'v':
+ return 27;
+ case 'w':
+ return 28;
+ case 'x':
+ return 29;
+ case 'y':
+ return 30;
+ case 'z':
+ return 31;
+ default:
+ throw new ElasticsearchIllegalArgumentException("the character '" + geo + "' is not a valid geohash character");
+ }
+ }
+
+ /**
+ * Decodes the given geohash
+ *
+     * @param geohash Geohash to decode
+ * @return {@link GeoPoint} at the center of cell, given by the geohash
+ */
+ public static GeoPoint decode(String geohash) {
+ return decode(geohash, new GeoPoint());
+ }
+
+    /**
+     * Decodes the given geohash into a latitude and longitude
+     *
+     * @param geohash Geohash to decode
+     * @param ret     {@link GeoPoint} to reuse for the result
+     * @return the given {@link GeoPoint} reset to the center of the
+     *         cell defined by the geohash
+     */
+ public static GeoPoint decode(String geohash, GeoPoint ret) {
+ double[] interval = decodeCell(geohash);
+ return ret.reset((interval[0] + interval[1]) / 2D, (interval[2] + interval[3]) / 2D);
+ }
+
+    /**
+     * Decodes the given geohash into a geohash cell defined by the points northWest and southEast
+     *
+     * @param geohash   Geohash to decode
+     * @param northWest the point north/west of the cell
+     * @param southEast the point south/east of the cell
+     */
+ public static void decodeCell(String geohash, GeoPoint northWest, GeoPoint southEast) {
+ double[] interval = decodeCell(geohash);
+ northWest.reset(interval[1], interval[2]);
+ southEast.reset(interval[0], interval[3]);
+ }
+
+ private static double[] decodeCell(String geohash) {
+ double[] interval = {-90.0, 90.0, -180.0, 180.0};
+ boolean isEven = true;
+
+ for (int i = 0; i < geohash.length(); i++) {
+ final int cd = decode(geohash.charAt(i));
+
+ for (int mask : BITS) {
+ if (isEven) {
+ if ((cd & mask) != 0) {
+ interval[2] = (interval[2] + interval[3]) / 2D;
+ } else {
+ interval[3] = (interval[2] + interval[3]) / 2D;
+ }
+ } else {
+ if ((cd & mask) != 0) {
+ interval[0] = (interval[0] + interval[1]) / 2D;
+ } else {
+ interval[1] = (interval[0] + interval[1]) / 2D;
+ }
+ }
+ isEven = !isEven;
+ }
+ }
+ return interval;
+ }
+
+ //========== long-based encodings for geohashes ========================================
+
+
+    /**
+     * Encodes latitude and longitude information into a single long with variable precision.
+     * Up to 12 levels of precision are supported, which should offer sub-metre resolution.
+     *
+     * @param latitude  Latitude to encode
+     * @param longitude Longitude to encode
+     * @param precision The required precision between 1 and 12
+     * @return A single long where 4 bits are used for holding the precision and the remaining
+     *         60 bits are reserved for 5 bit cell identifiers giving up to 12 layers.
+     */
+ public static long encodeAsLong(double latitude, double longitude, int precision) {
+        if (precision > 12 || precision < 1) {
+            throw new ElasticsearchIllegalArgumentException("Illegal precision length of " + precision +
+                    ". Long-based geohashes only support precisions between 1 and 12");
+        }
+ double latInterval0 = -90.0;
+ double latInterval1 = 90.0;
+ double lngInterval0 = -180.0;
+ double lngInterval1 = 180.0;
+
+        long geohash = 0L;
+ boolean isEven = true;
+
+ int bit = 0;
+ int ch = 0;
+
+        int geohashLength = 0;
+ while (geohashLength < precision) {
+ double mid = 0.0;
+ if (isEven) {
+ mid = (lngInterval0 + lngInterval1) / 2D;
+ if (longitude > mid) {
+ ch |= BITS[bit];
+ lngInterval0 = mid;
+ } else {
+ lngInterval1 = mid;
+ }
+ } else {
+ mid = (latInterval0 + latInterval1) / 2D;
+ if (latitude > mid) {
+ ch |= BITS[bit];
+ latInterval0 = mid;
+ } else {
+ latInterval1 = mid;
+ }
+ }
+
+ isEven = !isEven;
+
+ if (bit < 4) {
+ bit++;
+ } else {
+                geohashLength++;
+                geohash |= ch;
+                if (geohashLength < precision) {
+                    geohash <<= 5;
+                }
+ bit = 0;
+ ch = 0;
+ }
+ }
+        geohash <<= 4;
+        geohash |= precision;
+ return geohash;
+ }
+
+ /**
+ * Formats a geohash held as a long as a more conventional
+ * String-based geohash
+ * @param geohashAsLong a geohash encoded as a long
+ * @return A traditional base32-based String representation of a geohash
+ */
+    public static String toString(long geohashAsLong) {
+        int precision = (int) (geohashAsLong & 15);
+        char[] chars = new char[precision];
+        geohashAsLong >>= 4;
+        for (int i = precision - 1; i >= 0; i--) {
+            chars[i] = BASE_32[(int) (geohashAsLong & 31)];
+            geohashAsLong >>= 5;
+        }
+        return new String(chars);
+    }
+
+ public static GeoPoint decode(long geohash) {
+ GeoPoint point = new GeoPoint();
+ decode(geohash, point);
+ return point;
+ }
+
+    /**
+     * Decodes the given long-format geohash into a latitude and longitude
+     *
+     * @param geohash long format Geohash to decode
+     * @param ret     The {@link GeoPoint} into which the latitude and longitude will be stored
+     */
+ public static void decode(long geohash, GeoPoint ret) {
+ double[] interval = decodeCell(geohash);
+ ret.reset((interval[0] + interval[1]) / 2D, (interval[2] + interval[3]) / 2D);
+
+ }
+
+ private static double[] decodeCell(long geohash) {
+ double[] interval = {-90.0, 90.0, -180.0, 180.0};
+ boolean isEven = true;
+
+        int precision = (int) (geohash & 15);
+        geohash >>= 4;
+        int[] cds = new int[precision];
+        for (int i = precision - 1; i >= 0; i--) {
+            cds[i] = (int) (geohash & 31);
+            geohash >>= 5;
+        }
+
+        for (int i = 0; i < cds.length; i++) {
+ final int cd = cds[i];
+ for (int mask : BITS) {
+ if (isEven) {
+ if ((cd & mask) != 0) {
+ interval[2] = (interval[2] + interval[3]) / 2D;
+ } else {
+ interval[3] = (interval[2] + interval[3]) / 2D;
+ }
+ } else {
+ if ((cd & mask) != 0) {
+ interval[0] = (interval[0] + interval[1]) / 2D;
+ } else {
+ interval[1] = (interval[0] + interval[1]) / 2D;
+ }
+ }
+ isEven = !isEven;
+ }
+ }
+ return interval;
+ }
+} \ No newline at end of file
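A sketch of the two encodings side by side (coordinates chosen for illustration): string geohashes refine one base-32 character, i.e. 5 bits, per level, while the long form packs the same cells into the upper bits and keeps the precision in the low 4 bits, so both forms name the same cell.

    String hash = GeoHashUtils.encode(52.52, 13.40, 6);        // a 6-character cell
    GeoPoint center = GeoHashUtils.decode(hash);               // center of that cell
    long packed = GeoHashUtils.encodeAsLong(52.52, 13.40, 6);  // 6 * 5 cell bits + 4 precision bits
    assert GeoHashUtils.toString(packed).equals(hash);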
diff --git a/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
new file mode 100644
index 0000000..e2243d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+
+import java.io.IOException;
+
+/**
+ * A geographic point, held as latitude and longitude in degrees.
+ */
+public class GeoPoint {
+
+ public static final String LATITUDE = GeoPointFieldMapper.Names.LAT;
+ public static final String LONGITUDE = GeoPointFieldMapper.Names.LON;
+ public static final String GEOHASH = GeoPointFieldMapper.Names.GEOHASH;
+
+ private double lat;
+ private double lon;
+
+ public GeoPoint() {
+ }
+
+ public GeoPoint(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ }
+
+ public GeoPoint reset(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ return this;
+ }
+
+ public GeoPoint resetLat(double lat) {
+ this.lat = lat;
+ return this;
+ }
+
+ public GeoPoint resetLon(double lon) {
+ this.lon = lon;
+ return this;
+ }
+
+ public GeoPoint resetFromString(String value) {
+ int comma = value.indexOf(',');
+ if (comma != -1) {
+ lat = Double.parseDouble(value.substring(0, comma).trim());
+ lon = Double.parseDouble(value.substring(comma + 1).trim());
+ } else {
+ resetFromGeoHash(value);
+ }
+ return this;
+ }
+
+ public GeoPoint resetFromGeoHash(String hash) {
+ GeoHashUtils.decode(hash, this);
+ return this;
+ }
+
+ void latlon(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ }
+
+ public final double lat() {
+ return this.lat;
+ }
+
+ public final double getLat() {
+ return this.lat;
+ }
+
+ public final double lon() {
+ return this.lon;
+ }
+
+ public final double getLon() {
+ return this.lon;
+ }
+
+ public final String geohash() {
+ return GeoHashUtils.encode(lat, lon);
+ }
+
+ public final String getGeohash() {
+ return GeoHashUtils.encode(lat, lon);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ GeoPoint geoPoint = (GeoPoint) o;
+
+ if (Double.compare(geoPoint.lat, lat) != 0) return false;
+ if (Double.compare(geoPoint.lon, lon) != 0) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result;
+ long temp;
+ temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L;
+ result = (int) (temp ^ (temp >>> 32));
+ temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L;
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ return result;
+ }
+
+ public String toString() {
+ return "[" + lat + ", " + lon + "]";
+ }
+
+ public static GeoPoint parseFromLatLon(String latLon) {
+ GeoPoint point = new GeoPoint();
+ point.resetFromString(latLon);
+ return point;
+ }
+
+ /**
+ * Parse a {@link GeoPoint} with a {@link XContentParser}:
+ *
+ * @param parser {@link XContentParser} to parse the value from
+     * @return new {@link GeoPoint} parsed from the parser
+ *
+ * @throws IOException
+ * @throws org.elasticsearch.ElasticsearchParseException
+ */
+ public static GeoPoint parse(XContentParser parser) throws IOException, ElasticsearchParseException {
+ return parse(parser, new GeoPoint());
+ }
+
+ /**
+ * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms:
+ *
+ * <ul>
+ * <li>Object: <pre>{&quot;lat&quot;: <i>&lt;latitude&gt;</i>, &quot;lon&quot;: <i>&lt;longitude&gt;</i>}</pre></li>
+ * <li>String: <pre>&quot;<i>&lt;latitude&gt;</i>,<i>&lt;longitude&gt;</i>&quot;</pre></li>
+ * <li>Geohash: <pre>&quot;<i>&lt;geohash&gt;</i>&quot;</pre></li>
+ * <li>Array: <pre>[<i>&lt;longitude&gt;</i>,<i>&lt;latitude&gt;</i>]</pre></li>
+ * </ul>
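+ * <p>
+ * A minimal usage sketch (illustrative only; assumes the parser is positioned
+ * on the first token of the value, and note the GeoJSON-style [lon, lat]
+ * order of the array form):
+ * <pre>
+ *     XContentParser parser = JsonXContent.jsonXContent.createParser("[-71.34, 41.12]");
+ *     parser.nextToken(); // move to START_ARRAY
+ *     GeoPoint point = GeoPoint.parse(parser); // lat = 41.12, lon = -71.34
+ * </pre>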
+ *
+ * @param parser {@link XContentParser} to parse the value from
+ * @param point A {@link GeoPoint} that will be reset by the values parsed
+ * @return the given {@link GeoPoint} reset to the parsed values
+ *
+ * @throws IOException if reading from the parser fails
+ * @throws org.elasticsearch.ElasticsearchParseException if the value cannot be parsed as a geo point
+ */
+ public static GeoPoint parse(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException {
+ if(parser.currentToken() == Token.START_OBJECT) {
+ while(parser.nextToken() != Token.END_OBJECT) {
+ if(parser.currentToken() == Token.FIELD_NAME) {
+ String field = parser.text();
+ if(LATITUDE.equals(field)) {
+ parser.nextToken();
+ switch (parser.currentToken()) {
+ case VALUE_NUMBER:
+ case VALUE_STRING:
+ point.resetLat(parser.doubleValue(true));
+ break;
+ default:
+ throw new ElasticsearchParseException("latitude must be a number");
+ }
+ } else if (LONGITUDE.equals(field)) {
+ parser.nextToken();
+ switch (parser.currentToken()) {
+ case VALUE_NUMBER:
+ case VALUE_STRING:
+ point.resetLon(parser.doubleValue(true));
+ break;
+ default:
+ throw new ElasticsearchParseException("longitude must be a number");
+ }
+ } else if (GEOHASH.equals(field)) {
+ if(parser.nextToken() == Token.VALUE_STRING) {
+ point.resetFromGeoHash(parser.text());
+ } else {
+ throw new ElasticsearchParseException("geohash must be a string");
+ }
+ } else {
+ throw new ElasticsearchParseException("field must be either '" + LATITUDE + "', '" + LONGITUDE + "' or '" + GEOHASH + "'");
+ }
+ } else {
+ throw new ElasticsearchParseException("Token '"+parser.currentToken()+"' not allowed");
+ }
+ }
+ return point;
+ } else if(parser.currentToken() == Token.START_ARRAY) {
+ int element = 0;
+ while(parser.nextToken() != Token.END_ARRAY) {
+ if(parser.currentToken() == Token.VALUE_NUMBER) {
+ element++;
+ if(element == 1) {
+ point.resetLon(parser.doubleValue());
+ } else if(element == 2) {
+ point.resetLat(parser.doubleValue());
+ } else {
+ throw new ElasticsearchParseException("only two values allowed");
+ }
+ } else {
+ throw new ElasticsearchParseException("Numeric value expected");
+ }
+ }
+ return point;
+ } else if(parser.currentToken() == Token.VALUE_STRING) {
+ String data = parser.text();
+ int comma = data.indexOf(',');
+ if(comma > 0) {
+ double lat = Double.parseDouble(data.substring(0, comma).trim());
+ double lon = Double.parseDouble(data.substring(comma + 1).trim());
+ return point.reset(lat, lon);
+ } else {
+ point.resetFromGeoHash(data);
+ return point;
+ }
+ } else {
+ throw new ElasticsearchParseException("geo_point expected");
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
new file mode 100644
index 0000000..2efc6eb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.common.unit.DistanceUnit;
+
+/**
+ * Utility methods for geographical calculations on geohash and quadtree
+ * cell grids and for coordinate normalization.
+ */
+public class GeoUtils {
+
+ /** Earth ellipsoid major axis defined by WGS 84 in meters */
+ public static final double EARTH_SEMI_MAJOR_AXIS = 6378137.0; // meters (WGS 84)
+
+ /** Earth ellipsoid minor axis defined by WGS 84 in meters */
+ public static final double EARTH_SEMI_MINOR_AXIS = 6356752.314245; // meters (WGS 84)
+
+ /** Earth mean radius defined by WGS 84 in meters */
+ public static final double EARTH_MEAN_RADIUS = 6371008.7714D; // meters (WGS 84)
+
+ /** Earth axis ratio defined by WGS 84 (0.996647189335) */
+ public static final double EARTH_AXIS_RATIO = EARTH_SEMI_MINOR_AXIS / EARTH_SEMI_MAJOR_AXIS;
+
+ /** Earth ellipsoid equator length in meters */
+ public static final double EARTH_EQUATOR = 2*Math.PI * EARTH_SEMI_MAJOR_AXIS;
+
+ /** Earth ellipsoid polar distance in meters */
+ public static final double EARTH_POLAR_DISTANCE = Math.PI * EARTH_SEMI_MINOR_AXIS;
+
+ /**
+ * Calculate the width (in meters) of geohash cells at a specific level
+ * @param level geohash level, must be greater than or equal to zero
+ * @return the width of cells at level in meters
+ */
+ public static double geoHashCellWidth(int level) {
+ assert level>=0;
+ // Geohash cells are split into 32 subcells at each level. The grid
+ // alternates at each level between an 8x4 and a 4x8 grid
+ return EARTH_EQUATOR / (1L<<((((level+1)/2)*3) + ((level/2)*2)));
+ }
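+ // Worked example: at level 1 the equator is divided into 8 cells, so
+ // geoHashCellWidth(1) == EARTH_EQUATOR / 8 (roughly 5,009,377 m); level 2
+ // divides each of those into 4 columns, giving EARTH_EQUATOR / 32
+ // (roughly 1,252,344 m).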
+
+ /**
+ * Calculate the width (in meters) of quadtree cells at a specific level
+ * @param level quadtree level, must be greater than or equal to zero
+ * @return the width of cells at level in meters
+ */
+ public static double quadTreeCellWidth(int level) {
+ assert level >=0;
+ return EARTH_EQUATOR / (1L<<level);
+ }
+
+ /**
+ * Calculate the height (in meters) of geohash cells at a specific level
+ * @param level geohash level, must be greater than or equal to zero
+ * @return the height of cells at level in meters
+ */
+ public static double geoHashCellHeight(int level) {
+ assert level>=0;
+ // Geohash cells are split into 32 subcells at each level. The grid
+ // alternates at each level between an 8x4 and a 4x8 grid
+ return EARTH_POLAR_DISTANCE / (1L<<((((level+1)/2)*2) + ((level/2)*3)));
+ }
+
+ /**
+ * Calculate the height (in meters) of quadtree cells at a specific level
+ * @param level quadtree level, must be greater than or equal to zero
+ * @return the height of cells at level in meters
+ */
+ public static double quadTreeCellHeight(int level) {
+ assert level>=0;
+ return EARTH_POLAR_DISTANCE / (1L<<level);
+ }
+
+ /**
+ * Calculate the size (in meters) of geohash cells at a specific level
+ * @param level geohash level, must be greater than or equal to zero
+ * @return the size of cells at level in meters
+ */
+ public static double geoHashCellSize(int level) {
+ assert level>=0;
+ final double w = geoHashCellWidth(level);
+ final double h = geoHashCellHeight(level);
+ return Math.sqrt(w*w + h*h);
+ }
+
+ /**
+ * Calculate the size (in meters) of quadtree cells at a specific level
+ * @param level quadtree level, must be greater than or equal to zero
+ * @return the size of cells at level in meters
+ */
+ public static double quadTreeCellSize(int level) {
+ assert level>=0;
+ return Math.sqrt(EARTH_POLAR_DISTANCE*EARTH_POLAR_DISTANCE + EARTH_EQUATOR*EARTH_EQUATOR) / (1L<<level);
+ }
+
+ /**
+ * Calculate the number of levels needed for a specific precision. The
+ * diagonal of a quadtree cell will not exceed the given precision.
+ * @param meters maximum diagonal of cells in meters (must be greater than or equal to zero)
+ * @return number of levels needed to achieve the precision
+ */
+ public static int quadTreeLevelsForPrecision(double meters) {
+ assert meters >= 0;
+ if(meters == 0) {
+ return QuadPrefixTree.MAX_LEVELS_POSSIBLE;
+ } else {
+ final double ratio = 1+(EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
+ final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
+ final long part = Math.round(Math.ceil(EARTH_EQUATOR / width));
+ final int level = Long.SIZE - Long.numberOfLeadingZeros(part)-1; // (log_2)
+ return (part <= (1L << level)) ? level : (level + 1); // adjust level
+ }
+ }
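+ // Worked example (approximate): for meters = 1000 the cell width becomes
+ // about 667 m, so part = ceil(EARTH_EQUATOR / 667) is about 60,000, which
+ // lies between 2^15 (32,768) and 2^16 (65,536); the method therefore
+ // returns 16. Indeed quadTreeCellSize(16) is about 683 m <= 1000 m, while
+ // quadTreeCellSize(15) is about 1366 m.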
+
+ /**
+ * Calculate the number of levels needed for a specific precision. The
+ * diagonal of a quadtree cell will not exceed the given precision.
+ * @param distance maximum diagonal of cells as a distance-unit string (must be greater than or equal to zero)
+ * @return number of levels needed to achieve the precision
+ */
+ public static int quadTreeLevelsForPrecision(String distance) {
+ return quadTreeLevelsForPrecision(DistanceUnit.METERS.parse(distance, DistanceUnit.DEFAULT));
+ }
+
+ /**
+ * Calculate the number of levels needed for a specific precision. The
+ * diagonal of a geohash cell will not exceed the given precision.
+ * @param meters maximum diagonal of cells in meters (must be greater than or equal to zero)
+ * @return number of levels needed to achieve the precision
+ */
+ public static int geoHashLevelsForPrecision(double meters) {
+ assert meters >= 0;
+
+ if(meters == 0) {
+ return GeohashPrefixTree.getMaxLevelsPossible();
+ } else {
+ final double ratio = 1+(EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
+ final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
+ final double part = Math.ceil(EARTH_EQUATOR / width);
+ if(part == 1) {
+ return 1;
+ }
+ final int bits = (int)Math.round(Math.ceil(Math.log(part) / Math.log(2)));
+ final int full = bits / 5; // number of 5 bit subdivisions
+ final int left = bits - full*5; // bit representing the last level
+ final int even = full + (left>0?1:0); // number of even levels
+ final int odd = full + (left>3?1:0); // number of odd levels
+ return even+odd;
+ }
+ }
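+ // Worked example (approximate): for meters = 1000 roughly 16 bits of
+ // subdivision are required, giving full = 3 complete 5-bit levels with
+ // left = 1 remaining bit, so even = 4, odd = 3 and the method returns
+ // 7 levels (a 7-character geohash, cells of roughly 153 x 152 m).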
+
+ /**
+ * Calculate the number of levels needed for a specific precision. The
+ * diagonal of a geohash cell will not exceed the given precision.
+ * @param distance maximum diagonal of cells as a distance-unit string (must be greater than or equal to zero)
+ * @return number of levels needed to achieve the precision
+ */
+ public static int geoHashLevelsForPrecision(String distance) {
+ return geoHashLevelsForPrecision(DistanceUnit.METERS.parse(distance, DistanceUnit.DEFAULT));
+ }
+
+ /**
+ * Normalize longitude to lie within the -180 (exclusive) to 180 (inclusive) range.
+ *
+ * @param lon Longitude to normalize
+ * @return The normalized longitude.
+ */
+ public static double normalizeLon(double lon) {
+ return centeredModulus(lon, 360);
+ }
+
+ /**
+ * Normalize latitude to lie within the -90 to 90 (both inclusive) range.
+ * <p/>
+ * Note: You should not normalize longitude and latitude separately,
+ * because when normalizing latitude it may be necessary to
+ * add a shift of 180&deg; in the longitude.
+ * For this purpose, you should call the
+ * {@link #normalizePoint(GeoPoint)} function.
+ *
+ * @param lat Latitude to normalize
+ * @return The normalized latitude.
+ * @see #normalizePoint(GeoPoint)
+ */
+ public static double normalizeLat(double lat) {
+ lat = centeredModulus(lat, 360);
+ if (lat < -90) {
+ lat = -180 - lat;
+ } else if (lat > 90) {
+ lat = 180 - lat;
+ }
+ return lat;
+ }
+
+ /**
+ * Normalize the geo {@code Point} for its coordinates to lie within their
+ * respective normalized ranges.
+ * <p/>
+ * Note: A shift of 180&deg; is applied in the longitude if necessary,
+ * in order to properly normalize the latitude.
+ *
+ * @param point The point to normalize in-place.
+ */
+ public static void normalizePoint(GeoPoint point) {
+ normalizePoint(point, true, true);
+ }
+
+ /**
+ * Normalize the geo {@code Point} for the given coordinates to lie within
+ * their respective normalized ranges.
+ * <p/>
+ * You can control which coordinate gets normalized with the two flags.
+ * <p/>
+ * Note: A shift of 180&deg; is applied in the longitude if necessary,
+ * in order to properly normalize the latitude.
+ * If normalizing latitude but not longitude, it is assumed that
+ * the longitude is in the form x+k*360, with x in ]-180;180],
+ * and k is meaningful to the application.
+ * Therefore x will be adjusted while keeping k preserved.
+ *
+ * @param point The point to normalize in-place.
+ * @param normLat Whether to normalize latitude or leave it as is.
+ * @param normLon Whether to normalize longitude.
+ */
+ public static void normalizePoint(GeoPoint point, boolean normLat, boolean normLon) {
+ double lat = point.lat();
+ double lon = point.lon();
+
+ normLat = normLat && (lat>90 || lat <= -90);
+ normLon = normLon && (lon>180 || lon <= -180);
+
+ if (normLat) {
+ lat = centeredModulus(lat, 360);
+ boolean shift = true;
+ if (lat < -90) {
+ lat = -180 - lat;
+ } else if (lat > 90) {
+ lat = 180 - lat;
+ } else {
+ // No need to shift the longitude, and the latitude is normalized
+ shift = false;
+ }
+ if (shift) {
+ if (normLon) {
+ lon += 180;
+ } else {
+ // Longitude won't be normalized,
+ // keep it in the form x+k*360 (with x in ]-180;180])
+ // by only changing x, assuming k is meaningful for the user application.
+ lon += normalizeLon(lon) > 0 ? -180 : 180;
+ }
+ }
+ }
+ if (normLon) {
+ lon = centeredModulus(lon, 360);
+ }
+ point.reset(lat, lon);
+ }
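+ // Worked example: normalizing (lat=91, lon=10) crosses the pole, so the
+ // latitude folds to 180 - 91 = 89 and the longitude is shifted by 180
+ // degrees, yielding (89, -170).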
+
+ private static double centeredModulus(double dividend, double divisor) {
+ double rtn = dividend % divisor;
+ if (rtn <= 0) {
+ rtn += divisor;
+ }
+ if (rtn > divisor / 2) {
+ rtn -= divisor;
+ }
+ return rtn;
+ }
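+ // For example: centeredModulus(190, 360) == -170 and
+ // centeredModulus(-190, 360) == 170, keeping results in ]-180;180].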
+
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java b/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java
new file mode 100644
index 0000000..6ee1693
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import java.util.Locale;
+
+/**
+ * Enum representing the spatial relationship between the shape of a query or filter
+ * and indexed shapes, used to determine whether a document matches
+ */
+public enum ShapeRelation {
+
+ INTERSECTS("intersects"),
+ DISJOINT("disjoint"),
+ WITHIN("within");
+
+ private final String relationName;
+
+ ShapeRelation(String relationName) {
+ this.relationName = relationName;
+ }
+
+ public static ShapeRelation getRelationByName(String name) {
+ name = name.toLowerCase(Locale.ENGLISH);
+ for (ShapeRelation relation : ShapeRelation.values()) {
+ if (relation.relationName.equals(name)) {
+ return relation;
+ }
+ }
+ return null;
+ }
+
+ public String getRelationName() {
+ return relationName;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java b/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java
new file mode 100644
index 0000000..882bfcb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.common.Classes;
+
+/**
+ * Reports whether the optional Spatial4J and JTS libraries are available on the classpath.
+ */
+public class ShapesAvailability {
+
+ public static final boolean SPATIAL4J_AVAILABLE;
+ public static final boolean JTS_AVAILABLE;
+
+ static {
+ boolean xSPATIAL4J_AVAILABLE;
+ try {
+ Classes.getDefaultClassLoader().loadClass("com.spatial4j.core.shape.impl.PointImpl");
+ xSPATIAL4J_AVAILABLE = true;
+ } catch (Throwable t) {
+ xSPATIAL4J_AVAILABLE = false;
+ }
+ SPATIAL4J_AVAILABLE = xSPATIAL4J_AVAILABLE;
+
+ boolean xJTS_AVAILABLE;
+ try {
+ Classes.getDefaultClassLoader().loadClass("com.vividsolutions.jts.geom.GeometryFactory");
+ xJTS_AVAILABLE = true;
+ } catch (Throwable t) {
+ xJTS_AVAILABLE = false;
+ }
+ JTS_AVAILABLE = xJTS_AVAILABLE;
+ }
+
+
+ private ShapesAvailability() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java b/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java
new file mode 100644
index 0000000..02ee8a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.geo;
+
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
+
+/**
+ * Strategies for indexing geo shapes with a Lucene {@link PrefixTreeStrategy}.
+ */
+public enum SpatialStrategy {
+
+ TERM("term"),
+ RECURSIVE("recursive");
+
+ private final String strategyName;
+
+ private SpatialStrategy(String strategyName) {
+ this.strategyName = strategyName;
+ }
+
+ public String getStrategyName() {
+ return strategyName;
+ }
+
+ public PrefixTreeStrategy create(SpatialPrefixTree grid, String fieldName) {
+ if (this == TERM) {
+ return new TermQueryPrefixTreeStrategy(grid, fieldName);
+ }
+ return new RecursivePrefixTreeStrategy(grid, fieldName);
+ }
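+ // Illustrative usage (a sketch; assumes a geographic Spatial4J context):
+ //   SpatialPrefixTree grid = new GeohashPrefixTree(SpatialContext.GEO, 9);
+ //   PrefixTreeStrategy strategy = SpatialStrategy.RECURSIVE.create(grid, "location");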
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/BaseLineStringBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/BaseLineStringBuilder.java
new file mode 100644
index 0000000..090623b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/BaseLineStringBuilder.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.Geometry;
+import com.vividsolutions.jts.geom.GeometryFactory;
+import com.vividsolutions.jts.geom.LineString;
+
+public abstract class BaseLineStringBuilder<E extends BaseLineStringBuilder<E>> extends PointCollection<E> {
+
+ protected BaseLineStringBuilder() {
+ this(new ArrayList<Coordinate>());
+ }
+
+ protected BaseLineStringBuilder(ArrayList<Coordinate> points) {
+ super(points);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return coordinatesToXcontent(builder, false);
+ }
+
+ @Override
+ public Shape build() {
+ Coordinate[] coordinates = points.toArray(new Coordinate[points.size()]);
+ Geometry geometry;
+ if(wrapdateline) {
+ ArrayList<LineString> strings = decompose(FACTORY, coordinates, new ArrayList<LineString>());
+
+ if(strings.size() == 1) {
+ geometry = strings.get(0);
+ } else {
+ LineString[] linestrings = strings.toArray(new LineString[strings.size()]);
+ geometry = FACTORY.createMultiLineString(linestrings);
+ }
+
+ } else {
+ geometry = FACTORY.createLineString(coordinates);
+ }
+ return new JtsGeometry(geometry, SPATIAL_CONTEXT, !wrapdateline);
+ }
+
+ protected static ArrayList<LineString> decompose(GeometryFactory factory, Coordinate[] coordinates, ArrayList<LineString> strings) {
+ for(Coordinate[] part : decompose(+DATELINE, coordinates)) {
+ for(Coordinate[] line : decompose(-DATELINE, part)) {
+ strings.add(factory.createLineString(line));
+ }
+ }
+ return strings;
+ }
+
+ /**
+ * Decompose a linestring, given as an array of coordinates, at a vertical line.
+ *
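+ * <p>
+ * For example (a sketch of the intended behavior): a linestring from
+ * (170,0) to (190,0) decomposed at dateline +180 yields the two parts
+ * (170,0)..(180,0) and, shifted back into range, (-180,0)..(-170,0).
+ *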
+ * @param dateline x-axis intercept of the vertical line
+ * @param coordinates coordinates forming the linestring
+ * @return array of linestrings given as coordinate arrays
+ */
+ protected static Coordinate[][] decompose(double dateline, Coordinate[] coordinates) {
+ int offset = 0;
+ ArrayList<Coordinate[]> parts = new ArrayList<Coordinate[]>();
+
+ double shift = coordinates[0].x > DATELINE ? DATELINE : (coordinates[0].x < -DATELINE ? -DATELINE : 0);
+
+ for (int i = 1; i < coordinates.length; i++) {
+ double t = intersection(coordinates[i-1], coordinates[i], dateline);
+ if(!Double.isNaN(t)) {
+ Coordinate[] part;
+ if(t<1) {
+ part = Arrays.copyOfRange(coordinates, offset, i+1);
+ part[part.length-1] = Edge.position(coordinates[i-1], coordinates[i], t);
+ coordinates[offset+i-1] = Edge.position(coordinates[i-1], coordinates[i], t);
+ shift(shift, part);
+ offset = i-1;
+ shift = coordinates[i].x > DATELINE ? DATELINE : (coordinates[i].x < -DATELINE ? -DATELINE : 0);
+ } else {
+ part = shift(shift, Arrays.copyOfRange(coordinates, offset, i+1));
+ offset = i;
+ }
+ parts.add(part);
+ }
+ }
+
+ if(offset == 0) {
+ parts.add(shift(shift, coordinates));
+ } else if(offset < coordinates.length-1) {
+ Coordinate[] part = Arrays.copyOfRange(coordinates, offset, coordinates.length);
+ parts.add(shift(shift, part));
+ }
+ return parts.toArray(new Coordinate[parts.size()][]);
+ }
+
+ private static Coordinate[] shift(double shift, Coordinate...coordinates) {
+ if(shift != 0) {
+ for (int j = 0; j < coordinates.length; j++) {
+ coordinates[j] = new Coordinate(coordinates[j].x - 2 * shift, coordinates[j].y);
+ }
+ }
+ return coordinates;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/BasePolygonBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/BasePolygonBuilder.java
new file mode 100644
index 0000000..7570f19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/BasePolygonBuilder.java
@@ -0,0 +1,475 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.Geometry;
+import com.vividsolutions.jts.geom.GeometryFactory;
+import com.vividsolutions.jts.geom.LinearRing;
+import com.vividsolutions.jts.geom.MultiPolygon;
+import com.vividsolutions.jts.geom.Polygon;
+
+/**
+ * The {@link BasePolygonBuilder} implements the groundwork to create polygons. It contains
+ * methods to wrap polygons at the dateline and to build shapes from the data held by the
+ * builder.
+ * Since this builder can be embedded in other builders (e.g. {@link MultiPolygonBuilder}),
+ * the class of the embedding builder is given by the generic argument <code>E</code>.
+
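+ * <p>
+ * A minimal usage sketch (assuming the {@code ShapeBuilder.newPolygon()} factory):
+ * <pre>
+ *     Shape shape = ShapeBuilder.newPolygon()
+ *             .point(-10, -10).point(10, -10).point(10, 10).point(-10, 10)
+ *             .close().build();
+ * </pre>
+ *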
+ * @param <E> type of the embedding class
+ */
+public abstract class BasePolygonBuilder<E extends BasePolygonBuilder<E>> extends ShapeBuilder {
+
+ public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
+
+ // Linear ring defining the shell of the polygon
+ protected Ring<E> shell;
+
+ // List of linear rings defining the holes of the polygon
+ protected final ArrayList<BaseLineStringBuilder<?>> holes = new ArrayList<BaseLineStringBuilder<?>>();
+
+ @SuppressWarnings("unchecked")
+ private E thisRef() {
+ return (E)this;
+ }
+
+ public E point(double longitude, double latitude) {
+ shell.point(longitude, latitude);
+ return thisRef();
+ }
+
+ /**
+ * Add a point to the shell of the polygon
+ * @param coordinate coordinate of the new point
+ * @return this
+ */
+ public E point(Coordinate coordinate) {
+ shell.point(coordinate);
+ return thisRef();
+ }
+
+ /**
+ * Add an array of points to the shell of the polygon
+ * @param coordinates coordinates of the new points to add
+ * @return this
+ */
+ public E points(Coordinate...coordinates) {
+ shell.points(coordinates);
+ return thisRef();
+ }
+
+ /**
+ * Add a new hole to the polygon
+ * @param hole linear ring defining the hole
+ * @return this
+ */
+ public E hole(BaseLineStringBuilder<?> hole) {
+ holes.add(hole);
+ return thisRef();
+ }
+
+ /**
+ * Create a new hole on the polygon
+ * @return a new linear ring used to define the hole
+ */
+ public Ring<E> hole() {
+ Ring<E> hole = new Ring<E>(thisRef());
+ this.holes.add(hole);
+ return hole;
+ }
+
+ /**
+ * Close the shell of the polygon
+ * @return parent
+ */
+ public ShapeBuilder close() {
+ return shell.close();
+ }
+
+ /**
+ * The coordinates set up by the builder will be assembled into a polygon. The result
+ * consists of a set of polygons. Each of these components holds a list of linestrings
+ * defining the polygon: the first set of coordinates is used as the shell of the
+ * polygon; the others define holes within the polygon.
+ * This method also splits the polygons at the dateline. As a consequence, the result
+ * may contain more polygons and fewer holes than were defined in the builder itself.
+ *
+ * @return coordinates of the polygon
+ */
+ public Coordinate[][][] coordinates() {
+ int numEdges = shell.points.size()-1; // Last point is repeated
+ for (int i = 0; i < holes.size(); i++) {
+ numEdges += holes.get(i).points.size()-1;
+ }
+
+ Edge[] edges = new Edge[numEdges];
+ Edge[] holeComponents = new Edge[holes.size()];
+
+ int offset = createEdges(0, true, shell, edges, 0);
+ for (int i = 0; i < holes.size(); i++) {
+ int length = createEdges(i+1, false, this.holes.get(i), edges, offset);
+ holeComponents[i] = edges[offset];
+ offset += length;
+ }
+
+ int numHoles = holeComponents.length;
+
+ numHoles = merge(edges, 0, intersections(+DATELINE, edges), holeComponents, numHoles);
+ numHoles = merge(edges, 0, intersections(-DATELINE, edges), holeComponents, numHoles);
+
+ return compose(edges, holeComponents, numHoles);
+ }
+
+ @Override
+ public Shape build() {
+ Geometry geometry = buildGeometry(FACTORY, wrapdateline);
+ return new JtsGeometry(geometry, SPATIAL_CONTEXT, !wrapdateline);
+ }
+
+ protected XContentBuilder coordinatesArray(XContentBuilder builder, Params params) throws IOException {
+ shell.coordinatesToXcontent(builder, true);
+ for(BaseLineStringBuilder<?> hole : holes) {
+ hole.coordinatesToXcontent(builder, true);
+ }
+ return builder;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.startArray(FIELD_COORDINATES);
+ coordinatesArray(builder, params);
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public Geometry buildGeometry(GeometryFactory factory, boolean fixDateline) {
+ if(fixDateline) {
+ Coordinate[][][] polygons = coordinates();
+ return polygons.length == 1
+ ? polygon(factory, polygons[0])
+ : multipolygon(factory, polygons);
+ } else {
+ return toPolygon(factory);
+ }
+ }
+
+ public Polygon toPolygon() {
+ return toPolygon(FACTORY);
+ }
+
+ protected Polygon toPolygon(GeometryFactory factory) {
+ final LinearRing shell = linearRing(factory, this.shell.points);
+ final LinearRing[] holes = new LinearRing[this.holes.size()];
+ Iterator<BaseLineStringBuilder<?>> iterator = this.holes.iterator();
+ for (int i = 0; iterator.hasNext(); i++) {
+ holes[i] = linearRing(factory, iterator.next().points);
+ }
+ return factory.createPolygon(shell, holes);
+ }
+
+ protected static LinearRing linearRing(GeometryFactory factory, ArrayList<Coordinate> coordinates) {
+ return factory.createLinearRing(coordinates.toArray(new Coordinate[coordinates.size()]));
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+
+ protected static Polygon polygon(GeometryFactory factory, Coordinate[][] polygon) {
+ LinearRing shell = factory.createLinearRing(polygon[0]);
+ LinearRing[] holes;
+
+ if(polygon.length > 1) {
+ holes = new LinearRing[polygon.length-1];
+ for (int i = 0; i < holes.length; i++) {
+ holes[i] = factory.createLinearRing(polygon[i+1]);
+ }
+ } else {
+ holes = null;
+ }
+ return factory.createPolygon(shell, holes);
+ }
+
+ /**
+ * Create a Multipolygon from a set of coordinates. Each primary array contains a polygon,
+ * which in turn contains an array of linestrings. These linestrings are represented as
+ * arrays of coordinates. The first linestring is the shell of the polygon; the others
+ * define holes within the polygon.
+ *
+ * @param factory {@link GeometryFactory} to use
+ * @param polygons definition of polygons
+ * @return a new Multipolygon
+ */
+ protected static MultiPolygon multipolygon(GeometryFactory factory, Coordinate[][][] polygons) {
+ Polygon[] polygonSet = new Polygon[polygons.length];
+ for (int i = 0; i < polygonSet.length; i++) {
+ polygonSet[i] = polygon(factory, polygons[i]);
+ }
+ return factory.createMultiPolygon(polygonSet);
+ }
+
+ /**
+ * This method sets the component id of all edges in a ring to a given id and shifts the
+ * coordinates of this component according to the dateline
+ *
+ * @param edge An arbitrary edge of the component
+ * @param id id to apply to the component
+ * @param edges a list of edges to which all edges of the component will be added (could be <code>null</code>)
+ * @return number of edges that belong to this component
+ */
+ private static int component(final Edge edge, final int id, final ArrayList<Edge> edges) {
+ // find a coordinate that is not part of the dateline
+ Edge any = edge;
+ while(any.coordinate.x == +DATELINE || any.coordinate.x == -DATELINE) {
+ if((any = any.next) == edge) {
+ break;
+ }
+ }
+
+ double shift = any.coordinate.x > DATELINE ? DATELINE : (any.coordinate.x < -DATELINE ? -DATELINE : 0);
+ if (debugEnabled()) {
+ LOGGER.debug("shift: {[]}", shift);
+ }
+
+ // run along the border of the component, collect the
+ // edges, shift them according to the dateline and
+ // update the component id
+ int length = 0;
+ Edge current = edge;
+ do {
+ current.coordinate = shift(current.coordinate, shift);
+ current.component = id;
+ if(edges != null) {
+ edges.add(current);
+ }
+
+ length++;
+ } while((current = current.next) != edge);
+
+ return length;
+ }
+
+ /**
+ * Compute all coordinates of a component
+ * @param component an arbitrary edge of the component
+ * @param coordinates Array of coordinates to write the result to
+ * @return the coordinates parameter
+ */
+ private static Coordinate[] coordinates(Edge component, Coordinate[] coordinates) {
+ for (int i = 0; i < coordinates.length; i++) {
+ coordinates[i] = (component = component.next).coordinate;
+ }
+ return coordinates;
+ }
+
+ private static Coordinate[][][] buildCoordinates(ArrayList<ArrayList<Coordinate[]>> components) {
+ Coordinate[][][] result = new Coordinate[components.size()][][];
+ for (int i = 0; i < result.length; i++) {
+ ArrayList<Coordinate[]> component = components.get(i);
+ result[i] = component.toArray(new Coordinate[component.size()][]);
+ }
+
+ if(debugEnabled()) {
+ for (int i = 0; i < result.length; i++) {
+ LOGGER.debug("Component {[]}:", i);
+ for (int j = 0; j < result[i].length; j++) {
+ LOGGER.debug("\t" + Arrays.toString(result[i][j]));
+ }
+ }
+ }
+
+ return result;
+ }
+
+ private static final Coordinate[][] EMPTY = new Coordinate[0][];
+
+ private static Coordinate[][] holes(Edge[] holes, int numHoles) {
+ if (numHoles == 0) {
+ return EMPTY;
+ }
+ final Coordinate[][] points = new Coordinate[numHoles][];
+
+ for (int i = 0; i < numHoles; i++) {
+ int length = component(holes[i], -(i+1), null); // mark as visited by inverting the sign
+ points[i] = coordinates(holes[i], new Coordinate[length+1]);
+ }
+
+ return points;
+ }
+
+ private static Edge[] edges(Edge[] edges, int numHoles, ArrayList<ArrayList<Coordinate[]>> components) {
+ ArrayList<Edge> mainEdges = new ArrayList<Edge>(edges.length);
+
+ for (int i = 0; i < edges.length; i++) {
+ if (edges[i].component >= 0) {
+ int length = component(edges[i], -(components.size()+numHoles+1), mainEdges);
+ ArrayList<Coordinate[]> component = new ArrayList<Coordinate[]>();
+ component.add(coordinates(edges[i], new Coordinate[length+1]));
+ components.add(component);
+ }
+ }
+
+ return mainEdges.toArray(new Edge[mainEdges.size()]);
+ }
+
+ private static Coordinate[][][] compose(Edge[] edges, Edge[] holes, int numHoles) {
+ final ArrayList<ArrayList<Coordinate[]>> components = new ArrayList<ArrayList<Coordinate[]>>();
+ assign(holes, holes(holes, numHoles), numHoles, edges(edges, numHoles, components), components);
+ return buildCoordinates(components);
+ }
+
+ private static void assign(Edge[] holes, Coordinate[][] points, int numHoles, Edge[] edges, ArrayList<ArrayList<Coordinate[]>> components) {
+ // Assign each hole to its related component:
+ // To find the component a hole belongs to, all intersections of the
+ // polygon edges with a vertical line through an arbitrary point of the
+ // hole are calculated. The polygon edge next to this point is part of
+ // the polygon the hole belongs to.
+ if (debugEnabled()) {
+ LOGGER.debug("Holes: " + Arrays.toString(holes));
+ }
+ for (int i = 0; i < numHoles; i++) {
+ final Edge current = holes[i];
+ final int intersections = intersections(current.coordinate.x, edges);
+ final int pos = Arrays.binarySearch(edges, 0, intersections, current, INTERSECTION_ORDER);
+ assert pos < 0 : "illegal state: two edges cross the datum at the same position";
+ final int index = -(pos+2);
+ final int component = -edges[index].component - numHoles - 1;
+
+ if(debugEnabled()) {
+ LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]);
+ LOGGER.debug("\tComponent: " + component);
+ LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges));
+ }
+
+ components.get(component).add(points[i]);
+ }
+ }
+
+ private static int merge(Edge[] intersections, int offset, int length, Edge[] holes, int numHoles) {
+ // Intersections appear pairwise. On the first edge the interior of
+ // the polygon is entered. On the second edge the exterior face is
+ // entered. Other kinds of intersections are discarded by the
+ // intersection function
+
+ for (int i = 0; i < length; i += 2) {
+ Edge e1 = intersections[offset + i + 0];
+ Edge e2 = intersections[offset + i + 1];
+
+ // If two segments are connected, a hole may need to be deleted.
+ // Since edges of components appear pairwise we need to check
+ // the second edge only (the first edge is either polygon or
+ // already handled)
+ if (e2.component > 0) {
+ //TODO: Check if we could save the set null step
+ numHoles--;
+ holes[e2.component-1] = holes[numHoles];
+ holes[numHoles] = null;
+ }
+ connect(e1, e2);
+ }
+ return numHoles;
+ }
+
+ private static void connect(Edge in, Edge out) {
+ assert in != null && out != null;
+ assert in != out;
+ // Connect two edges by inserting a point at the dateline
+ // intersection and adding two edges between these points,
+ // one per direction
+ if(in.intersect != in.next.coordinate) {
+ // NOTE: the order of the object creation is crucial here! Don't change it!
+ // first edge has no point on dateline
+ Edge e1 = new Edge(in.intersect, in.next);
+
+ if(out.intersect != out.next.coordinate) {
+ // second edge has no point on dateline
+ Edge e2 = new Edge(out.intersect, out.next);
+ in.next = new Edge(in.intersect, e2, in.intersect);
+ } else {
+ // second edge intersects with dateline
+ in.next = new Edge(in.intersect, out.next, in.intersect);
+ }
+ out.next = new Edge(out.intersect, e1, out.intersect);
+ } else {
+ // first edge intersects with dateline
+ Edge e2 = new Edge(out.intersect, in.next, out.intersect);
+
+ if(out.intersect != out.next.coordinate) {
+ // second edge has no point on dateline
+ Edge e1 = new Edge(out.intersect, out.next);
+ in.next = new Edge(in.intersect, e1, in.intersect);
+
+ } else {
+ // second edge intersects with dateline
+ in.next = new Edge(in.intersect, out.next, in.intersect);
+ }
+ out.next = e2;
+ }
+ }
+
+ private static int createEdges(int component, boolean direction, BaseLineStringBuilder<?> line, Edge[] edges, int offset) {
+ Coordinate[] points = line.coordinates(false); // last point is repeated
+ Edge.ring(component, direction, points, 0, edges, offset, points.length-1);
+ return points.length-1;
+ }
+
+ public static class Ring<P extends ShapeBuilder> extends BaseLineStringBuilder<Ring<P>> {
+
+ private final P parent;
+
+ protected Ring(P parent) {
+ this(parent, new ArrayList<Coordinate>());
+ }
+
+ protected Ring(P parent, ArrayList<Coordinate> points) {
+ super(points);
+ this.parent = parent;
+ }
+
+ public P close() {
+ Coordinate start = points.get(0);
+ Coordinate end = points.get(points.size()-1);
+ if(start.x != end.x || start.y != end.y) {
+ points.add(start);
+ }
+ return parent;
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return null;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
new file mode 100644
index 0000000..5792668
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.spatial4j.core.shape.Circle;
+import com.vividsolutions.jts.geom.Coordinate;
+import java.io.IOException;
+
+public class CircleBuilder extends ShapeBuilder {
+
+ public static final String FIELD_RADIUS = "radius";
+ public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;
+
+ private DistanceUnit unit;
+ private double radius;
+ private Coordinate center;
+
+ /**
+ * Set the center of the circle
+ *
+ * @param center coordinate of the circle's center
+ * @return this
+ */
+ public CircleBuilder center(Coordinate center) {
+ this.center = center;
+ return this;
+ }
+
+ /**
+ * Set the center of the circle
+ * @param lon longitude of the center
+ * @param lat latitude of the center
+ * @return this
+ */
+ public CircleBuilder center(double lon, double lat) {
+ return center(new Coordinate(lon, lat));
+ }
+
+ /**
+ * Set the radius of the circle. The String value will be parsed by {@link DistanceUnit}
+ * @param radius Value and unit of the circle combined in a string
+ * @return this
+ */
+ public CircleBuilder radius(String radius) {
+ return radius(DistanceUnit.Distance.parseDistance(radius));
+ }
+
+ /**
+ * Set the radius of the circle
+ * @param radius radius of the circle (see {@link DistanceUnit.Distance})
+ * @return this
+ */
+ public CircleBuilder radius(Distance radius) {
+ return radius(radius.value, radius.unit);
+ }
+
+ /**
+ * Set the radius of the circle
+ * @param radius value of the circle's radius
+ * @param unit unit name of the radius value (see {@link DistanceUnit})
+ * @return this
+ */
+ public CircleBuilder radius(double radius, String unit) {
+ return radius(radius, DistanceUnit.fromString(unit));
+ }
+
+ /**
+ * Set the radius of the circle
+ * @param radius value of the circle's radius
+ * @param unit unit of the radius value (see {@link DistanceUnit})
+ * @return this
+ */
+ public CircleBuilder radius(double radius, DistanceUnit unit) {
+ this.unit = unit;
+ this.radius = radius;
+ return this;
+ }
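+ // Illustrative usage (a sketch; assumes the ShapeBuilder.newCircleBuilder() factory):
+ //   Circle circle = ShapeBuilder.newCircleBuilder()
+ //           .center(-45.0, 30.0).radius("100m").build();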
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.field(FIELD_RADIUS, unit.toString(radius));
+ builder.field(FIELD_COORDINATES);
+ toXContent(builder, center);
+ return builder.endObject();
+ }
+
+ @Override
+ public Circle build() {
+ return SPATIAL_CONTEXT.makeCircle(center.x, center.y, 180 * radius / unit.getEarthCircumference());
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
new file mode 100644
index 0000000..9de216b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import com.spatial4j.core.shape.Rectangle;
+import com.vividsolutions.jts.geom.Coordinate;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+public class EnvelopeBuilder extends ShapeBuilder {
+
+ public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
+
+ protected Coordinate topLeft;
+ protected Coordinate bottomRight;
+
+ public EnvelopeBuilder topLeft(Coordinate topLeft) {
+ this.topLeft = topLeft;
+ return this;
+ }
+
+ public EnvelopeBuilder topLeft(double longitude, double latitude) {
+ return topLeft(coordinate(longitude, latitude));
+ }
+
+ public EnvelopeBuilder bottomRight(Coordinate bottomRight) {
+ this.bottomRight = bottomRight;
+ return this;
+ }
+
+ public EnvelopeBuilder bottomRight(double longitude, double latitude) {
+ return bottomRight(coordinate(longitude, latitude));
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.startArray(FIELD_COORDINATES);
+ toXContent(builder, topLeft);
+ toXContent(builder, bottomRight);
+ builder.endArray();
+ return builder.endObject();
+ }
+
+ @Override
+ public Rectangle build() {
+ return SPATIAL_CONTEXT.makeRectangle(topLeft.x, bottomRight.x, bottomRight.y, topLeft.y);
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
new file mode 100644
index 0000000..c581475
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+public class LineStringBuilder extends BaseLineStringBuilder<LineStringBuilder> {
+
+ public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.field(FIELD_COORDINATES);
+ coordinatesToXcontent(builder, false);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
new file mode 100644
index 0000000..4c53fce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.Geometry;
+import com.vividsolutions.jts.geom.LineString;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+public class MultiLineStringBuilder extends ShapeBuilder {
+
+ public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;
+
+ private final ArrayList<BaseLineStringBuilder<?>> lines = new ArrayList<BaseLineStringBuilder<?>>();
+
+ public InternalLineStringBuilder linestring() {
+ InternalLineStringBuilder line = new InternalLineStringBuilder(this);
+ this.lines.add(line);
+ return line;
+ }
+
+ public MultiLineStringBuilder linestring(BaseLineStringBuilder<?> line) {
+ this.lines.add(line);
+ return this;
+ }
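+ // Illustrative usage (a sketch; assumes a ShapeBuilder.newMultiLinestring() factory):
+ //   MultiLineStringBuilder mls = ShapeBuilder.newMultiLinestring()
+ //           .linestring().point(-100, 45).point(-80, 45).end()
+ //           .linestring().point(-100, 40).point(-80, 40).end();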
+
+ public Coordinate[][] coordinates() {
+ Coordinate[][] result = new Coordinate[lines.size()][];
+ for (int i = 0; i < result.length; i++) {
+ result[i] = lines.get(i).coordinates(false);
+ }
+ return result;
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.field(FIELD_COORDINATES);
+ builder.startArray();
+ for(BaseLineStringBuilder<?> line : lines) {
+ line.coordinatesToXcontent(builder, false);
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public Shape build() {
+ final Geometry geometry;
+ if(wrapdateline) {
+ ArrayList<LineString> parts = new ArrayList<LineString>();
+ for (BaseLineStringBuilder<?> line : lines) {
+ BaseLineStringBuilder.decompose(FACTORY, line.coordinates(false), parts);
+ }
+ if(parts.size() == 1) {
+ geometry = parts.get(0);
+ } else {
+ LineString[] lineStrings = parts.toArray(new LineString[parts.size()]);
+ geometry = FACTORY.createMultiLineString(lineStrings);
+ }
+ } else {
+ LineString[] lineStrings = new LineString[lines.size()];
+ Iterator<BaseLineStringBuilder<?>> iterator = lines.iterator();
+ for (int i = 0; iterator.hasNext(); i++) {
+ lineStrings[i] = FACTORY.createLineString(iterator.next().coordinates(false));
+ }
+ geometry = FACTORY.createMultiLineString(lineStrings);
+ }
+ return new JtsGeometry(geometry, SPATIAL_CONTEXT, true);
+ }
+
+ public static class InternalLineStringBuilder extends BaseLineStringBuilder<InternalLineStringBuilder> {
+
+ private final MultiLineStringBuilder collection;
+
+ public InternalLineStringBuilder(MultiLineStringBuilder collection) {
+ super();
+ this.collection = collection;
+ }
+
+ public MultiLineStringBuilder end() {
+ return collection;
+ }
+
+ public Coordinate[] coordinates() {
+ return super.coordinates(false);
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return null;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
new file mode 100644
index 0000000..2e417b5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.MultiPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
+
+ public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.field(FIELD_COORDINATES);
+ super.coordinatesToXcontent(builder, false);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public Shape build() {
+ MultiPoint geometry = FACTORY.createMultiPoint(points.toArray(new Coordinate[points.size()]));
+ return new JtsGeometry(geometry, SPATIAL_CONTEXT, true);
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
new file mode 100644
index 0000000..a889888
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.Geometry;
+import com.vividsolutions.jts.geom.Polygon;
+
+public class MultiPolygonBuilder extends ShapeBuilder {
+
+ public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
+
+ protected final ArrayList<BasePolygonBuilder<?>> polygons = new ArrayList<BasePolygonBuilder<?>>();
+
+ public MultiPolygonBuilder polygon(BasePolygonBuilder<?> polygon) {
+ this.polygons.add(polygon);
+ return this;
+ }
+
+ public InternalPolygonBuilder polygon() {
+ InternalPolygonBuilder polygon = new InternalPolygonBuilder(this);
+ this.polygon(polygon);
+ return polygon;
+ }
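+ // Illustrative usage (a sketch; assumes a ShapeBuilder.newMultiPolygon() factory):
+ //   Shape shape = ShapeBuilder.newMultiPolygon()
+ //           .polygon().point(-10, -10).point(10, -10).point(10, 10).point(-10, 10).close()
+ //           .polygon().point(20, 20).point(30, 20).point(30, 30).point(20, 30).close()
+ //           .build();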
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.startArray(FIELD_COORDINATES);
+ for(BasePolygonBuilder<?> polygon : polygons) {
+ builder.startArray();
+ polygon.coordinatesArray(builder, params);
+ builder.endArray();
+ }
+ builder.endArray();
+ return builder.endObject();
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+
+ @Override
+ public Shape build() {
+
+ Polygon[] polygons;
+
+ if(wrapdateline) {
+ ArrayList<Polygon> polygonSet = new ArrayList<Polygon>(this.polygons.size());
+ for (BasePolygonBuilder<?> polygon : this.polygons) {
+ for(Coordinate[][] part : polygon.coordinates()) {
+ polygonSet.add(PolygonBuilder.polygon(FACTORY, part));
+ }
+ }
+
+ polygons = polygonSet.toArray(new Polygon[polygonSet.size()]);
+ } else {
+ polygons = new Polygon[this.polygons.size()];
+ Iterator<BasePolygonBuilder<?>> iterator = this.polygons.iterator();
+ for (int i = 0; iterator.hasNext(); i++) {
+ polygons[i] = iterator.next().toPolygon(FACTORY);
+ }
+ }
+
+ Geometry geometry = polygons.length == 1
+ ? polygons[0]
+ : FACTORY.createMultiPolygon(polygons);
+
+ return new JtsGeometry(geometry, SPATIAL_CONTEXT, !wrapdateline);
+ }
+
+ public static class InternalPolygonBuilder extends BasePolygonBuilder<InternalPolygonBuilder> {
+
+ private final MultiPolygonBuilder collection;
+
+ private InternalPolygonBuilder(MultiPolygonBuilder collection) {
+ super();
+ this.collection = collection;
+ this.shell = new Ring<InternalPolygonBuilder>(this);
+ }
+
+ @Override
+ public MultiPolygonBuilder close() {
+ super.close();
+ return collection;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
new file mode 100644
index 0000000..53c6738
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import java.io.IOException;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.spatial4j.core.shape.Point;
+import com.vividsolutions.jts.geom.Coordinate;
+
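+/**
+ * Builder for <code>point</code> shapes. A minimal usage sketch (illustrative
+ * only; the coordinate is an arbitrary example value):
+ * <pre>
+ * Point point = ShapeBuilder.newPoint(-74.1, 40.73).build();
+ * </pre>
+ */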
+public class PointBuilder extends ShapeBuilder {
+
+ public static final GeoShapeType TYPE = GeoShapeType.POINT;
+
+ private Coordinate coordinate;
+
+ public PointBuilder coordinate(Coordinate coordinate) {
+ this.coordinate = coordinate;
+ return this;
+ }
+
+ public double longitude() {
+ return coordinate.x;
+ }
+
+ public double latitude() {
+ return coordinate.y;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FIELD_TYPE, TYPE.shapename);
+ builder.field(FIELD_COORDINATES);
+ toXContent(builder, coordinate);
+ return builder.endObject();
+ }
+
+ @Override
+ public Point build() {
+ return SPATIAL_CONTEXT.makePoint(coordinate.x, coordinate.y);
+ }
+
+ @Override
+ public GeoShapeType type() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/PointCollection.java b/src/main/java/org/elasticsearch/common/geo/builders/PointCollection.java
new file mode 100644
index 0000000..99e3981
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/PointCollection.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import com.vividsolutions.jts.geom.Coordinate;
+
+/**
+ * The {@link PointCollection} is an abstract base implementation for GeoShapes that are
+ * defined by a collection of points, such as multipoints and line strings.
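+ * <p>
+ * A short sketch (illustrative only; coordinates are arbitrary example
+ * values) of the fluent API, using {@link MultiPointBuilder} as a concrete
+ * subclass:
+ * <pre>
+ * MultiPointBuilder builder = ShapeBuilder.newMultiPoint()
+ *         .point(45.0, 30.0)
+ *         .point(46.0, 31.0);
+ * </pre>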
+ */
+public abstract class PointCollection<E extends PointCollection<E>> extends ShapeBuilder {
+
+ protected final ArrayList<Coordinate> points;
+
+ protected PointCollection() {
+ this(new ArrayList<Coordinate>());
+ }
+
+ protected PointCollection(ArrayList<Coordinate> points) {
+ this.points = points;
+ }
+
+ @SuppressWarnings("unchecked")
+ private E thisRef() {
+ return (E)this;
+ }
+
+ /**
+ * Add a new point to the collection
+ * @param longitude longitude of the coordinate
+ * @param latitude latitude of the coordinate
+ * @return this
+ */
+ public E point(double longitude, double latitude) {
+ return this.point(coordinate(longitude, latitude));
+ }
+
+ /**
+ * Add a new point to the collection
+ * @param coordinate coordinate of the point
+ * @return this
+ */
+ public E point(Coordinate coordinate) {
+ this.points.add(coordinate);
+ return thisRef();
+ }
+
+ /**
+ * Add an array of points to the collection
+ *
+ * @param coordinates array of {@link Coordinate}s to add
+ * @return this
+ */
+ public E points(Coordinate...coordinates) {
+ return this.points(Arrays.asList(coordinates));
+ }
+
+ /**
+ * Add a collection of points to the collection
+ *
+ * @param coordinates collection of {@link Coordinate}s to add
+ * @return this
+ */
+ public E points(Collection<? extends Coordinate> coordinates) {
+ this.points.addAll(coordinates);
+ return thisRef();
+ }
+
+ /**
+ * Copy all points to a new Array
+ *
+ * @param closed if set to true the first point of the array is repeated as last element
+ * @return Array of coordinates
+ */
+ protected Coordinate[] coordinates(boolean closed) {
+ Coordinate[] result = points.toArray(new Coordinate[points.size() + (closed?1:0)]);
+ if(closed) {
+ result[result.length-1] = result[0];
+ }
+ return result;
+ }
+
+ /**
+ * Writes the points of this collection as a coordinate array to a {@link XContentBuilder}
+ *
+ * @param builder builder to use
+ * @param closed if true, the first point is repeated at the end of the array unless it is already defined as the last element
+ * @return the builder
+ * @throws IOException
+ */
+ protected XContentBuilder coordinatesToXcontent(XContentBuilder builder, boolean closed) throws IOException {
+ builder.startArray();
+ for(Coordinate point : points) {
+ toXContent(builder, point);
+ }
+ if(closed) {
+ Coordinate start = points.get(0);
+ Coordinate end = points.get(points.size()-1);
+ if(start.x != end.x || start.y != end.y) {
+ toXContent(builder, points.get(0));
+ }
+ }
+ builder.endArray();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
new file mode 100644
index 0000000..94faa68
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import java.util.ArrayList;
+
+import com.vividsolutions.jts.geom.Coordinate;
+
+public class PolygonBuilder extends BasePolygonBuilder<PolygonBuilder> {
+
+ public PolygonBuilder() {
+ this(new ArrayList<Coordinate>());
+ }
+
+ protected PolygonBuilder(ArrayList<Coordinate> points) {
+ super();
+ this.shell = new Ring<PolygonBuilder>(this, points);
+ }
+
+ @Override
+ public PolygonBuilder close() {
+ super.close();
+ return this;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
new file mode 100644
index 0000000..aab1068
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -0,0 +1,629 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+
+import com.spatial4j.core.context.jts.JtsSpatialContext;
+import com.spatial4j.core.shape.Shape;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.GeometryFactory;
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Base class for building GeoJSON shapes like polygons, line strings, etc.
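+ * <p>
+ * A minimal usage sketch (illustrative only; coordinates are arbitrary
+ * example values): build a triangle and turn it into a {@link Shape}.
+ * <pre>
+ * Shape shape = ShapeBuilder.newPolygon()
+ *         .point(-10, -10)
+ *         .point(10, -10)
+ *         .point(10, 10)
+ *         .close()
+ *         .build();
+ * </pre>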
+ */
+public abstract class ShapeBuilder implements ToXContent {
+
+ protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
+
+ private static final boolean DEBUG;
+ static {
+ // if assertions are enabled we run the debug statements even if they are not logged,
+ // so that exceptions which would otherwise only occur with debug logging enabled show up in tests
+ boolean debug = false;
+ assert debug = true;
+ DEBUG = debug;
+ }
+
+ public static final double DATELINE = 180;
+ public static final GeometryFactory FACTORY = new GeometryFactory();
+ public static final JtsSpatialContext SPATIAL_CONTEXT = new JtsSpatialContext(true);
+
+ protected final boolean wrapdateline = true;
+
+ protected ShapeBuilder() {
+
+ }
+
+ protected static Coordinate coordinate(double longitude, double latitude) {
+ return new Coordinate(longitude, latitude);
+ }
+
+ /**
+ * Create a new point
+ *
+ * @param longitude longitude of the point
+ * @param latitude latitude of the point
+ * @return a new {@link PointBuilder}
+ */
+ public static PointBuilder newPoint(double longitude, double latitude) {
+ return newPoint(new Coordinate(longitude, latitude));
+ }
+
+ /**
+ * Create a new {@link PointBuilder} from a {@link Coordinate}
+ * @param coordinate coordinate defining the position of the point
+ * @return a new {@link PointBuilder}
+ */
+ public static PointBuilder newPoint(Coordinate coordinate) {
+ return new PointBuilder().coordinate(coordinate);
+ }
+
+ /**
+ * Create a new set of points
+ * @return new {@link MultiPointBuilder}
+ */
+ public static MultiPointBuilder newMultiPoint() {
+ return new MultiPointBuilder();
+ }
+
+ /**
+ * Create a new lineString
+ * @return a new {@link LineStringBuilder}
+ */
+ public static LineStringBuilder newLineString() {
+ return new LineStringBuilder();
+ }
+
+ /**
+ * Create a new Collection of lineStrings
+ * @return a new {@link MultiLineStringBuilder}
+ */
+ public static MultiLineStringBuilder newMultiLinestring() {
+ return new MultiLineStringBuilder();
+ }
+
+ /**
+ * Create a new Polygon
+ * @return a new {@link PolygonBuilder}
+ */
+ public static PolygonBuilder newPolygon() {
+ return new PolygonBuilder();
+ }
+
+ /**
+ * Create a new Collection of polygons
+ * @return a new {@link MultiPolygonBuilder}
+ */
+ public static MultiPolygonBuilder newMultiPolygon() {
+ return new MultiPolygonBuilder();
+ }
+
+ /**
+ * Create a new Circle
+ * @return a new {@link CircleBuilder}
+ */
+ public static CircleBuilder newCircleBuilder() {
+ return new CircleBuilder();
+ }
+
+ /**
+ * Create a new rectangle
+ * @return a new {@link EnvelopeBuilder}
+ */
+ public static EnvelopeBuilder newEnvelope() {
+ return new EnvelopeBuilder();
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder xcontent = JsonXContent.contentBuilder();
+ return toXContent(xcontent, EMPTY_PARAMS).prettyPrint().string();
+ } catch (IOException e) {
+ return super.toString();
+ }
+ }
+
+ /**
+ * Create a new Shape from this builder. Since calling this method may change the
+ * defined shape (by inserting new coordinates or changing the position of points),
+ * the builder loses its validity. So this method should only be called once per builder.
+ * @return new {@link Shape} defined by the builder
+ */
+ public abstract Shape build();
+
+ /**
+ * Recursive method which parses the arrays of coordinates used to define
+ * Shapes
+ *
+ * @param parser
+ * Parser that will be read from
+ * @return CoordinateNode representing the start of the coordinate tree
+ * @throws IOException
+ * Thrown if an error occurs while reading from the
+ * XContentParser
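+ *
+ * <p>For example (illustrative), the array {@code [[1.0, 2.0], [3.0, 4.0]]}
+ * parses into a single parent node with two leaf nodes.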
+ */
+ private static CoordinateNode parseCoordinates(XContentParser parser) throws IOException {
+ XContentParser.Token token = parser.nextToken();
+
+ // Base case
+ if (token != XContentParser.Token.START_ARRAY) {
+ double lon = parser.doubleValue();
+ token = parser.nextToken();
+ double lat = parser.doubleValue();
+ token = parser.nextToken();
+ return new CoordinateNode(new Coordinate(lon, lat));
+ }
+
+ List<CoordinateNode> nodes = new ArrayList<CoordinateNode>();
+ while (token != XContentParser.Token.END_ARRAY) {
+ nodes.add(parseCoordinates(parser));
+ token = parser.nextToken();
+ }
+
+ return new CoordinateNode(nodes);
+ }
+
+ /**
+ * Create a new {@link ShapeBuilder} from {@link XContent}
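+ * <p>
+ * A hedged sketch (illustrative only; the GeoJSON string is a made-up
+ * example) of parsing a point; it assumes the parser is positioned on the
+ * shape object:
+ * <pre>
+ * XContentParser parser = JsonXContent.jsonXContent.createParser(
+ *         "{\"type\": \"point\", \"coordinates\": [-74.1, 40.73]}");
+ * parser.nextToken(); // move to START_OBJECT
+ * ShapeBuilder shape = ShapeBuilder.parse(parser);
+ * </pre>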
+ * @param parser parser to read the GeoShape from
+ * @return {@link ShapeBuilder} read from the parser or null
+ * if the parser's current token is <code>null</code>
+ * @throws IOException if the input could not be read
+ */
+ public static ShapeBuilder parse(XContentParser parser) throws IOException {
+ return GeoShapeType.parse(parser);
+ }
+
+ protected static XContentBuilder toXContent(XContentBuilder builder, Coordinate coordinate) throws IOException {
+ return builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
+ }
+
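+ /**
+ * Shift a coordinate across the dateline by a full world width
+ * (2 * dateline). For example (illustrative): shifting x=190 with
+ * dateline=180 yields x = -2*180 + 190 = -170.
+ */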
+ protected static Coordinate shift(Coordinate coordinate, double dateline) {
+ if (dateline == 0) {
+ return coordinate;
+ } else {
+ return new Coordinate(-2 * dateline + coordinate.x, coordinate.y);
+ }
+ }
+
+ /**
+ * Get the shape's type
+ * @return type of the shape
+ */
+ public abstract GeoShapeType type();
+
+ /**
+ * Calculate the intersection of a line segment and a vertical dateline.
+ *
+ * @param p1
+ * start-point of the line segment
+ * @param p2
+ * end-point of the line segment
+ * @param dateline
+ * x-coordinate of the vertical dateline
+ * @return position of the intersection in the open range (0..1] if the line
+ * segment intersects with the dateline. Otherwise this method
+ * returns {@link Double#NaN}
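+ *
+ * <p>For example (illustrative): p1=(170, 0), p2=(190, 0) and dateline=180
+ * yields t = (180-170) / (190-170) = 0.5.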
+ */
+ protected static final double intersection(Coordinate p1, Coordinate p2, double dateline) {
+ if (p1.x == p2.x) {
+ return Double.NaN;
+ } else {
+ final double t = (dateline - p1.x) / (p2.x - p1.x);
+ if (t > 1 || t <= 0) {
+ return Double.NaN;
+ } else {
+ return t;
+ }
+ }
+ }
+
+ /**
+ * Calculate all intersections of line segments and a vertical line. The
+ * array of edges will be sorted in ascending order by the y-coordinate of
+ * their intersections.
+ *
+ * @param dateline
+ * x-coordinate of the dateline
+ * @param edges
+ * set of edges that may intersect with the dateline
+ * @return number of intersecting edges
+ */
+ protected static int intersections(double dateline, Edge[] edges) {
+ int numIntersections = 0;
+ assert !Double.isNaN(dateline);
+ for (int i = 0; i < edges.length; i++) {
+ Coordinate p1 = edges[i].coordinate;
+ Coordinate p2 = edges[i].next.coordinate;
+ assert !Double.isNaN(p2.x) && !Double.isNaN(p1.x);
+ edges[i].intersect = IntersectionOrder.SENTINEL;
+
+ double position = intersection(p1, p2, dateline);
+ if (!Double.isNaN(position)) {
+ if (position == 1) {
+ if (Double.compare(p1.x, dateline) == Double.compare(edges[i].next.next.coordinate.x, dateline)) {
+ // Ignore the ear
+ continue;
+ } else if (p2.x == dateline) {
+ // Ignore Linesegment on dateline
+ continue;
+ }
+ }
+ edges[i].intersection(position);
+ numIntersections++;
+ }
+ }
+ Arrays.sort(edges, INTERSECTION_ORDER);
+ return numIntersections;
+ }
+
+ /**
+ * Node used to represent a tree of coordinates.
+ * <p/>
+ * Can either be a leaf node consisting of a Coordinate, or a parent with
+ * children
+ */
+ protected static class CoordinateNode implements ToXContent {
+
+ protected final Coordinate coordinate;
+ protected final List<CoordinateNode> children;
+
+ /**
+ * Creates a new leaf CoordinateNode
+ *
+ * @param coordinate
+ * Coordinate for the Node
+ */
+ protected CoordinateNode(Coordinate coordinate) {
+ this.coordinate = coordinate;
+ this.children = null;
+ }
+
+ /**
+ * Creates a new parent CoordinateNode
+ *
+ * @param children
+ * Children of the Node
+ */
+ protected CoordinateNode(List<CoordinateNode> children) {
+ this.children = children;
+ this.coordinate = null;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (children == null) {
+ builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
+ } else {
+ builder.startArray();
+ for (CoordinateNode child : children) {
+ child.toXContent(builder, params);
+ }
+ builder.endArray();
+ }
+ return builder;
+ }
+ }
+
+ /**
+ * This helper class implements a linked list for {@link Coordinate}. It contains
+ * fields for a dateline intersection and component id
+ */
+ protected static final class Edge {
+ Coordinate coordinate; // coordinate of the start point
+ Edge next; // next segment
+ Coordinate intersect; // potential intersection with dateline
+ int component = -1; // id of the component this edge belongs to
+
+ protected Edge(Coordinate coordinate, Edge next, Coordinate intersection) {
+ this.coordinate = coordinate;
+ this.next = next;
+ this.intersect = intersection;
+ if (next != null) {
+ this.component = next.component;
+ }
+ }
+
+ protected Edge(Coordinate coordinate, Edge next) {
+ this(coordinate, next, IntersectionOrder.SENTINEL);
+ }
+
+ private static final int top(Coordinate[] points, int offset, int length) {
+ int top = 0; // the loop starts at 1 since top already points to index 0
+ for (int i = 1; i < length; i++) {
+ if (points[offset + i].y < points[offset + top].y) {
+ top = i;
+ } else if (points[offset + i].y == points[offset + top].y) {
+ if (points[offset + i].x < points[offset + top].x) {
+ top = i;
+ }
+ }
+ }
+ return top;
+ }
+
+ /**
+ * Concatenate a set of points to a polygon
+ *
+ * @param component
+ * component id of the polygon
+ * @param direction
+ * direction of the ring
+ * @param points
+ * list of points to concatenate
+ * @param pointOffset
+ * index of the first point
+ * @param edges
+ * Array of edges to write the result to
+ * @param edgeOffset
+ * index of the first edge in the result
+ * @param length
+ * number of points to use
+ * @return the edges created
+ */
+ private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
+ int length) {
+ assert edges.length >= length+edgeOffset;
+ assert points.length >= length+pointOffset;
+ edges[edgeOffset] = new Edge(points[pointOffset], null);
+ for (int i = 1; i < length; i++) {
+ if (direction) {
+ edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
+ edges[edgeOffset + i].component = component;
+ } else {
+ edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
+ edges[edgeOffset + i - 1].component = component;
+ }
+ }
+
+ if (direction) {
+ edges[edgeOffset].next = edges[edgeOffset + length - 1];
+ edges[edgeOffset].component = component;
+ } else {
+ edges[edgeOffset + length - 1].next = edges[edgeOffset];
+ edges[edgeOffset + length - 1].component = component;
+ }
+
+ return edges;
+ }
+
+ /**
+ * Create a connected list of edges from a list of coordinates
+ *
+ * @param component
+ * component id of the ring
+ * @param direction
+ * direction of the ring
+ * @param points
+ * array of points
+ * @param offset
+ * index of the first point
+ * @param edges
+ * Array of edges to write the result to
+ * @param toffset
+ * index of the first edge in the result
+ * @param length
+ * number of points
+ * @return Array of edges
+ */
+ protected static Edge[] ring(int component, boolean direction, Coordinate[] points, int offset, Edge[] edges, int toffset,
+ int length) {
+ // calculate the direction of the points:
+ // find the point at the top of the set and check the
+ // orientation of its neighbors. This makes direction
+ // equivalent to clockwise/counterclockwise
+ final int top = top(points, offset, length);
+ final int prev = (top + length - 1) % length;
+ final int next = (top + 1) % length;
+ final boolean orientation = points[offset + prev].x > points[offset + next].x;
+ return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
+ }
+
+ /**
+ * Set the intersection of this line segment to the given position
+ *
+ * @param position
+ * position of the intersection [0..1]
+ * @return the {@link Coordinate} of the intersection
+ */
+ protected Coordinate intersection(double position) {
+ return intersect = position(coordinate, next.coordinate, position);
+ }
+
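+ /**
+ * Linearly interpolate between two coordinates. For example (illustrative):
+ * position((0,0), (10,10), 0.5) yields (5,5); positions 0 and 1 return p1
+ * and p2 unchanged.
+ */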
+ public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
+ if (position == 0) {
+ return p1;
+ } else if (position == 1) {
+ return p2;
+ } else {
+ final double x = p1.x + position * (p2.x - p1.x);
+ final double y = p1.y + position * (p2.y - p1.y);
+ return new Coordinate(x, y);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "Edge[Component=" + component + "; start=" + coordinate + " " + "; intersection=" + intersect + "]";
+ }
+ }
+
+ protected static final IntersectionOrder INTERSECTION_ORDER = new IntersectionOrder();
+
+ private static final class IntersectionOrder implements Comparator<Edge> {
+ private static final Coordinate SENTINEL = new Coordinate(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY);
+
+ @Override
+ public int compare(Edge o1, Edge o2) {
+ return Double.compare(o1.intersect.y, o2.intersect.y);
+ }
+
+ }
+
+ public static final String FIELD_TYPE = "type";
+ public static final String FIELD_COORDINATES = "coordinates";
+
+ protected static final boolean debugEnabled() {
+ return LOGGER.isDebugEnabled() || DEBUG;
+ }
+
+ /**
+ * Enumeration that lists all {@link GeoShapeType}s that can be handled
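+ * <p>
+ * Lookup by name is case-insensitive, e.g. (illustrative):
+ * <pre>
+ * GeoShapeType type = GeoShapeType.forName("Polygon"); // GeoShapeType.POLYGON
+ * </pre>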
+ */
+ public static enum GeoShapeType {
+ POINT("point"),
+ MULTIPOINT("multipoint"),
+ LINESTRING("linestring"),
+ MULTILINESTRING("multilinestring"),
+ POLYGON("polygon"),
+ MULTIPOLYGON("multipolygon"),
+ ENVELOPE("envelope"),
+ CIRCLE("circle");
+
+ protected final String shapename;
+
+ private GeoShapeType(String shapename) {
+ this.shapename = shapename;
+ }
+
+ public static GeoShapeType forName(String geoshapename) {
+ String typename = geoshapename.toLowerCase(Locale.ROOT);
+ for (GeoShapeType type : values()) {
+ if(type.shapename.equals(typename)) {
+ return type;
+ }
+ }
+ throw new ElasticsearchIllegalArgumentException("unknown geo_shape ["+geoshapename+"]");
+ }
+
+ public static ShapeBuilder parse(XContentParser parser) throws IOException {
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
+ return null;
+ } else if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchParseException("Shape must be an object consisting of type and coordinates");
+ }
+
+ GeoShapeType shapeType = null;
+ Distance radius = null;
+ CoordinateNode node = null;
+
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String fieldName = parser.currentName();
+
+ if (FIELD_TYPE.equals(fieldName)) {
+ parser.nextToken();
+ shapeType = GeoShapeType.forName(parser.text());
+ } else if (FIELD_COORDINATES.equals(fieldName)) {
+ parser.nextToken();
+ node = parseCoordinates(parser);
+ } else if (CircleBuilder.FIELD_RADIUS.equals(fieldName)) {
+ parser.nextToken();
+ radius = Distance.parseDistance(parser.text());
+ } else {
+ parser.nextToken();
+ parser.skipChildren();
+ }
+ }
+ }
+
+ if (shapeType == null) {
+ throw new ElasticsearchParseException("Shape type not included");
+ } else if (node == null) {
+ throw new ElasticsearchParseException("Coordinates not included");
+ } else if (radius != null && GeoShapeType.CIRCLE != shapeType) {
+ throw new ElasticsearchParseException("Field [" + CircleBuilder.FIELD_RADIUS + "] is supported for [" + CircleBuilder.TYPE
+ + "] only");
+ }
+
+ switch (shapeType) {
+ case POINT: return parsePoint(node);
+ case MULTIPOINT: return parseMultiPoint(node);
+ case LINESTRING: return parseLineString(node);
+ case MULTILINESTRING: return parseMultiLine(node);
+ case POLYGON: return parsePolygon(node);
+ case MULTIPOLYGON: return parseMultiPolygon(node);
+ case CIRCLE: return parseCircle(node, radius);
+ case ENVELOPE: return parseEnvelope(node);
+ default:
+ throw new ElasticsearchParseException("Shape type [" + shapeType + "] not included");
+ }
+ }
+
+ protected static PointBuilder parsePoint(CoordinateNode node) {
+ return newPoint(node.coordinate);
+ }
+
+ protected static CircleBuilder parseCircle(CoordinateNode coordinates, Distance radius) {
+ return newCircleBuilder().center(coordinates.coordinate).radius(radius);
+ }
+
+ protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) {
+ return newEnvelope().topLeft(coordinates.children.get(0).coordinate).bottomRight(coordinates.children.get(1).coordinate);
+ }
+
+ protected static MultiPointBuilder parseMultiPoint(CoordinateNode coordinates) {
+ MultiPointBuilder points = new MultiPointBuilder();
+ for (CoordinateNode node : coordinates.children) {
+ points.point(node.coordinate);
+ }
+ return points;
+ }
+
+ protected static LineStringBuilder parseLineString(CoordinateNode coordinates) {
+ LineStringBuilder line = newLineString();
+ for (CoordinateNode node : coordinates.children) {
+ line.point(node.coordinate);
+ }
+ return line;
+ }
+
+ protected static MultiLineStringBuilder parseMultiLine(CoordinateNode coordinates) {
+ MultiLineStringBuilder multiline = newMultiLinestring();
+ for (CoordinateNode node : coordinates.children) {
+ multiline.linestring(parseLineString(node));
+ }
+ return multiline;
+ }
+
+ protected static PolygonBuilder parsePolygon(CoordinateNode coordinates) {
+ LineStringBuilder shell = parseLineString(coordinates.children.get(0));
+ PolygonBuilder polygon = new PolygonBuilder(shell.points);
+ for (int i = 1; i < coordinates.children.size(); i++) {
+ polygon.hole(parseLineString(coordinates.children.get(i)));
+ }
+ return polygon;
+ }
+
+ protected static MultiPolygonBuilder parseMultiPolygon(CoordinateNode coordinates) {
+ MultiPolygonBuilder polygons = newMultiPolygon();
+ for (CoordinateNode node : coordinates.children) {
+ polygons.polygon(parsePolygon(node));
+ }
+ return polygons;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java b/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java
new file mode 100644
index 0000000..c4d05dd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.http.client;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.*;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.net.URLConnection;
+
+/**
+ * Helper that downloads a file over HTTP, optionally honoring a local
+ * timestamp and reporting progress, with an overall timeout.
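+ * <p>
+ * A hedged usage sketch (illustrative only; the URL and paths are made-up
+ * example values):
+ * <pre>
+ * HttpDownloadHelper helper = new HttpDownloadHelper();
+ * boolean ok = helper.download(
+ *         new URL("http://example.com/plugin.zip"),
+ *         new File("/tmp/plugin.zip"),
+ *         new HttpDownloadHelper.VerboseProgress(System.out),
+ *         TimeValue.timeValueSeconds(30));
+ * </pre>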
+ */
+public class HttpDownloadHelper {
+
+ private boolean useTimestamp = false;
+ private boolean skipExisting = false;
+
+ public boolean download(URL source, File dest, @Nullable DownloadProgress progress, TimeValue timeout) throws Exception {
+ if (dest.exists() && skipExisting) {
+ return true;
+ }
+
+ //don't do any progress, unless asked
+ if (progress == null) {
+ progress = new NullProgress();
+ }
+
+ //set the timestamp to the file date.
+ long timestamp = 0;
+
+ boolean hasTimestamp = false;
+ if (useTimestamp && dest.exists()) {
+ timestamp = dest.lastModified();
+ hasTimestamp = true;
+ }
+
+ GetThread getThread = new GetThread(source, dest, hasTimestamp, timestamp, progress);
+
+ try {
+ getThread.setDaemon(true);
+ getThread.start();
+ getThread.join(timeout.millis());
+
+ if (getThread.isAlive()) {
+ throw new ElasticsearchTimeoutException("The GET operation took longer than " + timeout + ", stopping it.");
+ }
+ }
+ catch (InterruptedException ie) {
+ return false;
+ } finally {
+ getThread.closeStreams();
+ }
+
+ return getThread.wasSuccessful();
+ }
+
+
+ /**
+ * Interface implemented to report
+ * the progress of a download.
+ */
+ public interface DownloadProgress {
+ /**
+ * begin a download
+ */
+ void beginDownload();
+
+ /**
+ * tick handler
+ */
+ void onTick();
+
+ /**
+ * end a download
+ */
+ void endDownload();
+ }
+
+ /**
+ * do nothing with progress info
+ */
+ public static class NullProgress implements DownloadProgress {
+
+ /**
+ * begin a download
+ */
+ public void beginDownload() {
+
+ }
+
+ /**
+ * tick handler
+ */
+ public void onTick() {
+ }
+
+ /**
+ * end a download
+ */
+ public void endDownload() {
+
+ }
+ }
+
+ /**
+ * Verbose progress reporter that prints progress dots to an output stream
+ */
+ public static class VerboseProgress implements DownloadProgress {
+ private int dots = 0;
+ // CheckStyle:VisibilityModifier OFF - bc
+ PrintStream out;
+ // CheckStyle:VisibilityModifier ON
+
+ /**
+ * Construct a verbose progress reporter.
+ *
+ * @param out the output stream.
+ */
+ public VerboseProgress(PrintStream out) {
+ this.out = out;
+ }
+
+ /**
+ * begin a download
+ */
+ public void beginDownload() {
+ out.print("Downloading ");
+ dots = 0;
+ }
+
+ /**
+ * tick handler
+ */
+ public void onTick() {
+ out.print(".");
+ if (dots++ > 50) {
+ out.flush();
+ dots = 0;
+ }
+ }
+
+ /**
+ * end a download
+ */
+ public void endDownload() {
+ out.println("DONE");
+ out.flush();
+ }
+ }
+
+ private class GetThread extends Thread {
+
+ private final URL source;
+ private final File dest;
+ private final boolean hasTimestamp;
+ private final long timestamp;
+ private final DownloadProgress progress;
+
+ private boolean success = false;
+ private IOException ioexception = null;
+ private InputStream is = null;
+ private OutputStream os = null;
+ private URLConnection connection;
+ private int redirections = 0;
+
+ GetThread(URL source, File dest, boolean hasTimestamp, long timestamp, DownloadProgress progress) {
+ this.source = source;
+ this.dest = dest;
+ this.hasTimestamp = hasTimestamp;
+ this.timestamp = timestamp;
+ this.progress = progress;
+ }
+
+ public void run() {
+ try {
+ success = get();
+ } catch (IOException ioex) {
+ ioexception = ioex;
+ }
+ }
+
+ private boolean get() throws IOException {
+
+ connection = openConnection(source);
+
+ if (connection == null) {
+ return false;
+ }
+
+ boolean downloadSucceeded = downloadFile();
+
+ //if (and only if) the use file time option is set, then
+ //the saved file now has its timestamp set to that of the
+ //downloaded file
+ if (downloadSucceeded && useTimestamp) {
+ updateTimeStamp();
+ }
+
+ return downloadSucceeded;
+ }
+
+
+ private boolean redirectionAllowed(URL aSource, URL aDest) throws IOException {
+ // Argh, github does this...
+// if (!(aSource.getProtocol().equals(aDest.getProtocol()) || ("http"
+// .equals(aSource.getProtocol()) && "https".equals(aDest
+// .getProtocol())))) {
+// String message = "Redirection detected from "
+// + aSource.getProtocol() + " to " + aDest.getProtocol()
+// + ". Protocol switch unsafe, not allowed.";
+// throw new IOException(message);
+// }
+
+ redirections++;
+ if (redirections > 5) {
+ String message = "More than " + 5 + " times redirected, giving up";
+ throw new IOException(message);
+ }
+
+
+ return true;
+ }
+
+ private URLConnection openConnection(URL aSource) throws IOException {
+
+ // set up the URL connection
+ URLConnection connection = aSource.openConnection();
+ // modify the headers
+ // NB: things like user authentication could go in here too.
+ if (hasTimestamp) {
+ connection.setIfModifiedSince(timestamp);
+ }
+
+ if (connection instanceof HttpURLConnection) {
+ ((HttpURLConnection) connection).setInstanceFollowRedirects(false);
+ ((HttpURLConnection) connection).setUseCaches(true);
+ ((HttpURLConnection) connection).setConnectTimeout(5000);
+ }
+ // connect to the remote site (may take some time)
+ connection.connect();
+
+ // First check on a 301 / 302 (moved) response (HTTP only)
+ if (connection instanceof HttpURLConnection) {
+ HttpURLConnection httpConnection = (HttpURLConnection) connection;
+ int responseCode = httpConnection.getResponseCode();
+ if (responseCode == HttpURLConnection.HTTP_MOVED_PERM ||
+ responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
+ responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
+ String newLocation = httpConnection.getHeaderField("Location");
+ String message = aSource
+ + (responseCode == HttpURLConnection.HTTP_MOVED_PERM ? " permanently"
+ : "") + " moved to " + newLocation;
+ URL newURL = new URL(newLocation);
+ if (!redirectionAllowed(aSource, newURL)) {
+ return null;
+ }
+ return openConnection(newURL);
+ }
+ // next test for a 304 result (HTTP only)
+ long lastModified = httpConnection.getLastModified();
+ if (responseCode == HttpURLConnection.HTTP_NOT_MODIFIED
+ || (lastModified != 0 && hasTimestamp && timestamp >= lastModified)) {
+ // not modified so no file download. just return
+ // instead and trace out something so the user
+ // doesn't think that the download happened when it
+ // didn't
+ return null;
+ }
+ // test for 401 result (HTTP only)
+ if (responseCode == HttpURLConnection.HTTP_UNAUTHORIZED) {
+ String message = "HTTP Authorization failure";
+ throw new IOException(message);
+ }
+ }
+
+ //REVISIT: at this point even non HTTP connections may
+ //support the if-modified-since behaviour - we just check
+ //the date of the content and skip the write if it is not
+ //newer. Some protocols (FTP) don't include dates, of
+ //course.
+ return connection;
+ }
+
+ private boolean downloadFile() throws FileNotFoundException, IOException {
+ IOException lastEx = null;
+ for (int i = 0; i < 3; i++) {
+ // this three attempt trick is to get round quirks in different
+ // Java implementations. Some of them take a few goes to bind
+ // properly; we ignore the first couple of such failures.
+ try {
+ is = connection.getInputStream();
+ break;
+ } catch (IOException ex) {
+ lastEx = ex;
+ }
+ }
+ if (is == null) {
+ throw new IOException("Can't get " + source + " to " + dest, lastEx);
+ }
+
+ os = new FileOutputStream(dest);
+ progress.beginDownload();
+ boolean finished = false;
+ try {
+ byte[] buffer = new byte[1024 * 100];
+ int length;
+ while (!isInterrupted() && (length = is.read(buffer)) >= 0) {
+ os.write(buffer, 0, length);
+ progress.onTick();
+ }
+ finished = !isInterrupted();
+ } finally {
+ if (!finished) {
+ // we have started to (over)write dest, but failed.
+ // Try to delete the garbage we'd otherwise leave
+ // behind.
+ IOUtils.closeWhileHandlingException(os, is);
+ dest.delete();
+ } else {
+ IOUtils.close(os, is);
+ }
+ }
+ progress.endDownload();
+ return true;
+ }
+
+ private void updateTimeStamp() {
+ long remoteTimestamp = connection.getLastModified();
+ if (remoteTimestamp != 0) {
+ dest.setLastModified(remoteTimestamp);
+ }
+ }
+
+ /**
+ * Has the download completed successfully?
+ * <p/>
+ * <p>Re-throws any exception caught during execution.</p>
+ */
+ boolean wasSuccessful() throws IOException {
+ if (ioexception != null) {
+ throw ioexception;
+ }
+ return success;
+ }
+
+ /**
+ * Closes streams, interrupts the download, may delete the
+ * output file.
+ */
+ void closeStreams() throws IOException {
+ interrupt();
+ if (success) {
+ IOUtils.close(is, os);
+ } else {
+ IOUtils.closeWhileHandlingException(is, os);
+ if (dest != null && dest.exists()) {
+ dest.delete();
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/AbstractModule.java b/src/main/java/org/elasticsearch/common/inject/AbstractModule.java
new file mode 100644
index 0000000..4b15f4b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/AbstractModule.java
@@ -0,0 +1,240 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
+import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
+import org.elasticsearch.common.inject.binder.LinkedBindingBuilder;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.spi.TypeConverter;
+import org.elasticsearch.common.inject.spi.TypeListener;
+
+import java.lang.annotation.Annotation;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * A support class for {@link Module}s which reduces repetition and results in
+ * a more readable configuration. Simply extend this class, implement {@link
+ * #configure()}, and call the inherited methods which mirror those found in
+ * {@link Binder}. For example:
+ * <p/>
+ * <pre>
+ * public class MyModule extends AbstractModule {
+ * protected void configure() {
+ * bind(Service.class).to(ServiceImpl.class).in(Singleton.class);
+ * bind(CreditCardPaymentService.class);
+ * bind(PaymentService.class).to(CreditCardPaymentService.class);
+ * bindConstant().annotatedWith(Names.named("port")).to(8080);
+ * }
+ * }
+ * </pre>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public abstract class AbstractModule implements Module {
+
+ Binder binder;
+
+ public final synchronized void configure(Binder builder) {
+ checkState(this.binder == null, "Re-entry is not allowed.");
+
+ this.binder = checkNotNull(builder, "builder");
+ try {
+ configure();
+ } finally {
+ this.binder = null;
+ }
+ }
+
+ /**
+ * Configures a {@link Binder} via the exposed methods.
+ */
+ protected abstract void configure();
+
+ /**
+ * Gets direct access to the underlying {@code Binder}.
+ */
+ protected Binder binder() {
+ return binder;
+ }
+
+ /**
+ * @see Binder#bindScope(Class, Scope)
+ */
+ protected void bindScope(Class<? extends Annotation> scopeAnnotation,
+ Scope scope) {
+ binder.bindScope(scopeAnnotation, scope);
+ }
+
+ /**
+ * @see Binder#bind(Key)
+ */
+ protected <T> LinkedBindingBuilder<T> bind(Key<T> key) {
+ return binder.bind(key);
+ }
+
+ /**
+ * @see Binder#bind(TypeLiteral)
+ */
+ protected <T> AnnotatedBindingBuilder<T> bind(TypeLiteral<T> typeLiteral) {
+ return binder.bind(typeLiteral);
+ }
+
+ /**
+ * @see Binder#bind(Class)
+ */
+ protected <T> AnnotatedBindingBuilder<T> bind(Class<T> clazz) {
+ return binder.bind(clazz);
+ }
+
+ /**
+ * @see Binder#bindConstant()
+ */
+ protected AnnotatedConstantBindingBuilder bindConstant() {
+ return binder.bindConstant();
+ }
+
+ /**
+ * @see Binder#install(Module)
+ */
+ protected void install(Module module) {
+ binder.install(module);
+ }
+
+ /**
+ * @see Binder#addError(String, Object[])
+ */
+ protected void addError(String message, Object... arguments) {
+ binder.addError(message, arguments);
+ }
+
+ /**
+ * @see Binder#addError(Throwable)
+ */
+ protected void addError(Throwable t) {
+ binder.addError(t);
+ }
+
+ /**
+ * @see Binder#addError(Message)
+ * @since 2.0
+ */
+ protected void addError(Message message) {
+ binder.addError(message);
+ }
+
+ /**
+ * @see Binder#requestInjection(Object)
+ * @since 2.0
+ */
+ protected void requestInjection(Object instance) {
+ binder.requestInjection(instance);
+ }
+
+ /**
+ * @see Binder#requestStaticInjection(Class[])
+ */
+ protected void requestStaticInjection(Class<?>... types) {
+ binder.requestStaticInjection(types);
+ }
+
+ /**
+ * Adds a dependency from this module to {@code key}. When the injector is
+ * created, Guice will report an error if {@code key} cannot be injected.
+ * Note that this requirement may be satisfied by implicit binding, such as
+ * a public no-arguments constructor.
+ *
+ * @since 2.0
+ */
+ protected void requireBinding(Key<?> key) {
+ binder.getProvider(key);
+ }
+
+ /**
+ * Adds a dependency from this module to {@code type}. When the injector is
+ * created, Guice will report an error if {@code type} cannot be injected.
+ * Note that this requirement may be satisfied by implicit binding, such as
+ * a public no-arguments constructor.
+ *
+ * @since 2.0
+ */
+ protected void requireBinding(Class<?> type) {
+ binder.getProvider(type);
+ }
+
+ /**
+ * @see Binder#getProvider(Key)
+ * @since 2.0
+ */
+ protected <T> Provider<T> getProvider(Key<T> key) {
+ return binder.getProvider(key);
+ }
+
+ /**
+ * @see Binder#getProvider(Class)
+ * @since 2.0
+ */
+ protected <T> Provider<T> getProvider(Class<T> type) {
+ return binder.getProvider(type);
+ }
+
+ /**
+ * @see Binder#convertToTypes
+ * @since 2.0
+ */
+ protected void convertToTypes(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter converter) {
+ binder.convertToTypes(typeMatcher, converter);
+ }
+
+ /**
+ * @see Binder#currentStage()
+ * @since 2.0
+ */
+ protected Stage currentStage() {
+ return binder.currentStage();
+ }
+
+ /**
+ * @see Binder#getMembersInjector(Class)
+ * @since 2.0
+ */
+ protected <T> MembersInjector<T> getMembersInjector(Class<T> type) {
+ return binder.getMembersInjector(type);
+ }
+
+ /**
+ * @see Binder#getMembersInjector(TypeLiteral)
+ * @since 2.0
+ */
+ protected <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> type) {
+ return binder.getMembersInjector(type);
+ }
+
+ /**
+ * @see Binder#bindListener(org.elasticsearch.common.inject.matcher.Matcher,
+ * org.elasticsearch.common.inject.spi.TypeListener)
+ * @since 2.0
+ */
+ protected void bindListener(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeListener listener) {
+ binder.bindListener(typeMatcher, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/AbstractProcessor.java b/src/main/java/org/elasticsearch/common/inject/AbstractProcessor.java
new file mode 100644
index 0000000..fad6926
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/AbstractProcessor.java
@@ -0,0 +1,106 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.*;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Abstract base class for creating an injector from module elements.
+ * <p/>
+ * <p>Extending classes must return {@code true} from any overridden
+ * {@code visit*()} methods, in order for the element processor to remove the
+ * handled element.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+abstract class AbstractProcessor implements ElementVisitor<Boolean> {
+
+ protected Errors errors;
+ protected InjectorImpl injector;
+
+ protected AbstractProcessor(Errors errors) {
+ this.errors = errors;
+ }
+
+ public void process(Iterable<InjectorShell> isolatedInjectorBuilders) {
+ for (InjectorShell injectorShell : isolatedInjectorBuilders) {
+ process(injectorShell.getInjector(), injectorShell.getElements());
+ }
+ }
+
+ public void process(InjectorImpl injector, List<Element> elements) {
+ Errors errorsAnyElement = this.errors;
+ this.injector = injector;
+ try {
+ for (Iterator<Element> i = elements.iterator(); i.hasNext(); ) {
+ Element element = i.next();
+ this.errors = errorsAnyElement.withSource(element.getSource());
+ Boolean allDone = element.acceptVisitor(this);
+ if (allDone) {
+ i.remove();
+ }
+ }
+ } finally {
+ this.errors = errorsAnyElement;
+ this.injector = null;
+ }
+ }
+
+ public Boolean visit(Message message) {
+ return false;
+ }
+
+ public Boolean visit(ScopeBinding scopeBinding) {
+ return false;
+ }
+
+ public Boolean visit(InjectionRequest injectionRequest) {
+ return false;
+ }
+
+ public Boolean visit(StaticInjectionRequest staticInjectionRequest) {
+ return false;
+ }
+
+ public Boolean visit(TypeConverterBinding typeConverterBinding) {
+ return false;
+ }
+
+ public <T> Boolean visit(Binding<T> binding) {
+ return false;
+ }
+
+ public <T> Boolean visit(ProviderLookup<T> providerLookup) {
+ return false;
+ }
+
+ public Boolean visit(PrivateElements privateElements) {
+ return false;
+ }
+
+ public <T> Boolean visit(MembersInjectorLookup<T> lookup) {
+ return false;
+ }
+
+ public Boolean visit(TypeListenerBinding binding) {
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Binder.java b/src/main/java/org/elasticsearch/common/inject/Binder.java
new file mode 100644
index 0000000..9315513
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Binder.java
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
+import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
+import org.elasticsearch.common.inject.binder.LinkedBindingBuilder;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.spi.TypeConverter;
+import org.elasticsearch.common.inject.spi.TypeListener;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * Collects configuration information (primarily <i>bindings</i>) which will be
+ * used to create an {@link Injector}. Guice provides this object to your
+ * application's {@link Module} implementors so they may each contribute
+ * their own bindings and other registrations.
+ * <p/>
+ * <h3>The Guice Binding EDSL</h3>
+ * <p/>
+ * Guice uses an <i>embedded domain-specific language</i>, or EDSL, to help you
+ * create bindings simply and readably. This approach is great for overall
+ * usability, but it does come with a small cost: <b>it is difficult to
+ * learn how to use the Binding EDSL by reading
+ * method-level javadocs</b>. Instead, you should consult the series of
+ * examples below. To save space, these examples omit the opening
+ * {@code binder}, just as you will if your module extends
+ * {@link AbstractModule}.
+ * <p/>
+ * <pre>
+ * bind(ServiceImpl.class);</pre>
+ *
+ * This statement does essentially nothing; it "binds the {@code ServiceImpl}
+ * class to itself" and does not change Guice's default behavior. You may still
+ * want to use this if you prefer your {@link Module} class to serve as an
+ * explicit <i>manifest</i> for the services it provides. Also, in rare cases,
+ * Guice may be unable to validate a binding at injector creation time unless it
+ * is given explicitly.
+ *
+ * <pre>
+ * bind(Service.class).to(ServiceImpl.class);</pre>
+ *
+ * Specifies that a request for a {@code Service} instance with no binding
+ * annotations should be treated as if it were a request for a
+ * {@code ServiceImpl} instance. This <i>overrides</i> the function of any
+ * {@link ImplementedBy @ImplementedBy} or {@link ProvidedBy @ProvidedBy}
+ * annotations found on {@code Service}, since Guice will have already
+ * "moved on" to {@code ServiceImpl} before it reaches the point when it starts
+ * looking for these annotations.
+ *
+ * <pre>
+ * bind(Service.class).toProvider(ServiceProvider.class);</pre>
+ *
+ * In this example, {@code ServiceProvider} must extend or implement
+ * {@code Provider<Service>}. This binding specifies that Guice should resolve
+ * an unannotated injection request for {@code Service} by first resolving an
+ * instance of {@code ServiceProvider} in the regular way, then calling
+ * {@link Provider#get get()} on the resulting Provider instance to obtain the
+ * {@code Service} instance.
+ *
+ * <p>The {@link Provider} you use here does not have to be a "factory"; that
+ * is, a provider which always <i>creates</i> each instance it provides.
+ * However, this is generally a good practice to follow. You can then use
+ * Guice's concept of {@link Scope scopes} to guide when creation should happen
+ * -- "letting Guice work for you".
+ *
+ * <pre>
+ * bind(Service.class).annotatedWith(Red.class).to(ServiceImpl.class);</pre>
+ *
+ * Like the previous example, but only applies to injection requests that use
+ * the binding annotation {@code @Red}. If your module also includes bindings
+ * for particular <i>values</i> of the {@code @Red} annotation (see below),
+ * then this binding will serve as a "catch-all" for any values of {@code @Red}
+ * that have no exact match in the bindings.
+ *
+ * <pre>
+ * bind(ServiceImpl.class).in(Singleton.class);
+ * // or, alternatively
+ * bind(ServiceImpl.class).in(Scopes.SINGLETON);</pre>
+ *
+ * Either of these statements places the {@code ServiceImpl} class into
+ * singleton scope. Guice will create only one instance of {@code ServiceImpl}
+ * and will reuse it for all injection requests of this type. Note that it is
+ * still possible to bind another instance of {@code ServiceImpl} if the second
+ * binding is qualified by an annotation as in the previous example. Guice is
+ * not overly concerned with <i>preventing</i> you from creating multiple
+ * instances of your "singletons", only with <i>enabling</i> your application to
+ * share only one instance if that's all you tell Guice you need.
+ *
+ * <p><b>Note:</b> a scope specified in this way <i>overrides</i> any scope that
+ * was specified with an annotation on the {@code ServiceImpl} class.
+ *
+ * <p>Besides {@link Singleton}/{@link Scopes#SINGLETON}, there are
+ * servlet-specific scopes available in
+ * {@code com.google.inject.servlet.ServletScopes}, and your Modules can
+ * contribute their own custom scopes for use here as well.
+ *
+ * <pre>
+ * bind(new TypeLiteral&lt;PaymentService&lt;CreditCard>>() {})
+ * .to(CreditCardPaymentService.class);</pre>
+ *
+ * This admittedly odd construct is the way to bind a parameterized type. It
+ * tells Guice how to honor an injection request for an element of type
+ * {@code PaymentService<CreditCard>}. The class
+ * {@code CreditCardPaymentService} must implement the
+ * {@code PaymentService<CreditCard>} interface. Guice cannot currently bind or
+ * inject a generic type, such as {@code Set<E>}; all type parameters must be
+ * fully specified.
+ *
+ * <pre>
+ * bind(Service.class).toInstance(new ServiceImpl());
+ * // or, alternatively
+ * bind(Service.class).toInstance(SomeLegacyRegistry.getService());</pre>
+ *
+ * In this example, your module itself, <i>not Guice</i>, takes responsibility
+ * for obtaining a {@code ServiceImpl} instance, then asks Guice to always use
+ * this single instance to fulfill all {@code Service} injection requests. When
+ * the {@link Injector} is created, it will automatically perform field
+ * and method injection for this instance, but any injectable constructor on
+ * {@code ServiceImpl} is simply ignored. Note that using this approach results
+ * in "eager loading" behavior that you can't control.
+ *
+ * <pre>
+ * bindConstant().annotatedWith(ServerHost.class).to(args[0]);</pre>
+ *
+ * Sets up a constant binding. Constant injections must always be annotated.
+ * When a constant binding's value is a string, it is eligible for conversion to
+ * all primitive types, to {@link Enum#valueOf(Class, String) all enums}, and to
+ * {@link Class#forName class literals}. Conversions for other types can be
+ * configured using {@link #convertToTypes(Matcher, TypeConverter)
+ * convertToTypes()}.
+ *
+ * <pre>
+ * {@literal @}Color("red") Color red; // A member variable (field)
+ * . . .
+ * red = MyModule.class.getDeclaredField("red").getAnnotation(Color.class);
+ * bind(Service.class).annotatedWith(red).to(RedService.class);</pre>
+ *
+ * If your binding annotation has parameters you can apply different bindings to
+ * different specific values of your annotation. Getting your hands on the
+ * right instance of the annotation is a bit of a pain -- one approach, shown
+ * above, is to apply a prototype annotation to a field in your module class, so
+ * that you can read this annotation instance and give it to Guice.
+ *
+ * <pre>
+ * bind(Service.class)
+ * .annotatedWith(Names.named("blue"))
+ * .to(BlueService.class);</pre>
+ *
+ * Differentiating by names is a common enough use case that we provided a
+ * standard annotation, {@link org.elasticsearch.common.inject.name.Named @Named}. Because of
+ * Guice's library support, binding by name is considerably easier than in the
+ * arbitrary binding annotation case we just saw. However, remember that these
+ * names will live in a single flat namespace with all the other names used in
+ * your application.
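+ *
+ * <p>The matching injection point simply uses the same name, for example:
+ *
+ * <pre>
+ *     {@literal @}Inject {@literal @}Named("blue") Service service;</pre>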
+ *
+ * <p>The above list of examples is far from exhaustive. If you can think of
+ * how the concepts of one example might coexist with the concepts from another,
+ * you can most likely weave the two together. If the two concepts make no
+ * sense with each other, you most likely won't be able to do it. In a few
+ * cases Guice will let something bogus slip by, and will then inform you of
+ * the problems at runtime, as soon as you try to create your Injector.
+ *
+ * <p>The other methods of Binder such as {@link #bindScope},
+ * {@link #bindInterceptor}, {@link #install}, {@link #requestStaticInjection},
+ * {@link #addError} and {@link #currentStage} are not part of the Binding EDSL;
+ * you can learn how to use these in the usual way, from the method
+ * documentation.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @author kevinb@google.com (Kevin Bourrillion)
+ */
+public interface Binder {
+
+ /**
+ * Binds a scope to an annotation.
+ */
+ void bindScope(Class<? extends Annotation> annotationType, Scope scope);
+
+ /**
+ * See the EDSL examples at {@link Binder}.
+ */
+ <T> LinkedBindingBuilder<T> bind(Key<T> key);
+
+ /**
+ * See the EDSL examples at {@link Binder}.
+ */
+ <T> AnnotatedBindingBuilder<T> bind(TypeLiteral<T> typeLiteral);
+
+ /**
+ * See the EDSL examples at {@link Binder}.
+ */
+ <T> AnnotatedBindingBuilder<T> bind(Class<T> type);
+
+ /**
+ * See the EDSL examples at {@link Binder}.
+ */
+ AnnotatedConstantBindingBuilder bindConstant();
+
+ /**
+ * Upon successful creation, the {@link Injector} will inject instance fields
+ * and methods of the given object.
+ *
+ * @param type of instance
+ * @param instance for which members will be injected
+ * @since 2.0
+ */
+ <T> void requestInjection(TypeLiteral<T> type, T instance);
+
+ /**
+ * Upon successful creation, the {@link Injector} will inject instance fields
+ * and methods of the given object.
+ *
+ * @param instance for which members will be injected
+ * @since 2.0
+ */
+ void requestInjection(Object instance);
+
+ /**
+ * Upon successful creation, the {@link Injector} will inject static fields
+ * and methods in the given classes.
+ *
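+ * <p>A minimal sketch (the class name here is invented):
+ *
+ * <pre>
+ *     requestStaticInjection(LegacyServiceLocator.class);</pre>
+ *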
+ * @param types for which static members will be injected
+ */
+ void requestStaticInjection(Class<?>... types);
+
+ /**
+ * Uses the given module to configure more bindings.
+ */
+ void install(Module module);
+
+ /**
+ * Gets the current stage.
+ */
+ Stage currentStage();
+
+ /**
+ * Records an error message which will be presented to the user at a later
+ * time. Unlike throwing an exception, this enables us to continue
+ * configuring the Injector and discover more errors. Uses {@link
+ * String#format(String, Object[])} to insert the arguments into the
+ * message.
+ */
+ void addError(String message, Object... arguments);
+
+ /**
+ * Records an exception, the full details of which will be logged, and the
+ * message of which will be presented to the user at a later
+ * time. If your Module calls something that you worry may fail, you should
+ * catch the exception and pass it into this.
+ */
+ void addError(Throwable t);
+
+ /**
+ * Records an error message to be presented to the user at a later time.
+ *
+ * @since 2.0
+ */
+ void addError(Message message);
+
+ /**
+ * Returns the provider used to obtain instances for the given injection key.
+ * The returned provider will not be valid until the {@link Injector} has been
+ * created. The provider will throw an {@code IllegalStateException} if you
+ * try to use it beforehand.
+ *
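+ * <p>A sketch of a deferred lookup from within a module:
+ *
+ * <pre>
+ *     Provider&lt;Service> serviceProvider = getProvider(Key.get(Service.class));
+ *     // calling serviceProvider.get() here, before the Injector exists,
+ *     // would throw IllegalStateException</pre>
+ *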
+ * @since 2.0
+ */
+ <T> Provider<T> getProvider(Key<T> key);
+
+ /**
+ * Returns the provider used to obtain instances for the given injection type.
+ * The returned provider will not be valid until the {@link Injector} has been
+ * created. The provider will throw an {@code IllegalStateException} if you
+ * try to use it beforehand.
+ *
+ * @since 2.0
+ */
+ <T> Provider<T> getProvider(Class<T> type);
+
+ /**
+ * Returns the members injector used to inject dependencies into methods and fields on instances
+ * of the given type {@code T}. The returned members injector will not be valid until the main
+ * {@link Injector} has been created. The members injector will throw an {@code
+ * IllegalStateException} if you try to use it beforehand.
+ *
+ * @param typeLiteral type to get members injector for
+ * @since 2.0
+ */
+ <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral);
+
+ /**
+ * Returns the members injector used to inject dependencies into methods and fields on instances
+ * of the given type {@code T}. The returned members injector will not be valid until the main
+ * {@link Injector} has been created. The members injector will throw an {@code
+ * IllegalStateException} if you try to use it beforehand.
+ *
+ * @param type type to get members injector for
+ * @since 2.0
+ */
+ <T> MembersInjector<T> getMembersInjector(Class<T> type);
+
+ /**
+ * Binds a type converter. The injector will use the given converter to
+ * convert string constants to matching types as needed.
+ *
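+ * <p>A sketch of a registration; {@code DateTime} and
+ * {@code DateTimeConverter} are hypothetical:
+ *
+ * <pre>
+ *     convertToTypes(Matchers.only(TypeLiteral.get(DateTime.class)),
+ *         new DateTimeConverter());</pre>
+ *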
+ * @param typeMatcher matches types the converter can handle
+ * @param converter converts values
+ * @since 2.0
+ */
+ void convertToTypes(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter converter);
+
+ /**
+ * Registers a listener for injectable types. Guice will notify the listener when it encounters
+ * injectable types matched by the given type matcher.
+ *
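+ * <p>For instance (a sketch; the listener class is invented):
+ *
+ * <pre>
+ *     bindListener(Matchers.any(), new LoggingTypeListener());</pre>
+ *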
+ * @param typeMatcher that matches injectable types the listener should be notified of
+ * @param listener for injectable types matched by typeMatcher
+ * @since 2.0
+ */
+ void bindListener(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeListener listener);
+
+ /**
+ * Returns a binder that uses {@code source} as the reference location for
+ * configuration errors. This is typically a {@link StackTraceElement}
+ * for {@code .java} source, but it could be any binding source, such as the
+ * path to a {@code .properties} file.
+ *
+ * @param source any object representing the source location that has a
+ * concise {@link Object#toString() toString()} value
+ * @return a binder that shares its configuration with this binder
+ * @since 2.0
+ */
+ Binder withSource(Object source);
+
+ /**
+ * Returns a binder that skips {@code classesToSkip} when identifying the
+ * calling code. The caller's {@link StackTraceElement} is used to locate
+ * the source of configuration errors.
+ *
+ * @param classesToSkip library classes that create bindings on behalf of
+ * their clients.
+ * @return a binder that shares its configuration with this binder.
+ * @since 2.0
+ */
+ Binder skipSources(Class... classesToSkip);
+
+ /**
+ * Creates a new private child environment for bindings and other configuration. The returned
+ * binder can be used to add bindings and other configuration in this environment. See {@link
+ * PrivateModule} for details.
+ *
+ * @return a binder that inherits configuration from this binder. Only exposed configuration on
+ * the returned binder will be visible to this binder.
+ * @since 2.0
+ */
+ PrivateBinder newPrivateBinder();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Binding.java b/src/main/java/org/elasticsearch/common/inject/Binding.java
new file mode 100644
index 0000000..b854d1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Binding.java
@@ -0,0 +1,91 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.spi.BindingScopingVisitor;
+import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
+import org.elasticsearch.common.inject.spi.Element;
+
+/**
+ * A mapping from a key (type and optional annotation) to the strategy for getting instances of the
+ * type. This interface is part of the introspection API and is intended primarily for use by
+ * tools.
+ *
+ * <p>Bindings are created in several ways:
+ * <ul>
+ * <li>Explicitly in a module, via {@code bind()} and {@code bindConstant()}
+ * statements:
+ * <pre>
+ * bind(Service.class).annotatedWith(Red.class).to(ServiceImpl.class);
+ * bindConstant().annotatedWith(ServerHost.class).to(args[0]);</pre></li>
+ * <li>Implicitly by the Injector by following a type's {@link ImplementedBy
+ * pointer} or {@link ProvidedBy annotations}, or by using its {@link Inject annotated} or
+ * default constructor.</li>
+ * <li>By converting a bound instance to a different type.</li>
+ * <li>For {@link Provider providers}, by delegating to the binding for the provided type.</li>
+ * </ul>
+ *
+ *
+ * <p>They exist on both modules and on injectors, and their behavior is different for each:
+ * <ul>
+ * <li><strong>Module bindings</strong> are incomplete and cannot be used to provide instances.
+ * This is because the applicable scopes and interceptors may not be known until an injector
+ * is created. From a tool's perspective, module bindings are like the injector's source
+ * code. They can be inspected or rewritten, but this analysis must be done statically.</li>
+ * <li><strong>Injector bindings</strong> are complete and valid and can be used to provide
+ * instances. From a tool's perspective, injector bindings are like reflection for an
+ * injector. They have full runtime information, including the complete graph of injections
+ * necessary to satisfy a binding.</li>
+ * </ul>
+ *
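+ * <p>For example, a tool could dump an injector's bindings and their sources
+ * (a sketch, assuming an already-created {@code injector}):
+ *
+ * <pre>
+ *     for (Binding&lt;?> binding : injector.getBindings().values()) {
+ *         System.out.println(binding.getKey() + " from " + binding.getSource());
+ *     }</pre>
+ *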
+ * @param <T> the bound type. The injected instance is always assignable to this type.
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public interface Binding<T> extends Element {
+
+ /**
+ * Returns the key for this binding.
+ */
+ Key<T> getKey();
+
+ /**
+ * Returns the scoped provider guice uses to fulfill requests for this
+ * binding.
+ *
+ * @throws UnsupportedOperationException when invoked on a {@link Binding}
+ * created via {@link org.elasticsearch.common.inject.spi.Elements#getElements}. This
+ * method is only supported on {@link Binding}s returned from an injector.
+ */
+ Provider<T> getProvider();
+
+ /**
+ * Accepts a target visitor. Invokes the visitor method specific to this binding's target.
+ *
+ * @param visitor to call back on
+ * @since 2.0
+ */
+ <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor);
+
+ /**
+ * Accepts a scoping visitor. Invokes the visitor method specific to this binding's scoping.
+ *
+ * @param visitor to call back on
+ * @since 2.0
+ */
+ <V> V acceptScopingVisitor(BindingScopingVisitor<V> visitor);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/BindingAnnotation.java b/src/main/java/org/elasticsearch/common/inject/BindingAnnotation.java
new file mode 100644
index 0000000..816c306
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/BindingAnnotation.java
@@ -0,0 +1,42 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.ANNOTATION_TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Annotates annotations which are used for binding. Only one such annotation
+ * may apply to a single injection point. You must also annotate binder
+ * annotations with {@code @Retention(RUNTIME)}. For example:
+ *
+ * <pre>
+ * {@code @}Retention(RUNTIME)
+ * {@code @}Target({ FIELD, PARAMETER, METHOD })
+ * {@code @}BindingAnnotation
+ * public {@code @}interface Transactional {}
+ * </pre>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Target(ANNOTATION_TYPE)
+@Retention(RUNTIME)
+public @interface BindingAnnotation {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java b/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java
new file mode 100644
index 0000000..db3cb6b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java
@@ -0,0 +1,268 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.*;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Handles {@link Binder#bind} and {@link Binder#bindConstant} elements.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class BindingProcessor extends AbstractProcessor {
+
+ private final List<CreationListener> creationListeners = Lists.newArrayList();
+ private final Initializer initializer;
+ private final List<Runnable> uninitializedBindings = Lists.newArrayList();
+
+ BindingProcessor(Errors errors, Initializer initializer) {
+ super(errors);
+ this.initializer = initializer;
+ }
+
+ @Override
+ public <T> Boolean visit(Binding<T> command) {
+ final Object source = command.getSource();
+
+ if (Void.class.equals(command.getKey().getRawType())) {
+ if (command instanceof ProviderInstanceBinding
+ && ((ProviderInstanceBinding) command).getProviderInstance() instanceof ProviderMethod) {
+ errors.voidProviderMethod();
+ } else {
+ errors.missingConstantValues();
+ }
+ return true;
+ }
+
+ final Key<T> key = command.getKey();
+ Class<? super T> rawType = key.getTypeLiteral().getRawType();
+
+ if (rawType == Provider.class) {
+ errors.bindingToProvider();
+ return true;
+ }
+
+ validateKey(command.getSource(), command.getKey());
+
+ final Scoping scoping = Scopes.makeInjectable(
+ ((BindingImpl<?>) command).getScoping(), injector, errors);
+
+ command.acceptTargetVisitor(new BindingTargetVisitor<T, Void>() {
+
+ public Void visit(InstanceBinding<? extends T> binding) {
+ Set<InjectionPoint> injectionPoints = binding.getInjectionPoints();
+ T instance = binding.getInstance();
+ Initializable<T> ref = initializer.requestInjection(
+ injector, instance, source, injectionPoints);
+ ConstantFactory<? extends T> factory = new ConstantFactory<T>(ref);
+ InternalFactory<? extends T> scopedFactory = Scopes.scope(key, injector, factory, scoping);
+ putBinding(new InstanceBindingImpl<T>(injector, key, source, scopedFactory, injectionPoints,
+ instance));
+ return null;
+ }
+
+ public Void visit(ProviderInstanceBinding<? extends T> binding) {
+ Provider<? extends T> provider = binding.getProviderInstance();
+ Set<InjectionPoint> injectionPoints = binding.getInjectionPoints();
+ Initializable<Provider<? extends T>> initializable = initializer
+ .<Provider<? extends T>>requestInjection(injector, provider, source, injectionPoints);
+ InternalFactory<T> factory = new InternalFactoryToProviderAdapter<T>(initializable, source);
+ InternalFactory<? extends T> scopedFactory = Scopes.scope(key, injector, factory, scoping);
+ putBinding(new ProviderInstanceBindingImpl<T>(injector, key, source, scopedFactory, scoping,
+ provider, injectionPoints));
+ return null;
+ }
+
+ public Void visit(ProviderKeyBinding<? extends T> binding) {
+ Key<? extends Provider<? extends T>> providerKey = binding.getProviderKey();
+ BoundProviderFactory<T> boundProviderFactory
+ = new BoundProviderFactory<T>(injector, providerKey, source);
+ creationListeners.add(boundProviderFactory);
+ InternalFactory<? extends T> scopedFactory = Scopes.scope(
+ key, injector, (InternalFactory<? extends T>) boundProviderFactory, scoping);
+ putBinding(new LinkedProviderBindingImpl<T>(
+ injector, key, source, scopedFactory, scoping, providerKey));
+ return null;
+ }
+
+ public Void visit(LinkedKeyBinding<? extends T> binding) {
+ Key<? extends T> linkedKey = binding.getLinkedKey();
+ if (key.equals(linkedKey)) {
+ errors.recursiveBinding();
+ }
+
+ FactoryProxy<T> factory = new FactoryProxy<T>(injector, key, linkedKey, source);
+ creationListeners.add(factory);
+ InternalFactory<? extends T> scopedFactory = Scopes.scope(key, injector, factory, scoping);
+ putBinding(
+ new LinkedBindingImpl<T>(injector, key, source, scopedFactory, scoping, linkedKey));
+ return null;
+ }
+
+ public Void visit(UntargettedBinding<? extends T> untargetted) {
+ // Error: Missing implementation.
+ // Example: bind(Date.class).annotatedWith(Red.class);
+ // We can't assume abstract types aren't injectable. They may have an
+ // @ImplementedBy annotation or something.
+ if (key.hasAnnotationType()) {
+ errors.missingImplementation(key);
+ putBinding(invalidBinding(injector, key, source));
+ return null;
+ }
+
+ // This cast is safe after the preceding check.
+ final BindingImpl<T> binding;
+ try {
+ binding = injector.createUnitializedBinding(key, scoping, source, errors);
+ putBinding(binding);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ putBinding(invalidBinding(injector, key, source));
+ return null;
+ }
+
+ uninitializedBindings.add(new Runnable() {
+ public void run() {
+ try {
+ ((InjectorImpl) binding.getInjector()).initializeBinding(
+ binding, errors.withSource(source));
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+ });
+
+ return null;
+ }
+
+ public Void visit(ExposedBinding<? extends T> binding) {
+ throw new IllegalArgumentException("Cannot apply a non-module element");
+ }
+
+ public Void visit(ConvertedConstantBinding<? extends T> binding) {
+ throw new IllegalArgumentException("Cannot apply a non-module element");
+ }
+
+ public Void visit(ConstructorBinding<? extends T> binding) {
+ throw new IllegalArgumentException("Cannot apply a non-module element");
+ }
+
+ public Void visit(ProviderBinding<? extends T> binding) {
+ throw new IllegalArgumentException("Cannot apply a non-module element");
+ }
+ });
+
+ return true;
+ }
+
+ @Override
+ public Boolean visit(PrivateElements privateElements) {
+ for (Key<?> key : privateElements.getExposedKeys()) {
+ bindExposed(privateElements, key);
+ }
+ return false; // leave the private elements for the PrivateElementsProcessor to handle
+ }
+
+ private <T> void bindExposed(PrivateElements privateElements, Key<T> key) {
+ ExposedKeyFactory<T> exposedKeyFactory = new ExposedKeyFactory<T>(key, privateElements);
+ creationListeners.add(exposedKeyFactory);
+ putBinding(new ExposedBindingImpl<T>(
+ injector, privateElements.getExposedSource(key), key, exposedKeyFactory, privateElements));
+ }
+
+ private <T> void validateKey(Object source, Key<T> key) {
+ Annotations.checkForMisplacedScopeAnnotations(key.getRawType(), source, errors);
+ }
+
+ <T> UntargettedBindingImpl<T> invalidBinding(InjectorImpl injector, Key<T> key, Object source) {
+ return new UntargettedBindingImpl<T>(injector, key, source);
+ }
+
+ public void initializeBindings() {
+ for (Runnable initializer : uninitializedBindings) {
+ initializer.run();
+ }
+ }
+
+ public void runCreationListeners() {
+ for (CreationListener creationListener : creationListeners) {
+ creationListener.notify(errors);
+ }
+ }
+
+ private void putBinding(BindingImpl<?> binding) {
+ Key<?> key = binding.getKey();
+
+ Class<?> rawType = key.getRawType();
+ if (FORBIDDEN_TYPES.contains(rawType)) {
+ errors.cannotBindToGuiceType(rawType.getSimpleName());
+ return;
+ }
+
+ Binding<?> original = injector.state.getExplicitBinding(key);
+ if (original != null && !isOkayDuplicate(original, binding)) {
+ errors.bindingAlreadySet(key, original.getSource());
+ return;
+ }
+
+ // prevent the parent from creating a JIT binding for this key
+ injector.state.parent().blacklist(key);
+ injector.state.putBinding(key, binding);
+ }
+
+ /**
+ * We tolerate duplicate bindings only if one exposes the other.
+ *
+ * @param original the binding in the parent injector (candidate for an exposing binding)
+ * @param binding the binding to check (candidate for the exposed binding)
+ */
+ private boolean isOkayDuplicate(Binding<?> original, BindingImpl<?> binding) {
+ if (original instanceof ExposedBindingImpl) {
+ ExposedBindingImpl exposed = (ExposedBindingImpl) original;
+ InjectorImpl exposedFrom = (InjectorImpl) exposed.getPrivateElements().getInjector();
+ return (exposedFrom == binding.getInjector());
+ }
+ return false;
+ }
+
+ // It's unfortunate that we have to maintain a blacklist of specific
+ // classes, but we can't easily block the whole package because of
+ // all our unit tests.
+ private static final Set<Class<?>> FORBIDDEN_TYPES = ImmutableSet.of(
+ AbstractModule.class,
+ Binder.class,
+ Binding.class,
+ Injector.class,
+ Key.class,
+ MembersInjector.class,
+ Module.class,
+ Provider.class,
+ Scope.class,
+ TypeLiteral.class);
+ // TODO(jessewilson): fix BuiltInModule, then add Stage
+
+ interface CreationListener {
+ void notify(Errors errors);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/BoundProviderFactory.java b/src/main/java/org/elasticsearch/common/inject/BoundProviderFactory.java
new file mode 100644
index 0000000..39f2932
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/BoundProviderFactory.java
@@ -0,0 +1,68 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.BindingProcessor.CreationListener;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.internal.InternalFactory;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+/**
+ * Delegates to a custom factory which is also bound in the injector.
+ */
+class BoundProviderFactory<T> implements InternalFactory<T>, CreationListener {
+
+ private final InjectorImpl injector;
+ final Key<? extends Provider<? extends T>> providerKey;
+ final Object source;
+ private InternalFactory<? extends Provider<? extends T>> providerFactory;
+
+ BoundProviderFactory(
+ InjectorImpl injector,
+ Key<? extends Provider<? extends T>> providerKey,
+ Object source) {
+ this.injector = injector;
+ this.providerKey = providerKey;
+ this.source = source;
+ }
+
+ public void notify(Errors errors) {
+ try {
+ providerFactory = injector.getInternalFactory(providerKey, errors.withSource(source));
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ errors = errors.withSource(providerKey);
+ Provider<? extends T> provider = providerFactory.get(errors, context, dependency);
+ try {
+ return errors.checkForNull(provider.get(), source, dependency);
+ } catch (RuntimeException userException) {
+ throw errors.errorInProvider(userException).toException();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return providerKey.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ConfigurationException.java b/src/main/java/org/elasticsearch/common/inject/ConfigurationException.java
new file mode 100644
index 0000000..db7f250
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConfigurationException.java
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.util.Collection;
+
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Thrown when a programming error such as a misplaced annotation, illegal binding, or unsupported
+ * scope is found. Clients should catch this exception, log it, and stop execution.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class ConfigurationException extends RuntimeException {
+
+ private final ImmutableSet<Message> messages;
+ private Object partialValue = null;
+
+ /**
+ * Creates a ConfigurationException containing {@code messages}.
+ */
+ public ConfigurationException(Iterable<Message> messages) {
+ this.messages = ImmutableSet.copyOf(messages);
+ initCause(Errors.getOnlyCause(this.messages));
+ }
+
+ /**
+ * Returns a copy of this configuration exception with the specified partial value.
+ */
+ public ConfigurationException withPartialValue(Object partialValue) {
+ checkState(this.partialValue == null,
+ "Can't clobber existing partial value %s with %s", this.partialValue, partialValue);
+ ConfigurationException result = new ConfigurationException(messages);
+ result.partialValue = partialValue;
+ return result;
+ }
+
+ /**
+ * Returns messages for the errors that caused this exception.
+ */
+ public Collection<Message> getErrorMessages() {
+ return messages;
+ }
+
+ /**
+ * Returns a value that was only partially computed due to this exception. The caller can use
+ * this while collecting additional configuration problems.
+ *
+ * @return the partial value, or {@code null} if none was set. The type of the partial value is
+ * specified by the throwing method.
+ */
+ @SuppressWarnings("unchecked") // this is *extremely* unsafe. We trust the caller here.
+ public <E> E getPartialValue() {
+ return (E) partialValue;
+ }
+
+ @Override
+ public String getMessage() {
+ return Errors.format("Guice configuration errors", messages);
+ }
+
+ private static final long serialVersionUID = 0;
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java b/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java
new file mode 100644
index 0000000..db34eec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+/**
+ * @author crazybob@google.com (Bob Lee)
+ */
+class ConstantFactory<T> implements InternalFactory<T> {
+
+ private final Initializable<T> initializable;
+
+ public ConstantFactory(Initializable<T> initializable) {
+ this.initializable = initializable;
+ }
+
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ return initializable.get(errors);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ConstantFactory.class)
+ .add("value", initializable)
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ConstructionProxy.java b/src/main/java/org/elasticsearch/common/inject/ConstructionProxy.java
new file mode 100644
index 0000000..d88e84d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConstructionProxy.java
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+
+/**
+ * Proxies calls to a {@link java.lang.reflect.Constructor} for a class
+ * {@code T}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+interface ConstructionProxy<T> {
+
+ /**
+ * Constructs an instance of {@code T} for the given arguments.
+ */
+ T newInstance(Object... arguments) throws InvocationTargetException;
+
+ /**
+ * Returns the injection point for this constructor.
+ */
+ InjectionPoint getInjectionPoint();
+
+ /**
+ * Returns the injected constructor. If the injected constructor is synthetic (such as generated
+ * code for method interception), the natural constructor is returned.
+ */
+ Constructor<T> getConstructor();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java b/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java
new file mode 100644
index 0000000..081e43d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java
@@ -0,0 +1,30 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * Creates {@link ConstructionProxy} instances.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+interface ConstructionProxyFactory<T> {
+
+ /**
+ * Gets a construction proxy for the given constructor.
+ */
+ ConstructionProxy<T> create();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java
new file mode 100644
index 0000000..44723a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
+import org.elasticsearch.common.inject.spi.ConstructorBinding;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.checkState;
+
+class ConstructorBindingImpl<T> extends BindingImpl<T> implements ConstructorBinding<T> {
+
+ private final Factory<T> factory;
+
+ private ConstructorBindingImpl(Injector injector, Key<T> key, Object source,
+ InternalFactory<? extends T> scopedFactory, Scoping scoping, Factory<T> factory) {
+ super(injector, key, source, scopedFactory, scoping);
+ this.factory = factory;
+ }
+
+ static <T> ConstructorBindingImpl<T> create(
+ InjectorImpl injector, Key<T> key, Object source, Scoping scoping) {
+ Factory<T> factoryFactory = new Factory<T>();
+ InternalFactory<? extends T> scopedFactory
+ = Scopes.scope(key, injector, factoryFactory, scoping);
+ return new ConstructorBindingImpl<T>(
+ injector, key, source, scopedFactory, scoping, factoryFactory);
+ }
+
+ public void initialize(InjectorImpl injector, Errors errors) throws ErrorsException {
+ factory.constructorInjector = injector.constructors.get(getKey().getTypeLiteral(), errors);
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ checkState(factory.constructorInjector != null, "not initialized");
+ return visitor.visit(this);
+ }
+
+ public InjectionPoint getConstructor() {
+ checkState(factory.constructorInjector != null, "Binding is not ready");
+ return factory.constructorInjector.getConstructionProxy().getInjectionPoint();
+ }
+
+ public Set<InjectionPoint> getInjectableMembers() {
+ checkState(factory.constructorInjector != null, "Binding is not ready");
+ return factory.constructorInjector.getInjectableMembers();
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return Dependency.forInjectionPoints(new ImmutableSet.Builder<InjectionPoint>()
+ .add(getConstructor())
+ .addAll(getInjectableMembers())
+ .build());
+ }
+
+ public void applyTo(Binder binder) {
+ throw new UnsupportedOperationException("This element represents a synthetic binding.");
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ConstructorBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .add("scope", getScoping())
+ .toString();
+ }
+
+ private static class Factory<T> implements InternalFactory<T> {
+ private ConstructorInjector<T> constructorInjector;
+
+ @SuppressWarnings("unchecked")
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ checkState(constructorInjector != null, "Constructor not ready");
+
+ // This may not actually be safe because it could return a super type of T (if that's all the
+ // client needs), but it should be OK in practice thanks to the wonders of erasure.
+ return (T) constructorInjector.construct(errors, context, dependency.getKey().getRawType());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java b/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java
new file mode 100644
index 0000000..f732a0d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.ConstructionContext;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.lang.reflect.InvocationTargetException;
+
+/**
+ * Creates instances using an injectable constructor. After construction, all injectable fields and
+ * methods are injected.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+class ConstructorInjector<T> {
+
+ private final ImmutableSet<InjectionPoint> injectableMembers;
+ private final SingleParameterInjector<?>[] parameterInjectors;
+ private final ConstructionProxy<T> constructionProxy;
+ private final MembersInjectorImpl<T> membersInjector;
+
+ ConstructorInjector(ImmutableSet<InjectionPoint> injectableMembers,
+ ConstructionProxy<T> constructionProxy,
+ SingleParameterInjector<?>[] parameterInjectors,
+ MembersInjectorImpl<T> membersInjector)
+ throws ErrorsException {
+ this.injectableMembers = injectableMembers;
+ this.constructionProxy = constructionProxy;
+ this.parameterInjectors = parameterInjectors;
+ this.membersInjector = membersInjector;
+ }
+
+ public ImmutableSet<InjectionPoint> getInjectableMembers() {
+ return injectableMembers;
+ }
+
+ ConstructionProxy<T> getConstructionProxy() {
+ return constructionProxy;
+ }
+
+ /**
+ * Construct an instance. Returns {@code Object} instead of {@code T} because
+ * it may return a proxy.
+ */
+ Object construct(Errors errors, InternalContext context, Class<?> expectedType)
+ throws ErrorsException {
+ ConstructionContext<T> constructionContext = context.getConstructionContext(this);
+
+ // We have a circular reference between constructors. Return a proxy.
+ if (constructionContext.isConstructing()) {
+ // TODO (crazybob): if we can't proxy this object, can we proxy the other object?
+ return constructionContext.createProxy(errors, expectedType);
+ }
+
+ // If we're re-entering this factory while injecting fields or methods,
+ // return the same instance. This prevents infinite loops.
+ T t = constructionContext.getCurrentReference();
+ if (t != null) {
+ return t;
+ }
+
+ try {
+ // First time through...
+ constructionContext.startConstruction();
+ try {
+ Object[] parameters = SingleParameterInjector.getAll(errors, context, parameterInjectors);
+ t = constructionProxy.newInstance(parameters);
+ constructionContext.setProxyDelegates(t);
+ } finally {
+ constructionContext.finishConstruction();
+ }
+
+ // Store reference. If an injector re-enters this factory, they'll get the same reference.
+ constructionContext.setCurrentReference(t);
+
+ membersInjector.injectMembers(t, errors, context);
+ membersInjector.notifyListeners(t, errors);
+
+ return t;
+ } catch (InvocationTargetException userException) {
+ Throwable cause = userException.getCause() != null
+ ? userException.getCause()
+ : userException;
+ throw errors.withSource(constructionProxy.getInjectionPoint())
+ .errorInjectingConstructor(cause).toException();
+ } finally {
+ constructionContext.removeCurrentReference();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java b/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java
new file mode 100644
index 0000000..cd86144
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java
@@ -0,0 +1,76 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.FailableCache;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+/**
+ * Constructor injectors by type.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class ConstructorInjectorStore {
+ private final InjectorImpl injector;
+
+ private final FailableCache<TypeLiteral<?>, ConstructorInjector<?>> cache
+ = new FailableCache<TypeLiteral<?>, ConstructorInjector<?>>() {
+ @SuppressWarnings("unchecked")
+ protected ConstructorInjector<?> create(TypeLiteral<?> type, Errors errors)
+ throws ErrorsException {
+ return createConstructor(type, errors);
+ }
+ };
+
+ ConstructorInjectorStore(InjectorImpl injector) {
+ this.injector = injector;
+ }
+
+ /**
+ * Returns a new complete constructor injector with injection listeners registered.
+ */
+ @SuppressWarnings("unchecked") // the ConstructorInjector type always agrees with the passed type
+ public <T> ConstructorInjector<T> get(TypeLiteral<T> key, Errors errors) throws ErrorsException {
+ return (ConstructorInjector<T>) cache.get(key, errors);
+ }
+
+ private <T> ConstructorInjector<T> createConstructor(TypeLiteral<T> type, Errors errors)
+ throws ErrorsException {
+ int numErrorsBefore = errors.size();
+
+ InjectionPoint injectionPoint;
+ try {
+ injectionPoint = InjectionPoint.forConstructorOf(type);
+ } catch (ConfigurationException e) {
+ errors.merge(e.getErrorMessages());
+ throw errors.toException();
+ }
+
+ SingleParameterInjector<?>[] constructorParameterInjectors
+ = injector.getParametersInjectors(injectionPoint.getDependencies(), errors);
+ MembersInjectorImpl<T> membersInjector = injector.membersInjectorStore.get(type, errors);
+
+ ConstructionProxyFactory<T> factory = new DefaultConstructionProxyFactory<T>(injectionPoint);
+
+ errors.throwIfNewErrors(numErrorsBefore);
+
+ return new ConstructorInjector<T>(membersInjector.getInjectionPoints(), factory.create(),
+ constructorParameterInjectors, membersInjector);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ContextualCallable.java b/src/main/java/org/elasticsearch/common/inject/ContextualCallable.java
new file mode 100644
index 0000000..19ddf4f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ContextualCallable.java
@@ -0,0 +1,27 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+
+/**
+ * @author crazybob@google.com (Bob Lee)
+ */
+interface ContextualCallable<T> {
+ T call(InternalContext context) throws ErrorsException;
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/CreationException.java b/src/main/java/org/elasticsearch/common/inject/CreationException.java
new file mode 100644
index 0000000..4553bc2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/CreationException.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.util.Collection;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Thrown when errors occur while creating a {@link Injector}. Includes a list of encountered
+ * errors. Clients should catch this exception, log it, and stop execution.
+ *
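+ * <p>A typical handling sketch ({@code MyModule} is hypothetical):
+ *
+ * <pre>
+ *     try {
+ *         Injector injector = Guice.createInjector(new MyModule());
+ *     } catch (CreationException e) {
+ *         for (Message message : e.getErrorMessages()) {
+ *             System.err.println(message);
+ *         }
+ *         throw e;
+ *     }</pre>
+ *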
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class CreationException extends RuntimeException {
+
+ private final ImmutableSet<Message> messages;
+
+ /**
+ * Creates a CreationException containing {@code messages}.
+ */
+ public CreationException(Collection<Message> messages) {
+ this.messages = ImmutableSet.copyOf(messages);
+ checkArgument(!this.messages.isEmpty());
+ initCause(Errors.getOnlyCause(this.messages));
+ }
+
+ /**
+ * Returns messages for the errors that caused this exception.
+ */
+ public Collection<Message> getErrorMessages() {
+ return messages;
+ }
+
+ @Override
+ public String getMessage() {
+ return Errors.format("Guice creation errors", messages);
+ }
+
+ private static final long serialVersionUID = 0;
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java b/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java
new file mode 100644
index 0000000..f1c860a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Modifier;
+
+/**
+ * Produces construction proxies that invoke the class constructor.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+class DefaultConstructionProxyFactory<T> implements ConstructionProxyFactory<T> {
+
+ private final InjectionPoint injectionPoint;
+
+ /**
+ * @param injectionPoint an injection point whose member is a constructor of {@code T}.
+ */
+ DefaultConstructionProxyFactory(InjectionPoint injectionPoint) {
+ this.injectionPoint = injectionPoint;
+ }
+
+ public ConstructionProxy<T> create() {
+ @SuppressWarnings("unchecked") // the injection point is for a constructor of T
+ final Constructor<T> constructor = (Constructor<T>) injectionPoint.getMember();
+
+ // There is no FastConstructor optimization here; we only need to make
+ // non-public constructors accessible before invoking them.
+ if (!Modifier.isPublic(constructor.getModifiers())) {
+     constructor.setAccessible(true);
+ }
+
+ return new ConstructionProxy<T>() {
+ public T newInstance(Object... arguments) throws InvocationTargetException {
+ try {
+ return constructor.newInstance(arguments);
+ } catch (InstantiationException e) {
+ throw new AssertionError(e); // shouldn't happen, we know this is a concrete type
+ } catch (IllegalAccessException e) {
+ throw new AssertionError(e); // a security manager is blocking us, we're hosed
+ }
+ }
+
+ public InjectionPoint getInjectionPoint() {
+ return injectionPoint;
+ }
+
+ public Constructor<T> getConstructor() {
+ return constructor;
+ }
+ };
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java b/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java
new file mode 100644
index 0000000..5f0dcd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java
@@ -0,0 +1,60 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Element;
+import org.elasticsearch.common.inject.spi.MembersInjectorLookup;
+import org.elasticsearch.common.inject.spi.ProviderLookup;
+
+import java.util.List;
+
+/**
+ * Returns providers and members injectors that haven't yet been initialized. As a part of injector
+ * creation it's necessary to {@link #initialize initialize} these lookups.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class DeferredLookups implements Lookups {
+ private final InjectorImpl injector;
+ private final List<Element> lookups = Lists.newArrayList();
+
+ public DeferredLookups(InjectorImpl injector) {
+ this.injector = injector;
+ }
+
+ /**
+ * Initialize the specified lookups, either immediately or when the injector is created.
+ */
+ public void initialize(Errors errors) {
+ // The injector itself can satisfy lookups from now on; replay the
+ // lookups collected so far against it.
+ injector.lookups = injector;
+ new LookupProcessor(errors).process(injector, lookups);
+ }
+
+ public <T> Provider<T> getProvider(Key<T> key) {
+ ProviderLookup<T> lookup = new ProviderLookup<T>(key, key);
+ lookups.add(lookup);
+ return lookup.getProvider();
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> type) {
+ MembersInjectorLookup<T> lookup = new MembersInjectorLookup<T>(type, type);
+ lookups.add(lookup);
+ return lookup.getMembersInjector();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java b/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java
new file mode 100644
index 0000000..ccd5f9a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java
@@ -0,0 +1,114 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.InjectionListener;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.spi.TypeEncounter;
+
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+final class EncounterImpl<T> implements TypeEncounter<T> {
+
+ private final Errors errors;
+ private final Lookups lookups;
+ private List<MembersInjector<? super T>> membersInjectors; // lazy
+ private List<InjectionListener<? super T>> injectionListeners; // lazy
+ private boolean valid = true;
+
+ public EncounterImpl(Errors errors, Lookups lookups) {
+ this.errors = errors;
+ this.lookups = lookups;
+ }
+
+ public void invalidate() {
+ valid = false;
+ }
+
+ public ImmutableList<MembersInjector<? super T>> getMembersInjectors() {
+ return membersInjectors == null
+ ? ImmutableList.<MembersInjector<? super T>>of()
+ : ImmutableList.copyOf(membersInjectors);
+ }
+
+ public ImmutableList<InjectionListener<? super T>> getInjectionListeners() {
+ return injectionListeners == null
+ ? ImmutableList.<InjectionListener<? super T>>of()
+ : ImmutableList.copyOf(injectionListeners);
+ }
+
+ public void register(MembersInjector<? super T> membersInjector) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+
+ if (membersInjectors == null) {
+ membersInjectors = Lists.newArrayList();
+ }
+
+ membersInjectors.add(membersInjector);
+ }
+
+ public void register(InjectionListener<? super T> injectionListener) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+
+ if (injectionListeners == null) {
+ injectionListeners = Lists.newArrayList();
+ }
+
+ injectionListeners.add(injectionListener);
+ }
+
+ public void addError(String message, Object... arguments) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+ errors.addMessage(message, arguments);
+ }
+
+ public void addError(Throwable t) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+ errors.errorInUserCode(t, "An exception was caught and reported. Message: %s", t.getMessage());
+ }
+
+ public void addError(Message message) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+ errors.addMessage(message);
+ }
+
+ public <T> Provider<T> getProvider(Key<T> key) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+ return lookups.getProvider(key);
+ }
+
+ public <T> Provider<T> getProvider(Class<T> type) {
+ return getProvider(Key.get(type));
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
+ checkState(valid, "Encounters may not be used after hear() returns.");
+ return lookups.getMembersInjector(typeLiteral);
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
+ return getMembersInjector(TypeLiteral.get(type));
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/Exposed.java b/src/main/java/org/elasticsearch/common/inject/Exposed.java
new file mode 100644
index 0000000..992da18
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Exposed.java
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Accompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a
+ * private module to indicate that the provided binding is exposed.
+ *
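+ * <p>A minimal sketch, for illustration only (the module and type names are
+ * hypothetical, and it assumes a {@code PrivateModule}-style private environment):
+ * <pre>
+ *   public class FooModule extends PrivateModule {
+ *     protected void configure() {}
+ *
+ *     {@literal @}Provides {@literal @}Exposed
+ *     Foo provideFoo() {
+ *       return new Foo(); // visible to the enclosing injector
+ *     }
+ *   }
+ * </pre>
+ *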
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+@Target(ElementType.METHOD)
+@Retention(RUNTIME)
+@Documented
+public @interface Exposed {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java b/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java
new file mode 100644
index 0000000..0cdb8a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java
@@ -0,0 +1,56 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.PrivateElements;
+
+/**
+ * This factory exists in a parent injector. When invoked, it retrieves its value from a child
+ * injector.
+ */
+class ExposedKeyFactory<T> implements InternalFactory<T>, BindingProcessor.CreationListener {
+ private final Key<T> key;
+ private final PrivateElements privateElements;
+ private BindingImpl<T> delegate;
+
+ public ExposedKeyFactory(Key<T> key, PrivateElements privateElements) {
+ this.key = key;
+ this.privateElements = privateElements;
+ }
+
+ public void notify(Errors errors) {
+ InjectorImpl privateInjector = (InjectorImpl) privateElements.getInjector();
+ BindingImpl<T> explicitBinding = privateInjector.state.getExplicitBinding(key);
+
+ // validate that the child injector has its own factory. If the getInternalFactory() returns
+ // this, then that child injector doesn't have a factory (and getExplicitBinding has returned
+ // its parent's binding instead).
+ if (explicitBinding.getInternalFactory() == this) {
+ errors.withSource(explicitBinding.getSource()).exposedButNotBound(key);
+ return;
+ }
+
+ this.delegate = explicitBinding;
+ }
+
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ return delegate.getInternalFactory().get(errors, context, dependency);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/FactoryProxy.java b/src/main/java/org/elasticsearch/common/inject/FactoryProxy.java
new file mode 100644
index 0000000..0c40fe5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/FactoryProxy.java
@@ -0,0 +1,62 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+/**
+ * A placeholder which enables us to swap in the real factory once the injector is created.
+ */
+class FactoryProxy<T> implements InternalFactory<T>, BindingProcessor.CreationListener {
+
+ private final InjectorImpl injector;
+ private final Key<T> key;
+ private final Key<? extends T> targetKey;
+ private final Object source;
+
+ private InternalFactory<? extends T> targetFactory;
+
+ FactoryProxy(InjectorImpl injector, Key<T> key, Key<? extends T> targetKey, Object source) {
+ this.injector = injector;
+ this.key = key;
+ this.targetKey = targetKey;
+ this.source = source;
+ }
+
+ public void notify(final Errors errors) {
+ try {
+ targetFactory = injector.getInternalFactory(targetKey, errors.withSource(source));
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ return targetFactory.get(errors.withSource(targetKey), context, dependency);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(FactoryProxy.class)
+ .add("key", key)
+ .add("provider", targetFactory)
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Guice.java b/src/main/java/org/elasticsearch/common/inject/Guice.java
new file mode 100644
index 0000000..7b303e5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Guice.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.util.Arrays;
+
+/**
+ * The entry point to the Guice framework. Creates {@link Injector}s from
+ * {@link Module}s.
+ * <p/>
+ * <p>Guice supports a model of development that draws clear boundaries between
+ * APIs, Implementations of these APIs, Modules which configure these
+ * implementations, and finally Applications which consist of a collection of
+ * Modules. It is the Application, which typically defines your {@code main()}
+ * method, that bootstraps the Guice Injector using the {@code Guice} class, as
+ * in this example:
+ * <pre>
+ * public class FooApplication {
+ * public static void main(String[] args) {
+ * Injector injector = Guice.createInjector(
+ * new ModuleA(),
+ * new ModuleB(),
+ * . . .
+ * new FooApplicationFlagsModule(args)
+ * );
+ *
+ * // Now just bootstrap the application and you're done
+ * FooStarter starter = injector.getInstance(FooStarter.class);
+ * starter.runApplication();
+ * }
+ * }
+ * </pre>
+ */
+public final class Guice {
+
+ private Guice() {
+ }
+
+ /**
+ * Creates an injector for the given set of modules.
+ *
+ * @throws CreationException if one or more errors occur during Injector
+ * construction
+ */
+ public static Injector createInjector(Module... modules) {
+ return createInjector(Arrays.asList(modules));
+ }
+
+ /**
+ * Creates an injector for the given set of modules.
+ *
+ * @throws CreationException if one or more errors occur during Injector
+ * creation
+ */
+ public static Injector createInjector(Iterable<? extends Module> modules) {
+ return createInjector(Stage.DEVELOPMENT, modules);
+ }
+
+ /**
+ * Creates an injector for the given set of modules, in a given development
+ * stage.
+ *
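+ * <p>For example (the module name is hypothetical):
+ * <pre>
+ *   Injector injector = Guice.createInjector(Stage.PRODUCTION, new FooModule());
+ * </pre>
+ *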
+ * @throws CreationException if one or more errors occur during Injector
+ * creation
+ */
+ public static Injector createInjector(Stage stage, Module... modules) {
+ return createInjector(stage, Arrays.asList(modules));
+ }
+
+ /**
+ * Creates an injector for the given set of modules, in a given development
+ * stage.
+ *
+ * @throws CreationException if one or more errors occur during Injector
+ * construction
+ */
+ public static Injector createInjector(Stage stage,
+ Iterable<? extends Module> modules) {
+ return new InjectorBuilder()
+ .stage(stage)
+ .addModules(modules)
+ .build();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java b/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java
new file mode 100644
index 0000000..52e3509
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * A pointer to the default implementation of a type.
+ *
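+ * <p>A minimal sketch, for illustration only (the type names are hypothetical):
+ * <pre>
+ *   {@literal @}ImplementedBy(FooImpl.class)
+ *   public interface Foo {}
+ *
+ *   public class FooImpl implements Foo {}
+ * </pre>
+ *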
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Retention(RUNTIME)
+@Target(TYPE)
+public @interface ImplementedBy {
+
+ /**
+ * The implementation type.
+ */
+ Class<?> value();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/InheritingState.java b/src/main/java/org/elasticsearch/common/inject/InheritingState.java
new file mode 100644
index 0000000..71d0d82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/InheritingState.java
@@ -0,0 +1,150 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+import org.elasticsearch.common.inject.spi.TypeListenerBinding;
+
+import java.lang.annotation.Annotation;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class InheritingState implements State {
+
+ private final State parent;
+
+ // Must be a linked hashmap in order to preserve order of bindings in Modules.
+ private final Map<Key<?>, Binding<?>> explicitBindingsMutable = Maps.newLinkedHashMap();
+ private final Map<Key<?>, Binding<?>> explicitBindings
+ = Collections.unmodifiableMap(explicitBindingsMutable);
+ private final Map<Class<? extends Annotation>, Scope> scopes = Maps.newHashMap();
+ private final List<MatcherAndConverter> converters = Lists.newArrayList();
+ private final List<TypeListenerBinding> listenerBindings = Lists.newArrayList();
+ private WeakKeySet blacklistedKeys = new WeakKeySet();
+ private final Object lock;
+
+ InheritingState(State parent) {
+ this.parent = checkNotNull(parent, "parent");
+ this.lock = (parent == State.NONE) ? this : parent.lock();
+ }
+
+ public State parent() {
+ return parent;
+ }
+
+ @SuppressWarnings("unchecked") // we only put in BindingImpls that match their key types
+ public <T> BindingImpl<T> getExplicitBinding(Key<T> key) {
+ Binding<?> binding = explicitBindings.get(key);
+ return binding != null ? (BindingImpl<T>) binding : parent.getExplicitBinding(key);
+ }
+
+ public Map<Key<?>, Binding<?>> getExplicitBindingsThisLevel() {
+ return explicitBindings;
+ }
+
+ public void putBinding(Key<?> key, BindingImpl<?> binding) {
+ explicitBindingsMutable.put(key, binding);
+ }
+
+ public Scope getScope(Class<? extends Annotation> annotationType) {
+ Scope scope = scopes.get(annotationType);
+ return scope != null ? scope : parent.getScope(annotationType);
+ }
+
+ public void putAnnotation(Class<? extends Annotation> annotationType, Scope scope) {
+ scopes.put(annotationType, scope);
+ }
+
+ public Iterable<MatcherAndConverter> getConvertersThisLevel() {
+ return converters;
+ }
+
+ public void addConverter(MatcherAndConverter matcherAndConverter) {
+ converters.add(matcherAndConverter);
+ }
+
+ public MatcherAndConverter getConverter(
+ String stringValue, TypeLiteral<?> type, Errors errors, Object source) {
+ MatcherAndConverter matchingConverter = null;
+ for (State s = this; s != State.NONE; s = s.parent()) {
+ for (MatcherAndConverter converter : s.getConvertersThisLevel()) {
+ if (converter.getTypeMatcher().matches(type)) {
+ if (matchingConverter != null) {
+ errors.ambiguousTypeConversion(stringValue, source, type, matchingConverter, converter);
+ }
+ matchingConverter = converter;
+ }
+ }
+ }
+ return matchingConverter;
+ }
+
+ public void addTypeListener(TypeListenerBinding listenerBinding) {
+ listenerBindings.add(listenerBinding);
+ }
+
+ public List<TypeListenerBinding> getTypeListenerBindings() {
+ List<TypeListenerBinding> parentBindings = parent.getTypeListenerBindings();
+ List<TypeListenerBinding> result
+ = new ArrayList<TypeListenerBinding>(parentBindings.size() + 1);
+ result.addAll(parentBindings);
+ result.addAll(listenerBindings);
+ return result;
+ }
+
+ public void blacklist(Key<?> key) {
+ parent.blacklist(key);
+ blacklistedKeys.add(key);
+ }
+
+ public boolean isBlacklisted(Key<?> key) {
+ return blacklistedKeys.contains(key);
+ }
+
+ @Override
+ public void clearBlacklisted() {
+ blacklistedKeys = new WeakKeySet();
+ }
+
+ @Override
+ public void makeAllBindingsToEagerSingletons(Injector injector) {
+ Map<Key<?>, Binding<?>> x = Maps.newLinkedHashMap();
+ for (Map.Entry<Key<?>, Binding<?>> entry : this.explicitBindingsMutable.entrySet()) {
+ Key key = entry.getKey();
+ BindingImpl<?> binding = (BindingImpl<?>) entry.getValue();
+ Object value = binding.getProvider().get();
+ x.put(key, new InstanceBindingImpl<Object>(injector, key, SourceProvider.UNKNOWN_SOURCE, new InternalFactory.Instance(value), ImmutableSet.<InjectionPoint>of(), value));
+ }
+ this.explicitBindingsMutable.clear();
+ this.explicitBindingsMutable.putAll(x);
+ }
+
+ public Object lock() {
+ return lock;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Initializable.java b/src/main/java/org/elasticsearch/common/inject/Initializable.java
new file mode 100644
index 0000000..01a48e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Initializable.java
@@ -0,0 +1,33 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+
+/**
+ * Holds a reference that requires initialization to be performed before it can be used.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+interface Initializable<T> {
+
+ /**
+ * Ensures the reference is initialized, then returns it.
+ */
+ T get(Errors errors) throws ErrorsException;
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Initializables.java b/src/main/java/org/elasticsearch/common/inject/Initializables.java
new file mode 100644
index 0000000..b2c017e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Initializables.java
@@ -0,0 +1,42 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+
+/**
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class Initializables {
+
+ /**
+ * Returns an initializable for an instance that requires no initialization.
+ */
+ static <T> Initializable<T> of(final T instance) {
+ return new Initializable<T>() {
+ public T get(Errors errors) throws ErrorsException {
+ return instance;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(instance);
+ }
+ };
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Initializer.java b/src/main/java/org/elasticsearch/common/inject/Initializer.java
new file mode 100644
index 0000000..259e09e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Initializer.java
@@ -0,0 +1,164 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Manages and injects instances at injector-creation time. This is made more complicated by
+ * instances that request other instances while they're being injected. We overcome this by using
+ * {@link Initializable}, which attempts to perform injection before use.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class Initializer {
+ /**
+ * the only thread that we'll use to inject members.
+ */
+ private final Thread creatingThread = Thread.currentThread();
+
+ /**
+ * zero means everything is injected.
+ */
+ private final CountDownLatch ready = new CountDownLatch(1);
+
+ /**
+ * Maps instances that need injection to a source that registered them
+ */
+ private final Map<Object, InjectableReference<?>> pendingInjection = Maps.newIdentityHashMap();
+
+ /**
+ * Registers an instance for member injection when that step is performed.
+ *
+ * @param instance an instance that optionally has members to be injected (each annotated with
+ * {@literal @}Inject)
+ * @param source the source location from which this injection was requested
+ */
+ public <T> Initializable<T> requestInjection(InjectorImpl injector, T instance, Object source,
+ Set<InjectionPoint> injectionPoints) {
+ checkNotNull(source);
+
+ // short circuit if the object has no injections
+ if (instance == null
+ || (injectionPoints.isEmpty() && !injector.membersInjectorStore.hasTypeListeners())) {
+ return Initializables.of(instance);
+ }
+
+ InjectableReference<T> initializable = new InjectableReference<T>(injector, instance, source);
+ pendingInjection.put(instance, initializable);
+ return initializable;
+ }
+
+ /**
+ * Prepares member injectors for all injected instances. This prompts Guice to do static analysis
+ * on the injected instances.
+ */
+ void validateOutstandingInjections(Errors errors) {
+ for (InjectableReference<?> reference : pendingInjection.values()) {
+ try {
+ reference.validate(errors);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+ }
+
+ /**
+ * Performs creation-time injections on all objects that require it. Whenever fulfilling an
+ * injection depends on another object that requires injection, we inject it first. If the two
+ * instances are codependent (directly or transitively), ordering of injection is arbitrary.
+ */
+ void injectAll(final Errors errors) {
+ // loop over a defensive copy since ensureInjected() mutates the set. Unfortunately, that copy
+ // is made complicated by a bug in IBM's JDK, wherein entrySet().toArray(Object[]) doesn't work
+ for (InjectableReference<?> reference : Lists.newArrayList(pendingInjection.values())) {
+ try {
+ reference.get(errors);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+
+ if (!pendingInjection.isEmpty()) {
+ throw new AssertionError("Failed to satisfy " + pendingInjection);
+ }
+
+ ready.countDown();
+ }
+
+ private class InjectableReference<T> implements Initializable<T> {
+ private final InjectorImpl injector;
+ private final T instance;
+ private final Object source;
+ private MembersInjectorImpl<T> membersInjector;
+
+ public InjectableReference(InjectorImpl injector, T instance, Object source) {
+ this.injector = injector;
+ this.instance = checkNotNull(instance, "instance");
+ this.source = checkNotNull(source, "source");
+ }
+
+ public void validate(Errors errors) throws ErrorsException {
+ @SuppressWarnings("unchecked") // the type of 'T' is a TypeLiteral<T>
+ TypeLiteral<T> type = TypeLiteral.get((Class<T>) instance.getClass());
+ membersInjector = injector.membersInjectorStore.get(type, errors.withSource(source));
+ }
+
+ /**
+ * Reentrant. If {@code instance} was registered for injection at injector-creation time, this
+ * method will ensure that all its members have been injected before returning.
+ */
+ public T get(Errors errors) throws ErrorsException {
+ if (ready.getCount() == 0) {
+ return instance;
+ }
+
+ // just wait for everything to be injected by another thread
+ if (Thread.currentThread() != creatingThread) {
+ try {
+ ready.await();
+ return instance;
+ } catch (InterruptedException e) {
+ // Give up, since we don't know if our injection is ready
+ throw new RuntimeException(e);
+ }
+ }
+
+ // the instance needs injection, so do it right away. We only do this once, even if it fails.
+ if (pendingInjection.remove(instance) != null) {
+ membersInjector.injectAndNotify(instance, errors.withSource(source));
+ }
+
+ return instance;
+ }
+
+ @Override
+ public String toString() {
+ return instance.toString();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/Inject.java b/src/main/java/org/elasticsearch/common/inject/Inject.java
new file mode 100644
index 0000000..c118c4d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Inject.java
@@ -0,0 +1,67 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.*;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Annotates members of your implementation class (constructors, methods
+ * and fields) into which the {@link Injector} should inject values.
+ * The Injector fulfills injection requests for:
+ * <p/>
+ * <ul>
+ * <li>Every instance it constructs. The class being constructed must have
+ * exactly one of its constructors marked with {@code @Inject} or must have a
+ * constructor taking no parameters. The Injector then proceeds to perform
+ * method and field injections.
+ * <p/>
+ * <li>Pre-constructed instances passed to {@link Injector#injectMembers},
+ * {@link org.elasticsearch.common.inject.binder.LinkedBindingBuilder#toInstance(Object)} and
+ * {@link org.elasticsearch.common.inject.binder.LinkedBindingBuilder#toProvider(Provider)}.
+ * In this case all constructors are, of course, ignored.
+ * <p/>
+ * <li>Static fields and methods of classes which any {@link Module} has
+ * specifically requested static injection for, using
+ * {@link Binder#requestStaticInjection}.
+ * </ul>
+ * <p/>
+ * In all cases, a member can be injected regardless of its Java access
+ * specifier (private, default, protected, public).
+ *
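+ * <p>A minimal sketch of constructor injection, for illustration only (the type
+ * names are hypothetical):
+ * <pre>
+ *   public class CheckoutService {
+ *     private final PaymentProcessor processor;
+ *
+ *     {@literal @}Inject
+ *     public CheckoutService(PaymentProcessor processor) {
+ *       this.processor = processor; // supplied by the Injector
+ *     }
+ *   }
+ * </pre>
+ *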
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Target({METHOD, CONSTRUCTOR, FIELD})
+@Retention(RUNTIME)
+@Documented
+public @interface Inject {
+
+ /**
+ * If true, and the appropriate binding is not found,
+ * the Injector will skip injection of this method or field rather than
+ * produce an error. When applied to a field, any default value already
+ * assigned to the field will remain (Guice will not actively null out the
+ * field). When applied to a method, the method will only be invoked if
+ * bindings for <i>all</i> parameters are found. When applied to a
+ * constructor, an error will result upon Injector creation.
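+ * <p>For example (the field and type names are hypothetical):
+ * <pre>
+ *   {@literal @}Inject(optional = true)
+ *   Formatter formatter = new DefaultFormatter(); // kept if no binding exists
+ * </pre>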
+ */
+ boolean optional() default false;
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java b/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java
new file mode 100644
index 0000000..09308f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java
@@ -0,0 +1,124 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+import org.elasticsearch.common.inject.spi.InjectionRequest;
+import org.elasticsearch.common.inject.spi.StaticInjectionRequest;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Handles {@link Binder#requestInjection} and {@link Binder#requestStaticInjection} commands.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @author mikeward@google.com (Mike Ward)
+ */
+class InjectionRequestProcessor extends AbstractProcessor {
+
+ private final List<StaticInjection> staticInjections = Lists.newArrayList();
+ private final Initializer initializer;
+
+ InjectionRequestProcessor(Errors errors, Initializer initializer) {
+ super(errors);
+ this.initializer = initializer;
+ }
+
+ @Override
+ public Boolean visit(StaticInjectionRequest request) {
+ staticInjections.add(new StaticInjection(injector, request));
+ return true;
+ }
+
+ @Override
+ public Boolean visit(InjectionRequest request) {
+ Set<InjectionPoint> injectionPoints;
+ try {
+ injectionPoints = request.getInjectionPoints();
+ } catch (ConfigurationException e) {
+ errors.merge(e.getErrorMessages());
+ injectionPoints = e.getPartialValue();
+ }
+
+ initializer.requestInjection(
+ injector, request.getInstance(), request.getSource(), injectionPoints);
+ return true;
+ }
+
+ public void validate() {
+ for (StaticInjection staticInjection : staticInjections) {
+ staticInjection.validate();
+ }
+ }
+
+ public void injectMembers() {
+ for (StaticInjection staticInjection : staticInjections) {
+ staticInjection.injectMembers();
+ }
+ }
+
+ /**
+ * A requested static injection.
+ */
+ private class StaticInjection {
+ final InjectorImpl injector;
+ final Object source;
+ final StaticInjectionRequest request;
+ ImmutableList<SingleMemberInjector> memberInjectors;
+
+ public StaticInjection(InjectorImpl injector, StaticInjectionRequest request) {
+ this.injector = injector;
+ this.source = request.getSource();
+ this.request = request;
+ }
+
+ void validate() {
+ Errors errorsForMember = errors.withSource(source);
+ Set<InjectionPoint> injectionPoints;
+ try {
+ injectionPoints = request.getInjectionPoints();
+ } catch (ConfigurationException e) {
+ errors.merge(e.getErrorMessages());
+ injectionPoints = e.getPartialValue();
+ }
+ memberInjectors = injector.membersInjectorStore.getInjectors(
+ injectionPoints, errorsForMember);
+ }
+
+ void injectMembers() {
+ try {
+ injector.callInContext(new ContextualCallable<Void>() {
+ public Void call(InternalContext context) {
+ for (SingleMemberInjector injector : memberInjectors) {
+ injector.inject(errors, context, null);
+ }
+ return null;
+ }
+ });
+ } catch (ErrorsException e) {
+ throw new AssertionError();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Injector.java b/src/main/java/org/elasticsearch/common/inject/Injector.java
new file mode 100644
index 0000000..d7ee0e8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Injector.java
@@ -0,0 +1,211 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Builds the graphs of objects that make up your application. The injector tracks the dependencies
+ * for each type and uses bindings to inject them. This is the core of Guice, although you rarely
+ * interact with it directly. This "behind-the-scenes" operation is what distinguishes dependency
+ * injection from its cousin, the service locator pattern.
+ * <p/>
+ * <p>Contains several default bindings:
+ * <p/>
+ * <ul>
+ * <li>This {@link Injector} instance itself
+ * <li>A {@code Provider<T>} for each binding of type {@code T}
+ * <li>The {@link java.util.logging.Logger} for the class being injected
+ * <li>The {@link Stage} in which the Injector was created
+ * </ul>
+ * <p/>
+ * Injectors are created using the facade class {@link Guice}.
+ * <p/>
+ * <p>An injector can also {@link #injectMembers(Object) inject the dependencies} of
+ * already-constructed instances. This can be used to interoperate with objects created by other
+ * frameworks or services.
+ * <p/>
+ * <p>Injectors can be {@link #createChildInjector(Iterable) hierarchical}. Child injectors inherit
+ * the configuration of their parent injectors, but the converse does not hold.
+ * <p/>
+ * <p>The injector's {@link #getBindings() internal bindings} are available for introspection. This
+ * enables tools and extensions to operate on an injector reflectively.
+ *
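+ * <p>A typical interaction, for illustration only (the module and type names are
+ * hypothetical):
+ * <pre>
+ *   Injector injector = Guice.createInjector(new FooModule());
+ *   Foo foo = injector.getInstance(Foo.class); // equivalent to getProvider(Foo.class).get()
+ * </pre>
+ *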
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public interface Injector {
+
+ /**
+ * Injects dependencies into the fields and methods of {@code instance}. Ignores the presence or
+ * absence of an injectable constructor.
+ * <p/>
+ * <p>Whenever Guice creates an instance, it performs this injection automatically (after first
+ * performing constructor injection), so if you're able to let Guice create all your objects for
+ * you, you'll never need to use this method.
+ *
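+ * <p>For illustration only (the type name is hypothetical):
+ * <pre>
+ *   Foo foo = new Foo();
+ *   injector.injectMembers(foo); // fields and methods annotated {@literal @}Inject are populated
+ * </pre>
+ *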
+ * @param instance to inject members on
+ * @see Binder#getMembersInjector(Class) for a preferred alternative that supports checks before
+ * run time
+ */
+ void injectMembers(Object instance);
+
+ /**
+ * Returns the members injector used to inject dependencies into methods and fields on instances
+ * of the given type {@code T}.
+ *
+ * @param typeLiteral type to get members injector for
+ * @see Binder#getMembersInjector(TypeLiteral) for an alternative that offers up front error
+ * detection
+ * @since 2.0
+ */
+ <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral);
+
+ /**
+ * Returns the members injector used to inject dependencies into methods and fields on instances
+ * of the given type {@code T}. When feasible, use {@link Binder#getMembersInjector(TypeLiteral)}
+ * instead to get increased up front error detection.
+ *
+ * @param type type to get members injector for
+ * @see Binder#getMembersInjector(Class) for an alternative that offers up front error
+ * detection
+ * @since 2.0
+ */
+ <T> MembersInjector<T> getMembersInjector(Class<T> type);
+
+ /**
+ * Returns all explicit bindings.
+ * <p/>
+ * <p>The returned map does not include bindings inherited from a {@link #getParent() parent
+ * injector}, should one exist. The returned map is guaranteed to iterate (for example, with
+ * its {@link java.util.Map#entrySet()} iterator) in the order of insertion; that is, in the
+ * order in which bindings appear in user Modules.
+ * <p/>
+ * <p>This method is part of the Guice SPI and is intended for use by tools and extensions.
+ */
+ Map<Key<?>, Binding<?>> getBindings();
+
+ /**
+ * Returns the binding for the given injection key. This will be an explicit binding if the key
+ * was bound explicitly by a module, or an implicit binding otherwise. The implicit binding will
+ * be created if necessary.
+ * <p/>
+ * <p>This method is part of the Guice SPI and is intended for use by tools and extensions.
+ *
+ * @throws ConfigurationException if this injector cannot find or create the binding.
+ */
+ <T> Binding<T> getBinding(Key<T> key);
+
+ /**
+ * Returns the binding for the given type. This will be an explicit binding if the injection key
+ * was bound explicitly by a module, or an implicit binding otherwise. The implicit binding will
+ * be created if necessary.
+ * <p/>
+ * <p>This method is part of the Guice SPI and is intended for use by tools and extensions.
+ *
+ * @throws ConfigurationException if this injector cannot find or create the binding.
+ * @since 2.0
+ */
+ <T> Binding<T> getBinding(Class<T> type);
+
+ /**
+ * Returns all explicit bindings for {@code type}.
+ * <p/>
+ * <p>This method is part of the Guice SPI and is intended for use by tools and extensions.
+ */
+ <T> List<Binding<T>> findBindingsByType(TypeLiteral<T> type);
+
+ /**
+ * Returns the provider used to obtain instances for the given injection key. When feasible, avoid
+ * using this method, in favor of having Guice inject your dependencies ahead of time.
+ *
+ * @throws ConfigurationException if this injector cannot find or create the provider.
+ * @see Binder#getProvider(Key) for an alternative that offers up front error detection
+ */
+ <T> Provider<T> getProvider(Key<T> key);
+
+ /**
+ * Returns the provider used to obtain instances for the given type. When feasible, avoid
+ * using this method, in favor of having Guice inject your dependencies ahead of time.
+ *
+ * @throws ConfigurationException if this injector cannot find or create the provider.
+ * @see Binder#getProvider(Class) for an alternative that offers up front error detection
+ */
+ <T> Provider<T> getProvider(Class<T> type);
+
+ /**
+ * Returns the appropriate instance for the given injection key; equivalent to {@code
+ * getProvider(key).get()}. When feasible, avoid using this method, in favor of having Guice
+ * inject your dependencies ahead of time.
+ *
+ * @throws ConfigurationException if this injector cannot find or create the provider.
+ * @throws ProvisionException if there was a runtime failure while providing an instance.
+ */
+ <T> T getInstance(Key<T> key);
+
+ /**
+ * Returns the appropriate instance for the given injection type; equivalent to {@code
+ * getProvider(type).get()}. When feasible, avoid using this method, in favor of having Guice
+ * inject your dependencies ahead of time.
+ *
+ * @throws ConfigurationException if this injector cannot find or create the provider.
+ * @throws ProvisionException if there was a runtime failure while providing an instance.
+ */
+ <T> T getInstance(Class<T> type);
+
+ /**
+ * Returns this injector's parent, or {@code null} if this is a top-level injector.
+ *
+ * @since 2.0
+ */
+ Injector getParent();
+
+ /**
+ * Returns a new injector that inherits all state from this injector. All bindings, scopes,
+ * interceptors and type converters are inherited -- they are visible to the child injector.
+ * Elements of the child injector are not visible to its parent.
+ * <p/>
+ * <p>Just-in-time bindings created for child injectors will be created in an ancestor injector
+ * whenever possible. This allows for scoped instances to be shared between injectors. Use
+ * explicit bindings to prevent bindings from being shared with the parent injector.
+ * <p/>
+ * <p>No key may be bound by both an injector and one of its ancestors. This includes just-in-time
+ * bindings. The lone exception is the key for {@code Injector.class}, which is bound by each
+ * injector to itself.
+ *
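+ * <p>For illustration only (the module names are hypothetical):
+ * <pre>
+ *   Injector parent = Guice.createInjector(new SharedModule());
+ *   Injector child = parent.createChildInjector(Arrays.asList(new RequestModule()));
+ * </pre>
+ *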
+ * @since 2.0
+ */
+ Injector createChildInjector(Iterable<? extends Module> modules);
+
+ /**
+ * Returns a new injector that inherits all state from this injector. All bindings, scopes,
+ * interceptors and type converters are inherited -- they are visible to the child injector.
+ * Elements of the child injector are not visible to its parent.
+ * <p/>
+ * <p>Just-in-time bindings created for child injectors will be created in an ancestor injector
+ * whenever possible. This allows for scoped instances to be shared between injectors. Use
+ * explicit bindings to prevent bindings from being shared with the parent injector.
+ * <p/>
+ * <p>No key may be bound by both an injector and one of its ancestors. This includes just-in-time
+ * bindings. The lone exception is the key for {@code Injector.class}, which is bound by each
+ * injector to itself.
+ *
+ * @since 2.0
+ */
+ Injector createChildInjector(Module... modules);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java b/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java
new file mode 100644
index 0000000..8568ec9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java
@@ -0,0 +1,290 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Builds a tree of injectors. This is a primary injector, plus child injectors needed for each
+ * {@link Binder#newPrivateBinder() private environment}. The primary injector is not necessarily a
+ * top-level injector.
+ * <p/>
+ * <p>Injector construction happens in two phases.
+ * <ol>
+ * <li>Static building. In this phase, we interpret commands, create bindings, and inspect
+ * dependencies. During this phase, we hold a lock to ensure consistency with parent injectors.
+ * No user code is executed in this phase.</li>
+ * <li>Dynamic injection. In this phase, we call user code. We inject members that requested
+ * injection. This may require the user's objects to be created and their providers to be called. We
+ * create eager singletons. In this phase, user code may have started other threads. This phase
+ * is not executed for injectors created using {@link Stage#TOOL the tool stage}</li>
+ * </ol>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class InjectorBuilder {
+
+ private final Stopwatch stopwatch = new Stopwatch();
+ private final Errors errors = new Errors();
+
+ private Stage stage;
+
+ private final Initializer initializer = new Initializer();
+ private final BindingProcessor bindingProcessor;
+ private final InjectionRequestProcessor injectionRequestProcessor;
+
+ private final InjectorShell.Builder shellBuilder = new InjectorShell.Builder();
+ private List<InjectorShell> shells;
+
+ InjectorBuilder() {
+ injectionRequestProcessor = new InjectionRequestProcessor(errors, initializer);
+ bindingProcessor = new BindingProcessor(errors, initializer);
+ }
+
+ /**
+ * Sets the stage for the created injector. If the stage is {@link Stage#PRODUCTION}, this class
+ * will eagerly load singletons.
+ */
+ InjectorBuilder stage(Stage stage) {
+ shellBuilder.stage(stage);
+ this.stage = stage;
+ return this;
+ }
+
+ /**
+ * Sets the parent of the injector to-be-constructed. As a side effect, this sets this injector's
+ * stage to the stage of {@code parent}.
+ */
+ InjectorBuilder parentInjector(InjectorImpl parent) {
+ shellBuilder.parent(parent);
+ return stage(parent.getInstance(Stage.class));
+ }
+
+ InjectorBuilder addModules(Iterable<? extends Module> modules) {
+ shellBuilder.addModules(modules);
+ return this;
+ }
+
+ Injector build() {
+ if (shellBuilder == null) {
+ throw new AssertionError("Already built, builders are not reusable.");
+ }
+
+ // Synchronize while we're building up the bindings and other injector state. This ensures that
+ // the JIT bindings in the parent injector don't change while we're being built
+ synchronized (shellBuilder.lock()) {
+ shells = shellBuilder.build(initializer, bindingProcessor, stopwatch, errors);
+ stopwatch.resetAndLog("Injector construction");
+
+ initializeStatically();
+ }
+
+ // If we're in the tool stage, stop here. Don't eagerly inject or load anything.
+ if (stage == Stage.TOOL) {
+ return new ToolStageInjector(primaryInjector());
+ }
+
+ injectDynamically();
+
+ return primaryInjector();
+ }
+
+ /**
+ * Initialize and validate everything.
+ */
+ private void initializeStatically() {
+ bindingProcessor.initializeBindings();
+ stopwatch.resetAndLog("Binding initialization");
+
+ for (InjectorShell shell : shells) {
+ shell.getInjector().index();
+ }
+ stopwatch.resetAndLog("Binding indexing");
+
+ injectionRequestProcessor.process(shells);
+ stopwatch.resetAndLog("Collecting injection requests");
+
+ bindingProcessor.runCreationListeners();
+ stopwatch.resetAndLog("Binding validation");
+
+ injectionRequestProcessor.validate();
+ stopwatch.resetAndLog("Static validation");
+
+ initializer.validateOutstandingInjections(errors);
+ stopwatch.resetAndLog("Instance member validation");
+
+ new LookupProcessor(errors).process(shells);
+ for (InjectorShell shell : shells) {
+ ((DeferredLookups) shell.getInjector().lookups).initialize(errors);
+ }
+ stopwatch.resetAndLog("Provider verification");
+
+ for (InjectorShell shell : shells) {
+ if (!shell.getElements().isEmpty()) {
+ throw new AssertionError("Failed to execute " + shell.getElements());
+ }
+ }
+
+ errors.throwCreationExceptionIfErrorsExist();
+ }
+
+ /**
+ * Returns the injector being constructed. This is not necessarily the root injector.
+ */
+ private Injector primaryInjector() {
+ return shells.get(0).getInjector();
+ }
+
+ /**
+ * Inject everything that can be injected. This method is intentionally not synchronized. If we
+ * locked while injecting members (i.e. running user code), things would deadlock should the user
+ * code build a just-in-time binding from another thread.
+ */
+ private void injectDynamically() {
+ injectionRequestProcessor.injectMembers();
+ stopwatch.resetAndLog("Static member injection");
+
+ initializer.injectAll(errors);
+ stopwatch.resetAndLog("Instance injection");
+ errors.throwCreationExceptionIfErrorsExist();
+
+ for (InjectorShell shell : shells) {
+ loadEagerSingletons(shell.getInjector(), stage, errors);
+ }
+ stopwatch.resetAndLog("Preloading singletons");
+ errors.throwCreationExceptionIfErrorsExist();
+ }
+
+ /**
+ * Loads eager singletons, or all singletons if we're in Stage.PRODUCTION. Bindings discovered
+ * while we're binding these singletons are not eager.
+ */
+ public void loadEagerSingletons(InjectorImpl injector, Stage stage, final Errors errors) {
+ @SuppressWarnings("unchecked") // casting Collection<Binding> to Collection<BindingImpl> is safe
+ Set<BindingImpl<?>> candidateBindings = ImmutableSet.copyOf(Iterables.concat(
+ (Collection) injector.state.getExplicitBindingsThisLevel().values(),
+ injector.jitBindings.values()));
+ for (final BindingImpl<?> binding : candidateBindings) {
+ if (binding.getScoping().isEagerSingleton(stage)) {
+ try {
+ injector.callInContext(new ContextualCallable<Void>() {
+ Dependency<?> dependency = Dependency.get(binding.getKey());
+
+ public Void call(InternalContext context) {
+ context.setDependency(dependency);
+ Errors errorsForBinding = errors.withSource(dependency);
+ try {
+ binding.getInternalFactory().get(errorsForBinding, context, dependency);
+ } catch (ErrorsException e) {
+ errorsForBinding.merge(e.getErrors());
+ } finally {
+ context.setDependency(null);
+ }
+
+ return null;
+ }
+ });
+ } catch (ErrorsException e) {
+ throw new AssertionError();
+ }
+ }
+ }
+ }
+
+ /**
+ * {@link Injector} exposed to users in {@link Stage#TOOL}.
+ */
+ static class ToolStageInjector implements Injector {
+ private final Injector delegateInjector;
+
+ ToolStageInjector(Injector delegateInjector) {
+ this.delegateInjector = delegateInjector;
+ }
+
+ public void injectMembers(Object o) {
+ throw new UnsupportedOperationException(
+ "Injector.injectMembers(Object) is not supported in Stage.TOOL");
+ }
+
+ public Map<Key<?>, Binding<?>> getBindings() {
+ return this.delegateInjector.getBindings();
+ }
+
+ public <T> Binding<T> getBinding(Key<T> key) {
+ return this.delegateInjector.getBinding(key);
+ }
+
+ public <T> Binding<T> getBinding(Class<T> type) {
+ return this.delegateInjector.getBinding(type);
+ }
+
+ public <T> List<Binding<T>> findBindingsByType(TypeLiteral<T> type) {
+ return this.delegateInjector.findBindingsByType(type);
+ }
+
+ public Injector getParent() {
+ return delegateInjector.getParent();
+ }
+
+ public Injector createChildInjector(Iterable<? extends Module> modules) {
+ return delegateInjector.createChildInjector(modules);
+ }
+
+ public Injector createChildInjector(Module... modules) {
+ return delegateInjector.createChildInjector(modules);
+ }
+
+ public <T> Provider<T> getProvider(Key<T> key) {
+ throw new UnsupportedOperationException(
+ "Injector.getProvider(Key<T>) is not supported in Stage.TOOL");
+ }
+
+ public <T> Provider<T> getProvider(Class<T> type) {
+ throw new UnsupportedOperationException(
+ "Injector.getProvider(Class<T>) is not supported in Stage.TOOL");
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
+ throw new UnsupportedOperationException(
+ "Injector.getMembersInjector(TypeLiteral<T>) is not supported in Stage.TOOL");
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
+ throw new UnsupportedOperationException(
+ "Injector.getMembersInjector(Class<T>) is not supported in Stage.TOOL");
+ }
+
+ public <T> T getInstance(Key<T> key) {
+ throw new UnsupportedOperationException(
+ "Injector.getInstance(Key<T>) is not supported in Stage.TOOL");
+ }
+
+ public <T> T getInstance(Class<T> type) {
+ throw new UnsupportedOperationException(
+ "Injector.getInstance(Class<T>) is not supported in Stage.TOOL");
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java
new file mode 100644
index 0000000..1a1bd12
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java
@@ -0,0 +1,863 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Classes;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.*;
+import org.elasticsearch.common.inject.util.Providers;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.*;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.common.inject.internal.Annotations.findScopeAnnotation;
+
+/**
+ * Default {@link Injector} implementation.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @see InjectorBuilder
+ */
+class InjectorImpl implements Injector, Lookups {
+ final State state;
+ final InjectorImpl parent;
+ boolean readOnly;
+ BindingsMultimap bindingsMultimap = new BindingsMultimap();
+ final Initializer initializer;
+
+ /**
+ * Just-in-time binding cache. Guarded by state.lock()
+ */
+ Map<Key<?>, BindingImpl<?>> jitBindings = Maps.newHashMap();
+
+ Lookups lookups = new DeferredLookups(this);
+
+ InjectorImpl(@Nullable InjectorImpl parent, State state, Initializer initializer) {
+ this.parent = parent;
+ this.state = state;
+ this.initializer = initializer;
+
+ if (parent != null) {
+ localContext = parent.localContext;
+ } else {
+ localContext = new ThreadLocal<Object[]>() {
+ protected Object[] initialValue() {
+ return new Object[1];
+ }
+ };
+ }
+ }
+
+ /**
+ * Indexes bindings by type.
+ */
+ void index() {
+ for (Binding<?> binding : state.getExplicitBindingsThisLevel().values()) {
+ index(binding);
+ }
+ }
+
+ <T> void index(Binding<T> binding) {
+ bindingsMultimap.put(binding.getKey().getTypeLiteral(), binding);
+ }
+
+ public <T> List<Binding<T>> findBindingsByType(TypeLiteral<T> type) {
+ return bindingsMultimap.getAll(type);
+ }
+
+ /**
+ * Returns the binding for {@code key}
+ */
+ public <T> BindingImpl<T> getBinding(Key<T> key) {
+ Errors errors = new Errors(key);
+ try {
+ BindingImpl<T> result = getBindingOrThrow(key, errors);
+ errors.throwConfigurationExceptionIfErrorsExist();
+ return result;
+ } catch (ErrorsException e) {
+ throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
+ }
+ }
+
+ /**
+ * Gets a binding implementation. First, it checks to see if the parent has a binding. If the
+ * parent has a binding and the binding is scoped, it will use that binding. Otherwise, this
+ * checks for an explicit binding. If no explicit binding is found, it looks for a just-in-time
+ * binding.
+ */
+ public <T> BindingImpl<T> getBindingOrThrow(Key<T> key, Errors errors)
+ throws ErrorsException {
+ // Check explicit bindings, i.e. bindings created by modules.
+ BindingImpl<T> binding = state.getExplicitBinding(key);
+ if (binding != null) {
+ return binding;
+ }
+
+ // Look for an on-demand binding.
+ return getJustInTimeBinding(key, errors);
+ }
+
+ public <T> Binding<T> getBinding(Class<T> type) {
+ return getBinding(Key.get(type));
+ }
+
+ public Injector getParent() {
+ return parent;
+ }
+
+ public Injector createChildInjector(Iterable<? extends Module> modules) {
+ return new InjectorBuilder()
+ .parentInjector(this)
+ .addModules(modules)
+ .build();
+ }
+
+ public Injector createChildInjector(Module... modules) {
+ return createChildInjector(ImmutableList.copyOf(modules));
+ }
+
+ /**
+ * Returns a just-in-time binding for {@code key}, creating it if necessary.
+ *
+ * @throws ErrorsException if the binding could not be created.
+ */
+ private <T> BindingImpl<T> getJustInTimeBinding(Key<T> key, Errors errors)
+ throws ErrorsException {
+ synchronized (state.lock()) {
+ // first try to find a JIT binding that we've already created
+ for (InjectorImpl injector = this; injector != null; injector = injector.parent) {
+ @SuppressWarnings("unchecked") // we only store bindings that match their key
+ BindingImpl<T> binding = (BindingImpl<T>) injector.jitBindings.get(key);
+
+ if (binding != null) {
+ return binding;
+ }
+ }
+
+ return createJustInTimeBindingRecursive(key, errors);
+ }
+ }
+
+ /**
+ * Returns true if the key type is Provider (but not a subclass of Provider).
+ */
+ static boolean isProvider(Key<?> key) {
+ return key.getTypeLiteral().getRawType().equals(Provider.class);
+ }
+
+ /**
+ * Returns true if the key type is MembersInjector (but not a subclass of MembersInjector).
+ */
+ static boolean isMembersInjector(Key<?> key) {
+ return key.getTypeLiteral().getRawType().equals(MembersInjector.class)
+ && !key.hasAnnotationType();
+ }
+
+ private <T> BindingImpl<MembersInjector<T>> createMembersInjectorBinding(
+ Key<MembersInjector<T>> key, Errors errors) throws ErrorsException {
+ Type membersInjectorType = key.getTypeLiteral().getType();
+ if (!(membersInjectorType instanceof ParameterizedType)) {
+ throw errors.cannotInjectRawMembersInjector().toException();
+ }
+
+ @SuppressWarnings("unchecked") // safe because T came from Key<MembersInjector<T>>
+ TypeLiteral<T> instanceType = (TypeLiteral<T>) TypeLiteral.get(
+ ((ParameterizedType) membersInjectorType).getActualTypeArguments()[0]);
+ MembersInjector<T> membersInjector = membersInjectorStore.get(instanceType, errors);
+
+ InternalFactory<MembersInjector<T>> factory = new ConstantFactory<MembersInjector<T>>(
+ Initializables.of(membersInjector));
+
+ return new InstanceBindingImpl<MembersInjector<T>>(this, key, SourceProvider.UNKNOWN_SOURCE,
+ factory, ImmutableSet.<InjectionPoint>of(), membersInjector);
+ }
+
+ /**
+ * Creates a synthetic binding to {@code Provider<T>}, i.e. a binding to the provider from
+ * {@code Binding<T>}.
+ */
+ private <T> BindingImpl<Provider<T>> createProviderBinding(Key<Provider<T>> key, Errors errors)
+ throws ErrorsException {
+ Type providerType = key.getTypeLiteral().getType();
+
+ // If the Provider has no type parameter (raw Provider)...
+ if (!(providerType instanceof ParameterizedType)) {
+ throw errors.cannotInjectRawProvider().toException();
+ }
+
+ Type entryType = ((ParameterizedType) providerType).getActualTypeArguments()[0];
+
+ @SuppressWarnings("unchecked") // safe because T came from Key<Provider<T>>
+ Key<T> providedKey = (Key<T>) key.ofType(entryType);
+
+ BindingImpl<T> delegate = getBindingOrThrow(providedKey, errors);
+ return new ProviderBindingImpl<T>(this, key, delegate);
+ }
+
+ static class ProviderBindingImpl<T> extends BindingImpl<Provider<T>>
+ implements ProviderBinding<Provider<T>> {
+ final BindingImpl<T> providedBinding;
+
+ ProviderBindingImpl(InjectorImpl injector, Key<Provider<T>> key, Binding<T> providedBinding) {
+ super(injector, key, providedBinding.getSource(), createInternalFactory(providedBinding),
+ Scoping.UNSCOPED);
+ this.providedBinding = (BindingImpl<T>) providedBinding;
+ }
+
+ static <T> InternalFactory<Provider<T>> createInternalFactory(Binding<T> providedBinding) {
+ final Provider<T> provider = providedBinding.getProvider();
+ return new InternalFactory<Provider<T>>() {
+ public Provider<T> get(Errors errors, InternalContext context, Dependency dependency) {
+ return provider;
+ }
+ };
+ }
+
+ public Key<? extends T> getProvidedKey() {
+ return providedBinding.getKey();
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super Provider<T>, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public void applyTo(Binder binder) {
+ throw new UnsupportedOperationException("This element represents a synthetic binding.");
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ProviderKeyBinding.class)
+ .add("key", getKey())
+ .add("providedKey", getProvidedKey())
+ .toString();
+ }
+ }
+
+ /**
+ * Converts a constant string binding to the required type.
+ *
+ * @return the binding if it could be resolved, or null if the binding doesn't exist
+ * @throws org.elasticsearch.common.inject.internal.ErrorsException
+ * if there was an error resolving the binding
+ */
+ private <T> BindingImpl<T> convertConstantStringBinding(Key<T> key, Errors errors)
+ throws ErrorsException {
+ // Find a constant string binding.
+ Key<String> stringKey = key.ofType(String.class);
+ BindingImpl<String> stringBinding = state.getExplicitBinding(stringKey);
+ if (stringBinding == null || !stringBinding.isConstant()) {
+ return null;
+ }
+
+ String stringValue = stringBinding.getProvider().get();
+ Object source = stringBinding.getSource();
+
+ // Find a matching type converter.
+ TypeLiteral<T> type = key.getTypeLiteral();
+ MatcherAndConverter matchingConverter = state.getConverter(stringValue, type, errors, source);
+
+ if (matchingConverter == null) {
+ // No converter can handle the given type.
+ return null;
+ }
+
+ // Try to convert the string. A failed conversion results in an error.
+ try {
+ @SuppressWarnings("unchecked") // This cast is safe because we double check below.
+ T converted = (T) matchingConverter.getTypeConverter().convert(stringValue, type);
+
+ if (converted == null) {
+ throw errors.converterReturnedNull(stringValue, source, type, matchingConverter)
+ .toException();
+ }
+
+ if (!type.getRawType().isInstance(converted)) {
+ throw errors.conversionTypeError(stringValue, source, type, matchingConverter, converted)
+ .toException();
+ }
+
+ return new ConvertedConstantBindingImpl<T>(this, key, converted, stringBinding);
+ } catch (ErrorsException e) {
+ throw e;
+ } catch (RuntimeException e) {
+ throw errors.conversionError(stringValue, source, type, matchingConverter, e)
+ .toException();
+ }
+ }
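+
+ /* Editor's sketch of the conversion handled above (hypothetical binding): a
+ * constant String binding can satisfy a typed injection point:
+ *
+ * bindConstant().annotatedWith(Names.named("port")).to("9300");
+ * // an @Named("port") int injection point is then satisfied by converting
+ * // the String "9300" with a matching TypeConverter
+ */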
+
+ private static class ConvertedConstantBindingImpl<T>
+ extends BindingImpl<T> implements ConvertedConstantBinding<T> {
+ final T value;
+ final Provider<T> provider;
+ final Binding<String> originalBinding;
+
+ ConvertedConstantBindingImpl(
+ Injector injector, Key<T> key, T value, Binding<String> originalBinding) {
+ super(injector, key, originalBinding.getSource(),
+ new ConstantFactory<T>(Initializables.of(value)), Scoping.UNSCOPED);
+ this.value = value;
+ provider = Providers.of(value);
+ this.originalBinding = originalBinding;
+ }
+
+ @Override
+ public Provider<T> getProvider() {
+ return provider;
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public T getValue() {
+ return value;
+ }
+
+ public Key<String> getSourceKey() {
+ return originalBinding.getKey();
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return ImmutableSet.<Dependency<?>>of(Dependency.get(getSourceKey()));
+ }
+
+ public void applyTo(Binder binder) {
+ throw new UnsupportedOperationException("This element represents a synthetic binding.");
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ConvertedConstantBinding.class)
+ .add("key", getKey())
+ .add("sourceKey", getSourceKey())
+ .add("value", value)
+ .toString();
+ }
+ }
+
+ <T> void initializeBinding(BindingImpl<T> binding, Errors errors) throws ErrorsException {
+ // Put the partially constructed binding in the map a little early. This enables us to handle
+ // circular dependencies. Example: FooImpl -> BarImpl -> FooImpl.
+ // Note: We don't need to synchronize on state.lock() during injector creation.
+ // TODO: for the above example, remove the binding for BarImpl if the binding for FooImpl fails
+ if (binding instanceof ConstructorBindingImpl<?>) {
+ Key<T> key = binding.getKey();
+ jitBindings.put(key, binding);
+ boolean successful = false;
+ try {
+ ((ConstructorBindingImpl) binding).initialize(this, errors);
+ successful = true;
+ } finally {
+ if (!successful) {
+ jitBindings.remove(key);
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a binding for an injectable type with the given scope. Looks for a scope on the type if
+ * none is specified.
+ */
+ <T> BindingImpl<T> createUnitializedBinding(Key<T> key, Scoping scoping, Object source,
+ Errors errors) throws ErrorsException {
+ Class<?> rawType = key.getTypeLiteral().getRawType();
+
+ // Don't try to inject arrays or enums.
+ if (rawType.isArray() || rawType.isEnum()) {
+ throw errors.missingImplementation(key).toException();
+ }
+
+ // Handle TypeLiteral<T> by binding the inner type
+ if (rawType == TypeLiteral.class) {
+ @SuppressWarnings("unchecked") // we have to fudge the inner type as Object
+ BindingImpl<T> binding = (BindingImpl<T>) createTypeLiteralBinding(
+ (Key<TypeLiteral<Object>>) key, errors);
+ return binding;
+ }
+
+ // Handle @ImplementedBy
+ ImplementedBy implementedBy = rawType.getAnnotation(ImplementedBy.class);
+ if (implementedBy != null) {
+ Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors);
+ return createImplementedByBinding(key, scoping, implementedBy, errors);
+ }
+
+ // Handle @ProvidedBy.
+ ProvidedBy providedBy = rawType.getAnnotation(ProvidedBy.class);
+ if (providedBy != null) {
+ Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors);
+ return createProvidedByBinding(key, scoping, providedBy, errors);
+ }
+
+ // We can't inject abstract classes.
+ // TODO: Method interceptors could actually enable us to implement
+ // abstract types. Should we remove this restriction?
+ if (Modifier.isAbstract(rawType.getModifiers())) {
+ throw errors.missingImplementation(key).toException();
+ }
+
+ // Error: Inner class.
+ if (Classes.isInnerClass(rawType)) {
+ throw errors.cannotInjectInnerClass(rawType).toException();
+ }
+
+ if (!scoping.isExplicitlyScoped()) {
+ Class<? extends Annotation> scopeAnnotation = findScopeAnnotation(errors, rawType);
+ if (scopeAnnotation != null) {
+ scoping = Scopes.makeInjectable(Scoping.forAnnotation(scopeAnnotation),
+ this, errors.withSource(rawType));
+ }
+ }
+
+ return ConstructorBindingImpl.create(this, key, source, scoping);
+ }
+
+ /**
+ * Converts a binding for a {@code Key<TypeLiteral<T>>} to the value {@code TypeLiteral<T>}. It's
+ * a bit awkward because we have to pull out the inner type in the type literal.
+ */
+ private <T> BindingImpl<TypeLiteral<T>> createTypeLiteralBinding(
+ Key<TypeLiteral<T>> key, Errors errors) throws ErrorsException {
+ Type typeLiteralType = key.getTypeLiteral().getType();
+ if (!(typeLiteralType instanceof ParameterizedType)) {
+ throw errors.cannotInjectRawTypeLiteral().toException();
+ }
+
+ ParameterizedType parameterizedType = (ParameterizedType) typeLiteralType;
+ Type innerType = parameterizedType.getActualTypeArguments()[0];
+
+ // this is unfortunate. We don't support building TypeLiterals for type variables like 'T'. If
+ // this proves problematic, we can probably fix TypeLiteral to support type variables
+ if (!(innerType instanceof Class)
+ && !(innerType instanceof GenericArrayType)
+ && !(innerType instanceof ParameterizedType)) {
+ throw errors.cannotInjectTypeLiteralOf(innerType).toException();
+ }
+
+ @SuppressWarnings("unchecked") // by definition, innerType == T, so this is safe
+ TypeLiteral<T> value = (TypeLiteral<T>) TypeLiteral.get(innerType);
+ InternalFactory<TypeLiteral<T>> factory = new ConstantFactory<TypeLiteral<T>>(
+ Initializables.of(value));
+ return new InstanceBindingImpl<TypeLiteral<T>>(this, key, SourceProvider.UNKNOWN_SOURCE,
+ factory, ImmutableSet.<InjectionPoint>of(), value);
+ }
+
+ /**
+ * Creates a binding for a type annotated with @ProvidedBy.
+ */
+ <T> BindingImpl<T> createProvidedByBinding(Key<T> key, Scoping scoping,
+ ProvidedBy providedBy, Errors errors) throws ErrorsException {
+ final Class<?> rawType = key.getTypeLiteral().getRawType();
+ final Class<? extends Provider<?>> providerType = providedBy.value();
+
+ // Make sure it's not the same type. TODO: Can we check for deeper loops?
+ if (providerType == rawType) {
+ throw errors.recursiveProviderType().toException();
+ }
+
+ // Assume the provider provides an appropriate type. We double check at runtime.
+ @SuppressWarnings("unchecked")
+ final Key<? extends Provider<T>> providerKey
+ = (Key<? extends Provider<T>>) Key.get(providerType);
+ final BindingImpl<? extends Provider<?>> providerBinding
+ = getBindingOrThrow(providerKey, errors);
+
+ InternalFactory<T> internalFactory = new InternalFactory<T>() {
+ public T get(Errors errors, InternalContext context, Dependency dependency)
+ throws ErrorsException {
+ errors = errors.withSource(providerKey);
+ Provider<?> provider = providerBinding.getInternalFactory().get(
+ errors, context, dependency);
+ try {
+ Object o = provider.get();
+ if (o != null && !rawType.isInstance(o)) {
+ throw errors.subtypeNotProvided(providerType, rawType).toException();
+ }
+ @SuppressWarnings("unchecked") // protected by isInstance() check above
+ T t = (T) o;
+ return t;
+ } catch (RuntimeException e) {
+ throw errors.errorInProvider(e).toException();
+ }
+ }
+ };
+
+ return new LinkedProviderBindingImpl<T>(
+ this,
+ key,
+ rawType /* source */,
+ Scopes.<T>scope(key, this, internalFactory, scoping),
+ scoping,
+ providerKey);
+ }
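+
+ /* Editor's sketch of the @ProvidedBy contract handled above (hypothetical types):
+ *
+ * @ProvidedBy(DatabaseProvider.class)
+ * public interface Database { }
+ *
+ * public class DatabaseProvider implements Provider<Database> {
+ * public Database get() { return new InMemoryDatabase(); }
+ * }
+ *
+ * The isInstance() check above rejects a provider returning an unrelated type.
+ */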
+
+ /**
+ * Creates a binding for a type annotated with @ImplementedBy.
+ */
+ <T> BindingImpl<T> createImplementedByBinding(Key<T> key, Scoping scoping,
+ ImplementedBy implementedBy, Errors errors)
+ throws ErrorsException {
+ Class<?> rawType = key.getTypeLiteral().getRawType();
+ Class<?> implementationType = implementedBy.value();
+
+ // Make sure it's not the same type. TODO: Can we check for deeper cycles?
+ if (implementationType == rawType) {
+ throw errors.recursiveImplementationType().toException();
+ }
+
+ // Make sure implementationType extends type.
+ if (!rawType.isAssignableFrom(implementationType)) {
+ throw errors.notASubtype(implementationType, rawType).toException();
+ }
+
+ @SuppressWarnings("unchecked") // After the preceding check, this cast is safe.
+ Class<? extends T> subclass = (Class<? extends T>) implementationType;
+
+ // Look up the target binding.
+ final Key<? extends T> targetKey = Key.get(subclass);
+ final BindingImpl<? extends T> targetBinding = getBindingOrThrow(targetKey, errors);
+
+ InternalFactory<T> internalFactory = new InternalFactory<T>() {
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ return targetBinding.getInternalFactory().get(
+ errors.withSource(targetKey), context, dependency);
+ }
+ };
+
+ return new LinkedBindingImpl<T>(
+ this,
+ key,
+ rawType /* source */,
+ Scopes.<T>scope(key, this, internalFactory, scoping),
+ scoping,
+ targetKey);
+ }
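+
+ /* Editor's sketch of the @ImplementedBy contract handled above (hypothetical types):
+ *
+ * @ImplementedBy(CacheImpl.class)
+ * public interface Cache { }
+ *
+ * public class CacheImpl implements Cache { }
+ *
+ * Requesting Cache with no explicit binding yields a just-in-time binding linked
+ * to CacheImpl, subject to the isAssignableFrom() check above.
+ */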
+
+ /**
+ * Attempts to create a just-in-time binding for {@code key} in the root injector, falling back to
+ * other ancestor injectors until this injector is tried.
+ */
+ private <T> BindingImpl<T> createJustInTimeBindingRecursive(Key<T> key, Errors errors)
+ throws ErrorsException {
+ // ask the parent to create the JIT binding
+ if (parent != null && !parent.readOnly /* ES: don't check on parent if it's read only, it's already created all the bindings it can */) {
+ try {
+ return parent.createJustInTimeBindingRecursive(key, new Errors());
+ } catch (ErrorsException ignored) {
+ }
+ }
+
+ if (state.isBlacklisted(key)) {
+ throw errors.childBindingAlreadySet(key).toException();
+ }
+
+ BindingImpl<T> binding = createJustInTimeBinding(key, errors);
+ state.parent().blacklist(key);
+ jitBindings.put(key, binding);
+ return binding;
+ }
+
+ /**
+ * Returns a new just-in-time binding created by resolving {@code key}. The strategies used to
+ * create just-in-time bindings are:
+ * <ol>
+ * <li>Internalizing Providers. If the requested binding is for {@code Provider<T>}, we delegate
+ * to the binding for {@code T}.
+ * <li>Converting constants.
+ * <li>ImplementedBy and ProvidedBy annotations. Only for unannotated keys.
+ * <li>The constructor of the raw type. Only for unannotated keys.
+ * </ol>
+ *
+ * @throws org.elasticsearch.common.inject.internal.ErrorsException
+ * if the binding cannot be created.
+ */
+ <T> BindingImpl<T> createJustInTimeBinding(Key<T> key, Errors errors) throws ErrorsException {
+ if (state.isBlacklisted(key)) {
+ throw errors.childBindingAlreadySet(key).toException();
+ }
+
+ // Handle cases where T is a Provider<?>.
+ if (isProvider(key)) {
+ // These casts are safe. We know T extends Provider<X> and that given Key<Provider<X>>,
+ // createProviderBinding() will return BindingImpl<Provider<X>>.
+ @SuppressWarnings("unchecked")
+ BindingImpl binding = createProviderBinding((Key) key, errors);
+ return binding;
+ }
+
+ // Handle cases where T is a MembersInjector<?>
+ if (isMembersInjector(key)) {
+ // These casts are safe. We know T extends MembersInjector<X> and that given Key<MembersInjector<X>>,
+ // createMembersInjectorBinding() will return BindingImpl<MembersInjector<X>>.
+ @SuppressWarnings("unchecked")
+ BindingImpl binding = createMembersInjectorBinding((Key) key, errors);
+ return binding;
+ }
+
+ // Try to convert a constant string binding to the requested type.
+ BindingImpl<T> convertedBinding = convertConstantStringBinding(key, errors);
+ if (convertedBinding != null) {
+ return convertedBinding;
+ }
+
+ // If the key has an annotation...
+ if (key.hasAnnotationType()) {
+ // Look for a binding without annotation attributes or return null.
+ if (key.hasAttributes()) {
+ try {
+ Errors ignored = new Errors();
+ return getBindingOrThrow(key.withoutAttributes(), ignored);
+ } catch (ErrorsException ignored) {
+ // throw with a more appropriate message below
+ }
+ }
+ throw errors.missingImplementation(key).toException();
+ }
+
+ Object source = key.getTypeLiteral().getRawType();
+ BindingImpl<T> binding = createUnitializedBinding(key, Scoping.UNSCOPED, source, errors);
+ initializeBinding(binding, errors);
+ return binding;
+ }
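+
+ /* Editor's sketch: strategy 1 above in action (hypothetical Service type):
+ *
+ * Provider<Service> provider =
+ * injector.getInstance(new Key<Provider<Service>>() {});
+ * // delegates to the binding for Service instead of binding Provider itself
+ */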
+
+ <T> InternalFactory<? extends T> getInternalFactory(Key<T> key, Errors errors)
+ throws ErrorsException {
+ return getBindingOrThrow(key, errors).getInternalFactory();
+ }
+
+ // not test-covered
+ public Map<Key<?>, Binding<?>> getBindings() {
+ return state.getExplicitBindingsThisLevel();
+ }
+
+ private static class BindingsMultimap {
+ final Map<TypeLiteral<?>, List<Binding<?>>> multimap = Maps.newHashMap();
+
+ <T> void put(TypeLiteral<T> type, Binding<T> binding) {
+ List<Binding<?>> bindingsForType = multimap.get(type);
+ if (bindingsForType == null) {
+ bindingsForType = Lists.newArrayList();
+ multimap.put(type, bindingsForType);
+ }
+ bindingsForType.add(binding);
+ }
+
+ @SuppressWarnings("unchecked")
+ // safe because we only put matching entries into the map
+ <T> List<Binding<T>> getAll(TypeLiteral<T> type) {
+ List<Binding<?>> bindings = multimap.get(type);
+ return bindings != null
+ ? Collections.<Binding<T>>unmodifiableList((List) bindings)
+ : ImmutableList.<Binding<T>>of();
+ }
+ }
+
+ /**
+ * Returns parameter injectors, or {@code null} if there are no parameters.
+ */
+ SingleParameterInjector<?>[] getParametersInjectors(
+ List<Dependency<?>> parameters, Errors errors) throws ErrorsException {
+ if (parameters.isEmpty()) {
+ return null;
+ }
+
+ int numErrorsBefore = errors.size();
+ SingleParameterInjector<?>[] result = new SingleParameterInjector<?>[parameters.size()];
+ int i = 0;
+ for (Dependency<?> parameter : parameters) {
+ try {
+ result[i++] = createParameterInjector(parameter, errors.withSource(parameter));
+ } catch (ErrorsException rethrownBelow) {
+ // rethrown below
+ }
+ }
+
+ errors.throwIfNewErrors(numErrorsBefore);
+ return result;
+ }
+
+ <T> SingleParameterInjector<T> createParameterInjector(final Dependency<T> dependency,
+ final Errors errors) throws ErrorsException {
+ InternalFactory<? extends T> factory = getInternalFactory(dependency.getKey(), errors);
+ return new SingleParameterInjector<T>(dependency, factory);
+ }
+
+ /**
+ * Invokes a method.
+ */
+ interface MethodInvoker {
+ Object invoke(Object target, Object... parameters)
+ throws IllegalAccessException, InvocationTargetException;
+ }
+
+ /**
+ * Cached constructor injectors for each type.
+ */
+ ConstructorInjectorStore constructors = new ConstructorInjectorStore(this);
+
+ /**
+ * Cached field and method injectors for each type.
+ */
+ MembersInjectorStore membersInjectorStore;
+
+ @SuppressWarnings("unchecked") // the members injector type is consistent with instance's type
+ public void injectMembers(Object instance) {
+ MembersInjector membersInjector = getMembersInjector(instance.getClass());
+ membersInjector.injectMembers(instance);
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
+ Errors errors = new Errors(typeLiteral);
+ try {
+ return membersInjectorStore.get(typeLiteral, errors);
+ } catch (ErrorsException e) {
+ throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
+ }
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
+ return getMembersInjector(TypeLiteral.get(type));
+ }
+
+ public <T> Provider<T> getProvider(Class<T> type) {
+ return getProvider(Key.get(type));
+ }
+
+ <T> Provider<T> getProviderOrThrow(final Key<T> key, Errors errors) throws ErrorsException {
+ final InternalFactory<? extends T> factory = getInternalFactory(key, errors);
+ // ES: optimize for a common case of read only instance getting from the parent...
+ if (factory instanceof InternalFactory.Instance) {
+ return new Provider<T>() {
+ @Override
+ public T get() {
+ try {
+ return (T) ((InternalFactory.Instance) factory).get(null, null, null);
+ } catch (ErrorsException e) {
+ // ignore
+ }
+ // should never happen...
+ assert false;
+ return null;
+ }
+ };
+ }
+
+ final Dependency<T> dependency = Dependency.get(key);
+ return new Provider<T>() {
+ public T get() {
+ final Errors errors = new Errors(dependency);
+ try {
+ T t = callInContext(new ContextualCallable<T>() {
+ public T call(InternalContext context) throws ErrorsException {
+ context.setDependency(dependency);
+ try {
+ return factory.get(errors, context, dependency);
+ } finally {
+ context.setDependency(null);
+ }
+ }
+ });
+ errors.throwIfNewErrors(0);
+ return t;
+ } catch (ErrorsException e) {
+ throw new ProvisionException(errors.merge(e.getErrors()).getMessages());
+ }
+ }
+
+ @Override
+ public String toString() {
+ return factory.toString();
+ }
+ };
+ }
+
+ public <T> Provider<T> getProvider(final Key<T> key) {
+ Errors errors = new Errors(key);
+ try {
+ Provider<T> result = getProviderOrThrow(key, errors);
+ errors.throwIfNewErrors(0);
+ return result;
+ } catch (ErrorsException e) {
+ throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
+ }
+ }
+
+ public <T> T getInstance(Key<T> key) {
+ return getProvider(key).get();
+ }
+
+ public <T> T getInstance(Class<T> type) {
+ return getProvider(type).get();
+ }
+
+ final ThreadLocal<Object[]> localContext;
+
+ /**
+ * Looks up the thread-local context. Creates (and removes) a new context if necessary.
+ */
+ <T> T callInContext(ContextualCallable<T> callable) throws ErrorsException {
+ Object[] reference = localContext.get();
+ if (reference[0] == null) {
+ reference[0] = new InternalContext();
+ try {
+ return callable.call((InternalContext) reference[0]);
+ } finally {
+ // Only clear the context if this call created it.
+ reference[0] = null;
+ }
+ } else {
+ // Someone else will clean up this context.
+ return callable.call((InternalContext) reference[0]);
+ }
+ }
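+
+ /* Editor's note: the single-element Object[] returned by localContext acts as a
+ * per-thread holder, so nested calls reuse one InternalContext and only the
+ * outermost call (the one that filled slot 0) clears it again.
+ */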
+
+ public String toString() {
+ return new ToStringBuilder(Injector.class)
+ .add("bindings", state.getExplicitBindingsThisLevel().values())
+ .toString();
+ }
+
+ // ES_GUICE: clear caches
+ public void clearCache() {
+ state.clearBlacklisted();
+ constructors = new ConstructorInjectorStore(this);
+ membersInjectorStore = new MembersInjectorStore(this, state.getTypeListenerBindings());
+ jitBindings = Maps.newHashMap();
+ }
+
+ // ES_GUICE: make all registered bindings act as eager singletons
+ public void readOnlyAllSingletons() {
+ readOnly = true;
+ state.makeAllBindingsToEagerSingletons(this);
+ bindingsMultimap = new BindingsMultimap();
+ // reindex the bindings
+ index();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/InjectorShell.java b/src/main/java/org/elasticsearch/common/inject/InjectorShell.java
new file mode 100644
index 0000000..3babb71
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/InjectorShell.java
@@ -0,0 +1,250 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.*;
+
+import java.util.List;
+import java.util.logging.Logger;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static org.elasticsearch.common.inject.Scopes.SINGLETON;
+
+/**
+ * A partially-initialized injector. See {@link InjectorBuilder}, which uses this to build a tree
+ * of injectors in batch.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class InjectorShell {
+
+ private final List<Element> elements;
+ private final InjectorImpl injector;
+ private final PrivateElements privateElements;
+
+ private InjectorShell(Builder builder, List<Element> elements, InjectorImpl injector) {
+ this.privateElements = builder.privateElements;
+ this.elements = elements;
+ this.injector = injector;
+ }
+
+ PrivateElements getPrivateElements() {
+ return privateElements;
+ }
+
+ InjectorImpl getInjector() {
+ return injector;
+ }
+
+ List<Element> getElements() {
+ return elements;
+ }
+
+ static class Builder {
+ private final List<Element> elements = Lists.newArrayList();
+ private final List<Module> modules = Lists.newArrayList();
+
+ /**
+ * lazily constructed
+ */
+ private State state;
+
+ private InjectorImpl parent;
+ private Stage stage;
+
+ /**
+ * null unless this exists in a {@link Binder#newPrivateBinder private environment}
+ */
+ private PrivateElementsImpl privateElements;
+
+ Builder parent(InjectorImpl parent) {
+ this.parent = parent;
+ this.state = new InheritingState(parent.state);
+ return this;
+ }
+
+ Builder stage(Stage stage) {
+ this.stage = stage;
+ return this;
+ }
+
+ Builder privateElements(PrivateElements privateElements) {
+ this.privateElements = (PrivateElementsImpl) privateElements;
+ this.elements.addAll(privateElements.getElements());
+ return this;
+ }
+
+ void addModules(Iterable<? extends Module> modules) {
+ for (Module module : modules) {
+ this.modules.add(module);
+ }
+ }
+
+ /**
+ * Synchronize on this before calling {@link #build}.
+ */
+ Object lock() {
+ return getState().lock();
+ }
+
+ /**
+ * Creates and returns the injector shells for the current modules. Multiple shells will be
+ * returned if any modules contain {@link Binder#newPrivateBinder private environments}. The
+ * primary injector will be first in the returned list.
+ */
+ List<InjectorShell> build(Initializer initializer, BindingProcessor bindingProcessor,
+ Stopwatch stopwatch, Errors errors) {
+ checkState(stage != null, "Stage not initialized");
+ checkState(privateElements == null || parent != null, "PrivateElements with no parent");
+ checkState(state != null, "no state. Did you remember to lock()?");
+
+ InjectorImpl injector = new InjectorImpl(parent, state, initializer);
+ if (privateElements != null) {
+ privateElements.initInjector(injector);
+ }
+
+ // bind Stage and Singleton if this is a top-level injector
+ if (parent == null) {
+ modules.add(0, new RootModule(stage));
+ new TypeConverterBindingProcessor(errors).prepareBuiltInConverters(injector);
+ }
+
+ elements.addAll(Elements.getElements(stage, modules));
+ stopwatch.resetAndLog("Module execution");
+
+ new MessageProcessor(errors).process(injector, elements);
+
+ new TypeListenerBindingProcessor(errors).process(injector, elements);
+ List<TypeListenerBinding> listenerBindings = injector.state.getTypeListenerBindings();
+ injector.membersInjectorStore = new MembersInjectorStore(injector, listenerBindings);
+ stopwatch.resetAndLog("TypeListeners creation");
+
+ new ScopeBindingProcessor(errors).process(injector, elements);
+ stopwatch.resetAndLog("Scopes creation");
+
+ new TypeConverterBindingProcessor(errors).process(injector, elements);
+ stopwatch.resetAndLog("Converters creation");
+
+ bindInjector(injector);
+ bindLogger(injector);
+ bindingProcessor.process(injector, elements);
+ stopwatch.resetAndLog("Binding creation");
+
+ List<InjectorShell> injectorShells = Lists.newArrayList();
+ injectorShells.add(new InjectorShell(this, elements, injector));
+
+ // recursively build child shells
+ PrivateElementProcessor processor = new PrivateElementProcessor(errors, stage);
+ processor.process(injector, elements);
+ for (Builder builder : processor.getInjectorShellBuilders()) {
+ injectorShells.addAll(builder.build(initializer, bindingProcessor, stopwatch, errors));
+ }
+ stopwatch.resetAndLog("Private environment creation");
+
+ return injectorShells;
+ }
+
+ private State getState() {
+ if (state == null) {
+ state = new InheritingState(State.NONE);
+ }
+ return state;
+ }
+ }
+
+ /**
+ * The Injector is a special case because we allow both the parent and child injectors to
+ * have a binding for that key.
+ */
+ private static void bindInjector(InjectorImpl injector) {
+ Key<Injector> key = Key.get(Injector.class);
+ InjectorFactory injectorFactory = new InjectorFactory(injector);
+ injector.state.putBinding(key,
+ new ProviderInstanceBindingImpl<Injector>(injector, key, SourceProvider.UNKNOWN_SOURCE,
+ injectorFactory, Scoping.UNSCOPED, injectorFactory,
+ ImmutableSet.<InjectionPoint>of()));
+ }
+
+ private static class InjectorFactory implements InternalFactory<Injector>, Provider<Injector> {
+ private final Injector injector;
+
+ private InjectorFactory(Injector injector) {
+ this.injector = injector;
+ }
+
+ public Injector get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ return injector;
+ }
+
+ public Injector get() {
+ return injector;
+ }
+
+ public String toString() {
+ return "Provider<Injector>";
+ }
+ }
+
+ /**
+ * The Logger is a special case because it knows the injection point of the injected member. It's
+ * the only binding that does this.
+ */
+ private static void bindLogger(InjectorImpl injector) {
+ Key<Logger> key = Key.get(Logger.class);
+ LoggerFactory loggerFactory = new LoggerFactory();
+ injector.state.putBinding(key,
+ new ProviderInstanceBindingImpl<Logger>(injector, key,
+ SourceProvider.UNKNOWN_SOURCE, loggerFactory, Scoping.UNSCOPED,
+ loggerFactory, ImmutableSet.<InjectionPoint>of()));
+ }
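+
+ /* Editor's sketch: effect of the Logger binding above (hypothetical class):
+ *
+ * class SearchService {
+ * @Inject Logger logger; // named after SearchService via its injection point
+ * }
+ */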
+
+ private static class LoggerFactory implements InternalFactory<Logger>, Provider<Logger> {
+ public Logger get(Errors errors, InternalContext context, Dependency<?> dependency) {
+ InjectionPoint injectionPoint = dependency.getInjectionPoint();
+ return injectionPoint == null
+ ? Logger.getAnonymousLogger()
+ : Logger.getLogger(injectionPoint.getMember().getDeclaringClass().getName());
+ }
+
+ public Logger get() {
+ return Logger.getAnonymousLogger();
+ }
+
+ public String toString() {
+ return "Provider<Logger>";
+ }
+ }
+
+ private static class RootModule implements Module {
+ final Stage stage;
+
+ private RootModule(Stage stage) {
+ this.stage = checkNotNull(stage, "stage");
+ }
+
+ public void configure(Binder binder) {
+ binder = binder.withSource(SourceProvider.UNKNOWN_SOURCE);
+ binder.bind(Stage.class).toInstance(stage);
+ binder.bindScope(Singleton.class, SINGLETON);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Injectors.java b/src/main/java/org/elasticsearch/common/inject/Injectors.java
new file mode 100644
index 0000000..805acb5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Injectors.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.inject.name.Names;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.lang.reflect.Type;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+/**
+ * Utility methods for querying an {@link Injector}'s bindings and instances.
+ */
+public class Injectors {
+
+ public static Throwable getFirstErrorFailure(CreationException e) {
+ if (e.getErrorMessages().isEmpty()) {
+ return e;
+ }
+ // return the first message that has root cause, probably an actual error
+ for (Message message : e.getErrorMessages()) {
+ if (message.getCause() != null) {
+ return message.getCause();
+ }
+ }
+ return e;
+ }
+
+ /**
+ * Returns an instance of the given type with the {@link org.elasticsearch.common.inject.name.Named}
+ * annotation value.
+ * <p/>
+ * This method allows you to switch this code
+ * <code>injector.getInstance(Key.get(type, Names.named(name)));</code>
+ * <p/>
+ * to the more concise
+ * <code>Injectors.getInstance(injector, type, name);</code>
+ */
+ public static <T> T getInstance(Injector injector, java.lang.Class<T> type, String name) {
+ return injector.getInstance(Key.get(type, Names.named(name)));
+ }
+
+ /**
+ * Returns a collection of all instances of the given base type
+ *
+ * @param baseClass the base type of objects required
+ * @param <T> the base type
+ * @return a set of objects returned from this injector
+ */
+ public static <T> Set<T> getInstancesOf(Injector injector, Class<T> baseClass) {
+ Set<T> answer = Sets.newHashSet();
+ Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
+ for (Entry<Key<?>, Binding<?>> entry : entries) {
+ Key<?> key = entry.getKey();
+ Class<?> keyType = getKeyType(key);
+ if (keyType != null && baseClass.isAssignableFrom(keyType)) {
+ Binding<?> binding = entry.getValue();
+ Object value = binding.getProvider().get();
+ if (value != null) {
+ T castValue = baseClass.cast(value);
+ answer.add(castValue);
+ }
+ }
+ }
+ return answer;
+ }
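+
+ /* Editor's sketch (hypothetical LifecycleComponent type): collecting every bound
+ * implementation of a base type in one call:
+ *
+ * Set<LifecycleComponent> components =
+ * Injectors.getInstancesOf(injector, LifecycleComponent.class);
+ */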
+
+ /**
+ * Returns a collection of all instances matching the given matcher
+ *
+ * @param matcher matches the types to return instances of
+ * @return a set of objects returned from this injector
+ */
+ public static <T> Set<T> getInstancesOf(Injector injector, Matcher<Class> matcher) {
+ Set<T> answer = Sets.newHashSet();
+ Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
+ for (Entry<Key<?>, Binding<?>> entry : entries) {
+ Key<?> key = entry.getKey();
+ Class<?> keyType = getKeyType(key);
+ if (keyType != null && matcher.matches(keyType)) {
+ Binding<?> binding = entry.getValue();
+ Object value = binding.getProvider().get();
+ answer.add((T) value);
+ }
+ }
+ return answer;
+ }
+
+ /**
+ * Returns a collection of all of the providers matching the given matcher
+ *
+ * @param matcher matches the types to return instances of
+ * @return a set of objects returned from this injector
+ */
+ public static <T> Set<Provider<T>> getProvidersOf(Injector injector, Matcher<Class> matcher) {
+ Set<Provider<T>> answer = Sets.newHashSet();
+ Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
+ for (Entry<Key<?>, Binding<?>> entry : entries) {
+ Key<?> key = entry.getKey();
+ Class<?> keyType = getKeyType(key);
+ if (keyType != null && matcher.matches(keyType)) {
+ Binding<?> binding = entry.getValue();
+ answer.add((Provider<T>) binding.getProvider());
+ }
+ }
+ return answer;
+ }
+
+ /**
+ * Returns a collection of all providers of the given base type
+ *
+ * @param baseClass the base type of objects required
+ * @param <T> the base type
+ * @return a set of objects returned from this injector
+ */
+ public static <T> Set<Provider<T>> getProvidersOf(Injector injector, Class<T> baseClass) {
+ Set<Provider<T>> answer = Sets.newHashSet();
+ Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
+ for (Entry<Key<?>, Binding<?>> entry : entries) {
+ Key<?> key = entry.getKey();
+ Class<?> keyType = getKeyType(key);
+ if (keyType != null && baseClass.isAssignableFrom(keyType)) {
+ Binding<?> binding = entry.getValue();
+ answer.add((Provider<T>) binding.getProvider());
+ }
+ }
+ return answer;
+ }
+
+ /**
+ * Returns true if a binding exists for the given matcher
+ */
+ public static boolean hasBinding(Injector injector, Matcher<Class> matcher) {
+ return !getBindingsOf(injector, matcher).isEmpty();
+ }
+
+ /**
+ * Returns true if a binding exists for the given base class
+ */
+ public static boolean hasBinding(Injector injector, Class<?> baseClass) {
+ return !getBindingsOf(injector, baseClass).isEmpty();
+ }
+
+ /**
+ * Returns true if a binding exists for the given key
+ */
+ public static boolean hasBinding(Injector injector, Key<?> key) {
+ Binding<?> binding = getBinding(injector, key);
+ return binding != null;
+ }
+
+ /**
+ * Returns the binding for the given key or null if there is no such binding
+ */
+ public static Binding<?> getBinding(Injector injector, Key<?> key) {
+ Map<Key<?>, Binding<?>> bindings = injector.getBindings();
+ Binding<?> binding = bindings.get(key);
+ return binding;
+ }
+
+ /**
+ * Returns a collection of all of the bindings matching the given matcher
+ *
+ * @param matcher matches the types to return instances of
+ * @return a set of objects returned from this injector
+ */
+ public static Set<Binding<?>> getBindingsOf(Injector injector, Matcher<Class> matcher) {
+ Set<Binding<?>> answer = Sets.newHashSet();
+ Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
+ for (Entry<Key<?>, Binding<?>> entry : entries) {
+ Key<?> key = entry.getKey();
+ Class<?> keyType = getKeyType(key);
+ if (keyType != null && matcher.matches(keyType)) {
+ answer.add(entry.getValue());
+ }
+ }
+ return answer;
+ }
+
+ /**
+ * Returns a collection of all bindings of the given base type
+ *
+ * @param baseClass the base type of objects required
+ * @return a set of objects returned from this injector
+ */
+ public static Set<Binding<?>> getBindingsOf(Injector injector, Class<?> baseClass) {
+ Set<Binding<?>> answer = Sets.newHashSet();
+ Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
+ for (Entry<Key<?>, Binding<?>> entry : entries) {
+ Key<?> key = entry.getKey();
+ Class<?> keyType = getKeyType(key);
+ if (keyType != null && baseClass.isAssignableFrom(keyType)) {
+ answer.add(entry.getValue());
+ }
+ }
+ return answer;
+ }
+
+ /**
+ * Returns the key type of the given key
+ */
+ public static Class<?> getKeyType(Key<?> key) {
+ Class<?> keyType = null;
+ TypeLiteral<?> typeLiteral = key.getTypeLiteral();
+ Type type = typeLiteral.getType();
+ if (type instanceof Class) {
+ keyType = (Class<?>) type;
+ }
+ return keyType;
+ }
+
+ public static void close(Injector injector) {
+ // currently a no-op
+ }
+
+ public static void cleanCaches(Injector injector) {
+ ((InjectorImpl) injector).clearCache();
+ if (injector.getParent() != null) {
+ cleanCaches(injector.getParent());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java b/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java
new file mode 100644
index 0000000..f492bd1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java
@@ -0,0 +1,55 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * @author crazybob@google.com (Bob Lee)
+ */
+class InternalFactoryToProviderAdapter<T> implements InternalFactory<T> {
+
+ private final Initializable<Provider<? extends T>> initializable;
+ private final Object source;
+
+ public InternalFactoryToProviderAdapter(Initializable<Provider<? extends T>> initializable) {
+ this(initializable, SourceProvider.UNKNOWN_SOURCE);
+ }
+
+ public InternalFactoryToProviderAdapter(
+ Initializable<Provider<? extends T>> initializable, Object source) {
+ this.initializable = checkNotNull(initializable, "provider");
+ this.source = checkNotNull(source, "source");
+ }
+
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException {
+ try {
+ return errors.checkForNull(initializable.get(errors).get(), source, dependency);
+ } catch (RuntimeException userException) {
+ throw errors.withSource(source).errorInProvider(userException).toException();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return initializable.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Key.java b/src/main/java/org/elasticsearch/common/inject/Key.java
new file mode 100644
index 0000000..f92ff7e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Key.java
@@ -0,0 +1,511 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Annotations;
+import org.elasticsearch.common.inject.internal.MoreTypes;
+import org.elasticsearch.common.inject.internal.ToStringBuilder;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Binding key consisting of an injection type and an optional annotation.
+ * Matches the type and annotation at a point of injection.
+ * <p/>
+ * <p>For example, {@code Key.get(Service.class, Transactional.class)} will
+ * match:
+ * <p/>
+ * <pre>
+ * {@literal @}Inject
+ * public void setService({@literal @}Transactional Service service) {
+ * ...
+ * }
+ * </pre>
+ * <p/>
+ * <p>{@code Key} supports generic types via subclassing just like {@link
+ * TypeLiteral}.
+ * <p/>
+ * <p>Keys do not differentiate between primitive types (int, char, etc.) and
+ * their corresponding wrapper types (Integer, Character, etc.). Primitive
+ * types will be replaced with their wrapper types when keys are created.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Key<T> {
+
+ private final AnnotationStrategy annotationStrategy;
+
+ private final TypeLiteral<T> typeLiteral;
+ private final int hashCode;
+
+ /**
+ * Constructs a new key. Derives the type from this class's type parameter.
+ * <p/>
+ * <p>Clients create an empty anonymous subclass. Doing so embeds the type
+ * parameter in the anonymous class's type hierarchy so we can reconstitute it
+ * at runtime despite erasure.
+ * <p/>
+ * <p>Example usage for a binding of type {@code Foo} annotated with
+ * {@code @Bar}:
+ * <p/>
+ * <p>{@code new Key<Foo>(Bar.class) {}}.
+ */
+ @SuppressWarnings("unchecked")
+ protected Key(Class<? extends Annotation> annotationType) {
+ this.annotationStrategy = strategyFor(annotationType);
+ this.typeLiteral = (TypeLiteral<T>) TypeLiteral.fromSuperclassTypeParameter(getClass());
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Constructs a new key. Derives the type from this class's type parameter.
+ * <p/>
+ * <p>Clients create an empty anonymous subclass. Doing so embeds the type
+ * parameter in the anonymous class's type hierarchy so we can reconstitute it
+ * at runtime despite erasure.
+ * <p/>
+ * <p>Example usage for a binding of type {@code Foo} annotated with
+ * {@code @Bar}:
+ * <p/>
+ * <p>{@code new Key<Foo>(new Bar()) {}}.
+ */
+ @SuppressWarnings("unchecked")
+ protected Key(Annotation annotation) {
+ // no usages, not test-covered
+ this.annotationStrategy = strategyFor(annotation);
+ this.typeLiteral = (TypeLiteral<T>) TypeLiteral.fromSuperclassTypeParameter(getClass());
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Constructs a new key. Derives the type from this class's type parameter.
+ * <p/>
+ * <p>Clients create an empty anonymous subclass. Doing so embeds the type
+ * parameter in the anonymous class's type hierarchy so we can reconstitute it
+ * at runtime despite erasure.
+ * <p/>
+ * <p>Example usage for a binding of type {@code Foo}:
+ * <p/>
+ * <p>{@code new Key<Foo>() {}}.
+ */
+ @SuppressWarnings("unchecked")
+ protected Key() {
+ this.annotationStrategy = NullAnnotationStrategy.INSTANCE;
+ this.typeLiteral = (TypeLiteral<T>) TypeLiteral.fromSuperclassTypeParameter(getClass());
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Unsafe. Constructs a key from a manually specified type.
+ */
+ @SuppressWarnings("unchecked")
+ private Key(Type type, AnnotationStrategy annotationStrategy) {
+ this.annotationStrategy = annotationStrategy;
+ this.typeLiteral = MoreTypes.makeKeySafe((TypeLiteral<T>) TypeLiteral.get(type));
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Constructs a key from a manually specified type.
+ */
+ private Key(TypeLiteral<T> typeLiteral, AnnotationStrategy annotationStrategy) {
+ this.annotationStrategy = annotationStrategy;
+ this.typeLiteral = MoreTypes.makeKeySafe(typeLiteral);
+ this.hashCode = computeHashCode();
+ }
+
+ private int computeHashCode() {
+ return typeLiteral.hashCode() * 31 + annotationStrategy.hashCode();
+ }
+
+ /**
+ * Gets the key type.
+ */
+ public final TypeLiteral<T> getTypeLiteral() {
+ return typeLiteral;
+ }
+
+ /**
+ * Gets the annotation type.
+ */
+ public final Class<? extends Annotation> getAnnotationType() {
+ return annotationStrategy.getAnnotationType();
+ }
+
+ /**
+ * Gets the annotation.
+ */
+ public final Annotation getAnnotation() {
+ return annotationStrategy.getAnnotation();
+ }
+
+ boolean hasAnnotationType() {
+ return annotationStrategy.getAnnotationType() != null;
+ }
+
+ String getAnnotationName() {
+ Annotation annotation = annotationStrategy.getAnnotation();
+ if (annotation != null) {
+ return annotation.toString();
+ }
+
+ // not test-covered
+ return annotationStrategy.getAnnotationType().toString();
+ }
+
+ Class<? super T> getRawType() {
+ return typeLiteral.getRawType();
+ }
+
+ /**
+ * Gets the key of this key's provider.
+ */
+ Key<Provider<T>> providerKey() {
+ return ofType(typeLiteral.providerType());
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ }
+ if (!(o instanceof Key<?>)) {
+ return false;
+ }
+ Key<?> other = (Key<?>) o;
+ return annotationStrategy.equals(other.annotationStrategy)
+ && typeLiteral.equals(other.typeLiteral);
+ }
+
+ @Override
+ public final int hashCode() {
+ return this.hashCode;
+ }
+
+ @Override
+ public final String toString() {
+ return new ToStringBuilder(Key.class)
+ .add("type", typeLiteral)
+ .add("annotation", annotationStrategy)
+ .toString();
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation strategy.
+ */
+ static <T> Key<T> get(Class<T> type,
+ AnnotationStrategy annotationStrategy) {
+ return new Key<T>(type, annotationStrategy);
+ }
+
+ /**
+ * Gets a key for an injection type.
+ */
+ public static <T> Key<T> get(Class<T> type) {
+ return new Key<T>(type, NullAnnotationStrategy.INSTANCE);
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation type.
+ */
+ public static <T> Key<T> get(Class<T> type,
+ Class<? extends Annotation> annotationType) {
+ return new Key<T>(type, strategyFor(annotationType));
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation.
+ */
+ public static <T> Key<T> get(Class<T> type, Annotation annotation) {
+ return new Key<T>(type, strategyFor(annotation));
+ }
+
+ /**
+ * Gets a key for an injection type.
+ */
+ public static Key<?> get(Type type) {
+ return new Key<Object>(type, NullAnnotationStrategy.INSTANCE);
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation type.
+ */
+ public static Key<?> get(Type type,
+ Class<? extends Annotation> annotationType) {
+ return new Key<Object>(type, strategyFor(annotationType));
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation.
+ */
+ public static Key<?> get(Type type, Annotation annotation) {
+ return new Key<Object>(type, strategyFor(annotation));
+ }
+
+ /**
+ * Gets a key for an injection type.
+ */
+ public static <T> Key<T> get(TypeLiteral<T> typeLiteral) {
+ return new Key<T>(typeLiteral, NullAnnotationStrategy.INSTANCE);
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation type.
+ */
+ public static <T> Key<T> get(TypeLiteral<T> typeLiteral,
+ Class<? extends Annotation> annotationType) {
+ return new Key<T>(typeLiteral, strategyFor(annotationType));
+ }
+
+ /**
+ * Gets a key for an injection type and an annotation.
+ */
+ public static <T> Key<T> get(TypeLiteral<T> typeLiteral,
+ Annotation annotation) {
+ return new Key<T>(typeLiteral, strategyFor(annotation));
+ }
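+
+ /* Editor's sketch: for generic types, a TypeLiteral (or anonymous Key subclass)
+ * preserves the type parameter despite erasure:
+ *
+ * Key<List<String>> listKey = Key.get(new TypeLiteral<List<String>>() {});
+ */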
+
+ /**
+ * Returns a new key of the specified type with the same annotation as this
+ * key.
+ */
+ <T> Key<T> ofType(Class<T> type) {
+ return new Key<T>(type, annotationStrategy);
+ }
+
+ /**
+ * Returns a new key of the specified type with the same annotation as this
+ * key.
+ */
+ Key<?> ofType(Type type) {
+ return new Key<Object>(type, annotationStrategy);
+ }
+
+ /**
+ * Returns a new key of the specified type with the same annotation as this
+ * key.
+ */
+ <T> Key<T> ofType(TypeLiteral<T> type) {
+ return new Key<T>(type, annotationStrategy);
+ }
+
+ /**
+ * Returns true if this key has annotation attributes.
+ */
+ boolean hasAttributes() {
+ return annotationStrategy.hasAttributes();
+ }
+
+ /**
+ * Returns this key without annotation attributes, i.e. with only the
+ * annotation type.
+ */
+ Key<T> withoutAttributes() {
+ return new Key<T>(typeLiteral, annotationStrategy.withoutAttributes());
+ }
+
+ interface AnnotationStrategy {
+ Annotation getAnnotation();
+
+ Class<? extends Annotation> getAnnotationType();
+
+ boolean hasAttributes();
+
+ AnnotationStrategy withoutAttributes();
+ }
+
+ /**
+ * Returns {@code true} if the given annotation type has no attributes.
+ */
+ static boolean isMarker(Class<? extends Annotation> annotationType) {
+ return annotationType.getDeclaredMethods().length == 0;
+ }
+
+ /**
+ * Gets the strategy for an annotation.
+ */
+ static AnnotationStrategy strategyFor(Annotation annotation) {
+ checkNotNull(annotation, "annotation");
+ Class<? extends Annotation> annotationType = annotation.annotationType();
+ ensureRetainedAtRuntime(annotationType);
+ ensureIsBindingAnnotation(annotationType);
+
+ if (annotationType.getDeclaredMethods().length == 0) {
+ return new AnnotationTypeStrategy(annotationType, annotation);
+ }
+
+ return new AnnotationInstanceStrategy(annotation);
+ }
+
+ /**
+ * Gets the strategy for an annotation type.
+ */
+ static AnnotationStrategy strategyFor(Class<? extends Annotation> annotationType) {
+ checkNotNull(annotationType, "annotation type");
+ ensureRetainedAtRuntime(annotationType);
+ ensureIsBindingAnnotation(annotationType);
+ return new AnnotationTypeStrategy(annotationType, null);
+ }
+
+ private static void ensureRetainedAtRuntime(
+ Class<? extends Annotation> annotationType) {
+ checkArgument(Annotations.isRetainedAtRuntime(annotationType),
+ "%s is not retained at runtime. Please annotate it with @Retention(RUNTIME).",
+ annotationType.getName());
+ }
+
+ private static void ensureIsBindingAnnotation(
+ Class<? extends Annotation> annotationType) {
+ checkArgument(isBindingAnnotation(annotationType),
+ "%s is not a binding annotation. Please annotate it with @BindingAnnotation.",
+ annotationType.getName());
+ }
+
+ enum NullAnnotationStrategy implements AnnotationStrategy {
+ INSTANCE;
+
+ public boolean hasAttributes() {
+ return false;
+ }
+
+ public AnnotationStrategy withoutAttributes() {
+ throw new UnsupportedOperationException("Key already has no attributes.");
+ }
+
+ public Annotation getAnnotation() {
+ return null;
+ }
+
+ public Class<? extends Annotation> getAnnotationType() {
+ return null;
+ }
+
+ @Override
+ public String toString() {
+ return "[none]";
+ }
+ }
+
+ // this class not test-covered
+ static class AnnotationInstanceStrategy implements AnnotationStrategy {
+
+ final Annotation annotation;
+
+ AnnotationInstanceStrategy(Annotation annotation) {
+ this.annotation = checkNotNull(annotation, "annotation");
+ }
+
+ public boolean hasAttributes() {
+ return true;
+ }
+
+ public AnnotationStrategy withoutAttributes() {
+ return new AnnotationTypeStrategy(getAnnotationType(), annotation);
+ }
+
+ public Annotation getAnnotation() {
+ return annotation;
+ }
+
+ public Class<? extends Annotation> getAnnotationType() {
+ return annotation.annotationType();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof AnnotationInstanceStrategy)) {
+ return false;
+ }
+
+ AnnotationInstanceStrategy other = (AnnotationInstanceStrategy) o;
+ return annotation.equals(other.annotation);
+ }
+
+ @Override
+ public int hashCode() {
+ return annotation.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return annotation.toString();
+ }
+ }
+
+ static class AnnotationTypeStrategy implements AnnotationStrategy {
+
+ final Class<? extends Annotation> annotationType;
+
+ // Keep the instance around if we have it so the client can request it.
+ final Annotation annotation;
+
+ AnnotationTypeStrategy(Class<? extends Annotation> annotationType,
+ Annotation annotation) {
+ this.annotationType = checkNotNull(annotationType, "annotation type");
+ this.annotation = annotation;
+ }
+
+ public boolean hasAttributes() {
+ return false;
+ }
+
+ public AnnotationStrategy withoutAttributes() {
+ throw new UnsupportedOperationException("Key already has no attributes.");
+ }
+
+ public Annotation getAnnotation() {
+ return annotation;
+ }
+
+ public Class<? extends Annotation> getAnnotationType() {
+ return annotationType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof AnnotationTypeStrategy)) {
+ return false;
+ }
+
+ AnnotationTypeStrategy other = (AnnotationTypeStrategy) o;
+ return annotationType.equals(other.annotationType);
+ }
+
+ @Override
+ public int hashCode() {
+ return annotationType.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "@" + annotationType.getName();
+ }
+ }
+
+ static boolean isBindingAnnotation(Annotation annotation) {
+ return isBindingAnnotation(annotation.annotationType());
+ }
+
+ static boolean isBindingAnnotation(
+ Class<? extends Annotation> annotationType) {
+ return annotationType.getAnnotation(BindingAnnotation.class) != null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/LookupProcessor.java b/src/main/java/org/elasticsearch/common/inject/LookupProcessor.java
new file mode 100644
index 0000000..0bcfaa2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/LookupProcessor.java
@@ -0,0 +1,61 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.spi.MembersInjectorLookup;
+import org.elasticsearch.common.inject.spi.ProviderLookup;
+
+/**
+ * Handles {@link Binder#getProvider} and {@link Binder#getMembersInjector(TypeLiteral)} commands.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class LookupProcessor extends AbstractProcessor {
+
+ LookupProcessor(Errors errors) {
+ super(errors);
+ }
+
+ @Override
+ public <T> Boolean visit(MembersInjectorLookup<T> lookup) {
+ try {
+ MembersInjector<T> membersInjector
+ = injector.membersInjectorStore.get(lookup.getType(), errors);
+ lookup.initializeDelegate(membersInjector);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors()); // TODO: source
+ }
+
+ return true;
+ }
+
+ @Override
+ public <T> Boolean visit(ProviderLookup<T> lookup) {
+ // ensure the provider can be created
+ try {
+ Provider<T> provider = injector.getProviderOrThrow(lookup.getKey(), errors);
+ lookup.initializeDelegate(provider);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors()); // TODO: source
+ }
+
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Lookups.java b/src/main/java/org/elasticsearch/common/inject/Lookups.java
new file mode 100644
index 0000000..b5eb690
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Lookups.java
@@ -0,0 +1,30 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * Accessors for providers and members injectors. The returned values will not be functional until
+ * the injector has been created.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+interface Lookups {
+
+ <T> Provider<T> getProvider(Key<T> key);
+
+ <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> type);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/MembersInjector.java b/src/main/java/org/elasticsearch/common/inject/MembersInjector.java
new file mode 100644
index 0000000..3893bf1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/MembersInjector.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * Injects dependencies into the fields and methods on instances of type {@code T}. Ignores the
+ * presence or absence of an injectable constructor.
+ *
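+ * <p/>
+ * <p>A minimal usage sketch, assuming a hypothetical {@code Payment} type with
+ * injectable fields and a hypothetical {@code deserializePayment()} helper:
+ * <pre>
+ *   Payment payment = deserializePayment();  // instance created outside Guice
+ *   MembersInjector&lt;Payment&gt; membersInjector = injector.getMembersInjector(Payment.class);
+ *   membersInjector.injectMembers(payment);  // injects fields and methods, not the constructor
+ * </pre>
+ *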
+ * @param <T> type to inject members of
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface MembersInjector<T> {
+
+ /**
+ * Injects dependencies into the fields and methods of {@code instance}. Ignores the presence or
+ * absence of an injectable constructor.
+ * <p/>
+ * <p>Whenever Guice creates an instance, it performs this injection automatically (after first
+ * performing constructor injection), so if you're able to let Guice create all your objects for
+ * you, you'll never need to use this method.
+ *
+ * @param instance to inject members on. May be {@code null}.
+ */
+ void injectMembers(T instance);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java b/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java
new file mode 100644
index 0000000..6ae6ffc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java
@@ -0,0 +1,119 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.spi.InjectionListener;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+/**
+ * Injects members of instances of a given type.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class MembersInjectorImpl<T> implements MembersInjector<T> {
+ private final TypeLiteral<T> typeLiteral;
+ private final InjectorImpl injector;
+ private final ImmutableList<SingleMemberInjector> memberInjectors;
+ private final ImmutableList<MembersInjector<? super T>> userMembersInjectors;
+ private final ImmutableList<InjectionListener<? super T>> injectionListeners;
+
+ MembersInjectorImpl(InjectorImpl injector, TypeLiteral<T> typeLiteral,
+ EncounterImpl<T> encounter, ImmutableList<SingleMemberInjector> memberInjectors) {
+ this.injector = injector;
+ this.typeLiteral = typeLiteral;
+ this.memberInjectors = memberInjectors;
+ this.userMembersInjectors = encounter.getMembersInjectors();
+ this.injectionListeners = encounter.getInjectionListeners();
+ }
+
+ public ImmutableList<SingleMemberInjector> getMemberInjectors() {
+ return memberInjectors;
+ }
+
+ public void injectMembers(T instance) {
+ Errors errors = new Errors(typeLiteral);
+ try {
+ injectAndNotify(instance, errors);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+
+ errors.throwProvisionExceptionIfErrorsExist();
+ }
+
+ void injectAndNotify(final T instance, final Errors errors) throws ErrorsException {
+ if (instance == null) {
+ return;
+ }
+
+ injector.callInContext(new ContextualCallable<Void>() {
+ public Void call(InternalContext context) throws ErrorsException {
+ injectMembers(instance, errors, context);
+ return null;
+ }
+ });
+
+ notifyListeners(instance, errors);
+ }
+
+ void notifyListeners(T instance, Errors errors) throws ErrorsException {
+ int numErrorsBefore = errors.size();
+ for (InjectionListener<? super T> injectionListener : injectionListeners) {
+ try {
+ injectionListener.afterInjection(instance);
+ } catch (RuntimeException e) {
+ errors.errorNotifyingInjectionListener(injectionListener, typeLiteral, e);
+ }
+ }
+ errors.throwIfNewErrors(numErrorsBefore);
+ }
+
+ void injectMembers(T t, Errors errors, InternalContext context) {
+ // optimization: use manual for/each to save allocating an iterator here
+ for (int i = 0, size = memberInjectors.size(); i < size; i++) {
+ memberInjectors.get(i).inject(errors, context, t);
+ }
+
+ // optimization: use manual for/each to save allocating an iterator here
+ for (int i = 0, size = userMembersInjectors.size(); i < size; i++) {
+ MembersInjector<? super T> userMembersInjector = userMembersInjectors.get(i);
+ try {
+ userMembersInjector.injectMembers(t);
+ } catch (RuntimeException e) {
+ errors.errorInUserInjector(userMembersInjector, typeLiteral, e);
+ }
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "MembersInjector<" + typeLiteral + ">";
+ }
+
+ public ImmutableSet<InjectionPoint> getInjectionPoints() {
+ ImmutableSet.Builder<InjectionPoint> builder = ImmutableSet.builder();
+ for (SingleMemberInjector memberInjector : memberInjectors) {
+ builder.add(memberInjector.getInjectionPoint());
+ }
+ return builder.build();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java
new file mode 100644
index 0000000..2904ccd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.FailableCache;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+import org.elasticsearch.common.inject.spi.TypeListenerBinding;
+
+import java.lang.reflect.Field;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Members injectors by type.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class MembersInjectorStore {
+ private final InjectorImpl injector;
+ private final ImmutableList<TypeListenerBinding> typeListenerBindings;
+
+ private final FailableCache<TypeLiteral<?>, MembersInjectorImpl<?>> cache
+ = new FailableCache<TypeLiteral<?>, MembersInjectorImpl<?>>() {
+ @Override
+ protected MembersInjectorImpl<?> create(TypeLiteral<?> type, Errors errors)
+ throws ErrorsException {
+ return createWithListeners(type, errors);
+ }
+ };
+
+ MembersInjectorStore(InjectorImpl injector,
+ List<TypeListenerBinding> typeListenerBindings) {
+ this.injector = injector;
+ this.typeListenerBindings = ImmutableList.copyOf(typeListenerBindings);
+ }
+
+ /**
+ * Returns true if any type listeners are installed. Other code may take shortcuts when there
+ * aren't any type listeners.
+ */
+ public boolean hasTypeListeners() {
+ return !typeListenerBindings.isEmpty();
+ }
+
+ /**
+ * Returns a new complete members injector with injection listeners registered.
+ */
+ @SuppressWarnings("unchecked") // the MembersInjector type always agrees with the passed type
+ public <T> MembersInjectorImpl<T> get(TypeLiteral<T> key, Errors errors) throws ErrorsException {
+ return (MembersInjectorImpl<T>) cache.get(key, errors);
+ }
+
+ /**
+     * Creates a new members injector and attaches injection listeners.
+ */
+ private <T> MembersInjectorImpl<T> createWithListeners(TypeLiteral<T> type, Errors errors)
+ throws ErrorsException {
+ int numErrorsBefore = errors.size();
+
+ Set<InjectionPoint> injectionPoints;
+ try {
+ injectionPoints = InjectionPoint.forInstanceMethodsAndFields(type);
+ } catch (ConfigurationException e) {
+ errors.merge(e.getErrorMessages());
+ injectionPoints = e.getPartialValue();
+ }
+ ImmutableList<SingleMemberInjector> injectors = getInjectors(injectionPoints, errors);
+ errors.throwIfNewErrors(numErrorsBefore);
+
+ EncounterImpl<T> encounter = new EncounterImpl<T>(errors, injector.lookups);
+ for (TypeListenerBinding typeListener : typeListenerBindings) {
+ if (typeListener.getTypeMatcher().matches(type)) {
+ try {
+ typeListener.getListener().hear(type, encounter);
+ } catch (RuntimeException e) {
+ errors.errorNotifyingTypeListener(typeListener, type, e);
+ }
+ }
+ }
+ encounter.invalidate();
+ errors.throwIfNewErrors(numErrorsBefore);
+
+ return new MembersInjectorImpl<T>(injector, type, encounter, injectors);
+ }
+
+ /**
+ * Returns the injectors for the specified injection points.
+ */
+ ImmutableList<SingleMemberInjector> getInjectors(
+ Set<InjectionPoint> injectionPoints, Errors errors) {
+ List<SingleMemberInjector> injectors = Lists.newArrayList();
+ for (InjectionPoint injectionPoint : injectionPoints) {
+ try {
+ Errors errorsForMember = injectionPoint.isOptional()
+ ? new Errors(injectionPoint)
+ : errors.withSource(injectionPoint);
+ SingleMemberInjector injector = injectionPoint.getMember() instanceof Field
+ ? new SingleFieldInjector(this.injector, injectionPoint, errorsForMember)
+ : new SingleMethodInjector(this.injector, injectionPoint, errorsForMember);
+ injectors.add(injector);
+ } catch (ErrorsException ignoredForNow) {
+ // ignored for now
+ }
+ }
+ return ImmutableList.copyOf(injectors);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/MessageProcessor.java b/src/main/java/org/elasticsearch/common/inject/MessageProcessor.java
new file mode 100644
index 0000000..2acba68
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/MessageProcessor.java
@@ -0,0 +1,54 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Message;
+
+/**
+ * Handles {@link Binder#addError} commands.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class MessageProcessor extends AbstractProcessor {
+
+ //private static final Logger logger = Logger.getLogger(Guice.class.getName());
+
+ MessageProcessor(Errors errors) {
+ super(errors);
+ }
+
+ @Override
+ public Boolean visit(Message message) {
+ // ES_GUICE: don't log failures using jdk logging
+// if (message.getCause() != null) {
+// String rootMessage = getRootMessage(message.getCause());
+// logger.log(Level.INFO,
+// "An exception was caught and reported. Message: " + rootMessage,
+// message.getCause());
+// }
+
+ errors.addMessage(message);
+ return true;
+ }
+
+ public static String getRootMessage(Throwable t) {
+ Throwable cause = t.getCause();
+ return cause == null ? t.toString() : getRootMessage(cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Module.java b/src/main/java/org/elasticsearch/common/inject/Module.java
new file mode 100644
index 0000000..9af716d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Module.java
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * A module contributes configuration information, typically interface
+ * bindings, which will be used to create an {@link Injector}. A Guice-based
+ * application is ultimately composed of little more than a set of
+ * {@code Module}s and some bootstrapping code.
+ * <p/>
+ * <p>Your Module classes can use a more streamlined syntax by extending
+ * {@link AbstractModule} rather than implementing this interface directly.
+ * <p/>
+ * <p>In addition to the bindings configured via {@link #configure}, bindings
+ * will be created for all methods annotated with {@literal @}{@link Provides}.
+ * Use scope and binding annotations on these methods to configure the
+ * bindings.
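+ * <p/>
+ * <p>A minimal sketch of a module, assuming hypothetical {@code Service} and
+ * {@code ServiceImpl} types:
+ * <pre>
+ *   public class ServiceModule extends AbstractModule {
+ *     {@literal @}Override
+ *     protected void configure() {
+ *       bind(Service.class).to(ServiceImpl.class);
+ *     }
+ *   }
+ * </pre>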
+ */
+public interface Module {
+
+ /**
+ * Contributes bindings and other configurations for this module to {@code binder}.
+ * <p/>
+ * <p><strong>Do not invoke this method directly</strong> to install submodules. Instead use
+ * {@link Binder#install(Module)}, which ensures that {@link Provides provider methods} are
+ * discovered.
+ */
+ void configure(Binder binder);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Modules.java b/src/main/java/org/elasticsearch/common/inject/Modules.java
new file mode 100644
index 0000000..4e5ae23
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Modules.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+
+import java.lang.reflect.Constructor;
+
+/**
+ * Helper methods for creating modules reflectively from settings and for running
+ * {@link PreProcessModule} pre-processing across a set of modules.
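+ * <p/>
+ * <p>A minimal usage sketch, assuming a hypothetical {@code TransportModule} that
+ * declares a {@code Settings} constructor:
+ * <pre>
+ *   Module module = Modules.createModule(TransportModule.class, settings);
+ * </pre>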
+ */
+public class Modules {
+
+    @SuppressWarnings("unchecked") // loadClass returns a raw Class; the named class must be a Module subtype
+    public static Module createModule(String moduleClass, Settings settings) throws ClassNotFoundException {
+        return createModule((Class<? extends Module>) settings.getClassLoader().loadClass(moduleClass), settings);
+ }
+
+ public static Module createModule(Class<? extends Module> moduleClass, @Nullable Settings settings) {
+ Constructor<? extends Module> constructor;
+ try {
+ constructor = moduleClass.getConstructor(Settings.class);
+ try {
+ return constructor.newInstance(settings);
+ } catch (Exception e) {
+ throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
+ }
+ } catch (NoSuchMethodException e) {
+ try {
+ constructor = moduleClass.getConstructor();
+ try {
+ return constructor.newInstance();
+ } catch (Exception e1) {
+ throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
+ }
+ } catch (NoSuchMethodException e1) {
+ throw new ElasticsearchException("No constructor for [" + moduleClass + "]");
+ }
+ }
+ }
+
+ public static void processModules(Iterable<Module> modules) {
+ for (Module module : modules) {
+ if (module instanceof PreProcessModule) {
+ for (Module module1 : modules) {
+ ((PreProcessModule) module).processModule(module1);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java b/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java
new file mode 100644
index 0000000..3443312
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.Lists;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Collects the modules used to create an injector, transparently expanding any
+ * {@link SpawnModules} module into the additional modules it spawns.
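+ * <p/>
+ * <p>A minimal usage sketch, assuming hypothetical {@code FooModule} and
+ * {@code BarModule} classes:
+ * <pre>
+ *   Injector injector = new ModulesBuilder()
+ *       .add(new FooModule(), new BarModule())
+ *       .createInjector();
+ * </pre>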
+ */
+public class ModulesBuilder implements Iterable<Module> {
+
+ private final List<Module> modules = Lists.newArrayList();
+
+ public ModulesBuilder add(Module... modules) {
+ for (Module module : modules) {
+ add(module);
+ }
+ return this;
+ }
+
+ public ModulesBuilder add(Module module) {
+ modules.add(module);
+ if (module instanceof SpawnModules) {
+ Iterable<? extends Module> spawned = ((SpawnModules) module).spawnModules();
+ for (Module spawn : spawned) {
+ add(spawn);
+ }
+ }
+ return this;
+ }
+
+ @Override
+ public Iterator<Module> iterator() {
+ return modules.iterator();
+ }
+
+ public Injector createInjector() {
+ Modules.processModules(modules);
+ Injector injector = Guice.createInjector(modules);
+ Injectors.cleanCaches(injector);
+ // in ES, we always create all instances as if they are eager singletons
+ // this allows for considerable memory savings (no need to store construction info) as well as cycles
+ ((InjectorImpl) injector).readOnlyAllSingletons();
+ return injector;
+ }
+
+ public Injector createChildInjector(Injector injector) {
+ Modules.processModules(modules);
+ Injector childInjector = injector.createChildInjector(modules);
+ Injectors.cleanCaches(childInjector);
+ // in ES, we always create all instances as if they are eager singletons
+ // this allows for considerable memory savings (no need to store construction info) as well as cycles
+ ((InjectorImpl) childInjector).readOnlyAllSingletons();
+ return childInjector;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/OutOfScopeException.java b/src/main/java/org/elasticsearch/common/inject/OutOfScopeException.java
new file mode 100644
index 0000000..14096a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/OutOfScopeException.java
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * Thrown from {@link Provider#get} when an attempt is made to access a scoped
+ * object while the scope in question is not currently active.
+ *
+ * @author kevinb@google.com (Kevin Bourrillion)
+ * @since 2.0
+ */
+public final class OutOfScopeException extends RuntimeException {
+
+ public OutOfScopeException(String message) {
+ super(message);
+ }
+
+ public OutOfScopeException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public OutOfScopeException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/PreProcessModule.java b/src/main/java/org/elasticsearch/common/inject/PreProcessModule.java
new file mode 100644
index 0000000..ff1d999
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/PreProcessModule.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * A module can implement this interface to pre-process the other modules of an
+ * injector before that injector is created.
+ *
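+ * <p>A minimal sketch, assuming a hypothetical module that needs to see its sibling
+ * modules before injector creation:
+ * <pre>
+ *   public class AwareModule extends AbstractModule implements PreProcessModule {
+ *     {@literal @}Override
+ *     public void processModule(Module module) {
+ *       // invoked once for every module passed to Modules.processModules()
+ *     }
+ *
+ *     {@literal @}Override
+ *     protected void configure() {
+ *     }
+ *   }
+ * </pre>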
+ */
+public interface PreProcessModule {
+
+ void processModule(Module module);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java b/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java
new file mode 100644
index 0000000..0724081
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java
@@ -0,0 +1,52 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.binder.AnnotatedElementBuilder;
+
+/**
+ * A binder whose configuration information is hidden from its environment by default. See
+ * {@link org.elasticsearch.common.inject.PrivateModule PrivateModule} for details.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface PrivateBinder extends Binder {
+
+ /**
+     * Makes the binding for {@code key} available to the enclosing environment.
+ */
+ void expose(Key<?> key);
+
+ /**
+ * Makes a binding for {@code type} available to the enclosing environment. Use {@link
+ * org.elasticsearch.common.inject.binder.AnnotatedElementBuilder#annotatedWith(Class) annotatedWith()} to expose {@code type} with a
+ * binding annotation.
+ */
+ AnnotatedElementBuilder expose(Class<?> type);
+
+ /**
+ * Makes a binding for {@code type} available to the enclosing environment. Use {@link
+ * AnnotatedElementBuilder#annotatedWith(Class) annotatedWith()} to expose {@code type} with a
+ * binding annotation.
+ */
+ AnnotatedElementBuilder expose(TypeLiteral<?> type);
+
+ PrivateBinder withSource(Object source);
+
+ PrivateBinder skipSources(Class... classesToSkip);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/PrivateElementProcessor.java b/src/main/java/org/elasticsearch/common/inject/PrivateElementProcessor.java
new file mode 100644
index 0000000..9b0fd3d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/PrivateElementProcessor.java
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.PrivateElements;
+
+import java.util.List;
+
+/**
+ * Handles {@link Binder#newPrivateBinder()} elements.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class PrivateElementProcessor extends AbstractProcessor {
+
+ private final Stage stage;
+ private final List<InjectorShell.Builder> injectorShellBuilders = Lists.newArrayList();
+
+ PrivateElementProcessor(Errors errors, Stage stage) {
+ super(errors);
+ this.stage = stage;
+ }
+
+ @Override
+ public Boolean visit(PrivateElements privateElements) {
+ InjectorShell.Builder builder = new InjectorShell.Builder()
+ .parent(injector)
+ .stage(stage)
+ .privateElements(privateElements);
+ injectorShellBuilders.add(builder);
+ return true;
+ }
+
+ public List<InjectorShell.Builder> getInjectorShellBuilders() {
+ return injectorShellBuilders;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/PrivateModule.java b/src/main/java/org/elasticsearch/common/inject/PrivateModule.java
new file mode 100644
index 0000000..81472ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/PrivateModule.java
@@ -0,0 +1,287 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
+import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
+import org.elasticsearch.common.inject.binder.AnnotatedElementBuilder;
+import org.elasticsearch.common.inject.binder.LinkedBindingBuilder;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.spi.TypeConverter;
+import org.elasticsearch.common.inject.spi.TypeListener;
+
+import java.lang.annotation.Annotation;
+
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * A module whose configuration information is hidden from its environment by default. Only bindings
+ * that are explicitly exposed will be available to other modules and to the users of the injector.
+ * This module may expose the bindings it creates and the bindings of the modules it installs.
+ * <p/>
+ * <p>A private module can be nested within a regular module or within another private module using
+ * {@link Binder#install install()}. Its bindings live in a new environment that inherits bindings,
+ * type converters, scopes, and interceptors from the surrounding ("parent") environment. When you
+ * nest multiple private modules, the result is a tree of environments where the injector's
+ * environment is the root.
+ * <p/>
+ * <p>Guice EDSL bindings can be exposed with {@link #expose(Class) expose()}. {@literal @}{@link
+ * org.elasticsearch.common.inject.Provides Provides} bindings can be exposed with the {@literal @}{@link
+ * Exposed} annotation:
+ * <p/>
+ * <pre>
+ * public class FooBarBazModule extends PrivateModule {
+ * protected void configure() {
+ * bind(Foo.class).to(RealFoo.class);
+ * expose(Foo.class);
+ *
+ * install(new TransactionalBarModule());
+ * expose(Bar.class).annotatedWith(Transactional.class);
+ *
+ * bind(SomeImplementationDetail.class);
+ * install(new MoreImplementationDetailsModule());
+ * }
+ *
+ * {@literal @}Provides {@literal @}Exposed
+ * public Baz provideBaz() {
+ * return new SuperBaz();
+ * }
+ * }
+ * </pre>
+ * <p/>
+ * <p>Private modules are implemented using {@link Injector#createChildInjector(Module[]) parent
+ * injectors}. When it can satisfy their dependencies, just-in-time bindings will be created in the
+ * root environment. Such bindings are shared among all environments in the tree.
+ * <p/>
+ * <p>The scope of a binding is constrained to its environment. A singleton bound in a private
+ * module will be unique to its environment. But a binding for the same type in a different private
+ * module will yield a different instance.
+ * <p/>
+ * <p>A shared binding that injects the {@code Injector} gets the root injector, which only has
+ * access to bindings in the root environment. An explicit binding that injects the {@code Injector}
+ * gets access to all bindings in the child environment.
+ * <p/>
+ * <p>To promote a just-in-time binding to an explicit binding, bind it:
+ * <pre>
+ * bind(FooImpl.class);
+ * </pre>
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public abstract class PrivateModule implements Module {
+
+ /**
+     * As in {@link AbstractModule}, the binder of the current private module.
+ */
+ private PrivateBinder binder;
+
+ public final synchronized void configure(Binder binder) {
+ checkState(this.binder == null, "Re-entry is not allowed.");
+
+ // Guice treats PrivateModules specially and passes in a PrivateBinder automatically.
+ this.binder = (PrivateBinder) binder.skipSources(PrivateModule.class);
+ try {
+ configure();
+ } finally {
+ this.binder = null;
+ }
+ }
+
+ /**
+ * Creates bindings and other configurations private to this module. Use {@link #expose(Class)
+ * expose()} to make the bindings in this module available externally.
+ */
+ protected abstract void configure();
+
+ /**
+ * Makes the binding for {@code key} available to other modules and the injector.
+ */
+ protected final <T> void expose(Key<T> key) {
+ binder.expose(key);
+ }
+
+ /**
+ * Makes a binding for {@code type} available to other modules and the injector. Use {@link
+ * AnnotatedElementBuilder#annotatedWith(Class) annotatedWith()} to expose {@code type} with a
+ * binding annotation.
+ */
+ protected final AnnotatedElementBuilder expose(Class<?> type) {
+ return binder.expose(type);
+ }
+
+ /**
+ * Makes a binding for {@code type} available to other modules and the injector. Use {@link
+ * AnnotatedElementBuilder#annotatedWith(Class) annotatedWith()} to expose {@code type} with a
+ * binding annotation.
+ */
+ protected final AnnotatedElementBuilder expose(TypeLiteral<?> type) {
+ return binder.expose(type);
+ }
+
+ // everything below is copied from AbstractModule
+
+ /**
+ * Returns the current binder.
+ */
+ protected final PrivateBinder binder() {
+ return binder;
+ }
+
+ /**
+ * @see Binder#bindScope(Class, Scope)
+ */
+ protected final void bindScope(Class<? extends Annotation> scopeAnnotation, Scope scope) {
+ binder.bindScope(scopeAnnotation, scope);
+ }
+
+ /**
+ * @see Binder#bind(Key)
+ */
+ protected final <T> LinkedBindingBuilder<T> bind(Key<T> key) {
+ return binder.bind(key);
+ }
+
+ /**
+ * @see Binder#bind(TypeLiteral)
+ */
+ protected final <T> AnnotatedBindingBuilder<T> bind(TypeLiteral<T> typeLiteral) {
+ return binder.bind(typeLiteral);
+ }
+
+ /**
+ * @see Binder#bind(Class)
+ */
+ protected final <T> AnnotatedBindingBuilder<T> bind(Class<T> clazz) {
+ return binder.bind(clazz);
+ }
+
+ /**
+ * @see Binder#bindConstant()
+ */
+ protected final AnnotatedConstantBindingBuilder bindConstant() {
+ return binder.bindConstant();
+ }
+
+ /**
+ * @see Binder#install(Module)
+ */
+ protected final void install(Module module) {
+ binder.install(module);
+ }
+
+ /**
+ * @see Binder#addError(String, Object[])
+ */
+ protected final void addError(String message, Object... arguments) {
+ binder.addError(message, arguments);
+ }
+
+ /**
+ * @see Binder#addError(Throwable)
+ */
+ protected final void addError(Throwable t) {
+ binder.addError(t);
+ }
+
+ /**
+ * @see Binder#addError(Message)
+ */
+ protected final void addError(Message message) {
+ binder.addError(message);
+ }
+
+ /**
+ * @see Binder#requestInjection(Object)
+ */
+ protected final void requestInjection(Object instance) {
+ binder.requestInjection(instance);
+ }
+
+ /**
+ * @see Binder#requestStaticInjection(Class[])
+ */
+ protected final void requestStaticInjection(Class<?>... types) {
+ binder.requestStaticInjection(types);
+ }
+
+ /**
+ * Instructs Guice to require a binding to the given key.
+ */
+ protected final void requireBinding(Key<?> key) {
+ binder.getProvider(key);
+ }
+
+ /**
+ * Instructs Guice to require a binding to the given type.
+ */
+ protected final void requireBinding(Class<?> type) {
+ binder.getProvider(type);
+ }
+
+ /**
+ * @see Binder#getProvider(Key)
+ */
+ protected final <T> Provider<T> getProvider(Key<T> key) {
+ return binder.getProvider(key);
+ }
+
+ /**
+ * @see Binder#getProvider(Class)
+ */
+ protected final <T> Provider<T> getProvider(Class<T> type) {
+ return binder.getProvider(type);
+ }
+
+ /**
+ * @see Binder#convertToTypes(org.elasticsearch.common.inject.matcher.Matcher, org.elasticsearch.common.inject.spi.TypeConverter)
+ */
+ protected final void convertToTypes(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter converter) {
+ binder.convertToTypes(typeMatcher, converter);
+ }
+
+ /**
+ * @see Binder#currentStage()
+ */
+ protected final Stage currentStage() {
+ return binder.currentStage();
+ }
+
+ /**
+ * @see Binder#getMembersInjector(Class)
+ */
+ protected <T> MembersInjector<T> getMembersInjector(Class<T> type) {
+ return binder.getMembersInjector(type);
+ }
+
+ /**
+ * @see Binder#getMembersInjector(TypeLiteral)
+ */
+ protected <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> type) {
+ return binder.getMembersInjector(type);
+ }
+
+ /**
+ * @see Binder#bindListener(org.elasticsearch.common.inject.matcher.Matcher, org.elasticsearch.common.inject.spi.TypeListener)
+ */
+ protected void bindListener(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeListener listener) {
+ binder.bindListener(typeMatcher, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java b/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java
new file mode 100644
index 0000000..6232dd7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * A pointer to the default provider type for a type.
+ *
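+ * <p>A minimal sketch, assuming hypothetical {@code Widget} and {@code WidgetProvider}
+ * types:
+ * <pre>
+ *   {@literal @}ProvidedBy(WidgetProvider.class)
+ *   public interface Widget {}
+ * </pre>
+ *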
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Retention(RUNTIME)
+@Target(TYPE)
+public @interface ProvidedBy {
+
+ /**
+     * The provider type.
+ */
+ Class<? extends Provider<?>> value();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Provider.java b/src/main/java/org/elasticsearch/common/inject/Provider.java
new file mode 100644
index 0000000..f9f4466
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Provider.java
@@ -0,0 +1,55 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * An object capable of providing instances of type {@code T}. Providers are used in numerous ways
+ * by Guice:
+ * <p/>
+ * <ul>
+ * <li>When the default means for obtaining instances (an injectable or parameterless constructor)
+ * is insufficient for a particular binding, the module can specify a custom {@code Provider}
+ * instead, to control exactly how Guice creates or obtains instances for the binding.
+ * <p/>
+ * <li>An implementation class may always choose to have a {@code Provider<T>} instance injected,
+ * rather than having a {@code T} injected directly. This may give you access to multiple
+ * instances, instances you wish to safely mutate and discard, instances which are out of scope
+ * (e.g. using a {@code @RequestScoped} object from within a {@code @SessionScoped} object), or
+ * instances that will be initialized lazily.
+ * <p/>
+ * <li>A custom {@link Scope} is implemented as a decorator of {@code Provider<T>}, which decides
+ * when to delegate to the backing provider and when to provide the instance some other way.
+ * <p/>
+ * <li>The {@link Injector} offers access to the {@code Provider<T>} it uses to fulfill requests
+ * for a given key, via the {@link Injector#getProvider} methods.
+ * </ul>
+ *
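+ * <p>A minimal sketch of a custom provider, assuming a hypothetical {@code Connection}
+ * type with an {@code open()} factory method:
+ * <pre>
+ *   public class ConnectionProvider implements Provider&lt;Connection&gt; {
+ *     public Connection get() {
+ *       return Connection.open();  // must never return null
+ *     }
+ *   }
+ *
+ *   // in a module: bind(Connection.class).toProvider(ConnectionProvider.class);
+ * </pre>
+ *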
+ * @param <T> the type of object this provides
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface Provider<T> {
+
+ /**
+ * Provides an instance of {@code T}. Must never return {@code null}.
+ *
+ * @throws OutOfScopeException when an attempt is made to access a scoped object while the scope
+ * in question is not currently active
+ * @throws ProvisionException if an instance cannot be provided. Such exceptions include messages
+ * and throwables to describe why provision failed.
+ */
+ T get();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java b/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java
new file mode 100644
index 0000000..4a7931c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.internal.InternalFactory;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+/**
+ * @author crazybob@google.com (Bob Lee)
+ */
+class ProviderToInternalFactoryAdapter<T> implements Provider<T> {
+
+ private final InjectorImpl injector;
+ private final InternalFactory<? extends T> internalFactory;
+
+ public ProviderToInternalFactoryAdapter(InjectorImpl injector,
+ InternalFactory<? extends T> internalFactory) {
+ this.injector = injector;
+ this.internalFactory = internalFactory;
+ }
+
+ public T get() {
+ final Errors errors = new Errors();
+ try {
+ T t = injector.callInContext(new ContextualCallable<T>() {
+ public T call(InternalContext context) throws ErrorsException {
+ Dependency dependency = context.getDependency();
+ return internalFactory.get(errors, context, dependency);
+ }
+ });
+ errors.throwIfNewErrors(0);
+ return t;
+ } catch (ErrorsException e) {
+ throw new ProvisionException(errors.merge(e.getErrors()).getMessages());
+ }
+ }
+
+ @Override
+ public String toString() {
+ return internalFactory.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Provides.java b/src/main/java/org/elasticsearch/common/inject/Provides.java
new file mode 100644
index 0000000..0920b85
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Provides.java
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Annotates methods of a {@link Module} to create a provider method binding. The method's return
+ * type is bound to its returned value. Guice will pass dependencies to the method as parameters.
+ *
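+ * <p>A minimal sketch of a provider method inside a module, assuming a hypothetical
+ * {@code Client} type constructed from injected {@code Settings}:
+ * <pre>
+ *   {@literal @}Provides
+ *   Client provideClient(Settings settings) {
+ *     return new Client(settings);  // settings is supplied by the injector
+ *   }
+ * </pre>
+ *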
+ * @author crazybob@google.com (Bob Lee)
+ * @since 2.0
+ */
+@Documented
+@Target(METHOD)
+@Retention(RUNTIME)
+public @interface Provides {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ProvisionException.java b/src/main/java/org/elasticsearch/common/inject/ProvisionException.java
new file mode 100644
index 0000000..4c0c365
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ProvisionException.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.util.Collection;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Indicates that there was a runtime failure while providing an instance.
+ *
+ * @author kevinb@google.com (Kevin Bourrillion)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class ProvisionException extends RuntimeException {
+
+ private final ImmutableSet<Message> messages;
+
+ /**
+     * Creates a ProvisionException containing {@code messages}.
+ */
+ public ProvisionException(Iterable<Message> messages) {
+ this.messages = ImmutableSet.copyOf(messages);
+ checkArgument(!this.messages.isEmpty());
+ initCause(Errors.getOnlyCause(this.messages));
+ }
+
+ public ProvisionException(String message, Throwable cause) {
+ super(cause);
+ this.messages = ImmutableSet.of(new Message(ImmutableList.of(), message, cause));
+ }
+
+ public ProvisionException(String message) {
+ this.messages = ImmutableSet.of(new Message(message));
+ }
+
+ /**
+ * Returns messages for the errors that caused this exception.
+ */
+ public Collection<Message> getErrorMessages() {
+ return messages;
+ }
+
+ @Override
+ public String getMessage() {
+ return Errors.format("Guice provision errors", messages);
+ }
+
+ private static final long serialVersionUID = 0;
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Reflection.java b/src/main/java/org/elasticsearch/common/inject/Reflection.java
new file mode 100644
index 0000000..ba34e6a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Reflection.java
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject;
+
+import java.lang.reflect.Constructor;
+
+/**
+ * Abstraction for Java's reflection APIs. This interface exists to provide a single place where
+ * runtime reflection can be substituted for another mechanism such as CGLib or compile-time code
+ * generation.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class Reflection {
+
+ /**
+ * A placeholder. This enables us to continue processing and gather more
+ * errors but blows up if you actually try to use it.
+ */
+ static class InvalidConstructor {
+ InvalidConstructor() {
+ throw new AssertionError();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ static <T> Constructor<T> invalidConstructor() {
+ try {
+ return (Constructor<T>) InvalidConstructor.class.getDeclaredConstructor();
+ } catch (NoSuchMethodException e) {
+ throw new AssertionError(e);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Scope.java b/src/main/java/org/elasticsearch/common/inject/Scope.java
new file mode 100644
index 0000000..c817800
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Scope.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * A scope is a level of visibility that instances provided by Guice may have.
+ * By default, an instance created by the {@link Injector} has <i>no scope</i>,
+ * meaning it has no state from the framework's perspective -- the
+ * {@code Injector} creates it, injects it once into the class that required it,
+ * and then immediately forgets it. Associating a scope with a particular
+ * binding allows the created instance to be "remembered" and possibly used
+ * again for other injections.
+ * <p/>
+ * <p>An example of a scope is {@link Scopes#SINGLETON}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface Scope {
+
+ /**
+ * Scopes a provider. The returned provider returns objects from this scope.
+ * If an object does not exist in this scope, the provider can use the given
+ * unscoped provider to retrieve one.
+ * <p/>
+ * <p>Scope implementations are strongly encouraged to override
+ * {@link Object#toString} in the returned provider and include the backing
+ * provider's {@code toString()} output.
+ *
+ * @param key binding key
+ * @param unscoped locates an instance when one doesn't already exist in this
+ * scope.
+ * @return a new provider which only delegates to the given unscoped provider
+ * when an instance of the requested object doesn't already exist in this
+ * scope
+ */
+ public <T> Provider<T> scope(Key<T> key, Provider<T> unscoped);
+
+ /**
+ * A short but useful description of this scope. For comparison, the standard
+     * scopes that ship with Guice use the descriptions
+ * {@code "Scopes.SINGLETON"}, {@code "ServletScopes.SESSION"} and
+ * {@code "ServletScopes.REQUEST"}.
+ */
+ String toString();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ScopeAnnotation.java b/src/main/java/org/elasticsearch/common/inject/ScopeAnnotation.java
new file mode 100644
index 0000000..7c6ab29
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ScopeAnnotation.java
@@ -0,0 +1,42 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.ANNOTATION_TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Annotates annotations which are used for scoping. Only one such annotation
+ * may apply to a single implementation class. You must also annotate scope
+ * annotations with {@code @Retention(RUNTIME)}. For example:
+ * <p/>
+ * <pre>
+ * {@code @}Retention(RUNTIME)
+ * {@code @}Target(TYPE)
+ * {@code @}ScopeAnnotation
+ * public {@code @}interface SessionScoped {}
+ * </pre>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Target(ANNOTATION_TYPE)
+@Retention(RUNTIME)
+public @interface ScopeAnnotation {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/ScopeBindingProcessor.java b/src/main/java/org/elasticsearch/common/inject/ScopeBindingProcessor.java
new file mode 100644
index 0000000..019d04c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/ScopeBindingProcessor.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Annotations;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.ScopeBinding;
+
+import java.lang.annotation.Annotation;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Handles {@link Binder#bindScope} commands.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class ScopeBindingProcessor extends AbstractProcessor {
+
+ ScopeBindingProcessor(Errors errors) {
+ super(errors);
+ }
+
+ @Override
+ public Boolean visit(ScopeBinding command) {
+ Scope scope = command.getScope();
+ Class<? extends Annotation> annotationType = command.getAnnotationType();
+
+ if (!Annotations.isScopeAnnotation(annotationType)) {
+ errors.withSource(annotationType).missingScopeAnnotation();
+ // Go ahead and bind anyway so we don't get collateral errors.
+ }
+
+ if (!Annotations.isRetainedAtRuntime(annotationType)) {
+ errors.withSource(annotationType)
+ .missingRuntimeRetention(command.getSource());
+ // Go ahead and bind anyway so we don't get collateral errors.
+ }
+
+ Scope existing = injector.state.getScope(checkNotNull(annotationType, "annotation type"));
+ if (existing != null) {
+ errors.duplicateScopes(existing, annotationType, scope);
+ } else {
+ injector.state.putAnnotation(annotationType, checkNotNull(scope, "scope"));
+ }
+
+ return true;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/Scopes.java b/src/main/java/org/elasticsearch/common/inject/Scopes.java
new file mode 100644
index 0000000..ed5b261
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Scopes.java
@@ -0,0 +1,143 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.InternalFactory;
+import org.elasticsearch.common.inject.internal.Scoping;
+
+import java.lang.annotation.Annotation;
+import java.util.Locale;
+
+/**
+ * Built-in scope implementations.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Scopes {
+
+ private Scopes() {
+ }
+
+ /**
+ * One instance per {@link Injector}. Also see {@code @}{@link Singleton}.
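+ * <p/>
+ * <p>A minimal usage sketch from a module ({@code CacheService} is a hypothetical class):
+ * <pre>bind(CacheService.class).in(Scopes.SINGLETON);</pre>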
+ */
+ public static final Scope SINGLETON = new Scope() {
+ public <T> Provider<T> scope(Key<T> key, final Provider<T> creator) {
+ return new Provider<T>() {
+
+ private volatile T instance;
+
+ // DCL on a volatile is safe as of Java 5, which we obviously require.
+ @SuppressWarnings("DoubleCheckedLocking")
+ public T get() {
+ if (instance == null) {
+ /*
+ * Use a pretty coarse lock. We don't want to run into deadlocks
+ * when two threads try to load circularly-dependent objects.
+ * Maybe one of these days we will identify independent graphs of
+ * objects and offer to load them in parallel.
+ */
+ synchronized (InjectorImpl.class) {
+ if (instance == null) {
+ instance = creator.get();
+ }
+ }
+ }
+ return instance;
+ }
+
+ public String toString() {
+ return String.format(Locale.ROOT, "%s[%s]", creator, SINGLETON);
+ }
+ };
+ }
+
+ @Override
+ public String toString() {
+ return "Scopes.SINGLETON";
+ }
+ };
+
+ /**
+ * No scope; the same as not applying any scope at all. Each time the
+ * Injector obtains an instance of an object with "no scope", it injects this
+ * instance then immediately forgets it. When the next request for the same
+ * binding arrives it will need to obtain the instance over again.
+ * <p/>
+ * <p>This exists only in case a class has been annotated with a scope
+ * annotation such as {@link Singleton @Singleton}, and you need to override
+ * this to "no scope" in your binding.
+ *
+ * @since 2.0
+ */
+ public static final Scope NO_SCOPE = new Scope() {
+ public <T> Provider<T> scope(Key<T> key, Provider<T> unscoped) {
+ return unscoped;
+ }
+
+ @Override
+ public String toString() {
+ return "Scopes.NO_SCOPE";
+ }
+ };
+
+ /**
+ * Scopes an internal factory.
+ */
+ static <T> InternalFactory<? extends T> scope(Key<T> key, InjectorImpl injector,
+ InternalFactory<? extends T> creator, Scoping scoping) {
+
+ if (scoping.isNoScope()) {
+ return creator;
+ }
+
+ Scope scope = scoping.getScopeInstance();
+
+ Provider<T> scoped
+ = scope.scope(key, new ProviderToInternalFactoryAdapter<T>(injector, creator));
+ return new InternalFactoryToProviderAdapter<T>(
+ Initializables.<Provider<? extends T>>of(scoped));
+ }
+
+ /**
+ * Replaces annotation scopes with instance scopes using the Injector's annotation-to-instance
+ * map. If the scope annotation has no corresponding instance, an error will be added and unscoped
+ * will be returned.
+ */
+ static Scoping makeInjectable(Scoping scoping, InjectorImpl injector, Errors errors) {
+ Class<? extends Annotation> scopeAnnotation = scoping.getScopeAnnotation();
+ if (scopeAnnotation == null) {
+ return scoping;
+ }
+
+ Scope scope = injector.state.getScope(scopeAnnotation);
+ if (scope != null) {
+ return Scoping.forInstance(scope);
+ }
+
+ errors.scopeNotFound(scopeAnnotation);
+ return Scoping.UNSCOPED;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java b/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java
new file mode 100644
index 0000000..e8408c4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java
@@ -0,0 +1,67 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.internal.InternalFactory;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.lang.reflect.Field;
+
+/**
+ * Sets an injectable field.
+ */
+class SingleFieldInjector implements SingleMemberInjector {
+ final Field field;
+ final InjectionPoint injectionPoint;
+ final Dependency<?> dependency;
+ final InternalFactory<?> factory;
+
+ public SingleFieldInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors)
+ throws ErrorsException {
+ this.injectionPoint = injectionPoint;
+ this.field = (Field) injectionPoint.getMember();
+ this.dependency = injectionPoint.getDependencies().get(0);
+
+ // Ewwwww...
+ field.setAccessible(true);
+ factory = injector.getInternalFactory(dependency.getKey(), errors);
+ }
+
+ public InjectionPoint getInjectionPoint() {
+ return injectionPoint;
+ }
+
+ public void inject(Errors errors, InternalContext context, Object o) {
+ errors = errors.withSource(dependency);
+
+ context.setDependency(dependency);
+ try {
+ Object value = factory.get(errors, context, dependency);
+ field.set(o, value);
+ } catch (ErrorsException e) {
+ errors.withSource(injectionPoint).merge(e.getErrors());
+ } catch (IllegalAccessException e) {
+ throw new AssertionError(e); // a security manager is blocking us, we're hosed
+ } finally {
+ context.setDependency(null);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java b/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java
new file mode 100644
index 0000000..791a4f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java
@@ -0,0 +1,30 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+/**
+ * Injects a field or method of a given object.
+ */
+interface SingleMemberInjector {
+ void inject(Errors errors, InternalContext context, Object o);
+
+ InjectionPoint getInjectionPoint();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java b/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java
new file mode 100644
index 0000000..9bc1143
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java
@@ -0,0 +1,85 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.InjectorImpl.MethodInvoker;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+/**
+ * Invokes an injectable method.
+ */
+class SingleMethodInjector implements SingleMemberInjector {
+ final MethodInvoker methodInvoker;
+ final SingleParameterInjector<?>[] parameterInjectors;
+ final InjectionPoint injectionPoint;
+
+ public SingleMethodInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors)
+ throws ErrorsException {
+ this.injectionPoint = injectionPoint;
+ final Method method = (Method) injectionPoint.getMember();
+ methodInvoker = createMethodInvoker(method);
+ parameterInjectors = injector.getParametersInjectors(injectionPoint.getDependencies(), errors);
+ }
+
+ private MethodInvoker createMethodInvoker(final Method method) {
+
+ // The method is invoked reflectively, so non-public methods must be made accessible.
+ int modifiers = method.getModifiers();
+ if (!Modifier.isPublic(modifiers)) {
+ method.setAccessible(true);
+ }
+
+ return new MethodInvoker() {
+ public Object invoke(Object target, Object... parameters)
+ throws IllegalAccessException, InvocationTargetException {
+ return method.invoke(target, parameters);
+ }
+ };
+ }
+
+ public InjectionPoint getInjectionPoint() {
+ return injectionPoint;
+ }
+
+ public void inject(Errors errors, InternalContext context, Object o) {
+ Object[] parameters;
+ try {
+ parameters = SingleParameterInjector.getAll(errors, context, parameterInjectors);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ return;
+ }
+
+ try {
+ methodInvoker.invoke(o, parameters);
+ } catch (IllegalAccessException e) {
+ throw new AssertionError(e); // a security manager is blocking us, we're hosed
+ } catch (InvocationTargetException userException) {
+ Throwable cause = userException.getCause() != null
+ ? userException.getCause()
+ : userException;
+ errors.withSource(injectionPoint).errorInjectingMethod(cause);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/SingleParameterInjector.java b/src/main/java/org/elasticsearch/common/inject/SingleParameterInjector.java
new file mode 100644
index 0000000..372b483
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/SingleParameterInjector.java
@@ -0,0 +1,75 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.internal.InternalContext;
+import org.elasticsearch.common.inject.internal.InternalFactory;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+/**
+ * Resolves a single parameter, to be used in a constructor or method invocation.
+ */
+class SingleParameterInjector<T> {
+ private static final Object[] NO_ARGUMENTS = {};
+
+ private final Dependency<T> dependency;
+ private final InternalFactory<? extends T> factory;
+
+ SingleParameterInjector(Dependency<T> dependency, InternalFactory<? extends T> factory) {
+ this.dependency = dependency;
+ this.factory = factory;
+ }
+
+ private T inject(Errors errors, InternalContext context) throws ErrorsException {
+ context.setDependency(dependency);
+ try {
+ return factory.get(errors.withSource(dependency), context, dependency);
+ } finally {
+ context.setDependency(null);
+ }
+ }
+
+ /**
+ * Returns an array of parameter values.
+ */
+ static Object[] getAll(Errors errors, InternalContext context,
+ SingleParameterInjector<?>[] parameterInjectors) throws ErrorsException {
+ if (parameterInjectors == null) {
+ return NO_ARGUMENTS;
+ }
+
+ int numErrorsBefore = errors.size();
+
+ int size = parameterInjectors.length;
+ Object[] parameters = new Object[size];
+
+ // optimization: use manual for/each to save allocating an iterator here
+ for (int i = 0; i < size; i++) {
+ SingleParameterInjector<?> parameterInjector = parameterInjectors[i];
+ try {
+ parameters[i] = parameterInjector.inject(errors, context);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+
+ errors.throwIfNewErrors(numErrorsBefore);
+ return parameters;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Singleton.java b/src/main/java/org/elasticsearch/common/inject/Singleton.java
new file mode 100644
index 0000000..eeca5e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Singleton.java
@@ -0,0 +1,41 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Apply this to implementation classes when you want only one instance
+ * (per {@link Injector}) to be reused for all injections for that binding.
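+ * <p/>
+ * <p>For example ({@code ClusterService} is a hypothetical class):
+ * <pre>   {@literal @}Singleton
+ * public class ClusterService {
+ *     ...
+ * }</pre>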
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+@Retention(RUNTIME)
+@ScopeAnnotation
+public @interface Singleton {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/SpawnModules.java b/src/main/java/org/elasticsearch/common/inject/SpawnModules.java
new file mode 100644
index 0000000..b2d1c54
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/SpawnModules.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * Implemented by modules that spawn additional modules to be installed alongside them.
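+ * <p/>
+ * <p>A sketch of an implementation (hypothetical {@code FooModule} spawning a
+ * hypothetical {@code BarModule}):
+ * <pre>{@code
+ * public class FooModule extends AbstractModule implements SpawnModules {
+ *     public Iterable<? extends Module> spawnModules() {
+ *         return ImmutableList.of(new BarModule());
+ *     }
+ *
+ *     protected void configure() {}
+ * }}</pre>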
+ */
+public interface SpawnModules {
+
+ Iterable<? extends Module> spawnModules();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/Stage.java b/src/main/java/org/elasticsearch/common/inject/Stage.java
new file mode 100644
index 0000000..f6969a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/Stage.java
@@ -0,0 +1,48 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+/**
+ * The stage we're running in.
+ *
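+ * <p>A sketch, assuming the usual {@code Guice.createInjector} entry point
+ * ({@code MyModule} is hypothetical):
+ * <pre>Injector injector = Guice.createInjector(Stage.PRODUCTION, new MyModule());</pre>
+ *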
+ * @author crazybob@google.com (Bob Lee)
+ */
+public enum Stage {
+
+ /**
+ * We're running in a tool (an IDE plugin for example). We need binding metadata but not a
+ * functioning Injector. Do not inject members of instances. Do not load eager singletons. Do as
+ * little as possible so our tools run nice and snappy. Injectors created in this stage cannot
+ * be used to satisfy injections.
+ */
+ TOOL,
+
+ /**
+ * We want fast startup times at the expense of runtime performance and some up front error
+ * checking.
+ */
+ DEVELOPMENT,
+
+ /**
+ * We want to catch errors as early as possible and take performance hits up front.
+ */
+ PRODUCTION
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/State.java b/src/main/java/org/elasticsearch/common/inject/State.java
new file mode 100644
index 0000000..b9a4f81
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/State.java
@@ -0,0 +1,165 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.internal.BindingImpl;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.MatcherAndConverter;
+import org.elasticsearch.common.inject.spi.TypeListenerBinding;
+
+import java.lang.annotation.Annotation;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The inheritable data within an injector. This class is intended to allow parent and local
+ * injector data to be accessed as a unit.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+interface State {
+
+ static final State NONE = new State() {
+ public State parent() {
+ throw new UnsupportedOperationException();
+ }
+
+ public <T> BindingImpl<T> getExplicitBinding(Key<T> key) {
+ return null;
+ }
+
+ public Map<Key<?>, Binding<?>> getExplicitBindingsThisLevel() {
+ throw new UnsupportedOperationException();
+ }
+
+ public void putBinding(Key<?> key, BindingImpl<?> binding) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Scope getScope(Class<? extends Annotation> scopingAnnotation) {
+ return null;
+ }
+
+ public void putAnnotation(Class<? extends Annotation> annotationType, Scope scope) {
+ throw new UnsupportedOperationException();
+ }
+
+ public void addConverter(MatcherAndConverter matcherAndConverter) {
+ throw new UnsupportedOperationException();
+ }
+
+ public MatcherAndConverter getConverter(String stringValue, TypeLiteral<?> type, Errors errors,
+ Object source) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Iterable<MatcherAndConverter> getConvertersThisLevel() {
+ return ImmutableSet.of();
+ }
+
+ public void addTypeListener(TypeListenerBinding typeListenerBinding) {
+ throw new UnsupportedOperationException();
+ }
+
+ public List<TypeListenerBinding> getTypeListenerBindings() {
+ return ImmutableList.of();
+ }
+
+ public void blacklist(Key<?> key) {
+ }
+
+ public boolean isBlacklisted(Key<?> key) {
+ return true;
+ }
+
+ @Override
+ public void clearBlacklisted() {
+ }
+
+ @Override
+ public void makeAllBindingsToEagerSingletons(Injector injector) {
+ }
+
+ public Object lock() {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ State parent();
+
+ /**
+ * Gets a binding which was specified explicitly in a module, or null.
+ */
+ <T> BindingImpl<T> getExplicitBinding(Key<T> key);
+
+ /**
+ * Returns the explicit bindings at this level only.
+ */
+ Map<Key<?>, Binding<?>> getExplicitBindingsThisLevel();
+
+ void putBinding(Key<?> key, BindingImpl<?> binding);
+
+ /**
+ * Returns the matching scope, or null.
+ */
+ Scope getScope(Class<? extends Annotation> scopingAnnotation);
+
+ void putAnnotation(Class<? extends Annotation> annotationType, Scope scope);
+
+ void addConverter(MatcherAndConverter matcherAndConverter);
+
+ /**
+ * Returns the matching converter for {@code type}, or null if none match.
+ */
+ MatcherAndConverter getConverter(
+ String stringValue, TypeLiteral<?> type, Errors errors, Object source);
+
+ /**
+ * Returns all converters at this level only.
+ */
+ Iterable<MatcherAndConverter> getConvertersThisLevel();
+
+ void addTypeListener(TypeListenerBinding typeListenerBinding);
+
+ List<TypeListenerBinding> getTypeListenerBindings();
+
+ /**
+ * Forbids the corresponding injector from creating a binding to {@code key}. Child injectors
+ * blacklist their bound keys on their parent injectors to prevent just-in-time bindings on the
+ * parent injector that would conflict.
+ */
+ void blacklist(Key<?> key);
+
+ /**
+ * Returns true if {@code key} is forbidden from being bound in this injector. This indicates that
+ * one of this injector's descendants has bound the key.
+ */
+ boolean isBlacklisted(Key<?> key);
+
+ /**
+ * Returns the shared lock for all injector data. This is a low-granularity, high-contention lock
+ * to be used when reading mutable data (i.e. just-in-time bindings and binding blacklists).
+ */
+ Object lock();
+
+ // ES_GUICE: clean blacklist keys
+ void clearBlacklisted();
+
+ void makeAllBindingsToEagerSingletons(Injector injector);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/TypeConverterBindingProcessor.java b/src/main/java/org/elasticsearch/common/inject/TypeConverterBindingProcessor.java
new file mode 100644
index 0000000..040ef40
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/TypeConverterBindingProcessor.java
@@ -0,0 +1,187 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.MatcherAndConverter;
+import org.elasticsearch.common.inject.internal.SourceProvider;
+import org.elasticsearch.common.inject.internal.Strings;
+import org.elasticsearch.common.inject.matcher.AbstractMatcher;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.inject.matcher.Matchers;
+import org.elasticsearch.common.inject.spi.TypeConverter;
+import org.elasticsearch.common.inject.spi.TypeConverterBinding;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Type;
+
+/**
+ * Handles {@link Binder#convertToTypes} commands.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class TypeConverterBindingProcessor extends AbstractProcessor {
+
+ TypeConverterBindingProcessor(Errors errors) {
+ super(errors);
+ }
+
+ /**
+ * Installs default converters for primitives, enums, and class literals.
+ */
+ public void prepareBuiltInConverters(InjectorImpl injector) {
+ this.injector = injector;
+ try {
+ // Configure type converters.
+ convertToPrimitiveType(int.class, Integer.class);
+ convertToPrimitiveType(long.class, Long.class);
+ convertToPrimitiveType(boolean.class, Boolean.class);
+ convertToPrimitiveType(byte.class, Byte.class);
+ convertToPrimitiveType(short.class, Short.class);
+ convertToPrimitiveType(float.class, Float.class);
+ convertToPrimitiveType(double.class, Double.class);
+
+ convertToClass(Character.class, new TypeConverter() {
+ public Object convert(String value, TypeLiteral<?> toType) {
+ value = value.trim();
+ if (value.length() != 1) {
+ throw new RuntimeException("Length != 1.");
+ }
+ return value.charAt(0);
+ }
+
+ @Override
+ public String toString() {
+ return "TypeConverter<Character>";
+ }
+ });
+
+ convertToClasses(Matchers.subclassesOf(Enum.class), new TypeConverter() {
+ @SuppressWarnings("unchecked")
+ public Object convert(String value, TypeLiteral<?> toType) {
+ return Enum.valueOf((Class) toType.getRawType(), value);
+ }
+
+ @Override
+ public String toString() {
+ return "TypeConverter<E extends Enum<E>>";
+ }
+ });
+
+ internalConvertToTypes(
+ new AbstractMatcher<TypeLiteral<?>>() {
+ public boolean matches(TypeLiteral<?> typeLiteral) {
+ return typeLiteral.getRawType() == Class.class;
+ }
+
+ @Override
+ public String toString() {
+ return "Class<?>";
+ }
+ },
+ new TypeConverter() {
+ @SuppressWarnings("unchecked")
+ public Object convert(String value, TypeLiteral<?> toType) {
+ try {
+ return Class.forName(value);
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(e.getMessage());
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "TypeConverter<Class<?>>";
+ }
+ }
+ );
+ } finally {
+ this.injector = null;
+ }
+ }
+
+ private <T> void convertToPrimitiveType(Class<T> primitiveType, final Class<T> wrapperType) {
+ try {
+ final Method parser = wrapperType.getMethod(
+ "parse" + Strings.capitalize(primitiveType.getName()), String.class);
+
+ TypeConverter typeConverter = new TypeConverter() {
+ @SuppressWarnings("unchecked")
+ public Object convert(String value, TypeLiteral<?> toType) {
+ try {
+ return parser.invoke(null, value);
+ } catch (IllegalAccessException e) {
+ throw new AssertionError(e);
+ } catch (InvocationTargetException e) {
+ throw new RuntimeException(e.getTargetException().getMessage());
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "TypeConverter<" + wrapperType.getSimpleName() + ">";
+ }
+ };
+
+ convertToClass(wrapperType, typeConverter);
+ } catch (NoSuchMethodException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ private <T> void convertToClass(Class<T> type, TypeConverter converter) {
+ convertToClasses(Matchers.identicalTo(type), converter);
+ }
+
+ private void convertToClasses(final Matcher<? super Class<?>> typeMatcher,
+ TypeConverter converter) {
+ internalConvertToTypes(new AbstractMatcher<TypeLiteral<?>>() {
+ public boolean matches(TypeLiteral<?> typeLiteral) {
+ Type type = typeLiteral.getType();
+ if (!(type instanceof Class)) {
+ return false;
+ }
+ Class<?> clazz = (Class<?>) type;
+ return typeMatcher.matches(clazz);
+ }
+
+ @Override
+ public String toString() {
+ return typeMatcher.toString();
+ }
+ }, converter);
+ }
+
+ private void internalConvertToTypes(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter converter) {
+ injector.state.addConverter(
+ new MatcherAndConverter(typeMatcher, converter, SourceProvider.UNKNOWN_SOURCE));
+ }
+
+ @Override
+ public Boolean visit(TypeConverterBinding command) {
+ injector.state.addConverter(new MatcherAndConverter(
+ command.getTypeMatcher(), command.getTypeConverter(), command.getSource()));
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/TypeListenerBindingProcessor.java b/src/main/java/org/elasticsearch/common/inject/TypeListenerBindingProcessor.java
new file mode 100644
index 0000000..4139fc0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/TypeListenerBindingProcessor.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.TypeListenerBinding;
+
+/**
+ * Handles {@link Binder#bindListener} commands.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class TypeListenerBindingProcessor extends AbstractProcessor {
+
+ TypeListenerBindingProcessor(Errors errors) {
+ super(errors);
+ }
+
+ @Override
+ public Boolean visit(TypeListenerBinding binding) {
+ injector.state.addTypeListener(binding);
+ return true;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java
new file mode 100644
index 0000000..0a31c20
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java
@@ -0,0 +1,354 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.internal.MoreTypes;
+import org.elasticsearch.common.inject.util.Types;
+
+import java.lang.reflect.*;
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.elasticsearch.common.inject.internal.MoreTypes.canonicalize;
+
+/**
+ * Represents a generic type {@code T}. Java doesn't yet provide a way to
+ * represent generic types, so this class does. It forces clients to create a
+ * subclass of this class, which enables retrieval of the type information even
+ * at runtime.
+ * <p/>
+ * <p>For example, to create a type literal for {@code List<String>}, you can
+ * create an empty anonymous inner class:
+ * <p/>
+ * {@code TypeLiteral<List<String>> list = new TypeLiteral<List<String>>() {};}
+ * <p/>
+ * <p>This syntax cannot be used to create type literals that have wildcard
+ * parameters, such as {@code Class<?>} or {@code List<? extends CharSequence>}.
+ * Such type literals must be constructed programmatically, either by {@link
+ * Method#getGenericReturnType extracting types from members} or by using the
+ * {@link Types} factory class.
+ * <p/>
+ * <p>Along with modeling generic types, this class can resolve type parameters.
+ * For example, to figure out what type {@code keySet()} returns on a {@code
+ * Map<Integer, String>}, use this code:<pre> {@code
+ *
+ * TypeLiteral<Map<Integer, String>> mapType
+ * = new TypeLiteral<Map<Integer, String>>() {};
+ * TypeLiteral<?> keySetType
+ * = mapType.getReturnType(Map.class.getMethod("keySet"));
+ * System.out.println(keySetType); // prints "Set<Integer>"}</pre>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class TypeLiteral<T> {
+
+ final Class<? super T> rawType;
+ final Type type;
+ final int hashCode;
+
+ /**
+ * Constructs a new type literal. Derives represented class from type
+ * parameter.
+ * <p/>
+ * <p>Clients create an empty anonymous subclass. Doing so embeds the type
+ * parameter in the anonymous class's type hierarchy so we can reconstitute it
+ * at runtime despite erasure.
+ */
+ @SuppressWarnings("unchecked")
+ protected TypeLiteral() {
+ this.type = getSuperclassTypeParameter(getClass());
+ this.rawType = (Class<? super T>) MoreTypes.getRawType(type);
+ this.hashCode = MoreTypes.hashCode(type);
+ }
+
+ /**
+ * Unsafe. Constructs a type literal manually.
+ */
+ @SuppressWarnings("unchecked")
+ TypeLiteral(Type type) {
+ this.type = canonicalize(checkNotNull(type, "type"));
+ this.rawType = (Class<? super T>) MoreTypes.getRawType(this.type);
+ this.hashCode = MoreTypes.hashCode(this.type);
+ }
+
+ /**
+ * Returns the type from the superclass's type parameter in {@link MoreTypes#canonicalize(Type)
+ * canonical form}.
+ */
+ static Type getSuperclassTypeParameter(Class<?> subclass) {
+ Type superclass = subclass.getGenericSuperclass();
+ if (superclass instanceof Class) {
+ throw new RuntimeException("Missing type parameter.");
+ }
+ ParameterizedType parameterized = (ParameterizedType) superclass;
+ return canonicalize(parameterized.getActualTypeArguments()[0]);
+ }
+
+ /**
+ * Gets the type literal from the superclass's type parameter.
+ */
+ static TypeLiteral<?> fromSuperclassTypeParameter(Class<?> subclass) {
+ return new TypeLiteral<Object>(getSuperclassTypeParameter(subclass));
+ }
+
+ /**
+ * Returns the raw (non-generic) type for this type.
+ *
+ * @since 2.0
+ */
+ public final Class<? super T> getRawType() {
+ return rawType;
+ }
+
+ /**
+ * Gets underlying {@code Type} instance.
+ */
+ public final Type getType() {
+ return type;
+ }
+
+ /**
+ * Gets the type of this type's provider.
+ */
+ @SuppressWarnings("unchecked")
+ final TypeLiteral<Provider<T>> providerType() {
+ // This cast is safe and wouldn't generate a warning if Type had a type
+ // parameter.
+ return (TypeLiteral<Provider<T>>) get(Types.providerOf(getType()));
+ }
+
+ @Override
+ public final int hashCode() {
+ return this.hashCode;
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ return o instanceof TypeLiteral<?>
+ && MoreTypes.equals(type, ((TypeLiteral) o).type);
+ }
+
+ @Override
+ public final String toString() {
+ return MoreTypes.toString(type);
+ }
+
+ /**
+ * Gets type literal for the given {@code Type} instance.
+ */
+ public static TypeLiteral<?> get(Type type) {
+ return new TypeLiteral<Object>(type);
+ }
+
+ /**
+ * Gets type literal for the given {@code Class} instance.
+ */
+ public static <T> TypeLiteral<T> get(Class<T> type) {
+ return new TypeLiteral<T>(type);
+ }
+
+
+ /**
+ * Returns an immutable list of the resolved types.
+ */
+ private List<TypeLiteral<?>> resolveAll(Type[] types) {
+ TypeLiteral<?>[] result = new TypeLiteral<?>[types.length];
+ for (int t = 0; t < types.length; t++) {
+ result[t] = resolve(types[t]);
+ }
+ return ImmutableList.copyOf(result);
+ }
+
+ /**
+ * Resolves known type parameters in {@code toResolve} and returns the result.
+ */
+ TypeLiteral<?> resolve(Type toResolve) {
+ return TypeLiteral.get(resolveType(toResolve));
+ }
+
+ Type resolveType(Type toResolve) {
+ // this implementation is made a little more complicated in an attempt to avoid object-creation
+ while (true) {
+ if (toResolve instanceof TypeVariable) {
+ TypeVariable original = (TypeVariable) toResolve;
+ toResolve = MoreTypes.resolveTypeVariable(type, rawType, original);
+ if (toResolve == original) {
+ return toResolve;
+ }
+
+ } else if (toResolve instanceof GenericArrayType) {
+ GenericArrayType original = (GenericArrayType) toResolve;
+ Type componentType = original.getGenericComponentType();
+ Type newComponentType = resolveType(componentType);
+ return componentType == newComponentType
+ ? original
+ : Types.arrayOf(newComponentType);
+
+ } else if (toResolve instanceof ParameterizedType) {
+ ParameterizedType original = (ParameterizedType) toResolve;
+ Type ownerType = original.getOwnerType();
+ Type newOwnerType = resolveType(ownerType);
+ boolean changed = newOwnerType != ownerType;
+
+ Type[] args = original.getActualTypeArguments();
+ for (int t = 0, length = args.length; t < length; t++) {
+ Type resolvedTypeArgument = resolveType(args[t]);
+ if (resolvedTypeArgument != args[t]) {
+ if (!changed) {
+ args = args.clone();
+ changed = true;
+ }
+ args[t] = resolvedTypeArgument;
+ }
+ }
+
+ return changed
+ ? Types.newParameterizedTypeWithOwner(newOwnerType, original.getRawType(), args)
+ : original;
+
+ } else if (toResolve instanceof WildcardType) {
+ WildcardType original = (WildcardType) toResolve;
+ Type[] originalLowerBound = original.getLowerBounds();
+ Type[] originalUpperBound = original.getUpperBounds();
+
+ if (originalLowerBound.length == 1) {
+ Type lowerBound = resolveType(originalLowerBound[0]);
+ if (lowerBound != originalLowerBound[0]) {
+ return Types.supertypeOf(lowerBound);
+ }
+ } else if (originalUpperBound.length == 1) {
+ Type upperBound = resolveType(originalUpperBound[0]);
+ if (upperBound != originalUpperBound[0]) {
+ return Types.subtypeOf(upperBound);
+ }
+ }
+ return original;
+
+ } else {
+ return toResolve;
+ }
+ }
+ }
+
+ /**
+ * Returns the generic form of {@code supertype}. For example, if this is {@code
+ * ArrayList<String>}, this returns {@code Iterable<String>} given the input {@code
+ * Iterable.class}.
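+ * <p/>
+ * <pre>{@code
+ * TypeLiteral<?> supertype
+ *     = new TypeLiteral<ArrayList<String>>() {}.getSupertype(Iterable.class);
+ * // supertype now represents Iterable<String>}</pre>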
+ *
+ * @param supertype a superclass of, or interface implemented by, this.
+ * @since 2.0
+ */
+ public TypeLiteral<?> getSupertype(Class<?> supertype) {
+ checkArgument(supertype.isAssignableFrom(rawType),
+ "%s is not a supertype of %s", supertype, this.type);
+ return resolve(MoreTypes.getGenericSupertype(type, rawType, supertype));
+ }
+
+ /**
+ * Returns the resolved generic type of {@code field}.
+ *
+ * @param field a field defined by this or any superclass.
+ * @since 2.0
+ */
+ public TypeLiteral<?> getFieldType(Field field) {
+ checkArgument(field.getDeclaringClass().isAssignableFrom(rawType),
+ "%s is not defined by a supertype of %s", field, type);
+ return resolve(field.getGenericType());
+ }
+
+ /**
+ * Returns the resolved generic parameter types of {@code methodOrConstructor}.
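+ * <p/>
+ * <pre>{@code
+ * TypeLiteral<Map<Integer, String>> mapType = new TypeLiteral<Map<Integer, String>>() {};
+ * List<TypeLiteral<?>> params    // [Integer, String]
+ *     = mapType.getParameterTypes(Map.class.getMethod("put", Object.class, Object.class));
+ * }</pre>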
+ *
+ * @param methodOrConstructor a method or constructor defined by this or any supertype.
+ * @since 2.0
+ */
+ public List<TypeLiteral<?>> getParameterTypes(Member methodOrConstructor) {
+ Type[] genericParameterTypes;
+
+ if (methodOrConstructor instanceof Method) {
+ Method method = (Method) methodOrConstructor;
+ checkArgument(method.getDeclaringClass().isAssignableFrom(rawType),
+ "%s is not defined by a supertype of %s", method, type);
+ genericParameterTypes = method.getGenericParameterTypes();
+
+ } else if (methodOrConstructor instanceof Constructor) {
+ Constructor constructor = (Constructor) methodOrConstructor;
+ checkArgument(constructor.getDeclaringClass().isAssignableFrom(rawType),
+ "%s does not construct a supertype of %s", constructor, type);
+ genericParameterTypes = constructor.getGenericParameterTypes();
+
+ } else {
+ throw new IllegalArgumentException("Not a method or a constructor: " + methodOrConstructor);
+ }
+
+ return resolveAll(genericParameterTypes);
+ }
+
+ /**
+ * Returns the resolved generic exception types thrown by {@code constructor}.
+ *
+ * @param methodOrConstructor a method or constructor defined by this or any supertype.
+ * @since 2.0
+ */
+ public List<TypeLiteral<?>> getExceptionTypes(Member methodOrConstructor) {
+ Type[] genericExceptionTypes;
+
+ if (methodOrConstructor instanceof Method) {
+ Method method = (Method) methodOrConstructor;
+ checkArgument(method.getDeclaringClass().isAssignableFrom(rawType),
+ "%s is not defined by a supertype of %s", method, type);
+ genericExceptionTypes = method.getGenericExceptionTypes();
+
+ } else if (methodOrConstructor instanceof Constructor) {
+ Constructor<?> constructor = (Constructor<?>) methodOrConstructor;
+ checkArgument(constructor.getDeclaringClass().isAssignableFrom(rawType),
+ "%s does not construct a supertype of %s", constructor, type);
+ genericExceptionTypes = constructor.getGenericExceptionTypes();
+
+ } else {
+ throw new IllegalArgumentException("Not a method or a constructor: " + methodOrConstructor);
+ }
+
+ return resolveAll(genericExceptionTypes);
+ }
+
+ /**
+ * Returns the resolved generic return type of {@code method}.
+ *
+ * @param method a method defined by this or any supertype.
+ * @since 2.0
+ */
+ public TypeLiteral<?> getReturnType(Method method) {
+ checkArgument(method.getDeclaringClass().isAssignableFrom(rawType),
+ "%s is not defined by a supertype of %s", method, type);
+ return resolve(method.getGenericReturnType());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/WeakKeySet.java b/src/main/java/org/elasticsearch/common/inject/WeakKeySet.java
new file mode 100644
index 0000000..bff939d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/WeakKeySet.java
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject;
+
+import com.google.common.collect.Sets;
+
+import java.util.Set;
+
+/**
+ * Minimal set that doesn't hold strong references to the contained keys.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+final class WeakKeySet {
+
+ /**
+ * We store strings rather than keys so we don't hold strong references.
+ * <p/>
+ * <p>One potential problem with this approach is that parent and child injectors cannot define
+ * keys whose class names are equal but class loaders are different. This shouldn't be an issue
+ * in practice.
+ */
+ private Set<String> backingSet = Sets.newHashSet();
+
+ public boolean add(Key<?> key) {
+ return backingSet.add(key.toString());
+ }
+
+ public boolean contains(Object o) {
+ return o instanceof Key && backingSet.contains(o.toString());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/Assisted.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/Assisted.java
new file mode 100644
index 0000000..7dba708
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/Assisted.java
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.*;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Annotates an injected parameter or field whose value comes from an argument to a factory method.
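+ * <p/>
+ * <p>For example, echoing the {@link FactoryProvider} docs ({@code RealPayment} and its
+ * collaborators are illustrative):
+ * <pre>public RealPayment(CreditService creditService, {@literal @}Assisted Date startDate) { ... }</pre>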
+ *
+ * @author jmourits@google.com (Jerome Mourits)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+@BindingAnnotation
+@Target({FIELD, PARAMETER, METHOD})
+@Retention(RUNTIME)
+public @interface Assisted {
+
+ /**
+ * The unique name for this parameter. This is matched to the {@literal @Assisted} constructor
+ * parameter with the same value. Names are not necessary when the parameter types are distinct.
+ */
+ String value() default "";
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java
new file mode 100644
index 0000000..29bbd6b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java
@@ -0,0 +1,100 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.TypeLiteral;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Type;
+import java.util.*;
+
+/**
+ * Internal representation of a constructor annotated with
+ * {@link AssistedInject}
+ *
+ * @author jmourits@google.com (Jerome Mourits)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class AssistedConstructor<T> {
+
+ private final Constructor<T> constructor;
+ private final ParameterListKey assistedParameters;
+ private final List<Parameter> allParameters;
+
+ @SuppressWarnings("unchecked")
+ public AssistedConstructor(Constructor<T> constructor, List<TypeLiteral<?>> parameterTypes) {
+ this.constructor = constructor;
+
+ Annotation[][] annotations = constructor.getParameterAnnotations();
+
+ List<Type> typeList = Lists.newArrayList();
+ allParameters = new ArrayList<Parameter>();
+
+ // categorize params as @Assisted or @Inject-supplied
+ for (int i = 0; i < parameterTypes.size(); i++) {
+ Parameter parameter = new Parameter(parameterTypes.get(i).getType(), annotations[i]);
+ allParameters.add(parameter);
+ if (parameter.isProvidedByFactory()) {
+ typeList.add(parameter.getType());
+ }
+ }
+ this.assistedParameters = new ParameterListKey(typeList);
+ }
+
+ /**
+ * Returns the {@link ParameterListKey} for this constructor. The
+ * {@link ParameterListKey} is created from the ordered list of {@link Assisted}
+ * constructor parameters.
+ */
+ public ParameterListKey getAssistedParameters() {
+ return assistedParameters;
+ }
+
+ /**
+ * Returns an ordered list of all constructor parameters (both
+ * {@link Assisted} and {@link Inject}ed).
+ */
+ public List<Parameter> getAllParameters() {
+ return allParameters;
+ }
+
+ public Set<Class<?>> getDeclaredExceptions() {
+ return new HashSet<Class<?>>(Arrays.asList(constructor.getExceptionTypes()));
+ }
+
+ /**
+ * Returns an instance of T, constructed using this constructor, with the
+ * supplied arguments.
+ */
+ public T newInstance(Object[] args) throws Throwable {
+ constructor.setAccessible(true);
+ try {
+ return constructor.newInstance(args);
+ } catch (InvocationTargetException e) {
+ throw e.getCause();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return constructor.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedInject.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedInject.java
new file mode 100644
index 0000000..cbba8be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedInject.java
@@ -0,0 +1,44 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.CONSTRUCTOR;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * <p>Constructors annotated with {@code @AssistedInject} indicate that they can be instantiated by
+ * the {@link FactoryProvider}. Each constructor must exactly match one corresponding factory method
+ * within the factory interface.
+ * <p/>
+ * <p>Constructor parameters must be either supplied by the factory interface and marked with
+ * <code>@Assisted</code>, or they must be injectable.
+ *
+ * @author jmourits@google.com (Jerome Mourits)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @deprecated {@link FactoryProvider} now works better with the standard {@literal @Inject}
+ * annotation. When using that annotation, parameters are matched by name and type rather than
+ * by position. In addition, values that use the standard {@literal @Inject} constructor
+ * annotation are eligible for method interception.
+ */
+@Target({CONSTRUCTOR})
+@Retention(RUNTIME)
+@Deprecated
+public @interface AssistedInject {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java
new file mode 100644
index 0000000..5b29ce4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java
@@ -0,0 +1,338 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.HasDependencies;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.*;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Provides a factory that combines the caller's arguments with injector-supplied values to
+ * construct objects.
+ * <p/>
+ * <h3>Defining a factory</h3>
+ * Create an interface whose methods return the constructed type, or any of its supertypes. The
+ * method's parameters are the arguments required to build the constructed type.
+ * <pre>public interface PaymentFactory {
+ * Payment create(Date startDate, Money amount);
+ * }</pre>
+ * You can name your factory methods whatever you like, such as <i>create</i>, <i>createPayment</i>
+ * or <i>newPayment</i>.
+ * <p/>
+ * <h3>Creating a type that accepts factory parameters</h3>
+ * {@code constructedType} is a concrete class with an {@literal @}{@link Inject}-annotated
+ * constructor. In addition to injector-supplied parameters, the constructor should have
+ * parameters that match each of the factory method's parameters. Each factory-supplied parameter
+ * requires an {@literal @}{@link Assisted} annotation. This serves to document that the parameter
+ * is not bound by your application's modules.
+ * <pre>public class RealPayment implements Payment {
+ * {@literal @}Inject
+ * public RealPayment(
+ * CreditService creditService,
+ * AuthService authService,
+ * <strong>{@literal @}Assisted Date startDate</strong>,
+ * <strong>{@literal @}Assisted Money amount</strong>) {
+ * ...
+ * }
+ * }</pre>
+ * Any parameter that permits a null value should also be annotated {@code @Nullable}.
+ * <p/>
+ * <h3>Configuring factories</h3>
+ * In your {@link org.elasticsearch.common.inject.Module module}, bind the factory interface to the returned
+ * factory:
+ * <pre>bind(PaymentFactory.class).toProvider(
+ * FactoryProvider.newFactory(PaymentFactory.class, RealPayment.class));</pre>
+ * As a side-effect of this binding, Guice will inject the factory to initialize it for use. The
+ * factory cannot be used until the injector has been initialized.
+ * <p/>
+ * <h3>Using the factory</h3>
+ * Inject your factory into your application classes. When you use the factory, your arguments
+ * will be combined with values from the injector to construct an instance.
+ * <pre>public class PaymentAction {
+ * {@literal @}Inject private PaymentFactory paymentFactory;
+ *
+ * public void doPayment(Money amount) {
+ * Payment payment = paymentFactory.create(new Date(), amount);
+ * payment.apply();
+ * }
+ * }</pre>
+ * <p/>
+ * <h3>Making parameter types distinct</h3>
+ * The types of the factory method's parameters must be distinct. To use multiple parameters of
+ * the same type, use a named {@literal @}{@link Assisted} annotation to disambiguate the
+ * parameters. The names must be applied to the factory method's parameters:
+ * <p/>
+ * <pre>public interface PaymentFactory {
+ * Payment create(
+ * <strong>{@literal @}Assisted("startDate")</strong> Date startDate,
+ * <strong>{@literal @}Assisted("dueDate")</strong> Date dueDate,
+ * Money amount);
+ * } </pre>
+ * ...and to the concrete type's constructor parameters:
+ * <pre>public class RealPayment implements Payment {
+ * {@literal @}Inject
+ * public RealPayment(
+ * CreditService creditService,
+ * AuthService authService,
+ * <strong>{@literal @}Assisted("startDate")</strong> Date startDate,
+ * <strong>{@literal @}Assisted("dueDate")</strong> Date dueDate,
+ * <strong>{@literal @}Assisted</strong> Money amount) {
+ * ...
+ * }
+ * }</pre>
+ * <p/>
+ * <h3>Values are created by Guice</h3>
+ * Returned factories use child injectors to create values. The values are eligible for method
+ * interception. In addition, {@literal @}Inject members will be injected before they are
+ * returned.
+ * <p/>
+ * <h3>Backwards compatibility using {@literal @}AssistedInject</h3>
+ * Instead of the {@literal @}Inject annotation, you may annotate the constructed classes with
+ * {@literal @}{@link AssistedInject}. This triggers a limited backwards-compatibility mode.
+ * <p/>
+ * <p>Instead of matching factory method arguments to constructor parameters using their names, the
+ * <strong>parameters are matched by their order</strong>. The first factory method argument is
+ * used for the first {@literal @}Assisted constructor parameter, etc. Annotation names have no
+ * effect.
+ * <p/>
+ * <p>Returned values are <strong>not created by Guice</strong>. These types are not eligible for
+ * method interception. They do receive post-construction member injection.
+ *
+ * @param <F> The factory interface
+ * @author jmourits@google.com (Jerome Mourits)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @author dtm@google.com (Daniel Martin)
+ */
+public class FactoryProvider<F> implements Provider<F>, HasDependencies {
+
+ /*
+     * This class is the old @AssistedInject implementation that manually matches constructors
+ * to factory methods. The new child injector implementation lives in FactoryProvider2.
+ */
+
+ private Injector injector;
+
+ private final TypeLiteral<F> factoryType;
+ private final Map<Method, AssistedConstructor<?>> factoryMethodToConstructor;
+
+ public static <F> Provider<F> newFactory(
+ Class<F> factoryType, Class<?> implementationType) {
+ return newFactory(TypeLiteral.get(factoryType), TypeLiteral.get(implementationType));
+ }
+
+ public static <F> Provider<F> newFactory(
+ TypeLiteral<F> factoryType, TypeLiteral<?> implementationType) {
+ Map<Method, AssistedConstructor<?>> factoryMethodToConstructor
+ = createMethodMapping(factoryType, implementationType);
+
+ if (!factoryMethodToConstructor.isEmpty()) {
+ return new FactoryProvider<F>(factoryType, factoryMethodToConstructor);
+ } else {
+ return new FactoryProvider2<F>(factoryType, Key.get(implementationType));
+ }
+ }
+
+ private FactoryProvider(TypeLiteral<F> factoryType,
+ Map<Method, AssistedConstructor<?>> factoryMethodToConstructor) {
+ this.factoryType = factoryType;
+ this.factoryMethodToConstructor = factoryMethodToConstructor;
+ checkDeclaredExceptionsMatch();
+ }
+
+ @Inject
+ void setInjectorAndCheckUnboundParametersAreInjectable(Injector injector) {
+ this.injector = injector;
+ for (AssistedConstructor<?> c : factoryMethodToConstructor.values()) {
+ for (Parameter p : c.getAllParameters()) {
+ if (!p.isProvidedByFactory() && !paramCanBeInjected(p, injector)) {
+ // this is lame - we're not using the proper mechanism to add an
+ // error to the injector. Throughout this class we throw exceptions
+ // to add errors, which isn't really the best way in Guice
+ throw newConfigurationException("Parameter of type '%s' is not injectable or annotated "
+ + "with @Assisted for Constructor '%s'", p, c);
+ }
+ }
+ }
+ }
+
+ private void checkDeclaredExceptionsMatch() {
+ for (Map.Entry<Method, AssistedConstructor<?>> entry : factoryMethodToConstructor.entrySet()) {
+ for (Class<?> constructorException : entry.getValue().getDeclaredExceptions()) {
+            if (!isConstructorExceptionCompatibleWithFactoryException(
+ constructorException, entry.getKey().getExceptionTypes())) {
+ throw newConfigurationException("Constructor %s declares an exception, but no compatible "
+ + "exception is thrown by the factory method %s", entry.getValue(), entry.getKey());
+ }
+ }
+ }
+ }
+
+    private boolean isConstructorExceptionCompatibleWithFactoryException(
+ Class<?> constructorException, Class<?>[] factoryExceptions) {
+ for (Class<?> factoryException : factoryExceptions) {
+ if (factoryException.isAssignableFrom(constructorException)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private boolean paramCanBeInjected(Parameter parameter, Injector injector) {
+ return parameter.isBound(injector);
+ }
+
+ private static Map<Method, AssistedConstructor<?>> createMethodMapping(
+ TypeLiteral<?> factoryType, TypeLiteral<?> implementationType) {
+ List<AssistedConstructor<?>> constructors = Lists.newArrayList();
+
+ for (Constructor<?> constructor : implementationType.getRawType().getDeclaredConstructors()) {
+ if (constructor.getAnnotation(AssistedInject.class) != null) {
+ @SuppressWarnings("unchecked") // the constructor type and implementation type agree
+ AssistedConstructor assistedConstructor = new AssistedConstructor(
+ constructor, implementationType.getParameterTypes(constructor));
+ constructors.add(assistedConstructor);
+ }
+ }
+
+ if (constructors.isEmpty()) {
+ return ImmutableMap.of();
+ }
+
+ Method[] factoryMethods = factoryType.getRawType().getMethods();
+
+ if (constructors.size() != factoryMethods.length) {
+ throw newConfigurationException("Constructor mismatch: %s has %s @AssistedInject "
+ + "constructors, factory %s has %s creation methods", implementationType,
+ constructors.size(), factoryType, factoryMethods.length);
+ }
+
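+        // Index each @AssistedInject constructor by its list of @Assisted parameter types;
+        // factory methods are then matched to constructors through this index.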
+ Map<ParameterListKey, AssistedConstructor> paramsToConstructor = Maps.newHashMap();
+
+ for (AssistedConstructor c : constructors) {
+ if (paramsToConstructor.containsKey(c.getAssistedParameters())) {
+ throw new RuntimeException("Duplicate constructor, " + c);
+ }
+ paramsToConstructor.put(c.getAssistedParameters(), c);
+ }
+
+ Map<Method, AssistedConstructor<?>> result = Maps.newHashMap();
+ for (Method method : factoryMethods) {
+ if (!method.getReturnType().isAssignableFrom(implementationType.getRawType())) {
+ throw newConfigurationException("Return type of method %s is not assignable from %s",
+ method, implementationType);
+ }
+
+ List<Type> parameterTypes = Lists.newArrayList();
+ for (TypeLiteral<?> parameterType : factoryType.getParameterTypes(method)) {
+ parameterTypes.add(parameterType.getType());
+ }
+ ParameterListKey methodParams = new ParameterListKey(parameterTypes);
+
+ if (!paramsToConstructor.containsKey(methodParams)) {
+ throw newConfigurationException("%s has no @AssistInject constructor that takes the "
+ + "@Assisted parameters %s in that order. @AssistInject constructors are %s",
+ implementationType, methodParams, paramsToConstructor.values());
+ }
+
+ for (Annotation[] parameterAnnotations : method.getParameterAnnotations()) {
+ for (Annotation parameterAnnotation : parameterAnnotations) {
+ if (parameterAnnotation.annotationType() == Assisted.class) {
+ throw newConfigurationException("Factory method %s has an @Assisted parameter, which "
+ + "is incompatible with the deprecated @AssistedInject annotation. Please replace "
+ + "@AssistedInject with @Inject on the %s constructor.",
+ method, implementationType);
+ }
+ }
+ }
+
+ AssistedConstructor matchingConstructor = paramsToConstructor.remove(methodParams);
+
+ result.put(method, matchingConstructor);
+ }
+ return result;
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ List<Dependency<?>> dependencies = Lists.newArrayList();
+ for (AssistedConstructor<?> constructor : factoryMethodToConstructor.values()) {
+ for (Parameter parameter : constructor.getAllParameters()) {
+ if (!parameter.isProvidedByFactory()) {
+ dependencies.add(Dependency.get(parameter.getPrimaryBindingKey()));
+ }
+ }
+ }
+ return ImmutableSet.copyOf(dependencies);
+ }
+
+ public F get() {
+ InvocationHandler invocationHandler = new InvocationHandler() {
+ public Object invoke(Object proxy, Method method, Object[] creationArgs) throws Throwable {
+                // dispatch methods declared on Object.class (equals, hashCode, toString) to this handler
+ if (method.getDeclaringClass().equals(Object.class)) {
+ return method.invoke(this, creationArgs);
+ }
+
+ AssistedConstructor<?> constructor = factoryMethodToConstructor.get(method);
+ Object[] constructorArgs = gatherArgsForConstructor(constructor, creationArgs);
+ Object objectToReturn = constructor.newInstance(constructorArgs);
+ injector.injectMembers(objectToReturn);
+ return objectToReturn;
+ }
+
+ public Object[] gatherArgsForConstructor(
+ AssistedConstructor<?> constructor,
+ Object[] factoryArgs) {
+ int numParams = constructor.getAllParameters().size();
+ int argPosition = 0;
+ Object[] result = new Object[numParams];
+
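+                    // Fill constructor parameters in order: @Assisted slots take the next factory
+                    // argument, all other slots are resolved from the injector.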
+ for (int i = 0; i < numParams; i++) {
+ Parameter parameter = constructor.getAllParameters().get(i);
+ if (parameter.isProvidedByFactory()) {
+ result[i] = factoryArgs[argPosition];
+ argPosition++;
+ } else {
+ result[i] = parameter.getValue(injector);
+ }
+ }
+ return result;
+ }
+ };
+
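+        // Expose the factory interface as a dynamic proxy backed by the invocation handler above.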
+ @SuppressWarnings("unchecked") // we imprecisely treat the class literal of T as a Class<T>
+ Class<F> factoryRawType = (Class) factoryType.getRawType();
+ return factoryRawType.cast(Proxy.newProxyInstance(factoryRawType.getClassLoader(),
+ new Class[]{factoryRawType}, invocationHandler));
+ }
+
+ private static ConfigurationException newConfigurationException(String format, Object... args) {
+ return new ConfigurationException(ImmutableSet.of(new Message(Errors.format(format, args))));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java
new file mode 100644
index 0000000..32ae6c6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java
@@ -0,0 +1,262 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.ErrorsException;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.util.Providers;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkState;
+import static org.elasticsearch.common.inject.internal.Annotations.getKey;
+
+/**
+ * The newer implementation of factory provider. This implementation uses a child injector to
+ * create values.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @author dtm@google.com (Daniel Martin)
+ */
+final class FactoryProvider2<F> implements InvocationHandler, Provider<F> {
+
+ /**
+ * if a factory method parameter isn't annotated, it gets this annotation.
+ */
+ static final Assisted DEFAULT_ANNOTATION = new Assisted() {
+ public String value() {
+ return "";
+ }
+
+ public Class<? extends Annotation> annotationType() {
+ return Assisted.class;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof Assisted
+ && ((Assisted) o).value().equals("");
+ }
+
+ @Override
+ public int hashCode() {
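+            // Follows the Annotation.hashCode() contract: (127 * memberName.hashCode())
+            // XOR memberValue.hashCode(), for the single "value" member.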
+ return 127 * "value".hashCode() ^ "".hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "@" + Assisted.class.getName() + "(value=)";
+ }
+ };
+
+ /**
+ * the produced type, or null if all methods return concrete types
+ */
+ private final Key<?> producedType;
+ private final ImmutableMap<Method, Key<?>> returnTypesByMethod;
+ private final ImmutableMap<Method, ImmutableList<Key<?>>> paramTypes;
+
+ /**
+ * the hosting injector, or null if we haven't been initialized yet
+ */
+ private Injector injector;
+
+ /**
+ * the factory interface, implemented and provided
+ */
+ private final F factory;
+
+ /**
+ * @param factoryType a Java interface that defines one or more create methods.
+ * @param producedType a concrete type that is assignable to the return types of all factory
+ * methods.
+ */
+ FactoryProvider2(TypeLiteral<F> factoryType, Key<?> producedType) {
+ this.producedType = producedType;
+
+ Errors errors = new Errors();
+
+ @SuppressWarnings("unchecked") // we imprecisely treat the class literal of T as a Class<T>
+ Class<F> factoryRawType = (Class) factoryType.getRawType();
+
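+        // Pre-compute, for each factory method, the key for its return type and the
+        // @Assisted-annotated keys for its parameters.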
+ try {
+ ImmutableMap.Builder<Method, Key<?>> returnTypesBuilder = ImmutableMap.builder();
+ ImmutableMap.Builder<Method, ImmutableList<Key<?>>> paramTypesBuilder
+ = ImmutableMap.builder();
+ // TODO: also grab methods from superinterfaces
+ for (Method method : factoryRawType.getMethods()) {
+ Key<?> returnType = getKey(
+ factoryType.getReturnType(method), method, method.getAnnotations(), errors);
+ returnTypesBuilder.put(method, returnType);
+ List<TypeLiteral<?>> params = factoryType.getParameterTypes(method);
+ Annotation[][] paramAnnotations = method.getParameterAnnotations();
+ int p = 0;
+ List<Key<?>> keys = Lists.newArrayList();
+ for (TypeLiteral<?> param : params) {
+ Key<?> paramKey = getKey(param, method, paramAnnotations[p++], errors);
+ keys.add(assistKey(method, paramKey, errors));
+ }
+ paramTypesBuilder.put(method, ImmutableList.copyOf(keys));
+ }
+ returnTypesByMethod = returnTypesBuilder.build();
+ paramTypes = paramTypesBuilder.build();
+ } catch (ErrorsException e) {
+ throw new ConfigurationException(e.getErrors().getMessages());
+ }
+
+ factory = factoryRawType.cast(Proxy.newProxyInstance(factoryRawType.getClassLoader(),
+ new Class[]{factoryRawType}, this));
+ }
+
+ public F get() {
+ return factory;
+ }
+
+ /**
+ * Returns a key similar to {@code key}, but with an {@literal @}Assisted binding annotation.
+     * This fails if another binding annotation would be clobbered in the process. If the key already has
+ * the {@literal @}Assisted annotation, it is returned as-is to preserve any String value.
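+     * <p/>
+     * For example, an unannotated {@code Key<Date>} is returned as a {@code Key<Date>} carrying the
+     * default {@literal @}Assisted annotation, while a key already annotated with a different
+     * binding annotation such as {@literal @}Named results in a configuration error.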
+ */
+ private <T> Key<T> assistKey(Method method, Key<T> key, Errors errors) throws ErrorsException {
+ if (key.getAnnotationType() == null) {
+ return Key.get(key.getTypeLiteral(), DEFAULT_ANNOTATION);
+ } else if (key.getAnnotationType() == Assisted.class) {
+ return key;
+ } else {
+ errors.withSource(method).addMessage(
+ "Only @Assisted is allowed for factory parameters, but found @%s",
+ key.getAnnotationType());
+ throw errors.toException();
+ }
+ }
+
+ /**
+ * At injector-creation time, we initialize the invocation handler. At this time we make sure
+ * all factory methods will be able to build the target types.
+ */
+ @Inject
+ void initialize(Injector injector) {
+ if (this.injector != null) {
+ throw new ConfigurationException(ImmutableList.of(new Message(FactoryProvider2.class,
+ "Factories.create() factories may only be used in one Injector!")));
+ }
+
+ this.injector = injector;
+
+ for (Method method : returnTypesByMethod.keySet()) {
+ Object[] args = new Object[method.getParameterTypes().length];
+ Arrays.fill(args, "dummy object for validating Factories");
+ getBindingFromNewInjector(method, args); // throws if the binding isn't properly configured
+ }
+ }
+
+ /**
+ * Creates a child injector that binds the args, and returns the binding for the method's result.
+ */
+ public Binding<?> getBindingFromNewInjector(final Method method, final Object[] args) {
+ checkState(injector != null,
+ "Factories.create() factories cannot be used until they're initialized by Guice.");
+
+ final Key<?> returnType = returnTypesByMethod.get(method);
+
+ Module assistedModule = new AbstractModule() {
+ @SuppressWarnings("unchecked") // raw keys are necessary for the args array and return value
+ protected void configure() {
+ Binder binder = binder().withSource(method);
+
+ int p = 0;
+ for (Key<?> paramKey : paramTypes.get(method)) {
+ // Wrap in a Provider to cover null, and to prevent Guice from injecting the parameter
+ binder.bind((Key) paramKey).toProvider(Providers.of(args[p++]));
+ }
+
+ if (producedType != null && !returnType.equals(producedType)) {
+ binder.bind(returnType).to((Key) producedType);
+ } else {
+ binder.bind(returnType);
+ }
+ }
+ };
+
+ Injector forCreate = injector.createChildInjector(assistedModule);
+ return forCreate.getBinding(returnType);
+ }
+
+ /**
+ * When a factory method is invoked, we create a child injector that binds all parameters, then
+ * use that to get an instance of the return type.
+ */
+ public Object invoke(Object proxy, final Method method, final Object[] args) throws Throwable {
+ if (method.getDeclaringClass() == Object.class) {
+ return method.invoke(this, args);
+ }
+
+ Provider<?> provider = getBindingFromNewInjector(method, args).getProvider();
+ try {
+ return provider.get();
+ } catch (ProvisionException e) {
+ // if this is an exception declared by the factory method, throw it as-is
+ if (e.getErrorMessages().size() == 1) {
+ Message onlyError = Iterables.getOnlyElement(e.getErrorMessages());
+ Throwable cause = onlyError.getCause();
+ if (cause != null && canRethrow(method, cause)) {
+ throw cause;
+ }
+ }
+ throw e;
+ }
+ }
+
+ @Override
+ public String toString() {
+ return factory.getClass().getInterfaces()[0].getName()
+ + " for " + producedType.getTypeLiteral();
+ }
+
+ @Override
+ public boolean equals(Object o) {
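+        // Object methods on the proxy are routed to this handler, so the proxy and its
+        // handler compare equal.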
+ return o == this || o == factory;
+ }
+
+ /**
+ * Returns true if {@code thrown} can be thrown by {@code invoked} without wrapping.
+ */
+ static boolean canRethrow(Method invoked, Throwable thrown) {
+ if (thrown instanceof Error || thrown instanceof RuntimeException) {
+ return true;
+ }
+
+ for (Class<?> declared : invoked.getExceptionTypes()) {
+ if (declared.isInstance(thrown)) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java
new file mode 100644
index 0000000..a882b9f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java
@@ -0,0 +1,154 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import org.elasticsearch.common.inject.*;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Models a method or constructor parameter.
+ *
+ * @author jmourits@google.com (Jerome Mourits)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class Parameter {
+
+ private final Type type;
+ private final boolean isAssisted;
+ private final Annotation bindingAnnotation;
+ private final boolean isProvider;
+
+ public Parameter(Type type, Annotation[] annotations) {
+ this.type = type;
+ this.bindingAnnotation = getBindingAnnotation(annotations);
+ this.isAssisted = hasAssistedAnnotation(annotations);
+ this.isProvider = isProvider(type);
+ }
+
+ public boolean isProvidedByFactory() {
+ return isAssisted;
+ }
+
+ public Type getType() {
+ return type;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ if (isAssisted) {
+ result.append("@Assisted");
+ result.append(" ");
+ }
+ if (bindingAnnotation != null) {
+ result.append(bindingAnnotation.toString());
+ result.append(" ");
+ }
+ result.append(type.toString());
+ return result.toString();
+ }
+
+ private boolean hasAssistedAnnotation(Annotation[] annotations) {
+ for (Annotation annotation : annotations) {
+ if (annotation.annotationType().equals(Assisted.class)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+     * Returns the value to inject for this parameter: a provider if the parameter is
+     * {@code Provider}-typed, otherwise an instance resolved from the given injector.
+ */
+ public Object getValue(Injector injector) {
+ return isProvider
+ ? injector.getProvider(getBindingForType(getProvidedType(type)))
+ : injector.getInstance(getPrimaryBindingKey());
+ }
+
+ public boolean isBound(Injector injector) {
+ return isBound(injector, getPrimaryBindingKey())
+ || isBound(injector, fixAnnotations(getPrimaryBindingKey()));
+ }
+
+ private boolean isBound(Injector injector, Key<?> key) {
+ // This method is particularly lame - we really need an API that can test
+ // for any binding, implicit or explicit
+ try {
+ return injector.getBinding(key) != null;
+ } catch (ConfigurationException e) {
+ return false;
+ }
+ }
+
+ /**
+     * Replaces annotation instances with annotation types; this is only
+     * appropriate for testing whether a key is bound, not for injecting.
+ * <p/>
+ * See Guice bug 125,
+ * http://code.google.com/p/google-guice/issues/detail?id=125
+ */
+ public Key<?> fixAnnotations(Key<?> key) {
+ return key.getAnnotation() == null
+ ? key
+ : Key.get(key.getTypeLiteral(), key.getAnnotation().annotationType());
+ }
+
+ Key<?> getPrimaryBindingKey() {
+ return isProvider
+ ? getBindingForType(getProvidedType(type))
+ : getBindingForType(type);
+ }
+
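+    /**
+     * Returns {@code T} for a parameter declared as {@code Provider<T>}.
+     */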
+ private Type getProvidedType(Type type) {
+ return ((ParameterizedType) type).getActualTypeArguments()[0];
+ }
+
+ private boolean isProvider(Type type) {
+ return type instanceof ParameterizedType
+ && ((ParameterizedType) type).getRawType() == Provider.class;
+ }
+
+ private Key<?> getBindingForType(Type type) {
+ return bindingAnnotation != null
+ ? Key.get(type, bindingAnnotation)
+ : Key.get(type);
+ }
+
+ /**
+ * Returns the unique binding annotation from the specified list, or
+ * {@code null} if there are none.
+ *
+ * @throws IllegalStateException if multiple binding annotations exist.
+ */
+ private Annotation getBindingAnnotation(Annotation[] annotations) {
+ Annotation bindingAnnotation = null;
+ for (Annotation a : annotations) {
+ if (a.annotationType().getAnnotation(BindingAnnotation.class) != null) {
+ checkArgument(bindingAnnotation == null,
+ "Parameter has multiple binding annotations: %s and %s", bindingAnnotation, a);
+ bindingAnnotation = a;
+ }
+ }
+ return bindingAnnotation;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java
new file mode 100644
index 0000000..ba2bbd9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java
@@ -0,0 +1,66 @@
+/**
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.assistedinject;
+
+import org.elasticsearch.common.inject.TypeLiteral;
+
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A list of {@link TypeLiteral}s to match an injectable Constructor's assisted
+ * parameter types to the corresponding factory method.
+ *
+ * @author jmourits@google.com (Jerome Mourits)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class ParameterListKey {
+
+ private final List<Type> paramList;
+
+ public ParameterListKey(List<Type> paramList) {
+ this.paramList = new ArrayList<Type>(paramList);
+ }
+
+ public ParameterListKey(Type[] types) {
+ this(Arrays.asList(types));
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ }
+ if (!(o instanceof ParameterListKey)) {
+ return false;
+ }
+ ParameterListKey other = (ParameterListKey) o;
+ return paramList.equals(other.paramList);
+ }
+
+ @Override
+ public int hashCode() {
+ return paramList.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return paramList.toString();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/assistedinject/package-info.java b/src/main/java/org/elasticsearch/common/inject/assistedinject/package-info.java
new file mode 100644
index 0000000..92d4c31
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/assistedinject/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Extension for combining factory interfaces with injection; this extension requires {@code
+ * guice-assistedinject-2.0.jar}.
+ */
+package org.elasticsearch.common.inject.assistedinject; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedBindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedBindingBuilder.java
new file mode 100644
index 0000000..a093f7d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedBindingBuilder.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.binder;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface AnnotatedBindingBuilder<T> extends LinkedBindingBuilder<T> {
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ LinkedBindingBuilder<T> annotatedWith(
+ Class<? extends Annotation> annotationType);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ LinkedBindingBuilder<T> annotatedWith(Annotation annotation);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedConstantBindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedConstantBindingBuilder.java
new file mode 100644
index 0000000..bd92496
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedConstantBindingBuilder.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.binder;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface AnnotatedConstantBindingBuilder {
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ConstantBindingBuilder annotatedWith(
+ Class<? extends Annotation> annotationType);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ConstantBindingBuilder annotatedWith(Annotation annotation);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedElementBuilder.java b/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedElementBuilder.java
new file mode 100644
index 0000000..d41f308
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/AnnotatedElementBuilder.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.binder;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface AnnotatedElementBuilder {
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ void annotatedWith(Class<? extends Annotation> annotationType);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ void annotatedWith(Annotation annotation);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/ConstantBindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/binder/ConstantBindingBuilder.java
new file mode 100644
index 0000000..de56d49
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/ConstantBindingBuilder.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.binder;
+
+/**
+ * Binds to a constant value.
+ */
+public interface ConstantBindingBuilder {
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(String value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(int value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(long value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(boolean value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(double value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(float value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(short value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(char value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ void to(Class<?> value);
+
+ /**
+ * Binds constant to the given value.
+ */
+ <E extends Enum<E>> void to(E value);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/LinkedBindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/binder/LinkedBindingBuilder.java
new file mode 100644
index 0000000..831935c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/LinkedBindingBuilder.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.binder;
+
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.TypeLiteral;
+
+/**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface LinkedBindingBuilder<T> extends ScopedBindingBuilder {
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ScopedBindingBuilder to(Class<? extends T> implementation);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ScopedBindingBuilder to(TypeLiteral<? extends T> implementation);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ScopedBindingBuilder to(Key<? extends T> targetKey);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @see org.elasticsearch.common.inject.Injector#injectMembers
+ */
+ void toInstance(T instance);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @see org.elasticsearch.common.inject.Injector#injectMembers
+ */
+ ScopedBindingBuilder toProvider(Provider<? extends T> provider);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ScopedBindingBuilder toProvider(
+ Class<? extends Provider<? extends T>> providerType);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ ScopedBindingBuilder toProvider(
+ Key<? extends Provider<? extends T>> providerKey);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java
new file mode 100644
index 0000000..b25bed5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.binder;
+
+import org.elasticsearch.common.inject.Scope;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface ScopedBindingBuilder {
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ void in(Class<? extends Annotation> scopeAnnotation);
+
+ /**
+ * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}.
+ */
+ void in(Scope scope);
+
+ /**
+ * Instructs the {@link org.elasticsearch.common.inject.Injector} to eagerly initialize this
+ * singleton-scoped binding upon creation. Useful for application
+ * initialization logic. See the EDSL examples at
+ * {@link org.elasticsearch.common.inject.Binder}.
+ */
+ void asEagerSingleton();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/binder/package-info.java b/src/main/java/org/elasticsearch/common/inject/binder/package-info.java
new file mode 100644
index 0000000..83dccbf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/binder/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Interfaces which make up {@link org.elasticsearch.common.inject.Binder}'s
+ * expression language.
+ */
+package org.elasticsearch.common.inject.binder; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java
new file mode 100644
index 0000000..b1057a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java
@@ -0,0 +1,136 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Scope;
+import org.elasticsearch.common.inject.spi.Element;
+import org.elasticsearch.common.inject.spi.InstanceBinding;
+
+import java.lang.annotation.Annotation;
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Bind a value or constant.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public abstract class AbstractBindingBuilder<T> {
+
+ public static final String IMPLEMENTATION_ALREADY_SET = "Implementation is set more than once.";
+ public static final String SINGLE_INSTANCE_AND_SCOPE
+ = "Setting the scope is not permitted when binding to a single instance.";
+ public static final String SCOPE_ALREADY_SET = "Scope is set more than once.";
+ public static final String BINDING_TO_NULL = "Binding to null instances is not allowed. "
+ + "Use toProvider(Providers.of(null)) if this is your intended behaviour.";
+ public static final String CONSTANT_VALUE_ALREADY_SET = "Constant value is set more than once.";
+ public static final String ANNOTATION_ALREADY_SPECIFIED
+ = "More than one annotation is specified for this binding.";
+
+ protected static final Key<?> NULL_KEY = Key.get(Void.class);
+
+ protected List<Element> elements;
+ protected int position;
+ protected final Binder binder;
+ private BindingImpl<T> binding;
+
+ public AbstractBindingBuilder(Binder binder, List<Element> elements, Object source, Key<T> key) {
+ this.binder = binder;
+ this.elements = elements;
+ this.position = elements.size();
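+        // Register an untargetted placeholder binding; subsequent builder calls replace it
+        // in the elements list via setBinding().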
+ this.binding = new UntargettedBindingImpl<T>(source, key, Scoping.UNSCOPED);
+ elements.add(position, this.binding);
+ }
+
+ protected BindingImpl<T> getBinding() {
+ return binding;
+ }
+
+ protected BindingImpl<T> setBinding(BindingImpl<T> binding) {
+ this.binding = binding;
+ elements.set(position, binding);
+ return binding;
+ }
+
+ /**
+ * Sets the binding to a copy with the specified annotation on the bound key
+ */
+ protected BindingImpl<T> annotatedWithInternal(Class<? extends Annotation> annotationType) {
+ checkNotNull(annotationType, "annotationType");
+ checkNotAnnotated();
+ return setBinding(binding.withKey(
+ Key.get(this.binding.getKey().getTypeLiteral(), annotationType)));
+ }
+
+ /**
+ * Sets the binding to a copy with the specified annotation on the bound key
+ */
+ protected BindingImpl<T> annotatedWithInternal(Annotation annotation) {
+ checkNotNull(annotation, "annotation");
+ checkNotAnnotated();
+ return setBinding(binding.withKey(
+ Key.get(this.binding.getKey().getTypeLiteral(), annotation)));
+ }
+
+ public void in(final Class<? extends Annotation> scopeAnnotation) {
+ checkNotNull(scopeAnnotation, "scopeAnnotation");
+ checkNotScoped();
+ setBinding(getBinding().withScoping(Scoping.forAnnotation(scopeAnnotation)));
+ }
+
+ public void in(final Scope scope) {
+ checkNotNull(scope, "scope");
+ checkNotScoped();
+ setBinding(getBinding().withScoping(Scoping.forInstance(scope)));
+ }
+
+ public void asEagerSingleton() {
+ checkNotScoped();
+ setBinding(getBinding().withScoping(Scoping.EAGER_SINGLETON));
+ }
+
+ protected boolean keyTypeIsSet() {
+ return !Void.class.equals(binding.getKey().getTypeLiteral().getType());
+ }
+
+ protected void checkNotTargetted() {
+ if (!(binding instanceof UntargettedBindingImpl)) {
+ binder.addError(IMPLEMENTATION_ALREADY_SET);
+ }
+ }
+
+ protected void checkNotAnnotated() {
+ if (binding.getKey().getAnnotationType() != null) {
+ binder.addError(ANNOTATION_ALREADY_SPECIFIED);
+ }
+ }
+
+ protected void checkNotScoped() {
+ // Scoping isn't allowed when we have only one instance.
+ if (binding instanceof InstanceBinding) {
+ binder.addError(SINGLE_INSTANCE_AND_SCOPE);
+ return;
+ }
+
+ if (binding.getScoping().isExplicitlyScoped()) {
+ binder.addError(SCOPE_ALREADY_SET);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java b/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java
new file mode 100644
index 0000000..24b3b80
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java
@@ -0,0 +1,123 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.Classes;
+import org.elasticsearch.common.inject.BindingAnnotation;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.ScopeAnnotation;
+import org.elasticsearch.common.inject.TypeLiteral;
+
+import java.lang.annotation.Annotation;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.Member;
+
+/**
+ * Annotation utilities.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Annotations {
+
+ /**
+ * Returns true if the given annotation is retained at runtime.
+ */
+ public static boolean isRetainedAtRuntime(Class<? extends Annotation> annotationType) {
+ Retention retention = annotationType.getAnnotation(Retention.class);
+ return retention != null && retention.value() == RetentionPolicy.RUNTIME;
+ }
+
+ /**
+     * Returns the scope annotation on the given implementation class, or null if none is specified.
+ */
+ public static Class<? extends Annotation> findScopeAnnotation(
+ Errors errors, Class<?> implementation) {
+ return findScopeAnnotation(errors, implementation.getAnnotations());
+ }
+
+ /**
+ * Returns the scoping annotation, or null if there isn't one.
+ */
+ public static Class<? extends Annotation> findScopeAnnotation(Errors errors, Annotation[] annotations) {
+ Class<? extends Annotation> found = null;
+
+ for (Annotation annotation : annotations) {
+ if (annotation.annotationType().getAnnotation(ScopeAnnotation.class) != null) {
+ if (found != null) {
+ errors.duplicateScopeAnnotations(found, annotation.annotationType());
+ } else {
+ found = annotation.annotationType();
+ }
+ }
+ }
+
+ return found;
+ }
+
+ public static boolean isScopeAnnotation(Class<? extends Annotation> annotationType) {
+ return annotationType.getAnnotation(ScopeAnnotation.class) != null;
+ }
+
+ /**
+     * Adds an error if there is a misplaced annotation on {@code type}. Scoping
+ * annotations are not allowed on abstract classes or interfaces.
+ */
+ public static void checkForMisplacedScopeAnnotations(
+ Class<?> type, Object source, Errors errors) {
+ if (Classes.isConcrete(type)) {
+ return;
+ }
+
+ Class<? extends Annotation> scopeAnnotation = findScopeAnnotation(errors, type);
+ if (scopeAnnotation != null) {
+ errors.withSource(type).scopeAnnotationOnAbstractType(scopeAnnotation, type, source);
+ }
+ }
+
+ /**
+ * Gets a key for the given type, member and annotations.
+ */
+ public static Key<?> getKey(TypeLiteral<?> type, Member member, Annotation[] annotations,
+ Errors errors) throws ErrorsException {
+ int numErrorsBefore = errors.size();
+ Annotation found = findBindingAnnotation(errors, member, annotations);
+ errors.throwIfNewErrors(numErrorsBefore);
+ return found == null ? Key.get(type) : Key.get(type, found);
+ }
+
+ /**
+ * Returns the binding annotation on {@code member}, or null if there isn't one.
+ */
+ public static Annotation findBindingAnnotation(
+ Errors errors, Member member, Annotation[] annotations) {
+ Annotation found = null;
+
+ for (Annotation annotation : annotations) {
+ if (annotation.annotationType().getAnnotation(BindingAnnotation.class) != null) {
+ if (found != null) {
+ errors.duplicateBindingAnnotations(member,
+ found.annotationType(), annotation.annotationType());
+ } else {
+ found = annotation;
+ }
+ }
+ }
+
+ return found;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/AsynchronousComputationException.java b/src/main/java/org/elasticsearch/common/inject/internal/AsynchronousComputationException.java
new file mode 100644
index 0000000..880725a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/AsynchronousComputationException.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+/**
+ * Wraps an exception that occurred during a computation in a different thread.
+ *
+ * @author Bob Lee
+ */
+public class AsynchronousComputationException extends ComputationException {
+
+ public AsynchronousComputationException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java b/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java
new file mode 100644
index 0000000..9824382
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java
@@ -0,0 +1,134 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
+import org.elasticsearch.common.inject.spi.Element;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.lang.annotation.Annotation;
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Bind a non-constant key.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class BindingBuilder<T> extends AbstractBindingBuilder<T>
+ implements AnnotatedBindingBuilder<T> {
+
+ public BindingBuilder(Binder binder, List<Element> elements, Object source, Key<T> key) {
+ super(binder, elements, source, key);
+ }
+
+ public BindingBuilder<T> annotatedWith(Class<? extends Annotation> annotationType) {
+ annotatedWithInternal(annotationType);
+ return this;
+ }
+
+ public BindingBuilder<T> annotatedWith(Annotation annotation) {
+ annotatedWithInternal(annotation);
+ return this;
+ }
+
+ public BindingBuilder<T> to(Class<? extends T> implementation) {
+ return to(Key.get(implementation));
+ }
+
+ public BindingBuilder<T> to(TypeLiteral<? extends T> implementation) {
+ return to(Key.get(implementation));
+ }
+
+ public BindingBuilder<T> to(Key<? extends T> linkedKey) {
+ checkNotNull(linkedKey, "linkedKey");
+ checkNotTargetted();
+ BindingImpl<T> base = getBinding();
+ setBinding(new LinkedBindingImpl<T>(
+ base.getSource(), base.getKey(), base.getScoping(), linkedKey));
+ return this;
+ }
+
+ public void toInstance(T instance) {
+ checkNotTargetted();
+
+ // lookup the injection points, adding any errors to the binder's errors list
+ Set<InjectionPoint> injectionPoints;
+ if (instance != null) {
+ try {
+ injectionPoints = InjectionPoint.forInstanceMethodsAndFields(instance.getClass());
+ } catch (ConfigurationException e) {
+ for (Message message : e.getErrorMessages()) {
+ binder.addError(message);
+ }
+ injectionPoints = e.getPartialValue();
+ }
+ } else {
+ binder.addError(BINDING_TO_NULL);
+ injectionPoints = ImmutableSet.of();
+ }
+
+ BindingImpl<T> base = getBinding();
+ setBinding(new InstanceBindingImpl<T>(
+ base.getSource(), base.getKey(), base.getScoping(), injectionPoints, instance));
+ }
+
+ public BindingBuilder<T> toProvider(Provider<? extends T> provider) {
+ checkNotNull(provider, "provider");
+ checkNotTargetted();
+
+ // lookup the injection points, adding any errors to the binder's errors list
+ Set<InjectionPoint> injectionPoints;
+ try {
+ injectionPoints = InjectionPoint.forInstanceMethodsAndFields(provider.getClass());
+ } catch (ConfigurationException e) {
+ for (Message message : e.getErrorMessages()) {
+ binder.addError(message);
+ }
+ injectionPoints = e.getPartialValue();
+ }
+
+ BindingImpl<T> base = getBinding();
+ setBinding(new ProviderInstanceBindingImpl<T>(
+ base.getSource(), base.getKey(), base.getScoping(), injectionPoints, provider));
+ return this;
+ }
+
+ public BindingBuilder<T> toProvider(Class<? extends Provider<? extends T>> providerType) {
+ return toProvider(Key.get(providerType));
+ }
+
+ public BindingBuilder<T> toProvider(Key<? extends Provider<? extends T>> providerKey) {
+ checkNotNull(providerKey, "providerKey");
+ checkNotTargetted();
+
+ BindingImpl<T> base = getBinding();
+ setBinding(new LinkedProviderBindingImpl<T>(
+ base.getSource(), base.getKey(), base.getScoping(), providerKey));
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return "BindingBuilder<" + getBinding().getKey().getTypeLiteral() + ">";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/BindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/BindingImpl.java
new file mode 100644
index 0000000..2bca257
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/BindingImpl.java
@@ -0,0 +1,120 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Binding;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.spi.BindingScopingVisitor;
+import org.elasticsearch.common.inject.spi.ElementVisitor;
+import org.elasticsearch.common.inject.spi.InstanceBinding;
+
+/**
+ * @author crazybob@google.com (Bob Lee)
+ */
+public abstract class BindingImpl<T> implements Binding<T> {
+
+ private final Injector injector;
+ private final Key<T> key;
+ private final Object source;
+ private final Scoping scoping;
+ private final InternalFactory<? extends T> internalFactory;
+
+ public BindingImpl(Injector injector, Key<T> key, Object source,
+ InternalFactory<? extends T> internalFactory, Scoping scoping) {
+ this.injector = injector;
+ this.key = key;
+ this.source = source;
+ this.internalFactory = internalFactory;
+ this.scoping = scoping;
+ }
+
+ protected BindingImpl(Object source, Key<T> key, Scoping scoping) {
+ this.internalFactory = null;
+ this.injector = null;
+ this.source = source;
+ this.key = key;
+ this.scoping = scoping;
+ }
+
+ public Key<T> getKey() {
+ return key;
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ private volatile Provider<T> provider;
+
+ public Provider<T> getProvider() {
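+        // Lazily resolve and cache the provider; bindings that aren't attached to an
+        // injector cannot provide one.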
+ if (provider == null) {
+ if (injector == null) {
+ throw new UnsupportedOperationException("getProvider() not supported for module bindings");
+ }
+
+ provider = injector.getProvider(key);
+ }
+ return provider;
+ }
+
+ public InternalFactory<? extends T> getInternalFactory() {
+ return internalFactory;
+ }
+
+ public Scoping getScoping() {
+ return scoping;
+ }
+
+ /**
+ * Is this a constant binding? This returns true for constant bindings as
+ * well as toInstance() bindings.
+ */
+ public boolean isConstant() {
+ return this instanceof InstanceBinding;
+ }
+
+ public <V> V acceptVisitor(ElementVisitor<V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public <V> V acceptScopingVisitor(BindingScopingVisitor<V> visitor) {
+ return scoping.acceptVisitor(visitor);
+ }
+
+ protected BindingImpl<T> withScoping(Scoping scoping) {
+ throw new AssertionError();
+ }
+
+ protected BindingImpl<T> withKey(Key<T> key) {
+ throw new AssertionError();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(Binding.class)
+ .add("key", key)
+ .add("scope", scoping)
+ .add("source", source)
+ .toString();
+ }
+
+ public Injector getInjector() {
+ return injector;
+ }
+}
\ No newline at end of file
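
Note how getProvider() above is resolved lazily and cached in a volatile field, and only works once the binding is attached to an injector. A caller-side sketch, where MyModule and MyService are hypothetical:

    Injector injector = Guice.createInjector(new MyModule());
    Binding<MyService> binding = injector.getBinding(Key.get(MyService.class));
    Provider<MyService> provider = binding.getProvider(); // fetched lazily, then cached
    MyService service = provider.get();

On a module-stage binding (the two-argument constructor, where injector is null), the same call throws UnsupportedOperationException.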
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ComputationException.java b/src/main/java/org/elasticsearch/common/inject/internal/ComputationException.java
new file mode 100644
index 0000000..67d5bd2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ComputationException.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+/**
+ * Wraps an exception that occurred during a computation.
+ */
+public class ComputationException extends RuntimeException {
+
+ public ComputationException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ConstantBindingBuilderImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/ConstantBindingBuilderImpl.java
new file mode 100644
index 0000000..7d9e91f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ConstantBindingBuilderImpl.java
@@ -0,0 +1,128 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
+import org.elasticsearch.common.inject.binder.ConstantBindingBuilder;
+import org.elasticsearch.common.inject.spi.Element;
+import org.elasticsearch.common.inject.spi.InjectionPoint;
+
+import java.lang.annotation.Annotation;
+import java.util.List;
+
+/**
+ * Bind a constant.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public final class ConstantBindingBuilderImpl<T>
+ extends AbstractBindingBuilder<T>
+ implements AnnotatedConstantBindingBuilder, ConstantBindingBuilder {
+
+ @SuppressWarnings("unchecked") // constant bindings start out with T unknown
+ public ConstantBindingBuilderImpl(Binder binder, List<Element> elements, Object source) {
+ super(binder, elements, source, (Key<T>) NULL_KEY);
+ }
+
+ public ConstantBindingBuilder annotatedWith(Class<? extends Annotation> annotationType) {
+ annotatedWithInternal(annotationType);
+ return this;
+ }
+
+ public ConstantBindingBuilder annotatedWith(Annotation annotation) {
+ annotatedWithInternal(annotation);
+ return this;
+ }
+
+ public void to(final String value) {
+ toConstant(String.class, value);
+ }
+
+ public void to(final int value) {
+ toConstant(Integer.class, value);
+ }
+
+ public void to(final long value) {
+ toConstant(Long.class, value);
+ }
+
+ public void to(final boolean value) {
+ toConstant(Boolean.class, value);
+ }
+
+ public void to(final double value) {
+ toConstant(Double.class, value);
+ }
+
+ public void to(final float value) {
+ toConstant(Float.class, value);
+ }
+
+ public void to(final short value) {
+ toConstant(Short.class, value);
+ }
+
+ public void to(final char value) {
+ toConstant(Character.class, value);
+ }
+
+ public void to(final Class<?> value) {
+ toConstant(Class.class, value);
+ }
+
+ public <E extends Enum<E>> void to(final E value) {
+ toConstant(value.getDeclaringClass(), value);
+ }
+
+ private void toConstant(Class<?> type, Object instance) {
+ // this type will define T, so these assignments are safe
+ @SuppressWarnings("unchecked")
+ Class<T> typeAsClassT = (Class<T>) type;
+ @SuppressWarnings("unchecked")
+ T instanceAsT = (T) instance;
+
+ if (keyTypeIsSet()) {
+ binder.addError(CONSTANT_VALUE_ALREADY_SET);
+ return;
+ }
+
+ BindingImpl<T> base = getBinding();
+ Key<T> key;
+ if (base.getKey().getAnnotation() != null) {
+ key = Key.get(typeAsClassT, base.getKey().getAnnotation());
+ } else if (base.getKey().getAnnotationType() != null) {
+ key = Key.get(typeAsClassT, base.getKey().getAnnotationType());
+ } else {
+ key = Key.get(typeAsClassT);
+ }
+
+ if (instanceAsT == null) {
+ binder.addError(BINDING_TO_NULL);
+ }
+
+ setBinding(new InstanceBindingImpl<T>(
+ base.getSource(), key, base.getScoping(), ImmutableSet.<InjectionPoint>of(), instanceAsT));
+ }
+
+ @Override
+ public String toString() {
+ return "ConstantBindingBuilder";
+ }
+}
\ No newline at end of file
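
To make the dispatch above concrete: every to(...) overload funnels into toConstant(type, instance), which derives the final Key from the annotation captured earlier. A hypothetical module usage, where @Port is an assumed @BindingAnnotation:

    public class SettingsModule extends AbstractModule {
        @Override
        protected void configure() {
            // to(int) -> toConstant(Integer.class, 9300); the resulting key is
            // Key.get(Integer.class, Port.class), stored as an InstanceBindingImpl.
            bindConstant().annotatedWith(Port.class).to(9300);
        }
    }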
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java b/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java
new file mode 100644
index 0000000..a339746
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java
@@ -0,0 +1,124 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Context of a dependency construction. Used to manage circular references.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class ConstructionContext<T> {
+
+ T currentReference;
+ boolean constructing;
+
+ List<DelegatingInvocationHandler<T>> invocationHandlers;
+
+ public T getCurrentReference() {
+ return currentReference;
+ }
+
+ public void removeCurrentReference() {
+ this.currentReference = null;
+ }
+
+ public void setCurrentReference(T currentReference) {
+ this.currentReference = currentReference;
+ }
+
+ public boolean isConstructing() {
+ return constructing;
+ }
+
+ public void startConstruction() {
+ this.constructing = true;
+ }
+
+ public void finishConstruction() {
+ this.constructing = false;
+ invocationHandlers = null;
+ }
+
+ public Object createProxy(Errors errors, Class<?> expectedType) throws ErrorsException {
+ // TODO: if I create a proxy which implements all the interfaces of
+ // the implementation type, I'll be able to get away with one proxy
+ // instance (as opposed to one per caller).
+
+ if (!expectedType.isInterface()) {
+ throw errors.cannotSatisfyCircularDependency(expectedType).toException();
+ }
+
+ if (invocationHandlers == null) {
+ invocationHandlers = new ArrayList<DelegatingInvocationHandler<T>>();
+ }
+
+ DelegatingInvocationHandler<T> invocationHandler
+ = new DelegatingInvocationHandler<T>();
+ invocationHandlers.add(invocationHandler);
+
+ // ES: Replaced, since we don't use bytecode generation; just get the type's class loader, or the system class loader if it's null
+ //ClassLoader classLoader = BytecodeGen.getClassLoader(expectedType);
+ ClassLoader classLoader = expectedType.getClassLoader() == null ? ClassLoader.getSystemClassLoader() : expectedType.getClassLoader();
+ return expectedType.cast(Proxy.newProxyInstance(classLoader,
+ new Class[]{expectedType}, invocationHandler));
+ }
+
+ public void setProxyDelegates(T delegate) {
+ if (invocationHandlers != null) {
+ for (DelegatingInvocationHandler<T> handler : invocationHandlers) {
+ handler.setDelegate(delegate);
+ }
+ }
+ }
+
+ static class DelegatingInvocationHandler<T> implements InvocationHandler {
+
+ T delegate;
+
+ public Object invoke(Object proxy, Method method, Object[] args)
+ throws Throwable {
+ if (delegate == null) {
+ throw new IllegalStateException("This is a proxy used to support"
+ + " circular references involving constructors. The object we're"
+ + " proxying is not constructed yet. Please wait until after"
+ + " injection has completed to use this object.");
+ }
+
+ try {
+ // This path appears not to be covered by tests
+ return method.invoke(delegate, args);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(e);
+ } catch (IllegalArgumentException e) {
+ throw new RuntimeException(e);
+ } catch (InvocationTargetException e) {
+ throw e.getTargetException();
+ }
+ }
+
+ void setDelegate(T delegate) {
+ this.delegate = delegate;
+ }
+ }
+}
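
The proxy machinery above only works when the circularly requested type is an interface (otherwise cannotSatisfyCircularDependency is reported). A minimal sketch of a cycle it can break; all names are hypothetical:

    public interface A { void ping(); }

    public class AImpl implements A {
        private final B b;
        @Inject
        public AImpl(B b) { this.b = b; }  // constructing AImpl needs B...
        @Override
        public void ping() {}
    }

    public class B {
        private final A a;  // ...and B needs A: during the cycle it first receives a
        @Inject             // DelegatingInvocationHandler-backed proxy, whose delegate
        public B(A a) { this.a = a; }  // is set once AImpl has been constructed.
    }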
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ErrorHandler.java b/src/main/java/org/elasticsearch/common/inject/internal/ErrorHandler.java
new file mode 100644
index 0000000..86f1321
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ErrorHandler.java
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.spi.Message;
+
+/**
+ * Handles errors in the Injector.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface ErrorHandler {
+
+ /**
+ * Handles an error.
+ */
+ void handle(Object source, Errors errors);
+
+ /**
+ * Handles a user-reported error.
+ */
+ void handle(Message message);
+}
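
A minimal sketch of an implementation, assuming a handler that simply accumulates messages (the injector's real handlers are defined elsewhere, outside this diff):

    class CollectingErrorHandler implements ErrorHandler {
        private final Errors collected = new Errors();

        @Override
        public void handle(Object source, Errors errors) {
            collected.merge(errors);        // fold in errors reported against a source
        }

        @Override
        public void handle(Message message) {
            collected.addMessage(message);  // record a user-reported error
        }
    }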
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/src/main/java/org/elasticsearch/common/inject/internal/Errors.java
new file mode 100644
index 0000000..cd9d32f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Errors.java
@@ -0,0 +1,640 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.spi.*;
+
+import java.io.PrintWriter;
+import java.io.Serializable;
+import java.io.StringWriter;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Member;
+import java.lang.reflect.Type;
+import java.util.*;
+
+/**
+ * A collection of error messages. If this type is passed as a method parameter, the method is
+ * considered to have executed successfully only if new errors were not added to this collection.
+ * <p/>
+ * <p>Errors can be chained to provide additional context. To add context, call {@link #withSource}
+ * to create a new Errors instance that contains additional context. All messages added to the
+ * returned instance will contain full context.
+ * <p/>
+ * <p>To avoid messages with redundant context, {@link #withSource} should be added sparingly. A
+ * good rule of thumb is to assume a method's caller has already specified enough context to
+ * identify that method. When calling a method that's defined in a different context, call that
+ * method with an errors object that includes its context.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public final class Errors implements Serializable {
+
+ /**
+ * The root errors object. Used to access the list of error messages.
+ */
+ private final Errors root;
+
+ /**
+ * The parent errors object. Used to obtain the chain of source objects.
+ */
+ private final Errors parent;
+
+ /**
+ * The leaf source for errors added here.
+ */
+ private final Object source;
+
+ /**
+ * null unless (root == this) and error messages exist. Never an empty list.
+ */
+ private List<Message> errors; // lazy, use getErrorsForAdd()
+
+ public Errors() {
+ this.root = this;
+ this.parent = null;
+ this.source = SourceProvider.UNKNOWN_SOURCE;
+ }
+
+ public Errors(Object source) {
+ this.root = this;
+ this.parent = null;
+ this.source = source;
+ }
+
+ private Errors(Errors parent, Object source) {
+ this.root = parent.root;
+ this.parent = parent;
+ this.source = source;
+ }
+
+ /**
+ * Returns an instance that uses {@code source} as a reference point for newly added errors.
+ */
+ public Errors withSource(Object source) {
+ return source == SourceProvider.UNKNOWN_SOURCE
+ ? this
+ : new Errors(this, source);
+ }
+
+ /**
+ * We use a fairly generic error message here. The motivation is to share the
+ * same message for both bind-time errors:
+ * <pre><code>Guice.createInjector(new AbstractModule() {
+ * public void configure() {
+ * bind(Runnable.class);
+ * }
+ * })</code></pre>
+ * ...and provide-time errors:
+ * <pre><code>Guice.createInjector().getInstance(Runnable.class);</code></pre>
+ * Otherwise we need to know who's calling when resolving a just-in-time
+ * binding, which makes things unnecessarily complex.
+ */
+ public Errors missingImplementation(Key key) {
+ return addMessage("No implementation for %s was bound.", key);
+ }
+
+ public Errors converterReturnedNull(String stringValue, Object source,
+ TypeLiteral<?> type, MatcherAndConverter matchingConverter) {
+ return addMessage("Received null converting '%s' (bound at %s) to %s%n"
+ + " using %s.",
+ stringValue, convert(source), type, matchingConverter);
+ }
+
+ public Errors conversionTypeError(String stringValue, Object source, TypeLiteral<?> type,
+ MatcherAndConverter matchingConverter, Object converted) {
+ return addMessage("Type mismatch converting '%s' (bound at %s) to %s%n"
+ + " using %s.%n"
+ + " Converter returned %s.",
+ stringValue, convert(source), type, matchingConverter, converted);
+ }
+
+ public Errors conversionError(String stringValue, Object source,
+ TypeLiteral<?> type, MatcherAndConverter matchingConverter, RuntimeException cause) {
+ return errorInUserCode(cause, "Error converting '%s' (bound at %s) to %s%n"
+ + " using %s.%n"
+ + " Reason: %s",
+ stringValue, convert(source), type, matchingConverter, cause);
+ }
+
+ public Errors ambiguousTypeConversion(String stringValue, Object source, TypeLiteral<?> type,
+ MatcherAndConverter a, MatcherAndConverter b) {
+ return addMessage("Multiple converters can convert '%s' (bound at %s) to %s:%n"
+ + " %s and%n"
+ + " %s.%n"
+ + " Please adjust your type converter configuration to avoid overlapping matches.",
+ stringValue, convert(source), type, a, b);
+ }
+
+ public Errors bindingToProvider() {
+ return addMessage("Binding to Provider is not allowed.");
+ }
+
+ public Errors subtypeNotProvided(Class<? extends Provider<?>> providerType,
+ Class<?> type) {
+ return addMessage("%s doesn't provide instances of %s.", providerType, type);
+ }
+
+ public Errors notASubtype(Class<?> implementationType, Class<?> type) {
+ return addMessage("%s doesn't extend %s.", implementationType, type);
+ }
+
+ public Errors recursiveImplementationType() {
+ return addMessage("@ImplementedBy points to the same class it annotates.");
+ }
+
+ public Errors recursiveProviderType() {
+ return addMessage("@ProvidedBy points to the same class it annotates.");
+ }
+
+ public Errors missingRuntimeRetention(Object source) {
+ return addMessage("Please annotate with @Retention(RUNTIME).%n"
+ + " Bound at %s.", convert(source));
+ }
+
+ public Errors missingScopeAnnotation() {
+ return addMessage("Please annotate with @ScopeAnnotation.");
+ }
+
+ public Errors optionalConstructor(Constructor constructor) {
+ return addMessage("%s is annotated @Inject(optional=true), "
+ + "but constructors cannot be optional.", constructor);
+ }
+
+ public Errors cannotBindToGuiceType(String simpleName) {
+ return addMessage("Binding to core guice framework type is not allowed: %s.", simpleName);
+ }
+
+ public Errors scopeNotFound(Class<? extends Annotation> scopeAnnotation) {
+ return addMessage("No scope is bound to %s.", scopeAnnotation);
+ }
+
+ public Errors scopeAnnotationOnAbstractType(
+ Class<? extends Annotation> scopeAnnotation, Class<?> type, Object source) {
+ return addMessage("%s is annotated with %s, but scope annotations are not supported "
+ + "for abstract types.%n Bound at %s.", type, scopeAnnotation, convert(source));
+ }
+
+ public Errors misplacedBindingAnnotation(Member member, Annotation bindingAnnotation) {
+ return addMessage("%s is annotated with %s, but binding annotations should be applied "
+ + "to its parameters instead.", member, bindingAnnotation);
+ }
+
+ private static final String CONSTRUCTOR_RULES =
+ "Classes must have either one (and only one) constructor "
+ + "annotated with @Inject or a zero-argument constructor that is not private.";
+
+ public Errors missingConstructor(Class<?> implementation) {
+ return addMessage("Could not find a suitable constructor in %s. " + CONSTRUCTOR_RULES,
+ implementation);
+ }
+
+ public Errors tooManyConstructors(Class<?> implementation) {
+ return addMessage("%s has more than one constructor annotated with @Inject. "
+ + CONSTRUCTOR_RULES, implementation);
+ }
+
+ public Errors duplicateScopes(Scope existing,
+ Class<? extends Annotation> annotationType, Scope scope) {
+ return addMessage("Scope %s is already bound to %s. Cannot bind %s.", existing,
+ annotationType, scope);
+ }
+
+ public Errors voidProviderMethod() {
+ return addMessage("Provider methods must return a value. Do not return void.");
+ }
+
+ public Errors missingConstantValues() {
+ return addMessage("Missing constant value. Please call to(...).");
+ }
+
+ public Errors cannotInjectInnerClass(Class<?> type) {
+ return addMessage("Injecting into inner classes is not supported. "
+ + "Please use a 'static' class (top-level or nested) instead of %s.", type);
+ }
+
+ public Errors duplicateBindingAnnotations(Member member,
+ Class<? extends Annotation> a, Class<? extends Annotation> b) {
+ return addMessage("%s has more than one annotation annotated with @BindingAnnotation: "
+ + "%s and %s", member, a, b);
+ }
+
+ public Errors duplicateScopeAnnotations(
+ Class<? extends Annotation> a, Class<? extends Annotation> b) {
+ return addMessage("More than one scope annotation was found: %s and %s.", a, b);
+ }
+
+ public Errors recursiveBinding() {
+ return addMessage("Binding points to itself.");
+ }
+
+ public Errors bindingAlreadySet(Key<?> key, Object source) {
+ return addMessage("A binding to %s was already configured at %s.", key, convert(source));
+ }
+
+ public Errors childBindingAlreadySet(Key<?> key) {
+ return addMessage("A binding to %s already exists on a child injector.", key);
+ }
+
+ public Errors errorInjectingMethod(Throwable cause) {
+ return errorInUserCode(cause, "Error injecting method, %s", cause);
+ }
+
+ public Errors errorNotifyingTypeListener(TypeListenerBinding listener,
+ TypeLiteral<?> type, Throwable cause) {
+ return errorInUserCode(cause,
+ "Error notifying TypeListener %s (bound at %s) of %s.%n"
+ + " Reason: %s",
+ listener.getListener(), convert(listener.getSource()), type, cause);
+ }
+
+ public Errors errorInjectingConstructor(Throwable cause) {
+ return errorInUserCode(cause, "Error injecting constructor, %s", cause);
+ }
+
+ public Errors errorInProvider(RuntimeException runtimeException) {
+ return errorInUserCode(runtimeException, "Error in custom provider, %s", runtimeException);
+ }
+
+ public Errors errorInUserInjector(
+ MembersInjector<?> listener, TypeLiteral<?> type, RuntimeException cause) {
+ return errorInUserCode(cause, "Error injecting %s using %s.%n"
+ + " Reason: %s", type, listener, cause);
+ }
+
+ public Errors errorNotifyingInjectionListener(
+ InjectionListener<?> listener, TypeLiteral<?> type, RuntimeException cause) {
+ return errorInUserCode(cause, "Error notifying InjectionListener %s of %s.%n"
+ + " Reason: %s", listener, type, cause);
+ }
+
+ public void exposedButNotBound(Key<?> key) {
+ addMessage("Could not expose() %s, it must be explicitly bound.", key);
+ }
+
+ public static Collection<Message> getMessagesFromThrowable(Throwable throwable) {
+ if (throwable instanceof ProvisionException) {
+ return ((ProvisionException) throwable).getErrorMessages();
+ } else if (throwable instanceof ConfigurationException) {
+ return ((ConfigurationException) throwable).getErrorMessages();
+ } else if (throwable instanceof CreationException) {
+ return ((CreationException) throwable).getErrorMessages();
+ } else {
+ return ImmutableSet.of();
+ }
+ }
+
+ public Errors errorInUserCode(Throwable cause, String messageFormat, Object... arguments) {
+ Collection<Message> messages = getMessagesFromThrowable(cause);
+
+ if (!messages.isEmpty()) {
+ return merge(messages);
+ } else {
+ return addMessage(cause, messageFormat, arguments);
+ }
+ }
+
+ public Errors cannotInjectRawProvider() {
+ return addMessage("Cannot inject a Provider that has no type parameter");
+ }
+
+ public Errors cannotInjectRawMembersInjector() {
+ return addMessage("Cannot inject a MembersInjector that has no type parameter");
+ }
+
+ public Errors cannotInjectTypeLiteralOf(Type unsupportedType) {
+ return addMessage("Cannot inject a TypeLiteral of %s", unsupportedType);
+ }
+
+ public Errors cannotInjectRawTypeLiteral() {
+ return addMessage("Cannot inject a TypeLiteral that has no type parameter");
+ }
+
+ public Errors cannotSatisfyCircularDependency(Class<?> expectedType) {
+ return addMessage(
+ "Tried proxying %s to support a circular dependency, but it is not an interface.",
+ expectedType);
+ }
+
+ public void throwCreationExceptionIfErrorsExist() {
+ if (!hasErrors()) {
+ return;
+ }
+
+ throw new CreationException(getMessages());
+ }
+
+ public void throwConfigurationExceptionIfErrorsExist() {
+ if (!hasErrors()) {
+ return;
+ }
+
+ throw new ConfigurationException(getMessages());
+ }
+
+ public void throwProvisionExceptionIfErrorsExist() {
+ if (!hasErrors()) {
+ return;
+ }
+
+ throw new ProvisionException(getMessages());
+ }
+
+ private Message merge(Message message) {
+ List<Object> sources = Lists.newArrayList();
+ sources.addAll(getSources());
+ sources.addAll(message.getSources());
+ return new Message(sources, message.getMessage(), message.getCause());
+ }
+
+ public Errors merge(Collection<Message> messages) {
+ for (Message message : messages) {
+ addMessage(merge(message));
+ }
+ return this;
+ }
+
+ public Errors merge(Errors moreErrors) {
+ if (moreErrors.root == root || moreErrors.root.errors == null) {
+ return this;
+ }
+
+ merge(moreErrors.root.errors);
+ return this;
+ }
+
+ public List<Object> getSources() {
+ List<Object> sources = Lists.newArrayList();
+ for (Errors e = this; e != null; e = e.parent) {
+ if (e.source != SourceProvider.UNKNOWN_SOURCE) {
+ sources.add(0, e.source);
+ }
+ }
+ return sources;
+ }
+
+ public void throwIfNewErrors(int expectedSize) throws ErrorsException {
+ if (size() == expectedSize) {
+ return;
+ }
+
+ throw toException();
+ }
+
+ public ErrorsException toException() {
+ return new ErrorsException(this);
+ }
+
+ public boolean hasErrors() {
+ return root.errors != null;
+ }
+
+ public Errors addMessage(String messageFormat, Object... arguments) {
+ return addMessage(null, messageFormat, arguments);
+ }
+
+ private Errors addMessage(Throwable cause, String messageFormat, Object... arguments) {
+ String message = format(messageFormat, arguments);
+ addMessage(new Message(getSources(), message, cause));
+ return this;
+ }
+
+ public Errors addMessage(Message message) {
+ if (root.errors == null) {
+ root.errors = Lists.newArrayList();
+ }
+ root.errors.add(message);
+ return this;
+ }
+
+ public static String format(String messageFormat, Object... arguments) {
+ for (int i = 0; i < arguments.length; i++) {
+ arguments[i] = Errors.convert(arguments[i]);
+ }
+ return String.format(Locale.ROOT, messageFormat, arguments);
+ }
+
+ public List<Message> getMessages() {
+ if (root.errors == null) {
+ return ImmutableList.of();
+ }
+
+ List<Message> result = Lists.newArrayList(root.errors);
+ CollectionUtil.timSort(result, new Comparator<Message>() {
+ public int compare(Message a, Message b) {
+ return a.getSource().compareTo(b.getSource());
+ }
+ });
+
+ return result;
+ }
+
+ /**
+ * Returns the formatted message for an exception with the specified messages.
+ */
+ public static String format(String heading, Collection<Message> errorMessages) {
+ final Formatter fmt = new Formatter(Locale.ROOT);
+ try {
+ fmt.format(heading).format(":%n%n");
+ int index = 1;
+ boolean displayCauses = getOnlyCause(errorMessages) == null;
+
+ for (Message errorMessage : errorMessages) {
+ fmt.format("%s) %s%n", index++, errorMessage.getMessage());
+
+ List<Object> dependencies = errorMessage.getSources();
+ for (int i = dependencies.size() - 1; i >= 0; i--) {
+ Object source = dependencies.get(i);
+ formatSource(fmt, source);
+ }
+
+ Throwable cause = errorMessage.getCause();
+ if (displayCauses && cause != null) {
+ StringWriter writer = new StringWriter();
+ cause.printStackTrace(new PrintWriter(writer));
+ fmt.format("Caused by: %s", writer.getBuffer());
+ }
+
+ fmt.format("%n");
+ }
+
+ if (errorMessages.size() == 1) {
+ fmt.format("1 error");
+ } else {
+ fmt.format("%s errors", errorMessages.size());
+ }
+
+ return fmt.toString();
+ } finally {
+ fmt.close();
+ }
+ }
+
+ /**
+ * Returns {@code value} if it is non-null or allowed to be null. Otherwise a message is added and
+ * an {@code ErrorsException} is thrown.
+ */
+ public <T> T checkForNull(T value, Object source, Dependency<?> dependency)
+ throws ErrorsException {
+ if (value != null || dependency.isNullable()) {
+ return value;
+ }
+
+ int parameterIndex = dependency.getParameterIndex();
+ String parameterName = (parameterIndex != -1)
+ ? "parameter " + parameterIndex + " of "
+ : "";
+ addMessage("null returned by binding at %s%n but %s%s is not @Nullable",
+ source, parameterName, dependency.getInjectionPoint().getMember());
+
+ throw toException();
+ }
+
+ /**
+ * Returns the cause throwable if there is exactly one cause in {@code messages}. If there are
+ * zero or multiple messages with causes, null is returned.
+ */
+ public static Throwable getOnlyCause(Collection<Message> messages) {
+ Throwable onlyCause = null;
+ for (Message message : messages) {
+ Throwable messageCause = message.getCause();
+ if (messageCause == null) {
+ continue;
+ }
+
+ if (onlyCause != null) {
+ return null;
+ }
+
+ onlyCause = messageCause;
+ }
+
+ return onlyCause;
+ }
+
+ public int size() {
+ return root.errors == null ? 0 : root.errors.size();
+ }
+
+ private static abstract class Converter<T> {
+
+ final Class<T> type;
+
+ Converter(Class<T> type) {
+ this.type = type;
+ }
+
+ boolean appliesTo(Object o) {
+ return type.isAssignableFrom(o.getClass());
+ }
+
+ String convert(Object o) {
+ return toString(type.cast(o));
+ }
+
+ abstract String toString(T t);
+ }
+
+ private static final Collection<Converter<?>> converters = ImmutableList.of(
+ new Converter<Class>(Class.class) {
+ public String toString(Class c) {
+ return c.getName();
+ }
+ },
+ new Converter<Member>(Member.class) {
+ public String toString(Member member) {
+ return MoreTypes.toString(member);
+ }
+ },
+ new Converter<Key>(Key.class) {
+ public String toString(Key key) {
+ if (key.getAnnotationType() != null) {
+ return key.getTypeLiteral() + " annotated with "
+ + (key.getAnnotation() != null ? key.getAnnotation() : key.getAnnotationType());
+ } else {
+ return key.getTypeLiteral().toString();
+ }
+ }
+ }
+ );
+
+ public static Object convert(Object o) {
+ for (Converter<?> converter : converters) {
+ if (converter.appliesTo(o)) {
+ return converter.convert(o);
+ }
+ }
+ return o;
+ }
+
+ public static void formatSource(Formatter formatter, Object source) {
+ if (source instanceof Dependency) {
+ Dependency<?> dependency = (Dependency<?>) source;
+ InjectionPoint injectionPoint = dependency.getInjectionPoint();
+ if (injectionPoint != null) {
+ formatInjectionPoint(formatter, dependency, injectionPoint);
+ } else {
+ formatSource(formatter, dependency.getKey());
+ }
+
+ } else if (source instanceof InjectionPoint) {
+ formatInjectionPoint(formatter, null, (InjectionPoint) source);
+
+ } else if (source instanceof Class) {
+ formatter.format(" at %s%n", StackTraceElements.forType((Class<?>) source));
+
+ } else if (source instanceof Member) {
+ formatter.format(" at %s%n", StackTraceElements.forMember((Member) source));
+
+ } else if (source instanceof TypeLiteral) {
+ formatter.format(" while locating %s%n", source);
+
+ } else if (source instanceof Key) {
+ Key<?> key = (Key<?>) source;
+ formatter.format(" while locating %s%n", convert(key));
+
+ } else {
+ formatter.format(" at %s%n", source);
+ }
+ }
+
+ public static void formatInjectionPoint(Formatter formatter, Dependency<?> dependency,
+ InjectionPoint injectionPoint) {
+ Member member = injectionPoint.getMember();
+ Class<? extends Member> memberType = MoreTypes.memberType(member);
+
+ if (memberType == Field.class) {
+ dependency = injectionPoint.getDependencies().get(0);
+ formatter.format(" while locating %s%n", convert(dependency.getKey()));
+ formatter.format(" for field at %s%n", StackTraceElements.forMember(member));
+
+ } else if (dependency != null) {
+ formatter.format(" while locating %s%n", convert(dependency.getKey()));
+ formatter.format(" for parameter %s at %s%n",
+ dependency.getParameterIndex(), StackTraceElements.forMember(member));
+
+ } else {
+ formatSource(formatter, injectionPoint.getMember());
+ }
+ }
+}
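
Pulling the pieces of this class together, the intended calling protocol looks roughly like this; someSource, someKey, instance and the null check are stand-ins for real injector logic:

    Errors errors = new Errors();
    int before = errors.size();

    // withSource() returns a child whose messages carry the extra context;
    // messages still land in the shared root list.
    Errors scoped = errors.withSource(someSource);
    if (instance == null) {
        scoped.addMessage("Binding to null instance for %s.", someKey);
    }

    // Throws ErrorsException only if messages were added since 'before'.
    errors.throwIfNewErrors(before);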
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ErrorsException.java b/src/main/java/org/elasticsearch/common/inject/internal/ErrorsException.java
new file mode 100644
index 0000000..c570853
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ErrorsException.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject.internal;
+
+/**
+ * Indicates that a result could not be returned while preparing or resolving a binding. The caller
+ * should {@link Errors#merge(Errors) merge} the errors from this exception with their existing
+ * errors.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class ErrorsException extends Exception {
+
+ private final Errors errors;
+
+ public ErrorsException(Errors errors) {
+ this.errors = errors;
+ }
+
+ public Errors getErrors() {
+ return errors;
+ }
+}
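
The merge pattern the javadoc prescribes, seen from the caller's side (factory, context and dependency are stand-ins):

    try {
        instance = factory.get(errors, context, dependency);
    } catch (ErrorsException e) {
        // Fold the callee's errors into our own, preserving our source context.
        errors.merge(e.getErrors());
    }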
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ExpirationTimer.java b/src/main/java/org/elasticsearch/common/inject/internal/ExpirationTimer.java
new file mode 100644
index 0000000..b401773
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ExpirationTimer.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import java.util.Timer;
+
+/**
+ * Timer used for entry expiration in MapMaker.
+ */
+class ExpirationTimer {
+ static Timer instance = new Timer(true);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ExposedBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/ExposedBindingImpl.java
new file mode 100644
index 0000000..c89952c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ExposedBindingImpl.java
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.ExposedBinding;
+import org.elasticsearch.common.inject.spi.PrivateElements;
+
+import java.util.Set;
+
+public class ExposedBindingImpl<T> extends BindingImpl<T> implements ExposedBinding<T> {
+
+ private final PrivateElements privateElements;
+
+ public ExposedBindingImpl(Injector injector, Object source, Key<T> key,
+ InternalFactory<T> factory, PrivateElements privateElements) {
+ super(injector, key, source, factory, Scoping.UNSCOPED);
+ this.privateElements = privateElements;
+ }
+
+ public ExposedBindingImpl(Object source, Key<T> key, Scoping scoping,
+ PrivateElements privateElements) {
+ super(source, key, scoping);
+ this.privateElements = privateElements;
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return ImmutableSet.<Dependency<?>>of(Dependency.get(Key.get(Injector.class)));
+ }
+
+ public PrivateElements getPrivateElements() {
+ return privateElements;
+ }
+
+ public BindingImpl<T> withScoping(Scoping scoping) {
+ return new ExposedBindingImpl<T>(getSource(), getKey(), scoping, privateElements);
+ }
+
+ public ExposedBindingImpl<T> withKey(Key<T> key) {
+ return new ExposedBindingImpl<T>(getSource(), key, getScoping(), privateElements);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ExposedBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .add("privateElements", privateElements)
+ .toString();
+ }
+
+ public void applyTo(Binder binder) {
+ throw new UnsupportedOperationException("This element represents a synthetic binding.");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ExposureBuilder.java b/src/main/java/org/elasticsearch/common/inject/internal/ExposureBuilder.java
new file mode 100644
index 0000000..d9aef08
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ExposureBuilder.java
@@ -0,0 +1,69 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.binder.AnnotatedElementBuilder;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * For a private binder's expose() method.
+ */
+public class ExposureBuilder<T> implements AnnotatedElementBuilder {
+ private final Binder binder;
+ private final Object source;
+ private Key<T> key;
+
+ public ExposureBuilder(Binder binder, Object source, Key<T> key) {
+ this.binder = binder;
+ this.source = source;
+ this.key = key;
+ }
+
+ protected void checkNotAnnotated() {
+ if (key.getAnnotationType() != null) {
+ binder.addError(AbstractBindingBuilder.ANNOTATION_ALREADY_SPECIFIED);
+ }
+ }
+
+ public void annotatedWith(Class<? extends Annotation> annotationType) {
+ com.google.common.base.Preconditions.checkNotNull(annotationType, "annotationType");
+ checkNotAnnotated();
+ key = Key.get(key.getTypeLiteral(), annotationType);
+ }
+
+ public void annotatedWith(Annotation annotation) {
+ com.google.common.base.Preconditions.checkNotNull(annotation, "annotation");
+ checkNotAnnotated();
+ key = Key.get(key.getTypeLiteral(), annotation);
+ }
+
+ public Key<?> getKey() {
+ return key;
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ @Override
+ public String toString() {
+ return "AnnotatedElementBuilder";
+ }
+}
\ No newline at end of file
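
ExposureBuilder backs the expose() calls of a private binder. A hypothetical usage, assuming the PrivateModule port in this package and an assumed @Internal binding annotation:

    public class MyPrivateModule extends PrivateModule {
        @Override
        protected void configure() {
            bind(MyService.class).annotatedWith(Internal.class).to(MyServiceImpl.class);
            // Re-keys the exposed binding via ExposureBuilder.annotatedWith(...).
            expose(MyService.class).annotatedWith(Internal.class);
        }
    }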
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/FailableCache.java b/src/main/java/org/elasticsearch/common/inject/internal/FailableCache.java
new file mode 100644
index 0000000..f050755
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/FailableCache.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Lazily creates (and caches) values for keys. If creating the value fails (with errors), an
+ * exception is thrown on retrieval.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public abstract class FailableCache<K, V> {
+
+ private final LoadingCache<K, Object> delegate = CacheBuilder.newBuilder().build(new CacheLoader<K, Object>() {
+ @Override
+ public Object load(K key) throws Exception {
+ Errors errors = new Errors();
+ V result = null;
+ try {
+ result = FailableCache.this.create(key, errors);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ return errors.hasErrors() ? errors : result;
+ }
+ });
+
+ protected abstract V create(K key, Errors errors) throws ErrorsException;
+
+ public V get(K key, Errors errors) throws ErrorsException {
+ try {
+ Object resultOrError = delegate.get(key);
+ if (resultOrError instanceof Errors) {
+ errors.merge((Errors) resultOrError);
+ throw errors.toException();
+ } else {
+ @SuppressWarnings("unchecked") // create returned a non-error result, so this is safe
+ V result = (V) resultOrError;
+ return result;
+ }
+ } catch (ExecutionException e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
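
A toy subclass to show the contract: create() either returns a value or records errors, and get() replays a cached failure as an ErrorsException. Only the class-name mapping here is invented:

    FailableCache<Class<?>, String> names = new FailableCache<Class<?>, String>() {
        @Override
        protected String create(Class<?> key, Errors errors) {
            return key.getName();  // a real create() may add to errors and throw
        }
    };

    String name = names.get(String.class, new Errors());  // throws ErrorsException on failure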
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java
new file mode 100644
index 0000000..4710c58
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java
@@ -0,0 +1,96 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.spi.*;
+import org.elasticsearch.common.inject.util.Providers;
+
+import java.util.Set;
+
+public class InstanceBindingImpl<T> extends BindingImpl<T> implements InstanceBinding<T> {
+
+ final T instance;
+ final Provider<T> provider;
+ final ImmutableSet<InjectionPoint> injectionPoints;
+
+ public InstanceBindingImpl(Injector injector, Key<T> key, Object source,
+ InternalFactory<? extends T> internalFactory, Set<InjectionPoint> injectionPoints,
+ T instance) {
+ super(injector, key, source, internalFactory, Scoping.UNSCOPED);
+ this.injectionPoints = ImmutableSet.copyOf(injectionPoints);
+ this.instance = instance;
+ this.provider = Providers.of(instance);
+ }
+
+ public InstanceBindingImpl(Object source, Key<T> key, Scoping scoping,
+ Set<InjectionPoint> injectionPoints, T instance) {
+ super(source, key, scoping);
+ this.injectionPoints = ImmutableSet.copyOf(injectionPoints);
+ this.instance = instance;
+ this.provider = Providers.of(instance);
+ }
+
+ @Override
+ public Provider<T> getProvider() {
+ return this.provider;
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public T getInstance() {
+ return instance;
+ }
+
+ public Set<InjectionPoint> getInjectionPoints() {
+ return injectionPoints;
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return instance instanceof HasDependencies
+ ? ImmutableSet.copyOf(((HasDependencies) instance).getDependencies())
+ : Dependency.forInjectionPoints(injectionPoints);
+ }
+
+ public BindingImpl<T> withScoping(Scoping scoping) {
+ return new InstanceBindingImpl<T>(getSource(), getKey(), scoping, injectionPoints, instance);
+ }
+
+ public BindingImpl<T> withKey(Key<T> key) {
+ return new InstanceBindingImpl<T>(getSource(), key, getScoping(), injectionPoints, instance);
+ }
+
+ public void applyTo(Binder binder) {
+ // instance bindings aren't scoped
+ binder.withSource(getSource()).bind(getKey()).toInstance(instance);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(InstanceBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .add("instance", instance)
+ .toString();
+ }
+}
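
This is the binding behind toInstance(). A module-side sketch (MyService and MyServiceImpl are hypothetical):

    // Records an InstanceBindingImpl whose provider is Providers.of(instance);
    // applyTo(Binder) replays exactly this call, since instance bindings are unscoped.
    bind(MyService.class).toInstance(new MyServiceImpl());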
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java b/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java
new file mode 100644
index 0000000..b16342c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.spi.Dependency;
+
+import java.util.Map;
+
+/**
+ * Internal context. Used to coordinate injections and support circular
+ * dependencies.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public final class InternalContext {
+
+ private Map<Object, ConstructionContext<?>> constructionContexts = Maps.newHashMap();
+ private Dependency dependency;
+
+ @SuppressWarnings("unchecked")
+ public <T> ConstructionContext<T> getConstructionContext(Object key) {
+ ConstructionContext<T> constructionContext
+ = (ConstructionContext<T>) constructionContexts.get(key);
+ if (constructionContext == null) {
+ constructionContext = new ConstructionContext<T>();
+ constructionContexts.put(key, constructionContext);
+ }
+ return constructionContext;
+ }
+
+ public Dependency getDependency() {
+ return dependency;
+ }
+
+ public void setDependency(Dependency dependency) {
+ this.dependency = dependency;
+ }
+}
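
A sketch of how the injector consults this context while constructing a key; key, errors and the interface type are stand-ins:

    InternalContext context = new InternalContext();
    ConstructionContext<MyService> cc = context.getConstructionContext(key);
    if (cc.isConstructing()) {
        // Re-entrant request for the same key: hand back a proxy rather than recurse.
        Object proxy = cc.createProxy(errors, MyService.class);  // throws ErrorsException
    }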
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/InternalFactory.java b/src/main/java/org/elasticsearch/common/inject/internal/InternalFactory.java
new file mode 100644
index 0000000..c1b36d6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/InternalFactory.java
@@ -0,0 +1,61 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.spi.Dependency;
+
+/**
+ * Creates objects which will be injected.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface InternalFactory<T> {
+
+ /**
+ * ES:
+ * A factory that returns a pre-created instance.
+ */
+ public static class Instance<T> implements InternalFactory<T> {
+
+ private final T object;
+
+ public Instance(T object) {
+ this.object = object;
+ }
+
+ @Override
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency) throws ErrorsException {
+ return object;
+ }
+
+ @Override
+ public String toString() {
+ return object.toString();
+ }
+ }
+
+ /**
+ * Creates an object to be injected.
+ *
+ * @param errors collects errors encountered while creating the instance
+ * @param context the context of this injection
+ * @param dependency the dependency being satisfied
+ * @return instance to be injected
+ * @throws org.elasticsearch.common.inject.internal.ErrorsException
+ * if a value cannot be provided
+ */
+ T get(Errors errors, InternalContext context, Dependency<?> dependency)
+ throws ErrorsException;
+}
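
The nested Instance factory simply echoes back a pre-built object. A sketch of both sides of the interface (errors, context and dependency are stand-ins):

    InternalFactory<MyService> factory =
            new InternalFactory.Instance<MyService>(new MyServiceImpl());

    // Always returns the wrapped object, ignoring the injection context.
    MyService s = factory.get(errors, context, dependency);  // declared to throw ErrorsException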
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Join.java b/src/main/java/org/elasticsearch/common/inject/internal/Join.java
new file mode 100644
index 0000000..fd8348d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Join.java
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.Lists;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Utility for joining pieces of text separated by a delimiter. It can handle
+ * iterators, collections, arrays, and varargs, and can append to any
+ * {@link Appendable} or just return a {@link String}. For example,
+ * {@code join(":", "a", "b", "c")} returns {@code "a:b:c"}.
+ * <p/>
+ * <p>All methods of this class throw {@link NullPointerException} when a value
+ * of {@code null} is supplied for any parameter. The elements within the
+ * collection, iterator, array, or varargs parameter list <i>may</i> be null --
+ * these will be represented in the output by the string {@code "null"}.
+ *
+ * @author Kevin Bourrillion
+ */
+public final class Join {
+ private Join() {
+ }
+
+ /**
+ * Returns a string containing the {@code tokens}, converted to strings if
+ * necessary, separated by {@code delimiter}. If {@code tokens} is empty, it
+ * returns an empty string.
+ * <p/>
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param tokens objects to append
+ * @return a string consisting of the joined elements
+ */
+ public static String join(String delimiter, Iterable<?> tokens) {
+ return join(delimiter, tokens.iterator());
+ }
+
+ /**
+ * Returns a string containing the {@code tokens}, converted to strings if
+ * necessary, separated by {@code delimiter}. If {@code tokens} is empty, it
+ * returns an empty string.
+ * <p/>
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param tokens objects to append
+ * @return a string consisting of the joined elements
+ */
+ public static String join(String delimiter, Object[] tokens) {
+ return join(delimiter, Arrays.asList(tokens));
+ }
+
+ /**
+ * Returns a string containing the {@code tokens}, converted to strings if
+ * necessary, separated by {@code delimiter}.
+ * <p/>
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param firstToken the first object to append
+ * @param otherTokens subsequent objects to append
+ * @return a string consisting of the joined elements
+ */
+ public static String join(
+ String delimiter, @Nullable Object firstToken, Object... otherTokens) {
+ checkNotNull(otherTokens);
+ return join(delimiter, Lists.newArrayList(firstToken, otherTokens));
+ }
+
+ /**
+ * Returns a string containing the {@code tokens}, converted to strings if
+ * necessary, separated by {@code delimiter}. If {@code tokens} is empty, it
+ * returns an empty string.
+ * <p/>
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param tokens objects to append
+ * @return a string consisting of the joined elements
+ */
+ public static String join(String delimiter, Iterator<?> tokens) {
+ StringBuilder sb = new StringBuilder();
+ join(sb, delimiter, tokens);
+ return sb.toString();
+ }
+
+ /**
+ * Returns a string containing the contents of {@code map}, with entries
+ * separated by {@code entryDelimiter}, and keys and values separated with
+ * {@code keyValueSeparator}.
+ * <p/>
+ * <p>Each key and value will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param keyValueSeparator a string to append between every key and its
+ * associated value
+ * @param entryDelimiter a string to append between every entry, but not at
+ * the beginning or end
+ * @param map the map containing the data to join
+ * @return a string consisting of the joined entries of the map; empty if the
+ * map is empty
+ */
+ public static String join(
+ String keyValueSeparator, String entryDelimiter, Map<?, ?> map) {
+ return join(new StringBuilder(), keyValueSeparator, entryDelimiter, map)
+ .toString();
+ }
+
+ /**
+ * Appends each of the {@code tokens} to {@code appendable}, separated by
+ * {@code delimiter}.
+ * <p/>
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param appendable the object to append the results to
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param tokens objects to append
+ * @return the same {@code Appendable} instance that was passed in
+ * @throws JoinException if an {@link IOException} occurs
+ */
+ public static <T extends Appendable> T join(
+ T appendable, String delimiter, Iterable<?> tokens) {
+ return join(appendable, delimiter, tokens.iterator());
+ }
+
+ /**
+ * Appends each of the {@code tokens} to {@code appendable}, separated by
+ * {@code delimiter}.
+ *
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param appendable the object to append the results to
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param tokens objects to append
+ * @return the same {@code Appendable} instance that was passed in
+ * @throws JoinException if an {@link IOException} occurs
+ */
+ public static <T extends Appendable> T join(
+ T appendable, String delimiter, Object[] tokens) {
+ return join(appendable, delimiter, Arrays.asList(tokens));
+ }
+
+ /**
+ * Appends each of the {@code tokens} to {@code appendable}, separated by
+ * {@code delimiter}.
+ *
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param appendable the object to append the results to
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param firstToken the first object to append
+ * @param otherTokens subsequent objects to append
+ * @return the same {@code Appendable} instance that was passed in
+ * @throws JoinException if an {@link IOException} occurs
+ */
+ public static <T extends Appendable> T join(T appendable, String delimiter,
+ @Nullable Object firstToken, Object... otherTokens) {
+ checkNotNull(otherTokens);
+ // as above, flatten (firstToken, otherTokens) into one token list
+ return join(appendable, delimiter, Lists.asList(firstToken, otherTokens));
+ }
+
+ /**
+ * Appends each of the {@code tokens} to {@code appendable}, separated by
+ * {@code delimiter}.
+ *
+ * <p>Each token will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param appendable the object to append the results to
+ * @param delimiter a string to append between every element, but not at the
+ * beginning or end
+ * @param tokens objects to append
+ * @return the same {@code Appendable} instance that was passed in
+ * @throws JoinException if an {@link IOException} occurs
+ */
+ public static <T extends Appendable> T join(
+ T appendable, String delimiter, Iterator<?> tokens) {
+
+ /* This method is the workhorse of the class */
+
+ checkNotNull(appendable);
+ checkNotNull(delimiter);
+ if (tokens.hasNext()) {
+ try {
+ appendOneToken(appendable, tokens.next());
+ while (tokens.hasNext()) {
+ appendable.append(delimiter);
+ appendOneToken(appendable, tokens.next());
+ }
+ } catch (IOException e) {
+ throw new JoinException(e);
+ }
+ }
+ return appendable;
+ }
+
+ /**
+ * Appends the contents of {@code map} to {@code appendable}, with entries
+ * separated by {@code entryDelimiter}, and keys and values separated with
+ * {@code keyValueSeparator}.
+ *
+ * <p>Each key and value will be converted to a {@link CharSequence} using
+ * {@link String#valueOf(Object)}, if it isn't a {@link CharSequence} already.
+ * Note that this implies that null tokens will be appended as the
+ * four-character string {@code "null"}.
+ *
+ * @param appendable the object to append the results to
+ * @param keyValueSeparator a string to append between every key and its
+ * associated value
+ * @param entryDelimiter a string to append between every entry, but not at
+ * the beginning or end
+ * @param map the map containing the data to join
+ * @return the same {@code Appendable} instance that was passed in
+ */
+ public static <T extends Appendable> T join(T appendable,
+ String keyValueSeparator, String entryDelimiter, Map<?, ?> map) {
+ checkNotNull(appendable);
+ checkNotNull(keyValueSeparator);
+ checkNotNull(entryDelimiter);
+ Iterator<? extends Map.Entry<?, ?>> entries = map.entrySet().iterator();
+ if (entries.hasNext()) {
+ try {
+ appendOneEntry(appendable, keyValueSeparator, entries.next());
+ while (entries.hasNext()) {
+ appendable.append(entryDelimiter);
+ appendOneEntry(appendable, keyValueSeparator, entries.next());
+ }
+ } catch (IOException e) {
+ throw new JoinException(e);
+ }
+ }
+ return appendable;
+ }
+
+ private static void appendOneEntry(
+ Appendable appendable, String keyValueSeparator, Map.Entry<?, ?> entry)
+ throws IOException {
+ appendOneToken(appendable, entry.getKey());
+ appendable.append(keyValueSeparator);
+ appendOneToken(appendable, entry.getValue());
+ }
+
+ private static void appendOneToken(Appendable appendable, Object token)
+ throws IOException {
+ appendable.append(toCharSequence(token));
+ }
+
+ private static CharSequence toCharSequence(Object token) {
+ return (token instanceof CharSequence)
+ ? (CharSequence) token
+ : String.valueOf(token);
+ }
+
+ /**
+ * Exception thrown in response to an {@link IOException} from the supplied
+ * {@link Appendable}. This is used because most callers won't want to
+ * worry about catching an IOException.
+ */
+ public static class JoinException extends RuntimeException {
+ private JoinException(IOException cause) {
+ super(cause);
+ }
+
+ private static final long serialVersionUID = 1L;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java
new file mode 100644
index 0000000..c4b8e49
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
+import org.elasticsearch.common.inject.spi.LinkedKeyBinding;
+
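+/**
+ * A binding whose target is another key, such as the binding produced by
+ * {@code bind(Service.class).to(ServiceImpl.class)} (the {@code Service}
+ * names here are illustrative).
+ */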
+public final class LinkedBindingImpl<T> extends BindingImpl<T> implements LinkedKeyBinding<T> {
+
+ final Key<? extends T> targetKey;
+
+ public LinkedBindingImpl(Injector injector, Key<T> key, Object source,
+ InternalFactory<? extends T> internalFactory, Scoping scoping,
+ Key<? extends T> targetKey) {
+ super(injector, key, source, internalFactory, scoping);
+ this.targetKey = targetKey;
+ }
+
+ public LinkedBindingImpl(Object source, Key<T> key, Scoping scoping, Key<? extends T> targetKey) {
+ super(source, key, scoping);
+ this.targetKey = targetKey;
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public Key<? extends T> getLinkedKey() {
+ return targetKey;
+ }
+
+ public BindingImpl<T> withScoping(Scoping scoping) {
+ return new LinkedBindingImpl<T>(getSource(), getKey(), scoping, targetKey);
+ }
+
+ public BindingImpl<T> withKey(Key<T> key) {
+ return new LinkedBindingImpl<T>(getSource(), key, getScoping(), targetKey);
+ }
+
+ public void applyTo(Binder binder) {
+ getScoping().applyTo(binder.withSource(getSource()).bind(getKey()).to(getLinkedKey()));
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(LinkedKeyBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .add("scope", getScoping())
+ .add("target", targetKey)
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java
new file mode 100644
index 0000000..416dab2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
+import org.elasticsearch.common.inject.spi.ProviderKeyBinding;
+
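+/**
+ * A binding whose target is a {@link Provider} key, such as the binding
+ * produced by {@code bind(Service.class).toProvider(ServiceProvider.class)}
+ * (illustrative names).
+ */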
+public final class LinkedProviderBindingImpl<T>
+ extends BindingImpl<T> implements ProviderKeyBinding<T> {
+
+ final Key<? extends Provider<? extends T>> providerKey;
+
+ public LinkedProviderBindingImpl(Injector injector, Key<T> key, Object source,
+ InternalFactory<? extends T> internalFactory, Scoping scoping,
+ Key<? extends Provider<? extends T>> providerKey) {
+ super(injector, key, source, internalFactory, scoping);
+ this.providerKey = providerKey;
+ }
+
+ LinkedProviderBindingImpl(Object source, Key<T> key, Scoping scoping,
+ Key<? extends Provider<? extends T>> providerKey) {
+ super(source, key, scoping);
+ this.providerKey = providerKey;
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public Key<? extends Provider<? extends T>> getProviderKey() {
+ return providerKey;
+ }
+
+ public BindingImpl<T> withScoping(Scoping scoping) {
+ return new LinkedProviderBindingImpl<T>(getSource(), getKey(), scoping, providerKey);
+ }
+
+ public BindingImpl<T> withKey(Key<T> key) {
+ return new LinkedProviderBindingImpl<T>(getSource(), key, getScoping(), providerKey);
+ }
+
+ public void applyTo(Binder binder) {
+ getScoping().applyTo(binder.withSource(getSource())
+ .bind(getKey()).toProvider(getProviderKey()));
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ProviderKeyBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .add("scope", getScoping())
+ .add("provider", providerKey)
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/MatcherAndConverter.java b/src/main/java/org/elasticsearch/common/inject/internal/MatcherAndConverter.java
new file mode 100644
index 0000000..7ee9313
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/MatcherAndConverter.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.TypeLiteral;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.inject.spi.TypeConverter;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * @author crazybob@google.com (Bob Lee)
+ */
+public final class MatcherAndConverter {
+
+ private final Matcher<? super TypeLiteral<?>> typeMatcher;
+ private final TypeConverter typeConverter;
+ private final Object source;
+
+ public MatcherAndConverter(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter typeConverter, Object source) {
+ this.typeMatcher = checkNotNull(typeMatcher, "type matcher");
+ this.typeConverter = checkNotNull(typeConverter, "converter");
+ this.source = source;
+ }
+
+ public TypeConverter getTypeConverter() {
+ return typeConverter;
+ }
+
+ public Matcher<? super TypeLiteral<?>> getTypeMatcher() {
+ return typeMatcher;
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ @Override
+ public String toString() {
+ return typeConverter + " which matches " + typeMatcher
+ + " (bound at " + source + ")";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java b/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java
new file mode 100644
index 0000000..6b283e1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java
@@ -0,0 +1,667 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.ConfigurationException;
+import org.elasticsearch.common.inject.TypeLiteral;
+import org.elasticsearch.common.inject.spi.Message;
+
+import java.io.Serializable;
+import java.lang.reflect.*;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Static methods for working with types that we aren't publishing in the
+ * public {@code Types} API.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class MoreTypes {
+
+ public static final Type[] EMPTY_TYPE_ARRAY = new Type[]{};
+
+ private MoreTypes() {
+ }
+
+ private static final Map<TypeLiteral<?>, TypeLiteral<?>> PRIMITIVE_TO_WRAPPER
+ = new ImmutableMap.Builder<TypeLiteral<?>, TypeLiteral<?>>()
+ .put(TypeLiteral.get(boolean.class), TypeLiteral.get(Boolean.class))
+ .put(TypeLiteral.get(byte.class), TypeLiteral.get(Byte.class))
+ .put(TypeLiteral.get(short.class), TypeLiteral.get(Short.class))
+ .put(TypeLiteral.get(int.class), TypeLiteral.get(Integer.class))
+ .put(TypeLiteral.get(long.class), TypeLiteral.get(Long.class))
+ .put(TypeLiteral.get(float.class), TypeLiteral.get(Float.class))
+ .put(TypeLiteral.get(double.class), TypeLiteral.get(Double.class))
+ .put(TypeLiteral.get(char.class), TypeLiteral.get(Character.class))
+ .put(TypeLiteral.get(void.class), TypeLiteral.get(Void.class))
+ .build();
+
+ /**
+ * Returns an equivalent type that's safe for use in a key. The returned type will be free of
+ * primitive types. Type literals of primitives will return the corresponding wrapper types.
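+ * <p>For example (illustrative): {@code makeKeySafe(TypeLiteral.get(int.class))}
+ * returns {@code TypeLiteral.get(Integer.class)}.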
+ *
+ * @throws ConfigurationException if {@code type} contains a type variable
+ */
+ public static <T> TypeLiteral<T> makeKeySafe(TypeLiteral<T> type) {
+ if (!isFullySpecified(type.getType())) {
+ String message = type + " cannot be used as a key; it is not fully specified.";
+ throw new ConfigurationException(ImmutableSet.of(new Message(message)));
+ }
+
+ @SuppressWarnings("unchecked")
+ TypeLiteral<T> wrappedPrimitives = (TypeLiteral<T>) PRIMITIVE_TO_WRAPPER.get(type);
+ return wrappedPrimitives != null
+ ? wrappedPrimitives
+ : type;
+ }
+
+ /**
+ * Returns true if {@code type} is free from type variables.
+ */
+ private static boolean isFullySpecified(Type type) {
+ if (type instanceof Class) {
+ return true;
+
+ } else if (type instanceof CompositeType) {
+ return ((CompositeType) type).isFullySpecified();
+
+ } else if (type instanceof TypeVariable) {
+ return false;
+
+ } else {
+ return ((CompositeType) canonicalize(type)).isFullySpecified();
+ }
+ }
+
+ /**
+ * Returns a type that is functionally equal but not necessarily equal
+ * according to {@link Object#equals(Object) Object.equals()}. The returned
+ * type is {@link Serializable}.
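+ * <p>For example (illustrative): {@code canonicalize(String[].class)} returns a
+ * serializable {@link GenericArrayType} whose component type is {@code String}.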
+ */
+ public static Type canonicalize(Type type) {
+ if (type instanceof ParameterizedTypeImpl
+ || type instanceof GenericArrayTypeImpl
+ || type instanceof WildcardTypeImpl) {
+ return type;
+
+ } else if (type instanceof ParameterizedType) {
+ ParameterizedType p = (ParameterizedType) type;
+ return new ParameterizedTypeImpl(p.getOwnerType(),
+ p.getRawType(), p.getActualTypeArguments());
+
+ } else if (type instanceof GenericArrayType) {
+ GenericArrayType g = (GenericArrayType) type;
+ return new GenericArrayTypeImpl(g.getGenericComponentType());
+
+ } else if (type instanceof Class && ((Class<?>) type).isArray()) {
+ Class<?> c = (Class<?>) type;
+ return new GenericArrayTypeImpl(c.getComponentType());
+
+ } else if (type instanceof WildcardType) {
+ WildcardType w = (WildcardType) type;
+ return new WildcardTypeImpl(w.getUpperBounds(), w.getLowerBounds());
+
+ } else {
+ // type is either serializable as-is or unsupported
+ return type;
+ }
+ }
+
+ /**
+ * Returns a type that's functionally equal but not necessarily equal
+ * according to {@link Object#equals(Object) Object.equals}. The returned
+ * member is {@link Serializable}.
+ */
+ public static Member serializableCopy(Member member) {
+ return member instanceof MemberImpl
+ ? member
+ : new MemberImpl(member);
+ }
+
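+ /**
+ * Returns the raw (erased) class for {@code type}; for example (illustrative),
+ * the raw type of a {@code List<String>} {@link ParameterizedType} is
+ * {@code List.class}.
+ */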
+ public static Class<?> getRawType(Type type) {
+ if (type instanceof Class<?>) {
+ // type is a normal class.
+ return (Class<?>) type;
+
+ } else if (type instanceof ParameterizedType) {
+ ParameterizedType parameterizedType = (ParameterizedType) type;
+
+ // I'm not exactly sure why getRawType() returns Type instead of Class.
+ // Neal isn't either but suspects some pathological case related
+ // to nested classes exists.
+ Type rawType = parameterizedType.getRawType();
+ checkArgument(rawType instanceof Class,
+ "Expected a Class, but <%s> is of type %s", type, type.getClass().getName());
+ return (Class<?>) rawType;
+
+ } else if (type instanceof GenericArrayType) {
+ // TODO: Is this sufficient?
+ return Object[].class;
+
+ } else if (type instanceof TypeVariable) {
+ // we could use the variable's bounds, but that won't work if there are multiple.
+ // having a raw type that's more general than necessary is okay
+ return Object.class;
+
+ } else {
+ throw new IllegalArgumentException("Expected a Class, ParameterizedType, or "
+ + "GenericArrayType, but <" + type + "> is of type " + type.getClass().getName());
+ }
+ }
+
+ /**
+ * Returns true if {@code a} and {@code b} are equal.
+ */
+ public static boolean equals(Type a, Type b) {
+ if (a == b) {
+ // also handles (a == null && b == null)
+ return true;
+
+ } else if (a instanceof Class) {
+ // Class already specifies equals().
+ return a.equals(b);
+
+ } else if (a instanceof ParameterizedType) {
+ if (!(b instanceof ParameterizedType)) {
+ return false;
+ }
+
+ // TODO: save a .clone() call
+ ParameterizedType pa = (ParameterizedType) a;
+ ParameterizedType pb = (ParameterizedType) b;
+ return Objects.equal(pa.getOwnerType(), pb.getOwnerType())
+ && pa.getRawType().equals(pb.getRawType())
+ && Arrays.equals(pa.getActualTypeArguments(), pb.getActualTypeArguments());
+
+ } else if (a instanceof GenericArrayType) {
+ if (!(b instanceof GenericArrayType)) {
+ return false;
+ }
+
+ GenericArrayType ga = (GenericArrayType) a;
+ GenericArrayType gb = (GenericArrayType) b;
+ return equals(ga.getGenericComponentType(), gb.getGenericComponentType());
+
+ } else if (a instanceof WildcardType) {
+ if (!(b instanceof WildcardType)) {
+ return false;
+ }
+
+ WildcardType wa = (WildcardType) a;
+ WildcardType wb = (WildcardType) b;
+ return Arrays.equals(wa.getUpperBounds(), wb.getUpperBounds())
+ && Arrays.equals(wa.getLowerBounds(), wb.getLowerBounds());
+
+ } else if (a instanceof TypeVariable) {
+ if (!(b instanceof TypeVariable)) {
+ return false;
+ }
+ TypeVariable<?> va = (TypeVariable) a;
+ TypeVariable<?> vb = (TypeVariable) b;
+ return va.getGenericDeclaration() == vb.getGenericDeclaration()
+ && va.getName().equals(vb.getName());
+
+ } else {
+ // This isn't a type we support; unsupported types are never considered equal.
+ return false;
+ }
+ }
+
+ /**
+ * Returns the hashCode of {@code type}.
+ */
+ public static int hashCode(Type type) {
+ if (type instanceof Class) {
+ // Class specifies hashCode().
+ return type.hashCode();
+
+ } else if (type instanceof ParameterizedType) {
+ ParameterizedType p = (ParameterizedType) type;
+ return Arrays.hashCode(p.getActualTypeArguments())
+ ^ p.getRawType().hashCode()
+ ^ hashCodeOrZero(p.getOwnerType());
+
+ } else if (type instanceof GenericArrayType) {
+ return hashCode(((GenericArrayType) type).getGenericComponentType());
+
+ } else if (type instanceof WildcardType) {
+ WildcardType w = (WildcardType) type;
+ return Arrays.hashCode(w.getLowerBounds()) ^ Arrays.hashCode(w.getUpperBounds());
+
+ } else {
+ // This isn't a type we support. Probably a type variable
+ return hashCodeOrZero(type);
+ }
+ }
+
+ private static int hashCodeOrZero(Object o) {
+ return o != null ? o.hashCode() : 0;
+ }
+
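+ /**
+ * Returns a string form of {@code type}; for example (illustrative), a
+ * {@code Map<String, ? extends Number>} type prints as
+ * {@code "java.util.Map<java.lang.String, ? extends java.lang.Number>"}.
+ */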
+ public static String toString(Type type) {
+ if (type instanceof Class<?>) {
+ return ((Class) type).getName();
+
+ } else if (type instanceof ParameterizedType) {
+ ParameterizedType parameterizedType = (ParameterizedType) type;
+ Type[] arguments = parameterizedType.getActualTypeArguments();
+ Type ownerType = parameterizedType.getOwnerType();
+ StringBuilder stringBuilder = new StringBuilder();
+ if (ownerType != null) {
+ stringBuilder.append(toString(ownerType)).append(".");
+ }
+ stringBuilder.append(toString(parameterizedType.getRawType()));
+ if (arguments.length > 0) {
+ stringBuilder
+ .append("<")
+ .append(toString(arguments[0]));
+ for (int i = 1; i < arguments.length; i++) {
+ stringBuilder.append(", ").append(toString(arguments[i]));
+ }
+ stringBuilder.append(">");
+ }
+ return stringBuilder.toString();
+
+ } else if (type instanceof GenericArrayType) {
+ return toString(((GenericArrayType) type).getGenericComponentType()) + "[]";
+
+ } else if (type instanceof WildcardType) {
+ WildcardType wildcardType = (WildcardType) type;
+ Type[] lowerBounds = wildcardType.getLowerBounds();
+ Type[] upperBounds = wildcardType.getUpperBounds();
+
+ if (upperBounds.length != 1 || lowerBounds.length > 1) {
+ throw new UnsupportedOperationException("Unsupported wildcard type " + type);
+ }
+
+ if (lowerBounds.length == 1) {
+ if (upperBounds[0] != Object.class) {
+ throw new UnsupportedOperationException("Unsupported wildcard type " + type);
+ }
+ return "? super " + toString(lowerBounds[0]);
+ } else if (upperBounds[0] == Object.class) {
+ return "?";
+ } else {
+ return "? extends " + toString(upperBounds[0]);
+ }
+
+ } else {
+ return type.toString();
+ }
+ }
+
+ /**
+ * Returns {@code Field.class}, {@code Method.class} or {@code Constructor.class}.
+ */
+ public static Class<? extends Member> memberType(Member member) {
+ checkNotNull(member, "member");
+
+ if (member instanceof MemberImpl) {
+ return ((MemberImpl) member).memberType;
+
+ } else if (member instanceof Field) {
+ return Field.class;
+
+ } else if (member instanceof Method) {
+ return Method.class;
+
+ } else if (member instanceof Constructor) {
+ return Constructor.class;
+
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported implementation class for Member, " + member.getClass());
+ }
+ }
+
+ /**
+ * Formats a member as a concise string, such as {@code java.util.ArrayList.size},
+ * {@code java.util.ArrayList.<init>()} or {@code java.util.List.remove()}.
+ */
+ public static String toString(Member member) {
+ Class<? extends Member> memberType = memberType(member);
+
+ if (memberType == Method.class) {
+ return member.getDeclaringClass().getName() + "." + member.getName() + "()";
+ } else if (memberType == Field.class) {
+ return member.getDeclaringClass().getName() + "." + member.getName();
+ } else if (memberType == Constructor.class) {
+ return member.getDeclaringClass().getName() + ".<init>()";
+ } else {
+ throw new AssertionError();
+ }
+ }
+
+ public static String memberKey(Member member) {
+ checkNotNull(member, "member");
+
+ return "<NO_MEMBER_KEY>";
+ }
+
+ /**
+ * Returns the generic supertype of {@code type} that corresponds to {@code toResolve}. For
+ * example, given a class {@code IntegerSet}, the result when {@code toResolve} is
+ * {@code Set.class} is {@code Set<Integer>}, and the result when it is
+ * {@code Collection.class} is {@code Collection<Integer>}.
+ */
+ public static Type getGenericSupertype(Type type, Class<?> rawType, Class<?> toResolve) {
+ if (toResolve == rawType) {
+ return type;
+ }
+
+ // search through interfaces only when the type to resolve is itself an interface
+ if (toResolve.isInterface()) {
+ Class[] interfaces = rawType.getInterfaces();
+ for (int i = 0, length = interfaces.length; i < length; i++) {
+ if (interfaces[i] == toResolve) {
+ return rawType.getGenericInterfaces()[i];
+ } else if (toResolve.isAssignableFrom(interfaces[i])) {
+ return getGenericSupertype(rawType.getGenericInterfaces()[i], interfaces[i], toResolve);
+ }
+ }
+ }
+
+ // check our supertypes
+ if (!rawType.isInterface()) {
+ while (rawType != Object.class) {
+ Class<?> rawSupertype = rawType.getSuperclass();
+ if (rawSupertype == toResolve) {
+ return rawType.getGenericSuperclass();
+ } else if (toResolve.isAssignableFrom(rawSupertype)) {
+ return getGenericSupertype(rawType.getGenericSuperclass(), rawSupertype, toResolve);
+ }
+ rawType = rawSupertype;
+ }
+ }
+
+ // we can't resolve this further
+ return toResolve;
+ }
+
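+ /**
+ * Resolves {@code unknown}, a type variable declared on a supertype of
+ * {@code rawType}, against the generic information carried by {@code type};
+ * returns {@code unknown} unchanged when it cannot be reduced further.
+ */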
+ public static Type resolveTypeVariable(Type type, Class<?> rawType, TypeVariable unknown) {
+ Class<?> declaredByRaw = declaringClassOf(unknown);
+
+ // we can't reduce this further
+ if (declaredByRaw == null) {
+ return unknown;
+ }
+
+ Type declaredBy = getGenericSupertype(type, rawType, declaredByRaw);
+ if (declaredBy instanceof ParameterizedType) {
+ int index = indexOf(declaredByRaw.getTypeParameters(), unknown);
+ return ((ParameterizedType) declaredBy).getActualTypeArguments()[index];
+ }
+
+ return unknown;
+ }
+
+ private static int indexOf(Object[] array, Object toFind) {
+ for (int i = 0; i < array.length; i++) {
+ if (toFind.equals(array[i])) {
+ return i;
+ }
+ }
+ throw new NoSuchElementException();
+ }
+
+ /**
+ * Returns the declaring class of {@code typeVariable}, or {@code null} if it was not declared by
+ * a class.
+ */
+ private static Class<?> declaringClassOf(TypeVariable typeVariable) {
+ GenericDeclaration genericDeclaration = typeVariable.getGenericDeclaration();
+ return genericDeclaration instanceof Class
+ ? (Class<?>) genericDeclaration
+ : null;
+ }
+
+ public static class ParameterizedTypeImpl
+ implements ParameterizedType, Serializable, CompositeType {
+ private final Type ownerType;
+ private final Type rawType;
+ private final Type[] typeArguments;
+
+ public ParameterizedTypeImpl(Type ownerType, Type rawType, Type... typeArguments) {
+ // require an owner type if the raw type needs it
+ if (rawType instanceof Class<?>) {
+ Class rawTypeAsClass = (Class) rawType;
+ checkArgument(ownerType != null || rawTypeAsClass.getEnclosingClass() == null,
+ "No owner type for enclosed %s", rawType);
+ checkArgument(ownerType == null || rawTypeAsClass.getEnclosingClass() != null,
+ "Owner type for unenclosed %s", rawType);
+ }
+
+ this.ownerType = ownerType == null ? null : canonicalize(ownerType);
+ this.rawType = canonicalize(rawType);
+ this.typeArguments = typeArguments.clone();
+ for (int t = 0; t < this.typeArguments.length; t++) {
+ checkNotNull(this.typeArguments[t], "type parameter");
+ checkNotPrimitive(this.typeArguments[t], "type parameters");
+ this.typeArguments[t] = canonicalize(this.typeArguments[t]);
+ }
+ }
+
+ public Type[] getActualTypeArguments() {
+ return typeArguments.clone();
+ }
+
+ public Type getRawType() {
+ return rawType;
+ }
+
+ public Type getOwnerType() {
+ return ownerType;
+ }
+
+ public boolean isFullySpecified() {
+ if (ownerType != null && !MoreTypes.isFullySpecified(ownerType)) {
+ return false;
+ }
+
+ if (!MoreTypes.isFullySpecified(rawType)) {
+ return false;
+ }
+
+ for (Type type : typeArguments) {
+ if (!MoreTypes.isFullySpecified(type)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof ParameterizedType
+ && MoreTypes.equals(this, (ParameterizedType) other);
+ }
+
+ @Override
+ public int hashCode() {
+ return MoreTypes.hashCode(this);
+ }
+
+ @Override
+ public String toString() {
+ return MoreTypes.toString(this);
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ public static class GenericArrayTypeImpl
+ implements GenericArrayType, Serializable, CompositeType {
+ private final Type componentType;
+
+ public GenericArrayTypeImpl(Type componentType) {
+ this.componentType = canonicalize(componentType);
+ }
+
+ public Type getGenericComponentType() {
+ return componentType;
+ }
+
+ public boolean isFullySpecified() {
+ return MoreTypes.isFullySpecified(componentType);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof GenericArrayType
+ && MoreTypes.equals(this, (GenericArrayType) o);
+ }
+
+ @Override
+ public int hashCode() {
+ return MoreTypes.hashCode(this);
+ }
+
+ @Override
+ public String toString() {
+ return MoreTypes.toString(this);
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * The WildcardType interface supports multiple upper bounds and multiple
+ * lower bounds. We only support what the Java 6 language needs - at most one
+ * bound. If a lower bound is set, the upper bound must be Object.class.
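+ * <p>Illustrative constructions (added examples):
+ * <pre>{@code
+ * // ? extends Number
+ * new WildcardTypeImpl(new Type[] {Number.class}, EMPTY_TYPE_ARRAY);
+ * // ? super Integer
+ * new WildcardTypeImpl(new Type[] {Object.class}, new Type[] {Integer.class});
+ * }</pre>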
+ */
+ public static class WildcardTypeImpl implements WildcardType, Serializable, CompositeType {
+ private final Type upperBound;
+ private final Type lowerBound;
+
+ public WildcardTypeImpl(Type[] upperBounds, Type[] lowerBounds) {
+ checkArgument(lowerBounds.length <= 1, "Must have at most one lower bound.");
+ checkArgument(upperBounds.length == 1, "Must have exactly one upper bound.");
+
+ if (lowerBounds.length == 1) {
+ checkNotNull(lowerBounds[0], "lowerBound");
+ checkNotPrimitive(lowerBounds[0], "wildcard bounds");
+ checkArgument(upperBounds[0] == Object.class, "bounded both ways");
+ this.lowerBound = canonicalize(lowerBounds[0]);
+ this.upperBound = Object.class;
+
+ } else {
+ checkNotNull(upperBounds[0], "upperBound");
+ checkNotPrimitive(upperBounds[0], "wildcard bounds");
+ this.lowerBound = null;
+ this.upperBound = canonicalize(upperBounds[0]);
+ }
+ }
+
+ public Type[] getUpperBounds() {
+ return new Type[]{upperBound};
+ }
+
+ public Type[] getLowerBounds() {
+ return lowerBound != null ? new Type[]{lowerBound} : EMPTY_TYPE_ARRAY;
+ }
+
+ public boolean isFullySpecified() {
+ return MoreTypes.isFullySpecified(upperBound)
+ && (lowerBound == null || MoreTypes.isFullySpecified(lowerBound));
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof WildcardType
+ && MoreTypes.equals(this, (WildcardType) other);
+ }
+
+ @Override
+ public int hashCode() {
+ return MoreTypes.hashCode(this);
+ }
+
+ @Override
+ public String toString() {
+ return MoreTypes.toString(this);
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ private static void checkNotPrimitive(Type type, String use) {
+ checkArgument(!(type instanceof Class<?>) || !((Class) type).isPrimitive(),
+ "Primitive types are not allowed in %s: %s", use, type);
+ }
+
+ /**
+ * We cannot serialize the built-in Java member classes, which prevents us from using Members in
+ * our exception types. We work around that with this serializable implementation. It includes all
+ * of the API methods, plus everything we use for line numbers and messaging.
+ */
+ public static class MemberImpl implements Member, Serializable {
+ private final Class<?> declaringClass;
+ private final String name;
+ private final int modifiers;
+ private final boolean synthetic;
+ private final Class<? extends Member> memberType;
+ private final String memberKey;
+
+ private MemberImpl(Member member) {
+ this.declaringClass = member.getDeclaringClass();
+ this.name = member.getName();
+ this.modifiers = member.getModifiers();
+ this.synthetic = member.isSynthetic();
+ this.memberType = memberType(member);
+ this.memberKey = memberKey(member);
+ }
+
+ public Class getDeclaringClass() {
+ return declaringClass;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public int getModifiers() {
+ return modifiers;
+ }
+
+ public boolean isSynthetic() {
+ return synthetic;
+ }
+
+ @Override
+ public String toString() {
+ return MoreTypes.toString(this);
+ }
+ }
+
+ /**
+ * A type formed from other types, such as arrays, parameterized types or wildcard types
+ */
+ private interface CompositeType {
+ /**
+ * Returns true if there are no type variables in this type.
+ */
+ boolean isFullySpecified();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java b/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java
new file mode 100644
index 0000000..cc5a23a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+/**
+ * Thrown when a computing function returns null. This subclass exists so
+ * that our ReferenceCache adapter can differentiate null output from null
+ * keys, but we don't want to make this public otherwise.
+ *
+ * @author Bob Lee
+ */
+class NullOutputException extends NullPointerException {
+ public NullOutputException(String s) {
+ super(s);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java b/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java
new file mode 100644
index 0000000..621ff4d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java
@@ -0,0 +1,32 @@
+package org.elasticsearch.common.inject.internal;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * Whether a member supports injection of null values.
+ *
+ * <p>Support for {@code Nullable} annotations in Guice is loose.
+ * Any annotation type whose simple name is "Nullable" is sufficient to
+ * indicate support for injected null values.
+ *
+ * <p>This allows support for JSR-305's
+ * <a href="http://groups.google.com/group/jsr-305/web/proposed-annotations">
+ * javax.annotation.meta.Nullable</a> annotation and IntelliJ IDEA's
+ * <a href="http://www.jetbrains.com/idea/documentation/howto.html">
+ * org.jetbrains.annotations.Nullable</a>.
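+ * <p>Illustrative usage (the {@code Service} method is hypothetical):
+ * <pre>{@code
+ * Method m = Service.class.getMethod("setName", String.class);
+ * boolean allowsNull = Nullability.allowsNull(m.getParameterAnnotations()[0]);
+ * }</pre>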
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class Nullability {
+ private Nullability() {
+ }
+
+ public static boolean allowsNull(Annotation[] annotations) {
+ for (Annotation a : annotations) {
+ if ("Nullable".equals(a.annotationType().getSimpleName())) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java b/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java
new file mode 100644
index 0000000..ec48aed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import java.lang.annotation.*;
+
+/**
+ * The presence of this annotation on a method parameter indicates that
+ * {@code null} is an acceptable value for that parameter. It should not be
+ * used for parameters of primitive types.
+ *
+ * <p>This annotation may be used with the Google Web Toolkit (GWT).
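+ * <p>Illustrative usage (hypothetical constructor):
+ * <pre>{@code
+ * public Widget(@Nullable String label) { this.label = label; }
+ * }</pre>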
+ *
+ * @author Kevin Bourrillion
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.PARAMETER, ElementType.FIELD})
+public @interface Nullable {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java
new file mode 100644
index 0000000..39bdff1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java
@@ -0,0 +1,142 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.PrivateBinder;
+import org.elasticsearch.common.inject.spi.Element;
+import org.elasticsearch.common.inject.spi.ElementVisitor;
+import org.elasticsearch.common.inject.spi.PrivateElements;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.*;
+
+/**
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public final class PrivateElementsImpl implements PrivateElements {
+
+ /*
+ * This class acts as both a value object and as a builder. When getElements() is called, an
+ * immutable collection of elements is constructed and the original mutable list is nulled out.
+ * Similarly, the exposed keys are made immutable on access.
+ */
+
+ private final Object source;
+
+ private List<Element> elementsMutable = Lists.newArrayList();
+ private List<ExposureBuilder<?>> exposureBuilders = Lists.newArrayList();
+
+ /**
+ * lazily instantiated
+ */
+ private ImmutableList<Element> elements;
+
+ /**
+ * lazily instantiated
+ */
+ private ImmutableMap<Key<?>, Object> exposedKeysToSources;
+ private Injector injector;
+
+ public PrivateElementsImpl(Object source) {
+ this.source = checkNotNull(source, "source");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public List<Element> getElements() {
+ if (elements == null) {
+ elements = ImmutableList.copyOf(elementsMutable);
+ elementsMutable = null;
+ }
+
+ return elements;
+ }
+
+ public Injector getInjector() {
+ return injector;
+ }
+
+ public void initInjector(Injector injector) {
+ checkState(this.injector == null, "injector already initialized");
+ this.injector = checkNotNull(injector, "injector");
+ }
+
+ public Set<Key<?>> getExposedKeys() {
+ if (exposedKeysToSources == null) {
+ Map<Key<?>, Object> exposedKeysToSourcesMutable = Maps.newLinkedHashMap();
+ for (ExposureBuilder<?> exposureBuilder : exposureBuilders) {
+ exposedKeysToSourcesMutable.put(exposureBuilder.getKey(), exposureBuilder.getSource());
+ }
+ exposedKeysToSources = ImmutableMap.copyOf(exposedKeysToSourcesMutable);
+ exposureBuilders = null;
+ }
+
+ return exposedKeysToSources.keySet();
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ public List<Element> getElementsMutable() {
+ return elementsMutable;
+ }
+
+ public void addExposureBuilder(ExposureBuilder<?> exposureBuilder) {
+ exposureBuilders.add(exposureBuilder);
+ }
+
+ public void applyTo(Binder binder) {
+ PrivateBinder privateBinder = binder.withSource(source).newPrivateBinder();
+
+ for (Element element : getElements()) {
+ element.applyTo(privateBinder);
+ }
+
+ getExposedKeys(); // ensure exposedKeysToSources is populated
+ for (Map.Entry<Key<?>, Object> entry : exposedKeysToSources.entrySet()) {
+ privateBinder.withSource(entry.getValue()).expose(entry.getKey());
+ }
+ }
+
+ public Object getExposedSource(Key<?> key) {
+ getExposedKeys(); // ensure exposedKeysToSources is populated
+ Object source = exposedKeysToSources.get(key);
+ checkArgument(source != null, "%s not exposed by %s.", key, this);
+ return source;
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(PrivateElements.class)
+ .add("exposedKeys", getExposedKeys())
+ .add("source", getSource())
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java
new file mode 100644
index 0000000..b384b79
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java
@@ -0,0 +1,92 @@
+/*
+Copyright (C) 2007 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.spi.*;
+
+import java.util.Set;
+
+public final class ProviderInstanceBindingImpl<T> extends BindingImpl<T>
+ implements ProviderInstanceBinding<T> {
+
+ final Provider<? extends T> providerInstance;
+ final ImmutableSet<InjectionPoint> injectionPoints;
+
+ public ProviderInstanceBindingImpl(Injector injector, Key<T> key,
+ Object source, InternalFactory<? extends T> internalFactory, Scoping scoping,
+ Provider<? extends T> providerInstance,
+ Set<InjectionPoint> injectionPoints) {
+ super(injector, key, source, internalFactory, scoping);
+ this.providerInstance = providerInstance;
+ this.injectionPoints = ImmutableSet.copyOf(injectionPoints);
+ }
+
+ public ProviderInstanceBindingImpl(Object source, Key<T> key, Scoping scoping,
+ Set<InjectionPoint> injectionPoints, Provider<? extends T> providerInstance) {
+ super(source, key, scoping);
+ this.injectionPoints = ImmutableSet.copyOf(injectionPoints);
+ this.providerInstance = providerInstance;
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public Provider<? extends T> getProviderInstance() {
+ return providerInstance;
+ }
+
+ public Set<InjectionPoint> getInjectionPoints() {
+ return injectionPoints;
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return providerInstance instanceof HasDependencies
+ ? ImmutableSet.copyOf(((HasDependencies) providerInstance).getDependencies())
+ : Dependency.forInjectionPoints(injectionPoints);
+ }
+
+ public BindingImpl<T> withScoping(Scoping scoping) {
+ return new ProviderInstanceBindingImpl<T>(
+ getSource(), getKey(), scoping, injectionPoints, providerInstance);
+ }
+
+ public BindingImpl<T> withKey(Key<T> key) {
+ return new ProviderInstanceBindingImpl<T>(
+ getSource(), key, getScoping(), injectionPoints, providerInstance);
+ }
+
+ public void applyTo(Binder binder) {
+ getScoping().applyTo(
+ binder.withSource(getSource()).bind(getKey()).toProvider(getProviderInstance()));
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(ProviderInstanceBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .add("scope", getScoping())
+ .add("provider", providerInstance)
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java
new file mode 100644
index 0000000..6872fa3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java
@@ -0,0 +1,111 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.ProviderWithDependencies;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * A provider that invokes a method and returns its result.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class ProviderMethod<T> implements ProviderWithDependencies<T> {
+ private final Key<T> key;
+ private final Class<? extends Annotation> scopeAnnotation;
+ private final Object instance;
+ private final Method method;
+ private final ImmutableSet<Dependency<?>> dependencies;
+ private final List<Provider<?>> parameterProviders;
+ private final boolean exposed;
+
+ /**
+ * @param method the method to invoke. Its return type must be the same type as {@code key}.
+ */
+ ProviderMethod(Key<T> key, Method method, Object instance,
+ ImmutableSet<Dependency<?>> dependencies, List<Provider<?>> parameterProviders,
+ Class<? extends Annotation> scopeAnnotation) {
+ this.key = key;
+ this.scopeAnnotation = scopeAnnotation;
+ this.instance = instance;
+ this.dependencies = dependencies;
+ this.method = method;
+ this.parameterProviders = parameterProviders;
+ this.exposed = method.getAnnotation(Exposed.class) != null;
+
+ method.setAccessible(true);
+ }
+
+ public Key<T> getKey() {
+ return key;
+ }
+
+ public Method getMethod() {
+ return method;
+ }
+
+ // exposed for GIN
+ public Object getInstance() {
+ return instance;
+ }
+
+ public void configure(Binder binder) {
+ binder = binder.withSource(method);
+
+ if (scopeAnnotation != null) {
+ binder.bind(key).toProvider(this).in(scopeAnnotation);
+ } else {
+ binder.bind(key).toProvider(this);
+ }
+
+ if (exposed) {
+ // the cast is safe because the only binder we have implements PrivateBinder. If there's a
+ // misplaced @Exposed, calling this will add an error to the binder's error queue
+ ((PrivateBinder) binder).expose(key);
+ }
+ }
+
+ public T get() {
+ Object[] parameters = new Object[parameterProviders.size()];
+ for (int i = 0; i < parameters.length; i++) {
+ parameters[i] = parameterProviders.get(i).get();
+ }
+
+ try {
+ // We know this cast is safe because T is the method's return type.
+ @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"})
+ T result = (T) method.invoke(instance, parameters);
+ return result;
+ } catch (IllegalAccessException e) {
+ throw new AssertionError(e);
+ } catch (InvocationTargetException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return dependencies;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java
new file mode 100644
index 0000000..6c29361
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java
@@ -0,0 +1,132 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.util.Modules;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Member;
+import java.lang.reflect.Method;
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Creates bindings to methods annotated with {@literal @}{@link Provides}. Use the scope and
+ * binding annotations on the provider method to configure the binding.
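+ * <p>Illustrative sketch (the module and types are hypothetical):
+ * <pre>{@code
+ * class ClockModule implements Module {
+ *   public void configure(Binder binder) {}
+ *
+ *   @Provides @Singleton
+ *   Clock provideClock() { return new SystemClock(); }
+ * }
+ * // ProviderMethodsModule.forModule(new ClockModule()) binds Clock
+ * // to the result of provideClock(), in singleton scope.
+ * }</pre>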
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public final class ProviderMethodsModule implements Module {
+ private final Object delegate;
+ private final TypeLiteral<?> typeLiteral;
+
+ private ProviderMethodsModule(Object delegate) {
+ this.delegate = checkNotNull(delegate, "delegate");
+ this.typeLiteral = TypeLiteral.get(this.delegate.getClass());
+ }
+
+ /**
+ * Returns a module which creates bindings for provider methods from the given module.
+ */
+ public static Module forModule(Module module) {
+ return forObject(module);
+ }
+
+ /**
+ * Returns a module which creates bindings for provider methods from the given object.
+ * This is particularly useful for <a href="http://code.google.com/p/google-gin/">GIN</a>.
+ */
+ public static Module forObject(Object object) {
+ // avoid infinite recursion, since installing a module always installs itself
+ if (object instanceof ProviderMethodsModule) {
+ return Modules.EMPTY_MODULE;
+ }
+
+ return new ProviderMethodsModule(object);
+ }
+
+ public synchronized void configure(Binder binder) {
+ for (ProviderMethod<?> providerMethod : getProviderMethods(binder)) {
+ providerMethod.configure(binder);
+ }
+ }
+
+ public List<ProviderMethod<?>> getProviderMethods(Binder binder) {
+ List<ProviderMethod<?>> result = Lists.newArrayList();
+ for (Class<?> c = delegate.getClass(); c != Object.class; c = c.getSuperclass()) {
+ for (Method method : c.getDeclaredMethods()) {
+ if (method.getAnnotation(Provides.class) != null) {
+ result.add(createProviderMethod(binder, method));
+ }
+ }
+ }
+ return result;
+ }
+
+ <T> ProviderMethod<T> createProviderMethod(Binder binder, final Method method) {
+ binder = binder.withSource(method);
+ Errors errors = new Errors(method);
+
+ // prepare the parameter providers
+ List<Dependency<?>> dependencies = Lists.newArrayList();
+ List<Provider<?>> parameterProviders = Lists.newArrayList();
+ List<TypeLiteral<?>> parameterTypes = typeLiteral.getParameterTypes(method);
+ Annotation[][] parameterAnnotations = method.getParameterAnnotations();
+ for (int i = 0; i < parameterTypes.size(); i++) {
+ Key<?> key = getKey(errors, parameterTypes.get(i), method, parameterAnnotations[i]);
+ dependencies.add(Dependency.get(key));
+ parameterProviders.add(binder.getProvider(key));
+ }
+
+ @SuppressWarnings("unchecked") // Define T as the method's return type.
+ TypeLiteral<T> returnType = (TypeLiteral<T>) typeLiteral.getReturnType(method);
+
+ Key<T> key = getKey(errors, returnType, method, method.getAnnotations());
+ Class<? extends Annotation> scopeAnnotation
+ = Annotations.findScopeAnnotation(errors, method.getAnnotations());
+
+ for (Message message : errors.getMessages()) {
+ binder.addError(message);
+ }
+
+ return new ProviderMethod<T>(key, method, delegate, ImmutableSet.copyOf(dependencies),
+ parameterProviders, scopeAnnotation);
+ }
+
+ <T> Key<T> getKey(Errors errors, TypeLiteral<T> type, Member member, Annotation[] annotations) {
+ Annotation bindingAnnotation = Annotations.findBindingAnnotation(errors, member, annotations);
+ return bindingAnnotation == null ? Key.get(type) : Key.get(type, bindingAnnotation);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ProviderMethodsModule
+ && ((ProviderMethodsModule) o).delegate == delegate;
+ }
+
+ @Override
+ public int hashCode() {
+ return delegate.hashCode();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Scoping.java b/src/main/java/org/elasticsearch/common/inject/internal/Scoping.java
new file mode 100644
index 0000000..a069187
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Scoping.java
@@ -0,0 +1,223 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Scope;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.inject.Singleton;
+import org.elasticsearch.common.inject.Stage;
+import org.elasticsearch.common.inject.binder.ScopedBindingBuilder;
+import org.elasticsearch.common.inject.spi.BindingScopingVisitor;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * References a scope, either directly (as a scope instance), or indirectly (as a scope annotation).
+ * Whether the scope is eager or lazy is also exposed.
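+ * <p>For example (illustrative): {@code Scoping.forAnnotation(Singleton.class)}
+ * yields {@link #SINGLETON_ANNOTATION}, and
+ * {@code Scoping.forInstance(Scopes.SINGLETON)} yields {@link #SINGLETON_INSTANCE}.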
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public abstract class Scoping {
+
+ /**
+ * No scoping annotation has been applied. Note that this is different from {@code
+ * in(Scopes.NO_SCOPE)}, where the 'NO_SCOPE' has been explicitly applied.
+ */
+ public static final Scoping UNSCOPED = new Scoping() {
+ public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
+ return visitor.visitNoScoping();
+ }
+
+ @Override
+ public Scope getScopeInstance() {
+ return Scopes.NO_SCOPE;
+ }
+
+ @Override
+ public String toString() {
+ return Scopes.NO_SCOPE.toString();
+ }
+
+ public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
+ // do nothing
+ }
+ };
+
+ public static final Scoping SINGLETON_ANNOTATION = new Scoping() {
+ public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
+ return visitor.visitScopeAnnotation(Singleton.class);
+ }
+
+ @Override
+ public Class<? extends Annotation> getScopeAnnotation() {
+ return Singleton.class;
+ }
+
+ @Override
+ public String toString() {
+ return Singleton.class.getName();
+ }
+
+ public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
+ scopedBindingBuilder.in(Singleton.class);
+ }
+ };
+
+ public static final Scoping SINGLETON_INSTANCE = new Scoping() {
+ public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
+ return visitor.visitScope(Scopes.SINGLETON);
+ }
+
+ @Override
+ public Scope getScopeInstance() {
+ return Scopes.SINGLETON;
+ }
+
+ @Override
+ public String toString() {
+ return Scopes.SINGLETON.toString();
+ }
+
+ public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
+ scopedBindingBuilder.in(Scopes.SINGLETON);
+ }
+ };
+
+ public static final Scoping EAGER_SINGLETON = new Scoping() {
+ public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
+ return visitor.visitEagerSingleton();
+ }
+
+ @Override
+ public Scope getScopeInstance() {
+ return Scopes.SINGLETON;
+ }
+
+ @Override
+ public String toString() {
+ return "eager singleton";
+ }
+
+ public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
+ scopedBindingBuilder.asEagerSingleton();
+ }
+ };
+
+ public static Scoping forAnnotation(final Class<? extends Annotation> scopingAnnotation) {
+ if (scopingAnnotation == Singleton.class) {
+ return SINGLETON_ANNOTATION;
+ }
+
+ return new Scoping() {
+ public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
+ return visitor.visitScopeAnnotation(scopingAnnotation);
+ }
+
+ @Override
+ public Class<? extends Annotation> getScopeAnnotation() {
+ return scopingAnnotation;
+ }
+
+ @Override
+ public String toString() {
+ return scopingAnnotation.getName();
+ }
+
+ public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
+ scopedBindingBuilder.in(scopingAnnotation);
+ }
+ };
+ }
+
+ public static Scoping forInstance(final Scope scope) {
+ if (scope == Scopes.SINGLETON) {
+ return SINGLETON_INSTANCE;
+ }
+
+ return new Scoping() {
+ public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
+ return visitor.visitScope(scope);
+ }
+
+ @Override
+ public Scope getScopeInstance() {
+ return scope;
+ }
+
+ @Override
+ public String toString() {
+ return scope.toString();
+ }
+
+ public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
+ scopedBindingBuilder.in(scope);
+ }
+ };
+ }
+
+ /**
+ * Returns true if this scope was explicitly applied. If no scope was explicitly applied then the
+ * scoping annotation will be used.
+ */
+ public boolean isExplicitlyScoped() {
+ return this != UNSCOPED;
+ }
+
+ /**
+ * Returns true if this is the default scope. In this case a new instance will be provided for
+ * each injection.
+ */
+ public boolean isNoScope() {
+ return getScopeInstance() == Scopes.NO_SCOPE;
+ }
+
+ /**
+ * Returns true if this scope is a singleton that should be loaded eagerly in {@code stage}.
+ */
+ public boolean isEagerSingleton(Stage stage) {
+ if (this == EAGER_SINGLETON) {
+ return true;
+ }
+
+ if (stage == Stage.PRODUCTION) {
+ return this == SINGLETON_ANNOTATION || this == SINGLETON_INSTANCE;
+ }
+
+ return false;
+ }
+
+ /**
+ * Returns the scope instance, or {@code null} if that isn't known for this instance.
+ */
+ public Scope getScopeInstance() {
+ return null;
+ }
+
+ /**
+ * Returns the scope annotation, or {@code null} if that isn't known for this instance.
+ */
+ public Class<? extends Annotation> getScopeAnnotation() {
+ return null;
+ }
+
+ public abstract <V> V acceptVisitor(BindingScopingVisitor<V> visitor);
+
+ public abstract void applyTo(ScopedBindingBuilder scopedBindingBuilder);
+
+ private Scoping() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/SourceProvider.java b/src/main/java/org/elasticsearch/common/inject/internal/SourceProvider.java
new file mode 100644
index 0000000..4c34be0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/SourceProvider.java
@@ -0,0 +1,81 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Provides access to the calling line of code.
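+ * <p/>
+ * <p>For illustration, a hedged usage sketch ({@code MyBindingBuilder} is a
+ * hypothetical caller whose stack frames should be skipped):
+ * <pre>
+ *   StackTraceElement source = SourceProvider.DEFAULT_INSTANCE
+ *       .plusSkippedClasses(MyBindingBuilder.class)
+ *       .get();
+ * </pre>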
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class SourceProvider {
+
+ /**
+ * Indicates that the source is unknown.
+ */
+ public static final Object UNKNOWN_SOURCE = "[unknown source]";
+
+ private final ImmutableSet<String> classNamesToSkip;
+
+ public SourceProvider() {
+ this.classNamesToSkip = ImmutableSet.of(SourceProvider.class.getName());
+ }
+
+ public static final SourceProvider DEFAULT_INSTANCE
+ = new SourceProvider(ImmutableSet.of(SourceProvider.class.getName()));
+
+ private SourceProvider(Iterable<String> classesToSkip) {
+ this.classNamesToSkip = ImmutableSet.copyOf(classesToSkip);
+ }
+
+ /**
+ * Returns a new instance that also skips {@code moreClassesToSkip}.
+ */
+ public SourceProvider plusSkippedClasses(Class... moreClassesToSkip) {
+ return new SourceProvider(Iterables.concat(classNamesToSkip, asStrings(moreClassesToSkip)));
+ }
+
+ /**
+ * Returns the class names as Strings.
+ */
+ private static List<String> asStrings(Class... classes) {
+ List<String> strings = Lists.newArrayList();
+ for (Class c : classes) {
+ strings.add(c.getName());
+ }
+ return strings;
+ }
+
+ /**
+ * Returns the calling line of code. The selected line is the nearest to the top of the stack that
+ * is not skipped.
+ */
+ public StackTraceElement get() {
+ for (final StackTraceElement element : new Throwable().getStackTrace()) {
+ String className = element.getClassName();
+ if (!classNamesToSkip.contains(className)) {
+ return element;
+ }
+ }
+ throw new AssertionError();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java b/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java
new file mode 100644
index 0000000..2b55ddd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java
@@ -0,0 +1,50 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Member;
+
+/**
+ * Creates stack trace elements for members.
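+ * <p/>
+ * <p>For illustration ({@code method} is any reflected member, an assumed value):
+ * <pre>
+ *   Object source = StackTraceElements.forMember(method);
+ *   // yields a StackTraceElement such as "com.example.Foo.bar(Unknown Source)"
+ * </pre>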
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class StackTraceElements {
+
+ public static Object forMember(Member member) {
+ if (member == null) {
+ return SourceProvider.UNKNOWN_SOURCE;
+ }
+
+ Class declaringClass = member.getDeclaringClass();
+
+ String fileName = null;
+ int lineNumber = -1;
+
+ Class<? extends Member> memberType = MoreTypes.memberType(member);
+ String memberName = memberType == Constructor.class ? "<init>" : member.getName();
+ return new StackTraceElement(declaringClass.getName(), memberName, fileName, lineNumber);
+ }
+
+ public static Object forType(Class<?> implementation) {
+ String fileName = null;
+ int lineNumber = -1;
+
+ return new StackTraceElement(implementation.getName(), "class", fileName, lineNumber);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Stopwatch.java b/src/main/java/org/elasticsearch/common/inject/internal/Stopwatch.java
new file mode 100644
index 0000000..eab5f22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Stopwatch.java
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import java.util.logging.Logger;
+
+/**
+ * Enables simple performance monitoring.
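+ * <p/>
+ * <p>For illustration, an assumed usage sketch ({@code buildInjector()} stands
+ * in for any timed work):
+ * <pre>
+ *   Stopwatch stopwatch = new Stopwatch();
+ *   buildInjector();
+ *   stopwatch.resetAndLog("Injector construction"); // logs e.g. "Injector construction: 12ms"
+ * </pre>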
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Stopwatch {
+ private static final Logger logger = Logger.getLogger(Stopwatch.class.getName());
+
+ private long start = System.currentTimeMillis();
+
+ /**
+ * Resets and returns elapsed time in milliseconds.
+ */
+ public long reset() {
+ long now = System.currentTimeMillis();
+ try {
+ return now - start;
+ } finally {
+ start = now;
+ }
+ }
+
+ /**
+ * Resets and logs elapsed time in milliseconds.
+ */
+ public void resetAndLog(String label) {
+ logger.fine(label + ": " + reset() + "ms");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Strings.java b/src/main/java/org/elasticsearch/common/inject/internal/Strings.java
new file mode 100644
index 0000000..7c2e703
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/Strings.java
@@ -0,0 +1,58 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+/**
+ * String utilities.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Strings {
+ private Strings() {
+ }
+
+ /**
+ * Returns a string that is equivalent to the specified string with its
+ * first character converted to uppercase as by {@link String#toUpperCase}.
+ * The returned string will have the same value as the specified string if
+ * its first character is non-alphabetic, if its first character is already
+ * uppercase, or if the specified string is of length 0.
+ * <p/>
+ * <p>For example:
+ * <pre>
+ * capitalize("foo bar").equals("Foo bar");
+ * capitalize("2b or not 2b").equals("2b or not 2b")
+ * capitalize("Foo bar").equals("Foo bar");
+ * capitalize("").equals("");
+ * </pre>
+ *
+ * @param s the string whose first character is to be uppercased
+ * @return a string equivalent to <tt>s</tt> with its first character
+ * converted to uppercase
+ * @throws NullPointerException if <tt>s</tt> is null
+ */
+ public static String capitalize(String s) {
+ if (s.length() == 0) {
+ return s;
+ }
+ char first = s.charAt(0);
+ char capitalized = Character.toUpperCase(first);
+ return (first == capitalized)
+ ? s
+ : capitalized + s.substring(1);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java b/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java
new file mode 100644
index 0000000..1e6f423
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Helps with {@code toString()} methods.
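+ * <p/>
+ * <p>For illustration, an assumed usage sketch ({@code key} and {@code source}
+ * are placeholder values):
+ * <pre>
+ *   new ToStringBuilder("Binding")
+ *       .add("key", key)
+ *       .add("source", source)
+ *       .toString(); // "Binding[key=..., source=...]"
+ * </pre>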
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class ToStringBuilder {
+
+ // Linked hash map ensures ordering.
+ final Map<String, Object> map = new LinkedHashMap<String, Object>();
+
+ final String name;
+
+ public ToStringBuilder(String name) {
+ this.name = name;
+ }
+
+ public ToStringBuilder(Class type) {
+ this.name = type.getSimpleName();
+ }
+
+ public ToStringBuilder add(String name, Object value) {
+ if (map.put(name, value) != null) {
+ throw new RuntimeException("Duplicate names: " + name);
+ }
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return name + map.toString().replace('{', '[').replace('}', ']');
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/UniqueAnnotations.java b/src/main/java/org/elasticsearch/common/inject/internal/UniqueAnnotations.java
new file mode 100644
index 0000000..eee4535
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/UniqueAnnotations.java
@@ -0,0 +1,77 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Annotation;
+import java.lang.annotation.Retention;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public class UniqueAnnotations {
+ private UniqueAnnotations() {
+ }
+
+ private static final AtomicInteger nextUniqueValue = new AtomicInteger(1);
+
+ /**
+ * Returns an annotation instance that is not equal to any other annotation
+ * instances, for use in creating distinct {@link org.elasticsearch.common.inject.Key}s.
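+ * <p/>
+ * <p>For illustration, two keys built this way never collide:
+ * <pre>
+ *   Key&lt;String&gt; a = Key.get(String.class, UniqueAnnotations.create());
+ *   Key&lt;String&gt; b = Key.get(String.class, UniqueAnnotations.create());
+ *   // a.equals(b) is false, even though both carry String.class
+ * </pre>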
+ */
+ public static Annotation create() {
+ return create(nextUniqueValue.getAndIncrement());
+ }
+
+ static Annotation create(final int value) {
+ return new Internal() {
+ public int value() {
+ return value;
+ }
+
+ public Class<? extends Annotation> annotationType() {
+ return Internal.class;
+ }
+
+ @Override
+ public String toString() {
+ return "@" + Internal.class.getName() + "(value=" + value + ")";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof Internal
+ && ((Internal) o).value() == value();
+ }
+
+ @Override
+ public int hashCode() {
+ return (127 * "value".hashCode()) ^ value;
+ }
+ };
+ }
+
+ @Retention(RUNTIME)
+ @BindingAnnotation
+ @interface Internal {
+ int value();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java b/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java
new file mode 100644
index 0000000..cf3b5ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.internal;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.UntargettedBinding;
+
+public class UntargettedBindingImpl<T> extends BindingImpl<T> implements UntargettedBinding<T> {
+
+ public UntargettedBindingImpl(Injector injector, Key<T> key, Object source) {
+ super(injector, key, source, new InternalFactory<T>() {
+ public T get(Errors errors, InternalContext context, Dependency<?> dependency) {
+ throw new AssertionError();
+ }
+ }, Scoping.UNSCOPED);
+ }
+
+ public UntargettedBindingImpl(Object source, Key<T> key, Scoping scoping) {
+ super(source, key, scoping);
+ }
+
+ public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
+ return visitor.visit(this);
+ }
+
+ public BindingImpl<T> withScoping(Scoping scoping) {
+ return new UntargettedBindingImpl<T>(getSource(), getKey(), scoping);
+ }
+
+ public BindingImpl<T> withKey(Key<T> key) {
+ return new UntargettedBindingImpl<T>(getSource(), key, getScoping());
+ }
+
+ public void applyTo(Binder binder) {
+ getScoping().applyTo(binder.withSource(getSource()).bind(getKey()));
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(UntargettedBinding.class)
+ .add("key", getKey())
+ .add("source", getSource())
+ .toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/internal/package-info.java b/src/main/java/org/elasticsearch/common/inject/internal/package-info.java
new file mode 100644
index 0000000..7c41943
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/internal/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <i>Guice</i> (sounds like "juice")
+ */
+
+package org.elasticsearch.common.inject.internal; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java b/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java
new file mode 100644
index 0000000..7144e3d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java
@@ -0,0 +1,99 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.matcher;
+
+import java.io.Serializable;
+
+/**
+ * Implements {@code and()} and {@code or()}.
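+ * <p/>
+ * <p>For illustration, a sketch combining two matchers (the {@code com.example}
+ * package name is an assumption):
+ * <pre>
+ *   Matcher&lt;Class&gt; matcher = Matchers.subclassesOf(Runnable.class)
+ *       .and(Matchers.inSubpackage("com.example"));
+ * </pre>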
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public abstract class AbstractMatcher<T> implements Matcher<T> {
+
+ public Matcher<T> and(final Matcher<? super T> other) {
+ return new AndMatcher<T>(this, other);
+ }
+
+ public Matcher<T> or(Matcher<? super T> other) {
+ return new OrMatcher<T>(this, other);
+ }
+
+ private static class AndMatcher<T> extends AbstractMatcher<T> implements Serializable {
+ private final Matcher<? super T> a, b;
+
+ public AndMatcher(Matcher<? super T> a, Matcher<? super T> b) {
+ this.a = a;
+ this.b = b;
+ }
+
+ public boolean matches(T t) {
+ return a.matches(t) && b.matches(t);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof AndMatcher
+ && ((AndMatcher) other).a.equals(a)
+ && ((AndMatcher) other).b.equals(b);
+ }
+
+ @Override
+ public int hashCode() {
+ return 41 * (a.hashCode() ^ b.hashCode());
+ }
+
+ @Override
+ public String toString() {
+ return "and(" + a + ", " + b + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ private static class OrMatcher<T> extends AbstractMatcher<T> implements Serializable {
+ private final Matcher<? super T> a, b;
+
+ public OrMatcher(Matcher<? super T> a, Matcher<? super T> b) {
+ this.a = a;
+ this.b = b;
+ }
+
+ public boolean matches(T t) {
+ return a.matches(t) || b.matches(t);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof OrMatcher
+ && ((OrMatcher) other).a.equals(a)
+ && ((OrMatcher) other).b.equals(b);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * (a.hashCode() ^ b.hashCode());
+ }
+
+ @Override
+ public String toString() {
+ return "or(" + a + ", " + b + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/matcher/Matcher.java b/src/main/java/org/elasticsearch/common/inject/matcher/Matcher.java
new file mode 100644
index 0000000..e2bfa1b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/matcher/Matcher.java
@@ -0,0 +1,42 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.matcher;
+
+/**
+ * Returns {@code true} or {@code false} for a given input.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public interface Matcher<T> {
+
+ /**
+ * Returns {@code true} if this matches {@code t}, {@code false} otherwise.
+ */
+ boolean matches(T t);
+
+ /**
+ * Returns a new matcher which returns {@code true} if both this and the
+ * given matcher return {@code true}.
+ */
+ Matcher<T> and(Matcher<? super T> other);
+
+ /**
+ * Returns a new matcher which returns {@code true} if either this or the
+ * given matcher returns {@code true}.
+ */
+ Matcher<T> or(Matcher<? super T> other);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java b/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java
new file mode 100644
index 0000000..1b5d2ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java
@@ -0,0 +1,429 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.matcher;
+
+import java.io.Serializable;
+import java.lang.annotation.Annotation;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.AnnotatedElement;
+import java.lang.reflect.Method;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Matcher implementations. Supports matching classes and methods.
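+ * <p/>
+ * <p>For illustration, an assumed sketch matching methods that are annotated
+ * {@code @Deprecated} and return {@code void}:
+ * <pre>
+ *   Matcher&lt;Method&gt; matcher = Matchers.returns(Matchers.only(void.class))
+ *       .and(Matchers.annotatedWith(Deprecated.class));
+ * </pre>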
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Matchers {
+ private Matchers() {
+ }
+
+ /**
+ * Returns a matcher which matches any input.
+ */
+ public static Matcher<Object> any() {
+ return ANY;
+ }
+
+ private static final Matcher<Object> ANY = new Any();
+
+ private static class Any extends AbstractMatcher<Object> implements Serializable {
+ public boolean matches(Object o) {
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "any()";
+ }
+
+ public Object readResolve() {
+ return any();
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Inverts the given matcher.
+ */
+ public static <T> Matcher<T> not(final Matcher<? super T> p) {
+ return new Not<T>(p);
+ }
+
+ private static class Not<T> extends AbstractMatcher<T> implements Serializable {
+ final Matcher<? super T> delegate;
+
+ private Not(Matcher<? super T> delegate) {
+ this.delegate = checkNotNull(delegate, "delegate");
+ }
+
+ public boolean matches(T t) {
+ return !delegate.matches(t);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof Not
+ && ((Not) other).delegate.equals(delegate);
+ }
+
+ @Override
+ public int hashCode() {
+ return -delegate.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "not(" + delegate + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ private static void checkForRuntimeRetention(
+ Class<? extends Annotation> annotationType) {
+ Retention retention = annotationType.getAnnotation(Retention.class);
+ checkArgument(retention != null && retention.value() == RetentionPolicy.RUNTIME,
+ "Annotation " + annotationType.getSimpleName() + " is missing RUNTIME retention");
+ }
+
+ /**
+ * Returns a matcher which matches elements (methods, classes, etc.)
+ * with a given annotation.
+ */
+ public static Matcher<AnnotatedElement> annotatedWith(
+ final Class<? extends Annotation> annotationType) {
+ return new AnnotatedWithType(annotationType);
+ }
+
+ private static class AnnotatedWithType extends AbstractMatcher<AnnotatedElement>
+ implements Serializable {
+ private final Class<? extends Annotation> annotationType;
+
+ public AnnotatedWithType(Class<? extends Annotation> annotationType) {
+ this.annotationType = checkNotNull(annotationType, "annotation type");
+ checkForRuntimeRetention(annotationType);
+ }
+
+ public boolean matches(AnnotatedElement element) {
+ return element.getAnnotation(annotationType) != null;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof AnnotatedWithType
+ && ((AnnotatedWithType) other).annotationType.equals(annotationType);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * annotationType.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "annotatedWith(" + annotationType.getSimpleName() + ".class)";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches elements (methods, classes, etc.)
+ * with a given annotation.
+ */
+ public static Matcher<AnnotatedElement> annotatedWith(
+ final Annotation annotation) {
+ return new AnnotatedWith(annotation);
+ }
+
+ private static class AnnotatedWith extends AbstractMatcher<AnnotatedElement>
+ implements Serializable {
+ private final Annotation annotation;
+
+ public AnnotatedWith(Annotation annotation) {
+ this.annotation = checkNotNull(annotation, "annotation");
+ checkForRuntimeRetention(annotation.annotationType());
+ }
+
+ public boolean matches(AnnotatedElement element) {
+ Annotation fromElement = element.getAnnotation(annotation.annotationType());
+ return fromElement != null && annotation.equals(fromElement);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof AnnotatedWith
+ && ((AnnotatedWith) other).annotation.equals(annotation);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * annotation.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "annotatedWith(" + annotation + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches subclasses of the given type (as well as
+ * the given type).
+ */
+ public static Matcher<Class> subclassesOf(final Class<?> superclass) {
+ return new SubclassesOf(superclass);
+ }
+
+ private static class SubclassesOf extends AbstractMatcher<Class>
+ implements Serializable {
+ private final Class<?> superclass;
+
+ public SubclassesOf(Class<?> superclass) {
+ this.superclass = checkNotNull(superclass, "superclass");
+ }
+
+ public boolean matches(Class subclass) {
+ return superclass.isAssignableFrom(subclass);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof SubclassesOf
+ && ((SubclassesOf) other).superclass.equals(superclass);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * superclass.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "subclassesOf(" + superclass.getSimpleName() + ".class)";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches objects equal to the given object.
+ */
+ public static Matcher<Object> only(Object value) {
+ return new Only(value);
+ }
+
+ private static class Only extends AbstractMatcher<Object>
+ implements Serializable {
+ private final Object value;
+
+ public Only(Object value) {
+ this.value = checkNotNull(value, "value");
+ }
+
+ public boolean matches(Object other) {
+ return value.equals(other);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof Only
+ && ((Only) other).value.equals(value);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * value.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "only(" + value + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches only the given object.
+ */
+ public static Matcher<Object> identicalTo(final Object value) {
+ return new IdenticalTo(value);
+ }
+
+ private static class IdenticalTo extends AbstractMatcher<Object>
+ implements Serializable {
+ private final Object value;
+
+ public IdenticalTo(Object value) {
+ this.value = checkNotNull(value, "value");
+ }
+
+ public boolean matches(Object other) {
+ return value == other;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof IdenticalTo
+ && ((IdenticalTo) other).value == value;
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * System.identityHashCode(value);
+ }
+
+ @Override
+ public String toString() {
+ return "identicalTo(" + value + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches classes in the given package. Packages are specific to their
+ * classloader, so classes with the same package name may not have the same package at runtime.
+ */
+ public static Matcher<Class> inPackage(final Package targetPackage) {
+ return new InPackage(targetPackage);
+ }
+
+ private static class InPackage extends AbstractMatcher<Class> implements Serializable {
+ private final transient Package targetPackage;
+ private final String packageName;
+
+ public InPackage(Package targetPackage) {
+ this.targetPackage = checkNotNull(targetPackage, "package");
+ this.packageName = targetPackage.getName();
+ }
+
+ public boolean matches(Class c) {
+ return c.getPackage().equals(targetPackage);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof InPackage
+ && ((InPackage) other).targetPackage.equals(targetPackage);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * targetPackage.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "inPackage(" + targetPackage.getName() + ")";
+ }
+
+ public Object readResolve() {
+ return inPackage(Package.getPackage(packageName));
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches classes in the given package and its subpackages. Unlike
+ * {@link #inPackage(Package) inPackage()}, this matches classes from any classloader.
+ *
+ * @since 2.0
+ */
+ public static Matcher<Class> inSubpackage(final String targetPackageName) {
+ return new InSubpackage(targetPackageName);
+ }
+
+ private static class InSubpackage extends AbstractMatcher<Class> implements Serializable {
+ private final String targetPackageName;
+
+ public InSubpackage(String targetPackageName) {
+ this.targetPackageName = targetPackageName;
+ }
+
+ public boolean matches(Class c) {
+ String classPackageName = c.getPackage().getName();
+ return classPackageName.equals(targetPackageName)
+ || classPackageName.startsWith(targetPackageName + ".");
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof InSubpackage
+ && ((InSubpackage) other).targetPackageName.equals(targetPackageName);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * targetPackageName.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "inSubpackage(" + targetPackageName + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+
+ /**
+ * Returns a matcher which matches methods with matching return types.
+ */
+ public static Matcher<Method> returns(
+ final Matcher<? super Class<?>> returnType) {
+ return new Returns(returnType);
+ }
+
+ private static class Returns extends AbstractMatcher<Method> implements Serializable {
+ private final Matcher<? super Class<?>> returnType;
+
+ public Returns(Matcher<? super Class<?>> returnType) {
+ this.returnType = checkNotNull(returnType, "return type matcher");
+ }
+
+ public boolean matches(Method m) {
+ return returnType.matches(m.getReturnType());
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof Returns
+ && ((Returns) other).returnType.equals(returnType);
+ }
+
+ @Override
+ public int hashCode() {
+ return 37 * returnType.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "returns(" + returnType + ")";
+ }
+
+ private static final long serialVersionUID = 0;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/matcher/package-info.java b/src/main/java/org/elasticsearch/common/inject/matcher/package-info.java
new file mode 100644
index 0000000..15aff76
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/matcher/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Used for matching things. Primarily used to pick out methods to which to
+ * apply interceptors.
+ */
+package org.elasticsearch.common.inject.matcher; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/multibindings/Element.java b/src/main/java/org/elasticsearch/common/inject/multibindings/Element.java
new file mode 100644
index 0000000..02323dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/multibindings/Element.java
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject.multibindings;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Retention;
+
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * An internal binding annotation applied to each element in a multibinding.
+ * All elements are assigned a globally-unique id to allow different modules
+ * to contribute multibindings independently.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+@Retention(RUNTIME)
+@BindingAnnotation
+@interface Element {
+ String setName();
+
+ int uniqueId();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java b/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java
new file mode 100644
index 0000000..fe3aa1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java
@@ -0,0 +1,374 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.multibindings;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.binder.LinkedBindingBuilder;
+import org.elasticsearch.common.inject.multibindings.Multibinder.RealMultibinder;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.ProviderWithDependencies;
+import org.elasticsearch.common.inject.util.Types;
+
+import java.lang.annotation.Annotation;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.elasticsearch.common.inject.util.Types.newParameterizedType;
+import static org.elasticsearch.common.inject.util.Types.newParameterizedTypeWithOwner;
+
+/**
+ * An API to bind multiple map entries separately, only to later inject them as
+ * a complete map. MapBinder is intended for use in your application's module:
+ * <pre><code>
+ * public class SnacksModule extends AbstractModule {
+ * protected void configure() {
+ * MapBinder&lt;String, Snack&gt; mapbinder
+ * = MapBinder.newMapBinder(binder(), String.class, Snack.class);
+ * mapbinder.addBinding("twix").toInstance(new Twix());
+ * mapbinder.addBinding("snickers").toProvider(SnickersProvider.class);
+ * mapbinder.addBinding("skittles").to(Skittles.class);
+ * }
+ * }</code></pre>
+ * <p/>
+ * <p>With this binding, a {@link Map}{@code <String, Snack>} can now be
+ * injected:
+ * <pre><code>
+ * class SnackMachine {
+ * {@literal @}Inject
+ * public SnackMachine(Map&lt;String, Snack&gt; snacks) { ... }
+ * }</code></pre>
+ * <p/>
+ * <p>In addition to binding {@code Map<K, V>}, a mapbinder will also bind
+ * {@code Map<K, Provider<V>>} for lazy value provision:
+ * <pre><code>
+ * class SnackMachine {
+ * {@literal @}Inject
+ * public SnackMachine(Map&lt;String, Provider&lt;Snack&gt;&gt; snackProviders) { ... }
+ * }</code></pre>
+ * <p/>
+ * <p>Creating mapbindings from different modules is supported. For example, it
+ * is okay for both {@code CandyModule} and {@code ChipsModule} to
+ * create their own {@code MapBinder<String, Snack>}, and to each contribute
+ * bindings to the snacks map. When that map is injected, it will contain
+ * entries from both modules.
+ * <p/>
+ * <p>Values are resolved at map injection time. If a value is bound to a
+ * provider, that provider's get method will be called each time the map is
+ * injected (unless the binding is also scoped, or a map of providers is injected).
+ * <p/>
+ * <p>Annotations are used to create different maps of the same key/value
+ * type. Each distinct annotation gets its own independent map.
+ * <p/>
+ * <p><strong>Keys must be distinct.</strong> If the same key is bound more than
+ * once, map injection will fail.
+ * <p/>
+ * <p><strong>Keys must be non-null.</strong> {@code addBinding(null)} will
+ * throw an unchecked exception.
+ * <p/>
+ * <p><strong>Values must be non-null to use map injection.</strong> If any
+ * value is null, map injection will fail (although injecting a map of providers
+ * will not).
+ *
+ * @author dpb@google.com (David P. Baker)
+ */
+public abstract class MapBinder<K, V> {
+ private MapBinder() {
+ }
+
+ /**
+ * Returns a new mapbinder that collects entries of {@code keyType}/{@code valueType} in a
+ * {@link Map} that is itself bound with no binding annotation.
+ */
+ public static <K, V> MapBinder<K, V> newMapBinder(Binder binder,
+ TypeLiteral<K> keyType, TypeLiteral<V> valueType) {
+ binder = binder.skipSources(MapBinder.class, RealMapBinder.class);
+ return newMapBinder(binder, valueType,
+ Key.get(mapOf(keyType, valueType)),
+ Key.get(mapOfProviderOf(keyType, valueType)),
+ Multibinder.newSetBinder(binder, entryOfProviderOf(keyType, valueType)));
+ }
+
+ /**
+ * Returns a new mapbinder that collects entries of {@code keyType}/{@code valueType} in a
+ * {@link Map} that is itself bound with no binding annotation.
+ */
+ public static <K, V> MapBinder<K, V> newMapBinder(Binder binder,
+ Class<K> keyType, Class<V> valueType) {
+ return newMapBinder(binder, TypeLiteral.get(keyType), TypeLiteral.get(valueType));
+ }
+
+ /**
+ * Returns a new mapbinder that collects entries of {@code keyType}/{@code valueType} in a
+ * {@link Map} that is itself bound with {@code annotation}.
+ */
+ public static <K, V> MapBinder<K, V> newMapBinder(Binder binder,
+ TypeLiteral<K> keyType, TypeLiteral<V> valueType, Annotation annotation) {
+ binder = binder.skipSources(MapBinder.class, RealMapBinder.class);
+ return newMapBinder(binder, valueType,
+ Key.get(mapOf(keyType, valueType), annotation),
+ Key.get(mapOfProviderOf(keyType, valueType), annotation),
+ Multibinder.newSetBinder(binder, entryOfProviderOf(keyType, valueType), annotation));
+ }
+
+ /**
+ * Returns a new mapbinder that collects entries of {@code keyType}/{@code valueType} in a
+ * {@link Map} that is itself bound with {@code annotation}.
+ */
+ public static <K, V> MapBinder<K, V> newMapBinder(Binder binder,
+ Class<K> keyType, Class<V> valueType, Annotation annotation) {
+ return newMapBinder(binder, TypeLiteral.get(keyType), TypeLiteral.get(valueType), annotation);
+ }
+
+ /**
+ * Returns a new mapbinder that collects entries of {@code keyType}/{@code valueType} in a
+ * {@link Map} that is itself bound with {@code annotationType}.
+ */
+ public static <K, V> MapBinder<K, V> newMapBinder(Binder binder, TypeLiteral<K> keyType,
+ TypeLiteral<V> valueType, Class<? extends Annotation> annotationType) {
+ binder = binder.skipSources(MapBinder.class, RealMapBinder.class);
+ return newMapBinder(binder, valueType,
+ Key.get(mapOf(keyType, valueType), annotationType),
+ Key.get(mapOfProviderOf(keyType, valueType), annotationType),
+ Multibinder.newSetBinder(binder, entryOfProviderOf(keyType, valueType), annotationType));
+ }
+
+ /**
+ * Returns a new mapbinder that collects entries of {@code keyType}/{@code valueType} in a
+ * {@link Map} that is itself bound with {@code annotationType}.
+ */
+ public static <K, V> MapBinder<K, V> newMapBinder(Binder binder, Class<K> keyType,
+ Class<V> valueType, Class<? extends Annotation> annotationType) {
+ return newMapBinder(
+ binder, TypeLiteral.get(keyType), TypeLiteral.get(valueType), annotationType);
+ }
+
+ @SuppressWarnings("unchecked") // a map of <K, V> is safely a Map<K, V>
+ private static <K, V> TypeLiteral<Map<K, V>> mapOf(
+ TypeLiteral<K> keyType, TypeLiteral<V> valueType) {
+ return (TypeLiteral<Map<K, V>>) TypeLiteral.get(
+ Types.mapOf(keyType.getType(), valueType.getType()));
+ }
+
+ @SuppressWarnings("unchecked") // a provider map <K, V> is safely a Map<K, Provider<V>>
+ private static <K, V> TypeLiteral<Map<K, Provider<V>>> mapOfProviderOf(
+ TypeLiteral<K> keyType, TypeLiteral<V> valueType) {
+ return (TypeLiteral<Map<K, Provider<V>>>) TypeLiteral.get(
+ Types.mapOf(keyType.getType(), newParameterizedType(Provider.class, valueType.getType())));
+ }
+
+ @SuppressWarnings("unchecked") // a provider entry <K, V> is safely a Map.Entry<K, Provider<V>>
+ private static <K, V> TypeLiteral<Map.Entry<K, Provider<V>>> entryOfProviderOf(
+ TypeLiteral<K> keyType, TypeLiteral<V> valueType) {
+ return (TypeLiteral<Entry<K, Provider<V>>>) TypeLiteral.get(newParameterizedTypeWithOwner(
+ Map.class, Entry.class, keyType.getType(), Types.providerOf(valueType.getType())));
+ }
+
+ private static <K, V> MapBinder<K, V> newMapBinder(Binder binder, TypeLiteral<V> valueType,
+ Key<Map<K, V>> mapKey, Key<Map<K, Provider<V>>> providerMapKey,
+ Multibinder<Entry<K, Provider<V>>> entrySetBinder) {
+ RealMapBinder<K, V> mapBinder = new RealMapBinder<K, V>(
+ binder, valueType, mapKey, providerMapKey, entrySetBinder);
+ binder.install(mapBinder);
+ return mapBinder;
+ }
+
+ /**
+ * Returns a binding builder used to add a new entry in the map. Each
+ * key must be distinct (and non-null). Bound providers will be evaluated each
+ * time the map is injected.
+ * <p/>
+ * <p>It is an error to call this method without also calling one of the
+ * {@code to} methods on the returned binding builder.
+ * <p/>
+ * <p>Scoping elements independently is supported. Use the {@code in} method
+ * to specify a binding scope.
+ */
+ public abstract LinkedBindingBuilder<V> addBinding(K key);
+
+ /**
+ * The actual mapbinder plays several roles:
+ * <p/>
+ * <p>As a MapBinder, it acts as a factory for LinkedBindingBuilders for
+ * each of the map's values. It delegates to a {@link Multibinder} of
+ * entries (keys to value providers).
+ * <p/>
+ * <p>As a Module, it installs the binding to the map itself, as well as to
+ * a corresponding map whose values are providers. It uses the entry set
+ * multibinder to construct the map and the provider map.
+ * <p/>
+ * <p>As a module, this implements equals() and hashCode() in order to trick
+ * Guice into executing its configure() method only once. That makes it so
+ * that multiple mapbinders can be created for the same target map, but
+ * only one is bound. Since the list of bindings is retrieved from the
+ * injector itself (and not the mapbinder), each mapbinder has access to
+ * all contributions from all equivalent mapbinders.
+ * <p/>
+ * <p>Rather than binding a single Map.Entry&lt;K, V&gt;, the map binder
+ * binds keys and values independently. This allows the values to be properly
+ * scoped.
+ * <p/>
+ * <p>We use a subclass to hide 'implements Module' from the public API.
+ */
+ private static final class RealMapBinder<K, V> extends MapBinder<K, V> implements Module {
+ private final TypeLiteral<V> valueType;
+ private final Key<Map<K, V>> mapKey;
+ private final Key<Map<K, Provider<V>>> providerMapKey;
+ private final RealMultibinder<Map.Entry<K, Provider<V>>> entrySetBinder;
+
+ /* the target injector's binder. non-null until initialization, null afterwards */
+ private Binder binder;
+
+ private RealMapBinder(Binder binder, TypeLiteral<V> valueType,
+ Key<Map<K, V>> mapKey, Key<Map<K, Provider<V>>> providerMapKey,
+ Multibinder<Map.Entry<K, Provider<V>>> entrySetBinder) {
+ this.valueType = valueType;
+ this.mapKey = mapKey;
+ this.providerMapKey = providerMapKey;
+ this.entrySetBinder = (RealMultibinder<Entry<K, Provider<V>>>) entrySetBinder;
+ this.binder = binder;
+ }
+
+ /**
+ * This creates two bindings. One for the {@code Map.Entry<K, Provider<V>>}
+ * and another for {@code V}.
+ */
+ @Override
+ public LinkedBindingBuilder<V> addBinding(K key) {
+ Multibinder.checkNotNull(key, "key");
+ Multibinder.checkConfiguration(!isInitialized(), "MapBinder was already initialized");
+
+ Key<V> valueKey = Key.get(valueType, new RealElement(entrySetBinder.getSetName()));
+ entrySetBinder.addBinding().toInstance(new MapEntry<K, Provider<V>>(key,
+ binder.getProvider(valueKey)));
+ return binder.bind(valueKey);
+ }
+
+ public void configure(Binder binder) {
+ Multibinder.checkConfiguration(!isInitialized(), "MapBinder was already initialized");
+
+ final ImmutableSet<Dependency<?>> dependencies
+ = ImmutableSet.<Dependency<?>>of(Dependency.get(entrySetBinder.getSetKey()));
+
+ // binds a Map<K, Provider<V>> from a collection of Map.Entry<K, Provider<V>> entries
+ final Provider<Set<Entry<K, Provider<V>>>> entrySetProvider = binder
+ .getProvider(entrySetBinder.getSetKey());
+ binder.bind(providerMapKey).toProvider(new ProviderWithDependencies<Map<K, Provider<V>>>() {
+ private Map<K, Provider<V>> providerMap;
+
+ @SuppressWarnings("unused")
+ @Inject
+ void initialize() {
+ RealMapBinder.this.binder = null;
+
+ Map<K, Provider<V>> providerMapMutable = new LinkedHashMap<K, Provider<V>>();
+ for (Entry<K, Provider<V>> entry : entrySetProvider.get()) {
+ Multibinder.checkConfiguration(providerMapMutable.put(entry.getKey(), entry.getValue()) == null,
+ "Map injection failed due to duplicated key \"%s\"", entry.getKey());
+ }
+
+ providerMap = Collections.unmodifiableMap(providerMapMutable);
+ }
+
+ public Map<K, Provider<V>> get() {
+ return providerMap;
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return dependencies;
+ }
+ });
+
+ final Provider<Map<K, Provider<V>>> mapProvider = binder.getProvider(providerMapKey);
+ binder.bind(mapKey).toProvider(new ProviderWithDependencies<Map<K, V>>() {
+ public Map<K, V> get() {
+ Map<K, V> map = new LinkedHashMap<K, V>();
+ for (Entry<K, Provider<V>> entry : mapProvider.get().entrySet()) {
+ V value = entry.getValue().get();
+ K key = entry.getKey();
+ Multibinder.checkConfiguration(value != null,
+ "Map injection failed due to null value for key \"%s\"", key);
+ map.put(key, value);
+ }
+ return Collections.unmodifiableMap(map);
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return dependencies;
+ }
+ });
+ }
+
+ private boolean isInitialized() {
+ return binder == null;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof RealMapBinder
+ && ((RealMapBinder<?, ?>) o).mapKey.equals(mapKey);
+ }
+
+ @Override
+ public int hashCode() {
+ return mapKey.hashCode();
+ }
+
+ private static final class MapEntry<K, V> implements Map.Entry<K, V> {
+ private final K key;
+ private final V value;
+
+ private MapEntry(K key, V value) {
+ this.key = key;
+ this.value = value;
+ }
+
+ public K getKey() {
+ return key;
+ }
+
+ public V getValue() {
+ return value;
+ }
+
+ public V setValue(V value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof Map.Entry
+ && key.equals(((Map.Entry<?, ?>) obj).getKey())
+ && value.equals(((Map.Entry<?, ?>) obj).getValue());
+ }
+
+ @Override
+ public int hashCode() {
+ return 127 * ("key".hashCode() ^ key.hashCode())
+ + 127 * ("value".hashCode() ^ value.hashCode());
+ }
+
+ @Override
+ public String toString() {
+ return "MapEntry(" + key + ", " + value + ")";
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java b/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java
new file mode 100644
index 0000000..3dfd108
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java
@@ -0,0 +1,322 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.multibindings;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.binder.LinkedBindingBuilder;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.spi.Dependency;
+import org.elasticsearch.common.inject.spi.HasDependencies;
+import org.elasticsearch.common.inject.spi.Message;
+import org.elasticsearch.common.inject.util.Types;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * An API to bind multiple values separately, only to later inject them as a
+ * complete collection. Multibinder is intended for use in your application's
+ * module:
+ * <pre><code>
+ * public class SnacksModule extends AbstractModule {
+ * protected void configure() {
+ * Multibinder&lt;Snack&gt; multibinder
+ * = Multibinder.newSetBinder(binder(), Snack.class);
+ * multibinder.addBinding().toInstance(new Twix());
+ * multibinder.addBinding().toProvider(SnickersProvider.class);
+ * multibinder.addBinding().to(Skittles.class);
+ * }
+ * }</code></pre>
+ * <p/>
+ * <p>With this binding, a {@link Set}{@code <Snack>} can now be injected:
+ * <pre><code>
+ * class SnackMachine {
+ * {@literal @}Inject
+ * public SnackMachine(Set&lt;Snack&gt; snacks) { ... }
+ * }</code></pre>
+ * <p/>
+ * <p>Creating multibindings from different modules is supported. For example, it
+ * is okay for both {@code CandyModule} and {@code ChipsModule} to
+ * create their own {@code Multibinder<Snack>}, and to each contribute bindings
+ * to the set of snacks. When that set is injected, it will contain elements
+ * from both modules.
+ * <p/>
+ * <p>Elements are resolved at set injection time. If an element is bound to a
+ * provider, that provider's get method will be called each time the set is
+ * injected (unless the binding is also scoped).
+ * <p/>
+ * <p>Annotations are used to create different sets of the same element
+ * type. Each distinct annotation gets its own independent collection of
+ * elements.
+ * <p/>
+ * <p><strong>Elements must be distinct.</strong> If multiple bound elements
+ * have the same value, set injection will fail.
+ * <p/>
+ * <p><strong>Elements must be non-null.</strong> If any set element is null,
+ * set injection will fail.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+public abstract class Multibinder<T> {
+ private Multibinder() {
+ }
+
+ /**
+ * Returns a new multibinder that collects instances of {@code type} in a {@link Set} that is
+ * itself bound with no binding annotation.
+ */
+ public static <T> Multibinder<T> newSetBinder(Binder binder, TypeLiteral<T> type) {
+ binder = binder.skipSources(RealMultibinder.class, Multibinder.class);
+ RealMultibinder<T> result = new RealMultibinder<T>(binder, type, "",
+ Key.get(Multibinder.<T>setOf(type)));
+ binder.install(result);
+ return result;
+ }
+
+ /**
+ * Returns a new multibinder that collects instances of {@code type} in a {@link Set} that is
+ * itself bound with no binding annotation.
+ */
+ public static <T> Multibinder<T> newSetBinder(Binder binder, Class<T> type) {
+ return newSetBinder(binder, TypeLiteral.get(type));
+ }
+
+ /**
+ * Returns a new multibinder that collects instances of {@code type} in a {@link Set} that is
+ * itself bound with {@code annotation}.
+ */
+ public static <T> Multibinder<T> newSetBinder(
+ Binder binder, TypeLiteral<T> type, Annotation annotation) {
+ binder = binder.skipSources(RealMultibinder.class, Multibinder.class);
+ RealMultibinder<T> result = new RealMultibinder<T>(binder, type, annotation.toString(),
+ Key.get(Multibinder.<T>setOf(type), annotation));
+ binder.install(result);
+ return result;
+ }
+
+ /**
+ * Returns a new multibinder that collects instances of {@code type} in a {@link Set} that is
+ * itself bound with {@code annotation}.
+ */
+ public static <T> Multibinder<T> newSetBinder(
+ Binder binder, Class<T> type, Annotation annotation) {
+ return newSetBinder(binder, TypeLiteral.get(type), annotation);
+ }
+
+ /**
+ * Returns a new multibinder that collects instances of {@code type} in a {@link Set} that is
+ * itself bound with {@code annotationType}.
+ */
+ public static <T> Multibinder<T> newSetBinder(Binder binder, TypeLiteral<T> type,
+ Class<? extends Annotation> annotationType) {
+ binder = binder.skipSources(RealMultibinder.class, Multibinder.class);
+ RealMultibinder<T> result = new RealMultibinder<T>(binder, type, "@" + annotationType.getName(),
+ Key.get(Multibinder.<T>setOf(type), annotationType));
+ binder.install(result);
+ return result;
+ }
+
+ /**
+ * Returns a new multibinder that collects instances of {@code type} in a {@link Set} that is
+ * itself bound with {@code annotationType}.
+ */
+ public static <T> Multibinder<T> newSetBinder(Binder binder, Class<T> type,
+ Class<? extends Annotation> annotationType) {
+ return newSetBinder(binder, TypeLiteral.get(type), annotationType);
+ }
+
+ @SuppressWarnings("unchecked") // wrapping a T in a Set safely returns a Set<T>
+ private static <T> TypeLiteral<Set<T>> setOf(TypeLiteral<T> elementType) {
+ Type type = Types.setOf(elementType.getType());
+ return (TypeLiteral<Set<T>>) TypeLiteral.get(type);
+ }
+
+ /**
+ * Returns a binding builder used to add a new element in the set. Each
+ * bound element must have a distinct value. Bound providers will be
+ * evaluated each time the set is injected.
+ * <p/>
+ * <p>It is an error to call this method without also calling one of the
+ * {@code to} methods on the returned binding builder.
+ * <p/>
+ * <p>Scoping elements independently is supported. Use the {@code in} method
+ * to specify a binding scope.
+ */
+ public abstract LinkedBindingBuilder<T> addBinding();
+
+ /**
+ * The actual multibinder plays several roles:
+ * <p/>
+ * <p>As a Multibinder, it acts as a factory for LinkedBindingBuilders for
+ * each of the set's elements. Each binding is given an annotation that
+ * identifies it as a part of this set.
+ * <p/>
+ * <p>As a Module, it installs the binding to the set itself. As a module,
+ * this implements equals() and hashCode() in order to trick Guice into
+ * executing its configure() method only once. That makes it so that
+ * multiple multibinders can be created for the same target collection, but
+ * only one is bound. Since the list of bindings is retrieved from the
+ * injector itself (and not the multibinder), each multibinder has access to
+ * all contributions from all multibinders.
+ * <p/>
+ * <p>As a Provider, this constructs the set instances.
+ * <p/>
+ * <p>We use a subclass to hide 'implements Module, Provider' from the public
+ * API.
+ */
+ static final class RealMultibinder<T> extends Multibinder<T>
+ implements Module, Provider<Set<T>>, HasDependencies {
+
+ private final TypeLiteral<T> elementType;
+ private final String setName;
+ private final Key<Set<T>> setKey;
+
+ /* the target injector's binder. non-null until initialization, null afterwards */
+ private Binder binder;
+
+ /* a provider for each element in the set. null until initialization, non-null afterwards */
+ private List<Provider<T>> providers;
+ private Set<Dependency<?>> dependencies;
+
+ private RealMultibinder(Binder binder, TypeLiteral<T> elementType,
+ String setName, Key<Set<T>> setKey) {
+ this.binder = checkNotNull(binder, "binder");
+ this.elementType = checkNotNull(elementType, "elementType");
+ this.setName = checkNotNull(setName, "setName");
+ this.setKey = checkNotNull(setKey, "setKey");
+ }
+
+ @SuppressWarnings("unchecked")
+ public void configure(Binder binder) {
+ checkConfiguration(!isInitialized(), "Multibinder was already initialized");
+
+ binder.bind(setKey).toProvider(this);
+ }
+
+ @Override
+ public LinkedBindingBuilder<T> addBinding() {
+ checkConfiguration(!isInitialized(), "Multibinder was already initialized");
+
+ return binder.bind(Key.get(elementType, new RealElement(setName)));
+ }
+
+ /**
+ * Invoked by Guice at Injector-creation time to prepare providers for each
+ * element in this set. At this time the set's size is known, but its
+ * contents are only evaluated when get() is invoked.
+ */
+ @Inject
+ void initialize(Injector injector) {
+ providers = Lists.newArrayList();
+ List<Dependency<?>> dependencies = Lists.newArrayList();
+ for (Binding<?> entry : injector.findBindingsByType(elementType)) {
+
+ if (keyMatches(entry.getKey())) {
+ @SuppressWarnings("unchecked") // protected by findBindingsByType()
+ Binding<T> binding = (Binding<T>) entry;
+ providers.add(binding.getProvider());
+ dependencies.add(Dependency.get(binding.getKey()));
+ }
+ }
+
+ this.dependencies = ImmutableSet.copyOf(dependencies);
+ this.binder = null;
+ }
+
+ private boolean keyMatches(Key<?> key) {
+ return key.getTypeLiteral().equals(elementType)
+ && key.getAnnotation() instanceof Element
+ && ((Element) key.getAnnotation()).setName().equals(setName);
+ }
+
+ private boolean isInitialized() {
+ return binder == null;
+ }
+
+ public Set<T> get() {
+ checkConfiguration(isInitialized(), "Multibinder is not initialized");
+
+ Set<T> result = new LinkedHashSet<T>();
+ for (Provider<T> provider : providers) {
+ final T newValue = provider.get();
+ checkConfiguration(newValue != null, "Set injection failed due to null element");
+ checkConfiguration(result.add(newValue),
+ "Set injection failed due to duplicated element \"%s\"", newValue);
+ }
+ return Collections.unmodifiableSet(result);
+ }
+
+ String getSetName() {
+ return setName;
+ }
+
+ Key<Set<T>> getSetKey() {
+ return setKey;
+ }
+
+ public Set<Dependency<?>> getDependencies() {
+ return dependencies;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof RealMultibinder
+ && ((RealMultibinder<?>) o).setKey.equals(setKey);
+ }
+
+ @Override
+ public int hashCode() {
+ return setKey.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return new StringBuilder()
+ .append(setName)
+ .append(setName.length() > 0 ? " " : "")
+ .append("Multibinder<")
+ .append(elementType)
+ .append(">")
+ .toString();
+ }
+ }
+
+ static void checkConfiguration(boolean condition, String format, Object... args) {
+ if (condition) {
+ return;
+ }
+
+ throw new ConfigurationException(ImmutableSet.of(new Message(Errors.format(format, args))));
+ }
+
+ static <T> T checkNotNull(T reference, String name) {
+ if (reference != null) {
+ return reference;
+ }
+
+ NullPointerException npe = new NullPointerException(name);
+ throw new ConfigurationException(ImmutableSet.of(
+ new Message(ImmutableList.of(), npe.toString(), npe)));
+ }
+}
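For orientation, a minimal sketch of the multibinder above in use. It assumes this fork exposes the usual Guice.createInjector and AbstractModule entry points (AbstractModule is referenced elsewhere in this patch); Snack, Twix and Skittles are the hypothetical types from the class Javadoc.

    import org.elasticsearch.common.inject.AbstractModule;
    import org.elasticsearch.common.inject.Guice;
    import org.elasticsearch.common.inject.Injector;
    import org.elasticsearch.common.inject.Key;
    import org.elasticsearch.common.inject.TypeLiteral;
    import org.elasticsearch.common.inject.multibindings.Multibinder;

    import java.util.Set;

    // Hypothetical element types, used only for this sketch.
    interface Snack {}
    class Twix implements Snack {}
    class Skittles implements Snack {}

    public class SnacksExample {
        public static void main(String[] args) {
            Injector injector = Guice.createInjector(new AbstractModule() {
                @Override
                protected void configure() {
                    Multibinder<Snack> snacks = Multibinder.newSetBinder(binder(), Snack.class);
                    snacks.addBinding().toInstance(new Twix()); // bound instance
                    snacks.addBinding().to(Skittles.class);     // linked binding
                }
            });
            // The set key is Set<Snack>; both contributions appear in it.
            Set<Snack> snacks = injector.getInstance(Key.get(new TypeLiteral<Set<Snack>>() {}));
            System.out.println(snacks.size()); // 2
        }
    }

Both contributions land in the same Set<Snack> because the set key, not the multibinder instance, identifies the collection.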
diff --git a/src/main/java/org/elasticsearch/common/inject/multibindings/RealElement.java b/src/main/java/org/elasticsearch/common/inject/multibindings/RealElement.java
new file mode 100644
index 0000000..eb6fdf5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/multibindings/RealElement.java
@@ -0,0 +1,66 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.multibindings;
+
+import java.lang.annotation.Annotation;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * @author jessewilson@google.com (Jesse Wilson)
+ */
+class RealElement implements Element {
+ private static final AtomicInteger nextUniqueId = new AtomicInteger(1);
+
+ private final int uniqueId;
+ private final String setName;
+
+ RealElement(String setName) {
+ uniqueId = nextUniqueId.getAndIncrement();
+ this.setName = setName;
+ }
+
+ public String setName() {
+ return setName;
+ }
+
+ public int uniqueId() {
+ return uniqueId;
+ }
+
+ public Class<? extends Annotation> annotationType() {
+ return Element.class;
+ }
+
+ @Override
+ public String toString() {
+ return "@" + Element.class.getName() + "(setName=" + setName
+ + ",uniqueId=" + uniqueId + ")";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof Element
+ && ((Element) o).setName().equals(setName())
+ && ((Element) o).uniqueId() == uniqueId();
+ }
+
+ @Override
+ public int hashCode() {
+ return 127 * ("setName".hashCode() ^ setName.hashCode())
+ + 127 * ("uniqueId".hashCode() ^ uniqueId);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/multibindings/package-info.java b/src/main/java/org/elasticsearch/common/inject/multibindings/package-info.java
new file mode 100644
index 0000000..3834112
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/multibindings/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Extension for binding multiple instances in a collection; this extension requires {@code
+ * guice-multibindings-2.0.jar}.
+ */
+package org.elasticsearch.common.inject.multibindings; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/name/Named.java b/src/main/java/org/elasticsearch/common/inject/name/Named.java
new file mode 100644
index 0000000..0d914be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/name/Named.java
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.name;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Annotates named things.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+@Retention(RUNTIME)
+@Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
+@BindingAnnotation
+public @interface Named {
+ String value();
+}
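A hedged sketch of @Named in use; the binding name "greeting" and the Greeter type are illustrative only, and Guice.createInjector is assumed as above.

    import org.elasticsearch.common.inject.AbstractModule;
    import org.elasticsearch.common.inject.Guice;
    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.inject.Injector;
    import org.elasticsearch.common.inject.name.Named;
    import org.elasticsearch.common.inject.name.Names;

    public class NamedExample {
        static class Greeter {
            final String greeting;

            @Inject
            Greeter(@Named("greeting") String greeting) { // selects the binding annotated @Named("greeting")
                this.greeting = greeting;
            }
        }

        public static void main(String[] args) {
            Injector injector = Guice.createInjector(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(String.class).annotatedWith(Names.named("greeting")).toInstance("hello");
                }
            });
            System.out.println(injector.getInstance(Greeter.class).greeting); // hello
        }
    }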
diff --git a/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java b/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java
new file mode 100644
index 0000000..4961fe3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.name;
+
+import java.io.Serializable;
+import java.lang.annotation.Annotation;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+class NamedImpl implements Named, Serializable {
+
+ private final String value;
+
+ public NamedImpl(String value) {
+ this.value = checkNotNull(value, "name");
+ }
+
+ public String value() {
+ return this.value;
+ }
+
+ public int hashCode() {
+ // This is specified in java.lang.Annotation.
+ return (127 * "value".hashCode()) ^ value.hashCode();
+ }
+
+ public boolean equals(Object o) {
+ if (!(o instanceof Named)) {
+ return false;
+ }
+
+ Named other = (Named) o;
+ return value.equals(other.value());
+ }
+
+ public String toString() {
+ return "@" + Named.class.getName() + "(value=" + value + ")";
+ }
+
+ public Class<? extends Annotation> annotationType() {
+ return Named.class;
+ }
+
+ private static final long serialVersionUID = 0;
+}
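The hashCode above follows the contract specified by java.lang.annotation.Annotation, which is what lets a hand-built NamedImpl compare equal to a compiler-generated @Named instance (RealElement's hashCode earlier in this patch follows the same formula). A small self-contained check, using only code shown in this patch plus the JDK:

    import org.elasticsearch.common.inject.name.Named;
    import org.elasticsearch.common.inject.name.Names;

    public class AnnotationContractCheck {
        // A field carrying a "real" (compiler-generated) @Named instance.
        @Named("es.node") static Object carrier;

        public static void main(String[] args) throws Exception {
            Named real = AnnotationContractCheck.class
                    .getDeclaredField("carrier")
                    .getAnnotation(Named.class);
            Named synthetic = Names.named("es.node");

            // java.lang.annotation.Annotation requires these to agree, which is
            // why NamedImpl computes (127 * "value".hashCode()) ^ value.hashCode().
            System.out.println(synthetic.equals(real));                  // true
            System.out.println(synthetic.hashCode() == real.hashCode()); // true
        }
    }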
diff --git a/src/main/java/org/elasticsearch/common/inject/name/Names.java b/src/main/java/org/elasticsearch/common/inject/name/Names.java
new file mode 100644
index 0000000..7f3636f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/name/Names.java
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.name;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Key;
+
+import java.util.Enumeration;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Utility methods for use with {@code @}{@link Named}.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public class Names {
+
+ private Names() {
+ }
+
+ /**
+ * Creates a {@link Named} annotation with {@code name} as the value.
+ */
+ public static Named named(String name) {
+ return new NamedImpl(name);
+ }
+
+ /**
+ * Creates a constant binding to {@code @Named(key)} for each entry in
+ * {@code properties}.
+ */
+ public static void bindProperties(Binder binder, Map<String, String> properties) {
+ binder = binder.skipSources(Names.class);
+ for (Map.Entry<String, String> entry : properties.entrySet()) {
+ String key = entry.getKey();
+ String value = entry.getValue();
+ binder.bind(Key.get(String.class, new NamedImpl(key))).toInstance(value);
+ }
+ }
+
+ /**
+ * Creates a constant binding to {@code @Named(key)} for each property. This
+ * method binds all properties including those inherited from
+ * {@link Properties#defaults defaults}.
+ */
+ public static void bindProperties(Binder binder, Properties properties) {
+ binder = binder.skipSources(Names.class);
+
+ // use enumeration to include the default properties
+ for (Enumeration<?> e = properties.propertyNames(); e.hasMoreElements(); ) {
+ String propertyName = (String) e.nextElement();
+ String value = properties.getProperty(propertyName);
+ binder.bind(Key.get(String.class, new NamedImpl(propertyName))).toInstance(value);
+ }
+ }
+}
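A short sketch of bindProperties with an inherited default, matching the Javadoc note above; the property names and values are illustrative:

    import org.elasticsearch.common.inject.AbstractModule;
    import org.elasticsearch.common.inject.Guice;
    import org.elasticsearch.common.inject.Injector;
    import org.elasticsearch.common.inject.Key;
    import org.elasticsearch.common.inject.name.Names;

    import java.util.Properties;

    public class BindPropertiesExample {
        public static void main(String[] args) {
            final Properties defaults = new Properties();
            defaults.setProperty("port", "9300");

            final Properties props = new Properties(defaults); // "port" is inherited
            props.setProperty("host", "localhost");

            Injector injector = Guice.createInjector(new AbstractModule() {
                @Override
                protected void configure() {
                    // Binds @Named("host") and, via propertyNames(), the inherited @Named("port") too.
                    Names.bindProperties(binder(), props);
                }
            });

            System.out.println(injector.getInstance(Key.get(String.class, Names.named("port")))); // 9300
        }
    }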
diff --git a/src/main/java/org/elasticsearch/common/inject/name/package-info.java b/src/main/java/org/elasticsearch/common/inject/name/package-info.java
new file mode 100644
index 0000000..b785fb6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/name/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Support for binding to string-based names.
+ */
+package org.elasticsearch.common.inject.name; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/package-info.java b/src/main/java/org/elasticsearch/common/inject/package-info.java
new file mode 100644
index 0000000..44ff5b2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/package-info.java
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <i>Google Guice</i> (pronounced "juice") is an ultra-lightweight dependency
+ * injection framework. Please refer to the Guice
+ * <a href="http://docs.google.com/Doc?id=dd2fhx4z_5df5hw8">User's Guide</a>
+ * for a gentle introduction.
+ *
+ * <p>The principal public APIs in this package are:
+ *
+ * <dl>
+ * <dt>{@link org.elasticsearch.common.inject.Inject}
+ * <dd>The annotation you will use in your implementation classes to tell Guice
+ * where and how it should send in ("inject") the objects you depend on
+ * (your "dependencies").
+ *
+ * <dt>{@link org.elasticsearch.common.inject.Module}
+ * <dd>The interface you will implement in order to specify "bindings" --
+ * instructions for how Guice should handle injection -- for a particular
+ * set of interfaces.
+ *
+ * <dt>{@link org.elasticsearch.common.inject.Binder}
+ * <dd>The object that Guice passes into your {@link org.elasticsearch.common.inject.Module}
+ * to collect these bindings.
+ *
+ * <dt>{@link org.elasticsearch.common.inject.Provider}
+ * <dd>The interface you will implement when you need to customize exactly how
+ * Guice creates instances for a particular binding.
+ *
+ * </dl>
+ *
+ */
+package org.elasticsearch.common.inject; \ No newline at end of file
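To tie the four APIs listed above together, a hedged hello-world sketch: a Module receives a Binder and registers a Provider, and the injector resolves the binding. It assumes toProvider(Provider) on the binding builder, as in stock Guice 2.0:

    import org.elasticsearch.common.inject.Binder;
    import org.elasticsearch.common.inject.Guice;
    import org.elasticsearch.common.inject.Module;
    import org.elasticsearch.common.inject.Provider;

    public class CoreApiExample {
        public static void main(String[] args) {
            Module module = new Module() {
                @Override
                public void configure(Binder binder) {
                    // A Provider customizes exactly how instances are created.
                    binder.bind(String.class).toProvider(new Provider<String>() {
                        @Override
                        public String get() {
                            return "built at " + System.nanoTime();
                        }
                    });
                }
            };
            System.out.println(Guice.createInjector(module).getInstance(String.class));
        }
    }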
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/BindingScopingVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/BindingScopingVisitor.java
new file mode 100644
index 0000000..fb2f4ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/BindingScopingVisitor.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Scope;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * Visits each of the strategies used to scope an injection.
+ *
+ * @param <V> any type to be returned by the visit method. Use {@link Void} with
+ * {@code return null} if no return type is needed.
+ * @since 2.0
+ */
+public interface BindingScopingVisitor<V> {
+
+ /**
+ * Visit an eager singleton or single instance. This scope strategy is found on both module and
+ * injector bindings.
+ */
+ V visitEagerSingleton();
+
+ /**
+ * Visit a scope instance. This scope strategy is found on both module and injector bindings.
+ */
+ V visitScope(Scope scope);
+
+ /**
+ * Visit a scope annotation. This scope strategy is found only on module bindings. The instance
+ * that implements this scope is registered by {@link org.elasticsearch.common.inject.Binder#bindScope(Class,
+ * Scope) Binder.bindScope()}.
+ */
+ V visitScopeAnnotation(Class<? extends Annotation> scopeAnnotation);
+
+ /**
+ * Visit an unspecified or unscoped strategy. On a module, this strategy indicates that the
+ * injector should use scoping annotations to find a scope. On an injector, it indicates that
+ * no scope is applied to the binding. An unscoped binding will behave like a scoped one when it
+ * is linked to a scoped binding.
+ */
+ V visitNoScoping();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/BindingTargetVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/BindingTargetVisitor.java
new file mode 100644
index 0000000..b136115
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/BindingTargetVisitor.java
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+/**
+ * Visits each of the strategies used to find an instance to satisfy an injection.
+ *
+ * @param <V> any type to be returned by the visit method. Use {@link Void} with
+ * {@code return null} if no return type is needed.
+ * @since 2.0
+ */
+public interface BindingTargetVisitor<T, V> {
+
+ /**
+ * Visit an instance binding. The same instance is returned for every injection. This target is
+ * found in both module and injector bindings.
+ */
+ V visit(InstanceBinding<? extends T> binding);
+
+ /**
+ * Visit a provider instance binding. The provider's {@code get} method is invoked to resolve
+ * injections. This target is found in both module and injector bindings.
+ */
+ V visit(ProviderInstanceBinding<? extends T> binding);
+
+ /**
+ * Visit a provider key binding. To resolve injections, the provider key is first resolved, then
+ * that provider's {@code get} method is invoked. This target is found in both module and injector
+ * bindings.
+ */
+ V visit(ProviderKeyBinding<? extends T> binding);
+
+ /**
+ * Visit a linked key binding. The other key's binding is used to resolve injections. This
+ * target is found in both module and injector bindings.
+ */
+ V visit(LinkedKeyBinding<? extends T> binding);
+
+ /**
+ * Visit a binding to a key exposed from an enclosed private environment. This target is only
+ * found in injector bindings.
+ */
+ V visit(ExposedBinding<? extends T> binding);
+
+ /**
+ * Visit an untargetted binding. This target is found only on module bindings. It indicates
+ * that the injector should use its implicit binding strategies to resolve injections.
+ */
+ V visit(UntargettedBinding<? extends T> binding);
+
+ /**
+ * Visit a constructor binding. To resolve injections, an instance is instantiated by invoking
+ * {@code constructor}. This target is found only on injector bindings.
+ */
+ V visit(ConstructorBinding<? extends T> binding);
+
+ /**
+ * Visit a binding created from converting a bound instance to a new type. The source binding
+ * has the same binding annotation but a different type. This target is found only on injector
+ * bindings.
+ */
+ V visit(ConvertedConstantBinding<? extends T> binding);
+
+ /**
+ * Visit a binding to a {@link org.elasticsearch.common.inject.Provider} that delegates to the binding for the
+ * provided type. This target is found only on injector bindings.
+ */
+ V visit(ProviderBinding<? extends T> binding);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ConstructorBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ConstructorBinding.java
new file mode 100644
index 0000000..df464fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ConstructorBinding.java
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+
+import java.util.Set;
+
+/**
+ * A binding to the constructor of a concrete class. To resolve injections, an instance is
+ * instantiated by invoking the constructor.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface ConstructorBinding<T> extends Binding<T>, HasDependencies {
+
+ /**
+ * Gets the constructor this binding injects.
+ */
+ InjectionPoint getConstructor();
+
+ /**
+ * Returns all instance method and field injection points on {@code type}.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ */
+ Set<InjectionPoint> getInjectableMembers();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ConvertedConstantBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ConvertedConstantBinding.java
new file mode 100644
index 0000000..cede704
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ConvertedConstantBinding.java
@@ -0,0 +1,48 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+import org.elasticsearch.common.inject.Key;
+
+import java.util.Set;
+
+/**
+ * A binding created from converting a bound instance to a new type. The source binding has the same
+ * binding annotation but a different type.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface ConvertedConstantBinding<T> extends Binding<T>, HasDependencies {
+
+ /**
+ * Returns the converted value.
+ */
+ T getValue();
+
+ /**
+ * Returns the key for the source binding. That binding can be retrieved from an injector using
+ * {@link org.elasticsearch.common.inject.Injector#getBinding(Key) Injector.getBinding(key)}.
+ */
+ Key<String> getSourceKey();
+
+ /**
+ * Returns a singleton set containing only the converted key.
+ */
+ Set<Dependency<?>> getDependencies();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingScopingVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingScopingVisitor.java
new file mode 100644
index 0000000..bc22d60
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingScopingVisitor.java
@@ -0,0 +1,56 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Scope;
+
+import java.lang.annotation.Annotation;
+
+/**
+ * No-op visitor for subclassing. All interface methods simply delegate to
+ * {@link #visitOther()}, returning its result.
+ *
+ * @param <V> any type to be returned by the visit method. Use {@link Void} with
+ * {@code return null} if no return type is needed.
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public class DefaultBindingScopingVisitor<V> implements BindingScopingVisitor<V> {
+
+ /**
+ * Default visit implementation. Returns {@code null}.
+ */
+ protected V visitOther() {
+ return null;
+ }
+
+ public V visitEagerSingleton() {
+ return visitOther();
+ }
+
+ public V visitScope(Scope scope) {
+ return visitOther();
+ }
+
+ public V visitScopeAnnotation(Class<? extends Annotation> scopeAnnotation) {
+ return visitOther();
+ }
+
+ public V visitNoScoping() {
+ return visitOther();
+ }
+}
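As an illustration of the subclassing pattern, a sketch of a visitor that answers whether a binding is an eager singleton; every other scope strategy falls through to visitOther():

    import org.elasticsearch.common.inject.spi.DefaultBindingScopingVisitor;

    // Answers "is this binding an eager singleton?" while ignoring every other scope strategy.
    class EagerSingletonDetector extends DefaultBindingScopingVisitor<Boolean> {
        @Override
        public Boolean visitEagerSingleton() {
            return true;
        }

        @Override
        protected Boolean visitOther() {
            return false; // scope instances, scope annotations, and unscoped bindings
        }
    }

Assuming Binding exposes an acceptScopingVisitor method as in stock Guice 2.0, usage would be binding.acceptScopingVisitor(new EagerSingletonDetector()).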
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java
new file mode 100644
index 0000000..d730357
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java
@@ -0,0 +1,76 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+
+/**
+ * No-op visitor for subclassing. All interface methods simply delegate to {@link
+ * #visitOther(Binding)}, returning its result.
+ *
+ * @param <V> any type to be returned by the visit method. Use {@link Void} with
+ * {@code return null} if no return type is needed.
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public abstract class DefaultBindingTargetVisitor<T, V> implements BindingTargetVisitor<T, V> {
+
+ /**
+ * Default visit implementation. Returns {@code null}.
+ */
+ protected V visitOther(Binding<? extends T> binding) {
+ return null;
+ }
+
+ public V visit(InstanceBinding<? extends T> instanceBinding) {
+ return visitOther(instanceBinding);
+ }
+
+ public V visit(ProviderInstanceBinding<? extends T> providerInstanceBinding) {
+ return visitOther(providerInstanceBinding);
+ }
+
+ public V visit(ProviderKeyBinding<? extends T> providerKeyBinding) {
+ return visitOther(providerKeyBinding);
+ }
+
+ public V visit(LinkedKeyBinding<? extends T> linkedKeyBinding) {
+ return visitOther(linkedKeyBinding);
+ }
+
+ public V visit(ExposedBinding<? extends T> exposedBinding) {
+ return visitOther(exposedBinding);
+ }
+
+ public V visit(UntargettedBinding<? extends T> untargettedBinding) {
+ return visitOther(untargettedBinding);
+ }
+
+ public V visit(ConstructorBinding<? extends T> constructorBinding) {
+ return visitOther(constructorBinding);
+ }
+
+ public V visit(ConvertedConstantBinding<? extends T> convertedConstantBinding) {
+ return visitOther(convertedConstantBinding);
+ }
+
+ // javac says it's an error to cast ProviderBinding<? extends T> to Binding<? extends T>
+ @SuppressWarnings("unchecked")
+ public V visit(ProviderBinding<? extends T> providerBinding) {
+ return visitOther((Binding<? extends T>) providerBinding);
+ }
+}
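A sketch of the same pattern for binding targets, mirroring the GET_INSTANCE_VISITOR that Elements defines later in this patch: it extracts the instance from instance bindings and yields null for the other eight targets.

    import org.elasticsearch.common.inject.spi.DefaultBindingTargetVisitor;
    import org.elasticsearch.common.inject.spi.InstanceBinding;

    // Returns the bound instance for instance bindings; visitOther() already
    // returns null for every other target, so no further overrides are needed.
    class InstanceExtractor extends DefaultBindingTargetVisitor<Object, Object> {
        @Override
        public Object visit(InstanceBinding<?> binding) {
            return binding.getInstance();
        }
    }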
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/DefaultElementVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/DefaultElementVisitor.java
new file mode 100644
index 0000000..4ba0b75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/DefaultElementVisitor.java
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+
+/**
+ * No-op visitor for subclassing. All interface methods simply delegate to
+ * {@link #visitOther(Element)}, returning its result.
+ *
+ * @param <V> any type to be returned by the visit method. Use {@link Void} with
+ * {@code return null} if no return type is needed.
+ * @author sberlin@gmail.com (Sam Berlin)
+ * @since 2.0
+ */
+public abstract class DefaultElementVisitor<V> implements ElementVisitor<V> {
+
+ /**
+ * Default visit implementation. Returns {@code null}.
+ */
+ protected V visitOther(Element element) {
+ return null;
+ }
+
+ public V visit(Message message) {
+ return visitOther(message);
+ }
+
+ public <T> V visit(Binding<T> binding) {
+ return visitOther(binding);
+ }
+
+ public V visit(ScopeBinding scopeBinding) {
+ return visitOther(scopeBinding);
+ }
+
+ public V visit(TypeConverterBinding typeConverterBinding) {
+ return visitOther(typeConverterBinding);
+ }
+
+ public <T> V visit(ProviderLookup<T> providerLookup) {
+ return visitOther(providerLookup);
+ }
+
+ public V visit(InjectionRequest injectionRequest) {
+ return visitOther(injectionRequest);
+ }
+
+ public V visit(StaticInjectionRequest staticInjectionRequest) {
+ return visitOther(staticInjectionRequest);
+ }
+
+ public V visit(PrivateElements privateElements) {
+ return visitOther(privateElements);
+ }
+
+ public <T> V visit(MembersInjectorLookup<T> lookup) {
+ return visitOther(lookup);
+ }
+
+ public V visit(TypeListenerBinding binding) {
+ return visitOther(binding);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/Dependency.java b/src/main/java/org/elasticsearch/common/inject/spi/Dependency.java
new file mode 100644
index 0000000..50e3979
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/Dependency.java
@@ -0,0 +1,130 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.Key;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * A variable that can be resolved by an injector.
+ * <p/>
+ * <p>Use {@link #get} to build a freestanding dependency, or {@link InjectionPoint} to build one
+ * that's attached to a constructor, method or field.
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class Dependency<T> {
+ private final InjectionPoint injectionPoint;
+ private final Key<T> key;
+ private final boolean nullable;
+ private final int parameterIndex;
+
+ Dependency(InjectionPoint injectionPoint, Key<T> key,
+ boolean nullable, int parameterIndex) {
+ this.injectionPoint = injectionPoint;
+ this.key = key;
+ this.nullable = nullable;
+ this.parameterIndex = parameterIndex;
+ }
+
+ /**
+ * Returns a new dependency that is not attached to an injection point. The returned dependency is
+ * nullable.
+ */
+ public static <T> Dependency<T> get(Key<T> key) {
+ return new Dependency<T>(null, key, true, -1);
+ }
+
+ /**
+ * Returns the dependencies from the given injection points.
+ */
+ public static Set<Dependency<?>> forInjectionPoints(Set<InjectionPoint> injectionPoints) {
+ List<Dependency<?>> dependencies = Lists.newArrayList();
+ for (InjectionPoint injectionPoint : injectionPoints) {
+ dependencies.addAll(injectionPoint.getDependencies());
+ }
+ return ImmutableSet.copyOf(dependencies);
+ }
+
+ /**
+ * Returns the key to the binding that satisfies this dependency.
+ */
+ public Key<T> getKey() {
+ return this.key;
+ }
+
+ /**
+ * Returns true if null is a legal value for this dependency.
+ */
+ public boolean isNullable() {
+ return nullable;
+ }
+
+ /**
+ * Returns the injection point to which this dependency belongs, or null if this dependency isn't
+ * attached to a particular injection point.
+ */
+ public InjectionPoint getInjectionPoint() {
+ return injectionPoint;
+ }
+
+ /**
+ * Returns the index of this dependency in the injection point's parameter list, or {@code -1} if
+ * this dependency does not belong to a parameter list. Only method and constructor dependencies
+ * are elements in a parameter list.
+ */
+ public int getParameterIndex() {
+ return parameterIndex;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(injectionPoint, parameterIndex, key);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof Dependency) {
+ Dependency dependency = (Dependency) o;
+ return Objects.equal(injectionPoint, dependency.injectionPoint)
+ && Objects.equal(parameterIndex, dependency.parameterIndex)
+ && Objects.equal(key, dependency.key);
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append(key);
+ if (injectionPoint != null) {
+ builder.append("@").append(injectionPoint);
+ if (parameterIndex != -1) {
+ builder.append("[").append(parameterIndex).append("]");
+ }
+ }
+ return builder.toString();
+ }
+}
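A small sketch of a freestanding dependency built with Dependency.get, showing the defaults documented above (nullable, no injection point, parameter index -1):

    import org.elasticsearch.common.inject.Key;
    import org.elasticsearch.common.inject.name.Names;
    import org.elasticsearch.common.inject.spi.Dependency;

    public class DependencyExample {
        public static void main(String[] args) {
            // A freestanding dependency: no injection point, parameter index -1, nullable.
            Dependency<String> dep = Dependency.get(Key.get(String.class, Names.named("host")));
            System.out.println(dep.isNullable());        // true
            System.out.println(dep.getParameterIndex()); // -1
            System.out.println(dep);                     // prints the key only, since no injection point is attached
        }
    }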
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/Element.java b/src/main/java/org/elasticsearch/common/inject/spi/Element.java
new file mode 100644
index 0000000..a222c37
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/Element.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+
+/**
+ * A core component of a module or injector.
+ * <p/>
+ * <p>The elements of a module can be inspected, validated and rewritten. Use {@link
+ * Elements#getElements(org.elasticsearch.common.inject.Module[]) Elements.getElements()} to read the elements
+ * from a module, and {@link Elements#getModule(Iterable) Elements.getModule()} to rewrite them.
+ * This can be used for static analysis and generation of Guice modules.
+ * <p/>
+ * <p>The elements of an injector can be inspected and exercised. Use {@link
+ * org.elasticsearch.common.inject.Injector#getBindings Injector.getBindings()} to reflect on Guice injectors.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @author crazybob@google.com (Bob Lee)
+ * @since 2.0
+ */
+public interface Element {
+
+ /**
+ * Returns an arbitrary object containing information about the "place" where this element was
+ * configured. Used by Guice in the production of descriptive error messages.
+ * <p/>
+ * <p>Tools might specially handle types they know about; {@code StackTraceElement} is a good
+ * example. Tools should simply call {@code toString()} on the source object if the type is
+ * unfamiliar.
+ */
+ Object getSource();
+
+ /**
+ * Accepts an element visitor. Invokes the visitor method specific to this element's type.
+ *
+ * @param visitor to call back on
+ */
+ <T> T acceptVisitor(ElementVisitor<T> visitor);
+
+ /**
+ * Writes this module element to the given binder (optional operation).
+ *
+ * @param binder to apply configuration element to
+ * @throws UnsupportedOperationException if the {@code applyTo} method is not supported by this
+ * element.
+ */
+ void applyTo(Binder binder);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ElementVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/ElementVisitor.java
new file mode 100644
index 0000000..8998779
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ElementVisitor.java
@@ -0,0 +1,81 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+
+/**
+ * Visit elements.
+ *
+ * @param <V> any type to be returned by the visit method. Use {@link Void} with
+ * {@code return null} if no return type is needed.
+ * @since 2.0
+ */
+public interface ElementVisitor<V> {
+
+ /**
+ * Visit a mapping from a key (type and optional annotation) to the strategy for getting
+ * instances of the type.
+ */
+ <T> V visit(Binding<T> binding);
+
+ /**
+ * Visit a registration of a scope annotation with the scope that implements it.
+ */
+ V visit(ScopeBinding binding);
+
+ /**
+ * Visit a registration of type converters for matching target types.
+ */
+ V visit(TypeConverterBinding binding);
+
+ /**
+ * Visit a request to inject the instance fields and methods of an instance.
+ */
+ V visit(InjectionRequest request);
+
+ /**
+ * Visit a request to inject the static fields and methods of type.
+ */
+ V visit(StaticInjectionRequest request);
+
+ /**
+ * Visit a lookup of the provider for a type.
+ */
+ <T> V visit(ProviderLookup<T> lookup);
+
+ /**
+ * Visit a lookup of the members injector.
+ */
+ <T> V visit(MembersInjectorLookup<T> lookup);
+
+ /**
+ * Visit an error message and the context in which it occurred.
+ */
+ V visit(Message message);
+
+ /**
+ * Visit a collection of configuration elements for a {@linkplain org.elasticsearch.common.inject.PrivateBinder
+ * private binder}.
+ */
+ V visit(PrivateElements elements);
+
+ /**
+ * Visit an injectable type listener binding.
+ */
+ V visit(TypeListenerBinding binding);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/src/main/java/org/elasticsearch/common/inject/spi/Elements.java
new file mode 100644
index 0000000..d9b9c06
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/Elements.java
@@ -0,0 +1,334 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
+import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
+import org.elasticsearch.common.inject.binder.AnnotatedElementBuilder;
+import org.elasticsearch.common.inject.internal.*;
+import org.elasticsearch.common.inject.matcher.Matcher;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.lang.annotation.Annotation;
+import java.util.*;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Exposes elements of a module so they can be inspected, validated or {@link
+ * Element#applyTo(Binder) rewritten}.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class Elements {
+ private static final BindingTargetVisitor<Object, Object> GET_INSTANCE_VISITOR
+ = new DefaultBindingTargetVisitor<Object, Object>() {
+ @Override
+ public Object visit(InstanceBinding<?> binding) {
+ return binding.getInstance();
+ }
+
+ @Override
+ protected Object visitOther(Binding<?> binding) {
+ throw new IllegalArgumentException();
+ }
+ };
+
+ /**
+ * Records the elements executed by {@code modules}.
+ */
+ public static List<Element> getElements(Module... modules) {
+ return getElements(Stage.DEVELOPMENT, Arrays.asList(modules));
+ }
+
+ /**
+ * Records the elements executed by {@code modules}.
+ */
+ public static List<Element> getElements(Stage stage, Module... modules) {
+ return getElements(stage, Arrays.asList(modules));
+ }
+
+ /**
+ * Records the elements executed by {@code modules}.
+ */
+ public static List<Element> getElements(Iterable<? extends Module> modules) {
+ return getElements(Stage.DEVELOPMENT, modules);
+ }
+
+ /**
+ * Records the elements executed by {@code modules}.
+ */
+ public static List<Element> getElements(Stage stage, Iterable<? extends Module> modules) {
+ RecordingBinder binder = new RecordingBinder(stage);
+ for (Module module : modules) {
+ binder.install(module);
+ }
+ return Collections.unmodifiableList(binder.elements);
+ }
+
+ /**
+ * Returns the module composed of {@code elements}.
+ */
+ public static Module getModule(final Iterable<? extends Element> elements) {
+ return new Module() {
+ public void configure(Binder binder) {
+ for (Element element : elements) {
+ element.applyTo(binder);
+ }
+ }
+ };
+ }
+
+ @SuppressWarnings("unchecked")
+ static <T> BindingTargetVisitor<T, T> getInstanceVisitor() {
+ return (BindingTargetVisitor<T, T>) GET_INSTANCE_VISITOR;
+ }
+
+ private static class RecordingBinder implements Binder, PrivateBinder {
+ private final Stage stage;
+ private final Set<Module> modules;
+ private final List<Element> elements;
+ private final Object source;
+ private final SourceProvider sourceProvider;
+
+ /**
+ * The binder where exposed bindings will be created
+ */
+ private final RecordingBinder parent;
+ private final PrivateElementsImpl privateElements;
+
+ private RecordingBinder(Stage stage) {
+ this.stage = stage;
+ this.modules = Sets.newHashSet();
+ this.elements = Lists.newArrayList();
+ this.source = null;
+ this.sourceProvider = new SourceProvider().plusSkippedClasses(
+ Elements.class, RecordingBinder.class, AbstractModule.class,
+ ConstantBindingBuilderImpl.class, AbstractBindingBuilder.class, BindingBuilder.class);
+ this.parent = null;
+ this.privateElements = null;
+ }
+
+ /**
+ * Creates a recording binder that's backed by {@code prototype}.
+ */
+ private RecordingBinder(
+ RecordingBinder prototype, Object source, SourceProvider sourceProvider) {
+ checkArgument(source == null ^ sourceProvider == null);
+
+ this.stage = prototype.stage;
+ this.modules = prototype.modules;
+ this.elements = prototype.elements;
+ this.source = source;
+ this.sourceProvider = sourceProvider;
+ this.parent = prototype.parent;
+ this.privateElements = prototype.privateElements;
+ }
+
+ /**
+ * Creates a private recording binder.
+ */
+ private RecordingBinder(RecordingBinder parent, PrivateElementsImpl privateElements) {
+ this.stage = parent.stage;
+ this.modules = Sets.newHashSet();
+ this.elements = privateElements.getElementsMutable();
+ this.source = parent.source;
+ this.sourceProvider = parent.sourceProvider;
+ this.parent = parent;
+ this.privateElements = privateElements;
+ }
+
+ public void bindScope(Class<? extends Annotation> annotationType, Scope scope) {
+ elements.add(new ScopeBinding(getSource(), annotationType, scope));
+ }
+
+ @SuppressWarnings("unchecked") // it is safe to use the type literal for the raw type
+ public void requestInjection(Object instance) {
+ requestInjection((TypeLiteral) TypeLiteral.get(instance.getClass()), instance);
+ }
+
+ public <T> void requestInjection(TypeLiteral<T> type, T instance) {
+ elements.add(new InjectionRequest<T>(getSource(), type, instance));
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(final TypeLiteral<T> typeLiteral) {
+ final MembersInjectorLookup<T> element
+ = new MembersInjectorLookup<T>(getSource(), typeLiteral);
+ elements.add(element);
+ return element.getMembersInjector();
+ }
+
+ public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
+ return getMembersInjector(TypeLiteral.get(type));
+ }
+
+ public void bindListener(Matcher<? super TypeLiteral<?>> typeMatcher, TypeListener listener) {
+ elements.add(new TypeListenerBinding(getSource(), listener, typeMatcher));
+ }
+
+ public void requestStaticInjection(Class<?>... types) {
+ for (Class<?> type : types) {
+ elements.add(new StaticInjectionRequest(getSource(), type));
+ }
+ }
+
+ public void install(Module module) {
+ if (modules.add(module)) {
+ Binder binder = this;
+ if (module instanceof PrivateModule) {
+ binder = binder.newPrivateBinder();
+ }
+
+ try {
+ module.configure(binder);
+ } catch (RuntimeException e) {
+ Collection<Message> messages = Errors.getMessagesFromThrowable(e);
+ if (!messages.isEmpty()) {
+ elements.addAll(messages);
+ } else {
+ addError(e);
+ }
+ }
+ binder.install(ProviderMethodsModule.forModule(module));
+ }
+ }
+
+ public Stage currentStage() {
+ return stage;
+ }
+
+ public void addError(String message, Object... arguments) {
+ elements.add(new Message(getSource(), Errors.format(message, arguments)));
+ }
+
+ public void addError(Throwable t) {
+ String message = "An exception was caught and reported. Message: " + t.getMessage();
+ elements.add(new Message(ImmutableList.of(getSource()), message, t));
+ }
+
+ public void addError(Message message) {
+ elements.add(message);
+ }
+
+ public <T> AnnotatedBindingBuilder<T> bind(Key<T> key) {
+ return new BindingBuilder<T>(this, elements, getSource(), key);
+ }
+
+ public <T> AnnotatedBindingBuilder<T> bind(TypeLiteral<T> typeLiteral) {
+ return bind(Key.get(typeLiteral));
+ }
+
+ public <T> AnnotatedBindingBuilder<T> bind(Class<T> type) {
+ return bind(Key.get(type));
+ }
+
+ public AnnotatedConstantBindingBuilder bindConstant() {
+ return new ConstantBindingBuilderImpl<Void>(this, elements, getSource());
+ }
+
+ public <T> Provider<T> getProvider(final Key<T> key) {
+ final ProviderLookup<T> element = new ProviderLookup<T>(getSource(), key);
+ elements.add(element);
+ return element.getProvider();
+ }
+
+ public <T> Provider<T> getProvider(Class<T> type) {
+ return getProvider(Key.get(type));
+ }
+
+ public void convertToTypes(Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter converter) {
+ elements.add(new TypeConverterBinding(getSource(), typeMatcher, converter));
+ }
+
+ public RecordingBinder withSource(final Object source) {
+ return new RecordingBinder(this, source, null);
+ }
+
+ public RecordingBinder skipSources(Class... classesToSkip) {
+ // if a source is specified explicitly, we don't need to skip sources
+ if (source != null) {
+ return this;
+ }
+
+ SourceProvider newSourceProvider = sourceProvider.plusSkippedClasses(classesToSkip);
+ return new RecordingBinder(this, null, newSourceProvider);
+ }
+
+ public PrivateBinder newPrivateBinder() {
+ PrivateElementsImpl privateElements = new PrivateElementsImpl(getSource());
+ elements.add(privateElements);
+ return new RecordingBinder(this, privateElements);
+ }
+
+ public void expose(Key<?> key) {
+ exposeInternal(key);
+ }
+
+ public AnnotatedElementBuilder expose(Class<?> type) {
+ return exposeInternal(Key.get(type));
+ }
+
+ public AnnotatedElementBuilder expose(TypeLiteral<?> type) {
+ return exposeInternal(Key.get(type));
+ }
+
+ private <T> AnnotatedElementBuilder exposeInternal(Key<T> key) {
+ if (privateElements == null) {
+ addError("Cannot expose %s on a standard binder. "
+ + "Exposed bindings are only applicable to private binders.", key);
+ return new AnnotatedElementBuilder() {
+ public void annotatedWith(Class<? extends Annotation> annotationType) {
+ }
+
+ public void annotatedWith(Annotation annotation) {
+ }
+ };
+ }
+
+ ExposureBuilder<T> builder = new ExposureBuilder<T>(this, getSource(), key);
+ privateElements.addExposureBuilder(builder);
+ return builder;
+ }
+
+ private static ESLogger logger = Loggers.getLogger(Bootstrap.class);
+
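+ // getSource() consults the (stack-inspecting, relatively expensive) SourceProvider
+ // only when debug logging is enabled; otherwise it falls back to the explicit source.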
+ protected Object getSource() {
+ Object ret;
+ if (logger.isDebugEnabled()) {
+ ret = sourceProvider != null
+ ? sourceProvider.get()
+ : source;
+ } else {
+ ret = source;
+ }
+ return ret == null ? "_unknown_" : ret;
+ }
+
+ @Override
+ public String toString() {
+ return "Binder";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ExposedBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ExposedBinding.java
new file mode 100644
index 0000000..c6bd50b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ExposedBinding.java
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Binding;
+
+/**
+ * A binding to a key exposed from an enclosed private environment.
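+ * <p>Such bindings arise from {@code expose()} statements in a
+ * {@link org.elasticsearch.common.inject.PrivateModule PrivateModule}, e.g. (a sketch;
+ * {@code PaymentService} is a hypothetical type):
+ * <pre>
+ *   expose(PaymentService.class);</pre>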
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface ExposedBinding<T> extends Binding<T>, HasDependencies {
+
+ /**
+ * Returns the enclosed environment that holds the original binding.
+ */
+ PrivateElements getPrivateElements();
+
+ /**
+ * Unsupported. Always throws {@link UnsupportedOperationException}.
+ */
+ void applyTo(Binder binder);
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/HasDependencies.java b/src/main/java/org/elasticsearch/common/inject/spi/HasDependencies.java
new file mode 100644
index 0000000..3ff1ffd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/HasDependencies.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import java.util.Set;
+
+/**
+ * Implemented by {@link org.elasticsearch.common.inject.Binding bindings}, {@link org.elasticsearch.common.inject.Provider
+ * providers} and instances that expose their dependencies explicitly.
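+ * <p>A hedged sketch of inspecting such dependencies (assumes the {@code binding} at
+ * hand implements this interface):
+ * <pre>
+ *   for (Dependency&lt;?&gt; dependency : ((HasDependencies) binding).getDependencies()) {
+ *     System.out.println(dependency.getKey());
+ *   }</pre>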
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface HasDependencies {
+
+ /**
+ * Returns the known dependencies for this type. If this has dependencies whose values are not
+ * known statically, a dependency for the {@link org.elasticsearch.common.inject.Injector Injector} will be
+ * included in the returned set.
+ *
+ * @return a possibly empty set
+ */
+ Set<Dependency<?>> getDependencies();
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java b/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java
new file mode 100644
index 0000000..1f5b969
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+/**
+ * Listens for injections into instances of type {@code I}. Useful for performing further
+ * injections, post-injection initialization, and more.
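+ * <p>A hedged sketch of a listener that starts services once their dependencies have
+ * been injected ({@code Startable} is an assumed interface):
+ * <pre>
+ *   InjectionListener&lt;Startable&gt; listener = new InjectionListener&lt;Startable&gt;() {
+ *     public void afterInjection(Startable injectee) {
+ *       injectee.start();
+ *     }
+ *   };</pre>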
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface InjectionListener<I> {
+
+ /**
+ * Invoked by Guice after it injects the fields and methods of instance.
+ *
+ * @param injectee instance that Guice injected dependencies into
+ */
+ void afterInjection(I injectee);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java
new file mode 100644
index 0000000..ceb629d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java
@@ -0,0 +1,401 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.ConfigurationException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.TypeLiteral;
+import org.elasticsearch.common.inject.internal.*;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.*;
+import java.util.*;
+
+import static org.elasticsearch.common.inject.internal.MoreTypes.getRawType;
+
+/**
+ * A constructor, field or method that can receive injections. Typically this is a member with the
+ * {@literal @}{@link Inject} annotation. For non-private, no-arguments constructors, the member may
+ * omit the annotation.
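+ * <p>For example, a hedged sketch ({@code PaymentService} is a hypothetical type):
+ * <pre>
+ *   InjectionPoint constructor = InjectionPoint.forConstructorOf(PaymentService.class);
+ *   Set&lt;InjectionPoint&gt; members =
+ *       InjectionPoint.forInstanceMethodsAndFields(PaymentService.class);</pre>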
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @since 2.0
+ */
+public final class InjectionPoint {
+
+ private final boolean optional;
+ private final Member member;
+ private final ImmutableList<Dependency<?>> dependencies;
+
+ private InjectionPoint(Member member,
+ ImmutableList<Dependency<?>> dependencies, boolean optional) {
+ this.member = member;
+ this.dependencies = dependencies;
+ this.optional = optional;
+ }
+
+ InjectionPoint(TypeLiteral<?> type, Method method) {
+ this.member = method;
+
+ Inject inject = method.getAnnotation(Inject.class);
+ this.optional = inject.optional();
+
+ this.dependencies = forMember(method, type, method.getParameterAnnotations());
+ }
+
+ InjectionPoint(TypeLiteral<?> type, Constructor<?> constructor) {
+ this.member = constructor;
+ this.optional = false;
+ this.dependencies = forMember(constructor, type, constructor.getParameterAnnotations());
+ }
+
+ InjectionPoint(TypeLiteral<?> type, Field field) {
+ this.member = field;
+
+ Inject inject = field.getAnnotation(Inject.class);
+ this.optional = inject.optional();
+
+ Annotation[] annotations = field.getAnnotations();
+
+ Errors errors = new Errors(field);
+ Key<?> key = null;
+ try {
+ key = Annotations.getKey(type.getFieldType(field), field, annotations, errors);
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ errors.throwConfigurationExceptionIfErrorsExist();
+
+ this.dependencies = ImmutableList.<Dependency<?>>of(
+ newDependency(key, Nullability.allowsNull(annotations), -1));
+ }
+
+ private ImmutableList<Dependency<?>> forMember(Member member, TypeLiteral<?> type,
+ Annotation[][] parameterAnnotations) {
+ Errors errors = new Errors(member);
+ Iterator<Annotation[]> annotationsIterator = Arrays.asList(parameterAnnotations).iterator();
+
+ List<Dependency<?>> dependencies = Lists.newArrayList();
+ int index = 0;
+
+ for (TypeLiteral<?> parameterType : type.getParameterTypes(member)) {
+ try {
+ Annotation[] parameterAnnotations = annotationsIterator.next();
+ Key<?> key = Annotations.getKey(parameterType, member, parameterAnnotations, errors);
+ dependencies.add(newDependency(key, Nullability.allowsNull(parameterAnnotations), index));
+ index++;
+ } catch (ErrorsException e) {
+ errors.merge(e.getErrors());
+ }
+ }
+
+ errors.throwConfigurationExceptionIfErrorsExist();
+ return ImmutableList.copyOf(dependencies);
+ }
+
+ // This method is necessary to create a Dependency<T> with proper generic type information
+ private <T> Dependency<T> newDependency(Key<T> key, boolean allowsNull, int parameterIndex) {
+ return new Dependency<T>(this, key, allowsNull, parameterIndex);
+ }
+
+ /**
+ * Returns the injected constructor, field, or method.
+ */
+ public Member getMember() {
+ return member;
+ }
+
+ /**
+ * Returns the dependencies for this injection point. If the injection point is for a method or
+ * constructor, the dependencies will correspond to that member's parameters. Field injection
+ * points always have a single dependency for the field itself.
+ *
+ * @return a possibly-empty list
+ */
+ public List<Dependency<?>> getDependencies() {
+ return dependencies;
+ }
+
+ /**
+ * Returns true if this injection point shall be skipped if the injector cannot resolve bindings
+ * for all required dependencies. Both explicit bindings (as specified in a module), and implicit
+ * bindings ({@literal @}{@link org.elasticsearch.common.inject.ImplementedBy ImplementedBy}, default
+ * constructors etc.) may be used to satisfy optional injection points.
+ */
+ public boolean isOptional() {
+ return optional;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof InjectionPoint
+ && member.equals(((InjectionPoint) o).member);
+ }
+
+ @Override
+ public int hashCode() {
+ return member.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return MoreTypes.toString(member);
+ }
+
+ /**
+ * Returns a new injection point for the injectable constructor of {@code type}.
+ *
+ * @param type a concrete type with exactly one constructor annotated {@literal @}{@link Inject},
+ * or a no-arguments constructor that is not private.
+ * @throws ConfigurationException if there is no injectable constructor, more than one injectable
+ * constructor, or if parameters of the injectable constructor are malformed, such as a
+ * parameter with multiple binding annotations.
+ */
+ public static InjectionPoint forConstructorOf(TypeLiteral<?> type) {
+ Class<?> rawType = getRawType(type.getType());
+ Errors errors = new Errors(rawType);
+
+ Constructor<?> injectableConstructor = null;
+ for (Constructor<?> constructor : rawType.getDeclaredConstructors()) {
+ Inject inject = constructor.getAnnotation(Inject.class);
+ if (inject != null) {
+ if (inject.optional()) {
+ errors.optionalConstructor(constructor);
+ }
+
+ if (injectableConstructor != null) {
+ errors.tooManyConstructors(rawType);
+ }
+
+ injectableConstructor = constructor;
+ checkForMisplacedBindingAnnotations(injectableConstructor, errors);
+ }
+ }
+
+ errors.throwConfigurationExceptionIfErrorsExist();
+
+ if (injectableConstructor != null) {
+ return new InjectionPoint(type, injectableConstructor);
+ }
+
+ // If no annotated constructor is found, look for a no-arg constructor instead.
+ try {
+ Constructor<?> noArgConstructor = rawType.getDeclaredConstructor();
+
+ // Disallow private constructors on non-private classes (unless they have @Inject)
+ if (Modifier.isPrivate(noArgConstructor.getModifiers())
+ && !Modifier.isPrivate(rawType.getModifiers())) {
+ errors.missingConstructor(rawType);
+ throw new ConfigurationException(errors.getMessages());
+ }
+
+ checkForMisplacedBindingAnnotations(noArgConstructor, errors);
+ return new InjectionPoint(type, noArgConstructor);
+ } catch (NoSuchMethodException e) {
+ errors.missingConstructor(rawType);
+ throw new ConfigurationException(errors.getMessages());
+ }
+ }
+
+ /**
+ * Returns a new injection point for the injectable constructor of {@code type}.
+ *
+ * @param type a concrete type with exactly one constructor annotated {@literal @}{@link Inject},
+ * or a no-arguments constructor that is not private.
+ * @throws ConfigurationException if there is no injectable constructor, more than one injectable
+ * constructor, or if parameters of the injectable constructor are malformed, such as a
+ * parameter with multiple binding annotations.
+ */
+ public static InjectionPoint forConstructorOf(Class<?> type) {
+ return forConstructorOf(TypeLiteral.get(type));
+ }
+
+ /**
+ * Returns all static method and field injection points on {@code type}.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ * @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
+ * a field with multiple binding annotations. The exception's {@link
+ * ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
+ * of the valid injection points.
+ */
+ public static Set<InjectionPoint> forStaticMethodsAndFields(TypeLiteral type) {
+ List<InjectionPoint> sink = Lists.newArrayList();
+ Errors errors = new Errors();
+
+ addInjectionPoints(type, Factory.FIELDS, true, sink, errors);
+ addInjectionPoints(type, Factory.METHODS, true, sink, errors);
+
+ ImmutableSet<InjectionPoint> result = ImmutableSet.copyOf(sink);
+ if (errors.hasErrors()) {
+ throw new ConfigurationException(errors.getMessages()).withPartialValue(result);
+ }
+ return result;
+ }
+
+ /**
+ * Returns all static method and field injection points on {@code type}.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ * @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
+ * a field with multiple binding annotations. The exception's {@link
+ * ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
+ * of the valid injection points.
+ */
+ public static Set<InjectionPoint> forStaticMethodsAndFields(Class<?> type) {
+ return forStaticMethodsAndFields(TypeLiteral.get(type));
+ }
+
+ /**
+ * Returns all instance method and field injection points on {@code type}.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ * @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
+ * a field with multiple binding annotations. The exception's {@link
+ * ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
+ * of the valid injection points.
+ */
+ public static Set<InjectionPoint> forInstanceMethodsAndFields(TypeLiteral<?> type) {
+ List<InjectionPoint> sink = Lists.newArrayList();
+ Errors errors = new Errors();
+
+ // TODO (crazybob): Filter out overridden members.
+ addInjectionPoints(type, Factory.FIELDS, false, sink, errors);
+ addInjectionPoints(type, Factory.METHODS, false, sink, errors);
+
+ ImmutableSet<InjectionPoint> result = ImmutableSet.copyOf(sink);
+ if (errors.hasErrors()) {
+ throw new ConfigurationException(errors.getMessages()).withPartialValue(result);
+ }
+ return result;
+ }
+
+ /**
+ * Returns all instance method and field injection points on {@code type}.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ * @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
+ * a field with multiple binding annotations. The exception's {@link
+ * ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
+ * of the valid injection points.
+ */
+ public static Set<InjectionPoint> forInstanceMethodsAndFields(Class<?> type) {
+ return forInstanceMethodsAndFields(TypeLiteral.get(type));
+ }
+
+ private static void checkForMisplacedBindingAnnotations(Member member, Errors errors) {
+ Annotation misplacedBindingAnnotation = Annotations.findBindingAnnotation(
+ errors, member, ((AnnotatedElement) member).getAnnotations());
+ if (misplacedBindingAnnotation == null) {
+ return;
+ }
+
+ // don't warn about misplaced binding annotations on methods when there's a field with the same
+ // name. In Scala, fields always get accessor methods (that we need to ignore). See bug 242.
+ if (member instanceof Method) {
+ try {
+ if (member.getDeclaringClass().getDeclaredField(member.getName()) != null) {
+ return;
+ }
+ } catch (NoSuchFieldException ignore) {
+ }
+ }
+
+ errors.misplacedBindingAnnotation(member, misplacedBindingAnnotation);
+ }
+
+ private static <M extends Member & AnnotatedElement> void addInjectionPoints(TypeLiteral<?> type,
+ Factory<M> factory, boolean statics, Collection<InjectionPoint> injectionPoints,
+ Errors errors) {
+ if (type.getType() == Object.class) {
+ return;
+ }
+
+ // Add injectors for superclass first.
+ TypeLiteral<?> superType = type.getSupertype(type.getRawType().getSuperclass());
+ addInjectionPoints(superType, factory, statics, injectionPoints, errors);
+
+ // Add injectors for all members next
+ addInjectorsForMembers(type, factory, statics, injectionPoints, errors);
+ }
+
+ private static <M extends Member & AnnotatedElement> void addInjectorsForMembers(
+ TypeLiteral<?> typeLiteral, Factory<M> factory, boolean statics,
+ Collection<InjectionPoint> injectionPoints, Errors errors) {
+ for (M member : factory.getMembers(getRawType(typeLiteral.getType()))) {
+ if (isStatic(member) != statics) {
+ continue;
+ }
+
+ Inject inject = member.getAnnotation(Inject.class);
+ if (inject == null) {
+ continue;
+ }
+
+ try {
+ injectionPoints.add(factory.create(typeLiteral, member, errors));
+ } catch (ConfigurationException ignorable) {
+ if (!inject.optional()) {
+ errors.merge(ignorable.getErrorMessages());
+ }
+ }
+ }
+ }
+
+ private static boolean isStatic(Member member) {
+ return Modifier.isStatic(member.getModifiers());
+ }
+
+ private interface Factory<M extends Member & AnnotatedElement> {
+ Factory<Field> FIELDS = new Factory<Field>() {
+ public Field[] getMembers(Class<?> type) {
+ return type.getDeclaredFields();
+ }
+
+ public InjectionPoint create(TypeLiteral<?> typeLiteral, Field member, Errors errors) {
+ return new InjectionPoint(typeLiteral, member);
+ }
+ };
+
+ Factory<Method> METHODS = new Factory<Method>() {
+ public Method[] getMembers(Class<?> type) {
+ return type.getDeclaredMethods();
+ }
+
+ public InjectionPoint create(TypeLiteral<?> typeLiteral, Method member, Errors errors) {
+ checkForMisplacedBindingAnnotations(member, errors);
+ return new InjectionPoint(typeLiteral, member);
+ }
+ };
+
+ M[] getMembers(Class<?> type);
+
+ InjectionPoint create(TypeLiteral<?> typeLiteral, M member, Errors errors);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/InjectionRequest.java b/src/main/java/org/elasticsearch/common/inject/spi/InjectionRequest.java
new file mode 100644
index 0000000..26b6295
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/InjectionRequest.java
@@ -0,0 +1,84 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.ConfigurationException;
+import org.elasticsearch.common.inject.TypeLiteral;
+
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * A request to inject the instance fields and methods of an instance. Requests are created
+ * explicitly in a module using {@link org.elasticsearch.common.inject.Binder#requestInjection(Object)
+ * requestInjection()} statements:
+ * <pre>
+ * requestInjection(serviceInstance);</pre>
+ *
+ * @author mikeward@google.com (Mike Ward)
+ * @since 2.0
+ */
+public final class InjectionRequest<T> implements Element {
+
+ private final Object source;
+ private final TypeLiteral<T> type;
+ private final T instance;
+
+ public InjectionRequest(Object source, TypeLiteral<T> type, T instance) {
+ this.source = checkNotNull(source, "source");
+ this.type = checkNotNull(type, "type");
+ this.instance = checkNotNull(instance, "instance");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public T getInstance() {
+ return instance;
+ }
+
+ public TypeLiteral<T> getType() {
+ return type;
+ }
+
+ /**
+ * Returns the instance methods and fields of {@code instance} that will be injected to fulfill
+ * this request.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ * @throws ConfigurationException if there is a malformed injection point on the class of {@code
+ * instance}, such as a field with multiple binding annotations. The exception's {@link
+ * ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
+ * of the valid injection points.
+ */
+ public Set<InjectionPoint> getInjectionPoints() throws ConfigurationException {
+ return InjectionPoint.forInstanceMethodsAndFields(instance.getClass());
+ }
+
+ public <R> R acceptVisitor(ElementVisitor<R> visitor) {
+ return visitor.visit(this);
+ }
+
+ public void applyTo(Binder binder) {
+ binder.withSource(getSource()).requestInjection(type, instance);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/InstanceBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/InstanceBinding.java
new file mode 100644
index 0000000..13712fe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/InstanceBinding.java
@@ -0,0 +1,44 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+
+import java.util.Set;
+
+/**
+ * A binding to a single instance. The same instance is returned for every injection.
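+ * <p>Such bindings are created with {@code toInstance()} statements, e.g. (a sketch
+ * with hypothetical types):
+ * <pre>
+ *   bind(PaymentService.class).toInstance(new CreditCardPaymentService());</pre>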
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface InstanceBinding<T> extends Binding<T>, HasDependencies {
+
+ /**
+ * Returns the user-supplied instance.
+ */
+ T getInstance();
+
+ /**
+ * Returns the field and method injection points of the instance, injected at injector-creation
+ * time only.
+ *
+ * @return a possibly empty set
+ */
+ Set<InjectionPoint> getInjectionPoints();
+
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/LinkedKeyBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/LinkedKeyBinding.java
new file mode 100644
index 0000000..31b1b74
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/LinkedKeyBinding.java
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+import org.elasticsearch.common.inject.Key;
+
+/**
+ * A binding to a linked key. The other key's binding is used to resolve injections.
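+ * <p>Such bindings are created with {@code to()} statements, e.g. (a sketch with
+ * hypothetical types):
+ * <pre>
+ *   bind(PaymentService.class).to(CreditCardPaymentService.class);</pre>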
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface LinkedKeyBinding<T> extends Binding<T> {
+
+ /**
+ * Returns the linked key used to resolve injections. That binding can be retrieved from an
+ * injector using {@link org.elasticsearch.common.inject.Injector#getBinding(Key) Injector.getBinding(key)}.
+ */
+ Key<? extends T> getLinkedKey();
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/MembersInjectorLookup.java b/src/main/java/org/elasticsearch/common/inject/spi/MembersInjectorLookup.java
new file mode 100644
index 0000000..873ffc3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/MembersInjectorLookup.java
@@ -0,0 +1,104 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.MembersInjector;
+import org.elasticsearch.common.inject.TypeLiteral;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * A lookup of the members injector for a type. Lookups are created explicitly in a module using
+ * {@link org.elasticsearch.common.inject.Binder#getMembersInjector(Class) getMembersInjector()} statements:
+ * <pre>
+ * MembersInjector&lt;PaymentService&gt; membersInjector
+ * = getMembersInjector(PaymentService.class);</pre>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @since 2.0
+ */
+public final class MembersInjectorLookup<T> implements Element {
+
+ private final Object source;
+ private final TypeLiteral<T> type;
+ private MembersInjector<T> delegate;
+
+ public MembersInjectorLookup(Object source, TypeLiteral<T> type) {
+ this.source = checkNotNull(source, "source");
+ this.type = checkNotNull(type, "type");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ /**
+ * Gets the type containing the members to be injected.
+ */
+ public TypeLiteral<T> getType() {
+ return type;
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ /**
+ * Sets the actual members injector.
+ *
+ * @throws IllegalStateException if the delegate is already set
+ */
+ public void initializeDelegate(MembersInjector<T> delegate) {
+ checkState(this.delegate == null, "delegate already initialized");
+ this.delegate = checkNotNull(delegate, "delegate");
+ }
+
+ public void applyTo(Binder binder) {
+ initializeDelegate(binder.withSource(getSource()).getMembersInjector(type));
+ }
+
+ /**
+ * Returns the delegate members injector, or {@code null} if it has not yet been initialized.
+ * The delegate will be initialized when this element is processed, or otherwise used to create
+ * an injector.
+ */
+ public MembersInjector<T> getDelegate() {
+ return delegate;
+ }
+
+ /**
+ * Returns the looked up members injector. The result is not valid until this lookup has been
+ * initialized, which usually happens when the injector is created. The members injector will
+ * throw an {@code IllegalStateException} if you try to use it beforehand.
+ */
+ public MembersInjector<T> getMembersInjector() {
+ return new MembersInjector<T>() {
+ public void injectMembers(T instance) {
+ checkState(delegate != null,
+ "This MembersInjector cannot be used until the Injector has been created.");
+ delegate.injectMembers(instance);
+ }
+
+ @Override
+ public String toString() {
+ return "MembersInjector<" + type + ">";
+ }
+ };
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/Message.java b/src/main/java/org/elasticsearch/common/inject/spi/Message.java
new file mode 100644
index 0000000..fd088f5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/Message.java
@@ -0,0 +1,142 @@
+/**
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.internal.Errors;
+import org.elasticsearch.common.inject.internal.SourceProvider;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * An error message and the context in which it occurred. Messages are usually created internally by
+ * Guice and its extensions. Messages can be created explicitly in a module using {@link
+ * org.elasticsearch.common.inject.Binder#addError(Throwable) addError()} statements:
+ * <pre>
+ * try {
+ * bindPropertiesFromFile();
+ * } catch (IOException e) {
+ * addError(e);
+ * }</pre>
+ *
+ * @author crazybob@google.com (Bob Lee)
+ */
+public final class Message implements Serializable, Element {
+ private final String message;
+ private final Throwable cause;
+ private final List<Object> sources;
+
+ /**
+ * @since 2.0
+ */
+ public Message(List<Object> sources, String message, Throwable cause) {
+ this.sources = ImmutableList.copyOf(sources);
+ this.message = checkNotNull(message, "message");
+ this.cause = cause;
+ }
+
+ public Message(Object source, String message) {
+ this(ImmutableList.of(source), message, null);
+ }
+
+ public Message(String message) {
+ this(ImmutableList.of(), message, null);
+ }
+
+ public String getSource() {
+ return sources.isEmpty()
+ ? SourceProvider.UNKNOWN_SOURCE.toString()
+ : Errors.convert(sources.get(sources.size() - 1)).toString();
+ }
+
+ /**
+ * @since 2.0
+ */
+ public List<Object> getSources() {
+ return sources;
+ }
+
+ /**
+ * Gets the error message text.
+ */
+ public String getMessage() {
+ return message;
+ }
+
+ /**
+ * @since 2.0
+ */
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ /**
+ * Returns the throwable that caused this message, or {@code null} if this
+ * message was not caused by a throwable.
+ *
+ * @since 2.0
+ */
+ public Throwable getCause() {
+ return cause;
+ }
+
+ @Override
+ public String toString() {
+ return message;
+ }
+
+ @Override
+ public int hashCode() {
+ return message.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof Message)) {
+ return false;
+ }
+ Message e = (Message) o;
+ return message.equals(e.message) && Objects.equal(cause, e.cause) && sources.equals(e.sources);
+ }
+
+ /**
+ * @since 2.0
+ */
+ public void applyTo(Binder binder) {
+ binder.withSource(getSource()).addError(this);
+ }
+
+ /**
+ * When serialized, we eagerly convert sources to strings. This hurts our formatting, but it
+ * guarantees that the receiving end will be able to read the message.
+ */
+ private Object writeReplace() throws ObjectStreamException {
+ Object[] sourcesAsStrings = sources.toArray();
+ for (int i = 0; i < sourcesAsStrings.length; i++) {
+ sourcesAsStrings[i] = Errors.convert(sourcesAsStrings[i]).toString();
+ }
+ return new Message(ImmutableList.copyOf(sourcesAsStrings), message, cause);
+ }
+
+ private static final long serialVersionUID = 0;
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/PrivateElements.java b/src/main/java/org/elasticsearch/common/inject/spi/PrivateElements.java
new file mode 100644
index 0000000..64ead9c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/PrivateElements.java
@@ -0,0 +1,61 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Key;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * A private collection of elements that are hidden from the enclosing injector or module by
+ * default. See {@link org.elasticsearch.common.inject.PrivateModule PrivateModule} for details.
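+ * <p>A hedged sketch of finding them among a module's recorded elements
+ * ({@code MyPrivateModule} is a hypothetical private module):
+ * <pre>
+ *   for (Element element : Elements.getElements(new MyPrivateModule())) {
+ *     if (element instanceof PrivateElements) {
+ *       Set&lt;Key&lt;?&gt;&gt; exposed = ((PrivateElements) element).getExposedKeys();
+ *     }
+ *   }</pre>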
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface PrivateElements extends Element {
+
+ /**
+ * Returns the configuration information in this private environment.
+ */
+ List<Element> getElements();
+
+ /**
+ * Returns the child injector that hosts these private elements, or null if the elements haven't
+ * been used to create an injector.
+ */
+ Injector getInjector();
+
+ /**
+ * Returns the unique exposed keys for these private elements.
+ */
+ Set<Key<?>> getExposedKeys();
+
+ /**
+ * Returns an arbitrary object containing information about the "place" where this key was
+ * exposed. Used by Guice in the production of descriptive error messages.
+ * <p>Tools might specially handle types they know about; {@code StackTraceElement} is a good
+ * example. Tools should simply call {@code toString()} on the source object if the type is
+ * unfamiliar.
+ *
+ * @param key one of the keys exposed by this module.
+ */
+ Object getExposedSource(Key<?> key);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ProviderBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ProviderBinding.java
new file mode 100644
index 0000000..b6f67b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ProviderBinding.java
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+
+/**
+ * A binding to a {@link Provider} that delegates to the binding for the provided type. This binding
+ * is used whenever a {@code Provider<T>} is injected (as opposed to injecting {@code T} directly).
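+ * <p>For example, a field injection of a provider (with a hypothetical type) is
+ * satisfied by such a binding:
+ * <pre>
+ *   {@literal @}Inject Provider&lt;PaymentService&gt; paymentServiceProvider;</pre>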
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface ProviderBinding<T extends Provider<?>> extends Binding<T> {
+
+ /**
+ * Returns the key whose binding is used to {@link Provider#get provide instances}. That binding
+ * can be retrieved from an injector using {@link org.elasticsearch.common.inject.Injector#getBinding(Key)
+ * Injector.getBinding(providedKey)}
+ */
+ Key<?> getProvidedKey();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ProviderInstanceBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ProviderInstanceBinding.java
new file mode 100644
index 0000000..246bc8a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ProviderInstanceBinding.java
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+import org.elasticsearch.common.inject.Provider;
+
+import java.util.Set;
+
+/**
+ * A binding to a provider instance. The provider's {@code get} method is invoked to resolve
+ * injections.
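+ * <p>Such bindings are created with {@code toProvider()} statements that supply a
+ * provider instance, e.g. (a sketch with hypothetical types):
+ * <pre>
+ *   bind(PaymentService.class).toProvider(new PaymentServiceProvider());</pre>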
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface ProviderInstanceBinding<T> extends Binding<T>, HasDependencies {
+
+ /**
+ * Returns the user-supplied, unscoped provider.
+ */
+ Provider<? extends T> getProviderInstance();
+
+ /**
+ * Returns the field and method injection points of the provider, injected at injector-creation
+ * time only.
+ *
+ * @return a possibly empty set
+ */
+ Set<InjectionPoint> getInjectionPoints();
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ProviderKeyBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ProviderKeyBinding.java
new file mode 100644
index 0000000..4d934f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ProviderKeyBinding.java
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+
+/**
+ * A binding to a provider key. To resolve injections, the provider key is first resolved, then that
+ * provider's {@code get} method is invoked.
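+ * <p>Such bindings are created with {@code toProvider()} statements that name a
+ * provider class, e.g. (a sketch with hypothetical types):
+ * <pre>
+ *   bind(PaymentService.class).toProvider(PaymentServiceProvider.class);</pre>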
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface ProviderKeyBinding<T> extends Binding<T> {
+
+ /**
+ * Returns the key used to resolve the provider's binding. That binding can be retrieved from an
+ * injector using {@link org.elasticsearch.common.inject.Injector#getBinding(Key)
+ * Injector.getBinding(providerKey)}
+ */
+ Key<? extends Provider<? extends T>> getProviderKey();
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ProviderLookup.java b/src/main/java/org/elasticsearch/common/inject/spi/ProviderLookup.java
new file mode 100644
index 0000000..fc9bbd1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ProviderLookup.java
@@ -0,0 +1,99 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.Provider;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * A lookup of the provider for a type. Lookups are created explicitly in a module using
+ * {@link org.elasticsearch.common.inject.Binder#getProvider(Class) getProvider()} statements:
+ * <pre>
+ * Provider&lt;PaymentService&gt; paymentServiceProvider
+ * = getProvider(PaymentService.class);</pre>
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class ProviderLookup<T> implements Element {
+ private final Object source;
+ private final Key<T> key;
+ private Provider<T> delegate;
+
+ public ProviderLookup(Object source, Key<T> key) {
+ this.source = checkNotNull(source, "source");
+ this.key = checkNotNull(key, "key");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public Key<T> getKey() {
+ return key;
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ /**
+ * Sets the actual provider.
+ *
+ * @throws IllegalStateException if the delegate is already set
+ */
+ public void initializeDelegate(Provider<T> delegate) {
+ checkState(this.delegate == null, "delegate already initialized");
+ this.delegate = checkNotNull(delegate, "delegate");
+ }
+
+ public void applyTo(Binder binder) {
+ initializeDelegate(binder.withSource(getSource()).getProvider(key));
+ }
+
+ /**
+ * Returns the delegate provider, or {@code null} if it has not yet been initialized. The delegate
+ * will be initialized when this element is processed, or otherwise used to create an injector.
+ */
+ public Provider<T> getDelegate() {
+ return delegate;
+ }
+
+ /**
+ * Returns the looked up provider. The result is not valid until this lookup has been initialized,
+ * which usually happens when the injector is created. The provider will throw an {@code
+ * IllegalStateException} if you try to use it beforehand.
+ */
+ public Provider<T> getProvider() {
+ return new Provider<T>() {
+ public T get() {
+ checkState(delegate != null,
+ "This Provider cannot be used until the Injector has been created.");
+ return delegate.get();
+ }
+
+ @Override
+ public String toString() {
+ return "Provider<" + key.getTypeLiteral() + ">";
+ }
+ };
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ProviderWithDependencies.java b/src/main/java/org/elasticsearch/common/inject/spi/ProviderWithDependencies.java
new file mode 100644
index 0000000..2c21437
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ProviderWithDependencies.java
@@ -0,0 +1,28 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Provider;
+
+/**
+ * A provider with dependencies on other injected types. If a {@link Provider} has dependencies that
+ * aren't specified in injections, this interface should be used to expose all dependencies.
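+ * <p>A hedged sketch ({@code Dispatcher} is a hypothetical type):
+ * <pre>
+ *   class DispatcherProvider implements ProviderWithDependencies&lt;Dispatcher&gt; {
+ *     public Dispatcher get() {
+ *       return new Dispatcher();
+ *     }
+ *
+ *     public Set&lt;Dependency&lt;?&gt;&gt; getDependencies() {
+ *       return Dependency.forInjectionPoints(
+ *           InjectionPoint.forInstanceMethodsAndFields(Dispatcher.class));
+ *     }
+ *   }</pre>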
+ *
+ * @since 2.0
+ */
+public interface ProviderWithDependencies<T> extends Provider<T>, HasDependencies {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/ScopeBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/ScopeBinding.java
new file mode 100644
index 0000000..cefd060
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/ScopeBinding.java
@@ -0,0 +1,67 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.Scope;
+
+import java.lang.annotation.Annotation;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Registration of a scope annotation with the scope that implements it. Instances are created
+ * explicitly in a module using {@link org.elasticsearch.common.inject.Binder#bindScope(Class, Scope) bindScope()}
+ * statements:
+ * <pre>
+ * Scope recordScope = new RecordScope();
+ * bindScope(RecordScoped.class, new RecordScope());</pre>
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class ScopeBinding implements Element {
+ private final Object source;
+ private final Class<? extends Annotation> annotationType;
+ private final Scope scope;
+
+ ScopeBinding(Object source, Class<? extends Annotation> annotationType, Scope scope) {
+ this.source = checkNotNull(source, "source");
+ this.annotationType = checkNotNull(annotationType, "annotationType");
+ this.scope = checkNotNull(scope, "scope");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public Class<? extends Annotation> getAnnotationType() {
+ return annotationType;
+ }
+
+ public Scope getScope() {
+ return scope;
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ public void applyTo(Binder binder) {
+ binder.withSource(getSource()).bindScope(annotationType, scope);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/StaticInjectionRequest.java b/src/main/java/org/elasticsearch/common/inject/spi/StaticInjectionRequest.java
new file mode 100644
index 0000000..dc233f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/StaticInjectionRequest.java
@@ -0,0 +1,76 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.ConfigurationException;
+
+import java.util.Set;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * A request to inject the static fields and methods of a type. Requests are created
+ * explicitly in a module using {@link org.elasticsearch.common.inject.Binder#requestStaticInjection(Class[])
+ * requestStaticInjection()} statements:
+ * <pre>
+ * requestStaticInjection(MyLegacyService.class);</pre>
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class StaticInjectionRequest implements Element {
+ private final Object source;
+ private final Class<?> type;
+
+ StaticInjectionRequest(Object source, Class<?> type) {
+ this.source = checkNotNull(source, "source");
+ this.type = checkNotNull(type, "type");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public Class<?> getType() {
+ return type;
+ }
+
+ /**
+ * Returns the static methods and fields of {@code type} that will be injected to fulfill this
+ * request.
+ *
+ * @return a possibly empty set of injection points. The set has a specified iteration order. All
+ * fields are returned and then all methods. Within the fields, supertype fields are returned
+ * before subtype fields. Similarly, supertype methods are returned before subtype methods.
+ * @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
+ * a field with multiple binding annotations. The exception's {@link
+ * ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
+ * of the valid injection points.
+ */
+ public Set<InjectionPoint> getInjectionPoints() throws ConfigurationException {
+ return InjectionPoint.forStaticMethodsAndFields(type);
+ }
+
+ public void applyTo(Binder binder) {
+ binder.withSource(getSource()).requestStaticInjection(type);
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/TypeConverter.java b/src/main/java/org/elasticsearch/common/inject/spi/TypeConverter.java
new file mode 100644
index 0000000..27539d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/TypeConverter.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.TypeLiteral;
+
+/**
+ * Converts constant string values to a different type.
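+ * <p>A hedged sketch of a converter for an assumed {@code DateTime} type:
+ * <pre>
+ *   TypeConverter converter = new TypeConverter() {
+ *     public Object convert(String value, TypeLiteral&lt;?&gt; toType) {
+ *       return DateTime.parse(value);
+ *     }
+ *   };</pre>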
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @since 2.0
+ */
+public interface TypeConverter {
+
+ /**
+ * Converts a string value. Throws an exception if a conversion error occurs.
+ */
+ Object convert(String value, TypeLiteral<?> toType);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/TypeConverterBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/TypeConverterBinding.java
new file mode 100644
index 0000000..d0522aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/TypeConverterBinding.java
@@ -0,0 +1,66 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.TypeLiteral;
+import org.elasticsearch.common.inject.matcher.Matcher;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Registration of type converters for matching target types. Instances are created
+ * explicitly in a module using {@link org.elasticsearch.common.inject.Binder#convertToTypes(Matcher,
+ * TypeConverter) convertToTypes()} statements:
+ * <pre>
+ * convertToTypes(Matchers.only(DateTime.class), new DateTimeConverter());</pre>
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class TypeConverterBinding implements Element {
+ private final Object source;
+ private final Matcher<? super TypeLiteral<?>> typeMatcher;
+ private final TypeConverter typeConverter;
+
+ TypeConverterBinding(Object source, Matcher<? super TypeLiteral<?>> typeMatcher,
+ TypeConverter typeConverter) {
+ this.source = checkNotNull(source, "source");
+ this.typeMatcher = checkNotNull(typeMatcher, "typeMatcher");
+ this.typeConverter = checkNotNull(typeConverter, "typeConverter");
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public Matcher<? super TypeLiteral<?>> getTypeMatcher() {
+ return typeMatcher;
+ }
+
+ public TypeConverter getTypeConverter() {
+ return typeConverter;
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ public void applyTo(Binder binder) {
+ binder.withSource(getSource()).convertToTypes(typeMatcher, typeConverter);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java b/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java
new file mode 100644
index 0000000..8cb0944
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java
@@ -0,0 +1,100 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Key;
+import org.elasticsearch.common.inject.MembersInjector;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.TypeLiteral;
+
+/**
+ * Context of an injectable type encounter. Enables reporting errors, registering injection
+ * listeners and binding method interceptors for injectable type {@code I}. It is an error to use
+ * an encounter after the {@link TypeListener#hear(TypeLiteral, TypeEncounter) hear()} method has
+ * returned.
+ *
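+ * <p>An illustrative sketch (not part of the original source) of a
+ * {@link TypeListener#hear hear()} implementation using the encounter:
+ * <pre>
+ * public &lt;I&gt; void hear(TypeLiteral&lt;I&gt; type, TypeEncounter&lt;I&gt; encounter) {
+ *   encounter.register(new InjectionListener&lt;I&gt;() {
+ *     public void afterInjection(I injectee) {
+ *       // react to each newly injected instance of I
+ *     }
+ *   });
+ * }</pre>
+ *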
+ * @param <I> the injectable type encountered
+ * @since 2.0
+ */
+public interface TypeEncounter<I> {
+
+ /**
+ * Records an error message for type {@code I} which will be presented to the user at a later
+ * time. Unlike throwing an exception, this enables us to continue configuring the Injector and
+ * discover more errors. Uses {@link String#format(String, Object[])} to insert the arguments
+ * into the message.
+ */
+ void addError(String message, Object... arguments);
+
+ /**
+ * Records an exception for type {@code I}, the full details of which will be logged, and the
+ * message of which will be presented to the user at a later time. If your type listener calls
+ * something that you worry may fail, you should catch the exception and pass it to this method.
+ */
+ void addError(Throwable t);
+
+ /**
+ * Records an error message to be presented to the user at a later time.
+ */
+ void addError(Message message);
+
+ /**
+ * Returns the provider used to obtain instances for the given injection key. The returned
+ * provider will not be valid until the injector has been created. The provider will throw an
+ * {@code IllegalStateException} if you try to use it beforehand.
+ */
+ <T> Provider<T> getProvider(Key<T> key);
+
+ /**
+ * Returns the provider used to obtain instances for the given injection type. The returned
+ * provider will not be valid until the injector has been created. The provider will throw an
+ * {@code IllegalStateException} if you try to use it beforehand.
+ */
+ <T> Provider<T> getProvider(Class<T> type);
+
+ /**
+ * Returns the members injector used to inject dependencies into methods and fields on instances
+ * of the given type {@code T}. The returned members injector will not be valid until the main
+ * injector has been created. The members injector will throw an {@code IllegalStateException}
+ * if you try to use it beforehand.
+ *
+ * @param typeLiteral type to get members injector for
+ */
+ <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral);
+
+ /**
+ * Returns the members injector used to inject dependencies into methods and fields on instances
+ * of the given type {@code T}. The returned members injector will not be valid until the main
+ * injector has been created. The members injector will throw an {@code IllegalStateException}
+ * if you try to use it beforehand.
+ *
+ * @param type type to get members injector for
+ */
+ <T> MembersInjector<T> getMembersInjector(Class<T> type);
+
+ /**
+ * Registers a members injector for type {@code I}. Guice will use the members injector after it has
+ * performed its own injections on an instance of {@code I}.
+ */
+ void register(MembersInjector<? super I> membersInjector);
+
+ /**
+ * Registers an injection listener for type {@code I}. Guice will notify the listener after all
+ * injections have been performed on an instance of {@code I}.
+ */
+ void register(InjectionListener<? super I> listener);
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/TypeListener.java b/src/main/java/org/elasticsearch/common/inject/spi/TypeListener.java
new file mode 100644
index 0000000..f066683
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/TypeListener.java
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.TypeLiteral;
+
+/**
+ * Listens for Guice to encounter injectable types. If a given type has its constructor injected in
+ * one situation but only its methods and fields injected in another, Guice will notify this
+ * listener once.
+ * <p/>
+ * <p>Useful for extra type checking, {@linkplain TypeEncounter#register(InjectionListener)
+ * registering injection listeners}, and {@linkplain TypeEncounter#bindInterceptor(
+ *org.elasticsearch.common.inject.matcher.Matcher, org.aopalliance.intercept.MethodInterceptor[])
+ * binding method interceptors}.
+ *
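+ * <p>A minimal sketch (illustrative only, inside a module's configure method):
+ * <pre>
+ * bindListener(Matchers.any(), new TypeListener() {
+ *   public &lt;I&gt; void hear(TypeLiteral&lt;I&gt; type, TypeEncounter&lt;I&gt; encounter) {
+ *     // inspect the type or register injection listeners here
+ *   }
+ * });</pre>
+ *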
+ * @since 2.0
+ */
+public interface TypeListener {
+
+ /**
+ * Invoked when Guice encounters a new type eligible for constructor or members injection.
+ * Called during injector creation (or afterwards if Guice encounters a type at run time and
+ * creates a JIT binding).
+ *
+ * @param type encountered by Guice
+ * @param encounter context of this encounter, enables reporting errors, registering injection
+ * listeners and binding method interceptors for {@code type}.
+ * @param <I> the injectable type
+ */
+ <I> void hear(TypeLiteral<I> type, TypeEncounter<I> encounter);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/TypeListenerBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/TypeListenerBinding.java
new file mode 100644
index 0000000..9df9156
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/TypeListenerBinding.java
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.inject.TypeLiteral;
+import org.elasticsearch.common.inject.matcher.Matcher;
+
+/**
+ * Binds types (picked using a Matcher) to a type listener. Registrations are created explicitly in
+ * a module using {@link org.elasticsearch.common.inject.Binder#bindListener(Matcher, TypeListener)} statements:
+ * <p/>
+ * <pre>
+ * bindListener(only(new TypeLiteral&lt;PaymentService&lt;CreditCard&gt;&gt;() {}), listener);</pre>
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class TypeListenerBinding implements Element {
+
+ private final Object source;
+ private final Matcher<? super TypeLiteral<?>> typeMatcher;
+ private final TypeListener listener;
+
+ TypeListenerBinding(Object source, TypeListener listener,
+ Matcher<? super TypeLiteral<?>> typeMatcher) {
+ this.source = source;
+ this.listener = listener;
+ this.typeMatcher = typeMatcher;
+ }
+
+ /**
+ * Returns the registered listener.
+ */
+ public TypeListener getListener() {
+ return listener;
+ }
+
+ /**
+ * Returns the type matcher which chooses which types the listener should be notified of.
+ */
+ public Matcher<? super TypeLiteral<?>> getTypeMatcher() {
+ return typeMatcher;
+ }
+
+ public Object getSource() {
+ return source;
+ }
+
+ public <T> T acceptVisitor(ElementVisitor<T> visitor) {
+ return visitor.visit(this);
+ }
+
+ public void applyTo(Binder binder) {
+ binder.withSource(getSource()).bindListener(typeMatcher, listener);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/UntargettedBinding.java b/src/main/java/org/elasticsearch/common/inject/spi/UntargettedBinding.java
new file mode 100644
index 0000000..ba2dc9a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/UntargettedBinding.java
@@ -0,0 +1,29 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.spi;
+
+import org.elasticsearch.common.inject.Binding;
+
+/**
+ * An untargetted binding. This binding indicates that the injector should use its implicit binding
+ * strategies to resolve injections.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public interface UntargettedBinding<T> extends Binding<T> {
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/spi/package-info.java b/src/main/java/org/elasticsearch/common/inject/spi/package-info.java
new file mode 100644
index 0000000..f4f39e6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/spi/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2006 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Guice service provider interface
+ */
+
+package org.elasticsearch.common.inject.spi; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/util/Modules.java b/src/main/java/org/elasticsearch/common/inject/util/Modules.java
new file mode 100644
index 0000000..a7506cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/util/Modules.java
@@ -0,0 +1,269 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.util;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.spi.*;
+
+import java.lang.annotation.Annotation;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Static utility methods for creating and working with instances of {@link Module}.
+ *
+ * @author jessewilson@google.com (Jesse Wilson)
+ * @since 2.0
+ */
+public final class Modules {
+ private Modules() {
+ }
+
+ public static final Module EMPTY_MODULE = new Module() {
+ public void configure(Binder binder) {
+ }
+ };
+
+ /**
+ * Returns a builder that creates a module that overlays override modules over the given
+ * modules. If a key is bound in both sets of modules, only the binding from the override modules
+ * is kept. This can be used to replace the bindings of a production module with test bindings:
+ * <pre>
+ * Module functionalTestModule
+ * = Modules.override(new ProductionModule()).with(new TestModule());
+ * </pre>
+ * <p/>
+ * <p>Prefer to write smaller modules that can be reused and tested without overrides.
+ *
+ * @param modules the modules whose bindings are open to be overridden
+ */
+ public static OverriddenModuleBuilder override(Module... modules) {
+ return new RealOverriddenModuleBuilder(Arrays.asList(modules));
+ }
+
+ /**
+ * Returns a builder that creates a module that overlays override modules over the given
+ * modules. If a key is bound in both sets of modules, only the binding from the override modules
+ * is kept. This can be used to replace the bindings of a production module with test bindings:
+ * <pre>
+ * Module functionalTestModule
+ * = Modules.override(getProductionModules()).with(getTestModules());
+ * </pre>
+ * <p/>
+ * <p>Prefer to write smaller modules that can be reused and tested without overrides.
+ *
+ * @param modules the modules whose bindings are open to be overridden
+ */
+ public static OverriddenModuleBuilder override(Iterable<? extends Module> modules) {
+ return new RealOverriddenModuleBuilder(modules);
+ }
+
+ /**
+ * Returns a new module that installs all of {@code modules}.
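+ * <p>For example (hypothetical modules, illustrative only):
+ * <pre>
+ * Module combined = Modules.combine(new FooModule(), new BarModule());</pre>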
+ */
+ public static Module combine(Module... modules) {
+ return combine(ImmutableSet.copyOf(modules));
+ }
+
+ /**
+ * Returns a new module that installs all of {@code modules}.
+ */
+ public static Module combine(Iterable<? extends Module> modules) {
+ final Set<Module> modulesSet = ImmutableSet.copyOf(modules);
+ return new Module() {
+ public void configure(Binder binder) {
+ binder = binder.skipSources(getClass());
+ for (Module module : modulesSet) {
+ binder.install(module);
+ }
+ }
+ };
+ }
+
+ /**
+ * See the EDSL example at {@link Modules#override(Module[]) override()}.
+ */
+ public interface OverriddenModuleBuilder {
+
+ /**
+ * See the EDSL example at {@link Modules#override(Module[]) override()}.
+ */
+ Module with(Module... overrides);
+
+ /**
+ * See the EDSL example at {@link Modules#override(Module[]) override()}.
+ */
+ Module with(Iterable<? extends Module> overrides);
+ }
+
+ private static final class RealOverriddenModuleBuilder implements OverriddenModuleBuilder {
+ private final ImmutableSet<Module> baseModules;
+
+ private RealOverriddenModuleBuilder(Iterable<? extends Module> baseModules) {
+ this.baseModules = ImmutableSet.copyOf(baseModules);
+ }
+
+ public Module with(Module... overrides) {
+ return with(Arrays.asList(overrides));
+ }
+
+ public Module with(final Iterable<? extends Module> overrides) {
+ return new AbstractModule() {
+ @Override
+ public void configure() {
+ final List<Element> elements = Elements.getElements(baseModules);
+ final List<Element> overrideElements = Elements.getElements(overrides);
+
+ final Set<Key> overriddenKeys = Sets.newHashSet();
+ final Set<Class<? extends Annotation>> overridesScopeAnnotations = Sets.newHashSet();
+
+ // execute the overrides module, keeping track of which keys and scopes are bound
+ new ModuleWriter(binder()) {
+ @Override
+ public <T> Void visit(Binding<T> binding) {
+ overriddenKeys.add(binding.getKey());
+ return super.visit(binding);
+ }
+
+ @Override
+ public Void visit(ScopeBinding scopeBinding) {
+ overridesScopeAnnotations.add(scopeBinding.getAnnotationType());
+ return super.visit(scopeBinding);
+ }
+
+ @Override
+ public Void visit(PrivateElements privateElements) {
+ overriddenKeys.addAll(privateElements.getExposedKeys());
+ return super.visit(privateElements);
+ }
+ }.writeAll(overrideElements);
+
+ // execute the original module, skipping all scopes and overridden keys. We only skip each
+ // overridden binding once so things still blow up if the module binds the same thing
+ // multiple times.
+ final Map<Scope, Object> scopeInstancesInUse = Maps.newHashMap();
+ final List<ScopeBinding> scopeBindings = Lists.newArrayList();
+ new ModuleWriter(binder()) {
+ @Override
+ public <T> Void visit(Binding<T> binding) {
+ if (!overriddenKeys.remove(binding.getKey())) {
+ super.visit(binding);
+
+ // Record when a scope instance is used in a binding
+ Scope scope = getScopeInstanceOrNull(binding);
+ if (scope != null) {
+ scopeInstancesInUse.put(scope, binding.getSource());
+ }
+ }
+
+ return null;
+ }
+
+ @Override
+ public Void visit(PrivateElements privateElements) {
+ PrivateBinder privateBinder = binder.withSource(privateElements.getSource())
+ .newPrivateBinder();
+
+ Set<Key<?>> skippedExposes = Sets.newHashSet();
+
+ for (Key<?> key : privateElements.getExposedKeys()) {
+ if (overriddenKeys.remove(key)) {
+ skippedExposes.add(key);
+ } else {
+ privateBinder.withSource(privateElements.getExposedSource(key)).expose(key);
+ }
+ }
+
+ // we're not skipping deep exposes, but that should be okay. If we ever need to, we
+ // would have to search through this set of elements for PrivateElements, recursively
+ for (Element element : privateElements.getElements()) {
+ if (element instanceof Binding
+ && skippedExposes.contains(((Binding) element).getKey())) {
+ continue;
+ }
+ element.applyTo(privateBinder);
+ }
+
+ return null;
+ }
+
+ @Override
+ public Void visit(ScopeBinding scopeBinding) {
+ scopeBindings.add(scopeBinding);
+ return null;
+ }
+ }.writeAll(elements);
+
+ // execute the scope bindings, skipping scopes that have been overridden. Any scope that
+ // is overridden and in active use will prompt an error
+ new ModuleWriter(binder()) {
+ @Override
+ public Void visit(ScopeBinding scopeBinding) {
+ if (!overridesScopeAnnotations.remove(scopeBinding.getAnnotationType())) {
+ super.visit(scopeBinding);
+ } else {
+ Object source = scopeInstancesInUse.get(scopeBinding.getScope());
+ if (source != null) {
+ binder().withSource(source).addError(
+ "The scope for @%s is bound directly and cannot be overridden.",
+ scopeBinding.getAnnotationType().getSimpleName());
+ }
+ }
+ return null;
+ }
+ }.writeAll(scopeBindings);
+
+ // TODO: bind the overridden keys using multibinder
+ }
+
+ private Scope getScopeInstanceOrNull(Binding<?> binding) {
+ return binding.acceptScopingVisitor(new DefaultBindingScopingVisitor<Scope>() {
+ public Scope visitScope(Scope scope) {
+ return scope;
+ }
+ });
+ }
+ };
+ }
+ }
+
+ private static class ModuleWriter extends DefaultElementVisitor<Void> {
+ protected final Binder binder;
+
+ ModuleWriter(Binder binder) {
+ this.binder = binder;
+ }
+
+ @Override
+ protected Void visitOther(Element element) {
+ element.applyTo(binder);
+ return null;
+ }
+
+ void writeAll(Iterable<? extends Element> elements) {
+ for (Element element : elements) {
+ element.acceptVisitor(this);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/util/Providers.java b/src/main/java/org/elasticsearch/common/inject/util/Providers.java
new file mode 100644
index 0000000..c62c143
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/util/Providers.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2007 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.elasticsearch.common.inject.util;
+
+import org.elasticsearch.common.inject.Provider;
+
+/**
+ * Static utility methods for creating and working with instances of
+ * {@link Provider}.
+ *
+ * @author Kevin Bourrillion (kevinb9n@gmail.com)
+ * @since 2.0
+ */
+public final class Providers {
+
+ private Providers() {
+ }
+
+ /**
+ * Returns a provider which always provides {@code instance}. This should not
+ * be necessary to use in your application, but is helpful for several types
+ * of unit tests.
+ *
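+ * <p>For example (illustrative only):
+ * <pre>
+ * Provider&lt;String&gt; greeting = Providers.of("hello");
+ * String value = greeting.get(); // always "hello"</pre>
+ *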
+ * @param instance the instance that should always be provided. This is also
+ * permitted to be null, to enable aggressive testing, although in real
+ * life a Guice-supplied Provider will never return null.
+ */
+ public static <T> Provider<T> of(final T instance) {
+ return new Provider<T>() {
+ public T get() {
+ return instance;
+ }
+
+ @Override
+ public String toString() {
+ return "of(" + instance + ")";
+ }
+ };
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/inject/util/Types.java b/src/main/java/org/elasticsearch/common/inject/util/Types.java
new file mode 100644
index 0000000..ffd8aa6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/util/Types.java
@@ -0,0 +1,135 @@
+/**
+ * Copyright (C) 2008 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.elasticsearch.common.inject.util;
+
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.inject.internal.MoreTypes;
+import org.elasticsearch.common.inject.internal.MoreTypes.GenericArrayTypeImpl;
+import org.elasticsearch.common.inject.internal.MoreTypes.ParameterizedTypeImpl;
+import org.elasticsearch.common.inject.internal.MoreTypes.WildcardTypeImpl;
+
+import java.lang.reflect.GenericArrayType;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.lang.reflect.WildcardType;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Static methods for working with types.
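+ *
+ * <p>An illustrative sketch (not part of the original source):
+ * <pre>
+ * Type listOfString = Types.listOf(String.class); // List&lt;String&gt;
+ * Type mapType = Types.mapOf(String.class, Integer.class); // Map&lt;String, Integer&gt;</pre>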
+ *
+ * @author crazybob@google.com (Bob Lee)
+ * @since 2.0
+ */
+public final class Types {
+ private Types() {
+ }
+
+ /**
+ * Returns a new parameterized type, applying {@code typeArguments} to
+ * {@code rawType}. The returned type does not have an owner type.
+ *
+ * @return a {@link java.io.Serializable serializable} parameterized type.
+ */
+ public static ParameterizedType newParameterizedType(Type rawType, Type... typeArguments) {
+ return newParameterizedTypeWithOwner(null, rawType, typeArguments);
+ }
+
+ /**
+ * Returns a new parameterized type, applying {@code typeArguments} to
+ * {@code rawType} and enclosed by {@code ownerType}.
+ *
+ * @return a {@link java.io.Serializable serializable} parameterized type.
+ */
+ public static ParameterizedType newParameterizedTypeWithOwner(
+ Type ownerType, Type rawType, Type... typeArguments) {
+ return new ParameterizedTypeImpl(ownerType, rawType, typeArguments);
+ }
+
+ /**
+ * Returns an array type whose elements are all instances of
+ * {@code componentType}.
+ *
+ * @return a {@link java.io.Serializable serializable} generic array type.
+ */
+ public static GenericArrayType arrayOf(Type componentType) {
+ return new GenericArrayTypeImpl(componentType);
+ }
+
+ /**
+ * Returns a type that represents an unknown type that extends {@code bound}.
+ * For example, if {@code bound} is {@code CharSequence.class}, this returns
+ * {@code ? extends CharSequence}. If {@code bound} is {@code Object.class},
+ * this returns {@code ?}, which is shorthand for {@code ? extends Object}.
+ */
+ public static WildcardType subtypeOf(Type bound) {
+ return new WildcardTypeImpl(new Type[]{bound}, MoreTypes.EMPTY_TYPE_ARRAY);
+ }
+
+ /**
+ * Returns a type that represents an unknown supertype of {@code bound}. For
+ * example, if {@code bound} is {@code String.class}, this returns {@code ?
+ * super String}.
+ */
+ public static WildcardType supertypeOf(Type bound) {
+ return new WildcardTypeImpl(new Type[]{Object.class}, new Type[]{bound});
+ }
+
+ /**
+ * Returns a type modelling a {@link List} whose elements are of type
+ * {@code elementType}.
+ *
+ * @return a {@link java.io.Serializable serializable} parameterized type.
+ */
+ public static ParameterizedType listOf(Type elementType) {
+ return newParameterizedType(List.class, elementType);
+ }
+
+ /**
+ * Returns a type modelling a {@link Set} whose elements are of type
+ * {@code elementType}.
+ *
+ * @return a {@link java.io.Serializable serializable} parameterized type.
+ */
+ public static ParameterizedType setOf(Type elementType) {
+ return newParameterizedType(Set.class, elementType);
+ }
+
+ /**
+ * Returns a type modelling a {@link Map} whose keys are of type
+ * {@code keyType} and whose values are of type {@code valueType}.
+ *
+ * @return a {@link java.io.Serializable serializable} parameterized type.
+ */
+ public static ParameterizedType mapOf(Type keyType, Type valueType) {
+ return newParameterizedType(Map.class, keyType, valueType);
+ }
+
+ // for other custom collections types, use newParameterizedType()
+
+ /**
+ * Returns a type modelling a {@link Provider} that provides elements of type
+ * {@code providedType}.
+ *
+ * @return a {@link java.io.Serializable serializable} parameterized type.
+ */
+ public static ParameterizedType providerOf(Type providedType) {
+ return newParameterizedType(Provider.class, providedType);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/inject/util/package-info.java b/src/main/java/org/elasticsearch/common/inject/util/package-info.java
new file mode 100644
index 0000000..e20bd94
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/inject/util/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Helper methods for working with Guice.
+ */
+package org.elasticsearch.common.inject.util; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/io/BooleanStreamable.java b/src/main/java/org/elasticsearch/common/io/BooleanStreamable.java
new file mode 100644
index 0000000..1a7c2ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/BooleanStreamable.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ * A {@link Streamable} that wraps a single {@code boolean} value.
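+ * <p>A round-trip sketch, assuming {@code out} and {@code in} are a connected
+ * {@link StreamOutput}/{@link StreamInput} pair (illustrative only):
+ * <pre>
+ * new BooleanStreamable(true).writeTo(out);
+ * BooleanStreamable read = new BooleanStreamable();
+ * read.readFrom(in);
+ * boolean value = read.get(); // true</pre>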
+ */
+public class BooleanStreamable implements Streamable {
+
+ private boolean value;
+
+ public BooleanStreamable() {
+ }
+
+ public BooleanStreamable(boolean value) {
+ this.value = value;
+ }
+
+ public void set(boolean newValue) {
+ value = newValue;
+ }
+
+ public boolean get() {
+ return this.value;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ value = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(value);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/BytesStream.java b/src/main/java/org/elasticsearch/common/io/BytesStream.java
new file mode 100644
index 0000000..903c1dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/BytesStream.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.elasticsearch.common.bytes.BytesReference;
+
+public interface BytesStream {
+
+ BytesReference bytes();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/io/CachedStreams.java b/src/main/java/org/elasticsearch/common/io/CachedStreams.java
new file mode 100644
index 0000000..12af1f0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/CachedStreams.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.elasticsearch.common.io.stream.CachedStreamInput;
+
+public class CachedStreams {
+
+ public static void clear() {
+ CachedStreamInput.clear();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/io/CharSequenceReader.java b/src/main/java/org/elasticsearch/common/io/CharSequenceReader.java
new file mode 100644
index 0000000..cb6c3bc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/CharSequenceReader.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.Reader;
+
+/**
+ * A {@link Reader} that also exposes its content as a {@link CharSequence}.
+ */
+public abstract class CharSequenceReader extends Reader implements CharSequence {
+}
diff --git a/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java b/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java
new file mode 100644
index 0000000..c18bfe5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * A variant of {@link java.io.CharArrayReader} with the method synchronization removed.
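+ * <p>For example (illustrative only, {@code chars} being any char array of
+ * sufficient length), reading five chars starting at offset 2:
+ * <pre>
+ * Reader reader = new FastCharArrayReader(chars, 2, 5);</pre>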
+ */
+public class FastCharArrayReader extends Reader {
+
+ /**
+ * The character buffer.
+ */
+ protected char buf[];
+
+ /**
+ * The current buffer position.
+ */
+ protected int pos;
+
+ /**
+ * The position of mark in buffer.
+ */
+ protected int markedPos = 0;
+
+ /**
+ * The index of the end of this buffer. There is no valid
+ * data at or beyond this index.
+ */
+ protected int count;
+
+ /**
+ * Creates a CharArrayReader from the specified array of chars.
+ *
+ * @param buf Input buffer (not copied)
+ */
+ public FastCharArrayReader(char buf[]) {
+ this.buf = buf;
+ this.pos = 0;
+ this.count = buf.length;
+ }
+
+ /**
+ * Creates a CharArrayReader from the specified array of chars.
+ * <p/>
+ * <p> The resulting reader will start reading at the given
+ * <tt>offset</tt>. The total number of <tt>char</tt> values that can be
+ * read from this reader will be either <tt>length</tt> or
+ * <tt>buf.length-offset</tt>, whichever is smaller.
+ *
+ * @param buf Input buffer (not copied)
+ * @param offset Offset of the first char to read
+ * @param length Number of chars to read
+ * @throws IllegalArgumentException If <tt>offset</tt> is negative or greater than
+ * <tt>buf.length</tt>, or if <tt>length</tt> is negative, or if
+ * the sum of these two values is negative.
+ */
+ public FastCharArrayReader(char buf[], int offset, int length) {
+ if ((offset < 0) || (offset > buf.length) || (length < 0) ||
+ ((offset + length) < 0)) {
+ throw new IllegalArgumentException();
+ }
+ this.buf = buf;
+ this.pos = offset;
+ this.count = Math.min(offset + length, buf.length);
+ this.markedPos = offset;
+ }
+
+ /**
+ * Checks to make sure that the stream has not been closed
+ */
+ private void ensureOpen() throws IOException {
+ if (buf == null)
+ throw new IOException("Stream closed");
+ }
+
+ /**
+ * Reads a single character.
+ *
+ * @throws IOException If an I/O error occurs
+ */
+ public int read() throws IOException {
+ ensureOpen();
+ if (pos >= count)
+ return -1;
+ else
+ return buf[pos++];
+ }
+
+ /**
+ * Reads characters into a portion of an array.
+ *
+ * @param b Destination buffer
+ * @param off Offset at which to start storing characters
+ * @param len Maximum number of characters to read
+ * @return The actual number of characters read, or -1 if
+ * the end of the stream has been reached
+ * @throws IOException If an I/O error occurs
+ */
+ public int read(char b[], int off, int len) throws IOException {
+ ensureOpen();
+ if ((off < 0) || (off > b.length) || (len < 0) ||
+ ((off + len) > b.length) || ((off + len) < 0)) {
+ throw new IndexOutOfBoundsException();
+ } else if (len == 0) {
+ return 0;
+ }
+
+ if (pos >= count) {
+ return -1;
+ }
+ if (pos + len > count) {
+ len = count - pos;
+ }
+ if (len <= 0) {
+ return 0;
+ }
+ System.arraycopy(buf, pos, b, off, len);
+ pos += len;
+ return len;
+ }
+
+ /**
+ * Skips characters. Returns the number of characters that were skipped.
+ * <p/>
+ * <p>The <code>n</code> parameter may be negative, even though the
+ * <code>skip</code> method of the {@link Reader} superclass throws
+ * an exception in this case. If <code>n</code> is negative, then
+ * this method does nothing and returns <code>0</code>.
+ *
+ * @param n The number of characters to skip
+ * @return The number of characters actually skipped
+ * @throws IOException If the stream is closed, or an I/O error occurs
+ */
+ public long skip(long n) throws IOException {
+ ensureOpen();
+ if (pos + n > count) {
+ n = count - pos;
+ }
+ if (n < 0) {
+ return 0;
+ }
+ pos += n;
+ return n;
+ }
+
+ /**
+ * Tells whether this stream is ready to be read. Character-array readers
+ * are always ready to be read.
+ *
+ * @throws IOException If an I/O error occurs
+ */
+ public boolean ready() throws IOException {
+ ensureOpen();
+ return (count - pos) > 0;
+ }
+
+ /**
+ * Tells whether this stream supports the mark() operation, which it does.
+ */
+ public boolean markSupported() {
+ return true;
+ }
+
+ /**
+ * Marks the present position in the stream. Subsequent calls to reset()
+ * will reposition the stream to this point.
+ *
+ * @param readAheadLimit Limit on the number of characters that may be
+ * read while still preserving the mark. Because
+ * the stream's input comes from a character array,
+ * there is no actual limit; hence this argument is
+ * ignored.
+ * @throws IOException If an I/O error occurs
+ */
+ public void mark(int readAheadLimit) throws IOException {
+ ensureOpen();
+ markedPos = pos;
+ }
+
+ /**
+ * Resets the stream to the most recent mark, or to the beginning if it has
+ * never been marked.
+ *
+ * @throws IOException If an I/O error occurs
+ */
+ public void reset() throws IOException {
+ ensureOpen();
+ pos = markedPos;
+ }
+
+ /**
+ * Closes the stream and releases any system resources associated with
+ * it. Once the stream has been closed, further read(), ready(),
+ * mark(), reset(), or skip() invocations will throw an IOException.
+ * Closing a previously closed stream has no effect.
+ */
+ public void close() {
+ buf = null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java b/src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java
new file mode 100644
index 0000000..05ec582
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.IOException;
+import java.io.Writer;
+import java.util.Arrays;
+
+/**
+ * A class similar to {@link java.io.CharArrayWriter}, allowing direct access to the underlying <tt>char[]</tt> buffer.
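+ * <p>Typical use (illustrative only):
+ * <pre>
+ * FastCharArrayWriter writer = new FastCharArrayWriter();
+ * writer.append("hello");
+ * char[] chars = writer.unsafeCharArray(); // use writer.size() for the valid length</pre>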
+ */
+public class FastCharArrayWriter extends Writer {
+
+ /**
+ * The buffer where data is stored.
+ */
+ protected char buf[];
+
+ /**
+ * The number of chars in the buffer.
+ */
+ protected int count;
+
+ /**
+ * Creates a new CharArrayWriter.
+ */
+ public FastCharArrayWriter() {
+ this(32);
+ }
+
+ /**
+ * Creates a new CharArrayWriter with the specified initial size.
+ *
+ * @param initialSize an int specifying the initial buffer size.
+ * @throws IllegalArgumentException if initialSize is negative
+ */
+ public FastCharArrayWriter(int initialSize) {
+ if (initialSize < 0) {
+ throw new IllegalArgumentException("Negative initial size: "
+ + initialSize);
+ }
+ buf = new char[initialSize];
+ }
+
+ /**
+ * Writes a character to the buffer.
+ */
+ public void write(int c) {
+ int newcount = count + 1;
+ if (newcount > buf.length) {
+ buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount));
+ }
+ buf[count] = (char) c;
+ count = newcount;
+ }
+
+ /**
+ * Writes characters to the buffer.
+ *
+ * @param c the data to be written
+ * @param off the start offset in the data
+ * @param len the number of chars that are written
+ */
+ public void write(char c[], int off, int len) {
+ if ((off < 0) || (off > c.length) || (len < 0) ||
+ ((off + len) > c.length) || ((off + len) < 0)) {
+ throw new IndexOutOfBoundsException();
+ } else if (len == 0) {
+ return;
+ }
+ int newcount = count + len;
+ if (newcount > buf.length) {
+ buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount));
+ }
+ System.arraycopy(c, off, buf, count, len);
+ count = newcount;
+ }
+
+ /**
+ * Write a portion of a string to the buffer.
+ *
+ * @param str String to be written from
+ * @param off Offset from which to start reading characters
+ * @param len Number of characters to be written
+ */
+ public void write(String str, int off, int len) {
+ int newcount = count + len;
+ if (newcount > buf.length) {
+ buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount));
+ }
+ str.getChars(off, off + len, buf, count);
+ count = newcount;
+ }
+
+ /**
+ * Writes the contents of the buffer to another character stream.
+ *
+ * @param out the output stream to write to
+ * @throws java.io.IOException If an I/O error occurs.
+ */
+ public void writeTo(Writer out) throws IOException {
+ out.write(buf, 0, count);
+ }
+
+ /**
+ * Appends the specified character sequence to this writer.
+ * <p/>
+ * <p> An invocation of this method of the form <tt>out.append(csq)</tt>
+ * behaves in exactly the same way as the invocation
+ * <p/>
+ * <pre>
+ * out.write(csq.toString()) </pre>
+ *
+ * <p> Depending on the specification of <tt>toString</tt> for the
+ * character sequence <tt>csq</tt>, the entire sequence may not be
+ * appended. For instance, invoking the <tt>toString</tt> method of a
+ * character buffer will return a subsequence whose content depends upon
+ * the buffer's position and limit.
+ *
+ * @param csq The character sequence to append. If <tt>csq</tt> is
+ * <tt>null</tt>, then the four characters <tt>"null"</tt> are
+ * appended to this writer.
+ * @return This writer
+ * @since 1.5
+ */
+ public FastCharArrayWriter append(CharSequence csq) {
+ String s = (csq == null ? "null" : csq.toString());
+ write(s, 0, s.length());
+ return this;
+ }
+
+ /**
+ * Appends a subsequence of the specified character sequence to this writer.
+ * <p/>
+ * <p> An invocation of this method of the form <tt>out.append(csq, start,
+ * end)</tt> when <tt>csq</tt> is not <tt>null</tt>, behaves in
+ * exactly the same way as the invocation
+ * <p/>
+ * <pre>
+ * out.write(csq.subSequence(start, end).toString()) </pre>
+ *
+ * @param csq The character sequence from which a subsequence will be
+ * appended. If <tt>csq</tt> is <tt>null</tt>, then characters
+ * will be appended as if <tt>csq</tt> contained the four
+ * characters <tt>"null"</tt>.
+ * @param start The index of the first character in the subsequence
+ * @param end The index of the character following the last character in the
+ * subsequence
+ * @return This writer
+ * @throws IndexOutOfBoundsException If <tt>start</tt> or <tt>end</tt> are negative, <tt>start</tt>
+ * is greater than <tt>end</tt>, or <tt>end</tt> is greater than
+ * <tt>csq.length()</tt>
+ * @since 1.5
+ */
+ public FastCharArrayWriter append(CharSequence csq, int start, int end) {
+ String s = (csq == null ? "null" : csq).subSequence(start, end).toString();
+ write(s, 0, s.length());
+ return this;
+ }
+
+ /**
+ * Appends the specified character to this writer.
+ * <p/>
+ * <p> An invocation of this method of the form <tt>out.append(c)</tt>
+ * behaves in exactly the same way as the invocation
+ * <p/>
+ * <pre>
+ * out.write(c) </pre>
+ *
+ * @param c The 16-bit character to append
+ * @return This writer
+ * @since 1.5
+ */
+ public FastCharArrayWriter append(char c) {
+ write(c);
+ return this;
+ }
+
+ /**
+ * Resets the buffer so that you can use it again without
+ * throwing away the already allocated buffer.
+ */
+ public void reset() {
+ count = 0;
+ }
+
+ /**
+ * Returns a copy of the input data.
+ *
+ * @return an array of chars copied from the input data.
+ */
+ public char[] toCharArray() {
+ return Arrays.copyOf(buf, count);
+ }
+
+ /**
+ * Returns the underlying char array. Note, use {@link #size()} in order to know
+ * the size of the actual content within the array.
+ */
+ public char[] unsafeCharArray() {
+ return buf;
+ }
+
+ /**
+ * Returns the current size of the buffer.
+ *
+ * @return an int representing the current size of the buffer.
+ */
+ public int size() {
+ return count;
+ }
+
+ /**
+ * Converts input data to a string.
+ *
+ * @return the string.
+ */
+ public String toString() {
+ return new String(buf, 0, count);
+ }
+
+ /**
+ * Converts the input data to a string with leading and trailing whitespace trimmed.
+ */
+ public String toStringTrim() {
+ int st = 0;
+ int end = count;
+ char[] val = buf; /* avoid getfield opcode */
+
+ // skip leading whitespace
+ while ((st < end) && (val[st] <= ' ')) {
+ st++;
+ }
+ // trim trailing whitespace; end is an exclusive index into val
+ while ((st < end) && (val[end - 1] <= ' ')) {
+ end--;
+ }
+ return new String(buf, st, end - st);
+ }
+
+ /**
+ * Flush the stream.
+ */
+ public void flush() {
+ }
+
+ /**
+ * Close the stream. This method does not release the buffer, since its
+ * contents might still be required. Note: Invoking this method in this class
+ * will have no effect.
+ */
+ public void close() {
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/io/FastStringReader.java b/src/main/java/org/elasticsearch/common/io/FastStringReader.java
new file mode 100644
index 0000000..154caf3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/FastStringReader.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * A character stream whose source is a string. Unlike
+ * {@link java.io.StringReader}, this implementation is <b>not thread safe</b>.
+ *
+ * @author shay.banon
+ */
+public class FastStringReader extends CharSequenceReader {
+
+ private String str;
+ private int length;
+ private int next = 0;
+ private int mark = 0;
+
+ /**
+ * Creates a new string reader.
+ *
+ * @param s String providing the character stream.
+ */
+ public FastStringReader(String s) {
+ this.str = s;
+ this.length = s.length();
+ }
+
+ /**
+ * Check to make sure that the stream has not been closed
+ */
+ private void ensureOpen() throws IOException {
+ if (length == -1)
+ throw new IOException("Stream closed");
+ }
+
+ @Override
+ public int length() {
+ return length;
+ }
+
+ @Override
+ public char charAt(int index) {
+ return str.charAt(index);
+ }
+
+ @Override
+ public CharSequence subSequence(int start, int end) {
+ return str.subSequence(start, end);
+ }
+
+ /**
+ * Reads a single character.
+ *
+ * @return The character read, or -1 if the end of the stream has been
+ * reached
+ * @throws IOException If an I/O error occurs
+ */
+ @Override
+ public int read() throws IOException {
+ ensureOpen();
+ if (next >= length)
+ return -1;
+ return str.charAt(next++);
+ }
+
+ /**
+ * Reads characters into a portion of an array.
+ *
+ * @param cbuf Destination buffer
+ * @param off Offset at which to start writing characters
+ * @param len Maximum number of characters to read
+ * @return The number of characters read, or -1 if the end of the
+ * stream has been reached
+ * @throws IOException If an I/O error occurs
+ */
+ @Override
+ public int read(char cbuf[], int off, int len) throws IOException {
+ ensureOpen();
+ if (len == 0) {
+ return 0;
+ }
+ if (next >= length)
+ return -1;
+ int n = Math.min(length - next, len);
+ str.getChars(next, next + n, cbuf, off);
+ next += n;
+ return n;
+ }
+
+ /**
+ * Skips the specified number of characters in the stream. Returns
+ * the number of characters that were skipped.
+ * <p/>
+ * <p>The <code>ns</code> parameter may be negative, even though the
+ * <code>skip</code> method of the {@link Reader} superclass throws
+ * an exception in this case. Negative values of <code>ns</code> cause the
+ * stream to skip backwards. Negative return values indicate a skip
+ * backwards. It is not possible to skip backwards past the beginning of
+ * the string.
+ * <p/>
+ * <p>If the entire string has been read or skipped, then this method has
+ * no effect and always returns 0.
+ *
+ * @throws IOException If an I/O error occurs
+ */
+ @Override
+ public long skip(long ns) throws IOException {
+ ensureOpen();
+ if (next >= length)
+ return 0;
+ // Bound skip by beginning and end of the source
+ long n = Math.min(length - next, ns);
+ n = Math.max(-next, n);
+ next += n;
+ return n;
+ }
+
+ /**
+ * Tells whether this stream is ready to be read.
+ *
+ * @return True if the next read() is guaranteed not to block for input
+ * @throws IOException If the stream is closed
+ */
+ @Override
+ public boolean ready() throws IOException {
+ ensureOpen();
+ return true;
+ }
+
+ /**
+ * Tells whether this stream supports the mark() operation, which it does.
+ */
+ @Override
+ public boolean markSupported() {
+ return true;
+ }
+
+ /**
+ * Marks the present position in the stream. Subsequent calls to reset()
+ * will reposition the stream to this point.
+ *
+ * @param readAheadLimit Limit on the number of characters that may be
+ * read while still preserving the mark. Because
+ * the stream's input comes from a string, there
+ * is no actual limit, so this argument must not
+ * be negative, but is otherwise ignored.
+ * @throws IllegalArgumentException If readAheadLimit is < 0
+ * @throws IOException If an I/O error occurs
+ */
+ @Override
+ public void mark(int readAheadLimit) throws IOException {
+ if (readAheadLimit < 0) {
+ throw new IllegalArgumentException("Read-ahead limit < 0");
+ }
+ ensureOpen();
+ mark = next;
+ }
+
+ /**
+ * Resets the stream to the most recent mark, or to the beginning of the
+ * string if it has never been marked.
+ *
+ * @throws IOException If an I/O error occurs
+ */
+ @Override
+ public void reset() throws IOException {
+ ensureOpen();
+ next = mark;
+ }
+
+ /**
+ * Closes the stream and releases any system resources associated with
+ * it. Once the stream has been closed, further read(),
+ * ready(), mark(), or reset() invocations will throw an IOException.
+ * Closing a previously closed stream has no effect.
+ */
+ public void close() {
+ length = -1;
+ }
+
+ @Override
+ public String toString() {
+ return str;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/FileChannelInputStream.java b/src/main/java/org/elasticsearch/common/io/FileChannelInputStream.java
new file mode 100644
index 0000000..308c234
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/FileChannelInputStream.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+/**
+ * An {@link InputStream} that reads a fixed-length slice of a {@link FileChannel},
+ * starting at a given position, with mark/reset support.
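+ * <p>A sketch, assuming {@code channel} is an open {@link FileChannel}
+ * (illustrative only):
+ * <pre>
+ * InputStream in = new FileChannelInputStream(channel, 0, channel.size());</pre>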
+ */
+public class FileChannelInputStream extends InputStream {
+
+ private final FileChannel channel;
+
+ private long position;
+
+ private long length;
+
+ private ByteBuffer bb = null;
+ private byte[] bs = null; // Invoker's previous array
+ private byte[] b1 = null;
+
+ private long markPosition;
+
+ /**
+ * @param channel The channel to read from
+ * @param position The position to start reading from
+ * @param length The length to read
+ */
+ public FileChannelInputStream(FileChannel channel, long position, long length) {
+ this.channel = channel;
+ this.position = position;
+ this.markPosition = position;
+ this.length = position + length; // easier to work with total length
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (b1 == null) {
+ b1 = new byte[1];
+ }
+ int n = read(b1);
+ if (n == 1) {
+ return b1[0] & 0xff;
+ }
+ return -1;
+ }
+
+ @Override
+ public int read(byte[] bs, int off, int len) throws IOException {
+ if (len == 0) {
+ return 0;
+ }
+
+ if ((length - position) < len) {
+ len = (int) (length - position);
+ }
+
+ if (len == 0) {
+ return -1;
+ }
+
+ ByteBuffer bb = ((this.bs == bs) ? this.bb : ByteBuffer.wrap(bs));
+ bb.limit(Math.min(off + len, bb.capacity()));
+ bb.position(off);
+
+ this.bb = bb;
+ this.bs = bs;
+ int read = channel.read(bb, position);
+ if (read > 0) {
+ position += read;
+ }
+ return read;
+ }
+
+ @Override
+ public boolean markSupported() {
+ return true;
+ }
+
+ @Override
+ public void mark(int readlimit) {
+ this.markPosition = position;
+ }
+
+ @Override
+ public void reset() throws IOException {
+ position = markPosition;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java b/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java
new file mode 100644
index 0000000..25147e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.*;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Elementary file system utilities: recursive deletion, {@code mkdirs} with
+ * stall detection, fsync with retries, and channel-based file copying.
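+ * <p>
+ * For example (the path is hypothetical):
+ * <pre>
+ * boolean deleted = FileSystemUtils.deleteRecursively(new File("/tmp/scratch"));
+ * </pre>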
+ */
+public class FileSystemUtils {
+
+ private static ESLogger logger = ESLoggerFactory.getLogger(FileSystemUtils.class.getName());
+
+ private static final long mkdirsStallTimeout = TimeValue.timeValueMinutes(5).millis();
+ private static final Object mkdirsMutex = new Object();
+ private static volatile Thread mkdirsThread;
+ private static volatile long mkdirsStartTime;
+
+ public static boolean mkdirs(File dir) {
+ synchronized (mkdirsMutex) {
+ try {
+ mkdirsThread = Thread.currentThread();
+ mkdirsStartTime = System.currentTimeMillis();
+ return dir.mkdirs();
+ } finally {
+ mkdirsThread = null;
+ }
+ }
+ }
+
+ public static void checkMkdirsStall(long currentTime) {
+ Thread mkdirsThread1 = mkdirsThread;
+ long stallTime = currentTime - mkdirsStartTime;
+ if (mkdirsThread1 != null && (stallTime > mkdirsStallTimeout)) {
+ logger.error("mkdirs stalled for {} on {}, trying to interrupt", new TimeValue(stallTime), mkdirsThread1.getName());
+ mkdirsThread1.interrupt(); // try and interrupt it...
+ }
+ }
+
+ public static int maxOpenFiles(File testDir) {
+ boolean dirCreated = false;
+ if (!testDir.exists()) {
+ dirCreated = true;
+ testDir.mkdirs();
+ }
+ List<RandomAccessFile> files = new ArrayList<RandomAccessFile>();
+ try {
+ while (true) {
+ files.add(new RandomAccessFile(new File(testDir, "tmp" + files.size()), "rw"));
+ }
+ } catch (IOException ioe) {
+ int i = 0;
+ for (RandomAccessFile raf : files) {
+ try {
+ raf.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ new File(testDir, "tmp" + i++).delete();
+ }
+ if (dirCreated) {
+ deleteRecursively(testDir);
+ }
+ }
+ return files.size();
+ }
+
+
+ public static boolean hasExtensions(File root, String... extensions) {
+ if (root != null && root.exists()) {
+ if (root.isDirectory()) {
+ File[] children = root.listFiles();
+ if (children != null) {
+ for (File child : children) {
+ if (child.isDirectory()) {
+ boolean has = hasExtensions(child, extensions);
+ if (has) {
+ return true;
+ }
+ } else {
+ for (String extension : extensions) {
+ if (child.getName().endsWith(extension)) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if at least one of the files exists.
+ */
+ public static boolean exists(File... files) {
+ for (File file : files) {
+ if (file.exists()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean deleteRecursively(File[] roots) {
+ boolean deleted = true;
+ for (File root : roots) {
+ deleted &= deleteRecursively(root);
+ }
+ return deleted;
+ }
+
+ public static boolean deleteRecursively(File root) {
+ return deleteRecursively(root, true);
+ }
+
+ private static boolean innerDeleteRecursively(File root) {
+ return deleteRecursively(root, true);
+ }
+
+ /**
+ * Delete the supplied {@link java.io.File} - for directories,
+ * recursively delete any nested directories or files as well.
+ *
+ * @param root the root <code>File</code> to delete
+ * @param deleteRoot whether or not to delete the root itself or just the content of the root.
+ * @return <code>true</code> if the <code>File</code> was deleted,
+ * otherwise <code>false</code>
+ */
+ public static boolean deleteRecursively(File root, boolean deleteRoot) {
+ if (root != null && root.exists()) {
+ if (root.isDirectory()) {
+ File[] children = root.listFiles();
+ if (children != null) {
+ for (File child : children) {
+ innerDeleteRecursively(child);
+ }
+ }
+ }
+
+ if (deleteRoot) {
+ return root.delete();
+ } else {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static void syncFile(File fileToSync) throws IOException {
+ boolean success = false;
+ int retryCount = 0;
+ IOException exc = null;
+ while (!success && retryCount < 5) {
+ retryCount++;
+ RandomAccessFile file = null;
+ try {
+ try {
+ file = new RandomAccessFile(fileToSync, "rw");
+ file.getFD().sync();
+ success = true;
+ } finally {
+ if (file != null)
+ file.close();
+ }
+ } catch (IOException ioe) {
+ if (exc == null)
+ exc = ioe;
+ try {
+ // Pause 5 msec
+ Thread.sleep(5);
+ } catch (InterruptedException ie) {
+ throw new InterruptedIOException(ie.getMessage());
+ }
+ }
+ }
+ if (!success) {
+ // retries exhausted; surface the original failure instead of swallowing it
+ throw exc;
+ }
+ }
+
+ public static void copyFile(File sourceFile, File destinationFile) throws IOException {
+ FileInputStream sourceIs = null;
+ FileChannel source = null;
+ FileOutputStream destinationOs = null;
+ FileChannel destination = null;
+ try {
+ sourceIs = new FileInputStream(sourceFile);
+ source = sourceIs.getChannel();
+ destinationOs = new FileOutputStream(destinationFile);
+ destination = destinationOs.getChannel();
+ // transferFrom may move fewer bytes than requested, so loop until done
+ long size = source.size();
+ for (long pos = 0; pos < size; ) {
+ long count = destination.transferFrom(source, pos, size - pos);
+ if (count <= 0) {
+ break; // defensive: avoid spinning if the source is exhausted early
+ }
+ pos += count;
+ }
+ } finally {
+ if (source != null) {
+ source.close();
+ }
+ if (sourceIs != null) {
+ sourceIs.close();
+ }
+ if (destination != null) {
+ destination.close();
+ }
+ if (destinationOs != null) {
+ destinationOs.close();
+ }
+ }
+ }
+
+ private FileSystemUtils() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/Streams.java b/src/main/java/org/elasticsearch/common/io/Streams.java
new file mode 100644
index 0000000..1d78786
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/Streams.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+
+import java.io.*;
+
+/**
+ * Simple utility methods for file and stream copying.
+ * All copy methods use a block size of 8192 bytes
+ * ({@link #BUFFER_SIZE}) and close all affected streams when done.
+ * <p>
+ * Mainly for use within the framework,
+ * but also useful for application code.
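+ * <p>
+ * A short usage sketch (file and resource names are hypothetical):
+ * <pre>
+ * long bytesCopied = Streams.copy(new File("in.txt"), new File("out.txt"));
+ * String text = Streams.copyToStringFromClasspath("/names.txt");
+ * </pre>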
+ */
+public abstract class Streams {
+
+ public static final int BUFFER_SIZE = 1024 * 8;
+
+
+ //---------------------------------------------------------------------
+ // Copy methods for java.io.File
+ //---------------------------------------------------------------------
+
+ /**
+ * Copy the contents of the given input File to the given output File.
+ *
+ * @param in the file to copy from
+ * @param out the file to copy to
+ * @return the number of bytes copied
+ * @throws IOException in case of I/O errors
+ */
+ public static long copy(File in, File out) throws IOException {
+ Preconditions.checkNotNull(in, "No input File specified");
+ Preconditions.checkNotNull(out, "No output File specified");
+ return copy(new BufferedInputStream(new FileInputStream(in)),
+ new BufferedOutputStream(new FileOutputStream(out)));
+ }
+
+ /**
+ * Copy the contents of the given byte array to the given output File.
+ *
+ * @param in the byte array to copy from
+ * @param out the file to copy to
+ * @throws IOException in case of I/O errors
+ */
+ public static void copy(byte[] in, File out) throws IOException {
+ Preconditions.checkNotNull(in, "No input byte array specified");
+ Preconditions.checkNotNull(out, "No output File specified");
+ ByteArrayInputStream inStream = new ByteArrayInputStream(in);
+ OutputStream outStream = new BufferedOutputStream(new FileOutputStream(out));
+ copy(inStream, outStream);
+ }
+
+ /**
+ * Copy the contents of the given input File into a new byte array.
+ *
+ * @param in the file to copy from
+ * @return a new byte array containing the copied contents
+ * @throws IOException in case of I/O errors
+ */
+ public static byte[] copyToByteArray(File in) throws IOException {
+ Preconditions.checkNotNull(in, "No input File specified");
+ return copyToByteArray(new BufferedInputStream(new FileInputStream(in)));
+ }
+
+
+ //---------------------------------------------------------------------
+ // Copy methods for java.io.InputStream / java.io.OutputStream
+ //---------------------------------------------------------------------
+
+
+ public static long copy(InputStream in, OutputStream out) throws IOException {
+ return copy(in, out, new byte[BUFFER_SIZE]);
+ }
+
+ /**
+ * Copy the contents of the given InputStream to the given OutputStream.
+ * Closes both streams when done.
+ *
+ * @param in the stream to copy from
+ * @param out the stream to copy to
+ * @param buffer the scratch buffer to copy through
+ * @return the number of bytes copied
+ * @throws IOException in case of I/O errors
+ */
+ public static long copy(InputStream in, OutputStream out, byte[] buffer) throws IOException {
+ Preconditions.checkNotNull(in, "No InputStream specified");
+ Preconditions.checkNotNull(out, "No OutputStream specified");
+ try {
+ long byteCount = 0;
+ int bytesRead;
+ while ((bytesRead = in.read(buffer)) != -1) {
+ out.write(buffer, 0, bytesRead);
+ byteCount += bytesRead;
+ }
+ out.flush();
+ return byteCount;
+ } finally {
+ try {
+ in.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ try {
+ out.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ }
+ }
+
+ /**
+ * Copy the contents of the given byte array to the given OutputStream.
+ * Closes the stream when done.
+ *
+ * @param in the byte array to copy from
+ * @param out the OutputStream to copy to
+ * @throws IOException in case of I/O errors
+ */
+ public static void copy(byte[] in, OutputStream out) throws IOException {
+ Preconditions.checkNotNull(in, "No input byte array specified");
+ Preconditions.checkNotNull(out, "No OutputStream specified");
+ try {
+ out.write(in);
+ } finally {
+ try {
+ out.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ }
+ }
+
+ /**
+ * Copy the contents of the given InputStream into a new byte array.
+ * Closes the stream when done.
+ *
+ * @param in the stream to copy from
+ * @return a new byte array containing the copied contents
+ * @throws IOException in case of I/O errors
+ */
+ public static byte[] copyToByteArray(InputStream in) throws IOException {
+ BytesStreamOutput out = new BytesStreamOutput();
+ copy(in, out);
+ return out.bytes().toBytes();
+ }
+
+
+ //---------------------------------------------------------------------
+ // Copy methods for java.io.Reader / java.io.Writer
+ //---------------------------------------------------------------------
+
+ /**
+ * Copy the contents of the given Reader to the given Writer.
+ * Closes both when done.
+ *
+ * @param in the Reader to copy from
+ * @param out the Writer to copy to
+ * @return the number of characters copied
+ * @throws IOException in case of I/O errors
+ */
+ public static int copy(Reader in, Writer out) throws IOException {
+ Preconditions.checkNotNull(in, "No Reader specified");
+ Preconditions.checkNotNull(out, "No Writer specified");
+ try {
+ int charCount = 0;
+ char[] buffer = new char[BUFFER_SIZE];
+ int charsRead;
+ while ((charsRead = in.read(buffer)) != -1) {
+ out.write(buffer, 0, charsRead);
+ charCount += charsRead;
+ }
+ out.flush();
+ return charCount;
+ } finally {
+ try {
+ in.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ try {
+ out.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ }
+ }
+
+ /**
+ * Copy the contents of the given String to the given output Writer.
+ * Closes the writer when done.
+ *
+ * @param in the String to copy from
+ * @param out the Writer to copy to
+ * @throws IOException in case of I/O errors
+ */
+ public static void copy(String in, Writer out) throws IOException {
+ Preconditions.checkNotNull(in, "No input String specified");
+ Preconditions.checkNotNull(out, "No Writer specified");
+ try {
+ out.write(in);
+ } finally {
+ try {
+ out.close();
+ } catch (IOException ex) {
+ // do nothing
+ }
+ }
+ }
+
+ /**
+ * Copy the contents of the given Reader into a String.
+ * Closes the reader when done.
+ *
+ * @param in the reader to copy from
+ * @return a String containing the copied contents
+ * @throws IOException in case of I/O errors
+ */
+ public static String copyToString(Reader in) throws IOException {
+ StringWriter out = new StringWriter();
+ copy(in, out);
+ return out.toString();
+ }
+
+ public static String copyToStringFromClasspath(ClassLoader classLoader, String path) throws IOException {
+ InputStream is = classLoader.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath with class loader [" + classLoader + "]");
+ }
+ return copyToString(new InputStreamReader(is, Charsets.UTF_8));
+ }
+
+ public static String copyToStringFromClasspath(String path) throws IOException {
+ InputStream is = Streams.class.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+ return copyToString(new InputStreamReader(is, Charsets.UTF_8));
+ }
+
+ public static byte[] copyToBytesFromClasspath(String path) throws IOException {
+ InputStream is = Streams.class.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+ return copyToByteArray(is);
+ }
+
+ public static int readFully(Reader reader, char[] dest) throws IOException {
+ return readFully(reader, dest, 0, dest.length);
+ }
+
+ public static int readFully(Reader reader, char[] dest, int offset, int len) throws IOException {
+ int read = 0;
+ while (read < len) {
+ final int r = reader.read(dest, offset + read, len - read);
+ if (r == -1) {
+ break;
+ }
+ read += r;
+ }
+ return read;
+ }
+
+ public static int readFully(InputStream reader, byte[] dest) throws IOException {
+ return readFully(reader, dest, 0, dest.length);
+ }
+
+ public static int readFully(InputStream reader, byte[] dest, int offset, int len) throws IOException {
+ int read = 0;
+ while (read < len) {
+ final int r = reader.read(dest, offset + read, len - read);
+ if (r == -1) {
+ break;
+ }
+ read += r;
+ }
+ return read;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/ThrowableObjectInputStream.java b/src/main/java/org/elasticsearch/common/io/ThrowableObjectInputStream.java
new file mode 100644
index 0000000..b1dffd6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/ThrowableObjectInputStream.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.elasticsearch.common.Classes;
+
+import java.io.*;
+
+/**
+ * An {@link ObjectInputStream} that understands the compact class descriptor
+ * encoding written by {@link ThrowableObjectOutputStream}, resolving classes
+ * through the supplied {@link ClassLoader} when one is given.
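+ * <p>
+ * Round-trip sketch (assumes the throwable being serialized is itself
+ * serializable):
+ * <pre>
+ * ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ * ThrowableObjectOutputStream oos = new ThrowableObjectOutputStream(bos);
+ * oos.writeObject(new Exception("boom"));
+ * oos.close();
+ * Throwable t = (Throwable) new ThrowableObjectInputStream(
+ *         new ByteArrayInputStream(bos.toByteArray())).readObject();
+ * </pre>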
+ */
+public class ThrowableObjectInputStream extends ObjectInputStream {
+
+ private final ClassLoader classLoader;
+
+ public ThrowableObjectInputStream(InputStream in) throws IOException {
+ this(in, null);
+ }
+
+ public ThrowableObjectInputStream(InputStream in, ClassLoader classLoader) throws IOException {
+ super(in);
+ this.classLoader = classLoader;
+ }
+
+ @Override
+ protected void readStreamHeader() throws IOException, StreamCorruptedException {
+ int version = readByte() & 0xFF;
+ if (version != STREAM_VERSION) {
+ throw new StreamCorruptedException(
+ "Unsupported version: " + version);
+ }
+ }
+
+ @Override
+ protected ObjectStreamClass readClassDescriptor()
+ throws IOException, ClassNotFoundException {
+ int type = read();
+ if (type < 0) {
+ throw new EOFException();
+ }
+ switch (type) {
+ case ThrowableObjectOutputStream.TYPE_EXCEPTION:
+ return ObjectStreamClass.lookup(Exception.class);
+ case ThrowableObjectOutputStream.TYPE_STACKTRACEELEMENT:
+ return ObjectStreamClass.lookup(StackTraceElement.class);
+ case ThrowableObjectOutputStream.TYPE_FAT_DESCRIPTOR:
+ return super.readClassDescriptor();
+ case ThrowableObjectOutputStream.TYPE_THIN_DESCRIPTOR:
+ String className = readUTF();
+ Class<?> clazz = loadClass(className);
+ return ObjectStreamClass.lookup(clazz);
+ default:
+ throw new StreamCorruptedException(
+ "Unexpected class descriptor type: " + type);
+ }
+ }
+
+ @Override
+ protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
+ String className = desc.getName();
+ try {
+ return loadClass(className);
+ } catch (ClassNotFoundException ex) {
+ return super.resolveClass(desc);
+ }
+ }
+
+ protected Class<?> loadClass(String className) throws ClassNotFoundException {
+ Class<?> clazz;
+ ClassLoader classLoader = this.classLoader;
+ if (classLoader == null) {
+ classLoader = Classes.getDefaultClassLoader();
+ }
+
+ if (classLoader != null) {
+ clazz = classLoader.loadClass(className);
+ } else {
+ clazz = Class.forName(className);
+ }
+ return clazz;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/ThrowableObjectOutputStream.java b/src/main/java/org/elasticsearch/common/io/ThrowableObjectOutputStream.java
new file mode 100644
index 0000000..282b290
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/ThrowableObjectOutputStream.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamClass;
+import java.io.OutputStream;
+
+/**
+ * An {@link ObjectOutputStream} that writes compact class descriptors:
+ * {@link Exception} and {@link StackTraceElement} are encoded as a single
+ * type byte, and other non-primitive, non-array classes as a type byte plus
+ * the class name ("thin" descriptors).
+ */
+public class ThrowableObjectOutputStream extends ObjectOutputStream {
+
+ static final int TYPE_FAT_DESCRIPTOR = 0;
+ static final int TYPE_THIN_DESCRIPTOR = 1;
+
+ private static final String EXCEPTION_CLASSNAME = Exception.class.getName();
+ static final int TYPE_EXCEPTION = 2;
+
+ private static final String STACKTRACEELEMENT_CLASSNAME = StackTraceElement.class.getName();
+ static final int TYPE_STACKTRACEELEMENT = 3;
+
+
+ public ThrowableObjectOutputStream(OutputStream out) throws IOException {
+ super(out);
+ }
+
+ @Override
+ protected void writeStreamHeader() throws IOException {
+ writeByte(STREAM_VERSION);
+ }
+
+ @Override
+ protected void writeClassDescriptor(ObjectStreamClass desc) throws IOException {
+ if (desc.getName().equals(EXCEPTION_CLASSNAME)) {
+ write(TYPE_EXCEPTION);
+ } else if (desc.getName().equals(STACKTRACEELEMENT_CLASSNAME)) {
+ write(TYPE_STACKTRACEELEMENT);
+ } else {
+ Class<?> clazz = desc.forClass();
+ if (clazz.isPrimitive() || clazz.isArray()) {
+ write(TYPE_FAT_DESCRIPTOR);
+ super.writeClassDescriptor(desc);
+ } else {
+ write(TYPE_THIN_DESCRIPTOR);
+ writeUTF(desc.getName());
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java b/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java
new file mode 100644
index 0000000..59e1698
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import java.io.CharConversionException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.Writer;
+
+
+/**
+ * A reusable {@link Writer} that encodes characters as UTF-8 through an
+ * internal byte buffer and writes the bytes to an {@link OutputStream}.
+ * Surrogate pairs are combined into a single code point before encoding.
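+ * <p>
+ * Reuse sketch (the target stream {@code out} is assumed to exist):
+ * <pre>
+ * UTF8StreamWriter writer = new UTF8StreamWriter();
+ * writer.setOutput(out).write("hello");
+ * writer.close(); // flushes, closes out, and resets the writer for reuse
+ * </pre>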
+ */
+public final class UTF8StreamWriter extends Writer {
+
+ /**
+ * Holds the current output stream or <code>null</code> if closed.
+ */
+ private OutputStream _outputStream;
+
+ /**
+ * Holds the byte buffer.
+ */
+ private final byte[] _bytes;
+
+ /**
+ * Holds the current write index into the byte buffer.
+ */
+ private int _index;
+
+ /**
+ * Creates a UTF-8 writer having a byte buffer of moderate capacity (2048).
+ */
+ public UTF8StreamWriter() {
+ _bytes = new byte[2048];
+ }
+
+ /**
+ * Creates a UTF-8 writer having a byte buffer of specified capacity.
+ *
+ * @param capacity the capacity of the byte buffer.
+ */
+ public UTF8StreamWriter(int capacity) {
+ _bytes = new byte[capacity];
+ }
+
+ /**
+ * Sets the output stream to use for writing until this writer is closed.
+ * For example:
+ * <pre>
+ * Writer writer = new UTF8StreamWriter().setOutput(out);
+ * </pre>
+ * is equivalent to, but writes faster than:
+ * <pre>
+ * Writer writer = new java.io.OutputStreamWriter(out, "UTF-8");
+ * </pre>
+ *
+ * @param out the output stream.
+ * @return this UTF-8 writer.
+ * @throws IllegalStateException if this writer is being reused and
+ * it has not been {@link #close closed} or {@link #reset reset}.
+ */
+ public UTF8StreamWriter setOutput(OutputStream out) {
+ if (_outputStream != null)
+ throw new IllegalStateException("Writer not closed or reset");
+ _outputStream = out;
+ return this;
+ }
+
+ /**
+ * Writes a single character. This method supports 16-bit
+ * surrogate pairs.
+ *
+ * @param c <code>char</code> the character to be written (possibly
+ * a surrogate).
+ * @throws IOException if an I/O error occurs.
+ */
+ public void write(char c) throws IOException {
+ if ((c < 0xd800) || (c > 0xdfff)) {
+ write((int) c);
+ } else if (c < 0xdc00) { // High surrogate.
+ _highSurrogate = c;
+ } else { // Low surrogate.
+ int code = ((_highSurrogate - 0xd800) << 10) + (c - 0xdc00)
+ + 0x10000;
+ write(code);
+ }
+ }
+
+ private char _highSurrogate;
+
+ /**
+ * Writes a character given its 31-bit Unicode code point.
+ *
+ * @param code the 31-bit Unicode code point of the character to be written.
+ * @throws IOException if an I/O error occurs.
+ */
+ public void write(int code) throws IOException {
+ if ((code & 0xffffff80) == 0) {
+ _bytes[_index] = (byte) code;
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else { // Writes more than one byte.
+ write2(code);
+ }
+ }
+
+ private void write2(int c) throws IOException {
+ if ((c & 0xfffff800) == 0) { // 2 bytes.
+ _bytes[_index] = (byte) (0xc0 | (c >> 6));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | (c & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else if ((c & 0xffff0000) == 0) { // 3 bytes.
+ _bytes[_index] = (byte) (0xe0 | (c >> 12));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | (c & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else if ((c & 0xffe00000) == 0) { // 4 bytes (up to 21 payload bits).
+ _bytes[_index] = (byte) (0xf0 | (c >> 18));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 12) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | (c & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else if ((c & 0xfc000000) == 0) { // 5 bytes (up to 26 payload bits).
+ _bytes[_index] = (byte) (0xf8 | (c >> 24));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 18) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 12) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | (c & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else if ((c & 0x80000000) == 0) { // 6 bytes.
+ _bytes[_index] = (byte) (0xfc | (c >> 30));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 24) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 18) & 0x3f));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 12) & 0x3F));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3F));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ _bytes[_index] = (byte) (0x80 | (c & 0x3F));
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else {
+ throw new CharConversionException("Illegal character U+"
+ + Integer.toHexString(c));
+ }
+ }
+
+ /**
+ * Writes a portion of an array of characters.
+ *
+ * @param cbuf the array of characters.
+ * @param off the offset from which to start writing characters.
+ * @param len the number of characters to write.
+ * @throws IOException if an I/O error occurs.
+ */
+ public void write(char cbuf[], int off, int len) throws IOException {
+ final int off_plus_len = off + len;
+ for (int i = off; i < off_plus_len; ) {
+ char c = cbuf[i++];
+ if (c < 0x80) {
+ _bytes[_index] = (byte) c;
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else {
+ write(c);
+ }
+ }
+ }
+
+ /**
+ * Writes a portion of a string.
+ *
+ * @param str a String.
+ * @param off the offset from which to start writing characters.
+ * @param len the number of characters to write.
+ * @throws IOException if an I/O error occurs
+ */
+ public void write(String str, int off, int len) throws IOException {
+ final int off_plus_len = off + len;
+ for (int i = off; i < off_plus_len; ) {
+ char c = str.charAt(i++);
+ if (c < 0x80) {
+ _bytes[_index] = (byte) c;
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else {
+ write(c);
+ }
+ }
+ }
+
+ /**
+ * Writes the specified character sequence.
+ *
+ * @param csq the character sequence.
+ * @throws IOException if an I/O error occurs
+ */
+ public void write(CharSequence csq) throws IOException {
+ final int length = csq.length();
+ for (int i = 0; i < length; ) {
+ char c = csq.charAt(i++);
+ if (c < 0x80) {
+ _bytes[_index] = (byte) c;
+ if (++_index >= _bytes.length) {
+ flushBuffer();
+ }
+ } else {
+ write(c);
+ }
+ }
+ }
+
+ /**
+ * Flushes the stream. If the stream has saved any characters from the
+ * various write() methods in a buffer, write them immediately to their
+ * intended destination. Then, if that destination is another character or
+ * byte stream, flush it. Thus one flush() invocation will flush all the
+ * buffers in a chain of Writers and OutputStreams.
+ *
+ * @throws IOException if an I/O error occurs.
+ */
+ public void flush() throws IOException {
+ flushBuffer();
+ _outputStream.flush();
+ }
+
+ /**
+ * Closes and {@link #reset resets} this writer for reuse.
+ *
+ * @throws IOException if an I/O error occurs
+ */
+ public void close() throws IOException {
+ if (_outputStream != null) {
+ flushBuffer();
+ _outputStream.close();
+ reset();
+ }
+ }
+
+ /**
+ * Flushes the internal bytes buffer.
+ *
+ * @throws IOException if an I/O error occurs
+ */
+ private void flushBuffer() throws IOException {
+ if (_outputStream == null)
+ throw new IOException("Stream closed");
+ _outputStream.write(_bytes, 0, _index);
+ _index = 0;
+ }
+
+ // Implements Reusable.
+ public void reset() {
+ _highSurrogate = 0;
+ _index = 0;
+ _outputStream = null;
+ }
+
+ /**
+ * @deprecated Replaced by {@link #setOutput(OutputStream)}
+ */
+ public UTF8StreamWriter setOutputStream(OutputStream out) {
+ return this.setOutput(out);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/io/stream/AdapterStreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/AdapterStreamInput.java
new file mode 100644
index 0000000..e2838f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/AdapterStreamInput.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.text.Text;
+
+import java.io.IOException;
+
+/**
+ * A {@link StreamInput} that delegates every call to a wrapped
+ * {@link StreamInput}, which subclasses can replace on the fly via
+ * {@link #reset(StreamInput)}.
+ */
+public abstract class AdapterStreamInput extends StreamInput {
+
+ protected StreamInput in;
+
+ protected AdapterStreamInput() {
+ }
+
+ public AdapterStreamInput(StreamInput in) {
+ this.in = in;
+ super.setVersion(in.getVersion());
+ }
+
+ @Override
+ public StreamInput setVersion(Version version) {
+ in.setVersion(version);
+ return super.setVersion(version);
+ }
+
+ public void reset(StreamInput in) {
+ this.in = in;
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ return in.readByte();
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ in.readBytes(b, offset, len);
+ }
+
+ @Override
+ public BytesReference readBytesReference() throws IOException {
+ return in.readBytesReference();
+ }
+
+ @Override
+ public BytesReference readBytesReference(int length) throws IOException {
+ return in.readBytesReference(length);
+ }
+
+ @Override
+ public void reset() throws IOException {
+ in.reset();
+ }
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+
+ @Override
+ public int read() throws IOException {
+ return in.read();
+ }
+
+ // delegate the remaining read methods directly to the wrapped stream
+
+
+ @Override
+ public void readFully(byte[] b) throws IOException {
+ in.readFully(b);
+ }
+
+ @Override
+ public short readShort() throws IOException {
+ return in.readShort();
+ }
+
+ @Override
+ public int readInt() throws IOException {
+ return in.readInt();
+ }
+
+ @Override
+ public int readVInt() throws IOException {
+ return in.readVInt();
+ }
+
+ @Override
+ public long readLong() throws IOException {
+ return in.readLong();
+ }
+
+ @Override
+ public long readVLong() throws IOException {
+ return in.readVLong();
+ }
+
+ @Override
+ public String readString() throws IOException {
+ return in.readString();
+ }
+
+ @Override
+ public String readSharedString() throws IOException {
+ return in.readSharedString();
+ }
+
+ @Override
+ public Text readText() throws IOException {
+ return in.readText();
+ }
+
+ @Override
+ public Text readSharedText() throws IOException {
+ return in.readSharedText();
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return in.read(b);
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ return in.read(b, off, len);
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ return in.skip(n);
+ }
+
+ @Override
+ public int available() throws IOException {
+ return in.available();
+ }
+
+ @Override
+ public void mark(int readlimit) {
+ in.mark(readlimit);
+ }
+
+ @Override
+ public boolean markSupported() {
+ return in.markSupported();
+ }
+
+ @Override
+ public String toString() {
+ return in.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/AdapterStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/AdapterStreamOutput.java
new file mode 100644
index 0000000..add9b49
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/AdapterStreamOutput.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.text.Text;
+
+import java.io.IOException;
+
+/**
+ * A {@link StreamOutput} that delegates every call to a wrapped
+ * {@link StreamOutput}, which can be swapped at runtime via
+ * {@link #setOut(StreamOutput)}.
+ */
+public class AdapterStreamOutput extends StreamOutput {
+
+ protected StreamOutput out;
+
+ public AdapterStreamOutput(StreamOutput out) {
+ this.out = out;
+ super.setVersion(out.getVersion());
+ }
+
+ @Override
+ public StreamOutput setVersion(Version version) {
+ out.setVersion(version);
+ return super.setVersion(version);
+ }
+
+ public void setOut(StreamOutput out) {
+ this.out = out;
+ }
+
+ public StreamOutput wrappedOut() {
+ return this.out;
+ }
+
+ @Override
+ public boolean seekPositionSupported() {
+ return out.seekPositionSupported();
+ }
+
+ @Override
+ public long position() throws IOException {
+ return out.position();
+ }
+
+ @Override
+ public void seek(long position) throws IOException {
+ out.seek(position);
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ out.writeByte(b);
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ out.writeBytes(b, offset, length);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ out.close();
+ }
+
+ @Override
+ public void reset() throws IOException {
+ out.reset();
+ }
+
+ @Override
+ public void writeBytes(byte[] b) throws IOException {
+ out.writeBytes(b);
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int length) throws IOException {
+ out.writeBytes(b, length);
+ }
+
+ @Override
+ public void writeBytesReference(@Nullable BytesReference bytes) throws IOException {
+ out.writeBytesReference(bytes);
+ }
+
+ @Override
+ public void writeInt(int i) throws IOException {
+ out.writeInt(i);
+ }
+
+ @Override
+ public void writeVInt(int i) throws IOException {
+ out.writeVInt(i);
+ }
+
+ @Override
+ public void writeLong(long i) throws IOException {
+ out.writeLong(i);
+ }
+
+ @Override
+ public void writeVLong(long i) throws IOException {
+ out.writeVLong(i);
+ }
+
+ @Override
+ public void writeString(String str) throws IOException {
+ out.writeString(str);
+ }
+
+ @Override
+ public void writeSharedString(String str) throws IOException {
+ out.writeSharedString(str);
+ }
+
+ @Override
+ public void writeText(Text text) throws IOException {
+ out.writeText(text);
+ }
+
+ @Override
+ public void writeSharedText(Text text) throws IOException {
+ out.writeSharedText(text);
+ }
+
+ @Override
+ public void writeFloat(float v) throws IOException {
+ out.writeFloat(v);
+ }
+
+ @Override
+ public void writeDouble(double v) throws IOException {
+ out.writeDouble(v);
+ }
+
+ @Override
+ public void writeBoolean(boolean b) throws IOException {
+ out.writeBoolean(b);
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ out.write(b, off, len);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public String toString() {
+ return out.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java
new file mode 100644
index 0000000..18bc6a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.io.stream;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * A {@link StreamInput} that reads from a {@link ByteBuffer}, honoring the
+ * buffer's position, limit and mark.
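+ * <p>
+ * For example:
+ * <pre>
+ * StreamInput in = new ByteBufferStreamInput(ByteBuffer.wrap(new byte[]{1, 2, 3}));
+ * byte first = in.readByte(); // 1
+ * </pre>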
+ */
+public class ByteBufferStreamInput extends StreamInput {
+
+ private final ByteBuffer buffer;
+
+ public ByteBufferStreamInput(ByteBuffer buffer) {
+ this.buffer = buffer;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (!buffer.hasRemaining()) {
+ return -1;
+ }
+ return buffer.get() & 0xFF;
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ if (!buffer.hasRemaining()) {
+ throw new EOFException();
+ }
+ return buffer.get();
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (!buffer.hasRemaining()) {
+ return -1;
+ }
+
+ len = Math.min(len, buffer.remaining());
+ buffer.get(b, off, len);
+ return len;
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (n > buffer.remaining()) {
+ int ret = buffer.remaining(); // report how many bytes were actually skipped
+ buffer.position(buffer.limit());
+ return ret;
+ }
+ buffer.position((int) (buffer.position() + n));
+ return n;
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ if (buffer.remaining() < len) {
+ throw new EOFException();
+ }
+ buffer.get(b, offset, len);
+ }
+
+ @Override
+ public void reset() throws IOException {
+ buffer.reset();
+ }
+
+ @Override
+ public int available() throws IOException {
+ return buffer.remaining();
+ }
+
+ @Override
+ public void mark(int readlimit) {
+ buffer.mark();
+ }
+
+ @Override
+ public boolean markSupported() {
+ return true;
+ }
+
+ @Override
+ public void close() throws IOException {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamInput.java
new file mode 100644
index 0000000..b45c2d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamInput.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+/**
+ * A {@link StreamInput} backed by a byte array. When constructed as "unsafe"
+ * the caller may recycle the underlying buffer, so reference-returning reads
+ * copy the bytes instead of sharing the array.
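+ * <p>
+ * For example:
+ * <pre>
+ * BytesStreamInput in = new BytesStreamInput(new byte[]{1, 2, 3}, false);
+ * int first = in.read(); // 1
+ * </pre>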
+ */
+public class BytesStreamInput extends StreamInput {
+
+ protected byte buf[];
+
+ protected int pos;
+
+ protected int count;
+
+ private final boolean unsafe;
+
+ public BytesStreamInput(BytesReference bytes) {
+ if (!bytes.hasArray()) {
+ bytes = bytes.toBytesArray();
+ }
+ this.buf = bytes.array();
+ this.pos = bytes.arrayOffset();
+ this.count = bytes.length();
+ this.unsafe = false;
+ }
+
+ public BytesStreamInput(byte buf[], boolean unsafe) {
+ this(buf, 0, buf.length, unsafe);
+ }
+
+ public BytesStreamInput(byte buf[], int offset, int length, boolean unsafe) {
+ this.buf = buf;
+ this.pos = offset;
+ this.count = Math.min(offset + length, buf.length);
+ this.unsafe = unsafe;
+ }
+
+ @Override
+ public BytesReference readBytesReference(int length) throws IOException {
+ if (unsafe) {
+ return super.readBytesReference(length);
+ }
+ BytesArray bytes = new BytesArray(buf, pos, length);
+ pos += length;
+ return bytes;
+ }
+
+ @Override
+ public BytesRef readBytesRef(int length) throws IOException {
+ if (unsafe) {
+ return super.readBytesRef(length);
+ }
+ BytesRef bytes = new BytesRef(buf, pos, length);
+ pos += length;
+ return bytes;
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (pos + n > count) {
+ n = count - pos;
+ }
+ if (n < 0) {
+ return 0;
+ }
+ pos += n;
+ return n;
+ }
+
+ public int position() {
+ return this.pos;
+ }
+
+ @Override
+ public int read() throws IOException {
+ return (pos < count) ? (buf[pos++] & 0xff) : -1;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (b == null) {
+ throw new NullPointerException();
+ } else if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+ if (pos >= count) {
+ return -1;
+ }
+ if (pos + len > count) {
+ len = count - pos;
+ }
+ if (len <= 0) {
+ return 0;
+ }
+ System.arraycopy(buf, pos, b, off, len);
+ pos += len;
+ return len;
+ }
+
+ public byte[] underlyingBuffer() {
+ return buf;
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ if (pos >= count) {
+ throw new EOFException();
+ }
+ return buf[pos++];
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ if (len == 0) {
+ return;
+ }
+ if (pos >= count) {
+ throw new EOFException();
+ }
+ if (pos + len > count) {
+ // readBytes must read exactly len bytes; fail instead of truncating
+ throw new EOFException();
+ }
+ System.arraycopy(buf, pos, b, offset, len);
+ pos += len;
+ }
+
+ @Override
+ public void reset() throws IOException {
+ pos = 0;
+ }
+
+ @Override
+ public void close() throws IOException {
+ // nothing to do here...
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java
new file mode 100644
index 0000000..17b87c7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.BytesStream;
+
+import java.io.IOException;
+
+/**
+ * A {@link StreamOutput} that writes into an automatically growing byte
+ * array, exposed as a {@link BytesReference} via {@link #bytes()}.
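+ * <p>
+ * For example:
+ * <pre>
+ * BytesStreamOutput out = new BytesStreamOutput();
+ * out.writeVInt(42);
+ * BytesReference bytes = out.bytes();
+ * </pre>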
+ */
+public class BytesStreamOutput extends StreamOutput implements BytesStream {
+
+ public static final int DEFAULT_SIZE = 2 * 1024;
+
+ public static final int OVERSIZE_LIMIT = 256 * 1024;
+
+ /**
+ * The buffer where data is stored.
+ */
+ protected byte buf[];
+
+ /**
+ * The number of valid bytes in the buffer.
+ */
+ protected int count;
+
+ public BytesStreamOutput() {
+ this(DEFAULT_SIZE);
+ }
+
+ public BytesStreamOutput(int size) {
+ this.buf = new byte[size];
+ }
+
+ @Override
+ public boolean seekPositionSupported() {
+ return true;
+ }
+
+ @Override
+ public long position() throws IOException {
+ return count;
+ }
+
+ @Override
+ public void seek(long position) throws IOException {
+ if (position > Integer.MAX_VALUE) {
+ throw new UnsupportedOperationException();
+ }
+ count = (int) position;
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ int newcount = count + 1;
+ if (newcount > buf.length) {
+ buf = grow(newcount);
+ }
+ buf[count] = b;
+ count = newcount;
+ }
+
+ public void skip(int length) {
+ int newcount = count + length;
+ if (newcount > buf.length) {
+ buf = grow(newcount);
+ }
+ count = newcount;
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ if (length == 0) {
+ return;
+ }
+ int newcount = count + length;
+ if (newcount > buf.length) {
+ buf = grow(newcount);
+ }
+ System.arraycopy(b, offset, buf, count, length);
+ count = newcount;
+ }
+
+ private byte[] grow(int newCount) {
+ // try and grow faster while we are small...
+ if (newCount < OVERSIZE_LIMIT) {
+ newCount = Math.max(buf.length << 1, newCount);
+ }
+ return ArrayUtil.grow(buf, newCount);
+ }
+
+ public void seek(int seekTo) {
+ count = seekTo;
+ }
+
+ public void reset() {
+ count = 0;
+ }
+
+ public int bufferSize() {
+ return buf.length;
+ }
+
+ @Override
+ public void flush() throws IOException {
+ // nothing to do there
+ }
+
+ @Override
+ public void close() throws IOException {
+ // nothing to do here
+ }
+
+ @Override
+ public BytesReference bytes() {
+ return new BytesArray(buf, 0, count);
+ }
+
+ /**
+ * Returns the current size of the buffer.
+ *
+ * @return the value of the <code>count</code> field, which is the number
+ * of valid bytes in this output stream.
+ * @see java.io.ByteArrayOutputStream#count
+ */
+ public int size() {
+ return count;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/CachedStreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/CachedStreamInput.java
new file mode 100644
index 0000000..a3a0143
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/CachedStreamInput.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.common.compress.Compressor;
+
+import java.io.IOException;
+import java.lang.ref.SoftReference;
+
+/**
+ * Provides a per-thread, softly referenced {@link HandlesStreamInput} so
+ * that wrapping a raw {@link StreamInput} does not allocate on every call.
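+ * <p>
+ * Typical use ({@code rawInput} is assumed to come from elsewhere):
+ * <pre>
+ * HandlesStreamInput wrapped = CachedStreamInput.cachedHandles(rawInput);
+ * </pre>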
+ */
+public class CachedStreamInput {
+
+ static class Entry {
+ final HandlesStreamInput handles;
+
+ Entry(HandlesStreamInput handles) {
+ this.handles = handles;
+ }
+ }
+
+ private static final ThreadLocal<SoftReference<Entry>> cache = new ThreadLocal<SoftReference<Entry>>();
+
+ static Entry instance() {
+ SoftReference<Entry> ref = cache.get();
+ Entry entry = ref == null ? null : ref.get();
+ if (entry == null) {
+ HandlesStreamInput handles = new HandlesStreamInput();
+ entry = new Entry(handles);
+ cache.set(new SoftReference<Entry>(entry));
+ }
+ return entry;
+ }
+
+ public static void clear() {
+ cache.remove();
+ }
+
+ public static StreamInput compressed(Compressor compressor, StreamInput in) throws IOException {
+ return compressor.streamInput(in);
+ }
+
+ public static HandlesStreamInput cachedHandles(StreamInput in) {
+ HandlesStreamInput handles = instance().handles;
+ handles.reset(in);
+ return handles;
+ }
+
+ public static HandlesStreamInput cachedHandlesCompressed(Compressor compressor, StreamInput in) throws IOException {
+ Entry entry = instance();
+ entry.handles.reset(compressor.streamInput(in));
+ return entry.handles;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java
new file mode 100644
index 0000000..aa542b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import java.io.Closeable;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * A {@link StreamOutput} that writes to a {@link DataOutput}, closing it on
+ * {@link #close()} only if it also implements {@link Closeable}.
+ */
+public class DataOutputStreamOutput extends StreamOutput {
+
+ private final DataOutput out;
+
+ public DataOutputStreamOutput(DataOutput out) {
+ this.out = out;
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ out.writeByte(b);
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ out.write(b, offset, length);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ // nothing to do there...
+ }
+
+ @Override
+ public void reset() throws IOException {
+ // nothing to do there...
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (out instanceof Closeable) {
+ ((Closeable) out).close();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/io/stream/HandlesStreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/HandlesStreamInput.java
new file mode 100644
index 0000000..037ea19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/HandlesStreamInput.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import org.elasticsearch.common.text.Text;
+
+import java.io.IOException;
+
+/**
+ * The read-side counterpart of {@link HandlesStreamOutput}: resolves
+ * handle-encoded shared strings and texts back to their values.
+ */
+public class HandlesStreamInput extends AdapterStreamInput {
+
+ private final IntObjectOpenHashMap<String> handles = new IntObjectOpenHashMap<String>();
+ private final IntObjectOpenHashMap<Text> handlesText = new IntObjectOpenHashMap<Text>();
+
+ HandlesStreamInput() {
+ super();
+ }
+
+ public HandlesStreamInput(StreamInput in) {
+ super(in);
+ }
+
+ @Override
+ public String readSharedString() throws IOException {
+ byte b = in.readByte();
+ if (b == 0) {
+ // full string with handle
+ int handle = in.readVInt();
+ String s = in.readString();
+ handles.put(handle, s);
+ return s;
+ } else if (b == 1) {
+ return handles.get(in.readVInt());
+ } else {
+ throw new IOException("Expected handle header, got [" + b + "]");
+ }
+ }
+
+ @Override
+ public String readString() throws IOException {
+ return in.readString();
+ }
+
+ @Override
+ public Text readSharedText() throws IOException {
+ byte b = in.readByte();
+ if (b == 0) {
+ int handle = in.readVInt();
+ Text s = in.readText();
+ handlesText.put(handle, s);
+ return s;
+ } else if (b == 1) {
+ return handlesText.get(in.readVInt());
+ } else if (b == 2) {
+ return in.readText();
+ } else {
+ throw new IOException("Expected handle header, got [" + b + "]");
+ }
+ }
+
+ @Override
+ public void reset() throws IOException {
+ super.reset();
+ cleanHandles();
+ }
+
+ @Override
+ public void reset(StreamInput in) {
+ super.reset(in);
+ cleanHandles();
+ }
+
+ public void cleanHandles() {
+ handles.clear();
+ handlesText.clear();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/HandlesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/HandlesStreamOutput.java
new file mode 100644
index 0000000..c34978f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/HandlesStreamOutput.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.elasticsearch.common.text.Text;
+
+import java.io.IOException;
+
+/**
+ * A {@link StreamOutput} wrapper that de-duplicates shared strings and
+ * texts by writing an integer handle instead of repeating the value.
+ */
+public class HandlesStreamOutput extends AdapterStreamOutput {
+
+ private final ObjectIntOpenHashMap<String> handles = new ObjectIntOpenHashMap<String>();
+ private final ObjectIntOpenHashMap<Text> handlesText = new ObjectIntOpenHashMap<Text>();
+
+ public HandlesStreamOutput(StreamOutput out) {
+ super(out);
+ }
+
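+ // ObjectIntOpenHashMap#lget is an HPPC idiom: it returns the value found
+ // by the immediately preceding containsKey call, i.e. the handle already
+ // assigned to this string.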
+ @Override
+ public void writeSharedString(String str) throws IOException {
+ if (handles.containsKey(str)) {
+ out.writeByte((byte) 1);
+ out.writeVInt(handles.lget());
+ } else {
+ int handle = handles.size();
+ handles.put(str, handle);
+ out.writeByte((byte) 0);
+ out.writeVInt(handle);
+ out.writeString(str);
+ }
+ }
+
+ @Override
+ public void writeString(String s) throws IOException {
+ out.writeString(s);
+ }
+
+ @Override
+ public void writeSharedText(Text text) throws IOException {
+ if (handlesText.containsKey(text)) {
+ out.writeByte((byte) 1);
+ out.writeVInt(handlesText.lget());
+ } else {
+ int handle = handlesText.size();
+ handlesText.put(text, handle);
+ out.writeByte((byte) 0);
+ out.writeVInt(handle);
+ out.writeText(text);
+ }
+ }
+
+ @Override
+ public void reset() throws IOException {
+ clear();
+ if (out != null) {
+ out.reset();
+ }
+ }
+
+ public void clear() {
+ handles.clear();
+ handlesText.clear();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java
new file mode 100644
index 0000000..ffe8d29
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.common.io.Streams;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A {@link StreamInput} backed by a plain {@link InputStream}.
+ */
+public class InputStreamStreamInput extends StreamInput {
+
+ private final InputStream is;
+
+ public InputStreamStreamInput(InputStream is) {
+ this.is = is;
+ }
+
+ @Override
+ public byte readByte() throws IOException {
+ int ch = is.read();
+ if (ch < 0)
+ throw new EOFException();
+ return (byte) (ch);
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ if (len < 0)
+ throw new IndexOutOfBoundsException();
+ final int read = Streams.readFully(is, b, offset, len);
+ if (read != len) {
+ throw new EOFException();
+ }
+ }
+
+ @Override
+ public void reset() throws IOException {
+ is.reset();
+ }
+
+ @Override
+ public void close() throws IOException {
+ is.close();
+ }
+
+ @Override
+ public int read() throws IOException {
+ return is.read();
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return is.read(b);
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ return is.read(b, off, len);
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ return is.skip(n);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java
new file mode 100644
index 0000000..93e1fe8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A {@link StreamOutput} that writes to a wrapped {@link OutputStream}.
+ */
+public class OutputStreamStreamOutput extends StreamOutput {
+
+ private final OutputStream out;
+
+ public OutputStreamStreamOutput(OutputStream out) {
+ this.out = out;
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ out.write(b, offset, length);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ out.close();
+ }
+
+ @Override
+ public void reset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
new file mode 100644
index 0000000..72b7b2a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -0,0 +1,496 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.joda.time.DateTime;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.ref.SoftReference;
+import java.util.*;
+
+/**
+ * The input side of the binary serialization protocol: reads primitives,
+ * strings, and generic values written by {@link StreamOutput}.
+ */
+public abstract class StreamInput extends InputStream {
+
+ private static final ThreadLocal<SoftReference<char[]>> charCache = new ThreadLocal<SoftReference<char[]>>();
+
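+ // Per-thread scratch buffer for readString, held via SoftReference so it
+ // can be reclaimed under memory pressure.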
+ private static char[] charCache(int size) {
+ SoftReference<char[]> ref = charCache.get();
+ char[] arr = (ref == null) ? null : ref.get();
+ if (arr == null || arr.length < size) {
+ arr = new char[ArrayUtil.oversize(size, RamUsageEstimator.NUM_BYTES_CHAR)];
+ charCache.set(new SoftReference<char[]>(arr));
+ }
+ return arr;
+ }
+
+ private Version version = Version.CURRENT;
+
+ public Version getVersion() {
+ return this.version;
+ }
+
+ public StreamInput setVersion(Version version) {
+ this.version = version;
+ return this;
+ }
+
+ /**
+ * Reads and returns a single byte.
+ */
+ public abstract byte readByte() throws IOException;
+
+ /**
+ * Reads a specified number of bytes into an array at the specified offset.
+ *
+ * @param b the array to read bytes into
+ * @param offset the offset in the array to start storing bytes
+ * @param len the number of bytes to read
+ */
+ public abstract void readBytes(byte[] b, int offset, int len) throws IOException;
+
+ /**
+ * Reads a bytes reference from this stream; the result may hold an actual
+ * reference to the stream's underlying bytes rather than a copy.
+ */
+ public BytesReference readBytesReference() throws IOException {
+ int length = readVInt();
+ return readBytesReference(length);
+ }
+
+ /**
+ * Reads a bytes reference of the given length from this stream; the result
+ * may hold an actual reference to the stream's underlying bytes.
+ */
+ public BytesReference readBytesReference(int length) throws IOException {
+ if (length == 0) {
+ return BytesArray.EMPTY;
+ }
+ byte[] bytes = new byte[length];
+ readBytes(bytes, 0, length);
+ return new BytesArray(bytes, 0, length);
+ }
+
+ public BytesRef readBytesRef() throws IOException {
+ int length = readVInt();
+ return readBytesRef(length);
+ }
+
+ public BytesRef readBytesRef(int length) throws IOException {
+ if (length == 0) {
+ return new BytesRef();
+ }
+ byte[] bytes = new byte[length];
+ readBytes(bytes, 0, length);
+ return new BytesRef(bytes, 0, length);
+ }
+
+ public void readFully(byte[] b) throws IOException {
+ readBytes(b, 0, b.length);
+ }
+
+ public short readShort() throws IOException {
+ return (short) (((readByte() & 0xFF) << 8) | (readByte() & 0xFF));
+ }
+
+ /**
+ * Reads four bytes and returns an int.
+ */
+ public int readInt() throws IOException {
+ return ((readByte() & 0xFF) << 24) | ((readByte() & 0xFF) << 16)
+ | ((readByte() & 0xFF) << 8) | (readByte() & 0xFF);
+ }
+
+ /**
+ * Reads an int stored in variable-length format. Reads between one and
+ * five bytes. Smaller values take fewer bytes. Negative numbers
+ * will always use all 5 bytes and are therefore better serialized
+ * using {@link #readInt}
+ */
+ public int readVInt() throws IOException {
+ byte b = readByte();
+ int i = b & 0x7F;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7F) << 7;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7F) << 14;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7F) << 21;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ assert (b & 0x80) == 0;
+ return i | ((b & 0x7F) << 28);
+ }
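+ // Example: 300 (0b1_0010_1100) is written low 7 bits first, so it reads
+ // back from the two bytes 0xAC (0x2C with the continuation bit set) and
+ // then 0x02.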
+
+ /**
+ * Reads eight bytes and returns a long.
+ */
+ public long readLong() throws IOException {
+ return (((long) readInt()) << 32) | (readInt() & 0xFFFFFFFFL);
+ }
+
+ /**
+ * Reads a long stored in variable-length format. Reads between one and
+ * nine bytes. Smaller values take fewer bytes. Negative numbers are not
+ * supported.
+ */
+ public long readVLong() throws IOException {
+ byte b = readByte();
+ long i = b & 0x7FL;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 7;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 14;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 21;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 28;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 35;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 42;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ i |= (b & 0x7FL) << 49;
+ if ((b & 0x80) == 0) {
+ return i;
+ }
+ b = readByte();
+ assert (b & 0x80) == 0;
+ return i | ((b & 0x7FL) << 56);
+ }
+
+ @Nullable
+ public Text readOptionalText() throws IOException {
+ int length = readInt();
+ if (length == -1) {
+ return null;
+ }
+ return new StringAndBytesText(readBytesReference(length));
+ }
+
+ public Text readText() throws IOException {
+ // use StringAndBytesText so the string form is cached if it is ever needed
+ int length = readInt();
+ return new StringAndBytesText(readBytesReference(length));
+ }
+
+ public Text[] readTextArray() throws IOException {
+ int size = readVInt();
+ if (size == 0) {
+ return StringText.EMPTY_ARRAY;
+ }
+ Text[] ret = new Text[size];
+ for (int i = 0; i < size; i++) {
+ ret[i] = readText();
+ }
+ return ret;
+ }
+
+ public Text readSharedText() throws IOException {
+ return readText();
+ }
+
+ @Nullable
+ public String readOptionalString() throws IOException {
+ if (readBoolean()) {
+ return readString();
+ }
+ return null;
+ }
+
+ @Nullable
+ public String readOptionalSharedString() throws IOException {
+ if (readBoolean()) {
+ return readSharedString();
+ }
+ return null;
+ }
+
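+ // Decodes the char-count-prefixed encoding produced by
+ // StreamOutput#writeString: one byte for ASCII, two- and three-byte
+ // sequences for larger chars. It is char-oriented, so supplementary code
+ // points travel as surrogate pairs, each encoded as a 3-byte sequence.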
+ public String readString() throws IOException {
+ int charCount = readVInt();
+ char[] chars = charCache(charCount);
+ int c, charIndex = 0;
+ while (charIndex < charCount) {
+ c = readByte() & 0xff;
+ switch (c >> 4) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ chars[charIndex++] = (char) c;
+ break;
+ case 12:
+ case 13:
+ chars[charIndex++] = (char) ((c & 0x1F) << 6 | readByte() & 0x3F);
+ break;
+ case 14:
+ chars[charIndex++] = (char) ((c & 0x0F) << 12 | (readByte() & 0x3F) << 6 | (readByte() & 0x3F) << 0);
+ break;
+ }
+ }
+ return new String(chars, 0, charCount);
+ }
+
+ public String readSharedString() throws IOException {
+ return readString();
+ }
+
+
+ public final float readFloat() throws IOException {
+ return Float.intBitsToFloat(readInt());
+ }
+
+ public final double readDouble() throws IOException {
+ return Double.longBitsToDouble(readLong());
+ }
+
+ /**
+ * Reads a boolean.
+ */
+ public final boolean readBoolean() throws IOException {
+ return readByte() != 0;
+ }
+
+ @Nullable
+ public final Boolean readOptionalBoolean() throws IOException {
+ byte val = readByte();
+ if (val == 2) {
+ return null;
+ }
+ if (val == 1) {
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Resets the stream.
+ */
+ public abstract void reset() throws IOException;
+
+ /**
+ * Closes the stream to further operations.
+ */
+ public abstract void close() throws IOException;
+
+// // IS
+//
+// @Override public int read() throws IOException {
+// return readByte();
+// }
+//
+// // Here, we assume that we always can read the full byte array
+//
+// @Override public int read(byte[] b, int off, int len) throws IOException {
+// readBytes(b, off, len);
+// return len;
+// }
+
+ public String[] readStringArray() throws IOException {
+ int size = readVInt();
+ if (size == 0) {
+ return Strings.EMPTY_ARRAY;
+ }
+ String[] ret = new String[size];
+ for (int i = 0; i < size; i++) {
+ ret[i] = readString();
+ }
+ return ret;
+ }
+
+ @Nullable
+ public Map<String, Object> readMap() throws IOException {
+ return (Map<String, Object>) readGenericValue();
+ }
+
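+ // Reads a value tagged with a single type byte; the tags mirror those
+ // written by StreamOutput#writeGenericValue (-1 = null, 0 = String,
+ // 1 = Integer, 2 = Long, 7 = List, 9 = LinkedHashMap, 10 = HashMap, etc.).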
+ @SuppressWarnings({"unchecked"})
+ @Nullable
+ public Object readGenericValue() throws IOException {
+ byte type = readByte();
+ switch (type) {
+ case -1:
+ return null;
+ case 0:
+ return readString();
+ case 1:
+ return readInt();
+ case 2:
+ return readLong();
+ case 3:
+ return readFloat();
+ case 4:
+ return readDouble();
+ case 5:
+ return readBoolean();
+ case 6:
+ int bytesSize = readVInt();
+ byte[] value = new byte[bytesSize];
+ readBytes(value, 0, bytesSize);
+ return value;
+ case 7:
+ int size = readVInt();
+ List list = new ArrayList(size);
+ for (int i = 0; i < size; i++) {
+ list.add(readGenericValue());
+ }
+ return list;
+ case 8:
+ int size8 = readVInt();
+ Object[] list8 = new Object[size8];
+ for (int i = 0; i < size8; i++) {
+ list8[i] = readGenericValue();
+ }
+ return list8;
+ case 9:
+ int size9 = readVInt();
+ Map map9 = new LinkedHashMap(size9);
+ for (int i = 0; i < size9; i++) {
+ map9.put(readSharedString(), readGenericValue());
+ }
+ return map9;
+ case 10:
+ int size10 = readVInt();
+ Map map10 = new HashMap(size10);
+ for (int i = 0; i < size10; i++) {
+ map10.put(readSharedString(), readGenericValue());
+ }
+ return map10;
+ case 11:
+ return readByte();
+ case 12:
+ return new Date(readLong());
+ case 13:
+ return new DateTime(readLong());
+ case 14:
+ return readBytesReference();
+ case 15:
+ return readText();
+ case 16:
+ return readShort();
+ case 17:
+ return readPrimitiveIntArray();
+ case 18:
+ return readPrimitiveLongArray();
+ case 19:
+ return readPrimitiveFloatArray();
+ case 20:
+ return readPrimitiveDoubleArray();
+ default:
+ throw new IOException("Can't read unknown type [" + type + "]");
+ }
+ }
+
+ private Object readPrimitiveIntArray() throws IOException {
+ int length = readVInt();
+ int[] values = new int[length];
+ for (int i = 0; i < length; i++) {
+ values[i] = readInt();
+ }
+ return values;
+ }
+
+ private Object readPrimitiveLongArray() throws IOException {
+ int length = readVInt();
+ long[] values = new long[length];
+ for (int i = 0; i < length; i++) {
+ values[i] = readLong();
+ }
+ return values;
+ }
+
+ private Object readPrimitiveFloatArray() throws IOException {
+ int length = readVInt();
+ float[] values = new float[length];
+ for (int i = 0; i < length; i++) {
+ values[i] = readFloat();
+ }
+ return values;
+ }
+
+ private Object readPrimitiveDoubleArray() throws IOException {
+ int length = readVInt();
+ double[] values = new double[length];
+ for (int i = 0; i < length; i++) {
+ values[i] = readDouble();
+ }
+ return values;
+ }
+
+ /**
+ * Reads a potentially null {@link Streamable}, as written by
+ * {@link StreamOutput#writeOptionalStreamable}.
+ */
+ public <T extends Streamable> T readOptionalStreamable(T streamable) throws IOException {
+ if (readBoolean()) {
+ streamable.readFrom(this);
+ return streamable;
+ } else {
+ return null;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
new file mode 100644
index 0000000..a9cd1fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -0,0 +1,467 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.UTF8StreamWriter;
+import org.elasticsearch.common.text.Text;
+import org.joda.time.ReadableInstant;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.ref.SoftReference;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The output side of the binary serialization protocol: writes primitives,
+ * strings, and generic values that {@link StreamInput} can read back.
+ */
+public abstract class StreamOutput extends OutputStream {
+
+ private static ThreadLocal<SoftReference<UTF8StreamWriter>> utf8StreamWriter = new ThreadLocal<SoftReference<UTF8StreamWriter>>();
+
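+ // Per-thread UTF-8 writer, softly referenced so it can be reclaimed under
+ // memory pressure; reset before each use.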
+ public static UTF8StreamWriter utf8StreamWriter() {
+ SoftReference<UTF8StreamWriter> ref = utf8StreamWriter.get();
+ UTF8StreamWriter writer = (ref == null) ? null : ref.get();
+ if (writer == null) {
+ writer = new UTF8StreamWriter(1024 * 4);
+ utf8StreamWriter.set(new SoftReference<UTF8StreamWriter>(writer));
+ }
+ writer.reset();
+ return writer;
+ }
+
+ private Version version = Version.CURRENT;
+
+ public Version getVersion() {
+ return this.version;
+ }
+
+ public StreamOutput setVersion(Version version) {
+ this.version = version;
+ return this;
+ }
+
+ public boolean seekPositionSupported() {
+ return false;
+ }
+
+ public long position() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ public void seek(long position) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Writes a single byte.
+ */
+ public abstract void writeByte(byte b) throws IOException;
+
+ /**
+ * Writes an array of bytes.
+ *
+ * @param b the bytes to write
+ */
+ public void writeBytes(byte[] b) throws IOException {
+ writeBytes(b, 0, b.length);
+ }
+
+ /**
+ * Writes an array of bytes.
+ *
+ * @param b the bytes to write
+ * @param length the number of bytes to write
+ */
+ public void writeBytes(byte[] b, int length) throws IOException {
+ writeBytes(b, 0, length);
+ }
+
+ /**
+ * Writes an array of bytes.
+ *
+ * @param b the bytes to write
+ * @param offset the offset in the byte array
+ * @param length the number of bytes to write
+ */
+ public abstract void writeBytes(byte[] b, int offset, int length) throws IOException;
+
+ /**
+ * Writes the bytes reference, including a length header.
+ */
+ public void writeBytesReference(@Nullable BytesReference bytes) throws IOException {
+ if (bytes == null) {
+ writeVInt(0);
+ return;
+ }
+ writeVInt(bytes.length());
+ bytes.writeTo(this);
+ }
+
+ public void writeBytesRef(BytesRef bytes) throws IOException {
+ if (bytes == null) {
+ writeVInt(0);
+ return;
+ }
+ writeVInt(bytes.length);
+ write(bytes.bytes, bytes.offset, bytes.length);
+ }
+
+ public final void writeShort(short v) throws IOException {
+ writeByte((byte) (v >> 8));
+ writeByte((byte) v);
+ }
+
+ /**
+ * Writes an int as four bytes.
+ */
+ public void writeInt(int i) throws IOException {
+ writeByte((byte) (i >> 24));
+ writeByte((byte) (i >> 16));
+ writeByte((byte) (i >> 8));
+ writeByte((byte) i);
+ }
+
+ /**
+ * Writes an int in a variable-length format. Writes between one and
+ * five bytes. Smaller values take fewer bytes. Negative numbers
+ * will always use all 5 bytes and are therefore better serialized
+ * using {@link #writeInt}
+ */
+ public void writeVInt(int i) throws IOException {
+ while ((i & ~0x7F) != 0) {
+ writeByte((byte) ((i & 0x7f) | 0x80));
+ i >>>= 7;
+ }
+ writeByte((byte) i);
+ }
+
+ /**
+ * Writes a long as eight bytes.
+ */
+ public void writeLong(long i) throws IOException {
+ writeInt((int) (i >> 32));
+ writeInt((int) i);
+ }
+
+ /**
+ * Writes a long in a variable-length format. Writes between one and nine
+ * bytes. Smaller values take fewer bytes. Negative numbers are not
+ * supported.
+ */
+ public void writeVLong(long i) throws IOException {
+ assert i >= 0;
+ while ((i & ~0x7F) != 0) {
+ writeByte((byte) ((i & 0x7f) | 0x80));
+ i >>>= 7;
+ }
+ writeByte((byte) i);
+ }
+
+ public void writeOptionalString(@Nullable String str) throws IOException {
+ if (str == null) {
+ writeBoolean(false);
+ } else {
+ writeBoolean(true);
+ writeString(str);
+ }
+ }
+
+ public void writeOptionalSharedString(@Nullable String str) throws IOException {
+ if (str == null) {
+ writeBoolean(false);
+ } else {
+ writeBoolean(true);
+ writeSharedString(str);
+ }
+ }
+
+ public void writeOptionalText(@Nullable Text text) throws IOException {
+ if (text == null) {
+ writeInt(-1);
+ } else {
+ writeText(text);
+ }
+ }
+
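+ // When the Text has no cached bytes and the stream is seekable, a 4-byte
+ // length slot is reserved, the UTF-8 bytes are streamed directly, and the
+ // slot is then backfilled with the real length; otherwise the cached bytes
+ // are written with a plain length prefix.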
+ public void writeText(Text text) throws IOException {
+ if (!text.hasBytes() && seekPositionSupported()) {
+ long pos1 = position();
+ // make room for the size
+ seek(pos1 + 4);
+ UTF8StreamWriter utf8StreamWriter = utf8StreamWriter();
+ utf8StreamWriter.setOutput(this);
+ utf8StreamWriter.write(text.string());
+ utf8StreamWriter.close();
+ long pos2 = position();
+ seek(pos1);
+ writeInt((int) (pos2 - pos1 - 4));
+ seek(pos2);
+ } else {
+ BytesReference bytes = text.bytes();
+ writeInt(bytes.length());
+ bytes.writeTo(this);
+ }
+ }
+
+ public void writeTextArray(Text[] array) throws IOException {
+ writeVInt(array.length);
+ for (Text t : array) {
+ writeText(t);
+ }
+ }
+
+ public void writeSharedText(Text text) throws IOException {
+ writeText(text);
+ }
+
+ public void writeString(String str) throws IOException {
+ int charCount = str.length();
+ writeVInt(charCount);
+ int c;
+ for (int i = 0; i < charCount; i++) {
+ c = str.charAt(i);
+ if (c <= 0x007F) {
+ writeByte((byte) c);
+ } else if (c > 0x07FF) {
+ writeByte((byte) (0xE0 | c >> 12 & 0x0F));
+ writeByte((byte) (0x80 | c >> 6 & 0x3F));
+ writeByte((byte) (0x80 | c >> 0 & 0x3F));
+ } else {
+ writeByte((byte) (0xC0 | c >> 6 & 0x1F));
+ writeByte((byte) (0x80 | c >> 0 & 0x3F));
+ }
+ }
+ }
+
+ public void writeSharedString(String str) throws IOException {
+ writeString(str);
+ }
+
+ public void writeFloat(float v) throws IOException {
+ writeInt(Float.floatToIntBits(v));
+ }
+
+ public void writeDouble(double v) throws IOException {
+ writeLong(Double.doubleToLongBits(v));
+ }
+
+
+ private static final byte ZERO = 0;
+ private static final byte ONE = 1;
+ private static final byte TWO = 2;
+
+ /**
+ * Writes a boolean.
+ */
+ public void writeBoolean(boolean b) throws IOException {
+ writeByte(b ? ONE : ZERO);
+ }
+
+ public void writeOptionalBoolean(@Nullable Boolean b) throws IOException {
+ if (b == null) {
+ writeByte(TWO);
+ } else {
+ writeByte(b ? ONE : ZERO);
+ }
+ }
+
+ /**
+ * Forces any buffered output to be written.
+ */
+ public abstract void flush() throws IOException;
+
+ /**
+ * Closes this stream to further operations.
+ */
+ public abstract void close() throws IOException;
+
+ public abstract void reset() throws IOException;
+
+ @Override
+ public void write(int b) throws IOException {
+ writeByte((byte) b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ writeBytes(b, off, len);
+ }
+
+ public void writeStringArray(String[] array) throws IOException {
+ writeVInt(array.length);
+ for (String s : array) {
+ writeString(s);
+ }
+ }
+
+ /**
+ * Writes a possibly null string array; a null array is written as a zero
+ * length, making it indistinguishable from an empty array when read back.
+ */
+ public void writeStringArrayNullable(@Nullable String[] array) throws IOException {
+ if (array == null) {
+ writeVInt(0);
+ } else {
+ writeVInt(array.length);
+ for (String s : array) {
+ writeString(s);
+ }
+ }
+ }
+
+ public void writeMap(@Nullable Map<String, Object> map) throws IOException {
+ writeGenericValue(map);
+ }
+
+ public void writeGenericValue(@Nullable Object value) throws IOException {
+ if (value == null) {
+ writeByte((byte) -1);
+ return;
+ }
+ Class type = value.getClass();
+ if (type == String.class) {
+ writeByte((byte) 0);
+ writeString((String) value);
+ } else if (type == Integer.class) {
+ writeByte((byte) 1);
+ writeInt((Integer) value);
+ } else if (type == Long.class) {
+ writeByte((byte) 2);
+ writeLong((Long) value);
+ } else if (type == Float.class) {
+ writeByte((byte) 3);
+ writeFloat((Float) value);
+ } else if (type == Double.class) {
+ writeByte((byte) 4);
+ writeDouble((Double) value);
+ } else if (type == Boolean.class) {
+ writeByte((byte) 5);
+ writeBoolean((Boolean) value);
+ } else if (type == byte[].class) {
+ writeByte((byte) 6);
+ writeVInt(((byte[]) value).length);
+ writeBytes(((byte[]) value));
+ } else if (value instanceof List) {
+ writeByte((byte) 7);
+ List list = (List) value;
+ writeVInt(list.size());
+ for (Object o : list) {
+ writeGenericValue(o);
+ }
+ } else if (value instanceof Object[]) {
+ writeByte((byte) 8);
+ Object[] list = (Object[]) value;
+ writeVInt(list.length);
+ for (Object o : list) {
+ writeGenericValue(o);
+ }
+ } else if (value instanceof Map) {
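+ // LinkedHashMap gets its own tag so iteration order survives the
+ // round-trip; readGenericValue rebuilds any other Map as a HashMap.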
+ if (value instanceof LinkedHashMap) {
+ writeByte((byte) 9);
+ } else {
+ writeByte((byte) 10);
+ }
+ Map<String, Object> map = (Map<String, Object>) value;
+ writeVInt(map.size());
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ writeSharedString(entry.getKey());
+ writeGenericValue(entry.getValue());
+ }
+ } else if (type == Byte.class) {
+ writeByte((byte) 11);
+ writeByte((Byte) value);
+ } else if (type == Date.class) {
+ writeByte((byte) 12);
+ writeLong(((Date) value).getTime());
+ } else if (value instanceof ReadableInstant) {
+ writeByte((byte) 13);
+ writeLong(((ReadableInstant) value).getMillis());
+ } else if (value instanceof BytesReference) {
+ writeByte((byte) 14);
+ writeBytesReference((BytesReference) value);
+ } else if (value instanceof Text) {
+ writeByte((byte) 15);
+ writeText((Text) value);
+ } else if (type == Short.class) {
+ writeByte((byte) 16);
+ writeShort((Short) value);
+ } else if (type == int[].class) {
+ writeByte((byte) 17);
+ writePrimitiveIntArray((int[]) value);
+ } else if (type == long[].class) {
+ writeByte((byte) 18);
+ writePrimitiveLongArray((long[]) value);
+ } else if (type == float[].class) {
+ writeByte((byte) 19);
+ writePrimitiveFloatArray((float[]) value);
+ } else if (type == double[].class) {
+ writeByte((byte) 20);
+ writePrimitiveDoubleArray((double[]) value);
+ } else {
+ throw new IOException("Can't write type [" + type + "]");
+ }
+ }
+
+ private void writePrimitiveIntArray(int[] value) throws IOException {
+ writeVInt(value.length);
+ for (int i = 0; i < value.length; i++) {
+ writeInt(value[i]);
+ }
+ }
+
+ private void writePrimitiveLongArray(long[] value) throws IOException {
+ writeVInt(value.length);
+ for (int i = 0; i < value.length; i++) {
+ writeLong(value[i]);
+ }
+ }
+
+ private void writePrimitiveFloatArray(float[] value) throws IOException {
+ writeVInt(value.length);
+ for (int i = 0; i < value.length; i++) {
+ writeFloat(value[i]);
+ }
+ }
+
+ private void writePrimitiveDoubleArray(double[] value) throws IOException {
+ writeVInt(value.length);
+ for (int i = 0; i < value.length; i++) {
+ writeDouble(value[i]);
+ }
+ }
+
+ /**
+ * Writes a potentially null {@link Streamable}: a boolean presence flag
+ * followed by the value itself when present.
+ */
+ public void writeOptionalStreamable(@Nullable Streamable streamable) throws IOException {
+ if (streamable != null) {
+ writeBoolean(true);
+ streamable.writeTo(this);
+ } else {
+ writeBoolean(false);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/Streamable.java b/src/main/java/org/elasticsearch/common/io/stream/Streamable.java
new file mode 100644
index 0000000..f2b7711
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/Streamable.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import java.io.IOException;
+
+/**
+ * A unit of serialization that can read itself from a {@link StreamInput}
+ * and write itself to a {@link StreamOutput}.
+ */
+public interface Streamable {
+
+ void readFrom(StreamInput in) throws IOException;
+
+ void writeTo(StreamOutput out) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/common/jna/CLibrary.java b/src/main/java/org/elasticsearch/common/jna/CLibrary.java
new file mode 100644
index 0000000..72f367d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/jna/CLibrary.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.jna;
+
+import com.sun.jna.Native;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+
+/**
+ * JNA bindings for the C library calls (mlockall/munlockall) used to keep
+ * the process memory resident.
+ */
+public class CLibrary {
+
+ private static ESLogger logger = Loggers.getLogger(CLibrary.class);
+
+ public static final int MCL_CURRENT = 1;
+ public static final int MCL_FUTURE = 2;
+
+ public static final int ENOMEM = 12;
+
+ static {
+ try {
+ Native.register("c");
+ } catch (NoClassDefFoundError e) {
+ logger.warn("jna not found. native methods (mlockall) will be disabled.");
+ } catch (UnsatisfiedLinkError e) {
+ logger.debug("unable to link C library. native methods (mlockall) will be disabled.");
+ }
+ }
+
+ public static native int mlockall(int flags);
+
+ public static native int munlockall();
+
+ private CLibrary() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/jna/Natives.java b/src/main/java/org/elasticsearch/common/jna/Natives.java
new file mode 100644
index 0000000..33fa68d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/jna/Natives.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.jna;
+
+import com.sun.jna.Native;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.Locale;
+
+/**
+ * Helpers for invoking the native calls bound in {@link CLibrary}.
+ */
+public class Natives {
+
+ private static ESLogger logger = Loggers.getLogger(Natives.class);
+ // Set to true if the native mlockall call was successful
+ public static boolean LOCAL_MLOCKALL = false;
+
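+ // mlockall(MCL_CURRENT) asks the kernel to pin all currently mapped pages
+ // in RAM, preventing the JVM heap from being swapped out.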
+ public static void tryMlockall() {
+ int errno = Integer.MIN_VALUE;
+ try {
+ int result = CLibrary.mlockall(CLibrary.MCL_CURRENT);
+ if (result != 0) {
+ errno = Native.getLastError();
+ } else {
+ LOCAL_MLOCKALL = true;
+ }
+ } catch (UnsatisfiedLinkError e) {
+ // this will have already been logged by CLibrary, no need to repeat it
+ return;
+ }
+
+ if (errno != Integer.MIN_VALUE) {
+ if (errno == CLibrary.ENOMEM && System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("linux")) {
+ logger.warn("Unable to lock JVM memory (ENOMEM)."
+ + " This can result in part of the JVM being swapped out."
+ + " Increase RLIMIT_MEMLOCK or run elasticsearch as root.");
+ } else if (!System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("mac")) {
+ // OS X allows mlockall to be called, but always returns an error
+ logger.warn("Unknown mlockall error " + errno);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/joda/DateMathParser.java b/src/main/java/org/elasticsearch/common/joda/DateMathParser.java
new file mode 100644
index 0000000..12d0d42
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/joda/DateMathParser.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.joda;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.joda.time.DateTimeZone;
+import org.joda.time.MutableDateTime;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Parses date math expressions such as "now+1h" or "2014-01-01||/d".
+ */
+public class DateMathParser {
+
+ private final FormatDateTimeFormatter dateTimeFormatter;
+
+ private final TimeUnit timeUnit;
+
+ public DateMathParser(FormatDateTimeFormatter dateTimeFormatter, TimeUnit timeUnit) {
+ this.dateTimeFormatter = dateTimeFormatter;
+ this.timeUnit = timeUnit;
+ }
+
+ public long parse(String text, long now) {
+ return parse(text, now, false);
+ }
+
+ public long parseRoundCeil(String text, long now) {
+ return parse(text, now, true);
+ }
+
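+ // An expression is either "now" or "<date>||", followed by zero or more
+ // operations: "+1h" adds, "-2d" subtracts, and "/d" rounds (down, or up
+ // when roundCeil is set). For example "2014-01-01||+1M/d" parses the
+ // date, adds one month, then rounds to the start of that day.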
+ public long parse(String text, long now, boolean roundCeil) {
+ long time;
+ String mathString;
+ if (text.startsWith("now")) {
+ time = now;
+ mathString = text.substring("now".length());
+ } else {
+ int index = text.indexOf("||");
+ String parseString;
+ if (index == -1) {
+ parseString = text;
+ mathString = ""; // nothing else
+ } else {
+ parseString = text.substring(0, index);
+ mathString = text.substring(index + 2);
+ }
+ if (roundCeil) {
+ time = parseRoundCeilStringValue(parseString);
+ } else {
+ time = parseStringValue(parseString);
+ }
+ }
+
+ if (mathString.isEmpty()) {
+ return time;
+ }
+
+ return parseMath(mathString, time, roundCeil);
+ }
+
+ private long parseMath(String mathString, long time, boolean roundUp) throws ElasticsearchParseException {
+ MutableDateTime dateTime = new MutableDateTime(time, DateTimeZone.UTC);
+ try {
+ for (int i = 0; i < mathString.length(); ) {
+ char c = mathString.charAt(i++);
+ int type;
+ if (c == '/') {
+ type = 0;
+ } else if (c == '+') {
+ type = 1;
+ } else if (c == '-') {
+ type = 2;
+ } else {
+ throw new ElasticsearchParseException("operator not supported for date math [" + mathString + "]");
+ }
+
+ int num;
+ if (!Character.isDigit(mathString.charAt(i))) {
+ num = 1;
+ } else {
+ int numFrom = i;
+ while (Character.isDigit(mathString.charAt(i))) {
+ i++;
+ }
+ num = Integer.parseInt(mathString.substring(numFrom, i));
+ }
+ if (type == 0) {
+ // rounding does not take a multiplier: "/d" is valid, "/2d" is not
+ if (num != 1) {
+ throw new ElasticsearchParseException("rounding `/` can only be used on single unit types [" + mathString + "]");
+ }
+ }
+ char unit = mathString.charAt(i++);
+ switch (unit) {
+ case 'y':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.yearOfCentury().roundCeiling();
+ } else {
+ dateTime.yearOfCentury().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addYears(num);
+ } else if (type == 2) {
+ dateTime.addYears(-num);
+ }
+ break;
+ case 'M':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.monthOfYear().roundCeiling();
+ } else {
+ dateTime.monthOfYear().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addMonths(num);
+ } else if (type == 2) {
+ dateTime.addMonths(-num);
+ }
+ break;
+ case 'w':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.weekOfWeekyear().roundCeiling();
+ } else {
+ dateTime.weekOfWeekyear().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addWeeks(num);
+ } else if (type == 2) {
+ dateTime.addWeeks(-num);
+ }
+ break;
+ case 'd':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.dayOfMonth().roundCeiling();
+ } else {
+ dateTime.dayOfMonth().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addDays(num);
+ } else if (type == 2) {
+ dateTime.addDays(-num);
+ }
+ break;
+ case 'h':
+ case 'H':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.hourOfDay().roundCeiling();
+ } else {
+ dateTime.hourOfDay().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addHours(num);
+ } else if (type == 2) {
+ dateTime.addHours(-num);
+ }
+ break;
+ case 'm':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.minuteOfHour().roundCeiling();
+ } else {
+ dateTime.minuteOfHour().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addMinutes(num);
+ } else if (type == 2) {
+ dateTime.addMinutes(-num);
+ }
+ break;
+ case 's':
+ if (type == 0) {
+ if (roundUp) {
+ dateTime.secondOfMinute().roundCeiling();
+ } else {
+ dateTime.secondOfMinute().roundFloor();
+ }
+ } else if (type == 1) {
+ dateTime.addSeconds(num);
+ } else if (type == 2) {
+ dateTime.addSeconds(-num);
+ }
+ break;
+ default:
+ throw new ElasticsearchParseException("unit [" + unit + "] not supported for date math [" + mathString + "]");
+ }
+ }
+ } catch (Exception e) {
+ if (e instanceof ElasticsearchParseException) {
+ throw (ElasticsearchParseException) e;
+ }
+ throw new ElasticsearchParseException("failed to parse date math [" + mathString + "]");
+ }
+ return dateTime.getMillis();
+ }
+
+ private long parseStringValue(String value) {
+ try {
+ return dateTimeFormatter.parser().parseMillis(value);
+ } catch (RuntimeException e) {
+ try {
+ long time = Long.parseLong(value);
+ return timeUnit.toMillis(time);
+ } catch (NumberFormatException e1) {
+ throw new ElasticsearchParseException("failed to parse date field [" + value + "], tried both date format [" + dateTimeFormatter.format() + "], and timestamp number", e);
+ }
+ }
+ }
+
+ private long parseRoundCeilStringValue(String value) {
+ try {
+ // seed a date time for an inclusive upper bound: fields the pattern leaves
+ // unset default to their maximum, so "2011-01-01" covers the whole first
+ // day of 2011. 1970-01-01 is used as the base so that time-only values
+ // such as "10:12:55" still parse, since that is the base used at index time
+ MutableDateTime dateTime = new MutableDateTime(1970, 1, 1, 23, 59, 59, 999, DateTimeZone.UTC);
+ int location = dateTimeFormatter.parser().parseInto(dateTime, value, 0);
+ // if the whole string was consumed, the parse succeeded
+ if (location == value.length()) {
+ return dateTime.getMillis();
+ }
+ // if parsing failed, or produced an implausibly distant year,
+ // fall back to treating the value as a raw timestamp number
+ if (location <= 0 || dateTime.getYear() > 5000) {
+ try {
+ long time = Long.parseLong(value);
+ return timeUnit.toMillis(time);
+ } catch (NumberFormatException e1) {
+ throw new ElasticsearchParseException("failed to parse date field [" + value + "], tried both date format [" + dateTimeFormatter.format() + "], and timestamp number");
+ }
+ }
+ return dateTime.getMillis();
+ } catch (RuntimeException e) {
+ try {
+ long time = Long.parseLong(value);
+ return timeUnit.toMillis(time);
+ } catch (NumberFormatException e1) {
+ throw new ElasticsearchParseException("failed to parse date field [" + value + "], tried both date format [" + dateTimeFormatter.format() + "], and timestamp number", e);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/joda/FormatDateTimeFormatter.java b/src/main/java/org/elasticsearch/common/joda/FormatDateTimeFormatter.java
new file mode 100644
index 0000000..d29687e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/joda/FormatDateTimeFormatter.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.joda;
+
+import java.util.Locale;
+
+import org.joda.time.format.DateTimeFormatter;
+
+/**
+ * A simple wrapper around {@link DateTimeFormatter} that retains the
+ * format that was used to create it.
+ */
+public class FormatDateTimeFormatter {
+
+ private final String format;
+
+ private final DateTimeFormatter parser;
+
+ private final DateTimeFormatter printer;
+
+ private final Locale locale;
+
+ public FormatDateTimeFormatter(String format, DateTimeFormatter parser, Locale locale) {
+ this(format, parser, parser, locale);
+ }
+
+ public FormatDateTimeFormatter(String format, DateTimeFormatter parser, DateTimeFormatter printer, Locale locale) {
+ this.format = format;
+ this.locale = locale;
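+ // A default year of 1970 lets time-only patterns (e.g. "HH:mm:ss")
+ // resolve onto the epoch day, matching the base used when such values
+ // are indexed.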
+ this.printer = locale == null ? printer.withDefaultYear(1970) : printer.withLocale(locale).withDefaultYear(1970);
+ this.parser = locale == null ? parser.withDefaultYear(1970) : parser.withLocale(locale).withDefaultYear(1970);
+ }
+
+ public String format() {
+ return format;
+ }
+
+ public DateTimeFormatter parser() {
+ return parser;
+ }
+
+ public DateTimeFormatter printer() {
+ return this.printer;
+ }
+
+ public Locale locale() {
+ return locale;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/joda/Joda.java b/src/main/java/org/elasticsearch/common/joda/Joda.java
new file mode 100644
index 0000000..c5bb25c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/joda/Joda.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.joda;
+
+import java.util.Locale;
+
+import org.elasticsearch.common.Strings;
+import org.joda.time.*;
+import org.joda.time.field.DividedDateTimeField;
+import org.joda.time.field.OffsetDateTimeField;
+import org.joda.time.field.ScaledDurationField;
+import org.joda.time.format.*;
+
+/**
+ * Helpers for building Joda-Time formatters from Elasticsearch date
+ * format names and patterns.
+ */
+public class Joda {
+
+ public static FormatDateTimeFormatter forPattern(String input) {
+ return forPattern(input, Locale.ROOT);
+ }
+
+ /**
+ * Resolves a Joda-based pattern, including named formats (similar to the
+ * built-in Joda ISO ones), into a {@link FormatDateTimeFormatter}.
+ */
+ public static FormatDateTimeFormatter forPattern(String input, Locale locale) {
+ if (Strings.hasLength(input)) {
+ input = input.trim();
+ }
+ if (input == null || input.length() == 0) {
+ throw new IllegalArgumentException("No date pattern provided");
+ }
+
+ DateTimeFormatter formatter;
+ if ("basicDate".equals(input) || "basic_date".equals(input)) {
+ formatter = ISODateTimeFormat.basicDate();
+ } else if ("basicDateTime".equals(input) || "basic_date_time".equals(input)) {
+ formatter = ISODateTimeFormat.basicDateTime();
+ } else if ("basicDateTimeNoMillis".equals(input) || "basic_date_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.basicDateTimeNoMillis();
+ } else if ("basicOrdinalDate".equals(input) || "basic_ordinal_date".equals(input)) {
+ formatter = ISODateTimeFormat.basicOrdinalDate();
+ } else if ("basicOrdinalDateTime".equals(input) || "basic_ordinal_date_time".equals(input)) {
+ formatter = ISODateTimeFormat.basicOrdinalDateTime();
+ } else if ("basicOrdinalDateTimeNoMillis".equals(input) || "basic_ordinal_date_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.basicOrdinalDateTimeNoMillis();
+ } else if ("basicTime".equals(input) || "basic_time".equals(input)) {
+ formatter = ISODateTimeFormat.basicTime();
+ } else if ("basicTimeNoMillis".equals(input) || "basic_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.basicTimeNoMillis();
+ } else if ("basicTTime".equals(input) || "basic_t_Time".equals(input)) {
+ formatter = ISODateTimeFormat.basicTTime();
+ } else if ("basicTTimeNoMillis".equals(input) || "basic_t_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.basicTTimeNoMillis();
+ } else if ("basicWeekDate".equals(input) || "basic_week_date".equals(input)) {
+ formatter = ISODateTimeFormat.basicWeekDate();
+ } else if ("basicWeekDateTime".equals(input) || "basic_week_date_time".equals(input)) {
+ formatter = ISODateTimeFormat.basicWeekDateTime();
+ } else if ("basicWeekDateTimeNoMillis".equals(input) || "basic_week_date_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.basicWeekDateTimeNoMillis();
+ } else if ("date".equals(input)) {
+ formatter = ISODateTimeFormat.date();
+ } else if ("dateHour".equals(input) || "date_hour".equals(input)) {
+ formatter = ISODateTimeFormat.dateHour();
+ } else if ("dateHourMinute".equals(input) || "date_hour_minute".equals(input)) {
+ formatter = ISODateTimeFormat.dateHourMinute();
+ } else if ("dateHourMinuteSecond".equals(input) || "date_hour_minute_second".equals(input)) {
+ formatter = ISODateTimeFormat.dateHourMinuteSecond();
+ } else if ("dateHourMinuteSecondFraction".equals(input) || "date_hour_minute_second_fraction".equals(input)) {
+ formatter = ISODateTimeFormat.dateHourMinuteSecondFraction();
+ } else if ("dateHourMinuteSecondMillis".equals(input) || "date_hour_minute_second_millis".equals(input)) {
+ formatter = ISODateTimeFormat.dateHourMinuteSecondMillis();
+ } else if ("dateOptionalTime".equals(input) || "date_optional_time".equals(input)) {
+ // this case needs a separate parser and printer, since the
+ // dateOptionalTimeParser cannot print; ideally the root locale would be
+ // the default rather than depending on the node's locale
+ return new FormatDateTimeFormatter(input,
+ ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC),
+ ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC), locale);
+ } else if ("dateTime".equals(input) || "date_time".equals(input)) {
+ formatter = ISODateTimeFormat.dateTime();
+ } else if ("dateTimeNoMillis".equals(input) || "date_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.dateTimeNoMillis();
+ } else if ("hour".equals(input)) {
+ formatter = ISODateTimeFormat.hour();
+ } else if ("hourMinute".equals(input) || "hour_minute".equals(input)) {
+ formatter = ISODateTimeFormat.hourMinute();
+ } else if ("hourMinuteSecond".equals(input) || "hour_minute_second".equals(input)) {
+ formatter = ISODateTimeFormat.hourMinuteSecond();
+ } else if ("hourMinuteSecondFraction".equals(input) || "hour_minute_second_fraction".equals(input)) {
+ formatter = ISODateTimeFormat.hourMinuteSecondFraction();
+ } else if ("hourMinuteSecondMillis".equals(input) || "hour_minute_second_millis".equals(input)) {
+ formatter = ISODateTimeFormat.hourMinuteSecondMillis();
+ } else if ("ordinalDate".equals(input) || "ordinal_date".equals(input)) {
+ formatter = ISODateTimeFormat.ordinalDate();
+ } else if ("ordinalDateTime".equals(input) || "ordinal_date_time".equals(input)) {
+ formatter = ISODateTimeFormat.ordinalDateTime();
+ } else if ("ordinalDateTimeNoMillis".equals(input) || "ordinal_date_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.ordinalDateTimeNoMillis();
+ } else if ("time".equals(input)) {
+ formatter = ISODateTimeFormat.time();
+ } else if ("tTime".equals(input) || "t_time".equals(input)) {
+ formatter = ISODateTimeFormat.tTime();
+ } else if ("tTimeNoMillis".equals(input) || "t_time_no_millis".equals(input)) {
+ formatter = ISODateTimeFormat.tTimeNoMillis();
+ } else if ("weekDate".equals(input) || "week_date".equals(input)) {
+ formatter = ISODateTimeFormat.weekDate();
+ } else if ("weekDateTime".equals(input) || "week_date_time".equals(input)) {
+ formatter = ISODateTimeFormat.weekDateTime();
+ } else if ("weekyear".equals(input) || "week_year".equals(input)) {
+ formatter = ISODateTimeFormat.weekyear();
+ } else if ("weekyearWeek".equals(input)) {
+ formatter = ISODateTimeFormat.weekyearWeek();
+ } else if ("year".equals(input)) {
+ formatter = ISODateTimeFormat.year();
+ } else if ("yearMonth".equals(input) || "year_month".equals(input)) {
+ formatter = ISODateTimeFormat.yearMonth();
+ } else if ("yearMonthDay".equals(input) || "year_month_day".equals(input)) {
+ formatter = ISODateTimeFormat.yearMonthDay();
+ } else if (Strings.hasLength(input) && input.contains("||")) {
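+ // "||"-separated input combines several formats: the first one supplies
+ // the printer, and every format is tried in turn when parsing.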
+ String[] formats = Strings.delimitedListToStringArray(input, "||");
+ DateTimeParser[] parsers = new DateTimeParser[formats.length];
+
+ if (formats.length == 1) {
+ formatter = forPattern(input, locale).parser();
+ } else {
+ DateTimeFormatter dateTimeFormatter = null;
+ for (int i = 0; i < formats.length; i++) {
+ DateTimeFormatter currentFormatter = forPattern(formats[i], locale).parser();
+ if (dateTimeFormatter == null) {
+ dateTimeFormatter = currentFormatter;
+ }
+ parsers[i] = currentFormatter.getParser();
+ }
+
+ DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers);
+ formatter = builder.toFormatter();
+ }
+ } else {
+ try {
+ formatter = DateTimeFormat.forPattern(input);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException("Invalid format: [" + input + "]: " + e.getMessage(), e);
+ }
+ }
+
+ return new FormatDateTimeFormatter(input, formatter.withZone(DateTimeZone.UTC), locale);
+ }
+
+
+ public static final DurationFieldType Quarters = new DurationFieldType("quarters") {
+ private static final long serialVersionUID = -8167713675442491871L;
+
+ public DurationField getField(Chronology chronology) {
+ return new ScaledDurationField(chronology.months(), Quarters, 3);
+ }
+ };
+
+ public static final DateTimeFieldType QuarterOfYear = new DateTimeFieldType("quarterOfYear") {
+ private static final long serialVersionUID = -5677872459807379123L;
+
+ public DurationFieldType getDurationType() {
+ return Quarters;
+ }
+
+ public DurationFieldType getRangeDurationType() {
+ return DurationFieldType.years();
+ }
+
+ public DateTimeField getField(Chronology chronology) {
+ return new OffsetDateTimeField(new DividedDateTimeField(new OffsetDateTimeField(chronology.monthOfYear(), -1), QuarterOfYear, 3), 1);
+ }
+ };
+}
diff --git a/src/main/java/org/elasticsearch/common/lease/Releasable.java b/src/main/java/org/elasticsearch/common/lease/Releasable.java
new file mode 100644
index 0000000..b4f2888
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lease/Releasable.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lease;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * A resource that can be released when it is no longer needed.
+ */
+public interface Releasable {
+
+ boolean release() throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/common/lease/Releasables.java b/src/main/java/org/elasticsearch/common/lease/Releasables.java
new file mode 100644
index 0000000..b0b35c7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lease/Releasables.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lease;
+
+import java.util.Arrays;
+
+/** Utility methods to work with {@link Releasable}s. */
+public enum Releasables {
+ ;
+
+ private static void rethrow(Throwable t) {
+ if (t instanceof RuntimeException) {
+ throw (RuntimeException) t;
+ }
+ if (t instanceof Error) {
+ throw (Error) t;
+ }
+ throw new RuntimeException(t);
+ }
+
+ private static void release(Iterable<Releasable> releasables, boolean ignoreException) {
+ Throwable th = null;
+ for (Releasable releasable : releasables) {
+ if (releasable != null) {
+ try {
+ releasable.release();
+ } catch (Throwable t) {
+ if (th == null) {
+ th = t;
+ }
+ }
+ }
+ }
+ if (th != null && !ignoreException) {
+ rethrow(th);
+ }
+ }
+
+ /** Release the provided {@link Releasable}s. */
+ public static void release(Iterable<Releasable> releasables) {
+ release(releasables, false);
+ }
+
+ /** Release the provided {@link Releasable}s. */
+ public static void release(Releasable... releasables) {
+ release(Arrays.asList(releasables));
+ }
+
+ /** Release the provided {@link Releasable}s, ignoring exceptions. */
+ public static void releaseWhileHandlingException(Iterable<Releasable> releasables) {
+ release(releasables, true);
+ }
+
+ /** Release the provided {@link Releasable}s, ignoring exceptions. */
+ public static void releaseWhileHandlingException(Releasable... releasables) {
+ releaseWhileHandlingException(Arrays.asList(releasables));
+ }
+
+ /** Release the provided {@link Releasable}s, ignoring exceptions if <code>success</code> is <tt>false</tt>. */
+ public static void release(boolean success, Iterable<Releasable> releasables) {
+ if (success) {
+ release(releasables);
+ } else {
+ releaseWhileHandlingException(releasables);
+ }
+ }
+
+ /** Release the provided {@link Releasable}s, ignoring exceptions if <code>success</code> is <tt>false</tt>. */
+ public static void release(boolean success, Releasable... releasables) {
+ release(success, Arrays.asList(releasables));
+ }
+}
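
The success-flag overloads above are meant for try/finally blocks: on the happy path a failed release propagates, while on an error path it is swallowed so it cannot mask the original exception. A sketch of that pattern (Buffer is a made-up stand-in for any Releasable resource):

    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.lease.Releasables;

    public class ReleasablesSketch {
        // A trivial Releasable used only for illustration.
        static class Buffer implements Releasable {
            @Override
            public boolean release() throws ElasticsearchException {
                System.out.println("buffer released");
                return true;
            }
        }

        public static void main(String[] args) {
            Buffer a = new Buffer();
            Buffer b = new Buffer();
            boolean success = false;
            try {
                // ... work with a and b ...
                success = true;
            } finally {
                // Propagates release failures only when the work itself succeeded.
                Releasables.release(success, a, b);
            }
        }
    }
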
diff --git a/src/main/java/org/elasticsearch/common/logging/ESLogger.java b/src/main/java/org/elasticsearch/common/logging/ESLogger.java
new file mode 100644
index 0000000..2136236
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/ESLogger.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+/**
+ * Elasticsearch's logging contract, decoupling callers from the underlying logging implementation.
+ */
+public interface ESLogger {
+
+ String getPrefix();
+
+ String getName();
+
+ /**
+ * Sets the logger level.
+ * If the new level is null, the logger will inherit its level
+ * from its nearest ancestor with a specific (non-null) level value.
+ * @param level the new level
+ */
+ void setLevel(String level);
+
+ /**
+ * Returns the current logger level.
+ * If the level is null, the logger inherits its level
+ * from its nearest ancestor with a specific (non-null) level value.
+ * @return the logger level
+ */
+ String getLevel();
+
+ /**
+ * Returns {@code true} if TRACE level logging is enabled.
+ */
+ boolean isTraceEnabled();
+
+ /**
+ * Returns {@code true} if DEBUG level logging is enabled.
+ */
+ boolean isDebugEnabled();
+
+ /**
+ * Returns {@code true} if INFO level logging is enabled.
+ */
+ boolean isInfoEnabled();
+
+ /**
+ * Returns {@code true} if WARN level logging is enabled.
+ */
+ boolean isWarnEnabled();
+
+ /**
+ * Returns {@code true} if ERROR level logging is enabled.
+ */
+ boolean isErrorEnabled();
+
+ /**
+ * Logs a TRACE level message.
+ */
+ void trace(String msg, Object... params);
+
+ /**
+ * Logs a TRACE level message.
+ */
+ void trace(String msg, Throwable cause, Object... params);
+
+ /**
+ * Logs a DEBUG level message.
+ */
+ void debug(String msg, Object... params);
+
+ /**
+ * Logs a DEBUG level message.
+ */
+ void debug(String msg, Throwable cause, Object... params);
+
+ /**
+ * Logs an INFO level message.
+ */
+ void info(String msg, Object... params);
+
+ /**
+ * Logs an INFO level message.
+ */
+ void info(String msg, Throwable cause, Object... params);
+
+ /**
+ * Logs a WARN level message.
+ */
+ void warn(String msg, Object... params);
+
+ /**
+ * Logs a WARN level message.
+ */
+ void warn(String msg, Throwable cause, Object... params);
+
+ /**
+ * Logs an ERROR level message.
+ */
+ void error(String msg, Object... params);
+
+ /**
+ * Logs an ERROR level message.
+ */
+ void error(String msg, Throwable cause, Object... params);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
new file mode 100644
index 0000000..d24497c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.elasticsearch.common.logging.jdk.JdkESLoggerFactory;
+import org.elasticsearch.common.logging.log4j.Log4jESLoggerFactory;
+import org.elasticsearch.common.logging.slf4j.Slf4jESLoggerFactory;
+
+/**
+ * Factory for {@link ESLogger}s; prefers log4j, then slf4j, falling back to JDK logging.
+ */
+public abstract class ESLoggerFactory {
+
+ private static volatile ESLoggerFactory defaultFactory = new JdkESLoggerFactory();
+
+ static {
+ try {
+ Class<?> loggerClazz = Class.forName("org.apache.log4j.Logger");
+ // the call below fails with a NoSuchMethodException when the slf4j log4j bridge is in use
+ loggerClazz.getMethod("setLevel", Class.forName("org.apache.log4j.Level"));
+ defaultFactory = new Log4jESLoggerFactory();
+ } catch (Throwable e) {
+ // no log4j
+ try {
+ Class.forName("org.slf4j.Logger");
+ defaultFactory = new Slf4jESLoggerFactory();
+ } catch (Throwable e1) {
+ // no slf4j
+ }
+ }
+ }
+
+ /**
+ * Changes the default factory.
+ */
+ public static void setDefaultFactory(ESLoggerFactory defaultFactory) {
+ if (defaultFactory == null) {
+ throw new NullPointerException("defaultFactory");
+ }
+ ESLoggerFactory.defaultFactory = defaultFactory;
+ }
+
+
+ public static ESLogger getLogger(String prefix, String name) {
+ return defaultFactory.newInstance(prefix == null ? null : prefix.intern(), name.intern());
+ }
+
+ public static ESLogger getLogger(String name) {
+ return defaultFactory.newInstance(name.intern());
+ }
+
+ public static ESLogger getRootLogger() {
+ return defaultFactory.rootLogger();
+ }
+
+ public ESLogger newInstance(String name) {
+ return newInstance(null, name);
+ }
+
+ protected abstract ESLogger rootLogger();
+
+ protected abstract ESLogger newInstance(String prefix, String name);
+}
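
A short usage sketch of the factory; the prefix and logger name here are arbitrary illustrations:

    import org.elasticsearch.common.logging.ESLogger;
    import org.elasticsearch.common.logging.ESLoggerFactory;
    import org.elasticsearch.common.logging.jdk.JdkESLoggerFactory;

    public class LoggerFactorySketch {
        public static void main(String[] args) {
            // Override whatever backend was auto-detected at class-load time.
            ESLoggerFactory.setDefaultFactory(new JdkESLoggerFactory());

            // The optional prefix is prepended to every message by the logger.
            ESLogger logger = ESLoggerFactory.getLogger("[node1] ", "org.elasticsearch.demo");
            logger.info("node started");
        }
    }
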
diff --git a/src/main/java/org/elasticsearch/common/logging/Loggers.java b/src/main/java/org/elasticsearch/common/logging/Loggers.java
new file mode 100644
index 0000000..5b10095
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/Loggers.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Classes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.river.RiverName;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static java.util.Arrays.asList;
+
+/**
+ * A set of utilities around logging: builds logger names and the
+ * bracketed message prefixes (e.g. "[index][shard] ") used across
+ * Elasticsearch components.
+ */
+public class Loggers {
+
+ private final static String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
+
+ public static final String SPACE = " ";
+
+ private static boolean consoleLoggingEnabled = true;
+
+ public static void disableConsoleLogging() {
+ consoleLoggingEnabled = false;
+ }
+
+ public static void enableConsoleLogging() {
+ consoleLoggingEnabled = true;
+ }
+
+ public static boolean consoleLoggingEnabled() {
+ return consoleLoggingEnabled;
+ }
+
+ public static ESLogger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) {
+ return getLogger(clazz, settings, shardId.index(), Lists.asList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
+ }
+
+ public static ESLogger getLogger(Class clazz, Settings settings, Index index, String... prefixes) {
+ return getLogger(clazz, settings, Lists.asList(SPACE, index.name(), prefixes).toArray(new String[0]));
+ }
+
+ public static ESLogger getLogger(Class clazz, Settings settings, RiverName riverName, String... prefixes) {
+ List<String> l = Lists.newArrayList();
+ l.add(SPACE);
+ l.add(riverName.type());
+ l.add(riverName.name());
+ l.addAll(Lists.newArrayList(prefixes));
+ return getLogger(clazz, settings, l.toArray(new String[l.size()]));
+ }
+
+ public static ESLogger getLogger(Class clazz, Settings settings, String... prefixes) {
+ return getLogger(buildClassLoggerName(clazz), settings, prefixes);
+ }
+
+ public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
+ List<String> prefixesList = newArrayList();
+ if (settings.getAsBoolean("logger.logHostAddress", false)) {
+ try {
+ prefixesList.add(InetAddress.getLocalHost().getHostAddress());
+ } catch (UnknownHostException e) {
+ // ignore
+ }
+ }
+ if (settings.getAsBoolean("logger.logHostName", false)) {
+ try {
+ prefixesList.add(InetAddress.getLocalHost().getHostName());
+ } catch (UnknownHostException e) {
+ // ignore
+ }
+ }
+ String name = settings.get("name");
+ if (name != null) {
+ prefixesList.add(name);
+ }
+ if (prefixes != null && prefixes.length > 0) {
+ prefixesList.addAll(asList(prefixes));
+ }
+ return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
+ }
+
+ public static ESLogger getLogger(ESLogger parentLogger, String s) {
+ return ESLoggerFactory.getLogger(parentLogger.getPrefix(), getLoggerName(parentLogger.getName() + s));
+ }
+
+ public static ESLogger getLogger(String s) {
+ return ESLoggerFactory.getLogger(getLoggerName(s));
+ }
+
+ public static ESLogger getLogger(Class clazz) {
+ return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
+ }
+
+ public static ESLogger getLogger(Class clazz, String... prefixes) {
+ return getLogger(buildClassLoggerName(clazz), prefixes);
+ }
+
+ public static ESLogger getLogger(String name, String... prefixes) {
+ String prefix = null;
+ if (prefixes != null && prefixes.length > 0) {
+ StringBuilder sb = new StringBuilder();
+ for (String prefixX : prefixes) {
+ if (prefixX != null) {
+ if (prefixX.equals(SPACE)) {
+ sb.append(" ");
+ } else {
+ sb.append("[").append(prefixX).append("]");
+ }
+ }
+ }
+ if (sb.length() > 0) {
+ sb.append(" ");
+ prefix = sb.toString();
+ }
+ }
+ return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
+ }
+
+ private static String buildClassLoggerName(Class clazz) {
+ String name = clazz.getName();
+ if (name.startsWith("org.elasticsearch.")) {
+ name = Classes.getPackageName(clazz);
+ }
+ return name;
+ }
+
+ private static String getLoggerName(String name) {
+ if (name.startsWith("org.elasticsearch.")) {
+ name = name.substring("org.elasticsearch.".length());
+ }
+ return commonPrefix + name;
+ }
+}
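
Each prefix handed to getLogger is wrapped in brackets and concatenated (SPACE entries become a literal space), so messages lead with markers like "[my-index][0] ". A sketch with illustrative names:

    import org.elasticsearch.common.logging.ESLogger;
    import org.elasticsearch.common.logging.Loggers;

    public class LoggersSketch {
        public static void main(String[] args) {
            // Produces a logger named "org.elasticsearch.index.shard" whose
            // messages carry the "[my-index][0] " prefix.
            ESLogger logger = Loggers.getLogger("org.elasticsearch.index.shard", "my-index", "0");
            logger.info("shard started");
        }
    }
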
diff --git a/src/main/java/org/elasticsearch/common/logging/jdk/JdkESLogger.java b/src/main/java/org/elasticsearch/common/logging/jdk/JdkESLogger.java
new file mode 100644
index 0000000..786f1ae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/jdk/JdkESLogger.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.jdk;
+
+import org.elasticsearch.common.logging.support.AbstractESLogger;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * A logger implementation backed by {@code java.util.logging}.
+ */
+public class JdkESLogger extends AbstractESLogger {
+
+ private final Logger logger;
+
+ private final String name;
+
+ public JdkESLogger(String prefix, String name, Logger logger) {
+ super(prefix);
+ this.logger = logger;
+ this.name = name;
+ }
+
+ @Override
+ public void setLevel(String level) {
+ if (level == null) {
+ logger.setLevel(null);
+ } else if ("error".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.SEVERE);
+ } else if ("warn".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.WARNING);
+ } else if ("info".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.INFO);
+ } else if ("debug".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.FINE);
+ } else if ("trace".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.FINE);
+ }
+ }
+
+ @Override
+ public String getLevel() {
+ if (logger.getLevel() == null) {
+ return null;
+ }
+ return logger.getLevel().toString();
+ }
+
+ @Override
+ public String getName() {
+ return logger.getName();
+ }
+
+ @Override
+ public boolean isTraceEnabled() {
+ return logger.isLoggable(Level.FINEST);
+ }
+
+ @Override
+ public boolean isDebugEnabled() {
+ return logger.isLoggable(Level.FINE);
+ }
+
+ @Override
+ public boolean isInfoEnabled() {
+ return logger.isLoggable(Level.INFO);
+ }
+
+ @Override
+ public boolean isWarnEnabled() {
+ return logger.isLoggable(Level.WARNING);
+ }
+
+ @Override
+ public boolean isErrorEnabled() {
+ return logger.isLoggable(Level.SEVERE);
+ }
+
+ @Override
+ protected void internalTrace(String msg) {
+ logger.logp(Level.FINEST, name, null, msg);
+ }
+
+ @Override
+ protected void internalTrace(String msg, Throwable cause) {
+ logger.logp(Level.FINEST, name, null, msg, cause);
+ }
+
+ @Override
+ protected void internalDebug(String msg) {
+ logger.logp(Level.FINE, name, null, msg);
+ }
+
+ @Override
+ protected void internalDebug(String msg, Throwable cause) {
+ logger.logp(Level.FINE, name, null, msg, cause);
+ }
+
+ @Override
+ protected void internalInfo(String msg) {
+ logger.logp(Level.INFO, name, null, msg);
+ }
+
+ @Override
+ protected void internalInfo(String msg, Throwable cause) {
+ logger.logp(Level.INFO, name, null, msg, cause);
+ }
+
+ @Override
+ protected void internalWarn(String msg) {
+ logger.logp(Level.WARNING, name, null, msg);
+ }
+
+ @Override
+ protected void internalWarn(String msg, Throwable cause) {
+ logger.logp(Level.WARNING, name, null, msg, cause);
+ }
+
+ @Override
+ protected void internalError(String msg) {
+ logger.logp(Level.SEVERE, name, null, msg);
+ }
+
+ @Override
+ protected void internalError(String msg, Throwable cause) {
+ logger.logp(Level.SEVERE, name, null, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/jdk/JdkESLoggerFactory.java b/src/main/java/org/elasticsearch/common/logging/jdk/JdkESLoggerFactory.java
new file mode 100644
index 0000000..fae62ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/jdk/JdkESLoggerFactory.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.jdk;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+
+import java.util.logging.LogManager;
+
+/**
+ * A factory creating {@link JdkESLogger} instances backed by java.util.logging.
+ */
+public class JdkESLoggerFactory extends ESLoggerFactory {
+
+ @Override
+ protected ESLogger rootLogger() {
+ return getLogger("");
+ }
+
+ @Override
+ protected ESLogger newInstance(String prefix, String name) {
+ final java.util.logging.Logger logger = java.util.logging.Logger.getLogger(name);
+ return new JdkESLogger(prefix, name, logger);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/log4j/ConsoleAppender.java b/src/main/java/org/elasticsearch/common/logging/log4j/ConsoleAppender.java
new file mode 100644
index 0000000..cbeb5a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/log4j/ConsoleAppender.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.log4j;
+
+import org.apache.log4j.Layout;
+import org.apache.log4j.WriterAppender;
+import org.apache.log4j.helpers.LogLog;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * ConsoleAppender appends log events to <code>System.out</code> or
+ * <code>System.err</code> using a layout specified by the user. The
+ * default target is <code>System.out</code>.
+ * <p>
+ * Elasticsearch: adapted from log4j so that console logging can be disabled globally
+ * (see {@link Loggers#disableConsoleLogging()}).
+ *
+ * @author Ceki G&uuml;lc&uuml;
+ * @author Curt Arnold
+ * @since 1.1
+ */
+public class ConsoleAppender extends WriterAppender {
+
+ public static final String SYSTEM_OUT = "System.out";
+ public static final String SYSTEM_ERR = "System.err";
+
+ protected String target = SYSTEM_OUT;
+
+ /**
+ * Determines if the appender honors reassignments of System.out
+ * or System.err made after configuration.
+ */
+ private boolean follow = true;
+
+ /**
+ * Constructs an unconfigured appender.
+ */
+ public ConsoleAppender() {
+ }
+
+ /**
+ * Creates a configured appender.
+ *
+ * @param layout layout, may not be null.
+ */
+ public ConsoleAppender(Layout layout) {
+ this(layout, SYSTEM_OUT);
+ }
+
+ /**
+ * Creates a configured appender.
+ *
+ * @param layout layout, may not be null.
+ * @param target target, either "System.err" or "System.out".
+ */
+ public ConsoleAppender(Layout layout, String target) {
+ setLayout(layout);
+ setTarget(target);
+ activateOptions();
+ }
+
+ /**
+ * Sets the value of the <b>Target</b> option. Recognized values
+ * are "System.out" and "System.err". Any other value will be
+ * ignored.
+ */
+ public void setTarget(String value) {
+ String v = value.trim();
+
+ if (SYSTEM_OUT.equalsIgnoreCase(v)) {
+ target = SYSTEM_OUT;
+ } else if (SYSTEM_ERR.equalsIgnoreCase(v)) {
+ target = SYSTEM_ERR;
+ } else {
+ targetWarn(value);
+ }
+ }
+
+ /**
+ * Returns the current value of the <b>Target</b> property. The
+ * default value of the option is "System.out".
+ * <p>
+ * See also {@link #setTarget}.
+ */
+ public String getTarget() {
+ return target;
+ }
+
+ /**
+ * Sets whether the appender honors reassignments of System.out
+ * or System.err made after configuration.
+ *
+ * @param newValue if true, appender will use value of System.out or
+ * System.err in force at the time when logging events are appended.
+ * @since 1.2.13
+ */
+ public final void setFollow(final boolean newValue) {
+ follow = newValue;
+ }
+
+ /**
+ * Gets whether the appender honors reassignments of System.out
+ * or System.err made after configuration.
+ *
+ * @return true if appender will use value of System.out or
+ * System.err in force at the time when logging events are appended.
+ * @since 1.2.13
+ */
+ public final boolean getFollow() {
+ return follow;
+ }
+
+ void targetWarn(String val) {
+ LogLog.warn("[" + val + "] should be System.out or System.err.");
+ LogLog.warn("Using previously set target, System.out by default.");
+ }
+
+ /**
+ * Prepares the appender for use.
+ */
+ public void activateOptions() {
+ if (follow) {
+ if (target.equals(SYSTEM_ERR)) {
+ setWriter(createWriter(new SystemErrStream()));
+ } else {
+ setWriter(createWriter(new SystemOutStream()));
+ }
+ } else {
+ if (target.equals(SYSTEM_ERR)) {
+ setWriter(createWriter(System.err));
+ } else {
+ setWriter(createWriter(System.out));
+ }
+ }
+
+ super.activateOptions();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ protected final void closeWriter() {
+ if (follow) {
+ super.closeWriter();
+ }
+ }
+
+
+ /**
+ * An implementation of OutputStream that redirects to the
+ * current System.err.
+ */
+ private static class SystemErrStream extends OutputStream {
+ public SystemErrStream() {
+ }
+
+ public void close() {
+ }
+
+ public void flush() {
+ System.err.flush();
+ }
+
+ public void write(final byte[] b) throws IOException {
+ if (!Loggers.consoleLoggingEnabled()) {
+ return;
+ }
+ System.err.write(b);
+ }
+
+ public void write(final byte[] b, final int off, final int len)
+ throws IOException {
+ if (!Loggers.consoleLoggingEnabled()) {
+ return;
+ }
+ System.err.write(b, off, len);
+ }
+
+ public void write(final int b) throws IOException {
+ if (!Loggers.consoleLoggingEnabled()) {
+ return;
+ }
+ System.err.write(b);
+ }
+ }
+
+ /**
+ * An implementation of OutputStream that redirects to the
+ * current System.out.
+ */
+ private static class SystemOutStream extends OutputStream {
+ public SystemOutStream() {
+ }
+
+ public void close() {
+ }
+
+ public void flush() {
+ System.out.flush();
+ }
+
+ public void write(final byte[] b) throws IOException {
+ if (!Loggers.consoleLoggingEnabled()) {
+ return;
+ }
+ System.out.write(b);
+ }
+
+ public void write(final byte[] b, final int off, final int len)
+ throws IOException {
+ if (!Loggers.consoleLoggingEnabled()) {
+ return;
+ }
+ System.out.write(b, off, len);
+ }
+
+ public void write(final int b) throws IOException {
+ if (!Loggers.consoleLoggingEnabled()) {
+ return;
+ }
+ System.out.write(b);
+ }
+ }
+
+}
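
Both stream wrappers consult Loggers.consoleLoggingEnabled() on every write, which is what lets console output be silenced at runtime without reconfiguring log4j. A sketch (the layout pattern is an arbitrary example):

    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;
    import org.elasticsearch.common.logging.Loggers;
    import org.elasticsearch.common.logging.log4j.ConsoleAppender;

    public class ConsoleToggleSketch {
        public static void main(String[] args) {
            Logger root = Logger.getRootLogger();
            root.addAppender(new ConsoleAppender(new PatternLayout("%m%n")));

            root.info("visible");          // reaches System.out
            Loggers.disableConsoleLogging();
            root.info("dropped silently"); // the stream checks the global flag
            Loggers.enableConsoleLogging();
            root.info("visible again");
        }
    }
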
diff --git a/src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLogger.java b/src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLogger.java
new file mode 100644
index 0000000..444e838
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLogger.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.log4j;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.elasticsearch.common.logging.support.AbstractESLogger;
+
+/**
+ * A logger implementation backed by log4j.
+ */
+public class Log4jESLogger extends AbstractESLogger {
+
+ private final org.apache.log4j.Logger logger;
+
+ public Log4jESLogger(String prefix, Logger logger) {
+ super(prefix);
+ this.logger = logger;
+ }
+
+ public void setLevel(String level) {
+ if (level == null) {
+ logger.setLevel(null);
+ } else if ("error".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.ERROR);
+ } else if ("warn".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.WARN);
+ } else if ("info".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.INFO);
+ } else if ("debug".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.DEBUG);
+ } else if ("trace".equalsIgnoreCase(level)) {
+ logger.setLevel(Level.TRACE);
+ }
+ }
+
+ @Override
+ public String getLevel() {
+ if (logger.getLevel() == null) {
+ return null;
+ }
+ return logger.getLevel().toString();
+ }
+
+ @Override
+ public String getName() {
+ return logger.getName();
+ }
+
+ @Override
+ public boolean isTraceEnabled() {
+ return logger.isTraceEnabled();
+ }
+
+ @Override
+ public boolean isDebugEnabled() {
+ return logger.isDebugEnabled();
+ }
+
+ @Override
+ public boolean isInfoEnabled() {
+ return logger.isInfoEnabled();
+ }
+
+ @Override
+ public boolean isWarnEnabled() {
+ return logger.isEnabledFor(Level.WARN);
+ }
+
+ @Override
+ public boolean isErrorEnabled() {
+ return logger.isEnabledFor(Level.ERROR);
+ }
+
+ @Override
+ protected void internalTrace(String msg) {
+ logger.trace(msg);
+ }
+
+ @Override
+ protected void internalTrace(String msg, Throwable cause) {
+ logger.trace(msg, cause);
+ }
+
+ @Override
+ protected void internalDebug(String msg) {
+ logger.debug(msg);
+ }
+
+ @Override
+ protected void internalDebug(String msg, Throwable cause) {
+ logger.debug(msg, cause);
+ }
+
+ @Override
+ protected void internalInfo(String msg) {
+ logger.info(msg);
+ }
+
+ @Override
+ protected void internalInfo(String msg, Throwable cause) {
+ logger.info(msg, cause);
+ }
+
+ @Override
+ protected void internalWarn(String msg) {
+ logger.warn(msg);
+ }
+
+ @Override
+ protected void internalWarn(String msg, Throwable cause) {
+ logger.warn(msg, cause);
+ }
+
+ @Override
+ protected void internalError(String msg) {
+ logger.error(msg);
+ }
+
+ @Override
+ protected void internalError(String msg, Throwable cause) {
+ logger.error(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerFactory.java b/src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerFactory.java
new file mode 100644
index 0000000..1bfb2d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerFactory.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.log4j;
+
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+
+/**
+ * A factory creating {@link Log4jESLogger} instances backed by log4j.
+ */
+public class Log4jESLoggerFactory extends ESLoggerFactory {
+
+ @Override
+ protected ESLogger rootLogger() {
+ return new Log4jESLogger(null, Logger.getRootLogger());
+ }
+
+ @Override
+ protected ESLogger newInstance(String prefix, String name) {
+ final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(name);
+ return new Log4jESLogger(prefix, logger);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java b/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java
new file mode 100644
index 0000000..766494c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.log4j;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.log4j.PropertyConfigurator;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.FailedToResolveConfigException;
+
+import java.util.Map;
+import java.util.Properties;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ * Configures log4j from logging.yml/logging.json/logging.properties config files plus system properties.
+ */
+public class LogConfigurator {
+
+ private static boolean loaded;
+
+ private static ImmutableMap<String, String> replacements = new MapBuilder<String, String>()
+ .put("console", "org.elasticsearch.common.logging.log4j.ConsoleAppender")
+ .put("async", "org.apache.log4j.AsyncAppender")
+ .put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender")
+ .put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender")
+ .put("file", "org.apache.log4j.FileAppender")
+ .put("jdbc", "org.apache.log4j.jdbc.JDBCAppender")
+ .put("jms", "org.apache.log4j.net.JMSAppender")
+ .put("lf5", "org.apache.log4j.lf5.LF5Appender")
+ .put("ntevent", "org.apache.log4j.nt.NTEventLogAppender")
+ .put("null", "org.apache.log4j.NullAppender")
+ .put("rollingFile", "org.apache.log4j.RollingFileAppender")
+ .put("smtp", "org.apache.log4j.net.SMTPAppender")
+ .put("socket", "org.apache.log4j.net.SocketAppender")
+ .put("socketHub", "org.apache.log4j.net.SocketHubAppender")
+ .put("syslog", "org.apache.log4j.net.SyslogAppender")
+ .put("telnet", "org.apache.log4j.net.TelnetAppender")
+ // layouts
+ .put("simple", "org.apache.log4j.SimpleLayout")
+ .put("html", "org.apache.log4j.HTMLLayout")
+ .put("pattern", "org.apache.log4j.PatternLayout")
+ .put("consolePattern", "org.apache.log4j.PatternLayout")
+ .put("ttcc", "org.apache.log4j.TTCCLayout")
+ .put("xml", "org.apache.log4j.XMLLayout")
+ .immutableMap();
+
+ public static void configure(Settings settings) {
+ if (loaded) {
+ return;
+ }
+ loaded = true;
+ Environment environment = new Environment(settings);
+ ImmutableSettings.Builder settingsBuilder = settingsBuilder().put(settings);
+ try {
+ settingsBuilder.loadFromUrl(environment.resolveConfig("logging.yml"));
+ } catch (FailedToResolveConfigException e) {
+ // ignore
+ } catch (NoClassDefFoundError e) {
+ // ignore, no yaml
+ }
+ try {
+ settingsBuilder.loadFromUrl(environment.resolveConfig("logging.json"));
+ } catch (FailedToResolveConfigException e) {
+ // ignore
+ }
+ try {
+ settingsBuilder.loadFromUrl(environment.resolveConfig("logging.properties"));
+ } catch (FailedToResolveConfigException e) {
+ // ignore
+ }
+ settingsBuilder
+ .putProperties("elasticsearch.", System.getProperties())
+ .putProperties("es.", System.getProperties())
+ .replacePropertyPlaceholders();
+ Properties props = new Properties();
+ for (Map.Entry<String, String> entry : settingsBuilder.build().getAsMap().entrySet()) {
+ String key = "log4j." + entry.getKey();
+ String value = entry.getValue();
+ if (replacements.containsKey(value)) {
+ value = replacements.get(value);
+ }
+ if (key.endsWith(".value")) {
+ props.setProperty(key.substring(0, key.length() - ".value".length()), value);
+ } else if (key.endsWith(".type")) {
+ props.setProperty(key.substring(0, key.length() - ".type".length()), value);
+ } else {
+ props.setProperty(key, value);
+ }
+ }
+ PropertyConfigurator.configure(props);
+ }
+}
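
The net effect: each flat setting key gains a "log4j." prefix, a trailing ".type" or ".value" is stripped, and shorthand values such as "console" expand to full class names via the replacements table. A sketch feeding equivalent settings programmatically (the keys mirror the usual logging.yml conventions, the pattern string is an arbitrary example, and default path settings are assumed so the config-file lookups simply fall through):

    import org.elasticsearch.common.logging.log4j.LogConfigurator;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class LogConfigSketch {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("rootLogger", "INFO, console")      // -> log4j.rootLogger
                    .put("appender.console.type", "console") // -> log4j.appender.console = ...log4j.ConsoleAppender
                    .put("appender.console.layout.type", "consolePattern")
                    .put("appender.console.layout.conversionPattern", "[%c] %m%n")
                    .build();
            LogConfigurator.configure(settings);
        }
    }
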
diff --git a/src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLogger.java b/src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLogger.java
new file mode 100644
index 0000000..35f20f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLogger.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.slf4j;
+
+import org.elasticsearch.common.logging.support.AbstractESLogger;
+import org.slf4j.Logger;
+
+/**
+ * A logger implementation backed by slf4j.
+ */
+public class Slf4jESLogger extends AbstractESLogger {
+
+ private final Logger logger;
+
+ public Slf4jESLogger(String prefix, Logger logger) {
+ super(prefix);
+ this.logger = logger;
+ }
+
+ @Override
+ public void setLevel(String level) {
+ // can't set it in slf4j...
+ }
+
+ @Override
+ public String getLevel() {
+ // can't get it in slf4j...
+ return null;
+ }
+
+ @Override
+ public String getName() {
+ return logger.getName();
+ }
+
+ @Override
+ public boolean isTraceEnabled() {
+ return logger.isTraceEnabled();
+ }
+
+ @Override
+ public boolean isDebugEnabled() {
+ return logger.isDebugEnabled();
+ }
+
+ @Override
+ public boolean isInfoEnabled() {
+ return logger.isInfoEnabled();
+ }
+
+ @Override
+ public boolean isWarnEnabled() {
+ return logger.isWarnEnabled();
+ }
+
+ @Override
+ public boolean isErrorEnabled() {
+ return logger.isErrorEnabled();
+ }
+
+ @Override
+ protected void internalTrace(String msg) {
+ logger.trace(msg);
+ }
+
+ @Override
+ protected void internalTrace(String msg, Throwable cause) {
+ logger.trace(msg, cause);
+ }
+
+ @Override
+ protected void internalDebug(String msg) {
+ logger.debug(msg);
+ }
+
+ @Override
+ protected void internalDebug(String msg, Throwable cause) {
+ logger.debug(msg, cause);
+ }
+
+ @Override
+ protected void internalInfo(String msg) {
+ logger.info(msg);
+ }
+
+ @Override
+ protected void internalInfo(String msg, Throwable cause) {
+ logger.info(msg, cause);
+ }
+
+ @Override
+ protected void internalWarn(String msg) {
+ logger.warn(msg);
+ }
+
+ @Override
+ protected void internalWarn(String msg, Throwable cause) {
+ logger.warn(msg, cause);
+ }
+
+ @Override
+ protected void internalError(String msg) {
+ logger.error(msg);
+ }
+
+ @Override
+ protected void internalError(String msg, Throwable cause) {
+ logger.error(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLoggerFactory.java b/src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLoggerFactory.java
new file mode 100644
index 0000000..1fa1386
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/slf4j/Slf4jESLoggerFactory.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.slf4j;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A factory creating {@link Slf4jESLogger} instances backed by slf4j.
+ */
+public class Slf4jESLoggerFactory extends ESLoggerFactory {
+
+ @Override
+ protected ESLogger rootLogger() {
+ return getLogger(Logger.ROOT_LOGGER_NAME);
+ }
+
+ @Override
+ protected ESLogger newInstance(String prefix, String name) {
+ return new Slf4jESLogger(prefix, LoggerFactory.getLogger(name));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/support/AbstractESLogger.java b/src/main/java/org/elasticsearch/common/logging/support/AbstractESLogger.java
new file mode 100644
index 0000000..441e241
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/support/AbstractESLogger.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.support;
+
+import org.elasticsearch.common.logging.ESLogger;
+
+/**
+ * Base logger implementation handling level checks and prefix/placeholder message formatting; subclasses only emit the final string.
+ */
+public abstract class AbstractESLogger implements ESLogger {
+
+ private final String prefix;
+
+ protected AbstractESLogger(String prefix) {
+ this.prefix = prefix;
+ }
+
+ @Override
+ public String getPrefix() {
+ return this.prefix;
+ }
+
+ @Override
+ public void trace(String msg, Object... params) {
+ if (isTraceEnabled()) {
+ internalTrace(LoggerMessageFormat.format(prefix, msg, params));
+ }
+ }
+
+ protected abstract void internalTrace(String msg);
+
+ @Override
+ public void trace(String msg, Throwable cause, Object... params) {
+ if (isTraceEnabled()) {
+ internalTrace(LoggerMessageFormat.format(prefix, msg, params), cause);
+ }
+ }
+
+ protected abstract void internalTrace(String msg, Throwable cause);
+
+
+ @Override
+ public void debug(String msg, Object... params) {
+ if (isDebugEnabled()) {
+ internalDebug(LoggerMessageFormat.format(prefix, msg, params));
+ }
+ }
+
+ protected abstract void internalDebug(String msg);
+
+ @Override
+ public void debug(String msg, Throwable cause, Object... params) {
+ if (isDebugEnabled()) {
+ internalDebug(LoggerMessageFormat.format(prefix, msg, params), cause);
+ }
+ }
+
+ protected abstract void internalDebug(String msg, Throwable cause);
+
+
+ @Override
+ public void info(String msg, Object... params) {
+ if (isInfoEnabled()) {
+ internalInfo(LoggerMessageFormat.format(prefix, msg, params));
+ }
+ }
+
+ protected abstract void internalInfo(String msg);
+
+ @Override
+ public void info(String msg, Throwable cause, Object... params) {
+ if (isInfoEnabled()) {
+ internalInfo(LoggerMessageFormat.format(prefix, msg, params), cause);
+ }
+ }
+
+ protected abstract void internalInfo(String msg, Throwable cause);
+
+
+ @Override
+ public void warn(String msg, Object... params) {
+ if (isWarnEnabled()) {
+ internalWarn(LoggerMessageFormat.format(prefix, msg, params));
+ }
+ }
+
+ protected abstract void internalWarn(String msg);
+
+ @Override
+ public void warn(String msg, Throwable cause, Object... params) {
+ if (isWarnEnabled()) {
+ internalWarn(LoggerMessageFormat.format(prefix, msg, params), cause);
+ }
+ }
+
+ protected abstract void internalWarn(String msg, Throwable cause);
+
+
+ @Override
+ public void error(String msg, Object... params) {
+ if (isErrorEnabled()) {
+ internalError(LoggerMessageFormat.format(prefix, msg, params));
+ }
+ }
+
+ protected abstract void internalError(String msg);
+
+ @Override
+ public void error(String msg, Throwable cause, Object... params) {
+ if (isErrorEnabled()) {
+ internalError(LoggerMessageFormat.format(prefix, msg, params), cause);
+ }
+ }
+
+ protected abstract void internalError(String msg, Throwable cause);
+}
diff --git a/src/main/java/org/elasticsearch/common/logging/support/LoggerMessageFormat.java b/src/main/java/org/elasticsearch/common/logging/support/LoggerMessageFormat.java
new file mode 100644
index 0000000..510f410
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/logging/support/LoggerMessageFormat.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.support;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Formats messages by replacing {} placeholders with arguments, honoring backslash escapes and expanding arrays.
+ */
+public class LoggerMessageFormat {
+
+ static final char DELIM_START = '{';
+ static final char DELIM_STOP = '}';
+ static final String DELIM_STR = "{}";
+ private static final char ESCAPE_CHAR = '\\';
+
+ public static String format(final String messagePattern, final Object... argArray) {
+ return format(null, messagePattern, argArray);
+ }
+
+ public static String format(final String prefix, final String messagePattern, final Object... argArray) {
+ if (messagePattern == null) {
+ return null;
+ }
+ if (argArray == null) {
+ if (prefix == null) {
+ return messagePattern;
+ } else {
+ return prefix + messagePattern;
+ }
+ }
+ int i = 0;
+ int j;
+ final StringBuilder sbuf = new StringBuilder(messagePattern.length() + 50);
+ if (prefix != null) {
+ sbuf.append(prefix);
+ }
+
+ for (int L = 0; L < argArray.length; L++) {
+
+ j = messagePattern.indexOf(DELIM_STR, i);
+
+ if (j == -1) {
+ // no more variables
+ if (i == 0 && prefix == null) { // a simple string with no prefix
+ return messagePattern;
+ } else { // add the tail string which contains no variables and return
+ // the result (the buffer already holds any prefix)
+ sbuf.append(messagePattern.substring(i, messagePattern.length()));
+ return sbuf.toString();
+ }
+ } else {
+ if (isEscapedDelimeter(messagePattern, j)) {
+ if (!isDoubleEscaped(messagePattern, j)) {
+ L--; // DELIM_START was escaped, thus should not be incremented
+ sbuf.append(messagePattern.substring(i, j - 1));
+ sbuf.append(DELIM_START);
+ i = j + 1;
+ } else {
+ // The escape character preceding the delimiter start is
+ // itself escaped: "abc x:\\{}"
+ // we have to consume one backslash
+ sbuf.append(messagePattern.substring(i, j - 1));
+ deeplyAppendParameter(sbuf, argArray[L], new HashMap());
+ i = j + 2;
+ }
+ } else {
+ // normal case
+ sbuf.append(messagePattern.substring(i, j));
+ deeplyAppendParameter(sbuf, argArray[L], new HashMap());
+ i = j + 2;
+ }
+ }
+ }
+ // append the characters following the last {} pair.
+ sbuf.append(messagePattern.substring(i, messagePattern.length()));
+ return sbuf.toString();
+ }
+
+ static boolean isEscapedDelimeter(String messagePattern,
+ int delimeterStartIndex) {
+
+ if (delimeterStartIndex == 0) {
+ return false;
+ }
+ char potentialEscape = messagePattern.charAt(delimeterStartIndex - 1);
+ if (potentialEscape == ESCAPE_CHAR) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ static boolean isDoubleEscaped(String messagePattern, int delimeterStartIndex) {
+ if (delimeterStartIndex >= 2 && messagePattern.charAt(delimeterStartIndex - 2) == ESCAPE_CHAR) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Map seenMap) {
+ if (o == null) {
+ sbuf.append("null");
+ return;
+ }
+ if (!o.getClass().isArray()) {
+ safeObjectAppend(sbuf, o);
+ } else {
+ // check for primitive array types because they
+ // unfortunately cannot be cast to Object[]
+ if (o instanceof boolean[]) {
+ booleanArrayAppend(sbuf, (boolean[]) o);
+ } else if (o instanceof byte[]) {
+ byteArrayAppend(sbuf, (byte[]) o);
+ } else if (o instanceof char[]) {
+ charArrayAppend(sbuf, (char[]) o);
+ } else if (o instanceof short[]) {
+ shortArrayAppend(sbuf, (short[]) o);
+ } else if (o instanceof int[]) {
+ intArrayAppend(sbuf, (int[]) o);
+ } else if (o instanceof long[]) {
+ longArrayAppend(sbuf, (long[]) o);
+ } else if (o instanceof float[]) {
+ floatArrayAppend(sbuf, (float[]) o);
+ } else if (o instanceof double[]) {
+ doubleArrayAppend(sbuf, (double[]) o);
+ } else {
+ objectArrayAppend(sbuf, (Object[]) o, seenMap);
+ }
+ }
+ }
+
+ private static void safeObjectAppend(StringBuilder sbuf, Object o) {
+ try {
+ String oAsString = o.toString();
+ sbuf.append(oAsString);
+ } catch (Throwable t) {
+ sbuf.append("[FAILED toString()]");
+ }
+
+ }
+
+ private static void objectArrayAppend(StringBuilder sbuf, Object[] a, Map seenMap) {
+ sbuf.append('[');
+ if (!seenMap.containsKey(a)) {
+ seenMap.put(a, null);
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ deeplyAppendParameter(sbuf, a[i], seenMap);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ // allow repeats in siblings
+ seenMap.remove(a);
+ } else {
+ sbuf.append("...");
+ }
+ sbuf.append(']');
+ }
+
+ private static void booleanArrayAppend(StringBuilder sbuf, boolean[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void byteArrayAppend(StringBuilder sbuf, byte[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void charArrayAppend(StringBuilder sbuf, char[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void shortArrayAppend(StringBuilder sbuf, short[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void intArrayAppend(StringBuilder sbuf, int[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void longArrayAppend(StringBuilder sbuf, long[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void floatArrayAppend(StringBuilder sbuf, float[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+
+ private static void doubleArrayAppend(StringBuilder sbuf, double[] a) {
+ sbuf.append('[');
+ final int len = a.length;
+ for (int i = 0; i < len; i++) {
+ sbuf.append(a[i]);
+ if (i != len - 1)
+ sbuf.append(", ");
+ }
+ sbuf.append(']');
+ }
+}
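
Worked examples of the placeholder and escaping rules implemented above, with the resulting strings shown in comments:

    import org.elasticsearch.common.logging.support.LoggerMessageFormat;

    public class FormatSketch {
        public static void main(String[] args) {
            // Plain substitution.
            System.out.println(LoggerMessageFormat.format("shard [{}] of [{}]", 1, 5));
            // -> shard [1] of [5]

            // An escaped delimiter is emitted literally and consumes no argument.
            System.out.println(LoggerMessageFormat.format("set \\{} to {}", 42));
            // -> set {} to 42

            // A double escape keeps one backslash and substitutes normally.
            System.out.println(LoggerMessageFormat.format("path c:\\\\{}", "temp"));
            // -> path c:\temp

            // Arrays are expanded element by element instead of printing a hash.
            System.out.println(LoggerMessageFormat.format("ids {}", (Object) new int[]{1, 2, 3}));
            // -> ids [1, 2, 3]
        }
    }
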
diff --git a/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java b/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java
new file mode 100644
index 0000000..293c2ed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Utility methods to convert values to and from {@link BytesRef}.
+ */
+public class BytesRefs {
+
+ /**
+ * Converts a value to a string, taking special care if it is a {@link BytesRef} to call
+ * {@link org.apache.lucene.util.BytesRef#utf8ToString()}.
+ */
+ public static String toString(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof BytesRef) {
+ return ((BytesRef) value).utf8ToString();
+ }
+ return value.toString();
+ }
+
+ /**
+ * Converts an object value to BytesRef.
+ */
+ public static BytesRef toBytesRef(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof BytesRef) {
+ return (BytesRef) value;
+ }
+ return new BytesRef(value.toString());
+ }
+
+ public static BytesRef toBytesRef(Object value, BytesRef spare) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof BytesRef) {
+ return (BytesRef) value;
+ }
+ spare.copyChars(value.toString());
+ return spare;
+ }
+}
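
A small usage sketch; the spare-based overload reuses one BytesRef across conversions to avoid a per-call allocation (the values are arbitrary):

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.lucene.BytesRefs;

    public class BytesRefsSketch {
        public static void main(String[] args) {
            // Round-trip a string through BytesRef.
            BytesRef ref = BytesRefs.toBytesRef("elasticsearch");
            System.out.println(BytesRefs.toString(ref)); // elasticsearch

            // Reuse one spare across many conversions.
            BytesRef spare = new BytesRef();
            for (Object value : new Object[]{"a", "b", "c"}) {
                BytesRef converted = BytesRefs.toBytesRef(value, spare);
                // consume 'converted' before the next iteration overwrites it
                System.out.println(converted.utf8ToString());
            }
        }
    }
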
diff --git a/src/main/java/org/elasticsearch/common/lucene/Directories.java b/src/main/java/org/elasticsearch/common/lucene/Directories.java
new file mode 100644
index 0000000..6318356
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/Directories.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.store.Directory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+/**
+ * A set of utilities for Lucene {@link Directory}.
+ * Currently limited to estimating the total size of all files
+ * in a directory.
+ */
+public class Directories {
+
+ /**
+ * Returns the estimated size of a {@link Directory}.
+ */
+ public static long estimateSize(Directory directory) throws IOException {
+ long estimatedSize = 0;
+ String[] files = directory.listAll();
+ for (String file : files) {
+ try {
+ estimatedSize += directory.fileLength(file);
+ } catch (FileNotFoundException e) {
+ // ignore, the file is no longer there
+ }
+ }
+ return estimatedSize;
+ }
+
+ private Directories() {
+
+ }
+}
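
A sketch of size estimation against an in-memory directory (Lucene 4.x API; file name and size are arbitrary):

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.store.RAMDirectory;
    import org.elasticsearch.common.lucene.Directories;

    public class DirectoriesSketch {
        public static void main(String[] args) throws Exception {
            Directory dir = new RAMDirectory();
            // Write a single 1 KB file so there is something to measure.
            IndexOutput out = dir.createOutput("test.bin", IOContext.DEFAULT);
            out.writeBytes(new byte[1024], 1024);
            out.close();
            System.out.println(Directories.estimateSize(dir)); // 1024
        }
    }
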
diff --git a/src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java b/src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java
new file mode 100644
index 0000000..9e602d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A wrapper around a {@link BytesRef} that also caches its hash code.
+ */
+public class HashedBytesRef {
+
+ public BytesRef bytes;
+ public int hash;
+
+ public HashedBytesRef() {
+ }
+
+ public HashedBytesRef(String bytes) {
+ this(new BytesRef(bytes));
+ }
+
+ public HashedBytesRef(BytesRef bytes) {
+ this(bytes, bytes.hashCode());
+ }
+
+ public HashedBytesRef(BytesRef bytes, int hash) {
+ this.bytes = bytes;
+ this.hash = hash;
+ }
+
+ public HashedBytesRef resetHashCode() {
+ this.hash = bytes.hashCode();
+ return this;
+ }
+
+ public HashedBytesRef reset(BytesRef bytes, int hash) {
+ this.bytes = bytes;
+ this.hash = hash;
+ return this;
+ }
+
+ @Override
+ public int hashCode() {
+ return hash;
+ }
+
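+    // note: equality is based on the wrapped bytes only; the cached hash is not compared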
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof HashedBytesRef) {
+ return bytes.equals(((HashedBytesRef) other).bytes);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return bytes.toString();
+ }
+
+ public HashedBytesRef deepCopy() {
+ return deepCopyOf(this);
+ }
+
+ public static HashedBytesRef deepCopyOf(HashedBytesRef other) {
+ BytesRef copy = new BytesRef();
+ copy.copyBytes(other.bytes);
+ return new HashedBytesRef(copy, other.hash);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/IndexCommitDelegate.java b/src/main/java/org/elasticsearch/common/lucene/IndexCommitDelegate.java
new file mode 100644
index 0000000..7831a75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/IndexCommitDelegate.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.store.Directory;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * A simple delegate that forwards all {@link IndexCommit} calls to a wrapped
+ * {@link IndexCommit}.
+ */
+public abstract class IndexCommitDelegate extends IndexCommit {
+
+ protected final IndexCommit delegate;
+
+ /**
+ * Constructs a new {@link IndexCommit} that will delegate all calls
+ * to the provided delegate.
+ *
+ * @param delegate The delegate
+ */
+ public IndexCommitDelegate(IndexCommit delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public String getSegmentsFileName() {
+ return delegate.getSegmentsFileName();
+ }
+
+ @Override
+ public Collection<String> getFileNames() throws IOException {
+ return delegate.getFileNames();
+ }
+
+ @Override
+ public Directory getDirectory() {
+ return delegate.getDirectory();
+ }
+
+ @Override
+ public void delete() {
+ delegate.delete();
+ }
+
+ @Override
+ public boolean isDeleted() {
+ return delegate.isDeleted();
+ }
+
+ @Override
+ public int getSegmentCount() {
+ return delegate.getSegmentCount();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return delegate.equals(other);
+ }
+
+ @Override
+ public int hashCode() {
+ return delegate.hashCode();
+ }
+
+ @Override
+ public long getGeneration() {
+ return delegate.getGeneration();
+ }
+
+ @Override
+ public Map<String, String> getUserData() throws IOException {
+ return delegate.getUserData();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java b/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java
new file mode 100644
index 0000000..0952911
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/LoggerInfoStream.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import com.google.common.base.Charsets;
+
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+
+/**
+ * A {@link java.io.PrintStream} that logs each {@link #println(String)} into a logger
+ * at trace level.
+ * <p/>
+ * <p>Also provides factory methods that append the {@link #SUFFIX} to a given
+ * logger name.
+ */
+public class LoggerInfoStream extends PrintStream {
+
+ public static final String SUFFIX = ".lucene";
+
+ /**
+ * Creates a new {@link LoggerInfoStream} based on the provided logger
+     * by appending the {@link #SUFFIX} to its name.
+ */
+ public static LoggerInfoStream getInfoStream(ESLogger logger) {
+ try {
+ return new LoggerInfoStream(Loggers.getLogger(logger, SUFFIX));
+ } catch (UnsupportedEncodingException e) {
+            // UTF-8 is always supported, so this should never happen
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Creates a new {@link LoggerInfoStream} based on the provided name
+     * by appending the {@link #SUFFIX} to it.
+ */
+ public static LoggerInfoStream getInfoStream(String name) {
+ try {
+ return new LoggerInfoStream(Loggers.getLogger(name + SUFFIX));
+ } catch (UnsupportedEncodingException e) {
+            // UTF-8 is always supported, so this should never happen
+ throw new RuntimeException(e);
+ }
+ }
+
+ private final ESLogger logger;
+
+ /**
+     * Constructs a new instance based on the provided logger. Each
+     * {@link #println(String)} operation is logged at trace level.
+ * @throws UnsupportedEncodingException
+ */
+ public LoggerInfoStream(ESLogger logger) throws UnsupportedEncodingException {
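+        // the null OutputStream is never written to: println(String) is overridden to
+        // forward directly to the logger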
+ super((OutputStream) null, false, Charsets.UTF_8.name());
+ this.logger = logger;
+ }
+
+ /**
+ * Override only the method Lucene actually uses.
+ */
+ @Override
+ public void println(String x) {
+ logger.trace(x);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java
new file mode 100644
index 0000000..b7028b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -0,0 +1,376 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.index.analysis.AnalyzerScope;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+
+import java.io.IOException;
+
+/**
+ * A collection of utilities around Lucene: version handling, (de)serialization of
+ * {@link TopDocs} and {@link Explanation} instances, and commonly used analyzers.
+ */
+public class Lucene {
+
+ public static final Version VERSION = Version.LUCENE_46;
+ public static final Version ANALYZER_VERSION = VERSION;
+ public static final Version QUERYPARSER_VERSION = VERSION;
+
+ public static final NamedAnalyzer STANDARD_ANALYZER = new NamedAnalyzer("_standard", AnalyzerScope.GLOBAL, new StandardAnalyzer(ANALYZER_VERSION));
+ public static final NamedAnalyzer KEYWORD_ANALYZER = new NamedAnalyzer("_keyword", AnalyzerScope.GLOBAL, new KeywordAnalyzer());
+
+ public static final int NO_DOC = -1;
+
+    public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];
+
+ @SuppressWarnings("deprecation")
+ public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
+ if (version == null) {
+ return defaultVersion;
+ }
+        if ("4.6".equals(version)) {
+            return Version.LUCENE_46;
+        }
+        if ("4.5".equals(version)) {
+            return Version.LUCENE_45;
+        }
+        if ("4.4".equals(version)) {
+            return Version.LUCENE_44;
+        }
+ if ("4.3".equals(version)) {
+ return Version.LUCENE_43;
+ }
+ if ("4.2".equals(version)) {
+ return Version.LUCENE_42;
+ }
+ if ("4.1".equals(version)) {
+ return Version.LUCENE_41;
+ }
+ if ("4.0".equals(version)) {
+ return Version.LUCENE_40;
+ }
+ if ("3.6".equals(version)) {
+ return Version.LUCENE_36;
+ }
+ if ("3.5".equals(version)) {
+ return Version.LUCENE_35;
+ }
+ if ("3.4".equals(version)) {
+ return Version.LUCENE_34;
+ }
+ if ("3.3".equals(version)) {
+ return Version.LUCENE_33;
+ }
+ if ("3.2".equals(version)) {
+ return Version.LUCENE_32;
+ }
+ if ("3.1".equals(version)) {
+ return Version.LUCENE_31;
+ }
+ if ("3.0".equals(version)) {
+ return Version.LUCENE_30;
+ }
+ logger.warn("no version match {}, default to {}", version, defaultVersion);
+ return defaultVersion;
+ }
+
+    /**
+     * Reads the segment infos from the directory, throwing an {@link IOException}
+     * if they cannot be loaded.
+     */
+ public static SegmentInfos readSegmentInfos(Directory directory) throws IOException {
+ final SegmentInfos sis = new SegmentInfos();
+ sis.read(directory);
+ return sis;
+ }
+
+ public static long count(IndexSearcher searcher, Query query) throws IOException {
+ TotalHitCountCollector countCollector = new TotalHitCountCollector();
+ // we don't need scores, so wrap it in a constant score query
+ if (!(query instanceof ConstantScoreQuery)) {
+ query = new ConstantScoreQuery(query);
+ }
+ searcher.search(query, countCollector);
+ return countCollector.getTotalHits();
+ }
+
+ /**
+ * Closes the index writer, returning <tt>false</tt> if it failed to close.
+ */
+ public static boolean safeClose(IndexWriter writer) {
+ if (writer == null) {
+ return true;
+ }
+ try {
+ writer.close();
+ return true;
+ } catch (Throwable e) {
+ return false;
+ }
+ }
+
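+    /**
+     * Reads a {@link TopDocs} previously written by {@link #writeTopDocs(StreamOutput, TopDocs, int)},
+     * or <code>null</code> if none was serialized.
+     */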
+ public static TopDocs readTopDocs(StreamInput in) throws IOException {
+ if (!in.readBoolean()) {
+ // no docs
+ return null;
+ }
+ if (in.readBoolean()) {
+ int totalHits = in.readVInt();
+ float maxScore = in.readFloat();
+
+ SortField[] fields = new SortField[in.readVInt()];
+ for (int i = 0; i < fields.length; i++) {
+ String field = null;
+ if (in.readBoolean()) {
+ field = in.readString();
+ }
+ fields[i] = new SortField(field, readSortType(in), in.readBoolean());
+ }
+
+ FieldDoc[] fieldDocs = new FieldDoc[in.readVInt()];
+ for (int i = 0; i < fieldDocs.length; i++) {
+ Comparable[] cFields = new Comparable[in.readVInt()];
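+                // type tags, mirroring writeTopDocs: 0=null, 1=String, 2=Integer, 3=Long,
+                // 4=Float, 5=Double, 6=Byte, 7=Short, 8=Boolean, 9=BytesRef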
+ for (int j = 0; j < cFields.length; j++) {
+ byte type = in.readByte();
+ if (type == 0) {
+ cFields[j] = null;
+ } else if (type == 1) {
+ cFields[j] = in.readString();
+ } else if (type == 2) {
+ cFields[j] = in.readInt();
+ } else if (type == 3) {
+ cFields[j] = in.readLong();
+ } else if (type == 4) {
+ cFields[j] = in.readFloat();
+ } else if (type == 5) {
+ cFields[j] = in.readDouble();
+ } else if (type == 6) {
+ cFields[j] = in.readByte();
+ } else if (type == 7) {
+ cFields[j] = in.readShort();
+ } else if (type == 8) {
+ cFields[j] = in.readBoolean();
+ } else if (type == 9) {
+ cFields[j] = in.readBytesRef();
+ } else {
+ throw new IOException("Can't match type [" + type + "]");
+ }
+ }
+ fieldDocs[i] = new FieldDoc(in.readVInt(), in.readFloat(), cFields);
+ }
+ return new TopFieldDocs(totalHits, fieldDocs, fields, maxScore);
+ } else {
+ int totalHits = in.readVInt();
+ float maxScore = in.readFloat();
+
+ ScoreDoc[] scoreDocs = new ScoreDoc[in.readVInt()];
+ for (int i = 0; i < scoreDocs.length; i++) {
+ scoreDocs[i] = new ScoreDoc(in.readVInt(), in.readFloat());
+ }
+ return new TopDocs(totalHits, scoreDocs, maxScore);
+ }
+ }
+
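+    /**
+     * Serializes the given {@link TopDocs}, skipping the first <code>from</code> hits.
+     */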
+ public static void writeTopDocs(StreamOutput out, TopDocs topDocs, int from) throws IOException {
+ if (topDocs.scoreDocs.length - from < 0) {
+ out.writeBoolean(false);
+ return;
+ }
+ out.writeBoolean(true);
+ if (topDocs instanceof TopFieldDocs) {
+ out.writeBoolean(true);
+ TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs;
+
+ out.writeVInt(topDocs.totalHits);
+ out.writeFloat(topDocs.getMaxScore());
+
+ out.writeVInt(topFieldDocs.fields.length);
+ for (SortField sortField : topFieldDocs.fields) {
+ if (sortField.getField() == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(sortField.getField());
+ }
+ if (sortField.getComparatorSource() != null) {
+ writeSortType(out, ((IndexFieldData.XFieldComparatorSource) sortField.getComparatorSource()).reducedType());
+ } else {
+ writeSortType(out, sortField.getType());
+ }
+ out.writeBoolean(sortField.getReverse());
+ }
+
+ out.writeVInt(topDocs.scoreDocs.length - from);
+ int index = 0;
+ for (ScoreDoc doc : topFieldDocs.scoreDocs) {
+ if (index++ < from) {
+ continue;
+ }
+ FieldDoc fieldDoc = (FieldDoc) doc;
+ out.writeVInt(fieldDoc.fields.length);
+ for (Object field : fieldDoc.fields) {
+ if (field == null) {
+ out.writeByte((byte) 0);
+ } else {
+ Class type = field.getClass();
+ if (type == String.class) {
+ out.writeByte((byte) 1);
+ out.writeString((String) field);
+ } else if (type == Integer.class) {
+ out.writeByte((byte) 2);
+ out.writeInt((Integer) field);
+ } else if (type == Long.class) {
+ out.writeByte((byte) 3);
+ out.writeLong((Long) field);
+ } else if (type == Float.class) {
+ out.writeByte((byte) 4);
+ out.writeFloat((Float) field);
+ } else if (type == Double.class) {
+ out.writeByte((byte) 5);
+ out.writeDouble((Double) field);
+ } else if (type == Byte.class) {
+ out.writeByte((byte) 6);
+ out.writeByte((Byte) field);
+ } else if (type == Short.class) {
+ out.writeByte((byte) 7);
+ out.writeShort((Short) field);
+ } else if (type == Boolean.class) {
+ out.writeByte((byte) 8);
+ out.writeBoolean((Boolean) field);
+ } else if (type == BytesRef.class) {
+ out.writeByte((byte) 9);
+ out.writeBytesRef((BytesRef) field);
+ } else {
+ throw new IOException("Can't handle sort field value of type [" + type + "]");
+ }
+ }
+ }
+
+ out.writeVInt(doc.doc);
+ out.writeFloat(doc.score);
+ }
+ } else {
+ out.writeBoolean(false);
+ out.writeVInt(topDocs.totalHits);
+ out.writeFloat(topDocs.getMaxScore());
+
+ out.writeVInt(topDocs.scoreDocs.length - from);
+ int index = 0;
+ for (ScoreDoc doc : topDocs.scoreDocs) {
+ if (index++ < from) {
+ continue;
+ }
+ out.writeVInt(doc.doc);
+ out.writeFloat(doc.score);
+ }
+ }
+ }
+
+ // LUCENE 4 UPGRADE: We might want to maintain our own ordinal, instead of Lucene's ordinal
+ public static SortField.Type readSortType(StreamInput in) throws IOException {
+ return SortField.Type.values()[in.readVInt()];
+ }
+
+ public static void writeSortType(StreamOutput out, SortField.Type sortType) throws IOException {
+ out.writeVInt(sortType.ordinal());
+ }
+
+ public static Explanation readExplanation(StreamInput in) throws IOException {
+ float value = in.readFloat();
+ String description = in.readString();
+ Explanation explanation = new Explanation(value, description);
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ explanation.addDetail(readExplanation(in));
+ }
+ }
+ return explanation;
+ }
+
+ public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException {
+ out.writeFloat(explanation.getValue());
+ out.writeString(explanation.getDescription());
+ Explanation[] subExplanations = explanation.getDetails();
+ if (subExplanations == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(subExplanations.length);
+ for (Explanation subExp : subExplanations) {
+ writeExplanation(out, subExp);
+ }
+ }
+ }
+
+ public static class ExistsCollector extends Collector {
+
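+        // flips to true as soon as any document is collected; reset() and setScorer(..)
+        // both clear the flag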
+ private boolean exists;
+
+ public void reset() {
+ exists = false;
+ }
+
+ public boolean exists() {
+ return exists;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.exists = false;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ exists = true;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+ }
+
+    private Lucene() {
+    }
+
+ public static final boolean indexExists(final Directory directory) throws IOException {
+ return DirectoryReader.indexExists(directory);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java b/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java
new file mode 100644
index 0000000..91500ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.ScoreCachingWrappingScorer;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+
+/**
+ * A {@link Collector} decorator that forwards only those documents whose score is at
+ * least the configured minimum score to the wrapped collector.
+ */
+public class MinimumScoreCollector extends Collector {
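+    // e.g. new MinimumScoreCollector(someCollector, 0.5f) forwards only hits scoring at least 0.5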
+
+ private final Collector collector;
+
+ private final float minimumScore;
+
+ private Scorer scorer;
+
+ public MinimumScoreCollector(Collector collector, float minimumScore) {
+ this.collector = collector;
+ this.minimumScore = minimumScore;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (!(scorer instanceof ScoreCachingWrappingScorer)) {
+ scorer = new ScoreCachingWrappingScorer(scorer);
+ }
+ this.scorer = scorer;
+ collector.setScorer(scorer);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ if (scorer.score() >= minimumScore) {
+ collector.collect(doc);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ collector.setNextReader(context);
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return collector.acceptsDocsOutOfOrder();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/lucene/MultiCollector.java b/src/main/java/org/elasticsearch/common/lucene/MultiCollector.java
new file mode 100644
index 0000000..2dcb8da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/MultiCollector.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.ScoreCachingWrappingScorer;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.lucene.search.XCollector;
+
+import java.io.IOException;
+
+/**
+ * A {@link Collector} that forwards every call to a primary collector and a set of
+ * additional collectors.
+ */
+public class MultiCollector extends XCollector {
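+    // runs a primary collector together with any number of additional collectors over the
+    // same stream of documents (used, for example, to piggyback facet collection on a search)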
+
+ private final Collector collector;
+
+ private final Collector[] collectors;
+
+ public MultiCollector(Collector collector, Collector[] collectors) {
+ this.collector = collector;
+ this.collectors = collectors;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ // always wrap it in a scorer wrapper
+ if (!(scorer instanceof ScoreCachingWrappingScorer)) {
+ scorer = new ScoreCachingWrappingScorer(scorer);
+ }
+ collector.setScorer(scorer);
+ for (Collector collector : collectors) {
+ collector.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ collector.collect(doc);
+ for (Collector collector : collectors) {
+ collector.collect(doc);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ collector.setNextReader(context);
+ for (Collector collector : collectors) {
+ collector.setNextReader(context);
+ }
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ if (!collector.acceptsDocsOutOfOrder()) {
+ return false;
+ }
+ for (Collector collector : collectors) {
+ if (!collector.acceptsDocsOutOfOrder()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public void postCollection() {
+ if (collector instanceof XCollector) {
+ ((XCollector) collector).postCollection();
+ }
+ for (Collector collector : collectors) {
+ if (collector instanceof XCollector) {
+ ((XCollector) collector).postCollection();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/ReaderContextAware.java b/src/main/java/org/elasticsearch/common/lucene/ReaderContextAware.java
new file mode 100644
index 0000000..8dbb3cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/ReaderContextAware.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.index.AtomicReaderContext;
+
+/**
+ * A component that needs to be notified whenever the current {@link AtomicReaderContext} changes.
+ */
+public interface ReaderContextAware {
+
+    void setNextReader(AtomicReaderContext reader);
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java b/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java
new file mode 100644
index 0000000..e5708df
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.search.Scorer;
+
+/**
+ * A component that needs to be handed the current {@link Scorer}.
+ */
+public interface ScorerAware {
+
+ void setScorer(Scorer scorer);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/SegmentReaderUtils.java b/src/main/java/org/elasticsearch/common/lucene/SegmentReaderUtils.java
new file mode 100644
index 0000000..20acf62
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/SegmentReaderUtils.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.FilterAtomicReader;
+import org.apache.lucene.index.SegmentReader;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+
+import java.lang.reflect.Field;
+
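+/**
+ * Utilities for unwrapping a {@link SegmentReader} from a (possibly filtered)
+ * {@link AtomicReader}, using reflection to reach {@link FilterAtomicReader}'s delegate.
+ */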
+public class SegmentReaderUtils {
+
+ private static final Field FILTER_ATOMIC_READER_IN;
+
+ static {
+ Field in = null;
+ try { // and another one bites the dust...
+ in = FilterAtomicReader.class.getDeclaredField("in");
+ in.setAccessible(true);
+ } catch (NoSuchFieldException e) {
+ assert false : "Failed to get field: " + e.getMessage();
+ }
+ FILTER_ATOMIC_READER_IN = in;
+
+ }
+
+ /**
+ * Tries to extract a segment reader from the given index reader.
+     * If no SegmentReader can be extracted, an {@link org.elasticsearch.ElasticsearchIllegalStateException} is thrown.
+ */
+ @Nullable
+ public static SegmentReader segmentReader(AtomicReader reader) {
+ return internalSegmentReader(reader, true);
+ }
+
+ /**
+     * Tries to extract a segment reader from the given index reader and returns it;
+     * otherwise <code>null</code> is returned.
+ */
+ @Nullable
+ public static SegmentReader segmentReaderOrNull(AtomicReader reader) {
+ return internalSegmentReader(reader, false);
+ }
+
+ public static boolean registerCoreListener(AtomicReader reader, SegmentReader.CoreClosedListener listener) {
+ SegmentReader segReader = SegmentReaderUtils.segmentReaderOrNull(reader);
+ if (segReader != null) {
+ segReader.addCoreClosedListener(listener);
+ return true;
+ }
+ return false;
+ }
+
+ private static SegmentReader internalSegmentReader(AtomicReader reader, boolean fail) {
+ if (reader == null) {
+ return null;
+ }
+ if (reader instanceof SegmentReader) {
+ return (SegmentReader) reader;
+ } else if (reader instanceof FilterAtomicReader) {
+ final FilterAtomicReader fReader = (FilterAtomicReader) reader;
+ try {
+ return FILTER_ATOMIC_READER_IN == null ? null :
+ segmentReader((AtomicReader) FILTER_ATOMIC_READER_IN.get(fReader));
+ } catch (IllegalAccessException e) {
+ }
+ }
+ if (fail) {
+ // hard fail - we can't get a SegmentReader
+ throw new ElasticsearchIllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
+ }
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java
new file mode 100644
index 0000000..ce2d034
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.io.FastCharArrayWriter;
+import org.elasticsearch.common.io.FastStringReader;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Sets.newHashSet;
+
+/**
+ * A {@link Reader} over the concatenation of several named field values, separated by
+ * spaces, that keeps track of each value's start offset and boost.
+ */
+public class AllEntries extends Reader {
+
+ public static class Entry {
+ private final String name;
+ private final FastStringReader reader;
+ private final int startOffset;
+ private final float boost;
+
+ public Entry(String name, FastStringReader reader, int startOffset, float boost) {
+ this.name = name;
+ this.reader = reader;
+ this.startOffset = startOffset;
+ this.boost = boost;
+ }
+
+ public int startOffset() {
+ return startOffset;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public float boost() {
+ return this.boost;
+ }
+
+ public FastStringReader reader() {
+ return this.reader;
+ }
+ }
+
+ private final List<Entry> entries = Lists.newArrayList();
+
+ private Entry current;
+
+ private Iterator<Entry> it;
+
+ private boolean itsSeparatorTime = false;
+
+ private boolean customBoost = false;
+
+ public void addText(String name, String text, float boost) {
+ if (boost != 1.0f) {
+ customBoost = true;
+ }
+ final int lastStartOffset;
+ if (entries.isEmpty()) {
+ lastStartOffset = -1;
+ } else {
+ final Entry last = entries.get(entries.size() - 1);
+ lastStartOffset = last.startOffset() + last.reader().length();
+ }
+ final int startOffset = lastStartOffset + 1; // +1 because we insert a space between tokens
+ Entry entry = new Entry(name, new FastStringReader(text), startOffset, boost);
+ entries.add(entry);
+ }
+
+ public boolean customBoost() {
+ return customBoost;
+ }
+
+ public void clear() {
+ this.entries.clear();
+ this.current = null;
+ this.it = null;
+ itsSeparatorTime = false;
+ }
+
+ public void reset() {
+ try {
+ for (Entry entry : entries) {
+ entry.reader().reset();
+ }
+ } catch (IOException e) {
+            throw new ElasticsearchIllegalStateException("should not happen", e);
+ }
+ it = entries.iterator();
+ if (it.hasNext()) {
+ current = it.next();
+ itsSeparatorTime = true;
+ }
+ }
+
+
+ public String buildText() {
+ reset();
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ for (Entry entry : entries) {
+ writer.append(entry.reader());
+ writer.append(' ');
+ }
+ reset();
+ return writer.toString();
+ }
+
+ public List<Entry> entries() {
+ return this.entries;
+ }
+
+ public Set<String> fields() {
+ Set<String> fields = newHashSet();
+ for (Entry entry : entries) {
+ fields.add(entry.name());
+ }
+ return fields;
+ }
+
+ // compute the boost for a token with the given startOffset
+ public float boost(int startOffset) {
+ if (!entries.isEmpty()) {
+ int lo = 0, hi = entries.size() - 1;
+ while (lo <= hi) {
+ final int mid = (lo + hi) >>> 1;
+ final int midOffset = entries.get(mid).startOffset();
+ if (startOffset < midOffset) {
+ hi = mid - 1;
+ } else {
+ lo = mid + 1;
+ }
+ }
+ final int index = Math.max(0, hi); // protection against broken token streams
+ assert entries.get(index).startOffset() <= startOffset;
+ assert index == entries.size() - 1 || entries.get(index + 1).startOffset() > startOffset;
+ return entries.get(index).boost();
+ }
+ return 1.0f;
+ }
+
+ @Override
+ public int read(char[] cbuf, int off, int len) throws IOException {
+ if (current == null) {
+ return -1;
+ }
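+        // with custom boosts, entries are consumed one at a time so that separator positions
+        // (and hence token offsets) stay aligned with the recorded per-entry boosts; otherwise
+        // entries are drained greedily into the buffer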
+ if (customBoost) {
+ int result = current.reader().read(cbuf, off, len);
+ if (result == -1) {
+ if (itsSeparatorTime) {
+ itsSeparatorTime = false;
+ cbuf[off] = ' ';
+ return 1;
+ }
+ itsSeparatorTime = true;
+ // close(); No need to close, we work on in mem readers
+ if (it.hasNext()) {
+ current = it.next();
+ } else {
+ current = null;
+ }
+ return read(cbuf, off, len);
+ }
+ return result;
+ } else {
+ int read = 0;
+ while (len > 0) {
+ int result = current.reader().read(cbuf, off, len);
+ if (result == -1) {
+ if (it.hasNext()) {
+ current = it.next();
+ } else {
+ current = null;
+ if (read == 0) {
+ return -1;
+ }
+ return read;
+ }
+ cbuf[off++] = ' ';
+ read++;
+ len--;
+ } else {
+ read += result;
+ off += result;
+ len -= result;
+ }
+ }
+ return read;
+ }
+ }
+
+ @Override
+ public void close() {
+ if (current != null) {
+ // no need to close, these are readers on strings
+ current = null;
+ }
+ }
+
+
+ @Override
+ public boolean ready() throws IOException {
+ return (current != null) && current.reader().ready();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (Entry entry : entries) {
+ sb.append(entry.name()).append(',');
+ }
+ return sb.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllField.java b/src/main/java/org/elasticsearch/common/lucene/all/AllField.java
new file mode 100644
index 0000000..daf2b67
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/all/AllField.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.elasticsearch.ElasticsearchException;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * A {@link Field} over {@link AllEntries} content, analyzed with the provided analyzer
+ * and with per-entry boosts encoded as token payloads (see {@link AllTokenStream}).
+ */
+public class AllField extends Field {
+
+ private final AllEntries allEntries;
+
+ private final Analyzer analyzer;
+
+ public AllField(String name, AllEntries allEntries, Analyzer analyzer, FieldType fieldType) {
+ super(name, fieldType);
+ this.allEntries = allEntries;
+ this.analyzer = analyzer;
+ }
+
+ @Override
+ public String stringValue() {
+ if (fieldType().stored()) {
+ return allEntries.buildText();
+ }
+ return null;
+ }
+
+ @Override
+ public Reader readerValue() {
+ return null;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ try {
+ allEntries.reset(); // reset the all entries, just in case it was read already
+ return AllTokenStream.allTokenStream(name, allEntries, analyzer);
+ } catch (IOException e) {
+            throw new ElasticsearchException("Failed to create token stream", e);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
new file mode 100644
index 0000000..7cc3d5a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
+import org.apache.lucene.search.spans.SpanScorer;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.SpanWeight;
+import org.apache.lucene.search.spans.TermSpans;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+import static org.apache.lucene.analysis.payloads.PayloadHelper.decodeFloat;
+
+/**
+ * A term query that takes all payload boost values into account.
+ */
+public class AllTermQuery extends SpanTermQuery {
+
+ public AllTermQuery(Term term) {
+ super(term);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ return new AllTermWeight(this, searcher);
+ }
+
+ protected class AllTermWeight extends SpanWeight {
+
+ public AllTermWeight(AllTermQuery query, IndexSearcher searcher) throws IOException {
+ super(query, searcher);
+ }
+
+ @Override
+ public AllTermSpanScorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
+ boolean topScorer, Bits acceptDocs) throws IOException {
+ if (this.stats == null) {
+ return null;
+ }
+ SimScorer sloppySimScorer = similarity.simScorer(stats, context);
+ return new AllTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts), this, sloppySimScorer);
+ }
+
+ protected class AllTermSpanScorer extends SpanScorer {
+ protected DocsAndPositionsEnum positions;
+ protected float payloadScore;
+ protected int payloadsSeen;
+
+ public AllTermSpanScorer(TermSpans spans, Weight weight, Similarity.SimScorer docScorer) throws IOException {
+ super(spans, weight, docScorer);
+ positions = spans.getPostings();
+ }
+
+ @Override
+ protected boolean setFreqCurrentDoc() throws IOException {
+ if (!more) {
+ return false;
+ }
+ doc = spans.doc();
+ freq = 0.0f;
+ numMatches = 0;
+ payloadScore = 0;
+ payloadsSeen = 0;
+ do {
+ int matchLength = spans.end() - spans.start();
+
+ freq += docScorer.computeSlopFactor(matchLength);
+ numMatches++;
+ processPayload();
+
+ more = spans.next();// this moves positions to the next match
+ } while (more && (doc == spans.doc()));
+ return true;
+ }
+
+ protected void processPayload() throws IOException {
+ final BytesRef payload;
+ if ((payload = positions.getPayload()) != null) {
+ payloadScore += decodeFloat(payload.bytes, payload.offset);
+ payloadsSeen++;
+
+ } else {
+ // zero out the payload?
+ }
+ }
+
+ /**
+ * @return {@link #getSpanScore()} * {@link #getPayloadScore()}
+ * @throws IOException
+ */
+ @Override
+ public float score() throws IOException {
+ return getSpanScore() * getPayloadScore();
+ }
+
+ /**
+ * Returns the SpanScorer score only.
+ * <p/>
+ * Should not be overridden without good cause!
+ *
+ * @return the score for just the Span part w/o the payload
+ * @throws IOException
+ * @see #score()
+ */
+ protected float getSpanScore() throws IOException {
+ return super.score();
+ }
+
+ /**
+ * The score for the payload
+ */
+ protected float getPayloadScore() {
+ return payloadsSeen > 0 ? (payloadScore / payloadsSeen) : 1;
+ }
+
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException{
+ AllTermSpanScorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
+ if (scorer != null) {
+ int newDoc = scorer.advance(doc);
+ if (newDoc == doc) {
+ float freq = scorer.sloppyFreq();
+ SimScorer docScorer = similarity.simScorer(stats, context);
+ ComplexExplanation inner = new ComplexExplanation();
+ inner.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+ Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+ inner.addDetail(scoreExplanation);
+ inner.setValue(scoreExplanation.getValue());
+ inner.setMatch(true);
+ ComplexExplanation result = new ComplexExplanation();
+ result.addDetail(inner);
+ Explanation payloadBoost = new Explanation();
+ result.addDetail(payloadBoost);
+ final float payloadScore = scorer.getPayloadScore();
+ payloadBoost.setValue(payloadScore);
+ // GSI: I suppose we could toString the payload, but I don't think that
+ // would be a good idea
+ payloadBoost.setDescription("allPayload(...)");
+ result.setValue(inner.getValue() * payloadScore);
+ result.setDescription("btq, product of:");
+ return result;
+ }
+ }
+
+ return new ComplexExplanation(false, 0.0f, "no matching term");
+        return new ComplexExplanation(false, 0.0f, "no matching term");
+ }
+
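+    // the +1 keeps an AllTermQuery from hashing equal to a plain SpanTermQuery over the same term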
+ @Override
+ public int hashCode() {
+ return super.hashCode() + 1;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (!super.equals(obj))
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ return true;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java b/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java
new file mode 100644
index 0000000..076cbac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+import static org.apache.lucene.analysis.payloads.PayloadHelper.encodeFloat;
+
+/**
+ * A {@link TokenFilter} that encodes each token's {@link AllEntries} boost as a payload
+ * (consumed at query time by {@link AllTermQuery}).
+ */
+public final class AllTokenStream extends TokenFilter {
+
+ public static TokenStream allTokenStream(String allFieldName, AllEntries allEntries, Analyzer analyzer) throws IOException {
+ return new AllTokenStream(analyzer.tokenStream(allFieldName, allEntries), allEntries);
+ }
+
+ private final BytesRef payloadSpare = new BytesRef(new byte[4]);
+
+ private final AllEntries allEntries;
+
+ private final OffsetAttribute offsetAttribute;
+ private final PayloadAttribute payloadAttribute;
+
+ AllTokenStream(TokenStream input, AllEntries allEntries) {
+ super(input);
+ this.allEntries = allEntries;
+ offsetAttribute = addAttribute(OffsetAttribute.class);
+ payloadAttribute = addAttribute(PayloadAttribute.class);
+ }
+
+ public AllEntries allEntries() {
+ return allEntries;
+ }
+
+ @Override
+ public final boolean incrementToken() throws IOException {
+ if (!input.incrementToken()) {
+ return false;
+ }
+ final float boost = allEntries.boost(offsetAttribute.startOffset());
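+        // a non-default boost is encoded as a 4 byte float payload, which AllTermQuery
+        // decodes at query time; the default boost carries no payload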
+ if (boost != 1.0f) {
+ encodeFloat(boost, payloadSpare.bytes, payloadSpare.offset);
+ payloadAttribute.setPayload(payloadSpare);
+ } else {
+ payloadAttribute.setPayload(null);
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return allEntries.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/AllDocIdSet.java b/src/main/java/org/elasticsearch/common/lucene/docset/AllDocIdSet.java
new file mode 100644
index 0000000..7dad875
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/AllDocIdSet.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+/**
+ * A {@link DocIdSet} that matches all docs up to a {@code maxDoc}.
+ */
+public class AllDocIdSet extends DocIdSet {
+
+ private final int maxDoc;
+
+ public AllDocIdSet(int maxDoc) {
+ this.maxDoc = maxDoc;
+ }
+
+ /**
+ * Does not go to the reader and ask for data, so can be cached.
+ */
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return new Iterator(maxDoc);
+ }
+
+ @Override
+ public Bits bits() throws IOException {
+ return new Bits.MatchAllBits(maxDoc);
+ }
+
+ public static final class Iterator extends DocIdSetIterator {
+
+ private final int maxDoc;
+ private int doc = -1;
+
+ public Iterator(int maxDoc) {
+ this.maxDoc = maxDoc;
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ if (++doc < maxDoc) {
+ return doc;
+ }
+ return doc = NO_MORE_DOCS;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ doc = target;
+ if (doc < maxDoc) {
+ return doc;
+ }
+ return doc = NO_MORE_DOCS;
+ }
+
+ @Override
+ public long cost() {
+ return maxDoc;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/AndDocIdSet.java b/src/main/java/org/elasticsearch/common/lucene/docset/AndDocIdSet.java
new file mode 100644
index 0000000..c35812f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/AndDocIdSet.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A {@link DocIdSet} representing the intersection (AND) of several doc id sets.
+ */
+public class AndDocIdSet extends DocIdSet {
+
+ private final DocIdSet[] sets;
+
+ public AndDocIdSet(DocIdSet[] sets) {
+ this.sets = sets;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ for (DocIdSet set : sets) {
+ if (!set.isCacheable()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public Bits bits() throws IOException {
+ Bits[] bits = new Bits[sets.length];
+ for (int i = 0; i < sets.length; i++) {
+ bits[i] = sets[i].bits();
+ if (bits[i] == null) {
+ return null;
+ }
+ }
+ return new AndBits(bits);
+ }
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+        // we try to be smart here: iterate over the doc sets that support fast iteration
+        // as much as possible before falling back to checking the "bits" based ones
+ List<DocIdSet> iterators = new ArrayList<DocIdSet>(sets.length);
+ List<Bits> bits = new ArrayList<Bits>(sets.length);
+ for (DocIdSet set : sets) {
+ if (DocIdSets.isFastIterator(set)) {
+ iterators.add(set);
+ } else {
+ Bits bit = set.bits();
+ if (bit != null) {
+ bits.add(bit);
+ } else {
+ iterators.add(set);
+ }
+ }
+ }
+ if (bits.isEmpty()) {
+ return IteratorBasedIterator.newDocIdSetIterator(iterators.toArray(new DocIdSet[iterators.size()]));
+ }
+ if (iterators.isEmpty()) {
+ return new BitsDocIdSetIterator(new AndBits(bits.toArray(new Bits[bits.size()])));
+ }
+ // combination of both..., first iterating over the "fast" ones, and then checking on the more
+ // expensive ones
+ return new BitsDocIdSetIterator.FilteredIterator(
+ IteratorBasedIterator.newDocIdSetIterator(iterators.toArray(new DocIdSet[iterators.size()])),
+ new AndBits(bits.toArray(new Bits[bits.size()]))
+ );
+ }
+
+ static class AndBits implements Bits {
+
+ private final Bits[] bits;
+
+ AndBits(Bits[] bits) {
+ this.bits = bits;
+ }
+
+ @Override
+ public boolean get(int index) {
+ for (Bits bit : bits) {
+ if (!bit.get(index)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public int length() {
+ return bits[0].length();
+ }
+ }
+
+ static class IteratorBasedIterator extends DocIdSetIterator {
+ private int lastReturn = -1;
+ private final DocIdSetIterator[] iterators;
+ private final long cost;
+
+
+ public static DocIdSetIterator newDocIdSetIterator(DocIdSet[] sets) throws IOException {
+ if (sets.length == 0) {
+ return DocIdSetIterator.empty();
+ }
+ final DocIdSetIterator[] iterators = new DocIdSetIterator[sets.length];
+ int j = 0;
+ long cost = Integer.MAX_VALUE;
+ for (DocIdSet set : sets) {
+ if (set == null) {
+ return DocIdSetIterator.empty();
+ } else {
+ DocIdSetIterator docIdSetIterator = set.iterator();
+ if (docIdSetIterator == null) {
+ return DocIdSetIterator.empty();// non matching
+ }
+ iterators[j++] = docIdSetIterator;
+ cost = Math.min(cost, docIdSetIterator.cost());
+ }
+ }
+ if (sets.length == 1) {
+ // shortcut if there is only one valid iterator.
+ return iterators[0];
+ }
+ return new IteratorBasedIterator(iterators, cost);
+ }
+
+ private IteratorBasedIterator(DocIdSetIterator[] iterators, long cost) throws IOException {
+ this.iterators = iterators;
+ this.cost = cost;
+ }
+
+ @Override
+ public final int docID() {
+ return lastReturn;
+ }
+
+ @Override
+ public final int nextDoc() throws IOException {
+
+ if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) {
+ assert false : "Illegal State - DocIdSetIterator is already exhausted";
+ return DocIdSetIterator.NO_MORE_DOCS;
+ }
+
+ DocIdSetIterator dcit = iterators[0];
+ int target = dcit.nextDoc();
+ int size = iterators.length;
+ int skip = 0;
+ int i = 1;
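+            // leapfrog: advance each remaining iterator to the current target; whenever one
+            // overshoots, adopt its doc as the new target and rescan the others from the start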
+ while (i < size) {
+ if (i != skip) {
+ dcit = iterators[i];
+ int docid = dcit.advance(target);
+ if (docid > target) {
+ target = docid;
+ if (i != 0) {
+ skip = i;
+ i = 0;
+ continue;
+ } else
+ skip = 0;
+ }
+ }
+ i++;
+ }
+ return (lastReturn = target);
+ }
+
+ @Override
+ public final int advance(int target) throws IOException {
+
+ if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) {
+ assert false : "Illegal State - DocIdSetIterator is already exhausted";
+ return DocIdSetIterator.NO_MORE_DOCS;
+ }
+
+ DocIdSetIterator dcit = iterators[0];
+ target = dcit.advance(target);
+ int size = iterators.length;
+ int skip = 0;
+ int i = 1;
+ while (i < size) {
+ if (i != skip) {
+ dcit = iterators[i];
+ int docid = dcit.advance(target);
+ if (docid > target) {
+ target = docid;
+ if (i != 0) {
+ skip = i;
+ i = 0;
+ continue;
+ } else {
+ skip = 0;
+ }
+ }
+ }
+ i++;
+ }
+ return (lastReturn = target);
+ }
+
+ @Override
+ public long cost() {
+ return cost;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/BitsDocIdSetIterator.java b/src/main/java/org/elasticsearch/common/lucene/docset/BitsDocIdSetIterator.java
new file mode 100644
index 0000000..8ad2b6c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/BitsDocIdSetIterator.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FilteredDocIdSetIterator;
+import org.apache.lucene.util.Bits;
+
+/**
+ * A {@link Bits} based iterator.
+ */
+public class BitsDocIdSetIterator extends MatchDocIdSetIterator {
+
+ private final Bits bits;
+
+ public BitsDocIdSetIterator(Bits bits) {
+ super(bits.length());
+ this.bits = bits;
+ }
+
+ public BitsDocIdSetIterator(int maxDoc, Bits bits) {
+ super(maxDoc);
+ this.bits = bits;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ return bits.get(doc);
+ }
+
+ public static class FilteredIterator extends FilteredDocIdSetIterator {
+
+ private final Bits bits;
+
+ FilteredIterator(DocIdSetIterator innerIter, Bits bits) {
+ super(innerIter);
+ this.bits = bits;
+ }
+
+ @Override
+ protected boolean match(int doc) {
+ return bits.get(doc);
+ }
+ }
+
+ @Override
+ public long cost() {
+ return this.bits.length();
+ }
+}
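A small usage sketch (illustrative only): FixedBitSet implements Bits, so it can be exposed as an iterator through BitsDocIdSetIterator; the demo class is invented.

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.docset.BitsDocIdSetIterator;

public class BitsDocIdSetIteratorDemo {
    public static void main(String[] args) throws Exception {
        FixedBitSet bits = new FixedBitSet(6); // FixedBitSet implements Bits
        bits.set(0);
        bits.set(4);
        DocIdSetIterator it = new BitsDocIdSetIterator(bits); // maxDoc defaults to bits.length()
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            System.out.println(doc); // prints 0, then 4
        }
    }
}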
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/ContextDocIdSet.java b/src/main/java/org/elasticsearch/common/lucene/docset/ContextDocIdSet.java
new file mode 100644
index 0000000..b56579c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/ContextDocIdSet.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+
+/**
+ * A holder for a {@link DocIdSet} and the {@link AtomicReaderContext} it is associated with.
+ */
+public class ContextDocIdSet {
+
+ public final AtomicReaderContext context;
+ public final DocIdSet docSet;
+
+ public ContextDocIdSet(AtomicReaderContext context, DocIdSet docSet) {
+ this.context = context;
+ this.docSet = docSet;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java
new file mode 100644
index 0000000..0ecef99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.OpenBitSetIterator;
+import org.elasticsearch.common.Nullable;
+
+import java.io.IOException;
+
+/**
+ * Utility methods for working with {@link DocIdSet} instances.
+ */
+public class DocIdSets {
+
+ public static long sizeInBytes(DocIdSet docIdSet) {
+ if (docIdSet instanceof FixedBitSet) {
+ return ((FixedBitSet) docIdSet).getBits().length * 8 + 16;
+ }
+ // only for empty ones and unknowns...
+ return 1;
+ }
+
+ /**
+ * Is it an empty {@link DocIdSet}?
+ */
+ public static boolean isEmpty(@Nullable DocIdSet set) {
+ return set == null || set == EMPTY_DOCIDSET;
+ }
+
+ /**
+ * Is the {@link org.apache.lucene.search.DocIdSetIterator} implemented in a "fast" manner?
+ * For example, it does not end up iterating one doc at a time to check its "value".
+ */
+ public static boolean isFastIterator(DocIdSet set) {
+ return set instanceof FixedBitSet;
+ }
+
+ /**
+ * Is the {@link org.apache.lucene.search.DocIdSetIterator} implemented in a "fast" manner?
+ * For example, it does not end up iterating one doc at a time to check its "value".
+ */
+ public static boolean isFastIterator(DocIdSetIterator iterator) {
+ // this is the iterator in the FixedBitSet.
+ return iterator instanceof OpenBitSetIterator;
+ }
+
+ /**
+ * Converts to a cacheable {@link DocIdSet}
+ * <p/>
+ * Note, we don't use {@link org.apache.lucene.search.DocIdSet#isCacheable()} because execution
+ * might be expensive even if it is cacheable (i.e., it does not go back to the reader to execute).
+ * We effectively always return either an empty {@link DocIdSet} or a {@link FixedBitSet}, but never <code>null</code>.
+ */
+ public static DocIdSet toCacheable(AtomicReader reader, @Nullable DocIdSet set) throws IOException {
+ if (set == null || set == EMPTY_DOCIDSET) {
+ return EMPTY_DOCIDSET;
+ }
+ DocIdSetIterator it = set.iterator();
+ if (it == null) {
+ return EMPTY_DOCIDSET;
+ }
+ int doc = it.nextDoc();
+ if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+ return EMPTY_DOCIDSET;
+ }
+ if (set instanceof FixedBitSet) {
+ return set;
+ }
+ // TODO: should we use WAH8DocIdSet like Lucene?
+ FixedBitSet fixedBitSet = new FixedBitSet(reader.maxDoc());
+ do {
+ fixedBitSet.set(doc);
+ doc = it.nextDoc();
+ } while (doc != DocIdSetIterator.NO_MORE_DOCS);
+ return fixedBitSet;
+ }
+
+ /** An empty {@code DocIdSet} instance */
+ protected static final DocIdSet EMPTY_DOCIDSET = new DocIdSet() {
+
+ @Override
+ public DocIdSetIterator iterator() {
+ return DocIdSetIterator.empty();
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ // we explicitly provide no random access: this set is 100% sparse, so the iterator exits faster than per-doc checks
+ @Override
+ public Bits bits() {
+ return null;
+ }
+ };
+
+ /**
+ * Converts a {@link DocIdSet} into a {@link Bits} instance, never returning <code>null</code>.
+ */
+ public static Bits toSafeBits(AtomicReader reader, @Nullable DocIdSet set) throws IOException {
+ if (set == null) {
+ return new Bits.MatchNoBits(reader.maxDoc());
+ }
+ Bits bits = set.bits();
+ if (bits != null) {
+ return bits;
+ }
+ DocIdSetIterator iterator = set.iterator();
+ if (iterator == null) {
+ return new Bits.MatchNoBits(reader.maxDoc());
+ }
+ return toFixedBitSet(iterator, reader.maxDoc());
+ }
+
+ /**
+ * Creates a {@link FixedBitSet} from an iterator.
+ */
+ public static FixedBitSet toFixedBitSet(DocIdSetIterator iterator, int numBits) throws IOException {
+ FixedBitSet set = new FixedBitSet(numBits);
+ int doc;
+ while ((doc = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+ set.set(doc);
+ }
+ return set;
+ }
+}
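A short sketch (illustrative only) of the DocIdSets helpers that need no index reader; the demo class is invented.

import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.docset.DocIdSets;

public class DocIdSetsDemo {
    public static void main(String[] args) throws Exception {
        FixedBitSet source = new FixedBitSet(10);
        source.set(2);
        source.set(7);
        // materialize any iterator into a random-access FixedBitSet
        FixedBitSet dense = DocIdSets.toFixedBitSet(source.iterator(), 10);
        System.out.println(dense.get(2) && dense.get(7)); // true
        System.out.println(DocIdSets.isEmpty(null));          // true: null means "matches nothing"
        System.out.println(DocIdSets.isFastIterator(source)); // true: FixedBitSet iterates fast
    }
}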
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSet.java b/src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSet.java
new file mode 100644
index 0000000..32751c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSet.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FilteredDocIdSetIterator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.Nullable;
+
+import java.io.IOException;
+
+/**
+ * A {@link DocIdSet} that works on a per-document level, deciding via {@link #matchDoc(int)} whether each doc matches.
+ */
+public abstract class MatchDocIdSet extends DocIdSet implements Bits {
+
+ private final int maxDoc;
+ private final Bits acceptDocs;
+
+ protected MatchDocIdSet(int maxDoc, @Nullable Bits acceptDocs) {
+ this.maxDoc = maxDoc;
+ this.acceptDocs = acceptDocs;
+ }
+
+ /**
+ * Does this document match?
+ */
+ protected abstract boolean matchDoc(int doc);
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ if (acceptDocs == null) {
+ return new NoAcceptDocsIterator(maxDoc);
+ } else if (acceptDocs instanceof FixedBitSet) {
+ return new FixedBitSetIterator(((DocIdSet) acceptDocs).iterator());
+ } else {
+ return new BothIterator(maxDoc, acceptDocs);
+ }
+ }
+
+ @Override
+ public Bits bits() throws IOException {
+ return this;
+ }
+
+ @Override
+ public boolean get(int index) {
+ return matchDoc(index);
+ }
+
+ @Override
+ public int length() {
+ return maxDoc;
+ }
+
+ class NoAcceptDocsIterator extends DocIdSetIterator {
+
+ private final int maxDoc;
+ private int doc = -1;
+
+ NoAcceptDocsIterator(int maxDoc) {
+ this.maxDoc = maxDoc;
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() {
+ do {
+ doc++;
+ if (doc >= maxDoc) {
+ return doc = NO_MORE_DOCS;
+ }
+ } while (!matchDoc(doc));
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) {
+ for (doc = target; doc < maxDoc; doc++) {
+ if (matchDoc(doc)) {
+ return doc;
+ }
+ }
+ return doc = NO_MORE_DOCS;
+ }
+
+ @Override
+ public long cost() {
+ return maxDoc;
+ }
+
+ }
+
+ class FixedBitSetIterator extends FilteredDocIdSetIterator {
+
+ FixedBitSetIterator(DocIdSetIterator innerIter) {
+ super(innerIter);
+ }
+
+ @Override
+ protected boolean match(int doc) {
+ return matchDoc(doc);
+ }
+ }
+
+ class BothIterator extends DocIdSetIterator {
+ private final int maxDoc;
+ private final Bits acceptDocs;
+ private int doc = -1;
+
+ BothIterator(int maxDoc, Bits acceptDocs) {
+ this.maxDoc = maxDoc;
+ this.acceptDocs = acceptDocs;
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() {
+ do {
+ doc++;
+ if (doc >= maxDoc) {
+ return doc = NO_MORE_DOCS;
+ }
+ } while (!(matchDoc(doc) && acceptDocs.get(doc)));
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) {
+ for (doc = target; doc < maxDoc; doc++) {
+ if (matchDoc(doc) && acceptDocs.get(doc)) {
+ return doc;
+ }
+ }
+ return doc = NO_MORE_DOCS;
+ }
+
+ @Override
+ public long cost() {
+ return maxDoc;
+ }
+ }
+}
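A minimal subclass sketch (illustrative only): MatchDocIdSet only requires matchDoc(int); with no acceptDocs the simple maxDoc scan is used. The demo class and predicate are invented.

import org.apache.lucene.search.DocIdSetIterator;
import org.elasticsearch.common.lucene.docset.MatchDocIdSet;

public class MatchDocIdSetDemo {
    public static void main(String[] args) throws Exception {
        MatchDocIdSet evens = new MatchDocIdSet(10, null) {
            @Override
            protected boolean matchDoc(int doc) {
                return (doc & 1) == 0; // match even doc ids
            }
        };
        DocIdSetIterator it = evens.iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            System.out.print(doc + " "); // 0 2 4 6 8
        }
    }
}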
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSetIterator.java b/src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSetIterator.java
new file mode 100644
index 0000000..653e17c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/MatchDocIdSetIterator.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSetIterator;
+
+import java.io.IOException;
+
+/**
+ * A {@link DocIdSetIterator} that steps through doc ids one at a time, keeping those
+ * accepted by {@link #matchDoc(int)}.
+ */
+public abstract class MatchDocIdSetIterator extends DocIdSetIterator {
+ private final int maxDoc;
+ private int doc = -1;
+
+ public MatchDocIdSetIterator(int maxDoc) {
+ this.maxDoc = maxDoc;
+ }
+
+ protected abstract boolean matchDoc(int doc);
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ do {
+ doc++;
+ if (doc >= maxDoc) {
+ return doc = NO_MORE_DOCS;
+ }
+ } while (!matchDoc(doc));
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ if (target >= maxDoc) {
+ return doc = NO_MORE_DOCS;
+ }
+ doc = target;
+ while (!matchDoc(doc)) {
+ doc++;
+ if (doc >= maxDoc) {
+ return doc = NO_MORE_DOCS;
+ }
+ }
+ return doc;
+ }
+}
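The standalone iterator variant can be sketched the same way (illustrative only; cost() is abstract on DocIdSetIterator in this Lucene version, so the subclass supplies an estimate).

import org.apache.lucene.search.DocIdSetIterator;
import org.elasticsearch.common.lucene.docset.MatchDocIdSetIterator;

public class MatchDocIdSetIteratorDemo {
    public static void main(String[] args) throws Exception {
        DocIdSetIterator it = new MatchDocIdSetIterator(10) {
            @Override
            protected boolean matchDoc(int doc) {
                return doc % 3 == 0; // match multiples of three
            }
            @Override
            public long cost() {
                return 10; // at most maxDoc candidates are checked
            }
        };
        System.out.println(it.advance(4)); // 6, the first multiple of three at or after 4
    }
}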
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/NotDocIdSet.java b/src/main/java/org/elasticsearch/common/lucene/docset/NotDocIdSet.java
new file mode 100644
index 0000000..670f4ac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/NotDocIdSet.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+/**
+ * A {@link DocIdSet} that matches the "inverse" of the provided doc id set.
+ */
+public class NotDocIdSet extends DocIdSet {
+
+ private final DocIdSet set;
+ private final int maxDoc;
+
+ public NotDocIdSet(DocIdSet set, int maxDoc) {
+ this.maxDoc = maxDoc;
+ this.set = set;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return set.isCacheable();
+ }
+
+ @Override
+ public Bits bits() throws IOException {
+ Bits bits = set.bits();
+ if (bits == null) {
+ return null;
+ }
+ return new NotBits(bits);
+ }
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ DocIdSetIterator it = set.iterator();
+ if (it == null) {
+ return new AllDocIdSet.Iterator(maxDoc);
+ }
+ // TODO: can we optimize for the FixedBitSet case?
+ // if we have random-access bits, it is potentially much faster to just check the flipped
+ // value per doc; whether that pays off depends on the nature of the Bits, e.g. with a
+ // FixedBitSet it helps when most of the docs are set
+ Bits bits = set.bits();
+ if (bits != null) {
+ return new BitsBasedIterator(bits);
+ }
+ return new IteratorBasedIterator(maxDoc, it);
+ }
+
+ public static class NotBits implements Bits {
+
+ private final Bits bits;
+
+ public NotBits(Bits bits) {
+ this.bits = bits;
+ }
+
+ @Override
+ public boolean get(int index) {
+ return !bits.get(index);
+ }
+
+ @Override
+ public int length() {
+ return bits.length();
+ }
+ }
+
+ public static class BitsBasedIterator extends MatchDocIdSetIterator {
+
+ private final Bits bits;
+
+ public BitsBasedIterator(Bits bits) {
+ super(bits.length());
+ this.bits = bits;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ return !bits.get(doc);
+ }
+
+ @Override
+ public long cost() {
+ return bits.length();
+ }
+ }
+
+ public static class IteratorBasedIterator extends DocIdSetIterator {
+ private final int max;
+ private DocIdSetIterator it1;
+ private int lastReturn = -1;
+ private int innerDocid = -1;
+ private final long cost;
+
+ IteratorBasedIterator(int max, DocIdSetIterator it) throws IOException {
+ this.max = max;
+ this.it1 = it;
+ this.cost = it1.cost();
+ if ((innerDocid = it1.nextDoc()) == DocIdSetIterator.NO_MORE_DOCS) {
+ it1 = null;
+ }
+ }
+
+ @Override
+ public int docID() {
+ return lastReturn;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return advance(0);
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+
+ if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) {
+ return DocIdSetIterator.NO_MORE_DOCS;
+ }
+
+ if (target <= lastReturn) target = lastReturn + 1;
+
+ if (it1 != null && innerDocid < target) {
+ if ((innerDocid = it1.advance(target)) == DocIdSetIterator.NO_MORE_DOCS) {
+ it1 = null;
+ }
+ }
+
+ while (it1 != null && innerDocid == target) {
+ target++;
+ if (target >= max) {
+ return (lastReturn = DocIdSetIterator.NO_MORE_DOCS);
+ }
+ if ((innerDocid = it1.advance(target)) == DocIdSetIterator.NO_MORE_DOCS) {
+ it1 = null;
+ }
+ }
+
+ // added relative to the code this was derived from, which could return a target >= max here
+ if (target >= max) {
+ return (lastReturn = DocIdSetIterator.NO_MORE_DOCS);
+ }
+
+ return (lastReturn = target);
+ }
+
+ @Override
+ public long cost() {
+ return cost;
+ }
+ }
+}
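An inversion sketch (illustrative only): because FixedBitSet exposes random-access bits, NotDocIdSet takes the bits-based path here. The demo class is invented.

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.docset.NotDocIdSet;

public class NotDocIdSetDemo {
    public static void main(String[] args) throws Exception {
        FixedBitSet set = new FixedBitSet(5);
        set.set(1);
        set.set(3);
        DocIdSetIterator it = new NotDocIdSet(set, 5).iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            System.out.print(doc + " "); // 0 2 4
        }
    }
}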
diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/OrDocIdSet.java b/src/main/java/org/elasticsearch/common/lucene/docset/OrDocIdSet.java
new file mode 100644
index 0000000..7b67525
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/docset/OrDocIdSet.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.docset;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+/**
+ * A {@link DocIdSet} that matches the union of the provided doc id sets.
+ */
+public class OrDocIdSet extends DocIdSet {
+
+ private final DocIdSet[] sets;
+
+ public OrDocIdSet(DocIdSet[] sets) {
+ this.sets = sets;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ for (DocIdSet set : sets) {
+ if (!set.isCacheable()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public Bits bits() throws IOException {
+ Bits[] bits = new Bits[sets.length];
+ for (int i = 0; i < sets.length; i++) {
+ bits[i] = sets[i].bits();
+ if (bits[i] == null) {
+ return null;
+ }
+ }
+ return new OrBits(bits);
+ }
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return new IteratorBasedIterator(sets);
+ }
+
+ static class OrBits implements Bits {
+ private final Bits[] bits;
+
+ OrBits(Bits[] bits) {
+ this.bits = bits;
+ }
+
+ @Override
+ public boolean get(int index) {
+ for (Bits bit : bits) {
+ if (bit.get(index)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int length() {
+ return bits[0].length();
+ }
+ }
+
+ static class IteratorBasedIterator extends DocIdSetIterator {
+
+ final class Item {
+ public final DocIdSetIterator iter;
+ public int doc;
+
+ public Item(DocIdSetIterator iter) {
+ this.iter = iter;
+ this.doc = -1;
+ }
+ }
+
+ private int _curDoc;
+ private final Item[] _heap;
+ private int _size;
+ private final long cost;
+
+ IteratorBasedIterator(DocIdSet[] sets) throws IOException {
+ _curDoc = -1;
+ _heap = new Item[sets.length];
+ _size = 0;
+ long cost = 0;
+ for (DocIdSet set : sets) {
+ DocIdSetIterator iterator = set.iterator();
+ if (iterator != null) {
+ _heap[_size++] = new Item(iterator);
+ cost += iterator.cost();
+ }
+ }
+ this.cost = cost;
+ if (_size == 0) _curDoc = DocIdSetIterator.NO_MORE_DOCS;
+ }
+
+ @Override
+ public final int docID() {
+ return _curDoc;
+ }
+
+ @Override
+ public final int nextDoc() throws IOException {
+ if (_curDoc == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
+
+ Item top = _heap[0];
+ while (true) {
+ DocIdSetIterator topIter = top.iter;
+ int docid;
+ if ((docid = topIter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+ top.doc = docid;
+ heapAdjust();
+ } else {
+ heapRemoveRoot();
+ if (_size == 0) return (_curDoc = DocIdSetIterator.NO_MORE_DOCS);
+ }
+ top = _heap[0];
+ int topDoc = top.doc;
+ if (topDoc > _curDoc) {
+ return (_curDoc = topDoc);
+ }
+ }
+ }
+
+ @Override
+ public final int advance(int target) throws IOException {
+ if (_curDoc == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
+
+ if (target <= _curDoc) target = _curDoc + 1;
+
+ Item top = _heap[0];
+ while (true) {
+ DocIdSetIterator topIter = top.iter;
+ int docid;
+ if ((docid = topIter.advance(target)) != DocIdSetIterator.NO_MORE_DOCS) {
+ top.doc = docid;
+ heapAdjust();
+ } else {
+ heapRemoveRoot();
+ if (_size == 0) return (_curDoc = DocIdSetIterator.NO_MORE_DOCS);
+ }
+ top = _heap[0];
+ int topDoc = top.doc;
+ if (topDoc >= target) {
+ return (_curDoc = topDoc);
+ }
+ }
+ }
+
+// Organize subScorers into a min heap with scorers generating the earliest document on top.
+ /*
+ private final void heapify() {
+ int size = _size;
+ for (int i=(size>>1)-1; i>=0; i--)
+ heapAdjust(i);
+ }
+ */
+ /* The subtree of subScorers at root is a min heap except possibly for its root element.
+ * Bubble the root down as required to make the subtree a heap.
+ */
+
+ private final void heapAdjust() {
+ final Item[] heap = _heap;
+ final Item top = heap[0];
+ final int doc = top.doc;
+ final int size = _size;
+ int i = 0;
+
+ while (true) {
+ int lchild = (i << 1) + 1;
+ if (lchild >= size) break;
+
+ Item left = heap[lchild];
+ int ldoc = left.doc;
+
+ int rchild = lchild + 1;
+ if (rchild < size) {
+ Item right = heap[rchild];
+ int rdoc = right.doc;
+
+ if (rdoc <= ldoc) {
+ if (doc <= rdoc) break;
+
+ heap[i] = right;
+ i = rchild;
+ continue;
+ }
+ }
+
+ if (doc <= ldoc) break;
+
+ heap[i] = left;
+ i = lchild;
+ }
+ heap[i] = top;
+ }
+
+ // Remove the root Scorer from subScorers and re-establish it as a heap
+
+ private void heapRemoveRoot() {
+ _size--;
+ if (_size > 0) {
+ Item tmp = _heap[0];
+ _heap[0] = _heap[_size];
+ _heap[_size] = tmp; // keep the finished iterator at the end for debugging
+ heapAdjust();
+ }
+ }
+
+ @Override
+ public long cost() {
+ return cost;
+ }
+
+ }
+}
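A union sketch (illustrative only): the iterator keeps the sub-iterators in a min-heap keyed by their current doc, so duplicates collapse and output stays ordered. The demo class and doc ids are invented.

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.docset.OrDocIdSet;

public class OrDocIdSetDemo {
    public static void main(String[] args) throws Exception {
        FixedBitSet a = new FixedBitSet(8);
        a.set(1); a.set(4);
        FixedBitSet b = new FixedBitSet(8);
        b.set(2); b.set(4);
        DocIdSetIterator it = new OrDocIdSet(new DocIdSet[]{a, b}).iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            System.out.print(doc + " "); // 1 2 4 -- doc 4 is reported once
        }
    }
}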
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/AndFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/AndFilter.java
new file mode 100644
index 0000000..51f91ed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/AndFilter.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.AndDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A filter that matches documents accepted by all of the provided filters.
+ */
+public class AndFilter extends Filter {
+
+ private final List<? extends Filter> filters;
+
+ public AndFilter(List<? extends Filter> filters) {
+ this.filters = filters;
+ }
+
+ public List<? extends Filter> filters() {
+ return filters;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ if (filters.size() == 1) {
+ return filters.get(0).getDocIdSet(context, acceptDocs);
+ }
+ DocIdSet[] sets = new DocIdSet[filters.size()];
+ for (int i = 0; i < filters.size(); i++) {
+ DocIdSet set = filters.get(i).getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(set)) { // none matching for this filter, we AND, so return EMPTY
+ return null;
+ }
+ sets[i] = set;
+ }
+ return new AndDocIdSet(sets);
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 7;
+ hash = 31 * hash + (null == filters ? 0 : filters.hashCode());
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+
+ if ((obj == null) || (obj.getClass() != this.getClass()))
+ return false;
+
+ AndFilter other = (AndFilter) obj;
+ return equalFilters(filters, other.filters);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ for (Filter filter : filters) {
+ if (builder.length() > 0) {
+ builder.append(' ');
+ }
+ builder.append('+');
+ builder.append(filter);
+ }
+ return builder.toString();
+ }
+
+ private boolean equalFilters(List<? extends Filter> filters1, List<? extends Filter> filters2) {
+ return (filters1 == filters2) || ((filters1 != null) && filters1.equals(filters2));
+ }
+}
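A composition sketch (illustrative only), using the MatchAllDocsFilter defined later in this patch; the demo class is invented.

import java.util.Arrays;

import org.apache.lucene.search.Filter;
import org.elasticsearch.common.lucene.search.AndFilter;
import org.elasticsearch.common.lucene.search.MatchAllDocsFilter;

public class AndFilterDemo {
    public static void main(String[] args) {
        Filter and = new AndFilter(Arrays.asList(new MatchAllDocsFilter(), new MatchAllDocsFilter()));
        // per segment, getDocIdSet(...) would return the AND of the sub-filters' sets
        System.out.println(and); // +*:* +*:*
    }
}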
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/ApplyAcceptedDocsFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/ApplyAcceptedDocsFilter.java
new file mode 100644
index 0000000..b506c68
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/ApplyAcceptedDocsFilter.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+
+import java.io.IOException;
+
+/**
+ * The assumption is that the underlying filter might not apply the accepted docs, so this
+ * filter wraps it and applies the accepted docs on top of its result.
+ */
+// TODO: we can try to be smart and only apply this if a filter is cached (down the "chain"), since that is the only place acceptDocs are not applied in ES
+public class ApplyAcceptedDocsFilter extends Filter {
+
+ private final Filter filter;
+
+ public ApplyAcceptedDocsFilter(Filter filter) {
+ this.filter = filter;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ DocIdSet docIdSet = filter.getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(docIdSet)) {
+ return null;
+ }
+ if (acceptDocs == null) {
+ return docIdSet;
+ }
+ if (acceptDocs == context.reader().getLiveDocs()) {
+ // optimized wrapper for not deleted cases
+ return new NotDeletedDocIdSet(docIdSet, acceptDocs);
+ }
+ return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
+ }
+
+ public Filter filter() {
+ return this.filter;
+ }
+
+ @Override
+ public String toString() {
+ return filter.toString();
+ }
+
+ static class NotDeletedDocIdSet extends DocIdSet {
+
+ private final DocIdSet innerSet;
+ private final Bits liveDocs;
+
+ NotDeletedDocIdSet(DocIdSet innerSet, Bits liveDocs) {
+ this.innerSet = innerSet;
+ this.liveDocs = liveDocs;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return innerSet.isCacheable();
+ }
+
+ @Override
+ public Bits bits() throws IOException {
+ Bits bits = innerSet.bits();
+ if (bits == null) {
+ return null;
+ }
+ return new NotDeleteBits(bits, liveDocs);
+ }
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ if (!DocIdSets.isFastIterator(innerSet) && liveDocs instanceof FixedBitSet) {
+ // the inner iterator is not "fast", so it is cheaper to iterate over the live docs
+ // and filter them by the inner set's Bits instead; we can only reverse the order
+ // like this when the inner set supports random access
+ Bits bits = innerSet.bits();
+ if (bits != null) {
+ return new NotDeletedDocIdSetIterator(((FixedBitSet) liveDocs).iterator(), bits);
+ }
+ }
+ DocIdSetIterator iterator = innerSet.iterator();
+ if (iterator == null) {
+ return null;
+ }
+ return new NotDeletedDocIdSetIterator(iterator, liveDocs);
+ }
+ }
+
+ static class NotDeleteBits implements Bits {
+
+ private final Bits bits;
+ private final Bits liveDocs;
+
+ NotDeleteBits(Bits bits, Bits liveDocs) {
+ this.bits = bits;
+ this.liveDocs = liveDocs;
+ }
+
+ @Override
+ public boolean get(int index) {
+ return liveDocs.get(index) && bits.get(index);
+ }
+
+ @Override
+ public int length() {
+ return bits.length();
+ }
+ }
+
+ static class NotDeletedDocIdSetIterator extends FilteredDocIdSetIterator {
+
+ private final Bits match;
+
+ NotDeletedDocIdSetIterator(DocIdSetIterator innerIter, Bits match) {
+ super(innerIter);
+ this.match = match;
+ }
+
+ @Override
+ protected boolean match(int doc) {
+ return match.get(doc);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((filter == null) ? 0 : filter.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ ApplyAcceptedDocsFilter other = (ApplyAcceptedDocsFilter) obj;
+ if (filter == null) {
+ if (other.filter != null)
+ return false;
+ } else if (!filter.equals(other.filter))
+ return false;
+ return true;
+ }
+}
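A wrapping sketch (illustrative only); the demo class is invented. In real use the wrapper matters when the inner filter's cached set ignores acceptDocs (e.g. deleted docs).

import org.apache.lucene.search.Filter;
import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
import org.elasticsearch.common.lucene.search.MatchAllDocsFilter;

public class ApplyAcceptedDocsFilterDemo {
    public static void main(String[] args) {
        // acceptDocs (e.g. live docs) get applied on top of the wrapped filter's result
        Filter filter = new ApplyAcceptedDocsFilter(new MatchAllDocsFilter());
        System.out.println(filter); // toString delegates to the wrapped filter: *:*
    }
}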
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java
new file mode 100644
index 0000000..027f794
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.search.Filter;
+
+/**
+ * A marker indicating that this is a cached filter.
+ */
+public abstract class CachedFilter extends Filter {
+
+ public static boolean isCached(Filter filter) {
+ return filter instanceof CachedFilter;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/EmptyScorer.java b/src/main/java/org/elasticsearch/common/lucene/search/EmptyScorer.java
new file mode 100644
index 0000000..9162736
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/EmptyScorer.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+
+import java.io.IOException;
+
+/**
+ * A {@link Scorer} that matches no documents.
+ */
+public class EmptyScorer extends Scorer {
+
+ private int docId = -1;
+
+ public EmptyScorer(Weight weight) {
+ super(weight);
+ }
+
+ @Override
+ public float score() throws IOException {
+ throw new UnsupportedOperationException("Should never be called");
+ }
+
+ @Override
+ public int freq() throws IOException {
+ throw new UnsupportedOperationException("Should never be called");
+ }
+
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ assert docId != NO_MORE_DOCS;
+ return docId = NO_MORE_DOCS;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return slowAdvance(target);
+ }
+
+ @Override
+ public long cost() {
+ return 0;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java
new file mode 100644
index 0000000..1ac1050
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+
+import java.io.IOException;
+
+/**
+ * A {@link Collector} that forwards only documents accepted by the given {@link Filter}.
+ */
+public class FilteredCollector extends XCollector {
+
+ private final Collector collector;
+
+ private final Filter filter;
+
+ private Bits docSet;
+
+ public FilteredCollector(Collector collector, Filter filter) {
+ this.collector = collector;
+ this.filter = filter;
+ }
+
+ @Override
+ public void postCollection() {
+ if (collector instanceof XCollector) {
+ ((XCollector) collector).postCollection();
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ collector.setScorer(scorer);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ if (docSet.get(doc)) {
+ collector.collect(doc);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ collector.setNextReader(context);
+ docSet = DocIdSets.toSafeBits(context.reader(), filter.getDocIdSet(context, null));
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return collector.acceptsDocsOutOfOrder();
+ }
+} \ No newline at end of file
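A collector sketch (illustrative only), pairing the wrapper with Lucene's TotalHitCountCollector; the demo class is invented and the search call is indicated in a comment.

import org.apache.lucene.search.TotalHitCountCollector;
import org.elasticsearch.common.lucene.search.FilteredCollector;
import org.elasticsearch.common.lucene.search.MatchAllDocsFilter;

public class FilteredCollectorDemo {
    public static void main(String[] args) {
        TotalHitCountCollector counts = new TotalHitCountCollector();
        // only docs accepted by the filter reach the inner collector;
        // the wrapper is what gets passed to IndexSearcher.search(query, collector)
        FilteredCollector collector = new FilteredCollector(counts, new MatchAllDocsFilter());
        System.out.println(collector.acceptsDocsOutOfOrder()); // delegates to the inner collector
    }
}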
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/LimitFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/LimitFilter.java
new file mode 100644
index 0000000..df83594
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/LimitFilter.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+
+import java.io.IOException;
+
+public class LimitFilter extends NoCacheFilter {
+
+ private final int limit;
+ private int counter;
+
+ public LimitFilter(int limit) {
+ this.limit = limit;
+ }
+
+ public int getLimit() {
+ return limit;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ if (counter > limit) {
+ return null;
+ }
+ return new LimitDocIdSet(context.reader().maxDoc(), acceptDocs, limit);
+ }
+
+ public class LimitDocIdSet extends MatchDocIdSet {
+
+ private final int limit;
+
+ public LimitDocIdSet(int maxDoc, @Nullable Bits acceptDocs, int limit) {
+ super(maxDoc, acceptDocs);
+ this.limit = limit;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ return ++counter <= limit;
+ }
+ }
+} \ No newline at end of file
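A usage sketch (illustrative only): the counter is shared across segments, so an instance is single-use and not thread-safe; the demo class is invented.

import org.elasticsearch.common.lucene.search.LimitFilter;

public class LimitFilterDemo {
    public static void main(String[] args) {
        LimitFilter limit = new LimitFilter(100); // stop matching once ~100 docs have been seen
        System.out.println(limit.getLimit()); // 100
    }
}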
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilter.java
new file mode 100644
index 0000000..41f2178
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilter.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.AllDocIdSet;
+
+import java.io.IOException;
+
+/**
+ * A filter that matches on all docs.
+ */
+public class MatchAllDocsFilter extends Filter {
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return new AllDocIdSet(context.reader().maxDoc());
+ }
+
+ @Override
+ public int hashCode() {
+ return this.getClass().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+
+ if (obj == null) {
+ return false;
+ }
+
+ if (obj.getClass() == this.getClass()) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "*:*";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsFilter.java
new file mode 100644
index 0000000..f130a19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsFilter.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+/**
+ * A filter that matches no docs.
+ */
+public class MatchNoDocsFilter extends Filter {
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return null;
+ }
+
+ @Override
+ public int hashCode() {
+ return this.getClass().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+
+ if (obj == null) {
+ return false;
+ }
+
+ if (obj.getClass() == this.getClass()) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "MatchNoDocsFilter";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java
new file mode 100644
index 0000000..0f06f39
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * Query that matches no documents.
+ */
+public final class MatchNoDocsQuery extends Query {
+
+ /**
+ * Weight implementation that matches no documents.
+ */
+ private class MatchNoDocsWeight extends Weight {
+
+ @Override
+ public String toString() {
+ return "weight(" + MatchNoDocsQuery.this + ")";
+ }
+
+ @Override
+ public Query getQuery() {
+ return MatchNoDocsQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ return null;
+ }
+
+ @Override
+ public Explanation explain(final AtomicReaderContext context,
+ final int doc) {
+ return new ComplexExplanation(false, 0, "MatchNoDocs matches nothing");
+ }
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ return new MatchNoDocsWeight();
+ }
+
+ @Override
+ public void extractTerms(final Set<Term> terms) {
+ }
+
+ @Override
+ public String toString(final String field) {
+ return "MatchNoDocsQuery";
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (o instanceof MatchNoDocsQuery) {
+ return getBoost() == ((MatchNoDocsQuery) o).getBoost();
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return getClass().hashCode() ^ Float.floatToIntBits(getBoost());
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java
new file mode 100644
index 0000000..ef4ba65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.queries.mlt.MoreLikeThis;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.TFIDFSimilarity;
+import org.elasticsearch.common.io.FastStringReader;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+/**
+ * A query that finds documents similar to the provided "like" text.
+ */
+public class MoreLikeThisQuery extends Query {
+
+ public static final float DEFAULT_PERCENT_TERMS_TO_MATCH = 0.3f;
+
+ private TFIDFSimilarity similarity;
+
+ private String likeText;
+ private String[] moreLikeFields;
+ private Analyzer analyzer;
+ private float percentTermsToMatch = DEFAULT_PERCENT_TERMS_TO_MATCH;
+ private int minTermFrequency = MoreLikeThis.DEFAULT_MIN_TERM_FREQ;
+ private int maxQueryTerms = MoreLikeThis.DEFAULT_MAX_QUERY_TERMS;
+ private Set<?> stopWords = MoreLikeThis.DEFAULT_STOP_WORDS;
+ private int minDocFreq = MoreLikeThis.DEFAULT_MIN_DOC_FREQ;
+ private int maxDocFreq = MoreLikeThis.DEFAULT_MAX_DOC_FREQ;
+ private int minWordLen = MoreLikeThis.DEFAULT_MIN_WORD_LENGTH;
+ private int maxWordLen = MoreLikeThis.DEFAULT_MAX_WORD_LENGTH;
+ private boolean boostTerms = MoreLikeThis.DEFAULT_BOOST;
+ private float boostTermsFactor = 1;
+
+
+ public MoreLikeThisQuery() {
+
+ }
+
+ public MoreLikeThisQuery(String likeText, String[] moreLikeFields, Analyzer analyzer) {
+ this.likeText = likeText;
+ this.moreLikeFields = moreLikeFields;
+ this.analyzer = analyzer;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = boostTerms ? 1 : 0;
+ result = 31 * result + Float.floatToIntBits(boostTermsFactor);
+ result = 31 * result + likeText.hashCode();
+ result = 31 * result + maxDocFreq;
+ result = 31 * result + maxQueryTerms;
+ result = 31 * result + maxWordLen;
+ result = 31 * result + minDocFreq;
+ result = 31 * result + minTermFrequency;
+ result = 31 * result + minWordLen;
+ result = 31 * result + Arrays.hashCode(moreLikeFields);
+ result = 31 * result + Float.floatToIntBits(percentTermsToMatch);
+ result = 31 * result + (stopWords == null ? 0 : stopWords.hashCode());
+ result = 31 * result + Float.floatToIntBits(getBoost());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || getClass() != obj.getClass())
+ return false;
+ MoreLikeThisQuery other = (MoreLikeThisQuery) obj;
+ if (getBoost() != other.getBoost())
+ return false;
+ if (!analyzer.equals(other.analyzer))
+ return false;
+ if (boostTerms != other.boostTerms)
+ return false;
+ if (boostTermsFactor != other.boostTermsFactor)
+ return false;
+ if (!likeText.equals(other.likeText))
+ return false;
+ if (maxDocFreq != other.maxDocFreq)
+ return false;
+ if (maxQueryTerms != other.maxQueryTerms)
+ return false;
+ if (maxWordLen != other.maxWordLen)
+ return false;
+ if (minDocFreq != other.minDocFreq)
+ return false;
+ if (minTermFrequency != other.minTermFrequency)
+ return false;
+ if (minWordLen != other.minWordLen)
+ return false;
+ if (!Arrays.equals(moreLikeFields, other.moreLikeFields))
+ return false;
+ if (percentTermsToMatch != other.percentTermsToMatch)
+ return false;
+ if (similarity == null) {
+ if (other.similarity != null)
+ return false;
+ } else if (!similarity.equals(other.similarity))
+ return false;
+ if (stopWords == null) {
+ if (other.stopWords != null)
+ return false;
+ } else if (!stopWords.equals(other.stopWords))
+ return false;
+ return true;
+ }
+
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ MoreLikeThis mlt = new MoreLikeThis(reader, similarity == null ? new DefaultSimilarity() : similarity);
+
+ mlt.setFieldNames(moreLikeFields);
+ mlt.setAnalyzer(analyzer);
+ mlt.setMinTermFreq(minTermFrequency);
+ mlt.setMinDocFreq(minDocFreq);
+ mlt.setMaxDocFreq(maxDocFreq);
+ mlt.setMaxQueryTerms(maxQueryTerms);
+ mlt.setMinWordLen(minWordLen);
+ mlt.setMaxWordLen(maxWordLen);
+ mlt.setStopWords(stopWords);
+ mlt.setBoost(boostTerms);
+ mlt.setBoostFactor(boostTermsFactor);
+ //LUCENE 4 UPGRADE this maps the 3.6 behavior (only use the first field)
+ BooleanQuery bq = (BooleanQuery) mlt.like(new FastStringReader(likeText), moreLikeFields[0]);
+ BooleanClause[] clauses = bq.getClauses();
+
+ bq.setMinimumNumberShouldMatch((int) (clauses.length * percentTermsToMatch));
+
+ bq.setBoost(getBoost());
+ return bq;
+ }
+
+ @Override
+ public String toString(String field) {
+ return "like:" + likeText;
+ }
+
+ public String getLikeText() {
+ return likeText;
+ }
+
+ public void setLikeText(String likeText) {
+ this.likeText = likeText;
+ }
+
+ public String[] getMoreLikeFields() {
+ return moreLikeFields;
+ }
+
+ public void setMoreLikeFields(String[] moreLikeFields) {
+ this.moreLikeFields = moreLikeFields;
+ }
+
+ public Similarity getSimilarity() {
+ return similarity;
+ }
+
+ public void setSimilarity(Similarity similarity) {
+ if (similarity == null || similarity instanceof TFIDFSimilarity) {
+ //LUCENE 4 UPGRADE a TFIDFSimilarity is required here, so only set it when the given similarity is an instance of it
+ this.similarity = (TFIDFSimilarity) similarity;
+ }
+ }
+
+ public Analyzer getAnalyzer() {
+ return analyzer;
+ }
+
+ public void setAnalyzer(Analyzer analyzer) {
+ this.analyzer = analyzer;
+ }
+
+ public float getPercentTermsToMatch() {
+ return percentTermsToMatch;
+ }
+
+ public void setPercentTermsToMatch(float percentTermsToMatch) {
+ this.percentTermsToMatch = percentTermsToMatch;
+ }
+
+ public int getMinTermFrequency() {
+ return minTermFrequency;
+ }
+
+ public void setMinTermFrequency(int minTermFrequency) {
+ this.minTermFrequency = minTermFrequency;
+ }
+
+ public int getMaxQueryTerms() {
+ return maxQueryTerms;
+ }
+
+ public void setMaxQueryTerms(int maxQueryTerms) {
+ this.maxQueryTerms = maxQueryTerms;
+ }
+
+ public Set<?> getStopWords() {
+ return stopWords;
+ }
+
+ public void setStopWords(Set<?> stopWords) {
+ this.stopWords = stopWords;
+ }
+
+ public int getMinDocFreq() {
+ return minDocFreq;
+ }
+
+ public void setMinDocFreq(int minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ }
+
+ public int getMaxDocFreq() {
+ return maxDocFreq;
+ }
+
+ public void setMaxDocFreq(int maxDocFreq) {
+ this.maxDocFreq = maxDocFreq;
+ }
+
+ public int getMinWordLen() {
+ return minWordLen;
+ }
+
+ public void setMinWordLen(int minWordLen) {
+ this.minWordLen = minWordLen;
+ }
+
+ public int getMaxWordLen() {
+ return maxWordLen;
+ }
+
+ public void setMaxWordLen(int maxWordLen) {
+ this.maxWordLen = maxWordLen;
+ }
+
+ public boolean isBoostTerms() {
+ return boostTerms;
+ }
+
+ public void setBoostTerms(boolean boostTerms) {
+ this.boostTerms = boostTerms;
+ }
+
+ public float getBoostTermsFactor() {
+ return boostTermsFactor;
+ }
+
+ public void setBoostTermsFactor(float boostTermsFactor) {
+ this.boostTermsFactor = boostTermsFactor;
+ }
+}
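A configuration sketch (illustrative only), assuming the Lucene 4.x StandardAnalyzer(Version) constructor available to this release; the field name and like-text are invented.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;

public class MoreLikeThisQueryDemo {
    public static void main(String[] args) {
        MoreLikeThisQuery mlt = new MoreLikeThisQuery(
                "quick brown fox", new String[]{"body"}, new StandardAnalyzer(Version.LUCENE_46));
        mlt.setMinTermFrequency(1);       // keep terms occurring at least once in the like-text
        mlt.setMinDocFreq(1);             // ... and in at least one indexed document
        mlt.setPercentTermsToMatch(0.5f); // half of the extracted terms must match
        // at search time the query is rewritten into a BooleanQuery against the reader:
        // topDocs = searcher.search(mlt, 10);
    }
}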
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
new file mode 100644
index 0000000..74ccb86
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.ToStringUtils;
+
+import java.io.IOException;
+import java.util.*;
+
+public class MultiPhrasePrefixQuery extends Query {
+
+ private String field;
+ private ArrayList<Term[]> termArrays = new ArrayList<Term[]>();
+ private ArrayList<Integer> positions = new ArrayList<Integer>();
+ private int maxExpansions = Integer.MAX_VALUE;
+
+ private int slop = 0;
+
+ /**
+ * Sets the phrase slop for this query.
+ *
+ * @see org.apache.lucene.search.PhraseQuery#setSlop(int)
+ */
+ public void setSlop(int s) {
+ slop = s;
+ }
+
+ public void setMaxExpansions(int maxExpansions) {
+ this.maxExpansions = maxExpansions;
+ }
+
+ /**
+ * Gets the phrase slop for this query.
+ *
+ * @see org.apache.lucene.search.PhraseQuery#getSlop()
+ */
+ public int getSlop() {
+ return slop;
+ }
+
+ /**
+ * Add a single term at the next position in the phrase.
+ *
+ * @see org.apache.lucene.search.PhraseQuery#add(Term)
+ */
+ public void add(Term term) {
+ add(new Term[]{term});
+ }
+
+ /**
+ * Add multiple terms at the next position in the phrase. Any of the terms
+ * may match.
+ *
+ * @see org.apache.lucene.search.PhraseQuery#add(Term)
+ */
+ public void add(Term[] terms) {
+ int position = 0;
+ if (positions.size() > 0)
+ position = positions.get(positions.size() - 1).intValue() + 1;
+
+ add(terms, position);
+ }
+
+ /**
+ * Allows specifying the relative position of terms within the phrase.
+ *
+ * @param terms    the terms that may match at this position
+ * @param position the relative position of the terms within the phrase
+ * @see org.apache.lucene.search.PhraseQuery#add(Term, int)
+ */
+ public void add(Term[] terms, int position) {
+ if (termArrays.size() == 0)
+ field = terms[0].field();
+
+ for (int i = 0; i < terms.length; i++) {
+ if (terms[i].field() != field) {
+ throw new IllegalArgumentException(
+ "All phrase terms must be in the same field (" + field + "): "
+ + terms[i]);
+ }
+ }
+
+ termArrays.add(terms);
+ positions.add(Integer.valueOf(position));
+ }
+
+ /**
+ * Returns a List of the terms in the multiphrase.
+ * Do not modify the List or its contents.
+ */
+ public List<Term[]> getTermArrays() {
+ return Collections.unmodifiableList(termArrays);
+ }
+
+ /**
+ * Returns the relative positions of terms in this phrase.
+ */
+ public int[] getPositions() {
+ int[] result = new int[positions.size()];
+ for (int i = 0; i < positions.size(); i++)
+ result[i] = positions.get(i).intValue();
+ return result;
+ }
+
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (termArrays.isEmpty()) {
+ return new MatchNoDocsQuery();
+ }
+ MultiPhraseQuery query = new MultiPhraseQuery();
+ query.setSlop(slop);
+ int sizeMinus1 = termArrays.size() - 1;
+ for (int i = 0; i < sizeMinus1; i++) {
+ query.add(termArrays.get(i), positions.get(i));
+ }
+ Term[] suffixTerms = termArrays.get(sizeMinus1);
+ int position = positions.get(sizeMinus1);
+ ObjectOpenHashSet<Term> terms = new ObjectOpenHashSet<Term>();
+ for (Term term : suffixTerms) {
+ getPrefixTerms(terms, term, reader);
+ if (terms.size() > maxExpansions) {
+ break;
+ }
+ }
+ if (terms.isEmpty()) {
+ return Queries.newMatchNoDocsQuery();
+ }
+ query.add(terms.toArray(Term.class), position);
+ return query.rewrite(reader);
+ }
+
+ private void getPrefixTerms(ObjectOpenHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
+ // SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
+ // instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
+ TermsEnum termsEnum = null;
+ List<AtomicReaderContext> leaves = reader.leaves();
+ for (AtomicReaderContext leaf : leaves) {
+ Terms _terms = leaf.reader().terms(field);
+ if (_terms == null) {
+ continue;
+ }
+
+ termsEnum = _terms.iterator(termsEnum);
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(prefix.bytes());
+ if (TermsEnum.SeekStatus.END == seekStatus) {
+ continue;
+ }
+
+ for (BytesRef term = termsEnum.term(); term != null; term = termsEnum.next()) {
+ if (!StringHelper.startsWith(term, prefix.bytes())) {
+ break;
+ }
+
+ terms.add(new Term(field, BytesRef.deepCopyOf(term)));
+ if (terms.size() >= maxExpansions) {
+ return;
+ }
+ }
+ }
+ }
+
+ @Override
+ public final String toString(String f) {
+ StringBuilder buffer = new StringBuilder();
+ if (field == null || !field.equals(f)) {
+ buffer.append(field);
+ buffer.append(":");
+ }
+
+ buffer.append("\"");
+ Iterator<Term[]> i = termArrays.iterator();
+ while (i.hasNext()) {
+ Term[] terms = i.next();
+ if (terms.length > 1) {
+ buffer.append("(");
+ for (int j = 0; j < terms.length; j++) {
+ buffer.append(terms[j].text());
+ if (j < terms.length - 1)
+ buffer.append(" ");
+ }
+ buffer.append(")");
+ } else {
+ buffer.append(terms[0].text());
+ }
+ if (i.hasNext())
+ buffer.append(" ");
+ }
+ buffer.append("\"");
+
+ if (slop != 0) {
+ buffer.append("~");
+ buffer.append(slop);
+ }
+
+ buffer.append(ToStringUtils.boost(getBoost()));
+
+ return buffer.toString();
+ }
+
+ /**
+ * Returns true if <code>o</code> is equal to this.
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof MultiPhrasePrefixQuery)) return false;
+ MultiPhrasePrefixQuery other = (MultiPhrasePrefixQuery) o;
+ return this.getBoost() == other.getBoost()
+ && this.slop == other.slop
+ && termArraysEquals(this.termArrays, other.termArrays)
+ && this.positions.equals(other.positions);
+ }
+
+ /**
+ * Returns a hash code value for this object.
+ */
+ @Override
+ public int hashCode() {
+ return Float.floatToIntBits(getBoost())
+ ^ slop
+ ^ termArraysHashCode()
+ ^ positions.hashCode()
+ ^ 0x4AC65113;
+ }
+
+ // Breakout calculation of the termArrays hashcode
+ private int termArraysHashCode() {
+ int hashCode = 1;
+ for (final Term[] termArray : termArrays) {
+ hashCode = 31 * hashCode
+ + (termArray == null ? 0 : Arrays.hashCode(termArray));
+ }
+ return hashCode;
+ }
+
+ // Breakout calculation of the termArrays equals
+ private boolean termArraysEquals(List<Term[]> termArrays1, List<Term[]> termArrays2) {
+ if (termArrays1.size() != termArrays2.size()) {
+ return false;
+ }
+ ListIterator<Term[]> iterator1 = termArrays1.listIterator();
+ ListIterator<Term[]> iterator2 = termArrays2.listIterator();
+ while (iterator1.hasNext()) {
+ Term[] termArray1 = iterator1.next();
+ Term[] termArray2 = iterator2.next();
+ if (!(termArray1 == null ? termArray2 == null : Arrays.equals(termArray1,
+ termArray2))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public String getField() {
+ return field;
+ }
+} \ No newline at end of file
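
A minimal usage sketch for MultiPhrasePrefixQuery (field name, terms, and the reader variable are assumptions): every position added holds exact terms except the last, which rewrite() expands as a prefix, capped by maxExpansions.

    MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
    query.add(new Term("body", "quick"));     // exact term at position 0
    query.add(new Term("body", "bro"));       // prefix at the final position
    query.setMaxExpansions(50);               // cap how many terms the prefix may expand to
    Query rewritten = query.rewrite(reader);  // a MultiPhraseQuery, or match-none if nothing expands
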
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java
new file mode 100644
index 0000000..6ec76d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+/**
+ * A marker base class for {@link org.apache.lucene.search.Filter} denoting the filter
+ * as one that should never be cached.
+ */
+public abstract class NoCacheFilter extends Filter {
+
+ private static final class NoCacheFilterWrapper extends NoCacheFilter {
+ private final Filter delegate;
+ private NoCacheFilterWrapper(Filter delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return delegate.getDocIdSet(context, acceptDocs);
+ }
+
+ @Override
+ public int hashCode() {
+ return delegate.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj instanceof NoCacheFilterWrapper) {
+ return delegate.equals(((NoCacheFilterWrapper)obj).delegate);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "no_cache(" + delegate + ")";
+ }
+
+ }
+
+ /**
+ * Wraps a filter in a NoCacheFilter or returns it if it already is a NoCacheFilter.
+ */
+ public static Filter wrap(Filter filter) {
+ if (filter instanceof NoCacheFilter) {
+ return filter;
+ }
+ return new NoCacheFilterWrapper(filter);
+ }
+} \ No newline at end of file
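
A short sketch of the intended call site (someFilter is an assumption): wrap() is idempotent, so caching layers that test instanceof NoCacheFilter will reliably skip the result.

    Filter uncached = NoCacheFilter.wrap(someFilter);  // wraps once
    assert NoCacheFilter.wrap(uncached) == uncached;   // already a NoCacheFilter, returned as-is
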
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java b/src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java
new file mode 100644
index 0000000..908ff52
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NoopCollector extends Collector {
+
+ public static final NoopCollector NOOP_COLLECTOR = new NoopCollector();
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+}
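
The no-op collector is useful when a search is executed only for its side effects (for example, warming caches). A hedged sketch, assuming an IndexSearcher searcher and a Query query in scope:

    // Execute the query but discard every hit.
    searcher.search(query, NoopCollector.NOOP_COLLECTOR);
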
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NotFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/NotFilter.java
new file mode 100644
index 0000000..3c8867e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/NotFilter.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.AllDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.docset.NotDocIdSet;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NotFilter extends Filter {
+
+ private final Filter filter;
+
+ public NotFilter(Filter filter) {
+ this.filter = filter;
+ }
+
+ public Filter filter() {
+ return filter;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ DocIdSet set = filter.getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(set)) {
+ return new AllDocIdSet(context.reader().maxDoc());
+ }
+ return new NotDocIdSet(set, context.reader().maxDoc());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ NotFilter notFilter = (NotFilter) o;
+ return !(filter != null ? !filter.equals(notFilter.filter) : notFilter.filter != null);
+ }
+
+ @Override
+ public String toString() {
+ return "NotFilter(" + filter + ")";
+ }
+
+ @Override
+ public int hashCode() {
+ return filter != null ? filter.hashCode() : 0;
+ }
+}
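
Note the shortcut above: an empty wrapped set negates to an AllDocIdSet over maxDoc. A small composition sketch (the base filter is an assumption):

    // Matches every doc whose "status" field does not contain the term "deleted".
    Filter base = new QueryWrapperFilter(new TermQuery(new Term("status", "deleted")));
    Filter notDeleted = new NotFilter(base);
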
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/OrFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/OrFilter.java
new file mode 100644
index 0000000..b68f22f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/OrFilter.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.docset.OrDocIdSet;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ *
+ */
+public class OrFilter extends Filter {
+
+ private final List<? extends Filter> filters;
+
+ public OrFilter(List<? extends Filter> filters) {
+ this.filters = filters;
+ }
+
+ public List<? extends Filter> filters() {
+ return filters;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ if (filters.size() == 1) {
+ return filters.get(0).getDocIdSet(context, acceptDocs);
+ }
+ List<DocIdSet> sets = new ArrayList<DocIdSet>(filters.size());
+ for (int i = 0; i < filters.size(); i++) {
+ DocIdSet set = filters.get(i).getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(set)) { // none matching for this filter, continue
+ continue;
+ }
+ sets.add(set);
+ }
+ if (sets.size() == 0) {
+ return null;
+ }
+ if (sets.size() == 1) {
+ return sets.get(0);
+ }
+ return new OrDocIdSet(sets.toArray(new DocIdSet[sets.size()]));
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 7;
+ hash = 31 * hash + (null == filters ? 0 : filters.hashCode());
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+
+ if ((obj == null) || (obj.getClass() != this.getClass()))
+ return false;
+
+ OrFilter other = (OrFilter) obj;
+ return equalFilters(filters, other.filters);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ for (Filter filter : filters) {
+ if (builder.length() > 0) {
+ builder.append(' ');
+ }
+ builder.append(filter);
+ }
+ return builder.toString();
+ }
+
+ private boolean equalFilters(List<? extends Filter> filters1, List<? extends Filter> filters2) {
+ return (filters1 == filters2) || ((filters1 != null) && filters1.equals(filters2));
+ }
+} \ No newline at end of file
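
A union sketch with assumed sub-filters; per getDocIdSet above, empty sub-sets are skipped and a single surviving set is returned without an OrDocIdSet wrapper:

    Filter red = new QueryWrapperFilter(new TermQuery(new Term("color", "red")));
    Filter blue = new QueryWrapperFilter(new TermQuery(new Term("color", "blue")));
    Filter either = new OrFilter(Arrays.asList(red, blue));
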
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
new file mode 100644
index 0000000..87d6288
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.search.*;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
+
+import java.util.List;
+import java.util.regex.Pattern;
+
+/**
+ *
+ */
+public class Queries {
+
+ /**
+ * A filter that matches all docs. Note: it requires no caching!
+ */
+ public final static Filter MATCH_ALL_FILTER = new MatchAllDocsFilter();
+ public final static Filter MATCH_NO_FILTER = new MatchNoDocsFilter();
+
+ public static Query newMatchAllQuery() {
+ // We don't use MatchAllDocsQuery, it's slower than the one below ... (much slower)
+ // NEVER cache this XConstantScoreQuery: it's not immutable, and based on #3521
+ // some code might set a boost on this query.
+ return new XConstantScoreQuery(MATCH_ALL_FILTER);
+ }
+
+ /** Return a query that matches no document. */
+ public static Query newMatchNoDocsQuery() {
+ return new MatchNoDocsQuery();
+ }
+
+ /**
+ * Optimizes the given query and returns the optimized version of it.
+ */
+ public static Query optimizeQuery(Query q) {
+ if (q instanceof BooleanQuery) {
+ BooleanQuery booleanQuery = (BooleanQuery) q;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+ if (clauses.length == 1) {
+ BooleanClause clause = clauses[0];
+ if (clause.getOccur() == BooleanClause.Occur.MUST) {
+ Query query = clause.getQuery();
+ query.setBoost(booleanQuery.getBoost() * query.getBoost());
+ return optimizeQuery(query);
+ }
+ if (clause.getOccur() == BooleanClause.Occur.SHOULD && booleanQuery.getMinimumNumberShouldMatch() > 0) {
+ Query query = clause.getQuery();
+ query.setBoost(booleanQuery.getBoost() * query.getBoost());
+ return optimizeQuery(query);
+ }
+ }
+ }
+ return q;
+ }
+
+ public static boolean isNegativeQuery(Query q) {
+ if (!(q instanceof BooleanQuery)) {
+ return false;
+ }
+ List<BooleanClause> clauses = ((BooleanQuery) q).clauses();
+ if (clauses.isEmpty()) {
+ return false;
+ }
+ for (BooleanClause clause : clauses) {
+ if (!clause.isProhibited()) return false;
+ }
+ return true;
+ }
+
+ public static Query fixNegativeQueryIfNeeded(Query q) {
+ if (isNegativeQuery(q)) {
+ BooleanQuery newBq = (BooleanQuery) q.clone();
+ newBq.add(newMatchAllQuery(), BooleanClause.Occur.MUST);
+ return newBq;
+ }
+ return q;
+ }
+
+ public static boolean isConstantMatchAllQuery(Query query) {
+ if (query instanceof XConstantScoreQuery) {
+ XConstantScoreQuery scoreQuery = (XConstantScoreQuery) query;
+ if (scoreQuery.getFilter() instanceof MatchAllDocsFilter) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static void applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) {
+ if (minimumShouldMatch == null) {
+ return;
+ }
+ int optionalClauses = 0;
+ for (BooleanClause c : query.clauses()) {
+ if (c.getOccur() == BooleanClause.Occur.SHOULD) {
+ optionalClauses++;
+ }
+ }
+
+ int msm = calculateMinShouldMatch(optionalClauses, minimumShouldMatch);
+ if (0 < msm) {
+ query.setMinimumNumberShouldMatch(msm);
+ }
+ }
+
+ private static Pattern spaceAroundLessThanPattern = Pattern.compile("(\\s+<\\s*)|(\\s*<\\s+)");
+ private static Pattern spacePattern = Pattern.compile(" ");
+ private static Pattern lessThanPattern = Pattern.compile("<");
+
+ public static int calculateMinShouldMatch(int optionalClauseCount, String spec) {
+ int result = optionalClauseCount;
+ spec = spec.trim();
+
+ if (-1 < spec.indexOf("<")) {
+ /* we have conditional spec(s) */
+ spec = spaceAroundLessThanPattern.matcher(spec).replaceAll("<");
+ for (String s : spacePattern.split(spec)) {
+ String[] parts = lessThanPattern.split(s, 0);
+ int upperBound = Integer.parseInt(parts[0]);
+ if (optionalClauseCount <= upperBound) {
+ return result;
+ } else {
+ result = calculateMinShouldMatch
+ (optionalClauseCount, parts[1]);
+ }
+ }
+ return result;
+ }
+
+ /* otherwise, simple expression */
+
+ if (-1 < spec.indexOf('%')) {
+ /* percentage - assume the % was the last char. If not, let Integer.parseInt fail. */
+ spec = spec.substring(0, spec.length() - 1);
+ int percent = Integer.parseInt(spec);
+ float calc = (result * percent) * (1 / 100f);
+ result = calc < 0 ? result + (int) calc : (int) calc;
+ } else {
+ int calc = Integer.parseInt(spec);
+ result = calc < 0 ? result + calc : calc;
+ }
+
+ return (optionalClauseCount < result ?
+ optionalClauseCount : (result < 0 ? 0 : result));
+
+ }
+
+ public static Filter wrap(Query query) {
+ return FACTORY.wrap(query);
+ }
+
+ private static final QueryWrapperFilterFactory FACTORY = new QueryWrapperFilterFactory();
+ // NOTE: This is a separate class since we added QueryWrapperFilter as a forbidden API
+ // that way we can exclude only the inner class without excluding the entire Queries class
+ // and potentially miss a forbidden API usage!
+ private static final class QueryWrapperFilterFactory {
+
+ public Filter wrap(Query query) {
+ if (CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) {
+ return new CustomQueryWrappingFilter(query);
+ } else {
+ return new QueryWrapperFilter(query);
+ }
+ }
+ }
+}
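
The spec grammar accepted by calculateMinShouldMatch follows the familiar minimum_should_match syntax: a plain or signed integer, a percentage, or conditional pairs such as "3<90%". A worked sketch of the arithmetic above:

    // 5 optional clauses, spec "3<90%": 5 > 3, so apply "90%" -> (int) (5 * 0.9) = 4
    int a = Queries.calculateMinShouldMatch(5, "3<90%");  // 4
    // negative values count back from the clause count: 5 + (-1) = 4
    int b = Queries.calculateMinShouldMatch(5, "-1");     // 4
    // percentages truncate toward zero: (int) (3 * 0.5) = 1
    int c = Queries.calculateMinShouldMatch(3, "50%");    // 1
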
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/RegexpFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/RegexpFilter.java
new file mode 100644
index 0000000..9af6bee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/RegexpFilter.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.MultiTermQueryWrapperFilter;
+import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.automaton.RegExp;
+
+import java.io.IOException;
+
+/**
+ * A lazy regexp filter which only builds the automaton on the first call to {@link #getDocIdSet(AtomicReaderContext, Bits)}.
+ * It is not thread safe (so it can't be applied to multiple segments concurrently).
+ */
+public class RegexpFilter extends Filter {
+
+ private final Term term;
+ private final int flags;
+
+ // use delegation here to support efficient implementation of equals & hashcode for this
+ // filter (as it will be used as the filter cache key)
+ private final InternalFilter filter;
+
+ public RegexpFilter(Term term) {
+ this(term, RegExp.ALL);
+ }
+
+ public RegexpFilter(Term term, int flags) {
+ filter = new InternalFilter(term, flags);
+ this.term = term;
+ this.flags = flags;
+ }
+
+ public String field() {
+ return term.field();
+ }
+
+ public String regexp() {
+ return term.text();
+ }
+
+ public int flags() {
+ return flags;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return filter.getDocIdSet(context, acceptDocs);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ org.elasticsearch.common.lucene.search.RegexpFilter that = (org.elasticsearch.common.lucene.search.RegexpFilter) o;
+
+ if (flags != that.flags) return false;
+ if (term != null ? !term.equals(that.term) : that.term != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = term != null ? term.hashCode() : 0;
+ result = 31 * result + flags;
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ // TODO: should we also show the flags?
+ return term.field() + ":" + term.text();
+ }
+
+ static class InternalFilter extends MultiTermQueryWrapperFilter<RegexpQuery> {
+
+ public InternalFilter(Term term) {
+ super(new RegexpQuery(term));
+ }
+
+ public InternalFilter(Term term, int flags) {
+ super(new RegexpQuery(term, flags));
+ }
+ }
+
+}
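
A brief sketch (field and pattern are assumptions). Because equals/hashCode are computed from the term and flags only, two instances built from the same pattern share a filter-cache entry:

    // Docs whose "tag" term matches foo[0-9]+ under the full regexp syntax.
    Filter f = new RegexpFilter(new Term("tag", "foo[0-9]+"), RegExp.ALL);
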
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/XBooleanFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/XBooleanFilter.java
new file mode 100644
index 0000000..96984f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/XBooleanFilter.java
@@ -0,0 +1,416 @@
+package org.elasticsearch.common.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.docset.AllDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.docset.NotDocIdSet;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Similar to {@link org.apache.lucene.queries.BooleanFilter}.
+ * <p/>
+ * Our own variant differs mainly in that we pass the acceptDocs down to the filters
+ * and don't filter based on them at the end. Our logic is a bit different: we filter based on them
+ * at the top-level filter chain instead.
+ */
+public class XBooleanFilter extends Filter implements Iterable<FilterClause> {
+
+ final List<FilterClause> clauses = new ArrayList<FilterClause>();
+
+ /**
+ * Returns a DocIdSet representing the Boolean composition
+ * of the filters that have been added.
+ */
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ FixedBitSet res = null;
+ final AtomicReader reader = context.reader();
+
+ // optimize single case...
+ if (clauses.size() == 1) {
+ FilterClause clause = clauses.get(0);
+ DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
+ if (clause.getOccur() == Occur.MUST_NOT) {
+ if (DocIdSets.isEmpty(set)) {
+ return new AllDocIdSet(reader.maxDoc());
+ } else {
+ return new NotDocIdSet(set, reader.maxDoc());
+ }
+ }
+ // SHOULD or MUST, just return the set...
+ if (DocIdSets.isEmpty(set)) {
+ return null;
+ }
+ return set;
+ }
+
+ // first, go over and see if we can shortcut the execution
+ // and gather Bits if we need to
+ List<ResultClause> results = new ArrayList<ResultClause>(clauses.size());
+ boolean hasShouldClauses = false;
+ boolean hasNonEmptyShouldClause = false;
+ boolean hasMustClauses = false;
+ boolean hasMustNotClauses = false;
+ for (int i = 0; i < clauses.size(); i++) {
+ FilterClause clause = clauses.get(i);
+ DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
+ if (clause.getOccur() == Occur.MUST) {
+ hasMustClauses = true;
+ if (DocIdSets.isEmpty(set)) {
+ return null;
+ }
+ } else if (clause.getOccur() == Occur.SHOULD) {
+ hasShouldClauses = true;
+ if (DocIdSets.isEmpty(set)) {
+ continue;
+ }
+ hasNonEmptyShouldClause = true;
+ } else if (clause.getOccur() == Occur.MUST_NOT) {
+ hasMustNotClauses = true;
+ if (DocIdSets.isEmpty(set)) {
+ // we mark empty ones as null for must_not, handle it in the next run...
+ results.add(new ResultClause(null, null, clause));
+ continue;
+ }
+ }
+ Bits bits = null;
+ if (!DocIdSets.isFastIterator(set)) {
+ bits = set.bits();
+ }
+ results.add(new ResultClause(set, bits, clause));
+ }
+
+ if (hasShouldClauses && !hasNonEmptyShouldClause) {
+ return null;
+ }
+
+ // now, go over the clauses and apply the "fast" ones first...
+ hasNonEmptyShouldClause = false;
+ boolean hasBits = false;
+ // But first we need to handle the "fast" should clauses, otherwise a should clause can unset docs
+ // that don't match with a must or must_not clause.
+ List<ResultClause> fastOrClauses = new ArrayList<ResultClause>();
+ for (int i = 0; i < results.size(); i++) {
+ ResultClause clause = results.get(i);
+ // we apply the Bits-based ones (slow) in the second run
+ if (clause.bits != null) {
+ hasBits = true;
+ continue;
+ }
+ if (clause.clause.getOccur() == Occur.SHOULD) {
+ if (hasMustClauses || hasMustNotClauses) {
+ fastOrClauses.add(clause);
+ } else if (res == null) {
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it != null) {
+ hasNonEmptyShouldClause = true;
+ res = new FixedBitSet(reader.maxDoc());
+ res.or(it);
+ }
+ } else {
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it != null) {
+ hasNonEmptyShouldClause = true;
+ res.or(it);
+ }
+ }
+ }
+ }
+
+ // Now we safely handle the "fast" must and must_not clauses.
+ for (int i = 0; i < results.size(); i++) {
+ ResultClause clause = results.get(i);
+ // we apply the Bits-based ones (slow) in the second run
+ if (clause.bits != null) {
+ hasBits = true;
+ continue;
+ }
+ if (clause.clause.getOccur() == Occur.MUST) {
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it == null) {
+ return null;
+ }
+ if (res == null) {
+ res = new FixedBitSet(reader.maxDoc());
+ res.or(it);
+ } else {
+ res.and(it);
+ }
+ } else if (clause.clause.getOccur() == Occur.MUST_NOT) {
+ if (res == null) {
+ res = new FixedBitSet(reader.maxDoc());
+ res.set(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
+ }
+ if (clause.docIdSet != null) {
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it != null) {
+ res.andNot(it);
+ }
+ }
+ }
+ }
+
+ if (!hasBits) {
+ if (!fastOrClauses.isEmpty()) {
+ DocIdSetIterator it = res.iterator();
+ at_least_one_should_clause_iter:
+ for (int setDoc = it.nextDoc(); setDoc != DocIdSetIterator.NO_MORE_DOCS; setDoc = it.nextDoc()) {
+ for (ResultClause fastOrClause : fastOrClauses) {
+ DocIdSetIterator clauseIterator = fastOrClause.iterator();
+ if (clauseIterator == null) {
+ continue;
+ }
+ if (iteratorMatch(clauseIterator, setDoc)) {
+ hasNonEmptyShouldClause = true;
+ continue at_least_one_should_clause_iter;
+ }
+ }
+ res.clear(setDoc);
+ }
+ }
+
+ if (hasShouldClauses && !hasNonEmptyShouldClause) {
+ return null;
+ } else {
+ return res;
+ }
+ }
+
+ // we have some clauses with bits, apply them...
+ // we let the "res" drive the computation, and check Bits for that
+ List<ResultClause> slowOrClauses = new ArrayList<ResultClause>();
+ for (int i = 0; i < results.size(); i++) {
+ ResultClause clause = results.get(i);
+ if (clause.bits == null) {
+ continue;
+ }
+ if (clause.clause.getOccur() == Occur.SHOULD) {
+ if (hasMustClauses || hasMustNotClauses) {
+ slowOrClauses.add(clause);
+ } else {
+ if (res == null) {
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it == null) {
+ continue;
+ }
+ hasNonEmptyShouldClause = true;
+ res = new FixedBitSet(reader.maxDoc());
+ res.or(it);
+ } else {
+ for (int doc = 0; doc < reader.maxDoc(); doc++) {
+ if (!res.get(doc) && clause.bits.get(doc)) {
+ hasNonEmptyShouldClause = true;
+ res.set(doc);
+ }
+ }
+ }
+ }
+ } else if (clause.clause.getOccur() == Occur.MUST) {
+ if (res == null) {
+ // nothing we can do, just or it...
+ res = new FixedBitSet(reader.maxDoc());
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it == null) {
+ return null;
+ }
+ res.or(it);
+ } else {
+ Bits bits = clause.bits;
+ // use the "res" to drive the iteration
+ DocIdSetIterator it = res.iterator();
+ for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
+ if (!bits.get(doc)) {
+ res.clear(doc);
+ }
+ }
+ }
+ } else if (clause.clause.getOccur() == Occur.MUST_NOT) {
+ if (res == null) {
+ res = new FixedBitSet(reader.maxDoc());
+ res.set(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
+ DocIdSetIterator it = clause.docIdSet.iterator();
+ if (it != null) {
+ res.andNot(it);
+ }
+ } else {
+ Bits bits = clause.bits;
+ // let res drive the iteration
+ DocIdSetIterator it = res.iterator();
+ for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
+ if (bits.get(doc)) {
+ res.clear(doc);
+ }
+ }
+ }
+ }
+ }
+
+ // From a boolean-logic point of view, a should clause has no impact on a bool filter if there
+ // is already a must or must_not clause. However, in the current ES bool filter behaviour at least one should
+ // clause must match in order for a doc to be a match. What we do here is check whether matched docs match
+ // any should filter. TODO: add an option to disable the minimum_should_match=1 behaviour
+ if (!slowOrClauses.isEmpty() || !fastOrClauses.isEmpty()) {
+ DocIdSetIterator it = res.iterator();
+ at_least_one_should_clause_iter:
+ for (int setDoc = it.nextDoc(); setDoc != DocIdSetIterator.NO_MORE_DOCS; setDoc = it.nextDoc()) {
+ for (ResultClause fastOrClause : fastOrClauses) {
+ DocIdSetIterator clauseIterator = fastOrClause.iterator();
+ if (clauseIterator == null) {
+ continue;
+ }
+ if (iteratorMatch(clauseIterator, setDoc)) {
+ hasNonEmptyShouldClause = true;
+ continue at_least_one_should_clause_iter;
+ }
+ }
+ for (ResultClause slowOrClause : slowOrClauses) {
+ if (slowOrClause.bits.get(setDoc)) {
+ hasNonEmptyShouldClause = true;
+ continue at_least_one_should_clause_iter;
+ }
+ }
+ res.clear(setDoc);
+ }
+ }
+
+ if (hasShouldClauses && !hasNonEmptyShouldClause) {
+ return null;
+ } else {
+ return res;
+ }
+
+ }
+
+ /**
+ * Adds a new FilterClause to the Boolean Filter container
+ *
+ * @param filterClause A FilterClause object containing a Filter and an Occur parameter
+ */
+ public void add(FilterClause filterClause) {
+ clauses.add(filterClause);
+ }
+
+ public final void add(Filter filter, Occur occur) {
+ add(new FilterClause(filter, occur));
+ }
+
+ /**
+ * Returns the list of clauses
+ */
+ public List<FilterClause> clauses() {
+ return clauses;
+ }
+
+ /**
+ * Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
+ * make it possible to do:
+ * <pre class="prettyprint">for (FilterClause clause : booleanFilter) {}</pre>
+ */
+ public final Iterator<FilterClause> iterator() {
+ return clauses().iterator();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+
+ if ((obj == null) || (obj.getClass() != this.getClass())) {
+ return false;
+ }
+
+ final XBooleanFilter other = (XBooleanFilter) obj;
+ return clauses.equals(other.clauses);
+ }
+
+ @Override
+ public int hashCode() {
+ return 657153718 ^ clauses.hashCode();
+ }
+
+ /**
+ * Prints a user-readable version of this Filter.
+ */
+ @Override
+ public String toString() {
+ final StringBuilder buffer = new StringBuilder("BooleanFilter(");
+ final int minLen = buffer.length();
+ for (final FilterClause c : clauses) {
+ if (buffer.length() > minLen) {
+ buffer.append(' ');
+ }
+ buffer.append(c);
+ }
+ return buffer.append(')').toString();
+ }
+
+ static class ResultClause {
+
+ public final DocIdSet docIdSet;
+ public final Bits bits;
+ public final FilterClause clause;
+
+ DocIdSetIterator docIdSetIterator;
+
+ ResultClause(DocIdSet docIdSet, Bits bits, FilterClause clause) {
+ this.docIdSet = docIdSet;
+ this.bits = bits;
+ this.clause = clause;
+ }
+
+ /**
+ * @return An iterator, cached for subsequent usage. Don't use this if the iterator is consumed in a single invocation.
+ */
+ DocIdSetIterator iterator() throws IOException {
+ if (docIdSetIterator != null) {
+ return docIdSetIterator;
+ } else {
+ return docIdSetIterator = docIdSet.iterator();
+ }
+ }
+
+ }
+
+ static boolean iteratorMatch(DocIdSetIterator docIdSetIterator, int target) throws IOException {
+ assert docIdSetIterator != null;
+ int current = docIdSetIterator.docID();
+ if (current == DocIdSetIterator.NO_MORE_DOCS || target < current) {
+ return false;
+ } else {
+ if (current == target) {
+ return true;
+ } else {
+ return docIdSetIterator.advance(target) == target;
+ }
+ }
+ }
+
+}
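
Composition mirrors Lucene's BooleanFilter; a hedged sketch with assumed sub-filters that exercises the must, should, and must_not paths above:

    XBooleanFilter bool = new XBooleanFilter();
    bool.add(new QueryWrapperFilter(new TermQuery(new Term("type", "event"))), Occur.MUST);
    bool.add(new QueryWrapperFilter(new TermQuery(new Term("level", "warn"))), Occur.SHOULD);
    bool.add(new QueryWrapperFilter(new TermQuery(new Term("level", "error"))), Occur.SHOULD);
    bool.add(new QueryWrapperFilter(new TermQuery(new Term("hidden", "true"))), Occur.MUST_NOT);
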
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/XCollector.java b/src/main/java/org/elasticsearch/common/lucene/search/XCollector.java
new file mode 100644
index 0000000..942a8a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/XCollector.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.search.Collector;
+
+/**
+ * An extension to {@link Collector} that allows for a callback when
+ * collection is done.
+ */
+public abstract class XCollector extends Collector {
+
+ public void postCollection() {
+
+ }
+}
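
A minimal concrete subclass sketch (assuming only the Lucene 4 Collector contract), showing where the postCollection callback fits:

    public class CountingCollector extends XCollector {
        private int count;

        @Override
        public void setScorer(Scorer scorer) throws IOException {}

        @Override
        public void collect(int doc) throws IOException { count++; }

        @Override
        public void setNextReader(AtomicReaderContext context) throws IOException {}

        @Override
        public boolean acceptsDocsOutOfOrder() { return true; }

        @Override
        public void postCollection() {
            // finalize per-search state here, once all segments have been collected
        }
    }
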
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/XConstantScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/XConstantScoreQuery.java
new file mode 100644
index 0000000..4d04108
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/XConstantScoreQuery.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+
+/**
+ * We sometimes still need to exclude deleted docs, because we don't always remove them with acceptDocs on filters.
+ */
+public class XConstantScoreQuery extends ConstantScoreQuery {
+
+ private final Filter actualFilter;
+
+ public XConstantScoreQuery(Filter filter) {
+ super(new ApplyAcceptedDocsFilter(filter));
+ this.actualFilter = filter;
+ }
+
+ // trick so any external system still thinks that it's the actual filter we use, and not the
+ // deletes-applying wrapper
+ @Override
+ public Filter getFilter() {
+ return this.actualFilter;
+ }
+}
+
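
Usage matches ConstantScoreQuery; a sketch with an assumed filter, showing that getFilter() hides the internal deletes-applying wrapper:

    Filter f = new QueryWrapperFilter(new TermQuery(new Term("user", "kimchy")));
    XConstantScoreQuery q = new XConstantScoreQuery(f);
    assert q.getFilter() == f;  // the ApplyAcceptedDocsFilter wrapper never leaks out
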
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java
new file mode 100644
index 0000000..f80982a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java
@@ -0,0 +1,261 @@
+package org.elasticsearch.common.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.FilteredQuery.FilterStrategy;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+
+import java.io.IOException;
+import java.util.Set;
+
+
+/**
+ * A query that applies a filter to the results of another query.
+ * <p/>
+ * <p>Note: the bits are retrieved from the filter each time this
+ * query is used in a search - use a CachingWrapperFilter to avoid
+ * regenerating the bits every time.
+ *
+ * @see CachingWrapperFilter
+ * @since 1.4
+ */
+// Changes are marked with //CHANGE:
+// Delegate to FilteredQuery - this version fixes the bug in LUCENE-4705 and uses ApplyAcceptedDocsFilter internally
+public final class XFilteredQuery extends Query {
+ private final Filter rawFilter;
+ private final FilteredQuery delegate;
+ private final FilterStrategy strategy;
+
+ /**
+ * Constructs a new query which applies a filter to the results of the original query.
+ * {@link Filter#getDocIdSet} will be called every time this query is used in a search.
+ *
+ * @param query Query to be filtered, cannot be <code>null</code>.
+ * @param filter Filter to apply to query results, cannot be <code>null</code>.
+ */
+ public XFilteredQuery(Query query, Filter filter) {
+ this(query, filter, FilteredQuery.RANDOM_ACCESS_FILTER_STRATEGY);
+ }
+
+ /**
+ * Expert: Constructs a new query which applies a filter to the results of the original query.
+ * {@link Filter#getDocIdSet} will be called every time this query is used in a search.
+ *
+ * @param query Query to be filtered, cannot be <code>null</code>.
+ * @param filter Filter to apply to query results, cannot be <code>null</code>.
+ * @param strategy a filter strategy used to create a filtered scorer.
+ * @see FilterStrategy
+ */
+ public XFilteredQuery(Query query, Filter filter, FilterStrategy strategy) {
+ this(new FilteredQuery(query, new ApplyAcceptedDocsFilter(filter), strategy), filter, strategy);
+ }
+
+ private XFilteredQuery(FilteredQuery delegate, Filter filter, FilterStrategy strategy) {
+ this.delegate = delegate;
+ // CHANGE: we need to wrap it in post application of accepted docs
+ this.rawFilter = filter;
+ this.strategy = strategy;
+ }
+
+ /**
+ * Returns a Weight that applies the filter to the enclosed query's Weight.
+ * This is accomplished by overriding the Scorer returned by the Weight.
+ */
+ @Override
+ public Weight createWeight(final IndexSearcher searcher) throws IOException {
+ return delegate.createWeight(searcher);
+ }
+
+ /**
+ * Rewrites the query. If the wrapped is an instance of
+ * {@link MatchAllDocsQuery} it returns a {@link ConstantScoreQuery}. Otherwise
+ * it returns a new {@code FilteredQuery} wrapping the rewritten query.
+ */
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ Query query = delegate.getQuery();
+ final Query queryRewritten = query.rewrite(reader);
+
+ // CHANGE: if we push back to Lucene, would love to have an extension for "isMatchAllQuery"
+ if (queryRewritten instanceof MatchAllDocsQuery || Queries.isConstantMatchAllQuery(queryRewritten)) {
+ // Special case: If the query is a MatchAllDocsQuery, we only
+ // return a CSQ(filter).
+ final Query rewritten = new ConstantScoreQuery(delegate.getFilter());
+ // Combine boost of MatchAllDocsQuery and the wrapped rewritten query:
+ rewritten.setBoost(delegate.getBoost() * queryRewritten.getBoost());
+ return rewritten;
+ }
+
+ if (queryRewritten != query) {
+ // rewrite to a new FilteredQuery wrapping the rewritten query
+ final Query rewritten = new XFilteredQuery(queryRewritten, rawFilter, strategy);
+ rewritten.setBoost(delegate.getBoost());
+ return rewritten;
+ } else {
+ // nothing to rewrite, we are done!
+ return this;
+ }
+ }
+
+ @Override
+ public void setBoost(float b) {
+ delegate.setBoost(b);
+ }
+
+ @Override
+ public float getBoost() {
+ return delegate.getBoost();
+ }
+
+ /**
+ * Returns this FilteredQuery's (unfiltered) Query
+ */
+ public final Query getQuery() {
+ return delegate.getQuery();
+ }
+
+ /**
+ * Returns this FilteredQuery's filter
+ */
+ public final Filter getFilter() {
+ // CHANGE: unwrap the accepted docs filter
+ if (rawFilter instanceof ApplyAcceptedDocsFilter) {
+ return ((ApplyAcceptedDocsFilter) rawFilter).filter();
+ }
+ return rawFilter;
+ }
+
+ // inherit javadoc
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ delegate.extractTerms(terms);
+ }
+
+ /**
+ * Prints a user-readable version of this query.
+ */
+ @Override
+ public String toString(String s) {
+ return delegate.toString(s);
+ }
+
+ /**
+ * Returns true iff <code>o</code> is equal to this.
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof XFilteredQuery)) {
+ return false;
+ } else {
+ return delegate.equals(((XFilteredQuery)o).delegate);
+ }
+ }
+
+ /**
+ * Returns a hash code value for this object.
+ */
+ @Override
+ public int hashCode() {
+ return delegate.hashCode();
+ }
+
+ // CHANGE: Add custom random access strategy, allowing to set the threshold
+ // CHANGE: Add filter first filter strategy
+ public static final FilterStrategy ALWAYS_RANDOM_ACCESS_FILTER_STRATEGY = new CustomRandomAccessFilterStrategy(0);
+
+ public static final CustomRandomAccessFilterStrategy CUSTOM_FILTER_STRATEGY = new CustomRandomAccessFilterStrategy();
+
+ /**
+ * Extends {@link org.apache.lucene.search.FilteredQuery.RandomAccessFilterStrategy}.
+ * <p/>
+ * Adds a threshold value, which defaults to -1. When set to -1, it checks whether the filter docIdSet is
+ * a fast docIdSet; if it is not, it uses {@link FilteredQuery#QUERY_FIRST_FILTER_STRATEGY} (since
+ * the assumption is that it's a "slow" filter, better computed only on whatever matched the query).
+ * <p/>
+ * If the threshold value is 0, it always tries to pass "down" the filter as acceptDocs, and if the filter
+ * can't be represented as Bits (rarely the case), then it uses {@link FilteredQuery#LEAP_FROG_QUERY_FIRST_STRATEGY}.
+ * <p/>
+ * If the above conditions are not met, then it reverts to the {@link FilteredQuery.RandomAccessFilterStrategy} logic,
+ * with the threshold used to control {@link #useRandomAccess(org.apache.lucene.util.Bits, int)}.
+ */
+ public static class CustomRandomAccessFilterStrategy extends FilteredQuery.RandomAccessFilterStrategy {
+
+ private final int threshold;
+
+ public CustomRandomAccessFilterStrategy() {
+ this.threshold = -1;
+ }
+
+ public CustomRandomAccessFilterStrategy(int threshold) {
+ this.threshold = threshold;
+ }
+
+ @Override
+ public Scorer filteredScorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Weight weight, DocIdSet docIdSet) throws IOException {
+ // CHANGE: If threshold is 0, always pass down the accept docs, don't pay the price of calling nextDoc even...
+ if (threshold == 0) {
+ final Bits filterAcceptDocs = docIdSet.bits();
+ if (filterAcceptDocs != null) {
+ return weight.scorer(context, scoreDocsInOrder, topScorer, filterAcceptDocs);
+ } else {
+ return FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet);
+ }
+ }
+
+ // CHANGE: handle "default" value
+ if (threshold == -1) {
+ // default value: don't iterate; only apply the filter after the query if it's not a "fast" docIdSet
+ if (!DocIdSets.isFastIterator(docIdSet)) {
+ return FilteredQuery.QUERY_FIRST_FILTER_STRATEGY.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet);
+ }
+ }
+
+ return super.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet);
+ }
+
+ /**
+ * Expert: decides if a filter should be executed as "random-access" or not.
+ * random-access means the filter "filters" in a similar way as deleted docs are filtered
+ * in Lucene. This is faster when the filter accepts many documents.
+ * However, when the filter is very sparse, it can be faster to execute the query+filter
+ * as a conjunction in some cases.
+ * <p/>
+ * The default implementation returns <code>true</code> if the first document accepted by the
+ * filter is < threshold; if threshold is -1 (the default), it checks for < 100.
+ */
+ protected boolean useRandomAccess(Bits bits, int firstFilterDoc) {
+ // "default"
+ if (threshold == -1) {
+ return firstFilterDoc < 100;
+ }
+ //TODO once we have a cost API on filters and scorers we should rethink this heuristic
+ return firstFilterDoc < threshold;
+ }
+ }
+
+ @Override
+ public Query clone() {
+ return new XFilteredQuery((FilteredQuery) delegate.clone(), rawFilter, strategy);
+ }
+
+}
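
A strategy-selection sketch (query and filter are assumptions): a threshold of 0 always pushes the filter down as acceptDocs, while the default -1 applies the slow-filter heuristic described above:

    Query filtered = new XFilteredQuery(
            new TermQuery(new Term("body", "quick")),
            new QueryWrapperFilter(new TermQuery(new Term("lang", "en"))),
            XFilteredQuery.ALWAYS_RANDOM_ACCESS_FILTER_STRATEGY);  // threshold == 0
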
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java
new file mode 100644
index 0000000..43f7aad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Explanation;
+
+/**
+ *
+ */
+public class BoostScoreFunction extends ScoreFunction {
+
+ private final float boost;
+
+ public BoostScoreFunction(float boost) {
+ super(CombineFunction.MULT);
+ this.boost = boost;
+ }
+
+ public float getBoost() {
+ return boost;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) {
+ // nothing to do here...
+ }
+
+ @Override
+ public double score(int docId, float subQueryScore) {
+ return boost;
+ }
+
+ @Override
+ public Explanation explainScore(int docId, Explanation subQueryExpl) {
+ Explanation exp = new Explanation(boost, "static boost factor");
+ exp.addDetail(new Explanation(boost, "boostFactor"));
+ return exp;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+
+ BoostScoreFunction that = (BoostScoreFunction) o;
+
+ if (Float.compare(that.boost, boost) != 0)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return (boost != +0.0f ? Float.floatToIntBits(boost) : 0);
+ }
+
+ @Override
+ public String toString() {
+ return "boost[" + boost + "]";
+ }
+}
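
Because score() ignores both the doc id and the sub-query score, combining with CombineFunction.MULT simply scales the query score by the boost; a worked sketch:

    BoostScoreFunction fn = new BoostScoreFunction(2.0f);
    double s = fn.score(42, 0.37f);  // always 2.0, independent of doc and sub-query score
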
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java
new file mode 100644
index 0000000..d5455fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+
+public enum CombineFunction {
+ MULT {
+ @Override
+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {
+ return toFloat(queryBoost * queryScore * Math.min(funcScore, maxBoost));
+ }
+
+ @Override
+ public String getName() {
+ return "multiply";
+ }
+
+ @Override
+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {
+ float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost) * queryExpl.getValue();
+ ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:");
+ res.addDetail(queryExpl);
+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of");
+ minExpl.addDetail(funcExpl);
+ minExpl.addDetail(new Explanation(maxBoost, "maxBoost"));
+ res.addDetail(minExpl);
+ res.addDetail(new Explanation(queryBoost, "queryBoost"));
+ return res;
+ }
+ },
+ REPLACE {
+ @Override
+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {
+ return toFloat(queryBoost * Math.min(funcScore, maxBoost));
+ }
+
+ @Override
+ public String getName() {
+ return "replace";
+ }
+
+ @Override
+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {
+ float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost);
+ ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:");
+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of");
+ minExpl.addDetail(funcExpl);
+ minExpl.addDetail(new Explanation(maxBoost, "maxBoost"));
+ res.addDetail(minExpl);
+ res.addDetail(new Explanation(queryBoost, "queryBoost"));
+ return res;
+ }
+
+ },
+ SUM {
+ @Override
+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {
+ return toFloat(queryBoost * (queryScore + Math.min(funcScore, maxBoost)));
+ }
+
+ @Override
+ public String getName() {
+ return "sum";
+ }
+
+ @Override
+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {
+ float score = queryBoost * (Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue());
+ ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:");
+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of");
+ minExpl.addDetail(funcExpl);
+ minExpl.addDetail(new Explanation(maxBoost, "maxBoost"));
+ ComplexExplanation sumExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(),
+ "sum of");
+ sumExpl.addDetail(queryExpl);
+ sumExpl.addDetail(minExpl);
+ res.addDetail(sumExpl);
+ res.addDetail(new Explanation(queryBoost, "queryBoost"));
+ return res;
+ }
+
+ },
+ AVG {
+ @Override
+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {
+ return toFloat((queryBoost * (Math.min(funcScore, maxBoost) + queryScore) / 2.0));
+ }
+
+ @Override
+ public String getName() {
+ return "avg";
+ }
+
+ @Override
+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {
+ float score = toFloat(queryBoost * (queryExpl.getValue() + Math.min(funcExpl.getValue(), maxBoost)) / 2.0);
+ ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:");
+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of");
+ minExpl.addDetail(funcExpl);
+ minExpl.addDetail(new Explanation(maxBoost, "maxBoost"));
+ ComplexExplanation avgExpl = new ComplexExplanation(true,
+ toFloat((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of");
+ avgExpl.addDetail(queryExpl);
+ avgExpl.addDetail(minExpl);
+ res.addDetail(avgExpl);
+ res.addDetail(new Explanation(queryBoost, "queryBoost"));
+ return res;
+ }
+
+ },
+ MIN {
+ @Override
+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {
+ return toFloat(queryBoost * Math.min(queryScore, Math.min(funcScore, maxBoost)));
+ }
+
+ @Override
+ public String getName() {
+ return "min";
+ }
+
+ @Override
+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {
+ float score = toFloat(queryBoost * Math.min(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost)));
+ ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:");
+ ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of");
+ innerMinExpl.addDetail(funcExpl);
+ innerMinExpl.addDetail(new Explanation(maxBoost, "maxBoost"));
+ ComplexExplanation outerMinExpl = new ComplexExplanation(true, Math.min(Math.min(funcExpl.getValue(), maxBoost),
+ queryExpl.getValue()), "min of");
+ outerMinExpl.addDetail(queryExpl);
+ outerMinExpl.addDetail(innerMinExpl);
+ res.addDetail(outerMinExpl);
+ res.addDetail(new Explanation(queryBoost, "queryBoost"));
+ return res;
+ }
+
+ },
+ MAX {
+ @Override
+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {
+ return toFloat(queryBoost * (Math.max(queryScore, Math.min(funcScore, maxBoost))));
+ }
+
+ @Override
+ public String getName() {
+ return "max";
+ }
+
+ @Override
+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {
+ float score = toFloat(queryBoost * Math.max(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost)));
+ ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:");
+ ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of");
+ innerMinExpl.addDetail(funcExpl);
+ innerMinExpl.addDetail(new Explanation(maxBoost, "maxBoost"));
+ ComplexExplanation outerMaxExpl = new ComplexExplanation(true, Math.max(Math.min(funcExpl.getValue(), maxBoost),
+ queryExpl.getValue()), "max of");
+ outerMaxExpl.addDetail(queryExpl);
+ outerMaxExpl.addDetail(innerMinExpl);
+ res.addDetail(outerMaxExpl);
+ res.addDetail(new Explanation(queryBoost, "queryBoost"));
+ return res;
+ }
+
+ };
+
+ public abstract float combine(double queryBoost, double queryScore, double funcScore, double maxBoost);
+
+ public abstract String getName();
+
+ public static float toFloat(double input) {
+ assert deviation(input) <= 0.001 : "input " + input + " out of float scope for function score deviation: " + deviation(input);
+ return (float) input;
+ }
+
+ private static double deviation(double input) { // only with assert!
+ float floatVersion = (float) input;
+ return Double.compare(floatVersion, input) == 0 || input == 0.0d ? 0 : 1.d - (floatVersion) / input;
+ }
+
+ public abstract ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost);
+}
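For illustration, a minimal sketch of how the modes above combine a query score with a capped function score. The wrapper class and the literal values are assumptions, not part of the patch; funcScore is deliberately larger than maxBoost to show the capping:

    import org.elasticsearch.common.lucene.search.function.CombineFunction;

    class CombineFunctionSketch {
        public static void main(String[] args) {
            float queryBoost = 1.0f, queryScore = 2.0f, maxBoost = 3.0f;
            double funcScore = 5.0; // capped to maxBoost = 3.0 by every mode
            System.out.println(CombineFunction.MULT.combine(queryBoost, queryScore, funcScore, maxBoost));    // 1 * 2 * 3    = 6.0
            System.out.println(CombineFunction.REPLACE.combine(queryBoost, queryScore, funcScore, maxBoost)); // 1 * 3        = 3.0
            System.out.println(CombineFunction.SUM.combine(queryBoost, queryScore, funcScore, maxBoost));     // 1 * (2 + 3)  = 5.0
            System.out.println(CombineFunction.AVG.combine(queryBoost, queryScore, funcScore, maxBoost));     // 1 * (3+2)/2  = 2.5
            System.out.println(CombineFunction.MIN.combine(queryBoost, queryScore, funcScore, maxBoost));     // 1 * min(2,3) = 2.0
            System.out.println(CombineFunction.MAX.combine(queryBoost, queryScore, funcScore, maxBoost));     // 1 * max(2,3) = 3.0
        }
    }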
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java
new file mode 100644
index 0000000..ba5764f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java
@@ -0,0 +1,379 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * A query that allows for a pluggable boost function / filter. If it matches
+ * the filter, it will be boosted by the formula.
+ */
+public class FiltersFunctionScoreQuery extends Query {
+
+ public static class FilterFunction {
+ public final Filter filter;
+ public final ScoreFunction function;
+
+ public FilterFunction(Filter filter, ScoreFunction function) {
+ this.filter = filter;
+ this.function = function;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+
+ FilterFunction that = (FilterFunction) o;
+
+ if (filter != null ? !filter.equals(that.filter) : that.filter != null)
+ return false;
+ if (function != null ? !function.equals(that.function) : that.function != null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = filter != null ? filter.hashCode() : 0;
+ result = 31 * result + (function != null ? function.hashCode() : 0);
+ return result;
+ }
+ }
+
+ public enum ScoreMode {
+ First, Avg, Max, Sum, Min, Multiply
+ }
+
+ Query subQuery;
+ final FilterFunction[] filterFunctions;
+ final ScoreMode scoreMode;
+ final float maxBoost;
+
+ protected CombineFunction combineFunction;
+
+ public FiltersFunctionScoreQuery(Query subQuery, ScoreMode scoreMode, FilterFunction[] filterFunctions, float maxBoost) {
+ this.subQuery = subQuery;
+ this.scoreMode = scoreMode;
+ this.filterFunctions = filterFunctions;
+ this.maxBoost = maxBoost;
+ combineFunction = CombineFunction.MULT;
+ }
+
+ public FiltersFunctionScoreQuery setCombineFunction(CombineFunction combineFunction) {
+ this.combineFunction = combineFunction;
+ return this;
+ }
+
+ public Query getSubQuery() {
+ return subQuery;
+ }
+
+ public FilterFunction[] getFilterFunctions() {
+ return filterFunctions;
+ }
+
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ Query newQ = subQuery.rewrite(reader);
+ if (newQ == subQuery)
+ return this;
+ FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone();
+ bq.subQuery = newQ;
+ return bq;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ subQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ Weight subQueryWeight = subQuery.createWeight(searcher);
+ return new CustomBoostFactorWeight(subQueryWeight, filterFunctions.length);
+ }
+
+ class CustomBoostFactorWeight extends Weight {
+
+ final Weight subQueryWeight;
+ final Bits[] docSets;
+
+ public CustomBoostFactorWeight(Weight subQueryWeight, int filterFunctionLength) throws IOException {
+ this.subQueryWeight = subQueryWeight;
+ this.docSets = new Bits[filterFunctionLength];
+ }
+
+ public Query getQuery() {
+ return FiltersFunctionScoreQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ float sum = subQueryWeight.getValueForNormalization();
+ sum *= getBoost() * getBoost();
+ return sum;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ subQueryWeight.normalize(norm, topLevelBoost * getBoost());
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ // we ignore the scoreDocsInOrder parameter because we must score in
+ // order whenever documents are scored with a script: the
+ // ShardLookup depends on in-order scoring.
+ Scorer subQueryScorer = subQueryWeight.scorer(context, true, false, acceptDocs);
+ if (subQueryScorer == null) {
+ return null;
+ }
+ for (int i = 0; i < filterFunctions.length; i++) {
+ FilterFunction filterFunction = filterFunctions[i];
+ filterFunction.function.setNextReader(context);
+ docSets[i] = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, acceptDocs));
+ }
+ return new CustomBoostFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, docSets, combineFunction);
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+
+ Explanation subQueryExpl = subQueryWeight.explain(context, doc);
+ if (!subQueryExpl.isMatch()) {
+ return subQueryExpl;
+ }
+ // First: Gather explanations for all filters
+ List<ComplexExplanation> filterExplanations = new ArrayList<ComplexExplanation>();
+ for (FilterFunction filterFunction : filterFunctions) {
+ Bits docSet = DocIdSets.toSafeBits(context.reader(),
+ filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs()));
+ if (docSet.get(doc)) {
+ filterFunction.function.setNextReader(context);
+ Explanation functionExplanation = filterFunction.function.explainScore(doc, subQueryExpl);
+ double factor = functionExplanation.getValue();
+ float sc = CombineFunction.toFloat(factor);
+ ComplexExplanation filterExplanation = new ComplexExplanation(true, sc, "function score, product of:");
+ filterExplanation.addDetail(new Explanation(1.0f, "match filter: " + filterFunction.filter.toString()));
+ filterExplanation.addDetail(functionExplanation);
+ filterExplanations.add(filterExplanation);
+ }
+ }
+ if (filterExplanations.size() == 0) {
+ float sc = getBoost() * subQueryExpl.getValue();
+ Explanation res = new ComplexExplanation(true, sc, "function score, no filter match, product of:");
+ res.addDetail(subQueryExpl);
+ res.addDetail(new Explanation(getBoost(), "queryBoost"));
+ return res;
+ }
+
+ // Second: Compute the factor that would have been computed by the
+ // filters
+ double factor = 1.0;
+ switch (scoreMode) {
+ case First:
+ factor = filterExplanations.get(0).getValue();
+ break;
+ case Max:
+ factor = Double.NEGATIVE_INFINITY;
+ for (int i = 0; i < filterExplanations.size(); i++) {
+ factor = Math.max(filterExplanations.get(i).getValue(), factor);
+ }
+ break;
+ case Min:
+ factor = Double.POSITIVE_INFINITY;
+ for (int i = 0; i < filterExplanations.size(); i++) {
+ factor = Math.min(filterExplanations.get(i).getValue(), factor);
+ }
+ break;
+ case Multiply:
+ for (int i = 0; i < filterExplanations.size(); i++) {
+ factor *= filterExplanations.get(i).getValue();
+ }
+ break;
+ default: // Avg / Sum
+ double totalFactor = 0.0;
+ int count = 0;
+ for (int i = 0; i < filterExplanations.size(); i++) {
+ totalFactor += filterExplanations.get(i).getValue();
+ count++;
+ }
+ if (count != 0) {
+ factor = totalFactor;
+ if (scoreMode == ScoreMode.Avg) {
+ factor /= count;
+ }
+ }
+ }
+ ComplexExplanation factorExplanation = new ComplexExplanation(true, CombineFunction.toFloat(factor),
+ "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]");
+ for (int i = 0; i < filterExplanations.size(); i++) {
+ factorExplanation.addDetail(filterExplanations.get(i));
+ }
+ return combineFunction.explain(getBoost(), subQueryExpl, factorExplanation, maxBoost);
+ }
+ }
+
+ static class CustomBoostFactorScorer extends Scorer {
+
+ private final float subQueryBoost;
+ private final Scorer scorer;
+ private final FilterFunction[] filterFunctions;
+ private final ScoreMode scoreMode;
+ private final float maxBoost;
+ private final Bits[] docSets;
+ private final CombineFunction scoreCombiner;
+
+ private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreMode scoreMode, FilterFunction[] filterFunctions,
+ float maxBoost, Bits[] docSets, CombineFunction scoreCombiner) throws IOException {
+ super(w);
+ this.subQueryBoost = w.getQuery().getBoost();
+ this.scorer = scorer;
+ this.scoreMode = scoreMode;
+ this.filterFunctions = filterFunctions;
+ this.maxBoost = maxBoost;
+ this.docSets = docSets;
+ this.scoreCombiner = scoreCombiner;
+ }
+
+ @Override
+ public int docID() {
+ return scorer.docID();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return scorer.advance(target);
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return scorer.nextDoc();
+ }
+
+ @Override
+ public float score() throws IOException {
+ int docId = scorer.docID();
+ double factor = 1.0;
+ float subQueryScore = scorer.score();
+ if (scoreMode == ScoreMode.First) {
+ for (int i = 0; i < filterFunctions.length; i++) {
+ if (docSets[i].get(docId)) {
+ factor = filterFunctions[i].function.score(docId, subQueryScore);
+ break;
+ }
+ }
+ } else if (scoreMode == ScoreMode.Max) {
+ double maxFactor = Double.NEGATIVE_INFINITY;
+ for (int i = 0; i < filterFunctions.length; i++) {
+ if (docSets[i].get(docId)) {
+ maxFactor = Math.max(filterFunctions[i].function.score(docId, subQueryScore), maxFactor);
+ }
+ }
+ if (maxFactor != Double.NEGATIVE_INFINITY) {
+ factor = maxFactor;
+ }
+ } else if (scoreMode == ScoreMode.Min) {
+ double minFactor = Double.POSITIVE_INFINITY;
+ for (int i = 0; i < filterFunctions.length; i++) {
+ if (docSets[i].get(docId)) {
+ minFactor = Math.min(filterFunctions[i].function.score(docId, subQueryScore), minFactor);
+ }
+ }
+ if (minFactor != Double.POSITIVE_INFINITY) {
+ factor = minFactor;
+ }
+ } else if (scoreMode == ScoreMode.Multiply) {
+ for (int i = 0; i < filterFunctions.length; i++) {
+ if (docSets[i].get(docId)) {
+ factor *= filterFunctions[i].function.score(docId, subQueryScore);
+ }
+ }
+ } else { // Avg / Sum
+ double totalFactor = 0.0;
+ int count = 0;
+ for (int i = 0; i < filterFunctions.length; i++) {
+ if (docSets[i].get(docId)) {
+ totalFactor += filterFunctions[i].function.score(docId, subQueryScore);
+ count++;
+ }
+ }
+ if (count != 0) {
+ factor = totalFactor;
+ if (scoreMode == ScoreMode.Avg) {
+ factor /= count;
+ }
+ }
+ }
+ return scoreCombiner.combine(subQueryBoost, subQueryScore, factor, maxBoost);
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return scorer.freq();
+ }
+
+ @Override
+ public long cost() {
+ return scorer.cost();
+ }
+ }
+
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("function score (").append(subQuery.toString(field)).append(", functions: [");
+ for (FilterFunction filterFunction : filterFunctions) {
+ sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}");
+ }
+ sb.append("])");
+ sb.append(ToStringUtils.boost(getBoost()));
+ return sb.toString();
+ }
+
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass())
+ return false;
+ FiltersFunctionScoreQuery other = (FiltersFunctionScoreQuery) o;
+ if (this.getBoost() != other.getBoost())
+ return false;
+ if (!this.subQuery.equals(other.subQuery)) {
+ return false;
+ }
+ return Arrays.equals(this.filterFunctions, other.filterFunctions);
+ }
+
+ public int hashCode() {
+ return subQuery.hashCode() + 31 * Arrays.hashCode(filterFunctions) ^ Float.floatToIntBits(getBoost());
+ }
+}
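A hedged usage sketch for the query above: documents matching a filter have their score combined with a boost factor, capped at maxBoost. It assumes BoostScoreFunction takes a single float (the class added earlier in this patch); the field names and QueryWrapperFilter are illustrative:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.QueryWrapperFilter;
    import org.apache.lucene.search.TermQuery;
    import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
    import org.elasticsearch.common.lucene.search.function.CombineFunction;
    import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
    import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery.FilterFunction;
    import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery.ScoreMode;

    class FiltersFunctionScoreSketch {
        static Query boostRecent() {
            Query base = new TermQuery(new Term("title", "elasticsearch")); // illustrative field
            Filter recent = new QueryWrapperFilter(new TermQuery(new Term("year", "2014")));
            FilterFunction[] functions = { new FilterFunction(recent, new BoostScoreFunction(2.0f)) };
            return new FiltersFunctionScoreQuery(base, ScoreMode.Multiply, functions, 10f)
                    .setCombineFunction(CombineFunction.SUM); // add the capped factor instead of multiplying
        }
    }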
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java
new file mode 100644
index 0000000..1479bc8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query that allows for a pluggable boost function to be applied to it.
+ */
+public class FunctionScoreQuery extends Query {
+
+ Query subQuery;
+ final ScoreFunction function;
+ float maxBoost = Float.MAX_VALUE;
+ CombineFunction combineFunction;
+
+ public FunctionScoreQuery(Query subQuery, ScoreFunction function) {
+ this.subQuery = subQuery;
+ this.function = function;
+ this.combineFunction = function.getDefaultScoreCombiner();
+ }
+
+ public void setCombineFunction(CombineFunction combineFunction) {
+ this.combineFunction = combineFunction;
+ }
+
+ public void setMaxBoost(float maxBoost) {
+ this.maxBoost = maxBoost;
+ }
+
+ public float getMaxBoost() {
+ return this.maxBoost;
+ }
+
+ public Query getSubQuery() {
+ return subQuery;
+ }
+
+ public ScoreFunction getFunction() {
+ return function;
+ }
+
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ Query newQ = subQuery.rewrite(reader);
+ if (newQ == subQuery) {
+ return this;
+ }
+ FunctionScoreQuery bq = (FunctionScoreQuery) this.clone();
+ bq.subQuery = newQ;
+ return bq;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ subQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ Weight subQueryWeight = subQuery.createWeight(searcher);
+ return new CustomBoostFactorWeight(subQueryWeight);
+ }
+
+ class CustomBoostFactorWeight extends Weight {
+
+ final Weight subQueryWeight;
+
+ public CustomBoostFactorWeight(Weight subQueryWeight) throws IOException {
+ this.subQueryWeight = subQueryWeight;
+ }
+
+ public Query getQuery() {
+ return FunctionScoreQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ float sum = subQueryWeight.getValueForNormalization();
+ sum *= getBoost() * getBoost();
+ return sum;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ subQueryWeight.normalize(norm, topLevelBoost * getBoost());
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ // we ignore the scoreDocsInOrder parameter because we must score in
+ // order whenever documents are scored with a script: the
+ // ShardLookup depends on in-order scoring.
+ Scorer subQueryScorer = subQueryWeight.scorer(context, true, false, acceptDocs);
+ if (subQueryScorer == null) {
+ return null;
+ }
+ function.setNextReader(context);
+ return new CustomBoostFactorScorer(this, subQueryScorer, function, maxBoost, combineFunction);
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ Explanation subQueryExpl = subQueryWeight.explain(context, doc);
+ if (!subQueryExpl.isMatch()) {
+ return subQueryExpl;
+ }
+ function.setNextReader(context);
+ Explanation functionExplanation = function.explainScore(doc, subQueryExpl);
+ return combineFunction.explain(getBoost(), subQueryExpl, functionExplanation, maxBoost);
+ }
+ }
+
+ static class CustomBoostFactorScorer extends Scorer {
+
+ private final float subQueryBoost;
+ private final Scorer scorer;
+ private final ScoreFunction function;
+ private final float maxBoost;
+ private final CombineFunction scoreCombiner;
+
+ private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreFunction function, float maxBoost, CombineFunction scoreCombiner)
+ throws IOException {
+ super(w);
+ this.subQueryBoost = w.getQuery().getBoost();
+ this.scorer = scorer;
+ this.function = function;
+ this.maxBoost = maxBoost;
+ this.scoreCombiner = scoreCombiner;
+ }
+
+ @Override
+ public int docID() {
+ return scorer.docID();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return scorer.advance(target);
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return scorer.nextDoc();
+ }
+
+ @Override
+ public float score() throws IOException {
+ float score = scorer.score();
+ return scoreCombiner.combine(subQueryBoost, score,
+ function.score(scorer.docID(), score), maxBoost);
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return scorer.freq();
+ }
+
+ @Override
+ public long cost() {
+ return scorer.cost();
+ }
+ }
+
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("function score (").append(subQuery.toString(field)).append(",function=").append(function).append(')');
+ sb.append(ToStringUtils.boost(getBoost()));
+ return sb.toString();
+ }
+
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass())
+ return false;
+ FunctionScoreQuery other = (FunctionScoreQuery) o;
+ return this.getBoost() == other.getBoost() && this.subQuery.equals(other.subQuery) && this.function.equals(other.function)
+ && this.maxBoost == other.maxBoost;
+ }
+
+ public int hashCode() {
+ return subQuery.hashCode() + 31 * function.hashCode() ^ Float.floatToIntBits(getBoost());
+ }
+}
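A matching sketch for the single-function variant, pairing it with the RandomScoreFunction added below in this patch; the field name and seed are illustrative:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.elasticsearch.common.lucene.search.function.CombineFunction;
    import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
    import org.elasticsearch.common.lucene.search.function.RandomScoreFunction;

    class FunctionScoreSketch {
        static Query randomized() {
            Query base = new TermQuery(new Term("user", "kimchy")); // illustrative field/value
            FunctionScoreQuery q = new FunctionScoreQuery(base, new RandomScoreFunction(42L));
            q.setMaxBoost(2.0f);                        // cap whatever the function returns
            q.setCombineFunction(CombineFunction.MULT); // multiply the capped value into the query score
            return q;
        }
    }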
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java
new file mode 100644
index 0000000..89203c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Explanation;
+
+/**
+ *
+ */
+public class RandomScoreFunction extends ScoreFunction {
+
+ private final PRNG prng;
+ private int docBase;
+
+ public RandomScoreFunction(long seed) {
+ super(CombineFunction.MULT);
+ this.prng = new PRNG(seed);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) {
+ this.docBase = context.docBase;
+ }
+
+ @Override
+ public double score(int docId, float subQueryScore) {
+ return prng.random(docBase + docId);
+ }
+
+ @Override
+ public Explanation explainScore(int docId, Explanation subQueryExpl) {
+ Explanation exp = new Explanation();
+ exp.setDescription("random score function (seed: " + prng.originalSeed + ")");
+ exp.addDetail(subQueryExpl);
+ return exp;
+ }
+
+ /**
+ * Algorithm largely based on {@link java.util.Random} except this one is not
+ * thread safe and it incorporates the doc id on next();
+ */
+ static class PRNG {
+
+ private static final long multiplier = 0x5DEECE66DL;
+ private static final long addend = 0xBL;
+ private static final long mask = (1L << 48) - 1;
+
+ final long originalSeed;
+ long seed;
+
+ PRNG(long seed) {
+ this.originalSeed = seed;
+ this.seed = (seed ^ multiplier) & mask;
+ }
+
+ public float random(int doc) {
+ if (doc == 0) {
+ doc = 0xCAFEBAB; // arbitrary positive sentinel so doc 0 still contributes a salt
+ }
+
+ long rand = doc;
+ rand |= rand << 32; // spread the doc id across both halves of the salt
+ rand &= mask; // keep the salt within the 48-bit state space; it is xor'ed with the seed in nextFloat
+ return nextFloat(rand);
+ }
+
+ public float nextFloat(long rand) {
+ seed = (seed * multiplier + addend) & mask;
+ rand ^= seed;
+ double result = rand / (double)(1L << 54);
+ return (float) result;
+ }
+
+ }
+}
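The PRNG reuses java.util.Random's 48-bit linear congruential recurrence; a standalone sketch of that step, with the constants copied from the class above and an arbitrary seed of 42:

    class LcgSketch {
        public static void main(String[] args) {
            long mask = (1L << 48) - 1;
            long seed = (42L ^ 0x5DEECE66DL) & mask;        // scramble the user seed, as in the constructor
            seed = (seed * 0x5DEECE66DL + 0xBL) & mask;     // one step of the recurrence per nextFloat() call
            System.out.println(seed / (double) (1L << 54)); // roughly how a state value maps onto a score
        }
    }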
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java
new file mode 100644
index 0000000..671a14d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Explanation;
+
+/**
+ *
+ */
+public abstract class ScoreFunction {
+
+ private final CombineFunction scoreCombiner;
+
+ public abstract void setNextReader(AtomicReaderContext context);
+
+ public abstract double score(int docId, float subQueryScore);
+
+ public abstract Explanation explainScore(int docId, Explanation subQueryExpl);
+
+ public CombineFunction getDefaultScoreCombiner() {
+ return scoreCombiner;
+ }
+
+ protected ScoreFunction(CombineFunction scoreCombiner) {
+ this.scoreCombiner = scoreCombiner;
+ }
+
+}
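Extending the base class amounts to implementing the three abstract methods; a hypothetical constant-valued function, not part of the patch:

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.search.Explanation;
    import org.elasticsearch.common.lucene.search.function.CombineFunction;
    import org.elasticsearch.common.lucene.search.function.ScoreFunction;

    public class ConstantScoreFunction extends ScoreFunction {
        private final float value;

        public ConstantScoreFunction(float value) {
            super(CombineFunction.MULT); // multiply into the query score by default
            this.value = value;
        }

        @Override
        public void setNextReader(AtomicReaderContext context) {
            // stateless: no per-segment data to load
        }

        @Override
        public double score(int docId, float subQueryScore) {
            return value;
        }

        @Override
        public Explanation explainScore(int docId, Explanation subQueryExpl) {
            return new Explanation(value, "constant(" + value + ")");
        }
    }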
diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java
new file mode 100644
index 0000000..bc9fff9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.script.ExplainableSearchScript;
+import org.elasticsearch.script.SearchScript;
+
+import java.util.Map;
+
+public class ScriptScoreFunction extends ScoreFunction {
+
+ private final String sScript;
+
+ private final Map<String, Object> params;
+
+ private final SearchScript script;
+
+ public ScriptScoreFunction(String sScript, Map<String, Object> params, SearchScript script) {
+ super(CombineFunction.REPLACE);
+ this.sScript = sScript;
+ this.params = params;
+ this.script = script;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext ctx) {
+ script.setNextReader(ctx);
+ }
+
+ @Override
+ public double score(int docId, float subQueryScore) {
+ script.setNextDocId(docId);
+ script.setNextScore(subQueryScore);
+ return script.runAsDouble();
+ }
+
+ @Override
+ public Explanation explainScore(int docId, Explanation subQueryExpl) {
+ Explanation exp;
+ if (script instanceof ExplainableSearchScript) {
+ script.setNextDocId(docId);
+ script.setNextScore(subQueryExpl.getValue());
+ exp = ((ExplainableSearchScript) script).explain(subQueryExpl);
+ } else {
+ double score = score(docId, subQueryExpl.getValue());
+ exp = new Explanation(CombineFunction.toFloat(score), "script score function, composed of:");
+ exp.addDetail(subQueryExpl);
+ }
+ return exp;
+ }
+
+ @Override
+ public String toString() {
+ return "script[" + sScript + "], params [" + params + "]";
+ }
+
+}
\ No newline at end of file
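A hedged usage sketch; 'compiled' stands in for a SearchScript produced elsewhere (for example by the script service), and the script source is illustrative:

    import java.util.Map;
    import org.apache.lucene.search.Query;
    import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
    import org.elasticsearch.common.lucene.search.function.ScriptScoreFunction;
    import org.elasticsearch.script.SearchScript;

    class ScriptScoreSketch {
        static Query scriptScored(Query subQuery, SearchScript compiled, Map<String, Object> params) {
            String source = "_score * doc['popularity'].value"; // illustrative script source
            // the function's default combiner is REPLACE, so the script result becomes the score
            return new FunctionScoreQuery(subQuery, new ScriptScoreFunction(source, params, compiled));
        }
    }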
diff --git a/src/main/java/org/elasticsearch/common/lucene/store/ChecksumIndexOutput.java b/src/main/java/org/elasticsearch/common/lucene/store/ChecksumIndexOutput.java
new file mode 100644
index 0000000..85e5bdf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/store/ChecksumIndexOutput.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IndexOutput;
+
+import java.io.IOException;
+import java.util.zip.Checksum;
+
+/**
+ */
+public class ChecksumIndexOutput extends IndexOutput {
+
+ private final IndexOutput out;
+
+ private final Checksum digest;
+
+ public ChecksumIndexOutput(IndexOutput out, Checksum digest) {
+ this.out = out;
+ this.digest = digest;
+ }
+
+ public Checksum digest() {
+ return digest;
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ out.writeByte(b);
+ digest.update(b);
+ }
+
+ @Override
+ public void setLength(long length) throws IOException {
+ out.setLength(length);
+ }
+
+ // don't override copyBytes: the inherited default implementation routes every
+ // byte through writeBytes, so the digest still sees the copied data
+// @Override
+// public void copyBytes(DataInput input, long numBytes) throws IOException {
+// super.copyBytes(input, numBytes);
+// }
+
+ @Override
+ public String toString() {
+ return out.toString();
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ out.writeBytes(b, offset, length);
+ digest.update(b, offset, length);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ out.close();
+ }
+
+ @Override
+ public long getFilePointer() {
+ return out.getFilePointer();
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ out.seek(pos);
+ }
+
+ @Override
+ public long length() throws IOException {
+ return out.length();
+ }
+}
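A usage sketch, assuming a Directory and a payload as inputs; every byte written through the wrapper also updates the externally supplied Checksum:

    import java.io.IOException;
    import java.util.zip.CRC32;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.elasticsearch.common.lucene.store.ChecksumIndexOutput;

    class ChecksumSketch {
        static long writeWithChecksum(Directory dir, byte[] data) throws IOException {
            ChecksumIndexOutput out = new ChecksumIndexOutput(dir.createOutput("_payload", IOContext.DEFAULT), new CRC32());
            out.writeBytes(data, 0, data.length);
            long checksum = out.digest().getValue(); // digest reflects exactly what was written
            out.close();
            return checksum;
        }
    }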
diff --git a/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java b/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java
new file mode 100644
index 0000000..bb63978
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IndexInput;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ *
+ */
+public class InputStreamIndexInput extends InputStream {
+
+ private final IndexInput indexInput;
+
+ private final long limit;
+
+ private final long actualSizeToRead;
+
+ private long counter = 0;
+
+ private long markPointer;
+ private long markCounter;
+
+ public InputStreamIndexInput(IndexInput indexInput, long limit) {
+ this.indexInput = indexInput;
+ this.limit = limit;
+ if ((indexInput.length() - indexInput.getFilePointer()) > limit) {
+ actualSizeToRead = limit;
+ } else {
+ actualSizeToRead = indexInput.length() - indexInput.getFilePointer();
+ }
+ }
+
+ public long actualSizeToRead() {
+ return actualSizeToRead;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (b == null) {
+ throw new NullPointerException();
+ } else if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+ if (indexInput.getFilePointer() >= indexInput.length()) {
+ return -1;
+ }
+ if (indexInput.getFilePointer() + len > indexInput.length()) {
+ len = (int) (indexInput.length() - indexInput.getFilePointer());
+ }
+ if (counter + len > limit) {
+ len = (int) (limit - counter);
+ }
+ if (len <= 0) {
+ return -1;
+ }
+ indexInput.readBytes(b, off, len, false);
+ counter += len;
+ return len;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (counter++ >= limit) {
+ return -1;
+ }
+ return (indexInput.getFilePointer() < indexInput.length()) ? (indexInput.readByte() & 0xff) : -1;
+ }
+
+ @Override
+ public boolean markSupported() {
+ return true;
+ }
+
+ @Override
+ public synchronized void mark(int readlimit) {
+ markPointer = indexInput.getFilePointer();
+ markCounter = counter;
+ }
+
+ @Override
+ public synchronized void reset() throws IOException {
+ indexInput.seek(markPointer);
+ counter = markCounter;
+ }
+}
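A usage sketch: read at most a fixed number of bytes of an IndexInput through the java.io.InputStream API; the file name and the 1 KB limit are illustrative:

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.elasticsearch.common.lucene.store.InputStreamIndexInput;

    class BoundedReadSketch {
        static void readSome(Directory dir) throws IOException {
            IndexInput in = dir.openInput("_payload", IOContext.READONCE);
            InputStreamIndexInput stream = new InputStreamIndexInput(in, 1024); // hard cap at 1 KB
            byte[] buf = new byte[256];
            for (int n = stream.read(buf, 0, buf.length); n != -1; n = stream.read(buf, 0, buf.length)) {
                // consume buf[0..n)
            }
            in.close();
        }
    }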
diff --git a/src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java b/src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java
new file mode 100644
index 0000000..61c42ab
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IndexOutput;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ */
+public class OutputStreamIndexOutput extends OutputStream {
+
+ private final IndexOutput out;
+
+ public OutputStreamIndexOutput(IndexOutput out) {
+ this.out = out;
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ out.writeByte((byte) b);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ out.writeBytes(b, b.length);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ out.writeBytes(b, off, len);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ out.close();
+ }
+}
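The mirror-image sketch for the output adapter: piping an arbitrary InputStream into a Lucene file; the file name is illustrative:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.elasticsearch.common.lucene.store.OutputStreamIndexOutput;

    class CopySketch {
        static void copy(Directory dir, InputStream source) throws IOException {
            OutputStream os = new OutputStreamIndexOutput(dir.createOutput("_blob", IOContext.DEFAULT));
            byte[] buf = new byte[4096];
            for (int n = source.read(buf); n != -1; n = source.read(buf)) {
                os.write(buf, 0, n);
            }
            os.close();
        }
    }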
diff --git a/src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java b/src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java
new file mode 100644
index 0000000..1d3084e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IndexInput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ThreadSafeInputStreamIndexInput extends InputStreamIndexInput {
+
+ public ThreadSafeInputStreamIndexInput(IndexInput indexInput, long limit) {
+ super(indexInput, limit);
+ }
+
+ @Override
+ public synchronized int read(byte[] b, int off, int len) throws IOException {
+ return super.read(b, off, len);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java b/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java
new file mode 100644
index 0000000..a978d62
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.uid;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+
+/** Utility class to resolve the Lucene doc ID and version for a given uid. */
+public class Versions {
+
+ public static final long MATCH_ANY = 0L; // Version was not specified by the user
+ public static final long NOT_FOUND = -1L;
+ public static final long NOT_SET = -2L;
+
+ private Versions() {}
+
+ /** Wraps an {@link AtomicReaderContext}, a doc ID <b>relative to the context doc base</b> and a version. */
+ public static class DocIdAndVersion {
+ public final int docId;
+ public final long version;
+ public final AtomicReaderContext context;
+
+ public DocIdAndVersion(int docId, long version, AtomicReaderContext context) {
+ this.docId = docId;
+ this.version = version;
+ this.context = context;
+ }
+ }
+
+ /**
+ * Load the internal doc ID and version for the uid from the reader, returning<ul>
+ * <li>null if the uid wasn't found,
+ * <li>a doc ID and a version otherwise, the version being potentially set to {@link #NOT_SET} if the uid has no associated version
+ * </ul>
+ */
+ public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) throws IOException {
+ // iterate backwards to optimize for the frequently updated documents
+ // which are likely to be in the last segments
+ final List<AtomicReaderContext> leaves = reader.leaves();
+ for (int i = leaves.size() - 1; i >= 0; --i) {
+ final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(leaves.get(i), term);
+ if (docIdAndVersion != null) {
+ assert docIdAndVersion.version != NOT_FOUND;
+ return docIdAndVersion;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Load the version for the uid from the reader, returning<ul>
+ * <li>{@link #NOT_FOUND} if no matching doc exists,
+ * <li>{@link #NOT_SET} if no version is available,
+ * <li>the version associated with the provided uid otherwise
+ * </ul>
+ */
+ public static long loadVersion(IndexReader reader, Term term) throws IOException {
+ final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(reader, term);
+ return docIdAndVersion == null ? NOT_FOUND : docIdAndVersion.version;
+ }
+
+ /** Same as {@link #loadDocIdAndVersion(IndexReader, Term)} but operates directly on a reader context. */
+ public static DocIdAndVersion loadDocIdAndVersion(AtomicReaderContext readerContext, Term term) throws IOException {
+ assert term.field().equals(UidFieldMapper.NAME);
+ final AtomicReader reader = readerContext.reader();
+ final Bits liveDocs = reader.getLiveDocs();
+ final Terms terms = reader.terms(UidFieldMapper.NAME);
+ assert terms != null : "All segments must have a _uid field, but " + reader + " doesn't";
+ final TermsEnum termsEnum = terms.iterator(null);
+ if (!termsEnum.seekExact(term.bytes())) {
+ return null;
+ }
+
+ // Versions are stored as doc values...
+ final NumericDocValues versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
+ if (versions != null || !terms.hasPayloads()) {
+ // only the last doc that matches the _uid is interesting here: if it is deleted, then there is
+ // no match otherwise previous docs are necessarily either deleted or nested docs
+ final DocsEnum docs = termsEnum.docs(null, null);
+ int docID = DocsEnum.NO_MORE_DOCS;
+ for (int d = docs.nextDoc(); d != DocsEnum.NO_MORE_DOCS; d = docs.nextDoc()) {
+ docID = d;
+ }
+ assert docID != DocsEnum.NO_MORE_DOCS; // would mean that the term exists but has no match at all
+ if (liveDocs != null && !liveDocs.get(docID)) {
+ return null;
+ } else if (versions != null) {
+ return new DocIdAndVersion(docID, versions.get(docID), readerContext);
+ } else {
+ // _uid found, but no doc values and no payloads
+ return new DocIdAndVersion(docID, NOT_SET, readerContext);
+ }
+ }
+
+ // ... but used to be stored as payloads
+ final DocsAndPositionsEnum dpe = termsEnum.docsAndPositions(liveDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ assert dpe != null; // terms has payloads
+ int docID = DocsEnum.NO_MORE_DOCS;
+ for (int d = dpe.nextDoc(); d != DocsEnum.NO_MORE_DOCS; d = dpe.nextDoc()) {
+ docID = d;
+ dpe.nextPosition();
+ final BytesRef payload = dpe.getPayload();
+ if (payload != null && payload.length == 8) {
+ return new DocIdAndVersion(d, Numbers.bytesToLong(payload), readerContext);
+ }
+ }
+
+ if (docID == DocsEnum.NO_MORE_DOCS) {
+ return null;
+ } else {
+ return new DocIdAndVersion(docID, NOT_SET, readerContext);
+ }
+ }
+
+}
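A hedged lookup sketch; it assumes Uid.createUid from this codebase to build the "type#id" term value stored in the _uid field:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.elasticsearch.common.lucene.uid.Versions;
    import org.elasticsearch.index.mapper.Uid;
    import org.elasticsearch.index.mapper.internal.UidFieldMapper;

    class VersionLookupSketch {
        static String describe(IndexReader reader) throws IOException {
            long version = Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, Uid.createUid("tweet", "1")));
            if (version == Versions.NOT_FOUND) {
                return "no live document with this uid";
            } else if (version == Versions.NOT_SET) {
                return "document exists but carries no version";
            }
            return "version " + version;
        }
    }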
diff --git a/src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java b/src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java
new file mode 100644
index 0000000..c465e52
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java
@@ -0,0 +1,587 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.math;
+
+import jsr166y.ThreadLocalRandom;
+
+/**
+ *
+ */
+public class UnboxedMathUtils {
+
+ public static double sin(Short a) {
+ return Math.sin(a.doubleValue());
+ }
+
+ public static double sin(Integer a) {
+ return Math.sin(a.doubleValue());
+ }
+
+ public static double sin(Float a) {
+ return Math.sin(a.doubleValue());
+ }
+
+ public static double sin(Long a) {
+ return Math.sin(a.doubleValue());
+ }
+
+ public static double sin(Double a) {
+ return Math.sin(a);
+ }
+
+ public static double cos(Short a) {
+ return Math.cos(a.doubleValue());
+ }
+
+ public static double cos(Integer a) {
+ return Math.cos(a.doubleValue());
+ }
+
+ public static double cos(Float a) {
+ return Math.cos(a.doubleValue());
+ }
+
+ public static double cos(Long a) {
+ return Math.cos(a.doubleValue());
+ }
+
+ public static double cos(Double a) {
+ return Math.cos(a);
+ }
+
+ public static double tan(Short a) {
+ return Math.tan(a.doubleValue());
+ }
+
+ public static double tan(Integer a) {
+ return Math.tan(a.doubleValue());
+ }
+
+ public static double tan(Float a) {
+ return Math.tan(a.doubleValue());
+ }
+
+ public static double tan(Long a) {
+ return Math.tan(a.doubleValue());
+ }
+
+ public static double tan(Double a) {
+ return Math.tan(a);
+ }
+
+ public static double asin(Short a) {
+ return Math.asin(a.doubleValue());
+ }
+
+ public static double asin(Integer a) {
+ return Math.asin(a.doubleValue());
+ }
+
+ public static double asin(Float a) {
+ return Math.asin(a.doubleValue());
+ }
+
+ public static double asin(Long a) {
+ return Math.asin(a.doubleValue());
+ }
+
+ public static double asin(Double a) {
+ return Math.asin(a);
+ }
+
+ public static double acos(Short a) {
+ return Math.acos(a.doubleValue());
+ }
+
+ public static double acos(Integer a) {
+ return Math.acos(a.doubleValue());
+ }
+
+ public static double acos(Float a) {
+ return Math.acos(a.doubleValue());
+ }
+
+ public static double acos(Long a) {
+ return Math.acos(a.doubleValue());
+ }
+
+ public static double acos(Double a) {
+ return Math.acos(a);
+ }
+
+ public static double atan(Short a) {
+ return Math.atan(a.doubleValue());
+ }
+
+ public static double atan(Integer a) {
+ return Math.atan(a.doubleValue());
+ }
+
+ public static double atan(Float a) {
+ return Math.atan(a.doubleValue());
+ }
+
+ public static double atan(Long a) {
+ return Math.atan(a.doubleValue());
+ }
+
+ public static double atan(Double a) {
+ return Math.atan(a);
+ }
+
+ public static double toRadians(Short angdeg) {
+ return Math.toRadians(angdeg.doubleValue());
+ }
+
+ public static double toRadians(Integer angdeg) {
+ return Math.toRadians(angdeg.doubleValue());
+ }
+
+ public static double toRadians(Float angdeg) {
+ return Math.toRadians(angdeg.doubleValue());
+ }
+
+ public static double toRadians(Long angdeg) {
+ return Math.toRadians(angdeg.doubleValue());
+ }
+
+ public static double toRadians(Double angdeg) {
+ return Math.toRadians(angdeg);
+ }
+
+ public static double toDegrees(Short angrad) {
+ return Math.toDegrees(angrad.doubleValue());
+ }
+
+ public static double toDegrees(Integer angrad) {
+ return Math.toDegrees(angrad.doubleValue());
+ }
+
+ public static double toDegrees(Float angrad) {
+ return Math.toDegrees(angrad.doubleValue());
+ }
+
+ public static double toDegrees(Long angrad) {
+ return Math.toDegrees(angrad.doubleValue());
+ }
+
+ public static double toDegrees(Double angrad) {
+ return Math.toDegrees(angrad);
+ }
+
+ public static double exp(Short a) {
+ return Math.exp(a.doubleValue());
+ }
+
+ public static double exp(Integer a) {
+ return Math.exp(a.doubleValue());
+ }
+
+ public static double exp(Float a) {
+ return Math.exp(a.doubleValue());
+ }
+
+ public static double exp(Long a) {
+ return Math.exp(a.doubleValue());
+ }
+
+ public static double exp(Double a) {
+ return Math.exp(a);
+ }
+
+ public static double log(Short a) {
+ return Math.log(a.doubleValue());
+ }
+
+ public static double log(Integer a) {
+ return Math.log(a.doubleValue());
+ }
+
+ public static double log(Float a) {
+ return Math.log(a.doubleValue());
+ }
+
+ public static double log(Long a) {
+ return Math.log(a.doubleValue());
+ }
+
+ public static double log(Double a) {
+ return Math.log(a);
+ }
+
+ public static double log10(Short a) {
+ return Math.log10(a.doubleValue());
+ }
+
+ public static double log10(Integer a) {
+ return Math.log10(a.doubleValue());
+ }
+
+ public static double log10(Float a) {
+ return Math.log10(a.doubleValue());
+ }
+
+ public static double log10(Long a) {
+ return Math.log10(a.doubleValue());
+ }
+
+ public static double log10(Double a) {
+ return Math.log10(a);
+ }
+
+ public static double sqrt(Short a) {
+ return Math.sqrt(a.doubleValue());
+ }
+
+ public static double sqrt(Integer a) {
+ return Math.sqrt(a.doubleValue());
+ }
+
+ public static double sqrt(Float a) {
+ return Math.sqrt(a.doubleValue());
+ }
+
+ public static double sqrt(Long a) {
+ return Math.sqrt(a.doubleValue());
+ }
+
+ public static double sqrt(Double a) {
+ return Math.sqrt(a);
+ }
+
+ public static double cbrt(Short a) {
+ return Math.cbrt(a.doubleValue());
+ }
+
+ public static double cbrt(Integer a) {
+ return Math.cbrt(a.doubleValue());
+ }
+
+ public static double cbrt(Float a) {
+ return Math.cbrt(a.doubleValue());
+ }
+
+ public static double cbrt(Long a) {
+ return Math.cbrt(a.doubleValue());
+ }
+
+ public static double cbrt(Double a) {
+ return Math.cbrt(a);
+ }
+
+ public static double IEEEremainder(Short f1, Short f2) {
+ return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
+ }
+
+ public static double IEEEremainder(Integer f1, Integer f2) {
+ return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
+ }
+
+ public static double IEEEremainder(Float f1, Float f2) {
+ return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
+ }
+
+ public static double IEEEremainder(Long f1, Long f2) {
+ return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
+ }
+
+ public static double IEEEremainder(Double f1, Double f2) {
+ return Math.IEEEremainder(f1, f2);
+ }
+
+ public static double ceil(Short a) {
+ return Math.ceil(a.doubleValue());
+ }
+
+ public static double ceil(Integer a) {
+ return Math.ceil(a.doubleValue());
+ }
+
+ public static double ceil(Float a) {
+ return Math.ceil(a.doubleValue());
+ }
+
+ public static double ceil(Long a) {
+ return Math.ceil(a.doubleValue());
+ }
+
+ public static double ceil(Double a) {
+ return Math.ceil(a);
+ }
+
+ public static double floor(Short a) {
+ return Math.floor(a.doubleValue());
+ }
+
+ public static double floor(Integer a) {
+ return Math.floor(a.doubleValue());
+ }
+
+ public static double floor(Float a) {
+ return Math.floor(a.doubleValue());
+ }
+
+ public static double floor(Long a) {
+ return Math.floor(a.doubleValue());
+ }
+
+ public static double floor(Double a) {
+ return Math.floor(a);
+ }
+
+ public static double rint(Short a) {
+ return Math.rint(a.doubleValue());
+ }
+
+ public static double rint(Integer a) {
+ return Math.rint(a.doubleValue());
+ }
+
+ public static double rint(Float a) {
+ return Math.rint(a.doubleValue());
+ }
+
+ public static double rint(Long a) {
+ return Math.rint(a.doubleValue());
+ }
+
+ public static double rint(Double a) {
+ return Math.rint(a);
+ }
+
+ public static double atan2(Short y, Short x) {
+ return Math.atan2(y.doubleValue(), x.doubleValue());
+ }
+
+ public static double atan2(Integer y, Integer x) {
+ return Math.atan2(y.doubleValue(), x.doubleValue());
+ }
+
+ public static double atan2(Float y, Float x) {
+ return Math.atan2(y.doubleValue(), x.doubleValue());
+ }
+
+ public static double atan2(Long y, Long x) {
+ return Math.atan2(y.doubleValue(), x.doubleValue());
+ }
+
+ public static double atan2(Double y, Double x) {
+ return Math.atan2(y, x);
+ }
+
+ public static double pow(Short a, Short b) {
+ return Math.pow(a.doubleValue(), b.doubleValue());
+ }
+
+ public static double pow(Integer a, Integer b) {
+ return Math.pow(a.doubleValue(), b.doubleValue());
+ }
+
+ public static double pow(Float a, Float b) {
+ return Math.pow(a.doubleValue(), b.doubleValue());
+ }
+
+ public static double pow(Long a, Long b) {
+ return Math.pow(a.doubleValue(), b.doubleValue());
+ }
+
+ public static double pow(Double a, Double b) {
+ return Math.pow(a, b);
+ }
+
+ public static int round(Short a) {
+ return Math.round(a.floatValue());
+ }
+
+ public static int round(Integer a) {
+ return Math.round(a.floatValue());
+ }
+
+ public static int round(Float a) {
+ return Math.round(a);
+ }
+
+ public static long round(Long a) {
+ return Math.round(a.doubleValue());
+ }
+
+ public static long round(Double a) {
+ return Math.round(a);
+ }
+
+ public static double random() {
+ return ThreadLocalRandom.current().nextDouble();
+ }
+
+ public static double randomDouble() {
+ return ThreadLocalRandom.current().nextDouble();
+ }
+
+ public static double randomFloat() {
+ return ThreadLocalRandom.current().nextFloat();
+ }
+
+ public static double randomInt() {
+ return ThreadLocalRandom.current().nextInt();
+ }
+
+ public static double randomInt(Integer i) {
+ return ThreadLocalRandom.current().nextInt(i);
+ }
+
+ public static double randomLong() {
+ return ThreadLocalRandom.current().nextLong();
+ }
+
+ public static double randomLong(Long l) {
+ return ThreadLocalRandom.current().nextLong(l);
+ }
+
+ public static int abs(Integer a) {
+ return Math.abs(a);
+ }
+
+ public static long abs(Long a) {
+ return Math.abs(a);
+ }
+
+ public static float abs(Float a) {
+ return Math.abs(a);
+ }
+
+ public static double abs(Double a) {
+ return Math.abs(a);
+ }
+
+ public static int max(Integer a, Integer b) {
+ return Math.max(a, b);
+ }
+
+ public static long max(Long a, Long b) {
+ return Math.max(a, b);
+ }
+
+ public static float max(Float a, Float b) {
+ return Math.max(a, b);
+ }
+
+ public static double max(Double a, Double b) {
+ return Math.max(a, b);
+ }
+
+ public static int min(Integer a, Integer b) {
+ return Math.min(a, b);
+ }
+
+ public static long min(Long a, Long b) {
+ return Math.min(a, b);
+ }
+
+ public static float min(Float a, Float b) {
+ return Math.min(a, b);
+ }
+
+ public static double min(Double a, Double b) {
+ return Math.min(a, b);
+ }
+
+ public static double ulp(Double d) {
+ return Math.ulp(d);
+ }
+
+ public static float ulp(Float f) {
+ return Math.ulp(f);
+ }
+
+ public static double signum(Double d) {
+ return Math.signum(d);
+ }
+
+ public static float signum(Float f) {
+ return Math.signum(f);
+ }
+
+ public static double sinh(Double x) {
+ return Math.sinh(x);
+ }
+
+ public static double cosh(Double x) {
+ return Math.cosh(x);
+ }
+
+ public static double tanh(Double x) {
+ return Math.tanh(x);
+ }
+
+ public static double hypot(Double x, Double y) {
+ return Math.hypot(x, y);
+ }
+
+ public static double expm1(Double x) {
+ return Math.expm1(x);
+ }
+
+ public static double log1p(Double x) {
+ return Math.log1p(x);
+ }
+
+ public static double copySign(Double magnitude, Double sign) {
+ return Math.copySign(magnitude, sign);
+ }
+
+ public static float copySign(Float magnitude, Float sign) {
+ return Math.copySign(magnitude, sign);
+ }
+
+ public static int getExponent(Float f) {
+ return Math.getExponent(f);
+ }
+
+ public static int getExponent(Double d) {
+ return Math.getExponent(d);
+ }
+
+ public static double nextAfter(Double start, Double direction) {
+ return Math.nextAfter(start, direction);
+ }
+
+ public static float nextAfter(Float start, Double direction) {
+ return Math.nextAfter(start, direction);
+ }
+
+ public static double nextUp(Double d) {
+ return Math.nextUp(d);
+ }
+
+ public static float nextUp(Float f) {
+ return Math.nextUp(f);
+ }
+
+
+ public static double scalb(Double d, Integer scaleFactor) {
+ return Math.scalb(d, scaleFactor);
+ }
+
+ public static float scalb(Float f, Integer scaleFactor) {
+ return Math.scalb(f, scaleFactor);
+ }
+}
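
A note on why every overload above takes a boxed wrapper type: a dynamically typed script engine resolves methods reflectively by the *runtime* (boxed) types of its arguments, so an exact-match overload per wrapper type is needed. The sketch below illustrates that dispatch pattern; `MathHelper` is a hypothetical miniature of the class added above, not part of the patch.

```java
import java.lang.reflect.Method;

public class BoxedOverloadDemo {
    // hypothetical mini version of the helper class added above
    public static final class MathHelper {
        public static double sqrt(Integer a) { return Math.sqrt(a.doubleValue()); }
    }

    public static void main(String[] args) throws Exception {
        Object arg = Integer.valueOf(16); // what a script engine typically produces
        // exact-match reflective lookup succeeds only because an (Integer)
        // overload exists; Math.sqrt(double) would not be found this way
        Method m = MathHelper.class.getMethod("sqrt", arg.getClass());
        System.out.println(m.invoke(null, arg)); // 4.0
    }
}
```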
diff --git a/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java b/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java
new file mode 100644
index 0000000..c072ea8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.metrics;
+
+import jsr166e.LongAdder;
+
+/**
+ */
+public class CounterMetric implements Metric {
+
+ private final LongAdder counter = new LongAdder();
+
+ public void inc() {
+ counter.increment();
+ }
+
+ public void inc(long n) {
+ counter.add(n);
+ }
+
+ public void dec() {
+ counter.decrement();
+ }
+
+ public void dec(long n) {
+ counter.add(-n);
+ }
+
+ public long count() {
+ return counter.sum();
+ }
+}
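
A quick usage sketch (not part of the patch): because the counter is backed by a striped `LongAdder` rather than a single `AtomicLong`, many threads can update it without contending on one CAS variable, at the cost of a slightly more expensive `count()`.

```java
import org.elasticsearch.common.metrics.CounterMetric;

public class CounterMetricDemo {
    public static void main(String[] args) {
        CounterMetric openChannels = new CounterMetric();
        openChannels.inc();     // a channel was opened
        openChannels.inc(5);    // five more in a batch
        openChannels.dec();     // one closed
        System.out.println(openChannels.count()); // 5
    }
}
```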
diff --git a/src/main/java/org/elasticsearch/common/metrics/EWMA.java b/src/main/java/org/elasticsearch/common/metrics/EWMA.java
new file mode 100644
index 0000000..ad12ddf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/metrics/EWMA.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.metrics;
+
+import jsr166e.LongAdder;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An exponentially-weighted moving average.
+ *
+ * @see <a href="http://www.teamquest.com/pdfs/whitepaper/ldavg1.pdf">UNIX Load Average Part 1: How It Works</a>
+ * @see <a href="http://www.teamquest.com/pdfs/whitepaper/ldavg2.pdf">UNIX Load Average Part 2: Not Your Average Average</a>
+ * <p/>
+ * Taken from the Codahale metrics module, changed to use {@link LongAdder}.
+ */
+public class EWMA {
+ private static final double M1_ALPHA = 1 - Math.exp(-5 / 60.0);
+ private static final double M5_ALPHA = 1 - Math.exp(-5 / 60.0 / 5);
+ private static final double M15_ALPHA = 1 - Math.exp(-5 / 60.0 / 15);
+
+ private volatile boolean initialized = false;
+ private volatile double rate = 0.0;
+
+ private final LongAdder uncounted = new LongAdder();
+ private final double alpha, interval;
+
+ /**
+ * Creates a new EWMA which is equivalent to the UNIX one minute load average and which expects to be ticked every
+ * 5 seconds.
+ *
+ * @return a one-minute EWMA
+ */
+ public static EWMA oneMinuteEWMA() {
+ return new EWMA(M1_ALPHA, 5, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Creates a new EWMA which is equivalent to the UNIX five minute load average and which expects to be ticked every
+ * 5 seconds.
+ *
+ * @return a five-minute EWMA
+ */
+ public static EWMA fiveMinuteEWMA() {
+ return new EWMA(M5_ALPHA, 5, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Creates a new EWMA which is equivalent to the UNIX fifteen minute load average and which expects to be ticked
+ * every 5 seconds.
+ *
+ * @return a fifteen-minute EWMA
+ */
+ public static EWMA fifteenMinuteEWMA() {
+ return new EWMA(M15_ALPHA, 5, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Create a new EWMA with a specific smoothing constant.
+ *
+ * @param alpha the smoothing constant
+ * @param interval the expected tick interval
+ * @param intervalUnit the time unit of the tick interval
+ */
+ public EWMA(double alpha, long interval, TimeUnit intervalUnit) {
+ this.interval = intervalUnit.toNanos(interval);
+ this.alpha = alpha;
+ }
+
+ /**
+ * Update the moving average with a new value.
+ *
+ * @param n the new value
+ */
+ public void update(long n) {
+ uncounted.add(n);
+ }
+
+ /**
+ * Mark the passage of time and decay the current rate accordingly.
+ */
+ public void tick() {
+ final long count = uncounted.sumThenReset();
+ double instantRate = count / interval;
+ if (initialized) {
+ rate += (alpha * (instantRate - rate));
+ } else {
+ rate = instantRate;
+ initialized = true;
+ }
+ }
+
+ /**
+ * Returns the rate in the given units of time.
+ *
+ * @param rateUnit the unit of time
+ * @return the rate
+ */
+ public double rate(TimeUnit rateUnit) {
+ return rate * (double) rateUnit.toNanos(1);
+ }
+}
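
The smoothing constants above follow α = 1 − e^(−interval/window), e.g. 1 − e^(−5/60) for the one-minute average with a 5-second tick. A sketch of driving the average by hand (in this codebase a scheduler calls `tick()` every five seconds):

```java
import org.elasticsearch.common.metrics.EWMA;

import java.util.concurrent.TimeUnit;

public class EwmaDemo {
    public static void main(String[] args) {
        EWMA ewma = EWMA.oneMinuteEWMA();
        // simulate a steady 20 events/sec: 100 events per 5-second tick
        for (int i = 0; i < 12; i++) { // one minute of ticks
            ewma.update(100);
            ewma.tick();               // normally invoked by a 5s scheduler
        }
        // converges to 20.0 events per second
        System.out.println(ewma.rate(TimeUnit.SECONDS));
    }
}
```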
diff --git a/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java b/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java
new file mode 100644
index 0000000..3102cfb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.metrics;
+
+import jsr166e.LongAdder;
+
+/**
+ */
+public class MeanMetric implements Metric {
+
+ private final LongAdder counter = new LongAdder();
+ private final LongAdder sum = new LongAdder();
+
+ public void inc(long n) {
+ counter.increment();
+ sum.add(n);
+ }
+
+ public void dec(long n) {
+ counter.decrement();
+ sum.add(-n);
+ }
+
+ public long count() {
+ return counter.sum();
+ }
+
+ public long sum() {
+ return sum.sum();
+ }
+
+ public double mean() {
+ long count = count();
+ if (count > 0) {
+ return sum.sum() / (double) count;
+ }
+ return 0.0;
+ }
+
+ public void clear() {
+ counter.reset();
+ sum.reset();
+ }
+}
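
A minimal usage sketch (not part of the patch), e.g. for tracking average operation latency:

```java
import org.elasticsearch.common.metrics.MeanMetric;

public class MeanMetricDemo {
    public static void main(String[] args) {
        MeanMetric indexingTime = new MeanMetric();
        indexingTime.inc(120); // an operation took 120 ms
        indexingTime.inc(80);  // another took 80 ms
        System.out.println(indexingTime.count()); // 2
        System.out.println(indexingTime.sum());   // 200
        System.out.println(indexingTime.mean());  // 100.0
    }
}
```

Note that `count` and `sum` are two independent `LongAdder`s, so under concurrent updates `mean()` may observe one adder slightly ahead of the other; the result is approximate, which is acceptable for a statistics metric.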
diff --git a/src/main/java/org/elasticsearch/common/metrics/MeterMetric.java b/src/main/java/org/elasticsearch/common/metrics/MeterMetric.java
new file mode 100644
index 0000000..aeda3d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/metrics/MeterMetric.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.metrics;
+
+import jsr166e.LongAdder;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A meter metric which measures mean throughput and one-, five-, and
+ * fifteen-minute exponentially-weighted moving average throughputs.
+ *
+ * @see <a href="http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average">EMA</a>
+ * <p/>
+ * Taken from the Codahale metrics module, with the internal counter replaced by {@link LongAdder}.
+ */
+public class MeterMetric implements Metric {
+ private static final long INTERVAL = 5; // seconds
+
+ private final EWMA m1Rate = EWMA.oneMinuteEWMA();
+ private final EWMA m5Rate = EWMA.fiveMinuteEWMA();
+ private final EWMA m15Rate = EWMA.fifteenMinuteEWMA();
+
+ private final LongAdder count = new LongAdder();
+ private final long startTime = System.nanoTime();
+ private final TimeUnit rateUnit;
+ private final ScheduledFuture<?> future;
+
+ public MeterMetric(ScheduledExecutorService tickThread, TimeUnit rateUnit) {
+ this.rateUnit = rateUnit;
+ this.future = tickThread.scheduleAtFixedRate(new Runnable() {
+ @Override
+ public void run() {
+ tick();
+ }
+ }, INTERVAL, INTERVAL, TimeUnit.SECONDS);
+ }
+
+ public TimeUnit rateUnit() {
+ return rateUnit;
+ }
+
+ /**
+ * Updates the moving averages.
+ */
+ void tick() {
+ m1Rate.tick();
+ m5Rate.tick();
+ m15Rate.tick();
+ }
+
+ /**
+ * Mark the occurrence of an event.
+ */
+ public void mark() {
+ mark(1);
+ }
+
+ /**
+ * Mark the occurrence of a given number of events.
+ *
+ * @param n the number of events
+ */
+ public void mark(long n) {
+ count.add(n);
+ m1Rate.update(n);
+ m5Rate.update(n);
+ m15Rate.update(n);
+ }
+
+ public long count() {
+ return count.sum();
+ }
+
+ public double fifteenMinuteRate() {
+ return m15Rate.rate(rateUnit);
+ }
+
+ public double fiveMinuteRate() {
+ return m5Rate.rate(rateUnit);
+ }
+
+ public double meanRate() {
+ long count = count();
+ if (count == 0) {
+ return 0.0;
+ } else {
+ final long elapsed = (System.nanoTime() - startTime);
+ return convertNsRate(count / (double) elapsed);
+ }
+ }
+
+ public double oneMinuteRate() {
+ return m1Rate.rate(rateUnit);
+ }
+
+ private double convertNsRate(double ratePerNs) {
+ return ratePerNs * (double) rateUnit.toNanos(1);
+ }
+
+ public void stop() {
+ future.cancel(false);
+ }
+}
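
A usage sketch (not part of the patch): the meter needs an external scheduler to drive its 5-second ticks, and `stop()` must be called to cancel the scheduled task.

```java
import org.elasticsearch.common.metrics.MeterMetric;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class MeterMetricDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService ticker = Executors.newSingleThreadScheduledExecutor();
        MeterMetric requests = new MeterMetric(ticker, TimeUnit.SECONDS);

        requests.mark();    // one event
        requests.mark(10);  // ten more
        Thread.sleep(100);

        System.out.println(requests.count());    // 11
        System.out.println(requests.meanRate()); // events/sec since creation

        requests.stop();    // cancel the periodic tick
        ticker.shutdown();
    }
}
```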
diff --git a/src/main/java/org/elasticsearch/common/metrics/Metric.java b/src/main/java/org/elasticsearch/common/metrics/Metric.java
new file mode 100644
index 0000000..b986c28
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/metrics/Metric.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.metrics;
+
+/**
+ */
+public interface Metric {
+}
diff --git a/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java b/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java
new file mode 100644
index 0000000..d2b02b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.netty;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.frame.FrameDecoder;
+
+/**
+ * A marker class that keeps {@link FrameDecoder} from being stripped from the resulting jar so that plugins can use it.
+ */
+public class KeepFrameDecoder extends FrameDecoder {
+
+ public static final KeepFrameDecoder decoder = new KeepFrameDecoder();
+
+ @Override
+ protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception {
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/netty/NettyStaticSetup.java b/src/main/java/org/elasticsearch/common/netty/NettyStaticSetup.java
new file mode 100644
index 0000000..319a91f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/netty/NettyStaticSetup.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.netty;
+
+import org.elasticsearch.transport.netty.NettyInternalESLoggerFactory;
+import org.jboss.netty.logging.InternalLogger;
+import org.jboss.netty.logging.InternalLoggerFactory;
+import org.jboss.netty.util.ThreadNameDeterminer;
+import org.jboss.netty.util.ThreadRenamingRunnable;
+
+/**
+ */
+public class NettyStaticSetup {
+
+ private static final EsThreadNameDeterminer ES_THREAD_NAME_DETERMINER = new EsThreadNameDeterminer();
+
+ public static class EsThreadNameDeterminer implements ThreadNameDeterminer {
+ @Override
+ public String determineThreadName(String currentThreadName, String proposedThreadName) throws Exception {
+ // we control the thread name with a context, so use both
+ return currentThreadName + "{" + proposedThreadName + "}";
+ }
+ }
+
+ static {
+ InternalLoggerFactory.setDefaultFactory(new NettyInternalESLoggerFactory() {
+ @Override
+ public InternalLogger newInstance(String name) {
+ return super.newInstance(name.replace("org.jboss.netty.", "netty."));
+ }
+ });
+
+ ThreadRenamingRunnable.setThreadNameDeterminer(ES_THREAD_NAME_DETERMINER);
+ }
+
+ public static void setup() {
+ // no-op: invoking this method merely forces the static initializer above to run
+ }
+}
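
To make the determiner's effect concrete, here is an illustration only; the thread names are invented for the example:

```java
import org.elasticsearch.common.netty.NettyStaticSetup;

public class ThreadNameDemo {
    public static void main(String[] args) throws Exception {
        NettyStaticSetup.EsThreadNameDeterminer d = new NettyStaticSetup.EsThreadNameDeterminer();
        // the ES-assigned name is kept and Netty's proposal is appended in braces
        String name = d.determineThreadName(
                "elasticsearch[node-1][transport_worker]", // current (ES-controlled) name
                "New I/O worker #3");                      // name proposed by Netty
        System.out.println(name);
        // -> elasticsearch[node-1][transport_worker]{New I/O worker #3}
    }
}
```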
diff --git a/src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java b/src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java
new file mode 100644
index 0000000..73b624a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.netty;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.jboss.netty.channel.*;
+
+import java.util.Set;
+
+/**
+ *
+ */
+@ChannelHandler.Sharable
+public class OpenChannelsHandler implements ChannelUpstreamHandler {
+
+ final Set<Channel> openChannels = ConcurrentCollections.newConcurrentSet();
+ final CounterMetric openChannelsMetric = new CounterMetric();
+ final CounterMetric totalChannelsMetric = new CounterMetric();
+
+ final ESLogger logger;
+
+ public OpenChannelsHandler(ESLogger logger) {
+ this.logger = logger;
+ }
+
+ final ChannelFutureListener remover = new ChannelFutureListener() {
+ public void operationComplete(ChannelFuture future) throws Exception {
+ boolean removed = openChannels.remove(future.getChannel());
+ if (removed) {
+ openChannelsMetric.dec();
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("channel closed: {}", future.getChannel());
+ }
+ }
+ };
+
+ @Override
+ public void handleUpstream(ChannelHandlerContext ctx, ChannelEvent e) throws Exception {
+ if (e instanceof ChannelStateEvent) {
+ ChannelStateEvent evt = (ChannelStateEvent) e;
+ // an OPEN state event is also sent when a channel closes, but with value FALSE to indicate the close
+ if (evt.getState() == ChannelState.OPEN && Boolean.TRUE.equals(evt.getValue())) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("channel opened: {}", ctx.getChannel());
+ }
+ boolean added = openChannels.add(ctx.getChannel());
+ if (added) {
+ openChannelsMetric.inc();
+ totalChannelsMetric.inc();
+ ctx.getChannel().getCloseFuture().addListener(remover);
+ }
+ }
+ }
+ ctx.sendUpstream(e);
+ }
+
+ public long numberOfOpenChannels() {
+ return openChannelsMetric.count();
+ }
+
+ public long totalChannels() {
+ return totalChannelsMetric.count();
+ }
+
+ public void close() {
+ for (Channel channel : openChannels) {
+ channel.close().awaitUninterruptibly();
+ }
+ }
+}
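
A sketch of wiring the handler into a Netty 3 pipeline (assuming the `Loggers` helper from this codebase); placing it first means every accepted channel is counted before any other handler sees it:

```java
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.Channels;

public class OpenChannelsDemo {
    public static void main(String[] args) {
        ESLogger logger = Loggers.getLogger(OpenChannelsDemo.class);
        OpenChannelsHandler openChannels = new OpenChannelsHandler(logger);

        // place the handler first so every accepted channel passes through it
        ChannelPipeline pipeline = Channels.pipeline();
        pipeline.addFirst("openChannels", openChannels);

        // ... attach the pipeline to a server bootstrap, serve traffic ...

        System.out.println(openChannels.numberOfOpenChannels());
        openChannels.close(); // on shutdown: close every still-open channel
    }
}
```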
diff --git a/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/src/main/java/org/elasticsearch/common/network/NetworkModule.java
new file mode 100644
index 0000000..6f5d728
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/network/NetworkModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.network;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class NetworkModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(NetworkService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/network/NetworkService.java b/src/main/java/org/elasticsearch/common/network/NetworkService.java
new file mode 100644
index 0000000..f946229
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/network/NetworkService.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.network;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.UnknownHostException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public class NetworkService extends AbstractComponent {
+
+ public static final String LOCAL = "#local#";
+
+ private static final String GLOBAL_NETWORK_HOST_SETTING = "network.host";
+ private static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bind_host";
+ private static final String GLOBAL_NETWORK_PUBLISHHOST_SETTING = "network.publish_host";
+
+ public static final class TcpSettings {
+ public static final String TCP_NO_DELAY = "network.tcp.no_delay";
+ public static final String TCP_KEEP_ALIVE = "network.tcp.keep_alive";
+ public static final String TCP_REUSE_ADDRESS = "network.tcp.reuse_address";
+ public static final String TCP_SEND_BUFFER_SIZE = "network.tcp.send_buffer_size";
+ public static final String TCP_RECEIVE_BUFFER_SIZE = "network.tcp.receive_buffer_size";
+ public static final String TCP_BLOCKING = "network.tcp.blocking";
+ public static final String TCP_BLOCKING_SERVER = "network.tcp.blocking_server";
+ public static final String TCP_BLOCKING_CLIENT = "network.tcp.blocking_client";
+ public static final String TCP_CONNECT_TIMEOUT = "network.tcp.connect_timeout";
+
+ public static final ByteSizeValue TCP_DEFAULT_SEND_BUFFER_SIZE = null;
+ public static final ByteSizeValue TCP_DEFAULT_RECEIVE_BUFFER_SIZE = null;
+ public static final TimeValue TCP_DEFAULT_CONNECT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
+ }
+
+ /**
+ * A custom name resolver can support custom lookup keys (my_net_key:ipv4) and also change
+ * the default inet address used in case no setting is provided.
+ */
+ public static interface CustomNameResolver {
+ /**
+ * Resolves the default value if possible. If not, return <tt>null</tt>.
+ */
+ InetAddress resolveDefault();
+
+ /**
+ * Resolves a custom lookup value; returns <tt>null</tt> if this resolver cannot handle it.
+ */
+ InetAddress resolveIfPossible(String value);
+ }
+
+ private final List<CustomNameResolver> customNameResolvers = new CopyOnWriteArrayList<CustomNameResolver>();
+
+ @Inject
+ public NetworkService(Settings settings) {
+ super(settings);
+ InetSocketTransportAddress.setResolveAddress(settings.getAsBoolean("network.address.serialization.resolve", false));
+ }
+
+ /**
+ * Add a custom name resolver.
+ */
+ public void addCustomNameResolver(CustomNameResolver customNameResolver) {
+ customNameResolvers.add(customNameResolver);
+ }
+
+
+ public InetAddress resolveBindHostAddress(String bindHost) throws IOException {
+ return resolveBindHostAddress(bindHost, null);
+ }
+
+ public InetAddress resolveBindHostAddress(String bindHost, String defaultValue2) throws IOException {
+ return resolveInetAddress(bindHost, settings.get(GLOBAL_NETWORK_BINDHOST_SETTING, settings.get(GLOBAL_NETWORK_HOST_SETTING)), defaultValue2);
+ }
+
+ public InetAddress resolvePublishHostAddress(String publishHost) throws IOException {
+ InetAddress address = resolvePublishHostAddress(publishHost, null);
+ // verify that it's not a wildcard (any-local) address
+ if (address == null || address.isAnyLocalAddress()) {
+ address = NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv4);
+ if (address == null) {
+ address = NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.getIpStackType());
+ if (address == null) {
+ address = NetworkUtils.getLocalAddress();
+ if (address == null) {
+ return NetworkUtils.getLocalhost(NetworkUtils.StackType.IPv4);
+ }
+ }
+ }
+ }
+ return address;
+ }
+
+ public InetAddress resolvePublishHostAddress(String publishHost, String defaultValue2) throws IOException {
+ return resolveInetAddress(publishHost, settings.get(GLOBAL_NETWORK_PUBLISHHOST_SETTING, settings.get(GLOBAL_NETWORK_HOST_SETTING)), defaultValue2);
+ }
+
+ public InetAddress resolveInetAddress(String host, String defaultValue1, String defaultValue2) throws UnknownHostException, IOException {
+ if (host == null) {
+ host = defaultValue1;
+ }
+ if (host == null) {
+ host = defaultValue2;
+ }
+ if (host == null) {
+ for (CustomNameResolver customNameResolver : customNameResolvers) {
+ InetAddress inetAddress = customNameResolver.resolveDefault();
+ if (inetAddress != null) {
+ return inetAddress;
+ }
+ }
+ return null;
+ }
+ String origHost = host;
+ if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) {
+ host = host.substring(1, host.length() - 1);
+
+ for (CustomNameResolver customNameResolver : customNameResolvers) {
+ InetAddress inetAddress = customNameResolver.resolveIfPossible(host);
+ if (inetAddress != null) {
+ return inetAddress;
+ }
+ }
+
+ if (host.equals("local")) {
+ return NetworkUtils.getLocalAddress();
+ } else if (host.startsWith("non_loopback")) {
+ if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) {
+ return NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv4);
+ } else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) {
+ return NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv6);
+ } else {
+ return NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.getIpStackType());
+ }
+ } else {
+ NetworkUtils.StackType stackType = NetworkUtils.getIpStackType();
+ if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) {
+ stackType = NetworkUtils.StackType.IPv4;
+ host = host.substring(0, host.length() - 5);
+ } else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) {
+ stackType = NetworkUtils.StackType.IPv6;
+ host = host.substring(0, host.length() - 5);
+ }
+ Collection<NetworkInterface> allInterfs = NetworkUtils.getAllAvailableInterfaces();
+ for (NetworkInterface ni : allInterfs) {
+ if (!ni.isUp()) {
+ continue;
+ }
+ if (host.equals(ni.getName()) || host.equals(ni.getDisplayName())) {
+ if (ni.isLoopback()) {
+ return NetworkUtils.getFirstAddress(ni, stackType);
+ } else {
+ return NetworkUtils.getFirstNonLoopbackAddress(ni, stackType);
+ }
+ }
+ }
+ }
+ throw new IOException("Failed to find network interface for [" + origHost + "]");
+ }
+ return InetAddress.getByName(host);
+ }
+}
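
To summarize the host syntax accepted above: values wrapped in `_..._` (or `#...#`) are special, covering `_local_`, `_non_loopback_` with optional `:ipv4`/`:ipv6` suffixes, custom resolver keys, and interface names; anything else goes through plain DNS resolution. A sketch, assuming empty settings and that the interface name exists on the machine:

```java
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ImmutableSettings;

import java.io.IOException;
import java.net.InetAddress;

public class HostResolutionDemo {
    public static void main(String[] args) throws IOException {
        // empty settings: no network.host / network.bind_host configured
        NetworkService service = new NetworkService(ImmutableSettings.settingsBuilder().build());

        InetAddress local = service.resolveBindHostAddress("_local_");          // local host address
        InetAddress v4 = service.resolveBindHostAddress("_non_loopback:ipv4_"); // first non-loopback IPv4
        InetAddress eth = service.resolveBindHostAddress("_eth0_");             // first address on eth0
                                                                                // (throws if no such interface)
        System.out.println(local + " " + v4 + " " + eth);
    }
}
```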
diff --git a/src/main/java/org/elasticsearch/common/network/NetworkUtils.java b/src/main/java/org/elasticsearch/common/network/NetworkUtils.java
new file mode 100644
index 0000000..0e4aff2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/network/NetworkUtils.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.network;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.os.OsUtils;
+
+import java.lang.reflect.Method;
+import java.net.*;
+import java.util.*;
+
+/**
+ *
+ */
+public abstract class NetworkUtils {
+
+ private final static ESLogger logger = Loggers.getLogger(NetworkUtils.class);
+
+ public static enum StackType {
+ IPv4, IPv6, Unknown
+ }
+
+ public static final String IPv4_SETTING = "java.net.preferIPv4Stack";
+ public static final String IPv6_SETTING = "java.net.preferIPv6Addresses";
+
+ public static final String NON_LOOPBACK_ADDRESS = "non_loopback_address";
+
+ private final static InetAddress localAddress;
+
+ static {
+ InetAddress localAddressX = null;
+ try {
+ localAddressX = InetAddress.getLocalHost();
+ } catch (UnknownHostException e) {
+ logger.trace("Failed to find local host", e);
+ }
+ localAddress = localAddressX;
+ }
+
+ public static Boolean defaultReuseAddress() {
+ return OsUtils.WINDOWS ? null : true;
+ }
+
+ public static boolean isIPv4() {
+ return "true".equals(System.getProperty(IPv4_SETTING));
+ }
+
+ public static InetAddress getIPv4Localhost() throws UnknownHostException {
+ return getLocalhost(StackType.IPv4);
+ }
+
+ public static InetAddress getIPv6Localhost() throws UnknownHostException {
+ return getLocalhost(StackType.IPv6);
+ }
+
+ public static InetAddress getLocalAddress() {
+ return localAddress;
+ }
+
+ public static String getLocalHostName(String defaultHostName) {
+ if (localAddress == null) {
+ return defaultHostName;
+ }
+ String hostName = localAddress.getHostName();
+ if (hostName == null) {
+ return defaultHostName;
+ }
+ return hostName;
+ }
+
+ public static String getLocalHostAddress(String defaultHostAddress) {
+ if (localAddress == null) {
+ return defaultHostAddress;
+ }
+ String hostAddress = localAddress.getHostAddress();
+ if (hostAddress == null) {
+ return defaultHostAddress;
+ }
+ return hostAddress;
+ }
+
+ public static InetAddress getLocalhost(StackType ip_version) throws UnknownHostException {
+ if (ip_version == StackType.IPv4)
+ return InetAddress.getByName("127.0.0.1");
+ else
+ return InetAddress.getByName("::1");
+ }
+
+ public static boolean canBindToMcastAddress() {
+ return OsUtils.LINUX || OsUtils.SOLARIS || OsUtils.HP;
+ }
+
+
+ /**
+ * Returns the first non-loopback address on any interface on the current host.
+ *
+ * @param ip_version Constraint on IP version of address to be returned, 4 or 6
+ */
+ public static InetAddress getFirstNonLoopbackAddress(StackType ip_version) throws SocketException {
+ InetAddress address = null;
+
+ Enumeration intfs = NetworkInterface.getNetworkInterfaces();
+
+ List<NetworkInterface> intfsList = Lists.newArrayList();
+ while (intfs.hasMoreElements()) {
+ intfsList.add((NetworkInterface) intfs.nextElement());
+ }
+
+ // order by index, assuming first ones are more interesting
+ try {
+ final Method getIndexMethod = NetworkInterface.class.getDeclaredMethod("getIndex");
+ getIndexMethod.setAccessible(true);
+
+ CollectionUtil.timSort(intfsList, new Comparator<NetworkInterface>() {
+ @Override
+ public int compare(NetworkInterface o1, NetworkInterface o2) {
+ try {
+ return ((Integer) getIndexMethod.invoke(o1)).intValue() - ((Integer) getIndexMethod.invoke(o2)).intValue();
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalStateException("failed to fetch index of network interface");
+ }
+ }
+ });
+ } catch (Exception e) {
+ // ignore
+ }
+
+ for (NetworkInterface intf : intfsList) {
+ try {
+ if (!intf.isUp() || intf.isLoopback())
+ continue;
+ } catch (Exception e) {
+ // might happen when querying a network interface that no longer exists
+ continue;
+ }
+ address = getFirstNonLoopbackAddress(intf, ip_version);
+ if (address != null) {
+ return address;
+ }
+ }
+
+ return null;
+ }
+
+
+ /**
+ * Returns the first non-loopback address on the given interface on the current host.
+ *
+ * @param intf the interface to be checked
+ * @param ipVersion Constraint on IP version of address to be returned, 4 or 6
+ */
+ public static InetAddress getFirstNonLoopbackAddress(NetworkInterface intf, StackType ipVersion) throws SocketException {
+ if (intf == null)
+ throw new IllegalArgumentException("Network interface pointer is null");
+
+ for (Enumeration addresses = intf.getInetAddresses(); addresses.hasMoreElements(); ) {
+ InetAddress address = (InetAddress) addresses.nextElement();
+ if (!address.isLoopbackAddress()) {
+ if ((address instanceof Inet4Address && ipVersion == StackType.IPv4) ||
+ (address instanceof Inet6Address && ipVersion == StackType.IPv6))
+ return address;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns the first address with the proper ipVersion on the given interface on the current host.
+ *
+ * @param intf the interface to be checked
+ * @param ipVersion Constraint on IP version of address to be returned, 4 or 6
+ */
+ public static InetAddress getFirstAddress(NetworkInterface intf, StackType ipVersion) throws SocketException {
+ if (intf == null)
+ throw new IllegalArgumentException("Network interface pointer is null");
+
+ for (Enumeration addresses = intf.getInetAddresses(); addresses.hasMoreElements(); ) {
+ InetAddress address = (InetAddress) addresses.nextElement();
+ if ((address instanceof Inet4Address && ipVersion == StackType.IPv4) ||
+ (address instanceof Inet6Address && ipVersion == StackType.IPv6))
+ return address;
+ }
+ return null;
+ }
+
+ /**
+ * Checks whether an interface supports an IP version (i.e. has addresses
+ * defined for that IP version).
+ *
+ * @param intf the interface to check
+ * @param ipVersion the IP version (IPv4 or IPv6) to look for
+ * @return <code>true</code> if the interface has at least one address of the given version
+ */
+ public static boolean interfaceHasIPAddresses(NetworkInterface intf, StackType ipVersion) throws SocketException, UnknownHostException {
+ boolean supportsVersion = false;
+ if (intf != null) {
+ // get all the InetAddresses defined on the interface
+ Enumeration addresses = intf.getInetAddresses();
+ while (addresses != null && addresses.hasMoreElements()) {
+ // get the next InetAddress for the current interface
+ InetAddress address = (InetAddress) addresses.nextElement();
+
+ // check if we find an address of correct version
+ if ((address instanceof Inet4Address && (ipVersion == StackType.IPv4)) ||
+ (address instanceof Inet6Address && (ipVersion == StackType.IPv6))) {
+ supportsVersion = true;
+ break;
+ }
+ }
+ } else {
+ throw new UnknownHostException("network interface not found");
+ }
+ return supportsVersion;
+ }
+
+ /**
+ * Tries to determine the type of IP stack from the available interfaces and their addresses and from the
+ * system properties (java.net.preferIPv4Stack and java.net.preferIPv6Addresses)
+ *
+ * @return StackType.IPv4 for an IPv4-only stack, StackType.IPv6 for an IPv6-only stack, and StackType.Unknown
+ * if the type cannot be detected
+ */
+ public static StackType getIpStackType() {
+ boolean isIPv4StackAvailable = isStackAvailable(true);
+ boolean isIPv6StackAvailable = isStackAvailable(false);
+
+ // if only IPv4 stack available
+ if (isIPv4StackAvailable && !isIPv6StackAvailable) {
+ return StackType.IPv4;
+ }
+ // if only IPv6 stack available
+ else if (isIPv6StackAvailable && !isIPv4StackAvailable) {
+ return StackType.IPv6;
+ }
+ // if dual stack
+ else if (isIPv4StackAvailable && isIPv6StackAvailable) {
+ // get the System property which records user preference for a stack on a dual stack machine
+ if (Boolean.getBoolean(IPv4_SETTING)) // has preference over java.net.preferIPv6Addresses
+ return StackType.IPv4;
+ // whether or not java.net.preferIPv6Addresses is set, a dual stack defaults to IPv6
+ return StackType.IPv6;
+ }
+ return StackType.Unknown;
+ }
+
+
+ public static boolean isStackAvailable(boolean ipv4) {
+ Collection<InetAddress> allAddrs = getAllAvailableAddresses();
+ for (InetAddress addr : allAddrs)
+ if (ipv4 && addr instanceof Inet4Address || (!ipv4 && addr instanceof Inet6Address))
+ return true;
+ return false;
+ }
+
+
+ /**
+ * Returns all the available interfaces, including first level sub interfaces.
+ */
+ public static List<NetworkInterface> getAllAvailableInterfaces() throws SocketException {
+ List<NetworkInterface> allInterfaces = new ArrayList<NetworkInterface>();
+ for (Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces(); interfaces.hasMoreElements(); ) {
+ NetworkInterface intf = interfaces.nextElement();
+ allInterfaces.add(intf);
+
+ Enumeration<NetworkInterface> subInterfaces = intf.getSubInterfaces();
+ if (subInterfaces != null && subInterfaces.hasMoreElements()) {
+ while (subInterfaces.hasMoreElements()) {
+ allInterfaces.add(subInterfaces.nextElement());
+ }
+ }
+ }
+ return allInterfaces;
+ }
+
+ public static Collection<InetAddress> getAllAvailableAddresses() {
+ Set<InetAddress> retval = new HashSet<InetAddress>();
+ Enumeration en;
+
+ try {
+ en = NetworkInterface.getNetworkInterfaces();
+ if (en == null)
+ return retval;
+ while (en.hasMoreElements()) {
+ NetworkInterface intf = (NetworkInterface) en.nextElement();
+ Enumeration<InetAddress> addrs = intf.getInetAddresses();
+ while (addrs.hasMoreElements())
+ retval.add(addrs.nextElement());
+ }
+ } catch (SocketException e) {
+ logger.warn("Failed to derive all available interfaces", e);
+ }
+
+ return retval;
+ }
+
+
+ private NetworkUtils() {
+
+ }
+}
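
A short probe sketch (not part of the patch) showing how the utilities above combine to pick a publishable address:

```java
import org.elasticsearch.common.network.NetworkUtils;

import java.net.InetAddress;
import java.net.SocketException;

public class StackProbeDemo {
    public static void main(String[] args) throws SocketException {
        NetworkUtils.StackType stack = NetworkUtils.getIpStackType();
        System.out.println("detected IP stack: " + stack);

        // first non-loopback address matching the detected stack, or null
        InetAddress addr = NetworkUtils.getFirstNonLoopbackAddress(stack);
        System.out.println("publishable address: " + addr);

        System.out.println("interfaces: " + NetworkUtils.getAllAvailableInterfaces().size());
    }
}
```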
diff --git a/src/main/java/org/elasticsearch/common/os/OsUtils.java b/src/main/java/org/elasticsearch/common/os/OsUtils.java
new file mode 100644
index 0000000..f072803
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/os/OsUtils.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.os;
+
+import java.util.Locale;
+
+/**
+ *
+ */
+public class OsUtils {
+
+ /**
+ * The value of <tt>System.getProperty("os.name")</tt>.
+ */
+ public static final String OS_NAME = System.getProperty("os.name");
+ /**
+ * True iff running on Linux.
+ */
+ public static final boolean LINUX = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("linux");
+ /**
+ * True iff running on Windows.
+ */
+ public static final boolean WINDOWS = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("windows");
+ /**
+ * True iff running on SunOS.
+ */
+ public static final boolean SOLARIS = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("sun");
+ /**
+ * True iff running on Mac.
+ */
+ public static final boolean MAC = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("mac");
+ /**
+ * True iff running on HP.
+ */
+ public static final boolean HP = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("hp");
+
+
+ private OsUtils() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/path/PathTrie.java b/src/main/java/org/elasticsearch/common/path/PathTrie.java
new file mode 100644
index 0000000..8600a4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/path/PathTrie.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.path;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.Strings;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+
+/**
+ *
+ */
+public class PathTrie<T> {
+
+ public static interface Decoder {
+ String decode(String value);
+ }
+
+ public static final Decoder NO_DECODER = new Decoder() {
+ @Override
+ public String decode(String value) {
+ return value;
+ }
+ };
+
+ private final Decoder decoder;
+ private final TrieNode<T> root;
+ private final char separator;
+ private T rootValue;
+
+ public PathTrie() {
+ this('/', "*", NO_DECODER);
+ }
+
+ public PathTrie(Decoder decoder) {
+ this('/', "*", decoder);
+ }
+
+ public PathTrie(char separator, String wildcard, Decoder decoder) {
+ this.decoder = decoder;
+ this.separator = separator;
+ root = new TrieNode<T>(new String(new char[]{separator}), null, null, wildcard);
+ }
+
+ public class TrieNode<T> {
+ private transient String key;
+ private transient T value;
+ private boolean isWildcard;
+ private final String wildcard;
+
+ private transient String namedWildcard;
+
+ private ImmutableMap<String, TrieNode<T>> children;
+
+ private final TrieNode parent;
+
+ public TrieNode(String key, T value, TrieNode parent, String wildcard) {
+ this.key = key;
+ this.wildcard = wildcard;
+ this.isWildcard = (key.equals(wildcard));
+ this.parent = parent;
+ this.value = value;
+ this.children = ImmutableMap.of();
+ if (isNamedWildcard(key)) {
+ namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}'));
+ } else {
+ namedWildcard = null;
+ }
+ }
+
+ public void updateKeyWithNamedWildcard(String key) {
+ this.key = key;
+ namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}'));
+ }
+
+ public boolean isWildcard() {
+ return isWildcard;
+ }
+
+ public synchronized void addChild(TrieNode<T> child) {
+ children = newMapBuilder(children).put(child.key, child).immutableMap();
+ }
+
+ public TrieNode getChild(String key) {
+ return children.get(key);
+ }
+
+ public synchronized void insert(String[] path, int index, T value) {
+ if (index >= path.length)
+ return;
+
+ String token = path[index];
+ String key = token;
+ if (isNamedWildcard(token)) {
+ key = wildcard;
+ }
+ TrieNode<T> node = children.get(key);
+ if (node == null) {
+ if (index == (path.length - 1)) {
+ node = new TrieNode<T>(token, value, this, wildcard);
+ } else {
+ node = new TrieNode<T>(token, null, this, wildcard);
+ }
+ children = newMapBuilder(children).put(key, node).immutableMap();
+ } else {
+ if (isNamedWildcard(token)) {
+ node.updateKeyWithNamedWildcard(token);
+ }
+
+ // in case the target (last) node already exists but without a value,
+ // then the value should be updated.
+ if (index == (path.length - 1)) {
+ assert (node.value == null || node.value == value);
+ if (node.value == null) {
+ node.value = value;
+ }
+ }
+ }
+
+ node.insert(path, index + 1, value);
+ }
+
+ private boolean isNamedWildcard(String key) {
+ return key.indexOf('{') != -1 && key.indexOf('}') != -1;
+ }
+
+ private String namedWildcard() {
+ return namedWildcard;
+ }
+
+ private boolean isNamedWildcard() {
+ return namedWildcard != null;
+ }
+
+ public T retrieve(String[] path, int index, Map<String, String> params) {
+ if (index >= path.length)
+ return null;
+
+ String token = path[index];
+ TrieNode<T> node = children.get(token);
+ boolean usedWildcard;
+ if (node == null) {
+ node = children.get(wildcard);
+ if (node == null) {
+ return null;
+ }
+ usedWildcard = true;
+ } else {
+ usedWildcard = token.equals(wildcard);
+ }
+
+ put(params, node, token);
+
+ if (index == (path.length - 1)) {
+ return node.value;
+ }
+
+ T res = node.retrieve(path, index + 1, params);
+ if (res == null && !usedWildcard) {
+ node = children.get(wildcard);
+ if (node != null) {
+ put(params, node, token);
+ res = node.retrieve(path, index + 1, params);
+ }
+ }
+
+ return res;
+ }
+
+ private void put(Map<String, String> params, TrieNode<T> node, String value) {
+ if (params != null && node.isNamedWildcard()) {
+ params.put(node.namedWildcard(), decoder.decode(value));
+ }
+ }
+ }
+
+ public void insert(String path, T value) {
+ String[] strings = Strings.splitStringToArray(path, separator);
+ if (strings.length == 0) {
+ rootValue = value;
+ return;
+ }
+ int index = 0;
+ // supports initial delimiter.
+ if (strings.length > 0 && strings[0].isEmpty()) {
+ index = 1;
+ }
+ root.insert(strings, index, value);
+ }
+
+ public T retrieve(String path) {
+ return retrieve(path, null);
+ }
+
+ public T retrieve(String path, Map<String, String> params) {
+ if (path.length() == 0) {
+ return rootValue;
+ }
+ String[] strings = Strings.splitStringToArray(path, separator);
+ if (strings.length == 0) {
+ return rootValue;
+ }
+ int index = 0;
+ // supports initial delimiter.
+ if (strings.length > 0 && strings[0].isEmpty()) {
+ index = 1;
+ }
+ return root.retrieve(strings, index, params);
+ }
+}
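
This trie is what routes REST paths: literal segments match exactly, and `{name}` segments are stored under the wildcard and captured into the params map on retrieval. A usage sketch (handler values and paths are illustrative):

```java
import org.elasticsearch.common.path.PathTrie;

import java.util.HashMap;
import java.util.Map;

public class PathTrieDemo {
    public static void main(String[] args) {
        PathTrie<String> handlers = new PathTrie<String>();
        handlers.insert("/{index}/{type}/_search", "search-handler");
        handlers.insert("/_cluster/health", "health-handler");

        Map<String, String> params = new HashMap<String, String>();
        String handler = handlers.retrieve("/twitter/tweet/_search", params);

        System.out.println(handler); // search-handler
        System.out.println(params);  // {index=twitter, type=tweet} (map order may vary)

        System.out.println(handlers.retrieve("/_cluster/health")); // health-handler
    }
}
```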
diff --git a/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java b/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java
new file mode 100644
index 0000000..599ca67
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.property;
+
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.Strings;
+
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * Utility class for working with Strings that have placeholder values in them. A placeholder takes the form
+ * <tt>${name}</tt>. Using <tt>PropertyPlaceholder</tt> these placeholders can be substituted for
+ * user-supplied values.
+ * <p/>
+ * <p> Values for substitution can be supplied using a {@link Properties} instance or using a
+ * {@link PlaceholderResolver}.
+ */
+public class PropertyPlaceholder {
+
+ private final String placeholderPrefix;
+ private final String placeholderSuffix;
+ private final boolean ignoreUnresolvablePlaceholders;
+
+ /**
+ * Creates a new <code>PropertyPlaceholder</code> that uses the supplied prefix and suffix. Unresolvable
+ * placeholders are ignored.
+ *
+ * @param placeholderPrefix the prefix that denotes the start of a placeholder.
+ * @param placeholderSuffix the suffix that denotes the end of a placeholder.
+ */
+ public PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix) {
+ this(placeholderPrefix, placeholderSuffix, true);
+ }
+
+ /**
+ * Creates a new <code>PropertyPlaceholder</code> that uses the supplied prefix and suffix.
+ *
+ * @param placeholderPrefix the prefix that denotes the start of a placeholder.
+ * @param placeholderSuffix the suffix that denotes the end of a placeholder.
+ * @param ignoreUnresolvablePlaceholders indicates whether unresolvable placeholders should be ignored
+ * (<code>true</code>) or cause an exception (<code>false</code>).
+ */
+ public PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix,
+ boolean ignoreUnresolvablePlaceholders) {
+ Preconditions.checkNotNull(placeholderPrefix, "Argument 'placeholderPrefix' must not be null.");
+ Preconditions.checkNotNull(placeholderSuffix, "Argument 'placeholderSuffix' must not be null.");
+ this.placeholderPrefix = placeholderPrefix;
+ this.placeholderSuffix = placeholderSuffix;
+ this.ignoreUnresolvablePlaceholders = ignoreUnresolvablePlaceholders;
+ }
+
+ /**
+ * Replaces all placeholders of format <code>${name}</code> with the value returned from the supplied {@link
+ * PlaceholderResolver}.
+ *
+ * @param value the value containing the placeholders to be replaced.
+ * @param placeholderResolver the <code>PlaceholderResolver</code> to use for replacement.
+ * @return the supplied value with placeholders replaced inline.
+ */
+ public String replacePlaceholders(String value, PlaceholderResolver placeholderResolver) {
+ Preconditions.checkNotNull(value, "Argument 'value' must not be null.");
+ return parseStringValue(value, placeholderResolver, new HashSet<String>());
+ }
+
+ protected String parseStringValue(String strVal, PlaceholderResolver placeholderResolver,
+ Set<String> visitedPlaceholders) {
+ StringBuilder buf = new StringBuilder(strVal);
+
+ int startIndex = strVal.indexOf(this.placeholderPrefix);
+ while (startIndex != -1) {
+ int endIndex = findPlaceholderEndIndex(buf, startIndex);
+ if (endIndex != -1) {
+ String placeholder = buf.substring(startIndex + this.placeholderPrefix.length(), endIndex);
+ if (!visitedPlaceholders.add(placeholder)) {
+ throw new IllegalArgumentException(
+ "Circular placeholder reference '" + placeholder + "' in property definitions");
+ }
+ // Recursive invocation, parsing placeholders contained in the placeholder key.
+ placeholder = parseStringValue(placeholder, placeholderResolver, visitedPlaceholders);
+
+ // Now obtain the value for the fully resolved key...
+ int defaultValueIdx = placeholder.indexOf(':');
+ String defaultValue = null;
+ if (defaultValueIdx != -1) {
+ defaultValue = placeholder.substring(defaultValueIdx + 1);
+ placeholder = placeholder.substring(0, defaultValueIdx);
+ }
+ String propVal = placeholderResolver.resolvePlaceholder(placeholder);
+ if (propVal == null) {
+ propVal = defaultValue;
+ }
+ if (propVal == null && placeholderResolver.shouldIgnoreMissing(placeholder)) {
+ propVal = "";
+ }
+ if (propVal != null) {
+ // Recursive invocation, parsing placeholders contained in the
+ // previously resolved placeholder value.
+ propVal = parseStringValue(propVal, placeholderResolver, visitedPlaceholders);
+ buf.replace(startIndex, endIndex + this.placeholderSuffix.length(), propVal);
+ startIndex = buf.indexOf(this.placeholderPrefix, startIndex + propVal.length());
+ } else if (this.ignoreUnresolvablePlaceholders) {
+ // Proceed with unprocessed value.
+ startIndex = buf.indexOf(this.placeholderPrefix, endIndex + this.placeholderSuffix.length());
+ } else {
+ throw new IllegalArgumentException("Could not resolve placeholder '" + placeholder + "'");
+ }
+
+ visitedPlaceholders.remove(placeholder);
+ } else {
+ startIndex = -1;
+ }
+ }
+
+ return buf.toString();
+ }
+
+ private int findPlaceholderEndIndex(CharSequence buf, int startIndex) {
+ int index = startIndex + this.placeholderPrefix.length();
+ int withinNestedPlaceholder = 0;
+ while (index < buf.length()) {
+ if (Strings.substringMatch(buf, index, this.placeholderSuffix)) {
+ if (withinNestedPlaceholder > 0) {
+ withinNestedPlaceholder--;
+ index = index + this.placeholderPrefix.length() - 1;
+ } else {
+ return index;
+ }
+ } else if (Strings.substringMatch(buf, index, this.placeholderPrefix)) {
+ withinNestedPlaceholder++;
+ index = index + this.placeholderPrefix.length();
+ } else {
+ index++;
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Strategy interface used to resolve replacement values for placeholders contained in Strings.
+ *
+ * @see PropertyPlaceholder
+ */
+ public static interface PlaceholderResolver {
+
+ /**
+ * Resolves the supplied placeholder name into the replacement value.
+ *
+ * @param placeholderName the name of the placeholder to resolve.
+ * @return the replacement value or <code>null</code> if no replacement is to be made.
+ */
+ String resolvePlaceholder(String placeholderName);
+
+ boolean shouldIgnoreMissing(String placeholderName);
+ }
+}
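
As an illustrative aside (not part of the imported sources): a minimal sketch of driving replacePlaceholders with a Map-backed PlaceholderResolver. It assumes the three-argument constructor seen above takes the prefix, the suffix, and the ignoreUnresolvablePlaceholders flag, in that order.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.property.PropertyPlaceholder;

public class PlaceholderSketch {
    public static void main(String[] args) {
        final Map<String, String> props = new HashMap<String, String>();
        props.put("cluster.name", "production");

        PropertyPlaceholder placeholder = new PropertyPlaceholder("${", "}", false);
        String resolved = placeholder.replacePlaceholders(
                "node-${cluster.name}-${node.id:0}",
                new PropertyPlaceholder.PlaceholderResolver() {
                    @Override
                    public String resolvePlaceholder(String placeholderName) {
                        return props.get(placeholderName); // null falls through to the ':' default
                    }

                    @Override
                    public boolean shouldIgnoreMissing(String placeholderName) {
                        return false; // a missing name without a default then throws
                    }
                });
        // prints "node-production-0": "cluster.name" came from the map,
        // "node.id" used the inline default after the ':'
        System.out.println(resolved);
    }
}
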
diff --git a/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java b/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java
new file mode 100644
index 0000000..6730c51
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+
+abstract class AbstractRecycler<T> implements Recycler<T> {
+
+ protected final Recycler.C<T> c;
+
+ protected AbstractRecycler(Recycler.C<T> c) {
+ this.c = c;
+ }
+
+ public V<T> obtain() {
+ return obtain(-1);
+ }
+
+ @Override
+ public void close() {
+ // no-op by default
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java b/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java
new file mode 100644
index 0000000..4c22df2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+
+import java.util.Deque;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A {@link Recycler} implementation based on a concurrent {@link Deque}. This implementation is thread-safe.
+ */
+public class ConcurrentDequeRecycler<T> extends DequeRecycler<T> {
+
+ // we maintain size separately because concurrent deque implementations typically have linear-time size() impls
+ final AtomicInteger size;
+
+ public ConcurrentDequeRecycler(C<T> c, int maxSize) {
+ super(c, ConcurrentCollections.<T>newDeque(), maxSize);
+ this.size = new AtomicInteger();
+ }
+
+ @Override
+ public void close() {
+ assert deque.size() == size.get();
+ super.close();
+ size.set(0);
+ }
+
+ @Override
+ public V<T> obtain(int sizing) {
+ final V<T> v = super.obtain(sizing);
+ if (v.isRecycled()) {
+ size.decrementAndGet();
+ }
+ return v;
+ }
+
+ @Override
+ protected boolean beforeRelease() {
+ return size.incrementAndGet() <= maxSize;
+ }
+
+ @Override
+ protected void afterRelease(boolean recycled) {
+ if (!recycled) {
+ size.decrementAndGet();
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java
new file mode 100644
index 0000000..c10ebcc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+import java.util.Deque;
+
+/**
+ * A {@link Recycler} implementation based on a {@link Deque}. This implementation is NOT thread-safe.
+ */
+public class DequeRecycler<T> extends AbstractRecycler<T> {
+
+ final Deque<T> deque;
+ final int maxSize;
+
+ public DequeRecycler(C<T> c, Deque<T> queue, int maxSize) {
+ super(c);
+ this.deque = queue;
+ this.maxSize = maxSize;
+ }
+
+ @Override
+ public void close() {
+ deque.clear();
+ }
+
+ @Override
+ public V<T> obtain(int sizing) {
+ final T v = deque.pollFirst();
+ if (v == null) {
+ return new DV(c.newInstance(sizing), false);
+ }
+ return new DV(v, true);
+ }
+
+ /** Called before releasing an object; returns true if the object should be recycled and false otherwise. */
+ protected boolean beforeRelease() {
+ return deque.size() < maxSize;
+ }
+
+ /** Called after a release. */
+ protected void afterRelease(boolean recycled) {}
+
+ private class DV implements Recycler.V<T> {
+
+ T value;
+ final boolean recycled;
+
+ DV(T value, boolean recycled) {
+ this.value = value;
+ this.recycled = recycled;
+ }
+
+ @Override
+ public T v() {
+ return value;
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return recycled;
+ }
+
+ @Override
+ public boolean release() {
+ if (value == null) {
+ throw new ElasticsearchIllegalStateException("recycler entry already released...");
+ }
+ final boolean recycle = beforeRelease();
+ if (recycle) {
+ c.clear(value);
+ deque.addFirst(value);
+ }
+ value = null;
+ afterRelease(recycle);
+ return true;
+ }
+ }
+}
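
For illustration (not part of the imported sources), a sketch of the obtain/release lifecycle and the beforeRelease() pooling decision:

import java.util.ArrayDeque;

import org.elasticsearch.common.recycler.DequeRecycler;
import org.elasticsearch.common.recycler.Recycler;

public class DequeRecyclerSketch {
    public static void main(String[] args) {
        Recycler.C<StringBuilder> c = new Recycler.C<StringBuilder>() {
            @Override
            public StringBuilder newInstance(int sizing) {
                return new StringBuilder(sizing > 0 ? sizing : 16);
            }

            @Override
            public void clear(StringBuilder value) {
                value.setLength(0); // reset state before the instance goes back on the deque
            }
        };
        DequeRecycler<StringBuilder> recycler =
                new DequeRecycler<StringBuilder>(c, new ArrayDeque<StringBuilder>(), 5);

        Recycler.V<StringBuilder> first = recycler.obtain(64);
        assert !first.isRecycled();  // deque was empty, so this was freshly allocated
        first.v().append("scratch");
        first.release();             // beforeRelease(): size 0 < maxSize 5, so it is pooled

        Recycler.V<StringBuilder> second = recycler.obtain(64);
        assert second.isRecycled();  // same instance, cleared and pulled back off the deque
        second.release();
        recycler.close();
    }
}
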
diff --git a/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java b/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java
new file mode 100644
index 0000000..9bb6b86
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+abstract class FilterRecycler<T> implements Recycler<T> {
+
+ /** Get the delegate instance to forward calls to. */
+ protected abstract Recycler<T> getDelegate();
+
+ /** Wrap a recycled reference. */
+ protected Recycler.V<T> wrap(Recycler.V<T> delegate) {
+ return delegate;
+ }
+
+ @Override
+ public Recycler.V<T> obtain(int sizing) {
+ return wrap(getDelegate().obtain(sizing));
+ }
+
+ @Override
+ public Recycler.V<T> obtain() {
+ return wrap(getDelegate().obtain());
+ }
+
+ @Override
+ public void close() {
+ getDelegate().close();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java
new file mode 100644
index 0000000..0b7a3a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+/**
+ */
+public class NoneRecycler<T> extends AbstractRecycler<T> {
+
+ public NoneRecycler(C<T> c) {
+ super(c);
+ }
+
+ @Override
+ public V<T> obtain(int sizing) {
+ return new NV<T>(c.newInstance(sizing));
+ }
+
+ @Override
+ public void close() {
+
+ }
+
+ public static class NV<T> implements Recycler.V<T> {
+
+ T value;
+
+ NV(T value) {
+ this.value = value;
+ }
+
+ @Override
+ public T v() {
+ return value;
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return false;
+ }
+
+ @Override
+ public boolean release() {
+ if (value == null) {
+ throw new ElasticsearchIllegalStateException("recycler entry already released...");
+ }
+ value = null;
+ return true;
+ }
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/common/recycler/Recycler.java b/src/main/java/org/elasticsearch/common/recycler/Recycler.java
new file mode 100644
index 0000000..b74acba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/Recycler.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.common.lease.Releasable;
+
+/**
+ */
+public interface Recycler<T> {
+
+ public static interface Factory<T> {
+ Recycler<T> build();
+ }
+
+ public static interface C<T> {
+
+ /** Create a new empty instance of the given size. */
+ T newInstance(int sizing);
+
+ /** Clear the data. This operation is called when the data-structure is released. */
+ void clear(T value);
+ }
+
+ public static interface V<T> extends Releasable {
+
+ /** Reference to the value. */
+ T v();
+
+ /** Whether this instance has been recycled (true) or newly allocated (false). */
+ boolean isRecycled();
+
+ }
+
+ void close();
+
+ V<T> obtain();
+
+ V<T> obtain(int sizing);
+}
diff --git a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java
new file mode 100644
index 0000000..c1797bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import com.carrotsearch.hppc.hash.MurmurHash3;
+import com.google.common.collect.Queues;
+import org.apache.lucene.util.CloseableThreadLocal;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import java.lang.ref.SoftReference;
+
+public enum Recyclers {
+ ;
+
+ /** Return a {@link Recycler} that never recycles entries. */
+ public static <T> Recycler<T> none(Recycler.C<T> c) {
+ return new NoneRecycler<T>(c);
+ }
+
+ /** Return a concurrent recycler based on a deque. */
+ public static <T> Recycler<T> concurrentDeque(Recycler.C<T> c, int limit) {
+ return new ConcurrentDequeRecycler<T>(c, limit);
+ }
+
+ /** Return a recycler based on a deque. */
+ public static <T> Recycler<T> deque(Recycler.C<T> c, int limit) {
+ return new DequeRecycler<T>(c, Queues.<T>newArrayDeque(), limit);
+ }
+
+ /** Return a recycler based on a deque. */
+ public static <T> Recycler.Factory<T> dequeFactory(final Recycler.C<T> c, final int limit) {
+ return new Recycler.Factory<T>() {
+ @Override
+ public Recycler<T> build() {
+ return deque(c, limit);
+ }
+ };
+ }
+
+ /** Wrap two recyclers and forward calls to <code>smallObjectRecycler</code> when <code>size &lt; minSize</code> and to
+ * <code>defaultRecycler</code> otherwise. */
+ public static <T> Recycler<T> sizing(final Recycler<T> defaultRecycler, final Recycler<T> smallObjectRecycler, final int minSize) {
+ return new FilterRecycler<T>() {
+
+ @Override
+ protected Recycler<T> getDelegate() {
+ return defaultRecycler;
+ }
+
+ @Override
+ public Recycler.V<T> obtain(int sizing) {
+ if (sizing > 0 && sizing < minSize) {
+ return smallObjectRecycler.obtain(sizing);
+ }
+ return super.obtain(sizing);
+ }
+
+ @Override
+ public void close() {
+ defaultRecycler.close();
+ smallObjectRecycler.close();
+ }
+
+ };
+ }
+
+ /** Create a thread-local recycler, where each thread will have its own instance, created through the provided factory. */
+ public static <T> Recycler<T> threadLocal(final Recycler.Factory<T> factory) {
+ return new FilterRecycler<T>() {
+
+ private final CloseableThreadLocal<Recycler<T>> recyclers;
+
+ {
+ recyclers = new CloseableThreadLocal<Recycler<T>>() {
+ @Override
+ protected Recycler<T> initialValue() {
+ return factory.build();
+ }
+ };
+ }
+
+ @Override
+ protected Recycler<T> getDelegate() {
+ return recyclers.get();
+ }
+
+ @Override
+ public void close() {
+ recyclers.close();
+ }
+
+ };
+ }
+
+ /** Create a recycler that is wrapped inside a soft reference, so that it cannot cause {@link OutOfMemoryError}s. */
+ public static <T> Recycler<T> soft(final Recycler.Factory<T> factory) {
+ return new FilterRecycler<T>() {
+
+ SoftReference<Recycler<T>> ref;
+
+ {
+ ref = new SoftReference<Recycler<T>>(null);
+ }
+
+ @Override
+ protected Recycler<T> getDelegate() {
+ Recycler<T> recycler = ref.get();
+ if (recycler == null) {
+ recycler = factory.build();
+ ref = new SoftReference<Recycler<T>>(recycler);
+ }
+ return recycler;
+ }
+
+ };
+ }
+
+ /** Create a factory whose built recyclers are wrapped in a {@link SoftReference}.
+ * @see #soft(org.elasticsearch.common.recycler.Recycler.Factory) */
+ public static <T> Recycler.Factory<T> softFactory(final Recycler.Factory<T> factory) {
+ return new Recycler.Factory<T>() {
+ @Override
+ public Recycler<T> build() {
+ return soft(factory);
+ }
+ };
+ }
+
+ /** Wrap the provided recycler so that calls to {@link Recycler#obtain()} and {@link Recycler.V#release()} are protected by
+ * a lock. */
+ public static <T> Recycler<T> locked(final Recycler<T> recycler) {
+ return new FilterRecycler<T>() {
+
+ private final Object lock;
+
+ {
+ this.lock = new Object();
+ }
+
+ @Override
+ protected Recycler<T> getDelegate() {
+ return recycler;
+ }
+
+ @Override
+ public org.elasticsearch.common.recycler.Recycler.V<T> obtain(int sizing) {
+ synchronized (lock) {
+ return super.obtain(sizing);
+ }
+ }
+
+ @Override
+ public org.elasticsearch.common.recycler.Recycler.V<T> obtain() {
+ synchronized (lock) {
+ return super.obtain();
+ }
+ }
+
+ @Override
+ protected Recycler.V<T> wrap(final Recycler.V<T> delegate) {
+ return new Recycler.V<T>() {
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ synchronized (lock) {
+ return delegate.release();
+ }
+ }
+
+ @Override
+ public T v() {
+ return delegate.v();
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return delegate.isRecycled();
+ }
+
+ };
+ }
+
+ };
+ }
+
+ /** Create a concurrent implementation that can support concurrent access from <code>concurrencyLevel</code> threads with little contention. */
+ public static <T> Recycler<T> concurrent(final Recycler.Factory<T> factory, final int concurrencyLevel) {
+ if (concurrencyLevel < 1) {
+ throw new ElasticsearchIllegalArgumentException("concurrencyLevel must be >= 1");
+ }
+ if (concurrencyLevel == 1) {
+ return locked(factory.build());
+ }
+ return new FilterRecycler<T>() {
+
+ private final Recycler<T>[] recyclers;
+ {
+ @SuppressWarnings("unchecked")
+ final Recycler<T>[] recyclers = new Recycler[concurrencyLevel];
+ this.recyclers = recyclers;
+ for (int i = 0; i < concurrencyLevel; ++i) {
+ recyclers[i] = locked(factory.build());
+ }
+ }
+
+ final int slot() {
+ final long id = Thread.currentThread().getId();
+ // don't trust Thread.hashCode to have equiprobable low bits
+ int slot = (int) MurmurHash3.hash(id);
+ // make positive, otherwise % may return negative numbers
+ slot &= 0x7FFFFFFF;
+ slot %= concurrencyLevel;
+ return slot;
+ }
+
+ @Override
+ protected Recycler<T> getDelegate() {
+ return recyclers[slot()];
+ }
+
+ @Override
+ public void close() {
+ for (Recycler<T> recycler : recyclers) {
+ recycler.close();
+ }
+ }
+
+ };
+ }
+
+ public static <T> Recycler<T> concurrent(final Recycler.Factory<T> factory) {
+ return concurrent(factory, Runtime.getRuntime().availableProcessors());
+ }
+}
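
For illustration, a sketch composing these combinators; `c` stands for any Recycler.C implementation, for example the StringBuilder one in the DequeRecycler sketch above:

Recycler<StringBuilder> striped =
        Recyclers.concurrent(Recyclers.dequeFactory(c, 100), 4);
// 4 lock-guarded deques; each thread is hashed to one slot via MurmurHash3

Recycler<StringBuilder> perThread =
        Recyclers.threadLocal(Recyclers.softFactory(Recyclers.dequeFactory(c, 100)));
// one recycler per thread; the GC may reclaim an idle pool under memory pressure

Recycler.V<StringBuilder> v = striped.obtain();
try {
    v.v().append("pooled");
} finally {
    v.release();
}
striped.close();
perThread.close();
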
diff --git a/src/main/java/org/elasticsearch/common/regex/Regex.java b/src/main/java/org/elasticsearch/common/regex/Regex.java
new file mode 100644
index 0000000..67f4f13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/regex/Regex.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.regex;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+
+import java.util.Locale;
+import java.util.regex.Pattern;
+
+/**
+ *
+ */
+public class Regex {
+
+ /**
+ * This Regex / {@link Pattern} flag is supported from Java 7 on.
+ * If set on a Java 6 JVM, the flag will be ignored.
+ */
+ public static final int UNICODE_CHARACTER_CLASS = 0x100; // supported in JAVA7
+
+ /**
+ * Returns whether the given string is a simple match pattern, i.e. whether it contains the '*' wildcard.
+ */
+ public static boolean isSimpleMatchPattern(String str) {
+ return str.indexOf('*') != -1;
+ }
+
+ public static boolean isMatchAllPattern(String str) {
+ return str.equals("*");
+ }
+
+ /**
+ * Match a String against the given pattern, supporting the following simple
+ * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an
+ * arbitrary number of pattern parts), as well as direct equality.
+ *
+ * @param pattern the pattern to match against
+ * @param str the String to match
+ * @return whether the String matches the given pattern
+ */
+ public static boolean simpleMatch(String pattern, String str) {
+ if (pattern == null || str == null) {
+ return false;
+ }
+ int firstIndex = pattern.indexOf('*');
+ if (firstIndex == -1) {
+ return pattern.equals(str);
+ }
+ if (firstIndex == 0) {
+ if (pattern.length() == 1) {
+ return true;
+ }
+ int nextIndex = pattern.indexOf('*', firstIndex + 1);
+ if (nextIndex == -1) {
+ return str.endsWith(pattern.substring(1));
+ } else if (nextIndex == 1) {
+ // Double wildcard "**" - skipping the first "*"
+ return simpleMatch(pattern.substring(1), str);
+ }
+ String part = pattern.substring(1, nextIndex);
+ int partIndex = str.indexOf(part);
+ while (partIndex != -1) {
+ if (simpleMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) {
+ return true;
+ }
+ partIndex = str.indexOf(part, partIndex + 1);
+ }
+ return false;
+ }
+ return (str.length() >= firstIndex &&
+ pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) &&
+ simpleMatch(pattern.substring(firstIndex), str.substring(firstIndex)));
+ }
+
+ /**
+ * Match a String against the given patterns, supporting the following simple
+ * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an
+ * arbitrary number of pattern parts), as well as direct equality.
+ *
+ * @param patterns the patterns to match against
+ * @param str the String to match
+ * @return whether the String matches any of the given patterns
+ */
+ public static boolean simpleMatch(String[] patterns, String str) {
+ if (patterns != null) {
+ for (String pattern : patterns) {
+ if (simpleMatch(pattern, str)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public static boolean simpleMatch(String[] patterns, String[] types) {
+ if (patterns != null && types != null) {
+ for (String type : types) {
+ for (String pattern : patterns) {
+ if (simpleMatch(pattern, type)) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ public static Pattern compile(String regex, String flags) {
+ int pFlags = flags == null ? 0 : flagsFromString(flags);
+ return Pattern.compile(regex, pFlags);
+ }
+
+ public static int flagsFromString(String flags) {
+ int pFlags = 0;
+ for (String s : Strings.delimitedListToStringArray(flags, "|")) {
+ if (s.isEmpty()) {
+ continue;
+ }
+ s = s.toUpperCase(Locale.ROOT);
+ if ("CASE_INSENSITIVE".equals(s)) {
+ pFlags |= Pattern.CASE_INSENSITIVE;
+ } else if ("MULTILINE".equals(s)) {
+ pFlags |= Pattern.MULTILINE;
+ } else if ("DOTALL".equals(s)) {
+ pFlags |= Pattern.DOTALL;
+ } else if ("UNICODE_CASE".equals(s)) {
+ pFlags |= Pattern.UNICODE_CASE;
+ } else if ("CANON_EQ".equals(s)) {
+ pFlags |= Pattern.CANON_EQ;
+ } else if ("UNIX_LINES".equals(s)) {
+ pFlags |= Pattern.UNIX_LINES;
+ } else if ("LITERAL".equals(s)) {
+ pFlags |= Pattern.LITERAL;
+ } else if ("COMMENTS".equals(s)) {
+ pFlags |= Pattern.COMMENTS;
+ } else if ("UNICODE_CHAR_CLASS".equals(s)) {
+ pFlags |= UNICODE_CHARACTER_CLASS;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unknown regex flag [" + s + "]");
+ }
+ }
+ return pFlags;
+ }
+
+ public static String flagsToString(int flags) {
+ StringBuilder sb = new StringBuilder();
+ if ((flags & Pattern.CASE_INSENSITIVE) != 0) {
+ sb.append("CASE_INSENSITIVE|");
+ }
+ if ((flags & Pattern.MULTILINE) != 0) {
+ sb.append("MULTILINE|");
+ }
+ if ((flags & Pattern.DOTALL) != 0) {
+ sb.append("DOTALL|");
+ }
+ if ((flags & Pattern.UNICODE_CASE) != 0) {
+ sb.append("UNICODE_CASE|");
+ }
+ if ((flags & Pattern.CANON_EQ) != 0) {
+ sb.append("CANON_EQ|");
+ }
+ if ((flags & Pattern.UNIX_LINES) != 0) {
+ sb.append("UNIX_LINES|");
+ }
+ if ((flags & Pattern.LITERAL) != 0) {
+ sb.append("LITERAL|");
+ }
+ if ((flags & Pattern.COMMENTS) != 0) {
+ sb.append("COMMENTS|");
+ }
+ if ((flags & UNICODE_CHARACTER_CLASS) != 0) {
+ sb.append("UNICODE_CHAR_CLASS|");
+ }
+ return sb.toString();
+ }
+}
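
Illustrative expectations for both halves of the class, following directly from the code above (a fragment; the asserts assume -ea):

import java.util.regex.Pattern;

import org.elasticsearch.common.regex.Regex;

assert Regex.simpleMatch("twitter*", "twitter-2014");   // "xxx*"
assert Regex.simpleMatch("*-2014", "twitter-2014");     // "*xxx"
assert Regex.simpleMatch("tw*20*", "twitter-2014");     // arbitrary number of parts
assert !Regex.simpleMatch("twitter", "twitter-2014");   // no '*': plain equals()

int flags = Regex.flagsFromString("CASE_INSENSITIVE|DOTALL");
assert flags == (Pattern.CASE_INSENSITIVE | Pattern.DOTALL);
Pattern p = Regex.compile("foo.bar", "CASE_INSENSITIVE|DOTALL");
// note: flagsToString() emits a trailing '|'
assert "CASE_INSENSITIVE|DOTALL|".equals(Regex.flagsToString(flags));
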
diff --git a/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java b/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
new file mode 100644
index 0000000..e02342f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.joda.Joda;
+import org.joda.time.DateTimeField;
+import org.joda.time.chrono.ISOChronology;
+
+/**
+ *
+ */
+public enum DateTimeUnit {
+
+ WEEK_OF_WEEKYEAR( (byte) 1, ISOChronology.getInstanceUTC().weekOfWeekyear()),
+ YEAR_OF_CENTURY( (byte) 2, ISOChronology.getInstanceUTC().yearOfCentury()),
+ QUARTER( (byte) 3, Joda.QuarterOfYear.getField(ISOChronology.getInstanceUTC())),
+ MONTH_OF_YEAR( (byte) 4, ISOChronology.getInstanceUTC().monthOfYear()),
+ DAY_OF_MONTH( (byte) 5, ISOChronology.getInstanceUTC().dayOfMonth()),
+ HOUR_OF_DAY( (byte) 6, ISOChronology.getInstanceUTC().hourOfDay()),
+ MINUTES_OF_HOUR( (byte) 7, ISOChronology.getInstanceUTC().minuteOfHour()),
+ SECOND_OF_MINUTE( (byte) 8, ISOChronology.getInstanceUTC().secondOfMinute());
+
+ private final byte id;
+ private final DateTimeField field;
+
+ private DateTimeUnit(byte id, DateTimeField field) {
+ this.id = id;
+ this.field = field;
+ }
+
+ public byte id() {
+ return id;
+ }
+
+ public DateTimeField field() {
+ return field;
+ }
+
+ public static DateTimeUnit resolve(byte id) {
+ switch (id) {
+ case 1: return WEEK_OF_WEEKYEAR;
+ case 2: return YEAR_OF_CENTURY;
+ case 3: return QUARTER;
+ case 4: return MONTH_OF_YEAR;
+ case 5: return DAY_OF_MONTH;
+ case 6: return HOUR_OF_DAY;
+ case 7: return MINUTES_OF_HOUR;
+ case 8: return SECOND_OF_MINUTE;
+ default: throw new ElasticsearchException("Unknown date time unit id [" + id + "]");
+ }
+ }
+}
\ No newline at end of file
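
Illustrative round-trip, following directly from the code above: the byte ids are stable wire identifiers, so id() and resolve() are inverses.

DateTimeUnit unit = DateTimeUnit.MONTH_OF_YEAR;
assert unit.id() == 4;
assert DateTimeUnit.resolve(unit.id()) == unit; // writers emit unit.id(), readers call resolve()

// the wrapped Joda field does the actual math, always on the UTC chronology
long startOfMonth = unit.field().roundFloor(System.currentTimeMillis());
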
diff --git a/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/src/main/java/org/elasticsearch/common/rounding/Rounding.java
new file mode 100644
index 0000000..054b674
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/rounding/Rounding.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ * A strategy for rounding long values.
+ */
+public interface Rounding extends Streamable {
+
+ byte id();
+
+ /**
+ * Rounds the given value.
+ *
+ * @param value The value to round.
+ * @return The rounded value.
+ */
+ long round(long value);
+
+ /**
+ * Given the rounded value (which was potentially generated by {@link #round(long)}), returns the next rounding value. For example, with
+ * interval-based rounding, if the interval is 3, {@code nextRoundingValue(6) = 9}.
+ *
+ * @param value The current rounding value
+ * @return The next rounding value.
+ */
+ long nextRoundingValue(long value);
+
+ /**
+ * Rounding strategy which is based on an interval
+ *
+ * {@code rounded = value - (value % interval) }
+ */
+ public static class Interval implements Rounding {
+
+ final static byte ID = 0;
+
+ private long interval;
+
+ public Interval() { // for serialization
+ }
+
+ /**
+ * Creates a new interval rounding.
+ *
+ * @param interval The interval
+ */
+ public Interval(long interval) {
+ this.interval = interval;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ static long round(long value, long interval) {
+ long rem = value % interval;
+ // We need this condition because % may return a negative result on negative numbers
+ // According to Google caliper's IntModBenchmark, using a condition is faster than attempts to use tricks to avoid
+ // the condition. Moreover, in our case, the condition is very likely to be always true (dates, prices, distances),
+ // so easily predictable by the CPU
+ if (rem < 0) {
+ rem += interval;
+ }
+ return value - rem;
+ }
+
+ @Override
+ public long round(long value) {
+ return round(value, interval);
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ assert value == round(value);
+ return value + interval;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ interval = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(interval);
+ }
+ }
+
+ public static class Streams {
+
+ public static void write(Rounding rounding, StreamOutput out) throws IOException {
+ out.writeByte(rounding.id());
+ rounding.writeTo(out);
+ }
+
+ public static Rounding read(StreamInput in) throws IOException {
+ Rounding rounding = null;
+ byte id = in.readByte();
+ switch (id) {
+ case Interval.ID: rounding = new Interval(); break;
+ case TimeZoneRounding.TimeTimeZoneRoundingFloor.ID: rounding = new TimeZoneRounding.TimeTimeZoneRoundingFloor(); break;
+ case TimeZoneRounding.UTCTimeZoneRoundingFloor.ID: rounding = new TimeZoneRounding.UTCTimeZoneRoundingFloor(); break;
+ case TimeZoneRounding.DayTimeZoneRoundingFloor.ID: rounding = new TimeZoneRounding.DayTimeZoneRoundingFloor(); break;
+ case TimeZoneRounding.UTCIntervalTimeZoneRounding.ID: rounding = new TimeZoneRounding.UTCIntervalTimeZoneRounding(); break;
+ case TimeZoneRounding.TimeIntervalTimeZoneRounding.ID: rounding = new TimeZoneRounding.TimeIntervalTimeZoneRounding(); break;
+ case TimeZoneRounding.DayIntervalTimeZoneRounding.ID: rounding = new TimeZoneRounding.DayIntervalTimeZoneRounding(); break;
+ case TimeZoneRounding.FactorTimeZoneRounding.ID: rounding = new TimeZoneRounding.FactorTimeZoneRounding(); break;
+ case TimeZoneRounding.PrePostTimeZoneRounding.ID: rounding = new TimeZoneRounding.PrePostTimeZoneRounding(); break;
+ default: throw new ElasticsearchException("unknown rounding id [" + id + "]");
+ }
+ rounding.readFrom(in);
+ return rounding;
+ }
+
+ }
+
+}
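
For illustration, a worked sketch of Interval, including the negative-remainder branch that the comment above explains:

Rounding.Interval byTen = new Rounding.Interval(10);
assert byTen.round(47) == 40;             // 47 - (47 % 10)
assert byTen.round(40) == 40;             // already on a boundary
assert byTen.round(-3) == -10;            // rem -3 is shifted to 7, so -3 - 7 = -10
assert byTen.nextRoundingValue(40) == 50; // one interval later
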
diff --git a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java
new file mode 100644
index 0000000..d6b1ad9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.joda.time.DateTimeConstants;
+import org.joda.time.DateTimeZone;
+
+import java.io.IOException;
+
+/**
+ */
+public abstract class TimeZoneRounding implements Rounding {
+
+ public abstract long round(long utcMillis);
+
+ public static Builder builder(DateTimeUnit unit) {
+ return new Builder(unit);
+ }
+
+ public static Builder builder(TimeValue interval) {
+ return new Builder(interval);
+ }
+
+ public static class Builder {
+
+ private DateTimeUnit unit;
+ private long interval = -1;
+
+ private DateTimeZone preTz = DateTimeZone.UTC;
+ private DateTimeZone postTz = DateTimeZone.UTC;
+
+ private float factor = 1.0f;
+
+ private long preOffset;
+ private long postOffset;
+
+ private boolean preZoneAdjustLargeInterval = false;
+
+ public Builder(DateTimeUnit unit) {
+ this.unit = unit;
+ this.interval = -1;
+ }
+
+ public Builder(TimeValue interval) {
+ this.unit = null;
+ this.interval = interval.millis();
+ }
+
+ public Builder preZone(DateTimeZone preTz) {
+ this.preTz = preTz;
+ return this;
+ }
+
+ public Builder preZoneAdjustLargeInterval(boolean preZoneAdjustLargeInterval) {
+ this.preZoneAdjustLargeInterval = preZoneAdjustLargeInterval;
+ return this;
+ }
+
+ public Builder postZone(DateTimeZone postTz) {
+ this.postTz = postTz;
+ return this;
+ }
+
+ public Builder preOffset(long preOffset) {
+ this.preOffset = preOffset;
+ return this;
+ }
+
+ public Builder postOffset(long postOffset) {
+ this.postOffset = postOffset;
+ return this;
+ }
+
+ public Builder factor(float factor) {
+ this.factor = factor;
+ return this;
+ }
+
+ public TimeZoneRounding build() {
+ TimeZoneRounding timeZoneRounding;
+ if (unit != null) {
+ if (preTz.equals(DateTimeZone.UTC) && postTz.equals(DateTimeZone.UTC)) {
+ timeZoneRounding = new UTCTimeZoneRoundingFloor(unit);
+ } else if (preZoneAdjustLargeInterval || unit.field().getDurationField().getUnitMillis() < DateTimeConstants.MILLIS_PER_HOUR * 12) {
+ timeZoneRounding = new TimeTimeZoneRoundingFloor(unit, preTz, postTz);
+ } else {
+ timeZoneRounding = new DayTimeZoneRoundingFloor(unit, preTz, postTz);
+ }
+ } else {
+ if (preTz.equals(DateTimeZone.UTC) && postTz.equals(DateTimeZone.UTC)) {
+ timeZoneRounding = new UTCIntervalTimeZoneRounding(interval);
+ } else if (preZoneAdjustLargeInterval || interval < DateTimeConstants.MILLIS_PER_HOUR * 12) {
+ timeZoneRounding = new TimeIntervalTimeZoneRounding(interval, preTz, postTz);
+ } else {
+ timeZoneRounding = new DayIntervalTimeZoneRounding(interval, preTz, postTz);
+ }
+ }
+ if (preOffset != 0 || postOffset != 0) {
+ timeZoneRounding = new PrePostTimeZoneRounding(timeZoneRounding, preOffset, postOffset);
+ }
+ if (factor != 1.0f) {
+ timeZoneRounding = new FactorTimeZoneRounding(timeZoneRounding, factor);
+ }
+ return timeZoneRounding;
+ }
+ }
+
+ static class TimeTimeZoneRoundingFloor extends TimeZoneRounding {
+
+ static final byte ID = 1;
+
+ private DateTimeUnit unit;
+ private DateTimeZone preTz;
+ private DateTimeZone postTz;
+
+ TimeTimeZoneRoundingFloor() { // for serialization
+ }
+
+ TimeTimeZoneRoundingFloor(DateTimeUnit unit, DateTimeZone preTz, DateTimeZone postTz) {
+ this.unit = unit;
+ this.preTz = preTz;
+ this.postTz = postTz;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ long time = utcMillis + preTz.getOffset(utcMillis);
+ time = unit.field().roundFloor(time);
+ // time is still in local time here; move it back to UTC (this strategy also applies when preZoneAdjustLargeInterval is set)
+ time = time - preTz.getOffset(time);
+ // now apply post Tz
+ time = time + postTz.getOffset(time);
+ return time;
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+// return value + unit.field().getDurationField().getUnitMillis();
+ return unit.field().roundCeiling(value + 1);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ unit = DateTimeUnit.resolve(in.readByte());
+ preTz = DateTimeZone.forID(in.readSharedString());
+ postTz = DateTimeZone.forID(in.readSharedString());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeByte(unit.id());
+ out.writeSharedString(preTz.getID());
+ out.writeSharedString(postTz.getID());
+ }
+ }
+
+ static class UTCTimeZoneRoundingFloor extends TimeZoneRounding {
+
+ final static byte ID = 2;
+
+ private DateTimeUnit unit;
+
+ UTCTimeZoneRoundingFloor() { // for serialization
+ }
+
+ UTCTimeZoneRoundingFloor(DateTimeUnit unit) {
+ this.unit = unit;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ return unit.field().roundFloor(utcMillis);
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return unit.field().roundCeiling(value + 1);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ unit = DateTimeUnit.resolve(in.readByte());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeByte(unit.id());
+ }
+ }
+
+ static class DayTimeZoneRoundingFloor extends TimeZoneRounding {
+
+ final static byte ID = 3;
+
+ private DateTimeUnit unit;
+ private DateTimeZone preTz;
+ private DateTimeZone postTz;
+
+ DayTimeZoneRoundingFloor() { // for serialization
+ }
+
+ DayTimeZoneRoundingFloor(DateTimeUnit unit, DateTimeZone preTz, DateTimeZone postTz) {
+ this.unit = unit;
+ this.preTz = preTz;
+ this.postTz = postTz;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ long time = utcMillis + preTz.getOffset(utcMillis);
+ time = unit.field().roundFloor(time);
+ // after rounding, since it's day-level (and above), it's effectively UTC
+ // now apply post Tz
+ time = time + postTz.getOffset(time);
+ return time;
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return unit.field().getDurationField().getUnitMillis() + value;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ unit = DateTimeUnit.resolve(in.readByte());
+ preTz = DateTimeZone.forID(in.readSharedString());
+ postTz = DateTimeZone.forID(in.readSharedString());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeByte(unit.id());
+ out.writeSharedString(preTz.getID());
+ out.writeSharedString(postTz.getID());
+ }
+ }
+
+ static class UTCIntervalTimeZoneRounding extends TimeZoneRounding {
+
+ final static byte ID = 4;
+
+ private long interval;
+
+ UTCIntervalTimeZoneRounding() { // for serialization
+ }
+
+ UTCIntervalTimeZoneRounding(long interval) {
+ this.interval = interval;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ return Rounding.Interval.round(utcMillis, interval);
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return value + interval;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ interval = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(interval);
+ }
+ }
+
+
+ static class TimeIntervalTimeZoneRounding extends TimeZoneRounding {
+
+ final static byte ID = 5;
+
+ private long interval;
+ private DateTimeZone preTz;
+ private DateTimeZone postTz;
+
+ TimeIntervalTimeZoneRounding() { // for serialization
+ }
+
+ TimeIntervalTimeZoneRounding(long interval, DateTimeZone preTz, DateTimeZone postTz) {
+ this.interval = interval;
+ this.preTz = preTz;
+ this.postTz = postTz;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ long time = utcMillis + preTz.getOffset(utcMillis);
+ time = Rounding.Interval.round(time, interval);
+ // now, time is still in local, move it to UTC
+ time = time - preTz.getOffset(time);
+ // now apply post Tz
+ time = time + postTz.getOffset(time);
+ return time;
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return value + interval;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ interval = in.readVLong();
+ preTz = DateTimeZone.forID(in.readSharedString());
+ postTz = DateTimeZone.forID(in.readSharedString());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(interval);
+ out.writeSharedString(preTz.getID());
+ out.writeSharedString(postTz.getID());
+ }
+ }
+
+ static class DayIntervalTimeZoneRounding extends TimeZoneRounding {
+
+ final static byte ID = 6;
+
+ private long interval;
+ private DateTimeZone preTz;
+ private DateTimeZone postTz;
+
+ DayIntervalTimeZoneRounding() { // for serialization
+ }
+
+ DayIntervalTimeZoneRounding(long interval, DateTimeZone preTz, DateTimeZone postTz) {
+ this.interval = interval;
+ this.preTz = preTz;
+ this.postTz = postTz;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ long time = utcMillis + preTz.getOffset(utcMillis);
+ time = Rounding.Interval.round(time, interval);
+ // after rounding, since it's day-level (and above), it's effectively UTC
+ // now apply post Tz
+ time = time + postTz.getOffset(time);
+ return time;
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return value + interval;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ interval = in.readVLong();
+ preTz = DateTimeZone.forID(in.readSharedString());
+ postTz = DateTimeZone.forID(in.readSharedString());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(interval);
+ out.writeSharedString(preTz.getID());
+ out.writeSharedString(postTz.getID());
+ }
+ }
+
+ static class FactorTimeZoneRounding extends TimeZoneRounding {
+
+ final static byte ID = 7;
+
+ private TimeZoneRounding timeZoneRounding;
+
+ private float factor;
+
+ FactorTimeZoneRounding() { // for serialization
+ }
+
+ FactorTimeZoneRounding(TimeZoneRounding timeZoneRounding, float factor) {
+ this.timeZoneRounding = timeZoneRounding;
+ this.factor = factor;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ return timeZoneRounding.round((long) (factor * utcMillis));
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return timeZoneRounding.nextRoundingValue(value);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timeZoneRounding = (TimeZoneRounding) Rounding.Streams.read(in);
+ factor = in.readFloat();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ Rounding.Streams.write(timeZoneRounding, out);
+ out.writeFloat(factor);
+ }
+ }
+
+ static class PrePostTimeZoneRounding extends TimeZoneRounding {
+
+ final static byte ID = 8;
+
+ private TimeZoneRounding timeZoneRounding;
+
+ private long preOffset;
+ private long postOffset;
+
+ PrePostTimeZoneRounding() { // for serialization
+ }
+
+ PrePostTimeZoneRounding(TimeZoneRounding timeZoneRounding, long preOffset, long postOffset) {
+ this.timeZoneRounding = timeZoneRounding;
+ this.preOffset = preOffset;
+ this.postOffset = postOffset;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public long round(long utcMillis) {
+ return postOffset + timeZoneRounding.round(utcMillis + preOffset);
+ }
+
+ @Override
+ public long nextRoundingValue(long value) {
+ return postOffset + timeZoneRounding.nextRoundingValue(value - postOffset);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timeZoneRounding = (TimeZoneRounding) Rounding.Streams.read(in);
+ preOffset = in.readVLong();
+ postOffset = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ Rounding.Streams.write(timeZoneRounding, out);
+ out.writeVLong(preOffset);
+ out.writeVLong(postOffset);
+ }
+ }
+}
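
A builder sketch (illustrative; Europe/Paris is an arbitrary zone). With a day-level unit, a non-UTC preZone, and the default preZoneAdjustLargeInterval=false, build() selects DayTimeZoneRoundingFloor:

import org.joda.time.DateTimeZone;

TimeZoneRounding rounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH)
        .preZone(DateTimeZone.forID("Europe/Paris"))
        .build();

// shift into Paris time, floor to the local start of day, then apply the
// (default UTC) postZone; per the comment in DayTimeZoneRoundingFloor.round(),
// the floored day-level value is treated as already being UTC
long bucketKey = rounding.round(System.currentTimeMillis());
long nextBucket = rounding.nextRoundingValue(bucketKey);
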
diff --git a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java
new file mode 100644
index 0000000..e357293
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java
@@ -0,0 +1,1043 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Classes;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.property.PropertyPlaceholder;
+import org.elasticsearch.common.settings.loader.SettingsLoader;
+import org.elasticsearch.common.settings.loader.SettingsLoaderFactory;
+import org.elasticsearch.common.unit.*;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.Strings.toCamelCase;
+import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
+import static org.elasticsearch.common.unit.SizeValue.parseSizeValue;
+import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
+
+/**
+ * An immutable implementation of {@link Settings}.
+ */
+public class ImmutableSettings implements Settings {
+
+ public static final Settings EMPTY = new Builder().build();
+
+ private ImmutableMap<String, String> settings;
+ private transient ClassLoader classLoader;
+
+ ImmutableSettings(Map<String, String> settings, ClassLoader classLoader) {
+ this.settings = ImmutableMap.copyOf(settings);
+ this.classLoader = classLoader;
+ }
+
+ @Override
+ public ClassLoader getClassLoader() {
+ return this.classLoader == null ? Classes.getDefaultClassLoader() : classLoader;
+ }
+
+ @Override
+ public ClassLoader getClassLoaderIfSet() {
+ return this.classLoader;
+ }
+
+ @Override
+ public ImmutableMap<String, String> getAsMap() {
+ return this.settings;
+ }
+
+ @Override
+ public Map<String, Object> getAsStructuredMap() {
+ Map<String, Object> map = Maps.newHashMapWithExpectedSize(2);
+ for (Map.Entry<String, String> entry : settings.entrySet()) {
+ processSetting(map, "", entry.getKey(), entry.getValue());
+ }
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (entry.getValue() instanceof Map) {
+ @SuppressWarnings("unchecked") Map<String, Object> valMap = (Map<String, Object>) entry.getValue();
+ entry.setValue(convertMapsToArrays(valMap));
+ }
+ }
+
+ return map;
+ }
+
+ private void processSetting(Map<String, Object> map, String prefix, String setting, String value) {
+ int prefixLength = setting.indexOf('.');
+ if (prefixLength == -1) {
+ @SuppressWarnings("unchecked") Map<String, Object> innerMap = (Map<String, Object>) map.get(prefix + setting);
+ if (innerMap != null) {
+ // It is supposed to be a value, but a map is already stored; convert that map back to "." notation
+ for (Map.Entry<String, Object> entry : innerMap.entrySet()) {
+ map.put(prefix + setting + "." + entry.getKey(), entry.getValue());
+ }
+ }
+ map.put(prefix + setting, value);
+ } else {
+ String key = setting.substring(0, prefixLength);
+ String rest = setting.substring(prefixLength + 1);
+ Object existingValue = map.get(prefix + key);
+ if (existingValue == null) {
+ Map<String, Object> newMap = Maps.newHashMapWithExpectedSize(2);
+ processSetting(newMap, "", rest, value);
+ map.put(key, newMap);
+ } else {
+ if (existingValue instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> innerMap = (Map<String, Object>) existingValue;
+ processSetting(innerMap, "", rest, value);
+ map.put(key, innerMap);
+ } else {
+ // It is supposed to be a map, but a non-map value is already stored;
+ // fall back to "." notation
+ processSetting(map, prefix + key + ".", rest, value);
+ }
+ }
+ }
+ }
+
+ private Object convertMapsToArrays(Map<String, Object> map) {
+ if (map.isEmpty()) {
+ return map;
+ }
+ boolean isArray = true;
+ int maxIndex = -1;
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (isArray) {
+ try {
+ int index = Integer.parseInt(entry.getKey());
+ if (index >= 0) {
+ maxIndex = Math.max(maxIndex, index);
+ } else {
+ isArray = false;
+ }
+ } catch (NumberFormatException ex) {
+ isArray = false;
+ }
+ }
+ if (entry.getValue() instanceof Map) {
+ @SuppressWarnings("unchecked") Map<String, Object> valMap = (Map<String, Object>) entry.getValue();
+ entry.setValue(convertMapsToArrays(valMap));
+ }
+ }
+ if (isArray && (maxIndex + 1) == map.size()) {
+ ArrayList<Object> newValue = Lists.newArrayListWithExpectedSize(maxIndex + 1);
+ for (int i = 0; i <= maxIndex; i++) {
+ Object obj = map.get(Integer.toString(i));
+ if (obj == null) {
+ // Something went wrong. Different format?
+ // Bail out!
+ return map;
+ }
+ newValue.add(obj);
+ }
+ return newValue;
+ }
+ return map;
+ }
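+ // For illustration (not part of the upstream file): given the flat keys
+ //   index.number_of_shards = 5
+ //   index.aliases.0 = alias1
+ //   index.aliases.1 = alias2
+ // getAsStructuredMap() first nests on '.' via processSetting, and
+ // convertMapsToArrays then turns the {"0"=..., "1"=...} map into a list:
+ //   {index={number_of_shards=5, aliases=[alias1, alias2]}}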
+
+
+ @Override
+ public Settings getComponentSettings(Class component) {
+ if (component.getName().startsWith("org.elasticsearch")) {
+ return getComponentSettings("org.elasticsearch", component);
+ }
+ // not starting with org.elasticsearch, just remove the first package part (probably org/net/com)
+ return getComponentSettings(component.getName().substring(0, component.getName().indexOf('.')), component);
+ }
+
+ @Override
+ public Settings getComponentSettings(String prefix, Class component) {
+ String type = component.getName();
+ if (!type.startsWith(prefix)) {
+ throw new SettingsException("Component [" + type + "] does not start with prefix [" + prefix + "]");
+ }
+ String settingPrefix = type.substring(prefix.length() + 1); // 1 for the '.'
+ settingPrefix = settingPrefix.substring(0, settingPrefix.length() - component.getSimpleName().length()); // remove the simple class name (keep the dot)
+ return getByPrefix(settingPrefix);
+ }
+
+ @Override
+ public Settings getByPrefix(String prefix) {
+ Builder builder = new Builder();
+ for (Map.Entry<String, String> entry : getAsMap().entrySet()) {
+ if (entry.getKey().startsWith(prefix)) {
+ if (entry.getKey().length() < prefix.length()) {
+ // ignore this one
+ continue;
+ }
+ builder.put(entry.getKey().substring(prefix.length()), entry.getValue());
+ }
+ }
+ builder.classLoader(classLoader);
+ return builder.build();
+ }
+
+ @Override
+ public String get(String setting) {
+ String retVal = settings.get(setting);
+ if (retVal != null) {
+ return retVal;
+ }
+ // try camel case version
+ return settings.get(toCamelCase(setting));
+ }
+
+ @Override
+ public String get(String[] settings) {
+ for (String setting : settings) {
+ String retVal = this.settings.get(setting);
+ if (retVal != null) {
+ return retVal;
+ }
+ retVal = this.settings.get(toCamelCase(setting));
+ if (retVal != null) {
+ return retVal;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public String get(String setting, String defaultValue) {
+ String retVal = get(setting);
+ return retVal == null ? defaultValue : retVal;
+ }
+
+ @Override
+ public String get(String[] settings, String defaultValue) {
+ String retVal = get(settings);
+ return retVal == null ? defaultValue : retVal;
+ }
+
+ @Override
+ public Float getAsFloat(String setting, Float defaultValue) {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Float.parseFloat(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse float setting [" + setting + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Float getAsFloat(String[] settings, Float defaultValue) throws SettingsException {
+ String sValue = get(settings);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Float.parseFloat(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse float setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Double getAsDouble(String setting, Double defaultValue) {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Double.parseDouble(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse double setting [" + setting + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Double getAsDouble(String[] settings, Double defaultValue) {
+ String sValue = get(settings);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Double.parseDouble(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse double setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e);
+ }
+ }
+
+
+ @Override
+ public Integer getAsInt(String setting, Integer defaultValue) {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Integer.parseInt(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse int setting [" + setting + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Integer getAsInt(String[] settings, Integer defaultValue) {
+ String sValue = get(settings);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Integer.parseInt(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse int setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Long getAsLong(String setting, Long defaultValue) {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Long.parseLong(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse long setting [" + setting + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Long getAsLong(String[] settings, Long defaultValue) {
+ String sValue = get(settings);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Long.parseLong(sValue);
+ } catch (NumberFormatException e) {
+ throw new SettingsException("Failed to parse long setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public Boolean getAsBoolean(String setting, Boolean defaultValue) {
+ return Booleans.parseBoolean(get(setting), defaultValue);
+ }
+
+ @Override
+ public Boolean getAsBoolean(String[] settings, Boolean defaultValue) {
+ return Booleans.parseBoolean(get(settings), defaultValue);
+ }
+
+ @Override
+ public TimeValue getAsTime(String setting, TimeValue defaultValue) {
+ return parseTimeValue(get(setting), defaultValue);
+ }
+
+ @Override
+ public TimeValue getAsTime(String[] settings, TimeValue defaultValue) {
+ return parseTimeValue(get(settings), defaultValue);
+ }
+
+ @Override
+ public ByteSizeValue getAsBytesSize(String setting, ByteSizeValue defaultValue) throws SettingsException {
+ return parseBytesSizeValue(get(setting), defaultValue);
+ }
+
+ @Override
+ public ByteSizeValue getAsBytesSize(String[] settings, ByteSizeValue defaultValue) throws SettingsException {
+ return parseBytesSizeValue(get(settings), defaultValue);
+ }
+
+ @Override
+ public ByteSizeValue getAsMemory(String setting, String defaultValue) throws SettingsException {
+ return MemorySizeValue.parseBytesSizeValueOrHeapRatio(get(setting, defaultValue));
+ }
+
+ @Override
+ public ByteSizeValue getAsMemory(String[] settings, String defaultValue) throws SettingsException {
+ return MemorySizeValue.parseBytesSizeValueOrHeapRatio(get(settings, defaultValue));
+ }
+
+ @Override
+ public SizeValue getAsSize(String setting, SizeValue defaultValue) throws SettingsException {
+ return parseSizeValue(get(setting), defaultValue);
+ }
+
+ @Override
+ public SizeValue getAsSize(String[] settings, SizeValue defaultValue) throws SettingsException {
+ return parseSizeValue(get(settings), defaultValue);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public <T> Class<? extends T> getAsClass(String setting, Class<? extends T> defaultClazz) throws NoClassSettingsException {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultClazz;
+ }
+ try {
+ return (Class<? extends T>) getClassLoader().loadClass(sValue);
+ } catch (ClassNotFoundException e) {
+ throw new NoClassSettingsException("Failed to load class setting [" + setting + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public <T> Class<? extends T> getAsClass(String setting, Class<? extends T> defaultClazz, String prefixPackage, String suffixClassName) throws NoClassSettingsException {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultClazz;
+ }
+ String fullClassName = sValue;
+ try {
+ return (Class<? extends T>) getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e) {
+ String prefixValue = prefixPackage;
+ int packageSeparator = sValue.lastIndexOf('.');
+ if (packageSeparator > 0) {
+ prefixValue = sValue.substring(0, packageSeparator + 1);
+ sValue = sValue.substring(packageSeparator + 1);
+ }
+ fullClassName = prefixValue + Strings.capitalize(toCamelCase(sValue)) + suffixClassName;
+ try {
+ return (Class<? extends T>) getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e1) {
+ return loadClass(prefixValue, sValue, suffixClassName, setting);
+ } catch (NoClassDefFoundError e1) {
+ return loadClass(prefixValue, sValue, suffixClassName, setting);
+ }
+ }
+ }
+
+ private <T> Class<? extends T> loadClass(String prefixValue, String sValue, String suffixClassName, String setting) {
+ String fullClassName = prefixValue + toCamelCase(sValue).toLowerCase(Locale.ROOT) + "." + Strings.capitalize(toCamelCase(sValue)) + suffixClassName;
+ try {
+ return (Class<? extends T>) getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e2) {
+ throw new NoClassSettingsException("Failed to load class setting [" + setting + "] with value [" + get(setting) + "]", e2);
+ }
+ }
+
+ @Override
+ public String[] getAsArray(String settingPrefix) throws SettingsException {
+ return getAsArray(settingPrefix, Strings.EMPTY_ARRAY, true);
+ }
+
+ @Override
+ public String[] getAsArray(String settingPrefix, String[] defaultArray) throws SettingsException {
+ return getAsArray(settingPrefix, defaultArray, true);
+ }
+
+ @Override
+ public String[] getAsArray(String settingPrefix, String[] defaultArray, Boolean commaDelimited) throws SettingsException {
+ List<String> result = Lists.newArrayList();
+
+ if (get(settingPrefix) != null) {
+ if (commaDelimited) {
+ String[] strings = Strings.splitStringByCommaToArray(get(settingPrefix));
+ if (strings.length > 0) {
+ for (String string : strings) {
+ result.add(string.trim());
+ }
+ }
+ } else {
+ result.add(get(settingPrefix).trim());
+ }
+ }
+
+ int counter = 0;
+ while (true) {
+ String value = get(settingPrefix + '.' + (counter++));
+ if (value == null) {
+ break;
+ }
+ result.add(value.trim());
+ }
+ if (result.isEmpty()) {
+ return defaultArray;
+ }
+ return result.toArray(new String[result.size()]);
+ }
+
+ @Override
+ public Map<String, Settings> getGroups(String settingPrefix) throws SettingsException {
+ return getGroups(settingPrefix, false);
+ }
+
+ @Override
+ public Map<String, Settings> getGroups(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException {
+ if (!Strings.hasLength(settingPrefix)) {
+ throw new ElasticsearchIllegalArgumentException("illegal setting prefix " + settingPrefix);
+ }
+ if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') {
+ settingPrefix = settingPrefix + ".";
+ }
+ // we don't really care that it might happen twice
+ Map<String, Map<String, String>> map = new LinkedHashMap<String, Map<String, String>>();
+ for (Object o : settings.keySet()) {
+ String setting = (String) o;
+ if (setting.startsWith(settingPrefix)) {
+ String nameValue = setting.substring(settingPrefix.length());
+ int dotIndex = nameValue.indexOf('.');
+ if (dotIndex == -1) {
+ if (ignoreNonGrouped) {
+ continue;
+ }
+ throw new SettingsException("Failed to get setting group for [" + settingPrefix + "] setting prefix and setting [" + setting + "] because of a missing '.'");
+ }
+ String name = nameValue.substring(0, dotIndex);
+ String value = nameValue.substring(dotIndex + 1);
+ Map<String, String> groupSettings = map.get(name);
+ if (groupSettings == null) {
+ groupSettings = new LinkedHashMap<String, String>();
+ map.put(name, groupSettings);
+ }
+ groupSettings.put(value, get(setting));
+ }
+ }
+ Map<String, Settings> retVal = new LinkedHashMap<String, Settings>();
+ for (Map.Entry<String, Map<String, String>> entry : map.entrySet()) {
+ retVal.put(entry.getKey(), new ImmutableSettings(Collections.unmodifiableMap(entry.getValue()), classLoader));
+ }
+ return Collections.unmodifiableMap(retVal);
+ }
+
+ @Override
+ public Version getAsVersion(String setting, Version defaultVersion) throws SettingsException {
+ String sValue = get(setting);
+ if (sValue == null) {
+ return defaultVersion;
+ }
+ try {
+ return Version.fromId(Integer.parseInt(sValue));
+ } catch (Exception e) {
+ throw new SettingsException("Failed to parse version setting [" + setting + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public String toDelimitedString(char delimiter) {
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<String, String> entry : settings.entrySet()) {
+ sb.append(entry.getKey()).append("=").append(entry.getValue()).append(delimiter);
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ImmutableSettings that = (ImmutableSettings) o;
+
+ if (classLoader != null ? !classLoader.equals(that.classLoader) : that.classLoader != null) return false;
+ if (settings != null ? !settings.equals(that.settings) : that.settings != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = settings != null ? settings.hashCode() : 0;
+ result = 31 * result + (classLoader != null ? classLoader.hashCode() : 0);
+ return result;
+ }
+
+ public static Settings readSettingsFromStream(StreamInput in) throws IOException {
+ Builder builder = new Builder();
+ int numberOfSettings = in.readVInt();
+ for (int i = 0; i < numberOfSettings; i++) {
+ builder.put(in.readString(), in.readString());
+ }
+ return builder.build();
+ }
+
+ public static void writeSettingsToStream(Settings settings, StreamOutput out) throws IOException {
+ out.writeVInt(settings.getAsMap().size());
+ for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ /**
+ * Returns a builder used to build settings.
+ */
+ public static Builder settingsBuilder() {
+ return new Builder();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (!params.paramAsBoolean("flat_settings", false)) {
+ for (Map.Entry<String, Object> entry : getAsStructuredMap().entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ } else {
+ for (Map.Entry<String, String> entry : getAsMap().entrySet()) {
+ builder.field(entry.getKey(), entry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
+ }
+ }
+ return builder;
+ }
+
+ /**
+ * A builder for accumulating settings and then {@link #build()}ing an immutable
+ * settings implementation. Use {@link ImmutableSettings#settingsBuilder()} to
+ * construct it.
+ */
+ public static class Builder implements Settings.Builder {
+
+ public static final Settings EMPTY_SETTINGS = new Builder().build();
+
+ private final Map<String, String> map = new LinkedHashMap<String, String>();
+
+ private ClassLoader classLoader;
+
+ private Builder() {
+
+ }
+
+ public Map<String, String> internalMap() {
+ return this.map;
+ }
+
+ /**
+ * Removes the provided setting from the internal map holding the current list of settings.
+ */
+ public String remove(String key) {
+ return map.remove(key);
+ }
+
+ /**
+ * Returns a setting value based on the setting key.
+ */
+ public String get(String key) {
+ String retVal = map.get(key);
+ if (retVal != null) {
+ return retVal;
+ }
+ // try camel case version
+ return map.get(toCamelCase(key));
+ }
+
+ /**
+ * Puts settings as key/value tuples, as a shorthand for calling put once per pair.
+ */
+ public Builder put(Object... settings) {
+ if (settings.length == 1) {
+ // support cases where the actual type gets lost down the road...
+ if (settings[0] instanceof Map) {
+ //noinspection unchecked
+ return put((Map) settings[0]);
+ } else if (settings[0] instanceof Settings) {
+ return put((Settings) settings[0]);
+ }
+ }
+ if ((settings.length % 2) != 0) {
+ throw new ElasticsearchIllegalArgumentException("array settings of key + value order doesn't hold correct number of arguments (" + settings.length + ")");
+ }
+ for (int i = 0; i < settings.length; i++) {
+ put(settings[i++].toString(), settings[i].toString());
+ }
+ return this;
+ }
+
+ /**
+ * Sets a setting with the provided setting key and value.
+ *
+ * @param key The setting key
+ * @param value The setting value
+ * @return The builder
+ */
+ public Builder put(String key, String value) {
+ map.put(key, value);
+ return this;
+ }
+
+ /**
+ * Sets a setting with the provided setting key and class as value.
+ *
+ * @param key The setting key
+ * @param clazz The setting class value
+ * @return The builder
+ */
+ public Builder put(String key, Class clazz) {
+ map.put(key, clazz.getName());
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the boolean value.
+ *
+ * @param setting The setting key
+ * @param value The boolean value
+ * @return The builder
+ */
+ public Builder put(String setting, boolean value) {
+ put(setting, String.valueOf(value));
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the int value.
+ *
+ * @param setting The setting key
+ * @param value The int value
+ * @return The builder
+ */
+ public Builder put(String setting, int value) {
+ put(setting, String.valueOf(value));
+ return this;
+ }
+
+ public Builder put(String setting, Version version) {
+ put(setting, version.id);
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the long value.
+ *
+ * @param setting The setting key
+ * @param value The long value
+ * @return The builder
+ */
+ public Builder put(String setting, long value) {
+ put(setting, String.valueOf(value));
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the float value.
+ *
+ * @param setting The setting key
+ * @param value The float value
+ * @return The builder
+ */
+ public Builder put(String setting, float value) {
+ put(setting, String.valueOf(value));
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the double value.
+ *
+ * @param setting The setting key
+ * @param value The double value
+ * @return The builder
+ */
+ public Builder put(String setting, double value) {
+ put(setting, String.valueOf(value));
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the time value.
+ *
+ * @param setting The setting key
+ * @param value The time value
+ * @return The builder
+ */
+ public Builder put(String setting, long value, TimeUnit timeUnit) {
+ put(setting, timeUnit.toMillis(value));
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and the size value.
+ *
+ * @param setting The setting key
+ * @param value The size value
+ * @return The builder
+ */
+ public Builder put(String setting, long value, ByteSizeUnit sizeUnit) {
+ put(setting, sizeUnit.toBytes(value));
+ return this;
+ }
+
+ /**
+ * Sets the setting with the provided setting key and an array of values.
+ *
+ * @param setting The setting key
+ * @param values The values
+ * @return The builder
+ */
+ public Builder putArray(String setting, String... values) {
+ remove(setting);
+ int counter = 0;
+ while (true) {
+ String value = map.remove(setting + '.' + (counter++));
+ if (value == null) {
+ break;
+ }
+ }
+ for (int i = 0; i < values.length; i++) {
+ put(setting + "." + i, values[i]);
+ }
+ return this;
+ }
+
+ /**
+ * Sets the setting group.
+ */
+ public Builder put(String settingPrefix, String groupName, String[] settings, String[] values) throws SettingsException {
+ if (settings.length != values.length) {
+ throw new SettingsException("The settings length must match the value length");
+ }
+ for (int i = 0; i < settings.length; i++) {
+ if (values[i] == null) {
+ continue;
+ }
+ put(settingPrefix + "." + groupName + "." + settings[i], values[i]);
+ }
+ return this;
+ }
+
+ /**
+ * Sets all the provided settings.
+ */
+ public Builder put(Settings settings) {
+ map.putAll(settings.getAsMap());
+ classLoader = settings.getClassLoaderIfSet();
+ return this;
+ }
+
+ /**
+ * Sets all the provided settings.
+ */
+ public Builder put(Map<String, String> settings) {
+ map.putAll(settings);
+ return this;
+ }
+
+ /**
+ * Sets all the provided settings.
+ */
+ public Builder put(Properties properties) {
+ for (Map.Entry entry : properties.entrySet()) {
+ map.put((String) entry.getKey(), (String) entry.getValue());
+ }
+ return this;
+ }
+
+ public Builder loadFromDelimitedString(String value, char delimiter) {
+ String[] values = Strings.splitStringToArray(value, delimiter);
+ for (String s : values) {
+ int index = s.indexOf('=');
+ if (index == -1) {
+ throw new ElasticsearchIllegalArgumentException("value [" + s + "] for settings loaded with delimiter [" + delimiter + "] is malformed, missing =");
+ }
+ map.put(s.substring(0, index), s.substring(index + 1));
+ }
+ return this;
+ }
+
+ /**
+ * Loads settings from the actual string content that represents them using the
+ * {@link SettingsLoaderFactory#loaderFromSource(String)}.
+ */
+ public Builder loadFromSource(String source) {
+ SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromSource(source);
+ try {
+ Map<String, String> loadedSettings = settingsLoader.load(source);
+ put(loadedSettings);
+ } catch (Exception e) {
+ throw new SettingsException("Failed to load settings from [" + source + "]", e);
+ }
+ return this;
+ }
+
+ /**
+ * Loads settings from a url that represents them using the
+ * {@link SettingsLoaderFactory#loaderFromSource(String)}.
+ */
+ public Builder loadFromUrl(URL url) throws SettingsException {
+ try {
+ return loadFromStream(url.toExternalForm(), url.openStream());
+ } catch (IOException e) {
+ throw new SettingsException("Failed to open stream for url [" + url.toExternalForm() + "]", e);
+ }
+ }
+
+ /**
+ * Loads settings from a stream that represents them using the
+ * {@link SettingsLoaderFactory#loaderFromSource(String)}.
+ */
+ public Builder loadFromStream(String resourceName, InputStream is) throws SettingsException {
+ SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromResource(resourceName);
+ try {
+ Map<String, String> loadedSettings = settingsLoader.load(Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8)));
+ put(loadedSettings);
+ } catch (Exception e) {
+ throw new SettingsException("Failed to load settings from [" + resourceName + "]", e);
+ }
+ return this;
+ }
+
+ /**
+ * Loads settings from classpath that represents them using the
+ * {@link SettingsLoaderFactory#loaderFromSource(String)}.
+ */
+ public Builder loadFromClasspath(String resourceName) throws SettingsException {
+ ClassLoader classLoader = this.classLoader;
+ if (classLoader == null) {
+ classLoader = Classes.getDefaultClassLoader();
+ }
+ InputStream is = classLoader.getResourceAsStream(resourceName);
+ if (is == null) {
+ return this;
+ }
+
+ return loadFromStream(resourceName, is);
+ }
+
+ /**
+ * Sets the class loader associated with the settings built.
+ */
+ public Builder classLoader(ClassLoader classLoader) {
+ this.classLoader = classLoader;
+ return this;
+ }
+
+ /**
+ * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
+ *
+ * @param prefix The prefix to filter property keys by
+ * @param properties The properties to put
+ * @return The builder
+ */
+ public Builder putProperties(String prefix, Properties properties) {
+ for (Object key1 : properties.keySet()) {
+ String key = (String) key1;
+ String value = properties.getProperty(key);
+ if (key.startsWith(prefix)) {
+ map.put(key.substring(prefix.length()), value);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Puts all the properties with keys starting with the provided <tt>prefix</tt>,
+ * skipping any key that also starts with one of the <tt>ignorePrefixes</tt>.
+ *
+ * @param prefix The prefix to filter property keys by
+ * @param properties The properties to put
+ * @param ignorePrefixes Key prefixes to skip even when they match <tt>prefix</tt>
+ * @return The builder
+ */
+ public Builder putProperties(String prefix, Properties properties, String[] ignorePrefixes) {
+ for (Object key1 : properties.keySet()) {
+ String key = (String) key1;
+ String value = properties.getProperty(key);
+ if (key.startsWith(prefix)) {
+ boolean ignore = false;
+ for (String ignorePrefix : ignorePrefixes) {
+ if (key.startsWith(ignorePrefix)) {
+ ignore = true;
+ break;
+ }
+ }
+ if (!ignore) {
+ map.put(key.substring(prefix.length()), value);
+ }
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Runs across all the settings set on this builder and replaces <tt>${...}</tt> elements in
+ * each setting value according to the following logic:
+ * <p/>
+ * <p>First, tries to resolve it against a System property ({@link System#getProperty(String)}), next,
+ * tries to resolve it against an environment variable ({@link System#getenv(String)}), and last, tries
+ * to replace it with another setting already set on this builder.
+ */
+ public Builder replacePropertyPlaceholders() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new PropertyPlaceholder.PlaceholderResolver() {
+ @Override
+ public String resolvePlaceholder(String placeholderName) {
+ if (placeholderName.startsWith("env.")) {
+ // explicit env var prefix
+ return System.getenv(placeholderName.substring("env.".length()));
+ }
+ String value = System.getProperty(placeholderName);
+ if (value != null) {
+ return value;
+ }
+ value = System.getenv(placeholderName);
+ if (value != null) {
+ return value;
+ }
+ return map.get(placeholderName);
+ }
+
+ @Override
+ public boolean shouldIgnoreMissing(String placeholderName) {
+ // an explicit env var placeholder is optional; a missing value is fine
+ return placeholderName.startsWith("env.");
+ }
+ };
+ for (Map.Entry<String, String> entry : Maps.newHashMap(map).entrySet()) {
+ String value = propertyPlaceholder.replacePlaceholders(entry.getValue(), placeholderResolver);
+ // if the value exists and has length, keep it in the map;
+ // otherwise the replacement process effectively removed it
+ if (Strings.hasLength(value)) {
+ map.put(entry.getKey(), value);
+ } else {
+ map.remove(entry.getKey());
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Builds a {@link Settings} (backed by {@link ImmutableSettings}) based on everything
+ * set on this builder.
+ */
+ public Settings build() {
+ return new ImmutableSettings(Collections.unmodifiableMap(map), classLoader);
+ }
+ }
+}
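As an illustration of how the builder and the typed getters above fit together, here is a minimal usage sketch (all key names are invented for the example):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class SettingsExample {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("node.name", "node-1")           // plain string setting
                    .put("index.number_of_shards", 2)     // stored as the string "2"
                    .putArray("path.data", "/d1", "/d2")  // stored as path.data.0 / path.data.1
                    .build();

            String name = settings.get("node.name", "unnamed");      // default if absent
            int shards = settings.getAsInt("index.number_of_shards", 1);
            String[] paths = settings.getAsArray("path.data");       // merges numbered keys
        }
    }

Note that get(...) also falls back to a camel-case form of the key (via toCamelCase), so a value stored under a camel-cased key can satisfy a lookup for the underscored form.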
diff --git a/src/main/java/org/elasticsearch/common/settings/NoClassSettingsException.java b/src/main/java/org/elasticsearch/common/settings/NoClassSettingsException.java
new file mode 100644
index 0000000..dbeeaad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/NoClassSettingsException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+/**
+ * A specific type of {@link SettingsException} indicating failure to load a class
+ * based on a settings value.
+ */
+public class NoClassSettingsException extends SettingsException {
+
+ public NoClassSettingsException(String message) {
+ super(message);
+ }
+
+ public NoClassSettingsException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/settings/Settings.java b/src/main/java/org/elasticsearch/common/settings/Settings.java
new file mode 100644
index 0000000..f5a0cc3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+
+import java.util.Map;
+
+/**
+ * Immutable settings used to control the configuration.
+ * <p/>
+ * <p>Use {@link ImmutableSettings#settingsBuilder()} to create a builder,
+ * which in turn can create an immutable implementation of settings.
+ *
+ * @see ImmutableSettings
+ */
+public interface Settings extends ToXContent {
+
+ /**
+ * Component settings for a specific component. Returns all the settings for the given class, where the
+ * FQN of the class is used, without the <tt>org.elasticsearch</tt> prefix. If there is no <tt>org.elasticsearch</tt>
+ * prefix, then the prefix used is the first part of the package name (<tt>org</tt> / <tt>com</tt> / ...)
+ */
+ Settings getComponentSettings(Class component);
+
+ /**
+ * Component settings for a specific component. Returns all the settings for the given class, where the
+ * FQN of the class is used, without provided prefix.
+ */
+ Settings getComponentSettings(String prefix, Class component);
+
+ /**
+ * Returns the settings filtered to those under the specified prefix, with the prefix removed from each key.
+ */
+ Settings getByPrefix(String prefix);
+
+ /**
+ * The class loader associated with these settings, or {@link org.elasticsearch.common.Classes#getDefaultClassLoader()}
+ * if not set.
+ */
+ ClassLoader getClassLoader();
+
+ /**
+ * The class loader associated with these settings, but only if explicitly set, otherwise <tt>null</tt>.
+ */
+ @Nullable
+ ClassLoader getClassLoaderIfSet();
+
+ /**
+ * The settings as a flat {@link java.util.Map}.
+ */
+ ImmutableMap<String, String> getAsMap();
+
+ /**
+ * The settings as a structured {@link java.util.Map}.
+ */
+ Map<String, Object> getAsStructuredMap();
+
+ /**
+ * Returns the setting value associated with the setting key.
+ *
+ * @param setting The setting key
+ * @return The setting value, <tt>null</tt> if it does not exist.
+ */
+ String get(String setting);
+
+ /**
+ * Returns the setting value associated with the first setting key.
+ */
+ String get(String[] settings);
+
+ /**
+ * Returns the setting value associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ String get(String setting, String defaultValue);
+
+ /**
+ * Returns the setting value associated with the first setting key, if none exists,
+ * returns the default value provided.
+ */
+ String get(String[] settings, String defaultValue);
+
+ /**
+ * Returns group settings for the given setting prefix.
+ */
+ Map<String, Settings> getGroups(String settingPrefix) throws SettingsException;
+
+ /**
+ * Returns group settings for the given setting prefix.
+ */
+ Map<String, Settings> getGroups(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException;
+
+ /**
+ * Returns the setting value (as float) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ Float getAsFloat(String setting, Float defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as float) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ Float getAsFloat(String[] settings, Float defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as double) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ Double getAsDouble(String setting, Double defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as double) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ Double getAsDouble(String[] settings, Double defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as int) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ Integer getAsInt(String setting, Integer defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as int) associated with the first setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ Integer getAsInt(String[] settings, Integer defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as long) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ Long getAsLong(String setting, Long defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as long) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ Long getAsLong(String[] settings, Long defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as boolean) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ Boolean getAsBoolean(String setting, Boolean defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as boolean) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ Boolean getAsBoolean(String[] settings, Boolean defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as time) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ TimeValue getAsTime(String setting, TimeValue defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as time) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ TimeValue getAsTime(String[] settings, TimeValue defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as size) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ ByteSizeValue getAsBytesSize(String setting, ByteSizeValue defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as size) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ ByteSizeValue getAsBytesSize(String[] settings, ByteSizeValue defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as size) associated with the setting key. Provided values can either be
+ * absolute values (interpreted as a number of bytes), byte sizes (eg. 1mb) or a percentage of the heap size
+ * (eg. 12%). If it does not exist, parses the default value provided.
+ */
+ ByteSizeValue getAsMemory(String setting, String defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as size) associated with the first setting key. Provided values can either be
+ * absolute values (interpreted as a number of bytes), byte sizes (eg. 1mb) or a percentage of the heap size
+ * (eg. 12%). If none exists, parses the default value provided.
+ */
+ ByteSizeValue getAsMemory(String[] setting, String defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as size) associated with the setting key. If it does not exist,
+ * returns the default value provided.
+ */
+ SizeValue getAsSize(String setting, SizeValue defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as size) associated with the first setting key, if none
+ * exists, returns the default value provided.
+ */
+ SizeValue getAsSize(String[] settings, SizeValue defaultValue) throws SettingsException;
+
+ /**
+ * Returns the setting value (as a class) associated with the setting key. If it does not exist,
+ * returns the default class provided.
+ *
+ * @param setting The setting key
+ * @param defaultClazz The class to return if no value is associated with the setting
+ * @param <T> The type of the class
+ * @return The class setting value, or the default class provided if no value exists
+ * @throws NoClassSettingsException Failure to load a class
+ */
+ <T> Class<? extends T> getAsClass(String setting, Class<? extends T> defaultClazz) throws NoClassSettingsException;
+
+ /**
+ * Returns the setting value (as a class) associated with the setting key. If the value itself fails to
+ * represent a loadable class, the value will be appended to the <tt>prefixPackage</tt> and suffixed with the
+ * <tt>suffixClassName</tt> and it will try to be loaded with it.
+ *
+ * @param setting The setting key
+ * @param defaultClazz The class to return if no value is associated with the setting
+ * @param prefixPackage The prefix package to prefix the value with if failing to load the class as is
+ * @param suffixClassName The suffix class name to append to the value if failing to load the class as is
+ * @param <T> The type of the class
+ * @return The class represented by the setting value, or the default class provided if no value exists
+ * @throws NoClassSettingsException Failure to load the class
+ */
+ <T> Class<? extends T> getAsClass(String setting, Class<? extends T> defaultClazz, String prefixPackage, String suffixClassName) throws NoClassSettingsException;
+
+ /**
+ * The values associated with a setting prefix as an array. The settings array is in the format of:
+ * <tt>settingPrefix.[index]</tt>.
+ * <p/>
+ * <p>It will also automatically load a comma separated list under the settingPrefix and merge with
+ * the numbered format.
+ *
+ * @param settingPrefix The setting prefix to load the array by
+ * @param defaultArray The default array to use if no value is specified
+ * @param commaDelimited Whether to try to parse a string as a comma-delimited value
+ * @return The setting array values
+ * @throws SettingsException
+ */
+ String[] getAsArray(String settingPrefix, String[] defaultArray, Boolean commaDelimited) throws SettingsException;
+
+ /**
+ * The values associated with a setting prefix as an array. The settings array is in the format of:
+ * <tt>settingPrefix.[index]</tt>.
+ * <p/>
+ * <p>If commaDelimited is true, it will automatically load a comma separated list under the settingPrefix and merge with
+ * the numbered format.
+ *
+ * @param settingPrefix The setting prefix to load the array by
+ * @return The setting array values
+ * @throws SettingsException
+ */
+ String[] getAsArray(String settingPrefix, String[] defaultArray) throws SettingsException;
+
+ /**
+ * The values associated with a setting prefix as an array. The settings array is in the format of:
+ * <tt>settingPrefix.[index]</tt>.
+ * <p/>
+ * <p>It will also automatically load a comma separated list under the settingPrefix and merge with
+ * the numbered format.
+ *
+ * @param settingPrefix The setting prefix to load the array by
+ * @return The setting array values
+ * @throws SettingsException
+ */
+ String[] getAsArray(String settingPrefix) throws SettingsException;
+
+ /**
+ * Returns a parsed version.
+ */
+ Version getAsVersion(String setting, Version defaultVersion) throws SettingsException;
+
+ /**
+ * Returns the settings as delimited string.
+ */
+ String toDelimitedString(char delimiter);
+
+ /**
+ * A settings builder interface.
+ */
+ interface Builder {
+
+ /**
+ * Builds the settings.
+ */
+ Settings build();
+ }
+}
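To make the array and group conventions described above concrete, a small sketch (hypothetical keys):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    import java.util.Map;

    public class GroupExample {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("path.data.0", "/d1")           // numbered array form
                    .put("path.data.1", "/d2")
                    .put("threadpool.index.size", "4")   // grouped under "threadpool."
                    .put("threadpool.bulk.size", "8")
                    .build();

            String[] data = settings.getAsArray("path.data");     // ["/d1", "/d2"]
            Map<String, Settings> pools = settings.getGroups("threadpool");
            String indexSize = pools.get("index").get("size");    // "4"
        }
    }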
diff --git a/src/main/java/org/elasticsearch/common/settings/SettingsException.java b/src/main/java/org/elasticsearch/common/settings/SettingsException.java
new file mode 100644
index 0000000..9a4576f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/SettingsException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * A generic failure to handle settings.
+ */
+public class SettingsException extends ElasticsearchException {
+
+ public SettingsException(String message) {
+ super(message);
+ }
+
+ public SettingsException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java b/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java
new file mode 100644
index 0000000..06bfb0d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ *
+ */
+public class SettingsFilter extends AbstractComponent {
+
+ public static interface Filter {
+
+ void filter(ImmutableSettings.Builder settings);
+ }
+
+ private final CopyOnWriteArrayList<Filter> filters = new CopyOnWriteArrayList<Filter>();
+
+ @Inject
+ public SettingsFilter(Settings settings) {
+ super(settings);
+ }
+
+ public void addFilter(Filter filter) {
+ filters.add(filter);
+ }
+
+ public void removeFilter(Filter filter) {
+ filters.remove(filter);
+ }
+
+ public Settings filterSettings(Settings settings) {
+ ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder().put(settings);
+ for (Filter filter : filters) {
+ filter.filter(builder);
+ }
+ return builder.build();
+ }
+}
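A sketch of how a filter might be registered to strip a sensitive key before settings are exposed; the key name here is hypothetical:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.settings.SettingsFilter;

    public class FilterExample {
        public static void main(String[] args) {
            Settings nodeSettings = ImmutableSettings.settingsBuilder()
                    .put("cloud.secret_key", "s3cr3t")   // hypothetical sensitive setting
                    .build();

            SettingsFilter filter = new SettingsFilter(nodeSettings);
            filter.addFilter(new SettingsFilter.Filter() {
                @Override
                public void filter(ImmutableSettings.Builder settings) {
                    settings.remove("cloud.secret_key");
                }
            });

            Settings filtered = filter.filterSettings(nodeSettings);
            // filtered.get("cloud.secret_key") -> null
        }
    }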
diff --git a/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
new file mode 100644
index 0000000..20e6576
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * A module that binds the provided settings to the {@link Settings} interface.
+ */
+public class SettingsModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public SettingsModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(Settings.class).toInstance(settings);
+ bind(SettingsFilter.class).asEagerSingleton();
+ }
+} \ No newline at end of file
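A sketch of wiring the module through the embedded injection framework, assuming the bundled org.elasticsearch.common.inject.Guice entry point; the real node bootstrap installs many more modules:

    import org.elasticsearch.common.inject.Guice;
    import org.elasticsearch.common.inject.Injector;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.settings.SettingsFilter;
    import org.elasticsearch.common.settings.SettingsModule;

    public class ModuleExample {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("cluster.name", "demo")
                    .build();

            Injector injector = Guice.createInjector(new SettingsModule(settings));
            Settings bound = injector.getInstance(Settings.class);              // the same instance
            SettingsFilter filter = injector.getInstance(SettingsFilter.class); // eager singleton
        }
    }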
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java b/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java
new file mode 100644
index 0000000..f6f7719
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.xcontent.XContentType;
+
+/**
+ * Settings loader that loads (parses) the settings in a json format by flattening them
+ * into a map.
+ */
+public class JsonSettingsLoader extends XContentSettingsLoader {
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.JSON;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java b/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java
new file mode 100644
index 0000000..6aab32c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Properties;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Settings loader that loads (parses) the settings in a properties format.
+ */
+public class PropertiesSettingsLoader implements SettingsLoader {
+
+ @Override
+ public Map<String, String> load(String source) throws IOException {
+ Properties props = new Properties();
+ FastStringReader reader = new FastStringReader(source);
+ try {
+ props.load(reader);
+ Map<String, String> result = newHashMap();
+ for (Map.Entry entry : props.entrySet()) {
+ result.put((String) entry.getKey(), (String) entry.getValue());
+ }
+ return result;
+ } finally {
+ IOUtils.closeWhileHandlingException(reader);
+ }
+ }
+
+ @Override
+ public Map<String, String> load(byte[] source) throws IOException {
+ Properties props = new Properties();
+ BytesStreamInput stream = new BytesStreamInput(source, false);
+ try {
+ props.load(stream);
+ Map<String, String> result = newHashMap();
+ for (Map.Entry entry : props.entrySet()) {
+ result.put((String) entry.getKey(), (String) entry.getValue());
+ }
+ return result;
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
+ }
+ }
+}
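A short usage sketch of the properties loader, with an inline source string for illustration:

    import org.elasticsearch.common.settings.loader.PropertiesSettingsLoader;

    import java.util.Map;

    public class PropsExample {
        public static void main(String[] args) throws Exception {
            Map<String, String> loaded = new PropertiesSettingsLoader()
                    .load("cluster.name=demo\nnode.data=true");
            // loaded.get("cluster.name") -> "demo"
        }
    }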
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoader.java b/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoader.java
new file mode 100644
index 0000000..86e905e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoader.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.Nullable;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Provides the ability to load settings (in the form of a simple Map) from
+ * the actual source content that represents them.
+ */
+public interface SettingsLoader {
+
+ static class Helper {
+
+ public static Map<String, String> loadNestedFromMap(@Nullable Map map) {
+ Map<String, String> settings = newHashMap();
+ if (map == null) {
+ return settings;
+ }
+ StringBuilder sb = new StringBuilder();
+ List<String> path = newArrayList();
+ serializeMap(settings, sb, path, map);
+ return settings;
+ }
+
+ private static void serializeMap(Map<String, String> settings, StringBuilder sb, List<String> path, Map<Object, Object> map) {
+ for (Map.Entry<Object, Object> entry : map.entrySet()) {
+ if (entry.getValue() instanceof Map) {
+ path.add((String) entry.getKey());
+ serializeMap(settings, sb, path, (Map<Object, Object>) entry.getValue());
+ path.remove(path.size() - 1);
+ } else if (entry.getValue() instanceof List) {
+ path.add((String) entry.getKey());
+ serializeList(settings, sb, path, (List) entry.getValue());
+ path.remove(path.size() - 1);
+ } else {
+ serializeValue(settings, sb, path, (String) entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ private static void serializeList(Map<String, String> settings, StringBuilder sb, List<String> path, List list) {
+ int counter = 0;
+ for (Object listEle : list) {
+ if (listEle instanceof Map) {
+ path.add(Integer.toString(counter));
+ serializeMap(settings, sb, path, (Map<Object, Object>) listEle);
+ path.remove(path.size() - 1);
+ } else if (listEle instanceof List) {
+ path.add(Integer.toString(counter));
+ serializeList(settings, sb, path, (List) listEle);
+ path.remove(path.size() - 1);
+ } else {
+ serializeValue(settings, sb, path, Integer.toString(counter), listEle);
+ }
+ counter++;
+ }
+ }
+
+ private static void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, String name, Object value) {
+ if (value == null) {
+ return;
+ }
+ sb.setLength(0);
+ for (String pathEle : path) {
+ sb.append(pathEle).append('.');
+ }
+ sb.append(name);
+ settings.put(sb.toString(), value.toString());
+ }
+ }
+
+
+ /**
+ * Loads (parses) the settings from a source string.
+ */
+ Map<String, String> load(String source) throws IOException;
+
+ /**
+ * Loads (parses) the settings from a source bytes.
+ */
+ Map<String, String> load(byte[] source) throws IOException;
+}
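To show what the Helper flattening produces, a sketch using a hand-built nested map:

    import org.elasticsearch.common.settings.loader.SettingsLoader;

    import java.util.HashMap;
    import java.util.Map;

    public class FlattenExample {
        public static void main(String[] args) {
            Map<String, Object> node = new HashMap<String, Object>();
            node.put("name", "node-1");
            Map<String, Object> nested = new HashMap<String, Object>();
            nested.put("node", node);

            Map<String, String> flat = SettingsLoader.Helper.loadNestedFromMap(nested);
            // flat.get("node.name") -> "node-1"
            // list values are flattened to numbered keys: key.0, key.1, ...
        }
    }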
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java b/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java
new file mode 100644
index 0000000..e55cb10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+/**
+ * A settings loader factory that automatically tries to identify which type of
+ * {@link SettingsLoader} to use, based on the resource name or the source content.
+ */
+public final class SettingsLoaderFactory {
+
+ private SettingsLoaderFactory() {
+
+ }
+
+ /**
+ * Returns a {@link SettingsLoader} based on the resource name.
+ */
+ public static SettingsLoader loaderFromResource(String resourceName) {
+ if (resourceName.endsWith(".json")) {
+ return new JsonSettingsLoader();
+ } else if (resourceName.endsWith(".yml") || resourceName.endsWith(".yaml")) {
+ return new YamlSettingsLoader();
+ } else if (resourceName.endsWith(".properties")) {
+ return new PropertiesSettingsLoader();
+ } else {
+ // let's default to the JSON one
+ return new JsonSettingsLoader();
+ }
+ }
+
+ /**
+ * Returns a {@link SettingsLoader} based on the actual settings source.
+ */
+ public static SettingsLoader loaderFromSource(String source) {
+ if (source.indexOf('{') != -1 && source.indexOf('}') != -1) {
+ return new JsonSettingsLoader();
+ }
+ if (source.indexOf(':') != -1) {
+ return new YamlSettingsLoader();
+ }
+ return new PropertiesSettingsLoader();
+ }
+}
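A sketch of the selection heuristics above:

    import org.elasticsearch.common.settings.loader.SettingsLoader;
    import org.elasticsearch.common.settings.loader.SettingsLoaderFactory;

    public class LoaderFactoryExample {
        public static void main(String[] args) {
            SettingsLoader byName = SettingsLoaderFactory.loaderFromResource("elasticsearch.yml");
            // -> YamlSettingsLoader (chosen by file extension)

            SettingsLoader json = SettingsLoaderFactory.loaderFromSource("{\"a\": \"1\"}");
            // -> JsonSettingsLoader (source contains '{' and '}')

            SettingsLoader props = SettingsLoaderFactory.loaderFromSource("a=1");
            // -> PropertiesSettingsLoader (no braces, no ':')
        }
    }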
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
new file mode 100644
index 0000000..3f1d70f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Settings loader that loads (parses) the settings in an xcontent format by flattening them
+ * into a map.
+ */
+public abstract class XContentSettingsLoader implements SettingsLoader {
+
+ public abstract XContentType contentType();
+
+ @Override
+ public Map<String, String> load(String source) throws IOException {
+ XContentParser parser = XContentFactory.xContent(contentType()).createParser(source);
+ try {
+ return load(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public Map<String, String> load(byte[] source) throws IOException {
+ XContentParser parser = XContentFactory.xContent(contentType()).createParser(source);
+ try {
+ return load(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ public Map<String, String> load(XContentParser jp) throws IOException {
+ StringBuilder sb = new StringBuilder();
+ Map<String, String> settings = newHashMap();
+ List<String> path = newArrayList();
+ XContentParser.Token token = jp.nextToken();
+ if (token == null) {
+ return settings;
+ }
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchParseException("malformed, expected settings to start with 'object', instead was [" + token + "]");
+ }
+ serializeObject(settings, sb, path, jp, null);
+ return settings;
+ }
+
+ private void serializeObject(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String objFieldName) throws IOException {
+ if (objFieldName != null) {
+ path.add(objFieldName);
+ }
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ serializeObject(settings, sb, path, parser, currentFieldName);
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ serializeArray(settings, sb, path, parser, currentFieldName);
+ } else if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NULL) {
+ // ignore this
+ } else {
+ serializeValue(settings, sb, path, parser, currentFieldName);
+
+ }
+ }
+
+ if (objFieldName != null) {
+ path.remove(path.size() - 1);
+ }
+ }
+
+ private void serializeArray(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName) throws IOException {
+ XContentParser.Token token;
+ int counter = 0;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ serializeObject(settings, sb, path, parser, fieldName + '.' + (counter++));
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ serializeArray(settings, sb, path, parser, fieldName + '.' + (counter++));
+ } else if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NULL) {
+ // ignore
+ } else {
+ serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++));
+ }
+ }
+ }
+
+ private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName) throws IOException {
+ sb.setLength(0);
+ for (String pathEle : path) {
+ sb.append(pathEle).append('.');
+ }
+ sb.append(fieldName);
+ settings.put(sb.toString(), parser.text());
+ }
+
+}
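Through the concrete JsonSettingsLoader, the flattening above turns nested objects into dotted keys and arrays into numbered keys; a sketch:

    import org.elasticsearch.common.settings.loader.JsonSettingsLoader;

    import java.util.Map;

    public class XContentExample {
        public static void main(String[] args) throws Exception {
            Map<String, String> flat = new JsonSettingsLoader()
                    .load("{\"index\": {\"refresh_interval\": \"1s\", \"warmers\": [\"w1\", \"w2\"]}}");
            // flat: index.refresh_interval -> "1s"
            //       index.warmers.0 -> "w1", index.warmers.1 -> "w2"
        }
    }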
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java b/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java
new file mode 100644
index 0000000..5c37a15
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Settings loader that loads (parses) the settings in a yaml format by flattening them
+ * into a map.
+ */
+public class YamlSettingsLoader extends XContentSettingsLoader {
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.YAML;
+ }
+
+ @Override
+ public Map<String, String> load(String source) throws IOException {
+        // replace tabs with spaces (YAML does not accept tabs, but many users still use them...)
+ return super.load(source.replace("\t", " "));
+ }
+}
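
A minimal usage sketch (not part of the patch): the loader accepts YAML containing tabs, because load(String) rewrites each tab to two spaces before handing the source to the parser.

    import org.elasticsearch.common.settings.loader.YamlSettingsLoader;

    import java.util.Map;

    public class YamlSketch {
        public static void main(String[] args) throws Exception {
            // The leading tab would normally be rejected by YAML.
            String yaml = "index:\n\tnumber_of_shards: 3\n";
            Map<String, String> flat = new YamlSettingsLoader().load(yaml);
            System.out.println(flat.get("index.number_of_shards")); // 3
        }
    }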
diff --git a/src/main/java/org/elasticsearch/common/settings/loader/package-info.java b/src/main/java/org/elasticsearch/common/settings/loader/package-info.java
new file mode 100644
index 0000000..c367b49
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/loader/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Settings loader (parser) allowing to parse different "source" formats into
+ * a {@link org.elasticsearch.common.settings.Settings}.
+ */
+package org.elasticsearch.common.settings.loader; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/settings/package-info.java b/src/main/java/org/elasticsearch/common/settings/package-info.java
new file mode 100644
index 0000000..1d6c7d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/settings/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Settings based utility.
+ */
+package org.elasticsearch.common.settings; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/text/BytesText.java b/src/main/java/org/elasticsearch/common/text/BytesText.java
new file mode 100644
index 0000000..3ee0774
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/text/BytesText.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.text;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.bytes.BytesReference;
+
+/**
+ * A {@link BytesReference} representation of the text; conversion to a {@link String} always happens on the fly.
+ */
+public class BytesText implements Text {
+
+ private BytesReference bytes;
+ private int hash;
+
+ public BytesText(BytesReference bytes) {
+ this.bytes = bytes;
+ }
+
+ @Override
+ public boolean hasBytes() {
+ return true;
+ }
+
+ @Override
+ public BytesReference bytes() {
+ return bytes;
+ }
+
+ @Override
+ public boolean hasString() {
+ return false;
+ }
+
+ @Override
+ public String string() {
+ // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
+ if (!bytes.hasArray()) {
+ bytes = bytes.toBytesArray();
+ }
+ return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), Charsets.UTF_8);
+ }
+
+ @Override
+ public String toString() {
+ return string();
+ }
+
+ @Override
+ public int hashCode() {
+ if (hash == 0) {
+ hash = bytes.hashCode();
+ }
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return bytes().equals(((Text) obj).bytes());
+ }
+
+ @Override
+ public int compareTo(Text text) {
+ return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java b/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java
new file mode 100644
index 0000000..1ec8195
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.text;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+
+/**
+ * Both a {@link String} and a {@link BytesReference} representation of the text. Starts with one of
+ * them; if the other is requested, it is computed and cached in a local reference so no additional
+ * conversion is needed.
+ */
+public class StringAndBytesText implements Text {
+
+ public static final Text[] EMPTY_ARRAY = new Text[0];
+
+ public static Text[] convertFromStringArray(String[] strings) {
+ if (strings.length == 0) {
+ return EMPTY_ARRAY;
+ }
+ Text[] texts = new Text[strings.length];
+ for (int i = 0; i < strings.length; i++) {
+ texts[i] = new StringAndBytesText(strings[i]);
+ }
+ return texts;
+ }
+
+ private BytesReference bytes;
+ private String text;
+ private int hash;
+
+ public StringAndBytesText(BytesReference bytes) {
+ this.bytes = bytes;
+ }
+
+ public StringAndBytesText(String text) {
+ this.text = text;
+ }
+
+ @Override
+ public boolean hasBytes() {
+ return bytes != null;
+ }
+
+ @Override
+ public BytesReference bytes() {
+ if (bytes == null) {
+ bytes = new BytesArray(text.getBytes(Charsets.UTF_8));
+ }
+ return bytes;
+ }
+
+ @Override
+ public boolean hasString() {
+ return text != null;
+ }
+
+ @Override
+ public String string() {
+ // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
+ if (text == null) {
+ if (!bytes.hasArray()) {
+ bytes = bytes.toBytesArray();
+ }
+ text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), Charsets.UTF_8);
+ }
+ return text;
+ }
+
+ @Override
+ public String toString() {
+ return string();
+ }
+
+ @Override
+ public int hashCode() {
+ if (hash == 0) {
+ hash = bytes().hashCode();
+ }
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return bytes().equals(((Text) obj).bytes());
+ }
+
+ @Override
+ public int compareTo(Text text) {
+ return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
+ }
+}
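
A short sketch (not part of the patch) of the caching behavior: whichever representation is missing is computed once on first request and then held.

    import org.elasticsearch.common.text.StringAndBytesText;
    import org.elasticsearch.common.text.Text;

    public class TextCachingSketch {
        public static void main(String[] args) {
            Text t = new StringAndBytesText("münchen");
            System.out.println(t.hasBytes()); // false: only the string is held so far
            t.bytes();                        // forces UTF-8 encoding...
            System.out.println(t.hasBytes()); // true: ...and the result is now cached
        }
    }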
diff --git a/src/main/java/org/elasticsearch/common/text/StringText.java b/src/main/java/org/elasticsearch/common/text/StringText.java
new file mode 100644
index 0000000..646e344
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/text/StringText.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.text;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+
+/**
+ * A {@link String}-only representation of the text. Conversion to bytes always happens on the fly.
+ */
+public class StringText implements Text {
+
+ public static final Text[] EMPTY_ARRAY = new Text[0];
+
+ public static Text[] convertFromStringArray(String[] strings) {
+ if (strings.length == 0) {
+ return EMPTY_ARRAY;
+ }
+ Text[] texts = new Text[strings.length];
+ for (int i = 0; i < strings.length; i++) {
+ texts[i] = new StringText(strings[i]);
+ }
+ return texts;
+ }
+
+ private final String text;
+ private int hash;
+
+ public StringText(String text) {
+ this.text = text;
+ }
+
+ @Override
+ public boolean hasBytes() {
+ return false;
+ }
+
+ @Override
+ public BytesReference bytes() {
+ return new BytesArray(text.getBytes(Charsets.UTF_8));
+ }
+
+ @Override
+ public boolean hasString() {
+ return true;
+ }
+
+ @Override
+ public String string() {
+ return text;
+ }
+
+ @Override
+ public String toString() {
+ return string();
+ }
+
+ @Override
+ public int hashCode() {
+ // we use bytes here so we can be consistent with other text implementations
+ if (hash == 0) {
+ hash = bytes().hashCode();
+ }
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ // we use bytes here so we can be consistent with other text implementations
+ return bytes().equals(((Text) obj).bytes());
+ }
+
+ @Override
+ public int compareTo(Text text) {
+ return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/text/Text.java b/src/main/java/org/elasticsearch/common/text/Text.java
new file mode 100644
index 0000000..1f02dd0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/text/Text.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.text;
+
+import org.elasticsearch.common.bytes.BytesReference;
+
+import java.io.Serializable;
+
+/**
+ * Text represents (usually long) text data. We use this abstraction instead of {@link String}
+ * so we can represent it in a more optimized manner in memory, serialize it over the
+ * network, and convert it to JSON.
+ */
+public interface Text extends Comparable<Text>, Serializable {
+
+ /**
+     * Are the bytes available without conversion when calling {@link #bytes()}?
+ */
+ boolean hasBytes();
+
+ /**
+     * The UTF-8 bytes representing the text; might be converted on the fly, see {@link #hasBytes()}.
+ */
+ BytesReference bytes();
+
+ /**
+     * Is there a {@link String} representation of the text? If not, {@link #hasBytes()} must return true.
+ */
+ boolean hasString();
+
+ /**
+ * Returns the string representation of the text, might be converted to a string on the fly.
+ */
+ String string();
+
+ /**
+ * Returns the string representation of the text, might be converted to a string on the fly.
+ */
+ String toString();
+}
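
Because ordering and hashing are defined on the UTF-8 bytes, the different implementations interoperate; a sketch (not part of the patch):

    import com.google.common.base.Charsets;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.text.BytesText;
    import org.elasticsearch.common.text.StringText;
    import org.elasticsearch.common.text.Text;

    public class TextInteropSketch {
        public static void main(String[] args) {
            Text fromString = new StringText("grüße");
            Text fromBytes = new BytesText(new BytesArray("grüße".getBytes(Charsets.UTF_8)));
            // Same content, different representations, still ordered as equal:
            System.out.println(fromString.compareTo(fromBytes)); // 0
        }
    }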
diff --git a/src/main/java/org/elasticsearch/common/text/UTF8SortedAsUnicodeComparator.java b/src/main/java/org/elasticsearch/common/text/UTF8SortedAsUnicodeComparator.java
new file mode 100644
index 0000000..3541c20
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/text/UTF8SortedAsUnicodeComparator.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.text;
+
+import org.elasticsearch.common.bytes.BytesReference;
+
+import java.util.Comparator;
+
+// LUCENE 4 UPGRADE: Is this the right way of comparing bytesreferences inside Text instances?
+// Copied from Lucene's BytesRef comparator
+public class UTF8SortedAsUnicodeComparator implements Comparator<BytesReference> {
+
+ public final static Comparator<BytesReference> utf8SortedAsUnicodeSortOrder = new UTF8SortedAsUnicodeComparator();
+
+ // Only singleton
+ private UTF8SortedAsUnicodeComparator() {
+ }
+
+ public int compare(BytesReference a, BytesReference b) {
+ if (a.hasArray() && b.hasArray()) {
+ final byte[] aBytes = a.array();
+ int aUpto = a.arrayOffset();
+ final byte[] bBytes = b.array();
+ int bUpto = b.arrayOffset();
+
+ final int aStop = aUpto + Math.min(a.length(), b.length());
+ while (aUpto < aStop) {
+ int aByte = aBytes[aUpto++] & 0xff;
+ int bByte = bBytes[bUpto++] & 0xff;
+
+ int diff = aByte - bByte;
+ if (diff != 0) {
+ return diff;
+ }
+ }
+
+            // One is a prefix of the other, or they are equal:
+ return a.length() - b.length();
+ } else {
+ final byte[] aBytes = a.toBytes();
+ int aUpto = 0;
+ final byte[] bBytes = b.toBytes();
+ int bUpto = 0;
+
+ final int aStop = aUpto + Math.min(a.length(), b.length());
+ while (aUpto < aStop) {
+ int aByte = aBytes[aUpto++] & 0xff;
+ int bByte = bBytes[bUpto++] & 0xff;
+
+ int diff = aByte - bByte;
+ if (diff != 0) {
+ return diff;
+ }
+ }
+
+            // One is a prefix of the other, or they are equal:
+ return a.length() - b.length();
+ }
+ }
+}
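
The point of comparing unsigned UTF-8 bytes is that the resulting order matches Unicode code point order, so byte-level and character-level sorting agree. A worked sketch (not part of the patch):

    import com.google.common.base.Charsets;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.text.UTF8SortedAsUnicodeComparator;

    public class CompareSketch {
        public static void main(String[] args) {
            BytesArray a = new BytesArray("a".getBytes(Charsets.UTF_8)); // [0x61]
            BytesArray e = new BytesArray("é".getBytes(Charsets.UTF_8)); // [0xC3, 0xA9]
            // Unsigned comparison of 0x61 vs 0xC3 is negative, matching U+0061 < U+00E9:
            int cmp = UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(a, e);
            System.out.println(cmp < 0); // true
        }
    }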
diff --git a/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java
new file mode 100644
index 0000000..f3bbeb0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ * A bound transport address is a tuple of two {@link TransportAddress}es: the address the
+ * transport is bound to, and the published address that clients should communicate with.
+ */
+public class BoundTransportAddress implements Streamable {
+
+ private TransportAddress boundAddress;
+
+ private TransportAddress publishAddress;
+
+ BoundTransportAddress() {
+ }
+
+ public BoundTransportAddress(TransportAddress boundAddress, TransportAddress publishAddress) {
+ this.boundAddress = boundAddress;
+ this.publishAddress = publishAddress;
+ }
+
+ public TransportAddress boundAddress() {
+ return boundAddress;
+ }
+
+ public TransportAddress publishAddress() {
+ return publishAddress;
+ }
+
+ public static BoundTransportAddress readBoundTransportAddress(StreamInput in) throws IOException {
+ BoundTransportAddress addr = new BoundTransportAddress();
+ addr.readFrom(in);
+ return addr;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ boundAddress = TransportAddressSerializers.addressFromStream(in);
+ publishAddress = TransportAddressSerializers.addressFromStream(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ TransportAddressSerializers.addressToStream(out, boundAddress);
+ TransportAddressSerializers.addressToStream(out, publishAddress);
+ }
+
+ @Override
+ public String toString() {
+ return "bound_address {" + boundAddress + "}, publish_address {" + publishAddress + "}";
+ }
+}
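
A round-trip sketch (not part of the patch). BytesStreamOutput and BytesStreamInput are assumed here from org.elasticsearch.common.io.stream; their exact constructors are not shown in this hunk.

    import org.elasticsearch.common.io.stream.BytesStreamInput;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.transport.BoundTransportAddress;
    import org.elasticsearch.common.transport.InetSocketTransportAddress;

    public class BoundAddressSketch {
        public static void main(String[] args) throws Exception {
            BoundTransportAddress addr = new BoundTransportAddress(
                    new InetSocketTransportAddress("127.0.0.1", 9300),    // bound
                    new InetSocketTransportAddress("192.168.0.1", 9300)); // published
            BytesStreamOutput out = new BytesStreamOutput();
            addr.writeTo(out);
            BoundTransportAddress copy = BoundTransportAddress.readBoundTransportAddress(
                    new BytesStreamInput(out.bytes())); // assumed constructor
            System.out.println(copy); // bound_address {...}, publish_address {...}
        }
    }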
diff --git a/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java
new file mode 100644
index 0000000..0f0a21d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class DummyTransportAddress implements TransportAddress {
+
+ public static final DummyTransportAddress INSTANCE = new DummyTransportAddress();
+
+ DummyTransportAddress() {
+ }
+
+ @Override
+ public short uniqueAddressTypeId() {
+ return 0;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java
new file mode 100644
index 0000000..202a93a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+/**
+ * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}).
+ */
+public class InetSocketTransportAddress implements TransportAddress {
+
+ private static boolean resolveAddress = false;
+
+ public static void setResolveAddress(boolean resolveAddress) {
+ InetSocketTransportAddress.resolveAddress = resolveAddress;
+ }
+
+ private InetSocketAddress address;
+
+ InetSocketTransportAddress() {
+
+ }
+
+ public InetSocketTransportAddress(String hostname, int port) {
+ this(new InetSocketAddress(hostname, port));
+ }
+
+ public InetSocketTransportAddress(InetAddress address, int port) {
+ this(new InetSocketAddress(address, port));
+ }
+
+ public InetSocketTransportAddress(InetSocketAddress address) {
+ this.address = address;
+ }
+
+ public static InetSocketTransportAddress readInetSocketTransportAddress(StreamInput in) throws IOException {
+ InetSocketTransportAddress address = new InetSocketTransportAddress();
+ address.readFrom(in);
+ return address;
+ }
+
+ @Override
+ public short uniqueAddressTypeId() {
+ return 1;
+ }
+
+ public InetSocketAddress address() {
+ return this.address;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readByte() == 0) {
+ int len = in.readByte();
+ byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
+ in.readFully(a);
+ InetAddress inetAddress;
+ if (len == 16) {
+ int scope_id = in.readInt();
+ inetAddress = Inet6Address.getByAddress(null, a, scope_id);
+ } else {
+ inetAddress = InetAddress.getByAddress(a);
+ }
+ int port = in.readInt();
+ this.address = new InetSocketAddress(inetAddress, port);
+ } else {
+ this.address = new InetSocketAddress(in.readString(), in.readInt());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (!resolveAddress && address.getAddress() != null) {
+ out.writeByte((byte) 0);
+ byte[] bytes = address().getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
+ out.writeByte((byte) bytes.length); // 1 byte
+ out.write(bytes, 0, bytes.length);
+ if (address().getAddress() instanceof Inet6Address)
+ out.writeInt(((Inet6Address) address.getAddress()).getScopeId());
+ } else {
+ out.writeByte((byte) 1);
+ out.writeString(address.getHostName());
+ }
+ out.writeInt(address.getPort());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ InetSocketTransportAddress address1 = (InetSocketTransportAddress) o;
+ return address.equals(address1.address);
+ }
+
+ @Override
+ public int hashCode() {
+ return address != null ? address.hashCode() : 0;
+ }
+
+ @Override
+ public String toString() {
+ return "inet[" + address + "]";
+ }
+}
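
A sketch of the wire format produced by writeTo() for 127.0.0.1:9300 with resolveAddress left at its default of false (not part of the patch; byte values worked out from the code above):

    // 0x00                  marker: raw address bytes follow
    // 0x04                  address length (4 for IPv4, 16 for IPv6)
    // 0x7F 0x00 0x00 0x01   127.0.0.1
    // 0x00 0x00 0x24 0x54   port 9300 as a 4-byte big-endian int
    // (for IPv6 addresses the scope id is additionally written before the port)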
diff --git a/src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java
new file mode 100644
index 0000000..0f7fbcd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class LocalTransportAddress implements TransportAddress {
+
+ private String id;
+
+ LocalTransportAddress() {
+ }
+
+ public LocalTransportAddress(String id) {
+ this.id = id;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ @Override
+ public short uniqueAddressTypeId() {
+ return 2;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ id = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(id);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ LocalTransportAddress that = (LocalTransportAddress) o;
+
+ if (id != null ? !id.equals(that.id) : that.id != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return id != null ? id.hashCode() : 0;
+ }
+
+ @Override
+ public String toString() {
+ return "local[" + id + "]";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java b/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java
new file mode 100644
index 0000000..48751b9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import java.net.ConnectException;
+import java.nio.channels.ClosedChannelException;
+
+/**
+ *
+ */
+public class NetworkExceptionHelper {
+
+ public static boolean isConnectException(Throwable e) {
+ if (e instanceof ConnectException) {
+ return true;
+ }
+ return false;
+ }
+
+ public static boolean isCloseConnectionException(Throwable e) {
+ if (e instanceof ClosedChannelException) {
+ return true;
+ }
+ if (e.getMessage() != null) {
+            // UGLY! These exception messages seem to indicate a closed connection
+ if (e.getMessage().contains("Connection reset by peer")) {
+ return true;
+ }
+ if (e.getMessage().contains("connection was aborted")) {
+ return true;
+ }
+ if (e.getMessage().contains("forcibly closed")) {
+ return true;
+ }
+ if (e.getMessage().contains("Broken pipe")) {
+ return true;
+ }
+ if (e.getMessage().contains("Connection timed out")) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/transport/PortsRange.java b/src/main/java/org/elasticsearch/common/transport/PortsRange.java
new file mode 100644
index 0000000..e1e4571
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/PortsRange.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+
+import com.carrotsearch.hppc.IntArrayList;
+
+import java.util.StringTokenizer;
+
+/**
+ *
+ */
+public class PortsRange {
+
+ private final String portRange;
+
+ public PortsRange(String portRange) {
+ this.portRange = portRange;
+ }
+
+ public int[] ports() throws NumberFormatException {
+ final IntArrayList ports = new IntArrayList();
+ iterate(new PortCallback() {
+ @Override
+ public boolean onPortNumber(int portNumber) {
+ ports.add(portNumber);
+ return false;
+ }
+ });
+ return ports.toArray();
+ }
+
+ public boolean iterate(PortCallback callback) throws NumberFormatException {
+ StringTokenizer st = new StringTokenizer(portRange, ",");
+ boolean success = false;
+ while (st.hasMoreTokens() && !success) {
+ String portToken = st.nextToken().trim();
+ int index = portToken.indexOf('-');
+ if (index == -1) {
+ int portNumber = Integer.parseInt(portToken.trim());
+ success = callback.onPortNumber(portNumber);
+ if (success) {
+ break;
+ }
+ } else {
+ int startPort = Integer.parseInt(portToken.substring(0, index).trim());
+ int endPort = Integer.parseInt(portToken.substring(index + 1).trim());
+ if (endPort < startPort) {
+                    throw new IllegalArgumentException("Start port [" + startPort + "] must be less than or equal to end port [" + endPort + "]");
+ }
+ for (int i = startPort; i <= endPort; i++) {
+ success = callback.onPortNumber(i);
+ if (success) {
+ break;
+ }
+ }
+ }
+ }
+ return success;
+ }
+
+ public static interface PortCallback {
+ boolean onPortNumber(int portNumber);
+ }
+}
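
ports() collects every port in the range, while iterate() stops at the first callback that returns true, which is how a transport can scan a range for the first bindable port. A sketch (not part of the patch; tryBind() is a hypothetical stand-in for a real bind attempt):

    import org.elasticsearch.common.transport.PortsRange;

    import java.util.Arrays;

    public class PortsRangeSketch {
        public static void main(String[] args) {
            PortsRange range = new PortsRange("9300-9302,9400");
            System.out.println(Arrays.toString(range.ports())); // [9300, 9301, 9302, 9400]

            boolean bound = range.iterate(new PortsRange.PortCallback() {
                @Override
                public boolean onPortNumber(int port) {
                    return tryBind(port); // returning true stops the iteration
                }
            });
            System.out.println(bound);
        }

        private static boolean tryBind(int port) {
            return port == 9301; // hypothetical: pretend 9301 is the first free port
        }
    }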
diff --git a/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
new file mode 100644
index 0000000..d70a201
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.Serializable;
+
+/**
+ *
+ */
+public interface TransportAddress extends Streamable, Serializable {
+
+ short uniqueAddressTypeId();
+}
diff --git a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java
new file mode 100644
index 0000000..242000b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.transport;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+
+/**
+ * A global registry of all the different types of {@link org.elasticsearch.common.transport.TransportAddress},
+ * allowing them to be serialized.
+ * <p>
+ * By default, registers the dummy, inet socket and local transport address types.
+ */
+public abstract class TransportAddressSerializers {
+
+ private static final ESLogger logger = Loggers.getLogger(TransportAddressSerializers.class);
+
+ private static ImmutableMap<Short, Constructor<? extends TransportAddress>> addressConstructors = ImmutableMap.of();
+
+ static {
+ try {
+ addAddressType(DummyTransportAddress.INSTANCE);
+ addAddressType(new InetSocketTransportAddress());
+ addAddressType(new LocalTransportAddress());
+ } catch (Exception e) {
+ logger.warn("Failed to add InetSocketTransportAddress", e);
+ }
+ }
+
+ public static synchronized void addAddressType(TransportAddress address) throws Exception {
+ if (addressConstructors.containsKey(address.uniqueAddressTypeId())) {
+ throw new ElasticsearchIllegalStateException("Address [" + address.uniqueAddressTypeId() + "] already bound");
+ }
+ Constructor<? extends TransportAddress> constructor = address.getClass().getDeclaredConstructor();
+ constructor.setAccessible(true);
+ addressConstructors = newMapBuilder(addressConstructors).put(address.uniqueAddressTypeId(), constructor).immutableMap();
+ }
+
+ public static TransportAddress addressFromStream(StreamInput input) throws IOException {
+ short addressUniqueId = input.readShort();
+ Constructor<? extends TransportAddress> constructor = addressConstructors.get(addressUniqueId);
+ if (constructor == null) {
+ throw new IOException("No transport address mapped to [" + addressUniqueId + "]");
+ }
+ TransportAddress address;
+ try {
+ address = constructor.newInstance();
+ } catch (Exception e) {
+ throw new IOException("Failed to create class with constructor [" + constructor + "]", e);
+ }
+ address.readFrom(input);
+ return address;
+ }
+
+ public static void addressToStream(StreamOutput out, TransportAddress address) throws IOException {
+ out.writeShort(address.uniqueAddressTypeId());
+ address.writeTo(out);
+ }
+}
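
A sketch (not part of the patch) of registering a custom address type. MyTransportAddress is hypothetical; the constraints come from the code above: a no-arg constructor (instances are created reflectively) and a uniqueAddressTypeId that does not collide with the built-in ids 0, 1 and 2.

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.transport.TransportAddress;
    import org.elasticsearch.common.transport.TransportAddressSerializers;

    import java.io.IOException;

    public class MyTransportAddress implements TransportAddress {

        private String token;

        MyTransportAddress() {
            // required: the registry instantiates this type reflectively
        }

        public MyTransportAddress(String token) {
            this.token = token;
        }

        @Override
        public short uniqueAddressTypeId() {
            return 42; // must not collide with 0 (dummy), 1 (inet socket), 2 (local)
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            token = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(token);
        }

        public static void register() throws Exception {
            TransportAddressSerializers.addAddressType(new MyTransportAddress());
        }
    }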
diff --git a/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java b/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java
new file mode 100644
index 0000000..4a15995
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+/**
+ * A <tt>ByteSizeUnit</tt> represents a size at a given unit of
+ * granularity and provides utility methods to convert across units.
+ * A <tt>ByteSizeUnit</tt> does not maintain size information, but only
+ * helps organize and use size representations that may be maintained
+ * separately across various contexts.
+ */
+public enum ByteSizeUnit {
+ BYTES {
+ @Override
+ public long toBytes(long size) {
+ return size;
+ }
+
+ @Override
+ public long toKB(long size) {
+ return size / (C1 / C0);
+ }
+
+ @Override
+ public long toMB(long size) {
+ return size / (C2 / C0);
+ }
+
+ @Override
+ public long toGB(long size) {
+ return size / (C3 / C0);
+ }
+
+ @Override
+ public long toTB(long size) {
+ return size / (C4 / C0);
+ }
+
+ @Override
+ public long toPB(long size) {
+ return size / (C5 / C0);
+ }
+ },
+ KB {
+ @Override
+ public long toBytes(long size) {
+ return x(size, C1 / C0, MAX / (C1 / C0));
+ }
+
+ @Override
+ public long toKB(long size) {
+ return size;
+ }
+
+ @Override
+ public long toMB(long size) {
+ return size / (C2 / C1);
+ }
+
+ @Override
+ public long toGB(long size) {
+ return size / (C3 / C1);
+ }
+
+ @Override
+ public long toTB(long size) {
+ return size / (C4 / C1);
+ }
+
+ @Override
+ public long toPB(long size) {
+ return size / (C5 / C1);
+ }
+ },
+ MB {
+ @Override
+ public long toBytes(long size) {
+ return x(size, C2 / C0, MAX / (C2 / C0));
+ }
+
+ @Override
+ public long toKB(long size) {
+ return x(size, C2 / C1, MAX / (C2 / C1));
+ }
+
+ @Override
+ public long toMB(long size) {
+ return size;
+ }
+
+ @Override
+ public long toGB(long size) {
+ return size / (C3 / C2);
+ }
+
+ @Override
+ public long toTB(long size) {
+ return size / (C4 / C2);
+ }
+
+ @Override
+ public long toPB(long size) {
+ return size / (C5 / C2);
+ }
+ },
+ GB {
+ @Override
+ public long toBytes(long size) {
+ return x(size, C3 / C0, MAX / (C3 / C0));
+ }
+
+ @Override
+ public long toKB(long size) {
+ return x(size, C3 / C1, MAX / (C3 / C1));
+ }
+
+ @Override
+ public long toMB(long size) {
+ return x(size, C3 / C2, MAX / (C3 / C2));
+ }
+
+ @Override
+ public long toGB(long size) {
+ return size;
+ }
+
+ @Override
+ public long toTB(long size) {
+ return size / (C4 / C3);
+ }
+
+ @Override
+ public long toPB(long size) {
+ return size / (C5 / C3);
+ }
+ },
+ TB {
+ @Override
+ public long toBytes(long size) {
+ return x(size, C4 / C0, MAX / (C4 / C0));
+ }
+
+ @Override
+ public long toKB(long size) {
+ return x(size, C4 / C1, MAX / (C4 / C1));
+ }
+
+ @Override
+ public long toMB(long size) {
+ return x(size, C4 / C2, MAX / (C4 / C2));
+ }
+
+ @Override
+ public long toGB(long size) {
+ return x(size, C4 / C3, MAX / (C4 / C3));
+ }
+
+ @Override
+ public long toTB(long size) {
+ return size;
+ }
+
+ @Override
+ public long toPB(long size) {
+ return size / (C5 / C4);
+ }
+ },
+ PB {
+ @Override
+ public long toBytes(long size) {
+ return x(size, C5 / C0, MAX / (C5 / C0));
+ }
+
+ @Override
+ public long toKB(long size) {
+ return x(size, C5 / C1, MAX / (C5 / C1));
+ }
+
+ @Override
+ public long toMB(long size) {
+ return x(size, C5 / C2, MAX / (C5 / C2));
+ }
+
+ @Override
+ public long toGB(long size) {
+ return x(size, C5 / C3, MAX / (C5 / C3));
+ }
+
+ @Override
+ public long toTB(long size) {
+ return x(size, C5 / C4, MAX / (C5 / C4));
+ }
+
+ @Override
+ public long toPB(long size) {
+ return size;
+ }
+ };
+
+ static final long C0 = 1L;
+ static final long C1 = C0 * 1024L;
+ static final long C2 = C1 * 1024L;
+ static final long C3 = C2 * 1024L;
+ static final long C4 = C3 * 1024L;
+ static final long C5 = C4 * 1024L;
+
+ static final long MAX = Long.MAX_VALUE;
+
+ /**
+ * Scale d by m, checking for overflow.
+     * This has a short name to make the code above more readable.
+ */
+ static long x(long d, long m, long over) {
+ if (d > over) return Long.MAX_VALUE;
+ if (d < -over) return Long.MIN_VALUE;
+ return d * m;
+ }
+
+
+ public abstract long toBytes(long size);
+
+ public abstract long toKB(long size);
+
+ public abstract long toMB(long size);
+
+ public abstract long toGB(long size);
+
+ public abstract long toTB(long size);
+
+ public abstract long toPB(long size);
+} \ No newline at end of file
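
A few worked conversions (not part of the patch). Downward conversions truncate via integer division, and upward conversions saturate at Long.MAX_VALUE/MIN_VALUE instead of overflowing:

    import org.elasticsearch.common.unit.ByteSizeUnit;

    public class ByteSizeUnitSketch {
        public static void main(String[] args) {
            System.out.println(ByteSizeUnit.GB.toBytes(2));    // 2147483648
            System.out.println(ByteSizeUnit.BYTES.toKB(1536)); // 1 (truncated)
            System.out.println(ByteSizeUnit.PB.toBytes(Long.MAX_VALUE)); // saturates at 9223372036854775807
        }
    }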
diff --git a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java
new file mode 100644
index 0000000..539b25d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Locale;
+
+/**
+ *
+ */
+public class ByteSizeValue implements Serializable, Streamable {
+
+ private long size;
+
+ private ByteSizeUnit sizeUnit;
+
+ private ByteSizeValue() {
+
+ }
+
+ public ByteSizeValue(long bytes) {
+ this(bytes, ByteSizeUnit.BYTES);
+ }
+
+ public ByteSizeValue(long size, ByteSizeUnit sizeUnit) {
+ this.size = size;
+ this.sizeUnit = sizeUnit;
+ }
+
+ public int bytesAsInt() throws ElasticsearchIllegalArgumentException {
+ long bytes = bytes();
+ if (bytes > Integer.MAX_VALUE) {
+ throw new ElasticsearchIllegalArgumentException("size [" + toString() + "] is bigger than max int");
+ }
+ return (int) bytes;
+ }
+
+ public long bytes() {
+ return sizeUnit.toBytes(size);
+ }
+
+ public long getBytes() {
+ return bytes();
+ }
+
+ public long kb() {
+ return sizeUnit.toKB(size);
+ }
+
+ public long getKb() {
+ return kb();
+ }
+
+ public long mb() {
+ return sizeUnit.toMB(size);
+ }
+
+ public long getMb() {
+ return mb();
+ }
+
+ public long gb() {
+ return sizeUnit.toGB(size);
+ }
+
+ public long getGb() {
+ return gb();
+ }
+
+ public long tb() {
+ return sizeUnit.toTB(size);
+ }
+
+ public long getTb() {
+ return tb();
+ }
+
+ public long pb() {
+ return sizeUnit.toPB(size);
+ }
+
+ public long getPb() {
+ return pb();
+ }
+
+ public double kbFrac() {
+ return ((double) bytes()) / ByteSizeUnit.C1;
+ }
+
+ public double getKbFrac() {
+ return kbFrac();
+ }
+
+ public double mbFrac() {
+ return ((double) bytes()) / ByteSizeUnit.C2;
+ }
+
+ public double getMbFrac() {
+ return mbFrac();
+ }
+
+ public double gbFrac() {
+ return ((double) bytes()) / ByteSizeUnit.C3;
+ }
+
+ public double getGbFrac() {
+ return gbFrac();
+ }
+
+ public double tbFrac() {
+ return ((double) bytes()) / ByteSizeUnit.C4;
+ }
+
+ public double getTbFrac() {
+ return tbFrac();
+ }
+
+ public double pbFrac() {
+ return ((double) bytes()) / ByteSizeUnit.C5;
+ }
+
+ public double getPbFrac() {
+ return pbFrac();
+ }
+
+ @Override
+ public String toString() {
+ long bytes = bytes();
+ double value = bytes;
+ String suffix = "b";
+ if (bytes >= ByteSizeUnit.C5) {
+ value = pbFrac();
+ suffix = "pb";
+ } else if (bytes >= ByteSizeUnit.C4) {
+ value = tbFrac();
+ suffix = "tb";
+ } else if (bytes >= ByteSizeUnit.C3) {
+ value = gbFrac();
+ suffix = "gb";
+ } else if (bytes >= ByteSizeUnit.C2) {
+ value = mbFrac();
+ suffix = "mb";
+ } else if (bytes >= ByteSizeUnit.C1) {
+ value = kbFrac();
+ suffix = "kb";
+ }
+ return Strings.format1Decimals(value, suffix);
+ }
+
+ public static ByteSizeValue parseBytesSizeValue(String sValue) throws ElasticsearchParseException {
+ return parseBytesSizeValue(sValue, null);
+ }
+
+ public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue) throws ElasticsearchParseException {
+ if (sValue == null) {
+ return defaultValue;
+ }
+ long bytes;
+ try {
+ String lastTwoChars = sValue.substring(sValue.length() - Math.min(2, sValue.length())).toLowerCase(Locale.ROOT);
+ if (lastTwoChars.endsWith("k")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * ByteSizeUnit.C1);
+ } else if (lastTwoChars.endsWith("kb")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * ByteSizeUnit.C1);
+ } else if (lastTwoChars.endsWith("m")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * ByteSizeUnit.C2);
+ } else if (lastTwoChars.endsWith("mb")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * ByteSizeUnit.C2);
+ } else if (lastTwoChars.endsWith("g")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * ByteSizeUnit.C3);
+ } else if (lastTwoChars.endsWith("gb")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * ByteSizeUnit.C3);
+ } else if (lastTwoChars.endsWith("t")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * ByteSizeUnit.C4);
+ } else if (lastTwoChars.endsWith("tb")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * ByteSizeUnit.C4);
+ } else if (lastTwoChars.endsWith("p")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * ByteSizeUnit.C5);
+ } else if (lastTwoChars.endsWith("pb")) {
+ bytes = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)) * ByteSizeUnit.C5);
+ } else if (lastTwoChars.endsWith("b")) {
+ bytes = Long.parseLong(sValue.substring(0, sValue.length() - 1));
+ } else {
+ bytes = Long.parseLong(sValue);
+ }
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchParseException("Failed to parse [" + sValue + "]", e);
+ }
+ return new ByteSizeValue(bytes, ByteSizeUnit.BYTES);
+ }
+
+ public static ByteSizeValue readBytesSizeValue(StreamInput in) throws IOException {
+ ByteSizeValue sizeValue = new ByteSizeValue();
+ sizeValue.readFrom(in);
+ return sizeValue;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ size = in.readVLong();
+ sizeUnit = ByteSizeUnit.BYTES;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(bytes());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ByteSizeValue sizeValue = (ByteSizeValue) o;
+
+ if (size != sizeValue.size) return false;
+ if (sizeUnit != sizeValue.sizeUnit) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (int) (size ^ (size >>> 32));
+ result = 31 * result + (sizeUnit != null ? sizeUnit.hashCode() : 0);
+ return result;
+ }
+} \ No newline at end of file
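
Parsing examples (not part of the patch), based on parseBytesSizeValue() and toString() above:

    import org.elasticsearch.common.unit.ByteSizeValue;

    public class ByteSizeValueSketch {
        public static void main(String[] args) {
            System.out.println(ByteSizeValue.parseBytesSizeValue("1.5gb").bytes()); // 1610612736
            System.out.println(ByteSizeValue.parseBytesSizeValue("512k").kb());     // 512
            System.out.println(new ByteSizeValue(2621440)); // 2.5mb
        }
    }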
diff --git a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java
new file mode 100644
index 0000000..d1db86f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * The DistanceUnit enumerates several units for measuring distances. These units
+ * provide methods for parsing strings and for converting values between units. Some
+ * methods, like {@link DistanceUnit#getEarthCircumference}, refer to the earth
+ * ellipsoid defined in {@link GeoUtils}. The default unit used within this project
+ * is <code>METERS</code>, which is defined by <code>DEFAULT</code>.
+ */
+public enum DistanceUnit {
+ INCH(0.0254, "in", "inch"),
+ YARD(0.9144, "yd", "yards"),
+ FEET(0.3048, "ft", "feet"),
+ MILES(1609.344, "mi", "miles"),
+ KILOMETERS(1000.0, "km", "kilometers"),
+ MILLIMETERS(0.001, "mm", "millimeters"),
+ CENTIMETERS(0.01, "cm", "centimeters"),
+
+    // since 'm' is a suffix of other unit
+    // names, METERS must be the last entry;
+    // otherwise suffix-based parsing of
+    // those names would fail
+ METERS(1, "m", "meters");
+
+ public static DistanceUnit DEFAULT = METERS;
+
+ private double meters;
+ private final String[] names;
+
+ DistanceUnit(double meters, String...names) {
+ this.meters = meters;
+ this.names = names;
+ }
+
+ /**
+ * Measures the circumference of earth in this unit
+ *
+ * @return length of earth circumference in this unit
+ */
+ public double getEarthCircumference() {
+ return GeoUtils.EARTH_EQUATOR / meters;
+ }
+
+ /**
+ * Measures the radius of earth in this unit
+ *
+ * @return length of earth radius in this unit
+ */
+ public double getEarthRadius() {
+ return GeoUtils.EARTH_SEMI_MAJOR_AXIS / meters;
+ }
+
+ /**
+     * Measures the length of one degree of longitude along the equator in this unit
+ *
+ * @return length of a longitude degree in this unit
+ */
+ public double getDistancePerDegree() {
+ return GeoUtils.EARTH_EQUATOR / (360.0 * meters);
+ }
+
+ /**
+ * Convert a value into meters
+ *
+ * @param distance distance in this unit
+ * @return value in meters
+ */
+ public double toMeters(double distance) {
+ return convert(distance, this, DistanceUnit.METERS);
+ }
+
+ /**
+ * Convert a value given in meters to a value of this unit
+ *
+ * @param distance distance in meters
+ * @return value in this unit
+ */
+ public double fromMeters(double distance) {
+ return convert(distance, DistanceUnit.METERS, this);
+ }
+
+ /**
+     * Convert a value given in another unit into this unit
+     *
+     * @param distance value in the given unit
+     * @param unit     unit of the given value
+     * @return value converted to this unit
+ */
+ public double convert(double distance, DistanceUnit unit) {
+ return convert(distance, unit, this);
+ }
+
+ /**
+ * Convert a value to a distance string
+ *
+ * @param distance value to convert
+ * @return String representation of the distance
+ */
+ public String toString(double distance) {
+ return distance + toString();
+ }
+
+ @Override
+ public String toString() {
+ return names[0];
+ }
+
+ /**
+ * Converts the given distance from the given DistanceUnit, to the given DistanceUnit
+ *
+ * @param distance Distance to convert
+ * @param from Unit to convert the distance from
+ * @param to Unit of distance to convert to
+ * @return Given distance converted to the distance in the given unit
+ */
+ public static double convert(double distance, DistanceUnit from, DistanceUnit to) {
+ if (from == to) {
+ return distance;
+ } else {
+ return distance * from.meters / to.meters;
+ }
+ }
+
+ /**
+ * Parses a given distance and converts it to the specified unit.
+ *
+ * @param distance String defining a distance (value and unit)
+ * @param defaultUnit unit assumed if none is defined
+ * @param to unit of result
+ * @return parsed distance
+ */
+ public static double parse(String distance, DistanceUnit defaultUnit, DistanceUnit to) {
+ Distance dist = Distance.parseDistance(distance, defaultUnit);
+ return convert(dist.value, dist.unit, to);
+ }
+
+ /**
+ * Parses a given distance and converts it to this unit.
+ *
+ * @param distance String defining a distance (value and unit)
+     * @param defaultUnit unit to assume if none is provided
+ * @return parsed distance
+ */
+ public double parse(String distance, DistanceUnit defaultUnit) {
+ return parse(distance, defaultUnit, this);
+ }
+
+ /**
+ * Convert a String to a {@link DistanceUnit}
+ *
+ * @param unit name of the unit
+ * @return unit matching the given name
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if no unit matches the given name
+ */
+ public static DistanceUnit fromString(String unit) {
+ for (DistanceUnit dunit : values()) {
+ for (String name : dunit.names) {
+ if(name.equals(unit)) {
+ return dunit;
+ }
+ }
+ }
+ throw new ElasticsearchIllegalArgumentException("No distance unit match [" + unit + "]");
+ }
+
+ /**
+ * Parses the suffix of a given distance string and return the corresponding {@link DistanceUnit}
+ *
+ * @param distance string representing a distance
+ * @param defaultUnit default unit to use, if no unit is provided by the string
+ * @return unit of the given distance
+ */
+ public static DistanceUnit parseUnit(String distance, DistanceUnit defaultUnit) {
+ for (DistanceUnit unit : values()) {
+ for (String name : unit.names) {
+ if(distance.endsWith(name)) {
+ return unit;
+ }
+ }
+ }
+ return defaultUnit;
+ }
+
+ /**
+ * Write a {@link DistanceUnit} to a {@link StreamOutput}
+ *
+ * @param out {@link StreamOutput} to write to
+ * @param unit {@link DistanceUnit} to write
+ * @throws IOException
+ */
+ public static void writeDistanceUnit(StreamOutput out, DistanceUnit unit) throws IOException {
+ out.writeByte((byte) unit.ordinal());
+ }
+
+ /**
+ * Read a {@link DistanceUnit} from a {@link StreamInput}
+ *
+ * @param in {@link StreamInput} to read the {@link DistanceUnit} from
+ * @return {@link DistanceUnit} read from the {@link StreamInput}
+ * @throws IOException if no unit can be read from the {@link StreamInput}
+ * @throws ElasticsearchIllegalArgumentException if no matching {@link DistanceUnit} can be found
+ */
+ public static DistanceUnit readDistanceUnit(StreamInput in) throws IOException {
+ byte b = in.readByte();
+
+ if(b<0 || b>=values().length) {
+ throw new ElasticsearchIllegalArgumentException("No type for distance unit matching [" + b + "]");
+ } else {
+ return values()[b];
+ }
+ }
+
+ /**
+ * This class implements a value+unit tuple.
+ */
+ public static class Distance implements Comparable<Distance> {
+ public final double value;
+ public final DistanceUnit unit;
+
+ public Distance(double value, DistanceUnit unit) {
+ super();
+ this.value = value;
+ this.unit = unit;
+ }
+
+ /**
+ * Converts a {@link Distance} value given in a specific {@link DistanceUnit} into
+ * a value equal to the specified value but in another {@link DistanceUnit}.
+ *
+ * @param unit unit of the result
+ * @return converted distance
+ */
+ public Distance convert(DistanceUnit unit) {
+ if(this.unit == unit) {
+ return this;
+ } else {
+ return new Distance(DistanceUnit.convert(value, this.unit, unit), unit);
+ }
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if(obj == null) {
+ return false;
+ } else if (obj instanceof Distance) {
+ Distance other = (Distance) obj;
+ return DistanceUnit.convert(value, unit, other.unit) == other.value;
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return Double.valueOf(value * unit.meters).hashCode();
+ }
+
+ @Override
+ public int compareTo(Distance o) {
+ return Double.compare(value, DistanceUnit.convert(o.value, o.unit, unit));
+ }
+
+ @Override
+ public String toString() {
+ return unit.toString(value);
+ }
+
+ /**
+ * Parse a {@link Distance} from a given String. If no unit is given,
+ * <code>DistanceUnit.DEFAULT</code> will be used
+ *
+ * @param distance String defining a {@link Distance}
+ * @return parsed {@link Distance}
+ */
+ public static Distance parseDistance(String distance) {
+ return parseDistance(distance, DEFAULT);
+ }
+
+ /**
+ * Parse a {@link Distance} from a given String
+ *
+ * @param distance String defining a {@link Distance}
+ * @param defaultUnit {@link DistanceUnit} to be assumed
+ * if no unit is provided in the first argument
+ * @return parsed {@link Distance}
+ */
+ private static Distance parseDistance(String distance, DistanceUnit defaultUnit) {
+ for (DistanceUnit unit : values()) {
+ for (String name : unit.names) {
+ if(distance.endsWith(name)) {
+ return new Distance(Double.parseDouble(distance.substring(0, distance.length() - name.length())), unit);
+ }
+ }
+ }
+ return new Distance(Double.parseDouble(distance), defaultUnit);
+ }
+ }
+}
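A brief usage sketch of the conversion and parsing entry points above (the KILOMETERS constant and its "km" suffix are assumed from the enum declaration earlier in this file):

    // static conversion between two units
    double km = DistanceUnit.convert(1500, DistanceUnit.METERS, DistanceUnit.KILOMETERS); // 1.5
    // parse a string, assuming a default unit, and convert the result to this unit
    double meters = DistanceUnit.METERS.parse("2km", DistanceUnit.DEFAULT); // 2000.0
    // parse into a value+unit tuple and convert it
    DistanceUnit.Distance d = DistanceUnit.Distance.parseDistance("19.3km");
    System.out.println(d.convert(DistanceUnit.METERS)); // prints "19300.0m"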
diff --git a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java
new file mode 100644
index 0000000..712b37a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.unit;
+
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.util.automaton.LevenshteinAutomata;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * A unit class that encapsulates all inexact-search parsing and the
+ * conversion from similarities to edit distances, etc.
+ */
+public final class Fuzziness implements ToXContent {
+
+ public static final XContentBuilderString X_FIELD_NAME = new XContentBuilderString("fuzziness");
+ public static final Fuzziness ZERO = new Fuzziness(0);
+ public static final Fuzziness ONE = new Fuzziness(1);
+ public static final Fuzziness TWO = new Fuzziness(2);
+ public static final Fuzziness AUTO = new Fuzziness("AUTO");
+ public static final ParseField FIELD = new ParseField(X_FIELD_NAME.camelCase().getValue());
+
+ private final Object fuzziness;
+
+ private Fuzziness(int fuzziness) {
+ Preconditions.checkArgument(fuzziness >= 0 && fuzziness <= 2, "Valid edit distances are [0, 1, 2] but was [" + fuzziness + "]");
+ this.fuzziness = fuzziness;
+ }
+
+ private Fuzziness(float fuzziness) {
+ Preconditions.checkArgument(fuzziness >= 0.0 && fuzziness < 1.0f, "Valid similarities must be in the interval [0..1) but was [" + fuzziness + "]");
+ this.fuzziness = fuzziness;
+ }
+
+ private Fuzziness(String fuzziness) {
+ this.fuzziness = fuzziness;
+ }
+
+ /**
+ * Creates a {@link Fuzziness} instance from a similarity. The value must be in the range <tt>[0..1)</tt>
+ */
+ public static Fuzziness fromSimilarity(float similarity) {
+ return new Fuzziness(similarity);
+ }
+
+ /**
+ * Creates a {@link Fuzziness} instance from an edit distance. The value must be one of <tt>[0, 1, 2]</tt>
+ */
+ public static Fuzziness fromEdits(int edits) {
+ return new Fuzziness(edits);
+ }
+
+ public static Fuzziness build(Object fuzziness) {
+ if (fuzziness instanceof Fuzziness) {
+ return (Fuzziness) fuzziness;
+ }
+ String string = fuzziness.toString();
+ if (AUTO.asString().equalsIgnoreCase(string)) {
+ return AUTO;
+ }
+ return new Fuzziness(string);
+ }
+
+ public static Fuzziness parse(XContentParser parser) throws IOException {
+ XContentParser.Token token = parser.currentToken();
+ switch (token) {
+ case VALUE_STRING:
+ case VALUE_NUMBER:
+ final String fuzziness = parser.text();
+ if (AUTO.asString().equalsIgnoreCase(fuzziness)) {
+ return AUTO;
+ }
+ try {
+ final int edits = Integer.parseInt(fuzziness);
+ switch (edits) {
+ case 0:
+ return ZERO;
+ case 1:
+ return ONE;
+ case 2:
+ return TWO;
+ default:
+ return build(fuzziness);
+ }
+ } catch (NumberFormatException ex) {
+ return build(fuzziness);
+ }
+
+ default:
+ throw new ElasticsearchIllegalArgumentException("Can't parse fuzziness on token: [" + token + "]");
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return toXContent(builder, params, true);
+ }
+
+ public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeFieldName) throws IOException {
+ if (includeFieldName) {
+ builder.field(X_FIELD_NAME, fuzziness);
+ } else {
+ builder.value(fuzziness);
+ }
+ return builder;
+ }
+
+ public int asDistance() {
+ return asDistance(null);
+ }
+
+ public int asDistance(String text) {
+ if (fuzziness instanceof String) {
+ if (this == AUTO) { // AUTO: derive the edit distance from the term length
+ final int len = termLen(text);
+ if (len <= 2) {
+ return 0;
+ } else if (len > 5) {
+ return 2;
+ } else {
+ return 1;
+ }
+ }
+ }
+ return FuzzyQuery.floatToEdits(asFloat(), termLen(text));
+ }
+
+ public TimeValue asTimeValue() {
+ if (this == AUTO) {
+ return TimeValue.timeValueMillis(1);
+ } else {
+ return TimeValue.parseTimeValue(fuzziness.toString(), null);
+ }
+ }
+
+ public long asLong() {
+ if (this == AUTO) {
+ return 1;
+ }
+ try {
+ return Long.parseLong(fuzziness.toString());
+ } catch (NumberFormatException ex) {
+ return (long) Double.parseDouble(fuzziness.toString());
+ }
+ }
+
+ public int asInt() {
+ if (this == AUTO) {
+ return 1;
+ }
+ try {
+ return Integer.parseInt(fuzziness.toString());
+ } catch (NumberFormatException ex) {
+ return (int) Float.parseFloat(fuzziness.toString());
+ }
+ }
+
+ public short asShort() {
+ if (this == AUTO) {
+ return 1;
+ }
+ try {
+ return Short.parseShort(fuzziness.toString());
+ } catch (NumberFormatException ex) {
+ return (short) Float.parseFloat(fuzziness.toString());
+ }
+ }
+
+ public byte asByte() {
+ if (this == AUTO) {
+ return 1;
+ }
+ try {
+ return Byte.parseByte(fuzziness.toString());
+ } catch (NumberFormatException ex) {
+ return (byte) Float.parseFloat(fuzziness.toString());
+ }
+ }
+
+ public double asDouble() {
+ if (this == AUTO) {
+ return 1d;
+ }
+ return Double.parseDouble(fuzziness.toString());
+ }
+
+ public float asFloat() {
+ if (this == AUTO) {
+ return 1f;
+ }
+ return Float.parseFloat(fuzziness.toString());
+ }
+
+ public float asSimilarity() {
+ return asSimilarity(null);
+ }
+
+ public float asSimilarity(String text) {
+ if (this == AUTO) {
+ final int len = termLen(text);
+ if (len <= 2) {
+ return 0.0f;
+ } else if (len > 5) {
+ return 0.5f;
+ } else {
+ return 0.66f;
+ }
+ }
+ if (fuzziness instanceof Float) { // it's a similarity
+ return ((Float) fuzziness).floatValue();
+ } else if (fuzziness instanceof Integer) { // it's an edit!
+ int dist = Math.min(((Integer) fuzziness).intValue(),
+ LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
+ return Math.min(0.999f, Math.max(0.0f, 1.0f - ((float) dist / (float) termLen(text))));
+ } else {
+ final float similarity = Float.parseFloat(fuzziness.toString());
+ if (similarity >= 0.0f && similarity < 1.0f) {
+ return similarity;
+ }
+ }
+ throw new ElasticsearchIllegalArgumentException("Can't get similarity from fuzziness [" + fuzziness + "]");
+ }
+
+ private int termLen(String text) {
+ return text == null ? 5 : text.codePointCount(0, text.length()); // 5 = average term length in English
+ }
+
+ public String asString() {
+ return fuzziness.toString();
+ }
+}
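A sketch of how the AUTO edit-distance logic above plays out (term lengths of at most 2, 3 to 5, and more than 5 code points map to 0, 1 and 2 edits respectively):

    Fuzziness auto = Fuzziness.build("AUTO");        // returns the AUTO singleton
    int a = auto.asDistance("ab");                   // 0 - short terms must match exactly
    int b = auto.asDistance("hello");                // 1
    int c = auto.asDistance("elasticsearch");        // 2
    int d = Fuzziness.fromEdits(2).asDistance("x");  // 2, independent of term length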
diff --git a/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java b/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java
new file mode 100644
index 0000000..81a2934
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+
+import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
+
+/** Utility methods to get memory sizes. */
+public enum MemorySizeValue {
+ ;
+
+ /** Parse the provided string as a memory size. This method either accepts absolute values such as
+ * <tt>42</tt> (default assumed unit is byte) or <tt>2mb</tt>, or percentages of the heap size: if
+ * the heap is 1G, <tt>10%</tt> will be parsed as <tt>100mb</tt>. */
+ public static ByteSizeValue parseBytesSizeValueOrHeapRatio(String sValue) {
+ if (sValue.endsWith("%")) {
+ final String percentAsString = sValue.substring(0, sValue.length() - 1);
+ try {
+ final double percent = Double.parseDouble(percentAsString);
+ if (percent < 0 || percent > 100) {
+ throw new ElasticsearchParseException("Percentage should be in [0-100], got " + percentAsString);
+ }
+ return new ByteSizeValue((long) ((percent / 100) * JvmInfo.jvmInfo().getMem().getHeapMax().bytes()), ByteSizeUnit.BYTES);
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchParseException("Failed to parse [" + percentAsString + "] as a double", e);
+ }
+ } else {
+ return parseBytesSizeValue(sValue);
+ }
+ }
+}
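For example (absolute values are delegated to ByteSizeValue.parseBytesSizeValue, statically imported above):

    ByteSizeValue fixed = MemorySizeValue.parseBytesSizeValueOrHeapRatio("512mb");
    // with a 1gb max heap this resolves to roughly 100mb:
    ByteSizeValue ratio = MemorySizeValue.parseBytesSizeValueOrHeapRatio("10%");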
diff --git a/src/main/java/org/elasticsearch/common/unit/Percent.java b/src/main/java/org/elasticsearch/common/unit/Percent.java
new file mode 100644
index 0000000..4000b22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/Percent.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * A simple percentage value that can be streamed and serialized.
+ */
+public class Percent implements Streamable, Serializable {
+
+ private double value;
+
+ public Percent(double value) {
+ this.value = value;
+ }
+
+ public double value() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return format(value);
+ }
+
+ public static String format(double value) {
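+ // render the value as a percentage, truncated (not rounded) to one decimal place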
+ String p = String.valueOf(value * 100.0);
+ int ix = p.indexOf(".") + 1;
+ return p.substring(0, ix) + p.substring(ix, ix + 1) + "%";
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ value = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeDouble(value);
+ }
+}
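Since format truncates to one decimal place rather than rounding, for example:

    new Percent(0.25).toString(); // "25.0%"
    Percent.format(0.5);          // "50.0%"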
diff --git a/src/main/java/org/elasticsearch/common/unit/SizeUnit.java b/src/main/java/org/elasticsearch/common/unit/SizeUnit.java
new file mode 100644
index 0000000..984931d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/SizeUnit.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+/**
+ * Units for {@link SizeValue} based on decimal (factor 1000) prefixes.
+ */
+public enum SizeUnit {
+ SINGLE {
+ @Override
+ public long toSingles(long size) {
+ return size;
+ }
+
+ @Override
+ public long toKilo(long size) {
+ return size / (C1 / C0);
+ }
+
+ @Override
+ public long toMega(long size) {
+ return size / (C2 / C0);
+ }
+
+ @Override
+ public long toGiga(long size) {
+ return size / (C3 / C0);
+ }
+
+ @Override
+ public long toTera(long size) {
+ return size / (C4 / C0);
+ }
+
+ @Override
+ public long toPeta(long size) {
+ return size / (C5 / C0);
+ }
+ },
+ KILO {
+ @Override
+ public long toSingles(long size) {
+ return x(size, C1 / C0, MAX / (C1 / C0));
+ }
+
+ @Override
+ public long toKilo(long size) {
+ return size;
+ }
+
+ @Override
+ public long toMega(long size) {
+ return size / (C2 / C1);
+ }
+
+ @Override
+ public long toGiga(long size) {
+ return size / (C3 / C1);
+ }
+
+ @Override
+ public long toTera(long size) {
+ return size / (C4 / C1);
+ }
+
+ @Override
+ public long toPeta(long size) {
+ return size / (C5 / C1);
+ }
+ },
+ MEGA {
+ @Override
+ public long toSingles(long size) {
+ return x(size, C2 / C0, MAX / (C2 / C0));
+ }
+
+ @Override
+ public long toKilo(long size) {
+ return x(size, C2 / C1, MAX / (C2 / C1));
+ }
+
+ @Override
+ public long toMega(long size) {
+ return size;
+ }
+
+ @Override
+ public long toGiga(long size) {
+ return size / (C3 / C2);
+ }
+
+ @Override
+ public long toTera(long size) {
+ return size / (C4 / C2);
+ }
+
+ @Override
+ public long toPeta(long size) {
+ return size / (C5 / C2);
+ }
+ },
+ GIGA {
+ @Override
+ public long toSingles(long size) {
+ return x(size, C3 / C0, MAX / (C3 / C0));
+ }
+
+ @Override
+ public long toKilo(long size) {
+ return x(size, C3 / C1, MAX / (C3 / C1));
+ }
+
+ @Override
+ public long toMega(long size) {
+ return x(size, C3 / C2, MAX / (C3 / C2));
+ }
+
+ @Override
+ public long toGiga(long size) {
+ return size;
+ }
+
+ @Override
+ public long toTera(long size) {
+ return size / (C4 / C3);
+ }
+
+ @Override
+ public long toPeta(long size) {
+ return size / (C5 / C3);
+ }
+ },
+ TERA {
+ @Override
+ public long toSingles(long size) {
+ return x(size, C4 / C0, MAX / (C4 / C0));
+ }
+
+ @Override
+ public long toKilo(long size) {
+ return x(size, C4 / C1, MAX / (C4 / C1));
+ }
+
+ @Override
+ public long toMega(long size) {
+ return x(size, C4 / C2, MAX / (C4 / C2));
+ }
+
+ @Override
+ public long toGiga(long size) {
+ return x(size, C4 / C3, MAX / (C4 / C3));
+ }
+
+ @Override
+ public long toTera(long size) {
+ return size;
+ }
+
+ @Override
+ public long toPeta(long size) {
+ return size / (C5 / C4);
+ }
+ },
+ PETA {
+ @Override
+ public long toSingles(long size) {
+ return x(size, C5 / C0, MAX / (C5 / C0));
+ }
+
+ @Override
+ public long toKilo(long size) {
+ return x(size, C5 / C1, MAX / (C5 / C1));
+ }
+
+ @Override
+ public long toMega(long size) {
+ return x(size, C5 / C2, MAX / (C5 / C2));
+ }
+
+ @Override
+ public long toGiga(long size) {
+ return x(size, C5 / C3, MAX / (C5 / C3));
+ }
+
+ @Override
+ public long toTera(long size) {
+ return x(size, C5 / C4, MAX / (C5 / C4));
+ }
+
+ @Override
+ public long toPeta(long size) {
+ return size;
+ }
+ };
+
+ static final long C0 = 1L;
+ static final long C1 = C0 * 1000L;
+ static final long C2 = C1 * 1000L;
+ static final long C3 = C2 * 1000L;
+ static final long C4 = C3 * 1000L;
+ static final long C5 = C4 * 1000L;
+
+ static final long MAX = Long.MAX_VALUE;
+
+ /**
+ * Scale d by m, checking for overflow.
+ * This has a short name to make above code more readable.
+ */
+ static long x(long d, long m, long over) {
+ if (d > over) return Long.MAX_VALUE;
+ if (d < -over) return Long.MIN_VALUE;
+ return d * m;
+ }
+
+
+ public abstract long toSingles(long size);
+
+ public abstract long toKilo(long size);
+
+ public abstract long toMega(long size);
+
+ public abstract long toGiga(long size);
+
+ public abstract long toTera(long size);
+
+ public abstract long toPeta(long size);
+} \ No newline at end of file
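Conversions use factors of 1000 (see the C0..C5 constants) and truncate when converting to a larger unit; for example:

    long singles = SizeUnit.MEGA.toSingles(3);   // 3000000
    long kilo    = SizeUnit.SINGLE.toKilo(1500); // 1 (integer division)
    long tera    = SizeUnit.PETA.toTera(2);      // 2000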
diff --git a/src/main/java/org/elasticsearch/common/unit/SizeValue.java b/src/main/java/org/elasticsearch/common/unit/SizeValue.java
new file mode 100644
index 0000000..6e2c638
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/SizeValue.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * A numeric quantity (such as a count) together with a {@link SizeUnit}.
+ */
+public class SizeValue implements Serializable, Streamable {
+
+ private long size;
+
+ private SizeUnit sizeUnit;
+
+ private SizeValue() {
+
+ }
+
+ public SizeValue(long singles) {
+ this(singles, SizeUnit.SINGLE);
+ }
+
+ public SizeValue(long size, SizeUnit sizeUnit) {
+ this.size = size;
+ this.sizeUnit = sizeUnit;
+ }
+
+ public long singles() {
+ return sizeUnit.toSingles(size);
+ }
+
+ public long getSingles() {
+ return singles();
+ }
+
+ public long kilo() {
+ return sizeUnit.toKilo(size);
+ }
+
+ public long getKilo() {
+ return kilo();
+ }
+
+ public long mega() {
+ return sizeUnit.toMega(size);
+ }
+
+ public long getMega() {
+ return mega();
+ }
+
+ public long giga() {
+ return sizeUnit.toGiga(size);
+ }
+
+ public long getGiga() {
+ return giga();
+ }
+
+ public long tera() {
+ return sizeUnit.toTera(size);
+ }
+
+ public long getTera() {
+ return tera();
+ }
+
+ public long peta() {
+ return sizeUnit.toPeta(size);
+ }
+
+ public long getPeta() {
+ return peta();
+ }
+
+ public double kiloFrac() {
+ return ((double) singles()) / SizeUnit.C1;
+ }
+
+ public double getKiloFrac() {
+ return kiloFrac();
+ }
+
+ public double megaFrac() {
+ return ((double) singles()) / SizeUnit.C2;
+ }
+
+ public double getMegaFrac() {
+ return megaFrac();
+ }
+
+ public double gigaFrac() {
+ return ((double) singles()) / SizeUnit.C3;
+ }
+
+ public double getGigaFrac() {
+ return gigaFrac();
+ }
+
+ public double teraFrac() {
+ return ((double) singles()) / SizeUnit.C4;
+ }
+
+ public double getTeraFrac() {
+ return teraFrac();
+ }
+
+ public double petaFrac() {
+ return ((double) singles()) / SizeUnit.C5;
+ }
+
+ public double getPetaFrac() {
+ return petaFrac();
+ }
+
+ @Override
+ public String toString() {
+ long singles = singles();
+ double value = singles;
+ String suffix = "";
+ if (singles >= SizeUnit.C5) {
+ value = petaFrac();
+ suffix = "p";
+ } else if (singles >= SizeUnit.C4) {
+ value = teraFrac();
+ suffix = "t";
+ } else if (singles >= SizeUnit.C3) {
+ value = gigaFrac();
+ suffix = "g";
+ } else if (singles >= SizeUnit.C2) {
+ value = megaFrac();
+ suffix = "m";
+ } else if (singles >= SizeUnit.C1) {
+ value = kiloFrac();
+ suffix = "k";
+ }
+
+ return Strings.format1Decimals(value, suffix);
+ }
+
+ public static SizeValue parseSizeValue(String sValue) throws ElasticsearchParseException {
+ return parseSizeValue(sValue, null);
+ }
+
+ public static SizeValue parseSizeValue(String sValue, SizeValue defaultValue) throws ElasticsearchParseException {
+ if (sValue == null) {
+ return defaultValue;
+ }
+ long singles;
+ try {
+ if (sValue.endsWith("b")) {
+ singles = Long.parseLong(sValue.substring(0, sValue.length() - 1));
+ } else if (sValue.endsWith("k") || sValue.endsWith("K")) {
+ singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C1);
+ } else if (sValue.endsWith("m") || sValue.endsWith("M")) {
+ singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C2);
+ } else if (sValue.endsWith("g") || sValue.endsWith("G")) {
+ singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C3);
+ } else if (sValue.endsWith("t") || sValue.endsWith("T")) {
+ singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C4);
+ } else if (sValue.endsWith("p") || sValue.endsWith("P")) {
+ singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C5);
+ } else {
+ singles = Long.parseLong(sValue);
+ }
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchParseException("Failed to parse [" + sValue + "]", e);
+ }
+ return new SizeValue(singles, SizeUnit.SINGLE);
+ }
+
+ public static SizeValue readSizeValue(StreamInput in) throws IOException {
+ SizeValue sizeValue = new SizeValue();
+ sizeValue.readFrom(in);
+ return sizeValue;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ size = in.readVLong();
+ sizeUnit = SizeUnit.SINGLE;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(singles());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ SizeValue sizeValue = (SizeValue) o;
+
+ if (size != sizeValue.size) return false;
+ if (sizeUnit != sizeValue.sizeUnit) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (int) (size ^ (size >>> 32));
+ result = 31 * result + (sizeUnit != null ? sizeUnit.hashCode() : 0);
+ return result;
+ }
+} \ No newline at end of file
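A parse/format round trip under the rules above:

    SizeValue v = SizeValue.parseSizeValue("2.5m"); // 2.5 million singles
    long singles = v.singles();                     // 2500000
    double kilo  = v.kiloFrac();                    // 2500.0
    String s     = v.toString();                    // "2.5m"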
diff --git a/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/src/main/java/org/elasticsearch/common/unit/TimeValue.java
new file mode 100644
index 0000000..5ae3a1c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/unit/TimeValue.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.joda.time.Period;
+import org.joda.time.PeriodType;
+import org.joda.time.format.PeriodFormat;
+import org.joda.time.format.PeriodFormatter;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A duration together with a {@link TimeUnit}, including parsing and formatting helpers.
+ */
+public class TimeValue implements Serializable, Streamable {
+
+ public static TimeValue timeValueNanos(long nanos) {
+ return new TimeValue(nanos, TimeUnit.NANOSECONDS);
+ }
+
+ public static TimeValue timeValueMillis(long millis) {
+ return new TimeValue(millis, TimeUnit.MILLISECONDS);
+ }
+
+ public static TimeValue timeValueSeconds(long seconds) {
+ return new TimeValue(seconds, TimeUnit.SECONDS);
+ }
+
+ public static TimeValue timeValueMinutes(long minutes) {
+ return new TimeValue(minutes, TimeUnit.MINUTES);
+ }
+
+ public static TimeValue timeValueHours(long hours) {
+ return new TimeValue(hours, TimeUnit.HOURS);
+ }
+
+ private long duration;
+
+ private TimeUnit timeUnit;
+
+ private TimeValue() {
+
+ }
+
+ public TimeValue(long millis) {
+ this(millis, TimeUnit.MILLISECONDS);
+ }
+
+ public TimeValue(long duration, TimeUnit timeUnit) {
+ this.duration = duration;
+ this.timeUnit = timeUnit;
+ }
+
+ public long nanos() {
+ return timeUnit.toNanos(duration);
+ }
+
+ public long getNanos() {
+ return nanos();
+ }
+
+ public long micros() {
+ return timeUnit.toMicros(duration);
+ }
+
+ public long getMicros() {
+ return micros();
+ }
+
+ public long millis() {
+ return timeUnit.toMillis(duration);
+ }
+
+ public long getMillis() {
+ return millis();
+ }
+
+ public long seconds() {
+ return timeUnit.toSeconds(duration);
+ }
+
+ public long getSeconds() {
+ return seconds();
+ }
+
+ public long minutes() {
+ return timeUnit.toMinutes(duration);
+ }
+
+ public long getMinutes() {
+ return minutes();
+ }
+
+ public long hours() {
+ return timeUnit.toHours(duration);
+ }
+
+ public long getHours() {
+ return hours();
+ }
+
+ public long days() {
+ return timeUnit.toDays(duration);
+ }
+
+ public long getDays() {
+ return days();
+ }
+
+ public double microsFrac() {
+ return ((double) nanos()) / C1;
+ }
+
+ public double getMicrosFrac() {
+ return microsFrac();
+ }
+
+ public double millisFrac() {
+ return ((double) nanos()) / C2;
+ }
+
+ public double getMillisFrac() {
+ return millisFrac();
+ }
+
+ public double secondsFrac() {
+ return ((double) nanos()) / C3;
+ }
+
+ public double getSecondsFrac() {
+ return secondsFrac();
+ }
+
+ public double minutesFrac() {
+ return ((double) nanos()) / C4;
+ }
+
+ public double getMinutesFrac() {
+ return minutesFrac();
+ }
+
+ public double hoursFrac() {
+ return ((double) nanos()) / C5;
+ }
+
+ public double getHoursFrac() {
+ return hoursFrac();
+ }
+
+ public double daysFrac() {
+ return ((double) nanos()) / C6;
+ }
+
+ public double getDaysFrac() {
+ return daysFrac();
+ }
+
+ private final PeriodFormatter defaultFormatter = PeriodFormat.getDefault()
+ .withParseType(PeriodType.standard());
+
+ public String format() {
+ Period period = new Period(millis());
+ return defaultFormatter.print(period);
+ }
+
+ public String format(PeriodType type) {
+ Period period = new Period(millis());
+ return PeriodFormat.getDefault().withParseType(type).print(period);
+ }
+
+ @Override
+ public String toString() {
+ if (duration < 0) {
+ return Long.toString(duration);
+ }
+ long nanos = nanos();
+ if (nanos == 0) {
+ return "0s";
+ }
+ double value = nanos;
+ String suffix = "nanos";
+ if (nanos >= C6) {
+ value = daysFrac();
+ suffix = "d";
+ } else if (nanos >= C5) {
+ value = hoursFrac();
+ suffix = "h";
+ } else if (nanos >= C4) {
+ value = minutesFrac();
+ suffix = "m";
+ } else if (nanos >= C3) {
+ value = secondsFrac();
+ suffix = "s";
+ } else if (nanos >= C2) {
+ value = millisFrac();
+ suffix = "ms";
+ } else if (nanos >= C1) {
+ value = microsFrac();
+ suffix = "micros";
+ }
+ return Strings.format1Decimals(value, suffix);
+ }
+
+ public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue) {
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ long millis;
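+ // note: a trailing uppercase "S" is treated as plain milliseconds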
+ if (sValue.endsWith("S")) {
+ millis = Long.parseLong(sValue.substring(0, sValue.length() - 1));
+ } else if (sValue.endsWith("ms")) {
+ millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 2)));
+ } else if (sValue.endsWith("s")) {
+ millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 1000);
+ } else if (sValue.endsWith("m")) {
+ millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 60 * 1000);
+ } else if (sValue.endsWith("H") || sValue.endsWith("h")) {
+ millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 60 * 60 * 1000);
+ } else if (sValue.endsWith("d")) {
+ millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 24 * 60 * 60 * 1000);
+ } else if (sValue.endsWith("w")) {
+ millis = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * 7 * 24 * 60 * 60 * 1000);
+ } else {
+ millis = Long.parseLong(sValue);
+ }
+ return new TimeValue(millis, TimeUnit.MILLISECONDS);
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchParseException("Failed to parse [" + sValue + "]", e);
+ }
+ }
+
+ static final long C0 = 1L;
+ static final long C1 = C0 * 1000L;
+ static final long C2 = C1 * 1000L;
+ static final long C3 = C2 * 1000L;
+ static final long C4 = C3 * 60L;
+ static final long C5 = C4 * 60L;
+ static final long C6 = C5 * 24L;
+
+ public static TimeValue readTimeValue(StreamInput in) throws IOException {
+ TimeValue timeValue = new TimeValue();
+ timeValue.readFrom(in);
+ return timeValue;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ duration = in.readLong();
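+ // the wire format always carries nanos (see writeTo)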
+ timeUnit = TimeUnit.NANOSECONDS;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(nanos());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ TimeValue timeValue = (TimeValue) o;
+
+ if (duration != timeValue.duration) return false;
+ if (timeUnit != timeValue.timeUnit) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (int) (duration ^ (duration >>> 32));
+ result = 31 * result + (timeUnit != null ? timeUnit.hashCode() : 0);
+ return result;
+ }
+}
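A sketch of the parse/format behavior above (whole-unit accessors truncate):

    TimeValue t = TimeValue.parseTimeValue("90s", TimeValue.timeValueSeconds(30));
    long millis   = t.millis();   // 90000
    long minutes  = t.minutes();  // 1 (truncated)
    String pretty = t.toString(); // "1.5m"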
diff --git a/src/main/java/org/elasticsearch/common/util/AbstractArray.java b/src/main/java/org/elasticsearch/common/util/AbstractArray.java
new file mode 100644
index 0000000..f075ca5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/AbstractArray.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasable;
+
+abstract class AbstractArray implements Releasable {
+
+ public final PageCacheRecycler recycler;
+ public final boolean clearOnResize;
+ private boolean released = false;
+
+ AbstractArray(PageCacheRecycler recycler, boolean clearOnResize) {
+ this.recycler = recycler;
+ this.clearOnResize = clearOnResize;
+ }
+
+ @Override
+ public boolean release() {
+ assert !released : "double release";
+ released = true;
+ return true; // nothing to release by default
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java b/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java
new file mode 100644
index 0000000..7b27621
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/AbstractBigArray.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.recycler.Recycler;
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+
+/** Common implementation for array lists that slice data into fixed-size blocks. */
+abstract class AbstractBigArray extends AbstractArray {
+
+ private Recycler.V<?>[] cache;
+
+ private final int pageShift;
+ private final int pageMask;
+ protected long size;
+
+ protected AbstractBigArray(int pageSize, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(recycler, clearOnResize);
+ Preconditions.checkArgument(pageSize >= 128, "pageSize must be >= 128");
+ Preconditions.checkArgument((pageSize & (pageSize - 1)) == 0, "pageSize must be a power of two");
+ this.pageShift = Integer.numberOfTrailingZeros(pageSize);
+ this.pageMask = pageSize - 1;
+ size = 0;
+ if (this.recycler != null) {
+ cache = new Recycler.V<?>[16];
+ } else {
+ cache = null;
+ }
+ }
+
+ final int numPages(long capacity) {
+ final long numPages = (capacity + pageMask) >>> pageShift;
+ Preconditions.checkArgument(numPages <= Integer.MAX_VALUE, "pageSize=" + (pageMask + 1) + " is too small for such a capacity: " + capacity);
+ return (int) numPages;
+ }
+
+ final int pageSize() {
+ return pageMask + 1;
+ }
+
+ final int pageIndex(long index) {
+ return (int) (index >>> pageShift);
+ }
+
+ final int indexInPage(long index) {
+ return (int) (index & pageMask);
+ }
+
+ public final long size() {
+ return size;
+ }
+
+ protected abstract int numBytesPerElement();
+
+ public final long sizeInBytes() {
+ // rough approximation: we only account for the size of the values, not the overhead of the array objects
+ return ((long) pageIndex(size - 1) + 1) * pageSize() * numBytesPerElement();
+ }
+
+ private static <T> T[] grow(T[] array, int minSize) {
+ if (array.length < minSize) {
+ final int newLen = ArrayUtil.oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+ array = Arrays.copyOf(array, newLen);
+ }
+ return array;
+ }
+
+ private <T> T registerNewPage(Recycler.V<T> v, int page, int expectedSize) {
+ cache = grow(cache, page + 1);
+ assert cache[page] == null;
+ cache[page] = v;
+ assert Array.getLength(v.v()) == expectedSize;
+ return v.v();
+ }
+
+ protected final byte[] newBytePage(int page) {
+ if (recycler != null) {
+ final Recycler.V<byte[]> v = recycler.bytePage(clearOnResize);
+ return registerNewPage(v, page, BigArrays.BYTE_PAGE_SIZE);
+ } else {
+ return new byte[BigArrays.BYTE_PAGE_SIZE];
+ }
+ }
+
+ protected final int[] newIntPage(int page) {
+ if (recycler != null) {
+ final Recycler.V<int[]> v = recycler.intPage(clearOnResize);
+ return registerNewPage(v, page, BigArrays.INT_PAGE_SIZE);
+ } else {
+ return new int[BigArrays.INT_PAGE_SIZE];
+ }
+ }
+
+ protected final long[] newLongPage(int page) {
+ if (recycler != null) {
+ final Recycler.V<long[]> v = recycler.longPage(clearOnResize);
+ return registerNewPage(v, page, BigArrays.LONG_PAGE_SIZE);
+ } else {
+ return new long[BigArrays.LONG_PAGE_SIZE];
+ }
+ }
+
+ protected final double[] newDoublePage(int page) {
+ if (recycler != null) {
+ final Recycler.V<double[]> v = recycler.doublePage(clearOnResize);
+ return registerNewPage(v, page, BigArrays.DOUBLE_PAGE_SIZE);
+ } else {
+ return new double[BigArrays.DOUBLE_PAGE_SIZE];
+ }
+ }
+
+ protected final Object[] newObjectPage(int page) {
+ if (recycler != null) {
+ final Recycler.V<Object[]> v = recycler.objectPage();
+ return registerNewPage(v, page, BigArrays.OBJECT_PAGE_SIZE);
+ } else {
+ return new Object[BigArrays.OBJECT_PAGE_SIZE];
+ }
+ }
+
+ protected final void releasePage(int page) {
+ if (recycler != null) {
+ cache[page].release();
+ cache[page] = null;
+ }
+ }
+
+ @Override
+ public final boolean release() {
+ super.release();
+ if (recycler != null) {
+ Releasables.release(cache);
+ cache = null;
+ }
+ return true;
+ }
+
+}
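Because pageSize is a power of two, pageIndex and indexInPage reduce to a shift and a mask; for example, with a page size of 1024:

    int pageShift = Integer.numberOfTrailingZeros(1024); // 10
    int pageMask  = 1024 - 1;                            // 0x3FF
    long index  = 5000;
    int page    = (int) (index >>> pageShift); // 4
    int offset  = (int) (index & pageMask);    // 904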
diff --git a/src/main/java/org/elasticsearch/common/util/BigArray.java b/src/main/java/org/elasticsearch/common/util/BigArray.java
new file mode 100644
index 0000000..418e76b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigArray.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.common.lease.Releasable;
+
+/** Base abstraction of an array. */
+interface BigArray extends Releasable {
+
+ /** Return the length of this array. */
+ public long size();
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/BigArrays.java b/src/main/java/org/elasticsearch/common/util/BigArrays.java
new file mode 100644
index 0000000..77a6ee5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigArrays.java
@@ -0,0 +1,459 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+
+import java.util.Arrays;
+
+/** Utility class to work with arrays. */
+public enum BigArrays {
+ ;
+
+ /** Page size in bytes: 16KB */
+ public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
+ public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE;
+ public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT;
+ public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG;
+ public static final int DOUBLE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_DOUBLE;
+ public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+
+ /** Returns the next size to grow to when working with parallel arrays that may have different page sizes or numbers of bytes per element. */
+ public static long overSize(long minTargetSize) {
+ return overSize(minTargetSize, PAGE_SIZE_IN_BYTES / 8, 1);
+ }
+
+ /** Return the next size to grow to that is &gt;= <code>minTargetSize</code>.
+ * Inspired from {@link ArrayUtil#oversize(int, int)} and adapted to play nicely with paging. */
+ public static long overSize(long minTargetSize, int pageSize, int bytesPerElement) {
+ Preconditions.checkArgument(minTargetSize >= 0, "minTargetSize must be >= 0");
+ Preconditions.checkArgument(pageSize > 0, "pageSize must be > 0");
+ Preconditions.checkArgument(bytesPerElement > 0, "bytesPerElement must be > 0");
+
+ long newSize;
+ if (minTargetSize < pageSize) {
+ newSize = ArrayUtil.oversize((int)minTargetSize, bytesPerElement);
+ } else {
+ newSize = minTargetSize + (minTargetSize >>> 3);
+ }
+
+ if (newSize > pageSize) {
+ // round to a multiple of pageSize
+ newSize = newSize - (newSize % pageSize) + pageSize;
+ assert newSize % pageSize == 0;
+ }
+
+ return newSize;
+ }
+
+ static boolean indexIsInt(long index) {
+ return index == (int) index;
+ }
+
+ private static class ByteArrayWrapper extends AbstractArray implements ByteArray {
+
+ private final byte[] array;
+
+ ByteArrayWrapper(byte[] array, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(recycler, clearOnResize);
+ this.array = array;
+ }
+
+ @Override
+ public long size() {
+ return array.length;
+ }
+
+ @Override
+ public byte get(long index) {
+ assert indexIsInt(index);
+ return array[(int) index];
+ }
+
+ @Override
+ public byte set(long index, byte value) {
+ assert indexIsInt(index);
+ final byte ret = array[(int) index];
+ array[(int) index] = value;
+ return ret;
+ }
+
+ @Override
+ public void get(long index, int len, BytesRef ref) {
+ assert indexIsInt(index);
+ ref.bytes = array;
+ ref.offset = (int) index;
+ ref.length = len;
+ }
+
+ @Override
+ public void set(long index, byte[] buf, int offset, int len) {
+ assert indexIsInt(index);
+ System.arraycopy(buf, offset, array, (int) index, len);
+ }
+
+ }
+
+ private static class IntArrayWrapper extends AbstractArray implements IntArray {
+
+ private final int[] array;
+
+ IntArrayWrapper(int[] array, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(recycler, clearOnResize);
+ this.array = array;
+ }
+
+ @Override
+ public long size() {
+ return array.length;
+ }
+
+ @Override
+ public int get(long index) {
+ assert indexIsInt(index);
+ return array[(int) index];
+ }
+
+ @Override
+ public int set(long index, int value) {
+ assert indexIsInt(index);
+ final int ret = array[(int) index];
+ array[(int) index] = value;
+ return ret;
+ }
+
+ @Override
+ public int increment(long index, int inc) {
+ assert indexIsInt(index);
+ return array[(int) index] += inc;
+ }
+
+ }
+
+ private static class LongArrayWrapper extends AbstractArray implements LongArray {
+
+ private final long[] array;
+
+ LongArrayWrapper(long[] array, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(recycler, clearOnResize);
+ this.array = array;
+ }
+
+ @Override
+ public long size() {
+ return array.length;
+ }
+
+ @Override
+ public long get(long index) {
+ assert indexIsInt(index);
+ return array[(int) index];
+ }
+
+ @Override
+ public long set(long index, long value) {
+ assert indexIsInt(index);
+ final long ret = array[(int) index];
+ array[(int) index] = value;
+ return ret;
+ }
+
+ @Override
+ public long increment(long index, long inc) {
+ assert indexIsInt(index);
+ return array[(int) index] += inc;
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, long value) {
+ assert indexIsInt(fromIndex);
+ assert indexIsInt(toIndex);
+ Arrays.fill(array, (int) fromIndex, (int) toIndex, value);
+ }
+ }
+
+ private static class DoubleArrayWrapper extends AbstractArray implements DoubleArray {
+
+ private final double[] array;
+
+ DoubleArrayWrapper(double[] array, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(recycler, clearOnResize);
+ this.array = array;
+ }
+
+ @Override
+ public long size() {
+ return array.length;
+ }
+
+ @Override
+ public double get(long index) {
+ assert indexIsInt(index);
+ return array[(int) index];
+ }
+
+ @Override
+ public double set(long index, double value) {
+ assert indexIsInt(index);
+ double ret = array[(int) index];
+ array[(int) index] = value;
+ return ret;
+ }
+
+ @Override
+ public double increment(long index, double inc) {
+ assert indexIsInt(index);
+ return array[(int) index] += inc;
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, double value) {
+ assert indexIsInt(fromIndex);
+ assert indexIsInt(toIndex);
+ Arrays.fill(array, (int) fromIndex, (int) toIndex, value);
+ }
+
+ }
+
+ private static class ObjectArrayWrapper<T> extends AbstractArray implements ObjectArray<T> {
+
+ private final Object[] array;
+
+ ObjectArrayWrapper(Object[] array, PageCacheRecycler recycler) {
+ super(recycler, true);
+ this.array = array;
+ }
+
+ @Override
+ public long size() {
+ return array.length;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public T get(long index) {
+ assert indexIsInt(index);
+ return (T) array[(int) index];
+ }
+
+ @Override
+ public T set(long index, T value) {
+ assert indexIsInt(index);
+ @SuppressWarnings("unchecked")
+ T ret = (T) array[(int) index];
+ array[(int) index] = value;
+ return ret;
+ }
+
+ }
+
+ /** Allocate a new {@link ByteArray} of the given capacity. */
+ public static ByteArray newByteArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ if (size <= BYTE_PAGE_SIZE) {
+ return new ByteArrayWrapper(new byte[(int) size], recycler, clearOnResize);
+ } else {
+ return new BigByteArray(size, recycler, clearOnResize);
+ }
+ }
+
+ /** Allocate a new {@link ByteArray} of the given capacity. */
+ public static ByteArray newByteArray(long size) {
+ return newByteArray(size, null, true);
+ }
+
+ /** Resize the array to the exact provided size. */
+ public static ByteArray resize(ByteArray array, long size) {
+ if (array instanceof BigByteArray) {
+ ((BigByteArray) array).resize(size);
+ return array;
+ } else {
+ AbstractArray arr = (AbstractArray) array;
+ final ByteArray newArray = newByteArray(size, arr.recycler, arr.clearOnResize);
+ final byte[] rawArray = ((ByteArrayWrapper) array).array;
+ newArray.set(0, rawArray, 0, (int) Math.min(rawArray.length, newArray.size()));
+ return newArray;
+ }
+ }
+
+ /** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
+ public static ByteArray grow(ByteArray array, long minSize) {
+ if (minSize <= array.size()) {
+ return array;
+ }
+ final long newSize = overSize(minSize, BYTE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_BYTE);
+ return resize(array, newSize);
+ }
+
+ /** Allocate a new {@link IntArray} of the given capacity. */
+ public static IntArray newIntArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ if (size <= INT_PAGE_SIZE) {
+ return new IntArrayWrapper(new int[(int) size], recycler, clearOnResize);
+ } else {
+ return new BigIntArray(size, recycler, clearOnResize);
+ }
+ }
+
+ /** Allocate a new {@link IntArray} of the given capacity. */
+ public static IntArray newIntArray(long size) {
+ return newIntArray(size, null, true);
+ }
+
+ /** Resize the array to the exact provided size. */
+ public static IntArray resize(IntArray array, long size) {
+ if (array instanceof BigIntArray) {
+ ((BigIntArray) array).resize(size);
+ return array;
+ } else {
+ AbstractArray arr = (AbstractArray) array;
+ final IntArray newArray = newIntArray(size, arr.recycler, arr.clearOnResize);
+ for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
+ newArray.set(i, array.get(i));
+ }
+ return newArray;
+ }
+ }
+
+ /** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
+ public static IntArray grow(IntArray array, long minSize) {
+ if (minSize <= array.size()) {
+ return array;
+ }
+ final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_INT);
+ return resize(array, newSize);
+ }
+
+ /** Allocate a new {@link LongArray} of the given capacity. */
+ public static LongArray newLongArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ if (size <= LONG_PAGE_SIZE) {
+ return new LongArrayWrapper(new long[(int) size], recycler, clearOnResize);
+ } else {
+ return new BigLongArray(size, recycler, clearOnResize);
+ }
+ }
+
+ /** Allocate a new {@link LongArray} of the given capacity. */
+ public static LongArray newLongArray(long size) {
+ return newLongArray(size, null, true);
+ }
+
+ /** Resize the array to the exact provided size. */
+ public static LongArray resize(LongArray array, long size) {
+ if (array instanceof BigLongArray) {
+ ((BigLongArray) array).resize(size);
+ return array;
+ } else {
+ AbstractArray arr = (AbstractArray) array;
+ final LongArray newArray = newLongArray(size, arr.recycler, arr.clearOnResize);
+ for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
+ newArray.set(i, array.get(i));
+ }
+ return newArray;
+ }
+ }
+
+ /** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
+ public static LongArray grow(LongArray array, long minSize) {
+ if (minSize <= array.size()) {
+ return array;
+ }
+ final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
+ return resize(array, newSize);
+ }
+
+ /** Allocate a new {@link DoubleArray} of the given capacity. */
+ public static DoubleArray newDoubleArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ if (size <= LONG_PAGE_SIZE) {
+ return new DoubleArrayWrapper(new double[(int) size], recycler, clearOnResize);
+ } else {
+ return new BigDoubleArray(size, recycler, clearOnResize);
+ }
+ }
+
+ /** Allocate a new {@link DoubleArray} of the given capacity. */
+ public static DoubleArray newDoubleArray(long size) {
+ return newDoubleArray(size, null, true);
+ }
+
+ /** Resize the array to the exact provided size. */
+ public static DoubleArray resize(DoubleArray array, long size) {
+ if (array instanceof BigDoubleArray) {
+ ((BigDoubleArray) array).resize(size);
+ return array;
+ } else {
+ AbstractArray arr = (AbstractArray) array;
+ final DoubleArray newArray = newDoubleArray(size, arr.recycler, arr.clearOnResize);
+ for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
+ newArray.set(i, array.get(i));
+ }
+ return newArray;
+ }
+ }
+
+ /** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
+ public static DoubleArray grow(DoubleArray array, long minSize) {
+ if (minSize <= array.size()) {
+ return array;
+ }
+ final long newSize = overSize(minSize, DOUBLE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_DOUBLE);
+ return resize(array, newSize);
+ }
+
+ /** Allocate a new {@link ObjectArray} of the given capacity. */
+ public static <T> ObjectArray<T> newObjectArray(long size, PageCacheRecycler recycler) {
+ if (size <= OBJECT_PAGE_SIZE) {
+ return new ObjectArrayWrapper<T>(new Object[(int) size], recycler);
+ } else {
+ return new BigObjectArray<T>(size, recycler);
+ }
+ }
+
+ /** Allocate a new {@link ObjectArray} of the given capacity. */
+ public static <T> ObjectArray<T> newObjectArray(long size) {
+ return newObjectArray(size, null);
+ }
+
+ /** Resize the array to the exact provided size. */
+ public static <T> ObjectArray<T> resize(ObjectArray<T> array, long size) {
+ if (array instanceof BigObjectArray) {
+ ((BigObjectArray<?>) array).resize(size);
+ return array;
+ } else {
+ final ObjectArray<T> newArray = newObjectArray(size, ((AbstractArray) array).recycler);
+ for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
+ newArray.set(i, array.get(i));
+ }
+ return newArray;
+ }
+ }
+
+ /** Grow an array to a size that is at least <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
+ public static <T> ObjectArray<T> grow(ObjectArray<T> array, long minSize) {
+ if (minSize <= array.size()) {
+ return array;
+ }
+ final long newSize = overSize(minSize, OBJECT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+ return resize(array, newSize);
+ }
+
+}
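
The hunk above completes a consistent allocate / resize / grow surface for each element type. As a reading aid, a minimal usage sketch — the demo class name and the concrete sizes are illustrative, not part of the patch, and assume the page-size constants keep small allocations on the single-page wrapper path:

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.LongArray;

    public class BigArraysDemo {
        public static void main(String[] args) {
            // Small sizes get a single-page wrapper, large ones the paged implementation.
            LongArray counts = BigArrays.newLongArray(100);
            counts.set(42, 7L);

            // grow() over-allocates via overSize(), so repeated grow() calls are
            // amortized O(1) per appended element.
            counts = BigArrays.grow(counts, 1000000);
            counts.increment(42, 1L); // now 8

            // resize() moves to the exact requested size, preserving the prefix.
            counts = BigArrays.resize(counts, 500000);
            System.out.println(counts.get(42)); // prints 8
        }
    }

Note that callers must keep the returned reference: for the wrapper implementations, resize() and grow() allocate and return a new array rather than mutating the old one in place.
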
diff --git a/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/src/main/java/org/elasticsearch/common/util/BigByteArray.java
new file mode 100644
index 0000000..84ee330
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigByteArray.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.util.BigArrays.BYTE_PAGE_SIZE;
+
+/**
+ * Byte array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+final class BigByteArray extends AbstractBigArray implements ByteArray {
+
+ private byte[][] pages;
+
+ /** Constructor. */
+ public BigByteArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(BYTE_PAGE_SIZE, recycler, clearOnResize);
+ this.size = size;
+ pages = new byte[numPages(size)][];
+ for (int i = 0; i < pages.length; ++i) {
+ pages[i] = newBytePage(i);
+ }
+ }
+
+ @Override
+ public byte get(long index) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage];
+ }
+
+ @Override
+ public byte set(long index, byte value) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ final byte[] page = pages[pageIndex];
+ final byte ret = page[indexInPage];
+ page[indexInPage] = value;
+ return ret;
+ }
+
+ @Override
+ public void get(long index, int len, BytesRef ref) {
+ assert index + len <= size();
+ int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ if (indexInPage + len <= pageSize()) {
+ ref.bytes = pages[pageIndex];
+ ref.offset = indexInPage;
+ ref.length = len;
+ } else {
+ ref.bytes = new byte[len];
+ ref.offset = 0;
+ ref.length = pageSize() - indexInPage;
+ System.arraycopy(pages[pageIndex], indexInPage, ref.bytes, 0, ref.length);
+ do {
+ ++pageIndex;
+ final int copyLength = Math.min(pageSize(), len - ref.length);
+ System.arraycopy(pages[pageIndex], 0, ref.bytes, ref.length, copyLength);
+ ref.length += copyLength;
+ } while (ref.length < len);
+ }
+ }
+
+ @Override
+ public void set(long index, byte[] buf, int offset, int len) {
+ assert index + len <= size();
+ int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ if (indexInPage + len <= pageSize()) {
+ System.arraycopy(buf, offset, pages[pageIndex], indexInPage, len);
+ } else {
+ int copyLen = pageSize() - indexInPage;
+ System.arraycopy(buf, offset, pages[pageIndex], indexInPage, copyLen);
+ do {
+ ++pageIndex;
+ offset += copyLen;
+ len -= copyLen;
+ copyLen = Math.min(len, pageSize());
+ System.arraycopy(buf, offset, pages[pageIndex], 0, copyLen);
+ } while (len > copyLen);
+ }
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_BYTE;
+ }
+
+ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
+ public void resize(long newSize) {
+ final int numPages = numPages(newSize);
+ if (numPages > pages.length) {
+ pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ }
+ for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
+ pages[i] = newBytePage(i);
+ }
+ for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
+ pages[i] = null;
+ releasePage(i);
+ }
+ this.size = newSize;
+ }
+
+}
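
The notable subtlety in this class is get(long, int, BytesRef): a slice contained in a single page is handed out by reference to the page itself, while a slice crossing a page boundary is copied into a fresh buffer segment by segment. A self-contained sketch of that cross-page copy over a plain byte[][], with a toy page size of 4 (the real class derives its page size from BYTE_PAGE_SIZE):

    import java.util.Arrays;

    public class CrossPageCopyDemo {
        public static void main(String[] args) {
            byte[][] pages = { {0, 1, 2, 3}, {4, 5, 6, 7} };
            final int pageSize = 4;
            int index = 2, len = 4; // spans pages[0][2..3] and pages[1][0..1]

            byte[] out = new byte[len];
            int page = index / pageSize;
            int copied = pageSize - (index % pageSize);  // head segment
            System.arraycopy(pages[page], index % pageSize, out, 0, copied);
            while (copied < len) {                       // remaining pages
                ++page;
                int chunk = Math.min(pageSize, len - copied);
                System.arraycopy(pages[page], 0, out, copied, chunk);
                copied += chunk;
            }
            System.out.println(Arrays.toString(out)); // [2, 3, 4, 5]
        }
    }
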
diff --git a/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java
new file mode 100644
index 0000000..e39b38f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.util.BigArrays.DOUBLE_PAGE_SIZE;
+
+/**
+ * Double array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+final class BigDoubleArray extends AbstractBigArray implements DoubleArray {
+
+ private double[][] pages;
+
+ /** Constructor. */
+ public BigDoubleArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(DOUBLE_PAGE_SIZE, recycler, clearOnResize);
+ this.size = size;
+ pages = new double[numPages(size)][];
+ for (int i = 0; i < pages.length; ++i) {
+ pages[i] = newDoublePage(i);
+ }
+ }
+
+ @Override
+ public double get(long index) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage];
+ }
+
+ @Override
+ public double set(long index, double value) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ final double[] page = pages[pageIndex];
+ final double ret = page[indexInPage];
+ page[indexInPage] = value;
+ return ret;
+ }
+
+ @Override
+ public double increment(long index, double inc) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage] += inc;
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_DOUBLE;
+ }
+
+ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
+ public void resize(long newSize) {
+ final int numPages = numPages(newSize);
+ if (numPages > pages.length) {
+ pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ }
+ for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
+ pages[i] = newDoublePage(i);
+ }
+ for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
+ pages[i] = null;
+ releasePage(i);
+ }
+ this.size = newSize;
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, double value) {
+ Preconditions.checkArgument(fromIndex <= toIndex);
+ final int fromPage = pageIndex(fromIndex);
+ final int toPage = pageIndex(toIndex - 1);
+ if (fromPage == toPage) {
+ Arrays.fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value);
+ } else {
+ Arrays.fill(pages[fromPage], indexInPage(fromIndex), pages[fromPage].length, value);
+ for (int i = fromPage + 1; i < toPage; ++i) {
+ Arrays.fill(pages[i], value);
+ }
+ Arrays.fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value);
+ }
+ }
+
+}
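
fill() decomposes the half-open range [fromIndex, toIndex) into a head segment, whole middle pages, and a tail segment, which is why both page bounds are computed from toIndex - 1. A usage sketch; the array length is illustrative, chosen large enough to exercise the paged implementation under the default page size:

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.DoubleArray;

    public class DoubleFillDemo {
        public static void main(String[] args) {
            DoubleArray sums = BigArrays.newDoubleArray(100000);
            sums.fill(0, sums.size(), 1.0); // head page, full middle pages, tail page
            sums.increment(99999, 0.5);
            System.out.println(sums.get(99999)); // 1.5
        }
    }
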
diff --git a/src/main/java/org/elasticsearch/common/util/BigDoubleArrayList.java b/src/main/java/org/elasticsearch/common/util/BigDoubleArrayList.java
new file mode 100644
index 0000000..662d465
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigDoubleArrayList.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+
+import java.util.Arrays;
+
+/**
+ * Double array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+public final class BigDoubleArrayList extends AbstractBigArray {
+
+ /**
+ * Default page size, 16KB of memory per page.
+ */
+ private static final int DEFAULT_PAGE_SIZE = 1 << 11;
+
+ private double[][] pages;
+
+ public BigDoubleArrayList(int pageSize, long initialCapacity) {
+ super(pageSize, null, true);
+ pages = new double[numPages(initialCapacity)][];
+ }
+
+ public BigDoubleArrayList(long initialCapacity) {
+ this(DEFAULT_PAGE_SIZE, initialCapacity);
+ }
+
+ public BigDoubleArrayList() {
+ this(1024);
+ }
+
+ public double get(long index) {
+ assert index >= 0 && index < size;
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage];
+ }
+
+ public void add(double d) {
+ final int pageIndex = pageIndex(size);
+ if (pageIndex >= pages.length) {
+ final int newLength = ArrayUtil.oversize(pageIndex + 1, numBytesPerElement());
+ pages = Arrays.copyOf(pages, newLength);
+ }
+ if (pages[pageIndex] == null) {
+ pages[pageIndex] = new double[pageSize()];
+ }
+ final int indexInPage = indexInPage(size);
+ pages[pageIndex][indexInPage] = d;
+ ++size;
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_DOUBLE;
+ }
+
+}
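
Unlike the fixed-size arrays above, this is an append-only list: the constructor only sizes the page directory, and individual pages are allocated the first time add() touches them. A minimal sketch of plain API usage (the loop bound is arbitrary):

    import org.elasticsearch.common.util.BigDoubleArrayList;

    public class DoubleListDemo {
        public static void main(String[] args) {
            BigDoubleArrayList list = new BigDoubleArrayList();
            for (int i = 0; i < 100000; i++) {
                list.add(i / 3.0); // pages materialize lazily as size crosses page boundaries
            }
            System.out.println(list.get(99999));
        }
    }
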
diff --git a/src/main/java/org/elasticsearch/common/util/BigFloatArrayList.java b/src/main/java/org/elasticsearch/common/util/BigFloatArrayList.java
new file mode 100644
index 0000000..732de87
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigFloatArrayList.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * Float array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+public final class BigFloatArrayList extends AbstractBigArray {
+
+ /**
+ * Default page size, 16KB of memory per page.
+ */
+ private static final int DEFAULT_PAGE_SIZE = 1 << 12;
+
+ private float[][] pages;
+
+ public BigFloatArrayList(int pageSize, long initialCapacity) {
+ super(pageSize, null, true);
+ pages = new float[numPages(initialCapacity)][];
+ }
+
+ public BigFloatArrayList(long initialCapacity) {
+ this(DEFAULT_PAGE_SIZE, initialCapacity);
+ }
+
+ public BigFloatArrayList() {
+ this(1024);
+ }
+
+ public float get(long index) {
+ assert index >= 0 && index < size;
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage];
+ }
+
+ public void add(float f) {
+ final int pageIndex = pageIndex(size);
+ pages = ArrayUtil.grow(pages, pageIndex + 1);
+ if (pages[pageIndex] == null) {
+ pages[pageIndex] = new float[pageSize()];
+ }
+ final int indexInPage = indexInPage(size);
+ pages[pageIndex][indexInPage] = f;
+ ++size;
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_FLOAT;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/src/main/java/org/elasticsearch/common/util/BigIntArray.java
new file mode 100644
index 0000000..6ab9212
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigIntArray.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.util.BigArrays.INT_PAGE_SIZE;
+
+/**
+ * Int array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+final class BigIntArray extends AbstractBigArray implements IntArray {
+
+ private int[][] pages;
+
+ /** Constructor. */
+ public BigIntArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(INT_PAGE_SIZE, recycler, clearOnResize);
+ this.size = size;
+ pages = new int[numPages(size)][];
+ for (int i = 0; i < pages.length; ++i) {
+ pages[i] = newIntPage(i);
+ }
+ }
+
+ @Override
+ public int get(long index) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage];
+ }
+
+ @Override
+ public int set(long index, int value) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ final int[] page = pages[pageIndex];
+ final int ret = page[indexInPage];
+ page[indexInPage] = value;
+ return ret;
+ }
+
+ @Override
+ public int increment(long index, int inc) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage] += inc;
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_INT;
+ }
+
+ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
+ public void resize(long newSize) {
+ final int numPages = numPages(newSize);
+ if (numPages > pages.length) {
+ pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ }
+ for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
+ pages[i] = newIntPage(i);
+ }
+ for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
+ pages[i] = null;
+ releasePage(i);
+ }
+ this.size = newSize;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/src/main/java/org/elasticsearch/common/util/BigLongArray.java
new file mode 100644
index 0000000..d84b043
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigLongArray.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.util.BigArrays.LONG_PAGE_SIZE;
+
+/**
+ * Long array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+final class BigLongArray extends AbstractBigArray implements LongArray {
+
+ private long[][] pages;
+
+ /** Constructor. */
+ public BigLongArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
+ super(LONG_PAGE_SIZE, recycler, clearOnResize);
+ this.size = size;
+ pages = new long[numPages(size)][];
+ for (int i = 0; i < pages.length; ++i) {
+ pages[i] = newLongPage(i);
+ }
+ }
+
+ @Override
+ public long get(long index) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage];
+ }
+
+ @Override
+ public long set(long index, long value) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ final long[] page = pages[pageIndex];
+ final long ret = page[indexInPage];
+ page[indexInPage] = value;
+ return ret;
+ }
+
+ @Override
+ public long increment(long index, long inc) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return pages[pageIndex][indexInPage] += inc;
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_LONG;
+ }
+
+ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
+ public void resize(long newSize) {
+ final int numPages = numPages(newSize);
+ if (numPages > pages.length) {
+ pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ }
+ for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
+ pages[i] = newLongPage(i);
+ }
+ for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
+ pages[i] = null;
+ releasePage(i);
+ }
+ this.size = newSize;
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, long value) {
+ Preconditions.checkArgument(fromIndex <= toIndex);
+ final int fromPage = pageIndex(fromIndex);
+ final int toPage = pageIndex(toIndex - 1);
+ if (fromPage == toPage) {
+ Arrays.fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value);
+ } else {
+ Arrays.fill(pages[fromPage], indexInPage(fromIndex), pages[fromPage].length, value);
+ for (int i = fromPage + 1; i < toPage; ++i) {
+ Arrays.fill(pages[i], value);
+ }
+ Arrays.fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value);
+ }
+ }
+
+}
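
The long variant uses the same head/middle/tail decomposition in fill(); the detail worth illustrating is the exclusive upper bound. A sketch, assuming freshly allocated pages start zero-filled — which holds for the null-recycler path behind BigArrays.newLongArray(long):

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.LongArray;

    public class LongFillDemo {
        public static void main(String[] args) {
            LongArray arr = BigArrays.newLongArray(5000);
            arr.fill(10, 20, 7L); // sets indexes 10..19 only
            System.out.println(arr.get(9));  // 0
            System.out.println(arr.get(10)); // 7
            System.out.println(arr.get(19)); // 7
            System.out.println(arr.get(20)); // 0
        }
    }
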
diff --git a/src/main/java/org/elasticsearch/common/util/BigObjectArray.java b/src/main/java/org/elasticsearch/common/util/BigObjectArray.java
new file mode 100644
index 0000000..960687c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BigObjectArray.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.util.BigArrays.OBJECT_PAGE_SIZE;
+
+/**
+ * Object array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
+ * configurable length.
+ */
+final class BigObjectArray<T> extends AbstractBigArray implements ObjectArray<T> {
+
+ private Object[][] pages;
+
+ /** Constructor. */
+ public BigObjectArray(long size, PageCacheRecycler recycler) {
+ super(OBJECT_PAGE_SIZE, recycler, true);
+ this.size = size;
+ pages = new Object[numPages(size)][];
+ for (int i = 0; i < pages.length; ++i) {
+ pages[i] = newObjectPage(i);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public T get(long index) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ return (T) pages[pageIndex][indexInPage];
+ }
+
+ @Override
+ public T set(long index, T value) {
+ final int pageIndex = pageIndex(index);
+ final int indexInPage = indexInPage(index);
+ final Object[] page = pages[pageIndex];
+ @SuppressWarnings("unchecked")
+ final T ret = (T) page[indexInPage];
+ page[indexInPage] = value;
+ return ret;
+ }
+
+ @Override
+ protected int numBytesPerElement() {
+ return RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+ }
+
+ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
+ public void resize(long newSize) {
+ final int numPages = numPages(newSize);
+ if (numPages > pages.length) {
+ pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ }
+ for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
+ pages[i] = newObjectPage(i);
+ }
+ for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
+ pages[i] = null;
+ releasePage(i);
+ }
+ this.size = newSize;
+ }
+
+}
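
BigObjectArray rounds out the family for reference types; set() returns the previous slot value, which is convenient for swap-style updates. A sketch (the generic type parameter is inferred from the assignment target; names are illustrative):

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.ObjectArray;

    public class ObjectArrayDemo {
        public static void main(String[] args) {
            ObjectArray<String> names = BigArrays.newObjectArray(16);
            String previous = names.set(3, "kimchy"); // null: the slot was empty
            names = BigArrays.resize(names, 8);       // shrink, preserving the prefix
            System.out.println(previous + " / " + names.get(3)); // null / kimchy
        }
    }
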
diff --git a/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/src/main/java/org/elasticsearch/common/util/BloomFilter.java
new file mode 100644
index 0000000..9dd3ebb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/BloomFilter.java
@@ -0,0 +1,557 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import com.google.common.math.LongMath;
+import com.google.common.primitives.Ints;
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.io.IOException;
+import java.math.RoundingMode;
+import java.util.Arrays;
+import java.util.Comparator;
+
+/**
+ * A bloom filter. Inspired by Guava bloom filter implementation though with some optimizations.
+ */
+public class BloomFilter {
+
+ /**
+ * A factory that can use different fpp based on size.
+ */
+ public static class Factory {
+
+ public static final Factory DEFAULT = buildDefault();
+
+ private static Factory buildDefault() {
+ // Some numbers:
+ // 10k =0.001: 140.4kb , 10 Hashes
+ // 10k =0.01 : 93.6kb , 6 Hashes
+ // 100k=0.01 : 936.0kb , 6 Hashes
+ // 100k=0.03 : 712.7kb , 5 Hashes
+ // 500k=0.01 : 4.5mb , 6 Hashes
+ // 500k=0.03 : 3.4mb , 5 Hashes
+ // 500k=0.05 : 2.9mb , 4 Hashes
+ // 1m=0.01 : 9.1mb , 6 Hashes
+ // 1m=0.03 : 6.9mb , 5 Hashes
+ // 1m=0.05 : 5.9mb , 4 Hashes
+ // 5m=0.01 : 45.7mb , 6 Hashes
+ // 5m=0.03 : 34.8mb , 5 Hashes
+ // 5m=0.05 : 29.7mb , 4 Hashes
+ // 50m=0.01 : 457.0mb , 6 Hashes
+ // 50m=0.03 : 297.3mb , 4 Hashes
+ // 50m=0.10 : 228.5mb , 3 Hashes
+ return buildFromString("10k=0.01,1m=0.03");
+ }
+
+ /**
+ * Supports just passing fpp, as in "0.01", and also ranges, like "50k=0.01,1m=0.05". If
+ * it is null, returns {@link #buildDefault()}.
+ */
+ public static Factory buildFromString(@Nullable String config) {
+ if (config == null) {
+ return buildDefault();
+ }
+ String[] sEntries = Strings.splitStringToArray(config, ',');
+ if (sEntries.length == 0) {
+ if (config.length() > 0) {
+ return new Factory(new Entry[]{new Entry(0, Double.parseDouble(config))});
+ }
+ return buildDefault();
+ }
+ Entry[] entries = new Entry[sEntries.length];
+ for (int i = 0; i < sEntries.length; i++) {
+ int index = sEntries[i].indexOf('=');
+ entries[i] = new Entry(
+ (int) SizeValue.parseSizeValue(sEntries[i].substring(0, index).trim()).singles(),
+ Double.parseDouble(sEntries[i].substring(index + 1).trim())
+ );
+ }
+ return new Factory(entries);
+ }
+
+ private final Entry[] entries;
+
+ public Factory(Entry[] entries) {
+ this.entries = entries;
+ // the order is from the upper most expected insertions to the lowest
+ Arrays.sort(this.entries, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ return o2.expectedInsertions - o1.expectedInsertions;
+ }
+ });
+ }
+
+ public BloomFilter createFilter(int expectedInsertions) {
+ for (Entry entry : entries) {
+ if (expectedInsertions > entry.expectedInsertions) {
+ return BloomFilter.create(expectedInsertions, entry.fpp);
+ }
+ }
+ return BloomFilter.create(expectedInsertions, 0.03);
+ }
+
+ public static class Entry {
+ public final int expectedInsertions;
+ public final double fpp;
+
+ Entry(int expectedInsertions, double fpp) {
+ this.expectedInsertions = expectedInsertions;
+ this.fpp = fpp;
+ }
+ }
+ }
+
+ /**
+ * Creates a bloom filter based on the expected number of
+ * insertions and the expected false positive probability.
+ *
+ * @param expectedInsertions the number of expected insertions to the constructed bloom filter
+ * @param fpp the desired false positive probability (must be positive and less than 1.0)
+ */
+ public static BloomFilter create(int expectedInsertions, double fpp) {
+ return create(expectedInsertions, fpp, -1);
+ }
+
+ /**
+ * Creates a bloom filter based on the expected number of insertions, expected false positive probability,
+ * and number of hash functions.
+ *
+ * @param expectedInsertions the number of expected insertions to the constructed bloom filter
+ * @param fpp the desired false positive probability (must be positive and less than 1.0)
+ * @param numHashFunctions the number of hash functions to use (must be less than or equal to 255)
+ */
+ public static BloomFilter create(int expectedInsertions, double fpp, int numHashFunctions) {
+ if (expectedInsertions == 0) {
+ expectedInsertions = 1;
+ }
+ /*
+ * TODO(user): Put a warning in the javadoc about tiny fpp values,
+ * since the resulting size is proportional to -log(p), but there is not
+ * much of a point after all, e.g. optimalM(1000, 0.0000000000000001) = 76680
+ * which is less than 10kb. Who cares!
+ */
+ long numBits = optimalNumOfBits(expectedInsertions, fpp);
+
+ // calculate the optimal number of hash functions
+ if (numHashFunctions == -1) {
+ numHashFunctions = optimalNumOfHashFunctions(expectedInsertions, numBits);
+ }
+
+ try {
+ return new BloomFilter(new BitArray(numBits), numHashFunctions);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException("Could not create BloomFilter of " + numBits + " bits", e);
+ }
+ }
+
+ public static BloomFilter deserialize(DataInput in) throws IOException {
+ int version = in.readInt(); // we do nothing with this now..., defaults to 0
+
+ int numLongs = in.readInt();
+ long[] data = new long[numLongs];
+ for (int i = 0; i < numLongs; i++) {
+ data[i] = in.readLong();
+ }
+
+ int numberOfHashFunctions = in.readInt();
+
+ int hashType = in.readInt(); // again, nothing to do now...
+
+ return new BloomFilter(new BitArray(data), numberOfHashFunctions);
+ }
+
+ public static void serilaize(BloomFilter filter, DataOutput out) throws IOException {
+ out.writeInt(0); // version
+
+ BitArray bits = filter.bits;
+ out.writeInt(bits.data.length);
+ for (long l : bits.data) {
+ out.writeLong(l);
+ }
+
+ out.writeInt(filter.numHashFunctions);
+
+ out.writeInt(0); // hashType
+ }
+
+ // TODO: don't duplicate serilaize/writeTo and deserialize/readFrom code
+ public static BloomFilter readFrom(StreamInput in) throws IOException {
+ int version = in.readVInt(); // we do nothing with this now..., defaults to 0
+
+ int numLongs = in.readVInt();
+ long[] data = new long[numLongs];
+ for (int i = 0; i < numLongs; i++) {
+ data[i] = in.readLong();
+ }
+
+ int numberOfHashFunctions = in.readVInt();
+
+ int hashType = in.readVInt(); // again, nothing to do now...
+
+ return new BloomFilter(new BitArray(data), numberOfHashFunctions);
+ }
+
+ public static void writeTo(BloomFilter filter, StreamOutput out) throws IOException {
+ out.writeVInt(0); // version
+
+ BitArray bits = filter.bits;
+ out.writeVInt(bits.data.length);
+ for (long l : bits.data) {
+ out.writeLong(l);
+ }
+
+ out.writeVInt(filter.numHashFunctions);
+
+ out.writeVInt(0); // hashType
+ }
+
+ /**
+ * The bit set of the BloomFilter (not necessarily power of 2!)
+ */
+ final BitArray bits;
+ /**
+ * Number of hashes per element
+ */
+ final int numHashFunctions;
+
+ BloomFilter(BitArray bits, int numHashFunctions) {
+ this.bits = bits;
+ this.numHashFunctions = numHashFunctions;
+ /*
+ * This only exists to forbid BFs that cannot use the compact persistent representation.
+ * If it ever throws for a user who was not intending to use that representation, we
+ * should reconsider this limit.
+ */
+ if (numHashFunctions > 255) {
+ throw new IllegalArgumentException("Currently we don't allow BloomFilters that would use more than 255 hash functions");
+ }
+ }
+
+ public boolean put(BytesRef value) {
+ long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
+ int hash1 = (int) hash64;
+ int hash2 = (int) (hash64 >>> 32);
+ boolean bitsChanged = false;
+ for (int i = 1; i <= numHashFunctions; i++) {
+ int nextHash = hash1 + i * hash2;
+ if (nextHash < 0) {
+ nextHash = ~nextHash;
+ }
+ bitsChanged |= bits.set(nextHash % bits.size());
+ }
+ return bitsChanged;
+ }
+
+ public boolean mightContain(BytesRef value) {
+ long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
+ int hash1 = (int) hash64;
+ int hash2 = (int) (hash64 >>> 32);
+ for (int i = 1; i <= numHashFunctions; i++) {
+ int nextHash = hash1 + i * hash2;
+ if (nextHash < 0) {
+ nextHash = ~nextHash;
+ }
+ if (!bits.get(nextHash % bits.size())) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public int getNumHashFunctions() {
+ return this.numHashFunctions;
+ }
+
+ public long getSizeInBytes() {
+ return bits.size() + 8;
+ }
+
+ @Override
+ public int hashCode() {
+ return bits.hashCode() + numHashFunctions;
+ }
+
+ /**
+ * Returns the probability that {@linkplain #mightContain(BytesRef)} will erroneously return
+ * {@code true} for an object that has not actually been put in the {@code BloomFilter}.
+ * <p/>
+ * <p>Ideally, this number should be close to the {@code fpp} parameter
+ * passed to {@code create}, or smaller. If it is
+ * significantly higher, it is usually the case that too many elements (more than
+ * expected) have been put in the {@code BloomFilter}, degenerating it.
+ */
+ public double getExpectedFpp() {
+ // You down with FPP? (Yeah you know me!) Who's down with FPP? (Every last homie!)
+ return Math.pow((double) bits.bitCount() / bits.size(), numHashFunctions);
+ }
+
+ /*
+ * Cheat sheet:
+ *
+ * m: total bits
+ * n: expected insertions
+ * b: m/n, bits per insertion
+
+ * p: expected false positive probability
+ *
+ * 1) Optimal k = b * ln2
+ * 2) p = (1 - e ^ (-kn/m))^k
+ * 3) For optimal k: p = 2 ^ (-k) ~= 0.6185^b
+ * 4) For optimal k: m = -nlnp / ((ln2) ^ 2)
+ */
+
+ /**
+ * Computes the optimal k (number of hashes per element inserted in Bloom filter), given the
+ * expected insertions and total number of bits in the Bloom filter.
+ * <p/>
+ * See http://en.wikipedia.org/wiki/File:Bloom_filter_fp_probability.svg for the formula.
+ *
+ * @param n expected insertions (must be positive)
+ * @param m total number of bits in Bloom filter (must be positive)
+ */
+ static int optimalNumOfHashFunctions(long n, long m) {
+ return Math.max(1, (int) Math.round((double) m / n * Math.log(2)));
+ }
+
+ /**
+ * Computes m (total bits of Bloom filter) which is expected to achieve, for the specified
+ * expected insertions, the required false positive probability.
+ * <p/>
+ * See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the formula.
+ *
+ * @param n expected insertions (must be positive)
+ * @param p false positive rate (must be 0 < p < 1)
+ */
+ static long optimalNumOfBits(long n, double p) {
+ if (p == 0) {
+ p = Double.MIN_VALUE;
+ }
+ return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
+ }
+
+ // START : MURMUR 3_128
+
+ protected static long getblock(byte[] key, int offset, int index) {
+ int i_8 = index << 3;
+ int blockOffset = offset + i_8;
+ return ((long) key[blockOffset + 0] & 0xff) + (((long) key[blockOffset + 1] & 0xff) << 8) +
+ (((long) key[blockOffset + 2] & 0xff) << 16) + (((long) key[blockOffset + 3] & 0xff) << 24) +
+ (((long) key[blockOffset + 4] & 0xff) << 32) + (((long) key[blockOffset + 5] & 0xff) << 40) +
+ (((long) key[blockOffset + 6] & 0xff) << 48) + (((long) key[blockOffset + 7] & 0xff) << 56);
+ }
+
+ protected static long rotl64(long v, int n) {
+ return ((v << n) | (v >>> (64 - n)));
+ }
+
+ protected static long fmix(long k) {
+ k ^= k >>> 33;
+ k *= 0xff51afd7ed558ccdL;
+ k ^= k >>> 33;
+ k *= 0xc4ceb9fe1a85ec53L;
+ k ^= k >>> 33;
+
+ return k;
+ }
+
+ public static long hash3_x64_128(byte[] key, int offset, int length, long seed) {
+ final int nblocks = length >> 4; // Process as 128-bit blocks.
+
+ long h1 = seed;
+ long h2 = seed;
+
+ long c1 = 0x87c37b91114253d5L;
+ long c2 = 0x4cf5ad432745937fL;
+
+ //----------
+ // body
+
+ for (int i = 0; i < nblocks; i++) {
+ long k1 = getblock(key, offset, i * 2 + 0);
+ long k2 = getblock(key, offset, i * 2 + 1);
+
+ k1 *= c1;
+ k1 = rotl64(k1, 31);
+ k1 *= c2;
+ h1 ^= k1;
+
+ h1 = rotl64(h1, 27);
+ h1 += h2;
+ h1 = h1 * 5 + 0x52dce729;
+
+ k2 *= c2;
+ k2 = rotl64(k2, 33);
+ k2 *= c1;
+ h2 ^= k2;
+
+ h2 = rotl64(h2, 31);
+ h2 += h1;
+ h2 = h2 * 5 + 0x38495ab5;
+ }
+
+ //----------
+ // tail
+
+ // Advance offset to the unprocessed tail of the data.
+ offset += nblocks * 16;
+
+ long k1 = 0;
+ long k2 = 0;
+
+ switch (length & 15) {
+ case 15:
+ k2 ^= ((long) key[offset + 14]) << 48;
+ case 14:
+ k2 ^= ((long) key[offset + 13]) << 40;
+ case 13:
+ k2 ^= ((long) key[offset + 12]) << 32;
+ case 12:
+ k2 ^= ((long) key[offset + 11]) << 24;
+ case 11:
+ k2 ^= ((long) key[offset + 10]) << 16;
+ case 10:
+ k2 ^= ((long) key[offset + 9]) << 8;
+ case 9:
+ k2 ^= ((long) key[offset + 8]) << 0;
+ k2 *= c2;
+ k2 = rotl64(k2, 33);
+ k2 *= c1;
+ h2 ^= k2;
+
+ case 8:
+ k1 ^= ((long) key[offset + 7]) << 56;
+ case 7:
+ k1 ^= ((long) key[offset + 6]) << 48;
+ case 6:
+ k1 ^= ((long) key[offset + 5]) << 40;
+ case 5:
+ k1 ^= ((long) key[offset + 4]) << 32;
+ case 4:
+ k1 ^= ((long) key[offset + 3]) << 24;
+ case 3:
+ k1 ^= ((long) key[offset + 2]) << 16;
+ case 2:
+ k1 ^= ((long) key[offset + 1]) << 8;
+ case 1:
+ k1 ^= ((long) key[offset]);
+ k1 *= c1;
+ k1 = rotl64(k1, 31);
+ k1 *= c2;
+ h1 ^= k1;
+ }
+
+ //----------
+ // finalization
+
+ h1 ^= length;
+ h2 ^= length;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = fmix(h1);
+ h2 = fmix(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ //return (new long[]{h1, h2});
+ // SAME AS GUAVA, they take the first long out of the 128bit
+ return h1;
+ }
+
+ // END: MURMUR 3_128
+
+ // Note: We use this instead of java.util.BitSet because we need access to the long[] data field
+ static class BitArray {
+ final long[] data;
+ int bitCount;
+
+ BitArray(long bits) {
+ this(new long[Ints.checkedCast(LongMath.divide(bits, 64, RoundingMode.CEILING))]);
+ }
+
+ // Used by serialization
+ BitArray(long[] data) {
+ this.data = data;
+ int bitCount = 0;
+ for (long value : data) {
+ bitCount += Long.bitCount(value);
+ }
+ this.bitCount = bitCount;
+ }
+
+ /**
+ * Returns true if the bit changed value.
+ */
+ boolean set(int index) {
+ if (!get(index)) {
+ data[index >> 6] |= (1L << index);
+ bitCount++;
+ return true;
+ }
+ return false;
+ }
+
+ boolean get(int index) {
+ return (data[index >> 6] & (1L << index)) != 0;
+ }
+
+ /**
+ * Number of bits
+ */
+ int size() {
+ return data.length * Long.SIZE;
+ }
+
+ /**
+ * Number of set bits (1s)
+ */
+ int bitCount() {
+ return bitCount;
+ }
+
+ BitArray copy() {
+ return new BitArray(data.clone());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof BitArray) {
+ BitArray bitArray = (BitArray) o;
+ return Arrays.equals(data, bitArray.data);
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(data);
+ }
+ }
+}
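
End to end, the public surface is create/put/mightContain, with the Factory handling fpp selection by expected size. A minimal usage sketch; the demo class name and the concrete counts are illustrative:

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.util.BloomFilter;

    public class BloomFilterDemo {
        public static void main(String[] args) {
            // ~1M expected insertions at a 3% target false-positive rate.
            BloomFilter filter = BloomFilter.create(1000000, 0.03);
            filter.put(new BytesRef("doc-42"));

            System.out.println(filter.mightContain(new BytesRef("doc-42"))); // always true
            System.out.println(filter.mightContain(new BytesRef("doc-43"))); // false, except ~3% of the time
            System.out.println(filter.getExpectedFpp()); // near 0 while the filter is mostly empty
        }
    }
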
diff --git a/src/main/java/org/elasticsearch/common/util/ByteArray.java b/src/main/java/org/elasticsearch/common/util/ByteArray.java
new file mode 100644
index 0000000..40c8c83
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/ByteArray.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Abstraction of an array of byte values.
+ */
+public interface ByteArray extends BigArray {
+
+ /**
+ * Get an element given its index.
+ */
+ public abstract byte get(long index);
+
+ /**
+ * Set a value at the given index and return the previous value.
+ */
+ public abstract byte set(long index, byte value);
+
+ /**
+ * Get a reference to a slice.
+ */
+ public abstract void get(long index, int len, BytesRef ref);
+
+ /**
+ * Bulk set.
+ */
+ public abstract void set(long index, byte[] buf, int offset, int len);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/ByteUtils.java b/src/main/java/org/elasticsearch/common/util/ByteUtils.java
new file mode 100644
index 0000000..100d4d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/ByteUtils.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+
+
+/** Utility methods to do byte-level encoding. These methods are biased towards little-endian byte order because it is the most
+ * common byte order and reading several bytes at once may be optimizable in the future with the help of sun.misc.Unsafe. */
+public enum ByteUtils {
+ ;
+
+ public static final int MAX_BYTES_VLONG = 9;
+
+ /** Zig-zag decode. */
+ public static long zigZagDecode(long n) {
+ return ((n >>> 1) ^ -(n & 1));
+ }
+
+ /** Zig-zag encode: this helps transforming small signed numbers into small positive numbers. */
+ public static long zigZagEncode(long n) {
+ return (n >> 63) ^ (n << 1);
+ }
+
+ /** Write a long in little-endian format. */
+ public static void writeLongLE(long l, byte[] arr, int offset) {
+ for (int i = 0; i < 8; ++i) {
+ arr[offset++] = (byte) l;
+ l >>>= 8;
+ }
+ assert l == 0;
+ }
+
+ /** Read a long in little-endian format. */
+ public static long readLongLE(byte[] arr, int offset) {
+ long l = arr[offset++] & 0xFFL;
+ for (int i = 1; i < 8; ++i) {
+ l |= (arr[offset++] & 0xFFL) << (8 * i);
+ }
+ return l;
+ }
+
+ /** Write an int in little-endian format. */
+ public static void writeIntLE(int l, byte[] arr, int offset) {
+ for (int i = 0; i < 4; ++i) {
+ arr[offset++] = (byte) l;
+ l >>>= 8;
+ }
+ assert l == 0;
+ }
+
+ /** Read an int in little-endian format. */
+ public static int readIntLE(byte[] arr, int offset) {
+ int l = arr[offset++] & 0xFF;
+ for (int i = 1; i < 4; ++i) {
+ l |= (arr[offset++] & 0xFF) << (8 * i);
+ }
+ return l;
+ }
+
+ /** Write a double in little-endian format. */
+ public static void writeDoubleLE(double d, byte[] arr, int offset) {
+ writeLongLE(Double.doubleToRawLongBits(d), arr, offset);
+ }
+
+ /** Read a double in little-endian format. */
+ public static double readDoubleLE(byte[] arr, int offset) {
+ return Double.longBitsToDouble(readLongLE(arr, offset));
+ }
+
+ /** Write a float in little-endian format. */
+ public static void writeFloatLE(float d, byte[] arr, int offset) {
+ writeIntLE(Float.floatToRawIntBits(d), arr, offset);
+ }
+
+ /** Read a float in little-endian format. */
+ public static float readFloatLE(byte[] arr, int offset) {
+ return Float.intBitsToFloat(readIntLE(arr, offset));
+ }
+
+ /** Same as DataOutput#writeVLong but accepts negative values (written on 9 bytes). */
+ public static void writeVLong(ByteArrayDataOutput out, long i) {
+ for (int k = 0; k < 8 && (i & ~0x7FL) != 0L; ++k) {
+ out.writeByte((byte)((i & 0x7FL) | 0x80L));
+ i >>>= 7;
+ }
+ out.writeByte((byte)i);
+ }
+
+ /** Same as DataInput#readVLong but can read negative values (read on 9 bytes). */
+ public static long readVLong(ByteArrayDataInput in) {
+ // unrolled because of HotSpot bugs, see Lucene's impl
+ byte b = in.readByte();
+ if (b >= 0) return b;
+ long i = b & 0x7FL;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 7;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 14;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 21;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 28;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 35;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 42;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0x7FL) << 49;
+ if (b >= 0) return i;
+ b = in.readByte();
+ i |= (b & 0xFFL) << 56;
+ return i;
+ }
+
+}
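
Zig-zag plus the 9-byte vlong variant is what lets small-magnitude signed values encode compactly while negatives still round-trip. A self-contained sketch of both round trips, using Lucene's ByteArrayDataOutput/ByteArrayDataInput as the buffers:

    import org.apache.lucene.store.ByteArrayDataInput;
    import org.apache.lucene.store.ByteArrayDataOutput;
    import org.elasticsearch.common.util.ByteUtils;

    public class ByteUtilsDemo {
        public static void main(String[] args) {
            // Zig-zag maps 0, -1, 1, -2, 2, ... onto 0, 1, 2, 3, 4, ...
            long encoded = ByteUtils.zigZagEncode(-3L);      // 5
            long decoded = ByteUtils.zigZagDecode(encoded);  // -3

            // The vlong variant writes negatives on 9 bytes and reads them back intact.
            byte[] buf = new byte[ByteUtils.MAX_BYTES_VLONG];
            ByteArrayDataOutput out = new ByteArrayDataOutput(buf);
            ByteUtils.writeVLong(out, -3L);
            long back = ByteUtils.readVLong(new ByteArrayDataInput(buf));
            System.out.println(encoded + " " + decoded + " " + back); // 5 -3 -3
        }
    }
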
diff --git a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java
new file mode 100644
index 0000000..778bb75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.carrotsearch.hppc.DoubleArrayList;
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.LongArrayList;
+import com.google.common.primitives.Longs;
+import org.apache.lucene.util.IntroSorter;
+import org.elasticsearch.common.Preconditions;
+
+import java.util.AbstractList;
+import java.util.List;
+import java.util.RandomAccess;
+
+/** Collections-related utility methods. */
+public enum CollectionUtils {
+ ;
+
+ public static void sort(LongArrayList list) {
+ sort(list.buffer, list.size());
+ }
+
+ public static void sort(final long[] array, int len) {
+ new IntroSorter() {
+
+ long pivot;
+
+ @Override
+ protected void swap(int i, int j) {
+ final long tmp = array[i];
+ array[i] = array[j];
+ array[j] = tmp;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return Longs.compare(array[i], array[j]);
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivot = array[i];
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return Longs.compare(pivot, array[j]);
+ }
+
+ }.sort(0, len);
+ }
+
+ public static void sortAndDedup(LongArrayList list) {
+ list.elementsCount = sortAndDedup(list.buffer, list.elementsCount);
+ }
+
+ /** Sort and deduplicate values in-place, then return the unique element count. */
+ public static int sortAndDedup(long[] array, int len) {
+ if (len <= 1) {
+ return len;
+ }
+ sort(array, len);
+ int uniqueCount = 1;
+ for (int i = 1; i < len; ++i) {
+ if (array[i] != array[i - 1]) {
+ array[uniqueCount++] = array[i];
+ }
+ }
+ return uniqueCount;
+ }
+
+ public static void sort(FloatArrayList list) {
+ sort(list.buffer, list.size());
+ }
+
+ public static void sort(final float[] array, int len) {
+ new IntroSorter() {
+
+ float pivot;
+
+ @Override
+ protected void swap(int i, int j) {
+ final float tmp = array[i];
+ array[i] = array[j];
+ array[j] = tmp;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return Float.compare(array[i], array[j]);
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivot = array[i];
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return Float.compare(pivot, array[j]);
+ }
+
+ }.sort(0, len);
+ }
+
+ public static void sortAndDedup(FloatArrayList list) {
+ list.elementsCount = sortAndDedup(list.buffer, list.elementsCount);
+ }
+
+ /** Sort and deduplicate values in-place, then return the unique element count. */
+ public static int sortAndDedup(float[] array, int len) {
+ if (len <= 1) {
+ return len;
+ }
+ sort(array, len);
+ int uniqueCount = 1;
+ for (int i = 1; i < len; ++i) {
+ if (Float.compare(array[i], array[i - 1]) != 0) {
+ array[uniqueCount++] = array[i];
+ }
+ }
+ return uniqueCount;
+ }
+
+ public static void sort(DoubleArrayList list) {
+ sort(list.buffer, list.size());
+ }
+
+ public static void sort(final double[] array, int len) {
+ new IntroSorter() {
+
+ double pivot;
+
+ @Override
+ protected void swap(int i, int j) {
+ final double tmp = array[i];
+ array[i] = array[j];
+ array[j] = tmp;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return Double.compare(array[i], array[j]);
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivot = array[i];
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return Double.compare(pivot, array[j]);
+ }
+
+ }.sort(0, len);
+ }
+
+ public static void sortAndDedup(DoubleArrayList list) {
+ list.elementsCount = sortAndDedup(list.buffer, list.elementsCount);
+ }
+
+ /** Sort and deduplicate values in-place, then return the unique element count. */
+ public static int sortAndDedup(double[] array, int len) {
+ if (len <= 1) {
+ return len;
+ }
+ sort(array, len);
+ int uniqueCount = 1;
+ for (int i = 1; i < len; ++i) {
+ if (Double.compare(array[i], array[i - 1]) != 0) {
+ array[uniqueCount++] = array[i];
+ }
+ }
+ return uniqueCount;
+ }
+
+ /**
+ * Checks if the given array contains any elements.
+ *
+ * @param array The array to check
+ *
+ * @return true if the array is null or contains no elements, false otherwise.
+ */
+ public static boolean isEmpty(Object[] array) {
+ return array == null || array.length == 0;
+ }
+
+ /**
+ * Return a rotated view of the given list with the given distance.
+ */
+ public static <T> List<T> rotate(final List<T> list, int distance) {
+ if (list.isEmpty()) {
+ return list;
+ }
+
+ int d = distance % list.size();
+ if (d < 0) {
+ d += list.size();
+ }
+
+ if (d == 0) {
+ return list;
+ }
+
+ return new RotatedList<T>(list, d);
+ }
+
+ private static class RotatedList<T> extends AbstractList<T> implements RandomAccess {
+
+ private final List<T> in;
+ private final int distance;
+
+ public RotatedList(List<T> list, int distance) {
+ Preconditions.checkArgument(distance >= 0 && distance < list.size());
+ Preconditions.checkArgument(list instanceof RandomAccess);
+ this.in = list;
+ this.distance = distance;
+ }
+
+ @Override
+ public T get(int index) {
+ int idx = distance + index;
+ if (idx < 0 || idx >= in.size()) {
+ idx -= in.size();
+ }
+ return in.get(idx);
+ }
+
+ @Override
+ public int size() {
+ return in.size();
+ }
+
+ };
+
+}
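
The sortAndDedup variants compact in place and report the length of the unique prefix rather than shrinking the buffer, and rotate() returns a lazy view rather than a copy. A short sketch of both behaviors:

    import java.util.Arrays;
    import java.util.List;
    import org.elasticsearch.common.util.CollectionUtils;

    public class CollectionUtilsDemo {
        public static void main(String[] args) {
            long[] values = { 5, 3, 5, 1, 3 };
            int unique = CollectionUtils.sortAndDedup(values, values.length);
            // values[0..unique) == {1, 3, 5}; elements past 'unique' are stale.
            System.out.println(unique + " " + Arrays.toString(values)); // 3 [1, 3, 5, 5, 5]

            List<String> rotated = CollectionUtils.rotate(Arrays.asList("a", "b", "c"), 1);
            System.out.println(rotated); // [b, c, a] -- a view over the original list
        }
    }
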
diff --git a/src/main/java/org/elasticsearch/common/util/Comparators.java b/src/main/java/org/elasticsearch/common/util/Comparators.java
new file mode 100644
index 0000000..d9943d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/Comparators.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import java.util.Comparator;
+
+/**
+ * {@link Comparator}-related utility methods.
+ */
+public enum Comparators {
+ ;
+
+ /**
+ * Compare <code>d1</code> against <code>d2</code>, pushing {@link Double#NaN} to the bottom.
+ */
+ public static int compareDiscardNaN(double d1, double d2, boolean asc) {
+ if (Double.isNaN(d1)) {
+ return Double.isNaN(d2) ? 0 : 1;
+ } else if (Double.isNaN(d2)) {
+ return -1;
+ } else {
+ return asc ? Double.compare(d1, d2) : Double.compare(d2, d1);
+ }
+ }
+
+}
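
compareDiscardNaN makes NaN compare greater than every other value in both directions, so NaN sinks to the end of any sort built on it regardless of sort order. A sketch wiring it into a Comparator (the demo class is illustrative):

    import java.util.Arrays;
    import java.util.Comparator;
    import org.elasticsearch.common.util.Comparators;

    public class ComparatorsDemo {
        public static void main(String[] args) {
            Double[] vals = { 1.5, Double.NaN, -2.0 };
            Arrays.sort(vals, new Comparator<Double>() {
                @Override
                public int compare(Double a, Double b) {
                    return Comparators.compareDiscardNaN(a, b, true); // ascending, NaN last
                }
            });
            System.out.println(Arrays.toString(vals)); // [-2.0, 1.5, NaN]
        }
    }
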
diff --git a/src/main/java/org/elasticsearch/common/util/DoubleArray.java b/src/main/java/org/elasticsearch/common/util/DoubleArray.java
new file mode 100644
index 0000000..f38b6a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/DoubleArray.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+/**
+ * Abstraction of an array of double values.
+ */
+public interface DoubleArray extends BigArray {
+
+ /**
+ * Get an element given its index.
+ */
+ public abstract double get(long index);
+
+ /**
+ * Set a value at the given index and return the previous value.
+ */
+ public abstract double set(long index, double value);
+
+ /**
+ * Increment value at the given index by <code>inc</code> and return the value.
+ */
+ public abstract double increment(long index, double inc);
+
+ /**
+ * Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
+ */
+ public abstract void fill(long fromIndex, long toIndex, double value);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/IntArray.java b/src/main/java/org/elasticsearch/common/util/IntArray.java
new file mode 100644
index 0000000..1ee973b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/IntArray.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+/**
+ * Abstraction of an array of integer values.
+ */
+public interface IntArray extends BigArray {
+
+ /**
+ * Get an element given its index.
+ */
+ public abstract int get(long index);
+
+ /**
+ * Set a value at the given index and return the previous value.
+ */
+ public abstract int set(long index, int value);
+
+ /**
+ * Increment the value at the given index by <code>inc</code> and return the new value.
+ */
+ public abstract int increment(long index, int inc);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/LongArray.java b/src/main/java/org/elasticsearch/common/util/LongArray.java
new file mode 100644
index 0000000..670f729
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/LongArray.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+/**
+ * Abstraction of an array of long values.
+ */
+public interface LongArray extends BigArray {
+
+ /**
+ * Get an element given its index.
+ */
+ public abstract long get(long index);
+
+ /**
+ * Set a value at the given index and return the previous value.
+ */
+ public abstract long set(long index, long value);
+
+ /**
+ * Increment the value at the given index by <code>inc</code> and return the new value.
+ */
+ public abstract long increment(long index, long inc);
+
+ /**
+ * Fill slots from <code>fromIndex</code> (inclusive) to <code>toIndex</code> (exclusive) with <code>value</code>.
+ */
+ public abstract void fill(long fromIndex, long toIndex, long value);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/MinimalMap.java b/src/main/java/org/elasticsearch/common/util/MinimalMap.java
new file mode 100644
index 0000000..b9eedc8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/MinimalMap.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+public abstract class MinimalMap<K, V> implements Map<K, V> {
+
+ public boolean isEmpty() {
+ throw new UnsupportedOperationException("entrySet() not supported!");
+ }
+
+ public V put(K key, V value) {
+ throw new UnsupportedOperationException("put(Object, Object) not supported!");
+ }
+
+ public void putAll(Map<? extends K, ? extends V> m) {
+ throw new UnsupportedOperationException("putAll(Map<? extends K, ? extends V>) not supported!");
+ }
+
+ public V remove(Object key) {
+ throw new UnsupportedOperationException("remove(Object) not supported!");
+ }
+
+ public void clear() {
+ throw new UnsupportedOperationException("clear() not supported!");
+ }
+
+ public Set<K> keySet() {
+ throw new UnsupportedOperationException("keySet() not supported!");
+ }
+
+ public Collection<V> values() {
+ throw new UnsupportedOperationException("values() not supported!");
+ }
+
+ public Set<Entry<K, V>> entrySet() {
+ throw new UnsupportedOperationException("entrySet() not supported!");
+ }
+
+ public boolean containsValue(Object value) {
+ throw new UnsupportedOperationException("containsValue(Object) not supported!");
+ }
+
+ public int size() {
+ throw new UnsupportedOperationException("size() not supported!");
+ }
+
+ public boolean containsKey(Object k) {
+ throw new UnsupportedOperationException("containsKey(Object) not supported!");
+ }
+} \ No newline at end of file
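MinimalMap rejects every Map operation by default, so a subclass implements only the lookups
it actually supports. A hedged sketch (LookupOnlyMap is an illustrative name, not part of the
patch):

    /** A lookup-only Map view: get() and containsKey() work, everything else throws. */
    final class LookupOnlyMap<K, V> extends MinimalMap<K, V> {
        private final java.util.Map<K, V> backing;

        LookupOnlyMap(java.util.Map<K, V> backing) {
            this.backing = backing;
        }

        @Override
        public V get(Object key) { // MinimalMap leaves get() abstract
            return backing.get(key);
        }

        @Override
        public boolean containsKey(Object key) {
            return backing.containsKey(key);
        }
    }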
diff --git a/src/main/java/org/elasticsearch/common/util/ObjectArray.java b/src/main/java/org/elasticsearch/common/util/ObjectArray.java
new file mode 100644
index 0000000..c105b72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/ObjectArray.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+/**
+ * Abstraction of an array of object values.
+ */
+public interface ObjectArray<T> extends BigArray {
+
+ /**
+ * Get an element given its index.
+ */
+ public abstract T get(long index);
+
+ /**
+ * Set a value at the given index and return the previous value.
+ */
+ public abstract T set(long index, T value);
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/SlicedDoubleList.java b/src/main/java/org/elasticsearch/common/util/SlicedDoubleList.java
new file mode 100644
index 0000000..1126064
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/SlicedDoubleList.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import java.util.AbstractList;
+import java.util.RandomAccess;
+
+import org.apache.lucene.util.ArrayUtil;
+
+import com.google.common.primitives.Doubles;
+
+public final class SlicedDoubleList extends AbstractList<Double> implements RandomAccess {
+
+ public static final SlicedDoubleList EMPTY = new SlicedDoubleList(0);
+
+ public double[] values;
+ public int offset;
+ public int length;
+
+ public SlicedDoubleList(int capacity) {
+ this(new double[capacity], 0, capacity);
+ }
+
+ public SlicedDoubleList(double[] values, int offset, int length) {
+ this.values = values;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ public int size() {
+ return length;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return size() == 0;
+ }
+
+ @Override
+ public Double get(int index) {
+ assert index < size();
+ return values[offset + index];
+ }
+
+ @Override
+ public boolean contains(Object target) {
+ // Overridden to prevent a ton of boxing
+ return (target instanceof Double)
+ && indexOf(values, (Double) target, offset, offset+length) != -1;
+ }
+
+ @Override
+ public int indexOf(Object target) {
+ // Overridden to prevent a ton of boxing
+ if (target instanceof Double) {
+ int i = indexOf(values, (Double) target, offset, offset+length);
+ if (i >= 0) {
+ return i - offset;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public int lastIndexOf(Object target) {
+ // Overridden to prevent a ton of boxing
+ if (target instanceof Double) {
+ int i = lastIndexOf(values, (Double) target, offset, offset+length);
+ if (i >= 0) {
+ return i - offset;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public Double set(int index, Double element) {
+ throw new UnsupportedOperationException("modifying list opertations are not implemented");
+ }
+
+ @Override
+ public boolean equals(Object object) {
+ if (object == this) {
+ return true;
+ }
+ if (object instanceof SlicedDoubleList) {
+ SlicedDoubleList that = (SlicedDoubleList) object;
+ int size = size();
+ if (that.size() != size) {
+ return false;
+ }
+ for (int i = 0; i < size; i++) {
+ if (values[offset + i] != that.values[that.offset + i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return super.equals(object);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = 1;
+ for (int i = 0; i < length; i++) {
+ result = 31 * result + Doubles.hashCode(values[offset+i]);
+ }
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder(size() * 10);
+ builder.append('[');
+ if (length > 0) {
+ builder.append(values[offset]);
+ for (int i = 1; i < length; i++) {
+ builder.append(", ").append(values[offset+i]);
+ }
+ }
+ return builder.append(']').toString();
+ }
+
+ private static int indexOf(double[] array, double target, int start, int end) {
+ for (int i = start; i < end; i++) {
+ if (array[i] == target) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private static int lastIndexOf(double[] array, double target, int start, int end) {
+ for (int i = end - 1; i >= start; i--) {
+ if (array[i] == target) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ public void grow(int newLength) {
+ assert offset == 0;
+ values = ArrayUtil.grow(values, newLength);
+ }
+} \ No newline at end of file
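Usage sketch (editorial illustration, not part of this patch): the list is a zero-copy view
over a slice of a double[], and the overridden contains()/indexOf() avoid boxing every element
during the scan.

    double[] buffer = { 9.5, 1.5, 2.5, 3.5, 9.5 };
    // View the middle three values without copying them.
    SlicedDoubleList slice = new SlicedDoubleList(buffer, 1, 3);
    System.out.println(slice);               // [1.5, 2.5, 3.5]
    System.out.println(slice.size());        // 3
    System.out.println(slice.contains(2.5)); // true
    System.out.println(slice.indexOf(3.5));  // 2 (relative to the slice, not the buffer)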
diff --git a/src/main/java/org/elasticsearch/common/util/SlicedLongList.java b/src/main/java/org/elasticsearch/common/util/SlicedLongList.java
new file mode 100644
index 0000000..cb03a9d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/SlicedLongList.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import java.util.AbstractList;
+import java.util.RandomAccess;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.LongsRef;
+
+import com.google.common.primitives.Longs;
+
+public final class SlicedLongList extends AbstractList<Long> implements RandomAccess {
+
+ public static final SlicedLongList EMPTY = new SlicedLongList(LongsRef.EMPTY_LONGS, 0, 0);
+
+ public long[] values;
+ public int offset;
+ public int length;
+
+ public SlicedLongList(int capacity) {
+ this(new long[capacity], 0, capacity);
+ }
+
+ public SlicedLongList(long[] values, int offset, int length) {
+ this.values = values;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ public int size() {
+ return length;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return size() == 0;
+ }
+
+ @Override
+ public Long get(int index) {
+ assert index < size();
+ return values[offset + index];
+ }
+
+ @Override
+ public boolean contains(Object target) {
+ // Overridden to prevent a ton of boxing
+ return (target instanceof Long)
+ && indexOf(values, (Long) target, offset, offset+length) != -1;
+ }
+
+ @Override
+ public int indexOf(Object target) {
+ // Overridden to prevent a ton of boxing
+ if (target instanceof Long) {
+ int i = indexOf(values, (Long) target, offset, offset+length);
+ if (i >= 0) {
+ return i - offset;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public int lastIndexOf(Object target) {
+ // Overridden to prevent a ton of boxing
+ if (target instanceof Long) {
+ int i = lastIndexOf(values, (Long) target, offset, offset+length);
+ if (i >= 0) {
+ return i - offset;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public Long set(int index, Long element) {
+ throw new UnsupportedOperationException("modifying list opertations are not implemented");
+ }
+
+ @Override
+ public boolean equals(Object object) {
+ if (object == this) {
+ return true;
+ }
+ if (object instanceof SlicedLongList) {
+ SlicedLongList that = (SlicedLongList) object;
+ int size = size();
+ if (that.size() != size) {
+ return false;
+ }
+ for (int i = 0; i < size; i++) {
+ if (values[offset + i] != that.values[that.offset + i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return super.equals(object);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = 1;
+ for (int i = 0; i < length; i++) {
+ result = 31 * result + Longs.hashCode(values[offset+i]);
+ }
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder(size() * 10);
+ builder.append('[');
+ if (length > 0) {
+ builder.append(values[offset]);
+ for (int i = 1; i < length; i++) {
+ builder.append(", ").append(values[offset+i]);
+ }
+ }
+ return builder.append(']').toString();
+ }
+
+ private static int indexOf(long[] array, long target, int start, int end) {
+ for (int i = start; i < end; i++) {
+ if (array[i] == target) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private static int lastIndexOf(long[] array, long target, int start, int end) {
+ for (int i = end - 1; i >= start; i--) {
+ if (array[i] == target) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ public void grow(int newLength) {
+ assert offset == 0;
+ values = ArrayUtil.grow(values, newLength);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/util/SlicedObjectList.java b/src/main/java/org/elasticsearch/common/util/SlicedObjectList.java
new file mode 100644
index 0000000..2b1130e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/SlicedObjectList.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import java.util.AbstractList;
+import java.util.RandomAccess;
+
+// TODO this could use some javadocs
+public abstract class SlicedObjectList<T> extends AbstractList<T> implements RandomAccess {
+
+ public T[] values;
+ public int offset;
+ public int length;
+
+ public SlicedObjectList(T[] values) {
+ this(values, 0, values.length);
+ }
+
+ public SlicedObjectList(T[] values, int offset, int length) {
+ this.values = values;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ public int size() {
+ return length;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return size() == 0;
+ }
+
+ @Override
+ public T get(int index) {
+ assert index < size();
+ return values[offset + index];
+ }
+
+ @Override
+ public T set(int index, T element) {
+ throw new UnsupportedOperationException("modifying list opertations are not implemented");
+ }
+
+ @Override
+ public boolean equals(Object object) {
+ if (object == this) {
+ return true;
+ }
+ if (object instanceof SlicedObjectList) {
+ SlicedObjectList<?> that = (SlicedObjectList<?>) object;
+ int size = size();
+ if (that.size() != size) {
+ return false;
+ }
+ for (int i = 0; i < size; i++) {
+ if (!values[offset + i].equals(that.values[that.offset + i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return super.equals(object);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = 1;
+ for (int i = 0; i < length; i++) {
+ result = 31 * result + values[offset+i].hashCode();
+ }
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder(size() * 10);
+ builder.append('[');
+ if (length > 0) {
+ builder.append(values[offset]);
+ for (int i = 1; i < length; i++) {
+ builder.append(", ").append(values[offset+i]);
+ }
+ }
+ return builder.append(']').toString();
+ }
+
+ public abstract void grow(int newLength);
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/util/UnsafeUtils.java b/src/main/java/org/elasticsearch/common/util/UnsafeUtils.java
new file mode 100644
index 0000000..e8f7107
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/UnsafeUtils.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.BytesRef;
+import sun.misc.Unsafe;
+
+import java.lang.reflect.Field;
+
+/** Utility methods that use {@link Unsafe}. */
+public enum UnsafeUtils {
+ ;
+
+ private static final Unsafe UNSAFE;
+ private static final long BYTE_ARRAY_OFFSET;
+ private static final int BYTE_ARRAY_SCALE;
+
+ static {
+ try {
+ Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+ theUnsafe.setAccessible(true);
+ UNSAFE = (Unsafe) theUnsafe.get(null);
+ BYTE_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
+ BYTE_ARRAY_SCALE = UNSAFE.arrayIndexScale(byte[].class);
+ } catch (IllegalAccessException e) {
+ throw new ExceptionInInitializerError("Cannot access Unsafe");
+ } catch (NoSuchFieldException e) {
+ throw new ExceptionInInitializerError("Cannot access Unsafe");
+ } catch (SecurityException e) {
+ throw new ExceptionInInitializerError("Cannot access Unsafe");
+ }
+ }
+
+ // Don't expose these methods directly, they are too easy to mis-use since they depend on the byte order.
+ // If you need methods to read integers, please expose a method that makes the byte order explicit such
+ // as readIntLE (little endian).
+
+ // Also, please ***NEVER*** expose any method that writes using Unsafe; this is too dangerous
+
+ private static long readLong(byte[] src, int offset) {
+ return UNSAFE.getLong(src, BYTE_ARRAY_OFFSET + offset);
+ }
+
+ private static int readInt(byte[] src, int offset) {
+ return UNSAFE.getInt(src, BYTE_ARRAY_OFFSET + offset);
+ }
+
+ private static short readShort(byte[] src, int offset) {
+ return UNSAFE.getShort(src, BYTE_ARRAY_OFFSET + offset);
+ }
+
+ private static byte readByte(byte[] src, int offset) {
+ return UNSAFE.getByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * offset);
+ }
+
+ /** Compare the two given {@link BytesRef}s for equality. */
+ public static boolean equals(BytesRef b1, BytesRef b2) {
+ int len = b1.length;
+ if (b2.length != len) {
+ return false;
+ }
+ int o1 = b1.offset, o2 = b2.offset;
+ while (len >= 8) {
+ if (readLong(b1.bytes, o1) != readLong(b2.bytes, o2)) {
+ return false;
+ }
+ len -= 8;
+ o1 += 8;
+ o2 += 8;
+ }
+ if (len >= 4) {
+ if (readInt(b1.bytes, o1) != readInt(b2.bytes, o2)) {
+ return false;
+ }
+ len -= 4;
+ o1 += 4;
+ o2 += 4;
+ }
+ if (len >= 2) {
+ if (readShort(b1.bytes, o1) != readShort(b2.bytes, o2)) {
+ return false;
+ }
+ len -= 2;
+ o1 += 2;
+ o2 += 2;
+ }
+ if (len == 1) {
+ if (readByte(b1.bytes, o1) != readByte(b2.bytes, o2)) {
+ return false;
+ }
+ } else {
+ assert len == 0;
+ }
+ return true;
+ }
+
+}
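Usage sketch (editorial illustration, not part of this patch): equals() compares the two refs
a word at a time, which is the point of going through Unsafe; it is semantically equivalent to
BytesRef#bytesEquals.

    import org.apache.lucene.util.BytesRef;

    BytesRef b1 = new BytesRef("elasticsearch");
    BytesRef b2 = new BytesRef("elasticsearch");
    // 8 bytes per step, then 4/2/1 for the tail.
    boolean same = UnsafeUtils.equals(b1, b2); // true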
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRunnable.java b/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRunnable.java
new file mode 100644
index 0000000..aa17f85
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRunnable.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+/**
+ * An extension to {@link Runnable}.
+ */
+public abstract class AbstractRunnable implements Runnable {
+
+ /**
+ * Should the runnable force its execution in case it gets rejected?
+ */
+ public boolean isForceExecution() {
+ return false;
+ }
+}
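Hedged sketch (editorial illustration): a task that must not be dropped overrides
isForceExecution(), which EsAbortPolicy (added later in this patch) honors by force-putting
the task onto the executor's queue instead of rejecting it.

    AbstractRunnable criticalTask = new AbstractRunnable() {
        @Override
        public boolean isForceExecution() {
            return true; // queue this task even when the bounded queue is full
        }

        @Override
        public void run() {
            // ... work that must eventually run ...
        }
    };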
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java b/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java
new file mode 100644
index 0000000..c7e03f9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchGenerationException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * A fixed-size array backed by an {@link AtomicReferenceArray} that may hold null values,
+ * allowing the non-null values to be retrieved as a list using {@link #asList()}.
+ */
+public class AtomicArray<E> {
+
+ private static final AtomicArray EMPTY = new AtomicArray(0);
+
+ @SuppressWarnings("unchecked")
+ public static <E> E empty() {
+ return (E) EMPTY;
+ }
+
+ private final AtomicReferenceArray<E> array;
+ private volatile List<Entry<E>> nonNullList;
+
+ public AtomicArray(int size) {
+ array = new AtomicReferenceArray<E>(size);
+ }
+
+ /**
+ * The size of the expected results, including potential null values.
+ */
+ public int length() {
+ return array.length();
+ }
+
+
+ /**
+ * Sets the element at position {@code i} to the given value.
+ *
+ * @param i the index
+ * @param value the new value
+ */
+ public void set(int i, E value) {
+ array.set(i, value);
+ if (nonNullList != null) { // read first: cheaper than an unconditional write, and most of the time it will be null...
+ nonNullList = null;
+ }
+ }
+
+ /**
+ * Gets the current value at position {@code i}.
+ *
+ * @param i the index
+ * @return the current value
+ */
+ public E get(int i) {
+ return array.get(i);
+ }
+
+ /**
+ * Returns the non-null values as a list, with each value wrapped in an {@link Entry}
+ * that retains its original index.
+ */
+ public List<Entry<E>> asList() {
+ if (nonNullList == null) {
+ if (array == null || array.length() == 0) {
+ nonNullList = ImmutableList.of();
+ } else {
+ List<Entry<E>> list = new ArrayList<Entry<E>>(array.length());
+ for (int i = 0; i < array.length(); i++) {
+ E e = array.get(i);
+ if (e != null) {
+ list.add(new Entry<E>(i, e));
+ }
+ }
+ nonNullList = list;
+ }
+ }
+ return nonNullList;
+ }
+
+ /**
+ * Copies the content of the underlying atomic array to a normal one.
+ */
+ public E[] toArray(E[] a) {
+ if (a.length != array.length()) {
+ throw new ElasticsearchGenerationException("AtomicArrays can only be copied to arrays of the same size");
+ }
+ for (int i = 0; i < array.length(); i++) {
+ a[i] = array.get(i);
+ }
+ return a;
+ }
+
+ /**
+ * An entry within the array.
+ */
+ public static class Entry<E> {
+ /**
+ * The original index of the value within the array.
+ */
+ public final int index;
+ /**
+ * The value.
+ */
+ public final E value;
+
+ public Entry(int index, E value) {
+ this.index = index;
+ this.value = value;
+ }
+ }
+}
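Usage sketch (editorial illustration, not part of this patch): slots are filled as responses
arrive, possibly leaving gaps, and asList() later yields only the non-null entries together
with their original indices.

    AtomicArray<String> responses = new AtomicArray<String>(3);
    responses.set(0, "shard-0-ok");
    responses.set(2, "shard-2-ok"); // slot 1 never responded
    for (AtomicArray.Entry<String> entry : responses.asList()) {
        System.out.println(entry.index + " -> " + entry.value);
    }
    // prints: 0 -> shard-0-ok
    //         2 -> shard-2-ok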
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java b/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java
new file mode 100644
index 0000000..964a6c7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java
@@ -0,0 +1,365 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.google.common.annotations.Beta;
+import org.elasticsearch.common.Nullable;
+
+import java.util.concurrent.*;
+import java.util.concurrent.locks.AbstractQueuedSynchronizer;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * An abstract implementation of the {@link com.google.common.util.concurrent.ListenableFuture} interface. This
+ * class is preferable to {@link java.util.concurrent.FutureTask} for two
+ * reasons: It implements {@code ListenableFuture}, and it does not implement
+ * {@code Runnable}. (If you want a {@code Runnable} implementation of {@code
+ * ListenableFuture}, create a {@link com.google.common.util.concurrent.ListenableFutureTask}, or submit your
+ * tasks to a {@link com.google.common.util.concurrent.ListeningExecutorService}.)
+ * <p/>
+ * <p>This class implements all methods in {@code ListenableFuture}.
+ * Subclasses should provide a way to set the result of the computation through
+ * the protected methods {@link #set(Object)} and
+ * {@link #setException(Throwable)}. Subclasses may also override {@link
+ * #interruptTask()}, which will be invoked automatically if a call to {@link
+ * #cancel(boolean) cancel(true)} succeeds in canceling the future.
+ * <p/>
+ * <p>{@code AbstractFuture} uses an {@link AbstractQueuedSynchronizer} to deal
+ * with concurrency issues and guarantee thread safety.
+ * <p/>
+ * <p>The state changing methods all return a boolean indicating success or
+ * failure in changing the future's state. Valid states are running,
+ * completed, failed, or cancelled.
+ * <p/>
+ * <p>This class uses an {@link com.google.common.util.concurrent.ExecutionList} to guarantee that all registered
+ * listeners will be executed, either when the future finishes or, for listeners
+ * that are added after the future completes, immediately.
+ * {@code Runnable}-{@code Executor} pairs are stored in the execution list but
+ * are not necessarily executed in the order in which they were added. (If a
+ * listener is added after the Future is complete, it will be executed
+ * immediately, even if earlier listeners have not been executed. Additionally,
+ * executors need not guarantee FIFO execution, or different listeners may run
+ * in different executors.)
+ *
+ * @author Sven Mawson
+ * @since 1.0
+ */
+// Same as AbstractFuture from Guava, but without the listeners
+public abstract class BaseFuture<V> implements Future<V> {
+
+ /**
+ * Synchronization control for AbstractFutures.
+ */
+ private final Sync<V> sync = new Sync<V>();
+
+ /*
+ * Improve the documentation of when InterruptedException is thrown. Our
+ * behavior matches the JDK's, but the JDK's documentation is misleading.
+ */
+
+ /**
+ * {@inheritDoc}
+ * <p/>
+ * <p>The default {@link BaseFuture} implementation throws {@code
+ * InterruptedException} if the current thread is interrupted before or during
+ * the call, even if the value is already available.
+ *
+ * @throws InterruptedException if the current thread was interrupted before
+ * or during the call (optional but recommended).
+ * @throws CancellationException {@inheritDoc}
+ */
+ @Override
+ public V get(long timeout, TimeUnit unit) throws InterruptedException,
+ TimeoutException, ExecutionException {
+ return sync.get(unit.toNanos(timeout));
+ }
+
+ /*
+ * Improve the documentation of when InterruptedException is thrown. Our
+ * behavior matches the JDK's, but the JDK's documentation is misleading.
+ */
+
+ /**
+ * {@inheritDoc}
+ * <p/>
+ * <p>The default {@link BaseFuture} implementation throws {@code
+ * InterruptedException} if the current thread is interrupted before or during
+ * the call, even if the value is already available.
+ *
+ * @throws InterruptedException if the current thread was interrupted before
+ * or during the call (optional but recommended).
+ * @throws CancellationException {@inheritDoc}
+ */
+ @Override
+ public V get() throws InterruptedException, ExecutionException {
+ return sync.get();
+ }
+
+ @Override
+ public boolean isDone() {
+ return sync.isDone();
+ }
+
+ @Override
+ public boolean isCancelled() {
+ return sync.isCancelled();
+ }
+
+ @Override
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ if (!sync.cancel()) {
+ return false;
+ }
+ done();
+ if (mayInterruptIfRunning) {
+ interruptTask();
+ }
+ return true;
+ }
+
+ /**
+ * Subclasses can override this method to implement interruption of the
+ * future's computation. The method is invoked automatically by a successful
+ * call to {@link #cancel(boolean) cancel(true)}.
+ * <p/>
+ * <p>The default implementation does nothing.
+ *
+ * @since 10.0
+ */
+ protected void interruptTask() {
+ }
+
+ /**
+ * Subclasses should invoke this method to set the result of the computation
+ * to {@code value}. This will set the state of the future to
+ * {@link BaseFuture.Sync#COMPLETED} and call {@link #done()} if the
+ * state was successfully changed.
+ *
+ * @param value the value that was the result of the task.
+ * @return true if the state was successfully changed.
+ */
+ protected boolean set(@Nullable V value) {
+ boolean result = sync.set(value);
+ if (result) {
+ done();
+ }
+ return result;
+ }
+
+ /**
+ * Subclasses should invoke this method to set the result of the computation
+ * to an error, {@code throwable}. This will set the state of the future to
+ * {@link BaseFuture.Sync#COMPLETED} and call {@link #done()} if the
+ * state was successfully changed.
+ *
+ * @param throwable the exception that the task failed with.
+ * @return true if the state was successfully changed.
+ * @throws Error if the throwable was an {@link Error}.
+ */
+ protected boolean setException(Throwable throwable) {
+ boolean result = sync.setException(checkNotNull(throwable));
+ if (result) {
+ done();
+ }
+
+ // If it's an Error, we want to make sure it reaches the top of the
+ // call stack, so we rethrow it.
+
+ // we want to notify the listeners we have with errors as well, as it breaks
+ // how we work in ES in terms of using assertions
+// if (throwable instanceof Error) {
+// throw (Error) throwable;
+// }
+ return result;
+ }
+
+ @Beta
+ protected void done() {
+ }
+
+ /**
+ * <p>Following the contract of {@link AbstractQueuedSynchronizer} we create a
+ * private subclass to hold the synchronizer. This synchronizer is used to
+ * implement the blocking and waiting calls as well as to handle state changes
+ * in a thread-safe manner. The current state of the future is held in the
+ * Sync state, and the lock is released whenever the state changes to either
+ * {@link #COMPLETED} or {@link #CANCELLED}.
+ * <p/>
+ * <p>To avoid races between threads doing release and acquire, we transition
+ * to the final state in two steps. One thread will successfully CAS from
+ * RUNNING to COMPLETING, that thread will then set the result of the
+ * computation, and only then transition to COMPLETED or CANCELLED.
+ * <p/>
+ * <p>We don't use the integer argument passed between acquire methods so we
+ * pass around a -1 everywhere.
+ */
+ static final class Sync<V> extends AbstractQueuedSynchronizer {
+
+ private static final long serialVersionUID = 0L;
+
+ /* Valid states. */
+ static final int RUNNING = 0;
+ static final int COMPLETING = 1;
+ static final int COMPLETED = 2;
+ static final int CANCELLED = 4;
+
+ private V value;
+ private Throwable exception;
+
+ /*
+ * Acquisition succeeds if the future is done, otherwise it fails.
+ */
+ @Override
+ protected int tryAcquireShared(int ignored) {
+ if (isDone()) {
+ return 1;
+ }
+ return -1;
+ }
+
+ /*
+ * We always allow a release to go through, this means the state has been
+ * successfully changed and the result is available.
+ */
+ @Override
+ protected boolean tryReleaseShared(int finalState) {
+ setState(finalState);
+ return true;
+ }
+
+ /**
+ * Blocks until the task is complete or the timeout expires. Throws a
+ * {@link TimeoutException} if the timer expires, otherwise behaves like
+ * {@link #get()}.
+ */
+ V get(long nanos) throws TimeoutException, CancellationException,
+ ExecutionException, InterruptedException {
+
+ // Attempt to acquire the shared lock with a timeout.
+ if (!tryAcquireSharedNanos(-1, nanos)) {
+ throw new TimeoutException("Timeout waiting for task.");
+ }
+
+ return getValue();
+ }
+
+ /**
+ * Blocks until {@link #complete(Object, Throwable, int)} has been
+ * successfully called. Throws a {@link CancellationException} if the task
+ * was cancelled, or a {@link ExecutionException} if the task completed with
+ * an error.
+ */
+ V get() throws CancellationException, ExecutionException,
+ InterruptedException {
+
+ // Acquire the shared lock allowing interruption.
+ acquireSharedInterruptibly(-1);
+ return getValue();
+ }
+
+ /**
+ * Implementation of the actual value retrieval. Returns the value on success,
+ * throws an ExecutionException on failure, a CancellationException on
+ * cancellation, and an IllegalStateException if the synchronizer state is invalid.
+ */
+ private V getValue() throws CancellationException, ExecutionException {
+ int state = getState();
+ switch (state) {
+ case COMPLETED:
+ if (exception != null) {
+ throw new ExecutionException(exception);
+ } else {
+ return value;
+ }
+
+ case CANCELLED:
+ throw new CancellationException("Task was cancelled.");
+
+ default:
+ throw new IllegalStateException(
+ "Error, synchronizer in invalid state: " + state);
+ }
+ }
+
+ /**
+ * Checks if the state is {@link #COMPLETED} or {@link #CANCELLED}.
+ */
+ boolean isDone() {
+ return (getState() & (COMPLETED | CANCELLED)) != 0;
+ }
+
+ /**
+ * Checks if the state is {@link #CANCELLED}.
+ */
+ boolean isCancelled() {
+ return getState() == CANCELLED;
+ }
+
+ /**
+ * Transition to the COMPLETED state and set the value.
+ */
+ boolean set(@Nullable V v) {
+ return complete(v, null, COMPLETED);
+ }
+
+ /**
+ * Transition to the COMPLETED state and set the exception.
+ */
+ boolean setException(Throwable t) {
+ return complete(null, t, COMPLETED);
+ }
+
+ /**
+ * Transition to the CANCELLED state.
+ */
+ boolean cancel() {
+ return complete(null, null, CANCELLED);
+ }
+
+ /**
+ * Implementation of completing a task. Either {@code v} or {@code t} will
+ * be set but not both. The {@code finalState} is the state to change to
+ * from {@link #RUNNING}. If the state is not in the RUNNING state we
+ * return {@code false} after waiting for the state to be set to a valid
+ * final state ({@link #COMPLETED} or {@link #CANCELLED}).
+ *
+ * @param v the value to set as the result of the computation.
+ * @param t the exception to set as the result of the computation.
+ * @param finalState the state to transition to.
+ */
+ private boolean complete(@Nullable V v, @Nullable Throwable t,
+ int finalState) {
+ boolean doCompletion = compareAndSetState(RUNNING, COMPLETING);
+ if (doCompletion) {
+ // If this thread successfully transitioned to COMPLETING, set the value
+ // and exception and then release to the final state.
+ this.value = v;
+ this.exception = t;
+ releaseShared(finalState);
+ } else if (getState() == COMPLETING) {
+ // If some other thread is currently completing the future, block until
+ // they are done so we can guarantee completion.
+ acquireShared(-1);
+ }
+ return doCompletion;
+ }
+ }
+}
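Hedged sketch of the smallest useful subclass (PlainFuture is an illustrative name, not part
of the patch): a producer thread completes the future through the protected setters, and
get() unblocks.

    final class PlainFuture<V> extends BaseFuture<V> {
        @Override
        public boolean set(V value) {
            return super.set(value); // widen visibility for producers
        }

        @Override
        public boolean setException(Throwable t) {
            return super.setException(t);
        }
    }

    // call site must handle InterruptedException/ExecutionException
    final PlainFuture<String> future = new PlainFuture<String>();
    new Thread(new Runnable() {
        @Override
        public void run() {
            future.set("done");
        }
    }).start();
    String result = future.get(); // blocks until set(...) releases the Sync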
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java b/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java
new file mode 100644
index 0000000..6edf4d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.google.common.collect.Sets;
+import jsr166e.ConcurrentHashMapV8;
+import jsr166y.ConcurrentLinkedDeque;
+import jsr166y.LinkedTransferQueue;
+
+import java.util.Deque;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ *
+ */
+public abstract class ConcurrentCollections {
+
+ private final static boolean useConcurrentHashMapV8 = Boolean.parseBoolean(System.getProperty("es.useConcurrentHashMapV8", "false"));
+ private final static boolean useLinkedTransferQueue = Boolean.parseBoolean(System.getProperty("es.useLinkedTransferQueue", "false"));
+
+ static final int aggressiveConcurrencyLevel;
+
+ static {
+ aggressiveConcurrencyLevel = Math.max(Runtime.getRuntime().availableProcessors() * 2, 16);
+ }
+
+ /**
+ * Creates a new CHM with an aggressive concurrency level, aimed at long-lived maps with a high concurrent update rate.
+ */
+ public static <K, V> ConcurrentMap<K, V> newConcurrentMapWithAggressiveConcurrency() {
+ if (useConcurrentHashMapV8) {
+ return new ConcurrentHashMapV8<K, V>(16, 0.75f, aggressiveConcurrencyLevel);
+ }
+ return new ConcurrentHashMap<K, V>(16, 0.75f, aggressiveConcurrencyLevel);
+ }
+
+ public static <K, V> ConcurrentMap<K, V> newConcurrentMap() {
+ if (useConcurrentHashMapV8) {
+ return new ConcurrentHashMapV8<K, V>();
+ }
+ return new ConcurrentHashMap<K, V>();
+ }
+
+ /**
+ * Creates a new CHM with an aggressive concurrency level, aimed at long-lived maps with a high concurrent update rate.
+ */
+ public static <V> ConcurrentMapLong<V> newConcurrentMapLongWithAggressiveConcurrency() {
+ return new ConcurrentHashMapLong<V>(ConcurrentCollections.<Long, V>newConcurrentMapWithAggressiveConcurrency());
+ }
+
+ public static <V> ConcurrentMapLong<V> newConcurrentMapLong() {
+ return new ConcurrentHashMapLong<V>(ConcurrentCollections.<Long, V>newConcurrentMap());
+ }
+
+ public static <V> Set<V> newConcurrentSet() {
+ return Sets.newSetFromMap(ConcurrentCollections.<V, Boolean>newConcurrentMap());
+ }
+
+ public static <T> Queue<T> newQueue() {
+ if (useLinkedTransferQueue) {
+ return new LinkedTransferQueue<T>();
+ }
+ return new ConcurrentLinkedQueue<T>();
+ }
+
+ public static <T> Deque<T> newDeque() {
+ return new ConcurrentLinkedDeque<T>();
+ }
+
+ public static <T> BlockingQueue<T> newBlockingQueue() {
+ return new LinkedTransferQueue<T>();
+ }
+
+ private ConcurrentCollections() {
+
+ }
+}
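Usage sketch (editorial illustration, not part of this patch): the factory methods default to
the JDK collections, and the jsr166 backports are switched in with
-Des.useConcurrentHashMapV8=true and -Des.useLinkedTransferQueue=true respectively.

    import java.util.Queue;
    import java.util.Set;
    import java.util.concurrent.ConcurrentMap;

    ConcurrentMap<String, Long> stats = ConcurrentCollections.newConcurrentMap();
    Set<String> seenIds = ConcurrentCollections.newConcurrentSet();
    Queue<Runnable> pending = ConcurrentCollections.newQueue();
    stats.putIfAbsent("requests", 0L);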
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java b/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java
new file mode 100644
index 0000000..5f25cea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ *
+ */
+public class ConcurrentHashMapLong<T> implements ConcurrentMapLong<T> {
+
+ private final ConcurrentMap<Long, T> map;
+
+ public ConcurrentHashMapLong(ConcurrentMap<Long, T> map) {
+ this.map = map;
+ }
+
+ @Override
+ public T get(long key) {
+ return map.get(key);
+ }
+
+ @Override
+ public T remove(long key) {
+ return map.remove(key);
+ }
+
+ @Override
+ public T put(long key, T value) {
+ return map.put(key, value);
+ }
+
+ @Override
+ public T putIfAbsent(long key, T value) {
+ return map.putIfAbsent(key, value);
+ }
+
+ // MAP DELEGATION
+
+ @Override
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public T get(Object key) {
+ return map.get(key);
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ return map.containsKey(key);
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ return map.containsValue(value);
+ }
+
+ public T put(Long key, T value) {
+ return map.put(key, value);
+ }
+
+ public T putIfAbsent(Long key, T value) {
+ return map.putIfAbsent(key, value);
+ }
+
+ public void putAll(Map<? extends Long, ? extends T> m) {
+ map.putAll(m);
+ }
+
+ @Override
+ public T remove(Object key) {
+ return map.remove(key);
+ }
+
+ @Override
+ public boolean remove(Object key, Object value) {
+ return map.remove(key, value);
+ }
+
+ public boolean replace(Long key, T oldValue, T newValue) {
+ return map.replace(key, oldValue, newValue);
+ }
+
+ public T replace(Long key, T value) {
+ return map.replace(key, value);
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public Set<Long> keySet() {
+ return map.keySet();
+ }
+
+ @Override
+ public Collection<T> values() {
+ return map.values();
+ }
+
+ @Override
+ public Set<Entry<Long, T>> entrySet() {
+ return map.entrySet();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return map.equals(o);
+ }
+
+ @Override
+ public int hashCode() {
+ return map.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return map.toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java b/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java
new file mode 100644
index 0000000..82212ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ *
+ */
+public interface ConcurrentMapLong<T> extends ConcurrentMap<Long, T> {
+
+ T get(long key);
+
+ T remove(long key);
+
+ T put(long key, T value);
+
+ T putIfAbsent(long key, T value);
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java
new file mode 100644
index 0000000..bfd530c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A simple thread-safe count-down class that, in contrast to a {@link CountDownLatch},
+ * never blocks. This class is useful if a certain action has to wait for N concurrent
+ * tasks to return or a timeout to occur in order to proceed.
+ */
+public final class CountDown {
+
+ private final AtomicInteger countDown;
+ private final int originalCount;
+
+ public CountDown(int count) {
+ if (count < 0) {
+ throw new ElasticsearchIllegalArgumentException("count must be greater or equal to 0 but was: " + count);
+ }
+ this.originalCount = count;
+ this.countDown = new AtomicInteger(count);
+ }
+
+ /**
+ * Decrements the count-down and returns <code>true</code> iff this call
+ * caused it to reach zero, otherwise <code>false</code>.
+ */
+ public boolean countDown() {
+ assert originalCount > 0;
+ for (;;) {
+ final int current = countDown.get();
+ assert current >= 0;
+ if (current == 0) {
+ return false;
+ }
+ if (countDown.compareAndSet(current, current - 1)) {
+ return current == 1;
+ }
+ }
+ }
+
+ /**
+ * Fast-forwards the count-down to zero and returns <code>true</code> iff
+ * the count-down reached zero with this fast-forward call, otherwise
+ * <code>false</code>.
+ */
+ public boolean fastForward() {
+ assert originalCount > 0;
+ assert countDown.get() >= 0;
+ return countDown.getAndSet(0) > 0;
+ }
+
+ /**
+ * Returns <code>true</code> iff the count-down has reached zero, otherwise <code>false</code>.
+ */
+ public boolean isCountedDown() {
+ assert countDown.get() >= 0;
+ return countDown.get() == 0;
+ }
+}
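Usage sketch (editorial illustration; finish() is a hypothetical callback, not part of the
patch): the typical pattern is N asynchronous callbacks racing a timeout, where exactly one
caller observes the transition to zero and runs the completion logic.

    final CountDown countDown = new CountDown(3); // expect 3 shard responses

    // in each response handler:
    if (countDown.countDown()) {
        finish(); // exactly one caller sees true
    }

    // in the timeout handler:
    if (countDown.fastForward()) {
        finish(); // timed out first; late responses now count down a zeroed counter and return false
    }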
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java
new file mode 100644
index 0000000..cf62394
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.metrics.CounterMetric;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+
+/**
+ */
+public class EsAbortPolicy implements XRejectedExecutionHandler {
+
+ private final CounterMetric rejected = new CounterMetric();
+ public static final String SHUTTING_DOWN_KEY = "(shutting down)";
+
+ @Override
+ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
+ if (r instanceof AbstractRunnable) {
+ if (((AbstractRunnable) r).isForceExecution()) {
+ BlockingQueue<Runnable> queue = executor.getQueue();
+ if (!(queue instanceof SizeBlockingQueue)) {
+ throw new ElasticsearchIllegalStateException("forced execution, but expected a size queue");
+ }
+ try {
+ ((SizeBlockingQueue) queue).forcePut(r);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ElasticsearchIllegalStateException("forced execution, but got interrupted", e);
+ }
+ return;
+ }
+ }
+ rejected.inc();
+ StringBuilder sb = new StringBuilder("rejected execution ");
+ if (executor.isShutdown()) {
+ sb.append(SHUTTING_DOWN_KEY + " ");
+ } else {
+ if (executor.getQueue() instanceof SizeBlockingQueue) {
+ sb.append("(queue capacity ").append(((SizeBlockingQueue) executor.getQueue()).capacity()).append(") ");
+ }
+ }
+ sb.append("on ").append(r.toString());
+ throw new EsRejectedExecutionException(sb.toString());
+ }
+
+ @Override
+ public long rejected() {
+ return rejected.count();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
new file mode 100644
index 0000000..202a6a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import jsr166y.LinkedTransferQueue;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ *
+ */
+public class EsExecutors {
+
+ /**
+ * Returns the number of processors available but at most <tt>32</tt>.
+ */
+ public static int boundedNumberOfProcessors(Settings settings) {
+ /* This relates to issues where machines with a large number of cores
+ * (i.e. >= 48) create too many threads and run into OOM, see #3478.
+ * We just use a 32-core upper bound here so as not to stress the system
+ * with too many created threads */
+ return settings.getAsInt("processors", Math.min(32, Runtime.getRuntime().availableProcessors()));
+ }
+
+ public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(ThreadFactory threadFactory) {
+ return new PrioritizedEsThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory);
+ }
+
+ public static EsThreadPoolExecutor newScaling(int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
+ ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<Runnable>();
+ // we force the execution, since we might run into concurrency issues in offer for ScalingBlockingQueue
+ EsThreadPoolExecutor executor = new EsThreadPoolExecutor(min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy());
+ queue.executor = executor;
+ return executor;
+ }
+
+ public static EsThreadPoolExecutor newCached(long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
+ return new EsThreadPoolExecutor(0, Integer.MAX_VALUE, keepAliveTime, unit, new SynchronousQueue<Runnable>(), threadFactory, new EsAbortPolicy());
+ }
+
+ public static EsThreadPoolExecutor newFixed(int size, int queueCapacity, ThreadFactory threadFactory) {
+ BlockingQueue<Runnable> queue;
+ if (queueCapacity < 0) {
+ queue = ConcurrentCollections.newBlockingQueue();
+ } else {
+ queue = new SizeBlockingQueue<Runnable>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
+ }
+ return new EsThreadPoolExecutor(size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy());
+ }
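+
+ // Usage sketch (illustrative only): a fixed pool of 4 threads with a bounded
+ // queue of 100 pending tasks; once the queue is full, further submissions are
+ // rejected by EsAbortPolicy with an EsRejectedExecutionException:
+ //
+ //   EsThreadPoolExecutor pool =
+ //           EsExecutors.newFixed(4, 100, EsExecutors.daemonThreadFactory("example"));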
+
+ public static String threadName(Settings settings, String namePrefix) {
+ String name = settings.get("name");
+ if (name == null) {
+ name = "elasticsearch";
+ } else {
+ name = "elasticsearch[" + name + "]";
+ }
+ return name + "[" + namePrefix + "]";
+ }
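+
+ // For example, with the node setting name=node1 and the prefix "search", the
+ // factories below create threads named elasticsearch[node1][search][T#1],
+ // elasticsearch[node1][search][T#2], and so on.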
+
+ public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) {
+ return daemonThreadFactory(threadName(settings, namePrefix));
+ }
+
+ public static ThreadFactory daemonThreadFactory(String namePrefix) {
+ return new EsThreadFactory(namePrefix);
+ }
+
+ static class EsThreadFactory implements ThreadFactory {
+ final ThreadGroup group;
+ final AtomicInteger threadNumber = new AtomicInteger(1);
+ final String namePrefix;
+
+ public EsThreadFactory(String namePrefix) {
+ this.namePrefix = namePrefix;
+ SecurityManager s = System.getSecurityManager();
+ group = (s != null) ? s.getThreadGroup() :
+ Thread.currentThread().getThreadGroup();
+ }
+
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread t = new Thread(group, r,
+ namePrefix + "[T#" + threadNumber.getAndIncrement() + "]",
+ 0);
+ t.setDaemon(true);
+ return t;
+ }
+ }
+
+ /**
+ * Cannot instantiate.
+ */
+ private EsExecutors() {
+ }
+
+
+ static class ExecutorScalingQueue<E> extends LinkedTransferQueue<E> {
+
+ ThreadPoolExecutor executor;
+
+ public ExecutorScalingQueue() {
+ }
+
+ @Override
+ public boolean offer(E e) {
+ if (!tryTransfer(e)) {
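+ // no idle worker took the task via tryTransfer; if the pool can still
+ // grow, reject the offer so the executor spawns a new thread (if it
+ // cannot because of a race, ForceQueuePolicy re-queues the task)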
+ int left = executor.getMaximumPoolSize() - executor.getCorePoolSize();
+ if (left > 0) {
+ return false;
+ } else {
+ return super.offer(e);
+ }
+ } else {
+ return true;
+ }
+ }
+ }
+
+ /**
+ * A handler for rejected tasks that adds the specified element to this queue,
+ * waiting if necessary for space to become available.
+ */
+ static class ForceQueuePolicy implements XRejectedExecutionHandler {
+ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
+ try {
+ executor.getQueue().put(r);
+ } catch (InterruptedException e) {
+ // should never happen, since the wrapped queues are unbounded and put() does not block
+ throw new EsRejectedExecutionException(e);
+ }
+ }
+
+ @Override
+ public long rejected() {
+ return 0;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java
new file mode 100644
index 0000000..d5be2dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ */
+public class EsRejectedExecutionException extends ElasticsearchException {
+
+ public EsRejectedExecutionException(String message) {
+ super(message);
+ }
+
+ public EsRejectedExecutionException() {
+ super(null);
+ }
+
+ public EsRejectedExecutionException(Throwable e) {
+ super(null, e);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
new file mode 100644
index 0000000..a9788fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An extension to {@link ThreadPoolExecutor} that allows (in the future) adding specific additional stats to it.
+ */
+public class EsThreadPoolExecutor extends ThreadPoolExecutor {
+
+ private volatile ShutdownListener listener;
+
+ private final Object monitor = new Object();
+
+ EsThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new EsAbortPolicy());
+ }
+
+ EsThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler) {
+ super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
+ }
+
+ public void shutdown(ShutdownListener listener) {
+ synchronized (monitor) {
+ if (this.listener != null) {
+ throw new ElasticsearchIllegalStateException("Shutdown was already called on this thread pool");
+ }
+ if (isTerminated()) {
+ listener.onTerminated();
+ } else {
+ this.listener = listener;
+ }
+ }
+ shutdown();
+ }
+
+ @Override
+ protected synchronized void terminated() {
+ super.terminated();
+ synchronized (monitor) {
+ if (listener != null) {
+ try {
+ listener.onTerminated();
+ } finally {
+ listener = null;
+ }
+ }
+ }
+ }
+
+ public static interface ShutdownListener {
+ public void onTerminated();
+ }
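+
+ // Usage sketch (illustrative only; "logger" is hypothetical): run a callback
+ // once the pool has fully terminated after shutdown:
+ //
+ //   executor.shutdown(new EsThreadPoolExecutor.ShutdownListener() {
+ //       @Override
+ //       public void onTerminated() {
+ //           logger.info("pool terminated");
+ //       }
+ //   });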
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java
new file mode 100644
index 0000000..7495e6c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * This class manages locks. Locks can be accessed with an identifier and are
+ * created the first time they are acquired and removed once no thread holds
+ * the lock. The latter is important to ensure that the set of locks does not
+ * grow indefinitely.
+ *
+ * A thread can acquire a given lock only once.
+ */
+
+public class KeyedLock<T> {
+
+ private final ConcurrentMap<T, KeyLock> map = new ConcurrentHashMap<T, KeyLock>();
+
+ private final ThreadLocal<KeyLock> threadLocal = new ThreadLocal<KeyedLock.KeyLock>();
+
+ public void acquire(T key) {
+ while (true) {
+ if (threadLocal.get() != null) {
+ // if we are here, the thread already has the lock
+ throw new ElasticsearchIllegalStateException("Lock already accquired in Thread" + Thread.currentThread().getId()
+ + " for key " + key);
+ }
+ KeyLock perNodeLock = map.get(key);
+ if (perNodeLock == null) {
+ KeyLock newLock = new KeyLock();
+ perNodeLock = map.putIfAbsent(key, newLock);
+ if (perNodeLock == null) {
+ newLock.lock();
+ threadLocal.set(newLock);
+ return;
+ }
+ }
+ assert perNodeLock != null;
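+ // piggyback on the existing lock only while its count is still > 0; a
+ // count of 0 means a concurrent release() is about to remove it from
+ // the map, so loop around and install a fresh lock instead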
+ int i = perNodeLock.count.get();
+ if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) {
+ perNodeLock.lock();
+ threadLocal.set(perNodeLock);
+ return;
+ }
+ }
+ }
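+
+ // Usage sketch (illustrative only; the key value is arbitrary): always pair
+ // acquire with release in a finally block:
+ //
+ //   KeyedLock<String> locks = new KeyedLock<String>();
+ //   locks.acquire("some-key");
+ //   try {
+ //       // ... work that must be exclusive per key ...
+ //   } finally {
+ //       locks.release("some-key");
+ //   }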
+
+ public void release(T key) {
+ KeyLock lock = threadLocal.get();
+ if (lock == null) {
+ throw new ElasticsearchIllegalStateException("Lock not accquired");
+ }
+ assert lock.isHeldByCurrentThread();
+ assert lock == map.get(key);
+ lock.unlock();
+ threadLocal.set(null);
+ int decrementAndGet = lock.count.decrementAndGet();
+ if (decrementAndGet == 0) {
+ map.remove(key, lock);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private final static class KeyLock extends ReentrantLock {
+ private final AtomicInteger count = new AtomicInteger(1);
+ }
+
+ public boolean hasLockedKeys() {
+ return !map.isEmpty();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java b/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
new file mode 100644
index 0000000..878645e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.logging.ESLogger;
+
+/**
+ */
+public class LoggingRunnable implements Runnable {
+
+ private final Runnable runnable;
+
+ private final ESLogger logger;
+
+ public LoggingRunnable(ESLogger logger, Runnable runnable) {
+ this.runnable = runnable;
+ this.logger = logger;
+ }
+
+ @Override
+ public void run() {
+ try {
+ runnable.run();
+ } catch (Exception e) {
+ logger.warn("failed to execute [{}]", e, runnable.toString());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java b/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java
new file mode 100644
index 0000000..28db378
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.Priority;
+
+import java.util.concurrent.Callable;
+
+/**
+ *
+ */
+public abstract class PrioritizedCallable<T> implements Callable<T>, Comparable<PrioritizedCallable> {
+
+ private final Priority priority;
+
+ public static <T> PrioritizedCallable<T> wrap(Callable<T> callable, Priority priority) {
+ return new Wrapped<T>(callable, priority);
+ }
+
+ protected PrioritizedCallable(Priority priority) {
+ this.priority = priority;
+ }
+
+ @Override
+ public int compareTo(PrioritizedCallable pc) {
+ return priority.compareTo(pc.priority);
+ }
+
+ public Priority priority() {
+ return priority;
+ }
+
+ static class Wrapped<T> extends PrioritizedCallable<T> {
+
+ private final Callable<T> callable;
+
+ private Wrapped(Callable<T> callable, Priority priority) {
+ super(priority);
+ this.callable = callable;
+ }
+
+ @Override
+ public T call() throws Exception {
+ return callable.call();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java b/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java
new file mode 100644
index 0000000..058e20e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * A prioritizing executor which uses a priority queue as a work queue. Submitted jobs are treated
+ * as {@link PrioritizedRunnable} and/or {@link PrioritizedCallable}; tasks that are not instances of these two are
+ * wrapped and assigned the default {@link Priority#NORMAL} priority.
+ * <p/>
+ * Note, if two tasks have the same priority, the first to arrive will be executed first (FIFO style).
+ */
+public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor {
+
+ private AtomicLong insertionOrder = new AtomicLong();
+
+ PrioritizedEsThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
+ super(corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<Runnable>(), threadFactory);
+ }
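+
+ // Usage sketch (illustrative only; "task" is a hypothetical Runnable): tasks
+ // wrapped with an explicit priority run before NORMAL ones; equal priorities
+ // run in insertion (FIFO) order:
+ //
+ //   PrioritizedEsThreadPoolExecutor pool =
+ //           EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory("prio"));
+ //   pool.execute(PrioritizedRunnable.wrap(task, Priority.URGENT));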
+
+ public Pending[] getPending() {
+ Object[] objects = getQueue().toArray();
+ Pending[] infos = new Pending[objects.length];
+ for (int i = 0; i < objects.length; i++) {
+ Object obj = objects[i];
+ if (obj instanceof TieBreakingPrioritizedRunnable) {
+ TieBreakingPrioritizedRunnable t = (TieBreakingPrioritizedRunnable) obj;
+ infos[i] = new Pending(t.runnable, t.priority(), t.insertionOrder);
+ } else if (obj instanceof PrioritizedFutureTask) {
+ PrioritizedFutureTask t = (PrioritizedFutureTask) obj;
+ infos[i] = new Pending(t.task, t.priority, t.insertionOrder);
+ }
+ }
+ return infos;
+ }
+
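+ /**
+ * Executes {@code command}; if it is still queued (i.e. has not started
+ * running) when {@code timeout} elapses, it is removed from the queue and
+ * {@code timeoutCallback} is invoked instead.
+ */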
+ public void execute(Runnable command, final ScheduledExecutorService timer, final TimeValue timeout, final Runnable timeoutCallback) {
+ if (command instanceof PrioritizedRunnable) {
+ command = new TieBreakingPrioritizedRunnable((PrioritizedRunnable) command, insertionOrder.incrementAndGet());
+ } else if (!(command instanceof PrioritizedFutureTask)) { // it might be a callable wrapper...
+ command = new TieBreakingPrioritizedRunnable(command, Priority.NORMAL, insertionOrder.incrementAndGet());
+ }
+ super.execute(command);
+ if (timeout.nanos() >= 0) {
+ final Runnable fCommand = command;
+ timer.schedule(new Runnable() {
+ @Override
+ public void run() {
+ boolean removed = getQueue().remove(fCommand);
+ if (removed) {
+ timeoutCallback.run();
+ }
+ }
+ }, timeout.nanos(), TimeUnit.NANOSECONDS);
+ }
+ }
+
+ @Override
+ public void execute(Runnable command) {
+ if (command instanceof PrioritizedRunnable) {
+ command = new TieBreakingPrioritizedRunnable((PrioritizedRunnable) command, insertionOrder.incrementAndGet());
+ } else if (!(command instanceof PrioritizedFutureTask)) { // it might be a callable wrapper...
+ command = new TieBreakingPrioritizedRunnable(command, Priority.NORMAL, insertionOrder.incrementAndGet());
+ }
+ super.execute(command);
+ }
+
+ @Override
+ protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
+ if (!(runnable instanceof PrioritizedRunnable)) {
+ runnable = PrioritizedRunnable.wrap(runnable, Priority.NORMAL);
+ }
+ return new PrioritizedFutureTask<T>((PrioritizedRunnable) runnable, value, insertionOrder.incrementAndGet());
+ }
+
+ @Override
+ protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
+ if (!(callable instanceof PrioritizedCallable)) {
+ callable = PrioritizedCallable.wrap(callable, Priority.NORMAL);
+ }
+ return new PrioritizedFutureTask<T>((PrioritizedCallable<T>) callable, insertionOrder.incrementAndGet());
+ }
+
+ public static class Pending {
+ public final Object task;
+ public final Priority priority;
+ public final long insertionOrder;
+
+ public Pending(Object task, Priority priority, long insertionOrder) {
+ this.task = task;
+ this.priority = priority;
+ this.insertionOrder = insertionOrder;
+ }
+ }
+
+ static class TieBreakingPrioritizedRunnable extends PrioritizedRunnable {
+
+ final Runnable runnable;
+ final long insertionOrder;
+
+ TieBreakingPrioritizedRunnable(PrioritizedRunnable runnable, long insertionOrder) {
+ this(runnable, runnable.priority(), insertionOrder);
+ }
+
+ TieBreakingPrioritizedRunnable(Runnable runnable, Priority priority, long insertionOrder) {
+ super(priority);
+ this.runnable = runnable;
+ this.insertionOrder = insertionOrder;
+ }
+
+ @Override
+ public void run() {
+ runnable.run();
+ }
+
+ @Override
+ public int compareTo(PrioritizedRunnable pr) {
+ int res = super.compareTo(pr);
+ if (res != 0 || !(pr instanceof TieBreakingPrioritizedRunnable)) {
+ return res;
+ }
+ return insertionOrder < ((TieBreakingPrioritizedRunnable) pr).insertionOrder ? -1 : 1;
+ }
+ }
+
+ static class PrioritizedFutureTask<T> extends FutureTask<T> implements Comparable<PrioritizedFutureTask> {
+
+ final Object task;
+ final Priority priority;
+ final long insertionOrder;
+
+ public PrioritizedFutureTask(PrioritizedRunnable runnable, T value, long insertionOrder) {
+ super(runnable, value);
+ this.task = runnable;
+ this.priority = runnable.priority();
+ this.insertionOrder = insertionOrder;
+ }
+
+ public PrioritizedFutureTask(PrioritizedCallable<T> callable, long insertionOrder) {
+ super(callable);
+ this.task = callable;
+ this.priority = callable.priority();
+ this.insertionOrder = insertionOrder;
+ }
+
+ @Override
+ public int compareTo(PrioritizedFutureTask pft) {
+ int res = priority.compareTo(pft.priority);
+ if (res != 0) {
+ return res;
+ }
+ return insertionOrder < pft.insertionOrder ? -1 : 1;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java b/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java
new file mode 100644
index 0000000..bd830ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.Priority;
+
+/**
+ *
+ */
+public abstract class PrioritizedRunnable implements Runnable, Comparable<PrioritizedRunnable> {
+
+ private final Priority priority;
+
+ public static PrioritizedRunnable wrap(Runnable runnable, Priority priority) {
+ return new Wrapped(runnable, priority);
+ }
+
+ protected PrioritizedRunnable(Priority priority) {
+ this.priority = priority;
+ }
+
+ @Override
+ public int compareTo(PrioritizedRunnable pr) {
+ return priority.compareTo(pr.priority);
+ }
+
+ public Priority priority() {
+ return priority;
+ }
+
+ static class Wrapped extends PrioritizedRunnable {
+
+ private final Runnable runnable;
+
+ private Wrapped(Runnable runnable, Priority priority) {
+ super(priority);
+ this.runnable = runnable;
+ }
+
+ @Override
+ public void run() {
+ runnable.run();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java
new file mode 100644
index 0000000..f174d2e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+import java.util.AbstractQueue;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A size-based queue wrapping another blocking queue to provide (somewhat relaxed) capacity checks.
+ * Mainly makes sense to use with blocking queues that are otherwise unbounded, in order to add
+ * capacity verification.
+ */
+public class SizeBlockingQueue<E> extends AbstractQueue<E> implements BlockingQueue<E> {
+
+ private final BlockingQueue<E> queue;
+ private final int capacity;
+
+ private final AtomicInteger size = new AtomicInteger();
+
+ public SizeBlockingQueue(BlockingQueue<E> queue, int capacity) {
+ assert capacity >= 0;
+ this.queue = queue;
+ this.capacity = capacity;
+ }
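+
+ // Usage sketch (illustrative only): bound an otherwise unbounded queue, the
+ // way EsExecutors.newFixed does:
+ //
+ //   BlockingQueue<Runnable> q = new SizeBlockingQueue<Runnable>(
+ //           ConcurrentCollections.<Runnable>newBlockingQueue(), 100);
+ //   boolean accepted = q.offer(task); // false once 100 elements are queued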
+
+ @Override
+ public int size() {
+ return size.get();
+ }
+
+ public int capacity() {
+ return this.capacity;
+ }
+
+ @Override
+ public Iterator<E> iterator() {
+ final Iterator<E> it = queue.iterator();
+ return new Iterator<E>() {
+ E current;
+
+ @Override
+ public boolean hasNext() {
+ return it.hasNext();
+ }
+
+ @Override
+ public E next() {
+ current = it.next();
+ return current;
+ }
+
+ @Override
+ public void remove() {
+ // note, we can't call #remove on the iterator because we need to know
+ // if it was removed or not
+ if (queue.remove(current)) {
+ size.decrementAndGet();
+ }
+ }
+ };
+ }
+
+ @Override
+ public E peek() {
+ return queue.peek();
+ }
+
+ @Override
+ public E poll() {
+ E e = queue.poll();
+ if (e != null) {
+ size.decrementAndGet();
+ }
+ return e;
+ }
+
+ @Override
+ public E poll(long timeout, TimeUnit unit) throws InterruptedException {
+ E e = queue.poll(timeout, unit);
+ if (e != null) {
+ size.decrementAndGet();
+ }
+ return e;
+ }
+
+ @Override
+ public boolean remove(Object o) {
+ boolean v = queue.remove(o);
+ if (v) {
+ size.decrementAndGet();
+ }
+ return v;
+ }
+
+ /**
+ * Forces adding an element to the queue, without doing size checks.
+ */
+ public void forcePut(E e) throws InterruptedException {
+ size.incrementAndGet();
+ try {
+ queue.put(e);
+ } catch (InterruptedException ie) {
+ size.decrementAndGet();
+ throw ie;
+ }
+ }
+
+
+ @Override
+ public boolean offer(E e) {
+ int count = size.incrementAndGet();
+ if (count > capacity) {
+ size.decrementAndGet();
+ return false;
+ }
+ boolean offered = queue.offer(e);
+ if (!offered) {
+ size.decrementAndGet();
+ }
+ return offered;
+ }
+
+ @Override
+ public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException {
+ // note, not used in ThreadPoolExecutor
+ throw new ElasticsearchIllegalStateException("offer with timeout not allowed on size queue");
+ }
+
+ @Override
+ public void put(E e) throws InterruptedException {
+ // note, not used in ThreadPoolExecutor
+ throw new ElasticsearchIllegalStateException("put not allowed on size queue");
+ }
+
+ @Override
+ public E take() throws InterruptedException {
+ E e;
+ try {
+ e = queue.take();
+ size.decrementAndGet();
+ } catch (InterruptedException ie) {
+ throw ie;
+ }
+ return e;
+ }
+
+ @Override
+ public int remainingCapacity() {
+ return capacity - size.get();
+ }
+
+ @Override
+ public int drainTo(Collection<? super E> c) {
+ int v = queue.drainTo(c);
+ size.addAndGet(-v);
+ return v;
+ }
+
+ @Override
+ public int drainTo(Collection<? super E> c, int maxElements) {
+ int v = queue.drainTo(c, maxElements);
+ size.addAndGet(-v);
+ return v;
+ }
+
+ @Override
+ public Object[] toArray() {
+ return queue.toArray();
+ }
+
+ @Override
+ public <T> T[] toArray(T[] a) {
+ return (T[]) queue.toArray(a);
+ }
+
+ @Override
+ public boolean contains(Object o) {
+ return queue.contains(o);
+ }
+
+ @Override
+ public boolean containsAll(Collection<?> c) {
+ return queue.containsAll(c);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java b/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java
new file mode 100644
index 0000000..2a773a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+
+/**
+ * A synchronization aid that allows a set of threads to all wait for each other
+ * to reach a common barrier point. Barriers are useful in programs involving a
+ * fixed-size party of threads that must occasionally wait for each other.
+ * <code>ThreadBarrier</code> adds a <i>cause</i> to
+ * {@link BrokenBarrierException} thrown by a {@link #reset()} operation defined
+ * by {@link CyclicBarrier}.
+ * <p/>
+ * <p/>
+ * <b>Sample usage:</b> <br>
+ * <li>Barrier as a synchronization and Exception handling aid </li>
+ * <li>Barrier as a trigger for elapsed notification events </li>
+ * <p/>
+ * <pre>
+ * class MyTestClass implements RemoteEventListener
+ * {
+ * final ThreadBarrier barrier;
+ *
+ * class Worker implements Runnable
+ * {
+ * public void run()
+ * {
+ * barrier.await(); //wait for all threads to reach run
+ * try
+ * {
+ * prepare();
+ * barrier.await(); //wait for all threads to prepare
+ * process();
+ * barrier.await(); //wait for all threads to process
+ * }
+ * catch(Throwable t){
+ * log(&quot;Worker thread caught exception&quot;, t);
+ * barrier.reset(t);
+ * }
+ * }
+ * }
+ *
+ * public void testThreads() {
+ * barrier = new ThreadBarrier(N_THREADS + 1);
+ * for (int i = 0; i &lt; N; ++i)
+ * new Thread(new Worker()).start();
+ *
+ * try{
+ * barrier.await(); //wait for all threads to reach run
+ * barrier.await(); //wait for all threads to prepare
+ * barrier.await(); //wait for all threads to process
+ * }
+ * catch(BrokenBarrierException bbe) {
+ * Assert.fail(bbe);
+ * }
+ * }
+ *
+ * int actualNotificationCount = 0;
+ * public synchronized void notify (RemoteEvent event) {
+ * try{
+ * actualNotificationCount++;
+ * if (actualNotificationCount == EXPECTED_COUNT)
+ * barrier.await(); //signal when all notifications arrive
+ *
+ * // too many notifications?
+ * Assert.assertFalse(&quot;Exceeded notification count&quot;,
+ * actualNotificationCount > EXPECTED_COUNT);
+ * }
+ * catch(Throwable t) {
+ * log(&quot;Worker thread caught exception&quot;, t);
+ * barrier.reset(t);
+ * }
+ * }
+ *
+ * public void testNotify() {
+ * barrier = new ThreadBarrier(N_LISTENERS + 1);
+ * registerNotification();
+ * triggerNotifications();
+ *
+ * //wait until either all notifications arrive, or
+ * //until a MAX_TIMEOUT is reached.
+ * barrier.await(MAX_TIMEOUT);
+ *
+ * //check if all notifications were accounted for or timed-out
+ * Assert.assertEquals(&quot;Notification count&quot;,
+ * EXPECTED_COUNT, actualNotificationCount);
+ *
+ * //inspect that the barrier isn't broken
+ * barrier.inspect(); //throws BrokenBarrierException if broken
+ * }
+ * }
+ * </pre>
+ *
+ *
+ */
+public class ThreadBarrier extends CyclicBarrier {
+ /**
+ * The cause of a {@link BrokenBarrierException} and {@link TimeoutException}
+ * thrown from an await() when {@link #reset(Throwable)} was invoked.
+ */
+ private Throwable cause;
+
+ public ThreadBarrier(int parties) {
+ super(parties);
+ }
+
+ public ThreadBarrier(int parties, Runnable barrierAction) {
+ super(parties, barrierAction);
+ }
+
+ @Override
+ public int await() throws InterruptedException, BrokenBarrierException {
+ try {
+ breakIfBroken();
+ return super.await();
+ } catch (BrokenBarrierException bbe) {
+ initCause(bbe);
+ throw bbe;
+ }
+ }
+
+ @Override
+ public int await(long timeout, TimeUnit unit) throws InterruptedException, BrokenBarrierException, TimeoutException {
+ try {
+ breakIfBroken();
+ return super.await(timeout, unit);
+ } catch (BrokenBarrierException bbe) {
+ initCause(bbe);
+ throw bbe;
+ } catch (TimeoutException te) {
+ initCause(te);
+ throw te;
+ }
+ }
+
+ /**
+ * Resets the barrier to its initial state. If any parties are
+ * currently waiting at the barrier, they will return with a
+ * {@link BrokenBarrierException}. Note that resets <em>after</em>
+ * a breakage has occurred for other reasons can be complicated to
+ * carry out; threads need to re-synchronize in some other way,
+ * and choose one to perform the reset. It may be preferable to
+ * instead create a new barrier for subsequent use.
+ *
+ * @param cause The cause of the BrokenBarrierException
+ */
+ public synchronized void reset(Throwable cause) {
+ if (!isBroken()) {
+ super.reset();
+ }
+
+ if (this.cause == null) {
+ this.cause = cause;
+ }
+ }
+
+ /**
+ * Queries if this barrier is in a broken state. Note that if
+ * {@link #reset(Throwable)} is invoked the barrier will remain broken, while
+ * {@link #reset()} will reset the barrier to its initial state and
+ * {@link #isBroken()} will return false.
+ *
+ * @return {@code true} if one or more parties broke out of this barrier due
+ * to interruption or timeout since construction or the last reset,
+ * or a barrier action failed due to an exception; {@code false}
+ * otherwise.
+ * @see #inspect()
+ */
+ @Override
+ public synchronized boolean isBroken() {
+ return this.cause != null || super.isBroken();
+ }
+
+ /**
+ * Inspects whether the barrier is broken. If the barrier was broken for any
+ * reason, a {@link BrokenBarrierException} will be thrown. Otherwise, the
+ * call returns gracefully.
+ *
+ * @throws BrokenBarrierException With a nested broken cause.
+ */
+ public synchronized void inspect() throws BrokenBarrierException {
+ try {
+ breakIfBroken();
+ } catch (BrokenBarrierException bbe) {
+ initCause(bbe);
+ throw bbe;
+ }
+ }
+
+ /**
+ * Breaks this barrier if it has been reset or broken for any other reason.
+ * <p/>
+ * Note: This call is not atomic with respect to await/reset calls. A
+ * breakIfBroken() may be context switched to invoke a reset() prior to
+ * await(). This resets the barrier to its initial state - parties not
+ * currently waiting at the barrier will not be accounted for! An await that
+ * wasn't time limited will block indefinitely.
+ *
+ * @throws BrokenBarrierException an empty BrokenBarrierException.
+ */
+ private synchronized void breakIfBroken()
+ throws BrokenBarrierException {
+ if (isBroken()) {
+ throw new BrokenBarrierException();
+ }
+ }
+
+ /**
+ * Initializes the cause of the given throwable to the cause recorded by
+ * {@link #reset(Throwable)}.
+ *
+ * @param t the throwable whose cause is initialized.
+ */
+ private synchronized void initCause(Throwable t) {
+ t.initCause(this.cause);
+ }
+
+ /**
+ * A Barrier action to be used in conjunction with {@link ThreadBarrier} to
+ * measure performance between barrier awaits. This runnable will execute
+ * when the barrier is tripped. Make sure to reset() the timer before the
+ * next measurement.
+ *
+ * @see ThreadBarrier#ThreadBarrier(int, Runnable)
+ * <p/>
+ * <B>Usage example:</B><br>
+ * <pre><code>
+ * BarrierTimer timer = new BarrierTimer();
+ * ThreadBarrier barrier = new ThreadBarrier( nTHREADS + 1, timer );
+ * ..
+ * barrier.await(); // starts timer when all threads trip on await
+ * barrier.await(); // stops timer when all threads trip on await
+ * ..
+ * long time = timer.getTimeInNanos();
+ * long tpi = time / ((long)nREPEATS * nTHREADS); //throughput per thread iteration
+ * long secs = timer.getTimeInSeconds(); //total runtime in seconds
+ * ..
+ * timer.reset(); // reuse timer
+ * </code></pre>
+ */
+ public static class BarrierTimer implements Runnable {
+ volatile boolean started;
+ volatile long startTime;
+ volatile long endTime;
+
+ public void run() {
+ long t = System.nanoTime();
+ if (!started) {
+ started = true;
+ startTime = t;
+ } else
+ endTime = t;
+ }
+
+ /**
+ * Resets (clears) this timer before the next execution.
+ */
+ public void reset() {
+ started = false;
+ }
+
+ /**
+ * Returns the elapsed time between two successive barrier executions.
+ *
+ * @return elapsed time in nanoseconds.
+ */
+ public long getTimeInNanos() {
+ return endTime - startTime;
+ }
+
+ /**
+ * Returns the elapsed time between two successive barrier executions.
+ *
+ * @return elapsed time in seconds.
+ */
+ public double getTimeInSeconds() {
+ long time = endTime - startTime;
+ return (time) / 1000000000.0;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java b/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java
new file mode 100644
index 0000000..a0e7c9e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class UncategorizedExecutionException extends ElasticsearchException {
+
+ public UncategorizedExecutionException(String msg) {
+ super(msg);
+ }
+
+ public UncategorizedExecutionException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java b/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java
new file mode 100644
index 0000000..e58f2ab
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import java.util.concurrent.RejectedExecutionHandler;
+
+/**
+ */
+public interface XRejectedExecutionHandler extends RejectedExecutionHandler {
+
+ /**
+ * The number of rejected executions.
+ */
+ long rejected();
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java b/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java
new file mode 100644
index 0000000..b4bfb66
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import org.elasticsearch.common.Booleans;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * An interface that allows transferring an object to "XContent" using an {@link XContentBuilder}.
+ */
+public interface ToXContent {
+
+ public static interface Params {
+ String param(String key);
+
+ String param(String key, String defaultValue);
+
+ boolean paramAsBoolean(String key, boolean defaultValue);
+
+ Boolean paramAsBoolean(String key, Boolean defaultValue);
+
+ /**
+ * @deprecated since 1.0.0
+ * use {@link ToXContent.Params#paramAsBoolean(String, Boolean)} instead
+ */
+ @Deprecated
+ Boolean paramAsBooleanOptional(String key, Boolean defaultValue);
+ }
+
+ public static final Params EMPTY_PARAMS = new Params() {
+ @Override
+ public String param(String key) {
+ return null;
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ return defaultValue;
+ }
+
+ @Override
+ public boolean paramAsBoolean(String key, boolean defaultValue) {
+ return defaultValue;
+ }
+
+ @Override
+ public Boolean paramAsBoolean(String key, Boolean defaultValue) {
+ return defaultValue;
+ }
+
+ @Override @Deprecated
+ public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) {
+ return paramAsBoolean(key, defaultValue);
+ }
+ };
+
+ public static class MapParams implements Params {
+
+ private final Map<String, String> params;
+
+ public MapParams(Map<String, String> params) {
+ this.params = params;
+ }
+
+ @Override
+ public String param(String key) {
+ return params.get(key);
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ String value = params.get(key);
+ if (value == null) {
+ return defaultValue;
+ }
+ return value;
+ }
+
+ @Override
+ public boolean paramAsBoolean(String key, boolean defaultValue) {
+ return Booleans.parseBoolean(param(key), defaultValue);
+ }
+
+ @Override
+ public Boolean paramAsBoolean(String key, Boolean defaultValue) {
+ return Booleans.parseBoolean(param(key), defaultValue);
+ }
+
+ @Override @Deprecated
+ public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) {
+ return paramAsBoolean(key, defaultValue);
+ }
+ }
+
+ public static class DelegatingMapParams extends MapParams {
+
+ private final Params delegate;
+
+ public DelegatingMapParams(Map<String, String> params, Params delegate) {
+ super(params);
+ this.delegate = delegate;
+ }
+
+ @Override
+ public String param(String key) {
+ return super.param(key, delegate.param(key));
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ return super.param(key, delegate.param(key, defaultValue));
+ }
+
+ @Override
+ public boolean paramAsBoolean(String key, boolean defaultValue) {
+ return super.paramAsBoolean(key, delegate.paramAsBoolean(key, defaultValue));
+ }
+
+ @Override
+ public Boolean paramAsBoolean(String key, Boolean defaultValue) {
+ return super.paramAsBoolean(key, delegate.paramAsBoolean(key, defaultValue));
+ }
+
+ @Override @Deprecated
+ public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) {
+ return super.paramAsBooleanOptional(key, delegate.paramAsBooleanOptional(key, defaultValue));
+ }
+ }
+
+ XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException;
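+
+ // Implementation sketch (illustrative only; User and its "name" field are
+ // hypothetical):
+ //
+ //   public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ //       builder.startObject();
+ //       builder.field("name", name);
+ //       builder.endObject();
+ //       return builder;
+ //   }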
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/src/main/java/org/elasticsearch/common/xcontent/XContent.java
new file mode 100644
index 0000000..d9cf704
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContent.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import org.elasticsearch.common.bytes.BytesReference;
+
+import java.io.*;
+
+/**
+ * A generic abstraction for handling content, inspired by JSON and pull parsing.
+ */
+public interface XContent {
+
+ /**
+ * The type this content handles and produces.
+ */
+ XContentType type();
+
+ byte streamSeparator();
+
+ /**
+ * Creates a new generator using the provided output stream.
+ */
+ XContentGenerator createGenerator(OutputStream os) throws IOException;
+
+ /**
+ * Creates a new generator using the provided writer.
+ */
+ XContentGenerator createGenerator(Writer writer) throws IOException;
+
+ /**
+ * Creates a parser over the provided string content.
+ */
+ XContentParser createParser(String content) throws IOException;
+
+ /**
+ * Creates a parser over the provided input stream.
+ */
+ XContentParser createParser(InputStream is) throws IOException;
+
+ /**
+ * Creates a parser over the provided bytes.
+ */
+ XContentParser createParser(byte[] data) throws IOException;
+
+ /**
+ * Creates a parser over the provided bytes.
+ */
+ XContentParser createParser(byte[] data, int offset, int length) throws IOException;
+
+ /**
+ * Creates a parser over the provided bytes.
+ */
+ XContentParser createParser(BytesReference bytes) throws IOException;
+
+ /**
+ * Creates a parser over the provided reader.
+ */
+ XContentParser createParser(Reader reader) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
new file mode 100644
index 0000000..d4c3b38
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
@@ -0,0 +1,1208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.io.BytesStream;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.joda.time.DateTimeZone;
+import org.joda.time.ReadableInstant;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.Map;
+
+/**
+ *
+ */
+public final class XContentBuilder implements BytesStream {
+
+ public static enum FieldCaseConversion {
+ /**
+ * No conversion will occur.
+ */
+ NONE,
+ /**
+ * Camel Case will be converted to Underscore casing.
+ */
+ UNDERSCORE,
+ /**
+ * Underscore will be converted to Camel case.
+ */
+ CAMELCASE
+ }
+
+ public final static DateTimeFormatter defaultDatePrinter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+
+ protected static FieldCaseConversion globalFieldCaseConversion = FieldCaseConversion.NONE;
+
+ public static void globalFieldCaseConversion(FieldCaseConversion globalFieldCaseConversion) {
+ XContentBuilder.globalFieldCaseConversion = globalFieldCaseConversion;
+ }
+
+ public static XContentBuilder builder(XContent xContent) throws IOException {
+ return new XContentBuilder(xContent, new BytesStreamOutput());
+ }
+
+ private XContentGenerator generator;
+
+ private final OutputStream bos;
+
+ private FieldCaseConversion fieldCaseConversion = globalFieldCaseConversion;
+
+ private StringBuilder cachedStringBuilder;
+
+ private boolean humanReadable = false;
+
+ /**
+ * Constructs a new builder using the provided xcontent and an OutputStream. Make sure
+ * to call {@link #close()} when you are done with the builder.
+ */
+ public XContentBuilder(XContent xContent, OutputStream bos) throws IOException {
+ this.bos = bos;
+ this.generator = xContent.createGenerator(bos);
+ }
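+
+ // Usage sketch (illustrative only; assumes the JSON implementation obtained
+ // via XContentFactory, which lives outside this file):
+ //
+ //   XContentBuilder builder = XContentFactory.jsonBuilder();
+ //   builder.startObject().field("user", "kimchy").field("count", 1).endObject();
+ //   String json = builder.string();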
+
+ public XContentBuilder fieldCaseConversion(FieldCaseConversion fieldCaseConversion) {
+ this.fieldCaseConversion = fieldCaseConversion;
+ return this;
+ }
+
+ public XContentType contentType() {
+ return generator.contentType();
+ }
+
+ public XContentBuilder prettyPrint() {
+ generator.usePrettyPrint();
+ return this;
+ }
+
+ public XContentBuilder lfAtEnd() {
+ generator.usePrintLineFeedAtEnd();
+ return this;
+ }
+
+ public XContentBuilder humanReadable(boolean humanReadable) {
+ this.humanReadable = humanReadable;
+ return this;
+ }
+
+ public boolean humanReadable() {
+ return this.humanReadable;
+ }
+
+ public XContentBuilder field(String name, ToXContent xContent) throws IOException {
+ field(name);
+ xContent.toXContent(this, ToXContent.EMPTY_PARAMS);
+ return this;
+ }
+
+ public XContentBuilder field(String name, ToXContent xContent, ToXContent.Params params) throws IOException {
+ field(name);
+ xContent.toXContent(this, params);
+ return this;
+ }
+
+ public XContentBuilder startObject(String name) throws IOException {
+ field(name);
+ startObject();
+ return this;
+ }
+
+ public XContentBuilder startObject(String name, FieldCaseConversion conversion) throws IOException {
+ field(name, conversion);
+ startObject();
+ return this;
+ }
+
+ public XContentBuilder startObject(XContentBuilderString name) throws IOException {
+ field(name);
+ startObject();
+ return this;
+ }
+
+ public XContentBuilder startObject(XContentBuilderString name, FieldCaseConversion conversion) throws IOException {
+ field(name, conversion);
+ startObject();
+ return this;
+ }
+
+ public XContentBuilder startObject() throws IOException {
+ generator.writeStartObject();
+ return this;
+ }
+
+ public XContentBuilder endObject() throws IOException {
+ generator.writeEndObject();
+ return this;
+ }
+
+ public XContentBuilder array(String name, String... values) throws IOException {
+ startArray(name);
+ for (String value : values) {
+ value(value);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder array(XContentBuilderString name, String... values) throws IOException {
+ startArray(name);
+ for (String value : values) {
+ value(value);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder array(String name, Object... values) throws IOException {
+ startArray(name);
+ for (Object value : values) {
+ value(value);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder array(XContentBuilderString name, Object... values) throws IOException {
+ startArray(name);
+ for (Object value : values) {
+ value(value);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder startArray(String name, FieldCaseConversion conversion) throws IOException {
+ field(name, conversion);
+ startArray();
+ return this;
+ }
+
+ public XContentBuilder startArray(String name) throws IOException {
+ field(name);
+ startArray();
+ return this;
+ }
+
+ public XContentBuilder startArray(XContentBuilderString name) throws IOException {
+ field(name);
+ startArray();
+ return this;
+ }
+
+ public XContentBuilder startArray() throws IOException {
+ generator.writeStartArray();
+ return this;
+ }
+
+ public XContentBuilder endArray() throws IOException {
+ generator.writeEndArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name) throws IOException {
+ if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) {
+ generator.writeFieldName(name.underscore());
+ } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
+ generator.writeFieldName(name.camelCase());
+ } else {
+ generator.writeFieldName(name.underscore());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException {
+ if (conversion == FieldCaseConversion.UNDERSCORE) {
+ generator.writeFieldName(name.underscore());
+ } else if (conversion == FieldCaseConversion.CAMELCASE) {
+ generator.writeFieldName(name.camelCase());
+ } else {
+ generator.writeFieldName(name.underscore());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name) throws IOException {
+ if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) {
+ if (cachedStringBuilder == null) {
+ cachedStringBuilder = new StringBuilder();
+ }
+ name = Strings.toUnderscoreCase(name, cachedStringBuilder);
+ } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
+ if (cachedStringBuilder == null) {
+ cachedStringBuilder = new StringBuilder();
+ }
+ name = Strings.toCamelCase(name, cachedStringBuilder);
+ }
+ generator.writeFieldName(name);
+ return this;
+ }
+
+ public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException {
+ if (conversion == FieldCaseConversion.UNDERSCORE) {
+ if (cachedStringBuilder == null) {
+ cachedStringBuilder = new StringBuilder();
+ }
+ name = Strings.toUnderscoreCase(name, cachedStringBuilder);
+ } else if (conversion == FieldCaseConversion.CAMELCASE) {
+ if (cachedStringBuilder == null) {
+ cachedStringBuilder = new StringBuilder();
+ }
+ name = Strings.toCamelCase(name, cachedStringBuilder);
+ }
+ generator.writeFieldName(name);
+ return this;
+ }
+
+ public XContentBuilder field(String name, char[] value, int offset, int length) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeString(value, offset, length);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, char[] value, int offset, int length) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeString(value, offset, length);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, String value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeString(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, String value, FieldCaseConversion conversion) throws IOException {
+ field(name, conversion);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeString(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, String value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeString(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, String value, FieldCaseConversion conversion) throws IOException {
+ field(name, conversion);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeString(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, Integer value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value.intValue());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Integer value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value.intValue());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, int value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, int value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, Long value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value.longValue());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Long value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value.longValue());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, long value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, long value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, Float value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value.floatValue());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Float value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value.floatValue());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, float value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, float value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, Double value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Double value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeNumber(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(String name, double value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, double value) throws IOException {
+ field(name);
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, BigDecimal value) throws IOException {
+ if (value == null) {
+ return field(name, (String) null);
+ }
+ return field(name, value, value.scale(), RoundingMode.HALF_UP, true);
+ }
+
+ public XContentBuilder field(XContentBuilderString name, BigDecimal value) throws IOException {
+ if (value == null) {
+ return field(name, (String) null);
+ }
+ return field(name, value, value.scale(), RoundingMode.HALF_UP, true);
+ }
+
+ public XContentBuilder field(String name, BigDecimal value, int scale, RoundingMode rounding, boolean toDouble) throws IOException {
+ field(name);
+ if (toDouble) {
+ try {
+ generator.writeNumber(value.setScale(scale, rounding).doubleValue());
+ } catch (ArithmeticException e) {
+ generator.writeString(value.toEngineeringString());
+ }
+ } else {
+ generator.writeString(value.toEngineeringString());
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, BigDecimal value, int scale, RoundingMode rounding, boolean toDouble) throws IOException {
+ field(name);
+ if (toDouble) {
+ try {
+ generator.writeNumber(value.setScale(scale, rounding).doubleValue());
+ } catch (ArithmeticException e) {
+ generator.writeString(value.toEngineeringString());
+ }
+ } else {
+ generator.writeString(value.toEngineeringString());
+ }
+ return this;
+ }
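+ // Hedged illustration of the BigDecimal handling above: with scale=2 and RoundingMode.HALF_UP,
+ // new BigDecimal("10.005") is written as the double 10.01; with toDouble=false (or when setScale
+ // throws, e.g. with RoundingMode.UNNECESSARY), the value is written as its engineering string instead.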
+
+ public XContentBuilder field(String name, BytesReference value) throws IOException {
+ field(name);
+ if (!value.hasArray()) {
+ value = value.toBytesArray();
+ }
+ generator.writeBinary(value.array(), value.arrayOffset(), value.length());
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, BytesReference value) throws IOException {
+ field(name);
+ if (!value.hasArray()) {
+ value = value.toBytesArray();
+ }
+ generator.writeBinary(value.array(), value.arrayOffset(), value.length());
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, BytesRef value) throws IOException {
+ field(name);
+ generator.writeUTF8String(value.bytes, value.offset, value.length);
+ return this;
+ }
+
+ public XContentBuilder field(String name, Text value) throws IOException {
+ field(name);
+ if (value.hasBytes() && value.bytes().hasArray()) {
+ generator.writeUTF8String(value.bytes().array(), value.bytes().arrayOffset(), value.bytes().length());
+ return this;
+ }
+ if (value.hasString()) {
+ generator.writeString(value.string());
+ return this;
+ }
+ // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a request to jackson to support InputStream as well?
+ BytesArray bytesArray = value.bytes().toBytesArray();
+ generator.writeUTF8String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Text value) throws IOException {
+ field(name);
+ if (value.hasBytes() && value.bytes().hasArray()) {
+ generator.writeUTF8String(value.bytes().array(), value.bytes().arrayOffset(), value.bytes().length());
+ return this;
+ }
+ if (value.hasString()) {
+ generator.writeString(value.string());
+ return this;
+ }
+ // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a request to jackson to support InputStream as well?
+ BytesArray bytesArray = value.bytes().toBytesArray();
+ generator.writeUTF8String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
+ return this;
+ }
+
+ public XContentBuilder field(String name, byte[] value, int offset, int length) throws IOException {
+ field(name);
+ generator.writeBinary(value, offset, length);
+ return this;
+ }
+
+ public XContentBuilder field(String name, Map<String, Object> value) throws IOException {
+ field(name);
+ value(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Map<String, Object> value) throws IOException {
+ field(name);
+ value(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, Iterable value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Iterable value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, String... value) throws IOException {
+ startArray(name);
+ for (String o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, String... value) throws IOException {
+ startArray(name);
+ for (String o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, Object... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Object... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, int... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, int offset, int length, int... value) throws IOException {
+ // length is used as an exclusive end index below, so the array must hold at least 'length' elements
+ assert ((offset >= 0) && (value.length >= length));
+ startArray(name);
+ for (int i = offset; i < length; i++) {
+ value(value[i]);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, int... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, long... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, long... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, float... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, float... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, double... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, double... value) throws IOException {
+ startArray(name);
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder field(String name, Object value) throws IOException {
+ field(name);
+ writeValue(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Object value) throws IOException {
+ field(name);
+ writeValue(value);
+ return this;
+ }
+
+ public XContentBuilder value(Object value) throws IOException {
+ writeValue(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, boolean value) throws IOException {
+ field(name);
+ generator.writeBoolean(value);
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, boolean value) throws IOException {
+ field(name);
+ generator.writeBoolean(value);
+ return this;
+ }
+
+ public XContentBuilder field(String name, byte[] value) throws IOException {
+ field(name);
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ generator.writeBinary(value);
+ }
+ return this;
+ }
+
+ public XContentBuilder field(XContentBuilderString name, byte[] value) throws IOException {
+ field(name);
+ return value(value);
+ }
+
+ public XContentBuilder field(String name, ReadableInstant date) throws IOException {
+ field(name);
+ return value(date);
+ }
+
+ public XContentBuilder field(XContentBuilderString name, ReadableInstant date) throws IOException {
+ field(name);
+ return value(date);
+ }
+
+ public XContentBuilder field(String name, ReadableInstant date, DateTimeFormatter formatter) throws IOException {
+ field(name);
+ return value(date, formatter);
+ }
+
+ public XContentBuilder field(XContentBuilderString name, ReadableInstant date, DateTimeFormatter formatter) throws IOException {
+ field(name);
+ return value(date, formatter);
+ }
+
+ public XContentBuilder field(String name, Date date) throws IOException {
+ field(name);
+ return value(date);
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Date date) throws IOException {
+ field(name);
+ return value(date);
+ }
+
+ public XContentBuilder field(String name, Date date, DateTimeFormatter formatter) throws IOException {
+ field(name);
+ return value(date, formatter);
+ }
+
+ public XContentBuilder field(XContentBuilderString name, Date date, DateTimeFormatter formatter) throws IOException {
+ field(name);
+ return value(date, formatter);
+ }
+
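+ // Note: the String overload below delegates to writeNullField() directly and therefore bypasses
+ // the builder's field case conversion, unlike the XContentBuilderString overload that follows it.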
+ public XContentBuilder nullField(String name) throws IOException {
+ generator.writeNullField(name);
+ return this;
+ }
+
+ public XContentBuilder nullField(XContentBuilderString name) throws IOException {
+ field(name);
+ generator.writeNull();
+ return this;
+ }
+
+ public XContentBuilder nullValue() throws IOException {
+ generator.writeNull();
+ return this;
+ }
+
+ public XContentBuilder rawField(String fieldName, byte[] content) throws IOException {
+ generator.writeRawField(fieldName, content, bos);
+ return this;
+ }
+
+ public XContentBuilder rawField(String fieldName, byte[] content, int offset, int length) throws IOException {
+ generator.writeRawField(fieldName, content, offset, length, bos);
+ return this;
+ }
+
+ public XContentBuilder rawField(String fieldName, InputStream content) throws IOException {
+ generator.writeRawField(fieldName, content, bos);
+ return this;
+ }
+
+ public XContentBuilder rawField(String fieldName, BytesReference content) throws IOException {
+ generator.writeRawField(fieldName, content, bos);
+ return this;
+ }
+
+ public XContentBuilder timeValueField(XContentBuilderString rawFieldName, XContentBuilderString readableFieldName, TimeValue timeValue) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, timeValue.toString());
+ }
+ field(rawFieldName, timeValue.millis());
+ return this;
+ }
+
+ public XContentBuilder timeValueField(XContentBuilderString rawFieldName, XContentBuilderString readableFieldName, long rawTime) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, new TimeValue(rawTime).toString());
+ }
+ field(rawFieldName, rawTime);
+ return this;
+ }
+
+ public XContentBuilder byteSizeField(XContentBuilderString rawFieldName, XContentBuilderString readableFieldName, ByteSizeValue byteSizeValue) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, byteSizeValue.toString());
+ }
+ field(rawFieldName, byteSizeValue.bytes());
+ return this;
+ }
+
+ public XContentBuilder byteSizeField(XContentBuilderString rawFieldName, XContentBuilderString readableFieldName, long rawSize) throws IOException {
+ if (humanReadable) {
+ field(readableFieldName, new ByteSizeValue(rawSize).toString());
+ }
+ field(rawFieldName, rawSize);
+ return this;
+ }
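+ // Hedged illustration: when the builder is human readable, byteSizeField(SIZE_IN_BYTES, SIZE, 1024)
+ // emits both "size": "1kb" and "size_in_bytes": 1024; otherwise only the raw field is written
+ // (SIZE_IN_BYTES and SIZE are hypothetical XContentBuilderString constants). timeValueField behaves
+ // the same way for durations.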
+
+ public XContentBuilder value(Boolean value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ return value(value.booleanValue());
+ }
+
+ public XContentBuilder value(boolean value) throws IOException {
+ generator.writeBoolean(value);
+ return this;
+ }
+
+ public XContentBuilder value(ReadableInstant date) throws IOException {
+ return value(date, defaultDatePrinter);
+ }
+
+ public XContentBuilder value(ReadableInstant date, DateTimeFormatter dateTimeFormatter) throws IOException {
+ if (date == null) {
+ return nullValue();
+ }
+ return value(dateTimeFormatter.print(date));
+ }
+
+ public XContentBuilder value(Date date) throws IOException {
+ return value(date, defaultDatePrinter);
+ }
+
+ public XContentBuilder value(Date date, DateTimeFormatter dateTimeFormatter) throws IOException {
+ if (date == null) {
+ return nullValue();
+ }
+ return value(dateTimeFormatter.print(date.getTime()));
+ }
+
+ public XContentBuilder value(Integer value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ return value(value.intValue());
+ }
+
+ public XContentBuilder value(int value) throws IOException {
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder value(Long value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ return value(value.longValue());
+ }
+
+ public XContentBuilder value(long value) throws IOException {
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder value(Float value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ return value(value.floatValue());
+ }
+
+ public XContentBuilder value(float value) throws IOException {
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder value(Double value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ return value(value.doubleValue());
+ }
+
+ public XContentBuilder value(double value) throws IOException {
+ generator.writeNumber(value);
+ return this;
+ }
+
+ public XContentBuilder value(String value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ generator.writeString(value);
+ return this;
+ }
+
+ public XContentBuilder value(byte[] value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ generator.writeBinary(value);
+ return this;
+ }
+
+ public XContentBuilder value(byte[] value, int offset, int length) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ generator.writeBinary(value, offset, length);
+ return this;
+ }
+
+ public XContentBuilder value(BytesReference value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ if (!value.hasArray()) {
+ value = value.toBytesArray();
+ }
+ generator.writeBinary(value.array(), value.arrayOffset(), value.length());
+ return this;
+ }
+
+ public XContentBuilder value(Text value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ if (value.hasBytes() && value.bytes().hasArray()) {
+ generator.writeUTF8String(value.bytes().array(), value.bytes().arrayOffset(), value.bytes().length());
+ return this;
+ }
+ if (value.hasString()) {
+ generator.writeString(value.string());
+ return this;
+ }
+ BytesArray bytesArray = value.bytes().toBytesArray();
+ generator.writeUTF8String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
+ return this;
+ }
+
+ public XContentBuilder map(Map<String, Object> map) throws IOException {
+ if (map == null) {
+ return nullValue();
+ }
+ writeMap(map);
+ return this;
+ }
+
+ public XContentBuilder value(Map<String, Object> map) throws IOException {
+ if (map == null) {
+ return nullValue();
+ }
+ writeMap(map);
+ return this;
+ }
+
+ public XContentBuilder value(Iterable value) throws IOException {
+ if (value == null) {
+ return nullValue();
+ }
+ startArray();
+ for (Object o : value) {
+ value(o);
+ }
+ endArray();
+ return this;
+ }
+
+ public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException {
+ generator.copyCurrentStructure(parser);
+ return this;
+ }
+
+ public XContentBuilder flush() throws IOException {
+ generator.flush();
+ return this;
+ }
+
+ public void close() {
+ try {
+ generator.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+
+ public XContentGenerator generator() {
+ return this.generator;
+ }
+
+ public OutputStream stream() {
+ return this.bos;
+ }
+
+ @Override
+ public BytesReference bytes() {
+ close();
+ return ((BytesStream) bos).bytes();
+ }
+
+ /**
+ * Returns the actual stream used.
+ */
+ public BytesStream bytesStream() throws IOException {
+ close();
+ return (BytesStream) bos;
+ }
+
+ /**
+ * Returns a string representation of the builder (only applicable for text based xcontent).
+ * Only applicable when the builder is constructed with {@link FastByteArrayOutputStream}.
+ */
+ public String string() throws IOException {
+ close();
+ BytesArray bytesArray = bytes().toBytesArray();
+ return new String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length(), Charsets.UTF_8);
+ }
+
+ private void writeMap(Map<String, Object> map) throws IOException {
+ generator.writeStartObject();
+
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ field(entry.getKey());
+ Object value = entry.getValue();
+ if (value == null) {
+ generator.writeNull();
+ } else {
+ writeValue(value);
+ }
+ }
+ generator.writeEndObject();
+ }
+
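+ // Generic value dispatch: exact class checks cover the common scalar types (fast path), followed
+ // by instanceof checks for maps, iterables, arrays, dates, bytes, and xcontent-aware objects.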
+ private void writeValue(Object value) throws IOException {
+ if (value == null) {
+ generator.writeNull();
+ return;
+ }
+ Class type = value.getClass();
+ if (type == String.class) {
+ generator.writeString((String) value);
+ } else if (type == Integer.class) {
+ generator.writeNumber(((Integer) value).intValue());
+ } else if (type == Long.class) {
+ generator.writeNumber(((Long) value).longValue());
+ } else if (type == Float.class) {
+ generator.writeNumber(((Float) value).floatValue());
+ } else if (type == Double.class) {
+ generator.writeNumber(((Double) value).doubleValue());
+ } else if (type == Short.class) {
+ generator.writeNumber(((Short) value).shortValue());
+ } else if (type == Boolean.class) {
+ generator.writeBoolean(((Boolean) value).booleanValue());
+ } else if (type == GeoPoint.class) {
+ generator.writeStartObject();
+ generator.writeNumberField("lat", ((GeoPoint) value).lat());
+ generator.writeNumberField("lon", ((GeoPoint) value).lon());
+ generator.writeEndObject();
+ } else if (value instanceof Map) {
+ writeMap((Map) value);
+ } else if (value instanceof Iterable) {
+ generator.writeStartArray();
+ for (Object v : (Iterable) value) {
+ writeValue(v);
+ }
+ generator.writeEndArray();
+ } else if (value instanceof Object[]) {
+ generator.writeStartArray();
+ for (Object v : (Object[]) value) {
+ writeValue(v);
+ }
+ generator.writeEndArray();
+ } else if (type == byte[].class) {
+ generator.writeBinary((byte[]) value);
+ } else if (value instanceof Date) {
+ generator.writeString(XContentBuilder.defaultDatePrinter.print(((Date) value).getTime()));
+ } else if (value instanceof Calendar) {
+ generator.writeString(XContentBuilder.defaultDatePrinter.print((((Calendar) value)).getTimeInMillis()));
+ } else if (value instanceof ReadableInstant) {
+ generator.writeString(XContentBuilder.defaultDatePrinter.print((((ReadableInstant) value)).getMillis()));
+ } else if (value instanceof BytesReference) {
+ BytesReference bytes = (BytesReference) value;
+ if (!bytes.hasArray()) {
+ bytes = bytes.toBytesArray();
+ }
+ generator.writeBinary(bytes.array(), bytes.arrayOffset(), bytes.length());
+ } else if (value instanceof Text) {
+ Text text = (Text) value;
+ if (text.hasBytes() && text.bytes().hasArray()) {
+ generator.writeUTF8String(text.bytes().array(), text.bytes().arrayOffset(), text.bytes().length());
+ } else if (text.hasString()) {
+ generator.writeString(text.string());
+ } else {
+ BytesArray bytesArray = text.bytes().toBytesArray();
+ generator.writeUTF8String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
+ }
+ } else if (value instanceof ToXContent) {
+ ((ToXContent) value).toXContent(this, ToXContent.EMPTY_PARAMS);
+ } else if (value instanceof double[]) {
+ generator.writeStartArray();
+ for (double v : (double[]) value) {
+ generator.writeNumber(v);
+ }
+ generator.writeEndArray();
+ } else if (value instanceof long[]) {
+ generator.writeStartArray();
+ for (long v : (long[]) value) {
+ generator.writeNumber(v);
+ }
+ generator.writeEndArray();
+ } else if (value instanceof int[]) {
+ generator.writeStartArray();
+ for (int v : (int[]) value) {
+ generator.writeNumber(v);
+ }
+ generator.writeEndArray();
+ } else if (value instanceof float[]) {
+ generator.writeStartArray();
+ for (float v : (float[]) value) {
+ generator.writeNumber(v);
+ }
+ generator.writeEndArray();
+ } else if (value instanceof short[]) {
+ generator.writeStartArray();
+ for (short v : (short[]) value) {
+ generator.writeNumber(v);
+ }
+ generator.writeEndArray();
+ } else {
+ // if this is a "value" object, like an enum or DistanceUnit, just toString it
+ // (toString on an arbitrary Java class can be misleading, but Jackson should be used in that case)
+ generator.writeString(value.toString());
+ //throw new ElasticsearchIllegalArgumentException("type not supported for generic value conversion: " + type);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderString.java b/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderString.java
new file mode 100644
index 0000000..ea1f6f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderString.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import org.elasticsearch.common.Strings;
+
+/**
+ * A field name holder that pre-computes both the underscore and camelCase serialized variants of a
+ * name, so that case conversion is paid once instead of on every write.
+ */
+public class XContentBuilderString {
+
+ private final XContentString underscore;
+
+ private final XContentString camelCase;
+
+ public XContentBuilderString(String value) {
+ underscore = new XContentString(Strings.toUnderscoreCase(value));
+ camelCase = new XContentString(Strings.toCamelCase(value));
+ }
+
+ public XContentString underscore() {
+ return underscore;
+ }
+
+ public XContentString camelCase() {
+ return camelCase;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java
new file mode 100644
index 0000000..68498af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import com.fasterxml.jackson.dataformat.smile.SmileConstants;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.smile.SmileXContent;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+/**
+ * A one-stop factory for {@link org.elasticsearch.common.xcontent.XContent} implementations and {@link XContentBuilder}s.
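+ * <p>
+ * A minimal usage sketch (assuming the startObject()/endObject() methods defined earlier in
+ * {@link XContentBuilder}):
+ * <pre>
+ * XContentBuilder builder = XContentFactory.jsonBuilder();
+ * builder.startObject().field("user", "kimchy").endObject();
+ * String json = builder.string();
+ * </pre>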
+ */
+public class XContentFactory {
+
+ private static final int GUESS_HEADER_LENGTH = 20;
+
+ /**
+ * Returns a content builder using the JSON format ({@link org.elasticsearch.common.xcontent.XContentType#JSON}).
+ */
+ public static XContentBuilder jsonBuilder() throws IOException {
+ return contentBuilder(XContentType.JSON);
+ }
+
+ /**
+ * Constructs a new json builder that will output the result into the provided output stream.
+ */
+ public static XContentBuilder jsonBuilder(OutputStream os) throws IOException {
+ return new XContentBuilder(JsonXContent.jsonXContent, os);
+ }
+
+ /**
+ * Returns a content builder using the SMILE format ({@link org.elasticsearch.common.xcontent.XContentType#SMILE}).
+ */
+ public static XContentBuilder smileBuilder() throws IOException {
+ return contentBuilder(XContentType.SMILE);
+ }
+
+ /**
+ * Constructs a new smile builder that will output the result into the provided output stream.
+ */
+ public static XContentBuilder smileBuilder(OutputStream os) throws IOException {
+ return new XContentBuilder(SmileXContent.smileXContent, os);
+ }
+
+ /**
+ * Returns a content builder using the YAML format ({@link org.elasticsearch.common.xcontent.XContentType#YAML}).
+ */
+ public static XContentBuilder yamlBuilder() throws IOException {
+ return contentBuilder(XContentType.YAML);
+ }
+
+ /**
+ * Constructs a new yaml builder that will output the result into the provided output stream.
+ */
+ public static XContentBuilder yamlBuilder(OutputStream os) throws IOException {
+ return new XContentBuilder(YamlXContent.yamlXContent, os);
+ }
+
+ /**
+ * Constructs an xcontent builder that will output the result into the provided output stream.
+ */
+ public static XContentBuilder contentBuilder(XContentType type, OutputStream outputStream) throws IOException {
+ if (type == XContentType.JSON) {
+ return jsonBuilder(outputStream);
+ } else if (type == XContentType.SMILE) {
+ return smileBuilder(outputStream);
+ } else if (type == XContentType.YAML) {
+ return yamlBuilder(outputStream);
+ }
+ throw new ElasticsearchIllegalArgumentException("No matching content type for " + type);
+ }
+
+ /**
+ * Returns a binary content builder for the provided content type.
+ */
+ public static XContentBuilder contentBuilder(XContentType type) throws IOException {
+ if (type == XContentType.JSON) {
+ return JsonXContent.contentBuilder();
+ } else if (type == XContentType.SMILE) {
+ return SmileXContent.contentBuilder();
+ } else if (type == XContentType.YAML) {
+ return YamlXContent.contentBuilder();
+ }
+ throw new ElasticsearchIllegalArgumentException("No matching content type for " + type);
+ }
+
+ /**
+ * Returns the {@link org.elasticsearch.common.xcontent.XContent} for the provided content type.
+ */
+ public static XContent xContent(XContentType type) {
+ return type.xContent();
+ }
+
+ /**
+ * Guesses the content type based on the provided char sequence.
+ */
+ public static XContentType xContentType(CharSequence content) {
+ int length = content.length() < GUESS_HEADER_LENGTH ? content.length() : GUESS_HEADER_LENGTH;
+ if (length == 0) {
+ return null;
+ }
+ char first = content.charAt(0);
+ if (first == '{') {
+ return XContentType.JSON;
+ }
+ // Should we throw a failure here? Smile is a binary format, really meant to be detected on bytes, not chars...
+ if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
+ return XContentType.SMILE;
+ }
+ if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') {
+ return XContentType.YAML;
+ }
+
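+ // fallback: scan the first GUESS_HEADER_LENGTH chars for a '{' so that JSON preceded by
+ // whitespace is still detected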
+ for (int i = 0; i < length; i++) {
+ char c = content.charAt(i);
+ if (c == '{') {
+ return XContentType.JSON;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Guesses the content type based on the provided char sequence and returns the matching {@link XContent}.
+ */
+ public static XContent xContent(CharSequence content) {
+ XContentType type = xContentType(content);
+ if (type == null) {
+ throw new ElasticsearchParseException("Failed to derive xcontent from " + content);
+ }
+ return xContent(type);
+ }
+
+ /**
+ * Guesses the content type based on the provided bytes.
+ */
+ public static XContent xContent(byte[] data) {
+ return xContent(data, 0, data.length);
+ }
+
+ /**
+ * Guesses the content type based on the provided bytes.
+ */
+ public static XContent xContent(byte[] data, int offset, int length) {
+ XContentType type = xContentType(data, offset, length);
+ if (type == null) {
+ throw new ElasticsearchParseException("Failed to derive xcontent from (offset=" + offset + ", length=" + length + "): " + Arrays.toString(data));
+ }
+ return xContent(type);
+ }
+
+ /**
+ * Guesses the content type based on the provided bytes.
+ */
+ public static XContentType xContentType(byte[] data) {
+ return xContentType(data, 0, data.length);
+ }
+
+ /**
+ * Guesses the content type based on the provided input stream.
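+ * Note that this reads from the stream without resetting it; callers that need the bytes again
+ * must rewind the stream themselves (as done via resetToBufferStart() elsewhere in this codebase).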
+ */
+ public static XContentType xContentType(InputStream si) throws IOException {
+ int first = si.read();
+ if (first == -1) {
+ return null;
+ }
+ int second = si.read();
+ if (second == -1) {
+ return null;
+ }
+ if (first == SmileConstants.HEADER_BYTE_1 && second == SmileConstants.HEADER_BYTE_2) {
+ int third = si.read();
+ if (third == SmileConstants.HEADER_BYTE_3) {
+ return XContentType.SMILE;
+ }
+ }
+ if (first == '{' || second == '{') {
+ return XContentType.JSON;
+ }
+ if (first == '-' && second == '-') {
+ int third = si.read();
+ if (third == '-') {
+ return XContentType.YAML;
+ }
+ }
+ for (int i = 2; i < GUESS_HEADER_LENGTH; i++) {
+ int val = si.read();
+ if (val == -1) {
+ return null;
+ }
+ if (val == '{') {
+ return XContentType.JSON;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Guesses the content type based on the provided bytes.
+ */
+ public static XContentType xContentType(byte[] data, int offset, int length) {
+ return xContentType(new BytesArray(data, offset, length));
+ }
+
+ public static XContent xContent(BytesReference bytes) {
+ XContentType type = xContentType(bytes);
+ if (type == null) {
+ throw new ElasticsearchParseException("Failed to derive xcontent from " + bytes);
+ }
+ return xContent(type);
+ }
+
+ /**
+ * Guesses the content type based on the provided bytes.
+ */
+ public static XContentType xContentType(BytesReference bytes) {
+ int length = bytes.length() < GUESS_HEADER_LENGTH ? bytes.length() : GUESS_HEADER_LENGTH;
+ if (length == 0) {
+ return null;
+ }
+ byte first = bytes.get(0);
+ if (first == '{') {
+ return XContentType.JSON;
+ }
+ if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes.get(1) == SmileConstants.HEADER_BYTE_2 && bytes.get(2) == SmileConstants.HEADER_BYTE_3) {
+ return XContentType.SMILE;
+ }
+ if (length > 2 && first == '-' && bytes.get(1) == '-' && bytes.get(2) == '-') {
+ return XContentType.YAML;
+ }
+ for (int i = 0; i < length; i++) {
+ if (bytes.get(i) == '{') {
+ return XContentType.JSON;
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
new file mode 100644
index 0000000..a17ef93
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import org.elasticsearch.common.bytes.BytesReference;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * A streaming generator abstraction over the low-level writing of a content format (JSON, SMILE, YAML, ...).
+ */
+public interface XContentGenerator {
+
+ XContentType contentType();
+
+ void usePrettyPrint();
+
+ void usePrintLineFeedAtEnd();
+
+ void writeStartArray() throws IOException;
+
+ void writeEndArray() throws IOException;
+
+ void writeStartObject() throws IOException;
+
+ void writeEndObject() throws IOException;
+
+ void writeFieldName(String name) throws IOException;
+
+ void writeFieldName(XContentString name) throws IOException;
+
+ void writeString(String text) throws IOException;
+
+ void writeString(char[] text, int offset, int len) throws IOException;
+
+ void writeUTF8String(byte[] text, int offset, int length) throws IOException;
+
+ void writeBinary(byte[] data, int offset, int len) throws IOException;
+
+ void writeBinary(byte[] data) throws IOException;
+
+ void writeNumber(int v) throws IOException;
+
+ void writeNumber(long v) throws IOException;
+
+ void writeNumber(double d) throws IOException;
+
+ void writeNumber(float f) throws IOException;
+
+ void writeBoolean(boolean state) throws IOException;
+
+ void writeNull() throws IOException;
+
+
+ void writeStringField(String fieldName, String value) throws IOException;
+
+ void writeStringField(XContentString fieldName, String value) throws IOException;
+
+ void writeBooleanField(String fieldName, boolean value) throws IOException;
+
+ void writeBooleanField(XContentString fieldName, boolean value) throws IOException;
+
+ void writeNullField(String fieldName) throws IOException;
+
+ void writeNullField(XContentString fieldName) throws IOException;
+
+ void writeNumberField(String fieldName, int value) throws IOException;
+
+ void writeNumberField(XContentString fieldName, int value) throws IOException;
+
+ void writeNumberField(String fieldName, long value) throws IOException;
+
+ void writeNumberField(XContentString fieldName, long value) throws IOException;
+
+ void writeNumberField(String fieldName, double value) throws IOException;
+
+ void writeNumberField(XContentString fieldName, double value) throws IOException;
+
+ void writeNumberField(String fieldName, float value) throws IOException;
+
+ void writeNumberField(XContentString fieldName, float value) throws IOException;
+
+ void writeBinaryField(String fieldName, byte[] data) throws IOException;
+
+ void writeBinaryField(XContentString fieldName, byte[] data) throws IOException;
+
+ void writeArrayFieldStart(String fieldName) throws IOException;
+
+ void writeArrayFieldStart(XContentString fieldName) throws IOException;
+
+ void writeObjectFieldStart(String fieldName) throws IOException;
+
+ void writeObjectFieldStart(XContentString fieldName) throws IOException;
+
+ void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException;
+
+ void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException;
+
+ void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException;
+
+ void writeRawField(String fieldName, BytesReference content, OutputStream bos) throws IOException;
+
+ void copyCurrentStructure(XContentParser parser) throws IOException;
+
+ void flush() throws IOException;
+
+ void close() throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
new file mode 100644
index 0000000..bfec6da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
@@ -0,0 +1,428 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.compress.CompressedStreamInput;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A set of static helpers for parsing, converting, and merging xcontent structures.
+ */
+@SuppressWarnings("unchecked")
+public class XContentHelper {
+
+ public static XContentParser createParser(BytesReference bytes) throws IOException {
+ if (bytes.hasArray()) {
+ return createParser(bytes.array(), bytes.arrayOffset(), bytes.length());
+ }
+ Compressor compressor = CompressorFactory.compressor(bytes);
+ if (compressor != null) {
+ CompressedStreamInput compressedInput = compressor.streamInput(bytes.streamInput());
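+ // peek at the decompressed header to detect the content type, then rewind before handing
+ // the stream to the parser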
+ XContentType contentType = XContentFactory.xContentType(compressedInput);
+ compressedInput.resetToBufferStart();
+ return XContentFactory.xContent(contentType).createParser(compressedInput);
+ } else {
+ return XContentFactory.xContent(bytes).createParser(bytes.streamInput());
+ }
+ }
+
+
+ public static XContentParser createParser(byte[] data, int offset, int length) throws IOException {
+ Compressor compressor = CompressorFactory.compressor(data, offset, length);
+ if (compressor != null) {
+ CompressedStreamInput compressedInput = compressor.streamInput(new BytesStreamInput(data, offset, length, false));
+ XContentType contentType = XContentFactory.xContentType(compressedInput);
+ compressedInput.resetToBufferStart();
+ return XContentFactory.xContent(contentType).createParser(compressedInput);
+ } else {
+ return XContentFactory.xContent(data, offset, length).createParser(data, offset, length);
+ }
+ }
+
+ public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReference bytes, boolean ordered) throws ElasticsearchParseException {
+ if (bytes.hasArray()) {
+ return convertToMap(bytes.array(), bytes.arrayOffset(), bytes.length(), ordered);
+ }
+ try {
+ XContentParser parser;
+ XContentType contentType;
+ Compressor compressor = CompressorFactory.compressor(bytes);
+ if (compressor != null) {
+ CompressedStreamInput compressedStreamInput = compressor.streamInput(bytes.streamInput());
+ contentType = XContentFactory.xContentType(compressedStreamInput);
+ compressedStreamInput.resetToBufferStart();
+ parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
+ } else {
+ contentType = XContentFactory.xContentType(bytes);
+ parser = XContentFactory.xContent(contentType).createParser(bytes.streamInput());
+ }
+ if (ordered) {
+ return Tuple.tuple(contentType, parser.mapOrderedAndClose());
+ } else {
+ return Tuple.tuple(contentType, parser.mapAndClose());
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("Failed to parse content to map", e);
+ }
+ }
+
+ public static Tuple<XContentType, Map<String, Object>> convertToMap(byte[] data, boolean ordered) throws ElasticsearchParseException {
+ return convertToMap(data, 0, data.length, ordered);
+ }
+
+ public static Tuple<XContentType, Map<String, Object>> convertToMap(byte[] data, int offset, int length, boolean ordered) throws ElasticsearchParseException {
+ try {
+ XContentParser parser;
+ XContentType contentType;
+ Compressor compressor = CompressorFactory.compressor(data, offset, length);
+ if (compressor != null) {
+ CompressedStreamInput compressedStreamInput = compressor.streamInput(new BytesStreamInput(data, offset, length, false));
+ contentType = XContentFactory.xContentType(compressedStreamInput);
+ compressedStreamInput.resetToBufferStart();
+ parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
+ } else {
+ contentType = XContentFactory.xContentType(data, offset, length);
+ parser = XContentFactory.xContent(contentType).createParser(data, offset, length);
+ }
+ if (ordered) {
+ return Tuple.tuple(contentType, parser.mapOrderedAndClose());
+ } else {
+ return Tuple.tuple(contentType, parser.mapAndClose());
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("Failed to parse content to map", e);
+ }
+ }
+
+ public static String convertToJson(BytesReference bytes, boolean reformatJson) throws IOException {
+ return convertToJson(bytes, reformatJson, false);
+ }
+
+ public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint) throws IOException {
+ if (bytes.hasArray()) {
+ return convertToJson(bytes.array(), bytes.arrayOffset(), bytes.length(), reformatJson, prettyPrint);
+ }
+ XContentType xContentType = XContentFactory.xContentType(bytes);
+ if (xContentType == XContentType.JSON && !reformatJson) {
+ BytesArray bytesArray = bytes.toBytesArray();
+ return new String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length(), Charsets.UTF_8);
+ }
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(xContentType).createParser(bytes.streamInput());
+ parser.nextToken();
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ if (prettyPrint) {
+ builder.prettyPrint();
+ }
+ builder.copyCurrentStructure(parser);
+ return builder.string();
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ public static String convertToJson(byte[] data, int offset, int length, boolean reformatJson) throws IOException {
+ return convertToJson(data, offset, length, reformatJson, false);
+ }
+
+ public static String convertToJson(byte[] data, int offset, int length, boolean reformatJson, boolean prettyPrint) throws IOException {
+ XContentType xContentType = XContentFactory.xContentType(data, offset, length);
+ if (xContentType == XContentType.JSON && !reformatJson) {
+ return new String(data, offset, length, Charsets.UTF_8);
+ }
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(xContentType).createParser(data, offset, length);
+ parser.nextToken();
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ if (prettyPrint) {
+ builder.prettyPrint();
+ }
+ builder.copyCurrentStructure(parser);
+ return builder.string();
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ /**
+ * Applies the provided changes to the source. If a key exists in the changes, it overrides the one
+ * in source, unless both are Maps, in which case they are merged recursively.
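+ * <p>
+ * Hedged illustration: update({"a": 1, "b": {"x": 1}}, {"b": {"y": 2}, "c": 3}) leaves
+ * {"a": 1, "b": {"x": 1, "y": 2}, "c": 3} in source.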
+ */
+ public static void update(Map<String, Object> source, Map<String, Object> changes) {
+ for (Map.Entry<String, Object> changesEntry : changes.entrySet()) {
+ if (!source.containsKey(changesEntry.getKey())) {
+ // safe to copy, change does not exist in source
+ source.put(changesEntry.getKey(), changesEntry.getValue());
+ } else {
+ if (source.get(changesEntry.getKey()) instanceof Map && changesEntry.getValue() instanceof Map) {
+ // recursive merge maps
+ update((Map<String, Object>) source.get(changesEntry.getKey()), (Map<String, Object>) changesEntry.getValue());
+ } else {
+ // update the field
+ source.put(changesEntry.getKey(), changesEntry.getValue());
+ }
+ }
+ }
+ }
+
+ /**
+ * Merges the defaults provided as the second parameter into the content of the first. Only does recursive merge
+ * for inner maps.
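+ * <p>
+ * Hedged illustration: lists whose elements are all single-key maps (e.g. definitions of the form
+ * [{"key1": {...}}, {"key2": {...}}]) are merged by key, content entries first; all other lists are
+ * concatenated, defaults first, skipping duplicates.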
+ */
+ @SuppressWarnings({"unchecked"})
+ public static void mergeDefaults(Map<String, Object> content, Map<String, Object> defaults) {
+ for (Map.Entry<String, Object> defaultEntry : defaults.entrySet()) {
+ if (!content.containsKey(defaultEntry.getKey())) {
+ // copy it over, it does not exist in the content
+ content.put(defaultEntry.getKey(), defaultEntry.getValue());
+ } else {
+ // in the content and in the default, only merge compound ones (maps)
+ if (content.get(defaultEntry.getKey()) instanceof Map && defaultEntry.getValue() instanceof Map) {
+ mergeDefaults((Map<String, Object>) content.get(defaultEntry.getKey()), (Map<String, Object>) defaultEntry.getValue());
+ } else if (content.get(defaultEntry.getKey()) instanceof List && defaultEntry.getValue() instanceof List) {
+ List defaultList = (List) defaultEntry.getValue();
+ List contentList = (List) content.get(defaultEntry.getKey());
+
+ List mergedList = new ArrayList();
+ if (allListValuesAreMapsOfOne(defaultList) && allListValuesAreMapsOfOne(contentList)) {
+ // all are in the form of [ {"key1" : {}}, {"key2" : {}} ], merge based on keys
+ Map<String, Map<String, Object>> processed = Maps.newLinkedHashMap();
+ for (Object o : contentList) {
+ Map<String, Object> map = (Map<String, Object>) o;
+ Map.Entry<String, Object> entry = map.entrySet().iterator().next();
+ processed.put(entry.getKey(), map);
+ }
+ for (Object o : defaultList) {
+ Map<String, Object> map = (Map<String, Object>) o;
+ Map.Entry<String, Object> entry = map.entrySet().iterator().next();
+ if (processed.containsKey(entry.getKey())) {
+ mergeDefaults(processed.get(entry.getKey()), map);
+ } else {
+ // put the default entries after the content ones.
+ processed.put(entry.getKey(), map);
+ }
+ }
+ for (Map<String, Object> map : processed.values()) {
+ mergedList.add(map);
+ }
+ } else {
+ // if both are lists, simply combine them, first the defaults, then the content
+ // just make sure not to add the same value twice
+ mergedList.addAll(defaultList);
+ for (Object o : contentList) {
+ if (!mergedList.contains(o)) {
+ mergedList.add(o);
+ }
+ }
+ }
+ content.put(defaultEntry.getKey(), mergedList);
+ }
+ }
+ }
+ }
+
+ private static boolean allListValuesAreMapsOfOne(List list) {
+ for (Object o : list) {
+ if (!(o instanceof Map)) {
+ return false;
+ }
+ if (((Map) o).size() != 1) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public static void copyCurrentStructure(XContentGenerator generator, XContentParser parser) throws IOException {
+ XContentParser.Token t = parser.currentToken();
+
+ // Let's handle field-name separately first
+ if (t == XContentParser.Token.FIELD_NAME) {
+ generator.writeFieldName(parser.currentName());
+ t = parser.nextToken();
+ // fall-through to copy the associated value
+ }
+
+ switch (t) {
+ case START_ARRAY:
+ generator.writeStartArray();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ copyCurrentStructure(generator, parser);
+ }
+ generator.writeEndArray();
+ break;
+ case START_OBJECT:
+ generator.writeStartObject();
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ copyCurrentStructure(generator, parser);
+ }
+ generator.writeEndObject();
+ break;
+ default: // others are simple:
+ copyCurrentEvent(generator, parser);
+ }
+ }
+
+ public static void copyCurrentEvent(XContentGenerator generator, XContentParser parser) throws IOException {
+ switch (parser.currentToken()) {
+ case START_OBJECT:
+ generator.writeStartObject();
+ break;
+ case END_OBJECT:
+ generator.writeEndObject();
+ break;
+ case START_ARRAY:
+ generator.writeStartArray();
+ break;
+ case END_ARRAY:
+ generator.writeEndArray();
+ break;
+ case FIELD_NAME:
+ generator.writeFieldName(parser.currentName());
+ break;
+ case VALUE_STRING:
+ if (parser.hasTextCharacters()) {
+ generator.writeString(parser.textCharacters(), parser.textOffset(), parser.textLength());
+ } else {
+ generator.writeString(parser.text());
+ }
+ break;
+ case VALUE_NUMBER:
+ switch (parser.numberType()) {
+ case INT:
+ generator.writeNumber(parser.intValue());
+ break;
+ case LONG:
+ generator.writeNumber(parser.longValue());
+ break;
+ case FLOAT:
+ generator.writeNumber(parser.floatValue());
+ break;
+ case DOUBLE:
+ generator.writeNumber(parser.doubleValue());
+ break;
+ }
+ break;
+ case VALUE_BOOLEAN:
+ generator.writeBoolean(parser.booleanValue());
+ break;
+ case VALUE_NULL:
+ generator.writeNull();
+ break;
+ case VALUE_EMBEDDED_OBJECT:
+ generator.writeBinary(parser.binaryValue());
+ }
+ }
+
+ /**
+ * Directly writes the source to the output builder
+ */
+ public static void writeDirect(BytesReference source, XContentBuilder rawBuilder, ToXContent.Params params) throws IOException {
+ Compressor compressor = CompressorFactory.compressor(source);
+ if (compressor != null) {
+ CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
+ XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
+ compressedStreamInput.resetToBufferStart();
+ if (contentType == rawBuilder.contentType()) {
+ Streams.copy(compressedStreamInput, rawBuilder.stream());
+ } else {
+ XContentParser parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
+ try {
+ parser.nextToken();
+ rawBuilder.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+ } else {
+ XContentType contentType = XContentFactory.xContentType(source);
+ if (contentType == rawBuilder.contentType()) {
+ source.writeTo(rawBuilder.stream());
+ } else {
+ XContentParser parser = XContentFactory.xContent(contentType).createParser(source);
+ try {
+ parser.nextToken();
+ rawBuilder.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+ }
+ }
+
+ /**
+ * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using
+ * {@link XContentBuilder#rawField(String, org.elasticsearch.common.bytes.BytesReference)}.
+ */
+ public static void writeRawField(String field, BytesReference source, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ Compressor compressor = CompressorFactory.compressor(source);
+ if (compressor != null) {
+ CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
+ XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
+ compressedStreamInput.resetToBufferStart();
+ if (contentType == builder.contentType()) {
+ builder.rawField(field, compressedStreamInput);
+ } else {
+ XContentParser parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
+ try {
+ parser.nextToken();
+ builder.field(field);
+ builder.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+ } else {
+ XContentType contentType = XContentFactory.xContentType(source);
+ if (contentType == builder.contentType()) {
+ builder.rawField(field, source);
+ } else {
+ XContentParser parser = XContentFactory.xContent(contentType).createParser(source);
+ try {
+ parser.nextToken();
+ builder.field(field);
+ builder.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java
new file mode 100644
index 0000000..497dbe7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import org.apache.lucene.util.BytesRef;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A pull parser abstraction over the supported content formats, the reading counterpart of
+ * {@link XContentGenerator}.
+ */
+public interface XContentParser extends Closeable {
+
+ enum Token {
+ START_OBJECT {
+ @Override
+ public boolean isValue() {
+ return false;
+ }
+ },
+
+ END_OBJECT {
+ @Override
+ public boolean isValue() {
+ return false;
+ }
+ },
+
+ START_ARRAY {
+ @Override
+ public boolean isValue() {
+ return false;
+ }
+ },
+
+ END_ARRAY {
+ @Override
+ public boolean isValue() {
+ return false;
+ }
+ },
+
+ FIELD_NAME {
+ @Override
+ public boolean isValue() {
+ return false;
+ }
+ },
+
+ VALUE_STRING {
+ @Override
+ public boolean isValue() {
+ return true;
+ }
+ },
+
+ VALUE_NUMBER {
+ @Override
+ public boolean isValue() {
+ return true;
+ }
+ },
+
+ VALUE_BOOLEAN {
+ @Override
+ public boolean isValue() {
+ return true;
+ }
+ },
+
+ // usually a binary value
+ VALUE_EMBEDDED_OBJECT {
+ @Override
+ public boolean isValue() {
+ return true;
+ }
+ },
+
+ VALUE_NULL {
+ @Override
+ public boolean isValue() {
+ return false;
+ }
+ };
+
+ public abstract boolean isValue();
+ }
+
+ enum NumberType {
+ INT, LONG, FLOAT, DOUBLE
+ }
+
+ XContentType contentType();
+
+ Token nextToken() throws IOException;
+
+ void skipChildren() throws IOException;
+
+ Token currentToken();
+
+ String currentName() throws IOException;
+
+ Map<String, Object> map() throws IOException;
+
+ Map<String, Object> mapOrdered() throws IOException;
+
+ Map<String, Object> mapAndClose() throws IOException;
+
+ Map<String, Object> mapOrderedAndClose() throws IOException;
+
+ String text() throws IOException;
+
+ String textOrNull() throws IOException;
+
+ BytesRef bytesOrNull() throws IOException;
+
+ BytesRef bytes() throws IOException;
+
+ Object objectText() throws IOException;
+
+ Object objectBytes() throws IOException;
+
+ boolean hasTextCharacters();
+
+ char[] textCharacters() throws IOException;
+
+ int textLength() throws IOException;
+
+ int textOffset() throws IOException;
+
+ Number numberValue() throws IOException;
+
+ NumberType numberType() throws IOException;
+
+ /**
+ * Is the number type estimated or not (i.e. an int might actually be a long; it's just small enough
+ * to fit in an int).
+ */
+ boolean estimatedNumberType();
+
+ short shortValue(boolean coerce) throws IOException;
+
+ int intValue(boolean coerce) throws IOException;
+
+ long longValue(boolean coerce) throws IOException;
+
+ float floatValue(boolean coerce) throws IOException;
+
+ double doubleValue(boolean coerce) throws IOException;
+
+ short shortValue() throws IOException;
+
+ int intValue() throws IOException;
+
+ long longValue() throws IOException;
+
+ float floatValue() throws IOException;
+
+ double doubleValue() throws IOException;
+
+ /**
+ * Returns true if the current value is boolean in nature.
+ * Values that are considered booleans:
+ * - boolean values (true/false)
+ * - numeric integers (0 is considered false, anything else true)
+ * - one of the following strings: "true", "false", "on", "off", "yes", "no", "1", "0"
+ */
+ boolean isBooleanValue() throws IOException;
+
+ boolean booleanValue() throws IOException;
+
+ byte[] binaryValue() throws IOException;
+
+ void close();
+}
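The interface above is consumed as a pull parser; a minimal sketch of the canonical token loop (not part of the patch), using only methods declared in this file plus the JsonXContent factory added later in this patch, with an illustrative payload:

    XContentParser parser = JsonXContent.jsonXContent.createParser("{\"user\":\"kimchy\",\"age\":42}");
    try {
        String fieldName = null;
        XContentParser.Token token;
        while ((token = parser.nextToken()) != null) {
            if (token == XContentParser.Token.FIELD_NAME) {
                fieldName = parser.currentName();
            } else if (token.isValue()) {
                // dispatch on fieldName here, e.g. parser.text() or parser.intValue()
            }
        }
    } finally {
        parser.close();
    }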
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentString.java b/src/main/java/org/elasticsearch/common/xcontent/XContentString.java
new file mode 100644
index 0000000..5127038
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentString.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import com.fasterxml.jackson.core.io.SerializedString;
+
+/**
+ * A pre-serialized field name, allowing generators to skip re-encoding it on every write.
+ */
+public class XContentString extends SerializedString {
+
+ public XContentString(String v) {
+ super(v);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentType.java b/src/main/java/org/elasticsearch/common/xcontent/XContentType.java
new file mode 100644
index 0000000..b658199
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentType.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.smile.SmileXContent;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+
+/**
+ * The content type of {@link org.elasticsearch.common.xcontent.XContent}.
+ */
+public enum XContentType {
+
+ /**
+ * A JSON-based content type.
+ */
+ JSON(0) {
+ @Override
+ public String restContentType() {
+ return "application/json; charset=UTF-8";
+ }
+
+ @Override
+ public String shortName() {
+ return "json";
+ }
+
+ @Override
+ public XContent xContent() {
+ return JsonXContent.jsonXContent;
+ }
+ },
+ /**
+ * The Jackson-based Smile format: a fast and compact binary encoding of JSON.
+ */
+ SMILE(1) {
+ @Override
+ public String restContentType() {
+ return "application/smile";
+ }
+
+ @Override
+ public String shortName() {
+ return "smile";
+ }
+
+ @Override
+ public XContent xContent() {
+ return SmileXContent.smileXContent;
+ }
+ },
+ /**
+ * A YAML-based content type.
+ */
+ YAML(2) {
+ @Override
+ public String restContentType() {
+ return "application/yaml";
+ }
+
+ @Override
+ public String shortName() {
+ return "yaml";
+ }
+
+ @Override
+ public XContent xContent() {
+ return YamlXContent.yamlXContent;
+ }
+ };
+
+ public static XContentType fromRestContentType(String contentType) {
+ if (contentType == null) {
+ return null;
+ }
+ if ("application/json".equals(contentType) || "json".equalsIgnoreCase(contentType)) {
+ return JSON;
+ }
+
+ if ("application/smile".equals(contentType) || "smile".equalsIgnoreCase(contentType)) {
+ return SMILE;
+ }
+
+ if ("application/yaml".equals(contentType) || "yaml".equalsIgnoreCase(contentType)) {
+ return YAML;
+ }
+
+ return null;
+ }
+
+ private int index;
+
+ XContentType(int index) {
+ this.index = index;
+ }
+
+ public int index() {
+ return index;
+ }
+
+ public abstract String restContentType();
+
+ public abstract String shortName();
+
+ public abstract XContent xContent();
+}
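fromRestContentType resolves a REST Content-Type header to a concrete format; a minimal sketch (not part of the patch), with an illustrative header value and the XContentBuilder.builder factory that JsonXContent.contentBuilder below also uses:

    XContentType type = XContentType.fromRestContentType("application/json");
    if (type != null) {
        // respond in the same format the client sent
        XContentBuilder builder = XContentBuilder.builder(type.xContent());
    }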
diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
new file mode 100644
index 0000000..a488a95
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.json;
+
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.xcontent.*;
+
+import java.io.*;
+
+/**
+ * A JSON-based content implementation using Jackson.
+ */
+public class JsonXContent implements XContent {
+
+ public static XContentBuilder contentBuilder() throws IOException {
+ return XContentBuilder.builder(jsonXContent);
+ }
+
+ private final static JsonFactory jsonFactory;
+ public final static JsonXContent jsonXContent;
+
+ static {
+ jsonFactory = new JsonFactory();
+ jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
+ jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);
+ jsonFactory.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
+ jsonXContent = new JsonXContent();
+ }
+
+ private JsonXContent() {
+ }
+
+ @Override
+ public XContentType type() {
+ return XContentType.JSON;
+ }
+
+ @Override
+ public byte streamSeparator() {
+ return '\n';
+ }
+
+ @Override
+ public XContentGenerator createGenerator(OutputStream os) throws IOException {
+ return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8));
+ }
+
+ @Override
+ public XContentGenerator createGenerator(Writer writer) throws IOException {
+ return new JsonXContentGenerator(jsonFactory.createGenerator(writer));
+ }
+
+ @Override
+ public XContentParser createParser(String content) throws IOException {
+ return new JsonXContentParser(jsonFactory.createParser(new FastStringReader(content)));
+ }
+
+ @Override
+ public XContentParser createParser(InputStream is) throws IOException {
+ return new JsonXContentParser(jsonFactory.createParser(is));
+ }
+
+ @Override
+ public XContentParser createParser(byte[] data) throws IOException {
+ return new JsonXContentParser(jsonFactory.createParser(data));
+ }
+
+ @Override
+ public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
+ return new JsonXContentParser(jsonFactory.createParser(data, offset, length));
+ }
+
+ @Override
+ public XContentParser createParser(BytesReference bytes) throws IOException {
+ if (bytes.hasArray()) {
+ return createParser(bytes.array(), bytes.arrayOffset(), bytes.length());
+ }
+ return createParser(bytes.streamInput());
+ }
+
+ @Override
+ public XContentParser createParser(Reader reader) throws IOException {
+ return new JsonXContentParser(jsonFactory.createParser(reader));
+ }
+}
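A minimal sketch of the contentBuilder() entry point above (not part of the patch), assuming XContentBuilder's fluent field methods and string() from elsewhere in this import:

    XContentBuilder builder = JsonXContent.contentBuilder();
    builder.startObject().field("ok", true).endObject();
    String json = builder.string(); // {"ok":true}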
diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java
new file mode 100644
index 0000000..cd56f8d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.json;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.io.SerializedString;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.*;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * A JSON {@link XContentGenerator} delegating to a Jackson {@link JsonGenerator}.
+ */
+public class JsonXContentGenerator implements XContentGenerator {
+
+ protected final JsonGenerator generator;
+ private boolean writeLineFeedAtEnd;
+
+ public JsonXContentGenerator(JsonGenerator generator) {
+ this.generator = generator;
+ }
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.JSON;
+ }
+
+ @Override
+ public void usePrettyPrint() {
+ generator.useDefaultPrettyPrinter();
+ }
+
+ @Override
+ public void usePrintLineFeedAtEnd() {
+ writeLineFeedAtEnd = true;
+ }
+
+ @Override
+ public void writeStartArray() throws IOException {
+ generator.writeStartArray();
+ }
+
+ @Override
+ public void writeEndArray() throws IOException {
+ generator.writeEndArray();
+ }
+
+ @Override
+ public void writeStartObject() throws IOException {
+ generator.writeStartObject();
+ }
+
+ @Override
+ public void writeEndObject() throws IOException {
+ generator.writeEndObject();
+ }
+
+ @Override
+ public void writeFieldName(String name) throws IOException {
+ generator.writeFieldName(name);
+ }
+
+ @Override
+ public void writeFieldName(XContentString name) throws IOException {
+ generator.writeFieldName(name);
+ }
+
+ @Override
+ public void writeString(String text) throws IOException {
+ generator.writeString(text);
+ }
+
+ @Override
+ public void writeString(char[] text, int offset, int len) throws IOException {
+ generator.writeString(text, offset, len);
+ }
+
+ @Override
+ public void writeUTF8String(byte[] text, int offset, int length) throws IOException {
+ generator.writeUTF8String(text, offset, length);
+ }
+
+ @Override
+ public void writeBinary(byte[] data, int offset, int len) throws IOException {
+ generator.writeBinary(data, offset, len);
+ }
+
+ @Override
+ public void writeBinary(byte[] data) throws IOException {
+ generator.writeBinary(data);
+ }
+
+ @Override
+ public void writeNumber(int v) throws IOException {
+ generator.writeNumber(v);
+ }
+
+ @Override
+ public void writeNumber(long v) throws IOException {
+ generator.writeNumber(v);
+ }
+
+ @Override
+ public void writeNumber(double d) throws IOException {
+ generator.writeNumber(d);
+ }
+
+ @Override
+ public void writeNumber(float f) throws IOException {
+ generator.writeNumber(f);
+ }
+
+ @Override
+ public void writeBoolean(boolean state) throws IOException {
+ generator.writeBoolean(state);
+ }
+
+ @Override
+ public void writeNull() throws IOException {
+ generator.writeNull();
+ }
+
+ @Override
+ public void writeStringField(String fieldName, String value) throws IOException {
+ generator.writeStringField(fieldName, value);
+ }
+
+ @Override
+ public void writeStringField(XContentString fieldName, String value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeString(value);
+ }
+
+ @Override
+ public void writeBooleanField(String fieldName, boolean value) throws IOException {
+ generator.writeBooleanField(fieldName, value);
+ }
+
+ @Override
+ public void writeBooleanField(XContentString fieldName, boolean value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeBoolean(value);
+ }
+
+ @Override
+ public void writeNullField(String fieldName) throws IOException {
+ generator.writeNullField(fieldName);
+ }
+
+ @Override
+ public void writeNullField(XContentString fieldName) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeNull();
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, int value) throws IOException {
+ generator.writeNumberField(fieldName, value);
+ }
+
+ @Override
+ public void writeNumberField(XContentString fieldName, int value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeNumber(value);
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, long value) throws IOException {
+ generator.writeNumberField(fieldName, value);
+ }
+
+ @Override
+ public void writeNumberField(XContentString fieldName, long value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeNumber(value);
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, double value) throws IOException {
+ generator.writeNumberField(fieldName, value);
+ }
+
+ @Override
+ public void writeNumberField(XContentString fieldName, double value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeNumber(value);
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, float value) throws IOException {
+ generator.writeNumberField(fieldName, value);
+ }
+
+ @Override
+ public void writeNumberField(XContentString fieldName, float value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeNumber(value);
+ }
+
+ @Override
+ public void writeBinaryField(String fieldName, byte[] data) throws IOException {
+ generator.writeBinaryField(fieldName, data);
+ }
+
+ @Override
+ public void writeBinaryField(XContentString fieldName, byte[] value) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeBinary(value);
+ }
+
+ @Override
+ public void writeArrayFieldStart(String fieldName) throws IOException {
+ generator.writeArrayFieldStart(fieldName);
+ }
+
+ @Override
+ public void writeArrayFieldStart(XContentString fieldName) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeStartArray();
+ }
+
+ @Override
+ public void writeObjectFieldStart(String fieldName) throws IOException {
+ generator.writeObjectFieldStart(fieldName);
+ }
+
+ @Override
+ public void writeObjectFieldStart(XContentString fieldName) throws IOException {
+ generator.writeFieldName(fieldName);
+ generator.writeStartObject();
+ }
+
+ @Override
+ public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
+ generator.writeRaw(", \"");
+ generator.writeRaw(fieldName);
+ generator.writeRaw("\" : ");
+ flush();
+ bos.write(content);
+ }
+
+ @Override
+ public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
+ generator.writeRaw(", \"");
+ generator.writeRaw(fieldName);
+ generator.writeRaw("\" : ");
+ flush();
+ bos.write(content, offset, length);
+ }
+
+ @Override
+ public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
+ generator.writeRaw(", \"");
+ generator.writeRaw(fieldName);
+ generator.writeRaw("\" : ");
+ flush();
+ Streams.copy(content, bos);
+ }
+
+ @Override
+ public final void writeRawField(String fieldName, BytesReference content, OutputStream bos) throws IOException {
+ XContentType contentType = XContentFactory.xContentType(content);
+ if (contentType != null) {
+ writeObjectRaw(fieldName, content, bos);
+ } else {
+ writeFieldName(fieldName);
+ // we could potentially optimize this to not rely on exception logic...
+ String sValue = content.toUtf8();
+ try {
+ writeNumber(Long.parseLong(sValue));
+ } catch (NumberFormatException e) {
+ try {
+ writeNumber(Double.parseDouble(sValue));
+ } catch (NumberFormatException e1) {
+ writeString(sValue);
+ }
+ }
+ }
+ }
+
+ protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
+ generator.writeRaw(", \"");
+ generator.writeRaw(fieldName);
+ generator.writeRaw("\" : ");
+ flush();
+ content.writeTo(bos);
+ }
+
+ @Override
+ public void copyCurrentStructure(XContentParser parser) throws IOException {
+ // at the start of the input, advance to the first token
+ if (parser.currentToken() == null) {
+ parser.nextToken();
+ }
+ if (parser instanceof JsonXContentParser) {
+ generator.copyCurrentStructure(((JsonXContentParser) parser).parser);
+ } else {
+ XContentHelper.copyCurrentStructure(this, parser);
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ generator.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (generator.isClosed()) {
+ return;
+ }
+ if (writeLineFeedAtEnd) {
+ flush();
+ generator.writeRaw(LF);
+ }
+ generator.close();
+ }
+
+ private static final SerializedString LF = new SerializedString("\n");
+}
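The writeRawField(String, BytesReference, OutputStream) override above falls back, for bytes not recognized as any XContentType, to emitting a long, then a double, then a plain string. Illustrative outcomes (not part of the patch):

    // bytes "{\"a\":1}" -> detected as JSON, copied verbatim via writeObjectRaw
    // bytes "42"        -> writeNumber(42L)
    // bytes "4.2"       -> writeNumber(4.2d)
    // bytes "hello"     -> writeString("hello")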
diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java
new file mode 100644
index 0000000..3c09d28
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.json;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.support.AbstractXContentParser;
+
+import java.io.IOException;
+
+/**
+ * A JSON {@link org.elasticsearch.common.xcontent.XContentParser} wrapping a Jackson {@link JsonParser}.
+ */
+public class JsonXContentParser extends AbstractXContentParser {
+
+ final JsonParser parser;
+
+ public JsonXContentParser(JsonParser parser) {
+ this.parser = parser;
+ }
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.JSON;
+ }
+
+ @Override
+ public Token nextToken() throws IOException {
+ return convertToken(parser.nextToken());
+ }
+
+ @Override
+ public void skipChildren() throws IOException {
+ parser.skipChildren();
+ }
+
+ @Override
+ public Token currentToken() {
+ return convertToken(parser.getCurrentToken());
+ }
+
+ @Override
+ public NumberType numberType() throws IOException {
+ return convertNumberType(parser.getNumberType());
+ }
+
+ @Override
+ public boolean estimatedNumberType() {
+ return true;
+ }
+
+ @Override
+ public String currentName() throws IOException {
+ return parser.getCurrentName();
+ }
+
+ @Override
+ protected boolean doBooleanValue() throws IOException {
+ return parser.getBooleanValue();
+ }
+
+ @Override
+ public String text() throws IOException {
+ return parser.getText();
+ }
+
+ @Override
+ public BytesRef bytes() throws IOException {
+ BytesRef bytes = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(parser.getTextCharacters(), parser.getTextOffset(), parser.getTextLength(), bytes);
+ return bytes;
+ }
+
+ @Override
+ public Object objectText() throws IOException {
+ JsonToken currentToken = parser.getCurrentToken();
+ if (currentToken == JsonToken.VALUE_STRING) {
+ return text();
+ } else if (currentToken == JsonToken.VALUE_NUMBER_INT || currentToken == JsonToken.VALUE_NUMBER_FLOAT) {
+ return parser.getNumberValue();
+ } else if (currentToken == JsonToken.VALUE_TRUE) {
+ return Boolean.TRUE;
+ } else if (currentToken == JsonToken.VALUE_FALSE) {
+ return Boolean.FALSE;
+ } else if (currentToken == JsonToken.VALUE_NULL) {
+ return null;
+ } else {
+ return text();
+ }
+ }
+
+ @Override
+ public Object objectBytes() throws IOException {
+ JsonToken currentToken = parser.getCurrentToken();
+ if (currentToken == JsonToken.VALUE_STRING) {
+ return bytes();
+ } else if (currentToken == JsonToken.VALUE_NUMBER_INT || currentToken == JsonToken.VALUE_NUMBER_FLOAT) {
+ return parser.getNumberValue();
+ } else if (currentToken == JsonToken.VALUE_TRUE) {
+ return Boolean.TRUE;
+ } else if (currentToken == JsonToken.VALUE_FALSE) {
+ return Boolean.FALSE;
+ } else if (currentToken == JsonToken.VALUE_NULL) {
+ return null;
+ } else {
+ return bytes();
+ }
+ }
+
+ @Override
+ public boolean hasTextCharacters() {
+ return parser.hasTextCharacters();
+ }
+
+ @Override
+ public char[] textCharacters() throws IOException {
+ return parser.getTextCharacters();
+ }
+
+ @Override
+ public int textLength() throws IOException {
+ return parser.getTextLength();
+ }
+
+ @Override
+ public int textOffset() throws IOException {
+ return parser.getTextOffset();
+ }
+
+ @Override
+ public Number numberValue() throws IOException {
+ return parser.getNumberValue();
+ }
+
+ @Override
+ public short doShortValue() throws IOException {
+ return parser.getShortValue();
+ }
+
+ @Override
+ public int doIntValue() throws IOException {
+ return parser.getIntValue();
+ }
+
+ @Override
+ public long doLongValue() throws IOException {
+ return parser.getLongValue();
+ }
+
+ @Override
+ public float doFloatValue() throws IOException {
+ return parser.getFloatValue();
+ }
+
+ @Override
+ public double doDoubleValue() throws IOException {
+ return parser.getDoubleValue();
+ }
+
+ @Override
+ public byte[] binaryValue() throws IOException {
+ return parser.getBinaryValue();
+ }
+
+ @Override
+ public void close() {
+ try {
+ parser.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+
+ private NumberType convertNumberType(JsonParser.NumberType numberType) {
+ switch (numberType) {
+ case INT:
+ return NumberType.INT;
+ case LONG:
+ return NumberType.LONG;
+ case FLOAT:
+ return NumberType.FLOAT;
+ case DOUBLE:
+ return NumberType.DOUBLE;
+ }
+ throw new ElasticsearchIllegalStateException("No matching token for number_type [" + numberType + "]");
+ }
+
+ private Token convertToken(JsonToken token) {
+ if (token == null) {
+ return null;
+ }
+ switch (token) {
+ case FIELD_NAME:
+ return Token.FIELD_NAME;
+ case VALUE_FALSE:
+ case VALUE_TRUE:
+ return Token.VALUE_BOOLEAN;
+ case VALUE_STRING:
+ return Token.VALUE_STRING;
+ case VALUE_NUMBER_INT:
+ case VALUE_NUMBER_FLOAT:
+ return Token.VALUE_NUMBER;
+ case VALUE_NULL:
+ return Token.VALUE_NULL;
+ case START_OBJECT:
+ return Token.START_OBJECT;
+ case END_OBJECT:
+ return Token.END_OBJECT;
+ case START_ARRAY:
+ return Token.START_ARRAY;
+ case END_ARRAY:
+ return Token.END_ARRAY;
+ case VALUE_EMBEDDED_OBJECT:
+ return Token.VALUE_EMBEDDED_OBJECT;
+ }
+ throw new ElasticsearchIllegalStateException("No matching token for json_token [" + token + "]");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
new file mode 100644
index 0000000..7dfdb62
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.smile;
+
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.dataformat.smile.SmileFactory;
+import com.fasterxml.jackson.dataformat.smile.SmileGenerator;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.common.xcontent.json.JsonXContentParser;
+
+import java.io.*;
+
+/**
+ * A Smile-based content implementation using Jackson.
+ */
+public class SmileXContent implements XContent {
+
+ public static XContentBuilder contentBuilder() throws IOException {
+ return XContentBuilder.builder(smileXContent);
+ }
+
+ final static SmileFactory smileFactory;
+ public final static SmileXContent smileXContent;
+
+ static {
+ smileFactory = new SmileFactory();
+ smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // disabled for now since it adds overhead; might make sense for web sockets
+ smileXContent = new SmileXContent();
+ }
+
+ private SmileXContent() {
+ }
+
+ @Override
+ public XContentType type() {
+ return XContentType.SMILE;
+ }
+
+ @Override
+ public byte streamSeparator() {
+ return (byte) 0xFF;
+ }
+
+ @Override
+ public XContentGenerator createGenerator(OutputStream os) throws IOException {
+ return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8));
+ }
+
+ @Override
+ public XContentGenerator createGenerator(Writer writer) throws IOException {
+ return new SmileXContentGenerator(smileFactory.createGenerator(writer));
+ }
+
+ @Override
+ public XContentParser createParser(String content) throws IOException {
+ return new SmileXContentParser(smileFactory.createParser(new FastStringReader(content)));
+ }
+
+ @Override
+ public XContentParser createParser(InputStream is) throws IOException {
+ return new SmileXContentParser(smileFactory.createParser(is));
+ }
+
+ @Override
+ public XContentParser createParser(byte[] data) throws IOException {
+ return new SmileXContentParser(smileFactory.createParser(data));
+ }
+
+ @Override
+ public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
+ return new SmileXContentParser(smileFactory.createParser(data, offset, length));
+ }
+
+ @Override
+ public XContentParser createParser(BytesReference bytes) throws IOException {
+ if (bytes.hasArray()) {
+ return createParser(bytes.array(), bytes.arrayOffset(), bytes.length());
+ }
+ return createParser(bytes.streamInput());
+ }
+
+ @Override
+ public XContentParser createParser(Reader reader) throws IOException {
+ return new SmileXContentParser(smileFactory.createParser(reader));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
new file mode 100644
index 0000000..7a9b48c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.smile;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.smile.SmileParser;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * A Smile variant of {@link JsonXContentGenerator}; raw fields are re-parsed and copied
+ * structurally instead of being written verbatim.
+ */
+public class SmileXContentGenerator extends JsonXContentGenerator {
+
+ public SmileXContentGenerator(JsonGenerator generator) {
+ super(generator);
+ }
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.SMILE;
+ }
+
+ @Override
+ public void usePrintLineFeedAtEnd() {
+ // a trailing line feed is meaningless in the binary Smile format
+ }
+
+ @Override
+ public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ SmileParser parser = SmileXContent.smileFactory.createParser(content);
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ SmileParser parser = SmileXContent.smileFactory.createParser(content);
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ SmileParser parser;
+ if (content.hasArray()) {
+ parser = SmileXContent.smileFactory.createParser(content.array(), content.arrayOffset(), content.length());
+ } else {
+ parser = SmileXContent.smileFactory.createParser(content.streamInput());
+ }
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ SmileParser parser = SmileXContent.smileFactory.createParser(content, offset, length);
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java
new file mode 100644
index 0000000..2bbf99d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.smile;
+
+import com.fasterxml.jackson.core.JsonParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContentParser;
+
+/**
+ * A Smile variant of {@link JsonXContentParser}; Jackson's Smile parser shares the JSON parser API.
+ */
+public class SmileXContentParser extends JsonXContentParser {
+
+ public SmileXContentParser(JsonParser parser) {
+ super(parser);
+ }
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.SMILE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentGenerator.java
new file mode 100644
index 0000000..e400a48
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentGenerator.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.xcontent.XContentGenerator;
+
+import java.io.IOException;
+
+/**
+ * A base {@link XContentGenerator} implementing the field-writing convenience methods
+ * in terms of the primitive write methods.
+ */
+public abstract class AbstractXContentGenerator implements XContentGenerator {
+
+ @Override
+ public void writeStringField(String fieldName, String value) throws IOException {
+ writeFieldName(fieldName);
+ writeString(value);
+ }
+
+ @Override
+ public void writeBooleanField(String fieldName, boolean value) throws IOException {
+ writeFieldName(fieldName);
+ writeBoolean(value);
+ }
+
+ @Override
+ public void writeNullField(String fieldName) throws IOException {
+ writeFieldName(fieldName);
+ writeNull();
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, int value) throws IOException {
+ writeFieldName(fieldName);
+ writeNumber(value);
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, long value) throws IOException {
+ writeFieldName(fieldName);
+ writeNumber(value);
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, double value) throws IOException {
+ writeFieldName(fieldName);
+ writeNumber(value);
+ }
+
+ @Override
+ public void writeNumberField(String fieldName, float value) throws IOException {
+ writeFieldName(fieldName);
+ writeNumber(value);
+ }
+
+ @Override
+ public void writeBinaryField(String fieldName, byte[] data) throws IOException {
+ writeFieldName(fieldName);
+ writeBinary(data);
+ }
+
+ @Override
+ public void writeArrayFieldStart(String fieldName) throws IOException {
+ writeFieldName(fieldName);
+ writeStartArray();
+ }
+
+ @Override
+ public void writeObjectFieldStart(String fieldName) throws IOException {
+ writeFieldName(fieldName);
+ writeStartObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
new file mode 100644
index 0000000..7d55b6a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * A base {@link XContentParser} implementing number coercion, boolean handling and
+ * map/list reading on top of the primitive do*Value methods.
+ */
+public abstract class AbstractXContentParser implements XContentParser {
+
+ // Currently this is not a setting that can be changed; it is a policy
+ // that governs how parsing of things like "boost" is done across
+ // the whole of Elasticsearch (e.g. whether the String "1.0" is a valid float).
+ // The idea behind keeping it as a constant is that we can track
+ // references to this policy decision throughout the codebase and find
+ // and change any code that needs to apply an alternative policy.
+ public static final boolean DEFAULT_NUMBER_COEERCE_POLICY = true;
+
+ private static void checkCoerceString(boolean coerce, Class<? extends Number> clazz) {
+ if (!coerce) {
+ //Need to throw type IllegalArgumentException as current catch logic in
+ //NumberFieldMapper.parseCreateField relies on this for "malformed" value detection
+ throw new IllegalArgumentException(clazz.getSimpleName() + " value passed as String");
+ }
+ }
+
+
+
+ // The 3rd party parsers we rely on are known to silently truncate fractions: see
+ // http://fasterxml.github.io/jackson-core/javadoc/2.3.0/com/fasterxml/jackson/core/JsonParser.html#getShortValue()
+ // If this behaviour is flagged as undesirable and any truncation occurs,
+ // then this method is called to trigger the "malformed" handling logic.
+ void ensureNumberConversion(boolean coerce, long result, Class<? extends Number> clazz) throws IOException {
+ if (!coerce) {
+ double fullVal = doDoubleValue();
+ if (result != fullVal) {
+ // Need to throw type IllegalArgumentException as current catch
+ // logic in NumberFieldMapper.parseCreateField relies on this
+ // for "malformed" value detection
+ throw new IllegalArgumentException(fullVal + " cannot be converted to " + clazz.getSimpleName() + " without data loss");
+ }
+ }
+ }
+
+ @Override
+ public boolean isBooleanValue() throws IOException {
+ switch (currentToken()) {
+ case VALUE_BOOLEAN:
+ return true;
+ case VALUE_NUMBER:
+ NumberType numberType = numberType();
+ return numberType == NumberType.LONG || numberType == NumberType.INT;
+ case VALUE_STRING:
+ return Booleans.isBoolean(textCharacters(), textOffset(), textLength());
+ default:
+ return false;
+ }
+ }
+
+ @Override
+ public boolean booleanValue() throws IOException {
+ Token token = currentToken();
+ if (token == Token.VALUE_NUMBER) {
+ return intValue() != 0;
+ } else if (token == Token.VALUE_STRING) {
+ return Booleans.parseBoolean(textCharacters(), textOffset(), textLength(), false /* irrelevant */);
+ }
+ return doBooleanValue();
+ }
+
+ protected abstract boolean doBooleanValue() throws IOException;
+
+ @Override
+ public short shortValue() throws IOException {
+ return shortValue(DEFAULT_NUMBER_COEERCE_POLICY);
+ }
+
+ @Override
+ public short shortValue(boolean coerce) throws IOException {
+ Token token = currentToken();
+ if (token == Token.VALUE_STRING) {
+ checkCoerceString(coerce, Short.class);
+ return Short.parseShort(text());
+ }
+ short result = doShortValue();
+ ensureNumberConversion(coerce, result, Short.class);
+ return result;
+ }
+
+ protected abstract short doShortValue() throws IOException;
+
+ @Override
+ public int intValue() throws IOException {
+ return intValue(DEFAULT_NUMBER_COEERCE_POLICY);
+ }
+
+
+ @Override
+ public int intValue(boolean coerce) throws IOException {
+ Token token = currentToken();
+ if (token == Token.VALUE_STRING) {
+ checkCoerceString(coerce, Integer.class);
+ return Integer.parseInt(text());
+ }
+ int result = doIntValue();
+ ensureNumberConversion(coerce, result, Integer.class);
+ return result;
+ }
+
+ protected abstract int doIntValue() throws IOException;
+
+ @Override
+ public long longValue() throws IOException {
+ return longValue(DEFAULT_NUMBER_COEERCE_POLICY);
+ }
+
+ @Override
+ public long longValue(boolean coerce) throws IOException {
+ Token token = currentToken();
+ if (token == Token.VALUE_STRING) {
+ checkCoerceString(coerce, Long.class);
+ return Long.parseLong(text());
+ }
+ long result = doLongValue();
+ ensureNumberConversion(coerce, result, Long.class);
+ return result;
+ }
+
+ protected abstract long doLongValue() throws IOException;
+
+ @Override
+ public float floatValue() throws IOException {
+ return floatValue(DEFAULT_NUMBER_COEERCE_POLICY);
+ }
+
+ @Override
+ public float floatValue(boolean coerce) throws IOException {
+ Token token = currentToken();
+ if (token == Token.VALUE_STRING) {
+ checkCoerceString(coerce, Float.class);
+ return Float.parseFloat(text());
+ }
+ return doFloatValue();
+ }
+
+ protected abstract float doFloatValue() throws IOException;
+
+
+ @Override
+ public double doubleValue() throws IOException {
+ return doubleValue(DEFAULT_NUMBER_COEERCE_POLICY);
+ }
+
+ @Override
+ public double doubleValue(boolean coerce) throws IOException {
+ Token token = currentToken();
+ if (token == Token.VALUE_STRING) {
+ checkCoerceString(coerce, Double.class);
+ return Double.parseDouble(text());
+ }
+ return doDoubleValue();
+ }
+
+ protected abstract double doDoubleValue() throws IOException;
+
+ @Override
+ public String textOrNull() throws IOException {
+ if (currentToken() == Token.VALUE_NULL) {
+ return null;
+ }
+ return text();
+ }
+
+
+ @Override
+ public BytesRef bytesOrNull() throws IOException {
+ if (currentToken() == Token.VALUE_NULL) {
+ return null;
+ }
+ return bytes();
+ }
+
+ @Override
+ public Map<String, Object> map() throws IOException {
+ return readMap(this);
+ }
+
+ @Override
+ public Map<String, Object> mapOrdered() throws IOException {
+ return readOrderedMap(this);
+ }
+
+ @Override
+ public Map<String, Object> mapAndClose() throws IOException {
+ try {
+ return map();
+ } finally {
+ close();
+ }
+ }
+
+ @Override
+ public Map<String, Object> mapOrderedAndClose() throws IOException {
+ try {
+ return mapOrdered();
+ } finally {
+ close();
+ }
+ }
+
+
+ interface MapFactory {
+ Map<String, Object> newMap();
+ }
+
+ static final MapFactory SIMPLE_MAP_FACTORY = new MapFactory() {
+ @Override
+ public Map<String, Object> newMap() {
+ return new HashMap<String, Object>();
+ }
+ };
+
+ static final MapFactory ORDERED_MAP_FACTORY = new MapFactory() {
+ @Override
+ public Map<String, Object> newMap() {
+ return new LinkedHashMap<String, Object>();
+ }
+ };
+
+ static Map<String, Object> readMap(XContentParser parser) throws IOException {
+ return readMap(parser, SIMPLE_MAP_FACTORY);
+ }
+
+ static Map<String, Object> readOrderedMap(XContentParser parser) throws IOException {
+ return readMap(parser, ORDERED_MAP_FACTORY);
+ }
+
+ static Map<String, Object> readMap(XContentParser parser, MapFactory mapFactory) throws IOException {
+ Map<String, Object> map = mapFactory.newMap();
+ XContentParser.Token t = parser.currentToken();
+ if (t == null) {
+ t = parser.nextToken();
+ }
+ if (t == XContentParser.Token.START_OBJECT) {
+ t = parser.nextToken();
+ }
+ for (; t == XContentParser.Token.FIELD_NAME; t = parser.nextToken()) {
+ // Must point to field name
+ String fieldName = parser.currentName();
+ // And then the value...
+ t = parser.nextToken();
+ Object value = readValue(parser, mapFactory, t);
+ map.put(fieldName, value);
+ }
+ return map;
+ }
+
+ private static List<Object> readList(XContentParser parser, MapFactory mapFactory, XContentParser.Token t) throws IOException {
+ ArrayList<Object> list = new ArrayList<Object>();
+ while ((t = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ list.add(readValue(parser, mapFactory, t));
+ }
+ return list;
+ }
+
+ private static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token t) throws IOException {
+ if (t == XContentParser.Token.VALUE_NULL) {
+ return null;
+ } else if (t == XContentParser.Token.VALUE_STRING) {
+ return parser.text();
+ } else if (t == XContentParser.Token.VALUE_NUMBER) {
+ XContentParser.NumberType numberType = parser.numberType();
+ if (numberType == XContentParser.NumberType.INT) {
+ return parser.intValue();
+ } else if (numberType == XContentParser.NumberType.LONG) {
+ return parser.longValue();
+ } else if (numberType == XContentParser.NumberType.FLOAT) {
+ return parser.floatValue();
+ } else if (numberType == XContentParser.NumberType.DOUBLE) {
+ return parser.doubleValue();
+ }
+ } else if (t == XContentParser.Token.VALUE_BOOLEAN) {
+ return parser.booleanValue();
+ } else if (t == XContentParser.Token.START_OBJECT) {
+ return readMap(parser, mapFactory);
+ } else if (t == XContentParser.Token.START_ARRAY) {
+ return readList(parser, mapFactory, t);
+ } else if (t == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
+ return parser.binaryValue();
+ }
+ return null;
+ }
+}
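A minimal sketch of the coercion policy above as seen from a call site (not part of the patch), with an illustrative payload:

    XContentParser p = JsonXContent.jsonXContent.createParser("{\"boost\":\"1\"}");
    p.nextToken(); // START_OBJECT
    p.nextToken(); // FIELD_NAME "boost"
    p.nextToken(); // VALUE_STRING "1"
    int lenient = p.intValue(true); // coerces the quoted number, returns 1
    p.intValue(false);              // throws IllegalArgumentException: "Integer value passed as String"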
diff --git a/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java
new file mode 100644
index 0000000..ab316e9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java
@@ -0,0 +1,390 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Helpers for navigating and converting values inside maps produced by an
+ * {@link org.elasticsearch.common.xcontent.XContentParser}.
+ */
+public class XContentMapValues {
+
+ /**
+ * Extracts raw values (string, int, and so on) based on the path provided returning all of them
+ * as a single list.
+ */
+ public static List<Object> extractRawValues(String path, Map<String, Object> map) {
+ List<Object> values = Lists.newArrayList();
+ String[] pathElements = Strings.splitStringToArray(path, '.');
+ if (pathElements.length == 0) {
+ return values;
+ }
+ extractRawValues(values, map, pathElements, 0);
+ return values;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private static void extractRawValues(List values, Map<String, Object> part, String[] pathElements, int index) {
+ if (index == pathElements.length) {
+ return;
+ }
+
+ String key = pathElements[index];
+ Object currentValue = part.get(key);
+ int nextIndex = index + 1;
+ while (currentValue == null && nextIndex != pathElements.length) {
+ key += "." + pathElements[nextIndex];
+ currentValue = part.get(key);
+ nextIndex++;
+ }
+
+ if (currentValue == null) {
+ return;
+ }
+
+ if (currentValue instanceof Map) {
+ extractRawValues(values, (Map<String, Object>) currentValue, pathElements, nextIndex);
+ } else if (currentValue instanceof List) {
+ extractRawValues(values, (List) currentValue, pathElements, nextIndex);
+ } else {
+ values.add(currentValue);
+ }
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private static void extractRawValues(List values, List<Object> part, String[] pathElements, int index) {
+ for (Object value : part) {
+ if (value == null) {
+ continue;
+ }
+ if (value instanceof Map) {
+ extractRawValues(values, (Map<String, Object>) value, pathElements, index);
+ } else if (value instanceof List) {
+ extractRawValues(values, (List) value, pathElements, index);
+ } else {
+ values.add(value);
+ }
+ }
+ }
+
+ public static Object extractValue(String path, Map<String, Object> map) {
+ String[] pathElements = Strings.splitStringToArray(path, '.');
+ if (pathElements.length == 0) {
+ return null;
+ }
+ return extractValue(pathElements, 0, map);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private static Object extractValue(String[] pathElements, int index, Object currentValue) {
+ if (index == pathElements.length) {
+ return currentValue;
+ }
+ if (currentValue == null) {
+ return null;
+ }
+ if (currentValue instanceof Map) {
+ Map map = (Map) currentValue;
+ String key = pathElements[index];
+ Object mapValue = map.get(key);
+ int nextIndex = index + 1;
+ while (mapValue == null && nextIndex != pathElements.length) {
+ key += "." + pathElements[nextIndex];
+ mapValue = map.get(key);
+ nextIndex++;
+ }
+ return extractValue(pathElements, nextIndex, mapValue);
+ }
+ if (currentValue instanceof List) {
+ List valueList = (List) currentValue;
+ List newList = new ArrayList(valueList.size());
+ for (Object o : valueList) {
+ Object listValue = extractValue(pathElements, index, o);
+ if (listValue != null) {
+ newList.add(listValue);
+ }
+ }
+ return newList;
+ }
+ return null;
+ }
+
+ public static Map<String, Object> filter(Map<String, Object> map, String[] includes, String[] excludes) {
+ Map<String, Object> result = Maps.newHashMap();
+ filter(map, result, includes == null ? Strings.EMPTY_ARRAY : includes, excludes == null ? Strings.EMPTY_ARRAY : excludes, new StringBuilder());
+ return result;
+ }
+
+ private static void filter(Map<String, Object> map, Map<String, Object> into, String[] includes, String[] excludes, StringBuilder sb) {
+ if (includes.length == 0 && excludes.length == 0) {
+ into.putAll(map);
+ return;
+ }
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ String key = entry.getKey();
+ int mark = sb.length();
+ if (sb.length() > 0) {
+ sb.append('.');
+ }
+ sb.append(key);
+ String path = sb.toString();
+
+ if (Regex.simpleMatch(excludes, path)) {
+ sb.setLength(mark);
+ continue;
+ }
+
+ boolean exactIncludeMatch = false; // true if the current position was explicitly included
+ boolean pathIsPrefixOfAnInclude = false; // true if a sub-scope may still be included
+ if (includes.length == 0) {
+ // implied match anything
+ exactIncludeMatch = true;
+ } else {
+ for (String include : includes) {
+ // check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field
+ // note, this does not work well with middle matches, like obj1.*.obj3
+ if (include.charAt(0) == '*') {
+ if (Regex.simpleMatch(include, path)) {
+ exactIncludeMatch = true;
+ break;
+ }
+ pathIsPrefixOfAnInclude = true;
+ break;
+ }
+ if (include.startsWith(path)) {
+ if (include.length() == path.length()) {
+ exactIncludeMatch = true;
+ break;
+ } else if (include.length() > path.length() && include.charAt(path.length()) == '.') {
+ // include may match deeper paths. Dive deeper.
+ pathIsPrefixOfAnInclude = true;
+ break;
+ }
+ }
+ if (Regex.simpleMatch(include, path)) {
+ exactIncludeMatch = true;
+ break;
+ }
+ }
+ }
+
+ if (!(pathIsPrefixOfAnInclude || exactIncludeMatch)) {
+ // skip subkeys, not interesting.
+ sb.setLength(mark);
+ continue;
+ }
+
+
+ if (entry.getValue() instanceof Map) {
+ Map<String, Object> innerInto = Maps.newHashMap();
+ // if we had an exact match, we want to give deeper excludes their chance
+ filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);
+ if (exactIncludeMatch || !innerInto.isEmpty()) {
+ into.put(entry.getKey(), innerInto);
+ }
+ } else if (entry.getValue() instanceof List) {
+ List<Object> list = (List<Object>) entry.getValue();
+ List<Object> innerInto = new ArrayList<Object>(list.size());
+ // if we had an exact match, we want to give deeper excludes their chance
+ filter(list, innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);
+ into.put(entry.getKey(), innerInto);
+ } else if (exactIncludeMatch) {
+ into.put(entry.getKey(), entry.getValue());
+ }
+ sb.setLength(mark);
+ }
+ }
+
+ private static void filter(List<Object> from, List<Object> to, String[] includes, String[] excludes, StringBuilder sb) {
+ if (includes.length == 0 && excludes.length == 0) {
+ to.addAll(from);
+ return;
+ }
+
+ for (Object o : from) {
+ if (o instanceof Map) {
+ Map<String, Object> innerInto = Maps.newHashMap();
+ filter((Map<String, Object>) o, innerInto, includes, excludes, sb);
+ if (!innerInto.isEmpty()) {
+ to.add(innerInto);
+ }
+ } else if (o instanceof List) {
+ List<Object> innerInto = new ArrayList<Object>();
+ filter((List<Object>) o, innerInto, includes, excludes, sb);
+ if (!innerInto.isEmpty()) {
+ to.add(innerInto);
+ }
+ } else {
+ to.add(o);
+ }
+ }
+ }
+
+ public static boolean isObject(Object node) {
+ return node instanceof Map;
+ }
+
+ public static boolean isArray(Object node) {
+ return node instanceof List;
+ }
+
+ public static String nodeStringValue(Object node, String defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return node.toString();
+ }
+
+ public static float nodeFloatValue(Object node, float defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeFloatValue(node);
+ }
+
+ public static float nodeFloatValue(Object node) {
+ if (node instanceof Number) {
+ return ((Number) node).floatValue();
+ }
+ return Float.parseFloat(node.toString());
+ }
+
+ public static double nodeDoubleValue(Object node, double defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeDoubleValue(node);
+ }
+
+ public static double nodeDoubleValue(Object node) {
+ if (node instanceof Number) {
+ return ((Number) node).doubleValue();
+ }
+ return Double.parseDouble(node.toString());
+ }
+
+ public static int nodeIntegerValue(Object node) {
+ if (node instanceof Number) {
+ return ((Number) node).intValue();
+ }
+ return Integer.parseInt(node.toString());
+ }
+
+ public static int nodeIntegerValue(Object node, int defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ if (node instanceof Number) {
+ return ((Number) node).intValue();
+ }
+ return Integer.parseInt(node.toString());
+ }
+
+ public static short nodeShortValue(Object node, short defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeShortValue(node);
+ }
+
+ public static short nodeShortValue(Object node) {
+ if (node instanceof Number) {
+ return ((Number) node).shortValue();
+ }
+ return Short.parseShort(node.toString());
+ }
+
+ public static byte nodeByteValue(Object node, byte defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeByteValue(node);
+ }
+
+ public static byte nodeByteValue(Object node) {
+ if (node instanceof Number) {
+ return ((Number) node).byteValue();
+ }
+ return Byte.parseByte(node.toString());
+ }
+
+ public static long nodeLongValue(Object node, long defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeLongValue(node);
+ }
+
+ public static long nodeLongValue(Object node) {
+ if (node instanceof Number) {
+ return ((Number) node).longValue();
+ }
+ return Long.parseLong(node.toString());
+ }
+
+ public static boolean nodeBooleanValue(Object node, boolean defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeBooleanValue(node);
+ }
+
+ public static boolean nodeBooleanValue(Object node) {
+ if (node instanceof Boolean) {
+ return (Boolean) node;
+ }
+ if (node instanceof Number) {
+ return ((Number) node).intValue() != 0;
+ }
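+ // any string other than "false", "0" and "off" is treated as true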
+ String value = node.toString();
+ return !(value.equals("false") || value.equals("0") || value.equals("off"));
+ }
+
+ public static TimeValue nodeTimeValue(Object node, TimeValue defaultValue) {
+ if (node == null) {
+ return defaultValue;
+ }
+ return nodeTimeValue(node);
+ }
+
+ public static TimeValue nodeTimeValue(Object node) {
+ if (node instanceof Number) {
+ return TimeValue.timeValueMillis(((Number) node).longValue());
+ }
+ return TimeValue.parseTimeValue(node.toString(), null);
+ }
+
+ public static Map<String, Object> nodeMapValue(Object node, String desc) {
+ if (node instanceof Map) {
+ return (Map<String, Object>) node;
+ } else {
+ throw new ElasticsearchParseException(desc + " should be a hash but was of type: " + node.getClass());
+ }
+ }
+}
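The coercion helpers above follow a common pattern: Numbers are converted directly, anything else is parsed from its string form, and the two-argument overloads fall back to a default on null. A minimal usage sketch; XContentHelpers is a hypothetical stand-in for the enclosing class, whose declaration is outside this hunk:

    import java.util.HashMap;
    import java.util.Map;

    public class NodeValueDemo {
        public static void main(String[] args) {
            Map<String, Object> source = new HashMap<String, Object>();
            source.put("enabled", "off"); // "false", "0" and "off" coerce to false
            source.put("shards", "5");    // numeric strings are parsed
            source.put("boost", 2);       // Numbers are converted directly

            // XContentHelpers is a hypothetical name for the class defined above
            boolean enabled = XContentHelpers.nodeBooleanValue(source.get("enabled"));     // false
            int shards = XContentHelpers.nodeIntegerValue(source.get("shards"), 1);        // 5
            float boost = XContentHelpers.nodeFloatValue(source.get("boost"), 1.0f);       // 2.0f
            String missing = XContentHelpers.nodeStringValue(source.get("absent"), "n/a"); // "n/a"
        }
    }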
diff --git a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
new file mode 100644
index 0000000..e711184
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.yaml;
+
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.xcontent.*;
+
+import java.io.*;
+
+/**
+ * A YAML based content implementation using Jackson.
+ */
+public class YamlXContent implements XContent {
+
+ public static XContentBuilder contentBuilder() throws IOException {
+ return XContentBuilder.builder(yamlXContent);
+ }
+
+ final static YAMLFactory yamlFactory;
+ public final static YamlXContent yamlXContent;
+
+ static {
+ yamlFactory = new YAMLFactory();
+ yamlXContent = new YamlXContent();
+ }
+
+ private YamlXContent() {
+ }
+
+ @Override
+ public XContentType type() {
+ return XContentType.YAML;
+ }
+
+ @Override
+ public byte streamSeparator() {
+ throw new ElasticsearchParseException("yaml does not support stream parsing...");
+ }
+
+ @Override
+ public XContentGenerator createGenerator(OutputStream os) throws IOException {
+ return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8));
+ }
+
+ @Override
+ public XContentGenerator createGenerator(Writer writer) throws IOException {
+ return new YamlXContentGenerator(yamlFactory.createGenerator(writer));
+ }
+
+ @Override
+ public XContentParser createParser(String content) throws IOException {
+ return new YamlXContentParser(yamlFactory.createParser(new FastStringReader(content)));
+ }
+
+ @Override
+ public XContentParser createParser(InputStream is) throws IOException {
+ return new YamlXContentParser(yamlFactory.createParser(is));
+ }
+
+ @Override
+ public XContentParser createParser(byte[] data) throws IOException {
+ return new YamlXContentParser(yamlFactory.createParser(data));
+ }
+
+ @Override
+ public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
+ return new YamlXContentParser(yamlFactory.createParser(data, offset, length));
+ }
+
+ @Override
+ public XContentParser createParser(BytesReference bytes) throws IOException {
+ if (bytes.hasArray()) {
+ return createParser(bytes.array(), bytes.arrayOffset(), bytes.length());
+ }
+ return createParser(bytes.streamInput());
+ }
+
+ @Override
+ public XContentParser createParser(Reader reader) throws IOException {
+ return new YamlXContentParser(yamlFactory.createParser(reader));
+ }
+}
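YamlXContent only swaps in the Jackson YAML factory; documents are still assembled through the shared XContentBuilder API. A minimal sketch, assuming the fluent builder methods (startObject/field/endObject/string) defined elsewhere in this codebase:

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;

    public class YamlBuilderDemo {
        public static void main(String[] args) throws Exception {
            XContentBuilder builder = YamlXContent.contentBuilder();
            builder.startObject()
                    .field("name", "node-1")
                    .field("port", 9300)
                    .endObject();
            // the same builder calls against JsonXContent would emit JSON instead
            System.out.println(builder.string());
        }
    }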
diff --git a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
new file mode 100644
index 0000000..487ca38
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.yaml;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.yaml.YAMLParser;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ *
+ */
+public class YamlXContentGenerator extends JsonXContentGenerator {
+
+ public YamlXContentGenerator(JsonGenerator generator) {
+ super(generator);
+ }
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.YAML;
+ }
+
+ @Override
+ public void usePrintLineFeedAtEnd() {
+ // nothing here
+ }
+
+ @Override
+ public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ YAMLParser parser = YamlXContent.yamlFactory.createParser(content);
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ YAMLParser parser = YamlXContent.yamlFactory.createParser(content);
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ YAMLParser parser;
+ if (content.hasArray()) {
+ parser = YamlXContent.yamlFactory.createParser(content.array(), content.arrayOffset(), content.length());
+ } else {
+ parser = YamlXContent.yamlFactory.createParser(content.streamInput());
+ }
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
+ writeFieldName(fieldName);
+ YAMLParser parser = YamlXContent.yamlFactory.createParser(content, offset, length);
+ try {
+ parser.nextToken();
+ generator.copyCurrentStructure(parser);
+ } finally {
+ parser.close();
+ }
+ }
+}
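Unlike JSON, YAML output cannot splice raw bytes verbatim, so every writeRawField override above re-parses the raw content and copies its token structure into the generator. A minimal sketch of that behavior; the method names come from the interfaces above, everything else is illustrative, and since YAML is a superset of JSON, raw JSON bytes are valid input:

    import org.elasticsearch.common.xcontent.XContentGenerator;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;

    public class RawFieldDemo {
        public static void main(String[] args) throws Exception {
            XContentGenerator gen = YamlXContent.yamlXContent.createGenerator(System.out);
            gen.writeStartObject();
            // the raw bytes are re-parsed and re-emitted as YAML tokens,
            // not copied verbatim into the output
            byte[] raw = "{\"took\": 5, \"ok\": true}".getBytes("UTF-8");
            gen.writeRawField("response", raw, System.out);
            gen.writeEndObject();
            gen.close();
        }
    }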
diff --git a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java
new file mode 100644
index 0000000..3b674c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.yaml;
+
+import com.fasterxml.jackson.core.JsonParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContentParser;
+
+/**
+ *
+ */
+public class YamlXContentParser extends JsonXContentParser {
+
+ public YamlXContentParser(JsonParser parser) {
+ super(parser);
+ }
+
+ @Override
+ public XContentType contentType() {
+ return XContentType.YAML;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java b/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
new file mode 100644
index 0000000..34fac6c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+
+/**
+ * Allows waiting for all nodes to reply to the publish of a new cluster state,
+ * and notifies the {@link org.elasticsearch.discovery.Discovery.AckListener}
+ * so that the cluster state update can be acknowledged
+ */
+public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler {
+
+ private static final ESLogger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName());
+
+ private final Discovery.AckListener ackListener;
+
+ /**
+ * Creates a new AckClusterStatePublishResponseHandler
+ * @param nonMasterNodes number of nodes that are supposed to reply to a cluster state publish from master
+ * @param ackListener the {@link org.elasticsearch.discovery.Discovery.AckListener} to notify for each response
+ * received from non-master nodes
+ */
+ public AckClusterStatePublishResponseHandler(int nonMasterNodes, Discovery.AckListener ackListener) {
+ //Don't count the master as acknowledged, because it's not done yet
+ //otherwise we might end up with all the nodes but the master holding the latest cluster state
+ super(nonMasterNodes);
+ this.ackListener = ackListener;
+ }
+
+ @Override
+ public void onResponse(DiscoveryNode node) {
+ super.onResponse(node);
+ onNodeAck(ackListener, node, null);
+ }
+
+ @Override
+ public void onFailure(DiscoveryNode node, Throwable t) {
+ try {
+ super.onFailure(node, t);
+ } finally {
+ onNodeAck(ackListener, node, t);
+ }
+ }
+
+ private void onNodeAck(final Discovery.AckListener ackListener, DiscoveryNode node, Throwable t) {
+ try {
+ ackListener.onNodeAck(node, t);
+ } catch (Throwable t1) {
+ logger.debug("error while processing ack for node [{}]", t1, node);
+ }
+ }
+}
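A minimal sketch of wiring an AckListener into this handler; the listener callbacks and the constructor are the ones declared above, while the class name and node count are illustrative:

    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
    import org.elasticsearch.discovery.Discovery;

    public class AckHandlerSketch {
        static AckClusterStatePublishResponseHandler newHandler(int nonMasterNodes) {
            Discovery.AckListener listener = new Discovery.AckListener() {
                @Override
                public void onNodeAck(DiscoveryNode node, Throwable t) {
                    // t == null: the node applied the published state; otherwise it failed
                }

                @Override
                public void onTimeout() {
                    // the publisher gave up waiting for the remaining acks
                }
            };
            // onResponse/onFailure count down the inherited latch and forward to the listener
            return new AckClusterStatePublishResponseHandler(nonMasterNodes, listener);
        }
    }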
diff --git a/src/main/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandler.java b/src/main/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandler.java
new file mode 100644
index 0000000..76a35ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandler.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+
+/**
+ * Default implementation of {@link ClusterStatePublishResponseHandler}, which allows awaiting a reply
+ * to a cluster state publish from all non-master nodes, up to a timeout
+ */
+public class BlockingClusterStatePublishResponseHandler implements ClusterStatePublishResponseHandler {
+
+ private final CountDownLatch latch;
+
+ /**
+ * Creates a new BlockingClusterStatePublishResponseHandler
+ * @param nonMasterNodes number of nodes that are supposed to reply to a cluster state publish from master
+ */
+ public BlockingClusterStatePublishResponseHandler(int nonMasterNodes) {
+ //Don't count the master, as it's the one that does the publish
+ //the master won't call onResponse either
+ this.latch = new CountDownLatch(nonMasterNodes);
+ }
+
+ @Override
+ public void onResponse(DiscoveryNode node) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(DiscoveryNode node, Throwable t) {
+ latch.countDown();
+ }
+
+ @Override
+ public boolean awaitAllNodes(TimeValue timeout) throws InterruptedException {
+ return latch.await(timeout.millis(), TimeUnit.MILLISECONDS);
+ }
+}
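A minimal sketch of the latch semantics: onResponse and onFailure both count down, and awaitAllNodes reports whether every expected node replied in time:

    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;

    public class AwaitSketch {
        static boolean waitForPublish(BlockingClusterStatePublishResponseHandler handler)
                throws InterruptedException {
            // elsewhere, each node's response path calls handler.onResponse(node)
            // or handler.onFailure(node, t); both count the latch down
            return handler.awaitAllNodes(TimeValue.timeValueSeconds(30));
        }
    }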
diff --git a/src/main/java/org/elasticsearch/discovery/ClusterStatePublishResponseHandler.java b/src/main/java/org/elasticsearch/discovery/ClusterStatePublishResponseHandler.java
new file mode 100644
index 0000000..1a5d878
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/ClusterStatePublishResponseHandler.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Handles responses obtained when publishing a new cluster state from the master to all non-master nodes.
+ * Allows awaiting a reply from all non-master nodes, up to a timeout
+ */
+public interface ClusterStatePublishResponseHandler {
+
+ /**
+ * Called for each response received from non-master nodes
+ * @param node the node that replied to the publish event
+ */
+ void onResponse(DiscoveryNode node);
+
+ /**
+ * Called for each failure received from non-master nodes
+ * @param node the node that failed to process the publish event
+ * @param t the failure that occurred on that node
+ */
+ void onFailure(DiscoveryNode node, Throwable t);
+
+ /**
+ * Allows waiting for all non-master nodes to reply to the publish event, up to a timeout
+ * @param timeout the timeout
+ * @return true if all the expected nodes replied before the timeout elapsed, false otherwise
+ * @throws InterruptedException if the waiting thread is interrupted
+ */
+ boolean awaitAllNodes(TimeValue timeout) throws InterruptedException;
+}
diff --git a/src/main/java/org/elasticsearch/discovery/Discovery.java b/src/main/java/org/elasticsearch/discovery/Discovery.java
new file mode 100644
index 0000000..7925c88
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/Discovery.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * A pluggable module that implements discovery of other nodes, publishing of the cluster
+ * state to all nodes, and election of a cluster master that raises cluster state change
+ * events.
+ */
+public interface Discovery extends LifecycleComponent<Discovery> {
+
+ final ClusterBlock NO_MASTER_BLOCK = new ClusterBlock(2, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
+
+ public static final TimeValue DEFAULT_PUBLISH_TIMEOUT = TimeValue.timeValueSeconds(30);
+
+ DiscoveryNode localNode();
+
+ void addListener(InitialStateDiscoveryListener listener);
+
+ void removeListener(InitialStateDiscoveryListener listener);
+
+ String nodeDescription();
+
+ /**
+ * Here as a hack to solve a dependency injection problem...
+ */
+ void setNodeService(@Nullable NodeService nodeService);
+
+ /**
+ * Another hack to solve a dependency injection problem...; note, this will be called before
+ * any start is called.
+ */
+ void setAllocationService(AllocationService allocationService);
+
+ /**
+ * Publishes all the changes to the cluster from the master (can be called only by the master). The publish
+ * process should not publish this state to the master as well! (the master is sending it...).
+ *
+ * The {@link AckListener} allows keeping track of the acks received from the nodes, and verifying
+ * whether they updated their own cluster state or not.
+ */
+ void publish(ClusterState clusterState, AckListener ackListener);
+
+ public static interface AckListener {
+ void onNodeAck(DiscoveryNode node, @Nullable Throwable t);
+ void onTimeout();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryException.java b/src/main/java/org/elasticsearch/discovery/DiscoveryException.java
new file mode 100644
index 0000000..51c9b87
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/DiscoveryException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class DiscoveryException extends ElasticsearchException {
+
+ public DiscoveryException(String message) {
+ super(message);
+ }
+
+ public DiscoveryException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
new file mode 100644
index 0000000..98b5b45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.local.LocalDiscoveryModule;
+import org.elasticsearch.discovery.zen.ZenDiscoveryModule;
+
+/**
+ *
+ */
+public class DiscoveryModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public DiscoveryModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ Class<? extends Module> defaultDiscoveryModule;
+ if (DiscoveryNode.localNode(settings)) {
+ defaultDiscoveryModule = LocalDiscoveryModule.class;
+ } else {
+ defaultDiscoveryModule = ZenDiscoveryModule.class;
+ }
+ return ImmutableList.of(Modules.createModule(settings.getAsClass("discovery.type", defaultDiscoveryModule, "org.elasticsearch.discovery.", "DiscoveryModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ bind(DiscoveryService.class).asEagerSingleton();
+ }
+} \ No newline at end of file
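A minimal sketch of how the spawned module is selected, assuming DiscoveryNode.localNode(settings) keys off node.local; the custom module class name is hypothetical:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class DiscoverySelectionSketch {
        public static void main(String[] args) {
            // a local-mode node spawns LocalDiscoveryModule instead of ZenDiscoveryModule
            Settings local = ImmutableSettings.settingsBuilder()
                    .put("node.local", true)
                    .build();

            // discovery.type overrides the default; the class name below is hypothetical
            Settings custom = ImmutableSettings.settingsBuilder()
                    .put("discovery.type", "com.example.discovery.MyDiscoveryModule")
                    .build();
        }
    }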
diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java
new file mode 100644
index 0000000..f4fff6f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {
+
+ private final TimeValue initialStateTimeout;
+
+ private final Discovery discovery;
+
+ private volatile boolean initialStateReceived;
+
+ @Inject
+ public DiscoveryService(Settings settings, Discovery discovery) {
+ super(settings);
+ this.discovery = discovery;
+ this.initialStateTimeout = componentSettings.getAsTime("initial_state_timeout", TimeValue.timeValueSeconds(30));
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ final CountDownLatch latch = new CountDownLatch(1);
+ InitialStateDiscoveryListener listener = new InitialStateDiscoveryListener() {
+ @Override
+ public void initialStateProcessed() {
+ latch.countDown();
+ }
+ };
+ discovery.addListener(listener);
+ try {
+ discovery.start();
+ if (initialStateTimeout.millis() > 0) {
+ try {
+ logger.trace("waiting for {} for the initial state to be set by the discovery", initialStateTimeout);
+ if (latch.await(initialStateTimeout.millis(), TimeUnit.MILLISECONDS)) {
+ logger.trace("initial state set from discovery");
+ initialStateReceived = true;
+ } else {
+ initialStateReceived = false;
+ logger.warn("waited for {} and no initial state was set by the discovery", initialStateTimeout);
+ }
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ } finally {
+ discovery.removeListener(listener);
+ }
+ logger.info(discovery.nodeDescription());
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ discovery.stop();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ discovery.close();
+ }
+
+ public DiscoveryNode localNode() {
+ return discovery.localNode();
+ }
+
+ /**
+ * Returns <tt>true</tt> if the initial state was received within the timeout while waiting for it
+ * in {@link #doStart()}.
+ */
+ public boolean initialStateReceived() {
+ return initialStateReceived;
+ }
+
+ public String nodeDescription() {
+ return discovery.nodeDescription();
+ }
+
+ /**
+ * Publishes all the changes to the cluster from the master (can be called only by the master). The publish
+ * process should not publish this state to the master as well! (the master is sending it...).
+ * <p/>
+ * The {@link org.elasticsearch.discovery.Discovery.AckListener} allows acknowledging the publish
+ * event based on the responses received from all nodes
+ */
+ public void publish(ClusterState clusterState, Discovery.AckListener ackListener) {
+ if (lifecycle.started()) {
+ discovery.publish(clusterState, ackListener);
+ }
+ }
+
+ public static String generateNodeId(Settings settings) {
+ String seed = settings.get("discovery.id.seed");
+ if (seed != null) {
+ // derive a deterministic node id from the seed, useful for tests
+ return Strings.randomBase64UUID(new Random(Long.parseLong(seed)));
+ }
+ return Strings.randomBase64UUID();
+ }
+}
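With the seeded branch above, a fixed discovery.id.seed produces the same node id on every call, which is useful for tests. A minimal sketch:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.discovery.DiscoveryService;

    public class NodeIdSeedSketch {
        public static void main(String[] args) {
            Settings seeded = ImmutableSettings.settingsBuilder()
                    .put("discovery.id.seed", "42")
                    .build();
            // the same seed always yields the same node id
            String first = DiscoveryService.generateNodeId(seeded);
            String second = DiscoveryService.generateNodeId(seeded);
            assert first.equals(second);
        }
    }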
diff --git a/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java b/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java
new file mode 100644
index 0000000..8488488
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+/**
+ * A listener that should be called by the {@link org.elasticsearch.discovery.Discovery} component
+ * when the first valid initial cluster state has been submitted and processed by the cluster service.
+ * <p/>
+ * Note, this listener should be registered with the discovery service before it has started.
+ */
+public interface InitialStateDiscoveryListener {
+
+ void initialStateProcessed();
+}
diff --git a/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java b/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java
new file mode 100644
index 0000000..b2b4e93
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class MasterNotDiscoveredException extends ElasticsearchException {
+
+ public MasterNotDiscoveredException() {
+ super("");
+ }
+
+ public MasterNotDiscoveredException(String message) {
+ super(message);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
new file mode 100644
index 0000000..9d8f3e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
@@ -0,0 +1,379 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.local;
+
+import com.google.common.base.Objects;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodeService;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.internal.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.discovery.*;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.ClusterState.Builder;
+
+/**
+ *
+ */
+public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery {
+
+ private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0];
+
+ private final TransportService transportService;
+ private final ClusterService clusterService;
+ private final DiscoveryNodeService discoveryNodeService;
+ private AllocationService allocationService;
+ private final ClusterName clusterName;
+ private final Version version;
+
+ private final TimeValue publishTimeout;
+
+ private DiscoveryNode localNode;
+
+ private volatile boolean master = false;
+
+ private final AtomicBoolean initialStateSent = new AtomicBoolean();
+
+ private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<InitialStateDiscoveryListener>();
+
+ private static final ConcurrentMap<ClusterName, ClusterGroup> clusterGroups = ConcurrentCollections.newConcurrentMap();
+
+ @Inject
+ public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService,
+ DiscoveryNodeService discoveryNodeService, Version version) {
+ super(settings);
+ this.clusterName = clusterName;
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+ this.discoveryNodeService = discoveryNodeService;
+ this.version = version;
+
+ this.publishTimeout = settings.getAsTime("discovery.zen.publish_timeout", DEFAULT_PUBLISH_TIMEOUT);
+ }
+
+ @Override
+ public void setNodeService(@Nullable NodeService nodeService) {
+ // nothing to do here
+ }
+
+ @Override
+ public void setAllocationService(AllocationService allocationService) {
+ this.allocationService = allocationService;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ synchronized (clusterGroups) {
+ ClusterGroup clusterGroup = clusterGroups.get(clusterName);
+ if (clusterGroup == null) {
+ clusterGroup = new ClusterGroup();
+ clusterGroups.put(clusterName, clusterGroup);
+ }
+ logger.debug("Connected to cluster [{}]", clusterName);
+ this.localNode = new DiscoveryNode(settings.get("name"), DiscoveryService.generateNodeId(settings), transportService.boundAddress().publishAddress(),
+ discoveryNodeService.buildAttributes(), version);
+
+ clusterGroup.members().add(this);
+
+ LocalDiscovery firstMaster = null;
+ for (LocalDiscovery localDiscovery : clusterGroup.members()) {
+ if (localDiscovery.localNode().masterNode()) {
+ firstMaster = localDiscovery;
+ break;
+ }
+ }
+
+ if (firstMaster != null && firstMaster.equals(this)) {
+ // we are the first master (and the master)
+ master = true;
+ final LocalDiscovery master = firstMaster;
+ clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
+ nodesBuilder.put(discovery.localNode);
+ }
+ nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
+ // remove the NO_MASTER block in this case
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ }
+ });
+ } else if (firstMaster != null) {
+ // update as fast as we can the local node state with the new metadata (so we create indices for example)
+ final ClusterState masterState = firstMaster.clusterService.state();
+ clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ // make sure we have the local node id set, we might need it as a result of the new metadata
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()).put(localNode).localNodeId(localNode.id());
+ return ClusterState.builder(currentState).metaData(masterState.metaData()).nodes(nodesBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+
+ // tell the master to send the fact that we are here
+ final LocalDiscovery master = firstMaster;
+ firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
+ nodesBuilder.put(discovery.localNode);
+ }
+ nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
+ return ClusterState.builder(currentState).nodes(nodesBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ }
+ });
+ }
+ } // else, no master node, the next node that will start will fill things in...
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ synchronized (clusterGroups) {
+ ClusterGroup clusterGroup = clusterGroups.get(clusterName);
+ if (clusterGroup == null) {
+ logger.warn("Illegal state, should not have an empty cluster group when stopping, I should be there at teh very least...");
+ return;
+ }
+ clusterGroup.members().remove(this);
+ if (clusterGroup.members().isEmpty()) {
+ // no more members, remove and return
+ clusterGroups.remove(clusterName);
+ return;
+ }
+
+ LocalDiscovery firstMaster = null;
+ for (LocalDiscovery localDiscovery : clusterGroup.members()) {
+ if (localDiscovery.localNode().masterNode()) {
+ firstMaster = localDiscovery;
+ break;
+ }
+ }
+
+ if (firstMaster != null) {
+ // if the removed node was the master, make the next master-eligible node the master
+ if (master) {
+ firstMaster.master = true;
+ }
+
+ final Set<String> newMembers = newHashSet();
+ for (LocalDiscovery discovery : clusterGroup.members()) {
+ newMembers.add(discovery.localNode.id());
+ }
+
+ final LocalDiscovery master = firstMaster;
+ master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode.id());
+ DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
+ if (delta.added()) {
+ logger.warn("No new nodes should be created when a new discovery view is accepted");
+ }
+ // reroute here, so we eagerly remove dead nodes from the routing
+ ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
+ RoutingAllocation.Result routingResult = master.allocationService.reroute(ClusterState.builder(updatedState).build());
+ return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ }
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public DiscoveryNode localNode() {
+ return localNode;
+ }
+
+ @Override
+ public void addListener(InitialStateDiscoveryListener listener) {
+ this.initialStateListeners.add(listener);
+ }
+
+ @Override
+ public void removeListener(InitialStateDiscoveryListener listener) {
+ this.initialStateListeners.remove(listener);
+ }
+
+ @Override
+ public String nodeDescription() {
+ return clusterName.value() + "/" + localNode.id();
+ }
+
+ public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) {
+ if (!master) {
+ throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master");
+ }
+ LocalDiscovery[] members = members();
+ if (members.length > 0) {
+ publish(members, clusterState, new AckClusterStatePublishResponseHandler(members.length - 1, ackListener));
+ }
+ }
+
+ private LocalDiscovery[] members() {
+ ClusterGroup clusterGroup = clusterGroups.get(clusterName);
+ if (clusterGroup == null) {
+ return NO_MEMBERS;
+ }
+ Queue<LocalDiscovery> members = clusterGroup.members();
+ return members.toArray(new LocalDiscovery[members.size()]);
+ }
+
+ private void publish(LocalDiscovery[] members, ClusterState clusterState, final ClusterStatePublishResponseHandler publishResponseHandler) {
+
+ try {
+ // we do the marshaling intentionally, to check it works well...
+ final byte[] clusterStateBytes = Builder.toBytes(clusterState);
+
+ for (final LocalDiscovery discovery : members) {
+ if (discovery.master) {
+ continue;
+ }
+ final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode);
+ // ignore cluster state messages that do not include "me", not in the game yet...
+ if (nodeSpecificClusterState.nodes().localNode() != null) {
+ discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (nodeSpecificClusterState.version() < currentState.version() && Objects.equal(nodeSpecificClusterState.nodes().masterNodeId(), currentState.nodes().masterNodeId())) {
+ return currentState;
+ }
+
+ ClusterState.Builder builder = ClusterState.builder(nodeSpecificClusterState);
+ // if the routing table did not change, use the original one
+ if (nodeSpecificClusterState.routingTable().version() == currentState.routingTable().version()) {
+ builder.routingTable(currentState.routingTable());
+ }
+ if (nodeSpecificClusterState.metaData().version() == currentState.metaData().version()) {
+ builder.metaData(currentState.metaData());
+ }
+
+ return builder.build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ publishResponseHandler.onFailure(discovery.localNode, t);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ publishResponseHandler.onResponse(discovery.localNode);
+ }
+ });
+ } else {
+ publishResponseHandler.onResponse(discovery.localNode);
+ }
+ }
+
+ if (publishTimeout.millis() > 0) {
+ try {
+ boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout);
+ if (!awaited) {
+ logger.debug("awaiting all nodes to process published state {} timed out, timeout {}", clusterState.version(), publishTimeout);
+ }
+ } catch (InterruptedException e) {
+ // ignore & restore interrupt
+ Thread.currentThread().interrupt();
+ }
+ }
+
+
+ } catch (Exception e) {
+ // failure to marshal or un-marshal
+ throw new ElasticsearchIllegalStateException("Cluster state failed to serialize", e);
+ }
+ }
+
+ private void sendInitialStateEventIfNeeded() {
+ if (initialStateSent.compareAndSet(false, true)) {
+ for (InitialStateDiscoveryListener listener : initialStateListeners) {
+ listener.initialStateProcessed();
+ }
+ }
+ }
+
+ private class ClusterGroup {
+
+ private Queue<LocalDiscovery> members = ConcurrentCollections.newQueue();
+
+ Queue<LocalDiscovery> members() {
+ return members;
+ }
+ }
+}
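Because clusterGroups is static, nodes built with local discovery inside one JVM find each other through the shared ClusterGroup keyed by cluster name, with no networking involved. A minimal sketch, assuming the 1.x NodeBuilder API:

    import org.elasticsearch.node.Node;
    import static org.elasticsearch.node.NodeBuilder.nodeBuilder;

    public class LocalClusterSketch {
        public static void main(String[] args) {
            // both nodes join the same in-JVM ClusterGroup named "demo"
            Node first = nodeBuilder().clusterName("demo").local(true).node();
            Node second = nodeBuilder().clusterName("demo").local(true).node();

            second.close();
            first.close();
        }
    }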
diff --git a/src/main/java/org/elasticsearch/discovery/local/LocalDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/local/LocalDiscoveryModule.java
new file mode 100644
index 0000000..7f14529
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/local/LocalDiscoveryModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.local;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.discovery.Discovery;
+
+/**
+ *
+ */
+public class LocalDiscoveryModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(Discovery.class).to(LocalDiscovery.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java b/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java
new file mode 100644
index 0000000..f845cbe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.node.service.NodeService;
+
+/**
+ *
+ */
+public interface DiscoveryNodesProvider {
+
+ DiscoveryNodes nodes();
+
+ @Nullable
+ NodeService nodeService();
+}
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
new file mode 100644
index 0000000..761b515
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -0,0 +1,969 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodeService;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.internal.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.discovery.InitialStateDiscoveryListener;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
+import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
+import org.elasticsearch.discovery.zen.membership.MembershipAction;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.discovery.zen.ping.ZenPingService;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+
+/**
+ *
+ */
+public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, DiscoveryNodesProvider {
+
+ private final ThreadPool threadPool;
+ private final TransportService transportService;
+ private final ClusterService clusterService;
+ private AllocationService allocationService;
+ private final ClusterName clusterName;
+ private final DiscoveryNodeService discoveryNodeService;
+ private final ZenPingService pingService;
+ private final MasterFaultDetection masterFD;
+ private final NodesFaultDetection nodesFD;
+ private final PublishClusterStateAction publishClusterState;
+ private final MembershipAction membership;
+ private final Version version;
+
+
+ private final TimeValue pingTimeout;
+
+ // a flag that should be used only for testing
+ private final boolean sendLeaveRequest;
+
+ private final ElectMasterService electMaster;
+
+ private final boolean masterElectionFilterClientNodes;
+ private final boolean masterElectionFilterDataNodes;
+
+
+ private DiscoveryNode localNode;
+
+ private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<InitialStateDiscoveryListener>();
+
+ private volatile boolean master = false;
+
+ private volatile DiscoveryNodes latestDiscoNodes;
+
+ private volatile Thread currentJoinThread;
+
+ private final AtomicBoolean initialStateSent = new AtomicBoolean();
+
+
+ @Nullable
+ private NodeService nodeService;
+
+ @Inject
+ public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService,
+ DiscoveryNodeService discoveryNodeService, ZenPingService pingService, Version version) {
+ super(settings);
+ this.clusterName = clusterName;
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+ this.discoveryNodeService = discoveryNodeService;
+ this.pingService = pingService;
+ this.version = version;
+
+ // also support direct discovery.zen settings, for cases when it gets extended
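+ // resolution order: discovery.zen.ping.timeout, then discovery.zen.ping_timeout, then the
+ // component-level ping_timeout / initial_ping_timeout settings, defaulting to 3s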
+ this.pingTimeout = settings.getAsTime("discovery.zen.ping.timeout", settings.getAsTime("discovery.zen.ping_timeout", componentSettings.getAsTime("ping_timeout", componentSettings.getAsTime("initial_ping_timeout", timeValueSeconds(3)))));
+ this.sendLeaveRequest = componentSettings.getAsBoolean("send_leave_request", true);
+
+ this.masterElectionFilterClientNodes = settings.getAsBoolean("discovery.zen.master_election.filter_client", true);
+ this.masterElectionFilterDataNodes = settings.getAsBoolean("discovery.zen.master_election.filter_data", false);
+
+ logger.debug("using ping.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", pingTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
+
+ this.electMaster = new ElectMasterService(settings);
+ nodeSettingsService.addListener(new ApplySettings());
+
+ this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, this);
+ this.masterFD.addListener(new MasterNodeFailureListener());
+
+ this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService);
+ this.nodesFD.addListener(new NodeFailureListener());
+
+ this.publishClusterState = new PublishClusterStateAction(settings, transportService, this, new NewClusterStateListener());
+ this.pingService.setNodesProvider(this);
+ this.membership = new MembershipAction(settings, transportService, this, new MembershipListener());
+
+ transportService.registerHandler(RejoinClusterRequestHandler.ACTION, new RejoinClusterRequestHandler());
+ }
+
+ @Override
+ public void setNodeService(@Nullable NodeService nodeService) {
+ this.nodeService = nodeService;
+ }
+
+ @Override
+ public void setAllocationService(AllocationService allocationService) {
+ this.allocationService = allocationService;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
+ // note, we rely on the fact that it is a new id each time we start, see FD and "kill -9" handling
+ final String nodeId = DiscoveryService.generateNodeId(settings);
+ localNode = new DiscoveryNode(settings.get("name"), nodeId, transportService.boundAddress().publishAddress(), nodeAttributes, version);
+ latestDiscoNodes = new DiscoveryNodes.Builder().put(localNode).localNodeId(localNode.id()).build();
+ nodesFD.updateNodes(latestDiscoNodes);
+ pingService.start();
+
+ // do the join on a different thread; the DiscoveryService waits up to 30s for the initial state anyhow
+ asyncJoinCluster();
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ pingService.stop();
+ masterFD.stop("zen disco stop");
+ nodesFD.stop();
+ initialStateSent.set(false);
+ if (sendLeaveRequest) {
+ if (!master && latestDiscoNodes.masterNode() != null) {
+ try {
+ membership.sendLeaveRequestBlocking(latestDiscoNodes.masterNode(), localNode, TimeValue.timeValueSeconds(1));
+ } catch (Exception e) {
+ logger.debug("failed to send leave request to master [{}]", e, latestDiscoNodes.masterNode());
+ }
+ } else {
+ DiscoveryNode[] possibleMasters = electMaster.nextPossibleMasters(latestDiscoNodes.nodes().values(), 5);
+ for (DiscoveryNode possibleMaster : possibleMasters) {
+ if (localNode.equals(possibleMaster)) {
+ continue;
+ }
+ try {
+ membership.sendLeaveRequest(latestDiscoNodes.masterNode(), possibleMaster);
+ } catch (Exception e) {
+ logger.debug("failed to send leave request from master [{}] to possible master [{}]", e, latestDiscoNodes.masterNode(), possibleMaster);
+ }
+ }
+ }
+ }
+ master = false;
+ if (currentJoinThread != null) {
+ try {
+ currentJoinThread.interrupt();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ masterFD.close();
+ nodesFD.close();
+ publishClusterState.close();
+ membership.close();
+ pingService.close();
+ }
+
+ @Override
+ public DiscoveryNode localNode() {
+ return localNode;
+ }
+
+ @Override
+ public void addListener(InitialStateDiscoveryListener listener) {
+ this.initialStateListeners.add(listener);
+ }
+
+ @Override
+ public void removeListener(InitialStateDiscoveryListener listener) {
+ this.initialStateListeners.remove(listener);
+ }
+
+ @Override
+ public String nodeDescription() {
+ return clusterName.value() + "/" + localNode.id();
+ }
+
+ @Override
+ public DiscoveryNodes nodes() {
+ DiscoveryNodes latestNodes = this.latestDiscoNodes;
+ if (latestNodes != null) {
+ return latestNodes;
+ }
+ // have not decided yet, just send the local node
+ return DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return this.nodeService;
+ }
+
+ @Override
+ public void publish(ClusterState clusterState, AckListener ackListener) {
+ if (!master) {
+ throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master");
+ }
+ latestDiscoNodes = clusterState.nodes();
+ nodesFD.updateNodes(clusterState.nodes());
+ publishClusterState.publish(clusterState, ackListener);
+ }
+
+ private void asyncJoinCluster() {
+ if (currentJoinThread != null) {
+ // we are already joining, ignore...
+ logger.trace("a join thread already running");
+ return;
+ }
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ currentJoinThread = Thread.currentThread();
+ try {
+ innerJoinCluster();
+ } finally {
+ currentJoinThread = null;
+ }
+ }
+ });
+ }
+
+ private void innerJoinCluster() {
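+ // keep pinging until we either elect ourselves as master or manage to join an
+ // existing master; bail out if the node is stopped or closed in the meantime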
+ boolean retry = true;
+ while (retry) {
+ if (lifecycle.stoppedOrClosed()) {
+ return;
+ }
+ retry = false;
+ DiscoveryNode masterNode = findMaster();
+ if (masterNode == null) {
+ logger.trace("no masterNode returned");
+ retry = true;
+ continue;
+ }
+ if (localNode.equals(masterNode)) {
+ this.master = true;
+ nodesFD.start(); // start the nodes FD
+ clusterService.submitStateUpdateTask("zen-disco-join (elected_as_master)", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder()
+ .localNodeId(localNode.id())
+ .masterNodeId(localNode.id())
+ // put our local node
+ .put(localNode);
+ // update the fact that we are the master...
+ latestDiscoNodes = builder.build();
+ ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NO_MASTER_BLOCK).build();
+ return ClusterState.builder(currentState).nodes(latestDiscoNodes).blocks(clusterBlocks).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ }
+ });
+ } else {
+ this.master = false;
+ try {
+ // first, make sure we can connect to the master
+ transportService.connectToNode(masterNode);
+ } catch (Exception e) {
+ logger.warn("failed to connect to master [{}], retrying...", e, masterNode);
+ retry = true;
+ continue;
+ }
+ // send join request
+ try {
+ membership.sendJoinRequestBlocking(masterNode, localNode, pingTimeout);
+ } catch (Exception e) {
+ if (e instanceof ElasticsearchException) {
+ logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ((ElasticsearchException) e).getDetailedMessage());
+ } else {
+ logger.info("failed to send join request to master [{}], reason [{}]", masterNode, e.getMessage());
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("detailed failed reason", e);
+ }
+ // failed to send the join request, retry
+ retry = true;
+ continue;
+ }
+ masterFD.start(masterNode, "initial_join");
+ // no need to submit the received cluster state, we will get it from the master when it publishes
+ // the fact that we joined
+ }
+ }
+ }
+
+ private void handleLeaveRequest(final DiscoveryNode node) {
+ if (lifecycleState() != Lifecycle.State.STARTED) {
+ // not started, ignore the leave request
+ return;
+ }
+ if (master) {
+ clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", Priority.URGENT, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id());
+ latestDiscoNodes = builder.build();
+ currentState = ClusterState.builder(currentState).nodes(latestDiscoNodes).build();
+ // check if we have enough master nodes, if not, we need to move into joining the cluster again
+ if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
+ return rejoin(currentState, "not enough master nodes");
+ }
+ // eagerly run reroute to remove dead nodes from routing table
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(currentState).build());
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ } else {
+ handleMasterGone(node, "shut_down");
+ }
+ }
+
+ private void handleNodeFailure(final DiscoveryNode node, String reason) {
+ if (lifecycleState() != Lifecycle.State.STARTED) {
+ // not started, ignore a node failure
+ return;
+ }
+ if (!master) {
+ // nothing to do here...
+ return;
+ }
+ clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.URGENT, new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes())
+ .remove(node.id());
+ latestDiscoNodes = builder.build();
+ currentState = ClusterState.builder(currentState).nodes(latestDiscoNodes).build();
+ // check if we have enough master nodes, if not, we need to move into joining the cluster again
+ if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
+ return rejoin(currentState, "not enough master nodes");
+ }
+ // eagerly run reroute to remove dead nodes from routing table
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(currentState).build());
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ }
+ });
+ }
+
+ private void handleMinimumMasterNodesChanged(final int minimumMasterNodes) {
+ if (lifecycleState() != Lifecycle.State.STARTED) {
+ // not started, ignore the setting change
+ return;
+ }
+ final int prevMinimumMasterNode = ZenDiscovery.this.electMaster.minimumMasterNodes();
+ ZenDiscovery.this.electMaster.minimumMasterNodes(minimumMasterNodes);
+ if (!master) {
+ // We only set the new value. If the master doesn't see enough nodes it will revoke its mastership.
+ return;
+ }
+ clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ // check if we have enough master nodes, if not, we need to move into joining the cluster again
+ if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
+ return rejoin(currentState, "not enough master nodes on change of minimum_master_nodes from [" + prevMinimumMasterNode + "] to [" + minimumMasterNodes + "]");
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ }
+ });
+ }
+
+ private void handleMasterGone(final DiscoveryNode masterNode, final String reason) {
+ if (lifecycleState() != Lifecycle.State.STARTED) {
+ // not started, ignore a master failure
+ return;
+ }
+ if (master) {
+ // we might get this both when the master tells us it is shutting down and then again from the resulting disconnect failure
+ return;
+ }
+
+ logger.info("master_left [{}], reason [{}]", masterNode, reason);
+
+ clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (!masterNode.id().equals(currentState.nodes().masterNodeId())) {
+ // master got switched on us, no need to send anything
+ return currentState;
+ }
+
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(currentState.nodes())
+ // make sure the old master node, which has failed, is not part of the nodes we publish
+ .remove(masterNode.id())
+ .masterNodeId(null).build();
+
+ if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) {
+ return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")");
+ }
+
+ final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master
+ if (localNode.equals(electedMaster)) {
+ master = true;
+ masterFD.stop("got elected as new master since master left (reason = " + reason + ")");
+ nodesFD.start();
+ discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build();
+ latestDiscoNodes = discoveryNodes;
+ return ClusterState.builder(currentState).nodes(latestDiscoNodes).build();
+ } else {
+ nodesFD.stop();
+ if (electedMaster != null) {
+ discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build();
+ masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")");
+ latestDiscoNodes = discoveryNodes;
+ return ClusterState.builder(currentState)
+ .nodes(latestDiscoNodes)
+ .build();
+ } else {
+ return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master");
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ }
+
+ });
+ }
+
+ static class ProcessClusterState {
+ final ClusterState clusterState;
+ final PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed;
+ volatile boolean processed;
+
+ ProcessClusterState(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ this.clusterState = clusterState;
+ this.newStateProcessed = newStateProcessed;
+ }
+ }
+
+ private final BlockingQueue<ProcessClusterState> processNewClusterStates = ConcurrentCollections.newBlockingQueue();
+
+ void handleNewClusterStateFromMaster(ClusterState newClusterState, final PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ if (master) {
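+ // another node also believes it is master: the cluster state with the higher
+ // version wins, and the losing master is asked to rejoin the cluster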
+ final ClusterState newState = newClusterState;
+ clusterService.submitStateUpdateTask("zen-disco-master_receive_cluster_state_from_another_master [" + newState.nodes().masterNode() + "]", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (newState.version() > currentState.version()) {
+ logger.warn("received cluster state from [{}] which is also master but with a newer cluster_state, rejoining to cluster...", newState.nodes().masterNode());
+ return rejoin(currentState, "zen-disco-master_receive_cluster_state_from_another_master [" + newState.nodes().masterNode() + "]");
+ } else {
+ logger.warn("received cluster state from [{}] which is also master but with an older cluster_state, telling [{}] to rejoin the cluster", newState.nodes().masterNode(), newState.nodes().masterNode());
+ transportService.sendRequest(newState.nodes().masterNode(), RejoinClusterRequestHandler.ACTION, new RejoinClusterRequest(currentState.nodes().localNodeId()), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("failed to send rejoin request to [{}]", exp, newState.nodes().masterNode());
+ }
+ });
+ return currentState;
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ newStateProcessed.onNewClusterStateProcessed();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ newStateProcessed.onNewClusterStateFailed(t);
+ }
+
+ });
+ } else {
+ if (newClusterState.nodes().localNode() == null) {
+ logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen", newClusterState.nodes().masterNode());
+ newStateProcessed.onNewClusterStateFailed(new ElasticsearchIllegalStateException("received state from a node that is not part of the cluster"));
+ } else {
+ if (currentJoinThread != null) {
+ logger.debug("got a new state from master node, though we are already trying to rejoin the cluster");
+ }
+
+ final ProcessClusterState processClusterState = new ProcessClusterState(newClusterState, newStateProcessed);
+ processNewClusterStates.add(processClusterState);
+
+ clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + newClusterState.nodes().masterNode() + "])", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ // we already processed it in a previous event
+ if (processClusterState.processed) {
+ return currentState;
+ }
+
+ // TODO: one improvement we can do is change the message structure to include version and masterNodeId
+ // at the start; this will allow us to keep the "compressed bytes" around, and only parse the first page
+ // to figure out if we need to use it or not, and only parse the whole state once we picked the latest one
+
+
+ // try and get the state with the highest version out of all the ones with the same master node id
+ ProcessClusterState stateToProcess = processNewClusterStates.poll();
+ if (stateToProcess == null) {
+ return currentState;
+ }
+ stateToProcess.processed = true;
+ while (true) {
+ ProcessClusterState potentialState = processNewClusterStates.peek();
+ // nothing else in the queue, bail
+ if (potentialState == null) {
+ break;
+ }
+ // if it's not from the same master, then bail
+ if (!Objects.equal(stateToProcess.clusterState.nodes().masterNodeId(), potentialState.clusterState.nodes().masterNodeId())) {
+ break;
+ }
+
+ // we are going to use it for sure, poll (remove) it
+ potentialState = processNewClusterStates.poll();
+ potentialState.processed = true;
+
+ if (potentialState.clusterState.version() > stateToProcess.clusterState.version()) {
+ // we found a new one
+ stateToProcess = potentialState;
+ }
+ }
+
+ ClusterState updatedState = stateToProcess.clusterState;
+
+ // if the new state has a smaller version, and it has the same master node, then no need to process it
+ if (updatedState.version() < currentState.version() && Objects.equal(updatedState.nodes().masterNodeId(), currentState.nodes().masterNodeId())) {
+ return currentState;
+ }
+
+ // we don't need to do this check, since we ping the master and get notified when it
+ // steps down from being a master because it no longer sees enough master nodes...
+ //if (!electMaster.hasEnoughMasterNodes(newState.nodes())) {
+ // return disconnectFromCluster(newState, "not enough master nodes on new cluster state received from [" + newState.nodes().masterNode() + "]");
+ //}
+
+ latestDiscoNodes = updatedState.nodes();
+
+ // check to see that we monitor the correct master of the cluster
+ if (masterFD.masterNode() == null || !masterFD.masterNode().equals(latestDiscoNodes.masterNode())) {
+ masterFD.restart(latestDiscoNodes.masterNode(), "new cluster state received and we are monitoring the wrong master [" + masterFD.masterNode() + "]");
+ }
+
+ ClusterState.Builder builder = ClusterState.builder(updatedState);
+ // if the routing table did not change, use the original one
+ if (updatedState.routingTable().version() == currentState.routingTable().version()) {
+ builder.routingTable(currentState.routingTable());
+ }
+ // same for metadata
+ if (updatedState.metaData().version() == currentState.metaData().version()) {
+ builder.metaData(currentState.metaData());
+ } else {
+ // if it's not the same version, only copy over new indices or ones whose version changed
+ MetaData.Builder metaDataBuilder = MetaData.builder(updatedState.metaData()).removeAllIndices();
+ for (IndexMetaData indexMetaData : updatedState.metaData()) {
+ IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.index());
+ if (currentIndexMetaData == null || currentIndexMetaData.version() != indexMetaData.version()) {
+ metaDataBuilder.put(indexMetaData, false);
+ } else {
+ metaDataBuilder.put(currentIndexMetaData, false);
+ }
+ }
+ builder.metaData(metaDataBuilder);
+ }
+
+ return builder.build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ newStateProcessed.onNewClusterStateFailed(t);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ sendInitialStateEventIfNeeded();
+ newStateProcessed.onNewClusterStateProcessed();
+ }
+ });
+ }
+ }
+ }
+
+ private ClusterState handleJoinRequest(final DiscoveryNode node) {
+ if (!master) {
+ throw new ElasticsearchIllegalStateException("Node [" + localNode + "] not master for join request from [" + node + "]");
+ }
+
+ ClusterState state = clusterService.state();
+ if (!transportService.addressSupported(node.address().getClass())) {
+ // TODO, what should we do now? Maybe inform the node that its address type is unsupported?
+ logger.warn("received a wrong address type from [{}], ignoring...", node);
+ } else {
+ // try and connect to the node, if it fails, we can raise an exception back to the client...
+ transportService.connectToNode(node);
+ state = clusterService.state();
+
+ // validate the join request, will throw a failure if it fails, which will get back to the
+ // node calling the join request
+ membership.sendValidateJoinRequestBlocking(node, state, pingTimeout);
+
+ clusterService.submitStateUpdateTask("zen-disco-receive(join from node[" + node + "])", Priority.URGENT, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (currentState.nodes().nodeExists(node.id())) {
+ // the node already exists in the cluster
+ logger.warn("received a join request for an existing node [{}]", node);
+ // still send a new cluster state, so it will be re-published and possibly update the other node
+ return ClusterState.builder(currentState).build();
+ }
+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes());
+ for (DiscoveryNode existingNode : currentState.nodes()) {
+ if (node.address().equals(existingNode.address())) {
+ builder.remove(existingNode.id());
+ logger.warn("received join request from node [{}], but found existing node {} with same address, removing existing node", node, existingNode);
+ }
+ }
+ latestDiscoNodes = builder.build();
+ // add the new node now (will update latestDiscoNodes on publish)
+ return ClusterState.builder(currentState).nodes(latestDiscoNodes.newNode(node)).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ }
+ return state;
+ }
+
+ private DiscoveryNode findMaster() {
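+ // ping all known nodes, filter the responses according to the master election settings,
+ // and require enough master-eligible nodes before either following an already active
+ // master or electing a new one among the candidates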
+ ZenPing.PingResponse[] fullPingResponses = pingService.pingAndWait(pingTimeout);
+ if (fullPingResponses == null) {
+ logger.trace("No full ping responses");
+ return null;
+ }
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder("full ping responses:");
+ if (fullPingResponses.length == 0) {
+ sb.append(" {none}");
+ } else {
+ for (ZenPing.PingResponse pingResponse : fullPingResponses) {
+ sb.append("\n\t--> ").append("target [").append(pingResponse.target()).append("], master [").append(pingResponse.master()).append("]");
+ }
+ }
+ logger.trace(sb.toString());
+ }
+
+ // filter responses
+ List<ZenPing.PingResponse> pingResponses = Lists.newArrayList();
+ for (ZenPing.PingResponse pingResponse : fullPingResponses) {
+ DiscoveryNode node = pingResponse.target();
+ if (masterElectionFilterClientNodes && (node.clientNode() || (!node.masterNode() && !node.dataNode()))) {
+ // filter out client nodes: explicitly marked client nodes, and nodes that are neither data nor master (effectively clients)
+ } else if (masterElectionFilterDataNodes && (!node.masterNode() && node.dataNode())) {
+ // filter out data nodes that are not also master-eligible
+ } else {
+ pingResponses.add(pingResponse);
+ }
+ }
+
+ if (logger.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], filter_data[").append(masterElectionFilterDataNodes).append("])");
+ if (pingResponses.isEmpty()) {
+ sb.append(" {none}");
+ } else {
+ for (ZenPing.PingResponse pingResponse : pingResponses) {
+ sb.append("\n\t--> ").append("target [").append(pingResponse.target()).append("], master [").append(pingResponse.master()).append("]");
+ }
+ }
+ logger.debug(sb.toString());
+ }
+ List<DiscoveryNode> pingMasters = newArrayList();
+ for (ZenPing.PingResponse pingResponse : pingResponses) {
+ if (pingResponse.master() != null) {
+ pingMasters.add(pingResponse.master());
+ }
+ }
+
+ Set<DiscoveryNode> possibleMasterNodes = Sets.newHashSet();
+ possibleMasterNodes.add(localNode);
+ for (ZenPing.PingResponse pingResponse : pingResponses) {
+ possibleMasterNodes.add(pingResponse.target());
+ }
+ // if we don't see enough master-eligible nodes ourselves, we bail, even if some ping
+ // response indicates that another node already sees an active master
+ if (!electMaster.hasEnoughMasterNodes(possibleMasterNodes)) {
+ return null;
+ }
+
+ if (pingMasters.isEmpty()) {
+ // let's tie-break between the discovered nodes
+ DiscoveryNode electedMaster = electMaster.electMaster(possibleMasterNodes);
+ if (localNode.equals(electedMaster)) {
+ return localNode;
+ }
+ } else {
+ DiscoveryNode electedMaster = electMaster.electMaster(pingMasters);
+ if (electedMaster != null) {
+ return electedMaster;
+ }
+ }
+ return null;
+ }
+
+ private ClusterState rejoin(ClusterState clusterState, String reason) {
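+ // drop out of the cluster: stop fault detection, block the cluster state, clear the
+ // routing table and metadata, and kick off a fresh asynchronous join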
+ logger.warn(reason + ", current nodes: {}", clusterState.nodes());
+ nodesFD.stop();
+ masterFD.stop(reason);
+ master = false;
+
+ ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(clusterState.blocks())
+ .addGlobalBlock(NO_MASTER_BLOCK)
+ .addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)
+ .build();
+
+ // clear the routing table, we have no master, so we need to recreate the routing when we reform the cluster
+ RoutingTable routingTable = RoutingTable.builder().build();
+ // we also clean the metadata, since we are going to recover it if we become master
+ MetaData metaData = MetaData.builder().build();
+
+ // clean the nodes, we are now not connected to anybody, since we try and reform the cluster
+ latestDiscoNodes = new DiscoveryNodes.Builder().put(localNode).localNodeId(localNode.id()).build();
+
+ asyncJoinCluster();
+
+ return ClusterState.builder(clusterState)
+ .blocks(clusterBlocks)
+ .nodes(latestDiscoNodes)
+ .routingTable(routingTable)
+ .metaData(metaData)
+ .build();
+ }
+
+ private void sendInitialStateEventIfNeeded() {
+ if (initialStateSent.compareAndSet(false, true)) {
+ for (InitialStateDiscoveryListener listener : initialStateListeners) {
+ listener.initialStateProcessed();
+ }
+ }
+ }
+
+ private class NewClusterStateListener implements PublishClusterStateAction.NewClusterStateListener {
+
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ handleNewClusterStateFromMaster(clusterState, newStateProcessed);
+ }
+ }
+
+ private class MembershipListener implements MembershipAction.MembershipListener {
+ @Override
+ public ClusterState onJoin(DiscoveryNode node) {
+ return handleJoinRequest(node);
+ }
+
+ @Override
+ public void onLeave(DiscoveryNode node) {
+ handleLeaveRequest(node);
+ }
+ }
+
+ private class NodeFailureListener implements NodesFaultDetection.Listener {
+
+ @Override
+ public void onNodeFailure(DiscoveryNode node, String reason) {
+ handleNodeFailure(node, reason);
+ }
+ }
+
+ private class MasterNodeFailureListener implements MasterFaultDetection.Listener {
+
+ @Override
+ public void onMasterFailure(DiscoveryNode masterNode, String reason) {
+ handleMasterGone(masterNode, reason);
+ }
+
+ @Override
+ public void onDisconnectedFromMaster() {
+ // got disconnected from the master, send a join request
+ DiscoveryNode masterNode = latestDiscoNodes.masterNode();
+ try {
+ membership.sendJoinRequest(masterNode, localNode);
+ } catch (Exception e) {
+ logger.warn("failed to send join request on disconnection from master [{}]", masterNode);
+ }
+ }
+ }
+
+ static class RejoinClusterRequest extends TransportRequest {
+
+ private String fromNodeId;
+
+ RejoinClusterRequest(String fromNodeId) {
+ this.fromNodeId = fromNodeId;
+ }
+
+ RejoinClusterRequest() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ fromNodeId = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalString(fromNodeId);
+ }
+ }
+
+ class RejoinClusterRequestHandler extends BaseTransportRequestHandler<RejoinClusterRequest> {
+
+ static final String ACTION = "discovery/zen/rejoin";
+
+ @Override
+ public RejoinClusterRequest newInstance() {
+ return new RejoinClusterRequest();
+ }
+
+ @Override
+ public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception {
+ clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.URGENT, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
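+ // acknowledge the request first; rejoin() below drops the current master and
+ // starts a fresh join from within this cluster state update task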
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ } catch (Exception e) {
+ logger.warn("failed to send response on rejoin cluster request handling", e);
+ }
+ return rejoin(currentState, "received a request to rejoin the cluster from [" + request.fromNodeId + "]");
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ int minimumMasterNodes = settings.getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES,
+ ZenDiscovery.this.electMaster.minimumMasterNodes());
+ if (minimumMasterNodes != ZenDiscovery.this.electMaster.minimumMasterNodes()) {
+ logger.info("updating {} from [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES,
+ ZenDiscovery.this.electMaster.minimumMasterNodes(), minimumMasterNodes);
+ handleMinimumMasterNodesChanged(minimumMasterNodes);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscoveryModule.java
new file mode 100644
index 0000000..e67c4e2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscoveryModule.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.zen.ping.ZenPingService;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
+
+import java.util.List;
+
+/**
+ * Module that binds the zen {@link Discovery} implementation together with any
+ * registered custom {@link UnicastHostsProvider}s.
+ */
+public class ZenDiscoveryModule extends AbstractModule {
+
+ private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = Lists.newArrayList();
+
+ /**
+ * Adds a custom unicast hosts provider to build a dynamic list of unicast hosts when doing unicast discovery.
+ */
+ public ZenDiscoveryModule addUnicastHostProvider(Class<? extends UnicastHostsProvider> unicastHostProvider) {
+ unicastHostProviders.add(unicastHostProvider);
+ return this;
+ }
+
+ @Override
+ protected void configure() {
+ bind(ZenPingService.class).asEagerSingleton();
+ Multibinder<UnicastHostsProvider> unicastHostsProviderMultibinder = Multibinder.newSetBinder(binder(), UnicastHostsProvider.class);
+ for (Class<? extends UnicastHostsProvider> unicastHostProvider : unicastHostProviders) {
+ unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider);
+ }
+ bindDiscovery();
+ }
+
+ protected void bindDiscovery() {
+ bind(Discovery.class).to(ZenDiscovery.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java
new file mode 100644
index 0000000..bcfa1dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.elect;
+
+import com.carrotsearch.hppc.ObjectContainer;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Elects the master node out of the known master-eligible nodes, and tracks the
+ * minimum_master_nodes requirement.
+ */
+public class ElectMasterService extends AbstractComponent {
+
+ public static final String DISCOVERY_ZEN_MINIMUM_MASTER_NODES = "discovery.zen.minimum_master_nodes";
+
+ private final NodeComparator nodeComparator = new NodeComparator();
+
+ private volatile int minimumMasterNodes;
+
+ public ElectMasterService(Settings settings) {
+ super(settings);
+ this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1);
+ logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
+ }
+
+ public void minimumMasterNodes(int minimumMasterNodes) {
+ this.minimumMasterNodes = minimumMasterNodes;
+ }
+
+ public int minimumMasterNodes() {
+ return minimumMasterNodes;
+ }
+
+ public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
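+ // a minimum_master_nodes value below 1 (the default is -1) disables the check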
+ if (minimumMasterNodes < 1) {
+ return true;
+ }
+ int count = 0;
+ for (DiscoveryNode node : nodes) {
+ if (node.masterNode()) {
+ count++;
+ }
+ }
+ return count >= minimumMasterNodes;
+ }
+
+ /**
+ * Returns a list of the next possible masters.
+ */
+ public DiscoveryNode[] nextPossibleMasters(ObjectContainer<DiscoveryNode> nodes, int numberOfPossibleMasters) {
+ List<DiscoveryNode> sortedNodes = sortedMasterNodes(Arrays.asList(nodes.toArray(DiscoveryNode.class)));
+ if (sortedNodes == null) {
+ return new DiscoveryNode[0];
+ }
+ List<DiscoveryNode> nextPossibleMasters = Lists.newArrayListWithCapacity(numberOfPossibleMasters);
+ int counter = 0;
+ for (DiscoveryNode nextPossibleMaster : sortedNodes) {
+ if (counter++ >= numberOfPossibleMasters) { // post-increment, so up to numberOfPossibleMasters nodes are returned
+ break;
+ }
+ nextPossibleMasters.add(nextPossibleMaster);
+ }
+ return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]);
+ }
+
+ /**
+ * Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
+ * if no master has been elected. The master-eligible node with the lowest node id wins.
+ */
+ public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
+ List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
+ if (sortedNodes == null || sortedNodes.isEmpty()) {
+ return null;
+ }
+ return sortedNodes.get(0);
+ }
+
+ private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
+ List<DiscoveryNode> possibleNodes = Lists.newArrayList(nodes);
+ if (possibleNodes.isEmpty()) {
+ return null;
+ }
+ // remove non master-eligible nodes
+ for (Iterator<DiscoveryNode> it = possibleNodes.iterator(); it.hasNext(); ) {
+ DiscoveryNode node = it.next();
+ if (!node.masterNode()) {
+ it.remove();
+ }
+ }
+ CollectionUtil.introSort(possibleNodes, nodeComparator);
+ return possibleNodes;
+ }
+
+ private static class NodeComparator implements Comparator<DiscoveryNode> {
+
+ @Override
+ public int compare(DiscoveryNode o1, DiscoveryNode o2) {
+ return o1.id().compareTo(o2.id());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
new file mode 100644
index 0000000..3e823a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.fd;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+
+/**
+ * Fault detection that pings the master periodically to see if it is alive, notifying
+ * registered listeners when the master is considered failed.
+ */
+public class MasterFaultDetection extends AbstractComponent {
+
+ public static interface Listener {
+
+ void onMasterFailure(DiscoveryNode masterNode, String reason);
+
+ void onDisconnectedFromMaster();
+ }
+
+ private final ThreadPool threadPool;
+
+ private final TransportService transportService;
+
+ private final DiscoveryNodesProvider nodesProvider;
+
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+
+ private final boolean connectOnNetworkDisconnect;
+
+ private final TimeValue pingInterval;
+
+ private final TimeValue pingRetryTimeout;
+
+ private final int pingRetryCount;
+
+ // used mainly for testing, should always be true
+ private final boolean registerConnectionListener;
+
+
+ private final FDConnectionListener connectionListener;
+
+ private volatile MasterPinger masterPinger;
+
+ private final Object masterNodeMutex = new Object();
+
+ private volatile DiscoveryNode masterNode;
+
+ private volatile int retryCount;
+
+ private final AtomicBoolean notifiedMasterFailure = new AtomicBoolean();
+
+ public MasterFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, DiscoveryNodesProvider nodesProvider) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.transportService = transportService;
+ this.nodesProvider = nodesProvider;
+
+ this.connectOnNetworkDisconnect = componentSettings.getAsBoolean("connect_on_network_disconnect", true);
+ this.pingInterval = componentSettings.getAsTime("ping_interval", timeValueSeconds(1));
+ this.pingRetryTimeout = componentSettings.getAsTime("ping_timeout", timeValueSeconds(30));
+ this.pingRetryCount = componentSettings.getAsInt("ping_retries", 3);
+ this.registerConnectionListener = componentSettings.getAsBoolean("register_connection_listener", true);
+
+ logger.debug("[master] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount);
+
+ this.connectionListener = new FDConnectionListener();
+ if (registerConnectionListener) {
+ transportService.addConnectionListener(connectionListener);
+ }
+
+ transportService.registerHandler(MasterPingRequestHandler.ACTION, new MasterPingRequestHandler());
+ }
+
+ public DiscoveryNode masterNode() {
+ return this.masterNode;
+ }
+
+ public void addListener(Listener listener) {
+ listeners.add(listener);
+ }
+
+ public void removeListener(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ public void restart(DiscoveryNode masterNode, String reason) {
+ synchronized (masterNodeMutex) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[master] restarting fault detection against master [{}], reason [{}]", masterNode, reason);
+ }
+ innerStop();
+ innerStart(masterNode);
+ }
+ }
+
+ public void start(final DiscoveryNode masterNode, String reason) {
+ synchronized (masterNodeMutex) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[master] starting fault detection against master [{}], reason [{}]", masterNode, reason);
+ }
+ innerStart(masterNode);
+ }
+ }
+
+ private void innerStart(final DiscoveryNode masterNode) {
+ this.masterNode = masterNode;
+ this.retryCount = 0;
+ this.notifiedMasterFailure.set(false);
+
+ // try and connect to make sure we are connected
+ try {
+ transportService.connectToNode(masterNode);
+ } catch (final Exception e) {
+ // notify master failure (which stops also) and bail..
+ notifyMasterFailure(masterNode, "failed to perform initial connect [" + e.getMessage() + "]");
+ return;
+ }
+ if (masterPinger != null) {
+ masterPinger.stop();
+ }
+ this.masterPinger = new MasterPinger();
+ // start the ping process
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, masterPinger);
+ }
+
+ public void stop(String reason) {
+ synchronized (masterNodeMutex) {
+ if (masterNode != null) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[master] stopping fault detection against master [{}], reason [{}]", masterNode, reason);
+ }
+ }
+ innerStop();
+ }
+ }
+
+ private void innerStop() {
+ // this also stops the next scheduled ping
+ this.retryCount = 0;
+ if (masterPinger != null) {
+ masterPinger.stop();
+ masterPinger = null;
+ }
+ this.masterNode = null;
+ }
+
+ public void close() {
+ stop("closing");
+ this.listeners.clear();
+ transportService.removeConnectionListener(connectionListener);
+ transportService.removeHandler(MasterPingRequestHandler.ACTION);
+ }
+
+ private void handleTransportDisconnect(DiscoveryNode node) {
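+ // a dropped connection to the current master either triggers a single reconnect
+ // attempt (when connect_on_network_disconnect is set) or counts as a master failure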
+ synchronized (masterNodeMutex) {
+ if (!node.equals(this.masterNode)) {
+ return;
+ }
+ if (connectOnNetworkDisconnect) {
+ try {
+ transportService.connectToNode(node);
+ // if all is well, make sure we restart the pinger
+ if (masterPinger != null) {
+ masterPinger.stop();
+ }
+ this.masterPinger = new MasterPinger();
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, masterPinger);
+ } catch (Exception e) {
+ logger.trace("[master] [{}] transport disconnected (with verified connect)", masterNode);
+ notifyMasterFailure(masterNode, "transport disconnected (with verified connect)");
+ }
+ } else {
+ logger.trace("[master] [{}] transport disconnected", node);
+ notifyMasterFailure(node, "transport disconnected");
+ }
+ }
+ }
+
+ private void notifyDisconnectedFromMaster() {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ for (Listener listener : listeners) {
+ listener.onDisconnectedFromMaster();
+ }
+ }
+ });
+ }
+
+ private void notifyMasterFailure(final DiscoveryNode masterNode, final String reason) {
+ if (notifiedMasterFailure.compareAndSet(false, true)) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ for (Listener listener : listeners) {
+ listener.onMasterFailure(masterNode, reason);
+ }
+ }
+ });
+ stop("master failure, " + reason);
+ }
+ }
+
+ private class FDConnectionListener implements TransportConnectionListener {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ handleTransportDisconnect(node);
+ }
+ }
+
+ private class MasterPinger implements Runnable {
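+ // each run sends a single ping to the current master; a successful response
+ // schedules the next ping after ping_interval, while failures are resent up to
+ // ping_retries times before the master is reported as failed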
+
+ private volatile boolean running = true;
+
+ public void stop() {
+ this.running = false;
+ }
+
+ @Override
+ public void run() {
+ if (!running) {
+ // return and don't spawn...
+ return;
+ }
+ final DiscoveryNode masterToPing = masterNode;
+ if (masterToPing == null) {
+ // master is null, should not happen, but we are still running, so reschedule
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
+ return;
+ }
+ transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION, new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout),
+ new BaseTransportResponseHandler<MasterPingResponseResponse>() {
+ @Override
+ public MasterPingResponseResponse newInstance() {
+ return new MasterPingResponseResponse();
+ }
+
+ @Override
+ public void handleResponse(MasterPingResponseResponse response) {
+ if (!running) {
+ return;
+ }
+ // reset the counter, we got a good result
+ MasterFaultDetection.this.retryCount = 0;
+ // check that the master node did not get switched on us; if it did, we simply return with no reschedule
+ if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
+ if (!response.connectedToMaster) {
+ logger.trace("[master] [{}] does not have us registered with it...", masterToPing);
+ notifyDisconnectedFromMaster();
+ }
+ // we don't stop on disconnection from master, we keep pinging it
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
+ }
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ if (!running) {
+ return;
+ }
+ if (exp instanceof ConnectTransportException) {
+ // ignore this one, we already handle it by registering a connection listener
+ return;
+ }
+ synchronized (masterNodeMutex) {
+ // check if the master node did not get switched on us...
+ if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
+ if (exp.getCause() instanceof NoLongerMasterException) {
+ logger.debug("[master] pinging a master {} that is no longer a master", masterNode);
+ notifyMasterFailure(masterToPing, "no longer master");
+ return;
+ } else if (exp.getCause() instanceof NotMasterException) {
+ logger.debug("[master] pinging a master {} that is not the master", masterNode);
+ notifyMasterFailure(masterToPing, "not master");
+ return;
+ } else if (exp.getCause() instanceof NodeDoesNotExistOnMasterException) {
+ logger.debug("[master] pinging a master {} but we do not exists on it, act as if its master failure", masterNode);
+ notifyMasterFailure(masterToPing, "do not exists on master, act as master failure");
+ return;
+ }
+ int retryCount = ++MasterFaultDetection.this.retryCount;
+ logger.trace("[master] failed to ping [{}], retry [{}] out of [{}]", exp, masterNode, retryCount, pingRetryCount);
+ if (retryCount >= pingRetryCount) {
+ logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout);
+ // not good, failure
+ notifyMasterFailure(masterToPing, "failed to ping, tried [" + pingRetryCount + "] times, each with maximum [" + pingRetryTimeout + "] timeout");
+ } else {
+ // resend the request, not reschedule, rely on send timeout
+ transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION, new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout), this);
+ }
+ }
+ }
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ static class NoLongerMasterException extends ElasticsearchIllegalStateException {
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+ }
+
+ static class NotMasterException extends ElasticsearchIllegalStateException {
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+ }
+
+ static class NodeDoesNotExistOnMasterException extends ElasticsearchIllegalStateException {
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+ }
+
+ private class MasterPingRequestHandler extends BaseTransportRequestHandler<MasterPingRequest> {
+
+ public static final String ACTION = "discovery/zen/fd/masterPing";
+
+ @Override
+ public MasterPingRequest newInstance() {
+ return new MasterPingRequest();
+ }
+
+ @Override
+ public void messageReceived(MasterPingRequest request, TransportChannel channel) throws Exception {
+ DiscoveryNodes nodes = nodesProvider.nodes();
+ // check that we are really the master the sender thinks we are;
+ // this can happen if the master got "kill -9" and then another node started using the same port
+ if (!request.masterNodeId.equals(nodes.localNodeId())) {
+ throw new NotMasterException();
+ }
+ // if we are no longer master, fail...
+ if (!nodes.localNodeMaster()) {
+ throw new NoLongerMasterException();
+ }
+ if (!nodes.nodeExists(request.nodeId)) {
+ throw new NodeDoesNotExistOnMasterException();
+ }
+ // send a response, and note if we are connected to the master or not
+ channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+
+ private static class MasterPingRequest extends TransportRequest {
+
+ private String nodeId;
+
+ private String masterNodeId;
+
+ private MasterPingRequest() {
+ }
+
+ private MasterPingRequest(String nodeId, String masterNodeId) {
+ this.nodeId = nodeId;
+ this.masterNodeId = masterNodeId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodeId = in.readString();
+ masterNodeId = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(nodeId);
+ out.writeString(masterNodeId);
+ }
+ }
+
+ private static class MasterPingResponseResponse extends TransportResponse {
+
+ private boolean connectedToMaster;
+
+ private MasterPingResponseResponse() {
+ }
+
+ private MasterPingResponseResponse(boolean connectedToMaster) {
+ this.connectedToMaster = connectedToMaster;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ connectedToMaster = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(connectedToMaster);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java
new file mode 100644
index 0000000..679f4d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.fd;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static org.elasticsearch.cluster.node.DiscoveryNodes.EMPTY_NODES;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+
+/**
+ * Fault detection for the nodes of the cluster, run on the elected master: each node is
+ * pinged periodically and listeners are notified when a node is considered failed.
+ */
+public class NodesFaultDetection extends AbstractComponent {
+
+ public static interface Listener {
+
+ void onNodeFailure(DiscoveryNode node, String reason);
+ }
+
+ private final ThreadPool threadPool;
+
+ private final TransportService transportService;
+
+
+ private final boolean connectOnNetworkDisconnect;
+
+ private final TimeValue pingInterval;
+
+ private final TimeValue pingRetryTimeout;
+
+ private final int pingRetryCount;
+
+ // used mainly for testing, should always be true
+ private final boolean registerConnectionListener;
+
+
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ private final ConcurrentMap<DiscoveryNode, NodeFD> nodesFD = newConcurrentMap();
+
+ private final FDConnectionListener connectionListener;
+
+ private volatile DiscoveryNodes latestNodes = EMPTY_NODES;
+
+ private volatile boolean running = false;
+
+ public NodesFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.transportService = transportService;
+
+ this.connectOnNetworkDisconnect = componentSettings.getAsBoolean("connect_on_network_disconnect", true);
+ this.pingInterval = componentSettings.getAsTime("ping_interval", timeValueSeconds(1));
+ this.pingRetryTimeout = componentSettings.getAsTime("ping_timeout", timeValueSeconds(30));
+ this.pingRetryCount = componentSettings.getAsInt("ping_retries", 3);
+ this.registerConnectionListener = componentSettings.getAsBoolean("register_connection_listener", true);
+
+ logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount);
+
+ transportService.registerHandler(PingRequestHandler.ACTION, new PingRequestHandler());
+
+ this.connectionListener = new FDConnectionListener();
+ if (registerConnectionListener) {
+ transportService.addConnectionListener(connectionListener);
+ }
+ }
+
+ public void addListener(Listener listener) {
+ listeners.add(listener);
+ }
+
+ public void removeListener(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ public void updateNodes(DiscoveryNodes nodes) {
+ DiscoveryNodes prevNodes = latestNodes;
+ this.latestNodes = nodes;
+ if (!running) {
+ return;
+ }
+ DiscoveryNodes.Delta delta = nodes.delta(prevNodes);
+ for (DiscoveryNode newNode : delta.addedNodes()) {
+ if (newNode.id().equals(nodes.localNodeId())) {
+ // no need to monitor the local node
+ continue;
+ }
+ if (!nodesFD.containsKey(newNode)) {
+ nodesFD.put(newNode, new NodeFD());
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(newNode));
+ }
+ }
+ for (DiscoveryNode removedNode : delta.removedNodes()) {
+ nodesFD.remove(removedNode);
+ }
+ }
+
+ public NodesFaultDetection start() {
+ if (running) {
+ return this;
+ }
+ running = true;
+ return this;
+ }
+
+ public NodesFaultDetection stop() {
+ if (!running) {
+ return this;
+ }
+ running = false;
+ return this;
+ }
+
+ public void close() {
+ stop();
+ transportService.removeHandler(PingRequestHandler.ACTION);
+ transportService.removeConnectionListener(connectionListener);
+ }
+
+ private void handleTransportDisconnect(DiscoveryNode node) {
+ if (!latestNodes.nodeExists(node.id())) {
+ return;
+ }
+ NodeFD nodeFD = nodesFD.remove(node);
+ if (nodeFD == null) {
+ return;
+ }
+ if (!running) {
+ return;
+ }
+ nodeFD.running = false;
+ if (connectOnNetworkDisconnect) {
+ try {
+ transportService.connectToNode(node);
+ nodesFD.put(node, new NodeFD());
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(node));
+ } catch (Exception e) {
+ logger.trace("[node ] [{}] transport disconnected (with verified connect)", node);
+ notifyNodeFailure(node, "transport disconnected (with verified connect)");
+ }
+ } else {
+ logger.trace("[node ] [{}] transport disconnected", node);
+ notifyNodeFailure(node, "transport disconnected");
+ }
+ }
+
+ private void notifyNodeFailure(final DiscoveryNode node, final String reason) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ for (Listener listener : listeners) {
+ listener.onNodeFailure(node, reason);
+ }
+ }
+ });
+ }
+
+ private class SendPingRequest implements Runnable {
+
+ private final DiscoveryNode node;
+
+ private SendPingRequest(DiscoveryNode node) {
+ this.node = node;
+ }
+
+ @Override
+ public void run() {
+ if (!running) {
+ return;
+ }
+ transportService.sendRequest(node, PingRequestHandler.ACTION, new PingRequest(node.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout),
+ new BaseTransportResponseHandler<PingResponse>() {
+ @Override
+ public PingResponse newInstance() {
+ return new PingResponse();
+ }
+
+ @Override
+ public void handleResponse(PingResponse response) {
+ if (!running) {
+ return;
+ }
+ NodeFD nodeFD = nodesFD.get(node);
+ if (nodeFD != null) {
+ if (!nodeFD.running) {
+ return;
+ }
+ nodeFD.retryCount = 0;
+ threadPool.schedule(pingInterval, ThreadPool.Names.SAME, SendPingRequest.this);
+ }
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ // check that this fault-detection round was not stopped while the ping was in flight
+ if (!running) {
+ return;
+ }
+ if (exp instanceof ConnectTransportException) {
+ // ignore this one, we already handle it by registering a connection listener
+ return;
+ }
+ NodeFD nodeFD = nodesFD.get(node);
+ if (nodeFD != null) {
+ if (!nodeFD.running) {
+ return;
+ }
+ int retryCount = ++nodeFD.retryCount;
+ logger.trace("[node ] failed to ping [{}], retry [{}] out of [{}]", exp, node, retryCount, pingRetryCount);
+ if (retryCount >= pingRetryCount) {
+ logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node, pingRetryCount, pingRetryTimeout);
+ // not good, failure
+ if (nodesFD.remove(node) != null) {
+ notifyNodeFailure(node, "failed to ping, tried [" + pingRetryCount + "] times, each with maximum [" + pingRetryTimeout + "] timeout");
+ }
+ } else {
+ // resend the request immediately rather than rescheduling; rely on the send timeout to pace retries
+ transportService.sendRequest(node, PingRequestHandler.ACTION, new PingRequest(node.id()),
+ options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout), this);
+ }
+ }
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ static class NodeFD {
+ volatile int retryCount;
+ volatile boolean running = true;
+ }
+
+ private class FDConnectionListener implements TransportConnectionListener {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ handleTransportDisconnect(node);
+ }
+ }
+
+
+ class PingRequestHandler extends BaseTransportRequestHandler<PingRequest> {
+
+ public static final String ACTION = "discovery/zen/fd/ping";
+
+ @Override
+ public PingRequest newInstance() {
+ return new PingRequest();
+ }
+
+ @Override
+ public void messageReceived(PingRequest request, TransportChannel channel) throws Exception {
+ // if this node is not the one the ping was addressed to, answer with an exception;
+ // this can happen when a node is killed with -9 and another node is started reusing the same port
+ if (!latestNodes.localNodeId().equals(request.nodeId)) {
+ throw new ElasticsearchIllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + latestNodes.localNodeId() + "]");
+ }
+ channel.sendResponse(new PingResponse());
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+
+ static class PingRequest extends TransportRequest {
+
+ // the (assumed) node id we are pinging
+ private String nodeId;
+
+ PingRequest() {
+ }
+
+ PingRequest(String nodeId) {
+ this.nodeId = nodeId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodeId = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(nodeId);
+ }
+ }
+
+ private static class PingResponse extends TransportResponse {
+
+ private PingResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+ }
+}
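
For orientation, a minimal sketch of how a consumer would wire up the class above, using only its public surface; the settings, threadPool, transportService, clusterNodes and logger references are assumed to exist elsewhere:

// Illustrative only -- not part of the upstream file.
NodesFaultDetection nodesFD = new NodesFaultDetection(settings, threadPool, transportService);
nodesFD.addListener(new NodesFaultDetection.Listener() {
    @Override
    public void onNodeFailure(DiscoveryNode node, String reason) {
        // a master would typically remove the failed node from the cluster state here
        logger.warn("node [{}] failed: {}", node, reason);
    }
});
nodesFD.start();                   // enable the ping loops
nodesFD.updateNodes(clusterNodes); // schedules a ping cycle for each newly seen node
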
diff --git a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
new file mode 100644
index 0000000..824e760
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.membership;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Handles the membership transport actions of zen discovery: node join,
+ * join validation, and node leave.
+ */
+public class MembershipAction extends AbstractComponent {
+
+ public interface MembershipListener {
+ ClusterState onJoin(DiscoveryNode node);
+
+ void onLeave(DiscoveryNode node);
+ }
+
+ private final TransportService transportService;
+
+ private final DiscoveryNodesProvider nodesProvider;
+
+ private final MembershipListener listener;
+
+ public MembershipAction(Settings settings, TransportService transportService, DiscoveryNodesProvider nodesProvider, MembershipListener listener) {
+ super(settings);
+ this.transportService = transportService;
+ this.nodesProvider = nodesProvider;
+ this.listener = listener;
+
+ transportService.registerHandler(JoinRequestRequestHandler.ACTION, new JoinRequestRequestHandler());
+ transportService.registerHandler(ValidateJoinRequestRequestHandler.ACTION, new ValidateJoinRequestRequestHandler());
+ transportService.registerHandler(LeaveRequestRequestHandler.ACTION, new LeaveRequestRequestHandler());
+ }
+
+ public void close() {
+ transportService.removeHandler(JoinRequestRequestHandler.ACTION);
+ transportService.removeHandler(ValidateJoinRequestRequestHandler.ACTION);
+ transportService.removeHandler(LeaveRequestRequestHandler.ACTION);
+ }
+
+ public void sendLeaveRequest(DiscoveryNode masterNode, DiscoveryNode node) {
+ transportService.sendRequest(node, LeaveRequestRequestHandler.ACTION, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+
+ public void sendLeaveRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) throws ElasticsearchException {
+ transportService.submitRequest(masterNode, LeaveRequestRequestHandler.ACTION, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ public void sendJoinRequest(DiscoveryNode masterNode, DiscoveryNode node) {
+ transportService.sendRequest(masterNode, JoinRequestRequestHandler.ACTION, new JoinRequest(node, false), EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+
+ public ClusterState sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) throws ElasticsearchException {
+ return transportService.submitRequest(masterNode, JoinRequestRequestHandler.ACTION, new JoinRequest(node, true), new FutureTransportResponseHandler<JoinResponse>() {
+ @Override
+ public JoinResponse newInstance() {
+ return new JoinResponse();
+ }
+ }).txGet(timeout.millis(), TimeUnit.MILLISECONDS).clusterState;
+ }
+
+ /**
+ * Validates the join request, throwing an exception if validation fails.
+ */
+ public void sendValidateJoinRequestBlocking(DiscoveryNode node, ClusterState clusterState, TimeValue timeout) throws ElasticsearchException {
+ transportService.submitRequest(node, ValidateJoinRequestRequestHandler.ACTION, new ValidateJoinRequest(clusterState), EmptyTransportResponseHandler.INSTANCE_SAME)
+ .txGet(timeout.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ static class JoinRequest extends TransportRequest {
+
+ DiscoveryNode node;
+
+ boolean withClusterState;
+
+ private JoinRequest() {
+ }
+
+ private JoinRequest(DiscoveryNode node, boolean withClusterState) {
+ this.node = node;
+ this.withClusterState = withClusterState;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ node = DiscoveryNode.readNode(in);
+ withClusterState = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ node.writeTo(out);
+ out.writeBoolean(withClusterState);
+ }
+ }
+
+ class JoinResponse extends TransportResponse {
+
+ ClusterState clusterState;
+
+ JoinResponse() {
+ }
+
+ JoinResponse(ClusterState clusterState) {
+ this.clusterState = clusterState;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ ClusterState.Builder.writeTo(clusterState, out);
+ }
+ }
+
+ private class JoinRequestRequestHandler extends BaseTransportRequestHandler<JoinRequest> {
+
+ static final String ACTION = "discovery/zen/join";
+
+ @Override
+ public JoinRequest newInstance() {
+ return new JoinRequest();
+ }
+
+ @Override
+ public void messageReceived(JoinRequest request, TransportChannel channel) throws Exception {
+ ClusterState clusterState = listener.onJoin(request.node);
+ if (request.withClusterState) {
+ channel.sendResponse(new JoinResponse(clusterState));
+ } else {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+ }
+
+ class ValidateJoinRequest extends TransportRequest {
+
+ ClusterState clusterState;
+
+ ValidateJoinRequest() {
+ }
+
+ ValidateJoinRequest(ClusterState clusterState) {
+ this.clusterState = clusterState;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ ClusterState.Builder.writeTo(clusterState, out);
+ }
+ }
+
+ private class ValidateJoinRequestRequestHandler extends BaseTransportRequestHandler<ValidateJoinRequest> {
+
+ static final String ACTION = "discovery/zen/join/validate";
+
+ @Override
+ public ValidateJoinRequest newInstance() {
+ return new ValidateJoinRequest();
+ }
+
+ @Override
+ public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception {
+ // for now, the mere fact that the joining node could deserialize the cluster state (in readFrom) acts as validation...
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+ }
+
+ static class LeaveRequest extends TransportRequest {
+
+ private DiscoveryNode node;
+
+ private LeaveRequest() {
+ }
+
+ private LeaveRequest(DiscoveryNode node) {
+ this.node = node;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ node = DiscoveryNode.readNode(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ node.writeTo(out);
+ }
+ }
+
+ private class LeaveRequestRequestHandler extends BaseTransportRequestHandler<LeaveRequest> {
+
+ static final String ACTION = "discovery/zen/leave";
+
+ @Override
+ public LeaveRequest newInstance() {
+ return new LeaveRequest();
+ }
+
+ @Override
+ public void messageReceived(LeaveRequest request, TransportChannel channel) throws Exception {
+ listener.onLeave(request.node);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+ }
+}
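
A hedged sketch of the blocking join/leave handshake from a joining node's point of view, using only the helpers defined above; the membership, masterNode and localNode references and the timeout values are illustrative:

// Illustrative only -- not part of the upstream file.
// Join: blocks until the master has processed the join; withClusterState=true on the
// wire means the master's current cluster state comes back in the JoinResponse.
ClusterState masterState = membership.sendJoinRequestBlocking(
        masterNode, localNode, TimeValue.timeValueSeconds(30));

// Leave: tell the master we are going away, waiting briefly for the ack.
membership.sendLeaveRequestBlocking(masterNode, localNode, TimeValue.timeValueSeconds(1));
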
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java
new file mode 100644
index 0000000..8483175
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.ClusterName.readClusterName;
+import static org.elasticsearch.cluster.node.DiscoveryNode.readNode;
+
+/**
+ * A pluggable mechanism used by zen discovery to find the other nodes
+ * (and the current master) of a cluster.
+ */
+public interface ZenPing extends LifecycleComponent<ZenPing> {
+
+ void setNodesProvider(DiscoveryNodesProvider nodesProvider);
+
+ void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException;
+
+ public interface PingListener {
+
+ void onPing(PingResponse[] pings);
+ }
+
+ public static class PingResponse implements Streamable {
+
+ public static final PingResponse[] EMPTY = new PingResponse[0];
+
+ private ClusterName clusterName;
+
+ private DiscoveryNode target;
+
+ private DiscoveryNode master;
+
+ private PingResponse() {
+ }
+
+ public PingResponse(DiscoveryNode target, DiscoveryNode master, ClusterName clusterName) {
+ this.target = target;
+ this.master = master;
+ this.clusterName = clusterName;
+ }
+
+ public ClusterName clusterName() {
+ return this.clusterName;
+ }
+
+ public DiscoveryNode target() {
+ return target;
+ }
+
+ public DiscoveryNode master() {
+ return master;
+ }
+
+ public static PingResponse readPingResponse(StreamInput in) throws IOException {
+ PingResponse response = new PingResponse();
+ response.readFrom(in);
+ return response;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ clusterName = readClusterName(in);
+ target = readNode(in);
+ if (in.readBoolean()) {
+ master = readNode(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ clusterName.writeTo(out);
+ target.writeTo(out);
+ if (master == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ master.writeTo(out);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "ping_response{target [" + target + "], master [" + master + "], cluster_name[" + clusterName.value() + "]}";
+ }
+ }
+}
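
PingResponse's writeTo/readFrom pair uses the standard Streamable idiom for a nullable field: a boolean presence flag written before the value. A self-contained sketch of the same pattern using plain java.io (the OptionalNameMessage type is invented for illustration):

import java.io.*;

class OptionalNameMessage {
    String name; // may be null, like PingResponse.master

    void writeTo(DataOutput out) throws IOException {
        if (name == null) {
            out.writeBoolean(false);  // flag only: field absent
        } else {
            out.writeBoolean(true);   // flag, then the value itself
            out.writeUTF(name);
        }
    }

    void readFrom(DataInput in) throws IOException {
        if (in.readBoolean()) {       // read the flag first, exactly mirroring writeTo
            name = in.readUTF();
        }
    }
}
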
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingException.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingException.java
new file mode 100644
index 0000000..02d87cc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping;
+
+import org.elasticsearch.discovery.DiscoveryException;
+
+/**
+ * Thrown when a zen discovery ping round fails.
+ */
+public class ZenPingException extends DiscoveryException {
+
+ public ZenPingException(String message) {
+ super(message);
+ }
+
+ public ZenPingException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
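
A brief, hedged example of where such an exception could be raised: wrapping a low-level I/O failure so that callers of the ping layer see a discovery-level error (the surrounding send code is illustrative; the actual MulticastZenPing below chooses to log and swallow send failures instead):

// Illustrative only -- not part of the upstream file.
try {
    multicastSocket.send(datagramPacketSend);
} catch (IOException e) {
    throw new ZenPingException("failed to send multicast ping request", e);
}
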
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
new file mode 100644
index 0000000..c3e6741
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.multicast.MulticastZenPing;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Aggregates the configured {@link ZenPing} implementations (multicast and/or
+ * unicast) and fans every ping request out to all of them.
+ */
+public class ZenPingService extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
+
+ private volatile ImmutableList<? extends ZenPing> zenPings = ImmutableList.of();
+
+ // here for backward comp. with discovery plugins
+ public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService,
+ @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
+ this(settings, threadPool, transportService, clusterName, networkService, Version.CURRENT, unicastHostsProviders);
+ }
+
+ @Inject
+ public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService,
+ Version version, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
+ super(settings);
+ ImmutableList.Builder<ZenPing> zenPingsBuilder = ImmutableList.builder();
+ if (componentSettings.getAsBoolean("multicast.enabled", true)) {
+ zenPingsBuilder.add(new MulticastZenPing(settings, threadPool, transportService, clusterName, networkService, version));
+ }
+ // always add the unicast ping, so the node can receive unicast requests even when multicast is in use
+ zenPingsBuilder.add(new UnicastZenPing(settings, threadPool, transportService, clusterName, version, unicastHostsProviders));
+
+ this.zenPings = zenPingsBuilder.build();
+ }
+
+ public ImmutableList<? extends ZenPing> zenPings() {
+ return this.zenPings;
+ }
+
+ public void zenPings(ImmutableList<? extends ZenPing> pings) {
+ this.zenPings = pings;
+ if (lifecycle.started()) {
+ for (ZenPing zenPing : zenPings) {
+ zenPing.start();
+ }
+ } else if (lifecycle.stopped()) {
+ for (ZenPing zenPing : zenPings) {
+ zenPing.stop();
+ }
+ }
+ }
+
+ @Override
+ public void setNodesProvider(DiscoveryNodesProvider nodesProvider) {
+ if (lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("Can't set nodes provider when started");
+ }
+ for (ZenPing zenPing : zenPings) {
+ zenPing.setNodesProvider(nodesProvider);
+ }
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ for (ZenPing zenPing : zenPings) {
+ zenPing.start();
+ }
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ for (ZenPing zenPing : zenPings) {
+ zenPing.stop();
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ for (ZenPing zenPing : zenPings) {
+ zenPing.close();
+ }
+ }
+
+ public PingResponse[] pingAndWait(TimeValue timeout) {
+ final AtomicReference<PingResponse[]> response = new AtomicReference<PingResponse[]>();
+ final CountDownLatch latch = new CountDownLatch(1);
+ ping(new PingListener() {
+ @Override
+ public void onPing(PingResponse[] pings) {
+ response.set(pings);
+ latch.countDown();
+ }
+ }, timeout);
+ try {
+ latch.await();
+ return response.get();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt(); // restore the interrupt flag rather than swallowing it
+ logger.trace("pingAndWait interrupted");
+ return null;
+ }
+ }
+
+ @Override
+ public void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException {
+ ImmutableList<? extends ZenPing> zenPings = this.zenPings;
+ CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings);
+ for (ZenPing zenPing : zenPings) {
+ try {
+ zenPing.ping(compoundPingListener, timeout);
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Ping execution rejected", ex);
+ compoundPingListener.onPing(null);
+ }
+ }
+ }
+
+ private static class CompoundPingListener implements PingListener {
+
+ private final PingListener listener;
+
+ private final AtomicInteger counter;
+
+ private ConcurrentMap<DiscoveryNode, PingResponse> responses = ConcurrentCollections.newConcurrentMap();
+
+ private CompoundPingListener(PingListener listener, ImmutableList<? extends ZenPing> zenPings) {
+ this.listener = listener;
+ this.counter = new AtomicInteger(zenPings.size());
+ }
+
+ @Override
+ public void onPing(PingResponse[] pings) {
+ if (pings != null) {
+ for (PingResponse pingResponse : pings) {
+ responses.put(pingResponse.target(), pingResponse);
+ }
+ }
+ if (counter.decrementAndGet() == 0) {
+ listener.onPing(responses.values().toArray(new PingResponse[responses.size()]));
+ }
+ }
+ }
+}
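
CompoundPingListener above is an instance of a general fan-in pattern: an AtomicInteger counts outstanding producers down, and the last one to finish delivers the merged result exactly once. A stripped-down, generic sketch of the same idea (all names invented for illustration):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

interface FanInCallback<T> {
    void onComplete(Queue<T> results);
}

// Collects results from `expected` concurrent producers; the producer that
// delivers the last result fires the callback exactly once with all of them.
class FanInCollector<T> {
    private final AtomicInteger remaining;
    private final Queue<T> results = new ConcurrentLinkedQueue<T>();
    private final FanInCallback<T> callback;

    FanInCollector(int expected, FanInCallback<T> callback) {
        this.remaining = new AtomicInteger(expected);
        this.callback = callback;
    }

    void deliver(T result) {
        if (result != null) {
            results.add(result);              // null signals "nothing from this producer"
        }
        if (remaining.decrementAndGet() == 0) {
            callback.onComplete(results);     // runs on whichever thread finished last
        }
    }
}
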
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java
new file mode 100644
index 0000000..e9fe57a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java
@@ -0,0 +1,568 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.multicast;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.io.stream.*;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.net.*;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.cluster.node.DiscoveryNode.readNode;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ * A {@link ZenPing} implementation that discovers nodes by sending ping
+ * requests over a multicast channel, and also answers external pings
+ * sent as JSON datagrams.
+ */
+public class MulticastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
+
+ private static final byte[] INTERNAL_HEADER = new byte[]{1, 9, 8, 4};
+
+ private final String address;
+ private final int port;
+ private final String group;
+ private final int bufferSize;
+ private final int ttl;
+
+ private final ThreadPool threadPool;
+ private final TransportService transportService;
+ private final ClusterName clusterName;
+ private final NetworkService networkService;
+ private final Version version;
+ private volatile DiscoveryNodesProvider nodesProvider;
+
+ private final boolean pingEnabled;
+
+ private volatile Receiver receiver;
+ private volatile Thread receiverThread;
+ private volatile MulticastSocket multicastSocket;
+ private DatagramPacket datagramPacketSend;
+ private DatagramPacket datagramPacketReceive;
+
+ private final AtomicInteger pingIdGenerator = new AtomicInteger();
+ private final Map<Integer, ConcurrentMap<DiscoveryNode, PingResponse>> receivedResponses = newConcurrentMap();
+
+ private final Object sendMutex = new Object();
+ private final Object receiveMutex = new Object();
+
+ public MulticastZenPing(ThreadPool threadPool, TransportService transportService, ClusterName clusterName, Version version) {
+ this(EMPTY_SETTINGS, threadPool, transportService, clusterName, new NetworkService(EMPTY_SETTINGS), version);
+ }
+
+ public MulticastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService, Version version) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.transportService = transportService;
+ this.clusterName = clusterName;
+ this.networkService = networkService;
+ this.version = version;
+
+ this.address = componentSettings.get("address");
+ this.port = componentSettings.getAsInt("port", 54328);
+ this.group = componentSettings.get("group", "224.2.2.4");
+ this.bufferSize = componentSettings.getAsInt("buffer_size", 2048);
+ this.ttl = componentSettings.getAsInt("ttl", 3);
+
+ this.pingEnabled = componentSettings.getAsBoolean("ping.enabled", true);
+
+ logger.debug("using group [{}], with port [{}], ttl [{}], and address [{}]", group, port, ttl, address);
+
+ this.transportService.registerHandler(MulticastPingResponseRequestHandler.ACTION, new MulticastPingResponseRequestHandler());
+ }
+
+ @Override
+ public void setNodesProvider(DiscoveryNodesProvider nodesProvider) {
+ if (lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("Can't set nodes provider when started");
+ }
+ this.nodesProvider = nodesProvider;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ try {
+ this.datagramPacketReceive = new DatagramPacket(new byte[bufferSize], bufferSize);
+ this.datagramPacketSend = new DatagramPacket(new byte[bufferSize], bufferSize, InetAddress.getByName(group), port);
+ } catch (Exception e) {
+ logger.warn("disabled, failed to setup multicast (datagram) discovery : {}", e.getMessage());
+ if (logger.isDebugEnabled()) {
+ logger.debug("disabled, failed to setup multicast (datagram) discovery", e);
+ }
+ return;
+ }
+
+ InetAddress multicastInterface = null;
+ try {
+ MulticastSocket multicastSocket;
+// if (NetworkUtils.canBindToMcastAddress()) {
+// try {
+// multicastSocket = new MulticastSocket(new InetSocketAddress(group, port));
+// } catch (Exception e) {
+// logger.debug("Failed to create multicast socket by binding to group address, binding to port", e);
+// multicastSocket = new MulticastSocket(port);
+// }
+// } else {
+ multicastSocket = new MulticastSocket(port);
+// }
+
+ multicastSocket.setTimeToLive(ttl);
+
+ // set the send interface
+ multicastInterface = networkService.resolvePublishHostAddress(address);
+ multicastSocket.setInterface(multicastInterface);
+ multicastSocket.joinGroup(InetAddress.getByName(group));
+
+ multicastSocket.setReceiveBufferSize(bufferSize);
+ multicastSocket.setSendBufferSize(bufferSize);
+ multicastSocket.setSoTimeout(60000);
+
+ this.multicastSocket = multicastSocket;
+
+ this.receiver = new Receiver();
+ this.receiverThread = daemonThreadFactory(settings, "discovery#multicast#receiver").newThread(receiver);
+ this.receiverThread.start();
+ } catch (Exception e) {
+ datagramPacketReceive = null;
+ datagramPacketSend = null;
+ if (multicastSocket != null) {
+ multicastSocket.close();
+ multicastSocket = null;
+ }
+ logger.warn("disabled, failed to setup multicast discovery on port [{}], [{}]: {}", port, multicastInterface, e.getMessage());
+ if (logger.isDebugEnabled()) {
+ logger.debug("disabled, failed to setup multicast discovery on {}", e, multicastInterface);
+ }
+ }
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ if (receiver != null) {
+ receiver.stop();
+ }
+ if (receiverThread != null) {
+ receiverThread.interrupt();
+ }
+ if (multicastSocket != null) {
+ multicastSocket.close();
+ multicastSocket = null;
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ public PingResponse[] pingAndWait(TimeValue timeout) {
+ final AtomicReference<PingResponse[]> response = new AtomicReference<PingResponse[]>();
+ final CountDownLatch latch = new CountDownLatch(1);
+ try {
+ ping(new PingListener() {
+ @Override
+ public void onPing(PingResponse[] pings) {
+ response.set(pings);
+ latch.countDown();
+ }
+ }, timeout);
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Ping execution rejected", ex);
+ return PingResponse.EMPTY;
+ }
+ try {
+ latch.await();
+ return response.get();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return PingResponse.EMPTY;
+ }
+ }
+
+ @Override
+ public void ping(final PingListener listener, final TimeValue timeout) {
+ if (!pingEnabled) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ listener.onPing(PingResponse.EMPTY);
+ }
+ });
+ return;
+ }
+ final int id = pingIdGenerator.incrementAndGet();
+ receivedResponses.put(id, ConcurrentCollections.<DiscoveryNode, PingResponse>newConcurrentMap());
+ sendPingRequest(id);
+ // try and send another ping request halfway through the timeout, in case a node came up mid-round;
+ // a cheap way to catch nodes that missed the first datagram or were not yet listening for it
+ threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 2), ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ try {
+ sendPingRequest(id);
+ } catch (Exception e) {
+ logger.warn("[{}] failed to send second ping request", e, id);
+ }
+ }
+ });
+ threadPool.schedule(timeout, ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ ConcurrentMap<DiscoveryNode, PingResponse> responses = receivedResponses.remove(id);
+ listener.onPing(responses.values().toArray(new PingResponse[responses.size()]));
+ }
+ });
+ }
+
+ private void sendPingRequest(int id) {
+ if (multicastSocket == null) {
+ return;
+ }
+ synchronized (sendMutex) {
+ try {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput out = new HandlesStreamOutput(bStream);
+ out.writeBytes(INTERNAL_HEADER);
+ Version.writeVersion(version, out);
+ out.writeInt(id);
+ clusterName.writeTo(out);
+ nodesProvider.nodes().localNode().writeTo(out);
+ out.close();
+ datagramPacketSend.setData(bStream.bytes().toBytes());
+ multicastSocket.send(datagramPacketSend);
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] sending ping request", id);
+ }
+ } catch (Exception e) {
+ if (lifecycle.stoppedOrClosed()) {
+ return;
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to send multicast ping request", e);
+ } else {
+ logger.warn("failed to send multicast ping request: {}", ExceptionsHelper.detailedMessage(e));
+ }
+ }
+ }
+ }
+
+ class MulticastPingResponseRequestHandler extends BaseTransportRequestHandler<MulticastPingResponse> {
+
+ static final String ACTION = "discovery/zen/multicast";
+
+ @Override
+ public MulticastPingResponse newInstance() {
+ return new MulticastPingResponse();
+ }
+
+ @Override
+ public void messageReceived(MulticastPingResponse request, TransportChannel channel) throws Exception {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] received {}", request.id, request.pingResponse);
+ }
+ ConcurrentMap<DiscoveryNode, PingResponse> responses = receivedResponses.get(request.id);
+ if (responses == null) {
+ logger.warn("received ping response {} with no matching id [{}]", request.pingResponse, request.id);
+ } else {
+ responses.put(request.pingResponse.target(), request.pingResponse);
+ }
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ static class MulticastPingResponse extends TransportRequest {
+
+ int id;
+
+ PingResponse pingResponse;
+
+ MulticastPingResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readInt();
+ pingResponse = PingResponse.readPingResponse(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(id);
+ pingResponse.writeTo(out);
+ }
+ }
+
+
+ private class Receiver implements Runnable {
+
+ private volatile boolean running = true;
+
+ public void stop() {
+ running = false;
+ }
+
+ @Override
+ public void run() {
+ while (running) {
+ try {
+ int id = -1;
+ DiscoveryNode requestingNodeX = null;
+ ClusterName clusterName = null;
+
+ Map<String, Object> externalPingData = null;
+ XContentType xContentType = null;
+
+ synchronized (receiveMutex) {
+ try {
+ multicastSocket.receive(datagramPacketReceive);
+ } catch (SocketTimeoutException ignore) {
+ continue;
+ } catch (Exception e) {
+ if (running) {
+ if (multicastSocket.isClosed()) {
+ logger.warn("multicast socket closed while running, restarting...");
+ // for some reason, the socket got closed on us while we are still running
+ // make a best effort in trying to start the multicast socket again...
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ MulticastZenPing.this.stop();
+ MulticastZenPing.this.start();
+ }
+ });
+ running = false;
+ return;
+ } else {
+ logger.warn("failed to receive packet, throttling...", e);
+ Thread.sleep(500);
+ }
+ }
+ continue;
+ }
+ try {
+ boolean internal = false;
+ if (datagramPacketReceive.getLength() > 4) {
+ int counter = 0;
+ for (; counter < INTERNAL_HEADER.length; counter++) {
+ if (datagramPacketReceive.getData()[datagramPacketReceive.getOffset() + counter] != INTERNAL_HEADER[counter]) {
+ break;
+ }
+ }
+ if (counter == INTERNAL_HEADER.length) {
+ internal = true;
+ }
+ }
+ if (internal) {
+ StreamInput input = CachedStreamInput.cachedHandles(new BytesStreamInput(datagramPacketReceive.getData(), datagramPacketReceive.getOffset() + INTERNAL_HEADER.length, datagramPacketReceive.getLength(), true));
+ Version version = Version.readVersion(input);
+ input.setVersion(version);
+ id = input.readInt();
+ clusterName = ClusterName.readClusterName(input);
+ requestingNodeX = readNode(input);
+ } else {
+ xContentType = XContentFactory.xContentType(datagramPacketReceive.getData(), datagramPacketReceive.getOffset(), datagramPacketReceive.getLength());
+ if (xContentType != null) {
+ // an external ping
+ externalPingData = XContentFactory.xContent(xContentType)
+ .createParser(datagramPacketReceive.getData(), datagramPacketReceive.getOffset(), datagramPacketReceive.getLength())
+ .mapAndClose();
+ } else {
+ throw new ElasticsearchIllegalStateException("failed multicast message, probably message from previous version");
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to read requesting data from {}", e, datagramPacketReceive.getSocketAddress());
+ continue;
+ }
+ }
+ if (externalPingData != null) {
+ handleExternalPingRequest(externalPingData, xContentType, datagramPacketReceive.getSocketAddress());
+ } else {
+ handleNodePingRequest(id, requestingNodeX, clusterName);
+ }
+ } catch (Exception e) {
+ if (running) {
+ logger.warn("unexpected exception in multicast receiver", e);
+ }
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void handleExternalPingRequest(Map<String, Object> externalPingData, XContentType contentType, SocketAddress remoteAddress) {
+ if (externalPingData.containsKey("response")) {
+ // ignoring responses sent over the multicast channel
+ logger.trace("got an external ping response (ignoring) from {}, content {}", remoteAddress, externalPingData);
+ return;
+ }
+
+ if (multicastSocket == null) {
+ logger.debug("can't send ping response, no socket, from {}, content {}", remoteAddress, externalPingData);
+ return;
+ }
+
+ Map<String, Object> request = (Map<String, Object>) externalPingData.get("request");
+ if (request == null) {
+ logger.warn("malformed external ping request, no 'request' element from {}, content {}", remoteAddress, externalPingData);
+ return;
+ }
+
+ String clusterName = null;
+ if (request.containsKey("cluster_name")) {
+ clusterName = request.get("cluster_name").toString();
+ } else if (request.containsKey("clusterName")) {
+ clusterName = request.get("clusterName").toString();
+ }
+ if (clusterName == null) {
+ logger.warn("malformed external ping request, missing 'cluster_name' element within request, from {}, content {}", remoteAddress, externalPingData);
+ return;
+ }
+
+ if (!clusterName.equals(MulticastZenPing.this.clusterName.value())) {
+ logger.trace("got request for cluster_name {}, but our cluster_name is {}, from {}, content {}", clusterName, MulticastZenPing.this.clusterName.value(), remoteAddress, externalPingData);
+ return;
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("got external ping request from {}, content {}", remoteAddress, externalPingData);
+ }
+
+ try {
+ DiscoveryNode localNode = nodesProvider.nodes().localNode();
+
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ builder.startObject().startObject("response");
+ builder.field("cluster_name", MulticastZenPing.this.clusterName.value());
+ builder.startObject("version").field("number", version.number()).field("snapshot_build", version.snapshot).endObject();
+ builder.field("transport_address", localNode.address().toString());
+
+ if (nodesProvider.nodeService() != null) {
+ for (Map.Entry<String, String> attr : nodesProvider.nodeService().attributes().entrySet()) {
+ builder.field(attr.getKey(), attr.getValue());
+ }
+ }
+
+ builder.startObject("attributes");
+ for (Map.Entry<String, String> attr : localNode.attributes().entrySet()) {
+ builder.field(attr.getKey(), attr.getValue());
+ }
+ builder.endObject();
+
+ builder.endObject().endObject();
+ synchronized (sendMutex) {
+ BytesReference bytes = builder.bytes();
+ datagramPacketSend.setData(bytes.array(), bytes.arrayOffset(), bytes.length());
+ multicastSocket.send(datagramPacketSend);
+ if (logger.isTraceEnabled()) {
+ logger.trace("sending external ping response {}", builder.string());
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to send external multicast response", e);
+ }
+ }
+
+ private void handleNodePingRequest(int id, DiscoveryNode requestingNodeX, ClusterName clusterName) {
+ if (!pingEnabled) {
+ return;
+ }
+ DiscoveryNodes discoveryNodes = nodesProvider.nodes();
+ final DiscoveryNode requestingNode = requestingNodeX;
+ if (requestingNode.id().equals(discoveryNodes.localNodeId())) {
+ // that's me, ignore
+ return;
+ }
+ if (!clusterName.equals(MulticastZenPing.this.clusterName)) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] received ping_request from [{}], but wrong cluster_name [{}], expected [{}], ignoring", id, requestingNode, clusterName, MulticastZenPing.this.clusterName);
+ }
+ return;
+ }
+ // don't connect between two client nodes, no need for that...
+ if (!discoveryNodes.localNode().shouldConnectTo(requestingNode)) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] received ping_request from [{}], both are client nodes, ignoring", id, requestingNode, clusterName);
+ }
+ return;
+ }
+ final MulticastPingResponse multicastPingResponse = new MulticastPingResponse();
+ multicastPingResponse.id = id;
+ multicastPingResponse.pingResponse = new PingResponse(discoveryNodes.localNode(), discoveryNodes.masterNode(), clusterName);
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] received ping_request from [{}], sending {}", id, requestingNode, multicastPingResponse.pingResponse);
+ }
+
+ if (!transportService.nodeConnected(requestingNode)) {
+ // do the connect and send on a thread pool
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ // connect to the node if possible
+ try {
+ transportService.connectToNode(requestingNode);
+ transportService.sendRequest(requestingNode, MulticastPingResponseRequestHandler.ACTION, multicastPingResponse, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("failed to receive confirmation on sent ping response to [{}]", exp, requestingNode);
+ }
+ });
+ } catch (Exception e) {
+ logger.warn("failed to connect to requesting node {}", e, requestingNode);
+ }
+ }
+ });
+ } else {
+ transportService.sendRequest(requestingNode, MulticastPingResponseRequestHandler.ACTION, multicastPingResponse, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("failed to receive confirmation on sent ping response to [{}]", exp, requestingNode);
+ }
+ });
+ }
+ }
+ }
+}
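
handleExternalPingRequest above defines a small ad-hoc JSON protocol over the multicast channel. Reconstructing it from the fields the method reads and writes (all concrete values below are placeholders): an external client multicasts a request naming the cluster, under either a cluster_name or clusterName key:

    {"request": {"cluster_name": "elasticsearch"}}

and every node whose cluster name matches answers on the same channel with:

    {"response": {
        "cluster_name": "elasticsearch",
        "version": {"number": "1.0.3", "snapshot_build": false},
        "transport_address": "inet[/192.168.0.1:9300]",
        "attributes": {}
    }}

Responses arriving over multicast are themselves ignored by the handler, which is what the "response" key check at the top of the method is for.
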
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java
new file mode 100644
index 0000000..dbfaed5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.unicast;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+import java.util.List;
+
+/**
+ * A pluggable provider of the list of unicast hosts to use for unicast discovery.
+ */
+public interface UnicastHostsProvider {
+
+ /**
+ * Builds the dynamic list of unicast hosts to be used for unicast discovery.
+ */
+ List<DiscoveryNode> buildDynamicNodes();
+}
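
UnicastHostsProvider is the plugin hook that UnicastZenPing (next file) polls on every ping round via buildDynamicNodes(). A minimal, hedged implementation with a hard-coded host; real providers typically resolve hosts from an external source such as a cloud API, and the address string and node-id convention here are illustrative:

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.TransportService;

public class StaticHostsProvider implements UnicastHostsProvider {

    private final TransportService transportService;

    public StaticHostsProvider(TransportService transportService) {
        this.transportService = transportService;
    }

    @Override
    public List<DiscoveryNode> buildDynamicNodes() {
        List<DiscoveryNode> nodes = new ArrayList<DiscoveryNode>();
        try {
            // resolve the host string the same way UnicastZenPing resolves its "hosts" setting
            for (TransportAddress address : transportService.addressesFromString("10.0.0.5:9300")) {
                nodes.add(new DiscoveryNode("#static_host_1#", address, Version.CURRENT));
            }
        } catch (Exception e) {
            // a resolution failure simply contributes no hosts this round
        }
        return nodes;
    }
}
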
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
new file mode 100644
index 0000000..91e8034
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
@@ -0,0 +1,484 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.unicast;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
+import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPingResponse;
+
+/**
+ * A {@link ZenPing} implementation that pings a configured list of hosts
+ * (plus any contributed by {@link UnicastHostsProvider}s) directly over
+ * the transport layer.
+ */
+public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
+
+ public static final int LIMIT_PORTS_COUNT = 1;
+
+ private final ThreadPool threadPool;
+ private final TransportService transportService;
+ private final ClusterName clusterName;
+ private final Version version;
+
+ private final int concurrentConnects;
+
+ private final DiscoveryNode[] nodes;
+
+ private volatile DiscoveryNodesProvider nodesProvider;
+
+ private final AtomicInteger pingIdGenerator = new AtomicInteger();
+
+ private final Map<Integer, ConcurrentMap<DiscoveryNode, PingResponse>> receivedResponses = newConcurrentMap();
+
+ // temporary responses this node hands back while answering ping requests from other nodes
+ private final Queue<PingResponse> temporalResponses = ConcurrentCollections.newQueue();
+
+ private final CopyOnWriteArrayList<UnicastHostsProvider> hostsProviders = new CopyOnWriteArrayList<UnicastHostsProvider>();
+
+ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, Version version, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.transportService = transportService;
+ this.clusterName = clusterName;
+ this.version = version;
+
+ if (unicastHostsProviders != null) {
+ for (UnicastHostsProvider unicastHostsProvider : unicastHostsProviders) {
+ addHostsProvider(unicastHostsProvider);
+ }
+ }
+
+ this.concurrentConnects = componentSettings.getAsInt("concurrent_connects", 10);
+ String[] hostArr = componentSettings.getAsArray("hosts");
+ // trim the hosts
+ for (int i = 0; i < hostArr.length; i++) {
+ hostArr[i] = hostArr[i].trim();
+ }
+ List<String> hosts = Lists.newArrayList(hostArr);
+ logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
+
+ List<DiscoveryNode> nodes = Lists.newArrayList();
+ int idCounter = 0;
+ for (String host : hosts) {
+ try {
+ TransportAddress[] addresses = transportService.addressesFromString(host);
+ // limit to a single address per host; it makes no sense to ping a whole port range
+ for (int i = 0; (i < addresses.length && i < LIMIT_PORTS_COUNT); i++) {
+ nodes.add(new DiscoveryNode("#zen_unicast_" + (++idCounter) + "#", addresses[i], version));
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to resolve address for [" + host + "]", e);
+ }
+ }
+ this.nodes = nodes.toArray(new DiscoveryNode[nodes.size()]);
+
+ transportService.registerHandler(UnicastPingRequestHandler.ACTION, new UnicastPingRequestHandler());
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ transportService.removeHandler(UnicastPingRequestHandler.ACTION);
+ }
+
+ public void addHostsProvider(UnicastHostsProvider provider) {
+ hostsProviders.add(provider);
+ }
+
+ public void removeHostsProvider(UnicastHostsProvider provider) {
+ hostsProviders.remove(provider);
+ }
+
+ @Override
+ public void setNodesProvider(DiscoveryNodesProvider nodesProvider) {
+ this.nodesProvider = nodesProvider;
+ }
+
+ public PingResponse[] pingAndWait(TimeValue timeout) {
+ final AtomicReference<PingResponse[]> response = new AtomicReference<PingResponse[]>();
+ final CountDownLatch latch = new CountDownLatch(1);
+ ping(new PingListener() {
+ @Override
+ public void onPing(PingResponse[] pings) {
+ response.set(pings);
+ latch.countDown();
+ }
+ }, timeout);
+ try {
+ latch.await();
+ return response.get();
+ } catch (InterruptedException e) {
+ // restore the interrupt status and give up on this ping round
+ Thread.currentThread().interrupt();
+ return null;
+ }
+ }
+
+ @Override
+ public void ping(final PingListener listener, final TimeValue timeout) throws ElasticsearchException {
+ final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingIdGenerator.incrementAndGet());
+ receivedResponses.put(sendPingsHandler.id(), ConcurrentCollections.<DiscoveryNode, PingResponse>newConcurrentMap());
+ sendPings(timeout, null, sendPingsHandler);
+ threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 2), ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ try {
+ sendPings(timeout, null, sendPingsHandler);
+ threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 2), ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ try {
+ sendPings(timeout, TimeValue.timeValueMillis(timeout.millis() / 2), sendPingsHandler);
+ ConcurrentMap<DiscoveryNode, PingResponse> responses = receivedResponses.remove(sendPingsHandler.id());
+ sendPingsHandler.close();
+ for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {
+ logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node);
+ transportService.disconnectFromNode(node);
+ }
+ listener.onPing(responses.values().toArray(new PingResponse[responses.size()]));
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Ping execution rejected", ex);
+ }
+ }
+ });
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Ping execution rejected", ex);
+ }
+ }
+ });
+ }
+
+ class SendPingsHandler {
+ private final int id;
+ private volatile ExecutorService executor;
+ private final Set<DiscoveryNode> nodeToDisconnect = ConcurrentCollections.newConcurrentSet();
+ private volatile boolean closed;
+
+ SendPingsHandler(int id) {
+ this.id = id;
+ }
+
+ public int id() {
+ return this.id;
+ }
+
+ public boolean isClosed() {
+ return this.closed;
+ }
+
+ public Executor executor() {
+ if (executor == null) {
+ ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
+ executor = EsExecutors.newScaling(0, concurrentConnects, 60, TimeUnit.SECONDS, threadFactory);
+ }
+ return executor;
+ }
+
+ public void close() {
+ closed = true;
+ if (executor != null) {
+ executor.shutdownNow();
+ executor = null;
+ }
+ }
+ }
+
+ void sendPings(final TimeValue timeout, @Nullable TimeValue waitTime, final SendPingsHandler sendPingsHandler) {
+ final UnicastPingRequest pingRequest = new UnicastPingRequest();
+ pingRequest.id = sendPingsHandler.id();
+ pingRequest.timeout = timeout;
+ DiscoveryNodes discoNodes = nodesProvider.nodes();
+ pingRequest.pingResponse = new PingResponse(discoNodes.localNode(), discoNodes.masterNode(), clusterName);
+
+ List<DiscoveryNode> nodesToPing = newArrayList(nodes);
+ for (UnicastHostsProvider provider : hostsProviders) {
+ nodesToPing.addAll(provider.buildDynamicNodes());
+ }
+
+ final CountDownLatch latch = new CountDownLatch(nodesToPing.size());
+ for (final DiscoveryNode node : nodesToPing) {
+ // make sure we are connected
+ boolean nodeFoundByAddressX;
+ DiscoveryNode nodeToSendX = discoNodes.findByAddress(node.address());
+ if (nodeToSendX != null) {
+ nodeFoundByAddressX = true;
+ } else {
+ nodeToSendX = node;
+ nodeFoundByAddressX = false;
+ }
+ final DiscoveryNode nodeToSend = nodeToSendX;
+
+ final boolean nodeFoundByAddress = nodeFoundByAddressX;
+ if (!transportService.nodeConnected(nodeToSend)) {
+ if (sendPingsHandler.isClosed()) {
+ return;
+ }
+ sendPingsHandler.nodeToDisconnect.add(nodeToSend);
+ // fork the connection to another thread
+ sendPingsHandler.executor().execute(new Runnable() {
+ @Override
+ public void run() {
+ if (sendPingsHandler.isClosed()) {
+ return;
+ }
+ boolean success = false;
+ try {
+ // connect to the node, see if we manage to do it, if not, bail
+ if (!nodeFoundByAddress) {
+ logger.trace("[{}] connecting (light) to {}", sendPingsHandler.id(), nodeToSend);
+ transportService.connectToNodeLight(nodeToSend);
+ } else {
+ logger.trace("[{}] connecting to {}", sendPingsHandler.id(), nodeToSend);
+ transportService.connectToNode(nodeToSend);
+ }
+ logger.trace("[{}] connected to {}", sendPingsHandler.id(), node);
+ if (receivedResponses.containsKey(sendPingsHandler.id())) {
+ // we are connected and still in progress, send the ping request
+ sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, nodeToSend);
+ } else {
+ // connect took too long, just log it and bail
+ latch.countDown();
+ logger.trace("[{}] connect to {} was too long outside of ping window, bailing", sendPingsHandler.id(), node);
+ }
+ success = true;
+ } catch (ConnectTransportException e) {
+ // can't connect to the node - this is a more common path!
+ logger.trace("[{}] failed to connect to {}", e, sendPingsHandler.id(), nodeToSend);
+ } catch (Throwable e) {
+ logger.warn("[{}] failed send ping to {}", e, sendPingsHandler.id(), nodeToSend);
+ } finally {
+ if (!success) {
+ latch.countDown();
+ }
+ }
+ }
+ });
+ } else {
+ sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, nodeToSend);
+ }
+ }
+ if (waitTime != null) {
+ try {
+ latch.await(waitTime.millis(), TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
+
+ private void sendPingRequestToNode(final int id, TimeValue timeout, UnicastPingRequest pingRequest, final CountDownLatch latch, final DiscoveryNode node, final DiscoveryNode nodeToSend) {
+ logger.trace("[{}] sending to {}", id, nodeToSend);
+ transportService.sendRequest(nodeToSend, UnicastPingRequestHandler.ACTION, pingRequest, TransportRequestOptions.options().withTimeout((long) (timeout.millis() * 1.25)), new BaseTransportResponseHandler<UnicastPingResponse>() {
+
+ @Override
+ public UnicastPingResponse newInstance() {
+ return new UnicastPingResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(UnicastPingResponse response) {
+ logger.trace("[{}] received response from {}: {}", id, nodeToSend, Arrays.toString(response.pingResponses));
+ try {
+ DiscoveryNodes discoveryNodes = nodesProvider.nodes();
+ for (PingResponse pingResponse : response.pingResponses) {
+ if (pingResponse.target().id().equals(discoveryNodes.localNodeId())) {
+ // that's us, ignore
+ continue;
+ }
+ if (!pingResponse.clusterName().equals(clusterName)) {
+ // not part of the cluster
+ logger.debug("[{}] filtering out response from {}, not same cluster_name [{}]", id, pingResponse.target(), pingResponse.clusterName().value());
+ continue;
+ }
+ ConcurrentMap<DiscoveryNode, PingResponse> responses = receivedResponses.get(response.id);
+ if (responses == null) {
+ logger.warn("received ping response {} with no matching id [{}]", pingResponse, response.id);
+ } else {
+ PingResponse existingResponse = responses.get(pingResponse.target());
+ if (existingResponse == null) {
+ responses.put(pingResponse.target(), pingResponse);
+ } else {
+ // try and merge the best ping response for it, i.e. if the new one
+ // doesn't have the master node set, and the existing one does, then
+ // the existing one is better, so we keep it
+ if (pingResponse.master() != null) {
+ responses.put(pingResponse.target(), pingResponse);
+ }
+ }
+ }
+ }
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ latch.countDown();
+ if (exp instanceof ConnectTransportException) {
+ // ok, not connected...
+ logger.trace("failed to connect to {}", exp, nodeToSend);
+ } else {
+ logger.warn("failed to send ping to [{}]", exp, node);
+ }
+ }
+ });
+ }
+
+ private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) {
+ if (lifecycle.stoppedOrClosed()) {
+ throw new ElasticsearchIllegalStateException("received ping request while stopped/closed");
+ }
+ temporalResponses.add(request.pingResponse);
+ threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() {
+ @Override
+ public void run() {
+ temporalResponses.remove(request.pingResponse);
+ }
+ });
+
+ List<PingResponse> pingResponses = newArrayList(temporalResponses);
+ DiscoveryNodes discoNodes = nodesProvider.nodes();
+ pingResponses.add(new PingResponse(discoNodes.localNode(), discoNodes.masterNode(), clusterName));
+
+
+ UnicastPingResponse unicastPingResponse = new UnicastPingResponse();
+ unicastPingResponse.id = request.id;
+ unicastPingResponse.pingResponses = pingResponses.toArray(new PingResponse[pingResponses.size()]);
+
+ return unicastPingResponse;
+ }
+
+ class UnicastPingRequestHandler extends BaseTransportRequestHandler<UnicastPingRequest> {
+
+ static final String ACTION = "discovery/zen/unicast";
+
+ @Override
+ public UnicastPingRequest newInstance() {
+ return new UnicastPingRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(UnicastPingRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(handlePingRequest(request));
+ }
+ }
+
+ static class UnicastPingRequest extends TransportRequest {
+
+ int id;
+
+ TimeValue timeout;
+
+ PingResponse pingResponse;
+
+ UnicastPingRequest() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readInt();
+ timeout = readTimeValue(in);
+ pingResponse = readPingResponse(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(id);
+ timeout.writeTo(out);
+ pingResponse.writeTo(out);
+ }
+ }
+
+ static class UnicastPingResponse extends TransportResponse {
+
+ int id;
+
+ PingResponse[] pingResponses;
+
+ UnicastPingResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readInt();
+ pingResponses = new PingResponse[in.readVInt()];
+ for (int i = 0; i < pingResponses.length; i++) {
+ pingResponses[i] = readPingResponse(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(id);
+ out.writeVInt(pingResponses.length);
+ for (PingResponse pingResponse : pingResponses) {
+ pingResponse.writeTo(out);
+ }
+ }
+ }
+}
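
A note on the response handling above: ping() fires three waves of sendPings() at timeout/2 intervals, and handleResponse() keeps at most one PingResponse per target node, preferring a response that names a master. Below is a minimal sketch of that merge rule only; Pinged is a hypothetical stand-in for PingResponse, not the real class.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Sketch of the merge rule in handleResponse(): keep one response per
    // target node, and prefer a response that has the master node set.
    class PingMergeSketch {
        static class Pinged {
            final String targetId;
            final String masterId; // null when the sender did not know the master
            Pinged(String targetId, String masterId) {
                this.targetId = targetId;
                this.masterId = masterId;
            }
        }

        private final ConcurrentMap<String, Pinged> byTarget = new ConcurrentHashMap<String, Pinged>();

        void merge(Pinged incoming) {
            Pinged existing = byTarget.get(incoming.targetId);
            // first sighting of this node, or the new response also names a
            // master: overwrite; otherwise the stored response is at least as good
            if (existing == null || incoming.masterId != null) {
                byTarget.put(incoming.targetId, incoming);
            }
        }
    }
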
diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java
new file mode 100644
index 0000000..e8489ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.publish;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.*;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
+import org.elasticsearch.discovery.ClusterStatePublishResponseHandler;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class PublishClusterStateAction extends AbstractComponent {
+
+ public static interface NewClusterStateListener {
+
+ static interface NewStateProcessed {
+
+ void onNewClusterStateProcessed();
+
+ void onNewClusterStateFailed(Throwable t);
+ }
+
+ void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed);
+ }
+
+ private final TransportService transportService;
+ private final DiscoveryNodesProvider nodesProvider;
+ private final NewClusterStateListener listener;
+
+ private final TimeValue publishTimeout;
+
+ public PublishClusterStateAction(Settings settings, TransportService transportService, DiscoveryNodesProvider nodesProvider,
+ NewClusterStateListener listener) {
+ super(settings);
+ this.transportService = transportService;
+ this.nodesProvider = nodesProvider;
+ this.listener = listener;
+
+ this.publishTimeout = settings.getAsTime("discovery.zen.publish_timeout", Discovery.DEFAULT_PUBLISH_TIMEOUT);
+
+ transportService.registerHandler(PublishClusterStateRequestHandler.ACTION, new PublishClusterStateRequestHandler());
+ }
+
+ public void close() {
+ transportService.removeHandler(PublishClusterStateRequestHandler.ACTION);
+ }
+
+ public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) {
+ publish(clusterState, new AckClusterStatePublishResponseHandler(clusterState.nodes().size() - 1, ackListener));
+ }
+
+ private void publish(ClusterState clusterState, final ClusterStatePublishResponseHandler publishResponseHandler) {
+
+ DiscoveryNode localNode = nodesProvider.nodes().localNode();
+
+ Map<Version, BytesReference> serializedStates = Maps.newHashMap();
+
+ for (final DiscoveryNode node : clusterState.nodes()) {
+ if (node.equals(localNode)) {
+ continue;
+ }
+ // serialize the cluster state only once per node version, rather than once
+ // per node we send it to over the wire, and compress it while we are at it...
+ BytesReference bytes = serializedStates.get(node.version());
+ if (bytes == null) {
+ try {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = new HandlesStreamOutput(CompressorFactory.defaultCompressor().streamOutput(bStream));
+ stream.setVersion(node.version());
+ ClusterState.Builder.writeTo(clusterState, stream);
+ stream.close();
+ bytes = bStream.bytes();
+ serializedStates.put(node.version(), bytes);
+ } catch (Throwable e) {
+ logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node);
+ publishResponseHandler.onFailure(node, e);
+ continue;
+ }
+ }
+ try {
+ TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false);
+ // no need to put a timeout on the options here, because we want the response to eventually be received
+ // and not log an error if it arrives after the timeout
+ transportService.sendRequest(node, PublishClusterStateRequestHandler.ACTION,
+ new BytesTransportRequest(bytes, node.version()),
+ options, // no need to compress, we already compressed the bytes
+
+ new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ publishResponseHandler.onResponse(node);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ logger.debug("failed to send cluster state to [{}]", exp, node);
+ publishResponseHandler.onFailure(node, exp);
+ }
+ });
+ } catch (Throwable t) {
+ logger.debug("error sending cluster state to [{}]", t, node);
+ publishResponseHandler.onFailure(node, t);
+ }
+ }
+
+ if (publishTimeout.millis() > 0) {
+ // only wait if the publish timeout is configured...
+ try {
+ boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout);
+ if (!awaited) {
+ logger.debug("awaiting all nodes to process published state {} timed out, timeout {}", clusterState.version(), publishTimeout);
+ }
+ } catch (InterruptedException e) {
+ // ignore & restore interrupt
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler<BytesTransportRequest> {
+
+ static final String ACTION = "discovery/zen/publish";
+
+ @Override
+ public BytesTransportRequest newInstance() {
+ return new BytesTransportRequest();
+ }
+
+ @Override
+ public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception {
+ Compressor compressor = CompressorFactory.compressor(request.bytes());
+ StreamInput in;
+ if (compressor != null) {
+ in = CachedStreamInput.cachedHandlesCompressed(compressor, request.bytes().streamInput());
+ } else {
+ in = CachedStreamInput.cachedHandles(request.bytes().streamInput());
+ }
+ in.setVersion(request.version());
+ ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode());
+ logger.debug("received cluster state version {}", clusterState.version());
+ listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() {
+ @Override
+ public void onNewClusterStateProcessed() {
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ } catch (Throwable e) {
+ logger.debug("failed to send response on cluster state processed", e);
+ }
+ }
+
+ @Override
+ public void onNewClusterStateFailed(Throwable t) {
+ try {
+ channel.sendResponse(t);
+ } catch (Throwable e) {
+ logger.debug("failed to send response on cluster state processed", e);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
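
publish() above serializes the cluster state at most once per node wire version and reuses the compressed bytes for every node on that version. A minimal sketch of that caching pattern follows; the serialize() method is a hypothetical placeholder for the real compressed, version-aware stream pipeline.

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the serialize-once-per-version pattern used in publish().
    class PerVersionSerializationSketch {
        private final Map<Integer, byte[]> cache = new HashMap<Integer, byte[]>();

        byte[] bytesFor(int wireVersion) {
            byte[] bytes = cache.get(wireVersion);
            if (bytes == null) {
                bytes = serialize(wireVersion); // expensive: done once per version
                cache.put(wireVersion, bytes);
            }
            return bytes;
        }

        private byte[] serialize(int wireVersion) {
            // placeholder; the real code writes the ClusterState to a
            // compressed, version-aware StreamOutput
            return ("state@" + wireVersion).getBytes();
        }
    }
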
diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java
new file mode 100644
index 0000000..e73898e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/env/Environment.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.env;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+
+import com.google.common.base.Charsets;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.net.URL;
+
+import static org.elasticsearch.common.Strings.cleanPath;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ * The environment in which the node operates: the resolved locations of the home, work, data, config, plugins and logs directories.
+ */
+public class Environment {
+
+ private final Settings settings;
+
+ private final File homeFile;
+
+ private final File workFile;
+
+ private final File workWithClusterFile;
+
+ private final File[] dataFiles;
+
+ private final File[] dataWithClusterFiles;
+
+ private final File configFile;
+
+ private final File pluginsFile;
+
+ private final File logsFile;
+
+ public Environment() {
+ this(EMPTY_SETTINGS);
+ }
+
+ public Environment(Settings settings) {
+ this.settings = settings;
+ if (settings.get("path.home") != null) {
+ homeFile = new File(cleanPath(settings.get("path.home")));
+ } else {
+ homeFile = new File(System.getProperty("user.dir"));
+ }
+
+ if (settings.get("path.conf") != null) {
+ configFile = new File(cleanPath(settings.get("path.conf")));
+ } else {
+ configFile = new File(homeFile, "config");
+ }
+
+ if (settings.get("path.plugins") != null) {
+ pluginsFile = new File(cleanPath(settings.get("path.plugins")));
+ } else {
+ pluginsFile = new File(homeFile, "plugins");
+ }
+
+ if (settings.get("path.work") != null) {
+ workFile = new File(cleanPath(settings.get("path.work")));
+ } else {
+ workFile = new File(homeFile, "work");
+ }
+ workWithClusterFile = new File(workFile, ClusterName.clusterNameFromSettings(settings).value());
+
+ String[] dataPaths = settings.getAsArray("path.data");
+ if (dataPaths.length > 0) {
+ dataFiles = new File[dataPaths.length];
+ dataWithClusterFiles = new File[dataPaths.length];
+ for (int i = 0; i < dataPaths.length; i++) {
+ dataFiles[i] = new File(dataPaths[i]);
+ dataWithClusterFiles[i] = new File(dataFiles[i], ClusterName.clusterNameFromSettings(settings).value());
+ }
+ } else {
+ dataFiles = new File[]{new File(homeFile, "data")};
+ dataWithClusterFiles = new File[]{new File(new File(homeFile, "data"), ClusterName.clusterNameFromSettings(settings).value())};
+ }
+
+ if (settings.get("path.logs") != null) {
+ logsFile = new File(cleanPath(settings.get("path.logs")));
+ } else {
+ logsFile = new File(homeFile, "logs");
+ }
+ }
+
+ /**
+ * The settings used to build this environment.
+ */
+ public Settings settings() {
+ return this.settings;
+ }
+
+ /**
+ * The home of the installation.
+ */
+ public File homeFile() {
+ return homeFile;
+ }
+
+ /**
+ * The work location.
+ */
+ public File workFile() {
+ return workFile;
+ }
+
+ /**
+ * The work location with the cluster name as a sub directory.
+ */
+ public File workWithClusterFile() {
+ return workWithClusterFile;
+ }
+
+ /**
+ * The data location.
+ */
+ public File[] dataFiles() {
+ return dataFiles;
+ }
+
+ /**
+ * The data location with the cluster name as a sub directory.
+ */
+ public File[] dataWithClusterFiles() {
+ return dataWithClusterFiles;
+ }
+
+ /**
+ * The config location.
+ */
+ public File configFile() {
+ return configFile;
+ }
+
+ public File pluginsFile() {
+ return pluginsFile;
+ }
+
+ public File logsFile() {
+ return logsFile;
+ }
+
+ public String resolveConfigAndLoadToString(String path) throws FailedToResolveConfigException, IOException {
+ return Streams.copyToString(new InputStreamReader(resolveConfig(path).openStream(), Charsets.UTF_8));
+ }
+
+ public URL resolveConfig(String path) throws FailedToResolveConfigException {
+ String origPath = path;
+ // first, try it as a path on the file system
+ File f1 = new File(path);
+ if (f1.exists()) {
+ try {
+ return f1.toURI().toURL();
+ } catch (MalformedURLException e) {
+ throw new FailedToResolveConfigException("Failed to resolve path [" + f1 + "]", e);
+ }
+ }
+ if (path.startsWith("/")) {
+ path = path.substring(1);
+ }
+ // next, try it relative to the config location
+ File f2 = new File(configFile, path);
+ if (f2.exists()) {
+ try {
+ return f2.toURI().toURL();
+ } catch (MalformedURLException e) {
+ throw new FailedToResolveConfigException("Failed to resolve path [" + f2 + "]", e);
+ }
+ }
+ // try and load it from the classpath directly
+ URL resource = settings.getClassLoader().getResource(path);
+ if (resource != null) {
+ return resource;
+ }
+ // try and load it from the classpath with config/ prefix
+ if (!path.startsWith("config/")) {
+ resource = settings.getClassLoader().getResource("config/" + path);
+ if (resource != null) {
+ return resource;
+ }
+ }
+ throw new FailedToResolveConfigException("Failed to resolve config path [" + origPath + "], tried file path [" + f1 + "], path file [" + f2 + "], and classpath");
+ }
+}
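
resolveConfig() above tries, in order: the path as given on the filesystem, the path under the config directory, the bare classpath, and finally the classpath under a config/ prefix. A hedged usage sketch follows; the path.home value and the logging.yml file name are illustrative, not required, and the snippet assumes the ImmutableSettings.settingsBuilder() factory.

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.env.Environment;
    import java.net.URL;

    // Illustrative only: resolves a config file via the fallback chain above;
    // throws FailedToResolveConfigException when nothing matches.
    public class ResolveConfigExample {
        public static void main(String[] args) throws Exception {
            Environment env = new Environment(ImmutableSettings.settingsBuilder()
                    .put("path.home", "/opt/elasticsearch") // assumed install dir
                    .build());
            URL url = env.resolveConfig("logging.yml");
            System.out.println("resolved to " + url);
        }
    }
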
diff --git a/src/main/java/org/elasticsearch/env/EnvironmentModule.java b/src/main/java/org/elasticsearch/env/EnvironmentModule.java
new file mode 100644
index 0000000..8a4aa1f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/env/EnvironmentModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.env;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class EnvironmentModule extends AbstractModule {
+
+ private final Environment environment;
+
+ public EnvironmentModule(Environment environment) {
+ this.environment = environment;
+ }
+
+ @Override
+ protected void configure() {
+ bind(Environment.class).toInstance(environment);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java b/src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java
new file mode 100644
index 0000000..e6b2a82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/env/FailedToResolveConfigException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.env;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class FailedToResolveConfigException extends ElasticsearchException {
+
+ public FailedToResolveConfigException(String msg) {
+ super(msg);
+ }
+
+ public FailedToResolveConfigException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java
new file mode 100644
index 0000000..db27140
--- /dev/null
+++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.env;
+
+import com.google.common.collect.Sets;
+import com.google.common.primitives.Ints;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.NativeFSLockFactory;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+/**
+ *
+ */
+public class NodeEnvironment extends AbstractComponent {
+
+ private final File[] nodeFiles;
+ private final File[] nodeIndicesLocations;
+
+ private final Lock[] locks;
+
+ private final int localNodeId;
+
+ @Inject
+ public NodeEnvironment(Settings settings, Environment environment) {
+ super(settings);
+
+ if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
+ nodeFiles = null;
+ nodeIndicesLocations = null;
+ locks = null;
+ localNodeId = -1;
+ return;
+ }
+
+ File[] nodesFiles = new File[environment.dataWithClusterFiles().length];
+ Lock[] locks = new Lock[environment.dataWithClusterFiles().length];
+ int localNodeId = -1;
+ IOException lastException = null;
+ int maxLocalStorageNodes = settings.getAsInt("node.max_local_storage_nodes", 50);
+ for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
+ for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
+ File dir = new File(new File(environment.dataWithClusterFiles()[dirIndex], "nodes"), Integer.toString(possibleLockId));
+ if (!dir.exists()) {
+ FileSystemUtils.mkdirs(dir);
+ }
+ logger.trace("obtaining node lock on {} ...", dir.getAbsolutePath());
+ try {
+ NativeFSLockFactory lockFactory = new NativeFSLockFactory(dir);
+ Lock tmpLock = lockFactory.makeLock("node.lock");
+ boolean obtained = tmpLock.obtain();
+ if (obtained) {
+ locks[dirIndex] = tmpLock;
+ nodesFiles[dirIndex] = dir;
+ localNodeId = possibleLockId;
+ } else {
+ logger.trace("failed to obtain node lock on {}", dir.getAbsolutePath());
+ // release all the ones that were obtained up until now
+ for (int i = 0; i < locks.length; i++) {
+ if (locks[i] != null) {
+ try {
+ locks[i].release();
+ } catch (Exception e1) {
+ // ignore
+ }
+ }
+ locks[i] = null;
+ }
+ break;
+ }
+ } catch (IOException e) {
+ logger.trace("failed to obtain node lock on {}", e, dir.getAbsolutePath());
+ lastException = new IOException("failed to obtain lock on " + dir.getAbsolutePath(), e);
+ // release all the ones that were obtained up until now
+ for (int i = 0; i < locks.length; i++) {
+ if (locks[i] != null) {
+ try {
+ locks[i].release();
+ } catch (Exception e1) {
+ // ignore
+ }
+ }
+ locks[i] = null;
+ }
+ break;
+ }
+ }
+ if (locks[0] != null) {
+ // we found a lock, break
+ break;
+ }
+ }
+ if (locks[0] == null) {
+ throw new ElasticsearchIllegalStateException("Failed to obtain node lock, is the following location writable?: " + Arrays.toString(environment.dataWithClusterFiles()), lastException);
+ }
+
+ this.localNodeId = localNodeId;
+ this.locks = locks;
+ this.nodeFiles = nodesFiles;
+ if (logger.isDebugEnabled()) {
+ logger.debug("using node location [{}], local_node_id [{}]", nodesFiles, localNodeId);
+ }
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder("node data locations details:\n");
+ for (File file : nodesFiles) {
+ sb.append(" -> ").append(file.getAbsolutePath()).append(", free_space [").append(new ByteSizeValue(file.getFreeSpace())).append("], usable_space [").append(new ByteSizeValue(file.getUsableSpace())).append("]\n");
+ }
+ logger.trace(sb.toString());
+ }
+
+ this.nodeIndicesLocations = new File[nodeFiles.length];
+ for (int i = 0; i < nodeFiles.length; i++) {
+ nodeIndicesLocations[i] = new File(nodeFiles[i], "indices");
+ }
+ }
+
+ public int localNodeId() {
+ return this.localNodeId;
+ }
+
+ public boolean hasNodeFile() {
+ return nodeFiles != null && locks != null;
+ }
+
+ public File[] nodeDataLocations() {
+ if (nodeFiles == null || locks == null) {
+ throw new ElasticsearchIllegalStateException("node is not configured to store local location");
+ }
+ return nodeFiles;
+ }
+
+ public File[] indicesLocations() {
+ return nodeIndicesLocations;
+ }
+
+ public File[] indexLocations(Index index) {
+ File[] indexLocations = new File[nodeFiles.length];
+ for (int i = 0; i < nodeFiles.length; i++) {
+ indexLocations[i] = new File(new File(nodeFiles[i], "indices"), index.name());
+ }
+ return indexLocations;
+ }
+
+ public File[] shardLocations(ShardId shardId) {
+ File[] shardLocations = new File[nodeFiles.length];
+ for (int i = 0; i < nodeFiles.length; i++) {
+ shardLocations[i] = new File(new File(new File(nodeFiles[i], "indices"), shardId.index().name()), Integer.toString(shardId.id()));
+ }
+ return shardLocations;
+ }
+
+ public Set<String> findAllIndices() throws Exception {
+ if (nodeFiles == null || locks == null) {
+ throw new ElasticsearchIllegalStateException("node is not configured to store local location");
+ }
+ Set<String> indices = Sets.newHashSet();
+ for (File indicesLocation : nodeIndicesLocations) {
+ File[] indicesList = indicesLocation.listFiles();
+ if (indicesList == null) {
+ continue;
+ }
+ for (File indexLocation : indicesList) {
+ if (indexLocation.isDirectory()) {
+ indices.add(indexLocation.getName());
+ }
+ }
+ }
+ return indices;
+ }
+
+ public Set<ShardId> findAllShardIds() throws Exception {
+ if (nodeFiles == null || locks == null) {
+ throw new ElasticsearchIllegalStateException("node is not configured to store local location");
+ }
+ Set<ShardId> shardIds = Sets.newHashSet();
+ for (File indicesLocation : nodeIndicesLocations) {
+ File[] indicesList = indicesLocation.listFiles();
+ if (indicesList == null) {
+ continue;
+ }
+ for (File indexLocation : indicesList) {
+ if (!indexLocation.isDirectory()) {
+ continue;
+ }
+ String indexName = indexLocation.getName();
+ File[] shardsList = indexLocation.listFiles();
+ if (shardsList == null) {
+ continue;
+ }
+ for (File shardLocation : shardsList) {
+ if (!shardLocation.isDirectory()) {
+ continue;
+ }
+ Integer shardId = Ints.tryParse(shardLocation.getName());
+ if (shardId != null) {
+ shardIds.add(new ShardId(indexName, shardId));
+ }
+ }
+ }
+ }
+ return shardIds;
+ }
+
+ public void close() {
+ if (locks != null) {
+ for (Lock lock : locks) {
+ try {
+ logger.trace("releasing lock [{}]", lock);
+ lock.release();
+ } catch (IOException e) {
+ logger.trace("failed to release lock [{}]", e, lock);
+ }
+ }
+ }
+ }
+}
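
The constructor above probes lock slots 0 through node.max_local_storage_nodes - 1 under each data directory, so several nodes can share one machine without sharing on-disk state. A condensed sketch of a single-directory probe, using the same Lucene lock API as the code:

    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.NativeFSLockFactory;
    import java.io.File;
    import java.io.IOException;

    // Condensed sketch of the lock probing in the constructor: try slot
    // directories 0..maxSlots-1 and keep the first node.lock we can obtain.
    class NodeLockProbeSketch {
        static Lock lockFirstFreeSlot(File dataDir, int maxSlots) throws IOException {
            for (int slot = 0; slot < maxSlots; slot++) {
                File dir = new File(new File(dataDir, "nodes"), Integer.toString(slot));
                dir.mkdirs();
                Lock lock = new NativeFSLockFactory(dir).makeLock("node.lock");
                if (lock.obtain()) {
                    return lock; // caller keeps it and releases it on close()
                }
            }
            return null; // every slot is held by another node on this machine
        }
    }
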
diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironmentModule.java b/src/main/java/org/elasticsearch/env/NodeEnvironmentModule.java
new file mode 100644
index 0000000..162c310
--- /dev/null
+++ b/src/main/java/org/elasticsearch/env/NodeEnvironmentModule.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.env;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class NodeEnvironmentModule extends AbstractModule {
+
+ private final NodeEnvironment nodeEnvironment;
+
+ public NodeEnvironmentModule() {
+ this(null);
+ }
+
+ public NodeEnvironmentModule(@Nullable NodeEnvironment nodeEnvironment) {
+ this.nodeEnvironment = nodeEnvironment;
+ }
+
+ @Override
+ protected void configure() {
+ if (nodeEnvironment != null) {
+ bind(NodeEnvironment.class).toInstance(nodeEnvironment);
+ } else {
+ bind(NodeEnvironment.class).asEagerSingleton();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/gateway/Gateway.java b/src/main/java/org/elasticsearch/gateway/Gateway.java
new file mode 100644
index 0000000..f693c91
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/Gateway.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Module;
+
+/**
+ *
+ */
+public interface Gateway extends LifecycleComponent<Gateway> {
+
+ String type();
+
+ void performStateRecovery(GatewayStateRecoveredListener listener) throws GatewayException;
+
+ Class<? extends Module> suggestIndexGateway();
+
+ void reset() throws Exception;
+
+ interface GatewayStateRecoveredListener {
+ void onSuccess(ClusterState recoveredState);
+
+ void onFailure(String message);
+ }
+}
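
A gateway reports the outcome of state recovery through the listener rather than a return value. A minimal illustrative caller, assuming only the interface defined above (GatewayRecoverySketch is hypothetical):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.gateway.Gateway;

    // Illustrative caller: exactly one of the two callbacks is invoked, so the
    // caller decides how to react to either outcome.
    class GatewayRecoverySketch {
        static void recover(Gateway gateway) {
            gateway.performStateRecovery(new Gateway.GatewayStateRecoveredListener() {
                @Override
                public void onSuccess(ClusterState recoveredState) {
                    System.out.println("recovered state version " + recoveredState.version());
                }

                @Override
                public void onFailure(String message) {
                    System.out.println("state recovery failed: " + message);
                }
            });
        }
    }
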
diff --git a/src/main/java/org/elasticsearch/gateway/GatewayException.java b/src/main/java/org/elasticsearch/gateway/GatewayException.java
new file mode 100644
index 0000000..498808d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/GatewayException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class GatewayException extends ElasticsearchException {
+
+ public GatewayException(String msg) {
+ super(msg);
+ }
+
+ public GatewayException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/GatewayModule.java b/src/main/java/org/elasticsearch/gateway/GatewayModule.java
new file mode 100644
index 0000000..b8b2c0d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/GatewayModule.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.local.LocalGatewayModule;
+
+/**
+ *
+ */
+public class GatewayModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public GatewayModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(Modules.createModule(settings.getAsClass("gateway.type", LocalGatewayModule.class, "org.elasticsearch.gateway.", "GatewayModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ bind(GatewayService.class).asEagerSingleton();
+ }
+}
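
spawnModules() above resolves the gateway.type setting to a module class, defaulting to LocalGatewayModule when the setting is absent. A hedged sketch of that lookup; the "local" shorthand shown is an assumption about how getAsClass expands values using the given package prefix and class-name suffix.

    import org.elasticsearch.common.inject.Module;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.gateway.local.LocalGatewayModule;

    class GatewaySelectionSketch {
        static Class<? extends Module> resolveGatewayModule(Settings settings) {
            // same call as spawnModules(): falls back to LocalGatewayModule and
            // expands shorthand values with the package prefix / class suffix
            return settings.getAsClass("gateway.type", LocalGatewayModule.class,
                    "org.elasticsearch.gateway.", "GatewayModule");
        }

        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("gateway.type", "local") // assumed shorthand; local is the default anyway
                    .build();
            System.out.println(resolveGatewayModule(settings));
        }
    }
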
diff --git a/src/main/java/org/elasticsearch/gateway/GatewayService.java b/src/main/java/org/elasticsearch/gateway/GatewayService.java
new file mode 100644
index 0000000..e4ef239
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/GatewayService.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ *
+ */
+public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {
+
+ public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
+
+ private final Gateway gateway;
+
+ private final ThreadPool threadPool;
+
+ private final AllocationService allocationService;
+
+ private final ClusterService clusterService;
+
+ private final DiscoveryService discoveryService;
+
+ private final TimeValue recoverAfterTime;
+ private final int recoverAfterNodes;
+ private final int expectedNodes;
+ private final int recoverAfterDataNodes;
+ private final int expectedDataNodes;
+ private final int recoverAfterMasterNodes;
+ private final int expectedMasterNodes;
+
+
+ private final AtomicBoolean recovered = new AtomicBoolean();
+ private final AtomicBoolean scheduledRecovery = new AtomicBoolean();
+
+ @Inject
+ public GatewayService(Settings settings, Gateway gateway, AllocationService allocationService, ClusterService clusterService, DiscoveryService discoveryService, ThreadPool threadPool) {
+ super(settings);
+ this.gateway = gateway;
+ this.allocationService = allocationService;
+ this.clusterService = clusterService;
+ this.discoveryService = discoveryService;
+ this.threadPool = threadPool;
+ // allows controlling a delay before state recovery kicks in (and hence before indices get created)
+ this.recoverAfterTime = componentSettings.getAsTime("recover_after_time", null);
+ this.recoverAfterNodes = componentSettings.getAsInt("recover_after_nodes", -1);
+ this.expectedNodes = componentSettings.getAsInt("expected_nodes", -1);
+ this.recoverAfterDataNodes = componentSettings.getAsInt("recover_after_data_nodes", -1);
+ this.expectedDataNodes = componentSettings.getAsInt("expected_data_nodes", -1);
+ // default the recover after master nodes to the minimum master nodes in the discovery
+ this.recoverAfterMasterNodes = componentSettings.getAsInt("recover_after_master_nodes", settings.getAsInt("discovery.zen.minimum_master_nodes", -1));
+ this.expectedMasterNodes = componentSettings.getAsInt("expected_master_nodes", -1);
+
+ // add the not-recovered block as an initial state block; nothing is allowed until the state is recovered
+ this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ gateway.start();
+ // if we received the initial state, see if we can recover within the start phase, so we hold the
+ // node back from starting until we have recovered properly
+ if (discoveryService.initialStateReceived()) {
+ ClusterState clusterState = clusterService.state();
+ DiscoveryNodes nodes = clusterState.nodes();
+ if (clusterState.nodes().localNodeMaster() && clusterState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
+ if (clusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
+ logger.debug("not recovering from gateway, no master elected yet");
+ } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
+ logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
+ } else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
+ logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
+ } else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
+ logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
+ } else {
+ boolean ignoreRecoverAfterTime;
+ if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
+ // no expectation is set, don't ignore the timeout
+ ignoreRecoverAfterTime = false;
+ } else {
+ // at least one expectation is set; ignore the timeout only if all of the configured ones are met
+ ignoreRecoverAfterTime = true;
+ if (expectedNodes != -1 && (nodes.masterAndDataNodes().size() < expectedNodes)) { // does not meet the expected...
+ ignoreRecoverAfterTime = false;
+ }
+ if (expectedMasterNodes != -1 && (nodes.masterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
+ ignoreRecoverAfterTime = false;
+ }
+ if (expectedDataNodes != -1 && (nodes.dataNodes().size() < expectedDataNodes)) { // does not meet the expected...
+ ignoreRecoverAfterTime = false;
+ }
+ }
+ performStateRecovery(ignoreRecoverAfterTime);
+ }
+ }
+ } else {
+ logger.debug("can't wait on start for (possibly) reading state from gateway, will do it asynchronously");
+ }
+ clusterService.addLast(this);
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ clusterService.remove(this);
+ gateway.stop();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ gateway.close();
+ }
+
+ @Override
+ public void clusterChanged(final ClusterChangedEvent event) {
+ if (lifecycle.stoppedOrClosed()) {
+ return;
+ }
+ if (event.state().blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
+ // we need to clear those flags, since we might need to recover again in case we disconnect
+ // from the cluster and then reconnect
+ recovered.set(false);
+ scheduledRecovery.set(false);
+ }
+ if (event.localNodeMaster() && event.state().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
+ ClusterState clusterState = event.state();
+ DiscoveryNodes nodes = clusterState.nodes();
+ if (event.state().blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
+ logger.debug("not recovering from gateway, no master elected yet");
+ } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
+ logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
+ } else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
+ logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
+ } else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
+ logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
+ } else {
+ boolean ignoreRecoverAfterTime;
+ if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
+ // no expectation is set, don't ignore the timeout
+ ignoreRecoverAfterTime = false;
+ } else {
+ // at least one expectation is set; ignore the timeout only if all of the configured ones are met
+ ignoreRecoverAfterTime = true;
+ if (expectedNodes != -1 && (nodes.masterAndDataNodes().size() < expectedNodes)) { // does not meet the expected...
+ ignoreRecoverAfterTime = false;
+ }
+ if (expectedMasterNodes != -1 && (nodes.masterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
+ ignoreRecoverAfterTime = false;
+ }
+ if (expectedDataNodes != -1 && (nodes.dataNodes().size() < expectedDataNodes)) { // does not meet the expected...
+ ignoreRecoverAfterTime = false;
+ }
+ }
+ final boolean fIgnoreRecoverAfterTime = ignoreRecoverAfterTime;
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ performStateRecovery(fIgnoreRecoverAfterTime);
+ }
+ });
+ }
+ }
+ }
+
+ private void performStateRecovery(boolean ignoreRecoverAfterTime) {
+ final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener(new CountDownLatch(1));
+
+ if (!ignoreRecoverAfterTime && recoverAfterTime != null) {
+ if (scheduledRecovery.compareAndSet(false, true)) {
+ logger.debug("delaying initial state recovery for [{}]", recoverAfterTime);
+ threadPool.schedule(recoverAfterTime, ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ if (recovered.compareAndSet(false, true)) {
+ logger.trace("performing state recovery...");
+ gateway.performStateRecovery(recoveryListener);
+ }
+ }
+ });
+ }
+ } else {
+ if (recovered.compareAndSet(false, true)) {
+ logger.trace("performing state recovery...");
+ gateway.performStateRecovery(recoveryListener);
+ }
+ }
+ }
+
+ class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {
+
+ private final CountDownLatch latch;
+
+ GatewayRecoveryListener(CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public void onSuccess(final ClusterState recoveredState) {
+ logger.trace("successful state recovery, importing cluster state...");
+ clusterService.submitStateUpdateTask("local-gateway-elected-state", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ assert currentState.metaData().indices().isEmpty();
+
+ // remove the block, since we recovered from gateway
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder()
+ .blocks(currentState.blocks())
+ .blocks(recoveredState.blocks())
+ .removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK);
+
+ MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
+ // automatically generate a UUID for the metadata if we need to
+ metaDataBuilder.generateUuidIfNeeded();
+
+ if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) {
+ blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+ }
+
+ for (IndexMetaData indexMetaData : recoveredState.metaData()) {
+ metaDataBuilder.put(indexMetaData, false);
+ blocks.addBlocks(indexMetaData);
+ }
+
+ // update the state to reflect the new metadata and routing
+ ClusterState updatedState = ClusterState.builder(currentState)
+ .blocks(blocks)
+ .metaData(metaDataBuilder)
+ .build();
+
+ // initialize all index routing tables as empty
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable());
+ for (ObjectCursor<IndexMetaData> cursor : updatedState.metaData().indices().values()) {
+ routingTableBuilder.addAsRecovery(cursor.value);
+ }
+ // start with 0 based versions for routing table
+ routingTableBuilder.version(0);
+
+ // now, reroute
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
+
+ return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size());
+ latch.countDown();
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(String message) {
+ recovered.set(false);
+ scheduledRecovery.set(false);
+ // don't remove the block here, we don't want to allow anything in such a case
+ logger.info("metadata state not restored, reason: {}", message);
+ }
+ }
+}
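
The expected_* checks appear verbatim in both doStart() and clusterChanged() above. A distilled sketch of that decision follows, under the code's own convention that -1 means a threshold is not configured (RecoveryGateSketch is hypothetical):

    // Distilled from the duplicated logic in doStart() and clusterChanged():
    // the recover_after_time delay is skipped only when every configured
    // expected_* threshold is already met.
    class RecoveryGateSketch {
        static boolean ignoreRecoverAfterTime(int nodes, int masterNodes, int dataNodes,
                                              int expectedNodes, int expectedMasterNodes, int expectedDataNodes) {
            if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
                return false; // nothing is expected, so honor the configured delay
            }
            boolean ignore = true;
            if (expectedNodes != -1 && nodes < expectedNodes) {
                ignore = false;
            }
            if (expectedMasterNodes != -1 && masterNodes < expectedMasterNodes) {
                ignore = false;
            }
            if (expectedDataNodes != -1 && dataNodes < expectedDataNodes) {
                ignore = false;
            }
            return ignore;
        }
    }
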
diff --git a/src/main/java/org/elasticsearch/gateway/blobstore/BlobReuseExistingGatewayAllocator.java b/src/main/java/org/elasticsearch/gateway/blobstore/BlobReuseExistingGatewayAllocator.java
new file mode 100644
index 0000000..1fcd4b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/blobstore/BlobReuseExistingGatewayAllocator.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.blobstore;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.GatewayAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.index.gateway.CommitPoint;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.internal.InternalNode;
+import org.elasticsearch.transport.ConnectTransportException;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ *
+ */
+public class BlobReuseExistingGatewayAllocator extends AbstractComponent implements GatewayAllocator {
+
+ private final Node node;
+
+ private final TransportNodesListShardStoreMetaData listShardStoreMetaData;
+
+ private final TimeValue listTimeout;
+
+ private final ConcurrentMap<ShardId, CommitPoint> cachedCommitPoints = ConcurrentCollections.newConcurrentMap();
+
+ private final ConcurrentMap<ShardId, Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData>> cachedStores = ConcurrentCollections.newConcurrentMap();
+
+ @Inject
+ public BlobReuseExistingGatewayAllocator(Settings settings, Node node,
+ TransportNodesListShardStoreMetaData transportNodesListShardStoreMetaData) {
+ super(settings);
+ this.node = node; // YUCK! we need the Gateway, but injecting it directly creates a circular dependency, so we go through the Node
+ this.listShardStoreMetaData = transportNodesListShardStoreMetaData;
+
+ this.listTimeout = componentSettings.getAsTime("list_timeout", TimeValue.timeValueSeconds(30));
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ for (ShardRouting shardRouting : allocation.startedShards()) {
+ cachedCommitPoints.remove(shardRouting.shardId());
+ cachedStores.remove(shardRouting.shardId());
+ }
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ for (ShardRouting failedShard : allocation.failedShards()) {
+ cachedCommitPoints.remove(failedShard.shardId());
+ cachedStores.remove(failedShard.shardId());
+ }
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ boolean changed = false;
+
+ DiscoveryNodes nodes = allocation.nodes();
+ RoutingNodes routingNodes = allocation.routingNodes();
+
+ if (nodes.dataNodes().isEmpty()) {
+ return changed;
+ }
+
+ if (!routingNodes.hasUnassigned()) {
+ return changed;
+ }
+
+ Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
+ while (unassignedIterator.hasNext()) {
+ MutableShardRouting shard = unassignedIterator.next();
+
+ // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
+ boolean canBeAllocatedToAtLeastOneNode = false;
+ for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
+ RoutingNode node = routingNodes.node(cursor.value.id());
+ if (node == null) {
+ continue;
+ }
+ // if it's THROTTLING, we are not going to allocate it to this node, so ignore it as well
+ Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (decision.type() == Decision.Type.YES) {
+ canBeAllocatedToAtLeastOneNode = true;
+ break;
+ }
+ }
+
+ if (!canBeAllocatedToAtLeastOneNode) {
+ continue;
+ }
+
+ Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores = buildShardStores(nodes, shard);
+
+ long lastSizeMatched = 0;
+ DiscoveryNode lastDiscoNodeMatched = null;
+ RoutingNode lastNodeMatched = null;
+
+ for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> nodeStoreEntry : shardStores.entrySet()) {
+ DiscoveryNode discoNode = nodeStoreEntry.getKey();
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue();
+ logger.trace("{}: checking node [{}]", shard, discoNode);
+
+ if (storeFilesMetaData == null) {
+ // already allocated on that node...
+ continue;
+ }
+
+ RoutingNode node = routingNodes.node(discoNode.id());
+ if (node == null) {
+ continue;
+ }
+
+ // check if we can allocate on that node...
+ // we only check for NO, since if this node is THROTTLING and it has enough "same data"
+ // then we will try and assign it next time
+ if (allocation.deciders().canAllocate(shard, node, allocation).type() == Decision.Type.NO) {
+ continue;
+ }
+
+ // if it is already allocated, we can't assign to it...
+ if (storeFilesMetaData.allocated()) {
+ continue;
+ }
+
+
+ // if it's a primary, it will be recovered from the gateway; find the node whose existing files are closest to it
+ if (shard.primary()) {
+ try {
+ CommitPoint commitPoint = cachedCommitPoints.get(shard.shardId());
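+ // a cache miss triggers a blob-store lookup below; CommitPoint.NULL is stored as a
+ // negative-cache sentinel so shards without a commit point don't hit the blob store on every reroute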
+ if (commitPoint == null) {
+ commitPoint = ((BlobStoreGateway) ((InternalNode) this.node).injector().getInstance(Gateway.class)).findCommitPoint(shard.index(), shard.id());
+ if (commitPoint != null) {
+ cachedCommitPoints.put(shard.shardId(), commitPoint);
+ } else {
+ cachedCommitPoints.put(shard.shardId(), CommitPoint.NULL);
+ }
+ } else if (commitPoint == CommitPoint.NULL) {
+ commitPoint = null;
+ }
+
+ if (commitPoint == null) {
+ break;
+ }
+
+ long sizeMatched = 0;
+ for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
+ CommitPoint.FileInfo fileInfo = commitPoint.findPhysicalIndexFile(storeFileMetaData.name());
+ if (fileInfo != null) {
+ if (fileInfo.isSame(storeFileMetaData)) {
+ logger.trace("{}: [{}] reusing file since it exists on remote node and on gateway", shard, storeFileMetaData.name());
+ sizeMatched += storeFileMetaData.length();
+ } else {
+ logger.trace("{}: [{}] ignore file since it exists on remote node and on gateway but is different", shard, storeFileMetaData.name());
+ }
+ } else {
+ logger.trace("{}: [{}] exists on remote node, does not exists on gateway", shard, storeFileMetaData.name());
+ }
+ }
+ if (sizeMatched > lastSizeMatched) {
+ lastSizeMatched = sizeMatched;
+ lastDiscoNodeMatched = discoNode;
+ lastNodeMatched = node;
+ logger.trace("{}: node elected for pre_allocation [{}], total_size_matched [{}]", shard, discoNode, new ByteSizeValue(sizeMatched));
+ } else {
+ logger.trace("{}: node ignored for pre_allocation [{}], total_size_matched [{}] smaller than last_size_matched [{}]", shard, discoNode, new ByteSizeValue(sizeMatched), new ByteSizeValue(lastSizeMatched));
+ }
+ } catch (Exception e) {
+ // failed, log and try and allocate based on size
+ logger.debug("Failed to guess allocation of primary based on gateway for " + shard, e);
+ }
+ } else {
+ // if it's a replica, see if there is a primary that *is* allocated, and try to assign it to the node closest to that primary's files
+ // note: since we replicate operations, the files might not be identical (different flush intervals)
+ MutableShardRouting primaryShard = routingNodes.activePrimary(shard);
+ if (primaryShard != null) {
+ assert primaryShard.active();
+ DiscoveryNode primaryNode = nodes.get(primaryShard.currentNodeId());
+ if (primaryNode != null) {
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryNodeStore = shardStores.get(primaryNode);
+ if (primaryNodeStore != null && primaryNodeStore.allocated()) {
+ long sizeMatched = 0;
+
+ for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
+ if (primaryNodeStore.fileExists(storeFileMetaData.name()) && primaryNodeStore.file(storeFileMetaData.name()).isSame(storeFileMetaData)) {
+ sizeMatched += storeFileMetaData.length();
+ }
+ }
+ if (sizeMatched > lastSizeMatched) {
+ lastSizeMatched = sizeMatched;
+ lastDiscoNodeMatched = discoNode;
+ lastNodeMatched = node;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (lastNodeMatched != null) {
+ if (allocation.deciders().canAllocate(shard, lastNodeMatched, allocation).type() == Decision.Type.THROTTLE) {
+ if (logger.isTraceEnabled()) {
+ logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
+ }
+ // we are throttling this, but we have enough to allocate to this node, ignore it for now
+ unassignedIterator.remove();
+ routingNodes.ignoredUnassigned().add(shard);
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
+ }
+ // we found a match
+ changed = true;
+ allocation.routingNodes().assign(shard, lastNodeMatched.nodeId());
+ unassignedIterator.remove();
+ }
+ }
+ }
+ return changed;
+ }
+
+ private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> buildShardStores(DiscoveryNodes nodes, MutableShardRouting shard) {
+ Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores = cachedStores.get(shard.shardId());
+ ObjectOpenHashSet<String> nodesIds;
+ if (shardStores == null) {
+ shardStores = Maps.newHashMap();
+ cachedStores.put(shard.shardId(), shardStores);
+ nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
+ } else {
+ nodesIds = ObjectOpenHashSet.newInstance();
+ // clean nodes that have failed
+ for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
+ DiscoveryNode node = it.next();
+ if (!nodes.nodeExists(node.id())) {
+ it.remove();
+ }
+ }
+
+ for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
+ DiscoveryNode node = cursor.value;
+ if (!shardStores.containsKey(node)) {
+ nodesIds.add(node.id());
+ }
+ }
+ }
+
+ if (!nodesIds.isEmpty()) {
+ String[] nodesIdsArray = nodesIds.toArray(String.class);
+ TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData = listShardStoreMetaData.list(shard.shardId(), false, nodesIdsArray, listTimeout).actionGet();
+ if (logger.isTraceEnabled()) {
+ if (nodesStoreFilesMetaData.failures().length > 0) {
+ StringBuilder sb = new StringBuilder(shard + ": failures when trying to list stores on nodes:");
+ for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
+ Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
+ if (cause instanceof ConnectTransportException) {
+ continue;
+ }
+ sb.append("\n -> ").append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
+ }
+ logger.trace(sb.toString());
+ }
+ }
+
+ for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData : nodesStoreFilesMetaData) {
+ if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
+ shardStores.put(nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
+ }
+ }
+ }
+
+ return shardStores;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGateway.java b/src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGateway.java
new file mode 100644
index 0000000..904c92c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGateway.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.blobstore;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.blobstore.*;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.gateway.GatewayException;
+import org.elasticsearch.gateway.shared.SharedStorageGateway;
+import org.elasticsearch.index.gateway.CommitPoint;
+import org.elasticsearch.index.gateway.CommitPoints;
+import org.elasticsearch.index.gateway.blobstore.BlobStoreIndexGateway;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public abstract class BlobStoreGateway extends SharedStorageGateway {
+
+ private BlobStore blobStore;
+
+ private ByteSizeValue chunkSize;
+
+ private BlobPath basePath;
+
+ private ImmutableBlobContainer metaDataBlobContainer;
+
+ private boolean compress;
+
+ private volatile int currentIndex;
+
+ protected BlobStoreGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService) {
+ super(settings, threadPool, clusterService);
+ }
+
+ protected void initialize(BlobStore blobStore, ClusterName clusterName, @Nullable ByteSizeValue defaultChunkSize) throws IOException {
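+ // gateway layout on the blob store: everything lives under the cluster name; "metadata" holds the
+ // metadata-N generations, and per-index shard data lives under "indices" (see delete() and findCommitPoint())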
+ this.blobStore = blobStore;
+ this.chunkSize = componentSettings.getAsBytesSize("chunk_size", defaultChunkSize);
+ this.basePath = BlobPath.cleanPath().add(clusterName.value());
+ this.metaDataBlobContainer = blobStore.immutableBlobContainer(basePath.add("metadata"));
+ this.currentIndex = findLatestIndex();
+ this.compress = componentSettings.getAsBoolean("compress", true);
+ logger.debug("Latest metadata found at index [" + currentIndex + "]");
+ }
+
+ @Override
+ public String toString() {
+ return type() + "://" + blobStore + "/" + basePath;
+ }
+
+ public BlobStore blobStore() {
+ return blobStore;
+ }
+
+ public BlobPath basePath() {
+ return basePath;
+ }
+
+ public ByteSizeValue chunkSize() {
+ return this.chunkSize;
+ }
+
+ @Override
+ public void reset() throws Exception {
+ blobStore.delete(BlobPath.cleanPath());
+ }
+
+ @Override
+ public MetaData read() throws GatewayException {
+ try {
+ this.currentIndex = findLatestIndex();
+ } catch (IOException e) {
+ throw new GatewayException("Failed to find latest metadata to read from", e);
+ }
+ if (currentIndex == -1) {
+ return null;
+ }
+ String metaData = "metadata-" + currentIndex;
+
+ try {
+ return readMetaData(metaDataBlobContainer.readBlobFully(metaData));
+ } catch (GatewayException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new GatewayException("Failed to read metadata [" + metaData + "] from gateway", e);
+ }
+ }
+
+ public CommitPoint findCommitPoint(String index, int shardId) throws IOException {
+ BlobPath path = BlobStoreIndexGateway.shardPath(basePath, index, shardId);
+ ImmutableBlobContainer container = blobStore.immutableBlobContainer(path);
+ ImmutableMap<String, BlobMetaData> blobs = container.listBlobs();
+ List<CommitPoint> commitPointsList = Lists.newArrayList();
+ for (BlobMetaData md : blobs.values()) {
+ if (md.length() == 0) { // a commit point that was not flushed yet...
+ continue;
+ }
+ if (md.name().startsWith("commit-")) {
+ try {
+ commitPointsList.add(CommitPoints.fromXContent(container.readBlobFully(md.name())));
+ } catch (Exception e) {
+ logger.warn("failed to read commit point at path {} with name [{}]", e, path, md.name());
+ }
+ }
+ }
+ CommitPoints commitPoints = new CommitPoints(commitPointsList);
+ if (commitPoints.commits().isEmpty()) {
+ return null;
+ }
+ return commitPoints.commits().get(0);
+ }
+
+ @Override
+ protected void delete(IndexMetaData indexMetaData) throws ElasticsearchException {
+ BlobPath indexPath = basePath().add("indices").add(indexMetaData.index());
+ blobStore.delete(indexPath);
+ }
+
+ @Override
+ public void write(MetaData metaData) throws GatewayException {
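+ // metadata is written as a new metadata-(N+1) generation first; older generations are only deleted
+ // after the new blob is safely written, so a crash mid-write never loses the last good state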
+ final String newMetaData = "metadata-" + (currentIndex + 1);
+ try {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = bStream;
+ if (compress) {
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ MetaData.Builder.toXContent(metaData, builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.close();
+ metaDataBlobContainer.writeBlob(newMetaData, bStream.bytes().streamInput(), bStream.bytes().length());
+ } catch (IOException e) {
+ throw new GatewayException("Failed to write metadata [" + newMetaData + "]", e);
+ }
+
+ currentIndex++;
+
+ try {
+ metaDataBlobContainer.deleteBlobsByFilter(new BlobContainer.BlobNameFilter() {
+ @Override
+ public boolean accept(String blobName) {
+ return blobName.startsWith("metadata-") && !newMetaData.equals(blobName);
+ }
+ });
+ } catch (IOException e) {
+ logger.debug("Failed to delete old metadata, will do it next time", e);
+ }
+ }
+
+ private int findLatestIndex() throws IOException {
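+ // scan all metadata-N blobs and keep the highest N that actually parses; a blob that fails to
+ // read is skipped, so a torn write of the newest generation falls back to the previous one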
+ ImmutableMap<String, BlobMetaData> blobs = metaDataBlobContainer.listBlobsByPrefix("metadata-");
+
+ int index = -1;
+ for (BlobMetaData md : blobs.values()) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[findLatestMetadata]: Processing [" + md.name() + "]");
+ }
+ String name = md.name();
+ int fileIndex = Integer.parseInt(name.substring(name.indexOf('-') + 1));
+ if (fileIndex >= index) {
+ // try and read the meta data
+ byte[] data = null;
+ try {
+ data = metaDataBlobContainer.readBlobFully(name);
+ readMetaData(data);
+ index = fileIndex;
+ } catch (IOException e) {
+ logger.warn("[findLatestMetadata]: failed to read metadata from [{}], data_length [{}] ignoring...", e, name, data == null ? "na" : data.length);
+ }
+ }
+ }
+
+ return index;
+ }
+
+ private MetaData readMetaData(byte[] data) throws IOException {
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ return MetaData.Builder.fromXContent(parser);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGatewayModule.java b/src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGatewayModule.java
new file mode 100644
index 0000000..4844861
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGatewayModule.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.blobstore;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+
+/**
+ *
+ */
+public abstract class BlobStoreGatewayModule extends AbstractModule implements PreProcessModule {
+
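+ // PreProcessModule lets this module hook other modules as they are constructed; here we swap the
+ // blob-store-aware allocator into the shards allocator so unassigned shards can reuse gateway data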
+ @Override
+ public void processModule(Module module) {
+ if (module instanceof ShardsAllocatorModule) {
+ ((ShardsAllocatorModule) module).setGatewayAllocator(BlobReuseExistingGatewayAllocator.class);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/fs/FsGateway.java b/src/main/java/org/elasticsearch/gateway/fs/FsGateway.java
new file mode 100644
index 0000000..1002da5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/fs/FsGateway.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.fs;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.gateway.blobstore.BlobStoreGateway;
+import org.elasticsearch.index.gateway.fs.FsIndexGatewayModule;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public class FsGateway extends BlobStoreGateway {
+
+ private final ExecutorService concurrentStreamPool;
+
+ @Inject
+ public FsGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ Environment environment, ClusterName clusterName) throws IOException {
+ super(settings, threadPool, clusterService);
+
+ File gatewayFile;
+ String location = componentSettings.get("location");
+ if (location == null) {
+ logger.warn("using local fs location for gateway, should be changed to be a shared location across nodes");
+ gatewayFile = new File(environment.dataFiles()[0], "gateway");
+ } else {
+ gatewayFile = new File(location);
+ }
+
+ int concurrentStreams = componentSettings.getAsInt("concurrent_streams", 5);
+ this.concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[fs_stream]"));
+
+ initialize(new FsBlobStore(componentSettings, concurrentStreamPool, gatewayFile), clusterName, null);
+ }
+
+ @Override
+ public String type() {
+ return "fs";
+ }
+
+ @Override
+ public Class<? extends Module> suggestIndexGateway() {
+ return FsIndexGatewayModule.class;
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ super.doClose();
+ concurrentStreamPool.shutdown();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java b/src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java
new file mode 100644
index 0000000..c8dd4b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.fs;
+
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.gateway.blobstore.BlobStoreGatewayModule;
+
+/**
+ *
+ */
+public class FsGatewayModule extends BlobStoreGatewayModule {
+
+ @Override
+ protected void configure() {
+ bind(Gateway.class).to(FsGateway.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/LocalGateway.java b/src/main/java/org/elasticsearch/gateway/local/LocalGateway.java
new file mode 100644
index 0000000..120d423
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/LocalGateway.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.gateway.GatewayException;
+import org.elasticsearch.gateway.local.state.meta.LocalGatewayMetaState;
+import org.elasticsearch.gateway.local.state.meta.TransportNodesListGatewayMetaState;
+import org.elasticsearch.gateway.local.state.shards.LocalGatewayShardsState;
+import org.elasticsearch.index.gateway.local.LocalIndexGatewayModule;
+
+/**
+ *
+ */
+public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements Gateway, ClusterStateListener {
+
+ private final ClusterService clusterService;
+
+ private final NodeEnvironment nodeEnv;
+
+ private final LocalGatewayShardsState shardsState;
+ private final LocalGatewayMetaState metaState;
+
+ private final TransportNodesListGatewayMetaState listGatewayMetaState;
+
+ private final String initialMeta;
+
+ @Inject
+ public LocalGateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv,
+ LocalGatewayShardsState shardsState, LocalGatewayMetaState metaState,
+ TransportNodesListGatewayMetaState listGatewayMetaState) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.nodeEnv = nodeEnv;
+ this.metaState = metaState;
+ this.listGatewayMetaState = listGatewayMetaState;
+
+ this.shardsState = shardsState;
+
+ clusterService.addLast(this);
+
+ // default the number of metadata copies required for recovery to the minimum master nodes setting
+ this.initialMeta = componentSettings.get("initial_meta", settings.get("discovery.zen.minimum_master_nodes", "1"));
+ }
+
+ @Override
+ public String type() {
+ return "local";
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ clusterService.remove(this);
+ }
+
+ @Override
+ public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
+ ObjectOpenHashSet<String> nodesIds = ObjectOpenHashSet.from(clusterService.state().nodes().masterNodes().keys());
+ logger.trace("performing state recovery from {}", nodesIds);
+ TransportNodesListGatewayMetaState.NodesLocalGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds.toArray(String.class), null).actionGet();
+
+
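+ // initial_meta controls how many nodes must report on-disk metadata before recovery proceeds; it
+ // accepts quorum, quorum-1/half, one, full/all, full-1/all-1, or an explicit number (parsed below)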
+ int requiredAllocation = 1;
+ try {
+ if ("quorum".equals(initialMeta)) {
+ if (nodesIds.size() > 2) {
+ requiredAllocation = (nodesIds.size() / 2) + 1;
+ }
+ } else if ("quorum-1".equals(initialMeta) || "half".equals(initialMeta)) {
+ if (nodesIds.size() > 2) {
+ requiredAllocation = ((1 + nodesIds.size()) / 2);
+ }
+ } else if ("one".equals(initialMeta)) {
+ requiredAllocation = 1;
+ } else if ("full".equals(initialMeta) || "all".equals(initialMeta)) {
+ requiredAllocation = nodesIds.size();
+ } else if ("full-1".equals(initialMeta) || "all-1".equals(initialMeta)) {
+ if (nodesIds.size() > 1) {
+ requiredAllocation = nodesIds.size() - 1;
+ }
+ } else {
+ requiredAllocation = Integer.parseInt(initialMeta);
+ }
+ } catch (Exception e) {
+ logger.warn("failed to derived initial_meta from value {}", initialMeta);
+ }
+
+ if (nodesState.failures().length > 0) {
+ for (FailedNodeException failedNodeException : nodesState.failures()) {
+ logger.warn("failed to fetch state from node", failedNodeException);
+ }
+ }
+
+ ObjectFloatOpenHashMap<String> indices = new ObjectFloatOpenHashMap<String>();
+ MetaData electedGlobalState = null;
+ int found = 0;
+ for (TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState nodeState : nodesState) {
+ if (nodeState.metaData() == null) {
+ continue;
+ }
+ found++;
+ if (electedGlobalState == null) {
+ electedGlobalState = nodeState.metaData();
+ } else if (nodeState.metaData().version() > electedGlobalState.version()) {
+ electedGlobalState = nodeState.metaData();
+ }
+ for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
+ indices.addTo(cursor.value.index(), 1);
+ }
+ }
+ if (found < requiredAllocation) {
+ listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
+ return;
+ }
+ // take the elected global state and strip its indices; each index is elected separately in the next phase
+ MetaData.Builder metaDataBuilder = MetaData.builder(electedGlobalState).removeAllIndices();
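+ // iterate the hppc map's backing arrays directly: allocated marks live slots, keys holds the index names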
+ final boolean[] states = indices.allocated;
+ final Object[] keys = indices.keys;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ String index = (String) keys[i];
+ IndexMetaData electedIndexMetaData = null;
+ int indexMetaDataCount = 0;
+ for (TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState nodeState : nodesState) {
+ if (nodeState.metaData() == null) {
+ continue;
+ }
+ IndexMetaData indexMetaData = nodeState.metaData().index(index);
+ if (indexMetaData == null) {
+ continue;
+ }
+ if (electedIndexMetaData == null) {
+ electedIndexMetaData = indexMetaData;
+ } else if (indexMetaData.version() > electedIndexMetaData.version()) {
+ electedIndexMetaData = indexMetaData;
+ }
+ indexMetaDataCount++;
+ }
+ if (electedIndexMetaData != null) {
+ if (indexMetaDataCount < requiredAllocation) {
+ logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation);
+ }
+ metaDataBuilder.put(electedIndexMetaData, false);
+ }
+ }
+ }
+ ClusterState.Builder builder = ClusterState.builder();
+ builder.metaData(metaDataBuilder);
+ listener.onSuccess(builder.build());
+ }
+
+ @Override
+ public Class<? extends Module> suggestIndexGateway() {
+ return LocalIndexGatewayModule.class;
+ }
+
+ @Override
+ public void reset() throws Exception {
+ FileSystemUtils.deleteRecursively(nodeEnv.nodeDataLocations());
+ }
+
+ @Override
+ public void clusterChanged(final ClusterChangedEvent event) {
+ // order is important, first metaState, and then shardsState
+ // so dangling indices will be recorded
+ metaState.clusterChanged(event);
+ shardsState.clusterChanged(event);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/LocalGatewayAllocator.java b/src/main/java/org/elasticsearch/gateway/local/LocalGatewayAllocator.java
new file mode 100644
index 0000000..4bc89d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/LocalGatewayAllocator.java
@@ -0,0 +1,465 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.predicates.ObjectPredicate;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.GatewayAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.gateway.local.state.shards.TransportNodesListGatewayStartedShards;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
+import org.elasticsearch.transport.ConnectTransportException;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ *
+ */
+public class LocalGatewayAllocator extends AbstractComponent implements GatewayAllocator {
+
+ public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards";
+
+ private final TransportNodesListGatewayStartedShards listGatewayStartedShards;
+
+ private final TransportNodesListShardStoreMetaData listShardStoreMetaData;
+
+ private final ConcurrentMap<ShardId, Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData>> cachedStores = ConcurrentCollections.newConcurrentMap();
+
+ private final ConcurrentMap<ShardId, ObjectLongOpenHashMap<DiscoveryNode>> cachedShardsState = ConcurrentCollections.newConcurrentMap();
+
+ private final TimeValue listTimeout;
+
+ private final String initialShards;
+
+ @Inject
+ public LocalGatewayAllocator(Settings settings,
+ TransportNodesListGatewayStartedShards listGatewayStartedShards, TransportNodesListShardStoreMetaData listShardStoreMetaData) {
+ super(settings);
+ this.listGatewayStartedShards = listGatewayStartedShards;
+ this.listShardStoreMetaData = listShardStoreMetaData;
+
+ this.listTimeout = componentSettings.getAsTime("list_timeout", TimeValue.timeValueSeconds(30));
+ this.initialShards = componentSettings.get("initial_shards", "quorum");
+
+ logger.debug("using initial_shards [{}], list_timeout [{}]", initialShards, listTimeout);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ for (ShardRouting shardRouting : allocation.startedShards()) {
+ cachedStores.remove(shardRouting.shardId());
+ cachedShardsState.remove(shardRouting.shardId());
+ }
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ for (ShardRouting failedShard : allocation.failedShards()) {
+ cachedStores.remove(failedShard.shardId());
+ cachedShardsState.remove(failedShard.shardId());
+ }
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ boolean changed = false;
+ DiscoveryNodes nodes = allocation.nodes();
+ RoutingNodes routingNodes = allocation.routingNodes();
+
+ // First, handle primaries, they must find a place to be allocated on here
+ Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
+ while (unassignedIterator.hasNext()) {
+ MutableShardRouting shard = unassignedIterator.next();
+
+ if (!shard.primary()) {
+ continue;
+ }
+
+ // the primary was never allocated after being created through the API, so this is a fresh index with no on-disk data to recover
+ if (!routingNodes.routingTable().index(shard.index()).shard(shard.id()).primaryAllocatedPostApi()) {
+ continue;
+ }
+
+ ObjectLongOpenHashMap<DiscoveryNode> nodesState = buildShardStates(nodes, shard);
+
+ int numberOfAllocationsFound = 0;
+ long highestVersion = -1;
+ Set<DiscoveryNode> nodesWithHighestVersion = Sets.newHashSet();
+ final boolean[] states = nodesState.allocated;
+ final Object[] keys = nodesState.keys;
+ final long[] values = nodesState.values;
+ for (int i = 0; i < states.length; i++) {
+ if (!states[i]) {
+ continue;
+ }
+
+ DiscoveryNode node = (DiscoveryNode) keys[i];
+ long version = values[i];
+ // since we don't check in NO allocation, we need to double check here
+ if (allocation.shouldIgnoreShardForNode(shard.shardId(), node.id())) {
+ continue;
+ }
+ if (version != -1) {
+ numberOfAllocationsFound++;
+ if (highestVersion == -1) {
+ nodesWithHighestVersion.add(node);
+ highestVersion = version;
+ } else {
+ if (version > highestVersion) {
+ nodesWithHighestVersion.clear();
+ nodesWithHighestVersion.add(node);
+ highestVersion = version;
+ } else if (version == highestVersion) {
+ nodesWithHighestVersion.add(node);
+ }
+ }
+ }
+ }
+
+ // check if the counts meets the minimum set
+ int requiredAllocation = 1;
+ try {
+ IndexMetaData indexMetaData = routingNodes.metaData().index(shard.index());
+ String initialShards = indexMetaData.settings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
+ if ("quorum".equals(initialShards)) {
+ if (indexMetaData.numberOfReplicas() > 1) {
+ requiredAllocation = ((1 + indexMetaData.numberOfReplicas()) / 2) + 1;
+ }
+ } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
+ if (indexMetaData.numberOfReplicas() > 2) {
+ requiredAllocation = ((1 + indexMetaData.numberOfReplicas()) / 2);
+ }
+ } else if ("one".equals(initialShards)) {
+ requiredAllocation = 1;
+ } else if ("full".equals(initialShards) || "all".equals(initialShards)) {
+ requiredAllocation = indexMetaData.numberOfReplicas() + 1;
+ } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
+ if (indexMetaData.numberOfReplicas() > 1) {
+ requiredAllocation = indexMetaData.numberOfReplicas();
+ }
+ } else {
+ requiredAllocation = Integer.parseInt(initialShards);
+ }
+ } catch (Exception e) {
+ logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard);
+ }
+
+ // not enough found for this shard, continue...
+ if (numberOfAllocationsFound < requiredAllocation) {
+ // we can't really allocate, so ignore it and continue
+ unassignedIterator.remove();
+ routingNodes.ignoredUnassigned().add(shard);
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}], required_number [{}]", shard.index(), shard.id(), numberOfAllocationsFound, requiredAllocation);
+ }
+ continue;
+ }
+
+ Set<DiscoveryNode> throttledNodes = Sets.newHashSet();
+ Set<DiscoveryNode> noNodes = Sets.newHashSet();
+ for (DiscoveryNode discoNode : nodesWithHighestVersion) {
+ RoutingNode node = routingNodes.node(discoNode.id());
+ Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (decision.type() == Decision.Type.THROTTLE) {
+ throttledNodes.add(discoNode);
+ } else if (decision.type() == Decision.Type.NO) {
+ noNodes.add(discoNode);
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, discoNode);
+ }
+ // we found a match
+ changed = true;
+ // make sure we create one with the version from the recovered state
+ allocation.routingNodes().assign(new MutableShardRouting(shard, highestVersion), node.nodeId());
+ unassignedIterator.remove();
+
+ // found a node, so no throttling, no "no", and break out of the loop
+ throttledNodes.clear();
+ noNodes.clear();
+ break;
+ }
+ }
+ if (throttledNodes.isEmpty()) {
+ // if we have a node that we "can't" allocate to, force allocation, since this is our master data!
+ if (!noNodes.isEmpty()) {
+ DiscoveryNode discoNode = noNodes.iterator().next();
+ RoutingNode node = routingNodes.node(discoNode.id());
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, discoNode);
+ }
+ // we found a match
+ changed = true;
+ // make sure we create one with the version from the recovered state
+ allocation.routingNodes().assign(new MutableShardRouting(shard, highestVersion), node.nodeId());
+ unassignedIterator.remove();
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, throttledNodes);
+ }
+ // we are throttling this, but we have enough to allocate to this node, ignore it for now
+ unassignedIterator.remove();
+ routingNodes.ignoredUnassigned().add(shard);
+ }
+ }
+
+ if (!routingNodes.hasUnassigned()) {
+ return changed;
+ }
+
+ // Now, handle replicas, try to assign them to nodes that are similar to the one the primary was allocated on
+ unassignedIterator = routingNodes.unassigned().iterator();
+ while (unassignedIterator.hasNext()) {
+ MutableShardRouting shard = unassignedIterator.next();
+
+ // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
+ boolean canBeAllocatedToAtLeastOneNode = false;
+ for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
+ RoutingNode node = routingNodes.node(cursor.value.id());
+ if (node == null) {
+ continue;
+ }
+ // if we can't allocate it on a node, ignore it, for example, this handles
+ // cases for only allocating a replica after a primary
+ Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (decision.type() == Decision.Type.YES) {
+ canBeAllocatedToAtLeastOneNode = true;
+ break;
+ }
+ }
+
+ if (!canBeAllocatedToAtLeastOneNode) {
+ continue;
+ }
+
+ Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores = buildShardStores(nodes, shard);
+
+ long lastSizeMatched = 0;
+ DiscoveryNode lastDiscoNodeMatched = null;
+ RoutingNode lastNodeMatched = null;
+
+ for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> nodeStoreEntry : shardStores.entrySet()) {
+ DiscoveryNode discoNode = nodeStoreEntry.getKey();
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue();
+ logger.trace("{}: checking node [{}]", shard, discoNode);
+
+ if (storeFilesMetaData == null) {
+ // already allocated on that node...
+ continue;
+ }
+
+ RoutingNode node = routingNodes.node(discoNode.id());
+ if (node == null) {
+ continue;
+ }
+
+ // check if we can allocate on that node...
+ // we only check for NO, since if this node is THROTTLING and it has enough "same data"
+ // then we will try and assign it next time
+ Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+ if (decision.type() == Decision.Type.NO) {
+ continue;
+ }
+
+ // if it is already allocated, we can't assign to it...
+ if (storeFilesMetaData.allocated()) {
+ continue;
+ }
+
+ if (!shard.primary()) {
+ MutableShardRouting primaryShard = routingNodes.activePrimary(shard);
+ if (primaryShard != null) {
+ assert primaryShard.active();
+ DiscoveryNode primaryNode = nodes.get(primaryShard.currentNodeId());
+ if (primaryNode != null) {
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryNodeStore = shardStores.get(primaryNode);
+ if (primaryNodeStore != null && primaryNodeStore.allocated()) {
+ long sizeMatched = 0;
+
+ for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
+ if (primaryNodeStore.fileExists(storeFileMetaData.name()) && primaryNodeStore.file(storeFileMetaData.name()).isSame(storeFileMetaData)) {
+ sizeMatched += storeFileMetaData.length();
+ }
+ }
+ if (sizeMatched > lastSizeMatched) {
+ lastSizeMatched = sizeMatched;
+ lastDiscoNodeMatched = discoNode;
+ lastNodeMatched = node;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (lastNodeMatched != null) {
+ // we only check for THROTTLE since we already checked for NO above
+ Decision decision = allocation.deciders().canAllocate(shard, lastNodeMatched, allocation);
+ if (decision.type() == Decision.Type.THROTTLE) {
+ if (logger.isTraceEnabled()) {
+ logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
+ }
+ // we are throttling this, but we have enough to allocate to this node, ignore it for now
+ unassignedIterator.remove();
+ routingNodes.ignoredUnassigned().add(shard);
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
+ }
+ // we found a match
+ changed = true;
+ allocation.routingNodes().assign(shard, lastNodeMatched.nodeId());
+ unassignedIterator.remove();
+ }
+ }
+ }
+ return changed;
+ }
+
+ private ObjectLongOpenHashMap<DiscoveryNode> buildShardStates(final DiscoveryNodes nodes, MutableShardRouting shard) {
+ ObjectLongOpenHashMap<DiscoveryNode> shardStates = cachedShardsState.get(shard.shardId());
+ ObjectOpenHashSet<String> nodeIds;
+ if (shardStates == null) {
+ shardStates = new ObjectLongOpenHashMap<DiscoveryNode>();
+ cachedShardsState.put(shard.shardId(), shardStates);
+ nodeIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
+ } else {
+ // clean nodes that have failed
+ shardStates.keys().removeAll(new ObjectPredicate<DiscoveryNode>() {
+ @Override
+ public boolean apply(DiscoveryNode node) {
+ return !nodes.nodeExists(node.id());
+ }
+ });
+ nodeIds = ObjectOpenHashSet.newInstance();
+ // we have cached state from before; see if the node set changed and, if so, fetch from the new nodes
+ for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
+ DiscoveryNode node = cursor.value;
+ if (!shardStates.containsKey(node)) {
+ nodeIds.add(node.id());
+ }
+ }
+ }
+ if (nodeIds.isEmpty()) {
+ return shardStates;
+ }
+
+ String[] nodesIdsArray = nodeIds.toArray(String.class);
+ TransportNodesListGatewayStartedShards.NodesLocalGatewayStartedShards response = listGatewayStartedShards.list(shard.shardId(), nodesIdsArray, listTimeout).actionGet();
+ if (logger.isDebugEnabled()) {
+ if (response.failures().length > 0) {
+ StringBuilder sb = new StringBuilder(shard + ": failures when trying to list shards on nodes:");
+ for (int i = 0; i < response.failures().length; i++) {
+ Throwable cause = ExceptionsHelper.unwrapCause(response.failures()[i]);
+ if (cause instanceof ConnectTransportException) {
+ continue;
+ }
+ sb.append("\n -> ").append(response.failures()[i].getDetailedMessage());
+ }
+ logger.debug(sb.toString());
+ }
+ }
+
+ for (TransportNodesListGatewayStartedShards.NodeLocalGatewayStartedShards nodeShardState : response) {
+ // a version of -1 means the shard does not exist on that node, which is what the API returns and what we expect
+ shardStates.put(nodeShardState.getNode(), nodeShardState.version());
+ }
+ return shardStates;
+ }
+
+ private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> buildShardStores(DiscoveryNodes nodes, MutableShardRouting shard) {
+ Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores = cachedStores.get(shard.shardId());
+ ObjectOpenHashSet<String> nodesIds;
+ if (shardStores == null) {
+ shardStores = Maps.newHashMap();
+ cachedStores.put(shard.shardId(), shardStores);
+ nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
+ } else {
+ nodesIds = ObjectOpenHashSet.newInstance();
+ // clean nodes that have failed
+ for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
+ DiscoveryNode node = it.next();
+ if (!nodes.nodeExists(node.id())) {
+ it.remove();
+ }
+ }
+
+ for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
+ DiscoveryNode node = cursor.value;
+ if (!shardStores.containsKey(node)) {
+ nodesIds.add(node.id());
+ }
+ }
+ }
+
+ if (!nodesIds.isEmpty()) {
+ String[] nodesIdsArray = nodesIds.toArray(String.class);
+ TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData = listShardStoreMetaData.list(shard.shardId(), false, nodesIdsArray, listTimeout).actionGet();
+ if (logger.isTraceEnabled()) {
+ if (nodesStoreFilesMetaData.failures().length > 0) {
+ StringBuilder sb = new StringBuilder(shard + ": failures when trying to list stores on nodes:");
+ for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
+ Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
+ if (cause instanceof ConnectTransportException) {
+ continue;
+ }
+ sb.append("\n -> ").append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
+ }
+ logger.trace(sb.toString());
+ }
+ }
+
+ for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData : nodesStoreFilesMetaData) {
+ if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
+ shardStores.put(nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
+ }
+ }
+ }
+
+ return shardStores;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/LocalGatewayModule.java b/src/main/java/org/elasticsearch/gateway/local/LocalGatewayModule.java
new file mode 100644
index 0000000..9f25347
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/LocalGatewayModule.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.gateway.local.state.meta.LocalAllocateDangledIndices;
+import org.elasticsearch.gateway.local.state.meta.LocalGatewayMetaState;
+import org.elasticsearch.gateway.local.state.meta.TransportNodesListGatewayMetaState;
+import org.elasticsearch.gateway.local.state.shards.LocalGatewayShardsState;
+import org.elasticsearch.gateway.local.state.shards.TransportNodesListGatewayStartedShards;
+
+/**
+ *
+ */
+public class LocalGatewayModule extends AbstractModule implements PreProcessModule {
+
+ @Override
+ protected void configure() {
+ bind(Gateway.class).to(LocalGateway.class).asEagerSingleton();
+ bind(LocalGatewayShardsState.class).asEagerSingleton();
+ bind(TransportNodesListGatewayMetaState.class).asEagerSingleton();
+ bind(LocalGatewayMetaState.class).asEagerSingleton();
+ bind(TransportNodesListGatewayStartedShards.class).asEagerSingleton();
+ bind(LocalAllocateDangledIndices.class).asEagerSingleton();
+ }
+
+ @Override
+ public void processModule(Module module) {
+ if (module instanceof ShardsAllocatorModule) {
+ ((ShardsAllocatorModule) module).setGatewayAllocator(LocalGatewayAllocator.class);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalAllocateDangledIndices.java
new file mode 100644
index 0000000..157a1a6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalAllocateDangledIndices.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local.state.meta;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
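+ * Coordinates the import of dangling indices: nodes send the metadata of
+ * indices found on disk but missing from the cluster state to the elected
+ * master, which adds them back to the cluster state and triggers a reroute.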
+ */
+public class LocalAllocateDangledIndices extends AbstractComponent {
+
+ private final TransportService transportService;
+
+ private final ClusterService clusterService;
+
+ private final AllocationService allocationService;
+
+ @Inject
+ public LocalAllocateDangledIndices(Settings settings, TransportService transportService, ClusterService clusterService, AllocationService allocationService) {
+ super(settings);
+ this.transportService = transportService;
+ this.clusterService = clusterService;
+ this.allocationService = allocationService;
+ transportService.registerHandler(AllocateDangledRequestHandler.ACTION, new AllocateDangledRequestHandler());
+ }
+
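+ /**
+ * Sends the given dangled index metadata to the elected master for import
+ * into the cluster state; fails the listener if no master is known.
+ */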
+ public void allocateDangled(IndexMetaData[] indices, final Listener listener) {
+ ClusterState clusterState = clusterService.state();
+ DiscoveryNode masterNode = clusterState.nodes().masterNode();
+ if (masterNode == null) {
+ listener.onFailure(new MasterNotDiscoveredException("no master to send allocate dangled request"));
+ return;
+ }
+ AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices);
+ transportService.sendRequest(masterNode, AllocateDangledRequestHandler.ACTION, request, new TransportResponseHandler<AllocateDangledResponse>() {
+ @Override
+ public AllocateDangledResponse newInstance() {
+ return new AllocateDangledResponse();
+ }
+
+ @Override
+ public void handleResponse(AllocateDangledResponse response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+
+ public interface Listener {
+ void onResponse(AllocateDangledResponse response);
+
+ void onFailure(Throwable e);
+ }
+
+ class AllocateDangledRequestHandler extends BaseTransportRequestHandler<AllocateDangledRequest> {
+
+ public static final String ACTION = "/gateway/local/allocate_dangled";
+
+ @Override
+ public AllocateDangledRequest newInstance() {
+ return new AllocateDangledRequest();
+ }
+
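+ /**
+ * Runs on the master: imports any of the requested indices that are not
+ * already in the cluster state, restores their blocks and recovery
+ * routing, and triggers a reroute so their shards can be allocated.
+ */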
+ @Override
+ public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel) throws Exception {
+ String[] indexNames = new String[request.indices.length];
+ for (int i = 0; i < request.indices.length; i++) {
+ indexNames[i] = request.indices[i].index();
+ }
+ clusterService.submitStateUpdateTask("allocation dangled indices " + Arrays.toString(indexNames), new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ if (currentState.blocks().disableStatePersistence()) {
+ return currentState;
+ }
+ MetaData.Builder metaData = MetaData.builder(currentState.metaData());
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
+
+ boolean importNeeded = false;
+ StringBuilder sb = new StringBuilder();
+ for (IndexMetaData indexMetaData : request.indices) {
+ if (currentState.metaData().hasIndex(indexMetaData.index())) {
+ continue;
+ }
+ importNeeded = true;
+ metaData.put(indexMetaData, false);
+ blocks.addBlocks(indexMetaData);
+ routingTableBuilder.addAsRecovery(indexMetaData);
+ sb.append("[").append(indexMetaData.index()).append("/").append(indexMetaData.state()).append("]");
+ }
+ if (!importNeeded) {
+ return currentState;
+ }
+ logger.info("auto importing dangled indices {} from [{}]", sb.toString(), request.fromNode);
+
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(metaData).blocks(blocks).routingTable(routingTableBuilder).build();
+
+ // now, reroute
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
+
+ return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure during [{}]", t, source);
+ try {
+ channel.sendResponse(t);
+ } catch (Exception e) {
+ logger.error("failed send response for allocating dangled", e);
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ try {
+ channel.sendResponse(new AllocateDangledResponse(true));
+ } catch (IOException e) {
+ logger.error("failed send response for allocating dangled", e);
+ }
+ }
+ });
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ static class AllocateDangledRequest extends TransportRequest {
+
+ DiscoveryNode fromNode;
+ IndexMetaData[] indices;
+
+ AllocateDangledRequest() {
+ }
+
+ AllocateDangledRequest(DiscoveryNode fromNode, IndexMetaData[] indices) {
+ this.fromNode = fromNode;
+ this.indices = indices;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ fromNode = DiscoveryNode.readNode(in);
+ indices = new IndexMetaData[in.readVInt()];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = IndexMetaData.Builder.readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ fromNode.writeTo(out);
+ out.writeVInt(indices.length);
+ for (IndexMetaData indexMetaData : indices) {
+ IndexMetaData.Builder.writeTo(indexMetaData, out);
+ }
+ }
+ }
+
+ public static class AllocateDangledResponse extends TransportResponse {
+
+ private boolean ack;
+
+ AllocateDangledResponse() {
+ }
+
+ AllocateDangledResponse(boolean ack) {
+ this.ack = ack;
+ }
+
+ public boolean ack() {
+ return ack;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ ack = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBoolean(ack);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java
new file mode 100644
index 0000000..c3f42e5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java
@@ -0,0 +1,695 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local.state.meta;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledFuture;
+
+/**
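+ * Persists the cluster meta state (global metadata and per-index metadata) on
+ * disk, loads it back on node startup, and manages dangling indices found on
+ * disk but absent from the cluster state.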
+ *
+ */
+public class LocalGatewayMetaState extends AbstractComponent implements ClusterStateListener {
+
+ enum AutoImportDangledState {
+ NO() {
+ @Override
+ public boolean shouldImport() {
+ return false;
+ }
+ },
+ YES() {
+ @Override
+ public boolean shouldImport() {
+ return true;
+ }
+ },
+ CLOSED() {
+ @Override
+ public boolean shouldImport() {
+ return true;
+ }
+ };
+
+ public abstract boolean shouldImport();
+
+ public static AutoImportDangledState fromString(String value) {
+ if ("no".equalsIgnoreCase(value)) {
+ return NO;
+ } else if ("yes".equalsIgnoreCase(value)) {
+ return YES;
+ } else if ("closed".equalsIgnoreCase(value)) {
+ return CLOSED;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("failed to parse [" + value + "], not a valid auto dangling import type");
+ }
+ }
+ }
+
+ private final NodeEnvironment nodeEnv;
+ private final ThreadPool threadPool;
+
+ private final LocalAllocateDangledIndices allocateDangledIndices;
+ private final NodeIndexDeletedAction nodeIndexDeletedAction;
+
+ @Nullable
+ private volatile MetaData currentMetaData;
+
+ private final XContentType format;
+ private final ToXContent.Params formatParams;
+ private final ToXContent.Params globalOnlyFormatParams;
+
+
+ private final AutoImportDangledState autoImportDangled;
+ private final TimeValue danglingTimeout;
+ private final Map<String, DanglingIndex> danglingIndices = ConcurrentCollections.newConcurrentMap();
+ private final Object danglingMutex = new Object();
+
+ @Inject
+ public LocalGatewayMetaState(Settings settings, ThreadPool threadPool, NodeEnvironment nodeEnv,
+ TransportNodesListGatewayMetaState nodesListGatewayMetaState, LocalAllocateDangledIndices allocateDangledIndices,
+ NodeIndexDeletedAction nodeIndexDeletedAction) throws Exception {
+ super(settings);
+ this.nodeEnv = nodeEnv;
+ this.threadPool = threadPool;
+ this.format = XContentType.fromRestContentType(settings.get("format", "smile"));
+ this.allocateDangledIndices = allocateDangledIndices;
+ this.nodeIndexDeletedAction = nodeIndexDeletedAction;
+ nodesListGatewayMetaState.init(this);
+
+ if (this.format == XContentType.SMILE) {
+ Map<String, String> params = Maps.newHashMap();
+ params.put("binary", "true");
+ formatParams = new ToXContent.MapParams(params);
+ Map<String, String> globalOnlyParams = Maps.newHashMap();
+ globalOnlyParams.put("binary", "true");
+ globalOnlyParams.put(MetaData.GLOBAL_PERSISTENT_ONLY_PARAM, "true");
+ globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
+ } else {
+ formatParams = ToXContent.EMPTY_PARAMS;
+ Map<String, String> globalOnlyParams = Maps.newHashMap();
+ globalOnlyParams.put(MetaData.GLOBAL_PERSISTENT_ONLY_PARAM, "true");
+ globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
+ }
+
+ this.autoImportDangled = AutoImportDangledState.fromString(settings.get("gateway.local.auto_import_dangled", AutoImportDangledState.YES.toString()));
+ this.danglingTimeout = settings.getAsTime("gateway.local.dangling_timeout", TimeValue.timeValueHours(2));
+
+ logger.debug("using gateway.local.auto_import_dangled [{}], with gateway.local.dangling_timeout [{}]", this.autoImportDangled, this.danglingTimeout);
+
+ if (DiscoveryNode.masterNode(settings)) {
+ try {
+ pre019Upgrade();
+ long start = System.currentTimeMillis();
+ loadState();
+ logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
+ } catch (Exception e) {
+ logger.error("failed to read local state, exiting...", e);
+ throw e;
+ }
+ }
+ }
+
+ public MetaData loadMetaState() throws Exception {
+ return loadState();
+ }
+
+ public boolean isDangling(String index) {
+ return danglingIndices.containsKey(index);
+ }
+
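+ /**
+ * On every cluster state change: on the master, writes changed global and
+ * per-index metadata to disk; on all nodes with a node file, deletes indices
+ * removed from the metadata and schedules or imports dangling indices found
+ * on disk.
+ */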
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.state().blocks().disableStatePersistence()) {
+ // reset the current metadata, we need to start fresh...
+ this.currentMetaData = null;
+ return;
+ }
+
+ MetaData newMetaData = event.state().metaData();
+ // we don't check if metaData changed, since we might be called several times and we need to check dangling...
+
+ boolean success = true;
+ // only applied to master node, writing the global and index level states
+ if (event.state().nodes().localNode().masterNode()) {
+ // check if the global state changed
+ if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) {
+ try {
+ writeGlobalState("changed", newMetaData, currentMetaData);
+ } catch (Throwable e) {
+ success = false;
+ }
+ }
+
+ // check and write changes in indices
+ for (IndexMetaData indexMetaData : newMetaData) {
+ String writeReason = null;
+ IndexMetaData currentIndexMetaData;
+ if (currentMetaData == null) {
+ // a new cluster state event, check against the state stored on disk
+ currentIndexMetaData = loadIndex(indexMetaData.index());
+ } else {
+ currentIndexMetaData = currentMetaData.index(indexMetaData.index());
+ }
+ if (currentIndexMetaData == null) {
+ writeReason = "freshly created";
+ } else if (currentIndexMetaData.version() != indexMetaData.version()) {
+ writeReason = "version changed from [" + currentIndexMetaData.version() + "] to [" + indexMetaData.version() + "]";
+ }
+
+ // only write the index state if there is a reason to
+ if (writeReason == null) {
+ continue;
+ }
+
+ try {
+ writeIndex(writeReason, indexMetaData, currentIndexMetaData);
+ } catch (Throwable e) {
+ success = false;
+ }
+ }
+ }
+
+ // delete indices that were there before, but are deleted now
+ // we need to do it so they won't be detected as dangling
+ if (currentMetaData != null) {
+ // only delete indices when we already received a state (currentMetaData != null)
+ // and we had a go at processing dangling indices at least once
+ // this will also delete the _state of the index itself
+ for (IndexMetaData current : currentMetaData) {
+ if (danglingIndices.containsKey(current.index())) {
+ continue;
+ }
+ if (!newMetaData.hasIndex(current.index())) {
+ logger.debug("[{}] deleting index that is no longer part of the metadata (indices: [{}])", current.index(), newMetaData.indices().keys());
+ if (nodeEnv.hasNodeFile()) {
+ FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
+ }
+ try {
+ nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), current.index(), event.state().nodes().localNodeId());
+ } catch (Throwable e) {
+ logger.debug("[{}] failed to notify master on local index store deletion", e, current.index());
+ }
+ }
+ }
+ }
+
+ // handle dangling indices, we handle those for all nodes that have a node file (data or master)
+ if (nodeEnv.hasNodeFile()) {
+ if (danglingTimeout.millis() >= 0) {
+ synchronized (danglingMutex) {
+ for (String danglingIndex : danglingIndices.keySet()) {
+ if (newMetaData.hasIndex(danglingIndex)) {
+ logger.debug("[{}] no longer dangling (created), removing", danglingIndex);
+ DanglingIndex removed = danglingIndices.remove(danglingIndex);
+ removed.future.cancel(false);
+ }
+ }
+ // delete indices that are no longer part of the metadata
+ try {
+ for (String indexName : nodeEnv.findAllIndices()) {
+ // if we have the index on the metadata, don't delete it
+ if (newMetaData.hasIndex(indexName)) {
+ continue;
+ }
+ if (danglingIndices.containsKey(indexName)) {
+ // already dangling, continue
+ continue;
+ }
+ IndexMetaData indexMetaData = loadIndex(indexName);
+ if (indexMetaData != null) {
+ if (danglingTimeout.millis() == 0) {
+ logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, timeout set to 0, deleting now", indexName);
+ FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(indexName)));
+ } else {
+ logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, scheduling to delete in [{}], auto import to cluster state [{}]", indexName, danglingTimeout, autoImportDangled);
+ danglingIndices.put(indexName, new DanglingIndex(indexName, threadPool.schedule(danglingTimeout, ThreadPool.Names.SAME, new RemoveDanglingIndex(indexName))));
+ }
+ }
+ }
+ } catch (Throwable e) {
+ logger.warn("failed to find dangling indices", e);
+ }
+ }
+ }
+ if (autoImportDangled.shouldImport() && !danglingIndices.isEmpty()) {
+ final List<IndexMetaData> dangled = Lists.newArrayList();
+ for (String indexName : danglingIndices.keySet()) {
+ IndexMetaData indexMetaData = loadIndex(indexName);
+ if (indexMetaData == null) {
+ logger.debug("failed to find state for dangling index [{}]", indexName);
+ continue;
+ }
+ // we might have someone copying over an index, renaming the directory, handle that
+ if (!indexMetaData.index().equals(indexName)) {
+ logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index());
+ indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
+ }
+ if (autoImportDangled == AutoImportDangledState.CLOSED) {
+ indexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).build();
+ }
+ dangled.add(indexMetaData);
+ }
+ IndexMetaData[] dangledIndices = dangled.toArray(new IndexMetaData[dangled.size()]);
+ try {
+ allocateDangledIndices.allocateDangled(dangledIndices, new LocalAllocateDangledIndices.Listener() {
+ @Override
+ public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
+ logger.trace("allocated dangled");
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.info("failed to send allocated dangled", e);
+ }
+ });
+ } catch (Throwable e) {
+ logger.warn("failed to send allocate dangled", e);
+ }
+ }
+ }
+
+ if (success) {
+ currentMetaData = newMetaData;
+ }
+ }
+
+ private void deleteIndex(String index) {
+ logger.trace("[{}] delete index state", index);
+ File[] indexLocations = nodeEnv.indexLocations(new Index(index));
+ for (File indexLocation : indexLocations) {
+ if (!indexLocation.exists()) {
+ continue;
+ }
+ FileSystemUtils.deleteRecursively(new File(indexLocation, "_state"));
+ }
+ }
+
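+ /**
+ * Writes the index metadata to a "state-{version}" file in the _state
+ * directory of every index location; the write counts as successful if at
+ * least one copy was fsync'ed, and stale state files are then removed.
+ */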
+ private void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception {
+ logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason);
+ XContentBuilder builder = XContentFactory.contentBuilder(format, new BytesStreamOutput());
+ builder.startObject();
+ IndexMetaData.Builder.toXContent(indexMetaData, builder, formatParams);
+ builder.endObject();
+ builder.flush();
+
+ String stateFileName = "state-" + indexMetaData.version();
+ Throwable lastFailure = null;
+ boolean wroteAtLeastOnce = false;
+ for (File indexLocation : nodeEnv.indexLocations(new Index(indexMetaData.index()))) {
+ File stateLocation = new File(indexLocation, "_state");
+ FileSystemUtils.mkdirs(stateLocation);
+ File stateFile = new File(stateLocation, stateFileName);
+
+ FileOutputStream fos = null;
+ try {
+ fos = new FileOutputStream(stateFile);
+ BytesReference bytes = builder.bytes();
+ fos.write(bytes.array(), bytes.arrayOffset(), bytes.length());
+ fos.getChannel().force(true);
+ fos.close();
+ wroteAtLeastOnce = true;
+ } catch (Throwable e) {
+ lastFailure = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(fos);
+ }
+ }
+
+ if (!wroteAtLeastOnce) {
+ logger.warn("[{}]: failed to state", lastFailure, indexMetaData.index());
+ throw new IOException("failed to write state for [" + indexMetaData.index() + "]", lastFailure);
+ }
+
+ // delete the old files
+ if (previousIndexMetaData != null && previousIndexMetaData.version() != indexMetaData.version()) {
+ for (File indexLocation : nodeEnv.indexLocations(new Index(indexMetaData.index()))) {
+ File[] files = new File(indexLocation, "_state").listFiles();
+ if (files == null) {
+ continue;
+ }
+ for (File file : files) {
+ if (!file.getName().startsWith("state-")) {
+ continue;
+ }
+ if (file.getName().equals(stateFileName)) {
+ continue;
+ }
+ file.delete();
+ }
+ }
+ }
+ }
+
+ private void writeGlobalState(String reason, MetaData metaData, @Nullable MetaData previousMetaData) throws Exception {
+ logger.trace("[_global] writing state, reason [{}]", reason);
+
+ XContentBuilder builder = XContentFactory.contentBuilder(format);
+ builder.startObject();
+ MetaData.Builder.toXContent(metaData, builder, globalOnlyFormatParams);
+ builder.endObject();
+ builder.flush();
+ String globalFileName = "global-" + metaData.version();
+ Throwable lastFailure = null;
+ boolean wroteAtLeastOnce = false;
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File stateLocation = new File(dataLocation, "_state");
+ FileSystemUtils.mkdirs(stateLocation);
+ File stateFile = new File(stateLocation, globalFileName);
+
+ FileOutputStream fos = null;
+ try {
+ fos = new FileOutputStream(stateFile);
+ BytesReference bytes = builder.bytes();
+ fos.write(bytes.array(), bytes.arrayOffset(), bytes.length());
+ fos.getChannel().force(true);
+ fos.close();
+ wroteAtLeastOnce = true;
+ } catch (Throwable e) {
+ lastFailure = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(fos);
+ }
+ }
+
+ if (!wroteAtLeastOnce) {
+ logger.warn("[_global]: failed to write global state", lastFailure);
+ throw new IOException("failed to write global state", lastFailure);
+ }
+
+ // delete the old files
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File[] files = new File(dataLocation, "_state").listFiles();
+ if (files == null) {
+ continue;
+ }
+ for (File file : files) {
+ if (!file.getName().startsWith("global-")) {
+ continue;
+ }
+ if (file.getName().equals(globalFileName)) {
+ continue;
+ }
+ file.delete();
+ }
+ }
+ }
+
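+ /**
+ * Rebuilds the full meta state from disk: the persisted global state plus
+ * the metadata of every index found in the node's index locations.
+ */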
+ private MetaData loadState() throws Exception {
+ MetaData globalMetaData = loadGlobalState();
+ MetaData.Builder metaDataBuilder;
+ if (globalMetaData != null) {
+ metaDataBuilder = MetaData.builder(globalMetaData);
+ } else {
+ metaDataBuilder = MetaData.builder();
+ }
+
+ Set<String> indices = nodeEnv.findAllIndices();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = loadIndex(index);
+ if (indexMetaData == null) {
+ logger.debug("[{}] failed to find metadata for existing index location", index);
+ } else {
+ metaDataBuilder.put(indexMetaData, false);
+ }
+ }
+ return metaDataBuilder.build();
+ }
+
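+ /**
+ * Loads the highest-versioned readable "state-" file for the given index
+ * across all its locations, or null if none could be read.
+ */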
+ @Nullable
+ private IndexMetaData loadIndex(String index) {
+ long highestVersion = -1;
+ IndexMetaData indexMetaData = null;
+ for (File indexLocation : nodeEnv.indexLocations(new Index(index))) {
+ File stateDir = new File(indexLocation, "_state");
+ if (!stateDir.exists() || !stateDir.isDirectory()) {
+ continue;
+ }
+ // now, iterate over the current versions, and find latest one
+ File[] stateFiles = stateDir.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ if (!stateFile.getName().startsWith("state-")) {
+ continue;
+ }
+ try {
+ long version = Long.parseLong(stateFile.getName().substring("state-".length()));
+ if (version > highestVersion) {
+ byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
+ if (data.length == 0) {
+ logger.debug("[{}]: no data for [" + stateFile.getAbsolutePath() + "], ignoring...", index);
+ continue;
+ }
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ parser.nextToken(); // move to START_OBJECT
+ indexMetaData = IndexMetaData.Builder.fromXContent(parser);
+ highestVersion = version;
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+ } catch (Throwable e) {
+ logger.debug("[{}]: failed to read [" + stateFile.getAbsolutePath() + "], ignoring...", e, index);
+ }
+ }
+ }
+ return indexMetaData;
+ }
+
+ private MetaData loadGlobalState() {
+ long highestVersion = -1;
+ MetaData metaData = null;
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File stateLocation = new File(dataLocation, "_state");
+ if (!stateLocation.exists()) {
+ continue;
+ }
+ File[] stateFiles = stateLocation.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ String name = stateFile.getName();
+ if (!name.startsWith("global-")) {
+ continue;
+ }
+ try {
+ long version = Long.parseLong(stateFile.getName().substring("global-".length()));
+ if (version > highestVersion) {
+ byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
+ if (data.length == 0) {
+ logger.debug("[_global] no data for [" + stateFile.getAbsolutePath() + "], ignoring...");
+ continue;
+ }
+
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ metaData = MetaData.Builder.fromXContent(parser);
+ highestVersion = version;
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+ } catch (Throwable e) {
+ logger.debug("failed to load global state from [{}]", e, stateFile.getAbsolutePath());
+ }
+ }
+ }
+
+ return metaData;
+ }
+
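+ /**
+ * Upgrades pre-0.19 on-disk state: reads the newest legacy "metadata-" file,
+ * rewrites it in the current global and per-index layout, renames the legacy
+ * file to a backup and deletes the remaining legacy files.
+ */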
+ private void pre019Upgrade() throws Exception {
+ long index = -1;
+ File metaDataFile = null;
+ MetaData metaData = null;
+ long version = -1;
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File stateLocation = new File(dataLocation, "_state");
+ if (!stateLocation.exists()) {
+ continue;
+ }
+ File[] stateFiles = stateLocation.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[upgrade]: processing [" + stateFile.getName() + "]");
+ }
+ String name = stateFile.getName();
+ if (!name.startsWith("metadata-")) {
+ continue;
+ }
+ long fileIndex = Long.parseLong(name.substring(name.indexOf('-') + 1));
+ if (fileIndex >= index) {
+ // try and read the meta data
+ try {
+ byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
+ if (data.length == 0) {
+ continue;
+ }
+ XContentParser parser = XContentHelper.createParser(data, 0, data.length);
+ try {
+ String currentFieldName = null;
+ XContentParser.Token token = parser.nextToken();
+ if (token != null) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("meta-data".equals(currentFieldName)) {
+ metaData = MetaData.Builder.fromXContent(parser);
+ }
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.longValue();
+ }
+ }
+ }
+ }
+ } finally {
+ parser.close();
+ }
+ index = fileIndex;
+ metaDataFile = stateFile;
+ } catch (IOException e) {
+ logger.warn("failed to read pre 0.19 state from [" + name + "], ignoring...", e);
+ }
+ }
+ }
+ }
+ if (metaData == null) {
+ return;
+ }
+
+ logger.info("found old metadata state, loading metadata from [{}] and converting to new metadata location and strucutre...", metaDataFile.getAbsolutePath());
+
+ writeGlobalState("upgrade", MetaData.builder(metaData).version(version).build(), null);
+ for (IndexMetaData indexMetaData : metaData) {
+ IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData).version(version);
+ // set the created version to 0.18
+ indexMetaDataBuilder.settings(ImmutableSettings.settingsBuilder().put(indexMetaData.settings()).put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_18_0));
+ writeIndex("upgrade", indexMetaDataBuilder.build(), null);
+ }
+
+ // rename shards state to backup state
+ File backupFile = new File(metaDataFile.getParentFile(), "backup-" + metaDataFile.getName());
+ if (!metaDataFile.renameTo(backupFile)) {
+ throw new IOException("failed to rename old state to backup state [" + metaDataFile.getAbsolutePath() + "]");
+ }
+
+ // delete all other shards state files
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File stateLocation = new File(dataLocation, "_state");
+ if (!stateLocation.exists()) {
+ continue;
+ }
+ File[] stateFiles = stateLocation.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ String name = stateFile.getName();
+ if (!name.startsWith("metadata-")) {
+ continue;
+ }
+ stateFile.delete();
+ }
+ }
+
+ logger.info("conversion to new metadata location and format done, backup create at [{}]", backupFile.getAbsolutePath());
+ }
+
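+ /**
+ * Scheduled task that deletes a dangling index from disk once the dangling
+ * timeout expires, unless the index was imported or removed in the meantime.
+ */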
+ class RemoveDanglingIndex implements Runnable {
+
+ private final String index;
+
+ RemoveDanglingIndex(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public void run() {
+ synchronized (danglingMutex) {
+ DanglingIndex remove = danglingIndices.remove(index);
+ // no longer there...
+ if (remove == null) {
+ return;
+ }
+ logger.info("[{}] deleting dangling index", index);
+ FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(index)));
+ }
+ }
+ }
+
+ static class DanglingIndex {
+ public final String index;
+ public final ScheduledFuture future;
+
+ DanglingIndex(String index, ScheduledFuture future) {
+ this.index = index;
+ this.future = future;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/state/meta/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/local/state/meta/TransportNodesListGatewayMetaState.java
new file mode 100644
index 0000000..1d13328
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/state/meta/TransportNodesListGatewayMetaState.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local.state.meta;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.support.nodes.*;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
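+ * Nodes-level transport action that fetches the locally persisted gateway
+ * meta state from a set of nodes.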
+ *
+ */
+public class TransportNodesListGatewayMetaState extends TransportNodesOperationAction<TransportNodesListGatewayMetaState.Request, TransportNodesListGatewayMetaState.NodesLocalGatewayMetaState, TransportNodesListGatewayMetaState.NodeRequest, TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState> {
+
+ private LocalGatewayMetaState metaState;
+
+ @Inject
+ public TransportNodesListGatewayMetaState(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ }
+
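+ // set after construction to break the circular dependency with LocalGatewayMetaState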
+ TransportNodesListGatewayMetaState init(LocalGatewayMetaState metaState) {
+ this.metaState = metaState;
+ return this;
+ }
+
+ public ActionFuture<NodesLocalGatewayMetaState> list(String[] nodesIds, @Nullable TimeValue timeout) {
+ return execute(new Request(nodesIds).timeout(timeout));
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return "/gateway/local/meta-state";
+ }
+
+ @Override
+ protected boolean transportCompress() {
+ return true; // compress since the metadata can become large
+ }
+
+ @Override
+ protected Request newRequest() {
+ return new Request();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest() {
+ return new NodeRequest();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest(String nodeId, Request request) {
+ return new NodeRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodeLocalGatewayMetaState newNodeResponse() {
+ return new NodeLocalGatewayMetaState();
+ }
+
+ @Override
+ protected NodesLocalGatewayMetaState newResponse(Request request, AtomicReferenceArray responses) {
+ final List<NodeLocalGatewayMetaState> nodesList = Lists.newArrayList();
+ final List<FailedNodeException> failures = Lists.newArrayList();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodeLocalGatewayMetaState) { // will also filter out null response for unallocated ones
+ nodesList.add((NodeLocalGatewayMetaState) resp);
+ } else if (resp instanceof FailedNodeException) {
+ failures.add((FailedNodeException) resp);
+ }
+ }
+ return new NodesLocalGatewayMetaState(clusterName, nodesList.toArray(new NodeLocalGatewayMetaState[nodesList.size()]),
+ failures.toArray(new FailedNodeException[failures.size()]));
+ }
+
+ @Override
+ protected NodeLocalGatewayMetaState nodeOperation(NodeRequest request) throws ElasticsearchException {
+ try {
+ return new NodeLocalGatewayMetaState(clusterService.localNode(), metaState.loadMetaState());
+ } catch (Exception e) {
+ throw new ElasticsearchException("failed to load metadata", e);
+ }
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return true;
+ }
+
+ static class Request extends NodesOperationRequest<Request> {
+
+ public Request() {
+ }
+
+ public Request(String... nodesIds) {
+ super(nodesIds);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+ }
+
+ public static class NodesLocalGatewayMetaState extends NodesOperationResponse<NodeLocalGatewayMetaState> {
+
+ private FailedNodeException[] failures;
+
+ NodesLocalGatewayMetaState() {
+ }
+
+ public NodesLocalGatewayMetaState(ClusterName clusterName, NodeLocalGatewayMetaState[] nodes, FailedNodeException[] failures) {
+ super(clusterName, nodes);
+ this.failures = failures;
+ }
+
+ public FailedNodeException[] failures() {
+ return failures;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeLocalGatewayMetaState[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = new NodeLocalGatewayMetaState();
+ nodes[i].readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeLocalGatewayMetaState response : nodes) {
+ response.writeTo(out);
+ }
+ }
+ }
+
+
+ static class NodeRequest extends NodeOperationRequest {
+
+ NodeRequest() {
+ }
+
+ NodeRequest(String nodeId, TransportNodesListGatewayMetaState.Request request) {
+ super(request, nodeId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ }
+ }
+
+ public static class NodeLocalGatewayMetaState extends NodeOperationResponse {
+
+ private MetaData metaData;
+
+ NodeLocalGatewayMetaState() {
+ }
+
+ public NodeLocalGatewayMetaState(DiscoveryNode node, MetaData metaData) {
+ super(node);
+ this.metaData = metaData;
+ }
+
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.readBoolean()) {
+ metaData = MetaData.Builder.readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (metaData == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ MetaData.Builder.writeTo(metaData, out);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/state/shards/LocalGatewayShardsState.java b/src/main/java/org/elasticsearch/gateway/local/state/shards/LocalGatewayShardsState.java
new file mode 100644
index 0000000..96774ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/state/shards/LocalGatewayShardsState.java
@@ -0,0 +1,453 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local.state.shards;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.local.state.meta.LocalGatewayMetaState;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+/**
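+ * Persists, on each data node, the state (version and primary flag) of the
+ * shards that have started on that node, and loads it back on startup so
+ * allocation can pick the most up-to-date copies after a full restart.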
+ */
+public class LocalGatewayShardsState extends AbstractComponent implements ClusterStateListener {
+
+ private final NodeEnvironment nodeEnv;
+ private final LocalGatewayMetaState metaState;
+
+ private volatile Map<ShardId, ShardStateInfo> currentState = Maps.newHashMap();
+
+ @Inject
+ public LocalGatewayShardsState(Settings settings, NodeEnvironment nodeEnv, TransportNodesListGatewayStartedShards listGatewayStartedShards, LocalGatewayMetaState metaState) throws Exception {
+ super(settings);
+ this.nodeEnv = nodeEnv;
+ this.metaState = metaState;
+ listGatewayStartedShards.initGateway(this);
+
+ if (DiscoveryNode.dataNode(settings)) {
+ try {
+ pre019Upgrade();
+ long start = System.currentTimeMillis();
+ currentState = loadShardsStateInfo();
+ logger.debug("took {} to load started shards state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
+ } catch (Exception e) {
+ logger.error("failed to read local state (started shards), exiting...", e);
+ throw e;
+ }
+ }
+ }
+
+ public Map<ShardId, ShardStateInfo> currentStartedShards() {
+ return this.currentState;
+ }
+
+ public ShardStateInfo loadShardInfo(ShardId shardId) throws Exception {
+ return loadShardStateInfo(shardId);
+ }
+
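+ /**
+ * Keeps the on-disk started-shards state in sync with the cluster state:
+ * drops entries for shards fully started elsewhere or whose index was
+ * deleted, records shards active on this node, and writes a state file
+ * whenever a shard's version changed.
+ */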
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.state().blocks().disableStatePersistence()) {
+ return;
+ }
+
+ if (!event.state().nodes().localNode().dataNode()) {
+ return;
+ }
+
+ if (!event.routingTableChanged()) {
+ return;
+ }
+
+ Map<ShardId, ShardStateInfo> newState = Maps.newHashMap();
+ newState.putAll(this.currentState);
+
+
+ // remove from the current state all the shards that are completely started somewhere, we won't need them anymore
+ // and if they are still here, we will add them in the next phase
+ // Also note, this works well when closing an index, since a closed index will have no routing shards entries
+ // so they won't get removed (we want to keep the fact that those shards are allocated on this node if needed)
+ for (IndexRoutingTable indexRoutingTable : event.state().routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ if (indexShardRoutingTable.countWithState(ShardRoutingState.STARTED) == indexShardRoutingTable.size()) {
+ newState.remove(indexShardRoutingTable.shardId());
+ }
+ }
+ }
+ // remove deleted indices from the started shards
+ for (ShardId shardId : currentState.keySet()) {
+ if (!event.state().metaData().hasIndex(shardId.index().name())) {
+ newState.remove(shardId);
+ }
+ }
+ // now, add all the ones that are active and on this node
+ RoutingNode routingNode = event.state().readOnlyRoutingNodes().node(event.state().nodes().localNodeId());
+ // routingNode is null if our node is not in play yet
+ if (routingNode != null) {
+ for (MutableShardRouting shardRouting : routingNode) {
+ if (shardRouting.active()) {
+ newState.put(shardRouting.shardId(), new ShardStateInfo(shardRouting.version(), shardRouting.primary()));
+ }
+ }
+ }
+
+ // now, write the started shards state where needed
+ for (Iterator<Map.Entry<ShardId, ShardStateInfo>> it = newState.entrySet().iterator(); it.hasNext(); ) {
+ Map.Entry<ShardId, ShardStateInfo> entry = it.next();
+ ShardId shardId = entry.getKey();
+ ShardStateInfo shardStateInfo = entry.getValue();
+
+ String writeReason = null;
+ ShardStateInfo currentShardStateInfo = currentState.get(shardId);
+ if (currentShardStateInfo == null) {
+ writeReason = "freshly started, version [" + shardStateInfo.version + "]";
+ } else if (currentShardStateInfo.version != shardStateInfo.version) {
+ writeReason = "version changed from [" + currentShardStateInfo.version + "] to [" + shardStateInfo.version + "]";
+ }
+
+ // only write the shard state if there is a reason to
+ if (writeReason == null) {
+ continue;
+ }
+
+ try {
+ writeShardState(writeReason, shardId, shardStateInfo, currentShardStateInfo);
+ } catch (Exception e) {
+ // we failed to write the shard state, remove it from our builder, we will try and write
+ // it next time...
+ it.remove();
+ }
+ }
+
+ // REMOVED: don't delete shard state, rely on IndicesStore to delete the shard location
+ // only once all shards are allocated on another node
+ // now, go over the current ones and delete ones that are not in the new one
+// for (Map.Entry<ShardId, ShardStateInfo> entry : currentState.entrySet()) {
+// ShardId shardId = entry.getKey();
+// if (!newState.containsKey(shardId)) {
+// if (!metaState.isDangling(shardId.index().name())) {
+// deleteShardState(shardId);
+// }
+// }
+// }
+
+ this.currentState = newState;
+ }
+
+ private Map<ShardId, ShardStateInfo> loadShardsStateInfo() throws Exception {
+ Set<ShardId> shardIds = nodeEnv.findAllShardIds();
+ long highestVersion = -1;
+ Map<ShardId, ShardStateInfo> shardsState = Maps.newHashMap();
+ for (ShardId shardId : shardIds) {
+ ShardStateInfo shardStateInfo = loadShardStateInfo(shardId);
+ if (shardStateInfo == null) {
+ continue;
+ }
+ shardsState.put(shardId, shardStateInfo);
+
+ // update the global version
+ if (shardStateInfo.version > highestVersion) {
+ highestVersion = shardStateInfo.version;
+ }
+ }
+ return shardsState;
+ }
+
+ private ShardStateInfo loadShardStateInfo(ShardId shardId) {
+ long highestShardVersion = -1;
+ ShardStateInfo highestShardState = null;
+ for (File shardLocation : nodeEnv.shardLocations(shardId)) {
+ File shardStateDir = new File(shardLocation, "_state");
+ if (!shardStateDir.exists() || !shardStateDir.isDirectory()) {
+ continue;
+ }
+ // now, iterate over the current versions, and find latest one
+ File[] stateFiles = shardStateDir.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ if (!stateFile.getName().startsWith("state-")) {
+ continue;
+ }
+ try {
+ long version = Long.parseLong(stateFile.getName().substring("state-".length()));
+ if (version > highestShardVersion) {
+ byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
+ if (data.length == 0) {
+ logger.debug("[{}][{}]: not data for [" + stateFile.getAbsolutePath() + "], ignoring...", shardId.index().name(), shardId.id());
+ continue;
+ }
+ ShardStateInfo readState = readShardState(data);
+ if (readState == null) {
+ logger.debug("[{}][{}]: not data for [" + stateFile.getAbsolutePath() + "], ignoring...", shardId.index().name(), shardId.id());
+ continue;
+ }
+ assert readState.version == version;
+ highestShardState = readState;
+ highestShardVersion = version;
+ }
+ } catch (Exception e) {
+ logger.debug("[{}][{}]: failed to read [" + stateFile.getAbsolutePath() + "], ignoring...", e, shardId.index().name(), shardId.id());
+ }
+ }
+ }
+ return highestShardState;
+ }
+
+ @Nullable
+ private ShardStateInfo readShardState(byte[] data) throws Exception {
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ return null;
+ }
+ long version = -1;
+ Boolean primary = null;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.longValue();
+ } else if ("primary".equals(currentFieldName)) {
+ primary = parser.booleanValue();
+ }
+ }
+ }
+ return new ShardStateInfo(version, primary);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
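+ /**
+ * Writes the shard state as a pretty-printed JSON "state-{version}" file in
+ * the _state directory of every shard location; the write counts as
+ * successful if at least one copy was fsync'ed, after which the file for
+ * the previous version is removed.
+ */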
+ private void writeShardState(String reason, ShardId shardId, ShardStateInfo shardStateInfo, @Nullable ShardStateInfo previousStateInfo) throws Exception {
+ logger.trace("[{}][{}] writing shard state, reason [{}]", shardId.index().name(), shardId.id(), reason);
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, new BytesStreamOutput());
+ builder.prettyPrint();
+ builder.startObject();
+ builder.field("version", shardStateInfo.version);
+ if (shardStateInfo.primary != null) {
+ builder.field("primary", shardStateInfo.primary);
+ }
+ builder.endObject();
+ builder.flush();
+
+ Exception lastFailure = null;
+ boolean wroteAtLeastOnce = false;
+ for (File shardLocation : nodeEnv.shardLocations(shardId)) {
+ File shardStateDir = new File(shardLocation, "_state");
+ FileSystemUtils.mkdirs(shardStateDir);
+ File stateFile = new File(shardStateDir, "state-" + shardStateInfo.version);
+
+
+ FileOutputStream fos = null;
+ try {
+ fos = new FileOutputStream(stateFile);
+ BytesReference bytes = builder.bytes();
+ fos.write(bytes.array(), bytes.arrayOffset(), bytes.length());
+ fos.getChannel().force(true);
+ fos.close();
+ wroteAtLeastOnce = true;
+ } catch (Exception e) {
+ lastFailure = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(fos);
+ }
+ }
+
+ if (!wroteAtLeastOnce) {
+ logger.warn("[{}][{}]: failed to write shard state", shardId.index().name(), shardId.id(), lastFailure);
+ throw new IOException("failed to write shard state for " + shardId, lastFailure);
+ }
+
+ // delete the old files
+ if (previousStateInfo != null && previousStateInfo.version != shardStateInfo.version) {
+ for (File shardLocation : nodeEnv.shardLocations(shardId)) {
+ File stateFile = new File(new File(shardLocation, "_state"), "state-" + previousStateInfo.version);
+ stateFile.delete();
+ }
+ }
+ }
+
+ private void deleteShardState(ShardId shardId) {
+ logger.trace("[{}][{}] delete shard state", shardId.index().name(), shardId.id());
+ File[] shardLocations = nodeEnv.shardLocations(shardId);
+ for (File shardLocation : shardLocations) {
+ if (!shardLocation.exists()) {
+ continue;
+ }
+ FileSystemUtils.deleteRecursively(new File(shardLocation, "_state"));
+ }
+ }
+
+ private void pre019Upgrade() throws Exception {
+ long index = -1;
+ File latest = null;
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File stateLocation = new File(dataLocation, "_state");
+ if (!stateLocation.exists()) {
+ continue;
+ }
+ File[] stateFiles = stateLocation.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[find_latest_state]: processing [" + stateFile.getName() + "]");
+ }
+ String name = stateFile.getName();
+ if (!name.startsWith("shards-")) {
+ continue;
+ }
+ long fileIndex = Long.parseLong(name.substring(name.indexOf('-') + 1));
+ if (fileIndex >= index) {
+ // try and read the meta data
+ try {
+ byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
+ if (data.length == 0) {
+ logger.debug("[upgrade]: no data for [" + name + "], ignoring...");
+ continue;
+ }
+ pre09ReadState(data);
+ index = fileIndex;
+ latest = stateFile;
+ } catch (IOException e) {
+ logger.warn("[upgrade]: failed to read state from [" + name + "], ignoring...", e);
+ }
+ }
+ }
+ }
+ if (latest == null) {
+ return;
+ }
+
+ logger.info("found old shards state, loading started shards from [{}] and converting to new shards state locations...", latest.getAbsolutePath());
+ Map<ShardId, ShardStateInfo> shardsState = pre09ReadState(Streams.copyToByteArray(new FileInputStream(latest)));
+
+ for (Map.Entry<ShardId, ShardStateInfo> entry : shardsState.entrySet()) {
+ writeShardState("upgrade", entry.getKey(), entry.getValue(), null);
+ }
+
+ // rename shards state to backup state
+ File backupFile = new File(latest.getParentFile(), "backup-" + latest.getName());
+ if (!latest.renameTo(backupFile)) {
+ throw new IOException("failed to rename old state to backup state [" + latest.getAbsolutePath() + "]");
+ }
+
+ // delete all other shards state files
+ for (File dataLocation : nodeEnv.nodeDataLocations()) {
+ File stateLocation = new File(dataLocation, "_state");
+ if (!stateLocation.exists()) {
+ continue;
+ }
+ File[] stateFiles = stateLocation.listFiles();
+ if (stateFiles == null) {
+ continue;
+ }
+ for (File stateFile : stateFiles) {
+ String name = stateFile.getName();
+ if (!name.startsWith("shards-")) {
+ continue;
+ }
+ stateFile.delete();
+ }
+ }
+
+ logger.info("conversion to new shards state location and format done, backup create at [{}]", backupFile.getAbsolutePath());
+ }
+
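+ /**
+ * Parses the legacy (pre-0.19) "shards-" state file format into a map from
+ * shard id to shard state.
+ */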
+ private Map<ShardId, ShardStateInfo> pre09ReadState(byte[] data) throws IOException {
+ XContentParser parser = null;
+ try {
+ Map<ShardId, ShardStateInfo> shardsState = Maps.newHashMap();
+
+ parser = XContentHelper.createParser(data, 0, data.length);
+
+ String currentFieldName = null;
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ // no data...
+ return shardsState;
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("shards".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ String shardIndex = null;
+ int shardId = -1;
+ long version = -1;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ shardIndex = parser.text();
+ } else if ("id".equals(currentFieldName)) {
+ shardId = parser.intValue();
+ } else if ("version".equals(currentFieldName)) {
+ version = parser.longValue();
+ }
+ }
+ }
+ shardsState.put(new ShardId(shardIndex, shardId), new ShardStateInfo(version, null));
+ }
+ }
+ }
+ }
+ }
+ return shardsState;
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/state/shards/ShardStateInfo.java b/src/main/java/org/elasticsearch/gateway/local/state/shards/ShardStateInfo.java
new file mode 100644
index 0000000..f771115
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/state/shards/ShardStateInfo.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local.state.shards;
+
+import org.elasticsearch.common.Nullable;
+
+/**
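+ * Immutable holder for the persisted state of a single shard: its version
+ * and, if known, whether it was allocated as a primary.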
+ */
+public class ShardStateInfo {
+
+ public final long version;
+
+ // can be null if we don't know...
+ @Nullable
+ public final Boolean primary;
+
+ public ShardStateInfo(long version, Boolean primary) {
+ this.version = version;
+ this.primary = primary;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/local/state/shards/TransportNodesListGatewayStartedShards.java b/src/main/java/org/elasticsearch/gateway/local/state/shards/TransportNodesListGatewayStartedShards.java
new file mode 100644
index 0000000..49edb04
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/local/state/shards/TransportNodesListGatewayStartedShards.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local.state.shards;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.support.nodes.*;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ *
+ */
+public class TransportNodesListGatewayStartedShards extends TransportNodesOperationAction<TransportNodesListGatewayStartedShards.Request, TransportNodesListGatewayStartedShards.NodesLocalGatewayStartedShards, TransportNodesListGatewayStartedShards.NodeRequest, TransportNodesListGatewayStartedShards.NodeLocalGatewayStartedShards> {
+
+ private LocalGatewayShardsState shardsState;
+
+ @Inject
+ public TransportNodesListGatewayStartedShards(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ }
+
+ TransportNodesListGatewayStartedShards initGateway(LocalGatewayShardsState shardsState) {
+ this.shardsState = shardsState;
+ return this;
+ }
+
+ public ActionFuture<NodesLocalGatewayStartedShards> list(ShardId shardId, String[] nodesIds, @Nullable TimeValue timeout) {
+ return execute(new Request(shardId, nodesIds).timeout(timeout));
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return "/gateway/local/started-shards";
+ }
+
+ @Override
+ protected boolean transportCompress() {
+ return true; // this can become big...
+ }
+
+ @Override
+ protected Request newRequest() {
+ return new Request();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest() {
+ return new NodeRequest();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest(String nodeId, Request request) {
+ return new NodeRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodeLocalGatewayStartedShards newNodeResponse() {
+ return new NodeLocalGatewayStartedShards();
+ }
+
+ @Override
+ protected NodesLocalGatewayStartedShards newResponse(Request request, AtomicReferenceArray responses) {
+ final List<NodeLocalGatewayStartedShards> nodesList = Lists.newArrayList();
+ final List<FailedNodeException> failures = Lists.newArrayList();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodeLocalGatewayStartedShards) { // will also filter out null responses for unallocated nodes
+ nodesList.add((NodeLocalGatewayStartedShards) resp);
+ } else if (resp instanceof FailedNodeException) {
+ failures.add((FailedNodeException) resp);
+ }
+ }
+ return new NodesLocalGatewayStartedShards(clusterName, nodesList.toArray(new NodeLocalGatewayStartedShards[nodesList.size()]),
+ failures.toArray(new FailedNodeException[failures.size()]));
+ }
+
+ @Override
+ protected NodeLocalGatewayStartedShards nodeOperation(NodeRequest request) throws ElasticsearchException {
+ try {
+ ShardStateInfo shardStateInfo = shardsState.loadShardInfo(request.shardId);
+ if (shardStateInfo != null) {
+ return new NodeLocalGatewayStartedShards(clusterService.localNode(), shardStateInfo.version);
+ }
+ return new NodeLocalGatewayStartedShards(clusterService.localNode(), -1);
+ } catch (Exception e) {
+ throw new ElasticsearchException("failed to load started shards", e);
+ }
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return true;
+ }
+
+ static class Request extends NodesOperationRequest<Request> {
+
+ private ShardId shardId;
+
+ public Request() {
+ }
+
+ public Request(ShardId shardId, Set<String> nodesIds) {
+ super(nodesIds.toArray(new String[nodesIds.size()]));
+ this.shardId = shardId;
+ }
+
+ public Request(ShardId shardId, String... nodesIds) {
+ super(nodesIds);
+ this.shardId = shardId;
+ }
+
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = ShardId.readShardId(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardId.writeTo(out);
+ }
+ }
+
+ public static class NodesLocalGatewayStartedShards extends NodesOperationResponse<NodeLocalGatewayStartedShards> {
+
+ private FailedNodeException[] failures;
+
+ NodesLocalGatewayStartedShards() {
+ }
+
+ public NodesLocalGatewayStartedShards(ClusterName clusterName, NodeLocalGatewayStartedShards[] nodes, FailedNodeException[] failures) {
+ super(clusterName, nodes);
+ this.failures = failures;
+ }
+
+ public FailedNodeException[] failures() {
+ return failures;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeLocalGatewayStartedShards[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = new NodeLocalGatewayStartedShards();
+ nodes[i].readFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeLocalGatewayStartedShards response : nodes) {
+ response.writeTo(out);
+ }
+ }
+ }
+
+ static class NodeRequest extends NodeOperationRequest {
+
+ ShardId shardId;
+
+ NodeRequest() {
+ }
+
+ NodeRequest(String nodeId, TransportNodesListGatewayStartedShards.Request request) {
+ super(request, nodeId);
+ this.shardId = request.shardId();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = ShardId.readShardId(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardId.writeTo(out);
+ }
+ }
+
+ public static class NodeLocalGatewayStartedShards extends NodeOperationResponse {
+
+ private long version = -1;
+
+ NodeLocalGatewayStartedShards() {
+ }
+
+ public NodeLocalGatewayStartedShards(DiscoveryNode node, long version) {
+ super(node);
+ this.version = version;
+ }
+
+ public boolean hasVersion() {
+ return version != -1;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ version = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(version);
+ }
+ }
+}
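
As a usage sketch, the allocation side would list the started-shard versions across nodes and prefer the highest one. The node IDs and the getNodes() accessor below follow the usual 1.x nodes-operation conventions and are assumptions, not part of this diff:

    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.index.shard.ShardId;

    public class StartedShardsLookupSketch {

        // 'action' would be the injected TransportNodesListGatewayStartedShards instance
        static long highestVersion(TransportNodesListGatewayStartedShards action, ShardId shardId) {
            TransportNodesListGatewayStartedShards.NodesLocalGatewayStartedShards response =
                    action.list(shardId, new String[]{"node1", "node2"}, TimeValue.timeValueSeconds(30)).actionGet();
            long highest = -1;
            for (TransportNodesListGatewayStartedShards.NodeLocalGatewayStartedShards node : response.getNodes()) {
                if (node.hasVersion() && node.version() > highest) {
                    highest = node.version(); // prefer the node holding the most recent copy of the shard
                }
            }
            return highest;
        }
    }
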
diff --git a/src/main/java/org/elasticsearch/gateway/none/NoneGateway.java b/src/main/java/org/elasticsearch/gateway/none/NoneGateway.java
new file mode 100644
index 0000000..df1c45e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/none/NoneGateway.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.none;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.gateway.GatewayException;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.gateway.none.NoneIndexGatewayModule;
+
+/**
+ *
+ */
+public class NoneGateway extends AbstractLifecycleComponent<Gateway> implements Gateway, ClusterStateListener {
+
+ public static final String TYPE = "none";
+
+ private final ClusterService clusterService;
+ private final NodeEnvironment nodeEnv;
+ private final NodeIndexDeletedAction nodeIndexDeletedAction;
+
+ @Nullable
+ private volatile MetaData currentMetaData;
+
+ @Inject
+ public NoneGateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv, NodeIndexDeletedAction nodeIndexDeletedAction) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.nodeEnv = nodeEnv;
+ this.nodeIndexDeletedAction = nodeIndexDeletedAction;
+
+ clusterService.addLast(this);
+ }
+
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ @Override
+ public String toString() {
+ return "_none_";
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public void performStateRecovery(GatewayStateRecoveredListener listener) throws GatewayException {
+ logger.debug("performing state recovery");
+ listener.onSuccess(ClusterState.builder().build());
+ }
+
+ @Override
+ public Class<? extends Module> suggestIndexGateway() {
+ return NoneIndexGatewayModule.class;
+ }
+
+ @Override
+ public void reset() {
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.state().blocks().disableStatePersistence()) {
+ // reset the current metadata; we need to start fresh...
+ this.currentMetaData = null;
+ return;
+ }
+
+ MetaData newMetaData = event.state().metaData();
+
+ // delete indices that were there before, but are deleted now
+ // we need to do it so they won't be detected as dangling
+ if (currentMetaData != null) {
+ // only delete indices when we already received a state (currentMetaData != null)
+ for (IndexMetaData current : currentMetaData) {
+ if (!newMetaData.hasIndex(current.index())) {
+ logger.debug("[{}] deleting index that is no longer part of the metadata (indices: [{}])", current.index(), newMetaData.indices().keys());
+ if (nodeEnv.hasNodeFile()) {
+ FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
+ }
+ try {
+ nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), current.index(), event.state().nodes().localNodeId());
+ } catch (Exception e) {
+ logger.debug("[{}] failed to notify master on local index store deletion", e, current.index());
+ }
+ }
+ }
+ }
+
+ currentMetaData = newMetaData;
+ }
+}
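
NoneGateway hooks into cluster-state updates by registering itself with clusterService.addLast(this) and diffing old against new metadata in clusterChanged. A minimal stand-alone listener using the same registration pattern (the logging body is illustrative, not from this diff):

    import org.elasticsearch.cluster.ClusterChangedEvent;
    import org.elasticsearch.cluster.ClusterService;
    import org.elasticsearch.cluster.ClusterStateListener;

    public class MetaDataChangeLogger implements ClusterStateListener {

        public MetaDataChangeLogger(ClusterService clusterService) {
            clusterService.addLast(this); // same registration hook NoneGateway uses
        }

        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.metaDataChanged()) { // only act when index metadata actually changed
                System.out.println("metadata changed at cluster state version " + event.state().version());
            }
        }
    }
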
diff --git a/src/main/java/org/elasticsearch/gateway/none/NoneGatewayAllocator.java b/src/main/java/org/elasticsearch/gateway/none/NoneGatewayAllocator.java
new file mode 100644
index 0000000..dde4162
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/none/NoneGatewayAllocator.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.none;
+
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.GatewayAllocator;
+
+/**
+ */
+public class NoneGatewayAllocator implements GatewayAllocator {
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java b/src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java
new file mode 100644
index 0000000..7f4f122
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/none/NoneGatewayModule.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.none;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.gateway.Gateway;
+
+/**
+ */
+public class NoneGatewayModule extends AbstractModule implements PreProcessModule {
+
+ @Override
+ public void processModule(Module module) {
+ if (module instanceof ShardsAllocatorModule) {
+ ((ShardsAllocatorModule) module).setGatewayAllocator(NoneGatewayAllocator.class);
+ }
+ }
+
+ @Override
+ protected void configure() {
+ bind(Gateway.class).to(NoneGateway.class).asEagerSingleton();
+ }
+}
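
Gateway modules like this one are selected through node settings; in the 1.x line the implementation is picked with the gateway.type setting. A hedged sketch of starting an embedded node with the none gateway (embedded-node API of that era, not taken from this diff):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.node.Node;
    import org.elasticsearch.node.NodeBuilder;

    public class NoneGatewayNodeSketch {
        public static void main(String[] args) {
            Node node = NodeBuilder.nodeBuilder()
                    .settings(ImmutableSettings.settingsBuilder()
                            .put("gateway.type", "none")) // no state persisted across full cluster restarts
                    .node();
            // ... use the node ...
            node.close();
        }
    }
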
diff --git a/src/main/java/org/elasticsearch/gateway/shared/SharedStorageGateway.java b/src/main/java/org/elasticsearch/gateway/shared/SharedStorageGateway.java
new file mode 100644
index 0000000..d829af3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/gateway/shared/SharedStorageGateway.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.shared;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.gateway.GatewayException;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.concurrent.Executors.newSingleThreadExecutor;
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ *
+ */
+public abstract class SharedStorageGateway extends AbstractLifecycleComponent<Gateway> implements Gateway, ClusterStateListener {
+
+ private final ClusterService clusterService;
+
+ private final ThreadPool threadPool;
+
+ private ExecutorService writeStateExecutor;
+
+ private volatile MetaData currentMetaData;
+
+ private NodeEnvironment nodeEnv;
+
+ private NodeIndexDeletedAction nodeIndexDeletedAction;
+
+ public SharedStorageGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.writeStateExecutor = newSingleThreadExecutor(daemonThreadFactory(settings, "gateway#writeMetaData"));
+ clusterService.addLast(this);
+ logger.warn("shared gateway has been deprecated, please use the (default) local gateway");
+ }
+
+ @Inject
+ public void setNodeEnv(NodeEnvironment nodeEnv) {
+ this.nodeEnv = nodeEnv;
+ }
+
+ // injected via setter rather than constructor so as not to break backward compatibility with extensions of this class
+ @Inject
+ public void setNodeIndexDeletedAction(NodeIndexDeletedAction nodeIndexDeletedAction) {
+ this.nodeIndexDeletedAction = nodeIndexDeletedAction;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ clusterService.remove(this);
+ writeStateExecutor.shutdown();
+ try {
+ writeStateExecutor.awaitTermination(10, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+
+ @Override
+ public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ logger.debug("reading state from gateway {} ...", this);
+ StopWatch stopWatch = new StopWatch().start();
+ MetaData metaData;
+ try {
+ metaData = read();
+ logger.debug("read state from gateway {}, took {}", this, stopWatch.stop().totalTime());
+ if (metaData == null) {
+ logger.debug("no state read from gateway");
+ listener.onSuccess(ClusterState.builder().build());
+ } else {
+ listener.onSuccess(ClusterState.builder().metaData(metaData).build());
+ }
+ } catch (Exception e) {
+ logger.error("failed to read from gateway", e);
+ listener.onFailure(ExceptionsHelper.detailedMessage(e));
+ }
+ }
+ });
+ }
+
+ @Override
+ public void clusterChanged(final ClusterChangedEvent event) {
+ if (!lifecycle.started()) {
+ return;
+ }
+
+ // nothing to do until we actually recover from the gateway, or any other block indicates we need to disable state persistence
+ if (event.state().blocks().disableStatePersistence()) {
+ this.currentMetaData = null;
+ return;
+ }
+
+ if (!event.metaDataChanged()) {
+ return;
+ }
+ writeStateExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ Set<String> indicesDeleted = Sets.newHashSet();
+ if (event.localNodeMaster()) {
+ logger.debug("writing to gateway {} ...", this);
+ StopWatch stopWatch = new StopWatch().start();
+ try {
+ write(event.state().metaData());
+ logger.debug("wrote to gateway {}, took {}", this, stopWatch.stop().totalTime());
+ // TODO: we need to remember that we failed; maybe add a retry scheduler?
+ } catch (Exception e) {
+ logger.error("failed to write to gateway", e);
+ }
+ if (currentMetaData != null) {
+ for (IndexMetaData current : currentMetaData) {
+ if (!event.state().metaData().hasIndex(current.index())) {
+ delete(current);
+ indicesDeleted.add(current.index());
+ }
+ }
+ }
+ }
+ if (nodeEnv != null && nodeEnv.hasNodeFile()) {
+ if (currentMetaData != null) {
+ for (IndexMetaData current : currentMetaData) {
+ if (!event.state().metaData().hasIndex(current.index())) {
+ FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
+ indicesDeleted.add(current.index());
+ }
+ }
+ }
+ }
+ currentMetaData = event.state().metaData();
+
+ for (String indexDeleted : indicesDeleted) {
+ try {
+ nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), indexDeleted, event.state().nodes().localNodeId());
+ } catch (Exception e) {
+ logger.debug("[{}] failed to notify master on local index store deletion", e, indexDeleted);
+ }
+ }
+ }
+ });
+ }
+
+ protected abstract MetaData read() throws ElasticsearchException;
+
+ protected abstract void write(MetaData metaData) throws ElasticsearchException;
+
+ protected abstract void delete(IndexMetaData indexMetaData) throws ElasticsearchException;
+}
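
The three abstract methods at the bottom are the whole storage contract: read the global MetaData, write it, and delete per-index metadata. A minimal in-memory subclass to make that contract concrete; everything here is a sketch (the suggestIndexGateway choice is borrowed from NoneGateway just to have a compilable value):

    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.cluster.ClusterService;
    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.cluster.metadata.MetaData;
    import org.elasticsearch.common.inject.Module;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.gateway.none.NoneIndexGatewayModule;
    import org.elasticsearch.threadpool.ThreadPool;

    public class InMemoryStorageGateway extends SharedStorageGateway {

        private volatile MetaData stored; // stand-in for a real shared store (fs, blob store, ...)

        public InMemoryStorageGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService) {
            super(settings, threadPool, clusterService);
        }

        @Override
        public String type() {
            return "in-memory";
        }

        @Override
        public Class<? extends Module> suggestIndexGateway() {
            return NoneIndexGatewayModule.class;
        }

        @Override
        public void reset() {
            stored = null;
        }

        @Override
        protected MetaData read() throws ElasticsearchException {
            return stored; // null means "no state written yet"
        }

        @Override
        protected void write(MetaData metaData) throws ElasticsearchException {
            stored = metaData;
        }

        @Override
        protected void delete(IndexMetaData indexMetaData) throws ElasticsearchException {
            // nothing per-index to clean up in memory
        }
    }
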
diff --git a/src/main/java/org/elasticsearch/http/BindHttpException.java b/src/main/java/org/elasticsearch/http/BindHttpException.java
new file mode 100644
index 0000000..3fdad9c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/BindHttpException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+/**
+ *
+ */
+public class BindHttpException extends HttpException {
+
+ public BindHttpException(String message) {
+ super(message);
+ }
+
+ public BindHttpException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/http/HttpChannel.java b/src/main/java/org/elasticsearch/http/HttpChannel.java
new file mode 100644
index 0000000..569426f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpChannel.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.rest.RestChannel;
+
+/**
+ *
+ */
+public interface HttpChannel extends RestChannel {
+
+}
diff --git a/src/main/java/org/elasticsearch/http/HttpException.java b/src/main/java/org/elasticsearch/http/HttpException.java
new file mode 100644
index 0000000..7593afa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class HttpException extends ElasticsearchException {
+
+ public HttpException(String message) {
+ super(message);
+ }
+
+ public HttpException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/http/HttpInfo.java b/src/main/java/org/elasticsearch/http/HttpInfo.java
new file mode 100644
index 0000000..da8bffd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpInfo.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ *
+ */
+public class HttpInfo implements Streamable, Serializable, ToXContent {
+
+ private BoundTransportAddress address;
+ private long maxContentLength;
+
+ HttpInfo() {
+ }
+
+ public HttpInfo(BoundTransportAddress address, long maxContentLength) {
+ this.address = address;
+ this.maxContentLength = maxContentLength;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString HTTP = new XContentBuilderString("http");
+ static final XContentBuilderString BOUND_ADDRESS = new XContentBuilderString("bound_address");
+ static final XContentBuilderString PUBLISH_ADDRESS = new XContentBuilderString("publish_address");
+ static final XContentBuilderString MAX_CONTENT_LENGTH = new XContentBuilderString("max_content_length");
+ static final XContentBuilderString MAX_CONTENT_LENGTH_IN_BYTES = new XContentBuilderString("max_content_length_in_bytes");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.HTTP);
+ builder.field(Fields.BOUND_ADDRESS, address.boundAddress().toString());
+ builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
+ builder.byteSizeField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength);
+ builder.endObject();
+ return builder;
+ }
+
+ public static HttpInfo readHttpInfo(StreamInput in) throws IOException {
+ HttpInfo info = new HttpInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ address = BoundTransportAddress.readBoundTransportAddress(in);
+ maxContentLength = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ address.writeTo(out);
+ out.writeLong(maxContentLength);
+ }
+
+ public BoundTransportAddress address() {
+ return address;
+ }
+
+ public BoundTransportAddress getAddress() {
+ return address();
+ }
+
+ public ByteSizeValue maxContentLength() {
+ return new ByteSizeValue(maxContentLength);
+ }
+
+ public ByteSizeValue getMaxContentLength() {
+ return maxContentLength();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/http/HttpRequest.java b/src/main/java/org/elasticsearch/http/HttpRequest.java
new file mode 100644
index 0000000..99456b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpRequest.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.rest.RestRequest;
+
+/**
+ *
+ */
+public abstract class HttpRequest extends RestRequest {
+
+}
diff --git a/src/main/java/org/elasticsearch/http/HttpResponse.java b/src/main/java/org/elasticsearch/http/HttpResponse.java
new file mode 100644
index 0000000..2b8dcdc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpResponse.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.rest.RestResponse;
+
+/**
+ *
+ */
+public interface HttpResponse extends RestResponse {
+
+}
diff --git a/src/main/java/org/elasticsearch/http/HttpServer.java b/src/main/java/org/elasticsearch/http/HttpServer.java
new file mode 100644
index 0000000..25527ae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpServer.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.rest.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.rest.RestStatus.*;
+
+/**
+ *
+ */
+public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
+
+ private final Environment environment;
+
+ private final HttpServerTransport transport;
+
+ private final RestController restController;
+
+ private final NodeService nodeService;
+
+ private final boolean disableSites;
+
+ private final PluginSiteFilter pluginSiteFilter = new PluginSiteFilter();
+
+ @Inject
+ public HttpServer(Settings settings, Environment environment, HttpServerTransport transport,
+ RestController restController,
+ NodeService nodeService) {
+ super(settings);
+ this.environment = environment;
+ this.transport = transport;
+ this.restController = restController;
+ this.nodeService = nodeService;
+ nodeService.setHttpServer(this);
+
+ this.disableSites = componentSettings.getAsBoolean("disable_sites", false);
+
+ transport.httpServerAdapter(new Dispatcher(this));
+ }
+
+ static class Dispatcher implements HttpServerAdapter {
+
+ private final HttpServer server;
+
+ Dispatcher(HttpServer server) {
+ this.server = server;
+ }
+
+ @Override
+ public void dispatchRequest(HttpRequest request, HttpChannel channel) {
+ server.internalDispatchRequest(request, channel);
+ }
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ transport.start();
+ if (logger.isInfoEnabled()) {
+ logger.info("{}", transport.boundAddress());
+ }
+ nodeService.putAttribute("http_address", transport.boundAddress().publishAddress().toString());
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ nodeService.removeAttribute("http_address");
+ transport.stop();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ transport.close();
+ }
+
+ public HttpInfo info() {
+ return transport.info();
+ }
+
+ public HttpStats stats() {
+ return transport.stats();
+ }
+
+ public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) {
+ if (request.rawPath().startsWith("/_plugin/")) {
+ RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
+ filterChain.continueProcessing(request, channel);
+ return;
+ }
+ restController.dispatchRequest(request, channel);
+ }
+
+ class PluginSiteFilter extends RestFilter {
+
+ @Override
+ public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) {
+ handlePluginSite((HttpRequest) request, (HttpChannel) channel);
+ }
+ }
+
+ void handlePluginSite(HttpRequest request, HttpChannel channel) {
+ if (disableSites) {
+ channel.sendResponse(new StringRestResponse(FORBIDDEN));
+ return;
+ }
+ if (request.method() == RestRequest.Method.OPTIONS) {
+ // for an OPTIONS request, simply send OK by default (the Access-Control-Allow-Origin header gets added automatically)
+ StringRestResponse response = new StringRestResponse(OK);
+ channel.sendResponse(response);
+ return;
+ }
+ if (request.method() != RestRequest.Method.GET) {
+ channel.sendResponse(new StringRestResponse(FORBIDDEN));
+ return;
+ }
+ // TODO for a "/_plugin" endpoint, we should have a page that lists all the plugins?
+
+ String path = request.rawPath().substring("/_plugin/".length());
+ int i1 = path.indexOf('/');
+ String pluginName;
+ String sitePath;
+ if (i1 == -1) {
+ pluginName = path;
+ sitePath = null;
+ // If the trailing / is missing, redirect to the right page (issue #2654)
+ channel.sendResponse(new HttpRedirectRestResponse(request.rawPath() + "/"));
+ return;
+ } else {
+ pluginName = path.substring(0, i1);
+ sitePath = path.substring(i1 + 1);
+ }
+
+ if (sitePath.length() == 0) {
+ sitePath = "/index.html";
+ }
+
+ // Convert file separators.
+ sitePath = sitePath.replace('/', File.separatorChar);
+
+ // this is a plugin provided site, serve it as static files from the plugin location
+ File siteFile = new File(new File(environment.pluginsFile(), pluginName), "_site");
+ File file = new File(siteFile, sitePath);
+ if (!file.exists() || file.isHidden()) {
+ channel.sendResponse(new StringRestResponse(NOT_FOUND));
+ return;
+ }
+ if (!file.isFile()) {
+ // Not a regular file: if it's not a directory either, send a 403
+ if (!file.isDirectory()) {
+ channel.sendResponse(new StringRestResponse(FORBIDDEN));
+ return;
+ }
+ // We don't serve directory listings, but if index.html exists in the directory we should serve it
+ file = new File(file, "index.html");
+ if (!file.exists() || file.isHidden() || !file.isFile()) {
+ channel.sendResponse(new StringRestResponse(FORBIDDEN));
+ return;
+ }
+ }
+ if (!file.getAbsolutePath().startsWith(siteFile.getAbsolutePath())) {
+ channel.sendResponse(new StringRestResponse(FORBIDDEN));
+ return;
+ }
+ try {
+ byte[] data = Streams.copyToByteArray(file);
+ channel.sendResponse(new BytesRestResponse(data, guessMimeType(sitePath)));
+ } catch (IOException e) {
+ channel.sendResponse(new StringRestResponse(INTERNAL_SERVER_ERROR));
+ }
+ }
+
+ // TODO: Don't respond with a mime type that violates the request's Accept header
+ private String guessMimeType(String path) {
+ int lastDot = path.lastIndexOf('.');
+ if (lastDot == -1) {
+ return "";
+ }
+ String extension = path.substring(lastDot + 1).toLowerCase(Locale.ROOT);
+ String mimeType = DEFAULT_MIME_TYPES.get(extension);
+ if (mimeType == null) {
+ return "";
+ }
+ return mimeType;
+ }
+
+ static {
+ // This is not an exhaustive list, just the most common types; extend this map to add more.
+ Map<String, String> mimeTypes = new HashMap<String, String>();
+ mimeTypes.put("txt", "text/plain");
+ mimeTypes.put("css", "text/css");
+ mimeTypes.put("csv", "text/csv");
+ mimeTypes.put("htm", "text/html");
+ mimeTypes.put("html", "text/html");
+ mimeTypes.put("xml", "text/xml");
+ mimeTypes.put("js", "text/javascript"); // Technically it should be application/javascript (RFC 4329), but IE8 struggles with that
+ mimeTypes.put("xhtml", "application/xhtml+xml");
+ mimeTypes.put("json", "application/json");
+ mimeTypes.put("pdf", "application/pdf");
+ mimeTypes.put("zip", "application/zip");
+ mimeTypes.put("tar", "application/x-tar");
+ mimeTypes.put("gif", "image/gif");
+ mimeTypes.put("jpeg", "image/jpeg");
+ mimeTypes.put("jpg", "image/jpeg");
+ mimeTypes.put("tiff", "image/tiff");
+ mimeTypes.put("tif", "image/tiff");
+ mimeTypes.put("png", "image/png");
+ mimeTypes.put("svg", "image/svg+xml");
+ mimeTypes.put("ico", "image/vnd.microsoft.icon");
+ mimeTypes.put("mp3", "audio/mpeg");
+ DEFAULT_MIME_TYPES = ImmutableMap.copyOf(mimeTypes);
+ }
+
+ public static final Map<String, String> DEFAULT_MIME_TYPES;
+}
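
One caveat in handlePluginSite: the containment check compares getAbsolutePath() strings, and File.getAbsolutePath() does not resolve ".." segments, so whether the check actually blocks traversal depends on the HTTP layer normalizing dot segments first. The usual hardening for this pattern is a canonical-path comparison; a small JDK-only sketch (not part of this diff):

    import java.io.File;
    import java.io.IOException;

    final class PathContainment {

        // true only if 'file' really lives under 'root' once ".." segments and symlinks are resolved
        static boolean isContained(File root, File file) throws IOException {
            String rootPath = root.getCanonicalPath() + File.separator;
            return file.getCanonicalPath().startsWith(rootPath);
        }
    }
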
diff --git a/src/main/java/org/elasticsearch/http/HttpServerAdapter.java b/src/main/java/org/elasticsearch/http/HttpServerAdapter.java
new file mode 100644
index 0000000..a73456f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpServerAdapter.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+/**
+ *
+ */
+public interface HttpServerAdapter {
+
+ void dispatchRequest(HttpRequest request, HttpChannel channel);
+}
diff --git a/src/main/java/org/elasticsearch/http/HttpServerModule.java b/src/main/java/org/elasticsearch/http/HttpServerModule.java
new file mode 100644
index 0000000..9527a06
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpServerModule.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.netty.NettyHttpServerTransportModule;
+
+/**
+ *
+ */
+public class HttpServerModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public HttpServerModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(Modules.createModule(settings.getAsClass("http.type", NettyHttpServerTransportModule.class, "org.elasticsearch.http.", "HttpServerTransportModule"), settings));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ protected void configure() {
+ bind(HttpServer.class).asEagerSingleton();
+ }
+}
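
The settings.getAsClass(...) call resolves the http.type setting: a fully qualified class name is used verbatim, while a short name is expanded against the "org.elasticsearch.http." prefix and "HttpServerTransportModule" suffix (which is how the Netty default resolves). A sketch of overriding it explicitly; the value shown is simply the default spelled out:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class HttpTypeSettingSketch {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    // fully qualified names are taken as-is by getAsClass
                    .put("http.type", "org.elasticsearch.http.netty.NettyHttpServerTransportModule")
                    .build();
            System.out.println(settings.get("http.type"));
        }
    }
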
diff --git a/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/src/main/java/org/elasticsearch/http/HttpServerTransport.java
new file mode 100644
index 0000000..fa75f1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpServerTransport.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+
+/**
+ *
+ */
+public interface HttpServerTransport extends LifecycleComponent<HttpServerTransport> {
+
+ BoundTransportAddress boundAddress();
+
+ HttpInfo info();
+
+ HttpStats stats();
+
+ void httpServerAdapter(HttpServerAdapter httpServerAdapter);
+}
diff --git a/src/main/java/org/elasticsearch/http/HttpStats.java b/src/main/java/org/elasticsearch/http/HttpStats.java
new file mode 100644
index 0000000..7cbe8a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/HttpStats.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+public class HttpStats implements Streamable, ToXContent {
+
+ private long serverOpen;
+ private long totalOpen;
+
+ HttpStats() {
+ }
+
+ public HttpStats(long serverOpen, long totalOpen) {
+ this.serverOpen = serverOpen;
+ this.totalOpen = totalOpen;
+ }
+
+ public long getServerOpen() {
+ return this.serverOpen;
+ }
+
+ public long getTotalOpen() {
+ return this.totalOpen;
+ }
+
+ public static HttpStats readHttpStats(StreamInput in) throws IOException {
+ HttpStats stats = new HttpStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ serverOpen = in.readVLong();
+ totalOpen = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(serverOpen);
+ out.writeVLong(totalOpen);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString HTTP = new XContentBuilderString("http");
+ static final XContentBuilderString CURRENT_OPEN = new XContentBuilderString("current_open");
+ static final XContentBuilderString TOTAL_OPENED = new XContentBuilderString("total_opened");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.HTTP);
+ builder.field(Fields.CURRENT_OPEN, serverOpen);
+ builder.field(Fields.TOTAL_OPENED, totalOpen);
+ builder.endObject();
+ return builder;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java b/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java
new file mode 100644
index 0000000..516bf1b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/netty/HttpRequestHandler.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty;
+
+import org.jboss.netty.channel.*;
+import org.jboss.netty.handler.codec.http.HttpRequest;
+
+
+/**
+ *
+ */
+@ChannelHandler.Sharable
+public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
+
+ private final NettyHttpServerTransport serverTransport;
+
+ public HttpRequestHandler(NettyHttpServerTransport serverTransport) {
+ this.serverTransport = serverTransport;
+ }
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+ HttpRequest request = (HttpRequest) e.getMessage();
+ // the netty HTTP handling always copies the buffer over to its own buffer, either internally in NioWorker
+ // when reading, or via a cumulation buffer
+ serverTransport.dispatchRequest(new NettyHttpRequest(request, e.getChannel()), new NettyHttpChannel(serverTransport, e.getChannel(), request));
+ super.messageReceived(ctx, e);
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+ serverTransport.exceptionCaught(ctx, e);
+ }
+}
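
For context, a Netty 3 HTTP server pipeline places a handler like this after the HTTP codec. A minimal, illustrative pipeline factory; the aggregator size and handler order mirror common Netty 3 usage rather than this transport's exact wiring:

    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.channel.ChannelPipelineFactory;
    import org.jboss.netty.channel.Channels;
    import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
    import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
    import org.jboss.netty.handler.codec.http.HttpResponseEncoder;

    public class SketchPipelineFactory implements ChannelPipelineFactory {

        private final HttpRequestHandler handler; // the @Sharable handler above

        public SketchPipelineFactory(HttpRequestHandler handler) {
            this.handler = handler;
        }

        @Override
        public ChannelPipeline getPipeline() throws Exception {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("decoder", new HttpRequestDecoder());            // bytes -> HttpRequest
            pipeline.addLast("aggregator", new HttpChunkAggregator(1048576)); // reassemble chunked bodies
            pipeline.addLast("encoder", new HttpResponseEncoder());           // HttpResponse -> bytes
            pipeline.addLast("handler", handler);                             // dispatch into the transport
            return pipeline;
        }
    }
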
diff --git a/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java b/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java
new file mode 100644
index 0000000..eac0760
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.http.HttpChannel;
+import org.elasticsearch.http.HttpException;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.XContentRestResponse;
+import org.elasticsearch.rest.support.RestUtils;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.handler.codec.http.*;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class NettyHttpChannel implements HttpChannel {
+ private final NettyHttpServerTransport transport;
+ private final Channel channel;
+ private final org.jboss.netty.handler.codec.http.HttpRequest request;
+
+ public NettyHttpChannel(NettyHttpServerTransport transport, Channel channel, org.jboss.netty.handler.codec.http.HttpRequest request) {
+ this.transport = transport;
+ this.channel = channel;
+ this.request = request;
+ }
+
+ @Override
+ public void sendResponse(RestResponse response) {
+
+ // Decide whether to close the connection or not.
+ boolean http10 = request.getProtocolVersion().equals(HttpVersion.HTTP_1_0);
+ boolean close =
+ HttpHeaders.Values.CLOSE.equalsIgnoreCase(request.headers().get(HttpHeaders.Names.CONNECTION)) ||
+ (http10 && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(request.headers().get(HttpHeaders.Names.CONNECTION)));
+
+ // Build the response object.
+ HttpResponseStatus status = getStatus(response.status());
+ org.jboss.netty.handler.codec.http.HttpResponse resp;
+ if (http10) {
+ resp = new DefaultHttpResponse(HttpVersion.HTTP_1_0, status);
+ if (!close) {
+ resp.headers().add(HttpHeaders.Names.CONNECTION, "Keep-Alive");
+ }
+ } else {
+ resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
+ }
+ if (RestUtils.isBrowser(request.headers().get(HttpHeaders.Names.USER_AGENT))) {
+ if (transport.settings().getAsBoolean("http.cors.enabled", true)) {
+ // Add support for cross-origin Ajax requests (CORS)
+ resp.headers().add("Access-Control-Allow-Origin", transport.settings().get("http.cors.allow-origin", "*"));
+ if (request.getMethod() == HttpMethod.OPTIONS) {
+ // Allow Ajax requests based on the CORS "preflight" request
+ resp.headers().add("Access-Control-Max-Age", transport.settings().getAsInt("http.cors.max-age", 1728000));
+ resp.headers().add("Access-Control-Allow-Methods", transport.settings().get("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE"));
+ resp.headers().add("Access-Control-Allow-Headers", transport.settings().get("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length"));
+ }
+ }
+ }
+
+ String opaque = request.headers().get("X-Opaque-Id");
+ if (opaque != null) {
+ resp.headers().add("X-Opaque-Id", opaque);
+ }
+
+ // Add all custom headers
+ Map<String, List<String>> customHeaders = response.getHeaders();
+ if (customHeaders != null) {
+ for (Map.Entry<String, List<String>> headerEntry : customHeaders.entrySet()) {
+ for (String headerValue : headerEntry.getValue()) {
+ resp.headers().add(headerEntry.getKey(), headerValue);
+ }
+ }
+ }
+
+ // Convert the response content to a ChannelBuffer.
+ ChannelBuffer buf;
+ try {
+ if (response instanceof XContentRestResponse) {
+ // if it's a builder-based response, and it was created with a CachedStreamOutput, we can release it
+ // after we write the response; an extra copy is only needed when the content is not thread safe
+ XContentBuilder builder = ((XContentRestResponse) response).builder();
+ if (response.contentThreadSafe()) {
+ buf = builder.bytes().toChannelBuffer();
+ } else {
+ buf = builder.bytes().copyBytesArray().toChannelBuffer();
+ }
+ } else {
+ if (response.contentThreadSafe()) {
+ buf = ChannelBuffers.wrappedBuffer(response.content(), response.contentOffset(), response.contentLength());
+ } else {
+ buf = ChannelBuffers.copiedBuffer(response.content(), response.contentOffset(), response.contentLength());
+ }
+ }
+ } catch (IOException e) {
+ throw new HttpException("Failed to convert response to bytes", e);
+ }
+ if (response.prefixContent() != null || response.suffixContent() != null) {
+ ChannelBuffer prefixBuf = ChannelBuffers.EMPTY_BUFFER;
+ if (response.prefixContent() != null) {
+ prefixBuf = ChannelBuffers.copiedBuffer(response.prefixContent(), response.prefixContentOffset(), response.prefixContentLength());
+ }
+ ChannelBuffer suffixBuf = ChannelBuffers.EMPTY_BUFFER;
+ if (response.suffixContent() != null) {
+ suffixBuf = ChannelBuffers.copiedBuffer(response.suffixContent(), response.suffixContentOffset(), response.suffixContentLength());
+ }
+ buf = ChannelBuffers.wrappedBuffer(prefixBuf, buf, suffixBuf);
+ }
+ resp.setContent(buf);
+ resp.headers().add(HttpHeaders.Names.CONTENT_TYPE, response.contentType());
+
+ resp.headers().add(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(buf.readableBytes()));
+
+ if (transport.resetCookies) {
+ String cookieString = request.headers().get(HttpHeaders.Names.COOKIE);
+ if (cookieString != null) {
+ CookieDecoder cookieDecoder = new CookieDecoder();
+ Set<Cookie> cookies = cookieDecoder.decode(cookieString);
+ if (!cookies.isEmpty()) {
+ // Reset the cookies if necessary.
+ CookieEncoder cookieEncoder = new CookieEncoder(true);
+ for (Cookie cookie : cookies) {
+ cookieEncoder.addCookie(cookie);
+ }
+ resp.headers().add(HttpHeaders.Names.SET_COOKIE, cookieEncoder.encode());
+ }
+ }
+ }
+
+ // Write the response.
+ ChannelFuture future = channel.write(resp);
+ // Close the connection after the write operation is done if necessary.
+ if (close) {
+ future.addListener(ChannelFutureListener.CLOSE);
+ }
+ }
+
+ private HttpResponseStatus getStatus(RestStatus status) {
+ switch (status) {
+ case CONTINUE:
+ return HttpResponseStatus.CONTINUE;
+ case SWITCHING_PROTOCOLS:
+ return HttpResponseStatus.SWITCHING_PROTOCOLS;
+ case OK:
+ return HttpResponseStatus.OK;
+ case CREATED:
+ return HttpResponseStatus.CREATED;
+ case ACCEPTED:
+ return HttpResponseStatus.ACCEPTED;
+ case NON_AUTHORITATIVE_INFORMATION:
+ return HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION;
+ case NO_CONTENT:
+ return HttpResponseStatus.NO_CONTENT;
+ case RESET_CONTENT:
+ return HttpResponseStatus.RESET_CONTENT;
+ case PARTIAL_CONTENT:
+ return HttpResponseStatus.PARTIAL_CONTENT;
+ case MULTI_STATUS:
+ // no matching Netty status constant for 207 Multi-Status; fall back to 500
+ return HttpResponseStatus.INTERNAL_SERVER_ERROR;
+ case MULTIPLE_CHOICES:
+ return HttpResponseStatus.MULTIPLE_CHOICES;
+ case MOVED_PERMANENTLY:
+ return HttpResponseStatus.MOVED_PERMANENTLY;
+ case FOUND:
+ return HttpResponseStatus.FOUND;
+ case SEE_OTHER:
+ return HttpResponseStatus.SEE_OTHER;
+ case NOT_MODIFIED:
+ return HttpResponseStatus.NOT_MODIFIED;
+ case USE_PROXY:
+ return HttpResponseStatus.USE_PROXY;
+ case TEMPORARY_REDIRECT:
+ return HttpResponseStatus.TEMPORARY_REDIRECT;
+ case BAD_REQUEST:
+ return HttpResponseStatus.BAD_REQUEST;
+ case UNAUTHORIZED:
+ return HttpResponseStatus.UNAUTHORIZED;
+ case PAYMENT_REQUIRED:
+ return HttpResponseStatus.PAYMENT_REQUIRED;
+ case FORBIDDEN:
+ return HttpResponseStatus.FORBIDDEN;
+ case NOT_FOUND:
+ return HttpResponseStatus.NOT_FOUND;
+ case METHOD_NOT_ALLOWED:
+ return HttpResponseStatus.METHOD_NOT_ALLOWED;
+ case NOT_ACCEPTABLE:
+ return HttpResponseStatus.NOT_ACCEPTABLE;
+ case PROXY_AUTHENTICATION:
+ return HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED;
+ case REQUEST_TIMEOUT:
+ return HttpResponseStatus.REQUEST_TIMEOUT;
+ case CONFLICT:
+ return HttpResponseStatus.CONFLICT;
+ case GONE:
+ return HttpResponseStatus.GONE;
+ case LENGTH_REQUIRED:
+ return HttpResponseStatus.LENGTH_REQUIRED;
+ case PRECONDITION_FAILED:
+ return HttpResponseStatus.PRECONDITION_FAILED;
+ case REQUEST_ENTITY_TOO_LARGE:
+ return HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE;
+ case REQUEST_URI_TOO_LONG:
+ return HttpResponseStatus.REQUEST_URI_TOO_LONG;
+ case UNSUPPORTED_MEDIA_TYPE:
+ return HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE;
+ case REQUESTED_RANGE_NOT_SATISFIED:
+ return HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE;
+ case EXPECTATION_FAILED:
+ return HttpResponseStatus.EXPECTATION_FAILED;
+ case UNPROCESSABLE_ENTITY:
+ return HttpResponseStatus.BAD_REQUEST;
+ case LOCKED:
+ return HttpResponseStatus.BAD_REQUEST;
+ case FAILED_DEPENDENCY:
+ return HttpResponseStatus.BAD_REQUEST;
+ case INTERNAL_SERVER_ERROR:
+ return HttpResponseStatus.INTERNAL_SERVER_ERROR;
+ case NOT_IMPLEMENTED:
+ return HttpResponseStatus.NOT_IMPLEMENTED;
+ case BAD_GATEWAY:
+ return HttpResponseStatus.BAD_GATEWAY;
+ case SERVICE_UNAVAILABLE:
+ return HttpResponseStatus.SERVICE_UNAVAILABLE;
+ case GATEWAY_TIMEOUT:
+ return HttpResponseStatus.GATEWAY_TIMEOUT;
+ case HTTP_VERSION_NOT_SUPPORTED:
+ return HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED;
+ default:
+ return HttpResponseStatus.INTERNAL_SERVER_ERROR;
+ }
+ }
+}
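
A note on the keep-alive decision at the top of sendResponse above: HTTP/1.1 connections stay open unless the client sends "Connection: close", while HTTP/1.0 connections close unless the client explicitly sends "Connection: keep-alive". A minimal, self-contained sketch of that same rule, with plain strings standing in for the Netty request types (the class and method names here are hypothetical):

    // Sketch of the close/keep-alive rule; not the actual transport code.
    public final class KeepAliveRule {

        static boolean shouldClose(boolean http10, String connectionHeader) {
            // an explicit "close" always closes; HTTP/1.0 additionally needs
            // an explicit "keep-alive" to stay open
            return "close".equalsIgnoreCase(connectionHeader)
                    || (http10 && !"keep-alive".equalsIgnoreCase(connectionHeader));
        }

        public static void main(String[] args) {
            System.out.println(shouldClose(false, null));        // false: HTTP/1.1 defaults to keep-alive
            System.out.println(shouldClose(true, null));         // true: HTTP/1.0 defaults to close
            System.out.println(shouldClose(true, "Keep-Alive")); // false: explicit opt-in
            System.out.println(shouldClose(false, "close"));     // true: explicit opt-out
        }
    }
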
diff --git a/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java b/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java
new file mode 100644
index 0000000..682eb15
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ChannelBufferBytesReference;
+import org.elasticsearch.http.HttpRequest;
+import org.elasticsearch.rest.support.RestUtils;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import java.net.SocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ *
+ */
+public class NettyHttpRequest extends HttpRequest {
+
+ private final org.jboss.netty.handler.codec.http.HttpRequest request;
+ private final Channel channel;
+ private final Map<String, String> params;
+ private final String rawPath;
+ private final BytesReference content;
+
+ public NettyHttpRequest(org.jboss.netty.handler.codec.http.HttpRequest request, Channel channel) {
+ this.request = request;
+ this.channel = channel;
+ this.params = new HashMap<String, String>();
+ if (request.getContent().readable()) {
+ this.content = new ChannelBufferBytesReference(request.getContent());
+ } else {
+ this.content = BytesArray.EMPTY;
+ }
+
+ String uri = request.getUri();
+ int pathEndPos = uri.indexOf('?');
+ if (pathEndPos < 0) {
+ this.rawPath = uri;
+ } else {
+ this.rawPath = uri.substring(0, pathEndPos);
+ RestUtils.decodeQueryString(uri, pathEndPos + 1, params);
+ }
+ }
+
+ @Override
+ public Method method() {
+ HttpMethod httpMethod = request.getMethod();
+ if (httpMethod == HttpMethod.GET) {
+ return Method.GET;
+ }
+
+ if (httpMethod == HttpMethod.POST) {
+ return Method.POST;
+ }
+
+ if (httpMethod == HttpMethod.PUT) {
+ return Method.PUT;
+ }
+
+ if (httpMethod == HttpMethod.DELETE) {
+ return Method.DELETE;
+ }
+
+ if (httpMethod == HttpMethod.HEAD) {
+ return Method.HEAD;
+ }
+
+ if (httpMethod == HttpMethod.OPTIONS) {
+ return Method.OPTIONS;
+ }
+
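+ // any other method falls back to GET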
+ return Method.GET;
+ }
+
+ @Override
+ public String uri() {
+ return request.getUri();
+ }
+
+ @Override
+ public String rawPath() {
+ return rawPath;
+ }
+
+ @Override
+ public Map<String, String> params() {
+ return params;
+ }
+
+ @Override
+ public boolean hasContent() {
+ return content.length() > 0;
+ }
+
+ @Override
+ public boolean contentUnsafe() {
+ // Netty http decoder always copies over the http content
+ return false;
+ }
+
+ @Override
+ public BytesReference content() {
+ return content;
+ }
+
+ /**
+ * Returns the remote address where this rest request channel is "connected to". The
+ * returned {@link SocketAddress} is meant to be down-cast to a more
+ * concrete type such as {@link java.net.InetSocketAddress} to retrieve
+ * the detailed information.
+ */
+ @Override
+ public SocketAddress getRemoteAddress() {
+ return channel.getRemoteAddress();
+ }
+
+ /**
+ * Returns the local address where this request channel is bound to. The returned
+ * {@link SocketAddress} is meant to be down-cast to a more concrete
+ * type such as {@link java.net.InetSocketAddress} to retrieve the detailed
+ * information.
+ */
+ @Override
+ public SocketAddress getLocalAddress() {
+ return channel.getLocalAddress();
+ }
+
+ @Override
+ public String header(String name) {
+ return request.headers().get(name);
+ }
+
+ @Override
+ public Iterable<Map.Entry<String, String>> headers() {
+ return request.headers().entries();
+ }
+
+ @Override
+ public boolean hasParam(String key) {
+ return params.containsKey(key);
+ }
+
+ @Override
+ public String param(String key) {
+ return params.get(key);
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ String value = params.get(key);
+ if (value == null) {
+ return defaultValue;
+ }
+ return value;
+ }
+}
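
The constructor above splits the request URI at the first '?': everything before it becomes the raw path, the remainder is decoded into the params map. A minimal sketch of that split, where the tiny decodeQueryString below is a simplified stand-in for RestUtils.decodeQueryString (notably, it skips percent-decoding, which the real helper performs):

    import java.util.HashMap;
    import java.util.Map;

    public final class UriSplitExample {

        // simplified stand-in for RestUtils.decodeQueryString; no percent-decoding
        static void decodeQueryString(String uri, int from, Map<String, String> params) {
            for (String pair : uri.substring(from).split("&")) {
                int eq = pair.indexOf('=');
                if (eq >= 0) {
                    params.put(pair.substring(0, eq), pair.substring(eq + 1));
                } else if (!pair.isEmpty()) {
                    params.put(pair, "");
                }
            }
        }

        public static void main(String[] args) {
            String uri = "/twitter/tweet/_search?q=user:kimchy&size=10";
            Map<String, String> params = new HashMap<String, String>();
            int pathEndPos = uri.indexOf('?');
            String rawPath = pathEndPos < 0 ? uri : uri.substring(0, pathEndPos);
            if (pathEndPos >= 0) {
                decodeQueryString(uri, pathEndPos + 1, params);
            }
            System.out.println(rawPath); // /twitter/tweet/_search
            System.out.println(params);  // {q=user:kimchy, size=10} (iteration order not guaranteed)
        }
    }
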
diff --git a/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java
new file mode 100644
index 0000000..d589947
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.netty.NettyStaticSetup;
+import org.elasticsearch.common.netty.OpenChannelsHandler;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.NetworkExceptionHelper;
+import org.elasticsearch.common.transport.PortsRange;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.http.*;
+import org.elasticsearch.http.HttpRequest;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.transport.BindTransportException;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.http.*;
+import org.jboss.netty.handler.timeout.ReadTimeoutException;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.network.NetworkService.TcpSettings.*;
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ *
+ */
+public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport {
+
+ static {
+ NettyStaticSetup.setup();
+ }
+
+ private final NetworkService networkService;
+
+ final ByteSizeValue maxContentLength;
+ final ByteSizeValue maxInitialLineLength;
+ final ByteSizeValue maxHeaderSize;
+ final ByteSizeValue maxChunkSize;
+
+ private final int workerCount;
+
+ private final boolean blockingServer;
+
+ final boolean compression;
+
+ private final int compressionLevel;
+
+ final boolean resetCookies;
+
+ private final String port;
+
+ private final String bindHost;
+
+ private final String publishHost;
+
+ private final Boolean tcpNoDelay;
+
+ private final Boolean tcpKeepAlive;
+
+ private final Boolean reuseAddress;
+
+ private final ByteSizeValue tcpSendBufferSize;
+ private final ByteSizeValue tcpReceiveBufferSize;
+ private final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory;
+
+ final ByteSizeValue maxCumulationBufferCapacity;
+ final int maxCompositeBufferComponents;
+
+ private volatile ServerBootstrap serverBootstrap;
+
+ private volatile BoundTransportAddress boundAddress;
+
+ private volatile Channel serverChannel;
+
+ OpenChannelsHandler serverOpenChannels;
+
+ private volatile HttpServerAdapter httpServerAdapter;
+
+ @Inject
+ public NettyHttpServerTransport(Settings settings, NetworkService networkService) {
+ super(settings);
+ this.networkService = networkService;
+
+ if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
+ System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
+ }
+
+ ByteSizeValue maxContentLength = componentSettings.getAsBytesSize("max_content_length", settings.getAsBytesSize("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
+ this.maxChunkSize = componentSettings.getAsBytesSize("max_chunk_size", settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
+ this.maxHeaderSize = componentSettings.getAsBytesSize("max_header_size", settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
+ this.maxInitialLineLength = componentSettings.getAsBytesSize("max_initial_line_length", settings.getAsBytesSize("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
+ // don't reset cookies by default, since we don't really need to
+ // note: cookie parsing was fixed in netty 3.5.1 regarding stack allocation, but we still don't need cookies for now
+ this.resetCookies = componentSettings.getAsBoolean("reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
+ this.maxCumulationBufferCapacity = componentSettings.getAsBytesSize("max_cumulation_buffer_capacity", null);
+ this.maxCompositeBufferComponents = componentSettings.getAsInt("max_composite_buffer_components", -1);
+ this.workerCount = componentSettings.getAsInt("worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
+ this.blockingServer = settings.getAsBoolean("http.blocking_server", settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
+ this.port = componentSettings.get("port", settings.get("http.port", "9200-9300"));
+ this.bindHost = componentSettings.get("bind_host", settings.get("http.bind_host", settings.get("http.host")));
+ this.publishHost = componentSettings.get("publish_host", settings.get("http.publish_host", settings.get("http.host")));
+ this.tcpNoDelay = componentSettings.getAsBoolean("tcp_no_delay", settings.getAsBoolean(TCP_NO_DELAY, true));
+ this.tcpKeepAlive = componentSettings.getAsBoolean("tcp_keep_alive", settings.getAsBoolean(TCP_KEEP_ALIVE, true));
+ this.reuseAddress = componentSettings.getAsBoolean("reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
+ this.tcpSendBufferSize = componentSettings.getAsBytesSize("tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
+ this.tcpReceiveBufferSize = componentSettings.getAsBytesSize("tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
+
+ long defaultReceiverPredictor = 512 * 1024;
+ if (JvmInfo.jvmInfo().mem().directMemoryMax().bytes() > 0) {
+ // we can guess a better default...
+ long l = (long) ((0.3 * JvmInfo.jvmInfo().mem().directMemoryMax().bytes()) / workerCount);
+ defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
+ }
+
+ // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for netty's default values; we can use higher ones here, even a fixed size
+ ByteSizeValue receivePredictorMin = componentSettings.getAsBytesSize("receive_predictor_min", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
+ ByteSizeValue receivePredictorMax = componentSettings.getAsBytesSize("receive_predictor_max", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
+ if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
+ receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
+ } else {
+ receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
+ }
+
+ this.compression = settings.getAsBoolean("http.compression", false);
+ this.compressionLevel = settings.getAsInt("http.compression_level", 6);
+
+ // validate max content length
+ if (maxContentLength.bytes() > Integer.MAX_VALUE) {
+ logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
+ maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
+ }
+ this.maxContentLength = maxContentLength;
+
+ logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], receive_predictor[{}->{}]",
+ maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictorMin, receivePredictorMax);
+ }
+
+ public Settings settings() {
+ return this.settings;
+ }
+
+ public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {
+ this.httpServerAdapter = httpServerAdapter;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ this.serverOpenChannels = new OpenChannelsHandler(logger);
+
+ if (blockingServer) {
+ serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker"))
+ ));
+ } else {
+ serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker")),
+ workerCount));
+ }
+
+ serverBootstrap.setPipelineFactory(new MyChannelPipelineFactory(this));
+
+ if (tcpNoDelay != null) {
+ serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
+ }
+ if (tcpKeepAlive != null) {
+ serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
+ }
+ if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
+ serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
+ }
+ if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
+ serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
+ }
+ serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+ serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+ if (reuseAddress != null) {
+ serverBootstrap.setOption("reuseAddress", reuseAddress);
+ serverBootstrap.setOption("child.reuseAddress", reuseAddress);
+ }
+
+ // Bind and start to accept incoming connections.
+ InetAddress hostAddressX;
+ try {
+ hostAddressX = networkService.resolveBindHostAddress(bindHost);
+ } catch (IOException e) {
+ throw new BindHttpException("Failed to resolve host [" + bindHost + "]", e);
+ }
+ final InetAddress hostAddress = hostAddressX;
+
+ PortsRange portsRange = new PortsRange(port);
+ final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
+ boolean success = portsRange.iterate(new PortsRange.PortCallback() {
+ @Override
+ public boolean onPortNumber(int portNumber) {
+ try {
+ serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
+ } catch (Exception e) {
+ lastException.set(e);
+ return false;
+ }
+ return true;
+ }
+ });
+ if (!success) {
+ throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
+ }
+
+ InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress();
+ InetSocketAddress publishAddress;
+ try {
+ publishAddress = new InetSocketAddress(networkService.resolvePublishHostAddress(publishHost), boundAddress.getPort());
+ } catch (Exception e) {
+ throw new BindTransportException("Failed to resolve publish address", e);
+ }
+ this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ if (serverChannel != null) {
+ serverChannel.close().awaitUninterruptibly();
+ serverChannel = null;
+ }
+
+ if (serverOpenChannels != null) {
+ serverOpenChannels.close();
+ serverOpenChannels = null;
+ }
+
+ if (serverBootstrap != null) {
+ serverBootstrap.releaseExternalResources();
+ serverBootstrap = null;
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ public BoundTransportAddress boundAddress() {
+ return this.boundAddress;
+ }
+
+ @Override
+ public HttpInfo info() {
+ return new HttpInfo(boundAddress(), maxContentLength.bytes());
+ }
+
+ @Override
+ public HttpStats stats() {
+ OpenChannelsHandler channels = serverOpenChannels;
+ return new HttpStats(channels == null ? 0 : channels.numberOfOpenChannels(), channels == null ? 0 : channels.totalChannels());
+ }
+
+ void dispatchRequest(HttpRequest request, HttpChannel channel) {
+ httpServerAdapter.dispatchRequest(request, channel);
+ }
+
+ void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+ if (e.getCause() instanceof ReadTimeoutException) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Connection timeout [{}]", ctx.getChannel().getRemoteAddress());
+ }
+ ctx.getChannel().close();
+ } else {
+ if (!lifecycle.started()) {
+ // ignore
+ return;
+ }
+ if (!NetworkExceptionHelper.isCloseConnectionException(e.getCause())) {
+ logger.warn("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel());
+ ctx.getChannel().close();
+ } else {
+ logger.debug("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel());
+ ctx.getChannel().close();
+ }
+ }
+ }
+
+ static class MyChannelPipelineFactory implements ChannelPipelineFactory {
+
+ private final NettyHttpServerTransport transport;
+
+ private final HttpRequestHandler requestHandler;
+
+ MyChannelPipelineFactory(NettyHttpServerTransport transport) {
+ this.transport = transport;
+ this.requestHandler = new HttpRequestHandler(transport);
+ }
+
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ ChannelPipeline pipeline = Channels.pipeline();
+ pipeline.addLast("openChannels", transport.serverOpenChannels);
+ HttpRequestDecoder requestDecoder = new HttpRequestDecoder(
+ (int) transport.maxInitialLineLength.bytes(),
+ (int) transport.maxHeaderSize.bytes(),
+ (int) transport.maxChunkSize.bytes()
+ );
+ if (transport.maxCumulationBufferCapacity != null) {
+ if (transport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
+ requestDecoder.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
+ } else {
+ requestDecoder.setMaxCumulationBufferCapacity((int) transport.maxCumulationBufferCapacity.bytes());
+ }
+ }
+ if (transport.maxCompositeBufferComponents != -1) {
+ requestDecoder.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
+ }
+ pipeline.addLast("decoder", requestDecoder);
+ if (transport.compression) {
+ pipeline.addLast("decoder_compress", new HttpContentDecompressor());
+ }
+ HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.bytes());
+ if (transport.maxCompositeBufferComponents != -1) {
+ httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
+ }
+ pipeline.addLast("aggregator", httpChunkAggregator);
+ pipeline.addLast("encoder", new HttpResponseEncoder());
+ if (transport.compression) {
+ pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));
+ }
+ pipeline.addLast("handler", requestHandler);
+ return pipeline;
+ }
+ }
+}
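
The bind loop in doStart above walks a port range such as "9200-9300" and accepts the first port that binds, only failing once the whole range is exhausted. A minimal sketch of that pattern with java.net.ServerSocket standing in for the Netty bootstrap (the class name is hypothetical, and the loop is a simplified stand-in for PortsRange.iterate):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public final class PortRangeBindExample {

        public static void main(String[] args) throws IOException {
            int first = 9200, last = 9300;
            ServerSocket bound = null;
            for (int port = first; port <= last && bound == null; port++) {
                ServerSocket candidate = new ServerSocket();
                try {
                    candidate.bind(new InetSocketAddress("127.0.0.1", port));
                    bound = candidate;
                } catch (IOException e) {
                    candidate.close(); // port taken, try the next one
                }
            }
            if (bound == null) {
                throw new IOException("Failed to bind to [" + first + "-" + last + "]");
            }
            System.out.println("bound to " + bound.getLocalSocketAddress());
            bound.close();
        }
    }
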
diff --git a/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java
new file mode 100644
index 0000000..40ea150
--- /dev/null
+++ b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransportModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.http.HttpServerTransport;
+
+/**
+ *
+ */
+public class NettyHttpServerTransportModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(HttpServerTransport.class).to(NettyHttpServerTransport.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
new file mode 100644
index 0000000..45f2333
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public abstract class AbstractIndexComponent implements IndexComponent {
+
+ protected final ESLogger logger;
+
+ protected final Index index;
+
+ protected final Settings indexSettings;
+
+ protected final Settings componentSettings;
+
+ /**
+ * Constructs a new index component, with the index name and its settings.
+ *
+ * @param index The index name
+ * @param indexSettings The index settings
+ */
+ protected AbstractIndexComponent(Index index, @IndexSettings Settings indexSettings) {
+ this.index = index;
+ this.indexSettings = indexSettings;
+ this.componentSettings = indexSettings.getComponentSettings(getClass());
+
+ this.logger = Loggers.getLogger(getClass(), indexSettings, index);
+ }
+
+ /**
+ * Constructs a new index component, with the index name and its settings, as well as settings prefix.
+ *
+ * @param index The index name
+ * @param indexSettings The index settings
+ * @param prefixSettings A settings prefix (like "com.mycompany") to simplify extracting the component settings
+ */
+ protected AbstractIndexComponent(Index index, @IndexSettings Settings indexSettings, String prefixSettings) {
+ this.index = index;
+ this.indexSettings = indexSettings;
+ this.componentSettings = indexSettings.getComponentSettings(prefixSettings, getClass());
+
+ this.logger = Loggers.getLogger(getClass(), indexSettings, index);
+ }
+
+ @Override
+ public Index index() {
+ return this.index;
+ }
+
+ public String nodeName() {
+ return indexSettings.get("name", "");
+ }
+} \ No newline at end of file
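
The componentSettings field above gives each component a view of only the index settings under its own prefix. A minimal sketch of that idea, where the map-based helper is a simplified stand-in for Settings.getComponentSettings (which derives the prefix from the component's package; the prefix and keys below are illustrative only):

    import java.util.HashMap;
    import java.util.Map;

    public final class ComponentSettingsExample {

        // simplified stand-in for Settings.getComponentSettings
        static Map<String, String> componentSettings(Map<String, String> indexSettings, String prefix) {
            Map<String, String> out = new HashMap<String, String>();
            for (Map.Entry<String, String> e : indexSettings.entrySet()) {
                if (e.getKey().startsWith(prefix + ".")) {
                    out.put(e.getKey().substring(prefix.length() + 1), e.getValue());
                }
            }
            return out;
        }

        public static void main(String[] args) {
            Map<String, String> indexSettings = new HashMap<String, String>();
            indexSettings.put("index.merge.policy.max_merge_at_once", "10");
            indexSettings.put("index.refresh_interval", "1s");
            // the merge-policy component only sees keys under its prefix
            System.out.println(componentSettings(indexSettings, "index.merge.policy"));
            // -> {max_merge_at_once=10}
        }
    }
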
diff --git a/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java b/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java
new file mode 100644
index 0000000..9d1bff2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException;
+
+public class AlreadyExpiredException extends ElasticsearchException implements IgnoreOnRecoveryEngineException {
+ private String index;
+ private String type;
+ private String id;
+ private final long timestamp;
+ private final long ttl;
+ private final long now;
+
+ public AlreadyExpiredException(String index, String type, String id, long timestamp, long ttl, long now) {
+ super("already expired [" + index + "]/[" + type + "]/[" + id + "] due to expire at [" + (timestamp + ttl) + "] and was processed at [" + now + "]");
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.timestamp = timestamp;
+ this.ttl = ttl;
+ this.now = now;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public String id() {
+ return id;
+ }
+
+ public long timestamp() {
+ return timestamp;
+ }
+
+ public long ttl() {
+ return ttl;
+ }
+
+ public long now() {
+ return now;
+ }
+} \ No newline at end of file
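
The exception message above encodes the TTL arithmetic: a document indexed at `timestamp` with time-to-live `ttl` is due to expire at `timestamp + ttl`, and the exception is raised when it is processed at or after that point. A minimal sketch of that check (the helper name is hypothetical, and whether the boundary is inclusive is an assumption here, not confirmed by this file):

    public final class TtlExpiryExample {

        static boolean alreadyExpired(long timestamp, long ttl, long now) {
            return timestamp + ttl <= now;
        }

        public static void main(String[] args) {
            long timestamp = 1000000L;
            long ttl = 60000L; // one minute of time-to-live
            System.out.println(alreadyExpired(timestamp, ttl, timestamp + 30000L)); // false: still alive
            System.out.println(alreadyExpired(timestamp, ttl, timestamp + 60000L)); // true: expiry reached
        }
    }
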
diff --git a/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java b/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java
new file mode 100644
index 0000000..422f667
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public interface CloseableIndexComponent {
+
+ /**
+ * Closes the index component.
+ */
+ void close() throws ElasticsearchException;
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/Index.java b/src/main/java/org/elasticsearch/index/Index.java
new file mode 100644
index 0000000..1237050
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/Index.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ *
+ */
+public class Index implements Serializable, Streamable {
+
+ private String name;
+
+ private Index() {
+
+ }
+
+ public Index(String name) {
+ this.name = name.intern();
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String getName() {
+ return name();
+ }
+
+ @Override
+ public String toString() {
+ return "[" + name + "]";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null) return false;
+ Index index1 = (Index) o;
+ return name.equals(index1.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return name.hashCode();
+ }
+
+ public static Index readIndexName(StreamInput in) throws IOException {
+ Index index = new Index();
+ index.readFrom(in);
+ return index;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString().intern();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ }
+}
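
Index above follows the Streamable pattern: writeTo serializes just the name, and readIndexName/readFrom reconstruct (and intern) it on the other side. A minimal sketch of that round trip, with java.io.Data{Output,Input}Stream standing in for ES's Stream{Output,Input} (which this file does not define):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class StreamableRoundTrip {

        public static void main(String[] args) throws IOException {
            // write side, mirroring Index.writeTo: just the name
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            out.writeUTF("twitter");
            out.close();

            // read side, mirroring Index.readIndexName / readFrom
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            String name = in.readUTF().intern(); // readFrom interns to share identical name strings
            System.out.println("[" + name + "]"); // matches Index#toString -> [twitter]
        }
    }
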
diff --git a/src/main/java/org/elasticsearch/index/IndexComponent.java b/src/main/java/org/elasticsearch/index/IndexComponent.java
new file mode 100644
index 0000000..c2959c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/IndexComponent.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+/**
+ *
+ */
+public interface IndexComponent {
+
+ Index index();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/IndexException.java b/src/main/java/org/elasticsearch/index/IndexException.java
new file mode 100644
index 0000000..a3d18cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/IndexException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class IndexException extends ElasticsearchException {
+
+ private final Index index;
+
+ public IndexException(Index index, String msg) {
+ this(index, msg, null);
+ }
+
+ public IndexException(Index index, String msg, Throwable cause) {
+ this(index, true, msg, cause);
+ }
+
+ protected IndexException(Index index, boolean withSpace, String msg, Throwable cause) {
+ super("[" + (index == null ? "_na" : index.name()) + "]" + (withSpace ? " " : "") + msg, cause);
+ this.index = index;
+ }
+
+ public Index index() {
+ return index;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/IndexModule.java b/src/main/java/org/elasticsearch/index/IndexModule.java
new file mode 100644
index 0000000..030a38a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/IndexModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.service.InternalIndexService;
+
+/**
+ *
+ */
+public class IndexModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public IndexModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndexService.class).to(InternalIndexService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/IndexNameModule.java b/src/main/java/org/elasticsearch/index/IndexNameModule.java
new file mode 100644
index 0000000..0a2ee1c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/IndexNameModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class IndexNameModule extends AbstractModule {
+
+ private final Index index;
+
+ public IndexNameModule(Index index) {
+ this.index = index;
+ }
+
+ @Override
+ protected void configure() {
+ bind(Index.class).toInstance(index);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java b/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java
new file mode 100644
index 0000000..54f700e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class IndexShardAlreadyExistsException extends ElasticsearchException {
+
+ public IndexShardAlreadyExistsException(String message) {
+ super(message);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/IndexShardMissingException.java b/src/main/java/org/elasticsearch/index/IndexShardMissingException.java
new file mode 100644
index 0000000..c59dcbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/IndexShardMissingException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class IndexShardMissingException extends IndexShardException {
+
+ public IndexShardMissingException(ShardId shardId) {
+ super(shardId, "missing");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/LocalNodeId.java b/src/main/java/org/elasticsearch/index/LocalNodeId.java
new file mode 100644
index 0000000..a045636
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/LocalNodeId.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.ElementType.PARAMETER;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ *
+ */
+@BindingAnnotation
+@Target({FIELD, PARAMETER})
+@Retention(RUNTIME)
+@Documented
+public @interface LocalNodeId {
+}
diff --git a/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java b/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java
new file mode 100644
index 0000000..82e36cd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class LocalNodeIdModule extends AbstractModule {
+
+ private final String localNodeId;
+
+ public LocalNodeIdModule(String localNodeId) {
+ this.localNodeId = localNodeId;
+ }
+
+ @Override
+ protected void configure() {
+ bind(String.class).annotatedWith(LocalNodeId.class).toInstance(localNodeId);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/VersionType.java b/src/main/java/org/elasticsearch/index/VersionType.java
new file mode 100644
index 0000000..80eff25
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/VersionType.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.lucene.uid.Versions;
+
+/**
+ *
+ */
+public enum VersionType {
+ INTERNAL((byte) 0) {
+ /**
+ * - always returns false if currentVersion == {@link Versions#NOT_SET}
+ * - always accepts expectedVersion == {@link Versions#MATCH_ANY}
+ * - if expectedVersion is set, always conflict if currentVersion == {@link Versions#NOT_FOUND}
+ */
+ @Override
+ public boolean isVersionConflict(long currentVersion, long expectedVersion) {
+ return currentVersion != Versions.NOT_SET && expectedVersion != Versions.MATCH_ANY
+ && (currentVersion == Versions.NOT_FOUND || currentVersion != expectedVersion);
+ }
+
+ @Override
+ public long updateVersion(long currentVersion, long expectedVersion) {
+ return (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+ }
+
+ },
+ EXTERNAL((byte) 1) {
+ /**
+ * - always returns false if currentVersion == {@link Versions#NOT_SET}
+ * - always conflict if expectedVersion == {@link Versions#MATCH_ANY} (we need something to set)
+ * - accepts currentVersion == {@link Versions#NOT_FOUND}
+ */
+ @Override
+ public boolean isVersionConflict(long currentVersion, long expectedVersion) {
+ return currentVersion != Versions.NOT_SET && currentVersion != Versions.NOT_FOUND
+ && (expectedVersion == Versions.MATCH_ANY || currentVersion >= expectedVersion);
+ }
+
+ @Override
+ public long updateVersion(long currentVersion, long expectedVersion) {
+ return expectedVersion;
+ }
+ };
+
+ private final byte value;
+
+ VersionType(byte value) {
+ this.value = value;
+ }
+
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Checks whether the current version conflicts with the expected version, based on the current version type.
+ *
+ * @return true if the versions conflict, false otherwise
+ */
+ public abstract boolean isVersionConflict(long currentVersion, long expectedVersion);
+
+ /**
+ * Returns the new version for a document, based on its current version and the one specified in the request
+ *
+ * @return new version
+ */
+ public abstract long updateVersion(long currentVersion, long expectedVersion);
+
+ public static VersionType fromString(String versionType) {
+ if ("internal".equals(versionType)) {
+ return INTERNAL;
+ } else if ("external".equals(versionType)) {
+ return EXTERNAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No version type match [" + versionType + "]");
+ }
+
+ public static VersionType fromString(String versionType, VersionType defaultVersionType) {
+ if (versionType == null) {
+ return defaultVersionType;
+ }
+ if ("internal".equals(versionType)) {
+ return INTERNAL;
+ } else if ("external".equals(versionType)) {
+ return EXTERNAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No version type match [" + versionType + "]");
+ }
+
+ public static VersionType fromValue(byte value) {
+ if (value == 0) {
+ return INTERNAL;
+ } else if (value == 1) {
+ return EXTERNAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No version type match [" + value + "]");
+ }
+}
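
To make the two conflict rules above concrete, here is a minimal sketch that inlines them with the sentinel values spelled out. The values MATCH_ANY = 0, NOT_FOUND = -1, NOT_SET = -2 are assumptions matching the Versions constants referenced above, which are not defined in this file:

    public final class VersionTypeExample {

        static final long MATCH_ANY = 0, NOT_FOUND = -1, NOT_SET = -2;

        // mirrors VersionType.INTERNAL.isVersionConflict
        static boolean internalConflict(long current, long expected) {
            return current != NOT_SET && expected != MATCH_ANY
                    && (current == NOT_FOUND || current != expected);
        }

        // mirrors VersionType.EXTERNAL.isVersionConflict
        static boolean externalConflict(long current, long expected) {
            return current != NOT_SET && current != NOT_FOUND
                    && (expected == MATCH_ANY || current >= expected);
        }

        public static void main(String[] args) {
            System.out.println(internalConflict(5, 5)); // false: expected version matches
            System.out.println(internalConflict(5, 4)); // true: stale expected version
            System.out.println(externalConflict(5, 6)); // false: external version moves forward
            System.out.println(externalConflict(5, 5)); // true: external versions must strictly increase
        }
    }
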
diff --git a/src/main/java/org/elasticsearch/index/aliases/IndexAlias.java b/src/main/java/org/elasticsearch/index/aliases/IndexAlias.java
new file mode 100644
index 0000000..01b1a01
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/aliases/IndexAlias.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.aliases;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.compress.CompressedString;
+
+/**
+ *
+ */
+public class IndexAlias {
+
+ private String alias;
+
+ private CompressedString filter;
+
+ private Filter parsedFilter;
+
+ public IndexAlias(String alias, @Nullable CompressedString filter, @Nullable Filter parsedFilter) {
+ this.alias = alias;
+ this.filter = filter;
+ this.parsedFilter = parsedFilter;
+ }
+
+ public String alias() {
+ return alias;
+ }
+
+ @Nullable
+ public CompressedString filter() {
+ return filter;
+ }
+
+ @Nullable
+ public Filter parsedFilter() {
+ return parsedFilter;
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java
new file mode 100644
index 0000000..4a63c29
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.aliases;
+
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.AliasFilterParsingException;
+import org.elasticsearch.indices.InvalidAliasNameException;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ *
+ */
+public class IndexAliasesService extends AbstractIndexComponent implements Iterable<IndexAlias> {
+
+ private final IndexQueryParserService indexQueryParser;
+ private final Map<String, IndexAlias> aliases = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
+
+ @Inject
+ public IndexAliasesService(Index index, @IndexSettings Settings indexSettings, IndexQueryParserService indexQueryParser) {
+ super(index, indexSettings);
+ this.indexQueryParser = indexQueryParser;
+ }
+
+ public boolean hasAlias(String alias) {
+ return aliases.containsKey(alias);
+ }
+
+ public IndexAlias alias(String alias) {
+ return aliases.get(alias);
+ }
+
+ public IndexAlias create(String alias, @Nullable CompressedString filter) {
+ return new IndexAlias(alias, filter, parse(alias, filter));
+ }
+
+ public void add(String alias, @Nullable CompressedString filter) {
+ add(new IndexAlias(alias, filter, parse(alias, filter)));
+ }
+
+ public void addAll(Map<String, IndexAlias> aliases) {
+ this.aliases.putAll(aliases);
+ }
+
+ /**
+ * Returns the filter associated with the listed filtering aliases.
+ * <p>
+ * The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
+ * Returns <tt>null</tt> if no filtering is required.
+ */
+ public Filter aliasFilter(String... aliases) {
+ if (aliases == null || aliases.length == 0) {
+ return null;
+ }
+ if (aliases.length == 1) {
+ IndexAlias indexAlias = alias(aliases[0]);
+ if (indexAlias == null) {
+ // This shouldn't happen unless the alias disappeared after filteringAliases was called.
+ throw new InvalidAliasNameException(index, aliases[0], "Unknown alias name was passed to alias Filter");
+ }
+ return indexAlias.parsedFilter();
+ } else {
+ // we should benchmark this a bit to see whether an OrFilter would make more sense here
+ XBooleanFilter combined = new XBooleanFilter();
+ for (String alias : aliases) {
+ IndexAlias indexAlias = alias(alias);
+ if (indexAlias == null) {
+ // This shouldn't happen unless the alias disappeared after filteringAliases was called.
+ throw new InvalidAliasNameException(index, alias, "Unknown alias name was passed to alias Filter");
+ }
+ if (indexAlias.parsedFilter() != null) {
+ combined.add(new FilterClause(indexAlias.parsedFilter(), BooleanClause.Occur.SHOULD));
+ } else {
+ // The filter can only be null if it was removed after filteringAliases was called
+ return null;
+ }
+ }
+ if (combined.clauses().size() == 0) {
+ return null;
+ }
+ if (combined.clauses().size() == 1) {
+ return combined.clauses().get(0).getFilter();
+ }
+ return combined;
+ }
+ }
+
+ private void add(IndexAlias indexAlias) {
+ aliases.put(indexAlias.alias(), indexAlias);
+ }
+
+ public void remove(String alias) {
+ aliases.remove(alias);
+ }
+
+ private Filter parse(String alias, CompressedString filter) {
+ if (filter == null) {
+ return null;
+ }
+ try {
+ byte[] filterSource = filter.uncompressed();
+ XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource);
+ try {
+ ParsedFilter parsedFilter = indexQueryParser.parseInnerFilter(parser);
+ return parsedFilter == null ? null : parsedFilter.filter();
+ } finally {
+ parser.close();
+ }
+ } catch (IOException ex) {
+ throw new AliasFilterParsingException(index, alias, "Invalid alias filter", ex);
+ }
+ }
+
+ @Override
+ public Iterator<IndexAlias> iterator() {
+ return aliases.values().iterator();
+ }
+}
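For reference, a minimal, self-contained sketch of the SHOULD-combination that
the multi-alias branch of aliasFilter builds. It assumes elasticsearch 1.0 and
its Lucene 4.6 dependencies on the classpath; the class name and the two
TermFilters, which stand in for parsed alias filters, are illustrative only:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.queries.FilterClause;
    import org.apache.lucene.queries.TermFilter;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.Filter;
    import org.elasticsearch.common.lucene.search.XBooleanFilter;

    public class AliasFilterCombineSketch {
        public static void main(String[] args) {
            Filter f2014 = new TermFilter(new Term("year", "2014"));
            Filter f2013 = new TermFilter(new Term("year", "2013"));
            // Each filtering alias contributes one SHOULD clause, so a document
            // matches if it matches any of the aliases.
            XBooleanFilter combined = new XBooleanFilter();
            combined.add(new FilterClause(f2014, BooleanClause.Occur.SHOULD));
            combined.add(new FilterClause(f2013, BooleanClause.Occur.SHOULD));
            System.out.println(combined.clauses().size()); // 2
        }
    }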
diff --git a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesServiceModule.java b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesServiceModule.java
new file mode 100644
index 0000000..1bb9a58
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesServiceModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.aliases;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class IndexAliasesServiceModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexAliasesService.class).asEagerSingleton();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java
new file mode 100644
index 0000000..455ba0f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public ASCIIFoldingTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new ASCIIFoldingFilter(tokenStream);
+ }
+}
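The factory above simply wraps the stream; the folding itself is plain Lucene.
A self-contained sketch of what create() produces, assuming Lucene 4.6 (the
version elasticsearch 1.0 ships with); the demo class name is illustrative:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class AsciiFoldingSketch {
        public static void main(String[] args) throws IOException {
            TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("café naïve"));
            ts = new ASCIIFoldingFilter(ts); // what create(tokenStream) returns
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term.toString()); // cafe, then naive
            }
            ts.end();
            ts.close();
        }
    }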
diff --git a/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java
new file mode 100644
index 0000000..29ff682
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public abstract class AbstractCharFilterFactory extends AbstractIndexComponent implements CharFilterFactory {
+
+ private final String name;
+
+ public AbstractCharFilterFactory(Index index, @IndexSettings Settings indexSettings, String name) {
+ super(index, indexSettings);
+ this.name = name;
+ }
+
+ @Override
+ public String name() {
+ return this.name;
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java
new file mode 100644
index 0000000..b45f0e2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public abstract class AbstractIndexAnalyzerProvider<T extends Analyzer> extends AbstractIndexComponent implements AnalyzerProvider<T> {
+
+ private final String name;
+
+ protected final Version version;
+
+ /**
+ * Constructs a new analyzer component, with the index name and its settings and the analyzer name.
+ *
+ * @param index The index name
+ * @param indexSettings The index settings
+ * @param name The analyzer name
+ */
+ public AbstractIndexAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, String name, Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger);
+ }
+
+ /**
+ * Constructs a new analyzer component, with the index name and its settings and the analyzer name.
+ *
+ * @param index The index name
+ * @param indexSettings The index settings
+ * @param prefixSettings A settings prefix (like "com.mycompany") to simplify extracting the component settings
+ * @param name The analyzer name
+ */
+ public AbstractIndexAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, String prefixSettings, String name, Settings settings) {
+ super(index, indexSettings, prefixSettings);
+ this.name = name;
+ this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger);
+ }
+
+ /**
+ * Returns the injected name of the analyzer.
+ */
+ @Override
+ public final String name() {
+ return this.name;
+ }
+
+ @Override
+ public final AnalyzerScope scope() {
+ return AnalyzerScope.INDEX;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java
new file mode 100644
index 0000000..f4efeda
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public abstract class AbstractTokenFilterFactory extends AbstractIndexComponent implements TokenFilterFactory {
+
+ private final String name;
+
+ protected final Version version;
+
+ public AbstractTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, String name, Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger);
+ }
+
+ @Override
+ public String name() {
+ return this.name;
+ }
+
+ public final Version version() {
+ return version;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java
new file mode 100644
index 0000000..94c80bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public abstract class AbstractTokenizerFactory extends AbstractIndexComponent implements TokenizerFactory {
+
+ private final String name;
+
+ protected final Version version;
+
+
+ public AbstractTokenizerFactory(Index index, @IndexSettings Settings indexSettings, String name, Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger);
+ }
+
+ @Override
+ public String name() {
+ return this.name;
+ }
+
+ public final Version version() {
+ return version;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/src/main/java/org/elasticsearch/index/analysis/Analysis.java
new file mode 100644
index 0000000..eef0d7d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/Analysis.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ar.ArabicAnalyzer;
+import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
+import org.apache.lucene.analysis.br.BrazilianAnalyzer;
+import org.apache.lucene.analysis.ca.CatalanAnalyzer;
+import org.apache.lucene.analysis.cz.CzechAnalyzer;
+import org.apache.lucene.analysis.da.DanishAnalyzer;
+import org.apache.lucene.analysis.de.GermanAnalyzer;
+import org.apache.lucene.analysis.el.GreekAnalyzer;
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
+import org.apache.lucene.analysis.es.SpanishAnalyzer;
+import org.apache.lucene.analysis.eu.BasqueAnalyzer;
+import org.apache.lucene.analysis.fa.PersianAnalyzer;
+import org.apache.lucene.analysis.fi.FinnishAnalyzer;
+import org.apache.lucene.analysis.fr.FrenchAnalyzer;
+import org.apache.lucene.analysis.gl.GalicianAnalyzer;
+import org.apache.lucene.analysis.hi.HindiAnalyzer;
+import org.apache.lucene.analysis.hu.HungarianAnalyzer;
+import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
+import org.apache.lucene.analysis.id.IndonesianAnalyzer;
+import org.apache.lucene.analysis.it.ItalianAnalyzer;
+import org.apache.lucene.analysis.nl.DutchAnalyzer;
+import org.apache.lucene.analysis.no.NorwegianAnalyzer;
+import org.apache.lucene.analysis.pt.PortugueseAnalyzer;
+import org.apache.lucene.analysis.ro.RomanianAnalyzer;
+import org.apache.lucene.analysis.ru.RussianAnalyzer;
+import org.apache.lucene.analysis.sv.SwedishAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tr.TurkishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.net.URL;
+import java.util.*;
+
+/**
+ *
+ */
+public class Analysis {
+
+ public static Version parseAnalysisVersion(@IndexSettings Settings indexSettings, Settings settings, ESLogger logger) {
+ // check for explicit version on the specific analyzer component
+ String sVersion = settings.get("version");
+ if (sVersion != null) {
+ return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
+ }
+ // check for explicit version on the index itself as default for all analysis components
+ sVersion = indexSettings.get("index.analysis.version");
+ if (sVersion != null) {
+ return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
+ }
+ // resolve the analysis version based on the version the index was created with
+ return indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).luceneVersion;
+ }
+
+ public static boolean isNoStopwords(Settings settings) {
+ String value = settings.get("stopwords");
+ return value != null && "_none_".equals(value);
+ }
+
+ public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion, Version version) {
+ String value = settings.get("stem_exclusion");
+ if (value != null) {
+ if ("_none_".equals(value)) {
+ return CharArraySet.EMPTY_SET;
+ } else {
+ // LUCENE 4 UPGRADE: Should be settings.getAsBoolean("stem_exclusion_case", false)?
+ return new CharArraySet(version, Strings.commaDelimitedListToSet(value), false);
+ }
+ }
+ String[] stemExclusion = settings.getAsArray("stem_exclusion", null);
+ if (stemExclusion != null) {
+ // LUCENE 4 UPGRADE: Should be settings.getAsBoolean("stem_exclusion_case", false)?
+ return new CharArraySet(version, ImmutableList.copyOf(stemExclusion), false);
+ } else {
+ return defaultStemExclusion;
+ }
+ }
+
+ public static final ImmutableMap<String, Set<?>> namedStopWords = MapBuilder.<String, Set<?>>newMapBuilder()
+ .put("_arabic_", ArabicAnalyzer.getDefaultStopSet())
+ .put("_armenian_", ArmenianAnalyzer.getDefaultStopSet())
+ .put("_basque_", BasqueAnalyzer.getDefaultStopSet())
+ .put("_brazilian_", BrazilianAnalyzer.getDefaultStopSet())
+ .put("_bulgarian_", BulgarianAnalyzer.getDefaultStopSet())
+ .put("_catalan_", CatalanAnalyzer.getDefaultStopSet())
+ .put("_czech_", CzechAnalyzer.getDefaultStopSet())
+ .put("_danish_", DanishAnalyzer.getDefaultStopSet())
+ .put("_dutch_", DutchAnalyzer.getDefaultStopSet())
+ .put("_english_", EnglishAnalyzer.getDefaultStopSet())
+ .put("_finnish_", FinnishAnalyzer.getDefaultStopSet())
+ .put("_french_", FrenchAnalyzer.getDefaultStopSet())
+ .put("_galician_", GalicianAnalyzer.getDefaultStopSet())
+ .put("_german_", GermanAnalyzer.getDefaultStopSet())
+ .put("_greek_", GreekAnalyzer.getDefaultStopSet())
+ .put("_hindi_", HindiAnalyzer.getDefaultStopSet())
+ .put("_hungarian_", HungarianAnalyzer.getDefaultStopSet())
+ .put("_indonesian_", IndonesianAnalyzer.getDefaultStopSet())
+ .put("_italian_", ItalianAnalyzer.getDefaultStopSet())
+ .put("_norwegian_", NorwegianAnalyzer.getDefaultStopSet())
+ .put("_persian_", PersianAnalyzer.getDefaultStopSet())
+ .put("_portuguese_", PortugueseAnalyzer.getDefaultStopSet())
+ .put("_romanian_", RomanianAnalyzer.getDefaultStopSet())
+ .put("_russian_", RussianAnalyzer.getDefaultStopSet())
+ .put("_spanish_", SpanishAnalyzer.getDefaultStopSet())
+ .put("_swedish_", SwedishAnalyzer.getDefaultStopSet())
+ .put("_turkish_", TurkishAnalyzer.getDefaultStopSet())
+ .immutableMap();
+
+ public static CharArraySet parseWords(Environment env, Settings settings, String name, CharArraySet defaultWords, ImmutableMap<String, Set<?>> namedWords, Version version, boolean ignoreCase) {
+ String value = settings.get(name);
+ if (value != null) {
+ if ("_none_".equals(value)) {
+ return CharArraySet.EMPTY_SET;
+ } else {
+ return resolveNamedWords(Strings.commaDelimitedListToSet(value), namedWords, version, ignoreCase);
+ }
+ }
+ List<String> pathLoadedWords = getWordList(env, settings, name);
+ if (pathLoadedWords != null) {
+ return resolveNamedWords(pathLoadedWords, namedWords, version, ignoreCase);
+ }
+ return defaultWords;
+ }
+
+ public static CharArraySet parseCommonWords(Environment env, Settings settings, CharArraySet defaultCommonWords, Version version, boolean ignoreCase) {
+ return parseWords(env, settings, "common_words", defaultCommonWords, namedStopWords, version, ignoreCase);
+ }
+
+ public static CharArraySet parseArticles(Environment env, Settings settings, Version version) {
+ return parseWords(env, settings, "articles", null, null, version, settings.getAsBoolean("articles_case", false));
+ }
+
+ public static CharArraySet parseStopWords(Environment env, Settings settings, CharArraySet defaultStopWords, Version version) {
+ return parseStopWords(env, settings, defaultStopWords, version, settings.getAsBoolean("stopwords_case", false));
+ }
+
+ public static CharArraySet parseStopWords(Environment env, Settings settings, CharArraySet defaultStopWords, Version version, boolean ignoreCase) {
+ return parseWords(env, settings, "stopwords", defaultStopWords, namedStopWords, version, ignoreCase);
+ }
+
+ private static CharArraySet resolveNamedWords(Collection<String> words, ImmutableMap<String, Set<?>> namedWords, Version version, boolean ignoreCase) {
+ if (namedWords == null) {
+ return new CharArraySet(version, words, ignoreCase);
+ }
+ CharArraySet setWords = new CharArraySet(version, words.size(), ignoreCase);
+ for (String word : words) {
+ if (namedWords.containsKey(word)) {
+ setWords.addAll(namedWords.get(word));
+ } else {
+ setWords.add(word);
+ }
+ }
+ return setWords;
+ }
+
+ public static CharArraySet getWordSet(Environment env, Settings settings, String settingsPrefix, Version version) {
+ List<String> wordList = getWordList(env, settings, settingsPrefix);
+ if (wordList == null) {
+ return null;
+ }
+ return new CharArraySet(version, wordList, settings.getAsBoolean(settingsPrefix + "_case", false));
+ }
+
+ /**
+ * Fetches a list of words from the index settings. The list should either be available inline at the key
+ * specified by <code>settingPrefix</code>, or in a file whose path is given by <code>settingPrefix + "_path"</code>.
+ *
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * If the file pointed to by <code>settingPrefix + "_path"</code> cannot be read.
+ */
+ public static List<String> getWordList(Environment env, Settings settings, String settingPrefix) {
+ String wordListPath = settings.get(settingPrefix + "_path", null);
+
+ if (wordListPath == null) {
+ String[] explicitWordList = settings.getAsArray(settingPrefix, null);
+ if (explicitWordList == null) {
+ return null;
+ } else {
+ return Arrays.asList(explicitWordList);
+ }
+ }
+
+ URL wordListFile = env.resolveConfig(wordListPath);
+
+ try {
+ return loadWordList(new InputStreamReader(wordListFile.openStream(), Charsets.UTF_8), "#");
+ } catch (IOException ioe) {
+ String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage());
+ throw new ElasticsearchIllegalArgumentException(message);
+ }
+ }
+
+ public static List<String> loadWordList(Reader reader, String comment) throws IOException {
+ final List<String> result = new ArrayList<String>();
+ BufferedReader br = null;
+ try {
+ if (reader instanceof BufferedReader) {
+ br = (BufferedReader) reader;
+ } else {
+ br = new BufferedReader(reader);
+ }
+ String word = null;
+ while ((word = br.readLine()) != null) {
+ if (!Strings.hasText(word)) {
+ continue;
+ }
+ if (!word.startsWith(comment)) {
+ result.add(word.trim());
+ }
+ }
+ } finally {
+ if (br != null)
+ br.close();
+ }
+ return result;
+ }
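+
+ // For example, loadWordList(new java.io.StringReader("foo\n# comment\nbar"), "#")
+ // yields ["foo", "bar"]: blank lines are skipped, lines starting with the
+ // comment marker are dropped, and each remaining entry is trimmed.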
+
+ /**
+ * @return A reader over the file named by <code>settingPrefix</code>, or <code>null</code> if the setting is not set.
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * If the Reader cannot be instantiated.
+ */
+ public static Reader getReaderFromFile(Environment env, Settings settings, String settingPrefix) {
+ String filePath = settings.get(settingPrefix, null);
+
+ if (filePath == null) {
+ return null;
+ }
+
+ URL fileUrl = env.resolveConfig(filePath);
+
+ Reader reader = null;
+ try {
+ reader = new InputStreamReader(fileUrl.openStream(), Charsets.UTF_8);
+ } catch (IOException ioe) {
+ String message = String.format(Locale.ROOT, "IOException while reading %s: %s", settingPrefix, ioe.getMessage());
+ throw new ElasticsearchIllegalArgumentException(message);
+ }
+
+ return reader;
+ }
+
+ /**
+ * Check whether the provided token stream is able to provide character
+ * terms.
+ * <p>Although most analyzers generate character terms (CharTermAttribute),
+ * some token streams only contain binary terms (BinaryTermAttribute,
+ * CharTermAttribute being a special type of BinaryTermAttribute), such as
+ * {@link NumericTokenStream}, and are unsuitable for highlighting and
+ * more-like-this queries, which expect character terms.</p>
+ */
+ public static boolean isCharacterTokenStream(TokenStream tokenStream) {
+ try {
+ tokenStream.addAttribute(CharTermAttribute.class);
+ return true;
+ } catch (IllegalArgumentException e) {
+ return false;
+ }
+ }
+
+ /**
+ * Check whether {@link TokenStream}s generated with <code>analyzer</code>
+ * provide character terms.
+ * @see #isCharacterTokenStream(TokenStream)
+ */
+ public static boolean generatesCharacterTokenStream(Analyzer analyzer, String fieldName) throws IOException {
+ return isCharacterTokenStream(analyzer.tokenStream(fieldName, ""));
+ }
+
+}
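A short sketch of the stop-word resolution above, assuming elasticsearch 1.0 on
the classpath. The demo class is illustrative only; env may be null here because
an inline "stopwords" value short-circuits the "_path" file lookup:

    import org.apache.lucene.analysis.util.CharArraySet;
    import org.apache.lucene.util.Version;

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.analysis.Analysis;

    public class StopWordsSketch {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("stopwords", "_english_,foo") // a named set plus a literal word
                    .build();
            CharArraySet stopWords = Analysis.parseStopWords(null, settings,
                    CharArraySet.EMPTY_SET, Version.LUCENE_46);
            System.out.println(stopWords.contains("the")); // true, from _english_
            System.out.println(stopWords.contains("foo")); // true, literal entry
        }
    }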
diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java
new file mode 100644
index 0000000..fa5a857
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java
@@ -0,0 +1,557 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+import org.elasticsearch.common.settings.NoClassSettingsException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
+import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+
+import java.util.LinkedList;
+import java.util.Map;
+
+/**
+ *
+ */
+public class AnalysisModule extends AbstractModule {
+
+ public static class AnalysisBinderProcessor {
+
+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {
+
+ }
+
+ public static class CharFiltersBindings {
+ private final Map<String, Class<? extends CharFilterFactory>> charFilters = Maps.newHashMap();
+
+ public CharFiltersBindings() {
+ }
+
+ public void processCharFilter(String name, Class<? extends CharFilterFactory> charFilterFactory) {
+ charFilters.put(name, charFilterFactory);
+ }
+ }
+
+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
+
+ }
+
+ public static class TokenFiltersBindings {
+ private final Map<String, Class<? extends TokenFilterFactory>> tokenFilters = Maps.newHashMap();
+
+ public TokenFiltersBindings() {
+ }
+
+ public void processTokenFilter(String name, Class<? extends TokenFilterFactory> tokenFilterFactory) {
+ tokenFilters.put(name, tokenFilterFactory);
+ }
+ }
+
+ public void processTokenizers(TokenizersBindings tokenizersBindings) {
+
+ }
+
+ public static class TokenizersBindings {
+ private final Map<String, Class<? extends TokenizerFactory>> tokenizers = Maps.newHashMap();
+
+ public TokenizersBindings() {
+ }
+
+ public void processTokenizer(String name, Class<? extends TokenizerFactory> tokenizerFactory) {
+ tokenizers.put(name, tokenizerFactory);
+ }
+ }
+
+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {
+
+ }
+
+ public static class AnalyzersBindings {
+ private final Map<String, Class<? extends AnalyzerProvider>> analyzers = Maps.newHashMap();
+
+ public AnalyzersBindings() {
+ }
+
+ public void processAnalyzer(String name, Class<? extends AnalyzerProvider> analyzerProvider) {
+ analyzers.put(name, analyzerProvider);
+ }
+ }
+ }
+
+ private final Settings settings;
+
+ private final IndicesAnalysisService indicesAnalysisService;
+
+ private final LinkedList<AnalysisBinderProcessor> processors = Lists.newLinkedList();
+
+ private final Map<String, Class<? extends CharFilterFactory>> charFilters = Maps.newHashMap();
+ private final Map<String, Class<? extends TokenFilterFactory>> tokenFilters = Maps.newHashMap();
+ private final Map<String, Class<? extends TokenizerFactory>> tokenizers = Maps.newHashMap();
+ private final Map<String, Class<? extends AnalyzerProvider>> analyzers = Maps.newHashMap();
+
+
+ public AnalysisModule(Settings settings) {
+ this(settings, null);
+ }
+
+ public AnalysisModule(Settings settings, IndicesAnalysisService indicesAnalysisService) {
+ this.settings = settings;
+ this.indicesAnalysisService = indicesAnalysisService;
+ processors.add(new DefaultProcessor());
+ try {
+ processors.add(new ExtendedProcessor());
+ } catch (Throwable t) {
+ // ignore: the extended processors (or their optional dependencies) are not on the classpath
+ }
+ }
+
+ public AnalysisModule addProcessor(AnalysisBinderProcessor processor) {
+ processors.addFirst(processor);
+ return this;
+ }
+
+ public AnalysisModule addCharFilter(String name, Class<? extends CharFilterFactory> charFilter) {
+ charFilters.put(name, charFilter);
+ return this;
+ }
+
+ public AnalysisModule addTokenFilter(String name, Class<? extends TokenFilterFactory> tokenFilter) {
+ tokenFilters.put(name, tokenFilter);
+ return this;
+ }
+
+ public AnalysisModule addTokenizer(String name, Class<? extends TokenizerFactory> tokenizer) {
+ tokenizers.put(name, tokenizer);
+ return this;
+ }
+
+ public AnalysisModule addAnalyzer(String name, Class<? extends AnalyzerProvider> analyzer) {
+ analyzers.put(name, analyzer);
+ return this;
+ }
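+
+ // Example (hypothetical plugin code): register a custom token filter class so
+ // that settings such as "index.analysis.filter.f1.type: my_filter" resolve to it:
+ //
+ // new AnalysisModule(settings, indicesAnalysisService)
+ // .addTokenFilter("my_filter", MyTokenFilterFactory.class);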
+
+ @Override
+ protected void configure() {
+ MapBinder<String, CharFilterFactoryFactory> charFilterBinder
+ = MapBinder.newMapBinder(binder(), String.class, CharFilterFactoryFactory.class);
+
+ // CHAR FILTERS
+
+ AnalysisBinderProcessor.CharFiltersBindings charFiltersBindings = new AnalysisBinderProcessor.CharFiltersBindings();
+ for (AnalysisBinderProcessor processor : processors) {
+ processor.processCharFilters(charFiltersBindings);
+ }
+ charFiltersBindings.charFilters.putAll(charFilters);
+
+ Map<String, Settings> charFiltersSettings = settings.getGroups("index.analysis.char_filter");
+ for (Map.Entry<String, Settings> entry : charFiltersSettings.entrySet()) {
+ String charFilterName = entry.getKey();
+ Settings charFilterSettings = entry.getValue();
+
+ Class<? extends CharFilterFactory> type = null;
+ try {
+ type = charFilterSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "CharFilterFactory");
+ } catch (NoClassSettingsException e) {
+ // nothing found, see if it's in the bindings as a binding name
+ if (charFilterSettings.get("type") != null) {
+ type = charFiltersBindings.charFilters.get(Strings.toUnderscoreCase(charFilterSettings.get("type")));
+ if (type == null) {
+ type = charFiltersBindings.charFilters.get(Strings.toCamelCase(charFilterSettings.get("type")));
+ }
+ }
+ if (type == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find char filter type [" + charFilterSettings.get("type") + "] for [" + charFilterName + "]", e);
+ }
+ }
+ if (type == null) {
+ // still nothing found in the bindings either, so fail
+ throw new ElasticsearchIllegalArgumentException("Char Filter [" + charFilterName + "] must have a type associated with it");
+ }
+ charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, type)).in(Scopes.SINGLETON);
+ }
+ // go over the char filters in the bindings and register the ones that are not configured
+ for (Map.Entry<String, Class<? extends CharFilterFactory>> entry : charFiltersBindings.charFilters.entrySet()) {
+ String charFilterName = entry.getKey();
+ Class<? extends CharFilterFactory> clazz = entry.getValue();
+ // we don't want to re-register one that already exists
+ if (charFiltersSettings.containsKey(charFilterName)) {
+ continue;
+ }
+ // if the factory requires settings, don't register it as a default, since defaults are built without settings
+ if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
+ continue;
+ }
+ // register it as default under the name
+ if (indicesAnalysisService != null && indicesAnalysisService.hasCharFilter(charFilterName)) {
+ // don't register it here; we will explicitly register it in the AnalysisService
+ //charFilterBinder.addBinding(charFilterName).toInstance(indicesAnalysisService.charFilterFactoryFactory(charFilterName));
+ } else {
+ charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
+ }
+ }
+
+
+ // TOKEN FILTERS
+
+ MapBinder<String, TokenFilterFactoryFactory> tokenFilterBinder
+ = MapBinder.newMapBinder(binder(), String.class, TokenFilterFactoryFactory.class);
+
+ // initial default bindings
+ AnalysisBinderProcessor.TokenFiltersBindings tokenFiltersBindings = new AnalysisBinderProcessor.TokenFiltersBindings();
+ for (AnalysisBinderProcessor processor : processors) {
+ processor.processTokenFilters(tokenFiltersBindings);
+ }
+ tokenFiltersBindings.tokenFilters.putAll(tokenFilters);
+
+ Map<String, Settings> tokenFiltersSettings = settings.getGroups("index.analysis.filter");
+ for (Map.Entry<String, Settings> entry : tokenFiltersSettings.entrySet()) {
+ String tokenFilterName = entry.getKey();
+ Settings tokenFilterSettings = entry.getValue();
+
+ Class<? extends TokenFilterFactory> type = null;
+ try {
+ type = tokenFilterSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenFilterFactory");
+ } catch (NoClassSettingsException e) {
+ // nothing found, see if it's in the bindings as a binding name
+ if (tokenFilterSettings.get("type") != null) {
+ type = tokenFiltersBindings.tokenFilters.get(Strings.toUnderscoreCase(tokenFilterSettings.get("type")));
+ if (type == null) {
+ type = tokenFiltersBindings.tokenFilters.get(Strings.toCamelCase(tokenFilterSettings.get("type")));
+ }
+ }
+ if (type == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find token filter type [" + tokenFilterSettings.get("type") + "] for [" + tokenFilterName + "]", e);
+ }
+ }
+ if (type == null) {
+ throw new ElasticsearchIllegalArgumentException("token filter [" + tokenFilterName + "] must have a type associated with it");
+ }
+ tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, type)).in(Scopes.SINGLETON);
+ }
+ // go over the filters in the bindings and register the ones that are not configured
+ for (Map.Entry<String, Class<? extends TokenFilterFactory>> entry : tokenFiltersBindings.tokenFilters.entrySet()) {
+ String tokenFilterName = entry.getKey();
+ Class<? extends TokenFilterFactory> clazz = entry.getValue();
+ // we don't want to re-register one that already exists
+ if (tokenFiltersSettings.containsKey(tokenFilterName)) {
+ continue;
+ }
+ // if the factory requires settings, don't register it as a default, since defaults are built without settings
+ if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
+ continue;
+ }
+ // register it as default under the name
+ if (indicesAnalysisService != null && indicesAnalysisService.hasTokenFilter(tokenFilterName)) {
+ // don't register it here; we will explicitly register it in the AnalysisService
+ // tokenFilterBinder.addBinding(tokenFilterName).toInstance(indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName));
+ } else {
+ tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
+ }
+ }
+
+ // TOKENIZER
+
+ MapBinder<String, TokenizerFactoryFactory> tokenizerBinder
+ = MapBinder.newMapBinder(binder(), String.class, TokenizerFactoryFactory.class);
+
+ // initial default bindings
+ AnalysisBinderProcessor.TokenizersBindings tokenizersBindings = new AnalysisBinderProcessor.TokenizersBindings();
+ for (AnalysisBinderProcessor processor : processors) {
+ processor.processTokenizers(tokenizersBindings);
+ }
+ tokenizersBindings.tokenizers.putAll(tokenizers);
+
+ Map<String, Settings> tokenizersSettings = settings.getGroups("index.analysis.tokenizer");
+ for (Map.Entry<String, Settings> entry : tokenizersSettings.entrySet()) {
+ String tokenizerName = entry.getKey();
+ Settings tokenizerSettings = entry.getValue();
+
+
+ Class<? extends TokenizerFactory> type = null;
+ try {
+ type = tokenizerSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenizerFactory");
+ } catch (NoClassSettingsException e) {
+ // nothing found, see if it's in the bindings as a binding name
+ if (tokenizerSettings.get("type") != null) {
+ type = tokenizersBindings.tokenizers.get(Strings.toUnderscoreCase(tokenizerSettings.get("type")));
+ if (type == null) {
+ type = tokenizersBindings.tokenizers.get(Strings.toCamelCase(tokenizerSettings.get("type")));
+ }
+ }
+ if (type == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find tokenizer type [" + tokenizerSettings.get("type") + "] for [" + tokenizerName + "]", e);
+ }
+ }
+ if (type == null) {
+ throw new ElasticsearchIllegalArgumentException("token filter [" + tokenizerName + "] must have a type associated with it");
+ }
+ tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, type)).in(Scopes.SINGLETON);
+ }
+ // go over the tokenizers in the bindings and register the ones that are not configured
+ for (Map.Entry<String, Class<? extends TokenizerFactory>> entry : tokenizersBindings.tokenizers.entrySet()) {
+ String tokenizerName = entry.getKey();
+ Class<? extends TokenizerFactory> clazz = entry.getValue();
+ // we don't want to re-register one that already exists
+ if (tokenizersSettings.containsKey(tokenizerName)) {
+ continue;
+ }
+ // if the factory requires settings, don't register it as a default, since defaults are built without settings
+ if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
+ continue;
+ }
+ // register it as default under the name
+ if (indicesAnalysisService != null && indicesAnalysisService.hasTokenizer(tokenizerName)) {
+ // don't register it here; we will explicitly register it in the AnalysisService
+ // tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
+ } else {
+ tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
+ }
+ }
+
+ // ANALYZER
+
+ MapBinder<String, AnalyzerProviderFactory> analyzerBinder
+ = MapBinder.newMapBinder(binder(), String.class, AnalyzerProviderFactory.class);
+
+ // initial default bindings
+ AnalysisBinderProcessor.AnalyzersBindings analyzersBindings = new AnalysisBinderProcessor.AnalyzersBindings();
+ for (AnalysisBinderProcessor processor : processors) {
+ processor.processAnalyzers(analyzersBindings);
+ }
+ analyzersBindings.analyzers.putAll(analyzers);
+
+ Map<String, Settings> analyzersSettings = settings.getGroups("index.analysis.analyzer");
+ for (Map.Entry<String, Settings> entry : analyzersSettings.entrySet()) {
+ String analyzerName = entry.getKey();
+ Settings analyzerSettings = entry.getValue();
+
+ Class<? extends AnalyzerProvider> type = null;
+ try {
+ type = analyzerSettings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "AnalyzerProvider");
+ } catch (NoClassSettingsException e) {
+ // nothing found, see if it's in the bindings as a binding name
+ if (analyzerSettings.get("type") != null) {
+ type = analyzersBindings.analyzers.get(Strings.toUnderscoreCase(analyzerSettings.get("type")));
+ if (type == null) {
+ type = analyzersBindings.analyzers.get(Strings.toCamelCase(analyzerSettings.get("type")));
+ }
+ }
+ if (type == null) {
+ // no specific type, check if it has a tokenizer associated with it
+ String tokenizerName = analyzerSettings.get("tokenizer");
+ if (tokenizerName != null) {
+ // we have a tokenizer, use the CustomAnalyzer
+ type = CustomAnalyzerProvider.class;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]", e);
+ }
+ }
+ }
+ if (type == null) {
+ // no specific type, check if it has a tokenizer associated with it
+ String tokenizerName = analyzerSettings.get("tokenizer");
+ if (tokenizerName != null) {
+ // we have a tokenizer, use the CustomAnalyzer
+ type = CustomAnalyzerProvider.class;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]");
+ }
+ }
+ analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, type)).in(Scopes.SINGLETON);
+ }
+
+
+ // go over the analyzers in the bindings and register the ones that are not configured
+ for (Map.Entry<String, Class<? extends AnalyzerProvider>> entry : analyzersBindings.analyzers.entrySet()) {
+ String analyzerName = entry.getKey();
+ Class<? extends AnalyzerProvider> clazz = entry.getValue();
+ // we don't want to re-register one that already exists
+ if (analyzersSettings.containsKey(analyzerName)) {
+ continue;
+ }
+ // if the factory requires settings, don't register it as a default, since defaults are built without settings
+ if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
+ continue;
+ }
+ // register it as default under the name
+ if (indicesAnalysisService != null && indicesAnalysisService.hasAnalyzer(analyzerName)) {
+ // don't register it here; we will explicitly register it in the AnalysisService
+ // analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, clazz)).in(Scopes.SINGLETON);
+ } else {
+ analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, clazz)).in(Scopes.SINGLETON);
+ }
+ }
+
+ bind(AnalysisService.class).in(Scopes.SINGLETON);
+ }
+
+ private static class DefaultProcessor extends AnalysisBinderProcessor {
+
+ @Override
+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {
+ charFiltersBindings.processCharFilter("html_strip", HtmlStripCharFilterFactory.class);
+ charFiltersBindings.processCharFilter("pattern_replace", PatternReplaceCharFilterFactory.class);
+ }
+
+ @Override
+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
+ tokenFiltersBindings.processTokenFilter("stop", StopTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("reverse", ReverseTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("asciifolding", ASCIIFoldingTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("length", LengthTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("lowercase", LowerCaseTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("porter_stem", PorterStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("kstem", KStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("standard", StandardTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("nGram", NGramTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("ngram", NGramTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("edgeNGram", EdgeNGramTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("edge_ngram", EdgeNGramTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("shingle", ShingleTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("unique", UniqueTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("truncate", TruncateTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("trim", TrimTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("limit", LimitTokenCountFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("common_grams", CommonGramsTokenFilterFactory.class);
+ }
+
+ @Override
+ public void processTokenizers(TokenizersBindings tokenizersBindings) {
+ tokenizersBindings.processTokenizer("standard", StandardTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("uax_url_email", UAX29URLEmailTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("path_hierarchy", PathHierarchyTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("keyword", KeywordTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("letter", LetterTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("lowercase", LowerCaseTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("whitespace", WhitespaceTokenizerFactory.class);
+
+ tokenizersBindings.processTokenizer("nGram", NGramTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("ngram", NGramTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("edgeNGram", EdgeNGramTokenizerFactory.class);
+ tokenizersBindings.processTokenizer("edge_ngram", EdgeNGramTokenizerFactory.class);
+ }
+
+ @Override
+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {
+ analyzersBindings.processAnalyzer("default", StandardAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("standard", StandardAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("standard_html_strip", StandardHtmlStripAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("simple", SimpleAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("stop", StopAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("whitespace", WhitespaceAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("keyword", KeywordAnalyzerProvider.class);
+ }
+ }
+
+ private static class ExtendedProcessor extends AnalysisBinderProcessor {
+ @Override
+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {
+ charFiltersBindings.processCharFilter("mapping", MappingCharFilterFactory.class);
+ }
+
+ @Override
+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
+ tokenFiltersBindings.processTokenFilter("snowball", SnowballTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("stemmer", StemmerTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("word_delimiter", WordDelimiterTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("delimited_payload_filter", DelimitedPayloadTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("synonym", SynonymTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("elision", ElisionTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("keep", KeepWordFilterFactory.class);
+
+ tokenFiltersBindings.processTokenFilter("pattern_capture", PatternCaptureGroupTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("pattern_replace", PatternReplaceTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("dictionary_decompounder", DictionaryCompoundWordTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("hyphenation_decompounder", HyphenationCompoundWordTokenFilterFactory.class);
+
+ tokenFiltersBindings.processTokenFilter("arabic_stem", ArabicStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("brazilian_stem", BrazilianStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("czech_stem", CzechStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("dutch_stem", DutchStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("french_stem", FrenchStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("german_stem", GermanStemTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("russian_stem", RussianStemTokenFilterFactory.class);
+
+ tokenFiltersBindings.processTokenFilter("keyword_marker", KeywordMarkerTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("stemmer_override", StemmerOverrideTokenFilterFactory.class);
+
+ tokenFiltersBindings.processTokenFilter("arabic_normalization", ArabicNormalizationFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("persian_normalization", PersianNormalizationFilterFactory.class);
+
+ tokenFiltersBindings.processTokenFilter("hunspell", HunspellTokenFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("cjk_bigram", CJKBigramFilterFactory.class);
+ tokenFiltersBindings.processTokenFilter("cjk_width", CJKWidthFilterFactory.class);
+
+
+ }
+
+ @Override
+ public void processTokenizers(TokenizersBindings tokenizersBindings) {
+ tokenizersBindings.processTokenizer("pattern", PatternTokenizerFactory.class);
+ }
+
+ @Override
+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {
+ analyzersBindings.processAnalyzer("pattern", PatternAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("snowball", SnowballAnalyzerProvider.class);
+
+ analyzersBindings.processAnalyzer("arabic", ArabicAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("armenian", ArmenianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("basque", BasqueAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("brazilian", BrazilianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("bulgarian", BulgarianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("catalan", CatalanAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("chinese", ChineseAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("cjk", CjkAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("czech", CzechAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("danish", DanishAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("dutch", DutchAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("english", EnglishAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("finnish", FinnishAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("french", FrenchAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("galician", GalicianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("german", GermanAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("greek", GreekAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("hindi", HindiAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("hungarian", HungarianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("indonesian", IndonesianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("italian", ItalianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("latvian", LatvianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("norwegian", NorwegianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("persian", PersianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("portuguese", PortugueseAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("romanian", RomanianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("russian", RussianAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("spanish", SpanishAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("swedish", SwedishAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("turkish", TurkishAnalyzerProvider.class);
+ analyzersBindings.processAnalyzer("thai", ThaiAnalyzerProvider.class);
+ }
+ }
+}
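A minimal sketch of the settings shape that configure() consumes above. An
analyzer group with a tokenizer but no explicit "type" falls back to
CustomAnalyzerProvider, as in the code; the demo class name is illustrative and
elasticsearch 1.0 is assumed on the classpath:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class AnalyzerSettingsSketch {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
                    .putArray("index.analysis.analyzer.my_analyzer.filter", "lowercase", "asciifolding")
                    .build();
            // The same group lookup that configure() performs:
            Settings group = settings.getGroups("index.analysis.analyzer").get("my_analyzer");
            System.out.println(group.get("type"));             // null -> CustomAnalyzerProvider
            System.out.println(group.get("tokenizer"));        // standard
            System.out.println(group.getAsArray("filter")[1]); // asciifolding
        }
    }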
diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java
new file mode 100644
index 0000000..f9aa164
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.CloseableComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ *
+ */
+public class AnalysisService extends AbstractIndexComponent implements CloseableComponent {
+
+ private final ImmutableMap<String, NamedAnalyzer> analyzers;
+ private final ImmutableMap<String, TokenizerFactory> tokenizers;
+ private final ImmutableMap<String, CharFilterFactory> charFilters;
+ private final ImmutableMap<String, TokenFilterFactory> tokenFilters;
+
+ private final NamedAnalyzer defaultAnalyzer;
+ private final NamedAnalyzer defaultIndexAnalyzer;
+ private final NamedAnalyzer defaultSearchAnalyzer;
+ private final NamedAnalyzer defaultSearchQuoteAnalyzer;
+
+ public AnalysisService(Index index) {
+ this(index, ImmutableSettings.Builder.EMPTY_SETTINGS, null, null, null, null, null);
+ }
+
+ @Inject
+ public AnalysisService(Index index, @IndexSettings Settings indexSettings, @Nullable IndicesAnalysisService indicesAnalysisService,
+ @Nullable Map<String, AnalyzerProviderFactory> analyzerFactoryFactories,
+ @Nullable Map<String, TokenizerFactoryFactory> tokenizerFactoryFactories,
+ @Nullable Map<String, CharFilterFactoryFactory> charFilterFactoryFactories,
+ @Nullable Map<String, TokenFilterFactoryFactory> tokenFilterFactoryFactories) {
+ super(index, indexSettings);
+
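+ // Each registry below (tokenizers, char filters, token filters, analyzers) is
+ // built the same way: factories configured in the index settings come first,
+ // then the pre-built global factories are merged in as a fallback, and entries
+ // end up registered under both their snake_case name and a camelCase alias.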
+ Map<String, TokenizerFactory> tokenizers = newHashMap();
+ if (tokenizerFactoryFactories != null) {
+ Map<String, Settings> tokenizersSettings = indexSettings.getGroups("index.analysis.tokenizer");
+ for (Map.Entry<String, TokenizerFactoryFactory> entry : tokenizerFactoryFactories.entrySet()) {
+ String tokenizerName = entry.getKey();
+ TokenizerFactoryFactory tokenizerFactoryFactory = entry.getValue();
+
+ Settings tokenizerSettings = tokenizersSettings.get(tokenizerName);
+ if (tokenizerSettings == null) {
+ tokenizerSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+ TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, tokenizerSettings);
+ tokenizers.put(tokenizerName, tokenizerFactory);
+ tokenizers.put(Strings.toCamelCase(tokenizerName), tokenizerFactory);
+ }
+ }
+
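+ // fall back to the pre-built global tokenizers for any name not configured on this index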
+ if (indicesAnalysisService != null) {
+ for (Map.Entry<String, PreBuiltTokenizerFactoryFactory> entry : indicesAnalysisService.tokenizerFactories().entrySet()) {
+ String name = entry.getKey();
+ if (!tokenizers.containsKey(name)) {
+ tokenizers.put(name, entry.getValue().create(name, ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ name = Strings.toCamelCase(entry.getKey());
+ if (!name.equals(entry.getKey())) {
+ if (!tokenizers.containsKey(name)) {
+ tokenizers.put(name, entry.getValue().create(name, ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ }
+ }
+ }
+
+ this.tokenizers = ImmutableMap.copyOf(tokenizers);
+
+ Map<String, CharFilterFactory> charFilters = newHashMap();
+ if (charFilterFactoryFactories != null) {
+ Map<String, Settings> charFiltersSettings = indexSettings.getGroups("index.analysis.char_filter");
+ for (Map.Entry<String, CharFilterFactoryFactory> entry : charFilterFactoryFactories.entrySet()) {
+ String charFilterName = entry.getKey();
+ CharFilterFactoryFactory charFilterFactoryFactory = entry.getValue();
+
+ Settings charFilterSettings = charFiltersSettings.get(charFilterName);
+ if (charFilterSettings == null) {
+ charFilterSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+ CharFilterFactory charFilterFactory = charFilterFactoryFactory.create(charFilterName, charFilterSettings);
+ charFilters.put(charFilterName, charFilterFactory);
+ charFilters.put(Strings.toCamelCase(charFilterName), charFilterFactory);
+ }
+ }
+
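+ // likewise, fall back to the pre-built global char filters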
+ if (indicesAnalysisService != null) {
+ for (Map.Entry<String, PreBuiltCharFilterFactoryFactory> entry : indicesAnalysisService.charFilterFactories().entrySet()) {
+ String name = entry.getKey();
+ if (!charFilters.containsKey(name)) {
+ charFilters.put(name, entry.getValue().create(name, ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ name = Strings.toCamelCase(entry.getKey());
+ if (!name.equals(entry.getKey())) {
+ if (!charFilters.containsKey(name)) {
+ charFilters.put(name, entry.getValue().create(name, ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ }
+ }
+ }
+
+ this.charFilters = ImmutableMap.copyOf(charFilters);
+
+ Map<String, TokenFilterFactory> tokenFilters = newHashMap();
+ if (tokenFilterFactoryFactories != null) {
+ Map<String, Settings> tokenFiltersSettings = indexSettings.getGroups("index.analysis.filter");
+ for (Map.Entry<String, TokenFilterFactoryFactory> entry : tokenFilterFactoryFactories.entrySet()) {
+ String tokenFilterName = entry.getKey();
+ TokenFilterFactoryFactory tokenFilterFactoryFactory = entry.getValue();
+
+ Settings tokenFilterSettings = tokenFiltersSettings.get(tokenFilterName);
+ if (tokenFilterSettings == null) {
+ tokenFilterSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+ TokenFilterFactory tokenFilterFactory = tokenFilterFactoryFactory.create(tokenFilterName, tokenFilterSettings);
+ tokenFilters.put(tokenFilterName, tokenFilterFactory);
+ tokenFilters.put(Strings.toCamelCase(tokenFilterName), tokenFilterFactory);
+ }
+ }
+
+ // pre initialize the globally registered ones into the map
+ if (indicesAnalysisService != null) {
+ for (Map.Entry<String, PreBuiltTokenFilterFactoryFactory> entry : indicesAnalysisService.tokenFilterFactories().entrySet()) {
+ String name = entry.getKey();
+ if (!tokenFilters.containsKey(name)) {
+ tokenFilters.put(name, entry.getValue().create(name, ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ name = Strings.toCamelCase(entry.getKey());
+ if (!name.equals(entry.getKey())) {
+ if (!tokenFilters.containsKey(name)) {
+ tokenFilters.put(name, entry.getValue().create(name, ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ }
+ }
+ }
+ this.tokenFilters = ImmutableMap.copyOf(tokenFilters);
+
+ Map<String, AnalyzerProvider> analyzerProviders = newHashMap();
+ if (analyzerFactoryFactories != null) {
+ Map<String, Settings> analyzersSettings = indexSettings.getGroups("index.analysis.analyzer");
+ for (Map.Entry<String, AnalyzerProviderFactory> entry : analyzerFactoryFactories.entrySet()) {
+ String analyzerName = entry.getKey();
+ AnalyzerProviderFactory analyzerFactoryFactory = entry.getValue();
+
+ Settings analyzerSettings = analyzersSettings.get(analyzerName);
+ if (analyzerSettings == null) {
+ analyzerSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+ AnalyzerProvider analyzerFactory = analyzerFactoryFactory.create(analyzerName, analyzerSettings);
+ analyzerProviders.put(analyzerName, analyzerFactory);
+ }
+ }
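+ // merge in the pre-built analyzers, instantiated against the version the index was created with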
+ if (indicesAnalysisService != null) {
+ for (Map.Entry<String, PreBuiltAnalyzerProviderFactory> entry : indicesAnalysisService.analyzerProviderFactories().entrySet()) {
+ String name = entry.getKey();
+ Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ if (!analyzerProviders.containsKey(name)) {
+ analyzerProviders.put(name, entry.getValue().create(name, ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build()));
+ }
+ String camelCaseName = Strings.toCamelCase(name);
+ if (!camelCaseName.equals(entry.getKey()) && !analyzerProviders.containsKey(camelCaseName)) {
+ analyzerProviders.put(camelCaseName, entry.getValue().create(name, ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build()));
+ }
+ }
+ }
+
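+ // make sure the default analyzers always exist, falling back to the standard analyzer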
+ if (!analyzerProviders.containsKey("default")) {
+ analyzerProviders.put("default", new StandardAnalyzerProvider(index, indexSettings, null, "default", ImmutableSettings.Builder.EMPTY_SETTINGS));
+ }
+ if (!analyzerProviders.containsKey("default_index")) {
+ analyzerProviders.put("default_index", analyzerProviders.get("default"));
+ }
+ if (!analyzerProviders.containsKey("default_search")) {
+ analyzerProviders.put("default_search", analyzerProviders.get("default"));
+ }
+ if (!analyzerProviders.containsKey("default_search_quoted")) {
+ analyzerProviders.put("default_search_quoted", analyzerProviders.get("default_search"));
+ }
+
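+ // materialize every provider into a NamedAnalyzer, registering it under its name,
+ // its camelCase form, and any aliases configured for it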
+ Map<String, NamedAnalyzer> analyzers = newHashMap();
+ for (AnalyzerProvider analyzerFactory : analyzerProviders.values()) {
+ if (analyzerFactory instanceof CustomAnalyzerProvider) {
+ ((CustomAnalyzerProvider) analyzerFactory).build(this);
+ }
+ Analyzer analyzerF = analyzerFactory.get();
+ if (analyzerF == null) {
+ throw new ElasticsearchIllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer");
+ }
+ NamedAnalyzer analyzer;
+ // if we got a named analyzer back, use it...
+ if (analyzerF instanceof NamedAnalyzer) {
+ analyzer = (NamedAnalyzer) analyzerF;
+ } else {
+ analyzer = new NamedAnalyzer(analyzerFactory.name(), analyzerFactory.scope(), analyzerF);
+ }
+ analyzers.put(analyzerFactory.name(), analyzer);
+ analyzers.put(Strings.toCamelCase(analyzerFactory.name()), analyzer);
+ String strAliases = indexSettings.get("index.analysis.analyzer." + analyzerFactory.name() + ".alias");
+ if (strAliases != null) {
+ for (String alias : Strings.commaDelimitedListToStringArray(strAliases)) {
+ analyzers.put(alias, analyzer);
+ }
+ }
+ String[] aliases = indexSettings.getAsArray("index.analysis.analyzer." + analyzerFactory.name() + ".alias");
+ for (String alias : aliases) {
+ analyzers.put(alias, analyzer);
+ }
+ }
+
+ defaultAnalyzer = analyzers.get("default");
+ if (defaultAnalyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("no default analyzer configured");
+ }
+ defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : analyzers.get("default");
+ defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : analyzers.get("default");
+ defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
+
+ this.analyzers = ImmutableMap.copyOf(analyzers);
+ }
+
+ public void close() {
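+ // only close analyzers scoped to this index; INDICES and GLOBAL scoped ones are shared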
+ for (NamedAnalyzer analyzer : analyzers.values()) {
+ if (analyzer.scope() == AnalyzerScope.INDEX) {
+ try {
+ analyzer.close();
+ } catch (NullPointerException e) {
+ // because analyzers are aliased, they might be closed several times
+ // an NPE is thrown in this case, so ignore....
+ } catch (Exception e) {
+ logger.debug("failed to close analyzer " + analyzer, e);
+ }
+ }
+ }
+ }
+
+ public NamedAnalyzer analyzer(String name) {
+ return analyzers.get(name);
+ }
+
+ public NamedAnalyzer defaultAnalyzer() {
+ return defaultAnalyzer;
+ }
+
+ public NamedAnalyzer defaultIndexAnalyzer() {
+ return defaultIndexAnalyzer;
+ }
+
+ public NamedAnalyzer defaultSearchAnalyzer() {
+ return defaultSearchAnalyzer;
+ }
+
+ public NamedAnalyzer defaultSearchQuoteAnalyzer() {
+ return defaultSearchQuoteAnalyzer;
+ }
+
+ public TokenizerFactory tokenizer(String name) {
+ return tokenizers.get(name);
+ }
+
+ public CharFilterFactory charFilter(String name) {
+ return charFilters.get(name);
+ }
+
+ public TokenFilterFactory tokenFilter(String name) {
+ return tokenFilters.get(name);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisSettingsRequired.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisSettingsRequired.java
new file mode 100644
index 0000000..155b78a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisSettingsRequired.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import java.lang.annotation.*;
+
+/**
+ * A marker annotation on {@link CharFilterFactory}, {@link AnalyzerProvider}, {@link TokenFilterFactory},
+ * or {@link TokenizerFactory}, which causes the provider/factory to be created only when explicit settings
+ * are provided.
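+ * <p/>
+ * For example, {@link CommonGramsTokenFilterFactory} carries this annotation, so that
+ * factory is only created when <code>common_words</code> settings are supplied.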
+ */
+@Target({ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME)
+@Documented
+public @interface AnalysisSettingsRequired {
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java
new file mode 100644
index 0000000..7af8ae0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.common.inject.Provider;
+
+/**
+ *
+ */
+public interface AnalyzerProvider<T extends Analyzer> extends Provider<T> {
+
+ String name();
+
+ AnalyzerScope scope();
+
+ T get();
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java b/src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java
new file mode 100644
index 0000000..a4a3e7f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AnalyzerProviderFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public interface AnalyzerProviderFactory {
+
+ AnalyzerProvider create(String name, Settings settings);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java b/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java
new file mode 100644
index 0000000..c4795d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+/**
+ *
+ */
+public enum AnalyzerScope {
+ INDEX,
+ INDICES,
+ GLOBAL
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java
new file mode 100644
index 0000000..9bf4bb0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.ar.ArabicAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<ArabicAnalyzer> {
+
+ private final ArabicAnalyzer arabicAnalyzer;
+
+ @Inject
+ public ArabicAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ arabicAnalyzer = new ArabicAnalyzer(version,
+ Analysis.parseStopWords(env, settings, ArabicAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public ArabicAnalyzer get() {
+ return this.arabicAnalyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java
new file mode 100644
index 0000000..cee4709
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ArabicNormalizationFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public ArabicNormalizationFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new ArabicNormalizationFilter(tokenStream);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java
new file mode 100644
index 0000000..aa324b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ar.ArabicStemFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ArabicStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public ArabicStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new ArabicStemFilter(tokenStream);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java
new file mode 100644
index 0000000..82ee105
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<ArmenianAnalyzer> {
+
+ private final ArmenianAnalyzer analyzer;
+
+ @Inject
+ public ArmenianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new ArmenianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, ArmenianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public ArmenianAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java
new file mode 100644
index 0000000..730f434
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.eu.BasqueAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<BasqueAnalyzer> {
+
+ private final BasqueAnalyzer analyzer;
+
+ @Inject
+ public BasqueAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new BasqueAnalyzer(version,
+ Analysis.parseStopWords(env, settings, BasqueAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public BasqueAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java
new file mode 100644
index 0000000..5ac47d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.br.BrazilianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<BrazilianAnalyzer> {
+
+ private final BrazilianAnalyzer analyzer;
+
+ @Inject
+ public BrazilianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new BrazilianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, BrazilianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public BrazilianAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java
new file mode 100644
index 0000000..6b8b128
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.br.BrazilianStemFilter;
+import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+/**
+ *
+ */
+public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet exclusions;
+
+ @Inject
+ public BrazilianStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new BrazilianStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java
new file mode 100644
index 0000000..2efc064
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<BulgarianAnalyzer> {
+
+ private final BulgarianAnalyzer analyzer;
+
+ @Inject
+ public BulgarianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new BulgarianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public BulgarianAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/CJKBigramFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CJKBigramFilterFactory.java
new file mode 100644
index 0000000..444f007
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CJKBigramFilterFactory.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.cjk.CJKBigramFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Factory that creates a {@link CJKBigramFilter} to form bigrams of CJK terms
+ * that are generated from StandardTokenizer or ICUTokenizer.
+ * <p/>
+ * CJK types are set by these tokenizers, but you can also use flags to
+ * explicitly control which of the CJK scripts are turned into bigrams.
+ * <p/>
+ * By default, when a CJK character has no adjacent characters to form a bigram,
+ * it is output in unigram form. If you want to always output both unigrams and
+ * bigrams, set the <code>outputUnigrams</code> flag. This can be used for a
+ * combined unigram+bigram approach.
+ * <p/>
+ * In all cases, all non-CJK input is passed through unmodified.
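+ * <p/>
+ * A minimal, hypothetical settings sketch (the filter name <code>my_cjk_bigram</code>
+ * is assumed, not defined here) that bigrams only han and hangul and keeps unigrams:
+ * <pre>
+ * index.analysis.filter.my_cjk_bigram.type: cjk_bigram
+ * index.analysis.filter.my_cjk_bigram.ignored_scripts: hiragana,katakana
+ * index.analysis.filter.my_cjk_bigram.output_unigrams: true
+ * </pre>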
+ */
+public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {
+
+ private final int flags;
+ private final boolean outputUnigrams;
+
+ @Inject
+ public CJKBigramFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ outputUnigrams = settings.getAsBoolean("output_unigrams", false);
+ final String[] asArray = settings.getAsArray("ignored_scripts");
+ Set<String> scripts = new HashSet<String>(Arrays.asList("han", "hiragana", "katakana", "hangul"));
+ if (asArray != null) {
+ scripts.removeAll(Arrays.asList(asArray));
+ }
+ int flags = 0;
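+ // OR together the filter flag for each script that was not explicitly ignored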
+ for (String script : scripts) {
+ if ("han".equals(script)) {
+ flags |= CJKBigramFilter.HAN;
+ } else if ("hiragana".equals(script)) {
+ flags |= CJKBigramFilter.HIRAGANA;
+ } else if ("katakana".equals(script)) {
+ flags |= CJKBigramFilter.KATAKANA;
+ } else if ("hangul".equals(script)) {
+ flags |= CJKBigramFilter.HANGUL;
+ }
+ }
+ this.flags = flags;
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new CJKBigramFilter(tokenStream, flags, outputUnigrams);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/CJKWidthFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CJKWidthFilterFactory.java
new file mode 100644
index 0000000..d1aa0c2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CJKWidthFilterFactory.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.cjk.CJKWidthFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+public final class CJKWidthFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public CJKWidthFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new CJKWidthFilter(tokenStream);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java
new file mode 100644
index 0000000..8b56932
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.ca.CatalanAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<CatalanAnalyzer> {
+
+ private final CatalanAnalyzer analyzer;
+
+ @Inject
+ public CatalanAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new CatalanAnalyzer(version,
+ Analysis.parseStopWords(env, settings, CatalanAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public CatalanAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java
new file mode 100644
index 0000000..b76af08
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import java.io.Reader;
+
+/**
+ *
+ */
+public interface CharFilterFactory {
+
+ String name();
+
+ Reader create(Reader tokenStream);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/CharFilterFactoryFactory.java b/src/main/java/org/elasticsearch/index/analysis/CharFilterFactoryFactory.java
new file mode 100644
index 0000000..e7d3af0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CharFilterFactoryFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public interface CharFilterFactoryFactory {
+
+ CharFilterFactory create(String name, Settings settings);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java b/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java
new file mode 100644
index 0000000..6cc532b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * A class to match character code points.
+ */
+public interface CharMatcher {
+
+ public static class ByUnicodeCategory implements CharMatcher {
+
+ public static CharMatcher of(byte unicodeCategory) {
+ return new ByUnicodeCategory(unicodeCategory);
+ }
+
+ private final byte unicodeType;
+
+ ByUnicodeCategory(byte unicodeType) {
+ this.unicodeType = unicodeType;
+ }
+
+ @Override
+ public boolean isTokenChar(int c) {
+ return Character.getType(c) == unicodeType;
+ }
+ }
+
+ public enum Basic implements CharMatcher {
+ LETTER {
+ @Override
+ public boolean isTokenChar(int c) {
+ return Character.isLetter(c);
+ }
+ },
+ DIGIT {
+ @Override
+ public boolean isTokenChar(int c) {
+ return Character.isDigit(c);
+ }
+ },
+ WHITESPACE {
+ @Override
+ public boolean isTokenChar(int c) {
+ return Character.isWhitespace(c);
+ }
+ },
+ PUNCTUATION {
+ @Override
+ public boolean isTokenChar(int c) {
+ switch (Character.getType(c)) {
+ case Character.START_PUNCTUATION:
+ case Character.END_PUNCTUATION:
+ case Character.OTHER_PUNCTUATION:
+ case Character.CONNECTOR_PUNCTUATION:
+ case Character.DASH_PUNCTUATION:
+ case Character.INITIAL_QUOTE_PUNCTUATION:
+ case Character.FINAL_QUOTE_PUNCTUATION:
+ return true;
+ default:
+ return false;
+ }
+ }
+ },
+ SYMBOL {
+ @Override
+ public boolean isTokenChar(int c) {
+ switch (Character.getType(c)) {
+ case Character.CURRENCY_SYMBOL:
+ case Character.MATH_SYMBOL:
+ case Character.OTHER_SYMBOL:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+ }
+
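+ /**
+ * Composes matchers with logical OR; e.g. <code>new Builder().or(Basic.LETTER).or(Basic.DIGIT).build()</code>
+ * yields a matcher accepting any letter or digit.
+ */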
+ public final class Builder {
+ private final Set<CharMatcher> matchers;
+ Builder() {
+ matchers = new HashSet<CharMatcher>();
+ }
+ public Builder or(CharMatcher matcher) {
+ matchers.add(matcher);
+ return this;
+ }
+ public CharMatcher build() {
+ switch (matchers.size()) {
+ case 0:
+ return new CharMatcher() {
+ @Override
+ public boolean isTokenChar(int c) {
+ return false;
+ }
+ };
+ case 1:
+ return matchers.iterator().next();
+ default:
+ return new CharMatcher() {
+ @Override
+ public boolean isTokenChar(int c) {
+ for (CharMatcher matcher : matchers) {
+ if (matcher.isTokenChar(c)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ };
+ }
+ }
+ }
+
+ /** Returns true if, and only if, the provided character matches this character class. */
+ public boolean isTokenChar(int c);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java
new file mode 100644
index 0000000..84a1f99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.cn.ChineseAnalyzer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<ChineseAnalyzer> {
+
+ private final ChineseAnalyzer analyzer;
+
+ @Inject
+ public ChineseAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new ChineseAnalyzer();
+ }
+
+ @Override
+ public ChineseAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java
new file mode 100644
index 0000000..0a9fae9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.cjk.CJKAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyzer> {
+
+ private final CJKAnalyzer analyzer;
+
+ @Inject
+ public CjkAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ CharArraySet stopWords = Analysis.parseStopWords(env, settings, CJKAnalyzer.getDefaultStopSet(), version);
+
+ analyzer = new CJKAnalyzer(version, stopWords);
+ }
+
+ @Override
+ public CJKAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java
new file mode 100644
index 0000000..2aa6936
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
+import org.apache.lucene.analysis.commongrams.CommonGramsQueryFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ *
+ */
+@AnalysisSettingsRequired
+public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet words;
+
+ private final boolean ignoreCase;
+
+ private final boolean queryMode;
+
+ @Inject
+ public CommonGramsTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.ignoreCase = settings.getAsBoolean("ignore_case", false);
+ this.queryMode = settings.getAsBoolean("query_mode", false);
+ this.words = Analysis.parseCommonWords(env, settings, null, version, ignoreCase);
+
+ if (this.words == null) {
+ throw new ElasticsearchIllegalArgumentException("missing or empty [common_words] or [common_words_path] configuration for common_grams token filter");
+ }
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ CommonGramsFilter filter = new CommonGramsFilter(version, tokenStream, words);
+ if (queryMode) {
+ return new CommonGramsQueryFilter(filter);
+ } else {
+ return filter;
+ }
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java
new file mode 100644
index 0000000..4d79e70
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+
+import java.io.Reader;
+
+/**
+ *
+ */
+public final class CustomAnalyzer extends Analyzer {
+
+ private final TokenizerFactory tokenizerFactory;
+
+ private final CharFilterFactory[] charFilters;
+
+ private final TokenFilterFactory[] tokenFilters;
+
+ private final int positionIncrementGap;
+ private final int offsetGap;
+
+ public CustomAnalyzer(TokenizerFactory tokenizerFactory, CharFilterFactory[] charFilters, TokenFilterFactory[] tokenFilters) {
+ this(tokenizerFactory, charFilters, tokenFilters, 0, -1);
+ }
+
+ public CustomAnalyzer(TokenizerFactory tokenizerFactory, CharFilterFactory[] charFilters, TokenFilterFactory[] tokenFilters,
+ int positionOffsetGap, int offsetGap) {
+ this.tokenizerFactory = tokenizerFactory;
+ this.charFilters = charFilters;
+ this.tokenFilters = tokenFilters;
+ this.positionIncrementGap = positionOffsetGap;
+ this.offsetGap = offsetGap;
+ }
+
+
+ public TokenizerFactory tokenizerFactory() {
+ return tokenizerFactory;
+ }
+
+ public TokenFilterFactory[] tokenFilters() {
+ return tokenFilters;
+ }
+
+ public CharFilterFactory[] charFilters() {
+ return charFilters;
+ }
+
+ @Override
+ public int getPositionIncrementGap(String fieldName) {
+ return this.positionIncrementGap;
+ }
+
+ @Override
+ public int getOffsetGap(String field) {
+ if (offsetGap < 0) {
+ return super.getOffsetGap(field);
+ }
+ return this.offsetGap;
+ }
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = tokenizerFactory.create(reader);
+ TokenStream tokenStream = tokenizer;
+ for (TokenFilterFactory tokenFilter : tokenFilters) {
+ tokenStream = tokenFilter.create(tokenStream);
+ }
+ return new TokenStreamComponents(tokenizer, tokenStream);
+ }
+
+ @Override
+ protected Reader initReader(String fieldName, Reader reader) {
+ if (charFilters != null && charFilters.length > 0) {
+ for (CharFilterFactory charFilter : charFilters) {
+ reader = charFilter.create(reader);
+ }
+ }
+ return reader;
+ }
+}
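
A hedged sketch of wiring CustomAnalyzer by hand; it assumes Lucene's
WhitespaceTokenizer and LowerCaseFilter (org.apache.lucene.analysis.core) and
org.apache.lucene.util.Version, and implements the TokenizerFactory and
TokenFilterFactory interfaces used above:

    TokenizerFactory whitespace = new TokenizerFactory() {
        @Override
        public String name() { return "whitespace"; }
        @Override
        public Tokenizer create(Reader reader) {
            return new WhitespaceTokenizer(Version.LUCENE_46, reader);
        }
    };
    TokenFilterFactory lowercase = new TokenFilterFactory() {
        @Override
        public String name() { return "lowercase"; }
        @Override
        public TokenStream create(TokenStream in) {
            return new LowerCaseFilter(Version.LUCENE_46, in);
        }
    };
    // no char filters; tokens are whitespace-split, then lowercased
    CustomAnalyzer analyzer = new CustomAnalyzer(whitespace,
            new CharFilterFactory[0], new TokenFilterFactory[]{lowercase});
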
diff --git a/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java
new file mode 100644
index 0000000..8975ba0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list
+ * of {@link org.apache.lucene.analysis.TokenFilter}s.
+ */
+public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<CustomAnalyzer> {
+
+ private final Settings analyzerSettings;
+
+ private CustomAnalyzer customAnalyzer;
+
+ @Inject
+ public CustomAnalyzerProvider(Index index, @IndexSettings Settings indexSettings,
+ @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.analyzerSettings = settings;
+ }
+
+ public void build(AnalysisService analysisService) {
+ String tokenizerName = analyzerSettings.get("tokenizer");
+ if (tokenizerName == null) {
+ throw new IllegalArgumentException("Custom Analyzer [" + name() + "] must be configured with a tokenizer");
+ }
+
+ TokenizerFactory tokenizer = analysisService.tokenizer(tokenizerName);
+ if (tokenizer == null) {
+ throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name [" + tokenizerName + "]");
+ }
+
+ List<CharFilterFactory> charFilters = newArrayList();
+ String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
+ for (String charFilterName : charFilterNames) {
+ CharFilterFactory charFilter = analysisService.charFilter(charFilterName);
+ if (charFilter == null) {
+ throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
+ }
+ charFilters.add(charFilter);
+ }
+
+ List<TokenFilterFactory> tokenFilters = newArrayList();
+ String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
+ for (String tokenFilterName : tokenFilterNames) {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter(tokenFilterName);
+ if (tokenFilter == null) {
+ throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
+ }
+ tokenFilters.add(tokenFilter);
+ }
+
+ int positionOffsetGap = analyzerSettings.getAsInt("position_offset_gap", 0);
+ int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
+
+ this.customAnalyzer = new CustomAnalyzer(tokenizer,
+ charFilters.toArray(new CharFilterFactory[charFilters.size()]),
+ tokenFilters.toArray(new TokenFilterFactory[tokenFilters.size()]),
+ positionOffsetGap,
+ offsetGap
+ );
+ }
+
+ @Override
+ public CustomAnalyzer get() {
+ return this.customAnalyzer;
+ }
+}
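
Illustratively, build() above would consume analyzer settings shaped like this
(ImmutableSettings as before; the named components must be registered with the
AnalysisService):

    Settings analyzerSettings = ImmutableSettings.settingsBuilder()
            .put("tokenizer", "standard")
            .putArray("char_filter", "html_strip")
            .putArray("filter", "lowercase", "stop")
            .put("position_offset_gap", 100)
            .build();
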
diff --git a/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java
new file mode 100644
index 0000000..945091c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.cz.CzechAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAnalyzer> {
+
+ private final CzechAnalyzer analyzer;
+
+ @Inject
+ public CzechAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new CzechAnalyzer(version,
+ Analysis.parseStopWords(env, settings, CzechAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public CzechAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
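
Every language analyzer provider in this patch follows the same two-setting
pattern; a sketch, assuming the keys read by Analysis.parseStopWords and
Analysis.parseStemExclusion:

    Settings settings = ImmutableSettings.settingsBuilder()
            .putArray("stopwords", "a", "aby", "ale")   // replaces the default stop set
            .putArray("stem_exclusion", "studenta")     // terms protected from stemming
            .build();
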
diff --git a/src/main/java/org/elasticsearch/index/analysis/CzechStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CzechStemTokenFilterFactory.java
new file mode 100644
index 0000000..6d0cd01
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/CzechStemTokenFilterFactory.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.cz.CzechStemFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+public class CzechStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public CzechStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new CzechStemFilter(tokenStream);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java
new file mode 100644
index 0000000..3a53670
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.da.DanishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<DanishAnalyzer> {
+
+ private final DanishAnalyzer analyzer;
+
+ @Inject
+ public DanishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new DanishAnalyzer(version,
+ Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public DanishAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java
new file mode 100644
index 0000000..4283517
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.payloads.*;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class DelimitedPayloadTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ static final char DEFAULT_DELIMITER = '|';
+ static final PayloadEncoder DEFAULT_ENCODER = new FloatEncoder();
+
+ static final String ENCODING = "encoding";
+ static final String DELIMITER = "delimiter";
+
+ char delimiter;
+ PayloadEncoder encoder;
+
+ @Inject
+ public DelimitedPayloadTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name,
+ @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ String delimiterConf = settings.get(DELIMITER);
+ if (delimiterConf != null && !delimiterConf.isEmpty()) {
+ delimiter = delimiterConf.charAt(0);
+ } else {
+ delimiter = DEFAULT_DELIMITER;
+ }
+
+ String encodingConf = settings.get(ENCODING);
+ if ("float".equals(encodingConf)) {
+ encoder = new FloatEncoder();
+ } else if ("int".equals(encodingConf)) {
+ encoder = new IntegerEncoder();
+ } else if ("identity".equals(encodingConf)) {
+ encoder = new IdentityEncoder();
+ } else {
+ // missing or unrecognized value: fall back to the default (float) encoder
+ // rather than leaving the encoder null
+ encoder = DEFAULT_ENCODER;
+ }
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new DelimitedPayloadTokenFilter(tokenStream, delimiter, encoder);
+ }
+
+}
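
For illustration: with the defaults above, an input token such as "quick|2.5"
is indexed as the term "quick" carrying the float payload 2.5. A settings
sketch (ImmutableSettings as before):

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("delimiter", "|")
            .put("encoding", "float")   // or "int" / "identity"
            .build();
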
diff --git a/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java
new file mode 100644
index 0000000..ae274b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.nl.DutchAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAnalyzer> {
+
+ private final DutchAnalyzer analyzer;
+
+ @Inject
+ public DutchAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new DutchAnalyzer(version,
+ Analysis.parseStopWords(env, settings, DutchAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public DutchAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java
new file mode 100644
index 0000000..2cafeb2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
+import org.apache.lucene.analysis.nl.DutchStemFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet exclusions;
+
+ @Inject
+ public DutchStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new DutchStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
+ }
+}
\ No newline at end of file
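
A sketch of how the exclusion set keeps terms unstemmed, assuming Lucene's
WhitespaceTokenizer and Version plus java.io.StringReader and java.util.Arrays:

    CharArraySet exclusions = new CharArraySet(Version.LUCENE_46,
            Arrays.asList("fiets"), false);
    TokenStream stream = new DutchStemFilter(new SetKeywordMarkerFilter(
            new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("fietsen fiets")),
            exclusions));
    // "fietsen" is stemmed; "fiets" is marked as a keyword and passes through
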
diff --git a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java
new file mode 100644
index 0000000..c01379c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter.Side;
+import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
+import org.apache.lucene.analysis.ngram.NGramTokenFilter;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+
+/**
+ *
+ */
+public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final int minGram;
+
+ private final int maxGram;
+
+ private final EdgeNGramTokenFilter.Side side;
+
+ private org.elasticsearch.Version esVersion;
+
+ @Inject
+ public EdgeNGramTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
+ this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
+ this.side = EdgeNGramTokenFilter.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
+ this.esVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if (version.onOrAfter(Version.LUCENE_43) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
+ /*
+ * We added this in 0.90.2, but 0.90.1 already used LUCENE_43, so we cannot rely on the Lucene version alone.
+ * Conversely, if somebody runs 0.90.2 or later against an older Lucene version, we should still use the deprecated filter.
+ */
+ final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // always use 4.4 or higher
+ TokenStream result = tokenStream;
+ // side=BACK is no longer supported, but applying ReverseStringFilter both before and after the edge n-gram filter has the same effect
+ if (side == Side.BACK) {
+ result = new ReverseStringFilter(version, result);
+ }
+ result = new EdgeNGramTokenFilter(version, result, minGram, maxGram);
+ if (side == Side.BACK) {
+ result = new ReverseStringFilter(version, result);
+ }
+ return result;
+ }
+ return new EdgeNGramTokenFilter(version, tokenStream, side, minGram, maxGram);
+ }
+}
\ No newline at end of file
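
Illustrative settings for the filter above: with min_gram=1 and max_gram=2 the
token "quick" yields the front grams "q" and "qu":

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("min_gram", 1)
            .put("max_gram", 2)
            .put("side", "front")   // "back" adds the ReverseStringFilter wrapping above
            .build();
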
diff --git a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java
new file mode 100644
index 0000000..a1c7837
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
+import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
+import org.apache.lucene.analysis.ngram.NGramTokenizer;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars;
+
+/**
+ *
+ */
+@SuppressWarnings("deprecation")
+public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final int minGram;
+
+ private final int maxGram;
+
+ private final Lucene43EdgeNGramTokenizer.Side side;
+
+ private final CharMatcher matcher;
+
+ protected org.elasticsearch.Version esVersion;
+
+
+ @Inject
+ public EdgeNGramTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
+ this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
+ this.side = Lucene43EdgeNGramTokenizer.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
+ this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
+ this.esVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ if (version.onOrAfter(Version.LUCENE_43) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
+ /*
+ * We added this in 0.90.2, but 0.90.1 already used LUCENE_43, so we cannot rely on the Lucene version alone.
+ * Conversely, if somebody runs 0.90.2 or later against an older Lucene version, we should still use the deprecated tokenizer.
+ */
+ if (side == Lucene43EdgeNGramTokenizer.Side.BACK) {
+ throw new ElasticsearchIllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use"
+ + " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs."
+ + " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back"
+ + " in combination with a \"keyword\" tokenizer");
+ }
+ final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // always use 4.4 or higher
+ if (matcher == null) {
+ return new EdgeNGramTokenizer(version, reader, minGram, maxGram);
+ } else {
+ return new EdgeNGramTokenizer(version, reader, minGram, maxGram) {
+ @Override
+ protected boolean isTokenChar(int chr) {
+ return matcher.isTokenChar(chr);
+ }
+ };
+ }
+ } else {
+ return new Lucene43EdgeNGramTokenizer(version, reader, side, minGram, maxGram);
+ }
+ }
+}
\ No newline at end of file
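
A hedged settings sketch for the tokenizer factory; the token_chars class
names are assumed to match what NGramTokenizerFactory.parseTokenChars accepts:

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("min_gram", 2)
            .put("max_gram", 5)
            .putArray("token_chars", "letter", "digit")   // assumed class names
            .build();
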
diff --git a/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java
new file mode 100644
index 0000000..ac05f55
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.ElisionFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet articles;
+
+ @Inject
+ public ElisionTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.articles = Analysis.parseArticles(env, settings, version);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new ElisionFilter(tokenStream, articles);
+ }
+}
\ No newline at end of file
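
Illustratively, with French articles configured, "l'avion" analyzes to "avion"
(the "articles" key is assumed from Analysis.parseArticles):

    Settings settings = ImmutableSettings.settingsBuilder()
            .putArray("articles", "l", "m", "t", "qu", "n", "s", "j")
            .build();
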
diff --git a/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java
new file mode 100644
index 0000000..1bfb754
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<EnglishAnalyzer> {
+
+ private final EnglishAnalyzer analyzer;
+
+ @Inject
+ public EnglishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new EnglishAnalyzer(version,
+ Analysis.parseStopWords(env, settings, EnglishAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public EnglishAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java
new file mode 100644
index 0000000..7d62a9c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.AnalyzerWrapper;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+
+/**
+ *
+ */
+public final class FieldNameAnalyzer extends AnalyzerWrapper {
+
+ private final ImmutableOpenMap<String, Analyzer> analyzers;
+
+ private final Analyzer defaultAnalyzer;
+
+ public FieldNameAnalyzer(ImmutableOpenMap<String, Analyzer> analyzers, Analyzer defaultAnalyzer) {
+ this.analyzers = analyzers;
+ this.defaultAnalyzer = defaultAnalyzer;
+ }
+
+ public ImmutableOpenMap<String, Analyzer> analyzers() {
+ return analyzers;
+ }
+
+ public Analyzer defaultAnalyzer() {
+ return defaultAnalyzer;
+ }
+
+ @Override
+ protected Analyzer getWrappedAnalyzer(String fieldName) {
+ return getAnalyzer(fieldName);
+ }
+
+ @Override
+ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+ return components;
+ }
+
+ private Analyzer getAnalyzer(String name) {
+ Analyzer analyzer = analyzers.get(name);
+ if (analyzer != null) {
+ return analyzer;
+ }
+ return defaultAnalyzer;
+ }
+}
\ No newline at end of file
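
A sketch of per-field wiring that mirrors getAnalyzer above; titleAnalyzer and
defaultAnalyzer stand in for any pre-built Analyzer instances:

    ImmutableOpenMap.Builder<String, Analyzer> fields = ImmutableOpenMap.builder();
    fields.put("title", titleAnalyzer);   // hypothetical per-field analyzer
    FieldNameAnalyzer byField = new FieldNameAnalyzer(fields.build(), defaultAnalyzer);
    // "title" resolves to titleAnalyzer; every other field gets defaultAnalyzer
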
diff --git a/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java
new file mode 100644
index 0000000..2e5deed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.fi.FinnishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<FinnishAnalyzer> {
+
+ private final FinnishAnalyzer analyzer;
+
+ @Inject
+ public FinnishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new FinnishAnalyzer(version,
+ Analysis.parseStopWords(env, settings, FinnishAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public FinnishAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java
new file mode 100644
index 0000000..9de511f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.fr.FrenchAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<FrenchAnalyzer> {
+
+ private final FrenchAnalyzer analyzer;
+
+ @Inject
+ public FrenchAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new FrenchAnalyzer(version,
+ Analysis.parseStopWords(env, settings, FrenchAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public FrenchAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java
new file mode 100644
index 0000000..25b18bb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.fr.FrenchStemFilter;
+import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet exclusions;
+
+ @Inject
+ public FrenchStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new FrenchStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java
new file mode 100644
index 0000000..26800a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.gl.GalicianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<GalicianAnalyzer> {
+
+ private final GalicianAnalyzer analyzer;
+
+ @Inject
+ public GalicianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new GalicianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, GalicianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public GalicianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java
new file mode 100644
index 0000000..98b2101
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.de.GermanAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<GermanAnalyzer> {
+
+ private final GermanAnalyzer analyzer;
+
+ @Inject
+ public GermanAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new GermanAnalyzer(version,
+ Analysis.parseStopWords(env, settings, GermanAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public GermanAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java
new file mode 100644
index 0000000..6bd9eec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.de.GermanStemFilter;
+import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet exclusions;
+
+ @Inject
+ public GermanStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new GermanStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java
new file mode 100644
index 0000000..3502e39
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.el.GreekAnalyzer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAnalyzer> {
+
+ private final GreekAnalyzer analyzer;
+
+ @Inject
+ public GreekAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new GreekAnalyzer(version,
+ Analysis.parseStopWords(env, settings, GreekAnalyzer.getDefaultStopSet(), version));
+ }
+
+ @Override
+ public GreekAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java
new file mode 100644
index 0000000..bdd937b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.hi.HindiAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAnalyzer> {
+
+ private final HindiAnalyzer analyzer;
+
+ @Inject
+ public HindiAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new HindiAnalyzer(version,
+ Analysis.parseStopWords(env, settings, HindiAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public HindiAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/HtmlStripCharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/HtmlStripCharFilterFactory.java
new file mode 100644
index 0000000..18b2dda
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/HtmlStripCharFilterFactory.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ *
+ */
+public class HtmlStripCharFilterFactory extends AbstractCharFilterFactory {
+
+ private final ImmutableSet<String> escapedTags;
+
+ @Inject
+ public HtmlStripCharFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name);
+ String[] escapedTags = settings.getAsArray("escaped_tags");
+ if (escapedTags.length > 0) {
+ this.escapedTags = ImmutableSet.copyOf(escapedTags);
+ } else {
+ this.escapedTags = null;
+ }
+ }
+
+ public ImmutableSet<String> escapedTags() {
+ return escapedTags;
+ }
+
+ @Override
+ public Reader create(Reader tokenStream) {
+ return new HTMLStripCharFilter(tokenStream, escapedTags);
+ }
+}
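
For illustration: tags listed under escaped_tags survive stripping, so with
the sketch below "<p><b>bold</b></p>" comes out roughly as "<b>bold</b>":

    Settings settings = ImmutableSettings.settingsBuilder()
            .putArray("escaped_tags", "b")   // keep <b>; strip everything else
            .build();
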
diff --git a/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java
new file mode 100644
index 0000000..d6f82fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.hu.HungarianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<HungarianAnalyzer> {
+
+ private final HungarianAnalyzer analyzer;
+
+ @Inject
+ public HungarianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new HungarianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, HungarianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public HungarianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java
new file mode 100644
index 0000000..1c6e4a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import java.util.Locale;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.hunspell.HunspellDictionary;
+import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.analysis.HunspellService;
+
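+/**
+ * A {@link TokenFilterFactory} for the Lucene {@link HunspellStemFilter}. The dictionary is
+ * selected via the <code>locale</code> setting (aliases: <code>language</code>, <code>lang</code>)
+ * and looked up through the node level {@link HunspellService}; <code>dedup</code> (default
+ * <code>true</code>) drops duplicate stems and <code>recursion_level</code> (default
+ * <code>2</code>) bounds recursive stemming.
+ * <p/>
+ * A hypothetical settings sketch (the values are made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("locale", "en_US")
+ *         .put("dedup", true)
+ *         .put("recursion_level", 2)
+ *         .build();
+ * </pre>
+ */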
+@AnalysisSettingsRequired
+public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final HunspellDictionary dictionary;
+ private final boolean dedup;
+ private final int recursionLevel;
+
+ @Inject
+ public HunspellTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings, HunspellService hunspellService) {
+ super(index, indexSettings, name, settings);
+
+ String locale = settings.get("locale", settings.get("language", settings.get("lang", null)));
+ if (locale == null) {
+ throw new ElasticsearchIllegalArgumentException("missing [locale | language | lang] configuration for hunspell token filter");
+ }
+
+ dictionary = hunspellService.getDictionary(locale);
+ if (dictionary == null) {
+ throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale));
+ }
+
+ dedup = settings.getAsBoolean("dedup", true);
+
+ recursionLevel = settings.getAsInt("recursion_level", 2);
+ if (recursionLevel < 0) {
+ throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT, "Negative recursion level not allowed for hunspell [%d]", recursionLevel));
+ }
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new HunspellStemFilter(tokenStream, dictionary, dedup, recursionLevel);
+ }
+
+ public boolean dedup() {
+ return dedup;
+ }
+
+ public int recursionLevel() {
+ return recursionLevel;
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java
new file mode 100644
index 0000000..086dcac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.id.IndonesianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link IndonesianAnalyzer}, configured with stop words and stem
+ * exclusions taken from the index settings.
+ */
+public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<IndonesianAnalyzer> {
+
+ private final IndonesianAnalyzer analyzer;
+
+ @Inject
+ public IndonesianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new IndonesianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, IndonesianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public IndonesianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java
new file mode 100644
index 0000000..92d5de3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.it.ItalianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link ItalianAnalyzer}, configured with stop words and stem
+ * exclusions taken from the index settings.
+ */
+public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<ItalianAnalyzer> {
+
+ private final ItalianAnalyzer analyzer;
+
+ @Inject
+ public ItalianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new ItalianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, ItalianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public ItalianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java
new file mode 100644
index 0000000..78294c6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.en.KStemFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
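+/**
+ * A {@link TokenFilterFactory} for the Lucene {@link KStemFilter}, a less aggressive
+ * stemmer for English than the Porter stemmer. Takes no settings.
+ */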
+public class KStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public KStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new KStemFilter(tokenStream);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java
new file mode 100644
index 0000000..1178aed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * A {@link TokenFilterFactory} for {@link KeepWordFilter}. This filter only
+ * keeps tokens that are contained in the term set configured via the
+ * {@value #KEEP_WORDS_KEY} setting. This filter acts like an inverse stop
+ * filter.
+ * <p/>
+ * Configuration options:
+ * <p/>
+ * <ul>
+ * <li>{@value #KEEP_WORDS_KEY} the array of words / tokens to keep.</li>
+ * <p/>
+ * <li>{@value #KEEP_WORDS_PATH_KEY} a reference to a file containing the words
+ * / tokens to keep. Note: this is an alternative to {@value #KEEP_WORDS_KEY};
+ * if both are set, an exception will be thrown.</li>
+ * <p/>
+ * <li>{@value #ENABLE_POS_INC_KEY} <code>true</code> iff the filter should
+ * maintain position increments for dropped tokens. The default is
+ * <code>true</code>.</li>
+ * <p/>
+ * <li>{@value #KEEP_WORDS_CASE_KEY} whether to match the keep words case
+ * insensitively. The default is <code>false</code>, which corresponds to
+ * case-sensitive matching.</li>
+ * </ul>
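+ * <p/>
+ * A hypothetical settings sketch (the word list is made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .putArray("keep_words", "one", "two", "three")
+ *         .build();
+ * </pre>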
+ *
+ * @see StopTokenFilterFactory
+ */
+@AnalysisSettingsRequired
+public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
+ private final CharArraySet keepWords;
+ private final boolean enablePositionIncrements;
+ private static final String KEEP_WORDS_KEY = "keep_words";
+ private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path";
+ private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc
+ private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
+
+ @Inject
+ public KeepWordFilterFactory(Index index, @IndexSettings Settings indexSettings,
+ Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ final String[] arrayKeepWords = settings.getAsArray(KEEP_WORDS_KEY, null);
+ final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null);
+ if ((arrayKeepWords == null && keepWordsPath == null) || (arrayKeepWords != null && keepWordsPath != null)) {
+ // we don't allow both or none
+ throw new ElasticsearchIllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `"
+ + KEEP_WORDS_PATH_KEY + "` to be configured");
+ }
+ if (version.onOrAfter(Version.LUCENE_44) && settings.get(ENABLE_POS_INC_KEY) != null) {
+ throw new ElasticsearchIllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
+ + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
+ }
+ enablePositionIncrements = version.onOrAfter(Version.LUCENE_44) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
+
+ this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY, version);
+
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if (version.onOrAfter(Version.LUCENE_44)) {
+ return new KeepWordFilter(version, tokenStream, keepWords);
+ }
+ return new KeepWordFilter(version, enablePositionIncrements, tokenStream, keepWords);
+ }
+
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java
new file mode 100644
index 0000000..2de249d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link KeywordAnalyzer}, which takes no settings and emits
+ * the whole input as a single token.
+ */
+public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider<KeywordAnalyzer> {
+
+ private final KeywordAnalyzer keywordAnalyzer;
+
+ @Inject
+ public KeywordAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.keywordAnalyzer = new KeywordAnalyzer();
+ }
+
+ @Override
+ public KeywordAnalyzer get() {
+ return this.keywordAnalyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java
new file mode 100644
index 0000000..155e48b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import java.util.Set;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
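+/**
+ * A {@link TokenFilterFactory} for {@link SetKeywordMarkerFilter}; tokens found in the set
+ * configured via <code>keywords</code> (or a <code>keywords_path</code> file) are marked as
+ * keywords so that subsequent stemmers leave them untouched. <code>ignore_case</code>
+ * (default <code>false</code>) makes the lookup case insensitive.
+ * <p/>
+ * A hypothetical settings sketch (the keyword list is made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .putArray("keywords", "elasticsearch", "lucene")
+ *         .put("ignore_case", true)
+ *         .build();
+ * </pre>
+ */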
+@AnalysisSettingsRequired
+public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet keywordLookup;
+
+ @Inject
+ public KeywordMarkerTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ boolean ignoreCase = settings.getAsBoolean("ignore_case", false);
+ Set<?> rules = Analysis.getWordSet(env, settings, "keywords", version);
+ if (rules == null) {
+ throw new ElasticsearchIllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured");
+ }
+ keywordLookup = new CharArraySet(version, rules, ignoreCase);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new SetKeywordMarkerFilter(tokenStream, keywordLookup);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java
new file mode 100644
index 0000000..e97ea41
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ * A tokenizer factory for {@link KeywordTokenizer}, which emits the entire input as a
+ * single token, reading it in chunks of <code>buffer_size</code> (default 256) characters.
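+ * <p/>
+ * A hypothetical settings sketch (the buffer size is made up for illustration):
+ * <pre>
+ * // hypothetical value, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("buffer_size", 512)
+ *         .build();
+ * </pre>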
+ */
+public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final int bufferSize;
+
+ @Inject
+ public KeywordTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ bufferSize = settings.getAsInt("buffer_size", 256);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return new KeywordTokenizer(reader, bufferSize);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java
new file mode 100644
index 0000000..573aa19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.lv.LatvianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link LatvianAnalyzer}, configured with stop words and stem
+ * exclusions taken from the index settings.
+ */
+public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<LatvianAnalyzer> {
+
+ private final LatvianAnalyzer analyzer;
+
+ @Inject
+ public LatvianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new LatvianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, LatvianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public LatvianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java
new file mode 100644
index 0000000..5a2917b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import org.apache.lucene.util.Version;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.LengthFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * A {@link TokenFilterFactory} for {@link LengthFilter}; only tokens whose length lies
+ * between <code>min</code> (default 0) and <code>max</code> (default
+ * <code>Integer.MAX_VALUE</code>) are kept.
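+ * <p/>
+ * A hypothetical settings sketch (the bounds are made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("min", 2)
+ *         .put("max", 20)
+ *         .build();
+ * </pre>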
+ */
+public class LengthTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final int min;
+ private final int max;
+ private final boolean enablePositionIncrements;
+ private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
+
+ @Inject
+ public LengthTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ min = settings.getAsInt("min", 0);
+ max = settings.getAsInt("max", Integer.MAX_VALUE);
+ if (version.onOrAfter(Version.LUCENE_44) && settings.get(ENABLE_POS_INC_KEY) != null) {
+ throw new ElasticsearchIllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
+ + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
+ }
+ enablePositionIncrements = version.onOrAfter(Version.LUCENE_44) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if (version.onOrAfter(Version.LUCENE_44)) {
+ return new LengthFilter(version, tokenStream, min, max);
+ }
+ return new LengthFilter(version, enablePositionIncrements, tokenStream, min, max);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java
new file mode 100644
index 0000000..9ae20a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LetterTokenizer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ * A tokenizer factory for {@link LetterTokenizer}, which splits text on non-letter
+ * characters. Takes no settings.
+ */
+public class LetterTokenizerFactory extends AbstractTokenizerFactory {
+
+ @Inject
+ public LetterTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return new LetterTokenizer(version, reader);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java
new file mode 100644
index 0000000..fe954f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * A {@link TokenFilterFactory} for {@link LimitTokenCountFilter}; emits at most
+ * <code>max_token_count</code> tokens (default 1). Unless <code>consume_all_tokens</code>
+ * is set, the filter stops consuming the input once the limit is reached.
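+ * <p/>
+ * A hypothetical settings sketch (the values are made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("max_token_count", 10)
+ *         .put("consume_all_tokens", false)
+ *         .build();
+ * </pre>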
+ */
+public class LimitTokenCountFilterFactory extends AbstractTokenFilterFactory {
+
+ final int maxTokenCount;
+ final boolean consumeAllTokens;
+
+ @Inject
+ public LimitTokenCountFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.maxTokenCount = settings.getAsInt("max_token_count", 1);
+ this.consumeAllTokens = settings.getAsBoolean("consume_all_tokens", false);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new LimitTokenCountFilter(tokenStream, maxTokenCount, consumeAllTokens);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java
new file mode 100644
index 0000000..9dc7f66
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
+import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * A {@link TokenFilterFactory} for lower casing. The optional <code>language</code>
+ * setting selects a language specific filter (<code>greek</code> or <code>turkish</code>);
+ * otherwise the plain {@link LowerCaseFilter} is used.
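+ * <p/>
+ * A hypothetical settings sketch (the language is made up for illustration):
+ * <pre>
+ * // hypothetical value, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("language", "greek")
+ *         .build();
+ * </pre>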
+ */
+public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final String lang;
+
+ @Inject
+ public LowerCaseTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.lang = settings.get("language", null);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if (lang == null) {
+ return new LowerCaseFilter(version, tokenStream);
+ } else if (lang.equalsIgnoreCase("greek")) {
+ return new GreekLowerCaseFilter(version, tokenStream);
+ } else if (lang.equalsIgnoreCase("turkish")) {
+ return new TurkishLowerCaseFilter(tokenStream);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("language [" + lang + "] not supported for lower case");
+ }
+ }
+}
+
+
diff --git a/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java
new file mode 100644
index 0000000..7eca07b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseTokenizer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ * A tokenizer factory for {@link LowerCaseTokenizer}, which splits on non-letter
+ * characters and lower cases the resulting tokens. Takes no settings.
+ */
+public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory {
+
+ @Inject
+ public LowerCaseTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return new LowerCaseTokenizer(version, reader);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java
new file mode 100644
index 0000000..3b298e6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.charfilter.MappingCharFilter;
+import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
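+/**
+ * A char filter factory for {@link MappingCharFilter}; replacement rules of the form
+ * <code>source =&gt; target</code> are read from the <code>mappings</code> setting (or a
+ * <code>mappings_path</code> file) and compiled into a {@link NormalizeCharMap}.
+ * <p/>
+ * A hypothetical settings sketch (the rules are made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .putArray("mappings", "ph =&gt; f", "qu =&gt; k")
+ *         .build();
+ * </pre>
+ */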
+@AnalysisSettingsRequired
+public class MappingCharFilterFactory extends AbstractCharFilterFactory {
+
+ private final NormalizeCharMap normMap;
+
+ @Inject
+ public MappingCharFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name);
+
+ List<String> rules = Analysis.getWordList(env, settings, "mappings");
+ if (rules == null) {
+ throw new ElasticsearchIllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured");
+ }
+
+ NormalizeCharMap.Builder normMapBuilder = new NormalizeCharMap.Builder();
+ parseRules(rules, normMapBuilder);
+ normMap = normMapBuilder.build();
+ }
+
+ @Override
+ public Reader create(Reader tokenStream) {
+ return new MappingCharFilter(normMap, tokenStream);
+ }
+
+ // source => target
+ private static Pattern rulePattern = Pattern.compile("(.*)\\s*=>\\s*(.*)\\s*$");
+
+ /**
+ * parses a list of MappingCharFilter style rules into a normalize char map
+ */
+ private void parseRules(List<String> rules, NormalizeCharMap.Builder map) {
+ for (String rule : rules) {
+ Matcher m = rulePattern.matcher(rule);
+ if (!m.find())
+ throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]");
+ String lhs = parseString(m.group(1).trim());
+ String rhs = parseString(m.group(2).trim());
+ if (lhs == null || rhs == null)
+ throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Illegal mapping.");
+ map.add(lhs, rhs);
+ }
+ }
+
+ char[] out = new char[256];
+
+ private String parseString(String s) {
+ int readPos = 0;
+ int len = s.length();
+ int writePos = 0;
+ while (readPos < len) {
+ char c = s.charAt(readPos++);
+ if (c == '\\') {
+ if (readPos >= len)
+ throw new RuntimeException("Invalid escaped char in [" + s + "]");
+ c = s.charAt(readPos++);
+ switch (c) {
+ case '\\':
+ c = '\\';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 't':
+ c = '\t';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'u':
+ if (readPos + 3 >= len)
+ throw new RuntimeException("Invalid escaped char in [" + s + "]");
+ c = (char) Integer.parseInt(s.substring(readPos, readPos + 4), 16);
+ readPos += 4;
+ break;
+ }
+ }
+ out[writePos++] = c;
+ }
+ return new String(out, 0, writePos);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java
new file mode 100644
index 0000000..e6436cc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ngram.NGramTokenFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+
+/**
+ * A {@link TokenFilterFactory} for {@link NGramTokenFilter}; emits n-grams of each token
+ * between <code>min_gram</code> and <code>max_gram</code> characters in length.
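+ * <p/>
+ * A hypothetical settings sketch (the gram sizes are made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("min_gram", 2)
+ *         .put("max_gram", 3)
+ *         .build();
+ * </pre>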
+ */
+public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final int minGram;
+
+ private final int maxGram;
+
+
+ @Inject
+ public NGramTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
+ this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // we supported it since 4.3
+ return new NGramTokenFilter(version, tokenStream, minGram, maxGram);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java
new file mode 100644
index 0000000..f8985a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
+import org.apache.lucene.analysis.ngram.NGramTokenizer;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A tokenizer factory for {@link NGramTokenizer}. Besides <code>min_gram</code> and
+ * <code>max_gram</code>, the <code>token_chars</code> setting restricts the character
+ * classes (letter, digit, whitespace, punctuation, symbol, or a Unicode category) that
+ * may appear inside a gram; characters outside these classes act as token boundaries.
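+ * <p/>
+ * A hypothetical settings sketch (the values are made up for illustration):
+ * <pre>
+ * // hypothetical values, for illustration only
+ * Settings settings = ImmutableSettings.settingsBuilder()
+ *         .put("min_gram", 2)
+ *         .put("max_gram", 3)
+ *         .putArray("token_chars", "letter", "digit")
+ *         .build();
+ * </pre>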
+ */
+public class NGramTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final int minGram;
+ private final int maxGram;
+ private final CharMatcher matcher;
+ private org.elasticsearch.Version esVersion;
+
+ static final Map<String, CharMatcher> MATCHERS;
+
+ static {
+ ImmutableMap.Builder<String, CharMatcher> builder = ImmutableMap.builder();
+ builder.put("letter", CharMatcher.Basic.LETTER);
+ builder.put("digit", CharMatcher.Basic.DIGIT);
+ builder.put("whitespace", CharMatcher.Basic.WHITESPACE);
+ builder.put("punctuation", CharMatcher.Basic.PUNCTUATION);
+ builder.put("symbol", CharMatcher.Basic.SYMBOL);
+ // Populate with unicode categories from java.lang.Character
+ for (Field field : Character.class.getFields()) {
+ if (!field.getName().startsWith("DIRECTIONALITY")
+ && Modifier.isPublic(field.getModifiers())
+ && Modifier.isStatic(field.getModifiers())
+ && field.getType() == byte.class) {
+ try {
+ builder.put(field.getName().toLowerCase(Locale.ROOT), CharMatcher.ByUnicodeCategory.of(field.getByte(null)));
+ } catch (Exception e) {
+ // just ignore
+ continue;
+ }
+ }
+ }
+ MATCHERS = builder.build();
+ }
+
+ static CharMatcher parseTokenChars(String[] characterClasses) {
+ if (characterClasses == null || characterClasses.length == 0) {
+ return null;
+ }
+ CharMatcher.Builder builder = new CharMatcher.Builder();
+ for (String characterClass : characterClasses) {
+ characterClass = characterClass.toLowerCase(Locale.ROOT).trim();
+ CharMatcher matcher = MATCHERS.get(characterClass);
+ if (matcher == null) {
+ throw new ElasticsearchIllegalArgumentException("Unknown token type: '" + characterClass + "', must be one of " + MATCHERS.keySet());
+ }
+ builder.or(matcher);
+ }
+ return builder.build();
+ }
+
+ @Inject
+ public NGramTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
+ this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
+ this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
+ this.esVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public Tokenizer create(Reader reader) {
+ if (version.onOrAfter(Version.LUCENE_43) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
+ /*
+ * We added this in 0.90.2, but 0.90.1 already used LUCENE_43, so we cannot rely on the Lucene version alone.
+ * Conversely, if somebody runs 0.90.2 or higher with an earlier Lucene version, we should still use the deprecated tokenizer.
+ */
+ final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // always use 4.4 or higher
+ if (matcher == null) {
+ return new NGramTokenizer(version, reader, minGram, maxGram);
+ } else {
+ return new NGramTokenizer(version, reader, minGram, maxGram) {
+ @Override
+ protected boolean isTokenChar(int chr) {
+ return matcher.isTokenChar(chr);
+ }
+ };
+ }
+ } else {
+ return new Lucene43NGramTokenizer(reader, minGram, maxGram);
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java
new file mode 100644
index 0000000..16b0074
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CustomAnalyzerWrapper;
+
+/**
+ * Named analyzer is an analyzer wrapper around an actual analyzer ({@link #analyzer()})
+ * that is associated with a name ({@link #name()}).
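+ * <p/>
+ * A hypothetical usage sketch (the name and the wrapped analyzer are made up for illustration):
+ * <pre>
+ * // hypothetical example, for illustration only
+ * NamedAnalyzer named = new NamedAnalyzer("my_keyword", AnalyzerScope.INDEX, new KeywordAnalyzer());
+ * </pre>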
+ */
+public class NamedAnalyzer extends CustomAnalyzerWrapper {
+
+ private final String name;
+ private final AnalyzerScope scope;
+ private final Analyzer analyzer;
+ private final int positionOffsetGap;
+
+ public NamedAnalyzer(NamedAnalyzer analyzer, int positionOffsetGap) {
+ this(analyzer.name(), analyzer.scope(), analyzer.analyzer(), positionOffsetGap);
+ }
+
+ public NamedAnalyzer(String name, Analyzer analyzer) {
+ this(name, AnalyzerScope.INDEX, analyzer);
+ }
+
+ public NamedAnalyzer(String name, AnalyzerScope scope, Analyzer analyzer) {
+ this(name, scope, analyzer, Integer.MIN_VALUE);
+ }
+
+ public NamedAnalyzer(String name, AnalyzerScope scope, Analyzer analyzer, int positionOffsetGap) {
+ super(analyzer.getReuseStrategy());
+ this.name = name;
+ this.scope = scope;
+ this.analyzer = analyzer;
+ this.positionOffsetGap = positionOffsetGap;
+ }
+
+ /**
+ * The name of the analyzer.
+ */
+ public String name() {
+ return this.name;
+ }
+
+ /**
+ * The scope of the analyzer.
+ */
+ public AnalyzerScope scope() {
+ return this.scope;
+ }
+
+ /**
+ * The actual analyzer.
+ */
+ public Analyzer analyzer() {
+ return this.analyzer;
+ }
+
+ @Override
+ protected Analyzer getWrappedAnalyzer(String fieldName) {
+ return this.analyzer;
+ }
+
+ @Override
+ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+ return components;
+ }
+
+ @Override
+ public int getPositionIncrementGap(String fieldName) {
+ if (positionOffsetGap != Integer.MIN_VALUE) {
+ return positionOffsetGap;
+ }
+ return super.getPositionIncrementGap(fieldName);
+ }
+
+ @Override
+ public String toString() {
+ return "analyzer name[" + name + "], analyzer [" + analyzer + "]";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java
new file mode 100644
index 0000000..2908a97
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.no.NorwegianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link NorwegianAnalyzer}, configured with stop words and stem
+ * exclusions taken from the index settings.
+ */
+public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<NorwegianAnalyzer> {
+
+ private final NorwegianAnalyzer analyzer;
+
+ @Inject
+ public NorwegianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new NorwegianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, NorwegianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public NorwegianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java
new file mode 100644
index 0000000..4a9f8f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericAnalyzer.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Base class for analyzers that tokenize numeric values through a {@link NumericTokenizer}.
+ */
+public abstract class NumericAnalyzer<T extends NumericTokenizer> extends Analyzer {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ try {
+ // LUCENE 4 UPGRADE: in reusableTokenStream the buffer size was char[120]
+ // Not sure if this is intentional or not
+ return new TokenStreamComponents(createNumericTokenizer(reader, new char[32]));
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to create numeric tokenizer", e);
+ }
+ }
+
+ protected abstract T createNumericTokenizer(Reader reader, char[] buffer) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java
new file mode 100644
index 0000000..c30f9c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericDateAnalyzer.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.util.NumericUtils;
+import org.joda.time.format.DateTimeFormatter;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * A {@link NumericAnalyzer} for dates; values are parsed with the given Joda
+ * {@link DateTimeFormatter} and indexed with the configured precision step.
+ */
+public class NumericDateAnalyzer extends NumericAnalyzer<NumericDateTokenizer> {
+
+ private final int precisionStep;
+
+ private final DateTimeFormatter dateTimeFormatter;
+
+ public NumericDateAnalyzer(DateTimeFormatter dateTimeFormatter) {
+ this(NumericUtils.PRECISION_STEP_DEFAULT, dateTimeFormatter);
+ }
+
+ public NumericDateAnalyzer(int precisionStep, DateTimeFormatter dateTimeFormatter) {
+ this.precisionStep = precisionStep;
+ this.dateTimeFormatter = dateTimeFormatter;
+ }
+
+ @Override
+ protected NumericDateTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
+ return new NumericDateTokenizer(reader, precisionStep, buffer, dateTimeFormatter);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java b/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java
new file mode 100644
index 0000000..82f83fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.joda.time.format.DateTimeFormatter;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Tokenizer that parses its input as a date via the supplied formatter and emits the epoch milliseconds as numeric tokens.
+ */
+public class NumericDateTokenizer extends NumericTokenizer {
+
+ public NumericDateTokenizer(Reader reader, int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException {
+ super(reader, new NumericTokenStream(precisionStep), buffer, dateTimeFormatter);
+ }
+
+ @Override
+ protected void setValue(NumericTokenStream tokenStream, String value) {
+ tokenStream.setLongValue(((DateTimeFormatter) extra).parseMillis(value));
+ }
+}
\ No newline at end of file
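The date variant above delegates parsing to the Joda-Time formatter handed in as the tokenizer's extra object. A minimal sketch of that parse step, assuming Joda-Time on the classpath (the pattern and input value are illustrative):

    import org.joda.time.format.DateTimeFormat;
    import org.joda.time.format.DateTimeFormatter;

    DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy-MM-dd");
    // parseMillis produces the epoch milliseconds that setValue() passes
    // to NumericTokenStream.setLongValue(...)
    long millis = formatter.parseMillis("2014-06-07");

A date field is therefore indexed through the same trie-encoded long machinery as any other long value.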
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java
new file mode 100644
index 0000000..22a579c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import org.apache.lucene.util.NumericUtils;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Analyzer for double fields, with a cache of pre-built per-precision-step instances.
+ */
+public class NumericDoubleAnalyzer extends NumericAnalyzer<NumericDoubleTokenizer> {
+
+ private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
+
+ static {
+ builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
+ builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_double/max", AnalyzerScope.GLOBAL, new NumericDoubleAnalyzer(Integer.MAX_VALUE)));
+ for (int i = 0; i <= 64; i += 4) {
+ builtIn.put(i, new NamedAnalyzer("_double/" + i, AnalyzerScope.GLOBAL, new NumericDoubleAnalyzer(i)));
+ }
+ }
+
+ public static NamedAnalyzer buildNamedAnalyzer(int precisionStep) {
+ NamedAnalyzer namedAnalyzer = builtIn.get(precisionStep);
+ if (namedAnalyzer == null) {
+ namedAnalyzer = new NamedAnalyzer("_double/" + precisionStep, AnalyzerScope.INDEX, new NumericDoubleAnalyzer(precisionStep));
+ }
+ return namedAnalyzer;
+ }
+
+ private final int precisionStep;
+
+ public NumericDoubleAnalyzer() {
+ this(NumericUtils.PRECISION_STEP_DEFAULT);
+ }
+
+ public NumericDoubleAnalyzer(int precisionStep) {
+ this.precisionStep = precisionStep;
+ }
+
+ @Override
+ protected NumericDoubleTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
+ return new NumericDoubleTokenizer(reader, precisionStep, buffer);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java b/src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java
new file mode 100644
index 0000000..89cb381
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericDoubleTokenizer.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Tokenizer that parses its input as a double and emits trie-encoded numeric tokens.
+ */
+public class NumericDoubleTokenizer extends NumericTokenizer {
+
+ public NumericDoubleTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException {
+ super(reader, new NumericTokenStream(precisionStep), buffer, null);
+ }
+
+ @Override
+ protected void setValue(NumericTokenStream tokenStream, String value) {
+ tokenStream.setDoubleValue(Double.parseDouble(value));
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java
new file mode 100644
index 0000000..010212b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericFloatAnalyzer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import org.apache.lucene.util.NumericUtils;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Analyzer for float fields, with a cache of pre-built per-precision-step instances.
+ */
+public class NumericFloatAnalyzer extends NumericAnalyzer<NumericFloatTokenizer> {
+
+ private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
+
+ static {
+ builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
+ builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_float/max", AnalyzerScope.GLOBAL, new NumericFloatAnalyzer(Integer.MAX_VALUE)));
+ for (int i = 0; i <= 64; i += 4) {
+ builtIn.put(i, new NamedAnalyzer("_float/" + i, AnalyzerScope.GLOBAL, new NumericFloatAnalyzer(i)));
+ }
+ }
+
+ public static NamedAnalyzer buildNamedAnalyzer(int precisionStep) {
+ NamedAnalyzer namedAnalyzer = builtIn.get(precisionStep);
+ if (namedAnalyzer == null) {
+ namedAnalyzer = new NamedAnalyzer("_float/" + precisionStep, AnalyzerScope.INDEX, new NumericFloatAnalyzer(precisionStep));
+ }
+ return namedAnalyzer;
+ }
+
+ private final int precisionStep;
+
+ public NumericFloatAnalyzer() {
+ this(NumericUtils.PRECISION_STEP_DEFAULT);
+ }
+
+ public NumericFloatAnalyzer(int precisionStep) {
+ this.precisionStep = precisionStep;
+ }
+
+ @Override
+ protected NumericFloatTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
+ return new NumericFloatTokenizer(reader, precisionStep, buffer);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java b/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java
new file mode 100644
index 0000000..900bbe1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Tokenizer that parses its input as a float and emits trie-encoded numeric tokens.
+ */
+public class NumericFloatTokenizer extends NumericTokenizer {
+
+ public NumericFloatTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException {
+ super(reader, new NumericTokenStream(precisionStep), buffer, null);
+ }
+
+ @Override
+ protected void setValue(NumericTokenStream tokenStream, String value) {
+ tokenStream.setFloatValue(Float.parseFloat(value));
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java
new file mode 100644
index 0000000..e2b18e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericIntegerAnalyzer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import org.apache.lucene.util.NumericUtils;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Analyzer for integer fields, with a cache of pre-built per-precision-step instances.
+ */
+public class NumericIntegerAnalyzer extends NumericAnalyzer<NumericIntegerTokenizer> {
+
+ private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
+
+ static {
+ builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
+ builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_int/max", AnalyzerScope.GLOBAL, new NumericIntegerAnalyzer(Integer.MAX_VALUE)));
+ for (int i = 0; i <= 64; i += 4) {
+ builtIn.put(i, new NamedAnalyzer("_int/" + i, AnalyzerScope.GLOBAL, new NumericIntegerAnalyzer(i)));
+ }
+ }
+
+ public static NamedAnalyzer buildNamedAnalyzer(int precisionStep) {
+ NamedAnalyzer namedAnalyzer = builtIn.get(precisionStep);
+ if (namedAnalyzer == null) {
+ namedAnalyzer = new NamedAnalyzer("_int/" + precisionStep, AnalyzerScope.INDEX, new NumericIntegerAnalyzer(precisionStep));
+ }
+ return namedAnalyzer;
+ }
+
+ private final int precisionStep;
+
+ public NumericIntegerAnalyzer() {
+ this(NumericUtils.PRECISION_STEP_DEFAULT);
+ }
+
+ public NumericIntegerAnalyzer(int precisionStep) {
+ this.precisionStep = precisionStep;
+ }
+
+ @Override
+ protected NumericIntegerTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
+ return new NumericIntegerTokenizer(reader, precisionStep, buffer);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java b/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java
new file mode 100644
index 0000000..c4b8170
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Tokenizer that parses its input as an integer and emits trie-encoded numeric tokens.
+ */
+public class NumericIntegerTokenizer extends NumericTokenizer {
+
+ public NumericIntegerTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException {
+ super(reader, new NumericTokenStream(precisionStep), buffer, null);
+ }
+
+ @Override
+ protected void setValue(NumericTokenStream tokenStream, String value) {
+ tokenStream.setIntValue(Integer.parseInt(value));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java
new file mode 100644
index 0000000..5b11b10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import org.apache.lucene.util.NumericUtils;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Analyzer for long fields, with a cache of pre-built per-precision-step instances.
+ */
+public class NumericLongAnalyzer extends NumericAnalyzer<NumericLongTokenizer> {
+
+ private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
+
+ static {
+ builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
+ builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_long/max", AnalyzerScope.GLOBAL, new NumericLongAnalyzer(Integer.MAX_VALUE)));
+ for (int i = 0; i <= 64; i += 4) {
+ builtIn.put(i, new NamedAnalyzer("_long/" + i, AnalyzerScope.GLOBAL, new NumericLongAnalyzer(i)));
+ }
+ }
+
+ public static NamedAnalyzer buildNamedAnalyzer(int precisionStep) {
+ NamedAnalyzer namedAnalyzer = builtIn.get(precisionStep);
+ if (namedAnalyzer == null) {
+ namedAnalyzer = new NamedAnalyzer("_long/" + precisionStep, AnalyzerScope.INDEX, new NumericLongAnalyzer(precisionStep));
+ }
+ return namedAnalyzer;
+ }
+
+ private final int precisionStep;
+
+ public NumericLongAnalyzer() {
+ this(NumericUtils.PRECISION_STEP_DEFAULT);
+ }
+
+ public NumericLongAnalyzer(int precisionStep) {
+ this.precisionStep = precisionStep;
+ }
+
+ @Override
+ protected NumericLongTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
+ return new NumericLongTokenizer(reader, precisionStep, buffer);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java b/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java
new file mode 100644
index 0000000..5ca9492
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+
+import java.io.IOException;
+import java.io.Reader;
+
+/**
+ * Tokenizer that parses its input as a long and emits trie-encoded numeric tokens.
+ */
+public class NumericLongTokenizer extends NumericTokenizer {
+
+ public NumericLongTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException {
+ super(reader, new NumericTokenStream(precisionStep), buffer, null);
+ }
+
+ @Override
+ protected void setValue(NumericTokenStream tokenStream, String value) {
+ tokenStream.setLongValue(Long.parseLong(value));
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java b/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java
new file mode 100644
index 0000000..5b17b4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.AttributeSource;
+import org.elasticsearch.common.io.Streams;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Iterator;
+
+/**
+ * Tokenizer that reads its whole input into a fixed-size buffer, converts it via {@link #setValue} and delegates token emission to a {@link NumericTokenStream}.
+ */
+public abstract class NumericTokenizer extends Tokenizer {
+
+ /** Make this tokenizer get attributes from the delegate token stream. */
+ private static AttributeFactory delegatingAttributeFactory(final AttributeSource source) {
+ return new AttributeFactory() {
+ @Override
+ public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
+ return (AttributeImpl) source.addAttribute(attClass);
+ }
+ };
+ }
+
+ private final NumericTokenStream numericTokenStream;
+ private final char[] buffer;
+ protected final Object extra;
+ private boolean started;
+
+ protected NumericTokenizer(Reader reader, NumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException {
+ super(delegatingAttributeFactory(numericTokenStream), reader);
+ this.numericTokenStream = numericTokenStream;
+ // Add attributes from the numeric token stream, this works fine because the attribute factory delegates to numericTokenStream
+ for (Iterator<Class<? extends Attribute>> it = numericTokenStream.getAttributeClassesIterator(); it.hasNext();) {
+ addAttribute(it.next());
+ }
+ this.extra = extra;
+ this.buffer = buffer;
+ started = true; // reset() sets this to false so the first incrementToken() reads the value
+ }
+
+ @Override
+ public void reset() throws IOException {
+ super.reset();
+ started = false;
+ }
+
+ @Override
+ public final boolean incrementToken() throws IOException {
+ if (!started) {
+ // reset() must be idempotent, this is why we read data in incrementToken
+ final int len = Streams.readFully(input, buffer);
+ if (len == buffer.length && input.read() != -1) {
+ throw new IOException("Cannot read numeric data larger than " + buffer.length + " chars");
+ }
+ setValue(numericTokenStream, new String(buffer, 0, len));
+ numericTokenStream.reset();
+ started = true;
+ }
+ return numericTokenStream.incrementToken();
+ }
+
+ @Override
+ public void end() throws IOException {
+ super.end();
+ numericTokenStream.end();
+ }
+
+ @Override
+ public void close() throws IOException {
+ super.close();
+ numericTokenStream.close();
+ }
+
+ protected abstract void setValue(NumericTokenStream tokenStream, String value);
+}
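A minimal end-to-end sketch of the consume cycle implemented above, assuming Lucene 4.x and the classes from this patch on the classpath (field name, value and precision step are illustrative; IOException handling is omitted):

    import org.apache.lucene.analysis.TokenStream;
    import java.io.StringReader;

    NumericLongAnalyzer analyzer = new NumericLongAnalyzer(4);
    TokenStream ts = analyzer.tokenStream("my_field", new StringReader("42"));
    ts.reset();                          // sets started back to false
    while (ts.incrementToken()) {
        // the first call buffers and parses "42"; the delegate
        // NumericTokenStream then emits one trie-encoded term per precision step
    }
    ts.end();
    ts.close();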
diff --git a/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java
new file mode 100644
index 0000000..4b11fb0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
+import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final int bufferSize;
+
+ private final char delimiter;
+ private final char replacement;
+ private final int skip;
+ private final boolean reverse;
+
+ @Inject
+ public PathHierarchyTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ bufferSize = settings.getAsInt("buffer_size", 1024);
+ String delimiter = settings.get("delimiter");
+ if (delimiter == null) {
+ this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER;
+ } else if (delimiter.length() > 1) {
+ throw new ElasticsearchIllegalArgumentException("delimiter can only be a one char value");
+ } else {
+ this.delimiter = delimiter.charAt(0);
+ }
+
+ String replacement = settings.get("replacement");
+ if (replacement == null) {
+ this.replacement = this.delimiter;
+ } else if (replacement.length() > 1) {
+ throw new ElasticsearchIllegalArgumentException("replacement can only be a one char value");
+ } else {
+ this.replacement = replacement.charAt(0);
+ }
+ this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP);
+ this.reverse = settings.getAsBoolean("reverse", false);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ if (reverse) {
+ return new ReversePathHierarchyTokenizer(reader, bufferSize, delimiter, replacement, skip);
+ }
+ return new PathHierarchyTokenizer(reader, bufferSize, delimiter, replacement, skip);
+ }
+}
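A sketch of the settings this factory reads, built with the ES 1.x ImmutableSettings builder (index and injector wiring omitted; the keys are the ones parsed in the constructor above):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("buffer_size", 1024)  // read buffer, defaults to 1024
            .put("delimiter", "/")     // must be exactly one character
            .put("replacement", "-")   // defaults to the delimiter itself
            .put("skip", 0)            // leading path elements to drop
            .put("reverse", false)     // true selects ReversePathHierarchyTokenizer
            .build();

With the defaults (reverse false, replacement equal to the delimiter), an input such as /usr/local/bin tokenizes to the cumulative prefixes /usr, /usr/local and /usr/local/bin.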
diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java
new file mode 100644
index 0000000..af053b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PatternAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.regex.Pattern;
+
+/**
+ * Provider for the pattern analyzer, which splits text on a configurable regular expression.
+ */
+public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<PatternAnalyzer> {
+
+ private final PatternAnalyzer analyzer;
+
+ @Inject
+ public PatternAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ Version esVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
+ final CharArraySet defaultStopwords;
+ if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
+ defaultStopwords = CharArraySet.EMPTY_SET;
+ } else {
+ defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+ }
+ boolean lowercase = settings.getAsBoolean("lowercase", true);
+ CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords, version);
+
+ String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
+ if (sPattern == null) {
+ throw new ElasticsearchIllegalArgumentException("Analyzer [" + name + "] of type pattern must have a `pattern` set");
+ }
+ Pattern pattern = Regex.compile(sPattern, settings.get("flags"));
+
+ analyzer = new PatternAnalyzer(version, pattern, lowercase, stopWords);
+ }
+
+ @Override
+ public PatternAnalyzer get() {
+ return analyzer;
+ }
+}
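The pattern and flags settings above are compiled by Regex.compile into a java.util.regex.Pattern. A sketch of a typical configuration via the ES 1.x settings builder (values are illustrative, and the pipe-separated flag-name format is an assumption about Regex.compile):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("pattern", "[\\W_]+")         // token separator, defaults to \W+
            .put("flags", "CASE_INSENSITIVE")  // java.util.regex.Pattern flag names
            .put("lowercase", true)            // defaults to true
            .build();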
diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternCaptureGroupTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/PatternCaptureGroupTokenFilterFactory.java
new file mode 100644
index 0000000..5fd2d5b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PatternCaptureGroupTokenFilterFactory.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.pattern.PatternCaptureGroupTokenFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.regex.Pattern;
+
+@AnalysisSettingsRequired
+public class PatternCaptureGroupTokenFilterFactory extends AbstractTokenFilterFactory {
+ private final Pattern[] patterns;
+ private final boolean preserveOriginal;
+ private static final String PATTERNS_KEY = "patterns";
+ private static final String PRESERVE_ORIG_KEY = "preserve_original";
+
+ @Inject
+ public PatternCaptureGroupTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name,
+ @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ String[] regexes = settings.getAsArray(PATTERNS_KEY, null, false);
+ if (regexes == null) {
+ throw new ElasticsearchIllegalArgumentException("required setting '" + PATTERNS_KEY + "' is missing for token filter [" + name + "]");
+ }
+ patterns = new Pattern[regexes.length];
+ for (int i = 0; i < regexes.length; i++) {
+ patterns[i] = Pattern.compile(regexes[i]);
+ }
+
+ preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, true);
+ }
+
+ @Override
+ public TokenFilter create(TokenStream tokenStream) {
+ return new PatternCaptureGroupTokenFilter(tokenStream, preserveOriginal, patterns);
+ }
+}
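A minimal sketch of what the created filter emits, assuming Lucene 4.6 as bundled with this release (tokenizer, pattern and input are illustrative):

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.pattern.PatternCaptureGroupTokenFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;
    import java.io.StringReader;
    import java.util.regex.Pattern;

    TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("foo123bar"));
    ts = new PatternCaptureGroupTokenFilter(ts, true, Pattern.compile("(\\d+)"));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
        System.out.println(term); // "foo123bar" (preserved original), then "123"
    }
    ts.end();
    ts.close();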
diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java
new file mode 100644
index 0000000..25101dd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+import java.util.regex.Pattern;
+
+@AnalysisSettingsRequired
+public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
+
+ private final Pattern pattern;
+ private final String replacement;
+
+ @Inject
+ public PatternReplaceCharFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name);
+
+ if (!Strings.hasLength(settings.get("pattern"))) {
+ throw new ElasticsearchIllegalArgumentException("pattern is missing for [" + name + "] char filter of type 'pattern_replace'");
+ }
+ pattern = Pattern.compile(settings.get("pattern"));
+ replacement = settings.get("replacement", ""); // when not set or set to "", use "".
+ }
+
+ public Pattern getPattern() {
+ return pattern;
+ }
+
+ public String getReplacement() {
+ return replacement;
+ }
+
+ @Override
+ public Reader create(Reader tokenStream) {
+ return new PatternReplaceCharFilter(pattern, replacement, tokenStream);
+ }
+}
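Unlike a token filter, the char filter built here rewrites the character stream before any tokenizer runs. A minimal sketch, assuming Lucene 4.x (pattern and input are illustrative):

    import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
    import java.io.Reader;
    import java.io.StringReader;
    import java.util.regex.Pattern;

    Reader filtered = new PatternReplaceCharFilter(
            Pattern.compile("\\d+"), "#", new StringReader("room 123"));
    // reading from the filtered reader yields "room #"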
diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternReplaceTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/PatternReplaceTokenFilterFactory.java
new file mode 100644
index 0000000..9cb23bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PatternReplaceTokenFilterFactory.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.pattern.PatternReplaceFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.regex.Pattern;
+
+@AnalysisSettingsRequired
+public class PatternReplaceTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final Pattern pattern;
+ private final String replacement;
+ private final boolean all;
+
+ @Inject
+ public PatternReplaceTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ String sPattern = settings.get("pattern", null);
+ if (sPattern == null) {
+ throw new ElasticsearchIllegalArgumentException("pattern is missing for [" + name + "] token filter of type 'pattern_replace'");
+ }
+ this.pattern = Regex.compile(sPattern, settings.get("flags"));
+ this.replacement = settings.get("replacement", "");
+ this.all = settings.getAsBoolean("all", true);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new PatternReplaceFilter(tokenStream, pattern, replacement, all);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java
new file mode 100644
index 0000000..1dc2f24
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.pattern.PatternTokenizer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+import java.util.regex.Pattern;
+
+public class PatternTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final Pattern pattern;
+ private final int group;
+
+ @Inject
+ public PatternTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
+ if (sPattern == null) {
+ throw new ElasticsearchIllegalArgumentException("pattern is missing for [" + name + "] tokenizer of type 'pattern'");
+ }
+
+ this.pattern = Regex.compile(sPattern, settings.get("flags"));
+ this.group = settings.getAsInt("group", -1);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return new PatternTokenizer(reader, pattern, group);
+ }
+}
\ No newline at end of file
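The group setting decides whether the pattern splits the input or selects the tokens itself. A sketch of both modes, assuming Lucene 4.x (patterns and input are illustrative):

    import org.apache.lucene.analysis.pattern.PatternTokenizer;
    import java.io.StringReader;
    import java.util.regex.Pattern;

    // group == -1 (the default above): the pattern acts as a splitter,
    // so "aaa,bbb,ccc" tokenizes to "aaa", "bbb", "ccc"
    PatternTokenizer splitting =
            new PatternTokenizer(new StringReader("aaa,bbb,ccc"), Pattern.compile(","), -1);

    // group >= 0: each match of that capture group becomes a token, so with
    // pattern "(\\w+)" and group 1 the same input also yields "aaa", "bbb", "ccc"
    PatternTokenizer matching =
            new PatternTokenizer(new StringReader("aaa,bbb,ccc"), Pattern.compile("(\\w+)"), 1);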
diff --git a/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java
new file mode 100644
index 0000000..eec5aca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.fa.PersianAnalyzer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for Lucene's {@link PersianAnalyzer} with configurable stopwords.
+ */
+public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider<PersianAnalyzer> {
+
+ private final PersianAnalyzer analyzer;
+
+ @Inject
+ public PersianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new PersianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, PersianAnalyzer.getDefaultStopSet(), version));
+ }
+
+ @Override
+ public PersianAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java
new file mode 100644
index 0000000..fbf1771
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for Lucene's {@link PersianNormalizationFilter}.
+ */
+public class PersianNormalizationFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public PersianNormalizationFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new PersianNormalizationFilter(tokenStream);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java
new file mode 100644
index 0000000..ff08ed5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.en.PorterStemFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for Lucene's {@link PorterStemFilter}.
+ */
+public class PorterStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public PorterStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new PorterStemFilter(tokenStream);
+ }
+}
+
+
diff --git a/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java
new file mode 100644
index 0000000..802015a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.pt.PortugueseAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for Lucene's {@link PortugueseAnalyzer} with configurable stopwords and stem exclusions.
+ */
+public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider<PortugueseAnalyzer> {
+
+ private final PortugueseAnalyzer analyzer;
+
+ @Inject
+ public PortugueseAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new PortugueseAnalyzer(version,
+ Analysis.parseStopWords(env, settings, PortugueseAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public PortugueseAnalyzer get() {
+ return this.analyzer;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java
new file mode 100644
index 0000000..af87d09
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+
+/**
+ * Analyzer provider that wraps a pre-built analyzer in a shared {@link NamedAnalyzer}.
+ */
+public class PreBuiltAnalyzerProvider implements AnalyzerProvider<NamedAnalyzer> {
+
+ private final NamedAnalyzer analyzer;
+
+ public PreBuiltAnalyzerProvider(String name, AnalyzerScope scope, Analyzer analyzer) {
+ // we create the named analyzer here so the resources associated with it will be shared
+ // and we won't wrap a shared analyzer with named analyzer each time causing the resources
+ // to not be shared...
+ this.analyzer = new NamedAnalyzer(name, scope, analyzer);
+ }
+
+ @Override
+ public String name() {
+ return analyzer.name();
+ }
+
+ @Override
+ public AnalyzerScope scope() {
+ return analyzer.scope();
+ }
+
+ @Override
+ public NamedAnalyzer get() {
+ return analyzer;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java
new file mode 100644
index 0000000..05342ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+
+/**
+ * Factory that returns the shared pre-built analyzer provider, or a version-specific pre-built analyzer for indices created with an older Elasticsearch version.
+ */
+public class PreBuiltAnalyzerProviderFactory implements AnalyzerProviderFactory {
+
+ private final PreBuiltAnalyzerProvider analyzerProvider;
+
+ public PreBuiltAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) {
+ analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer);
+ }
+
+ @Override
+ public AnalyzerProvider create(String name, Settings settings) {
+ Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ if (!Version.CURRENT.equals(indexVersion)) {
+ PreBuiltAnalyzers preBuiltAnalyzers = PreBuiltAnalyzers.getOrDefault(name, null);
+ if (preBuiltAnalyzers != null) {
+ Analyzer analyzer = preBuiltAnalyzers.getAnalyzer(indexVersion);
+ return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer);
+ }
+ }
+
+ return analyzerProvider;
+ }
+
+ public Analyzer analyzer() {
+ return analyzerProvider.get();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactory.java b/src/main/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactory.java
new file mode 100644
index 0000000..107bee1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
+
+public class PreBuiltCharFilterFactoryFactory implements CharFilterFactoryFactory {
+
+ private final CharFilterFactory charFilterFactory;
+
+ public PreBuiltCharFilterFactoryFactory(CharFilterFactory charFilterFactory) {
+ this.charFilterFactory = charFilterFactory;
+ }
+
+ @Override
+ public CharFilterFactory create(String name, Settings settings) {
+ Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ if (!Version.CURRENT.equals(indexVersion)) {
+ PreBuiltCharFilters preBuiltCharFilters = PreBuiltCharFilters.getOrDefault(name, null);
+ if (preBuiltCharFilters != null) {
+ return preBuiltCharFilters.getCharFilterFactory(indexVersion);
+ }
+ }
+
+ return charFilterFactory;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactory.java b/src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactory.java
new file mode 100644
index 0000000..9c89268
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
+
+public class PreBuiltTokenFilterFactoryFactory implements TokenFilterFactoryFactory {
+
+ private final TokenFilterFactory tokenFilterFactory;
+
+ public PreBuiltTokenFilterFactoryFactory(TokenFilterFactory tokenFilterFactory) {
+ this.tokenFilterFactory = tokenFilterFactory;
+ }
+
+ @Override
+ public TokenFilterFactory create(String name, Settings settings) {
+ Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ if (!Version.CURRENT.equals(indexVersion)) {
+ PreBuiltTokenFilters preBuiltTokenFilters = PreBuiltTokenFilters.getOrDefault(name, null);
+ if (preBuiltTokenFilters != null) {
+ return preBuiltTokenFilters.getTokenFilterFactory(indexVersion);
+ }
+ }
+ return tokenFilterFactory;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactory.java b/src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactory.java
new file mode 100644
index 0000000..4ee66b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
+
+public class PreBuiltTokenizerFactoryFactory implements TokenizerFactoryFactory {
+
+ private final TokenizerFactory tokenizerFactory;
+
+ public PreBuiltTokenizerFactoryFactory(TokenizerFactory tokenizerFactory) {
+ this.tokenizerFactory = tokenizerFactory;
+ }
+
+ @Override
+ public TokenizerFactory create(String name, Settings settings) {
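+ // As with pre-built char filters: older indices get the tokenizer factory
+ // matching their creation version.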
+ Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ if (!Version.CURRENT.equals(indexVersion)) {
+ PreBuiltTokenizers preBuiltTokenizers = PreBuiltTokenizers.getOrDefault(name, null);
+ if (preBuiltTokenizers != null) {
+ return preBuiltTokenizers.getTokenizerFactory(indexVersion);
+ }
+ }
+
+ return tokenizerFactory;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java
new file mode 100644
index 0000000..5f7834a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for {@link ReverseStringFilter}, which reverses the characters of
+ * each token.
+ */
+public class ReverseTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public ReverseTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new ReverseStringFilter(version, tokenStream);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java
new file mode 100644
index 0000000..451ac69
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.ro.RomanianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link RomanianAnalyzer}, configured with optional
+ * custom stopwords and stem-exclusion words.
+ */
+public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<RomanianAnalyzer> {
+
+ private final RomanianAnalyzer analyzer;
+
+ @Inject
+ public RomanianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new RomanianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, RomanianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public RomanianAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java
new file mode 100644
index 0000000..e76e73b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.ru.RussianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link RussianAnalyzer}, configured with optional
+ * custom stopwords and stem-exclusion words.
+ */
+public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider<RussianAnalyzer> {
+
+ private final RussianAnalyzer analyzer;
+
+ @Inject
+ public RussianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new RussianAnalyzer(version,
+ Analysis.parseStopWords(env, settings, RussianAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public RussianAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java
new file mode 100644
index 0000000..d2dec9c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.snowball.SnowballFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for a Russian stemming filter backed by the Snowball "Russian" stemmer.
+ */
+public class RussianStemTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public RussianStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new SnowballFilter(tokenStream, "Russian");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java
new file mode 100644
index 0000000..7b81228
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for {@link ShingleFilter}, which emits shingles (token n-grams) built
+ * from the token stream. Configurable via the "min_shingle_size",
+ * "max_shingle_size", "output_unigrams", "output_unigrams_if_no_shingles" and
+ * "token_separator" settings.
+ */
+public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final Factory factory;
+
+ @Inject
+ public ShingleTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ int maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
+ int minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
+ boolean outputUnigrams = settings.getAsBoolean("output_unigrams", true);
+ boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false);
+ String tokenSeparator = settings.get("token_separator", ShingleFilter.TOKEN_SEPARATOR);
+ factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return factory.create(tokenStream);
+ }
+
+ public Factory getInnerFactory() {
+ return this.factory;
+ }
+
+ public static final class Factory implements TokenFilterFactory {
+ private final int maxShingleSize;
+
+ private final boolean outputUnigrams;
+
+ private final boolean outputUnigramsIfNoShingles;
+
+ private final String tokenSeparator;
+
+ private final int minShingleSize;
+
+ private final String name;
+
+ public Factory(String name) {
+ this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.TOKEN_SEPARATOR);
+ }
+
+ Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator) {
+ this.maxShingleSize = maxShingleSize;
+ this.outputUnigrams = outputUnigrams;
+ this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles;
+ this.tokenSeparator = tokenSeparator;
+ this.minShingleSize = minShingleSize;
+ this.name = name;
+ }
+
+ public TokenStream create(TokenStream tokenStream) {
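+ // Illustrative example: with min=2, max=2 and unigrams enabled, the input
+ // "please divide this" produces "please", "please divide", "divide",
+ // "divide this", "this".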
+ ShingleFilter filter = new ShingleFilter(tokenStream, minShingleSize, maxShingleSize);
+ filter.setOutputUnigrams(outputUnigrams);
+ filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles);
+ filter.setTokenSeparator(tokenSeparator);
+ return filter;
+ }
+
+ public int getMaxShingleSize() {
+ return maxShingleSize;
+ }
+
+ public int getMinShingleSize() {
+ return minShingleSize;
+ }
+
+ public boolean getOutputUnigrams() {
+ return outputUnigrams;
+ }
+
+ public boolean getOutputUnigramsIfNoShingles() {
+ return outputUnigramsIfNoShingles;
+ }
+
+ @Override
+ public String name() {
+ return name;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java
new file mode 100644
index 0000000..03f4df9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.SimpleAnalyzer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link SimpleAnalyzer}.
+ */
+public class SimpleAnalyzerProvider extends AbstractIndexAnalyzerProvider<SimpleAnalyzer> {
+
+ private final SimpleAnalyzer simpleAnalyzer;
+
+ @Inject
+ public SimpleAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.simpleAnalyzer = new SimpleAnalyzer(version);
+ }
+
+ @Override
+ public SimpleAnalyzer get() {
+ return this.simpleAnalyzer;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java
new file mode 100644
index 0000000..1c1ade4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/SnowballAnalyzerProvider.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.de.GermanAnalyzer;
+import org.apache.lucene.analysis.fr.FrenchAnalyzer;
+import org.apache.lucene.analysis.nl.DutchAnalyzer;
+import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Creates a SnowballAnalyzer initialized with stopwords and a Snowball filter. Only
+ * supports Dutch, English (default), French, German and German2, for which stopwords
+ * are readily available. For other languages available with the Lucene Snowball
+ * Stemmer, use them directly with the SnowballFilter and a CustomAnalyzer.
+ * The language is configured with the "language" attribute of the analyzer.
+ * Additional stopwords can be supplied via the "stopwords" attribute.
+ * <p/>
+ * The SnowballAnalyzer comes with a StandardFilter, LowerCaseFilter, StopFilter
+ * and the SnowballFilter.
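+ * <p/>
+ * A minimal configuration sketch (index settings; the analyzer name and
+ * stopword list are illustrative):
+ * <pre>
+ * index:
+ *   analysis:
+ *     analyzer:
+ *       my_snowball:
+ *         type: snowball
+ *         language: German2
+ *         stopwords: [der, die, das]
+ * </pre>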
+ */
+public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<SnowballAnalyzer> {
+
+ private static final ImmutableMap<String, CharArraySet> defaultLanguageStopwords = MapBuilder.<String, CharArraySet>newMapBuilder()
+ .put("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET)
+ .put("Dutch", DutchAnalyzer.getDefaultStopSet())
+ .put("German", GermanAnalyzer.getDefaultStopSet())
+ .put("German2", GermanAnalyzer.getDefaultStopSet())
+ .put("French", FrenchAnalyzer.getDefaultStopSet())
+ .immutableMap();
+
+ private final SnowballAnalyzer analyzer;
+
+ @Inject
+ public SnowballAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ String language = settings.get("language", settings.get("name", "English"));
+ CharArraySet defaultStopwords = defaultLanguageStopwords.containsKey(language) ? defaultLanguageStopwords.get(language) : CharArraySet.EMPTY_SET;
+ CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords, version);
+
+ analyzer = new SnowballAnalyzer(version, language, stopWords);
+ }
+
+ @Override
+ public SnowballAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/SnowballTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/SnowballTokenFilterFactory.java
new file mode 100644
index 0000000..4656933
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/SnowballTokenFilterFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.snowball.SnowballFilter;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for {@link SnowballFilter}. Based on work by Sebastian on the
+ * Elasticsearch mailing list:
+ * http://elasticsearch-users.115913.n3.nabble.com/Using-the-Snowball-stemmers-tp2126106p2127111.html
+ */
+public class SnowballTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final String language;
+
+ @Inject
+ public SnowballTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.language = Strings.capitalize(settings.get("language", settings.get("name", "English")));
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new SnowballFilter(tokenStream, language);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java
new file mode 100644
index 0000000..4eb640a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.es.SpanishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link SpanishAnalyzer}, configured with optional
+ * custom stopwords and stem-exclusion words.
+ */
+public class SpanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<SpanishAnalyzer> {
+
+ private final SpanishAnalyzer analyzer;
+
+ @Inject
+ public SpanishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new SpanishAnalyzer(version,
+ Analysis.parseStopWords(env, settings, SpanishAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public SpanishAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java
new file mode 100644
index 0000000..b07aa48
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link StandardAnalyzer}, with configurable stopwords
+ * and a configurable "max_token_length".
+ */
+public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardAnalyzer> {
+
+ private final StandardAnalyzer standardAnalyzer;
+ private final Version esVersion;
+
+ @Inject
+ public StandardAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.esVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
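+ // Indices created before 1.0.0.Beta1 defaulted to the English stopword set;
+ // from 1.0.0.Beta1 on, the standard analyzer defaults to no stopwords.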
+ final CharArraySet defaultStopwords;
+ if (esVersion.onOrAfter(Version.V_1_0_0_Beta1)) {
+ defaultStopwords = CharArraySet.EMPTY_SET;
+ } else {
+ defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+ }
+
+ CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords, version);
+ int maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+ standardAnalyzer = new StandardAnalyzer(version, stopWords);
+ standardAnalyzer.setMaxTokenLength(maxTokenLength);
+ }
+
+ @Override
+ public StandardAnalyzer get() {
+ return this.standardAnalyzer;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java b/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java
new file mode 100644
index 0000000..05f50cb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.StandardFilter;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.util.Version;
+
+import java.io.IOException;
+import java.io.Reader;
+
+public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
+
+ /**
+ * @deprecated use {@link StandardHtmlStripAnalyzer#StandardHtmlStripAnalyzer(org.apache.lucene.util.Version,
+ * org.apache.lucene.analysis.util.CharArraySet)} instead
+ */
+ @Deprecated
+ public StandardHtmlStripAnalyzer(Version version) {
+ super(version, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+
+ public StandardHtmlStripAnalyzer(Version version, CharArraySet stopwords) {
+ super(version, stopwords);
+ }
+
+ @Override
+ protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
+ final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
+ src.setMaxTokenLength(StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+ TokenStream tok = new StandardFilter(matchVersion, src);
+ tok = new LowerCaseFilter(matchVersion, tok);
+ if (!stopwords.isEmpty()) {
+ tok = new StopFilter(matchVersion, tok, stopwords);
+ }
+ return new TokenStreamComponents(src, tok) {
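+ // Re-apply the max token length whenever the components are reused
+ // with a new reader.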
+ @Override
+ protected void setReader(final Reader reader) throws IOException {
+ src.setMaxTokenLength(StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+ super.setReader(reader);
+ }
+ };
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java
new file mode 100644
index 0000000..ebea240
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link StandardHtmlStripAnalyzer}, with configurable stopwords.
+ */
+public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardHtmlStripAnalyzer> {
+
+ private final StandardHtmlStripAnalyzer analyzer;
+ private final Version esVersion;
+
+ @Inject
+ public StandardHtmlStripAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.esVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
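+ // Same backward-compatibility default as StandardAnalyzerProvider, but the
+ // cut-over version here is 1.0.0.RC1.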
+ final CharArraySet defaultStopwords;
+ if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
+ defaultStopwords = CharArraySet.EMPTY_SET;
+ } else {
+ defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+ }
+ CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords, version);
+ analyzer = new StandardHtmlStripAnalyzer(version, stopWords);
+ }
+
+ @Override
+ public StandardHtmlStripAnalyzer get() {
+ return this.analyzer;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java
new file mode 100644
index 0000000..a9948be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.StandardFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+
+/**
+ * Factory for {@link StandardFilter}.
+ */
+public class StandardTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public StandardTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new StandardFilter(version, tokenStream);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java
new file mode 100644
index 0000000..7a71c72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ * Factory for {@link StandardTokenizer}, with a configurable "max_token_length".
+ */
+public class StandardTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final int maxTokenLength;
+
+ @Inject
+ public StandardTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ StandardTokenizer tokenizer = new StandardTokenizer(version, reader);
+ tokenizer.setMaxTokenLength(maxTokenLength);
+ return tokenizer;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java
new file mode 100644
index 0000000..655752f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
+import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter.StemmerOverrideMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+import java.util.List;
+
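+/**
+ * Factory for {@link StemmerOverrideFilter}. Requires either "rules" (inline)
+ * or "rules_path" (a file) to be configured. A minimal settings sketch (the
+ * filter name "my_overrides" is illustrative):
+ * <pre>
+ * index:
+ *   analysis:
+ *     filter:
+ *       my_overrides:
+ *         type: stemmer_override
+ *         rules: ["running => run", "stemmer => stemmer"]
+ * </pre>
+ */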
+@AnalysisSettingsRequired
+public class StemmerOverrideTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final StemmerOverrideMap overrideMap;
+
+ @Inject
+ public StemmerOverrideTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) throws IOException {
+ super(index, indexSettings, name, settings);
+
+ List<String> rules = Analysis.getWordList(env, settings, "rules");
+ if (rules == null) {
+ throw new ElasticsearchIllegalArgumentException("stemmer override filter requires either `rules` or `rules_path` to be configured");
+ }
+
+ StemmerOverrideFilter.Builder builder = new StemmerOverrideFilter.Builder(false);
+ parseRules(rules, builder, "=>");
+ overrideMap = builder.build();
+
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new StemmerOverrideFilter(tokenStream, overrideMap);
+ }
+
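+ // Rules have the form "token => stem"; both sides must be non-empty after trimming.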
+ static void parseRules(List<String> rules, StemmerOverrideFilter.Builder builder, String mappingSep) {
+ for (String rule : rules) {
+ String key, override;
+ List<String> mapping = Strings.splitSmart(rule, mappingSep, false);
+ if (mapping.size() == 2) {
+ key = mapping.get(0).trim();
+ override = mapping.get(1).trim();
+ } else {
+ throw new RuntimeException("Invalid Keyword override Rule:" + rule);
+ }
+
+ if (key.isEmpty() || override.isEmpty()) {
+ throw new RuntimeException("Invalid Keyword override Rule:" + rule);
+ } else {
+ builder.add(key, override);
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java
new file mode 100644
index 0000000..9045ed2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ar.ArabicStemFilter;
+import org.apache.lucene.analysis.bg.BulgarianStemFilter;
+import org.apache.lucene.analysis.br.BrazilianStemFilter;
+import org.apache.lucene.analysis.cz.CzechStemFilter;
+import org.apache.lucene.analysis.de.GermanLightStemFilter;
+import org.apache.lucene.analysis.de.GermanMinimalStemFilter;
+import org.apache.lucene.analysis.el.GreekStemFilter;
+import org.apache.lucene.analysis.en.EnglishMinimalStemFilter;
+import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
+import org.apache.lucene.analysis.en.KStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
+import org.apache.lucene.analysis.es.SpanishLightStemFilter;
+import org.apache.lucene.analysis.fi.FinnishLightStemFilter;
+import org.apache.lucene.analysis.fr.FrenchLightStemFilter;
+import org.apache.lucene.analysis.fr.FrenchMinimalStemFilter;
+import org.apache.lucene.analysis.hi.HindiStemFilter;
+import org.apache.lucene.analysis.hu.HungarianLightStemFilter;
+import org.apache.lucene.analysis.id.IndonesianStemFilter;
+import org.apache.lucene.analysis.it.ItalianLightStemFilter;
+import org.apache.lucene.analysis.lv.LatvianStemFilter;
+import org.apache.lucene.analysis.no.NorwegianMinimalStemFilter;
+import org.apache.lucene.analysis.pt.PortugueseLightStemFilter;
+import org.apache.lucene.analysis.pt.PortugueseMinimalStemFilter;
+import org.apache.lucene.analysis.pt.PortugueseStemFilter;
+import org.apache.lucene.analysis.ru.RussianLightStemFilter;
+import org.apache.lucene.analysis.snowball.SnowballFilter;
+import org.apache.lucene.analysis.sv.SwedishLightStemFilter;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.tartarus.snowball.ext.*;
+
+/**
+ * Factory for language-specific stemming filters; the stemmer is selected via
+ * the "language" setting (falling back to "name", then to "porter").
+ */
+public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final String language;
+
+ @Inject
+ public StemmerTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.language = Strings.capitalize(settings.get("language", settings.get("name", "porter")));
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if ("arabic".equalsIgnoreCase(language)) {
+ return new ArabicStemFilter(tokenStream);
+ } else if ("armenian".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new ArmenianStemmer());
+ } else if ("basque".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new BasqueStemmer());
+ } else if ("brazilian".equalsIgnoreCase(language)) {
+ return new BrazilianStemFilter(tokenStream);
+ } else if ("bulgarian".equalsIgnoreCase(language)) {
+ return new BulgarianStemFilter(tokenStream);
+ } else if ("catalan".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new CatalanStemmer());
+ } else if ("czech".equalsIgnoreCase(language)) {
+ return new CzechStemFilter(tokenStream);
+ } else if ("danish".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new DanishStemmer());
+ } else if ("dutch".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new DutchStemmer());
+ } else if ("english".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new EnglishStemmer());
+ } else if ("finnish".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new FinnishStemmer());
+ } else if ("french".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new FrenchStemmer());
+ } else if ("german".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new GermanStemmer());
+ } else if ("german2".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new German2Stemmer());
+ } else if ("hungarian".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new HungarianStemmer());
+ } else if ("italian".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new ItalianStemmer());
+ } else if ("kp".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new KpStemmer());
+ } else if ("kstem".equalsIgnoreCase(language)) {
+ return new KStemFilter(tokenStream);
+ } else if ("lovins".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new LovinsStemmer());
+ } else if ("latvian".equalsIgnoreCase(language)) {
+ return new LatvianStemFilter(tokenStream);
+ } else if ("norwegian".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new NorwegianStemmer());
+ } else if ("minimal_norwegian".equalsIgnoreCase(language) || "minimalNorwegian".equals(language)) {
+ return new NorwegianMinimalStemFilter(tokenStream);
+ } else if ("porter".equalsIgnoreCase(language)) {
+ return new PorterStemFilter(tokenStream);
+ } else if ("porter2".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new PorterStemmer());
+ } else if ("portuguese".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new PortugueseStemmer());
+ } else if ("romanian".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new RomanianStemmer());
+ } else if ("russian".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new RussianStemmer());
+ } else if ("spanish".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new SpanishStemmer());
+ } else if ("swedish".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new SwedishStemmer());
+ } else if ("turkish".equalsIgnoreCase(language)) {
+ return new SnowballFilter(tokenStream, new TurkishStemmer());
+ } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) {
+ return new EnglishMinimalStemFilter(tokenStream);
+ } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) {
+ return new EnglishPossessiveFilter(version, tokenStream);
+ } else if ("light_finish".equalsIgnoreCase(language) || "lightFinish".equalsIgnoreCase(language)) {
+ // leaving this for backward compatibility
+ return new FinnishLightStemFilter(tokenStream);
+ } else if ("light_finnish".equalsIgnoreCase(language) || "lightFinnish".equalsIgnoreCase(language)) {
+ return new FinnishLightStemFilter(tokenStream);
+ } else if ("light_french".equalsIgnoreCase(language) || "lightFrench".equalsIgnoreCase(language)) {
+ return new FrenchLightStemFilter(tokenStream);
+ } else if ("minimal_french".equalsIgnoreCase(language) || "minimalFrench".equalsIgnoreCase(language)) {
+ return new FrenchMinimalStemFilter(tokenStream);
+ } else if ("light_german".equalsIgnoreCase(language) || "lightGerman".equalsIgnoreCase(language)) {
+ return new GermanLightStemFilter(tokenStream);
+ } else if ("minimal_german".equalsIgnoreCase(language) || "minimalGerman".equalsIgnoreCase(language)) {
+ return new GermanMinimalStemFilter(tokenStream);
+ } else if ("hindi".equalsIgnoreCase(language)) {
+ return new HindiStemFilter(tokenStream);
+ } else if ("light_hungarian".equalsIgnoreCase(language) || "lightHungarian".equalsIgnoreCase(language)) {
+ return new HungarianLightStemFilter(tokenStream);
+ } else if ("indonesian".equalsIgnoreCase(language)) {
+ return new IndonesianStemFilter(tokenStream);
+ } else if ("light_italian".equalsIgnoreCase(language) || "lightItalian".equalsIgnoreCase(language)) {
+ return new ItalianLightStemFilter(tokenStream);
+ } else if ("light_portuguese".equalsIgnoreCase(language) || "lightPortuguese".equalsIgnoreCase(language)) {
+ return new PortugueseLightStemFilter(tokenStream);
+ } else if ("minimal_portuguese".equalsIgnoreCase(language) || "minimalPortuguese".equalsIgnoreCase(language)) {
+ return new PortugueseMinimalStemFilter(tokenStream);
+ } else if ("portuguese".equalsIgnoreCase(language)) {
+ return new PortugueseStemFilter(tokenStream);
+ } else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) {
+ return new RussianLightStemFilter(tokenStream);
+ } else if ("light_spanish".equalsIgnoreCase(language) || "lightSpanish".equalsIgnoreCase(language)) {
+ return new SpanishLightStemFilter(tokenStream);
+ } else if ("light_swedish".equalsIgnoreCase(language) || "lightSwedish".equalsIgnoreCase(language)) {
+ return new SwedishLightStemFilter(tokenStream);
+ } else if ("greek".equalsIgnoreCase(language)) {
+ return new GreekStemFilter(tokenStream);
+ }
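+ // Any other value is handed to Lucene's SnowballFilter, which resolves a
+ // Snowball stemmer class by the (capitalized) language name.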
+ return new SnowballFilter(tokenStream, language);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
new file mode 100644
index 0000000..0a8fedd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for the Lucene {@link StopAnalyzer}, with configurable stopwords.
+ */
+public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider<StopAnalyzer> {
+
+ private final StopAnalyzer stopAnalyzer;
+
+ @Inject
+ public StopAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ CharArraySet stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, version);
+ this.stopAnalyzer = new StopAnalyzer(version, stopWords);
+ }
+
+ @Override
+ public StopAnalyzer get() {
+ return this.stopAnalyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
new file mode 100644
index 0000000..6f95bd1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Set;
+
+/**
+ * Factory for {@link StopFilter}; supports the "stopwords", "ignore_case",
+ * "remove_trailing" and "enable_position_increments" settings.
+ */
+@SuppressWarnings("deprecation")
+public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final CharArraySet stopWords;
+
+ private final boolean ignoreCase;
+
+ private final boolean enablePositionIncrements;
+ private final boolean removeTrailing;
+
+ @Inject
+ public StopTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.ignoreCase = settings.getAsBoolean("ignore_case", false);
+ this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
+ this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, version, ignoreCase);
+ this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true);
+ if (!enablePositionIncrements && version.onOrAfter(Version.LUCENE_44)) {
+ throw new ElasticsearchIllegalArgumentException("[enable_position_increments: false] is not supported anymore as of Lucene 4.4 as it can create broken token streams."
+ + " Please fix your analysis chain or use an older compatibility version (<=4.3) but beware that it might cause unexpected behavior.");
+ }
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if (removeTrailing) {
+ StopFilter filter = new StopFilter(version, tokenStream, stopWords);
+ filter.setEnablePositionIncrements(enablePositionIncrements);
+ return filter;
+ } else {
+ return new SuggestStopFilter(tokenStream, stopWords);
+ }
+ }
+
+ public Set<?> stopWords() {
+ return stopWords;
+ }
+
+ public boolean ignoreCase() {
+ return ignoreCase;
+ }
+
+ public boolean enablePositionIncrements() {
+ return this.enablePositionIncrements;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java
new file mode 100644
index 0000000..9033a84
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.sv.SwedishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link SwedishAnalyzer}, configurable with custom stop words
+ * and a stem-exclusion set.
+ */
+public class SwedishAnalyzerProvider extends AbstractIndexAnalyzerProvider<SwedishAnalyzer> {
+
+ private final SwedishAnalyzer analyzer;
+
+ @Inject
+ public SwedishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new SwedishAnalyzer(version,
+ Analysis.parseStopWords(env, settings, SwedishAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public SwedishAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java
new file mode 100644
index 0000000..1961770
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.synonym.SolrSynonymParser;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.analysis.synonym.SynonymMap;
+import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+
+import java.io.Reader;
+import java.util.List;
+import java.util.Map;
+
+@AnalysisSettingsRequired
+public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final SynonymMap synonymMap;
+ private final boolean ignoreCase;
+
+ @Inject
+ public SynonymTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, IndicesAnalysisService indicesAnalysisService, Map<String, TokenizerFactoryFactory> tokenizerFactories,
+ @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ Reader rulesReader = null;
+ if (settings.getAsArray("synonyms", null) != null) {
+ List<String> rules = Analysis.getWordList(env, settings, "synonyms");
+ StringBuilder sb = new StringBuilder();
+ for (String line : rules) {
+ sb.append(line).append(System.getProperty("line.separator"));
+ }
+ rulesReader = new FastStringReader(sb.toString());
+ } else if (settings.get("synonyms_path") != null) {
+ rulesReader = Analysis.getReaderFromFile(env, settings, "synonyms_path");
+ } else {
+ throw new ElasticsearchIllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured");
+ }
+
+ this.ignoreCase = settings.getAsBoolean("ignore_case", false);
+ boolean expand = settings.getAsBoolean("expand", true);
+
+ String tokenizerName = settings.get("tokenizer", "whitespace");
+
+ TokenizerFactoryFactory tokenizerFactoryFactory = tokenizerFactories.get(tokenizerName);
+ if (tokenizerFactoryFactory == null) {
+ tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(tokenizerName);
+ }
+ if (tokenizerFactoryFactory == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter");
+ }
+ final TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, settings);
+
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = tokenizerFactory == null ? new WhitespaceTokenizer(Lucene.ANALYZER_VERSION, reader) : tokenizerFactory.create(reader);
+ TokenStream stream = ignoreCase ? new LowerCaseFilter(Lucene.ANALYZER_VERSION, tokenizer) : tokenizer;
+ return new TokenStreamComponents(tokenizer, stream);
+ }
+ };
+
+ try {
+ SynonymMap.Builder parser;
+
+ if ("wordnet".equalsIgnoreCase(settings.get("format"))) {
+ WordnetSynonymParser wordnetParser = new WordnetSynonymParser(true, expand, analyzer);
+ wordnetParser.parse(rulesReader);
+ parser = wordnetParser;
+ } else {
+ SolrSynonymParser solrParser = new SolrSynonymParser(true, expand, analyzer);
+ solrParser.parse(rulesReader);
+ parser = solrParser;
+ }
+
+ synonymMap = parser.build();
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("failed to build synonyms", e);
+ }
+ }
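+
+ // A minimal configuration sketch (illustrative; "my_synonyms" is a
+ // hypothetical filter name, the keys are the settings read above):
+ //
+ //   index.analysis.filter.my_synonyms.type: synonym
+ //   index.analysis.filter.my_synonyms.synonyms: ["i-pod, ipod", "universe, cosmos"]
+ //   index.analysis.filter.my_synonyms.expand: true
+ //   index.analysis.filter.my_synonyms.ignore_case: false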
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ // a null fst means no synonym rules were parsed, so pass the stream through unchanged
+ return synonymMap.fst == null ? tokenStream : new SynonymFilter(tokenStream, synonymMap, ignoreCase);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java
new file mode 100644
index 0000000..601d880
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.th.ThaiAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link ThaiAnalyzer}, configurable with custom stop words.
+ */
+public class ThaiAnalyzerProvider extends AbstractIndexAnalyzerProvider<ThaiAnalyzer> {
+
+ private final ThaiAnalyzer analyzer;
+
+ @Inject
+ public ThaiAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new ThaiAnalyzer(version, Analysis.parseStopWords(env, settings, ThaiAnalyzer.getDefaultStopSet(), version));
+ }
+
+ @Override
+ public ThaiAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java
new file mode 100644
index 0000000..3d48666
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+
+/**
+ * A named factory that wraps an incoming {@link TokenStream} with a token filter.
+ */
+public interface TokenFilterFactory {
+
+ String name();
+
+ TokenStream create(TokenStream tokenStream);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java b/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java
new file mode 100644
index 0000000..d374187
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactoryFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Creates {@link TokenFilterFactory} instances for a given name and settings.
+ */
+public interface TokenFilterFactoryFactory {
+
+ TokenFilterFactory create(String name, Settings settings);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java
new file mode 100644
index 0000000..a5a1faf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+
+import java.io.Reader;
+
+/**
+ * A named factory that creates {@link Tokenizer} instances over a {@link Reader}.
+ */
+public interface TokenizerFactory {
+
+ String name();
+
+ Tokenizer create(Reader reader);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/TokenizerFactoryFactory.java b/src/main/java/org/elasticsearch/index/analysis/TokenizerFactoryFactory.java
new file mode 100644
index 0000000..e1d135d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TokenizerFactoryFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Creates {@link TokenizerFactory} instances for a given name and settings.
+ */
+public interface TokenizerFactoryFactory {
+
+ TokenizerFactory create(String name, Settings settings);
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java
new file mode 100644
index 0000000..c8db9ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.TrimFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for the trim token filter, which trims leading and trailing
+ * whitespace from each token.
+ */
+public class TrimTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final boolean updateOffsets;
+ private static final String UPDATE_OFFSETS_KEY = "update_offsets";
+
+ @Inject
+ public TrimTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ if (version.onOrAfter(Version.LUCENE_44) && settings.get(UPDATE_OFFSETS_KEY) != null) {
+ throw new ElasticsearchIllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use"
+ + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
+ }
+ this.updateOffsets = settings.getAsBoolean(UPDATE_OFFSETS_KEY, false);
+ }
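+
+ // A minimal configuration sketch (illustrative; "my_trim" is a hypothetical
+ // filter name). Note that update_offsets is rejected on Lucene >= 4.4:
+ //
+ //   index.analysis.filter.my_trim.type: trim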
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ if (version.onOrAfter(Version.LUCENE_44)) {
+ return new TrimFilter(version, tokenStream);
+ }
+ return new TrimFilter(version, tokenStream, updateOffsets);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java
new file mode 100644
index 0000000..69a37bc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for the truncate token filter, which truncates tokens to a fixed
+ * maximum length.
+ */
+@AnalysisSettingsRequired
+public class TruncateTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final int length;
+
+ @Inject
+ public TruncateTokenFilterFactory(Index index, @IndexSettings Settings indexSettings,
+ @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.length = settings.getAsInt("length", -1);
+ if (length <= 0) {
+ throw new ElasticsearchIllegalArgumentException("length parameter must be provided");
+ }
+ }
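+
+ // A minimal configuration sketch (illustrative; "my_truncate" is a
+ // hypothetical filter name, length is the setting read above):
+ //
+ //   index.analysis.filter.my_truncate.type: truncate
+ //   index.analysis.filter.my_truncate.length: 10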
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new TruncateTokenFilter(tokenStream, length);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java
new file mode 100644
index 0000000..d36c108
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.tr.TurkishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link TurkishAnalyzer}, configurable with custom stop words
+ * and a stem-exclusion set.
+ */
+public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider<TurkishAnalyzer> {
+
+ private final TurkishAnalyzer analyzer;
+
+ @Inject
+ public TurkishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ analyzer = new TurkishAnalyzer(version,
+ Analysis.parseStopWords(env, settings, TurkishAnalyzer.getDefaultStopSet(), version),
+ Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
+ }
+
+ @Override
+ public TurkishAnalyzer get() {
+ return this.analyzer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java
new file mode 100644
index 0000000..9ae0e94
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ * Factory for {@link UAX29URLEmailTokenizer}, which tokenizes on UAX#29 word
+ * boundaries while keeping URLs and email addresses as single tokens.
+ */
+public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {
+
+ private final int maxTokenLength;
+
+ @Inject
+ public UAX29URLEmailTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+ }
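+
+ // A minimal configuration sketch (illustrative; "my_url_email" is a
+ // hypothetical tokenizer name):
+ //
+ //   index.analysis.tokenizer.my_url_email.type: uax_url_email
+ //   index.analysis.tokenizer.my_url_email.max_token_length: 255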
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(version, reader);
+ tokenizer.setMaxTokenLength(maxTokenLength);
+ return tokenizer;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java
new file mode 100644
index 0000000..fd1b13a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Factory for the unique token filter, which removes tokens that duplicate an
+ * earlier token, optionally only when they occupy the same position.
+ */
+public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final boolean onlyOnSamePosition;
+
+ @Inject
+ public UniqueTokenFilterFactory(Index index, @IndexSettings Settings indexSettings,
+ @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.onlyOnSamePosition = settings.getAsBoolean("only_on_same_position", false);
+ }
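+
+ // A minimal configuration sketch (illustrative; "my_unique" is a
+ // hypothetical filter name):
+ //
+ //   index.analysis.filter.my_unique.type: unique
+ //   index.analysis.filter.my_unique.only_on_same_position: true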
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new UniqueTokenFilter(tokenStream, onlyOnSamePosition);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java
new file mode 100644
index 0000000..ed612c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link WhitespaceAnalyzer}.
+ */
+public class WhitespaceAnalyzerProvider extends AbstractIndexAnalyzerProvider<WhitespaceAnalyzer> {
+
+ private final WhitespaceAnalyzer analyzer;
+
+ @Inject
+ public WhitespaceAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ this.analyzer = new WhitespaceAnalyzer(version);
+ }
+
+ @Override
+ public WhitespaceAnalyzer get() {
+ return this.analyzer;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java
new file mode 100644
index 0000000..3bc008d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.Reader;
+
+/**
+ * Factory for {@link WhitespaceTokenizer}.
+ */
+public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory {
+
+ @Inject
+ public WhitespaceTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return new WhitespaceTokenizer(version, reader);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java
new file mode 100644
index 0000000..cfba439
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.*;
+
+public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ private final byte[] charTypeTable;
+ private final int flags;
+ private final CharArraySet protoWords;
+
+ @Inject
+ public WordDelimiterTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ // Sample Format for the type table:
+ // $ => DIGIT
+ // % => DIGIT
+ // . => DIGIT
+ // \u002C => DIGIT
+ // \u200D => ALPHANUM
+ List<String> charTypeTableValues = Analysis.getWordList(env, settings, "type_table");
+ if (charTypeTableValues == null) {
+ this.charTypeTable = WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE;
+ } else {
+ this.charTypeTable = parseTypes(charTypeTableValues);
+ }
+ int flags = 0;
+ // If set, causes parts of words to be generated: "PowerShot" => "Power" "Shot"
+ flags |= getFlag(GENERATE_WORD_PARTS, settings, "generate_word_parts", true);
+ // If set, causes number subwords to be generated: "500-42" => "500" "42"
+ flags |= getFlag(GENERATE_NUMBER_PARTS, settings, "generate_number_parts", true);
+ // If set, causes maximum runs of word parts to be catenated: "wi-fi" => "wifi"
+ flags |= getFlag(CATENATE_WORDS, settings, "catenate_words", false);
+ // If set, causes maximum runs of number parts to be catenated: "500-42" => "50042"
+ flags |= getFlag(CATENATE_NUMBERS, settings, "catenate_numbers", false);
+ // If set, causes all subword parts to be catenated: "wi-fi-4000" => "wifi4000"
+ flags |= getFlag(CATENATE_ALL, settings, "catenate_all", false);
+ // 1, causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regards)
+ flags |= getFlag(SPLIT_ON_CASE_CHANGE, settings, "split_on_case_change", true);
+ // If set, includes original words in subwords: "500-42" => "500" "42" "500-42"
+ flags |= getFlag(PRESERVE_ORIGINAL, settings, "preserve_original", false);
+ // 1, causes "j2se" to be three tokens; "j" "2" "se"
+ flags |= getFlag(SPLIT_ON_NUMERICS, settings, "split_on_numerics", true);
+ // If set, causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil"
+ flags |= getFlag(STEM_ENGLISH_POSSESSIVE, settings, "stem_english_possessive", true);
+ // If not null, the set of tokens to protect from being delimited
+ Set<?> protectedWords = Analysis.getWordSet(env, settings, "protected_words", version);
+ this.protoWords = protectedWords == null ? null : CharArraySet.copy(Lucene.VERSION, protectedWords);
+ this.flags = flags;
+ }
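+
+ // A minimal configuration sketch (illustrative; "my_word_delimiter" is a
+ // hypothetical filter name, the keys mirror the flags parsed above and
+ // type_table uses the rule format shown in the sample comment):
+ //
+ //   index.analysis.filter.my_word_delimiter.type: word_delimiter
+ //   index.analysis.filter.my_word_delimiter.catenate_words: true
+ //   index.analysis.filter.my_word_delimiter.type_table: ["$ => DIGIT", "\\u200D => ALPHANUM"]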
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new WordDelimiterFilter(tokenStream,
+ charTypeTable,
+ flags,
+ protoWords);
+ }
+
+ public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
+ if (settings.getAsBoolean(key, defaultValue)) {
+ return flag;
+ }
+ return 0;
+ }
+
+ // source => type
+ private static final Pattern typePattern = Pattern.compile("(.*)\\s*=>\\s*(.*)\\s*$");
+
+ /**
+ * Parses a list of MappingCharFilter-style rules into a custom byte[] type table.
+ */
+ private byte[] parseTypes(Collection<String> rules) {
+ SortedMap<Character, Byte> typeMap = new TreeMap<Character, Byte>();
+ for (String rule : rules) {
+ Matcher m = typePattern.matcher(rule);
+ if (!m.find())
+ throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]");
+ String lhs = parseString(m.group(1).trim());
+ Byte rhs = parseType(m.group(2).trim());
+ if (lhs.length() != 1)
+ throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
+ if (rhs == null)
+ throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
+ typeMap.put(lhs.charAt(0), rhs);
+ }
+
+ // ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
+ byte[] types = new byte[Math.max(typeMap.lastKey() + 1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];
+ for (int i = 0; i < types.length; i++)
+ types[i] = WordDelimiterIterator.getType(i);
+ for (Map.Entry<Character, Byte> mapping : typeMap.entrySet())
+ types[mapping.getKey()] = mapping.getValue();
+ return types;
+ }
+
+ private Byte parseType(String s) {
+ if (s.equals("LOWER"))
+ return WordDelimiterFilter.LOWER;
+ else if (s.equals("UPPER"))
+ return WordDelimiterFilter.UPPER;
+ else if (s.equals("ALPHA"))
+ return WordDelimiterFilter.ALPHA;
+ else if (s.equals("DIGIT"))
+ return WordDelimiterFilter.DIGIT;
+ else if (s.equals("ALPHANUM"))
+ return WordDelimiterFilter.ALPHANUM;
+ else if (s.equals("SUBWORD_DELIM"))
+ return WordDelimiterFilter.SUBWORD_DELIM;
+ else
+ return null;
+ }
+
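+ // Scratch buffer reused by parseString below; mapping rules are single
+ // characters after unescaping, so 256 chars is ample.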
+ char[] out = new char[256];
+
+ private String parseString(String s) {
+ int readPos = 0;
+ int len = s.length();
+ int writePos = 0;
+ while (readPos < len) {
+ char c = s.charAt(readPos++);
+ if (c == '\\') {
+ if (readPos >= len)
+ throw new RuntimeException("Invalid escaped char in [" + s + "]");
+ c = s.charAt(readPos++);
+ switch (c) {
+ case '\\':
+ c = '\\';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 't':
+ c = '\t';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'u':
+ if (readPos + 3 >= len)
+ throw new RuntimeException("Invalid escaped char in [" + s + "]");
+ c = (char) Integer.parseInt(s.substring(readPos, readPos + 4), 16);
+ readPos += 4;
+ break;
+ }
+ }
+ out[writePos++] = c;
+ }
+ return new String(out, 0, writePos);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java
new file mode 100644
index 0000000..8505d50
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.compound;
+
+import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
+import org.elasticsearch.index.analysis.Analysis;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Contains the common configuration settings between subclasses of this class.
+ */
+public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ protected final int minWordSize;
+ protected final int minSubwordSize;
+ protected final int maxSubwordSize;
+ protected final boolean onlyLongestMatch;
+ protected final CharArraySet wordList;
+
+ @Inject
+ public AbstractCompoundWordTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, name, settings);
+
+ minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
+ minSubwordSize = settings.getAsInt("min_subword_size", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);
+ maxSubwordSize = settings.getAsInt("max_subword_size", CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);
+ onlyLongestMatch = settings.getAsBoolean("only_longest_match", false);
+ wordList = Analysis.getWordSet(env, settings, "word_list", version);
+ if (wordList == null) {
+ throw new ElasticsearchIllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly");
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java
new file mode 100644
index 0000000..4ef9123
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.compound;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AnalysisSettingsRequired;
+import org.elasticsearch.index.settings.IndexSettings;
+
+
+/**
+ * Uses the {@link org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter} to decompound tokens using a dictionary.
+ *
+ * @see org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter
+ */
+@AnalysisSettingsRequired
+public class DictionaryCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory {
+
+ @Inject
+ public DictionaryCompoundWordTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, env, name, settings);
+ }
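+
+ // A minimal configuration sketch (illustrative; "my_decompounder" is a
+ // hypothetical filter name, word_list is the setting required above):
+ //
+ //   index.analysis.filter.my_decompounder.type: dictionary_decompounder
+ //   index.analysis.filter.my_decompounder.word_list: [Donau, dampf, schiff]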
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new DictionaryCompoundWordTokenFilter(version, tokenStream, wordList,
+ minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
new file mode 100644
index 0000000..da7444e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.compound;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter;
+import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AnalysisSettingsRequired;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.xml.sax.InputSource;
+
+import java.net.URL;
+
+/**
+ * Uses the {@link org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter} to decompound tokens based on hyphenation rules.
+ *
+ * @see org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter
+ */
+@AnalysisSettingsRequired
+public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory {
+
+ private final HyphenationTree hyphenationTree;
+
+ @Inject
+ public HyphenationCompoundWordTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings, env, name, settings);
+
+ String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null);
+ if (hyphenationPatternsPath == null) {
+ throw new ElasticsearchIllegalArgumentException("hyphenation_patterns_path is a required setting.");
+ }
+
+ URL hyphenationPatternsFile = env.resolveConfig(hyphenationPatternsPath);
+
+ try {
+ hyphenationTree = HyphenationCompoundWordTokenFilter.getHyphenationTree(new InputSource(hyphenationPatternsFile.toExternalForm()));
+ } catch (Exception e) {
+ throw new ElasticsearchIllegalArgumentException("Exception while reading hyphenation_patterns_path: " + e.getMessage());
+ }
+ }
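+
+ // A minimal configuration sketch (illustrative; the filter name and the
+ // patterns file path are hypothetical):
+ //
+ //   index.analysis.filter.my_decompounder.type: hyphenation_decompounder
+ //   index.analysis.filter.my_decompounder.hyphenation_patterns_path: analysis/hyph_de.xml
+ //   index.analysis.filter.my_decompounder.word_list: [Donau, dampf, schiff]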
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new HyphenationCompoundWordTokenFilter(version, tokenStream,
+ hyphenationTree, wordList,
+ minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/src/main/java/org/elasticsearch/index/cache/IndexCache.java
new file mode 100644
index 0000000..eca9de4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/IndexCache.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache;
+
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.CloseableComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.cache.query.parser.QueryParserCache;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Groups the per-index caches (filter, id, doc set, query parser) and clears
+ * them together on close, on reader changes, and on relevant cluster state changes.
+ */
+public class IndexCache extends AbstractIndexComponent implements CloseableComponent, ClusterStateListener {
+
+ private final FilterCache filterCache;
+ private final QueryParserCache queryParserCache;
+ private final IdCache idCache;
+ private final DocSetCache docSetCache;
+
+ private ClusterService clusterService;
+
+ @Inject
+ public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, IdCache idCache,
+ DocSetCache docSetCache) {
+ super(index, indexSettings);
+ this.filterCache = filterCache;
+ this.queryParserCache = queryParserCache;
+ this.idCache = idCache;
+ this.docSetCache = docSetCache;
+ }
+
+ @Inject(optional = true)
+ public void setClusterService(@Nullable ClusterService clusterService) {
+ this.clusterService = clusterService;
+ if (clusterService != null) {
+ clusterService.add(this);
+ }
+ }
+
+ public FilterCache filter() {
+ return filterCache;
+ }
+
+ public DocSetCache docSet() {
+ return this.docSetCache;
+ }
+
+ public IdCache idCache() {
+ return this.idCache;
+ }
+
+ public QueryParserCache queryParserCache() {
+ return this.queryParserCache;
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ filterCache.close();
+ idCache.close();
+ queryParserCache.close();
+ docSetCache.clear("close");
+ if (clusterService != null) {
+ clusterService.remove(this);
+ }
+ }
+
+ public void clear(IndexReader reader) {
+ filterCache.clear(reader);
+ idCache.clear(reader);
+ docSetCache.clear(reader);
+ }
+
+ public void clear(String reason) {
+ filterCache.clear(reason);
+ idCache.clear();
+ queryParserCache.clear();
+ docSetCache.clear(reason);
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ // clear the query parser cache if the metadata (mappings) changed...
+ if (event.metaDataChanged()) {
+ queryParserCache.clear();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java b/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java
new file mode 100644
index 0000000..b9fe3a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.docset.DocSetCacheModule;
+import org.elasticsearch.index.cache.filter.FilterCacheModule;
+import org.elasticsearch.index.cache.id.IdCacheModule;
+import org.elasticsearch.index.cache.query.parser.QueryParserCacheModule;
+
+/**
+ * Module that wires the per-index cache bindings.
+ */
+public class IndexCacheModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public IndexCacheModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ new FilterCacheModule(settings).configure(binder());
+ new IdCacheModule(settings).configure(binder());
+ new QueryParserCacheModule(settings).configure(binder());
+ new DocSetCacheModule(settings).configure(binder());
+
+ bind(IndexCache.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/docset/DocSetCache.java b/src/main/java/org/elasticsearch/index/cache/docset/DocSetCache.java
new file mode 100644
index 0000000..08996c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/docset/DocSetCache.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.docset;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.index.IndexComponent;
+
+/**
+ * Cache of reusable doc id set instances, obtained and released per segment.
+ */
+public interface DocSetCache extends IndexComponent {
+
+ void clear(String reason);
+
+ void clear(IndexReader reader);
+
+ ContextDocIdSet obtain(AtomicReaderContext context);
+
+ void release(ContextDocIdSet docSet);
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/docset/DocSetCacheModule.java b/src/main/java/org/elasticsearch/index/cache/docset/DocSetCacheModule.java
new file mode 100644
index 0000000..21fa0b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/docset/DocSetCacheModule.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.docset;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.docset.simple.SimpleDocSetCache;
+
+/**
+ *
+ */
+public class DocSetCacheModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public DocSetCacheModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(DocSetCache.class)
+ .to(settings.getAsClass("index.cache.docset.type", SimpleDocSetCache.class, "org.elasticsearch.index.cache.docset.", "DocSetCache"))
+ .in(Scopes.SINGLETON);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/docset/none/NoneDocSetCache.java b/src/main/java/org/elasticsearch/index/cache/docset/none/NoneDocSetCache.java
new file mode 100644
index 0000000..81eb335
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/docset/none/NoneDocSetCache.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.docset.none;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ */
+public class NoneDocSetCache extends AbstractIndexComponent implements DocSetCache {
+
+ @Inject
+ public NoneDocSetCache(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ }
+
+ @Override
+ public void clear(String reason) {
+ }
+
+ @Override
+ public void clear(IndexReader reader) {
+ }
+
+ @Override
+ public ContextDocIdSet obtain(AtomicReaderContext context) {
+ return new ContextDocIdSet(context, new FixedBitSet(context.reader().maxDoc()));
+ }
+
+ @Override
+ public void release(ContextDocIdSet docSet) {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/docset/simple/SimpleDocSetCache.java b/src/main/java/org/elasticsearch/index/cache/docset/simple/SimpleDocSetCache.java
new file mode 100644
index 0000000..0eb895f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/docset/simple/SimpleDocSetCache.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.docset.simple;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Queue;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ */
+public class SimpleDocSetCache extends AbstractIndexComponent implements DocSetCache, SegmentReader.CoreClosedListener {
+
+ private final ConcurrentMap<Object, Queue<FixedBitSet>> cache;
+
+ @Inject
+ public SimpleDocSetCache(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ this.cache = ConcurrentCollections.newConcurrentMap();
+ }
+
+ @Override
+ public void onClose(Object coreCacheKey) {
+ cache.remove(coreCacheKey);
+ }
+
+ @Override
+ public void clear(String reason) {
+ cache.clear();
+ }
+
+ @Override
+ public void clear(IndexReader reader) {
+ cache.remove(reader.getCoreCacheKey());
+ }
+
+ @Override
+ public ContextDocIdSet obtain(AtomicReaderContext context) {
+ Queue<FixedBitSet> docIdSets = cache.get(context.reader().getCoreCacheKey());
+ if (docIdSets == null) {
+ if (context.reader() instanceof SegmentReader) {
+ ((SegmentReader) context.reader()).addCoreClosedListener(this);
+ }
+ cache.put(context.reader().getCoreCacheKey(), ConcurrentCollections.<FixedBitSet>newQueue());
+ return new ContextDocIdSet(context, new FixedBitSet(context.reader().maxDoc()));
+ }
+ FixedBitSet docIdSet = docIdSets.poll();
+ if (docIdSet == null) {
+ docIdSet = new FixedBitSet(context.reader().maxDoc());
+ } else {
+ docIdSet.clear(0, docIdSet.length());
+ }
+ return new ContextDocIdSet(context, docIdSet);
+ }
+
+ @Override
+ public void release(ContextDocIdSet docSet) {
+ Queue<FixedBitSet> docIdSets = cache.get(docSet.context.reader().getCoreCacheKey());
+ if (docIdSets != null) {
+ docIdSets.add((FixedBitSet) docSet.docSet);
+ }
+ }
+}
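
Grounded in the code above, a round trip showing the recycling behavior: the first obtain() for a segment allocates a FixedBitSet sized to maxDoc and registers a core-closed listener; release() parks the set in a queue keyed by the reader's core cache key; a later obtain() clears and reuses the same instance (ctx is an assumed AtomicReaderContext):

    ContextDocIdSet first = cache.obtain(ctx);
    cache.release(first);
    ContextDocIdSet second = cache.obtain(ctx);
    // second.docSet == first.docSet: the same FixedBitSet, zeroed via clear(0, length)
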
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java
new file mode 100644
index 0000000..d1c8ecc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.component.CloseableComponent;
+import org.elasticsearch.index.IndexComponent;
+import org.elasticsearch.index.service.IndexService;
+
+/**
+ *
+ */
+public interface FilterCache extends IndexComponent, CloseableComponent {
+
+ static class EntriesStats {
+ public final long sizeInBytes;
+ public final long count;
+
+ public EntriesStats(long sizeInBytes, long count) {
+ this.sizeInBytes = sizeInBytes;
+ this.count = count;
+ }
+ }
+
+ // we need to "inject" the index service to avoid creating a cyclic dependency
+ void setIndexService(IndexService indexService);
+
+ String type();
+
+ Filter cache(Filter filterToCache);
+
+ void clear(Object reader);
+
+ void clear(String reason);
+
+ void clear(String reason, String[] keys);
+}
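
A minimal consumer sketch, assuming an already-built Lucene filter (myFilter) and an injected FilterCache: wrap the filter once and reuse the wrapper across searches so per-segment DocIdSets can be served from the cache.

    Filter cached = filterCache.cache(myFilter);
    // pass 'cached' to queries instead of 'myFilter'; an implementation may
    // also return the input unchanged (see NoneFilterCache below)
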
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java
new file mode 100644
index 0000000..91a2c2f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+
+/**
+ *
+ */
+public class FilterCacheModule extends AbstractModule {
+
+ public static final class FilterCacheSettings {
+ public static final String FILTER_CACHE_TYPE = "index.cache.filter.type";
+ }
+
+ private final Settings settings;
+
+ public FilterCacheModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(FilterCache.class)
+ .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache"))
+ .in(Scopes.SINGLETON);
+ }
+}
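
A hedged example of overriding the default binding through the setting consumed above; the shorthand "none" is assumed to resolve to org.elasticsearch.index.cache.filter.none.NoneFilterCache via getAsClass's prefix/suffix convention:

    Settings settings = ImmutableSettings.settingsBuilder()
            .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, "none")
            .build();
    new FilterCacheModule(settings); // binds FilterCache to NoneFilterCache
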
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java
new file mode 100644
index 0000000..e56a114
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ */
+public class FilterCacheStats implements Streamable, ToXContent {
+
+ long memorySize;
+ long evictions;
+
+ public FilterCacheStats() {
+ }
+
+ public FilterCacheStats(long memorySize, long evictions) {
+ this.memorySize = memorySize;
+ this.evictions = evictions;
+ }
+
+ public void add(FilterCacheStats stats) {
+ this.memorySize += stats.memorySize;
+ this.evictions += stats.evictions;
+ }
+
+ public long getMemorySizeInBytes() {
+ return this.memorySize;
+ }
+
+ public ByteSizeValue getMemorySize() {
+ return new ByteSizeValue(memorySize);
+ }
+
+ public long getEvictions() {
+ return this.evictions;
+ }
+
+ public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException {
+ FilterCacheStats stats = new FilterCacheStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ memorySize = in.readVLong();
+ evictions = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(memorySize);
+ out.writeVLong(evictions);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(Fields.FILTER_CACHE);
+ builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
+ builder.field(Fields.EVICTIONS, getEvictions());
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache");
+ static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
+ static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
+ static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
+ }
+}
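
Grounded in the API above: index- or node-level totals are produced by folding per-shard stats together with add(), e.g.

    FilterCacheStats total = new FilterCacheStats();
    total.add(new FilterCacheStats(1024, 3)); // shard 0: 1 KB cached, 3 evictions
    total.add(new FilterCacheStats(2048, 1)); // shard 1: 2 KB cached, 1 eviction
    assert total.getMemorySizeInBytes() == 3072;
    assert total.getEvictions() == 4;
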
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java
new file mode 100644
index 0000000..67ab084
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.lucene.search.DocIdSet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ */
+public class ShardFilterCache extends AbstractIndexShardComponent implements RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> {
+
+ final CounterMetric evictionsMetric = new CounterMetric();
+ final CounterMetric totalMetric = new CounterMetric();
+
+ @Inject
+ public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ }
+
+ public FilterCacheStats stats() {
+ return new FilterCacheStats(totalMetric.count(), evictionsMetric.count());
+ }
+
+ public void onCached(long sizeInBytes) {
+ totalMetric.inc(sizeInBytes);
+ }
+
+ @Override
+ public void onRemoval(RemovalNotification<WeightedFilterCache.FilterCacheKey, DocIdSet> removalNotification) {
+ if (removalNotification.wasEvicted()) {
+ evictionsMetric.inc();
+ }
+ if (removalNotification.getValue() != null) {
+ totalMetric.dec(DocIdSets.sizeInBytes(removalNotification.getValue()));
+ }
+ }
+}
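
A sketch of the accounting contract implied above (the cache wiring that fires onRemoval lives in IndicesFilterCache/WeightedFilterCache, further down): onCached grows the byte counter when an entry is stored, the removal listener shrinks it and counts evictions, so stats() reflects the net footprint.

    shardFilterCache.onCached(512);        // a 512-byte DocIdSet entered the shared cache
    // ... an eviction later fires onRemoval(notification) carrying that entry ...
    FilterCacheStats stats = shardFilterCache.stats(); // memory back to 0, evictions == 1
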
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCacheModule.java b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCacheModule.java
new file mode 100644
index 0000000..749fd8e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCacheModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ */
+public class ShardFilterCacheModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardFilterCache.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java
new file mode 100644
index 0000000..f66e7d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter.none;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class NoneFilterCache extends AbstractIndexComponent implements FilterCache {
+
+ @Inject
+ public NoneFilterCache(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ logger.debug("Using no filter cache");
+ }
+
+ @Override
+ public void setIndexService(IndexService indexService) {
+ // nothing to do here...
+ }
+
+ @Override
+ public String type() {
+ return "none";
+ }
+
+ @Override
+ public void close() {
+ // nothing to do here
+ }
+
+ @Override
+ public Filter cache(Filter filterToCache) {
+ return filterToCache;
+ }
+
+ @Override
+ public void clear(String reason) {
+ // nothing to do here
+ }
+
+ @Override
+ public void clear(String reason, String[] keys) {
+ // nothing to do here
+ }
+
+ @Override
+ public void clear(Object reader) {
+ // nothing to do here
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/support/CacheKeyFilter.java b/src/main/java/org/elasticsearch/index/cache/filter/support/CacheKeyFilter.java
new file mode 100644
index 0000000..fcbd1b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/support/CacheKeyFilter.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter.support;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.Strings;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public interface CacheKeyFilter {
+
+ public static class Key {
+
+ private final byte[] bytes;
+
+ // we pre-compute the hashCode for better performance (especially in IdCache)
+ private final int hashCode;
+
+ public Key(byte[] bytes) {
+ this.bytes = bytes;
+ this.hashCode = Arrays.hashCode(bytes);
+ }
+
+ public Key(String str) {
+ this(Strings.toUTF8Bytes(str));
+ }
+
+ public byte[] bytes() {
+ return this.bytes;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || o.getClass() != this.getClass()) {
+ return false;
+ }
+ Key bytesWrap = (Key) o;
+ return Arrays.equals(bytes, bytesWrap.bytes);
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+ }
+
+ public static class Wrapper extends Filter implements CacheKeyFilter {
+
+ private final Filter filter;
+
+ private final Key key;
+
+ public Wrapper(Filter filter, Key key) {
+ this.filter = filter;
+ this.key = key;
+ }
+
+ @Override
+ public Key cacheKey() {
+ return key;
+ }
+
+ public Filter wrappedFilter() {
+ return filter;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return filter.getDocIdSet(context, acceptDocs);
+ }
+
+ @Override
+ public int hashCode() {
+ return filter.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return filter.equals(obj);
+ }
+
+ @Override
+ public String toString() {
+ return filter.toString();
+ }
+ }
+
+ Object cacheKey();
+} \ No newline at end of file
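
A hedged usage sketch tying this to FilterCache#clear(reason, keys): wrapping a filter with a stable, user-chosen key lets its cached entries be invalidated by name later (someFilter and filterCache are assumed):

    Filter keyed = new CacheKeyFilter.Wrapper(someFilter, new CacheKeyFilter.Key("recent_docs"));
    Filter cached = filterCache.cache(keyed);
    // ... later, drop just the entries cached under that key:
    filterCache.clear("api", new String[]{"recent_docs"});
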
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/support/FilterCacheValue.java b/src/main/java/org/elasticsearch/index/cache/filter/support/FilterCacheValue.java
new file mode 100644
index 0000000..4692119
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/support/FilterCacheValue.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter.support;
+
+public class FilterCacheValue<T> {
+
+ private final T value;
+
+ public FilterCacheValue(T value) {
+ this.value = value;
+ }
+
+ public T value() {
+ return value;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java
new file mode 100644
index 0000000..6ce7734
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter.weighted;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.Weigher;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.CachedFilter;
+import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+
+import java.io.IOException;
+import java.util.concurrent.ConcurrentMap;
+
+public class WeightedFilterCache extends AbstractIndexComponent implements FilterCache, SegmentReader.CoreClosedListener {
+
+ final IndicesFilterCache indicesFilterCache;
+ IndexService indexService;
+
+ final ConcurrentMap<Object, Boolean> seenReaders = ConcurrentCollections.newConcurrentMap();
+
+ @Inject
+ public WeightedFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) {
+ super(index, indexSettings);
+ this.indicesFilterCache = indicesFilterCache;
+ }
+
+ @Override
+ public void setIndexService(IndexService indexService) {
+ this.indexService = indexService;
+ }
+
+ @Override
+ public String type() {
+ return "weighted";
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ clear("close");
+ }
+
+ @Override
+ public void clear(String reason) {
+ logger.debug("full cache clear, reason [{}]", reason);
+ for (Object readerKey : seenReaders.keySet()) {
+ Boolean removed = seenReaders.remove(readerKey);
+ if (removed == null) {
+ continue; // another thread already removed this reader; keep clearing the rest
+ }
+ indicesFilterCache.addReaderKeyToClean(readerKey);
+ }
+ }
+
+ @Override
+ public void clear(String reason, String[] keys) {
+ logger.debug("clear keys [], reason [{}]", reason, keys);
+ final BytesRef spare = new BytesRef();
+ for (String key : keys) {
+ final byte[] keyBytes = Strings.toUTF8Bytes(key, spare);
+ for (Object readerKey : seenReaders.keySet()) {
+ indicesFilterCache.cache().invalidate(new FilterCacheKey(readerKey, new CacheKeyFilter.Key(keyBytes)));
+ }
+ }
+ }
+
+ @Override
+ public void onClose(Object coreKey) {
+ clear(coreKey);
+ }
+
+ @Override
+ public void clear(Object coreCacheKey) {
+ // we add the seen reader before we add the first cache entry for this reader
+ // so, if we don't see it here, it won't be in the cache
+ Boolean removed = seenReaders.remove(coreCacheKey);
+ if (removed == null) {
+ return;
+ }
+ indicesFilterCache.addReaderKeyToClean(coreCacheKey);
+ }
+
+ @Override
+ public Filter cache(Filter filterToCache) {
+ if (filterToCache == null) {
+ return null;
+ }
+ if (filterToCache instanceof NoCacheFilter) {
+ return filterToCache;
+ }
+ if (CachedFilter.isCached(filterToCache)) {
+ return filterToCache;
+ }
+ return new FilterCacheFilterWrapper(filterToCache, this);
+ }
+
+ static class FilterCacheFilterWrapper extends CachedFilter {
+
+ private final Filter filter;
+
+ private final WeightedFilterCache cache;
+
+ FilterCacheFilterWrapper(Filter filter, WeightedFilterCache cache) {
+ this.filter = filter;
+ this.cache = cache;
+ }
+
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ Object filterKey = filter;
+ if (filter instanceof CacheKeyFilter) {
+ filterKey = ((CacheKeyFilter) filter).cacheKey();
+ }
+ FilterCacheKey cacheKey = new FilterCacheKey(context.reader().getCoreCacheKey(), filterKey);
+ Cache<FilterCacheKey, DocIdSet> innerCache = cache.indicesFilterCache.cache();
+
+ DocIdSet cacheValue = innerCache.getIfPresent(cacheKey);
+ if (cacheValue == null) {
+ if (!cache.seenReaders.containsKey(context.reader().getCoreCacheKey())) {
+ Boolean previous = cache.seenReaders.putIfAbsent(context.reader().getCoreCacheKey(), Boolean.TRUE);
+ if (previous == null) {
+ // we only add a core closed listener; for non-core IndexReaders we rely on clear being called explicitly (the percolator, for example)
+ if (context.reader() instanceof SegmentReader) {
+ ((SegmentReader) context.reader()).addCoreClosedListener(cache);
+ }
+ }
+ }
+ // we can't pass down acceptedDocs provided, because we are caching the result, and acceptedDocs
+ // might be specific to a query. We don't pass the live docs either because a cache built for a specific
+ // generation of a segment might be reused by an older generation which has fewer deleted documents
+ cacheValue = DocIdSets.toCacheable(context.reader(), filter.getDocIdSet(context, null));
+ // we might put the same one concurrently, that's fine, it will be replaced and the removal
+ // will be called
+ ShardId shardId = ShardUtils.extractShardId(context.reader());
+ if (shardId != null) {
+ IndexShard shard = cache.indexService.shard(shardId.id());
+ if (shard != null) {
+ cacheKey.removalListener = shard.filterCache();
+ shard.filterCache().onCached(DocIdSets.sizeInBytes(cacheValue));
+ }
+ }
+ innerCache.put(cacheKey, cacheValue);
+ }
+
+ // note, we don't wrap the return value with a BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs) because
+ // we rely on our custom XFilteredQuery to do the wrapping if needed, so we don't have to wrap each
+ // filter on its own
+ return DocIdSets.isEmpty(cacheValue) ? null : cacheValue;
+ }
+
+ public String toString() {
+ return "cache(" + filter + ")";
+ }
+
+ public boolean equals(Object o) {
+ if (!(o instanceof FilterCacheFilterWrapper)) return false;
+ return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
+ }
+
+ public int hashCode() {
+ return filter.hashCode() ^ 0x1117BF25;
+ }
+ }
+
+
+ public static class FilterCacheValueWeigher implements Weigher<WeightedFilterCache.FilterCacheKey, DocIdSet> {
+
+ @Override
+ public int weigh(FilterCacheKey key, DocIdSet value) {
+ int weight = (int) Math.min(DocIdSets.sizeInBytes(value), Integer.MAX_VALUE);
+ return weight == 0 ? 1 : weight;
+ }
+ }
+
+ public static class FilterCacheKey {
+ private final Object readerKey;
+ private final Object filterKey;
+
+ // if we know, we will try and set the removal listener (for statistics)
+ // it's ok that it's not volatile because we make sure we only set it when the object is created, before it's shared between threads
+ @Nullable
+ public RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> removalListener;
+
+ public FilterCacheKey(Object readerKey, Object filterKey) {
+ this.readerKey = readerKey;
+ this.filterKey = filterKey;
+ }
+
+ public Object readerKey() {
+ return readerKey;
+ }
+
+ public Object filterKey() {
+ return filterKey;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+// if (o == null || getClass() != o.getClass()) return false;
+ FilterCacheKey that = (FilterCacheKey) o;
+ return (readerKey().equals(that.readerKey()) && filterKey.equals(that.filterKey));
+ }
+
+ @Override
+ public int hashCode() {
+ return readerKey().hashCode() + 31 * filterKey.hashCode();
+ }
+ }
+} \ No newline at end of file
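
Illustrative only: how FilterCacheValueWeigher plugs into a size-bounded Guava cache. The real cache is constructed in IndicesFilterCache (not part of this hunk); the builder calls below are plain Guava API.

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache = CacheBuilder.newBuilder()
            .maximumWeight(100 * 1024 * 1024) // cap total DocIdSet bytes at ~100 MB
            .weigher(new WeightedFilterCache.FilterCacheValueWeigher())
            .build();
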
diff --git a/src/main/java/org/elasticsearch/index/cache/id/IdCache.java b/src/main/java/org/elasticsearch/index/cache/id/IdCache.java
new file mode 100644
index 0000000..f6c52e2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/IdCache.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.component.CloseableComponent;
+import org.elasticsearch.index.IndexComponent;
+import org.elasticsearch.index.service.IndexService;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * This id cache contains only the ids of parent documents, loaded via the uid or parent field.
+ * The name IdCache is misleading; ParentIdCache would be a better name.
+ */
+public interface IdCache extends IndexComponent, CloseableComponent {
+
+ // we need to "inject" the index service to avoid creating a cyclic dependency
+ void setIndexService(IndexService indexService);
+
+ void clear();
+
+ void clear(Object coreCacheKey);
+
+ void refresh(List<AtomicReaderContext> readers) throws IOException;
+
+ IdReaderCache reader(AtomicReader reader);
+}
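
A minimal sketch of the consumer contract (the actual callers live in the parent/child search code, outside this hunk): refresh against the current reader's leaves, then resolve per-segment caches on demand.

    idCache.refresh(indexReader.leaves());           // IndexReader#leaves() is a List<AtomicReaderContext>
    IdReaderCache perSegment = idCache.reader(leaf); // null until that segment has been refreshed
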
diff --git a/src/main/java/org/elasticsearch/index/cache/id/IdCacheModule.java b/src/main/java/org/elasticsearch/index/cache/id/IdCacheModule.java
new file mode 100644
index 0000000..765c9f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/IdCacheModule.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
+
+/**
+ *
+ */
+public class IdCacheModule extends AbstractModule {
+
+ public static final class IdCacheSettings {
+ public static final String ID_CACHE_TYPE = "index.cache.id.type";
+ }
+
+ private final Settings settings;
+
+ public IdCacheModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IdCache.class)
+ .to(settings.getAsClass(IdCacheSettings.ID_CACHE_TYPE, SimpleIdCache.class, "org.elasticsearch.index.cache.id.", "IdCache"))
+ .in(Scopes.SINGLETON);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/id/IdCacheStats.java b/src/main/java/org/elasticsearch/index/cache/id/IdCacheStats.java
new file mode 100644
index 0000000..a81f00d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/IdCacheStats.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.id;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ */
+public class IdCacheStats implements Streamable, ToXContent {
+
+ long memorySize;
+
+ public IdCacheStats() {
+ }
+
+ public IdCacheStats(long memorySize) {
+ this.memorySize = memorySize;
+ }
+
+ public void add(IdCacheStats stats) {
+ this.memorySize += stats.memorySize;
+ }
+
+ public long getMemorySizeInBytes() {
+ return this.memorySize;
+ }
+
+ public ByteSizeValue getMemorySize() {
+ return new ByteSizeValue(memorySize);
+ }
+
+ public static IdCacheStats readIdCacheStats(StreamInput in) throws IOException {
+ IdCacheStats stats = new IdCacheStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ memorySize = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(memorySize);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.ID_CACHE);
+ builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString ID_CACHE = new XContentBuilderString("id_cache");
+ static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
+ static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/id/IdReaderCache.java b/src/main/java/org/elasticsearch/index/cache/id/IdReaderCache.java
new file mode 100644
index 0000000..23bde0a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/IdReaderCache.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id;
+
+import org.elasticsearch.common.bytes.HashedBytesArray;
+
+/**
+ *
+ */
+public interface IdReaderCache {
+
+ IdReaderTypeCache type(String type);
+
+ HashedBytesArray parentIdByDoc(String type, int docId);
+
+ int docById(String type, HashedBytesArray id);
+
+ long sizeInBytes();
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/id/IdReaderTypeCache.java b/src/main/java/org/elasticsearch/index/cache/id/IdReaderTypeCache.java
new file mode 100644
index 0000000..804af01
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/IdReaderTypeCache.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id;
+
+import org.elasticsearch.common.bytes.HashedBytesArray;
+
+/**
+ *
+ */
+public interface IdReaderTypeCache {
+
+ /**
+ * @param docId The Lucene docId of the child document to return the parent _uid for.
+ * @return The parent _uid for the specified docId (which is a child document)
+ */
+ HashedBytesArray parentIdByDoc(int docId);
+
+ /**
+ * @param uid The uid of the document to return the lucene docId for
+ * @return The lucene docId for the specified uid
+ */
+ int docById(HashedBytesArray uid);
+
+ /**
+ * @param docId The lucene docId of the document to return _uid for
+ * @return The _uid of the specified docId
+ */
+ HashedBytesArray idByDoc(int docId);
+
+ /**
+ * @return The size in bytes for this particular instance
+ */
+ long sizeInBytes();
+}
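
A hedged walk-through chaining the accessors above to hop from a child document to its parent within one segment (typeCache and childDocId are assumed):

    HashedBytesArray parentUid = typeCache.parentIdByDoc(childDocId); // child doc -> parent _uid
    int parentDocId = typeCache.docById(parentUid);                   // parent _uid -> lucene docId
    HashedBytesArray roundTrip = typeCache.idByDoc(parentDocId);      // docId -> _uid again
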
diff --git a/src/main/java/org/elasticsearch/index/cache/id/ShardIdCache.java b/src/main/java/org/elasticsearch/index/cache/id/ShardIdCache.java
new file mode 100644
index 0000000..65525d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/ShardIdCache.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.id;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ */
+public class ShardIdCache extends AbstractIndexShardComponent {
+
+ final CounterMetric totalMetric = new CounterMetric();
+
+ @Inject
+ public ShardIdCache(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ }
+
+ public IdCacheStats stats() {
+ return new IdCacheStats(totalMetric.count());
+ }
+
+ public void onCached(long sizeInBytes) {
+ totalMetric.inc(sizeInBytes);
+ }
+
+ public void onRemoval(long sizeInBytes) {
+ totalMetric.dec(sizeInBytes);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/id/ShardIdCacheModule.java b/src/main/java/org/elasticsearch/index/cache/id/ShardIdCacheModule.java
new file mode 100644
index 0000000..1f84b6b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/ShardIdCacheModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.id;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ */
+public class ShardIdCacheModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardIdCache.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdCache.java b/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdCache.java
new file mode 100644
index 0000000..b07af1f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdCache.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id.simple;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.UTF8SortedAsUnicodeComparator;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.cache.id.IdReaderCache;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentTypeListener;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.index.shard.service.IndexShard;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ *
+ */
+public class SimpleIdCache extends AbstractIndexComponent implements IdCache, SegmentReader.CoreClosedListener, DocumentTypeListener {
+
+ private final boolean reuse;
+ private final ConcurrentMap<Object, SimpleIdReaderCache> idReaders;
+ private final NavigableSet<HashedBytesArray> parentTypes;
+
+ IndexService indexService;
+
+ @Inject
+ public SimpleIdCache(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ reuse = componentSettings.getAsBoolean("reuse", false);
+ idReaders = ConcurrentCollections.newConcurrentMap();
+ parentTypes = new TreeSet<HashedBytesArray>(UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder);
+ }
+
+ @Override
+ public void setIndexService(IndexService indexService) {
+ this.indexService = indexService;
+ indexService.mapperService().addTypeListener(this);
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ indexService.mapperService().removeTypeListener(this);
+ clear();
+ }
+
+ @Override
+ public void clear() {
+ // Make a copy of the live id readers...
+ Map<Object, SimpleIdReaderCache> copy = new HashMap<Object, SimpleIdReaderCache>(idReaders);
+ for (Map.Entry<Object, SimpleIdReaderCache> entry : copy.entrySet()) {
+ SimpleIdReaderCache removed = idReaders.remove(entry.getKey());
+ // ... and decrement stats only if the id reader still existed in the live map;
+ // this prevents double onRemoval calls
+ if (removed != null) {
+ onRemoval(removed);
+ }
+ }
+ }
+
+ @Override
+ public void onClose(Object coreCacheKey) {
+ clear(coreCacheKey);
+ }
+
+ @Override
+ public void clear(Object coreCacheKey) {
+ SimpleIdReaderCache removed = idReaders.remove(coreCacheKey);
+ if (removed != null) onRemoval(removed);
+ }
+
+ @Override
+ public IdReaderCache reader(AtomicReader reader) {
+ return idReaders.get(reader.getCoreCacheKey());
+ }
+
+ @SuppressWarnings({"StringEquality"})
+ @Override
+ public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOException {
+ // do a quick check for the common case, that all are there
+ if (refreshNeeded(atomicReaderContexts)) {
+ synchronized (idReaders) {
+ if (!refreshNeeded(atomicReaderContexts)) {
+ return;
+ }
+
+ // do the refresh
+ Map<Object, Map<String, TypeBuilder>> builders = new HashMap<Object, Map<String, TypeBuilder>>();
+ Map<Object, AtomicReader> cacheToReader = new HashMap<Object, AtomicReader>();
+
+ // first, go over and load all the id->doc map for all types
+ for (AtomicReaderContext context : atomicReaderContexts) {
+ AtomicReader reader = context.reader();
+ if (!refreshNeeded(context)) {
+ // no need, continue
+ continue;
+ }
+
+ if (reader instanceof SegmentReader) {
+ ((SegmentReader) reader).addCoreClosedListener(this);
+ }
+ Map<String, TypeBuilder> readerBuilder = new HashMap<String, TypeBuilder>();
+ builders.put(reader.getCoreCacheKey(), readerBuilder);
+ cacheToReader.put(reader.getCoreCacheKey(), context.reader());
+
+
+ Terms terms = reader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ uid: for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
+ HashedBytesArray[] typeAndId = Uid.splitUidIntoTypeAndId(term);
+ // We don't want to load the uids of child documents; seeking ahead lets us skip the uids of child types entirely.
+ if (!parentTypes.contains(typeAndId[0])) {
+ do {
+ HashedBytesArray nextParent = parentTypes.ceiling(typeAndId[0]);
+ if (nextParent == null) {
+ break uid;
+ }
+
+ TermsEnum.SeekStatus status = termsEnum.seekCeil(nextParent.toBytesRef());
+ if (status == TermsEnum.SeekStatus.END) {
+ break uid;
+ } else if (status == TermsEnum.SeekStatus.NOT_FOUND) {
+ term = termsEnum.term();
+ typeAndId = Uid.splitUidIntoTypeAndId(term);
+ } else if (status == TermsEnum.SeekStatus.FOUND) {
+ assert false : "Seek status should never be FOUND, because we seek only the type part";
+ term = termsEnum.term();
+ typeAndId = Uid.splitUidIntoTypeAndId(term);
+ }
+ } while (!parentTypes.contains(typeAndId[0]));
+ }
+
+ String type = typeAndId[0].toUtf8();
+ TypeBuilder typeBuilder = readerBuilder.get(type);
+ if (typeBuilder == null) {
+ typeBuilder = new TypeBuilder(reader);
+ readerBuilder.put(type, typeBuilder);
+ }
+
+ HashedBytesArray idAsBytes = checkIfCanReuse(builders, typeAndId[1]);
+ docsEnum = termsEnum.docs(null, docsEnum, 0);
+ for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+ typeBuilder.idToDoc.put(idAsBytes, docId);
+ typeBuilder.docToId[docId] = idAsBytes;
+ }
+ }
+ }
+ }
+
+ // now, go and load the docId->parentId map
+ for (AtomicReaderContext context : atomicReaderContexts) {
+ AtomicReader reader = context.reader();
+ if (!refreshNeeded(context)) {
+ // no need, continue
+ continue;
+ }
+
+ Map<String, TypeBuilder> readerBuilder = builders.get(reader.getCoreCacheKey());
+
+ Terms terms = reader.terms(ParentFieldMapper.NAME);
+ if (terms != null) {
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
+ HashedBytesArray[] typeAndId = Uid.splitUidIntoTypeAndId(term);
+
+ TypeBuilder typeBuilder = readerBuilder.get(typeAndId[0].toUtf8());
+ if (typeBuilder == null) {
+ typeBuilder = new TypeBuilder(reader);
+ readerBuilder.put(typeAndId[0].toUtf8(), typeBuilder);
+ }
+
+ HashedBytesArray idAsBytes = checkIfCanReuse(builders, typeAndId[1]);
+ boolean added = false; // optimize for when all the docs are deleted for this id
+
+ docsEnum = termsEnum.docs(null, docsEnum, 0);
+ for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+ if (!added) {
+ typeBuilder.parentIdsValues.add(idAsBytes);
+ added = true;
+ }
+ typeBuilder.parentIdsOrdinals[docId] = typeBuilder.t;
+ }
+
+ if (added) {
+ typeBuilder.t++;
+ }
+ }
+ }
+ }
+
+
+ // now, build it back
+ for (Map.Entry<Object, Map<String, TypeBuilder>> entry : builders.entrySet()) {
+ Object readerKey = entry.getKey();
+ MapBuilder<String, SimpleIdReaderTypeCache> types = MapBuilder.newMapBuilder();
+ for (Map.Entry<String, TypeBuilder> typeBuilderEntry : entry.getValue().entrySet()) {
+ types.put(typeBuilderEntry.getKey(), new SimpleIdReaderTypeCache(typeBuilderEntry.getKey(),
+ typeBuilderEntry.getValue().idToDoc,
+ typeBuilderEntry.getValue().docToId,
+ typeBuilderEntry.getValue().parentIdsValues.toArray(new HashedBytesArray[typeBuilderEntry.getValue().parentIdsValues.size()]),
+ typeBuilderEntry.getValue().parentIdsOrdinals));
+ }
+ AtomicReader indexReader = cacheToReader.get(readerKey);
+ SimpleIdReaderCache readerCache = new SimpleIdReaderCache(types.immutableMap(), ShardUtils.extractShardId(indexReader));
+ idReaders.put(readerKey, readerCache);
+ onCached(readerCache);
+ }
+ }
+ }
+ }
+
+ void onCached(SimpleIdReaderCache readerCache) {
+ if (readerCache.shardId != null) {
+ IndexShard shard = indexService.shard(readerCache.shardId.id());
+ if (shard != null) {
+ shard.idCache().onCached(readerCache.sizeInBytes());
+ }
+ }
+ }
+
+ void onRemoval(SimpleIdReaderCache readerCache) {
+ if (readerCache.shardId != null) {
+ IndexShard shard = indexService.shard(readerCache.shardId.id());
+ if (shard != null) {
+ shard.idCache().onRemoval(readerCache.sizeInBytes());
+ }
+ }
+ }
+
+ private HashedBytesArray checkIfCanReuse(Map<Object, Map<String, TypeBuilder>> builders, HashedBytesArray idAsBytes) {
+ HashedBytesArray finalIdAsBytes;
+ // go over and see if we can reuse this id
+ if (reuse) {
+ for (SimpleIdReaderCache idReaderCache : idReaders.values()) {
+ finalIdAsBytes = idReaderCache.canReuse(idAsBytes);
+ if (finalIdAsBytes != null) {
+ return finalIdAsBytes;
+ }
+ }
+ }
+        // even if reuse is not enabled, at least check the current "live" builders that we are handling
+ for (Map<String, TypeBuilder> map : builders.values()) {
+ for (TypeBuilder typeBuilder : map.values()) {
+ finalIdAsBytes = typeBuilder.canReuse(idAsBytes);
+ if (finalIdAsBytes != null) {
+ return finalIdAsBytes;
+ }
+ }
+ }
+ return idAsBytes;
+ }
+
+ private boolean refreshNeeded(List<AtomicReaderContext> atomicReaderContexts) {
+ for (AtomicReaderContext atomicReaderContext : atomicReaderContexts) {
+ if (refreshNeeded(atomicReaderContext)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private boolean refreshNeeded(AtomicReaderContext atomicReaderContext) {
+ return !idReaders.containsKey(atomicReaderContext.reader().getCoreCacheKey());
+ }
+
+ @Override
+ public void beforeCreate(DocumentMapper mapper) {
+ synchronized (idReaders) {
+ ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();
+ if (parentFieldMapper.active()) {
+                // A _parent field can never be added to an existing mapping, so a _parent field either exists on
+                // a newly created mapping or doesn't exist at all. This is why we can update the known parent types via DocumentTypeListener
+ if (parentTypes.add(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), new BytesRef())))) {
+ clear();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void afterRemove(DocumentMapper mapper) {
+ synchronized (idReaders) {
+ ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();
+ if (parentFieldMapper.active()) {
+ parentTypes.remove(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), new BytesRef())));
+ }
+ }
+ }
+
+ static class TypeBuilder {
+ final ObjectIntOpenHashMap<HashedBytesArray> idToDoc = new ObjectIntOpenHashMap<HashedBytesArray>();
+ final HashedBytesArray[] docToId;
+ final ArrayList<HashedBytesArray> parentIdsValues = new ArrayList<HashedBytesArray>();
+ final int[] parentIdsOrdinals;
+        int t = 1;  // current term number (0 indicates the null value)
+
+ TypeBuilder(IndexReader reader) {
+ parentIdsOrdinals = new int[reader.maxDoc()];
+ // the first one indicates null value
+ parentIdsValues.add(null);
+ docToId = new HashedBytesArray[reader.maxDoc()];
+ }
+
+ /**
+         * Returns the already stored instance if one exists; otherwise returns the given id.
+ */
+ public HashedBytesArray canReuse(HashedBytesArray id) {
+ if (idToDoc.containsKey(id)) {
+ // we can use #lkey() since this is called from a synchronized block
+ return idToDoc.lkey();
+ } else {
+ return id;
+ }
+ }
+ }
+}
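
The TypeBuilder above encodes the per-segment docId-to-parentId map as an ordinal table: parentIdsValues stores each distinct parent id once, with slot 0 reserved for the null value, and parentIdsOrdinals maps every docId to an index into that list. A minimal standalone sketch of the same encoding, using plain Strings in place of HashedBytesArray:

    import java.util.ArrayList;
    import java.util.List;

    class OrdinalEncodingSketch {
        public static void main(String[] args) {
            int maxDoc = 5;
            List<String> parentIdsValues = new ArrayList<String>();
            parentIdsValues.add(null);          // ordinal 0 means "no parent"
            int[] parentIdsOrdinals = new int[maxDoc];

            int t = 1;                          // next free ordinal
            parentIdsValues.add("p1");          // docs 0 and 2 share parent "p1"
            parentIdsOrdinals[0] = t;
            parentIdsOrdinals[2] = t;
            t++;
            parentIdsValues.add("p2");          // doc 4 has parent "p2"
            parentIdsOrdinals[4] = t++;

            for (int docId = 0; docId < maxDoc; docId++) {
                System.out.println(docId + " -> " + parentIdsValues.get(parentIdsOrdinals[docId]));
            }
        }
    }
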
diff --git a/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderCache.java b/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderCache.java
new file mode 100644
index 0000000..5a0a4e6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderCache.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id.simple;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.index.cache.id.IdReaderCache;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class SimpleIdReaderCache implements IdReaderCache {
+
+ private final ImmutableMap<String, SimpleIdReaderTypeCache> types;
+
+ @Nullable
+ public final ShardId shardId;
+
+ public SimpleIdReaderCache(ImmutableMap<String, SimpleIdReaderTypeCache> types, @Nullable ShardId shardId) {
+ this.types = types;
+ this.shardId = shardId;
+ }
+
+ @Override
+ public IdReaderTypeCache type(String type) {
+ return types.get(type);
+ }
+
+ @Override
+ public HashedBytesArray parentIdByDoc(String type, int docId) {
+ SimpleIdReaderTypeCache typeCache = types.get(type);
+ if (typeCache != null) {
+ return typeCache.parentIdByDoc(docId);
+ }
+ return null;
+ }
+
+ @Override
+ public int docById(String type, HashedBytesArray id) {
+ SimpleIdReaderTypeCache typeCache = types.get(type);
+ if (typeCache != null) {
+ return typeCache.docById(id);
+ }
+ return -1;
+ }
+
+ public long sizeInBytes() {
+ long sizeInBytes = 0;
+ for (SimpleIdReaderTypeCache readerTypeCache : types.values()) {
+ sizeInBytes += readerTypeCache.sizeInBytes();
+ }
+ return sizeInBytes;
+ }
+
+ /**
+     * Returns an already stored instance if it exists; otherwise returns null.
+ */
+ public HashedBytesArray canReuse(HashedBytesArray id) {
+ for (SimpleIdReaderTypeCache typeCache : types.values()) {
+ HashedBytesArray wrap = typeCache.canReuse(id);
+ if (wrap != null) {
+ return wrap;
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderTypeCache.java b/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderTypeCache.java
new file mode 100644
index 0000000..c5f5709
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderTypeCache.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.cache.id.simple;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+
+/**
+ *
+ */
+public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
+
+ private final String type;
+
+ private final ObjectIntOpenHashMap<HashedBytesArray> idToDoc;
+
+ private final HashedBytesArray[] docIdToId;
+
+ private final HashedBytesArray[] parentIdsValues;
+
+ private final int[] parentIdsOrdinals;
+
+ private long sizeInBytes = -1;
+
+ public SimpleIdReaderTypeCache(String type, ObjectIntOpenHashMap<HashedBytesArray> idToDoc, HashedBytesArray[] docIdToId,
+ HashedBytesArray[] parentIdsValues, int[] parentIdsOrdinals) {
+ this.type = type;
+ this.idToDoc = idToDoc;
+ this.docIdToId = docIdToId;
+ this.parentIdsValues = parentIdsValues;
+ this.parentIdsOrdinals = parentIdsOrdinals;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public HashedBytesArray parentIdByDoc(int docId) {
+ return parentIdsValues[parentIdsOrdinals[docId]];
+ }
+
+ public int docById(HashedBytesArray uid) {
+ return idToDoc.getOrDefault(uid, -1);
+ }
+
+ public HashedBytesArray idByDoc(int docId) {
+ return docIdToId[docId];
+ }
+
+ public long sizeInBytes() {
+ if (sizeInBytes == -1) {
+ sizeInBytes = computeSizeInBytes();
+ }
+ return sizeInBytes;
+ }
+
+ /**
+     * Returns the already stored instance if one exists; otherwise returns the given id.
+ */
+ public HashedBytesArray canReuse(HashedBytesArray id) {
+ if (idToDoc.containsKey(id)) {
+ // we can use #lkey() since this is called from a synchronized block
+ return idToDoc.lkey();
+ } else {
+ return id;
+ }
+ }
+
+ long computeSizeInBytes() {
+ long sizeInBytes = 0;
+ // Ignore type field
+ // sizeInBytes += ((type.length() * RamUsage.NUM_BYTES_CHAR) + (3 * RamUsage.NUM_BYTES_INT)) + RamUsage.NUM_BYTES_OBJECT_HEADER;
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc.values.length * RamUsageEstimator.NUM_BYTES_INT);
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc.allocated.length);
+ final boolean[] states = idToDoc.allocated;
+ final Object[] keys = idToDoc.keys;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ HashedBytesArray bytesArray = (HashedBytesArray) keys[i];
+ if (bytesArray != null) {
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
+ } else {
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+ }
+ }
+ }
+
+ // The docIdToId array contains references to idToDoc for this segment or other segments, so we can use OBJECT_REF
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (RamUsageEstimator.NUM_BYTES_OBJECT_REF * docIdToId.length);
+ for (HashedBytesArray bytesArray : parentIdsValues) {
+ if (bytesArray == null) {
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+ } else {
+ sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
+ }
+ }
+ sizeInBytes += RamUsageEstimator.sizeOf(parentIdsOrdinals);
+
+ return sizeInBytes;
+ }
+
+}
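
computeSizeInBytes above approximates the cache's heap footprint from array headers, object headers and references rather than measuring it. A small sketch of the Lucene helper it relies on, assuming Lucene 4.x on the classpath:

    import org.apache.lucene.util.RamUsageEstimator;

    class SizeEstimationSketch {
        public static void main(String[] args) {
            int[] parentIdsOrdinals = new int[1024];
            // Shallow size of the array: object header plus 1024 * 4 bytes, aligned.
            System.out.println(RamUsageEstimator.sizeOf(parentIdsOrdinals));
        }
    }
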
diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java
new file mode 100644
index 0000000..7864380
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.query.parser;
+
+import org.apache.lucene.queryparser.classic.QueryParserSettings;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.component.CloseableComponent;
+import org.elasticsearch.index.IndexComponent;
+
+/**
+ * The main benefit of the query parser cache is avoiding re-parsing the same query string for each
+ * shard on the same node; it is less about long-running query strings.
+ */
+public interface QueryParserCache extends IndexComponent, CloseableComponent {
+
+ Query get(QueryParserSettings queryString);
+
+ void put(QueryParserSettings queryString, Query query);
+
+ void clear();
+}
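
A sketch of how a caller would use this contract (check, parse on a miss, then put); parse() here is a stand-in for the actual query-string parsing step:

    import org.apache.lucene.queryparser.classic.QueryParserSettings;
    import org.apache.lucene.search.Query;

    class QueryParserCacheUsageSketch {
        static Query getOrParse(QueryParserCache cache, QueryParserSettings settings) {
            Query query = cache.get(settings);   // null on a miss
            if (query == null) {
                query = parse(settings);         // the expensive parse happens at most once per node
                cache.put(settings, query);      // implementations may skip non-cacheable queries
            }
            return query;
        }

        private static Query parse(QueryParserSettings settings) {
            throw new UnsupportedOperationException("stand-in for the real parser");
        }
    }
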
diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java b/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java
new file mode 100644
index 0000000..b244d5f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.query.parser;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.query.parser.resident.ResidentQueryParserCache;
+
+/**
+ *
+ */
+public class QueryParserCacheModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public QueryParserCacheModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(QueryParserCache.class)
+ .to(settings.getAsClass("index.cache.query.parser.type", ResidentQueryParserCache.class, "org.elasticsearch.index.cache.query.parser.", "QueryParserCache"))
+ .in(Scopes.SINGLETON);
+ }
+}
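
The binding above resolves the implementation class from the index settings, falling back to ResidentQueryParserCache. A hedged sketch of selecting the no-op implementation through the same setting key:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    class QueryParserCacheSettingsSketch {
        static Settings noneCacheSettings() {
            // "none" is resolved against the "org.elasticsearch.index.cache.query.parser."
            // package prefix and "QueryParserCache" suffix, yielding NoneQueryParserCache.
            return ImmutableSettings.settingsBuilder()
                    .put("index.cache.query.parser.type", "none")
                    .build();
        }
    }
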
diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java
new file mode 100644
index 0000000..2a9dcd8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.query.parser.none;
+
+import org.apache.lucene.queryparser.classic.QueryParserSettings;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.query.parser.QueryParserCache;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class NoneQueryParserCache extends AbstractIndexComponent implements QueryParserCache {
+
+ @Inject
+ public NoneQueryParserCache(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ }
+
+ @Override
+ public Query get(QueryParserSettings queryString) {
+ return null;
+ }
+
+ @Override
+ public void put(QueryParserSettings queryString, Query query) {
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java
new file mode 100644
index 0000000..b0dabad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.query.parser.resident;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import org.apache.lucene.queryparser.classic.QueryParserSettings;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.query.parser.QueryParserCache;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A small (by default) query parser cache, mainly to avoid parsing the same query string several
+ * times when several shards exist on the same node.
+ */
+public class ResidentQueryParserCache extends AbstractIndexComponent implements QueryParserCache {
+
+ private final Cache<QueryParserSettings, Query> cache;
+
+ private volatile int maxSize;
+ private volatile TimeValue expire;
+
+ @Inject
+ public ResidentQueryParserCache(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+
+ this.maxSize = componentSettings.getAsInt("max_size", 100);
+ this.expire = componentSettings.getAsTime("expire", null);
+ logger.debug("using [resident] query cache with max_size [{}], expire [{}]", maxSize, expire);
+
+ CacheBuilder cacheBuilder = CacheBuilder.newBuilder().maximumSize(maxSize);
+ if (expire != null) {
+ cacheBuilder.expireAfterAccess(expire.nanos(), TimeUnit.NANOSECONDS);
+ }
+
+ this.cache = cacheBuilder.build();
+ }
+
+ @Override
+ public Query get(QueryParserSettings queryString) {
+ return cache.getIfPresent(queryString);
+ }
+
+ @Override
+ public void put(QueryParserSettings queryString, Query query) {
+ if (queryString.isCacheable()) {
+ cache.put(queryString, query);
+ }
+ }
+
+ @Override
+ public void clear() {
+ cache.invalidateAll();
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ cache.invalidateAll();
+ }
+}
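
A standalone sketch of the Guava cache configured above, using the default max_size of 100 and an illustrative expire-after-access of 60 seconds (by default no expiry is set):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    import java.util.concurrent.TimeUnit;

    class ResidentCacheSketch {
        public static void main(String[] args) {
            Cache<String, String> cache = CacheBuilder.newBuilder()
                    .maximumSize(100)
                    .expireAfterAccess(60, TimeUnit.SECONDS)
                    .build();
            cache.put("user:kimchy", "<parsed query>");
            System.out.println(cache.getIfPresent("user:kimchy"));
        }
    }
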
diff --git a/src/main/java/org/elasticsearch/index/codec/CodecModule.java b/src/main/java/org/elasticsearch/index/codec/CodecModule.java
new file mode 100644
index 0000000..384d73d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/CodecModule.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+import org.elasticsearch.common.settings.NoClassSettingsException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormats;
+import org.elasticsearch.index.codec.docvaluesformat.PreBuiltDocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingFormats;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.codec.postingsformat.PreBuiltPostingsFormatProvider;
+
+import java.util.Map;
+
+/**
+ * The {@link CodecModule} creates and loads the {@link CodecService},
+ * {@link PostingsFormatService} and {@link DocValuesFormatService},
+ * allowing low-level data-structure specialization on a Lucene segment basis.
+ * <p>
+ * The codec module is the authoritative source for built-in and custom
+ * {@link PostingsFormatProvider} implementations. During module bootstrap it processes the
+ * index settings underneath the
+ * {@value PostingsFormatProvider#POSTINGS_FORMAT_SETTINGS_PREFIX} prefix and
+ * instantiates the corresponding {@link PostingsFormatProvider} instances. To
+ * configure a custom provider implementation, the class should reside in the
+ * <tt>org.elasticsearch.index.codec.postingsformat</tt> package and the
+ * class name should be suffixed with <tt>PostingsFormatProvider</tt>. <br>
+ * For example, to expose the Elastic-Fantastic format provider one needs to
+ * provide the following configuration settings and classes:
+ * <ol>
+ * <li>create a {@link PostingsFormatProvider} subclass in the package
+ * <tt>org.elasticsearch.index.codec.postingsformat</tt></li>
+ *
+ * <li>name the subclass <tt>ElasticFantasticPostingsFormatProvider</tt></li>
+ *
+ * <li>configure the custom format in your index settings under
+ * <tt>index.codec.postings_format.elastic_fantastic.type : "ElasticFantastic"</tt>
+ * </li>
+ *
+ * <li>provide any postings format settings for this custom format under the
+ * same key, e.g.
+ * <tt>index.codec.postings_format.elastic_fantastic.performance : "crazy_fast"</tt>
+ * </li>
+ * </ol>
+ *
+ * @see CodecService
+ *
+ */
+public class CodecModule extends AbstractModule {
+
+ private final Settings indexSettings;
+
+ private final Map<String, Class<? extends PostingsFormatProvider>> customPostingsFormatProviders = Maps.newHashMap();
+ private final Map<String, Class<? extends DocValuesFormatProvider>> customDocValuesFormatProviders = Maps.newHashMap();
+
+ public CodecModule(Settings indexSettings) {
+ this.indexSettings = indexSettings;
+ }
+
+ public CodecModule addPostingFormat(String name, Class<? extends PostingsFormatProvider> provider) {
+ this.customPostingsFormatProviders.put(name, provider);
+ return this;
+ }
+
+ public CodecModule addDocValuesFormat(String name, Class<? extends DocValuesFormatProvider> provider) {
+ this.customDocValuesFormatProviders.put(name, provider);
+ return this;
+ }
+
+ private void configurePostingsFormats() {
+ Map<String, Class<? extends PostingsFormatProvider>> postingFormatProviders = Maps.newHashMap(customPostingsFormatProviders);
+
+ Map<String, Settings> postingsFormatsSettings = indexSettings.getGroups(PostingsFormatProvider.POSTINGS_FORMAT_SETTINGS_PREFIX);
+ for (Map.Entry<String, Settings> entry : postingsFormatsSettings.entrySet()) {
+ String name = entry.getKey();
+ Settings settings = entry.getValue();
+
+ String sType = settings.get("type");
+ if (sType == null || sType.trim().isEmpty()) {
+ throw new ElasticsearchIllegalArgumentException("PostingsFormat Factory [" + name + "] must have a type associated with it");
+ }
+
+ Class<? extends PostingsFormatProvider> type;
+ try {
+ type = settings.getAsClass("type", null, "org.elasticsearch.index.codec.postingsformat.", "PostingsFormatProvider");
+ } catch (NoClassSettingsException e) {
+ throw new ElasticsearchIllegalArgumentException("The specified type [" + sType + "] for postingsFormat Factory [" + name + "] can't be found");
+ }
+ postingFormatProviders.put(name, type);
+ }
+
+ // now bind
+ MapBinder<String, PostingsFormatProvider.Factory> postingFormatFactoryBinder
+ = MapBinder.newMapBinder(binder(), String.class, PostingsFormatProvider.Factory.class);
+
+ for (Map.Entry<String, Class<? extends PostingsFormatProvider>> entry : postingFormatProviders.entrySet()) {
+ postingFormatFactoryBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(PostingsFormatProvider.Factory.class, entry.getValue())).in(Scopes.SINGLETON);
+ }
+
+ for (PreBuiltPostingsFormatProvider.Factory factory : PostingFormats.listFactories()) {
+ if (postingFormatProviders.containsKey(factory.name())) {
+ continue;
+ }
+ postingFormatFactoryBinder.addBinding(factory.name()).toInstance(factory);
+ }
+
+ bind(PostingsFormatService.class).asEagerSingleton();
+ }
+
+ private void configureDocValuesFormats() {
+ Map<String, Class<? extends DocValuesFormatProvider>> docValuesFormatProviders = Maps.newHashMap(customDocValuesFormatProviders);
+
+ Map<String, Settings> docValuesFormatSettings = indexSettings.getGroups(DocValuesFormatProvider.DOC_VALUES_FORMAT_SETTINGS_PREFIX);
+ for (Map.Entry<String, Settings> entry : docValuesFormatSettings.entrySet()) {
+ final String name = entry.getKey();
+ final Settings settings = entry.getValue();
+
+ final String sType = settings.get("type");
+ if (sType == null || sType.trim().isEmpty()) {
+ throw new ElasticsearchIllegalArgumentException("DocValuesFormat Factory [" + name + "] must have a type associated with it");
+ }
+
+ final Class<? extends DocValuesFormatProvider> type;
+ try {
+ type = settings.getAsClass("type", null, "org.elasticsearch.index.codec.docvaluesformat.", "DocValuesFormatProvider");
+ } catch (NoClassSettingsException e) {
+ throw new ElasticsearchIllegalArgumentException("The specified type [" + sType + "] for docValuesFormat Factory [" + name + "] can't be found");
+ }
+ docValuesFormatProviders.put(name, type);
+ }
+
+ // now bind
+ MapBinder<String, DocValuesFormatProvider.Factory> docValuesFormatFactoryBinder
+ = MapBinder.newMapBinder(binder(), String.class, DocValuesFormatProvider.Factory.class);
+
+ for (Map.Entry<String, Class<? extends DocValuesFormatProvider>> entry : docValuesFormatProviders.entrySet()) {
+ docValuesFormatFactoryBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(DocValuesFormatProvider.Factory.class, entry.getValue())).in(Scopes.SINGLETON);
+ }
+
+ for (PreBuiltDocValuesFormatProvider.Factory factory : DocValuesFormats.listFactories()) {
+ if (docValuesFormatProviders.containsKey(factory.name())) {
+ continue;
+ }
+ docValuesFormatFactoryBinder.addBinding(factory.name()).toInstance(factory);
+ }
+
+ bind(DocValuesFormatService.class).asEagerSingleton();
+ }
+
+ @Override
+ protected void configure() {
+ configurePostingsFormats();
+ configureDocValuesFormats();
+
+ bind(CodecService.class).asEagerSingleton();
+ }
+}
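
A minimal sketch of the custom-provider pattern described in the class javadoc. ElasticFantasticPostingsFormatProvider and the Lucene41 stand-in format are illustrative choices, not classes shipped with Elasticsearch:

    package org.elasticsearch.index.codec.postingsformat;

    import org.apache.lucene.codecs.PostingsFormat;
    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.inject.assistedinject.Assisted;
    import org.elasticsearch.common.settings.Settings;

    // Hypothetical provider following the package and class-name suffix convention.
    public class ElasticFantasticPostingsFormatProvider extends AbstractPostingsFormatProvider {

        private final PostingsFormat postingsFormat;

        @Inject
        public ElasticFantasticPostingsFormatProvider(@Assisted String name, @Assisted Settings postingsFormatSettings) {
            super(name);
            // postingsFormatSettings.get("performance") would carry "crazy_fast" from the javadoc example.
            this.postingsFormat = PostingsFormat.forName("Lucene41"); // stand-in for the real format
        }

        @Override
        public PostingsFormat get() {
            return postingsFormat;
        }
    }
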
diff --git a/src/main/java/org/elasticsearch/index/codec/CodecService.java b/src/main/java/org/elasticsearch/index/codec/CodecService.java
new file mode 100644
index 0000000..8d4c330
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/CodecService.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.codecs.Codec;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Since Lucene 4.0, low-level index segments are read and written through a
+ * codec layer that allows the use of use-case-specific file formats and
+ * data structures per field. Elasticsearch exposes the full
+ * {@link Codec} capabilities through this {@link CodecService}.
+ *
+ * @see PostingsFormatService
+ * @see DocValuesFormatService
+ */
+public class CodecService extends AbstractIndexComponent {
+
+ public static final String INDEX_CODEC_BLOOM_LOAD = "index.codec.bloom.load";
+ public static final boolean INDEX_CODEC_BLOOM_LOAD_DEFAULT = true;
+
+ private final PostingsFormatService postingsFormatService;
+ private final DocValuesFormatService docValuesFormatService;
+ private final MapperService mapperService;
+ private final ImmutableMap<String, Codec> codecs;
+
+ private volatile boolean loadBloomFilter = true;
+
+ public final static String DEFAULT_CODEC = "default";
+
+ public CodecService(Index index) {
+ this(index, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public CodecService(Index index, @IndexSettings Settings indexSettings) {
+ this(index, indexSettings, new PostingsFormatService(index, indexSettings), new DocValuesFormatService(index, indexSettings), null);
+ }
+
+ @Inject
+ public CodecService(Index index, @IndexSettings Settings indexSettings, PostingsFormatService postingsFormatService,
+ DocValuesFormatService docValuesFormatService, MapperService mapperService) {
+ super(index, indexSettings);
+ this.postingsFormatService = postingsFormatService;
+ this.docValuesFormatService = docValuesFormatService;
+ this.mapperService = mapperService;
+ MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
+ if (mapperService == null) {
+ codecs.put(DEFAULT_CODEC, Codec.getDefault());
+ } else {
+ codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(mapperService,
+ postingsFormatService.get(PostingsFormatService.DEFAULT_FORMAT).get(),
+ docValuesFormatService.get(DocValuesFormatService.DEFAULT_FORMAT).get(), logger));
+ }
+ for (String codec : Codec.availableCodecs()) {
+ codecs.put(codec, Codec.forName(codec));
+ }
+ this.codecs = codecs.immutableMap();
+ this.loadBloomFilter = indexSettings.getAsBoolean(INDEX_CODEC_BLOOM_LOAD, INDEX_CODEC_BLOOM_LOAD_DEFAULT);
+ }
+
+ public PostingsFormatService postingsFormatService() {
+ return this.postingsFormatService;
+ }
+
+ public DocValuesFormatService docValuesFormatService() {
+ return docValuesFormatService;
+ }
+
+ public MapperService mapperService() {
+ return mapperService;
+ }
+
+ public Codec codec(String name) throws ElasticsearchIllegalArgumentException {
+ Codec codec = codecs.get(name);
+ if (codec == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find codec [" + name + "]");
+ }
+ return codec;
+ }
+
+ public boolean isLoadBloomFilter() {
+ return this.loadBloomFilter;
+ }
+
+ public void setLoadBloomFilter(boolean loadBloomFilter) {
+ this.loadBloomFilter = loadBloomFilter;
+ }
+}
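
A short usage sketch resolving the default codec through the service. The single-argument constructors are the paths used when no MapperService is available, in which case "default" falls back to Codec.getDefault():

    import org.apache.lucene.codecs.Codec;
    import org.elasticsearch.index.Index;

    class CodecLookupSketch {
        static Codec defaultCodec() {
            CodecService codecService = new CodecService(new Index("test"));
            return codecService.codec(CodecService.DEFAULT_CODEC); // throws if the name is unknown
        }
    }
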
diff --git a/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
new file mode 100644
index 0000000..1d64f78
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec;
+
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+
+/**
+ * {@link PerFieldMappingPostingFormatCodec This codec} is the default codec
+ * for Elasticsearch. It utilizes the
+ * {@link MapperService} to look up a {@link PostingsFormat} per field. This
+ * allows users to change the low-level postings format for individual fields
+ * per index in real time via the mapping API. If no postings format is
+ * configured for a field, the default postings format is used.
+ */
+// LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
+public class PerFieldMappingPostingFormatCodec extends Lucene46Codec {
+ private final ESLogger logger;
+ private final MapperService mapperService;
+ private final PostingsFormat defaultPostingFormat;
+ private final DocValuesFormat defaultDocValuesFormat;
+
+ public PerFieldMappingPostingFormatCodec(MapperService mapperService, PostingsFormat defaultPostingFormat, DocValuesFormat defaultDocValuesFormat, ESLogger logger) {
+ this.mapperService = mapperService;
+ this.logger = logger;
+ this.defaultPostingFormat = defaultPostingFormat;
+ this.defaultDocValuesFormat = defaultDocValuesFormat;
+ }
+
+ @Override
+ public PostingsFormat getPostingsFormatForField(String field) {
+ final FieldMappers indexName = mapperService.indexName(field);
+ if (indexName == null) {
+ logger.warn("no index mapper found for field: [{}] returning default postings format", field);
+ return defaultPostingFormat;
+ }
+ PostingsFormatProvider postingsFormat = indexName.mapper().postingsFormatProvider();
+ return postingsFormat != null ? postingsFormat.get() : defaultPostingFormat;
+ }
+
+ @Override
+ public DocValuesFormat getDocValuesFormatForField(String field) {
+ final FieldMappers indexName = mapperService.indexName(field);
+ if (indexName == null) {
+ logger.warn("no index mapper found for field: [{}] returning default doc values format", field);
+ return defaultDocValuesFormat;
+ }
+ DocValuesFormatProvider docValuesFormat = indexName.mapper().docValuesFormatProvider();
+ return docValuesFormat != null ? docValuesFormat.get() : defaultDocValuesFormat;
+ }
+}
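
On the Lucene side the codec is consumed by setting it on the IndexWriterConfig; Lucene then calls getPostingsFormatForField and getDocValuesFormatForField for every field while writing a segment. A sketch, with the version and analyzer chosen for illustration:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.util.Version;

    class CodecWiringSketch {
        static IndexWriterConfig configWith(PerFieldMappingPostingFormatCodec codec) {
            IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46,
                    new StandardAnalyzer(Version.LUCENE_46));
            config.setCodec(codec); // per-field lookups happen during segment writes
            return config;
        }
    }
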
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/AbstractDocValuesFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/AbstractDocValuesFormatProvider.java
new file mode 100644
index 0000000..e7504b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/AbstractDocValuesFormatProvider.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import org.apache.lucene.codecs.DocValuesFormat;
+
+/**
+ * A simple abstract {@link DocValuesFormatProvider} that only requires a name.
+ */
+public abstract class AbstractDocValuesFormatProvider implements DocValuesFormatProvider {
+
+ private final String name;
+
+ protected AbstractDocValuesFormatProvider(String name) {
+ this.name = name;
+ }
+
+ public String name() {
+ return name;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DefaultDocValuesFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DefaultDocValuesFormatProvider.java
new file mode 100644
index 0000000..f0e2f54
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DefaultDocValuesFormatProvider.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * The default doc values format. This format takes no parameters.
+ */
+public class DefaultDocValuesFormatProvider extends AbstractDocValuesFormatProvider {
+
+ private final DocValuesFormat docValuesFormat;
+
+ @Inject
+ public DefaultDocValuesFormatProvider(@Assisted String name, @Assisted Settings docValuesFormatSettings) {
+ super(name);
+ this.docValuesFormat = Codec.getDefault().docValuesFormat();
+ }
+
+ @Override
+ public DocValuesFormat get() {
+ return docValuesFormat;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DiskDocValuesFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DiskDocValuesFormatProvider.java
new file mode 100644
index 0000000..d9e1e13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DiskDocValuesFormatProvider.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.diskdv.DiskDocValuesFormat;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A disk-based doc values format. This format takes no parameters.
+ */
+public class DiskDocValuesFormatProvider extends AbstractDocValuesFormatProvider {
+
+ private final DocValuesFormat docValuesFormat;
+
+ @Inject
+ public DiskDocValuesFormatProvider(@Assisted String name, @Assisted Settings docValuesFormatSettings) {
+ super(name);
+ this.docValuesFormat = new DiskDocValuesFormat();
+ }
+
+ @Override
+ public DocValuesFormat get() {
+ return docValuesFormat;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatProvider.java
new file mode 100644
index 0000000..3dad664
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatProvider.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * A {@link DocValuesFormatProvider} acts as a named container for specific
+ * {@link DocValuesFormat} implementations. Custom {@link DocValuesFormat}
+ * implementations can be exposed via
+ * {@link CodecModule#addDocValuesFormat(String, Class)}
+ *
+ * @see CodecModule
+ */
+public interface DocValuesFormatProvider {
+ public static final String DOC_VALUES_FORMAT_SETTINGS_PREFIX = "index.codec.doc_values_format";
+
+ /**
+ * A helper class to lookup {@link DocValuesFormatProvider providers} by their unique {@link DocValuesFormatProvider#name() name}
+ */
+ public static class Helper {
+
+ /**
+         * Looks up and creates a {@link DocValuesFormatProvider} for the given name.
+         * <p>
+         * The settings for the created {@link DocValuesFormatProvider} are taken from the given index settings.
+         * All settings with the {@value DocValuesFormatProvider#DOC_VALUES_FORMAT_SETTINGS_PREFIX} prefix
+         * and the format's name as the key are passed to the factory.
+         * </p>
+         *
+         * @param indexSettings the index settings to configure the doc values format
+         * @param name the name of the doc values format to look up
+         * @param docValuesFormatFactories the factory mapping to look up the {@link Factory} to create the {@link DocValuesFormatProvider}
+         * @return a fully configured {@link DocValuesFormatProvider} for the given name.
+         * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+         * if no {@link DocValuesFormatProvider} for the given name could be found.
+ */
+ public static DocValuesFormatProvider lookup(@IndexSettings Settings indexSettings, String name, Map<String, Factory> docValuesFormatFactories) throws ElasticsearchIllegalArgumentException {
+ Factory factory = docValuesFormatFactories.get(name);
+ if (factory == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find doc_values_format [" + name + "]");
+ }
+ Settings settings = indexSettings.getGroups(DOC_VALUES_FORMAT_SETTINGS_PREFIX).get(name);
+ if (settings == null) {
+ settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+ return factory.create(name, settings);
+ }
+ }
+
+ /**
+     * Returns this provider's {@link DocValuesFormat} instance.
+ */
+ DocValuesFormat get();
+
+ /**
+     * Returns the name of this provider's {@link DocValuesFormat}.
+ */
+ String name();
+
+ /**
+ * A simple factory used to create {@link DocValuesFormatProvider}.
+ */
+ public interface Factory {
+ DocValuesFormatProvider create(String name, Settings settings);
+ }
+}
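
A hedged sketch of Helper.lookup: the factory registered under "my_disk" (an illustrative name) receives the settings group stored below index.codec.doc_values_format.my_disk:

    import java.util.Map;

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    class DocValuesFormatLookupSketch {
        static DocValuesFormatProvider lookup(Map<String, DocValuesFormatProvider.Factory> factories) {
            Settings indexSettings = ImmutableSettings.settingsBuilder()
                    .put("index.codec.doc_values_format.my_disk.type", "disk")
                    .build();
            // Throws ElasticsearchIllegalArgumentException when "my_disk" has no registered factory.
            return DocValuesFormatProvider.Helper.lookup(indexSettings, "my_disk", factories);
        }
    }
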
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatService.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatService.java
new file mode 100644
index 0000000..e6ed724
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormatService.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * The {@link DocValuesFormatService} provides access to
+ * all configured {@link DocValuesFormatProvider} instances by
+ * {@link DocValuesFormatProvider#name() name}.
+ *
+ * @see CodecService
+ */
+public class DocValuesFormatService extends AbstractIndexComponent {
+
+ private final ImmutableMap<String, DocValuesFormatProvider> providers;
+
+ public final static String DEFAULT_FORMAT = "default";
+
+ public DocValuesFormatService(Index index) {
+ this(index, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public DocValuesFormatService(Index index, @IndexSettings Settings indexSettings) {
+ this(index, indexSettings, ImmutableMap.<String, DocValuesFormatProvider.Factory>of());
+ }
+
+ @Inject
+ public DocValuesFormatService(Index index, @IndexSettings Settings indexSettings, Map<String, DocValuesFormatProvider.Factory> docValuesFormatFactories) {
+ super(index, indexSettings);
+
+ MapBuilder<String, DocValuesFormatProvider> providers = MapBuilder.newMapBuilder();
+
+ Map<String, Settings> docValuesFormatSettings = indexSettings.getGroups(DocValuesFormatProvider.DOC_VALUES_FORMAT_SETTINGS_PREFIX);
+ for (Map.Entry<String, DocValuesFormatProvider.Factory> entry : docValuesFormatFactories.entrySet()) {
+ String name = entry.getKey();
+ DocValuesFormatProvider.Factory factory = entry.getValue();
+
+ Settings settings = docValuesFormatSettings.get(name);
+ if (settings == null) {
+ settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+ providers.put(name, factory.create(name, settings));
+ }
+
+ // This is only needed for tests when guice doesn't have the chance to populate the list of DVF factories
+ for (PreBuiltDocValuesFormatProvider.Factory factory : DocValuesFormats.listFactories()) {
+ if (!providers.containsKey(factory.name())) {
+ providers.put(factory.name(), factory.get());
+ }
+ }
+
+ this.providers = providers.immutableMap();
+ }
+
+ public DocValuesFormatProvider get(String name) throws ElasticsearchIllegalArgumentException {
+ DocValuesFormatProvider provider = providers.get(name);
+ if (provider == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find doc_values_format [" + name + "]");
+ }
+ return provider;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormats.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormats.java
new file mode 100644
index 0000000..0afcd99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/DocValuesFormats.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.elasticsearch.common.collect.MapBuilder;
+
+/**
+ * This class represents the set of Elasticsearch "built-in"
+ * {@link DocValuesFormatProvider.Factory doc values format factories}
+ */
+public class DocValuesFormats {
+
+ private static final ImmutableMap<String, PreBuiltDocValuesFormatProvider.Factory> builtInDocValuesFormats;
+
+ static {
+ MapBuilder<String, PreBuiltDocValuesFormatProvider.Factory> builtInDocValuesFormatsX = MapBuilder.newMapBuilder();
+ for (String name : DocValuesFormat.availableDocValuesFormats()) {
+ builtInDocValuesFormatsX.put(name, new PreBuiltDocValuesFormatProvider.Factory(DocValuesFormat.forName(name)));
+ }
+ // LUCENE UPGRADE: update those DVF if necessary
+ builtInDocValuesFormatsX.put(DocValuesFormatService.DEFAULT_FORMAT, new PreBuiltDocValuesFormatProvider.Factory(DocValuesFormatService.DEFAULT_FORMAT, DocValuesFormat.forName("Lucene45")));
+ builtInDocValuesFormatsX.put("memory", new PreBuiltDocValuesFormatProvider.Factory("memory", DocValuesFormat.forName("Memory")));
+ builtInDocValuesFormatsX.put("disk", new PreBuiltDocValuesFormatProvider.Factory("disk", DocValuesFormat.forName("Disk")));
+ builtInDocValuesFormats = builtInDocValuesFormatsX.immutableMap();
+ }
+
+ public static DocValuesFormatProvider.Factory getAsFactory(String name) {
+ return builtInDocValuesFormats.get(name);
+ }
+
+ public static DocValuesFormatProvider getAsProvider(String name) {
+ final PreBuiltDocValuesFormatProvider.Factory factory = builtInDocValuesFormats.get(name);
+ return factory == null ? null : factory.get();
+ }
+
+ public static ImmutableCollection<PreBuiltDocValuesFormatProvider.Factory> listFactories() {
+ return builtInDocValuesFormats.values();
+ }
+}
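
A short usage sketch of the built-in registry; "memory" is one of the names registered in the static block above:

    class BuiltInDocValuesFormatsSketch {
        public static void main(String[] args) {
            DocValuesFormatProvider provider = DocValuesFormats.getAsProvider("memory");
            System.out.println(provider.name() + " -> " + provider.get());
        }
    }
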
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/MemoryDocValuesFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/MemoryDocValuesFormatProvider.java
new file mode 100644
index 0000000..3c507de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/MemoryDocValuesFormatProvider.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.memory.MemoryDocValuesFormat;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A memory-based doc values format. This format takes no parameters.
+ */
+public class MemoryDocValuesFormatProvider extends AbstractDocValuesFormatProvider {
+
+ private final DocValuesFormat docValuesFormat;
+
+ @Inject
+ public MemoryDocValuesFormatProvider(@Assisted String name, @Assisted Settings docValuesFormatSettings) {
+ super(name);
+ this.docValuesFormat = new MemoryDocValuesFormat();
+ }
+
+ @Override
+ public DocValuesFormat get() {
+ return docValuesFormat;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/docvaluesformat/PreBuiltDocValuesFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/PreBuiltDocValuesFormatProvider.java
new file mode 100644
index 0000000..a15718c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/docvaluesformat/PreBuiltDocValuesFormatProvider.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.docvaluesformat;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Pre-built format provider which accepts no configuration options.
+ */
+public class PreBuiltDocValuesFormatProvider implements DocValuesFormatProvider {
+
+ public static final class Factory implements DocValuesFormatProvider.Factory {
+
+ private final PreBuiltDocValuesFormatProvider provider;
+
+ public Factory(DocValuesFormat docValuesFormat) {
+ this(docValuesFormat.getName(), docValuesFormat);
+ }
+
+ public Factory(String name, DocValuesFormat docValuesFormat) {
+ this.provider = new PreBuiltDocValuesFormatProvider(name, docValuesFormat);
+ }
+
+ public DocValuesFormatProvider get() {
+ return provider;
+ }
+
+ @Override
+ public DocValuesFormatProvider create(String name, Settings settings) {
+ return provider;
+ }
+
+ public String name() {
+ return provider.name();
+ }
+ }
+
+ private final String name;
+ private final DocValuesFormat docValuesFormat;
+
+ public PreBuiltDocValuesFormatProvider(DocValuesFormat docValuesFormat) {
+ this(docValuesFormat.getName(), docValuesFormat);
+ }
+
+ public PreBuiltDocValuesFormatProvider(String name, DocValuesFormat docValuesFormat) {
+ Preconditions.checkNotNull(docValuesFormat, "DocValuesFormat must not be null");
+ this.name = name;
+ this.docValuesFormat = docValuesFormat;
+ }
+
+ @Override
+ public String name() {
+ return name;
+ }
+
+ @Override
+ public DocValuesFormat get() {
+ return docValuesFormat;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/AbstractPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/AbstractPostingsFormatProvider.java
new file mode 100644
index 0000000..c3a84d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/AbstractPostingsFormatProvider.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+
+/**
+ * Simple abstract {@link PostingsFormatProvider} base class that holds the provider's name.
+ */
+public abstract class AbstractPostingsFormatProvider implements PostingsFormatProvider {
+
+ private final String name;
+
+ protected AbstractPostingsFormatProvider(String name) {
+ this.name = name;
+ }
+
+ public String name() {
+ return name;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterLucenePostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterLucenePostingsFormatProvider.java
new file mode 100644
index 0000000..f949c91
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterLucenePostingsFormatProvider.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.bloom.BloomFilterFactory;
+import org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat;
+import org.apache.lucene.codecs.bloom.FuzzySet;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.SegmentWriteState;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * A {@link PostingsFormatProvider} for Lucene's {@link BloomFilteringPostingsFormat},
+ * wrapping a delegate postings format with a bloom filter for faster
+ * primary-key style lookups.
+ */
+public class BloomFilterLucenePostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ public static final class Defaults {
+ public static final float MAX_SATURATION = 0.1f;
+ public static final float SATURATION_LIMIT = 0.9f;
+ }
+
+ private final float desiredMaxSaturation;
+ private final float saturationLimit;
+ private final PostingsFormatProvider delegate;
+ private final BloomFilteringPostingsFormat postingsFormat;
+
+ @Inject
+ public BloomFilterLucenePostingsFormatProvider(@IndexSettings Settings indexSettings, @Nullable Map<String, Factory> postingFormatFactories, @Assisted String name, @Assisted Settings postingsFormatSettings) {
+ super(name);
+ this.desiredMaxSaturation = postingsFormatSettings.getAsFloat("desired_max_saturation", Defaults.MAX_SATURATION);
+ this.saturationLimit = postingsFormatSettings.getAsFloat("saturation_limit", Defaults.SATURATION_LIMIT);
+ this.delegate = Helper.lookup(indexSettings, postingsFormatSettings.get("delegate"), postingFormatFactories);
+ this.postingsFormat = new BloomFilteringPostingsFormat(
+ delegate.get(),
+ new CustomBloomFilterFactory(desiredMaxSaturation, saturationLimit)
+ );
+ }
+
+ public float desiredMaxSaturation() {
+ return desiredMaxSaturation;
+ }
+
+ public float saturationLimit() {
+ return saturationLimit;
+ }
+
+ public PostingsFormatProvider delegate() {
+ return delegate;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+
+ public static class CustomBloomFilterFactory extends BloomFilterFactory {
+
+ private final float desiredMaxSaturation;
+ private final float saturationLimit;
+
+ public CustomBloomFilterFactory() {
+ this(Defaults.MAX_SATURATION, Defaults.SATURATION_LIMIT);
+ }
+
+ CustomBloomFilterFactory(float desiredMaxSaturation, float saturationLimit) {
+ this.desiredMaxSaturation = desiredMaxSaturation;
+ this.saturationLimit = saturationLimit;
+ }
+
+ @Override
+ public FuzzySet getSetForField(SegmentWriteState state, FieldInfo info) {
+ // Assume all docs have a unique term (e.g. a primary key) and aim to keep
+ // at most desiredMaxSaturation of the set's bits set.
+ return FuzzySet.createSetBasedOnQuality(state.segmentInfo.getDocCount(), desiredMaxSaturation);
+ }
+
+ @Override
+ public boolean isSaturated(FuzzySet bloomFilter, FieldInfo fieldInfo) {
+ // Don't bother saving bitsets if more than saturationLimit of the bits are set -
+ // we don't want to throw any more memory at this problem.
+ return bloomFilter.getSaturation() > saturationLimit;
+ }
+ }
+}
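A sketch of the settings this provider reads; the keys come from the constructor above, the values restate its defaults, and the "default" delegate name assumes such a factory is registered in the map:

    // import org.elasticsearch.common.settings.ImmutableSettings;
    Settings postingsFormatSettings = ImmutableSettings.settingsBuilder()
            .put("delegate", "default")           // resolved via Helper.lookup
            .put("desired_max_saturation", "0.1") // Defaults.MAX_SATURATION
            .put("saturation_limit", "0.9")       // Defaults.SATURATION_LIMIT
            .build();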
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java
new file mode 100644
index 0000000..842bb6a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java
@@ -0,0 +1,459 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.util.BloomFilter;
+import org.elasticsearch.index.store.DirectoryUtils;
+import org.elasticsearch.index.store.Store;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
+
+/**
+ * <p>
+ * A {@link PostingsFormat} useful for low doc-frequency fields such as primary
+ * keys. Bloom filters are maintained in a ".blm" file which offers "fast-fail"
+ * for reads in segments known to have no record of the key. A choice of
+ * delegate PostingsFormat is used to record all other Postings data.
+ * </p>
+ * <p>
+ * This is a special bloom filter version, based on {@link org.elasticsearch.common.util.BloomFilter} and inspired
+ * by Lucene {@link org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat}.
+ * </p>
+ */
+public final class BloomFilterPostingsFormat extends PostingsFormat {
+
+ public static final String BLOOM_CODEC_NAME = "XBloomFilter"; // the Lucene one is named BloomFilter
+ public static final int BLOOM_CODEC_VERSION = 1;
+
+ /**
+ * Extension of Bloom Filters file
+ */
+ static final String BLOOM_EXTENSION = "blm";
+
+ private BloomFilter.Factory bloomFilterFactory = BloomFilter.Factory.DEFAULT;
+ private PostingsFormat delegatePostingsFormat;
+
+ /**
+ * Creates Bloom filters for a selection of fields created in the index. This
+ * is recorded as a set of Bitsets held as a segment summary in an additional
+ * "blm" file. This PostingsFormat delegates to a choice of delegate
+ * PostingsFormat for encoding all other postings data.
+ *
+ * @param delegatePostingsFormat The PostingsFormat that records all the non-bloom filter data i.e.
+ * postings info.
+ * @param bloomFilterFactory The {@link BloomFilter.Factory} responsible for sizing BloomFilters
+ * appropriately
+ */
+ public BloomFilterPostingsFormat(PostingsFormat delegatePostingsFormat,
+ BloomFilter.Factory bloomFilterFactory) {
+ super(BLOOM_CODEC_NAME);
+ this.delegatePostingsFormat = delegatePostingsFormat;
+ this.bloomFilterFactory = bloomFilterFactory;
+ }
+
+ // Used only by core Lucene at read-time via Service Provider instantiation -
+ // do not use at Write-time in application code.
+ public BloomFilterPostingsFormat() {
+ super(BLOOM_CODEC_NAME);
+ }
+
+ @Override
+ public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state)
+ throws IOException {
+ if (delegatePostingsFormat == null) {
+ throw new UnsupportedOperationException("Error - " + getClass().getName()
+ + " has been constructed without a choice of PostingsFormat");
+ }
+ return new BloomFilteredFieldsConsumer(
+ delegatePostingsFormat.fieldsConsumer(state), state,
+ delegatePostingsFormat);
+ }
+
+ @Override
+ public BloomFilteredFieldsProducer fieldsProducer(SegmentReadState state)
+ throws IOException {
+ return new BloomFilteredFieldsProducer(state);
+ }
+
+ public final class BloomFilteredFieldsProducer extends FieldsProducer {
+ private FieldsProducer delegateFieldsProducer;
+ HashMap<String, BloomFilter> bloomsByFieldName = new HashMap<String, BloomFilter>();
+
+ // for internal use only
+ FieldsProducer getDelegate() {
+ return delegateFieldsProducer;
+ }
+
+ public BloomFilteredFieldsProducer(SegmentReadState state)
+ throws IOException {
+
+ String bloomFileName = IndexFileNames.segmentFileName(
+ state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION);
+ IndexInput bloomIn = null;
+ boolean success = false;
+ try {
+ bloomIn = state.directory.openInput(bloomFileName, state.context);
+ CodecUtil.checkHeader(bloomIn, BLOOM_CODEC_NAME, BLOOM_CODEC_VERSION,
+ BLOOM_CODEC_VERSION);
+ // // Load the hash function used in the BloomFilter
+ // hashFunction = HashFunction.forName(bloomIn.readString());
+ // Load the delegate postings format
+ PostingsFormat delegatePostingsFormat = PostingsFormat.forName(bloomIn
+ .readString());
+
+ this.delegateFieldsProducer = delegatePostingsFormat
+ .fieldsProducer(state);
+ int numBlooms = bloomIn.readInt();
+
+ boolean load = true;
+ Store.StoreDirectory storeDir = DirectoryUtils.getStoreDirectory(state.directory);
+ if (storeDir != null && storeDir.codecService() != null) {
+ load = storeDir.codecService().isLoadBloomFilter();
+ }
+
+ if (load && state.context.context != IOContext.Context.MERGE) {
+ // if we merge we don't need to load the bloom filters
+ for (int i = 0; i < numBlooms; i++) {
+ int fieldNum = bloomIn.readInt();
+ BloomFilter bloom = BloomFilter.deserialize(bloomIn);
+ FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNum);
+ bloomsByFieldName.put(fieldInfo.name, bloom);
+ }
+ }
+ IOUtils.close(bloomIn);
+ success = true;
+ } finally {
+ if (!success) {
+ IOUtils.closeWhileHandlingException(bloomIn, delegateFieldsProducer);
+ }
+ }
+ }
+
+ @Override
+ public Iterator<String> iterator() {
+ return delegateFieldsProducer.iterator();
+ }
+
+ @Override
+ public void close() throws IOException {
+ delegateFieldsProducer.close();
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ BloomFilter filter = bloomsByFieldName.get(field);
+ if (filter == null) {
+ return delegateFieldsProducer.terms(field);
+ } else {
+ Terms result = delegateFieldsProducer.terms(field);
+ if (result == null) {
+ return null;
+ }
+ return new BloomFilteredTerms(result, filter);
+ }
+ }
+
+ @Override
+ public int size() {
+ return delegateFieldsProducer.size();
+ }
+
+ public long getUniqueTermCount() throws IOException {
+ return delegateFieldsProducer.getUniqueTermCount();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ long size = delegateFieldsProducer.ramBytesUsed();
+ for (BloomFilter bloomFilter : bloomsByFieldName.values()) {
+ size += bloomFilter.getSizeInBytes();
+ }
+ return size;
+ }
+ }
+
+ public static final class BloomFilteredTerms extends FilterAtomicReader.FilterTerms {
+ private BloomFilter filter;
+
+ public BloomFilteredTerms(Terms terms, BloomFilter filter) {
+ super(terms);
+ this.filter = filter;
+ }
+
+ public BloomFilter getFilter() {
+ return filter;
+ }
+
+ @Override
+ public TermsEnum iterator(TermsEnum reuse) throws IOException {
+ TermsEnum result;
+ if ((reuse != null) && (reuse instanceof BloomFilteredTermsEnum)) {
+ // recycle the existing BloomFilteredTermsEnum by asking the delegate
+ // to recycle its contained TermsEnum
+ BloomFilteredTermsEnum bfte = (BloomFilteredTermsEnum) reuse;
+ if (bfte.filter == filter) {
+ bfte.reset(this.in);
+ return bfte;
+ }
+ reuse = bfte.reuse;
+ }
+ // We have been handed something we cannot reuse (either null, wrong
+ // class or wrong filter) so allocate a new object
+ result = new BloomFilteredTermsEnum(this.in, reuse, filter);
+ return result;
+ }
+ }
+
+ static final class BloomFilteredTermsEnum extends TermsEnum {
+
+ private Terms delegateTerms;
+ private TermsEnum delegateTermsEnum;
+ private TermsEnum reuse;
+ private BloomFilter filter;
+
+ public BloomFilteredTermsEnum(Terms other, TermsEnum reuse, BloomFilter filter) {
+ this.delegateTerms = other;
+ this.reuse = reuse;
+ this.filter = filter;
+ }
+
+ void reset(Terms others) {
+ reuse = this.delegateTermsEnum;
+ this.delegateTermsEnum = null;
+ this.delegateTerms = others;
+ }
+
+ private TermsEnum getDelegate() throws IOException {
+ if (delegateTermsEnum == null) {
+ /* pull the iterator only if we really need it -
+ * this can be a relatively heavy operation depending on the
+ * delegate postings format and the underlying directory
+ * (clone IndexInput) */
+ delegateTermsEnum = delegateTerms.iterator(reuse);
+ }
+ return delegateTermsEnum;
+ }
+
+ @Override
+ public final BytesRef next() throws IOException {
+ return getDelegate().next();
+ }
+
+ @Override
+ public final Comparator<BytesRef> getComparator() {
+ return delegateTerms.getComparator();
+ }
+
+ @Override
+ public final boolean seekExact(BytesRef text)
+ throws IOException {
+ // The magical fail-fast speed-up that is the entire point of all of
+ // this code: save a disk seek if there is a match on an in-memory
+ // structure that may occasionally give a false positive but is
+ // guaranteed to give no false negatives.
+ if (!filter.mightContain(text)) {
+ return false;
+ }
+ return getDelegate().seekExact(text);
+ }
+
+ @Override
+ public final SeekStatus seekCeil(BytesRef text)
+ throws IOException {
+ return getDelegate().seekCeil(text);
+ }
+
+ @Override
+ public final void seekExact(long ord) throws IOException {
+ getDelegate().seekExact(ord);
+ }
+
+ @Override
+ public final BytesRef term() throws IOException {
+ return getDelegate().term();
+ }
+
+ @Override
+ public final long ord() throws IOException {
+ return getDelegate().ord();
+ }
+
+ @Override
+ public final int docFreq() throws IOException {
+ return getDelegate().docFreq();
+ }
+
+ @Override
+ public final long totalTermFreq() throws IOException {
+ return getDelegate().totalTermFreq();
+ }
+
+
+ @Override
+ public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
+ DocsAndPositionsEnum reuse, int flags) throws IOException {
+ return getDelegate().docsAndPositions(liveDocs, reuse, flags);
+ }
+
+ @Override
+ public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags)
+ throws IOException {
+ return getDelegate().docs(liveDocs, reuse, flags);
+ }
+
+
+ }
+
+
+ final class BloomFilteredFieldsConsumer extends FieldsConsumer {
+ private FieldsConsumer delegateFieldsConsumer;
+ private Map<FieldInfo, BloomFilter> bloomFilters = new HashMap<FieldInfo, BloomFilter>();
+ private SegmentWriteState state;
+
+ // private PostingsFormat delegatePostingsFormat;
+
+ public BloomFilteredFieldsConsumer(FieldsConsumer fieldsConsumer,
+ SegmentWriteState state, PostingsFormat delegatePostingsFormat) {
+ this.delegateFieldsConsumer = fieldsConsumer;
+ // this.delegatePostingsFormat=delegatePostingsFormat;
+ this.state = state;
+ }
+
+ // for internal use only
+ FieldsConsumer getDelegate() {
+ return delegateFieldsConsumer;
+ }
+
+ @Override
+ public TermsConsumer addField(FieldInfo field) throws IOException {
+ BloomFilter bloomFilter = bloomFilterFactory.createFilter(state.segmentInfo.getDocCount());
+ if (bloomFilter != null) {
+ assert bloomFilters.containsKey(field) == false;
+ bloomFilters.put(field, bloomFilter);
+ return new WrappedTermsConsumer(delegateFieldsConsumer.addField(field), bloomFilter);
+ } else {
+ // Use the unfiltered fieldsConsumer - we are not interested in
+ // recording any term bitsets for this field.
+ return delegateFieldsConsumer.addField(field);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ delegateFieldsConsumer.close();
+ // Now we are done accumulating values for these fields
+ List<Entry<FieldInfo, BloomFilter>> nonSaturatedBlooms = new ArrayList<Map.Entry<FieldInfo, BloomFilter>>();
+
+ for (Entry<FieldInfo, BloomFilter> entry : bloomFilters.entrySet()) {
+ nonSaturatedBlooms.add(entry);
+ }
+ String bloomFileName = IndexFileNames.segmentFileName(
+ state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION);
+ IndexOutput bloomOutput = null;
+ try {
+ bloomOutput = state.directory
+ .createOutput(bloomFileName, state.context);
+ CodecUtil.writeHeader(bloomOutput, BLOOM_CODEC_NAME,
+ BLOOM_CODEC_VERSION);
+ // remember the name of the postings format we will delegate to
+ bloomOutput.writeString(delegatePostingsFormat.getName());
+
+ // First field in the output file is the number of fields+blooms saved
+ bloomOutput.writeInt(nonSaturatedBlooms.size());
+ for (Entry<FieldInfo, BloomFilter> entry : nonSaturatedBlooms) {
+ FieldInfo fieldInfo = entry.getKey();
+ BloomFilter bloomFilter = entry.getValue();
+ bloomOutput.writeInt(fieldInfo.number);
+ saveAppropriatelySizedBloomFilter(bloomOutput, bloomFilter, fieldInfo);
+ }
+ } finally {
+ IOUtils.close(bloomOutput);
+ }
+ //We are done with large bitsets so no need to keep them hanging around
+ bloomFilters.clear();
+ }
+
+ private void saveAppropriatelySizedBloomFilter(IndexOutput bloomOutput,
+ BloomFilter bloomFilter, FieldInfo fieldInfo) throws IOException {
+
+// FuzzySet rightSizedSet = bloomFilterFactory.downsize(fieldInfo,
+// bloomFilter);
+// if (rightSizedSet == null) {
+// rightSizedSet = bloomFilter;
+// }
+// rightSizedSet.serialize(bloomOutput);
+ // note: "serilaize" is the (misspelled) name of the actual method on BloomFilter
+ BloomFilter.serilaize(bloomFilter, bloomOutput);
+ }
+
+ }
+
+ class WrappedTermsConsumer extends TermsConsumer {
+ private TermsConsumer delegateTermsConsumer;
+ private BloomFilter bloomFilter;
+
+ public WrappedTermsConsumer(TermsConsumer termsConsumer, BloomFilter bloomFilter) {
+ this.delegateTermsConsumer = termsConsumer;
+ this.bloomFilter = bloomFilter;
+ }
+
+ @Override
+ public PostingsConsumer startTerm(BytesRef text) throws IOException {
+ return delegateTermsConsumer.startTerm(text);
+ }
+
+ @Override
+ public void finishTerm(BytesRef text, TermStats stats) throws IOException {
+
+ // Record this term in our BloomFilter
+ if (stats.docFreq > 0) {
+ bloomFilter.put(text);
+ }
+ delegateTermsConsumer.finishTerm(text, stats);
+ }
+
+ @Override
+ public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount)
+ throws IOException {
+ delegateTermsConsumer.finish(sumTotalTermFreq, sumDocFreq, docCount);
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() throws IOException {
+ return delegateTermsConsumer.getComparator();
+ }
+
+ }
+
+ public PostingsFormat getDelegate() {
+ return this.delegatePostingsFormat;
+ }
+
+}
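A write-time construction sketch; as the constructor comments above note, the no-argument constructor is reserved for Lucene's read-time SPI, so writers must supply a delegate:

    PostingsFormat delegate = PostingsFormat.forName("Lucene41");
    PostingsFormat bloom = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT);
    // At read time the delegate's name is recovered from the ".blm" file header.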
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormatProvider.java
new file mode 100644
index 0000000..1532b8f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormatProvider.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BloomFilter;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * A {@link PostingsFormatProvider} for the Elasticsearch {@link BloomFilterPostingsFormat}.
+ * It wraps a <tt>delegate</tt> postings format and accepts an <tt>fpp</tt> setting
+ * controlling the bloom filter's false-positive probability.
+ */
+public class BloomFilterPostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ private final PostingsFormatProvider delegate;
+ private final BloomFilterPostingsFormat postingsFormat;
+
+ @Inject
+ public BloomFilterPostingsFormatProvider(@IndexSettings Settings indexSettings, @Nullable Map<String, Factory> postingFormatFactories, @Assisted String name, @Assisted Settings postingsFormatSettings) {
+ super(name);
+ this.delegate = Helper.lookup(indexSettings, postingsFormatSettings.get("delegate"), postingFormatFactories);
+ this.postingsFormat = new BloomFilterPostingsFormat(
+ delegate.get(),
+ BloomFilter.Factory.buildFromString(postingsFormatSettings.get("fpp"))
+ );
+ }
+
+ public PostingsFormatProvider delegate() {
+ return delegate;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+}
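A settings sketch for this provider; "0.01" is an assumed example value for the false-positive probability handed to BloomFilter.Factory.buildFromString, and "default" assumes such a delegate factory exists:

    Settings s = ImmutableSettings.settingsBuilder()
            .put("delegate", "default")
            .put("fpp", "0.01") // assumed example value
            .build();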
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/DefaultPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/DefaultPostingsFormatProvider.java
new file mode 100644
index 0000000..3d9b7d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/DefaultPostingsFormatProvider.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.BlockTreeTermsWriter;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * The default postings format; maps to {@link Lucene41PostingsFormat}.
+ * <ul>
+ * <li><tt>min_block_size</tt>: the minimum block size the default Lucene term
+ * dictionary uses to encode on-disk blocks.</li>
+ *
+ * <li><tt>max_block_size</tt>: the maximum block size the default Lucene term
+ * dictionary uses to encode on-disk blocks.</li>
+ * </ul>
+ */
+// LUCENE UPGRADE: Check if type of field postingsFormat needs to be updated!
+public class DefaultPostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ private final int minBlockSize;
+ private final int maxBlockSize;
+ private final Lucene41PostingsFormat postingsFormat;
+
+ @Inject
+ public DefaultPostingsFormatProvider(@Assisted String name, @Assisted Settings postingsFormatSettings) {
+ super(name);
+ this.minBlockSize = postingsFormatSettings.getAsInt("min_block_size", BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE);
+ this.maxBlockSize = postingsFormatSettings.getAsInt("max_block_size", BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
+ this.postingsFormat = new Lucene41PostingsFormat(minBlockSize, maxBlockSize);
+ }
+
+ public int minBlockSize() {
+ return minBlockSize;
+ }
+
+ public int maxBlockSize() {
+ return maxBlockSize;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+}
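A sketch showing the two block-size knobs read by the constructor above; 25 and 48 are the BlockTreeTermsWriter defaults that apply when the keys are absent:

    Settings s = ImmutableSettings.settingsBuilder()
            .put("min_block_size", "25") // BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE
            .put("max_block_size", "48") // BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE
            .build();
    DefaultPostingsFormatProvider provider = new DefaultPostingsFormatProvider("default", s);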
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/DirectPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/DirectPostingsFormatProvider.java
new file mode 100644
index 0000000..430fb29
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/DirectPostingsFormatProvider.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.memory.DirectPostingsFormat;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A {@link PostingsFormatProvider} for {@link DirectPostingsFormat}. This
+ * postings format uses on-disk storage for its terms and posting lists and
+ * streams its data during segment merges, but loads all of its postings, terms
+ * and positions into memory for faster search performance. This format has a
+ * significant memory footprint and should be used with care. This postings
+ * format offers the following parameters:
+ * <ul>
+ * <li><tt>min_skip_count</tt>: the minimum number of terms with a shared prefix
+ * required before a skip pointer is written. The default is <tt>8</tt>.</li>
+ *
+ * <li><tt>low_freq_cutoff</tt>: terms with a document frequency at or below this
+ * cutoff use a single-array representation for postings and positions. The default is <tt>32</tt>.</li>
+ * </ul>
+ *
+ * @see DirectPostingsFormat
+ *
+ */
+public class DirectPostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ private final int minSkipCount;
+ private final int lowFreqCutoff;
+ private final DirectPostingsFormat postingsFormat;
+
+ @Inject
+ public DirectPostingsFormatProvider(@Assisted String name, @Assisted Settings postingsFormatSettings) {
+ super(name);
+ this.minSkipCount = postingsFormatSettings.getAsInt("min_skip_count", 8); // See DirectPostingsFormat#DEFAULT_MIN_SKIP_COUNT
+ this.lowFreqCutoff = postingsFormatSettings.getAsInt("low_freq_cutoff", 32); // See DirectPostingsFormat#DEFAULT_LOW_FREQ_CUTOFF
+ this.postingsFormat = new DirectPostingsFormat(minSkipCount, lowFreqCutoff);
+ }
+
+ public int minSkipCount() {
+ return minSkipCount;
+ }
+
+ public int lowFreqCutoff() {
+ return lowFreqCutoff;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+}
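A sketch of the two parameters read above, set to their documented defaults:

    Settings s = ImmutableSettings.settingsBuilder()
            .put("min_skip_count", "8")
            .put("low_freq_cutoff", "32")
            .build();
    DirectPostingsFormatProvider provider = new DirectPostingsFormatProvider("direct", s);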
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java
new file mode 100644
index 0000000..9ad8a9d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.TermsConsumer;
+import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+import org.elasticsearch.common.util.BloomFilter;
+import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+
+/**
+ * This is the default postings format for Elasticsearch. It special-cases
+ * the <tt>_uid</tt> field to use a bloom filter while all other fields
+ * use a {@link Lucene41PostingsFormat}. This format reuses the underlying
+ * {@link Lucene41PostingsFormat} and its files for the <tt>_uid</tt> field as well,
+ * saving up to 5 files per segment in the default case.
+ */
+public final class Elasticsearch090PostingsFormat extends PostingsFormat {
+ private final BloomFilterPostingsFormat bloomPostings;
+
+ public Elasticsearch090PostingsFormat() {
+ super("es090");
+ bloomPostings = new BloomFilterPostingsFormat(new Lucene41PostingsFormat(), BloomFilter.Factory.DEFAULT);
+ }
+
+ public PostingsFormat getDefaultWrapped() {
+ return bloomPostings.getDelegate();
+ }
+
+ @Override
+ public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
+ final BloomFilteredFieldsConsumer fieldsConsumer = bloomPostings.fieldsConsumer(state);
+ return new FieldsConsumer() {
+
+ @Override
+ public void close() throws IOException {
+ fieldsConsumer.close();
+ }
+
+ @Override
+ public TermsConsumer addField(FieldInfo field) throws IOException {
+ if (UidFieldMapper.NAME.equals(field.name)) {
+ // only go through bloom for the UID field
+ return fieldsConsumer.addField(field);
+ }
+ return fieldsConsumer.getDelegate().addField(field);
+ }
+ };
+ }
+
+ @Override
+ public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
+ // we can just return the delegate here since we didn't record bloom filters for
+ // the other fields.
+ return bloomPostings.fieldsProducer(state);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/MemoryPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/MemoryPostingsFormatProvider.java
new file mode 100644
index 0000000..3e0fe00
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/MemoryPostingsFormatProvider.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A {@link PostingsFormatProvider} for Lucene's {@link MemoryPostingsFormat}.
+ * This postings format offers the following parameters:
+ * <ul>
+ * <li><tt>pack_fst</tt>: <code>true</code> if the in-memory structure should
+ * be packed once it is built. Packing reduces the in-memory size of the
+ * data structure but requires more memory while building. Default is <code>false</code>.</li>
+ *
+ * <li><tt>acceptable_overhead_ratio</tt>: the compression overhead used to
+ * compress internal structures. See {@link PackedInts} for details. Default is {@value PackedInts#DEFAULT}.</li>
+ * </ul>
+ */
+public class MemoryPostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ private final boolean packFst;
+ private final float acceptableOverheadRatio;
+ private final MemoryPostingsFormat postingsFormat;
+
+ @Inject
+ public MemoryPostingsFormatProvider(@Assisted String name, @Assisted Settings postingsFormatSettings) {
+ super(name);
+ this.packFst = postingsFormatSettings.getAsBoolean("pack_fst", false);
+ this.acceptableOverheadRatio = postingsFormatSettings.getAsFloat("acceptable_overhead_ratio", PackedInts.DEFAULT);
+ // TODO this should really be an ENUM?
+ this.postingsFormat = new MemoryPostingsFormat(packFst, acceptableOverheadRatio);
+ }
+
+ public boolean packFst() {
+ return packFst;
+ }
+
+ public float acceptableOverheadRatio() {
+ return acceptableOverheadRatio;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+}
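A sketch enabling FST packing; the overhead ratio value simply restates the PackedInts default:

    Settings s = ImmutableSettings.settingsBuilder()
            .put("pack_fst", "true")
            .put("acceptable_overhead_ratio", String.valueOf(PackedInts.DEFAULT))
            .build();
    MemoryPostingsFormatProvider provider = new MemoryPostingsFormatProvider("memory", s);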
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingFormats.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingFormats.java
new file mode 100644
index 0000000..86d5664
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingFormats.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat;
+import org.apache.lucene.codecs.memory.DirectPostingsFormat;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.util.BloomFilter;
+
+/**
+ * This class represents the set of Elasticsearch "built-in"
+ * {@link PostingsFormatProvider.Factory postings format factories}:
+ * <ul>
+ * <li><b>direct</b>: a postings format that uses disk-based storage but loads
+ * its terms and postings directly into memory. Note this postings format is
+ * very memory intensive and has a limitation that prevents segments from
+ * growing beyond 2.1 GB; see {@link DirectPostingsFormat} for details.</li>
+ * <p/>
+ * <li><b>memory</b>: a postings format that stores all of its terms, postings,
+ * positions and payloads in a finite state transducer. This format should only
+ * be used for primary keys or for fields where each term is contained in a
+ * very low number of documents.</li>
+ * <p/>
+ * <li><b>pulsing</b>: a postings format that in-lines the posting lists for very
+ * low-frequency terms in the term dictionary. This is useful to improve lookup
+ * performance for low-frequency terms.</li>
+ * <p/>
+ * <li><b>bloom_default</b>: a postings format that uses a bloom filter to
+ * improve term lookup performance. This is useful for primary keys or fields
+ * that are used as a delete key.</li>
+ * <p/>
+ * <li><b>bloom_pulsing</b>: a postings format that combines the advantages of
+ * <b>bloom</b> and <b>pulsing</b> to further improve lookup performance</li>
+ * <p/>
+ * <li><b>default</b>: the default Elasticsearch postings format offering best
+ * general purpose performance. This format is used if no postings format is
+ * specified in the field mapping.</li>
+ * </ul>
+ */
+public class PostingFormats {
+
+ private static final ImmutableMap<String, PreBuiltPostingsFormatProvider.Factory> builtInPostingFormats;
+
+ static {
+ MapBuilder<String, PreBuiltPostingsFormatProvider.Factory> buildInPostingFormatsX = MapBuilder.newMapBuilder();
+ // add defaults ones
+ for (String luceneName : PostingsFormat.availablePostingsFormats()) {
+ buildInPostingFormatsX.put(luceneName, new PreBuiltPostingsFormatProvider.Factory(PostingsFormat.forName(luceneName)));
+ }
+ final Elasticsearch090PostingsFormat defaultFormat = new Elasticsearch090PostingsFormat();
+ buildInPostingFormatsX.put("direct", new PreBuiltPostingsFormatProvider.Factory("direct", PostingsFormat.forName("Direct")));
+ buildInPostingFormatsX.put("memory", new PreBuiltPostingsFormatProvider.Factory("memory", PostingsFormat.forName("Memory")));
+ // LUCENE UPGRADE: Need to change this to the relevant ones on a lucene upgrade
+ buildInPostingFormatsX.put("pulsing", new PreBuiltPostingsFormatProvider.Factory("pulsing", PostingsFormat.forName("Pulsing41")));
+ buildInPostingFormatsX.put(PostingsFormatService.DEFAULT_FORMAT, new PreBuiltPostingsFormatProvider.Factory(PostingsFormatService.DEFAULT_FORMAT, defaultFormat));
+
+ buildInPostingFormatsX.put("bloom_pulsing", new PreBuiltPostingsFormatProvider.Factory("bloom_pulsing", wrapInBloom(PostingsFormat.forName("Pulsing41"))));
+ buildInPostingFormatsX.put("bloom_default", new PreBuiltPostingsFormatProvider.Factory("bloom_default", wrapInBloom(PostingsFormat.forName("Lucene41"))));
+
+ builtInPostingFormats = buildInPostingFormatsX.immutableMap();
+ }
+
+ public static final boolean luceneBloomFilter = false;
+
+ static PostingsFormat wrapInBloom(PostingsFormat delegate) {
+ if (luceneBloomFilter) {
+ return new BloomFilteringPostingsFormat(delegate, new BloomFilterLucenePostingsFormatProvider.CustomBloomFilterFactory());
+ }
+ return new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT);
+ }
+
+ public static PostingsFormatProvider.Factory getAsFactory(String name) {
+ return builtInPostingFormats.get(name);
+ }
+
+ public static PostingsFormatProvider getAsProvider(String name) {
+ final PreBuiltPostingsFormatProvider.Factory factory = builtInPostingFormats.get(name);
+ return factory == null ? null : factory.get();
+ }
+
+ public static ImmutableCollection<PreBuiltPostingsFormatProvider.Factory> listFactories() {
+ return builtInPostingFormats.values();
+ }
+}
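A lookup sketch against the static registry above:

    PostingsFormatProvider direct = PostingFormats.getAsProvider("direct");
    PostingsFormatProvider bloom = PostingFormats.getAsProvider("bloom_default");
    // Unknown names yield null rather than an exception.
    assert PostingFormats.getAsProvider("no_such_format") == null;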
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatProvider.java
new file mode 100644
index 0000000..4a0d32c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatProvider.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * A {@link PostingsFormatProvider} acts as a named container for specific
+ * {@link PostingsFormat} implementations. Custom {@link PostingsFormat}
+ * implementations can be exposed via
+ * {@link CodecModule#addPostingFormat(String, Class)}
+ * <p>
+ * Each {@link PostingsFormatProvider} must provide a unique name for its
+ * postings format in order to map the postings format to a specific field via
+ * the mapping API. The name provided via {@link #name()} is used to look up the
+ * postings format in {@link PostingsFormatService#get(String)} and should be
+ * identical to the values used in the field mappings.
+ * </p>
+ * <p>
+ * {@link PostingsFormatProvider} instances are initialized with a
+ * {@link Settings} subset below the
+ * {@value PostingsFormatProvider#POSTINGS_FORMAT_SETTINGS_PREFIX} prefix and
+ * will only see the sub-tree below their mapping name. For instance a postings
+ * format <tt>ElasticFantastic</tt> will see settings below
+ * <tt>index.codec.postings_format.elastic_fantastic</tt> given that the
+ * postings format is exposed via
+ * <tt>index.codec.postings_format.elastic_fantastic.type : "ElasticFantastic"</tt>.
+ * </p>
+ *
+ * @see CodecModule
+ */
+public interface PostingsFormatProvider {
+ public static final String POSTINGS_FORMAT_SETTINGS_PREFIX = "index.codec.postings_format";
+
+ /**
+ * A helper class to look up {@link PostingsFormatProvider providers} by their unique {@link PostingsFormatProvider#name() name}
+ */
+ public static class Helper {
+
+ /**
+ * Looks up and creates a {@link PostingsFormatProvider} for the given name.
+ * <p>
+ * The settings for the created {@link PostingsFormatProvider} are taken from the given index settings.
+ * All settings with the {@value PostingsFormatProvider#POSTINGS_FORMAT_SETTINGS_PREFIX} prefix
+ * and the format's name as the key are passed to the factory.
+ * </p>
+ *
+ * @param indexSettings the index settings to configure the postings format
+ * @param name the name of the postings format to lookup
+ * @param postingFormatFactories the factory mapping to lookup the {@link Factory} to create the {@link PostingsFormatProvider}
+ * @return a fully configured {@link PostingsFormatProvider} for the given name.
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException
+ * if no {@link PostingsFormatProvider} for the given name could be found.
+ */
+ public static PostingsFormatProvider lookup(@IndexSettings Settings indexSettings, String name, Map<String, Factory> postingFormatFactories) throws ElasticsearchIllegalArgumentException {
+ Factory factory = postingFormatFactories.get(name);
+ if (factory == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find postings_format [" + name + "]");
+ }
+ Settings settings = indexSettings.getGroups(POSTINGS_FORMAT_SETTINGS_PREFIX).get(name);
+ if (settings == null) {
+ settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+ return factory.create(name, settings);
+ }
+ }
+
+ /**
+ * Returns this providers {@link PostingsFormat} instance.
+ */
+ PostingsFormat get();
+
+ /**
+ * Returns the name of this providers {@link PostingsFormat}
+ */
+ String name();
+
+ /**
+ * A simple factory used to create {@link PostingsFormatProvider} used by
+ * delegating providers like {@link BloomFilterLucenePostingsFormatProvider} or
+ * {@link PulsingPostingsFormatProvider}. Those providers wrap other
+ * postings formats to enrich their capabilities.
+ */
+ public interface Factory {
+ PostingsFormatProvider create(String name, Settings settings);
+ }
+}
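A sketch of the settings layout described in the class javadoc; "my_format" is an illustrative name and `factories` stands for the map that the CodecModule normally supplies:

    Settings indexSettings = ImmutableSettings.settingsBuilder()
            .put("index.codec.postings_format.my_format.type", "pulsing")
            .put("index.codec.postings_format.my_format.freq_cut_off", "5")
            .build();
    // factories: Map<String, PostingsFormatProvider.Factory>, assumed to contain "my_format"
    PostingsFormatProvider provider =
            PostingsFormatProvider.Helper.lookup(indexSettings, "my_format", factories);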
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatService.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatService.java
new file mode 100644
index 0000000..fda59c4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/PostingsFormatService.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * The {@link PostingsFormatService} provides access to
+ * all configured {@link PostingsFormatProvider} instances by
+ * {@link PostingsFormatProvider#name() name}.
+ *
+ * @see CodecService
+ */
+public class PostingsFormatService extends AbstractIndexComponent {
+
+ private final ImmutableMap<String, PostingsFormatProvider> providers;
+
+ public final static String DEFAULT_FORMAT = "default";
+
+ public PostingsFormatService(Index index) {
+ this(index, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public PostingsFormatService(Index index, @IndexSettings Settings indexSettings) {
+ this(index, indexSettings, ImmutableMap.<String, PostingsFormatProvider.Factory>of());
+ }
+
+ @Inject
+ public PostingsFormatService(Index index, @IndexSettings Settings indexSettings, Map<String, PostingsFormatProvider.Factory> postingFormatFactories) {
+ super(index, indexSettings);
+
+ MapBuilder<String, PostingsFormatProvider> providers = MapBuilder.newMapBuilder();
+
+ Map<String, Settings> postingsFormatSettings = indexSettings.getGroups(PostingsFormatProvider.POSTINGS_FORMAT_SETTINGS_PREFIX);
+ for (Map.Entry<String, PostingsFormatProvider.Factory> entry : postingFormatFactories.entrySet()) {
+ String name = entry.getKey();
+ PostingsFormatProvider.Factory factory = entry.getValue();
+
+ Settings settings = postingsFormatSettings.get(name);
+ if (settings == null) {
+ settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+ providers.put(name, factory.create(name, settings));
+ }
+
+ // This is only needed for tests, when Guice doesn't have the chance to populate the list of postings format factories
+ for (PreBuiltPostingsFormatProvider.Factory factory : PostingFormats.listFactories()) {
+ if (providers.containsKey(factory.name())) {
+ continue;
+ }
+ providers.put(factory.name(), factory.get());
+ }
+
+ this.providers = providers.immutableMap();
+ }
+
+ public PostingsFormatProvider get(String name) throws ElasticsearchIllegalArgumentException {
+ PostingsFormatProvider provider = providers.get(name);
+ if (provider == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find postings_format [" + name + "]");
+ }
+ return provider;
+ }
+}
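A usage sketch for the test-time path mentioned in the constructor comment above, where only the built-in factories are available:

    PostingsFormatService service = new PostingsFormatService(new Index("my_index"));
    PostingsFormatProvider provider = service.get("default"); // throws for unknown names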
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/PreBuiltPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/PreBuiltPostingsFormatProvider.java
new file mode 100644
index 0000000..603d69a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/PreBuiltPostingsFormatProvider.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Pre-built postings format provider which accepts no configuration options.
+ */
+public class PreBuiltPostingsFormatProvider implements PostingsFormatProvider {
+
+ public static final class Factory implements PostingsFormatProvider.Factory {
+
+ private final PreBuiltPostingsFormatProvider provider;
+
+ public Factory(PostingsFormat postingsFormat) {
+ this(postingsFormat.getName(), postingsFormat);
+ }
+
+ public Factory(String name, PostingsFormat postingsFormat) {
+ this.provider = new PreBuiltPostingsFormatProvider(name, postingsFormat);
+ }
+
+ public PostingsFormatProvider get() {
+ return provider;
+ }
+
+ @Override
+ public PostingsFormatProvider create(String name, Settings settings) {
+ return provider;
+ }
+
+ public String name() {
+ return provider.name();
+ }
+ }
+
+ private final String name;
+ private final PostingsFormat postingsFormat;
+
+ public PreBuiltPostingsFormatProvider(PostingsFormat postingsFormat) {
+ this(postingsFormat.getName(), postingsFormat);
+ }
+
+ public PreBuiltPostingsFormatProvider(String name, PostingsFormat postingsFormat) {
+ if (postingsFormat == null) {
+ throw new IllegalArgumentException("PostingsFormat must not be null");
+ }
+ this.name = name;
+ this.postingsFormat = postingsFormat;
+ }
+
+ @Override
+ public String name() {
+ return name;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/PulsingPostingsFormatProvider.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/PulsingPostingsFormatProvider.java
new file mode 100644
index 0000000..6c6525f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/PulsingPostingsFormatProvider.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingsformat;
+
+import org.apache.lucene.codecs.BlockTreeTermsWriter;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.pulsing.Pulsing41PostingsFormat;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A {@link PostingsFormatProvider} for Lucene's {@link Pulsing41PostingsFormat}.
+ * The pulsing implementation in-lines the posting lists for very low-frequency
+ * terms in the term dictionary. This is useful to improve lookup performance
+ * for low-frequency terms. This postings format offers the following parameters:
+ * <ul>
+ * <li><tt>min_block_size</tt>: the minimum block size the default Lucene term
+ * dictionary uses to encode on-disk blocks.</li>
+ *
+ * <li><tt>max_block_size</tt>: the maximum block size the default Lucene term
+ * dictionary uses to encode on-disk blocks.</li>
+ *
+ * <li><tt>freq_cut_off</tt>: the document frequency cut off where pulsing
+ * in-lines posting lists into the term dictionary. Terms with a document
+ * frequency less or equal to the cutoff will be in-lined. The default is
+ * <tt>1</tt></li>
+ * </ul>
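+ *
+ * <p>A configuration sketch (illustrative only; the keys are the parameter names
+ * above, and the provider name is hypothetical):
+ * <pre>
+ * Settings formatSettings = ImmutableSettings.settingsBuilder()
+ *         .put("freq_cut_off", 2)      // in-line postings for docFreq &lt;= 2
+ *         .put("min_block_size", 25)
+ *         .put("max_block_size", 48)
+ *         .build();
+ * PostingsFormatProvider provider =
+ *         new PulsingPostingsFormatProvider("my_pulsing", formatSettings);
+ * PostingsFormat format = provider.get();
+ * </pre>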
+ */
+// LUCENE UPGRADE: Check if type of field postingsFormat needs to be updated!
+public class PulsingPostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ private final int freqCutOff;
+ private final int minBlockSize;
+ private final int maxBlockSize;
+ private final Pulsing41PostingsFormat postingsFormat;
+
+ @Inject
+ public PulsingPostingsFormatProvider(@Assisted String name, @Assisted Settings postingsFormatSettings) {
+ super(name);
+ this.freqCutOff = postingsFormatSettings.getAsInt("freq_cut_off", 1);
+ this.minBlockSize = postingsFormatSettings.getAsInt("min_block_size", BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE);
+ this.maxBlockSize = postingsFormatSettings.getAsInt("max_block_size", BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
+ this.postingsFormat = new Pulsing41PostingsFormat(freqCutOff, minBlockSize, maxBlockSize);
+ }
+
+ public int freqCutOff() {
+ return freqCutOff;
+ }
+
+ public int minBlockSize() {
+ return minBlockSize;
+ }
+
+ public int maxBlockSize() {
+ return maxBlockSize;
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/AbstractESDeletionPolicy.java b/src/main/java/org/elasticsearch/index/deletionpolicy/AbstractESDeletionPolicy.java
new file mode 100644
index 0000000..fc4acff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/AbstractESDeletionPolicy.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.IndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+abstract class AbstractESDeletionPolicy extends IndexDeletionPolicy implements IndexShardComponent {
+
+ protected final ESLogger logger;
+
+ protected final ShardId shardId;
+
+ protected final Settings indexSettings;
+
+ protected final Settings componentSettings;
+
+ protected AbstractESDeletionPolicy(ShardId shardId, @IndexSettings Settings indexSettings) {
+ this.shardId = shardId;
+ this.indexSettings = indexSettings;
+ this.componentSettings = indexSettings.getComponentSettings(getClass());
+
+ this.logger = Loggers.getLogger(getClass(), indexSettings, shardId);
+ }
+
+ protected AbstractESDeletionPolicy(ShardId shardId, @IndexSettings Settings indexSettings, String prefixSettings) {
+ this.shardId = shardId;
+ this.indexSettings = indexSettings;
+ this.componentSettings = indexSettings.getComponentSettings(prefixSettings, getClass());
+
+ this.logger = Loggers.getLogger(getClass(), indexSettings, shardId);
+ }
+
+ @Override
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public Settings indexSettings() {
+ return this.indexSettings;
+ }
+
+ public String nodeName() {
+ return indexSettings.get("name", "");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/DeletionPolicyModule.java b/src/main/java/org/elasticsearch/index/deletionpolicy/DeletionPolicyModule.java
new file mode 100644
index 0000000..35f370d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/DeletionPolicyModule.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.name.Names;
+import org.elasticsearch.common.settings.Settings;
+
+import static org.elasticsearch.index.deletionpolicy.DeletionPolicyModule.DeletionPolicySettings.TYPE;
+
+/**
+ * Binds the {@link IndexDeletionPolicy} to use for an index, as configured via
+ * {@link DeletionPolicySettings#TYPE}, and exposes it wrapped in a {@link SnapshotDeletionPolicy}.
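+ *
+ * <p>For illustration, a non-default policy could be configured through the
+ * {@link DeletionPolicySettings#TYPE} index setting (class name shown as an example):
+ * <pre>
+ * index.deletionpolicy.type: org.elasticsearch.index.deletionpolicy.KeepLastNDeletionPolicy
+ * </pre>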
+ */
+public class DeletionPolicyModule extends AbstractModule {
+
+ public static class DeletionPolicySettings {
+ public static final String TYPE = "index.deletionpolicy.type";
+ }
+
+ private final Settings settings;
+
+ public DeletionPolicyModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndexDeletionPolicy.class)
+ .annotatedWith(Names.named("actual"))
+ .to(settings.getAsClass(TYPE, KeepOnlyLastDeletionPolicy.class))
+ .asEagerSingleton();
+
+ bind(SnapshotDeletionPolicy.class)
+ .asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java b/src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java
new file mode 100644
index 0000000..996a885
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/KeepLastNDeletionPolicy.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A deletion policy that keeps the last <tt>num_to_keep</tt> commits (default <tt>5</tt>)
+ * and deletes all older ones.
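+ *
+ * <p>A sketch mirroring the constructor below (the settings object and the
+ * <tt>num_to_keep</tt> value are illustrative):
+ * <pre>
+ * Settings componentSettings = ImmutableSettings.settingsBuilder()
+ *         .put("num_to_keep", 10)
+ *         .build();
+ * int numToKeep = componentSettings.getAsInt("num_to_keep", 5); // 10
+ * </pre>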
+ */
+public class KeepLastNDeletionPolicy extends AbstractESDeletionPolicy {
+
+ private final int numToKeep;
+
+ @Inject
+ public KeepLastNDeletionPolicy(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ this.numToKeep = componentSettings.getAsInt("num_to_keep", 5);
+ logger.debug("Using [keep_last_n] deletion policy with num_to_keep[{}]", numToKeep);
+ }
+
+ public void onInit(List<? extends IndexCommit> commits) throws IOException {
+ // apply the same trimming on init as on every commit
+ doDeletes(commits);
+ }
+
+ public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+ doDeletes(commits);
+ }
+
+ private void doDeletes(List<? extends IndexCommit> commits) {
+ int size = commits.size();
+ for (int i = 0; i < size - numToKeep; i++) {
+ commits.get(i).delete();
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java b/src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java
new file mode 100644
index 0000000..4454695
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/KeepOnlyLastDeletionPolicy.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.index.IndexCommit;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.List;
+
+/**
+ * An {@link org.apache.lucene.index.IndexDeletionPolicy} implementation that
+ * keeps only the most recent commit and immediately removes
+ * all prior commits once a new commit is done. This is
+ * the default deletion policy.
+ */
+public class KeepOnlyLastDeletionPolicy extends AbstractESDeletionPolicy {
+
+ @Inject
+ public KeepOnlyLastDeletionPolicy(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ logger.debug("Using [keep_only_last] deletion policy");
+ }
+
+ /**
+ * Deletes all commits except the most recent one.
+ */
+ public void onInit(List<? extends IndexCommit> commits) {
+ // Note that commits.size() should normally be 1:
+ onCommit(commits);
+ }
+
+ /**
+ * Deletes all commits except the most recent one.
+ */
+ public void onCommit(List<? extends IndexCommit> commits) {
+ // Note that commits.size() should normally be 2 (if not
+ // called by onInit above):
+ int size = commits.size();
+ for (int i = 0; i < size - 1; i++) {
+ commits.get(i).delete();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java b/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java
new file mode 100644
index 0000000..5a5d95e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicy.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.name.Named;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.IndexShardComponent;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * A deletion policy that allows taking snapshots of the index state (the last commit or all commits).
+ * As long as the policy is used by all open index writers (on the JVM level), a snapshotted
+ * state will not be deleted until it is released.
+ *
+ *
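+ * <p>A minimal usage sketch (error handling omitted):
+ * <pre>
+ * SnapshotIndexCommit snapshot = snapshotDeletionPolicy.snapshot();
+ * try {
+ *     for (String file : snapshot.getFiles()) {
+ *         // copy the commit's files somewhere safe
+ *     }
+ * } finally {
+ *     snapshot.release(); // allow the commit to be deleted again
+ * }
+ * </pre>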
+ */
+public class SnapshotDeletionPolicy extends AbstractESDeletionPolicy {
+
+ private final IndexDeletionPolicy primary;
+
+ private final ConcurrentMap<Long, SnapshotHolder> snapshots = ConcurrentCollections.newConcurrentMap();
+
+ private volatile List<SnapshotIndexCommit> commits;
+
+ private final Object mutex = new Object();
+
+ private SnapshotIndexCommit lastCommit;
+
+ /**
+ * Constructs a new snapshot deletion policy that wraps the provided deletion policy.
+ */
+ @Inject
+ public SnapshotDeletionPolicy(@Named("actual") IndexDeletionPolicy primary) {
+ super(((IndexShardComponent) primary).shardId(), ((IndexShardComponent) primary).indexSettings());
+ this.primary = primary;
+ }
+
+ /**
+ * Called by Lucene. Same as {@link #onCommit(java.util.List)}.
+ */
+ public void onInit(List<? extends IndexCommit> commits) throws IOException {
+ if (!commits.isEmpty()) { // the commits list might be empty if we create a new index;
+ // since Lucene 4.4, onInit is called even with an empty commits list.
+ onCommit(commits);
+ }
+ }
+
+ /**
+ * Called by Lucene. Wraps the provided commits with {@link SnapshotIndexCommit}
+ * and delegates to the wrapped deletion policy.
+ */
+ public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+ assert !commits.isEmpty() : "Commits must not be empty";
+ synchronized (mutex) {
+ List<SnapshotIndexCommit> snapshotCommits = wrapCommits(commits);
+ primary.onCommit(snapshotCommits);
+
+ // remove snapshot holders whose reference counts dropped to 0 (should not really happen)
+ for (Iterator<SnapshotHolder> it = snapshots.values().iterator(); it.hasNext(); ) {
+ SnapshotHolder holder = it.next();
+ if (holder.counter <= 0) {
+ it.remove();
+ }
+ }
+ // build the current commits list (all the ones that are not deleted by the primary)
+ List<SnapshotIndexCommit> newCommits = new ArrayList<SnapshotIndexCommit>();
+ for (SnapshotIndexCommit commit : snapshotCommits) {
+ if (!commit.isDeleted()) {
+ newCommits.add(commit);
+ }
+ }
+ this.commits = newCommits;
+ // the last commit that is not deleted
+ this.lastCommit = newCommits.get(newCommits.size() - 1);
+
+ }
+ }
+
+ /**
+ * Snapshots all the current commits in the index. Make sure to call
+ * {@link SnapshotIndexCommits#release()} to release them.
+ */
+ public SnapshotIndexCommits snapshots() throws IOException {
+ synchronized (mutex) {
+ if (commits == null) {
+ throw new IllegalStateException("Snapshot deletion policy has not been initialized yet");
+ }
+ List<SnapshotIndexCommit> result = new ArrayList<SnapshotIndexCommit>(commits.size());
+ for (SnapshotIndexCommit commit : commits) {
+ result.add(snapshot(commit));
+ }
+ return new SnapshotIndexCommits(result);
+ }
+ }
+
+ /**
+ * Returns a snapshot of the index (for the last commit point). Make
+ * sure to call {@link SnapshotIndexCommit#release()} in order to release it.
+ */
+ public SnapshotIndexCommit snapshot() throws IOException {
+ synchronized (mutex) {
+ if (lastCommit == null) {
+ throw new IllegalStateException("Snapshot deletion policy has not been init yet...");
+ }
+ return snapshot(lastCommit);
+ }
+ }
+
+ @Override
+ public IndexDeletionPolicy clone() {
+ // Lucene IW makes a clone internally but since we hold on to this instance
+ // the clone will just be the identity. See InternalEngine recovery for why we need this.
+ return this;
+ }
+
+ /**
+ * Helper method to snapshot a given commit.
+ */
+ private SnapshotIndexCommit snapshot(SnapshotIndexCommit commit) throws IOException {
+ SnapshotHolder snapshotHolder = snapshots.get(commit.getGeneration());
+ if (snapshotHolder == null) {
+ snapshotHolder = new SnapshotHolder(0);
+ snapshots.put(commit.getGeneration(), snapshotHolder);
+ }
+ snapshotHolder.counter++;
+ return new OneTimeReleaseSnapshotIndexCommit(this, commit);
+ }
+
+ /**
+ * Returns <tt>true</tt> if the version has been snapshotted.
+ */
+ boolean isHeld(long version) {
+ SnapshotDeletionPolicy.SnapshotHolder holder = snapshots.get(version);
+ return holder != null && holder.counter > 0;
+ }
+
+ /**
+ * Releases the version provided. Returns <tt>true</tt> if the release was successful.
+ */
+ boolean release(long version) {
+ synchronized (mutex) {
+ SnapshotDeletionPolicy.SnapshotHolder holder = snapshots.get(version);
+ if (holder == null) {
+ return false;
+ }
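+ // defensive: a holder whose count already dropped to 0 is stale; remove it and report failure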
+ if (holder.counter <= 0) {
+ snapshots.remove(version);
+ return false;
+ }
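+ // last reference released; stop holding on to this commit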
+ if (--holder.counter == 0) {
+ snapshots.remove(version);
+ }
+ return true;
+ }
+ }
+
+ /**
+ * A class that wraps a {@link SnapshotIndexCommit} and makes sure that release will only
+ * be called once on it.
+ */
+ private static class OneTimeReleaseSnapshotIndexCommit extends SnapshotIndexCommit {
+ private volatile boolean released = false;
+
+ OneTimeReleaseSnapshotIndexCommit(SnapshotDeletionPolicy deletionPolicy, IndexCommit cp) throws IOException {
+ super(deletionPolicy, cp);
+ }
+
+ @Override
+ public boolean release() {
+ if (released) {
+ return false;
+ }
+ released = true;
+ return ((SnapshotIndexCommit) delegate).release();
+ }
+ }
+
+ private static class SnapshotHolder {
+ int counter;
+
+ private SnapshotHolder(int counter) {
+ this.counter = counter;
+ }
+ }
+
+ private List<SnapshotIndexCommit> wrapCommits(List<? extends IndexCommit> commits) throws IOException {
+ final int count = commits.size();
+ List<SnapshotIndexCommit> snapshotCommits = new ArrayList<SnapshotIndexCommit>(count);
+ for (int i = 0; i < count; i++)
+ snapshotCommits.add(new SnapshotIndexCommit(this, commits.get(i)));
+ return snapshotCommits;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java b/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java
new file mode 100644
index 0000000..4e1ee19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommit.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.index.IndexCommit;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lucene.IndexCommitDelegate;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A snapshot index commit point. While this is held and {@link #release()}
+ * has not been called, no files that relate to this commit point
+ * ({@link #getFileNames()}) will be deleted.
+ *
+ *
+ */
+public class SnapshotIndexCommit extends IndexCommitDelegate implements Releasable {
+
+ private final SnapshotDeletionPolicy deletionPolicy;
+
+ private final String[] files;
+
+ SnapshotIndexCommit(SnapshotDeletionPolicy deletionPolicy, IndexCommit cp) throws IOException {
+ super(cp);
+ this.deletionPolicy = deletionPolicy;
+ ArrayList<String> tmpFiles = new ArrayList<String>();
+ for (String o : cp.getFileNames()) {
+ tmpFiles.add(o);
+ }
+ files = tmpFiles.toArray(new String[tmpFiles.size()]);
+ }
+
+ public String[] getFiles() {
+ return files;
+ }
+
+ /**
+ * Releases the current snapshot, returning <code>true</code> if it was
+ * actually released.
+ */
+ public boolean release() {
+ return deletionPolicy.release(getGeneration());
+ }
+
+ /**
+ * Overrides the delete operation; the commit is only actually deleted if it
+ * is not held by the {@link SnapshotDeletionPolicy}.
+ */
+ @Override
+ public void delete() {
+ if (!deletionPolicy.isHeld(getGeneration())) {
+ delegate.delete();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java b/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java
new file mode 100644
index 0000000..69b8115
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommits.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.elasticsearch.common.lease.Releasable;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Represents a snapshot view of several commits. Provides a way to iterate over
+ * them as well as a simple method to release all of them.
+ *
+ *
+ */
+public class SnapshotIndexCommits implements Iterable<SnapshotIndexCommit>, Releasable {
+
+ private final List<SnapshotIndexCommit> commits;
+
+ public SnapshotIndexCommits(List<SnapshotIndexCommit> commits) {
+ this.commits = commits;
+ }
+
+ public int size() {
+ return commits.size();
+ }
+
+ @Override
+ public Iterator<SnapshotIndexCommit> iterator() {
+ return commits.iterator();
+ }
+
+ public boolean release() {
+ boolean result = false;
+ for (SnapshotIndexCommit snapshot : commits) {
+ result |= snapshot.release();
+ }
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/CloseEngineException.java b/src/main/java/org/elasticsearch/index/engine/CloseEngineException.java
new file mode 100644
index 0000000..f3b21f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/CloseEngineException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that an {@link org.elasticsearch.index.engine.Engine} close failed.
+ *
+ *
+ */
+public class CloseEngineException extends EngineException {
+
+ public CloseEngineException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public CloseEngineException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java
new file mode 100644
index 0000000..ad7ca95
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that a create operation failed in the {@link Engine}.
+ */
+public class CreateFailedEngineException extends EngineException {
+
+ private final String type;
+
+ private final String id;
+
+ public CreateFailedEngineException(ShardId shardId, Engine.Create create, Throwable cause) {
+ super(shardId, "Create failed for [" + create.type() + "#" + create.id() + "]", cause);
+ this.type = create.type();
+ this.id = create.id();
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java
new file mode 100644
index 0000000..9c12348
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that a delete-by-query operation failed in the {@link Engine}.
+ */
+public class DeleteByQueryFailedEngineException extends EngineException {
+
+ public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) {
+ super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java
new file mode 100644
index 0000000..e693519
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that a delete operation failed in the {@link Engine}.
+ */
+public class DeleteFailedEngineException extends EngineException {
+
+ public DeleteFailedEngineException(ShardId shardId, Engine.Delete delete, Throwable cause) {
+ super(shardId, "Delete failed for [" + delete.uid().text() + "]", cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java b/src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java
new file mode 100644
index 0000000..02bf49a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * An exception indicating that a document already exists; maps to HTTP <tt>409 Conflict</tt>.
+ */
+public class DocumentAlreadyExistsException extends EngineException {
+
+ public DocumentAlreadyExistsException(ShardId shardId, String type, String id) {
+ super(shardId, "[" + type + "][" + id + "]: document already exists");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.CONFLICT;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java b/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java
new file mode 100644
index 0000000..37350bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * An exception indicating that a document is missing; maps to HTTP <tt>404 Not Found</tt>.
+ */
+public class DocumentMissingException extends EngineException {
+
+ public DocumentMissingException(ShardId shardId, String type, String id) {
+ super(shardId, "[" + type + "][" + id + "]: document missing");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java b/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java
new file mode 100644
index 0000000..c478b10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * An exception indicating that the source of a document is missing; maps to HTTP <tt>400 Bad Request</tt>.
+ */
+public class DocumentSourceMissingException extends EngineException {
+
+ public DocumentSourceMissingException(ShardId shardId, String type, String id) {
+ super(shardId, "[" + type + "][" + id + "]: document source missing");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java
new file mode 100644
index 0000000..a0baf7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -0,0 +1,937 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.CloseableComponent;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.shard.IndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+
+import java.util.List;
+
+/**
+ * The engine is the shard-level abstraction over the Lucene index and the
+ * translog, exposing indexing, delete, get, search, flush, optimize,
+ * snapshot and recovery operations.
+ */
+public interface Engine extends IndexShardComponent, CloseableComponent {
+
+ static final String INDEX_CODEC = "index.codec";
+ static ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb");
+
+ /**
+ * The default suggested refresh interval, -1 to disable it.
+ */
+ TimeValue defaultRefreshInterval();
+
+ void enableGcDeletes(boolean enableGcDeletes);
+
+ void updateIndexingBufferSize(ByteSizeValue indexingBufferSize);
+
+ void addFailedEngineListener(FailedEngineListener listener);
+
+ /**
+ * Starts the Engine.
+ * <p/>
+ * <p>Note, after the creation and before the call to start, the store might
+ * be changed.
+ */
+ void start() throws EngineException;
+
+ void create(Create create) throws EngineException;
+
+ void index(Index index) throws EngineException;
+
+ void delete(Delete delete) throws EngineException;
+
+ void delete(DeleteByQuery delete) throws EngineException;
+
+ GetResult get(Get get) throws EngineException;
+
+ /**
+ * Returns a new searcher instance. The consumer of this
+ * API is responsible for releasing the returned searcher in a
+ * safe manner, preferably in a try/finally block.
+ *
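+ * <p>For example:
+ * <pre>
+ * Engine.Searcher searcher = engine.acquireSearcher("my_source");
+ * try {
+ *     // use searcher.searcher() / searcher.reader()
+ * } finally {
+ *     searcher.release();
+ * }
+ * </pre>
+ *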
+ * @see Searcher#release()
+ */
+ Searcher acquireSearcher(String source) throws EngineException;
+
+ /**
+ * Global stats on segments.
+ */
+ SegmentsStats segmentsStats();
+
+ /**
+ * The list of segments in the engine.
+ */
+ List<Segment> segments();
+
+ /**
+ * Returns <tt>true</tt> if a refresh is really needed.
+ */
+ boolean refreshNeeded();
+
+ /**
+ * Returns <tt>true</tt> if a possible merge is really needed.
+ */
+ boolean possibleMergeNeeded();
+
+ void maybeMerge() throws EngineException;
+
+ /**
+ * Refreshes the engine for new search operations to reflect the latest
+ * changes. Pass <tt>true</tt> if the refresh operation should include
+ * all the operations performed up to this call.
+ */
+ void refresh(Refresh refresh) throws EngineException;
+
+ /**
+ * Flushes the state of the engine, clearing memory.
+ */
+ void flush(Flush flush) throws EngineException, FlushNotAllowedEngineException;
+
+ void optimize(Optimize optimize) throws EngineException;
+
+ <T> T snapshot(SnapshotHandler<T> snapshotHandler) throws EngineException;
+
+ /**
+ * Snapshots the index and returns a handle to it. Will always try to "commit" the
+ * Lucene index to make sure we have a "fresh" copy of the files to snapshot.
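+ * <p>An illustrative use:
+ * <pre>
+ * SnapshotIndexCommit commit = engine.snapshotIndex();
+ * try {
+ *     // copy the files in commit.getFiles() somewhere safe
+ * } finally {
+ *     commit.release();
+ * }
+ * </pre>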
+ */
+ SnapshotIndexCommit snapshotIndex() throws EngineException;
+
+ void recover(RecoveryHandler recoveryHandler) throws EngineException;
+
+ static interface FailedEngineListener {
+ void onFailedEngine(ShardId shardId, Throwable t);
+ }
+
+ /**
+ * Recovery allows starting the recovery process. It consists of three phases.
+ * <p/>
+ * <p>The first phase allows taking a snapshot of the master index. Once this
+ * is taken, no commit operations are effectively allowed on the index until the recovery
+ * phases are through.
+ * <p/>
+ * <p>The second phase takes a snapshot of the current transaction log.
+ * <p/>
+ * <p>The last phase returns the remaining transaction log. During this phase, no dirty
+ * operations are allowed on the index.
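+ *
+ * <p>A skeletal handler, for illustration only:
+ * <pre>
+ * engine.recover(new Engine.RecoveryHandler() {
+ *     public void phase1(SnapshotIndexCommit snapshot) {
+ *         // phase 1: ship the index files referenced by the snapshot
+ *     }
+ *     public void phase2(Translog.Snapshot snapshot) {
+ *         // phase 2: replay the translog operations captured so far
+ *     }
+ *     public void phase3(Translog.Snapshot snapshot) {
+ *         // phase 3: replay the remaining operations; dirty operations are blocked
+ *     }
+ * });
+ * </pre>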
+ */
+ static interface RecoveryHandler {
+
+ void phase1(SnapshotIndexCommit snapshot) throws ElasticsearchException;
+
+ void phase2(Translog.Snapshot snapshot) throws ElasticsearchException;
+
+ void phase3(Translog.Snapshot snapshot) throws ElasticsearchException;
+ }
+
+ static interface SnapshotHandler<T> {
+
+ T snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException;
+ }
+
+ static interface Searcher extends Releasable {
+
+ /**
+ * The source that caused this searcher to be acquired.
+ */
+ String source();
+
+ IndexReader reader();
+
+ IndexSearcher searcher();
+ }
+
+ static class SimpleSearcher implements Searcher {
+
+ private final String source;
+ private final IndexSearcher searcher;
+
+ public SimpleSearcher(String source, IndexSearcher searcher) {
+ this.source = source;
+ this.searcher = searcher;
+ }
+
+ @Override
+ public String source() {
+ return source;
+ }
+
+ @Override
+ public IndexReader reader() {
+ return searcher.getIndexReader();
+ }
+
+ @Override
+ public IndexSearcher searcher() {
+ return searcher;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ // nothing to release here...
+ return true;
+ }
+ }
+
+ static class Refresh {
+
+ private final String source;
+ private boolean force = false;
+
+ public Refresh(String source) {
+ this.source = source;
+ }
+
+ /**
+ * Forces the refresh to run, overriding the check that dirty operations even happened. Defaults
+ * to <tt>false</tt> (note, a refresh is still lightweight if none is needed).
+ */
+ public Refresh force(boolean force) {
+ this.force = force;
+ return this;
+ }
+
+ public boolean force() {
+ return this.force;
+ }
+
+ public String source() {
+ return this.source;
+ }
+
+ @Override
+ public String toString() {
+ return "force[" + force + "], source [" + source + "]";
+ }
+ }
+
+ static class Flush {
+
+ public static enum Type {
+ /**
+ * A flush that causes a new writer to be created.
+ */
+ NEW_WRITER,
+ /**
+ * A flush that just commits the writer, without cleaning the translog.
+ */
+ COMMIT,
+ /**
+ * A flush that does a commit, as well as clears the translog.
+ */
+ COMMIT_TRANSLOG
+ }
+
+ private Type type = Type.COMMIT_TRANSLOG;
+ private boolean force = false;
+ /**
+ * Whether the flush operation should wait if there is an ongoing flush operation.
+ */
+ private boolean waitIfOngoing = false;
+
+ public Type type() {
+ return this.type;
+ }
+
+ /**
+ * Should a "full" flush be issued, basically cleaning as much memory as possible.
+ */
+ public Flush type(Type type) {
+ this.type = type;
+ return this;
+ }
+
+ public boolean force() {
+ return this.force;
+ }
+
+ public Flush force(boolean force) {
+ this.force = force;
+ return this;
+ }
+
+ public boolean waitIfOngoing() {
+ return this.waitIfOngoing;
+ }
+
+ public Flush waitIfOngoing(boolean waitIfOngoing) {
+ this.waitIfOngoing = waitIfOngoing;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return "type[" + type + "], force[" + force + "]";
+ }
+ }
+
+ static class Optimize {
+ private boolean waitForMerge = true;
+ private int maxNumSegments = -1;
+ private boolean onlyExpungeDeletes = false;
+ private boolean flush = false;
+
+ public Optimize() {
+ }
+
+ public boolean waitForMerge() {
+ return waitForMerge;
+ }
+
+ public Optimize waitForMerge(boolean waitForMerge) {
+ this.waitForMerge = waitForMerge;
+ return this;
+ }
+
+ public int maxNumSegments() {
+ return maxNumSegments;
+ }
+
+ public Optimize maxNumSegments(int maxNumSegments) {
+ this.maxNumSegments = maxNumSegments;
+ return this;
+ }
+
+ public boolean onlyExpungeDeletes() {
+ return onlyExpungeDeletes;
+ }
+
+ public Optimize onlyExpungeDeletes(boolean onlyExpungeDeletes) {
+ this.onlyExpungeDeletes = onlyExpungeDeletes;
+ return this;
+ }
+
+ public boolean flush() {
+ return flush;
+ }
+
+ public Optimize flush(boolean flush) {
+ this.flush = flush;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return "waitForMerge[" + waitForMerge + "], maxNumSegments[" + maxNumSegments + "], onlyExpungeDeletes[" + onlyExpungeDeletes + "], flush[" + flush + "]";
+ }
+ }
+
+ static interface Operation {
+ static enum Type {
+ CREATE,
+ INDEX,
+ DELETE
+ }
+
+ static enum Origin {
+ PRIMARY,
+ REPLICA,
+ RECOVERY
+ }
+
+ Type opType();
+
+ Origin origin();
+ }
+
+ static interface IndexingOperation extends Operation {
+
+ ParsedDocument parsedDoc();
+
+ List<Document> docs();
+
+ DocumentMapper docMapper();
+ }
+
+ static class Create implements IndexingOperation {
+ private final DocumentMapper docMapper;
+ private final Term uid;
+ private final ParsedDocument doc;
+ private long version = Versions.MATCH_ANY;
+ private VersionType versionType = VersionType.INTERNAL;
+ private Origin origin = Origin.PRIMARY;
+
+ private long startTime;
+ private long endTime;
+
+ public Create(DocumentMapper docMapper, Term uid, ParsedDocument doc) {
+ this.docMapper = docMapper;
+ this.uid = uid;
+ this.doc = doc;
+ }
+
+ @Override
+ public DocumentMapper docMapper() {
+ return this.docMapper;
+ }
+
+ @Override
+ public Type opType() {
+ return Type.CREATE;
+ }
+
+ public Create origin(Origin origin) {
+ this.origin = origin;
+ return this;
+ }
+
+ @Override
+ public Origin origin() {
+ return this.origin;
+ }
+
+ @Override
+ public ParsedDocument parsedDoc() {
+ return this.doc;
+ }
+
+ public Term uid() {
+ return this.uid;
+ }
+
+ public String type() {
+ return this.doc.type();
+ }
+
+ public String id() {
+ return this.doc.id();
+ }
+
+ public String routing() {
+ return this.doc.routing();
+ }
+
+ public long timestamp() {
+ return this.doc.timestamp();
+ }
+
+ public long ttl() {
+ return this.doc.ttl();
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public Create version(long version) {
+ this.version = version;
+ this.doc.version().setLongValue(version);
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
+ public Create versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public String parent() {
+ return this.doc.parent();
+ }
+
+ @Override
+ public List<Document> docs() {
+ return this.doc.docs();
+ }
+
+ public Analyzer analyzer() {
+ return this.doc.analyzer();
+ }
+
+ public BytesReference source() {
+ return this.doc.source();
+ }
+
+ public Create startTime(long startTime) {
+ this.startTime = startTime;
+ return this;
+ }
+
+ /**
+ * Returns operation start time in nanoseconds.
+ */
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public Create endTime(long endTime) {
+ this.endTime = endTime;
+ return this;
+ }
+
+ /**
+ * Returns operation end time in nanoseconds.
+ */
+ public long endTime() {
+ return this.endTime;
+ }
+ }
+
+ static class Index implements IndexingOperation {
+ private final DocumentMapper docMapper;
+ private final Term uid;
+ private final ParsedDocument doc;
+ private long version = Versions.MATCH_ANY;
+ private VersionType versionType = VersionType.INTERNAL;
+ private Origin origin = Origin.PRIMARY;
+ private boolean created;
+
+ private long startTime;
+ private long endTime;
+
+ public Index(DocumentMapper docMapper, Term uid, ParsedDocument doc) {
+ this.docMapper = docMapper;
+ this.uid = uid;
+ this.doc = doc;
+ }
+
+ @Override
+ public DocumentMapper docMapper() {
+ return this.docMapper;
+ }
+
+ @Override
+ public Type opType() {
+ return Type.INDEX;
+ }
+
+ public Index origin(Origin origin) {
+ this.origin = origin;
+ return this;
+ }
+
+ @Override
+ public Origin origin() {
+ return this.origin;
+ }
+
+ public Term uid() {
+ return this.uid;
+ }
+
+ @Override
+ public ParsedDocument parsedDoc() {
+ return this.doc;
+ }
+
+ public Index version(long version) {
+ this.version = version;
+ doc.version().setLongValue(version);
+ return this;
+ }
+
+ /**
+ * Before indexing, this holds the requested version; after indexing, it holds the new version of the document.
+ */
+ public long version() {
+ return this.version;
+ }
+
+ public Index versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
+ @Override
+ public List<Document> docs() {
+ return this.doc.docs();
+ }
+
+ public Analyzer analyzer() {
+ return this.doc.analyzer();
+ }
+
+ public String id() {
+ return this.doc.id();
+ }
+
+ public String type() {
+ return this.doc.type();
+ }
+
+ public String routing() {
+ return this.doc.routing();
+ }
+
+ public String parent() {
+ return this.doc.parent();
+ }
+
+ public long timestamp() {
+ return this.doc.timestamp();
+ }
+
+ public long ttl() {
+ return this.doc.ttl();
+ }
+
+ public BytesReference source() {
+ return this.doc.source();
+ }
+
+ public Index startTime(long startTime) {
+ this.startTime = startTime;
+ return this;
+ }
+
+ /**
+ * Returns operation start time in nanoseconds.
+ */
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public Index endTime(long endTime) {
+ this.endTime = endTime;
+ return this;
+ }
+
+ /**
+ * Returns operation end time in nanoseconds.
+ */
+ public long endTime() {
+ return this.endTime;
+ }
+
+ /**
+ * @return true if the document was created (did not exist before)
+ */
+ public boolean created() {
+ return created;
+ }
+
+ public void created(boolean created) {
+ this.created = created;
+ }
+ }
+
+ static class Delete implements Operation {
+ private final String type;
+ private final String id;
+ private final Term uid;
+ private long version = Versions.MATCH_ANY;
+ private VersionType versionType = VersionType.INTERNAL;
+ private Origin origin = Origin.PRIMARY;
+ private boolean found;
+
+ private long startTime;
+ private long endTime;
+
+ public Delete(String type, String id, Term uid) {
+ this.type = type;
+ this.id = id;
+ this.uid = uid;
+ }
+
+ @Override
+ public Type opType() {
+ return Type.DELETE;
+ }
+
+ public Delete origin(Origin origin) {
+ this.origin = origin;
+ return this;
+ }
+
+ @Override
+ public Origin origin() {
+ return this.origin;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public Term uid() {
+ return this.uid;
+ }
+
+ public Delete version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ /**
+ * Before delete execution, this is the version to be deleted; afterwards, it is the version of the "delete" transaction record.
+ */
+ public long version() {
+ return this.version;
+ }
+
+ public Delete versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return this.versionType;
+ }
+
+ public boolean found() {
+ return this.found;
+ }
+
+ public Delete found(boolean found) {
+ this.found = found;
+ return this;
+ }
+
+ public Delete startTime(long startTime) {
+ this.startTime = startTime;
+ return this;
+ }
+
+ /**
+ * Returns operation start time in nanoseconds.
+ */
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public Delete endTime(long endTime) {
+ this.endTime = endTime;
+ return this;
+ }
+
+ /**
+ * Returns operation end time in nanoseconds.
+ */
+ public long endTime() {
+ return this.endTime;
+ }
+ }
+
+ static class DeleteByQuery {
+ private final Query query;
+ private final BytesReference source;
+ private final String[] filteringAliases;
+ private final Filter aliasFilter;
+ private final String[] types;
+ private final Filter parentFilter;
+ private Operation.Origin origin = Operation.Origin.PRIMARY;
+
+ private long startTime;
+ private long endTime;
+
+ public DeleteByQuery(Query query, BytesReference source, @Nullable String[] filteringAliases, @Nullable Filter aliasFilter, Filter parentFilter, String... types) {
+ this.query = query;
+ this.source = source;
+ this.types = types;
+ this.filteringAliases = filteringAliases;
+ this.aliasFilter = aliasFilter;
+ this.parentFilter = parentFilter;
+ }
+
+ public Query query() {
+ return this.query;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public String[] types() {
+ return this.types;
+ }
+
+ public String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ public Filter aliasFilter() {
+ return aliasFilter;
+ }
+
+ public boolean nested() {
+ return parentFilter != null;
+ }
+
+ public Filter parentFilter() {
+ return parentFilter;
+ }
+
+ public DeleteByQuery origin(Operation.Origin origin) {
+ this.origin = origin;
+ return this;
+ }
+
+ public Operation.Origin origin() {
+ return this.origin;
+ }
+
+ public DeleteByQuery startTime(long startTime) {
+ this.startTime = startTime;
+ return this;
+ }
+
+ /**
+ * Returns operation start time in nanoseconds.
+ */
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public DeleteByQuery endTime(long endTime) {
+ this.endTime = endTime;
+ return this;
+ }
+
+ /**
+ * Returns operation end time in nanoseconds.
+ */
+ public long endTime() {
+ return this.endTime;
+ }
+ }
+
+
+ static class Get {
+ private final boolean realtime;
+ private final Term uid;
+ private boolean loadSource = true;
+ private long version;
+ private VersionType versionType;
+
+ public Get(boolean realtime, Term uid) {
+ this.realtime = realtime;
+ this.uid = uid;
+ }
+
+ public boolean realtime() {
+ return this.realtime;
+ }
+
+ public Term uid() {
+ return uid;
+ }
+
+ public boolean loadSource() {
+ return this.loadSource;
+ }
+
+ public Get loadSource(boolean loadSource) {
+ this.loadSource = loadSource;
+ return this;
+ }
+
+ public long version() {
+ return version;
+ }
+
+ public Get version(long version) {
+ this.version = version;
+ return this;
+ }
+
+ public VersionType versionType() {
+ return versionType;
+ }
+
+ public Get versionType(VersionType versionType) {
+ this.versionType = versionType;
+ return this;
+ }
+ }
+
+ static class GetResult {
+ private final boolean exists;
+ private final long version;
+ private final Translog.Source source;
+ private final Versions.DocIdAndVersion docIdAndVersion;
+ private final Searcher searcher;
+
+ public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null);
+
+ public GetResult(boolean exists, long version, @Nullable Translog.Source source) {
+ this.source = source;
+ this.exists = exists;
+ this.version = version;
+ this.docIdAndVersion = null;
+ this.searcher = null;
+ }
+
+ public GetResult(Searcher searcher, Versions.DocIdAndVersion docIdAndVersion) {
+ this.exists = true;
+ this.source = null;
+ this.version = docIdAndVersion.version;
+ this.docIdAndVersion = docIdAndVersion;
+ this.searcher = searcher;
+ }
+
+ public boolean exists() {
+ return exists;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Nullable
+ public Translog.Source source() {
+ return source;
+ }
+
+ public Searcher searcher() {
+ return this.searcher;
+ }
+
+ public Versions.DocIdAndVersion docIdAndVersion() {
+ return docIdAndVersion;
+ }
+
+ public void release() {
+ if (searcher != null) {
+ searcher.release();
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java b/src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java
new file mode 100644
index 0000000..fb0591c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/EngineAlreadyStartedException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that the {@link Engine} has already been started.
+ */
+public class EngineAlreadyStartedException extends EngineException {
+
+ public EngineAlreadyStartedException(ShardId shardId) {
+ super(shardId, "Already started");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineClosedException.java b/src/main/java/org/elasticsearch/index/engine/EngineClosedException.java
new file mode 100644
index 0000000..f581394
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/EngineClosedException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.IndexShardClosedException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An engine is already closed.
+ * <p/>
+ * <p>Note, since engine closed implies shard closed, an operation might still slip
+ * through the shard and into the engine while the shard is closing.
+ *
+ *
+ */
+public class EngineClosedException extends IndexShardClosedException {
+
+ public EngineClosedException(ShardId shardId) {
+ super(shardId);
+ }
+
+ public EngineClosedException(ShardId shardId, Throwable t) {
+ super(shardId, t);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java b/src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java
new file mode 100644
index 0000000..2036e2f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/EngineCreationFailureException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that an {@link Engine} creation failed.
+ *
+ *
+ */
+public class EngineCreationFailureException extends EngineException {
+
+ public EngineCreationFailureException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineException.java b/src/main/java/org/elasticsearch/index/engine/EngineException.java
new file mode 100644
index 0000000..6b8c7e3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/EngineException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class EngineException extends IndexShardException {
+
+ public EngineException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public EngineException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineModule.java b/src/main/java/org/elasticsearch/index/engine/EngineModule.java
new file mode 100644
index 0000000..dcf9129
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/EngineModule.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class EngineModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public EngineModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(Modules.createModule(settings.getAsClass(IndexEngineModule.EngineSettings.ENGINE_TYPE, IndexEngineModule.EngineSettings.DEFAULT_ENGINE, "org.elasticsearch.index.engine.", "EngineModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java
new file mode 100644
index 0000000..06cce4f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class FlushFailedEngineException extends EngineException {
+
+ public FlushFailedEngineException(ShardId shardId, Throwable t) {
+ super(shardId, "Flush failed", t);
+ }
+
+ public FlushFailedEngineException(ShardId shardId, String message, Throwable t) {
+ super(shardId, "Flush failed [" + message + "]", t);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java b/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java
new file mode 100644
index 0000000..0f06520
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/FlushNotAllowedEngineException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class FlushNotAllowedEngineException extends EngineException {
+
+ public FlushNotAllowedEngineException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/IgnoreOnRecoveryEngineException.java b/src/main/java/org/elasticsearch/index/engine/IgnoreOnRecoveryEngineException.java
new file mode 100644
index 0000000..dcb3e43
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/IgnoreOnRecoveryEngineException.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+/**
+ * Exceptions implementing this interface will be ignored during recovery.
+ */
+public interface IgnoreOnRecoveryEngineException {
+} \ No newline at end of file
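
Because IgnoreOnRecoveryEngineException is a pure marker interface, recovery code can single such failures out with a plain instanceof check. A hedged sketch of how an engine exception would opt in, assuming it lives in the same org.elasticsearch.index.engine package; the class name and message are hypothetical, not from the source:

    import org.elasticsearch.index.shard.ShardId;

    // A failure that recovery should skip over rather than abort on.
    class StaleRecoveryOperationException extends EngineException
            implements IgnoreOnRecoveryEngineException {

        public StaleRecoveryOperationException(ShardId shardId) {
            super(shardId, "stale operation replayed during recovery");
        }
    }
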
diff --git a/src/main/java/org/elasticsearch/index/engine/IndexEngine.java b/src/main/java/org/elasticsearch/index/engine/IndexEngine.java
new file mode 100644
index 0000000..2be6009
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/IndexEngine.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.IndexComponent;
+
+/**
+ * An "index" scoped engine that provides some meta engine for the engine, and can be used to store
+ * index level data structures that an engine requires.
+ *
+ *
+ */
+public interface IndexEngine extends IndexComponent {
+
+ void close();
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/IndexEngineModule.java b/src/main/java/org/elasticsearch/index/engine/IndexEngineModule.java
new file mode 100644
index 0000000..3ceaf8e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/IndexEngineModule.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.internal.InternalEngineModule;
+import org.elasticsearch.index.engine.internal.InternalIndexEngineModule;
+
+import static org.elasticsearch.common.inject.Modules.createModule;
+
+/**
+ *
+ */
+public class IndexEngineModule extends AbstractModule implements SpawnModules {
+
+ public static final class EngineSettings {
+ public static final String ENGINE_TYPE = "index.engine.type";
+ public static final String INDEX_ENGINE_TYPE = "index.index_engine.type";
+ public static final Class<? extends Module> DEFAULT_INDEX_ENGINE = InternalIndexEngineModule.class;
+ public static final Class<? extends Module> DEFAULT_ENGINE = InternalEngineModule.class;
+ }
+
+ private final Settings settings;
+
+ public IndexEngineModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(createModule(settings.getAsClass(EngineSettings.INDEX_ENGINE_TYPE, EngineSettings.DEFAULT_INDEX_ENGINE, "org.elasticsearch.index.engine.", "IndexEngineModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ }
+} \ No newline at end of file
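
EngineSettings above centralizes the two type keys, and settings.getAsClass() accepts either a fully qualified module class name or a shorthand that is expanded with the "org.elasticsearch.index.engine." package prefix and the module suffix. A sketch of selecting a non-default engine through settings; "com.example.MyEngineModule" is a hypothetical class:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.engine.IndexEngineModule;

    class EngineSelectionExample {
        static Settings customEngineSettings() {
            return ImmutableSettings.settingsBuilder()
                    .put(IndexEngineModule.EngineSettings.ENGINE_TYPE, "com.example.MyEngineModule")
                    .build();
        }
    }
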
diff --git a/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java
new file mode 100644
index 0000000..3773529
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class IndexFailedEngineException extends EngineException {
+
+ private final String type;
+
+ private final String id;
+
+ public IndexFailedEngineException(ShardId shardId, Engine.Index index, Throwable cause) {
+ super(shardId, "Index failed for [" + index.type() + "#" + index.id() + "]", cause);
+ this.type = index.type();
+ this.id = index.id();
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java
new file mode 100644
index 0000000..442f7b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class OptimizeFailedEngineException extends EngineException {
+
+ public OptimizeFailedEngineException(ShardId shardId, Throwable t) {
+ super(shardId, "Optimize failed", t);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java b/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java
new file mode 100644
index 0000000..8984849
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class RecoveryEngineException extends EngineException {
+
+ private final int phase;
+
+ public RecoveryEngineException(ShardId shardId, int phase, String msg, Throwable cause) {
+ super(shardId, "Phase[" + phase + "] " + msg, cause);
+ this.phase = phase;
+ }
+
+ public int phase() {
+ return phase;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java
new file mode 100644
index 0000000..581bef0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class RefreshFailedEngineException extends EngineException {
+
+ public RefreshFailedEngineException(ShardId shardId, Throwable t) {
+ super(shardId, "Refresh failed", t);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java
new file mode 100644
index 0000000..0a4124e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/RollbackFailedEngineException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class RollbackFailedEngineException extends EngineException {
+
+ public RollbackFailedEngineException(ShardId shardId, Throwable t) {
+ super(shardId, "Rollback failed", t);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java b/src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java
new file mode 100644
index 0000000..3af1474
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/RollbackNotAllowedEngineException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class RollbackNotAllowedEngineException extends EngineException {
+
+ public RollbackNotAllowedEngineException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/Segment.java b/src/main/java/org/elasticsearch/index/engine/Segment.java
new file mode 100644
index 0000000..42bd593
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/Segment.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.io.IOException;
+
+public class Segment implements Streamable {
+
+ private String name;
+ private long generation;
+ public boolean committed;
+ public boolean search;
+ public long sizeInBytes = -1;
+ public int docCount = -1;
+ public int delDocCount = -1;
+ public String version = null;
+ public Boolean compound = null;
+ public String mergeId;
+ public long memoryInBytes;
+
+ Segment() {
+ }
+
+ public Segment(String name) {
+ this.name = name;
+ this.generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public long getGeneration() {
+ return this.generation;
+ }
+
+ public boolean isCommitted() {
+ return this.committed;
+ }
+
+ public boolean isSearch() {
+ return this.search;
+ }
+
+ public int getNumDocs() {
+ return this.docCount;
+ }
+
+ public int getDeletedDocs() {
+ return this.delDocCount;
+ }
+
+ public ByteSizeValue getSize() {
+ return new ByteSizeValue(sizeInBytes);
+ }
+
+ public long getSizeInBytes() {
+ return this.sizeInBytes;
+ }
+
+ public String getVersion() {
+ return version;
+ }
+
+ @Nullable
+ public Boolean isCompound() {
+ return compound;
+ }
+
+ /**
+ * If set, a string identifying the merge this segment is part of; segments that
+ * share the same value belong to the same ongoing merge.
+ */
+ @Nullable
+ public String getMergeId() {
+ return this.mergeId;
+ }
+
+ /**
+ * Estimation of the memory used by the segment.
+ */
+ public long getMemoryInBytes() {
+ return this.memoryInBytes;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Segment segment = (Segment) o;
+
+ if (name != null ? !name.equals(segment.name) : segment.name != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return name != null ? name.hashCode() : 0;
+ }
+
+ public static Segment readSegment(StreamInput in) throws IOException {
+ Segment segment = new Segment();
+ segment.readFrom(in);
+ return segment;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
+ committed = in.readBoolean();
+ search = in.readBoolean();
+ docCount = in.readInt();
+ delDocCount = in.readInt();
+ sizeInBytes = in.readLong();
+ version = in.readOptionalString();
+ compound = in.readOptionalBoolean();
+ mergeId = in.readOptionalString();
+ memoryInBytes = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeBoolean(committed);
+ out.writeBoolean(search);
+ out.writeInt(docCount);
+ out.writeInt(delDocCount);
+ out.writeLong(sizeInBytes);
+ out.writeOptionalString(version);
+ out.writeOptionalBoolean(compound);
+ out.writeOptionalString(mergeId);
+ out.writeLong(memoryInBytes);
+ }
+} \ No newline at end of file
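
A segment's generation is encoded in its Lucene name: everything after the leading "_" is a base-36 number, which both the constructor and readFrom() above decode with Long.parseLong(name.substring(1), Character.MAX_RADIX). A self-contained illustration:

    // Mirrors the generation parsing in Segment(String name) above.
    class SegmentNameExample {
        public static void main(String[] args) {
            String name = "_2c";    // a Lucene segment name
            long generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
            System.out.println(generation);    // prints 84, i.e. 2 * 36 + 12
        }
    }
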
diff --git a/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
new file mode 100644
index 0000000..4f65ddd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class SegmentsStats implements Streamable, ToXContent {
+
+ private long count;
+ private long memoryInBytes;
+
+ public SegmentsStats() {
+
+ }
+
+ static {
+ assert Version.LUCENE_46.onOrAfter(Lucene.VERSION); // remove special -1 handling below
+ }
+
+ public void add(long count, long memoryInBytes) {
+ this.count += count;
+ // replace me with
+ // "this.memoryInBytes += memoryInBytes;"
+ // upon upgrade to Lucene 4.7
+ if (memoryInBytes == -1) {
+ this.memoryInBytes = -1;
+ } else if (this.memoryInBytes != -1) {
+ this.memoryInBytes += memoryInBytes;
+ }
+ }
+
+ public void add(SegmentsStats mergeStats) {
+ if (mergeStats == null) {
+ return;
+ }
+ add(mergeStats.count, mergeStats.memoryInBytes);
+ }
+
+ /**
+ * The segments count.
+ */
+ public long getCount() {
+ return this.count;
+ }
+
+ /**
+ * Estimation of the memory used by the segments.
+ */
+ public long getMemoryInBytes() {
+ return this.memoryInBytes;
+ }
+
+ public ByteSizeValue getMemory() {
+ return new ByteSizeValue(memoryInBytes);
+ }
+
+ public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException {
+ SegmentsStats stats = new SegmentsStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.SEGMENTS);
+ builder.field(Fields.COUNT, count);
+ builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, memoryInBytes);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SEGMENTS = new XContentBuilderString("segments");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString MEMORY = new XContentBuilderString("memory");
+ static final XContentBuilderString MEMORY_IN_BYTES = new XContentBuilderString("memory_in_bytes");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ count = in.readVLong();
+ memoryInBytes = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(count);
+ out.writeLong(memoryInBytes);
+ }
+} \ No newline at end of file
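
add() above treats -1 as an "unknown" sentinel for memory usage (the static assert pins this workaround to pre-4.7 Lucene): once either side of the addition is unknown, the aggregate stays -1 instead of mixing known and unknown byte counts. A standalone sketch of that rule:

    // Reproduces the sentinel arithmetic from SegmentsStats.add(long, long).
    class SentinelMemoryExample {
        static long addMemory(long total, long increment) {
            if (increment == -1) {
                return -1;         // an unknown increment poisons the total
            }
            if (total == -1) {
                return -1;         // an already-unknown total stays unknown
            }
            return total + increment;
        }

        public static void main(String[] args) {
            System.out.println(addMemory(100, 50));    // 150
            System.out.println(addMemory(100, -1));    // -1
            System.out.println(addMemory(-1, 50));     // -1
        }
    }
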
diff --git a/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
new file mode 100644
index 0000000..1fb18a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class SnapshotFailedEngineException extends EngineException {
+
+ public SnapshotFailedEngineException(ShardId shardId, Throwable cause) {
+ super(shardId, "Snapshot failed", cause);
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
new file mode 100644
index 0000000..d3904f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class VersionConflictEngineException extends EngineException {
+
+ private final long current;
+
+ private final long provided;
+
+ public VersionConflictEngineException(ShardId shardId, String type, String id, long current, long provided) {
+ super(shardId, "[" + type + "][" + id + "]: version conflict, current [" + current + "], provided [" + provided + "]");
+ this.current = current;
+ this.provided = provided;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.CONFLICT;
+ }
+
+ public long getCurrentVersion() {
+ return this.current;
+ }
+
+ public long getProvidedVersion() {
+ return this.provided;
+ }
+}
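
VersionConflictEngineException carries both sides of the conflict and maps itself to HTTP 409 (RestStatus.CONFLICT) through status(), which is what lets callers build optimistic concurrency control on top of engine writes. A hedged sketch of a caller reacting to it; the retry hook is illustrative, not from the source:

    import org.elasticsearch.index.engine.VersionConflictEngineException;

    class ConflictRetryExample {
        static void applyWithRetry(Runnable write, Runnable reloadAndReapply) {
            try {
                write.run();
            } catch (VersionConflictEngineException e) {
                // The exception reports how stale the caller's view was.
                System.err.println("version conflict: current=" + e.getCurrentVersion()
                        + " provided=" + e.getProvidedVersion());
                reloadAndReapply.run();    // re-read at the current version and reapply
            }
        }
    }
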
diff --git a/src/main/java/org/elasticsearch/index/engine/internal/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/internal/InternalEngine.java
new file mode 100644
index 0000000..c3c5869
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/internal/InternalEngine.java
@@ -0,0 +1,1653 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.codecs.lucene3x.Lucene3xCodec;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SearcherFactory;
+import org.apache.lucene.search.XSearcherManager;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.SegmentReaderUtils;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.engine.*;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.index.merge.policy.IndexUpgraderMergePolicy;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStreams;
+import org.elasticsearch.indices.warmer.IndicesWarmer;
+import org.elasticsearch.indices.warmer.InternalIndicesWarmer;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ *
+ */
+public class InternalEngine extends AbstractIndexShardComponent implements Engine {
+
+ private volatile ByteSizeValue indexingBufferSize;
+ private volatile int indexConcurrency;
+ private volatile boolean compoundOnFlush = true;
+
+ private long gcDeletesInMillis;
+ private volatile boolean enableGcDeletes = true;
+ private volatile String codecName;
+
+ private final ThreadPool threadPool;
+
+ private final ShardIndexingService indexingService;
+ private final IndexSettingsService indexSettingsService;
+ @Nullable
+ private final InternalIndicesWarmer warmer;
+ private final Store store;
+ private final SnapshotDeletionPolicy deletionPolicy;
+ private final Translog translog;
+ private final MergePolicyProvider mergePolicyProvider;
+ private final MergeSchedulerProvider mergeScheduler;
+ private final AnalysisService analysisService;
+ private final SimilarityService similarityService;
+ private final CodecService codecService;
+
+
+ private final ReadWriteLock rwl = new ReentrantReadWriteLock();
+
+ private volatile IndexWriter indexWriter;
+
+ private final SearcherFactory searcherFactory = new SearchFactory();
+ private volatile XSearcherManager searcherManager;
+
+ private volatile boolean closed = false;
+
+ // flag indicating if a dirty operation has occurred since the last refresh
+ private volatile boolean dirty = false;
+
+ private volatile boolean possibleMergeNeeded = false;
+
+ private final AtomicBoolean optimizeMutex = new AtomicBoolean();
+ // we use flushNeeded here, since if there are no changes, then the commit will not
+ // really happen, and then the commitUserData and the new translog will not be reflected
+ private volatile boolean flushNeeded = false;
+ private final AtomicInteger flushing = new AtomicInteger();
+ private final Lock flushLock = new ReentrantLock();
+
+ private final RecoveryCounter onGoingRecoveries = new RecoveryCounter();
+
+
+ // A uid (in the form of BytesRef) to the version map
+ // we use the hashed variant since we iterate over it and check removal and additions on existing keys
+ private final ConcurrentMap<HashedBytesRef, VersionValue> versionMap;
+
+ private final Object[] dirtyLocks;
+
+ private final Object refreshMutex = new Object();
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ private volatile boolean failOnMergeFailure;
+ private Throwable failedEngine = null;
+ private final Object failedEngineMutex = new Object();
+ private final CopyOnWriteArrayList<FailedEngineListener> failedEngineListeners = new CopyOnWriteArrayList<FailedEngineListener>();
+
+ private final AtomicLong translogIdGenerator = new AtomicLong();
+
+ private SegmentInfos lastCommittedSegmentInfos;
+
+ @Inject
+ public InternalEngine(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
+ IndexSettingsService indexSettingsService, ShardIndexingService indexingService, @Nullable IndicesWarmer warmer,
+ Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog,
+ MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler,
+ AnalysisService analysisService, SimilarityService similarityService, CodecService codecService) throws EngineException {
+ super(shardId, indexSettings);
+ Preconditions.checkNotNull(store, "Store must be provided to the engine");
+ Preconditions.checkNotNull(deletionPolicy, "Snapshot deletion policy must be provided to the engine");
+ Preconditions.checkNotNull(translog, "Translog must be provided to the engine");
+
+ this.gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueSeconds(60)).millis();
+ this.indexingBufferSize = componentSettings.getAsBytesSize("index_buffer_size", new ByteSizeValue(64, ByteSizeUnit.MB)); // not really important, as it is set by the IndexingMemory manager
+ this.codecName = indexSettings.get(INDEX_CODEC, "default");
+
+ this.threadPool = threadPool;
+ this.indexSettingsService = indexSettingsService;
+ this.indexingService = indexingService;
+ this.warmer = (InternalIndicesWarmer) warmer;
+ this.store = store;
+ this.deletionPolicy = deletionPolicy;
+ this.translog = translog;
+ this.mergePolicyProvider = mergePolicyProvider;
+ this.mergeScheduler = mergeScheduler;
+ this.analysisService = analysisService;
+ this.similarityService = similarityService;
+ this.codecService = codecService;
+ this.compoundOnFlush = indexSettings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, this.compoundOnFlush);
+ this.indexConcurrency = indexSettings.getAsInt(INDEX_INDEX_CONCURRENCY, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
+ this.versionMap = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
+ this.dirtyLocks = new Object[indexConcurrency * 50]; // striped per-uid locks, over-allocated so concurrent operations rarely contend
+ for (int i = 0; i < dirtyLocks.length; i++) {
+ dirtyLocks[i] = new Object();
+ }
+
+ this.indexSettingsService.addListener(applySettings);
+
+ this.failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, true);
+ if (failOnMergeFailure) {
+ this.mergeScheduler.addFailureListener(new FailEngineOnMergeFailure());
+ }
+ }
+
+ @Override
+ public void updateIndexingBufferSize(ByteSizeValue indexingBufferSize) {
+ ByteSizeValue preValue = this.indexingBufferSize;
+ rwl.readLock().lock();
+ try {
+ this.indexingBufferSize = indexingBufferSize;
+ IndexWriter indexWriter = this.indexWriter;
+ if (indexWriter != null) {
+ indexWriter.getConfig().setRAMBufferSizeMB(this.indexingBufferSize.mbFrac());
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ if (preValue.bytes() != indexingBufferSize.bytes()) {
+ // it's inactive: make sure we do a full flush in this case, since the memory
+ // changes only after a "data" change has happened to the writer
+ if (indexingBufferSize == Engine.INACTIVE_SHARD_INDEXING_BUFFER && preValue != Engine.INACTIVE_SHARD_INDEXING_BUFFER) {
+ logger.debug("updating index_buffer_size from [{}] to (inactive) [{}]", preValue, indexingBufferSize);
+ try {
+ flush(new Flush().type(Flush.Type.NEW_WRITER));
+ } catch (EngineClosedException e) {
+ // ignore
+ } catch (FlushNotAllowedEngineException e) {
+ // ignore
+ } catch (Throwable e) {
+ logger.warn("failed to flush after setting shard to inactive", e);
+ }
+ } else {
+ logger.debug("updating index_buffer_size from [{}] to [{}]", preValue, indexingBufferSize);
+ }
+ }
+ }
+
+ @Override
+ public void addFailedEngineListener(FailedEngineListener listener) {
+ failedEngineListeners.add(listener);
+ }
+
+ @Override
+ public void start() throws EngineException {
+ rwl.writeLock().lock();
+ try {
+ if (indexWriter != null) {
+ throw new EngineAlreadyStartedException(shardId);
+ }
+ if (closed) {
+ throw new EngineClosedException(shardId);
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("starting engine");
+ }
+ try {
+ this.indexWriter = createWriter();
+ } catch (IOException e) {
+ throw new EngineCreationFailureException(shardId, "failed to create engine", e);
+ }
+
+ try {
+ // a commit on a just-opened writer will commit even if there are no changes done to it
+ // we rely on that for the commit data translog id key
+ if (Lucene.indexExists(store.directory())) {
+ Map<String, String> commitUserData = Lucene.readSegmentInfos(store.directory()).getUserData();
+ if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) {
+ translogIdGenerator.set(Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)));
+ } else {
+ translogIdGenerator.set(System.currentTimeMillis());
+ indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogIdGenerator.get())).map());
+ indexWriter.commit();
+ }
+ } else {
+ translogIdGenerator.set(System.currentTimeMillis());
+ indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogIdGenerator.get())).map());
+ indexWriter.commit();
+ }
+ translog.newTranslog(translogIdGenerator.get());
+ this.searcherManager = buildSearchManager(indexWriter);
+ readLastCommittedSegmentsInfo();
+ } catch (IOException e) {
+ try {
+ indexWriter.rollback();
+ } catch (IOException e1) {
+ // ignore
+ } finally {
+ IOUtils.closeWhileHandlingException(indexWriter);
+ }
+ throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
+ }
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ private void readLastCommittedSegmentsInfo() throws IOException {
+ SegmentInfos infos = new SegmentInfos();
+ infos.read(store.directory());
+ lastCommittedSegmentInfos = infos;
+ }
+
+ @Override
+ public TimeValue defaultRefreshInterval() {
+ return new TimeValue(1, TimeUnit.SECONDS);
+ }
+
+ @Override
+ public void enableGcDeletes(boolean enableGcDeletes) {
+ this.enableGcDeletes = enableGcDeletes;
+ }
+
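+ // Resolution order for a realtime get (summarizing the method below):
+ // (1) the in-memory version map, which reflects operations that are not yet
+ // searchable; (2) the translog, to load the source of such a document;
+ // (3) otherwise the Lucene index via an acquired searcher, which the caller
+ // must release through GetResult.release().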
+ public GetResult get(Get get) throws EngineException {
+ rwl.readLock().lock();
+ try {
+ if (get.realtime()) {
+ VersionValue versionValue = versionMap.get(versionKey(get.uid()));
+ if (versionValue != null) {
+ if (versionValue.delete()) {
+ return GetResult.NOT_EXISTS;
+ }
+ if (get.version() != Versions.MATCH_ANY) {
+ if (get.versionType().isVersionConflict(versionValue.version(), get.version())) {
+ Uid uid = Uid.createUid(get.uid().text());
+ throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version());
+ }
+ }
+ if (!get.loadSource()) {
+ return new GetResult(true, versionValue.version(), null);
+ }
+ byte[] data = translog.read(versionValue.translogLocation());
+ if (data != null) {
+ try {
+ Translog.Source source = TranslogStreams.readSource(data);
+ return new GetResult(true, versionValue.version(), source);
+ } catch (IOException e) {
+ // switched on us, read it from the reader
+ }
+ }
+ }
+ }
+
+ // not in the version map: load the version from the index (we know that we refresh on flush)
+ Searcher searcher = acquireSearcher("get");
+ final Versions.DocIdAndVersion docIdAndVersion;
+ try {
+ docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
+ } catch (Throwable e) {
+ searcher.release();
+ //TODO: A better exception goes here
+ throw new EngineException(shardId(), "Couldn't resolve version", e);
+ }
+
+ if (get.version() != Versions.MATCH_ANY && docIdAndVersion != null) {
+ if (get.versionType().isVersionConflict(docIdAndVersion.version, get.version())) {
+ searcher.release();
+ Uid uid = Uid.createUid(get.uid().text());
+ throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
+ }
+ }
+
+ if (docIdAndVersion != null) {
+ // don't release the searcher on this path, it is the responsibility of the caller to call GetResult.release
+ return new GetResult(searcher, docIdAndVersion);
+ } else {
+ searcher.release();
+ return GetResult.NOT_EXISTS;
+ }
+
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void create(Create create) throws EngineException {
+ rwl.readLock().lock();
+ try {
+ IndexWriter writer = this.indexWriter;
+ if (writer == null) {
+ throw new EngineClosedException(shardId, failedEngine);
+ }
+ innerCreate(create, writer);
+ dirty = true;
+ possibleMergeNeeded = true;
+ flushNeeded = true;
+ } catch (IOException e) {
+ throw new CreateFailedEngineException(shardId, create, e);
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new CreateFailedEngineException(shardId, create, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new CreateFailedEngineException(shardId, create, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ private void innerCreate(Create create, IndexWriter writer) throws IOException {
+ synchronized (dirtyLock(create.uid())) {
+ HashedBytesRef versionKey = versionKey(create.uid());
+ final long currentVersion;
+ VersionValue versionValue = versionMap.get(versionKey);
+ if (versionValue == null) {
+ currentVersion = loadCurrentVersionFromIndex(create.uid());
+ } else {
+ if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
+ currentVersion = Versions.NOT_FOUND; // deleted, and the GC grace period has elapsed -> treat as not found
+ } else {
+ currentVersion = versionValue.version();
+ }
+ }
+
+ // same logic as index
+ long updatedVersion;
+ long expectedVersion = create.version();
+ if (create.origin() == Operation.Origin.PRIMARY) {
+ if (create.versionType().isVersionConflict(currentVersion, expectedVersion)) {
+ throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
+ }
+ updatedVersion = create.versionType().updateVersion(currentVersion, expectedVersion);
+ } else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
+ // replicas treat the version as "external" since it comes from the primary ->
+ // a conflict is raised only if the version they got is lower than or equal to what they already know.
+ if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
+ if (create.origin() == Operation.Origin.RECOVERY) {
+ return;
+ } else {
+ throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
+ }
+ }
+ updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
+ }
+
+ // if the doc does not exist, or it exists but is not deleted
+ if (versionValue != null) {
+ if (!versionValue.delete()) {
+ if (create.origin() == Operation.Origin.RECOVERY) {
+ return;
+ } else {
+ throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
+ }
+ }
+ } else if (currentVersion != Versions.NOT_FOUND) {
+ // it's not deleted, it's already there
+ if (create.origin() == Operation.Origin.RECOVERY) {
+ return;
+ } else {
+ throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
+ }
+ }
+
+ create.version(updatedVersion);
+
+ if (create.docs().size() > 1) {
+ writer.addDocuments(create.docs(), create.analyzer());
+ } else {
+ writer.addDocument(create.docs().get(0), create.analyzer());
+ }
+ Translog.Location translogLocation = translog.add(new Translog.Create(create));
+
+ versionMap.put(versionKey, new VersionValue(updatedVersion, false, threadPool.estimatedTimeInMillis(), translogLocation));
+
+ indexingService.postCreateUnderLock(create);
+ }
+ }
+
+ @Override
+ public void index(Index index) throws EngineException {
+ rwl.readLock().lock();
+ try {
+ IndexWriter writer = this.indexWriter;
+ if (writer == null) {
+ throw new EngineClosedException(shardId, failedEngine);
+ }
+
+ innerIndex(index, writer);
+ dirty = true;
+ possibleMergeNeeded = true;
+ flushNeeded = true;
+ } catch (IOException e) {
+ throw new IndexFailedEngineException(shardId, index, e);
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new IndexFailedEngineException(shardId, index, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new IndexFailedEngineException(shardId, index, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ private void innerIndex(Index index, IndexWriter writer) throws IOException {
+ synchronized (dirtyLock(index.uid())) {
+ HashedBytesRef versionKey = versionKey(index.uid());
+ final long currentVersion;
+ VersionValue versionValue = versionMap.get(versionKey);
+ if (versionValue == null) {
+ currentVersion = loadCurrentVersionFromIndex(index.uid());
+ } else {
+ if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
+ currentVersion = Versions.NOT_FOUND; // deleted, and the GC grace period has elapsed -> treat as not found
+ } else {
+ currentVersion = versionValue.version();
+ }
+ }
+
+ long updatedVersion;
+ long expectedVersion = index.version();
+ if (index.origin() == Operation.Origin.PRIMARY) {
+ if (index.versionType().isVersionConflict(currentVersion, expectedVersion)) {
+ throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
+ }
+
+ updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
+
+ } else { // index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY
+ // replicas treat the version as "external", since it comes from the primary ->
+ // only raising a conflict if the version they received is lower than or equal to the one they already know.
+ if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
+ if (index.origin() == Operation.Origin.RECOVERY) {
+ return;
+ } else {
+ throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
+ }
+ }
+ updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
+ }
+
+ index.version(updatedVersion);
+ if (currentVersion == Versions.NOT_FOUND) {
+ // the document does not exist, so we can optimize for create
+ index.created(true);
+ if (index.docs().size() > 1) {
+ writer.addDocuments(index.docs(), index.analyzer());
+ } else {
+ writer.addDocument(index.docs().get(0), index.analyzer());
+ }
+ } else {
+ if (versionValue != null) {
+ index.created(versionValue.delete()); // we have a delete which is not GC'ed...
+ }
+ if (index.docs().size() > 1) {
+ writer.updateDocuments(index.uid(), index.docs(), index.analyzer());
+ } else {
+ writer.updateDocument(index.uid(), index.docs().get(0), index.analyzer());
+ }
+ }
+ Translog.Location translogLocation = translog.add(new Translog.Index(index));
+
+ versionMap.put(versionKey, new VersionValue(updatedVersion, false, threadPool.estimatedTimeInMillis(), translogLocation));
+
+ indexingService.postIndexUnderLock(index);
+ }
+ }
+
+ @Override
+ public void delete(Delete delete) throws EngineException {
+ rwl.readLock().lock();
+ try {
+ IndexWriter writer = this.indexWriter;
+ if (writer == null) {
+ throw new EngineClosedException(shardId, failedEngine);
+ }
+ innerDelete(delete, writer);
+ dirty = true;
+ possibleMergeNeeded = true;
+ flushNeeded = true;
+ } catch (IOException e) {
+ throw new DeleteFailedEngineException(shardId, delete, e);
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new DeleteFailedEngineException(shardId, delete, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new DeleteFailedEngineException(shardId, delete, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ private void innerDelete(Delete delete, IndexWriter writer) throws IOException {
+ synchronized (dirtyLock(delete.uid())) {
+ final long currentVersion;
+ HashedBytesRef versionKey = versionKey(delete.uid());
+ VersionValue versionValue = versionMap.get(versionKey);
+ if (versionValue == null) {
+ currentVersion = loadCurrentVersionFromIndex(delete.uid());
+ } else {
+ if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
+ currentVersion = Versions.NOT_FOUND; // deleted, and GC
+ } else {
+ currentVersion = versionValue.version();
+ }
+ }
+
+ long updatedVersion;
+ long expectedVersion = delete.version();
+ if (delete.origin() == Operation.Origin.PRIMARY) {
+ if (delete.versionType().isVersionConflict(currentVersion, expectedVersion)) {
+ throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion, expectedVersion);
+ }
+
+ updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
+
+ } else { // delete.origin() == Operation.Origin.REPLICA || delete.origin() == Operation.Origin.RECOVERY
+ // replicas treat the version as "external", since it comes from the primary ->
+ // only raising a conflict if the version they received is lower than or equal to the one they already know.
+ if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
+ if (delete.origin() == Operation.Origin.RECOVERY) {
+ return;
+ } else {
+ throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion - 1, expectedVersion);
+ }
+ }
+ updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
+ }
+
+ if (currentVersion == Versions.NOT_FOUND) {
+ // the doc does not exist and there are no prior deletes
+ delete.version(updatedVersion).found(false);
+ Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
+ versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
+ } else if (versionValue != null && versionValue.delete()) {
+ // a "delete on delete", in this case, we still increment the version, log it, and return that version
+ delete.version(updatedVersion).found(false);
+ Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
+ versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
+ } else {
+ delete.version(updatedVersion).found(true);
+ writer.deleteDocuments(delete.uid());
+ Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
+ versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
+ }
+
+ indexingService.postDeleteUnderLock(delete);
+ }
+ }
+
+ @Override
+ public void delete(DeleteByQuery delete) throws EngineException {
+ rwl.readLock().lock();
+ try {
+ IndexWriter writer = this.indexWriter;
+ if (writer == null) {
+ throw new EngineClosedException(shardId);
+ }
+
+ Query query;
+ if (delete.nested() && delete.aliasFilter() != null) {
+ query = new IncludeNestedDocsQuery(new XFilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter());
+ } else if (delete.nested()) {
+ query = new IncludeNestedDocsQuery(delete.query(), delete.parentFilter());
+ } else if (delete.aliasFilter() != null) {
+ query = new XFilteredQuery(delete.query(), delete.aliasFilter());
+ } else {
+ query = delete.query();
+ }
+
+ writer.deleteDocuments(query);
+ translog.add(new Translog.DeleteByQuery(delete));
+ dirty = true;
+ possibleMergeNeeded = true;
+ flushNeeded = true;
+ } catch (IOException e) {
+ throw new DeleteByQueryFailedEngineException(shardId, delete, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ //TODO: This is heavy, since we refresh, but we really have to...
+ refreshVersioningTable(System.currentTimeMillis());
+ }
+
+ @Override
+ public final Searcher acquireSearcher(String source) throws EngineException {
+ XSearcherManager manager = this.searcherManager;
+ if (manager == null) {
+ throw new EngineClosedException(shardId);
+ }
+ try {
+ IndexSearcher searcher = manager.acquire();
+ return newSearcher(source, searcher, manager);
+ } catch (Throwable ex) {
+ logger.error("failed to acquire searcher, source {}", ex, source);
+ throw new EngineException(shardId, ex.getMessage());
+ }
+ }
+
+ protected Searcher newSearcher(String source, IndexSearcher searcher, XSearcherManager manager) {
+ return new EngineSearcher(source, searcher, manager);
+ }
+
+ @Override
+ public boolean refreshNeeded() {
+ return dirty;
+ }
+
+ @Override
+ public boolean possibleMergeNeeded() {
+ return this.possibleMergeNeeded;
+ }
+
+ @Override
+ public void refresh(Refresh refresh) throws EngineException {
+ if (indexWriter == null) {
+ throw new EngineClosedException(shardId);
+ }
+ // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
+ // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
+ rwl.readLock().lock();
+ try {
+ // this engine always acts as if waitForOperations=true
+ IndexWriter currentWriter = indexWriter;
+ if (currentWriter == null) {
+ throw new EngineClosedException(shardId, failedEngine);
+ }
+ try {
+ // maybeRefresh will only allow one refresh to execute, and the rest will "pass through",
+ // but we want to make sure not to lose any refresh calls if one is taking time
+ synchronized (refreshMutex) {
+ if (dirty || refresh.force()) {
+ dirty = false;
+ searcherManager.maybeRefresh();
+ }
+ }
+ } catch (AlreadyClosedException e) {
+ // an index writer got replaced on us, ignore
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new RefreshFailedEngineException(shardId, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new RefreshFailedEngineException(shardId, e);
+ } catch (Throwable e) {
+ if (indexWriter == null) {
+ throw new EngineClosedException(shardId, failedEngine);
+ } else if (currentWriter != indexWriter) {
+ // an index writer got replaced on us, ignore
+ } else {
+ throw new RefreshFailedEngineException(shardId, e);
+ }
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void flush(Flush flush) throws EngineException {
+ ensureOpen();
+ if (flush.type() == Flush.Type.NEW_WRITER || flush.type() == Flush.Type.COMMIT_TRANSLOG) {
+ // check outside the lock as well so we can check without blocking on the write lock
+ if (onGoingRecoveries.get() > 0) {
+ throw new FlushNotAllowedEngineException(shardId, "recovery is in progress, flush [" + flush.type() + "] is not allowed");
+ }
+ }
+ int currentFlushing = flushing.incrementAndGet();
+ if (currentFlushing > 1 && !flush.waitIfOngoing()) {
+ flushing.decrementAndGet();
+ throw new FlushNotAllowedEngineException(shardId, "already flushing...");
+ }
+
+ flushLock.lock();
+ try {
+ if (flush.type() == Flush.Type.NEW_WRITER) {
+ rwl.writeLock().lock();
+ try {
+ ensureOpen();
+ if (onGoingRecoveries.get() > 0) {
+ throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
+ }
+ // disable refreshing, not dirty
+ dirty = false;
+ try {
+ // it's ok if the index writer has failed and is in an inconsistent state:
+ // we will get an exception on the next dirty operation, which will cause the shard
+ // to be allocated to a different node
+ indexWriter.close(false);
+ indexWriter = createWriter();
+
+ // a commit on a just-opened writer commits even if no changes were made to it;
+ // we rely on that for the commit data translog id key
+ if (flushNeeded || flush.force()) {
+ flushNeeded = false;
+ long translogId = translogIdGenerator.incrementAndGet();
+ indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
+ indexWriter.commit();
+ translog.newTranslog(translogId);
+ }
+
+ XSearcherManager current = this.searcherManager;
+ this.searcherManager = buildSearchManager(indexWriter);
+ try {
+ IOUtils.close(current);
+ } catch (Throwable t) {
+ logger.warn("Failed to close current SearcherManager", t);
+ }
+ refreshVersioningTable(threadPool.estimatedTimeInMillis());
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new FlushFailedEngineException(shardId, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new FlushFailedEngineException(shardId, e);
+ } catch (Throwable e) {
+ throw new FlushFailedEngineException(shardId, e);
+ }
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ } else if (flush.type() == Flush.Type.COMMIT_TRANSLOG) {
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ if (onGoingRecoveries.get() > 0) {
+ throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
+ }
+
+ if (flushNeeded || flush.force()) {
+ flushNeeded = false;
+ try {
+ long translogId = translogIdGenerator.incrementAndGet();
+ translog.newTransientTranslog(translogId);
+ indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
+ indexWriter.commit();
+ refreshVersioningTable(threadPool.estimatedTimeInMillis());
+ // we need to move transient to current only after we refresh,
+ // so items added to current will still be around for realtime get
+ // when the transient translog replaces it
+ translog.makeTransientCurrent();
+ } catch (OutOfMemoryError e) {
+ translog.revertTransient();
+ failEngine(e);
+ throw new FlushFailedEngineException(shardId, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new FlushFailedEngineException(shardId, e);
+ } catch (Throwable e) {
+ translog.revertTransient();
+ throw new FlushFailedEngineException(shardId, e);
+ }
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ } else if (flush.type() == Flush.Type.COMMIT) {
+ // note, it's ok to just commit without cleaning the translog; it's perfectly fine to replay a
+ // translog on an index that was opened on a committed point in time that is "in the future"
+ // of that translog
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ // we allow a *just* commit even if there is an ongoing recovery happening...
+ // it's ok to reuse the current translog id here; only a full flush creates a new translogId, and we are
+ // protected from other flushes by flushLock
+ try {
+ long translogId = translog.currentId();
+ indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
+ indexWriter.commit();
+ } catch (OutOfMemoryError e) {
+ translog.revertTransient();
+ failEngine(e);
+ throw new FlushFailedEngineException(shardId, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new FlushFailedEngineException(shardId, e);
+ } catch (Throwable e) {
+ throw new FlushFailedEngineException(shardId, e);
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ } else {
+ throw new ElasticsearchIllegalStateException("flush type [" + flush.type() + "] not supported");
+ }
+
+ // reread the last committed segment infos
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ readLastCommittedSegmentsInfo();
+ } catch (Throwable e) {
+ if (!closed) {
+ logger.warn("failed to read latest segment infos on flush", e);
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+
+ } finally {
+ flushLock.unlock();
+ flushing.decrementAndGet();
+ }
+ }
+
+ private void ensureOpen() {
+ if (indexWriter == null) {
+ throw new EngineClosedException(shardId, failedEngine);
+ }
+ }
+
+ private void refreshVersioningTable(long time) {
+ // we need to refresh in order to clear older version values
+ refresh(new Refresh("version_table").force(true));
+ for (Map.Entry<HashedBytesRef, VersionValue> entry : versionMap.entrySet()) {
+ HashedBytesRef uid = entry.getKey();
+ synchronized (dirtyLock(uid.bytes)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
+ VersionValue versionValue = versionMap.get(uid);
+ if (versionValue == null) {
+ continue;
+ }
+ if (time - versionValue.time() <= 0) {
+ continue; // it's a newer value, from during/after the refresh, so don't clear it
+ }
+ if (versionValue.delete()) {
+ if (enableGcDeletes && (time - versionValue.time()) > gcDeletesInMillis) {
+ versionMap.remove(uid);
+ }
+ } else {
+ versionMap.remove(uid);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void maybeMerge() throws EngineException {
+ if (!possibleMergeNeeded) {
+ return;
+ }
+ possibleMergeNeeded = false;
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ Merges.maybeMerge(indexWriter);
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new OptimizeFailedEngineException(shardId, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new OptimizeFailedEngineException(shardId, e);
+ } catch (Throwable e) {
+ throw new OptimizeFailedEngineException(shardId, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void optimize(Optimize optimize) throws EngineException {
+ if (optimize.flush()) {
+ flush(new Flush().force(true).waitIfOngoing(true));
+ }
+ if (optimizeMutex.compareAndSet(false, true)) {
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ if (optimize.onlyExpungeDeletes()) {
+ Merges.forceMergeDeletes(indexWriter, false);
+ } else if (optimize.maxNumSegments() <= 0) {
+ Merges.maybeMerge(indexWriter);
+ possibleMergeNeeded = false;
+ } else {
+ Merges.forceMerge(indexWriter, optimize.maxNumSegments(), false);
+ }
+ } catch (OutOfMemoryError e) {
+ failEngine(e);
+ throw new OptimizeFailedEngineException(shardId, e);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().contains("OutOfMemoryError")) {
+ failEngine(e);
+ }
+ throw new OptimizeFailedEngineException(shardId, e);
+ } catch (Throwable e) {
+ throw new OptimizeFailedEngineException(shardId, e);
+ } finally {
+ rwl.readLock().unlock();
+ optimizeMutex.set(false);
+ }
+ }
+ // wait for the merges outside of the read lock
+ if (optimize.waitForMerge()) {
+ indexWriter.waitForMerges();
+ }
+ if (optimize.flush()) {
+ flush(new Flush().force(true).waitIfOngoing(true));
+ }
+ }
+
+ @Override
+ public <T> T snapshot(SnapshotHandler<T> snapshotHandler) throws EngineException {
+ SnapshotIndexCommit snapshotIndexCommit = null;
+ Translog.Snapshot translogSnapshot = null;
+ rwl.readLock().lock();
+ try {
+ snapshotIndexCommit = deletionPolicy.snapshot();
+ translogSnapshot = translog.snapshot();
+ } catch (Throwable e) {
+ if (snapshotIndexCommit != null) {
+ snapshotIndexCommit.release();
+ }
+ throw new SnapshotFailedEngineException(shardId, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+
+ try {
+ return snapshotHandler.snapshot(snapshotIndexCommit, translogSnapshot);
+ } finally {
+ snapshotIndexCommit.release();
+ translogSnapshot.release();
+ }
+ }
+
+ @Override
+ public SnapshotIndexCommit snapshotIndex() throws EngineException {
+ rwl.readLock().lock();
+ try {
+ flush(new Flush().type(Flush.Type.COMMIT).waitIfOngoing(true));
+ ensureOpen();
+ return deletionPolicy.snapshot();
+ } catch (IOException e) {
+ throw new SnapshotFailedEngineException(shardId, e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void recover(RecoveryHandler recoveryHandler) throws EngineException {
+ // take a write lock here so recovery registration won't happen while a flush is in progress;
+ // this means that the next commits will not be allowed once the lock is released
+ rwl.writeLock().lock();
+ try {
+ if (closed) {
+ throw new EngineClosedException(shardId);
+ }
+ onGoingRecoveries.increment();
+ } finally {
+ rwl.writeLock().unlock();
+ }
+
+ SnapshotIndexCommit phase1Snapshot;
+ try {
+ phase1Snapshot = deletionPolicy.snapshot();
+ } catch (Throwable e) {
+ onGoingRecoveries.decrement();
+ throw new RecoveryEngineException(shardId, 1, "Snapshot failed", e);
+ }
+
+ try {
+ recoveryHandler.phase1(phase1Snapshot);
+ } catch (Throwable e) {
+ onGoingRecoveries.decrement();
+ phase1Snapshot.release();
+ if (closed) {
+ e = new EngineClosedException(shardId, e);
+ }
+ throw new RecoveryEngineException(shardId, 1, "Execution failed", e);
+ }
+
+ Translog.Snapshot phase2Snapshot;
+ try {
+ phase2Snapshot = translog.snapshot();
+ } catch (Throwable e) {
+ onGoingRecoveries.decrement();
+ phase1Snapshot.release();
+ if (closed) {
+ e = new EngineClosedException(shardId, e);
+ }
+ throw new RecoveryEngineException(shardId, 2, "Snapshot failed", e);
+ }
+
+ try {
+ recoveryHandler.phase2(phase2Snapshot);
+ } catch (Throwable e) {
+ onGoingRecoveries.decrement();
+ phase1Snapshot.release();
+ phase2Snapshot.release();
+ if (closed) {
+ e = new EngineClosedException(shardId, e);
+ }
+ throw new RecoveryEngineException(shardId, 2, "Execution failed", e);
+ }
+
+ rwl.writeLock().lock();
+ Translog.Snapshot phase3Snapshot = null;
+ try {
+ phase3Snapshot = translog.snapshot(phase2Snapshot);
+ recoveryHandler.phase3(phase3Snapshot);
+ } catch (Throwable e) {
+ throw new RecoveryEngineException(shardId, 3, "Execution failed", e);
+ } finally {
+ onGoingRecoveries.decrement();
+ rwl.writeLock().unlock();
+ phase1Snapshot.release();
+ phase2Snapshot.release();
+ if (phase3Snapshot != null) {
+ phase3Snapshot.release();
+ }
+ }
+ }
+
+ static {
+ assert Version.LUCENE_46.onOrAfter(Lucene.VERSION); // remove the Lucene3x special case below once on Lucene 4.7, which fixes the costly RAM usage estimation for Lucene 3.x segments, see LUCENE-5462
+ }
+
+ private long getReaderRamBytesUsed(AtomicReaderContext reader) {
+ final SegmentReader segmentReader = SegmentReaderUtils.segmentReader(reader.reader());
+ if (segmentReader.getSegmentInfo().info.getCodec() instanceof Lucene3xCodec) {
+ // https://issues.apache.org/jira/browse/LUCENE-5462
+ // RAM usage estimation is very costly on Lucene 3.x segments
+ return -1;
+ }
+ return segmentReader.ramBytesUsed();
+ }
+
+ @Override
+ public SegmentsStats segmentsStats() {
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ Searcher searcher = acquireSearcher("segments_stats");
+ try {
+ SegmentsStats stats = new SegmentsStats();
+ for (AtomicReaderContext reader : searcher.reader().leaves()) {
+ stats.add(1, getReaderRamBytesUsed(reader));
+ }
+ return stats;
+ } finally {
+ searcher.release();
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public List<Segment> segments() {
+ rwl.readLock().lock();
+ try {
+ ensureOpen();
+ Map<String, Segment> segments = new HashMap<String, Segment>();
+
+ // first, go over and compute the search ones...
+ Searcher searcher = acquireSearcher("segments");
+ try {
+ for (AtomicReaderContext reader : searcher.reader().leaves()) {
+ assert reader.reader() instanceof SegmentReader;
+ SegmentCommitInfo info = SegmentReaderUtils.segmentReader(reader.reader()).getSegmentInfo();
+ assert !segments.containsKey(info.info.name);
+ Segment segment = new Segment(info.info.name);
+ segment.search = true;
+ segment.docCount = reader.reader().numDocs();
+ segment.delDocCount = reader.reader().numDeletedDocs();
+ segment.version = info.info.getVersion();
+ segment.compound = info.info.getUseCompoundFile();
+ try {
+ segment.sizeInBytes = info.sizeInBytes();
+ } catch (IOException e) {
+ logger.trace("failed to get size for [{}]", e, info.info.name);
+ }
+ segment.memoryInBytes = getReaderRamBytesUsed(reader);
+ segments.put(info.info.name, segment);
+ }
+ } finally {
+ searcher.release();
+ }
+
+ // now, correlate or add the committed ones...
+ if (lastCommittedSegmentInfos != null) {
+ SegmentInfos infos = lastCommittedSegmentInfos;
+ for (SegmentCommitInfo info : infos) {
+ Segment segment = segments.get(info.info.name);
+ if (segment == null) {
+ segment = new Segment(info.info.name);
+ segment.search = false;
+ segment.committed = true;
+ segment.docCount = info.info.getDocCount();
+ segment.delDocCount = info.getDelCount();
+ segment.version = info.info.getVersion();
+ segment.compound = info.info.getUseCompoundFile();
+ try {
+ segment.sizeInBytes = info.sizeInBytes();
+ } catch (IOException e) {
+ logger.trace("failed to get size for [{}]", e, info.info.name);
+ }
+ segments.put(info.info.name, segment);
+ } else {
+ segment.committed = true;
+ }
+ }
+ }
+
+ Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
+ Arrays.sort(segmentsArr, new Comparator<Segment>() {
+ @Override
+ public int compare(Segment o1, Segment o2) {
+ return Long.compare(o1.getGeneration(), o2.getGeneration()); // avoid int-cast overflow on large generation deltas
+ }
+ });
+
+ // fill in the merges flag
+ Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
+ for (OnGoingMerge onGoingMerge : onGoingMerges) {
+ for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
+ for (Segment segment : segmentsArr) {
+ if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
+ segment.mergeId = onGoingMerge.getId();
+ break;
+ }
+ }
+ }
+ }
+
+ return Arrays.asList(segmentsArr);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ rwl.writeLock().lock();
+ try {
+ innerClose();
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ try {
+ // wait for recoveries to join and close all resources / IO streams
+ int ongoingRecoveries = onGoingRecoveries.awaitNoRecoveries(5000);
+ if (ongoingRecoveries > 0) {
+ logger.debug("Waiting for ongoing recoveries timed out on close currently ongoing disoveries: [{}]", ongoingRecoveries);
+ }
+ } catch (InterruptedException e) {
+ // ignore & restore interrupt
+ Thread.currentThread().interrupt();
+ }
+
+ }
+
+ class FailEngineOnMergeFailure implements MergeSchedulerProvider.FailureListener {
+ @Override
+ public void onFailedMerge(MergePolicy.MergeException e) {
+ failEngine(e);
+ }
+ }
+
+ private void failEngine(Throwable failure) {
+ synchronized (failedEngineMutex) {
+ if (failedEngine != null) {
+ return;
+ }
+ logger.warn("failed engine", failure);
+ failedEngine = failure;
+ for (FailedEngineListener listener : failedEngineListeners) {
+ listener.onFailedEngine(shardId, failure);
+ }
+ innerClose();
+ }
+ }
+
+ private void innerClose() {
+ if (closed) {
+ return;
+ }
+ indexSettingsService.removeListener(applySettings);
+ closed = true;
+ this.versionMap.clear();
+ this.failedEngineListeners.clear();
+ try {
+ try {
+ IOUtils.close(searcherManager);
+ } catch (Throwable t) {
+ logger.warn("Failed to close SearcherManager", t);
+ }
+ // no need to commit in this case! we snapshot before we close the shard, so the translog and everything else is sync'ed
+ if (indexWriter != null) {
+ try {
+ indexWriter.rollback();
+ } catch (AlreadyClosedException e) {
+ // ignore
+ }
+ }
+ } catch (Throwable e) {
+ logger.warn("failed to rollback writer on close", e);
+ } finally {
+ indexWriter = null;
+ }
+ }
+
+ private HashedBytesRef versionKey(Term uid) {
+ return new HashedBytesRef(uid.bytes());
+ }
+
+ private Object dirtyLock(BytesRef uid) {
+ int hash = DjbHashFunction.DJB_HASH(uid.bytes, uid.offset, uid.length);
+ // Math.abs(Integer.MIN_VALUE) returns Integer.MIN_VALUE, so we need to protect against it...
+ if (hash == Integer.MIN_VALUE) {
+ hash = 0;
+ }
+ return dirtyLocks[Math.abs(hash) % dirtyLocks.length];
+ }
+
+ private Object dirtyLock(Term uid) {
+ return dirtyLock(uid.bytes());
+ }
+
+ private long loadCurrentVersionFromIndex(Term uid) throws IOException {
+ Searcher searcher = acquireSearcher("load_version");
+ try {
+ return Versions.loadVersion(searcher.reader(), uid);
+ } finally {
+ searcher.release();
+ }
+ }
+
+ /**
+ * Returns whether a leaf reader comes from a merge (versus flush or addIndexes).
+ */
+ private static boolean isMergedSegment(AtomicReader reader) {
+ // We expect leaves to be segment readers
+ final Map<String, String> diagnostics = SegmentReaderUtils.segmentReader(reader).getSegmentInfo().info.getDiagnostics();
+ final String source = diagnostics.get(IndexWriter.SOURCE);
+ assert Arrays.asList(IndexWriter.SOURCE_ADDINDEXES_READERS, IndexWriter.SOURCE_FLUSH, IndexWriter.SOURCE_MERGE).contains(source) : "Unknown source " + source;
+ return IndexWriter.SOURCE_MERGE.equals(source);
+ }
+
+ private IndexWriter createWriter() throws IOException {
+ try {
+ // release locks when started
+ if (IndexWriter.isLocked(store.directory())) {
+ logger.warn("shard is locked, releasing lock");
+ IndexWriter.unlock(store.directory());
+ }
+ boolean create = !Lucene.indexExists(store.directory());
+ IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, analysisService.defaultIndexAnalyzer());
+ config.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
+ config.setIndexDeletionPolicy(deletionPolicy);
+ config.setMergeScheduler(mergeScheduler.newMergeScheduler());
+ MergePolicy mergePolicy = mergePolicyProvider.newMergePolicy();
+ // Give us the opportunity to upgrade old segments while performing
+ // background merges
+ mergePolicy = new IndexUpgraderMergePolicy(mergePolicy);
+ config.setMergePolicy(mergePolicy);
+ config.setSimilarity(similarityService.similarity());
+ config.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
+ config.setMaxThreadStates(indexConcurrency);
+ config.setCodec(codecService.codec(codecName));
+ /* We set this timeout to a highish value to work around
+ * the Lucene lock's default poll interval of 1000ms.
+ * We might need to poll multiple times here, but with a 1s
+ * poll interval this executes at most twice in combination
+ * with the default write lock timeout */
+ config.setWriteLockTimeout(5000);
+ config.setUseCompoundFile(this.compoundOnFlush);
+ // Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
+ // of the merge operation and won't slow down _refresh
+ config.setMergedSegmentWarmer(new IndexReaderWarmer() {
+ @Override
+ public void warm(AtomicReader reader) throws IOException {
+ try {
+ assert isMergedSegment(reader);
+ final Engine.Searcher searcher = new SimpleSearcher("warmer", new IndexSearcher(reader));
+ final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
+ if (warmer != null) warmer.warm(context);
+ } catch (Throwable t) {
+ // Don't fail a merge if the warm-up failed
+ if (!closed) {
+ logger.warn("Warm-up failed", t);
+ }
+ if (t instanceof Error) {
+ // assertion/out-of-memory error, don't ignore those
+ throw (Error) t;
+ }
+ }
+ }
+ });
+ return new IndexWriter(store.directory(), config);
+ } catch (LockObtainFailedException ex) {
+ boolean isLocked = IndexWriter.isLocked(store.directory());
+ logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
+ throw ex;
+ }
+ }
+
+ public static final String INDEX_INDEX_CONCURRENCY = "index.index_concurrency";
+ public static final String INDEX_COMPOUND_ON_FLUSH = "index.compound_on_flush";
+ public static final String INDEX_GC_DELETES = "index.gc_deletes";
+ public static final String INDEX_FAIL_ON_MERGE_FAILURE = "index.fail_on_merge_failure";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ long gcDeletesInMillis = settings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis)).millis();
+ if (gcDeletesInMillis != InternalEngine.this.gcDeletesInMillis) {
+ logger.info("updating index.gc_deletes from [{}] to [{}]", TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis), TimeValue.timeValueMillis(gcDeletesInMillis));
+ InternalEngine.this.gcDeletesInMillis = gcDeletesInMillis;
+ }
+
+ final boolean compoundOnFlush = settings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush);
+ if (compoundOnFlush != InternalEngine.this.compoundOnFlush) {
+ logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush, compoundOnFlush);
+ InternalEngine.this.compoundOnFlush = compoundOnFlush;
+ indexWriter.getConfig().setUseCompoundFile(compoundOnFlush);
+ }
+
+ int indexConcurrency = settings.getAsInt(INDEX_INDEX_CONCURRENCY, InternalEngine.this.indexConcurrency);
+ boolean failOnMergeFailure = settings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure);
+ String codecName = settings.get(INDEX_CODEC, InternalEngine.this.codecName);
+ final boolean codecBloomLoad = settings.getAsBoolean(CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter());
+ boolean requiresFlushing = false;
+ if (indexConcurrency != InternalEngine.this.indexConcurrency ||
+ !codecName.equals(InternalEngine.this.codecName) ||
+ failOnMergeFailure != InternalEngine.this.failOnMergeFailure ||
+ codecBloomLoad != codecService.isLoadBloomFilter()) {
+ rwl.readLock().lock();
+ try {
+ if (indexConcurrency != InternalEngine.this.indexConcurrency) {
+ logger.info("updating index.index_concurrency from [{}] to [{}]", InternalEngine.this.indexConcurrency, indexConcurrency);
+ InternalEngine.this.indexConcurrency = indexConcurrency;
+ // we have to flush in this case, since it only applies on a new index writer
+ requiresFlushing = true;
+ }
+ if (!codecName.equals(InternalEngine.this.codecName)) {
+ logger.info("updating index.codec from [{}] to [{}]", InternalEngine.this.codecName, codecName);
+ InternalEngine.this.codecName = codecName;
+ // we want to flush in this case, so the new codec will be reflected right away...
+ requiresFlushing = true;
+ }
+ if (failOnMergeFailure != InternalEngine.this.failOnMergeFailure) {
+ logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure, failOnMergeFailure);
+ InternalEngine.this.failOnMergeFailure = failOnMergeFailure;
+ }
+ if (codecBloomLoad != codecService.isLoadBloomFilter()) {
+ logger.info("updating {} from [{}] to [{}]", CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter(), codecBloomLoad);
+ codecService.setLoadBloomFilter(codecBloomLoad);
+ // we need to flush in this case, to load/unload the bloom filters
+ requiresFlushing = true;
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ if (requiresFlushing) {
+ flush(new Flush().type(Flush.Type.NEW_WRITER));
+ }
+ }
+ }
+ }
+
+ private XSearcherManager buildSearchManager(IndexWriter indexWriter) throws IOException {
+ return new XSearcherManager(indexWriter, true, searcherFactory);
+ }
+
+ class EngineSearcher implements Searcher {
+
+ private final String source;
+ private final IndexSearcher searcher;
+ private final XSearcherManager manager;
+ private final AtomicBoolean released;
+
+ private EngineSearcher(String source, IndexSearcher searcher, XSearcherManager manager) {
+ this.source = source;
+ this.searcher = searcher;
+ this.manager = manager;
+ this.released = new AtomicBoolean(false);
+ }
+
+ @Override
+ public String source() {
+ return this.source;
+ }
+
+ @Override
+ public IndexReader reader() {
+ return searcher.getIndexReader();
+ }
+
+ @Override
+ public IndexSearcher searcher() {
+ return searcher;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ if (!released.compareAndSet(false, true)) {
+ /* In general, searchers should never be released twice, as that would break reference counting. There is one rare case
+ * where it can happen though: when the request and the Reaper thread both try to release it within a very short window.
+ * That is why we only log a warning here instead of throwing an exception.
+ */
+ logger.warn("Searcher was released twice", new ElasticsearchIllegalStateException("Double release"));
+ return false;
+ }
+ try {
+ manager.release(searcher);
+ return true;
+ } catch (IOException e) {
+ return false;
+ } catch (AlreadyClosedException e) {
+ /* this one can happen if we already closed the
+ * underlying store / directory and we call into the
+ * IndexWriter to free up pending files. */
+ return false;
+ }
+ }
+ }
+
+ static class VersionValue {
+ private final long version;
+ private final boolean delete;
+ private final long time;
+ private final Translog.Location translogLocation;
+
+ VersionValue(long version, boolean delete, long time, Translog.Location translogLocation) {
+ this.version = version;
+ this.delete = delete;
+ this.time = time;
+ this.translogLocation = translogLocation;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public long version() {
+ return version;
+ }
+
+ public boolean delete() {
+ return delete;
+ }
+
+ public Translog.Location translogLocation() {
+ return this.translogLocation;
+ }
+ }
+
+ class SearchFactory extends SearcherFactory {
+
+ @Override
+ public IndexSearcher newSearcher(IndexReader reader) throws IOException {
+ IndexSearcher searcher = new IndexSearcher(reader);
+ searcher.setSimilarity(similarityService.similarity());
+ if (warmer != null) {
+ // we need to pass a custom searcher that does not release anything on Engine.Searcher release;
+ // we will release it explicitly
+ Searcher currentSearcher = null;
+ IndexSearcher newSearcher = null;
+ boolean closeNewSearcher = false;
+ try {
+ if (searcherManager == null) {
+ // fresh index writer, just warm all of it
+ newSearcher = searcher;
+ } else {
+ currentSearcher = acquireSearcher("search_factory");
+ // figure out the newSearcher, with only the new readers that are relevant for us
+ List<IndexReader> readers = Lists.newArrayList();
+ for (AtomicReaderContext newReaderContext : searcher.getIndexReader().leaves()) {
+ if (isMergedSegment(newReaderContext.reader())) {
+ // merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer
+ continue;
+ }
+ boolean found = false;
+ for (AtomicReaderContext currentReaderContext : currentSearcher.reader().leaves()) {
+ if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ readers.add(newReaderContext.reader());
+ }
+ }
+ if (!readers.isEmpty()) {
+ // we don't want to close the inner readers, just increase ref on them
+ newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false));
+ closeNewSearcher = true;
+ }
+ }
+
+ if (newSearcher != null) {
+ IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId,
+ new SimpleSearcher("warmer", newSearcher));
+ warmer.warm(context);
+ }
+ } catch (Throwable e) {
+ if (!closed) {
+ logger.warn("failed to prepare/warm", e);
+ }
+ } finally {
+ // no need to release the fullSearcher, nothing really is done...
+ if (currentSearcher != null) {
+ currentSearcher.release();
+ }
+ if (newSearcher != null && closeNewSearcher) {
+ IOUtils.closeWhileHandlingException(newSearcher.getIndexReader()); // ignore
+ }
+ }
+ }
+ return searcher;
+ }
+ }
+
+ private static final class RecoveryCounter {
+ private volatile int ongoingRecoveries = 0;
+
+ synchronized void increment() {
+ ongoingRecoveries++;
+ }
+
+ synchronized void decrement() {
+ ongoingRecoveries--;
+ if (ongoingRecoveries == 0) {
+ notifyAll(); // notify waiting threads - we only wait on ongoingRecoveries == 0
+ }
+ assert ongoingRecoveries >= 0 : "ongoingRecoveries must be >= 0 but was: " + ongoingRecoveries;
+ }
+
+ int get() {
+ // volatile read - no sync needed
+ return ongoingRecoveries;
+ }
+
+ synchronized int awaitNoRecoveries(long timeout) throws InterruptedException {
+ if (ongoingRecoveries > 0) { // no loop here - we either time out or we are done!
+ wait(timeout);
+ }
+ return ongoingRecoveries;
+ }
+ }
+}
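
The RecoveryCounter at the bottom of InternalEngine pairs a counter with an Object monitor so that close() can block until in-flight recoveries drain. A minimal standalone sketch of the same wait/notify drain pattern, grounded in the class above (DrainCounter and awaitZero are illustrative names, not part of the source):

    // Sketch of the drain pattern used by RecoveryCounter above.
    final class DrainCounter {
        private volatile int inFlight = 0;

        synchronized void increment() {
            inFlight++;
        }

        synchronized void decrement() {
            if (--inFlight == 0) {
                notifyAll(); // wake any thread blocked in awaitZero()
            }
        }

        // Returns the residual count: 0 if everything drained, > 0 on timeout.
        synchronized int awaitZero(long timeoutMillis) throws InterruptedException {
            if (inFlight > 0) {
                wait(timeoutMillis); // single wait, mirroring the timeout-or-done contract above
            }
            return inFlight;
        }
    }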
diff --git a/src/main/java/org/elasticsearch/index/engine/internal/InternalEngineModule.java b/src/main/java/org/elasticsearch/index/engine/internal/InternalEngineModule.java
new file mode 100644
index 0000000..667e13c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/internal/InternalEngineModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.engine.Engine;
+
+/**
+ *
+ */
+public class InternalEngineModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(Engine.class).to(InternalEngine.class).asEagerSingleton();
+ }
+}
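
InternalEngineModule follows the standard Guice module pattern: declare an interface-to-implementation binding in configure() and let the injector wire constructor dependencies. A toy illustration of the same pattern using stock Guice (Greeter and friends are invented for the example; the real Engine graph has many injected dependencies, and Elasticsearch ships Guice repackaged under org.elasticsearch.common.inject):

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;

    interface Greeter { String greet(); }

    class ConsoleGreeter implements Greeter {
        @Override public String greet() { return "hello"; }
    }

    class GreeterModule extends AbstractModule {
        @Override protected void configure() {
            // same shape as bind(Engine.class).to(InternalEngine.class).asEagerSingleton()
            bind(Greeter.class).to(ConsoleGreeter.class).asEagerSingleton();
        }
    }

    class GreeterDemo {
        public static void main(String[] args) {
            Injector injector = Guice.createInjector(new GreeterModule());
            System.out.println(injector.getInstance(Greeter.class).greet());
        }
    }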
diff --git a/src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngine.java b/src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngine.java
new file mode 100644
index 0000000..5a82e17
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngine.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ *
+ */
+public class InternalIndexEngine extends AbstractIndexComponent implements IndexEngine {
+
+ public InternalIndexEngine(Index index) {
+ this(index, EMPTY_SETTINGS);
+ }
+
+ @Inject
+ public InternalIndexEngine(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ }
+
+ @Override
+ public void close() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngineModule.java b/src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngineModule.java
new file mode 100644
index 0000000..d3f596d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/engine/internal/InternalIndexEngineModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.engine.IndexEngine;
+
+/**
+ *
+ */
+public class InternalIndexEngineModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexEngine.class).to(InternalIndexEngine.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/AbstractAtomicNumericFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/AbstractAtomicNumericFieldData.java
new file mode 100644
index 0000000..d326857
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/AbstractAtomicNumericFieldData.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ */
+public abstract class AbstractAtomicNumericFieldData implements AtomicNumericFieldData {
+
+ private boolean isFloat;
+
+ public AbstractAtomicNumericFieldData(boolean isFloat) {
+ this.isFloat = isFloat;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ if (isFloat) {
+ return new ScriptDocValues.Doubles(getDoubleValues());
+ } else {
+ return new ScriptDocValues.Longs(getLongValues());
+ }
+ }
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ if (isFloat) {
+ final DoubleValues values = getDoubleValues();
+ return new BytesValues(values.isMultiValued()) {
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return values.setDocument(docId);
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ scratch.copyChars(Double.toString(values.nextValue()));
+ return scratch;
+ }
+
+ @Override
+ public Order getOrder() {
+ return values.getOrder();
+ }
+
+ };
+ } else {
+ final LongValues values = getLongValues();
+ return new BytesValues(values.isMultiValued()) {
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return values.setDocument(docId);
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ scratch.copyChars(Long.toString(values.nextValue()));
+ return scratch;
+ }
+
+ @Override
+ public Order getOrder() {
+ return values.getOrder();
+ }
+ };
+ }
+ }
+}
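
The anonymous BytesValues subclasses above follow the field data iteration protocol spelled out later in BytesValues: position the cursor with setDocument(docId), then call nextValue() exactly the returned number of times. A minimal consumer sketch under that contract (BytesValuesWalker and collectValues are illustrative names; the deep copy is needed because nextValue() may return a shared scratch ref):

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.index.fielddata.BytesValues;

    import java.util.ArrayList;
    import java.util.List;

    final class BytesValuesWalker {
        // Copies all values of one document out of a BytesValues view.
        static List<BytesRef> collectValues(BytesValues values, int docId) {
            final int numValues = values.setDocument(docId);
            final List<BytesRef> out = new ArrayList<BytesRef>(numValues);
            for (int i = 0; i < numValues; i++) {
                // nextValue() may reuse an internal scratch BytesRef, so take a stable copy
                out.add(BytesRef.deepCopyOf(values.nextValue()));
            }
            return out;
        }
    }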
diff --git a/src/main/java/org/elasticsearch/index/fielddata/AbstractIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/AbstractIndexFieldData.java
new file mode 100644
index 0000000..34e1ef8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/AbstractIndexFieldData.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ */
+public abstract class AbstractIndexFieldData<FD extends AtomicFieldData> extends AbstractIndexComponent implements IndexFieldData<FD> {
+
+ private final FieldMapper.Names fieldNames;
+ protected final FieldDataType fieldDataType;
+ protected final IndexFieldDataCache cache;
+
+ public AbstractIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache) {
+ super(index, indexSettings);
+ this.fieldNames = fieldNames;
+ this.fieldDataType = fieldDataType;
+ this.cache = cache;
+ }
+
+ @Override
+ public FieldMapper.Names getFieldNames() {
+ return this.fieldNames;
+ }
+
+ @Override
+ public void clear() {
+ cache.clear(fieldNames.indexName());
+ }
+
+ @Override
+ public void clear(IndexReader reader) {
+ cache.clear(reader);
+ }
+
+ @Override
+ public final FD load(AtomicReaderContext context) {
+ try {
+ FD fd = cache.load(context, this);
+ return fd;
+ } catch (Throwable e) {
+ if (e instanceof ElasticsearchException) {
+ throw (ElasticsearchException) e;
+ } else {
+ throw new ElasticsearchException(e.getMessage(), e);
+ }
+ }
+ }
+
+ /**
+ * A {@code PerValueEstimator} is a helper interface that can be used to estimate
+ * the memory overhead for loading the data. Each field data
+ * implementation should implement its own {@code PerValueEstimator} if it
+ * intends to take advantage of the MemoryCircuitBreaker.
+ * <p/>
+ * Note that the .beforeLoad(...) and .afterLoad(...) methods must be
+ * manually called.
+ */
+ public interface PerValueEstimator {
+
+ /**
+ * @return the number of bytes for the given term
+ */
+ public long bytesPerValue(BytesRef term);
+
+ /**
+ * Execute any pre-loading estimations for the terms. May also
+ * optionally wrap a {@link TermsEnum} in a
+ * {@link RamAccountingTermsEnum}
+ * which will estimate the memory on a per-term basis.
+ *
+ * @param terms terms to be estimated
+ * @return A TermsEnum for the given terms
+ * @throws IOException if iterating the terms fails
+ */
+ public TermsEnum beforeLoad(Terms terms) throws IOException;
+
+ /**
+ * Possibly adjust a circuit breaker after field data has been loaded,
+ * now that the actual amount of memory used by the field data is known
+ *
+ * @param termsEnum terms that were loaded
+ * @param actualUsed actual field data memory usage
+ */
+ public void afterLoad(TermsEnum termsEnum, long actualUsed);
+ }
+}
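
The PerValueEstimator contract above is a three-step protocol: estimate the cost of each term, hook the terms enumeration before loading, and reconcile the estimate with actual usage afterwards. A minimal sketch of an implementation under those assumptions (FlatBytesEstimator and the fixed overhead constant are illustrative, and the circuit-breaker reconciliation is elided to a no-op):

    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.index.fielddata.AbstractIndexFieldData;

    import java.io.IOException;

    // Illustrative PerValueEstimator: charges each term its byte length plus a
    // fixed per-entry overhead, and performs no post-load adjustment.
    final class FlatBytesEstimator implements AbstractIndexFieldData.PerValueEstimator {

        private static final long PER_TERM_OVERHEAD = 32; // assumed bookkeeping cost per term

        @Override
        public long bytesPerValue(BytesRef term) {
            return term == null ? 0 : term.length + PER_TERM_OVERHEAD;
        }

        @Override
        public TermsEnum beforeLoad(Terms terms) throws IOException {
            // A real implementation would wrap this enum in RamAccountingTermsEnum
            // so each term is charged to the MemoryCircuitBreaker as it streams by.
            return terms.iterator(null);
        }

        @Override
        public void afterLoad(TermsEnum termsEnum, long actualUsed) {
            // No-op here; a real estimator would correct the breaker by the
            // difference between the streamed estimate and actualUsed.
        }
    }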
diff --git a/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java
new file mode 100644
index 0000000..9845608
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+/**
+ * A thread-safe, {@link org.apache.lucene.index.AtomicReader}-level cache of the field data.
+ */
+public interface AtomicFieldData<Script extends ScriptDocValues> {
+
+ /**
+ * If this method returns false, this means that no document has multiple values. However this method may return true even if all
+ * documents are single-valued. So this method is useful for performing optimizations when the single-value case makes the problem
+ * simpler but cannot be used to actually check whether this instance is multi-valued.
+ */
+ boolean isMultiValued();
+
+ /**
+ * Whether the values are ordered (in ascending order).
+ */
+ boolean isValuesOrdered();
+
+ /**
+ * The number of docs in this field data.
+ */
+ int getNumDocs();
+
+ /**
+ * An upper limit of the number of unique values in this atomic field data.
+ */
+ long getNumberUniqueValues();
+
+ /**
+ * Size (in bytes) of memory used by this field data.
+ */
+ long getMemorySizeInBytes();
+
+ /**
+ * Use a non-thread-safe (lightweight) view of the values as bytes.
+ *
+ * @param needsHashes if <code>true</code> the implementation will use pre-built hashes if
+ * {@link org.elasticsearch.index.fielddata.BytesValues#currentValueHash()} is used. if no hashes
+ * are used <code>false</code> should be passed instead.
+ *
+ */
+ BytesValues getBytesValues(boolean needsHashes);
+
+ /**
+ * Returns a "scripting" based values.
+ */
+ Script getScriptValues();
+
+ /**
+ * Close the field data.
+ */
+ void close();
+
+ interface WithOrdinals<Script extends ScriptDocValues> extends AtomicFieldData<Script> {
+
+ /**
+ * Use a non-thread-safe (lightweight) view of the values as bytes.
+ * @param needsHashes if <code>true</code> the implementation will use pre-built hashes when {@link org.elasticsearch.index.fielddata.BytesValues#currentValueHash()} is used
+ */
+ BytesValues.WithOrdinals getBytesValues(boolean needsHashes);
+ }
+
+ /**
+ * This enum provides information about the order of the values for
+ * a given document. For instance {@link BytesValues} by default
+ * return values in {@link #BYTES} order but if the interface
+ * wraps a numeric variant the sort order might change to {@link #NUMERIC}.
+ * In that case the values might not be returned in byte sort order but in numeric
+ * order instead while maintaining the property of <tt>N < N+1</tt> during the
+ * value iterations.
+ *
+ * @see org.elasticsearch.index.fielddata.BytesValues#getOrder()
+ * @see org.elasticsearch.index.fielddata.DoubleValues#getOrder()
+ * @see org.elasticsearch.index.fielddata.LongValues#getOrder()
+ */
+ public enum Order {
+ /**
+ * Denotes byte sort order
+ */
+ BYTES,
+ /**
+ * Denotes numeric sort order
+ */
+ NUMERIC,
+ /**
+ * Denotes custom sort order
+ */
+ CUSTOM,
+ /**
+ * Denotes no sort order
+ */
+ NONE
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/AtomicGeoPointFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/AtomicGeoPointFieldData.java
new file mode 100644
index 0000000..174b3d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/AtomicGeoPointFieldData.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+
+/**
+ */
+public abstract class AtomicGeoPointFieldData<Script extends ScriptDocValues> implements AtomicFieldData<Script> {
+
+ public abstract GeoPointValues getGeoPointValues();
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ final GeoPointValues values = getGeoPointValues();
+ return new BytesValues(values.isMultiValued()) {
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return values.setDocument(docId);
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ GeoPoint value = values.nextValue();
+ scratch.copyChars(GeoHashUtils.encode(value.lat(), value.lon()));
+ return scratch;
+ }
+
+ };
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/AtomicNumericFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/AtomicNumericFieldData.java
new file mode 100644
index 0000000..ef67a1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/AtomicNumericFieldData.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+/**
+ */
+public interface AtomicNumericFieldData extends AtomicFieldData<ScriptDocValues> {
+
+ LongValues getLongValues();
+
+ DoubleValues getDoubleValues();
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/BytesValues.java b/src/main/java/org/elasticsearch/index/fielddata/BytesValues.java
new file mode 100644
index 0000000..2427bb3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/BytesValues.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+
+/**
+ * A stateful, lightweight, per-document set of <code>byte[]</code> values.
+ *
+ * To iterate over values in a document use the following pattern:
+ * <pre>
+ * BytesValues values = ..;
+ * final int numValues = values.setDocument(docId);
+ * for (int i = 0; i < numValues; i++) {
+ * BytesRef value = values.nextValue();
+ * // process value
+ * }
+ * </pre>
+ */
+public abstract class BytesValues {
+
+ /**
+ * An empty {@link BytesValues} instance.
+ */
+ public static final BytesValues EMPTY = new Empty();
+
+ private final boolean multiValued;
+
+ protected final BytesRef scratch = new BytesRef();
+
+ protected int docId = -1;
+
+ /**
+ * Creates a new {@link BytesValues} instance
+ * @param multiValued <code>true</code> iff this instance is multivalued. Otherwise <code>false</code>.
+ */
+ protected BytesValues(boolean multiValued) {
+ this.multiValued = multiValued;
+ }
+
+ /**
+ * Whether at least one of the documents in this field data has multiple values.
+ */
+ public final boolean isMultiValued() {
+ return multiValued;
+ }
+
+ /**
+ * Converts the current shared {@link BytesRef} to a stable instance. Note,
+ * this call makes the bytes safe for *reads*, not writes (into the same BytesRef). For example,
+ * it makes it safe to be placed in a map.
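+ * An illustrative use (hypothetical <code>map</code> and <code>count</code>):
+ * <pre>
+ * BytesRef stable = values.copyShared();
+ * map.put(stable, count); // safe to use as a map key
+ * </pre>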
+ */
+ public BytesRef copyShared() {
+ return BytesRef.deepCopyOf(scratch);
+ }
+
+ /**
+ * Sets iteration to the specified docID and returns the number of
+ * values for this document ID.
+ * @param docId document ID
+ *
+ * @see #nextValue()
+ */
+ public abstract int setDocument(int docId);
+
+ /**
+ * Returns the next value for the current docID set to {@link #setDocument(int)}.
+ * This method should only be called <tt>N</tt> times where <tt>N</tt> is the number
+ * returned from {@link #setDocument(int)}. If called more than <tt>N</tt> times the behavior
+ * is undefined. This interface guarantees that the values are returned in order.
+ * <p>
+ * If this instance returns ordered values the <tt>Nth</tt> value is strictly less than the <tt>N+1</tt> value with
+ * respect to the {@link AtomicFieldData.Order} returned from {@link #getOrder()}. If this instance returns
+ * <i>unordered</i> values {@link #getOrder()} must return {@link AtomicFieldData.Order#NONE}.
+ * Note: the values returned are de-duplicated, only unique values are returned.
+ * </p>
+ *
+ * Note: the returned {@link BytesRef} might be shared across invocations.
+ *
+ * @return the next value for the current docID set to {@link #setDocument(int)}.
+ */
+ public abstract BytesRef nextValue();
+
+ /**
+ * Returns the hash value of the previously returned shared {@link BytesRef} instances.
+ *
+ * @return the hash value of the previously returned shared {@link BytesRef} instances.
+ */
+ public int currentValueHash() {
+ return scratch.hashCode();
+ }
+
+ /**
+ * Returns the order the values are returned from {@link #nextValue()}.
+ * <p> Note: {@link BytesValues} have {@link AtomicFieldData.Order#BYTES} by default.</p>
+ */
+ public AtomicFieldData.Order getOrder() {
+ return AtomicFieldData.Order.BYTES;
+ }
+
+ /**
+ * Ordinal based {@link BytesValues}.
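+ * An illustrative ordinal-based iteration (assumes a valid <code>docId</code>):
+ * <pre>
+ * BytesValues.WithOrdinals values = ..;
+ * final int numOrds = values.setDocument(docId);
+ * for (int i = 0; i < numOrds; i++) {
+ *     BytesRef term = values.getValueByOrd(values.ordinals().nextOrd());
+ * }
+ * </pre>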
+ */
+ public static abstract class WithOrdinals extends BytesValues {
+
+ protected final Docs ordinals;
+
+ protected WithOrdinals(Ordinals.Docs ordinals) {
+ super(ordinals.isMultiValued());
+ this.ordinals = ordinals;
+ }
+
+ /**
+ * Returns the associated ordinals instance.
+ * @return the associated ordinals instance.
+ */
+ public Ordinals.Docs ordinals() {
+ return ordinals;
+ }
+
+ /**
+ * Returns the value for the given ordinal.
+ * @param ord the ordinal to lookup.
+ * @return a shared {@link BytesRef} instance holding the value associated
+ * with the given ordinal or <code>null</code> if ordinal is <tt>0</tt>
+ */
+ public abstract BytesRef getValueByOrd(long ord);
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ int length = ordinals.setDocument(docId);
+ assert (ordinals.getOrd(docId) != Ordinals.MISSING_ORDINAL) == length > 0 : "Doc: [" + docId + "] hasValue: [" + (ordinals.getOrd(docId) != Ordinals.MISSING_ORDINAL) + "] but length is [" + length + "]";
+ return length;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ assert docId != -1;
+ return getValueByOrd(ordinals.nextOrd());
+ }
+ }
+
+ /**
+ * An empty {@link BytesValues} implementation
+ */
+ private final static class Empty extends BytesValues {
+
+ Empty() {
+ super(false);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return 0;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ throw new ElasticsearchIllegalStateException("Empty BytesValues has no next value");
+ }
+
+ @Override
+ public int currentValueHash() {
+ throw new ElasticsearchIllegalStateException("Empty BytesValues has no hash for the current Value");
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/DoubleValues.java b/src/main/java/org/elasticsearch/index/fielddata/DoubleValues.java
new file mode 100644
index 0000000..eb47b78
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/DoubleValues.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+
+/**
+ * A stateful, lightweight, per-document set of <code>double</code> values.
+ *
+ * To iterate over values in a document use the following pattern:
+ * <pre>
+ * DoubleValues values = ..;
+ * final int numValues = values.setDocument(docId);
+ * for (int i = 0; i < numValues; i++) {
+ * double value = values.nextValue();
+ * // process value
+ * }
+ * </pre>
+ */
+public abstract class DoubleValues {
+
+ /**
+ * An empty {@link DoubleValues} instance.
+ */
+ public static final DoubleValues EMPTY = new Empty();
+
+ private final boolean multiValued;
+
+ protected int docId;
+
+ /**
+ * Creates a new {@link DoubleValues} instance
+ * @param multiValued <code>true</code> iff this instance is multivalued. Otherwise <code>false</code>.
+ */
+ protected DoubleValues(boolean multiValued) {
+ this.multiValued = multiValued;
+ }
+
+ /**
+ * Whether at least one of the documents in this field data has multiple values.
+ */
+ public final boolean isMultiValued() {
+ return multiValued;
+ }
+
+ /**
+ * Sets iteration to the specified docID and returns the number of
+ * values for this document ID.
+ * @param docId document ID
+ *
+ * @see #nextValue()
+ */
+ public abstract int setDocument(int docId);
+
+ /**
+ * Returns the next value for the current docID set to {@link #setDocument(int)}.
+ * This method should only be called <tt>N</tt> times where <tt>N</tt> is the number
+ * returned from {@link #setDocument(int)}. If called more than <tt>N</tt> times the behavior
+ * is undefined.
+ * <p>
+ * If this instance returns ordered values the <tt>Nth</tt> value is strictly less than the <tt>N+1</tt> value with
+ * respect to the {@link AtomicFieldData.Order} returned from {@link #getOrder()}. If this instance returns
+ * <i>unordered</i> values {@link #getOrder()} must return {@link AtomicFieldData.Order#NONE}.
+ * Note: the values returned are de-duplicated, only unique values are returned.
+ * </p>
+ *
+ * @return the next value for the current docID set to {@link #setDocument(int)}.
+ */
+ public abstract double nextValue();
+
+ /**
+ * Returns the order the values are returned from {@link #nextValue()}.
+ * <p> Note: {@link DoubleValues} have {@link AtomicFieldData.Order#NUMERIC} by default.</p>
+ */
+ public AtomicFieldData.Order getOrder() {
+ return AtomicFieldData.Order.NUMERIC;
+ }
+
+ /**
+ * Ordinal based {@link DoubleValues}.
+ */
+ public static abstract class WithOrdinals extends DoubleValues {
+
+ protected final Docs ordinals;
+
+ protected WithOrdinals(Ordinals.Docs ordinals) {
+ super(ordinals.isMultiValued());
+ this.ordinals = ordinals;
+ }
+
+ /**
+ * Returns the associated ordinals instance.
+ * @return the associated ordinals instance.
+ */
+ public Docs ordinals() {
+ return ordinals;
+ }
+
+ /**
+ * Returns the value for the given ordinal.
+ * @param ord the ordinal to lookup.
+ * @return a double value associated with the given ordinal.
+ */
+ public abstract double getValueByOrd(long ord);
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return ordinals.setDocument(docId);
+ }
+
+ @Override
+ public double nextValue() {
+ return getValueByOrd(ordinals.nextOrd());
+ }
+ }
+
+ /**
+ * An empty {@link DoubleValues} implementation
+ */
+ private static class Empty extends DoubleValues {
+
+ Empty() {
+ super(false);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return 0;
+ }
+
+ @Override
+ public double nextValue() {
+ throw new ElasticsearchIllegalStateException("Empty DoubleValues has no next value");
+ }
+ }
+
+ /**
+ * Wraps a {@link LongValues} instance as {@link DoubleValues}.
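+ * An illustrative use (assumes an existing <code>longValues</code> instance):
+ * <pre>
+ * DoubleValues doubles = DoubleValues.asDoubleValues(longValues);
+ * final int numValues = doubles.setDocument(docId);
+ * </pre>
+ */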
+ public static DoubleValues asDoubleValues(final LongValues values) {
+ return new DoubleValues(values.isMultiValued()) {
+
+ @Override
+ public int setDocument(int docId) {
+ return values.setDocument(docId);
+ }
+
+ @Override
+ public double nextValue() {
+ return (double) values.nextValue();
+ }
+
+ };
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java b/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java
new file mode 100644
index 0000000..5abba97
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ * Statistics about field data memory usage and evictions, in total and optionally per field.
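+ * The XContent emitted by {@link #toXContent} has roughly the following shape (values illustrative):
+ * <pre>
+ * "fielddata" : {
+ *   "memory_size_in_bytes" : 1024,
+ *   "evictions" : 0,
+ *   "fields" : { "my_field" : { "memory_size_in_bytes" : 512 } }
+ * }
+ * </pre>
+ */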
+public class FieldDataStats implements Streamable, ToXContent {
+
+ long memorySize;
+ long evictions;
+ @Nullable
+ ObjectLongOpenHashMap<String> fields;
+
+ public FieldDataStats() {
+
+ }
+
+ public FieldDataStats(long memorySize, long evictions, @Nullable ObjectLongOpenHashMap<String> fields) {
+ this.memorySize = memorySize;
+ this.evictions = evictions;
+ this.fields = fields;
+ }
+
+ public void add(FieldDataStats stats) {
+ this.memorySize += stats.memorySize;
+ this.evictions += stats.evictions;
+ if (stats.fields != null) {
+ if (fields == null) fields = new ObjectLongOpenHashMap<String>();
+ final boolean[] states = stats.fields.allocated;
+ final Object[] keys = stats.fields.keys;
+ final long[] values = stats.fields.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ fields.addTo((String) keys[i], values[i]);
+ }
+ }
+ }
+ }
+
+ public long getMemorySizeInBytes() {
+ return this.memorySize;
+ }
+
+ public ByteSizeValue getMemorySize() {
+ return new ByteSizeValue(memorySize);
+ }
+
+ public long getEvictions() {
+ return this.evictions;
+ }
+
+ @Nullable
+ public ObjectLongOpenHashMap<String> getFields() {
+ return fields;
+ }
+
+ public static FieldDataStats readFieldDataStats(StreamInput in) throws IOException {
+ FieldDataStats stats = new FieldDataStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ memorySize = in.readVLong();
+ evictions = in.readVLong();
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ fields = new ObjectLongOpenHashMap<String>(size);
+ for (int i = 0; i < size; i++) {
+ fields.put(in.readString(), in.readVLong());
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(memorySize);
+ out.writeVLong(evictions);
+ if (fields == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(fields.size());
+ final boolean[] states = fields.allocated;
+ final Object[] keys = fields.keys;
+ final long[] values = fields.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ out.writeString((String) keys[i]);
+ out.writeVLong(values[i]);
+ }
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.FIELDDATA);
+ builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
+ builder.field(Fields.EVICTIONS, getEvictions());
+ if (fields != null) {
+ builder.startObject(Fields.FIELDS);
+ final boolean[] states = fields.allocated;
+ final Object[] keys = fields.keys;
+ final long[] values = fields.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ builder.startObject((String) keys[i], XContentBuilder.FieldCaseConversion.NONE);
+ builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, values[i]);
+ builder.endObject();
+ }
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString FIELDDATA = new XContentBuilderString("fielddata");
+ static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
+ static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
+ static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
+ static final XContentBuilderString FIELDS = new XContentBuilderString("fields");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java b/src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java
new file mode 100644
index 0000000..559dde8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.FieldMapper.Loading;
+
+/**
+ * The field data type of a field, carrying its type name, loading mode and settings.
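+ * An illustrative construction with an explicit format:
+ * <pre>
+ * FieldDataType type = new FieldDataType("string",
+ *         ImmutableSettings.settingsBuilder().put(FieldDataType.FORMAT_KEY, "fst"));
+ * </pre>
+ */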
+public class FieldDataType {
+
+ public static final String FORMAT_KEY = "format";
+ public static final String DOC_VALUES_FORMAT_VALUE = "doc_values";
+
+ private final String type;
+ private final String typeFormat;
+ private final Loading loading;
+ private final Settings settings;
+
+ public FieldDataType(String type) {
+ this(type, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public FieldDataType(String type, Settings.Builder builder) {
+ this(type, builder.build());
+ }
+
+ public FieldDataType(String type, Settings settings) {
+ this.type = type;
+ this.typeFormat = "index.fielddata.type." + type + "." + FORMAT_KEY;
+ this.settings = settings;
+ final String loading = settings.get(Loading.KEY);
+ this.loading = Loading.parse(loading, Loading.LAZY);
+ }
+
+ public String getType() {
+ return this.type;
+ }
+
+ public Settings getSettings() {
+ return this.settings;
+ }
+
+ public Loading getLoading() {
+ return loading;
+ }
+
+ public String getFormat(Settings indexSettings) {
+ String format = settings.get(FORMAT_KEY);
+ if (format == null && indexSettings != null) {
+ format = indexSettings.get(typeFormat);
+ }
+ return format;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ FieldDataType that = (FieldDataType) o;
+
+ if (!settings.equals(that.settings)) return false;
+ if (!type.equals(that.type)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = type.hashCode();
+ result = 31 * result + settings.hashCode();
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/FilterDoubleValues.java b/src/main/java/org/elasticsearch/index/fielddata/FilterDoubleValues.java
new file mode 100644
index 0000000..dc020a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/FilterDoubleValues.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+/**
+ * <code>FilterDoubleValues</code> contains another {@link DoubleValues}, which it
+ * uses as its basic source of data, possibly transforming the data along the
+ * way or providing additional functionality.
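+ * A minimal illustrative subclass that scales every value (hypothetical):
+ * <pre>
+ * DoubleValues scaled = new FilterDoubleValues(doubleValues) {
+ *     public double nextValue() {
+ *         return super.nextValue() * 2.0;
+ *     }
+ * };
+ * </pre>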
+ */
+public abstract class FilterDoubleValues extends DoubleValues {
+
+ protected final DoubleValues delegate;
+
+ protected FilterDoubleValues(DoubleValues delegate) {
+ super(delegate.isMultiValued());
+ this.delegate = delegate;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return delegate.setDocument(docId);
+ }
+
+ @Override
+ public double nextValue() {
+ return delegate.nextValue();
+ }
+
+ @Override
+ public AtomicFieldData.Order getOrder() {
+ return delegate.getOrder();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/FilterLongValues.java b/src/main/java/org/elasticsearch/index/fielddata/FilterLongValues.java
new file mode 100644
index 0000000..0886466
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/FilterLongValues.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+/**
+ * <code>FilterLongValues</code> contains another {@link LongValues}, which it
+ * uses as its basic source of data, possibly transforming the data along the
+ * way or providing additional functionality.
+ */
+public abstract class FilterLongValues extends LongValues {
+
+ protected final LongValues delegate;
+
+ protected FilterLongValues(LongValues delegate) {
+ super(delegate.isMultiValued());
+ this.delegate = delegate;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return delegate.setDocument(docId);
+ }
+
+ @Override
+ public long nextValue() {
+ return delegate.nextValue();
+ }
+
+ @Override
+ public AtomicFieldData.Order getOrder() {
+ return delegate.getOrder();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/GeoPointValues.java b/src/main/java/org/elasticsearch/index/fielddata/GeoPointValues.java
new file mode 100644
index 0000000..fc86638
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/GeoPointValues.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.geo.GeoPoint;
+
+/**
+ * A stateful, lightweight, per-document set of {@link GeoPoint} values.
+ * To iterate over values in a document use the following pattern:
+ * <pre>
+ * GeoPointValues values = ..;
+ * final int numValues = values.setDocument(docId);
+ * for (int i = 0; i < numValues; i++) {
+ * GeoPoint value = values.nextValue();
+ * // process value
+ * }
+ * </pre>
+ */
+public abstract class GeoPointValues {
+
+ /**
+ * An empty {@link GeoPointValues} instance.
+ */
+ public static final GeoPointValues EMPTY = new Empty();
+
+ private final boolean multiValued;
+
+ protected int docId = -1;
+
+ /**
+ * Creates a new {@link GeoPointValues} instance
+ * @param multiValued <code>true</code> iff this instance is multivalued. Otherwise <code>false</code>.
+ */
+ protected GeoPointValues(boolean multiValued) {
+ this.multiValued = multiValued;
+ }
+
+ /**
+ * Whether at least one of the documents in this field data has multiple values.
+ */
+ public final boolean isMultiValued() {
+ return multiValued;
+ }
+
+ /**
+ * Sets iteration to the specified docID and returns the number of
+ * values for this document ID.
+ * @param docId document ID
+ *
+ * @see #nextValue()
+ */
+ public abstract int setDocument(int docId);
+
+ /**
+ * Returns the next value for the current docID set to {@link #setDocument(int)}.
+ * This method should only be called <tt>N</tt> times where <tt>N</tt> is the number
+ * returned from {@link #setDocument(int)}. If called more than <tt>N</tt> times the behavior
+ * is undefined.
+ * <p>
+ * If this instance returns ordered values the <tt>Nth</tt> value is strictly less than the <tt>N+1</tt> value with
+ * respect to the {@link AtomicFieldData.Order} returned from {@link #getOrder()}. If this instance returns
+ * <i>unordered</i> values {@link #getOrder()} must return {@link AtomicFieldData.Order#NONE}.
+ * Note: the values returned are de-duplicated, only unique values are returned.
+ * </p>
+ *
+ * Note: the returned {@link GeoPoint} might be shared across invocations.
+ *
+ * @return the next value for the current docID set to {@link #setDocument(int)}.
+ */
+ public abstract GeoPoint nextValue();
+
+ /**
+ * Returns the order the values are returned from {@link #nextValue()}.
+ * <p> Note: {@link GeoPointValues} have {@link AtomicFieldData.Order#NONE} by default.</p>
+ */
+ public AtomicFieldData.Order getOrder() {
+ return AtomicFieldData.Order.NONE;
+ }
+
+ /**
+ * An empty {@link GeoPointValues} implementation
+ */
+ private static final class Empty extends GeoPointValues {
+ protected Empty() {
+ super(false);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return 0;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ throw new ElasticsearchIllegalStateException("Empty GeoPointValues has no next value");
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java
new file mode 100644
index 0000000..bc08471
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.FieldComparatorSource;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexComponent;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * Index-level view of the field data for a single field, from which per-segment
+ * {@link AtomicFieldData} instances are loaded.
+ */
+public interface IndexFieldData<FD extends AtomicFieldData> extends IndexComponent {
+
+ public static class CommonSettings {
+
+ /**
+ * Whether the case where all documents are single-valued should be optimized by removing ords. Note, this
+ * optimization might not be supported by all field data implementations; the ones that do support it should
+ * consult this method to check whether it should be applied.
+ */
+ public static boolean removeOrdsOnSingleValue(FieldDataType fieldDataType) {
+ return !"always".equals(fieldDataType.getSettings().get("ordinals"));
+ }
+ }
+
+ /**
+ * The field name.
+ */
+ FieldMapper.Names getFieldNames();
+
+ /**
+ * Are the values ordered? (in ascending manner).
+ */
+ boolean valuesOrdered();
+
+ /**
+ * Loads the atomic field data for the reader, possibly cached.
+ */
+ FD load(AtomicReaderContext context);
+
+ /**
+ * Loads directly the atomic field data for the reader, ignoring any caching involved.
+ */
+ FD loadDirect(AtomicReaderContext context) throws Exception;
+
+ /**
+ * Comparator used for sorting.
+ */
+ XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode);
+
+ /**
+ * Clears any resources associated with this field data.
+ */
+ void clear();
+
+ void clear(IndexReader reader);
+
+ // we need this extended source when we have custom comparators to reuse our field data
+ // in this case, we need to reduce the type that will be used when search results are reduced
+ // on another node (we don't have the custom source there...)
+ public abstract class XFieldComparatorSource extends FieldComparatorSource {
+
+ /** UTF-8 term containing a single code point: {@link Character#MAX_CODE_POINT} which will compare greater than all other index terms
+ * since {@link Character#MAX_CODE_POINT} is a noncharacter and thus shouldn't appear in an index term. */
+ public static final BytesRef MAX_TERM;
+ static {
+ MAX_TERM = new BytesRef();
+ final char[] chars = Character.toChars(Character.MAX_CODE_POINT);
+ UnicodeUtil.UTF16toUTF8(chars, 0, chars.length, MAX_TERM);
+ }
+
+ /** Whether missing values should be sorted first. */
+ protected final boolean sortMissingFirst(Object missingValue) {
+ return "_first".equals(missingValue);
+ }
+
+ /** Whether missing values should be sorted last, this is the default. */
+ protected final boolean sortMissingLast(Object missingValue) {
+ return missingValue == null || "_last".equals(missingValue);
+ }
+
+ /** Return the missing object value according to the reduced type of the comparator. */
+ protected final Object missingObject(Object missingValue, boolean reversed) {
+ if (sortMissingFirst(missingValue) || sortMissingLast(missingValue)) {
+ final boolean min = sortMissingFirst(missingValue) ^ reversed;
+ switch (reducedType()) {
+ case INT:
+ return min ? Integer.MIN_VALUE : Integer.MAX_VALUE;
+ case LONG:
+ return min ? Long.MIN_VALUE : Long.MAX_VALUE;
+ case FLOAT:
+ return min ? Float.NEGATIVE_INFINITY : Float.POSITIVE_INFINITY;
+ case DOUBLE:
+ return min ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ case STRING:
+ case STRING_VAL:
+ return min ? null : MAX_TERM;
+ default:
+ throw new UnsupportedOperationException("Unsupported reduced type: " + reducedType());
+ }
+ } else {
+ switch (reducedType()) {
+ case INT:
+ if (missingValue instanceof Number) {
+ return ((Number) missingValue).intValue();
+ } else {
+ return Integer.parseInt(missingValue.toString());
+ }
+ case LONG:
+ if (missingValue instanceof Number) {
+ return ((Number) missingValue).longValue();
+ } else {
+ return Long.parseLong(missingValue.toString());
+ }
+ case FLOAT:
+ if (missingValue instanceof Number) {
+ return ((Number) missingValue).floatValue();
+ } else {
+ return Float.parseFloat(missingValue.toString());
+ }
+ case DOUBLE:
+ if (missingValue instanceof Number) {
+ return ((Number) missingValue).doubleValue();
+ } else {
+ return Double.parseDouble(missingValue.toString());
+ }
+ case STRING:
+ case STRING_VAL:
+ if (missingValue instanceof BytesRef) {
+ return (BytesRef) missingValue;
+ } else if (missingValue instanceof byte[]) {
+ return new BytesRef((byte[]) missingValue);
+ } else {
+ return new BytesRef(missingValue.toString());
+ }
+ default:
+ throw new UnsupportedOperationException("Unsupported reduced type: " + reducedType());
+ }
+ }
+ }
+
+ public abstract SortField.Type reducedType();
+ }
+
+ interface Builder {
+
+ IndexFieldData build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService);
+ }
+
+ public interface WithOrdinals<FD extends AtomicFieldData.WithOrdinals> extends IndexFieldData<FD> {
+
+ /**
+ * Loads the atomic field data for the reader, possibly cached.
+ */
+ FD load(AtomicReaderContext context);
+
+ /**
+ * Loads directly the atomic field data for the reader, ignoring any caching involved.
+ */
+ FD loadDirect(AtomicReaderContext context) throws Exception;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java
new file mode 100644
index 0000000..5c4b14c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.SegmentReader;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.SegmentReaderUtils;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * A simple field data cache abstraction on the *index* level.
+ */
+public interface IndexFieldDataCache {
+
+ <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(AtomicReaderContext context, IFD indexFieldData) throws Exception;
+
+ /**
+ * Clears all the field data cached on this index.
+ */
+ void clear();
+
+ /**
+ * Clears all the field data cached on this index for the specified field name.
+ */
+ void clear(String fieldName);
+
+ void clear(Object coreCacheKey);
+
+ interface Listener {
+
+ void onLoad(FieldMapper.Names fieldNames, FieldDataType fieldDataType, AtomicFieldData fieldData);
+
+ void onUnload(FieldMapper.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes, @Nullable AtomicFieldData fieldData);
+ }
+
+ /**
+ * A *per field* cache that keeps the loaded values in memory, either strongly referenced ({@link Resident}) or softly referenced ({@link Soft}).
+ */
+ static abstract class FieldBased implements IndexFieldDataCache, SegmentReader.CoreClosedListener, RemovalListener<FieldBased.Key, AtomicFieldData> {
+ @Nullable
+ private final IndexService indexService;
+ private final FieldMapper.Names fieldNames;
+ private final FieldDataType fieldDataType;
+ private final Cache<Key, AtomicFieldData> cache;
+ private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
+
+ protected FieldBased(@Nullable IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, CacheBuilder cache, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
+ this.indexService = indexService;
+ this.fieldNames = fieldNames;
+ this.fieldDataType = fieldDataType;
+ this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
+ cache.removalListener(this);
+ //noinspection unchecked
+ this.cache = cache.build();
+ }
+
+ @Override
+ public void onRemoval(RemovalNotification<Key, AtomicFieldData> notification) {
+ Key key = notification.getKey();
+ assert key != null && key.listeners != null;
+
+ AtomicFieldData value = notification.getValue();
+ long sizeInBytes = key.sizeInBytes;
+ if (sizeInBytes == -1 && value != null) {
+ sizeInBytes = value.getMemorySizeInBytes();
+ }
+ for (Listener listener : key.listeners) {
+ listener.onUnload(fieldNames, fieldDataType, notification.wasEvicted(), sizeInBytes, value);
+ }
+ }
+
+ @Override
+ public <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(final AtomicReaderContext context, final IFD indexFieldData) throws Exception {
+ final Key key = new Key(context.reader().getCoreCacheKey());
+ //noinspection unchecked
+ return (FD) cache.get(key, new Callable<AtomicFieldData>() {
+ @Override
+ public AtomicFieldData call() throws Exception {
+ SegmentReaderUtils.registerCoreListener(context.reader(), FieldBased.this);
+ AtomicFieldData fieldData = indexFieldData.loadDirect(context);
+ key.sizeInBytes = fieldData.getMemorySizeInBytes();
+ key.listeners.add(indicesFieldDataCacheListener);
+
+ if (indexService != null) {
+ ShardId shardId = ShardUtils.extractShardId(context.reader());
+ if (shardId != null) {
+ IndexShard shard = indexService.shard(shardId.id());
+ if (shard != null) {
+ key.listeners.add(shard.fieldData());
+ }
+ }
+ }
+ for (Listener listener : key.listeners) {
+ listener.onLoad(fieldNames, fieldDataType, fieldData);
+ }
+ return fieldData;
+ }
+ });
+ }
+
+ @Override
+ public void clear() {
+ cache.invalidateAll();
+ }
+
+ @Override
+ public void clear(String fieldName) {
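+ // this cache instance is already scoped to a single field, so clear everything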
+ cache.invalidateAll();
+ }
+
+ @Override
+ public void clear(Object coreCacheKey) {
+ cache.invalidate(new Key(coreCacheKey));
+ }
+
+ @Override
+ public void onClose(Object coreCacheKey) {
+ cache.invalidate(new Key(coreCacheKey));
+ }
+
+ static class Key {
+ final Object readerKey;
+ final List<Listener> listeners = Lists.newArrayList();
+ long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references)
+
+ Key(Object readerKey) {
+ this.readerKey = readerKey;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ Key key = (Key) o;
+ if (!readerKey.equals(key.readerKey)) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return readerKey.hashCode();
+ }
+ }
+ }
+
+ static class Resident extends FieldBased {
+
+ public Resident(@Nullable IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
+ super(indexService, fieldNames, fieldDataType, CacheBuilder.newBuilder(), indicesFieldDataCacheListener);
+ }
+ }
+
+ static class Soft extends FieldBased {
+
+ public Soft(@Nullable IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
+ super(indexService, fieldNames, fieldDataType, CacheBuilder.newBuilder().softValues(), indicesFieldDataCacheListener);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataModule.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataModule.java
new file mode 100644
index 0000000..e68ff4c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataModule.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Binds the {@link IndexFieldDataService} for an index as an eager singleton.
+ */
+public class IndexFieldDataModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public IndexFieldDataModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndexFieldDataService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java
new file mode 100644
index 0000000..8729327
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.plain.*;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Index-level service that resolves, builds and caches {@link IndexFieldData} instances per field,
+ * honoring the configured field data format and cache type.
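+ * Typical usage (illustrative; assumes a <code>mapper</code> for an indexed field):
+ * <pre>
+ * IndexFieldData fieldData = indexFieldDataService.getForField(mapper);
+ * </pre>
+ */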
+public class IndexFieldDataService extends AbstractIndexComponent {
+
+ private static final String DISABLED_FORMAT = "disabled";
+ private static final String DOC_VALUES_FORMAT = "doc_values";
+ private static final String ARRAY_FORMAT = "array";
+ private static final String PAGED_BYTES_FORMAT = "paged_bytes";
+ private static final String FST_FORMAT = "fst";
+ private static final String COMPRESSED_FORMAT = "compressed";
+
+ private final static ImmutableMap<String, IndexFieldData.Builder> buildersByType;
+ private final static ImmutableMap<String, IndexFieldData.Builder> docValuesBuildersByType;
+ private final static ImmutableMap<Tuple<String, String>, IndexFieldData.Builder> buildersByTypeAndFormat;
+ private final CircuitBreakerService circuitBreakerService;
+ private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
+
+ static {
+ buildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
+ .put("string", new PagedBytesIndexFieldData.Builder())
+ .put("float", new FloatArrayIndexFieldData.Builder())
+ .put("double", new DoubleArrayIndexFieldData.Builder())
+ .put("byte", new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.BYTE))
+ .put("short", new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.SHORT))
+ .put("int", new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.INT))
+ .put("long", new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.LONG))
+ .put("geo_point", new GeoPointDoubleArrayIndexFieldData.Builder())
+ .immutableMap();
+
+ docValuesBuildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
+ .put("string", new DocValuesIndexFieldData.Builder())
+ .put("float", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
+ .put("double", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
+ .put("byte", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))
+ .put("short", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT))
+ .put("int", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT))
+ .put("long", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
+ .put("geo_point", new GeoPointBinaryDVIndexFieldData.Builder())
+ .immutableMap();
+
+ buildersByTypeAndFormat = MapBuilder.<Tuple<String, String>, IndexFieldData.Builder>newMapBuilder()
+ .put(Tuple.tuple("string", PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder())
+ .put(Tuple.tuple("string", FST_FORMAT), new FSTBytesIndexFieldData.Builder())
+ .put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
+ .put(Tuple.tuple("string", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("float", ARRAY_FORMAT), new FloatArrayIndexFieldData.Builder())
+ .put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
+ .put(Tuple.tuple("float", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("double", ARRAY_FORMAT), new DoubleArrayIndexFieldData.Builder())
+ .put(Tuple.tuple("double", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
+ .put(Tuple.tuple("double", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("byte", ARRAY_FORMAT), new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.BYTE))
+ .put(Tuple.tuple("byte", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))
+ .put(Tuple.tuple("byte", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("short", ARRAY_FORMAT), new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.SHORT))
+ .put(Tuple.tuple("short", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT))
+ .put(Tuple.tuple("short", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("int", ARRAY_FORMAT), new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.INT))
+ .put(Tuple.tuple("int", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT))
+ .put(Tuple.tuple("int", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("long", ARRAY_FORMAT), new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.LONG))
+ .put(Tuple.tuple("long", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
+ .put(Tuple.tuple("long", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+
+ .put(Tuple.tuple("geo_point", ARRAY_FORMAT), new GeoPointDoubleArrayIndexFieldData.Builder())
+ .put(Tuple.tuple("geo_point", DOC_VALUES_FORMAT), new GeoPointBinaryDVIndexFieldData.Builder())
+ .put(Tuple.tuple("geo_point", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
+ .put(Tuple.tuple("geo_point", COMPRESSED_FORMAT), new GeoPointCompressedIndexFieldData.Builder())
+
+ .immutableMap();
+ }
+
+ private final IndicesFieldDataCache indicesFieldDataCache;
+ private final ConcurrentMap<String, IndexFieldData<?>> loadedFieldData = ConcurrentCollections.newConcurrentMap();
+ private final Map<String, IndexFieldDataCache> fieldDataCaches = Maps.newHashMap(); // no need for concurrency support, always used under lock
+
+ IndexService indexService;
+
+ // public for testing
+ public IndexFieldDataService(Index index, CircuitBreakerService circuitBreakerService) {
+ this(index, ImmutableSettings.Builder.EMPTY_SETTINGS, new IndicesFieldDataCache(ImmutableSettings.Builder.EMPTY_SETTINGS, new IndicesFieldDataCacheListener(circuitBreakerService)), circuitBreakerService, new IndicesFieldDataCacheListener(circuitBreakerService));
+ }
+
+ @Inject
+ public IndexFieldDataService(Index index, @IndexSettings Settings indexSettings, IndicesFieldDataCache indicesFieldDataCache,
+ CircuitBreakerService circuitBreakerService, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
+ super(index, indexSettings);
+ this.indicesFieldDataCache = indicesFieldDataCache;
+ this.circuitBreakerService = circuitBreakerService;
+ this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
+ }
+
+ // we need to "inject" the index service to avoid creating a cyclic dependency
+ public void setIndexService(IndexService indexService) {
+ this.indexService = indexService;
+ }
+
+ public void clear() {
+ synchronized (loadedFieldData) {
+ for (IndexFieldData<?> fieldData : loadedFieldData.values()) {
+ fieldData.clear();
+ }
+ loadedFieldData.clear();
+ for (IndexFieldDataCache cache : fieldDataCaches.values()) {
+ cache.clear();
+ }
+ fieldDataCaches.clear();
+ }
+ }
+
+ public void clearField(String fieldName) {
+ synchronized (loadedFieldData) {
+ IndexFieldData<?> fieldData = loadedFieldData.remove(fieldName);
+ if (fieldData != null) {
+ fieldData.clear();
+ }
+ IndexFieldDataCache cache = fieldDataCaches.remove(fieldName);
+ if (cache != null) {
+ cache.clear();
+ }
+ }
+ }
+
+ public void clear(IndexReader reader) {
+ synchronized (loadedFieldData) {
+ for (IndexFieldData<?> indexFieldData : loadedFieldData.values()) {
+ indexFieldData.clear(reader);
+ }
+ for (IndexFieldDataCache cache : fieldDataCaches.values()) {
+ cache.clear(reader);
+ }
+ }
+ }
+
+ public void onMappingUpdate() {
+ // synchronize to make sure to not miss field data instances that are being loaded
+ synchronized (loadedFieldData) {
+ // important: do not clear fieldDataCaches: the cache may be reused
+ loadedFieldData.clear();
+ }
+ }
+
+ public <IFD extends IndexFieldData<?>> IFD getForField(FieldMapper<?> mapper) {
+ final FieldMapper.Names fieldNames = mapper.names();
+ final FieldDataType type = mapper.fieldDataType();
+ final boolean docValues = mapper.hasDocValues();
+ IndexFieldData<?> fieldData = loadedFieldData.get(fieldNames.indexName());
+ if (fieldData == null) {
+ synchronized (loadedFieldData) {
+ fieldData = loadedFieldData.get(fieldNames.indexName());
+ if (fieldData == null) {
+ IndexFieldData.Builder builder = null;
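+ // resolution order: explicit format (when usable), then doc values, then the default builder for the type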
+ String format = type.getFormat(indexSettings);
+ if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
+ logger.warn("field [" + fieldNames.fullName() + "] has no doc values, will use default field data format");
+ format = null;
+ }
+ if (format != null) {
+ builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
+ if (builder == null) {
+ logger.warn("failed to find format [" + format + "] for field [" + fieldNames.fullName() + "], will use default");
+ }
+ }
+ if (builder == null && docValues) {
+ builder = docValuesBuildersByType.get(type.getType());
+ }
+ if (builder == null) {
+ builder = buildersByType.get(type.getType());
+ }
+ if (builder == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType());
+ }
+
+ IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName());
+ if (cache == null) {
+ // we default to node level cache, which in turn defaults to be unbounded
+ // this means changing the node level settings is simple, just set the bounds there
+ String cacheType = type.getSettings().get("cache", indexSettings.get("index.fielddata.cache", "node"));
+ if ("resident".equals(cacheType)) {
+ cache = new IndexFieldDataCache.Resident(indexService, fieldNames, type, indicesFieldDataCacheListener);
+ } else if ("soft".equals(cacheType)) {
+ cache = new IndexFieldDataCache.Soft(indexService, fieldNames, type, indicesFieldDataCacheListener);
+ } else if ("node".equals(cacheType)) {
+ cache = indicesFieldDataCache.buildIndexFieldDataCache(indexService, index, fieldNames, type);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("cache type not supported [" + cacheType + "] for field [" + fieldNames.fullName() + "]");
+ }
+ fieldDataCaches.put(fieldNames.indexName(), cache);
+ }
+
+ fieldData = builder.build(index, indexSettings, mapper, cache, circuitBreakerService);
+ loadedFieldData.put(fieldNames.indexName(), fieldData);
+ }
+ }
+ }
+ return (IFD) fieldData;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexGeoPointFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/IndexGeoPointFieldData.java
new file mode 100644
index 0000000..d6f1c9e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/IndexGeoPointFieldData.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+
+/**
+ * Specialization of {@link IndexFieldData} for geo point fields.
+ */
+public interface IndexGeoPointFieldData<FD extends AtomicGeoPointFieldData> extends IndexFieldData<FD> {
+
+ /**
+ * Loads the atomic field data for the reader, possibly cached.
+ */
+ FD load(AtomicReaderContext context);
+
+ /**
+ * Directly loads the atomic field data for the reader, bypassing any caching.
+ */
+ FD loadDirect(AtomicReaderContext context) throws Exception;
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java
new file mode 100644
index 0000000..f769f23
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+
+/**
+ */
+public interface IndexNumericFieldData<FD extends AtomicNumericFieldData> extends IndexFieldData<FD> {
+
+ public static enum NumericType {
+ BYTE(8, false, SortField.Type.INT, Byte.MIN_VALUE, Byte.MAX_VALUE) {
+ @Override
+ public long toLong(BytesRef indexForm) {
+ return INT.toLong(indexForm);
+ }
+
+ @Override
+ public void toIndexForm(Number number, BytesRef bytes) {
+ INT.toIndexForm(number, bytes);
+ }
+
+ @Override
+ public Number toNumber(BytesRef indexForm) {
+ return INT.toNumber(indexForm);
+ }
+ },
+ SHORT(16, false, SortField.Type.INT, Short.MIN_VALUE, Short.MAX_VALUE) {
+ @Override
+ public long toLong(BytesRef indexForm) {
+ return INT.toLong(indexForm);
+ }
+
+ @Override
+ public void toIndexForm(Number number, BytesRef bytes) {
+ INT.toIndexForm(number, bytes);
+ }
+
+ @Override
+ public Number toNumber(BytesRef indexForm) {
+ return INT.toNumber(indexForm);
+ }
+ },
+ INT(32, false, SortField.Type.INT, Integer.MIN_VALUE, Integer.MAX_VALUE) {
+ @Override
+ public long toLong(BytesRef indexForm) {
+ return NumericUtils.prefixCodedToInt(indexForm);
+ }
+
+ @Override
+ public void toIndexForm(Number number, BytesRef bytes) {
+ NumericUtils.intToPrefixCodedBytes(number.intValue(), 0, bytes);
+ }
+
+ @Override
+ public Number toNumber(BytesRef indexForm) {
+ return NumericUtils.prefixCodedToInt(indexForm);
+ }
+ },
+ LONG(64, false, SortField.Type.LONG, Long.MIN_VALUE, Long.MAX_VALUE) {
+ @Override
+ public long toLong(BytesRef indexForm) {
+ return NumericUtils.prefixCodedToLong(indexForm);
+ }
+
+ @Override
+ public void toIndexForm(Number number, BytesRef bytes) {
+ NumericUtils.longToPrefixCodedBytes(number.longValue(), 0, bytes);
+ }
+
+ @Override
+ public Number toNumber(BytesRef indexForm) {
+ return NumericUtils.prefixCodedToLong(indexForm);
+ }
+ },
+ FLOAT(32, true, SortField.Type.FLOAT, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY) {
+ @Override
+ public double toDouble(BytesRef indexForm) {
+ return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(indexForm));
+ }
+
+ @Override
+ public void toIndexForm(Number number, BytesRef bytes) {
+ NumericUtils.intToPrefixCodedBytes(NumericUtils.floatToSortableInt(number.floatValue()), 0, bytes);
+ }
+
+ @Override
+ public Number toNumber(BytesRef indexForm) {
+ return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(indexForm));
+ }
+ },
+ DOUBLE(64, true, SortField.Type.DOUBLE, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) {
+ @Override
+ public double toDouble(BytesRef indexForm) {
+ return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(indexForm));
+ }
+
+ @Override
+ public void toIndexForm(Number number, BytesRef bytes) {
+ NumericUtils.longToPrefixCodedBytes(NumericUtils.doubleToSortableLong(number.doubleValue()), 0, bytes);
+ }
+
+ @Override
+ public Number toNumber(BytesRef indexForm) {
+ return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(indexForm));
+ }
+ };
+
+ private final int requiredBits;
+ private final boolean floatingPoint;
+ private final SortField.Type type;
+ private final Number minValue, maxValue;
+
+ private NumericType(int requiredBits, boolean floatingPoint, SortField.Type type, Number minValue, Number maxValue) {
+ this.requiredBits = requiredBits;
+ this.floatingPoint = floatingPoint;
+ this.type = type;
+ this.minValue = minValue;
+ this.maxValue = maxValue;
+ }
+
+ public final SortField.Type sortFieldType() {
+ return type;
+ }
+
+ public final Number minValue() {
+ return minValue;
+ }
+
+ public final Number maxValue() {
+ return maxValue;
+ }
+
+ public final boolean isFloatingPoint() {
+ return floatingPoint;
+ }
+
+ public final int requiredBits() {
+ return requiredBits;
+ }
+
+ public abstract void toIndexForm(Number number, BytesRef bytes);
+
+ public long toLong(BytesRef indexForm) {
+ return (long) toDouble(indexForm);
+ }
+
+ public double toDouble(BytesRef indexForm) {
+ return (double) toLong(indexForm);
+ }
+
+ public abstract Number toNumber(BytesRef indexForm);
+
+ public final TermsEnum wrapTermsEnum(TermsEnum termsEnum) {
+ if (requiredBits() > 32) {
+ return OrdinalsBuilder.wrapNumeric64Bit(termsEnum);
+ } else {
+ return OrdinalsBuilder.wrapNumeric32Bit(termsEnum);
+ }
+ }
+ }
+
+ NumericType getNumericType();
+
+ /**
+ * Loads the atomic field data for the reader, possibly cached.
+ */
+ FD load(AtomicReaderContext context);
+
+ /**
+ * Directly loads the atomic field data for the reader, bypassing any caching.
+ */
+ FD loadDirect(AtomicReaderContext context) throws Exception;
+}
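
A minimal round-trip sketch of the NumericType encoding defined above (variable
names are illustrative; assumes the Lucene 4.x NumericUtils used by this code):

    BytesRef indexForm = new BytesRef();
    IndexNumericFieldData.NumericType.INT.toIndexForm(42, indexForm);         // prefix-coded bytes
    long decoded = IndexNumericFieldData.NumericType.INT.toLong(indexForm);   // == 42
    Number boxed = IndexNumericFieldData.NumericType.INT.toNumber(indexForm); // Integer 42
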
diff --git a/src/main/java/org/elasticsearch/index/fielddata/LongValues.java b/src/main/java/org/elasticsearch/index/fielddata/LongValues.java
new file mode 100644
index 0000000..eb94f36
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/LongValues.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+
+/**
+ * A stateful, lightweight per-document set of <code>long</code> values.
+ *
+ * To iterate over values in a document use the following pattern:
+ * <pre>
+ * LongValues values = ..;
+ * final int numValues = values.setDocId(docId);
+ * for (int i = 0; i < numValues; i++) {
+ * long value = values.nextValue();
+ * // process value
+ * }
+ * </pre>
+ *
+ */
+public abstract class LongValues {
+
+ /**
+ * An empty {@link LongValues} instance.
+ */
+ public static final LongValues EMPTY = new Empty();
+
+ private final boolean multiValued;
+
+ protected int docId;
+
+ /**
+ * Creates a new {@link LongValues} instance
+ * @param multiValued <code>true</code> iff this instance is multi-valued, <code>false</code> otherwise.
+ */
+ protected LongValues(boolean multiValued) {
+ this.multiValued = multiValued;
+ }
+
+ /**
+ * Returns <code>true</code> if at least one document in this field data has multiple values.
+ */
+ public final boolean isMultiValued() {
+ return multiValued;
+ }
+
+ /**
+ * Sets iteration to the specified docID and returns the number of
+ * values for this document ID.
+ * @param docId document ID
+ *
+ * @see #nextValue()
+ */
+ public abstract int setDocument(int docId);
+
+ /**
+ * Returns the next value for the current docID set to {@link #setDocument(int)}.
+ * This method should only be called <tt>N</tt> times where <tt>N</tt> is the number
+ * returned from {@link #setDocument(int)}. If called more than <tt>N</tt> times, the behavior
+ * is undefined.
+ * <p>
+ * If this instance returns ordered values, the <tt>Nth</tt> value is strictly less than the <tt>N+1</tt>th value with
+ * respect to the {@link AtomicFieldData.Order} returned from {@link #getOrder()}. If this instance returns
+ * <i>unordered</i> values, {@link #getOrder()} must return {@link AtomicFieldData.Order#NONE}.
+ * Note: the returned values are de-duplicated; only unique values are returned.
+ * </p>
+ *
+ * @return the next value for the current docID set to {@link #setDocument(int)}.
+ */
+ public abstract long nextValue();
+
+ /**
+ * Returns the order in which values are returned from {@link #nextValue()}.
+ * <p> Note: {@link LongValues} have {@link AtomicFieldData.Order#NUMERIC} by default.</p>
+ */
+ public AtomicFieldData.Order getOrder() {
+ return AtomicFieldData.Order.NUMERIC;
+ }
+
+ /**
+ * Ordinal based {@link LongValues}.
+ */
+ public static abstract class WithOrdinals extends LongValues {
+
+ protected final Docs ordinals;
+
+ protected WithOrdinals(Ordinals.Docs ordinals) {
+ super(ordinals.isMultiValued());
+ this.ordinals = ordinals;
+ }
+
+ /**
+ * Returns the associated ordinals instance.
+ * @return the associated ordinals instance.
+ */
+ public Docs ordinals() {
+ return this.ordinals;
+ }
+
+ /**
+ * Returns the value for the given ordinal.
+ * @param ord the ordinal to lookup.
+ * @return a long value associated with the given ordinal.
+ */
+ public abstract long getValueByOrd(long ord);
+
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return ordinals.setDocument(docId);
+ }
+
+ @Override
+ public long nextValue() {
+ return getValueByOrd(ordinals.nextOrd());
+ }
+ }
+
+
+ private static final class Empty extends LongValues {
+
+ public Empty() {
+ super(false);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return 0;
+ }
+
+ @Override
+ public long nextValue() {
+ throw new ElasticsearchIllegalStateException("Empty LongValues has no next value");
+ }
+
+ }
+
+ /** Wrap a {@link DoubleValues} instance. */
+ public static LongValues asLongValues(final DoubleValues values) {
+ return new LongValues(values.isMultiValued()) {
+
+ @Override
+ public int setDocument(int docId) {
+ return values.setDocument(docId);
+ }
+
+ @Override
+ public long nextValue() {
+ return (long) values.nextValue();
+ }
+
+ };
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java b/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java
new file mode 100644
index 0000000..739054d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.index.fielddata.AbstractIndexFieldData;
+
+import java.io.IOException;
+
+/**
+ * {@link TermsEnum} that takes a MemoryCircuitBreaker, increasing the breaker
+ * every time {@code .next(...)} is called. Proxies all methods to the original
+ * TermsEnum otherwise.
+ */
+public final class RamAccountingTermsEnum extends FilteredTermsEnum {
+
+ // Flush every 1 MB
+ private static final long FLUSH_BUFFER_SIZE = 1024 * 1024;
+
+ private final MemoryCircuitBreaker breaker;
+ private final TermsEnum termsEnum;
+ private final AbstractIndexFieldData.PerValueEstimator estimator;
+ private long totalBytes;
+ private long flushBuffer;
+
+
+ public RamAccountingTermsEnum(TermsEnum termsEnum, MemoryCircuitBreaker breaker, AbstractIndexFieldData.PerValueEstimator estimator) {
+ super(termsEnum);
+ this.breaker = breaker;
+ this.termsEnum = termsEnum;
+ this.estimator = estimator;
+ this.totalBytes = 0;
+ this.flushBuffer = 0;
+ }
+
+ /**
+ * Always accept the term.
+ */
+ @Override
+ protected AcceptStatus accept(BytesRef term) throws IOException {
+ return AcceptStatus.YES;
+ }
+
+ /**
+ * Flush the {@code flushBuffer} to the breaker, incrementing the total
+ * bytes and resetting the buffer.
+ */
+ public void flush() {
+ breaker.addEstimateBytesAndMaybeBreak(this.flushBuffer);
+ this.totalBytes += this.flushBuffer;
+ this.flushBuffer = 0;
+ }
+
+ /**
+ * Proxies the original next() call, but also estimates the memory
+ * overhead of loading the next term.
+ */
+ @Override
+ public BytesRef next() throws IOException {
+ BytesRef term = termsEnum.next();
+ if (term == null && this.flushBuffer != 0) {
+ // We have reached the end of the termsEnum, flush the buffer
+ flush();
+ } else {
+ this.flushBuffer += estimator.bytesPerValue(term);
+ if (this.flushBuffer >= FLUSH_BUFFER_SIZE) {
+ flush();
+ }
+ }
+ return term;
+ }
+
+ /**
+ * @return the total number of bytes that have been aggregated
+ */
+ public long getTotalBytes() {
+ return this.totalBytes;
+ }
+}
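
A hedged usage sketch of the accounting wrapper ('rawTermsEnum', 'breaker' and
'estimator' are assumed to already exist; they are not defined in this diff):

    RamAccountingTermsEnum accountingEnum = new RamAccountingTermsEnum(rawTermsEnum, breaker, estimator);
    while (accountingEnum.next() != null) {
        // consume the term, e.g. feed it into field data loading
    }
    // next() returning null flushed the remaining buffer, so the breaker has
    // already been charged for everything reported here:
    long estimatedBytes = accountingEnum.getTotalBytes();
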
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java
new file mode 100644
index 0000000..fcdd5b4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java
@@ -0,0 +1,399 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.util.*;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.util.SlicedDoubleList;
+import org.elasticsearch.common.util.SlicedLongList;
+import org.elasticsearch.common.util.SlicedObjectList;
+import org.joda.time.DateTimeZone;
+import org.joda.time.MutableDateTime;
+
+import java.util.List;
+
+/**
+ * Script-level doc values. The assumption is that any implementation will provide a <code>getValue</code>
+ * and a <code>getValues</code> method returning the relevant type, which can then be used in scripts.
+ */
+public abstract class ScriptDocValues {
+
+ public static final Longs EMPTY_LONGS = new Longs(LongValues.EMPTY);
+ public static final Doubles EMPTY_DOUBLES = new Doubles(DoubleValues.EMPTY);
+ public static final GeoPoints EMPTY_GEOPOINTS = new GeoPoints(GeoPointValues.EMPTY);
+ public static final Strings EMPTY_STRINGS = new Strings(BytesValues.EMPTY);
+ protected int docId;
+ protected boolean listLoaded = false;
+
+ public void setNextDocId(int docId) {
+ this.docId = docId;
+ this.listLoaded = false;
+ }
+
+ public abstract boolean isEmpty();
+
+ public abstract List<?> getValues();
+
+ public final static class Strings extends ScriptDocValues {
+
+ private final BytesValues values;
+ private final CharsRef spare = new CharsRef();
+ private SlicedObjectList<String> list;
+
+ public Strings(BytesValues values) {
+ this.values = values;
+ list = new SlicedObjectList<String>(values.isMultiValued() ? new String[10] : new String[1]) {
+
+ @Override
+ public void grow(int newLength) {
+ assert offset == 0; // NOTE: senseless if offset != 0
+ if (values.length >= newLength) {
+ return;
+ }
+ final String[] current = values;
+ values = new String[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+ System.arraycopy(current, 0, values, 0, current.length);
+ }
+ };
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return values.setDocument(docId) == 0;
+ }
+
+ public BytesValues getInternalValues() {
+ return this.values;
+ }
+
+ public BytesRef getBytesValue() {
+ int numValues = values.setDocument(docId);
+ if (numValues == 0) {
+ return null;
+ }
+ return values.nextValue();
+ }
+
+ public String getValue() {
+ String value = null;
+ if (values.setDocument(docId) > 0) {
+ UnicodeUtil.UTF8toUTF16(values.nextValue(), spare);
+ value = spare.toString();
+ }
+ return value;
+ }
+
+ public List<String> getValues() {
+ if (!listLoaded) {
+ final int numValues = values.setDocument(docId);
+ list.offset = 0;
+ list.grow(numValues);
+ list.length = numValues;
+ for (int i = 0; i < numValues; i++) {
+ BytesRef next = values.nextValue();
+ UnicodeUtil.UTF8toUTF16(next, spare);
+ list.values[i] = spare.toString();
+ }
+ listLoaded = true;
+ }
+ return list;
+ }
+
+ }
+
+ public static class Longs extends ScriptDocValues {
+
+ private final LongValues values;
+ private final MutableDateTime date = new MutableDateTime(0, DateTimeZone.UTC);
+ private final SlicedLongList list;
+
+ public Longs(LongValues values) {
+ this.values = values;
+ this.list = new SlicedLongList(values.isMultiValued() ? 10 : 1);
+ }
+
+ public LongValues getInternalValues() {
+ return this.values;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return values.setDocument(docId) == 0;
+ }
+
+ public long getValue() {
+ int numValues = values.setDocument(docId);
+ if (numValues == 0) {
+ return 0L;
+ }
+ return values.nextValue();
+ }
+
+ public List<Long> getValues() {
+ if (!listLoaded) {
+ final int numValues = values.setDocument(docId);
+ list.offset = 0;
+ list.grow(numValues);
+ list.length = numValues;
+ for (int i = 0; i < numValues; i++) {
+ list.values[i] = values.nextValue();
+ }
+ listLoaded = true;
+ }
+ return list;
+ }
+
+ public MutableDateTime getDate() {
+ date.setMillis(getValue());
+ return date;
+ }
+
+ }
+
+ public static class Doubles extends ScriptDocValues {
+
+ private final DoubleValues values;
+ private final SlicedDoubleList list;
+
+ public Doubles(DoubleValues values) {
+ this.values = values;
+ this.list = new SlicedDoubleList(values.isMultiValued() ? 10 : 1);
+
+ }
+
+ public DoubleValues getInternalValues() {
+ return this.values;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return values.setDocument(docId) == 0;
+ }
+
+
+ public double getValue() {
+ int numValues = values.setDocument(docId);
+ if (numValues == 0) {
+ return 0d;
+ }
+ return values.nextValue();
+ }
+
+ public List<Double> getValues() {
+ if (!listLoaded) {
+ int numValues = values.setDocument(docId);
+ list.offset = 0;
+ list.grow(numValues);
+ list.length = numValues;
+ for (int i = 0; i < numValues; i++) {
+ list.values[i] = values.nextValue();
+ }
+ listLoaded = true;
+ }
+ return list;
+ }
+ }
+
+ public static class GeoPoints extends ScriptDocValues {
+
+ private final GeoPointValues values;
+ private final SlicedObjectList<GeoPoint> list;
+
+ public GeoPoints(GeoPointValues values) {
+ this.values = values;
+ list = new SlicedObjectList<GeoPoint>(values.isMultiValued() ? new GeoPoint[10] : new GeoPoint[1]) {
+
+ @Override
+ public void grow(int newLength) {
+ assert offset == 0; // NOTE: senseless if offset != 0
+ if (values.length >= newLength) {
+ return;
+ }
+ final GeoPoint[] current = values;
+ values = new GeoPoint[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+ System.arraycopy(current, 0, values, 0, current.length);
+ }
+ };
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return values.setDocument(docId) == 0;
+ }
+
+ public GeoPoint getValue() {
+ int numValues = values.setDocument(docId);
+ if (numValues == 0) {
+ return null;
+ }
+ return values.nextValue();
+ }
+
+ public double getLat() {
+ return getValue().lat();
+ }
+
+ public double[] getLats() {
+ List<GeoPoint> points = getValues();
+ double[] lats = new double[points.size()];
+ for (int i = 0; i < points.size(); i++) {
+ lats[i] = points.get(i).lat();
+ }
+ return lats;
+ }
+
+ public double[] getLons() {
+ List<GeoPoint> points = getValues();
+ double[] lons = new double[points.size()];
+ for (int i = 0; i < points.size(); i++) {
+ lons[i] = points.get(i).lon();
+ }
+ return lons;
+ }
+
+ public double getLon() {
+ return getValue().lon();
+ }
+
+ public List<GeoPoint> getValues() {
+ if (!listLoaded) {
+ int numValues = values.setDocument(docId);
+ list.offset = 0;
+ list.grow(numValues);
+ list.length = numValues;
+ for (int i = 0; i < numValues; i++) {
+ GeoPoint next = values.nextValue();
+ GeoPoint point = list.values[i];
+ if (point == null) {
+ point = list.values[i] = new GeoPoint();
+ }
+ point.reset(next.lat(), next.lon());
+ list.values[i] = point;
+ }
+ listLoaded = true;
+ }
+ return list;
+
+ }
+
+ public double factorDistance(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
+ }
+
+ public double factorDistanceWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
+ }
+
+ public double factorDistance02(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT) + 1;
+ }
+
+ public double factorDistance13(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT) + 2;
+ }
+
+ public double arcDistance(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
+ }
+
+ public double arcDistanceWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
+ }
+
+ public double arcDistanceInKm(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
+ }
+
+ public double arcDistanceInKmWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
+ }
+
+ public double arcDistanceInMiles(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
+ }
+
+ public double arcDistanceInMilesWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
+ }
+
+ public double distance(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
+ }
+
+ public double distanceWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
+ }
+
+ public double distanceInKm(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
+ }
+
+ public double distanceInKmWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
+ }
+
+ public double distanceInMiles(double lat, double lon) {
+ GeoPoint point = getValue();
+ return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
+ }
+
+ public double distanceInMilesWithDefault(double lat, double lon, double defaultValue) {
+ if (isEmpty()) {
+ return defaultValue;
+ }
+ GeoPoint point = getValue();
+ return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
+ }
+ }
+}
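
A short sketch of the per-document protocol shared by all ScriptDocValues
implementations ('timestamps' is an assumed LongValues instance obtained from
atomic field data; 'docId' is the current document):

    ScriptDocValues.Longs docValues = new ScriptDocValues.Longs(timestamps);
    docValues.setNextDocId(docId);          // rebind to the current document
    long first = docValues.isEmpty() ? 0L : docValues.getValue();
    List<Long> all = docValues.getValues(); // materialized lazily, cached until the next setNextDocId
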
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java
new file mode 100644
index 0000000..a2b2c1f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.fielddata.plain.PackedArrayAtomicFieldData;
+import org.elasticsearch.index.fielddata.plain.PagedBytesAtomicFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ */
+public class ShardFieldData extends AbstractIndexShardComponent implements IndexFieldDataCache.Listener {
+
+ final CounterMetric evictionsMetric = new CounterMetric();
+ final CounterMetric totalMetric = new CounterMetric();
+
+ final ConcurrentMap<String, CounterMetric> perFieldTotals = ConcurrentCollections.newConcurrentMap();
+
+ @Inject
+ public ShardFieldData(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ }
+
+ public FieldDataStats stats(String... fields) {
+ ObjectLongOpenHashMap<String> fieldTotals = null;
+ if (fields != null && fields.length > 0) {
+ fieldTotals = new ObjectLongOpenHashMap<String>();
+ for (Map.Entry<String, CounterMetric> entry : perFieldTotals.entrySet()) {
+ for (String field : fields) {
+ if (Regex.simpleMatch(field, entry.getKey())) {
+ fieldTotals.put(entry.getKey(), entry.getValue().count());
+ }
+ }
+ }
+ }
+ return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals);
+ }
+
+ @Override
+ public void onLoad(FieldMapper.Names fieldNames, FieldDataType fieldDataType, AtomicFieldData fieldData) {
+ long sizeInBytes = fieldData.getMemorySizeInBytes();
+
+ totalMetric.inc(sizeInBytes);
+
+ String keyFieldName = fieldNames.indexName();
+ CounterMetric total = perFieldTotals.get(keyFieldName);
+ if (total != null) {
+ total.inc(sizeInBytes);
+ } else {
+ total = new CounterMetric();
+ total.inc(sizeInBytes);
+ CounterMetric prev = perFieldTotals.putIfAbsent(keyFieldName, total);
+ if (prev != null) {
+ prev.inc(sizeInBytes);
+ }
+ }
+ }
+
+ @Override
+ public void onUnload(FieldMapper.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes, @Nullable AtomicFieldData fieldData) {
+ if (wasEvicted) {
+ evictionsMetric.inc();
+ }
+ if (sizeInBytes != -1) {
+ totalMetric.dec(sizeInBytes);
+
+ String keyFieldName = fieldNames.indexName();
+ CounterMetric total = perFieldTotals.get(keyFieldName);
+ if (total != null) {
+ total.dec(sizeInBytes);
+ }
+ }
+ }
+}
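
Since the per-field totals are matched with Regex.simpleMatch, the stats call
accepts simple wildcard patterns as well as exact names. An illustrative call
(field names are hypothetical):

    FieldDataStats stats = shardFieldData.stats("user.*", "timestamp");
    // the returned stats carry per-field byte counts only for the matching fields
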
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ShardFieldDataModule.java b/src/main/java/org/elasticsearch/index/fielddata/ShardFieldDataModule.java
new file mode 100644
index 0000000..c647056
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ShardFieldDataModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ */
+public class ShardFieldDataModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardFieldData.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java
new file mode 100644
index 0000000..d8b2eb2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparatorSource {
+
+ /** UTF-8 term containing a single code point, {@link Character#MAX_CODE_POINT}, which will compare greater than all other index terms
+ * since {@link Character#MAX_CODE_POINT} is a noncharacter and thus shouldn't appear in an index term. */
+ public static final BytesRef MAX_TERM;
+ static {
+ MAX_TERM = new BytesRef();
+ final char[] chars = Character.toChars(Character.MAX_CODE_POINT);
+ UnicodeUtil.UTF16toUTF8(chars, 0, chars.length, MAX_TERM);
+ }
+
+ private final IndexFieldData<?> indexFieldData;
+ private final SortMode sortMode;
+ private final Object missingValue;
+
+ public BytesRefFieldComparatorSource(IndexFieldData<?> indexFieldData, Object missingValue, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.sortMode = sortMode;
+ this.missingValue = missingValue;
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.STRING;
+ }
+
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ assert fieldname.equals(indexFieldData.getFieldNames().indexName());
+ final BytesRef missingBytes = (BytesRef) missingObject(missingValue, reversed);
+
+ if (indexFieldData.valuesOrdered() && indexFieldData instanceof IndexFieldData.WithOrdinals) {
+ return new BytesRefOrdValComparator((IndexFieldData.WithOrdinals<?>) indexFieldData, numHits, sortMode, missingBytes);
+ }
+ return new BytesRefValComparator(indexFieldData, numHits, sortMode, missingBytes);
+ }
+
+}
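
As a quick sanity check on the MAX_TERM construction above (illustrative, not
part of the class itself):

    BytesRef maxTerm = BytesRefFieldComparatorSource.MAX_TERM;
    // maxTerm.bytes holds 0xF4 0x8F 0xBF 0xBF, the UTF-8 encoding of U+10FFFF,
    // so maxTerm compares greater, byte for byte, than any valid indexed term,
    // making it a safe "sorts last" sentinel for missing values
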
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefOrdValComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefOrdValComparator.java
new file mode 100644
index 0000000..a5aa086
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefOrdValComparator.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+import java.io.IOException;
+
+/**
+ * Sorts by field's natural Term sort order, using
+ * ordinals. This is functionally equivalent to {@link
+ * org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the strings
+ * to their relative ordinal positions (using the index
+ * returned by {@link org.apache.lucene.search.FieldCache#getTermsIndex}), and
+ * does most comparisons using the ordinals. For medium
+ * to large results, this comparator will be much faster
+ * than {@link org.apache.lucene.search.FieldComparator.TermValComparator}. For very small
+ * result sets it may be slower.
+ *
+ * Internally this comparator multiplies ordinals by 4 so that virtual ordinals can be inserted in-between the original field data ordinals.
+ * Thanks to this, an ordinal for the missing value and the bottom value can be computed and all ordinals are directly comparable. For example,
+ * if the field data ordinals are (a,1), (b,2) and (c,3), they will be internally stored as (a,4), (b,8), (c,12). Then the ordinal for the
+ * missing value will be computed by binary searching. For example, if the missing value is 'ab', it will be assigned 6 as an ordinal (between
+ * 'a' and 'b'). And if the bottom value is 'ac', it will be assigned 7 as an ordinal (between 'ab' and 'b').
+ */
+public final class BytesRefOrdValComparator extends NestedWrappableComparator<BytesRef> {
+
+ final IndexFieldData.WithOrdinals<?> indexFieldData;
+ final BytesRef missingValue;
+
+ /* Ords for each slot, times 4.
+ @lucene.internal */
+ final long[] ords;
+
+ final SortMode sortMode;
+
+ /* Values for each slot.
+ @lucene.internal */
+ final BytesRef[] values;
+
+ /* Which reader last copied a value into the slot. When
+ we compare two slots, we just compare-by-ord if the
+ readerGen is the same; else we must compare the
+ values (slower).
+ @lucene.internal */
+ final int[] readerGen;
+
+ /* Gen of current reader we are on.
+ @lucene.internal */
+ int currentReaderGen = -1;
+
+ /* Current reader's doc ord/values.
+ @lucene.internal */
+ BytesValues.WithOrdinals termsIndex;
+ long missingOrd;
+
+ /* Bottom slot, or -1 if queue isn't full yet
+ @lucene.internal */
+ int bottomSlot = -1;
+
+ /* Bottom ord (same as ords[bottomSlot] once bottomSlot
+ is set). Cached for faster compares.
+ @lucene.internal */
+ long bottomOrd;
+
+ final BytesRef tempBR = new BytesRef();
+
+ public BytesRefOrdValComparator(IndexFieldData.WithOrdinals<?> indexFieldData, int numHits, SortMode sortMode, BytesRef missingValue) {
+ this.indexFieldData = indexFieldData;
+ this.sortMode = sortMode;
+ this.missingValue = missingValue;
+ ords = new long[numHits];
+ values = new BytesRef[numHits];
+ readerGen = new int[numHits];
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ if (readerGen[slot1] == readerGen[slot2]) {
+ return LongValuesComparator.compare(ords[slot1], ords[slot2]);
+ }
+
+ final BytesRef val1 = values[slot1];
+ final BytesRef val2 = values[slot2];
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+ return val1.compareTo(val2);
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void missing(int slot) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int compareDocToValue(int doc, BytesRef value) {
+ throw new UnsupportedOperationException();
+ }
+
+ class PerSegmentComparator extends NestedWrappableComparator<BytesRef> {
+ final Ordinals.Docs readerOrds;
+ final BytesValues.WithOrdinals termsIndex;
+
+ public PerSegmentComparator(BytesValues.WithOrdinals termsIndex) {
+ this.readerOrds = termsIndex.ordinals();
+ this.termsIndex = termsIndex;
+ if (readerOrds.getNumOrds() > Long.MAX_VALUE / 4) {
+ throw new IllegalStateException("Current terms index pretends it has more than " + (Long.MAX_VALUE / 4) + " ordinals, which is unsupported by this impl");
+ }
+ }
+
+ @Override
+ public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
+ return BytesRefOrdValComparator.this.setNextReader(context);
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ return BytesRefOrdValComparator.this.compare(slot1, slot2);
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ BytesRefOrdValComparator.this.setBottom(bottom);
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return BytesRefOrdValComparator.this.value(slot);
+ }
+
+ @Override
+ public int compareValues(BytesRef val1, BytesRef val2) {
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+ return val1.compareTo(val2);
+ }
+
+ @Override
+ public int compareDocToValue(int doc, BytesRef value) {
+ final long ord = getOrd(doc);
+ final BytesRef docValue = ord == Ordinals.MISSING_ORDINAL ? missingValue : termsIndex.getValueByOrd(ord);
+ return compareValues(docValue, value);
+ }
+
+ protected long getOrd(int doc) {
+ return readerOrds.getOrd(doc);
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ assert bottomSlot != -1;
+ final long docOrd = getOrd(doc);
+ final long comparableOrd = docOrd == Ordinals.MISSING_ORDINAL ? missingOrd : docOrd << 2;
+ return LongValuesComparator.compare(bottomOrd, comparableOrd);
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ assert bottomSlot != -1;
+ return LongValuesComparator.compare(bottomOrd, missingOrd);
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ final long ord = getOrd(doc);
+ if (ord == Ordinals.MISSING_ORDINAL) {
+ ords[slot] = missingOrd;
+ values[slot] = missingValue;
+ } else {
+ assert ord > 0;
+ ords[slot] = ord << 2;
+ if (values[slot] == null || values[slot] == missingValue) {
+ values[slot] = new BytesRef();
+ }
+ values[slot].copyBytes(termsIndex.getValueByOrd(ord));
+ }
+ readerGen[slot] = currentReaderGen;
+ }
+
+ @Override
+ public void missing(int slot) {
+ ords[slot] = missingOrd;
+ values[slot] = missingValue;
+ }
+ }
+
+ // for assertions
+ private boolean consistentInsertedOrd(BytesValues.WithOrdinals termsIndex, long ord, BytesRef value) {
+ assert ord >= 0 : ord;
+ assert (ord == 0) == (value == null) : "ord=" + ord + ", value=" + value;
+ final long previousOrd = ord >>> 2;
+ final long nextOrd = previousOrd + 1;
+ final BytesRef previous = previousOrd == 0 ? null : termsIndex.getValueByOrd(previousOrd);
+ if ((ord & 3) == 0) { // there was an existing ord with the inserted value
+ assert compareValues(previous, value) == 0;
+ } else {
+ assert compareValues(previous, value) < 0;
+ }
+ if (nextOrd < termsIndex.ordinals().getMaxOrd()) {
+ final BytesRef next = termsIndex.getValueByOrd(nextOrd);
+ assert compareValues(value, next) < 0;
+ }
+ return true;
+ }
+
+ // find where to insert an ord in the current terms index
+ private long ordInCurrentReader(BytesValues.WithOrdinals termsIndex, BytesRef value) {
+ final long docOrd = binarySearch(termsIndex, value);
+ assert docOrd != -1; // would mean smaller than null
+ final long ord;
+ if (docOrd >= 0) {
+ // value exists in the current segment
+ ord = docOrd << 2;
+ } else {
+ // value doesn't exist, use the ord between the previous and the next term
+ ord = ((-2 - docOrd) << 2) + 2;
+ }
+ assert (ord & 1) == 0;
+ return ord;
+ }
+
+ @Override
+ public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
+ termsIndex = indexFieldData.load(context).getBytesValues(false);
+ assert termsIndex.ordinals() != null && termsIndex.ordinals().ordinals() != null;
+ if (missingValue == null) {
+ missingOrd = Ordinals.MISSING_ORDINAL;
+ } else {
+ missingOrd = ordInCurrentReader(termsIndex, missingValue);
+ assert consistentInsertedOrd(termsIndex, missingOrd, missingValue);
+ }
+ FieldComparator<BytesRef> perSegComp = null;
+ assert termsIndex.ordinals() != null && termsIndex.ordinals().ordinals() != null;
+ if (termsIndex.isMultiValued()) {
+ perSegComp = new PerSegmentComparator(termsIndex) {
+ @Override
+ protected long getOrd(int doc) {
+ return getRelevantOrd(readerOrds, doc, sortMode);
+ }
+ };
+ } else {
+ perSegComp = new PerSegmentComparator(termsIndex);
+ }
+ currentReaderGen++;
+ if (bottomSlot != -1) {
+ perSegComp.setBottom(bottomSlot);
+ }
+ return perSegComp;
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ bottomSlot = bottom;
+ final BytesRef bottomValue = values[bottomSlot];
+
+ if (bottomValue == null) {
+ bottomOrd = Ordinals.MISSING_ORDINAL;
+ } else if (currentReaderGen == readerGen[bottomSlot]) {
+ bottomOrd = ords[bottomSlot];
+ } else {
+ // insert an ord
+ bottomOrd = ordInCurrentReader(termsIndex, bottomValue);
+ if (bottomOrd == missingOrd) {
+ // bottomValue and missingValue fall in-between the same pair of field data values -> tie-break
+ // this is why we multiply ords by 4
+ assert missingValue != null;
+ final int cmp = bottomValue.compareTo(missingValue);
+ if (cmp < 0) {
+ --bottomOrd;
+ } else if (cmp > 0) {
+ ++bottomOrd;
+ }
+ }
+ assert consistentInsertedOrd(termsIndex, bottomOrd, bottomValue);
+ }
+ readerGen[bottomSlot] = currentReaderGen;
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return values[slot];
+ }
+
+ final protected static long binarySearch(BytesValues.WithOrdinals a, BytesRef key) {
+ return binarySearch(a, key, 1, a.ordinals().getNumOrds());
+ }
+
+ final protected static long binarySearch(BytesValues.WithOrdinals a, BytesRef key, long low, long high) {
+ assert low != Ordinals.MISSING_ORDINAL;
+ assert high == Ordinals.MISSING_ORDINAL || (a.getValueByOrd(high) == null | a.getValueByOrd(high) != null); // make sure we actually can get these values
+ assert low == high + 1 || a.getValueByOrd(low) == null | a.getValueByOrd(low) != null;
+ while (low <= high) {
+ long mid = (low + high) >>> 1;
+ BytesRef midVal = a.getValueByOrd(mid);
+ int cmp;
+ if (midVal != null) {
+ cmp = midVal.compareTo(key);
+ } else {
+ cmp = -1;
+ }
+
+ if (cmp < 0)
+ low = mid + 1;
+ else if (cmp > 0)
+ high = mid - 1;
+ else
+ return mid;
+ }
+ return -(low + 1);
+ }
+
+ static long getRelevantOrd(Ordinals.Docs readerOrds, int docId, SortMode sortMode) {
+ int length = readerOrds.setDocument(docId);
+ long relevantVal = sortMode.startLong();
+ long result = 0;
+ assert sortMode == SortMode.MAX || sortMode == SortMode.MIN;
+ for (int i = 0; i < length; i++) {
+ result = relevantVal = sortMode.apply(readerOrds.nextOrd(), relevantVal);
+ }
+ assert result >= 0;
+ assert result <= readerOrds.getMaxOrd();
+ return result;
+ // Enable this when the api can tell us that the ords per doc are ordered
+ /*if (reversed) {
+ IntArrayRef ref = readerOrds.getOrds(docId);
+ if (ref.isEmpty()) {
+ return 0;
+ } else {
+ return ref.values[ref.end - 1]; // last element is the highest value.
+ }
+ } else {
+ return readerOrds.getOrd(docId); // returns the lowest value
+ }*/
+ }
+
+}
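
A worked instance of the shifted-ord arithmetic above, matching the class
javadoc (terms a < ab < ac < b < c with field data ords a=1, b=2, c=3; names
are hypothetical, and the calls are shown as if made from within the class):

    long docOrd = binarySearch(termsIndex, new BytesRef("ab")); // -3: insertion point between ords 1 and 2
    long insertedOrd = ((-2 - docOrd) << 2) + 2;                // ((-2 + 3) << 2) + 2 == 6
    // stored ords are a -> 1 << 2 == 4 and b -> 2 << 2 == 8, so 6 sits strictly
    // between them; a bottom value such as "ac" also maps to 6 and is then
    // tie-broken to 7 in setBottom()
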
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefValComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefValComparator.java
new file mode 100644
index 0000000..67b4d70
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefValComparator.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+
+import java.io.IOException;
+
+/**
+ * Sorts by field's natural Term sort order. All
+ * comparisons are done using BytesRef.compareTo, which is
+ * slow for medium to large result sets but possibly
+ * very fast for very small result sets.
+ */
+public final class BytesRefValComparator extends NestedWrappableComparator<BytesRef> {
+
+ private final IndexFieldData<?> indexFieldData;
+ private final SortMode sortMode;
+ private final BytesRef missingValue;
+
+ private final BytesRef[] values;
+ private BytesRef bottom;
+ private BytesValues docTerms;
+
+ BytesRefValComparator(IndexFieldData<?> indexFieldData, int numHits, SortMode sortMode, BytesRef missingValue) {
+ this.sortMode = sortMode;
+ values = new BytesRef[numHits];
+ this.indexFieldData = indexFieldData;
+ this.missingValue = missingValue;
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ final BytesRef val1 = values[slot1];
+ final BytesRef val2 = values[slot2];
+ return compareValues(val1, val2);
+ }
+
+ @Override
+ public int compareBottom(int doc) throws IOException {
+ BytesRef val2 = sortMode.getRelevantValue(docTerms, doc, missingValue);
+ return compareValues(bottom, val2);
+ }
+
+ @Override
+ public void copy(int slot, int doc) throws IOException {
+ BytesRef relevantValue = sortMode.getRelevantValue(docTerms, doc, missingValue);
+ if (relevantValue == missingValue) {
+ values[slot] = missingValue;
+ } else {
+ if (values[slot] == null || values[slot] == missingValue) {
+ values[slot] = new BytesRef();
+ }
+ values[slot].copyBytes(relevantValue);
+ }
+ }
+
+ @Override
+ public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
+ docTerms = indexFieldData.load(context).getBytesValues(false);
+ return this;
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ this.bottom = values[bottom];
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return values[slot];
+ }
+
+ @Override
+ public int compareValues(BytesRef val1, BytesRef val2) {
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+ return val1.compareTo(val2);
+ }
+
+ @Override
+ public int compareDocToValue(int doc, BytesRef value) {
+ return sortMode.getRelevantValue(docTerms, doc, missingValue).compareTo(value);
+ }
+
+ @Override
+ public void missing(int slot) {
+ values[slot] = missingValue;
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ return compareValues(bottom, missingValue);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleScriptDataComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleScriptDataComparator.java
new file mode 100644
index 0000000..07f0dd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleScriptDataComparator.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.script.SearchScript;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+// LUCENE MONITOR: Monitor against FieldComparator.Double
+public class DoubleScriptDataComparator extends NumberComparatorBase<Double> {
+
+ public static IndexFieldData.XFieldComparatorSource comparatorSource(SearchScript script) {
+ return new InnerSource(script);
+ }
+
+ private static class InnerSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final SearchScript script;
+
+ private InnerSource(SearchScript script) {
+ this.script = script;
+ }
+
+ @Override
+ public FieldComparator<? extends Number> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ return new DoubleScriptDataComparator(numHits, script);
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.DOUBLE;
+ }
+ }
+
+ private final SearchScript script;
+
+ private final double[] values;
+ private double bottom;
+
+ public DoubleScriptDataComparator(int numHits, SearchScript script) {
+ this.script = script;
+ values = new double[numHits];
+ }
+
+ @Override
+ public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException {
+ script.setNextReader(context);
+ return this;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ script.setScorer(scorer);
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ final double v1 = values[slot1];
+ final double v2 = values[slot2];
+ if (v1 > v2) {
+ return 1;
+ } else if (v1 < v2) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ script.setNextDocId(doc);
+ final double v2 = script.runAsDouble();
+ if (bottom > v2) {
+ return 1;
+ } else if (bottom < v2) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public int compareDocToValue(int doc, Double val2) throws IOException {
+ script.setNextDocId(doc);
+ double val1 = script.runAsDouble();
+ return Double.compare(val1, val2);
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ script.setNextDocId(doc);
+ values[slot] = script.runAsDouble();
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ this.bottom = values[bottom];
+ }
+
+ @Override
+ public Double value(int slot) {
+ return values[slot];
+ }
+
+ @Override
+ public void add(int slot, int doc) {
+ script.setNextDocId(doc);
+ values[slot] += script.runAsDouble();
+ }
+
+ @Override
+ public void divide(int slot, int divisor) {
+ values[slot] /= divisor;
+ }
+
+ @Override
+ public void missing(int slot) {
+ values[slot] = Double.MAX_VALUE;
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ return Double.compare(bottom, Double.MAX_VALUE);
+ }
+}
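
A hedged wiring sketch ('searchScript' is an assumed, already-compiled
SearchScript): the comparator source above plugs into a regular Lucene
SortField, which is how script-based number sorting reaches the collector:

    IndexFieldData.XFieldComparatorSource source = DoubleScriptDataComparator.comparatorSource(searchScript);
    SortField sortField = new SortField("_script", source, /* reverse */ false);
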
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparator.java
new file mode 100644
index 0000000..6c7dd13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparator.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public final class DoubleValuesComparator extends DoubleValuesComparatorBase<Double> {
+
+ private final double[] values;
+
+ public DoubleValuesComparator(IndexNumericFieldData<?> indexFieldData, double missingValue, int numHits, SortMode sortMode) {
+ super(indexFieldData, missingValue, sortMode);
+ assert indexFieldData.getNumericType().requiredBits() <= 64;
+ this.values = new double[numHits];
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ final double v1 = values[slot1];
+ final double v2 = values[slot2];
+ return compare(v1, v2);
+ }
+
+ @Override
+ public void setBottom(int slot) {
+ this.bottom = values[slot];
+ }
+
+ @Override
+ public void copy(int slot, int doc) throws IOException {
+ values[slot] = sortMode.getRelevantValue(readerValues, doc, missingValue);
+ }
+
+ @Override
+ public Double value(int slot) {
+ return Double.valueOf(values[slot]);
+ }
+
+ @Override
+ public void add(int slot, int doc) {
+ values[slot] += sortMode.getRelevantValue(readerValues, doc, missingValue);
+ }
+
+ @Override
+ public void divide(int slot, int divisor) {
+ values[slot] /= divisor;
+ }
+
+ @Override
+ public void missing(int slot) {
+ values[slot] = missingValue;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorBase.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorBase.java
new file mode 100644
index 0000000..366778d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorBase.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+abstract class DoubleValuesComparatorBase<T extends Number> extends NumberComparatorBase<T> {
+
+ protected final IndexNumericFieldData<?> indexFieldData;
+ protected final double missingValue;
+ protected double bottom;
+ protected DoubleValues readerValues;
+ protected final SortMode sortMode;
+
+ public DoubleValuesComparatorBase(IndexNumericFieldData<?> indexFieldData, double missingValue, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.missingValue = missingValue;
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public final int compareBottom(int doc) throws IOException {
+ final double v2 = sortMode.getRelevantValue(readerValues, doc, missingValue);
+ return compare(bottom, v2);
+ }
+
+ @Override
+ public final int compareDocToValue(int doc, T valueObj) throws IOException {
+ final double value = valueObj.doubleValue();
+ final double docValue = sortMode.getRelevantValue(readerValues, doc, missingValue);
+ return compare(docValue, value);
+ }
+
+ @Override
+ public final FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
+ readerValues = indexFieldData.load(context).getDoubleValues();
+ return this;
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ return compare(bottom, missingValue);
+ }
+
+ static final int compare(double left, double right) {
+ return Double.compare(left, right);
+ }
+}
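
Editor's note: DoubleValuesComparatorBase funnels all comparisons through
Double.compare. A standalone demo (no Elasticsearch dependencies) of why that is
safer than open-coded '>' / '<' branches:

    public class DoubleCompareDemo {
        public static void main(String[] args) {
            // With NaN every '<' / '>' test is false, so branch-based comparison
            // would report equality and break the comparator's transitivity contract.
            System.out.println(Double.NaN > 1.0);                // false
            System.out.println(Double.NaN < 1.0);                // false
            System.out.println(Double.compare(Double.NaN, 1.0)); // 1 -> NaN sorts last
            // -0.0 vs 0.0: Double.compare yields a stable total order.
            System.out.println(Double.compare(-0.0, 0.0));       // -1
        }
    }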
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java
new file mode 100644
index 0000000..868c6a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final IndexNumericFieldData<?> indexFieldData;
+ private final Object missingValue;
+ private final SortMode sortMode;
+
+ public DoubleValuesComparatorSource(IndexNumericFieldData<?> indexFieldData, @Nullable Object missingValue, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.missingValue = missingValue;
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.DOUBLE;
+ }
+
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ assert fieldname.equals(indexFieldData.getFieldNames().indexName());
+
+ final double dMissingValue = (Double) missingObject(missingValue, reversed);
+ return new DoubleValuesComparator(indexFieldData, dMissingValue, numHits, sortMode);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparator.java
new file mode 100644
index 0000000..3b7fedc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparator.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public final class FloatValuesComparator extends DoubleValuesComparatorBase<Float> {
+
+ private final float[] values;
+
+ public FloatValuesComparator(IndexNumericFieldData<?> indexFieldData, float missingValue, int numHits, SortMode sortMode) {
+ super(indexFieldData, missingValue, sortMode);
+ assert indexFieldData.getNumericType().requiredBits() <= 32;
+ this.values = new float[numHits];
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ final float v1 = values[slot1];
+ final float v2 = values[slot2];
+ return Float.compare(v1, v2);
+ }
+
+ @Override
+ public void setBottom(int slot) {
+ this.bottom = values[slot];
+ }
+
+ @Override
+ public void copy(int slot, int doc) throws IOException {
+ values[slot] = (float) sortMode.getRelevantValue(readerValues, doc, missingValue);
+ }
+
+ @Override
+ public Float value(int slot) {
+ return Float.valueOf(values[slot]);
+ }
+
+ @Override
+ public void add(int slot, int doc) {
+ values[slot] += (float) sortMode.getRelevantValue(readerValues, doc, missingValue);
+ }
+
+ @Override
+ public void divide(int slot, int divisor) {
+ values[slot] /= divisor;
+ }
+
+ @Override
+ public void missing(int slot) {
+ values[slot] = (float) missingValue;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java
new file mode 100644
index 0000000..3df74f0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public class FloatValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final IndexNumericFieldData<?> indexFieldData;
+ private final Object missingValue;
+ private final SortMode sortMode;
+
+ public FloatValuesComparatorSource(IndexNumericFieldData<?> indexFieldData, @Nullable Object missingValue, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.missingValue = missingValue;
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.FLOAT;
+ }
+
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ assert fieldname.equals(indexFieldData.getFieldNames().indexName());
+
+ final float fMissingValue = (Float) missingObject(missingValue, reversed);
+ return new FloatValuesComparator(indexFieldData, fMissingValue, numHits, sortMode);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparator.java
new file mode 100644
index 0000000..510d7d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparator.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public class GeoDistanceComparator extends NumberComparatorBase<Double> {
+
+ protected final IndexGeoPointFieldData<?> indexFieldData;
+
+ protected final double lat;
+ protected final double lon;
+ protected final DistanceUnit unit;
+ protected final GeoDistance geoDistance;
+ protected final GeoDistance.FixedSourceDistance fixedSourceDistance;
+ protected final SortMode sortMode;
+ private static final Double MISSING_VALUE = Double.MAX_VALUE;
+
+ private final double[] values;
+ private double bottom;
+
+ private GeoDistanceValues geoDistanceValues;
+
+ public GeoDistanceComparator(int numHits, IndexGeoPointFieldData<?> indexFieldData, double lat, double lon, DistanceUnit unit, GeoDistance geoDistance, SortMode sortMode) {
+ this.values = new double[numHits];
+ this.indexFieldData = indexFieldData;
+ this.lat = lat;
+ this.lon = lon;
+ this.unit = unit;
+ this.geoDistance = geoDistance;
+ this.fixedSourceDistance = geoDistance.fixedSourceDistance(lat, lon, unit);
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException {
+ GeoPointValues readerValues = indexFieldData.load(context).getGeoPointValues();
+ if (readerValues.isMultiValued()) {
+ geoDistanceValues = new MV(readerValues, fixedSourceDistance, sortMode);
+ } else {
+ geoDistanceValues = new SV(readerValues, fixedSourceDistance);
+ }
+ return this;
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ return Double.compare(values[slot1], values[slot2]);
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ final double v2 = geoDistanceValues.computeDistance(doc);
+ return Double.compare(bottom, v2);
+ }
+
+ @Override
+ public int compareDocToValue(int doc, Double distance2) throws IOException {
+ double distance1 = geoDistanceValues.computeDistance(doc);
+ return Double.compare(distance1, distance2);
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ values[slot] = geoDistanceValues.computeDistance(doc);
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ this.bottom = values[bottom];
+ }
+
+ @Override
+ public Double value(int slot) {
+ return values[slot];
+ }
+
+ @Override
+ public void add(int slot, int doc) {
+ values[slot] += geoDistanceValues.computeDistance(doc);
+ }
+
+ @Override
+ public void divide(int slot, int divisor) {
+ values[slot] /= divisor;
+ }
+
+ @Override
+ public void missing(int slot) {
+ values[slot] = MISSING_VALUE;
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ return Double.compare(bottom, MISSING_VALUE);
+ }
+
+ // Computes the distance based on geo points.
+ // Thanks to this abstraction, the geo distance comparator doesn't need to deal with whether fields have one
+ // or multiple geo points per document.
+ private static abstract class GeoDistanceValues {
+
+ protected final GeoPointValues readerValues;
+ protected final GeoDistance.FixedSourceDistance fixedSourceDistance;
+
+ protected GeoDistanceValues(GeoPointValues readerValues, GeoDistance.FixedSourceDistance fixedSourceDistance) {
+ this.readerValues = readerValues;
+ this.fixedSourceDistance = fixedSourceDistance;
+ }
+
+ public abstract double computeDistance(int doc);
+
+ }
+
+ // Deals with one geo point per document
+ private static final class SV extends GeoDistanceValues {
+
+ SV(GeoPointValues readerValues, GeoDistance.FixedSourceDistance fixedSourceDistance) {
+ super(readerValues, fixedSourceDistance);
+ }
+
+ @Override
+ public double computeDistance(int doc) {
+ // Single-valued fields have at most one point per document, so no loop is needed.
+ if (readerValues.setDocument(doc) > 0) {
+ GeoPoint geoPoint = readerValues.nextValue();
+ return fixedSourceDistance.calculate(geoPoint.lat(), geoPoint.lon());
+ }
+ return MISSING_VALUE;
+ }
+ }
+
+ // Deals with more than one geo point per document
+ private static final class MV extends GeoDistanceValues {
+
+ private final SortMode sortMode;
+
+ MV(GeoPointValues readerValues, GeoDistance.FixedSourceDistance fixedSourceDistance, SortMode sortMode) {
+ super(readerValues, fixedSourceDistance);
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public double computeDistance(int doc) {
+ final int length = readerValues.setDocument(doc);
+ double distance = sortMode.startDouble();
+ // 'result' stays at MISSING_VALUE for documents without any geo point,
+ // while 'distance' accumulates across the document's points.
+ double result = MISSING_VALUE;
+ for (int i = 0; i < length; i++) {
+ GeoPoint point = readerValues.nextValue();
+ result = distance = sortMode.apply(distance, fixedSourceDistance.calculate(point.lat(), point.lon()));
+ }
+ return sortMode.reduce(result, length);
+ }
+ }
+
+}
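
Editor's note: GeoDistanceComparator asks GeoDistance for a FixedSourceDistance once
per sort, so the per-document calculate() call stays cheap. A standalone sketch of
that precomputation idea, using the spherical law of cosines and an illustrative
earth radius; this is not the patch's actual implementation:

    public class FixedSourceDistanceSketch {
        private static final double EARTH_RADIUS_KM = 6371.0;
        private final double sinLat, cosLat, lonRad;

        public FixedSourceDistanceSketch(double lat, double lon) {
            double latRad = Math.toRadians(lat);
            this.sinLat = Math.sin(latRad); // computed once per sort, not per document
            this.cosLat = Math.cos(latRad);
            this.lonRad = Math.toRadians(lon);
        }

        public double calculate(double lat, double lon) {
            double latRad = Math.toRadians(lat);
            double c = sinLat * Math.sin(latRad)
                    + cosLat * Math.cos(latRad) * Math.cos(Math.toRadians(lon) - lonRad);
            return EARTH_RADIUS_KM * Math.acos(Math.min(1.0, Math.max(-1.0, c)));
        }

        public static void main(String[] args) {
            FixedSourceDistanceSketch berlin = new FixedSourceDistanceSketch(52.52, 13.405);
            System.out.println(berlin.calculate(48.8566, 2.3522)); // ~878 km to Paris
        }
    }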
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparatorSource.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparatorSource.java
new file mode 100644
index 0000000..e73004d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/GeoDistanceComparatorSource.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public class GeoDistanceComparatorSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final IndexGeoPointFieldData<?> indexFieldData;
+ private final double lat;
+ private final double lon;
+ private final DistanceUnit unit;
+ private final GeoDistance geoDistance;
+ private final SortMode sortMode;
+
+ public GeoDistanceComparatorSource(IndexGeoPointFieldData<?> indexFieldData, double lat, double lon, DistanceUnit unit, GeoDistance geoDistance, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.lat = lat;
+ this.lon = lon;
+ this.unit = unit;
+ this.geoDistance = geoDistance;
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.DOUBLE;
+ }
+
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ assert indexFieldData.getFieldNames().indexName().equals(fieldname);
+ return new GeoDistanceComparator(numHits, indexFieldData, lat, lon, unit, geoDistance, sortMode);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparator.java
new file mode 100644
index 0000000..7e428a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparator.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public final class LongValuesComparator extends LongValuesComparatorBase<Long> {
+
+ private final long[] values;
+
+ public LongValuesComparator(IndexNumericFieldData<?> indexFieldData, long missingValue, int numHits, SortMode sortMode) {
+ super(indexFieldData, missingValue, sortMode);
+ this.values = new long[numHits];
+ assert indexFieldData.getNumericType().requiredBits() <= 64;
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ final long v1 = values[slot1];
+ final long v2 = values[slot2];
+ return compare(v1, v2);
+ }
+
+ @Override
+ public void setBottom(int slot) {
+ this.bottom = values[slot];
+ }
+
+ @Override
+ public void copy(int slot, int doc) throws IOException {
+ values[slot] = sortMode.getRelevantValue(readerValues, doc, missingValue);
+ }
+
+ @Override
+ public Long value(int slot) {
+ return Long.valueOf(values[slot]);
+ }
+
+ @Override
+ public void add(int slot, int doc) {
+ values[slot] += sortMode.getRelevantValue(readerValues, doc, missingValue);
+ }
+
+ @Override
+ public void divide(int slot, int divisor) {
+ values[slot] /= divisor;
+ }
+
+ @Override
+ public void missing(int slot) {
+ values[slot] = missingValue;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorBase.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorBase.java
new file mode 100644
index 0000000..c68ec28
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorBase.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+
+import java.io.IOException;
+
+abstract class LongValuesComparatorBase<T extends Number> extends NumberComparatorBase<T> {
+
+ protected final IndexNumericFieldData<?> indexFieldData;
+ protected final long missingValue;
+ protected long bottom;
+ protected LongValues readerValues;
+ protected final SortMode sortMode;
+
+ public LongValuesComparatorBase(IndexNumericFieldData<?> indexFieldData, long missingValue, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.missingValue = missingValue;
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public final int compareBottom(int doc) throws IOException {
+ long v2 = sortMode.getRelevantValue(readerValues, doc, missingValue);
+ return compare(bottom, v2);
+ }
+
+ @Override
+ public final int compareDocToValue(int doc, T valueObj) throws IOException {
+ final long value = valueObj.longValue();
+ long docValue = sortMode.getRelevantValue(readerValues, doc, missingValue);
+ return compare(docValue, value);
+ }
+
+ static final int compare(long left, long right) {
+ if (left > right) {
+ return 1;
+ } else if (left < right) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public final FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
+ readerValues = indexFieldData.load(context).getLongValues();
+ return this;
+ }
+
+ @Override
+ public int compareBottomMissing() {
+ return compare(bottom, missingValue);
+ }
+}
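
Editor's note: compare(long, long) above uses explicit branches, possibly because
Long.compare only arrived in Java 7. A standalone demo of why the shorter
'(int) (left - right)' subtraction trick would be wrong:

    public class LongCompareDemo {
        static int subtractionCompare(long left, long right) {
            return (int) (left - right); // broken: wraps around on overflow
        }

        public static void main(String[] args) {
            long big = Long.MAX_VALUE, small = Long.MIN_VALUE;
            System.out.println(subtractionCompare(big, small)); // -1: wrong sign
            System.out.println(Long.compare(big, small));       // 1: correct (Java 7+)
        }
    }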
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java
new file mode 100644
index 0000000..430f6af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+
+import java.io.IOException;
+
+/**
+ */
+public class LongValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final IndexNumericFieldData<?> indexFieldData;
+ private final Object missingValue;
+ private final SortMode sortMode;
+
+ public LongValuesComparatorSource(IndexNumericFieldData<?> indexFieldData, @Nullable Object missingValue, SortMode sortMode) {
+ this.indexFieldData = indexFieldData;
+ this.missingValue = missingValue;
+ this.sortMode = sortMode;
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.LONG;
+ }
+
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ assert fieldname.equals(indexFieldData.getFieldNames().indexName());
+
+ final long lMissingValue = (Long) missingObject(missingValue, reversed);
+ return new LongValuesComparator(indexFieldData, lMissingValue, numHits, sortMode);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NestedWrappableComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NestedWrappableComparator.java
new file mode 100644
index 0000000..6eeb9b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NestedWrappableComparator.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.search.FieldComparator;
+
+/** Base comparator which allows for nested sorting. */
+public abstract class NestedWrappableComparator<T> extends FieldComparator<T> {
+
+ /**
+ * Assigns the underlying missing value to the specified slot, if the actual implementation supports missing value.
+ *
+ * @param slot The slot to assign the missing value to.
+ */
+ public abstract void missing(int slot);
+
+ /**
+ * Compares the missing value to the bottom.
+ *
+ * @return any N &lt; 0 if the bottom value is not competitive with the missing value, any N &gt; 0 if the
+ * bottom value is competitive with the missing value, and 0 if they are equal.
+ */
+ public abstract int compareBottomMissing();
+
+}
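
Editor's note: a hypothetical fragment showing how a nested-sort wrapper is expected
to drive this contract; the method and parameter names are illustrative, not the
patch's API:

    // For each parent (root) document, either record the missing value or
    // delegate to the wrapped comparator for the matching child documents.
    static void copyRoot(NestedWrappableComparator<?> wrapped, int rootSlot,
                         int childCount, int firstChildDoc) throws java.io.IOException {
        if (childCount == 0) {
            wrapped.missing(rootSlot);             // parent without children
        } else {
            wrapped.copy(rootSlot, firstChildDoc); // normal copy path
        }
    }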
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NumberComparatorBase.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NumberComparatorBase.java
new file mode 100644
index 0000000..e4c66ab
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/NumberComparatorBase.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+
+/**
+ * Base FieldComparator class for number fields.
+ */
+// This is right now only used for sorting number based fields inside nested objects
+public abstract class NumberComparatorBase<T> extends NestedWrappableComparator<T> {
+
+ /**
+ * Adds numeric value at the specified doc to the specified slot.
+ *
+ * @param slot The specified slot
+ * @param doc The specified doc
+ */
+ public abstract void add(int slot, int doc);
+
+ /**
+ * Divides the value at the specified slot with the specified divisor.
+ *
+ * @param slot The specified slot
+ * @param divisor The specified divisor
+ */
+ public abstract void divide(int slot, int divisor);
+
+}
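
Editor's note: a standalone sketch of the add()/divide() contract declared above, as
a nested AVG sort would use it; the plain double array stands in for the comparator's
slot storage:

    public class AvgSlotDemo {
        public static void main(String[] args) {
            double[] slots = new double[1];
            double[] childValues = {2.0, 4.0, 9.0}; // values of one parent's children
            for (double v : childValues) {
                slots[0] += v;                      // what add(slot, doc) does per child
            }
            slots[0] /= childValues.length;         // what divide(slot, childCount) does
            System.out.println(slots[0]);           // 5.0
        }
    }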
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/SortMode.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/SortMode.java
new file mode 100644
index 0000000..76e4ca9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/SortMode.java
@@ -0,0 +1,411 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.LongValues;
+
+import java.util.Locale;
+
+/**
+ * Defines what values to pick in the case a document contains multiple values for a particular field.
+ */
+public enum SortMode {
+
+ /**
+ * Sum of all the values.
+ */
+ SUM {
+ /**
+ * Returns the sum of the two values
+ */
+ @Override
+ public double apply(double a, double b) {
+ return a + b;
+ }
+
+ /**
+ * Returns the sum of the two values
+ */
+ @Override
+ public long apply(long a, long b) {
+ return a + b;
+ }
+ },
+
+ /**
+ * Average of all the values.
+ */
+ AVG {
+
+ /**
+ * Returns the sum of the two values
+ */
+ @Override
+ public double apply(double a, double b) {
+ return a + b;
+ }
+
+ /**
+ * Returns the sum of the two values
+ */
+ @Override
+ public long apply(long a, long b) {
+ return a + b;
+ }
+
+ /**
+ * Returns <code>a / Math.max(1.0d, numValues)</code>
+ */
+ @Override
+ public double reduce(double a, int numValues) {
+ return a / Math.max(1.0d, (double) numValues);
+ }
+
+ /**
+ * Returns <code>Math.round(a / Math.max(1.0, numValues))</code>
+ */
+ @Override
+ public long reduce(long a, int numValues) {
+ return Math.round(a / Math.max(1.0, numValues));
+ }
+ },
+
+ /**
+ * Pick the lowest value.
+ */
+ MIN {
+ /**
+ * Equivalent to {@link Math#min(double, double)}
+ */
+ @Override
+ public double apply(double a, double b) {
+ return Math.min(a, b);
+ }
+
+ /**
+ * Equivalent to {@link Math#min(long, long)}
+ */
+ @Override
+ public long apply(long a, long b) {
+ return Math.min(a, b);
+ }
+
+ /**
+ * Returns {@link Double#POSITIVE_INFINITY}
+ */
+ @Override
+ public double startDouble() {
+ return Double.POSITIVE_INFINITY;
+ }
+
+ /**
+ * Returns {@link Long#MAX_VALUE}
+ */
+ @Override
+ public long startLong() {
+ return Long.MAX_VALUE;
+ }
+
+ /**
+ * Returns the first value for the given <tt>docId</tt>, or the <tt>defaultValue</tt> if the document
+ * has no values. Values are produced in ascending order, so the first one is the minimum.
+ */
+ @Override
+ public double getRelevantValue(DoubleValues values, int docId, double defaultValue) {
+ assert values.getOrder() != AtomicFieldData.Order.NONE;
+ if (values.setDocument(docId) > 0) {
+ return values.nextValue();
+ }
+ return defaultValue;
+ }
+
+ /**
+ * Returns the first value for the given <tt>docId</tt>, or the <tt>defaultValue</tt> if the document
+ * has no values. Values are produced in ascending order, so the first one is the minimum.
+ */
+ @Override
+ public long getRelevantValue(LongValues values, int docId, long defaultValue) {
+ assert values.getOrder() != AtomicFieldData.Order.NONE;
+ if (values.setDocument(docId) > 0) {
+ return values.nextValue();
+ }
+ return defaultValue;
+ }
+
+ /**
+ * Returns the first value for the given <tt>docId</tt>, or the <tt>defaultValue</tt> if the document
+ * has no values. Values are produced in ascending order, so the first one is the minimum.
+ */
+ @Override
+ public BytesRef getRelevantValue(BytesValues values, int docId, BytesRef defaultValue) {
+ assert values.getOrder() != AtomicFieldData.Order.NONE;
+ if (values.setDocument(docId) > 0) {
+ return values.nextValue();
+ }
+ return defaultValue;
+ }
+ },
+
+ /**
+ * Pick the highest value.
+ */
+ MAX {
+ /**
+ * Equivalent to {@link Math#max(double, double)}
+ */
+ @Override
+ public double apply(double a, double b) {
+ return Math.max(a, b);
+ }
+
+ /**
+ * Equivalent to {@link Math#max(long, long)}
+ */
+ @Override
+ public long apply(long a, long b) {
+ return Math.max(a, b);
+ }
+
+ /**
+ * Returns {@link Double#NEGATIVE_INFINITY}
+ */
+ @Override
+ public double startDouble() {
+ return Double.NEGATIVE_INFINITY;
+ }
+
+ /**
+ * Returns {@link Long#MIN_VALUE}
+ */
+ @Override
+ public long startLong() {
+ return Long.MIN_VALUE;
+ }
+
+ /**
+ * Returns the last value for the given <tt>docId</tt>, or the <tt>defaultValue</tt> if the document
+ * has no values. Values are produced in ascending order, so the last one is the maximum.
+ */
+ @Override
+ public double getRelevantValue(DoubleValues values, int docId, double defaultValue) {
+ assert values.getOrder() != AtomicFieldData.Order.NONE;
+ final int numValues = values.setDocument(docId);
+ double retVal = defaultValue;
+ for (int i = 0; i < numValues; i++) {
+ retVal = values.nextValue();
+ }
+ return retVal;
+ }
+
+ /**
+ * Returns the last value for the given <tt>docId</tt>, or the <tt>defaultValue</tt> if the document
+ * has no values. Values are produced in ascending order, so the last one is the maximum.
+ */
+ @Override
+ public long getRelevantValue(LongValues values, int docId, long defaultValue) {
+ assert values.getOrder() != AtomicFieldData.Order.NONE;
+ final int numValues = values.setDocument(docId);
+ long retVal = defaultValue;
+ for (int i = 0; i < numValues; i++) {
+ retVal = values.nextValue();
+ }
+ return retVal;
+ }
+
+ /**
+ * Returns the last value for the given <tt>docId</tt>, or the <tt>defaultValue</tt> if the document
+ * has no values. Values are produced in ascending order, so the last one is the maximum.
+ */
+ @Override
+ public BytesRef getRelevantValue(BytesValues values, int docId, BytesRef defaultValue) {
+ assert values.getOrder() != AtomicFieldData.Order.NONE;
+ final int numValues = values.setDocument(docId);
+ BytesRef currentVal = defaultValue;
+ for (int i = 0; i < numValues; i++) {
+ currentVal = values.nextValue();
+ }
+ return currentVal;
+ }
+ };
+
+ /**
+ * Applies the sort mode and returns the result. This method is meant to be
+ * a binary function that is commonly used in a loop to find the relevant
+ * value for the sort mode in a list of values. For instance if the sort mode
+ * is {@link SortMode#MAX} this method is equivalent to {@link Math#max(double, double)}.
+ *
+ * Note: all implementations are stateless and side-effect free.
+ *
+ * @param a an argument
+ * @param b another argument
+ * @return the result of the function.
+ */
+ public abstract double apply(double a, double b);
+
+ /**
+ * Applies the sort mode and returns the result. This method is meant to be
+ * a binary function that is commonly used in a loop to find the relevant
+ * value for the sort mode in a list of values. For instance if the sort mode
+ * is {@link SortMode#MAX} this method is equivalent to {@link Math#max(long, long)}.
+ *
+ * Note: all implementations are stateless and side-effect free.
+ *
+ * @param a an argument
+ * @param b another argument
+ * @return the result of the function.
+ */
+ public abstract long apply(long a, long b);
+
+ /**
+ * Returns an initial value for the sort mode that is guaranteed to have no impact if passed
+ * to {@link #apply(double, double)}. This value should be used as the initial value if the
+ * sort mode is applied to a non-empty list of values. For instance:
+ * <pre>
+ * double relevantValue = sortMode.startDouble();
+ * for (int i = 0; i < array.length; i++) {
+ * relevantValue = sortMode.apply(array[i], relevantValue);
+ * }
+ * </pre>
+ *
+ * Note: This method returns <code>0</code> by default.
+ *
+ * @return an initial value for the sort mode.
+ */
+ public double startDouble() {
+ return 0;
+ }
+
+ /**
+ * Returns an initial value for the sort mode that is guaranteed to have no impact if passed
+ * to {@link #apply(long, long)}. This value should be used as the initial value if the
+ * sort mode is applied to a non-empty list of values. For instance:
+ * <pre>
+ * long relevantValue = sortMode.startLong();
+ * for (int i = 0; i < array.length; i++) {
+ * relevantValue = sortMode.apply(array[i], relevantValue);
+ * }
+ * </pre>
+ *
+ * Note: This method returns <code>0</code> by default.
+ * @return an initial value for the sort mode.
+ */
+ public long startLong() {
+ return 0;
+ }
+
+ /**
+ * Returns the aggregated value based on the sort mode. For instance if {@link SortMode#AVG} is used
+ * this method divides the given value by the number of values. The default implementation returns
+ * the first argument.
+ *
+ * Note: all implementations are stateless and side-effect free.
+ */
+ public double reduce(double a, int numValues) {
+ return a;
+ }
+
+ /**
+ * Returns the aggregated value based on the sort mode. For instance if {@link SortMode#AVG} is used
+ * this method divides the given value by the number of values. The default implementation returns
+ * the first argument.
+ *
+ * Note: all implementations are stateless and side-effect free.
+ */
+ public long reduce(long a, int numValues) {
+ return a;
+ }
+
+ /**
+ * A case-insensitive version of {@link #valueOf(String)}.
+ *
+ * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the given string doesn't match a sort mode or is <code>null</code>.
+ */
+ public static SortMode fromString(String sortMode) {
+ try {
+ return valueOf(sortMode.toUpperCase(Locale.ROOT));
+ } catch (Throwable t) {
+ throw new ElasticsearchIllegalArgumentException("Illegal sort_mode " + sortMode);
+ }
+ }
+
+ /**
+ * Returns the relevant value for the given document based on the {@link SortMode}. This
+ * method will apply each value for the given document to {@link #apply(double, double)} and returns
+ * the reduced value from {@link #reduce(double, int)} if the document has at least one value. Otherwise it will
+ * return the given default value.
+ * @param values the values to fetch the relevant value from.
+ * @param docId the doc id to fetch the relevant value for.
+ * @param defaultValue the default value if the document has no value
+ * @return the relevant value or the default value passed to the method.
+ */
+ public double getRelevantValue(DoubleValues values, int docId, double defaultValue) {
+ final int numValues = values.setDocument(docId);
+ double relevantVal = startDouble();
+ double result = defaultValue;
+ for (int i = 0; i < numValues; i++) {
+ result = relevantVal = apply(relevantVal, values.nextValue());
+ }
+ return reduce(result, numValues);
+ }
+
+ /**
+ * Returns the relevant value for the given document based on the {@link SortMode}. This
+ * method will apply each value for the given document to {@link #apply(long, long)} and returns
+ * the reduced value from {@link #reduce(long, int)} if the document has at least one value. Otherwise it will
+ * return the given default value.
+ * @param values the values to fetch the relevant value from.
+ * @param docId the doc id to fetch the relevant value for.
+ * @param defaultValue the default value if the document has no value
+ * @return the relevant value or the default value passed to the method.
+ */
+ public long getRelevantValue(LongValues values, int docId, long defaultValue) {
+ final int numValues = values.setDocument(docId);
+ long relevantVal = startLong();
+ long result = defaultValue;
+ for (int i = 0; i < numValues; i++) {
+ result = relevantVal = apply(relevantVal, values.nextValue());
+ }
+ return reduce(result, numValues);
+ }
+
+
+ /**
+ * Returns the relevant value for the given document based on the {@link SortMode}
+ * if the document has at least one value. Otherwise it will return the same object given as the default value.
+ * Note: This method is optional and will throw {@link UnsupportedOperationException} if the sort mode doesn't
+ * allow a relevant value.
+ *
+ * @param values the values to fetch the relevant value from.
+ * @param docId the doc id to fetch the relevant value for.
+ * @param defaultValue the default value if the document has no value. This object will never be modified.
+ * @return the relevant value or the default value passed to the method.
+ */
+ public BytesRef getRelevantValue(BytesValues values, int docId, BytesRef defaultValue) {
+ throw new UnsupportedOperationException("no relevant bytes value for sort mode: " + this.name());
+ }
+
+}
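
Editor's note: a standalone demonstration of the startDouble()/apply()/reduce() fold
documented above, hard-wired for AVG since SortMode itself depends on the rest of the
field-data package:

    public class SortModeFoldDemo {
        public static void main(String[] args) {
            double[] values = {3.0, 1.0, 8.0};
            double acc = 0;                    // AVG.startDouble() == 0
            for (double v : values) {
                acc = acc + v;                 // AVG.apply(a, b) == a + b
            }
            double avg = acc / Math.max(1.0, values.length); // AVG.reduce(a, n)
            System.out.println(avg);           // 4.0
        }
    }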
diff --git a/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/StringScriptDataComparator.java b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/StringScriptDataComparator.java
new file mode 100644
index 0000000..6133249
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/StringScriptDataComparator.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.script.SearchScript;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class StringScriptDataComparator extends FieldComparator<BytesRef> {
+
+ public static IndexFieldData.XFieldComparatorSource comparatorSource(SearchScript script) {
+ return new InnerSource(script);
+ }
+
+ private static class InnerSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final SearchScript script;
+
+ private InnerSource(SearchScript script) {
+ this.script = script;
+ }
+
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ return new StringScriptDataComparator(numHits, script);
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return SortField.Type.STRING;
+ }
+ }
+
+ private final SearchScript script;
+
+ private BytesRef[] values; // TODO maybe we can preallocate or use a sentinel to prevent the conditionals in compare
+
+ private BytesRef bottom;
+
+ private final BytesRef spare = new BytesRef();
+
+ private int spareDoc = -1;
+
+ public StringScriptDataComparator(int numHits, SearchScript script) {
+ this.script = script;
+ values = new BytesRef[numHits];
+ }
+
+ @Override
+ public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
+ script.setNextReader(context);
+ spareDoc = -1;
+ return this;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ script.setScorer(scorer);
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ final BytesRef val1 = values[slot1];
+ final BytesRef val2 = values[slot2];
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+
+ return val1.compareTo(val2);
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ if (bottom == null) {
+ return -1;
+ }
+ setSpare(doc);
+ return bottom.compareTo(spare);
+ }
+
+ @Override
+ public int compareDocToValue(int doc, BytesRef val2) throws IOException {
+ // setSpare positions the script on 'doc' and runs it unless the value is already cached.
+ setSpare(doc);
+ return spare.compareTo(val2);
+ }
+
+ private void setSpare(int doc) {
+ if (spareDoc == doc) {
+ return;
+ }
+ script.setNextDocId(doc);
+ spare.copyChars(script.run().toString());
+ spareDoc = doc;
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ setSpare(doc);
+ if (values[slot] == null) {
+ values[slot] = BytesRef.deepCopyOf(spare);
+ } else {
+ values[slot].copyBytes(spare);
+ }
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ this.bottom = values[bottom];
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return values[slot];
+ }
+}
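
Editor's note: copy() above allocates a BytesRef only the first time a slot is
filled and reuses the slot's buffer afterwards. A standalone demo of that pattern,
needing only lucene-core 4.x:

    import org.apache.lucene.util.BytesRef;

    public class BytesRefReuseDemo {
        public static void main(String[] args) {
            BytesRef spare = new BytesRef();
            spare.copyChars("beta");
            BytesRef slot = BytesRef.deepCopyOf(spare); // first fill: allocate
            spare.copyChars("alpha");
            slot.copyBytes(spare);                      // later fills: reuse the buffer
            System.out.println(slot.utf8ToString());    // alpha
        }
    }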
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ordinals/DocIdOrdinals.java b/src/main/java/org/elasticsearch/index/fielddata/ordinals/DocIdOrdinals.java
new file mode 100644
index 0000000..74a5e42
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ordinals/DocIdOrdinals.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * Ordinals that effectively are single valued and map "one to one" to the
+ * doc ids. Note, the docId is incremented by 1 to get the ordinal, since 0
+ * denotes an empty value.
+ */
+public class DocIdOrdinals implements Ordinals {
+
+ private final int numDocs;
+
+ /**
+ * Constructs a new doc id ordinals.
+ */
+ public DocIdOrdinals(int numDocs) {
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ @Override
+ public long getNumOrds() {
+ return numDocs;
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return 1L + numDocs;
+ }
+
+ @Override
+ public Ordinals.Docs ordinals() {
+ return new Docs(this);
+ }
+
+ public static class Docs implements Ordinals.Docs {
+
+ private final DocIdOrdinals parent;
+ private final LongsRef longsScratch = new LongsRef(new long[1], 0, 1);
+ private int docId = -1;
+ private long currentOrdinal = -1;
+
+ public Docs(DocIdOrdinals parent) {
+ this.parent = parent;
+ }
+
+ @Override
+ public Ordinals ordinals() {
+ return parent;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return parent.getNumDocs();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return parent.getNumOrds();
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return parent.getMaxOrd();
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public long getOrd(int docId) {
+ return currentOrdinal = docId + 1;
+ }
+
+ @Override
+ public LongsRef getOrds(int docId) {
+ longsScratch.longs[0] = currentOrdinal = docId + 1;
+ return longsScratch;
+ }
+
+ @Override
+ public long nextOrd() {
+ assert docId >= 0;
+ currentOrdinal = docId + 1;
+ docId = -1;
+ return currentOrdinal;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return 1;
+ }
+
+ @Override
+ public long currentOrd() {
+ return currentOrdinal;
+ }
+ }
+}
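
Editor's note: a hypothetical fragment showing the Ordinals.Docs iteration contract
that DocIdOrdinals implements; 'docs' may be any Ordinals.Docs instance:

    static long firstOrdOrZero(Ordinals.Docs docs, int docId) {
        int count = docs.setDocument(docId); // 1 for DocIdOrdinals, 0..n in general
        if (count > 0) {
            return docs.nextOrd();           // for DocIdOrdinals this is docId + 1
        }
        return 0L;                           // ordinal 0 denotes "no value"
    }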
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ordinals/EmptyOrdinals.java b/src/main/java/org/elasticsearch/index/fielddata/ordinals/EmptyOrdinals.java
new file mode 100644
index 0000000..ddbba20
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ordinals/EmptyOrdinals.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.LongsRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+/**
+ */
+public class EmptyOrdinals implements Ordinals {
+
+ private final int numDocs;
+
+ public EmptyOrdinals(int numDocs) {
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return 0;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return this.numDocs;
+ }
+
+ @Override
+ public long getNumOrds() {
+ return 0;
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return 1;
+ }
+
+ @Override
+ public Docs ordinals() {
+ return new Docs(this);
+ }
+
+ public static class Docs implements Ordinals.Docs {
+ private final EmptyOrdinals parent;
+ public static final LongsRef EMPTY_LONGS_REF = new LongsRef();
+
+ public Docs(EmptyOrdinals parent) {
+ this.parent = parent;
+ }
+
+ @Override
+ public Ordinals ordinals() {
+ return parent;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return parent.getNumDocs();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return 0;
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return 1;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public long getOrd(int docId) {
+ return 0;
+ }
+
+ @Override
+ public LongsRef getOrds(int docId) {
+ return EMPTY_LONGS_REF;
+ }
+
+ @Override
+ public long nextOrd() {
+ throw new ElasticsearchIllegalStateException("Empty ordinals has no nextOrd");
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return 0;
+ }
+
+ @Override
+ public long currentOrd() {
+ return 0;
+ }
+ }
+}
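
Editor's note: a small fragment showing why nextOrd() can afford to throw here: with
setDocument() always returning 0, a well-behaved caller's loop body never executes,
so nextOrd() is only reachable through a caller bug:

    static void consume(Ordinals.Docs docs, int docId) {
        int count = docs.setDocument(docId); // always 0 for EmptyOrdinals
        for (int i = 0; i < count; i++) {    // loop body never runs
            docs.nextOrd();
        }
    }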
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java b/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java
new file mode 100644
index 0000000..de765da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.packed.AppendingPackedLongBuffer;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
+import org.apache.lucene.util.packed.PackedInts;
+
+/**
+ * {@link Ordinals} implementation which is efficient at storing field data ordinals for multi-valued or sparse fields.
+ */
+public class MultiOrdinals implements Ordinals {
+
+ private static final int OFFSETS_PAGE_SIZE = 1024;
+ private static final int OFFSET_INIT_PAGE_COUNT = 16;
+
+ /**
+ * Return true if this impl is going to be smaller than {@link SinglePackedOrdinals} by at least 20%.
+ */
+ public static boolean significantlySmallerThanSinglePackedOrdinals(int maxDoc, int numDocsWithValue, long numOrds, float acceptableOverheadRatio) {
+ int bitsPerOrd = PackedInts.bitsRequired(numOrds);
+ bitsPerOrd = PackedInts.fastestFormatAndBits(numDocsWithValue, bitsPerOrd, acceptableOverheadRatio).bitsPerValue;
+ // Compute the worst-case number of bits per value for offsets, e.g. if no docs have a value at the
+ // beginning of the block and all docs have one at the end of the block
+ final float avgValuesPerDoc = (float) numDocsWithValue / maxDoc;
+ final int maxDelta = (int) Math.ceil(OFFSETS_PAGE_SIZE * (1 - avgValuesPerDoc) * avgValuesPerDoc);
+ int bitsPerOffset = PackedInts.bitsRequired(maxDelta) + 1; // +1 because of the sign
+ bitsPerOffset = PackedInts.fastestFormatAndBits(maxDoc, bitsPerOffset, acceptableOverheadRatio).bitsPerValue;
+
+ final long expectedMultiSizeInBytes = (long) numDocsWithValue * bitsPerOrd + (long) maxDoc * bitsPerOffset;
+ final long expectedSingleSizeInBytes = (long) maxDoc * bitsPerOrd;
+ return expectedMultiSizeInBytes < 0.8f * expectedSingleSizeInBytes;
+ }
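+
+ // Worked example (illustrative figures, not from the upstream source): with maxDoc = 1024,
+ // numDocsWithValue = 128 and numOrds = 128, bitsPerOrd = bitsRequired(128) = 8 and, with
+ // avgValuesPerDoc = 0.125, maxDelta = ceil(1024 * 0.875 * 0.125) = 112, so bitsPerOffset =
+ // bitsRequired(112) + 1 = 8 before format rounding. The multi layout then needs roughly
+ // 128 * 8 + 1024 * 8 bits against 1024 * 8 bits for the single layout, so it is not
+ // significantly smaller here; the heuristic mostly favours much sparser fields.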
+
+ private final boolean multiValued;
+ private final long numOrds;
+ private final MonotonicAppendingLongBuffer endOffsets;
+ private final AppendingPackedLongBuffer ords;
+
+ public MultiOrdinals(OrdinalsBuilder builder, float acceptableOverheadRatio) {
+ multiValued = builder.getNumMultiValuesDocs() > 0;
+ numOrds = builder.getNumOrds();
+ endOffsets = new MonotonicAppendingLongBuffer(OFFSET_INIT_PAGE_COUNT, OFFSETS_PAGE_SIZE, acceptableOverheadRatio);
+ ords = new AppendingPackedLongBuffer(OFFSET_INIT_PAGE_COUNT, OFFSETS_PAGE_SIZE, acceptableOverheadRatio);
+ long lastEndOffset = 0;
+ for (int i = 0; i < builder.maxDoc(); ++i) {
+ final LongsRef docOrds = builder.docOrds(i);
+ final long endOffset = lastEndOffset + docOrds.length;
+ endOffsets.add(endOffset);
+ for (int j = 0; j < docOrds.length; ++j) {
+ ords.add(docOrds.longs[docOrds.offset + j] - 1);
+ }
+ lastEndOffset = endOffset;
+ }
+ assert endOffsets.size() == builder.maxDoc();
+ assert ords.size() == builder.getTotalNumOrds() : ords.size() + " != " + builder.getTotalNumOrds();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return endOffsets.ramBytesUsed() + ords.ramBytesUsed();
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return multiValued;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return (int) endOffsets.size();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return numOrds + 1;
+ }
+
+ @Override
+ public Ordinals.Docs ordinals() {
+ return new MultiDocs(this);
+ }
+
+ static class MultiDocs implements Ordinals.Docs {
+
+ private final MultiOrdinals ordinals;
+ private final MonotonicAppendingLongBuffer endOffsets;
+ private final AppendingPackedLongBuffer ords;
+ private final LongsRef longsScratch;
+ private long offset;
+ private long limit;
+ private long currentOrd;
+
+ MultiDocs(MultiOrdinals ordinals) {
+ this.ordinals = ordinals;
+ this.endOffsets = ordinals.endOffsets;
+ this.ords = ordinals.ords;
+ this.longsScratch = new LongsRef(16);
+ }
+
+ @Override
+ public Ordinals ordinals() {
+ return this.ordinals;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return ordinals.getNumDocs();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return ordinals.getMaxOrd();
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public long getOrd(int docId) {
+ final long startOffset = docId > 0 ? endOffsets.get(docId - 1) : 0;
+ final long endOffset = endOffsets.get(docId);
+ if (startOffset == endOffset) {
+ return currentOrd = 0L; // ord for missing values
+ } else {
+ return currentOrd = 1L + ords.get(startOffset);
+ }
+ }
+
+ @Override
+ public LongsRef getOrds(int docId) {
+ final long startOffset = docId > 0 ? endOffsets.get(docId - 1) : 0;
+ final long endOffset = endOffsets.get(docId);
+ final int numValues = (int) (endOffset - startOffset);
+ if (longsScratch.length < numValues) {
+ longsScratch.longs = new long[ArrayUtil.oversize(numValues, RamUsageEstimator.NUM_BYTES_LONG)];
+ }
+ for (int i = 0; i < numValues; ++i) {
+ longsScratch.longs[i] = 1L + ords.get(startOffset + i);
+ }
+ longsScratch.offset = 0;
+ longsScratch.length = numValues;
+ return longsScratch;
+ }
+
+ @Override
+ public long nextOrd() {
+ assert offset < limit;
+ return currentOrd = 1L + ords.get(offset++);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ final long startOffset = docId > 0 ? endOffsets.get(docId - 1) : 0;
+ final long endOffset = endOffsets.get(docId);
+ offset = startOffset;
+ limit = endOffset;
+ return (int) (endOffset - startOffset);
+ }
+
+ @Override
+ public long currentOrd() {
+ return currentOrd;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ordinals/Ordinals.java b/src/main/java/org/elasticsearch/index/fielddata/ordinals/Ordinals.java
new file mode 100644
index 0000000..e4c5066
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ordinals/Ordinals.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.LongsRef;
+
+/**
+ * A thread safe ordinals abstraction. Ordinals can only be positive integers.
+ */
+public interface Ordinals {
+
+ static final long MISSING_ORDINAL = 0;
+ static final long MIN_ORDINAL = 1;
+
+ /**
+ * The memory size these ordinals take, in bytes.
+ */
+ long getMemorySizeInBytes();
+
+ /**
+ * Does any of the docs map to more than one ordinal?
+ */
+ boolean isMultiValued();
+
+ /**
+ * The number of docs in these ordinals.
+ */
+ int getNumDocs();
+
+ /**
+ * The number of ordinals, excluding the {@link #MISSING_ORDINAL} ordinal indicating a missing value.
+ */
+ long getNumOrds();
+
+ /**
+ * Returns total unique ord count; this includes +1 for
+ * the {@link #MISSING_ORDINAL} ord (always {@value #MISSING_ORDINAL}).
+ */
+ long getMaxOrd();
+
+ /**
+ * Returns a lightweight (non thread safe) view iterator of the ordinals.
+ */
+ Docs ordinals();
+
+ /**
+ * A non thread safe ordinals abstraction, yet very lightweight to create. The idea
+ * is that this gets created for each "iteration" over ordinals.
+ * <p/>
+ * <p>An ordinal value of 0 while iterating indicates that the document has "no" value.</p>
+ * To iterate over the set of ordinals for a given document use {@link #setDocument(int)} and {@link #nextOrd()} as
+ * shown in the example below:
+ * <pre>
+ * Ordinals.Docs docs = ...;
+ * final int len = docs.setDocument(docId);
+ * for (int i = 0; i < len; i++) {
+ * final long ord = docs.nextOrd();
+ * // process ord
+ * }
+ * </pre>
+ */
+ interface Docs {
+
+ /**
+ * Returns the original ordinals used to generate this Docs "iterator".
+ */
+ Ordinals ordinals();
+
+ /**
+ * The number of docs in these ordinals.
+ */
+ int getNumDocs();
+
+ /**
+ * The number of ordinals, excluding the "0" ordinal (indicating a missing value).
+ */
+ long getNumOrds();
+
+ /**
+ * Returns total unique ord count; this includes +1 for
+ * the null ord (always 0).
+ */
+ long getMaxOrd();
+
+ /**
+ * Does any of the docs map to more than one ordinal?
+ */
+ boolean isMultiValued();
+
+ /**
+ * Returns the ordinal for the given docId, or <tt>0</tt> if the document
+ * has no value.
+ */
+ long getOrd(int docId);
+
+ /**
+ * Returns the ordinals for the given docId, as a {@link LongsRef} of
+ * length <tt>0</tt> for a doc with no ordinals.
+ */
+ LongsRef getOrds(int docId);
+
+
+ /**
+ * Returns the next ordinal for the current docID set to {@link #setDocument(int)}.
+ * This method should only be called <tt>N</tt> times where <tt>N</tt> is the number
+ * returned from {@link #setDocument(int)}. If called more than <tt>N</tt> times the behavior
+ * is undefined.
+ *
+ * Note: This method will never return <tt>0</tt>.
+ *
+ * @return the next ordinal for the current docID set to {@link #setDocument(int)}.
+ */
+ long nextOrd();
+
+
+ /**
+ * Sets iteration to the specified docID and returns the number of
+ * ordinals for this document ID.
+ * @param docId document ID
+ *
+ * @see #nextOrd()
+ */
+ int setDocument(int docId);
+
+
+ /**
+ * Returns the current ordinal in the iteration
+ * @return the current ordinal in the iteration
+ */
+ long currentOrd();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java
new file mode 100644
index 0000000..b89d650
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java
@@ -0,0 +1,491 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.*;
+import org.apache.lucene.util.packed.GrowableWriter;
+import org.apache.lucene.util.packed.PackedInts;
+import org.apache.lucene.util.packed.PagedGrowableWriter;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+
+/**
+ * Simple class to build document ID <-> ordinal mapping. Note: Ordinals are
+ * <tt>1</tt> based monotonically increasing positive integers. <tt>0</tt>
+ * denotes the missing value in this context.
+ */
+public final class OrdinalsBuilder implements Closeable {
+
+ /**
+ * Default acceptable overhead ratio. {@link OrdinalsBuilder} memory usage is mostly transient so it is likely a better trade-off to
+ * trade memory for speed in order to resize less often.
+ */
+ public static final float DEFAULT_ACCEPTABLE_OVERHEAD_RATIO = PackedInts.FAST;
+
+ /**
+ * The following structure is used to store ordinals. The idea is to store ords on levels of increasing sizes. Level 0 stores
+ * 1 value and 1 pointer to level 1. Level 1 stores 2 values and 1 pointer to level 2, ..., Level n stores 2**n values and
+ * 1 pointer to level n+1. If at some point an ordinal or a pointer has 0 as a value, this means that there are no remaining
+ * values. On the first level, ordinals.get(docId) is the first ordinal for docId or 0 if the document has no ordinals. On
+ * subsequent levels, the first 2^level slots are reserved and all have 0 as a value.
+ * <pre>
+ * Example for an index of 3 docs (O=ordinal, P = pointer)
+ * Level 0:
+ * ordinals [1] [4] [2]
+ * nextLevelSlices 1 0 2
+ * Level 1:
+ * ordinals [0 0] [2 0] [3 4]
+ * nextLevelSlices 0 0 1
+ * Level 2:
+ * ordinals [0 0 0 0] [5 0 0 0]
+ * nextLevelSlices 0 0
+ * </pre>
+ * On level 0, all documents have an ordinal: 0 has 1, 1 has 4 and 2 has 2 as a first ordinal; this means that we need to read
+ * nextLevelSlices to get the index of their ordinals on the next level. The entry for document 1 is 0, meaning that we have
+ * already read all its ordinals. On the contrary, 0 and 2 have more ordinals, which are stored at indices 1 and 2. Let's continue
+ * with document 2: it has 2 more ordinals on level 1: 3 and 4 and its next level index is 1 meaning that there are remaining
+ * ordinals on the next level. On level 2 at index 1, we can read [5 0 0 0] meaning that 5 is an ordinal as well, but the
+ * fact that it is followed by zeros means that there are no more ordinals. In the end, document 2 has 2, 3, 4 and 5 as ordinals.
+ * <p/>
+ * In addition to these structures, there is another array which stores the current position (level + slice + offset in the slice)
+ * in order to be able to append data in constant time.
+ */
+ private static class OrdinalsStore {
+
+ private static final int PAGE_SIZE = 1 << 12;
+
+ /**
+ * Number of slots at <code>level</code>
+ */
+ private static int numSlots(int level) {
+ return 1 << level;
+ }
+
+ private static int slotsMask(int level) {
+ return numSlots(level) - 1;
+ }
+
+ /**
+ * Encode the position for the given level and offset. The idea is to encode the level using unary coding in the lower bits and
+ * then the offset in the higher bits.
+ */
+ private static long position(int level, long offset) {
+ assert level >= 1;
+ return (1 << (level - 1)) | (offset << level);
+ }
+
+ /**
+ * Decode the level from an encoded position.
+ */
+ private static int level(long position) {
+ return 1 + Long.numberOfTrailingZeros(position);
+ }
+
+ /**
+ * Decode the offset from the position.
+ */
+ private static long offset(long position, int level) {
+ return position >>> level;
+ }
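+
+ // Worked round-trip (illustrative, using the helpers above): position(2, 6) =
+ // (1 << 1) | (6 << 2) = 26; level(26) = 1 + numberOfTrailingZeros(26) = 2; and
+ // offset(26, 2) = 26 >>> 2 = 6. The unary level marker in the low bits is what lets
+ // level() recover the level without storing it separately.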
+
+ /**
+ * Get the ID of the slice given an offset.
+ */
+ private static long sliceID(int level, long offset) {
+ return offset >>> level;
+ }
+
+ /**
+ * Compute the first offset of the given slice.
+ */
+ private static long startOffset(int level, long slice) {
+ return slice << level;
+ }
+
+ /**
+ * Compute the number of ordinals stored for a value given its current position.
+ */
+ private static int numOrdinals(int level, long offset) {
+ return (1 << level) + (int) (offset & slotsMask(level));
+ }
+
+ // Current position
+ private PagedGrowableWriter positions;
+ // First level (0) of ordinals and pointers to the next level
+ private final GrowableWriter firstOrdinals;
+ private PagedGrowableWriter firstNextLevelSlices;
+ // Ordinals and pointers for other levels, starting at 1
+ private final PagedGrowableWriter[] ordinals;
+ private final PagedGrowableWriter[] nextLevelSlices;
+ private final int[] sizes;
+
+ private final int startBitsPerValue;
+ private final float acceptableOverheadRatio;
+
+ OrdinalsStore(int maxDoc, int startBitsPerValue, float acceptableOverheadRatio) {
+ this.startBitsPerValue = startBitsPerValue;
+ this.acceptableOverheadRatio = acceptableOverheadRatio;
+ positions = new PagedGrowableWriter(maxDoc, PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
+ firstOrdinals = new GrowableWriter(startBitsPerValue, maxDoc, acceptableOverheadRatio);
+ // over-allocate in order to never worry about the array sizes; 24 entries allow storing several millions of ordinals per doc...
+ ordinals = new PagedGrowableWriter[24];
+ nextLevelSlices = new PagedGrowableWriter[24];
+ sizes = new int[24];
+ Arrays.fill(sizes, 1); // reserve the 1st slice on every level
+ }
+
+ /**
+ * Allocate a new slice and return its ID.
+ */
+ private long newSlice(int level) {
+ final long newSlice = sizes[level]++;
+ // Lazily allocate ordinals
+ if (ordinals[level] == null) {
+ ordinals[level] = new PagedGrowableWriter(8L * numSlots(level), PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
+ } else {
+ ordinals[level] = ordinals[level].grow(sizes[level] * numSlots(level));
+ if (nextLevelSlices[level] != null) {
+ nextLevelSlices[level] = nextLevelSlices[level].grow(sizes[level]);
+ }
+ }
+ return newSlice;
+ }
+
+ public int addOrdinal(int docID, long ordinal) {
+ final long position = positions.get(docID);
+
+ if (position == 0L) { // on the first level
+ // 0 or 1 ordinal
+ if (firstOrdinals.get(docID) == 0L) {
+ firstOrdinals.set(docID, ordinal);
+ return 1;
+ } else {
+ final long newSlice = newSlice(1);
+ if (firstNextLevelSlices == null) {
+ firstNextLevelSlices = new PagedGrowableWriter(firstOrdinals.size(), PAGE_SIZE, 3, acceptableOverheadRatio);
+ }
+ firstNextLevelSlices.set(docID, newSlice);
+ final long offset = startOffset(1, newSlice);
+ ordinals[1].set(offset, ordinal);
+ positions.set(docID, position(1, offset)); // current position is on the 1st level and not allocated yet
+ return 2;
+ }
+ } else {
+ int level = level(position);
+ long offset = offset(position, level);
+ assert offset != 0L;
+ if (((offset + 1) & slotsMask(level)) == 0L) {
+ // reached the end of the slice, allocate a new one on the next level
+ final long newSlice = newSlice(level + 1);
+ if (nextLevelSlices[level] == null) {
+ nextLevelSlices[level] = new PagedGrowableWriter(sizes[level], PAGE_SIZE, 1, acceptableOverheadRatio);
+ }
+ nextLevelSlices[level].set(sliceID(level, offset), newSlice);
+ ++level;
+ offset = startOffset(level, newSlice);
+ assert (offset & slotsMask(level)) == 0L;
+ } else {
+ // just go to the next slot
+ ++offset;
+ }
+ ordinals[level].set(offset, ordinal);
+ final long newPosition = position(level, offset);
+ positions.set(docID, newPosition);
+ return numOrdinals(level, offset);
+ }
+ }
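+
+ // Illustrative trace of the logic above (editor's sketch): adding ordinals 7, 9 and 12 for
+ // one document first fills firstOrdinals (returns 1), then allocates a 2-slot level-1 slice
+ // holding 9 (returns 2), then writes 12 into the second slot of that slice (returns 3); a
+ // fourth ordinal would trigger a 4-slot level-2 slice.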
+
+ public void appendOrdinals(int docID, LongsRef ords) {
+ // First level
+ final long firstOrd = firstOrdinals.get(docID);
+ if (firstOrd == 0L) {
+ return;
+ }
+ ords.longs = ArrayUtil.grow(ords.longs, ords.offset + ords.length + 1);
+ ords.longs[ords.offset + ords.length++] = firstOrd;
+ if (firstNextLevelSlices == null) {
+ return;
+ }
+ long sliceID = firstNextLevelSlices.get(docID);
+ if (sliceID == 0L) {
+ return;
+ }
+ // Other levels
+ for (int level = 1; ; ++level) {
+ final int numSlots = numSlots(level);
+ ords.longs = ArrayUtil.grow(ords.longs, ords.offset + ords.length + numSlots);
+ final long offset = startOffset(level, sliceID);
+ for (int j = 0; j < numSlots; ++j) {
+ final long ord = ordinals[level].get(offset + j);
+ if (ord == 0L) {
+ return;
+ }
+ ords.longs[ords.offset + ords.length++] = ord;
+ }
+ if (nextLevelSlices[level] == null) {
+ return;
+ }
+ sliceID = nextLevelSlices[level].get(sliceID);
+ if (sliceID == 0L) {
+ return;
+ }
+ }
+ }
+
+ }
+
+ private final int maxDoc;
+ private long currentOrd = 0;
+ private int numDocsWithValue = 0;
+ private int numMultiValuedDocs = 0;
+ private int totalNumOrds = 0;
+
+ private OrdinalsStore ordinals;
+ private final LongsRef spare;
+
+ public OrdinalsBuilder(long numTerms, int maxDoc, float acceptableOverheadRatio) throws IOException {
+ this.maxDoc = maxDoc;
+ int startBitsPerValue = 8;
+ if (numTerms >= 0) {
+ startBitsPerValue = PackedInts.bitsRequired(numTerms);
+ }
+ ordinals = new OrdinalsStore(maxDoc, startBitsPerValue, acceptableOverheadRatio);
+ spare = new LongsRef();
+ }
+
+ public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException {
+ this(-1, maxDoc, acceptableOverheadRatio);
+ }
+
+ public OrdinalsBuilder(int maxDoc) throws IOException {
+ this(maxDoc, DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ }
+
+ /**
+ * Returns a shared {@link LongsRef} instance for the given doc ID holding all ordinals associated with it.
+ */
+ public LongsRef docOrds(int docID) {
+ spare.offset = spare.length = 0;
+ ordinals.appendOrdinals(docID, spare);
+ return spare;
+ }
+
+ /**
+ * Return a {@link PackedInts.Reader} instance mapping every doc ID to its first ordinal if it exists and 0 otherwise.
+ */
+ public PackedInts.Reader getFirstOrdinals() {
+ return ordinals.firstOrdinals;
+ }
+
+ /**
+ * Advances the {@link OrdinalsBuilder} to the next ordinal and
+ * returns the current ordinal.
+ */
+ public long nextOrdinal() {
+ return ++currentOrd;
+ }
+
+ /**
+ * Returns the current ordinal or <tt>0</tt> if this builder has not been advanced via
+ * {@link #nextOrdinal()}.
+ */
+ public long currentOrdinal() {
+ return currentOrd;
+ }
+
+ /**
+ * Associates the given document id with the current ordinal.
+ */
+ public OrdinalsBuilder addDoc(int doc) {
+ totalNumOrds++;
+ final int numValues = ordinals.addOrdinal(doc, currentOrd);
+ if (numValues == 1) {
+ ++numDocsWithValue;
+ } else if (numValues == 2) {
+ ++numMultiValuedDocs;
+ }
+ return this;
+ }
+
+ /**
+ * Returns <code>true</code> iff this builder contains a document ID that is associated with more than one ordinal. Otherwise <code>false</code>.
+ */
+ public boolean isMultiValued() {
+ return numMultiValuedDocs > 0;
+ }
+
+ /**
+ * Returns the number of distinct document IDs with one or more values.
+ */
+ public int getNumDocsWithValue() {
+ return numDocsWithValue;
+ }
+
+ /**
+ * Returns the number of distinct document IDs associated with exactly one value.
+ */
+ public int getNumSingleValuedDocs() {
+ return numDocsWithValue - numMultiValuedDocs;
+ }
+
+ /**
+ * Returns the number of distinct document IDs associated with two or more values.
+ */
+ public int getNumMultiValuesDocs() {
+ return numMultiValuedDocs;
+ }
+
+ /**
+ * Returns the number of document ID to ordinal pairs in this builder.
+ */
+ public int getTotalNumOrds() {
+ return totalNumOrds;
+ }
+
+ /**
+ * Returns the number of distinct ordinals in this builder.
+ */
+ public long getNumOrds() {
+ return currentOrd;
+ }
+
+ /**
+ * Builds a {@link FixedBitSet} in which the bit is set for every document that has one or more ordinals associated with it.
+ * If every document has an ordinal associated with it, this method returns <code>null</code>.
+ */
+ public FixedBitSet buildDocsWithValuesSet() {
+ if (numDocsWithValue == maxDoc) {
+ return null;
+ }
+ final FixedBitSet bitSet = new FixedBitSet(maxDoc);
+ for (int docID = 0; docID < maxDoc; ++docID) {
+ if (ordinals.firstOrdinals.get(docID) != 0) {
+ bitSet.set(docID);
+ }
+ }
+ return bitSet;
+ }
+
+ /**
+ * Builds an {@link Ordinals} instance from the builders current state.
+ */
+ public Ordinals build(Settings settings) {
+ final float acceptableOverheadRatio = settings.getAsFloat("acceptable_overhead_ratio", PackedInts.FASTEST);
+ if (numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getNumOrds(), acceptableOverheadRatio)) {
+ // MultiOrdinals can be smaller than SinglePackedOrdinals for sparse fields
+ return new MultiOrdinals(this, acceptableOverheadRatio);
+ } else {
+ return new SinglePackedOrdinals(this, acceptableOverheadRatio);
+ }
+ }
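+
+ // Illustrative tuning sketch (assumed field data settings, not from the upstream source):
+ // lowering "acceptable_overhead_ratio" below the PackedInts.FASTEST default used above,
+ // e.g. to PackedInts.FAST, makes the packed structures denser at the cost of slower reads.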
+
+ /**
+ * Returns the maximum document ID (exclusive) this builder can associate with an ordinal.
+ */
+ public int maxDoc() {
+ return maxDoc;
+ }
+
+ /**
+ * A {@link TermsEnum} that iterates only full precision prefix coded 64 bit values.
+ *
+ * @see #buildFromTerms(TermsEnum, Bits)
+ */
+ public static TermsEnum wrapNumeric64Bit(TermsEnum termsEnum) {
+ return new FilteredTermsEnum(termsEnum, false) {
+ @Override
+ protected AcceptStatus accept(BytesRef term) throws IOException {
+ // we stop accepting terms once we moved across the prefix codec terms - redundant values!
+ return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+ }
+ };
+ }
+
+ /**
+ * A {@link TermsEnum} that iterates only full precision prefix coded 32 bit values.
+ *
+ * @see #buildFromTerms(TermsEnum, Bits)
+ */
+ public static TermsEnum wrapNumeric32Bit(TermsEnum termsEnum) {
+ return new FilteredTermsEnum(termsEnum, false) {
+
+ @Override
+ protected AcceptStatus accept(BytesRef term) throws IOException {
+ // we stop accepting terms once we moved across the prefix codec terms - redundant values!
+ return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+ }
+ };
+ }
+
+ /**
+ * This method iterates all terms in the given {@link TermsEnum} and
+ * associates each term's ordinal with the term's documents. The caller must
+ * exhaust the returned {@link BytesRefIterator}, which returns all values
+ * where the first returned value is associated with the ordinal <tt>1</tt>
+ * etc.
+ * <p>
+ * If the {@link TermsEnum} contains prefix coded numerical values the terms
+ * enum should be wrapped with either {@link #wrapNumeric32Bit(TermsEnum)}
+ * or {@link #wrapNumeric64Bit(TermsEnum)} depending on its precision. If
+ * the {@link TermsEnum} is not wrapped the returned
+ * {@link BytesRefIterator} will contain partial precision terms rather than
+ * only full-precision terms.
+ * </p>
+ */
+ public BytesRefIterator buildFromTerms(final TermsEnum termsEnum) throws IOException {
+ return new BytesRefIterator() {
+ private DocsEnum docsEnum = null;
+
+ @Override
+ public BytesRef next() throws IOException {
+ BytesRef ref;
+ if ((ref = termsEnum.next()) != null) {
+ docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
+ nextOrdinal();
+ int docId;
+ while ((docId = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+ addDoc(docId);
+ }
+ }
+ return ref;
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return termsEnum.getComparator();
+ }
+ };
+ }
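+
+ // Illustrative usage sketch (assumed caller code, not from the upstream source):
+ //
+ //   OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(),
+ //           OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ //   BytesRefIterator iter = builder.buildFromTerms(termsEnum); // wrap numeric enums first
+ //   while (iter.next() != null) {
+ //       // terms are assigned ordinals 1, 2, ... in iteration order
+ //   }
+ //   Ordinals ordinals = builder.build(settings); // settings for the field data type, assumed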
+
+ /**
+ * Closes this builder and releases all resources.
+ */
+ @Override
+ public void close() throws IOException {
+ ordinals = null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java b/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java
new file mode 100644
index 0000000..1a9ae36
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.packed.PackedInts;
+
+/**
+ * An {@link Ordinals} implementation for single-valued fields, backed by a single packed array of first ordinals.
+ */
+public class SinglePackedOrdinals implements Ordinals {
+
+ // ordinals with value 0 indicates no value
+ private final PackedInts.Reader reader;
+ private final long numOrds;
+ private final long maxOrd;
+
+ private long size = -1;
+
+ public SinglePackedOrdinals(OrdinalsBuilder builder, float acceptableOverheadRatio) {
+ assert builder.getNumMultiValuesDocs() == 0;
+ this.numOrds = builder.getNumOrds();
+ this.maxOrd = builder.getNumOrds() + 1;
+ // We don't reuse the builder as-is because it might have been built with a higher overhead ratio
+ final PackedInts.Mutable reader = PackedInts.getMutable(builder.maxDoc(), PackedInts.bitsRequired(getNumOrds()), acceptableOverheadRatio);
+ PackedInts.copy(builder.getFirstOrdinals(), 0, reader, 0, builder.maxDoc(), 8 * 1024);
+ this.reader = reader;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_OBJECT_REF + reader.ramBytesUsed();
+ }
+ return size;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return reader.size();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return maxOrd;
+ }
+
+ @Override
+ public Docs ordinals() {
+ return new Docs(this, reader);
+ }
+
+ public static class Docs implements Ordinals.Docs {
+
+ private final SinglePackedOrdinals parent;
+ private final PackedInts.Reader reader;
+
+ private final LongsRef longsScratch = new LongsRef(1);
+ private long currentOrdinal;
+
+ public Docs(SinglePackedOrdinals parent, PackedInts.Reader reader) {
+ this.parent = parent;
+ this.reader = reader;
+ }
+
+ @Override
+ public Ordinals ordinals() {
+ return parent;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return parent.getNumDocs();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return parent.getNumOrds();
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return parent.getMaxOrd();
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public long getOrd(int docId) {
+ return currentOrdinal = reader.get(docId);
+ }
+
+ @Override
+ public LongsRef getOrds(int docId) {
+ final long ordinal = reader.get(docId);
+ longsScratch.offset = 0;
+ longsScratch.length = (int) Math.min(ordinal, 1); // 0 if the doc has no value, 1 otherwise
+ longsScratch.longs[0] = currentOrdinal = ordinal;
+ return longsScratch;
+ }
+
+ @Override
+ public long nextOrd() {
+ assert currentOrdinal > 0;
+ return currentOrdinal;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ currentOrdinal = reader.get(docId);
+ // either this is 1 or 0 - in any case it avoids a branch!
+ return (int)Math.min(currentOrdinal, 1);
+ }
+
+ @Override
+ public long currentOrd() {
+ return currentOrdinal;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBytesIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBytesIndexFieldData.java
new file mode 100644
index 0000000..1369861
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBytesIndexFieldData.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public abstract class AbstractBytesIndexFieldData<FD extends AtomicFieldData.WithOrdinals<ScriptDocValues.Strings>> extends AbstractIndexFieldData<FD> implements IndexFieldData.WithOrdinals<FD> {
+
+ protected Settings frequency;
+ protected Settings regex;
+
+ protected AbstractBytesIndexFieldData(Index index, Settings indexSettings, Names fieldNames, FieldDataType fieldDataType,
+ IndexFieldDataCache cache) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ final Map<String, Settings> groups = fieldDataType.getSettings().getGroups("filter");
+ frequency = groups.get("frequency");
+ regex = groups.get("regex");
+
+ }
+
+ @Override
+ public final boolean valuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
+ return new BytesRefFieldComparatorSource(this, missingValue, sortMode);
+ }
+
+ protected TermsEnum filter(Terms terms, AtomicReader reader) throws IOException {
+ TermsEnum iterator = terms.iterator(null);
+ if (iterator == null) {
+ return null;
+ }
+ if (frequency != null) {
+ iterator = FrequencyFilter.filter(iterator, terms, reader, frequency);
+ }
+
+ if (regex != null) {
+ iterator = RegexFilter.filter(iterator, terms, reader, regex);
+ }
+ return iterator;
+ }
+
+ private static final class FrequencyFilter extends FilteredTermsEnum {
+
+ private int minFreq;
+ private int maxFreq;
+ public FrequencyFilter(TermsEnum delegate, int minFreq, int maxFreq) {
+ super(delegate, false);
+ this.minFreq = minFreq;
+ this.maxFreq = maxFreq;
+ }
+
+ public static TermsEnum filter(TermsEnum toFilter, Terms terms, AtomicReader reader, Settings settings) throws IOException {
+ int docCount = terms.getDocCount();
+ if (docCount == -1) {
+ docCount = reader.maxDoc();
+ }
+ final double minFrequency = settings.getAsDouble("min", 0d);
+ final double maxFrequency = settings.getAsDouble("max", docCount+1d);
+ final double minSegmentSize = settings.getAsInt("min_segment_size", 0);
+ if (minSegmentSize < docCount) {
+ final int minFreq = minFrequency >= 1.0 ? (int) minFrequency : (int) (docCount * minFrequency);
+ final int maxFreq = maxFrequency >= 1.0 ? (int) maxFrequency : (int) (docCount * maxFrequency);
+ assert minFreq < maxFreq;
+ return new FrequencyFilter(toFilter, minFreq, maxFreq);
+ }
+
+ return toFilter;
+
+ }
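+
+ // Worked example (illustrative numbers): on a segment with docCount = 10,000, the settings
+ // {min: 0.001, max: 0.5} are treated as ratios since both are below 1.0, giving minFreq = 10
+ // and maxFreq = 5,000; {min: 5, max: 1000} would be used as absolute document frequencies.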
+
+ @Override
+ protected AcceptStatus accept(BytesRef arg0) throws IOException {
+ int docFreq = docFreq();
+ if (docFreq >= minFreq && docFreq <= maxFreq) {
+ return AcceptStatus.YES;
+ }
+ return AcceptStatus.NO;
+ }
+ }
+
+ private static final class RegexFilter extends FilteredTermsEnum {
+
+ private final Matcher matcher;
+ private final CharsRef spare = new CharsRef();
+
+ public RegexFilter(TermsEnum delegate, Matcher matcher) {
+ super(delegate, false);
+ this.matcher = matcher;
+ }
+ public static TermsEnum filter(TermsEnum iterator, Terms terms, AtomicReader reader, Settings regex) {
+ String pattern = regex.get("pattern");
+ if (pattern == null) {
+ return iterator;
+ }
+ Pattern p = Pattern.compile(pattern);
+ return new RegexFilter(iterator, p.matcher(""));
+ }
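+
+ // Illustrative settings sketch (assumed snippet, not from the upstream source): a fielddata
+ // filter such as "filter": { "regex": { "pattern": "^#.*" } } arrives here as the "regex"
+ // settings group, so only terms matching the pattern get loaded into field data.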
+
+ @Override
+ protected AcceptStatus accept(BytesRef arg0) throws IOException {
+ UnicodeUtil.UTF8toUTF16(arg0, spare);
+ matcher.reset(spare);
+ if (matcher.matches()) {
+ return AcceptStatus.YES;
+ }
+ return AcceptStatus.NO;
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointIndexFieldData.java
new file mode 100644
index 0000000..886f39b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointIndexFieldData.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefIterator;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+import java.io.IOException;
+
+abstract class AbstractGeoPointIndexFieldData extends AbstractIndexFieldData<AtomicGeoPointFieldData<ScriptDocValues>> implements IndexGeoPointFieldData<AtomicGeoPointFieldData<ScriptDocValues>> {
+
+ protected static class Empty extends AtomicGeoPointFieldData<ScriptDocValues> {
+
+ private final int numDocs;
+
+ Empty(int numDocs) {
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return 0;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return 0;
+ }
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ return BytesValues.EMPTY;
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return GeoPointValues.EMPTY;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return ScriptDocValues.EMPTY_GEOPOINTS;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ @Override
+ public void close() {
+ // no-op
+ }
+ }
+
+ protected static class GeoPointEnum {
+
+ private final BytesRefIterator termsEnum;
+ private final GeoPoint next;
+ private final CharsRef spare;
+
+ protected GeoPointEnum(BytesRefIterator termsEnum) {
+ this.termsEnum = termsEnum;
+ next = new GeoPoint();
+ spare = new CharsRef();
+ }
+
+ public GeoPoint next() throws IOException {
+ final BytesRef term = termsEnum.next();
+ if (term == null) {
+ return null;
+ }
+ UnicodeUtil.UTF8toUTF16(term, spare);
+ int commaIndex = -1;
+ for (int i = 0; i < spare.length; i++) {
+ if (spare.chars[spare.offset + i] == ',') { // saves a string creation
+ commaIndex = i;
+ break;
+ }
+ }
+ if (commaIndex == -1) {
+ assert false;
+ return next.reset(0, 0);
+ }
+ final double lat = Double.parseDouble(new String(spare.chars, spare.offset, (commaIndex - spare.offset)));
+ final double lon = Double.parseDouble(new String(spare.chars, (spare.offset + (commaIndex + 1)), spare.length - ((commaIndex + 1) - spare.offset)));
+ return next.reset(lat, lon);
+ }
+
+ }
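+
+ // Illustrative input (assumed term format): a term such as "41.12,-71.34" is split on the
+ // comma by GeoPointEnum above and parsed into lat = 41.12 and lon = -71.34.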
+
+ public AbstractGeoPointIndexFieldData(Index index, Settings indexSettings, Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ // we might only have single values, in which case values would be ordered; we could update a flag
+ // dynamically to reflect that, based on the atomic field data loaded
+ return false;
+ }
+
+ @Override
+ public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
+ throw new ElasticsearchIllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java
new file mode 100644
index 0000000..b550b74
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.ScriptDocValues.Strings;
+
+import java.io.IOException;
+
+/** {@link AtomicFieldData} impl on top of Lucene's binary doc values. */
+public class BinaryDVAtomicFieldData implements AtomicFieldData<ScriptDocValues.Strings> {
+
+ private final AtomicReader reader;
+ private final String field;
+
+ public BinaryDVAtomicFieldData(AtomicReader reader, String field) {
+ this.reader = reader;
+ this.field = field;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true; // single-valued
+ }
+
+ @Override
+ public int getNumDocs() {
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ // probably not accurate, but a good upper limit
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ // TODO: Lucene doesn't expose it right now
+ return -1;
+ }
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ // if you want hashes to be cached, you should rather store them on disk alongside the values rather than loading them into memory
+ // here - not supported for now, and probably not useful since this field data only applies to _id and _uid?
+ final BinaryDocValues values;
+ final Bits docsWithField;
+ try {
+ final BinaryDocValues v = reader.getBinaryDocValues(field);
+ if (v == null) {
+ // segment has no value
+ values = BinaryDocValues.EMPTY;
+ docsWithField = new Bits.MatchNoBits(reader.maxDoc());
+ } else {
+ values = v;
+ final Bits b = reader.getDocsWithField(field);
+ docsWithField = b == null ? new Bits.MatchAllBits(reader.maxDoc()) : b;
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("Cannot load doc values", e);
+ }
+
+ return new BytesValues(false) {
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return docsWithField.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ values.get(docId, scratch);
+ return scratch;
+ }
+ };
+ }
+
+ @Override
+ public Strings getScriptValues() {
+ return new ScriptDocValues.Strings(getBytesValues(false));
+ }
+
+ @Override
+ public void close() {
+ // no-op
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java
new file mode 100644
index 0000000..4029b56
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+public class BinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData<BinaryDVAtomicFieldData> {
+
+ public BinaryDVIndexFieldData(Index index, Names fieldNames) {
+ super(index, fieldNames);
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public BinaryDVAtomicFieldData load(AtomicReaderContext context) {
+ return new BinaryDVAtomicFieldData(context.reader(), fieldNames.indexName());
+ }
+
+ @Override
+ public BinaryDVAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ return load(context);
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
+ return new BytesRefFieldComparatorSource(this, missingValue, sortMode);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericAtomicFieldData.java
new file mode 100644
index 0000000..85c78fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericAtomicFieldData.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.util.ByteUtils;
+import org.elasticsearch.index.fielddata.AbstractAtomicNumericFieldData;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
+import org.elasticsearch.index.fielddata.LongValues;
+
+final class BinaryDVNumericAtomicFieldData extends AbstractAtomicNumericFieldData {
+
+ private final AtomicReader reader;
+ private final BinaryDocValues values;
+ private final NumericType numericType;
+
+ BinaryDVNumericAtomicFieldData(AtomicReader reader, BinaryDocValues values, NumericType numericType) {
+ super(numericType.isFloatingPoint());
+ this.reader = reader;
+ this.values = values == null ? BinaryDocValues.EMPTY : values;
+ this.numericType = numericType;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ if (numericType.isFloatingPoint()) {
+ return LongValues.asLongValues(getDoubleValues());
+ }
+ return new LongValues(true) {
+
+ final BytesRef bytes = new BytesRef();
+ final ByteArrayDataInput in = new ByteArrayDataInput();
+ long[] longs = new long[8];
+ int i = Integer.MAX_VALUE;
+ int valueCount = 0;
+
+ @Override
+ public int setDocument(int docId) {
+ values.get(docId, bytes);
+ in.reset(bytes.bytes, bytes.offset, bytes.length);
+ if (!in.eof()) {
+ // first value uses vLong on top of zig-zag encoding, then deltas are encoded using vLong
+ long previousValue = longs[0] = ByteUtils.zigZagDecode(ByteUtils.readVLong(in));
+ valueCount = 1;
+ while (!in.eof()) {
+ longs = ArrayUtil.grow(longs, valueCount + 1);
+ previousValue = longs[valueCount++] = previousValue + ByteUtils.readVLong(in);
+ }
+ } else {
+ valueCount = 0;
+ }
+ i = 0;
+ return valueCount;
+ }
+
+ @Override
+ public long nextValue() {
+ assert i < valueCount;
+ return longs[i++];
+ }
+
+ };
+ }
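+
+ // Worked decoding example (illustrative bytes, not from the upstream source): for stored
+ // longs {5, 8, 13}, the first value is written as zigZag(5) = 10 in vLong form, followed by
+ // the deltas 3 and 5 as plain vLongs; setDocument() above reverses exactly that with
+ // zigZagDecode(readVLong(in)) and then cumulative readVLong(in) additions.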
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ if (!numericType.isFloatingPoint()) {
+ return DoubleValues.asDoubleValues(getLongValues());
+ }
+ switch (numericType) {
+ case FLOAT:
+ return new DoubleValues(true) {
+
+ final BytesRef bytes = new BytesRef();
+ int i = Integer.MAX_VALUE;
+ int valueCount = 0;
+
+ @Override
+ public int setDocument(int docId) {
+ values.get(docId, bytes);
+ assert bytes.length % 4 == 0;
+ i = 0;
+ return valueCount = bytes.length / 4;
+ }
+
+ @Override
+ public double nextValue() {
+ assert i < valueCount;
+ return ByteUtils.readFloatLE(bytes.bytes, bytes.offset + i++ * 4);
+ }
+
+ };
+ case DOUBLE:
+ return new DoubleValues(true) {
+
+ final BytesRef bytes = new BytesRef();
+ int i = Integer.MAX_VALUE;
+ int valueCount = 0;
+
+ @Override
+ public int setDocument(int docId) {
+ values.get(docId, bytes);
+ assert bytes.length % 8 == 0;
+ i = 0;
+ return valueCount = bytes.length / 8;
+ }
+
+ @Override
+ public double nextValue() {
+ assert i < valueCount;
+ return ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i++ * 8);
+ }
+
+ };
+ default:
+ throw new AssertionError();
+ }
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return true; // no way to know
+ }
+
+ @Override
+ public int getNumDocs() {
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return Long.MAX_VALUE; // no clue
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return -1; // Lucene doesn't expose it
+ }
+
+ @Override
+ public void close() {
+ // no-op
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java
new file mode 100644
index 0000000..ee495a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+import java.io.IOException;
+
+public class BinaryDVNumericIndexFieldData extends DocValuesIndexFieldData implements IndexNumericFieldData<BinaryDVNumericAtomicFieldData> {
+
+ private final NumericType numericType;
+
+ public BinaryDVNumericIndexFieldData(Index index, Names fieldNames, NumericType numericType) {
+ super(index, fieldNames);
+ Preconditions.checkArgument(numericType != null, "numericType must be non-null");
+ this.numericType = numericType;
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return false;
+ }
+
+ public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(final Object missingValue, final SortMode sortMode) {
+ switch (numericType) {
+ case FLOAT:
+ return new FloatValuesComparatorSource(this, missingValue, sortMode);
+ case DOUBLE:
+ return new DoubleValuesComparatorSource(this, missingValue, sortMode);
+ default:
+ assert !numericType.isFloatingPoint();
+ return new LongValuesComparatorSource(this, missingValue, sortMode);
+ }
+ }
+
+ @Override
+ public BinaryDVNumericAtomicFieldData load(AtomicReaderContext context) {
+ try {
+ return new BinaryDVNumericAtomicFieldData(context.reader(), context.reader().getBinaryDocValues(fieldNames.indexName()), numericType);
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("Cannot load doc values", e);
+ }
+ }
+
+ @Override
+ public BinaryDVNumericAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ return load(context);
+ }
+
+ @Override
+ public NumericType getNumericType() {
+ return numericType;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DenseDoubleValues.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DenseDoubleValues.java
new file mode 100644
index 0000000..4d8afcb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DenseDoubleValues.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.elasticsearch.index.fielddata.DoubleValues;
+
+/**
+ * Package private base class for dense double values.
+ */
+abstract class DenseDoubleValues extends DoubleValues {
+
+ protected DenseDoubleValues(boolean multiValued) {
+ super(multiValued);
+ }
+
+ @Override
+ public final int setDocument(int docId) {
+ this.docId = docId;
+ return 1;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DenseLongValues.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DenseLongValues.java
new file mode 100644
index 0000000..5fb910f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DenseLongValues.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.elasticsearch.index.fielddata.LongValues;
+
+/**
+ * Package-private base class for dense long values, where every document has exactly one value.
+ */
+abstract class DenseLongValues extends LongValues {
+
+ protected DenseLongValues(boolean multiValued) {
+ super(multiValued);
+ }
+
+ @Override
+ public final int setDocument(int docId) {
+ this.docId = docId;
+ return 1;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java
new file mode 100644
index 0000000..a546489
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * A field data implementation that forbids loading and will throw an {@link org.elasticsearch.ElasticsearchIllegalStateException} if you try to load
+ * {@link AtomicFieldData} instances.
+ */
+public final class DisabledIndexFieldData extends AbstractIndexFieldData<AtomicFieldData<?>> {
+
+ public static class Builder implements IndexFieldData.Builder {
+ @Override
+ public IndexFieldData<AtomicFieldData<?>> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
+ IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ // Ignore Circuit Breaker
+ return new DisabledIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache);
+ }
+ }
+
+ public DisabledIndexFieldData(Index index, Settings indexSettings, Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public AtomicFieldData<?> loadDirect(AtomicReaderContext context) throws Exception {
+ throw fail();
+ }
+
+ @Override
+ public IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
+ throw fail();
+ }
+
+ private ElasticsearchIllegalStateException fail() {
+ return new ElasticsearchIllegalStateException("Field data loading is forbidden on " + getFieldNames().name());
+ }
+
+}
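Note: this implementation is typically selected when a field's mapping sets the field data format to "disabled", so any sort, facet or script access fails fast instead of silently building field data. Stripped of the Elasticsearch types, the pattern is simply (names below are hypothetical):

    // Fail-fast stand-in mirroring DisabledIndexFieldData#fail().
    final class DisabledLoader<T> {
        private final String fieldName;

        DisabledLoader(String fieldName) {
            this.fieldName = fieldName;
        }

        T load() {
            // same message shape as the ElasticsearchIllegalStateException above
            throw new IllegalStateException("Field data loading is forbidden on " + fieldName);
        }
    }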
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
new file mode 100644
index 0000000..06629d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+import java.util.Map;
+import java.util.Set;
+
+/** {@link IndexFieldData} impl based on Lucene's doc values. Caching is done on the Lucene side. */
+public abstract class DocValuesIndexFieldData {
+
+ protected final Index index;
+ protected final Names fieldNames;
+
+ public DocValuesIndexFieldData(Index index, Names fieldNames) {
+ super();
+ this.index = index;
+ this.fieldNames = fieldNames;
+ }
+
+ public final Names getFieldNames() {
+ return fieldNames;
+ }
+
+ public final void clear() {
+ // no-op: caching is done on the Lucene side, so there is nothing to clear
+ }
+
+ public final void clear(IndexReader reader) {
+ // no-op: Lucene manages the doc values cache per reader
+ }
+
+ public final Index index() {
+ return index;
+ }
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ private static final Set<String> BINARY_INDEX_FIELD_NAMES = ImmutableSet.of(UidFieldMapper.NAME, IdFieldMapper.NAME);
+ private static final Set<String> NUMERIC_INDEX_FIELD_NAMES = ImmutableSet.of(TimestampFieldMapper.NAME);
+
+ private NumericType numericType;
+
+ public Builder numericType(NumericType type) {
+ this.numericType = type;
+ return this;
+ }
+
+ @Override
+ public IndexFieldData<?> build(Index index, Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService) {
+ // Ignore Circuit Breaker
+ final FieldMapper.Names fieldNames = mapper.names();
+ final Settings fdSettings = mapper.fieldDataType().getSettings();
+ final Map<String, Settings> filter = fdSettings.getGroups("filter");
+ if (filter != null && !filter.isEmpty()) {
+ throw new ElasticsearchIllegalArgumentException("Doc values field data doesn't support filters [" + fieldNames.name() + "]");
+ }
+
+ if (BINARY_INDEX_FIELD_NAMES.contains(fieldNames.indexName())) {
+ assert numericType == null;
+ return new BinaryDVIndexFieldData(index, fieldNames);
+ } else if (NUMERIC_INDEX_FIELD_NAMES.contains(fieldNames.indexName())) {
+ assert !numericType.isFloatingPoint();
+ return new NumericDVIndexFieldData(index, fieldNames);
+ } else if (numericType != null) {
+ return new BinaryDVNumericIndexFieldData(index, fieldNames, numericType);
+ } else {
+ return new SortedSetDVBytesIndexFieldData(index, fieldNames);
+ }
+ }
+
+ }
+
+}
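For orientation: the Builder above routes a field to one of four implementations, and the order of the checks matters (_uid/_id before the numeric check, _timestamp before the generic numeric case, sorted-set bytes as the fallback). A sketch of that routing with plain strings standing in for the mapper constants and builder types:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    // Hypothetical mirror of DocValuesIndexFieldData.Builder#build's routing.
    final class DocValuesRouting {
        static final Set<String> BINARY = new HashSet<String>(Arrays.asList("_uid", "_id"));
        static final Set<String> NUMERIC = new HashSet<String>(Arrays.asList("_timestamp"));

        static String route(String indexName, String numericType) {
            if (BINARY.contains(indexName)) {
                return "BinaryDVIndexFieldData";         // uid/id live in binary doc values
            } else if (NUMERIC.contains(indexName)) {
                return "NumericDVIndexFieldData";        // timestamp uses numeric doc values
            } else if (numericType != null) {
                return "BinaryDVNumericIndexFieldData";  // other numerics packed into binary DVs
            } else {
                return "SortedSetDVBytesIndexFieldData"; // everything else: sorted-set bytes
            }
        }
    }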
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayAtomicFieldData.java
new file mode 100644
index 0000000..9ebf971
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayAtomicFieldData.java
@@ -0,0 +1,359 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.util.BigDoubleArrayList;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+/**
+ * Atomic field data implementation backed by big arrays of doubles.
+ */
+public abstract class DoubleArrayAtomicFieldData extends AbstractAtomicNumericFieldData {
+
+ public static DoubleArrayAtomicFieldData empty(int numDocs) {
+ return new Empty(numDocs);
+ }
+
+ private final int numDocs;
+
+ protected long size = -1;
+
+ public DoubleArrayAtomicFieldData(int numDocs) {
+ super(true);
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ static class Empty extends DoubleArrayAtomicFieldData {
+
+ Empty(int numDocs) {
+ super(numDocs);
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return LongValues.EMPTY;
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return DoubleValues.EMPTY;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return 0;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return 0;
+ }
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ return BytesValues.EMPTY;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return ScriptDocValues.EMPTY_DOUBLES;
+ }
+ }
+
+ public static class WithOrdinals extends DoubleArrayAtomicFieldData {
+
+ private final BigDoubleArrayList values;
+ private final Ordinals ordinals;
+
+ public WithOrdinals(BigDoubleArrayList values, int numDocs, Ordinals ordinals) {
+ super(numDocs);
+ this.values = values;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + values.sizeInBytes() + ordinals.getMemorySizeInBytes();
+ }
+ return size;
+ }
+
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, ordinals.ordinals());
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, ordinals.ordinals());
+ }
+
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues.WithOrdinals {
+
+ private final BigDoubleArrayList values;
+
+ LongValues(BigDoubleArrayList values, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.values = values;
+ }
+
+ @Override
+ public final long getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ return (long) values.get(ord);
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues.WithOrdinals {
+
+ private final BigDoubleArrayList values;
+
+ DoubleValues(BigDoubleArrayList values, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.values = values;
+ }
+
+ @Override
+ public double getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ return values.get(ord);
+ }
+ }
+ }
+
+ /**
+ * A single-valued case where not all documents have a value, so a FixedBitSet
+ * indicates which documents actually have one.
+ */
+ public static class SingleFixedSet extends DoubleArrayAtomicFieldData {
+
+ private final BigDoubleArrayList values;
+ private final FixedBitSet set;
+ private final long numOrds;
+
+ public SingleFixedSet(BigDoubleArrayList values, int numDocs, FixedBitSet set, long numOrds) {
+ super(numDocs);
+ this.values = values;
+ this.set = set;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes() + RamUsageEstimator.sizeOf(set.getBits());
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, set);
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, set);
+ }
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
+
+ private final BigDoubleArrayList values;
+ private final FixedBitSet set;
+
+ LongValues(BigDoubleArrayList values, FixedBitSet set) {
+ super(false);
+ this.values = values;
+ this.set = set;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return set.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return (long) values.get(docId);
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
+
+ private final BigDoubleArrayList values;
+ private final FixedBitSet set;
+
+ DoubleValues(BigDoubleArrayList values, FixedBitSet set) {
+ super(false);
+ this.values = values;
+ this.set = set;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return set.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public double nextValue() {
+ return values.get(docId);
+ }
+ }
+ }
+
+ /**
+ * Assumes all the values are "set", and docId is used as the index into the value array.
+ */
+ public static class Single extends DoubleArrayAtomicFieldData {
+
+ private final BigDoubleArrayList values;
+ private final long numOrds;
+
+ /**
+ * Note that we assume the values are not offset by one from the docId, so
+ * position 0 holds the value for docId 0.
+ */
+ public Single(BigDoubleArrayList values, int numDocs, long numOrds) {
+ super(numDocs);
+ this.values = values;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes();
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values);
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values);
+ }
+
+ static final class LongValues extends DenseLongValues {
+
+ private final BigDoubleArrayList values;
+
+ LongValues(BigDoubleArrayList values) {
+ super(false);
+ this.values = values;
+ }
+
+ @Override
+ public long nextValue() {
+ return (long) values.get(docId);
+ }
+ }
+
+ static final class DoubleValues extends DenseDoubleValues {
+
+ private final BigDoubleArrayList values;
+
+ DoubleValues(BigDoubleArrayList values) {
+ super(false);
+ this.values = values;
+ }
+
+ @Override
+ public double nextValue() {
+ return values.get(docId);
+ }
+
+ }
+ }
+}
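Note: of the three storage strategies above, SingleFixedSet is the least obvious: one value slot per document plus a bit set saying which slots are real. A self-contained sketch, with java.util.BitSet in place of Lucene's FixedBitSet and a double[] in place of BigDoubleArrayList:

    import java.util.BitSet;

    // Sparse single-valued storage: values[doc] is only meaningful when set.get(doc).
    final class SparseDoubles {
        private final double[] values;
        private final BitSet set;

        SparseDoubles(double[] values, BitSet set) {
            this.values = values;
            this.set = set;
        }

        // mirrors setDocument(): 1 if the document has a value, 0 otherwise
        int countForDoc(int docId) {
            return set.get(docId) ? 1 : 0;
        }

        double valueForDoc(int docId) {
            assert set.get(docId);
            return values[docId];
        }
    }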
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayIndexFieldData.java
new file mode 100644
index 0000000..9101dce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DoubleArrayIndexFieldData.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.*;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigDoubleArrayList;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * Index field data implementation that loads double values into big arrays,
+ * keeping ordinals only when that representation is smaller.
+ */
+public class DoubleArrayIndexFieldData extends AbstractIndexFieldData<DoubleArrayAtomicFieldData> implements IndexNumericFieldData<DoubleArrayAtomicFieldData> {
+
+ private final CircuitBreakerService breakerService;
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService) {
+ return new DoubleArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
+ }
+ }
+
+ public DoubleArrayIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames,
+ FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public NumericType getNumericType() {
+ return NumericType.DOUBLE;
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ // not ordered, although single-valued segments could be; a flag could be
+ // updated dynamically based on the atomic field data that gets loaded
+ return false;
+ }
+
+ @Override
+ public DoubleArrayAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+
+ AtomicReader reader = context.reader();
+ Terms terms = reader.terms(getFieldNames().indexName());
+ DoubleArrayAtomicFieldData data = null;
+ // TODO: Use an actual estimator to estimate before loading.
+ NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
+ if (terms == null) {
+ data = DoubleArrayAtomicFieldData.empty(reader.maxDoc());
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ return data;
+ }
+ // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
+ final BigDoubleArrayList values = new BigDoubleArrayList();
+
+ values.add(0); // slot 0 is reserved for the missing (null) value
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio);
+ boolean success = false;
+ try {
+ final BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator(null)));
+ BytesRef term;
+ while ((term = iter.next()) != null) {
+ values.add(NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(term)));
+ }
+ Ordinals build = builder.build(fieldDataType.getSettings());
+ if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
+ Docs ordinals = build.ordinals();
+ final FixedBitSet set = builder.buildDocsWithValuesSet();
+
+ // there's a sweet spot where, thanks to a low unique value count, using ordinals consumes less memory
+ long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_DOUBLE + (set == null ? 0 : RamUsageEstimator.sizeOf(set.getBits()) + RamUsageEstimator.NUM_BYTES_INT);
+ long uniqueValuesArraySize = values.sizeInBytes();
+ long ordinalsSize = build.getMemorySizeInBytes();
+ if (uniqueValuesArraySize + ordinalsSize < singleValuesArraySize) {
+ data = new DoubleArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
+ success = true;
+ return data;
+ }
+
+ int maxDoc = reader.maxDoc();
+ BigDoubleArrayList sValues = new BigDoubleArrayList(maxDoc);
+ for (int i = 0; i < maxDoc; i++) {
+ sValues.add(values.get(ordinals.getOrd(i)));
+ }
+ assert sValues.size() == maxDoc;
+ if (set == null) {
+ data = new DoubleArrayAtomicFieldData.Single(sValues, maxDoc, ordinals.getNumOrds());
+ } else {
+ data = new DoubleArrayAtomicFieldData.SingleFixedSet(sValues, maxDoc, set, ordinals.getNumOrds());
+ }
+ } else {
+ data = new DoubleArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
+ }
+ success = true;
+ return data;
+ } finally {
+ if (success) {
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
+ return new DoubleValuesComparatorSource(this, missingValue, sortMode);
+ }
+}
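Note: the "sweet spot" check in loadDirect() keeps the ordinal representation only when the unique-values array plus the ordinal mapping is smaller than one double per document. A back-of-the-envelope version of that arithmetic (all sizes and figures below are illustrative, not measured):

    // 8 is NUM_BYTES_DOUBLE; the docs-with-values bit set costs about one bit per doc.
    final class FieldDataSizing {
        static boolean keepOrdinals(int maxDoc, long numUniqueValues,
                                    long ordinalsSizeInBytes, boolean hasDocsWithoutValue) {
            long dense = (long) maxDoc * 8 + (hasDocsWithoutValue ? maxDoc / 8 : 0);
            long withOrdinals = numUniqueValues * 8 + ordinalsSizeInBytes;
            return withOrdinals < dense;
        }

        public static void main(String[] args) {
            // 1M docs but only 10 distinct values: ordinals win easily
            System.out.println(keepOrdinals(1000000, 10, 160000, false)); // prints true
        }
    }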
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/EmptyByteValuesWithOrdinals.java b/src/main/java/org/elasticsearch/index/fielddata/plain/EmptyByteValuesWithOrdinals.java
new file mode 100644
index 0000000..a2410a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/EmptyByteValuesWithOrdinals.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+/**
+ * An empty {@link org.elasticsearch.index.fielddata.BytesValues.WithOrdinals} implementation
+ */
+final class EmptyByteValuesWithOrdinals extends BytesValues.WithOrdinals {
+
+ EmptyByteValuesWithOrdinals(Ordinals.Docs ordinals) {
+ super(ordinals);
+ }
+
+ @Override
+ public BytesRef getValueByOrd(long ord) {
+ scratch.length = 0;
+ return scratch;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return 0;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ throw new ElasticsearchIllegalStateException("Empty BytesValues has no next value");
+ }
+
+ @Override
+ public int currentValueHash() {
+ throw new ElasticsearchIllegalStateException("Empty BytesValues has no hash for the current value");
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesAtomicFieldData.java
new file mode 100644
index 0000000..bec4e93
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesAtomicFieldData.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.fst.*;
+import org.apache.lucene.util.fst.FST.Arc;
+import org.apache.lucene.util.fst.FST.BytesReader;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.IntArray;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.ordinals.EmptyOrdinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+
+import java.io.IOException;
+
+/**
+ * Atomic field data implementation that stores term bytes in an FST keyed by ordinal.
+ */
+public class FSTBytesAtomicFieldData implements AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> {
+
+ public static FSTBytesAtomicFieldData empty(int numDocs) {
+ return new Empty(numDocs);
+ }
+
+ // ordinal 0 in values means no value (it's null)
+ protected final Ordinals ordinals;
+
+ private volatile IntArray hashes;
+ private long size = -1;
+
+ private final FST<Long> fst;
+
+ public FSTBytesAtomicFieldData(FST<Long> fst, Ordinals ordinals) {
+ this.ordinals = ordinals;
+ this.fst = fst;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public int getNumDocs() {
+ return ordinals.getNumDocs();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ long size = ordinals.getMemorySizeInBytes();
+ // FST
+ size += fst == null ? 0 : fst.sizeInBytes();
+ this.size = size;
+ }
+ return size;
+ }
+
+ @Override
+ public BytesValues.WithOrdinals getBytesValues(boolean needsHashes) {
+ assert fst != null;
+ if (needsHashes) {
+ if (hashes == null) {
+ BytesRefFSTEnum<Long> fstEnum = new BytesRefFSTEnum<Long>(fst);
+ IntArray hashes = BigArrays.newIntArray(ordinals.getMaxOrd());
+ // we don't store ord 0 in the FST since the field could contain an empty string, and
+ // FSTs can't hold the empty string twice, i.e. the merge fails for long outputs
+ hashes.set(0, new BytesRef().hashCode());
+ try {
+ for (long i = 1, maxOrd = ordinals.getMaxOrd(); i < maxOrd; ++i) {
+ hashes.set(i, fstEnum.next().input.hashCode());
+ }
+ assert fstEnum.next() == null;
+ } catch (IOException e) {
+ // Don't use new AssertionError("Cannot happen", e) directly, as that constructor is a Java 1.7-only API
+ final AssertionError error = new AssertionError("Cannot happen");
+ error.initCause(e);
+ throw error;
+ }
+ this.hashes = hashes;
+ }
+ return new HashedBytesValues(fst, ordinals.ordinals(), hashes);
+ } else {
+ return new BytesValues(fst, ordinals.ordinals());
+ }
+ }
+
+
+ @Override
+ public ScriptDocValues.Strings getScriptValues() {
+ assert fst != null;
+ return new ScriptDocValues.Strings(getBytesValues(false));
+ }
+
+
+
+ static class BytesValues extends org.elasticsearch.index.fielddata.BytesValues.WithOrdinals {
+
+ protected final FST<Long> fst;
+ protected final Ordinals.Docs ordinals;
+
+ // per-thread resources
+ protected final BytesReader in;
+ protected final Arc<Long> firstArc = new Arc<Long>();
+ protected final Arc<Long> scratchArc = new Arc<Long>();
+ protected final IntsRef scratchInts = new IntsRef();
+
+ BytesValues(FST<Long> fst, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.fst = fst;
+ this.ordinals = ordinals;
+ in = fst.getBytesReader();
+ }
+
+ @Override
+ public BytesRef getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ in.setPosition(0);
+ fst.getFirstArc(firstArc);
+ try {
+ IntsRef output = Util.getByOutput(fst, ord, in, firstArc, scratchArc, scratchInts);
+ scratch.length = scratch.offset = 0;
+ scratch.grow(output.length);
+ Util.toBytesRef(output, scratch);
+ } catch (IOException ex) {
+ // ignored: not expected for an FST read entirely from memory
+ }
+ return scratch;
+ }
+
+ }
+
+ static final class HashedBytesValues extends BytesValues {
+ private final IntArray hashes;
+
+ HashedBytesValues(FST<Long> fst, Docs ordinals, IntArray hashes) {
+ super(fst, ordinals);
+ this.hashes = hashes;
+ }
+
+ @Override
+ public int currentValueHash() {
+ assert ordinals.currentOrd() >= 0;
+ return hashes.get(ordinals.currentOrd());
+ }
+ }
+
+
+ static final class Empty extends FSTBytesAtomicFieldData {
+
+ Empty(int numDocs) {
+ super(null, new EmptyOrdinals(numDocs));
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return ordinals.getNumDocs();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public BytesValues.WithOrdinals getBytesValues(boolean needsHashes) {
+ return new EmptyByteValuesWithOrdinals(ordinals.ordinals());
+ }
+
+ @Override
+ public ScriptDocValues.Strings getScriptValues() {
+ return ScriptDocValues.EMPTY_STRINGS;
+ }
+ }
+
+
+}
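Note: getBytesValues(true) walks the FST exactly once to memoize one hash per ordinal, so later hash lookups never touch the FST again. The memoization pattern, assuming a plain ord-to-bytes table where the real code uses a BytesRefFSTEnum and a paged IntArray:

    // Hypothetical one-time hash precomputation keyed by ordinal.
    final class OrdHashCache {
        private final byte[][] termsByOrd;
        private int[] hashes; // built lazily, then reused

        OrdHashCache(byte[][] termsByOrd) {
            this.termsByOrd = termsByOrd;
        }

        int hash(int ord) {
            if (hashes == null) {
                int[] h = new int[termsByOrd.length];
                for (int i = 0; i < h.length; i++) {
                    h[i] = java.util.Arrays.hashCode(termsByOrd[i]);
                }
                hashes = h; // the field is volatile upstream for safe publication
            }
            return hashes[ord];
        }
    }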
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesIndexFieldData.java
new file mode 100644
index 0000000..984381c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/FSTBytesIndexFieldData.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.FST.INPUT_TYPE;
+import org.apache.lucene.util.fst.PositiveIntOutputs;
+import org.apache.lucene.util.fst.Util;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * Index field data implementation that builds an FST over the field's terms.
+ */
+public class FSTBytesIndexFieldData extends AbstractBytesIndexFieldData<FSTBytesAtomicFieldData> {
+
+ private final CircuitBreakerService breakerService;
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<FSTBytesAtomicFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
+ IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ return new FSTBytesIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
+ }
+ }
+
+ FSTBytesIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames, FieldDataType fieldDataType,
+ IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public FSTBytesAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ AtomicReader reader = context.reader();
+
+ Terms terms = reader.terms(getFieldNames().indexName());
+ FSTBytesAtomicFieldData data = null;
+ // TODO: Use an actual estimator to estimate before loading.
+ NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
+ if (terms == null) {
+ data = FSTBytesAtomicFieldData.empty(reader.maxDoc());
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ return data;
+ }
+ PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
+ org.apache.lucene.util.fst.Builder<Long> fstBuilder = new org.apache.lucene.util.fst.Builder<Long>(INPUT_TYPE.BYTE1, outputs);
+ final IntsRef scratch = new IntsRef();
+
+ final long numTerms;
+ if (regex == null && frequency == null) {
+ numTerms = terms.size();
+ } else {
+ numTerms = -1;
+ }
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ OrdinalsBuilder builder = new OrdinalsBuilder(numTerms, reader.maxDoc(), acceptableTransientOverheadRatio);
+ boolean success = false;
+ try {
+
+ // we don't store ord 0 in the FST since the field could contain an empty string, and
+ // FSTs can't hold the empty string twice, i.e. the merge fails for long outputs
+ TermsEnum termsEnum = filter(terms, reader);
+ DocsEnum docsEnum = null;
+ for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
+ final long termOrd = builder.nextOrdinal();
+ assert termOrd > 0;
+ fstBuilder.add(Util.toIntsRef(term, scratch), (long)termOrd);
+ docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
+ for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+ builder.addDoc(docId);
+ }
+ }
+
+ FST<Long> fst = fstBuilder.finish();
+
+ final Ordinals ordinals = builder.build(fieldDataType.getSettings());
+
+ data = new FSTBytesAtomicFieldData(fst, ordinals);
+ success = true;
+ return data;
+ } finally {
+ if (success) {
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+ }
+}
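Note: loadDirect() visits terms in sorted order, hands each the next ordinal (starting at 1, since ordinal 0 means "no value"), and records which documents carry it. A deliberately single-valued sketch of that assignment, with a TreeMap playing the role of the TermsEnum/DocsEnum pair (the real OrdinalsBuilder also handles multi-valued documents):

    import java.util.Map;
    import java.util.TreeMap;

    // Hypothetical ordinal assignment over a sorted term -> postings map.
    final class OrdinalAssignment {
        static int[] assign(TreeMap<String, int[]> docsByTerm, int maxDoc) {
            int[] ordByDoc = new int[maxDoc]; // 0 = missing, matching the convention above
            int nextOrd = 0;
            for (Map.Entry<String, int[]> e : docsByTerm.entrySet()) {
                final int termOrd = ++nextOrd;  // ordinals start at 1, in term sort order
                for (int docId : e.getValue()) {
                    ordByDoc[docId] = termOrd;  // single-valued simplification
                }
            }
            return ordByDoc;
        }
    }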
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayAtomicFieldData.java
new file mode 100644
index 0000000..c0c8c41
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayAtomicFieldData.java
@@ -0,0 +1,358 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.util.BigFloatArrayList;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+/**
+ * Atomic field data implementation backed by big arrays of floats.
+ */
+public abstract class FloatArrayAtomicFieldData extends AbstractAtomicNumericFieldData {
+
+ public static FloatArrayAtomicFieldData empty(int numDocs) {
+ return new Empty(numDocs);
+ }
+
+ private final int numDocs;
+
+ protected long size = -1;
+
+ public FloatArrayAtomicFieldData(int numDocs) {
+ super(true);
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ static class Empty extends FloatArrayAtomicFieldData {
+
+ Empty(int numDocs) {
+ super(numDocs);
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return LongValues.EMPTY;
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return DoubleValues.EMPTY;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return 0;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return 0;
+ }
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ return BytesValues.EMPTY;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return ScriptDocValues.EMPTY_DOUBLES;
+ }
+ }
+
+ public static class WithOrdinals extends FloatArrayAtomicFieldData {
+
+ private final Ordinals ordinals;
+ private final BigFloatArrayList values;
+
+ public WithOrdinals(BigFloatArrayList values, int numDocs, Ordinals ordinals) {
+ super(numDocs);
+ this.values = values;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + values.sizeInBytes() + ordinals.getMemorySizeInBytes();
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, ordinals.ordinals());
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, ordinals.ordinals());
+ }
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues.WithOrdinals {
+
+ private final BigFloatArrayList values;
+
+ LongValues(BigFloatArrayList values, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.values = values;
+ }
+
+ @Override
+ public long getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ return (long) values.get(ord);
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues.WithOrdinals {
+
+ private final BigFloatArrayList values;
+
+ DoubleValues(BigFloatArrayList values, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.values = values;
+ }
+
+ @Override
+ public double getValueByOrd(long ord) {
+ return values.get(ord);
+ }
+ }
+ }
+
+ /**
+ * A single-valued case where not all documents have a value, so a FixedBitSet
+ * indicates which documents actually have one.
+ */
+ public static class SingleFixedSet extends FloatArrayAtomicFieldData {
+
+ private final BigFloatArrayList values;
+ private final FixedBitSet set;
+ private final long numOrd;
+
+ public SingleFixedSet(BigFloatArrayList values, int numDocs, FixedBitSet set, long numOrd) {
+ super(numDocs);
+ this.values = values;
+ this.set = set;
+ this.numOrd = numOrd;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrd;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes() + RamUsageEstimator.sizeOf(set.getBits());
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, set);
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, set);
+ }
+
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
+
+ private final BigFloatArrayList values;
+ private final FixedBitSet set;
+
+ LongValues(BigFloatArrayList values, FixedBitSet set) {
+ super(false);
+ this.values = values;
+ this.set = set;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return set.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return (long) values.get(docId);
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
+
+ private final BigFloatArrayList values;
+ private final FixedBitSet set;
+
+ DoubleValues(BigFloatArrayList values, FixedBitSet set) {
+ super(false);
+ this.values = values;
+ this.set = set;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return set.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public double nextValue() {
+ return values.get(docId);
+ }
+ }
+
+ }
+
+ /**
+ * Assumes all the values are "set", and docId is used as the index into the value array.
+ */
+ public static class Single extends FloatArrayAtomicFieldData {
+
+ private final BigFloatArrayList values;
+ private final long numOrd;
+
+ /**
+ * Note that we assume the values are not offset by one from the docId, so
+ * position 0 holds the value for docId 0.
+ */
+ public Single(BigFloatArrayList values, int numDocs, long numOrd) {
+ super(numDocs);
+ this.values = values;
+ this.numOrd = numOrd;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrd;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes();
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values);
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values);
+ }
+
+
+ static class LongValues extends DenseLongValues {
+
+ private final BigFloatArrayList values;
+
+ LongValues(BigFloatArrayList values) {
+ super(false);
+ this.values = values;
+ }
+
+ @Override
+ public long nextValue() {
+ return (long) values.get(docId);
+ }
+ }
+
+ static class DoubleValues extends DenseDoubleValues {
+
+ private final BigFloatArrayList values;
+
+ DoubleValues(BigFloatArrayList values) {
+ super(false);
+ this.values = values;
+ }
+
+ @Override
+ public double nextValue() {
+ return values.get(docId);
+ }
+
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayIndexFieldData.java
new file mode 100644
index 0000000..de1ee0b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/FloatArrayIndexFieldData.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.*;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigFloatArrayList;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * Index field data implementation that loads float values into big arrays,
+ * keeping ordinals only when that representation is smaller.
+ */
+public class FloatArrayIndexFieldData extends AbstractIndexFieldData<FloatArrayAtomicFieldData> implements IndexNumericFieldData<FloatArrayAtomicFieldData> {
+
+ private final CircuitBreakerService breakerService;
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService) {
+ return new FloatArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
+ }
+ }
+
+ public FloatArrayIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames,
+ FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public NumericType getNumericType() {
+ return NumericType.FLOAT;
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ // not ordered, although single-valued segments could be; a flag could be
+ // updated dynamically based on the atomic field data that gets loaded
+ return false;
+ }
+
+ @Override
+ public FloatArrayAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ AtomicReader reader = context.reader();
+ Terms terms = reader.terms(getFieldNames().indexName());
+ FloatArrayAtomicFieldData data = null;
+ // TODO: Use an actual estimator to estimate before loading.
+ NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
+ if (terms == null) {
+ data = FloatArrayAtomicFieldData.empty(reader.maxDoc());
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ return data;
+ }
+ // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
+ final BigFloatArrayList values = new BigFloatArrayList();
+
+ values.add(0); // slot 0 is reserved for the missing (null) value
+
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio);
+ boolean success = false;
+ try {
+ BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator(null)));
+ BytesRef term;
+ while ((term = iter.next()) != null) {
+ values.add(NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term)));
+ }
+ Ordinals build = builder.build(fieldDataType.getSettings());
+ if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
+ Docs ordinals = build.ordinals();
+ final FixedBitSet set = builder.buildDocsWithValuesSet();
+
+ // there's a sweet spot where, thanks to a low unique value count, using ordinals consumes less memory
+ long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_FLOAT + (set == null ? 0 : RamUsageEstimator.sizeOf(set.getBits()) + RamUsageEstimator.NUM_BYTES_INT);
+ long uniqueValuesArraySize = values.sizeInBytes();
+ long ordinalsSize = build.getMemorySizeInBytes();
+ if (uniqueValuesArraySize + ordinalsSize < singleValuesArraySize) {
+ data = new FloatArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
+ success = true;
+ return data;
+ }
+
+ int maxDoc = reader.maxDoc();
+ BigFloatArrayList sValues = new BigFloatArrayList(maxDoc);
+ for (int i = 0; i < maxDoc; i++) {
+ sValues.add(values.get(ordinals.getOrd(i)));
+ }
+ assert sValues.size() == maxDoc;
+ if (set == null) {
+ data = new FloatArrayAtomicFieldData.Single(sValues, maxDoc, ordinals.getNumOrds());
+ } else {
+ data = new FloatArrayAtomicFieldData.SingleFixedSet(sValues, maxDoc, set, ordinals.getNumOrds());
+ }
+ } else {
+ data = new FloatArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
+ }
+ success = true;
+ return data;
+ } finally {
+ if (success) {
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
+ return new FloatValuesComparatorSource(this, missingValue, sortMode);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVAtomicFieldData.java
new file mode 100644
index 0000000..8260531
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVAtomicFieldData.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.util.ByteUtils;
+import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+
+final class GeoPointBinaryDVAtomicFieldData extends AtomicGeoPointFieldData<ScriptDocValues> {
+
+ private final AtomicReader reader;
+ private final BinaryDocValues values;
+
+ GeoPointBinaryDVAtomicFieldData(AtomicReader reader, BinaryDocValues values) {
+ super();
+ this.reader = reader;
+ this.values = values == null ? BinaryDocValues.EMPTY : values;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return Long.MAX_VALUE;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return -1; // not exposed by Lucene
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return new ScriptDocValues.GeoPoints(getGeoPointValues());
+ }
+
+ @Override
+ public void close() {
+ // no-op
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValues(true) {
+
+ final BytesRef bytes = new BytesRef();
+ int i = Integer.MAX_VALUE;
+ int valueCount = 0;
+ final GeoPoint point = new GeoPoint();
+
+ @Override
+ public int setDocument(int docId) {
+ values.get(docId, bytes);
+ assert bytes.length % 16 == 0;
+ i = 0;
+ return valueCount = (bytes.length >>> 4);
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ assert i < 2 * valueCount;
+ final double lat = ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i++ * 8);
+ final double lon = ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i++ * 8);
+ return point.reset(lat, lon);
+ }
+
+ };
+ }
+
+}
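Note: setDocument()/nextValue() above decode one flat blob per document holding lat/lon pairs, each pair stored as two little-endian doubles, which is why the blob length must be a multiple of 16. A round-trip sketch of that layout using java.nio (class and method names are hypothetical):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    // [lat as LE double][lon as LE double], repeated once per value.
    final class GeoPointBlob {
        static byte[] encode(double[][] points) {
            ByteBuffer buf = ByteBuffer.allocate(points.length * 16).order(ByteOrder.LITTLE_ENDIAN);
            for (double[] p : points) {
                buf.putDouble(p[0]); // latitude
                buf.putDouble(p[1]); // longitude
            }
            return buf.array();
        }

        static double[][] decode(byte[] blob) {
            assert blob.length % 16 == 0;
            ByteBuffer buf = ByteBuffer.wrap(blob).order(ByteOrder.LITTLE_ENDIAN);
            double[][] points = new double[blob.length / 16][2];
            for (double[] p : points) {
                p[0] = buf.getDouble();
                p[1] = buf.getDouble();
            }
            return points;
        }
    }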
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java
new file mode 100644
index 0000000..1d10e08
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+import java.io.IOException;
+
+public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexGeoPointFieldData<AtomicGeoPointFieldData<ScriptDocValues>> {
+
+ public GeoPointBinaryDVIndexFieldData(Index index, Names fieldNames) {
+ super(index, fieldNames);
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
+ throw new ElasticsearchIllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
+ }
+
+ @Override
+ public AtomicGeoPointFieldData<ScriptDocValues> load(AtomicReaderContext context) {
+ try {
+ return new GeoPointBinaryDVAtomicFieldData(context.reader(), context.reader().getBinaryDocValues(fieldNames.indexName()));
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("Cannot load doc values", e);
+ }
+ }
+
+ @Override
+ public AtomicGeoPointFieldData<ScriptDocValues> loadDirect(AtomicReaderContext context) throws Exception {
+ return load(context);
+ }
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<?> build(Index index, Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService) {
+ // Ignore the breaker: doc-values based field data is not loaded onto the heap
+ final FieldMapper.Names fieldNames = mapper.names();
+ return new GeoPointBinaryDVIndexFieldData(index, fieldNames);
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedAtomicFieldData.java
new file mode 100644
index 0000000..1b54599
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedAtomicFieldData.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.packed.PagedMutable;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+
+/**
+ * Atomic field data implementation for geo points with lossy compression.
+ */
+public abstract class GeoPointCompressedAtomicFieldData extends AtomicGeoPointFieldData<ScriptDocValues> {
+
+ private final int numDocs;
+
+ protected long size = -1;
+
+ public GeoPointCompressedAtomicFieldData(int numDocs) {
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return new ScriptDocValues.GeoPoints(getGeoPointValues());
+ }
+
+ static class WithOrdinals extends GeoPointCompressedAtomicFieldData {
+
+ private final GeoPointFieldMapper.Encoding encoding;
+ private final PagedMutable lon, lat;
+ private final Ordinals ordinals;
+
+ public WithOrdinals(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, int numDocs, Ordinals ordinals) {
+ super(numDocs);
+ this.encoding = encoding;
+ this.lon = lon;
+ this.lat = lat;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + lon.ramBytesUsed() + lat.ramBytesUsed();
+ }
+ return size;
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValuesWithOrdinals(encoding, lon, lat, ordinals.ordinals());
+ }
+
+ public static class GeoPointValuesWithOrdinals extends GeoPointValues {
+
+ private final GeoPointFieldMapper.Encoding encoding;
+ private final PagedMutable lon, lat;
+ private final Ordinals.Docs ordinals;
+
+ private final GeoPoint scratch = new GeoPoint();
+
+ GeoPointValuesWithOrdinals(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, Ordinals.Docs ordinals) {
+ super(ordinals.isMultiValued());
+ this.encoding = encoding;
+ this.lon = lon;
+ this.lat = lat;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ final long ord = ordinals.nextOrd();
+ assert ord > 0;
+ return encoding.decode(lat.get(ord), lon.get(ord), scratch);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return ordinals.setDocument(docId);
+ }
+ }
+ }
+
+ /**
+ * Assumes unset values are marked in bitset, and docId is used as the index to the value array.
+ */
+ public static class SingleFixedSet extends GeoPointCompressedAtomicFieldData {
+
+ private final GeoPointFieldMapper.Encoding encoding;
+ private final PagedMutable lon, lat;
+ private final FixedBitSet set;
+ private final long numOrds;
+
+ public SingleFixedSet(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, int numDocs, FixedBitSet set, long numOrds) {
+ super(numDocs);
+ this.encoding = encoding;
+ this.lon = lon;
+ this.lat = lat;
+ this.set = set;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + lon.ramBytesUsed() + lat.ramBytesUsed() + RamUsageEstimator.sizeOf(set.getBits());
+ }
+ return size;
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValuesSingleFixedSet(encoding, lon, lat, set);
+ }
+
+
+ static class GeoPointValuesSingleFixedSet extends GeoPointValues {
+
+ private final GeoPointFieldMapper.Encoding encoding;
+ private final PagedMutable lat, lon;
+ private final FixedBitSet set;
+ private final GeoPoint scratch = new GeoPoint();
+
+
+ GeoPointValuesSingleFixedSet(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, FixedBitSet set) {
+ super(false);
+ this.encoding = encoding;
+ this.lon = lon;
+ this.lat = lat;
+ this.set = set;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return set.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ return encoding.decode(lat.get(docId), lon.get(docId), scratch);
+ }
+ }
+ }
+
+ /**
+ * Assumes all the values are "set", and docId is used as the index to the value array.
+ */
+ public static class Single extends GeoPointCompressedAtomicFieldData {
+
+ private final GeoPointFieldMapper.Encoding encoding;
+ private final PagedMutable lon, lat;
+ private final long numOrds;
+
+ public Single(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, int numDocs, long numOrds) {
+ super(numDocs);
+ this.encoding = encoding;
+ this.lon = lon;
+ this.lat = lat;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + (lon.ramBytesUsed() + lat.ramBytesUsed());
+ }
+ return size;
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValuesSingle(encoding, lon, lat);
+ }
+
+ static class GeoPointValuesSingle extends GeoPointValues {
+
+ private final GeoPointFieldMapper.Encoding encoding;
+ private final PagedMutable lon, lat;
+
+ private final GeoPoint scratch = new GeoPoint();
+
+
+ GeoPointValuesSingle(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat) {
+ super(false);
+ this.encoding = encoding;
+ this.lon = lon;
+ this.lat = lat;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return 1;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ return encoding.decode(lat.get(docId), lon.get(docId), scratch);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedIndexFieldData.java
new file mode 100644
index 0000000..882cb3a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointCompressedIndexFieldData.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.packed.PackedInts;
+import org.apache.lucene.util.packed.PagedMutable;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * Loads geo-point field data into lossily compressed packed arrays, trading
+ * coordinate precision (1cm by default) for memory.
+ */
+public class GeoPointCompressedIndexFieldData extends AbstractGeoPointIndexFieldData {
+
+ private static final String PRECISION_KEY = "precision";
+ private static final Distance DEFAULT_PRECISION_VALUE = new Distance(1, DistanceUnit.CENTIMETERS);
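+ // Assuming this release's mapping syntax, precision comes from the field data
+ // settings, e.g. "fielddata": { "format": "compressed", "precision": "3m" };
+ // when unset, the 1cm default above applies.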
+ private final CircuitBreakerService breakerService;
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService) {
+ FieldDataType type = mapper.fieldDataType();
+ final String precisionAsString = type.getSettings().get(PRECISION_KEY);
+ final Distance precision;
+ if (precisionAsString != null) {
+ precision = Distance.parseDistance(precisionAsString);
+ } else {
+ precision = DEFAULT_PRECISION_VALUE;
+ }
+ return new GeoPointCompressedIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, precision, breakerService);
+ }
+ }
+
+ private final GeoPointFieldMapper.Encoding encoding;
+
+ public GeoPointCompressedIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames,
+ FieldDataType fieldDataType, IndexFieldDataCache cache, Distance precision,
+ CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ this.encoding = GeoPointFieldMapper.Encoding.of(precision);
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public AtomicGeoPointFieldData<ScriptDocValues> loadDirect(AtomicReaderContext context) throws Exception {
+ AtomicReader reader = context.reader();
+
+ Terms terms = reader.terms(getFieldNames().indexName());
+ AtomicGeoPointFieldData data = null;
+ // TODO: Use an actual estimator to estimate before loading.
+ NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
+ if (terms == null) {
+ data = new Empty(reader.maxDoc());
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ return data;
+ }
+ final long initialSize;
+ if (terms.size() >= 0) {
+ initialSize = 1 + terms.size();
+ } else { // codec doesn't expose size
+ initialSize = 1 + Math.min(1 << 12, reader.maxDoc());
+ }
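+ // Pick a power-of-two page size (in values) that keeps each PagedMutable page
+ // close to BigArrays.PAGE_SIZE_IN_BYTES: bits-per-page / bits-per-coordinate,
+ // rounded up to the next power of two.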
+ final int pageSize = Integer.highestOneBit(BigArrays.PAGE_SIZE_IN_BYTES * 8 / encoding.numBitsPerCoordinate() - 1) << 1;
+ PagedMutable lat = new PagedMutable(initialSize, pageSize, encoding.numBitsPerCoordinate(), PackedInts.COMPACT);
+ PagedMutable lon = new PagedMutable(initialSize, pageSize, encoding.numBitsPerCoordinate(), PackedInts.COMPACT);
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), acceptableTransientOverheadRatio);
+ boolean success = false;
+ try {
+ final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator(null)));
+ GeoPoint point;
+ long ord = 0;
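+ // ord 0 is reserved for documents without a value (hence the 1 + terms.size()
+ // initial sizing above); the first real point is written at ord 1.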
+ while ((point = iter.next()) != null) {
+ ++ord;
+ if (lat.size() <= ord) {
+ final long newSize = BigArrays.overSize(ord + 1);
+ lat = lat.resize(newSize);
+ lon = lon.resize(newSize);
+ }
+ lat.set(ord, encoding.encodeCoordinate(point.getLat()));
+ lon.set(ord, encoding.encodeCoordinate(point.getLon()));
+ }
+
+ Ordinals build = builder.build(fieldDataType.getSettings());
+ if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
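+ // The field turned out to be single-valued: remap from ord-indexed to
+ // doc-indexed storage so lookups skip the ordinals indirection.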
+ Docs ordinals = build.ordinals();
+ int maxDoc = reader.maxDoc();
+ PagedMutable sLat = new PagedMutable(reader.maxDoc(), pageSize, encoding.numBitsPerCoordinate(), PackedInts.COMPACT);
+ PagedMutable sLon = new PagedMutable(reader.maxDoc(), pageSize, encoding.numBitsPerCoordinate(), PackedInts.COMPACT);
+ for (int i = 0; i < maxDoc; i++) {
+ final long nativeOrdinal = ordinals.getOrd(i);
+ sLat.set(i, lat.get(nativeOrdinal));
+ sLon.set(i, lon.get(nativeOrdinal));
+ }
+ FixedBitSet set = builder.buildDocsWithValuesSet();
+ if (set == null) {
+ data = new GeoPointCompressedAtomicFieldData.Single(encoding, sLon, sLat, reader.maxDoc(), ordinals.getNumOrds());
+ } else {
+ data = new GeoPointCompressedAtomicFieldData.SingleFixedSet(encoding, sLon, sLat, reader.maxDoc(), set, ordinals.getNumOrds());
+ }
+ } else {
+ if (lat.size() != build.getMaxOrd()) {
+ lat = lat.resize(build.getMaxOrd());
+ lon = lon.resize(build.getMaxOrd());
+ }
+ data = new GeoPointCompressedAtomicFieldData.WithOrdinals(encoding, lon, lat, reader.maxDoc(), build);
+ }
+ success = true;
+ return data;
+ } finally {
+ if (success) {
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+
+ }
+
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayAtomicFieldData.java
new file mode 100644
index 0000000..9c2bd93
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayAtomicFieldData.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.util.BigDoubleArrayList;
+import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+/**
+ * Atomic field data for geo points backed by parallel lat/lon double arrays.
+ */
+public abstract class GeoPointDoubleArrayAtomicFieldData extends AtomicGeoPointFieldData<ScriptDocValues> {
+
+ private final int numDocs;
+
+ protected long size = -1;
+
+ public GeoPointDoubleArrayAtomicFieldData(int numDocs) {
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return new ScriptDocValues.GeoPoints(getGeoPointValues());
+ }
+
+ static class WithOrdinals extends GeoPointDoubleArrayAtomicFieldData {
+
+ private final BigDoubleArrayList lon, lat;
+ private final Ordinals ordinals;
+
+ public WithOrdinals(BigDoubleArrayList lon, BigDoubleArrayList lat, int numDocs, Ordinals ordinals) {
+ super(numDocs);
+ this.lon = lon;
+ this.lat = lat;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + lon.sizeInBytes() + lat.sizeInBytes();
+ }
+ return size;
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValuesWithOrdinals(lon, lat, ordinals.ordinals());
+ }
+
+ public static class GeoPointValuesWithOrdinals extends GeoPointValues {
+
+ private final BigDoubleArrayList lon, lat;
+ private final Ordinals.Docs ordinals;
+
+ private final GeoPoint scratch = new GeoPoint();
+
+ GeoPointValuesWithOrdinals(BigDoubleArrayList lon, BigDoubleArrayList lat, Ordinals.Docs ordinals) {
+ super(ordinals.isMultiValued());
+ this.lon = lon;
+ this.lat = lat;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ final long ord = ordinals.nextOrd();
+ assert ord > 0;
+ return scratch.reset(lat.get(ord), lon.get(ord));
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return ordinals.setDocument(docId);
+ }
+ }
+ }
+
+ /**
+ * Assumes unset values are marked in bitset, and docId is used as the index to the value array.
+ */
+ public static class SingleFixedSet extends GeoPointDoubleArrayAtomicFieldData {
+
+ private final BigDoubleArrayList lon, lat;
+ private final FixedBitSet set;
+ private final long numOrds;
+
+ public SingleFixedSet(BigDoubleArrayList lon, BigDoubleArrayList lat, int numDocs, FixedBitSet set, long numOrds) {
+ super(numDocs);
+ this.lon = lon;
+ this.lat = lat;
+ this.set = set;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + lon.sizeInBytes() + lat.sizeInBytes() + RamUsageEstimator.sizeOf(set.getBits());
+ }
+ return size;
+ }
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValuesSingleFixedSet(lon, lat, set);
+ }
+
+
+ static class GeoPointValuesSingleFixedSet extends GeoPointValues {
+
+ private final BigDoubleArrayList lon;
+ private final BigDoubleArrayList lat;
+ private final FixedBitSet set;
+ private final GeoPoint scratch = new GeoPoint();
+
+
+ GeoPointValuesSingleFixedSet(BigDoubleArrayList lon, BigDoubleArrayList lat, FixedBitSet set) {
+ super(false);
+ this.lon = lon;
+ this.lat = lat;
+ this.set = set;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return set.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ return scratch.reset(lat.get(docId), lon.get(docId));
+ }
+ }
+ }
+
+ /**
+ * Assumes all the values are "set", and docId is used as the index to the value array.
+ */
+ public static class Single extends GeoPointDoubleArrayAtomicFieldData {
+
+ private final BigDoubleArrayList lon, lat;
+ private final long numOrds;
+
+ public Single(BigDoubleArrayList lon, BigDoubleArrayList lat, int numDocs, long numOrds) {
+ super(numDocs);
+ this.lon = lon;
+ this.lat = lat;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + (lon.sizeInBytes() + lat.sizeInBytes());
+ }
+ return size;
+ }
+
+
+ @Override
+ public GeoPointValues getGeoPointValues() {
+ return new GeoPointValuesSingle(lon, lat);
+ }
+
+ static class GeoPointValuesSingle extends GeoPointValues {
+
+ private final BigDoubleArrayList lon;
+ private final BigDoubleArrayList lat;
+
+ private final GeoPoint scratch = new GeoPoint();
+
+
+ GeoPointValuesSingle(BigDoubleArrayList lon, BigDoubleArrayList lat) {
+ super(false);
+ this.lon = lon;
+ this.lat = lat;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return 1;
+ }
+
+ @Override
+ public GeoPoint nextValue() {
+ return scratch.reset(lat.get(docId), lon.get(docId));
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayIndexFieldData.java
new file mode 100644
index 0000000..2a8be1a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointDoubleArrayIndexFieldData.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigDoubleArrayList;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * Loads geo-point field data from the terms index into parallel lat/lon
+ * double arrays, without compression.
+ */
+public class GeoPointDoubleArrayIndexFieldData extends AbstractGeoPointIndexFieldData {
+
+ private final CircuitBreakerService breakerService;
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
+ CircuitBreakerService breakerService) {
+ return new GeoPointDoubleArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
+ }
+ }
+
+ public GeoPointDoubleArrayIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames,
+ FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public AtomicGeoPointFieldData<ScriptDocValues> loadDirect(AtomicReaderContext context) throws Exception {
+ AtomicReader reader = context.reader();
+
+ Terms terms = reader.terms(getFieldNames().indexName());
+ AtomicGeoPointFieldData data = null;
+ // TODO: Use an actual estimator to estimate before loading.
+ NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
+ if (terms == null) {
+ data = new Empty(reader.maxDoc());
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ return data;
+ }
+ final BigDoubleArrayList lat = new BigDoubleArrayList();
+ final BigDoubleArrayList lon = new BigDoubleArrayList();
+ lat.add(0); // ord 0 is reserved for documents without a value
+ lon.add(0); // ord 0 is reserved for documents without a value
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), acceptableTransientOverheadRatio);
+ boolean success = false;
+ try {
+ final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator(null)));
+ GeoPoint point;
+ while ((point = iter.next()) != null) {
+ lat.add(point.getLat());
+ lon.add(point.getLon());
+ }
+
+ Ordinals build = builder.build(fieldDataType.getSettings());
+ if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
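+ // Single-valued field: copy the coordinates into doc-indexed arrays so
+ // lookups skip the ordinals indirection.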
+ Docs ordinals = build.ordinals();
+ int maxDoc = reader.maxDoc();
+ BigDoubleArrayList sLat = new BigDoubleArrayList(reader.maxDoc());
+ BigDoubleArrayList sLon = new BigDoubleArrayList(reader.maxDoc());
+ for (int i = 0; i < maxDoc; i++) {
+ long nativeOrdinal = ordinals.getOrd(i);
+ sLat.add(lat.get(nativeOrdinal));
+ sLon.add(lon.get(nativeOrdinal));
+ }
+ FixedBitSet set = builder.buildDocsWithValuesSet();
+ if (set == null) {
+ data = new GeoPointDoubleArrayAtomicFieldData.Single(sLon, sLat, reader.maxDoc(), ordinals.getNumOrds());
+ } else {
+ data = new GeoPointDoubleArrayAtomicFieldData.SingleFixedSet(sLon, sLat, reader.maxDoc(), set, ordinals.getNumOrds());
+ }
+ } else {
+ data = new GeoPointDoubleArrayAtomicFieldData.WithOrdinals(lon, lat, reader.maxDoc(), build);
+ }
+ success = true;
+ return data;
+ } finally {
+ if (success) {
+ estimator.afterLoad(null, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java b/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java
new file mode 100644
index 0000000..f81a935
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.index.fielddata.AbstractIndexFieldData;
+
+import java.io.IOException;
+
+/**
+ * Estimator that does nothing except adjust the breaker after the field
+ * data has been loaded. Useful for field data implementations that do not yet
+ * have pre-loading estimations.
+ */
+public class NonEstimatingEstimator implements AbstractIndexFieldData.PerValueEstimator {
+
+ private final MemoryCircuitBreaker breaker;
+
+ NonEstimatingEstimator(MemoryCircuitBreaker breaker) {
+ this.breaker = breaker;
+ }
+
+ @Override
+ public long bytesPerValue(BytesRef term) {
+ return 0;
+ }
+
+ @Override
+ public TermsEnum beforeLoad(Terms terms) throws IOException {
+ return null;
+ }
+
+ @Override
+ public void afterLoad(@Nullable TermsEnum termsEnum, long actualUsed) {
+ breaker.addWithoutBreaking(actualUsed);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVAtomicFieldData.java
new file mode 100644
index 0000000..d315917
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVAtomicFieldData.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.fielddata.AbstractAtomicNumericFieldData;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.LongValues;
+
+import java.io.IOException;
+
+/** {@link AtomicFieldData} impl on top of Lucene's numeric doc values. */
+public class NumericDVAtomicFieldData extends AbstractAtomicNumericFieldData {
+
+ private final AtomicReader reader;
+ private final String field;
+
+ public NumericDVAtomicFieldData(AtomicReader reader, String field) {
+ super(false);
+ this.reader = reader;
+ this.field = field;
+ }
+
+ @Override
+ public void close() {
+ // no-op
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true; // single-valued
+ }
+
+ @Override
+ public int getNumDocs() {
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ // maxDoc is a good upper bound, since each document has at most one value
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ // TODO: cannot be computed from Lucene
+ return -1;
+ }
+
+ private static class DocValuesAndBits {
+ final NumericDocValues values;
+ final Bits docsWithField;
+
+ DocValuesAndBits(NumericDocValues values, Bits docsWithField) {
+ this.values = values;
+ this.docsWithField = docsWithField;
+ }
+ }
+
+ private DocValuesAndBits getDocValues() {
+ final NumericDocValues values;
+ final Bits docsWithField;
+ try {
+ final NumericDocValues v = reader.getNumericDocValues(field);
+ if (v == null) {
+ // segment has no value
+ values = NumericDocValues.EMPTY;
+ docsWithField = new Bits.MatchNoBits(reader.maxDoc());
+ } else {
+ values = v;
+ final Bits b = reader.getDocsWithField(field);
+ docsWithField = b == null ? new Bits.MatchAllBits(reader.maxDoc()) : b;
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("Cannot load doc values", e);
+ }
+ return new DocValuesAndBits(values, docsWithField);
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ final DocValuesAndBits docValues = getDocValues();
+
+ return new LongValues(false) {
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return docValues.docsWithField.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return docValues.values.get(docId);
+ }
+ };
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ final DocValuesAndBits docValues = getDocValues();
+
+ return new DoubleValues(false) {
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return docValues.docsWithField.get(docId) ? 1 : 0;
+ }
+
+ @Override
+ public double nextValue() {
+ return docValues.values.get(docId);
+ }
+
+ };
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java
new file mode 100644
index 0000000..2004337
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+public class NumericDVIndexFieldData extends DocValuesIndexFieldData implements IndexNumericFieldData<NumericDVAtomicFieldData> {
+
+ public NumericDVIndexFieldData(Index index, Names fieldNames) {
+ super(index, fieldNames);
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public NumericDVAtomicFieldData load(AtomicReaderContext context) {
+ return new NumericDVAtomicFieldData(context.reader(), fieldNames.indexName());
+ }
+
+ @Override
+ public NumericDVAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ return load(context);
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
+ return new LongValuesComparatorSource(this, missingValue, sortMode);
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType getNumericType() {
+ return NumericType.LONG;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayAtomicFieldData.java
new file mode 100644
index 0000000..43d10aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayAtomicFieldData.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+/**
+ * {@link AtomicNumericFieldData} implementation which stores data in packed arrays to save memory.
+ */
+public abstract class PackedArrayAtomicFieldData extends AbstractAtomicNumericFieldData {
+
+ public static PackedArrayAtomicFieldData empty(int numDocs) {
+ return new Empty(numDocs);
+ }
+
+ private final int numDocs;
+
+ protected long size = -1;
+
+ public PackedArrayAtomicFieldData(int numDocs) {
+ super(false);
+ this.numDocs = numDocs;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public int getNumDocs() {
+ return numDocs;
+ }
+
+ static class Empty extends PackedArrayAtomicFieldData {
+
+ Empty(int numDocs) {
+ super(numDocs);
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return LongValues.EMPTY;
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return DoubleValues.EMPTY;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ return 0;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return 0;
+ }
+
+ @Override
+ public BytesValues getBytesValues(boolean needsHashes) {
+ return BytesValues.EMPTY;
+ }
+
+ @Override
+ public ScriptDocValues getScriptValues() {
+ return ScriptDocValues.EMPTY_LONGS;
+ }
+ }
+
+ public static class WithOrdinals extends PackedArrayAtomicFieldData {
+
+ private final MonotonicAppendingLongBuffer values;
+ private final Ordinals ordinals;
+
+ public WithOrdinals(MonotonicAppendingLongBuffer values, int numDocs, Ordinals ordinals) {
+ super(numDocs);
+ this.values = values;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + values.ramBytesUsed() + ordinals.getMemorySizeInBytes();
+ }
+ return size;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, ordinals.ordinals());
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, ordinals.ordinals());
+ }
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues.WithOrdinals {
+
+ private final MonotonicAppendingLongBuffer values;
+
+ LongValues(MonotonicAppendingLongBuffer values, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.values = values;
+ }
+
+ @Override
+ public long getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
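+ // ords are 1-based (ord 0 marks a missing value), the buffer is 0-based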
+ return values.get(ord - 1);
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues.WithOrdinals {
+
+ private final MonotonicAppendingLongBuffer values;
+
+ DoubleValues(MonotonicAppendingLongBuffer values, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.values = values;
+ }
+
+ @Override
+ public double getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ return values.get(ord - 1);
+ }
+
+
+ }
+ }
+
+ /**
+ * A single-valued case where not all documents have a value; a special
+ * value encodes the absence of a value for a document.
+ */
+ public static class SingleSparse extends PackedArrayAtomicFieldData {
+
+ private final PackedInts.Mutable values;
+ private final long minValue;
+ private final long missingValue;
+ private final long numOrds;
+
+ public SingleSparse(PackedInts.Mutable values, long minValue, int numDocs, long missingValue, long numOrds) {
+ super(numDocs);
+ this.values = values;
+ this.minValue = minValue;
+ this.missingValue = missingValue;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = values.ramBytesUsed() + 2 * RamUsageEstimator.NUM_BYTES_LONG;
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, minValue, missingValue);
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, minValue, missingValue);
+ }
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
+
+ private final PackedInts.Mutable values;
+ private final long minValue;
+ private final long missingValue;
+
+ LongValues(PackedInts.Mutable values, long minValue, long missingValue) {
+ super(false);
+ this.values = values;
+ this.minValue = minValue;
+ this.missingValue = missingValue;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return values.get(docId) != missingValue ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return minValue + values.get(docId);
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
+
+ private final PackedInts.Mutable values;
+ private final long minValue;
+ private final long missingValue;
+
+ DoubleValues(PackedInts.Mutable values, long minValue, long missingValue) {
+ super(false);
+ this.values = values;
+ this.minValue = minValue;
+ this.missingValue = missingValue;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return values.get(docId) != missingValue ? 1 : 0;
+ }
+
+ @Override
+ public double nextValue() {
+ return minValue + values.get(docId);
+ }
+ }
+ }
+
+ /**
+ * Assumes all the values are "set", and docId is used as the index to the value array.
+ */
+ public static class Single extends PackedArrayAtomicFieldData {
+
+ private final PackedInts.Mutable values;
+ private final long minValue;
+ private final long numOrds;
+
+ /**
+ * Note that values are indexed directly by docId (no offset by 1):
+ * position 0 holds the value for docId 0.
+ */
+ public Single(PackedInts.Mutable values, long minValue, int numDocs, long numOrds) {
+ super(numDocs);
+ this.values = values;
+ this.minValue = minValue;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return false;
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ size = values.ramBytesUsed();
+ }
+ return size;
+ }
+
+ @Override
+ public LongValues getLongValues() {
+ return new LongValues(values, minValue);
+ }
+
+ @Override
+ public DoubleValues getDoubleValues() {
+ return new DoubleValues(values, minValue);
+ }
+
+ static class LongValues extends DenseLongValues {
+
+ private final PackedInts.Mutable values;
+ private final long minValue;
+
+ LongValues(PackedInts.Mutable values, long minValue) {
+ super(false);
+ this.values = values;
+ this.minValue = minValue;
+ }
+
+
+ @Override
+ public long nextValue() {
+ return minValue + values.get(docId);
+ }
+
+
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
+
+ private final PackedInts.Mutable values;
+ private final long minValue;
+
+ DoubleValues(PackedInts.Mutable values, long minValue) {
+ super(false);
+ this.values = values;
+ this.minValue = minValue;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ return 1;
+ }
+
+ @Override
+ public double nextValue() {
+ return minValue + values.get(docId);
+ }
+
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayIndexFieldData.java
new file mode 100644
index 0000000..aa61976
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/PackedArrayIndexFieldData.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefIterator;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+/**
+ * Stores numeric data into bit-packed arrays for better memory efficiency.
+ */
+public class PackedArrayIndexFieldData extends AbstractIndexFieldData<AtomicNumericFieldData> implements IndexNumericFieldData<AtomicNumericFieldData> {
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ private NumericType numericType;
+
+ public Builder setNumericType(NumericType numericType) {
+ this.numericType = numericType;
+ return this;
+ }
+
+ @Override
+ public IndexFieldData<AtomicNumericFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
+ IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ return new PackedArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, numericType, breakerService);
+ }
+ }
+
+ private final NumericType numericType;
+ private final CircuitBreakerService breakerService;
+
+ public PackedArrayIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames,
+ FieldDataType fieldDataType, IndexFieldDataCache cache, NumericType numericType,
+ CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ Preconditions.checkNotNull(numericType);
+ Preconditions.checkArgument(EnumSet.of(NumericType.BYTE, NumericType.SHORT, NumericType.INT, NumericType.LONG).contains(numericType), getClass().getSimpleName() + " only supports integer types, not " + numericType);
+ this.numericType = numericType;
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public NumericType getNumericType() {
+ return numericType;
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ // Not guaranteed: segments may be single-valued, and a flag could be updated
+ // dynamically based on the atomic field data that gets loaded.
+ return false;
+ }
+
+ @Override
+ public AtomicNumericFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ AtomicReader reader = context.reader();
+ Terms terms = reader.terms(getFieldNames().indexName());
+ PackedArrayAtomicFieldData data = null;
+ PackedArrayEstimator estimator = new PackedArrayEstimator(breakerService.getBreaker(), getNumericType());
+ if (terms == null) {
+ data = PackedArrayAtomicFieldData.empty(reader.maxDoc());
+ estimator.adjustForNoTerms(data.getMemorySizeInBytes());
+ return data;
+ }
+ // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
+ // Lucene encodes numeric data so that the lexicographic (encoded) order matches
+ // the numeric order, so the decoded sequence of longs is monotonically increasing.
+ final MonotonicAppendingLongBuffer values = new MonotonicAppendingLongBuffer();
+
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+ OrdinalsBuilder builder = new OrdinalsBuilder(-1, reader.maxDoc(), acceptableTransientOverheadRatio);
+ TermsEnum termsEnum = estimator.beforeLoad(terms);
+ boolean success = false;
+ try {
+ BytesRefIterator iter = builder.buildFromTerms(termsEnum);
+ BytesRef term;
+ assert !getNumericType().isFloatingPoint();
+ final boolean indexedAsLong = getNumericType().requiredBits() > 32;
+ while ((term = iter.next()) != null) {
+ final long value = indexedAsLong
+ ? NumericUtils.prefixCodedToLong(term)
+ : NumericUtils.prefixCodedToInt(term);
+ assert values.size() == 0 || value > values.get(values.size() - 1);
+ values.add(value);
+ }
+ Ordinals build = builder.build(fieldDataType.getSettings());
+
+ if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
+ Docs ordinals = build.ordinals();
+ final FixedBitSet set = builder.buildDocsWithValuesSet();
+
+ long minValue, maxValue;
+ minValue = maxValue = 0;
+ if (values.size() > 0) {
+ minValue = values.get(0);
+ maxValue = values.get(values.size() - 1);
+ }
+
+ // Encode document without a value with a special value
+ long missingValue = 0;
+ if (set != null) {
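+ // Find a value no document uses: if the unique values are dense, grow the
+ // range by one at either end; otherwise reuse the first gap in the sorted values.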
+ if ((maxValue - minValue + 1) == values.size()) {
+ // values are dense
+ if (minValue > Long.MIN_VALUE) {
+ missingValue = --minValue;
+ } else {
+ assert maxValue != Long.MAX_VALUE;
+ missingValue = ++maxValue;
+ }
+ } else {
+ for (long i = 1; i < values.size(); ++i) {
+ if (values.get(i) > values.get(i - 1) + 1) {
+ missingValue = values.get(i - 1) + 1;
+ break;
+ }
+ }
+ }
+ missingValue -= minValue; // delta
+ }
+
+ final long delta = maxValue - minValue;
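+ // maxValue - minValue can overflow to a negative long when the range spans
+ // more than 2^63; fall back to 64 bits per value in that case.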
+ final int bitsRequired = delta < 0 ? 64 : PackedInts.bitsRequired(delta);
+ final float acceptableOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_overhead_ratio", PackedInts.DEFAULT);
+ final PackedInts.FormatAndBits formatAndBits = PackedInts.fastestFormatAndBits(reader.maxDoc(), bitsRequired, acceptableOverheadRatio);
+
+ // There is a sweet spot where, thanks to a low unique value count, keeping
+ // ordinals consumes less memory than a flat per-document array.
+ final long singleValuesSize = formatAndBits.format.longCount(PackedInts.VERSION_CURRENT, reader.maxDoc(), formatAndBits.bitsPerValue) * 8L;
+ final long uniqueValuesSize = values.ramBytesUsed();
+ final long ordinalsSize = build.getMemorySizeInBytes();
+
+ if (uniqueValuesSize + ordinalsSize < singleValuesSize) {
+ data = new PackedArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
+ } else {
+ final PackedInts.Mutable sValues = PackedInts.getMutable(reader.maxDoc(), bitsRequired, acceptableOverheadRatio);
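+ // Pre-fill with the missing-value delta, then overwrite each doc that has a
+ // value with (value - minValue); bitsRequired bits are enough for any delta.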
+ if (missingValue != 0) {
+ sValues.fill(0, sValues.size(), missingValue);
+ }
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ final long ord = ordinals.getOrd(i);
+ if (ord != Ordinals.MISSING_ORDINAL) {
+ sValues.set(i, values.get(ord - 1) - minValue);
+ }
+ }
+ if (set == null) {
+ data = new PackedArrayAtomicFieldData.Single(sValues, minValue, reader.maxDoc(), ordinals.getNumOrds());
+ } else {
+ data = new PackedArrayAtomicFieldData.SingleSparse(sValues, minValue, reader.maxDoc(), missingValue, ordinals.getNumOrds());
+ }
+ }
+ } else {
+ data = new PackedArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
+ }
+
+ success = true;
+ return data;
+ } finally {
+ if (!success) {
+ // If something went wrong, unwind any current estimations we've made
+ estimator.afterLoad(termsEnum, 0);
+ } else {
+ // Adjust as usual, based on the actual size of the field data
+ estimator.afterLoad(termsEnum, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
+ return new LongValuesComparatorSource(this, missingValue, sortMode);
+ }
+
+ /**
+ * Estimator that wraps numeric field data loading in a
+ * RamAccountingTermsEnum, adjusting the breaker after data has been
+ * loaded
+ */
+ public class PackedArrayEstimator implements PerValueEstimator {
+
+ private final MemoryCircuitBreaker breaker;
+ private final NumericType type;
+
+ public PackedArrayEstimator(MemoryCircuitBreaker breaker, NumericType type) {
+ this.breaker = breaker;
+ this.type = type;
+ }
+
+ /**
+ * @return estimated number of bytes per term, based on NumericType#requiredBits()
+ */
+ @Override
+ public long bytesPerValue(BytesRef term) {
+ // Assume roughly a 0.8 compression ratio: requiredBits() / 10 is about
+ // (requiredBits() / 8) * 0.8 bytes, with a floor of 4 bytes per term
+ return Math.max(type.requiredBits() / 10, 4);
+ }
+
+ /**
+ * @return A TermsEnum wrapped in a RamAccountingTermsEnum
+ * @throws IOException
+ */
+ @Override
+ public TermsEnum beforeLoad(Terms terms) throws IOException {
+ return new RamAccountingTermsEnum(type.wrapTermsEnum(terms.iterator(null)), breaker, this);
+ }
+
+ /**
+ * Adjusts the breaker based on the aggregated value from the RamAccountingTermsEnum
+ *
+ * @param termsEnum terms that were wrapped and loaded
+ * @param actualUsed actual field data memory usage
+ */
+ @Override
+ public void afterLoad(TermsEnum termsEnum, long actualUsed) {
+ assert termsEnum instanceof RamAccountingTermsEnum;
+ long estimatedBytes = ((RamAccountingTermsEnum) termsEnum).getTotalBytes();
+ breaker.addWithoutBreaking(-(estimatedBytes - actualUsed));
+ }
+
+ /**
+ * Adjust the breaker when no terms were actually loaded, but the field
+ * data takes up space regardless. For instance, when ordinals are
+ * used.
+ * @param actualUsed bytes actually used
+ */
+ public void adjustForNoTerms(long actualUsed) {
+ breaker.addWithoutBreaking(actualUsed);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java
new file mode 100644
index 0000000..46e9bc5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.PagedBytes;
+import org.apache.lucene.util.PagedBytes.Reader;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.IntArray;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.ordinals.EmptyOrdinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+
+/**
+ */
+public class PagedBytesAtomicFieldData implements AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> {
+
+ public static PagedBytesAtomicFieldData empty(int numDocs) {
+ return new Empty(numDocs);
+ }
+
+ // 0 ordinal in values means no value (it's null)
+ private final PagedBytes.Reader bytes;
+ private final MonotonicAppendingLongBuffer termOrdToBytesOffset;
+ protected final Ordinals ordinals;
+
+ private volatile IntArray hashes;
+ private long size = -1;
+ private final long readerBytesSize;
+
+ public PagedBytesAtomicFieldData(PagedBytes.Reader bytes, long readerBytesSize, MonotonicAppendingLongBuffer termOrdToBytesOffset, Ordinals ordinals) {
+ this.bytes = bytes;
+ this.termOrdToBytesOffset = termOrdToBytesOffset;
+ this.ordinals = ordinals;
+ this.readerBytesSize = readerBytesSize;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public int getNumDocs() {
+ return ordinals.getNumDocs();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ if (size == -1) {
+ long size = ordinals.getMemorySizeInBytes();
+ // PackedBytes
+ size += readerBytesSize;
+ // PackedInts
+ size += termOrdToBytesOffset.ramBytesUsed();
+ this.size = size;
+ }
+ return size;
+ }
+
+ private final IntArray getHashes() {
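+ // lazily compute one hash per term and cache the result for later lookups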
+ if (hashes == null) {
+ long numberOfValues = termOrdToBytesOffset.size();
+ IntArray hashes = BigArrays.newIntArray(numberOfValues);
+ BytesRef scratch = new BytesRef();
+ for (long i = 0; i < numberOfValues; i++) {
+ bytes.fill(scratch, termOrdToBytesOffset.get(i));
+ hashes.set(i, scratch.hashCode());
+ }
+ this.hashes = hashes;
+ }
+ return hashes;
+ }
+
+ @Override
+ public BytesValues.WithOrdinals getBytesValues(boolean needsHashes) {
+ if (needsHashes) {
+ final IntArray hashes = getHashes();
+ return new BytesValues.HashedBytesValues(hashes, bytes, termOrdToBytesOffset, ordinals.ordinals());
+ } else {
+ return new BytesValues(bytes, termOrdToBytesOffset, ordinals.ordinals());
+ }
+ }
+
+ @Override
+ public ScriptDocValues.Strings getScriptValues() {
+ return new ScriptDocValues.Strings(getBytesValues(false));
+ }
+
+ static class BytesValues extends org.elasticsearch.index.fielddata.BytesValues.WithOrdinals {
+
+ protected final PagedBytes.Reader bytes;
+ protected final MonotonicAppendingLongBuffer termOrdToBytesOffset;
+ protected final Ordinals.Docs ordinals;
+
+ BytesValues(PagedBytes.Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Ordinals.Docs ordinals) {
+ super(ordinals);
+ this.bytes = bytes;
+ this.termOrdToBytesOffset = termOrdToBytesOffset;
+ this.ordinals = ordinals;
+ }
+
+ @Override
+ public BytesRef copyShared() {
+ // when we fill from the paged bytes, we just reference an existing buffer slice; it's enough
+ // to create a shallow copy of the bytes to be safe for "reads".
+ return new BytesRef(scratch.bytes, scratch.offset, scratch.length);
+ }
+
+ @Override
+ public final Ordinals.Docs ordinals() {
+ return this.ordinals;
+ }
+
+ @Override
+ public final BytesRef getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ bytes.fill(scratch, termOrdToBytesOffset.get(ord));
+ return scratch;
+ }
+
+ @Override
+ public final BytesRef nextValue() {
+ bytes.fill(scratch, termOrdToBytesOffset.get(ordinals.nextOrd()));
+ return scratch;
+ }
+
+ static final class HashedBytesValues extends BytesValues {
+ private final IntArray hashes;
+
+ HashedBytesValues(IntArray hashes, Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Docs ordinals) {
+ super(bytes, termOrdToBytesOffset, ordinals);
+ this.hashes = hashes;
+ }
+
+ @Override
+ public int currentValueHash() {
+ assert ordinals.currentOrd() >= 0;
+ return hashes.get(ordinals.currentOrd());
+ }
+ }
+
+ }
+
+ private final static class Empty extends PagedBytesAtomicFieldData {
+
+ Empty(int numDocs) {
+ super(emptyBytes(), 0, new MonotonicAppendingLongBuffer(), new EmptyOrdinals(numDocs));
+ }
+
+ static PagedBytes.Reader emptyBytes() {
+ PagedBytes bytes = new PagedBytes(1);
+ bytes.copyUsingLengthPrefix(new BytesRef());
+ return bytes.freeze(true);
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return false;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return ordinals.getNumDocs();
+ }
+
+ @Override
+ public long getNumberUniqueValues() {
+ return 0;
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public BytesValues.WithOrdinals getBytesValues(boolean needsHashes) {
+ return new EmptyByteValuesWithOrdinals(ordinals.ordinals());
+ }
+
+ @Override
+ public ScriptDocValues.Strings getScriptValues() {
+ return ScriptDocValues.EMPTY_STRINGS;
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java
new file mode 100644
index 0000000..dcecb8c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.codecs.BlockTreeTermsReader;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.PagedBytes;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+import java.io.IOException;
+
+/**
+ */
+public class PagedBytesIndexFieldData extends AbstractBytesIndexFieldData<PagedBytesAtomicFieldData> {
+
+ private final CircuitBreakerService breakerService;
+
+ public static class Builder implements IndexFieldData.Builder {
+
+ @Override
+ public IndexFieldData<PagedBytesAtomicFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
+ IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ return new PagedBytesIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
+ }
+ }
+
+ public PagedBytesIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames,
+ FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
+ super(index, indexSettings, fieldNames, fieldDataType, cache);
+ this.breakerService = breakerService;
+ }
+
+ @Override
+ public PagedBytesAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ AtomicReader reader = context.reader();
+
+ PagedBytesEstimator estimator = new PagedBytesEstimator(context, breakerService.getBreaker());
+ Terms terms = reader.terms(getFieldNames().indexName());
+ if (terms == null) {
+ PagedBytesAtomicFieldData emptyData = PagedBytesAtomicFieldData.empty(reader.maxDoc());
+ estimator.adjustForNoTerms(emptyData.getMemorySizeInBytes());
+ return emptyData;
+ }
+
+ final PagedBytes bytes = new PagedBytes(15);
+
+ final MonotonicAppendingLongBuffer termOrdToBytesOffset = new MonotonicAppendingLongBuffer();
+ termOrdToBytesOffset.add(0); // first ord is reserved for missing values
+ final long numTerms;
+ if (regex == null && frequency == null) {
+ numTerms = terms.size();
+ } else {
+ numTerms = -1;
+ }
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
+ FilterSettingFields.ACCEPTABLE_TRANSIENT_OVERHEAD_RATIO, OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+
+ OrdinalsBuilder builder = new OrdinalsBuilder(numTerms, reader.maxDoc(), acceptableTransientOverheadRatio);
+
+ // Wrap the context in an estimator and use it to either estimate
+ // the entire set, or wrap the TermsEnum so it can be calculated
+ // per-term
+ PagedBytesAtomicFieldData data = null;
+ TermsEnum termsEnum = estimator.beforeLoad(terms);
+ boolean success = false;
+
+ try {
+ // 0 is reserved for "unset"
+ bytes.copyUsingLengthPrefix(new BytesRef());
+
+ DocsEnum docsEnum = null;
+ for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
+ final long termOrd = builder.nextOrdinal();
+ assert termOrd == termOrdToBytesOffset.size();
+ termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term));
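+ // we only need doc IDs to build ordinals, not term frequencies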
+ docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
+ for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+ builder.addDoc(docId);
+ }
+ }
+ final long sizePointer = bytes.getPointer();
+ PagedBytes.Reader bytesReader = bytes.freeze(true);
+ final Ordinals ordinals = builder.build(fieldDataType.getSettings());
+
+ data = new PagedBytesAtomicFieldData(bytesReader, sizePointer, termOrdToBytesOffset, ordinals);
+ success = true;
+ return data;
+ } finally {
+ if (!success) {
+ // If something went wrong, unwind any current estimations we've made
+ estimator.afterLoad(termsEnum, 0);
+ } else {
+ // Call .afterLoad() to adjust the breaker now that we have an exact size
+ estimator.afterLoad(termsEnum, data.getMemorySizeInBytes());
+ }
+ builder.close();
+ }
+ }
+
+ /**
+ * Estimator that wraps string field data by either using
+ * BlockTreeTermsReader, or wrapping the data in a RamAccountingTermsEnum
+ * if the BlockTreeTermsReader cannot be used.
+ */
+ public class PagedBytesEstimator implements PerValueEstimator {
+
+ private final AtomicReaderContext context;
+ private final MemoryCircuitBreaker breaker;
+ private long estimatedBytes;
+
+ PagedBytesEstimator(AtomicReaderContext context, MemoryCircuitBreaker breaker) {
+ this.breaker = breaker;
+ this.context = context;
+ }
+
+ /**
+ * @return the number of bytes for the term based on the length and ordinal overhead
+ */
+ @Override
+ public long bytesPerValue(BytesRef term) {
+ if (term == null) {
+ return 0;
+ }
+ long bytes = term.length;
+ // 64 bytes for miscellaneous overhead
+ bytes += 64;
+ // Seems to be about a 1.5x compression per term/ord, plus 1 for some wiggle room
+ bytes = (long) ((double) bytes / 1.5) + 1;
+ return bytes;
+ }
+
+ /**
+ * @return the estimate for loading the entire term set into field data, or 0 if unavailable
+ */
+ public long estimateStringFieldData() {
+ try {
+ AtomicReader reader = context.reader();
+ Terms terms = reader.terms(getFieldNames().indexName());
+
+ Fields fields = reader.fields();
+ final Terms fieldTerms = fields.terms(getFieldNames().indexName());
+
+ if (fieldTerms instanceof BlockTreeTermsReader.FieldReader) {
+ final BlockTreeTermsReader.Stats stats = ((BlockTreeTermsReader.FieldReader) fieldTerms).computeStats();
+ long totalTermBytes = stats.totalTermBytes;
+ if (logger.isTraceEnabled()) {
+ logger.trace("totalTermBytes: {}, terms.size(): {}, terms.getSumDocFreq(): {}",
+ totalTermBytes, terms.size(), terms.getSumDocFreq());
+ }
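+ // rough heuristic: the raw term bytes plus ~2 bytes of ordinal overhead per term and ~4 bytes per term-document posting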
+ long totalBytes = totalTermBytes + (2 * terms.size()) + (4 * terms.getSumDocFreq());
+ return totalBytes;
+ }
+ } catch (Exception e) {
+ logger.warn("Unable to estimate memory overhead", e);
+ }
+ return 0;
+ }
+
+ /**
+ * Determine whether the BlockTreeTermsReader.FieldReader can be used
+ * for estimating the field data, adding the estimate to the circuit
+ * breaker if it can, otherwise wrapping the terms in a
+ * RamAccountingTermsEnum to be estimated on a per-term basis.
+ *
+ * @param terms terms to be estimated
+ * @return A possibly wrapped TermsEnum for the terms
+ * @throws IOException
+ */
+ @Override
+ public TermsEnum beforeLoad(Terms terms) throws IOException {
+ final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
+ FilterSettingFields.ACCEPTABLE_TRANSIENT_OVERHEAD_RATIO,
+ OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
+
+ AtomicReader reader = context.reader();
+ // Check if one of the following is present:
+ // - The OrdinalsBuilder overhead has been tweaked away from the default
+ // - A field data filter is present
+ // - A regex filter is present
+ if (acceptableTransientOverheadRatio != OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO ||
+ fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN, 0d) != 0d ||
+ fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MAX, 0d) != 0d ||
+ fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN_SEGMENT_SIZE, 0d) != 0d ||
+ fieldDataType.getSettings().get(FilterSettingFields.REGEX_PATTERN) != null) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Filter exists, can't circuit break normally, using RamAccountingTermsEnum");
+ }
+ return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
+ } else {
+ estimatedBytes = this.estimateStringFieldData();
+ // If we weren't able to estimate, wrap in the RamAccountingTermsEnum
+ if (estimatedBytes == 0) {
+ return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
+ }
+
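+ // reserve the whole-field estimate up front; the breaker may trip here before any loading happens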
+ breaker.addEstimateBytesAndMaybeBreak(estimatedBytes);
+ return filter(terms, reader);
+ }
+ }
+
+ /**
+ * Adjust the circuit breaker now that terms have been loaded, getting
+ * the actual usage either from the parameter (if estimation worked for
+ * the entire set) or from the TermsEnum if it has been wrapped in a
+ * RamAccountingTermsEnum.
+ *
+ * @param termsEnum terms that were loaded
+ * @param actualUsed actual field data memory usage
+ */
+ @Override
+ public void afterLoad(TermsEnum termsEnum, long actualUsed) {
+ if (termsEnum instanceof RamAccountingTermsEnum) {
+ estimatedBytes = ((RamAccountingTermsEnum) termsEnum).getTotalBytes();
+ }
+ breaker.addWithoutBreaking(-(estimatedBytes - actualUsed));
+ }
+
+ /**
+ * Adjust the breaker when no terms were actually loaded, but the field
+ * data takes up space regardless. For instance, when ordinals are
+ * used.
+ * @param actualUsed bytes actually used
+ */
+ public void adjustForNoTerms(long actualUsed) {
+ breaker.addWithoutBreaking(actualUsed);
+ }
+ }
+
+ static final class FilterSettingFields {
+ static final String ACCEPTABLE_TRANSIENT_OVERHEAD_RATIO = "acceptable_transient_overhead_ratio";
+ static final String FREQUENCY_MIN = "filter.frequency.min";
+ static final String FREQUENCY_MAX = "filter.frequency.max";
+ static final String FREQUENCY_MIN_SEGMENT_SIZE = "filter.frequency.min_segment_size";
+ static final String REGEX_PATTERN = "filter.regex.pattern";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVAtomicFieldData.java
new file mode 100644
index 0000000..c02e00e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVAtomicFieldData.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LongsRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.IntArray;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+
+import java.io.IOException;
+
+/**
+ * {@link AtomicFieldData} impl based on Lucene's {@link SortedSetDocValues}.
+ * <p><b>Implementation note</b>: Lucene's ordinal for unset values is -1 whereas Elasticsearch's is 0, which is why there are all
+ * these +1s to translate from Lucene's ordinals to ES's.
+ */
+abstract class SortedSetDVAtomicFieldData {
+
+ private final AtomicReader reader;
+ private final String field;
+ private volatile IntArray hashes;
+
+ SortedSetDVAtomicFieldData(AtomicReader reader, String field) {
+ this.reader = reader;
+ this.field = field;
+ }
+
+ public boolean isMultiValued() {
+ // we could compute it when loading the values for the first time and then cache it, but that would defeat the point of
+ // doc values, which is to make loading faster
+ return true;
+ }
+
+ public int getNumDocs() {
+ return reader.maxDoc();
+ }
+
+ public long getNumberUniqueValues() {
+ final SortedSetDocValues values = getValuesNoException(reader, field);
+ return values.getValueCount();
+ }
+
+ public long getMemorySizeInBytes() {
+ // There is no API to access memory usage per field, and RamUsageEstimator can't help since there are often references
+ // from a per-field instance to all other instances handled by the same format
+ return -1L;
+ }
+
+ public void close() {
+ // no-op
+ }
+
+ public org.elasticsearch.index.fielddata.BytesValues.WithOrdinals getBytesValues(boolean needsHashes) {
+ final SortedSetDocValues values = getValuesNoException(reader, field);
+ return new SortedSetValues(reader, field, values);
+ }
+
+ public org.elasticsearch.index.fielddata.BytesValues.WithOrdinals getHashedBytesValues() {
+ final SortedSetDocValues values = getValuesNoException(reader, field);
+ if (hashes == null) {
+ synchronized (this) {
+ if (hashes == null) {
+ final long valueCount = values.getValueCount();
+ final IntArray hashes = BigArrays.newIntArray(1L + valueCount);
+ BytesRef scratch = new BytesRef(16);
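+ // slot 0 backs the missing ordinal and hashes an empty BytesRef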
+ hashes.set(0, scratch.hashCode());
+ for (long i = 0; i < valueCount; ++i) {
+ values.lookupOrd(i, scratch);
+ hashes.set(1L + i, scratch.hashCode());
+ }
+ this.hashes = hashes;
+ }
+ }
+ }
+ return new SortedSetHashedValues(reader, field, values, hashes);
+ }
+
+ private static SortedSetDocValues getValuesNoException(AtomicReader reader, String field) {
+ try {
+ SortedSetDocValues values = reader.getSortedSetDocValues(field);
+ if (values == null) {
+ // This field has not been populated
+ assert reader.getFieldInfos().fieldInfo(field) == null;
+ values = SortedSetDocValues.EMPTY;
+ }
+ return values;
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("Couldn't load doc values", e);
+ }
+ }
+
+ static class SortedSetValues extends BytesValues.WithOrdinals {
+
+ protected final SortedSetDocValues values;
+
+ SortedSetValues(AtomicReader reader, String field, SortedSetDocValues values) {
+ super(new SortedSetDocs(new SortedSetOrdinals(reader, field, values.getValueCount()), values));
+ this.values = values;
+ }
+
+ @Override
+ public BytesRef getValueByOrd(long ord) {
+ assert ord != Ordinals.MISSING_ORDINAL;
+ values.lookupOrd(ord - 1, scratch);
+ return scratch;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ values.lookupOrd(ordinals.nextOrd()-1, scratch);
+ return scratch;
+ }
+ }
+
+ static final class SortedSetHashedValues extends SortedSetValues {
+
+ private final IntArray hashes;
+
+ SortedSetHashedValues(AtomicReader reader, String field, SortedSetDocValues values, IntArray hashes) {
+ super(reader, field, values);
+ this.hashes = hashes;
+ }
+
+ @Override
+ public int currentValueHash() {
+ assert ordinals.currentOrd() >= 0;
+ return hashes.get(ordinals.currentOrd());
+ }
+ }
+
+ static final class SortedSetOrdinals implements Ordinals {
+
+ // We don't store SortedSetDocValues as a member because Ordinals must be thread-safe
+ private final AtomicReader reader;
+ private final String field;
+ private final long numOrds;
+
+ public SortedSetOrdinals(AtomicReader reader, String field, long numOrds) {
+ super();
+ this.reader = reader;
+ this.field = field;
+ this.numOrds = numOrds;
+ }
+
+ @Override
+ public long getMemorySizeInBytes() {
+ // Ordinals can't be distinguished from the atomic field data instance
+ return -1;
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return true;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return reader.maxDoc();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return numOrds;
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return 1 + numOrds;
+ }
+
+ @Override
+ public Docs ordinals() {
+ final SortedSetDocValues values = getValuesNoException(reader, field);
+ assert values.getValueCount() == numOrds;
+ return new SortedSetDocs(this, values);
+ }
+
+ }
+
+ static class SortedSetDocs implements Ordinals.Docs {
+
+ private final SortedSetOrdinals ordinals;
+ private final SortedSetDocValues values;
+ private final LongsRef longScratch;
+ private int ordIndex = Integer.MAX_VALUE;
+ private long currentOrdinal = -1;
+
+ SortedSetDocs(SortedSetOrdinals ordinals, SortedSetDocValues values) {
+ this.ordinals = ordinals;
+ this.values = values;
+ longScratch = new LongsRef(8);
+ }
+
+ @Override
+ public Ordinals ordinals() {
+ return ordinals;
+ }
+
+ @Override
+ public int getNumDocs() {
+ return ordinals.getNumDocs();
+ }
+
+ @Override
+ public long getNumOrds() {
+ return ordinals.getNumOrds();
+ }
+
+ @Override
+ public long getMaxOrd() {
+ return ordinals.getMaxOrd();
+ }
+
+ @Override
+ public boolean isMultiValued() {
+ return ordinals.isMultiValued();
+ }
+
+ @Override
+ public long getOrd(int docId) {
+ values.setDocument(docId);
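+ // for a doc without values nextOrd() returns NO_MORE_ORDS (-1), which the +1 maps to ES's missing ordinal 0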
+ return currentOrdinal = 1 + values.nextOrd();
+ }
+
+ @Override
+ public LongsRef getOrds(int docId) {
+ values.setDocument(docId);
+ longScratch.offset = 0;
+ longScratch.length = 0;
+ for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
+ longScratch.longs = ArrayUtil.grow(longScratch.longs, longScratch.length + 1);
+ longScratch.longs[longScratch.length++] = 1 + ord;
+ }
+ return longScratch;
+ }
+
+ @Override
+ public long nextOrd() {
+ assert ordIndex < longScratch.length;
+ return currentOrdinal = longScratch.longs[ordIndex++];
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ // For now, we consume all ords and pass them to the iter instead of doing it in a streaming way because Lucene's
+ // SORTED_SET doc values are cached per thread, you can't have a fully independent instance
+ final LongsRef ords = getOrds(docId);
+ ordIndex = 0;
+ return ords.length;
+ }
+
+ @Override
+ public long currentOrd() {
+ return currentOrdinal;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java
new file mode 100644
index 0000000..0a10a7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReader;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.ScriptDocValues.Strings;
+
+/**
+ * An {@link AtomicFieldData} implementation that uses Lucene {@link org.apache.lucene.index.SortedSetDocValues}.
+ */
+public final class SortedSetDVBytesAtomicFieldData extends SortedSetDVAtomicFieldData implements AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> {
+
+ /* NOTE: This class inherits the methods getBytesValues() and getHashedBytesValues()
+ * from SortedSetDVAtomicFieldData. This can cause confusion since they are
+ * part of the interface this class implements. */
+
+ SortedSetDVBytesAtomicFieldData(AtomicReader reader, String field) {
+ super(reader, field);
+ }
+
+ @Override
+ public boolean isValuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public Strings getScriptValues() {
+ return new ScriptDocValues.Strings(getBytesValues(false));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesIndexFieldData.java
new file mode 100644
index 0000000..8e2ce52
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesIndexFieldData.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+public class SortedSetDVBytesIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData.WithOrdinals<SortedSetDVBytesAtomicFieldData> {
+
+ public SortedSetDVBytesIndexFieldData(Index index, Names fieldNames) {
+ super(index, fieldNames);
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return true;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
+ return new BytesRefFieldComparatorSource((IndexFieldData<?>) this, missingValue, sortMode);
+ }
+
+ @Override
+ public SortedSetDVBytesAtomicFieldData load(AtomicReaderContext context) {
+ return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldNames.indexName());
+ }
+
+ @Override
+ public SortedSetDVBytesAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
+ return load(context);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java
new file mode 100644
index 0000000..8ce71c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+
+import java.io.IOException;
+
+/**
+ */
+public class AllFieldsVisitor extends FieldsVisitor {
+
+ public AllFieldsVisitor() {
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ return Status.YES;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java
new file mode 100644
index 0000000..2139ec3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A field visitor that allows loading a selection of the stored fields.
+ * The Uid field is always loaded.
+ * The class is optimized for source loading as it is a common use case.
+ */
+public class CustomFieldsVisitor extends FieldsVisitor {
+
+ private final boolean loadSource;
+ private final Set<String> fields;
+
+ public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
+ this.loadSource = loadSource;
+ this.fields = fields;
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+
+ if (loadSource && SourceFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ }
+ if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ }
+
+ return fields.contains(fieldInfo.name) ? Status.YES : Status.NO;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java
new file mode 100644
index 0000000..6825cd2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ */
+public abstract class FieldsVisitor extends StoredFieldVisitor {
+
+ protected BytesReference source;
+ protected Uid uid;
+ protected Map<String, List<Object>> fieldsValues;
+
+ public void postProcess(MapperService mapperService) {
+ if (uid != null) {
+ DocumentMapper documentMapper = mapperService.documentMapper(uid.type());
+ if (documentMapper != null) {
+ // we can derive the exact type for the mapping
+ postProcess(documentMapper);
+ return;
+ }
+ }
+ // can't derive exact mapping type
+ for (Map.Entry<String, List<Object>> entry : fields().entrySet()) {
+ FieldMappers fieldMappers = mapperService.indexName(entry.getKey());
+ if (fieldMappers == null) {
+ continue;
+ }
+ List<Object> fieldValues = entry.getValue();
+ for (int i = 0; i < fieldValues.size(); i++) {
+ fieldValues.set(i, fieldMappers.mapper().valueForSearch(fieldValues.get(i)));
+ }
+ }
+ }
+
+ public void postProcess(DocumentMapper documentMapper) {
+ for (Map.Entry<String, List<Object>> entry : fields().entrySet()) {
+ // indexName() may return null for unmapped field names, guard against an NPE
+ FieldMappers fieldMappers = documentMapper.mappers().indexName(entry.getKey());
+ FieldMapper<?> fieldMapper = fieldMappers == null ? null : fieldMappers.mapper();
+ if (fieldMapper == null) {
+ continue;
+ }
+ List<Object> fieldValues = entry.getValue();
+ for (int i = 0; i < fieldValues.size(); i++) {
+ fieldValues.set(i, fieldMapper.valueForSearch(fieldValues.get(i)));
+ }
+ }
+ }
+
+ @Override
+ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
+ if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
+ source = new BytesArray(value);
+ } else {
+ addValue(fieldInfo.name, new BytesRef(value));
+ }
+ }
+
+ @Override
+ public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+ if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
+ uid = Uid.createUid(value);
+ } else {
+ addValue(fieldInfo.name, value);
+ }
+ }
+
+ @Override
+ public void intField(FieldInfo fieldInfo, int value) throws IOException {
+ addValue(fieldInfo.name, value);
+ }
+
+ @Override
+ public void longField(FieldInfo fieldInfo, long value) throws IOException {
+ addValue(fieldInfo.name, value);
+ }
+
+ @Override
+ public void floatField(FieldInfo fieldInfo, float value) throws IOException {
+ addValue(fieldInfo.name, value);
+ }
+
+ @Override
+ public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
+ addValue(fieldInfo.name, value);
+ }
+
+ public BytesReference source() {
+ return source;
+ }
+
+ public Uid uid() {
+ return uid;
+ }
+
+ public Map<String, List<Object>> fields() {
+ return fieldsValues != null
+ ? fieldsValues
+ : ImmutableMap.<String, List<Object>>of();
+ }
+
+ public void reset() {
+ if (fieldsValues != null) fieldsValues.clear();
+ source = null;
+ uid = null;
+ }
+
+ private void addValue(String name, Object value) {
+ if (fieldsValues == null) {
+ fieldsValues = newHashMap();
+ }
+
+ List<Object> values = fieldsValues.get(name);
+ if (values == null) {
+ values = new ArrayList<Object>(2);
+ fieldsValues.put(name, values);
+ }
+ values.add(value);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/JustSourceFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/JustSourceFieldsVisitor.java
new file mode 100644
index 0000000..9efe48c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/JustSourceFieldsVisitor.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+
+import java.io.IOException;
+
+/**
+ */
+public class JustSourceFieldsVisitor extends FieldsVisitor {
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ }
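+ // once the source has been seen there is nothing left to load, so stop visiting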
+ return source != null ? Status.STOP : Status.NO;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java
new file mode 100644
index 0000000..b5caebc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+
+/**
+ */
+public class JustUidFieldsVisitor extends FieldsVisitor {
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ }
+ return uid != null ? Status.STOP : Status.NO;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java
new file mode 100644
index 0000000..c679354
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.mapper.FieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ */
+public class SingleFieldsVisitor extends FieldsVisitor {
+
+ private String field;
+
+ public SingleFieldsVisitor(String field) {
+ this.field = field;
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ // TODO we can potentially skip once we have processed the field; the question is whether it works for multi-valued fields
+ if (fieldInfo.name.equals(field)) {
+ return Status.YES;
+ } else {
+ return Status.NO;
+ }
+ }
+
+ public void reset(String field) {
+ this.field = field;
+ super.reset();
+ }
+
+ public void postProcess(FieldMapper mapper) {
+ if (fieldsValues == null) {
+ return;
+ }
+ List<Object> fieldValues = fieldsValues.get(mapper.names().indexName());
+ if (fieldValues == null) {
+ return;
+ }
+ for (int i = 0; i < fieldValues.size(); i++) {
+ fieldValues.set(i, mapper.valueForSearch(fieldValues.get(i)));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/UidAndRoutingFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/UidAndRoutingFieldsVisitor.java
new file mode 100644
index 0000000..a04bda7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/UidAndRoutingFieldsVisitor.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+
+/**
+ */
+public class UidAndRoutingFieldsVisitor extends FieldsVisitor {
+
+ private String routing;
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ if (RoutingFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ } else if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ }
+
+ return uid != null && routing != null ? Status.STOP : Status.NO;
+ }
+
+ @Override
+ public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+ if (RoutingFieldMapper.NAME.equals(fieldInfo.name)) {
+ routing = value;
+ } else {
+ super.stringField(fieldInfo, value);
+ }
+ }
+
+ public String routing() {
+ return routing;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/fieldvisitor/UidAndSourceFieldsVisitor.java b/src/main/java/org/elasticsearch/index/fieldvisitor/UidAndSourceFieldsVisitor.java
new file mode 100644
index 0000000..5d9ded7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/fieldvisitor/UidAndSourceFieldsVisitor.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+
+/**
+ */
+public class UidAndSourceFieldsVisitor extends FieldsVisitor {
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ } else if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
+ return Status.YES;
+ }
+
+ return uid != null && source != null ? Status.STOP : Status.NO;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/flush/FlushStats.java b/src/main/java/org/elasticsearch/index/flush/FlushStats.java
new file mode 100644
index 0000000..0566442
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/flush/FlushStats.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.flush;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+public class FlushStats implements Streamable, ToXContent {
+
+ private long total;
+
+ private long totalTimeInMillis;
+
+ public FlushStats() {
+
+ }
+
+ public FlushStats(long total, long totalTimeInMillis) {
+ this.total = total;
+ this.totalTimeInMillis = totalTimeInMillis;
+ }
+
+ public void add(long total, long totalTimeInMillis) {
+ this.total += total;
+ this.totalTimeInMillis += totalTimeInMillis;
+ }
+
+ public void add(FlushStats flushStats) {
+ if (flushStats == null) {
+ return;
+ }
+ this.total += flushStats.total;
+ this.totalTimeInMillis += flushStats.totalTimeInMillis;
+ }
+
+ /**
+ * The total number of flushes executed.
+ */
+ public long getTotal() {
+ return this.total;
+ }
+
+ /**
+ * The total time flushes have been executed (in milliseconds).
+ */
+ public long getTotalTimeInMillis() {
+ return this.totalTimeInMillis;
+ }
+
+ /**
+ * The total time flushes have been executed.
+ */
+ public TimeValue getTotalTime() {
+ return new TimeValue(totalTimeInMillis);
+ }
+
+ public static FlushStats readFlushStats(StreamInput in) throws IOException {
+ FlushStats flushStats = new FlushStats();
+ flushStats.readFrom(in);
+ return flushStats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.FLUSH);
+ builder.field(Fields.TOTAL, total);
+ builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, totalTimeInMillis);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString FLUSH = new XContentBuilderString("flush");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_TIME = new XContentBuilderString("total_time");
+ static final XContentBuilderString TOTAL_TIME_IN_MILLIS = new XContentBuilderString("total_time_in_millis");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readVLong();
+ totalTimeInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(total);
+ out.writeVLong(totalTimeInMillis);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/gateway/CommitPoint.java b/src/main/java/org/elasticsearch/index/gateway/CommitPoint.java
new file mode 100644
index 0000000..d5ec4fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/CommitPoint.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.store.StoreFileMetaData;
+
+import java.util.List;
+
+/**
+ *
+ */
+public class CommitPoint {
+
+ public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, ImmutableList.<CommitPoint.FileInfo>of(), ImmutableList.<CommitPoint.FileInfo>of());
+
+ public static class FileInfo {
+ private final String name;
+ private final String physicalName;
+ private final long length;
+ private final String checksum;
+
+ public FileInfo(String name, String physicalName, long length, String checksum) {
+ this.name = name;
+ this.physicalName = physicalName;
+ this.length = length;
+ this.checksum = checksum;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public String physicalName() {
+ return this.physicalName;
+ }
+
+ public long length() {
+ return length;
+ }
+
+ @Nullable
+ public String checksum() {
+ return checksum;
+ }
+
+ public boolean isSame(StoreFileMetaData md) {
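+ // without checksums on both sides we cannot establish that the files are identical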
+ if (checksum == null || md.checksum() == null) {
+ return false;
+ }
+ return length == md.length() && checksum.equals(md.checksum());
+ }
+ }
+
+ public static enum Type {
+ GENERATED,
+ SAVED
+ }
+
+ private final long version;
+
+ private final String name;
+
+ private final Type type;
+
+ private final ImmutableList<FileInfo> indexFiles;
+
+ private final ImmutableList<FileInfo> translogFiles;
+
+ public CommitPoint(long version, String name, Type type, List<FileInfo> indexFiles, List<FileInfo> translogFiles) {
+ this.version = version;
+ this.name = name;
+ this.type = type;
+ this.indexFiles = ImmutableList.copyOf(indexFiles);
+ this.translogFiles = ImmutableList.copyOf(translogFiles);
+ }
+
+ public long version() {
+ return version;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public Type type() {
+ return this.type;
+ }
+
+ public ImmutableList<FileInfo> indexFiles() {
+ return this.indexFiles;
+ }
+
+ public ImmutableList<FileInfo> translogFiles() {
+ return this.translogFiles;
+ }
+
+ public boolean containPhysicalIndexFile(String physicalName) {
+ return findPhysicalIndexFile(physicalName) != null;
+ }
+
+ public CommitPoint.FileInfo findPhysicalIndexFile(String physicalName) {
+ for (FileInfo file : indexFiles) {
+ if (file.physicalName().equals(physicalName)) {
+ return file;
+ }
+ }
+ return null;
+ }
+
+ public CommitPoint.FileInfo findNameFile(String name) {
+ CommitPoint.FileInfo fileInfo = findNameIndexFile(name);
+ if (fileInfo != null) {
+ return fileInfo;
+ }
+ return findNameTranslogFile(name);
+ }
+
+ public CommitPoint.FileInfo findNameIndexFile(String name) {
+ for (FileInfo file : indexFiles) {
+ if (file.name().equals(name)) {
+ return file;
+ }
+ }
+ return null;
+ }
+
+ public CommitPoint.FileInfo findNameTranslogFile(String name) {
+ for (FileInfo file : translogFiles) {
+ if (file.name().equals(name)) {
+ return file;
+ }
+ }
+ return null;
+ }
+}
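+// Illustrative usage (not part of the original source): a sketch of how a
+// gateway implementation might build a commit point and look up files by
+// logical or physical name. The file names and checksum are hypothetical.
+//
+//   CommitPoint.FileInfo segments = new CommitPoint.FileInfo(
+//           "__0", "segments_2", 312L, "abc123");
+//   CommitPoint commitPoint = new CommitPoint(
+//           1, "commit-1", CommitPoint.Type.GENERATED,
+//           ImmutableList.of(segments), ImmutableList.<CommitPoint.FileInfo>of());
+//   assert commitPoint.containPhysicalIndexFile("segments_2");
+//   assert commitPoint.findNameFile("__0") == segments;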
diff --git a/src/main/java/org/elasticsearch/index/gateway/CommitPoints.java b/src/main/java/org/elasticsearch/index/gateway/CommitPoints.java
new file mode 100644
index 0000000..de5c08d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/CommitPoints.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * An ordered collection of {@link CommitPoint}s (newest first), with helpers for
+ * serializing commit points to and from JSON.
+ */
+public class CommitPoints implements Iterable<CommitPoint> {
+
+ private final ImmutableList<CommitPoint> commitPoints;
+
+ public CommitPoints(List<CommitPoint> commitPoints) {
+        // sort descending by version, so that the newest commit point comes first
+        CollectionUtil.introSort(commitPoints, new Comparator<CommitPoint>() {
+ @Override
+ public int compare(CommitPoint o1, CommitPoint o2) {
+ return (o2.version() < o1.version() ? -1 : (o2.version() == o1.version() ? 0 : 1));
+ }
+ });
+ this.commitPoints = ImmutableList.copyOf(commitPoints);
+ }
+
+ public ImmutableList<CommitPoint> commits() {
+ return this.commitPoints;
+ }
+
+ public boolean hasVersion(long version) {
+ for (CommitPoint commitPoint : commitPoints) {
+ if (commitPoint.version() == version) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public CommitPoint.FileInfo findPhysicalIndexFile(String physicalName) {
+ for (CommitPoint commitPoint : commitPoints) {
+ CommitPoint.FileInfo fileInfo = commitPoint.findPhysicalIndexFile(physicalName);
+ if (fileInfo != null) {
+ return fileInfo;
+ }
+ }
+ return null;
+ }
+
+ public CommitPoint.FileInfo findNameFile(String name) {
+ for (CommitPoint commitPoint : commitPoints) {
+ CommitPoint.FileInfo fileInfo = commitPoint.findNameFile(name);
+ if (fileInfo != null) {
+ return fileInfo;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public Iterator<CommitPoint> iterator() {
+ return commitPoints.iterator();
+ }
+
+ public static byte[] toXContent(CommitPoint commitPoint) throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
+ builder.startObject();
+ builder.field("version", commitPoint.version());
+ builder.field("name", commitPoint.name());
+ builder.field("type", commitPoint.type().toString());
+
+ builder.startObject("index_files");
+ for (CommitPoint.FileInfo fileInfo : commitPoint.indexFiles()) {
+ builder.startObject(fileInfo.name());
+ builder.field("physical_name", fileInfo.physicalName());
+ builder.field("length", fileInfo.length());
+ if (fileInfo.checksum() != null) {
+ builder.field("checksum", fileInfo.checksum());
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.startObject("translog_files");
+ for (CommitPoint.FileInfo fileInfo : commitPoint.translogFiles()) {
+ builder.startObject(fileInfo.name());
+ builder.field("physical_name", fileInfo.physicalName());
+ builder.field("length", fileInfo.length());
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ return builder.bytes().toBytes();
+ }
+
+ public static CommitPoint fromXContent(byte[] data) throws Exception {
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(data);
+ try {
+ String currentFieldName = null;
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ // no data...
+ throw new IOException("No commit point data");
+ }
+ long version = -1;
+ String name = null;
+ CommitPoint.Type type = null;
+ List<CommitPoint.FileInfo> indexFiles = Lists.newArrayList();
+ List<CommitPoint.FileInfo> translogFiles = Lists.newArrayList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ List<CommitPoint.FileInfo> files = null;
+ if ("index_files".equals(currentFieldName) || "indexFiles".equals(currentFieldName)) {
+ files = indexFiles;
+ } else if ("translog_files".equals(currentFieldName) || "translogFiles".equals(currentFieldName)) {
+ files = translogFiles;
+ } else {
+ throw new IOException("Can't handle object with name [" + currentFieldName + "]");
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String fileName = currentFieldName;
+ String physicalName = null;
+ long size = -1;
+ String checksum = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("physical_name".equals(currentFieldName) || "physicalName".equals(currentFieldName)) {
+ physicalName = parser.text();
+ } else if ("length".equals(currentFieldName)) {
+ size = parser.longValue();
+ } else if ("checksum".equals(currentFieldName)) {
+ checksum = parser.text();
+ }
+ }
+ }
+ if (physicalName == null) {
+ throw new IOException("Malformed commit, missing physical_name for [" + fileName + "]");
+ }
+ if (size == -1) {
+ throw new IOException("Malformed commit, missing length for [" + fileName + "]");
+ }
+ files.add(new CommitPoint.FileInfo(fileName, physicalName, size, checksum));
+ }
+ }
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.longValue();
+ } else if ("name".equals(currentFieldName)) {
+ name = parser.text();
+ } else if ("type".equals(currentFieldName)) {
+ type = CommitPoint.Type.valueOf(parser.text());
+ }
+ }
+ }
+
+ if (version == -1) {
+ throw new IOException("Malformed commit, missing version");
+ }
+ if (name == null) {
+ throw new IOException("Malformed commit, missing name");
+ }
+ if (type == null) {
+ throw new IOException("Malformed commit, missing type");
+ }
+
+ return new CommitPoint(version, name, type, indexFiles, translogFiles);
+ } finally {
+ parser.close();
+ }
+ }
+}
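+// Illustrative usage (not part of the original source): commit points round-trip
+// through the JSON helpers above; the values here are made up. Note that only
+// index files carry a checksum in the serialized form, translog files do not.
+//
+//   CommitPoint original = new CommitPoint(2, "commit-2", CommitPoint.Type.SAVED,
+//           indexFiles, translogFiles);
+//   byte[] data = CommitPoints.toXContent(original);
+//   CommitPoint restored = CommitPoints.fromXContent(data);
+//   assert restored.version() == 2 && restored.name().equals("commit-2");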
diff --git a/src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java b/src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java
new file mode 100644
index 0000000..00fe6d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IgnoreGatewayRecoveryException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that this recovery attempt should be ignored (most
+ * likely because the shard has already been recovered).
+ */
+public class IgnoreGatewayRecoveryException extends IndexShardException {
+
+ public IgnoreGatewayRecoveryException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IgnoreGatewayRecoveryException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexGateway.java b/src/main/java/org/elasticsearch/index/gateway/IndexGateway.java
new file mode 100644
index 0000000..067b80d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexGateway.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.IndexComponent;
+
+/**
+ * An index-level gateway, mainly responsible for providing the shard-level
+ * gateway implementation used by each of the index's shards.
+ */
+public interface IndexGateway extends IndexComponent, CloseableIndexComponent {
+
+ String type();
+
+ Class<? extends IndexShardGateway> shardGatewayClass();
+
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java b/src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java
new file mode 100644
index 0000000..565fe00
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexGatewayModule.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.Gateway;
+
+/**
+ * Binds the index gateway implementation selected by the "index.gateway.type"
+ * setting, defaulting to the type suggested by the cluster-level gateway.
+ */
+public class IndexGatewayModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ private final Gateway gateway;
+
+ public IndexGatewayModule(Settings settings, Gateway gateway) {
+ this.settings = settings;
+ this.gateway = gateway;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(Modules.createModule(settings.getAsClass("index.gateway.type", gateway.suggestIndexGateway(), "org.elasticsearch.index.gateway.", "IndexGatewayModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ }
+}
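+// Illustrative note (not part of the original source): "index.gateway.type" is
+// resolved by getAsClass(...) above against the given package prefix and class
+// suffix, so a short type name expands to a module class, e.g. (assuming the
+// conventional <type>.<Type>IndexGatewayModule layout):
+//
+//   index.gateway.type: fs
+//   // -> org.elasticsearch.index.gateway.fs.FsIndexGatewayModule
+//
+// When the setting is absent, gateway.suggestIndexGateway() supplies the default.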
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java
new file mode 100644
index 0000000..555dadf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.shard.IndexShardComponent;
+import org.elasticsearch.index.translog.Translog;
+
+/**
+ * A shard-level gateway, responsible for snapshotting a shard's state to a
+ * backing store and recovering it from there.
+ */
+public interface IndexShardGateway extends IndexShardComponent, CloseableIndexComponent {
+
+ String type();
+
+ /**
+     * The last or ongoing recovery status.
+ */
+ RecoveryStatus recoveryStatus();
+
+ /**
+     * The status of the last snapshot performed. Can be <tt>null</tt>.
+ */
+ SnapshotStatus lastSnapshotStatus();
+
+ /**
+     * The status of the snapshot currently being performed. Can be <tt>null</tt>,
+     * indicating that no snapshot is currently being executed.
+ */
+ SnapshotStatus currentSnapshotStatus();
+
+ /**
+ * Recovers the state of the shard from the gateway.
+ */
+ void recover(boolean indexShouldExists, RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException;
+
+ /**
+ * Snapshots the given shard into the gateway.
+ */
+ SnapshotStatus snapshot(Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException;
+
+ /**
+     * Returns <tt>true</tt> if snapshotting is required on this gateway at all; if not, the gateway mainly handles recovery.
+ */
+ boolean requiresSnapshot();
+
+ /**
+ * Returns <tt>true</tt> if this gateway requires scheduling management for snapshot
+ * operations.
+ */
+ boolean requiresSnapshotScheduling();
+
+ SnapshotLock obtainSnapshotLock() throws Exception;
+
+ public static interface SnapshotLock {
+ void release();
+ }
+
+ public static final SnapshotLock NO_SNAPSHOT_LOCK = new SnapshotLock() {
+ @Override
+ public void release() {
+ }
+ };
+
+ public static class Snapshot {
+ private final SnapshotIndexCommit indexCommit;
+ private final Translog.Snapshot translogSnapshot;
+
+ private final long lastIndexVersion;
+ private final long lastTranslogId;
+ private final long lastTranslogLength;
+ private final int lastTotalTranslogOperations;
+
+ public Snapshot(SnapshotIndexCommit indexCommit, Translog.Snapshot translogSnapshot, long lastIndexVersion, long lastTranslogId, long lastTranslogLength, int lastTotalTranslogOperations) {
+ this.indexCommit = indexCommit;
+ this.translogSnapshot = translogSnapshot;
+ this.lastIndexVersion = lastIndexVersion;
+ this.lastTranslogId = lastTranslogId;
+ this.lastTranslogLength = lastTranslogLength;
+ this.lastTotalTranslogOperations = lastTotalTranslogOperations;
+ }
+
+ /**
+         * Returns <tt>true</tt> if the index has changed since the latest snapshot.
+ */
+ public boolean indexChanged() {
+ return lastIndexVersion != indexCommit.getGeneration();
+ }
+
+ /**
+         * Returns <tt>true</tt> if a new transaction log has been created. Note: check this <b>before</b>
+         * checking {@link #sameTranslogNewOperations()}.
+ */
+ public boolean newTranslogCreated() {
+ return translogSnapshot.translogId() != lastTranslogId;
+ }
+
+ /**
+         * Returns <tt>true</tt> if the same translog still exists but new operations have been appended to it.
+         * Throws {@link org.elasticsearch.ElasticsearchIllegalStateException} if {@link #newTranslogCreated()}
+         * is <tt>true</tt>, so always check that first.
+ */
+ public boolean sameTranslogNewOperations() {
+ if (newTranslogCreated()) {
+ throw new ElasticsearchIllegalStateException("Should not be called when there is a new translog");
+ }
+ return translogSnapshot.length() > lastTranslogLength;
+ }
+
+ public SnapshotIndexCommit indexCommit() {
+ return indexCommit;
+ }
+
+ public Translog.Snapshot translogSnapshot() {
+ return translogSnapshot;
+ }
+
+ public long lastIndexVersion() {
+ return lastIndexVersion;
+ }
+
+ public long lastTranslogId() {
+ return lastTranslogId;
+ }
+
+ public long lastTranslogLength() {
+ return lastTranslogLength;
+ }
+
+ public int lastTotalTranslogOperations() {
+ return this.lastTotalTranslogOperations;
+ }
+ }
+}
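+// Illustrative usage (not part of the original source): the order in which a
+// caller is expected to interrogate a Snapshot, honoring the documented
+// "check newTranslogCreated() first" contract.
+//
+//   if (snapshot.indexChanged()) {
+//       // copy the new index files to the gateway
+//   }
+//   if (snapshot.newTranslogCreated()) {
+//       // write the new translog from scratch
+//   } else if (snapshot.sameTranslogNewOperations()) {
+//       // append only the operations beyond lastTranslogLength()
+//   }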
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java
new file mode 100644
index 0000000..5711451
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * A base exception for failures in a shard's gateway.
+ */
+public class IndexShardGatewayException extends IndexShardException {
+
+ public IndexShardGatewayException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IndexShardGatewayException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java
new file mode 100644
index 0000000..da73b04
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayModule.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * Binds the shard-level gateway implementation provided by the index gateway.
+ */
+public class IndexShardGatewayModule extends AbstractModule {
+
+ private final IndexGateway indexGateway;
+
+ public IndexShardGatewayModule(IndexGateway indexGateway) {
+ this.indexGateway = indexGateway;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndexShardGateway.class)
+ .to(indexGateway.shardGatewayClass())
+ .asEagerSingleton();
+
+ bind(IndexShardGatewayService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java
new file mode 100644
index 0000000..72c414c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayRecoveryException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that recovering a shard from its gateway failed.
+ */
+public class IndexShardGatewayRecoveryException extends IndexShardGatewayException {
+
+ public IndexShardGatewayRecoveryException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IndexShardGatewayRecoveryException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java
new file mode 100644
index 0000000..660da8a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.SnapshotFailedEngineException;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.snapshots.IndexShardSnapshotAndRestoreService;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.ScheduledFuture;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+
+/**
+ * Manages a shard's gateway: performs recovery from the gateway on startup and,
+ * when the gateway requires it, schedules periodic snapshots to the gateway.
+ */
+public class IndexShardGatewayService extends AbstractIndexShardComponent implements CloseableIndexComponent {
+
+ private final boolean snapshotOnClose;
+
+ private final ThreadPool threadPool;
+
+ private final IndexSettingsService indexSettingsService;
+
+ private final InternalIndexShard indexShard;
+
+ private final IndexShardGateway shardGateway;
+
+ private final IndexShardSnapshotAndRestoreService snapshotService;
+
+
+ private volatile long lastIndexVersion;
+
+ private volatile long lastTranslogId = -1;
+
+ private volatile int lastTotalTranslogOperations;
+
+ private volatile long lastTranslogLength;
+
+ private volatile TimeValue snapshotInterval;
+
+ private volatile ScheduledFuture snapshotScheduleFuture;
+
+ private RecoveryStatus recoveryStatus;
+
+ private IndexShardGateway.SnapshotLock snapshotLock;
+
+ private final SnapshotRunnable snapshotRunnable = new SnapshotRunnable();
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+
+ @Inject
+ public IndexShardGatewayService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService,
+ ThreadPool threadPool, IndexShard indexShard, IndexShardGateway shardGateway, IndexShardSnapshotAndRestoreService snapshotService,
+ RepositoriesService repositoriesService) {
+ super(shardId, indexSettings);
+ this.threadPool = threadPool;
+ this.indexSettingsService = indexSettingsService;
+ this.indexShard = (InternalIndexShard) indexShard;
+ this.shardGateway = shardGateway;
+ this.snapshotService = snapshotService;
+
+ this.snapshotOnClose = componentSettings.getAsBoolean("snapshot_on_close", true);
+ this.snapshotInterval = componentSettings.getAsTime("snapshot_interval", TimeValue.timeValueSeconds(10));
+
+ indexSettingsService.addListener(applySettings);
+ }
+
+ public static final String INDEX_GATEWAY_SNAPSHOT_INTERVAL = "index.gateway.snapshot_interval";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ TimeValue snapshotInterval = settings.getAsTime(INDEX_GATEWAY_SNAPSHOT_INTERVAL, IndexShardGatewayService.this.snapshotInterval);
+ if (!snapshotInterval.equals(IndexShardGatewayService.this.snapshotInterval)) {
+ logger.info("updating snapshot_interval from [{}] to [{}]", IndexShardGatewayService.this.snapshotInterval, snapshotInterval);
+ IndexShardGatewayService.this.snapshotInterval = snapshotInterval;
+ if (snapshotScheduleFuture != null) {
+ snapshotScheduleFuture.cancel(false);
+ snapshotScheduleFuture = null;
+ }
+ scheduleSnapshotIfNeeded();
+ }
+ }
+ }
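+    // Illustrative note (not part of the original source): the listener above makes
+    // the interval updatable at runtime, e.g. through the update-settings API
+    // (the index name is hypothetical):
+    //
+    //   PUT /my_index/_settings
+    //   { "index.gateway.snapshot_interval": "30s" }
+    //
+    // A changed value cancels any pending schedule and reschedules with the new interval.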
+
+ /**
+     * Should be called when the shard routing state has changed (note: call this after the state has been set on the shard).
+ */
+ public void routingStateChanged() {
+ scheduleSnapshotIfNeeded();
+ }
+
+ public static interface RecoveryListener {
+ void onRecoveryDone();
+
+ void onIgnoreRecovery(String reason);
+
+ void onRecoveryFailed(IndexShardGatewayRecoveryException e);
+ }
+
+    public RecoveryStatus recoveryStatus() {
+        if (recoveryStatus == null) {
+            // recovery has not been started yet
+            return null;
+        }
+        if (recoveryStatus.startTime() > 0 && recoveryStatus.stage() != RecoveryStatus.Stage.DONE) {
+            // recovery is still running, report the time elapsed so far
+            recoveryStatus.time(System.currentTimeMillis() - recoveryStatus.startTime());
+        }
+        return recoveryStatus;
+    }
+
+ public SnapshotStatus snapshotStatus() {
+ SnapshotStatus snapshotStatus = shardGateway.currentSnapshotStatus();
+ if (snapshotStatus != null) {
+ return snapshotStatus;
+ }
+ return shardGateway.lastSnapshotStatus();
+ }
+
+ /**
+ * Recovers the state of the shard from the gateway.
+ */
+ public void recover(final boolean indexShouldExists, final RecoveryListener listener) throws IndexShardGatewayRecoveryException, IgnoreGatewayRecoveryException {
+ if (indexShard.state() == IndexShardState.CLOSED) {
+ // got closed on us, just ignore this recovery
+ listener.onIgnoreRecovery("shard closed");
+ return;
+ }
+ if (!indexShard.routingEntry().primary()) {
+ listener.onRecoveryFailed(new IndexShardGatewayRecoveryException(shardId, "Trying to recover when the shard is in backup state", null));
+ return;
+ }
+ try {
+ if (indexShard.routingEntry().restoreSource() != null) {
+ indexShard.recovering("from snapshot");
+ } else {
+ indexShard.recovering("from gateway");
+ }
+ } catch (IllegalIndexShardStateException e) {
+ // that's fine, since we might be called concurrently, just ignore this, we are already recovering
+ listener.onIgnoreRecovery("already in recovering process, " + e.getMessage());
+ return;
+ }
+
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ recoveryStatus = new RecoveryStatus();
+ recoveryStatus.updateStage(RecoveryStatus.Stage.INIT);
+
+ try {
+ if (indexShard.routingEntry().restoreSource() != null) {
+ logger.debug("restoring from {} ...", indexShard.routingEntry().restoreSource());
+ snapshotService.restore(recoveryStatus);
+ } else {
+ logger.debug("starting recovery from {} ...", shardGateway);
+ shardGateway.recover(indexShouldExists, recoveryStatus);
+ }
+
+ lastIndexVersion = recoveryStatus.index().version();
+ lastTranslogId = -1;
+ lastTranslogLength = 0;
+ lastTotalTranslogOperations = recoveryStatus.translog().currentTranslogOperations();
+
+ // start the shard if the gateway has not started it already. Note that if the gateway
+ // moved shard to POST_RECOVERY, it may have been started as well if:
+ // 1) master sent a new cluster state indicating shard is initializing
+ // 2) IndicesClusterStateService#applyInitializingShard will send a shard started event
+ // 3) Master will mark shard as started and this will be processed locally.
+ IndexShardState shardState = indexShard.state();
+ if (shardState != IndexShardState.POST_RECOVERY && shardState != IndexShardState.STARTED) {
+ indexShard.postRecovery("post recovery from gateway");
+ }
+ // refresh the shard
+ indexShard.refresh(new Engine.Refresh("post_gateway").force(true));
+
+ recoveryStatus.time(System.currentTimeMillis() - recoveryStatus.startTime());
+ recoveryStatus.updateStage(RecoveryStatus.Stage.DONE);
+
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("recovery completed from ").append(shardGateway).append(", took [").append(timeValueMillis(recoveryStatus.time())).append("]\n");
+ sb.append(" index : files [").append(recoveryStatus.index().numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(recoveryStatus.index().totalSize())).append("], took[").append(TimeValue.timeValueMillis(recoveryStatus.index().time())).append("]\n");
+ sb.append(" : recovered_files [").append(recoveryStatus.index().numberOfRecoveredFiles()).append("] with total_size [").append(new ByteSizeValue(recoveryStatus.index().recoveredTotalSize())).append("]\n");
+ sb.append(" : reusing_files [").append(recoveryStatus.index().numberOfReusedFiles()).append("] with total_size [").append(new ByteSizeValue(recoveryStatus.index().reusedTotalSize())).append("]\n");
+ sb.append(" start : took [").append(TimeValue.timeValueMillis(recoveryStatus.start().time())).append("], check_index [").append(timeValueMillis(recoveryStatus.start().checkIndexTime())).append("]\n");
+ sb.append(" translog : number_of_operations [").append(recoveryStatus.translog().currentTranslogOperations()).append("], took [").append(TimeValue.timeValueMillis(recoveryStatus.translog().time())).append("]");
+ logger.trace(sb.toString());
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("recovery completed from [{}], took [{}]", shardGateway, timeValueMillis(recoveryStatus.time()));
+ }
+ listener.onRecoveryDone();
+ scheduleSnapshotIfNeeded();
+ } catch (IndexShardGatewayRecoveryException e) {
+ if (indexShard.state() == IndexShardState.CLOSED) {
+ // got closed on us, just ignore this recovery
+ listener.onIgnoreRecovery("shard closed");
+ return;
+ }
+ if ((e.getCause() instanceof IndexShardClosedException) || (e.getCause() instanceof IndexShardNotStartedException)) {
+ // got closed on us, just ignore this recovery
+ listener.onIgnoreRecovery("shard closed");
+ return;
+ }
+ listener.onRecoveryFailed(e);
+ } catch (IndexShardClosedException e) {
+ listener.onIgnoreRecovery("shard closed");
+ } catch (IndexShardNotStartedException e) {
+ listener.onIgnoreRecovery("shard closed");
+ } catch (Exception e) {
+ if (indexShard.state() == IndexShardState.CLOSED) {
+ // got closed on us, just ignore this recovery
+ listener.onIgnoreRecovery("shard closed");
+ return;
+ }
+ listener.onRecoveryFailed(new IndexShardGatewayRecoveryException(shardId, "failed recovery", e));
+ }
+ }
+ });
+ }
+
+ /**
+ * Snapshots the given shard into the gateway.
+ */
+ public synchronized void snapshot(final String reason) throws IndexShardGatewaySnapshotFailedException {
+ if (!indexShard.routingEntry().primary()) {
+ return;
+// throw new IndexShardGatewaySnapshotNotAllowedException(shardId, "Snapshot not allowed on non primary shard");
+ }
+ if (indexShard.routingEntry().relocating()) {
+ // do not snapshot when in the process of relocation of primaries so we won't get conflicts
+ return;
+ }
+ if (indexShard.state() == IndexShardState.CREATED) {
+ // shard has just been created, ignore it and return
+ return;
+ }
+ if (indexShard.state() == IndexShardState.RECOVERING) {
+ // shard is recovering, don't snapshot
+ return;
+ }
+
+ if (snapshotLock == null) {
+ try {
+ snapshotLock = shardGateway.obtainSnapshotLock();
+ } catch (Exception e) {
+ logger.warn("failed to obtain snapshot lock, ignoring snapshot", e);
+ return;
+ }
+ }
+
+ try {
+ SnapshotStatus snapshotStatus = indexShard.snapshot(new Engine.SnapshotHandler<SnapshotStatus>() {
+ @Override
+ public SnapshotStatus snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
+ if (lastIndexVersion != snapshotIndexCommit.getGeneration() || lastTranslogId != translogSnapshot.translogId() || lastTranslogLength < translogSnapshot.length()) {
+
+ logger.debug("snapshot ({}) to {} ...", reason, shardGateway);
+ SnapshotStatus snapshotStatus =
+ shardGateway.snapshot(new IndexShardGateway.Snapshot(snapshotIndexCommit, translogSnapshot, lastIndexVersion, lastTranslogId, lastTranslogLength, lastTotalTranslogOperations));
+
+ lastIndexVersion = snapshotIndexCommit.getGeneration();
+ lastTranslogId = translogSnapshot.translogId();
+ lastTranslogLength = translogSnapshot.length();
+ lastTotalTranslogOperations = translogSnapshot.estimatedTotalOperations();
+ return snapshotStatus;
+ }
+ return null;
+ }
+ });
+ if (snapshotStatus != null) {
+ if (logger.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("snapshot (").append(reason).append(") completed to ").append(shardGateway).append(", took [").append(TimeValue.timeValueMillis(snapshotStatus.time())).append("]\n");
+ sb.append(" index : version [").append(lastIndexVersion).append("], number_of_files [").append(snapshotStatus.index().numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(snapshotStatus.index().totalSize())).append("], took [").append(TimeValue.timeValueMillis(snapshotStatus.index().time())).append("]\n");
+ sb.append(" translog : id [").append(lastTranslogId).append("], number_of_operations [").append(snapshotStatus.translog().expectedNumberOfOperations()).append("], took [").append(TimeValue.timeValueMillis(snapshotStatus.translog().time())).append("]");
+ logger.debug(sb.toString());
+ }
+ }
+ } catch (SnapshotFailedEngineException e) {
+ if (e.getCause() instanceof IllegalStateException) {
+ // ignore, that's fine, snapshot has not started yet
+ } else {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to snapshot", e);
+ }
+ } catch (IllegalIndexShardStateException e) {
+ // ignore, that's fine, snapshot has not started yet
+ } catch (IndexShardGatewaySnapshotFailedException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to snapshot", e);
+ }
+ }
+
+ public void snapshotOnClose() {
+ if (shardGateway.requiresSnapshot() && snapshotOnClose) {
+ try {
+ snapshot("shutdown");
+ } catch (Exception e) {
+ logger.warn("failed to snapshot on close", e);
+ }
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ indexSettingsService.removeListener(applySettings);
+ if (snapshotScheduleFuture != null) {
+ snapshotScheduleFuture.cancel(true);
+ snapshotScheduleFuture = null;
+ }
+ shardGateway.close();
+ if (snapshotLock != null) {
+ snapshotLock.release();
+ }
+ }
+
+ private synchronized void scheduleSnapshotIfNeeded() {
+ if (!shardGateway.requiresSnapshot()) {
+ return;
+ }
+ if (!shardGateway.requiresSnapshotScheduling()) {
+ return;
+ }
+ if (!indexShard.routingEntry().primary()) {
+ // we only do snapshotting on the primary shard
+ return;
+ }
+ if (!indexShard.routingEntry().started()) {
+ // we only schedule when the cluster assumes we have started
+ return;
+ }
+ if (snapshotScheduleFuture != null) {
+ // we are already scheduling this one, ignore
+ return;
+ }
+ if (snapshotInterval.millis() != -1) {
+ // we need to schedule snapshot
+ if (logger.isDebugEnabled()) {
+ logger.debug("scheduling snapshot every [{}]", snapshotInterval);
+ }
+ snapshotScheduleFuture = threadPool.schedule(snapshotInterval, ThreadPool.Names.SNAPSHOT, snapshotRunnable);
+ }
+ }
+
+ private class SnapshotRunnable implements Runnable {
+ @Override
+ public synchronized void run() {
+ try {
+ snapshot("scheduled");
+ } catch (Throwable e) {
+ if (indexShard.state() == IndexShardState.CLOSED) {
+ return;
+ }
+ logger.warn("failed to snapshot (scheduled)", e);
+ }
+ // schedule it again
+ if (indexShard.state() != IndexShardState.CLOSED) {
+ snapshotScheduleFuture = threadPool.schedule(snapshotInterval, ThreadPool.Names.SNAPSHOT, this);
+ }
+ }
+ }
+}
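+// Illustrative note (not part of the original source): setting
+// "index.gateway.snapshot_interval" to -1 disables scheduled snapshots, since
+// scheduleSnapshotIfNeeded() only schedules when the interval is not -1; a final
+// snapshot may still be taken on close when "snapshot_on_close" is true.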
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java
new file mode 100644
index 0000000..6d468c2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotFailedException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that snapshotting a shard to its gateway failed.
+ */
+public class IndexShardGatewaySnapshotFailedException extends IndexShardGatewayException {
+
+ public IndexShardGatewaySnapshotFailedException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java
new file mode 100644
index 0000000..64a147b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewaySnapshotNotAllowedException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception indicating that a snapshot is not allowed for the shard in its
+ * current state (for example, on a non-primary shard).
+ */
+public class IndexShardGatewaySnapshotNotAllowedException extends IndexShardGatewayException {
+
+ public IndexShardGatewaySnapshotNotAllowedException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java b/src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java
new file mode 100644
index 0000000..d74afec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/RecoveryStatus.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Tracks the progress of recovering a shard from its gateway, broken down into
+ * the index, start, and translog phases.
+ */
+public class RecoveryStatus {
+
+ public static enum Stage {
+ INIT,
+ INDEX,
+ START,
+ TRANSLOG,
+ DONE
+ }
+
+ private Stage stage = Stage.INIT;
+
+ private long startTime = System.currentTimeMillis();
+
+ private long time;
+
+ private Index index = new Index();
+
+ private Translog translog = new Translog();
+
+ private Start start = new Start();
+
+ public Stage stage() {
+ return this.stage;
+ }
+
+ public RecoveryStatus updateStage(Stage stage) {
+ this.stage = stage;
+ return this;
+ }
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public Index index() {
+ return index;
+ }
+
+ public Start start() {
+ return this.start;
+ }
+
+ public Translog translog() {
+ return translog;
+ }
+
+ public static class Start {
+ private long startTime;
+ private long time;
+ private long checkIndexTime;
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public long checkIndexTime() {
+ return checkIndexTime;
+ }
+
+ public void checkIndexTime(long checkIndexTime) {
+ this.checkIndexTime = checkIndexTime;
+ }
+ }
+
+ public static class Translog {
+ private long startTime = 0;
+ private long time;
+ private volatile int currentTranslogOperations = 0;
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public void addTranslogOperations(int count) {
+ this.currentTranslogOperations += count;
+ }
+
+ public int currentTranslogOperations() {
+ return this.currentTranslogOperations;
+ }
+ }
+
+ public static class Index {
+ private long startTime = 0;
+ private long time = 0;
+
+ private long version = -1;
+ private int numberOfFiles = 0;
+ private long totalSize = 0;
+ private int numberOfReusedFiles = 0;
+ private long reusedTotalSize = 0;
+ private AtomicLong currentFilesSize = new AtomicLong();
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public void files(int numberOfFiles, long totalSize, int numberOfReusedFiles, long reusedTotalSize) {
+ this.numberOfFiles = numberOfFiles;
+ this.totalSize = totalSize;
+ this.numberOfReusedFiles = numberOfReusedFiles;
+ this.reusedTotalSize = reusedTotalSize;
+ }
+
+ public int numberOfFiles() {
+ return numberOfFiles;
+ }
+
+ public int numberOfRecoveredFiles() {
+ return numberOfFiles - numberOfReusedFiles;
+ }
+
+ public long totalSize() {
+ return this.totalSize;
+ }
+
+ public int numberOfReusedFiles() {
+ return numberOfReusedFiles;
+ }
+
+ public long reusedTotalSize() {
+ return this.reusedTotalSize;
+ }
+
+ public long recoveredTotalSize() {
+ return totalSize - reusedTotalSize;
+ }
+
+ public void updateVersion(long version) {
+ this.version = version;
+ }
+
+ public long currentFilesSize() {
+ return this.currentFilesSize.get();
+ }
+
+ public void addCurrentFilesSize(long updatedSize) {
+ this.currentFilesSize.addAndGet(updatedSize);
+ }
+ }
+}
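+// Illustrative usage (not part of the original source): recovered counts are
+// derived as totals minus reused values, e.g. with made-up numbers:
+//
+//   RecoveryStatus.Index index = new RecoveryStatus().index();
+//   index.files(10, 1000L, 4, 400L); // 10 files / 1000 bytes total, of which 4 files / 400 bytes reused
+//   assert index.numberOfRecoveredFiles() == 6; // 10 - 4
+//   assert index.recoveredTotalSize() == 600L;  // 1000 - 400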
diff --git a/src/main/java/org/elasticsearch/index/gateway/SnapshotStatus.java b/src/main/java/org/elasticsearch/index/gateway/SnapshotStatus.java
new file mode 100644
index 0000000..324f555
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/SnapshotStatus.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+/**
+ * Tracks the progress of snapshotting a shard to its gateway, broken down into
+ * the index and translog phases.
+ */
+public class SnapshotStatus {
+
+ public static enum Stage {
+ NONE,
+ INDEX,
+ TRANSLOG,
+ FINALIZE,
+ DONE,
+ FAILURE
+ }
+
+ private Stage stage = Stage.NONE;
+
+ private long startTime;
+
+ private long time;
+
+ private Index index = new Index();
+
+ private Translog translog = new Translog();
+
+ private Throwable failure;
+
+ public Stage stage() {
+ return this.stage;
+ }
+
+ public SnapshotStatus updateStage(Stage stage) {
+ this.stage = stage;
+ return this;
+ }
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public void failed(Throwable failure) {
+ this.failure = failure;
+ }
+
+ public Index index() {
+ return index;
+ }
+
+ public Translog translog() {
+ return translog;
+ }
+
+ public static class Index {
+ private long startTime;
+ private long time;
+
+ private int numberOfFiles;
+ private long totalSize;
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public void files(int numberOfFiles, long totalSize) {
+ this.numberOfFiles = numberOfFiles;
+ this.totalSize = totalSize;
+ }
+
+ public int numberOfFiles() {
+ return numberOfFiles;
+ }
+
+ public long totalSize() {
+ return totalSize;
+ }
+ }
+
+ public static class Translog {
+ private long startTime;
+ private long time;
+ private int expectedNumberOfOperations;
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public void time(long time) {
+ this.time = time;
+ }
+
+ public int expectedNumberOfOperations() {
+ return expectedNumberOfOperations;
+ }
+
+ public void expectedNumberOfOperations(int expectedNumberOfOperations) {
+ this.expectedNumberOfOperations = expectedNumberOfOperations;
+ }
+ }
+}
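+// Illustrative note (not part of the original source): a snapshot is expected to
+// advance NONE -> INDEX -> TRANSLOG -> FINALIZE and end in DONE, or move to
+// FAILURE (with failed(Throwable) recording the cause) at whichever stage it breaks.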
diff --git a/src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexGateway.java b/src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexGateway.java
new file mode 100644
index 0000000..4adbe4a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexGateway.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.blobstore;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.gateway.blobstore.BlobStoreGateway;
+import org.elasticsearch.gateway.none.NoneGateway;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * An index gateway backed by a {@link BlobStore}, storing index data under a
+ * per-index blob path.
+ */
+public abstract class BlobStoreIndexGateway extends AbstractIndexComponent implements IndexGateway {
+
+ private final BlobStoreGateway gateway;
+
+ private final BlobStore blobStore;
+
+ private final BlobPath indexPath;
+
+ protected ByteSizeValue chunkSize;
+
+ protected BlobStoreIndexGateway(Index index, @IndexSettings Settings indexSettings, Gateway gateway) {
+ super(index, indexSettings);
+
+ if (gateway.type().equals(NoneGateway.TYPE)) {
+ logger.warn("index gateway is configured, but no cluster level gateway configured, cluster level metadata will be lost on full shutdown");
+ }
+
+ this.gateway = (BlobStoreGateway) gateway;
+ this.blobStore = this.gateway.blobStore();
+
+ this.chunkSize = componentSettings.getAsBytesSize("chunk_size", this.gateway.chunkSize());
+
+ this.indexPath = this.gateway.basePath().add("indices").add(index.name());
+ }
+
+ @Override
+ public String toString() {
+ return type() + "://" + blobStore + "/" + indexPath;
+ }
+
+ public BlobStore blobStore() {
+ return blobStore;
+ }
+
+ public ByteSizeValue chunkSize() {
+ return this.chunkSize;
+ }
+
+ public BlobPath shardPath(int shardId) {
+ return indexPath.add(Integer.toString(shardId));
+ }
+
+ public static BlobPath shardPath(BlobPath basePath, String index, int shardId) {
+ return basePath.add("indices").add(index).add(Integer.toString(shardId));
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ }
+}
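+// Illustrative usage (not part of the original source): shard data lives under
+// basePath/indices/{index}/{shard}, so with a hypothetical index "twitter":
+//
+//   BlobPath path = BlobStoreIndexGateway.shardPath(basePath, "twitter", 0);
+//   // -> basePath/indices/twitter/0, matching the indexPath built above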
diff --git a/src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexShardGateway.java
new file mode 100644
index 0000000..348fd4a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexShardGateway.java
@@ -0,0 +1,879 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.blobstore;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.blobstore.*;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
+import org.elasticsearch.common.lucene.store.ThreadSafeInputStreamIndexInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.gateway.*;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStreams;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * A shard gateway backed by a {@link BlobStore}, snapshotting the shard's index
+ * files and translog into a per-shard blob container.
+ */
+public abstract class BlobStoreIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway {
+
+ protected final ThreadPool threadPool;
+
+ protected final InternalIndexShard indexShard;
+
+ protected final Store store;
+
+ protected final ByteSizeValue chunkSize;
+
+ protected final BlobStore blobStore;
+
+ protected final BlobPath shardPath;
+
+ protected final ImmutableBlobContainer blobContainer;
+
+ private volatile RecoveryStatus recoveryStatus;
+
+ private volatile SnapshotStatus lastSnapshotStatus;
+
+ private volatile SnapshotStatus currentSnapshotStatus;
+
+ protected BlobStoreIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, IndexGateway indexGateway,
+ IndexShard indexShard, Store store) {
+ super(shardId, indexSettings);
+
+ this.threadPool = threadPool;
+ this.indexShard = (InternalIndexShard) indexShard;
+ this.store = store;
+
+ BlobStoreIndexGateway blobStoreIndexGateway = (BlobStoreIndexGateway) indexGateway;
+
+ this.chunkSize = blobStoreIndexGateway.chunkSize(); // can be null -> no chunking
+ this.blobStore = blobStoreIndexGateway.blobStore();
+ this.shardPath = blobStoreIndexGateway.shardPath(shardId.id());
+
+ this.blobContainer = blobStore.immutableBlobContainer(shardPath);
+
+ this.recoveryStatus = new RecoveryStatus();
+ }
+
+ @Override
+ public RecoveryStatus recoveryStatus() {
+ return this.recoveryStatus;
+ }
+
+ @Override
+ public String toString() {
+ return type() + "://" + blobStore + "/" + shardPath;
+ }
+
+ @Override
+ public boolean requiresSnapshot() {
+ return true;
+ }
+
+ @Override
+ public boolean requiresSnapshotScheduling() {
+ return true;
+ }
+
+ @Override
+ public SnapshotLock obtainSnapshotLock() throws Exception {
+ return NO_SNAPSHOT_LOCK;
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ }
+
+ @Override
+ public SnapshotStatus lastSnapshotStatus() {
+ return this.lastSnapshotStatus;
+ }
+
+ @Override
+ public SnapshotStatus currentSnapshotStatus() {
+ SnapshotStatus snapshotStatus = this.currentSnapshotStatus;
+ if (snapshotStatus == null) {
+ return snapshotStatus;
+ }
+        if (snapshotStatus.stage() != SnapshotStatus.Stage.DONE && snapshotStatus.stage() != SnapshotStatus.Stage.FAILURE) {
+ snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
+ }
+ return snapshotStatus;
+ }
+
+ @Override
+ public SnapshotStatus snapshot(final Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException {
+ currentSnapshotStatus = new SnapshotStatus();
+ currentSnapshotStatus.startTime(System.currentTimeMillis());
+
+ try {
+ doSnapshot(snapshot);
+ currentSnapshotStatus.time(System.currentTimeMillis() - currentSnapshotStatus.startTime());
+ currentSnapshotStatus.updateStage(SnapshotStatus.Stage.DONE);
+ } catch (Exception e) {
+ currentSnapshotStatus.time(System.currentTimeMillis() - currentSnapshotStatus.startTime());
+ currentSnapshotStatus.updateStage(SnapshotStatus.Stage.FAILURE);
+ currentSnapshotStatus.failed(e);
+ if (e instanceof IndexShardGatewaySnapshotFailedException) {
+ throw (IndexShardGatewaySnapshotFailedException) e;
+ } else {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, e.getMessage(), e);
+ }
+ } finally {
+ this.lastSnapshotStatus = currentSnapshotStatus;
+ this.currentSnapshotStatus = null;
+ }
+ return this.lastSnapshotStatus;
+ }
+
+ private void doSnapshot(final Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException {
+ ImmutableMap<String, BlobMetaData> blobs;
+ try {
+ blobs = blobContainer.listBlobs();
+ } catch (IOException e) {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, "failed to list blobs", e);
+ }
+
+ long generation = findLatestFileNameGeneration(blobs);
+ CommitPoints commitPoints = buildCommitPoints(blobs);
+
+ currentSnapshotStatus.index().startTime(System.currentTimeMillis());
+ currentSnapshotStatus.updateStage(SnapshotStatus.Stage.INDEX);
+
+ final SnapshotIndexCommit snapshotIndexCommit = snapshot.indexCommit();
+ final Translog.Snapshot translogSnapshot = snapshot.translogSnapshot();
+
+ final CountDownLatch indexLatch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
+ final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+ final List<CommitPoint.FileInfo> indexCommitPointFiles = Lists.newArrayList();
+
+ int indexNumberOfFiles = 0;
+ long indexTotalFilesSize = 0;
+ for (final String fileName : snapshotIndexCommit.getFiles()) {
+ StoreFileMetaData md;
+ try {
+ md = store.metaData(fileName);
+ } catch (IOException e) {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to get store file metadata", e);
+ }
+
+ boolean snapshotRequired = false;
+ if (snapshot.indexChanged() && fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
+                snapshotRequired = true; // always snapshot the segments file if the index changed
+ }
+
+ CommitPoint.FileInfo fileInfo = commitPoints.findPhysicalIndexFile(fileName);
+ if (fileInfo == null || !fileInfo.isSame(md) || !commitPointFileExistsInBlobs(fileInfo, blobs)) {
+                // the file does not exist in any commit point, has a different length, or does not fully exist in the listed blobs
+ snapshotRequired = true;
+ }
+
+ if (snapshotRequired) {
+ indexNumberOfFiles++;
+ indexTotalFilesSize += md.length();
+ // create a new FileInfo
+ try {
+ CommitPoint.FileInfo snapshotFileInfo = new CommitPoint.FileInfo(fileNameFromGeneration(++generation), fileName, md.length(), md.checksum());
+ indexCommitPointFiles.add(snapshotFileInfo);
+ snapshotFile(snapshotIndexCommit.getDirectory(), snapshotFileInfo, indexLatch, failures);
+ } catch (IOException e) {
+ failures.add(e);
+ indexLatch.countDown();
+ }
+ } else {
+ indexCommitPointFiles.add(fileInfo);
+ indexLatch.countDown();
+ }
+ }
+ currentSnapshotStatus.index().files(indexNumberOfFiles, indexTotalFilesSize);
+
+ try {
+ indexLatch.await();
+ } catch (InterruptedException e) {
+ failures.add(e);
+ }
+ if (!failures.isEmpty()) {
+ throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to perform snapshot (index files)", failures.get(failures.size() - 1));
+ }
+
+ currentSnapshotStatus.index().time(System.currentTimeMillis() - currentSnapshotStatus.index().startTime());
+
+ currentSnapshotStatus.updateStage(SnapshotStatus.Stage.TRANSLOG);
+ currentSnapshotStatus.translog().startTime(System.currentTimeMillis());
+
+        // Note, we assume the snapshot always starts from "base 0". We need to seek forward to lastTranslogPosition if we only want the delta
+ List<CommitPoint.FileInfo> translogCommitPointFiles = Lists.newArrayList();
+ int expectedNumberOfOperations = 0;
+ boolean snapshotRequired = false;
+ if (snapshot.newTranslogCreated()) {
+ if (translogSnapshot.lengthInBytes() > 0) {
+ snapshotRequired = true;
+ expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
+ }
+ } else {
+ // if we have a commit point, check that we have all the files listed in it in the blob store
+ if (!commitPoints.commits().isEmpty()) {
+ CommitPoint commitPoint = commitPoints.commits().get(0);
+ boolean allTranslogFilesExists = true;
+ for (CommitPoint.FileInfo fileInfo : commitPoint.translogFiles()) {
+ if (!commitPointFileExistsInBlobs(fileInfo, blobs)) {
+ allTranslogFilesExists = false;
+ break;
+ }
+ }
+                // if everything exists, we can seek forward in case there are new operations; otherwise, we copy it all over again...
+ if (allTranslogFilesExists) {
+ translogCommitPointFiles.addAll(commitPoint.translogFiles());
+ if (snapshot.sameTranslogNewOperations()) {
+ translogSnapshot.seekForward(snapshot.lastTranslogLength());
+ if (translogSnapshot.lengthInBytes() > 0) {
+ snapshotRequired = true;
+ expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations() - snapshot.lastTotalTranslogOperations();
+ }
+ } // else (no operations, nothing to snapshot)
+ } else {
+ // a full translog snapshot is required
+ if (translogSnapshot.lengthInBytes() > 0) {
+ expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
+ snapshotRequired = true;
+ }
+ }
+ } else {
+ // no commit point, snapshot all the translog
+ if (translogSnapshot.lengthInBytes() > 0) {
+ expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
+ snapshotRequired = true;
+ }
+ }
+ }
+ currentSnapshotStatus.translog().expectedNumberOfOperations(expectedNumberOfOperations);
+
+ if (snapshotRequired) {
+ CommitPoint.FileInfo addedTranslogFileInfo = new CommitPoint.FileInfo(fileNameFromGeneration(++generation), "translog-" + translogSnapshot.translogId(), translogSnapshot.lengthInBytes(), null /* no need for checksum in translog */);
+ translogCommitPointFiles.add(addedTranslogFileInfo);
+ try {
+ snapshotTranslog(translogSnapshot, addedTranslogFileInfo);
+ } catch (Exception e) {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to snapshot translog", e);
+ }
+ }
+ currentSnapshotStatus.translog().time(System.currentTimeMillis() - currentSnapshotStatus.translog().startTime());
+
+ // now create and write the commit point
+ currentSnapshotStatus.updateStage(SnapshotStatus.Stage.FINALIZE);
+ long version = 0;
+ if (!commitPoints.commits().isEmpty()) {
+ version = commitPoints.commits().iterator().next().version() + 1;
+ }
+ String commitPointName = "commit-" + Long.toString(version, Character.MAX_RADIX);
+ CommitPoint commitPoint = new CommitPoint(version, commitPointName, CommitPoint.Type.GENERATED, indexCommitPointFiles, translogCommitPointFiles);
+ try {
+ byte[] commitPointData = CommitPoints.toXContent(commitPoint);
+ blobContainer.writeBlob(commitPointName, new BytesStreamInput(commitPointData, false), commitPointData.length);
+ } catch (Exception e) {
+ throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to write commit point", e);
+ }
+
+ // delete all files that are not referenced by any commit point
+ // build a new CommitPoint, that includes this one and all the saved ones
+ List<CommitPoint> newCommitPointsList = Lists.newArrayList();
+ newCommitPointsList.add(commitPoint);
+ for (CommitPoint point : commitPoints) {
+ if (point.type() == CommitPoint.Type.SAVED) {
+ newCommitPointsList.add(point);
+ }
+ }
+ CommitPoints newCommitPoints = new CommitPoints(newCommitPointsList);
+ // first, go over and delete all the commit points
+ for (String blobName : blobs.keySet()) {
+ if (!blobName.startsWith("commit-")) {
+ continue;
+ }
+ long checkedVersion = Long.parseLong(blobName.substring("commit-".length()), Character.MAX_RADIX);
+ if (!newCommitPoints.hasVersion(checkedVersion)) {
+ try {
+ blobContainer.deleteBlob(blobName);
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+        // now go over all the blobs, and if they don't exist in a commit point, delete them
+ for (String blobName : blobs.keySet()) {
+ String name = blobName;
+ if (!name.startsWith("__")) {
+ continue;
+ }
+ if (blobName.contains(".part")) {
+ name = blobName.substring(0, blobName.indexOf(".part"));
+ }
+ if (newCommitPoints.findNameFile(name) == null) {
+ try {
+ blobContainer.deleteBlob(blobName);
+ } catch (IOException e) {
+                    // ignore, will delete it later
+ }
+ }
+ }
+ }
+
+ @Override
+ public void recover(boolean indexShouldExists, RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException {
+ this.recoveryStatus = recoveryStatus;
+
+ final ImmutableMap<String, BlobMetaData> blobs;
+ try {
+ blobs = blobContainer.listBlobs();
+ } catch (IOException e) {
+ throw new IndexShardGatewayRecoveryException(shardId, "Failed to list content of gateway", e);
+ }
+
+ List<CommitPoint> commitPointsList = Lists.newArrayList();
+ boolean atLeastOneCommitPointExists = false;
+ for (String name : blobs.keySet()) {
+ if (name.startsWith("commit-")) {
+ atLeastOneCommitPointExists = true;
+ try {
+ commitPointsList.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
+ } catch (Exception e) {
+ logger.warn("failed to read commit point [{}]", e, name);
+ }
+ }
+ }
+ if (atLeastOneCommitPointExists && commitPointsList.isEmpty()) {
+            // not a single commit point could be loaded, bail so we won't corrupt the index; manual intervention will be required
+            throw new IndexShardGatewayRecoveryException(shardId, "Commit points exist but none could be loaded", null);
+ }
+ CommitPoints commitPoints = new CommitPoints(commitPointsList);
+
+ if (commitPoints.commits().isEmpty()) {
+ // no commit points, clean the store just so we won't recover wrong files
+ try {
+ indexShard.store().deleteContent();
+ } catch (IOException e) {
+ logger.warn("failed to clean store before starting shard", e);
+ }
+ recoveryStatus.index().startTime(System.currentTimeMillis());
+ recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
+ return;
+ }
+
+ for (CommitPoint commitPoint : commitPoints) {
+ if (!commitPointExistsInBlobs(commitPoint, blobs)) {
+ logger.warn("listed commit_point [{}]/[{}], but not all files exists, ignoring", commitPoint.name(), commitPoint.version());
+ continue;
+ }
+ try {
+ recoveryStatus.index().startTime(System.currentTimeMillis());
+ recoverIndex(commitPoint, blobs);
+ recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
+
+ recoverTranslog(commitPoint, blobs);
+ return;
+ } catch (Exception e) {
+ throw new IndexShardGatewayRecoveryException(shardId, "failed to recover commit_point [" + commitPoint.name() + "]/[" + commitPoint.version() + "]", e);
+ }
+ }
+ throw new IndexShardGatewayRecoveryException(shardId, "No commit point data is available in gateway", null);
+ }
+
+ private void recoverTranslog(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs) throws IndexShardGatewayRecoveryException {
+ if (commitPoint.translogFiles().isEmpty()) {
+ // no translog files, bail
+ recoveryStatus.start().startTime(System.currentTimeMillis());
+ recoveryStatus.updateStage(RecoveryStatus.Stage.START);
+ indexShard.postRecovery("post recovery from gateway, no translog");
+ recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
+ recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
+ return;
+ }
+
+ try {
+ recoveryStatus.start().startTime(System.currentTimeMillis());
+ recoveryStatus.updateStage(RecoveryStatus.Stage.START);
+ indexShard.performRecoveryPrepareForTranslog();
+ recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
+ recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
+
+ recoveryStatus.updateStage(RecoveryStatus.Stage.TRANSLOG);
+ recoveryStatus.translog().startTime(System.currentTimeMillis());
+
+ final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ final Iterator<CommitPoint.FileInfo> transIt = commitPoint.translogFiles().iterator();
+
+ blobContainer.readBlob(transIt.next().name(), new BlobContainer.ReadBlobListener() {
+ BytesStreamOutput bos = new BytesStreamOutput();
+ boolean ignore = false;
+
+ @Override
+ public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
+ if (ignore) {
+ return;
+ }
+ bos.write(data, offset, size);
+                    // if we don't have enough bytes to read the size header of the first operation, bail and wait for the next chunk
+ if (bos.size() < 4) {
+ return;
+ }
+ BytesStreamInput si = new BytesStreamInput(bos.bytes());
+ int position;
+ while (true) {
+ try {
+ position = si.position();
+ if (position + 4 > bos.size()) {
+ break;
+ }
+ int opSize = si.readInt();
+ int curPos = si.position();
+ if ((si.position() + opSize) > bos.size()) {
+ break;
+ }
+ Translog.Operation operation = TranslogStreams.readTranslogOperation(si);
+ if ((si.position() - curPos) != opSize) {
+ logger.warn("mismatch in size, expected [{}], got [{}]", opSize, si.position() - curPos);
+ }
+ recoveryStatus.translog().addTranslogOperations(1);
+ indexShard.performRecoveryOperation(operation);
+ if (si.position() >= bos.size()) {
+ position = si.position();
+ break;
+ }
+ } catch (Throwable e) {
+ logger.warn("failed to retrieve translog after [{}] operations, ignoring the rest, considered corrupted", e, recoveryStatus.translog().currentTranslogOperations());
+ ignore = true;
+ latch.countDown();
+ return;
+ }
+ }
+
+ BytesStreamOutput newBos = new BytesStreamOutput();
+
+ int leftOver = bos.size() - position;
+ if (leftOver > 0) {
+ newBos.write(bos.bytes().array(), position, leftOver);
+ }
+
+ bos = newBos;
+ }
+
+ @Override
+ public synchronized void onCompleted() {
+ if (ignore) {
+ return;
+ }
+ if (!transIt.hasNext()) {
+ latch.countDown();
+ return;
+ }
+ blobContainer.readBlob(transIt.next().name(), this);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ failure.set(t);
+ latch.countDown();
+ }
+ });
+
+ latch.await();
+ if (failure.get() != null) {
+ throw failure.get();
+ }
+
+ indexShard.performRecoveryFinalization(true);
+ recoveryStatus.translog().time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
+ } catch (Throwable e) {
+ throw new IndexShardGatewayRecoveryException(shardId, "Failed to recover translog", e);
+ }
+ }
+
+ private void recoverIndex(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs) throws Exception {
+ recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
+ int numberOfFiles = 0;
+ long totalSize = 0;
+ int numberOfReusedFiles = 0;
+ long reusedTotalSize = 0;
+
+ List<CommitPoint.FileInfo> filesToRecover = Lists.newArrayList();
+ for (CommitPoint.FileInfo fileInfo : commitPoint.indexFiles()) {
+ String fileName = fileInfo.physicalName();
+ StoreFileMetaData md = null;
+ try {
+ md = store.metaData(fileName);
+ } catch (Exception e) {
+ // no file
+ }
+ // we don't compute checksum for segments, so always recover them
+ if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
+ numberOfFiles++;
+ totalSize += md.length();
+ numberOfReusedFiles++;
+ reusedTotalSize += md.length();
+ if (logger.isTraceEnabled()) {
+ logger.trace("not_recovering [{}], exists in local store and is same", fileInfo.physicalName());
+ }
+ } else {
+ if (logger.isTraceEnabled()) {
+ if (md == null) {
+ logger.trace("recovering [{}], does not exists in local store", fileInfo.physicalName());
+ } else {
+ logger.trace("recovering [{}], exists in local store but is different", fileInfo.physicalName());
+ }
+ }
+ numberOfFiles++;
+ totalSize += fileInfo.length();
+ filesToRecover.add(fileInfo);
+ }
+ }
+
+ recoveryStatus.index().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
+ if (filesToRecover.isEmpty()) {
+ logger.trace("no files to recover, all exists within the local store");
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
+ }
+
+ final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
+ final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+
+ for (final CommitPoint.FileInfo fileToRecover : filesToRecover) {
+ recoverFile(fileToRecover, blobs, latch, failures);
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new IndexShardGatewayRecoveryException(shardId, "Interrupted while recovering index", e);
+ }
+
+ if (!failures.isEmpty()) {
+ throw new IndexShardGatewayRecoveryException(shardId, "Failed to recover index", failures.get(0));
+ }
+
+ // read the gateway data persisted
+ long version = -1;
+ try {
+ if (Lucene.indexExists(store.directory())) {
+ version = Lucene.readSegmentInfos(store.directory()).getVersion();
+ }
+ } catch (IOException e) {
+ throw new IndexShardGatewayRecoveryException(shardId(), "Failed to fetch index version after copying it over", e);
+ }
+ recoveryStatus.index().updateVersion(version);
+
+        // now, go over and clean files that are in the store, but were not in the gateway
+ try {
+ for (String storeFile : store.directory().listAll()) {
+ if (!commitPoint.containPhysicalIndexFile(storeFile)) {
+ try {
+ store.directory().deleteFile(storeFile);
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ private void recoverFile(final CommitPoint.FileInfo fileInfo, final ImmutableMap<String, BlobMetaData> blobs, final CountDownLatch latch, final List<Throwable> failures) {
+ final IndexOutput indexOutput;
+ try {
+            // we create a raw output with no checksumming, since the bytes are not written as one sequential
+            // stream (chunks may seek). The checksum file is written separately once copying is done
+ indexOutput = store.createOutputRaw(fileInfo.physicalName());
+ } catch (IOException e) {
+ failures.add(e);
+ latch.countDown();
+ return;
+ }
+
+ String firstFileToRecover = fileInfo.name();
+ if (!blobs.containsKey(fileInfo.name())) {
+ // chunking, append part0 to it
+ firstFileToRecover = fileInfo.name() + ".part0";
+ }
+ if (!blobs.containsKey(firstFileToRecover)) {
+            // neither the whole file nor its first part exists in the gateway, nothing we can recover
+ logger.warn("no file [{}]/[{}] to recover, ignoring it", fileInfo.name(), fileInfo.physicalName());
+ latch.countDown();
+ return;
+ }
+ final AtomicInteger partIndex = new AtomicInteger();
+
+ blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
+ @Override
+ public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
+ recoveryStatus.index().addCurrentFilesSize(size);
+ indexOutput.writeBytes(data, offset, size);
+ }
+
+ @Override
+ public synchronized void onCompleted() {
+ int part = partIndex.incrementAndGet();
+ String partName = fileInfo.name() + ".part" + part;
+ if (blobs.containsKey(partName)) {
+ // continue with the new part
+ blobContainer.readBlob(partName, this);
+ return;
+ } else {
+ // we are done...
+ try {
+ indexOutput.close();
+ // write the checksum
+ if (fileInfo.checksum() != null) {
+ store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
+ }
+ store.directory().sync(Collections.singleton(fileInfo.physicalName()));
+ } catch (IOException e) {
+ onFailure(e);
+ return;
+ }
+ }
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ failures.add(t);
+ latch.countDown();
+ }
+ });
+ }
+
+ private void snapshotTranslog(Translog.Snapshot snapshot, CommitPoint.FileInfo fileInfo) throws IOException {
+ blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());
+//
+// long chunkBytes = Long.MAX_VALUE;
+// if (chunkSize != null) {
+// chunkBytes = chunkSize.bytes();
+// }
+//
+// long totalLength = fileInfo.length();
+// long numberOfChunks = totalLength / chunkBytes;
+// if (totalLength % chunkBytes > 0) {
+// numberOfChunks++;
+// }
+// if (numberOfChunks == 0) {
+// numberOfChunks++;
+// }
+//
+// if (numberOfChunks == 1) {
+// blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());
+// } else {
+// InputStream translogStream = snapshot.stream();
+// long totalLengthLeftToWrite = totalLength;
+// for (int i = 0; i < numberOfChunks; i++) {
+// long lengthToWrite = chunkBytes;
+// if (totalLengthLeftToWrite < chunkBytes) {
+// lengthToWrite = totalLengthLeftToWrite;
+// }
+// blobContainer.writeBlob(fileInfo.name() + ".part" + i, new LimitInputStream(translogStream, lengthToWrite), lengthToWrite);
+// totalLengthLeftToWrite -= lengthToWrite;
+// }
+// }
+ }
+
+ private void snapshotFile(Directory dir, final CommitPoint.FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) throws IOException {
+ long chunkBytes = Long.MAX_VALUE;
+ if (chunkSize != null) {
+ chunkBytes = chunkSize.bytes();
+ }
+
+ long totalLength = fileInfo.length();
+ long numberOfChunks = totalLength / chunkBytes;
+ if (totalLength % chunkBytes > 0) {
+ numberOfChunks++;
+ }
+ if (numberOfChunks == 0) {
+ numberOfChunks++;
+ }
+
+ final long fNumberOfChunks = numberOfChunks;
+ final AtomicLong counter = new AtomicLong(numberOfChunks);
+ for (long i = 0; i < fNumberOfChunks; i++) {
+ final long partNumber = i;
+
+ IndexInput indexInput = null;
+ try {
+ // TODO: maybe use IOContext.READONCE?
+ indexInput = indexShard.store().openInputRaw(fileInfo.physicalName(), IOContext.READ);
+ indexInput.seek(partNumber * chunkBytes);
+ InputStreamIndexInput is = new ThreadSafeInputStreamIndexInput(indexInput, chunkBytes);
+
+ String blobName = fileInfo.name();
+ if (fNumberOfChunks > 1) {
+                    // when chunking, all chunk blobs are named in the form "[xxx].part[N]"
+ blobName += ".part" + partNumber;
+ }
+
+ final IndexInput fIndexInput = indexInput;
+ blobContainer.writeBlob(blobName, is, is.actualSizeToRead(), new ImmutableBlobContainer.WriterListener() {
+ @Override
+ public void onCompleted() {
+ try {
+ fIndexInput.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ if (counter.decrementAndGet() == 0) {
+ latch.countDown();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ try {
+ fIndexInput.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ failures.add(t);
+ if (counter.decrementAndGet() == 0) {
+ latch.countDown();
+ }
+ }
+ });
+ } catch (Exception e) {
+ if (indexInput != null) {
+ try {
+ indexInput.close();
+ } catch (IOException e1) {
+ // ignore
+ }
+ }
+ failures.add(e);
+ latch.countDown();
+ }
+ }
+ }
+
+ private boolean commitPointExistsInBlobs(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs) {
+ for (CommitPoint.FileInfo fileInfo : Iterables.concat(commitPoint.indexFiles(), commitPoint.translogFiles())) {
+ if (!commitPointFileExistsInBlobs(fileInfo, blobs)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private boolean commitPointFileExistsInBlobs(CommitPoint.FileInfo fileInfo, ImmutableMap<String, BlobMetaData> blobs) {
+ BlobMetaData blobMetaData = blobs.get(fileInfo.name());
+ if (blobMetaData != null) {
+ if (blobMetaData.length() != fileInfo.length()) {
+ return false;
+ }
+ } else if (blobs.containsKey(fileInfo.name() + ".part0")) {
+ // multi part file sum up the size and check
+ int part = 0;
+ long totalSize = 0;
+ while (true) {
+ blobMetaData = blobs.get(fileInfo.name() + ".part" + part++);
+ if (blobMetaData == null) {
+ break;
+ }
+ totalSize += blobMetaData.length();
+ }
+ if (totalSize != fileInfo.length()) {
+ return false;
+ }
+ } else {
+            // blob is missing entirely, neither an exact match nor a multipart file
+ return false;
+ }
+ return true;
+ }
+
+ private CommitPoints buildCommitPoints(ImmutableMap<String, BlobMetaData> blobs) {
+ List<CommitPoint> commitPoints = Lists.newArrayList();
+ for (String name : blobs.keySet()) {
+ if (name.startsWith("commit-")) {
+ try {
+ commitPoints.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
+ } catch (Exception e) {
+ logger.warn("failed to read commit point [{}]", e, name);
+ }
+ }
+ }
+ return new CommitPoints(commitPoints);
+ }
+
+ private String fileNameFromGeneration(long generation) {
+ return "__" + Long.toString(generation, Character.MAX_RADIX);
+ }
+
+ private long findLatestFileNameGeneration(ImmutableMap<String, BlobMetaData> blobs) {
+ long generation = -1;
+ for (String name : blobs.keySet()) {
+ if (!name.startsWith("__")) {
+ continue;
+ }
+ if (name.contains(".part")) {
+ name = name.substring(0, name.indexOf(".part"));
+ }
+
+ try {
+ long currentGen = Long.parseLong(name.substring(2) /*__*/, Character.MAX_RADIX);
+ if (currentGen > generation) {
+ generation = currentGen;
+ }
+ } catch (NumberFormatException e) {
+ logger.warn("file [{}] does not conform to the '__' schema");
+ }
+ }
+ return generation;
+ }
+}
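The snapshot and recovery logic in BlobStoreIndexShardGateway keys everything off three blob naming conventions: index files are stored as "__<generation>" with the generation encoded in radix 36, commit points as "commit-<version>" (same radix), and chunked blobs as "<name>.part<N>". Below is a minimal, self-contained sketch of how those names round-trip and how the chunk count is derived; the class and its main() are invented for illustration, and only the three helpers mirror the code above.

    public class BlobNamingSketch {

        // mirrors fileNameFromGeneration(): radix 36 keeps blob names short for large generations
        static String fileNameFromGeneration(long generation) {
            return "__" + Long.toString(generation, Character.MAX_RADIX);
        }

        // mirrors findLatestFileNameGeneration(): strip any ".part<N>" suffix, then parse back
        static long parseGeneration(String blobName) {
            if (blobName.contains(".part")) {
                blobName = blobName.substring(0, blobName.indexOf(".part"));
            }
            return Long.parseLong(blobName.substring(2) /* drop "__" */, Character.MAX_RADIX);
        }

        // mirrors the chunk arithmetic in snapshotFile(): round up, and never produce zero chunks
        static long numberOfChunks(long totalLength, long chunkBytes) {
            long chunks = totalLength / chunkBytes;
            if (totalLength % chunkBytes > 0) {
                chunks++;
            }
            if (chunks == 0) {
                chunks++; // a zero-length file still becomes one (empty) blob
            }
            return chunks;
        }

        public static void main(String[] args) {
            String name = fileNameFromGeneration(123456789L);     // "__21i3v9"
            System.out.println(parseGeneration(name + ".part0")); // 123456789
            System.out.println(numberOfChunks(250, 100));         // 3
        }
    }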
diff --git a/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java b/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java
new file mode 100644
index 0000000..58433cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.fs;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.gateway.IndexShardGateway;
+import org.elasticsearch.index.gateway.blobstore.BlobStoreIndexGateway;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class FsIndexGateway extends BlobStoreIndexGateway {
+
+ @Inject
+ public FsIndexGateway(Index index, @IndexSettings Settings indexSettings, Gateway gateway) {
+ super(index, indexSettings, gateway);
+ }
+
+ @Override
+ public String type() {
+ return "fs";
+ }
+
+ @Override
+ public Class<? extends IndexShardGateway> shardGatewayClass() {
+ return FsIndexShardGateway.class;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java b/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java
new file mode 100644
index 0000000..9e46f82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.fs;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.gateway.IndexGateway;
+
+/**
+ *
+ */
+public class FsIndexGatewayModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexGateway.class).to(FsIndexGateway.class).asEagerSingleton();
+ }
+}
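FsIndexGatewayModule, like the local and none variants further down, follows a one-line Guice pattern: bind the IndexGateway interface to a concrete gateway as an eager singleton, so the gateway is constructed as soon as the index-level injector is created. A minimal sketch of that pattern follows, using plain Guice rather than the repackaged org.elasticsearch.common.inject, with a hypothetical Greeter binding standing in for IndexGateway.

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;

    interface Greeter {
        String greet();
    }

    class ConsoleGreeter implements Greeter {
        @Override
        public String greet() {
            return "hello";
        }
    }

    class GreeterModule extends AbstractModule {
        @Override
        protected void configure() {
            // same shape as bind(IndexGateway.class).to(FsIndexGateway.class).asEagerSingleton():
            // the implementation is constructed once, eagerly, when the injector is created
            bind(Greeter.class).to(ConsoleGreeter.class).asEagerSingleton();
        }
    }

    public class ModuleSketch {
        public static void main(String[] args) {
            Injector injector = Guice.createInjector(new GreeterModule());
            System.out.println(injector.getInstance(Greeter.class).greet()); // "hello"
        }
    }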
diff --git a/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java
new file mode 100644
index 0000000..03eb534
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.fs;
+
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.NativeFSLockFactory;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.blobstore.fs.AbstractFsBlobContainer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.gateway.blobstore.BlobStoreIndexShardGateway;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class FsIndexShardGateway extends BlobStoreIndexShardGateway {
+
+ private final boolean snapshotLock;
+
+ @Inject
+ public FsIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, IndexGateway fsIndexGateway,
+ IndexShard indexShard, Store store) {
+ super(shardId, indexSettings, threadPool, fsIndexGateway, indexShard, store);
+ this.snapshotLock = indexSettings.getAsBoolean("gateway.fs.snapshot_lock", true);
+ }
+
+ @Override
+ public String type() {
+ return "fs";
+ }
+
+ @Override
+ public SnapshotLock obtainSnapshotLock() throws Exception {
+ if (!snapshotLock) {
+ return NO_SNAPSHOT_LOCK;
+ }
+ AbstractFsBlobContainer fsBlobContainer = (AbstractFsBlobContainer) blobContainer;
+ NativeFSLockFactory lockFactory = new NativeFSLockFactory(fsBlobContainer.filePath());
+
+ Lock lock = lockFactory.makeLock("snapshot.lock");
+ boolean obtained = lock.obtain();
+ if (!obtained) {
+ throw new ElasticsearchIllegalStateException("failed to obtain snapshot lock [" + lock + "]");
+ }
+ return new FsSnapshotLock(lock);
+ }
+
+ public class FsSnapshotLock implements SnapshotLock {
+ private final Lock lock;
+
+ public FsSnapshotLock(Lock lock) {
+ this.lock = lock;
+ }
+
+ @Override
+ public void release() {
+ try {
+ lock.release();
+ } catch (IOException e) {
+ logger.warn("failed to release snapshot lock [{}]", e, lock);
+ }
+ }
+ }
+}
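The snapshot lock above relies on Lucene's NativeFSLockFactory, which takes an OS-level file lock so that two processes sharing the same fs gateway path cannot snapshot the same shard concurrently. A minimal sketch of the obtain/release cycle, assuming the Lucene 4.x lock API this patch uses (obtain()/release()) and a hypothetical lock directory:

    import java.io.File;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.NativeFSLockFactory;

    public class SnapshotLockSketch {
        public static void main(String[] args) throws Exception {
            // hypothetical directory standing in for the shard's fs blob container path
            NativeFSLockFactory lockFactory = new NativeFSLockFactory(new File("/tmp/gateway-shard-0"));
            Lock lock = lockFactory.makeLock("snapshot.lock");
            if (!lock.obtain()) {
                throw new IllegalStateException("another process holds the snapshot lock");
            }
            try {
                // ... perform the snapshot while holding the OS-level file lock ...
            } finally {
                lock.release(); // always release, mirroring FsSnapshotLock.release()
            }
        }
    }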
diff --git a/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGateway.java b/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGateway.java
new file mode 100644
index 0000000..bec0c17
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGateway.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.local;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.gateway.IndexShardGateway;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class LocalIndexGateway extends AbstractIndexComponent implements IndexGateway {
+
+ @Inject
+ public LocalIndexGateway(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ }
+
+ @Override
+ public String type() {
+ return "local";
+ }
+
+ @Override
+ public Class<? extends IndexShardGateway> shardGatewayClass() {
+ return LocalIndexShardGateway.class;
+ }
+
+ @Override
+ public String toString() {
+ return "local";
+ }
+
+ @Override
+ public void close() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGatewayModule.java b/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGatewayModule.java
new file mode 100644
index 0000000..64ff78c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexGatewayModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.local;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.gateway.IndexGateway;
+
+/**
+ *
+ */
+public class LocalIndexGatewayModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexGateway.class).to(LocalIndexGateway.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexShardGateway.java
new file mode 100644
index 0000000..5a077ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/local/LocalIndexShardGateway.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.local;
+
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SegmentInfos;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.gateway.IndexShardGateway;
+import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
+import org.elasticsearch.index.gateway.RecoveryStatus;
+import org.elasticsearch.index.gateway.SnapshotStatus;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStreams;
+import org.elasticsearch.index.translog.fs.FsTranslog;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.ScheduledFuture;
+
+/**
+ *
+ */
+public class LocalIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway {
+
+ private final ThreadPool threadPool;
+
+ private final InternalIndexShard indexShard;
+
+ private final RecoveryStatus recoveryStatus = new RecoveryStatus();
+
+ private volatile ScheduledFuture flushScheduler;
+ private final TimeValue syncInterval;
+
+ @Inject
+ public LocalIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, IndexShard indexShard) {
+ super(shardId, indexSettings);
+ this.threadPool = threadPool;
+ this.indexShard = (InternalIndexShard) indexShard;
+
+ syncInterval = componentSettings.getAsTime("sync", TimeValue.timeValueSeconds(5));
+ if (syncInterval.millis() > 0) {
+ this.indexShard.translog().syncOnEachOperation(false);
+ flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, new Sync());
+ } else if (syncInterval.millis() == 0) {
+ flushScheduler = null;
+ this.indexShard.translog().syncOnEachOperation(true);
+ } else {
+ flushScheduler = null;
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "local";
+ }
+
+ @Override
+ public RecoveryStatus recoveryStatus() {
+ return recoveryStatus;
+ }
+
+ @Override
+ public void recover(boolean indexShouldExists, RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException {
+ recoveryStatus.index().startTime(System.currentTimeMillis());
+ recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
+ long version = -1;
+ long translogId = -1;
+ try {
+ SegmentInfos si = null;
+ try {
+ si = Lucene.readSegmentInfos(indexShard.store().directory());
+ } catch (Throwable e) {
+ String files = "_unknown_";
+ try {
+ files = Arrays.toString(indexShard.store().directory().listAll());
+ } catch (Throwable e1) {
+ files += " (failure=" + ExceptionsHelper.detailedMessage(e1) + ")";
+ }
+ if (indexShouldExists && indexShard.store().indexStore().persistent()) {
+ throw new IndexShardGatewayRecoveryException(shardId(), "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
+ }
+ }
+ if (si != null) {
+ if (indexShouldExists) {
+ version = si.getVersion();
+ if (si.getUserData().containsKey(Translog.TRANSLOG_ID_KEY)) {
+ translogId = Long.parseLong(si.getUserData().get(Translog.TRANSLOG_ID_KEY));
+ } else {
+ translogId = version;
+ }
+ logger.trace("using existing shard data, translog id [{}]", translogId);
+ } else {
+                    // it exists in the directory, but shouldn't exist on the FS; it's a leftover (possibly dangling)
+                    // from a "new index create" API call, so it is better to clean it than to reuse stale data
+                    logger.trace("cleaning existing shard, shouldn't exist");
+ IndexWriter writer = new IndexWriter(indexShard.store().directory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+ writer.close();
+ }
+ }
+ } catch (Throwable e) {
+ throw new IndexShardGatewayRecoveryException(shardId(), "failed to fetch index version after copying it over", e);
+ }
+ recoveryStatus.index().updateVersion(version);
+ recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
+
+ // since we recover from local, just fill the files and size
+ try {
+ int numberOfFiles = 0;
+ long totalSizeInBytes = 0;
+ for (String name : indexShard.store().directory().listAll()) {
+ numberOfFiles++;
+ totalSizeInBytes += indexShard.store().directory().fileLength(name);
+ }
+ recoveryStatus.index().files(numberOfFiles, totalSizeInBytes, numberOfFiles, totalSizeInBytes);
+ } catch (Exception e) {
+ // ignore
+ }
+
+ recoveryStatus.start().startTime(System.currentTimeMillis());
+ recoveryStatus.updateStage(RecoveryStatus.Stage.START);
+ if (translogId == -1) {
+            // no translog files, just start the shard and bail
+            indexShard.postRecovery("post recovery from gateway, no translog");
+ recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
+ recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
+ return;
+ }
+
+        // move an existing translog, if one exists, to a "recovering" state, and start reading from it
+ FsTranslog translog = (FsTranslog) indexShard.translog();
+ String translogName = "translog-" + translogId;
+ String recoverTranslogName = translogName + ".recovering";
+
+ File recoveringTranslogFile = null;
+ for (File translogLocation : translog.locations()) {
+ File tmpRecoveringFile = new File(translogLocation, recoverTranslogName);
+ if (!tmpRecoveringFile.exists()) {
+ File tmpTranslogFile = new File(translogLocation, translogName);
+ if (tmpTranslogFile.exists()) {
+ for (int i = 0; i < 3; i++) {
+ if (tmpTranslogFile.renameTo(tmpRecoveringFile)) {
+ recoveringTranslogFile = tmpRecoveringFile;
+ break;
+ }
+ }
+ }
+ } else {
+ recoveringTranslogFile = tmpRecoveringFile;
+ break;
+ }
+ }
+
+ if (recoveringTranslogFile == null || !recoveringTranslogFile.exists()) {
+            // no translog to recover from, just start the shard and bail
+            indexShard.postRecovery("post recovery from gateway, no translog");
+ recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
+ recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
+ return;
+ }
+
+ // recover from the translog file
+ indexShard.performRecoveryPrepareForTranslog();
+ recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
+ recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
+
+ recoveryStatus.translog().startTime(System.currentTimeMillis());
+ recoveryStatus.updateStage(RecoveryStatus.Stage.TRANSLOG);
+ FileInputStream fs = null;
+ try {
+ fs = new FileInputStream(recoveringTranslogFile);
+ InputStreamStreamInput si = new InputStreamStreamInput(fs);
+ while (true) {
+ Translog.Operation operation;
+ try {
+                    int opSize = si.readInt(); // consume the size header; the value itself is not needed here
+ operation = TranslogStreams.readTranslogOperation(si);
+ } catch (EOFException e) {
+ // ignore, not properly written the last op
+ break;
+ } catch (IOException e) {
+ // ignore, not properly written last op
+ break;
+ }
+ try {
+ indexShard.performRecoveryOperation(operation);
+ recoveryStatus.translog().addTranslogOperations(1);
+ } catch (ElasticsearchException e) {
+ if (e.status() == RestStatus.BAD_REQUEST) {
+                        // mainly for MapperParsingException and failure to detect the xcontent type
+ logger.info("ignoring recovery of a corrupt translog entry", e);
+ } else {
+ throw e;
+ }
+ }
+ }
+ } catch (Throwable e) {
+            // we failed to recover, make sure to delete the translog file (and keep the recovering one)
+ indexShard.translog().closeWithDelete();
+ throw new IndexShardGatewayRecoveryException(shardId, "failed to recover shard", e);
+ } finally {
+ try {
+                if (fs != null) {
+                    fs.close();
+                }
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ indexShard.performRecoveryFinalization(true);
+
+ recoveringTranslogFile.delete();
+
+ recoveryStatus.translog().time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
+ }
+
+ @Override
+ public String type() {
+ return "local";
+ }
+
+ @Override
+ public SnapshotStatus snapshot(Snapshot snapshot) {
+ return null;
+ }
+
+ @Override
+ public SnapshotStatus lastSnapshotStatus() {
+ return null;
+ }
+
+ @Override
+ public SnapshotStatus currentSnapshotStatus() {
+ return null;
+ }
+
+ @Override
+ public boolean requiresSnapshot() {
+ return false;
+ }
+
+ @Override
+ public boolean requiresSnapshotScheduling() {
+ return false;
+ }
+
+ @Override
+ public void close() {
+ if (flushScheduler != null) {
+ flushScheduler.cancel(false);
+ }
+ }
+
+ @Override
+ public SnapshotLock obtainSnapshotLock() throws Exception {
+ return NO_SNAPSHOT_LOCK;
+ }
+
+ class Sync implements Runnable {
+ @Override
+ public void run() {
+            // don't re-schedule if it's closed..., we are done
+ if (indexShard.state() == IndexShardState.CLOSED) {
+ return;
+ }
+ if (indexShard.state() == IndexShardState.STARTED && indexShard.translog().syncNeeded()) {
+ threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ indexShard.translog().sync();
+ } catch (Exception e) {
+ if (indexShard.state() == IndexShardState.STARTED) {
+ logger.warn("failed to sync translog", e);
+ }
+ }
+ if (indexShard.state() != IndexShardState.CLOSED) {
+ flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this);
+ }
+ }
+ });
+ } else {
+ flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this);
+ }
+ }
+ }
+}
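The translog replay loop above reads length-prefixed operations until the stream ends, and treats a truncated tail (a last operation not fully written before a crash) as a normal end of replay rather than a recovery failure. A minimal, self-contained sketch of that format and loop, with plain strings standing in for Translog.Operation:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.EOFException;
    import java.io.IOException;

    public class TranslogReplaySketch {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream file = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(file);
            for (String op : new String[]{"index doc 1", "delete doc 7"}) {
                byte[] bytes = op.getBytes("UTF-8");
                out.writeInt(bytes.length); // size header
                out.write(bytes);           // operation payload
            }
            out.writeInt(9999); // simulate a torn write: a size header with no payload

            DataInputStream in = new DataInputStream(new ByteArrayInputStream(file.toByteArray()));
            int replayed = 0;
            while (true) {
                String op;
                try {
                    int opSize = in.readInt();
                    byte[] buf = new byte[opSize];
                    in.readFully(buf);
                    op = new String(buf, "UTF-8");
                } catch (EOFException e) {
                    break; // last op was not fully written; stop replaying, like the code above
                }
                replayed++;
                System.out.println("replayed: " + op);
            }
            System.out.println(replayed + " operations replayed");
        }
    }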
diff --git a/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java b/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java
new file mode 100644
index 0000000..499fbf7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGateway.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.none;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.gateway.IndexShardGateway;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public class NoneIndexGateway extends AbstractIndexComponent implements IndexGateway {
+
+ @Inject
+ public NoneIndexGateway(Index index, @IndexSettings Settings indexSettings) {
+ super(index, indexSettings);
+ }
+
+ @Override
+ public String type() {
+ return "none";
+ }
+
+ @Override
+ public Class<? extends IndexShardGateway> shardGatewayClass() {
+ return NoneIndexShardGateway.class;
+ }
+
+ @Override
+ public String toString() {
+ return "_none_";
+ }
+
+ @Override
+ public void close() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java b/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java
new file mode 100644
index 0000000..ef2b5b5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexGatewayModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.none;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.gateway.IndexGateway;
+
+/**
+ *
+ */
+public class NoneIndexGatewayModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexGateway.class).to(NoneIndexGateway.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java
new file mode 100644
index 0000000..4b38970
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway.none;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.none.NoneGateway;
+import org.elasticsearch.index.gateway.IndexShardGateway;
+import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
+import org.elasticsearch.index.gateway.RecoveryStatus;
+import org.elasticsearch.index.gateway.SnapshotStatus;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NoneIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway {
+
+ private final InternalIndexShard indexShard;
+
+ private final RecoveryStatus recoveryStatus = new RecoveryStatus();
+
+ @Inject
+ public NoneIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, IndexShard indexShard) {
+ super(shardId, indexSettings);
+ this.indexShard = (InternalIndexShard) indexShard;
+ }
+
+ @Override
+ public String toString() {
+ return "_none_";
+ }
+
+ @Override
+ public RecoveryStatus recoveryStatus() {
+ return recoveryStatus;
+ }
+
+ @Override
+ public void recover(boolean indexShouldExists, RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException {
+ recoveryStatus.index().startTime(System.currentTimeMillis());
+ // in the none case, we simply start the shard
+ // clean the store, there should be nothing there...
+ try {
+ logger.debug("cleaning shard content before creation");
+ indexShard.store().deleteContent();
+ } catch (IOException e) {
+ logger.warn("failed to clean store before starting shard", e);
+ }
+ indexShard.postRecovery("post recovery from gateway");
+ recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
+ recoveryStatus.translog().startTime(System.currentTimeMillis());
+        recoveryStatus.translog().time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
+ }
+
+ @Override
+ public String type() {
+ return NoneGateway.TYPE;
+ }
+
+ @Override
+ public SnapshotStatus snapshot(Snapshot snapshot) {
+ return null;
+ }
+
+ @Override
+ public SnapshotStatus lastSnapshotStatus() {
+ return null;
+ }
+
+ @Override
+ public SnapshotStatus currentSnapshotStatus() {
+ return null;
+ }
+
+ @Override
+ public boolean requiresSnapshot() {
+ return false;
+ }
+
+ @Override
+ public boolean requiresSnapshotScheduling() {
+ return false;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public SnapshotLock obtainSnapshotLock() throws Exception {
+ return NO_SNAPSHOT_LOCK;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/get/GetField.java b/src/main/java/org/elasticsearch/index/get/GetField.java
new file mode 100644
index 0000000..cba582b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/get/GetField.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.get;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * A single field of a get result, holding the field name and all of its values.
+ */
+public class GetField implements Streamable, Iterable<Object> {
+
+ private String name;
+ private List<Object> values;
+
+ private GetField() {
+ }
+
+ public GetField(String name, List<Object> values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public Object getValue() {
+ if (values != null && !values.isEmpty()) {
+ return values.get(0);
+ }
+ return null;
+ }
+
+ public List<Object> getValues() {
+ return values;
+ }
+
+ public boolean isMetadataField() {
+ return MapperService.isMetadataField(name);
+ }
+
+ @Override
+ public Iterator<Object> iterator() {
+ return values.iterator();
+ }
+
+ public static GetField readGetField(StreamInput in) throws IOException {
+ GetField result = new GetField();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ int size = in.readVInt();
+ values = new ArrayList<Object>(size);
+ for (int i = 0; i < size; i++) {
+ values.add(in.readGenericValue());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVInt(values.size());
+ for (Object obj : values) {
+ out.writeGenericValue(obj);
+ }
+ }
+}
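+// A minimal round-trip sketch of the Streamable contract above (illustrative only;
+// assumes the BytesStreamOutput / BytesStreamInput classes from common.io.stream
+// and Guava's ImmutableList):
+//
+//   BytesStreamOutput out = new BytesStreamOutput();
+//   new GetField("user", ImmutableList.<Object>of("kimchy")).writeTo(out);
+//   GetField copy = GetField.readGetField(new BytesStreamInput(out.bytes()));
+//   assert copy.getName().equals("user") && copy.getValue().equals("kimchy");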
diff --git a/src/main/java/org/elasticsearch/index/get/GetResult.java b/src/main/java/org/elasticsearch/index/get/GetResult.java
new file mode 100644
index 0000000..6d07429
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/get/GetResult.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.get;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+import org.elasticsearch.search.lookup.SourceLookup;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+import static com.google.common.collect.Iterators.emptyIterator;
+import static com.google.common.collect.Maps.newHashMapWithExpectedSize;
+import static org.elasticsearch.index.get.GetField.readGetField;
+
+/**
+ * The result of a get operation: the document's metadata, requested fields, and source.
+ */
+public class GetResult implements Streamable, Iterable<GetField>, ToXContent {
+
+ private String index;
+ private String type;
+ private String id;
+ private long version;
+ private boolean exists;
+ private Map<String, GetField> fields;
+ private Map<String, Object> sourceAsMap;
+ private BytesReference source;
+ private byte[] sourceAsBytes;
+
+ GetResult() {
+ }
+
+ public GetResult(String index, String type, String id, long version, boolean exists, BytesReference source, Map<String, GetField> fields) {
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.version = version;
+ this.exists = exists;
+ this.source = source;
+ this.fields = fields;
+ if (this.fields == null) {
+ this.fields = ImmutableMap.of();
+ }
+ }
+
+ /**
+ * Returns whether the document exists.
+ */
+ public boolean isExists() {
+ return exists;
+ }
+
+ /**
+ * The index the document was fetched from.
+ */
+ public String getIndex() {
+ return index;
+ }
+
+ /**
+ * The type of the document.
+ */
+ public String getType() {
+ return type;
+ }
+
+ /**
+ * The id of the document.
+ */
+ public String getId() {
+ return id;
+ }
+
+ /**
+ * The version of the doc.
+ */
+ public long getVersion() {
+ return version;
+ }
+
+ /**
+ * The source of the document, if it exists.
+ */
+ public byte[] source() {
+ if (source == null) {
+ return null;
+ }
+ if (sourceAsBytes != null) {
+ return sourceAsBytes;
+ }
+ this.sourceAsBytes = sourceRef().toBytes();
+ return this.sourceAsBytes;
+ }
+
+ /**
+ * Returns the bytes reference, uncompressing the source first if needed.
+ */
+ public BytesReference sourceRef() {
+ try {
+ this.source = CompressorFactory.uncompressIfNeeded(this.source);
+ return this.source;
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to decompress source", e);
+ }
+ }
+
+ /**
+ * Internal source representation; it might still be compressed.
+ */
+ public BytesReference internalSourceRef() {
+ return source;
+ }
+
+ /**
+ * Returns whether the source is empty (not available).
+ */
+ public boolean isSourceEmpty() {
+ return source == null;
+ }
+
+ /**
+ * The source of the document (as a string).
+ */
+ public String sourceAsString() {
+ if (source == null) {
+ return null;
+ }
+ BytesReference source = sourceRef();
+ try {
+ return XContentHelper.convertToJson(source, false);
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to convert source to a json string", e);
+ }
+ }
+
+ /**
+ * The source of the document (as a map).
+ */
+ @SuppressWarnings({"unchecked"})
+ public Map<String, Object> sourceAsMap() throws ElasticsearchParseException {
+ if (source == null) {
+ return null;
+ }
+ if (sourceAsMap != null) {
+ return sourceAsMap;
+ }
+
+ sourceAsMap = SourceLookup.sourceAsMap(source);
+ return sourceAsMap;
+ }
+
+ public Map<String, Object> getSource() {
+ return sourceAsMap();
+ }
+
+ public Map<String, GetField> getFields() {
+ return fields;
+ }
+
+ public GetField field(String name) {
+ return fields.get(name);
+ }
+
+ @Override
+ public Iterator<GetField> iterator() {
+ if (fields == null) {
+ return emptyIterator();
+ }
+ return fields.values().iterator();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ static final XContentBuilderString FOUND = new XContentBuilderString("found");
+ static final XContentBuilderString FIELDS = new XContentBuilderString("fields");
+ }
+
+ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.FOUND, exists);
+
+ if (source != null) {
+ RestXContentBuilder.restDocumentSource(source, builder, params);
+ }
+
+ if (fields != null && !fields.isEmpty()) {
+ builder.startObject(Fields.FIELDS);
+ for (GetField field : fields.values()) {
+ if (field.getValues().isEmpty()) {
+ continue;
+ }
+ String fieldName = field.getName();
+ if (field.isMetadataField()) {
+ builder.field(fieldName, field.getValue());
+ } else {
+ builder.startArray(field.getName());
+ for (Object value : field.getValues()) {
+ builder.value(value);
+ }
+ builder.endArray();
+ }
+ }
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (!isExists()) {
+ builder.startObject();
+ builder.field(Fields._INDEX, index);
+ builder.field(Fields._TYPE, type);
+ builder.field(Fields._ID, id);
+ builder.field(Fields.FOUND, false);
+ builder.endObject();
+ } else {
+ builder.startObject();
+ builder.field(Fields._INDEX, index);
+ builder.field(Fields._TYPE, type);
+ builder.field(Fields._ID, id);
+ if (version != -1) {
+ builder.field(Fields._VERSION, version);
+ }
+ toXContentEmbedded(builder, params);
+
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ public static GetResult readGetResult(StreamInput in) throws IOException {
+ GetResult result = new GetResult();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = in.readSharedString();
+ type = in.readOptionalSharedString();
+ id = in.readString();
+ version = in.readLong();
+ exists = in.readBoolean();
+ if (exists) {
+ source = in.readBytesReference();
+ if (source.length() == 0) {
+ source = null;
+ }
+ int size = in.readVInt();
+ if (size == 0) {
+ fields = ImmutableMap.of();
+ } else {
+ fields = newHashMapWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ GetField field = readGetField(in);
+ fields.put(field.getName(), field);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeSharedString(index);
+ out.writeOptionalSharedString(type);
+ out.writeString(id);
+ out.writeLong(version);
+ out.writeBoolean(exists);
+ if (exists) {
+ out.writeBytesReference(source);
+ if (fields == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(fields.size());
+ for (GetField field : fields.values()) {
+ field.writeTo(out);
+ }
+ }
+ }
+ }
+}
+
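+// A consumer-side sketch of the accessors defined above (illustrative only); the
+// several source views are lazily derived from, and backed by, the same bytes:
+//
+//   if (getResult.isExists()) {
+//       Map<String, Object> doc = getResult.sourceAsMap(); // uncompressed and parsed on demand
+//       String json = getResult.sourceAsString();          // JSON view of the same bytes
+//       GetField field = getResult.field("user");          // a requested field, or null
+//   }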
diff --git a/src/main/java/org/elasticsearch/index/get/GetStats.java b/src/main/java/org/elasticsearch/index/get/GetStats.java
new file mode 100644
index 0000000..fa7730c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/get/GetStats.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.get;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ * Statistics about get operations on a shard, split into exists and missing counts and timings.
+ */
+public class GetStats implements Streamable, ToXContent {
+
+ private long existsCount;
+ private long existsTimeInMillis;
+ private long missingCount;
+ private long missingTimeInMillis;
+ private long current;
+
+ public GetStats() {
+ }
+
+ public GetStats(long existsCount, long existsTimeInMillis, long missingCount, long missingTimeInMillis, long current) {
+ this.existsCount = existsCount;
+ this.existsTimeInMillis = existsTimeInMillis;
+ this.missingCount = missingCount;
+ this.missingTimeInMillis = missingTimeInMillis;
+ this.current = current;
+ }
+
+ public void add(GetStats stats) {
+ if (stats == null) {
+ return;
+ }
+ existsCount += stats.existsCount;
+ existsTimeInMillis += stats.existsTimeInMillis;
+ missingCount += stats.missingCount;
+ missingTimeInMillis += stats.missingTimeInMillis;
+ current += stats.current;
+ }
+
+ public long getCount() {
+ return existsCount + missingCount;
+ }
+
+ public long getTimeInMillis() {
+ return existsTimeInMillis + missingTimeInMillis;
+ }
+
+ public TimeValue getTime() {
+ return new TimeValue(getTimeInMillis());
+ }
+
+ public long getExistsCount() {
+ return this.existsCount;
+ }
+
+ public long getExistsTimeInMillis() {
+ return this.existsTimeInMillis;
+ }
+
+ public TimeValue getExistsTime() {
+ return new TimeValue(existsTimeInMillis);
+ }
+
+ public long getMissingCount() {
+ return this.missingCount;
+ }
+
+ public long getMissingTimeInMillis() {
+ return this.missingTimeInMillis;
+ }
+
+ public TimeValue getMissingTime() {
+ return new TimeValue(missingTimeInMillis);
+ }
+
+ public long current() {
+ return this.current;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.GET);
+ builder.field(Fields.TOTAL, getCount());
+ builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, getTimeInMillis());
+ builder.field(Fields.EXISTS_TOTAL, existsCount);
+ builder.timeValueField(Fields.EXISTS_TIME_IN_MILLIS, Fields.EXISTS_TIME, existsTimeInMillis);
+ builder.field(Fields.MISSING_TOTAL, missingCount);
+ builder.timeValueField(Fields.MISSING_TIME_IN_MILLIS, Fields.MISSING_TIME, missingTimeInMillis);
+ builder.field(Fields.CURRENT, current);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString GET = new XContentBuilderString("get");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TIME = new XContentBuilderString("getTime");
+ static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis");
+ static final XContentBuilderString EXISTS_TOTAL = new XContentBuilderString("exists_total");
+ static final XContentBuilderString EXISTS_TIME = new XContentBuilderString("exists_time");
+ static final XContentBuilderString EXISTS_TIME_IN_MILLIS = new XContentBuilderString("exists_time_in_millis");
+ static final XContentBuilderString MISSING_TOTAL = new XContentBuilderString("missing_total");
+ static final XContentBuilderString MISSING_TIME = new XContentBuilderString("missing_time");
+ static final XContentBuilderString MISSING_TIME_IN_MILLIS = new XContentBuilderString("missing_time_in_millis");
+ static final XContentBuilderString CURRENT = new XContentBuilderString("current");
+ }
+
+ public static GetStats readGetStats(StreamInput in) throws IOException {
+ GetStats stats = new GetStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ existsCount = in.readVLong();
+ existsTimeInMillis = in.readVLong();
+ missingCount = in.readVLong();
+ missingTimeInMillis = in.readVLong();
+ current = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(existsCount);
+ out.writeVLong(existsTimeInMillis);
+ out.writeVLong(missingCount);
+ out.writeVLong(missingTimeInMillis);
+ out.writeVLong(current);
+ }
+}
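+// For orientation, a sketch of the section rendered by toXContent above
+// (timeValueField also emits a human-readable variant when requested):
+//
+//   "get": {
+//     "total": 10, "time_in_millis": 42,
+//     "exists_total": 8, "exists_time_in_millis": 40,
+//     "missing_total": 2, "missing_time_in_millis": 2,
+//     "current": 0
+//   }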
diff --git a/src/main/java/org/elasticsearch/index/get/ShardGetModule.java b/src/main/java/org/elasticsearch/index/get/ShardGetModule.java
new file mode 100644
index 0000000..bc1df27
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/get/ShardGetModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.get;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * Binds the shard-level get service.
+ */
+public class ShardGetModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardGetService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/src/main/java/org/elasticsearch/index/get/ShardGetService.java
new file mode 100644
index 0000000..56bbd60
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/get/ShardGetService.java
@@ -0,0 +1,408 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.get;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Sets;
+import org.apache.lucene.index.Term;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
+import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
+import org.elasticsearch.index.fieldvisitor.JustSourceFieldsVisitor;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.*;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.lookup.SourceLookup;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Maps.newHashMapWithExpectedSize;
+
+/**
+ * Shard-level service that performs get operations, either realtime from the translog or from the Lucene index.
+ */
+public class ShardGetService extends AbstractIndexShardComponent {
+
+ private final ScriptService scriptService;
+
+ private final MapperService mapperService;
+
+ private final IndexFieldDataService fieldDataService;
+
+ private IndexShard indexShard;
+
+ private final MeanMetric existsMetric = new MeanMetric();
+ private final MeanMetric missingMetric = new MeanMetric();
+ private final CounterMetric currentMetric = new CounterMetric();
+
+ @Inject
+ public ShardGetService(ShardId shardId, @IndexSettings Settings indexSettings, ScriptService scriptService,
+ MapperService mapperService, IndexFieldDataService fieldDataService) {
+ super(shardId, indexSettings);
+ this.scriptService = scriptService;
+ this.mapperService = mapperService;
+ this.fieldDataService = fieldDataService;
+ }
+
+ public GetStats stats() {
+ return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
+ }
+
+ // sadly, to overcome a cyclic dependency, we need to set this ourselves instead of having it injected...
+ public ShardGetService setIndexShard(IndexShard indexShard) {
+ this.indexShard = indexShard;
+ return this;
+ }
+
+ public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext)
+ throws ElasticsearchException {
+ currentMetric.inc();
+ try {
+ long now = System.nanoTime();
+ GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext);
+
+ if (getResult.isExists()) {
+ existsMetric.inc(System.nanoTime() - now);
+ } else {
+ missingMetric.inc(System.nanoTime() - now);
+ }
+ return getResult;
+ } finally {
+ currentMetric.dec();
+ }
+ }
+
+ /**
+ * Returns {@link GetResult} based on the specified {@link Engine.GetResult} argument.
+ * This method loads the specified fields for the document associated with the engineGetResult.
+ * It loads the fields from the Lucene index rather than from the transaction log and therefore isn't realtime.
+ * <p/>
+ * Note: the caller <b>must</b> release the engine searcher associated with engineGetResult!
+ */
+ public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) {
+ if (!engineGetResult.exists()) {
+ return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
+ }
+
+ currentMetric.inc();
+ try {
+ long now = System.nanoTime();
+ DocumentMapper docMapper = mapperService.documentMapper(type);
+ if (docMapper == null) {
+ missingMetric.inc(System.nanoTime() - now);
+ return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
+ }
+ fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, fields);
+ GetResult getResult = innerGetLoadFromStoredFields(type, id, fields, fetchSourceContext, engineGetResult, docMapper);
+ if (getResult.isExists()) {
+ existsMetric.inc(System.nanoTime() - now);
+ } else {
+ missingMetric.inc(System.nanoTime() - now); // This shouldn't happen...
+ }
+ return getResult;
+ } finally {
+ currentMetric.dec();
+ }
+ }
+
+ /**
+ * Decides what needs to be done based on the request input and always returns a valid, non-null FetchSourceContext.
+ */
+ protected FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceContext context, @Nullable String[] gFields) {
+ if (context != null) {
+ return context;
+ }
+ if (gFields == null) {
+ return FetchSourceContext.FETCH_SOURCE;
+ }
+ for (String field : gFields) {
+ if (SourceFieldMapper.NAME.equals(field)) {
+ return FetchSourceContext.FETCH_SOURCE;
+ }
+ }
+ return FetchSourceContext.DO_NOT_FETCH_SOURCE;
+ }
+
+ public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) throws ElasticsearchException {
+ fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);
+
+ boolean loadSource = (gFields != null && gFields.length > 0) || fetchSourceContext.fetchSource();
+
+ Engine.GetResult get = null;
+ if (type == null || type.equals("_all")) {
+ for (String typeX : mapperService.types()) {
+ get = indexShard.get(new Engine.Get(realtime, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(typeX, id)))
+ .loadSource(loadSource).version(version).versionType(versionType));
+ if (get.exists()) {
+ type = typeX;
+ break;
+ } else {
+ get.release();
+ }
+ }
+ if (get == null) {
+ return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
+ }
+ if (!get.exists()) {
+ // no need to release here as well; non-existing gets were already released in the loop above
+ return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
+ }
+ } else {
+ get = indexShard.get(new Engine.Get(realtime, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(type, id)))
+ .loadSource(loadSource).version(version).versionType(versionType));
+ if (!get.exists()) {
+ get.release();
+ return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
+ }
+ }
+
+ DocumentMapper docMapper = mapperService.documentMapper(type);
+ if (docMapper == null) {
+ get.release();
+ return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
+ }
+
+ try {
+ // distinguish between having loaded it from the translog (so we only have _source) and having a Lucene document to load
+ if (get.docIdAndVersion() != null) {
+ return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, docMapper);
+ } else {
+ Translog.Source source = get.source();
+
+ Map<String, GetField> fields = null;
+ SearchLookup searchLookup = null;
+
+ // we can only load fields that can be extracted from the _source
+ if (gFields != null && gFields.length > 0) {
+ Map<String, Object> sourceAsMap = null;
+ for (String field : gFields) {
+ if (SourceFieldMapper.NAME.equals(field)) {
+ // dealt with when normalizing fetchSourceContext.
+ continue;
+ }
+ Object value = null;
+ if (field.equals(RoutingFieldMapper.NAME) && docMapper.routingFieldMapper().fieldType().stored()) {
+ value = source.routing;
+ } else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper().active() && docMapper.parentFieldMapper().fieldType().stored()) {
+ value = source.parent;
+ } else if (field.equals(TimestampFieldMapper.NAME) && docMapper.timestampFieldMapper().fieldType().stored()) {
+ value = source.timestamp;
+ } else if (field.equals(TTLFieldMapper.NAME) && docMapper.TTLFieldMapper().fieldType().stored()) {
+ // Call value for search with timestamp + ttl here to display the live remaining ttl value and be consistent with the search result display
+ if (source.ttl > 0) {
+ value = docMapper.TTLFieldMapper().valueForSearch(source.timestamp + source.ttl);
+ }
+ } else if (field.equals(SizeFieldMapper.NAME) && docMapper.rootMapper(SizeFieldMapper.class).fieldType().stored()) {
+ value = source.source.length();
+ } else {
+ if (searchLookup == null) {
+ searchLookup = new SearchLookup(mapperService, fieldDataService, new String[]{type});
+ searchLookup.source().setNextSource(source.source);
+ }
+
+ FieldMapper<?> x = docMapper.mappers().smartNameFieldMapper(field);
+ if (x == null) {
+ if (docMapper.objectMappers().get(field) != null) {
+ // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
+ throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field");
+ }
+ } else if (docMapper.sourceMapper().enabled() || x.fieldType().stored()) {
+ List<Object> values = searchLookup.source().extractRawValues(field);
+ if (!values.isEmpty()) {
+ for (int i = 0; i < values.size(); i++) {
+ values.set(i, x.valueForSearch(values.get(i)));
+ }
+ value = values;
+ }
+ }
+ }
+ if (value != null) {
+ if (fields == null) {
+ fields = newHashMapWithExpectedSize(2);
+ }
+ if (value instanceof List) {
+ fields.put(field, new GetField(field, (List) value));
+ } else {
+ fields.put(field, new GetField(field, ImmutableList.of(value)));
+ }
+ }
+ }
+ }
+
+ // deal with source, but only if it's enabled (we always have it from the translog)
+ BytesReference sourceToBeReturned = null;
+ SourceFieldMapper sourceFieldMapper = docMapper.sourceMapper();
+ if (fetchSourceContext.fetchSource() && sourceFieldMapper.enabled()) {
+
+ sourceToBeReturned = source.source;
+
+ // Cater for source excludes/includes at the cost of performance
+ // We must first apply the field mapper filtering to make sure we get correct results
+ // in the case that the fetchSourceContext whitelists something that's not included by the field mapper
+
+ Map<String, Object> filteredSource = null;
+ XContentType sourceContentType = null;
+ if (sourceFieldMapper.includes().length > 0 || sourceFieldMapper.excludes().length > 0) {
+ // TODO: The source might already be parsed and available in the sourceLookup, but that one uses unordered maps, so the output could differ. Do we care?
+ Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
+ sourceContentType = typeMapTuple.v1();
+ filteredSource = XContentMapValues.filter(typeMapTuple.v2(), sourceFieldMapper.includes(), sourceFieldMapper.excludes());
+ }
+ if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
+ if (filteredSource == null) {
+ Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
+ sourceContentType = typeMapTuple.v1();
+ filteredSource = typeMapTuple.v2();
+ }
+ filteredSource = XContentMapValues.filter(filteredSource, fetchSourceContext.includes(), fetchSourceContext.excludes());
+ }
+ if (filteredSource != null) {
+ try {
+ sourceToBeReturned = XContentFactory.contentBuilder(sourceContentType).map(filteredSource).bytes();
+ } catch (IOException e) {
+ throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
+ }
+ }
+ }
+
+ return new GetResult(shardId.index().name(), type, id, get.version(), get.exists(), sourceToBeReturned, fields);
+ }
+ } finally {
+ get.release();
+ }
+ }
+
+ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, DocumentMapper docMapper) {
+ Map<String, GetField> fields = null;
+ BytesReference source = null;
+ Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
+ FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
+ if (fieldVisitor != null) {
+ try {
+ docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
+ } catch (IOException e) {
+ throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
+ }
+ source = fieldVisitor.source();
+
+ if (!fieldVisitor.fields().isEmpty()) {
+ fieldVisitor.postProcess(docMapper);
+ fields = new HashMap<String, GetField>(fieldVisitor.fields().size());
+ for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
+ fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
+ }
+ }
+ }
+
+ // now, go and do the script thingy if needed
+ if (gFields != null && gFields.length > 0) {
+ SearchLookup searchLookup = null;
+ for (String field : gFields) {
+ Object value = null;
+ FieldMappers x = docMapper.mappers().smartName(field);
+ if (x == null) {
+ if (docMapper.objectMappers().get(field) != null) {
+ // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
+ throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field");
+ }
+ } else if (!x.mapper().fieldType().stored()) {
+ if (searchLookup == null) {
+ searchLookup = new SearchLookup(mapperService, fieldDataService, new String[]{type});
+ searchLookup.setNextReader(docIdAndVersion.context);
+ searchLookup.source().setNextSource(source);
+ searchLookup.setNextDocId(docIdAndVersion.docId);
+ }
+
+ List<Object> values = searchLookup.source().extractRawValues(field);
+ if (!values.isEmpty()) {
+ for (int i = 0; i < values.size(); i++) {
+ values.set(i, x.mapper().valueForSearch(values.get(i)));
+ }
+ value = values;
+ }
+ }
+
+ if (value != null) {
+ if (fields == null) {
+ fields = newHashMapWithExpectedSize(2);
+ }
+ if (value instanceof List) {
+ fields.put(field, new GetField(field, (List) value));
+ } else {
+ fields.put(field, new GetField(field, ImmutableList.of(value)));
+ }
+ }
+ }
+ }
+
+ if (!fetchSourceContext.fetchSource()) {
+ source = null;
+ } else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
+ Map<String, Object> filteredSource;
+ XContentType sourceContentType = null;
+ // TODO: The source might already be parsed and available in the sourceLookup, but that one uses unordered maps, so the output could differ. Do we care?
+ Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
+ sourceContentType = typeMapTuple.v1();
+ filteredSource = XContentMapValues.filter(typeMapTuple.v2(), fetchSourceContext.includes(), fetchSourceContext.excludes());
+ try {
+ source = XContentFactory.contentBuilder(sourceContentType).map(filteredSource).bytes();
+ } catch (IOException e) {
+ throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
+ }
+ }
+
+ return new GetResult(shardId.index().name(), type, id, get.version(), get.exists(), source, fields);
+ }
+
+ private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext) {
+ if (fields == null || fields.length == 0) {
+ return fetchSourceContext.fetchSource() ? new JustSourceFieldsVisitor() : null;
+ }
+
+ return new CustomFieldsVisitor(Sets.newHashSet(fields), fetchSourceContext.fetchSource());
+ }
+}
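+// Flow sketch for the code above (illustrative only): a realtime get asks the engine
+// first; the engine either answers from the translog (only _source is available,
+// handled by the translog branch of innerGet) or points at a Lucene document
+// (handled by innerGetLoadFromStoredFields). In both cases the Engine.GetResult is
+// released in a finally block.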
diff --git a/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java b/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java
new file mode 100644
index 0000000..29bfe58
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.indexing;
+
+import org.elasticsearch.index.engine.Engine;
+
+/**
+ * A listener for indexing events: create, index, and delete operations.
+ */
+public abstract class IndexingOperationListener {
+
+ /**
+ * Called before the create occurs.
+ */
+ public Engine.Create preCreate(Engine.Create create) {
+ return create;
+ }
+
+ /**
+ * Called after the create occurs, under a locking scheme that serializes
+ * concurrent updates to the same doc.
+ * <p/>
+ * Note: long-running operations should not occur under this callback.
+ */
+ public void postCreateUnderLock(Engine.Create create) {
+
+ }
+
+ /**
+ * Called after the create operation occurred.
+ */
+ public void postCreate(Engine.Create create) {
+
+ }
+
+ /**
+ * Called before the indexing occurs.
+ */
+ public Engine.Index preIndex(Engine.Index index) {
+ return index;
+ }
+
+ /**
+ * Called after the index operation occurs, under a locking scheme that serializes
+ * concurrent updates to the same doc.
+ * <p/>
+ * Note: long-running operations should not occur under this callback.
+ */
+ public void postIndexUnderLock(Engine.Index index) {
+
+ }
+
+ /**
+ * Called after the indexing operation occurred.
+ */
+ public void postIndex(Engine.Index index) {
+
+ }
+
+ /**
+ * Called before the delete occurs.
+ */
+ public Engine.Delete preDelete(Engine.Delete delete) {
+ return delete;
+ }
+
+ /**
+ * Called after the delete occurs, under a locking scheme that serializes
+ * concurrent updates to the same doc.
+ * <p/>
+ * Note: long-running operations should not occur under this callback.
+ */
+ public void postDeleteUnderLock(Engine.Delete delete) {
+
+ }
+
+ /**
+ * Called after the delete operation occurred.
+ */
+ public void postDelete(Engine.Delete delete) {
+
+ }
+
+ public Engine.DeleteByQuery preDeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
+ return deleteByQuery;
+ }
+
+ public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
+
+ }
+}
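+// A minimal listener sketch (illustrative only; CountingListener is a hypothetical
+// name), registered through ShardIndexingService#addListener:
+//
+//   public class CountingListener extends IndexingOperationListener {
+//       private final java.util.concurrent.atomic.AtomicLong indexed =
+//               new java.util.concurrent.atomic.AtomicLong();
+//
+//       @Override
+//       public void postIndex(Engine.Index index) {
+//           indexed.incrementAndGet(); // keep this cheap; it runs on the indexing path
+//       }
+//   }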
diff --git a/src/main/java/org/elasticsearch/index/indexing/IndexingStats.java b/src/main/java/org/elasticsearch/index/indexing/IndexingStats.java
new file mode 100644
index 0000000..81a639f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/indexing/IndexingStats.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.indexing;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Statistics about indexing and delete operations, optionally broken down per type.
+ */
+public class IndexingStats implements Streamable, ToXContent {
+
+ public static class Stats implements Streamable, ToXContent {
+
+ private long indexCount;
+ private long indexTimeInMillis;
+ private long indexCurrent;
+
+ private long deleteCount;
+ private long deleteTimeInMillis;
+ private long deleteCurrent;
+
+ Stats() {
+
+ }
+
+ public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long deleteCount, long deleteTimeInMillis, long deleteCurrent) {
+ this.indexCount = indexCount;
+ this.indexTimeInMillis = indexTimeInMillis;
+ this.indexCurrent = indexCurrent;
+ this.deleteCount = deleteCount;
+ this.deleteTimeInMillis = deleteTimeInMillis;
+ this.deleteCurrent = deleteCurrent;
+ }
+
+ public void add(Stats stats) {
+ indexCount += stats.indexCount;
+ indexTimeInMillis += stats.indexTimeInMillis;
+ indexCurrent += stats.indexCurrent;
+
+ deleteCount += stats.deleteCount;
+ deleteTimeInMillis += stats.deleteTimeInMillis;
+ deleteCurrent += stats.deleteCurrent;
+ }
+
+ public long getIndexCount() {
+ return indexCount;
+ }
+
+ public TimeValue getIndexTime() {
+ return new TimeValue(indexTimeInMillis);
+ }
+
+ public long getIndexTimeInMillis() {
+ return indexTimeInMillis;
+ }
+
+ public long getIndexCurrent() {
+ return indexCurrent;
+ }
+
+ public long getDeleteCount() {
+ return deleteCount;
+ }
+
+ public TimeValue getDeleteTime() {
+ return new TimeValue(deleteTimeInMillis);
+ }
+
+ public long getDeleteTimeInMillis() {
+ return deleteTimeInMillis;
+ }
+
+ public long getDeleteCurrent() {
+ return deleteCurrent;
+ }
+
+ public static Stats readStats(StreamInput in) throws IOException {
+ Stats stats = new Stats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ indexCount = in.readVLong();
+ indexTimeInMillis = in.readVLong();
+ indexCurrent = in.readVLong();
+
+ deleteCount = in.readVLong();
+ deleteTimeInMillis = in.readVLong();
+ deleteCurrent = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(indexCount);
+ out.writeVLong(indexTimeInMillis);
+ out.writeVLong(indexCurrent);
+
+ out.writeVLong(deleteCount);
+ out.writeVLong(deleteTimeInMillis);
+ out.writeVLong(deleteCurrent);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.INDEX_TOTAL, indexCount);
+ builder.timeValueField(Fields.INDEX_TIME_IN_MILLIS, Fields.INDEX_TIME, indexTimeInMillis);
+ builder.field(Fields.INDEX_CURRENT, indexCurrent);
+
+ builder.field(Fields.DELETE_TOTAL, deleteCount);
+ builder.timeValueField(Fields.DELETE_TIME_IN_MILLIS, Fields.DELETE_TIME, deleteTimeInMillis);
+ builder.field(Fields.DELETE_CURRENT, deleteCurrent);
+
+ return builder;
+ }
+ }
+
+ private Stats totalStats;
+
+ @Nullable
+ private Map<String, Stats> typeStats;
+
+ public IndexingStats() {
+ totalStats = new Stats();
+ }
+
+ public IndexingStats(Stats totalStats, @Nullable Map<String, Stats> typeStats) {
+ this.totalStats = totalStats;
+ this.typeStats = typeStats;
+ }
+
+ public void add(IndexingStats indexingStats) {
+ add(indexingStats, true);
+ }
+
+ public void add(IndexingStats indexingStats, boolean includeTypes) {
+ if (indexingStats == null) {
+ return;
+ }
+ totalStats.add(indexingStats.totalStats);
+ if (includeTypes && indexingStats.typeStats != null && !indexingStats.typeStats.isEmpty()) {
+ if (typeStats == null) {
+ typeStats = new HashMap<String, Stats>(indexingStats.typeStats.size());
+ }
+ for (Map.Entry<String, Stats> entry : indexingStats.typeStats.entrySet()) {
+ Stats stats = typeStats.get(entry.getKey());
+ if (stats == null) {
+ typeStats.put(entry.getKey(), entry.getValue());
+ } else {
+ stats.add(entry.getValue());
+ }
+ }
+ }
+ }
+
+ public Stats getTotal() {
+ return this.totalStats;
+ }
+
+ @Nullable
+ public Map<String, Stats> getTypeStats() {
+ return this.typeStats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(Fields.INDEXING);
+ totalStats.toXContent(builder, params);
+ if (typeStats != null && !typeStats.isEmpty()) {
+ builder.startObject(Fields.TYPES);
+ for (Map.Entry<String, Stats> entry : typeStats.entrySet()) {
+ builder.startObject(entry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
+ entry.getValue().toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString INDEXING = new XContentBuilderString("indexing");
+ static final XContentBuilderString TYPES = new XContentBuilderString("types");
+ static final XContentBuilderString INDEX_TOTAL = new XContentBuilderString("index_total");
+ static final XContentBuilderString INDEX_TIME = new XContentBuilderString("index_time");
+ static final XContentBuilderString INDEX_TIME_IN_MILLIS = new XContentBuilderString("index_time_in_millis");
+ static final XContentBuilderString INDEX_CURRENT = new XContentBuilderString("index_current");
+ static final XContentBuilderString DELETE_TOTAL = new XContentBuilderString("delete_total");
+ static final XContentBuilderString DELETE_TIME = new XContentBuilderString("delete_time");
+ static final XContentBuilderString DELETE_TIME_IN_MILLIS = new XContentBuilderString("delete_time_in_millis");
+ static final XContentBuilderString DELETE_CURRENT = new XContentBuilderString("delete_current");
+ }
+
+ public static IndexingStats readIndexingStats(StreamInput in) throws IOException {
+ IndexingStats indexingStats = new IndexingStats();
+ indexingStats.readFrom(in);
+ return indexingStats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ totalStats = Stats.readStats(in);
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ typeStats = new HashMap<String, Stats>(size);
+ for (int i = 0; i < size; i++) {
+ typeStats.put(in.readString(), Stats.readStats(in));
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ totalStats.writeTo(out);
+ if (typeStats == null || typeStats.isEmpty()) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(typeStats.size());
+ for (Map.Entry<String, Stats> entry : typeStats.entrySet()) {
+ out.writeString(entry.getKey());
+ entry.getValue().writeTo(out);
+ }
+ }
+ }
+}
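+// For orientation, a sketch of the section rendered by toXContent above, with the
+// optional per-type breakdown (values are illustrative):
+//
+//   "indexing": {
+//     "index_total": 100, "index_time_in_millis": 250, "index_current": 1,
+//     "delete_total": 3, "delete_time_in_millis": 5, "delete_current": 0,
+//     "types": { "tweet": { "index_total": 100, ... } }
+//   }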
diff --git a/src/main/java/org/elasticsearch/index/indexing/ShardIndexingModule.java b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingModule.java
new file mode 100644
index 0000000..4d5e29d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.indexing;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+
+/**
+ * Binds the shard indexing service and the indexing slow log service.
+ */
+public class ShardIndexingModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardIndexingService.class).asEagerSingleton();
+ bind(ShardSlowLogIndexingService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java
new file mode 100644
index 0000000..18258c1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.indexing;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Shard-level service that tracks indexing statistics and notifies registered indexing operation listeners.
+ */
+public class ShardIndexingService extends AbstractIndexShardComponent {
+
+ private final ShardSlowLogIndexingService slowLog;
+
+ private final StatsHolder totalStats = new StatsHolder();
+
+ private final CopyOnWriteArrayList<IndexingOperationListener> listeners = new CopyOnWriteArrayList<IndexingOperationListener>();
+
+ private volatile Map<String, StatsHolder> typesStats = ImmutableMap.of();
+
+ @Inject
+ public ShardIndexingService(ShardId shardId, @IndexSettings Settings indexSettings, ShardSlowLogIndexingService slowLog) {
+ super(shardId, indexSettings);
+ this.slowLog = slowLog;
+ }
+
+ /**
+ * Returns the stats, including type-specific stats. If types is null or empty, no per-type
+ * stats are returned. Otherwise only the provided types are returned, or all types if
+ * <tt>_all</tt> is passed.
+ */
+ public IndexingStats stats(String... types) {
+ IndexingStats.Stats total = totalStats.stats();
+ Map<String, IndexingStats.Stats> typesSt = null;
+ if (types != null && types.length > 0) {
+ if (types.length == 1 && types[0].equals("_all")) {
+ typesSt = new HashMap<String, IndexingStats.Stats>(typesStats.size());
+ for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
+ typesSt.put(entry.getKey(), entry.getValue().stats());
+ }
+ } else {
+ typesSt = new HashMap<String, IndexingStats.Stats>(types.length);
+ for (String type : types) {
+ StatsHolder statsHolder = typesStats.get(type);
+ if (statsHolder != null) {
+ typesSt.put(type, statsHolder.stats());
+ }
+ }
+ }
+ }
+ return new IndexingStats(total, typesSt);
+ }
+
+ public void addListener(IndexingOperationListener listener) {
+ listeners.add(listener);
+ }
+
+ public void removeListener(IndexingOperationListener listener) {
+ listeners.remove(listener);
+ }
+
+ public Engine.Create preCreate(Engine.Create create) {
+ totalStats.indexCurrent.inc();
+ typeStats(create.type()).indexCurrent.inc();
+ for (IndexingOperationListener listener : listeners) {
+ create = listener.preCreate(create);
+ }
+ return create;
+ }
+
+ public void postCreateUnderLock(Engine.Create create) {
+ for (IndexingOperationListener listener : listeners) {
+ try {
+ listener.postCreateUnderLock(create);
+ } catch (Exception e) {
+ logger.warn("post listener [{}] failed", e, listener);
+ }
+ }
+ }
+
+ public void postCreate(Engine.Create create) {
+ long took = create.endTime() - create.startTime();
+ totalStats.indexMetric.inc(took);
+ totalStats.indexCurrent.dec();
+ StatsHolder typeStats = typeStats(create.type());
+ typeStats.indexMetric.inc(took);
+ typeStats.indexCurrent.dec();
+ slowLog.postCreate(create, took);
+ for (IndexingOperationListener listener : listeners) {
+ try {
+ listener.postCreate(create);
+ } catch (Exception e) {
+ logger.warn("post listener [{}] failed", e, listener);
+ }
+ }
+ }
+
+ public Engine.Index preIndex(Engine.Index index) {
+ totalStats.indexCurrent.inc();
+ typeStats(index.type()).indexCurrent.inc();
+ for (IndexingOperationListener listener : listeners) {
+ index = listener.preIndex(index);
+ }
+ return index;
+ }
+
+ public void postIndexUnderLock(Engine.Index index) {
+ for (IndexingOperationListener listener : listeners) {
+ try {
+ listener.postIndexUnderLock(index);
+ } catch (Exception e) {
+ logger.warn("post listener [{}] failed", e, listener);
+ }
+ }
+ }
+
+ public void postIndex(Engine.Index index) {
+ long took = index.endTime() - index.startTime();
+ totalStats.indexMetric.inc(took);
+ totalStats.indexCurrent.dec();
+ StatsHolder typeStats = typeStats(index.type());
+ typeStats.indexMetric.inc(took);
+ typeStats.indexCurrent.dec();
+ slowLog.postIndex(index, took);
+ for (IndexingOperationListener listener : listeners) {
+ try {
+ listener.postIndex(index);
+ } catch (Exception e) {
+ logger.warn("post listener [{}] failed", e, listener);
+ }
+ }
+ }
+
+ public void failedIndex(Engine.Index index) {
+ totalStats.indexCurrent.dec();
+ typeStats(index.type()).indexCurrent.dec();
+ }
+
+ public Engine.Delete preDelete(Engine.Delete delete) {
+ totalStats.deleteCurrent.inc();
+ typeStats(delete.type()).deleteCurrent.inc();
+ for (IndexingOperationListener listener : listeners) {
+ delete = listener.preDelete(delete);
+ }
+ return delete;
+ }
+
+ public void postDeleteUnderLock(Engine.Delete delete) {
+ for (IndexingOperationListener listener : listeners) {
+ try {
+ listener.postDeleteUnderLock(delete);
+ } catch (Exception e) {
+ logger.warn("post listener [{}] failed", e, listener);
+ }
+ }
+ }
+
+ public void postDelete(Engine.Delete delete) {
+ long took = delete.endTime() - delete.startTime();
+ totalStats.deleteMetric.inc(took);
+ totalStats.deleteCurrent.dec();
+ StatsHolder typeStats = typeStats(delete.type());
+ typeStats.deleteMetric.inc(took);
+ typeStats.deleteCurrent.dec();
+ for (IndexingOperationListener listener : listeners) {
+ try {
+ listener.postDelete(delete);
+ } catch (Exception e) {
+ logger.warn("post listener [{}] failed", e, listener);
+ }
+ }
+ }
+
+ public void failedDelete(Engine.Delete delete) {
+ totalStats.deleteCurrent.dec();
+ typeStats(delete.type()).deleteCurrent.dec();
+ }
+
+ public Engine.DeleteByQuery preDeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
+ for (IndexingOperationListener listener : listeners) {
+ deleteByQuery = listener.preDeleteByQuery(deleteByQuery);
+ }
+ return deleteByQuery;
+ }
+
+ public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
+ for (IndexingOperationListener listener : listeners) {
+ listener.postDeleteByQuery(deleteByQuery);
+ }
+ }
+
+ public void clear() {
+ totalStats.clear();
+ synchronized (this) {
+ if (!typesStats.isEmpty()) {
+ MapBuilder<String, StatsHolder> typesStatsBuilder = MapBuilder.newMapBuilder();
+ for (Map.Entry<String, StatsHolder> typeStats : typesStats.entrySet()) {
+ if (typeStats.getValue().totalCurrent() > 0) {
+ typeStats.getValue().clear();
+ typesStatsBuilder.put(typeStats.getKey(), typeStats.getValue());
+ }
+ }
+ typesStats = typesStatsBuilder.immutableMap();
+ }
+ }
+ }
+
+ private StatsHolder typeStats(String type) {
+ StatsHolder stats = typesStats.get(type);
+ if (stats == null) {
+ synchronized (this) {
+ stats = typesStats.get(type);
+ if (stats == null) {
+ stats = new StatsHolder();
+ typesStats = MapBuilder.newMapBuilder(typesStats).put(type, stats).immutableMap();
+ }
+ }
+ }
+ return stats;
+ }
+
+ static class StatsHolder {
+ public final MeanMetric indexMetric = new MeanMetric();
+ public final MeanMetric deleteMetric = new MeanMetric();
+ public final CounterMetric indexCurrent = new CounterMetric();
+ public final CounterMetric deleteCurrent = new CounterMetric();
+
+ public IndexingStats.Stats stats() {
+ return new IndexingStats.Stats(
+ indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(),
+ deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count());
+ }
+
+ public long totalCurrent() {
+ return indexCurrent.count() + deleteCurrent.count();
+ }
+
+ public void clear() {
+ indexMetric.clear();
+ deleteMetric.clear();
+ }
+ }
+}
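+// Usage sketch (illustrative only), reusing the hypothetical CountingListener from
+// IndexingOperationListener above:
+//
+//   shardIndexingService.addListener(new CountingListener());
+//   IndexingStats stats = shardIndexingService.stats("_all"); // totals plus per-type stats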
diff --git a/src/main/java/org/elasticsearch/index/indexing/slowlog/ShardSlowLogIndexingService.java b/src/main/java/org/elasticsearch/index/indexing/slowlog/ShardSlowLogIndexingService.java
new file mode 100644
index 0000000..183e223
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/indexing/slowlog/ShardSlowLogIndexingService.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.indexing.slowlog;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public class ShardSlowLogIndexingService extends AbstractIndexShardComponent {
+
+ private boolean reformat;
+
+ private long indexWarnThreshold;
+ private long indexInfoThreshold;
+ private long indexDebugThreshold;
+ private long indexTraceThreshold;
+
+ private String level;
+
+ private final ESLogger indexLogger;
+ private final ESLogger deleteLogger;
+
+ public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN = "index.indexing.slowlog.threshold.index.warn";
+ public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO = "index.indexing.slowlog.threshold.index.info";
+ public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG = "index.indexing.slowlog.threshold.index.debug";
+ public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE = "index.indexing.slowlog.threshold.index.trace";
+ public static final String INDEX_INDEXING_SLOWLOG_REFORMAT = "index.indexing.slowlog.reformat";
+ public static final String INDEX_INDEXING_SLOWLOG_LEVEL = "index.indexing.slowlog.level";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public synchronized void onRefreshSettings(Settings settings) {
+ long indexWarnThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexWarnThreshold)).nanos();
+ if (indexWarnThreshold != ShardSlowLogIndexingService.this.indexWarnThreshold) {
+ ShardSlowLogIndexingService.this.indexWarnThreshold = indexWarnThreshold;
+ }
+ long indexInfoThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexInfoThreshold)).nanos();
+ if (indexInfoThreshold != ShardSlowLogIndexingService.this.indexInfoThreshold) {
+ ShardSlowLogIndexingService.this.indexInfoThreshold = indexInfoThreshold;
+ }
+ long indexDebugThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexDebugThreshold)).nanos();
+ if (indexDebugThreshold != ShardSlowLogIndexingService.this.indexDebugThreshold) {
+ ShardSlowLogIndexingService.this.indexDebugThreshold = indexDebugThreshold;
+ }
+ long indexTraceThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexTraceThreshold)).nanos();
+ if (indexTraceThreshold != ShardSlowLogIndexingService.this.indexTraceThreshold) {
+ ShardSlowLogIndexingService.this.indexTraceThreshold = indexTraceThreshold;
+ }
+
+ String level = settings.get(INDEX_INDEXING_SLOWLOG_LEVEL, ShardSlowLogIndexingService.this.level);
+ if (!level.equals(ShardSlowLogIndexingService.this.level)) {
+ ShardSlowLogIndexingService.this.indexLogger.setLevel(level.toUpperCase(Locale.ROOT));
+ ShardSlowLogIndexingService.this.deleteLogger.setLevel(level.toUpperCase(Locale.ROOT));
+ ShardSlowLogIndexingService.this.level = level;
+ }
+
+ boolean reformat = settings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, ShardSlowLogIndexingService.this.reformat);
+ if (reformat != ShardSlowLogIndexingService.this.reformat) {
+ ShardSlowLogIndexingService.this.reformat = reformat;
+ }
+ }
+ }
+
+ @Inject
+ public ShardSlowLogIndexingService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService) {
+ super(shardId, indexSettings);
+
+ this.reformat = componentSettings.getAsBoolean("reformat", true);
+
+ this.indexWarnThreshold = componentSettings.getAsTime("threshold.index.warn", TimeValue.timeValueNanos(-1)).nanos();
+ this.indexInfoThreshold = componentSettings.getAsTime("threshold.index.info", TimeValue.timeValueNanos(-1)).nanos();
+ this.indexDebugThreshold = componentSettings.getAsTime("threshold.index.debug", TimeValue.timeValueNanos(-1)).nanos();
+ this.indexTraceThreshold = componentSettings.getAsTime("threshold.index.trace", TimeValue.timeValueNanos(-1)).nanos();
+
+ this.level = componentSettings.get("level", "TRACE").toUpperCase(Locale.ROOT);
+
+ this.indexLogger = Loggers.getLogger(logger, ".index");
+ this.deleteLogger = Loggers.getLogger(logger, ".delete");
+
+ indexLogger.setLevel(level);
+ deleteLogger.setLevel(level);
+
+ indexSettingsService.addListener(new ApplySettings());
+ }
+
+ public void postIndex(Engine.Index index, long tookInNanos) {
+ postIndexing(index.parsedDoc(), tookInNanos);
+ }
+
+ public void postCreate(Engine.Create create, long tookInNanos) {
+ postIndexing(create.parsedDoc(), tookInNanos);
+ }
+
+ private void postIndexing(ParsedDocument doc, long tookInNanos) {
+ if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
+ indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat));
+ } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) {
+ indexLogger.info("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat));
+ } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) {
+ indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat));
+ } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) {
+ indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat));
+ }
+ }
+
+ public static class SlowLogParsedDocumentPrinter {
+ private final ParsedDocument doc;
+ private final long tookInNanos;
+ private final boolean reformat;
+
+ public SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat) {
+ this.doc = doc;
+ this.tookInNanos = tookInNanos;
+ this.reformat = reformat;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ");
+ sb.append("type[").append(doc.type()).append("], ");
+ sb.append("id[").append(doc.id()).append("], ");
+ if (doc.routing() == null) {
+ sb.append("routing[], ");
+ } else {
+ sb.append("routing[").append(doc.routing()).append("], ");
+ }
+ if (doc.source() != null && doc.source().length() > 0) {
+ try {
+ sb.append("source[").append(XContentHelper.convertToJson(doc.source(), reformat)).append("]");
+ } catch (IOException e) {
+ sb.append("source[_failed_to_convert_]");
+ }
+ } else {
+ sb.append("source[]");
+ }
+ return sb.toString();
+ }
+ }
+}
\ No newline at end of file
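postIndexing checks the warn, info, debug, and trace thresholds from most to least severe, so one slow operation is logged at exactly one level, and a threshold of -1 nanoseconds disables that level entirely. A small sketch of the same dispatch, returning the level as a string instead of making a logger call (the class and field names are illustrative):

    // First matching enabled threshold wins; -1 disables a level.
    class SlowLogDispatch {
        long warnNanos = -1, infoNanos = -1, debugNanos = -1, traceNanos = -1;

        String levelFor(long tookInNanos) {
            if (warnNanos >= 0 && tookInNanos > warnNanos) return "WARN";
            if (infoNanos >= 0 && tookInNanos > infoNanos) return "INFO";
            if (debugNanos >= 0 && tookInNanos > debugNanos) return "DEBUG";
            if (traceNanos >= 0 && tookInNanos > traceNanos) return "TRACE";
            return null; // faster than all enabled thresholds: not logged
        }
    }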
diff --git a/src/main/java/org/elasticsearch/index/mapper/ContentPath.java b/src/main/java/org/elasticsearch/index/mapper/ContentPath.java
new file mode 100644
index 0000000..0d58b3f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/ContentPath.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+/**
+ *
+ */
+public class ContentPath {
+
+ public static enum Type {
+ JUST_NAME,
+ FULL,
+ }
+
+ private Type pathType;
+
+ private final char delimiter;
+
+ private final StringBuilder sb;
+
+ private final int offset;
+
+ private int index = 0;
+
+ private String[] path = new String[10];
+
+ private String sourcePath;
+
+ public ContentPath() {
+ this(0);
+ }
+
+ /**
+ * Constructs a JSON path with an offset. The first <tt>offset</tt> path
+ * elements are excluded from {@link #pathAsText(String)}.
+ */
+ public ContentPath(int offset) {
+ this.delimiter = '.';
+ this.sb = new StringBuilder();
+ this.offset = offset;
+ reset();
+ }
+
+ public void reset() {
+ this.index = 0;
+ this.sourcePath = null;
+ }
+
+ public void add(String name) {
+ path[index++] = name;
+ if (index == path.length) { // expand if needed
+ String[] newPath = new String[path.length + 10];
+ System.arraycopy(path, 0, newPath, 0, path.length);
+ path = newPath;
+ }
+ }
+
+ public void remove() {
+ path[--index] = null; // null out the last element that was added, not the unused slot past it
+ }
+
+ public String pathAsText(String name) {
+ if (pathType == Type.JUST_NAME) {
+ return name;
+ }
+ return fullPathAsText(name);
+ }
+
+ public String fullPathAsText(String name) {
+ sb.setLength(0);
+ for (int i = offset; i < index; i++) {
+ sb.append(path[i]).append(delimiter);
+ }
+ sb.append(name);
+ return sb.toString();
+ }
+
+ public Type pathType() {
+ return pathType;
+ }
+
+ public void pathType(Type type) {
+ this.pathType = type;
+ }
+
+ public String sourcePath(String sourcePath) {
+ String orig = this.sourcePath;
+ this.sourcePath = sourcePath;
+ return orig;
+ }
+
+ public String sourcePath() {
+ return this.sourcePath;
+ }
+}
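A short usage sketch of ContentPath for a nested field, assuming the class above is on the classpath (the field names are made up):

    import org.elasticsearch.index.mapper.ContentPath;

    public class ContentPathDemo {
        public static void main(String[] args) {
            ContentPath path = new ContentPath();              // offset 0
            path.add("user");
            path.add("address");
            // elements are joined with '.' -> "user.address.city"
            System.out.println(path.fullPathAsText("city"));
            path.remove();                                     // pop "address"
            System.out.println(path.fullPathAsText("name"));   // "user.name"
        }
    }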
diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java
new file mode 100644
index 0000000..41d8c07
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.UnmodifiableIterator;
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.index.analysis.FieldNameAnalyzer;
+
+import java.util.Set;
+
+/**
+ *
+ */
+public class DocumentFieldMappers implements Iterable<FieldMapper> {
+
+ private final DocumentMapper docMapper;
+ private final FieldMappersLookup fieldMappers;
+
+ private volatile FieldNameAnalyzer indexAnalyzer;
+ private volatile FieldNameAnalyzer searchAnalyzer;
+ private volatile FieldNameAnalyzer searchQuoteAnalyzer;
+
+ public DocumentFieldMappers(DocumentMapper docMapper) {
+ this.docMapper = docMapper;
+ this.fieldMappers = new FieldMappersLookup();
+ this.indexAnalyzer = new FieldNameAnalyzer(ImmutableOpenMap.<String, Analyzer>of(), docMapper.indexAnalyzer());
+ this.searchAnalyzer = new FieldNameAnalyzer(ImmutableOpenMap.<String, Analyzer>of(), docMapper.searchAnalyzer());
+ this.searchQuoteAnalyzer = new FieldNameAnalyzer(ImmutableOpenMap.<String, Analyzer>of(), docMapper.searchQuotedAnalyzer());
+ }
+
+ public void addNewMappers(Iterable<FieldMapper> newMappers) {
+ fieldMappers.addNewMappers(newMappers);
+
+ final ImmutableOpenMap.Builder<String, Analyzer> indexAnalyzers = ImmutableOpenMap.builder(this.indexAnalyzer.analyzers());
+ final ImmutableOpenMap.Builder<String, Analyzer> searchAnalyzers = ImmutableOpenMap.builder(this.searchAnalyzer.analyzers());
+ final ImmutableOpenMap.Builder<String, Analyzer> searchQuoteAnalyzers = ImmutableOpenMap.builder(this.searchQuoteAnalyzer.analyzers());
+
+ for (FieldMapper fieldMapper : newMappers) {
+ if (fieldMapper.indexAnalyzer() != null) {
+ indexAnalyzers.put(fieldMapper.names().indexName(), fieldMapper.indexAnalyzer());
+ }
+ if (fieldMapper.searchAnalyzer() != null) {
+ searchAnalyzers.put(fieldMapper.names().indexName(), fieldMapper.searchAnalyzer());
+ }
+ if (fieldMapper.searchQuoteAnalyzer() != null) {
+ searchQuoteAnalyzers.put(fieldMapper.names().indexName(), fieldMapper.searchQuoteAnalyzer());
+ }
+ }
+
+ this.indexAnalyzer = new FieldNameAnalyzer(indexAnalyzers.build(), docMapper.indexAnalyzer());
+ this.searchAnalyzer = new FieldNameAnalyzer(searchAnalyzers.build(), docMapper.searchAnalyzer());
+ this.searchQuoteAnalyzer = new FieldNameAnalyzer(searchQuoteAnalyzers.build(), docMapper.searchQuotedAnalyzer());
+ }
+
+ @Override
+ public UnmodifiableIterator<FieldMapper> iterator() {
+ return fieldMappers.iterator();
+ }
+
+ public ImmutableList<FieldMapper> mappers() {
+ return this.fieldMappers.mappers();
+ }
+
+ public boolean hasMapper(FieldMapper fieldMapper) {
+ return fieldMappers.mappers().contains(fieldMapper);
+ }
+
+ public FieldMappers name(String name) {
+ return fieldMappers.name(name);
+ }
+
+ public FieldMappers indexName(String indexName) {
+ return fieldMappers.indexName(indexName);
+ }
+
+ public FieldMappers fullName(String fullName) {
+ return fieldMappers.fullName(fullName);
+ }
+
+ public Set<String> simpleMatchToIndexNames(String pattern) {
+ return fieldMappers.simpleMatchToIndexNames(pattern);
+ }
+
+ public Set<String> simpleMatchToFullName(String pattern) {
+ return fieldMappers.simpleMatchToFullName(pattern);
+ }
+
+ /**
+ * Tries to find first based on {@link #fullName(String)}, then by {@link #indexName(String)}, and last
+ * by {@link #name(String)}.
+ */
+ public FieldMappers smartName(String name) {
+ return fieldMappers.smartName(name);
+ }
+
+ public FieldMapper smartNameFieldMapper(String name) {
+ return fieldMappers.smartNameFieldMapper(name);
+ }
+
+ /**
+ * A smart analyzer used for indexing that takes into account specific analyzers configured
+ * per {@link FieldMapper}.
+ */
+ public Analyzer indexAnalyzer() {
+ return this.indexAnalyzer;
+ }
+
+ /**
+ * A smart analyzer used for indexing that takes into account specific analyzers configured
+ * per {@link FieldMapper} with a custom default analyzer for no explicit field analyzer.
+ */
+ public Analyzer indexAnalyzer(Analyzer defaultAnalyzer) {
+ return new FieldNameAnalyzer(indexAnalyzer.analyzers(), defaultAnalyzer);
+ }
+
+ /**
+ * A smart analyzer used for searching that takes into account specific analyzers configured
+ * per {@link FieldMapper}.
+ */
+ public Analyzer searchAnalyzer() {
+ return this.searchAnalyzer;
+ }
+
+ public Analyzer searchQuoteAnalyzer() {
+ return this.searchQuoteAnalyzer;
+ }
+}
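The three FieldNameAnalyzer instances above resolve an analyzer per indexed field name and fall back to the document-level default when a field has no analyzer of its own. FieldNameAnalyzer itself is not part of this hunk; the class below is a hedged stand-in for the lookup it performs:

    import java.util.Map;
    import org.apache.lucene.analysis.Analyzer;

    // Illustrative stand-in: per-field analyzer with a document-level default.
    class PerFieldAnalyzerLookup {
        private final Map<String, Analyzer> perField; // index name -> analyzer
        private final Analyzer defaultAnalyzer;

        PerFieldAnalyzerLookup(Map<String, Analyzer> perField, Analyzer defaultAnalyzer) {
            this.perField = perField;
            this.defaultAnalyzer = defaultAnalyzer;
        }

        Analyzer analyzerFor(String indexName) {
            Analyzer analyzer = perField.get(indexName);
            return analyzer != null ? analyzer : defaultAnalyzer;
        }
    }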
diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
new file mode 100644
index 0000000..98157f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
@@ -0,0 +1,710 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.CloseableThreadLocal;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.internal.*;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.mapper.object.RootObjectMapper;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ *
+ */
+public class DocumentMapper implements ToXContent {
+
+ /**
+ * A result of a merge.
+ */
+ public static class MergeResult {
+
+ private final String[] conflicts;
+
+ public MergeResult(String[] conflicts) {
+ this.conflicts = conflicts;
+ }
+
+ /**
+ * Does the merge have conflicts or not?
+ */
+ public boolean hasConflicts() {
+ return conflicts.length > 0;
+ }
+
+ /**
+ * The merge conflicts.
+ */
+ public String[] conflicts() {
+ return this.conflicts;
+ }
+ }
+
+ public static class MergeFlags {
+
+ public static MergeFlags mergeFlags() {
+ return new MergeFlags();
+ }
+
+ private boolean simulate = true;
+
+ public MergeFlags() {
+ }
+
+ /**
+ * A simulation run, don't perform actual modifications to the mapping.
+ */
+ public boolean simulate() {
+ return simulate;
+ }
+
+ public MergeFlags simulate(boolean simulate) {
+ this.simulate = simulate;
+ return this;
+ }
+ }
+
+ /**
+ * A listener to be called during the parse process.
+ */
+ public static interface ParseListener<ParseContext> {
+
+ public static final ParseListener EMPTY = new ParseListenerAdapter();
+
+ /**
+ * Called before a field is added to the document. Return <tt>true</tt> to include
+ * it in the document.
+ */
+ boolean beforeFieldAdded(FieldMapper fieldMapper, Field fieldable, ParseContext parseContext);
+ }
+
+ public static class ParseListenerAdapter implements ParseListener {
+
+ @Override
+ public boolean beforeFieldAdded(FieldMapper fieldMapper, Field fieldable, Object parseContext) {
+ return true;
+ }
+ }
+
+ public static class Builder {
+
+ private Map<Class<? extends RootMapper>, RootMapper> rootMappers = new LinkedHashMap<Class<? extends RootMapper>, RootMapper>();
+
+ private NamedAnalyzer indexAnalyzer;
+
+ private NamedAnalyzer searchAnalyzer;
+
+ private NamedAnalyzer searchQuoteAnalyzer;
+
+ private final String index;
+
+ @Nullable
+ private final Settings indexSettings;
+
+ private final RootObjectMapper rootObjectMapper;
+
+ private ImmutableMap<String, Object> meta = ImmutableMap.of();
+
+ private final Mapper.BuilderContext builderContext;
+
+ public Builder(String index, @Nullable Settings indexSettings, RootObjectMapper.Builder builder) {
+ this.index = index;
+ this.indexSettings = indexSettings;
+ this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1));
+ this.rootObjectMapper = builder.build(builderContext);
+ IdFieldMapper idFieldMapper = new IdFieldMapper();
+ if (indexSettings != null) {
+ String idIndexed = indexSettings.get("index.mapping._id.indexed");
+ if (idIndexed != null && Booleans.parseBoolean(idIndexed, false)) {
+ FieldType fieldType = new FieldType(IdFieldMapper.Defaults.FIELD_TYPE);
+ fieldType.setTokenized(false);
+ idFieldMapper = new IdFieldMapper(fieldType);
+ }
+ }
+ // UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination
+ this.rootMappers.put(UidFieldMapper.class, new UidFieldMapper());
+ this.rootMappers.put(IdFieldMapper.class, idFieldMapper);
+ this.rootMappers.put(RoutingFieldMapper.class, new RoutingFieldMapper());
+ // add default mappers, order is important (for example analyzer should come before the rest to set context.analyzer)
+ this.rootMappers.put(SizeFieldMapper.class, new SizeFieldMapper());
+ this.rootMappers.put(IndexFieldMapper.class, new IndexFieldMapper());
+ this.rootMappers.put(SourceFieldMapper.class, new SourceFieldMapper());
+ this.rootMappers.put(TypeFieldMapper.class, new TypeFieldMapper());
+ this.rootMappers.put(AnalyzerMapper.class, new AnalyzerMapper());
+ this.rootMappers.put(AllFieldMapper.class, new AllFieldMapper());
+ this.rootMappers.put(BoostFieldMapper.class, new BoostFieldMapper());
+ this.rootMappers.put(TimestampFieldMapper.class, new TimestampFieldMapper());
+ this.rootMappers.put(TTLFieldMapper.class, new TTLFieldMapper());
+ this.rootMappers.put(VersionFieldMapper.class, new VersionFieldMapper());
+ this.rootMappers.put(ParentFieldMapper.class, new ParentFieldMapper());
+ }
+
+ public Builder meta(ImmutableMap<String, Object> meta) {
+ this.meta = meta;
+ return this;
+ }
+
+ public Builder put(RootMapper.Builder mapper) {
+ RootMapper rootMapper = (RootMapper) mapper.build(builderContext);
+ rootMappers.put(rootMapper.getClass(), rootMapper);
+ return this;
+ }
+
+ public Builder indexAnalyzer(NamedAnalyzer indexAnalyzer) {
+ this.indexAnalyzer = indexAnalyzer;
+ return this;
+ }
+
+ public boolean hasIndexAnalyzer() {
+ return indexAnalyzer != null;
+ }
+
+ public Builder searchAnalyzer(NamedAnalyzer searchAnalyzer) {
+ this.searchAnalyzer = searchAnalyzer;
+ if (this.searchQuoteAnalyzer == null) {
+ this.searchQuoteAnalyzer = searchAnalyzer;
+ }
+ return this;
+ }
+
+ public Builder searchQuoteAnalyzer(NamedAnalyzer searchQuoteAnalyzer) {
+ this.searchQuoteAnalyzer = searchQuoteAnalyzer;
+ return this;
+ }
+
+ public boolean hasSearchAnalyzer() {
+ return searchAnalyzer != null;
+ }
+
+ public boolean hasSearchQuoteAnalyzer() {
+ return searchQuoteAnalyzer != null;
+ }
+
+ public DocumentMapper build(DocumentMapperParser docMapperParser) {
+ Preconditions.checkNotNull(rootObjectMapper, "Mapper builder must have the root object mapper set");
+ return new DocumentMapper(index, indexSettings, docMapperParser, rootObjectMapper, meta,
+ indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer,
+ rootMappers);
+ }
+ }
+
+
+ private CloseableThreadLocal<ParseContext> cache = new CloseableThreadLocal<ParseContext>() {
+ @Override
+ protected ParseContext initialValue() {
+ return new ParseContext(index, indexSettings, docMapperParser, DocumentMapper.this, new ContentPath(0));
+ }
+ };
+
+ public static final String ALLOW_TYPE_WRAPPER = "index.mapping.allow_type_wrapper";
+
+ private final String index;
+
+ private final Settings indexSettings;
+
+ private final String type;
+ private final StringAndBytesText typeText;
+
+ private final DocumentMapperParser docMapperParser;
+
+ private volatile ImmutableMap<String, Object> meta;
+
+ private volatile CompressedString mappingSource;
+
+ private final RootObjectMapper rootObjectMapper;
+
+ private final ImmutableMap<Class<? extends RootMapper>, RootMapper> rootMappers;
+ private final RootMapper[] rootMappersOrdered;
+ private final RootMapper[] rootMappersNotIncludedInObject;
+
+ private final NamedAnalyzer indexAnalyzer;
+
+ private final NamedAnalyzer searchAnalyzer;
+ private final NamedAnalyzer searchQuoteAnalyzer;
+
+ private final DocumentFieldMappers fieldMappers;
+
+ private volatile ImmutableMap<String, ObjectMapper> objectMappers = ImmutableMap.of();
+
+ private final List<FieldMapperListener> fieldMapperListeners = new CopyOnWriteArrayList<FieldMapperListener>();
+
+ private final List<ObjectMapperListener> objectMapperListeners = new CopyOnWriteArrayList<ObjectMapperListener>();
+
+ private boolean hasNestedObjects = false;
+
+ private final Filter typeFilter;
+
+ private final Object mappersMutex = new Object();
+
+ private boolean initMappersAdded = true;
+
+ public DocumentMapper(String index, @Nullable Settings indexSettings, DocumentMapperParser docMapperParser,
+ RootObjectMapper rootObjectMapper,
+ ImmutableMap<String, Object> meta,
+ NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer, NamedAnalyzer searchQuoteAnalyzer,
+ Map<Class<? extends RootMapper>, RootMapper> rootMappers) {
+ this.index = index;
+ this.indexSettings = indexSettings;
+ this.type = rootObjectMapper.name();
+ this.typeText = new StringAndBytesText(this.type);
+ this.docMapperParser = docMapperParser;
+ this.meta = meta;
+ this.rootObjectMapper = rootObjectMapper;
+
+ this.rootMappers = ImmutableMap.copyOf(rootMappers);
+ this.rootMappersOrdered = rootMappers.values().toArray(new RootMapper[rootMappers.values().size()]);
+ List<RootMapper> rootMappersNotIncludedInObjectLst = newArrayList();
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ if (!rootMapper.includeInObject()) {
+ rootMappersNotIncludedInObjectLst.add(rootMapper);
+ }
+ }
+ this.rootMappersNotIncludedInObject = rootMappersNotIncludedInObjectLst.toArray(new RootMapper[rootMappersNotIncludedInObjectLst.size()]);
+
+ this.indexAnalyzer = indexAnalyzer;
+ this.searchAnalyzer = searchAnalyzer;
+ this.searchQuoteAnalyzer = searchQuoteAnalyzer != null ? searchQuoteAnalyzer : searchAnalyzer;
+
+ this.typeFilter = typeMapper().termFilter(type, null);
+
+ if (rootMapper(ParentFieldMapper.class).active()) {
+ // mark the routing field mapper as required
+ rootMapper(RoutingFieldMapper.class).markAsRequired();
+ }
+
+ FieldMapperListener.Aggregator fieldMappersAgg = new FieldMapperListener.Aggregator();
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ if (rootMapper.includeInObject()) {
+ rootObjectMapper.putMapper(rootMapper);
+ } else {
+ if (rootMapper instanceof FieldMapper) {
+ fieldMappersAgg.mappers.add((FieldMapper) rootMapper);
+ }
+ }
+ }
+
+ // now traverse and get all the statically defined ones
+ rootObjectMapper.traverse(fieldMappersAgg);
+
+ this.fieldMappers = new DocumentFieldMappers(this);
+ this.fieldMappers.addNewMappers(fieldMappersAgg.mappers);
+
+ final Map<String, ObjectMapper> objectMappers = Maps.newHashMap();
+ rootObjectMapper.traverse(new ObjectMapperListener() {
+ @Override
+ public void objectMapper(ObjectMapper objectMapper) {
+ objectMappers.put(objectMapper.fullPath(), objectMapper);
+ }
+ });
+ this.objectMappers = ImmutableMap.copyOf(objectMappers);
+ for (ObjectMapper objectMapper : objectMappers.values()) {
+ if (objectMapper.nested().isNested()) {
+ hasNestedObjects = true;
+ }
+ }
+
+ refreshSource();
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public Text typeText() {
+ return this.typeText;
+ }
+
+ public ImmutableMap<String, Object> meta() {
+ return this.meta;
+ }
+
+ public CompressedString mappingSource() {
+ return this.mappingSource;
+ }
+
+ public RootObjectMapper root() {
+ return this.rootObjectMapper;
+ }
+
+ public UidFieldMapper uidMapper() {
+ return rootMapper(UidFieldMapper.class);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ public <T extends RootMapper> T rootMapper(Class<T> type) {
+ return (T) rootMappers.get(type);
+ }
+
+ public TypeFieldMapper typeMapper() {
+ return rootMapper(TypeFieldMapper.class);
+ }
+
+ public SourceFieldMapper sourceMapper() {
+ return rootMapper(SourceFieldMapper.class);
+ }
+
+ public AllFieldMapper allFieldMapper() {
+ return rootMapper(AllFieldMapper.class);
+ }
+
+ public IdFieldMapper idFieldMapper() {
+ return rootMapper(IdFieldMapper.class);
+ }
+
+ public RoutingFieldMapper routingFieldMapper() {
+ return rootMapper(RoutingFieldMapper.class);
+ }
+
+ public ParentFieldMapper parentFieldMapper() {
+ return rootMapper(ParentFieldMapper.class);
+ }
+
+ public TimestampFieldMapper timestampFieldMapper() {
+ return rootMapper(TimestampFieldMapper.class);
+ }
+
+ public TTLFieldMapper TTLFieldMapper() {
+ return rootMapper(TTLFieldMapper.class);
+ }
+
+ public IndexFieldMapper IndexFieldMapper() {
+ return rootMapper(IndexFieldMapper.class);
+ }
+
+ public SizeFieldMapper SizeFieldMapper() {
+ return rootMapper(SizeFieldMapper.class);
+ }
+
+ public BoostFieldMapper boostFieldMapper() {
+ return rootMapper(BoostFieldMapper.class);
+ }
+
+ public Analyzer indexAnalyzer() {
+ return this.indexAnalyzer;
+ }
+
+ public Analyzer searchAnalyzer() {
+ return this.searchAnalyzer;
+ }
+
+ public Analyzer searchQuotedAnalyzer() {
+ return this.searchQuoteAnalyzer;
+ }
+
+ public Filter typeFilter() {
+ return this.typeFilter;
+ }
+
+ public boolean hasNestedObjects() {
+ return hasNestedObjects;
+ }
+
+ public DocumentFieldMappers mappers() {
+ return this.fieldMappers;
+ }
+
+ public ImmutableMap<String, ObjectMapper> objectMappers() {
+ return this.objectMappers;
+ }
+
+ public ParsedDocument parse(BytesReference source) throws MapperParsingException {
+ return parse(SourceToParse.source(source));
+ }
+
+ public ParsedDocument parse(String type, String id, BytesReference source) throws MapperParsingException {
+ return parse(SourceToParse.source(source).type(type).id(id));
+ }
+
+ public ParsedDocument parse(SourceToParse source) throws MapperParsingException {
+ return parse(source, null);
+ }
+
+ public ParsedDocument parse(SourceToParse source, @Nullable ParseListener listener) throws MapperParsingException {
+ ParseContext context = cache.get();
+
+ if (source.type() != null && !source.type().equals(this.type)) {
+ throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + this.type + "]");
+ }
+ source.type(this.type);
+
+ XContentParser parser = source.parser();
+ try {
+ if (parser == null) {
+ parser = XContentHelper.createParser(source.source());
+ }
+ context.reset(parser, new ParseContext.Document(), source, listener);
+ // on a newly created instance of document mapper, we always consider it as new mappers that have been added
+ if (initMappersAdded) {
+ context.setMappingsModified();
+ initMappersAdded = false;
+ }
+
+ // will result in START_OBJECT
+ int countDownTokens = 0;
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new MapperParsingException("Malformed content, must start with an object");
+ }
+ boolean emptyDoc = false;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.END_OBJECT) {
+ // empty doc, we can handle it...
+ emptyDoc = true;
+ } else if (token != XContentParser.Token.FIELD_NAME) {
+ throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist");
+ }
+ // The first field has the same name as the type. Either the type was
+ // provided and the document object lives inside it, or a regular field
+ // just happens to share the type's name. Because of this ambiguity,
+ // wrapping a document in a type is disabled by default, but can be
+ // enabled by setting index.mapping.allow_type_wrapper to true.
+ if (type.equals(parser.currentName()) && indexSettings.getAsBoolean(ALLOW_TYPE_WRAPPER, false)) {
+ parser.nextToken();
+ countDownTokens++;
+ }
+
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ rootMapper.preParse(context);
+ }
+
+ if (!emptyDoc) {
+ rootObjectMapper.parse(context);
+ }
+
+ for (int i = 0; i < countDownTokens; i++) {
+ parser.nextToken();
+ }
+
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ rootMapper.postParse(context);
+ }
+
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ rootMapper.validate(context);
+ }
+ } catch (Throwable e) {
+ // if its already a mapper parsing exception, no need to wrap it...
+ if (e instanceof MapperParsingException) {
+ throw (MapperParsingException) e;
+ }
+
+ // Throw a more meaningful message if the document is empty.
+ if (source.source() != null && source.source().length() == 0) {
+ throw new MapperParsingException("failed to parse, document is empty");
+ }
+
+ throw new MapperParsingException("failed to parse", e);
+ } finally {
+ // only close the parser when its not provided externally
+ if (source.parser() == null && parser != null) {
+ parser.close();
+ }
+ }
+ // reverse the order of docs for nested docs support, parent should be last
+ if (context.docs().size() > 1) {
+ Collections.reverse(context.docs());
+ }
+ // apply doc boost
+ if (context.docBoost() != 1.0f) {
+ Set<String> encounteredFields = Sets.newHashSet();
+ for (ParseContext.Document doc : context.docs()) {
+ encounteredFields.clear();
+ for (IndexableField field : doc) {
+ if (field.fieldType().indexed() && !field.fieldType().omitNorms()) {
+ if (!encounteredFields.contains(field.name())) {
+ ((Field) field).setBoost(context.docBoost() * field.boost());
+ encounteredFields.add(field.name());
+ }
+ }
+ }
+ }
+ }
+
+ ParsedDocument doc = new ParsedDocument(context.uid(), context.version(), context.id(), context.type(), source.routing(), source.timestamp(), source.ttl(), context.docs(), context.analyzer(),
+ context.source(), context.mappingsModified()).parent(source.parent());
+ // reset the context to free up memory
+ context.reset(null, null, null, null);
+ return doc;
+ }
+
+ public void addFieldMappers(Iterable<FieldMapper> fieldMappers) {
+ synchronized (mappersMutex) {
+ this.fieldMappers.addNewMappers(fieldMappers);
+ }
+ for (FieldMapperListener listener : fieldMapperListeners) {
+ listener.fieldMappers(fieldMappers);
+ }
+ }
+
+ public void addFieldMapperListener(FieldMapperListener fieldMapperListener, boolean includeExisting) {
+ fieldMapperListeners.add(fieldMapperListener);
+ if (includeExisting) {
+ traverse(fieldMapperListener);
+ }
+ }
+
+ public void traverse(FieldMapperListener listener) {
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ if (!rootMapper.includeInObject() && rootMapper instanceof FieldMapper) {
+ listener.fieldMapper((FieldMapper) rootMapper);
+ }
+ }
+ rootObjectMapper.traverse(listener);
+ }
+
+ public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
+ addObjectMappers(objectMappers.toArray(new ObjectMapper[objectMappers.size()]));
+ }
+
+ private void addObjectMappers(ObjectMapper... objectMappers) {
+ synchronized (mappersMutex) {
+ MapBuilder<String, ObjectMapper> builder = MapBuilder.newMapBuilder(this.objectMappers);
+ for (ObjectMapper objectMapper : objectMappers) {
+ builder.put(objectMapper.fullPath(), objectMapper);
+ if (objectMapper.nested().isNested()) {
+ hasNestedObjects = true;
+ }
+ }
+ this.objectMappers = builder.immutableMap();
+ }
+ for (ObjectMapperListener objectMapperListener : objectMapperListeners) {
+ objectMapperListener.objectMappers(objectMappers);
+ }
+ }
+
+ public void addObjectMapperListener(ObjectMapperListener objectMapperListener, boolean includeExisting) {
+ objectMapperListeners.add(objectMapperListener);
+ if (includeExisting) {
+ traverse(objectMapperListener);
+ }
+ }
+
+ public void traverse(ObjectMapperListener listener) {
+ rootObjectMapper.traverse(listener);
+ }
+
+ public synchronized MergeResult merge(DocumentMapper mergeWith, MergeFlags mergeFlags) {
+ MergeContext mergeContext = new MergeContext(this, mergeFlags);
+ assert rootMappers.size() == mergeWith.rootMappers.size();
+
+ rootObjectMapper.merge(mergeWith.rootObjectMapper, mergeContext);
+ for (Map.Entry<Class<? extends RootMapper>, RootMapper> entry : rootMappers.entrySet()) {
+ // root mappers included in the root object get merged via the rootObjectMapper above
+ if (entry.getValue().includeInObject()) {
+ continue;
+ }
+ RootMapper mergeWithRootMapper = mergeWith.rootMappers.get(entry.getKey());
+ if (mergeWithRootMapper != null) {
+ entry.getValue().merge(mergeWithRootMapper, mergeContext);
+ }
+ }
+
+ if (!mergeFlags.simulate()) {
+ // let the merged-in meta attributes override the existing ones
+ meta = mergeWith.meta();
+ // update the source of the merged one
+ refreshSource();
+ }
+ return new MergeResult(mergeContext.buildConflicts());
+ }
+
+ public CompressedString refreshSource() throws ElasticsearchGenerationException {
+ try {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, CompressorFactory.defaultCompressor().streamOutput(bStream));
+ builder.startObject();
+ toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.close();
+ return mappingSource = new CompressedString(bStream.bytes());
+ } catch (Exception e) {
+ throw new ElasticsearchGenerationException("failed to serialize source for type [" + type + "]", e);
+ }
+ }
+
+ public void close() {
+ cache.close();
+ rootObjectMapper.close();
+ for (RootMapper rootMapper : rootMappersOrdered) {
+ rootMapper.close();
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ rootObjectMapper.toXContent(builder, params, new ToXContent() {
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (indexAnalyzer != null && searchAnalyzer != null && indexAnalyzer.name().equals(searchAnalyzer.name()) && !indexAnalyzer.name().startsWith("_")) {
+ if (!indexAnalyzer.name().equals("default")) {
+ // same analyzers, output it once
+ builder.field("analyzer", indexAnalyzer.name());
+ }
+ } else {
+ if (indexAnalyzer != null && !indexAnalyzer.name().startsWith("_")) {
+ if (!indexAnalyzer.name().equals("default")) {
+ builder.field("index_analyzer", indexAnalyzer.name());
+ }
+ }
+ if (searchAnalyzer != null && !searchAnalyzer.name().startsWith("_")) {
+ if (!searchAnalyzer.name().equals("default")) {
+ builder.field("search_analyzer", searchAnalyzer.name());
+ }
+ }
+ }
+
+ if (meta != null && !meta.isEmpty()) {
+ builder.field("_meta", meta());
+ }
+ return builder;
+ }
+ // no need to pass here id and boost, since they are added to the root object mapper
+ // in the constructor
+ }, rootMappersNotIncludedInObject);
+ return builder;
+ }
+}
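A hedged usage sketch of the parse flow above; the mapper would come from DocumentMapperParser, and the JSON payload, type, and id are illustrative:

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.index.mapper.DocumentMapper;
    import org.elasticsearch.index.mapper.ParsedDocument;
    import org.elasticsearch.index.mapper.SourceToParse;

    class ParseDemo {
        static ParsedDocument parseOne(DocumentMapper mapper) {
            BytesArray source = new BytesArray("{\"title\":\"hello\"}");
            // the type is validated against the mapper's own type; the id is carried through
            ParsedDocument doc = mapper.parse(SourceToParse.source(source).type(mapper.type()).id("1"));
            // doc holds the parsed Lucene documents; for nested docs the parent comes last
            return doc;
        }
    }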
diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
new file mode 100644
index 0000000..68af75d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.geo.ShapesAvailability;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
+import org.elasticsearch.index.mapper.internal.*;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.mapper.object.RootObjectMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.doc;
+
+/**
+ *
+ */
+public class DocumentMapperParser extends AbstractIndexComponent {
+
+ final AnalysisService analysisService;
+ private final PostingsFormatService postingsFormatService;
+ private final DocValuesFormatService docValuesFormatService;
+ private final SimilarityLookupService similarityLookupService;
+
+ private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser();
+
+ private final Object typeParsersMutex = new Object();
+ private final Version indexVersionCreated;
+
+ private volatile ImmutableMap<String, Mapper.TypeParser> typeParsers;
+ private volatile ImmutableMap<String, Mapper.TypeParser> rootTypeParsers;
+
+ public DocumentMapperParser(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService,
+ PostingsFormatService postingsFormatService, DocValuesFormatService docValuesFormatService,
+ SimilarityLookupService similarityLookupService) {
+ super(index, indexSettings);
+ this.analysisService = analysisService;
+ this.postingsFormatService = postingsFormatService;
+ this.docValuesFormatService = docValuesFormatService;
+ this.similarityLookupService = similarityLookupService;
+ MapBuilder<String, Mapper.TypeParser> typeParsersBuilder = new MapBuilder<String, Mapper.TypeParser>()
+ .put(ByteFieldMapper.CONTENT_TYPE, new ByteFieldMapper.TypeParser())
+ .put(ShortFieldMapper.CONTENT_TYPE, new ShortFieldMapper.TypeParser())
+ .put(IntegerFieldMapper.CONTENT_TYPE, new IntegerFieldMapper.TypeParser())
+ .put(LongFieldMapper.CONTENT_TYPE, new LongFieldMapper.TypeParser())
+ .put(FloatFieldMapper.CONTENT_TYPE, new FloatFieldMapper.TypeParser())
+ .put(DoubleFieldMapper.CONTENT_TYPE, new DoubleFieldMapper.TypeParser())
+ .put(BooleanFieldMapper.CONTENT_TYPE, new BooleanFieldMapper.TypeParser())
+ .put(BinaryFieldMapper.CONTENT_TYPE, new BinaryFieldMapper.TypeParser())
+ .put(DateFieldMapper.CONTENT_TYPE, new DateFieldMapper.TypeParser())
+ .put(IpFieldMapper.CONTENT_TYPE, new IpFieldMapper.TypeParser())
+ .put(StringFieldMapper.CONTENT_TYPE, new StringFieldMapper.TypeParser())
+ .put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser())
+ .put(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser())
+ .put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser())
+ .put(TypeParsers.MULTI_FIELD_CONTENT_TYPE, TypeParsers.multiFieldConverterTypeParser)
+ .put(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser())
+ .put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
+
+ if (ShapesAvailability.JTS_AVAILABLE) {
+ typeParsersBuilder.put(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());
+ }
+
+ typeParsers = typeParsersBuilder.immutableMap();
+
+ rootTypeParsers = new MapBuilder<String, Mapper.TypeParser>()
+ .put(SizeFieldMapper.NAME, new SizeFieldMapper.TypeParser())
+ .put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser())
+ .put(SourceFieldMapper.NAME, new SourceFieldMapper.TypeParser())
+ .put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser())
+ .put(AllFieldMapper.NAME, new AllFieldMapper.TypeParser())
+ .put(AnalyzerMapper.NAME, new AnalyzerMapper.TypeParser())
+ .put(BoostFieldMapper.NAME, new BoostFieldMapper.TypeParser())
+ .put(ParentFieldMapper.NAME, new ParentFieldMapper.TypeParser())
+ .put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser())
+ .put(TimestampFieldMapper.NAME, new TimestampFieldMapper.TypeParser())
+ .put(TTLFieldMapper.NAME, new TTLFieldMapper.TypeParser())
+ .put(UidFieldMapper.NAME, new UidFieldMapper.TypeParser())
+ .put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser())
+ .put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser())
+ .immutableMap();
+ assert indexSettings.get(IndexMetaData.SETTING_UUID) == null // if the UUID is there the index has actually been created, otherwise this might be a test
+ || indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) != null : IndexMetaData.SETTING_VERSION_CREATED + " not set in IndexSettings";
+ indexVersionCreated = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ }
+
+ public void putTypeParser(String type, Mapper.TypeParser typeParser) {
+ synchronized (typeParsersMutex) {
+ typeParsers = new MapBuilder<String, Mapper.TypeParser>(typeParsers)
+ .put(type, typeParser)
+ .immutableMap();
+ }
+ }
+
+ public void putRootTypeParser(String type, Mapper.TypeParser typeParser) {
+ synchronized (typeParsersMutex) {
+ rootTypeParsers = new MapBuilder<String, Mapper.TypeParser>(rootTypeParsers)
+ .put(type, typeParser)
+ .immutableMap();
+ }
+ }
+
+ public Mapper.TypeParser.ParserContext parserContext() {
+ return new Mapper.TypeParser.ParserContext(postingsFormatService, docValuesFormatService, analysisService, similarityLookupService, typeParsers, indexVersionCreated);
+ }
+
+ public DocumentMapper parse(String source) throws MapperParsingException {
+ return parse(null, source);
+ }
+
+ public DocumentMapper parse(@Nullable String type, String source) throws MapperParsingException {
+ return parse(type, source, null);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ public DocumentMapper parse(@Nullable String type, String source, String defaultSource) throws MapperParsingException {
+ Map<String, Object> mapping = null;
+ if (source != null) {
+ Tuple<String, Map<String, Object>> t = extractMapping(type, source);
+ type = t.v1();
+ mapping = t.v2();
+ }
+ if (mapping == null) {
+ mapping = Maps.newHashMap();
+ }
+ return parse(type, mapping, defaultSource);
+ }
+
+ public DocumentMapper parseCompressed(@Nullable String type, CompressedString source) throws MapperParsingException {
+ return parseCompressed(type, source, null);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ public DocumentMapper parseCompressed(@Nullable String type, CompressedString source, String defaultSource) throws MapperParsingException {
+ Map<String, Object> mapping = null;
+ if (source != null) {
+ Map<String, Object> root = XContentHelper.convertToMap(source.compressed(), true).v2();
+ Tuple<String, Map<String, Object>> t = extractMapping(type, root);
+ type = t.v1();
+ mapping = t.v2();
+ }
+ if (mapping == null) {
+ mapping = Maps.newHashMap();
+ }
+ return parse(type, mapping, defaultSource);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private DocumentMapper parse(String type, Map<String, Object> mapping, String defaultSource) throws MapperParsingException {
+ if (type == null) {
+ throw new MapperParsingException("Failed to derive type");
+ }
+
+ if (defaultSource != null) {
+ Tuple<String, Map<String, Object>> t = extractMapping(MapperService.DEFAULT_MAPPING, defaultSource);
+ if (t.v2() != null) {
+ XContentHelper.mergeDefaults(mapping, t.v2());
+ }
+ }
+
+
+ Mapper.TypeParser.ParserContext parserContext = parserContext();
+ DocumentMapper.Builder docBuilder = doc(index.name(), indexSettings, (RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext));
+
+ for (Map.Entry<String, Object> entry : mapping.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+
+ if ("index_analyzer".equals(fieldName)) {
+ NamedAnalyzer analyzer = analysisService.analyzer(fieldNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + fieldNode.toString() + "] not found for index_analyzer setting on root type [" + type + "]");
+ }
+ docBuilder.indexAnalyzer(analyzer);
+ } else if ("search_analyzer".equals(fieldName)) {
+ NamedAnalyzer analyzer = analysisService.analyzer(fieldNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + fieldNode.toString() + "] not found for search_analyzer setting on root type [" + type + "]");
+ }
+ docBuilder.searchAnalyzer(analyzer);
+ } else if ("search_quote_analyzer".equals(fieldName)) {
+ NamedAnalyzer analyzer = analysisService.analyzer(fieldNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + fieldNode.toString() + "] not found for search_analyzer setting on root type [" + type + "]");
+ }
+ docBuilder.searchQuoteAnalyzer(analyzer);
+ } else if ("analyzer".equals(fieldName)) {
+ NamedAnalyzer analyzer = analysisService.analyzer(fieldNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + fieldNode.toString() + "] not found for analyzer setting on root type [" + type + "]");
+ }
+ docBuilder.indexAnalyzer(analyzer);
+ docBuilder.searchAnalyzer(analyzer);
+ } else {
+ Mapper.TypeParser typeParser = rootTypeParsers.get(fieldName);
+ if (typeParser != null) {
+ docBuilder.put(typeParser.parse(fieldName, (Map<String, Object>) fieldNode, parserContext));
+ }
+ }
+ }
+
+ if (!docBuilder.hasIndexAnalyzer()) {
+ docBuilder.indexAnalyzer(analysisService.defaultIndexAnalyzer());
+ }
+ if (!docBuilder.hasSearchAnalyzer()) {
+ docBuilder.searchAnalyzer(analysisService.defaultSearchAnalyzer());
+ }
+ if (!docBuilder.hasSearchQuoteAnalyzer()) {
+ docBuilder.searchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer());
+ }
+
+ ImmutableMap<String, Object> attributes = ImmutableMap.of();
+ if (mapping.containsKey("_meta")) {
+ attributes = ImmutableMap.copyOf((Map<String, Object>) mapping.get("_meta"));
+ }
+ docBuilder.meta(attributes);
+
+ DocumentMapper documentMapper = docBuilder.build(this);
+ // update the source with the generated one
+ documentMapper.refreshSource();
+ return documentMapper;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private Tuple<String, Map<String, Object>> extractMapping(String type, String source) throws MapperParsingException {
+ Map<String, Object> root;
+ try {
+ root = XContentFactory.xContent(source).createParser(source).mapOrderedAndClose();
+ } catch (Exception e) {
+ throw new MapperParsingException("failed to parse mapping definition", e);
+ }
+ return extractMapping(type, root);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private Tuple<String, Map<String, Object>> extractMapping(String type, Map<String, Object> root) throws MapperParsingException {
+ if (root.size() == 0) {
+ // if we don't have any keys throw an exception
+ throw new MapperParsingException("malformed mapping no root object found");
+ }
+
+ String rootName = root.keySet().iterator().next();
+ Tuple<String, Map<String, Object>> mapping;
+ if (type == null || type.equals(rootName)) {
+ mapping = new Tuple<String, Map<String, Object>>(rootName, (Map<String, Object>) root.get(rootName));
+ } else {
+ mapping = new Tuple<String, Map<String, Object>>(type, root);
+ }
+
+ return mapping;
+ }
+}
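extractMapping accepts the mapping either wrapped in its type name or bare: when the root key equals the requested type (or no type was given), the wrapper is unwrapped; otherwise the whole map is taken as the mapping body for the passed-in type. The two accepted shapes, shown as Java string literals with made-up field names:

    class MappingShapes {
        // extractMapping("tweet", WRAPPED) unwraps the "tweet" root key;
        // extractMapping("tweet", BARE) keeps the whole map as the mapping body.
        static final String WRAPPED = "{\"tweet\":{\"properties\":{\"msg\":{\"type\":\"string\"}}}}";
        static final String BARE = "{\"properties\":{\"msg\":{\"type\":\"string\"}}}";
    }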
diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentTypeListener.java b/src/main/java/org/elasticsearch/index/mapper/DocumentTypeListener.java
new file mode 100644
index 0000000..2a65bd3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/DocumentTypeListener.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+/**
+ */
+public interface DocumentTypeListener {
+
+ /**
+ * Invoked just before a new document type is created.
+ *
+ * @param mapper The new document mapper of the type being added
+ */
+ void beforeCreate(DocumentMapper mapper);
+
+ /**
+ * Invoked just after an existing document type has been removed.
+ *
+ * @param mapper The existing document mapper of the type being removed
+ */
+ void afterRemove(DocumentMapper mapper);
+
+}
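A minimal implementation sketch of the listener (the bodies are illustrative; a real listener might invalidate percolator or filter caches instead of printing):

    import org.elasticsearch.index.mapper.DocumentMapper;
    import org.elasticsearch.index.mapper.DocumentTypeListener;

    class LoggingTypeListener implements DocumentTypeListener {
        @Override
        public void beforeCreate(DocumentMapper mapper) {
            System.out.println("type about to be created: " + mapper.type());
        }

        @Override
        public void afterRemove(DocumentMapper mapper) {
            System.out.println("type removed: " + mapper.type());
        }
    }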
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
new file mode 100644
index 0000000..7d7de79
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.base.Strings;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.util.List;
+
+/**
+ *
+ */
+public interface FieldMapper<T> extends Mapper {
+
+ public static final String DOC_VALUES_FORMAT = "doc_values_format";
+
+ public static class Names {
+
+ private final String name;
+
+ private final String indexName;
+
+ private final String indexNameClean;
+
+ private final String fullName;
+
+ private final String sourcePath;
+
+ public Names(String name) {
+ this(name, name, name, name);
+ }
+
+ public Names(String name, String indexName, String indexNameClean, String fullName) {
+ this(name, indexName, indexNameClean, fullName, fullName);
+ }
+
+ public Names(String name, String indexName, String indexNameClean, String fullName, @Nullable String sourcePath) {
+ this.name = name.intern();
+ this.indexName = indexName.intern();
+ this.indexNameClean = indexNameClean.intern();
+ this.fullName = fullName.intern();
+ this.sourcePath = sourcePath == null ? this.fullName : sourcePath.intern();
+ }
+
+ /**
+ * The logical name of the field.
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * The indexed name of the field. This is the name under which we will
+ * store it in the index.
+ */
+ public String indexName() {
+ return indexName;
+ }
+
+ /**
+         * The cleaned index name, before any "path" modifications are applied to it.
+ */
+ public String indexNameClean() {
+ return indexNameClean;
+ }
+
+ /**
+ * The full name, including dot path.
+ */
+ public String fullName() {
+ return fullName;
+ }
+
+ /**
+ * The dot path notation to extract the value from source.
+ */
+ public String sourcePath() {
+ return sourcePath;
+ }
+
+ /**
+ * Creates a new index term based on the provided value.
+ */
+ public Term createIndexNameTerm(String value) {
+ return new Term(indexName, value);
+ }
+
+ /**
+ * Creates a new index term based on the provided value.
+ */
+ public Term createIndexNameTerm(BytesRef value) {
+ return new Term(indexName, value);
+ }
+ }
+
+ public static enum Loading {
+ LAZY {
+ @Override
+ public String toString() {
+ return LAZY_VALUE;
+ }
+ },
+ EAGER {
+ @Override
+ public String toString() {
+ return EAGER_VALUE;
+ }
+ };
+
+ public static final String KEY = "loading";
+ public static final String EAGER_VALUE = "eager";
+ public static final String LAZY_VALUE = "lazy";
+
+ public static Loading parse(String loading, Loading defaultValue) {
+ if (Strings.isNullOrEmpty(loading)) {
+ return defaultValue;
+ } else if (EAGER_VALUE.equalsIgnoreCase(loading)) {
+ return EAGER;
+ } else if (LAZY_VALUE.equalsIgnoreCase(loading)) {
+ return LAZY;
+ } else {
+ throw new MapperParsingException("Unknown [" + KEY + "] value: [" + loading + "]");
+ }
+ }
+
+ }
+
+ Names names();
+
+ FieldType fieldType();
+
+ float boost();
+
+ /**
+ * The analyzer that will be used to index the field.
+ */
+ Analyzer indexAnalyzer();
+
+ /**
+ * The analyzer that will be used to search the field.
+ */
+ Analyzer searchAnalyzer();
+
+ /**
+ * The analyzer that will be used for quoted search on the field.
+ */
+ Analyzer searchQuoteAnalyzer();
+
+ /**
+     * The similarity used for scoring queries on the field.
+ */
+ SimilarityProvider similarity();
+
+ /**
+     * The list of fields that this field's values should be copied to.
+ */
+ public AbstractFieldMapper.CopyTo copyTo();
+
+ /**
+ * Returns the actual value of the field.
+ */
+ T value(Object value);
+
+ /**
+     * Returns the value that will be used as a search result. Only a limited set of types may be returned.
+ */
+ Object valueForSearch(Object value);
+
+ /**
+     * Returns the indexed form of the value, used to construct search terms.
+ */
+ BytesRef indexedValueForSearch(Object value);
+
+ /**
+     * Whether {@link #termQuery(Object, org.elasticsearch.index.query.QueryParseContext)} should be used
+     * when this field is encountered in a query string.
+ */
+ boolean useTermQueryWithQueryString();
+
+ Query termQuery(Object value, @Nullable QueryParseContext context);
+
+ Filter termFilter(Object value, @Nullable QueryParseContext context);
+
+ Filter termsFilter(List values, @Nullable QueryParseContext context);
+
+ Filter termsFilter(IndexFieldDataService fieldData, List values, @Nullable QueryParseContext context);
+
+ Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context);
+
+ Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context);
+
+ Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions);
+
+ Query prefixQuery(Object value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context);
+
+ Filter prefixFilter(Object value, @Nullable QueryParseContext context);
+
+ Query regexpQuery(Object value, int flags, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context);
+
+ Filter regexpFilter(Object value, int flags, @Nullable QueryParseContext parseContext);
+
+ /**
+ * A term query to use when parsing a query string. Can return <tt>null</tt>.
+ */
+ @Nullable
+ Query queryStringTermQuery(Term term);
+
+ /**
+ * Null value filter, returns <tt>null</tt> if there is no null value associated with the field.
+ */
+ @Nullable
+ Filter nullValueFilter();
+
+ FieldDataType fieldDataType();
+
+ PostingsFormatProvider postingsFormatProvider();
+
+ DocValuesFormatProvider docValuesFormatProvider();
+
+ boolean isNumeric();
+
+ boolean isSortable();
+
+ boolean hasDocValues();
+
+ Loading normsLoading(Loading defaultLoading);
+}
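Two details of this interface are easy to miss: the Names constructors cascade defaults (sourcePath falls back to the full name), and Loading.parse is case-insensitive with an explicit default. A brief sketch, using illustrative field names:

    FieldMapper.Names flat = new FieldMapper.Names("title");
    // name == indexName == indexNameClean == fullName == sourcePath == "title"

    FieldMapper.Names nested = new FieldMapper.Names("title", "title", "title", "book.title");
    // sourcePath was not given, so it defaults to the full name "book.title"

    Term term = flat.createIndexNameTerm("elasticsearch"); // terms are built against the index name

    FieldMapper.Loading eager = FieldMapper.Loading.parse("EAGER", FieldMapper.Loading.LAZY); // case-insensitive
    FieldMapper.Loading lazy = FieldMapper.Loading.parse(null, FieldMapper.Loading.LAZY);     // null/empty -> default
    // any other input throws MapperParsingException("Unknown [loading] value: ...")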
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java
new file mode 100644
index 0000000..9c5605a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A callback invoked for each {@link FieldMapper} encountered while traversing a mapping.
+ */
+public abstract class FieldMapperListener {
+
+ public static class Aggregator extends FieldMapperListener {
+ public final List<FieldMapper> mappers = new ArrayList<FieldMapper>();
+
+ @Override
+ public void fieldMapper(FieldMapper fieldMapper) {
+ mappers.add(fieldMapper);
+ }
+ }
+
+ public abstract void fieldMapper(FieldMapper fieldMapper);
+
+ public void fieldMappers(Iterable<FieldMapper> fieldMappers) {
+ for (FieldMapper mapper : fieldMappers) {
+ fieldMapper(mapper);
+ }
+ }
+}
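The Aggregator is meant to be handed to Mapper#traverse to collect every field mapper of a document type, which is exactly how MapperService.merge() uses it later in this diff. A sketch, where 'docMapper' is an assumed, already-built DocumentMapper:

    FieldMapperListener.Aggregator aggregator = new FieldMapperListener.Aggregator();
    docMapper.traverse(aggregator);               // invokes fieldMapper(...) once per field
    for (FieldMapper mapper : aggregator.mappers) {
        System.out.println(mapper.names().fullName());
    }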
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java b/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java
new file mode 100644
index 0000000..4ea2b86
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * An immutable holder for several {@link FieldMapper}s.
+ */
+public class FieldMappers implements Iterable<FieldMapper> {
+
+ private final FieldMapper[] fieldMappers;
+ private final List<FieldMapper> fieldMappersAsList;
+
+ public FieldMappers() {
+ this.fieldMappers = new FieldMapper[0];
+ this.fieldMappersAsList = Arrays.asList(fieldMappers);
+ }
+
+ public FieldMappers(FieldMapper fieldMapper) {
+ this.fieldMappers = new FieldMapper[]{fieldMapper};
+ this.fieldMappersAsList = Arrays.asList(this.fieldMappers);
+ }
+
+ private FieldMappers(FieldMapper[] fieldMappers) {
+ this.fieldMappers = fieldMappers;
+ this.fieldMappersAsList = Arrays.asList(this.fieldMappers);
+ }
+
+ public FieldMapper mapper() {
+ if (fieldMappers.length == 0) {
+ return null;
+ }
+ return fieldMappers[0];
+ }
+
+ public boolean isEmpty() {
+ return fieldMappers.length == 0;
+ }
+
+ public List<FieldMapper> mappers() {
+ return this.fieldMappersAsList;
+ }
+
+ @Override
+ public Iterator<FieldMapper> iterator() {
+ return fieldMappersAsList.iterator();
+ }
+
+ /**
+     * Appends the given mapper and returns a new {@link FieldMappers}; this instance is left unchanged.
+ */
+ public FieldMappers concat(FieldMapper mapper) {
+ FieldMapper[] newMappers = new FieldMapper[fieldMappers.length + 1];
+ System.arraycopy(fieldMappers, 0, newMappers, 0, fieldMappers.length);
+ newMappers[fieldMappers.length] = mapper;
+ return new FieldMappers(newMappers);
+ }
+
+ public FieldMappers remove(FieldMapper mapper) {
+ ArrayList<FieldMapper> list = new ArrayList<FieldMapper>(fieldMappers.length);
+ for (FieldMapper fieldMapper : fieldMappers) {
+            if (!fieldMapper.equals(mapper)) { // identity equality
+ list.add(fieldMapper);
+ }
+ }
+ return new FieldMappers(list.toArray(new FieldMapper[list.size()]));
+ }
+}
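Note the copy-on-write semantics: concat() and remove() never mutate the receiver but return a fresh holder. A sketch, where 'someMapper' is an assumed existing FieldMapper:

    FieldMappers empty = new FieldMappers();
    FieldMappers one = empty.concat(someMapper);

    assert empty.isEmpty();                    // the original holder is unchanged
    assert one.mapper() == someMapper;         // mapper() returns the first entry, or null when empty
    assert one.remove(someMapper).isEmpty();   // removal also yields a new holder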
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java b/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java
new file mode 100644
index 0000000..0549ca9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Sets;
+import com.google.common.collect.UnmodifiableIterator;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.regex.Regex;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * A lookup structure that indexes field mappers by name, index name, and full name.
+ */
+public class FieldMappersLookup implements Iterable<FieldMapper> {
+
+ private volatile ImmutableList<FieldMapper> mappers;
+ private volatile ImmutableOpenMap<String, FieldMappers> name;
+ private volatile ImmutableOpenMap<String, FieldMappers> indexName;
+ private volatile ImmutableOpenMap<String, FieldMappers> fullName;
+
+ public FieldMappersLookup() {
+ this.mappers = ImmutableList.of();
+ this.fullName = ImmutableOpenMap.of();
+ this.name = ImmutableOpenMap.of();
+ this.indexName = ImmutableOpenMap.of();
+ }
+
+ /**
+ * Adds a new set of mappers.
+ */
+ public void addNewMappers(Iterable<FieldMapper> newMappers) {
+ final ImmutableOpenMap.Builder<String, FieldMappers> tempName = ImmutableOpenMap.builder(name);
+ final ImmutableOpenMap.Builder<String, FieldMappers> tempIndexName = ImmutableOpenMap.builder(indexName);
+ final ImmutableOpenMap.Builder<String, FieldMappers> tempFullName = ImmutableOpenMap.builder(fullName);
+
+ for (FieldMapper fieldMapper : newMappers) {
+ FieldMappers mappers = tempName.get(fieldMapper.names().name());
+ if (mappers == null) {
+ mappers = new FieldMappers(fieldMapper);
+ } else {
+ mappers = mappers.concat(fieldMapper);
+ }
+ tempName.put(fieldMapper.names().name(), mappers);
+
+ mappers = tempIndexName.get(fieldMapper.names().indexName());
+ if (mappers == null) {
+ mappers = new FieldMappers(fieldMapper);
+ } else {
+ mappers = mappers.concat(fieldMapper);
+ }
+ tempIndexName.put(fieldMapper.names().indexName(), mappers);
+
+ mappers = tempFullName.get(fieldMapper.names().fullName());
+ if (mappers == null) {
+ mappers = new FieldMappers(fieldMapper);
+ } else {
+ mappers = mappers.concat(fieldMapper);
+ }
+ tempFullName.put(fieldMapper.names().fullName(), mappers);
+ }
+ this.mappers = ImmutableList.<FieldMapper>builder().addAll(this.mappers).addAll(newMappers).build();
+ this.name = tempName.build();
+ this.indexName = tempIndexName.build();
+ this.fullName = tempFullName.build();
+ }
+
+ /**
+     * Removes the given set of mappers.
+ */
+ public void removeMappers(Iterable<FieldMapper> mappersToRemove) {
+ List<FieldMapper> tempMappers = new ArrayList<FieldMapper>(this.mappers);
+ ImmutableOpenMap.Builder<String, FieldMappers> tempName = ImmutableOpenMap.builder(this.name);
+ ImmutableOpenMap.Builder<String, FieldMappers> tempIndexName = ImmutableOpenMap.builder(this.indexName);
+ ImmutableOpenMap.Builder<String, FieldMappers> tempFullName = ImmutableOpenMap.builder(this.fullName);
+
+ for (FieldMapper mapper : mappersToRemove) {
+ FieldMappers mappers = tempName.get(mapper.names().name());
+ if (mappers != null) {
+ mappers = mappers.remove(mapper);
+ if (mappers.isEmpty()) {
+ tempName.remove(mapper.names().name());
+ } else {
+ tempName.put(mapper.names().name(), mappers);
+ }
+ }
+
+ mappers = tempIndexName.get(mapper.names().indexName());
+ if (mappers != null) {
+ mappers = mappers.remove(mapper);
+ if (mappers.isEmpty()) {
+ tempIndexName.remove(mapper.names().indexName());
+ } else {
+ tempIndexName.put(mapper.names().indexName(), mappers);
+ }
+ }
+
+ mappers = tempFullName.get(mapper.names().fullName());
+ if (mappers != null) {
+ mappers = mappers.remove(mapper);
+ if (mappers.isEmpty()) {
+ tempFullName.remove(mapper.names().fullName());
+ } else {
+ tempFullName.put(mapper.names().fullName(), mappers);
+ }
+ }
+
+ tempMappers.remove(mapper);
+ }
+
+ this.mappers = ImmutableList.copyOf(tempMappers);
+ this.name = tempName.build();
+ this.indexName = tempIndexName.build();
+ this.fullName = tempFullName.build();
+ }
+
+ @Override
+ public UnmodifiableIterator<FieldMapper> iterator() {
+ return mappers.iterator();
+ }
+
+ /**
+ * The list of all mappers.
+ */
+ public ImmutableList<FieldMapper> mappers() {
+ return this.mappers;
+ }
+
+ /**
+ * Is there a mapper (based on unique {@link FieldMapper} identity)?
+ */
+ public boolean hasMapper(FieldMapper fieldMapper) {
+ return mappers.contains(fieldMapper);
+ }
+
+ /**
+ * Returns the field mappers based on the mapper name.
+ */
+ public FieldMappers name(String name) {
+ return this.name.get(name);
+ }
+
+ /**
+ * Returns the field mappers based on the mapper index name.
+ */
+ public FieldMappers indexName(String indexName) {
+ return this.indexName.get(indexName);
+ }
+
+ /**
+ * Returns the field mappers based on the mapper full name.
+ */
+ public FieldMappers fullName(String fullName) {
+ return this.fullName.get(fullName);
+ }
+
+ /**
+     * Returns the index names of all mappers whose full name, index name, or name matches the given simple (wildcard) pattern.
+ */
+ public Set<String> simpleMatchToIndexNames(String pattern) {
+ Set<String> fields = Sets.newHashSet();
+ for (FieldMapper fieldMapper : mappers) {
+ if (Regex.simpleMatch(pattern, fieldMapper.names().fullName())) {
+ fields.add(fieldMapper.names().indexName());
+ } else if (Regex.simpleMatch(pattern, fieldMapper.names().indexName())) {
+ fields.add(fieldMapper.names().indexName());
+ } else if (Regex.simpleMatch(pattern, fieldMapper.names().name())) {
+ fields.add(fieldMapper.names().indexName());
+ }
+ }
+ return fields;
+ }
+
+ /**
+     * Returns the full names of all mappers whose full name, index name, or name matches the given simple (wildcard) pattern.
+ */
+ public Set<String> simpleMatchToFullName(String pattern) {
+ Set<String> fields = Sets.newHashSet();
+ for (FieldMapper fieldMapper : mappers) {
+ if (Regex.simpleMatch(pattern, fieldMapper.names().fullName())) {
+ fields.add(fieldMapper.names().fullName());
+ } else if (Regex.simpleMatch(pattern, fieldMapper.names().indexName())) {
+ fields.add(fieldMapper.names().fullName());
+ } else if (Regex.simpleMatch(pattern, fieldMapper.names().name())) {
+ fields.add(fieldMapper.names().fullName());
+ }
+ }
+ return fields;
+ }
+
+ /**
+     * Tries to find mappers first by {@link #fullName(String)}, then by {@link #indexName(String)}, and last
+     * by {@link #name(String)}.
+ */
+ @Nullable
+ public FieldMappers smartName(String name) {
+ FieldMappers fieldMappers = fullName(name);
+ if (fieldMappers != null) {
+ return fieldMappers;
+ }
+ fieldMappers = indexName(name);
+ if (fieldMappers != null) {
+ return fieldMappers;
+ }
+ return name(name);
+ }
+
+ /**
+     * Tries to find mappers first by {@link #fullName(String)}, then by {@link #indexName(String)}, and last
+     * by {@link #name(String)}, returning the first mapper of the result (see {@link org.elasticsearch.index.mapper.FieldMappers#mapper()}).
+ */
+ @Nullable
+ public FieldMapper smartNameFieldMapper(String name) {
+ FieldMappers fieldMappers = smartName(name);
+ if (fieldMappers == null) {
+ return null;
+ }
+ return fieldMappers.mapper();
+ }
+}
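A sketch of the lookup's resolution behavior, assuming a single mapper 'titleMapper' whose name and index name are "title" and whose full name is "book.title":

    FieldMappersLookup lookup = new FieldMappersLookup();
    lookup.addNewMappers(java.util.Collections.<FieldMapper>singletonList(titleMapper));

    lookup.smartName("book.title");            // resolved via fullName(...)
    lookup.smartName("title");                 // falls through to indexName(...), then name(...)
    lookup.simpleMatchToIndexNames("book.*");  // wildcard hit on the full name; yields {"title"}
    lookup.smartNameFieldMapper("missing");    // no match at any level -> null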
diff --git a/src/main/java/org/elasticsearch/index/mapper/InternalMapper.java b/src/main/java/org/elasticsearch/index/mapper/InternalMapper.java
new file mode 100644
index 0000000..e9faab7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/InternalMapper.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+/**
+ * A marker interface for internal mappers.
+ */
+public interface InternalMapper {
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/src/main/java/org/elasticsearch/index/mapper/Mapper.java
new file mode 100644
index 0000000..6b1d65d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * The base interface for all mappers: a named, parseable, mergeable, and traversable
+ * unit of a mapping.
+ */
+public interface Mapper extends ToXContent {
+
+ public static final Mapper[] EMPTY_ARRAY = new Mapper[0];
+
+ public static class BuilderContext {
+ private final Settings indexSettings;
+ private final ContentPath contentPath;
+
+ public BuilderContext(@Nullable Settings indexSettings, ContentPath contentPath) {
+ this.contentPath = contentPath;
+ this.indexSettings = indexSettings;
+ }
+
+ public ContentPath path() {
+ return this.contentPath;
+ }
+
+ @Nullable
+ public Settings indexSettings() {
+ return this.indexSettings;
+ }
+
+ @Nullable
+ public Version indexCreatedVersion() {
+ if (indexSettings == null) {
+ return null;
+ }
+ return indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
+ }
+ }
+
+ public static abstract class Builder<T extends Builder, Y extends Mapper> {
+
+ public String name;
+
+ protected T builder;
+
+ protected Builder(String name) {
+ this.name = name;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public abstract Y build(BuilderContext context);
+ }
+
+ public interface TypeParser {
+
+ public static class ParserContext {
+
+ private final PostingsFormatService postingsFormatService;
+
+ private final DocValuesFormatService docValuesFormatService;
+
+ private final AnalysisService analysisService;
+
+ private final SimilarityLookupService similarityLookupService;
+
+ private final ImmutableMap<String, TypeParser> typeParsers;
+
+ private final Version indexVersionCreated;
+
+ public ParserContext(PostingsFormatService postingsFormatService, DocValuesFormatService docValuesFormatService,
+ AnalysisService analysisService, SimilarityLookupService similarityLookupService,
+ ImmutableMap<String, TypeParser> typeParsers, Version indexVersionCreated) {
+ this.postingsFormatService = postingsFormatService;
+ this.docValuesFormatService = docValuesFormatService;
+ this.analysisService = analysisService;
+ this.similarityLookupService = similarityLookupService;
+ this.typeParsers = typeParsers;
+ this.indexVersionCreated = indexVersionCreated;
+ }
+
+ public AnalysisService analysisService() {
+ return analysisService;
+ }
+
+ public PostingsFormatService postingFormatService() {
+ return postingsFormatService;
+ }
+
+ public DocValuesFormatService docValuesFormatService() {
+ return docValuesFormatService;
+ }
+
+ public SimilarityLookupService similarityLookupService() {
+ return similarityLookupService;
+ }
+
+ public TypeParser typeParser(String type) {
+ return typeParsers.get(Strings.toUnderscoreCase(type));
+ }
+
+ public Version indexVersionCreated() {
+ return indexVersionCreated;
+ }
+ }
+
+ Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException;
+ }
+
+ String name();
+
+ void parse(ParseContext context) throws IOException;
+
+ void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException;
+
+ void traverse(FieldMapperListener fieldMapperListener);
+
+ void traverse(ObjectMapperListener objectMapperListener);
+
+ void close();
+}
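The TypeParser contract turns the raw, Map-based mapping definition into a builder. A minimal sketch of a custom parser; the class name is illustrative, and the delegation assumes the MapperBuilders factory defined a couple of files below:

    public class EchoTypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder<?, ?> parse(String name, java.util.Map<String, Object> node,
                                          ParserContext parserContext) throws MapperParsingException {
            // A real parser would inspect 'node' (the mapping definition) and use the
            // parserContext services (analyzers, similarities, other type parsers) here.
            return MapperBuilders.stringField(name);
        }
    }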
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java b/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java
new file mode 100644
index 0000000..0cf098f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
+import org.elasticsearch.index.mapper.internal.*;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.mapper.object.RootObjectMapper;
+
+/**
+ * Static factory methods for creating the various mapper builders.
+ */
+public final class MapperBuilders {
+
+    private MapperBuilders() {
+    }
+
+ public static DocumentMapper.Builder doc(String index, RootObjectMapper.Builder objectBuilder) {
+ return new DocumentMapper.Builder(index, null, objectBuilder);
+ }
+
+ public static DocumentMapper.Builder doc(String index, @Nullable Settings settings, RootObjectMapper.Builder objectBuilder) {
+ return new DocumentMapper.Builder(index, settings, objectBuilder);
+ }
+
+ public static SourceFieldMapper.Builder source() {
+ return new SourceFieldMapper.Builder();
+ }
+
+ public static IdFieldMapper.Builder id() {
+ return new IdFieldMapper.Builder();
+ }
+
+ public static RoutingFieldMapper.Builder routing() {
+ return new RoutingFieldMapper.Builder();
+ }
+
+ public static UidFieldMapper.Builder uid() {
+ return new UidFieldMapper.Builder();
+ }
+
+ public static SizeFieldMapper.Builder size() {
+ return new SizeFieldMapper.Builder();
+ }
+
+ public static VersionFieldMapper.Builder version() {
+ return new VersionFieldMapper.Builder();
+ }
+
+ public static TypeFieldMapper.Builder type() {
+ return new TypeFieldMapper.Builder();
+ }
+
+ public static IndexFieldMapper.Builder index() {
+ return new IndexFieldMapper.Builder();
+ }
+
+ public static TimestampFieldMapper.Builder timestamp() {
+ return new TimestampFieldMapper.Builder();
+ }
+
+ public static TTLFieldMapper.Builder ttl() {
+ return new TTLFieldMapper.Builder();
+ }
+
+ public static ParentFieldMapper.Builder parent() {
+ return new ParentFieldMapper.Builder();
+ }
+
+ public static BoostFieldMapper.Builder boost(String name) {
+ return new BoostFieldMapper.Builder(name);
+ }
+
+ public static AllFieldMapper.Builder all() {
+ return new AllFieldMapper.Builder();
+ }
+
+ public static AnalyzerMapper.Builder analyzer() {
+ return new AnalyzerMapper.Builder();
+ }
+
+ public static RootObjectMapper.Builder rootObject(String name) {
+ return new RootObjectMapper.Builder(name);
+ }
+
+ public static ObjectMapper.Builder object(String name) {
+ return new ObjectMapper.Builder(name);
+ }
+
+ public static BooleanFieldMapper.Builder booleanField(String name) {
+ return new BooleanFieldMapper.Builder(name);
+ }
+
+ public static StringFieldMapper.Builder stringField(String name) {
+ return new StringFieldMapper.Builder(name);
+ }
+
+ public static BinaryFieldMapper.Builder binaryField(String name) {
+ return new BinaryFieldMapper.Builder(name);
+ }
+
+ public static DateFieldMapper.Builder dateField(String name) {
+ return new DateFieldMapper.Builder(name);
+ }
+
+ public static IpFieldMapper.Builder ipField(String name) {
+ return new IpFieldMapper.Builder(name);
+ }
+
+ public static ShortFieldMapper.Builder shortField(String name) {
+ return new ShortFieldMapper.Builder(name);
+ }
+
+ public static ByteFieldMapper.Builder byteField(String name) {
+ return new ByteFieldMapper.Builder(name);
+ }
+
+ public static IntegerFieldMapper.Builder integerField(String name) {
+ return new IntegerFieldMapper.Builder(name);
+ }
+
+ public static TokenCountFieldMapper.Builder tokenCountField(String name) {
+ return new TokenCountFieldMapper.Builder(name);
+ }
+
+ public static LongFieldMapper.Builder longField(String name) {
+ return new LongFieldMapper.Builder(name);
+ }
+
+ public static FloatFieldMapper.Builder floatField(String name) {
+ return new FloatFieldMapper.Builder(name);
+ }
+
+ public static DoubleFieldMapper.Builder doubleField(String name) {
+ return new DoubleFieldMapper.Builder(name);
+ }
+
+ public static GeoPointFieldMapper.Builder geoPointField(String name) {
+ return new GeoPointFieldMapper.Builder(name);
+ }
+
+ public static GeoShapeFieldMapper.Builder geoShapeField(String name) {
+ return new GeoShapeFieldMapper.Builder(name);
+ }
+
+ public static CompletionFieldMapper.Builder completionField(String name) {
+ return new CompletionFieldMapper.Builder(name);
+ }
+}
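A sketch of composing a document mapping from these factories; the add() chaining is assumed from the ObjectMapper.Builder API elsewhere in this codebase:

    DocumentMapper.Builder builder = MapperBuilders.doc("my_index",
            MapperBuilders.rootObject("tweet")
                    .add(MapperBuilders.stringField("message"))
                    .add(MapperBuilders.dateField("posted"))
                    .add(MapperBuilders.geoPointField("location")));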
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java b/src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java
new file mode 100644
index 0000000..93034e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperCompressionException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+/**
+ * Thrown when compressing or decompressing mapping source fails.
+ */
+public class MapperCompressionException extends MapperException {
+
+ public MapperCompressionException(String message) {
+ super(message);
+ }
+
+ public MapperCompressionException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperException.java b/src/main/java/org/elasticsearch/index/mapper/MapperException.java
new file mode 100644
index 0000000..be8886d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * The base class for all mapper related exceptions.
+ */
+public class MapperException extends ElasticsearchException {
+
+ public MapperException(String message) {
+ super(message);
+ }
+
+ public MapperException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java b/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java
new file mode 100644
index 0000000..9c180b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Thrown when a mapping definition cannot be parsed; reported to clients as a 400 (bad request).
+ */
+public class MapperParsingException extends MapperException {
+
+ public MapperParsingException(String message) {
+ super(message);
+ }
+
+ public MapperParsingException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
new file mode 100755
index 0000000..97273aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -0,0 +1,1068 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.google.common.base.Charsets;
+import com.google.common.collect.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.AnalyzerWrapper;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.FailedToResolveConfigException;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.indices.InvalidTypeNameException;
+import org.elasticsearch.indices.TypeMissingException;
+import org.elasticsearch.percolator.PercolatorService;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+
+/**
+ * Manages the document and field mappers of a single index.
+ */
+public class MapperService extends AbstractIndexComponent implements Iterable<DocumentMapper> {
+
+ public static final String DEFAULT_MAPPING = "_default_";
+ private static ObjectOpenHashSet<String> META_FIELDS = ObjectOpenHashSet.from(
+ "_uid", "_id", "_type", "_all", "_analyzer", "_boost", "_parent", "_routing", "_index",
+ "_size", "_timestamp", "_ttl"
+ );
+
+ private final AnalysisService analysisService;
+ private final IndexFieldDataService fieldDataService;
+
+ /**
+     * Whether types will be created automatically if they do not yet exist in the mapping definition.
+ */
+ private final boolean dynamic;
+
+ private volatile String defaultMappingSource;
+ private volatile String defaultPercolatorMappingSource;
+
+ private volatile Map<String, DocumentMapper> mappers = ImmutableMap.of();
+
+ private final Object typeMutex = new Object();
+ private final Object mappersMutex = new Object();
+
+ private final FieldMappersLookup fieldMappers = new FieldMappersLookup();
+ private volatile ImmutableOpenMap<String, ObjectMappers> fullPathObjectMappers = ImmutableOpenMap.of();
+ private boolean hasNested = false; // updated dynamically to true when a nested object is added
+
+ private final DocumentMapperParser documentParser;
+
+ private final InternalFieldMapperListener fieldMapperListener = new InternalFieldMapperListener();
+ private final InternalObjectMapperListener objectMapperListener = new InternalObjectMapperListener();
+
+ private final SmartIndexNameSearchAnalyzer searchAnalyzer;
+ private final SmartIndexNameSearchQuoteAnalyzer searchQuoteAnalyzer;
+
+ private final List<DocumentTypeListener> typeListeners = new CopyOnWriteArrayList<DocumentTypeListener>();
+
+ @Inject
+ public MapperService(Index index, @IndexSettings Settings indexSettings, Environment environment, AnalysisService analysisService, IndexFieldDataService fieldDataService,
+ PostingsFormatService postingsFormatService, DocValuesFormatService docValuesFormatService, SimilarityLookupService similarityLookupService) {
+ super(index, indexSettings);
+ this.analysisService = analysisService;
+ this.fieldDataService = fieldDataService;
+ this.documentParser = new DocumentMapperParser(index, indexSettings, analysisService, postingsFormatService, docValuesFormatService, similarityLookupService);
+ this.searchAnalyzer = new SmartIndexNameSearchAnalyzer(analysisService.defaultSearchAnalyzer());
+ this.searchQuoteAnalyzer = new SmartIndexNameSearchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer());
+
+ this.dynamic = componentSettings.getAsBoolean("dynamic", true);
+ String defaultMappingLocation = componentSettings.get("default_mapping_location");
+ URL defaultMappingUrl;
+ if (defaultMappingLocation == null) {
+ try {
+ defaultMappingUrl = environment.resolveConfig("default-mapping.json");
+ } catch (FailedToResolveConfigException e) {
+ // not there, default to the built in one
+ defaultMappingUrl = indexSettings.getClassLoader().getResource("org/elasticsearch/index/mapper/default-mapping.json");
+ if (defaultMappingUrl == null) {
+ defaultMappingUrl = MapperService.class.getClassLoader().getResource("org/elasticsearch/index/mapper/default-mapping.json");
+ }
+ }
+ } else {
+ try {
+ defaultMappingUrl = environment.resolveConfig(defaultMappingLocation);
+ } catch (FailedToResolveConfigException e) {
+ // not there, default to the built in one
+ try {
+ defaultMappingUrl = new File(defaultMappingLocation).toURI().toURL();
+ } catch (MalformedURLException e1) {
+                    throw new FailedToResolveConfigException("Failed to resolve default mapping location [" + defaultMappingLocation + "]");
+ }
+ }
+ }
+
+ if (defaultMappingUrl == null) {
+ logger.info("failed to find default-mapping.json in the classpath, using the default template");
+ defaultMappingSource = "{\n" +
+ " \"_default_\":{\n" +
+ " }\n" +
+ "}";
+ } else {
+ try {
+ defaultMappingSource = Streams.copyToString(new InputStreamReader(defaultMappingUrl.openStream(), Charsets.UTF_8));
+ } catch (IOException e) {
+ throw new MapperException("Failed to load default mapping source from [" + defaultMappingLocation + "]", e);
+ }
+ }
+
+ String percolatorMappingLocation = componentSettings.get("default_percolator_mapping_location");
+ URL percolatorMappingUrl = null;
+ if (percolatorMappingLocation != null) {
+ try {
+ percolatorMappingUrl = environment.resolveConfig(percolatorMappingLocation);
+ } catch (FailedToResolveConfigException e) {
+ // not there, default to the built in one
+ try {
+ percolatorMappingUrl = new File(percolatorMappingLocation).toURI().toURL();
+ } catch (MalformedURLException e1) {
+ throw new FailedToResolveConfigException("Failed to resolve default percolator mapping location [" + percolatorMappingLocation + "]");
+ }
+ }
+ }
+ if (percolatorMappingUrl != null) {
+ try {
+ defaultPercolatorMappingSource = Streams.copyToString(new InputStreamReader(percolatorMappingUrl.openStream(), Charsets.UTF_8));
+ } catch (IOException e) {
+ throw new MapperException("Failed to load default percolator mapping source from [" + percolatorMappingUrl + "]", e);
+ }
+ } else {
+ defaultPercolatorMappingSource = "{\n" +
+ //" \"" + PercolatorService.TYPE_NAME + "\":{\n" +
+ " \"" + "_default_" + "\":{\n" +
+ " \"_id\" : {\"index\": \"not_analyzed\"}," +
+ " \"properties\" : {\n" +
+ " \"query\" : {\n" +
+ " \"type\" : \"object\",\n" +
+ " \"enabled\" : false\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}";
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}] and source[{}], default percolator mapping: location[{}], loaded_from[{}] and source[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, defaultMappingSource, percolatorMappingLocation, percolatorMappingUrl, defaultPercolatorMappingSource);
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}], default percolator mapping: location[{}], loaded_from[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, percolatorMappingLocation, percolatorMappingUrl);
+ }
+ }
+
+ public void close() {
+ for (DocumentMapper documentMapper : mappers.values()) {
+ documentMapper.close();
+ }
+ }
+
+ public boolean hasNested() {
+ return this.hasNested;
+ }
+
+ @Override
+ public UnmodifiableIterator<DocumentMapper> iterator() {
+ return Iterators.unmodifiableIterator(mappers.values().iterator());
+ }
+
+ public AnalysisService analysisService() {
+ return this.analysisService;
+ }
+
+ public DocumentMapperParser documentMapperParser() {
+ return this.documentParser;
+ }
+
+ public void addTypeListener(DocumentTypeListener listener) {
+ typeListeners.add(listener);
+ }
+
+ public void removeTypeListener(DocumentTypeListener listener) {
+ typeListeners.remove(listener);
+ }
+
+ public DocumentMapper merge(String type, CompressedString mappingSource, boolean applyDefault) {
+ if (DEFAULT_MAPPING.equals(type)) {
+ // verify we can parse it
+ DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
+ // still add it as a document mapper so we have it registered and, for example, persisted back into
+ // the cluster meta data if needed, or checked for existence
+ synchronized (typeMutex) {
+ mappers = newMapBuilder(mappers).put(type, mapper).map();
+ }
+ try {
+ defaultMappingSource = mappingSource.string();
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("failed to un-compress", e);
+ }
+ return mapper;
+ } else {
+ return merge(parse(type, mappingSource, applyDefault));
+ }
+ }
+
+    // never expose this to the outside world; we need to reparse the doc mapper so that we get fresh
+    // instances of field mappers, which lets an existing doc mapper be removed properly
+ private DocumentMapper merge(DocumentMapper mapper) {
+ synchronized (typeMutex) {
+ if (mapper.type().length() == 0) {
+ throw new InvalidTypeNameException("mapping type name is empty");
+ }
+ if (mapper.type().charAt(0) == '_') {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
+ }
+ if (mapper.type().contains("#")) {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
+ }
+ if (mapper.type().contains(",")) {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
+ }
+ if (mapper.type().contains(".") && !PercolatorService.TYPE_NAME.equals(mapper.type())) {
+ logger.warn("Type [{}] contains a '.', it is recommended not to include it within a type name", mapper.type());
+ }
+ // we can add new field/object mappers while the old ones are there
+ // since we get new instances of those, and when we remove, we remove
+ // by instance equality
+ DocumentMapper oldMapper = mappers.get(mapper.type());
+
+ if (oldMapper != null) {
+ DocumentMapper.MergeResult result = oldMapper.merge(mapper, mergeFlags().simulate(false));
+ if (result.hasConflicts()) {
+ // TODO: What should we do???
+ if (logger.isDebugEnabled()) {
+ logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.conflicts()));
+ }
+ }
+ fieldDataService.onMappingUpdate();
+ return oldMapper;
+ } else {
+ FieldMapperListener.Aggregator fieldMappersAgg = new FieldMapperListener.Aggregator();
+ mapper.traverse(fieldMappersAgg);
+ addFieldMappers(fieldMappersAgg.mappers);
+ mapper.addFieldMapperListener(fieldMapperListener, false);
+
+ ObjectMapperListener.Aggregator objectMappersAgg = new ObjectMapperListener.Aggregator();
+ mapper.traverse(objectMappersAgg);
+ addObjectMappers(objectMappersAgg.mappers.toArray(new ObjectMapper[objectMappersAgg.mappers.size()]));
+ mapper.addObjectMapperListener(objectMapperListener, false);
+
+ for (DocumentTypeListener typeListener : typeListeners) {
+ typeListener.beforeCreate(mapper);
+ }
+ mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
+ return mapper;
+ }
+ }
+ }
+
+ private void addObjectMappers(ObjectMapper[] objectMappers) {
+ synchronized (mappersMutex) {
+ ImmutableOpenMap.Builder<String, ObjectMappers> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
+ for (ObjectMapper objectMapper : objectMappers) {
+ ObjectMappers mappers = fullPathObjectMappers.get(objectMapper.fullPath());
+ if (mappers == null) {
+ mappers = new ObjectMappers(objectMapper);
+ } else {
+ mappers = mappers.concat(objectMapper);
+ }
+ fullPathObjectMappers.put(objectMapper.fullPath(), mappers);
+ // update the hasNested flag
+ if (objectMapper.nested().isNested()) {
+ hasNested = true;
+ }
+ }
+ this.fullPathObjectMappers = fullPathObjectMappers.build();
+ }
+ }
+
+ private void addFieldMappers(Iterable<FieldMapper> fieldMappers) {
+ synchronized (mappersMutex) {
+ this.fieldMappers.addNewMappers(fieldMappers);
+ }
+ }
+
+ public void remove(String type) {
+ synchronized (typeMutex) {
+ DocumentMapper docMapper = mappers.get(type);
+ if (docMapper == null) {
+ return;
+ }
+ docMapper.close();
+ mappers = newMapBuilder(mappers).remove(type).map();
+ removeObjectAndFieldMappers(docMapper);
+ for (DocumentTypeListener typeListener : typeListeners) {
+ typeListener.afterRemove(docMapper);
+ }
+ }
+ }
+
+ private void removeObjectAndFieldMappers(DocumentMapper docMapper) {
+ synchronized (mappersMutex) {
+ fieldMappers.removeMappers(docMapper.mappers());
+
+ ImmutableOpenMap.Builder<String, ObjectMappers> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
+ for (ObjectMapper mapper : docMapper.objectMappers().values()) {
+ ObjectMappers mappers = fullPathObjectMappers.get(mapper.fullPath());
+ if (mappers != null) {
+ mappers = mappers.remove(mapper);
+ if (mappers.isEmpty()) {
+ fullPathObjectMappers.remove(mapper.fullPath());
+ } else {
+ fullPathObjectMappers.put(mapper.fullPath(), mappers);
+ }
+ }
+ }
+
+ this.fullPathObjectMappers = fullPathObjectMappers.build();
+ }
+ }
+
+ /**
+     * Parses and returns the mapper without registering it, while still applying the default mapping.
+ */
+ public DocumentMapper parse(String mappingType, CompressedString mappingSource) throws MapperParsingException {
+ return parse(mappingType, mappingSource, true);
+ }
+
+ public DocumentMapper parse(String mappingType, CompressedString mappingSource, boolean applyDefault) throws MapperParsingException {
+ String defaultMappingSource;
+ if (PercolatorService.TYPE_NAME.equals(mappingType)) {
+ defaultMappingSource = this.defaultPercolatorMappingSource;
+ } else {
+ defaultMappingSource = this.defaultMappingSource;
+ }
+ return documentParser.parseCompressed(mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
+ }
+
+ public boolean hasMapping(String mappingType) {
+ return mappers.containsKey(mappingType);
+ }
+
+ public Collection<String> types() {
+ return mappers.keySet();
+ }
+
+ public DocumentMapper documentMapper(String type) {
+ return mappers.get(type);
+ }
+
+ public DocumentMapper documentMapperWithAutoCreate(String type) {
+ DocumentMapper mapper = mappers.get(type);
+ if (mapper != null) {
+ return mapper;
+ }
+ if (!dynamic) {
+ throw new TypeMissingException(index, type, "trying to auto create mapping, but dynamic mapping is disabled");
+ }
+ // go ahead and dynamically create it
+ synchronized (typeMutex) {
+ mapper = mappers.get(type);
+ if (mapper != null) {
+ return mapper;
+ }
+ merge(type, null, true);
+ return mappers.get(type);
+ }
+ }
+
+ /**
+     * Returns a filter to apply to searches over the given types, or <tt>null</tt> if no filter is required.
+ */
+ @Nullable
+ public Filter searchFilter(String... types) {
+ boolean filterPercolateType = hasMapping(PercolatorService.TYPE_NAME);
+ if (types != null && filterPercolateType) {
+ for (String type : types) {
+ if (PercolatorService.TYPE_NAME.equals(type)) {
+ filterPercolateType = false;
+ break;
+ }
+ }
+ }
+ Filter excludePercolatorType = null;
+ if (filterPercolateType) {
+ excludePercolatorType = new NotFilter(documentMapper(PercolatorService.TYPE_NAME).typeFilter());
+ }
+
+ if (types == null || types.length == 0) {
+ if (hasNested && filterPercolateType) {
+ return new AndFilter(ImmutableList.of(excludePercolatorType, NonNestedDocsFilter.INSTANCE));
+ } else if (hasNested) {
+ return NonNestedDocsFilter.INSTANCE;
+ } else if (filterPercolateType) {
+ return excludePercolatorType;
+ } else {
+ return null;
+ }
+ }
+ // if we filter by types, we don't need to filter by non nested docs
+ // since they have different types (starting with __)
+ if (types.length == 1) {
+ DocumentMapper docMapper = documentMapper(types[0]);
+ if (docMapper == null) {
+ return new TermFilter(new Term(types[0]));
+ }
+ return docMapper.typeFilter();
+ }
+ // see if we can use terms filter
+ boolean useTermsFilter = true;
+ for (String type : types) {
+ DocumentMapper docMapper = documentMapper(type);
+ if (docMapper == null) {
+ useTermsFilter = false;
+ break;
+ }
+ if (!docMapper.typeMapper().fieldType().indexed()) {
+ useTermsFilter = false;
+ break;
+ }
+ }
+
+ if (useTermsFilter) {
+ BytesRef[] typesBytes = new BytesRef[types.length];
+ for (int i = 0; i < typesBytes.length; i++) {
+ typesBytes[i] = new BytesRef(types[i]);
+ }
+ XTermsFilter termsFilter = new XTermsFilter(TypeFieldMapper.NAME, typesBytes);
+ if (filterPercolateType) {
+ return new AndFilter(ImmutableList.of(excludePercolatorType, termsFilter));
+ } else {
+ return termsFilter;
+ }
+ } else {
+ // Current bool filter requires that at least one should clause matches, even with a must clause.
+ XBooleanFilter bool = new XBooleanFilter();
+ for (String type : types) {
+ DocumentMapper docMapper = documentMapper(type);
+ if (docMapper == null) {
+ bool.add(new FilterClause(new TermFilter(new Term(TypeFieldMapper.NAME, type)), BooleanClause.Occur.SHOULD));
+ } else {
+ bool.add(new FilterClause(docMapper.typeFilter(), BooleanClause.Occur.SHOULD));
+ }
+ }
+ if (filterPercolateType) {
+ bool.add(excludePercolatorType, BooleanClause.Occur.MUST);
+ }
+
+ return bool;
+ }
+ }
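+    // Summary of the strategy above (editorial note, not upstream code):
+    //   - no types given: exclude percolator and/or nested docs as needed, else return null;
+    //   - a single known type: delegate to that DocumentMapper's typeFilter();
+    //   - several types, all with an indexed _type field: one XTermsFilter over the type names;
+    //   - otherwise: an XBooleanFilter of SHOULD clauses per type, with the percolator
+    //     exclusion attached as a MUST clause.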
+
+ /**
+ * Returns {@link FieldMappers} for all the {@link FieldMapper}s that are registered
+ * under the given name across all the different {@link DocumentMapper} types.
+ *
+ * @param name The name to return all the {@link FieldMappers} for across all {@link DocumentMapper}s.
+     * @return All the {@link FieldMappers} across all {@link DocumentMapper}s.
+ */
+ public FieldMappers name(String name) {
+ return fieldMappers.name(name);
+ }
+
+ /**
+ * Returns {@link FieldMappers} for all the {@link FieldMapper}s that are registered
+ * under the given indexName across all the different {@link DocumentMapper} types.
+ *
+ * @param indexName The indexName to return all the {@link FieldMappers} for across all {@link DocumentMapper}s.
+ * @return All the {@link FieldMappers} across all {@link DocumentMapper}s for the given indexName.
+ */
+ public FieldMappers indexName(String indexName) {
+ return fieldMappers.indexName(indexName);
+ }
+
+ /**
+ * Returns the {@link FieldMappers} of all the {@link FieldMapper}s that are
+     * registered under the given fullName across all the different {@link DocumentMapper} types.
+     *
+     * @param fullName The full name
+     * @return All the {@link FieldMappers} across all the {@link DocumentMapper}s for the given fullName.
+ */
+ public FieldMappers fullName(String fullName) {
+ return fieldMappers.fullName(fullName);
+ }
+
+ /**
+     * Returns the object mappers registered under the given full path of the object.
+ */
+ public ObjectMappers objectMapper(String path) {
+ return fullPathObjectMappers.get(path);
+ }
+
+ /**
+ * Returns all the fields that match the given pattern, with an optional narrowing
+ * based on a list of types.
+ */
+ public Set<String> simpleMatchToIndexNames(String pattern, @Nullable String[] types) {
+ if (types == null || types.length == 0) {
+ return simpleMatchToIndexNames(pattern);
+ }
+ if (types.length == 1 && types[0].equals("_all")) {
+ return simpleMatchToIndexNames(pattern);
+ }
+ if (!Regex.isSimpleMatchPattern(pattern)) {
+ return ImmutableSet.of(pattern);
+ }
+ Set<String> fields = Sets.newHashSet();
+ for (String type : types) {
+ DocumentMapper possibleDocMapper = mappers.get(type);
+ if (possibleDocMapper != null) {
+ for (String indexName : possibleDocMapper.mappers().simpleMatchToIndexNames(pattern)) {
+ fields.add(indexName);
+ }
+ }
+ }
+ return fields;
+ }
+
+ /**
+ * Returns all the fields that match the given pattern. If the pattern is prefixed with a type
+ * then the fields will be returned with a type prefix.
+ */
+ public Set<String> simpleMatchToIndexNames(String pattern) {
+ if (!Regex.isSimpleMatchPattern(pattern)) {
+ return ImmutableSet.of(pattern);
+ }
+ int dotIndex = pattern.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = pattern.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ Set<String> typedFields = Sets.newHashSet();
+ for (String indexName : possibleDocMapper.mappers().simpleMatchToIndexNames(pattern)) {
+ typedFields.add(possibleType + "." + indexName);
+ }
+ return typedFields;
+ }
+ }
+ return fieldMappers.simpleMatchToIndexNames(pattern);
+ }
+
+ public SmartNameObjectMapper smartNameObjectMapper(String smartName, @Nullable String[] types) {
+ if (types == null || types.length == 0) {
+ return smartNameObjectMapper(smartName);
+ }
+ if (types.length == 1 && types[0].equals("_all")) {
+ return smartNameObjectMapper(smartName);
+ }
+ for (String type : types) {
+ DocumentMapper possibleDocMapper = mappers.get(type);
+ if (possibleDocMapper != null) {
+ ObjectMapper mapper = possibleDocMapper.objectMappers().get(smartName);
+ if (mapper != null) {
+ return new SmartNameObjectMapper(mapper, possibleDocMapper);
+ }
+ }
+ }
+ // did not find one, see if it's prefixed by type
+ int dotIndex = smartName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = smartName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ String possiblePath = smartName.substring(dotIndex + 1);
+ ObjectMapper mapper = possibleDocMapper.objectMappers().get(possiblePath);
+ if (mapper != null) {
+ return new SmartNameObjectMapper(mapper, possibleDocMapper);
+ }
+ }
+ }
+ // did not explicitly find one under the types provided, or prefixed by type...
+ return null;
+ }
+
+ public SmartNameObjectMapper smartNameObjectMapper(String smartName) {
+ int dotIndex = smartName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = smartName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ String possiblePath = smartName.substring(dotIndex + 1);
+ ObjectMapper mapper = possibleDocMapper.objectMappers().get(possiblePath);
+ if (mapper != null) {
+ return new SmartNameObjectMapper(mapper, possibleDocMapper);
+ }
+ }
+ }
+ ObjectMappers mappers = objectMapper(smartName);
+ if (mappers != null) {
+ return new SmartNameObjectMapper(mappers.mapper(), null);
+ }
+ return null;
+ }
+
+ /**
+ * Same as {@link #smartNameFieldMappers(String)} but returns the first field mapper for it. Returns
+ * <tt>null</tt> if there is none.
+ */
+ public FieldMapper smartNameFieldMapper(String smartName) {
+ FieldMappers fieldMappers = smartNameFieldMappers(smartName);
+ if (fieldMappers != null) {
+ return fieldMappers.mapper();
+ }
+ return null;
+ }
+
+ public FieldMapper smartNameFieldMapper(String smartName, @Nullable String[] types) {
+ FieldMappers fieldMappers = smartNameFieldMappers(smartName, types);
+ if (fieldMappers != null) {
+ return fieldMappers.mapper();
+ }
+ return null;
+ }
+
+ public FieldMappers smartNameFieldMappers(String smartName, @Nullable String[] types) {
+ if (types == null || types.length == 0) {
+ return smartNameFieldMappers(smartName);
+ }
+ for (String type : types) {
+ DocumentMapper documentMapper = mappers.get(type);
+ // we found a mapper
+ if (documentMapper != null) {
+ // see if we find a field for it
+ FieldMappers mappers = documentMapper.mappers().smartName(smartName);
+ if (mappers != null) {
+ return mappers;
+ }
+ }
+ }
+ // did not find explicit field in the type provided, see if it's prefixed with type
+ int dotIndex = smartName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = smartName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ String possibleName = smartName.substring(dotIndex + 1);
+ FieldMappers mappers = possibleDocMapper.mappers().smartName(possibleName);
+ if (mappers != null) {
+ return mappers;
+ }
+ }
+ }
+ // we did not find the field mapping in any of the types, so don't go and try to find
+ // it in other types...
+ return null;
+ }
+
+ /**
+ * Same as {@link #smartName(String)}, except it returns just the field mappers.
+ */
+ public FieldMappers smartNameFieldMappers(String smartName) {
+ int dotIndex = smartName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = smartName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ String possibleName = smartName.substring(dotIndex + 1);
+ FieldMappers mappers = possibleDocMapper.mappers().smartName(possibleName);
+ if (mappers != null) {
+ return mappers;
+ }
+ }
+ }
+ FieldMappers mappers = fullName(smartName);
+ if (mappers != null) {
+ return mappers;
+ }
+ mappers = indexName(smartName);
+ if (mappers != null) {
+ return mappers;
+ }
+ return name(smartName);
+ }
+
+ public SmartNameFieldMappers smartName(String smartName, @Nullable String[] types) {
+ if (types == null || types.length == 0) {
+ return smartName(smartName);
+ }
+ if (types.length == 1 && types[0].equals("_all")) {
+ return smartName(smartName);
+ }
+ for (String type : types) {
+ DocumentMapper documentMapper = mappers.get(type);
+ // we found a mapper
+ if (documentMapper != null) {
+ // see if we find a field for it
+ FieldMappers mappers = documentMapper.mappers().smartName(smartName);
+ if (mappers != null) {
+ return new SmartNameFieldMappers(this, mappers, documentMapper, false);
+ }
+ }
+ }
+ // did not find explicit field in the type provided, see if it's prefixed with type
+ int dotIndex = smartName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = smartName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ String possibleName = smartName.substring(dotIndex + 1);
+ FieldMappers mappers = possibleDocMapper.mappers().smartName(possibleName);
+ if (mappers != null) {
+ return new SmartNameFieldMappers(this, mappers, possibleDocMapper, true);
+ }
+ }
+ }
+ // we did not find the field mapping in any of the types, so don't go and try to find
+ // it in other types...
+ return null;
+ }
+
+ /**
+ * Returns smart field mappers based on a smart name. A smart name is one that can optionally be prefixed
+ * with a type (and then a '.'). If it is, then the {@link MapperService.SmartNameFieldMappers}
+ * will have the doc mapper set.
+ * <p/>
+ * <p>It will also (without the optional type prefix) try to find the {@link FieldMappers} for the specific
+ * name. It will first try to find them based on the full name (with the dots if it is a compound name). If
+ * nothing is found, it will try the indexName (which can be controlled in the mapping),
+ * and last, the name itself.
+ * <p/>
+ * <p>If nothing is found, returns null.
+ */
+ public SmartNameFieldMappers smartName(String smartName) {
+ int dotIndex = smartName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = smartName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ String possibleName = smartName.substring(dotIndex + 1);
+ FieldMappers mappers = possibleDocMapper.mappers().smartName(possibleName);
+ if (mappers != null) {
+ return new SmartNameFieldMappers(this, mappers, possibleDocMapper, true);
+ }
+ }
+ }
+ FieldMappers fieldMappers = fullName(smartName);
+ if (fieldMappers != null) {
+ return new SmartNameFieldMappers(this, fieldMappers, null, false);
+ }
+ fieldMappers = indexName(smartName);
+ if (fieldMappers != null) {
+ return new SmartNameFieldMappers(this, fieldMappers, null, false);
+ }
+ fieldMappers = name(smartName);
+ if (fieldMappers != null) {
+ return new SmartNameFieldMappers(this, fieldMappers, null, false);
+ }
+ return null;
+ }
+
+ public Analyzer searchAnalyzer() {
+ return this.searchAnalyzer;
+ }
+
+ public Analyzer searchQuoteAnalyzer() {
+ return this.searchQuoteAnalyzer;
+ }
+
+ public Analyzer fieldSearchAnalyzer(String field) {
+ return this.searchAnalyzer.getWrappedAnalyzer(field);
+ }
+
+ public Analyzer fieldSearchQuoteAnalyzer(String field) {
+ return this.searchQuoteAnalyzer.getWrappedAnalyzer(field);
+ }
+
+
+ /**
+ * Resolves the closest enclosing {@link ObjectMapper} that is nested.
+ */
+ public ObjectMapper resolveClosestNestedObjectMapper(String fieldName) {
+ int indexOf = fieldName.lastIndexOf('.');
+ if (indexOf == -1) {
+ return null;
+ } else {
+ do {
+ String objectPath = fieldName.substring(0, indexOf);
+ ObjectMappers objectMappers = objectMapper(objectPath);
+ if (objectMappers == null) {
+ return null;
+ }
+
+ if (objectMappers.hasNested()) {
+ for (ObjectMapper objectMapper : objectMappers) {
+ if (objectMapper.nested().isNested()) {
+ return objectMapper;
+ }
+ }
+ }
+
+ indexOf = objectPath.lastIndexOf('.');
+ } while (indexOf != -1);
+ }
+
+ return null;
+ }
+
+ /**
+ * @return Whether a field is a metadata field.
+ */
+ public static boolean isMetadataField(String fieldName) {
+ return META_FIELDS.contains(fieldName);
+ }
+
+ public static class SmartNameObjectMapper {
+ private final ObjectMapper mapper;
+ private final DocumentMapper docMapper;
+
+ public SmartNameObjectMapper(ObjectMapper mapper, @Nullable DocumentMapper docMapper) {
+ this.mapper = mapper;
+ this.docMapper = docMapper;
+ }
+
+ public boolean hasMapper() {
+ return mapper != null;
+ }
+
+ public ObjectMapper mapper() {
+ return mapper;
+ }
+
+ public boolean hasDocMapper() {
+ return docMapper != null;
+ }
+
+ public DocumentMapper docMapper() {
+ return docMapper;
+ }
+ }
+
+ public static class SmartNameFieldMappers {
+ private final MapperService mapperService;
+ private final FieldMappers fieldMappers;
+ private final DocumentMapper docMapper;
+ private final boolean explicitTypeInName;
+
+ public SmartNameFieldMappers(MapperService mapperService, FieldMappers fieldMappers, @Nullable DocumentMapper docMapper, boolean explicitTypeInName) {
+ this.mapperService = mapperService;
+ this.fieldMappers = fieldMappers;
+ this.docMapper = docMapper;
+ this.explicitTypeInName = explicitTypeInName;
+ }
+
+ /**
+ * Has at least one mapper for the field.
+ */
+ public boolean hasMapper() {
+ return !fieldMappers.isEmpty();
+ }
+
+ /**
+ * The first mapper for the smart named field.
+ */
+ public FieldMapper mapper() {
+ return fieldMappers.mapper();
+ }
+
+ /**
+ * All the field mappers for the smart name field.
+ */
+ public FieldMappers fieldMappers() {
+ return fieldMappers;
+ }
+
+ /**
+ * If the smart name was a typed field, with a type that we resolved, will return
+ * <tt>true</tt>.
+ */
+ public boolean hasDocMapper() {
+ return docMapper != null;
+ }
+
+ /**
+ * If the smart name was a typed field, with a type that we resolved, will return
+ * the document mapper for it.
+ */
+ public DocumentMapper docMapper() {
+ return docMapper;
+ }
+
+ /**
+ * Returns <tt>true</tt> if the type is explicitly specified in the name.
+ */
+ public boolean explicitTypeInName() {
+ return this.explicitTypeInName;
+ }
+
+ public boolean explicitTypeInNameWithDocMapper() {
+ return explicitTypeInName && docMapper != null;
+ }
+
+ /**
+ * The best effort search analyzer associated with this field.
+ */
+ public Analyzer searchAnalyzer() {
+ if (hasMapper()) {
+ Analyzer analyzer = mapper().searchAnalyzer();
+ if (analyzer != null) {
+ return analyzer;
+ }
+ }
+ if (docMapper != null && docMapper.searchAnalyzer() != null) {
+ return docMapper.searchAnalyzer();
+ }
+ return mapperService.searchAnalyzer();
+ }
+
+ public Analyzer searchQuoteAnalyzer() {
+ if (hasMapper()) {
+ Analyzer analyzer = mapper().searchQuoteAnalyzer();
+ if (analyzer != null) {
+ return analyzer;
+ }
+ }
+ if (docMapper != null && docMapper.searchQuotedAnalyzer() != null) {
+ return docMapper.searchQuotedAnalyzer();
+ }
+ return mapperService.searchQuoteAnalyzer();
+ }
+ }
+
+ final class SmartIndexNameSearchAnalyzer extends AnalyzerWrapper {
+
+ private final Analyzer defaultAnalyzer;
+
+ SmartIndexNameSearchAnalyzer(Analyzer defaultAnalyzer) {
+ this.defaultAnalyzer = defaultAnalyzer;
+ }
+
+ @Override
+ protected Analyzer getWrappedAnalyzer(String fieldName) {
+ int dotIndex = fieldName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = fieldName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ return possibleDocMapper.mappers().searchAnalyzer();
+ }
+ }
+ FieldMappers mappers = fieldMappers.fullName(fieldName);
+ if (mappers != null && mappers.mapper() != null && mappers.mapper().searchAnalyzer() != null) {
+ return mappers.mapper().searchAnalyzer();
+ }
+
+ mappers = fieldMappers.indexName(fieldName);
+ if (mappers != null && mappers.mapper() != null && mappers.mapper().searchAnalyzer() != null) {
+ return mappers.mapper().searchAnalyzer();
+ }
+ return defaultAnalyzer;
+ }
+
+ @Override
+ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+ return components;
+ }
+ }
+
+ final class SmartIndexNameSearchQuoteAnalyzer extends AnalyzerWrapper {
+
+ private final Analyzer defaultAnalyzer;
+
+ SmartIndexNameSearchQuoteAnalyzer(Analyzer defaultAnalyzer) {
+ this.defaultAnalyzer = defaultAnalyzer;
+ }
+
+ @Override
+ protected Analyzer getWrappedAnalyzer(String fieldName) {
+ int dotIndex = fieldName.indexOf('.');
+ if (dotIndex != -1) {
+ String possibleType = fieldName.substring(0, dotIndex);
+ DocumentMapper possibleDocMapper = mappers.get(possibleType);
+ if (possibleDocMapper != null) {
+ return possibleDocMapper.mappers().searchQuoteAnalyzer();
+ }
+ }
+ FieldMappers mappers = fieldMappers.fullName(fieldName);
+ if (mappers != null && mappers.mapper() != null && mappers.mapper().searchQuoteAnalyzer() != null) {
+ return mappers.mapper().searchQuoteAnalyzer();
+ }
+
+ mappers = fieldMappers.indexName(fieldName);
+ if (mappers != null && mappers.mapper() != null && mappers.mapper().searchQuoteAnalyzer() != null) {
+ return mappers.mapper().searchQuoteAnalyzer();
+ }
+ return defaultAnalyzer;
+ }
+
+ @Override
+ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+ return components;
+ }
+ }
+
+ class InternalFieldMapperListener extends FieldMapperListener {
+ @Override
+ public void fieldMapper(FieldMapper fieldMapper) {
+ addFieldMappers(Arrays.asList(fieldMapper));
+ }
+
+ @Override
+ public void fieldMappers(Iterable<FieldMapper> fieldMappers) {
+ addFieldMappers(fieldMappers);
+ }
+ }
+
+ class InternalObjectMapperListener extends ObjectMapperListener {
+ @Override
+ public void objectMapper(ObjectMapper objectMapper) {
+ addObjectMappers(new ObjectMapper[]{objectMapper});
+ }
+
+ @Override
+ public void objectMappers(ObjectMapper... objectMappers) {
+ addObjectMappers(objectMappers);
+ }
+ }
+}
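
The smart-name chain above resolves in a fixed order: an optional type prefix first, then the full path, then the index name, and finally the plain name. A minimal sketch of how a caller might use it, assuming an already constructed `mapperService` (a hypothetical variable, not part of this patch):

    MapperService.SmartNameFieldMappers smart = mapperService.smartName("user.name");
    if (smart != null && smart.hasMapper()) {
        if (smart.explicitTypeInNameWithDocMapper()) {
            // "user" resolved to a document type, so the type prefix was consumed
            DocumentMapper typeMapper = smart.docMapper();
        }
        // analyzer resolution falls back from field mapper to doc mapper to service default
        Analyzer analyzer = smart.searchAnalyzer();
    }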
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java b/src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java
new file mode 100644
index 0000000..e742992
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperServiceModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class MapperServiceModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(MapperService.class).asEagerSingleton();
+ }
+}
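
This module simply binds MapperService as an eager singleton inside the index-level injector. A sketch of the effect, assuming sibling modules bind the constructor dependencies (index, settings, analysis service, and so on) and using the shaded org.elasticsearch.common.inject.Guice entry point:

    // hypothetical wiring; real construction happens inside the index injector
    Injector injector = Guice.createInjector(new MapperServiceModule() /*, sibling index modules */);
    MapperService mapperService = injector.getInstance(MapperService.class); // same instance every time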
diff --git a/src/main/java/org/elasticsearch/index/mapper/MergeContext.java b/src/main/java/org/elasticsearch/index/mapper/MergeContext.java
new file mode 100644
index 0000000..4c250c2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MergeContext.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ *
+ */
+public class MergeContext {
+
+ private final DocumentMapper documentMapper;
+ private final DocumentMapper.MergeFlags mergeFlags;
+ private final List<String> mergeConflicts = Lists.newArrayList();
+
+ public MergeContext(DocumentMapper documentMapper, DocumentMapper.MergeFlags mergeFlags) {
+ this.documentMapper = documentMapper;
+ this.mergeFlags = mergeFlags;
+ }
+
+ public DocumentMapper docMapper() {
+ return documentMapper;
+ }
+
+ public DocumentMapper.MergeFlags mergeFlags() {
+ return mergeFlags;
+ }
+
+ public void addConflict(String mergeFailure) {
+ mergeConflicts.add(mergeFailure);
+ }
+
+ public boolean hasConflicts() {
+ return !mergeConflicts.isEmpty();
+ }
+
+ public String[] buildConflicts() {
+ return mergeConflicts.toArray(new String[mergeConflicts.size()]);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java b/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java
new file mode 100644
index 0000000..387c92c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.rest.RestStatus;
+
+import java.util.Arrays;
+
+/**
+ *
+ */
+public class MergeMappingException extends MapperException {
+
+ private final String[] failures;
+
+ public MergeMappingException(String[] failures) {
+ super("Merge failed with failures {" + Arrays.toString(failures) + "}");
+ this.failures = failures;
+ }
+
+ public String[] failures() {
+ return failures;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
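
MergeContext and MergeMappingException work as a pair: mappers record conflicts during a merge, and the caller turns any collected conflicts into a request-level failure. A minimal sketch, assuming `existingMapper` and `mergeFlags` are already in hand:

    MergeContext mergeContext = new MergeContext(existingMapper, mergeFlags);
    // ... individual mappers call mergeContext.addConflict("...") while merging ...
    if (mergeContext.hasConflicts()) {
        // status() maps this exception to HTTP 400 Bad Request at the REST layer
        throw new MergeMappingException(mergeContext.buildConflicts());
    }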
diff --git a/src/main/java/org/elasticsearch/index/mapper/ObjectMapperListener.java b/src/main/java/org/elasticsearch/index/mapper/ObjectMapperListener.java
new file mode 100644
index 0000000..657a389
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/ObjectMapperListener.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ *
+ */
+public abstract class ObjectMapperListener {
+
+ public static class Aggregator extends ObjectMapperListener {
+ public final List<ObjectMapper> mappers = new ArrayList<ObjectMapper>();
+
+ @Override
+ public void objectMapper(ObjectMapper objectMapper) {
+ mappers.add(objectMapper);
+ }
+ }
+
+ public abstract void objectMapper(ObjectMapper objectMapper);
+
+ public void objectMappers(ObjectMapper... objectMappers) {
+ for (ObjectMapper objectMapper : objectMappers) {
+ objectMapper(objectMapper);
+ }
+ }
+}
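
The built-in Aggregator is the simplest possible listener: it collects every object mapper it is notified about. A sketch of gathering mappers from a traversal, assuming the document mapper exposes a traverse method that accepts this listener:

    ObjectMapperListener.Aggregator aggregator = new ObjectMapperListener.Aggregator();
    docMapper.traverse(aggregator);                     // assumed traversal entry point
    List<ObjectMapper> collected = aggregator.mappers;  // every object mapper visited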
diff --git a/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java b/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java
new file mode 100644
index 0000000..a6a41f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.UnmodifiableIterator;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+
+/**
+ * A holder for several {@link org.elasticsearch.index.mapper.object.ObjectMapper} instances.
+ */
+public class ObjectMappers implements Iterable<ObjectMapper> {
+
+ private final ImmutableList<ObjectMapper> objectMappers;
+ private final boolean hasNested;
+
+ public ObjectMappers() {
+ this(ImmutableList.<ObjectMapper>of());
+ }
+
+ public ObjectMappers(ObjectMapper objectMapper) {
+ this(new ObjectMapper[]{objectMapper});
+ }
+
+ public ObjectMappers(ObjectMapper[] objectMappers) {
+ this(ImmutableList.copyOf(objectMappers));
+ }
+
+ public ObjectMappers(ImmutableList<ObjectMapper> objectMappers) {
+ this.objectMappers = objectMappers;
+ boolean hasNested = false;
+ for (ObjectMapper objectMapper : objectMappers) {
+ if (objectMapper.nested().isNested()) {
+ hasNested = true;
+ break;
+ }
+ }
+ this.hasNested = hasNested;
+ }
+
+ /**
+ * Does one of the object mappers have a nested mapping set?
+ */
+ public boolean hasNested() {
+ return this.hasNested;
+ }
+
+ public ObjectMapper mapper() {
+ if (objectMappers.isEmpty()) {
+ return null;
+ }
+ return objectMappers.get(0);
+ }
+
+ public boolean isEmpty() {
+ return objectMappers.isEmpty();
+ }
+
+ public ImmutableList<ObjectMapper> mappers() {
+ return this.objectMappers;
+ }
+
+ @Override
+ public UnmodifiableIterator<ObjectMapper> iterator() {
+ return objectMappers.iterator();
+ }
+
+ /**
+ * Concats and returns a new {@link org.elasticsearch.index.mapper.ObjectMappers}.
+ */
+ public ObjectMappers concat(ObjectMapper mapper) {
+ return new ObjectMappers(new ImmutableList.Builder<ObjectMapper>().addAll(objectMappers).add(mapper).build());
+ }
+
+ /**
+ * Concats and returns a new {@link org.elasticsearch.index.mapper.ObjectMappers}.
+ */
+ public ObjectMappers concat(ObjectMappers mappers) {
+ return new ObjectMappers(new ImmutableList.Builder<ObjectMapper>().addAll(objectMappers).addAll(mappers).build());
+ }
+
+ public ObjectMappers remove(Iterable<ObjectMapper> mappers) {
+ ImmutableList.Builder<ObjectMapper> builder = new ImmutableList.Builder<ObjectMapper>();
+ for (ObjectMapper objectMapper : objectMappers) {
+ boolean found = false;
+ for (ObjectMapper mapper : mappers) {
+ if (objectMapper == mapper) { // identity equality
+ found = true;
+ }
+ }
+ if (!found) {
+ builder.add(objectMapper);
+ }
+ }
+ return new ObjectMappers(builder.build());
+ }
+
+ public ObjectMappers remove(ObjectMapper mapper) {
+ ImmutableList.Builder<ObjectMapper> builder = new ImmutableList.Builder<ObjectMapper>();
+ for (ObjectMapper objectMapper : objectMappers) {
+ if (objectMapper != mapper) { // identity equality
+ builder.add(objectMapper);
+ }
+ }
+ return new ObjectMappers(builder.build());
+ }
+}
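
ObjectMappers is immutable: concat and remove build a fresh holder instead of mutating the receiver, and remove compares by identity rather than equals. A short sketch, with `mapperA` as a stand-in ObjectMapper:

    ObjectMappers empty = new ObjectMappers();
    ObjectMappers one = empty.concat(mapperA);   // new holder; "empty" is untouched
    assert empty.isEmpty() && one.mapper() == mapperA;
    ObjectMappers back = one.remove(mapperA);    // identity comparison, as noted above
    assert back.isEmpty();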
diff --git a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
new file mode 100644
index 0000000..973f289
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.carrotsearch.hppc.ObjectObjectMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.object.RootObjectMapper;
+
+import java.util.*;
+
+/**
+ *
+ */
+public class ParseContext {
+
+ /** Fork of {@link org.apache.lucene.document.Document} with additional functionality. */
+ public static class Document implements Iterable<IndexableField> {
+
+ private final List<IndexableField> fields;
+ private ObjectObjectMap<Object, IndexableField> keyedFields;
+
+ public Document() {
+ fields = Lists.newArrayList();
+ }
+
+ @Override
+ public Iterator<IndexableField> iterator() {
+ return fields.iterator();
+ }
+
+ public List<IndexableField> getFields() {
+ return fields;
+ }
+
+ public void add(IndexableField field) {
+ fields.add(field);
+ }
+
+ /** Add fields so that they can later be fetched using {@link #getByKey(Object)}. */
+ public void addWithKey(Object key, IndexableField field) {
+ if (keyedFields == null) {
+ keyedFields = new ObjectObjectOpenHashMap<Object, IndexableField>();
+ } else if (keyedFields.containsKey(key)) {
+ throw new ElasticsearchIllegalStateException("Only one field can be stored per key");
+ }
+ keyedFields.put(key, field);
+ add(field);
+ }
+
+ /** Get back fields that have been previously added with {@link #addWithKey(Object, IndexableField)}. */
+ public IndexableField getByKey(Object key) {
+ return keyedFields == null ? null : keyedFields.get(key);
+ }
+
+ public IndexableField[] getFields(String name) {
+ List<IndexableField> f = new ArrayList<IndexableField>();
+ for (IndexableField field : fields) {
+ if (field.name().equals(name)) {
+ f.add(field);
+ }
+ }
+ return f.toArray(new IndexableField[f.size()]);
+ }
+
+ public IndexableField getField(String name) {
+ for (IndexableField field : fields) {
+ if (field.name().equals(name)) {
+ return field;
+ }
+ }
+ return null;
+ }
+
+ public String get(String name) {
+ for (IndexableField f : fields) {
+ if (f.name().equals(name) && f.stringValue() != null) {
+ return f.stringValue();
+ }
+ }
+ return null;
+ }
+
+ public BytesRef getBinaryValue(String name) {
+ for (IndexableField f : fields) {
+ if (f.name().equals(name) && f.binaryValue() != null) {
+ return f.binaryValue();
+ }
+ }
+ return null;
+ }
+
+ }
+
+ private final DocumentMapper docMapper;
+
+ private final DocumentMapperParser docMapperParser;
+
+ private final ContentPath path;
+
+ private XContentParser parser;
+
+ private Document document;
+
+ private List<Document> documents = Lists.newArrayList();
+
+ private Analyzer analyzer;
+
+ private final String index;
+
+ @Nullable
+ private final Settings indexSettings;
+
+ private SourceToParse sourceToParse;
+ private BytesReference source;
+
+ private String id;
+
+ private DocumentMapper.ParseListener listener;
+
+ private Field uid, version;
+
+ private StringBuilder stringBuilder = new StringBuilder();
+
+ private Map<String, String> ignoredValues = new HashMap<String, String>();
+
+ private boolean mappingsModified = false;
+ private boolean withinNewMapper = false;
+ private boolean withinCopyTo = false;
+
+ private boolean externalValueSet;
+
+ private Object externalValue;
+
+ private AllEntries allEntries = new AllEntries();
+
+ private float docBoost = 1.0f;
+
+ public ParseContext(String index, @Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, ContentPath path) {
+ this.index = index;
+ this.indexSettings = indexSettings;
+ this.docMapper = docMapper;
+ this.docMapperParser = docMapperParser;
+ this.path = path;
+ }
+
+ public void reset(XContentParser parser, Document document, SourceToParse source, DocumentMapper.ParseListener listener) {
+ this.parser = parser;
+ this.document = document;
+ if (document != null) {
+ this.documents = Lists.newArrayList();
+ this.documents.add(document);
+ } else {
+ this.documents = null;
+ }
+ this.analyzer = null;
+ this.uid = null;
+ this.version = null;
+ this.id = null;
+ this.sourceToParse = source;
+ this.source = source == null ? null : sourceToParse.source();
+ this.path.reset();
+ this.mappingsModified = false;
+ this.withinNewMapper = false;
+ this.listener = listener == null ? DocumentMapper.ParseListener.EMPTY : listener;
+ this.allEntries = new AllEntries();
+ this.ignoredValues.clear();
+ this.docBoost = 1.0f;
+ }
+
+ public boolean flyweight() {
+ return sourceToParse.flyweight();
+ }
+
+ public DocumentMapperParser docMapperParser() {
+ return this.docMapperParser;
+ }
+
+ public boolean mappingsModified() {
+ return this.mappingsModified;
+ }
+
+ public void setMappingsModified() {
+ this.mappingsModified = true;
+ }
+
+ public void setWithinNewMapper() {
+ this.withinNewMapper = true;
+ }
+
+ public void clearWithinNewMapper() {
+ this.withinNewMapper = false;
+ }
+
+ public boolean isWithinNewMapper() {
+ return withinNewMapper;
+ }
+
+ public void setWithinCopyTo() {
+ this.withinCopyTo = true;
+ }
+
+ public void clearWithinCopyTo() {
+ this.withinCopyTo = false;
+ }
+
+ public boolean isWithinCopyTo() {
+ return withinCopyTo;
+ }
+
+ public String index() {
+ return this.index;
+ }
+
+ @Nullable
+ public Settings indexSettings() {
+ return this.indexSettings;
+ }
+
+ public String type() {
+ return sourceToParse.type();
+ }
+
+ public SourceToParse sourceToParse() {
+ return this.sourceToParse;
+ }
+
+ public BytesReference source() {
+ return source;
+ }
+
+ // should only be used by SourceFieldMapper to update with a compressed source
+ public void source(BytesReference source) {
+ this.source = source;
+ }
+
+ public ContentPath path() {
+ return this.path;
+ }
+
+ public XContentParser parser() {
+ return this.parser;
+ }
+
+ public DocumentMapper.ParseListener listener() {
+ return this.listener;
+ }
+
+ public Document rootDoc() {
+ return documents.get(0);
+ }
+
+ public List<Document> docs() {
+ return this.documents;
+ }
+
+ public Document doc() {
+ return this.document;
+ }
+
+ public void addDoc(Document doc) {
+ this.documents.add(doc);
+ }
+
+ public Document switchDoc(Document doc) {
+ Document prev = this.document;
+ this.document = doc;
+ return prev;
+ }
+
+ public RootObjectMapper root() {
+ return docMapper.root();
+ }
+
+ public DocumentMapper docMapper() {
+ return this.docMapper;
+ }
+
+ public AnalysisService analysisService() {
+ return docMapperParser.analysisService;
+ }
+
+ public String id() {
+ return id;
+ }
+
+ public void ignoredValue(String indexName, String value) {
+ ignoredValues.put(indexName, value);
+ }
+
+ public String ignoredValue(String indexName) {
+ return ignoredValues.get(indexName);
+ }
+
+ /**
+ * Really, just the id mapper should set this.
+ */
+ public void id(String id) {
+ this.id = id;
+ }
+
+ public Field uid() {
+ return this.uid;
+ }
+
+ /**
+ * Really, just the uid mapper should set this.
+ */
+ public void uid(Field uid) {
+ this.uid = uid;
+ }
+
+ public Field version() {
+ return this.version;
+ }
+
+ public void version(Field version) {
+ this.version = version;
+ }
+
+ public boolean includeInAll(Boolean includeInAll, FieldMapper mapper) {
+ return includeInAll(includeInAll, mapper.fieldType().indexed());
+ }
+
+ /**
+ * Decides whether the field value is included in _all. Always returns <tt>false</tt> if {@link org.elasticsearch.index.mapper.internal.AllFieldMapper#enabled()}
+ * is <tt>false</tt> or while within a copy_to context. Otherwise, returns the field-specific flag when it is set;
+ * when the flag is <tt>null</tt>, it defaults to whether the field is indexed.
+ */
+ private boolean includeInAll(Boolean specificIncludeInAll, boolean indexed) {
+ if (withinCopyTo) {
+ return false;
+ }
+ if (!docMapper.allFieldMapper().enabled()) {
+ return false;
+ }
+ // not explicitly set
+ if (specificIncludeInAll == null) {
+ return indexed;
+ }
+ return specificIncludeInAll;
+ }
+
+ public AllEntries allEntries() {
+ return this.allEntries;
+ }
+
+ public Analyzer analyzer() {
+ return this.analyzer;
+ }
+
+ public void analyzer(Analyzer analyzer) {
+ this.analyzer = analyzer;
+ }
+
+ public void externalValue(Object externalValue) {
+ this.externalValueSet = true;
+ this.externalValue = externalValue;
+ }
+
+ public boolean externalValueSet() {
+ return this.externalValueSet;
+ }
+
+ public Object externalValue() {
+ externalValueSet = false;
+ return externalValue;
+ }
+
+ public float docBoost() {
+ return this.docBoost;
+ }
+
+ public void docBoost(float docBoost) {
+ this.docBoost = docBoost;
+ }
+
+ /**
+ * A string builder that can be used to construct complex names, for example.
+ * It is better to reuse this instance than to allocate a new one for each use.
+ */
+ public StringBuilder stringBuilder() {
+ stringBuilder.setLength(0);
+ return this.stringBuilder;
+ }
+
+}
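
The keyed-field support in ParseContext.Document lets a mapper retrieve a field it added earlier (for instance to overwrite its value), while rejecting two fields under one key. A minimal sketch; the "_version" key and the concrete field are illustrative only:

    ParseContext.Document doc = new ParseContext.Document();
    Field version = new NumericDocValuesField("_version", 0L);  // hypothetical field
    doc.addWithKey("_version", version);
    IndexableField found = doc.getByKey("_version");            // returns the field added above
    // a second addWithKey("_version", ...) would throw ElasticsearchIllegalStateException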
diff --git a/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
new file mode 100644
index 0000000..4624df5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+
+import java.util.List;
+
+/**
+ * The result of parsing a document.
+ */
+public class ParsedDocument {
+
+ private final Field uid, version;
+
+ private final String id;
+
+ private final String type;
+
+ private final String routing;
+
+ private final long timestamp;
+
+ private final long ttl;
+
+ private final List<Document> documents;
+
+ private final Analyzer analyzer;
+
+ private BytesReference source;
+
+ private boolean mappingsModified;
+
+ private String parent;
+
+ public ParsedDocument(Field uid, Field version, String id, String type, String routing, long timestamp, long ttl, List<Document> documents, Analyzer analyzer, BytesReference source, boolean mappingsModified) {
+ this.uid = uid;
+ this.version = version;
+ this.id = id;
+ this.type = type;
+ this.routing = routing;
+ this.timestamp = timestamp;
+ this.ttl = ttl;
+ this.documents = documents;
+ this.source = source;
+ this.analyzer = analyzer;
+ this.mappingsModified = mappingsModified;
+ }
+
+ public Field uid() {
+ return this.uid;
+ }
+
+ public Field version() {
+ return version;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String routing() {
+ return this.routing;
+ }
+
+ public long timestamp() {
+ return this.timestamp;
+ }
+
+ public long ttl() {
+ return this.ttl;
+ }
+
+ public Document rootDoc() {
+ return documents.get(documents.size() - 1);
+ }
+
+ public List<Document> docs() {
+ return this.documents;
+ }
+
+ public Analyzer analyzer() {
+ return this.analyzer;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public void setSource(BytesReference source) {
+ this.source = source;
+ }
+
+ public ParsedDocument parent(String parent) {
+ this.parent = parent;
+ return this;
+ }
+
+ public String parent() {
+ return this.parent;
+ }
+
+ /**
+ * Has the parsed document caused mappings to be modified?
+ */
+ public boolean mappingsModified() {
+ return mappingsModified;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Document ").append("uid[").append(uid).append("] doc [").append(documents).append("]");
+ return sb.toString();
+ }
+}
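
Note the asymmetry with ParseContext: there, rootDoc() is documents.get(0), while here the root is the last entry. A plausible reading, assuming the list is reordered before construction so that nested documents precede their parent, as Lucene block indexing expects:

    ParsedDocument parsed = docMapper.parse(source);  // assumed parse entry point
    ParseContext.Document root = parsed.rootDoc();    // documents.get(documents.size() - 1)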
diff --git a/src/main/java/org/elasticsearch/index/mapper/RootMapper.java b/src/main/java/org/elasticsearch/index/mapper/RootMapper.java
new file mode 100644
index 0000000..fde3566
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/RootMapper.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import java.io.IOException;
+
+
+/**
+ * A mapper that exists only as a mapper within a root object.
+ */
+public interface RootMapper extends Mapper {
+
+ void preParse(ParseContext context) throws IOException;
+
+ void postParse(ParseContext context) throws IOException;
+
+ void validate(ParseContext context) throws MapperParsingException;
+
+ /**
+ * Should the mapper be included in the root {@link org.elasticsearch.index.mapper.object.ObjectMapper}.
+ */
+ boolean includeInObject();
+
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
new file mode 100644
index 0000000..d952530
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+/**
+ *
+ */
+public class SourceToParse {
+
+ public static SourceToParse source(XContentParser parser) {
+ return new SourceToParse(Origin.PRIMARY, parser);
+ }
+
+ public static SourceToParse source(BytesReference source) {
+ return new SourceToParse(Origin.PRIMARY, source);
+ }
+
+ public static SourceToParse source(Origin origin, BytesReference source) {
+ return new SourceToParse(origin, source);
+ }
+
+ private final Origin origin;
+
+ private final BytesReference source;
+
+ private final XContentParser parser;
+
+ private boolean flyweight = false;
+
+ private String type;
+
+ private String id;
+
+ private String routing;
+
+ private String parentId;
+
+ private long timestamp;
+
+ private long ttl;
+
+ public SourceToParse(Origin origin, XContentParser parser) {
+ this.origin = origin;
+ this.parser = parser;
+ this.source = null;
+ }
+
+ public SourceToParse(Origin origin, BytesReference source) {
+ this.origin = origin;
+ // we always convert back to byte array, since we store it and Field only supports bytes..
+ // so, we might as well do it here, and improve the performance of working with direct byte arrays
+ this.source = source.toBytesArray();
+ this.parser = null;
+ }
+
+ public Origin origin() {
+ return origin;
+ }
+
+ public XContentParser parser() {
+ return this.parser;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public SourceToParse type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ public SourceToParse flyweight(boolean flyweight) {
+ this.flyweight = flyweight;
+ return this;
+ }
+
+ public boolean flyweight() {
+ return this.flyweight;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public SourceToParse id(String id) {
+ this.id = id;
+ return this;
+ }
+
+ public String parent() {
+ return this.parentId;
+ }
+
+ public SourceToParse parent(String parentId) {
+ this.parentId = parentId;
+ return this;
+ }
+
+ public String routing() {
+ return this.routing;
+ }
+
+ public SourceToParse routing(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ public long timestamp() {
+ return this.timestamp;
+ }
+
+ public SourceToParse timestamp(String timestamp) {
+ this.timestamp = Long.parseLong(timestamp);
+ return this;
+ }
+
+ public SourceToParse timestamp(long timestamp) {
+ this.timestamp = timestamp;
+ return this;
+ }
+
+ public long ttl() {
+ return this.ttl;
+ }
+
+ public SourceToParse ttl(long ttl) {
+ this.ttl = ttl;
+ return this;
+ }
+
+ public static enum Origin {
+
+ PRIMARY,
+ REPLICA
+
+ }
+
+}
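
Every setter returns `this`, so a parse request is assembled in a single fluent chain. A sketch, with `jsonBytes` standing in for an actual BytesReference:

    SourceToParse source = SourceToParse.source(jsonBytes)  // origin defaults to PRIMARY
            .type("tweet")
            .id("1")
            .routing("user123")
            .ttl(3600000);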
diff --git a/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java b/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java
new file mode 100644
index 0000000..f675396
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ */
+public class StrictDynamicMappingException extends MapperParsingException {
+
+ public StrictDynamicMappingException(String path, String fieldName) {
+ super("mapping set to strict, dynamic introduction of [" + fieldName + "] within [" + path + "] is not allowed");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/mapper/Uid.java b/src/main/java/org/elasticsearch/index/mapper/Uid.java
new file mode 100644
index 0000000..a34e088
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/Uid.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lucene.BytesRefs;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ *
+ */
+public final class Uid {
+
+ public static final char DELIMITER = '#';
+ public static final byte DELIMITER_BYTE = 0x23;
+ public static final BytesRef DELIMITER_BYTES = new BytesRef(new byte[]{DELIMITER_BYTE});
+
+ private final String type;
+
+ private final String id;
+
+ public Uid(String type, String id) {
+ this.type = type;
+ this.id = id;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public String id() {
+ return id;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Uid uid = (Uid) o;
+
+ if (id != null ? !id.equals(uid.id) : uid.id != null) return false;
+ if (type != null ? !type.equals(uid.type) : uid.type != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = type != null ? type.hashCode() : 0;
+ result = 31 * result + (id != null ? id.hashCode() : 0);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return createUid(type, id);
+ }
+
+ public BytesRef toBytesRef() {
+ return createUidAsBytes(type, id);
+ }
+
+ public static String typePrefix(String type) {
+ return type + DELIMITER;
+ }
+
+ public static BytesRef typePrefixAsBytes(BytesRef type) {
+ BytesRef bytesRef = new BytesRef(type.length + 1);
+ bytesRef.append(type);
+ bytesRef.append(DELIMITER_BYTES);
+ return bytesRef;
+ }
+
+ public static String idFromUid(String uid) {
+ int delimiterIndex = uid.indexOf(DELIMITER); // a type is not allowed to contain '#', but an id can
+ return uid.substring(delimiterIndex + 1);
+ }
+
+ public static HashedBytesArray idFromUid(BytesRef uid) {
+ return splitUidIntoTypeAndId(uid)[1];
+ }
+
+ public static HashedBytesArray typeFromUid(BytesRef uid) {
+ return splitUidIntoTypeAndId(uid)[0];
+ }
+
+ public static String typeFromUid(String uid) {
+ int delimiterIndex = uid.indexOf(DELIMITER); // a type is not allowed to contain '#', but an id can
+ return uid.substring(0, delimiterIndex);
+ }
+
+ public static Uid createUid(String uid) {
+ int delimiterIndex = uid.indexOf(DELIMITER); // a type is not allowed to contain '#', but an id can
+ return new Uid(uid.substring(0, delimiterIndex), uid.substring(delimiterIndex + 1));
+ }
+
+ public static BytesRef createUidAsBytes(String type, String id) {
+ return createUidAsBytes(new BytesRef(type), new BytesRef(id));
+ }
+
+ public static BytesRef createUidAsBytes(String type, BytesRef id) {
+ return createUidAsBytes(new BytesRef(type), id);
+ }
+
+ public static BytesRef createUidAsBytes(BytesRef type, BytesRef id) {
+ final BytesRef ref = new BytesRef(type.length + 1 + id.length);
+ System.arraycopy(type.bytes, type.offset, ref.bytes, 0, type.length);
+ ref.offset = type.length;
+ ref.bytes[ref.offset++] = DELIMITER_BYTE;
+ System.arraycopy(id.bytes, id.offset, ref.bytes, ref.offset, id.length);
+ ref.offset = 0;
+ ref.length = ref.bytes.length;
+ return ref;
+ }
+
+ public static void createUidAsBytes(BytesRef type, BytesRef id, BytesRef spare) {
+ spare.copyBytes(type);
+ spare.append(DELIMITER_BYTES);
+ spare.append(id);
+ }
+
+ public static BytesRef[] createTypeUids(Collection<String> types, Object ids) {
+ return createTypeUids(types, Collections.singletonList(ids));
+ }
+
+ public static BytesRef[] createTypeUids(Collection<String> types, List<? extends Object> ids) {
+ final int numIds = ids.size();
+ BytesRef[] uids = new BytesRef[types.size() * ids.size()];
+ BytesRef typeBytes = new BytesRef();
+ BytesRef idBytes = new BytesRef();
+ int index = 0;
+ for (String type : types) {
+ UnicodeUtil.UTF16toUTF8(type, 0, type.length(), typeBytes);
+ for (int i = 0; i < numIds; i++, index++) {
+ uids[index] = Uid.createUidAsBytes(typeBytes, BytesRefs.toBytesRef(ids.get(i), idBytes));
+ }
+ }
+ return uids;
+ }
+
+ public static String createUid(String type, String id) {
+ return createUid(new StringBuilder(), type, id);
+ }
+
+ public static String createUid(StringBuilder sb, String type, String id) {
+ return sb.append(type).append(DELIMITER).append(id).toString();
+ }
+
+ public static boolean hasDelimiter(BytesRef uid) {
+ final int limit = uid.offset + uid.length;
+ for (int i = uid.offset; i < limit; i++) {
+ if (uid.bytes[i] == DELIMITER_BYTE) { // 0x23 is equal to '#'
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // LUCENE 4 UPGRADE: HashedBytesArray or BytesRef as return type?
+ public static HashedBytesArray[] splitUidIntoTypeAndId(BytesRef uid) {
+ int loc = -1;
+ final int limit = uid.offset + uid.length;
+ for (int i = uid.offset; i < limit; i++) {
+ if (uid.bytes[i] == DELIMITER_BYTE) { // 0x23 is equal to '#'
+ loc = i;
+ break;
+ }
+ }
+
+ if (loc == -1) {
+ return null;
+ }
+
+ byte[] type = new byte[loc - uid.offset];
+ System.arraycopy(uid.bytes, uid.offset, type, 0, type.length);
+
+ byte[] id = new byte[uid.length - type.length - 1];
+ System.arraycopy(uid.bytes, loc + 1, id, 0, id.length);
+ return new HashedBytesArray[]{new HashedBytesArray(type), new HashedBytesArray(id)};
+ }
+
+}
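
A uid is simply the type and id joined by '#'; since a type may not contain the delimiter but an id may, parsing always splits on the first occurrence. A round-trip sketch:

    String uid = Uid.createUid("tweet", "1");  // "tweet#1"
    Uid parsed = Uid.createUid(uid);           // splits on the first '#'
    assert parsed.type().equals("tweet") && parsed.id().equals("1");
    assert Uid.hasDelimiter(parsed.toBytesRef());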
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java
new file mode 100644
index 0000000..8dd163b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java
@@ -0,0 +1,1109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.RegexpFilter;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingFormats;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.FieldDataTermsFilter;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ *
+ */
+public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
+
+ public static class Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType();
+ public static final boolean DOC_VALUES = false;
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(true);
+ FIELD_TYPE.setStored(false);
+ FIELD_TYPE.setStoreTermVectors(false);
+ FIELD_TYPE.setOmitNorms(false);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final float BOOST = 1.0f;
+ public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
+ }
+
+ public abstract static class Builder<T extends Builder, Y extends AbstractFieldMapper> extends Mapper.Builder<T, Y> {
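+        // note: setters return the shared "builder" reference inherited from Mapper.Builder so that chained calls keep the concrete subclass type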
+
+ protected final FieldType fieldType;
+ protected Boolean docValues;
+ protected float boost = Defaults.BOOST;
+ protected boolean omitNormsSet = false;
+ protected String indexName;
+ protected NamedAnalyzer indexAnalyzer;
+ protected NamedAnalyzer searchAnalyzer;
+ protected Boolean includeInAll;
+ protected boolean indexOptionsSet = false;
+ protected PostingsFormatProvider postingsProvider;
+ protected DocValuesFormatProvider docValuesProvider;
+ protected SimilarityProvider similarity;
+ protected Loading normsLoading;
+ @Nullable
+ protected Settings fieldDataSettings;
+ protected final MultiFields.Builder multiFieldsBuilder;
+ protected CopyTo copyTo;
+
+ protected Builder(String name, FieldType fieldType) {
+ super(name);
+ this.fieldType = fieldType;
+ multiFieldsBuilder = new MultiFields.Builder();
+ }
+
+ public T index(boolean index) {
+ this.fieldType.setIndexed(index);
+ return builder;
+ }
+
+ public T store(boolean store) {
+ this.fieldType.setStored(store);
+ return builder;
+ }
+
+ public T docValues(boolean docValues) {
+ this.docValues = docValues;
+ return builder;
+ }
+
+ public T storeTermVectors(boolean termVectors) {
+ if (termVectors) {
+ this.fieldType.setStoreTermVectors(termVectors);
+            } // don't set it to false; false is the default and might be flipped by a more specific option
+ return builder;
+ }
+
+ public T storeTermVectorOffsets(boolean termVectorOffsets) {
+ if (termVectorOffsets) {
+ this.fieldType.setStoreTermVectors(termVectorOffsets);
+ }
+ this.fieldType.setStoreTermVectorOffsets(termVectorOffsets);
+ return builder;
+ }
+
+ public T storeTermVectorPositions(boolean termVectorPositions) {
+ if (termVectorPositions) {
+ this.fieldType.setStoreTermVectors(termVectorPositions);
+ }
+ this.fieldType.setStoreTermVectorPositions(termVectorPositions);
+ return builder;
+ }
+
+ public T storeTermVectorPayloads(boolean termVectorPayloads) {
+ if (termVectorPayloads) {
+ this.fieldType.setStoreTermVectors(termVectorPayloads);
+ }
+ this.fieldType.setStoreTermVectorPayloads(termVectorPayloads);
+ return builder;
+ }
+
+ public T tokenized(boolean tokenized) {
+ this.fieldType.setTokenized(tokenized);
+ return builder;
+ }
+
+ public T boost(float boost) {
+ this.boost = boost;
+ return builder;
+ }
+
+ public T omitNorms(boolean omitNorms) {
+ this.fieldType.setOmitNorms(omitNorms);
+ this.omitNormsSet = true;
+ return builder;
+ }
+
+ public T indexOptions(IndexOptions indexOptions) {
+ this.fieldType.setIndexOptions(indexOptions);
+ this.indexOptionsSet = true;
+ return builder;
+ }
+
+ public T indexName(String indexName) {
+ this.indexName = indexName;
+ return builder;
+ }
+
+ public T indexAnalyzer(NamedAnalyzer indexAnalyzer) {
+ this.indexAnalyzer = indexAnalyzer;
+ return builder;
+ }
+
+ public T searchAnalyzer(NamedAnalyzer searchAnalyzer) {
+ this.searchAnalyzer = searchAnalyzer;
+ return builder;
+ }
+
+ public T includeInAll(Boolean includeInAll) {
+ this.includeInAll = includeInAll;
+ return builder;
+ }
+
+ public T postingsFormat(PostingsFormatProvider postingsFormat) {
+ this.postingsProvider = postingsFormat;
+ return builder;
+ }
+
+ public T docValuesFormat(DocValuesFormatProvider docValuesFormat) {
+ this.docValuesProvider = docValuesFormat;
+ return builder;
+ }
+
+ public T similarity(SimilarityProvider similarity) {
+ this.similarity = similarity;
+ return builder;
+ }
+
+ public T normsLoading(Loading normsLoading) {
+ this.normsLoading = normsLoading;
+ return builder;
+ }
+
+ public T fieldDataSettings(Settings settings) {
+ this.fieldDataSettings = settings;
+ return builder;
+ }
+
+ public T multiFieldPathType(ContentPath.Type pathType) {
+ multiFieldsBuilder.pathType(pathType);
+ return builder;
+ }
+
+ public T addMultiField(Mapper.Builder mapperBuilder) {
+ multiFieldsBuilder.add(mapperBuilder);
+ return builder;
+ }
+
+ public T copyTo(CopyTo copyTo) {
+ this.copyTo = copyTo;
+ return builder;
+ }
+
+ public Names buildNames(BuilderContext context) {
+ return new Names(name, buildIndexName(context), indexName == null ? name : indexName, buildFullName(context), context.path().sourcePath());
+ }
+
+ public String buildIndexName(BuilderContext context) {
+ String actualIndexName = indexName == null ? name : indexName;
+ return context.path().pathAsText(actualIndexName);
+ }
+
+ public String buildFullName(BuilderContext context) {
+ return context.path().fullPathAsText(name);
+ }
+ }
+
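+    // per-thread scratch list, reused across documents to avoid allocating a new list for every parsed field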
+ private static final ThreadLocal<List<Field>> FIELD_LIST = new ThreadLocal<List<Field>>() {
+ protected List<Field> initialValue() {
+ return new ArrayList<Field>(2);
+ }
+ };
+
+ protected final Names names;
+ protected float boost;
+ protected final FieldType fieldType;
+ private final boolean docValues;
+ protected final NamedAnalyzer indexAnalyzer;
+ protected NamedAnalyzer searchAnalyzer;
+ protected PostingsFormatProvider postingsFormat;
+ protected DocValuesFormatProvider docValuesFormat;
+ protected final SimilarityProvider similarity;
+ protected Loading normsLoading;
+ protected Settings customFieldDataSettings;
+ protected FieldDataType fieldDataType;
+ protected final MultiFields multiFields;
+ protected CopyTo copyTo;
+
+ protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues, NamedAnalyzer indexAnalyzer,
+ NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsFormat,
+ DocValuesFormatProvider docValuesFormat, SimilarityProvider similarity,
+ Loading normsLoading, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ this(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsFormat, docValuesFormat, similarity,
+ normsLoading, fieldDataSettings, indexSettings, MultiFields.empty(), null);
+ }
+
+ protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues, NamedAnalyzer indexAnalyzer,
+ NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsFormat,
+ DocValuesFormatProvider docValuesFormat, SimilarityProvider similarity,
+ Loading normsLoading, @Nullable Settings fieldDataSettings, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ this.names = names;
+ this.boost = boost;
+ this.fieldType = fieldType;
+ this.fieldType.freeze();
+
+        // automatically set to the keyword analyzer if the field is indexed but not analyzed
+ if (indexAnalyzer == null && !this.fieldType.tokenized() && this.fieldType.indexed()) {
+ this.indexAnalyzer = Lucene.KEYWORD_ANALYZER;
+ } else {
+ this.indexAnalyzer = indexAnalyzer;
+ }
+        // automatically set to the keyword analyzer if the field is indexed but not analyzed
+ if (searchAnalyzer == null && !this.fieldType.tokenized() && this.fieldType.indexed()) {
+ this.searchAnalyzer = Lucene.KEYWORD_ANALYZER;
+ } else {
+ this.searchAnalyzer = searchAnalyzer;
+ }
+ if (postingsFormat == null) {
+ if (defaultPostingFormat() != null) {
+ postingsFormat = PostingFormats.getAsProvider(defaultPostingFormat());
+ }
+ }
+ this.postingsFormat = postingsFormat;
+ this.docValuesFormat = docValuesFormat;
+ this.similarity = similarity;
+ this.normsLoading = normsLoading;
+
+ this.customFieldDataSettings = fieldDataSettings;
+ if (fieldDataSettings == null) {
+ this.fieldDataType = defaultFieldDataType();
+ } else {
+            // create a new field data type, layering the custom settings on top of the defaults
+ this.fieldDataType = new FieldDataType(defaultFieldDataType().getType(),
+ ImmutableSettings.builder().put(defaultFieldDataType().getSettings()).put(fieldDataSettings)
+ );
+ }
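+        // resolve doc values: an explicit mapping setting wins, otherwise it is inferred from the configured field data format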
+ if (docValues != null) {
+ this.docValues = docValues;
+ } else if (fieldDataType == null) {
+ this.docValues = false;
+ } else {
+ this.docValues = FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(fieldDataType.getFormat(indexSettings));
+ }
+ this.multiFields = multiFields;
+ this.copyTo = copyTo;
+ }
+
+ @Nullable
+ protected String defaultPostingFormat() {
+ return null;
+ }
+
+ @Nullable
+ protected String defaultDocValuesFormat() {
+ return null;
+ }
+
+ @Override
+ public String name() {
+ return names.name();
+ }
+
+ @Override
+ public Names names() {
+ return this.names;
+ }
+
+ public abstract FieldType defaultFieldType();
+
+ public abstract FieldDataType defaultFieldDataType();
+
+ @Override
+ public final FieldDataType fieldDataType() {
+ return fieldDataType;
+ }
+
+ @Override
+ public FieldType fieldType() {
+ return fieldType;
+ }
+
+ @Override
+ public float boost() {
+ return this.boost;
+ }
+
+ @Override
+ public Analyzer indexAnalyzer() {
+ return this.indexAnalyzer;
+ }
+
+ @Override
+ public Analyzer searchAnalyzer() {
+ return this.searchAnalyzer;
+ }
+
+ @Override
+ public Analyzer searchQuoteAnalyzer() {
+ return this.searchAnalyzer;
+ }
+
+ @Override
+ public SimilarityProvider similarity() {
+ return similarity;
+ }
+
+ @Override
+ public CopyTo copyTo() {
+ return copyTo;
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ final List<Field> fields = FIELD_LIST.get();
+ assert fields.isEmpty();
+ try {
+ parseCreateField(context, fields);
+ for (Field field : fields) {
+ if (!customBoost()) {
+ field.setBoost(boost);
+ }
+ if (context.listener().beforeFieldAdded(this, field, context)) {
+ context.doc().add(field);
+ }
+ }
+ } catch (Exception e) {
+ throw new MapperParsingException("failed to parse [" + names.fullName() + "]", e);
+ } finally {
+ fields.clear();
+ }
+ multiFields.parse(this, context);
+ if (copyTo != null) {
+ copyTo.parse(context);
+ }
+ }
+
+ /**
+ * Parse the field value and populate <code>fields</code>.
+ */
+ protected abstract void parseCreateField(ParseContext context, List<Field> fields) throws IOException;
+
+ /**
+     * Derived classes can override this to indicate that they set the boost value themselves.
+ */
+ protected boolean customBoost() {
+ return false;
+ }
+
+ @Override
+ public void traverse(FieldMapperListener fieldMapperListener) {
+ fieldMapperListener.fieldMapper(this);
+ multiFields.traverse(fieldMapperListener);
+ }
+
+ @Override
+ public void traverse(ObjectMapperListener objectMapperListener) {
+ // nothing to do here...
+ }
+
+ @Override
+ public Object valueForSearch(Object value) {
+ return value;
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ return BytesRefs.toBytesRef(value);
+ }
+
+ @Override
+ public Query queryStringTermQuery(Term term) {
+ return null;
+ }
+
+ @Override
+ public boolean useTermQueryWithQueryString() {
+ return false;
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ return new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)));
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ return new TermFilter(names().createIndexNameTerm(indexedValueForSearch(value)));
+ }
+
+ @Override
+ public Filter termsFilter(List values, @Nullable QueryParseContext context) {
+ BytesRef[] bytesRefs = new BytesRef[values.size()];
+ for (int i = 0; i < bytesRefs.length; i++) {
+ bytesRefs[i] = indexedValueForSearch(values.get(i));
+ }
+ return new XTermsFilter(names.indexName(), bytesRefs);
+ }
+
+ /**
+ * A terms filter based on the field data cache
+ */
+ @Override
+ public Filter termsFilter(IndexFieldDataService fieldDataService, List values, @Nullable QueryParseContext context) {
+ // create with initial size large enough to avoid rehashing
+ ObjectOpenHashSet<BytesRef> terms =
+ new ObjectOpenHashSet<BytesRef>((int) (values.size() * (1 + ObjectOpenHashSet.DEFAULT_LOAD_FACTOR)));
+ for (int i = 0, len = values.size(); i < len; i++) {
+ terms.add(indexedValueForSearch(values.get(i)));
+ }
+
+ return FieldDataTermsFilter.newBytes(fieldDataService.getForField(this), terms);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return new TermRangeQuery(names.indexName(),
+ lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
+ upperTerm == null ? null : indexedValueForSearch(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return new TermRangeFilter(names.indexName(),
+ lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
+ upperTerm == null ? null : indexedValueForSearch(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ return new FuzzyQuery(names.createIndexNameTerm(indexedValueForSearch(value)), fuzziness.asDistance(value), prefixLength, maxExpansions, transpositions);
+ }
+
+ @Override
+ public Query prefixQuery(Object value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) {
+ PrefixQuery query = new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)));
+ if (method != null) {
+ query.setRewriteMethod(method);
+ }
+ return query;
+ }
+
+ @Override
+ public Filter prefixFilter(Object value, @Nullable QueryParseContext context) {
+ return new PrefixFilter(names().createIndexNameTerm(indexedValueForSearch(value)));
+ }
+
+ @Override
+ public Query regexpQuery(Object value, int flags, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) {
+ RegexpQuery query = new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags);
+ if (method != null) {
+ query.setRewriteMethod(method);
+ }
+ return query;
+ }
+
+ @Override
+ public Filter regexpFilter(Object value, int flags, @Nullable QueryParseContext parseContext) {
+ return new RegexpFilter(names().createIndexNameTerm(indexedValueForSearch(value)), flags);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ return null;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
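+        // record a conflict for every setting that cannot be changed on a live mapping; mutable settings are applied further below, and only when the merge is not a simulation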
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ String mergedType = mergeWith.getClass().getSimpleName();
+ if (mergeWith instanceof AbstractFieldMapper) {
+ mergedType = ((AbstractFieldMapper) mergeWith).contentType();
+ }
+ mergeContext.addConflict("mapper [" + names.fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
+ // different types, return
+ return;
+ }
+ AbstractFieldMapper fieldMergeWith = (AbstractFieldMapper) mergeWith;
+ if (this.fieldType().indexed() != fieldMergeWith.fieldType().indexed() || this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different index values");
+ }
+ if (this.fieldType().stored() != fieldMergeWith.fieldType().stored()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different store values");
+ }
+ if (!this.hasDocValues() && fieldMergeWith.hasDocValues()) {
+            // don't add a conflict if this mapper has doc values while the mapper to merge doesn't, since doc values are implicitly set
+ // when the doc_values field data format is configured
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different " + TypeParsers.DOC_VALUES + " values");
+ }
+ if (this.fieldType().omitNorms() != fieldMergeWith.fieldType.omitNorms()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different `norms.enabled` values");
+ }
+ if (this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different tokenize values");
+ }
+ if (this.fieldType().storeTermVectors() != fieldMergeWith.fieldType().storeTermVectors()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector values");
+ }
+ if (this.fieldType().storeTermVectorOffsets() != fieldMergeWith.fieldType().storeTermVectorOffsets()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_offsets values");
+ }
+ if (this.fieldType().storeTermVectorPositions() != fieldMergeWith.fieldType().storeTermVectorPositions()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_positions values");
+ }
+ if (this.fieldType().storeTermVectorPayloads() != fieldMergeWith.fieldType().storeTermVectorPayloads()) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_payloads values");
+ }
+ if (this.indexAnalyzer == null) {
+ if (fieldMergeWith.indexAnalyzer != null) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
+ }
+ } else if (fieldMergeWith.indexAnalyzer == null) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
+ } else if (!this.indexAnalyzer.name().equals(fieldMergeWith.indexAnalyzer.name())) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
+ }
+
+ if (this.similarity == null) {
+ if (fieldMergeWith.similarity() != null) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity");
+ }
+ } else if (fieldMergeWith.similarity() == null) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity");
+ } else if (!this.similarity().equals(fieldMergeWith.similarity())) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity");
+ }
+ multiFields.merge(mergeWith, mergeContext);
+
+ if (!mergeContext.mergeFlags().simulate()) {
+ // apply changeable values
+ this.boost = fieldMergeWith.boost;
+ this.normsLoading = fieldMergeWith.normsLoading;
+ this.copyTo = fieldMergeWith.copyTo;
+ if (fieldMergeWith.postingsFormat != null) {
+ this.postingsFormat = fieldMergeWith.postingsFormat;
+ }
+ if (fieldMergeWith.docValuesFormat != null) {
+ this.docValuesFormat = fieldMergeWith.docValuesFormat;
+ }
+ if (fieldMergeWith.searchAnalyzer != null) {
+ this.searchAnalyzer = fieldMergeWith.searchAnalyzer;
+ }
+ if (fieldMergeWith.customFieldDataSettings != null) {
+ if (!Objects.equal(fieldMergeWith.customFieldDataSettings, this.customFieldDataSettings)) {
+ this.customFieldDataSettings = fieldMergeWith.customFieldDataSettings;
+ this.fieldDataType = new FieldDataType(defaultFieldDataType().getType(),
+ ImmutableSettings.builder().put(defaultFieldDataType().getSettings()).put(this.customFieldDataSettings)
+ );
+ }
+ }
+ }
+ }
+
+ @Override
+ public PostingsFormatProvider postingsFormatProvider() {
+ return postingsFormat;
+ }
+
+ @Override
+ public DocValuesFormatProvider docValuesFormatProvider() {
+ return docValuesFormat;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(names.name());
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+ doXContentBody(builder, includeDefaults, params);
+ return builder.endObject();
+ }
+
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+
+ builder.field("type", contentType());
+ if (includeDefaults || !names.name().equals(names.indexNameClean())) {
+ builder.field("index_name", names.indexNameClean());
+ }
+
+ if (includeDefaults || boost != 1.0f) {
+ builder.field("boost", boost);
+ }
+
+ FieldType defaultFieldType = defaultFieldType();
+ if (includeDefaults || fieldType.indexed() != defaultFieldType.indexed() ||
+ fieldType.tokenized() != defaultFieldType.tokenized()) {
+ builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
+ }
+ if (includeDefaults || fieldType.stored() != defaultFieldType.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (includeDefaults || hasDocValues() != Defaults.DOC_VALUES) {
+ builder.field(TypeParsers.DOC_VALUES, docValues);
+ }
+ if (includeDefaults || fieldType.storeTermVectors() != defaultFieldType.storeTermVectors()) {
+ builder.field("term_vector", termVectorOptionsToString(fieldType));
+ }
+ if (includeDefaults || fieldType.omitNorms() != defaultFieldType.omitNorms() || normsLoading != null) {
+ builder.startObject("norms");
+ if (includeDefaults || fieldType.omitNorms() != defaultFieldType.omitNorms()) {
+ builder.field("enabled", !fieldType.omitNorms());
+ }
+ if (normsLoading != null) {
+ builder.field(Loading.KEY, normsLoading);
+ }
+ builder.endObject();
+ }
+ if (includeDefaults || fieldType.indexOptions() != defaultFieldType.indexOptions()) {
+ builder.field("index_options", indexOptionToString(fieldType.indexOptions()));
+ }
+
+ if (indexAnalyzer == null && searchAnalyzer == null) {
+ if (includeDefaults) {
+ builder.field("analyzer", "default");
+ }
+ } else if (indexAnalyzer == null) {
+ // searchAnalyzer != null
+ if (includeDefaults || (!searchAnalyzer.name().startsWith("_") && !searchAnalyzer.name().equals("default"))) {
+ builder.field("search_analyzer", searchAnalyzer.name());
+ }
+ } else if (searchAnalyzer == null) {
+ // indexAnalyzer != null
+ if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
+ builder.field("index_analyzer", indexAnalyzer.name());
+ }
+ } else if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
+ // indexAnalyzer == searchAnalyzer
+ if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
+ builder.field("analyzer", indexAnalyzer.name());
+ }
+ } else {
+ // both are there but different
+ if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
+ builder.field("index_analyzer", indexAnalyzer.name());
+ }
+ if (includeDefaults || (!searchAnalyzer.name().startsWith("_") && !searchAnalyzer.name().equals("default"))) {
+ builder.field("search_analyzer", searchAnalyzer.name());
+ }
+ }
+
+ if (postingsFormat != null) {
+ if (includeDefaults || !postingsFormat.name().equals(defaultPostingFormat())) {
+ builder.field("postings_format", postingsFormat.name());
+ }
+ } else if (includeDefaults) {
+ String format = defaultPostingFormat();
+ if (format == null) {
+ format = PostingsFormatService.DEFAULT_FORMAT;
+ }
+ builder.field("postings_format", format);
+ }
+
+ if (docValuesFormat != null) {
+ if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
+ builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
+ }
+ } else if (includeDefaults) {
+ String format = defaultDocValuesFormat();
+ if (format == null) {
+ format = DocValuesFormatService.DEFAULT_FORMAT;
+ }
+ builder.field(DOC_VALUES_FORMAT, format);
+ }
+
+ if (similarity() != null) {
+ builder.field("similarity", similarity().name());
+ } else if (includeDefaults) {
+            builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY);
+ }
+
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+ multiFields.toXContent(builder, params);
+
+ if (copyTo != null) {
+ copyTo.toXContent(builder, params);
+ }
+ }
+
+ protected static String indexOptionToString(IndexOptions indexOption) {
+ switch (indexOption) {
+ case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
+ return TypeParsers.INDEX_OPTIONS_OFFSETS;
+ case DOCS_AND_FREQS:
+ return TypeParsers.INDEX_OPTIONS_FREQS;
+ case DOCS_AND_FREQS_AND_POSITIONS:
+ return TypeParsers.INDEX_OPTIONS_POSITIONS;
+ case DOCS_ONLY:
+ return TypeParsers.INDEX_OPTIONS_DOCS;
+ default:
+ throw new ElasticsearchIllegalArgumentException("Unknown IndexOptions [" + indexOption + "]");
+ }
+ }
+
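+    // renders the term_vector mapping value for the given field type, e.g. "no", "yes" or "with_positions_offsets_payloads"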
+ public static String termVectorOptionsToString(FieldType fieldType) {
+ if (!fieldType.storeTermVectors()) {
+ return "no";
+ } else if (!fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
+ return "yes";
+ } else if (fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
+ return "with_offsets";
+ } else {
+ StringBuilder builder = new StringBuilder("with");
+ if (fieldType.storeTermVectorPositions()) {
+ builder.append("_positions");
+ }
+ if (fieldType.storeTermVectorOffsets()) {
+ builder.append("_offsets");
+ }
+ if (fieldType.storeTermVectorPayloads()) {
+ builder.append("_payloads");
+ }
+ return builder.toString();
+ }
+ }
+
+ protected static String indexTokenizeOptionToString(boolean indexed, boolean tokenized) {
+ if (!indexed) {
+ return "no";
+ } else if (tokenized) {
+ return "analyzed";
+ } else {
+ return "not_analyzed";
+ }
+ }
+
+
+ protected abstract String contentType();
+
+ @Override
+ public void close() {
+ multiFields.close();
+ }
+
+ @Override
+ public boolean isNumeric() {
+ return false;
+ }
+
+ @Override
+ public boolean isSortable() {
+ return true;
+ }
+
+ public boolean hasDocValues() {
+ return docValues;
+ }
+
+ @Override
+ public Loading normsLoading(Loading defaultLoading) {
+ return normsLoading == null ? defaultLoading : normsLoading;
+ }
+
+ public static class MultiFields {
+
+ public static MultiFields empty() {
+ return new MultiFields(Defaults.PATH_TYPE, ImmutableOpenMap.<String, Mapper>of());
+ }
+
+ public static class Builder {
+
+ private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder();
+ private ContentPath.Type pathType = Defaults.PATH_TYPE;
+
+ public Builder pathType(ContentPath.Type pathType) {
+ this.pathType = pathType;
+ return this;
+ }
+
+ public Builder add(Mapper.Builder builder) {
+ mapperBuilders.put(builder.name(), builder);
+ return this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public MultiFields build(AbstractFieldMapper.Builder mainFieldBuilder, BuilderContext context) {
+ if (pathType == Defaults.PATH_TYPE && mapperBuilders.isEmpty()) {
+ return empty();
+ } else if (mapperBuilders.isEmpty()) {
+ return new MultiFields(pathType, ImmutableOpenMap.<String, Mapper>of());
+ } else {
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(pathType);
+ context.path().add(mainFieldBuilder.name());
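+                // build each sub-field in place: the Mapper.Builder values are replaced by the Mappers they produce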
+ ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders;
+ for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) {
+ String key = cursor.key;
+ Mapper.Builder value = cursor.value;
+ mapperBuilders.put(key, value.build(context));
+ }
+ context.path().remove();
+ context.path().pathType(origPathType);
+ ImmutableOpenMap.Builder<String, Mapper> mappers = mapperBuilders.cast();
+ return new MultiFields(pathType, mappers.build());
+ }
+ }
+
+ }
+
+ private final ContentPath.Type pathType;
+ private volatile ImmutableOpenMap<String, Mapper> mappers;
+
+ public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, Mapper> mappers) {
+ this.pathType = pathType;
+ this.mappers = mappers;
+            // we disable inclusion in _all for multi-field sub-mappers
+ for (ObjectCursor<Mapper> cursor : mappers.values()) {
+ Mapper mapper = cursor.value;
+ if (mapper instanceof AllFieldMapper.IncludeInAll) {
+ ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
+ }
+ }
+ }
+
+ public void parse(AbstractFieldMapper mainField, ParseContext context) throws IOException {
+ if (mappers.isEmpty()) {
+ return;
+ }
+
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(pathType);
+
+ context.path().add(mainField.name());
+ for (ObjectCursor<Mapper> cursor : mappers.values()) {
+ cursor.value.parse(context);
+ }
+ context.path().remove();
+ context.path().pathType(origPathType);
+ }
+
+ // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ AbstractFieldMapper mergeWithMultiField = (AbstractFieldMapper) mergeWith;
+
+ List<FieldMapper> newFieldMappers = null;
+ ImmutableOpenMap.Builder<String, Mapper> newMappersBuilder = null;
+
+ for (ObjectCursor<Mapper> cursor : mergeWithMultiField.multiFields.mappers.values()) {
+ Mapper mergeWithMapper = cursor.value;
+ Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name());
+ if (mergeIntoMapper == null) {
+ // no mapping, simply add it if not simulating
+ if (!mergeContext.mergeFlags().simulate()) {
+                    // we disable inclusion in _all for multi-field sub-mappers
+ if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
+ ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
+ }
+ if (newMappersBuilder == null) {
+ newMappersBuilder = ImmutableOpenMap.builder(mappers);
+ }
+ newMappersBuilder.put(mergeWithMapper.name(), mergeWithMapper);
+ if (mergeWithMapper instanceof AbstractFieldMapper) {
+ if (newFieldMappers == null) {
+ newFieldMappers = new ArrayList<FieldMapper>(2);
+ }
+ newFieldMappers.add((FieldMapper) mergeWithMapper);
+ }
+ }
+ } else {
+ mergeIntoMapper.merge(mergeWithMapper, mergeContext);
+ }
+ }
+
+ // first add all field mappers
+ if (newFieldMappers != null) {
+ mergeContext.docMapper().addFieldMappers(newFieldMappers);
+ }
+ // now publish mappers
+ if (newMappersBuilder != null) {
+ mappers = newMappersBuilder.build();
+ }
+ }
+
+ public void traverse(FieldMapperListener fieldMapperListener) {
+ for (ObjectCursor<Mapper> cursor : mappers.values()) {
+ cursor.value.traverse(fieldMapperListener);
+ }
+ }
+
+ public void close() {
+ for (ObjectCursor<Mapper> cursor : mappers.values()) {
+ cursor.value.close();
+ }
+ }
+
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (pathType != Defaults.PATH_TYPE) {
+ builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
+ }
+ if (!mappers.isEmpty()) {
+ builder.startObject("fields");
+ for (ObjectCursor<Mapper> cursor : mappers.values()) {
+ cursor.value.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+ return builder;
+ }
+ }
+
+ /**
+     * Represents the list of fields that the current field's value should be copied to
+ */
+ public static class CopyTo {
+
+ private final ImmutableList<String> copyToFields;
+
+ private CopyTo(ImmutableList<String> copyToFields) {
+ this.copyToFields = copyToFields;
+ }
+
+ /**
+         * Copies the current field's value into each of the configured target fields
+ */
+ public void parse(ParseContext context) throws IOException {
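+            // only copy when not already within a copy_to, which keeps copy_to chains from recursing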
+ if (!context.isWithinCopyTo()) {
+ for (String field : copyToFields) {
+ parse(field, context);
+ }
+ }
+ }
+
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (!copyToFields.isEmpty()) {
+ builder.startArray("copy_to");
+ for (String field : copyToFields) {
+ builder.value(field);
+ }
+ builder.endArray();
+ }
+ return builder;
+ }
+
+ public static class Builder {
+ private final ImmutableList.Builder<String> copyToBuilders = ImmutableList.builder();
+
+ public Builder add(String field) {
+ copyToBuilders.add(field);
+ return this;
+ }
+
+ public CopyTo build() {
+ return new CopyTo(copyToBuilders.build());
+ }
+ }
+
+ public ImmutableList<String> copyToFields() {
+ return copyToFields;
+ }
+
+ /**
+         * Copies the current field's value into the field with the given name
+ */
+ public void parse(String field, ParseContext context) throws IOException {
+ context.setWithinCopyTo();
+ FieldMappers mappers = context.docMapper().mappers().indexName(field);
+ if (mappers != null && !mappers.isEmpty()) {
+ mappers.mapper().parse(context);
+ } else {
+ int posDot = field.lastIndexOf('.');
+ if (posDot > 0) {
+ // Compound name
+ String objectPath = field.substring(0, posDot);
+ String fieldPath = field.substring(posDot + 1);
+ ObjectMapper mapper = context.docMapper().objectMappers().get(objectPath);
+ if (mapper == null) {
+ //TODO: Create an object dynamically?
+ throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]");
+ }
+
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(ContentPath.Type.FULL);
+ context.path().add(objectPath);
+
+                    // We might already be within a dynamically created field, so clear the withinNewMapper flag
+                    // and restore it afterwards so that new mappers created from copy_to fields are not missed
+ boolean origWithinNewMapper = context.isWithinNewMapper();
+ context.clearWithinNewMapper();
+
+ try {
+ mapper.parseDynamicValue(context, fieldPath, context.parser().currentToken());
+ } finally {
+ if (origWithinNewMapper) {
+ context.setWithinNewMapper();
+ } else {
+ context.clearWithinNewMapper();
+ }
+ context.path().remove();
+ context.path().pathType(origPathType);
+ }
+
+ } else {
+                // We might already be within a dynamically created field, so clear the withinNewMapper flag
+                // and restore it afterwards so that new mappers created from copy_to fields are not missed
+ boolean origWithinNewMapper = context.isWithinNewMapper();
+ context.clearWithinNewMapper();
+ try {
+ context.docMapper().root().parseDynamicValue(context, field, context.parser().currentToken());
+ } finally {
+ if (origWithinNewMapper) {
+ context.setWithinNewMapper();
+ } else {
+ context.clearWithinNewMapper();
+ }
+ }
+
+ }
+ }
+ context.clearWithinCopyTo();
+ }
+
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java
new file mode 100644
index 0000000..638e1ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.binaryField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ * Field mapper for binary values (base64 encoded in JSON), with optional compression of the stored bytes.
+ */
+public class BinaryFieldMapper extends AbstractFieldMapper<BytesReference> {
+
+ public static final String CONTENT_TYPE = "binary";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final long COMPRESS_THRESHOLD = -1;
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(false);
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, BinaryFieldMapper> {
+
+ private Boolean compress = null;
+
+ private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder compress(boolean compress) {
+ this.compress = compress;
+ return this;
+ }
+
+ public Builder compressThreshold(long compressThreshold) {
+ this.compressThreshold = compressThreshold;
+ return this;
+ }
+
+ @Override
+ public BinaryFieldMapper build(BuilderContext context) {
+ return new BinaryFieldMapper(buildNames(context), fieldType, compress, compressThreshold, postingsProvider,
+ docValuesProvider, multiFieldsBuilder.build(this, context), copyTo);
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ BinaryFieldMapper.Builder builder = binaryField(name);
+ parseField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("compress") && fieldNode != null) {
+ builder.compress(nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("compress_threshold") && fieldNode != null) {
+ if (fieldNode instanceof Number) {
+ builder.compressThreshold(((Number) fieldNode).longValue());
+ builder.compress(true);
+ } else {
+ builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString()).bytes());
+ builder.compress(true);
+ }
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Boolean compress;
+
+ private long compressThreshold;
+
+ protected BinaryFieldMapper(Names names, FieldType fieldType, Boolean compress, long compressThreshold,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ MultiFields multiFields, CopyTo copyTo) {
+ super(names, 1.0f, fieldType, null, null, null, postingsProvider, docValuesProvider, null, null, null, null, multiFields, copyTo);
+ this.compress = compress;
+ this.compressThreshold = compressThreshold;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return null;
+ }
+
+ @Override
+ public Object valueForSearch(Object value) {
+ return value(value);
+ }
+
+ @Override
+ public BytesReference value(Object value) {
+ if (value == null) {
+ return null;
+ }
+
+ BytesReference bytes;
+ if (value instanceof BytesRef) {
+ bytes = new BytesArray((BytesRef) value);
+ } else if (value instanceof BytesReference) {
+ bytes = (BytesReference) value;
+ } else if (value instanceof byte[]) {
+ bytes = new BytesArray((byte[]) value);
+ } else {
+ try {
+ bytes = new BytesArray(Base64.decode(value.toString()));
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to convert bytes", e);
+ }
+ }
+ try {
+ return CompressorFactory.uncompressIfNeeded(bytes);
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to decompress source", e);
+ }
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!fieldType().stored()) {
+ return;
+ }
+ byte[] value;
+ if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
+ return;
+ } else {
+ value = context.parser().binaryValue();
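+                // compress the stored bytes when compression is enabled and the value is not already compressed; a threshold of -1 means always compress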
+ if (compress != null && compress && !CompressorFactory.isCompressed(value, 0, value.length)) {
+ if (compressThreshold == -1 || value.length > compressThreshold) {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream);
+ stream.writeBytes(value, 0, value.length);
+ stream.close();
+ value = bStream.bytes().toBytes();
+ }
+ }
+ }
+ if (value == null) {
+ return;
+ }
+ fields.add(new Field(names.indexName(), value, fieldType));
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ builder.field("type", contentType());
+ if (includeDefaults || !names.name().equals(names.indexNameClean())) {
+ builder.field("index_name", names.indexNameClean());
+ }
+ if (compress != null) {
+ builder.field("compress", compress);
+ } else if (includeDefaults) {
+ builder.field("compress", false);
+ }
+ if (compressThreshold != -1) {
+ builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
+ } else if (includeDefaults) {
+ builder.field("compress_threshold", -1);
+ }
+ if (includeDefaults || fieldType.stored() != defaultFieldType().stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ BinaryFieldMapper sourceMergeWith = (BinaryFieldMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (sourceMergeWith.compress != null) {
+ this.compress = sourceMergeWith.compress;
+ }
+ if (sourceMergeWith.compressThreshold != -1) {
+ this.compressThreshold = sourceMergeWith.compressThreshold;
+ }
+ }
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java
new file mode 100644
index 0000000..d8d1af8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.booleanField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ * Field mapper for boolean values, indexed as the single-character terms "T" and "F".
+ */
+// TODO this can be made better, maybe storing a byte for it?
+public class BooleanFieldMapper extends AbstractFieldMapper<Boolean> {
+
+ public static final String CONTENT_TYPE = "boolean";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Boolean NULL_VALUE = null;
+ }
+
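+    // booleans are indexed as single-character terms: "T" for true and "F" for false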
+ public static class Values {
+ public final static BytesRef TRUE = new BytesRef("T");
+ public final static BytesRef FALSE = new BytesRef("F");
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, BooleanFieldMapper> {
+
+ private Boolean nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ this.builder = this;
+ }
+
+ public Builder nullValue(boolean nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public Builder tokenized(boolean tokenized) {
+ if (tokenized) {
+ throw new ElasticsearchIllegalArgumentException("bool field can't be tokenized");
+ }
+ return super.tokenized(tokenized);
+ }
+
+ @Override
+ public BooleanFieldMapper build(BuilderContext context) {
+ return new BooleanFieldMapper(buildNames(context), boost, fieldType, nullValue, postingsProvider,
+ docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ BooleanFieldMapper.Builder builder = booleanField(name);
+ parseField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeBooleanValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Boolean nullValue;
+
+ protected BooleanFieldMapper(Names names, float boost, FieldType fieldType, Boolean nullValue, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity, Loading normsLoading,
+ @Nullable Settings fieldDataSettings, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, boost, fieldType, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ // TODO have a special boolean type?
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public boolean useTermQueryWithQueryString() {
+ return true;
+ }
+
+ @Override
+ public Boolean value(Object value) {
+ if (value == null) {
+ return Boolean.FALSE;
+ }
+ String sValue = value.toString();
+ if (sValue.length() == 0) {
+ return Boolean.FALSE;
+ }
+ if (sValue.length() == 1 && sValue.charAt(0) == 'F') {
+ return Boolean.FALSE;
+ }
+ if (Booleans.parseBoolean(sValue, false)) {
+ return Boolean.TRUE;
+ }
+ return Boolean.FALSE;
+ }
+
+ @Override
+ public Object valueForSearch(Object value) {
+ return value(value);
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ if (value == null) {
+ return Values.FALSE;
+ }
+ if (value instanceof Boolean) {
+ return ((Boolean) value) ? Values.TRUE : Values.FALSE;
+ }
+ String sValue;
+ if (value instanceof BytesRef) {
+ sValue = ((BytesRef) value).utf8ToString();
+ } else {
+ sValue = value.toString();
+ }
+ if (sValue.length() == 0) {
+ return Values.FALSE;
+ }
+ if (sValue.length() == 1 && sValue.charAt(0) == 'F') {
+ return Values.FALSE;
+ }
+ if (Booleans.parseBoolean(sValue, false)) {
+ return Values.TRUE;
+ }
+ return Values.FALSE;
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return new TermFilter(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE));
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!fieldType().indexed() && !fieldType().stored()) {
+ return;
+ }
+ XContentParser.Token token = context.parser().currentToken();
+ String value = null;
+ if (token == XContentParser.Token.VALUE_NULL) {
+ if (nullValue != null) {
+ value = nullValue ? "T" : "F";
+ }
+ } else {
+ value = context.parser().booleanValue() ? "T" : "F";
+ }
+ if (value == null) {
+ return;
+ }
+ fields.add(new Field(names.indexName(), value, fieldType));
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java
new file mode 100644
index 0000000..196bb8e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.NumericIntegerAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeByteValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.byteField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ * Field mapper for byte values, indexed using the integer trie encoding.
+ */
+public class ByteFieldMapper extends NumberFieldMapper<Byte> {
+
+ public static final String CONTENT_TYPE = "byte";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Byte NULL_VALUE = null;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, ByteFieldMapper> {
+
+ protected Byte nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(byte nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public ByteFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ ByteFieldMapper fieldMapper = new ByteFieldMapper(buildNames(context),
+ precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context),
+ coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading,
+ fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ ByteFieldMapper.Builder builder = byteField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeByteValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Byte nullValue;
+
+ private String nullValueAsString;
+
+ protected ByteFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Byte nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity, Loading normsLoading,
+ @Nullable Settings fieldDataSettings, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues,
+ ignoreMalformed, coerce, new NamedAnalyzer("_byte/" + precisionStep, new NumericIntegerAnalyzer(precisionStep)),
+ new NamedAnalyzer("_byte/max", new NumericIntegerAnalyzer(Integer.MAX_VALUE)), postingsProvider,
+ docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ this.nullValueAsString = nullValue == null ? null : nullValue.toString();
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("byte");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
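+        // bytes are indexed using the 32-bit integer trie encoding, so the precision step is capped accordingly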
+ return 32;
+ }
+
+ @Override
+ public Byte value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).byteValue();
+ }
+ if (value instanceof BytesRef) {
+ return ((BytesRef) value).bytes[((BytesRef) value).offset];
+ }
+ return Byte.parseByte(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ private byte parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).byteValue();
+ }
+ if (value instanceof BytesRef) {
+ return Byte.parseByte(((BytesRef) value).utf8ToString());
+ }
+ return Byte.parseByte(value.toString());
+ }
+
+ private int parseValueAsInt(Object value) {
+ return parseValue(value);
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ byte iValue = Byte.parseByte(value);
+ byte iSim = fuzziness.asByte();
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
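+
+ // Worked example (illustrative values): for value "5" with a fuzziness of 2,
+ // iValue - iSim = 3 and iValue + iSim = 7, so the query becomes the inclusive
+ // int range [3..7] over the same trie-encoded terms used by termQuery below.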
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ int iValue = parseValue(value);
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValueAsInt(lowerTerm),
+ upperTerm == null ? null : parseValueAsInt(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ int iValue = parseValueAsInt(value);
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValueAsInt(lowerTerm),
+ upperTerm == null ? null : parseValueAsInt(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newByteRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ nullValue.intValue(),
+ nullValue.intValue(),
+ true, true);
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ byte value;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue == null) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else if (externalValue instanceof String) {
+ String sExternalValue = (String) externalValue;
+ if (sExternalValue.length() == 0) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else {
+ value = Byte.parseByte(sExternalValue);
+ }
+ } else {
+ value = ((Number) externalValue).byteValue();
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), Byte.toString(value), boost);
+ }
+ } else {
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
+ (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
+ context.allEntries().addText(names.fullName(), nullValueAsString, boost);
+ }
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ Byte objValue = nullValue;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ objValue = (byte) parser.shortValue(coerce.value());
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (objValue == null) {
+ // no value
+ return;
+ }
+ value = objValue;
+ } else {
+ value = (byte) parser.shortValue(coerce.value());
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), parser.text(), boost);
+ }
+ }
+ }
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomByteNumericField field = new CustomByteNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ addDocValue(context, value);
+ }
+ }
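+
+ // Per document, the parsing above accepts (field name invented for illustration):
+ // "rating": 5 -> plain number
+ // "rating": "5" -> numeric string
+ // "rating": null or "rating": "" -> falls back to null_value, if configured
+ // "rating": { "value": 5, "boost": 2.0 } -> object form with a per-field boost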
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((ByteFieldMapper) mergeWith).nullValue;
+ this.nullValueAsString = ((ByteFieldMapper) mergeWith).nullValueAsString;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+ }
+
+ public static class CustomByteNumericField extends CustomNumericField {
+
+ private final byte number;
+
+ private final NumberFieldMapper mapper;
+
+ public CustomByteNumericField(NumberFieldMapper mapper, byte number, FieldType fieldType) {
+ super(mapper, number, fieldType);
+ this.mapper = mapper;
+ this.number = number;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) {
+ if (fieldType().indexed()) {
+ return mapper.popCachedStream().setIntValue(number);
+ }
+ return null;
+ }
+
+ @Override
+ public String numericAsString() {
+ return Byte.toString(number);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
new file mode 100644
index 0000000..f5f3c2c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
@@ -0,0 +1,442 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.core;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.NumberType;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+import org.elasticsearch.search.suggest.completion.AnalyzingCompletionLookupProvider;
+import org.elasticsearch.search.suggest.completion.CompletionPostingsFormatProvider;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.completionField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
+
+/**
+ *
+ */
+public class CompletionFieldMapper extends AbstractFieldMapper<String> {
+
+ public static final String CONTENT_TYPE = "completion";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final boolean DEFAULT_PRESERVE_SEPARATORS = true;
+ public static final boolean DEFAULT_POSITION_INCREMENTS = true;
+ public static final boolean DEFAULT_HAS_PAYLOADS = false;
+ public static final int DEFAULT_MAX_INPUT_LENGTH = 50;
+ }
+
+ public static class Fields {
+ // Mapping field names
+ public static final String ANALYZER = "analyzer";
+ public static final ParseField INDEX_ANALYZER = new ParseField("index_analyzer");
+ public static final ParseField SEARCH_ANALYZER = new ParseField("search_analyzer");
+ public static final ParseField PRESERVE_SEPARATORS = new ParseField("preserve_separators");
+ public static final ParseField PRESERVE_POSITION_INCREMENTS = new ParseField("preserve_position_increments");
+ public static final String PAYLOADS = "payloads";
+ public static final String TYPE = "type";
+ public static final ParseField MAX_INPUT_LENGTH = new ParseField("max_input_length", "max_input_len");
+ // Content field names
+ public static final String CONTENT_FIELD_NAME_INPUT = "input";
+ public static final String CONTENT_FIELD_NAME_OUTPUT = "output";
+ public static final String CONTENT_FIELD_NAME_PAYLOAD = "payload";
+ public static final String CONTENT_FIELD_NAME_WEIGHT = "weight";
+ }
+
+ public static Set<String> ALLOWED_CONTENT_FIELD_NAMES = Sets.newHashSet(Fields.CONTENT_FIELD_NAME_INPUT,
+ Fields.CONTENT_FIELD_NAME_OUTPUT, Fields.CONTENT_FIELD_NAME_PAYLOAD, Fields.CONTENT_FIELD_NAME_WEIGHT);
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, CompletionFieldMapper> {
+
+ private boolean preserveSeparators = Defaults.DEFAULT_PRESERVE_SEPARATORS;
+ private boolean payloads = Defaults.DEFAULT_HAS_PAYLOADS;
+ private boolean preservePositionIncrements = Defaults.DEFAULT_POSITION_INCREMENTS;
+ private int maxInputLength = Defaults.DEFAULT_MAX_INPUT_LENGTH;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder payloads(boolean payloads) {
+ this.payloads = payloads;
+ return this;
+ }
+
+ public Builder preserveSeparators(boolean preserveSeparators) {
+ this.preserveSeparators = preserveSeparators;
+ return this;
+ }
+
+ public Builder preservePositionIncrements(boolean preservePositionIncrements) {
+ this.preservePositionIncrements = preservePositionIncrements;
+ return this;
+ }
+
+ public Builder maxInputLength(int maxInputLength) {
+ if (maxInputLength <= 0) {
+ throw new ElasticsearchIllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]");
+ }
+ this.maxInputLength = maxInputLength;
+ return this;
+ }
+
+ @Override
+ public CompletionFieldMapper build(Mapper.BuilderContext context) {
+ return new CompletionFieldMapper(buildNames(context), indexAnalyzer, searchAnalyzer, postingsProvider, similarity, payloads,
+ preserveSeparators, preservePositionIncrements, maxInputLength, multiFieldsBuilder.build(this, context), copyTo);
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ CompletionFieldMapper.Builder builder = completionField(name);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = entry.getKey();
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("type")) {
+ continue;
+ }
+ if (fieldName.equals("analyzer")) {
+ NamedAnalyzer analyzer = getNamedAnalyzer(parserContext, fieldNode.toString());
+ builder.indexAnalyzer(analyzer);
+ builder.searchAnalyzer(analyzer);
+ } else if (Fields.INDEX_ANALYZER.match(fieldName)) {
+ builder.indexAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
+ } else if (Fields.SEARCH_ANALYZER.match(fieldName)) {
+ builder.searchAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
+ } else if (fieldName.equals(Fields.PAYLOADS)) {
+ builder.payloads(Boolean.parseBoolean(fieldNode.toString()));
+ } else if (Fields.PRESERVE_SEPARATORS.match(fieldName)) {
+ builder.preserveSeparators(Boolean.parseBoolean(fieldNode.toString()));
+ } else if (Fields.PRESERVE_POSITION_INCREMENTS.match(fieldName)) {
+ builder.preservePositionIncrements(Boolean.parseBoolean(fieldNode.toString()));
+ } else if (Fields.MAX_INPUT_LENGTH.match(fieldName)) {
+ builder.maxInputLength(Integer.parseInt(fieldNode.toString()));
+ } else if ("fields".equals(fieldName) || "path".equals(fieldName)) {
+ parseMultiField(builder, name, node, parserContext, fieldName, fieldNode);
+ } else {
+ throw new MapperParsingException("Unknown field [" + fieldName + "]");
+ }
+ }
+
+ if (builder.searchAnalyzer == null) {
+ builder.searchAnalyzer(parserContext.analysisService().analyzer("simple"));
+ }
+
+ if (builder.indexAnalyzer == null) {
+ builder.indexAnalyzer(parserContext.analysisService().analyzer("simple"));
+ }
+ // we are just using this as the default to be wrapped by the CompletionPostingsFormatProvider in the CompletionFieldMapper ctor
+ builder.postingsFormat(parserContext.postingFormatService().get("default"));
+ return builder;
+ }
+
+ private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
+ NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
+ }
+ return analyzer;
+ }
+ }
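+
+ // A minimal sketch of a mapping this TypeParser accepts (field name invented for
+ // illustration; the values shown are the defaults from Defaults above):
+ //
+ // "suggest": {
+ //   "type": "completion",
+ //   "analyzer": "simple",               // or index_analyzer / search_analyzer
+ //   "payloads": false,
+ //   "preserve_separators": true,
+ //   "preserve_position_increments": true,
+ //   "max_input_length": 50              // "max_input_len" is also accepted
+ // }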
+
+ private static final BytesRef EMPTY = new BytesRef();
+
+ private final CompletionPostingsFormatProvider completionPostingsFormatProvider;
+ private final AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider;
+ private final boolean payloads;
+ private final boolean preservePositionIncrements;
+ private final boolean preserveSeparators;
+ private int maxInputLength;
+
+ public CompletionFieldMapper(Names names, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsProvider, SimilarityProvider similarity, boolean payloads,
+ boolean preserveSeparators, boolean preservePositionIncrements, int maxInputLength, MultiFields multiFields, CopyTo copyTo) {
+ super(names, 1.0f, Defaults.FIELD_TYPE, null, indexAnalyzer, searchAnalyzer, postingsProvider, null, similarity, null, null, null, multiFields, copyTo);
+ analyzingSuggestLookupProvider = new AnalyzingCompletionLookupProvider(preserveSeparators, false, preservePositionIncrements, payloads);
+ this.completionPostingsFormatProvider = new CompletionPostingsFormatProvider("completion", postingsProvider, analyzingSuggestLookupProvider);
+ this.preserveSeparators = preserveSeparators;
+ this.payloads = payloads;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.maxInputLength = maxInputLength;
+ }
+
+
+ @Override
+ public PostingsFormatProvider postingsFormatProvider() {
+ return this.completionPostingsFormatProvider;
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ XContentParser parser = context.parser();
+ XContentParser.Token token = parser.currentToken();
+
+ String surfaceForm = null;
+ BytesRef payload = null;
+ long weight = -1;
+ List<String> inputs = Lists.newArrayListWithExpectedSize(4);
+
+ if (token == XContentParser.Token.VALUE_STRING) {
+ inputs.add(parser.text());
+ multiFields.parse(this, context);
+ } else {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ if (!ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) {
+ throw new ElasticsearchIllegalArgumentException("Unknown field name [" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES);
+ }
+ } else if (Fields.CONTENT_FIELD_NAME_PAYLOAD.equals(currentFieldName)) {
+ if (!isStoringPayloads()) {
+ throw new MapperException("Payloads disabled in mapping");
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ XContentBuilder payloadBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
+ payload = payloadBuilder.bytes().toBytesRef();
+ payloadBuilder.close();
+ } else if (token.isValue()) {
+ payload = parser.bytesOrNull();
+ } else {
+ throw new MapperException("payload doesn't support type " + token);
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if (Fields.CONTENT_FIELD_NAME_OUTPUT.equals(currentFieldName)) {
+ surfaceForm = parser.text();
+ }
+ if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
+ inputs.add(parser.text());
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if (Fields.CONTENT_FIELD_NAME_WEIGHT.equals(currentFieldName)) {
+ NumberType numberType = parser.numberType();
+ if (NumberType.LONG != numberType && NumberType.INT != numberType) {
+ throw new ElasticsearchIllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]");
+ }
+ weight = parser.longValue(); // always parse a long to make sure we don't get the overflow value
+ if (weight < 0 || weight > Integer.MAX_VALUE) {
+ throw new ElasticsearchIllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]");
+ }
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ inputs.add(parser.text());
+ }
+ }
+ }
+ }
+ }
+ payload = payload == null ? EMPTY : payload;
+ if (surfaceForm == null) { // no surface form, use the inputs
+ for (String input : inputs) {
+ BytesRef suggestPayload = analyzingSuggestLookupProvider.buildPayload(new BytesRef(
+ input), weight, payload);
+ context.doc().add(getCompletionField(input, suggestPayload));
+ }
+ } else {
+ BytesRef suggestPayload = analyzingSuggestLookupProvider.buildPayload(new BytesRef(
+ surfaceForm), weight, payload);
+ for (String input : inputs) {
+ context.doc().add(getCompletionField(input, suggestPayload));
+ }
+ }
+ }
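+
+ // parse() above accepts either a bare string or the object form (values invented
+ // for illustration):
+ //
+ // "suggest": "Nirvana"
+ // "suggest": {
+ //   "input": ["Nirvana", "Nevermind"],  // single string or array of strings
+ //   "output": "Nirvana - the band",     // optional surface form
+ //   "weight": 34,                       // integer in [0..2147483647]
+ //   "payload": { "id": 1234 }           // only if payloads are enabled in the mapping
+ // }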
+
+ public Field getCompletionField(String input, BytesRef payload) {
+ final String originalInput = input;
+ if (input.length() > maxInputLength) {
+ final int len = correctSubStringLen(input, Math.min(maxInputLength, input.length()));
+ input = input.substring(0, len);
+ }
+ for (int i = 0; i < input.length(); i++) {
+ if (isReservedChar(input.charAt(i))) {
+ throw new ElasticsearchIllegalArgumentException("Illegal input [" + originalInput + "] UTF-16 codepoint [0x"
+ + Integer.toHexString((int) input.charAt(i)).toUpperCase(Locale.ROOT)
+ + "] at position " + i + " is a reserved character");
+ }
+ }
+ return new SuggestField(names.indexName(), input, this.fieldType, payload, analyzingSuggestLookupProvider);
+ }
+
+ public static int correctSubStringLen(String input, int len) {
+ if (Character.isHighSurrogate(input.charAt(len - 1))) {
+ assert input.length() >= len + 1 && Character.isLowSurrogate(input.charAt(len));
+ return len + 1;
+ }
+ return len;
+ }
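+
+ // Worked example: truncating "ab\uD83D\uDCA9" (four UTF-16 units ending in a
+ // surrogate pair) at len == 3 would split the pair; since charAt(2) is a high
+ // surrogate, correctSubStringLen returns 4 and the substring keeps the whole pair.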
+
+ public BytesRef buildPayload(BytesRef surfaceForm, long weight, BytesRef payload) throws IOException {
+ return analyzingSuggestLookupProvider.buildPayload(
+ surfaceForm, weight, payload);
+ }
+
+ private static final class SuggestField extends Field {
+ private final BytesRef payload;
+ private final CompletionTokenStream.ToFiniteStrings toFiniteStrings;
+
+ public SuggestField(String name, Reader value, FieldType type, BytesRef payload, CompletionTokenStream.ToFiniteStrings toFiniteStrings) {
+ super(name, value, type);
+ this.payload = payload;
+ this.toFiniteStrings = toFiniteStrings;
+ }
+
+ public SuggestField(String name, String value, FieldType type, BytesRef payload, CompletionTokenStream.ToFiniteStrings toFiniteStrings) {
+ super(name, value, type);
+ this.payload = payload;
+ this.toFiniteStrings = toFiniteStrings;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ TokenStream ts = super.tokenStream(analyzer);
+ return new CompletionTokenStream(ts, payload, toFiniteStrings);
+ }
+ }
+
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name())
+ .field(Fields.TYPE, CONTENT_TYPE);
+ if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
+ builder.field(Fields.ANALYZER, indexAnalyzer.name());
+ } else {
+ builder.field(Fields.INDEX_ANALYZER.getPreferredName(), indexAnalyzer.name())
+ .field(Fields.SEARCH_ANALYZER.getPreferredName(), searchAnalyzer.name());
+ }
+ builder.field(Fields.PAYLOADS, this.payloads);
+ builder.field(Fields.PRESERVE_SEPARATORS.getPreferredName(), this.preserveSeparators);
+ builder.field(Fields.PRESERVE_POSITION_INCREMENTS.getPreferredName(), this.preservePositionIncrements);
+ builder.field(Fields.MAX_INPUT_LENGTH.getPreferredName(), this.maxInputLength);
+ multiFields.toXContent(builder, params);
+ return builder.endObject();
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ }
+
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public boolean isSortable() {
+ return false;
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return null;
+ }
+
+ @Override
+ public String value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return value.toString();
+ }
+
+ public boolean isStoringPayloads() {
+ return payloads;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
+ if (payloads != fieldMergeWith.payloads) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different payload values");
+ }
+ if (preservePositionIncrements != fieldMergeWith.preservePositionIncrements) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'preserve_position_increments' values");
+ }
+ if (preserveSeparators != fieldMergeWith.preserveSeparators) {
+ mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'preserve_separators' values");
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.maxInputLength = fieldMergeWith.maxInputLength;
+ }
+ }
+
+ // this should be package private but our tests don't allow it.
+ public static boolean isReservedChar(char character) {
+ /* we use 0x001F as a SEP_LABEL in the suggester, but we can check the UTF-16 representation
+ * directly since the two are equivalent for these code points. We also don't need to convert
+ * the input character to UTF-8 here to check for the 0x00 end label: every byte of a multi-byte
+ * UTF-8 character has its high bit set (binary 1x......), so a UTF-16 code point of 0x00 can only
+ * correspond to the single-byte UTF-8 encoding of 0x00 */
+ assert XAnalyzingSuggester.PAYLOAD_SEP == XAnalyzingSuggester.SEP_LABEL; // ensure they are the same!
+ switch(character) {
+ case XAnalyzingSuggester.END_BYTE:
+ case XAnalyzingSuggester.SEP_LABEL:
+ case XAnalyzingSuggester.HOLE_CHARACTER:
+ return true;
+ default:
+ return false;
+ }
+ }
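+
+ // Concretely, with the XAnalyzingSuggester constants at the time of writing this
+ // rejects U+0000 (END_BYTE), U+001F (SEP_LABEL, also used as PAYLOAD_SEP) and
+ // U+001E (HOLE_CHARACTER) anywhere in a suggestion input.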
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
new file mode 100644
index 0000000..431c48e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
@@ -0,0 +1,586 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.joda.DateMathParser;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.NumericDateAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.dateField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ *
+ */
+public class DateFieldMapper extends NumberFieldMapper<Long> {
+
+ public static final String CONTENT_TYPE = "date";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime", Locale.ROOT);
+
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final String NULL_VALUE = null;
+
+ public static final TimeUnit TIME_UNIT = TimeUnit.MILLISECONDS;
+ public static final boolean ROUND_CEIL = true;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, DateFieldMapper> {
+
+ protected TimeUnit timeUnit = Defaults.TIME_UNIT;
+
+ protected String nullValue = Defaults.NULL_VALUE;
+
+ protected FormatDateTimeFormatter dateTimeFormatter = Defaults.DATE_TIME_FORMATTER;
+
+ private Locale locale;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ // do *NOT* rely on the default locale
+ locale = Locale.ROOT;
+ }
+
+ public Builder timeUnit(TimeUnit timeUnit) {
+ this.timeUnit = timeUnit;
+ return this;
+ }
+
+ public Builder nullValue(String nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ public Builder dateTimeFormatter(FormatDateTimeFormatter dateTimeFormatter) {
+ this.dateTimeFormatter = dateTimeFormatter;
+ return this;
+ }
+
+ @Override
+ public DateFieldMapper build(BuilderContext context) {
+ boolean roundCeil = Defaults.ROUND_CEIL;
+ if (context.indexSettings() != null) {
+ Settings settings = context.indexSettings();
+ roundCeil = settings.getAsBoolean("index.mapping.date.round_ceil", settings.getAsBoolean("index.mapping.date.parse_upper_inclusive", Defaults.ROUND_CEIL));
+ }
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ if (!locale.equals(dateTimeFormatter.locale())) {
+ dateTimeFormatter = new FormatDateTimeFormatter(dateTimeFormatter.format(), dateTimeFormatter.parser(), dateTimeFormatter.printer(), locale);
+ }
+ DateFieldMapper fieldMapper = new DateFieldMapper(buildNames(context), dateTimeFormatter,
+ precisionStep, boost, fieldType, docValues, nullValue, timeUnit, roundCeil, ignoreMalformed(context), coerce(context),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings(),
+ multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+
+ public Builder locale(Locale locale) {
+ this.locale = locale;
+ return this;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ DateFieldMapper.Builder builder = dateField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(propNode.toString());
+ } else if (propName.equals("format")) {
+ builder.dateTimeFormatter(parseDateTimeFormatter(propName, propNode));
+ } else if (propName.equals("numeric_resolution")) {
+ builder.timeUnit(TimeUnit.valueOf(propNode.toString().toUpperCase(Locale.ROOT)));
+ } else if (propName.equals("locale")) {
+ builder.locale(parseLocale(propNode.toString()));
+ }
+ }
+ return builder;
+ }
+ }
+
+ // public for test
+ public static Locale parseLocale(String locale) {
+ final String[] parts = locale.split("_", -1);
+ switch (parts.length) {
+ case 3:
+ // lang_country_variant
+ return new Locale(parts[0], parts[1], parts[2]);
+ case 2:
+ // lang_country
+ return new Locale(parts[0], parts[1]);
+ case 1:
+ if ("ROOT".equalsIgnoreCase(parts[0])) {
+ return Locale.ROOT;
+ }
+ // lang
+ return new Locale(parts[0]);
+ default:
+ throw new ElasticsearchIllegalArgumentException("Can't parse locale: [" + locale + "]");
+ }
+ }
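+
+ // Examples of accepted inputs: "en" -> new Locale("en"), "de_DE" ->
+ // new Locale("de", "DE"), "th_TH_TH" -> new Locale("th", "TH", "TH"), and
+ // "root" (case-insensitive) -> Locale.ROOT.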
+
+ protected FormatDateTimeFormatter dateTimeFormatter;
+
+ // Triggers rounding up of the upper bound for range queries and filters if
+ // set to true.
+ // Rounding up a date here means: if a date is not given with full precision,
+ // for example with no milliseconds, it is filled up to the latest instant that
+ // still matches the given prefix.
+ // Example: an upper bound given as "2000-01-01" will be converted to
+ // "2000-01-01T23:59:59.999".
+ private final boolean roundCeil;
+
+ private final DateMathParser dateMathParser;
+
+ private String nullValue;
+
+ protected final TimeUnit timeUnit;
+
+ protected DateFieldMapper(Names names, FormatDateTimeFormatter dateTimeFormatter, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ String nullValue, TimeUnit timeUnit, boolean roundCeil, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity,
+ Loading normsLoading, @Nullable Settings fieldDataSettings, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, coerce, new NamedAnalyzer("_date/" + precisionStep,
+ new NumericDateAnalyzer(precisionStep, dateTimeFormatter.parser())),
+ new NamedAnalyzer("_date/max", new NumericDateAnalyzer(Integer.MAX_VALUE, dateTimeFormatter.parser())),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.dateTimeFormatter = dateTimeFormatter;
+ this.nullValue = nullValue;
+ this.timeUnit = timeUnit;
+ this.roundCeil = roundCeil;
+ this.dateMathParser = new DateMathParser(dateTimeFormatter, timeUnit);
+ }
+
+ public FormatDateTimeFormatter dateTimeFormatter() {
+ return dateTimeFormatter;
+ }
+
+ public DateMathParser dateMathParser() {
+ return dateMathParser;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 64;
+ }
+
+ @Override
+ public Long value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).longValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToLong((BytesRef) value);
+ }
+ return parseStringValue(value.toString());
+ }
+
+ /** Dates should be returned as a string. */
+ @Override
+ public Object valueForSearch(Object value) {
+ if (value instanceof String) {
+ // assume it's the string that was indexed, just return it... (for example, with get)
+ return value;
+ }
+ Long val = value(value);
+ if (val == null) {
+ return null;
+ }
+ return dateTimeFormatter.printer().print(val);
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ private long parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).longValue();
+ }
+ if (value instanceof BytesRef) {
+ return dateTimeFormatter.parser().parseMillis(((BytesRef) value).utf8ToString());
+ }
+ return dateTimeFormatter.parser().parseMillis(value.toString());
+ }
+
+ private String convertToString(Object value) {
+ if (value instanceof BytesRef) {
+ return ((BytesRef) value).utf8ToString();
+ }
+ return value.toString();
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ long iValue = dateMathParser.parse(value, System.currentTimeMillis());
+ long iSim;
+ try {
+ iSim = fuzziness.asTimeValue().millis();
+ } catch (Exception e) {
+ // not a time format
+ iSim = fuzziness.asLong();
+ }
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ long lValue = parseToMilliseconds(value, context);
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ lValue, lValue, true, true);
+ }
+
+ public long parseToMilliseconds(Object value, @Nullable QueryParseContext context) {
+ return parseToMilliseconds(value, context, false);
+ }
+
+ public long parseToMilliseconds(Object value, @Nullable QueryParseContext context, boolean includeUpper) {
+ long now = context == null ? System.currentTimeMillis() : context.nowInMillis();
+ return includeUpper && roundCeil ? dateMathParser.parseRoundCeil(convertToString(value), now) : dateMathParser.parse(convertToString(value), now);
+ }
+
+ public long parseToMilliseconds(String value, @Nullable QueryParseContext context, boolean includeUpper) {
+ long now = context == null ? System.currentTimeMillis() : context.nowInMillis();
+ return includeUpper && roundCeil ? dateMathParser.parseRoundCeil(value, now) : dateMathParser.parse(value, now);
+ }
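+
+ // Worked example of the roundCeil behaviour: with roundCeil and includeUpper both
+ // true, an upper bound of "2000-01-01" is parsed with parseRoundCeil and filled up
+ // to the last millisecond of the day, 2000-01-01T23:59:59.999, instead of midnight.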
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ final long lValue = parseToMilliseconds(value, context);
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ lValue, lValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseToMilliseconds(lowerTerm, context),
+ upperTerm == null ? null : parseToMilliseconds(upperTerm, context, includeUpper),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return rangeFilter(lowerTerm, upperTerm, includeLower, includeUpper, context, false);
+ }
+
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context, boolean explicitCaching) {
+ boolean cache = explicitCaching;
+ Long lowerVal = null;
+ Long upperVal = null;
+ if (lowerTerm != null) {
+ String value = convertToString(lowerTerm);
+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);
+ lowerVal = parseToMilliseconds(value, context, false);
+ }
+ if (upperTerm != null) {
+ String value = convertToString(upperTerm);
+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);
+ upperVal = parseToMilliseconds(value, context, includeUpper);
+ }
+
+ Filter filter = NumericRangeFilter.newLongRange(
+ names.indexName(), precisionStep, lowerVal, upperVal, includeLower, includeUpper
+ );
+ if (!cache) {
+ // We don't cache the range filter if a `now` date expression is used, and also when a compound
+ // filter wraps a range filter that contains a `now` date expression.
+ return NoCacheFilter.wrap(filter);
+ } else {
+ return filter;
+ }
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return rangeFilter(fieldData, lowerTerm, upperTerm, includeLower, includeUpper, context, false);
+ }
+
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context, boolean explicitCaching) {
+ boolean cache = explicitCaching;
+ Long lowerVal = null;
+ Long upperVal = null;
+ if (lowerTerm != null) {
+ String value = convertToString(lowerTerm);
+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);
+ lowerVal = parseToMilliseconds(value, context, false);
+ }
+ if (upperTerm != null) {
+ String value = convertToString(upperTerm);
+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);
+ upperVal = parseToMilliseconds(value, context, includeUpper);
+ }
+
+ Filter filter = NumericRangeFieldDataFilter.newLongRange(
+ (IndexNumericFieldData<?>) fieldData.getForField(this), lowerVal, upperVal, includeLower, includeUpper
+ );
+ if (!cache) {
+ // We don't cache the range filter if a `now` date expression is used, and also when a compound
+ // filter wraps a range filter that contains a `now` date expression.
+ return NoCacheFilter.wrap(filter);
+ } else {
+ return filter;
+ }
+ }
+
+ private boolean hasNowExpressionWithNoRounding(String value) {
+ int index = value.indexOf("now");
+ if (index != -1) {
+ if (value.length() == 3) {
+ return true;
+ } else {
+ int indexOfPotentialRounding = index + 3;
+ if (indexOfPotentialRounding >= value.length()) {
+ return true;
+ } else {
+ char potentialRoundingChar;
+ do {
+ potentialRoundingChar = value.charAt(indexOfPotentialRounding++);
+ if (potentialRoundingChar == '/') {
+ return false; // We found the rounding char, so we shouldn't forcefully disable caching
+ } else if (potentialRoundingChar == ' ') {
+ return true; // Next token in the date math expression and no rounding found, so we should not cache.
+ }
+ } while (indexOfPotentialRounding < value.length());
+ return true; // Couldn't find rounding char, so we should not cache
+ }
+ }
+ } else {
+ return false;
+ }
+ }
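+
+ // Worked examples (illustrative): "now" and "now+1h" contain `now` with no rounding
+ // suffix and return true, so the resulting filter is wrapped in NoCacheFilter;
+ // "now/d" rounds and returns false, and "2014-01-01" contains no `now` at all, so
+ // both of the latter remain cacheable.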
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ long value = parseStringValue(nullValue);
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ value,
+ value,
+ true, true);
+ }
+
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ String dateAsString = null;
+ Long value = null;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue instanceof Number) {
+ value = ((Number) externalValue).longValue();
+ } else {
+ dateAsString = (String) externalValue;
+ if (dateAsString == null) {
+ dateAsString = nullValue;
+ }
+ }
+ } else {
+ XContentParser parser = context.parser();
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.VALUE_NULL) {
+ dateAsString = nullValue;
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ value = parser.longValue(coerce.value());
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ dateAsString = nullValue;
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ value = parser.longValue(coerce.value());
+ } else {
+ dateAsString = parser.text();
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else {
+ dateAsString = parser.text();
+ }
+ }
+
+ if (dateAsString != null) {
+ assert value == null;
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), dateAsString, boost);
+ }
+ value = parseStringValue(dateAsString);
+ }
+
+ if (value != null) {
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomLongNumericField field = new CustomLongNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ addDocValue(context, value);
+ }
+ }
+ }
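+
+ // Per document, the parsing above accepts (field name invented for illustration):
+ // "created": 1388534400000 -> number, taken directly as the long value
+ // "created": "2014-01-01" -> string in the mapped date format
+ // "created": null -> falls back to null_value, if configured
+ // "created": { "value": "2014-01-01", "boost": 2.0 } -> object form with boost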
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((DateFieldMapper) mergeWith).nullValue;
+ this.dateTimeFormatter = ((DateFieldMapper) mergeWith).dateTimeFormatter;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ builder.field("format", dateTimeFormatter.format());
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ if (includeDefaults || timeUnit != Defaults.TIME_UNIT) {
+ builder.field("numeric_resolution", timeUnit.name().toLowerCase(Locale.ROOT));
+ }
+ // only serialize locale if needed, ROOT is the default, so no need to serialize that case as well...
+ if (dateTimeFormatter.locale() != null && dateTimeFormatter.locale() != Locale.ROOT) {
+ builder.field("locale", dateTimeFormatter.locale());
+ } else if (includeDefaults) {
+ if (dateTimeFormatter.locale() == null) {
+ builder.field("locale", Locale.ROOT);
+ } else {
+ builder.field("locale", dateTimeFormatter.locale());
+ }
+ }
+ }
+
+ private long parseStringValue(String value) {
+ try {
+ return dateTimeFormatter.parser().parseMillis(value);
+ } catch (RuntimeException e) {
+ try {
+ long time = Long.parseLong(value);
+ return timeUnit.toMillis(time);
+ } catch (NumberFormatException e1) {
+ throw new MapperParsingException("failed to parse date field [" + value + "], tried both date format [" + dateTimeFormatter.format() + "], and timestamp number with locale [" + dateTimeFormatter.locale() + "]", e);
+ }
+ }
+ }
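+
+ // Fallback example: with "numeric_resolution": "seconds" (TimeUnit.SECONDS), a value
+ // that the configured date format rejects, such as "1400000000" under a strict
+ // date-only format, is parsed as a long, and timeUnit.toMillis turns it into
+ // 1400000000000.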
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
new file mode 100644
index 0000000..6f449a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
@@ -0,0 +1,423 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.hppc.DoubleArrayList;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.util.ByteUtils;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NumericDoubleAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ *
+ */
+public class DoubleFieldMapper extends NumberFieldMapper<Double> {
+
+ public static final String CONTENT_TYPE = "double";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Double NULL_VALUE = null;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, DoubleFieldMapper> {
+
+ protected Double nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(double nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public DoubleFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ DoubleFieldMapper fieldMapper = new DoubleFieldMapper(buildNames(context),
+ precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context), coerce(context), postingsProvider,
+ docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ DoubleFieldMapper.Builder builder = doubleField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = entry.getKey();
+ Object propNode = entry.getValue();
+ if (propName.equals("nullValue") || propName.equals("null_value")) {
+ builder.nullValue(nodeDoubleValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+
+ private Double nullValue;
+
+ private String nullValueAsString;
+
+ protected DoubleFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Double nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, coerce,
+ NumericDoubleAnalyzer.buildNamedAnalyzer(precisionStep), NumericDoubleAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ this.nullValueAsString = nullValue == null ? null : nullValue.toString();
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("double");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 64;
+ }
+
+ @Override
+ public Double value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).doubleValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToDouble((BytesRef) value);
+ }
+ return Double.parseDouble(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ long longValue = NumericUtils.doubleToSortableLong(parseDoubleValue(value));
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ double iValue = Double.parseDouble(value);
+ double iSim = fuzziness.asDouble();
+ return NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ double dValue = parseDoubleValue(value);
+ return NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+ dValue, dValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseDoubleValue(lowerTerm),
+ upperTerm == null ? null : parseDoubleValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ double dValue = parseDoubleValue(value);
+ return NumericRangeFilter.newDoubleRange(names.indexName(), precisionStep,
+ dValue, dValue, true, true);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newDoubleRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseDoubleValue(lowerTerm),
+ upperTerm == null ? null : parseDoubleValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ public Filter rangeFilter(Double lowerTerm, Double upperTerm, boolean includeLower, boolean includeUpper) {
+ return NumericRangeFilter.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newDoubleRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseDoubleValue(lowerTerm),
+ upperTerm == null ? null : parseDoubleValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newDoubleRange(names.indexName(), precisionStep,
+ nullValue,
+ nullValue,
+ true, true);
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ double value;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue == null) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else if (externalValue instanceof String) {
+ String sExternalValue = (String) externalValue;
+ if (sExternalValue.length() == 0) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else {
+ value = Double.parseDouble(sExternalValue);
+ }
+ } else {
+ value = ((Number) externalValue).doubleValue();
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), Double.toString(value), boost);
+ }
+ } else {
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
+ (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
+ context.allEntries().addText(names.fullName(), nullValueAsString, boost);
+ }
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ Double objValue = nullValue;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ objValue = parser.doubleValue(coerce.value());
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (objValue == null) {
+ // no value
+ return;
+ }
+ value = objValue;
+ } else {
+ value = parser.doubleValue(coerce.value());
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), parser.text(), boost);
+ }
+ }
+ }
+
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomDoubleNumericField field = new CustomDoubleNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ CustomDoubleNumericDocValuesField field = (CustomDoubleNumericDocValuesField) context.doc().getByKey(names().indexName());
+ if (field != null) {
+ field.add(value);
+ } else {
+ field = new CustomDoubleNumericDocValuesField(names().indexName(), value);
+ context.doc().addWithKey(names().indexName(), field);
+ }
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((DoubleFieldMapper) mergeWith).nullValue;
+ this.nullValueAsString = ((DoubleFieldMapper) mergeWith).nullValueAsString;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ }
+
+ public static class CustomDoubleNumericField extends CustomNumericField {
+
+ private final double number;
+
+ private final NumberFieldMapper mapper;
+
+ public CustomDoubleNumericField(NumberFieldMapper mapper, double number, FieldType fieldType) {
+ super(mapper, number, fieldType);
+ this.mapper = mapper;
+ this.number = number;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ if (fieldType().indexed()) {
+ return mapper.popCachedStream().setDoubleValue(number);
+ }
+ return null;
+ }
+
+ @Override
+ public String numericAsString() {
+ return Double.toString(number);
+ }
+ }
+
+ public static class CustomDoubleNumericDocValuesField extends CustomNumericDocValuesField {
+
+ public static final FieldType TYPE = new FieldType();
+ static {
+ TYPE.setDocValueType(FieldInfo.DocValuesType.BINARY);
+ TYPE.freeze();
+ }
+
+ private final DoubleArrayList values;
+
+ public CustomDoubleNumericDocValuesField(String name, double value) {
+ super(name);
+ values = new DoubleArrayList();
+ add(value);
+ }
+
+ public void add(double value) {
+ values.add(value);
+ }
+
+ @Override
+ public BytesRef binaryValue() {
+ CollectionUtils.sortAndDedup(values);
+
+ final byte[] bytes = new byte[values.size() * 8];
+ for (int i = 0; i < values.size(); ++i) {
+ ByteUtils.writeDoubleLE(values.get(i), bytes, i * 8);
+ }
+ return new BytesRef(bytes);
+ }
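+
+        // Example: after add(3.5), add(1.25), add(3.5), sortAndDedup leaves
+        // [1.25, 3.5] and the returned BytesRef holds 16 bytes: two
+        // little-endian IEEE-754 doubles.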
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
new file mode 100644
index 0000000..1e9de2b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
@@ -0,0 +1,429 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.util.ByteUtils;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NumericFloatAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.floatField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ * A {@link NumberFieldMapper} that indexes 32-bit floating point ({@code float}) values.
+ */
+public class FloatFieldMapper extends NumberFieldMapper<Float> {
+
+ public static final String CONTENT_TYPE = "float";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Float NULL_VALUE = null;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, FloatFieldMapper> {
+
+ protected Float nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(float nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public FloatFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ FloatFieldMapper fieldMapper = new FloatFieldMapper(buildNames(context),
+ precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context), coerce(context), postingsProvider,
+ docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ FloatFieldMapper.Builder builder = floatField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeFloatValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Float nullValue;
+
+ private String nullValueAsString;
+
+ protected FloatFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Float nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, coerce,
+ NumericFloatAnalyzer.buildNamedAnalyzer(precisionStep), NumericFloatAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ this.nullValueAsString = nullValue == null ? null : nullValue.toString();
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 32;
+ }
+
+ @Override
+ public Float value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).floatValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToFloat((BytesRef) value);
+ }
+ return Float.parseFloat(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ int intValue = NumericUtils.floatToSortableInt(parseValue(value));
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ private float parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).floatValue();
+ }
+ if (value instanceof BytesRef) {
+ return Float.parseFloat(((BytesRef) value).utf8ToString());
+ }
+ return Float.parseFloat(value.toString());
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ float iValue = Float.parseFloat(value);
+ final float iSim = fuzziness.asFloat();
+ return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ float fValue = parseValue(value);
+ return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+ fValue, fValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ float fValue = parseValue(value);
+ return NumericRangeFilter.newFloatRange(names.indexName(), precisionStep,
+ fValue, fValue, true, true);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newFloatRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newFloatRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newFloatRange(names.indexName(), precisionStep,
+ nullValue,
+ nullValue,
+ true, true);
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ float value;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue == null) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else if (externalValue instanceof String) {
+ String sExternalValue = (String) externalValue;
+ if (sExternalValue.length() == 0) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else {
+ value = Float.parseFloat(sExternalValue);
+ }
+ } else {
+ value = ((Number) externalValue).floatValue();
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), Float.toString(value), boost);
+ }
+ } else {
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
+ (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
+ context.allEntries().addText(names.fullName(), nullValueAsString, boost);
+ }
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ Float objValue = nullValue;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ objValue = parser.floatValue(coerce.value());
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (objValue == null) {
+ // no value
+ return;
+ }
+ value = objValue;
+ } else {
+ value = parser.floatValue(coerce.value());
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), parser.text(), boost);
+ }
+ }
+ }
+
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomFloatNumericField field = new CustomFloatNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ CustomFloatNumericDocValuesField field = (CustomFloatNumericDocValuesField) context.doc().getByKey(names().indexName());
+ if (field != null) {
+ field.add(value);
+ } else {
+ field = new CustomFloatNumericDocValuesField(names().indexName(), value);
+ context.doc().addWithKey(names().indexName(), field);
+ }
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((FloatFieldMapper) mergeWith).nullValue;
+ this.nullValueAsString = ((FloatFieldMapper) mergeWith).nullValueAsString;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ }
+
+ public static class CustomFloatNumericField extends CustomNumericField {
+
+ private final float number;
+
+ private final NumberFieldMapper mapper;
+
+ public CustomFloatNumericField(NumberFieldMapper mapper, float number, FieldType fieldType) {
+ super(mapper, number, fieldType);
+ this.mapper = mapper;
+ this.number = number;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ if (fieldType().indexed()) {
+ return mapper.popCachedStream().setFloatValue(number);
+ }
+ return null;
+ }
+
+ @Override
+ public String numericAsString() {
+ return Float.toString(number);
+ }
+ }
+
+ public static class CustomFloatNumericDocValuesField extends CustomNumericDocValuesField {
+
+ public static final FieldType TYPE = new FieldType();
+ static {
+ TYPE.setDocValueType(FieldInfo.DocValuesType.BINARY);
+ TYPE.freeze();
+ }
+
+ private final FloatArrayList values;
+
+ public CustomFloatNumericDocValuesField(String name, float value) {
+ super(name);
+ values = new FloatArrayList();
+ add(value);
+ }
+
+ public void add(float value) {
+ values.add(value);
+ }
+
+ @Override
+ public BytesRef binaryValue() {
+ CollectionUtils.sortAndDedup(values);
+
+ final byte[] bytes = new byte[values.size() * 4];
+ for (int i = 0; i < values.size(); ++i) {
+ ByteUtils.writeFloatLE(values.get(i), bytes, i * 4);
+ }
+ return new BytesRef(bytes);
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
new file mode 100644
index 0000000..1d8efa7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
@@ -0,0 +1,391 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NumericIntegerAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.integerField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ * A {@link NumberFieldMapper} that indexes 32-bit integer ({@code int}) values.
+ */
+public class IntegerFieldMapper extends NumberFieldMapper<Integer> {
+
+ public static final String CONTENT_TYPE = "integer";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Integer NULL_VALUE = null;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, IntegerFieldMapper> {
+
+ protected Integer nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(int nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public IntegerFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ IntegerFieldMapper fieldMapper = new IntegerFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues,
+ nullValue, ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings,
+ context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ IntegerFieldMapper.Builder builder = integerField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeIntegerValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Integer nullValue;
+
+ private String nullValueAsString;
+
+ protected IntegerFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Integer nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, coerce,
+ NumericIntegerAnalyzer.buildNamedAnalyzer(precisionStep), NumericIntegerAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ this.nullValueAsString = nullValue == null ? null : nullValue.toString();
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("int");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 32;
+ }
+
+ @Override
+ public Integer value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).intValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToInt((BytesRef) value);
+ }
+ return Integer.parseInt(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ private int parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).intValue();
+ }
+ if (value instanceof BytesRef) {
+ return Integer.parseInt(((BytesRef) value).utf8ToString());
+ }
+ return Integer.parseInt(value.toString());
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ int iValue = Integer.parseInt(value);
+ int iSim = fuzziness.asInt();
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ int iValue = parseValue(value);
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ int iValue = parseValue(value);
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newIntRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ nullValue,
+ nullValue,
+ true, true);
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ int value;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue == null) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else if (externalValue instanceof String) {
+ String sExternalValue = (String) externalValue;
+ if (sExternalValue.length() == 0) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else {
+ value = Integer.parseInt(sExternalValue);
+ }
+ } else {
+ value = ((Number) externalValue).intValue();
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), Integer.toString(value), boost);
+ }
+ } else {
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
+ (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
+ context.allEntries().addText(names.fullName(), nullValueAsString, boost);
+ }
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ Integer objValue = nullValue;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ objValue = parser.intValue(coerce.value());
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (objValue == null) {
+ // no value
+ return;
+ }
+ value = objValue;
+ } else {
+ value = parser.intValue(coerce.value());
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), parser.text(), boost);
+ }
+ }
+ }
+ addIntegerFields(context, fields, value, boost);
+ }
+
+ protected void addIntegerFields(ParseContext context, List<Field> fields, int value, float boost) {
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomIntegerNumericField field = new CustomIntegerNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ addDocValue(context, value);
+ }
+ }
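+
+    // addIntegerFields is a protected hook: subclasses that produce an int
+    // value some other way can reuse the indexed/stored and doc-values
+    // handling without re-implementing innerParseCreateField.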
+
+ protected Integer nullValue() {
+ return nullValue;
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((IntegerFieldMapper) mergeWith).nullValue;
+ this.nullValueAsString = ((IntegerFieldMapper) mergeWith).nullValueAsString;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ }
+
+ public static class CustomIntegerNumericField extends CustomNumericField {
+
+ private final int number;
+
+ private final NumberFieldMapper mapper;
+
+ public CustomIntegerNumericField(NumberFieldMapper mapper, int number, FieldType fieldType) {
+ super(mapper, number, fieldType);
+ this.mapper = mapper;
+ this.number = number;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ if (fieldType().indexed()) {
+ return mapper.popCachedStream().setIntValue(number);
+ }
+ return null;
+ }
+
+ @Override
+ public String numericAsString() {
+ return Integer.toString(number);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
new file mode 100644
index 0000000..4ada619
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
@@ -0,0 +1,372 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NumericLongAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeLongValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.longField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ * A {@link NumberFieldMapper} that indexes 64-bit integer ({@code long}) values.
+ */
+public class LongFieldMapper extends NumberFieldMapper<Long> {
+
+ public static final String CONTENT_TYPE = "long";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Long NULL_VALUE = null;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, LongFieldMapper> {
+
+ protected Long nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(long nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public LongFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ LongFieldMapper fieldMapper = new LongFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,
+ ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading,
+ fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ LongFieldMapper.Builder builder = longField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeLongValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Long nullValue;
+
+ private String nullValueAsString;
+
+ protected LongFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Long nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, coerce,
+ NumericLongAnalyzer.buildNamedAnalyzer(precisionStep), NumericLongAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ this.nullValueAsString = nullValue == null ? null : nullValue.toString();
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 64;
+ }
+
+ @Override
+ public Long value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).longValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToLong((BytesRef) value);
+ }
+ return Long.parseLong(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ long iValue = Long.parseLong(value);
+ final long iSim = fuzziness.asLong();
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ long iValue = parseLongValue(value);
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ long iValue = parseLongValue(value);
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseLongValue(lowerTerm),
+ upperTerm == null ? null : parseLongValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseLongValue(lowerTerm),
+ upperTerm == null ? null : parseLongValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newLongRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseLongValue(lowerTerm),
+ upperTerm == null ? null : parseLongValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ nullValue,
+ nullValue,
+ true, true);
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ long value;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue == null) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else if (externalValue instanceof String) {
+ String sExternalValue = (String) externalValue;
+ if (sExternalValue.length() == 0) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else {
+ value = Long.parseLong(sExternalValue);
+ }
+ } else {
+ value = ((Number) externalValue).longValue();
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), Long.toString(value), boost);
+ }
+ } else {
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
+ (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
+ context.allEntries().addText(names.fullName(), nullValueAsString, boost);
+ }
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ Long objValue = nullValue;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ objValue = parser.longValue(coerce.value());
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (objValue == null) {
+ // no value
+ return;
+ }
+ value = objValue;
+ } else {
+ value = parser.longValue(coerce.value());
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), parser.text(), boost);
+ }
+ }
+ }
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomLongNumericField field = new CustomLongNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ addDocValue(context, value);
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((LongFieldMapper) mergeWith).nullValue;
+ this.nullValueAsString = ((LongFieldMapper) mergeWith).nullValueAsString;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+ }
+
+ public static class CustomLongNumericField extends CustomNumericField {
+
+ private final long number;
+
+ private final NumberFieldMapper mapper;
+
+ public CustomLongNumericField(NumberFieldMapper mapper, long number, FieldType fieldType) {
+ super(mapper, number, fieldType);
+ this.mapper = mapper;
+ this.number = number;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ if (fieldType().indexed()) {
+ return mapper.popCachedStream().setLongValue(number);
+ }
+ return null;
+ }
+
+ @Override
+ public String numericAsString() {
+ return Long.toString(number);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
new file mode 100644
index 0000000..c903505
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
@@ -0,0 +1,514 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.carrotsearch.hppc.LongArrayList;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.util.ByteUtils;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.FieldDataTermsFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.List;
+
+/**
+ * Base class for the numeric field mappers. Subclasses supply the concrete
+ * numeric type along with its parsing, analysis and query/filter construction.
+ */
+public abstract class NumberFieldMapper<T extends Number> extends AbstractFieldMapper<T> implements AllFieldMapper.IncludeInAll {
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final int PRECISION_STEP = NumericUtils.PRECISION_STEP_DEFAULT;
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.setStoreTermVectors(false);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Explicit<Boolean> IGNORE_MALFORMED = new Explicit<Boolean>(false, false);
+ public static final Explicit<Boolean> COERCE = new Explicit<Boolean>(true, false);
+ }
+
+ public abstract static class Builder<T extends Builder, Y extends NumberFieldMapper> extends AbstractFieldMapper.Builder<T, Y> {
+
+ protected int precisionStep = Defaults.PRECISION_STEP;
+
+ private Boolean ignoreMalformed;
+
+ private Boolean coerce;
+
+ public Builder(String name, FieldType fieldType) {
+ super(name, fieldType);
+ }
+
+ public T precisionStep(int precisionStep) {
+ this.precisionStep = precisionStep;
+ return builder;
+ }
+
+ public T ignoreMalformed(boolean ignoreMalformed) {
+ this.ignoreMalformed = ignoreMalformed;
+ return builder;
+ }
+
+ protected Explicit<Boolean> ignoreMalformed(BuilderContext context) {
+ if (ignoreMalformed != null) {
+ return new Explicit<Boolean>(ignoreMalformed, true);
+ }
+ if (context.indexSettings() != null) {
+ return new Explicit<Boolean>(context.indexSettings().getAsBoolean("index.mapping.ignore_malformed", Defaults.IGNORE_MALFORMED.value()), false);
+ }
+ return Defaults.IGNORE_MALFORMED;
+ }
+
+ public T coerce(boolean coerce) {
+ this.coerce = coerce;
+ return builder;
+ }
+
+ protected Explicit<Boolean> coerce(BuilderContext context) {
+ if (coerce != null) {
+ return new Explicit<Boolean>(coerce, true);
+ }
+ if (context.indexSettings() != null) {
+ return new Explicit<Boolean>(context.indexSettings().getAsBoolean("index.mapping.coerce", Defaults.COERCE.value()), false);
+ }
+ return Defaults.COERCE;
+ }
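+
+        // Resolution order for both flags: an explicit value on the mapping
+        // wins, then the index settings ("index.mapping.ignore_malformed" and
+        // "index.mapping.coerce"), then the defaults (false and true). The
+        // Explicit wrapper records whether the value was set explicitly, which
+        // merge() later uses to decide whether a merged value may override it.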
+
+ }
+
+ protected int precisionStep;
+
+ protected Boolean includeInAll;
+
+ protected Explicit<Boolean> ignoreMalformed;
+
+ protected Explicit<Boolean> coerce;
+
+ private ThreadLocal<NumericTokenStream> tokenStream = new ThreadLocal<NumericTokenStream>() {
+ @Override
+ protected NumericTokenStream initialValue() {
+ return new NumericTokenStream(precisionStep);
+ }
+ };
+
+ private static ThreadLocal<NumericTokenStream> tokenStream4 = new ThreadLocal<NumericTokenStream>() {
+ @Override
+ protected NumericTokenStream initialValue() {
+ return new NumericTokenStream(4);
+ }
+ };
+
+ private static ThreadLocal<NumericTokenStream> tokenStream8 = new ThreadLocal<NumericTokenStream>() {
+ @Override
+ protected NumericTokenStream initialValue() {
+ return new NumericTokenStream(8);
+ }
+ };
+
+ private static ThreadLocal<NumericTokenStream> tokenStreamMax = new ThreadLocal<NumericTokenStream>() {
+ @Override
+ protected NumericTokenStream initialValue() {
+ return new NumericTokenStream(Integer.MAX_VALUE);
+ }
+ };
+
+ protected NumberFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, NamedAnalyzer indexAnalyzer,
+ NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity,
+ Loading normsLoading, @Nullable Settings fieldDataSettings, Settings indexSettings,
+ MultiFields multiFields, CopyTo copyTo) {
+ // LUCENE 4 UPGRADE: Since we can't do anything before the super call, we have to push the boost check down to subclasses
+ super(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider,
+ similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) {
+ this.precisionStep = Integer.MAX_VALUE;
+ } else {
+ this.precisionStep = precisionStep;
+ }
+ this.ignoreMalformed = ignoreMalformed;
+ this.coerce = coerce;
+ }
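+
+    // A precisionStep outside (0, maxPrecisionStep()) is normalized to
+    // Integer.MAX_VALUE, which makes the numeric token stream emit a single
+    // full-precision term per value instead of a trie of prefix terms.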
+
+ @Override
+ public void includeInAll(Boolean includeInAll) {
+ if (includeInAll != null) {
+ this.includeInAll = includeInAll;
+ }
+ }
+
+ @Override
+ public void includeInAllIfNotSet(Boolean includeInAll) {
+ if (includeInAll != null && this.includeInAll == null) {
+ this.includeInAll = includeInAll;
+ }
+ }
+
+ @Override
+ public void unsetIncludeInAll() {
+ includeInAll = null;
+ }
+
+ protected abstract int maxPrecisionStep();
+
+ public int precisionStep() {
+ return this.precisionStep;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ RuntimeException e = null;
+ try {
+ innerParseCreateField(context, fields);
+ } catch (IllegalArgumentException e1) {
+ e = e1;
+ } catch (MapperParsingException e2) {
+ e = e2;
+ }
+
+ if (e != null && !ignoreMalformed.value()) {
+ throw e;
+ }
+ }
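+
+    // In effect: number-format and mapping parse errors are swallowed when
+    // ignore_malformed is true (the document is indexed without this field)
+    // and rethrown otherwise.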
+
+ protected abstract void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException;
+
+ protected final void addDocValue(ParseContext context, long value) {
+ CustomLongNumericDocValuesField field = (CustomLongNumericDocValuesField) context.doc().getByKey(names().indexName());
+ if (field != null) {
+ field.add(value);
+ } else {
+ field = new CustomLongNumericDocValuesField(names().indexName(), value);
+ context.doc().addWithKey(names().indexName(), field);
+ }
+ }
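+
+    // Example: for an array value such as [1, 2, 3] this method is called once
+    // per element; the first call registers the doc-values field under the
+    // index name and later calls append to the same field.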
+
+    /**
+     * Use the term query built by this mapper when the query string parser
+     * matches on this numeric field.
+     */
+ @Override
+ public boolean useTermQueryWithQueryString() {
+ return true;
+ }
+
+    /**
+     * A term query on a numeric field is implemented as a range query with the
+     * same value as both bounds, both included. That is the recommended way to
+     * execute it.
+     */
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ return rangeQuery(value, value, true, true, context);
+ }
+
+    /**
+     * A term filter on a numeric field is implemented as a range filter with
+     * the same value as both bounds, both included. That is the recommended
+     * way to execute it.
+     */
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ return rangeFilter(value, value, true, true, context);
+ }
+
+ @Override
+ public abstract Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context);
+
+ @Override
+ public abstract Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context);
+
+ @Override
+ public abstract Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions);
+
+ /**
+ * A range filter based on the field data cache.
+ */
+ public abstract Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context);
+
+ /**
+ * A terms filter based on the field data cache for numeric fields.
+ */
+ @Override
+ public Filter termsFilter(IndexFieldDataService fieldDataService, List values, @Nullable QueryParseContext context) {
+ IndexNumericFieldData fieldData = fieldDataService.getForField(this);
+ if (fieldData.getNumericType().isFloatingPoint()) {
+ // create with initial size large enough to avoid rehashing
+ DoubleOpenHashSet terms =
+ new DoubleOpenHashSet((int) (values.size() * (1 + DoubleOpenHashSet.DEFAULT_LOAD_FACTOR)));
+ for (int i = 0, len = values.size(); i < len; i++) {
+ terms.add(parseDoubleValue(values.get(i)));
+ }
+
+ return FieldDataTermsFilter.newDoubles(fieldData, terms);
+ } else {
+ // create with initial size large enough to avoid rehashing
+ LongOpenHashSet terms =
+ new LongOpenHashSet((int) (values.size() * (1 + LongOpenHashSet.DEFAULT_LOAD_FACTOR)));
+ for (int i = 0, len = values.size(); i < len; i++) {
+ terms.add(parseLongValue(values.get(i)));
+ }
+
+ return FieldDataTermsFilter.newLongs(fieldData, terms);
+ }
+ }
+
+ /**
+ * Converts an object value into a double
+ */
+ public double parseDoubleValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).doubleValue();
+ }
+
+ if (value instanceof BytesRef) {
+ return Double.parseDouble(((BytesRef) value).utf8ToString());
+ }
+
+ return Double.parseDouble(value.toString());
+ }
+
+ /**
+ * Converts an object value into a long
+ */
+ public long parseLongValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).longValue();
+ }
+
+ if (value instanceof BytesRef) {
+ return Long.parseLong(((BytesRef) value).utf8ToString());
+ }
+
+ return Long.parseLong(value.toString());
+ }
+
+ /**
+ * Overrides the default behavior of returning the string representation and returns the actual {@link Number} instance instead.
+ *
+ * @param value the value to convert for search
+ */
+ @Override
+ public Object valueForSearch(Object value) {
+ return value(value);
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith;
+ this.precisionStep = nfmMergeWith.precisionStep;
+ this.includeInAll = nfmMergeWith.includeInAll;
+ if (nfmMergeWith.ignoreMalformed.explicit()) {
+ this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
+ }
+ if (nfmMergeWith.coerce.explicit()) {
+ this.coerce = nfmMergeWith.coerce;
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+
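+ // pick the cached token stream that matches this field's precision step (4, 8, or MAX), falling back to the default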
+ protected NumericTokenStream popCachedStream() {
+ if (precisionStep == 4) {
+ return tokenStream4.get();
+ }
+ if (precisionStep == 8) {
+ return tokenStream8.get();
+ }
+ if (precisionStep == Integer.MAX_VALUE) {
+ return tokenStreamMax.get();
+ }
+ return tokenStream.get();
+ }
+
+ // used so that a numeric field can be reused in a document that is then parsed twice!
+ public abstract static class CustomNumericField extends Field {
+
+ protected final NumberFieldMapper mapper;
+
+ public CustomNumericField(NumberFieldMapper mapper, Number value, FieldType fieldType) {
+ super(mapper.names().indexName(), fieldType);
+ this.mapper = mapper;
+ if (value != null) {
+ this.fieldsData = value;
+ }
+ }
+
+ @Override
+ public String stringValue() {
+ return null;
+ }
+
+ @Override
+ public Reader readerValue() {
+ return null;
+ }
+
+ public abstract String numericAsString();
+ }
+
+ public abstract static class CustomNumericDocValuesField implements IndexableField {
+
+ public static final FieldType TYPE = new FieldType();
+ static {
+ TYPE.setDocValueType(FieldInfo.DocValuesType.BINARY);
+ TYPE.freeze();
+ }
+
+ private final String name;
+
+ public CustomNumericDocValuesField(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String name() {
+ return name;
+ }
+
+ @Override
+ public IndexableFieldType fieldType() {
+ return TYPE;
+ }
+
+ @Override
+ public float boost() {
+ return 1f;
+ }
+
+ @Override
+ public String stringValue() {
+ return null;
+ }
+
+ @Override
+ public Reader readerValue() {
+ return null;
+ }
+
+ @Override
+ public Number numericValue() {
+ return null;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ return null;
+ }
+
+ }
+
+ public static class CustomLongNumericDocValuesField extends CustomNumericDocValuesField {
+
+ public static final FieldType TYPE = new FieldType();
+ static {
+ TYPE.setDocValueType(FieldInfo.DocValuesType.BINARY);
+ TYPE.freeze();
+ }
+
+ private final LongArrayList values;
+
+ public CustomLongNumericDocValuesField(String name, long value) {
+ super(name);
+ values = new LongArrayList();
+ add(value);
+ }
+
+ public void add(long value) {
+ values.add(value);
+ }
+
+ @Override
+ public BytesRef binaryValue() {
+ CollectionUtils.sortAndDedup(values);
+
+ // here is the trick:
+ // - the first value is zig-zag encoded so that e.g. -5 becomes positive and compresses better as a vLong
+ // - subsequent values are encoded as deltas from their predecessor, again using vLong
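+ // e.g. sorted values [-5, 2, 7] are written as zigZag(-5) = 9, then the deltas 7 and 5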
+ final byte[] bytes = new byte[values.size() * ByteUtils.MAX_BYTES_VLONG];
+ final ByteArrayDataOutput out = new ByteArrayDataOutput(bytes);
+ ByteUtils.writeVLong(out, ByteUtils.zigZagEncode(values.get(0)));
+ for (int i = 1; i < values.size(); ++i) {
+ final long delta = values.get(i) - values.get(i - 1);
+ ByteUtils.writeVLong(out, delta);
+ }
+ return new BytesRef(bytes, 0, out.getPosition());
+ }
+
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || ignoreMalformed.explicit()) {
+ builder.field("ignore_malformed", ignoreMalformed.value());
+ }
+ if (includeDefaults || coerce.explicit()) {
+ builder.field("coerce", coerce.value());
+ }
+ }
+
+ @Override
+ public boolean isNumeric() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
new file mode 100644
index 0000000..1edc7ac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
@@ -0,0 +1,388 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.NumericIntegerAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeShortValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.shortField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ * A {@link FieldMapper} that indexes short numeric values.
+ */
+public class ShortFieldMapper extends NumberFieldMapper<Short> {
+
+ public static final String CONTENT_TYPE = "short";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ public static final Short NULL_VALUE = null;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, ShortFieldMapper> {
+
+ protected Short nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(short nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public ShortFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ ShortFieldMapper fieldMapper = new ShortFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,
+ ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings,
+ context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ ShortFieldMapper.Builder builder = shortField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeShortValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private Short nullValue;
+
+ private String nullValueAsString;
+
+ protected ShortFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ Short nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, coerce, new NamedAnalyzer("_short/" + precisionStep,
+ new NumericIntegerAnalyzer(precisionStep)), new NamedAnalyzer("_short/max", new NumericIntegerAnalyzer(Integer.MAX_VALUE)),
+ postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ this.nullValueAsString = nullValue == null ? null : nullValue.toString();
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("short");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 32;
+ }
+
+ @Override
+ public Short value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).shortValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToShort((BytesRef) value);
+ }
+ return Short.parseShort(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ BytesRef bytesRef = new BytesRef();
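+ // shorts are indexed using Lucene's int encoding, so the search value is prefix-coded as an int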
+ NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ private short parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).shortValue();
+ }
+ if (value instanceof BytesRef) {
+ return Short.parseShort(((BytesRef) value).utf8ToString());
+ }
+ return Short.parseShort(value.toString());
+ }
+
+ private int parseValueAsInt(Object value) {
+ return parseValue(value);
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ short iValue = Short.parseShort(value);
+ short iSim = fuzziness.asShort();
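+ // e.g. value 10 with fuzziness 2 yields the inclusive range [8, 12]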
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ int iValue = parseValueAsInt(value);
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValueAsInt(lowerTerm),
+ upperTerm == null ? null : parseValueAsInt(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ int iValue = parseValueAsInt(value);
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ iValue, iValue, true, true);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValueAsInt(lowerTerm),
+ upperTerm == null ? null : parseValueAsInt(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newIntRange(names.indexName(), precisionStep,
+ nullValue.intValue(),
+ nullValue.intValue(),
+ true, true);
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ short value;
+ float boost = this.boost;
+ if (context.externalValueSet()) {
+ Object externalValue = context.externalValue();
+ if (externalValue == null) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else if (externalValue instanceof String) {
+ String sExternalValue = (String) externalValue;
+ if (sExternalValue.length() == 0) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ } else {
+ value = Short.parseShort(sExternalValue);
+ }
+ } else {
+ value = ((Number) externalValue).shortValue();
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), Short.toString(value), boost);
+ }
+ } else {
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
+ (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
+ if (nullValue == null) {
+ return;
+ }
+ value = nullValue;
+ if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
+ context.allEntries().addText(names.fullName(), nullValueAsString, boost);
+ }
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
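+ // object form, e.g. { "value" : 3, "boost" : 2.0 }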
+ XContentParser.Token token;
+ String currentFieldName = null;
+ Short objValue = nullValue;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ objValue = parser.shortValue(coerce.value());
+ }
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (objValue == null) {
+ // no value
+ return;
+ }
+ value = objValue;
+ } else {
+ value = parser.shortValue(coerce.value());
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), parser.text(), boost);
+ }
+ }
+ }
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomShortNumericField field = new CustomShortNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ addDocValue(context, value);
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((ShortFieldMapper) mergeWith).nullValue;
+ this.nullValueAsString = ((ShortFieldMapper) mergeWith).nullValueAsString;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ }
+
+ public static class CustomShortNumericField extends CustomNumericField {
+
+ private final short number;
+
+ private final NumberFieldMapper mapper;
+
+ public CustomShortNumericField(NumberFieldMapper mapper, short number, FieldType fieldType) {
+ super(mapper, number, fieldType);
+ this.mapper = mapper;
+ this.number = number;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ if (fieldType().indexed()) {
+ return mapper.popCachedStream().setIntValue(number);
+ }
+ return null;
+ }
+
+ @Override
+ public String numericAsString() {
+ return Short.toString(number);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
new file mode 100644
index 0000000..a108fcb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
@@ -0,0 +1,496 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
+
+/**
+ * A {@link FieldMapper} for string values.
+ */
+public class StringFieldMapper extends AbstractFieldMapper<String> implements AllFieldMapper.IncludeInAll {
+
+ public static final String CONTENT_TYPE = "string";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+
+ // NOTE: when adding defaults here, make sure you also add them in the builder
+ public static final String NULL_VALUE = null;
+ public static final int POSITION_OFFSET_GAP = 0;
+ public static final int IGNORE_ABOVE = -1;
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, StringFieldMapper> {
+
+ protected String nullValue = Defaults.NULL_VALUE;
+
+ protected int positionOffsetGap = Defaults.POSITION_OFFSET_GAP;
+
+ protected NamedAnalyzer searchQuotedAnalyzer;
+
+ protected int ignoreAbove = Defaults.IGNORE_ABOVE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(String nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public Builder searchAnalyzer(NamedAnalyzer searchAnalyzer) {
+ super.searchAnalyzer(searchAnalyzer);
+ if (searchQuotedAnalyzer == null) {
+ searchQuotedAnalyzer = searchAnalyzer;
+ }
+ return this;
+ }
+
+ public Builder positionOffsetGap(int positionOffsetGap) {
+ this.positionOffsetGap = positionOffsetGap;
+ return this;
+ }
+
+ public Builder searchQuotedAnalyzer(NamedAnalyzer analyzer) {
+ this.searchQuotedAnalyzer = analyzer;
+ return builder;
+ }
+
+ public Builder ignoreAbove(int ignoreAbove) {
+ this.ignoreAbove = ignoreAbove;
+ return this;
+ }
+
+ @Override
+ public StringFieldMapper build(BuilderContext context) {
+ if (positionOffsetGap > 0) {
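+ // e.g. a gap of 100 keeps phrase queries from matching across different values of a multi-valued field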
+ indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionOffsetGap);
+ searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionOffsetGap);
+ searchQuotedAnalyzer = new NamedAnalyzer(searchQuotedAnalyzer, positionOffsetGap);
+ }
+ // if the field is not analyzed, then by default we should omit norms and use docs-only
+ // index options, as that is probably what the user really wants
+ // if they are set explicitly, we use those values
+ // we also change the values on the default field type so that toXContent only emits what
+ // differs from the defaults
+ FieldType defaultFieldType = new FieldType(Defaults.FIELD_TYPE);
+ if (fieldType.indexed() && !fieldType.tokenized()) {
+ defaultFieldType.setOmitNorms(true);
+ defaultFieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
+ if (!omitNormsSet && boost == Defaults.BOOST) {
+ fieldType.setOmitNorms(true);
+ }
+ if (!indexOptionsSet) {
+ fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
+ }
+ }
+ defaultFieldType.freeze();
+ StringFieldMapper fieldMapper = new StringFieldMapper(buildNames(context),
+ boost, fieldType, defaultFieldType, docValues, nullValue, indexAnalyzer, searchAnalyzer, searchQuotedAnalyzer,
+ positionOffsetGap, ignoreAbove, postingsProvider, docValuesProvider, similarity, normsLoading,
+ fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ StringFieldMapper.Builder builder = stringField(name);
+ parseField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(propNode.toString());
+ } else if (propName.equals("search_quote_analyzer")) {
+ NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+ }
+ builder.searchQuotedAnalyzer(analyzer);
+ } else if (propName.equals("position_offset_gap")) {
+ builder.positionOffsetGap(XContentMapValues.nodeIntegerValue(propNode, -1));
+ // in this case we need to resolve the actual analyzers if they are not set,
+ // so that we can inject the position offset gap into them
+ if (builder.indexAnalyzer == null) {
+ builder.indexAnalyzer = parserContext.analysisService().defaultIndexAnalyzer();
+ }
+ if (builder.searchAnalyzer == null) {
+ builder.searchAnalyzer = parserContext.analysisService().defaultSearchAnalyzer();
+ }
+ if (builder.searchQuotedAnalyzer == null) {
+ builder.searchQuotedAnalyzer = parserContext.analysisService().defaultSearchQuoteAnalyzer();
+ }
+ } else if (propName.equals("ignore_above")) {
+ builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
+ } else {
+ parseMultiField(builder, name, node, parserContext, propName, propNode);
+ }
+ }
+ return builder;
+ }
+ }
+
+ private String nullValue;
+ private Boolean includeInAll;
+ private int positionOffsetGap;
+ private NamedAnalyzer searchQuotedAnalyzer;
+ private int ignoreAbove;
+ private final FieldType defaultFieldType;
+
+ protected StringFieldMapper(Names names, float boost, FieldType fieldType, FieldType defaultFieldType, Boolean docValues,
+ String nullValue, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,
+ NamedAnalyzer searchQuotedAnalyzer, int positionOffsetGap, int ignoreAbove,
+ PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsFormat, docValuesFormat,
+ similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ if (fieldType.tokenized() && fieldType.indexed() && hasDocValues()) {
+ throw new MapperParsingException("Field [" + names.fullName() + "] cannot be analyzed and have doc values");
+ }
+ this.defaultFieldType = defaultFieldType;
+ this.nullValue = nullValue;
+ this.positionOffsetGap = positionOffsetGap;
+ this.searchQuotedAnalyzer = searchQuotedAnalyzer != null ? searchQuotedAnalyzer : this.searchAnalyzer;
+ this.ignoreAbove = ignoreAbove;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return defaultFieldType;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public void includeInAll(Boolean includeInAll) {
+ if (includeInAll != null) {
+ this.includeInAll = includeInAll;
+ }
+ }
+
+ @Override
+ public void includeInAllIfNotSet(Boolean includeInAll) {
+ if (includeInAll != null && this.includeInAll == null) {
+ this.includeInAll = includeInAll;
+ }
+ }
+
+ @Override
+ public void unsetIncludeInAll() {
+ includeInAll = null;
+ }
+
+ @Override
+ public String value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return value.toString();
+ }
+
+ @Override
+ protected boolean customBoost() {
+ return true;
+ }
+
+ public int getPositionOffsetGap() {
+ return this.positionOffsetGap;
+ }
+
+ @Override
+ public Analyzer searchQuoteAnalyzer() {
+ return this.searchQuotedAnalyzer;
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return termFilter(nullValue, null);
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ ValueAndBoost valueAndBoost = parseCreateFieldForString(context, nullValue, boost);
+ if (valueAndBoost.value() == null) {
+ return;
+ }
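+ // ignore_above (disabled by default: -1) drops values longer than the configured character length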
+ if (ignoreAbove > 0 && valueAndBoost.value().length() > ignoreAbove) {
+ return;
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), valueAndBoost.value(), valueAndBoost.boost());
+ }
+
+ if (fieldType.indexed() || fieldType.stored()) {
+ Field field = new StringField(names.indexName(), valueAndBoost.value(), fieldType);
+ field.setBoost(valueAndBoost.boost());
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ fields.add(new SortedSetDocValuesField(names.indexName(), new BytesRef(valueAndBoost.value())));
+ }
+ if (fields.isEmpty()) {
+ context.ignoredValue(names.indexName(), valueAndBoost.value());
+ }
+ }
+
+ /**
+ * Parse a field as though it were a string.
+ * @param context parse context used during parsing
+ * @param nullValue value to use for null
+ * @param defaultBoost default boost value returned unless overwritten in the field
+ * @return the parsed field and the boost either parsed or defaulted
+ * @throws IOException if thrown while parsing
+ */
+ public static ValueAndBoost parseCreateFieldForString(ParseContext context, String nullValue, float defaultBoost) throws IOException {
+ if (context.externalValueSet()) {
+ return new ValueAndBoost((String) context.externalValue(), defaultBoost);
+ }
+ XContentParser parser = context.parser();
+ if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
+ return new ValueAndBoost(nullValue, defaultBoost);
+ }
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
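+ // object form, e.g. { "value" : "some text", "boost" : 2.0 }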
+ XContentParser.Token token;
+ String currentFieldName = null;
+ String value = nullValue;
+ float boost = defaultBoost;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
+ value = parser.textOrNull();
+ } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+ }
+ }
+ }
+ return new ValueAndBoost(value, boost);
+ }
+ return new ValueAndBoost(parser.textOrNull(), defaultBoost);
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
+ this.nullValue = ((StringFieldMapper) mergeWith).nullValue;
+ this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ if (includeDefaults || positionOffsetGap != Defaults.POSITION_OFFSET_GAP) {
+ builder.field("position_offset_gap", positionOffsetGap);
+ }
+ if (searchQuotedAnalyzer != null && searchAnalyzer != searchQuotedAnalyzer) {
+ builder.field("search_quote_analyzer", searchQuotedAnalyzer.name());
+ } else if (includeDefaults) {
+ if (searchQuotedAnalyzer == null) {
+ builder.field("search_quote_analyzer", "default");
+ } else {
+ builder.field("search_quote_analyzer", searchQuotedAnalyzer.name());
+ }
+ }
+ if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
+ builder.field("ignore_above", ignoreAbove);
+ }
+ }
+
+ /** Extension of {@link Field} supporting reuse of a cached TokenStream for non-tokenized values. */
+ static class StringField extends Field {
+
+ public StringField(String name, String value, FieldType fieldType) {
+ super(name, fieldType);
+ fieldsData = value;
+ }
+
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ if (!fieldType().indexed()) {
+ return null;
+ }
+ // Only use the cached TokenStream if the value is indexed and non-tokenized
+ if (fieldType().tokenized()) {
+ return super.tokenStream(analyzer);
+ }
+ return NOT_ANALYZED_TOKENSTREAM.get().setValue((String) fieldsData);
+ }
+ }
+
+ private static final ThreadLocal<StringTokenStream> NOT_ANALYZED_TOKENSTREAM = new ThreadLocal<StringTokenStream>() {
+ @Override
+ protected StringTokenStream initialValue() {
+ return new StringTokenStream();
+ }
+ };
+
+
+ // Copied from Field.java
+ static final class StringTokenStream extends TokenStream {
+ private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+ private boolean used = false;
+ private String value = null;
+
+ /**
+ * Creates a new TokenStream that returns a String as a single token.
+ * <p>Warning: Does not initialize the value, you must call
+ * {@link #setValue(String)} afterwards!
+ */
+ StringTokenStream() {
+ }
+
+ /** Sets the string value. */
+ StringTokenStream setValue(String value) {
+ this.value = value;
+ return this;
+ }
+
+ @Override
+ public boolean incrementToken() {
+ if (used) {
+ return false;
+ }
+ clearAttributes();
+ termAttribute.append(value);
+ offsetAttribute.setOffset(0, value.length());
+ used = true;
+ return true;
+ }
+
+ @Override
+ public void end() {
+ final int finalOffset = value.length();
+ offsetAttribute.setOffset(finalOffset, finalOffset);
+ value = null;
+ }
+
+ @Override
+ public void reset() {
+ used = false;
+ }
+
+ @Override
+ public void close() {
+ value = null;
+ }
+ }
+
+ /**
+ * Parsed value and boost to be returned from {@link #parseCreateFieldForString}.
+ */
+ public static class ValueAndBoost {
+ private final String value;
+ private final float boost;
+
+ public ValueAndBoost(String value, float boost) {
+ this.value = value;
+ this.boost = boost;
+ }
+
+ /**
+ * Value of string field.
+ * @return value of string field
+ */
+ public String value() {
+ return value;
+ }
+
+ /**
+ * Boost either parsed from the document or defaulted.
+ * @return boost either parsed from the document or defaulted
+ */
+ public float boost() {
+ return boost;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
new file mode 100644
index 0000000..513e911
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.tokenCountField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ * A {@link FieldMapper} that takes a string and writes a count of the tokens in that string
+ * to the index. In most ways the mapper acts just like an {@link IntegerFieldMapper}.
+ */
+public class TokenCountFieldMapper extends IntegerFieldMapper {
+ public static final String CONTENT_TYPE = "token_count";
+
+ public static class Defaults extends IntegerFieldMapper.Defaults {
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, TokenCountFieldMapper> {
+ private Integer nullValue = Defaults.NULL_VALUE;
+ private NamedAnalyzer analyzer;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(int nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ public Builder analyzer(NamedAnalyzer analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ public NamedAnalyzer analyzer() {
+ return analyzer;
+ }
+
+ @Override
+ public TokenCountFieldMapper build(BuilderContext context) {
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,
+ ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings(),
+ analyzer, multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ @SuppressWarnings("unchecked")
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ TokenCountFieldMapper.Builder builder = tokenCountField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeIntegerValue(propNode));
+ } else if (propName.equals("analyzer")) {
+ NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+ }
+ builder.analyzer(analyzer);
+ }
+ }
+ if (builder.analyzer() == null) {
+ throw new MapperParsingException("Analyzer must be set for field [" + name + "] but wasn't.");
+ }
+ return builder;
+ }
+ }
+
+ private NamedAnalyzer analyzer;
+
+ protected TokenCountFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues, Integer nullValue,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, Settings fieldDataSettings, Settings indexSettings, NamedAnalyzer analyzer,
+ MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed, coerce, postingsProvider, docValuesProvider,
+ similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+
+ this.analyzer = analyzer;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ ValueAndBoost valueAndBoost = StringFieldMapper.parseCreateFieldForString(context, null /* our null value is an int, so we convert below */, boost);
+ if (valueAndBoost.value() == null && nullValue() == null) {
+ return;
+ }
+
+ if (fieldType.indexed() || fieldType.stored() || hasDocValues()) {
+ int count;
+ if (valueAndBoost.value() == null) {
+ count = nullValue();
+ } else {
+ count = countPositions(analyzer.analyzer().tokenStream(name(), valueAndBoost.value()));
+ }
+ addIntegerFields(context, fields, count, valueAndBoost.boost());
+ }
+ if (fields.isEmpty()) {
+ context.ignoredValue(names.indexName(), valueAndBoost.value());
+ }
+ }
+
+ /**
+ * Count position increments in a token stream. Package private for testing.
+ * @param tokenStream token stream to count
+ * @return number of position increments in a token stream
+ * @throws IOException if tokenStream throws it
+ */
+ static int countPositions(TokenStream tokenStream) throws IOException {
+ try {
+ int count = 0;
+ PositionIncrementAttribute position = tokenStream.addAttribute(PositionIncrementAttribute.class);
+ tokenStream.reset();
+ while (tokenStream.incrementToken()) {
+ count += position.getPositionIncrement();
+ }
+ tokenStream.end();
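+ // end() exposes a final position increment, so trailing gaps (e.g. removed stopwords) are counted too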
+ count += position.getPositionIncrement();
+ return count;
+ } finally {
+ tokenStream.close();
+ }
+ }
+
+ /**
+ * Name of analyzer.
+ * @return name of analyzer
+ */
+ public String analyzer() {
+ return analyzer.name();
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ builder.field("analyzer", analyzer());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
new file mode 100644
index 0000000..5bfc780
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.loader.SettingsLoader;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper.Loading;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.*;
+import static org.elasticsearch.index.mapper.FieldMapper.DOC_VALUES_FORMAT;
+
+/**
+ * Helpers for parsing mapping settings shared by the core field mappers.
+ */
+public class TypeParsers {
+
+ public static final String MULTI_FIELD_CONTENT_TYPE = "multi_field";
+ public static final Mapper.TypeParser multiFieldConverterTypeParser = new Mapper.TypeParser() {
+
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ ContentPath.Type pathType = null;
+ AbstractFieldMapper.Builder mainFieldBuilder = null;
+ List<AbstractFieldMapper.Builder> fields = null;
+ String firstType = null;
+
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("path")) {
+ pathType = parsePathType(name, fieldNode.toString());
+ } else if (fieldName.equals("fields")) {
+ Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode;
+ for (Map.Entry<String, Object> entry1 : fieldsNode.entrySet()) {
+ String propName = entry1.getKey();
+ Map<String, Object> propNode = (Map<String, Object>) entry1.getValue();
+
+ String type;
+ Object typeNode = propNode.get("type");
+ if (typeNode != null) {
+ type = typeNode.toString();
+ if (firstType == null) {
+ firstType = type;
+ }
+ } else {
+ throw new MapperParsingException("No type specified for property [" + propName + "]");
+ }
+
+ Mapper.TypeParser typeParser = parserContext.typeParser(type);
+ if (typeParser == null) {
+ throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + fieldName + "]");
+ }
+ if (propName.equals(name)) {
+ mainFieldBuilder = (AbstractFieldMapper.Builder) typeParser.parse(propName, propNode, parserContext);
+ } else {
+ if (fields == null) {
+ fields = new ArrayList<AbstractFieldMapper.Builder>(2);
+ }
+ fields.add((AbstractFieldMapper.Builder) typeParser.parse(propName, propNode, parserContext));
+ }
+ }
+ }
+ }
+
+ if (mainFieldBuilder == null) {
+ if (fields == null) {
+ // No fields at all were specified in multi_field, so let's return a non-indexed string field.
+ return new StringFieldMapper.Builder(name).index(false);
+ }
+ Mapper.TypeParser typeParser = parserContext.typeParser(firstType);
+ if (typeParser == null) {
+ // The first multi field's type is unknown
+ mainFieldBuilder = new StringFieldMapper.Builder(name).index(false);
+ } else {
+ Mapper.Builder substitute = typeParser.parse(name, Collections.<String, Object>emptyMap(), parserContext);
+ if (substitute instanceof AbstractFieldMapper.Builder) {
+ mainFieldBuilder = ((AbstractFieldMapper.Builder) substitute).index(false);
+ } else {
+ // The first multi field isn't a core field type
+ mainFieldBuilder = new StringFieldMapper.Builder(name).index(false);
+ }
+ }
+ }
+
+ if (fields != null && pathType != null) {
+ for (Mapper.Builder field : fields) {
+ mainFieldBuilder.addMultiField(field);
+ }
+ mainFieldBuilder.multiFieldPathType(pathType);
+ } else if (fields != null) {
+ for (Mapper.Builder field : fields) {
+ mainFieldBuilder.addMultiField(field);
+ }
+ } else if (pathType != null) {
+ mainFieldBuilder.multiFieldPathType(pathType);
+ }
+ return mainFieldBuilder;
+ }
+
+ };
+
+ public static final String DOC_VALUES = "doc_values";
+ public static final String INDEX_OPTIONS_DOCS = "docs";
+ public static final String INDEX_OPTIONS_FREQS = "freqs";
+ public static final String INDEX_OPTIONS_POSITIONS = "positions";
+ public static final String INDEX_OPTIONS_OFFSETS = "offsets";
+
+ public static void parseNumberField(NumberFieldMapper.Builder builder, String name, Map<String, Object> numberNode, Mapper.TypeParser.ParserContext parserContext) {
+ parseField(builder, name, numberNode, parserContext);
+ for (Map.Entry<String, Object> entry : numberNode.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("precision_step")) {
+ builder.precisionStep(nodeIntegerValue(propNode));
+ } else if (propName.equals("ignore_malformed")) {
+ builder.ignoreMalformed(nodeBooleanValue(propNode));
+ } else if (propName.equals("coerce")) {
+ builder.coerce(nodeBooleanValue(propNode));
+ } else if (propName.equals("omit_norms")) {
+ builder.omitNorms(nodeBooleanValue(propNode));
+ } else if (propName.equals("similarity")) {
+ builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
+ } else {
+ parseMultiField(builder, name, numberNode, parserContext, propName, propNode);
+ }
+ }
+ }
+
+ public static void parseField(AbstractFieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
+ for (Map.Entry<String, Object> entry : fieldNode.entrySet()) {
+ final String propName = Strings.toUnderscoreCase(entry.getKey());
+ final Object propNode = entry.getValue();
+ if (propName.equals("index_name")) {
+ builder.indexName(propNode.toString());
+ } else if (propName.equals("store")) {
+ builder.store(parseStore(name, propNode.toString()));
+ } else if (propName.equals("index")) {
+ parseIndex(name, propNode.toString(), builder);
+ } else if (propName.equals("tokenized")) {
+ builder.tokenized(nodeBooleanValue(propNode));
+ } else if (propName.equals(DOC_VALUES)) {
+ builder.docValues(nodeBooleanValue(propNode));
+ } else if (propName.equals("term_vector")) {
+ parseTermVector(name, propNode.toString(), builder);
+ } else if (propName.equals("boost")) {
+ builder.boost(nodeFloatValue(propNode));
+ } else if (propName.equals("store_term_vectors")) {
+ builder.storeTermVectors(nodeBooleanValue(propNode));
+ } else if (propName.equals("store_term_vector_offsets")) {
+ builder.storeTermVectorOffsets(nodeBooleanValue(propNode));
+ } else if (propName.equals("store_term_vector_positions")) {
+ builder.storeTermVectorPositions(nodeBooleanValue(propNode));
+ } else if (propName.equals("store_term_vector_payloads")) {
+ builder.storeTermVectorPayloads(nodeBooleanValue(propNode));
+ } else if (propName.equals("omit_norms")) {
+ builder.omitNorms(nodeBooleanValue(propNode));
+ } else if (propName.equals("norms")) {
+ final Map<String, Object> properties = nodeMapValue(propNode, "norms");
+ for (Map.Entry<String, Object> entry2 : properties.entrySet()) {
+ final String propName2 = Strings.toUnderscoreCase(entry2.getKey());
+ final Object propNode2 = entry2.getValue();
+ if (propName2.equals("enabled")) {
+ builder.omitNorms(!nodeBooleanValue(propNode2));
+ } else if (propName2.equals(Loading.KEY)) {
+ builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null));
+ }
+ }
+ } else if (propName.equals("omit_term_freq_and_positions")) {
+ final IndexOptions op = nodeBooleanValue(propNode) ? IndexOptions.DOCS_ONLY : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+ if (parserContext.indexVersionCreated().onOrAfter(Version.V_1_0_0_RC2)) {
+ throw new ElasticsearchParseException("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : '" + op.name() + "'] instead");
+ }
+ // deprecated option for BW compat
+ builder.indexOptions(op);
+ } else if (propName.equals("index_options")) {
+ builder.indexOptions(nodeIndexOptionValue(propNode));
+ } else if (propName.equals("analyzer")) {
+ NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+ }
+ builder.indexAnalyzer(analyzer);
+ builder.searchAnalyzer(analyzer);
+ } else if (propName.equals("index_analyzer")) {
+ NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+ }
+ builder.indexAnalyzer(analyzer);
+ } else if (propName.equals("search_analyzer")) {
+ NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+ if (analyzer == null) {
+ throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+ }
+ builder.searchAnalyzer(analyzer);
+ } else if (propName.equals("include_in_all")) {
+ builder.includeInAll(nodeBooleanValue(propNode));
+ } else if (propName.equals("postings_format")) {
+ String postingFormatName = propNode.toString();
+ builder.postingsFormat(parserContext.postingFormatService().get(postingFormatName));
+ } else if (propName.equals(DOC_VALUES_FORMAT)) {
+ String docValuesFormatName = propNode.toString();
+ builder.docValuesFormat(parserContext.docValuesFormatService().get(docValuesFormatName));
+ } else if (propName.equals("similarity")) {
+ builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
+ } else if (propName.equals("fielddata")) {
+ final Settings settings = ImmutableSettings.builder().put(SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(propNode, "fielddata"))).build();
+ builder.fieldDataSettings(settings);
+ } else if (propName.equals("copy_to")) {
+ parseCopyFields(propNode, builder);
+ }
+ }
+ }
+
+ public static void parseMultiField(AbstractFieldMapper.Builder builder, String name, Map<String, Object> node, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
+ if (propName.equals("path")) {
+ builder.multiFieldPathType(parsePathType(name, propNode.toString()));
+ } else if (propName.equals("fields")) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> multiFieldsPropNodes = (Map<String, Object>) propNode;
+ for (Map.Entry<String, Object> multiFieldEntry : multiFieldsPropNodes.entrySet()) {
+ String multiFieldName = multiFieldEntry.getKey();
+ if (!(multiFieldEntry.getValue() instanceof Map)) {
+ throw new MapperParsingException("Illegal field [" + multiFieldName + "], only fields can be specified inside fields");
+ }
+ @SuppressWarnings("unchecked")
+ Map<String, Object> multiFieldNodes = (Map<String, Object>) multiFieldEntry.getValue();
+
+ String type;
+ Object typeNode = multiFieldNodes.get("type");
+ if (typeNode != null) {
+ type = typeNode.toString();
+ } else {
+ throw new MapperParsingException("No type specified for property [" + multiFieldName + "]");
+ }
+
+ Mapper.TypeParser typeParser = parserContext.typeParser(type);
+ if (typeParser == null) {
+ throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + multiFieldName + "]");
+ }
+ builder.addMultiField(typeParser.parse(multiFieldName, multiFieldNodes, parserContext));
+ }
+ }
+ }
+
+ private static IndexOptions nodeIndexOptionValue(final Object propNode) {
+ final String value = propNode.toString();
+ if (INDEX_OPTIONS_OFFSETS.equalsIgnoreCase(value)) {
+ return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
+ } else if (INDEX_OPTIONS_POSITIONS.equalsIgnoreCase(value)) {
+ return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+ } else if (INDEX_OPTIONS_FREQS.equalsIgnoreCase(value)) {
+ return IndexOptions.DOCS_AND_FREQS;
+ } else if (INDEX_OPTIONS_DOCS.equalsIgnoreCase(value)) {
+ return IndexOptions.DOCS_ONLY;
+ } else {
+ throw new ElasticsearchParseException("Failed to parse index option [" + value + "]");
+ }
+ }
+
+ public static FormatDateTimeFormatter parseDateTimeFormatter(String fieldName, Object node) {
+ return Joda.forPattern(node.toString());
+ }
+
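+ /** Parses the "term_vector" mapping value ("no", "yes", "with_positions_offsets_payloads", ...) into the individual storeTermVector* flags on the builder. */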
+ public static void parseTermVector(String fieldName, String termVector, AbstractFieldMapper.Builder builder) throws MapperParsingException {
+ termVector = Strings.toUnderscoreCase(termVector);
+ if ("no".equals(termVector)) {
+ builder.storeTermVectors(false);
+ } else if ("yes".equals(termVector)) {
+ builder.storeTermVectors(true);
+ } else if ("with_offsets".equals(termVector)) {
+ builder.storeTermVectorOffsets(true);
+ } else if ("with_positions".equals(termVector)) {
+ builder.storeTermVectorPositions(true);
+ } else if ("with_positions_offsets".equals(termVector)) {
+ builder.storeTermVectorPositions(true);
+ builder.storeTermVectorOffsets(true);
+ } else if ("with_positions_payloads".equals(termVector)) {
+ builder.storeTermVectorPositions(true);
+ builder.storeTermVectorPayloads(true);
+ } else if ("with_positions_offsets_payloads".equals(termVector)) {
+ builder.storeTermVectorPositions(true);
+ builder.storeTermVectorOffsets(true);
+ builder.storeTermVectorPayloads(true);
+ } else {
+ throw new MapperParsingException("Wrong value for termVector [" + termVector + "] for field [" + fieldName + "]");
+ }
+ }
+
+ public static void parseIndex(String fieldName, String index, AbstractFieldMapper.Builder builder) throws MapperParsingException {
+ index = Strings.toUnderscoreCase(index);
+ if ("no".equals(index)) {
+ builder.index(false);
+ } else if ("not_analyzed".equals(index)) {
+ builder.index(true);
+ builder.tokenized(false);
+ } else if ("analyzed".equals(index)) {
+ builder.index(true);
+ builder.tokenized(true);
+ } else {
+ throw new MapperParsingException("Wrong value for index [" + index + "] for field [" + fieldName + "]");
+ }
+ }
+
+ public static boolean parseDocValues(String docValues) {
+ if ("no".equals(docValues)) {
+ return false;
+ } else if ("yes".equals(docValues)) {
+ return true;
+ } else {
+ return nodeBooleanValue(docValues);
+ }
+ }
+
+ public static boolean parseStore(String fieldName, String store) throws MapperParsingException {
+ if ("no".equals(store)) {
+ return false;
+ } else if ("yes".equals(store)) {
+ return true;
+ } else {
+ return nodeBooleanValue(store);
+ }
+ }
+
+ public static ContentPath.Type parsePathType(String name, String path) throws MapperParsingException {
+ path = Strings.toUnderscoreCase(path);
+ if ("just_name".equals(path)) {
+ return ContentPath.Type.JUST_NAME;
+ } else if ("full".equals(path)) {
+ return ContentPath.Type.FULL;
+ } else {
+ throw new MapperParsingException("Wrong value for pathType [" + path + "] for object [" + name + "]");
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public static void parseCopyFields(Object propNode, AbstractFieldMapper.Builder builder) {
+ AbstractFieldMapper.CopyTo.Builder copyToBuilder = new AbstractFieldMapper.CopyTo.Builder();
+ if (isArray(propNode)) {
+ for(Object node : (List<Object>) propNode) {
+ copyToBuilder.add(nodeStringValue(node, null));
+ }
+ } else {
+ copyToBuilder.add(nodeStringValue(propNode, null));
+ }
+ builder.copyTo(copyToBuilder.build());
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/default-mapping.json b/src/main/java/org/elasticsearch/index/mapper/default-mapping.json
new file mode 100644
index 0000000..7b035a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/default-mapping.json
@@ -0,0 +1,4 @@
+{
+ "_default_":{
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
new file mode 100644
index 0000000..3c8b436
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
@@ -0,0 +1,761 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.util.ByteUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
+import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
+
+/**
+ * Parsing: we handle the following point representations:
+ * <p/>
+ * - "field" : "<geohash>"
+ * - "field" : "lat,lon"
+ * - "field" : {
+ * "lat" : 1.1,
+ * "lon" : 2.1
+ * }
+ * - "field" : [lon, lat] or an array of such arrays
+ */
+public class GeoPointFieldMapper extends AbstractFieldMapper<GeoPoint> implements ArrayValueMapperParser {
+
+ public static final String CONTENT_TYPE = "geo_point";
+
+ public static class Names {
+ public static final String LAT = "lat";
+ public static final String LAT_SUFFIX = "." + LAT;
+ public static final String LON = "lon";
+ public static final String LON_SUFFIX = "." + LON;
+ public static final String GEOHASH = "geohash";
+ public static final String GEOHASH_SUFFIX = "." + GEOHASH;
+ }
+
+ public static class Defaults {
+ public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
+ public static final boolean STORE = false;
+ public static final boolean ENABLE_LATLON = false;
+ public static final boolean ENABLE_GEOHASH = false;
+ public static final boolean ENABLE_GEOHASH_PREFIX = false;
+ public static final int GEO_HASH_PRECISION = GeoHashUtils.PRECISION;
+ public static final boolean NORMALIZE_LAT = true;
+ public static final boolean NORMALIZE_LON = true;
+ public static final boolean VALIDATE_LAT = true;
+ public static final boolean VALIDATE_LON = true;
+
+ public static final FieldType FIELD_TYPE = new FieldType(StringFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, GeoPointFieldMapper> {
+
+ private ContentPath.Type pathType = Defaults.PATH_TYPE;
+
+ private boolean enableGeoHash = Defaults.ENABLE_GEOHASH;
+
+ private boolean enableGeohashPrefix = Defaults.ENABLE_GEOHASH_PREFIX;
+
+ private boolean enableLatLon = Defaults.ENABLE_LATLON;
+
+ private Integer precisionStep;
+
+ private int geoHashPrecision = Defaults.GEO_HASH_PRECISION;
+
+ boolean validateLat = Defaults.VALIDATE_LAT;
+ boolean validateLon = Defaults.VALIDATE_LON;
+ boolean normalizeLat = Defaults.NORMALIZE_LAT;
+ boolean normalizeLon = Defaults.NORMALIZE_LON;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ this.builder = this;
+ }
+
+ public Builder multiFieldPathType(ContentPath.Type pathType) {
+ this.pathType = pathType;
+ return this;
+ }
+
+ public Builder enableGeoHash(boolean enableGeoHash) {
+ this.enableGeoHash = enableGeoHash;
+ return this;
+ }
+
+ public Builder geohashPrefix(boolean enableGeohashPrefix) {
+ this.enableGeohashPrefix = enableGeohashPrefix;
+ return this;
+ }
+
+ public Builder enableLatLon(boolean enableLatLon) {
+ this.enableLatLon = enableLatLon;
+ return this;
+ }
+
+ public Builder precisionStep(int precisionStep) {
+ this.precisionStep = precisionStep;
+ return this;
+ }
+
+ public Builder geoHashPrecision(int precision) {
+ this.geoHashPrecision = precision;
+ return this;
+ }
+
+ public Builder fieldDataSettings(Settings settings) {
+ this.fieldDataSettings = settings;
+ return builder;
+ }
+
+ @Override
+ public GeoPointFieldMapper build(BuilderContext context) {
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(pathType);
+
+ DoubleFieldMapper latMapper = null;
+ DoubleFieldMapper lonMapper = null;
+
+ context.path().add(name);
+ if (enableLatLon) {
+ NumberFieldMapper.Builder<?, ?> latMapperBuilder = doubleField(Names.LAT).includeInAll(false);
+ NumberFieldMapper.Builder<?, ?> lonMapperBuilder = doubleField(Names.LON).includeInAll(false);
+ if (precisionStep != null) {
+ latMapperBuilder.precisionStep(precisionStep);
+ lonMapperBuilder.precisionStep(precisionStep);
+ }
+ latMapper = (DoubleFieldMapper) latMapperBuilder.includeInAll(false).store(fieldType.stored()).build(context);
+ lonMapper = (DoubleFieldMapper) lonMapperBuilder.includeInAll(false).store(fieldType.stored()).build(context);
+ }
+ StringFieldMapper geohashMapper = null;
+ if (enableGeoHash) {
+ geohashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).omitNorms(true).indexOptions(IndexOptions.DOCS_ONLY).build(context);
+ }
+ context.path().remove();
+
+ context.path().pathType(origPathType);
+
+ // this is important: even if geo points feel like they need to be tokenized to distinguish lat from lon, we actually want to
+ // store them as a single token.
+ fieldType.setTokenized(false);
+
+ return new GeoPointFieldMapper(buildNames(context), fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider,
+ similarity, fieldDataSettings, context.indexSettings(), origPathType, enableLatLon, enableGeoHash, enableGeohashPrefix, precisionStep,
+ geoHashPrecision, latMapper, lonMapper, geohashMapper, validateLon, validateLat, normalizeLon, normalizeLat
+ , multiFieldsBuilder.build(this, context));
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ Builder builder = geoPointField(name);
+ parseField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("path")) {
+ builder.multiFieldPathType(parsePathType(name, fieldNode.toString()));
+ } else if (fieldName.equals("lat_lon")) {
+ builder.enableLatLon(XContentMapValues.nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("geohash")) {
+ builder.enableGeoHash(XContentMapValues.nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("geohash_prefix")) {
+ builder.geohashPrefix(XContentMapValues.nodeBooleanValue(fieldNode));
+ if (XContentMapValues.nodeBooleanValue(fieldNode)) {
+ builder.enableGeoHash(true);
+ }
+ } else if (fieldName.equals("precision_step")) {
+ builder.precisionStep(XContentMapValues.nodeIntegerValue(fieldNode));
+ } else if (fieldName.equals("geohash_precision")) {
+ if (fieldNode instanceof Integer) {
+ builder.geoHashPrecision(XContentMapValues.nodeIntegerValue(fieldNode));
+ } else {
+ builder.geoHashPrecision(GeoUtils.geoHashLevelsForPrecision(fieldNode.toString()));
+ }
+ } else if (fieldName.equals("validate")) {
+ builder.validateLat = XContentMapValues.nodeBooleanValue(fieldNode);
+ builder.validateLon = XContentMapValues.nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("validate_lon")) {
+ builder.validateLon = XContentMapValues.nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("validate_lat")) {
+ builder.validateLat = XContentMapValues.nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("normalize")) {
+ builder.normalizeLat = XContentMapValues.nodeBooleanValue(fieldNode);
+ builder.normalizeLon = XContentMapValues.nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("normalize_lat")) {
+ builder.normalizeLat = XContentMapValues.nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("normalize_lon")) {
+ builder.normalizeLon = XContentMapValues.nodeBooleanValue(fieldNode);
+ } else {
+ parseMultiField(builder, name, node, parserContext, fieldName, fieldNode);
+ }
+ }
+ return builder;
+ }
+ }
+
+ /**
+ * A byte-aligned fixed-length encoding for latitudes and longitudes.
+ */
+ public static final class Encoding {
+
+ // With 14 bytes (56 bits per coordinate) we already have better precision than a double,
+ // which spends 11 of its 64 bits on the exponent and one on the sign, leaving a 52-bit mantissa
+ private static final int MAX_NUM_BYTES = 14;
+
+ private static final Encoding[] INSTANCES;
+ static {
+ INSTANCES = new Encoding[MAX_NUM_BYTES + 1];
+ for (int numBytes = 2; numBytes <= MAX_NUM_BYTES; numBytes += 2) {
+ INSTANCES[numBytes] = new Encoding(numBytes);
+ }
+ }
+
+ /** Get an instance based on the number of bytes that have been used to encode values. */
+ public static final Encoding of(int numBytesPerValue) {
+ final Encoding instance = INSTANCES[numBytesPerValue];
+ if (instance == null) {
+ throw new ElasticsearchIllegalStateException("No encoding for " + numBytesPerValue + " bytes per value");
+ }
+ return instance;
+ }
+
+ /** Get an instance based on the expected precision. Here are examples of the number of required bytes per value depending on the
+ * expected precision:<ul>
+ * <li>1km: 4 bytes</li>
+ * <li>3m: 6 bytes</li>
+ * <li>1m: 8 bytes</li>
+ * <li>1cm: 8 bytes</li>
+ * <li>1mm: 10 bytes</li></ul> */
+ public static final Encoding of(DistanceUnit.Distance precision) {
+ for (Encoding encoding : INSTANCES) {
+ if (encoding != null && encoding.precision().compareTo(precision) <= 0) {
+ return encoding;
+ }
+ }
+ return INSTANCES[MAX_NUM_BYTES];
+ }
+
+ private final DistanceUnit.Distance precision;
+ private final int numBytes;
+ private final int numBytesPerCoordinate;
+ private final double factor;
+
+ private Encoding(int numBytes) {
+ assert numBytes >= 1 && numBytes <= MAX_NUM_BYTES;
+ assert (numBytes & 1) == 0; // we don't support odd numBytes for the moment
+ this.numBytes = numBytes;
+ this.numBytesPerCoordinate = numBytes / 2;
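+ // factor is the size in degrees of one encoding step: 2^bits * factor = 2^9 = 512 degrees,
+ // which covers the shifted [0, 360] range with less than one wasted bit (see the assert below).
+ // For example, 4 bytes (2 per coordinate) give factor = 2^-7, about 0.0078 degrees, i.e.
+ // roughly 870m at the equator, matching the "1km: 4 bytes" figure documented above.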
+ this.factor = Math.pow(2, - numBytesPerCoordinate * 8 + 9);
+ assert (1L << (numBytesPerCoordinate * 8 - 1)) * factor > 180 && (1L << (numBytesPerCoordinate * 8 - 2)) * factor < 180 : numBytesPerCoordinate + " " + factor;
+ if (numBytes == MAX_NUM_BYTES) {
+ // no precision loss compared to a double
+ precision = new DistanceUnit.Distance(0, DistanceUnit.DEFAULT);
+ } else {
+ precision = new DistanceUnit.Distance(
+ GeoDistance.PLANE.calculate(0, 0, factor / 2, factor / 2, DistanceUnit.DEFAULT), // factor/2 because we use Math.round instead of a cast to convert the double to a long
+ DistanceUnit.DEFAULT);
+ }
+ }
+
+ public DistanceUnit.Distance precision() {
+ return precision;
+ }
+
+ /** The number of bytes required to encode a single geo point. */
+ public final int numBytes() {
+ return numBytes;
+ }
+
+ /** The number of bits required to encode a single coordinate of a geo point. */
+ public int numBitsPerCoordinate() {
+ return numBytesPerCoordinate << 3;
+ }
+
+ /** Return the bits that encode a latitude/longitude. */
+ public long encodeCoordinate(double lat) {
+ return Math.round((lat + 180) / factor);
+ }
+
+ /** Decode a sequence of bits into the original coordinate. */
+ public double decodeCoordinate(long bits) {
+ return bits * factor - 180;
+ }
+
+ private void encodeBits(long bits, byte[] out, int offset) {
+ for (int i = 0; i < numBytesPerCoordinate; ++i) {
+ out[offset++] = (byte) bits;
+ bits >>>= 8;
+ }
+ assert bits == 0;
+ }
+
+ private long decodeBits(byte [] in, int offset) {
+ long r = in[offset++] & 0xFFL;
+ for (int i = 1; i < numBytesPerCoordinate; ++i) {
+ r |= (in[offset++] & 0xFFL) << (i * 8); // OR in each byte; plain assignment would drop the low bytes
+ }
+ return r;
+ }
+
+ /** Encode a geo point into a byte-array, over {@link #numBytes()} bytes. */
+ public void encode(double lat, double lon, byte[] out, int offset) {
+ encodeBits(encodeCoordinate(lat), out, offset);
+ encodeBits(encodeCoordinate(lon), out, offset + numBytesPerCoordinate);
+ }
+
+ /** Decode a geo point from a byte-array, reading {@link #numBytes()} bytes. */
+ public GeoPoint decode(byte[] in, int offset, GeoPoint out) {
+ final long latBits = decodeBits(in, offset);
+ final long lonBits = decodeBits(in, offset + numBytesPerCoordinate);
+ return decode(latBits, lonBits, out);
+ }
+
+ /** Decode a geo point from the bits of the encoded latitude and longitudes. */
+ public GeoPoint decode(long latBits, long lonBits, GeoPoint out) {
+ final double lat = decodeCoordinate(latBits);
+ final double lon = decodeCoordinate(lonBits);
+ return out.reset(lat, lon);
+ }
+
+ }
+
+ private final ContentPath.Type pathType;
+
+ private final boolean enableLatLon;
+
+ private final boolean enableGeoHash;
+
+ private final boolean enableGeohashPrefix;
+
+ private final Integer precisionStep;
+
+ private final int geoHashPrecision;
+
+ private final DoubleFieldMapper latMapper;
+
+ private final DoubleFieldMapper lonMapper;
+
+ private final StringFieldMapper geohashMapper;
+
+ private final boolean validateLon;
+ private final boolean validateLat;
+
+ private final boolean normalizeLon;
+ private final boolean normalizeLat;
+
+ public GeoPointFieldMapper(FieldMapper.Names names, FieldType fieldType, Boolean docValues,
+ NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,
+ PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat,
+ SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings,
+ ContentPath.Type pathType, boolean enableLatLon, boolean enableGeoHash, boolean enableGeohashPrefix, Integer precisionStep, int geoHashPrecision,
+ DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geohashMapper,
+ boolean validateLon, boolean validateLat,
+ boolean normalizeLon, boolean normalizeLat, MultiFields multiFields) {
+ super(names, 1f, fieldType, docValues, null, indexAnalyzer, postingsFormat, docValuesFormat, similarity, null, fieldDataSettings, indexSettings, multiFields, null);
+ this.pathType = pathType;
+ this.enableLatLon = enableLatLon;
+ this.enableGeoHash = enableGeoHash || enableGeohashPrefix; // implicitly enable geohashes if geohash_prefix is set
+ this.enableGeohashPrefix = enableGeohashPrefix;
+ this.precisionStep = precisionStep;
+ this.geoHashPrecision = geoHashPrecision;
+
+ this.latMapper = latMapper;
+ this.lonMapper = lonMapper;
+ this.geohashMapper = geohashMapper;
+
+ this.validateLat = validateLat;
+ this.validateLon = validateLon;
+
+ this.normalizeLat = normalizeLat;
+ this.normalizeLon = normalizeLon;
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("geo_point");
+ }
+
+ public DoubleFieldMapper latMapper() {
+ return latMapper;
+ }
+
+ public DoubleFieldMapper lonMapper() {
+ return lonMapper;
+ }
+
+ public StringFieldMapper geoHashStringMapper() {
+ return this.geohashMapper;
+ }
+
+ int geoHashPrecision() {
+ return geoHashPrecision;
+ }
+
+ public boolean isEnableLatLon() {
+ return enableLatLon;
+ }
+
+ public boolean isEnableGeohashPrefix() {
+ return enableGeohashPrefix;
+ }
+
+ @Override
+ public GeoPoint value(Object value) {
+ if (value instanceof GeoPoint) {
+ return (GeoPoint) value;
+ } else {
+ return GeoPoint.parseFromLatLon(value.toString());
+ }
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ throw new UnsupportedOperationException("Parsing is implemented in parse(), this method should NEVER be called");
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(pathType);
+ context.path().add(name());
+
+ XContentParser.Token token = context.parser().currentToken();
+ if (token == XContentParser.Token.START_ARRAY) {
+ token = context.parser().nextToken();
+ if (token == XContentParser.Token.START_ARRAY) {
+ // it's an array of arrays of lon/lat pairs [ [1.2, 1.3], [1.4, 1.5] ]
+ while (token != XContentParser.Token.END_ARRAY) {
+ token = context.parser().nextToken();
+ double lon = context.parser().doubleValue();
+ token = context.parser().nextToken();
+ double lat = context.parser().doubleValue();
+ while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY) {
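+ // consume and ignore any additional coordinate values (e.g. altitude)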
+
+ }
+ parseLatLon(context, lat, lon);
+ token = context.parser().nextToken();
+ }
+ } else {
+ // it's an array of other possible values
+ if (token == XContentParser.Token.VALUE_NUMBER) {
+ double lon = context.parser().doubleValue();
+ token = context.parser().nextToken();
+ double lat = context.parser().doubleValue();
+ while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY) {
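+ // consume and ignore any additional coordinate values (e.g. altitude)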
+
+ }
+ parseLatLon(context, lat, lon);
+ } else {
+ while (token != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ parseObjectLatLon(context);
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ parseStringLatLon(context);
+ }
+ token = context.parser().nextToken();
+ }
+ }
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ parseObjectLatLon(context);
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ parseStringLatLon(context);
+ }
+
+ context.path().remove();
+ context.path().pathType(origPathType);
+ }
+
+ private void parseStringLatLon(ParseContext context) throws IOException {
+ String value = context.parser().text();
+ int comma = value.indexOf(',');
+ if (comma != -1) {
+ double lat = Double.parseDouble(value.substring(0, comma).trim());
+ double lon = Double.parseDouble(value.substring(comma + 1).trim());
+ parseLatLon(context, lat, lon);
+ } else { // geo hash
+ parseGeohash(context, value);
+ }
+ }
+
+ private void parseObjectLatLon(ParseContext context) throws IOException {
+ XContentParser.Token token;
+ String currentName = context.parser().currentName();
+ Double lat = null;
+ Double lon = null;
+ String geohash = null;
+ while ((token = context.parser().nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentName = context.parser().currentName();
+ } else if (token.isValue()) {
+ if (currentName.equals(Names.LAT)) {
+ lat = context.parser().doubleValue();
+ } else if (currentName.equals(Names.LON)) {
+ lon = context.parser().doubleValue();
+ } else if (currentName.equals(Names.GEOHASH)) {
+ geohash = context.parser().text();
+ }
+ }
+ }
+ if (geohash != null) {
+ parseGeohash(context, geohash);
+ } else if (lat != null && lon != null) {
+ parseLatLon(context, lat, lon);
+ }
+ }
+
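+ /**
+ * Indexes the geohash, truncated to the configured precision. When geohash_prefix is
+ * enabled, every prefix of the truncated geohash (down to a single character) is indexed
+ * as well, so prefix lookups become plain term queries.
+ */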
+ private void parseGeohashField(ParseContext context, String geohash) throws IOException {
+ int len = Math.min(geoHashPrecision, geohash.length());
+ int min = enableGeohashPrefix ? 1 : len; // use the truncated length so a precision-capped geohash is still indexed
+
+ for (int i = len; i >= min; i--) {
+ context.externalValue(geohash.substring(0, i));
+ // side effect of this call is adding the field
+ geohashMapper.parse(context);
+ }
+ }
+
+ private void parseLatLon(ParseContext context, double lat, double lon) throws IOException {
+ parse(context, new GeoPoint(lat, lon), null);
+ }
+
+ private void parseGeohash(ParseContext context, String geohash) throws IOException {
+ GeoPoint point = GeoHashUtils.decode(geohash);
+ parse(context, point, geohash);
+ }
+
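+ /**
+ * Shared indexing path for all input formats: normalizes and validates the point, then
+ * adds the "lat,lon" string field, the geohash field (and its prefixes), the numeric
+ * lat/lon sub-fields, the binary doc-values entry and any multi-fields, each only if enabled.
+ */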
+ private void parse(ParseContext context, GeoPoint point, String geohash) throws IOException {
+ if (normalizeLat || normalizeLon) {
+ GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+ }
+
+ if (validateLat) {
+ if (point.lat() > 90.0 || point.lat() < -90.0) {
+ throw new ElasticsearchIllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
+ }
+ }
+ if (validateLon) {
+ if (point.lon() > 180.0 || point.lon() < -180) {
+ throw new ElasticsearchIllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
+ }
+ }
+
+ if (fieldType.indexed() || fieldType.stored()) {
+ Field field = new Field(names.indexName(), Double.toString(point.lat()) + ',' + Double.toString(point.lon()), fieldType);
+ context.doc().add(field);
+ }
+ if (enableGeoHash) {
+ if (geohash == null) {
+ geohash = GeoHashUtils.encode(point.lat(), point.lon());
+ }
+ parseGeohashField(context, geohash);
+ }
+ if (enableLatLon) {
+ context.externalValue(point.lat());
+ latMapper.parse(context);
+ context.externalValue(point.lon());
+ lonMapper.parse(context);
+ }
+ if (hasDocValues()) {
+ CustomGeoPointDocValuesField field = (CustomGeoPointDocValuesField) context.doc().getByKey(names().indexName());
+ if (field == null) {
+ field = new CustomGeoPointDocValuesField(names().indexName(), point.lat(), point.lon());
+ context.doc().addWithKey(names().indexName(), field);
+ } else {
+ field.add(point.lat(), point.lon());
+ }
+ }
+ multiFields.parse(this, context);
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ if (latMapper != null) {
+ latMapper.close();
+ }
+ if (lonMapper != null) {
+ lonMapper.close();
+ }
+ if (geohashMapper != null) {
+ geohashMapper.close();
+ }
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ // TODO: geo-specific properties
+ }
+
+ @Override
+ public void traverse(FieldMapperListener fieldMapperListener) {
+ super.traverse(fieldMapperListener);
+ if (enableGeoHash) {
+ geohashMapper.traverse(fieldMapperListener);
+ }
+ if (enableLatLon) {
+ latMapper.traverse(fieldMapperListener);
+ lonMapper.traverse(fieldMapperListener);
+ }
+ }
+
+ @Override
+ public void traverse(ObjectMapperListener objectMapperListener) {
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+ if (includeDefaults || pathType != Defaults.PATH_TYPE) {
+ builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
+ }
+ if (includeDefaults || enableLatLon != Defaults.ENABLE_LATLON) {
+ builder.field("lat_lon", enableLatLon);
+ }
+ if (includeDefaults || enableGeoHash != Defaults.ENABLE_GEOHASH) {
+ builder.field("geohash", enableGeoHash);
+ }
+ if (includeDefaults || enableGeohashPrefix != Defaults.ENABLE_GEOHASH_PREFIX) {
+ builder.field("geohash_prefix", enableGeohashPrefix);
+ }
+ if (includeDefaults || geoHashPrecision != Defaults.GEO_HASH_PRECISION) {
+ builder.field("geohash_precision", geoHashPrecision);
+ }
+ if (includeDefaults || precisionStep != null) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || validateLat != Defaults.VALIDATE_LAT || validateLon != Defaults.VALIDATE_LON) {
+ if (validateLat && validateLon) {
+ builder.field("validate", true);
+ } else if (!validateLat && !validateLon) {
+ builder.field("validate", false);
+ } else {
+ if (includeDefaults || validateLat != Defaults.VALIDATE_LAT) {
+ builder.field("validate_lat", validateLat);
+ }
+ if (includeDefaults || validateLon != Defaults.VALIDATE_LON) {
+ builder.field("validate_lon", validateLon);
+ }
+ }
+ }
+ if (includeDefaults || normalizeLat != Defaults.NORMALIZE_LAT || normalizeLon != Defaults.NORMALIZE_LON) {
+ if (normalizeLat && normalizeLon) {
+ builder.field("normalize", true);
+ } else if (!normalizeLat && !normalizeLon) {
+ builder.field("normalize", false);
+ } else {
+ if (includeDefaults || normalizeLat != Defaults.NORMALIZE_LAT) {
+ builder.field("normalize_lat", normalizeLat);
+ }
+ if (includeDefaults || normalizeLon != Defaults.NORMALIZE_LON) {
+ builder.field("normalize_lon", normalizeLat);
+ }
+ }
+ }
+ }
+
+ public static class CustomGeoPointDocValuesField extends CustomNumericDocValuesField {
+
+ public static final FieldType TYPE = new FieldType();
+ static {
+ TYPE.setDocValueType(FieldInfo.DocValuesType.BINARY);
+ TYPE.freeze();
+ }
+
+ private final ObjectOpenHashSet<GeoPoint> points;
+
+ public CustomGeoPointDocValuesField(String name, double lat, double lon) {
+ super(name);
+ points = new ObjectOpenHashSet<GeoPoint>(2);
+ points.add(new GeoPoint(lat, lon));
+ }
+
+ public void add(double lat, double lon) {
+ points.add(new GeoPoint(lat, lon));
+ }
+
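+ /** Serializes the distinct points as a fixed-width blob: 16 bytes per point, latitude then longitude as little-endian doubles. */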
+ @Override
+ public BytesRef binaryValue() {
+ final byte[] bytes = new byte[points.size() * 16];
+ int off = 0;
+ for (Iterator<ObjectCursor<GeoPoint>> it = points.iterator(); it.hasNext(); ) {
+ final GeoPoint point = it.next().value;
+ ByteUtils.writeDoubleLE(point.getLat(), bytes, off);
+ ByteUtils.writeDoubleLE(point.getLon(), bytes, off + 8);
+ off += 16;
+ }
+ return new BytesRef(bytes);
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
new file mode 100644
index 0000000..91ac65b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.geo;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.geo.SpatialStrategy;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.geoShapeField;
+
+
+/**
+ * FieldMapper for indexing {@link com.spatial4j.core.shape.Shape}s.
+ * <p/>
+ * Currently shapes can only be indexed and can only be queried through
+ * {@link org.elasticsearch.index.query.GeoShapeFilterParser}; consequently,
+ * much of the behavior in this mapper is disabled.
+ * <p/>
+ * Supported format:
+ * <p/>
+ * "field" : {
+ * "type" : "polygon",
+ * "coordinates" : [
+ * [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]
+ * ]
+ * }
+ */
+public class GeoShapeFieldMapper extends AbstractFieldMapper<String> {
+
+ public static final String CONTENT_TYPE = "geo_shape";
+
+ public static class Names {
+ public static final String TREE = "tree";
+ public static final String TREE_GEOHASH = "geohash";
+ public static final String TREE_QUADTREE = "quadtree";
+ public static final String TREE_LEVELS = "tree_levels";
+ public static final String TREE_PRECISION = "precision";
+ public static final String DISTANCE_ERROR_PCT = "distance_error_pct";
+ public static final String STRATEGY = "strategy";
+ }
+
+ public static class Defaults {
+ public static final String TREE = Names.TREE_GEOHASH;
+ public static final String STRATEGY = SpatialStrategy.RECURSIVE.getStrategyName();
+ public static final int GEOHASH_LEVELS = GeoUtils.geoHashLevelsForPrecision("50m");
+ public static final int QUADTREE_LEVELS = GeoUtils.quadTreeLevelsForPrecision("50m");
+ public static final double DISTANCE_ERROR_PCT = 0.025d;
+
+ public static final FieldType FIELD_TYPE = new FieldType();
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setStored(false);
+ FIELD_TYPE.setStoreTermVectors(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, GeoShapeFieldMapper> {
+
+ private String tree = Defaults.TREE;
+ private String strategyName = Defaults.STRATEGY;
+ private int treeLevels = 0;
+ private double precisionInMeters = -1;
+ private double distanceErrorPct = Defaults.DISTANCE_ERROR_PCT;
+
+ private SpatialPrefixTree prefixTree;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ }
+
+ public Builder tree(String tree) {
+ this.tree = tree;
+ return this;
+ }
+
+ public Builder strategy(String strategy) {
+ this.strategyName = strategy;
+ return this;
+ }
+
+ public Builder treeLevelsByDistance(double meters) {
+ this.precisionInMeters = meters;
+ return this;
+ }
+
+ public Builder treeLevels(int treeLevels) {
+ this.treeLevels = treeLevels;
+ return this;
+ }
+
+ public Builder distanceErrorPct(double distanceErrorPct) {
+ this.distanceErrorPct = distanceErrorPct;
+ return this;
+ }
+
+ @Override
+ public GeoShapeFieldMapper build(BuilderContext context) {
+
+ final FieldMapper.Names names = buildNames(context);
+ if (Names.TREE_GEOHASH.equals(tree)) {
+ prefixTree = new GeohashPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.GEOHASH_LEVELS, true));
+ } else if (Names.TREE_QUADTREE.equals(tree)) {
+ prefixTree = new QuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false));
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unknown prefix tree type [" + tree + "]");
+ }
+
+ return new GeoShapeFieldMapper(names, prefixTree, strategyName, distanceErrorPct, fieldType, postingsProvider,
+ docValuesProvider, multiFieldsBuilder.build(this, context), copyTo);
+ }
+ }
+
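+ /**
+ * Resolves the number of prefix-tree levels: the maximum of the explicitly configured
+ * tree_levels and the levels implied by the requested precision distance, falling back
+ * to the 50m-based defaults when neither is set.
+ */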
+ private static final int getLevels(int treeLevels, double precisionInMeters, int defaultLevels, boolean geoHash) {
+ if (treeLevels > 0 || precisionInMeters >= 0) {
+ return Math.max(treeLevels, precisionInMeters >= 0 ? (geoHash ? GeoUtils.geoHashLevelsForPrecision(precisionInMeters)
+ : GeoUtils.quadTreeLevelsForPrecision(precisionInMeters)) : 0);
+ }
+ return defaultLevels;
+ }
+
+
+ public static class TypeParser implements Mapper.TypeParser {
+
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ Builder builder = geoShapeField(name);
+
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (Names.TREE.equals(fieldName)) {
+ builder.tree(fieldNode.toString());
+ } else if (Names.TREE_LEVELS.equals(fieldName)) {
+ builder.treeLevels(Integer.parseInt(fieldNode.toString()));
+ } else if (Names.TREE_PRECISION.equals(fieldName)) {
+ builder.treeLevelsByDistance(DistanceUnit.parse(fieldNode.toString(), DistanceUnit.DEFAULT, DistanceUnit.DEFAULT));
+ } else if (Names.DISTANCE_ERROR_PCT.equals(fieldName)) {
+ builder.distanceErrorPct(Double.parseDouble(fieldNode.toString()));
+ } else if (Names.STRATEGY.equals(fieldName)) {
+ builder.strategy(fieldNode.toString());
+ }
+ }
+ return builder;
+ }
+ }
+
+ private final PrefixTreeStrategy defaultStrategy;
+ private final RecursivePrefixTreeStrategy recursiveStrategy;
+ private final TermQueryPrefixTreeStrategy termStrategy;
+
+ public GeoShapeFieldMapper(FieldMapper.Names names, SpatialPrefixTree tree, String defaultStrategyName, double distanceErrorPct,
+ FieldType fieldType, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ MultiFields multiFields, CopyTo copyTo) {
+ super(names, 1, fieldType, null, null, null, postingsProvider, docValuesProvider, null, null, null, null, multiFields, copyTo);
+ this.recursiveStrategy = new RecursivePrefixTreeStrategy(tree, names.indexName());
+ this.recursiveStrategy.setDistErrPct(distanceErrorPct);
+ this.termStrategy = new TermQueryPrefixTreeStrategy(tree, names.indexName());
+ this.termStrategy.setDistErrPct(distanceErrorPct);
+ this.defaultStrategy = resolveStrategy(defaultStrategyName);
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return null;
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
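+ /**
+ * Parses a shape from the current parser position via ShapeBuilder and indexes the
+ * prefix-tree terms produced by the default spatial strategy.
+ */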
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ try {
+ ShapeBuilder shape = ShapeBuilder.parse(context.parser());
+ if (shape == null) {
+ return;
+ }
+ Field[] fields = defaultStrategy.createIndexableFields(shape.build());
+ if (fields == null || fields.length == 0) {
+ return;
+ }
+ for (Field field : fields) {
+ if (!customBoost()) {
+ field.setBoost(boost);
+ }
+ if (context.listener().beforeFieldAdded(this, field, context)) {
+ context.doc().add(field);
+ }
+ }
+ } catch (Exception e) {
+ throw new MapperParsingException("failed to parse [" + names.fullName() + "]", e);
+ }
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ builder.field("type", contentType());
+
+ // TODO: Come up with a better way to get the name, maybe pass it from builder
+ if (defaultStrategy.getGrid() instanceof GeohashPrefixTree) {
+ // Don't emit the tree name since GeohashPrefixTree is the default
+ // Only emit the tree levels if it isn't the default value
+ if (includeDefaults || defaultStrategy.getGrid().getMaxLevels() != Defaults.GEOHASH_LEVELS) {
+ builder.field(Names.TREE_LEVELS, defaultStrategy.getGrid().getMaxLevels());
+ }
+ } else {
+ builder.field(Names.TREE, Names.TREE_QUADTREE);
+ if (includeDefaults || defaultStrategy.getGrid().getMaxLevels() != Defaults.QUADTREE_LEVELS) {
+ builder.field(Names.TREE_LEVELS, defaultStrategy.getGrid().getMaxLevels());
+ }
+ }
+
+ if (includeDefaults || defaultStrategy.getDistErrPct() != Defaults.DISTANCE_ERROR_PCT) {
+ builder.field(Names.DISTANCE_ERROR_PCT, defaultStrategy.getDistErrPct());
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public String value(Object value) {
+ throw new UnsupportedOperationException("GeoShape fields cannot be converted to String values");
+ }
+
+ public PrefixTreeStrategy defaultStrategy() {
+ return this.defaultStrategy;
+ }
+
+ public PrefixTreeStrategy recursiveStrategy() {
+ return this.recursiveStrategy;
+ }
+
+ public PrefixTreeStrategy termStrategy() {
+ return this.termStrategy;
+ }
+
+ public PrefixTreeStrategy resolveStrategy(String strategyName) {
+ if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) {
+ return recursiveStrategy;
+ }
+ if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) {
+ return termStrategy;
+ }
+ throw new ElasticsearchIllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
new file mode 100644
index 0000000..82055f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.all.AllField;
+import org.elasticsearch.common.lucene.all.AllTermQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.all;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ *
+ */
+public class AllFieldMapper extends AbstractFieldMapper<Void> implements InternalMapper, RootMapper {
+
+ public interface IncludeInAll extends Mapper {
+
+ void includeInAll(Boolean includeInAll);
+
+ void includeInAllIfNotSet(Boolean includeInAll);
+
+ void unsetIncludeInAll();
+ }
+
+ public static final String NAME = "_all";
+
+ public static final String CONTENT_TYPE = "_all";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = AllFieldMapper.NAME;
+ public static final String INDEX_NAME = AllFieldMapper.NAME;
+ public static final boolean ENABLED = true;
+
+ public static final FieldType FIELD_TYPE = new FieldType();
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(true);
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, AllFieldMapper> {
+
+ private boolean enabled = Defaults.ENABLED;
+
+ // an internal flag, automatically set if we encounter boosting
+ boolean autoBoost = false;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ indexName = Defaults.INDEX_NAME;
+ }
+
+ public Builder enabled(boolean enabled) {
+ this.enabled = enabled;
+ return this;
+ }
+
+ @Override
+ public AllFieldMapper build(BuilderContext context) {
+ // In case the mapping overrides these
+ fieldType.setIndexed(true);
+ fieldType.setTokenized(true);
+
+ return new AllFieldMapper(name, fieldType, indexAnalyzer, searchAnalyzer, enabled, autoBoost, postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ AllFieldMapper.Builder builder = all();
+ parseField(builder, builder.name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ builder.enabled(nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("auto_boost")) {
+ builder.autoBoost = nodeBooleanValue(fieldNode);
+ }
+ }
+ return builder;
+ }
+ }
+
+
+ private boolean enabled;
+ // The autoBoost flag is set automatically based on indexed documents: if a document that
+ // is part of _all is indexed with a custom boost value, the flag flips to true. This
+ // optimizes the common case where fields carry no boost, letting us use a plain TermQuery
+ // instead of the payload-aware AllTermQuery.
+ private volatile boolean autoBoost;
+
+ public AllFieldMapper() {
+ this(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE), null, null, Defaults.ENABLED, false, null, null, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected AllFieldMapper(String name, FieldType fieldType, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,
+ boolean enabled, boolean autoBoost, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity, Loading normsLoading,
+ @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, name, name, name), 1.0f, fieldType, null, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider,
+ similarity, normsLoading, fieldDataSettings, indexSettings);
+ this.enabled = enabled;
+ this.autoBoost = autoBoost;
+
+ }
+
+ public boolean enabled() {
+ return this.enabled;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
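+ /**
+ * Uses the payload-aware AllTermQuery only when per-field boosts were actually indexed
+ * (autoBoost) and positions are available; otherwise a plain TermQuery is cheaper.
+ */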
+ @Override
+ public Query queryStringTermQuery(Term term) {
+ if (!autoBoost) {
+ return new TermQuery(term);
+ }
+ if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) {
+ return new AllTermQuery(term);
+ }
+ return new TermQuery(term);
+ }
+
+ @Override
+ public Query termQuery(Object value, QueryParseContext context) {
+ return queryStringTermQuery(names().createIndexNameTerm(indexedValueForSearch(value)));
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // we parse in post parse
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!enabled) {
+ return;
+ }
+ // reset the entries
+ context.allEntries().reset();
+
+ // if the autoBoost flag is not set, and we indexed a doc with custom boost, make
+ // sure to update the flag, and notify mappings on change
+ if (!autoBoost && context.allEntries().customBoost()) {
+ autoBoost = true;
+ context.setMappingsModified();
+ }
+
+ Analyzer analyzer = findAnalyzer(context);
+ fields.add(new AllField(names.indexName(), context.allEntries(), analyzer, fieldType));
+ }
+
+ private Analyzer findAnalyzer(ParseContext context) {
+ Analyzer analyzer = indexAnalyzer;
+ if (analyzer == null) {
+ analyzer = context.analyzer();
+ if (analyzer == null) {
+ analyzer = context.docMapper().indexAnalyzer();
+ if (analyzer == null) {
+ // This should not happen; should we log a warning?
+ analyzer = Lucene.STANDARD_ANALYZER;
+ }
+ }
+ }
+ return analyzer;
+ }
+
+ @Override
+ public Void value(Object value) {
+ return null;
+ }
+
+ @Override
+ public Object valueForSearch(Object value) {
+ return null;
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+ if (!includeDefaults) {
+ // simulate the generation to make sure we don't add unnecessary content if all is default
+ // if all are defaults, no need to write it at all - generating it twice is ok though
+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);
+ XContentBuilder b = new XContentBuilder(builder.contentType().xContent(), bytesStreamOutput);
+ long pos = bytesStreamOutput.position();
+ innerToXContent(b, false);
+ b.flush();
+ if (pos == bytesStreamOutput.position()) {
+ return builder;
+ }
+ }
+ builder.startObject(CONTENT_TYPE);
+ innerToXContent(builder, includeDefaults);
+ builder.endObject();
+ return builder;
+ }
+
+ private void innerToXContent(XContentBuilder builder, boolean includeDefaults) throws IOException {
+ if (includeDefaults || enabled != Defaults.ENABLED) {
+ builder.field("enabled", enabled);
+ }
+ if (includeDefaults || autoBoost) {
+ builder.field("auto_boost", autoBoost);
+ }
+ if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (includeDefaults || fieldType.storeTermVectors() != Defaults.FIELD_TYPE.storeTermVectors()) {
+ builder.field("store_term_vectors", fieldType.storeTermVectors());
+ }
+ if (includeDefaults || fieldType.storeTermVectorOffsets() != Defaults.FIELD_TYPE.storeTermVectorOffsets()) {
+ builder.field("store_term_vector_offsets", fieldType.storeTermVectorOffsets());
+ }
+ if (includeDefaults || fieldType.storeTermVectorPositions() != Defaults.FIELD_TYPE.storeTermVectorPositions()) {
+ builder.field("store_term_vector_positions", fieldType.storeTermVectorPositions());
+ }
+ if (includeDefaults || fieldType.storeTermVectorPayloads() != Defaults.FIELD_TYPE.storeTermVectorPayloads()) {
+ builder.field("store_term_vector_payloads", fieldType.storeTermVectorPayloads());
+ }
+ if (includeDefaults || fieldType.omitNorms() != Defaults.FIELD_TYPE.omitNorms()) {
+ builder.field("omit_norms", fieldType.omitNorms());
+ }
+
+
+ if (indexAnalyzer == null && searchAnalyzer == null) {
+ if (includeDefaults) {
+ builder.field("analyzer", "default");
+ }
+ } else if (indexAnalyzer == null) {
+ // searchAnalyzer != null
+ if (includeDefaults || !searchAnalyzer.name().startsWith("_")) {
+ builder.field("search_analyzer", searchAnalyzer.name());
+ }
+ } else if (searchAnalyzer == null) {
+ // indexAnalyzer != null
+ if (includeDefaults || !indexAnalyzer.name().startsWith("_")) {
+ builder.field("index_analyzer", indexAnalyzer.name());
+ }
+ } else if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
+ // indexAnalyzer == searchAnalyzer
+ if (includeDefaults || !indexAnalyzer.name().startsWith("_")) {
+ builder.field("analyzer", indexAnalyzer.name());
+ }
+ } else {
+ // both are there but different
+ if (includeDefaults || !indexAnalyzer.name().startsWith("_")) {
+ builder.field("index_analyzer", indexAnalyzer.name());
+ }
+ if (includeDefaults || !searchAnalyzer.name().startsWith("_")) {
+ builder.field("search_analyzer", searchAnalyzer.name());
+ }
+ }
+
+ if (similarity() != null) {
+ builder.field("similarity", similarity().name());
+ } else if (includeDefaults) {
+ builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY);
+ }
+
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+ }
+
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ // do nothing here, no merging, but also no exception
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/AnalyzerMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/AnalyzerMapper.java
new file mode 100644
index 0000000..25f0a33
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/AnalyzerMapper.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.*;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.analyzer;
+
+/**
+ *
+ */
+public class AnalyzerMapper implements Mapper, InternalMapper, RootMapper {
+
+ public static final String NAME = "_analyzer";
+ public static final String CONTENT_TYPE = "_analyzer";
+
+ public static class Defaults {
+ public static final String PATH = "_analyzer";
+ }
+
+ public static class Builder extends Mapper.Builder<Builder, AnalyzerMapper> {
+
+ private String field = Defaults.PATH;
+
+ public Builder() {
+ super(CONTENT_TYPE);
+ this.builder = this;
+ }
+
+ public Builder field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ @Override
+ public AnalyzerMapper build(BuilderContext context) {
+ return new AnalyzerMapper(field);
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ AnalyzerMapper.Builder builder = analyzer();
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("path")) {
+ builder.field(fieldNode.toString());
+ }
+ }
+ return builder;
+ }
+ }
+
+ private final String path;
+
+ public AnalyzerMapper() {
+ this(Defaults.PATH);
+ }
+
+ public AnalyzerMapper(String path) {
+ this.path = path.intern();
+ }
+
+ @Override
+ public String name() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ Analyzer analyzer = context.docMapper().mappers().indexAnalyzer();
+ if (path != null) {
+ String value = null;
+ List<IndexableField> fields = context.doc().getFields();
+ for (int i = 0, fieldsSize = fields.size(); i < fieldsSize; i++) {
+ IndexableField field = fields.get(i);
+ if (field.name().equals(path)) {
+ value = field.stringValue();
+ break;
+ }
+ }
+ if (value == null) {
+ value = context.ignoredValue(path);
+ }
+ if (value != null) {
+ analyzer = context.analysisService().analyzer(value);
+ if (analyzer == null) {
+ throw new MapperParsingException("No analyzer found for [" + value + "] from path [" + path + "]");
+ }
+ analyzer = context.docMapper().mappers().indexAnalyzer(analyzer);
+ }
+ }
+ context.analyzer(analyzer);
+ }
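+
+ // Illustrative sketch (hypothetical mapping and field names): given
+ //
+ // { "tweet": { "_analyzer": { "path": "lang_analyzer" } } }
+ //
+ // a document like { "lang_analyzer": "spanish", "body": "..." } is indexed with
+ // the "spanish" analyzer resolved from the analysis service by postParse() above;
+ // if no such field or ignored value is found, the type's index analyzer is kept.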
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ }
+
+ @Override
+ public void traverse(FieldMapperListener fieldMapperListener) {
+ }
+
+ @Override
+ public void traverse(ObjectMapperListener objectMapperListener) {
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (path.equals(Defaults.PATH)) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ // the early return above guarantees a non-default path here
+ builder.field("path", path);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void close() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/BoostFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/BoostFieldMapper.java
new file mode 100644
index 0000000..82fc47f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/BoostFieldMapper.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NumericFloatAnalyzer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.FloatFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/**
+ *
+ */
+public class BoostFieldMapper extends NumberFieldMapper<Float> implements InternalMapper, RootMapper {
+
+ public static final String CONTENT_TYPE = "_boost";
+ public static final String NAME = "_boost";
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final String NAME = "_boost";
+ public static final Float NULL_VALUE = null;
+
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(false);
+ FIELD_TYPE.setStored(false);
+ }
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, BoostFieldMapper> {
+
+ protected Float nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(float nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public BoostFieldMapper build(BuilderContext context) {
+ return new BoostFieldMapper(name, buildIndexName(context),
+ precisionStep, boost, fieldType, docValues, nullValue, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ String name = node.get("name") == null ? BoostFieldMapper.Defaults.NAME : node.get("name").toString();
+ BoostFieldMapper.Builder builder = MapperBuilders.boost(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(nodeFloatValue(propNode));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private final Float nullValue;
+
+ public BoostFieldMapper() {
+ this(Defaults.NAME, Defaults.NAME);
+ }
+
+ protected BoostFieldMapper(String name, String indexName) {
+ this(name, indexName, Defaults.PRECISION_STEP, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,
+ Defaults.NULL_VALUE, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected BoostFieldMapper(String name, String indexName, int precisionStep, float boost, FieldType fieldType, Boolean docValues, Float nullValue,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, indexName, indexName, name), precisionStep, boost, fieldType, docValues, Defaults.IGNORE_MALFORMED, Defaults.COERCE,
+ NumericFloatAnalyzer.buildNamedAnalyzer(precisionStep), NumericFloatAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),
+ postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings, MultiFields.empty(), null);
+ this.nullValue = nullValue;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 32;
+ }
+
+ @Override
+ public Float value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).floatValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToFloat((BytesRef) value);
+ }
+ return Float.parseFloat(value.toString());
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ int intValue = NumericUtils.floatToSortableInt(parseValue(value));
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.intToPrefixCoded(intValue, precisionStep(), bytesRef);
+ return bytesRef;
+ }
+
+ private float parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).floatValue();
+ }
+ if (value instanceof BytesRef) {
+ return Float.parseFloat(((BytesRef) value).utf8ToString());
+ }
+ return Float.parseFloat(value.toString());
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ float iValue = Float.parseFloat(value);
+ float iSim = fuzziness.asFloat();
+ return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newFloatRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newFloatRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ return NumericRangeFilter.newFloatRange(names.indexName(), precisionStep,
+ nullValue,
+ nullValue,
+ true, true);
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // we override parse since we want to handle cases where it is not indexed and not stored (the default)
+ float value = parseFloatValue(context);
+ if (!Float.isNaN(value)) {
+ context.docBoost(value);
+ }
+ super.parse(context);
+ }
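+
+ // Illustrative sketch (hypothetical document): with the default mapping, a
+ // document such as { "_boost": 2.0, "body": "..." } applies a document-level
+ // boost of 2.0 through context.docBoost(), even though _boost itself is
+ // neither indexed nor stored by default.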
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ final float value = parseFloatValue(context);
+ if (Float.isNaN(value)) {
+ return;
+ }
+ context.docBoost(value);
+ fields.add(new FloatFieldMapper.CustomFloatNumericField(this, value, fieldType));
+ }
+
+ private float parseFloatValue(ParseContext context) throws IOException {
+ float value;
+ if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
+ if (nullValue == null) {
+ return Float.NaN;
+ }
+ value = nullValue;
+ } else {
+ value = context.parser().floatValue(coerce.value());
+ }
+ return value;
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // all are defaults, don't write it at all
+ if (!includeDefaults && name().equals(Defaults.NAME) && nullValue == null &&
+ fieldType.indexed() == Defaults.FIELD_TYPE.indexed() &&
+ fieldType.stored() == Defaults.FIELD_TYPE.stored() &&
+ customFieldDataSettings == null) {
+ return builder;
+ }
+ builder.startObject(contentType());
+ if (includeDefaults || !name().equals(Defaults.NAME)) {
+ builder.field("name", name());
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
+ builder.field("index", fieldType.indexed());
+ }
+ if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ // do nothing here, no merging, but also no exception
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/EnabledAttributeMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/EnabledAttributeMapper.java
new file mode 100644
index 0000000..5eda7d8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/EnabledAttributeMapper.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.internal;
+
+public enum EnabledAttributeMapper {
+ ENABLED(true), UNSET_ENABLED(true), DISABLED(false), UNSET_DISABLED(false);
+
+ public final boolean enabled;
+
+ EnabledAttributeMapper(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public boolean unset() {
+ return this == UNSET_DISABLED || this == UNSET_ENABLED;
+ }
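+
+ // Illustrative usage sketch (assumed, mirroring the TypeParsers that consume
+ // this enum): an explicit mapping value picks ENABLED or DISABLED, while the
+ // UNSET_* constants carry a built-in default so that merge logic can tell an
+ // explicit user setting apart from a default and only override the latter:
+ //
+ // EnabledAttributeMapper state = nodeBooleanValue(fieldNode)
+ // ? EnabledAttributeMapper.ENABLED
+ // : EnabledAttributeMapper.DISABLED;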
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
new file mode 100644
index 0000000..7c1960e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
@@ -0,0 +1,389 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import com.google.common.collect.Iterables;
+import org.apache.lucene.document.BinaryDocValuesField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.RegexpFilter;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.id;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ *
+ */
+public class IdFieldMapper extends AbstractFieldMapper<String> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_id";
+
+ public static final String CONTENT_TYPE = "_id";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = IdFieldMapper.NAME;
+ public static final String INDEX_NAME = IdFieldMapper.NAME;
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(false);
+ FIELD_TYPE.setStored(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final String PATH = null;
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, IdFieldMapper> {
+
+ private String path = Defaults.PATH;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
+ indexName = Defaults.INDEX_NAME;
+ }
+
+ public Builder path(String path) {
+ this.path = path;
+ return builder;
+ }
+
+ @Override
+ public IdFieldMapper build(BuilderContext context) {
+ return new IdFieldMapper(name, indexName, boost, fieldType, docValues, path, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ IdFieldMapper.Builder builder = id();
+ parseField(builder, builder.name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("path")) {
+ builder.path(fieldNode.toString());
+ }
+ }
+ return builder;
+ }
+ }
+
+ private final String path;
+
+ public IdFieldMapper() {
+ this(new FieldType(Defaults.FIELD_TYPE));
+ }
+
+ public IdFieldMapper(FieldType fieldType) {
+ this(Defaults.NAME, Defaults.INDEX_NAME, fieldType, null);
+ }
+
+ protected IdFieldMapper(String name, String indexName, FieldType fieldType, Boolean docValues) {
+ this(name, indexName, Defaults.BOOST, fieldType, docValues, Defaults.PATH, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected IdFieldMapper(String name, String indexName, float boost, FieldType fieldType, Boolean docValues, String path,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, indexName, indexName, name), boost, fieldType, docValues, Lucene.KEYWORD_ANALYZER,
+ Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings);
+ this.path = path;
+ }
+
+ public String path() {
+ return this.path;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public String value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return value.toString();
+ }
+
+ @Override
+ public boolean useTermQueryWithQueryString() {
+ return true;
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.termQuery(value, context);
+ }
+ // no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
+ return new ConstantScoreQuery(termFilter(value, context));
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.termFilter(value, context);
+ }
+ return new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value));
+ }
+
+ @Override
+ public Filter termsFilter(List values, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.termsFilter(values, context);
+ }
+ return new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values));
+ }
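+
+ // Illustrative sketch (hypothetical values): when _id is not indexed (the
+ // default), a term filter for id "1" with query types ["tweet", "user"] is
+ // rewritten to an XTermsFilter on _uid for the terms "tweet#1" and "user#1",
+ // as produced by Uid.createTypeUids above.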
+
+ @Override
+ public Query prefixQuery(Object value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.prefixQuery(value, method, context);
+ }
+ Collection<String> queryTypes = context.queryTypes();
+ if (queryTypes.size() == 1) {
+ PrefixQuery prefixQuery = new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(Iterables.getFirst(queryTypes, null), BytesRefs.toBytesRef(value))));
+ if (method != null) {
+ prefixQuery.setRewriteMethod(method);
+ }
+ return prefixQuery;
+ }
+ BooleanQuery query = new BooleanQuery();
+ for (String queryType : queryTypes) {
+ PrefixQuery prefixQuery = new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))));
+ if (method != null) {
+ prefixQuery.setRewriteMethod(method);
+ }
+ query.add(prefixQuery, BooleanClause.Occur.SHOULD);
+ }
+ return query;
+ }
+
+ @Override
+ public Filter prefixFilter(Object value, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.prefixFilter(value, context);
+ }
+ Collection<String> queryTypes = context.queryTypes();
+ if (queryTypes.size() == 1) {
+ return new PrefixFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(Iterables.getFirst(queryTypes, null), BytesRefs.toBytesRef(value))));
+ }
+ XBooleanFilter filter = new XBooleanFilter();
+ for (String queryType : queryTypes) {
+ filter.add(new PrefixFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value)))), BooleanClause.Occur.SHOULD);
+ }
+ return filter;
+ }
+
+ @Override
+ public Query regexpQuery(Object value, int flags, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.regexpQuery(value, flags, method, context);
+ }
+ Collection<String> queryTypes = context.queryTypes();
+ if (queryTypes.size() == 1) {
+ RegexpQuery regexpQuery = new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(Iterables.getFirst(queryTypes, null), BytesRefs.toBytesRef(value))), flags);
+ if (method != null) {
+ regexpQuery.setRewriteMethod(method);
+ }
+ return regexpQuery;
+ }
+ BooleanQuery query = new BooleanQuery();
+ for (String queryType : queryTypes) {
+ RegexpQuery regexpQuery = new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags);
+ if (method != null) {
+ regexpQuery.setRewriteMethod(method);
+ }
+ query.add(regexpQuery, BooleanClause.Occur.SHOULD);
+ }
+ return query;
+ }
+
+ public Filter regexpFilter(Object value, int flags, @Nullable QueryParseContext context) {
+ if (fieldType.indexed() || context == null) {
+ return super.regexpFilter(value, flags, context);
+ }
+ Collection<String> queryTypes = context.queryTypes();
+ if (queryTypes.size() == 1) {
+ return new RegexpFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(Iterables.getFirst(queryTypes, null), BytesRefs.toBytesRef(value))), flags);
+ }
+ XBooleanFilter filter = new XBooleanFilter();
+ for (String queryType : queryTypes) {
+ filter.add(new RegexpFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags), BooleanClause.Occur.SHOULD);
+ }
+ return filter;
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ if (context.sourceToParse().id() != null) {
+ context.id(context.sourceToParse().id());
+ super.parse(context);
+ }
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ if (context.id() == null && !context.sourceToParse().flyweight()) {
+ throw new MapperParsingException("No id found while parsing the content source");
+ }
+ // the id either gets set in the preParse phase, or gets parsed from the content...
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ XContentParser parser = context.parser();
+ if (parser.currentName() != null && parser.currentName().equals(Defaults.NAME) && parser.currentToken().isValue()) {
+ // we are in the parse phase
+ String id = parser.text();
+ if (context.id() != null && !context.id().equals(id)) {
+ throw new MapperParsingException("Provided id [" + context.id() + "] does not match the content one [" + id + "]");
+ }
+ context.id(id);
+ } // else we are in the pre/post parse phase
+
+ if (fieldType.indexed() || fieldType.stored()) {
+ fields.add(new Field(names.indexName(), context.id(), fieldType));
+ }
+ if (hasDocValues()) {
+ fields.add(new BinaryDocValuesField(names.indexName(), new BytesRef(context.id())));
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // if all are defaults, there is no point in writing it at all
+ if (!includeDefaults && fieldType.stored() == Defaults.FIELD_TYPE.stored()
+ && fieldType.indexed() == Defaults.FIELD_TYPE.indexed()
+ && path == Defaults.PATH
+ && customFieldDataSettings == null
+ && (postingsFormat == null || postingsFormat.name().equals(defaultPostingFormat()))
+ && (docValuesFormat == null || docValuesFormat.name().equals(defaultDocValuesFormat()))) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
+ builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
+ }
+ if (includeDefaults || path != Defaults.PATH) {
+ builder.field("path", path);
+ }
+
+ if (postingsFormat != null) {
+ if (includeDefaults || !postingsFormat.name().equals(defaultPostingFormat())) {
+ builder.field("postings_format", postingsFormat.name());
+ }
+ } else if (includeDefaults) {
+ String format = defaultPostingFormat();
+ if (format == null) {
+ format = PostingsFormatService.DEFAULT_FORMAT;
+ }
+ builder.field("postings_format", format);
+ }
+
+ if (docValuesFormat != null) {
+ if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
+ builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
+ }
+ } else if (includeDefaults) {
+ String format = defaultDocValuesFormat();
+ if (format == null) {
+ format = DocValuesFormatService.DEFAULT_FORMAT;
+ }
+ builder.field(DOC_VALUES_FORMAT, format);
+ }
+
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ // do nothing here, no merging, but also no exception
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
new file mode 100644
index 0000000..b59934d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ *
+ */
+public class IndexFieldMapper extends AbstractFieldMapper<String> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_index";
+
+ public static final String CONTENT_TYPE = "_index";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = IndexFieldMapper.NAME;
+ public static final String INDEX_NAME = IndexFieldMapper.NAME;
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setStored(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, IndexFieldMapper> {
+
+ private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
+ indexName = Defaults.INDEX_NAME;
+ }
+
+ public Builder enabled(EnabledAttributeMapper enabledState) {
+ this.enabledState = enabledState;
+ return this;
+ }
+
+ @Override
+ public IndexFieldMapper build(BuilderContext context) {
+ return new IndexFieldMapper(name, indexName, boost, fieldType, docValues, enabledState, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ IndexFieldMapper.Builder builder = MapperBuilders.index();
+ parseField(builder, builder.name, node, parserContext);
+
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ EnabledAttributeMapper mapper = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
+ builder.enabled(mapper);
+ }
+ }
+ return builder;
+ }
+ }
+
+ private EnabledAttributeMapper enabledState;
+
+ public IndexFieldMapper() {
+ this(Defaults.NAME, Defaults.INDEX_NAME);
+ }
+
+ protected IndexFieldMapper(String name, String indexName) {
+ this(name, indexName, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null, Defaults.ENABLED_STATE, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ public IndexFieldMapper(String name, String indexName, float boost, FieldType fieldType, Boolean docValues, EnabledAttributeMapper enabledState,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, indexName, indexName, name), boost, fieldType, docValues, Lucene.KEYWORD_ANALYZER,
+ Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings);
+ this.enabledState = enabledState;
+ }
+
+ public boolean enabled() {
+ return this.enabledState.enabled;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ public String value(Document document) {
+ Field field = (Field) document.getField(names.indexName());
+ return field == null ? null : value(field);
+ }
+
+ @Override
+ public String value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return value.toString();
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ // we parse it in preParse and not in parse, since it is not part of the root object
+ super.parse(context);
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!enabledState.enabled) {
+ return;
+ }
+ fields.add(new Field(names.indexName(), context.index(), fieldType));
+ }
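+
+ // Illustrative note (hypothetical mapping): with { "_index": { "enabled": true } }
+ // (it is disabled by default), every document gains a non-tokenized "_index"
+ // field holding the index name from context.index(), making the owning index
+ // queryable like any other field.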
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // if all defaults, no need to write it at all
+ if (!includeDefaults && fieldType().stored() == Defaults.FIELD_TYPE.stored() && enabledState == Defaults.ENABLED_STATE) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ if (includeDefaults || (fieldType().stored() != Defaults.FIELD_TYPE.stored() && enabledState.enabled)) {
+ builder.field("store", fieldType().stored());
+ }
+ if (includeDefaults || enabledState != Defaults.ENABLED_STATE) {
+ builder.field("enabled", enabledState.enabled);
+ }
+
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
+ this.enabledState = indexFieldMapperMergeWith.enabledState;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
new file mode 100644
index 0000000..bf85e60
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.parent;
+
+/**
+ *
+ */
+public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_parent";
+
+ public static final String CONTENT_TYPE = "_parent";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = ParentFieldMapper.NAME;
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends Mapper.Builder<Builder, ParentFieldMapper> {
+
+ protected String indexName;
+
+ private String type;
+ protected PostingsFormatProvider postingsFormat;
+
+ public Builder() {
+ super(Defaults.NAME);
+ this.indexName = name;
+ }
+
+ public Builder type(String type) {
+ this.type = type;
+ return builder;
+ }
+
+ protected Builder postingsFormat(PostingsFormatProvider postingsFormat) {
+ this.postingsFormat = postingsFormat;
+ return builder;
+ }
+
+ @Override
+ public ParentFieldMapper build(BuilderContext context) {
+ if (type == null) {
+ throw new MapperParsingException("Parent mapping must contain the parent type");
+ }
+ return new ParentFieldMapper(name, indexName, type, postingsFormat, null, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ ParentFieldMapper.Builder builder = parent();
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("type")) {
+ builder.type(fieldNode.toString());
+ } else if (fieldName.equals("postings_format")) {
+ String postingFormatName = fieldNode.toString();
+ builder.postingsFormat(parserContext.postingFormatService().get(postingFormatName));
+ }
+ }
+ return builder;
+ }
+ }
+
+ private final String type;
+ private final BytesRef typeAsBytes;
+
+ protected ParentFieldMapper(String name, String indexName, String type, PostingsFormatProvider postingsFormat, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,
+ Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsFormat, null, null, null, fieldDataSettings, indexSettings);
+ this.type = type;
+ this.typeAsBytes = type == null ? null : new BytesRef(type);
+ }
+
+ public ParentFieldMapper() {
+ this(Defaults.NAME, Defaults.NAME, null, null, null, null);
+ }
+
+ public String type() {
+ return type;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ parse(context);
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!active()) {
+ return;
+ }
+
+ if (context.parser().currentName() != null && context.parser().currentName().equals(Defaults.NAME)) {
+ // we are parsing the _parent field itself
+ String parentId = context.parser().text();
+ context.sourceToParse().parent(parentId);
+ fields.add(new Field(names.indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType));
+ } else {
+ // otherwise, we are running in the post-processing phase of the xcontent
+ String parsedParentId = context.doc().get(Defaults.NAME);
+ if (context.sourceToParse().parent() != null) {
+ String parentId = context.sourceToParse().parent();
+ if (parsedParentId == null) {
+ if (parentId == null) {
+ throw new MapperParsingException("No parent id provided, not within the document, and not externally");
+ }
+ // we did not add it in the parsing phase, add it now
+ fields.add(new Field(names.indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType));
+ } else if (parentId != null && !parsedParentId.equals(Uid.createUid(context.stringBuilder(), type, parentId))) {
+ throw new MapperParsingException("Parent id mismatch, document value is [" + Uid.createUid(parsedParentId).id() + "], while external value is [" + parentId + "]");
+ }
+ }
+ }
+ // we have a parent mapping, yet no value was set; ignore it...
+ }
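+
+ // Illustrative sketch (hypothetical values): with a mapping of
+ // { "_parent": { "type": "blog" } }, a child document indexed with parent id
+ // "p1" stores the single term "blog#p1" in _parent, i.e. the result of
+ // Uid.createUid(context.stringBuilder(), "blog", "p1").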
+
+ @Override
+ public Uid value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return Uid.createUid(value.toString());
+ }
+
+ @Override
+ public Object valueForSearch(Object value) {
+ if (value == null) {
+ return null;
+ }
+ String sValue = value.toString();
+ if (sValue == null) {
+ return null;
+ }
+ int index = sValue.indexOf(Uid.DELIMITER);
+ if (index == -1) {
+ return sValue;
+ }
+ return sValue.substring(index + 1);
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ if (value instanceof BytesRef) {
+ BytesRef bytesRef = (BytesRef) value;
+ if (Uid.hasDelimiter(bytesRef)) {
+ return bytesRef;
+ }
+ return Uid.createUidAsBytes(typeAsBytes, bytesRef);
+ }
+ String sValue = value.toString();
+ if (sValue.indexOf(Uid.DELIMITER) == -1) {
+ return Uid.createUidAsBytes(type, sValue);
+ }
+ return super.indexedValueForSearch(value);
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ if (context == null) {
+ return super.termQuery(value, context);
+ }
+ return new ConstantScoreQuery(termFilter(value, context));
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ if (context == null) {
+ return super.termFilter(value, context);
+ }
+ BytesRef bValue = BytesRefs.toBytesRef(value);
+ if (Uid.hasDelimiter(bValue)) {
+ return new TermFilter(new Term(names.indexName(), bValue));
+ }
+
+ List<String> types = new ArrayList<String>(context.mapperService().types().size());
+ for (DocumentMapper documentMapper : context.mapperService()) {
+ if (!documentMapper.parentFieldMapper().active()) {
+ types.add(documentMapper.type());
+ }
+ }
+
+ if (types.isEmpty()) {
+ return Queries.MATCH_NO_FILTER;
+ } else if (types.size() == 1) {
+ return new TermFilter(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)));
+ } else {
+ // we use all non-child types, because we don't know whether the value is an exact uid or not...
+ List<BytesRef> typesValues = new ArrayList<BytesRef>(types.size());
+ for (String type : types) {
+ typesValues.add(Uid.createUidAsBytes(type, bValue));
+ }
+ return new XTermsFilter(names.indexName(), typesValues);
+ }
+ }
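+
+ // Illustrative sketch (hypothetical values): a term filter on the raw parent
+ // id "p1" cannot know which parent type is meant, so with non-child types
+ // ["blog", "user"] it expands to the uid terms "blog#p1" and "user#p1"; a
+ // value that already contains the uid delimiter is used as an exact uid.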
+
+ @Override
+ public Filter termsFilter(List values, @Nullable QueryParseContext context) {
+ if (context == null) {
+ return super.termsFilter(values, context);
+ }
+ // This will not be invoked if values is empty, so don't check for empty
+ if (values.size() == 1) {
+ return termFilter(values.get(0), context);
+ }
+
+ List<String> types = new ArrayList<String>(context.mapperService().types().size());
+ for (DocumentMapper documentMapper : context.mapperService()) {
+ if (!documentMapper.parentFieldMapper().active()) {
+ types.add(documentMapper.type());
+ }
+ }
+
+ List<BytesRef> bValues = new ArrayList<BytesRef>(values.size());
+ for (Object value : values) {
+ BytesRef bValue = BytesRefs.toBytesRef(value);
+ if (Uid.hasDelimiter(bValue)) {
+ bValues.add(bValue);
+ } else {
+ // we use all non-child types, because we don't know whether the value is an exact uid or not...
+ for (String type : types) {
+ bValues.add(Uid.createUidAsBytes(type, bValue));
+ }
+ }
+ }
+ return new XTermsFilter(names.indexName(), bValues);
+ }
+
+ /**
+ * We don't need to analyze the text; we need to convert it to a UID...
+ */
+ @Override
+ public boolean useTermQueryWithQueryString() {
+ return true;
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (!active()) {
+ return builder;
+ }
+
+ builder.startObject(CONTENT_TYPE);
+ builder.field("type", type);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ ParentFieldMapper other = (ParentFieldMapper) mergeWith;
+ if (active() == other.active()) {
+ return;
+ }
+
+ // past the early return, exactly one of the two mappers is active, so adding
+ // or changing the _parent field is always a conflict
+ mergeContext.addConflict("The _parent field can't be added or updated");
+ }
+
+ /**
+ * @return Whether the _parent field is actually used.
+ */
+ public boolean active() {
+ return type != null;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
new file mode 100644
index 0000000..c94e58c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.routing;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ *
+ */
+public class RoutingFieldMapper extends AbstractFieldMapper<String> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_routing";
+ public static final String CONTENT_TYPE = "_routing";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = "_routing";
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final boolean REQUIRED = false;
+ public static final String PATH = null;
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, RoutingFieldMapper> {
+
+ private boolean required = Defaults.REQUIRED;
+
+ private String path = Defaults.PATH;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
+ }
+
+ public Builder required(boolean required) {
+ this.required = required;
+ return builder;
+ }
+
+ public Builder path(String path) {
+ this.path = path;
+ return builder;
+ }
+
+ @Override
+ public RoutingFieldMapper build(BuilderContext context) {
+ return new RoutingFieldMapper(fieldType, required, path, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ RoutingFieldMapper.Builder builder = routing();
+ parseField(builder, builder.name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("required")) {
+ builder.required(nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("path")) {
+ builder.path(fieldNode.toString());
+ }
+ }
+ return builder;
+ }
+ }
+
+
+ private boolean required;
+
+ private final String path;
+
+ public RoutingFieldMapper() {
+ this(new FieldType(Defaults.FIELD_TYPE), Defaults.REQUIRED, Defaults.PATH, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected RoutingFieldMapper(FieldType fieldType, boolean required, String path, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), 1.0f, fieldType, null, Lucene.KEYWORD_ANALYZER,
+ Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings);
+ this.required = required;
+ this.path = path;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ public void markAsRequired() {
+ this.required = true;
+ }
+
+ public boolean required() {
+ return this.required;
+ }
+
+ public String path() {
+ return this.path;
+ }
+
+ public String value(Document document) {
+ Field field = (Field) document.getField(names.indexName());
+ return field == null ? null : value(field);
+ }
+
+ @Override
+ public String value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return value.toString();
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ String routing = context.sourceToParse().routing();
+ if (path != null && routing != null) {
+ // we have a path, so validate that the externally provided routing matches the routing value in the doc...
+ String value = null;
+ Field field = (Field) context.doc().getField(path);
+ if (field != null) {
+ value = field.stringValue();
+ if (value == null) {
+ // maybe it's a numeric field...
+ if (field instanceof NumberFieldMapper.CustomNumericField) {
+ value = ((NumberFieldMapper.CustomNumericField) field).numericAsString();
+ }
+ }
+ }
+ if (value == null) {
+ value = context.ignoredValue(path);
+ }
+ if (!routing.equals(value)) {
+ throw new MapperParsingException("External routing [" + routing + "] and document path routing [" + value + "] mismatch");
+ }
+ }
+ }
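+
+ // Worked example (illustrative, not from the original source): with
+ // path = "user_id", indexing { "user_id" : "7" } with external routing "5"
+ // makes routing = "5" and value = "7" above, so validate(...) throws
+ // MapperParsingException("External routing [5] and document path routing [7] mismatch").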
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // no need to parse here: either we get the routing in the sourceToParse,
+ // in which case we process it in preParse (which is always called),
+ // or we have no routing at all
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ String routing = context.sourceToParse().routing();
+ if (routing != null) {
+ if (!fieldType.indexed() && !fieldType.stored()) {
+ context.ignoredValue(names.indexName(), routing);
+ return;
+ }
+ fields.add(new Field(names.indexName(), routing, fieldType));
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // if all are defaults, there is no point in writing it at all
+ if (!includeDefaults && fieldType.indexed() == Defaults.FIELD_TYPE.indexed() &&
+ fieldType.stored() == Defaults.FIELD_TYPE.stored() && required == Defaults.REQUIRED && path == Defaults.PATH) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
+ builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
+ }
+ if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (includeDefaults || required != Defaults.REQUIRED) {
+ builder.field("required", required);
+ }
+ if (includeDefaults || path != Defaults.PATH) {
+ builder.field("path", path);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ // do nothing here, no merging, but also no exception
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java
new file mode 100644
index 0000000..58035e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.size;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseStore;
+
+public class SizeFieldMapper extends IntegerFieldMapper implements RootMapper {
+
+ public static final String NAME = "_size";
+ public static final String CONTENT_TYPE = "_size";
+
+ public static class Defaults extends IntegerFieldMapper.Defaults {
+ public static final String NAME = CONTENT_TYPE;
+ public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
+
+ public static final FieldType SIZE_FIELD_TYPE = new FieldType(IntegerFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ SIZE_FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, IntegerFieldMapper> {
+
+ protected EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.SIZE_FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder enabled(EnabledAttributeMapper enabled) {
+ this.enabledState = enabled;
+ return builder;
+ }
+
+ @Override
+ public SizeFieldMapper build(BuilderContext context) {
+ return new SizeFieldMapper(enabledState, fieldType, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ SizeFieldMapper.Builder builder = size();
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ builder.enabled(nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED);
+ } else if (fieldName.equals("store")) {
+ builder.store(parseStore(fieldName, fieldNode.toString()));
+ }
+ }
+ return builder;
+ }
+ }
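+
+ // Illustrative only: a mapping this parser would accept, assuming the
+ // usual JSON mapping syntax:
+ //
+ // "_size" : { "enabled" : true, "store" : true }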
+
+ private EnabledAttributeMapper enabledState;
+
+ public SizeFieldMapper() {
+ this(Defaults.ENABLED_STATE, new FieldType(Defaults.SIZE_FIELD_TYPE), null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ public SizeFieldMapper(EnabledAttributeMapper enabled, FieldType fieldType, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(Defaults.NAME), Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, null, Defaults.NULL_VALUE,
+ Defaults.IGNORE_MALFORMED, Defaults.COERCE, postingsProvider, docValuesProvider, null, null, fieldDataSettings,
+ indexSettings, MultiFields.empty(), null);
+ this.enabledState = enabled;
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ @Override
+ protected String contentType() {
+ return Defaults.NAME;
+ }
+
+ public boolean enabled() {
+ return this.enabledState.enabled;
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ // we parse in postParse so the size reflects the stored source, possibly compressed (the source itself is handled in preParse)
+ super.parse(context);
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // nothing to do here, we call the parent in postParse
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!enabledState.enabled) {
+ return;
+ }
+ if (context.flyweight()) {
+ return;
+ }
+ fields.add(new CustomIntegerNumericField(this, context.source().length(), fieldType));
+ }
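+
+ // Added commentary: the indexed value is context.source().length(), i.e.
+ // the byte length of the serialized source as received. For example, a
+ // document whose JSON body is 150 bytes long is indexed with _size = 150.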
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // all are defaults, no need to write it at all
+ if (!includeDefaults && enabledState == Defaults.ENABLED_STATE && fieldType().stored() == Defaults.SIZE_FIELD_TYPE.stored()) {
+ return builder;
+ }
+ builder.startObject(contentType());
+ if (includeDefaults || enabledState != Defaults.ENABLED_STATE) {
+ builder.field("enabled", enabledState.enabled);
+ }
+ if (includeDefaults || (fieldType().stored() != Defaults.SIZE_FIELD_TYPE.stored() && enabledState.enabled)) {
+ builder.field("store", fieldType().stored());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
+ this.enabledState = sizeFieldMapperMergeWith.enabledState;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
new file mode 100644
index 0000000..e685a11
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import com.google.common.base.Objects;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.compress.CompressedStreamInput;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.source;
+
+/**
+ *
+ */
+public class SourceFieldMapper extends AbstractFieldMapper<byte[]> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_source";
+
+ public static final String CONTENT_TYPE = "_source";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = SourceFieldMapper.NAME;
+ public static final boolean ENABLED = true;
+ public static final long COMPRESS_THRESHOLD = -1;
+ public static final String FORMAT = null; // default format is to use the one provided
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(false);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+
+ }
+
+ public static class Builder extends Mapper.Builder<Builder, SourceFieldMapper> {
+
+ private boolean enabled = Defaults.ENABLED;
+
+ private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
+
+ private Boolean compress = null;
+
+ private String format = Defaults.FORMAT;
+
+ private String[] includes = null;
+ private String[] excludes = null;
+
+ public Builder() {
+ super(Defaults.NAME);
+ }
+
+ public Builder enabled(boolean enabled) {
+ this.enabled = enabled;
+ return this;
+ }
+
+ public Builder compress(boolean compress) {
+ this.compress = compress;
+ return this;
+ }
+
+ public Builder compressThreshold(long compressThreshold) {
+ this.compressThreshold = compressThreshold;
+ return this;
+ }
+
+ public Builder format(String format) {
+ this.format = format;
+ return this;
+ }
+
+ public Builder includes(String[] includes) {
+ this.includes = includes;
+ return this;
+ }
+
+ public Builder excludes(String[] excludes) {
+ this.excludes = excludes;
+ return this;
+ }
+
+ @Override
+ public SourceFieldMapper build(BuilderContext context) {
+ return new SourceFieldMapper(name, enabled, format, compress, compressThreshold, includes, excludes);
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ SourceFieldMapper.Builder builder = source();
+
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ builder.enabled(nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("compress") && fieldNode != null) {
+ builder.compress(nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("compress_threshold") && fieldNode != null) {
+ if (fieldNode instanceof Number) {
+ builder.compressThreshold(((Number) fieldNode).longValue());
+ builder.compress(true);
+ } else {
+ builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString()).bytes());
+ builder.compress(true);
+ }
+ } else if ("format".equals(fieldName)) {
+ builder.format(nodeStringValue(fieldNode, null));
+ } else if (fieldName.equals("includes")) {
+ List<Object> values = (List<Object>) fieldNode;
+ String[] includes = new String[values.size()];
+ for (int i = 0; i < includes.length; i++) {
+ includes[i] = values.get(i).toString();
+ }
+ builder.includes(includes);
+ } else if (fieldName.equals("excludes")) {
+ List<Object> values = (List<Object>) fieldNode;
+ String[] excludes = new String[values.size()];
+ for (int i = 0; i < excludes.length; i++) {
+ excludes[i] = values.get(i).toString();
+ }
+ builder.excludes(excludes);
+ }
+ }
+ return builder;
+ }
+ }
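+
+ // Illustrative only: a mapping exercising all the options parsed above,
+ // assuming the usual JSON mapping syntax:
+ //
+ // "_source" : {
+ // "enabled" : true,
+ // "compress" : true,
+ // "compress_threshold" : "200b",
+ // "includes" : ["meta.*"],
+ // "excludes" : ["meta.internal.*"]
+ // }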
+
+
+ private final boolean enabled;
+
+ private Boolean compress;
+ private long compressThreshold;
+
+ private String[] includes;
+ private String[] excludes;
+
+ private String format;
+
+ private XContentType formatContentType;
+
+ public SourceFieldMapper() {
+ this(Defaults.NAME, Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null);
+ }
+
+ protected SourceFieldMapper(String name, boolean enabled, String format, Boolean compress, long compressThreshold,
+ String[] includes, String[] excludes) {
+ super(new Names(name, name, name, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,
+ Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, null, null, null, null); // Only stored.
+ this.enabled = enabled;
+ this.compress = compress;
+ this.compressThreshold = compressThreshold;
+ this.includes = includes;
+ this.excludes = excludes;
+ this.format = format;
+ this.formatContentType = format == null ? null : XContentType.fromRestContentType(format);
+ }
+
+ public boolean enabled() {
+ return this.enabled;
+ }
+
+ public String[] excludes() {
+ return this.excludes != null ? this.excludes : Strings.EMPTY_ARRAY;
+
+ }
+
+ public String[] includes() {
+ return this.includes != null ? this.includes : Strings.EMPTY_ARRAY;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return null;
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // nothing to do here, we call the parent in preParse
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
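+ // Added commentary: the method below picks one of three paths:
+ // 1. includes/excludes are set -> rebuild a filtered source map and
+ // serialize it (compressing on the way out if configured);
+ // 2. compression is enabled and the source is not yet compressed ->
+ // compress the raw bytes, converting the content type first if a
+ // fixed "format" was configured;
+ // 3. only a fixed "format" is configured -> convert the (possibly
+ // compressed) source to that content type.
+ // In cases 2 and 3 the converted bytes are written back to the context
+ // so the translog stores the same representation.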
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!enabled) {
+ return;
+ }
+ if (!fieldType.stored()) {
+ return;
+ }
+ if (context.flyweight()) {
+ return;
+ }
+ BytesReference source = context.source();
+
+ boolean filtered = (includes != null && includes.length > 0) || (excludes != null && excludes.length > 0);
+ if (filtered) {
+ // we don't update the context source if we filter; we want to keep it as is...
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput streamOutput = bStream;
+ if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) {
+ streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
+ }
+ XContentType contentType = formatContentType;
+ if (contentType == null) {
+ contentType = mapTuple.v1();
+ }
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource);
+ builder.close();
+
+ source = bStream.bytes();
+ } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) {
+ if (compressThreshold == -1 || source.length() > compressThreshold) {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ XContentType contentType = XContentFactory.xContentType(source);
+ if (formatContentType != null && formatContentType != contentType) {
+ XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream));
+ builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
+ builder.close();
+ } else {
+ StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
+ source.writeTo(streamOutput);
+ streamOutput.close();
+ }
+ source = bStream.bytes();
+ // update the data in the context, so it can be compressed and stored compressed outside...
+ context.source(source);
+ }
+ } else if (formatContentType != null) {
+ // see if we need to convert the content type
+ Compressor compressor = CompressorFactory.compressor(source);
+ if (compressor != null) {
+ CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
+ XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
+ compressedStreamInput.resetToBufferStart();
+ if (contentType != formatContentType) {
+ // we need to reread and store back, compressed....
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
+ XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput);
+ builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput));
+ builder.close();
+ source = bStream.bytes();
+ // update the data in the context, so we store it in the translog in this format
+ context.source(source);
+ } else {
+ compressedStreamInput.close();
+ }
+ } else {
+ XContentType contentType = XContentFactory.xContentType(source);
+ if (contentType != formatContentType) {
+ // we need to reread and store back in the requested format
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream);
+ builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
+ builder.close();
+ source = bStream.bytes();
+ // update the data in the context, so we store it in the translog in this format
+ context.source(source);
+ }
+ }
+ }
+ assert source.hasArray();
+ fields.add(new StoredField(names().indexName(), source.array(), source.arrayOffset(), source.length()));
+ }
+
+ @Override
+ public byte[] value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ BytesReference bValue;
+ if (value instanceof BytesRef) {
+ bValue = new BytesArray((BytesRef) value);
+ } else {
+ bValue = (BytesReference) value;
+ }
+ try {
+ return CompressorFactory.uncompressIfNeeded(bValue).toBytes();
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to decompress source", e);
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // all are defaults, no need to write it at all
+ if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) {
+ return builder;
+ }
+ builder.startObject(contentType());
+ if (includeDefaults || enabled != Defaults.ENABLED) {
+ builder.field("enabled", enabled);
+ }
+ if (includeDefaults || !Objects.equal(format, Defaults.FORMAT)) {
+ builder.field("format", format);
+ }
+ if (compress != null) {
+ builder.field("compress", compress);
+ } else if (includeDefaults) {
+ builder.field("compress", false);
+ }
+ if (compressThreshold != -1) {
+ builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
+ } else if (includeDefaults) {
+ builder.field("compress_threshold", -1);
+ }
+
+ if (includes != null) {
+ builder.field("includes", includes);
+ } else if (includeDefaults) {
+ builder.field("includes", Strings.EMPTY_ARRAY);
+ }
+
+ if (excludes != null) {
+ builder.field("excludes", excludes);
+ } else if (includeDefaults) {
+ builder.field("excludes", Strings.EMPTY_ARRAY);
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (sourceMergeWith.compress != null) {
+ this.compress = sourceMergeWith.compress;
+ }
+ if (sourceMergeWith.compressThreshold != -1) {
+ this.compressThreshold = sourceMergeWith.compressThreshold;
+ }
+ if (sourceMergeWith.includes != null) {
+ this.includes = sourceMergeWith.includes;
+ }
+ if (sourceMergeWith.excludes != null) {
+ this.excludes = sourceMergeWith.excludes;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
new file mode 100644
index 0000000..96c3ce3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.AlreadyExpiredException;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeTimeValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.ttl;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+public class TTLFieldMapper extends LongFieldMapper implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_ttl";
+ public static final String CONTENT_TYPE = "_ttl";
+
+ public static class Defaults extends LongFieldMapper.Defaults {
+ public static final String NAME = TTLFieldMapper.CONTENT_TYPE;
+
+ public static final FieldType TTL_FIELD_TYPE = new FieldType(LongFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ TTL_FIELD_TYPE.setStored(true);
+ TTL_FIELD_TYPE.setIndexed(true);
+ TTL_FIELD_TYPE.setTokenized(false);
+ TTL_FIELD_TYPE.freeze();
+ }
+
+ public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
+ public static final long DEFAULT = -1;
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, TTLFieldMapper> {
+
+ private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
+ private long defaultTTL = Defaults.DEFAULT;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.TTL_FIELD_TYPE));
+ }
+
+ public Builder enabled(EnabledAttributeMapper enabled) {
+ this.enabledState = enabled;
+ return builder;
+ }
+
+ public Builder defaultTTL(long defaultTTL) {
+ this.defaultTTL = defaultTTL;
+ return builder;
+ }
+
+ @Override
+ public TTLFieldMapper build(BuilderContext context) {
+ return new TTLFieldMapper(fieldType, enabledState, defaultTTL, ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ TTLFieldMapper.Builder builder = ttl();
+ parseField(builder, builder.name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
+ builder.enabled(enabledState);
+ } else if (fieldName.equals("default")) {
+ TimeValue ttlTimeValue = nodeTimeValue(fieldNode, null);
+ if (ttlTimeValue != null) {
+ builder.defaultTTL(ttlTimeValue.millis());
+ }
+ }
+ }
+ return builder;
+ }
+ }
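+
+ // Illustrative only: a mapping this parser would accept, assuming the
+ // usual JSON mapping syntax:
+ //
+ // "_ttl" : { "enabled" : true, "default" : "1d" }
+ //
+ // "default" is parsed as a TimeValue and stored in milliseconds.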
+
+ private EnabledAttributeMapper enabledState;
+ private long defaultTTL;
+
+ public TTLFieldMapper() {
+ this(new FieldType(Defaults.TTL_FIELD_TYPE), Defaults.ENABLED_STATE, Defaults.DEFAULT, Defaults.IGNORE_MALFORMED, Defaults.COERCE, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected TTLFieldMapper(FieldType fieldType, EnabledAttributeMapper enabled, long defaultTTL, Explicit<Boolean> ignoreMalformed,
+ Explicit<Boolean> coerce, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), Defaults.PRECISION_STEP,
+ Defaults.BOOST, fieldType, null, Defaults.NULL_VALUE, ignoreMalformed, coerce,
+ postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings, MultiFields.empty(), null);
+ this.enabledState = enabled;
+ this.defaultTTL = defaultTTL;
+ }
+
+ public boolean enabled() {
+ return this.enabledState.enabled;
+ }
+
+ public long defaultTTL() {
+ return this.defaultTTL;
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ // Overrides valueForSearch to return the live value of the remaining ttl
+ @Override
+ public Object valueForSearch(Object value) {
+ long now;
+ SearchContext searchContext = SearchContext.current();
+ if (searchContext != null) {
+ now = searchContext.nowInMillis();
+ } else {
+ now = System.currentTimeMillis();
+ }
+ long val = value(value);
+ return val - now;
+ }
+
+ // Variant used by realtime get to display the remaining ttl
+ public Object valueForSearch(long expirationTime) {
+ return expirationTime - System.currentTimeMillis();
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException, MapperParsingException {
+ if (context.sourceToParse().ttl() < 0) { // no ttl has been provided externally
+ long ttl;
+ if (context.parser().currentToken() == XContentParser.Token.VALUE_STRING) {
+ ttl = TimeValue.parseTimeValue(context.parser().text(), null).millis();
+ } else {
+ ttl = context.parser().longValue(coerce.value());
+ }
+ if (ttl <= 0) {
+ throw new MapperParsingException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
+ }
+ context.sourceToParse().ttl(ttl);
+ }
+ }
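+
+ // Illustrative only: since includeInObject() returns true below, a ttl can
+ // also ride along inside the document itself, e.g.
+ //
+ // { "_ttl" : "30m", "user" : "kimchy" }
+ //
+ // parse(...) above accepts either a time string ("30m") or a raw
+ // millisecond count, and rejects values <= 0.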
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException, AlreadyExpiredException {
+ if (enabledState.enabled && !context.sourceToParse().flyweight()) {
+ long ttl = context.sourceToParse().ttl();
+ if (ttl <= 0 && defaultTTL > 0) { // no ttl provided so we use the default value
+ ttl = defaultTTL;
+ context.sourceToParse().ttl(ttl);
+ }
+ if (ttl > 0) { // a ttl has been provided either externally or in the _source
+ long timestamp = context.sourceToParse().timestamp();
+ long expire = timestamp + ttl; // no Date round trip needed, the sum is already epoch millis
+ long now = System.currentTimeMillis();
+ // there is no point indexing an already expired doc
+ if (context.sourceToParse().origin() == SourceToParse.Origin.PRIMARY && now >= expire) {
+ throw new AlreadyExpiredException(context.index(), context.type(), context.id(), timestamp, ttl, now);
+ }
+ // the expiration timestamp (timestamp + ttl) is set as the field value
+ fields.add(new CustomLongNumericField(this, expire, fieldType));
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // if all are defaults, there is no point in writing it at all
+ if (!includeDefaults && enabledState == Defaults.ENABLED_STATE && defaultTTL == Defaults.DEFAULT) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ if (includeDefaults || enabledState != Defaults.ENABLED_STATE) {
+ builder.field("enabled", enabledState.enabled);
+ }
+ if (includeDefaults || (defaultTTL != Defaults.DEFAULT && enabledState.enabled)) {
+ builder.field("default", defaultTTL);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (ttlMergeWith.defaultTTL != -1) {
+ this.defaultTTL = ttlMergeWith.defaultTTL;
+ }
+ if (ttlMergeWith.enabledState != enabledState && !ttlMergeWith.enabledState.unset()) {
+ this.enabledState = ttlMergeWith.enabledState;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
new file mode 100644
index 0000000..1d2467b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.timestamp;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ */
+public class TimestampFieldMapper extends DateFieldMapper implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_timestamp";
+ public static final String CONTENT_TYPE = "_timestamp";
+ public static final String DEFAULT_DATE_TIME_FORMAT = "dateOptionalTime";
+
+ public static class Defaults extends DateFieldMapper.Defaults {
+ public static final String NAME = "_timestamp";
+
+ public static final FieldType FIELD_TYPE = new FieldType(DateFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setStored(false);
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.freeze();
+ }
+
+ public static final EnabledAttributeMapper ENABLED = EnabledAttributeMapper.DISABLED;
+ public static final String PATH = null;
+ public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern(DEFAULT_DATE_TIME_FORMAT);
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, TimestampFieldMapper> {
+
+ private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
+ private String path = Defaults.PATH;
+ private FormatDateTimeFormatter dateTimeFormatter = Defaults.DATE_TIME_FORMATTER;
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
+ }
+
+ public Builder enabled(EnabledAttributeMapper enabledState) {
+ this.enabledState = enabledState;
+ return builder;
+ }
+
+ public Builder path(String path) {
+ this.path = path;
+ return builder;
+ }
+
+ public Builder dateTimeFormatter(FormatDateTimeFormatter dateTimeFormatter) {
+ this.dateTimeFormatter = dateTimeFormatter;
+ return builder;
+ }
+
+ @Override
+ public TimestampFieldMapper build(BuilderContext context) {
+ boolean roundCeil = Defaults.ROUND_CEIL;
+ if (context.indexSettings() != null) {
+ Settings settings = context.indexSettings();
+ roundCeil = settings.getAsBoolean("index.mapping.date.round_ceil", settings.getAsBoolean("index.mapping.date.parse_upper_inclusive", Defaults.ROUND_CEIL));
+ }
+ return new TimestampFieldMapper(fieldType, docValues, enabledState, path, dateTimeFormatter, roundCeil,
+ ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, normsLoading, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ TimestampFieldMapper.Builder builder = timestamp();
+ parseField(builder, builder.name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals("enabled")) {
+ EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
+ builder.enabled(enabledState);
+ } else if (fieldName.equals("path")) {
+ builder.path(fieldNode.toString());
+ } else if (fieldName.equals("format")) {
+ builder.dateTimeFormatter(parseDateTimeFormatter(builder.name(), fieldNode.toString()));
+ }
+ }
+ return builder;
+ }
+ }
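+
+ // Illustrative only: a mapping this parser would accept, assuming the
+ // usual JSON mapping syntax:
+ //
+ // "_timestamp" : { "enabled" : true, "path" : "post_date", "format" : "YYYY-MM-dd" }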
+
+
+ private EnabledAttributeMapper enabledState;
+
+ private final String path;
+
+ public TimestampFieldMapper() {
+ this(new FieldType(Defaults.FIELD_TYPE), null, Defaults.ENABLED, Defaults.PATH, Defaults.DATE_TIME_FORMATTER,
+ Defaults.ROUND_CEIL, Defaults.IGNORE_MALFORMED, Defaults.COERCE, null, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected TimestampFieldMapper(FieldType fieldType, Boolean docValues, EnabledAttributeMapper enabledState, String path,
+ FormatDateTimeFormatter dateTimeFormatter, boolean roundCeil,
+ Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, Loading normsLoading,
+ @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), dateTimeFormatter,
+ Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, docValues,
+ Defaults.NULL_VALUE, TimeUnit.MILLISECONDS /*always milliseconds*/,
+ roundCeil, ignoreMalformed, coerce, postingsProvider, docValuesProvider, null, normsLoading, fieldDataSettings,
+ indexSettings, MultiFields.empty(), null);
+ this.enabledState = enabledState;
+ this.path = path;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ public boolean enabled() {
+ return this.enabledState.enabled;
+ }
+
+ public String path() {
+ return this.path;
+ }
+
+ public FormatDateTimeFormatter dateTimeFormatter() {
+ return this.dateTimeFormatter;
+ }
+
+ /**
+ * Override the default behavior to return a timestamp
+ */
+ @Override
+ public Object valueForSearch(Object value) {
+ return value(value);
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // nothing to do here, we call the parent in preParse
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return true;
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (enabledState.enabled) {
+ long timestamp = context.sourceToParse().timestamp();
+ if (!fieldType.indexed() && !fieldType.stored() && !hasDocValues()) {
+ context.ignoredValue(names.indexName(), String.valueOf(timestamp));
+ }
+ if (fieldType.indexed() || fieldType.stored()) {
+ fields.add(new LongFieldMapper.CustomLongNumericField(this, timestamp, fieldType));
+ }
+ if (hasDocValues()) {
+ fields.add(new NumericDocValuesField(names.indexName(), timestamp));
+ }
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // if all are defaults, there is no point in writing it at all
+ if (!includeDefaults && fieldType.indexed() == Defaults.FIELD_TYPE.indexed() && customFieldDataSettings == null &&
+ fieldType.stored() == Defaults.FIELD_TYPE.stored() && enabledState == Defaults.ENABLED && path == Defaults.PATH
+ && dateTimeFormatter.format().equals(Defaults.DATE_TIME_FORMATTER.format())) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ if (includeDefaults || enabledState != Defaults.ENABLED) {
+ builder.field("enabled", enabledState.enabled);
+ }
+ if (enabledState.enabled) {
+ if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
+ builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
+ }
+ if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (includeDefaults || path != Defaults.PATH) {
+ builder.field("path", path);
+ }
+ if (includeDefaults || !dateTimeFormatter.format().equals(Defaults.DATE_TIME_FORMATTER.format())) {
+ builder.field("format", dateTimeFormatter.format());
+ }
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
+ this.enabledState = timestampFieldMapperMergeWith.enabledState;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
new file mode 100644
index 0000000..8fc64a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.PrefixFilter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.type;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ *
+ */
+public class TypeFieldMapper extends AbstractFieldMapper<String> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_type";
+
+ public static final String CONTENT_TYPE = "_type";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = TypeFieldMapper.NAME;
+ public static final String INDEX_NAME = TypeFieldMapper.NAME;
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setStored(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, TypeFieldMapper> {
+
+ public Builder() {
+ super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
+ indexName = Defaults.INDEX_NAME;
+ }
+
+ @Override
+ public TypeFieldMapper build(BuilderContext context) {
+ return new TypeFieldMapper(name, indexName, boost, fieldType, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ TypeFieldMapper.Builder builder = type();
+ parseField(builder, builder.name, node, parserContext);
+ return builder;
+ }
+ }
+
+
+ public TypeFieldMapper() {
+ this(Defaults.NAME, Defaults.INDEX_NAME);
+ }
+
+ protected TypeFieldMapper(String name, String indexName) {
+ this(name, indexName, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ public TypeFieldMapper(String name, String indexName, float boost, FieldType fieldType, PostingsFormatProvider postingsProvider,
+ DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, indexName, indexName, name), boost, fieldType, null, Lucene.KEYWORD_ANALYZER,
+ Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings);
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return false;
+ }
+
+ @Override
+ public String value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return value.toString();
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryParseContext context) {
+ return new XConstantScoreQuery(context.cacheFilter(termFilter(value, context), null));
+ }
+
+ @Override
+ public Filter termFilter(Object value, @Nullable QueryParseContext context) {
+ if (!fieldType.indexed()) {
+ return new PrefixFilter(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))));
+ }
+ return new TermFilter(names().createIndexNameTerm(BytesRefs.toBytesRef(value)));
+ }
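+
+ // Added commentary: when _type is not indexed there is no term to filter
+ // on, so the filter above falls back to a prefix filter on _uid, whose
+ // values (assuming the usual "type#id" layout produced by Uid.createUid)
+ // all start with the type prefix, e.g. "tweet#1" matches the prefix for
+ // type "tweet".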
+
+ @Override
+ public boolean useTermQueryWithQueryString() {
+ return true;
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // we parse in preParse
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ if (!fieldType.indexed() && !fieldType.stored()) {
+ return;
+ }
+ fields.add(new Field(names.indexName(), context.type(), fieldType));
+ if (hasDocValues()) {
+ fields.add(new SortedSetDocValuesField(names.indexName(), new BytesRef(context.type())));
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ // if all are defaults, there is no point in writing it at all
+ if (!includeDefaults && fieldType.stored() == Defaults.FIELD_TYPE.stored() && fieldType.indexed() == Defaults.FIELD_TYPE.indexed()) {
+ return builder;
+ }
+ builder.startObject(CONTENT_TYPE);
+ if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
+ builder.field("store", fieldType.stored());
+ }
+ if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
+ builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ // do nothing here, no merging, but also no exception
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
new file mode 100644
index 0000000..29e5b1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.BinaryDocValuesField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.uid;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+
+/**
+ *
+ */
+public class UidFieldMapper extends AbstractFieldMapper<Uid> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_uid";
+
+ public static final String CONTENT_TYPE = "_uid";
+
+ public static class Defaults extends AbstractFieldMapper.Defaults {
+ public static final String NAME = UidFieldMapper.NAME;
+ public static final String INDEX_NAME = UidFieldMapper.NAME;
+
+ public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+ public static final FieldType NESTED_FIELD_TYPE;
+
+ static {
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
+ FIELD_TYPE.freeze();
+
+ NESTED_FIELD_TYPE = new FieldType(FIELD_TYPE);
+ NESTED_FIELD_TYPE.setStored(false);
+ NESTED_FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, UidFieldMapper> {
+
+ public Builder() {
+ super(Defaults.NAME, Defaults.FIELD_TYPE);
+ indexName = Defaults.INDEX_NAME;
+ }
+
+ @Override
+ public UidFieldMapper build(BuilderContext context) {
+ return new UidFieldMapper(name, indexName, docValues, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ Builder builder = uid();
+ parseField(builder, builder.name, node, parserContext);
+ return builder;
+ }
+ }
+
+ public UidFieldMapper() {
+ this(Defaults.NAME);
+ }
+
+ protected UidFieldMapper(String name) {
+ this(name, name, null, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ protected UidFieldMapper(String name, String indexName, Boolean docValues, PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat, @Nullable Settings fieldDataSettings, Settings indexSettings) {
+ super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), docValues,
+ Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsFormat, docValuesFormat, null, null, fieldDataSettings, indexSettings);
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("string");
+ }
+
+ @Override
+ protected String defaultPostingFormat() {
+ return "default";
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ // if we have the id provided, fill it, and parse now
+ if (context.sourceToParse().id() != null) {
+ context.id(context.sourceToParse().id());
+ super.parse(context);
+ }
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ if (context.id() == null && !context.sourceToParse().flyweight()) {
+ throw new MapperParsingException("No id found while parsing the content source");
+ }
+ // if we did not have the id as part of the sourceToParse, then we need to parse it here
+ // it would have been filled in the _id parse phase
+ if (context.sourceToParse().id() == null) {
+ super.parse(context);
+            // since we did not have the uid in the pre phase, it was not added automatically to the nested docs
+            // as they were created, so we need to make sure we add it to all the nested docs...
+ if (context.docs().size() > 1) {
+ final IndexableField uidField = context.rootDoc().getField(UidFieldMapper.NAME);
+ assert uidField != null;
+ // we need to go over the docs and add it...
+ for (int i = 1; i < context.docs().size(); i++) {
+ final Document doc = context.docs().get(i);
+ doc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), Defaults.NESTED_FIELD_TYPE));
+ }
+ }
+ }
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // nothing to do here, we either do it in post parse, or in pre parse.
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
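+        // For reference: Uid.createUid joins the type and the id with a '#' separator,
+        // so e.g. a document of type "my_type" with id "1" is indexed under the single
+        // term "my_type#1"; when doc values are enabled the same value is mirrored below.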
+ Field uid = new Field(NAME, Uid.createUid(context.stringBuilder(), context.type(), context.id()), Defaults.FIELD_TYPE);
+ context.uid(uid);
+ fields.add(uid);
+ if (hasDocValues()) {
+ fields.add(new BinaryDocValuesField(NAME, new BytesRef(uid.stringValue())));
+ }
+ }
+
+ @Override
+ public Uid value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ return Uid.createUid(value.toString());
+ }
+
+ public Term term(String type, String id) {
+ return term(Uid.createUid(type, id));
+ }
+
+ public Term term(String uid) {
+ return names().createIndexNameTerm(uid);
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+        // if everything is still at its default, don't output anything
+ if (!includeDefaults && customFieldDataSettings == null
+ && (postingsFormat == null || postingsFormat.name().equals(defaultPostingFormat()))
+ && (docValuesFormat == null || docValuesFormat.name().equals(defaultDocValuesFormat()))) {
+ return builder;
+ }
+
+ builder.startObject(CONTENT_TYPE);
+
+ if (postingsFormat != null) {
+ if (includeDefaults || !postingsFormat.name().equals(defaultPostingFormat())) {
+ builder.field("postings_format", postingsFormat.name());
+ }
+ } else if (includeDefaults) {
+ String format = defaultPostingFormat();
+ if (format == null) {
+ format = PostingsFormatService.DEFAULT_FORMAT;
+ }
+ builder.field("postings_format", format);
+ }
+
+ if (docValuesFormat != null) {
+ if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
+ builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
+ }
+ } else if (includeDefaults) {
+ String format = defaultDocValuesFormat();
+ if (format == null) {
+ format = DocValuesFormatService.DEFAULT_FORMAT;
+ }
+ builder.field(DOC_VALUES_FORMAT, format);
+ }
+
+ if (customFieldDataSettings != null) {
+ builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
+ } else if (includeDefaults) {
+ builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ AbstractFieldMapper<?> fieldMergeWith = (AbstractFieldMapper<?>) mergeWith;
+ // do nothing here, no merging, but also no exception
+ if (!mergeContext.mergeFlags().simulate()) {
+ // apply changeable values
+ if (fieldMergeWith.postingsFormatProvider() != null) {
+ this.postingsFormat = fieldMergeWith.postingsFormatProvider();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
new file mode 100644
index 0000000..2e794c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.version;
+
+/** Mapper for the _version field. */
+public class VersionFieldMapper extends AbstractFieldMapper<Long> implements InternalMapper, RootMapper {
+
+ public static final String NAME = "_version";
+ public static final String CONTENT_TYPE = "_version";
+
+ public static class Defaults {
+
+ public static final String NAME = VersionFieldMapper.NAME;
+ public static final float BOOST = 1.0f;
+ public static final FieldType FIELD_TYPE = NumericDocValuesField.TYPE;
+
+ }
+
+ public static class Builder extends Mapper.Builder<Builder, VersionFieldMapper> {
+
+ DocValuesFormatProvider docValuesFormat;
+
+ public Builder() {
+ super(Defaults.NAME);
+ }
+
+ @Override
+ public VersionFieldMapper build(BuilderContext context) {
+ return new VersionFieldMapper(docValuesFormat);
+ }
+
+ public Builder docValuesFormat(DocValuesFormatProvider docValuesFormat) {
+ this.docValuesFormat = docValuesFormat;
+ return this;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ Builder builder = version();
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+ if (fieldName.equals(DOC_VALUES_FORMAT)) {
+ String docValuesFormatName = fieldNode.toString();
+ builder.docValuesFormat(parserContext.docValuesFormatService().get(docValuesFormatName));
+ }
+ }
+ return builder;
+ }
+ }
+
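+    // One reusable field instance per thread, to avoid allocating a new field for every
+    // document; the -1 here is only a placeholder, the real version value is filled in
+    // later during indexing.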
+ private final ThreadLocal<Field> fieldCache = new ThreadLocal<Field>() {
+ @Override
+ protected Field initialValue() {
+ return new NumericDocValuesField(NAME, -1L);
+ }
+ };
+
+ public VersionFieldMapper() {
+ this(null);
+ }
+
+ VersionFieldMapper(DocValuesFormatProvider docValuesFormat) {
+ super(new Names(NAME, NAME, NAME, NAME), Defaults.BOOST, Defaults.FIELD_TYPE, null, null, null, null, docValuesFormat, null, null, null, ImmutableSettings.EMPTY);
+ }
+
+ @Override
+ protected String defaultDocValuesFormat() {
+ return "disk";
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ super.parse(context);
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ // see UidFieldMapper.parseCreateField
+ final Field version = fieldCache.get();
+ context.version(version);
+ fields.add(version);
+ }
+
+ @Override
+ public void parse(ParseContext context) throws IOException {
+ // _version added in preparse
+ }
+
+ @Override
+ public Long value(Object value) {
+ if (value == null || (value instanceof Long)) {
+ return (Long) value;
+ } else {
+ return Long.parseLong(value.toString());
+ }
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ // In the case of nested docs, let's fill nested docs with version=0 so that Lucene doesn't write a Bitset for documents
+ // that don't have the field
+ for (int i = 1; i < context.docs().size(); i++) {
+ final Document doc = context.docs().get(i);
+ doc.add(new NumericDocValuesField(NAME, 0L));
+ }
+ }
+
+ @Override
+ public void validate(ParseContext context) throws MapperParsingException {
+ }
+
+ @Override
+ public boolean includeInObject() {
+ return false;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
+
+ if (!includeDefaults && (docValuesFormat == null || docValuesFormat.name().equals(defaultDocValuesFormat()))) {
+ return builder;
+ }
+
+ builder.startObject(CONTENT_TYPE);
+ if (docValuesFormat != null) {
+ if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
+ builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
+ }
+ } else {
+ String format = defaultDocValuesFormat();
+ if (format == null) {
+ format = DocValuesFormatService.DEFAULT_FORMAT;
+ }
+ builder.field(DOC_VALUES_FORMAT, format);
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ if (mergeContext.mergeFlags().simulate()) {
+ return;
+ }
+ AbstractFieldMapper<?> fieldMergeWith = (AbstractFieldMapper<?>) mergeWith;
+ if (fieldMergeWith.docValuesFormatProvider() != null) {
+ this.docValuesFormat = fieldMergeWith.docValuesFormatProvider();
+ }
+ }
+
+ @Override
+ public void close() {
+ fieldCache.remove();
+ }
+
+ @Override
+ public boolean hasDocValues() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java
new file mode 100644
index 0000000..4d11c8a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ip;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Explicit;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.NumericAnalyzer;
+import org.elasticsearch.index.analysis.NumericTokenizer;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.similarity.SimilarityProvider;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.ipField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
+
+/** Mapper for the ip field type, which indexes IPv4 addresses as longs. */
+public class IpFieldMapper extends NumberFieldMapper<Long> {
+
+ public static final String CONTENT_TYPE = "ip";
+
+ public static String longToIp(long longIp) {
+ int octet3 = (int) ((longIp >> 24) % 256);
+ int octet2 = (int) ((longIp >> 16) % 256);
+ int octet1 = (int) ((longIp >> 8) % 256);
+ int octet0 = (int) ((longIp) % 256);
+ return octet3 + "." + octet2 + "." + octet1 + "." + octet0;
+ }
+
+ private static final Pattern pattern = Pattern.compile("\\.");
+
+ public static long ipToLong(String ip) throws ElasticsearchIllegalArgumentException {
+ try {
+ String[] octets = pattern.split(ip);
+ if (octets.length != 4) {
+ throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "], not full ip address (4 dots)");
+ }
+ return (Long.parseLong(octets[0]) << 24) + (Integer.parseInt(octets[1]) << 16) +
+ (Integer.parseInt(octets[2]) << 8) + Integer.parseInt(octets[3]);
+ } catch (Exception e) {
+ if (e instanceof ElasticsearchIllegalArgumentException) {
+ throw (ElasticsearchIllegalArgumentException) e;
+ }
+ throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "]", e);
+ }
+ }
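+
+    // A quick round-trip illustration of the two helpers above:
+    //   ipToLong("127.0.0.1") == (127L << 24) + 1 == 2130706433L
+    //   longToIp(2130706433L) -> "127.0.0.1"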
+
+ public static class Defaults extends NumberFieldMapper.Defaults {
+ public static final String NULL_VALUE = null;
+
+ public static final FieldType FIELD_TYPE = new FieldType(NumberFieldMapper.Defaults.FIELD_TYPE);
+
+ static {
+ FIELD_TYPE.freeze();
+ }
+ }
+
+ public static class Builder extends NumberFieldMapper.Builder<Builder, IpFieldMapper> {
+
+ protected String nullValue = Defaults.NULL_VALUE;
+
+ public Builder(String name) {
+ super(name, new FieldType(Defaults.FIELD_TYPE));
+ builder = this;
+ }
+
+ public Builder nullValue(String nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ @Override
+ public IpFieldMapper build(BuilderContext context) {
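+            // norms can only be omitted while the boost is the default (1.0f),
+            // since Lucene encodes the field boost into the norm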
+ fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
+ IpFieldMapper fieldMapper = new IpFieldMapper(buildNames(context),
+ precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context), coerce(context),
+ postingsProvider, docValuesProvider, similarity,
+ normsLoading, fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ fieldMapper.includeInAll(includeInAll);
+ return fieldMapper;
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ IpFieldMapper.Builder builder = ipField(name);
+ parseNumberField(builder, name, node, parserContext);
+ for (Map.Entry<String, Object> entry : node.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+ if (propName.equals("null_value")) {
+ builder.nullValue(propNode.toString());
+ }
+ }
+ return builder;
+ }
+ }
+
+ private String nullValue;
+
+ protected IpFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,
+ String nullValue, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce,
+ PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
+ SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
+ Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(names, precisionStep, boost, fieldType, docValues,
+ ignoreMalformed, coerce, new NamedAnalyzer("_ip/" + precisionStep, new NumericIpAnalyzer(precisionStep)),
+ new NamedAnalyzer("_ip/max", new NumericIpAnalyzer(Integer.MAX_VALUE)), postingsProvider, docValuesProvider,
+ similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
+ this.nullValue = nullValue;
+ }
+
+ @Override
+ public FieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected int maxPrecisionStep() {
+ return 64;
+ }
+
+ @Override
+ public Long value(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Number) {
+ return ((Number) value).longValue();
+ }
+ if (value instanceof BytesRef) {
+ return Numbers.bytesToLong((BytesRef) value);
+ }
+ return ipToLong(value.toString());
+ }
+
+ /**
+ * IPs should return as a string.
+ */
+ @Override
+ public Object valueForSearch(Object value) {
+ Long val = value(value);
+ if (val == null) {
+ return null;
+ }
+ return longToIp(val);
+ }
+
+ @Override
+ public BytesRef indexedValueForSearch(Object value) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+ return bytesRef;
+ }
+
+ private long parseValue(Object value) {
+ if (value instanceof Number) {
+ return ((Number) value).longValue();
+ }
+ if (value instanceof BytesRef) {
+ return ipToLong(((BytesRef) value).utf8ToString());
+ }
+ return ipToLong(value.toString());
+ }
+
+ @Override
+ public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
+ long iValue = ipToLong(value);
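+        // the fuzziness acts as a numeric radius around the ip: it may be given either as
+        // another ip (e.g. "0.0.0.255" -> 255) or as a plain number, and the query then
+        // matches the range [iValue - iSim, iValue + iSim]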
+ long iSim;
+ try {
+ iSim = ipToLong(fuzziness.asString());
+ } catch (ElasticsearchIllegalArgumentException e) {
+ iSim = fuzziness.asLong();
+ }
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ iValue - iSim,
+ iValue + iSim,
+ true, true);
+ }
+
+ @Override
+ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
+ return NumericRangeFieldDataFilter.newLongRange((IndexNumericFieldData) fieldData.getForField(this),
+ lowerTerm == null ? null : parseValue(lowerTerm),
+ upperTerm == null ? null : parseValue(upperTerm),
+ includeLower, includeUpper);
+ }
+
+ @Override
+ public Filter nullValueFilter() {
+ if (nullValue == null) {
+ return null;
+ }
+ final long value = ipToLong(nullValue);
+ return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,
+ value,
+ value,
+ true, true);
+ }
+
+ @Override
+ protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ String ipAsString;
+ if (context.externalValueSet()) {
+ ipAsString = (String) context.externalValue();
+ if (ipAsString == null) {
+ ipAsString = nullValue;
+ }
+ } else {
+ if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
+ ipAsString = nullValue;
+ } else {
+ ipAsString = context.parser().text();
+ }
+ }
+
+ if (ipAsString == null) {
+ return;
+ }
+ if (context.includeInAll(includeInAll, this)) {
+ context.allEntries().addText(names.fullName(), ipAsString, boost);
+ }
+
+ final long value = ipToLong(ipAsString);
+ if (fieldType.indexed() || fieldType.stored()) {
+ CustomLongNumericField field = new CustomLongNumericField(this, value, fieldType);
+ field.setBoost(boost);
+ fields.add(field);
+ }
+ if (hasDocValues()) {
+ addDocValue(context, value);
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+ super.merge(mergeWith, mergeContext);
+ if (!this.getClass().equals(mergeWith.getClass())) {
+ return;
+ }
+ if (!mergeContext.mergeFlags().simulate()) {
+ this.nullValue = ((IpFieldMapper) mergeWith).nullValue;
+ }
+ }
+
+ @Override
+ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
+ super.doXContentBody(builder, includeDefaults, params);
+
+ if (includeDefaults || precisionStep != Defaults.PRECISION_STEP) {
+ builder.field("precision_step", precisionStep);
+ }
+ if (includeDefaults || nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ } else if (includeDefaults) {
+ builder.field("include_in_all", false);
+ }
+
+ }
+
+ public static class NumericIpAnalyzer extends NumericAnalyzer<NumericIpTokenizer> {
+
+ private final int precisionStep;
+
+ public NumericIpAnalyzer() {
+ this(NumericUtils.PRECISION_STEP_DEFAULT);
+ }
+
+ public NumericIpAnalyzer(int precisionStep) {
+ this.precisionStep = precisionStep;
+ }
+
+ @Override
+ protected NumericIpTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
+ return new NumericIpTokenizer(reader, precisionStep, buffer);
+ }
+ }
+
+ public static class NumericIpTokenizer extends NumericTokenizer {
+
+ public NumericIpTokenizer(Reader reader, int precisionStep, char[] buffer) throws IOException {
+ super(reader, new NumericTokenStream(precisionStep), buffer, null);
+ }
+
+ @Override
+ protected void setValue(NumericTokenStream tokenStream, String value) {
+ tokenStream.setLongValue(ipToLong(value));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ArrayValueMapperParser.java b/src/main/java/org/elasticsearch/index/mapper/object/ArrayValueMapperParser.java
new file mode 100644
index 0000000..2044466
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/object/ArrayValueMapperParser.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+/**
+ * A marker interface indicating that this mapper can handle array values, and that the array
+ * itself should be passed to it.
+ */
+public interface ArrayValueMapperParser {
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java
new file mode 100644
index 0000000..6226c81
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.MapperParsingException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/** A template that dynamically maps fields based on their name, path, or detected type. */
+public class DynamicTemplate {
+
+ public static enum MatchType {
+ SIMPLE,
+ REGEX;
+
+ public static MatchType fromString(String value) {
+ if ("simple".equals(value)) {
+ return SIMPLE;
+ } else if ("regex".equals(value)) {
+ return REGEX;
+ }
+ throw new ElasticsearchIllegalArgumentException("No matching pattern matched on [" + value + "]");
+ }
+ }
+
+ public static DynamicTemplate parse(String name, Map<String, Object> conf) throws MapperParsingException {
+ String match = null;
+ String pathMatch = null;
+ String unmatch = null;
+ String pathUnmatch = null;
+ Map<String, Object> mapping = null;
+ String matchMappingType = null;
+ String matchPattern = "simple";
+
+ for (Map.Entry<String, Object> entry : conf.entrySet()) {
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ if ("match".equals(propName)) {
+ match = entry.getValue().toString();
+ } else if ("path_match".equals(propName)) {
+ pathMatch = entry.getValue().toString();
+ } else if ("unmatch".equals(propName)) {
+ unmatch = entry.getValue().toString();
+ } else if ("path_unmatch".equals(propName)) {
+ pathUnmatch = entry.getValue().toString();
+ } else if ("match_mapping_type".equals(propName)) {
+ matchMappingType = entry.getValue().toString();
+ } else if ("match_pattern".equals(propName)) {
+ matchPattern = entry.getValue().toString();
+ } else if ("mapping".equals(propName)) {
+ mapping = (Map<String, Object>) entry.getValue();
+ }
+ }
+
+ if (match == null && pathMatch == null && matchMappingType == null) {
+ throw new MapperParsingException("template must have match, path_match or match_mapping_type set");
+ }
+ if (mapping == null) {
+ throw new MapperParsingException("template must have mapping set");
+ }
+ return new DynamicTemplate(name, conf, pathMatch, pathUnmatch, match, unmatch, matchMappingType, MatchType.fromString(matchPattern), mapping);
+ }
+
+ private final String name;
+
+ private final Map<String, Object> conf;
+
+ private final String pathMatch;
+
+ private final String pathUnmatch;
+
+ private final String match;
+
+ private final String unmatch;
+
+ private final MatchType matchType;
+
+ private final String matchMappingType;
+
+ private final Map<String, Object> mapping;
+
+ public DynamicTemplate(String name, Map<String, Object> conf, String pathMatch, String pathUnmatch, String match, String unmatch, String matchMappingType, MatchType matchType, Map<String, Object> mapping) {
+ this.name = name;
+ this.conf = new TreeMap<String, Object>(conf);
+ this.pathMatch = pathMatch;
+ this.pathUnmatch = pathUnmatch;
+ this.match = match;
+ this.unmatch = unmatch;
+ this.matchType = matchType;
+ this.matchMappingType = matchMappingType;
+ this.mapping = mapping;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public Map<String, Object> conf() {
+ return this.conf;
+ }
+
+ public boolean match(ContentPath path, String name, String dynamicType) {
+ if (pathMatch != null && !patternMatch(pathMatch, path.fullPathAsText(name))) {
+ return false;
+ }
+ if (match != null && !patternMatch(match, name)) {
+ return false;
+ }
+ if (pathUnmatch != null && patternMatch(pathUnmatch, path.fullPathAsText(name))) {
+ return false;
+ }
+ if (unmatch != null && patternMatch(unmatch, name)) {
+ return false;
+ }
+ if (matchMappingType != null) {
+ if (dynamicType == null) {
+ return false;
+ }
+ if (!patternMatch(matchMappingType, dynamicType)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public boolean hasType() {
+ return mapping.containsKey("type");
+ }
+
+ public String mappingType(String dynamicType) {
+ return mapping.containsKey("type") ? mapping.get("type").toString() : dynamicType;
+ }
+
+ private boolean patternMatch(String pattern, String str) {
+ if (matchType == MatchType.SIMPLE) {
+ return Regex.simpleMatch(pattern, str);
+ }
+ return str.matches(pattern);
+ }
+
+ public Map<String, Object> mappingForName(String name, String dynamicType) {
+ return processMap(mapping, name, dynamicType);
+ }
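+
+    // For example, with name "title" and a mapping of { "analyzer": "{name}_analyzer" },
+    // the processed mapping becomes { "analyzer": "title_analyzer" }; the {dynamic_type}
+    // and {dynamicType} placeholders are substituted the same way.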
+
+ private Map<String, Object> processMap(Map<String, Object> map, String name, String dynamicType) {
+ Map<String, Object> processedMap = Maps.newHashMap();
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ String key = entry.getKey().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
+ Object value = entry.getValue();
+ if (value instanceof Map) {
+ value = processMap((Map<String, Object>) value, name, dynamicType);
+ } else if (value instanceof List) {
+ value = processList((List) value, name, dynamicType);
+ } else if (value instanceof String) {
+ value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
+ }
+ processedMap.put(key, value);
+ }
+ return processedMap;
+ }
+
+ private List processList(List list, String name, String dynamicType) {
+ List processedList = new ArrayList();
+ for (Object value : list) {
+ if (value instanceof Map) {
+ value = processMap((Map<String, Object>) value, name, dynamicType);
+ } else if (value instanceof List) {
+ value = processList((List) value, name, dynamicType);
+ } else if (value instanceof String) {
+ value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
+ }
+ processedList.add(value);
+ }
+ return processedList;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ DynamicTemplate that = (DynamicTemplate) o;
+
+        // two templates are considered equal if they have the same match criteria; the mapping itself is not compared
+ if (match != null ? !match.equals(that.match) : that.match != null) {
+ return false;
+ }
+ if (matchMappingType != null ? !matchMappingType.equals(that.matchMappingType) : that.matchMappingType != null) {
+ return false;
+ }
+ if (matchType != that.matchType) {
+ return false;
+ }
+ if (unmatch != null ? !unmatch.equals(that.unmatch) : that.unmatch != null) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+        // hash only the match criteria, consistent with equals above
+ int result = match != null ? match.hashCode() : 0;
+ result = 31 * result + (unmatch != null ? unmatch.hashCode() : 0);
+ result = 31 * result + (matchType != null ? matchType.hashCode() : 0);
+ result = 31 * result + (matchMappingType != null ? matchMappingType.hashCode() : 0);
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
new file mode 100644
index 0000000..f436d56
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
@@ -0,0 +1,964 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
+
+/** Mapper for object and nested fields. */
+public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll {
+
+ public static final String CONTENT_TYPE = "object";
+ public static final String NESTED_CONTENT_TYPE = "nested";
+
+ public static class Defaults {
+ public static final boolean ENABLED = true;
+ public static final Nested NESTED = Nested.NO;
+ public static final Dynamic DYNAMIC = null; // not set, inherited from root
+ public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
+ }
+
+ public static enum Dynamic {
+ TRUE,
+ FALSE,
+ STRICT
+ }
+
+ public static class Nested {
+
+ public static final Nested NO = new Nested(false, false, false);
+
+ public static Nested newNested(boolean includeInParent, boolean includeInRoot) {
+ return new Nested(true, includeInParent, includeInRoot);
+ }
+
+ private final boolean nested;
+
+ private final boolean includeInParent;
+
+ private final boolean includeInRoot;
+
+ private Nested(boolean nested, boolean includeInParent, boolean includeInRoot) {
+ this.nested = nested;
+ this.includeInParent = includeInParent;
+ this.includeInRoot = includeInRoot;
+ }
+
+ public boolean isNested() {
+ return nested;
+ }
+
+ public boolean isIncludeInParent() {
+ return includeInParent;
+ }
+
+ public boolean isIncludeInRoot() {
+ return includeInRoot;
+ }
+ }
+
+ public static class Builder<T extends Builder, Y extends ObjectMapper> extends Mapper.Builder<T, Y> {
+
+ protected boolean enabled = Defaults.ENABLED;
+
+ protected Nested nested = Defaults.NESTED;
+
+ protected Dynamic dynamic = Defaults.DYNAMIC;
+
+ protected ContentPath.Type pathType = Defaults.PATH_TYPE;
+
+ protected Boolean includeInAll;
+
+ protected final List<Mapper.Builder> mappersBuilders = newArrayList();
+
+ public Builder(String name) {
+ super(name);
+ this.builder = (T) this;
+ }
+
+ public T enabled(boolean enabled) {
+ this.enabled = enabled;
+ return builder;
+ }
+
+ public T dynamic(Dynamic dynamic) {
+ this.dynamic = dynamic;
+ return builder;
+ }
+
+ public T nested(Nested nested) {
+ this.nested = nested;
+ return builder;
+ }
+
+ public T pathType(ContentPath.Type pathType) {
+ this.pathType = pathType;
+ return builder;
+ }
+
+ public T includeInAll(boolean includeInAll) {
+ this.includeInAll = includeInAll;
+ return builder;
+ }
+
+ public T add(Mapper.Builder builder) {
+ mappersBuilders.add(builder);
+ return this.builder;
+ }
+
+ @Override
+ public Y build(BuilderContext context) {
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(pathType);
+ context.path().add(name);
+
+ Map<String, Mapper> mappers = new HashMap<String, Mapper>();
+ for (Mapper.Builder builder : mappersBuilders) {
+ Mapper mapper = builder.build(context);
+ mappers.put(mapper.name(), mapper);
+ }
+ context.path().pathType(origPathType);
+ context.path().remove();
+
+ ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers);
+ objectMapper.includeInAllIfNotSet(includeInAll);
+
+ return (Y) objectMapper;
+ }
+
+ protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) {
+ return new ObjectMapper(name, fullPath, enabled, nested, dynamic, pathType, mappers);
+ }
+ }
+
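+    /**
+     * Parses object mappings. For illustration, a definition such as
+     * <pre>
+     * { "type": "nested", "include_in_parent": true, "properties": { ... } }
+     * </pre>
+     * yields a nested object mapper whose fields are also indexed into the parent document.
+     */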
+ public static class TypeParser implements Mapper.TypeParser {
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ Map<String, Object> objectNode = node;
+ ObjectMapper.Builder builder = createBuilder(name);
+
+ boolean nested = false;
+ boolean nestedIncludeInParent = false;
+ boolean nestedIncludeInRoot = false;
+ for (Map.Entry<String, Object> entry : objectNode.entrySet()) {
+ String fieldName = Strings.toUnderscoreCase(entry.getKey());
+ Object fieldNode = entry.getValue();
+
+ if (fieldName.equals("dynamic")) {
+ String value = fieldNode.toString();
+ if (value.equalsIgnoreCase("strict")) {
+ builder.dynamic(Dynamic.STRICT);
+ } else {
+ builder.dynamic(nodeBooleanValue(fieldNode) ? Dynamic.TRUE : Dynamic.FALSE);
+ }
+ } else if (fieldName.equals("type")) {
+ String type = fieldNode.toString();
+ if (type.equals(CONTENT_TYPE)) {
+ builder.nested = Nested.NO;
+ } else if (type.equals(NESTED_CONTENT_TYPE)) {
+ nested = true;
+ } else {
+ throw new MapperParsingException("Trying to parse an object but has a different type [" + type + "] for [" + name + "]");
+ }
+ } else if (fieldName.equals("include_in_parent")) {
+ nestedIncludeInParent = nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("include_in_root")) {
+ nestedIncludeInRoot = nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("enabled")) {
+ builder.enabled(nodeBooleanValue(fieldNode));
+ } else if (fieldName.equals("path")) {
+ builder.pathType(parsePathType(name, fieldNode.toString()));
+ } else if (fieldName.equals("properties")) {
+ parseProperties(builder, (Map<String, Object>) fieldNode, parserContext);
+ } else if (fieldName.equals("include_in_all")) {
+ builder.includeInAll(nodeBooleanValue(fieldNode));
+ } else {
+ processField(builder, fieldName, fieldNode);
+ }
+ }
+
+ if (nested) {
+ builder.nested = Nested.newNested(nestedIncludeInParent, nestedIncludeInRoot);
+ }
+
+ return builder;
+ }
+
+ private void parseProperties(ObjectMapper.Builder objBuilder, Map<String, Object> propsNode, ParserContext parserContext) {
+ for (Map.Entry<String, Object> entry : propsNode.entrySet()) {
+ String propName = entry.getKey();
+ Map<String, Object> propNode = (Map<String, Object>) entry.getValue();
+
+ String type;
+ Object typeNode = propNode.get("type");
+ if (typeNode != null) {
+ type = typeNode.toString();
+ } else {
+                    // let's see if we can derive this...
+ if (propNode.get("properties") != null) {
+ type = ObjectMapper.CONTENT_TYPE;
+ } else if (propNode.size() == 1 && propNode.get("enabled") != null) {
+                        // if there is a single property with only the enabled flag on it, make it an object
+                        // (usually enabled is set to false so that nothing under it gets indexed,
+                        // which only the object type supports)
+ type = ObjectMapper.CONTENT_TYPE;
+ } else {
+ throw new MapperParsingException("No type specified for property [" + propName + "]");
+ }
+ }
+
+ Mapper.TypeParser typeParser = parserContext.typeParser(type);
+ if (typeParser == null) {
+ throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + propName + "]");
+ }
+ objBuilder.add(typeParser.parse(propName, propNode, parserContext));
+ }
+ }
+
+ protected Builder createBuilder(String name) {
+ return object(name);
+ }
+
+ protected void processField(Builder builder, String fieldName, Object fieldNode) {
+
+ }
+ }
+
+ private final String name;
+
+ private final String fullPath;
+
+ private final boolean enabled;
+
+ private final Nested nested;
+
+ private final String nestedTypePathAsString;
+ private final BytesRef nestedTypePathAsBytes;
+
+ private final Filter nestedTypeFilter;
+
+ private volatile Dynamic dynamic;
+
+ private final ContentPath.Type pathType;
+
+ private Boolean includeInAll;
+
+ private volatile ImmutableOpenMap<String, Mapper> mappers = ImmutableOpenMap.of();
+
+ private final Object mutex = new Object();
+
+ ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) {
+ this.name = name;
+ this.fullPath = fullPath;
+ this.enabled = enabled;
+ this.nested = nested;
+ this.dynamic = dynamic;
+ this.pathType = pathType;
+ if (mappers != null) {
+ this.mappers = ImmutableOpenMap.builder(this.mappers).putAll(mappers).build();
+ }
+ this.nestedTypePathAsString = "__" + fullPath;
+ this.nestedTypePathAsBytes = new BytesRef(nestedTypePathAsString);
+ this.nestedTypeFilter = new TermFilter(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes));
+ }
+
+ @Override
+ public String name() {
+ return this.name;
+ }
+
+ @Override
+ public void includeInAll(Boolean includeInAll) {
+ if (includeInAll == null) {
+ return;
+ }
+ this.includeInAll = includeInAll;
+ // when called from outside, apply this on all the inner mappers
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ if (cursor.value instanceof AllFieldMapper.IncludeInAll) {
+ ((AllFieldMapper.IncludeInAll) cursor.value).includeInAll(includeInAll);
+ }
+ }
+ }
+
+ @Override
+ public void includeInAllIfNotSet(Boolean includeInAll) {
+ if (this.includeInAll == null) {
+ this.includeInAll = includeInAll;
+ }
+ // when called from outside, apply this on all the inner mappers
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ if (cursor.value instanceof AllFieldMapper.IncludeInAll) {
+ ((AllFieldMapper.IncludeInAll) cursor.value).includeInAllIfNotSet(includeInAll);
+ }
+ }
+ }
+
+ @Override
+ public void unsetIncludeInAll() {
+ includeInAll = null;
+ // when called from outside, apply this on all the inner mappers
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ if (cursor.value instanceof AllFieldMapper.IncludeInAll) {
+ ((AllFieldMapper.IncludeInAll) cursor.value).unsetIncludeInAll();
+ }
+ }
+ }
+
+ public Nested nested() {
+ return this.nested;
+ }
+
+ public Filter nestedTypeFilter() {
+ return this.nestedTypeFilter;
+ }
+
+ public ObjectMapper putMapper(Mapper mapper) {
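+        // copy-on-write: mappers is an immutable map, so readers never need to lock;
+        // the mutex below only serializes concurrent writers rebuilding the map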
+ if (mapper instanceof AllFieldMapper.IncludeInAll) {
+ ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
+ }
+ synchronized (mutex) {
+ this.mappers = ImmutableOpenMap.builder(this.mappers).fPut(mapper.name(), mapper).build();
+ }
+ return this;
+ }
+
+ @Override
+ public void traverse(FieldMapperListener fieldMapperListener) {
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ cursor.value.traverse(fieldMapperListener);
+ }
+ }
+
+ @Override
+ public void traverse(ObjectMapperListener objectMapperListener) {
+ objectMapperListener.objectMapper(this);
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ cursor.value.traverse(objectMapperListener);
+ }
+ }
+
+ public String fullPath() {
+ return this.fullPath;
+ }
+
+ public BytesRef nestedTypePathAsBytes() {
+ return nestedTypePathAsBytes;
+ }
+
+ public String nestedTypePathAsString() {
+ return nestedTypePathAsString;
+ }
+
+ public final Dynamic dynamic() {
+ return this.dynamic == null ? Dynamic.TRUE : this.dynamic;
+ }
+
+ protected boolean allowValue() {
+ return true;
+ }
+
+ public void parse(ParseContext context) throws IOException {
+ if (!enabled) {
+ context.parser().skipChildren();
+ return;
+ }
+ XContentParser parser = context.parser();
+
+ String currentFieldName = parser.currentName();
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.VALUE_NULL) {
+ // the object is null ("obj1" : null), simply bail
+ return;
+ }
+
+ if (token.isValue() && !allowValue()) {
+            // if we are parsing an object but it is just a value, it's only allowed on root-level
+            // parsers when there is a field name with the same name as the type
+ throw new MapperParsingException("object mapping for [" + name + "] tried to parse as object, but found a concrete value");
+ }
+
+ Document restoreDoc = null;
+ if (nested.isNested()) {
+ Document nestedDoc = new Document();
+ // pre add the uid field if possible (id was already provided)
+ IndexableField uidField = context.doc().getField(UidFieldMapper.NAME);
+ if (uidField != null) {
+ // we don't need to add it as a full uid field in nested docs, since we don't need versioning
+ // we also rely on this for UidField#loadVersion
+
+ // this is a deeply nested field
+ nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ }
+            // the type of the nested doc starts with __, so we can identify that it's a nested one in filters
+ // note, we don't prefix it with the type of the doc since it allows us to execute a nested query
+ // across types (for example, with similar nested objects)
+ nestedDoc.add(new Field(TypeFieldMapper.NAME, nestedTypePathAsString, TypeFieldMapper.Defaults.FIELD_TYPE));
+ restoreDoc = context.switchDoc(nestedDoc);
+ context.addDoc(nestedDoc);
+ }
+
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(pathType);
+
+ // if we are at the end of the previous object, advance
+ if (token == XContentParser.Token.END_OBJECT) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first
+ token = parser.nextToken();
+ }
+
+ while (token != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ serializeObject(context, currentFieldName);
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ serializeArray(context, currentFieldName);
+ } else if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NULL) {
+ serializeNullValue(context, currentFieldName);
+ } else if (token == null) {
+ throw new MapperParsingException("object mapping for [" + name + "] tried to parse as object, but got EOF, has a concrete value been provided to it?");
+ } else if (token.isValue()) {
+ serializeValue(context, currentFieldName, token);
+ }
+ token = parser.nextToken();
+ }
+        // restore the original path type
+ context.path().pathType(origPathType);
+ if (nested.isNested()) {
+ Document nestedDoc = context.switchDoc(restoreDoc);
+ if (nested.isIncludeInParent()) {
+ for (IndexableField field : nestedDoc.getFields()) {
+ if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) {
+ continue;
+ } else {
+ context.doc().add(field);
+ }
+ }
+ }
+ if (nested.isIncludeInRoot()) {
+            // don't add the fields twice: if they are included in the parent and the parent is the root doc, they were added already
+ if (!(nested.isIncludeInParent() && context.doc() == context.rootDoc())) {
+ for (IndexableField field : nestedDoc.getFields()) {
+ if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) {
+ continue;
+ } else {
+ context.rootDoc().add(field);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ private void serializeNullValue(ParseContext context, String lastFieldName) throws IOException {
+ // we can only handle null values if we have mappings for them
+ Mapper mapper = mappers.get(lastFieldName);
+ if (mapper != null) {
+ mapper.parse(context);
+ }
+ }
+
+ private void serializeObject(final ParseContext context, String currentFieldName) throws IOException {
+ if (currentFieldName == null) {
+ throw new MapperParsingException("object mapping [" + name + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]");
+ }
+ context.path().add(currentFieldName);
+
+ Mapper objectMapper = mappers.get(currentFieldName);
+ if (objectMapper != null) {
+ objectMapper.parse(context);
+ } else {
+ Dynamic dynamic = this.dynamic;
+ if (dynamic == null) {
+ dynamic = context.root().dynamic();
+ }
+ if (dynamic == Dynamic.STRICT) {
+ throw new StrictDynamicMappingException(fullPath, currentFieldName);
+ } else if (dynamic == Dynamic.TRUE) {
+                // we sync here just so we won't add it twice. It's not the end of the world
+                // to sync here, since subsequent operations will find it before getting here
+ synchronized (mutex) {
+ objectMapper = mappers.get(currentFieldName);
+ if (objectMapper == null) {
+ // remove the current field name from path, since template search and the object builder add it as well...
+ context.path().remove();
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
+ if (builder == null) {
+ builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(pathType);
+ // if this is a non root object, then explicitly set the dynamic behavior if set
+ if (!(this instanceof RootObjectMapper) && this.dynamic != Defaults.DYNAMIC) {
+ ((Builder) builder).dynamic(this.dynamic);
+ }
+ }
+ BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path());
+ objectMapper = builder.build(builderContext);
+ // ...now re add it
+ context.path().add(currentFieldName);
+ context.setMappingsModified();
+
+ if (context.isWithinNewMapper()) {
+ // within a new mapper, no need to traverse, just parse
+ objectMapper.parse(context);
+ } else {
+ // create a context of new mapper, so we batch aggregate all the changes within
+ // this object mapper once, and traverse all of them to add them in a single go
+ context.setWithinNewMapper();
+ try {
+ objectMapper.parse(context);
+ FieldMapperListener.Aggregator newFields = new FieldMapperListener.Aggregator();
+ ObjectMapperListener.Aggregator newObjects = new ObjectMapperListener.Aggregator();
+ objectMapper.traverse(newFields);
+ objectMapper.traverse(newObjects);
+ // callback on adding those fields!
+ context.docMapper().addFieldMappers(newFields.mappers);
+ context.docMapper().addObjectMappers(newObjects.mappers);
+ } finally {
+ context.clearWithinNewMapper();
+ }
+ }
+
+                    // only put it after we have traversed it and done the callbacks, so other parsing
+                    // threads will only see it once it has been properly traversed and its mappers added
+ putMapper(objectMapper);
+ } else {
+ objectMapper.parse(context);
+ }
+ }
+ } else {
+ // not dynamic, read everything up to end object
+ context.parser().skipChildren();
+ }
+ }
+
+ context.path().remove();
+ }
+
+ private void serializeArray(ParseContext context, String lastFieldName) throws IOException {
+ String arrayFieldName = lastFieldName;
+ Mapper mapper = mappers.get(lastFieldName);
+ if (mapper != null && mapper instanceof ArrayValueMapperParser) {
+ mapper.parse(context);
+ } else {
+ XContentParser parser = context.parser();
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ serializeObject(context, lastFieldName);
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ serializeArray(context, lastFieldName);
+ } else if (token == XContentParser.Token.FIELD_NAME) {
+ lastFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NULL) {
+ serializeNullValue(context, lastFieldName);
+ } else if (token == null) {
+ throw new MapperParsingException("object mapping for [" + name + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?");
+ } else {
+ serializeValue(context, lastFieldName, token);
+ }
+ }
+ }
+ }
+
+ private void serializeValue(final ParseContext context, String currentFieldName, XContentParser.Token token) throws IOException {
+ if (currentFieldName == null) {
+ throw new MapperParsingException("object mapping [" + name + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]");
+ }
+ Mapper mapper = mappers.get(currentFieldName);
+ if (mapper != null) {
+ mapper.parse(context);
+ } else {
+ parseDynamicValue(context, currentFieldName, token);
+ }
+ }
+
+ public void parseDynamicValue(final ParseContext context, String currentFieldName, XContentParser.Token token) throws IOException {
+ Dynamic dynamic = this.dynamic;
+ if (dynamic == null) {
+ dynamic = context.root().dynamic();
+ }
+ if (dynamic == Dynamic.STRICT) {
+ throw new StrictDynamicMappingException(fullPath, currentFieldName);
+ }
+ if (dynamic == Dynamic.FALSE) {
+ return;
+ }
+ // we sync here since we don't want to add this field twice to the document mapper.
+ // it's not the end of the world if we do, since we add it to the mappers once we create it,
+ // so next time we won't even get here for this field
+ synchronized (mutex) {
+ Mapper mapper = mappers.get(currentFieldName);
+ if (mapper == null) {
+ BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path());
+ if (token == XContentParser.Token.VALUE_STRING) {
+ boolean resolved = false;
+
+ // do a quick test to see if it fits a dynamic template; if so, use it.
+ // we need to do it here so we can handle things like attachment templates, where calling
+ // text() (to see if it's a date) causes the binary value to be cleared
+ if (!resolved) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null);
+ if (builder != null) {
+ mapper = builder.build(builderContext);
+ resolved = true;
+ }
+ }
+
+ if (!resolved && context.parser().textLength() == 0) {
+ // empty string with no mapping, treat it like a null value
+ return;
+ }
+
+ if (!resolved && context.root().dateDetection()) {
+ String text = context.parser().text();
+ // a sanity check, since a plain "1" would otherwise be parsed as a date as well
+ if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) {
+ for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) {
+ try {
+ dateTimeFormatter.parser().parseMillis(text);
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
+ if (builder == null) {
+ builder = dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter);
+ }
+ mapper = builder.build(builderContext);
+ resolved = true;
+ break;
+ } catch (Exception e) {
+ // failure to parse this, continue
+ }
+ }
+ }
+ }
+ if (!resolved && context.root().numericDetection()) {
+ String text = context.parser().text();
+ try {
+ Long.parseLong(text);
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
+ if (builder == null) {
+ builder = longField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ resolved = true;
+ } catch (Exception e) {
+ // not a long number
+ }
+ if (!resolved) {
+ try {
+ Double.parseDouble(text);
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
+ if (builder == null) {
+ builder = doubleField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ resolved = true;
+ } catch (Exception e) {
+ // not a double either
+ }
+ }
+ }
+ // DON'T do automatic ip detection, since it misbehaves on docs that contain both host names and ips
+ // check if it's an ip
+// if (!resolved && text.indexOf('.') != -1) {
+// try {
+// IpFieldMapper.ipToLong(text);
+// XContentMapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "ip");
+// if (builder == null) {
+// builder = ipField(currentFieldName);
+// }
+// mapper = builder.build(builderContext);
+// resolved = true;
+// } catch (Exception e) {
+// // failure to parse, not ip...
+// }
+// }
+ if (!resolved) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
+ if (builder == null) {
+ builder = stringField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ XContentParser.NumberType numberType = context.parser().numberType();
+ if (numberType == XContentParser.NumberType.INT) {
+ if (context.parser().estimatedNumberType()) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
+ if (builder == null) {
+ builder = longField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ } else {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
+ if (builder == null) {
+ builder = integerField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ }
+ } else if (numberType == XContentParser.NumberType.LONG) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
+ if (builder == null) {
+ builder = longField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ } else if (numberType == XContentParser.NumberType.FLOAT) {
+ if (context.parser().estimatedNumberType()) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
+ if (builder == null) {
+ builder = doubleField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ } else {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
+ if (builder == null) {
+ builder = floatField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ }
+ } else if (numberType == XContentParser.NumberType.DOUBLE) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
+ if (builder == null) {
+ builder = doubleField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean");
+ if (builder == null) {
+ builder = booleanField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary");
+ if (builder == null) {
+ builder = binaryField(currentFieldName);
+ }
+ mapper = builder.build(builderContext);
+ } else {
+ Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null);
+ if (builder != null) {
+ mapper = builder.build(builderContext);
+ } else {
+ // TODO how do we identify dynamically that it's a binary value?
+ throw new ElasticsearchIllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]");
+ }
+ }
+
+ if (context.isWithinNewMapper()) {
+ mapper.parse(context);
+ } else {
+ context.setWithinNewMapper();
+ try {
+ mapper.parse(context);
+ FieldMapperListener.Aggregator newFields = new FieldMapperListener.Aggregator();
+ mapper.traverse(newFields);
+ context.docMapper().addFieldMappers(newFields.mappers);
+ } finally {
+ context.clearWithinNewMapper();
+ }
+ }
+
+ // only put the mapper in place after we traversed it and ran the callbacks, so other
+ // parsing threads will see it only once it has been properly traversed and its mappers added
+ putMapper(mapper);
+ context.setMappingsModified();
+ } else {
+ mapper.parse(context);
+ }
+ }
+ }
+
+ @Override
+ public void merge(final Mapper mergeWith, final MergeContext mergeContext) throws MergeMappingException {
+ if (!(mergeWith instanceof ObjectMapper)) {
+ mergeContext.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
+ return;
+ }
+ ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;
+
+ if (nested().isNested()) {
+ if (!mergeWithObject.nested().isNested()) {
+ mergeContext.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested");
+ return;
+ }
+ } else {
+ if (mergeWithObject.nested().isNested()) {
+ mergeContext.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested");
+ return;
+ }
+ }
+
+ if (!mergeContext.mergeFlags().simulate()) {
+ if (mergeWithObject.dynamic != null) {
+ this.dynamic = mergeWithObject.dynamic;
+ }
+ }
+
+ doMerge(mergeWithObject, mergeContext);
+
+ List<Mapper> mappersToPut = new ArrayList<Mapper>();
+ FieldMapperListener.Aggregator newFieldMappers = new FieldMapperListener.Aggregator();
+ ObjectMapperListener.Aggregator newObjectMappers = new ObjectMapperListener.Aggregator();
+ synchronized (mutex) {
+ for (ObjectObjectCursor<String, Mapper> cursor : mergeWithObject.mappers) {
+ Mapper mergeWithMapper = cursor.value;
+ Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name());
+ if (mergeIntoMapper == null) {
+ // no mapping, simply add it if not simulating
+ if (!mergeContext.mergeFlags().simulate()) {
+ mappersToPut.add(mergeWithMapper);
+ mergeWithMapper.traverse(newFieldMappers);
+ mergeWithMapper.traverse(newObjectMappers);
+ }
+ } else {
+ mergeIntoMapper.merge(mergeWithMapper, mergeContext);
+ }
+ }
+ if (!newFieldMappers.mappers.isEmpty()) {
+ mergeContext.docMapper().addFieldMappers(newFieldMappers.mappers);
+ }
+ if (!newObjectMappers.mappers.isEmpty()) {
+ mergeContext.docMapper().addObjectMappers(newObjectMappers.mappers);
+ }
+ // and add the mappers only after the administration has been done, so they will not be visible to the parser (which first tries to read without a lock)
+ for (Mapper mapper : mappersToPut) {
+ putMapper(mapper);
+ }
+ }
+
+ }
+
+ protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) {
+
+ }
+
+ @Override
+ public void close() {
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ cursor.value.close();
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ toXContent(builder, params, null, Mapper.EMPTY_ARRAY);
+ return builder;
+ }
+
+ public void toXContent(XContentBuilder builder, Params params, ToXContent custom, Mapper... additionalMappers) throws IOException {
+ builder.startObject(name);
+ if (nested.isNested()) {
+ builder.field("type", NESTED_CONTENT_TYPE);
+ if (nested.isIncludeInParent()) {
+ builder.field("include_in_parent", true);
+ }
+ if (nested.isIncludeInRoot()) {
+ builder.field("include_in_root", true);
+ }
+ } else if (mappers.isEmpty()) { // only write the object content type if there are no properties; otherwise it is automatically detected
+ builder.field("type", CONTENT_TYPE);
+ }
+ if (dynamic != null) {
+ builder.field("dynamic", dynamic.name().toLowerCase(Locale.ROOT));
+ }
+ if (enabled != Defaults.ENABLED) {
+ builder.field("enabled", enabled);
+ }
+ if (pathType != Defaults.PATH_TYPE) {
+ builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
+ }
+ if (includeInAll != null) {
+ builder.field("include_in_all", includeInAll);
+ }
+
+ if (custom != null) {
+ custom.toXContent(builder, params);
+ }
+
+ doXContent(builder, params);
+
+ // sort the mappers so we get a consistent serialization format
+ TreeMap<String, Mapper> sortedMappers = new TreeMap<String, Mapper>();
+ for (ObjectObjectCursor<String, Mapper> cursor : mappers) {
+ sortedMappers.put(cursor.key, cursor.value);
+ }
+
+ // write the internal mappers first (this is only relevant for the root object)
+ for (Mapper mapper : sortedMappers.values()) {
+ if (mapper instanceof InternalMapper) {
+ mapper.toXContent(builder, params);
+ }
+ }
+ if (additionalMappers != null && additionalMappers.length > 0) {
+ TreeMap<String, Mapper> additionalSortedMappers = new TreeMap<String, Mapper>();
+ for (Mapper mapper : additionalMappers) {
+ additionalSortedMappers.put(mapper.name(), mapper);
+ }
+
+ for (Mapper mapper : additionalSortedMappers.values()) {
+ mapper.toXContent(builder, params);
+ }
+ }
+
+ if (!mappers.isEmpty()) {
+ builder.startObject("properties");
+ for (Mapper mapper : sortedMappers.values()) {
+ if (!(mapper instanceof InternalMapper)) {
+ mapper.toXContent(builder, params);
+ }
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+
+ }
+}
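The string branch of parseDynamicValue above resolves an unmapped value in a fixed order: dynamic template lookup, empty-string-as-null, date detection (guarded by the ':'/'-'/'/' occurrence counts), long and double detection, and finally a plain string mapping. A minimal standalone sketch of that precedence, where DetectedType, detect and count are illustrative stand-ins rather than mapper APIs, and template lookup is omitted:

    enum DetectedType { NULL_LIKE, DATE, LONG, DOUBLE, STRING }

    static DetectedType detect(String text, boolean dateDetection, boolean numericDetection) {
        if (text.isEmpty()) {
            return DetectedType.NULL_LIKE; // empty string with no mapping is treated like a null value
        }
        // same guard as above: require more than one ':', '-' or '/' so that
        // a plain "1" is never handed to the date parsers
        if (dateDetection && (count(text, ':') > 1 || count(text, '-') > 1 || count(text, '/') > 1)) {
            try {
                new java.text.SimpleDateFormat("yyyy/MM/dd").parse(text); // stand-in for the dynamic date formatters
                return DetectedType.DATE;
            } catch (java.text.ParseException e) {
                // not a date, fall through
            }
        }
        if (numericDetection) {
            try { Long.parseLong(text); return DetectedType.LONG; } catch (NumberFormatException e) { /* not a long */ }
            try { Double.parseDouble(text); return DetectedType.DOUBLE; } catch (NumberFormatException e) { /* not a double */ }
        }
        return DetectedType.STRING; // the fallback, exactly as in the code above
    }

    static int count(String s, char c) {
        int n = 0;
        for (int i = 0; i < s.length(); i++) if (s.charAt(i) == c) n++;
        return n;
    }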
diff --git a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java
new file mode 100644
index 0000000..b8f593a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;
+
+/**
+ *
+ */
+public class RootObjectMapper extends ObjectMapper {
+
+ public static class Defaults {
+ public static final FormatDateTimeFormatter[] DYNAMIC_DATE_TIME_FORMATTERS =
+ new FormatDateTimeFormatter[]{
+ DateFieldMapper.Defaults.DATE_TIME_FORMATTER,
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd")
+ };
+ public static final boolean DATE_DETECTION = true;
+ public static final boolean NUMERIC_DETECTION = false;
+ }
+
+ public static class Builder extends ObjectMapper.Builder<Builder, RootObjectMapper> {
+
+ protected final List<DynamicTemplate> dynamicTemplates = newArrayList();
+
+ // we use this to filter out seen date formats, because we might get duplicates during merging
+ protected Set<String> seenDateFormats = Sets.newHashSet();
+ protected List<FormatDateTimeFormatter> dynamicDateTimeFormatters = newArrayList();
+
+ protected boolean dateDetection = Defaults.DATE_DETECTION;
+ protected boolean numericDetection = Defaults.NUMERIC_DETECTION;
+
+ public Builder(String name) {
+ super(name);
+ this.builder = this;
+ }
+
+ public Builder noDynamicDateTimeFormatter() {
+ this.dynamicDateTimeFormatters = null;
+ return builder;
+ }
+
+ public Builder dynamicDateTimeFormatter(Iterable<FormatDateTimeFormatter> dateTimeFormatters) {
+ for (FormatDateTimeFormatter dateTimeFormatter : dateTimeFormatters) {
+ if (!seenDateFormats.contains(dateTimeFormatter.format())) {
+ seenDateFormats.add(dateTimeFormatter.format());
+ this.dynamicDateTimeFormatters.add(dateTimeFormatter);
+ }
+ }
+ return builder;
+ }
+
+ public Builder add(DynamicTemplate dynamicTemplate) {
+ this.dynamicTemplates.add(dynamicTemplate);
+ return this;
+ }
+
+ public Builder add(DynamicTemplate... dynamicTemplate) {
+ for (DynamicTemplate template : dynamicTemplate) {
+ this.dynamicTemplates.add(template);
+ }
+ return this;
+ }
+
+
+ @Override
+ protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) {
+ assert !nested.isNested();
+ FormatDateTimeFormatter[] dates = null;
+ if (dynamicDateTimeFormatters == null) {
+ dates = new FormatDateTimeFormatter[0];
+ } else if (dynamicDateTimeFormatters.isEmpty()) {
+ // none explicitly configured, use the defaults
+ dates = Defaults.DYNAMIC_DATE_TIME_FORMATTERS;
+ } else {
+ dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
+ }
+ return new RootObjectMapper(name, enabled, dynamic, pathType, mappers,
+ dates,
+ dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
+ dateDetection, numericDetection);
+ }
+ }
+
+ public static class TypeParser extends ObjectMapper.TypeParser {
+
+ @Override
+ protected ObjectMapper.Builder createBuilder(String name) {
+ return new Builder(name);
+ }
+
+ @Override
+ protected void processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode) {
+ if (fieldName.equals("date_formats") || fieldName.equals("dynamic_date_formats")) {
+ List<FormatDateTimeFormatter> dateTimeFormatters = newArrayList();
+ if (fieldNode instanceof List) {
+ for (Object node1 : (List) fieldNode) {
+ dateTimeFormatters.add(parseDateTimeFormatter(fieldName, node1));
+ }
+ } else if ("none".equals(fieldNode.toString())) {
+ dateTimeFormatters = null;
+ } else {
+ dateTimeFormatters.add(parseDateTimeFormatter(fieldName, fieldNode));
+ }
+ if (dateTimeFormatters == null) {
+ ((Builder) builder).noDynamicDateTimeFormatter();
+ } else {
+ ((Builder) builder).dynamicDateTimeFormatter(dateTimeFormatters);
+ }
+ } else if (fieldName.equals("dynamic_templates")) {
+ // "dynamic_templates" : [
+ // {
+ // "template_1" : {
+ // "match" : "*_test",
+ // "match_mapping_type" : "string",
+ // "mapping" : { "type" : "string", "store" : "yes" }
+ // }
+ // }
+ // ]
+ List tmplNodes = (List) fieldNode;
+ for (Object tmplNode : tmplNodes) {
+ Map<String, Object> tmpl = (Map<String, Object>) tmplNode;
+ if (tmpl.size() != 1) {
+ throw new MapperParsingException("A dynamic template must be defined with a name");
+ }
+ Map.Entry<String, Object> entry = tmpl.entrySet().iterator().next();
+ ((Builder) builder).add(DynamicTemplate.parse(entry.getKey(), (Map<String, Object>) entry.getValue()));
+ }
+ } else if (fieldName.equals("date_detection")) {
+ ((Builder) builder).dateDetection = nodeBooleanValue(fieldNode);
+ } else if (fieldName.equals("numeric_detection")) {
+ ((Builder) builder).numericDetection = nodeBooleanValue(fieldNode);
+ }
+ }
+ }
+
+ private final FormatDateTimeFormatter[] dynamicDateTimeFormatters;
+
+ private final boolean dateDetection;
+ private final boolean numericDetection;
+
+ private volatile DynamicTemplate[] dynamicTemplates;
+
+ RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers,
+ FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate[] dynamicTemplates, boolean dateDetection, boolean numericDetection) {
+ super(name, name, enabled, Nested.NO, dynamic, pathType, mappers);
+ this.dynamicTemplates = dynamicTemplates;
+ this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
+ this.dateDetection = dateDetection;
+ this.numericDetection = numericDetection;
+ }
+
+ public boolean dateDetection() {
+ return this.dateDetection;
+ }
+
+ public boolean numericDetection() {
+ return this.numericDetection;
+ }
+
+ public FormatDateTimeFormatter[] dynamicDateTimeFormatters() {
+ return dynamicDateTimeFormatters;
+ }
+
+ public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType) {
+ return findTemplateBuilder(context, name, dynamicType, dynamicType);
+ }
+
+ public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType, String matchType) {
+ DynamicTemplate dynamicTemplate = findTemplate(context.path(), name, matchType);
+ if (dynamicTemplate == null) {
+ return null;
+ }
+ Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext();
+ String mappingType = dynamicTemplate.mappingType(dynamicType);
+ Mapper.TypeParser typeParser = parserContext.typeParser(mappingType);
+ if (typeParser == null) {
+ throw new MapperParsingException("failed to find type parsed [" + mappingType + "] for [" + name + "]");
+ }
+ return typeParser.parse(name, dynamicTemplate.mappingForName(name, dynamicType), parserContext);
+ }
+
+ public DynamicTemplate findTemplate(ContentPath path, String name, String matchType) {
+ for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
+ if (dynamicTemplate.match(path, name, matchType)) {
+ return dynamicTemplate;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ protected boolean allowValue() {
+ return true;
+ }
+
+ @Override
+ protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) {
+ RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
+ if (!mergeContext.mergeFlags().simulate()) {
+ // merge them
+ List<DynamicTemplate> mergedTemplates = Lists.newArrayList(Arrays.asList(this.dynamicTemplates));
+ for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
+ boolean replaced = false;
+ for (int i = 0; i < mergedTemplates.size(); i++) {
+ if (mergedTemplates.get(i).name().equals(template.name())) {
+ mergedTemplates.set(i, template);
+ replaced = true;
+ }
+ }
+ if (!replaced) {
+ mergedTemplates.add(template);
+ }
+ }
+ this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
+ }
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ if (dynamicDateTimeFormatters != Defaults.DYNAMIC_DATE_TIME_FORMATTERS) {
+ if (dynamicDateTimeFormatters.length > 0) {
+ builder.startArray("dynamic_date_formats");
+ for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters) {
+ builder.value(dateTimeFormatter.format());
+ }
+ builder.endArray();
+ }
+ }
+
+ if (dynamicTemplates != null && dynamicTemplates.length > 0) {
+ builder.startArray("dynamic_templates");
+ for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
+ builder.startObject();
+ builder.field(dynamicTemplate.name());
+ builder.map(dynamicTemplate.conf());
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+
+ if (dateDetection != Defaults.DATE_DETECTION) {
+ builder.field("date_detection", dateDetection);
+ }
+ if (numericDetection != Defaults.NUMERIC_DETECTION) {
+ builder.field("numeric_detection", numericDetection);
+ }
+ }
+}
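The commented JSON example in the TypeParser above corresponds one-to-one to the builder API in this file; a sketch assembling the same template and a custom dynamic date format programmatically (imports and the final build call omitted; "my_type" is a placeholder name):

    Map<String, Object> mapping = new HashMap<String, Object>();
    mapping.put("type", "string");
    mapping.put("store", "yes");
    Map<String, Object> template = new HashMap<String, Object>();
    template.put("match", "*_test");
    template.put("match_mapping_type", "string");
    template.put("mapping", mapping);

    RootObjectMapper.Builder root = new RootObjectMapper.Builder("my_type")
            .add(DynamicTemplate.parse("template_1", template))
            .dynamicDateTimeFormatter(Arrays.asList(Joda.forPattern("yyyy/MM/dd")));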
diff --git a/src/main/java/org/elasticsearch/index/merge/EnableMergeScheduler.java b/src/main/java/org/elasticsearch/index/merge/EnableMergeScheduler.java
new file mode 100644
index 0000000..090d499
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/EnableMergeScheduler.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge;
+
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MergeScheduler;
+
+import java.io.IOException;
+
+/**
+ * A wrapper around another {@link org.apache.lucene.index.MergeScheduler} that allows
+ * merges to be explicitly enabled and disabled on a per-thread basis. The default is
+ * to have merges disabled.
+ * <p/>
+ * This merge scheduler can be used to work around the fact that, even though a merge
+ * policy can ensure that no new merges are created as a result of a segment flush
+ * (during an indexing operation, for example), the {@link #merge(org.apache.lucene.index.IndexWriter)}
+ * method will still be called, which can stall indexing.
+ */
+public class EnableMergeScheduler extends MergeScheduler {
+
+ private final MergeScheduler mergeScheduler;
+
+ private final ThreadLocal<Boolean> enabled = new ThreadLocal<Boolean>() {
+ @Override
+ protected Boolean initialValue() {
+ return Boolean.FALSE;
+ }
+ };
+
+ public EnableMergeScheduler(MergeScheduler mergeScheduler) {
+ this.mergeScheduler = mergeScheduler;
+ }
+
+ /**
+ * Enable merges on the current thread.
+ */
+ void enableMerge() {
+ assert !enabled.get();
+ enabled.set(Boolean.TRUE);
+ }
+
+ /**
+ * Disable merges on the current thread.
+ */
+ void disableMerge() {
+ assert enabled.get();
+ enabled.set(Boolean.FALSE);
+ }
+
+ @Override
+ public void merge(IndexWriter writer) throws IOException {
+ if (enabled.get()) {
+ mergeScheduler.merge(writer);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ mergeScheduler.close();
+ }
+
+ @Override
+ public MergeScheduler clone() {
+ // Lucene IW makes a clone internally but since we hold on to this instance
+ // the clone will just be the identity.
+ return this;
+ }
+}
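Since enableMerge() and disableMerge() are package-private, the toggle is meant to be driven from within this package (the Merges helper below); the calling pattern is an enable/try/finally-disable bracket around the explicit merge call. A sketch, assuming same-package access and an IndexWriter named writer that was built with this scheduler installed:

    EnableMergeScheduler scheduler = new EnableMergeScheduler(new ConcurrentMergeScheduler());
    scheduler.enableMerge();          // merges now pass through on this thread
    try {
        writer.maybeMerge();          // forwarded to the wrapped ConcurrentMergeScheduler
    } finally {
        scheduler.disableMerge();     // back to the default: merge() calls become no-ops again
    }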
diff --git a/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/src/main/java/org/elasticsearch/index/merge/MergeStats.java
new file mode 100644
index 0000000..ee451b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/MergeStats.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MergeStats implements Streamable, ToXContent {
+
+ private long total;
+ private long totalTimeInMillis;
+ private long totalNumDocs;
+ private long totalSizeInBytes;
+ private long current;
+ private long currentNumDocs;
+ private long currentSizeInBytes;
+
+ public MergeStats() {
+
+ }
+
+ public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes, long currentMerges, long currentNumDocs, long currentSizeInBytes) {
+ this.total += totalMerges;
+ this.totalTimeInMillis += totalMergeTime;
+ this.totalNumDocs += totalNumDocs;
+ this.totalSizeInBytes += totalSizeInBytes;
+ this.current += currentMerges;
+ this.currentNumDocs += currentNumDocs;
+ this.currentSizeInBytes += currentSizeInBytes;
+ }
+
+ public void add(MergeStats mergeStats) {
+ if (mergeStats == null) {
+ return;
+ }
+ this.total += mergeStats.total;
+ this.totalTimeInMillis += mergeStats.totalTimeInMillis;
+ this.totalNumDocs += mergeStats.totalNumDocs;
+ this.totalSizeInBytes += mergeStats.totalSizeInBytes;
+ this.current += mergeStats.current;
+ this.currentNumDocs += mergeStats.currentNumDocs;
+ this.currentSizeInBytes += mergeStats.currentSizeInBytes;
+ }
+
+ /**
+ * The total number of merges executed.
+ */
+ public long getTotal() {
+ return this.total;
+ }
+
+ /**
+ * The total time merges have been executed (in milliseconds).
+ */
+ public long getTotalTimeInMillis() {
+ return this.totalTimeInMillis;
+ }
+
+ /**
+ * The total time merges have been executed.
+ */
+ public TimeValue getTotalTime() {
+ return new TimeValue(totalTimeInMillis);
+ }
+
+ public long getTotalNumDocs() {
+ return this.totalNumDocs;
+ }
+
+ public long getTotalSizeInBytes() {
+ return this.totalSizeInBytes;
+ }
+
+ public ByteSizeValue getTotalSize() {
+ return new ByteSizeValue(totalSizeInBytes);
+ }
+
+ /**
+ * The current number of merges executing.
+ */
+ public long getCurrent() {
+ return this.current;
+ }
+
+ public long getCurrentNumDocs() {
+ return this.currentNumDocs;
+ }
+
+ public long getCurrentSizeInBytes() {
+ return this.currentSizeInBytes;
+ }
+
+ public ByteSizeValue getCurrentSize() {
+ return new ByteSizeValue(currentSizeInBytes);
+ }
+
+ public static MergeStats readMergeStats(StreamInput in) throws IOException {
+ MergeStats stats = new MergeStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.MERGES);
+ builder.field(Fields.CURRENT, current);
+ builder.field(Fields.CURRENT_DOCS, currentNumDocs);
+ builder.byteSizeField(Fields.CURRENT_SIZE_IN_BYTES, Fields.CURRENT_SIZE, currentSizeInBytes);
+ builder.field(Fields.TOTAL, total);
+ builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, totalTimeInMillis);
+ builder.field(Fields.TOTAL_DOCS, totalNumDocs);
+ builder.byteSizeField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, totalSizeInBytes);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString MERGES = new XContentBuilderString("merges");
+ static final XContentBuilderString CURRENT = new XContentBuilderString("current");
+ static final XContentBuilderString CURRENT_DOCS = new XContentBuilderString("current_docs");
+ static final XContentBuilderString CURRENT_SIZE = new XContentBuilderString("current_size");
+ static final XContentBuilderString CURRENT_SIZE_IN_BYTES = new XContentBuilderString("current_size_in_bytes");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_TIME = new XContentBuilderString("total_time");
+ static final XContentBuilderString TOTAL_TIME_IN_MILLIS = new XContentBuilderString("total_time_in_millis");
+ static final XContentBuilderString TOTAL_DOCS = new XContentBuilderString("total_docs");
+ static final XContentBuilderString TOTAL_SIZE = new XContentBuilderString("total_size");
+ static final XContentBuilderString TOTAL_SIZE_IN_BYTES = new XContentBuilderString("total_size_in_bytes");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readVLong();
+ totalTimeInMillis = in.readVLong();
+ totalNumDocs = in.readVLong();
+ totalSizeInBytes = in.readVLong();
+ current = in.readVLong();
+ currentNumDocs = in.readVLong();
+ currentSizeInBytes = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(total);
+ out.writeVLong(totalTimeInMillis);
+ out.writeVLong(totalNumDocs);
+ out.writeVLong(totalSizeInBytes);
+ out.writeVLong(current);
+ out.writeVLong(currentNumDocs);
+ out.writeVLong(currentSizeInBytes);
+ }
+} \ No newline at end of file
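Because add(MergeStats) sums every counter and silently ignores null, per-shard stats can be rolled up into an index- or node-level total with a plain loop; a sketch, where perShardStats is a hypothetical Iterable<MergeStats>:

    MergeStats total = new MergeStats();
    for (MergeStats shard : perShardStats) {
        total.add(shard);             // null entries are ignored by add()
    }
    long avgMergeMillis = total.getTotal() == 0 ? 0 : total.getTotalTimeInMillis() / total.getTotal();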
diff --git a/src/main/java/org/elasticsearch/index/merge/Merges.java b/src/main/java/org/elasticsearch/index/merge/Merges.java
new file mode 100644
index 0000000..460ca78
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/Merges.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge;
+
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MergeScheduler;
+
+import java.io.IOException;
+
+/**
+ * A helper to execute the explicit merge APIs of {@link org.apache.lucene.index.IndexWriter}. If
+ * the merge scheduler is an {@link org.elasticsearch.index.merge.EnableMergeScheduler}, merges are
+ * explicitly enabled before the call and disabled again at the end.
+ * <p/>
+ * In our codebase, at least until we can somehow use this logic in Lucene IW itself, we should only use
+ * this class to execute explicit merges. The explicit merge calls have been added to the forbidden APIs
+ * list to make sure we don't call them unless we use this class.
+ */
+public class Merges {
+
+ /**
+ * See {@link org.apache.lucene.index.IndexWriter#maybeMerge()}, with the additional
+ * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
+ */
+ public static void maybeMerge(IndexWriter writer) throws IOException {
+ MergeScheduler mergeScheduler = writer.getConfig().getMergeScheduler();
+ if (mergeScheduler instanceof EnableMergeScheduler) {
+ ((EnableMergeScheduler) mergeScheduler).enableMerge();
+ try {
+ writer.maybeMerge();
+ } finally {
+ ((EnableMergeScheduler) mergeScheduler).disableMerge();
+ }
+ } else {
+ writer.maybeMerge();
+ }
+ }
+
+ /**
+ * See {@link org.apache.lucene.index.IndexWriter#forceMerge(int)}, with the additional
+ * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
+ */
+ public static void forceMerge(IndexWriter writer, int maxNumSegments) throws IOException {
+ forceMerge(writer, maxNumSegments, true);
+ }
+
+ /**
+ * See {@link org.apache.lucene.index.IndexWriter#forceMerge(int, boolean)}, with the additional
+ * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
+ */
+ public static void forceMerge(IndexWriter writer, int maxNumSegments, boolean doWait) throws IOException {
+ MergeScheduler mergeScheduler = writer.getConfig().getMergeScheduler();
+ if (mergeScheduler instanceof EnableMergeScheduler) {
+ ((EnableMergeScheduler) mergeScheduler).enableMerge();
+ try {
+ writer.forceMerge(maxNumSegments, doWait);
+ } finally {
+ ((EnableMergeScheduler) mergeScheduler).disableMerge();
+ }
+ } else {
+ writer.forceMerge(maxNumSegments, doWait);
+ }
+ }
+
+ /**
+ * See {@link org.apache.lucene.index.IndexWriter#forceMergeDeletes()}, with the additional
+ * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
+ */
+ public static void forceMergeDeletes(IndexWriter writer) throws IOException {
+ forceMergeDeletes(writer, true);
+ }
+
+ /**
+ * See {@link org.apache.lucene.index.IndexWriter#forceMergeDeletes(boolean)}, with the additional
+ * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
+ */
+ public static void forceMergeDeletes(IndexWriter writer, boolean doWait) throws IOException {
+ MergeScheduler mergeScheduler = writer.getConfig().getMergeScheduler();
+ if (mergeScheduler instanceof EnableMergeScheduler) {
+ ((EnableMergeScheduler) mergeScheduler).enableMerge();
+ try {
+ writer.forceMergeDeletes(doWait);
+ } finally {
+ ((EnableMergeScheduler) mergeScheduler).disableMerge();
+ }
+ } else {
+ writer.forceMergeDeletes(doWait);
+ }
+ }
+}
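At call sites the helper replaces the direct IndexWriter methods one-for-one, which is what makes the forbidden-APIs enforcement practical:

    Merges.maybeMerge(writer);          // instead of writer.maybeMerge()
    Merges.forceMerge(writer, 1);       // instead of writer.forceMerge(1)
    Merges.forceMergeDeletes(writer);   // instead of writer.forceMergeDeletes()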
diff --git a/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java b/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java
new file mode 100644
index 0000000..e391587
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/OnGoingMerge.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge;
+
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.SegmentCommitInfo;
+
+import java.util.List;
+
+/**
+ * Represents a single ongoing merge within an index.
+ */
+public class OnGoingMerge {
+
+ private final String id;
+ private final List<SegmentCommitInfo> mergedSegments;
+
+ public OnGoingMerge(MergePolicy.OneMerge merge) {
+ this.id = Integer.toString(System.identityHashCode(merge));
+ this.mergedSegments = merge.segments;
+ }
+
+ /**
+ * A unique id for the merge.
+ */
+ public String getId() {
+ return id;
+ }
+
+ /**
+ * The list of segments that are being merged.
+ */
+ public List<SegmentCommitInfo> getMergedSegments() {
+ return mergedSegments;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java
new file mode 100644
index 0000000..5b37623
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.index.MergePolicy;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.store.Store;
+
+public abstract class AbstractMergePolicyProvider<MP extends MergePolicy> extends AbstractIndexShardComponent implements MergePolicyProvider<MP> {
+
+ public static final String INDEX_COMPOUND_FORMAT = "index.compound_format";
+
+ protected volatile double noCFSRatio;
+
+ protected AbstractMergePolicyProvider(Store store) {
+ super(store.shardId(), store.indexSettings());
+ this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Boolean.toString(store.suggestUseCompoundFile())));
+ }
+
+ public static double parseNoCFSRatio(String noCFSRatio) {
+ noCFSRatio = noCFSRatio.trim();
+ if (noCFSRatio.equalsIgnoreCase("true")) {
+ return 1.0d;
+ } else if (noCFSRatio.equalsIgnoreCase("false")) {
+ return 0.0;
+ } else {
+ try {
+ double value = Double.parseDouble(noCFSRatio);
+ if (value < 0.0 || value > 1.0) {
+ throw new ElasticsearchIllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]");
+ }
+ return value;
+ } catch (NumberFormatException ex) {
+ throw new ElasticsearchIllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex);
+ }
+ }
+ }
+
+ public static String formatNoCFSRatio(double ratio) {
+ if (ratio == 1.0) {
+ return Boolean.TRUE.toString();
+ } else if (ratio == 0.0) {
+ return Boolean.FALSE.toString();
+ } else {
+ return Double.toString(ratio);
+ }
+ }
+
+}
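parseNoCFSRatio accepts a boolean as well as a ratio in [0..1], so the accepted forms of index.compound_format come down to a few representative inputs:

    AbstractMergePolicyProvider.parseNoCFSRatio("true");   // 1.0: always write compound files
    AbstractMergePolicyProvider.parseNoCFSRatio("false");  // 0.0: never write compound files
    AbstractMergePolicyProvider.parseNoCFSRatio("0.25");   // 0.25: compound files only for segments up to that fraction of the index
    AbstractMergePolicyProvider.parseNoCFSRatio("2.0");    // throws ElasticsearchIllegalArgumentException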
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/IndexUpgraderMergePolicy.java b/src/main/java/org/elasticsearch/index/merge/policy/IndexUpgraderMergePolicy.java
new file mode 100644
index 0000000..46bfe53
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/IndexUpgraderMergePolicy.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.packed.GrowableWriter;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A {@link MergePolicy} that upgrades segments.
+ * <p>
+ * It can be useful to use the background merging process to upgrade segments,
+ * for example when we perform internal changes that imply different index
+ * options, or when a user modifies their mapping in non-breaking ways: this
+ * merge policy could, for instance, add doc values to fields after the fact
+ * or, conversely, remove them.
+ * <p>
+ * For now, this {@link MergePolicy} takes care of moving versions that used to
+ * be stored as payloads to numeric doc values.
+ */
+public final class IndexUpgraderMergePolicy extends MergePolicy {
+
+ private final MergePolicy delegate;
+
+ /** @param delegate the merge policy to wrap */
+ public IndexUpgraderMergePolicy(MergePolicy delegate) {
+ this.delegate = delegate;
+ }
+
+ /** Return an "upgraded" view of the reader. */
+ static AtomicReader filter(AtomicReader reader) throws IOException {
+ final FieldInfos fieldInfos = reader.getFieldInfos();
+ final FieldInfo versionInfo = fieldInfos.fieldInfo(VersionFieldMapper.NAME);
+ if (versionInfo != null && versionInfo.hasDocValues()) {
+ // the reader is a recent one, it has versions and they are stored
+ // in a numeric doc values field
+ return reader;
+ }
+ // The segment is an old one, load all versions in memory and hide
+ // them behind a numeric doc values field
+ final Terms terms = reader.terms(UidFieldMapper.NAME);
+ if (terms == null || !terms.hasPayloads()) {
+ // The segment doesn't have an _uid field or doesn't have payloads;
+ // don't try to do anything clever. If any other segment has versions,
+ // all versions of this segment will be initialized to 0
+ return reader;
+ }
+ final TermsEnum uids = terms.iterator(null);
+ final GrowableWriter versions = new GrowableWriter(2, reader.maxDoc(), PackedInts.DEFAULT);
+ DocsAndPositionsEnum dpe = null;
+ for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) {
+ dpe = uids.docsAndPositions(reader.getLiveDocs(), dpe, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ assert dpe != null : "field has payloads";
+ for (int doc = dpe.nextDoc(); doc != DocsEnum.NO_MORE_DOCS; doc = dpe.nextDoc()) {
+ dpe.nextPosition();
+ final BytesRef payload = dpe.getPayload();
+ if (payload != null && payload.length == 8) {
+ final long version = Numbers.bytesToLong(payload);
+ versions.set(doc, version);
+ break;
+ }
+ }
+ }
+ // Build new field infos, doc values, and return a filter reader
+ final FieldInfo newVersionInfo;
+ if (versionInfo == null) {
+ // Find a free field number
+ int fieldNumber = 0;
+ for (FieldInfo fi : fieldInfos) {
+ fieldNumber = Math.max(fieldNumber, fi.number + 1);
+ }
+ newVersionInfo = new FieldInfo(VersionFieldMapper.NAME, false, fieldNumber, false, true, false,
+ IndexOptions.DOCS_ONLY, DocValuesType.NUMERIC, DocValuesType.NUMERIC, Collections.<String, String>emptyMap());
+ } else {
+ newVersionInfo = new FieldInfo(VersionFieldMapper.NAME, versionInfo.isIndexed(), versionInfo.number,
+ versionInfo.hasVectors(), versionInfo.omitsNorms(), versionInfo.hasPayloads(),
+ versionInfo.getIndexOptions(), versionInfo.getDocValuesType(), versionInfo.getNormType(), versionInfo.attributes());
+ }
+ final ArrayList<FieldInfo> fieldInfoList = new ArrayList<FieldInfo>();
+ for (FieldInfo info : fieldInfos) {
+ if (info != versionInfo) {
+ fieldInfoList.add(info);
+ }
+ }
+ fieldInfoList.add(newVersionInfo);
+ final FieldInfos newFieldInfos = new FieldInfos(fieldInfoList.toArray(new FieldInfo[fieldInfoList.size()]));
+ final NumericDocValues versionValues = new NumericDocValues() {
+ @Override
+ public long get(int index) {
+ return versions.get(index);
+ }
+ };
+ return new FilterAtomicReader(reader) {
+ @Override
+ public FieldInfos getFieldInfos() {
+ return newFieldInfos;
+ }
+ @Override
+ public NumericDocValues getNumericDocValues(String field) throws IOException {
+ if (VersionFieldMapper.NAME.equals(field)) {
+ return versionValues;
+ }
+ return super.getNumericDocValues(field);
+ }
+ @Override
+ public Bits getDocsWithField(String field) throws IOException {
+ return new Bits.MatchAllBits(in.maxDoc());
+ }
+ };
+ }
+
+ static class IndexUpgraderOneMerge extends OneMerge {
+
+ public IndexUpgraderOneMerge(List<SegmentCommitInfo> segments) {
+ super(segments);
+ }
+
+ @Override
+ public List<AtomicReader> getMergeReaders() throws IOException {
+ final List<AtomicReader> readers = super.getMergeReaders();
+ ImmutableList.Builder<AtomicReader> newReaders = ImmutableList.builder();
+ for (AtomicReader reader : readers) {
+ newReaders.add(filter(reader));
+ }
+ return newReaders.build();
+ }
+
+ }
+
+ static class IndexUpgraderMergeSpecification extends MergeSpecification {
+
+ @Override
+ public void add(OneMerge merge) {
+ super.add(new IndexUpgraderOneMerge(merge.segments));
+ }
+
+ @Override
+ public String segString(Directory dir) {
+ return "IndexUpgraderMergeSpec[" + super.segString(dir) + "]";
+ }
+
+ }
+
+ static MergeSpecification upgradedMergeSpecification(MergeSpecification spec) {
+ if (spec == null) {
+ return null;
+ }
+ MergeSpecification upgradedSpec = new IndexUpgraderMergeSpecification();
+ for (OneMerge merge : spec.merges) {
+ upgradedSpec.add(merge);
+ }
+ return upgradedSpec;
+ }
+
+ @Override
+ public MergeSpecification findMerges(MergeTrigger mergeTrigger,
+ SegmentInfos segmentInfos) throws IOException {
+ return upgradedMergeSpecification(delegate.findMerges(mergeTrigger, segmentInfos));
+ }
+
+ @Override
+ public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
+ int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
+ throws IOException {
+ return upgradedMergeSpecification(delegate.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge));
+ }
+
+ @Override
+ public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
+ throws IOException {
+ return upgradedMergeSpecification(delegate.findForcedDeletesMerges(segmentInfos));
+ }
+
+ @Override
+ public MergePolicy clone() {
+ return new IndexUpgraderMergePolicy(delegate.clone());
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ @Override
+ public boolean useCompoundFile(SegmentInfos segments,
+ SegmentCommitInfo newSegment) throws IOException {
+ return delegate.useCompoundFile(segments, newSegment);
+ }
+
+ @Override
+ public void setIndexWriter(IndexWriter writer) {
+ delegate.setIndexWriter(writer);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(" + delegate + ")";
+ }
+
+}
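Since the class only decorates a delegate, installing it is a one-line wrap around whatever policy the index would otherwise use; a sketch, assuming Lucene 4.6-era APIs and an Analyzer named analyzer in scope:

    MergePolicy base = new TieredMergePolicy();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46, analyzer);
    config.setMergePolicy(new IndexUpgraderMergePolicy(base));
    // segments touched by subsequent merges are then rewritten through the
    // "upgraded" filtered readers returned by getMergeReaders()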
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java
new file mode 100644
index 0000000..85f3d9a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.index.LogByteSizeMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.SegmentInfos;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.store.Store;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+/**
+ *
+ */
+public class LogByteSizeMergePolicyProvider extends AbstractMergePolicyProvider<LogByteSizeMergePolicy> {
+
+ private final IndexSettingsService indexSettingsService;
+
+ private volatile ByteSizeValue minMergeSize;
+ private volatile ByteSizeValue maxMergeSize;
+ private volatile int mergeFactor;
+ private volatile int maxMergeDocs;
+ private final boolean calibrateSizeByDeletes;
+ private boolean asyncMerge;
+
+ private final Set<CustomLogByteSizeMergePolicy> policies = new CopyOnWriteArraySet<CustomLogByteSizeMergePolicy>();
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ @Inject
+ public LogByteSizeMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
+ super(store);
+ Preconditions.checkNotNull(store, "Store must be provided to merge policy");
+ this.indexSettingsService = indexSettingsService;
+ this.minMergeSize = componentSettings.getAsBytesSize("min_merge_size", new ByteSizeValue((long) (LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024), ByteSizeUnit.BYTES));
+ this.maxMergeSize = componentSettings.getAsBytesSize("max_merge_size", new ByteSizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, ByteSizeUnit.MB));
+ this.mergeFactor = componentSettings.getAsInt("merge_factor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR);
+ this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs", LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS);
+ this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true);
+ this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
+ logger.debug("using [log_bytes_size] merge policy with merge_factor[{}], min_merge_size[{}], max_merge_size[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]",
+ mergeFactor, minMergeSize, maxMergeSize, maxMergeDocs, calibrateSizeByDeletes, asyncMerge);
+
+ indexSettingsService.addListener(applySettings);
+ }
+
+ @Override
+ public LogByteSizeMergePolicy newMergePolicy() {
+ CustomLogByteSizeMergePolicy mergePolicy;
+ if (asyncMerge) {
+ mergePolicy = new EnableMergeLogByteSizeMergePolicy(this);
+ } else {
+ mergePolicy = new CustomLogByteSizeMergePolicy(this);
+ }
+ mergePolicy.setMinMergeMB(minMergeSize.mbFrac());
+ mergePolicy.setMaxMergeMB(maxMergeSize.mbFrac());
+ mergePolicy.setMergeFactor(mergeFactor);
+ mergePolicy.setMaxMergeDocs(maxMergeDocs);
+ mergePolicy.setCalibrateSizeByDeletes(calibrateSizeByDeletes);
+ mergePolicy.setNoCFSRatio(noCFSRatio);
+
+ policies.add(mergePolicy);
+ return mergePolicy;
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ indexSettingsService.removeListener(applySettings);
+ }
+
+ public static final String INDEX_MERGE_POLICY_MIN_MERGE_SIZE = "index.merge.policy.min_merge_size";
+ public static final String INDEX_MERGE_POLICY_MAX_MERGE_SIZE = "index.merge.policy.max_merge_size";
+ public static final String INDEX_MERGE_POLICY_MAX_MERGE_DOCS = "index.merge.policy.max_merge_docs";
+ public static final String INDEX_MERGE_POLICY_MERGE_FACTOR = "index.merge.policy.merge_factor";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ ByteSizeValue minMergeSize = settings.getAsBytesSize(INDEX_MERGE_POLICY_MIN_MERGE_SIZE, LogByteSizeMergePolicyProvider.this.minMergeSize);
+ if (!minMergeSize.equals(LogByteSizeMergePolicyProvider.this.minMergeSize)) {
+ logger.info("updating min_merge_size from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.minMergeSize, minMergeSize);
+ LogByteSizeMergePolicyProvider.this.minMergeSize = minMergeSize;
+ for (CustomLogByteSizeMergePolicy policy : policies) {
+ policy.setMinMergeMB(minMergeSize.mbFrac());
+ }
+ }
+
+ ByteSizeValue maxMergeSize = settings.getAsBytesSize(INDEX_MERGE_POLICY_MAX_MERGE_SIZE, LogByteSizeMergePolicyProvider.this.maxMergeSize);
+ if (!maxMergeSize.equals(LogByteSizeMergePolicyProvider.this.maxMergeSize)) {
+ logger.info("updating max_merge_size from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.maxMergeSize, maxMergeSize);
+ LogByteSizeMergePolicyProvider.this.maxMergeSize = maxMergeSize;
+ for (CustomLogByteSizeMergePolicy policy : policies) {
+ policy.setMaxMergeMB(maxMergeSize.mbFrac());
+ }
+ }
+
+ int maxMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogByteSizeMergePolicyProvider.this.maxMergeDocs);
+ if (maxMergeDocs != LogByteSizeMergePolicyProvider.this.maxMergeDocs) {
+ logger.info("updating max_merge_docs from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.maxMergeDocs, maxMergeDocs);
+ LogByteSizeMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
+ for (CustomLogByteSizeMergePolicy policy : policies) {
+ policy.setMaxMergeDocs(maxMergeDocs);
+ }
+ }
+
+ int mergeFactor = settings.getAsInt(INDEX_MERGE_POLICY_MERGE_FACTOR, LogByteSizeMergePolicyProvider.this.mergeFactor);
+ if (mergeFactor != LogByteSizeMergePolicyProvider.this.mergeFactor) {
+ logger.info("updating merge_factor from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.mergeFactor, mergeFactor);
+ LogByteSizeMergePolicyProvider.this.mergeFactor = mergeFactor;
+ for (CustomLogByteSizeMergePolicy policy : policies) {
+ policy.setMergeFactor(mergeFactor);
+ }
+ }
+
+ final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(LogByteSizeMergePolicyProvider.this.noCFSRatio)));
+ if (noCFSRatio != LogByteSizeMergePolicyProvider.this.noCFSRatio) {
+ logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(LogByteSizeMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
+ LogByteSizeMergePolicyProvider.this.noCFSRatio = noCFSRatio;
+ for (CustomLogByteSizeMergePolicy policy : policies) {
+ policy.setNoCFSRatio(noCFSRatio);
+ }
+ }
+
+ }
+ }
+
+ public static class CustomLogByteSizeMergePolicy extends LogByteSizeMergePolicy {
+
+ private final LogByteSizeMergePolicyProvider provider;
+
+ public CustomLogByteSizeMergePolicy(LogByteSizeMergePolicyProvider provider) {
+ super();
+ this.provider = provider;
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ provider.policies.remove(this);
+ }
+
+ @Override
+ public MergePolicy clone() {
+ // Lucene's IndexWriter clones the merge policy internally; since we hold on
+ // to this instance, returning it makes the clone the identity.
+ return this;
+ }
+ }
+
+ public static class EnableMergeLogByteSizeMergePolicy extends CustomLogByteSizeMergePolicy {
+
+ public EnableMergeLogByteSizeMergePolicy(LogByteSizeMergePolicyProvider provider) {
+ super(provider);
+ }
+
+ @Override
+ public MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException {
+ // don't trigger merges on a segment flush while indexing; merges run in the background instead
+ if (trigger == MergeTrigger.SEGMENT_FLUSH) {
+ return null;
+ }
+ return super.findMerges(trigger, infos);
+ }
+ }
+}
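The ApplySettings listener above is what makes the index.merge.policy.* keys declared in this file updatable on a live index. A minimal usage sketch against the 1.x Java client; the index name is a placeholder, and the constant is the one defined above:

    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class UpdateLogByteSizeSettings {
        // Hypothetical sketch: shrink max_merge_size on a live index; the
        // refreshed settings reach ApplySettings, which pushes the new value
        // into every open CustomLogByteSizeMergePolicy.
        public static void shrinkMaxMergeSize(Client client) {
            client.admin().indices().prepareUpdateSettings("my_index")
                    .setSettings(settingsBuilder()
                            .put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_SIZE, "1gb"))
                    .execute().actionGet();
        }
    }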
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java
new file mode 100644
index 0000000..2e5396e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeTrigger;
+import org.apache.lucene.index.SegmentInfos;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Preconditions;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.store.Store;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+/**
+ *
+ */
+public class LogDocMergePolicyProvider extends AbstractMergePolicyProvider<LogDocMergePolicy> {
+
+ private final IndexSettingsService indexSettingsService;
+
+ private volatile int minMergeDocs;
+ private volatile int maxMergeDocs;
+ private volatile int mergeFactor;
+ private final boolean calibrateSizeByDeletes;
+ private boolean asyncMerge;
+
+ private final Set<CustomLogDocMergePolicy> policies = new CopyOnWriteArraySet<CustomLogDocMergePolicy>();
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ @Inject
+ public LogDocMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
+ super(store);
+ Preconditions.checkNotNull(store, "Store must be provided to merge policy");
+ this.indexSettingsService = indexSettingsService;
+
+ this.minMergeDocs = componentSettings.getAsInt("min_merge_docs", LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS);
+ this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs", LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS);
+ this.mergeFactor = componentSettings.getAsInt("merge_factor", LogDocMergePolicy.DEFAULT_MERGE_FACTOR);
+ this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true);
+ this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
+ logger.debug("using [log_doc] merge policy with merge_factor[{}], min_merge_docs[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]",
+ mergeFactor, minMergeDocs, maxMergeDocs, calibrateSizeByDeletes, asyncMerge);
+
+ indexSettingsService.addListener(applySettings);
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ indexSettingsService.removeListener(applySettings);
+ }
+
+ @Override
+ public LogDocMergePolicy newMergePolicy() {
+ CustomLogDocMergePolicy mergePolicy;
+ if (asyncMerge) {
+ mergePolicy = new EnableMergeLogDocMergePolicy(this);
+ } else {
+ mergePolicy = new CustomLogDocMergePolicy(this);
+ }
+ mergePolicy.setMinMergeDocs(minMergeDocs);
+ mergePolicy.setMaxMergeDocs(maxMergeDocs);
+ mergePolicy.setMergeFactor(mergeFactor);
+ mergePolicy.setCalibrateSizeByDeletes(calibrateSizeByDeletes);
+ mergePolicy.setNoCFSRatio(noCFSRatio);
+ policies.add(mergePolicy);
+ return mergePolicy;
+ }
+
+ public static final String INDEX_MERGE_POLICY_MIN_MERGE_DOCS = "index.merge.policy.min_merge_docs";
+ public static final String INDEX_MERGE_POLICY_MAX_MERGE_DOCS = "index.merge.policy.max_merge_docs";
+ public static final String INDEX_MERGE_POLICY_MERGE_FACTOR = "index.merge.policy.merge_factor";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ int minMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MIN_MERGE_DOCS, LogDocMergePolicyProvider.this.minMergeDocs);
+ if (minMergeDocs != LogDocMergePolicyProvider.this.minMergeDocs) {
+ logger.info("updating min_merge_docs from [{}] to [{}]", LogDocMergePolicyProvider.this.minMergeDocs, minMergeDocs);
+ LogDocMergePolicyProvider.this.minMergeDocs = minMergeDocs;
+ for (CustomLogDocMergePolicy policy : policies) {
+ policy.setMinMergeDocs(minMergeDocs);
+ }
+ }
+
+ int maxMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogDocMergePolicyProvider.this.maxMergeDocs);
+ if (maxMergeDocs != LogDocMergePolicyProvider.this.maxMergeDocs) {
+ logger.info("updating max_merge_docs from [{}] to [{}]", LogDocMergePolicyProvider.this.maxMergeDocs, maxMergeDocs);
+ LogDocMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
+ for (CustomLogDocMergePolicy policy : policies) {
+ policy.setMaxMergeDocs(maxMergeDocs);
+ }
+ }
+
+ int mergeFactor = settings.getAsInt(INDEX_MERGE_POLICY_MERGE_FACTOR, LogDocMergePolicyProvider.this.mergeFactor);
+ if (mergeFactor != LogDocMergePolicyProvider.this.mergeFactor) {
+ logger.info("updating merge_factor from [{}] to [{}]", LogDocMergePolicyProvider.this.mergeFactor, mergeFactor);
+ LogDocMergePolicyProvider.this.mergeFactor = mergeFactor;
+ for (CustomLogDocMergePolicy policy : policies) {
+ policy.setMergeFactor(mergeFactor);
+ }
+ }
+
+ final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(LogDocMergePolicyProvider.this.noCFSRatio)));
+ if (noCFSRatio != LogDocMergePolicyProvider.this.noCFSRatio) {
+ logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(LogDocMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
+ LogDocMergePolicyProvider.this.noCFSRatio = noCFSRatio;
+ for (CustomLogDocMergePolicy policy : policies) {
+ policy.setNoCFSRatio(noCFSRatio);
+ }
+ }
+ }
+ }
+
+ public static class CustomLogDocMergePolicy extends LogDocMergePolicy {
+
+ private final LogDocMergePolicyProvider provider;
+
+ public CustomLogDocMergePolicy(LogDocMergePolicyProvider provider) {
+ super();
+ this.provider = provider;
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ provider.policies.remove(this);
+ }
+ }
+
+ public static class EnableMergeLogDocMergePolicy extends CustomLogDocMergePolicy {
+
+ public EnableMergeLogDocMergePolicy(LogDocMergePolicyProvider provider) {
+ super(provider);
+ }
+
+ @Override
+ public MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException {
+ // don't trigger merges on a segment flush while indexing; merges run in the background instead
+ if (trigger == MergeTrigger.SEGMENT_FLUSH) {
+ return null;
+ }
+ return super.findMerges(trigger, infos);
+ }
+
+ @Override
+ public MergePolicy clone() {
+ // Lucene's IndexWriter clones the merge policy internally; since we hold on
+ // to this instance, returning it makes the clone the identity.
+ return this;
+ }
+ }
+}
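LogDocMergePolicyProvider mirrors the byte-size variant above but thresholds on document counts instead of segment sizes. A sketch of opting into it at index-creation time, assuming the "log_doc" shorthand resolves through MergePolicyModule (next file); the index name and thresholds are placeholders:

    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.merge.policy.LogDocMergePolicyProvider;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class CreateLogDocIndex {
        // Hypothetical sketch: create an index whose merges are selected by
        // doc counts rather than by the tiered default.
        public static void create(Client client) {
            client.admin().indices().prepareCreate("my_index")
                    .setSettings(settingsBuilder()
                            .put("index.merge.policy.type", "log_doc")
                            .put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_DOCS, 1000)
                            .put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, 100000))
                    .execute().actionGet();
        }
    }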
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyModule.java b/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyModule.java
new file mode 100644
index 0000000..14891b9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyModule.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class MergePolicyModule extends AbstractModule {
+
+ private final Settings settings;
+ public static final String MERGE_POLICY_TYPE_KEY = "index.merge.policy.type";
+
+ public MergePolicyModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(MergePolicyProvider.class)
+ .to(settings.getAsClass("index.merge.policy.type", TieredMergePolicyProvider.class, "org.elasticsearch.index.merge.policy.", "MergePolicyProvider"))
+ .asEagerSingleton();
+ }
+}
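getAsClass accepts either a fully qualified provider class or a shorthand that, by my reading of Settings.getAsClass, is camel-cased and wrapped with the prefix and suffix passed above. A sketch of both assumed forms:

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.merge.policy.MergePolicyModule;

    public class MergePolicyTypeExamples {
        // Assumed shorthand resolution for index.merge.policy.type:
        //   "tiered"        -> TieredMergePolicyProvider (the default bound above)
        //   "log_byte_size" -> LogByteSizeMergePolicyProvider
        //   "log_doc"       -> LogDocMergePolicyProvider
        public static Settings shorthand() {
            return ImmutableSettings.settingsBuilder()
                    .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, "log_byte_size")
                    .build();
        }

        // A fully qualified class name works as well.
        public static Settings fullyQualified() {
            return ImmutableSettings.settingsBuilder()
                    .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY,
                            "org.elasticsearch.index.merge.policy.LogDocMergePolicyProvider")
                    .build();
        }
    }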
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java
new file mode 100644
index 0000000..8ec3db4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/MergePolicyProvider.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.index.MergePolicy;
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.shard.IndexShardComponent;
+
+/**
+ *
+ */
+public interface MergePolicyProvider<T extends MergePolicy> extends IndexShardComponent, CloseableIndexComponent {
+
+ T newMergePolicy();
+}
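This interface is the extension point MergePolicyModule binds against. A minimal sketch of a custom provider that disables merging altogether; the class itself is hypothetical, and it assumes the AbstractMergePolicyProvider base used by the providers in this patch (constructor taking the shard's Store) plus Lucene 4's NoMergePolicy singletons:

    package org.elasticsearch.index.merge.policy;

    import org.apache.lucene.index.MergePolicy;
    import org.apache.lucene.index.NoMergePolicy;
    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.index.store.Store;

    // Hypothetical provider; select it with
    // index.merge.policy.type: org.elasticsearch.index.merge.policy.NoMergePolicyProvider
    public class NoMergePolicyProvider extends AbstractMergePolicyProvider<MergePolicy> {

        @Inject
        public NoMergePolicyProvider(Store store) {
            super(store);
        }

        @Override
        public MergePolicy newMergePolicy() {
            // Lucene's NoMergePolicy never selects segments to merge
            return NoMergePolicy.COMPOUND_FILES;
        }

        @Override
        public void close() throws ElasticsearchException {
            // no listeners or resources to release
        }
    }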
diff --git a/src/main/java/org/elasticsearch/index/merge/policy/TieredMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/TieredMergePolicyProvider.java
new file mode 100644
index 0000000..7f9b1af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/policy/TieredMergePolicyProvider.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeTrigger;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.TieredMergePolicy;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.store.Store;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+public class TieredMergePolicyProvider extends AbstractMergePolicyProvider<TieredMergePolicy> {
+
+ private final IndexSettingsService indexSettingsService;
+
+ private final Set<CustomTieredMergePolicyProvider> policies = new CopyOnWriteArraySet<CustomTieredMergePolicyProvider>();
+
+ private volatile double forceMergeDeletesPctAllowed;
+ private volatile ByteSizeValue floorSegment;
+ private volatile int maxMergeAtOnce;
+ private volatile int maxMergeAtOnceExplicit;
+ private volatile ByteSizeValue maxMergedSegment;
+ private volatile double segmentsPerTier;
+ private volatile double reclaimDeletesWeight;
+ private boolean asyncMerge;
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ @Inject
+ public TieredMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
+ super(store);
+ this.indexSettingsService = indexSettingsService;
+ this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
+ this.forceMergeDeletesPctAllowed = componentSettings.getAsDouble("expunge_deletes_allowed", 10d); // percentage
+ this.floorSegment = componentSettings.getAsBytesSize("floor_segment", new ByteSizeValue(2, ByteSizeUnit.MB));
+ this.maxMergeAtOnce = componentSettings.getAsInt("max_merge_at_once", 10);
+ this.maxMergeAtOnceExplicit = componentSettings.getAsInt("max_merge_at_once_explicit", 30);
+ // TODO: is this really a good default for max_merged_segment? What happens for large indices; won't they end up with many segments?
+ this.maxMergedSegment = componentSettings.getAsBytesSize("max_merged_segment", componentSettings.getAsBytesSize("max_merge_segment", new ByteSizeValue(5, ByteSizeUnit.GB)));
+ this.segmentsPerTier = componentSettings.getAsDouble("segments_per_tier", 10.0d);
+ this.reclaimDeletesWeight = componentSettings.getAsDouble("reclaim_deletes_weight", 2.0d);
+
+ fixSettingsIfNeeded();
+
+ logger.debug("using [tiered] merge policy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}], async_merge[{}]",
+ forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight, asyncMerge);
+
+ indexSettingsService.addListener(applySettings);
+ }
+
+ private void fixSettingsIfNeeded() {
+ // fixing maxMergeAtOnce, see TieredMergePolicy#setMaxMergeAtOnce
+ if (!(segmentsPerTier >= maxMergeAtOnce)) {
+ int newMaxMergeAtOnce = (int) segmentsPerTier;
+ // max merge at once should be at least 2
+ if (newMaxMergeAtOnce <= 1) {
+ newMaxMergeAtOnce = 2;
+ }
+ logger.debug("[tiered] merge policy changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or equal to it", maxMergeAtOnce, newMaxMergeAtOnce, segmentsPerTier);
+ this.maxMergeAtOnce = newMaxMergeAtOnce;
+ }
+ }
+
+ @Override
+ public TieredMergePolicy newMergePolicy() {
+ CustomTieredMergePolicyProvider mergePolicy;
+ if (asyncMerge) {
+ mergePolicy = new EnableMergeTieredMergePolicyProvider(this);
+ } else {
+ mergePolicy = new CustomTieredMergePolicyProvider(this);
+ }
+ mergePolicy.setNoCFSRatio(noCFSRatio);
+ mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
+ mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
+ mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
+ mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
+ mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
+ mergePolicy.setSegmentsPerTier(segmentsPerTier);
+ mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
+ policies.add(mergePolicy);
+ return mergePolicy;
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ indexSettingsService.removeListener(applySettings);
+ }
+
+ public static final String INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED = "index.merge.policy.expunge_deletes_allowed";
+ public static final String INDEX_MERGE_POLICY_FLOOR_SEGMENT = "index.merge.policy.floor_segment";
+ public static final String INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE = "index.merge.policy.max_merge_at_once";
+ public static final String INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT = "index.merge.policy.max_merge_at_once_explicit";
+ public static final String INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT = "index.merge.policy.max_merged_segment";
+ public static final String INDEX_MERGE_POLICY_SEGMENTS_PER_TIER = "index.merge.policy.segments_per_tier";
+ public static final String INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT = "index.merge.policy.reclaim_deletes_weight";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ double expungeDeletesPctAllowed = settings.getAsDouble(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed);
+ if (expungeDeletesPctAllowed != TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed) {
+ logger.info("updating [expunge_deletes_allowed] from [{}] to [{}]", TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed, expungeDeletesPctAllowed);
+ TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed = expungeDeletesPctAllowed;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setForceMergeDeletesPctAllowed(expungeDeletesPctAllowed);
+ }
+ }
+
+ ByteSizeValue floorSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_FLOOR_SEGMENT, TieredMergePolicyProvider.this.floorSegment);
+ if (!floorSegment.equals(TieredMergePolicyProvider.this.floorSegment)) {
+ logger.info("updating [floor_segment] from [{}] to [{}]", TieredMergePolicyProvider.this.floorSegment, floorSegment);
+ TieredMergePolicyProvider.this.floorSegment = floorSegment;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setFloorSegmentMB(floorSegment.mbFrac());
+ }
+ }
+
+ int maxMergeAtOnce = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, TieredMergePolicyProvider.this.maxMergeAtOnce);
+ if (maxMergeAtOnce != TieredMergePolicyProvider.this.maxMergeAtOnce) {
+ logger.info("updating [max_merge_at_once] from [{}] to [{}]", TieredMergePolicyProvider.this.maxMergeAtOnce, maxMergeAtOnce);
+ TieredMergePolicyProvider.this.maxMergeAtOnce = maxMergeAtOnce;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setMaxMergeAtOnce(maxMergeAtOnce);
+ }
+ }
+
+ int maxMergeAtOnceExplicit = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, TieredMergePolicyProvider.this.maxMergeAtOnceExplicit);
+ if (maxMergeAtOnceExplicit != TieredMergePolicyProvider.this.maxMergeAtOnceExplicit) {
+ logger.info("updating [max_merge_at_once_explicit] from [{}] to [{}]", TieredMergePolicyProvider.this.maxMergeAtOnceExplicit, maxMergeAtOnceExplicit);
+ TieredMergePolicyProvider.this.maxMergeAtOnceExplicit = maxMergeAtOnceExplicit;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
+ }
+ }
+
+ ByteSizeValue maxMergedSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, TieredMergePolicyProvider.this.maxMergedSegment);
+ if (!maxMergedSegment.equals(TieredMergePolicyProvider.this.maxMergedSegment)) {
+ logger.info("updating [max_merged_segment] from [{}] to [{}]", TieredMergePolicyProvider.this.maxMergedSegment, maxMergedSegment);
+ TieredMergePolicyProvider.this.maxMergedSegment = maxMergedSegment;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
+ }
+ }
+
+ double segmentsPerTier = settings.getAsDouble(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, TieredMergePolicyProvider.this.segmentsPerTier);
+ if (segmentsPerTier != TieredMergePolicyProvider.this.segmentsPerTier) {
+ logger.info("updating [segments_per_tier] from [{}] to [{}]", TieredMergePolicyProvider.this.segmentsPerTier, segmentsPerTier);
+ TieredMergePolicyProvider.this.segmentsPerTier = segmentsPerTier;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setSegmentsPerTier(segmentsPerTier);
+ }
+ }
+
+ double reclaimDeletesWeight = settings.getAsDouble(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, TieredMergePolicyProvider.this.reclaimDeletesWeight);
+ if (reclaimDeletesWeight != TieredMergePolicyProvider.this.reclaimDeletesWeight) {
+ logger.info("updating [reclaim_deletes_weight] from [{}] to [{}]", TieredMergePolicyProvider.this.reclaimDeletesWeight, reclaimDeletesWeight);
+ TieredMergePolicyProvider.this.reclaimDeletesWeight = reclaimDeletesWeight;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setReclaimDeletesWeight(reclaimDeletesWeight);
+ }
+ }
+
+ final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicyProvider.this.noCFSRatio)));
+ if (noCFSRatio != TieredMergePolicyProvider.this.noCFSRatio) {
+ logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(TieredMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
+ TieredMergePolicyProvider.this.noCFSRatio = noCFSRatio;
+ for (CustomTieredMergePolicyProvider policy : policies) {
+ policy.setNoCFSRatio(noCFSRatio);
+ }
+ }
+
+ fixSettingsIfNeeded();
+ }
+ }
+
+ public static class CustomTieredMergePolicyProvider extends TieredMergePolicy {
+
+ private final TieredMergePolicyProvider provider;
+
+ public CustomTieredMergePolicyProvider(TieredMergePolicyProvider provider) {
+ super();
+ this.provider = provider;
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ provider.policies.remove(this);
+ }
+
+ @Override
+ public MergePolicy clone() {
+ // Lucene's IndexWriter clones the merge policy internally; since we hold on
+ // to this instance, returning it makes the clone the identity.
+ return this;
+ }
+ }
+
+ public static class EnableMergeTieredMergePolicyProvider extends CustomTieredMergePolicyProvider {
+
+ public EnableMergeTieredMergePolicyProvider(TieredMergePolicyProvider provider) {
+ super(provider);
+ }
+
+ @Override
+ public MergePolicy.MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException {
+ // don't trigger merges on a segment flush while indexing; merges run in the background instead
+ if (trigger == MergeTrigger.SEGMENT_FLUSH) {
+ return null;
+ }
+ return super.findMerges(trigger, infos);
+ }
+ }
+}
\ No newline at end of file
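All seven tiered knobs above are dynamic. A usage sketch of tuning two of them on a live index with the 1.x Java client (the index name is a placeholder):

    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class TuneTieredPolicy {
        // Hypothetical sketch: ApplySettings above forwards both values to
        // every registered policy instance and re-runs fixSettingsIfNeeded().
        public static void tune(Client client) {
            client.admin().indices().prepareUpdateSettings("my_index")
                    .setSettings(settingsBuilder()
                            .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, "2gb")
                            .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, 5.0))
                    .execute().actionGet();
        }
    }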
diff --git a/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java b/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java
new file mode 100644
index 0000000..7eb498d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.scheduler;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeScheduler;
+import org.apache.lucene.index.TrackingConcurrentMergeScheduler;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+/**
+ *
+ */
+public class ConcurrentMergeSchedulerProvider extends MergeSchedulerProvider {
+
+ private final int maxThreadCount;
+ private final int maxMergeCount;
+
+ private Set<CustomConcurrentMergeScheduler> schedulers = new CopyOnWriteArraySet<CustomConcurrentMergeScheduler>();
+
+ @Inject
+ public ConcurrentMergeSchedulerProvider(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool) {
+ super(shardId, indexSettings, threadPool);
+
+ // TODO LUCENE MONITOR this will change in Lucene 4.0
+ this.maxThreadCount = componentSettings.getAsInt("max_thread_count", Math.max(1, Math.min(3, Runtime.getRuntime().availableProcessors() / 2)));
+ this.maxMergeCount = componentSettings.getAsInt("max_merge_count", maxThreadCount + 2);
+ logger.debug("using [concurrent] merge scheduler with max_thread_count[{}]", maxThreadCount);
+ }
+
+ @Override
+ public MergeScheduler buildMergeScheduler() {
+ CustomConcurrentMergeScheduler concurrentMergeScheduler = new CustomConcurrentMergeScheduler(logger, shardId, this);
+ concurrentMergeScheduler.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
+ schedulers.add(concurrentMergeScheduler);
+ return concurrentMergeScheduler;
+ }
+
+ @Override
+ public MergeStats stats() {
+ MergeStats mergeStats = new MergeStats();
+ for (CustomConcurrentMergeScheduler scheduler : schedulers) {
+ mergeStats.add(scheduler.totalMerges(), scheduler.totalMergeTime(), scheduler.totalMergeNumDocs(), scheduler.totalMergeSizeInBytes(),
+ scheduler.currentMerges(), scheduler.currentMergesNumDocs(), scheduler.currentMergesSizeInBytes());
+ }
+ return mergeStats;
+ }
+
+ @Override
+ public Set<OnGoingMerge> onGoingMerges() {
+ for (CustomConcurrentMergeScheduler scheduler : schedulers) {
+ return scheduler.onGoingMerges();
+ }
+ return ImmutableSet.of();
+ }
+
+ public static class CustomConcurrentMergeScheduler extends TrackingConcurrentMergeScheduler {
+
+ private final ShardId shardId;
+
+ private final ConcurrentMergeSchedulerProvider provider;
+
+ private CustomConcurrentMergeScheduler(ESLogger logger, ShardId shardId, ConcurrentMergeSchedulerProvider provider) {
+ super(logger);
+ this.shardId = shardId;
+ this.provider = provider;
+ }
+
+ @Override
+ protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
+ MergeThread thread = super.getMergeThread(writer, merge);
+ thread.setName(EsExecutors.threadName(provider.indexSettings(), "[" + shardId.index().name() + "][" + shardId.id() + "]: " + thread.getName()));
+ return thread;
+ }
+
+ @Override
+ protected void handleMergeException(Throwable exc) {
+ logger.warn("failed to merge", exc);
+ provider.failedMerge(new MergePolicy.MergeException(exc, dir));
+ super.handleMergeException(exc);
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ provider.schedulers.remove(this);
+ }
+
+ @Override
+ protected void beforeMerge(OnGoingMerge merge) {
+ super.beforeMerge(merge);
+ provider.beforeMerge(merge);
+ }
+
+ @Override
+ protected void afterMerge(OnGoingMerge merge) {
+ super.afterMerge(merge);
+ provider.afterMerge(merge);
+ }
+ }
+}
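The default thread count above works out to max(1, min(3, cores/2)). A sketch of overriding it at index creation, assuming componentSettings maps to the index.merge.scheduler.* key prefix:

    import org.elasticsearch.common.settings.Settings;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class MergeSchedulerSettingsExample {
        // Hypothetical sketch: pin merging to a single thread, a common
        // choice for spinning disks; max_merge_count then defaults to
        // max_thread_count + 2 unless set explicitly.
        public static Settings singleMergeThread() {
            return settingsBuilder()
                    .put("index.merge.scheduler.max_thread_count", 1)
                    .build();
        }
    }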
diff --git a/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java b/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java
new file mode 100644
index 0000000..01cf42c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerModule.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.scheduler;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class MergeSchedulerModule extends AbstractModule {
+
+ public static final String MERGE_SCHEDULER_TYPE_KEY = "index.merge.scheduler.type";
+ public static final Class<? extends MergeSchedulerProvider> DEFAULT = ConcurrentMergeSchedulerProvider.class;
+
+ private final Settings settings;
+
+ public MergeSchedulerModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(MergeSchedulerProvider.class)
+ .to(settings.getAsClass(MERGE_SCHEDULER_TYPE_KEY, DEFAULT, "org.elasticsearch.index.merge.scheduler.", "MergeSchedulerProvider"))
+ .asEagerSingleton();
+ }
+}
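MergeSchedulerModule resolves index.merge.scheduler.type the same way MergePolicyModule resolves its key. A sketch of selecting the serial scheduler, assuming the "serial" shorthand expands to SerialMergeSchedulerProvider through the same getAsClass mechanism:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class SerialSchedulerExample {
        // Hypothetical sketch: replace the concurrent default with the
        // serial scheduler for this index.
        public static Settings serialScheduler() {
            return settingsBuilder()
                    .put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, "serial")
                    .build();
        }
    }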
diff --git a/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java b/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java
new file mode 100644
index 0000000..2a3c062
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/scheduler/MergeSchedulerProvider.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.scheduler;
+
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeScheduler;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.merge.EnableMergeScheduler;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.IndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ *
+ */
+public abstract class MergeSchedulerProvider extends AbstractIndexShardComponent implements IndexShardComponent {
+
+ public static final String FORCE_ASYNC_MERGE = "index.merge.force_async_merge";
+
+ public static interface FailureListener {
+ void onFailedMerge(MergePolicy.MergeException e);
+ }
+
+ /**
+ * Listener for events before/after single merges. Called on the merge thread.
+ */
+ public static interface Listener {
+
+ /**
+ * A callback before a merge executes. Note, any logic here will block the merge
+ * until it is done.
+ */
+ void beforeMerge(OnGoingMerge merge);
+
+ /**
+ * A callback after a merge has executed. Note, any logic here will block the merge
+ * thread.
+ */
+ void afterMerge(OnGoingMerge merge);
+ }
+
+ private final ThreadPool threadPool;
+ private final CopyOnWriteArrayList<FailureListener> failureListeners = new CopyOnWriteArrayList<FailureListener>();
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ private final boolean notifyOnMergeFailure;
+
+ protected MergeSchedulerProvider(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool) {
+ super(shardId, indexSettings);
+ this.threadPool = threadPool;
+ this.notifyOnMergeFailure = componentSettings.getAsBoolean("notify_on_failure", true);
+ }
+
+ public void addFailureListener(FailureListener listener) {
+ failureListeners.add(listener);
+ }
+
+ public void addListener(Listener listener) {
+ listeners.add(listener);
+ }
+
+ public void removeListener(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ protected void failedMerge(final MergePolicy.MergeException e) {
+ if (!notifyOnMergeFailure) {
+ return;
+ }
+ for (final FailureListener failureListener : failureListeners) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ failureListener.onFailedMerge(e);
+ }
+ });
+ }
+ }
+
+ protected void beforeMerge(OnGoingMerge merge) {
+ for (Listener listener : listeners) {
+ listener.beforeMerge(merge);
+ }
+ }
+
+ protected void afterMerge(OnGoingMerge merge) {
+ for (Listener listener : listeners) {
+ listener.afterMerge(merge);
+ }
+ }
+
+ public final MergeScheduler newMergeScheduler() {
+ MergeScheduler scheduler = buildMergeScheduler();
+ // an internal setting that allows us to disable this behavior if really needed
+ if (indexSettings.getAsBoolean(FORCE_ASYNC_MERGE, true)) {
+ scheduler = new EnableMergeScheduler(scheduler);
+ }
+ return scheduler;
+ }
+
+ protected abstract MergeScheduler buildMergeScheduler();
+
+ public abstract MergeStats stats();
+
+ public abstract Set<OnGoingMerge> onGoingMerges();
+}
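The Listener and FailureListener hooks above are how other shard components observe merges. A minimal listener sketch; the class name is hypothetical, and since both callbacks run on (and block) the merge thread, the bodies must stay cheap:

    import org.elasticsearch.index.merge.OnGoingMerge;
    import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;

    public class LoggingMergeListener implements MergeSchedulerProvider.Listener {

        @Override
        public void beforeMerge(OnGoingMerge merge) {
            // called just before the merge runs, on the merge thread
            System.out.println("merge starting");
        }

        @Override
        public void afterMerge(OnGoingMerge merge) {
            // called just after the merge finishes, on the merge thread
            System.out.println("merge finished");
        }
    }

Registration, given a provider instance: provider.addListener(new LoggingMergeListener());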
diff --git a/src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java b/src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java
new file mode 100644
index 0000000..f78a413
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/merge/scheduler/SerialMergeSchedulerProvider.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.scheduler;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.*;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+/**
+ *
+ */
+public class SerialMergeSchedulerProvider extends MergeSchedulerProvider {
+
+ private Set<CustomSerialMergeScheduler> schedulers = new CopyOnWriteArraySet<CustomSerialMergeScheduler>();
+
+ @Inject
+ public SerialMergeSchedulerProvider(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool) {
+ super(shardId, indexSettings, threadPool);
+ logger.trace("using [serial] merge scheduler");
+ }
+
+ @Override
+ public MergeScheduler buildMergeScheduler() {
+ CustomSerialMergeScheduler scheduler = new CustomSerialMergeScheduler(logger, this);
+ schedulers.add(scheduler);
+ return scheduler;
+ }
+
+ @Override
+ public MergeStats stats() {
+ MergeStats mergeStats = new MergeStats();
+ for (CustomSerialMergeScheduler scheduler : schedulers) {
+ mergeStats.add(scheduler.totalMerges(), scheduler.totalMergeTime(), scheduler.totalMergeNumDocs(), scheduler.totalMergeSizeInBytes(),
+ scheduler.currentMerges(), scheduler.currentMergesNumDocs(), scheduler.currentMergesSizeInBytes());
+ }
+ return mergeStats;
+ }
+
+ @Override
+ public Set<OnGoingMerge> onGoingMerges() {
+ for (CustomSerialMergeScheduler scheduler : schedulers) {
+ return scheduler.onGoingMerges();
+ }
+ return ImmutableSet.of();
+ }
+
+ public static class CustomSerialMergeScheduler extends TrackingSerialMergeScheduler {
+
+ private final SerialMergeSchedulerProvider provider;
+
+ public CustomSerialMergeScheduler(ESLogger logger, SerialMergeSchedulerProvider provider) {
+ super(logger);
+ this.provider = provider;
+ }
+
+ @Override
+ public void merge(IndexWriter writer) throws CorruptIndexException, IOException {
+ try {
+ super.merge(writer);
+ } catch (Throwable e) {
+ logger.warn("failed to merge", e);
+ provider.failedMerge(new MergePolicy.MergeException(e, writer.getDirectory()));
+ throw new MergePolicy.MergeException(e, writer.getDirectory());
+ }
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ provider.schedulers.remove(this);
+ }
+
+ @Override
+ protected void beforeMerge(OnGoingMerge merge) {
+ super.beforeMerge(merge);
+ provider.beforeMerge(merge);
+ }
+
+ @Override
+ protected void afterMerge(OnGoingMerge merge) {
+ super.afterMerge(merge);
+ provider.afterMerge(merge);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java
new file mode 100644
index 0000000..462b910
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.percolator;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+
+/**
+ * Exception during indexing a percolator query.
+ */
+public class PercolatorException extends IndexException {
+
+ public PercolatorException(Index index, String msg) {
+ super(index, msg);
+ }
+
+ public PercolatorException(Index index, String msg, Throwable cause) {
+ super(index, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java
new file mode 100644
index 0000000..a7b8805
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.percolator;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.indexing.IndexingOperationListener;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentTypeListener;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.percolator.stats.ShardPercolateService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.percolator.PercolatorService;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Each shard has a percolator registry even if there is no {@link PercolatorService#TYPE_NAME} document type in the index.
+ * For shards of indices without a {@link PercolatorService#TYPE_NAME} document type, this registry holds no percolate queries.
+ * <p/>
+ * Once the document type has been created, the real-time percolator starts listening to write events and updates
+ * this registry with queries in real time.
+ */
+public class PercolatorQueriesRegistry extends AbstractIndexShardComponent {
+
+ // This is a shard-level service, but the services below are index-level services:
+ private final IndexQueryParserService queryParserService;
+ private final MapperService mapperService;
+ private final IndicesLifecycle indicesLifecycle;
+ private final IndexCache indexCache;
+ private final IndexFieldDataService indexFieldDataService;
+
+ private final ShardIndexingService indexingService;
+ private final ShardPercolateService shardPercolateService;
+
+ private final ConcurrentMap<HashedBytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
+ private final ShardLifecycleListener shardLifecycleListener = new ShardLifecycleListener();
+ private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener();
+ private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener();
+ private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false);
+
+ @Inject
+ public PercolatorQueriesRegistry(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService queryParserService,
+ ShardIndexingService indexingService, IndicesLifecycle indicesLifecycle, MapperService mapperService,
+ IndexCache indexCache, IndexFieldDataService indexFieldDataService, ShardPercolateService shardPercolateService) {
+ super(shardId, indexSettings);
+ this.queryParserService = queryParserService;
+ this.mapperService = mapperService;
+ this.indicesLifecycle = indicesLifecycle;
+ this.indexingService = indexingService;
+ this.indexCache = indexCache;
+ this.indexFieldDataService = indexFieldDataService;
+ this.shardPercolateService = shardPercolateService;
+
+ indicesLifecycle.addListener(shardLifecycleListener);
+ mapperService.addTypeListener(percolateTypeListener);
+ }
+
+ public ConcurrentMap<HashedBytesRef, Query> percolateQueries() {
+ return percolateQueries;
+ }
+
+ public void close() {
+ mapperService.removeTypeListener(percolateTypeListener);
+ indicesLifecycle.removeListener(shardLifecycleListener);
+ indexingService.removeListener(realTimePercolatorOperationListener);
+ clear();
+ }
+
+ public void clear() {
+ percolateQueries.clear();
+ }
+
+ void enableRealTimePercolator() {
+ if (realTimePercolatorEnabled.compareAndSet(false, true)) {
+ indexingService.addListener(realTimePercolatorOperationListener);
+ }
+ }
+
+ void disableRealTimePercolator() {
+ if (realTimePercolatorEnabled.compareAndSet(true, false)) {
+ indexingService.removeListener(realTimePercolatorOperationListener);
+ }
+ }
+
+ public void addPercolateQuery(String idAsString, BytesReference source) {
+ Query newQuery = parsePercolatorDocument(idAsString, source);
+ HashedBytesRef id = new HashedBytesRef(new BytesRef(idAsString));
+ Query previousQuery = percolateQueries.put(id, newQuery);
+ shardPercolateService.addedQuery(id, previousQuery, newQuery);
+ }
+
+ public void removePercolateQuery(String idAsString) {
+ HashedBytesRef id = new HashedBytesRef(idAsString);
+ Query query = percolateQueries.remove(id);
+ if (query != null) {
+ shardPercolateService.removedQuery(id, query);
+ }
+ }
+
+ Query parsePercolatorDocument(String id, BytesReference source) {
+ String type = null;
+ BytesReference querySource = null;
+
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(source);
+ String currentFieldName = null;
+ XContentParser.Token token = parser.nextToken(); // move to the START_OBJECT
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchException("failed to parse query [" + id + "], not starting with OBJECT");
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ if (type != null) {
+ return parseQuery(type, null, parser);
+ } else {
+ XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
+ builder.copyCurrentStructure(parser);
+ querySource = builder.bytes();
+ builder.close();
+ }
+ } else {
+ parser.skipChildren();
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ parser.skipChildren();
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName)) {
+ type = parser.text();
+ }
+ }
+ }
+ return parseQuery(type, querySource, null);
+ } catch (Exception e) {
+ throw new PercolatorException(shardId().index(), "failed to parse query [" + id + "]", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ private Query parseQuery(String type, BytesReference querySource, XContentParser parser) {
+ if (type == null) {
+ if (parser != null) {
+ return queryParserService.parse(parser).query();
+ } else {
+ return queryParserService.parse(querySource).query();
+ }
+ }
+
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{type});
+ try {
+ if (parser != null) {
+ return queryParserService.parse(parser).query();
+ } else {
+ return queryParserService.parse(querySource).query();
+ }
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ }
+
+ private class PercolateTypeListener implements DocumentTypeListener {
+
+ @Override
+ public void beforeCreate(DocumentMapper mapper) {
+ if (PercolatorService.TYPE_NAME.equals(mapper.type())) {
+ enableRealTimePercolator();
+ }
+ }
+
+ @Override
+ public void afterRemove(DocumentMapper mapper) {
+ if (PercolatorService.TYPE_NAME.equals(mapper.type())) {
+ disableRealTimePercolator();
+ clear();
+ }
+ }
+
+ }
+
+ private class ShardLifecycleListener extends IndicesLifecycle.Listener {
+
+ @Override
+ public void afterIndexShardCreated(IndexShard indexShard) {
+ if (hasPercolatorType(indexShard)) {
+ enableRealTimePercolator();
+ }
+ }
+
+ @Override
+ public void afterIndexShardPostRecovery(IndexShard indexShard) {
+ if (hasPercolatorType(indexShard)) {
+ // the percolator type for this shard has finished recovery; fetch what we can
+ // from it and initialize the registry with the queries we find
+ logger.debug("loading percolator queries for index [{}] and shard[{}]...", shardId.index(), shardId.id());
+ loadQueries(indexShard);
+ logger.trace("done loading percolator queries for index [{}] and shard[{}]", shardId.index(), shardId.id());
+ }
+ }
+
+ private boolean hasPercolatorType(IndexShard indexShard) {
+ ShardId otherShardId = indexShard.shardId();
+ return shardId.equals(otherShardId) && mapperService.hasMapping(PercolatorService.TYPE_NAME);
+ }
+
+ private void loadQueries(IndexShard shard) {
+ try {
+ shard.refresh(new Engine.Refresh("percolator_load_queries").force(true));
+ // Maybe add a load mode? This isn't really a write, but we need WRITE because state=post_recovery
+ Engine.Searcher searcher = shard.acquireSearcher("percolator_load_queries", IndexShard.Mode.WRITE);
+ try {
+ Query query = new XConstantScoreQuery(
+ indexCache.filter().cache(
+ new TermFilter(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))
+ )
+ );
+ QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
+ searcher.searcher().search(query, queryCollector);
+ Map<HashedBytesRef, Query> queries = queryCollector.queries();
+ for (Map.Entry<HashedBytesRef, Query> entry : queries.entrySet()) {
+ Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
+ shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
+ }
+ } finally {
+ searcher.release();
+ }
+ } catch (Exception e) {
+ throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
+ }
+ }
+
+ }
+
+ private class RealTimePercolatorOperationListener extends IndexingOperationListener {
+
+ @Override
+ public Engine.Create preCreate(Engine.Create create) {
+ // validate the query here, before we index
+ if (PercolatorService.TYPE_NAME.equals(create.type())) {
+ parsePercolatorDocument(create.id(), create.source());
+ }
+ return create;
+ }
+
+ @Override
+ public void postCreateUnderLock(Engine.Create create) {
+ // add the query under a doc lock
+ if (PercolatorService.TYPE_NAME.equals(create.type())) {
+ addPercolateQuery(create.id(), create.source());
+ }
+ }
+
+ @Override
+ public Engine.Index preIndex(Engine.Index index) {
+ // validate the query here, before we index
+ if (PercolatorService.TYPE_NAME.equals(index.type())) {
+ parsePercolatorDocument(index.id(), index.source());
+ }
+ return index;
+ }
+
+ @Override
+ public void postIndexUnderLock(Engine.Index index) {
+ // add the query under a doc lock
+ if (PercolatorService.TYPE_NAME.equals(index.type())) {
+ addPercolateQuery(index.id(), index.source());
+ }
+ }
+
+ @Override
+ public void postDeleteUnderLock(Engine.Delete delete) {
+ // remove the query under a lock
+ if (PercolatorService.TYPE_NAME.equals(delete.type())) {
+ removePercolateQuery(delete.id());
+ }
+ }
+
+ // Updating the live percolate queries for a delete by query is tricky with the current way delete by queries
+ // are handled. It is only possible if we put a big lock around the post delete by query hook...
+
+ // If we implement delete by query, that just runs a query and generates delete operations in a bulk, then
+ // updating the live percolator is automatically supported for delete by query.
+// @Override
+// public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
+// }
+ }
+
+}
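The registry is fed by indexing documents into the PercolatorService.TYPE_NAME type with a top-level "query" field, the shape parsePercolatorDocument above expects. A sketch of registering one through the 1.x Java client; the index name and query id are placeholders, and ".percolator" is assumed to be the value of PercolatorService.TYPE_NAME in this release:

    import org.elasticsearch.client.Client;

    import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
    import static org.elasticsearch.index.query.QueryBuilders.termQuery;

    public class RegisterPercolatorQuery {
        // Hypothetical sketch: indexing into the percolator type triggers
        // RealTimePercolatorOperationListener above, which validates the
        // document in preIndex and adds it to the registry under lock.
        public static void register(Client client) throws Exception {
            client.prepareIndex("my_index", ".percolator", "alert-1")
                    .setSource(jsonBuilder()
                            .startObject()
                            .field("query", termQuery("body", "outage"))
                            .endObject())
                    .setRefresh(true)
                    .execute().actionGet();
        }
    }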
diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorShardModule.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorShardModule.java
new file mode 100644
index 0000000..aba7e10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorShardModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.percolator;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.percolator.stats.ShardPercolateService;
+
+/**
+ *
+ */
+public class PercolatorShardModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(PercolatorQueriesRegistry.class).asEagerSingleton();
+ bind(ShardPercolateService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java b/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java
new file mode 100644
index 0000000..37b8152
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.percolator;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fieldvisitor.JustSourceFieldsVisitor;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Lucene collector that loads the registered percolator queries: for each matching
+ * document it reads the _source (which holds the query), parses it back into a
+ * Lucene query, and stores it keyed by the document id.
+ */
+final class QueriesLoaderCollector extends Collector {
+
+ private final Map<HashedBytesRef, Query> queries = Maps.newHashMap();
+ private final JustSourceFieldsVisitor fieldsVisitor = new JustSourceFieldsVisitor();
+ private final PercolatorQueriesRegistry percolator;
+ private final IndexFieldData idFieldData;
+ private final ESLogger logger;
+
+ private BytesValues idValues;
+ private AtomicReader reader;
+
+ QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger, MapperService mapperService, IndexFieldDataService indexFieldDataService) {
+ this.percolator = percolator;
+ this.logger = logger;
+ final FieldMapper<?> idMapper = mapperService.smartNameFieldMapper(IdFieldMapper.NAME);
+ this.idFieldData = indexFieldDataService.getForField(idMapper);
+ }
+
+ public Map<HashedBytesRef, Query> queries() {
+ return this.queries;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ // the _source is the query
+
+ if (idValues.setDocument(doc) > 0) {
+ BytesRef id = idValues.nextValue();
+ fieldsVisitor.reset();
+ reader.document(doc, fieldsVisitor);
+
+ try {
+                // the id is only used for logging; if parsing fails, we log it in the catch block
+ final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source());
+ if (parseQuery != null) {
+ queries.put(new HashedBytesRef(idValues.copyShared(), idValues.currentValueHash()), parseQuery);
+ } else {
+ logger.warn("failed to add query [{}] - parser returned null", id);
+ }
+
+ } catch (Exception e) {
+ logger.warn("failed to add query [{}]", e, id.utf8ToString());
+ }
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ reader = context.reader();
+ idValues = idFieldData.load(context).getBytesValues(true);
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java b/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java
new file mode 100644
index 0000000..e38092b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.percolator.stats;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ * Exposes percolator related statistics.
+ */
+public class PercolateStats implements Streamable, ToXContent {
+
+ private long percolateCount;
+ private long percolateTimeInMillis;
+ private long current;
+ private long memorySizeInBytes = -1;
+ private long numQueries;
+
+ /**
+     * No-op constructor for serialization purposes.
+ */
+ public PercolateStats() {
+ }
+
+ PercolateStats(long percolateCount, long percolateTimeInMillis, long current, long memorySizeInBytes, long numQueries) {
+ this.percolateCount = percolateCount;
+ this.percolateTimeInMillis = percolateTimeInMillis;
+ this.current = current;
+ this.memorySizeInBytes = memorySizeInBytes;
+ this.numQueries = numQueries;
+ }
+
+ /**
+ * @return The number of times the percolate api has been invoked.
+ */
+ public long getCount() {
+ return percolateCount;
+ }
+
+ /**
+     * @return The total amount of time spent in the percolate api, in milliseconds
+ */
+ public long getTimeInMillis() {
+ return percolateTimeInMillis;
+ }
+
+ /**
+     * @return The total amount of time spent in the percolate api
+ */
+ public TimeValue getTime() {
+ return new TimeValue(getTimeInMillis());
+ }
+
+ /**
+     * @return The current number of active percolate api invocations.
+ */
+ public long getCurrent() {
+ return current;
+ }
+
+ /**
+ * @return The total number of loaded percolate queries.
+ */
+ public long getNumQueries() {
+ return numQueries;
+ }
+
+ /**
+     * @return Temporarily returns <code>-1</code>. This used to return the total size the loaded queries take in
+     * memory, but it is disabled for now because the size estimation was too expensive CPU-wise. It will be enabled
+     * again once a cheaper size estimation is found.
+ */
+ public long getMemorySizeInBytes() {
+ return memorySizeInBytes;
+ }
+
+ /**
+     * @return The total size the loaded queries take in memory; currently always <code>-1</code> (see {@link #getMemorySizeInBytes()}).
+ */
+ public ByteSizeValue getMemorySize() {
+ return new ByteSizeValue(memorySizeInBytes);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.PERCOLATE);
+ builder.field(Fields.TOTAL, percolateCount);
+ builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, percolateTimeInMillis);
+ builder.field(Fields.CURRENT, current);
+ builder.field(Fields.MEMORY_SIZE_IN_BYTES, memorySizeInBytes);
+ builder.field(Fields.MEMORY_SIZE, getMemorySize());
+ builder.field(Fields.QUERIES, getNumQueries());
+ builder.endObject();
+ return builder;
+ }
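+    // Illustrative output (a sketch; values made up): under a "percolate" object the
+    // fields total, time_in_millis, current, memory_size_in_bytes, memory_size and
+    // queries are emitted, mirroring the Fields constants defined below.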
+
+ public void add(PercolateStats percolate) {
+ if (percolate == null) {
+ return;
+ }
+
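+        // memorySizeInBytes is deliberately not accumulated: size estimation is
+        // disabled, so the field stays at its -1 default no matter how many stats are merged.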
+ percolateCount += percolate.getCount();
+ percolateTimeInMillis += percolate.getTimeInMillis();
+ current += percolate.getCurrent();
+ numQueries += percolate.getNumQueries();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString PERCOLATE = new XContentBuilderString("percolate");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+        static final XContentBuilderString TIME = new XContentBuilderString("time");
+ static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis");
+ static final XContentBuilderString CURRENT = new XContentBuilderString("current");
+ static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
+ static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
+ static final XContentBuilderString QUERIES = new XContentBuilderString("queries");
+ }
+
+ public static PercolateStats readPercolateStats(StreamInput in) throws IOException {
+ PercolateStats stats = new PercolateStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ percolateCount = in.readVLong();
+ percolateTimeInMillis = in.readVLong();
+ current = in.readVLong();
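+        // BWC: pre-1.0.2 nodes serialized the (now unused) memory size as a vlong,
+        // while newer nodes write a plain long (always -1, see writeTo). The value is
+        // read and discarded either way, leaving memorySizeInBytes at its -1 default.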
+ if (in.getVersion().before(Version.V_1_0_2)) {
+ in.readVLong();
+ } else {
+ in.readLong();
+ }
+ numQueries = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(percolateCount);
+ out.writeVLong(percolateTimeInMillis);
+ out.writeVLong(current);
+ if (out.getVersion().before(Version.V_1_0_2)) {
+ out.writeVLong(0);
+ } else {
+ out.writeLong(-1);
+ }
+ out.writeVLong(numQueries);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/percolator/stats/ShardPercolateService.java b/src/main/java/org/elasticsearch/index/percolator/stats/ShardPercolateService.java
new file mode 100644
index 0000000..e60a37c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/percolator/stats/ShardPercolateService.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.percolator.stats;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Shard level percolator service that maintains percolator metrics:
+ * <ul>
+ * <li> total time spent in percolate api
+ * <li> the current number of percolate requests
+ * <li> number of registered percolate queries
+ * </ul>
+ */
+public class ShardPercolateService extends AbstractIndexShardComponent {
+
+ @Inject
+ public ShardPercolateService(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ }
+
+ private final MeanMetric percolateMetric = new MeanMetric();
+ private final CounterMetric currentMetric = new CounterMetric();
+
+ private final CounterMetric numberOfQueries = new CounterMetric();
+
+ public void prePercolate() {
+ currentMetric.inc();
+ }
+
+ public void postPercolate(long tookInNanos) {
+ currentMetric.dec();
+ percolateMetric.inc(tookInNanos);
+ }
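+    // Intended call pattern (a sketch; not enforced by this class):
+    //   long start = System.nanoTime();
+    //   prePercolate();
+    //   try { /* execute the percolate request */ }
+    //   finally { postPercolate(System.nanoTime() - start); }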
+
+ public void addedQuery(HashedBytesRef id, Query previousQuery, Query newQuery) {
+ numberOfQueries.inc();
+ }
+
+ public void removedQuery(HashedBytesRef id, Query query) {
+ numberOfQueries.dec();
+ }
+
+ /**
+ * @return The current metrics
+ */
+ public PercolateStats stats() {
+ return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
+ }
+
+ // Enable when a more efficient manner is found for estimating the size of a Lucene query.
+ /*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
+ long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
+ size += RamEstimator.sizeOf(query);
+ return size;
+ }
+
+ private static final class RamEstimator {
+    // we move this into its own class to exclude it from the forbidden API checks
+ // it's fine to use here!
+ static long sizeOf(Query query) {
+ return RamUsageEstimator.sizeOf(query);
+ }
+ }*/
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java
new file mode 100644
index 0000000..687fbad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A filter that matches documents matching all of the provided filters (a conjunction).
+ */
+public class AndFilterBuilder extends BaseFilterBuilder {
+
+ private ArrayList<FilterBuilder> filters = Lists.newArrayList();
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ public AndFilterBuilder(FilterBuilder... filters) {
+ for (FilterBuilder filter : filters) {
+ this.filters.add(filter);
+ }
+ }
+
+ /**
+ * Adds a filter to the list of filters to "and".
+ */
+ public AndFilterBuilder add(FilterBuilder filterBuilder) {
+ filters.add(filterBuilder);
+ return this;
+ }
+
+ /**
+     * Sets whether the filter should be cached. Defaults to <tt>false</tt>.
+ */
+ public AndFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public AndFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public AndFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(AndFilterParser.NAME);
+ builder.startArray("filters");
+ for (FilterBuilder filter : filters) {
+ filter.toXContent(builder, params);
+ }
+ builder.endArray();
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ builder.endObject();
+ }
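+    // Usage sketch (field names and values illustrative), producing
+    // {"and": {"filters": [...], "_cache": true}}:
+    //   FilterBuilder filter = new AndFilterBuilder(
+    //           FilterBuilders.termFilter("user", "kimchy"),
+    //           FilterBuilders.rangeFilter("age").from(20).to(30))
+    //       .cache(true);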
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java
new file mode 100644
index 0000000..edf0311
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Parser for the <tt>and</tt> filter.
+ */
+public class AndFilterParser implements FilterParser {
+
+ public static final String NAME = "and";
+
+ @Inject
+ public AndFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
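+        // Accepted shapes (sketch): the array shorthand {"and": [ {...}, {...} ]} and the
+        // object form {"and": {"filters": [ {...} ], "_cache": true}}; inside the object
+        // form, any other array field is treated the same way as "filters".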
+
+ ArrayList<Filter> filters = newArrayList();
+ boolean filtersFound = false;
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ filtersFound = true;
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ filters.add(filter);
+ }
+ }
+ } else {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("filters".equals(currentFieldName)) {
+ filtersFound = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ filters.add(filter);
+ }
+ }
+ } else {
+ filtersFound = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ filters.add(filter);
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[and] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ }
+
+ if (!filtersFound) {
+ throw new QueryParsingException(parseContext.index(), "[and] filter requires 'filters' to be set on it'");
+ }
+
+ if (filters.isEmpty()) {
+ // no filters provided, this should be ignored upstream
+ return null;
+ }
+
+ // no need to cache this one
+ Filter filter = new AndFilter(filters);
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java
new file mode 100644
index 0000000..ccd2b3a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+
+/**
+ * Base class for filter builders, implementing the common XContent serialization logic.
+ */
+public abstract class BaseFilterBuilder implements FilterBuilder {
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.prettyPrint();
+ toXContent(builder, EMPTY_PARAMS);
+ return builder.string();
+ } catch (Exception e) {
+ throw new ElasticsearchException("Failed to build filter", e);
+ }
+ }
+
+ @Override
+ public BytesReference buildAsBytes() throws ElasticsearchException {
+ return buildAsBytes(XContentType.JSON);
+ }
+
+ @Override
+ public BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ toXContent(builder, EMPTY_PARAMS);
+ return builder.bytes();
+ } catch (Exception e) {
+ throw new ElasticsearchException("Failed to build filter", e);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ doXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ protected abstract void doXContent(XContentBuilder builder, Params params) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java
new file mode 100644
index 0000000..6cb1315
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+
+/**
+ * Base class for query builders, implementing the common XContent serialization logic.
+ */
+public abstract class BaseQueryBuilder implements QueryBuilder {
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.prettyPrint();
+ toXContent(builder, EMPTY_PARAMS);
+ return builder.string();
+ } catch (Exception e) {
+ throw new ElasticsearchException("Failed to build query", e);
+ }
+ }
+
+ @Override
+ public BytesReference buildAsBytes() throws ElasticsearchException {
+ return buildAsBytes(XContentType.JSON);
+ }
+
+ @Override
+ public BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ toXContent(builder, EMPTY_PARAMS);
+ return builder.bytes();
+ } catch (Exception e) {
+ throw new ElasticsearchException("Failed to build query", e);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ doXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ protected abstract void doXContent(XContentBuilder builder, Params params) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java
new file mode 100644
index 0000000..43b3dc6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.ToXContent.Params;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A filter that matches documents matching boolean combinations of other filters.
+ */
+public class BoolFilterBuilder extends BaseFilterBuilder {
+
+ private ArrayList<FilterBuilder> mustClauses = new ArrayList<FilterBuilder>();
+
+ private ArrayList<FilterBuilder> mustNotClauses = new ArrayList<FilterBuilder>();
+
+ private ArrayList<FilterBuilder> shouldClauses = new ArrayList<FilterBuilder>();
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ /**
+ * Adds a filter that <b>must</b> appear in the matching documents.
+ */
+ public BoolFilterBuilder must(FilterBuilder filterBuilder) {
+ mustClauses.add(filterBuilder);
+ return this;
+ }
+
+ /**
+ * Adds a filter that <b>must not</b> appear in the matching documents.
+ */
+ public BoolFilterBuilder mustNot(FilterBuilder filterBuilder) {
+ mustNotClauses.add(filterBuilder);
+ return this;
+ }
+
+
+ /**
+     * Adds a filter that <i>should</i> appear in the matching documents. For a boolean filter
+     * with no <tt>must</tt> clauses, one or more <tt>should</tt> clauses must match a document
+     * for the filter to match.
+ */
+ public BoolFilterBuilder should(FilterBuilder filterBuilder) {
+ shouldClauses.add(filterBuilder);
+ return this;
+ }
+
+ /**
+ * Adds multiple <i>must</i> filters.
+ */
+ public BoolFilterBuilder must(FilterBuilder... filterBuilders) {
+ for (FilterBuilder fb : filterBuilders) {
+ mustClauses.add(fb);
+ }
+ return this;
+ }
+
+ /**
+ * Adds multiple <i>must not</i> filters.
+ */
+ public BoolFilterBuilder mustNot(FilterBuilder... filterBuilders) {
+ for (FilterBuilder fb : filterBuilders) {
+ mustNotClauses.add(fb);
+ }
+ return this;
+ }
+
+ /**
+ * Adds multiple <i>should</i> filters.
+ */
+ public BoolFilterBuilder should(FilterBuilder... filterBuilders) {
+ for (FilterBuilder fb : filterBuilders) {
+ shouldClauses.add(fb);
+ }
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public BoolFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+     * Sets whether the filter should be cached. Defaults to <tt>false</tt>.
+ */
+ public BoolFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public BoolFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("bool");
+ doXArrayContent("must", mustClauses, builder, params);
+ doXArrayContent("must_not", mustNotClauses, builder, params);
+ doXArrayContent("should", shouldClauses, builder, params);
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+
+ private void doXArrayContent(String field, List<FilterBuilder> clauses, XContentBuilder builder, Params params) throws IOException {
+ if (clauses.isEmpty()) {
+ return;
+ }
+ if (clauses.size() == 1) {
+ builder.field(field);
+ clauses.get(0).toXContent(builder, params);
+ } else {
+ builder.startArray(field);
+ for (FilterBuilder clause : clauses) {
+ clause.toXContent(builder, params);
+ }
+ builder.endArray();
+ }
+ }
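+    // Usage sketch (fields and values illustrative), relying on the FilterBuilders
+    // factory from this package:
+    //   FilterBuilder filter = new BoolFilterBuilder()
+    //           .must(FilterBuilders.termFilter("tag", "wow"))
+    //           .mustNot(FilterBuilders.rangeFilter("age").from(10).to(20))
+    //           .should(FilterBuilders.termFilter("tag", "elasticsearch"));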
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java
new file mode 100644
index 0000000..36ef4db
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+
+/**
+ * Parser for the <tt>bool</tt> filter.
+ */
+public class BoolFilterParser implements FilterParser {
+
+ public static final String NAME = "bool";
+
+ @Inject
+ public BoolFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
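+        // Each of must / must_not / should accepts either a single filter object or an
+        // array of filters; parsed clauses are added to Lucene's XBooleanFilter with the
+        // corresponding MUST / MUST_NOT / SHOULD occur.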
+
+ XBooleanFilter boolFilter = new XBooleanFilter();
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ boolean hasAnyFilter = false;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("must".equals(currentFieldName)) {
+ hasAnyFilter = true;
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ boolFilter.add(new FilterClause(filter, BooleanClause.Occur.MUST));
+ }
+ } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
+ hasAnyFilter = true;
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ boolFilter.add(new FilterClause(filter, BooleanClause.Occur.MUST_NOT));
+ }
+ } else if ("should".equals(currentFieldName)) {
+ hasAnyFilter = true;
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ boolFilter.add(new FilterClause(filter, BooleanClause.Occur.SHOULD));
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("must".equals(currentFieldName)) {
+ hasAnyFilter = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ boolFilter.add(new FilterClause(filter, BooleanClause.Occur.MUST));
+ }
+ }
+ } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
+ hasAnyFilter = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ boolFilter.add(new FilterClause(filter, BooleanClause.Occur.MUST_NOT));
+ }
+ }
+ } else if ("should".equals(currentFieldName)) {
+ hasAnyFilter = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ boolFilter.add(new FilterClause(filter, BooleanClause.Occur.SHOULD));
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (!hasAnyFilter) {
+ throw new QueryParsingException(parseContext.index(), "[bool] filter has no inner should/must/must_not elements");
+ }
+
+ if (boolFilter.clauses().isEmpty()) {
+ // no filters provided, it should be ignored upstream
+ return null;
+ }
+
+ Filter filter = boolFilter;
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
new file mode 100644
index 0000000..2155040
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A Query that matches documents matching boolean combinations of other queries.
+ */
+public class BoolQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<BoolQueryBuilder> {
+
+ private ArrayList<QueryBuilder> mustClauses = new ArrayList<QueryBuilder>();
+
+ private ArrayList<QueryBuilder> mustNotClauses = new ArrayList<QueryBuilder>();
+
+ private ArrayList<QueryBuilder> shouldClauses = new ArrayList<QueryBuilder>();
+
+ private float boost = -1;
+
+ private Boolean disableCoord;
+
+ private String minimumShouldMatch;
+
+ private Boolean adjustPureNegative;
+
+ private String queryName;
+
+ /**
+ * Adds a query that <b>must</b> appear in the matching documents.
+ */
+ public BoolQueryBuilder must(QueryBuilder queryBuilder) {
+ mustClauses.add(queryBuilder);
+ return this;
+ }
+
+ /**
+ * Adds a query that <b>must not</b> appear in the matching documents.
+ */
+ public BoolQueryBuilder mustNot(QueryBuilder queryBuilder) {
+ mustNotClauses.add(queryBuilder);
+ return this;
+ }
+
+ /**
+     * Adds a query that <i>should</i> appear in the matching documents. For a boolean query with no
+     * <tt>must</tt> clauses, one or more <tt>should</tt> clauses must match a document
+     * for the BooleanQuery to match.
+ *
+ * @see #minimumNumberShouldMatch(int)
+ */
+ public BoolQueryBuilder should(QueryBuilder queryBuilder) {
+ shouldClauses.add(queryBuilder);
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public BoolQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+     * Disables <tt>Similarity#coord(int,int)</tt> in scoring. Defaults to <tt>false</tt>.
+ */
+ public BoolQueryBuilder disableCoord(boolean disableCoord) {
+ this.disableCoord = disableCoord;
+ return this;
+ }
+
+ /**
+ * Specifies a minimum number of the optional (should) boolean clauses which must be satisfied.
+ * <p/>
+ * <p>By default no optional clauses are necessary for a match
+ * (unless there are no required clauses). If this method is used,
+ * then the specified number of clauses is required.
+ * <p/>
+ * <p>Use of this method is totally independent of specifying that
+ * any specific clauses are required (or prohibited). This number will
+ * only be compared against the number of matching optional clauses.
+ *
+ * @param minimumNumberShouldMatch the number of optional clauses that must match
+ */
+ public BoolQueryBuilder minimumNumberShouldMatch(int minimumNumberShouldMatch) {
+ this.minimumShouldMatch = Integer.toString(minimumNumberShouldMatch);
+ return this;
+ }
+
+ /**
+ * Sets the minimum should match using the special syntax (for example, supporting percentage).
+ */
+ public BoolQueryBuilder minimumShouldMatch(String minimumShouldMatch) {
+ this.minimumShouldMatch = minimumShouldMatch;
+ return this;
+ }
+
+ /**
+     * Returns <code>true</code> if the query being built has at least one clause.
+ */
+ public boolean hasClauses() {
+ return !mustClauses.isEmpty() || !mustNotClauses.isEmpty() || !shouldClauses.isEmpty();
+ }
+
+ /**
+     * If a boolean query contains only negative ("must not") clauses, should the
+     * BooleanQuery be enhanced with a {@link MatchAllDocsQuery} so that it acts
+     * as a pure exclude? The default is <code>true</code>.
+ */
+ public BoolQueryBuilder adjustPureNegative(boolean adjustPureNegative) {
+ this.adjustPureNegative = adjustPureNegative;
+ return this;
+ }
+
+ /**
+     * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public BoolQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("bool");
+ doXArrayContent("must", mustClauses, builder, params);
+ doXArrayContent("must_not", mustNotClauses, builder, params);
+ doXArrayContent("should", shouldClauses, builder, params);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (disableCoord != null) {
+ builder.field("disable_coord", disableCoord);
+ }
+ if (minimumShouldMatch != null) {
+ builder.field("minimum_should_match", minimumShouldMatch);
+ }
+ if (adjustPureNegative != null) {
+ builder.field("adjust_pure_negative", adjustPureNegative);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+
+ private void doXArrayContent(String field, List<QueryBuilder> clauses, XContentBuilder builder, Params params) throws IOException {
+ if (clauses.isEmpty()) {
+ return;
+ }
+ if (clauses.size() == 1) {
+ builder.field(field);
+ clauses.get(0).toXContent(builder, params);
+ } else {
+ builder.startArray(field);
+ for (QueryBuilder clause : clauses) {
+ clause.toXContent(builder, params);
+ }
+ builder.endArray();
+ }
+ }
+
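+    // Usage sketch (terms illustrative), relying on the QueryBuilders factory from this package:
+    //   QueryBuilder query = new BoolQueryBuilder()
+    //           .must(QueryBuilders.termQuery("content", "test1"))
+    //           .should(QueryBuilders.termQuery("content", "test2"))
+    //           .should(QueryBuilders.termQuery("content", "test3"))
+    //           .minimumNumberShouldMatch(1);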
+}
diff --git a/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java b/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java
new file mode 100644
index 0000000..c691c9e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
+import static org.elasticsearch.common.lucene.search.Queries.optimizeQuery;
+
+/**
+ * Parser for the <tt>bool</tt> query.
+ */
+public class BoolQueryParser implements QueryParser {
+
+ public static final String NAME = "bool";
+
+ @Inject
+ public BoolQueryParser(Settings settings) {
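+        // The index-level setting takes precedence over the node-level one; both fall
+        // back to Lucene's default. Note that this mutates a JVM-wide static setting.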
+ BooleanQuery.setMaxClauseCount(settings.getAsInt("index.query.bool.max_clause_count", settings.getAsInt("indices.query.bool.max_clause_count", BooleanQuery.getMaxClauseCount())));
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ boolean disableCoord = false;
+ float boost = 1.0f;
+ String minimumShouldMatch = null;
+
+ List<BooleanClause> clauses = newArrayList();
+ boolean adjustPureNegative = true;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("must".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST));
+ }
+ } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST_NOT));
+ }
+ } else if ("should".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ clauses.add(new BooleanClause(query, BooleanClause.Occur.SHOULD));
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("must".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST));
+ }
+ }
+ } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST_NOT));
+ }
+ }
+ } else if ("should".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ clauses.add(new BooleanClause(query, BooleanClause.Occur.SHOULD));
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "bool query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) {
+ disableCoord = parser.booleanValue();
+ } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ minimumShouldMatch = parser.textOrNull();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("minimum_number_should_match".equals(currentFieldName) || "minimumNumberShouldMatch".equals(currentFieldName)) {
+ minimumShouldMatch = parser.textOrNull();
+ } else if ("adjust_pure_negative".equals(currentFieldName) || "adjustPureNegative".equals(currentFieldName)) {
+ adjustPureNegative = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (clauses.isEmpty()) {
+ return null;
+ }
+
+ BooleanQuery booleanQuery = new BooleanQuery(disableCoord);
+ for (BooleanClause clause : clauses) {
+ booleanQuery.add(clause);
+ }
+ booleanQuery.setBoost(boost);
+ Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
+ Query query = optimizeQuery(adjustPureNegative ? fixNegativeQueryIfNeeded(booleanQuery) : booleanQuery);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/BoostableQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BoostableQueryBuilder.java
new file mode 100644
index 0000000..2733438
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoostableQueryBuilder.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+/**
+ * Query builder which allows setting a boost.
+ */
+public interface BoostableQueryBuilder<B extends BoostableQueryBuilder<B>> {
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public B boost(float boost);
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java
new file mode 100644
index 0000000..0886046
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * The BoostingQuery class can be used to effectively demote results that match a given query.
+ * Unlike the "NOT" clause, this still selects documents that contain undesirable terms,
+ * but reduces their overall score:
+ * <pre>
+ * Query balancedQuery = new BoostingQuery(positiveQuery, negativeQuery, 0.01f);
+ * </pre>
+ * In this scenario the positiveQuery contains the mandatory, desirable criteria used to
+ * select all matching documents, and the negativeQuery contains the undesirable elements
+ * that are simply used to lessen the scores. Documents that match the negativeQuery have
+ * their score multiplied by the supplied "boost" parameter, which should be less than 1
+ * to achieve a demoting effect.
+ */
+public class BoostingQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<BoostingQueryBuilder> {
+
+ private QueryBuilder positiveQuery;
+
+ private QueryBuilder negativeQuery;
+
+ private float negativeBoost = -1;
+
+ private float boost = -1;
+
+ public BoostingQueryBuilder() {
+
+ }
+
+ public BoostingQueryBuilder positive(QueryBuilder positiveQuery) {
+ this.positiveQuery = positiveQuery;
+ return this;
+ }
+
+ public BoostingQueryBuilder negative(QueryBuilder negativeQuery) {
+ this.negativeQuery = negativeQuery;
+ return this;
+ }
+
+ public BoostingQueryBuilder negativeBoost(float negativeBoost) {
+ this.negativeBoost = negativeBoost;
+ return this;
+ }
+
+ public BoostingQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
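+    // Usage sketch (queries illustrative), relying on the QueryBuilders factory from this package:
+    //   QueryBuilder query = new BoostingQueryBuilder()
+    //           .positive(QueryBuilders.termQuery("field", "value1"))
+    //           .negative(QueryBuilders.termQuery("field", "value2"))
+    //           .negativeBoost(0.2f);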
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ if (positiveQuery == null) {
+ throw new ElasticsearchIllegalArgumentException("boosting query requires positive query to be set");
+ }
+ if (negativeQuery == null) {
+ throw new ElasticsearchIllegalArgumentException("boosting query requires negative query to be set");
+ }
+ if (negativeBoost == -1) {
+ throw new ElasticsearchIllegalArgumentException("boosting query requires negativeBoost to be set");
+ }
+ builder.startObject(BoostingQueryParser.NAME);
+ builder.field("positive");
+ positiveQuery.toXContent(builder, params);
+ builder.field("negative");
+ negativeQuery.toXContent(builder, params);
+
+ builder.field("negative_boost", negativeBoost);
+
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java b/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java
new file mode 100644
index 0000000..a117256
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.queries.BoostingQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for the <tt>boosting</tt> query.
+ */
+public class BoostingQueryParser implements QueryParser {
+
+ public static final String NAME = "boosting";
+
+ @Inject
+ public BoostingQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query positiveQuery = null;
+ boolean positiveQueryFound = false;
+ Query negativeQuery = null;
+ boolean negativeQueryFound = false;
+ float boost = -1;
+ float negativeBoost = -1;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("positive".equals(currentFieldName)) {
+ positiveQuery = parseContext.parseInnerQuery();
+ positiveQueryFound = true;
+ } else if ("negative".equals(currentFieldName)) {
+ negativeQuery = parseContext.parseInnerQuery();
+ negativeQueryFound = true;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[boosting] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("negative_boost".equals(currentFieldName) || "negativeBoost".equals(currentFieldName)) {
+ negativeBoost = parser.floatValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[boosting] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (positiveQuery == null && !positiveQueryFound) {
+ throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'positive' query to be set'");
+ }
+ if (negativeQuery == null && !negativeQueryFound) {
+ throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'negative' query to be set'");
+ }
+ if (negativeBoost == -1) {
+ throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'negative_boost' to be set'");
+ }
+
+ // parsers returned null
+ if (positiveQuery == null || negativeQuery == null) {
+ return null;
+ }
+
+ BoostingQuery boostingQuery = new BoostingQuery(positiveQuery, negativeQuery, negativeBoost);
+ if (boost != -1) {
+ boostingQuery.setBoost(boost);
+ }
+ return boostingQuery;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java
new file mode 100644
index 0000000..9775b3f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * CommonTermsQuery is a query that executes high-frequency terms in an
+ * optional sub-query to prevent slow queries due to "common" terms like
+ * stopwords. The query effectively builds two queries from the analyzed
+ * terms: low-frequency terms are added to a required boolean clause and
+ * high-frequency terms are added to an optional boolean clause. The
+ * optional clause is only executed if the required "low-frequency" clause
+ * matches. Scores produced by this query will be slightly different from a
+ * plain {@link BooleanQuery} scorer, mainly due to differences in the
+ * {@link Similarity#coord(int,int) number of leaf queries} in the required
+ * boolean clause. In most cases high-frequency terms are unlikely to
+ * significantly contribute to the document score unless at least one of the
+ * low-frequency terms is matched, so this query can improve query
+ * execution times significantly where applicable.
+ */
+public class CommonTermsQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<CommonTermsQueryBuilder> {
+
+ public static enum Operator {
+ OR, AND
+ }
+
+ private final String name;
+
+ private final Object text;
+
+ private Operator highFreqOperator = null;
+
+ private Operator lowFreqOperator = null;
+
+ private String analyzer = null;
+
+ private Float boost = null;
+
+ private String lowFreqMinimumShouldMatch = null;
+
+ private String highFreqMinimumShouldMatch = null;
+
+ private Boolean disableCoords = null;
+
+ private Float cutoffFrequency = null;
+
+ private String queryName;
+
+ /**
+ * Constructs a new common terms query.
+ */
+ public CommonTermsQueryBuilder(String name, Object text) {
+ if (name == null) {
+ throw new ElasticsearchIllegalArgumentException("Field name must not be null");
+ }
+ if (text == null) {
+ throw new ElasticsearchIllegalArgumentException("Query must not be null");
+ }
+ this.text = text;
+ this.name = name;
+ }
+
+ /**
+ * Sets the operator to use for terms with a high document frequency
+ * (greater than or equal to {@link #cutoffFrequency(float)}). Defaults to
+ * <tt>OR</tt> (the parser's DEFAULT_HIGH_FREQ_OCCUR).
+ */
+ public CommonTermsQueryBuilder highFreqOperator(Operator operator) {
+ this.highFreqOperator = operator;
+ return this;
+ }
+
+ /**
+ * Sets the operator to use for terms with a low document frequency (less
+ * than {@link #cutoffFrequency(float)}). Defaults to <tt>OR</tt>.
+ */
+ public CommonTermsQueryBuilder lowFreqOperator(Operator operator) {
+ this.lowFreqOperator = operator;
+ return this;
+ }
+
+ /**
+ * Explicitly set the analyzer to use. Defaults to use explicit mapping
+ * config for the field, or, if not set, the default search analyzer.
+ */
+ public CommonTermsQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ /**
+ * Set the boost to apply to the query.
+ */
+ public CommonTermsQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the cutoff document frequency for high / low frequency terms. A value
+ * in [0..1] (or an absolute number >= 1) representing the maximum threshold of
+ * a term's document frequency for it to be considered a low-frequency term.
+ * Defaults to
+ * <tt>{@value CommonTermsQueryParser#DEFAULT_MAX_TERM_DOC_FREQ}</tt>.
+ */
+ public CommonTermsQueryBuilder cutoffFrequency(float cutoffFrequency) {
+ this.cutoffFrequency = cutoffFrequency;
+ return this;
+ }
+
+ /**
+ * Sets the minimum number of high-frequency query terms that need to match in
+ * order to produce a hit when there are no low-frequency terms.
+ */
+ public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) {
+ this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch;
+ return this;
+ }
+
+ /**
+ * Sets the minimum number of low-frequency query terms that need to match in
+ * order to produce a hit.
+ */
+ public CommonTermsQueryBuilder lowFreqMinimumShouldMatch(String lowFreqMinimumShouldMatch) {
+ this.lowFreqMinimumShouldMatch = lowFreqMinimumShouldMatch;
+ return this;
+ }
+
+ /**
+ * Sets the query name, which can be used when searching for matched_filters per hit.
+ */
+ public CommonTermsQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(CommonTermsQueryParser.NAME);
+ builder.startObject(name);
+
+ builder.field("query", text);
+ if (disableCoords != null) {
+ builder.field("disable_coords", disableCoords);
+ }
+ if (highFreqOperator != null) {
+ builder.field("high_freq_operator", highFreqOperator.toString());
+ }
+ if (lowFreqOperator != null) {
+ builder.field("low_freq_operator", lowFreqOperator.toString());
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (boost != null) {
+ builder.field("boost", boost);
+ }
+ if (cutoffFrequency != null) {
+ builder.field("cutoff_frequency", cutoffFrequency);
+ }
+ if (lowFreqMinimumShouldMatch != null || highFreqMinimumShouldMatch != null) {
+ builder.startObject("minimum_should_match");
+ if (lowFreqMinimumShouldMatch != null) {
+ builder.field("low_freq", lowFreqMinimumShouldMatch);
+ }
+ if (highFreqMinimumShouldMatch != null) {
+ builder.field("high_freq", highFreqMinimumShouldMatch);
+ }
+ builder.endObject();
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+
+ builder.endObject();
+ builder.endObject();
+ }
+}
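
A minimal usage sketch of the builder above; the field name, query text, and option values are illustrative, and every method shown is defined in this file:

    CommonTermsQueryBuilder query = new CommonTermsQueryBuilder("body", "the quick brown fox")
            .cutoffFrequency(0.001f)                               // terms in more than 0.1% of docs count as high-frequency
            .highFreqOperator(CommonTermsQueryBuilder.Operator.OR)
            .lowFreqMinimumShouldMatch("2");                       // at least two low-frequency terms must match
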
diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java
new file mode 100644
index 0000000..701c4f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.ExtendedCommonTermsQuery;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ *
+ */
+public class CommonTermsQueryParser implements QueryParser {
+
+ public static final String NAME = "common";
+
+ static final float DEFAULT_MAX_TERM_DOC_FREQ = 0.01f;
+
+ static final Occur DEFAULT_HIGH_FREQ_OCCUR = Occur.SHOULD;
+
+ static final Occur DEFAULT_LOW_FREQ_OCCUR = Occur.SHOULD;
+
+ static final boolean DEFAULT_DISABLE_COORDS = true;
+
+
+ @Inject
+ public CommonTermsQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { NAME };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[common] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+ Object value = null;
+ float boost = 1.0f;
+ String queryAnalyzer = null;
+ String lowFreqMinimumShouldMatch = null;
+ String highFreqMinimumShouldMatch = null;
+ boolean disableCoords = DEFAULT_DISABLE_COORDS;
+ Occur highFreqOccur = DEFAULT_HIGH_FREQ_OCCUR;
+ Occur lowFreqOccur = DEFAULT_LOW_FREQ_OCCUR;
+ float maxTermFrequency = DEFAULT_MAX_TERM_DOC_FREQ;
+ String queryName = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ String innerFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ innerFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("low_freq".equals(innerFieldName) || "lowFreq".equals(innerFieldName)) {
+ lowFreqMinimumShouldMatch = parser.text();
+ } else if ("high_freq".equals(innerFieldName) || "highFreq".equals(innerFieldName)) {
+ highFreqMinimumShouldMatch = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + innerFieldName + "] for [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("query".equals(currentFieldName)) {
+ value = parser.objectText();
+ } else if ("analyzer".equals(currentFieldName)) {
+ String analyzer = parser.text();
+ if (parseContext.analysisService().analyzer(analyzer) == null) {
+ throw new QueryParsingException(parseContext.index(), "[common] analyzer [" + parser.text() + "] not found");
+ }
+ queryAnalyzer = analyzer;
+ } else if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) {
+ disableCoords = parser.booleanValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("high_freq_operator".equals(currentFieldName) || "highFreqOperator".equals(currentFieldName)) {
+ String op = parser.text();
+ if ("or".equalsIgnoreCase(op)) {
+ highFreqOccur = BooleanClause.Occur.SHOULD;
+ } else if ("and".equalsIgnoreCase(op)) {
+ highFreqOccur = BooleanClause.Occur.MUST;
+ } else {
+ throw new QueryParsingException(parseContext.index(),
+ "[common] query requires operator to be either 'and' or 'or', not [" + op + "]");
+ }
+ } else if ("low_freq_operator".equals(currentFieldName) || "lowFreqOperator".equals(currentFieldName)) {
+ String op = parser.text();
+ if ("or".equalsIgnoreCase(op)) {
+ lowFreqOccur = BooleanClause.Occur.SHOULD;
+ } else if ("and".equalsIgnoreCase(op)) {
+ lowFreqOccur = BooleanClause.Occur.MUST;
+ } else {
+ throw new QueryParsingException(parseContext.index(),
+ "[common] query requires operator to be either 'and' or 'or', not [" + op + "]");
+ }
+ } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ lowFreqMinimumShouldMatch = parser.text();
+ } else if ("cutoff_frequency".equals(currentFieldName)) {
+ maxTermFrequency = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.objectText();
+ // move to the next token
+ token = parser.nextToken();
+ if (token != XContentParser.Token.END_OBJECT) {
+ throw new QueryParsingException(
+ parseContext.index(),
+ "[common] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?");
+ }
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No text specified for text query");
+ }
+ FieldMapper<?> mapper = null;
+ String field;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ mapper = smartNameFieldMappers.mapper();
+ field = mapper.names().indexName();
+ } else {
+ field = fieldName;
+ }
+
+ Analyzer analyzer = null;
+ if (queryAnalyzer == null) {
+ if (mapper != null) {
+ analyzer = mapper.searchAnalyzer();
+ }
+ if (analyzer == null && smartNameFieldMappers != null) {
+ analyzer = smartNameFieldMappers.searchAnalyzer();
+ }
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+ } else {
+ analyzer = parseContext.mapperService().analysisService().analyzer(queryAnalyzer);
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + queryAnalyzer + "]");
+ }
+ }
+
+ ExtendedCommonTermsQuery commonsQuery = new ExtendedCommonTermsQuery(highFreqOccur, lowFreqOccur, maxTermFrequency, disableCoords, mapper);
+ commonsQuery.setBoost(boost);
+ Query query = parseQueryString(commonsQuery, value.toString(), field, parseContext, analyzer, lowFreqMinimumShouldMatch, highFreqMinimumShouldMatch, smartNameFieldMappers);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+
+
+ private final Query parseQueryString(ExtendedCommonTermsQuery query, String queryString, String field, QueryParseContext parseContext,
+ Analyzer analyzer, String lowFreqMinimumShouldMatch, String highFreqMinimumShouldMatch, MapperService.SmartNameFieldMappers smartNameFieldMappers) throws IOException {
+ // Logic similar to QueryParser#getFieldQuery
+ TokenStream source = analyzer.tokenStream(field, queryString);
+ int count = 0;
+ try {
+ source.reset();
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+ while (source.incrementToken()) {
+ BytesRef ref = new BytesRef(termAtt.length() * 4); // oversized for UTF-8
+ UnicodeUtil.UTF16toUTF8(termAtt.buffer(), 0, termAtt.length(), ref);
+ query.add(new Term(field, ref));
+ count++;
+ }
+ } finally {
+ source.close();
+ }
+
+ if (count == 0) {
+ return null;
+ }
+ query.setLowFreqMinimumNumberShouldMatch(lowFreqMinimumShouldMatch);
+ query.setHighFreqMinimumNumberShouldMatch(highFreqMinimumShouldMatch);
+ return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
+ }
+}
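
The parse() method above accepts both a simplified form and an options form; a sketch of each, with illustrative values:

    { "common" : { "body" : "the quick brown fox" } }

    { "common" : {
        "body" : {
            "query" : "the quick brown fox",
            "cutoff_frequency" : 0.001,
            "low_freq_operator" : "and",
            "minimum_should_match" : { "low_freq" : 2, "high_freq" : 3 }
        }
    } }
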
diff --git a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java
new file mode 100644
index 0000000..380741e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query that wraps another query or a filter and simply returns a constant
+ * score equal to the query boost for every matching document.
+ *
+ *
+ */
+public class ConstantScoreQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<ConstantScoreQueryBuilder> {
+
+ private final FilterBuilder filterBuilder;
+ private final QueryBuilder queryBuilder;
+
+ private float boost = -1;
+
+
+ /**
+ * A query that wraps a filter and simply returns a constant score equal to the
+ * query boost for every document in the filter.
+ *
+ * @param filterBuilder The filter to wrap in a constant score query
+ */
+ public ConstantScoreQueryBuilder(FilterBuilder filterBuilder) {
+ this.filterBuilder = filterBuilder;
+ this.queryBuilder = null;
+ }
+ /**
+ * A query that wraps a query and simply returns a constant score equal to the
+ * query boost for every document in the query.
+ *
+ * @param queryBuilder The query to wrap in a constant score query
+ */
+ public ConstantScoreQueryBuilder(QueryBuilder queryBuilder) {
+ this.filterBuilder = null;
+ this.queryBuilder = queryBuilder;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public ConstantScoreQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(ConstantScoreQueryParser.NAME);
+ if (queryBuilder != null) {
+ assert filterBuilder == null;
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
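
A minimal usage sketch; FilterBuilders.termFilter(...) is assumed to be the usual filter factory defined elsewhere in this package:

    // constant score of 2.0 for every document matching the wrapped filter
    ConstantScoreQueryBuilder query =
            new ConstantScoreQueryBuilder(FilterBuilders.termFilter("user", "kimchy"))
                    .boost(2.0f);
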
diff --git a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java
new file mode 100644
index 0000000..6b60741
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ConstantScoreQueryParser implements QueryParser {
+
+ public static final String NAME = "constant_score";
+
+ @Inject
+ public ConstantScoreQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Filter filter = null;
+ boolean filterFound = false;
+ Query query = null;
+ boolean queryFound = false;
+ float boost = 1.0f;
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filter = parseContext.parseInnerFilter();
+ filterFound = true;
+ } else if ("query".equals(currentFieldName)) {
+ query = parseContext.parseInnerQuery();
+ queryFound = true;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[constant_score] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[constant_score] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!filterFound && !queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[constant_score] requires either 'filter' or 'query' element");
+ }
+
+ if (query == null && filter == null) {
+ return null;
+ }
+
+ if (filter != null) {
+ // cache the filter if requested
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ Query query1 = new XConstantScoreQuery(filter);
+ query1.setBoost(boost);
+ return query1;
+ }
+ // Query
+ query = new ConstantScoreQuery(query);
+ query.setBoost(boost);
+ return query;
+ }
+} \ No newline at end of file
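
The corresponding request body handled by parse() above, including the optional caching controls it recognizes; the inner term filter is illustrative:

    { "constant_score" : {
        "filter" : { "term" : { "user" : "kimchy" } },
        "boost" : 2.0,
        "_cache" : true
    } }
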
diff --git a/src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryBuilder.java
new file mode 100644
index 0000000..392b8f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryBuilder.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query that simply applies the boost factor to another query (multiply it).
+ *
+ * @deprecated use {@link FunctionScoreQueryBuilder} instead.
+ */
+public class CustomBoostFactorQueryBuilder extends BaseQueryBuilder {
+
+ private final QueryBuilder queryBuilder;
+
+ private float boostFactor = -1;
+
+ /**
+ * A query that simply applies the boost factor to another query (multiply
+ * it).
+ *
+ * @param queryBuilder
+ * The query to apply the boost factor to.
+ */
+ public CustomBoostFactorQueryBuilder(QueryBuilder queryBuilder) {
+ this.queryBuilder = queryBuilder;
+ }
+
+ /**
+ * Sets the boost factor for this query.
+ */
+ public CustomBoostFactorQueryBuilder boostFactor(float boost) {
+ this.boostFactor = boost;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(CustomBoostFactorQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ if (boostFactor != -1) {
+ builder.field("boost_factor", boostFactor);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
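
A minimal usage sketch of this deprecated builder; QueryBuilders.termQuery(...) is assumed from the query factory elsewhere in this package, and new code should prefer FunctionScoreQueryBuilder as noted above:

    CustomBoostFactorQueryBuilder query =
            new CustomBoostFactorQueryBuilder(QueryBuilders.termQuery("user", "kimchy"))
                    .boostFactor(3.0f); // multiply the wrapped query's score by 3
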
diff --git a/src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryParser.java b/src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryParser.java
new file mode 100644
index 0000000..3a9c916
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CustomBoostFactorQueryParser.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser;
+
+import java.io.IOException;
+
+/**
+ * @deprecated use {@link FunctionScoreQueryParser} instead.
+ */
+public class CustomBoostFactorQueryParser implements QueryParser {
+
+ public static final String NAME = "custom_boost_factor";
+
+ @Inject
+ public CustomBoostFactorQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { NAME, Strings.toCamelCase(NAME) };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ float boost = 1.0f;
+ float boostFactor = 1.0f;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ query = parseContext.parseInnerQuery();
+ queryFound = true;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_boost_factor] query does not support ["
+ + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("boost_factor".equals(currentFieldName) || "boostFactor".equals(currentFieldName)) {
+ boostFactor = parser.floatValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_boost_factor] query does not support ["
+ + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[constant_factor_query] requires 'query' element");
+ }
+ if (query == null) {
+ return null;
+ }
+ FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(query, new BoostScoreFunction(boostFactor));
+ functionScoreQuery.setBoost(boost);
+ return functionScoreQuery;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryBuilder.java
new file mode 100644
index 0000000..4c3872b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryBuilder.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+/**
+ * A query that uses filters, each with a script or boost associated with it,
+ * to compute the score.
+ *
+ * @deprecated use {@link FunctionScoreQueryBuilder} instead.
+ */
+public class CustomFiltersScoreQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<CustomFiltersScoreQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+
+ private String lang;
+
+ private float boost = -1;
+
+ private Float maxBoost;
+
+ private Map<String, Object> params = null;
+
+ private String scoreMode;
+
+ private ArrayList<FilterBuilder> filters = new ArrayList<FilterBuilder>();
+ private ArrayList<String> scripts = new ArrayList<String>();
+ private FloatArrayList boosts = new FloatArrayList();
+
+ public CustomFiltersScoreQueryBuilder(QueryBuilder queryBuilder) {
+ this.queryBuilder = queryBuilder;
+ }
+
+ public CustomFiltersScoreQueryBuilder add(FilterBuilder filter, String script) {
+ this.filters.add(filter);
+ this.scripts.add(script);
+ this.boosts.add(-1);
+ return this;
+ }
+
+ public CustomFiltersScoreQueryBuilder add(FilterBuilder filter, float boost) {
+ this.filters.add(filter);
+ this.scripts.add(null);
+ this.boosts.add(boost);
+ return this;
+ }
+
+ public CustomFiltersScoreQueryBuilder scoreMode(String scoreMode) {
+ this.scoreMode = scoreMode;
+ return this;
+ }
+
+ /**
+ * Sets the language of the script.
+ */
+ public CustomFiltersScoreQueryBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * Additional parameters that can be provided to the script.
+ */
+ public CustomFiltersScoreQueryBuilder params(Map<String, Object> params) {
+ if (this.params == null) {
+ this.params = params;
+ } else {
+ this.params.putAll(params);
+ }
+ return this;
+ }
+
+ /**
+ * Additional parameters that can be provided to the script.
+ */
+ public CustomFiltersScoreQueryBuilder param(String key, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(key, value);
+ return this;
+ }
+
+ public CustomFiltersScoreQueryBuilder maxBoost(float maxBoost) {
+ this.maxBoost = maxBoost;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in
+ * addition to the normal weightings) have their score multiplied by the
+ * boost provided.
+ */
+ public CustomFiltersScoreQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(CustomFiltersScoreQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+
+ builder.startArray("filters");
+ for (int i = 0; i < filters.size(); i++) {
+ builder.startObject();
+ builder.field("filter");
+ filters.get(i).toXContent(builder, params);
+ String script = scripts.get(i);
+ if (script != null) {
+ builder.field("script", script);
+ } else {
+ builder.field("boost", boosts.get(i));
+ }
+ builder.endObject();
+ }
+ builder.endArray();
+
+ if (scoreMode != null) {
+ builder.field("score_mode", scoreMode);
+ }
+ if (maxBoost != null) {
+ builder.field("max_boost", maxBoost);
+ }
+
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
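
A minimal usage sketch; QueryBuilders and FilterBuilders are assumed factories from this package, and each filter carries either a fixed boost or a script, matching the two add(...) overloads above:

    CustomFiltersScoreQueryBuilder query =
            new CustomFiltersScoreQueryBuilder(QueryBuilders.matchAllQuery())
                    .add(FilterBuilders.rangeFilter("age").from(0).to(10), 3.0f)          // fixed boost
                    .add(FilterBuilders.rangeFilter("age").from(10).to(20), "_score * 2") // illustrative script
                    .scoreMode("first"); // the parser's default score mode
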
diff --git a/src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryParser.java
new file mode 100644
index 0000000..9e61f5e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryParser.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.lucene.search.function.ScriptScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser;
+import org.elasticsearch.script.SearchScript;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+/**
+ * @deprecated use {@link FunctionScoreQueryParser} instead.
+ */
+public class CustomFiltersScoreQueryParser implements QueryParser {
+
+ public static final String NAME = "custom_filters_score";
+
+ @Inject
+ public CustomFiltersScoreQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { NAME, Strings.toCamelCase(NAME) };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ float boost = 1.0f;
+ String scriptLang = null;
+ Map<String, Object> vars = null;
+
+ FiltersFunctionScoreQuery.ScoreMode scoreMode = FiltersFunctionScoreQuery.ScoreMode.First;
+ ArrayList<Filter> filters = new ArrayList<Filter>();
+ boolean filtersFound = false;
+ ArrayList<String> scripts = new ArrayList<String>();
+ FloatArrayList boosts = new FloatArrayList();
+ float maxBoost = Float.MAX_VALUE;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ query = parseContext.parseInnerQuery();
+ queryFound = true;
+ } else if ("params".equals(currentFieldName)) {
+ vars = parser.map();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_filters_score] query does not support ["
+ + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("filters".equals(currentFieldName)) {
+ filtersFound = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String script = null;
+ Filter filter = null;
+ boolean filterFound = false;
+ float fboost = Float.NaN;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filter = parseContext.parseInnerFilter();
+ filterFound = true;
+ }
+ } else if (token.isValue()) {
+ if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ fboost = parser.floatValue();
+ }
+ }
+ }
+ if (script == null && Float.isNaN(fboost)) { // fboost is initialized to NaN, so NaN means "boost not set"
+ throw new QueryParsingException(parseContext.index(),
+ "[custom_filters_score] missing 'script' or 'boost' in filters array element");
+ }
+ if (!filterFound) {
+ throw new QueryParsingException(parseContext.index(),
+ "[custom_filters_score] missing 'filter' in filters array element");
+ }
+ if (filter != null) {
+ filters.add(filter);
+ scripts.add(script);
+ boosts.add(fboost);
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_filters_score] query does not support ["
+ + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
+ String sScoreMode = parser.text();
+ if ("avg".equals(sScoreMode)) {
+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.Avg;
+ } else if ("max".equals(sScoreMode)) {
+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.Max;
+ } else if ("min".equals(sScoreMode)) {
+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.Min;
+ } else if ("total".equals(sScoreMode)) {
+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.Sum;
+ } else if ("multiply".equals(sScoreMode)) {
+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.Multiply;
+ } else if ("first".equals(sScoreMode)) {
+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.First;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_filters_score] illegal score_mode [" + sScoreMode
+ + "]");
+ }
+ } else if ("max_boost".equals(currentFieldName) || "maxBoost".equals(currentFieldName)) {
+ maxBoost = parser.floatValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_filters_score] query does not support ["
+ + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[custom_filters_score] requires 'query' field");
+ }
+ if (query == null) {
+ return null;
+ }
+ if (!filtersFound) {
+ throw new QueryParsingException(parseContext.index(), "[custom_filters_score] requires 'filters' field");
+ }
+ // if all filter elements returned null, just use the query
+ if (filters.isEmpty()) {
+ return query;
+ }
+
+ FiltersFunctionScoreQuery.FilterFunction[] filterFunctions = new FiltersFunctionScoreQuery.FilterFunction[filters.size()];
+ for (int i = 0; i < filterFunctions.length; i++) {
+ ScoreFunction scoreFunction;
+ String script = scripts.get(i);
+ if (script != null) {
+ SearchScript searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptLang, script, vars);
+ scoreFunction = new ScriptScoreFunction(script, vars, searchScript);
+ } else {
+ scoreFunction = new BoostScoreFunction(boosts.get(i));
+ }
+ filterFunctions[i] = new FiltersFunctionScoreQuery.FilterFunction(filters.get(i), scoreFunction);
+ }
+ FiltersFunctionScoreQuery functionScoreQuery = new FiltersFunctionScoreQuery(query, scoreMode, filterFunctions, maxBoost);
+ functionScoreQuery.setBoost(boost);
+ return functionScoreQuery;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/CustomScoreQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CustomScoreQueryBuilder.java
new file mode 100644
index 0000000..dd3d0b9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CustomScoreQueryBuilder.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A query that uses a script to compute or influence the score of documents
+ * that match with the inner query or filter.
+ *
+ * @deprecated use {@link FunctionScoreQueryBuilder} instead.
+ */
+public class CustomScoreQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<CustomScoreQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+
+ private final FilterBuilder filterBuilder;
+
+ private String script;
+
+ private String lang;
+
+ private float boost = -1;
+
+ private Map<String, Object> params = null;
+
+ /**
+ * Constructs a query that defines how the scores are computed or influenced
+ * for documents that match the specified query, using a custom script.
+ *
+ * @param queryBuilder
+ * The query that defines which documents are custom scored by
+ * this query
+ */
+ public CustomScoreQueryBuilder(QueryBuilder queryBuilder) {
+ this.queryBuilder = queryBuilder;
+ this.filterBuilder = null;
+ }
+
+ /**
+ * Constructs a query that defines how documents matching the specified
+ * filter are scored.
+ *
+ * @param filterBuilder
+ * The filter that decides which documents are scored by this
+ * query.
+ */
+ public CustomScoreQueryBuilder(FilterBuilder filterBuilder) {
+ this.filterBuilder = filterBuilder;
+ this.queryBuilder = null;
+ }
+
+ /**
+ * Sets the script used to compute or influence the score.
+ */
+ public CustomScoreQueryBuilder script(String script) {
+ this.script = script;
+ return this;
+ }
+
+ /**
+ * Sets the language of the script.
+ */
+ public CustomScoreQueryBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * Additional parameters that can be provided to the script.
+ */
+ public CustomScoreQueryBuilder params(Map<String, Object> params) {
+ if (this.params == null) {
+ this.params = params;
+ } else {
+ this.params.putAll(params);
+ }
+ return this;
+ }
+
+ /**
+ * Additional parameters that can be provided to the script.
+ */
+ public CustomScoreQueryBuilder param(String key, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(key, value);
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in
+ * addition to the normal weightings) have their score multiplied by the
+ * boost provided.
+ */
+ public CustomScoreQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(CustomScoreQueryParser.NAME);
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else if (filterBuilder != null) {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ builder.field("script", script);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
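
A minimal usage sketch; the script body is illustrative and is evaluated by the node's script service, with params made available to it:

    CustomScoreQueryBuilder query =
            new CustomScoreQueryBuilder(QueryBuilders.matchAllQuery())
                    .script("_score * factor") // illustrative script
                    .param("factor", 1.5f);
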
diff --git a/src/main/java/org/elasticsearch/index/query/CustomScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/CustomScoreQueryParser.java
new file mode 100644
index 0000000..5a1363b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/CustomScoreQueryParser.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.ScriptScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.script.SearchScript;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * @deprecated use {@link FunctionScoreQueryParser} instead.
+ */
+public class CustomScoreQueryParser implements QueryParser {
+
+ public static final String NAME = "custom_score";
+
+ @Inject
+ public CustomScoreQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { NAME, Strings.toCamelCase(NAME) };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ Filter filter = null;
+ boolean queryOrFilterFound = false;
+ float boost = 1.0f;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> vars = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ query = parseContext.parseInnerQuery();
+ queryOrFilterFound = true;
+ } else if ("filter".equals(currentFieldName)) {
+ filter = parseContext.parseInnerFilter();
+ queryOrFilterFound = true;
+ } else if ("params".equals(currentFieldName)) {
+ vars = parser.map();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_score] query does not support [" + currentFieldName
+ + "]");
+ }
+ } else if (token.isValue()) {
+ if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[custom_score] query does not support [" + currentFieldName
+ + "]");
+ }
+ }
+ }
+ if (!queryOrFilterFound) {
+ throw new QueryParsingException(parseContext.index(), "[custom_score] requires 'query' or 'filter' field");
+ }
+ if (script == null) {
+ throw new QueryParsingException(parseContext.index(), "[custom_score] requires 'script' field");
+ }
+ if (query == null && filter == null) {
+ return null;
+ } else if (filter != null) {
+ query = new XConstantScoreQuery(filter);
+ }
+
+ SearchScript searchScript;
+ try {
+ searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptLang, script, vars);
+ } catch (Exception e) {
+ throw new QueryParsingException(parseContext.index(), "[custom_score] the script could not be loaded", e);
+ }
+ FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(query, new ScriptScoreFunction(script, vars, searchScript));
+ functionScoreQuery.setBoost(boost);
+ return functionScoreQuery;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java
new file mode 100644
index 0000000..99cc5f5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * A query that generates the union of documents produced by its sub-queries, and that scores each document
+ * with the maximum score for that document as produced by any sub-query, plus a tie breaking increment for any
+ * additional matching sub-queries.
+ *
+ *
+ */
+public class DisMaxQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<DisMaxQueryBuilder> {
+
+ private ArrayList<QueryBuilder> queries = newArrayList();
+
+ private float boost = -1;
+
+ private float tieBreaker = -1;
+
+ private String queryName;
+
+ /**
+ * Add a sub-query to this disjunction.
+ */
+ public DisMaxQueryBuilder add(QueryBuilder queryBuilder) {
+ queries.add(queryBuilder);
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public DisMaxQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * The score of each non-maximum disjunct for a document is multiplied by this weight
+ * and added into the final score. If non-zero, the value should be small, on the order of 0.1, which says that
+ * 10 occurrences of a word in a lower-scored field that is also in a higher-scored field is just as good as a unique
+ * word in the lower-scored field (i.e., one that is not in any higher-scored field).
+ */
+ public DisMaxQueryBuilder tieBreaker(float tieBreaker) {
+ this.tieBreaker = tieBreaker;
+ return this;
+ }
+
+ /**
+ * Sets the query name, which can be used when searching for matched_filters per hit.
+ */
+ public DisMaxQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(DisMaxQueryParser.NAME);
+ if (tieBreaker != -1) {
+ builder.field("tie_breaker", tieBreaker);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.startArray("queries");
+ for (QueryBuilder queryBuilder : queries) {
+ queryBuilder.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ }
+} \ No newline at end of file
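
A minimal usage sketch; the sub-queries are illustrative:

    DisMaxQueryBuilder query = new DisMaxQueryBuilder()
            .add(QueryBuilders.termQuery("title", "elasticsearch"))
            .add(QueryBuilders.termQuery("body", "elasticsearch"))
            .tieBreaker(0.1f); // non-maximum disjuncts contribute 10% of their score
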
diff --git a/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java b/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java
new file mode 100644
index 0000000..82feb98
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ *
+ */
+public class DisMaxQueryParser implements QueryParser {
+
+ public static final String NAME = "dis_max";
+
+ @Inject
+ public DisMaxQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+ float tieBreaker = 0.0f;
+
+ List<Query> queries = newArrayList();
+ boolean queriesFound = false;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("queries".equals(currentFieldName)) {
+ queriesFound = true;
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ queries.add(query);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("queries".equals(currentFieldName)) {
+ queriesFound = true;
+ while (token != XContentParser.Token.END_ARRAY) {
+ Query query = parseContext.parseInnerQuery();
+ if (query != null) {
+ queries.add(query);
+ }
+ token = parser.nextToken();
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("tie_breaker".equals(currentFieldName) || "tieBreaker".equals(currentFieldName)) {
+ tieBreaker = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (!queriesFound) {
+ throw new QueryParsingException(parseContext.index(), "[dis_max] requires 'queries' field");
+ }
+
+ if (queries.isEmpty()) {
+ return null;
+ }
+
+ DisjunctionMaxQuery query = new DisjunctionMaxQuery(queries, tieBreaker);
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/ExistsFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/ExistsFilterBuilder.java
new file mode 100644
index 0000000..56511c4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ExistsFilterBuilder.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Constructs a filter that matches only documents in which the given field has a value.
+ *
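+ * An illustrative usage sketch (the field and filter names are placeholders):
+ * <pre>
+ * ExistsFilterBuilder filter = FilterBuilders.existsFilter("user").filterName("has_user");
+ * </pre>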
+ */
+public class ExistsFilterBuilder extends BaseFilterBuilder {
+
+ private String name;
+
+ private String filterName;
+
+ public ExistsFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public ExistsFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(ExistsFilterParser.NAME);
+ builder.field("field", name);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java
new file mode 100644
index 0000000..63bc895
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.TermRangeFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ *
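+ * Parses the <tt>exists</tt> filter. An illustrative sketch of the accepted JSON form (the
+ * field name is a placeholder):
+ * <pre>
+ * {
+ *     "exists" : { "field" : "user" }
+ * }
+ * </pre>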
+ */
+public class ExistsFilterParser implements FilterParser {
+
+ public static final String NAME = "exists";
+
+ @Inject
+ public ExistsFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String fieldPattern = null;
+ String filterName = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("field".equals(currentFieldName)) {
+ fieldPattern = parser.text();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[exists] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (fieldPattern == null) {
+ throw new QueryParsingException(parseContext.index(), "exists must be provided with a [field]");
+ }
+
+ return newFilter(parseContext, fieldPattern, filterName);
+ }
+
+ public static Filter newFilter(QueryParseContext parseContext, String fieldPattern, String filterName) {
+ MapperService.SmartNameObjectMapper smartNameObjectMapper = parseContext.smartObjectMapper(fieldPattern);
+ if (smartNameObjectMapper != null && smartNameObjectMapper.hasMapper()) {
+ // automatically turn the object mapper name into a wildcard pattern
+ fieldPattern = fieldPattern + ".*";
+ }
+
+ Set<String> fields = parseContext.simpleMatchToIndexNames(fieldPattern);
+ if (fields.isEmpty()) {
+ // no fields exist, so we should not match anything
+ return Queries.MATCH_NO_FILTER;
+ }
+ MapperService.SmartNameFieldMappers nonNullFieldMappers = null;
+
+ XBooleanFilter boolFilter = new XBooleanFilter();
+ for (String field : fields) {
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(field);
+ if (smartNameFieldMappers != null) {
+ nonNullFieldMappers = smartNameFieldMappers;
+ }
+ Filter filter = null;
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ filter = smartNameFieldMappers.mapper().rangeFilter(null, null, true, true, parseContext);
+ }
+ if (filter == null) {
+ filter = new TermRangeFilter(field, null, null, true, true);
+ }
+ boolFilter.add(filter, BooleanClause.Occur.SHOULD);
+ }
+
+ // we always cache this one, it really does not change... (exists)
+ // it's ok to cache under the fieldName cacheKey, since it's per segment and the mapping applies to this data on this segment...
+ Filter filter = parseContext.cacheFilter(boolFilter, new CacheKeyFilter.Key("$exists$" + fieldPattern));
+
+ filter = wrapSmartNameFilter(filter, nonNullFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java
new file mode 100644
index 0000000..40ff977
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+
+/**
+ * The "fquery" filter is the same as the {@link QueryFilterParser} except that it allows also to
+ * associate a name with the query filter.
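+ * <p/>
+ * An illustrative sketch of the accepted JSON form (values are placeholders):
+ * <pre>
+ * {
+ *     "fquery" : {
+ *         "query" : { "term" : { "user" : "kimchy" } },
+ *         "_cache" : true,
+ *         "_name" : "my_filter"
+ *     }
+ * }
+ * </pre>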
+ */
+public class FQueryFilterParser implements FilterParser {
+
+ public static final String NAME = "fquery";
+
+ @Inject
+ public FQueryFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ queryFound = true;
+ query = parseContext.parseInnerQuery();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[fquery] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[fquery] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[fquery] requires 'query' element");
+ }
+ if (query == null) {
+ return null;
+ }
+ Filter filter = Queries.wrap(query);
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java
new file mode 100644
index 0000000..fc86141
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ *
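+ * A builder for the <tt>field_masking_span</tt> query, which masks the field of an inner span
+ * query so it can be combined with span queries on other fields. A minimal, illustrative
+ * sketch, assuming the usual QueryBuilders factory (field names and values are placeholders):
+ * <pre>
+ * FieldMaskingSpanQueryBuilder masked = new FieldMaskingSpanQueryBuilder(
+ *         QueryBuilders.spanTermQuery("text.stems", "fox"), "text");
+ * </pre>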
+ */
+public class FieldMaskingSpanQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<FieldMaskingSpanQueryBuilder> {
+
+ private final SpanQueryBuilder queryBuilder;
+
+ private final String field;
+
+ private float boost = -1;
+
+ private String queryName;
+
+
+ public FieldMaskingSpanQueryBuilder(SpanQueryBuilder queryBuilder, String field) {
+ this.queryBuilder = queryBuilder;
+ this.field = field;
+ }
+
+ public FieldMaskingSpanQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched_queries per hit.
+ */
+ public FieldMaskingSpanQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(FieldMaskingSpanQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ builder.field("field", field);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java
new file mode 100644
index 0000000..2b69cf6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+
+import java.io.IOException;
+
+/**
+ *
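+ * Parses the <tt>field_masking_span</tt> query. An illustrative sketch of the accepted JSON
+ * form (field names and values are placeholders):
+ * <pre>
+ * {
+ *     "field_masking_span" : {
+ *         "query" : { "span_term" : { "text.stems" : "fox" } },
+ *         "field" : "text"
+ *     }
+ * }
+ * </pre>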
+ */
+public class FieldMaskingSpanQueryParser implements QueryParser {
+
+ public static final String NAME = "field_masking_span";
+
+ @Inject
+ public FieldMaskingSpanQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+
+ SpanQuery inner = null;
+ String field = null;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (!(query instanceof SpanQuery)) {
+ throw new QueryParsingException(parseContext.index(), "[field_masking_span] [query] must be of type span query");
+ }
+ inner = (SpanQuery) query;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[field_masking_span] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[field_masking_span] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (inner == null) {
+ throw new QueryParsingException(parseContext.index(), "field_masking_span must have [query] span query clause");
+ }
+ if (field == null) {
+ throw new QueryParsingException(parseContext.index(), "field_masking_span must have [field] set for it");
+ }
+
+ FieldMapper mapper = parseContext.fieldMapper(field);
+ if (mapper != null) {
+ field = mapper.names().indexName();
+ }
+
+ FieldMaskingSpanQuery query = new FieldMaskingSpanQuery(inner, field);
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java
new file mode 100644
index 0000000..50d0b36
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentType;
+
+/**
+ *
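+ * The base interface for all filter builders. A filter builder renders itself as
+ * {@link ToXContent} and can be serialized to bytes via {@link #buildAsBytes()}.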
+ */
+public interface FilterBuilder extends ToXContent {
+
+ BytesReference buildAsBytes() throws ElasticsearchException;
+
+ BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException;
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FilterBuilders.java b/src/main/java/org/elasticsearch/index/query/FilterBuilders.java
new file mode 100644
index 0000000..7ff4adc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FilterBuilders.java
@@ -0,0 +1,563 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+
+/**
+ * A static factory for simple "import static" usage.
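+ * <p/>
+ * An illustrative sketch using only factory methods defined below (field names and values are
+ * placeholders):
+ * <pre>
+ * import static org.elasticsearch.index.query.FilterBuilders.*;
+ *
+ * FilterBuilder filter = andFilter(
+ *         termFilter("user", "kimchy"),
+ *         existsFilter("age"));
+ * </pre>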
+ */
+public abstract class FilterBuilders {
+
+ /**
+ * A filter that matches all documents.
+ */
+ public static MatchAllFilterBuilder matchAllFilter() {
+ return new MatchAllFilterBuilder();
+ }
+
+ /**
+ * A filter that limits the results to the provided limit value (per shard!).
+ */
+ public static LimitFilterBuilder limitFilter(int limit) {
+ return new LimitFilterBuilder(limit);
+ }
+
+ public static NestedFilterBuilder nestedFilter(String path, QueryBuilder query) {
+ return new NestedFilterBuilder(path, query);
+ }
+
+ public static NestedFilterBuilder nestedFilter(String path, FilterBuilder filter) {
+ return new NestedFilterBuilder(path, filter);
+ }
+
+ /**
+ * Creates a new ids filter with the provided doc/mapping types.
+ *
+ * @param types The types to match the ids against.
+ */
+ public static IdsFilterBuilder idsFilter(@Nullable String... types) {
+ return new IdsFilterBuilder(types);
+ }
+
+ /**
+ * A filter based on doc/mapping type.
+ */
+ public static TypeFilterBuilder typeFilter(String type) {
+ return new TypeFilterBuilder(type);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public static TermFilterBuilder termFilter(String name, String value) {
+ return new TermFilterBuilder(name, value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public static TermFilterBuilder termFilter(String name, int value) {
+ return new TermFilterBuilder(name, value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public static TermFilterBuilder termFilter(String name, long value) {
+ return new TermFilterBuilder(name, value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public static TermFilterBuilder termFilter(String name, float value) {
+ return new TermFilterBuilder(name, value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public static TermFilterBuilder termFilter(String name, double value) {
+ return new TermFilterBuilder(name, value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public static TermFilterBuilder termFilter(String name, Object value) {
+ return new TermFilterBuilder(name, value);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, String... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, int... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, long... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, float... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, double... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, Object... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder termsFilter(String name, Iterable<?> values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A terms lookup filter for the provided field name. A lookup terms filter can
+ * extract the terms to filter by from another doc in an index.
+ */
+ public static TermsLookupFilterBuilder termsLookupFilter(String name) {
+ return new TermsLookupFilterBuilder(name);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder inFilter(String name, String... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder inFilter(String name, int... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder inFilter(String name, long... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder inFilter(String name, float... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder inFilter(String name, double... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter for a field based on several terms, matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsFilterBuilder inFilter(String name, Object... values) {
+ return new TermsFilterBuilder(name, values);
+ }
+
+ /**
+ * A filter that restricts search results to values that have a matching prefix in a given
+ * field.
+ *
+ * @param name The field name
+ * @param prefix The prefix
+ */
+ public static PrefixFilterBuilder prefixFilter(String name, String prefix) {
+ return new PrefixFilterBuilder(name, prefix);
+ }
+
+ /**
+ * A filter that restricts search results to field values that match a given regular expression.
+ *
+ * @param name The field name
+ * @param regexp The regular expression
+ */
+ public static RegexpFilterBuilder regexpFilter(String name, String regexp) {
+ return new RegexpFilterBuilder(name, regexp);
+ }
+
+ /**
+ * A filter that restricts search results to values that are within the given range.
+ *
+ * @param name The field name
+ */
+ public static RangeFilterBuilder rangeFilter(String name) {
+ return new RangeFilterBuilder(name);
+ }
+
+ /**
+ * A filter that restricts search results to values that are within the given numeric range. Uses the
+ * field data cache (loading all the values for the specified field into memory)
+ *
+ * @param name The field name
+ * @deprecated The numeric_range filter will be removed at some point in time in favor for the range filter with
+ * the execution mode <code>fielddata</code>.
+ */
+ @Deprecated
+ public static NumericRangeFilterBuilder numericRangeFilter(String name) {
+ return new NumericRangeFilterBuilder(name);
+ }
+
+ /**
+ * A filter that simply wraps a query.
+ *
+ * @param queryBuilder The query to wrap as a filter
+ */
+ public static QueryFilterBuilder queryFilter(QueryBuilder queryBuilder) {
+ return new QueryFilterBuilder(queryBuilder);
+ }
+
+ /**
+ * A builder for filter based on a script.
+ *
+ * @param script The script to filter by.
+ */
+ public static ScriptFilterBuilder scriptFilter(String script) {
+ return new ScriptFilterBuilder(script);
+ }
+
+ /**
+ * A filter to filter based on a specific distance from a specific geo location / point.
+ *
+ * @param name The location field name.
+ */
+ public static GeoDistanceFilterBuilder geoDistanceFilter(String name) {
+ return new GeoDistanceFilterBuilder(name);
+ }
+
+ /**
+ * A filter to filter based on a specific range from a specific geo location / point.
+ *
+ * @param name The location field name.
+ */
+ public static GeoDistanceRangeFilterBuilder geoDistanceRangeFilter(String name) {
+ return new GeoDistanceRangeFilterBuilder(name);
+ }
+
+ /**
+ * A filter to filter based on a bounding box defined by top left and bottom right locations / points
+ *
+ * @param name The location field name.
+ */
+ public static GeoBoundingBoxFilterBuilder geoBoundingBoxFilter(String name) {
+ return new GeoBoundingBoxFilterBuilder(name);
+ }
+
+ /**
+ * A filter based on a bounding box defined by geohash. The field this filter is applied to
+ * must have <code>{&quot;type&quot;:&quot;geo_point&quot;, &quot;geohash&quot;:true}</code>
+ * to work.
+ *
+ * @param name The geo point field name.
+ */
+ public static GeohashCellFilter.Builder geoHashCellFilter(String name) {
+ return new GeohashCellFilter.Builder(name);
+ }
+
+ /**
+ * A filter based on a bounding box defined by geohash. The field this filter is applied to
+ * must have <code>{&quot;type&quot;:&quot;geo_point&quot;, &quot;geohash&quot;:true}</code>
+ * to work.
+ *
+ * @param name The geo point field name.
+ * @param geohash The Geohash to filter
+ */
+ public static GeohashCellFilter.Builder geoHashCellFilter(String name, String geohash) {
+ return new GeohashCellFilter.Builder(name, geohash);
+ }
+
+ /**
+ * A filter based on a bounding box defined by geohash. The field this filter is applied to
+ * must have <code>{&quot;type&quot;:&quot;geo_point&quot;, &quot;geohash&quot;:true}</code>
+ * to work.
+ *
+ * @param name The geo point field name.
+ * @param point a geo point within the geohash bucket
+ */
+ public static GeohashCellFilter.Builder geoHashCellFilter(String name, GeoPoint point) {
+ return new GeohashCellFilter.Builder(name, point);
+ }
+
+ /**
+ * A filter based on a bounding box defined by geohash. The field this filter is applied to
+ * must have <code>{&quot;type&quot;:&quot;geo_point&quot;, &quot;geohash&quot;:true}</code>
+ * to work.
+ *
+ * @param name The geo point field name
+ * @param geohash The Geohash to filter
+ * @param neighbors should the neighbor cell also be filtered
+ */
+ public static GeohashCellFilter.Builder geoHashCellFilter(String name, String geohash, boolean neighbors) {
+ return new GeohashCellFilter.Builder(name, geohash, neighbors);
+ }
+
+ /**
+ * A filter to filter based on a polygon defined by a set of locations / points.
+ *
+ * @param name The location field name.
+ */
+ public static GeoPolygonFilterBuilder geoPolygonFilter(String name) {
+ return new GeoPolygonFilterBuilder(name);
+ }
+
+ /**
+ * A filter based on the relationship of a shape and indexed shapes
+ *
+ * @param name The shape field name
+ * @param shape Shape to use in the filter
+ * @param relation relation of the shapes
+ */
+ public static GeoShapeFilterBuilder geoShapeFilter(String name, ShapeBuilder shape, ShapeRelation relation) {
+ return new GeoShapeFilterBuilder(name, shape, relation);
+ }
+
+ public static GeoShapeFilterBuilder geoShapeFilter(String name, String indexedShapeId, String indexedShapeType, ShapeRelation relation) {
+ return new GeoShapeFilterBuilder(name, indexedShapeId, indexedShapeType, relation);
+ }
+
+ /**
+ * A filter to filter indexed shapes intersecting with the given shape
+ *
+ * @param name The shape field name
+ * @param shape Shape to use in the filter
+ */
+ public static GeoShapeFilterBuilder geoIntersectionFilter(String name, ShapeBuilder shape) {
+ return geoShapeFilter(name, shape, ShapeRelation.INTERSECTS);
+ }
+
+ public static GeoShapeFilterBuilder geoIntersectionFilter(String name, String indexedShapeId, String indexedShapeType) {
+ return geoShapeFilter(name, indexedShapeId, indexedShapeType, ShapeRelation.INTERSECTS);
+ }
+
+ /**
+ * A filter to filter indexed shapes that are contained by a shape
+ *
+ * @param name The shape field name
+ * @param shape Shape to use in the filter
+ */
+ public static GeoShapeFilterBuilder geoWithinFilter(String name, ShapeBuilder shape) {
+ return geoShapeFilter(name, shape, ShapeRelation.WITHIN);
+ }
+
+ public static GeoShapeFilterBuilder geoWithinFilter(String name, String indexedShapeId, String indexedShapeType) {
+ return geoShapeFilter(name, indexedShapeId, indexedShapeType, ShapeRelation.WITHIN);
+ }
+
+ /**
+ * A filter to filter indexed shapes that do not intersect with the query shape
+ *
+ * @param name The shape field name
+ * @param shape Shape to use in the filter
+ */
+ public static GeoShapeFilterBuilder geoDisjointFilter(String name, ShapeBuilder shape) {
+ return geoShapeFilter(name, shape, ShapeRelation.DISJOINT);
+ }
+
+ public static GeoShapeFilterBuilder geoDisjointFilter(String name, String indexedShapeId, String indexedShapeType) {
+ return geoShapeFilter(name, indexedShapeId, indexedShapeType, ShapeRelation.DISJOINT);
+ }
+
+ /**
+ * A filter that matches only documents in which the given field exists.
+ *
+ * @param name The name of the field
+ */
+ public static ExistsFilterBuilder existsFilter(String name) {
+ return new ExistsFilterBuilder(name);
+ }
+
+ /**
+ * A filter that matches only documents in which the given field does not exist.
+ *
+ * @param name The name of the field
+ */
+ public static MissingFilterBuilder missingFilter(String name) {
+ return new MissingFilterBuilder(name);
+ }
+
+ /**
+ * Constructs a child filter, with the child type and the query to run against child documents, with
+ * the result of the filter being the *parent* documents.
+ *
+ * @param type The child type
+ * @param query The query to run against the child type
+ */
+ public static HasChildFilterBuilder hasChildFilter(String type, QueryBuilder query) {
+ return new HasChildFilterBuilder(type, query);
+ }
+
+ /**
+ * Constructs a child filter, with the child type and the filter to run against child documents, with
+ * the result of the filter being the *parent* documents.
+ *
+ * @param type The child type
+ * @param filter The filter to run against the child type
+ */
+ public static HasChildFilterBuilder hasChildFilter(String type, FilterBuilder filter) {
+ return new HasChildFilterBuilder(type, filter);
+ }
+
+ /**
+ * Constructs a parent filter, with the parent type and the query to run against parent documents, with
+ * the result of the filter being the *child* documents.
+ *
+ * @param parentType The parent type
+ * @param query The query to run against the parent type
+ */
+ public static HasParentFilterBuilder hasParentFilter(String parentType, QueryBuilder query) {
+ return new HasParentFilterBuilder(parentType, query);
+ }
+
+ /**
+ * Constructs a parent filter, with the parent type and the filter to run against parent documents, with
+ * the result of the filter being the *child* documents.
+ *
+ * @param parentType The parent type
+ * @param filter The filter to run against the parent type
+ */
+ public static HasParentFilterBuilder hasParentFilter(String parentType, FilterBuilder filter) {
+ return new HasParentFilterBuilder(parentType, filter);
+ }
+
+ public static BoolFilterBuilder boolFilter() {
+ return new BoolFilterBuilder();
+ }
+
+ public static AndFilterBuilder andFilter(FilterBuilder... filters) {
+ return new AndFilterBuilder(filters);
+ }
+
+ public static OrFilterBuilder orFilter(FilterBuilder... filters) {
+ return new OrFilterBuilder(filters);
+ }
+
+ public static NotFilterBuilder notFilter(FilterBuilder filter) {
+ return new NotFilterBuilder(filter);
+ }
+
+ public static IndicesFilterBuilder indicesFilter(FilterBuilder filter, String... indices) {
+ return new IndicesFilterBuilder(filter, indices);
+ }
+
+ public static WrapperFilterBuilder wrapperFilter(String filter) {
+ return new WrapperFilterBuilder(filter);
+ }
+
+ public static WrapperFilterBuilder wrapperFilter(byte[] data, int offset, int length) {
+ return new WrapperFilterBuilder(data, offset, length);
+ }
+
+ private FilterBuilders() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FilterParser.java b/src/main/java/org/elasticsearch/index/query/FilterParser.java
new file mode 100644
index 0000000..260f828
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FilterParser.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.Nullable;
+
+import java.io.IOException;
+
+/**
+ *
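+ * Parses a filter of a registered type from the XContent query DSL into a Lucene
+ * {@link Filter}.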
+ */
+public interface FilterParser {
+
+ /**
+ * The names this filter is registered under.
+ */
+ String[] names();
+
+ /**
+ * Parses the content into a filter, starting from the current parser location. The parser will
+ * be at a "START_OBJECT" location, and parsing should end when the token is at the matching
+ * "END_OBJECT".
+ * <p/>
+ * The parser should return a null value when the filter should be ignored, regardless of the
+ * context it appears in. For example, an and filter with no clauses ("and []") should be ignored
+ * whether it occurs within a must clause or a must_not bool clause (which is why returning
+ * MATCH_ALL would not be correct: it would not match anything when returned within a must_not
+ * clause).
+ */
+ @Nullable
+ Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException;
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/FilterParserFactory.java b/src/main/java/org/elasticsearch/index/query/FilterParserFactory.java
new file mode 100644
index 0000000..900951e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FilterParserFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
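+ * A factory that creates a {@link FilterParser} for a given name and {@link Settings},
+ * presumably used when registering custom filter parsers.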
+ */
+public interface FilterParserFactory {
+
+ FilterParser create(String name, Settings settings);
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/FilteredQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/FilteredQueryBuilder.java
new file mode 100644
index 0000000..2819df5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FilteredQueryBuilder.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query that applies a filter to the results of another query.
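+ * <p/>
+ * A minimal, illustrative sketch, assuming the usual QueryBuilders and FilterBuilders
+ * factories (field names and values are placeholders):
+ * <pre>
+ * FilteredQueryBuilder query = new FilteredQueryBuilder(
+ *         QueryBuilders.matchAllQuery(),
+ *         FilterBuilders.termFilter("user", "kimchy"));
+ * </pre>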
+ */
+public class FilteredQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<FilteredQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+
+ private final FilterBuilder filterBuilder;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ /**
+ * A query that applies a filter to the results of another query.
+ *
+ * @param queryBuilder The query to apply the filter to
+ * @param filterBuilder The filter to apply on the query (Can be null)
+ */
+ public FilteredQueryBuilder(QueryBuilder queryBuilder, @Nullable FilterBuilder filterBuilder) {
+ this.queryBuilder = queryBuilder;
+ this.filterBuilder = filterBuilder;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public FilteredQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched_queries per hit.
+ */
+ public FilteredQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(FilteredQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ if (filterBuilder != null) {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java
new file mode 100644
index 0000000..dbfe928
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+
+/**
+ *
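+ * Parses the <tt>filtered</tt> query. An illustrative sketch of the accepted JSON form
+ * (field names and values are placeholders):
+ * <pre>
+ * {
+ *     "filtered" : {
+ *         "query" : { "match_all" : {} },
+ *         "filter" : { "term" : { "user" : "kimchy" } },
+ *         "strategy" : "leap_frog"
+ *     }
+ * }
+ * </pre>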
+ */
+public class FilteredQueryParser implements QueryParser {
+
+ public static final String NAME = "filtered";
+
+ @Inject
+ public FilteredQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = Queries.newMatchAllQuery();
+ Filter filter = null;
+ boolean filterFound = false;
+ float boost = 1.0f;
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ FilteredQuery.FilterStrategy filterStrategy = XFilteredQuery.CUSTOM_FILTER_STRATEGY;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ query = parseContext.parseInnerQuery();
+ } else if ("filter".equals(currentFieldName)) {
+ filterFound = true;
+ filter = parseContext.parseInnerFilter();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[filtered] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("strategy".equals(currentFieldName)) {
+ String value = parser.text();
+ if ("query_first".equals(value) || "queryFirst".equals(value)) {
+ filterStrategy = FilteredQuery.QUERY_FIRST_FILTER_STRATEGY;
+ } else if ("random_access_always".equals(value) || "randomAccessAlways".equals(value)) {
+ filterStrategy = XFilteredQuery.ALWAYS_RANDOM_ACCESS_FILTER_STRATEGY;
+ } else if ("leap_frog".equals(value) || "leapFrog".equals(value)) {
+ filterStrategy = FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY;
+ } else if (value.startsWith("random_access_")) {
+ int threshold = Integer.parseInt(value.substring("random_access_".length()));
+ filterStrategy = new XFilteredQuery.CustomRandomAccessFilterStrategy(threshold);
+ } else if (value.startsWith("randomAccess")) {
+ int threshold = Integer.parseInt(value.substring("randomAccess".length()));
+ filterStrategy = new XFilteredQuery.CustomRandomAccessFilterStrategy(threshold);
+ } else if ("leap_frog_query_first".equals(value) || "leapFrogQueryFirst".equals(value)) {
+ filterStrategy = FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY;
+ } else if ("leap_frog_filter_first".equals(value) || "leapFrogFilterFirst".equals(value)) {
+ filterStrategy = FilteredQuery.LEAP_FROG_FILTER_FIRST_STRATEGY;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[filtered] strategy value not supported [" + value + "]");
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[filtered] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ // the query was parsed internally, but returned null during parsing...
+ if (query == null) {
+ return null;
+ }
+
+ if (filter == null) {
+ // a null filter is allowed whether the filter element was absent or itself parsed to null;
+ // this keeps compositions on the client side simpler, so we simply execute the query
+ return query;
+ }
+ if (filter == Queries.MATCH_ALL_FILTER) {
+ // this is an instance of match all filter, just execute the query
+ return query;
+ }
+
+ // cache if required
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ // if its a match_all query, use constant_score
+ if (Queries.isConstantMatchAllQuery(query)) {
+ Query q = new XConstantScoreQuery(filter);
+ q.setBoost(boost);
+ return q;
+ }
+
+ XFilteredQuery filteredQuery = new XFilteredQuery(query, filter, filterStrategy);
+ filteredQuery.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, filteredQuery);
+ }
+ return filteredQuery;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryBuilder.java
new file mode 100644
index 0000000..f9846d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryBuilder.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ *
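+ * A builder for the <tt>fuzzy_like_this_field</tt> query. A minimal, illustrative usage
+ * sketch (field name and text are placeholders):
+ * <pre>
+ * FuzzyLikeThisFieldQueryBuilder query = new FuzzyLikeThisFieldQueryBuilder("name.first")
+ *         .likeText("text like this one")
+ *         .maxQueryTerms(12);
+ * </pre>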
+ */
+public class FuzzyLikeThisFieldQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<FuzzyLikeThisFieldQueryBuilder> {
+
+ private final String name;
+
+ private Float boost;
+
+ private String likeText = null;
+ private Fuzziness fuzziness;
+ private Integer prefixLength;
+ private Integer maxQueryTerms;
+ private Boolean ignoreTF;
+ private String analyzer;
+ private Boolean failOnUnsupportedField;
+ private String queryName;
+
+ /**
+ * A fuzzy more like this query on the provided field.
+ *
+ * @param name the name of the field
+ */
+ public FuzzyLikeThisFieldQueryBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * The text to use in order to find documents that are "like" this.
+ */
+ public FuzzyLikeThisFieldQueryBuilder likeText(String likeText) {
+ this.likeText = likeText;
+ return this;
+ }
+
+ public FuzzyLikeThisFieldQueryBuilder fuzziness(Fuzziness fuzziness) {
+ this.fuzziness = fuzziness;
+ return this;
+ }
+
+ public FuzzyLikeThisFieldQueryBuilder prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ public FuzzyLikeThisFieldQueryBuilder maxQueryTerms(int maxQueryTerms) {
+ this.maxQueryTerms = maxQueryTerms;
+ return this;
+ }
+
+ public FuzzyLikeThisFieldQueryBuilder ignoreTF(boolean ignoreTF) {
+ this.ignoreTF = ignoreTF;
+ return this;
+ }
+
+ /**
+ * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
+ */
+ public FuzzyLikeThisFieldQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ public FuzzyLikeThisFieldQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Whether to fail or return no result when this query is run against a field type which is
+ * not supported, such as binary or numeric fields.
+ */
+ public FuzzyLikeThisFieldQueryBuilder failOnUnsupportedField(boolean fail) {
+ failOnUnsupportedField = fail;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched_queries per hit.
+ */
+ public FuzzyLikeThisFieldQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(FuzzyLikeThisFieldQueryParser.NAME);
+ builder.startObject(name);
+ if (likeText == null) {
+ throw new ElasticsearchIllegalArgumentException("fuzzyLikeThis requires 'likeText' to be provided");
+ }
+ builder.field("like_text", likeText);
+ if (maxQueryTerms != null) {
+ builder.field("max_query_terms", maxQueryTerms);
+ }
+ if (fuzziness != null) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (ignoreTF != null) {
+ builder.field("ignore_tf", ignoreTF);
+ }
+ if (boost != null) {
+ builder.field("boost", boost);
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (failOnUnsupportedField != null) {
+ builder.field("fail_on_unsupported_field", failOnUnsupportedField);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryParser.java b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryParser.java
new file mode 100644
index 0000000..8088281
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisFieldQueryParser.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.sandbox.queries.FuzzyLikeThisQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.Analysis;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * <pre>
+ * {
+ *     fuzzy_like_this_field : {
+ *         field1 : {
+ *             max_query_terms : 12,
+ *             boost : 1.1,
+ *             like_text : "..."
+ *         }
+ *     }
+ * }
+ * </pre>
+ */
+public class FuzzyLikeThisFieldQueryParser implements QueryParser {
+
+ public static final String NAME = "flt_field";
+ private static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.fromSimilarity(0.5f);
+ private static final ParseField FUZZINESS = Fuzziness.FIELD.withDeprecation("min_similarity");
+
+ @Inject
+ public FuzzyLikeThisFieldQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "fuzzy_like_this_field", Strings.toCamelCase(NAME), "fuzzyLikeThisField"};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ int maxNumTerms = 25;
+ float boost = 1.0f;
+ String likeText = null;
+ Fuzziness fuzziness = DEFAULT_FUZZINESS;
+ int prefixLength = 0;
+ boolean ignoreTF = false;
+ Analyzer analyzer = null;
+ boolean failOnUnsupportedField = true;
+ String queryName = null;
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[flt_field] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+
+ // now, we move after the field name, which starts the object
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), "[flt_field] query malformed, no start_object");
+ }
+
+
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("like_text".equals(currentFieldName) || "likeText".equals(currentFieldName)) {
+ likeText = parser.text();
+ } else if ("max_query_terms".equals(currentFieldName) || "maxQueryTerms".equals(currentFieldName)) {
+ maxNumTerms = parser.intValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("ignore_tf".equals(currentFieldName) || "ignoreTF".equals(currentFieldName)) {
+ ignoreTF = parser.booleanValue();
+ } else if (FUZZINESS.match(currentFieldName, parseContext.parseFlags())) {
+ fuzziness = Fuzziness.parse(parser);
+ } else if ("prefix_length".equals(currentFieldName) || "prefixLength".equals(currentFieldName)) {
+ prefixLength = parser.intValue();
+ } else if ("analyzer".equals(currentFieldName)) {
+ analyzer = parseContext.analysisService().analyzer(parser.text());
+ } else if ("fail_on_unsupported_field".equals(currentFieldName) || "failOnUnsupportedField".equals(currentFieldName)) {
+ failOnUnsupportedField = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[flt_field] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (likeText == null) {
+ throw new QueryParsingException(parseContext.index(), "fuzzy_like_This_field requires 'like_text' to be specified");
+ }
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ fieldName = smartNameFieldMappers.mapper().names().indexName();
+ if (analyzer == null) {
+ analyzer = smartNameFieldMappers.mapper().searchAnalyzer();
+ }
+ }
+ }
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+ if (!Analysis.generatesCharacterTokenStream(analyzer, fieldName)) {
+ if (failOnUnsupportedField) {
+ throw new ElasticsearchIllegalArgumentException("fuzzy_like_this_field doesn't support binary/numeric fields: [" + fieldName + "]");
+ } else {
+ return null;
+ }
+ }
+
+ FuzzyLikeThisQuery fuzzyLikeThisQuery = new FuzzyLikeThisQuery(maxNumTerms, analyzer);
+ fuzzyLikeThisQuery.addTerms(likeText, fieldName, fuzziness.asSimilarity(), prefixLength);
+ fuzzyLikeThisQuery.setBoost(boost);
+ fuzzyLikeThisQuery.setIgnoreTF(ignoreTF);
+
+ // move to the END_OBJECT that closes the field name object
+ token = parser.nextToken();
+ if (token != XContentParser.Token.END_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), "[flt_field] query malformed, no end_object");
+ }
+
+ Query query = wrapSmartNameQuery(fuzzyLikeThisQuery, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryBuilder.java
new file mode 100644
index 0000000..fafe60e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryBuilder.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class FuzzyLikeThisQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<FuzzyLikeThisQueryBuilder> {
+
+ private final String[] fields;
+
+ private Float boost;
+
+ private String likeText = null;
+ private Fuzziness fuzziness;
+ private Integer prefixLength;
+ private Integer maxQueryTerms;
+ private Boolean ignoreTF;
+ private String analyzer;
+ private Boolean failOnUnsupportedField;
+ private String queryName;
+
+ /**
+ * Constructs a new fuzzy like this query which uses the "_all" field.
+ */
+ public FuzzyLikeThisQueryBuilder() {
+ this.fields = null;
+ }
+
+ /**
+ * Sets the field names that will be used when generating the 'Fuzzy Like This' query.
+ *
+ * @param fields the field names that will be used when generating the 'Fuzzy Like This' query.
+ */
+ public FuzzyLikeThisQueryBuilder(String... fields) {
+ this.fields = fields;
+ }
+
+ /**
+ * The text to use in order to find documents that are "like" this.
+ */
+ public FuzzyLikeThisQueryBuilder likeText(String likeText) {
+ this.likeText = likeText;
+ return this;
+ }
+
+ public FuzzyLikeThisQueryBuilder fuzziness(Fuzziness fuzziness) {
+ this.fuzziness = fuzziness;
+ return this;
+ }
+
+ public FuzzyLikeThisQueryBuilder prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ public FuzzyLikeThisQueryBuilder maxQueryTerms(int maxQueryTerms) {
+ this.maxQueryTerms = maxQueryTerms;
+ return this;
+ }
+
+ public FuzzyLikeThisQueryBuilder ignoreTF(boolean ignoreTF) {
+ this.ignoreTF = ignoreTF;
+ return this;
+ }
+
+ /**
+ * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
+ */
+ public FuzzyLikeThisQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ public FuzzyLikeThisQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Whether to fail or to return no result when this query is run against an unsupported field type, such as a binary or numeric field.
+ */
+ public FuzzyLikeThisQueryBuilder failOnUnsupportedField(boolean fail) {
+ failOnUnsupportedField = fail;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query, which can be used when searching for matched_filters per hit.
+ */
+ public FuzzyLikeThisQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(FuzzyLikeThisQueryParser.NAME);
+ if (fields != null) {
+ builder.startArray("fields");
+ for (String field : fields) {
+ builder.value(field);
+ }
+ builder.endArray();
+ }
+ if (likeText == null) {
+ throw new ElasticsearchIllegalArgumentException("fuzzyLikeThis requires 'likeText' to be provided");
+ }
+ builder.field("like_text", likeText);
+ if (maxQueryTerms != null) {
+ builder.field("max_query_terms", maxQueryTerms);
+ }
+ if (fuzziness != null) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (ignoreTF != null) {
+ builder.field("ignore_tf", ignoreTF);
+ }
+ if (boost != null) {
+ builder.field("boost", boost);
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (failOnUnsupportedField != null) {
+ builder.field("fail_on_unsupported_field", failOnUnsupportedField);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
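+
+// An illustrative usage sketch (assumes the fuzzyLikeThisQuery(String...)
+// factory on org.elasticsearch.index.query.QueryBuilders constructs this
+// builder; the field names and values are examples):
+//
+//   QueryBuilder query = QueryBuilders.fuzzyLikeThisQuery("name.first", "name.last")
+//           .likeText("text like this one")
+//           .fuzziness(Fuzziness.TWO)
+//           .maxQueryTerms(12);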
diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryParser.java b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryParser.java
new file mode 100644
index 0000000..68d6045
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FuzzyLikeThisQueryParser.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.sandbox.queries.FuzzyLikeThisQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.Analysis;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * <pre>
+ * {
+ * fuzzy_like_this : {
+ * max_query_terms : 12,
+ * boost : 1.1,
+ * fields : ["field1", "field2"],
+ * like_text : "..."
+ * }
+ * }
+ * </pre>
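+ *
+ * A fuller body accepted by this parser (an illustrative sketch; the field
+ * names and values are examples, the keys are the ones handled in parse()):
+ * <pre>
+ * {
+ *     fuzzy_like_this : {
+ *         fields : ["name.first", "name.last"],
+ *         like_text : "text like this one",
+ *         max_query_terms : 12,
+ *         fuzziness : 2,
+ *         prefix_length : 0,
+ *         ignore_tf : false,
+ *         boost : 1.1,
+ *         analyzer : "standard",
+ *         _name : "my_flt"
+ *     }
+ * }
+ * </pre>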
+ */
+public class FuzzyLikeThisQueryParser implements QueryParser {
+
+ public static final String NAME = "flt";
+ private static final ParseField FUZZINESS = Fuzziness.FIELD.withDeprecation("min_similarity");
+
+ @Inject
+ public FuzzyLikeThisQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "fuzzy_like_this", "fuzzyLikeThis"};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ int maxNumTerms = 25;
+ float boost = 1.0f;
+ List<String> fields = null;
+ String likeText = null;
+ Fuzziness fuzziness = Fuzziness.TWO;
+ int prefixLength = 0;
+ boolean ignoreTF = false;
+ Analyzer analyzer = null;
+ boolean failOnUnsupportedField = true;
+ String queryName = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("like_text".equals(currentFieldName) || "likeText".equals(currentFieldName)) {
+ likeText = parser.text();
+ } else if ("max_query_terms".equals(currentFieldName) || "maxQueryTerms".equals(currentFieldName)) {
+ maxNumTerms = parser.intValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("ignore_tf".equals(currentFieldName) || "ignoreTF".equals(currentFieldName)) {
+ ignoreTF = parser.booleanValue();
+ } else if (FUZZINESS.match(currentFieldName, parseContext.parseFlags())) {
+ fuzziness = Fuzziness.parse(parser);
+ } else if ("prefix_length".equals(currentFieldName) || "prefixLength".equals(currentFieldName)) {
+ prefixLength = parser.intValue();
+ } else if ("analyzer".equals(currentFieldName)) {
+ analyzer = parseContext.analysisService().analyzer(parser.text());
+ } else if ("fail_on_unsupported_field".equals(currentFieldName) || "failOnUnsupportedField".equals(currentFieldName)) {
+ failOnUnsupportedField = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[flt] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("fields".equals(currentFieldName)) {
+ fields = Lists.newLinkedList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ fields.add(parseContext.indexName(parser.text()));
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[flt] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (likeText == null) {
+ throw new QueryParsingException(parseContext.index(), "fuzzy_like_this requires 'like_text' to be specified");
+ }
+
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+
+ FuzzyLikeThisQuery query = new FuzzyLikeThisQuery(maxNumTerms, analyzer);
+ if (fields == null) {
+ fields = Lists.newArrayList(parseContext.defaultField());
+ } else if (fields.isEmpty()) {
+ throw new QueryParsingException(parseContext.index(), "fuzzy_like_this requires 'fields' to be non-empty");
+ }
+ for (Iterator<String> it = fields.iterator(); it.hasNext(); ) {
+ final String fieldName = it.next();
+ if (!Analysis.generatesCharacterTokenStream(analyzer, fieldName)) {
+ if (failOnUnsupportedField) {
+ throw new ElasticsearchIllegalArgumentException("more_like_this doesn't support binary/numeric fields: [" + fieldName + "]");
+ } else {
+ it.remove();
+ }
+ }
+ }
+ if (fields.isEmpty()) {
+ return null;
+ }
+ float minSimilarity = fuzziness.asFloat();
+ if (minSimilarity >= 1.0f && minSimilarity != (int)minSimilarity) {
+ throw new ElasticsearchIllegalArgumentException("fractional edit distances are not allowed");
+ }
+ if (minSimilarity < 0.0f) {
+ throw new ElasticsearchIllegalArgumentException("minimumSimilarity cannot be less than 0");
+ }
+ for (String field : fields) {
+ query.addTerms(likeText, field, minSimilarity, prefixLength);
+ }
+ query.setBoost(boost);
+ query.setIgnoreTF(ignoreTF);
+
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java
new file mode 100644
index 0000000..ab158fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A Query that does fuzzy matching for a specific value.
+ *
+ */
+public class FuzzyQueryBuilder extends BaseQueryBuilder implements MultiTermQueryBuilder, BoostableQueryBuilder<FuzzyQueryBuilder> {
+
+ private final String name;
+
+ private final Object value;
+
+ private float boost = -1;
+
+ private Fuzziness fuzziness;
+
+ private Integer prefixLength;
+
+ private Integer maxExpansions;
+
+ //LUCENE 4 UPGRADE we need a testcase for this + documentation
+ private Boolean transpositions;
+
+ private String queryName;
+
+ /**
+ * Constructs a new fuzzy query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public FuzzyQueryBuilder(String name, Object value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public FuzzyQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ public FuzzyQueryBuilder fuzziness(Fuzziness fuzziness) {
+ this.fuzziness = fuzziness;
+ return this;
+ }
+
+ public FuzzyQueryBuilder prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ public FuzzyQueryBuilder maxExpansions(int maxExpansions) {
+ this.maxExpansions = maxExpansions;
+ return this;
+ }
+
+ public FuzzyQueryBuilder transpositions(boolean transpositions) {
+ this.transpositions = transpositions;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query, which can be used when searching for matched_filters per hit.
+ */
+ public FuzzyQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(FuzzyQueryParser.NAME);
+ if (boost == -1 && fuzziness == null && prefixLength == null && maxExpansions == null && transpositions == null && queryName == null) {
+ builder.field(name, value);
+ } else {
+ builder.startObject(name);
+ builder.field("value", value);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (transpositions != null) {
+ builder.field("transpositions", transpositions);
+ }
+ if (fuzziness != null) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (maxExpansions != null) {
+ builder.field("max_expansions", maxExpansions);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
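+
+ // An illustrative usage sketch (assumes the fuzzyQuery(String, Object)
+ // factory on org.elasticsearch.index.query.QueryBuilders constructs this
+ // builder; the field name and value are examples):
+ //
+ //   QueryBuilder query = QueryBuilders.fuzzyQuery("user", "ki")
+ //           .fuzziness(Fuzziness.TWO)
+ //           .prefixLength(0)
+ //           .maxExpansions(50);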
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java b/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java
new file mode 100644
index 0000000..1d13c3c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ *
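+ * Accepts both a shorthand and an object form (an illustrative sketch; the
+ * field name and values are examples, the keys are the ones handled in parse()):
+ * <pre>
+ * { "fuzzy" : { "user" : "ki" } }
+ *
+ * {
+ *     "fuzzy" : {
+ *         "user" : {
+ *             "value" : "ki",
+ *             "fuzziness" : 2,
+ *             "prefix_length" : 0,
+ *             "max_expansions" : 50
+ *         }
+ *     }
+ * }
+ * </pre>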
+ */
+public class FuzzyQueryParser implements QueryParser {
+
+ public static final String NAME = "fuzzy";
+ private static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO;
+ private static final ParseField FUZZINESS = Fuzziness.FIELD.withDeprecation("min_similarity");
+
+
+ @Inject
+ public FuzzyQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[fuzzy] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+
+ String value = null;
+ float boost = 1.0f;
+ Fuzziness fuzziness = DEFAULT_FUZZINESS;
+ int prefixLength = FuzzyQuery.defaultPrefixLength;
+ int maxExpansions = FuzzyQuery.defaultMaxExpansions;
+ boolean transpositions = false;
+ String queryName = null;
+ MultiTermQuery.RewriteMethod rewriteMethod = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("term".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("value".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if (FUZZINESS.match(currentFieldName, parseContext.parseFlags())) {
+ fuzziness = Fuzziness.parse(parser);
+ } else if ("prefix_length".equals(currentFieldName) || "prefixLength".equals(currentFieldName)) {
+ prefixLength = parser.intValue();
+ } else if ("max_expansions".equals(currentFieldName) || "maxExpansions".equals(currentFieldName)) {
+ maxExpansions = parser.intValue();
+ } else if ("transpositions".equals(currentFieldName)) {
+ transpositions = parser.booleanValue();
+ } else if ("rewrite".equals(currentFieldName)) {
+ rewriteMethod = QueryParsers.parseRewriteMethod(parser.textOrNull(), null);
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[fuzzy] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.text();
+ // move to the next token
+ parser.nextToken();
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for fuzzy query");
+ }
+
+ Query query = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ query = smartNameFieldMappers.mapper().fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions);
+ }
+ }
+ if (query == null) {
+ query = new FuzzyQuery(new Term(fieldName, value), fuzziness.asDistance(value), prefixLength, maxExpansions, transpositions);
+ }
+ if (query instanceof MultiTermQuery) {
+ QueryParsers.setRewriteMethod((MultiTermQuery) query, rewriteMethod);
+ }
+ query.setBoost(boost);
+
+ query = wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java
new file mode 100644
index 0000000..c2133f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder {
+
+ public static final String TOP_LEFT = GeoBoundingBoxFilterParser.TOP_LEFT;
+ public static final String BOTTOM_RIGHT = GeoBoundingBoxFilterParser.BOTTOM_RIGHT;
+
+ private static final int TOP = 0;
+ private static final int LEFT = 1;
+ private static final int BOTTOM = 2;
+ private static final int RIGHT = 3;
+
+ private final String name;
+
+ private double[] box = {Double.NaN, Double.NaN, Double.NaN, Double.NaN};
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+ private String type;
+
+ public GeoBoundingBoxFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Adds top left point.
+ *
+ * @param lat The latitude
+ * @param lon The longitude
+ */
+ public GeoBoundingBoxFilterBuilder topLeft(double lat, double lon) {
+ box[TOP] = lat;
+ box[LEFT] = lon;
+ return this;
+ }
+
+ public GeoBoundingBoxFilterBuilder topLeft(GeoPoint point) {
+ return topLeft(point.lat(), point.lon());
+ }
+
+ public GeoBoundingBoxFilterBuilder topLeft(String geohash) {
+ return topLeft(GeoHashUtils.decode(geohash));
+ }
+
+ /**
+ * Adds bottom right corner.
+ *
+ * @param lat The latitude
+ * @param lon The longitude
+ */
+ public GeoBoundingBoxFilterBuilder bottomRight(double lat, double lon) {
+ box[BOTTOM] = lat;
+ box[RIGHT] = lon;
+ return this;
+ }
+
+ public GeoBoundingBoxFilterBuilder bottomRight(GeoPoint point) {
+ return bottomRight(point.lat(), point.lon());
+ }
+
+ public GeoBoundingBoxFilterBuilder bottomRight(String geohash) {
+ return bottomRight(GeoHashUtils.decode(geohash));
+ }
+
+ /**
+ * Adds bottom left corner.
+ *
+ * @param lat The latitude
+ * @param lon The longitude
+ */
+ public GeoBoundingBoxFilterBuilder bottomLeft(double lat, double lon) {
+ box[BOTTOM] = lat;
+ box[LEFT] = lon;
+ return this;
+ }
+
+ public GeoBoundingBoxFilterBuilder bottomLeft(GeoPoint point) {
+ return bottomLeft(point.lat(), point.lon());
+ }
+
+ public GeoBoundingBoxFilterBuilder bottomLeft(String geohash) {
+ return bottomLeft(GeoHashUtils.decode(geohash));
+ }
+
+ /**
+ * Adds top right point.
+ *
+ * @param lat The latitude
+ * @param lon The longitude
+ */
+ public GeoBoundingBoxFilterBuilder topRight(double lat, double lon) {
+ box[TOP] = lat;
+ box[RIGHT] = lon;
+ return this;
+ }
+
+ public GeoBoundingBoxFilterBuilder topRight(GeoPoint point) {
+ return topRight(point.lat(), point.lon());
+ }
+
+ public GeoBoundingBoxFilterBuilder topRight(String geohash) {
+ return topRight(GeoHashUtils.decode(geohash));
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public GeoBoundingBoxFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public GeoBoundingBoxFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public GeoBoundingBoxFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ /**
+ * Sets the execution type of the geo bounding box filter. Can be either `memory` or `indexed`. Defaults
+ * to `memory`.
+ */
+ public GeoBoundingBoxFilterBuilder type(String type) {
+ this.type = type;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ // check values
+ if (Double.isNaN(box[TOP])) {
+ throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires top latitude to be set");
+ } else if (Double.isNaN(box[BOTTOM])) {
+ throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires bottom latitude to be set");
+ } else if (Double.isNaN(box[RIGHT])) {
+ throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires right longitude to be set");
+ } else if (Double.isNaN(box[LEFT])) {
+ throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires left longitude to be set");
+ }
+
+ builder.startObject(GeoBoundingBoxFilterParser.NAME);
+
+ builder.startObject(name);
+ builder.array(TOP_LEFT, box[LEFT], box[TOP]);
+ builder.array(BOTTOM_RIGHT, box[RIGHT], box[BOTTOM]);
+ builder.endObject();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ if (type != null) {
+ builder.field("type", type);
+ }
+
+ builder.endObject();
+ }
+}
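+
+// An illustrative usage sketch (assumes the geoBoundingBoxFilter(String)
+// factory on org.elasticsearch.index.query.FilterBuilders constructs this
+// builder; the field name and coordinates are examples):
+//
+//   FilterBuilder filter = FilterBuilders.geoBoundingBoxFilter("pin.location")
+//           .topLeft(40.73, -74.1)
+//           .bottomRight(40.01, -71.12)
+//           .type("memory")
+//           .cache(true);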
diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java
new file mode 100644
index 0000000..85f7b58
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxFilter;
+import org.elasticsearch.index.search.geo.IndexedGeoBoundingBoxFilter;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ *
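+ * Parses a filter body such as (an illustrative sketch; the field name and
+ * coordinates are examples, corner points may also be given as geohashes,
+ * "lat,lon" strings, or objects with lat/lon members):
+ * <pre>
+ * {
+ *     "geo_bounding_box" : {
+ *         "pin.location" : {
+ *             "top_left" : [-74.1, 40.73],
+ *             "bottom_right" : [-71.12, 40.01]
+ *         },
+ *         "type" : "memory"
+ *     }
+ * }
+ * </pre>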
+ */
+public class GeoBoundingBoxFilterParser implements FilterParser {
+
+ public static final String TOP = "top";
+ public static final String LEFT = "left";
+ public static final String RIGHT = "right";
+ public static final String BOTTOM = "bottom";
+
+ public static final String TOP_LEFT = TOP + "_" + LEFT;
+ public static final String TOP_RIGHT = TOP + "_" + RIGHT;
+ public static final String BOTTOM_LEFT = BOTTOM + "_" + LEFT;
+ public static final String BOTTOM_RIGHT = BOTTOM + "_" + RIGHT;
+
+ public static final String TOPLEFT = "topLeft";
+ public static final String TOPRIGHT = "topRight";
+ public static final String BOTTOMLEFT = "bottomLeft";
+ public static final String BOTTOMRIGHT = "bottomRight";
+
+ public static final String NAME = "geo_bbox";
+ public static final String FIELD = "field";
+
+ @Inject
+ public GeoBoundingBoxFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "geoBbox", "geo_bounding_box", "geoBoundingBox"};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+
+ double top = Double.NaN;
+ double bottom = Double.NaN;
+ double left = Double.NaN;
+ double right = Double.NaN;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ boolean normalize = true;
+
+ GeoPoint sparse = new GeoPoint();
+
+ String type = "memory";
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if (FIELD.equals(currentFieldName)) {
+ fieldName = parser.text();
+ } else if (TOP.equals(currentFieldName)) {
+ top = parser.doubleValue();
+ } else if (BOTTOM.equals(currentFieldName)) {
+ bottom = parser.doubleValue();
+ } else if (LEFT.equals(currentFieldName)) {
+ left = parser.doubleValue();
+ } else if (RIGHT.equals(currentFieldName)) {
+ right = parser.doubleValue();
+ } else {
+ if (TOP_LEFT.equals(currentFieldName) || TOPLEFT.equals(currentFieldName)) {
+ GeoPoint.parse(parser, sparse);
+ top = sparse.getLat();
+ left = sparse.getLon();
+ } else if (BOTTOM_RIGHT.equals(currentFieldName) || BOTTOMRIGHT.equals(currentFieldName)) {
+ GeoPoint.parse(parser, sparse);
+ bottom = sparse.getLat();
+ right = sparse.getLon();
+ } else if (TOP_RIGHT.equals(currentFieldName) || TOPRIGHT.equals(currentFieldName)) {
+ GeoPoint.parse(parser, sparse);
+ top = sparse.getLat();
+ right = sparse.getLon();
+ } else if (BOTTOM_LEFT.equals(currentFieldName) || BOTTOMLEFT.equals(currentFieldName)) {
+ GeoPoint.parse(parser, sparse);
+ bottom = sparse.getLat();
+ left = sparse.getLon();
+ } else {
+ throw new ElasticsearchParseException("Unexpected field [" + currentFieldName + "]");
+ }
+ }
+ } else {
+ throw new ElasticsearchParseException("fieldname expected but [" + token + "] found");
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else if ("normalize".equals(currentFieldName)) {
+ normalize = parser.booleanValue();
+ } else if ("type".equals(currentFieldName)) {
+ type = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_bbox] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ final GeoPoint topLeft = sparse.reset(top, left); // reuse the scratch GeoPoint instead of allocating a new one
+ final GeoPoint bottomRight = new GeoPoint(bottom, right);
+
+ if (normalize) {
+ GeoUtils.normalizePoint(topLeft);
+ GeoUtils.normalizePoint(bottomRight);
+ }
+
+ MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartMappers == null || !smartMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+ }
+ FieldMapper<?> mapper = smartMappers.mapper();
+ if (!(mapper instanceof GeoPointFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+ }
+ GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
+
+ Filter filter;
+ if ("indexed".equals(type)) {
+ filter = IndexedGeoBoundingBoxFilter.create(topLeft, bottomRight, geoMapper);
+ } else if ("memory".equals(type)) {
+ IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);
+ filter = new InMemoryGeoBoundingBoxFilter(topLeft, bottomRight, indexFieldData);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "geo bounding box type [" + type + "] not supported, either 'indexed' or 'memory' are allowed");
+ }
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ filter = wrapSmartNameFilter(filter, smartMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java
new file mode 100644
index 0000000..6a07c28
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ *
+ */
+public class GeoDistanceFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private String distance;
+
+ private double lat;
+
+ private double lon;
+
+ private String geohash;
+
+ private GeoDistance geoDistance;
+
+ private String optimizeBbox;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ public GeoDistanceFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ public GeoDistanceFilterBuilder point(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder lat(double lat) {
+ this.lat = lat;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder lon(double lon) {
+ this.lon = lon;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder distance(String distance) {
+ this.distance = distance;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder distance(double distance, DistanceUnit unit) {
+ this.distance = unit.toString(distance);
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder geohash(String geohash) {
+ this.geohash = geohash;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder geoDistance(GeoDistance geoDistance) {
+ this.geoDistance = geoDistance;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder optimizeBbox(String optimizeBbox) {
+ this.optimizeBbox = optimizeBbox;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public GeoDistanceFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public GeoDistanceFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public GeoDistanceFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(GeoDistanceFilterParser.NAME);
+ if (geohash != null) {
+ builder.field(name, geohash);
+ } else {
+ builder.startArray(name).value(lon).value(lat).endArray();
+ }
+ builder.field("distance", distance);
+ if (geoDistance != null) {
+ builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
+ }
+ if (optimizeBbox != null) {
+ builder.field("optimize_bbox", optimizeBbox);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+}
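+
+// An illustrative usage sketch (assumes the geoDistanceFilter(String) factory
+// on org.elasticsearch.index.query.FilterBuilders constructs this builder; the
+// field name and values are examples):
+//
+//   FilterBuilder filter = FilterBuilders.geoDistanceFilter("pin.location")
+//           .point(40.0, -70.0)
+//           .distance(200, DistanceUnit.KILOMETERS)
+//           .geoDistance(GeoDistance.ARC)
+//           .optimizeBbox("memory");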
diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java
new file mode 100644
index 0000000..0d7f2bf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.search.geo.GeoDistanceFilter;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * <pre>
+ * {
+ *     "geo_distance" : {
+ *         "distance" : "1km",
+ *         "name.lat" : 1.1,
+ *         "name.lon" : 1.2
+ *     }
+ * }
+ * </pre>
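+ *
+ * An equivalent body using an array point (lon, lat) and explicit options
+ * (an illustrative sketch; the values are examples):
+ * <pre>
+ * {
+ *     "geo_distance" : {
+ *         "distance" : 12,
+ *         "unit" : "km",
+ *         "distance_type" : "arc",
+ *         "optimize_bbox" : "memory",
+ *         "name" : [1.2, 1.1]
+ *     }
+ * }
+ * </pre>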
+ */
+public class GeoDistanceFilterParser implements FilterParser {
+
+ public static final String NAME = "geo_distance";
+
+ @Inject
+ public GeoDistanceFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "geoDistance"};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token;
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String filterName = null;
+ String currentFieldName = null;
+ GeoPoint point = new GeoPoint();
+ String fieldName = null;
+ double distance = 0;
+ Object vDistance = null;
+ DistanceUnit unit = DistanceUnit.DEFAULT;
+ GeoDistance geoDistance = GeoDistance.DEFAULT;
+ String optimizeBbox = "memory";
+ boolean normalizeLon = true;
+ boolean normalizeLat = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ fieldName = currentFieldName;
+ GeoPoint.parse(parser, point);
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ // the json is in the format: field : { lat : 30, lon : 12 }
+ String currentName = parser.currentName();
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentName = parser.currentName();
+ } else if (token.isValue()) {
+ if (currentName.equals(GeoPointFieldMapper.Names.LAT)) {
+ point.resetLat(parser.doubleValue());
+ } else if (currentName.equals(GeoPointFieldMapper.Names.LON)) {
+ point.resetLon(parser.doubleValue());
+ } else if (currentName.equals(GeoPointFieldMapper.Names.GEOHASH)) {
+ GeoHashUtils.decode(parser.text(), point);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_distance] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if (currentFieldName.equals("distance")) {
+ if (token == XContentParser.Token.VALUE_STRING) {
+ vDistance = parser.text(); // a String
+ } else {
+ vDistance = parser.numberValue(); // a Number
+ }
+ } else if (currentFieldName.equals("unit")) {
+ unit = DistanceUnit.fromString(parser.text());
+ } else if (currentFieldName.equals("distance_type") || currentFieldName.equals("distanceType")) {
+ geoDistance = GeoDistance.fromString(parser.text());
+ } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LAT_SUFFIX)) {
+ point.resetLat(parser.doubleValue());
+ fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LAT_SUFFIX.length());
+ } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LON_SUFFIX)) {
+ point.resetLon(parser.doubleValue());
+ fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LON_SUFFIX.length());
+ } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.GEOHASH_SUFFIX)) {
+ GeoHashUtils.decode(parser.text(), point);
+ fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) {
+ optimizeBbox = parser.textOrNull();
+ } else if ("normalize".equals(currentFieldName)) {
+ normalizeLat = parser.booleanValue();
+ normalizeLon = parser.booleanValue();
+ } else {
+ point.resetFromString(parser.text());
+ fieldName = currentFieldName;
+ }
+ }
+ }
+
+ if (vDistance == null) {
+ throw new QueryParsingException(parseContext.index(), "geo_distance requires 'distance' to be specified");
+ } else if (vDistance instanceof Number) {
+ distance = DistanceUnit.DEFAULT.convert(((Number) vDistance).doubleValue(), unit);
+ } else {
+ distance = DistanceUnit.parse((String) vDistance, unit, DistanceUnit.DEFAULT);
+ }
+ distance = geoDistance.normalize(distance, DistanceUnit.DEFAULT);
+
+ if (normalizeLat || normalizeLon) {
+ GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+ }
+
+ MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartMappers == null || !smartMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+ }
+ FieldMapper<?> mapper = smartMappers.mapper();
+ if (!(mapper instanceof GeoPointFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+ }
+ GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
+
+ IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);
+ Filter filter = new GeoDistanceFilter(point.lat(), point.lon(), distance, geoDistance, indexFieldData, geoMapper, optimizeBbox);
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ filter = wrapSmartNameFilter(filter, smartMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java
new file mode 100644
index 0000000..343e50e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ *
+ */
+public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private Object from;
+ private Object to;
+ private boolean includeLower = true;
+ private boolean includeUpper = true;
+
+ private double lat;
+
+ private double lon;
+
+ private String geohash;
+
+ private GeoDistance geoDistance;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ private String optimizeBbox;
+
+ public GeoDistanceRangeFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ public GeoDistanceRangeFilterBuilder point(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder lat(double lat) {
+ this.lat = lat;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder lon(double lon) {
+ this.lon = lon;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder from(Object from) {
+ this.from = from;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder to(Object to) {
+ this.to = to;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder gt(Object from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder gte(Object from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder lt(Object to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder lte(Object to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder includeLower(boolean includeLower) {
+ this.includeLower = includeLower;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder includeUpper(boolean includeUpper) {
+ this.includeUpper = includeUpper;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder geohash(String geohash) {
+ this.geohash = geohash;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder geoDistance(GeoDistance geoDistance) {
+ this.geoDistance = geoDistance;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder optimizeBbox(String optimizeBbox) {
+ this.optimizeBbox = optimizeBbox;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public GeoDistanceRangeFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public GeoDistanceRangeFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public GeoDistanceRangeFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(GeoDistanceRangeFilterParser.NAME);
+ if (geohash != null) {
+ builder.field(name, geohash);
+ } else {
+ builder.startArray(name).value(lon).value(lat).endArray();
+ }
+ builder.field("from", from);
+ builder.field("to", to);
+ builder.field("include_lower", includeLower);
+ builder.field("include_upper", includeUpper);
+ if (geoDistance != null) {
+ builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
+ }
+ if (optimizeBbox != null) {
+ builder.field("optimize_bbox", optimizeBbox);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+}
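+
+// An illustrative usage sketch (assumes the geoDistanceRangeFilter(String)
+// factory on org.elasticsearch.index.query.FilterBuilders constructs this
+// builder; the field name and values are examples):
+//
+//   FilterBuilder filter = FilterBuilders.geoDistanceRangeFilter("pin.location")
+//           .point(40.0, -70.0)
+//           .gte("200km")
+//           .lt("400km");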
diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java
new file mode 100644
index 0000000..e411fb0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.search.geo.GeoDistanceRangeFilter;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * <pre>
+ * {
+ *     "geo_distance_range" : {
+ *         "from" : "1km",
+ *         "to" : "2km",
+ *         "name.lat" : 1.1,
+ *         "name.lon" : 1.2
+ *     }
+ * }
+ * </pre>
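+ *
+ * Bounds may also be expressed with "gt"/"gte" and "lt"/"lte", and the point
+ * as a [lon, lat] array (an illustrative sketch; the values are examples):
+ * <pre>
+ * {
+ *     "geo_distance_range" : {
+ *         "gte" : "1km",
+ *         "lt" : "2km",
+ *         "name" : [1.2, 1.1]
+ *     }
+ * }
+ * </pre>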
+ */
+public class GeoDistanceRangeFilterParser implements FilterParser {
+
+ public static final String NAME = "geo_distance_range";
+
+ @Inject
+ public GeoDistanceRangeFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "geoDistanceRange"};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token;
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String filterName = null;
+ String currentFieldName = null;
+ GeoPoint point = new GeoPoint();
+ String fieldName = null;
+ Object vFrom = null;
+ Object vTo = null;
+ boolean includeLower = true;
+ boolean includeUpper = true;
+ DistanceUnit unit = DistanceUnit.DEFAULT;
+ GeoDistance geoDistance = GeoDistance.DEFAULT;
+ String optimizeBbox = "memory";
+ boolean normalizeLon = true;
+ boolean normalizeLat = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ GeoPoint.parse(parser, point);
+ fieldName = currentFieldName;
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ // the json in the format of -> field : { lat : 30, lon : 12 }
+ fieldName = currentFieldName;
+ GeoPoint.parse(parser, point);
+ } else if (token.isValue()) {
+ if (currentFieldName.equals("from")) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ vFrom = parser.text(); // a String
+ } else {
+ vFrom = parser.numberValue(); // a Number
+ }
+ } else if (currentFieldName.equals("to")) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ vTo = parser.text(); // a String
+ } else {
+ vTo = parser.numberValue(); // a Number
+ }
+ } else if ("include_lower".equals(currentFieldName) || "includeLower".equals(currentFieldName)) {
+ includeLower = parser.booleanValue();
+ } else if ("include_upper".equals(currentFieldName) || "includeUpper".equals(currentFieldName)) {
+ includeUpper = parser.booleanValue();
+ } else if ("gt".equals(currentFieldName)) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ vFrom = parser.text(); // a String
+ } else {
+ vFrom = parser.numberValue(); // a Number
+ }
+ includeLower = false;
+ } else if ("gte".equals(currentFieldName) || "ge".equals(currentFieldName)) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ vFrom = parser.text(); // a String
+ } else {
+ vFrom = parser.numberValue(); // a Number
+ }
+ includeLower = true;
+ } else if ("lt".equals(currentFieldName)) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ vTo = parser.text(); // a String
+ } else {
+ vTo = parser.numberValue(); // a Number
+ }
+ includeUpper = false;
+ } else if ("lte".equals(currentFieldName) || "le".equals(currentFieldName)) {
+ if (token == XContentParser.Token.VALUE_NULL) {
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ vTo = parser.text(); // a String
+ } else {
+ vTo = parser.numberValue(); // a Number
+ }
+ includeUpper = true;
+ } else if (currentFieldName.equals("unit")) {
+ unit = DistanceUnit.fromString(parser.text());
+ } else if (currentFieldName.equals("distance_type") || currentFieldName.equals("distanceType")) {
+ geoDistance = GeoDistance.fromString(parser.text());
+ } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LAT_SUFFIX)) {
+ point.resetLat(parser.doubleValue());
+ fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LAT_SUFFIX.length());
+ } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.LON_SUFFIX)) {
+ point.resetLon(parser.doubleValue());
+ fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.LON_SUFFIX.length());
+ } else if (currentFieldName.endsWith(GeoPointFieldMapper.Names.GEOHASH_SUFFIX)) {
+ GeoHashUtils.decode(parser.text(), point);
+ fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) {
+ optimizeBbox = parser.textOrNull();
+ } else if ("normalize".equals(currentFieldName)) {
+ normalizeLat = parser.booleanValue();
+ normalizeLon = parser.booleanValue();
+ } else {
+ point.resetFromString(parser.text());
+ fieldName = currentFieldName;
+ }
+ }
+ }
+
+ Double from = null;
+ Double to = null;
+ if (vFrom != null) {
+ if (vFrom instanceof Number) {
+ from = unit.toMeters(((Number) vFrom).doubleValue());
+ } else {
+ from = DistanceUnit.parse((String) vFrom, unit, DistanceUnit.DEFAULT);
+ }
+ from = geoDistance.normalize(from, DistanceUnit.DEFAULT);
+ }
+ if (vTo != null) {
+ if (vTo instanceof Number) {
+ to = unit.toMeters(((Number) vTo).doubleValue());
+ } else {
+ to = DistanceUnit.parse((String) vTo, unit, DistanceUnit.DEFAULT);
+ }
+ to = geoDistance.normalize(to, DistanceUnit.DEFAULT);
+ }
+
+ if (normalizeLat || normalizeLon) {
+ GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+ }
+
+ MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartMappers == null || !smartMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+ }
+ FieldMapper<?> mapper = smartMappers.mapper();
+ if (!(mapper instanceof GeoPointFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+ }
+ GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
+
+ IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);
+ Filter filter = new GeoDistanceRangeFilter(point, from, to, includeLower, includeUpper, geoDistance, geoMapper, indexFieldData, optimizeBbox);
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ filter = wrapSmartNameFilter(filter, smartMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
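
For orientation, a sketch of the request-body JSON this parser accepts, built programmatically with XContentBuilder. Field name and values are illustrative; gt/gte/lt/lte may be used instead of from/to, and string distances such as "200km" are resolved through DistanceUnit exactly as in parse() above.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class GeoDistanceRangeJsonSketch {
        public static void main(String[] args) throws Exception {
            // Mirrors the shapes handled in parse(): from/to bounds, a
            // [lon, lat] array for the point, and the _cache flag.
            XContentBuilder json = XContentFactory.jsonBuilder().prettyPrint();
            json.startObject()
                    .startObject("geo_distance_range")
                        .field("from", "200km")
                        .field("to", "400km")
                        .startArray("pin.location").value(-70.0).value(40.0).endArray()
                        .field("_cache", true)
                    .endObject()
                .endObject();
            System.out.println(json.string());
        }
    }
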
diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java
new file mode 100644
index 0000000..e32a1e5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class GeoPolygonFilterBuilder extends BaseFilterBuilder {
+
+ public static final String POINTS = GeoPolygonFilterParser.POINTS;
+
+ private final String name;
+
+ private final List<GeoPoint> shell = Lists.newArrayList();
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ public GeoPolygonFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Adds a point to the polygon shell.
+ *
+ * @param lat The latitude
+ * @param lon The longitude
+ * @return this builder for chaining
+ */
+ public GeoPolygonFilterBuilder addPoint(double lat, double lon) {
+ return addPoint(new GeoPoint(lat, lon));
+ }
+
+ public GeoPolygonFilterBuilder addPoint(String geohash) {
+ return addPoint(GeoHashUtils.decode(geohash));
+ }
+
+ public GeoPolygonFilterBuilder addPoint(GeoPoint point) {
+ shell.add(point);
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public GeoPolygonFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public GeoPolygonFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public GeoPolygonFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(GeoPolygonFilterParser.NAME);
+
+ builder.startObject(name);
+ builder.startArray(POINTS);
+ for (GeoPoint point : shell) {
+ builder.startArray().value(point.lon()).value(point.lat()).endArray();
+ }
+ builder.endArray();
+ builder.endObject();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+
+ builder.endObject();
+ }
+}
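
A minimal usage sketch for the polygon builder above, using only the methods shown in this hunk. Field name and coordinates are hypothetical; note that addPoint takes lat/lon while doXContent serializes lon/lat, matching GeoJSON order.

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.GeoPolygonFilterBuilder;

    public class GeoPolygonBuilderSketch {
        public static void main(String[] args) throws Exception {
            GeoPolygonFilterBuilder filter = new GeoPolygonFilterBuilder("pin.location")
                    .addPoint(40.0, -70.0)   // lat, lon
                    .addPoint(30.0, -80.0)
                    .addPoint(20.0, -90.0);

            XContentBuilder out = XContentFactory.jsonBuilder().prettyPrint();
            filter.toXContent(out, ToXContent.EMPTY_PARAMS);
            System.out.println(out.string());
        }
    }
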
diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java
new file mode 100644
index 0000000..4a5a9fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.search.geo.GeoPolygonFilter;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * <pre>
+ * {
+ * "pin.location" : {
+ * "points" : [
+ * { "lat" : 12, "lon" : 40},
+ * {}
+ * ]
+ * }
+ * }
+ * </pre>
+ */
+public class GeoPolygonFilterParser implements FilterParser {
+
+ public static final String NAME = "geo_polygon";
+ public static final String POINTS = "points";
+
+ @Inject
+ public GeoPolygonFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "geoPolygon"};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+
+ List<GeoPoint> shell = Lists.newArrayList();
+
+ boolean normalizeLon = true;
+ boolean normalizeLat = true;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if(POINTS.equals(currentFieldName)) {
+ while((token = parser.nextToken()) != Token.END_ARRAY) {
+ shell.add(GeoPoint.parse(parser));
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else if ("normalize".equals(currentFieldName)) {
+ normalizeLat = parser.booleanValue();
+ normalizeLon = parser.booleanValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (shell.isEmpty()) {
+ throw new QueryParsingException(parseContext.index(), "no points defined for geo_polygon filter");
+ } else {
+ if(shell.size() < 3) {
+ throw new QueryParsingException(parseContext.index(), "to few points defined for geo_polygon filter");
+ }
+ GeoPoint start = shell.get(0);
+ if(!start.equals(shell.get(shell.size()-1))) {
+ shell.add(start);
+ }
+ if(shell.size() < 4) {
+ throw new QueryParsingException(parseContext.index(), "to few points defined for geo_polygon filter");
+ }
+ }
+
+ if (normalizeLat || normalizeLon) {
+ for (GeoPoint point : shell) {
+ GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+ }
+ }
+
+ MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartMappers == null || !smartMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+ }
+ FieldMapper<?> mapper = smartMappers.mapper();
+ if (!(mapper instanceof GeoPointFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+ }
+
+ IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);
+ Filter filter = new GeoPolygonFilter(indexFieldData, shell.toArray(new GeoPoint[shell.size()]));
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ filter = wrapSmartNameFilter(filter, smartMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
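
A sketch of the request-body JSON this parser accepts, under the validation rules just shown: at least three distinct points are required, and if the first point is not repeated at the end, parse() closes the ring itself. Field name and coordinates are illustrative.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class GeoPolygonJsonSketch {
        public static void main(String[] args) throws Exception {
            XContentBuilder json = XContentFactory.jsonBuilder().prettyPrint();
            json.startObject()
                    .startObject("geo_polygon")
                        .startObject("pin.location")
                            .startArray("points")
                                .startObject().field("lat", 40.0).field("lon", -70.0).endObject()
                                .startObject().field("lat", 30.0).field("lon", -80.0).endObject()
                                .startObject().field("lat", 20.0).field("lon", -90.0).endObject()
                            .endArray()
                        .endObject()
                    .endObject()
                .endObject();
            System.out.println(json.string());
        }
    }
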
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java
new file mode 100644
index 0000000..4ff26d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.SpatialStrategy;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * {@link FilterBuilder} that builds a GeoShape Filter
+ */
+public class GeoShapeFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private final ShapeBuilder shape;
+
+ private SpatialStrategy strategy = null;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ private final String indexedShapeId;
+ private final String indexedShapeType;
+
+ private String indexedShapeIndex;
+ private String indexedShapePath;
+
+ private ShapeRelation relation = null;
+
+ /**
+ * Creates a new GeoShapeFilterBuilder whose Filter will be against the
+ * given field name using the given Shape
+ *
+ * @param name Name of the field that will be filtered
+ * @param shape Shape used in the filter
+ */
+ public GeoShapeFilterBuilder(String name, ShapeBuilder shape) {
+ this(name, shape, null, null, null);
+ }
+
+ /**
+ * Creates a new GeoShapeFilterBuilder whose Filter will be against the
+ * given field name using the given Shape
+ *
+ * @param name Name of the field that will be filtered
+ * @param shape Shape used in the filter
+ * @param relation {@link ShapeRelation} between the query shape and the indexed shape
+ */
+ public GeoShapeFilterBuilder(String name, ShapeBuilder shape, ShapeRelation relation) {
+ this(name, shape, null, null, relation);
+ }
+
+ /**
+ * Creates a new GeoShapeFilterBuilder whose Filter will be against the given field name
+ * and will use the Shape found with the given ID in the given type
+ *
+ * @param name Name of the field that will be filtered
+ * @param indexedShapeId ID of the indexed Shape that will be used in the Filter
+ * @param indexedShapeType Index type of the indexed Shapes
+ * @param relation {@link ShapeRelation} between the query shape and the indexed shape
+ */
+ public GeoShapeFilterBuilder(String name, String indexedShapeId, String indexedShapeType, ShapeRelation relation) {
+ this(name, null, indexedShapeId, indexedShapeType, relation);
+ }
+
+ private GeoShapeFilterBuilder(String name, ShapeBuilder shape, String indexedShapeId, String indexedShapeType, ShapeRelation relation) {
+ this.name = name;
+ this.shape = shape;
+ this.indexedShapeId = indexedShapeId;
+ this.relation = relation;
+ this.indexedShapeType = indexedShapeType;
+ }
+
+ /**
+ * Sets whether the filter will be cached.
+ *
+ * @param cache Whether filter will be cached
+ * @return this
+ */
+ public GeoShapeFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ /**
+ * Sets the key used for the filter if it is cached
+ *
+ * @param cacheKey Key for the Filter if cached
+ * @return this
+ */
+ public GeoShapeFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ /**
+ * Sets the name of the filter
+ *
+ * @param filterName Name of the filter
+ * @return this
+ */
+ public GeoShapeFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Defines which spatial strategy will be used for building the geo shape filter. When not set, the strategy
+ * associated with the geo shape field in the mappings is used.
+ *
+ * @param strategy The spatial strategy to use for building the geo shape filter
+ * @return this
+ */
+ public GeoShapeFilterBuilder strategy(SpatialStrategy strategy) {
+ this.strategy = strategy;
+ return this;
+ }
+
+ /**
+ * Sets the name of the index where the indexed Shape can be found
+ *
+ * @param indexedShapeIndex Name of the index where the indexed Shape is
+ * @return this
+ */
+ public GeoShapeFilterBuilder indexedShapeIndex(String indexedShapeIndex) {
+ this.indexedShapeIndex = indexedShapeIndex;
+ return this;
+ }
+
+ /**
+ * Sets the path of the field in the indexed Shape document that has the Shape itself
+ *
+ * @param indexedShapePath Path of the field where the Shape itself is defined
+ * @return this
+ */
+ public GeoShapeFilterBuilder indexedShapePath(String indexedShapePath) {
+ this.indexedShapePath = indexedShapePath;
+ return this;
+ }
+
+ /**
+ * Sets the relation of query shape and indexed shape.
+ *
+ * @param relation relation of the shapes
+ * @return this
+ */
+ public GeoShapeFilterBuilder relation(ShapeRelation relation) {
+ this.relation = relation;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(GeoShapeFilterParser.NAME);
+
+ builder.startObject(name);
+
+ if (strategy != null) {
+ builder.field("strategy", strategy.getStrategyName());
+ }
+
+ if (shape != null) {
+ builder.field("shape", shape);
+ } else {
+ builder.startObject("indexed_shape")
+ .field("id", indexedShapeId)
+ .field("type", indexedShapeType);
+ if (indexedShapeIndex != null) {
+ builder.field("index", indexedShapeIndex);
+ }
+ if (indexedShapePath != null) {
+ builder.field("path", indexedShapePath);
+ }
+ builder.endObject();
+ }
+
+ if(relation != null) {
+ builder.field("relation", relation.getRelationName());
+ }
+
+ builder.endObject();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+
+ builder.endObject();
+ }
+}
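
A minimal usage sketch for the filter builder above. The envelope factory and its corner setters are assumed from the ShapeBuilder API of this codebase (corners given as lon, lat); the field name is hypothetical.

    import org.elasticsearch.common.geo.ShapeRelation;
    import org.elasticsearch.common.geo.builders.ShapeBuilder;
    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.GeoShapeFilterBuilder;

    public class GeoShapeFilterBuilderSketch {
        public static void main(String[] args) throws Exception {
            // Assumed ShapeBuilder factory; corners as (lon, lat).
            ShapeBuilder shape = ShapeBuilder.newEnvelope()
                    .topLeft(-45.0, 45.0)
                    .bottomRight(45.0, -45.0);

            GeoShapeFilterBuilder filter =
                    new GeoShapeFilterBuilder("location", shape, ShapeRelation.INTERSECTS)
                            .cache(true);

            XContentBuilder out = XContentFactory.jsonBuilder().prettyPrint();
            filter.toXContent(out, ToXContent.EMPTY_PARAMS);
            System.out.println(out.string());
        }
    }
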
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java
new file mode 100644
index 0000000..4f4f2a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.spatial4j.core.shape.Shape;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.internal.Nullable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
+import org.elasticsearch.index.search.shape.ShapeFetchService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * {@link FilterParser} for filtering Documents based on {@link Shape}s.
+ * <p/>
+ * Only those fields mapped using {@link GeoShapeFieldMapper} can be filtered
+ * using this parser.
+ * <p/>
+ * Format supported:
+ * <p/>
+ * <pre>
+ * "field" : {
+ * "relation" : "intersects",
+ * "shape" : {
+ * "type" : "polygon",
+ * "coordinates" : [
+ * [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]
+ * ]
+ * }
+ * }
+ * </pre>
+ */
+public class GeoShapeFilterParser implements FilterParser {
+
+ public static final String NAME = "geo_shape";
+
+ private ShapeFetchService fetchService;
+
+ public static class DEFAULTS {
+ public static final String INDEX_NAME = "shapes";
+ public static final String SHAPE_FIELD_NAME = "shape";
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "geoShape"};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String fieldName = null;
+ ShapeRelation shapeRelation = ShapeRelation.INTERSECTS;
+ String strategyName = null;
+ ShapeBuilder shape = null;
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String filterName = null;
+
+ String id = null;
+ String type = null;
+ String index = DEFAULTS.INDEX_NAME;
+ String shapePath = DEFAULTS.SHAPE_FIELD_NAME;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+
+ token = parser.nextToken();
+ if ("shape".equals(currentFieldName)) {
+ shape = ShapeBuilder.parse(parser);
+ } else if ("relation".equals(currentFieldName)) {
+ shapeRelation = ShapeRelation.getRelationByName(parser.text());
+ if (shapeRelation == null) {
+ throw new QueryParsingException(parseContext.index(), "Unknown shape operation [" + parser.text() + "]");
+ }
+ } else if ("strategy".equals(currentFieldName)) {
+ strategyName = parser.text();
+ } else if ("indexed_shape".equals(currentFieldName) || "indexedShape".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("id".equals(currentFieldName)) {
+ id = parser.text();
+ } else if ("type".equals(currentFieldName)) {
+ type = parser.text();
+ } else if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("path".equals(currentFieldName)) {
+ shapePath = parser.text();
+ }
+ }
+ }
+ if (id == null) {
+ throw new QueryParsingException(parseContext.index(), "ID for indexed shape not provided");
+ } else if (type == null) {
+ throw new QueryParsingException(parseContext.index(), "Type for indexed shape not provided");
+ }
+ shape = fetchService.fetch(id, type, index, shapePath);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_shape] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_shape] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (shape == null) {
+ throw new QueryParsingException(parseContext.index(), "No Shape defined");
+ } else if (shapeRelation == null) {
+ throw new QueryParsingException(parseContext.index(), "No Shape Relation defined");
+ }
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "Failed to find geo_shape field [" + fieldName + "]");
+ }
+
+ FieldMapper fieldMapper = smartNameFieldMappers.mapper();
+ // TODO: This isn't the nicest way to check this
+ if (!(fieldMapper instanceof GeoShapeFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a geo_shape");
+ }
+
+ GeoShapeFieldMapper shapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = shapeFieldMapper.defaultStrategy();
+ if (strategyName != null) {
+ strategy = shapeFieldMapper.resolveStrategy(strategyName);
+ }
+ Filter filter = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, shapeRelation));
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+
+ return filter;
+ }
+
+ @Inject(optional = true)
+ public void setFetchService(@Nullable ShapeFetchService fetchService) {
+ this.fetchService = fetchService;
+ }
+}
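
A sketch of the indexed_shape form this parser accepts, built with XContentBuilder. The id, type, index and path values are placeholders; parse() resolves them through ShapeFetchService.fetch(id, type, index, path) as shown above.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class GeoShapeIndexedJsonSketch {
        public static void main(String[] args) throws Exception {
            XContentBuilder json = XContentFactory.jsonBuilder().prettyPrint();
            json.startObject()
                    .startObject("geo_shape")
                        .startObject("location")
                            .field("relation", "intersects")
                            .startObject("indexed_shape")
                                .field("id", "DEU")
                                .field("type", "countries")
                                .field("index", "shapes")
                                .field("path", "shape")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject();
            System.out.println(json.string());
        }
    }
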
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
new file mode 100644
index 0000000..efda63b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.SpatialStrategy;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * {@link QueryBuilder} that builds a GeoShape Query
+ */
+public class GeoShapeQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<GeoShapeQueryBuilder> {
+
+ private final String name;
+
+ private SpatialStrategy strategy = null;
+
+ private final ShapeBuilder shape;
+
+ private float boost = -1;
+
+ private final String indexedShapeId;
+ private final String indexedShapeType;
+
+ private String indexedShapeIndex;
+ private String indexedShapePath;
+
+ private String queryName;
+
+ /**
+ * Creates a new GeoShapeQueryBuilder whose Query will be against the
+ * given field name using the given Shape
+ *
+ * @param name Name of the field that will be queried
+ * @param shape Shape used in the query
+ */
+ public GeoShapeQueryBuilder(String name, ShapeBuilder shape) {
+ this(name, shape, null, null);
+ }
+
+ /**
+ * Creates a new GeoShapeQueryBuilder whose Query will be against the given field name
+ * and will use the Shape found with the given ID in the given type
+ *
+ * @param name Name of the field that will be queried
+ * @param indexedShapeId ID of the indexed Shape that will be used in the Query
+ * @param indexedShapeType Index type of the indexed Shapes
+ */
+ public GeoShapeQueryBuilder(String name, String indexedShapeId, String indexedShapeType) {
+ this(name, null, indexedShapeId, indexedShapeType);
+ }
+
+ private GeoShapeQueryBuilder(String name, ShapeBuilder shape, String indexedShapeId, String indexedShapeType) {
+ this.name = name;
+ this.shape = shape;
+ this.indexedShapeId = indexedShapeId;
+ this.indexedShapeType = indexedShapeType;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public GeoShapeQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Defines which spatial strategy will be used for building the geo shape query. When not set, the strategy
+ * associated with the geo shape field in the mappings is used.
+ *
+ * @param strategy The spatial strategy to use for building the geo shape query
+ * @return this
+ */
+ public GeoShapeQueryBuilder strategy(SpatialStrategy strategy) {
+ this.strategy = strategy;
+ return this;
+ }
+
+ /**
+ * Sets the name of the index where the indexed Shape can be found
+ *
+ * @param indexedShapeIndex Name of the index where the indexed Shape is
+ * @return this
+ */
+ public GeoShapeQueryBuilder indexedShapeIndex(String indexedShapeIndex) {
+ this.indexedShapeIndex = indexedShapeIndex;
+ return this;
+ }
+
+ /**
+ * Sets the path of the field in the indexed Shape document that has the Shape itself
+ *
+ * @param indexedShapePath path of the field where the Shape itself is defined
+ * @return this
+ */
+ public GeoShapeQueryBuilder indexedShapePath(String indexedShapePath) {
+ this.indexedShapePath = indexedShapePath;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched_queries per hit.
+ */
+ public GeoShapeQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(GeoShapeQueryParser.NAME);
+
+ builder.startObject(name);
+
+ if (strategy != null) {
+ builder.field("strategy", strategy.getStrategyName());
+ }
+
+ if (shape != null) {
+ builder.field("shape", shape);
+ } else {
+ builder.startObject("indexed_shape")
+ .field("id", indexedShapeId)
+ .field("type", indexedShapeType);
+ if (indexedShapeIndex != null) {
+ builder.field("index", indexedShapeIndex);
+ }
+ if (indexedShapePath != null) {
+ builder.field("path", indexedShapePath);
+ }
+ builder.endObject();
+ }
+
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+}
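
A minimal usage sketch for the query builder above, exercising boost() and queryName(). As in the earlier sketch, the envelope factory and (lon, lat) corner order are assumptions about the ShapeBuilder API; the field and query name are hypothetical.

    import org.elasticsearch.common.geo.builders.ShapeBuilder;
    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.GeoShapeQueryBuilder;

    public class GeoShapeQueryBuilderSketch {
        public static void main(String[] args) throws Exception {
            ShapeBuilder shape = ShapeBuilder.newEnvelope()
                    .topLeft(-45.0, 45.0)
                    .bottomRight(45.0, -45.0);

            GeoShapeQueryBuilder query = new GeoShapeQueryBuilder("location", shape)
                    .boost(2.0f)
                    .queryName("my_geo_query");

            XContentBuilder out = XContentFactory.jsonBuilder().prettyPrint();
            query.toXContent(out, ToXContent.EMPTY_PARAMS);
            System.out.println(out.string());
        }
    }
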
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
new file mode 100644
index 0000000..84f0dbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
+import org.elasticsearch.index.search.shape.ShapeFetchService;
+
+import java.io.IOException;
+
+public class GeoShapeQueryParser implements QueryParser {
+
+ public static final String NAME = "geo_shape";
+
+ private ShapeFetchService fetchService;
+
+ public static class DEFAULTS {
+ public static final String INDEX_NAME = "shapes";
+ public static final String SHAPE_FIELD_NAME = "shape";
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String fieldName = null;
+ ShapeRelation shapeRelation = ShapeRelation.INTERSECTS;
+ String strategyName = null;
+ ShapeBuilder shape = null;
+
+ String id = null;
+ String type = null;
+ String index = DEFAULTS.INDEX_NAME;
+ String shapePath = DEFAULTS.SHAPE_FIELD_NAME;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ float boost = 1f;
+ String queryName = null;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if ("shape".equals(currentFieldName)) {
+ shape = ShapeBuilder.parse(parser);
+ } else if ("strategy".equals(currentFieldName)) {
+ strategyName = parser.text();
+ } else if ("relation".equals(currentFieldName)) {
+ shapeRelation = ShapeRelation.getRelationByName(parser.text());
+ if (shapeRelation == null) {
+ throw new QueryParsingException(parseContext.index(), "Unknown shape operation [" + parser.text() + " ]");
+ }
+ } else if ("indexed_shape".equals(currentFieldName) || "indexedShape".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("id".equals(currentFieldName)) {
+ id = parser.text();
+ } else if ("type".equals(currentFieldName)) {
+ type = parser.text();
+ } else if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("path".equals(currentFieldName)) {
+ shapePath = parser.text();
+ }
+ }
+ }
+ if (id == null) {
+ throw new QueryParsingException(parseContext.index(), "ID for indexed shape not provided");
+ } else if (type == null) {
+ throw new QueryParsingException(parseContext.index(), "Type for indexed shape not provided");
+ }
+ shape = fetchService.fetch(id, type, index, shapePath);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_shape] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[geo_shape] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (shape == null) {
+ throw new QueryParsingException(parseContext.index(), "No Shape defined");
+ } else if (shapeRelation == null) {
+ throw new QueryParsingException(parseContext.index(), "No Shape Relation defined");
+ }
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "Failed to find geo_shape field [" + fieldName + "]");
+ }
+
+ FieldMapper fieldMapper = smartNameFieldMappers.mapper();
+ // TODO: This isn't the nicest way to check this
+ if (!(fieldMapper instanceof GeoShapeFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a geo_shape");
+ }
+
+ GeoShapeFieldMapper shapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+
+ PrefixTreeStrategy strategy = shapeFieldMapper.defaultStrategy();
+ if (strategyName != null) {
+ strategy = shapeFieldMapper.resolveStrategy(strategyName);
+ }
+ Query query = strategy.makeQuery(getArgs(shape, shapeRelation));
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+
+ @Inject(optional = true)
+ public void setFetchService(@Nullable ShapeFetchService fetchService) {
+ this.fetchService = fetchService;
+ }
+
+ public static SpatialArgs getArgs(ShapeBuilder shape, ShapeRelation relation) {
+ switch(relation) {
+ case DISJOINT:
+ return new SpatialArgs(SpatialOperation.IsDisjointTo, shape.build());
+ case INTERSECTS:
+ return new SpatialArgs(SpatialOperation.Intersects, shape.build());
+ case WITHIN:
+ return new SpatialArgs(SpatialOperation.IsWithin, shape.build());
+ default:
+ throw new ElasticsearchIllegalArgumentException("");
+
+ }
+ }
+}
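
A small sketch of the static getArgs() helper just defined, which maps a ShapeRelation to the corresponding Lucene SpatialArgs. The envelope factory and corner order are assumptions carried over from the sketches above; SpatialArgs.getOperation() is the standard Lucene spatial accessor.

    import org.apache.lucene.spatial.query.SpatialArgs;
    import org.elasticsearch.common.geo.ShapeRelation;
    import org.elasticsearch.common.geo.builders.ShapeBuilder;
    import org.elasticsearch.index.query.GeoShapeQueryParser;

    public class SpatialArgsSketch {
        public static void main(String[] args) {
            ShapeBuilder shape = ShapeBuilder.newEnvelope()
                    .topLeft(-45.0, 45.0)
                    .bottomRight(45.0, -45.0);
            // WITHIN maps to SpatialOperation.IsWithin per the switch above.
            SpatialArgs spatialArgs = GeoShapeQueryParser.getArgs(shape, ShapeRelation.WITHIN);
            System.out.println(spatialArgs.getOperation());
        }
    }
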
diff --git a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java
new file mode 100644
index 0000000..b41ba3b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A geohash cell filter that filters {@link GeoPoint}s by their geohashes. Basically, a
+ * geohash prefix is defined by the filter and all geohashes matching this
+ * prefix will be returned. The <code>neighbors</code> flag allows the filter to also
+ * match the geohashes that surround the given geohash. In general the neighborhood of a
+ * geohash is defined by its eight adjacent cells.<br />
+ * The structure of the {@link GeohashCellFilter} is defined as:
+ * <pre>
+ * &quot;geohash_bbox&quot; {
+ * &quot;field&quot;:&quot;location&quot;,
+ * &quot;geohash&quot;:&quot;u33d8u5dkx8k&quot;,
+ * &quot;neighbors&quot;:false
+ * }
+ * </pre>
+ */
+public class GeohashCellFilter {
+
+ public static final String NAME = "geohash_cell";
+ public static final String NEIGHBORS = "neighbors";
+ public static final String PRECISION = "precision";
+
+ /**
+ * Create a new geohash filter for a given set of geohashes. In general this method
+ * returns a boolean filter combining the geohashes OR-wise.
+ *
+ * @param context Context of the filter
+ * @param fieldMapper field mapper for geopoints
+ * @param geohash mandatory geohash
+ * @param geohashes optional array of additional geohashes
+ * @return a new geohash cell filter
+ */
+ public static Filter create(QueryParseContext context, GeoPointFieldMapper fieldMapper, String geohash, @Nullable List<String> geohashes) {
+ if (fieldMapper.geoHashStringMapper() == null) {
+ throw new ElasticsearchIllegalArgumentException("geohash filter needs geohash_prefix to be enabled");
+ }
+
+ StringFieldMapper geoHashMapper = fieldMapper.geoHashStringMapper();
+ if (geohashes == null || geohashes.size() == 0) {
+ return geoHashMapper.termFilter(geohash, context);
+ } else {
+ geohashes.add(geohash);
+ return geoHashMapper.termsFilter(geohashes, context);
+ }
+ }
+
+ /**
+ * Builder for a geohash cell filter. It needs the fields <code>field</code> and
+ * <code>geohash</code> to be set. The default for neighbor filtering is
+ * <code>false</code>.
+ */
+ public static class Builder extends BaseFilterBuilder {
+ // we need to store the geohash rather than the corresponding point,
+ // because a transformation from a geohash to a point and back to the
+ // geohash would extend the accuracy of the hash to max precision,
+ // i.e. by filling it up with z's.
+ private String field;
+ private String geohash;
+ private int levels = -1;
+ private boolean neighbors;
+
+ public Builder(String field) {
+ this(field, null, false);
+ }
+
+ public Builder(String field, GeoPoint point) {
+ this(field, point.geohash(), false);
+ }
+
+ public Builder(String field, String geohash) {
+ this(field, geohash, false);
+ }
+
+ public Builder(String field, String geohash, boolean neighbors) {
+ super();
+ this.field = field;
+ this.geohash = geohash;
+ this.neighbors = neighbors;
+ }
+
+ public Builder point(GeoPoint point) {
+ this.geohash = point.getGeohash();
+ return this;
+ }
+
+ public Builder point(double lat, double lon) {
+ this.geohash = GeoHashUtils.encode(lat, lon);
+ return this;
+ }
+
+ public Builder geohash(String geohash) {
+ this.geohash = geohash;
+ return this;
+ }
+
+ public Builder precision(int levels) {
+ this.levels = levels;
+ return this;
+ }
+
+ public Builder precision(String precision) {
+ double meters = DistanceUnit.parse(precision, DistanceUnit.DEFAULT, DistanceUnit.METERS);
+ return precision(GeoUtils.geoHashLevelsForPrecision(meters));
+ }
+
+ public Builder neighbors(boolean neighbors) {
+ this.neighbors = neighbors;
+ return this;
+ }
+
+ public Builder field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(NAME);
+ if (neighbors) {
+ builder.field(NEIGHBORS, neighbors);
+ }
+ if(levels > 0) {
+ builder.field(PRECISION, levels);
+ }
+ builder.field(field, geohash);
+
+ builder.endObject();
+ }
+ }
+
+ public static class Parser implements FilterParser {
+
+ @Inject
+ public Parser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String fieldName = null;
+ String geohash = null;
+ int levels = -1;
+ boolean neighbors = false;
+
+ XContentParser.Token token;
+ if ((token = parser.currentToken()) != Token.START_OBJECT) {
+ throw new ElasticsearchParseException(NAME + " must be an object");
+ }
+
+ while ((token = parser.nextToken()) != Token.END_OBJECT) {
+ if (token == Token.FIELD_NAME) {
+ String field = parser.text();
+
+ if (PRECISION.equals(field)) {
+ token = parser.nextToken();
+ if(token == Token.VALUE_NUMBER) {
+ levels = parser.intValue();
+ } else if(token == Token.VALUE_STRING) {
+ double meters = DistanceUnit.parse(parser.text(), DistanceUnit.DEFAULT, DistanceUnit.METERS);
+ levels = GeoUtils.geoHashLevelsForPrecision(meters);
+ }
+ } else if (NEIGHBORS.equals(field)) {
+ parser.nextToken();
+ neighbors = parser.booleanValue();
+ } else {
+ fieldName = field;
+ token = parser.nextToken();
+ if(token == Token.VALUE_STRING) {
+ // A string indicates either a geohash or a lat/lon string
+ String location = parser.text();
+ if(location.indexOf(",")>0) {
+ geohash = GeoPoint.parse(parser).geohash();
+ } else {
+ geohash = location;
+ }
+ } else {
+ geohash = GeoPoint.parse(parser).geohash();
+ }
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ }
+
+ if (geohash == null) {
+ throw new QueryParsingException(parseContext.index(), "no geohash value provided to geohash_cell filter");
+ }
+
+ MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartMappers == null || !smartMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+ }
+
+ FieldMapper<?> mapper = smartMappers.mapper();
+ if (!(mapper instanceof GeoPointFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+ }
+
+ GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
+ if (!geoMapper.isEnableGeohashPrefix()) {
+ throw new QueryParsingException(parseContext.index(), "can't execute geohash_cell on field [" + fieldName + "], geohash_prefix is not enabled");
+ }
+
+ if(levels > 0) {
+ int len = Math.min(levels, geohash.length());
+ geohash = geohash.substring(0, len);
+ }
+
+ if (neighbors) {
+ return create(parseContext, geoMapper, geohash, GeoHashUtils.neighbors(geohash));
+ } else {
+ return create(parseContext, geoMapper, geohash, null);
+ }
+ }
+ }
+}
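
A minimal usage sketch for the Builder defined above, using only constructors and setters shown in this hunk; the field name and geohash come from the Javadoc example at the top of the class.

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.GeohashCellFilter;

    public class GeohashCellSketch {
        public static void main(String[] args) throws Exception {
            // neighbors=true includes the eight adjacent cells; precision(5)
            // truncates the geohash to five levels at parse time.
            GeohashCellFilter.Builder filter =
                    new GeohashCellFilter.Builder("location", "u33d8u5dkx8k", true)
                            .precision(5);

            XContentBuilder out = XContentFactory.jsonBuilder().prettyPrint();
            filter.toXContent(out, ToXContent.EMPTY_PARAMS);
            System.out.println(out.string());
        }
    }
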
diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterBuilder.java
new file mode 100644
index 0000000..4417bcd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterBuilder.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class HasChildFilterBuilder extends BaseFilterBuilder {
+
+ private final FilterBuilder filterBuilder;
+ private final QueryBuilder queryBuilder;
+ private String childType;
+ private String filterName;
+ private Integer shortCircuitCutoff;
+
+ public HasChildFilterBuilder(String type, QueryBuilder queryBuilder) {
+ this.childType = type;
+ this.queryBuilder = queryBuilder;
+ this.filterBuilder = null;
+ }
+
+ public HasChildFilterBuilder(String type, FilterBuilder filterBuilder) {
+ this.childType = type;
+ this.queryBuilder = null;
+ this.filterBuilder = filterBuilder;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public HasChildFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * This is a noop since has_child can't be cached.
+ */
+ public HasChildFilterBuilder cache(boolean cache) {
+ return this;
+ }
+
+ /**
+ * This is a noop since has_child can't be cached.
+ */
+ public HasChildFilterBuilder cacheKey(String cacheKey) {
+ return this;
+ }
+
+ /**
+ * Configures the cut-off point at which the filter switches to only evaluating parent documents
+ * that contain the matching parent id terms, instead of evaluating all parent docs.
+ */
+ public HasChildFilterBuilder setShortCircuitCutoff(int shortCircuitCutoff) {
+ this.shortCircuitCutoff = shortCircuitCutoff;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(HasChildFilterParser.NAME);
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else if (filterBuilder != null) {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ builder.field("child_type", childType);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (shortCircuitCutoff != null) {
+ builder.field("short_circuit_cutoff", shortCircuitCutoff);
+ }
+ builder.endObject();
+ }
+}
+
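
A minimal usage sketch for the has_child filter builder above. The child type, field and value are hypothetical; QueryBuilders.termQuery supplies the inner query that child documents must match.

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.HasChildFilterBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class HasChildFilterSketch {
        public static void main(String[] args) throws Exception {
            // "comment" is a hypothetical child type; short_circuit_cutoff
            // tunes the parent lookup as described in the parser below.
            HasChildFilterBuilder filter =
                    new HasChildFilterBuilder("comment", QueryBuilders.termQuery("user", "kimchy"))
                            .setShortCircuitCutoff(8192)
                            .filterName("has_comment_by_kimchy");

            XContentBuilder out = XContentFactory.jsonBuilder().prettyPrint();
            filter.toXContent(out, ToXContent.EMPTY_PARAMS);
            System.out.println(out.string());
        }
    }
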
diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java
new file mode 100644
index 0000000..d69b288
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery;
+import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
+import org.elasticsearch.index.search.child.DeleteByQueryWrappingFilter;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class HasChildFilterParser implements FilterParser {
+
+ public static final String NAME = "has_child";
+
+ @Inject
+ public HasChildFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ String childType = null;
+ int shortCircuitParentDocSet = 8192; // Tests show a cut-off point between 8192 and 16384.
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ // TODO we need to set the type, but, `query` can come before `type`...
+ // since we switch types, make sure we change the context
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
+ try {
+ query = parseContext.parseInnerQuery();
+ queryFound = true;
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else if ("filter".equals(currentFieldName)) {
+ // TODO handle `filter` element before `type` element...
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
+ try {
+ Filter innerFilter = parseContext.parseInnerFilter();
+ query = new XConstantScoreQuery(innerFilter);
+ queryFound = true;
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
+ childType = parser.text();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_child] filter has been removed, use a filter as a facet_filter in the relevant global facet");
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ // noop to be backwards compatible
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ // noop to be backwards compatible
+ } else if ("short_circuit_cutoff".equals(currentFieldName)) {
+ shortCircuitParentDocSet = parser.intValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'query' field");
+ }
+ if (query == null) {
+ return null;
+ }
+ if (childType == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'type' field");
+ }
+
+ DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
+ if (childDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]");
+ }
+ if (!childDocMapper.parentFieldMapper().active()) {
+ throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping");
+ }
+ String parentType = childDocMapper.parentFieldMapper().type();
+
+ // wrap the query with type query
+ query = new XFilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
+
+ DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
+ if (parentDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]");
+ }
+
+ Filter nonNestedDocsFilter = null;
+ if (parentDocMapper.hasNestedObjects()) {
+ nonNestedDocsFilter = parseContext.cacheFilter(NonNestedDocsFilter.INSTANCE, null);
+ }
+
+ Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
+ Query childrenConstantScoreQuery = new ChildrenConstantScoreQuery(query, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
+
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(childrenConstantScoreQuery));
+ }
+
+ boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
+ if (deleteByQuery) {
+ return new DeleteByQueryWrappingFilter(childrenConstantScoreQuery);
+ } else {
+ return new CustomQueryWrappingFilter(childrenConstantScoreQuery);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java
new file mode 100644
index 0000000..ee9bdbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for the 'has_child' query.
+ */
+public class HasChildQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<HasChildQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+
+ private String childType;
+
+ private float boost = 1.0f;
+
+ private String scoreType;
+
+ private Integer shortCircuitCutoff;
+
+ private String queryName;
+
+ public HasChildQueryBuilder(String type, QueryBuilder queryBuilder) {
+ this.childType = type;
+ this.queryBuilder = queryBuilder;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public HasChildQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Defines how the scores from the matching child documents are mapped into the parent document.
+ */
+ public HasChildQueryBuilder scoreType(String scoreType) {
+ this.scoreType = scoreType;
+ return this;
+ }
+
+ /**
+ * Configures the cut-off point: when the number of matching parent id terms stays below this
+ * value, only parent documents that contain those terms are evaluated instead of all parent docs.
+ */
+ public HasChildQueryBuilder setShortCircuitCutoff(int shortCircuitCutoff) {
+ this.shortCircuitCutoff = shortCircuitCutoff;
+ return this;
+ }
+
+ /**
+ * Sets the query name for this query that can be used when searching for matched_filters per hit.
+ */
+ public HasChildQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(HasChildQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ builder.field("child_type", childType);
+ if (boost != 1.0f) {
+ builder.field("boost", boost);
+ }
+ if (scoreType != null) {
+ builder.field("score_type", scoreType);
+ }
+ if (shortCircuitCutoff != null) {
+ builder.field("short_circuit_cutoff", shortCircuitCutoff);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
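
A usage sketch for this query builder (illustrative only; QueryBuilders.hasChildQuery is assumed to construct this class):

    import org.elasticsearch.index.query.HasChildQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class HasChildQueryExample {
        public static HasChildQueryBuilder scoredComments() {
            // "max" scores each parent by its best matching child; the parser
            // that follows resolves the value via ScoreType.fromString.
            return QueryBuilders.hasChildQuery("comment",
                    QueryBuilders.matchQuery("message", "elasticsearch"))
                    .scoreType("max")
                    .setShortCircuitCutoff(8192)
                    .boost(1.2f);
        }
    }
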
diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java
new file mode 100644
index 0000000..c5b1ed9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.search.child.*;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * Parser for the 'has_child' query.
+ */
+public class HasChildQueryParser implements QueryParser {
+
+ public static final String NAME = "has_child";
+
+ @Inject
+ public HasChildQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query innerQuery = null;
+ boolean queryFound = false;
+ float boost = 1.0f;
+ String childType = null;
+ ScoreType scoreType = null;
+ int shortCircuitParentDocSet = 8192;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ // TODO we need to set the type, but, `query` can come before `type`... (see HasChildFilterParser)
+ // since we switch types, make sure we change the context
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
+ try {
+ innerQuery = parseContext.parseInnerQuery();
+ queryFound = true;
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
+ childType = parser.text();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_child] query has been removed, use a filter as a facet_filter in the relevant global facet");
+ } else if ("score_type".equals(currentFieldName) || "scoreType".equals(currentFieldName)) {
+ String scoreTypeValue = parser.text();
+ if (!"none".equals(scoreTypeValue)) {
+ scoreType = ScoreType.fromString(scoreTypeValue);
+ }
+ } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
+ String scoreModeValue = parser.text();
+ if (!"none".equals(scoreModeValue)) {
+ scoreType = ScoreType.fromString(scoreModeValue);
+ }
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("short_circuit_cutoff".equals(currentFieldName)) {
+ shortCircuitParentDocSet = parser.intValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] requires 'query' field");
+ }
+ if (innerQuery == null) {
+ return null;
+ }
+ if (childType == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] requires 'type' field");
+ }
+ innerQuery.setBoost(boost);
+
+ DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
+ if (childDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] No mapping for for type [" + childType + "]");
+ }
+ if (!childDocMapper.parentFieldMapper().active()) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] does not have parent mapping");
+ }
+ String parentType = childDocMapper.parentFieldMapper().type();
+ DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
+
+ if (parentDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]");
+ }
+
+ Filter nonNestedDocsFilter = null;
+ if (parentDocMapper.hasNestedObjects()) {
+ nonNestedDocsFilter = parseContext.cacheFilter(NonNestedDocsFilter.INSTANCE, null);
+ }
+
+ // wrap the query with type query
+ innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
+
+ boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
+ Query query;
+ Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
+ if (!deleteByQuery && scoreType != null) {
+ query = new ChildrenQuery(parentType, childType, parentFilter, innerQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
+ } else {
+ query = new ChildrenConstantScoreQuery(innerQuery, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
+ if (deleteByQuery) {
+ query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
+ }
+ }
+ if (queryName != null) {
+ parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
+ }
+ query.setBoost(boost);
+ return query;
+ }
+}
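
For reference, the JSON shape this parser accepts, captured as a Java constant (the field values are illustrative placeholders):

    public class HasChildQueryJsonExample {
        // "type", "child_type" and "childType" are synonyms, and "score_mode"
        // is accepted as an alias of "score_type".
        public static final String HAS_CHILD_QUERY =
                "{ \"has_child\" : {"
              + "    \"type\" : \"comment\","
              + "    \"score_type\" : \"sum\","
              + "    \"short_circuit_cutoff\" : 8192,"
              + "    \"query\" : { \"term\" : { \"message\" : \"elasticsearch\" } }"
              + "} }";
    }
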
diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java
new file mode 100644
index 0000000..8928713
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for the 'has_parent' filter.
+ */
+public class HasParentFilterBuilder extends BaseFilterBuilder {
+
+ private final QueryBuilder queryBuilder;
+ private final FilterBuilder filterBuilder;
+ private final String parentType;
+ private String filterName;
+
+ /**
+ * @param parentType The parent type
+ * @param parentQuery The query that will be matched with parent documents
+ */
+ public HasParentFilterBuilder(String parentType, QueryBuilder parentQuery) {
+ this.parentType = parentType;
+ this.queryBuilder = parentQuery;
+ this.filterBuilder = null;
+ }
+
+ /**
+ * @param parentType The parent type
+ * @param parentFilter The filter that will be matched with parent documents
+ */
+ public HasParentFilterBuilder(String parentType, FilterBuilder parentFilter) {
+ this.parentType = parentType;
+ this.queryBuilder = null;
+ this.filterBuilder = parentFilter;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public HasParentFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * This is a noop since has_parent can't be cached.
+ */
+ public HasParentFilterBuilder cache(boolean cache) {
+ return this;
+ }
+
+ /**
+ * This is a noop since has_parent can't be cached.
+ */
+ public HasParentFilterBuilder cacheKey(String cacheKey) {
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(HasParentFilterParser.NAME);
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else if (filterBuilder != null) {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ builder.field("parent_type", parentType);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ builder.endObject();
+ }
+}
+
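
A usage sketch (illustrative only; FilterBuilders.hasParentFilter is assumed to construct this builder):

    import org.elasticsearch.index.query.FilterBuilders;
    import org.elasticsearch.index.query.HasParentFilterBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class HasParentFilterExample {
        public static HasParentFilterBuilder childrenOfBlog() {
            // Match child docs whose "blog" parent matches the inner term query.
            return FilterBuilders.hasParentFilter("blog",
                    QueryBuilders.termQuery("tag", "something"))
                    .filterName("of_blog"); // serialized as "_name"
        }
    }
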
diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java
new file mode 100644
index 0000000..f910883
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
+import org.elasticsearch.index.search.child.DeleteByQueryWrappingFilter;
+import org.elasticsearch.index.search.child.ParentConstantScoreQuery;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Parser for the 'has_parent' filter.
+ */
+public class HasParentFilterParser implements FilterParser {
+
+ public static final String NAME = "has_parent";
+
+ @Inject
+ public HasParentFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ String parentType = null;
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ // TODO handle `query` element before `type` element...
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType});
+ try {
+ query = parseContext.parseInnerQuery();
+ queryFound = true;
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else if ("filter".equals(currentFieldName)) {
+ // TODO handle `filter` element before `type` element...
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType});
+ try {
+ Filter innerFilter = parseContext.parseInnerFilter();
+ query = new XConstantScoreQuery(innerFilter);
+ queryFound = true;
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) {
+ parentType = parser.text();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_parent] filter has been removed, use a filter as a facet_filter in the relevant global facet");
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'query' field");
+ }
+ if (query == null) {
+ return null;
+ }
+
+ if (parentType == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'parent_type' field");
+ }
+
+ DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
+ if (parentDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] filter configured 'parent_type' [" + parentType + "] is not a valid type");
+ }
+
+ // wrap the query with type query
+ query = new XFilteredQuery(query, parseContext.cacheFilter(parentDocMapper.typeFilter(), null));
+
+ Set<String> parentTypes = new HashSet<String>(5);
+ parentTypes.add(parentType);
+ for (DocumentMapper documentMapper : parseContext.mapperService()) {
+ ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
+ if (parentFieldMapper.active()) {
+ DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());
+ if (parentTypeDocumentMapper == null) {
+ // Only add this parent type if it has no mapping of its own, i.e. it cannot itself be a child of another parent.
+ parentTypes.add(parentFieldMapper.type());
+ }
+ }
+ }
+
+ Filter parentFilter;
+ if (parentTypes.size() == 1) {
+ DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypes.iterator().next());
+ parentFilter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
+ } else {
+ XBooleanFilter parentsFilter = new XBooleanFilter();
+ for (String parentTypeStr : parentTypes) {
+ DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypeStr);
+ Filter filter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
+ parentsFilter.add(filter, BooleanClause.Occur.SHOULD);
+ }
+ parentFilter = parentsFilter;
+ }
+ Filter childrenFilter = parseContext.cacheFilter(new NotFilter(parentFilter), null);
+ Query parentConstantScoreQuery = new ParentConstantScoreQuery(query, parentType, childrenFilter);
+
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentConstantScoreQuery));
+ }
+
+ boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
+ if (deleteByQuery) {
+ return new DeleteByQueryWrappingFilter(parentConstantScoreQuery);
+ } else {
+ return new CustomQueryWrappingFilter(parentConstantScoreQuery);
+ }
+ }
+
+} \ No newline at end of file
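
The JSON shape this filter parser accepts (illustrative placeholders):

    public class HasParentFilterJsonExample {
        // "_cache" and "_cache_key" are parsed but the resulting filter is
        // never cached, which is why the builder's cache methods are no-ops.
        public static final String HAS_PARENT_FILTER =
                "{ \"has_parent\" : {"
              + "    \"parent_type\" : \"blog\","
              + "    \"filter\" : { \"term\" : { \"tag\" : \"something\" } },"
              + "    \"_name\" : \"of_blog\""
              + "} }";
    }
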
diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java
new file mode 100644
index 0000000..8d2d7fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for the 'has_parent' query.
+ */
+public class HasParentQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<HasParentQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+ private final String parentType;
+ private String scoreType;
+ private float boost = 1.0f;
+ private String queryName;
+
+ /**
+ * @param parentType The parent type
+ * @param parentQuery The query that will be matched with parent documents
+ */
+ public HasParentQueryBuilder(String parentType, QueryBuilder parentQuery) {
+ this.parentType = parentType;
+ this.queryBuilder = parentQuery;
+ }
+
+ public HasParentQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Defines how the parent score is mapped into the child documents.
+ */
+ public HasParentQueryBuilder scoreType(String scoreType) {
+ this.scoreType = scoreType;
+ return this;
+ }
+
+ /**
+ * Sets the query name for this query that can be used when searching for matched_filters per hit.
+ */
+ public HasParentQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(HasParentQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ builder.field("parent_type", parentType);
+ if (scoreType != null) {
+ builder.field("score_type", scoreType);
+ }
+ if (boost != 1.0f) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+
+ builder.endObject();
+ }
+}
+
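
A usage sketch (illustrative only; QueryBuilders.hasParentQuery is assumed to construct this builder):

    import org.elasticsearch.index.query.HasParentQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class HasParentQueryExample {
        public static HasParentQueryBuilder scoredByParent() {
            // "score" propagates the matching parent's score to its children;
            // anything else falls back to a constant-score query in the parser.
            return QueryBuilders.hasParentQuery("blog",
                    QueryBuilders.termQuery("tag", "something"))
                    .scoreType("score");
        }
    }
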
diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java
new file mode 100644
index 0000000..94542ac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
+import org.elasticsearch.index.search.child.DeleteByQueryWrappingFilter;
+import org.elasticsearch.index.search.child.ParentConstantScoreQuery;
+import org.elasticsearch.index.search.child.ParentQuery;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class HasParentQueryParser implements QueryParser {
+
+ public static final String NAME = "has_parent";
+
+ @Inject
+ public HasParentQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query innerQuery = null;
+ boolean queryFound = false;
+ float boost = 1.0f;
+ String parentType = null;
+ boolean score = false;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ // TODO handle `query` element before `type` element...
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType});
+ try {
+ innerQuery = parseContext.parseInnerQuery();
+ queryFound = true;
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) {
+ parentType = parser.text();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_parent] query has been removed, use a filter as a facet_filter in the relevant global facet");
+ } else if ("score_type".equals(currentFieldName) || "scoreType".equals(currentFieldName)) {
+ String scoreTypeValue = parser.text();
+ if ("score".equals(scoreTypeValue)) {
+ score = true;
+ } else if ("none".equals(scoreTypeValue)) {
+ score = false;
+ }
+ } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
+ String scoreModeValue = parser.text();
+ if ("score".equals(scoreModeValue)) {
+ score = true;
+ } else if ("none".equals(scoreModeValue)) {
+ score = false;
+ }
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'query' field");
+ }
+ if (innerQuery == null) {
+ return null;
+ }
+
+ if (parentType == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'parent_type' field");
+ }
+
+ DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
+ if (parentDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[has_parent] query configured 'parent_type' [" + parentType + "] is not a valid type");
+ }
+
+ innerQuery.setBoost(boost);
+ // wrap the query with type query
+ innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(parentDocMapper.typeFilter(), null));
+
+ Set<String> parentTypes = new HashSet<String>(5);
+ parentTypes.add(parentType);
+ for (DocumentMapper documentMapper : parseContext.mapperService()) {
+ ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
+ if (parentFieldMapper.active()) {
+ DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());
+ if (parentTypeDocumentMapper == null) {
+ // Only add this parent type if it has no mapping of its own, i.e. it cannot itself be a child of another parent.
+ parentTypes.add(parentFieldMapper.type());
+ }
+ }
+ }
+
+ Filter parentFilter;
+ if (parentTypes.size() == 1) {
+ DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypes.iterator().next());
+ parentFilter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
+ } else {
+ XBooleanFilter parentsFilter = new XBooleanFilter();
+ for (String parentTypeStr : parentTypes) {
+ DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypeStr);
+ Filter filter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
+ parentsFilter.add(filter, BooleanClause.Occur.SHOULD);
+ }
+ parentFilter = parentsFilter;
+ }
+ Filter childrenFilter = parseContext.cacheFilter(new NotFilter(parentFilter), null);
+
+ boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
+ Query query;
+ if (!deleteByQuery && score) {
+ query = new ParentQuery(innerQuery, parentType, childrenFilter);
+ } else {
+ query = new ParentConstantScoreQuery(innerQuery, parentType, childrenFilter);
+ if (deleteByQuery) {
+ query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
+ }
+ }
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
+ }
+ return query;
+ }
+
+} \ No newline at end of file
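
The corresponding JSON shape (illustrative placeholders):

    public class HasParentQueryJsonExample {
        // "score_mode" is accepted as an alias of "score_type"; only "score"
        // and "none" are recognized, other values are silently ignored.
        public static final String HAS_PARENT_QUERY =
                "{ \"has_parent\" : {"
              + "    \"parent_type\" : \"blog\","
              + "    \"score_type\" : \"score\","
              + "    \"query\" : { \"term\" : { \"tag\" : \"something\" } }"
              + "} }";
    }
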
diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/IdsFilterBuilder.java
new file mode 100644
index 0000000..020bb6e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IdsFilterBuilder.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A filter that will return only documents matching specific ids (and a type).
+ */
+public class IdsFilterBuilder extends BaseFilterBuilder {
+
+ private final List<String> types;
+
+ private List<String> values = new ArrayList<String>();
+
+ private String filterName;
+
+ /**
+ * Create an ids filter based on the provided type(s).
+ */
+ public IdsFilterBuilder(String... types) {
+ this.types = types == null ? null : Arrays.asList(types);
+ }
+
+ /**
+ * Adds ids to the filter.
+ */
+ public IdsFilterBuilder addIds(String... ids) {
+ values.addAll(Arrays.asList(ids));
+ return this;
+ }
+
+ /**
+ * Adds ids to the filter.
+ */
+ public IdsFilterBuilder ids(String... ids) {
+ return addIds(ids);
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public IdsFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(IdsFilterParser.NAME);
+ if (types != null) {
+ if (types.size() == 1) {
+ builder.field("type", types.get(0));
+ } else {
+ builder.startArray("types");
+ for (Object type : types) {
+ builder.value(type);
+ }
+ builder.endArray();
+ }
+ }
+ builder.startArray("values");
+ for (Object value : values) {
+ builder.value(value);
+ }
+ builder.endArray();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+
+ builder.endObject();
+ }
+} \ No newline at end of file
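
A usage sketch (illustrative only; FilterBuilders.idsFilter is assumed to construct this builder):

    import org.elasticsearch.index.query.FilterBuilders;
    import org.elasticsearch.index.query.IdsFilterBuilder;

    public class IdsFilterExample {
        public static IdsFilterBuilder twoBlogPosts() {
            // Restrict results to two concrete documents of type "blog".
            return FilterBuilders.idsFilter("blog").addIds("1", "4");
        }
    }
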
diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java
new file mode 100644
index 0000000..5eec1bb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+public class IdsFilterParser implements FilterParser {
+
+ public static final String NAME = "ids";
+
+ @Inject
+ public IdsFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ List<BytesRef> ids = new ArrayList<BytesRef>();
+ Collection<String> types = null;
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ boolean idsProvided = false;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("values".equals(currentFieldName)) {
+ idsProvided = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ BytesRef value = parser.bytesOrNull();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
+ }
+ ids.add(value);
+ }
+ } else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) {
+ types = new ArrayList<String>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String value = parser.textOrNull();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
+ }
+ types.add(value);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
+ types = ImmutableList.of(parser.text());
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (!idsProvided) {
+ throw new QueryParsingException(parseContext.index(), "[ids] filter requires providing a values element");
+ }
+
+ if (ids.isEmpty()) {
+ return Queries.MATCH_NO_FILTER;
+ }
+
+ if (types == null || types.isEmpty()) {
+ types = parseContext.queryTypes();
+ } else if (types.size() == 1 && Iterables.getFirst(types, null).equals("_all")) {
+ types = parseContext.mapperService().types();
+ }
+
+ XTermsFilter filter = new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
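
The JSON shape this filter parser accepts (illustrative placeholders):

    public class IdsFilterJsonExample {
        // "type" takes a single string or an array ("types"); an empty
        // "values" array short-circuits to a match-nothing filter.
        public static final String IDS_FILTER =
                "{ \"ids\" : {"
              + "    \"type\" : \"blog\","
              + "    \"values\" : [ \"1\", \"4\", \"100\" ]"
              + "} }";
    }
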
diff --git a/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
new file mode 100644
index 0000000..83fab89
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A query that will return only documents matching specific ids (and a type).
+ */
+public class IdsQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<IdsQueryBuilder> {
+
+ private final List<String> types;
+
+ private List<String> values = new ArrayList<String>();
+
+ private float boost = -1;
+
+ private String queryName;
+
+ public IdsQueryBuilder(String... types) {
+ this.types = types == null ? null : Arrays.asList(types);
+ }
+
+ /**
+ * Adds ids to the query.
+ */
+ public IdsQueryBuilder addIds(String... ids) {
+ values.addAll(Arrays.asList(ids));
+ return this;
+ }
+
+ /**
+ * Adds ids to the query.
+ */
+ public IdsQueryBuilder ids(String... ids) {
+ return addIds(ids);
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public IdsQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for this query that can be used when searching for matched_filters per hit.
+ */
+ public IdsQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(IdsQueryParser.NAME);
+ if (types != null) {
+ if (types.size() == 1) {
+ builder.field("type", types.get(0));
+ } else {
+ builder.startArray("types");
+ for (Object type : types) {
+ builder.value(type);
+ }
+ builder.endArray();
+ }
+ }
+ builder.startArray("values");
+ for (Object value : values) {
+ builder.value(value);
+ }
+ builder.endArray();
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
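
A usage sketch (illustrative only; QueryBuilders.idsQuery is assumed to construct this builder):

    import org.elasticsearch.index.query.IdsQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class IdsQueryExample {
        public static IdsQueryBuilder twoBlogPosts() {
            return QueryBuilders.idsQuery("blog")
                    .addIds("1", "4")
                    .boost(2.0f); // -1 is the unset sentinel, so this is serialized
        }
    }
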
diff --git a/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java b/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java
new file mode 100644
index 0000000..cf009f4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Parser for the 'ids' query.
+ */
+public class IdsQueryParser implements QueryParser {
+
+ public static final String NAME = "ids";
+
+ @Inject
+ public IdsQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ List<BytesRef> ids = new ArrayList<BytesRef>();
+ Collection<String> types = null;
+ String currentFieldName = null;
+ float boost = 1.0f;
+ String queryName = null;
+ XContentParser.Token token;
+ boolean idsProvided = false;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("values".equals(currentFieldName)) {
+ idsProvided = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ BytesRef value = parser.bytesOrNull();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
+ }
+ ids.add(value);
+ }
+ } else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) {
+ types = new ArrayList<String>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String value = parser.textOrNull();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
+ }
+ types.add(value);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
+ types = ImmutableList.of(parser.text());
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (!idsProvided) {
+ throw new QueryParsingException(parseContext.index(), "[ids] query, no ids values provided");
+ }
+
+ if (ids.isEmpty()) {
+ return Queries.newMatchNoDocsQuery();
+ }
+
+ if (types == null || types.isEmpty()) {
+ types = parseContext.queryTypes();
+ } else if (types.size() == 1 && Iterables.getFirst(types, null).equals("_all")) {
+ types = parseContext.mapperService().types();
+ }
+
+ XTermsFilter filter = new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
+ // no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
+ ConstantScoreQuery query = new ConstantScoreQuery(filter);
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
+
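
The corresponding JSON shape (illustrative placeholders):

    public class IdsQueryJsonExample {
        // A single type "_all" expands to every type known to the mapper
        // service; omitting types falls back to the current query context.
        public static final String IDS_QUERY =
                "{ \"ids\" : {"
              + "    \"types\" : [ \"_all\" ],"
              + "    \"values\" : [ \"1\", \"4\" ]"
              + "} }";
    }
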
diff --git a/src/main/java/org/elasticsearch/index/query/IndexQueryParserModule.java b/src/main/java/org/elasticsearch/index/query/IndexQueryParserModule.java
new file mode 100644
index 0000000..101e123
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IndexQueryParserModule.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.LinkedList;
+import java.util.Map;
+
+/**
+ * Module that binds the query and filter parsers available to an index.
+ */
+public class IndexQueryParserModule extends AbstractModule {
+
+ /**
+ * A custom processor that can be extended to process and bind custom implementations of
+ * {@link QueryParserFactory} and {@link FilterParser}.
+ */
+ public static class QueryParsersProcessor {
+
+ /**
+ * Extension point to bind a custom {@link QueryParserFactory}.
+ */
+ public void processXContentQueryParsers(XContentQueryParsersBindings bindings) {
+
+ }
+
+ public static class XContentQueryParsersBindings {
+
+ private final MapBinder<String, QueryParserFactory> binder;
+ private final Map<String, Settings> groupSettings;
+
+ public XContentQueryParsersBindings(MapBinder<String, QueryParserFactory> binder, Map<String, Settings> groupSettings) {
+ this.binder = binder;
+ this.groupSettings = groupSettings;
+ }
+
+ public MapBinder<String, QueryParserFactory> binder() {
+ return binder;
+ }
+
+ public Map<String, Settings> groupSettings() {
+ return groupSettings;
+ }
+
+ public void processXContentQueryParser(String name, Class<? extends QueryParser> xcontentQueryParser) {
+ if (!groupSettings.containsKey(name)) {
+ binder.addBinding(name).toProvider(FactoryProvider.newFactory(QueryParserFactory.class, xcontentQueryParser)).in(Scopes.SINGLETON);
+ }
+ }
+ }
+
+ /**
+ * Extension point to bind a custom {@link FilterParserFactory}.
+ */
+ public void processXContentFilterParsers(XContentFilterParsersBindings bindings) {
+
+ }
+
+ public static class XContentFilterParsersBindings {
+
+ private final MapBinder<String, FilterParserFactory> binder;
+ private final Map<String, Settings> groupSettings;
+
+ public XContentFilterParsersBindings(MapBinder<String, FilterParserFactory> binder, Map<String, Settings> groupSettings) {
+ this.binder = binder;
+ this.groupSettings = groupSettings;
+ }
+
+ public MapBinder<String, FilterParserFactory> binder() {
+ return binder;
+ }
+
+ public Map<String, Settings> groupSettings() {
+ return groupSettings;
+ }
+
+ public void processXContentQueryFilter(String name, Class<? extends FilterParser> xcontentFilterParser) {
+ if (!groupSettings.containsKey(name)) {
+ binder.addBinding(name).toProvider(FactoryProvider.newFactory(FilterParserFactory.class, xcontentFilterParser)).in(Scopes.SINGLETON);
+ }
+ }
+ }
+ }
+
+ private final Settings settings;
+
+ private final LinkedList<QueryParsersProcessor> processors = Lists.newLinkedList();
+
+ private final Map<String, Class<? extends QueryParser>> queries = Maps.newHashMap();
+ private final Map<String, Class<? extends FilterParser>> filters = Maps.newHashMap();
+
+ public IndexQueryParserModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ /**
+ * Adds a custom query parser.
+ *
+ * @param name The name of the query parser
+ * @param queryParser the class of the query parser
+ */
+ public void addQueryParser(String name, Class<? extends QueryParser> queryParser) {
+ queries.put(name, queryParser);
+ }
+
+ /**
+ * Adds a custom filter parser.
+ *
+ * @param name The name of the filter parser
+ * @param filterParser the class of the filter parser
+ */
+ public void addFilterParser(String name, Class<? extends FilterParser> filterParser) {
+ filters.put(name, filterParser);
+ }
+
+ public IndexQueryParserModule addProcessor(QueryParsersProcessor processor) {
+ processors.addFirst(processor);
+ return this;
+ }
+
+ @Override
+ protected void configure() {
+
+ bind(IndexQueryParserService.class).asEagerSingleton();
+
+ // handle XContentQueryParsers
+ MapBinder<String, QueryParserFactory> queryBinder
+ = MapBinder.newMapBinder(binder(), String.class, QueryParserFactory.class);
+ Map<String, Settings> xContentQueryParserGroups = settings.getGroups(IndexQueryParserService.Defaults.QUERY_PREFIX);
+ for (Map.Entry<String, Settings> entry : xContentQueryParserGroups.entrySet()) {
+ String qName = entry.getKey();
+ Settings qSettings = entry.getValue();
+ Class<? extends QueryParser> type = qSettings.getAsClass("type", null);
+ if (type == null) {
+ throw new IllegalArgumentException("Query Parser [" + qName + "] must be provided with a type");
+ }
+ queryBinder.addBinding(qName).toProvider(FactoryProvider.newFactory(QueryParserFactory.class,
+ type)).in(Scopes.SINGLETON);
+ }
+
+ QueryParsersProcessor.XContentQueryParsersBindings xContentQueryParsersBindings = new QueryParsersProcessor.XContentQueryParsersBindings(queryBinder, xContentQueryParserGroups);
+ for (QueryParsersProcessor processor : processors) {
+ processor.processXContentQueryParsers(xContentQueryParsersBindings);
+ }
+
+ for (Map.Entry<String, Class<? extends QueryParser>> entry : queries.entrySet()) {
+ queryBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(QueryParserFactory.class, entry.getValue())).in(Scopes.SINGLETON);
+ }
+
+ // handle XContentFilterParsers
+ MapBinder<String, FilterParserFactory> filterBinder
+ = MapBinder.newMapBinder(binder(), String.class, FilterParserFactory.class);
+ Map<String, Settings> xContentFilterParserGroups = settings.getGroups(IndexQueryParserService.Defaults.FILTER_PREFIX);
+ for (Map.Entry<String, Settings> entry : xContentFilterParserGroups.entrySet()) {
+ String fName = entry.getKey();
+ Settings fSettings = entry.getValue();
+ Class<? extends FilterParser> type = fSettings.getAsClass("type", null);
+ if (type == null) {
+ throw new IllegalArgumentException("Filter Parser [" + fName + "] must be provided with a type");
+ }
+            filterBinder.addBinding(fName).toProvider(FactoryProvider.newFactory(FilterParserFactory.class, type)).in(Scopes.SINGLETON);
+ }
+
+ QueryParsersProcessor.XContentFilterParsersBindings xContentFilterParsersBindings = new QueryParsersProcessor.XContentFilterParsersBindings(filterBinder, xContentFilterParserGroups);
+ for (QueryParsersProcessor processor : processors) {
+ processor.processXContentFilterParsers(xContentFilterParsersBindings);
+ }
+
+ for (Map.Entry<String, Class<? extends FilterParser>> entry : filters.entrySet()) {
+ filterBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(FilterParserFactory.class, entry.getValue())).in(Scopes.SINGLETON);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java
new file mode 100644
index 0000000..3701f1a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java
@@ -0,0 +1,339 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.CloseableThreadLocal;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.script.ScriptService;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ *
+ */
+public class IndexQueryParserService extends AbstractIndexComponent {
+
+ public static final class Defaults {
+ public static final String QUERY_PREFIX = "index.queryparser.query";
+ public static final String FILTER_PREFIX = "index.queryparser.filter";
+ }
+
+ private CloseableThreadLocal<QueryParseContext> cache = new CloseableThreadLocal<QueryParseContext>() {
+ @Override
+ protected QueryParseContext initialValue() {
+ return new QueryParseContext(index, IndexQueryParserService.this);
+ }
+ };
+
+ final CacheRecycler cacheRecycler;
+
+ final AnalysisService analysisService;
+
+ final ScriptService scriptService;
+
+ final MapperService mapperService;
+
+ final SimilarityService similarityService;
+
+ final IndexCache indexCache;
+
+ final IndexFieldDataService fieldDataService;
+
+ final IndexEngine indexEngine;
+
+ private final Map<String, QueryParser> queryParsers;
+
+ private final Map<String, FilterParser> filterParsers;
+
+ private String defaultField;
+ private boolean queryStringLenient;
+ private final boolean strict;
+
+ @Inject
+ public IndexQueryParserService(Index index, @IndexSettings Settings indexSettings,
+ IndicesQueriesRegistry indicesQueriesRegistry, CacheRecycler cacheRecycler,
+ ScriptService scriptService, AnalysisService analysisService,
+ MapperService mapperService, IndexCache indexCache, IndexFieldDataService fieldDataService, IndexEngine indexEngine,
+ @Nullable SimilarityService similarityService,
+ @Nullable Map<String, QueryParserFactory> namedQueryParsers,
+ @Nullable Map<String, FilterParserFactory> namedFilterParsers) {
+ super(index, indexSettings);
+ this.cacheRecycler = cacheRecycler;
+ this.scriptService = scriptService;
+ this.analysisService = analysisService;
+ this.mapperService = mapperService;
+ this.similarityService = similarityService;
+ this.indexCache = indexCache;
+ this.fieldDataService = fieldDataService;
+ this.indexEngine = indexEngine;
+
+ this.defaultField = indexSettings.get("index.query.default_field", AllFieldMapper.NAME);
+ this.queryStringLenient = indexSettings.getAsBoolean("index.query_string.lenient", false);
+ this.strict = indexSettings.getAsBoolean("index.query.parse.strict", false);
+
+ List<QueryParser> queryParsers = newArrayList();
+ if (namedQueryParsers != null) {
+ Map<String, Settings> queryParserGroups = indexSettings.getGroups(IndexQueryParserService.Defaults.QUERY_PREFIX);
+ for (Map.Entry<String, QueryParserFactory> entry : namedQueryParsers.entrySet()) {
+ String queryParserName = entry.getKey();
+ QueryParserFactory queryParserFactory = entry.getValue();
+ Settings queryParserSettings = queryParserGroups.get(queryParserName);
+ if (queryParserSettings == null) {
+ queryParserSettings = EMPTY_SETTINGS;
+ }
+ queryParsers.add(queryParserFactory.create(queryParserName, queryParserSettings));
+ }
+ }
+
+ Map<String, QueryParser> queryParsersMap = newHashMap();
+ queryParsersMap.putAll(indicesQueriesRegistry.queryParsers());
+        for (QueryParser queryParser : queryParsers) {
+            add(queryParsersMap, queryParser);
+        }
+ this.queryParsers = ImmutableMap.copyOf(queryParsersMap);
+
+ List<FilterParser> filterParsers = newArrayList();
+ if (namedFilterParsers != null) {
+ Map<String, Settings> filterParserGroups = indexSettings.getGroups(IndexQueryParserService.Defaults.FILTER_PREFIX);
+ for (Map.Entry<String, FilterParserFactory> entry : namedFilterParsers.entrySet()) {
+ String filterParserName = entry.getKey();
+ FilterParserFactory filterParserFactory = entry.getValue();
+ Settings filterParserSettings = filterParserGroups.get(filterParserName);
+ if (filterParserSettings == null) {
+ filterParserSettings = EMPTY_SETTINGS;
+ }
+ filterParsers.add(filterParserFactory.create(filterParserName, filterParserSettings));
+ }
+ }
+
+ Map<String, FilterParser> filterParsersMap = newHashMap();
+ filterParsersMap.putAll(indicesQueriesRegistry.filterParsers());
+        for (FilterParser filterParser : filterParsers) {
+            add(filterParsersMap, filterParser);
+        }
+ this.filterParsers = ImmutableMap.copyOf(filterParsersMap);
+ }
+
+ public void close() {
+ cache.close();
+ }
+
+ public String defaultField() {
+ return this.defaultField;
+ }
+
+ public boolean queryStringLenient() {
+ return this.queryStringLenient;
+ }
+
+ public QueryParser queryParser(String name) {
+ return queryParsers.get(name);
+ }
+
+ public FilterParser filterParser(String name) {
+ return filterParsers.get(name);
+ }
+
+ public ParsedQuery parse(QueryBuilder queryBuilder) throws ElasticsearchException {
+ XContentParser parser = null;
+ try {
+ BytesReference bytes = queryBuilder.buildAsBytes();
+ parser = XContentFactory.xContent(bytes).createParser(bytes);
+ return parse(cache.get(), parser);
+ } catch (QueryParsingException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new QueryParsingException(index, "Failed to parse", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ public ParsedQuery parse(byte[] source) throws ElasticsearchException {
+ return parse(source, 0, source.length);
+ }
+
+ public ParsedQuery parse(byte[] source, int offset, int length) throws ElasticsearchException {
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length);
+ return parse(cache.get(), parser);
+ } catch (QueryParsingException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new QueryParsingException(index, "Failed to parse", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ public ParsedQuery parse(BytesReference source) throws ElasticsearchException {
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(source).createParser(source);
+ return parse(cache.get(), parser);
+ } catch (QueryParsingException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new QueryParsingException(index, "Failed to parse", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ public ParsedQuery parse(String source) throws QueryParsingException {
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(source).createParser(source);
+ return parse(cache.get(), parser);
+ } catch (QueryParsingException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new QueryParsingException(index, "Failed to parse [" + source + "]", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ public ParsedQuery parse(XContentParser parser) {
+ try {
+ return parse(cache.get(), parser);
+ } catch (IOException e) {
+ throw new QueryParsingException(index, "Failed to parse", e);
+ }
+ }
+
+ /**
+ * Parses an inner filter, returning null if the filter should be ignored.
+ */
+ @Nullable
+ public ParsedFilter parseInnerFilter(XContentParser parser) throws IOException {
+ QueryParseContext context = cache.get();
+ context.reset(parser);
+ Filter filter = context.parseInnerFilter();
+ if (filter == null) {
+ return null;
+ }
+ return new ParsedFilter(filter, context.copyNamedFilters());
+ }
+
+ @Nullable
+ public Query parseInnerQuery(XContentParser parser) throws IOException {
+ QueryParseContext context = cache.get();
+ context.reset(parser);
+ return context.parseInnerQuery();
+ }
+
+ /**
+     * Selectively parses a query from a top-level <tt>query</tt> or <tt>query_binary</tt> JSON field in the specified source.
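+     * <p>
+     * For example, the source may look like <tt>{"query": {"match_all": {}}}</tt> or
+     * <tt>{"query_binary": "..."}</tt> (the camel-case <tt>queryBinary</tt> form is also accepted).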
+ */
+ public ParsedQuery parseQuery(BytesReference source) {
+ try {
+ XContentParser parser = XContentHelper.createParser(source);
+ for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String fieldName = parser.currentName();
+ if ("query".equals(fieldName)) {
+ return parse(parser);
+ } else if ("query_binary".equals(fieldName) || "queryBinary".equals(fieldName)) {
+ byte[] querySource = parser.binaryValue();
+ XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource);
+ return parse(qSourceParser);
+ } else {
+ throw new QueryParsingException(index(), "request does not support [" + fieldName + "]");
+ }
+ }
+ }
+ } catch (QueryParsingException e) {
+ throw e;
+ } catch (Throwable e) {
+ throw new QueryParsingException(index, "Failed to parse", e);
+ }
+
+ throw new QueryParsingException(index(), "Required query is missing");
+ }
+
+ private ParsedQuery parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
+ parseContext.reset(parser);
+ if (strict) {
+ parseContext.parseFlags(EnumSet.of(ParseField.Flag.STRICT));
+ }
+ Query query = parseContext.parseInnerQuery();
+ if (query == null) {
+ query = Queries.newMatchNoDocsQuery();
+ }
+ return new ParsedQuery(query, parseContext.copyNamedFilters());
+ }
+
+ private void add(Map<String, FilterParser> map, FilterParser filterParser) {
+ for (String name : filterParser.names()) {
+ map.put(name.intern(), filterParser);
+ }
+ }
+
+ private void add(Map<String, QueryParser> map, QueryParser queryParser) {
+ for (String name : queryParser.names()) {
+ map.put(name.intern(), queryParser);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/IndicesFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/IndicesFilterBuilder.java
new file mode 100644
index 0000000..3ffed45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IndicesFilterBuilder.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that will execute the wrapped filter only for the specified indices, and "match_all" when
+ * it does not match those indices (by default).
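+ * <p>
+ * A minimal usage sketch (index names are illustrative; any {@link FilterBuilder} can be wrapped):
+ * <pre>
+ * FilterBuilder filter = new IndicesFilterBuilder(
+ *         new LimitFilterBuilder(100), "index-a", "index-b")
+ *         .noMatchFilter("none");
+ * </pre>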
+ */
+public class IndicesFilterBuilder extends BaseFilterBuilder {
+
+ private final FilterBuilder filterBuilder;
+
+ private final String[] indices;
+
+ private String sNoMatchFilter;
+ private FilterBuilder noMatchFilter;
+
+ private String filterName;
+
+ public IndicesFilterBuilder(FilterBuilder filterBuilder, String... indices) {
+ this.filterBuilder = filterBuilder;
+ this.indices = indices;
+ }
+
+ /**
+     * Sets the no match filter; it can be either <tt>all</tt> or <tt>none</tt>.
+ */
+ public IndicesFilterBuilder noMatchFilter(String type) {
+ this.sNoMatchFilter = type;
+ return this;
+ }
+
+ /**
+ * Sets the filter to use when it executes on an index that does not match the indices provided.
+ */
+ public IndicesFilterBuilder noMatchFilter(FilterBuilder noMatchFilter) {
+ this.noMatchFilter = noMatchFilter;
+ return this;
+ }
+
+ /**
+     * Sets a name for the filter, which can then be used when searching for matched_filters per hit.
+ */
+ public IndicesFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(IndicesFilterParser.NAME);
+ builder.field("indices", indices);
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ if (noMatchFilter != null) {
+ builder.field("no_match_filter");
+ noMatchFilter.toXContent(builder, params);
+ } else if (sNoMatchFilter != null) {
+ builder.field("no_match_filter", sNoMatchFilter);
+ }
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java b/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java
new file mode 100644
index 0000000..3708612
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ */
+public class IndicesFilterParser implements FilterParser {
+
+ public static final String NAME = "indices";
+
+ @Nullable
+ private final ClusterService clusterService;
+
+ @Inject
+ public IndicesFilterParser(@Nullable ClusterService clusterService) {
+ this.clusterService = clusterService;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Filter filter = null;
+ Filter noMatchFilter = Queries.MATCH_ALL_FILTER;
+ boolean filterFound = false;
+ boolean indicesFound = false;
+ boolean currentIndexMatchesIndices = false;
+ String filterName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filterFound = true;
+                    //TODO We can only decide whether to parse or skip the filter if the 'indices' element appears first in the query
+ if (indicesFound && !currentIndexMatchesIndices) {
+ parseContext.parser().skipChildren(); // skip the filter object without parsing it
+ } else {
+ filter = parseContext.parseInnerFilter();
+ }
+ } else if ("no_match_filter".equals(currentFieldName)) {
+ if (indicesFound && currentIndexMatchesIndices) {
+ parseContext.parser().skipChildren(); // skip the filter object without parsing it
+ } else {
+ noMatchFilter = parseContext.parseInnerFilter();
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("indices".equals(currentFieldName)) {
+ if (indicesFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified");
+ }
+ indicesFound = true;
+ Collection<String> indices = new ArrayList<String>();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ String value = parser.textOrNull();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry");
+ }
+ indices.add(value);
+ }
+ currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()]));
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ if (indicesFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified");
+ }
+ indicesFound = true;
+ currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text());
+ } else if ("no_match_filter".equals(currentFieldName)) {
+ String type = parser.text();
+ if ("all".equals(type)) {
+ noMatchFilter = Queries.MATCH_ALL_FILTER;
+ } else if ("none".equals(type)) {
+ noMatchFilter = Queries.MATCH_NO_FILTER;
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!filterFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] requires 'filter' element");
+ }
+ if (!indicesFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element");
+ }
+
+ Filter chosenFilter;
+ if (currentIndexMatchesIndices) {
+ chosenFilter = filter;
+ } else {
+ chosenFilter = noMatchFilter;
+ }
+
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, chosenFilter);
+ }
+
+ return chosenFilter;
+ }
+
+ protected boolean matchesIndices(String currentIndex, String... indices) {
+ final String[] concreteIndices = clusterService.state().metaData().concreteIndicesIgnoreMissing(indices);
+ for (String index : concreteIndices) {
+ if (Regex.simpleMatch(index, currentIndex)) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java
new file mode 100644
index 0000000..5b1434a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query that will execute the wrapped query only for the specified indices, and "match_all" when
+ * it does not match those indices (by default).
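+ * <p>
+ * A minimal usage sketch (index, field, and text values are illustrative):
+ * <pre>
+ * QueryBuilder query = new IndicesQueryBuilder(
+ *         new MatchQueryBuilder("user", "kimchy"), "index-a", "index-b")
+ *         .noMatchQuery("none");
+ * </pre>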
+ */
+public class IndicesQueryBuilder extends BaseQueryBuilder {
+
+ private final QueryBuilder queryBuilder;
+
+ private final String[] indices;
+
+ private String sNoMatchQuery;
+ private QueryBuilder noMatchQuery;
+
+ private String queryName;
+
+ public IndicesQueryBuilder(QueryBuilder queryBuilder, String... indices) {
+ this.queryBuilder = queryBuilder;
+ this.indices = indices;
+ }
+
+ /**
+     * Sets the no match query; it can be either <tt>all</tt> or <tt>none</tt>.
+ */
+ public IndicesQueryBuilder noMatchQuery(String type) {
+ this.sNoMatchQuery = type;
+ return this;
+ }
+
+ /**
+ * Sets the query to use when it executes on an index that does not match the indices provided.
+ */
+ public IndicesQueryBuilder noMatchQuery(QueryBuilder noMatchQuery) {
+ this.noMatchQuery = noMatchQuery;
+ return this;
+ }
+
+ /**
+     * Sets a name for the query, which can then be used when searching for matched_filters per hit.
+ */
+ public IndicesQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(IndicesQueryParser.NAME);
+ builder.field("indices", indices);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ if (noMatchQuery != null) {
+ builder.field("no_match_query");
+ noMatchQuery.toXContent(builder, params);
+ } else if (sNoMatchQuery != null) {
+ builder.field("no_match_query", sNoMatchQuery);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java b/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java
new file mode 100644
index 0000000..f235bbc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ */
+public class IndicesQueryParser implements QueryParser {
+
+ public static final String NAME = "indices";
+
+ @Nullable
+ private final ClusterService clusterService;
+
+ @Inject
+ public IndicesQueryParser(@Nullable ClusterService clusterService) {
+ this.clusterService = clusterService;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ Query noMatchQuery = Queries.newMatchAllQuery();
+ boolean queryFound = false;
+ boolean indicesFound = false;
+ boolean currentIndexMatchesIndices = false;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+                    //TODO We can only decide whether to parse or skip the query if the 'indices' element appears first in the query
+ queryFound = true;
+ if (indicesFound && !currentIndexMatchesIndices) {
+ parseContext.parser().skipChildren(); // skip the query object without parsing it
+ } else {
+ query = parseContext.parseInnerQuery();
+ }
+ } else if ("no_match_query".equals(currentFieldName)) {
+ if (indicesFound && currentIndexMatchesIndices) {
+ parseContext.parser().skipChildren(); // skip the query object without parsing it
+ } else {
+ noMatchQuery = parseContext.parseInnerQuery();
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("indices".equals(currentFieldName)) {
+ if (indicesFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified");
+ }
+ indicesFound = true;
+ Collection<String> indices = new ArrayList<String>();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ String value = parser.textOrNull();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry");
+ }
+ indices.add(value);
+ }
+ currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()]));
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ if (indicesFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified");
+ }
+ indicesFound = true;
+ currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text());
+ } else if ("no_match_query".equals(currentFieldName)) {
+ String type = parser.text();
+ if ("all".equals(type)) {
+ noMatchQuery = Queries.newMatchAllQuery();
+ } else if ("none".equals(type)) {
+ noMatchQuery = Queries.newMatchNoDocsQuery();
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] requires 'query' element");
+ }
+ if (!indicesFound) {
+ throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element");
+ }
+
+ Query chosenQuery;
+ if (currentIndexMatchesIndices) {
+ chosenQuery = query;
+ } else {
+ chosenQuery = noMatchQuery;
+ }
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, chosenQuery);
+ }
+ return chosenQuery;
+ }
+
+ protected boolean matchesIndices(String currentIndex, String... indices) {
+ final String[] concreteIndices = clusterService.state().metaData().concreteIndicesIgnoreMissing(indices);
+ for (String index : concreteIndices) {
+ if (Regex.simpleMatch(index, currentIndex)) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/LimitFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/LimitFilterBuilder.java
new file mode 100644
index 0000000..31e3446
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/LimitFilterBuilder.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
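+/**
+ * A builder for the <tt>limit</tt> filter, which limits the number of documents
+ * (per shard) that are evaluated. A minimal usage sketch (the limit value is
+ * illustrative):
+ * <pre>
+ * FilterBuilder filter = new LimitFilterBuilder(100);
+ * </pre>
+ */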
+public class LimitFilterBuilder extends BaseFilterBuilder {
+
+ private final int limit;
+
+ public LimitFilterBuilder(int limit) {
+ this.limit = limit;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(LimitFilterParser.NAME);
+ builder.field("value", limit);
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java b/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java
new file mode 100644
index 0000000..e82563f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.LimitFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class LimitFilterParser implements FilterParser {
+
+ public static final String NAME = "limit";
+
+ @Inject
+ public LimitFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ int limit = -1;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("value".equals(currentFieldName)) {
+ limit = parser.intValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[limit] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (limit == -1) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for limit filter");
+ }
+
+ return new LimitFilter(limit);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/MatchAllFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/MatchAllFilterBuilder.java
new file mode 100644
index 0000000..ebb4ffd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MatchAllFilterBuilder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that matches all documents.
+ */
+public class MatchAllFilterBuilder extends BaseFilterBuilder {
+
+ /**
+ * A filter that simply matches all docs.
+ */
+ public MatchAllFilterBuilder() {
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MatchAllFilterParser.NAME).endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/MatchAllFilterParser.java b/src/main/java/org/elasticsearch/index/query/MatchAllFilterParser.java
new file mode 100644
index 0000000..a10f37d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MatchAllFilterParser.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MatchAllFilterParser implements FilterParser {
+
+ public static final String NAME = "match_all";
+
+ @Inject
+ public MatchAllFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token;
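+        // drain any remaining tokens; the match_all filter takes no parameters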
+ while (((token = parser.nextToken()) != XContentParser.Token.END_OBJECT && token != XContentParser.Token.END_ARRAY)) {
+ }
+
+ return Queries.MATCH_ALL_FILTER;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java
new file mode 100644
index 0000000..cb7a8ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query that matches all documents.
+ *
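+ * <p>
+ * A minimal usage sketch:
+ * <pre>
+ * QueryBuilder query = new MatchAllQueryBuilder().boost(1.2f);
+ * </pre>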
+ */
+public class MatchAllQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<MatchAllQueryBuilder> {
+
+ private String normsField;
+
+ private float boost = -1;
+
+ /**
+ * Field used for normalization factor (document boost). Defaults to no field.
+ */
+ public MatchAllQueryBuilder normsField(String normsField) {
+ this.normsField = normsField;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public MatchAllQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MatchAllQueryParser.NAME);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (normsField != null) {
+ builder.field("norms_field", normsField);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java b/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java
new file mode 100644
index 0000000..ded63f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MatchAllQueryParser implements QueryParser {
+
+ public static final String NAME = "match_all";
+
+ @Inject
+ public MatchAllQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+ String currentFieldName = null;
+
+ XContentParser.Token token;
+ while (((token = parser.nextToken()) != XContentParser.Token.END_OBJECT && token != XContentParser.Token.END_ARRAY)) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[match_all] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (boost == 1.0f) {
+ return Queries.newMatchAllQuery();
+ }
+
+ //LUCENE 4 UPGRADE norms field is not supported anymore need to find another way or drop the functionality
+ //MatchAllDocsQuery query = new MatchAllDocsQuery(normsField);
+ MatchAllDocsQuery query = new MatchAllDocsQuery();
+ query.setBoost(boost);
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java
new file mode 100644
index 0000000..ba98508
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ * A query that analyzes the given text and constructs a query from the result of the analysis.
+ * Depending on the {@link Type} provided, it can build boolean, phrase, or phrase-prefix queries.
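+ * <p>
+ * A minimal usage sketch (field name and text are illustrative):
+ * <pre>
+ * QueryBuilder query = new MatchQueryBuilder("message", "quick brown fox")
+ *         .operator(MatchQueryBuilder.Operator.AND);
+ * </pre>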
+ */
+public class MatchQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<MatchQueryBuilder> {
+
+ public static enum Operator {
+ OR,
+ AND
+ }
+
+ public static enum Type {
+ /**
+ * The text is analyzed and terms are added to a boolean query.
+ */
+ BOOLEAN,
+ /**
+ * The text is analyzed and used as a phrase query.
+ */
+ PHRASE,
+ /**
+ * The text is analyzed and used in a phrase query, with the last term acting as a prefix.
+ */
+ PHRASE_PREFIX
+ }
+
+ public static enum ZeroTermsQuery {
+ NONE,
+ ALL
+ }
+
+ private final String name;
+
+ private final Object text;
+
+ private Type type;
+
+ private Operator operator;
+
+ private String analyzer;
+
+ private Float boost;
+
+ private Integer slop;
+
+ private Fuzziness fuzziness;
+
+ private Integer prefixLength;
+
+ private Integer maxExpansions;
+
+ private String minimumShouldMatch;
+
+ private String rewrite = null;
+
+ private String fuzzyRewrite = null;
+
+ private Boolean lenient;
+
+ private Boolean fuzzyTranspositions = null;
+
+ private ZeroTermsQuery zeroTermsQuery;
+
+    private Float cutoffFrequency = null;
+
+ private String queryName;
+
+ /**
+ * Constructs a new text query.
+ */
+ public MatchQueryBuilder(String name, Object text) {
+ this.name = name;
+ this.text = text;
+ }
+
+ /**
+ * Sets the type of the text query.
+ */
+ public MatchQueryBuilder type(Type type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * Sets the operator to use when using a boolean query. Defaults to <tt>OR</tt>.
+ */
+ public MatchQueryBuilder operator(Operator operator) {
+ this.operator = operator;
+ return this;
+ }
+
+ /**
+     * Explicitly set the analyzer to use. Defaults to the analyzer configured in the field's mapping,
+     * or, if none is set, the default search analyzer.
+ */
+ public MatchQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ /**
+ * Set the boost to apply to the query.
+ */
+ public MatchQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Set the phrase slop if evaluated to a phrase query type.
+ */
+ public MatchQueryBuilder slop(int slop) {
+ this.slop = slop;
+ return this;
+ }
+
+ /**
+ * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO".
+ */
+ public MatchQueryBuilder fuzziness(Object fuzziness) {
+ this.fuzziness = Fuzziness.build(fuzziness);
+ return this;
+ }
+
+ public MatchQueryBuilder prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ /**
+     * When using a fuzzy or prefix type query, the number of term expansions to use. Defaults to unbounded,
+     * so it is recommended to set it to a reasonable value for faster execution.
+ */
+ public MatchQueryBuilder maxExpansions(int maxExpansions) {
+ this.maxExpansions = maxExpansions;
+ return this;
+ }
+
+ /**
+     * Sets a cutoff value in [0..1] (or an absolute number >= 1) representing the
+     * maximum threshold of a term's document frequency for it to be considered a
+     * low-frequency term.
+ */
+ public MatchQueryBuilder cutoffFrequency(float cutoff) {
+        this.cutoffFrequency = cutoff;
+ return this;
+ }
+
+ public MatchQueryBuilder minimumShouldMatch(String minimumShouldMatch) {
+ this.minimumShouldMatch = minimumShouldMatch;
+ return this;
+ }
+
+ public MatchQueryBuilder rewrite(String rewrite) {
+ this.rewrite = rewrite;
+ return this;
+ }
+
+ public MatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
+ this.fuzzyRewrite = fuzzyRewrite;
+ return this;
+ }
+
+ public MatchQueryBuilder fuzzyTranspositions(boolean fuzzyTranspositions) {
+ //LUCENE 4 UPGRADE add documentation
+ this.fuzzyTranspositions = fuzzyTranspositions;
+ return this;
+ }
+
+ /**
+     * Sets whether format-based failures will be ignored.
+ */
+ public MatchQueryBuilder setLenient(boolean lenient) {
+ this.lenient = lenient;
+ return this;
+ }
+
+ public MatchQueryBuilder zeroTermsQuery(ZeroTermsQuery zeroTermsQuery) {
+ this.zeroTermsQuery = zeroTermsQuery;
+ return this;
+ }
+
+ /**
+     * Sets a name for the query, which can then be used when searching for matched_filters per hit.
+ */
+ public MatchQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MatchQueryParser.NAME);
+ builder.startObject(name);
+
+ builder.field("query", text);
+ if (type != null) {
+ builder.field("type", type.toString().toLowerCase(Locale.ENGLISH));
+ }
+ if (operator != null) {
+ builder.field("operator", operator.toString());
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (boost != null) {
+ builder.field("boost", boost);
+ }
+ if (slop != null) {
+ builder.field("slop", slop);
+ }
+ if (fuzziness != null) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (maxExpansions != null) {
+ builder.field("max_expansions", maxExpansions);
+ }
+ if (minimumShouldMatch != null) {
+ builder.field("minimum_should_match", minimumShouldMatch);
+ }
+ if (rewrite != null) {
+ builder.field("rewrite", rewrite);
+ }
+ if (fuzzyRewrite != null) {
+ builder.field("fuzzy_rewrite", fuzzyRewrite);
+ }
+ if (fuzzyTranspositions != null) {
+ //LUCENE 4 UPGRADE we need to document this & test this
+ builder.field("fuzzy_transpositions", fuzzyTranspositions);
+ }
+ if (lenient != null) {
+ builder.field("lenient", lenient);
+ }
+ if (zeroTermsQuery != null) {
+ builder.field("zero_terms_query", zeroTermsQuery.toString());
+ }
+        if (cutoffFrequency != null) {
+            builder.field("cutoff_frequency", cutoffFrequency);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+
+ builder.endObject();
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java b/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java
new file mode 100644
index 0000000..eff80a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.queries.ExtendedCommonTermsQuery;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.support.QueryParsers;
+import org.elasticsearch.index.search.MatchQuery;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MatchQueryParser implements QueryParser {
+
+ public static final String NAME = "match";
+
+ @Inject
+ public MatchQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{
+ NAME, "match_phrase", "matchPhrase", "match_phrase_prefix", "matchPhrasePrefix", "matchFuzzy", "match_fuzzy", "fuzzy_match"
+ };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ MatchQuery.Type type = MatchQuery.Type.BOOLEAN;
+ if ("match_phrase".equals(parser.currentName()) || "matchPhrase".equals(parser.currentName()) ||
+ "text_phrase".equals(parser.currentName()) || "textPhrase".equals(parser.currentName())) {
+ type = MatchQuery.Type.PHRASE;
+ } else if ("match_phrase_prefix".equals(parser.currentName()) || "matchPhrasePrefix".equals(parser.currentName()) ||
+ "text_phrase_prefix".equals(parser.currentName()) || "textPhrasePrefix".equals(parser.currentName())) {
+ type = MatchQuery.Type.PHRASE_PREFIX;
+ }
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[match] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+
+ Object value = null;
+ float boost = 1.0f;
+ MatchQuery matchQuery = new MatchQuery(parseContext);
+ String minimumShouldMatch = null;
+ String queryName = null;
+
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("query".equals(currentFieldName)) {
+ value = parser.objectText();
+ } else if ("type".equals(currentFieldName)) {
+ String tStr = parser.text();
+ if ("boolean".equals(tStr)) {
+ type = MatchQuery.Type.BOOLEAN;
+ } else if ("phrase".equals(tStr)) {
+ type = MatchQuery.Type.PHRASE;
+ } else if ("phrase_prefix".equals(tStr) || "phrasePrefix".equals(currentFieldName)) {
+ type = MatchQuery.Type.PHRASE_PREFIX;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[match] query does not support type " + tStr);
+ }
+ } else if ("analyzer".equals(currentFieldName)) {
+ String analyzer = parser.text();
+ if (parseContext.analysisService().analyzer(analyzer) == null) {
+ throw new QueryParsingException(parseContext.index(), "[match] analyzer [" + parser.text() + "] not found");
+ }
+ matchQuery.setAnalyzer(analyzer);
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("slop".equals(currentFieldName) || "phrase_slop".equals(currentFieldName) || "phraseSlop".equals(currentFieldName)) {
+ matchQuery.setPhraseSlop(parser.intValue());
+ } else if (Fuzziness.FIELD.match(currentFieldName, parseContext.parseFlags())) {
+ matchQuery.setFuzziness(Fuzziness.parse(parser));
+ } else if ("prefix_length".equals(currentFieldName) || "prefixLength".equals(currentFieldName)) {
+ matchQuery.setFuzzyPrefixLength(parser.intValue());
+ } else if ("max_expansions".equals(currentFieldName) || "maxExpansions".equals(currentFieldName)) {
+ matchQuery.setMaxExpansions(parser.intValue());
+ } else if ("operator".equals(currentFieldName)) {
+ String op = parser.text();
+ if ("or".equalsIgnoreCase(op)) {
+ matchQuery.setOccur(BooleanClause.Occur.SHOULD);
+ } else if ("and".equalsIgnoreCase(op)) {
+ matchQuery.setOccur(BooleanClause.Occur.MUST);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "text query requires operator to be either 'and' or 'or', not [" + op + "]");
+ }
+ } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ minimumShouldMatch = parser.textOrNull();
+ } else if ("rewrite".equals(currentFieldName)) {
+ matchQuery.setRewriteMethod(QueryParsers.parseRewriteMethod(parser.textOrNull(), null));
+ } else if ("fuzzy_rewrite".equals(currentFieldName) || "fuzzyRewrite".equals(currentFieldName)) {
+ matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(parser.textOrNull(), null));
+ } else if ("fuzzy_transpositions".equals(fieldName)) {
+ matchQuery.setTranspositions(parser.booleanValue());
+ } else if ("lenient".equals(currentFieldName)) {
+ matchQuery.setLenient(parser.booleanValue());
+ } else if ("cutoff_frequency".equals(currentFieldName)) {
+ matchQuery.setCommonTermsCutoff(parser.floatValue());
+ } else if ("zero_terms_query".equals(currentFieldName)) {
+ String zeroTermsDocs = parser.text();
+ if ("none".equalsIgnoreCase(zeroTermsDocs)) {
+ matchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE);
+ } else if ("all".equalsIgnoreCase(zeroTermsDocs)) {
+ matchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "Unsupported zero_terms_query value [" + zeroTermsDocs + "]");
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[match] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.objectText();
+ // move to the next token
+ token = parser.nextToken();
+ if (token != XContentParser.Token.END_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), "[match] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?");
+ }
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No text specified for text query");
+ }
+
+ Query query = matchQuery.parse(type, fieldName, value);
+ if (query == null) {
+ return null;
+ }
+
+ if (query instanceof BooleanQuery) {
+ Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
+ } else if (query instanceof ExtendedCommonTermsQuery) {
+ ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch);
+ }
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
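Editor's note on usage (not part of the patch): the parser above accepts two wire forms, the simplified {"match": {"field": "text"}} and the full object form with a "query" element plus options. A minimal Java sketch of both, assuming the QueryBuilders factory added elsewhere in this import and that BaseQueryBuilder renders the query DSL JSON via toString():

    import org.elasticsearch.index.query.MatchQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class MatchQueryExample {
        public static void main(String[] args) {
            // Simplified form: field name plus query text only; handled by the
            // "else" branch of MatchQueryParser.parse() above.
            MatchQueryBuilder simple = QueryBuilders.matchQuery("message", "quick brown fox");

            // Full form: exercises the START_OBJECT branch and its options.
            MatchQueryBuilder full = QueryBuilders.matchQuery("message", "quick brown fox")
                    .operator(MatchQueryBuilder.Operator.AND)   // maps to setOccur(MUST) in the parser
                    .minimumShouldMatch("75%")
                    .boost(2.0f);

            System.out.println(simple);  // assumed: toString() emits the JSON form
            System.out.println(full);
        }
    }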
diff --git a/src/main/java/org/elasticsearch/index/query/MissingFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/MissingFilterBuilder.java
new file mode 100644
index 0000000..bec56dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MissingFilterBuilder.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Constructs a filter that matches documents that are missing a value for the given field,
+ * either because the field is absent or because it was indexed with a configured null value.
+ */
+public class MissingFilterBuilder extends BaseFilterBuilder {
+
+ private String name;
+
+ private String filterName;
+
+ private Boolean nullValue;
+
+ private Boolean existence;
+
+ public MissingFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Should the missing filter also match documents whose field holds the null value configured
+ * in the mappings. Defaults to <tt>false</tt>.
+ */
+ public MissingFilterBuilder nullValue(boolean nullValue) {
+ this.nullValue = nullValue;
+ return this;
+ }
+
+ /**
+ * Should the missing filter match documents where the field doesn't exist at all.
+ * Defaults to <tt>true</tt>.
+ */
+ public MissingFilterBuilder existence(boolean existence) {
+ this.existence = existence;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public MissingFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MissingFilterParser.NAME);
+ builder.field("field", name);
+ if (nullValue != null) {
+ builder.field("null_value", nullValue);
+ }
+ if (existence != null) {
+ builder.field("existence", existence);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ builder.endObject();
+ }
+}
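Editor's note (not part of the patch): a minimal usage sketch for the builder above, assuming the FilterBuilders factory added elsewhere in this import:

    import org.elasticsearch.index.query.FilterBuilders;
    import org.elasticsearch.index.query.MissingFilterBuilder;

    // Match documents where "user" is absent or was indexed as the mapped null value.
    MissingFilterBuilder missing = FilterBuilders.missingFilter("user")
            .existence(true)    // serialized as "existence": true
            .nullValue(true)    // serialized as "null_value": true
            .filterName("missing_user");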
diff --git a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java
new file mode 100644
index 0000000..fa27253
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.TermRangeFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ *
+ */
+public class MissingFilterParser implements FilterParser {
+
+ public static final String NAME = "missing";
+ public static final boolean DEFAULT_NULL_VALUE = false;
+ public static final boolean DEFAULT_EXISTENCE_VALUE = true;
+
+ @Inject
+ public MissingFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String fieldPattern = null;
+ String filterName = null;
+ boolean nullValue = DEFAULT_NULL_VALUE;
+ boolean existence = DEFAULT_EXISTENCE_VALUE;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("field".equals(currentFieldName)) {
+ fieldPattern = parser.text();
+ } else if ("null_value".equals(currentFieldName)) {
+ nullValue = parser.booleanValue();
+ } else if ("existence".equals(currentFieldName)) {
+ existence = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[missing] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (fieldPattern == null) {
+ throw new QueryParsingException(parseContext.index(), "missing must be provided with a [field]");
+ }
+
+ return newFilter(parseContext, fieldPattern, existence, nullValue, filterName);
+ }
+
+ public static Filter newFilter(QueryParseContext parseContext, String fieldPattern, boolean existence, boolean nullValue, String filterName) {
+ if (!existence && !nullValue) {
+ throw new QueryParsingException(parseContext.index(), "missing must have either existence, or null_value, or both set to true");
+ }
+
+ MapperService.SmartNameObjectMapper smartNameObjectMapper = parseContext.smartObjectMapper(fieldPattern);
+ if (smartNameObjectMapper != null && smartNameObjectMapper.hasMapper()) {
+ // the name matches an object mapper; automatically turn it into a wildcard pattern over its sub-fields
+ fieldPattern = fieldPattern + ".*";
+ }
+
+ Set<String> fields = parseContext.simpleMatchToIndexNames(fieldPattern);
+ if (fields.isEmpty()) {
+ if (existence) {
+ // if we ask for existence of fields, and we found none, then we should match on all
+ return Queries.MATCH_ALL_FILTER;
+ }
+ return null;
+ }
+
+ Filter existenceFilter = null;
+ Filter nullFilter = null;
+
+ MapperService.SmartNameFieldMappers nonNullFieldMappers = null;
+
+ if (existence) {
+ XBooleanFilter boolFilter = new XBooleanFilter();
+ for (String field : fields) {
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(field);
+ if (smartNameFieldMappers != null) {
+ nonNullFieldMappers = smartNameFieldMappers;
+ }
+ Filter filter = null;
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ filter = smartNameFieldMappers.mapper().rangeFilter(null, null, true, true, parseContext);
+ }
+ if (filter == null) {
+ filter = new TermRangeFilter(field, null, null, true, true);
+ }
+ boolFilter.add(filter, BooleanClause.Occur.SHOULD);
+ }
+
+ // we always cache this one; the set of existing fields really does not change
+ // it's ok to cache under the fieldName cache key, since it's per segment and the mapping applies to this data on this segment
+ existenceFilter = parseContext.cacheFilter(boolFilter, new CacheKeyFilter.Key("$exists$" + fieldPattern));
+ existenceFilter = new NotFilter(existenceFilter);
+ // cache the not filter as well, so it will be faster
+ existenceFilter = parseContext.cacheFilter(existenceFilter, new CacheKeyFilter.Key("$missing$" + fieldPattern));
+ }
+
+ if (nullValue) {
+ for (String field : fields) {
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(field);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ nullFilter = smartNameFieldMappers.mapper().nullValueFilter();
+ if (nullFilter != null) {
+ // cache the null-value filter as well, so it will be faster
+ nullFilter = parseContext.cacheFilter(nullFilter, new CacheKeyFilter.Key("$null$" + fieldPattern));
+ }
+ }
+ }
+ }
+
+ Filter filter;
+ if (nullFilter != null) {
+ if (existenceFilter != null) {
+ XBooleanFilter combined = new XBooleanFilter();
+ combined.add(existenceFilter, BooleanClause.Occur.SHOULD);
+ combined.add(nullFilter, BooleanClause.Occur.SHOULD);
+ // cache the combined filter as well, so it will be faster
+ filter = parseContext.cacheFilter(combined, null);
+ } else {
+ filter = nullFilter;
+ }
+ } else {
+ filter = existenceFilter;
+ }
+
+ if (filter == null) {
+ return null;
+ }
+
+ filter = wrapSmartNameFilter(filter, nonNullFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
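Editor's note (not part of the patch): newFilter() above composes up to two clauses. A hypothetical, self-contained sketch of the same truth table, with the Lucene filters replaced by strings purely for illustration:

    // Mirrors MissingFilterParser.newFilter(): existence -> NOT(field exists),
    // nullValue -> the mapper's null-value filter, both -> OR of the two.
    static String describeMissing(boolean existence, boolean nullValue) {
        if (!existence && !nullValue) {
            throw new IllegalArgumentException(
                    "missing must have either existence, or null_value, or both set to true");
        }
        if (existence && nullValue) {
            return "NOT(exists) OR null_value";
        }
        return existence ? "NOT(exists)" : "null_value";
    }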
diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryBuilder.java
new file mode 100644
index 0000000..72bed16
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryBuilder.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A more like this query that runs against a specific field.
+ */
+public class MoreLikeThisFieldQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<MoreLikeThisFieldQueryBuilder> {
+
+ private final String name;
+
+ private String likeText;
+ private float percentTermsToMatch = -1;
+ private int minTermFreq = -1;
+ private int maxQueryTerms = -1;
+ private String[] stopWords = null;
+ private int minDocFreq = -1;
+ private int maxDocFreq = -1;
+ private int minWordLength = -1;
+ private int maxWordLength = -1;
+ private float boostTerms = -1;
+ private float boost = -1;
+ private String analyzer;
+ private Boolean failOnUnsupportedField;
+ private String queryName;
+
+ /**
+ * A more like this query that runs against a specific field.
+ *
+ * @param name The field name to run the query against
+ */
+ public MoreLikeThisFieldQueryBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * The text to use in order to find documents that are "like" this.
+ */
+ public MoreLikeThisFieldQueryBuilder likeText(String likeText) {
+ this.likeText = likeText;
+ return this;
+ }
+
+ /**
+ * The percentage of terms to match. Defaults to <tt>0.3</tt>.
+ */
+ public MoreLikeThisFieldQueryBuilder percentTermsToMatch(float percentTermsToMatch) {
+ this.percentTermsToMatch = percentTermsToMatch;
+ return this;
+ }
+
+ /**
+ * The frequency below which terms will be ignored in the source doc. The default
+ * frequency is <tt>2</tt>.
+ */
+ public MoreLikeThisFieldQueryBuilder minTermFreq(int minTermFreq) {
+ this.minTermFreq = minTermFreq;
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of query terms that will be included in any generated query.
+ * Defaults to <tt>25</tt>.
+ */
+ public MoreLikeThisFieldQueryBuilder maxQueryTerms(int maxQueryTerms) {
+ this.maxQueryTerms = maxQueryTerms;
+ return this;
+ }
+
+ /**
+ * Set the set of stopwords.
+ * <p/>
+ * <p>Any word in this set is considered "uninteresting" and ignored. Even if your Analyzer allows stopwords, you
+ * might want to tell the MoreLikeThis code to ignore them, as for the purposes of document similarity it seems
+ * reasonable to assume that "a stop word is never interesting".
+ */
+ public MoreLikeThisFieldQueryBuilder stopWords(String... stopWords) {
+ this.stopWords = stopWords;
+ return this;
+ }
+
+ /**
+ * Sets the frequency at which words will be ignored which do not occur in at least this
+ * many docs. Defaults to <tt>5</tt>.
+ */
+ public MoreLikeThisFieldQueryBuilder minDocFreq(int minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ return this;
+ }
+
+ /**
+ * Set the maximum frequency in which words may still appear. Words that appear
+ * in more than this many docs will be ignored. Defaults to unbounded.
+ */
+ public MoreLikeThisFieldQueryBuilder maxDocFreq(int maxDocFreq) {
+ this.maxDocFreq = maxDocFreq;
+ return this;
+ }
+
+ /**
+ * Sets the minimum word length below which words will be ignored. Defaults
+ * to <tt>0</tt>.
+ */
+ public MoreLikeThisFieldQueryBuilder minWordLength(int minWordLength) {
+ this.minWordLength = minWordLength;
+ return this;
+ }
+
+ /**
+ * Sets the maximum word length above which words will be ignored. Defaults to
+ * unbounded (<tt>0</tt>).
+ */
+ public MoreLikeThisFieldQueryBuilder maxWordLen(int maxWordLen) {
+ this.maxWordLength = maxWordLen;
+ return this;
+ }
+
+ /**
+ * Sets the boost factor to use when boosting terms. Defaults to <tt>1</tt>.
+ */
+ public MoreLikeThisFieldQueryBuilder boostTerms(float boostTerms) {
+ this.boostTerms = boostTerms;
+ return this;
+ }
+
+ /**
+ * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
+ */
+ public MoreLikeThisFieldQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ public MoreLikeThisFieldQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Whether to fail or return no result when this query is run against an unsupported field type, such as binary or numeric fields.
+ */
+ public MoreLikeThisFieldQueryBuilder failOnUnsupportedField(boolean fail) {
+ failOnUnsupportedField = fail;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public MoreLikeThisFieldQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MoreLikeThisFieldQueryParser.NAME);
+ builder.startObject(name);
+ if (likeText == null) {
+ throw new ElasticsearchIllegalArgumentException("moreLikeThisField requires '"+
+ MoreLikeThisQueryParser.Fields.LIKE_TEXT.getPreferredName() +"' to be provided");
+ }
+ builder.field(MoreLikeThisQueryParser.Fields.LIKE_TEXT.getPreferredName(), likeText);
+ if (percentTermsToMatch != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.PERCENT_TERMS_TO_MATCH.getPreferredName(), percentTermsToMatch);
+ }
+ if (minTermFreq != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MIN_TERM_FREQ.getPreferredName(), minTermFreq);
+ }
+ if (maxQueryTerms != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MAX_QUERY_TERMS.getPreferredName(), maxQueryTerms);
+ }
+ if (stopWords != null && stopWords.length > 0) {
+ builder.startArray(MoreLikeThisQueryParser.Fields.STOP_WORDS.getPreferredName());
+ for (String stopWord : stopWords) {
+ builder.value(stopWord);
+ }
+ builder.endArray();
+ }
+ if (minDocFreq != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MIN_DOC_FREQ.getPreferredName(), minDocFreq);
+ }
+ if (maxDocFreq != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MAX_DOC_FREQ.getPreferredName(), maxDocFreq);
+ }
+ if (minWordLength != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MIN_WORD_LENGTH.getPreferredName(), minWordLength);
+ }
+ if (maxWordLength != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MAX_WORD_LENGTH.getPreferredName(), maxWordLength);
+ }
+ if (boostTerms != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.BOOST_TERMS.getPreferredName(), boostTerms);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (failOnUnsupportedField != null) {
+ builder.field(MoreLikeThisQueryParser.Fields.FAIL_ON_UNSUPPORTED_FIELD.getPreferredName(), failOnUnsupportedField);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+}
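Editor's note (not part of the patch): a minimal usage sketch, assuming the QueryBuilders factory added elsewhere in this import:

    import org.elasticsearch.index.query.MoreLikeThisFieldQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    MoreLikeThisFieldQueryBuilder mltField = QueryBuilders.moreLikeThisFieldQuery("name.first")
            .likeText("text like this one")   // required; doXContent() throws without it
            .minTermFreq(1)
            .maxQueryTerms(12);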
diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryParser.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryParser.java
new file mode 100644
index 0000000..7e465a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisFieldQueryParser.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.Analysis;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ *
+ */
+public class MoreLikeThisFieldQueryParser implements QueryParser {
+
+ public static final String NAME = "mlt_field";
+
+ @Inject
+ public MoreLikeThisFieldQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "more_like_this_field", Strings.toCamelCase(NAME), "moreLikeThisField"};
+ }
+
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ assert token == XContentParser.Token.FIELD_NAME;
+ String fieldName = parser.currentName();
+
+ // now, we move after the field name, which starts the object
+ token = parser.nextToken();
+ assert token == XContentParser.Token.START_OBJECT;
+
+
+ MoreLikeThisQuery mltQuery = new MoreLikeThisQuery();
+ mltQuery.setSimilarity(parseContext.searchSimilarity());
+ Analyzer analyzer = null;
+ boolean failOnUnsupportedField = true;
+ String queryName = null;
+
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if (MoreLikeThisQueryParser.Fields.LIKE_TEXT.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setLikeText(parser.text());
+ } else if (MoreLikeThisQueryParser.Fields.MIN_TERM_FREQ.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMinTermFrequency(parser.intValue());
+ } else if (MoreLikeThisQueryParser.Fields.MAX_QUERY_TERMS.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMaxQueryTerms(parser.intValue());
+ } else if (MoreLikeThisQueryParser.Fields.MIN_DOC_FREQ.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMinDocFreq(parser.intValue());
+ } else if (MoreLikeThisQueryParser.Fields.MAX_DOC_FREQ.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMaxDocFreq(parser.intValue());
+ } else if (MoreLikeThisQueryParser.Fields.MIN_WORD_LENGTH.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMinWordLen(parser.intValue());
+ } else if (MoreLikeThisQueryParser.Fields.MAX_WORD_LENGTH.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMaxWordLen(parser.intValue());
+ } else if (MoreLikeThisQueryParser.Fields.BOOST_TERMS.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setBoostTerms(true);
+ mltQuery.setBoostTermsFactor(parser.floatValue());
+ } else if (MoreLikeThisQueryParser.Fields.PERCENT_TERMS_TO_MATCH.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setPercentTermsToMatch(parser.floatValue());
+ } else if ("analyzer".equals(currentFieldName)) {
+ analyzer = parseContext.analysisService().analyzer(parser.text());
+ } else if ("boost".equals(currentFieldName)) {
+ mltQuery.setBoost(parser.floatValue());
+ } else if (MoreLikeThisQueryParser.Fields.FAIL_ON_UNSUPPORTED_FIELD.match(currentFieldName, parseContext.parseFlags())) {
+ failOnUnsupportedField = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[mlt_field] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if (MoreLikeThisQueryParser.Fields.STOP_WORDS.match(currentFieldName, parseContext.parseFlags())) {
+
+ Set<String> stopWords = Sets.newHashSet();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ stopWords.add(parser.text());
+ }
+ mltQuery.setStopWords(stopWords);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[mlt_field] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (mltQuery.getLikeText() == null) {
+ throw new QueryParsingException(parseContext.index(), "more_like_this_field requires 'like_text' to be specified");
+ }
+
+ // move to the next end object, to close the field name
+ token = parser.nextToken();
+ assert token == XContentParser.Token.END_OBJECT;
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ fieldName = smartNameFieldMappers.mapper().names().indexName();
+ }
+ if (analyzer == null) {
+ analyzer = smartNameFieldMappers.searchAnalyzer();
+ }
+ }
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+ if (!Analysis.generatesCharacterTokenStream(analyzer, fieldName)) {
+ if (failOnUnsupportedField) {
+ throw new ElasticsearchIllegalArgumentException("more_like_this_field doesn't support binary/numeric fields: [" + fieldName + "]");
+ } else {
+ return null;
+ }
+ }
+ mltQuery.setAnalyzer(analyzer);
+ mltQuery.setMoreLikeFields(new String[]{fieldName});
+ Query query = wrapSmartNameQuery(mltQuery, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
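Editor's note (not part of the patch): parse() above requires the field name to open its own object (FIELD_NAME followed by START_OBJECT), so the wire form this parser expects looks like the following, shown as a Java string for illustration:

    String mltField = "{\"mlt_field\": {\"name.first\": {"
            + "\"like_text\": \"text like this one\","
            + "\"min_term_freq\": 1, \"max_query_terms\": 12}}}";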
diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
new file mode 100644
index 0000000..dc205cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A more like this query that finds documents that are "like" the provided {@link #likeText(String)},
+ * which is checked against the fields the query is constructed with.
+ */
+public class MoreLikeThisQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<MoreLikeThisQueryBuilder> {
+
+ private final String[] fields;
+
+ private String likeText;
+ private float percentTermsToMatch = -1;
+ private int minTermFreq = -1;
+ private int maxQueryTerms = -1;
+ private String[] stopWords = null;
+ private int minDocFreq = -1;
+ private int maxDocFreq = -1;
+ private int minWordLength = -1;
+ private int maxWordLength = -1;
+ private float boostTerms = -1;
+ private float boost = -1;
+ private String analyzer;
+ private Boolean failOnUnsupportedField;
+ private String queryName;
+
+ /**
+ * Constructs a new more like this query which uses the "_all" field.
+ */
+ public MoreLikeThisQueryBuilder() {
+ this.fields = null;
+ }
+
+ /**
+ * Sets the field names that will be used when generating the 'More Like This' query.
+ *
+ * @param fields the field names that will be used when generating the 'More Like This' query.
+ */
+ public MoreLikeThisQueryBuilder(String... fields) {
+ this.fields = fields;
+ }
+
+ /**
+ * The text to use in order to find documents that are "like" this.
+ */
+ public MoreLikeThisQueryBuilder likeText(String likeText) {
+ this.likeText = likeText;
+ return this;
+ }
+
+ /**
+ * The percentage of terms to match. Defaults to <tt>0.3</tt>.
+ */
+ public MoreLikeThisQueryBuilder percentTermsToMatch(float percentTermsToMatch) {
+ this.percentTermsToMatch = percentTermsToMatch;
+ return this;
+ }
+
+ /**
+ * The frequency below which terms will be ignored in the source doc. The default
+ * frequency is <tt>2</tt>.
+ */
+ public MoreLikeThisQueryBuilder minTermFreq(int minTermFreq) {
+ this.minTermFreq = minTermFreq;
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of query terms that will be included in any generated query.
+ * Defaults to <tt>25</tt>.
+ */
+ public MoreLikeThisQueryBuilder maxQueryTerms(int maxQueryTerms) {
+ this.maxQueryTerms = maxQueryTerms;
+ return this;
+ }
+
+ /**
+ * Set the set of stopwords.
+ * <p/>
+ * <p>Any word in this set is considered "uninteresting" and ignored. Even if your Analyzer allows stopwords, you
+ * might want to tell the MoreLikeThis code to ignore them, as for the purposes of document similarity it seems
+ * reasonable to assume that "a stop word is never interesting".
+ */
+ public MoreLikeThisQueryBuilder stopWords(String... stopWords) {
+ this.stopWords = stopWords;
+ return this;
+ }
+
+ /**
+ * Sets the frequency at which words will be ignored which do not occur in at least this
+ * many docs. Defaults to <tt>5</tt>.
+ */
+ public MoreLikeThisQueryBuilder minDocFreq(int minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ return this;
+ }
+
+ /**
+ * Set the maximum frequency in which words may still appear. Words that appear
+ * in more than this many docs will be ignored. Defaults to unbounded.
+ */
+ public MoreLikeThisQueryBuilder maxDocFreq(int maxDocFreq) {
+ this.maxDocFreq = maxDocFreq;
+ return this;
+ }
+
+ /**
+ * Sets the minimum word length below which words will be ignored. Defaults
+ * to <tt>0</tt>.
+ */
+ public MoreLikeThisQueryBuilder minWordLength(int minWordLength) {
+ this.minWordLength = minWordLength;
+ return this;
+ }
+
+ /**
+ * Sets the maximum word length above which words will be ignored. Defaults to
+ * unbounded (<tt>0</tt>).
+ */
+ public MoreLikeThisQueryBuilder maxWordLength(int maxWordLength) {
+ this.maxWordLength = maxWordLength;
+ return this;
+ }
+
+ /**
+ * Sets the boost factor to use when boosting terms. Defaults to <tt>1</tt>.
+ */
+ public MoreLikeThisQueryBuilder boostTerms(float boostTerms) {
+ this.boostTerms = boostTerms;
+ return this;
+ }
+
+ /**
+ * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
+ */
+ public MoreLikeThisQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ public MoreLikeThisQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Whether to fail or return no result when this query is run against an unsupported field type, such as binary or numeric fields.
+ */
+ public MoreLikeThisQueryBuilder failOnUnsupportedField(boolean fail) {
+ failOnUnsupportedField = fail;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public MoreLikeThisQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MoreLikeThisQueryParser.NAME);
+ if (fields != null) {
+ builder.startArray("fields");
+ for (String field : fields) {
+ builder.value(field);
+ }
+ builder.endArray();
+ }
+ if (likeText == null) {
+ throw new ElasticsearchIllegalArgumentException("moreLikeThis requires '"+
+ MoreLikeThisQueryParser.Fields.LIKE_TEXT.getPreferredName() +"' to be provided");
+ }
+ builder.field(MoreLikeThisQueryParser.Fields.LIKE_TEXT.getPreferredName(), likeText);
+ if (percentTermsToMatch != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.PERCENT_TERMS_TO_MATCH.getPreferredName(), percentTermsToMatch);
+ }
+ if (minTermFreq != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MIN_TERM_FREQ.getPreferredName(), minTermFreq);
+ }
+ if (maxQueryTerms != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MAX_QUERY_TERMS.getPreferredName(), maxQueryTerms);
+ }
+ if (stopWords != null && stopWords.length > 0) {
+ builder.startArray(MoreLikeThisQueryParser.Fields.STOP_WORDS.getPreferredName());
+ for (String stopWord : stopWords) {
+ builder.value(stopWord);
+ }
+ builder.endArray();
+ }
+ if (minDocFreq != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MIN_DOC_FREQ.getPreferredName(), minDocFreq);
+ }
+ if (maxDocFreq != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MAX_DOC_FREQ.getPreferredName(), maxDocFreq);
+ }
+ if (minWordLength != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MIN_WORD_LENGTH.getPreferredName(), minWordLength);
+ }
+ if (maxWordLength != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.MAX_WORD_LENGTH.getPreferredName(), maxWordLength);
+ }
+ if (boostTerms != -1) {
+ builder.field(MoreLikeThisQueryParser.Fields.BOOST_TERMS.getPreferredName(), boostTerms);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (failOnUnsupportedField != null) {
+ builder.field(MoreLikeThisQueryParser.Fields.FAIL_ON_UNSUPPORTED_FIELD.getPreferredName(), failOnUnsupportedField);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
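Editor's note (not part of the patch): a minimal usage sketch, assuming the QueryBuilders factory added elsewhere in this import; when no fields are given, the parser falls back to the context's default field:

    import org.elasticsearch.index.query.MoreLikeThisQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    MoreLikeThisQueryBuilder mlt = QueryBuilders.moreLikeThisQuery("name.first", "name.last")
            .likeText("text like this one")   // required; doXContent() throws without it
            .minTermFreq(1)
            .maxQueryTerms(12);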
diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java
new file mode 100644
index 0000000..2076529
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.Analysis;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/**
+ *
+ */
+public class MoreLikeThisQueryParser implements QueryParser {
+
+ public static final String NAME = "mlt";
+
+
+ public static class Fields {
+ public static final ParseField LIKE_TEXT = new ParseField("like_text");
+ public static final ParseField MIN_TERM_FREQ = new ParseField("min_term_freq");
+ public static final ParseField MAX_QUERY_TERMS = new ParseField("max_query_terms");
+ public static final ParseField MIN_WORD_LENGTH = new ParseField("min_word_length", "min_word_len");
+ public static final ParseField MAX_WORD_LENGTH = new ParseField("max_word_length", "max_word_len");
+ public static final ParseField MIN_DOC_FREQ = new ParseField("min_doc_freq");
+ public static final ParseField MAX_DOC_FREQ = new ParseField("max_doc_freq");
+ public static final ParseField BOOST_TERMS = new ParseField("boost_terms");
+ public static final ParseField PERCENT_TERMS_TO_MATCH = new ParseField("percent_terms_to_match");
+ public static final ParseField FAIL_ON_UNSUPPORTED_FIELD = new ParseField("fail_on_unsupported_field");
+ public static final ParseField STOP_WORDS = new ParseField("stop_words");
+ }
+
+ @Inject
+ public MoreLikeThisQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "more_like_this", "moreLikeThis"};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ MoreLikeThisQuery mltQuery = new MoreLikeThisQuery();
+ mltQuery.setSimilarity(parseContext.searchSimilarity());
+ Analyzer analyzer = null;
+ List<String> moreLikeFields = null;
+ boolean failOnUnsupportedField = true;
+ String queryName = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if (Fields.LIKE_TEXT.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setLikeText(parser.text());
+ } else if (Fields.MIN_TERM_FREQ.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMinTermFrequency(parser.intValue());
+ } else if (Fields.MAX_QUERY_TERMS.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMaxQueryTerms(parser.intValue());
+ } else if (Fields.MIN_DOC_FREQ.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMinDocFreq(parser.intValue());
+ } else if (Fields.MAX_DOC_FREQ.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMaxDocFreq(parser.intValue());
+ } else if (Fields.MIN_WORD_LENGTH.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMinWordLen(parser.intValue());
+ } else if (Fields.MAX_WORD_LENGTH.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setMaxWordLen(parser.intValue());
+ } else if (Fields.BOOST_TERMS.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setBoostTerms(true);
+ mltQuery.setBoostTermsFactor(parser.floatValue());
+ } else if (Fields.PERCENT_TERMS_TO_MATCH.match(currentFieldName, parseContext.parseFlags())) {
+ mltQuery.setPercentTermsToMatch(parser.floatValue());
+ } else if ("analyzer".equals(currentFieldName)) {
+ analyzer = parseContext.analysisService().analyzer(parser.text());
+ } else if ("boost".equals(currentFieldName)) {
+ mltQuery.setBoost(parser.floatValue());
+ } else if (Fields.FAIL_ON_UNSUPPORTED_FIELD.match(currentFieldName, parseContext.parseFlags())) {
+ failOnUnsupportedField = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if (Fields.STOP_WORDS.match(currentFieldName, parseContext.parseFlags())) {
+ Set<String> stopWords = Sets.newHashSet();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ stopWords.add(parser.text());
+ }
+ mltQuery.setStopWords(stopWords);
+ } else if ("fields".equals(currentFieldName)) {
+ moreLikeFields = Lists.newLinkedList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ moreLikeFields.add(parseContext.indexName(parser.text()));
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (mltQuery.getLikeText() == null) {
+ throw new QueryParsingException(parseContext.index(), "more_like_this requires 'like_text' to be specified");
+ }
+
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+ mltQuery.setAnalyzer(analyzer);
+
+ if (moreLikeFields == null) {
+ moreLikeFields = Lists.newArrayList(parseContext.defaultField());
+ } else if (moreLikeFields.isEmpty()) {
+ throw new QueryParsingException(parseContext.index(), "more_like_this requires 'fields' to be non-empty");
+ }
+
+ for (Iterator<String> it = moreLikeFields.iterator(); it.hasNext(); ) {
+ final String fieldName = it.next();
+ if (!Analysis.generatesCharacterTokenStream(analyzer, fieldName)) {
+ if (failOnUnsupportedField) {
+ throw new ElasticsearchIllegalArgumentException("more_like_this doesn't support binary/numeric fields: [" + fieldName + "]");
+ } else {
+ it.remove();
+ }
+ }
+ }
+ if (moreLikeFields.isEmpty()) {
+ return null;
+ }
+ mltQuery.setMoreLikeFields(moreLikeFields.toArray(Strings.EMPTY_ARRAY));
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, mltQuery);
+ }
+ return mltQuery;
+ }
+} \ No newline at end of file
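Editor's note (not part of the patch): unlike mlt_field, this parser takes a top-level object with an optional "fields" array, and unsupported (binary/numeric) fields are silently dropped when fail_on_unsupported_field is false. The expected wire form, as a Java string for illustration:

    String mlt = "{\"mlt\": {"
            + "\"fields\": [\"name.first\", \"name.last\"],"
            + "\"like_text\": \"text like this one\","
            + "\"stop_words\": [\"a\", \"the\"]}}";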
diff --git a/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
new file mode 100644
index 0000000..19c8177
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Same as {@link MatchQueryBuilder} but supports multiple fields.
+ */
+public class MultiMatchQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<MultiMatchQueryBuilder> {
+
+ private final Object text;
+
+ private final List<String> fields;
+ private ObjectFloatOpenHashMap<String> fieldsBoosts;
+
+ private MatchQueryBuilder.Type type;
+
+ private MatchQueryBuilder.Operator operator;
+
+ private String analyzer;
+
+ private Float boost;
+
+ private Integer slop;
+
+ private Fuzziness fuzziness;
+
+ private Integer prefixLength;
+
+ private Integer maxExpansions;
+
+ private String minimumShouldMatch;
+
+ private String rewrite = null;
+
+ private String fuzzyRewrite = null;
+
+ private Boolean useDisMax;
+
+ private Float tieBreaker;
+
+ private Boolean lenient;
+
+ private Float cutoffFrequency = null;
+
+ private MatchQueryBuilder.ZeroTermsQuery zeroTermsQuery = null;
+
+ private String queryName;
+
+ /**
+ * Constructs a new multi match query.
+ */
+ public MultiMatchQueryBuilder(Object text, String... fields) {
+ this.fields = Lists.newArrayList();
+ this.fields.addAll(Arrays.asList(fields));
+ this.text = text;
+ }
+
+ /**
+ * Adds a field to run the multi match against.
+ */
+ public MultiMatchQueryBuilder field(String field) {
+ fields.add(field);
+ return this;
+ }
+
+ /**
+ * Adds a field to run the multi match against with a specific boost.
+ */
+ public MultiMatchQueryBuilder field(String field, float boost) {
+ fields.add(field);
+ if (fieldsBoosts == null) {
+ fieldsBoosts = new ObjectFloatOpenHashMap<String>();
+ }
+ fieldsBoosts.put(field, boost);
+ return this;
+ }
+
+ /**
+ * Sets the type of the text query.
+ */
+ public MultiMatchQueryBuilder type(MatchQueryBuilder.Type type) {
+ this.type = type;
+ return this;
+ }
+
+ /**
+ * Sets the operator to use when using a boolean query. Defaults to <tt>OR</tt>.
+ */
+ public MultiMatchQueryBuilder operator(MatchQueryBuilder.Operator operator) {
+ this.operator = operator;
+ return this;
+ }
+
+ /**
+ * Explicitly set the analyzer to use. Defaults to use explicit mapping config for the field, or, if not
+ * set, the default search analyzer.
+ */
+ public MultiMatchQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ /**
+ * Set the boost to apply to the query.
+ */
+ public MultiMatchQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Set the phrase slop if evaluated to a phrase query type.
+ */
+ public MultiMatchQueryBuilder slop(int slop) {
+ this.slop = slop;
+ return this;
+ }
+
+ /**
+ * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO".
+ */
+ public MultiMatchQueryBuilder fuzziness(Object fuzziness) {
+ this.fuzziness = Fuzziness.build(fuzziness);
+ return this;
+ }
+
+ public MultiMatchQueryBuilder prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ /**
+ * When using a fuzzy or prefix type query, the number of term expansions to use. Defaults to unbounded,
+ * so it's recommended to set it to a reasonable value for faster execution.
+ */
+ public MultiMatchQueryBuilder maxExpansions(int maxExpansions) {
+ this.maxExpansions = maxExpansions;
+ return this;
+ }
+
+ public MultiMatchQueryBuilder minimumShouldMatch(String minimumShouldMatch) {
+ this.minimumShouldMatch = minimumShouldMatch;
+ return this;
+ }
+
+ public MultiMatchQueryBuilder rewrite(String rewrite) {
+ this.rewrite = rewrite;
+ return this;
+ }
+
+ public MultiMatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
+ this.fuzzyRewrite = fuzzyRewrite;
+ return this;
+ }
+
+ public MultiMatchQueryBuilder useDisMax(Boolean useDisMax) {
+ this.useDisMax = useDisMax;
+ return this;
+ }
+
+ public MultiMatchQueryBuilder tieBreaker(Float tieBreaker) {
+ this.tieBreaker = tieBreaker;
+ return this;
+ }
+
+ /**
+ * Sets whether format based failures will be ignored.
+ */
+ public MultiMatchQueryBuilder lenient(boolean lenient) {
+ this.lenient = lenient;
+ return this;
+ }
+
+
+ /**
+ * Set a cutoff value in [0..1] (or absolute number >=1) representing the
+ * maximum threshold of a term's document frequency to be considered a low
+ * frequency term.
+ */
+ public MultiMatchQueryBuilder cutoffFrequency(float cutoff) {
+ this.cutoffFrequency = cutoff;
+ return this;
+ }
+
+
+ public MultiMatchQueryBuilder zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery zeroTermsQuery) {
+ this.zeroTermsQuery = zeroTermsQuery;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public MultiMatchQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(MultiMatchQueryParser.NAME);
+
+ builder.field("query", text);
+ builder.startArray("fields");
+ for (String field : fields) {
+ if (fieldsBoosts != null && fieldsBoosts.containsKey(field)) {
+ field += "^" + fieldsBoosts.lget();
+ }
+ builder.value(field);
+ }
+ builder.endArray();
+
+ if (type != null) {
+ builder.field("type", type.toString().toLowerCase(Locale.ENGLISH));
+ }
+ if (operator != null) {
+ builder.field("operator", operator.toString());
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (boost != null) {
+ builder.field("boost", boost);
+ }
+ if (slop != null) {
+ builder.field("slop", slop);
+ }
+ if (fuzziness != null) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (maxExpansions != null) {
+ builder.field("max_expansions", maxExpansions);
+ }
+ if (minimumShouldMatch != null) {
+ builder.field("minimum_should_match", minimumShouldMatch);
+ }
+ if (rewrite != null) {
+ builder.field("rewrite", rewrite);
+ }
+ if (fuzzyRewrite != null) {
+ builder.field("fuzzy_rewrite", fuzzyRewrite);
+ }
+
+ if (useDisMax != null) {
+ builder.field("use_dis_max", useDisMax);
+ }
+
+ if (tieBreaker != null) {
+ builder.field("tie_breaker", tieBreaker);
+ }
+
+ if (lenient != null) {
+ builder.field("lenient", lenient);
+ }
+
+ if (cutoffFrequency != null) {
+ builder.field("cutoff_frequency", cutoffFrequency);
+ }
+
+ if (zeroTermsQuery != null) {
+ builder.field("zero_terms_query", zeroTermsQuery.toString());
+ }
+
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+
+ builder.endObject();
+ }
+} \ No newline at end of file
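Editor's note (not part of the patch): a minimal usage sketch of the builder above, assuming the QueryBuilders factory added elsewhere in this import; per-field boosts set through field(String, float) are serialized in the "field^boost" form that MultiMatchQueryParser splits back apart:

    import org.elasticsearch.index.query.MultiMatchQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    MultiMatchQueryBuilder multi = QueryBuilders.multiMatchQuery("quick brown fox", "subject", "message")
            .field("title", 3.0f)   // serialized as "title^3.0"
            .useDisMax(true)
            .tieBreaker(0.3f);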
diff --git a/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java
new file mode 100644
index 0000000..885475d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.support.QueryParsers;
+import org.elasticsearch.index.search.MatchQuery;
+import org.elasticsearch.index.search.MultiMatchQuery;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Same as {@link MatchQueryParser} but with support for multiple fields.
+ */
+public class MultiMatchQueryParser implements QueryParser {
+
+ public static final String NAME = "multi_match";
+
+ @Inject
+ public MultiMatchQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{
+ NAME, "multiMatch"
+ };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Object value = null;
+ float boost = 1.0f;
+ MatchQuery.Type type = MatchQuery.Type.BOOLEAN;
+ MultiMatchQuery multiMatchQuery = new MultiMatchQuery(parseContext);
+ String minimumShouldMatch = null;
+ Map<String, Float> fieldNameWithBoosts = Maps.newHashMap();
+ String queryName = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if ("fields".equals(currentFieldName)) {
+ if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts);
+ }
+ } else if (token.isValue()) {
+ extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query does not support [" + currentFieldName
+ + "]");
+ }
+ } else if (token.isValue()) {
+ if ("query".equals(currentFieldName)) {
+ value = parser.objectText();
+ } else if ("type".equals(currentFieldName)) {
+ String tStr = parser.text();
+ if ("boolean".equals(tStr)) {
+ type = MatchQuery.Type.BOOLEAN;
+ } else if ("phrase".equals(tStr)) {
+ type = MatchQuery.Type.PHRASE;
+ } else if ("phrase_prefix".equals(tStr) || "phrasePrefix".equals(currentFieldName)) {
+ type = MatchQuery.Type.PHRASE_PREFIX;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query does not support type " + tStr);
+ }
+ } else if ("analyzer".equals(currentFieldName)) {
+ String analyzer = parser.text();
+ if (parseContext.analysisService().analyzer(analyzer) == null) {
+ throw new QueryParsingException(parseContext.index(), "["+ NAME +"] analyzer [" + parser.text() + "] not found");
+ }
+ multiMatchQuery.setAnalyzer(analyzer);
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("slop".equals(currentFieldName) || "phrase_slop".equals(currentFieldName) || "phraseSlop".equals(currentFieldName)) {
+ multiMatchQuery.setPhraseSlop(parser.intValue());
+ } else if (Fuzziness.FIELD.match(currentFieldName, parseContext.parseFlags())) {
+ multiMatchQuery.setFuzziness(Fuzziness.parse(parser));
+ } else if ("prefix_length".equals(currentFieldName) || "prefixLength".equals(currentFieldName)) {
+ multiMatchQuery.setFuzzyPrefixLength(parser.intValue());
+ } else if ("max_expansions".equals(currentFieldName) || "maxExpansions".equals(currentFieldName)) {
+ multiMatchQuery.setMaxExpansions(parser.intValue());
+ } else if ("operator".equals(currentFieldName)) {
+ String op = parser.text();
+ if ("or".equalsIgnoreCase(op)) {
+ multiMatchQuery.setOccur(BooleanClause.Occur.SHOULD);
+ } else if ("and".equalsIgnoreCase(op)) {
+ multiMatchQuery.setOccur(BooleanClause.Occur.MUST);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "text query requires operator to be either 'and' or 'or', not [" + op + "]");
+ }
+ } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ minimumShouldMatch = parser.textOrNull();
+ } else if ("rewrite".equals(currentFieldName)) {
+ multiMatchQuery.setRewriteMethod(QueryParsers.parseRewriteMethod(parser.textOrNull(), null));
+ } else if ("fuzzy_rewrite".equals(currentFieldName) || "fuzzyRewrite".equals(currentFieldName)) {
+ multiMatchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(parser.textOrNull(), null));
+ } else if ("use_dis_max".equals(currentFieldName) || "useDisMax".equals(currentFieldName)) {
+ multiMatchQuery.setUseDisMax(parser.booleanValue());
+ } else if ("tie_breaker".equals(currentFieldName) || "tieBreaker".equals(currentFieldName)) {
+ multiMatchQuery.setTieBreaker(parser.floatValue());
+ } else if ("cutoff_frequency".equals(currentFieldName)) {
+ multiMatchQuery.setCommonTermsCutoff(parser.floatValue());
+ } else if ("lenient".equals(currentFieldName)) {
+ multiMatchQuery.setLenient(parser.booleanValue());
+ } else if ("zero_terms_query".equals(currentFieldName)) {
+ String zeroTermsDocs = parser.text();
+ if ("none".equalsIgnoreCase(zeroTermsDocs)) {
+ multiMatchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE);
+ } else if ("all".equalsIgnoreCase(zeroTermsDocs)) {
+ multiMatchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL);
+ } else {
+                        throw new QueryParsingException(parseContext.index(), "Unsupported zero_terms_query value [" + zeroTermsDocs + "]");
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+                    throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (value == null) {
+            throw new QueryParsingException(parseContext.index(), "No text specified for [" + NAME + "] query");
+ }
+
+ if (fieldNameWithBoosts.isEmpty()) {
+            throw new QueryParsingException(parseContext.index(), "No fields specified for [" + NAME + "] query");
+ }
+
+ Query query = multiMatchQuery.parse(type, fieldNameWithBoosts, value, minimumShouldMatch);
+ if (query == null) {
+ return null;
+ }
+
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+
+ private void extractFieldAndBoost(QueryParseContext parseContext, XContentParser parser, Map<String, Float> fieldNameWithBoosts) throws IOException {
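+        // A field may carry an inline boost using the "field^boost" syntax
+        // (e.g. "title^2"): scan the raw token characters for '^' and split
+        // the field name from its boost value.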
+ String fField = null;
+ Float fBoost = null;
+ char[] fieldText = parser.textCharacters();
+ int end = parser.textOffset() + parser.textLength();
+ for (int i = parser.textOffset(); i < end; i++) {
+ if (fieldText[i] == '^') {
+ int relativeLocation = i - parser.textOffset();
+ fField = new String(fieldText, parser.textOffset(), relativeLocation);
+ fBoost = Float.parseFloat(new String(fieldText, i + 1, parser.textLength() - relativeLocation - 1));
+ break;
+ }
+ }
+ if (fField == null) {
+ fField = parser.text();
+ }
+
+ if (Regex.isSimpleMatchPattern(fField)) {
+ for (String field : parseContext.mapperService().simpleMatchToIndexNames(fField)) {
+ fieldNameWithBoosts.put(field, fBoost);
+ }
+ } else {
+ fieldNameWithBoosts.put(fField, fBoost);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/MultiTermQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MultiTermQueryBuilder.java
new file mode 100644
index 0000000..f34f6aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/MultiTermQueryBuilder.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+/**
+ * Marker interface for builders of queries that expand into multiple terms,
+ * such as prefix, wildcard and fuzzy queries.
+ */
+public interface MultiTermQueryBuilder extends QueryBuilder {
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java
new file mode 100644
index 0000000..8e86f1f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
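+/**
+ * Builds a {@code nested} filter. A minimal usage sketch (path and field
+ * names are illustrative only):
+ * <pre>
+ *     FilterBuilder filter = FilterBuilders.nestedFilter("obj1",
+ *             QueryBuilders.termQuery("obj1.name", "blue"));
+ * </pre>
+ */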
+public class NestedFilterBuilder extends BaseFilterBuilder {
+
+ private final QueryBuilder queryBuilder;
+ private final FilterBuilder filterBuilder;
+
+ private final String path;
+ private Boolean join;
+
+ private Boolean cache;
+ private String cacheKey;
+ private String filterName;
+
+ public NestedFilterBuilder(String path, QueryBuilder queryBuilder) {
+ this.path = path;
+ this.queryBuilder = queryBuilder;
+ this.filterBuilder = null;
+ }
+
+ public NestedFilterBuilder(String path, FilterBuilder filterBuilder) {
+ this.path = path;
+ this.queryBuilder = null;
+ this.filterBuilder = filterBuilder;
+ }
+
+ public NestedFilterBuilder join(boolean join) {
+ this.join = join;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public NestedFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public NestedFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public NestedFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(NestedFilterParser.NAME);
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ if (join != null) {
+ builder.field("join", join);
+ }
+ builder.field("path", path);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java
new file mode 100644
index 0000000..e4b7ad0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+
+import java.io.IOException;
+
+public class NestedFilterParser implements FilterParser {
+
+ public static final String NAME = "nested";
+
+ @Inject
+ public NestedFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ Filter filter = null;
+ boolean filterFound = false;
+ float boost = 1.0f;
+ boolean join = true;
+ String path = null;
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+ String filterName = null;
+
+        // we need a late binding filter so we can inject a parent nested filter into inner nested queries
+ NestedQueryParser.LateBindingParentFilter currentParentFilterContext = NestedQueryParser.parentFilterContext.get();
+
+ NestedQueryParser.LateBindingParentFilter usAsParentFilter = new NestedQueryParser.LateBindingParentFilter();
+ NestedQueryParser.parentFilterContext.set(usAsParentFilter);
+
+ try {
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ queryFound = true;
+ query = parseContext.parseInnerQuery();
+ } else if ("filter".equals(currentFieldName)) {
+ filterFound = true;
+ filter = parseContext.parseInnerFilter();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("join".equals(currentFieldName)) {
+ join = parser.booleanValue();
+ } else if ("path".equals(currentFieldName)) {
+ path = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [nested] filter has been removed, use nested filter as a facet_filter in the relevant facet");
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound && !filterFound) {
+ throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field");
+ }
+ if (path == null) {
+ throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field");
+ }
+
+ if (query == null && filter == null) {
+ return null;
+ }
+
+ if (filter != null) {
+ query = new XConstantScoreQuery(filter);
+ }
+
+ query.setBoost(boost);
+
+ MapperService.SmartNameObjectMapper mapper = parseContext.smartObjectMapper(path);
+ if (mapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
+ }
+ ObjectMapper objectMapper = mapper.mapper();
+ if (objectMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
+ }
+ if (!objectMapper.nested().isNested()) {
+ throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type");
+ }
+
+ Filter childFilter = parseContext.cacheFilter(objectMapper.nestedTypeFilter(), null);
+ usAsParentFilter.filter = childFilter;
+ // wrap the child query to only work on the nested path type
+ query = new XFilteredQuery(query, childFilter);
+
+ Filter parentFilter = currentParentFilterContext;
+ if (parentFilter == null) {
+ parentFilter = NonNestedDocsFilter.INSTANCE;
+ // don't do special parent filtering, since we might have same nested mapping on two different types
+ //if (mapper.hasDocMapper()) {
+ // // filter based on the type...
+ // parentFilter = mapper.docMapper().typeFilter();
+ //}
+ parentFilter = parseContext.cacheFilter(parentFilter, null);
+ }
+
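+            // join == true maps matching nested (child) docs back onto their
+            // parent documents via a block join; join == false leaves the
+            // filter matching the nested docs themselves.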
+ Filter nestedFilter;
+ if (join) {
+ ToParentBlockJoinQuery joinQuery = new ToParentBlockJoinQuery(query, parentFilter, ScoreMode.None);
+ nestedFilter = Queries.wrap(joinQuery);
+ } else {
+ nestedFilter = Queries.wrap(query);
+ }
+
+ if (cache) {
+ nestedFilter = parseContext.cacheFilter(nestedFilter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, nestedFilter);
+ }
+ return nestedFilter;
+ } finally {
+ // restore the thread local one...
+ NestedQueryParser.parentFilterContext.set(currentParentFilterContext);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
new file mode 100644
index 0000000..88ded2a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
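+/**
+ * Builds a {@code nested} query. A minimal usage sketch (path and field
+ * names are illustrative only):
+ * <pre>
+ *     QueryBuilder query = QueryBuilders.nestedQuery("obj1",
+ *             QueryBuilders.termQuery("obj1.name", "blue"))
+ *         .scoreMode("avg");
+ * </pre>
+ */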
+public class NestedQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<NestedQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+ private final FilterBuilder filterBuilder;
+
+ private final String path;
+
+ private String scoreMode;
+
+ private float boost = 1.0f;
+
+ private String queryName;
+
+ public NestedQueryBuilder(String path, QueryBuilder queryBuilder) {
+ this.path = path;
+ this.queryBuilder = queryBuilder;
+ this.filterBuilder = null;
+ }
+
+ public NestedQueryBuilder(String path, FilterBuilder filterBuilder) {
+ this.path = path;
+ this.queryBuilder = null;
+ this.filterBuilder = filterBuilder;
+ }
+
+ /**
+     * The score mode; one of {@code avg} (the default), {@code total} (or {@code sum}), {@code max} or {@code none}.
+ */
+ public NestedQueryBuilder scoreMode(String scoreMode) {
+ this.scoreMode = scoreMode;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public NestedQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public NestedQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(NestedQueryParser.NAME);
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ builder.field("path", path);
+ if (scoreMode != null) {
+ builder.field("score_mode", scoreMode);
+ }
+ if (boost != 1.0f) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java
new file mode 100644
index 0000000..3f6c162
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+
+import java.io.IOException;
+
+public class NestedQueryParser implements QueryParser {
+
+ public static final String NAME = "nested";
+
+ @Inject
+ public NestedQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ boolean queryFound = false;
+ Filter filter = null;
+ boolean filterFound = false;
+ float boost = 1.0f;
+ String path = null;
+ ScoreMode scoreMode = ScoreMode.Avg;
+ String queryName = null;
+
+        // we need a late binding filter so we can inject a parent nested filter into inner nested queries
+ LateBindingParentFilter currentParentFilterContext = parentFilterContext.get();
+
+ LateBindingParentFilter usAsParentFilter = new LateBindingParentFilter();
+ parentFilterContext.set(usAsParentFilter);
+
+ try {
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ queryFound = true;
+ query = parseContext.parseInnerQuery();
+ } else if ("filter".equals(currentFieldName)) {
+ filterFound = true;
+ filter = parseContext.parseInnerFilter();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[nested] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("path".equals(currentFieldName)) {
+ path = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [nested] query has been removed, use nested filter as a facet_filter in the relevant facet");
+ } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
+ String sScoreMode = parser.text();
+ if ("avg".equals(sScoreMode)) {
+ scoreMode = ScoreMode.Avg;
+ } else if ("max".equals(sScoreMode)) {
+ scoreMode = ScoreMode.Max;
+ } else if ("total".equals(sScoreMode) || "sum".equals(sScoreMode)) {
+ scoreMode = ScoreMode.Total;
+ } else if ("none".equals(sScoreMode)) {
+ scoreMode = ScoreMode.None;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "illegal score_mode for nested query [" + sScoreMode + "]");
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[nested] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound && !filterFound) {
+ throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field");
+ }
+ if (path == null) {
+ throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field");
+ }
+
+ if (query == null && filter == null) {
+ return null;
+ }
+
+ if (filter != null) {
+ query = new XConstantScoreQuery(filter);
+ }
+
+ MapperService.SmartNameObjectMapper mapper = parseContext.smartObjectMapper(path);
+ if (mapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
+ }
+ ObjectMapper objectMapper = mapper.mapper();
+ if (objectMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
+ }
+ if (!objectMapper.nested().isNested()) {
+ throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type");
+ }
+
+ Filter childFilter = parseContext.cacheFilter(objectMapper.nestedTypeFilter(), null);
+ usAsParentFilter.filter = childFilter;
+ // wrap the child query to only work on the nested path type
+ query = new XFilteredQuery(query, childFilter);
+
+ Filter parentFilter = currentParentFilterContext;
+ if (parentFilter == null) {
+ parentFilter = NonNestedDocsFilter.INSTANCE;
+ // don't do special parent filtering, since we might have same nested mapping on two different types
+ //if (mapper.hasDocMapper()) {
+ // // filter based on the type...
+ // parentFilter = mapper.docMapper().typeFilter();
+ //}
+ parentFilter = parseContext.cacheFilter(parentFilter, null);
+ }
+
+ ToParentBlockJoinQuery joinQuery = new ToParentBlockJoinQuery(query, parentFilter, scoreMode);
+ joinQuery.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, joinQuery);
+ }
+ return joinQuery;
+ } finally {
+ // restore the thread local one...
+ parentFilterContext.set(currentParentFilterContext);
+ }
+ }
+
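+    // Carries the nearest enclosing nested scope while a query is being parsed,
+    // so an inner nested query or filter can resolve its parent filter once the
+    // outer scope has been fully parsed.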
+ static ThreadLocal<LateBindingParentFilter> parentFilterContext = new ThreadLocal<LateBindingParentFilter>();
+
+ static class LateBindingParentFilter extends Filter {
+
+ Filter filter;
+
+ @Override
+ public int hashCode() {
+ return filter.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+            if (!(obj instanceof LateBindingParentFilter)) {
+                return false;
+            }
+            return filter.equals(((LateBindingParentFilter) obj).filter);
+ }
+
+ @Override
+ public String toString() {
+ return filter.toString();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits liveDocs) throws IOException {
+ //LUCENE 4 UPGRADE just passing on ctx and live docs here
+ return filter.getDocIdSet(ctx, liveDocs);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java
new file mode 100644
index 0000000..f835b02
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that matches documents that do not match the filter it wraps.
+ *
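+ * A minimal usage sketch (field name illustrative only):
+ * <pre>
+ *     FilterBuilder filter = FilterBuilders.notFilter(
+ *             FilterBuilders.termFilter("status", "deleted"));
+ * </pre>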
+ */
+public class NotFilterBuilder extends BaseFilterBuilder {
+
+ private FilterBuilder filter;
+
+ private Boolean cache;
+
+ private String filterName;
+
+ public NotFilterBuilder(FilterBuilder filter) {
+ this.filter = filter;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public NotFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public NotFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(NotFilterParser.NAME);
+ builder.field("filter");
+ filter.toXContent(builder, params);
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ builder.endObject();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java
new file mode 100644
index 0000000..1f13aa0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NotFilterParser implements FilterParser {
+
+ public static final String NAME = "not";
+
+ @Inject
+ public NotFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Filter filter = null;
+ boolean filterFound = false;
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
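+        // Accepts either an explicit "filter" element or a filter given directly
+        // by its type name, e.g. {"not": {"term": {...}}}.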
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filter = parseContext.parseInnerFilter();
+ filterFound = true;
+ } else {
+ filterFound = true;
+                    // it's the filter, and the name is the field
+ filter = parseContext.parseInnerFilter(currentFieldName);
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ filterFound = true;
+                // it's the filter, and the name is the field
+ filter = parseContext.parseInnerFilter(currentFieldName);
+ } else if (token.isValue()) {
+ if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[not] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (!filterFound) {
+ throw new QueryParsingException(parseContext.index(), "filter is required when using `not` filter");
+ }
+
+ if (filter == null) {
+ return null;
+ }
+
+ Filter notFilter = new NotFilter(filter);
+ if (cache) {
+ notFilter = parseContext.cacheFilter(notFilter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, notFilter);
+ }
+ return notFilter;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/NumericRangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NumericRangeFilterBuilder.java
new file mode 100644
index 0000000..c29bc65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NumericRangeFilterBuilder.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that restricts search results to values that are within the given numeric range.
+ * <p/>
+ * <p>Uses the field data cache (loading all the values for the specified field into memory).
+ *
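+ * A minimal usage sketch (field name illustrative only):
+ * <pre>
+ *     FilterBuilder filter = FilterBuilders.numericRangeFilter("age")
+ *             .gte(10)
+ *             .lt(20);
+ * </pre>
+ *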
+ * @deprecated This filter will be removed at some point in time in favor of the range filter with the execution
+ * mode <code>fielddata</code>.
+ */
+@Deprecated
+public class NumericRangeFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private Object from;
+
+ private Object to;
+
+ private boolean includeLower = true;
+
+ private boolean includeUpper = true;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ /**
+ * A filter that restricts search results to values that are within the given range.
+ *
+ * @param name The field name
+ */
+ public NumericRangeFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder from(Object from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder from(int from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder from(long from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder from(float from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder from(double from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gt(Object from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gt(int from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gt(long from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gt(float from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gt(double from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gte(Object from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gte(int from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gte(long from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gte(float from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder gte(double from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder to(Object to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder to(int to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder to(long to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder to(float to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder to(double to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lt(Object to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lt(int to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lt(long to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lt(float to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lt(double to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lte(Object to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lte(int to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lte(long to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lte(float to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public NumericRangeFilterBuilder lte(double to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * Should the lower bound be included or not. Defaults to <tt>true</tt>.
+ */
+ public NumericRangeFilterBuilder includeLower(boolean includeLower) {
+ this.includeLower = includeLower;
+ return this;
+ }
+
+ /**
+ * Should the upper bound be included or not. Defaults to <tt>true</tt>.
+ */
+ public NumericRangeFilterBuilder includeUpper(boolean includeUpper) {
+ this.includeUpper = includeUpper;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public NumericRangeFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public NumericRangeFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public NumericRangeFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(NumericRangeFilterParser.NAME);
+
+ builder.startObject(name);
+ builder.field("from", from);
+ builder.field("to", to);
+ builder.field("include_lower", includeLower);
+ builder.field("include_upper", includeUpper);
+ builder.endObject();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+
+ builder.endObject();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/NumericRangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/NumericRangeFilterParser.java
new file mode 100644
index 0000000..f84db72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/NumericRangeFilterParser.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ *
+ */
+@Deprecated
+public class NumericRangeFilterParser implements FilterParser {
+
+ public static final String NAME = "numeric_range";
+
+ @Inject
+ public NumericRangeFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "numericRange"};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+        boolean cache = false; // defaults to false, since it uses the field data cache
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+ String from = null;
+ String to = null;
+ boolean includeLower = true;
+ boolean includeUpper = true;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("from".equals(currentFieldName)) {
+ from = parser.textOrNull();
+ } else if ("to".equals(currentFieldName)) {
+ to = parser.textOrNull();
+ } else if ("include_lower".equals(currentFieldName) || "includeLower".equals(currentFieldName)) {
+ includeLower = parser.booleanValue();
+ } else if ("include_upper".equals(currentFieldName) || "includeUpper".equals(currentFieldName)) {
+ includeUpper = parser.booleanValue();
+ } else if ("gt".equals(currentFieldName)) {
+ from = parser.textOrNull();
+ includeLower = false;
+ } else if ("gte".equals(currentFieldName) || "ge".equals(currentFieldName)) {
+ from = parser.textOrNull();
+ includeLower = true;
+ } else if ("lt".equals(currentFieldName)) {
+ to = parser.textOrNull();
+ includeUpper = false;
+ } else if ("lte".equals(currentFieldName) || "le".equals(currentFieldName)) {
+ to = parser.textOrNull();
+ includeUpper = true;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[numeric_range] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[numeric_range] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+
+ if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "failed to find mapping for field [" + fieldName + "]");
+ }
+
+ FieldMapper mapper = smartNameFieldMappers.mapper();
+ if (!(mapper instanceof NumberFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a numeric type");
+ }
+ Filter filter = ((NumberFieldMapper) mapper).rangeFilter(parseContext.fieldData(), from, to, includeLower, includeUpper, parseContext);
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java
new file mode 100644
index 0000000..9a68776
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A filter that matches documents matching at least one of the provided filters.
+ *
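+ * A minimal usage sketch (field names illustrative only):
+ * <pre>
+ *     FilterBuilder filter = FilterBuilders.orFilter(
+ *             FilterBuilders.termFilter("color", "red"),
+ *             FilterBuilders.termFilter("color", "blue"));
+ * </pre>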
+ */
+public class OrFilterBuilder extends BaseFilterBuilder {
+
+ private ArrayList<FilterBuilder> filters = Lists.newArrayList();
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ public OrFilterBuilder(FilterBuilder... filters) {
+ for (FilterBuilder filter : filters) {
+ this.filters.add(filter);
+ }
+ }
+
+ /**
+ * Adds a filter to the list of filters to "or".
+ */
+ public OrFilterBuilder add(FilterBuilder filterBuilder) {
+ filters.add(filterBuilder);
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public OrFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public OrFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ public OrFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(OrFilterParser.NAME);
+ builder.startArray("filters");
+ for (FilterBuilder filter : filters) {
+ filter.toXContent(builder, params);
+ }
+ builder.endArray();
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ builder.endObject();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java
new file mode 100644
index 0000000..28a7ce3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.OrFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ *
+ */
+public class OrFilterParser implements FilterParser {
+
+ public static final String NAME = "or";
+
+ @Inject
+ public OrFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ ArrayList<Filter> filters = newArrayList();
+ boolean filtersFound = false;
+
+ boolean cache = false;
+ CacheKeyFilter.Key cacheKey = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token = parser.currentToken();
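+        // Two forms are accepted: a bare array of filters, i.e. {"or": [...]},
+        // or an object carrying a "filters" array plus optional _cache, _name
+        // and _cache_key settings.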
+ if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ filtersFound = true;
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ filters.add(filter);
+ }
+ }
+ } else {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("filters".equals(currentFieldName)) {
+ filtersFound = true;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ filters.add(filter);
+ }
+ }
+ } else {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ filtersFound = true;
+ Filter filter = parseContext.parseInnerFilter();
+ if (filter != null) {
+ filters.add(filter);
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[or] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ }
+
+ if (!filtersFound) {
+            throw new QueryParsingException(parseContext.index(), "[or] filter requires 'filters' to be set on it");
+ }
+
+ if (filters.isEmpty()) {
+ return null;
+ }
+
+        // the or filter itself is cheap to build; it is only cached when explicitly requested
+ Filter filter = new OrFilter(filters);
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/ParsedFilter.java b/src/main/java/org/elasticsearch/index/query/ParsedFilter.java
new file mode 100644
index 0000000..abd9280
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ParsedFilter.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.Filter;
+
+public class ParsedFilter {
+
+ private final Filter filter;
+ private final ImmutableMap<String, Filter> namedFilters;
+
+ public ParsedFilter(Filter filter, ImmutableMap<String, Filter> namedFilters) {
+ assert filter != null;
+ assert namedFilters != null;
+ this.filter = filter;
+ this.namedFilters = namedFilters;
+ }
+
+ public Filter filter() {
+ return filter;
+ }
+
+ public ImmutableMap<String, Filter> namedFilters() {
+ return namedFilters;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/ParsedQuery.java b/src/main/java/org/elasticsearch/index/query/ParsedQuery.java
new file mode 100644
index 0000000..0bbae92
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ParsedQuery.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.search.Queries;
+
+/**
+ * The result of parsing a query.
+ *
+ *
+ */
+public class ParsedQuery {
+
+ private final Query query;
+
+ private final ImmutableMap<String, Filter> namedFilters;
+
+ public ParsedQuery(Query query, ImmutableMap<String, Filter> namedFilters) {
+ this.query = query;
+ this.namedFilters = namedFilters;
+ }
+
+ public ParsedQuery(Query query, ParsedQuery parsedQuery) {
+ this.query = query;
+ this.namedFilters = parsedQuery.namedFilters;
+ }
+
+ /**
+ * The query parsed.
+ */
+ public Query query() {
+ return this.query;
+ }
+
+ public ImmutableMap<String, Filter> namedFilters() {
+ return this.namedFilters;
+ }
+
+ public static ParsedQuery parsedMatchAllQuery() {
+ return new ParsedQuery(Queries.newMatchAllQuery(), ImmutableMap.<String, Filter>of());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java
new file mode 100644
index 0000000..bb41e4f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that restricts search results to values that have a matching prefix in a given
+ * field.
+ *
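+ * A minimal usage sketch (field name illustrative only):
+ * <pre>
+ *     FilterBuilder filter = FilterBuilders.prefixFilter("user", "ki");
+ * </pre>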
+ */
+public class PrefixFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private final String prefix;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ /**
+ * A filter that restricts search results to values that have a matching prefix in a given
+ * field.
+ *
+ * @param name The field name
+ * @param prefix The prefix
+ */
+ public PrefixFilterBuilder(String name, String prefix) {
+ this.name = name;
+ this.prefix = prefix;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public PrefixFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+     * Sets whether the filter should be cached. Defaults to <tt>false</tt>.
+ */
+ public PrefixFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
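+    /**
+     * Sets an explicit key to use for the filter cache entry, instead of the one
+     * derived from the filter itself.
+     */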
+ public PrefixFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(PrefixFilterParser.NAME);
+ builder.field(name, prefix);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java
new file mode 100644
index 0000000..bcd2ba6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.PrefixFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * Parser for the <tt>prefix</tt> filter.
+ */
+public class PrefixFilterParser implements FilterParser {
+
+ public static final String NAME = "prefix";
+
+ @Inject
+ public PrefixFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ boolean cache = true;
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+ Object value = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ fieldName = currentFieldName;
+ value = parser.objectBytes();
+ }
+ }
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for prefix filter");
+ }
+
+ Filter filter = null;
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
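+            // the field name may carry an explicit type prefix (e.g. "type.field"); if it does,
+            // resolve the prefix filter with that document mapper's type set on the parse context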
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ filter = smartNameFieldMappers.mapper().prefixFilter(value, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ filter = smartNameFieldMappers.mapper().prefixFilter(value, parseContext);
+ }
+ }
+ if (filter == null) {
+ filter = new PrefixFilter(new Term(fieldName, BytesRefs.toBytesRef(value)));
+ }
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java
new file mode 100644
index 0000000..f4f0cf3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A Query that matches documents containing terms with a specified prefix.
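+ * <p>
+ * A minimal usage sketch (illustrative values):
+ * <pre>
+ * PrefixQueryBuilder query = new PrefixQueryBuilder("user", "ki")
+ *         .boost(2.0f)
+ *         .queryName("ki_users");
+ * </pre>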
+ */
+public class PrefixQueryBuilder extends BaseQueryBuilder implements MultiTermQueryBuilder, BoostableQueryBuilder<PrefixQueryBuilder> {
+
+ private final String name;
+
+ private final String prefix;
+
+ private float boost = -1;
+
+ private String rewrite;
+
+ private String queryName;
+
+ /**
+ * A Query that matches documents containing terms with a specified prefix.
+ *
+ * @param name The name of the field
+     * @param prefix The prefix text to match
+ */
+ public PrefixQueryBuilder(String name, String prefix) {
+ this.name = name;
+ this.prefix = prefix;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public PrefixQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
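+    /**
+     * Sets the rewrite method used when turning this multi-term query into a primitive query
+     * (for example <tt>constant_score_auto</tt>).
+     */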
+ public PrefixQueryBuilder rewrite(String rewrite) {
+ this.rewrite = rewrite;
+ return this;
+ }
+
+ /**
+     * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public PrefixQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(PrefixQueryParser.NAME);
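+        // the short form can only be used when no extra parameters need to be serialized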
+        if (boost == -1 && rewrite == null && queryName == null) {
+ builder.field(name, prefix);
+ } else {
+ builder.startObject(name);
+ builder.field("prefix", prefix);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (rewrite != null) {
+ builder.field("rewrite", rewrite);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java b/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java
new file mode 100644
index 0000000..30f5877
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * Parser for the <tt>prefix</tt> query.
+ */
+public class PrefixQueryParser implements QueryParser {
+
+ public static final String NAME = "prefix";
+
+ @Inject
+ public PrefixQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[prefix] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+ String rewriteMethod = null;
+ String queryName = null;
+
+ Object value = null;
+ float boost = 1.0f;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("prefix".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("value".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("rewrite".equals(currentFieldName)) {
+ rewriteMethod = parser.textOrNull();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[prefix] query does not support [" + currentFieldName + "]");
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.text();
+ parser.nextToken();
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for prefix query");
+ }
+
+ MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewriteMethod, null);
+
+ Query query = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ query = smartNameFieldMappers.mapper().prefixQuery(value, method, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ query = smartNameFieldMappers.mapper().prefixQuery(value, method, parseContext);
+ }
+ }
+ if (query == null) {
+ PrefixQuery prefixQuery = new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)));
+ if (method != null) {
+ prefixQuery.setRewriteMethod(method);
+ }
+ query = prefixQuery;
+ }
+ query.setBoost(boost);
+ query = wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/QueryBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryBuilder.java
new file mode 100644
index 0000000..c4d3588
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryBuilder.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentType;
+
+/**
+ * The base interface for all query builders.
+ */
+public interface QueryBuilder extends ToXContent {
+
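+    /**
+     * Builds the query and returns its bytes representation using a default content type.
+     */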
+ BytesReference buildAsBytes() throws ElasticsearchException;
+
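+    /**
+     * Builds the query and returns its bytes representation in the given content type.
+     */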
+ BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException;
+}
diff --git a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
new file mode 100644
index 0000000..e63f9da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
@@ -0,0 +1,787 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+
+import java.util.Collection;
+
+/**
+ * A static factory for simple "import static" usage.
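+ * <p>
+ * For example (illustrative field names and values):
+ * <pre>
+ * import static org.elasticsearch.index.query.QueryBuilders.*;
+ *
+ * QueryBuilder query = boolQuery()
+ *         .must(termQuery("user", "kimchy"))
+ *         .must(prefixQuery("title", "elast"));
+ * </pre>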
+ */
+public abstract class QueryBuilders {
+
+ /**
+     * A query that matches all documents.
+ */
+ public static MatchAllQueryBuilder matchAllQuery() {
+ return new MatchAllQueryBuilder();
+ }
+
+ /**
+ * Creates a match query with type "BOOLEAN" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ */
+ public static MatchQueryBuilder matchQuery(String name, Object text) {
+ return new MatchQueryBuilder(name, text).type(MatchQueryBuilder.Type.BOOLEAN);
+ }
+
+ /**
+ * Creates a common query for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ */
+ public static CommonTermsQueryBuilder commonTerms(String name, Object text) {
+ return new CommonTermsQueryBuilder(name, text);
+ }
+
+ /**
+     * Creates a multi match query with type "BOOLEAN" for the provided field names and text.
+ *
+ * @param fieldNames The field names.
+ * @param text The query text (to be analyzed).
+ */
+ public static MultiMatchQueryBuilder multiMatchQuery(Object text, String... fieldNames) {
+ return new MultiMatchQueryBuilder(text, fieldNames); // BOOLEAN is the default
+ }
+
+ /**
+ * Creates a text query with type "PHRASE" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ * @deprecated use {@link #textPhraseQuery(String, Object)} instead
+ */
+ public static MatchQueryBuilder textPhrase(String name, Object text) {
+ return textPhraseQuery(name, text);
+ }
+
+ /**
+ * Creates a text query with type "PHRASE" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ * @deprecated Use {@link #matchPhraseQuery(String, Object)}
+ */
+ public static MatchQueryBuilder textPhraseQuery(String name, Object text) {
+ return new MatchQueryBuilder(name, text).type(MatchQueryBuilder.Type.PHRASE);
+ }
+
+ /**
+ * Creates a text query with type "PHRASE" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ */
+ public static MatchQueryBuilder matchPhraseQuery(String name, Object text) {
+ return new MatchQueryBuilder(name, text).type(MatchQueryBuilder.Type.PHRASE);
+ }
+
+ /**
+ * Creates a text query with type "PHRASE_PREFIX" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ * @deprecated use {@link #textPhrasePrefixQuery(String, Object)} instead
+ */
+ public static MatchQueryBuilder textPhrasePrefix(String name, Object text) {
+ return textPhrasePrefixQuery(name, text);
+ }
+
+ /**
+ * Creates a text query with type "PHRASE_PREFIX" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ * @deprecated Use {@link #matchPhrasePrefixQuery(String, Object)}
+ */
+ public static MatchQueryBuilder textPhrasePrefixQuery(String name, Object text) {
+ return new MatchQueryBuilder(name, text).type(MatchQueryBuilder.Type.PHRASE_PREFIX);
+ }
+
+ /**
+ * Creates a match query with type "PHRASE_PREFIX" for the provided field name and text.
+ *
+ * @param name The field name.
+ * @param text The query text (to be analyzed).
+ */
+ public static MatchQueryBuilder matchPhrasePrefixQuery(String name, Object text) {
+ return new MatchQueryBuilder(name, text).type(MatchQueryBuilder.Type.PHRASE_PREFIX);
+ }
+
+ /**
+ * A query that generates the union of documents produced by its sub-queries, and that scores each document
+ * with the maximum score for that document as produced by any sub-query, plus a tie breaking increment for any
+ * additional matching sub-queries.
+ */
+ public static DisMaxQueryBuilder disMaxQuery() {
+ return new DisMaxQueryBuilder();
+ }
+
+ /**
+ * Constructs a query that will match only specific ids within types.
+ *
+ * @param types The mapping/doc type
+ */
+ public static IdsQueryBuilder idsQuery(@Nullable String... types) {
+ return new IdsQueryBuilder(types);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, String value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, int value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, long value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, float value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, double value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, boolean value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing a term.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static TermQueryBuilder termQuery(String name, Object value) {
+ return new TermQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents using fuzzy query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static FuzzyQueryBuilder fuzzyQuery(String name, String value) {
+ return new FuzzyQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents using fuzzy query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public static FuzzyQueryBuilder fuzzyQuery(String name, Object value) {
+ return new FuzzyQueryBuilder(name, value);
+ }
+
+ /**
+ * A Query that matches documents containing terms with a specified prefix.
+ *
+ * @param name The name of the field
+ * @param prefix The prefix query
+ */
+ public static PrefixQueryBuilder prefixQuery(String name, String prefix) {
+ return new PrefixQueryBuilder(name, prefix);
+ }
+
+ /**
+     * A Query that matches documents within a range of terms.
+ *
+ * @param name The field name
+ */
+ public static RangeQueryBuilder rangeQuery(String name) {
+ return new RangeQueryBuilder(name);
+ }
+
+ /**
+ * Implements the wildcard search query. Supported wildcards are <tt>*</tt>, which
+ * matches any character sequence (including the empty one), and <tt>?</tt>,
+ * which matches any single character. Note this query can be slow, as it
+ * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries,
+ * a Wildcard term should not start with one of the wildcards <tt>*</tt> or
+ * <tt>?</tt>.
+ *
+ * @param name The field name
+ * @param query The wildcard query string
+ */
+ public static WildcardQueryBuilder wildcardQuery(String name, String query) {
+ return new WildcardQueryBuilder(name, query);
+ }
+
+ /**
+ * A Query that matches documents containing terms with a specified regular expression.
+ *
+ * @param name The name of the field
+ * @param regexp The regular expression
+ */
+ public static RegexpQueryBuilder regexpQuery(String name, String regexp) {
+ return new RegexpQueryBuilder(name, regexp);
+ }
+
+ /**
+     * A query that parses a query string and runs it. It operates in two modes. In the first,
+     * when no field is added (using {@link QueryStringQueryBuilder#field(String)}), the query runs once and
+     * non-prefixed terms use the {@link QueryStringQueryBuilder#defaultField(String)} set. In the second,
+     * when one or more fields are added (using {@link QueryStringQueryBuilder#field(String)}), the parsed query
+     * runs against the provided fields, combined either with DisMax or a plain boolean query (see
+     * {@link QueryStringQueryBuilder#useDisMax(boolean)}).
+ *
+ * @param queryString The query string to run
+ */
+ public static QueryStringQueryBuilder queryString(String queryString) {
+ return new QueryStringQueryBuilder(queryString);
+ }
+
+ /**
+     * A query that acts similarly to a query_string query, but won't throw
+ * exceptions for any weird string syntax. See
+ * {@link org.apache.lucene.queryparser.XSimpleQueryParser} for the full
+ * supported syntax.
+ */
+ public static SimpleQueryStringBuilder simpleQueryString(String queryString) {
+ return new SimpleQueryStringBuilder(queryString);
+ }
+
+ /**
+ * The BoostingQuery class can be used to effectively demote results that match a given query.
+ * Unlike the "NOT" clause, this still selects documents that contain undesirable terms,
+     * but reduces their overall score.
+ */
+ public static BoostingQueryBuilder boostingQuery() {
+ return new BoostingQueryBuilder();
+ }
+
+ /**
+ * A Query that matches documents matching boolean combinations of other queries.
+ */
+ public static BoolQueryBuilder boolQuery() {
+ return new BoolQueryBuilder();
+ }
+
+ public static SpanTermQueryBuilder spanTermQuery(String name, String value) {
+ return new SpanTermQueryBuilder(name, value);
+ }
+
+ public static SpanTermQueryBuilder spanTermQuery(String name, int value) {
+ return new SpanTermQueryBuilder(name, value);
+ }
+
+ public static SpanTermQueryBuilder spanTermQuery(String name, long value) {
+ return new SpanTermQueryBuilder(name, value);
+ }
+
+ public static SpanTermQueryBuilder spanTermQuery(String name, float value) {
+ return new SpanTermQueryBuilder(name, value);
+ }
+
+ public static SpanTermQueryBuilder spanTermQuery(String name, double value) {
+ return new SpanTermQueryBuilder(name, value);
+ }
+
+ public static SpanFirstQueryBuilder spanFirstQuery(SpanQueryBuilder match, int end) {
+ return new SpanFirstQueryBuilder(match, end);
+ }
+
+ public static SpanNearQueryBuilder spanNearQuery() {
+ return new SpanNearQueryBuilder();
+ }
+
+ public static SpanNotQueryBuilder spanNotQuery() {
+ return new SpanNotQueryBuilder();
+ }
+
+ public static SpanOrQueryBuilder spanOrQuery() {
+ return new SpanOrQueryBuilder();
+ }
+
+ /**
+ * Creates a {@link SpanQueryBuilder} which allows having a sub query
+ * which implements {@link MultiTermQueryBuilder}. This is useful for
+ * having e.g. wildcard or fuzzy queries inside spans.
+ *
+ * @param multiTermQueryBuilder The {@link MultiTermQueryBuilder} that
+ * backs the created builder.
+     * @return the created {@link SpanMultiTermQueryBuilder}
+     */
+    public static SpanMultiTermQueryBuilder spanMultiTermQueryBuilder(MultiTermQueryBuilder multiTermQueryBuilder) {
+ return new SpanMultiTermQueryBuilder(multiTermQueryBuilder);
+ }
+
+ public static FieldMaskingSpanQueryBuilder fieldMaskingSpanQuery(SpanQueryBuilder query, String field) {
+ return new FieldMaskingSpanQueryBuilder(query, field);
+ }
+
+ /**
+ * A query that applies a filter to the results of another query.
+ *
+ * @param queryBuilder The query to apply the filter to
+ * @param filterBuilder The filter to apply on the query
+     * @deprecated use {@link #filteredQuery(QueryBuilder, FilterBuilder)} instead
+ */
+ public static FilteredQueryBuilder filtered(QueryBuilder queryBuilder, @Nullable FilterBuilder filterBuilder) {
+ return new FilteredQueryBuilder(queryBuilder, filterBuilder);
+ }
+
+ /**
+ * A query that applies a filter to the results of another query.
+ *
+ * @param queryBuilder The query to apply the filter to
+ * @param filterBuilder The filter to apply on the query
+ */
+ public static FilteredQueryBuilder filteredQuery(QueryBuilder queryBuilder, @Nullable FilterBuilder filterBuilder) {
+ return new FilteredQueryBuilder(queryBuilder, filterBuilder);
+ }
+
+ /**
+ * A query that wraps a filter and simply returns a constant score equal to the
+ * query boost for every document in the filter.
+ *
+ * @param filterBuilder The filter to wrap in a constant score query
+ */
+ public static ConstantScoreQueryBuilder constantScoreQuery(FilterBuilder filterBuilder) {
+ return new ConstantScoreQueryBuilder(filterBuilder);
+ }
+
+ /**
+ * A query that wraps another query and simply returns a constant score equal to the
+ * query boost for every document in the query.
+ *
+ * @param queryBuilder The query to wrap in a constant score query
+ */
+ public static ConstantScoreQueryBuilder constantScoreQuery(QueryBuilder queryBuilder) {
+ return new ConstantScoreQueryBuilder(queryBuilder);
+ }
+
+ /**
+     * A query that simply applies the boost factor to the wrapped query (multiplies it).
+ *
+ * @param queryBuilder The query to apply the boost factor to.
+ * @deprecated use {@link #functionScoreQuery(QueryBuilder)} instead
+ */
+ public static CustomBoostFactorQueryBuilder customBoostFactorQuery(QueryBuilder queryBuilder) {
+ return new CustomBoostFactorQueryBuilder(queryBuilder);
+ }
+
+ /**
+     * A query that allows defining a custom scoring script.
+ *
+ * @param queryBuilder The query to custom score
+ * @deprecated use {@link #functionScoreQuery(QueryBuilder)} instead
+ */
+ public static CustomScoreQueryBuilder customScoreQuery(QueryBuilder queryBuilder) {
+ return new CustomScoreQueryBuilder(queryBuilder);
+ }
+
+ /**
+     * A query that allows defining a custom scoring script that computes the score for each
+     * document matching the specified filter.
+ *
+ * @param filterBuilder The filter that defines which documents are scored by a script.
+ * @deprecated use {@link #functionScoreQuery(QueryBuilder)} instead
+ */
+ public static CustomScoreQueryBuilder customScoreQuery(FilterBuilder filterBuilder) {
+ return new CustomScoreQueryBuilder(filterBuilder);
+ }
+
+ /**
+ * @deprecated use {@link #functionScoreQuery(QueryBuilder)} instead
+ */
+ public static CustomFiltersScoreQueryBuilder customFiltersScoreQuery(QueryBuilder queryBuilder) {
+ return new CustomFiltersScoreQueryBuilder(queryBuilder);
+ }
+
+ /**
+     * A query that allows defining a custom scoring function.
+ *
+ * @param queryBuilder The query to custom score
+ */
+ public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder) {
+ return new FunctionScoreQueryBuilder(queryBuilder);
+ }
+
+ /**
+     * A query that allows defining a custom scoring function.
+ */
+ public static FunctionScoreQueryBuilder functionScoreQuery() {
+ return new FunctionScoreQueryBuilder();
+ }
+
+ /**
+     * A query that allows defining a custom scoring function.
+ *
+ * @param function The function builder used to custom score
+ */
+ public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder function) {
+ return new FunctionScoreQueryBuilder(function);
+ }
+
+ /**
+     * A query that allows defining a custom scoring function.
+ *
+ * @param queryBuilder The query to custom score
+ * @param function The function builder used to custom score
+ */
+ public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, ScoreFunctionBuilder function) {
+ return (new FunctionScoreQueryBuilder(queryBuilder)).add(function);
+ }
+
+ /**
+     * A query that allows defining a custom scoring function.
+ *
+     * @param filterBuilder The filter to custom score
+ * @param function The function builder used to custom score
+ */
+ public static FunctionScoreQueryBuilder functionScoreQuery(FilterBuilder filterBuilder, ScoreFunctionBuilder function) {
+ return (new FunctionScoreQueryBuilder(filterBuilder)).add(function);
+ }
+
+ /**
+     * A query that allows defining a custom scoring function.
+ *
+ * @param filterBuilder The filterBuilder to custom score
+ */
+ public static FunctionScoreQueryBuilder functionScoreQuery(FilterBuilder filterBuilder) {
+ return new FunctionScoreQueryBuilder(filterBuilder);
+ }
+
+    /**
+ * A more like this query that finds documents that are "like" the provided {@link MoreLikeThisQueryBuilder#likeText(String)}
+ * which is checked against the fields the query is constructed with.
+ *
+ * @param fields The fields to run the query against
+ */
+ public static MoreLikeThisQueryBuilder moreLikeThisQuery(String... fields) {
+ return new MoreLikeThisQueryBuilder(fields);
+ }
+
+ /**
+ * A more like this query that finds documents that are "like" the provided {@link MoreLikeThisQueryBuilder#likeText(String)}
+ * which is checked against the "_all" field.
+ */
+ public static MoreLikeThisQueryBuilder moreLikeThisQuery() {
+ return new MoreLikeThisQueryBuilder();
+ }
+
+ /**
+ * A fuzzy like this query that finds documents that are "like" the provided {@link FuzzyLikeThisQueryBuilder#likeText(String)}
+ * which is checked against the fields the query is constructed with.
+ *
+ * @param fields The fields to run the query against
+ */
+ public static FuzzyLikeThisQueryBuilder fuzzyLikeThisQuery(String... fields) {
+ return new FuzzyLikeThisQueryBuilder(fields);
+ }
+
+ /**
+ * A fuzzy like this query that finds documents that are "like" the provided {@link FuzzyLikeThisQueryBuilder#likeText(String)}
+ * which is checked against the "_all" field.
+ */
+ public static FuzzyLikeThisQueryBuilder fuzzyLikeThisQuery() {
+ return new FuzzyLikeThisQueryBuilder();
+ }
+
+ /**
+ * A fuzzy like this query that finds documents that are "like" the provided {@link FuzzyLikeThisFieldQueryBuilder#likeText(String)}.
+ */
+ public static FuzzyLikeThisFieldQueryBuilder fuzzyLikeThisFieldQuery(String name) {
+ return new FuzzyLikeThisFieldQueryBuilder(name);
+ }
+
+ /**
+ * A more like this query that runs against a specific field.
+ *
+ * @param name The field name
+ */
+ public static MoreLikeThisFieldQueryBuilder moreLikeThisFieldQuery(String name) {
+ return new MoreLikeThisFieldQueryBuilder(name);
+ }
+
+ /**
+ * Constructs a new scoring child query, with the child type and the query to run on the child documents. The
+ * results of this query are the parent docs that those child docs matched.
+ *
+ * @param type The child type.
+ * @param query The query.
+ */
+ public static TopChildrenQueryBuilder topChildrenQuery(String type, QueryBuilder query) {
+ return new TopChildrenQueryBuilder(type, query);
+ }
+
+ /**
+ * Constructs a new NON scoring child query, with the child type and the query to run on the child documents. The
+ * results of this query are the parent docs that those child docs matched.
+ *
+ * @param type The child type.
+ * @param query The query.
+ */
+ public static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder query) {
+ return new HasChildQueryBuilder(type, query);
+ }
+
+ /**
+ * Constructs a new NON scoring parent query, with the parent type and the query to run on the parent documents. The
+ * results of this query are the children docs that those parent docs matched.
+ *
+ * @param type The parent type.
+ * @param query The query.
+ */
+ public static HasParentQueryBuilder hasParentQuery(String type, QueryBuilder query) {
+ return new HasParentQueryBuilder(type, query);
+ }
+
+ public static NestedQueryBuilder nestedQuery(String path, QueryBuilder query) {
+ return new NestedQueryBuilder(path, query);
+ }
+
+ public static NestedQueryBuilder nestedQuery(String path, FilterBuilder filter) {
+ return new NestedQueryBuilder(path, filter);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, String... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, int... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, long... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, float... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, double... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, Object... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder termsQuery(String name, Collection<?> values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, String... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, int... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, long... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, float... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, double... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, Object... values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+     * A query for a field based on several terms, matching if any of them matches.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public static TermsQueryBuilder inQuery(String name, Collection<?> values) {
+ return new TermsQueryBuilder(name, values);
+ }
+
+ /**
+ * A query that will execute the wrapped query only for the specified indices, and "match_all" when
+ * it does not match those indices.
+ */
+ public static IndicesQueryBuilder indicesQuery(QueryBuilder queryBuilder, String... indices) {
+ return new IndicesQueryBuilder(queryBuilder, indices);
+ }
+
+ /**
+     * A Query builder which allows building a query given a JSON string or binary data.
+ */
+ public static WrapperQueryBuilder wrapperQuery(String source) {
+ return new WrapperQueryBuilder(source);
+ }
+
+ /**
+     * A Query builder which allows building a query given a JSON string or binary data.
+ */
+ public static WrapperQueryBuilder wrapperQuery(byte[] source, int offset, int length) {
+ return new WrapperQueryBuilder(source, offset, length);
+ }
+
+ /**
+ * Query that matches Documents based on the relationship between the given shape and
+ * indexed shapes
+ *
+ * @param name The shape field name
+ * @param shape Shape to use in the Query
+ */
+ public static GeoShapeQueryBuilder geoShapeQuery(String name, ShapeBuilder shape) {
+ return new GeoShapeQueryBuilder(name, shape);
+ }
+
+ public static GeoShapeQueryBuilder geoShapeQuery(String name, String indexedShapeId, String indexedShapeType) {
+ return new GeoShapeQueryBuilder(name, indexedShapeId, indexedShapeType);
+ }
+
+ private QueryBuilders() {
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java
new file mode 100644
index 0000000..cf5db0f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that simply wraps a query.
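+ * <p>
+ * A minimal usage sketch (illustrative values):
+ * <pre>
+ * QueryFilterBuilder filter = new QueryFilterBuilder(QueryBuilders.termQuery("user", "kimchy"))
+ *         .cache(true);
+ * </pre>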
+ */
+public class QueryFilterBuilder extends BaseFilterBuilder {
+
+ private final QueryBuilder queryBuilder;
+
+ private Boolean cache;
+
+ private String filterName;
+
+ /**
+ * A filter that simply wraps a query.
+ *
+ * @param queryBuilder The query to wrap as a filter
+ */
+ public QueryFilterBuilder(QueryBuilder queryBuilder) {
+ this.queryBuilder = queryBuilder;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public QueryFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+     * Sets whether the filter should be cached. Defaults to <tt>false</tt>.
+ */
+ public QueryFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ if (filterName == null && cache == null) {
+ builder.field(QueryFilterParser.NAME);
+ queryBuilder.toXContent(builder, params);
+ } else {
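+            // the plain "query" form cannot carry parameters, so the "fquery" variant is
+            // used whenever a filter name or cache flag has to be serialized as well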
+ builder.startObject(FQueryFilterParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ builder.endObject();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java
new file mode 100644
index 0000000..802faf8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+
+import java.io.IOException;
+
+/**
+ * Parser for the <tt>query</tt> filter, which wraps a query as a filter.
+ */
+public class QueryFilterParser implements FilterParser {
+
+ public static final String NAME = "query";
+
+ @Inject
+ public QueryFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ Query query = parseContext.parseInnerQuery();
+ if (query == null) {
+ return null;
+ }
+ return Queries.wrap(query);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java
new file mode 100644
index 0000000..d92fc15
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.queryparser.classic.MapperQueryParser;
+import org.apache.lucene.queryparser.classic.QueryParserSettings;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.similarities.Similarity;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * The context holding the state needed while parsing queries and filters for an index.
+ */
+public class QueryParseContext {
+
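+    // the types the parsed query should apply to, held in a thread local so nested
+    // parsers can temporarily narrow them (see setTypesWithPrevious/setTypes)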
+ private static ThreadLocal<String[]> typesContext = new ThreadLocal<String[]>();
+
+ public static void setTypes(String[] types) {
+ typesContext.set(types);
+ }
+
+ public static String[] getTypes() {
+ return typesContext.get();
+ }
+
+ public static String[] setTypesWithPrevious(String[] types) {
+ String[] old = typesContext.get();
+ setTypes(types);
+ return old;
+ }
+
+ public static void removeTypes() {
+ typesContext.remove();
+ }
+
+ private final Index index;
+
+ private boolean propagateNoCache = false;
+
+ IndexQueryParserService indexQueryParser;
+
+ private final Map<String, Filter> namedFilters = Maps.newHashMap();
+
+ private final MapperQueryParser queryParser = new MapperQueryParser(this);
+
+ private XContentParser parser;
+
+ private EnumSet<ParseField.Flag> parseFlags = ParseField.EMPTY_FLAGS;
+
+ public QueryParseContext(Index index, IndexQueryParserService indexQueryParser) {
+ this.index = index;
+ this.indexQueryParser = indexQueryParser;
+ }
+
+ public void parseFlags(EnumSet<ParseField.Flag> parseFlags) {
+ this.parseFlags = parseFlags == null ? ParseField.EMPTY_FLAGS : parseFlags;
+ }
+
+ public EnumSet<ParseField.Flag> parseFlags() {
+ return parseFlags;
+ }
+
+ public void reset(XContentParser jp) {
+ this.parseFlags = ParseField.EMPTY_FLAGS;
+ this.lookup = null;
+ this.parser = jp;
+ this.namedFilters.clear();
+ }
+
+ public Index index() {
+ return this.index;
+ }
+
+ public XContentParser parser() {
+ return parser;
+ }
+
+ public AnalysisService analysisService() {
+ return indexQueryParser.analysisService;
+ }
+
+ public CacheRecycler cacheRecycler() {
+ return indexQueryParser.cacheRecycler;
+ }
+
+ public ScriptService scriptService() {
+ return indexQueryParser.scriptService;
+ }
+
+ public MapperService mapperService() {
+ return indexQueryParser.mapperService;
+ }
+
+ public IndexEngine indexEngine() {
+ return indexQueryParser.indexEngine;
+ }
+
+ @Nullable
+ public SimilarityService similarityService() {
+ return indexQueryParser.similarityService;
+ }
+
+ public Similarity searchSimilarity() {
+ return indexQueryParser.similarityService != null ? indexQueryParser.similarityService.similarity() : null;
+ }
+
+ public IndexCache indexCache() {
+ return indexQueryParser.indexCache;
+ }
+
+ public IndexFieldDataService fieldData() {
+ return indexQueryParser.fieldDataService;
+ }
+
+ public String defaultField() {
+ return indexQueryParser.defaultField();
+ }
+
+ public boolean queryStringLenient() {
+ return indexQueryParser.queryStringLenient();
+ }
+
+ public MapperQueryParser queryParser(QueryParserSettings settings) {
+ queryParser.reset(settings);
+ return queryParser;
+ }
+
+ public Filter cacheFilter(Filter filter, @Nullable CacheKeyFilter.Key cacheKey) {
+ if (filter == null) {
+ return null;
+ }
+ if (this.propagateNoCache || filter instanceof NoCacheFilter) {
+ return filter;
+ }
+ if (cacheKey != null) {
+ filter = new CacheKeyFilter.Wrapper(filter, cacheKey);
+ }
+ return indexQueryParser.indexCache.filter().cache(filter);
+ }
+
+ public void addNamedFilter(String name, Filter filter) {
+ namedFilters.put(name, filter);
+ }
+
+ public void addNamedQuery(String name, Query query) {
+ namedFilters.put(name, Queries.wrap(query));
+ }
+
+ public ImmutableMap<String, Filter> copyNamedFilters() {
+ if (namedFilters.isEmpty()) {
+ return ImmutableMap.of();
+ }
+ return ImmutableMap.copyOf(namedFilters);
+ }
+
+ @Nullable
+ public Query parseInnerQuery() throws IOException, QueryParsingException {
+ // move to START object
+ XContentParser.Token token;
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new QueryParsingException(index, "[_na] query malformed, must start with start_object");
+ }
+ }
+ token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(index, "[_na] query malformed, no field after start_object");
+ }
+ String queryName = parser.currentName();
+        // move to the next START_OBJECT or START_ARRAY
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) {
+ throw new QueryParsingException(index, "[_na] query malformed, no field after start_object");
+ }
+
+ QueryParser queryParser = indexQueryParser.queryParser(queryName);
+ if (queryParser == null) {
+ throw new QueryParsingException(index, "No query registered for [" + queryName + "]");
+ }
+ Query result = queryParser.parse(this);
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) {
+ // if we are at END_OBJECT, move to the next one...
+ parser.nextToken();
+ }
+ return result;
+ }
+
+ @Nullable
+ public Filter parseInnerFilter() throws IOException, QueryParsingException {
+ // move to START object
+ XContentParser.Token token;
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new QueryParsingException(index, "[_na] filter malformed, must start with start_object");
+ }
+ }
+ token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ // empty filter
+ if (token == XContentParser.Token.END_OBJECT || token == XContentParser.Token.VALUE_NULL) {
+ return null;
+ }
+ throw new QueryParsingException(index, "[_na] filter malformed, no field after start_object");
+ }
+ String filterName = parser.currentName();
+ // move to the next START_OBJECT or START_ARRAY
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) {
+ throw new QueryParsingException(index, "[_na] filter malformed, no field after start_object");
+ }
+
+ FilterParser filterParser = indexQueryParser.filterParser(filterName);
+ if (filterParser == null) {
+ throw new QueryParsingException(index, "No filter registered for [" + filterName + "]");
+ }
+ Filter result = executeFilterParser(filterParser);
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) {
+ // if we are at END_OBJECT, move to the next one...
+ parser.nextToken();
+ }
+ return result;
+ }
+
+ public Filter parseInnerFilter(String filterName) throws IOException, QueryParsingException {
+ FilterParser filterParser = indexQueryParser.filterParser(filterName);
+ if (filterParser == null) {
+ throw new QueryParsingException(index, "No filter registered for [" + filterName + "]");
+ }
+ return executeFilterParser(filterParser);
+ }
+
+ private Filter executeFilterParser(FilterParser filterParser) throws IOException {
+        final boolean propagateNoCache = this.propagateNoCache; // first save the state that we need to restore
+ this.propagateNoCache = false; // parse the subfilter with caching, that's fine
+ Filter result = filterParser.parse(this);
+        // propagate no-cache if the parsed filter is a NoCacheFilter or if the flag was
+        // already set: all filters above, i.e. wrappers of this filter, must not be cached
+ this.propagateNoCache |= (result instanceof NoCacheFilter) || propagateNoCache;
+ return result;
+ }
+
+ public FieldMapper fieldMapper(String name) {
+ FieldMappers fieldMappers = indexQueryParser.mapperService.smartNameFieldMappers(name, getTypes());
+ if (fieldMappers == null) {
+ return null;
+ }
+ return fieldMappers.mapper();
+ }
+
+ public String indexName(String name) {
+ FieldMapper smartMapper = fieldMapper(name);
+ if (smartMapper == null) {
+ return name;
+ }
+ return smartMapper.names().indexName();
+ }
+
+ public Set<String> simpleMatchToIndexNames(String pattern) {
+ return indexQueryParser.mapperService.simpleMatchToIndexNames(pattern, getTypes());
+ }
+
+ public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
+ return indexQueryParser.mapperService.smartName(name, getTypes());
+ }
+
+ public FieldMapper smartNameFieldMapper(String name) {
+ return indexQueryParser.mapperService.smartNameFieldMapper(name, getTypes());
+ }
+
+ public MapperService.SmartNameObjectMapper smartObjectMapper(String name) {
+ return indexQueryParser.mapperService.smartNameObjectMapper(name, getTypes());
+ }
+
+ /**
+ * Returns the narrowed down explicit types, or, if not set, all types.
+ */
+ public Collection<String> queryTypes() {
+ String[] types = getTypes();
+ if (types == null || types.length == 0) {
+ return mapperService().types();
+ }
+ if (types.length == 1 && types[0].equals("_all")) {
+ return mapperService().types();
+ }
+ return Arrays.asList(types);
+ }
+
+ private SearchLookup lookup = null;
+
+ public SearchLookup lookup() {
+ SearchContext current = SearchContext.current();
+ if (current != null) {
+ return current.lookup();
+ }
+ if (lookup == null) {
+ lookup = new SearchLookup(mapperService(), fieldData(), null);
+ }
+ return lookup;
+ }
+
+ public long nowInMillis() {
+ SearchContext current = SearchContext.current();
+ if (current != null) {
+ return current.nowInMillis();
+ }
+ return System.currentTimeMillis();
+ }
+}
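
As a hedged illustration of the parseInnerFilter() hook above (not part of this commit): a custom FilterParser that needs to parse a nested filter can call back into the QueryParseContext, which looks up the registered parser and keeps the NoCacheFilter propagation intact. The "my_wrapper" name, the class itself, and the FilterParser interface shape are assumptions based on the usage in this file.

    // Sketch only; "my_wrapper" and this class are hypothetical.
    public class MyWrapperFilterParser implements FilterParser {

        public static final String NAME = "my_wrapper";

        @Override
        public String[] names() {
            return new String[]{NAME};
        }

        @Override
        public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
            XContentParser parser = parseContext.parser();
            Filter inner = null;
            String currentFieldName = null;
            XContentParser.Token token;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if (token == XContentParser.Token.START_OBJECT) {
                    if ("filter".equals(currentFieldName)) {
                        // re-enters the dispatch above, including executeFilterParser's
                        // save/restore of the propagateNoCache state
                        inner = parseContext.parseInnerFilter();
                    } else {
                        throw new QueryParsingException(parseContext.index(), "[" + NAME + "] does not support [" + currentFieldName + "]");
                    }
                }
            }
            if (inner == null) {
                throw new QueryParsingException(parseContext.index(), "[" + NAME + "] requires a [filter] element");
            }
            return inner;
        }
    }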
diff --git a/src/main/java/org/elasticsearch/index/query/QueryParser.java b/src/main/java/org/elasticsearch/index/query/QueryParser.java
new file mode 100644
index 0000000..eff585a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryParser.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Nullable;
+
+import java.io.IOException;
+
+/**
+ * Parses an XContent query element into a Lucene {@link Query}.
+ */
+public interface QueryParser {
+
+ /**
+ * The names this query parser is registered under.
+ */
+ String[] names();
+
+ /**
+ * Parses into a query from the current parser location. The parser will be at the "START_OBJECT"
+ * token, and parsing should end when the token is at the matching "END_OBJECT".
+ * <p/>
+ * Returns <tt>null</tt> if this query should be ignored in the context of the DSL.
+ */
+ @Nullable
+ Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException;
+}
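
For orientation, a minimal QueryParser implementation might look like the sketch below, following the parsing conventions used elsewhere in this commit. The "echo" name, the field names, and the class itself are hypothetical; the Lucene TermQuery stands in for real query construction.

    package org.elasticsearch.index.query;

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.elasticsearch.common.xcontent.XContentParser;

    import java.io.IOException;

    // Hypothetical example, not part of this commit.
    public class EchoQueryParser implements QueryParser {

        public static final String NAME = "echo";

        @Override
        public String[] names() {
            return new String[]{NAME};
        }

        @Override
        public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
            XContentParser parser = parseContext.parser();
            String field = null;
            String value = null;
            String currentFieldName = null;
            XContentParser.Token token;
            // the parser is positioned at START_OBJECT; consume until the matching END_OBJECT
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if (token.isValue()) {
                    if ("field".equals(currentFieldName)) {
                        field = parser.text();
                    } else if ("value".equals(currentFieldName)) {
                        value = parser.text();
                    } else {
                        throw new QueryParsingException(parseContext.index(), "[echo] query does not support [" + currentFieldName + "]");
                    }
                }
            }
            if (field == null || value == null) {
                throw new QueryParsingException(parseContext.index(), "[echo] query requires [field] and [value]");
            }
            return new TermQuery(new Term(field, value));
        }
    }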
diff --git a/src/main/java/org/elasticsearch/index/query/QueryParserFactory.java b/src/main/java/org/elasticsearch/index/query/QueryParserFactory.java
new file mode 100644
index 0000000..fa280ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryParserFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Factory for creating named {@link QueryParser} instances from {@link Settings}.
+ */
+public interface QueryParserFactory {
+
+ QueryParser create(String name, Settings settings);
+}
diff --git a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java
new file mode 100644
index 0000000..8c3a0fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Thrown when a query or filter fails to parse; reported to clients as HTTP 400 (Bad Request).
+ */
+public class QueryParsingException extends IndexException {
+
+ public QueryParsingException(Index index, String msg) {
+ super(index, msg);
+ }
+
+ public QueryParsingException(Index index, String msg, Throwable cause) {
+ super(index, msg, cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
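
Because status() maps parse failures to BAD_REQUEST, callers that surface errors over REST can rely on a 400 response. A small hedged sketch of assumed caller code:

    // Sketch (assumed caller code): a parse failure surfaces as HTTP 400.
    try {
        queryParser.parse(parseContext);
    } catch (QueryParsingException e) {
        assert e.status() == RestStatus.BAD_REQUEST; // see status() above
    }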
diff --git a/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
new file mode 100644
index 0000000..b01ba1e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
@@ -0,0 +1,397 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * A query that parses a query string and runs it. There are two modes in which this operates. In the first,
+ * when no field is added (using {@link #field(String)}), the query runs once, and non-prefixed fields
+ * use the {@link #defaultField(String)} set. In the second, when one or more fields are added
+ * (using {@link #field(String)}), the parsed query runs against the provided fields, and the results are
+ * combined using either DisMax or a plain boolean query (see {@link #useDisMax(boolean)}).
+ * <p/>
+ */
+public class QueryStringQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<QueryStringQueryBuilder> {
+
+ public static enum Operator {
+ OR,
+ AND
+ }
+
+ private final String queryString;
+
+ private String defaultField;
+
+ private Operator defaultOperator;
+
+ private String analyzer;
+ private String quoteAnalyzer;
+
+ private String quoteFieldSuffix;
+
+ private Boolean autoGeneratePhraseQueries;
+
+ private Boolean allowLeadingWildcard;
+
+ private Boolean lowercaseExpandedTerms;
+
+ private Boolean enablePositionIncrements;
+
+ private Boolean analyzeWildcard;
+
+
+ private float boost = -1;
+
+ private Fuzziness fuzziness;
+ private int fuzzyPrefixLength = -1;
+ private int fuzzyMaxExpansions = -1;
+ private String fuzzyRewrite;
+
+ private int phraseSlop = -1;
+
+ private List<String> fields;
+
+ private ObjectFloatOpenHashMap<String> fieldsBoosts;
+
+ private Boolean useDisMax;
+
+ private float tieBreaker = -1;
+
+ private String rewrite = null;
+
+ private String minimumShouldMatch;
+
+ private Boolean lenient;
+
+ private String queryName;
+
+ public QueryStringQueryBuilder(String queryString) {
+ this.queryString = queryString;
+ }
+
+ /**
+ * The default field to run against when no prefix field is specified. Only relevant when
+ * no fields are explicitly added for the query string to run against.
+ */
+ public QueryStringQueryBuilder defaultField(String defaultField) {
+ this.defaultField = defaultField;
+ return this;
+ }
+
+ /**
+ * Adds a field to run the query string against.
+ */
+ public QueryStringQueryBuilder field(String field) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(field);
+ return this;
+ }
+
+ /**
+ * Adds a field to run the query string against with a specific boost.
+ */
+ public QueryStringQueryBuilder field(String field, float boost) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(field);
+ if (fieldsBoosts == null) {
+ fieldsBoosts = new ObjectFloatOpenHashMap<String>();
+ }
+ fieldsBoosts.put(field, boost);
+ return this;
+ }
+
+ /**
+ * When more than one field is used with the query string, should the queries be combined using
+ * dis max or a plain boolean query. Defaults to dis max (<tt>true</tt>).
+ */
+ public QueryStringQueryBuilder useDisMax(boolean useDisMax) {
+ this.useDisMax = useDisMax;
+ return this;
+ }
+
+ /**
+ * When more than one field is used with the query string, and combined queries are using
+ * dis max, control the tie breaker for it.
+ */
+ public QueryStringQueryBuilder tieBreaker(float tieBreaker) {
+ this.tieBreaker = tieBreaker;
+ return this;
+ }
+
+ /**
+ * Sets the boolean operator of the query parser used to parse the query string.
+ * <p/>
+ * <p>In default mode ({@link Operator#OR}) terms without any modifiers
+ * are considered optional: for example <code>capital of Hungary</code> is equal to
+ * <code>capital OR of OR Hungary</code>.
+ * <p/>
+ * <p>In {@link Operator#AND} mode terms are considered to be in conjunction: the
+ * above-mentioned query is parsed as <code>capital AND of AND Hungary</code>.
+ */
+ public QueryStringQueryBuilder defaultOperator(Operator defaultOperator) {
+ this.defaultOperator = defaultOperator;
+ return this;
+ }
+
+ /**
+ * The optional analyzer used to analyze the query string. Note, if a field has a search analyzer
+ * defined for it, it will be used automatically. Defaults to the smart search analyzer.
+ */
+ public QueryStringQueryBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ /**
+ * The optional analyzer used to analyze the query string for phrase searches. Note, if a field has a
+ * search (quote) analyzer defined for it, it will be used automatically. Defaults to the smart search analyzer.
+ */
+ public QueryStringQueryBuilder quoteAnalyzer(String analyzer) {
+ this.quoteAnalyzer = analyzer;
+ return this;
+ }
+
+
+ /**
+ * Set to true if phrase queries will be automatically generated
+ * when the analyzer returns more than one term from whitespace
+ * delimited text.
+ * NOTE: this behavior may not be suitable for all languages.
+ * <p/>
+ * Set to false if phrase queries should only be generated when
+ * surrounded by double quotes.
+ */
+ public QueryStringQueryBuilder autoGeneratePhraseQueries(boolean autoGeneratePhraseQueries) {
+ this.autoGeneratePhraseQueries = autoGeneratePhraseQueries;
+ return this;
+ }
+
+ /**
+ * Should leading wildcards be allowed or not. Defaults to <tt>true</tt>.
+ */
+ public QueryStringQueryBuilder allowLeadingWildcard(boolean allowLeadingWildcard) {
+ this.allowLeadingWildcard = allowLeadingWildcard;
+ return this;
+ }
+
+ /**
+ * Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
+ * lower-cased or not. Default is <tt>true</tt>.
+ */
+ public QueryStringQueryBuilder lowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
+ this.lowercaseExpandedTerms = lowercaseExpandedTerms;
+ return this;
+ }
+
+ /**
+ * Set to <tt>true</tt> to enable position increments in result query. Defaults to
+ * <tt>true</tt>.
+ * <p/>
+ * <p>When set, result phrase and multi-phrase queries will be aware of position increments.
+ * Useful when e.g. a StopFilter increases the position increment of the token that follows an omitted token.
+ */
+ public QueryStringQueryBuilder enablePositionIncrements(boolean enablePositionIncrements) {
+ this.enablePositionIncrements = enablePositionIncrements;
+ return this;
+ }
+
+ /**
+ * Set the edit distance for fuzzy queries. Default is "AUTO".
+ */
+ public QueryStringQueryBuilder fuzziness(Fuzziness fuzziness) {
+ this.fuzziness = fuzziness;
+ return this;
+ }
+
+ /**
+ * Set the minimum prefix length for fuzzy queries. Default is 1.
+ */
+ public QueryStringQueryBuilder fuzzyPrefixLength(int fuzzyPrefixLength) {
+ this.fuzzyPrefixLength = fuzzyPrefixLength;
+ return this;
+ }
+
+ public QueryStringQueryBuilder fuzzyMaxExpansions(int fuzzyMaxExpansions) {
+ this.fuzzyMaxExpansions = fuzzyMaxExpansions;
+ return this;
+ }
+
+ public QueryStringQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
+ this.fuzzyRewrite = fuzzyRewrite;
+ return this;
+ }
+
+ /**
+ * Sets the default slop for phrases. If zero, then exact phrase matches
+ * are required. Default value is zero.
+ */
+ public QueryStringQueryBuilder phraseSlop(int phraseSlop) {
+ this.phraseSlop = phraseSlop;
+ return this;
+ }
+
+ /**
+ * Set to <tt>true</tt> to enable analysis on wildcard and prefix queries.
+ */
+ public QueryStringQueryBuilder analyzeWildcard(boolean analyzeWildcard) {
+ this.analyzeWildcard = analyzeWildcard;
+ return this;
+ }
+
+ public QueryStringQueryBuilder rewrite(String rewrite) {
+ this.rewrite = rewrite;
+ return this;
+ }
+
+ public QueryStringQueryBuilder minimumShouldMatch(String minimumShouldMatch) {
+ this.minimumShouldMatch = minimumShouldMatch;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public QueryStringQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * An optional field name suffix to automatically try to add to the field searched when using quoted text.
+ */
+ public QueryStringQueryBuilder quoteFieldSuffix(String quoteFieldSuffix) {
+ this.quoteFieldSuffix = quoteFieldSuffix;
+ return this;
+ }
+
+ /**
+ * Sets the query string parser to be lenient when parsing field values. Defaults to the index
+ * setting or, if the index setting is not set, to <tt>false</tt>.
+ */
+ public QueryStringQueryBuilder lenient(Boolean lenient) {
+ this.lenient = lenient;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public QueryStringQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(QueryStringQueryParser.NAME);
+ builder.field("query", queryString);
+ if (defaultField != null) {
+ builder.field("default_field", defaultField);
+ }
+ if (fields != null) {
+ builder.startArray("fields");
+ for (String field : fields) {
+ if (fieldsBoosts != null && fieldsBoosts.containsKey(field)) {
+ field += "^" + fieldsBoosts.get(field);
+ }
+ builder.value(field);
+ }
+ builder.endArray();
+ }
+ if (useDisMax != null) {
+ builder.field("use_dis_max", useDisMax);
+ }
+ if (tieBreaker != -1) {
+ builder.field("tie_breaker", tieBreaker);
+ }
+ if (defaultOperator != null) {
+ builder.field("default_operator", defaultOperator.name().toLowerCase(Locale.ROOT));
+ }
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (quoteAnalyzer != null) {
+ builder.field("quote_analyzer", quoteAnalyzer);
+ }
+ if (autoGeneratePhraseQueries != null) {
+ builder.field("auto_generate_phrase_queries", autoGeneratePhraseQueries);
+ }
+ if (allowLeadingWildcard != null) {
+ builder.field("allow_leading_wildcard", allowLeadingWildcard);
+ }
+ if (lowercaseExpandedTerms != null) {
+ builder.field("lowercase_expanded_terms", lowercaseExpandedTerms);
+ }
+ if (enablePositionIncrements != null) {
+ builder.field("enable_position_increments", enablePositionIncrements);
+ }
+ if (fuzziness != null) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (fuzzyPrefixLength != -1) {
+ builder.field("fuzzy_prefix_length", fuzzyPrefixLength);
+ }
+ if (fuzzyMaxExpansions != -1) {
+ builder.field("fuzzy_max_expansions", fuzzyMaxExpansions);
+ }
+ if (fuzzyRewrite != null) {
+ builder.field("fuzzy_rewrite", fuzzyRewrite);
+ }
+ if (phraseSlop != -1) {
+ builder.field("phrase_slop", phraseSlop);
+ }
+ if (analyzeWildcard != null) {
+ builder.field("analyze_wildcard", analyzeWildcard);
+ }
+ if (rewrite != null) {
+ builder.field("rewrite", rewrite);
+ }
+ if (minimumShouldMatch != null) {
+ builder.field("minimum_should_match", minimumShouldMatch);
+ }
+ if (quoteFieldSuffix != null) {
+ builder.field("quote_field_suffix", quoteFieldSuffix);
+ }
+ if (lenient != null) {
+ builder.field("lenient", lenient);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
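
A hedged usage sketch of the fluent API above (the values are illustrative): the builder serializes through doXContent into a query_string object, with per-field boosts rendered in the field^boost form.

    // Illustrative values; serialization happens in doXContent above.
    QueryStringQueryBuilder qb = new QueryStringQueryBuilder("capital of Hungary")
            .field("title", 2.0f)                                  // rendered as "title^2.0"
            .field("body")
            .useDisMax(true)                                       // combine fields with DisMax
            .defaultOperator(QueryStringQueryBuilder.Operator.AND) // capital AND of AND Hungary
            .phraseSlop(2)
            .queryName("my_query");                                // rendered as "_name"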
diff --git a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java
new file mode 100644
index 0000000..6d3dfc4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.queryparser.classic.MapperQueryParser;
+import org.apache.lucene.queryparser.classic.QueryParserSettings;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
+import static org.elasticsearch.common.lucene.search.Queries.optimizeQuery;
+
+/**
+ * Parser for the <tt>query_string</tt> query.
+ */
+public class QueryStringQueryParser implements QueryParser {
+
+ public static final String NAME = "query_string";
+ private static final ParseField FUZZINESS = Fuzziness.FIELD.withDeprecation("fuzzy_min_sim");
+
+ private final boolean defaultAnalyzeWildcard;
+ private final boolean defaultAllowLeadingWildcard;
+
+ @Inject
+ public QueryStringQueryParser(Settings settings) {
+ this.defaultAnalyzeWildcard = settings.getAsBoolean("indices.query.query_string.analyze_wildcard", QueryParserSettings.DEFAULT_ANALYZE_WILDCARD);
+ this.defaultAllowLeadingWildcard = settings.getAsBoolean("indices.query.query_string.allowLeadingWildcard", QueryParserSettings.DEFAULT_ALLOW_LEADING_WILDCARD);
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String queryName = null;
+ QueryParserSettings qpSettings = new QueryParserSettings();
+ qpSettings.defaultField(parseContext.defaultField());
+ qpSettings.lenient(parseContext.queryStringLenient());
+ qpSettings.analyzeWildcard(defaultAnalyzeWildcard);
+ qpSettings.allowLeadingWildcard(defaultAllowLeadingWildcard);
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("fields".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String fField = null;
+ float fBoost = -1;
+ char[] text = parser.textCharacters();
+ int end = parser.textOffset() + parser.textLength();
+ for (int i = parser.textOffset(); i < end; i++) {
+ if (text[i] == '^') {
+ int relativeLocation = i - parser.textOffset();
+ fField = new String(text, parser.textOffset(), relativeLocation);
+ fBoost = Float.parseFloat(new String(text, i + 1, parser.textLength() - relativeLocation - 1));
+ break;
+ }
+ }
+ if (fField == null) {
+ fField = parser.text();
+ }
+ if (qpSettings.fields() == null) {
+ qpSettings.fields(Lists.<String>newArrayList());
+ }
+
+ if (Regex.isSimpleMatchPattern(fField)) {
+ for (String field : parseContext.mapperService().simpleMatchToIndexNames(fField)) {
+ qpSettings.fields().add(field);
+ if (fBoost != -1) {
+ if (qpSettings.boosts() == null) {
+ qpSettings.boosts(new ObjectFloatOpenHashMap<String>());
+ }
+ qpSettings.boosts().put(field, fBoost);
+ }
+ }
+ } else {
+ qpSettings.fields().add(fField);
+ if (fBoost != -1) {
+ if (qpSettings.boosts() == null) {
+ qpSettings.boosts(new ObjectFloatOpenHashMap<String>());
+ }
+ qpSettings.boosts().put(fField, fBoost);
+ }
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[query_string] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("query".equals(currentFieldName)) {
+ qpSettings.queryString(parser.text());
+ } else if ("default_field".equals(currentFieldName) || "defaultField".equals(currentFieldName)) {
+ qpSettings.defaultField(parser.text());
+ } else if ("default_operator".equals(currentFieldName) || "defaultOperator".equals(currentFieldName)) {
+ String op = parser.text();
+ if ("or".equalsIgnoreCase(op)) {
+ qpSettings.defaultOperator(org.apache.lucene.queryparser.classic.QueryParser.Operator.OR);
+ } else if ("and".equalsIgnoreCase(op)) {
+ qpSettings.defaultOperator(org.apache.lucene.queryparser.classic.QueryParser.Operator.AND);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "Query default operator [" + op + "] is not allowed");
+ }
+ } else if ("analyzer".equals(currentFieldName)) {
+ NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text());
+ if (analyzer == null) {
+ throw new QueryParsingException(parseContext.index(), "[query_string] analyzer [" + parser.text() + "] not found");
+ }
+ qpSettings.forcedAnalyzer(analyzer);
+ } else if ("quote_analyzer".equals(currentFieldName) || "quoteAnalyzer".equals(currentFieldName)) {
+ NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text());
+ if (analyzer == null) {
+ throw new QueryParsingException(parseContext.index(), "[query_string] quote_analyzer [" + parser.text() + "] not found");
+ }
+ qpSettings.forcedQuoteAnalyzer(analyzer);
+ } else if ("allow_leading_wildcard".equals(currentFieldName) || "allowLeadingWildcard".equals(currentFieldName)) {
+ qpSettings.allowLeadingWildcard(parser.booleanValue());
+ } else if ("auto_generate_phrase_queries".equals(currentFieldName) || "autoGeneratePhraseQueries".equals(currentFieldName)) {
+ qpSettings.autoGeneratePhraseQueries(parser.booleanValue());
+ } else if ("lowercase_expanded_terms".equals(currentFieldName) || "lowercaseExpandedTerms".equals(currentFieldName)) {
+ qpSettings.lowercaseExpandedTerms(parser.booleanValue());
+ } else if ("enable_position_increments".equals(currentFieldName) || "enablePositionIncrements".equals(currentFieldName)) {
+ qpSettings.enablePositionIncrements(parser.booleanValue());
+ } else if ("escape".equals(currentFieldName)) {
+ qpSettings.escape(parser.booleanValue());
+ } else if ("use_dis_max".equals(currentFieldName) || "useDisMax".equals(currentFieldName)) {
+ qpSettings.useDisMax(parser.booleanValue());
+ } else if ("fuzzy_prefix_length".equals(currentFieldName) || "fuzzyPrefixLength".equals(currentFieldName)) {
+ qpSettings.fuzzyPrefixLength(parser.intValue());
+ } else if ("fuzzy_max_expansions".equals(currentFieldName) || "fuzzyMaxExpansions".equals(currentFieldName)) {
+ qpSettings.fuzzyMaxExpansions(parser.intValue());
+ } else if ("fuzzy_rewrite".equals(currentFieldName) || "fuzzyRewrite".equals(currentFieldName)) {
+ qpSettings.fuzzyRewriteMethod(QueryParsers.parseRewriteMethod(parser.textOrNull()));
+ } else if ("phrase_slop".equals(currentFieldName) || "phraseSlop".equals(currentFieldName)) {
+ qpSettings.phraseSlop(parser.intValue());
+ } else if (FUZZINESS.match(currentFieldName, parseContext.parseFlags())) {
+ qpSettings.fuzzyMinSim(Fuzziness.parse(parser).asSimilarity());
+ } else if ("boost".equals(currentFieldName)) {
+ qpSettings.boost(parser.floatValue());
+ } else if ("tie_breaker".equals(currentFieldName) || "tieBreaker".equals(currentFieldName)) {
+ qpSettings.tieBreaker(parser.floatValue());
+ } else if ("analyze_wildcard".equals(currentFieldName) || "analyzeWildcard".equals(currentFieldName)) {
+ qpSettings.analyzeWildcard(parser.booleanValue());
+ } else if ("rewrite".equals(currentFieldName)) {
+ qpSettings.rewriteMethod(QueryParsers.parseRewriteMethod(parser.textOrNull()));
+ } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ qpSettings.minimumShouldMatch(parser.textOrNull());
+ } else if ("quote_field_suffix".equals(currentFieldName) || "quoteFieldSuffix".equals(currentFieldName)) {
+ qpSettings.quoteFieldSuffix(parser.textOrNull());
+ } else if ("lenient".equalsIgnoreCase(currentFieldName)) {
+ qpSettings.lenient(parser.booleanValue());
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[query_string] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (qpSettings.queryString() == null) {
+ throw new QueryParsingException(parseContext.index(), "query_string must be provided with a [query]");
+ }
+ qpSettings.defaultAnalyzer(parseContext.mapperService().searchAnalyzer());
+ qpSettings.defaultQuoteAnalyzer(parseContext.mapperService().searchQuoteAnalyzer());
+
+ if (qpSettings.escape()) {
+ qpSettings.queryString(org.apache.lucene.queryparser.classic.QueryParser.escape(qpSettings.queryString()));
+ }
+
+ qpSettings.queryTypes(parseContext.queryTypes());
+ Query query = parseContext.indexCache().queryParserCache().get(qpSettings);
+ if (query != null) {
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+
+ MapperQueryParser queryParser = parseContext.queryParser(qpSettings);
+
+ try {
+ query = queryParser.parse(qpSettings.queryString());
+ if (query == null) {
+ return null;
+ }
+ if (qpSettings.boost() != QueryParserSettings.DEFAULT_BOOST) {
+ query.setBoost(query.getBoost() * qpSettings.boost());
+ }
+ query = optimizeQuery(fixNegativeQueryIfNeeded(query));
+ if (query instanceof BooleanQuery) {
+ Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch());
+ }
+ parseContext.indexCache().queryParserCache().put(qpSettings, query);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ } catch (org.apache.lucene.queryparser.classic.ParseException e) {
+ throw new QueryParsingException(parseContext.index(), "Failed to parse query [" + qpSettings.queryString() + "]", e);
+ }
+ }
+}
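
The "fields" array handling above accepts entries of the form field^boost. The same logic, extracted into a standalone helper for clarity (illustrative only, not how the code is actually factored):

    // Equivalent of the caret handling in the "fields" array above:
    // "title^5" becomes field "title" with boost 5.0.
    static void addFieldEntry(String entry, java.util.List<String> fields,
                              com.carrotsearch.hppc.ObjectFloatOpenHashMap<String> boosts) {
        int caret = entry.indexOf('^');
        if (caret == -1) {
            fields.add(entry);
        } else {
            String field = entry.substring(0, caret);
            fields.add(field);
            boosts.put(field, Float.parseFloat(entry.substring(caret + 1)));
        }
    }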
diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java
new file mode 100644
index 0000000..ae4c5b6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java
@@ -0,0 +1,400 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that restricts search results to values that are within the given range.
+ */
+public class RangeFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private Object from;
+
+ private Object to;
+
+ private boolean includeLower = true;
+
+ private boolean includeUpper = true;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ private String execution;
+
+ /**
+ * A filter that restricts search results to values that are within the given range.
+ *
+ * @param name The field name
+ */
+ public RangeFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder from(Object from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder from(int from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder from(long from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder from(float from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder from(double from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gt(Object from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gt(int from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gt(long from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gt(float from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gt(double from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gte(Object from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gte(int from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gte(long from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gte(float from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder gte(double from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder to(Object to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder to(int to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder to(long to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder to(float to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder to(double to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lt(Object to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lt(int to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lt(long to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lt(float to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lt(double to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lte(int to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lte(long to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lte(float to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lte(double to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the filter query. Null indicates unbounded.
+ */
+ public RangeFilterBuilder lte(Object to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * Should the lower bound be included or not. Defaults to <tt>true</tt>.
+ */
+ public RangeFilterBuilder includeLower(boolean includeLower) {
+ this.includeLower = includeLower;
+ return this;
+ }
+
+ /**
+ * Should the upper bound be included or not. Defaults to <tt>true</tt>.
+ */
+ public RangeFilterBuilder includeUpper(boolean includeUpper) {
+ this.includeUpper = includeUpper;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public RangeFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>true</tt>.
+ */
+ public RangeFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public RangeFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ /**
+ * Sets the execution mode that controls how the range filter is executed. Valid values are: "index" and "fielddata".
+ * <ol>
+ * <li> The <code>index</code> execution uses the field's inverted index in order to determine whether documents
+ * fall within the range filter's from and to range.
+ * <li> The <code>fielddata</code> execution uses field data in order to determine whether documents fall within the
+ * range filter's from and to range. Since field data is an in-memory data structure, you need to have
+ * sufficient memory on your nodes in order to use this execution mode.
+ * </ol>
+ *
+ * In general, the <code>index</code> execution is faster for small ranges and the <code>fielddata</code>
+ * execution is faster for longer ranges.
+ */
+ public RangeFilterBuilder setExecution(String execution) {
+ this.execution = execution;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(RangeFilterParser.NAME);
+
+ builder.startObject(name);
+ builder.field("from", from);
+ builder.field("to", to);
+ builder.field("include_lower", includeLower);
+ builder.field("include_upper", includeUpper);
+ builder.endObject();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ if (execution != null) {
+ builder.field("execution", execution);
+ }
+
+ builder.endObject();
+ }
+} \ No newline at end of file
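
A hedged usage sketch (values illustrative): a half-open range on "age" executed against field data. Note that the parser that follows defaults _cache to true for index execution and to false for fielddata execution.

    RangeFilterBuilder fb = new RangeFilterBuilder("age")
            .gte(10)                    // from = 10, include_lower = true
            .lt(20)                     // to = 20, include_upper = false
            .setExecution("fielddata")  // range evaluated against field data
            .filterName("age_range");   // rendered as "_name"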
diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java
new file mode 100644
index 0000000..3ad2e7c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.TermRangeFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * Parser for the <tt>range</tt> filter.
+ */
+public class RangeFilterParser implements FilterParser {
+
+ public static final String NAME = "range";
+
+ @Inject
+ public RangeFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Boolean cache = null;
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+ Object from = null;
+ Object to = null;
+ boolean includeLower = true;
+ boolean includeUpper = true;
+ String execution = "index";
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("from".equals(currentFieldName)) {
+ from = parser.objectBytes();
+ } else if ("to".equals(currentFieldName)) {
+ to = parser.objectBytes();
+ } else if ("include_lower".equals(currentFieldName) || "includeLower".equals(currentFieldName)) {
+ includeLower = parser.booleanValue();
+ } else if ("include_upper".equals(currentFieldName) || "includeUpper".equals(currentFieldName)) {
+ includeUpper = parser.booleanValue();
+ } else if ("gt".equals(currentFieldName)) {
+ from = parser.objectBytes();
+ includeLower = false;
+ } else if ("gte".equals(currentFieldName) || "ge".equals(currentFieldName)) {
+ from = parser.objectBytes();
+ includeLower = true;
+ } else if ("lt".equals(currentFieldName)) {
+ to = parser.objectBytes();
+ includeUpper = false;
+ } else if ("lte".equals(currentFieldName) || "le".equals(currentFieldName)) {
+ to = parser.objectBytes();
+ includeUpper = true;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else if ("execution".equals(currentFieldName)) {
+ execution = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (fieldName == null) {
+ throw new QueryParsingException(parseContext.index(), "[range] filter no field specified for range filter");
+ }
+
+ Filter filter = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ boolean explicitlyCached = cache != null && cache;
+ if (execution.equals("index")) {
+ if (cache == null) {
+ cache = true;
+ }
+ FieldMapper mapper = smartNameFieldMappers.mapper();
+ if (mapper instanceof DateFieldMapper) {
+ filter = ((DateFieldMapper) mapper).rangeFilter(from, to, includeLower, includeUpper, parseContext, explicitlyCached);
+ } else {
+ filter = mapper.rangeFilter(from, to, includeLower, includeUpper, parseContext);
+ }
+ } else if ("fielddata".equals(execution)) {
+ if (cache == null) {
+ cache = false;
+ }
+ FieldMapper mapper = smartNameFieldMappers.mapper();
+ if (!(mapper instanceof NumberFieldMapper)) {
+ throw new QueryParsingException(parseContext.index(), "[range] filter field [" + fieldName + "] is not a numeric type");
+ }
+ if (mapper instanceof DateFieldMapper) {
+ filter = ((DateFieldMapper) mapper).rangeFilter(parseContext.fieldData(), from, to, includeLower, includeUpper, parseContext, explicitlyCached);
+ } else {
+ filter = ((NumberFieldMapper) mapper).rangeFilter(parseContext.fieldData(), from, to, includeLower, includeUpper, parseContext);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[range] filter doesn't support [" + execution + "] execution");
+ }
+ }
+ }
+
+ if (filter == null) {
+ if (cache == null) {
+ cache = true;
+ }
+ filter = new TermRangeFilter(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper);
+ }
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
new file mode 100644
index 0000000..7123985
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
@@ -0,0 +1,418 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A Query that matches documents within a range of terms.
+ */
+public class RangeQueryBuilder extends BaseQueryBuilder implements MultiTermQueryBuilder, BoostableQueryBuilder<RangeQueryBuilder> {
+
+ private final String name;
+
+ private Object from;
+
+ private Object to;
+
+ private boolean includeLower = true;
+
+ private boolean includeUpper = true;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ /**
+ * A Query that matches documents within a range of terms.
+ *
+ * @param name The field name
+ */
+ public RangeQueryBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder from(Object from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder from(String from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder from(int from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder from(long from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder from(float from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder from(double from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gt(String from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gt(Object from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gt(int from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gt(long from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gt(float from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gt(double from) {
+ this.from = from;
+ this.includeLower = false;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gte(String from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gte(Object from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gte(int from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gte(long from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gte(float from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The from part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder gte(double from) {
+ this.from = from;
+ this.includeLower = true;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder to(Object to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder to(String to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder to(int to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder to(long to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder to(float to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder to(double to) {
+ this.to = to;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lt(String to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lt(Object to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lt(int to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lt(long to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lt(float to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lt(double to) {
+ this.to = to;
+ this.includeUpper = false;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lte(String to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lte(Object to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lte(int to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lte(long to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lte(float to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * The to part of the range query. Null indicates unbounded.
+ */
+ public RangeQueryBuilder lte(double to) {
+ this.to = to;
+ this.includeUpper = true;
+ return this;
+ }
+
+ /**
+ * Should the lower bound be included or not. Defaults to <tt>true</tt>.
+ */
+ public RangeQueryBuilder includeLower(boolean includeLower) {
+ this.includeLower = includeLower;
+ return this;
+ }
+
+ /**
+ * Should the upper bound be included or not. Defaults to <tt>true</tt>.
+ */
+ public RangeQueryBuilder includeUpper(boolean includeUpper) {
+ this.includeUpper = includeUpper;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public RangeQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public RangeQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(RangeQueryParser.NAME);
+ builder.startObject(name);
+ builder.field("from", from);
+ builder.field("to", to);
+ builder.field("include_lower", includeLower);
+ builder.field("include_upper", includeUpper);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+}
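
And the query-side counterpart, again as a sketch with illustrative values; boost and _name are emitted inside the per-field object by doXContent above.

    RangeQueryBuilder qb = new RangeQueryBuilder("price")
            .gte(100)                 // from = 100, include_lower = true
            .lt(200)                  // to = 200, include_upper = false
            .boost(2.0f)              // rendered as "boost"
            .queryName("price_band"); // rendered as "_name"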
diff --git a/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java b/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java
new file mode 100644
index 0000000..53f3427
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeQuery;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * Parser for the <tt>range</tt> query.
+ */
+public class RangeQueryParser implements QueryParser {
+
+ public static final String NAME = "range";
+
+ @Inject
+ public RangeQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[range] query malformed, no field to indicate field name");
+ }
+ String fieldName = parser.currentName();
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), "[range] query malformed, after field missing start object");
+ }
+
+ Object from = null;
+ Object to = null;
+ boolean includeLower = true;
+ boolean includeUpper = true;
+ float boost = 1.0f;
+ String queryName = null;
+
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("from".equals(currentFieldName)) {
+ from = parser.objectBytes();
+ } else if ("to".equals(currentFieldName)) {
+ to = parser.objectBytes();
+ } else if ("include_lower".equals(currentFieldName) || "includeLower".equals(currentFieldName)) {
+ includeLower = parser.booleanValue();
+ } else if ("include_upper".equals(currentFieldName) || "includeUpper".equals(currentFieldName)) {
+ includeUpper = parser.booleanValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("gt".equals(currentFieldName)) {
+ from = parser.objectBytes();
+ includeLower = false;
+ } else if ("gte".equals(currentFieldName) || "ge".equals(currentFieldName)) {
+ from = parser.objectBytes();
+ includeLower = true;
+ } else if ("lt".equals(currentFieldName)) {
+ to = parser.objectBytes();
+ includeUpper = false;
+ } else if ("lte".equals(currentFieldName) || "le".equals(currentFieldName)) {
+ to = parser.objectBytes();
+ includeUpper = true;
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[range] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ // move to the next end object, to close the field name
+ token = parser.nextToken();
+ if (token != XContentParser.Token.END_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), "[range] query malformed, does not end with an object");
+ }
+
+ Query query = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ //LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well?
+ query = smartNameFieldMappers.mapper().rangeQuery(from, to, includeLower, includeUpper, parseContext);
+ }
+ }
+ if (query == null) {
+ query = new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper);
+ }
+ query.setBoost(boost);
+ query = wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java
new file mode 100644
index 0000000..026df4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter that restricts search results to values that have a matching regular expression in a given
+ * field.
+ *
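+ * A minimal usage sketch (illustrative only, not part of the original source):
+ * <pre>
+ * RegexpFilterBuilder filter = new RegexpFilterBuilder("name.first", "s.*y")
+ *         .flags(RegexpFlag.INTERSECTION, RegexpFlag.EMPTY)
+ *         .cache(true);
+ * </pre>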
+ *
+ */
+public class RegexpFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+ private final String regexp;
+ private int flags = -1;
+
+ private Boolean cache;
+ private String cacheKey;
+ private String filterName;
+
+ /**
+ * Constructs a filter that restricts search results to values that match the given
+ * regular expression in the given field.
+ *
+ * @param name The field name
+ * @param regexp The regular expression
+ */
+ public RegexpFilterBuilder(String name, String regexp) {
+ this.name = name;
+ this.regexp = regexp;
+ }
+
+ /**
+ * Sets the filter name that can be used when searching for matched_filters per hit.
+ */
+ public RegexpFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Sets the regexp flags (see {@link RegexpFlag}).
+ */
+ public RegexpFilterBuilder flags(RegexpFlag... flags) {
+ int value = 0;
+ if (flags.length == 0) {
+ value = RegexpFlag.ALL.value;
+ } else {
+ for (RegexpFlag flag : flags) {
+ value |= flag.value;
+ }
+ }
+ this.flags = value;
+ return this;
+ }
+
+ /**
+ * Sets whether the filter should be cached. The parser defaults to <tt>true</tt> for this filter.
+ */
+ public RegexpFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public RegexpFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(RegexpFilterParser.NAME);
+ if (flags < 0) {
+ builder.field(name, regexp);
+ } else {
+ builder.startObject(name)
+ .field("value", regexp)
+ .field("flags_value", flags)
+ .endObject();
+ }
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java
new file mode 100644
index 0000000..08a4fdf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.search.RegexpFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ *
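+ * Parses the {@code regexp} filter. An illustrative request body this parser
+ * accepts (example only, not part of the original source):
+ * <pre>
+ * { "regexp" : { "name.first" : { "value" : "s.*y", "flags" : "INTERSECTION" }, "_cache" : true } }
+ * </pre>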
+ */
+public class RegexpFilterParser implements FilterParser {
+
+ public static final String NAME = "regexp";
+
+ @Inject
+ public RegexpFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ boolean cache = true;
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+ String secondaryFieldName = null;
+ Object value = null;
+ Object secondaryValue = null;
+ int flagsValue = -1;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("value".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("flags".equals(currentFieldName)) {
+ String flags = parser.textOrNull();
+ flagsValue = RegexpFlag.resolveValue(flags);
+ } else if ("flags_value".equals(currentFieldName)) {
+ flagsValue = parser.intValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[regexp] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ secondaryFieldName = currentFieldName;
+ secondaryValue = parser.objectBytes();
+ }
+ }
+ }
+
+ if (fieldName == null) {
+ fieldName = secondaryFieldName;
+ value = secondaryValue;
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for regexp filter");
+ }
+
+ Filter filter = null;
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ filter = smartNameFieldMappers.mapper().regexpFilter(value, flagsValue, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ filter = smartNameFieldMappers.mapper().regexpFilter(value, flagsValue, parseContext);
+ }
+ }
+ if (filter == null) {
+ filter = new RegexpFilter(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue);
+ }
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFlag.java b/src/main/java/org/elasticsearch/index/query/RegexpFlag.java
new file mode 100644
index 0000000..454d026
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RegexpFlag.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import java.util.Locale;
+
+import org.apache.lucene.util.automaton.RegExp;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+
+/**
+ * Regular expression syntax flags. Each flag represents optional syntax support in the regular expression:
+ * <ul>
+ * <li><tt>INTERSECTION</tt> - Support for intersection notation: <tt>&lt;expression&gt; &amp; &lt;expression&gt;</tt></li>
+ * <li><tt>COMPLEMENT</tt> - Support for complement notation: <tt>~&lt;expression&gt;</tt></li>
+ * <li><tt>EMPTY</tt> - Support for the empty language symbol: <tt>#</tt></li>
+ * <li><tt>ANYSTRING</tt> - Support for the any string symbol: <tt>@</tt></li>
+ * <li><tt>INTERVAL</tt> - Support for numerical interval notation: <tt>&lt;n-m&gt;</tt></li>
+ * <li><tt>NONE</tt> - Disable support for all syntax options</li>
+ * <li><tt>ALL</tt> - Enables support for all syntax options</li>
+ * </ul>
+ *
+ * @see RegexpQueryBuilder#flags(RegexpFlag...)
+ * @see RegexpFilterBuilder#flags(RegexpFlag...)
+ */
+public enum RegexpFlag {
+
+ /**
+ * Enables intersection of the form: <tt>&lt;expression&gt; &amp; &lt;expression&gt;</tt>
+ */
+ INTERSECTION(RegExp.INTERSECTION),
+
+ /**
+ * Enables complement expression of the form: <tt>~&lt;expression&gt;</tt>
+ */
+ COMPLEMENT(RegExp.COMPLEMENT),
+
+ /**
+ * Enables empty language expression: <tt>#</tt>
+ */
+ EMPTY(RegExp.EMPTY),
+
+ /**
+ * Enables any string expression: <tt>@</tt>
+ */
+ ANYSTRING(RegExp.ANYSTRING),
+
+ /**
+ * Enables numerical interval expression: <tt>&lt;n-m&gt;</tt>
+ */
+ INTERVAL(RegExp.INTERVAL),
+
+ /**
+ * Disables all available option flags
+ */
+ NONE(RegExp.NONE),
+
+ /**
+ * Enables all available option flags
+ */
+ ALL(RegExp.ALL);
+
+
+ final int value;
+
+ private RegexpFlag(int value) {
+ this.value = value;
+ }
+
+ public int value() {
+ return value;
+ }
+
+ /**
+ * Resolves the combined OR'ed value for the given list of regular expression flags. The given flags must follow the
+ * following syntax:
+ * <p/>
+ * <tt>flag_name</tt>(|<tt>flag_name</tt>)*
+ * <p/>
+ * Where <tt>flag_name</tt> is one of the following:
+ * <ul>
+ * <li>INTERSECTION</li>
+ * <li>COMPLEMENT</li>
+ * <li>EMPTY</li>
+ * <li>ANYSTRING</li>
+ * <li>INTERVAL</li>
+ * <li>NONE</li>
+ * <li>ALL</li>
+ * </ul>
+ * <p/>
+ * Example: <tt>INTERSECTION|COMPLEMENT|EMPTY</tt>
+ *
+ * @param flags A string representing a list of regular expression flags
+ * @return The combined OR'ed value for all the flags
+ */
+ static int resolveValue(String flags) {
+ if (flags == null || flags.isEmpty()) {
+ return RegExp.ALL;
+ }
+ int magic = RegExp.NONE;
+ for (String s : Strings.delimitedListToStringArray(flags, "|")) {
+ if (s.isEmpty()) {
+ continue;
+ }
+ try {
+ RegexpFlag flag = RegexpFlag.valueOf(s.toUpperCase(Locale.ROOT));
+ if (flag == RegexpFlag.NONE) {
+ continue;
+ }
+ if (flag == RegexpFlag.ALL) {
+ return flag.value();
+ }
+ magic |= flag.value();
+ } catch (IllegalArgumentException iae) {
+ throw new ElasticsearchIllegalArgumentException("Unknown regexp flag [" + s + "]");
+ }
+ }
+ return magic;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java
new file mode 100644
index 0000000..c52c10e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A Query that matches documents containing terms that match a regular
+ * expression in a given field.
+ *
+ *
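+ * A minimal usage sketch (illustrative only, not part of the original source):
+ * <pre>
+ * RegexpQueryBuilder query = new RegexpQueryBuilder("name.first", "s.*y")
+ *         .flags(RegexpFlag.INTERSECTION)
+ *         .boost(2.0f);
+ * </pre>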
+ */
+public class RegexpQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<RegexpQueryBuilder>, MultiTermQueryBuilder {
+
+ private final String name;
+ private final String regexp;
+
+ private int flags = -1;
+ private float boost = -1;
+ private String rewrite;
+ private String queryName;
+
+ /**
+ * Constructs a new regexp query.
+ *
+ * @param name The name of the field
+ * @param regexp The regular expression
+ */
+ public RegexpQueryBuilder(String name, String regexp) {
+ this.name = name;
+ this.regexp = regexp;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public RegexpQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ public RegexpQueryBuilder flags(RegexpFlag... flags) {
+ int value = 0;
+ if (flags.length == 0) {
+ value = RegexpFlag.ALL.value;
+ } else {
+ for (RegexpFlag flag : flags) {
+ value |= flag.value;
+ }
+ }
+ this.flags = value;
+ return this;
+ }
+
+ public RegexpQueryBuilder rewrite(String rewrite) {
+ this.rewrite = rewrite;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched_queries per hit.
+ */
+ public RegexpQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(RegexpQueryParser.NAME);
+ if (flags == -1 && boost == -1 && rewrite == null && queryName == null) {
+ builder.field(name, regexp);
+ } else {
+ builder.startObject(name);
+ builder.field("value", regexp);
+ if (flags != -1) {
+ builder.field("flags_value", flags);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (rewrite != null) {
+ builder.field("rewrite", rewrite);
+ }
+ if (queryName != null) {
+ builder.field("name", queryName);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java b/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java
new file mode 100644
index 0000000..6013854
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.util.automaton.RegExp;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ *
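+ * Parses the {@code regexp} query. An illustrative request body this parser
+ * accepts (example only, not part of the original source):
+ * <pre>
+ * { "regexp" : { "name.first" : { "value" : "s.*y", "flags" : "INTERSECTION|EMPTY", "boost" : 1.2 } } }
+ * </pre>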
+ */
+public class RegexpQueryParser implements QueryParser {
+
+ public static final String NAME = "regexp";
+
+ @Inject
+ public RegexpQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[regexp] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+ String rewriteMethod = null;
+
+ Object value = null;
+ float boost = 1.0f;
+ int flagsValue = -1;
+ String queryName = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("value".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("rewrite".equals(currentFieldName)) {
+ rewriteMethod = parser.textOrNull();
+ } else if ("flags".equals(currentFieldName)) {
+ String flags = parser.textOrNull();
+ flagsValue = RegexpFlag.resolveValue(flags);
+ } else if ("flags_value".equals(currentFieldName)) {
+ flagsValue = parser.intValue();
+ if (flagsValue < 0) {
+ flagsValue = RegExp.ALL;
+ }
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[regexp] query does not support [" + currentFieldName + "]");
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.objectBytes();
+ parser.nextToken();
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for regexp query");
+ }
+
+ MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewriteMethod, null);
+
+ Query query = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ query = smartNameFieldMappers.mapper().regexpQuery(value, flagsValue, method, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ query = smartNameFieldMappers.mapper().regexpQuery(value, flagsValue, method, parseContext);
+ }
+ }
+ if (query == null) {
+ RegexpQuery regexpQuery = new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue);
+ if (method != null) {
+ regexpQuery.setRewriteMethod(method);
+ }
+ query = regexpQuery;
+ }
+ query.setBoost(boost);
+ query = wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java
new file mode 100644
index 0000000..75ffa38
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ *
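+ * Builds a {@code script} filter. A minimal usage sketch (illustrative only,
+ * not part of the original source; the script snippet is an example):
+ * <pre>
+ * ScriptFilterBuilder filter = new ScriptFilterBuilder("doc['age'].value &gt; limit")
+ *         .addParam("limit", 10)
+ *         .cache(false);
+ * </pre>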
+ */
+public class ScriptFilterBuilder extends BaseFilterBuilder {
+
+ private final String script;
+
+ private Map<String, Object> params;
+
+ private String lang;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ public ScriptFilterBuilder(String script) {
+ this.script = script;
+ }
+
+ public ScriptFilterBuilder addParam(String name, Object value) {
+ if (params == null) {
+ params = newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ public ScriptFilterBuilder params(Map<String, Object> params) {
+ if (this.params == null) {
+ this.params = params;
+ } else {
+ this.params.putAll(params);
+ }
+ return this;
+ }
+
+ /**
+ * Sets the script language.
+ */
+ public ScriptFilterBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * Sets the filter name that can be used when searching for matched_filters per hit.
+ */
+ public ScriptFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>false</tt>.
+ */
+ public ScriptFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public ScriptFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(ScriptFilterParser.NAME);
+ builder.field("script", script);
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ if (this.lang != null) {
+ builder.field("lang", lang);
+ }
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java
new file mode 100644
index 0000000..c33fabe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.BitsFilteredDocIdSet;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ *
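+ * Parses the {@code script} filter. An illustrative request body this parser
+ * accepts (example only, not part of the original source):
+ * <pre>
+ * { "script" : { "script" : "doc['age'].value &gt; limit", "params" : { "limit" : 10 }, "_cache" : false } }
+ * </pre>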
+ */
+public class ScriptFilterParser implements FilterParser {
+
+ public static final String NAME = "script";
+
+ @Inject
+ public ScriptFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token;
+
+ boolean cache = false; // not cached by default: script results typically change between requests
+ CacheKeyFilter.Key cacheKey = null;
+ // note: when caching is enabled, the full bit set for each segment ends up being loaded
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> params = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ params = parser.map();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[script] filter does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[script] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (script == null) {
+ throw new QueryParsingException(parseContext.index(), "script must be provided with a [script] filter");
+ }
+ if (params == null) {
+ params = newHashMap();
+ }
+
+ Filter filter = new ScriptFilter(scriptLang, script, params, parseContext.scriptService(), parseContext.lookup());
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+
+ public static class ScriptFilter extends Filter {
+
+ private final String script;
+
+ private final Map<String, Object> params;
+
+ private final SearchScript searchScript;
+
+ public ScriptFilter(String scriptLang, String script, Map<String, Object> params, ScriptService scriptService, SearchLookup searchLookup) {
+ this.script = script;
+ this.params = params;
+
+ this.searchScript = scriptService.search(searchLookup, scriptLang, script, newHashMap(params));
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder buffer = new StringBuilder();
+ buffer.append("ScriptFilter(");
+ buffer.append(script);
+ buffer.append(")");
+ return buffer.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ScriptFilter that = (ScriptFilter) o;
+
+ if (params != null ? !params.equals(that.params) : that.params != null) return false;
+ if (script != null ? !script.equals(that.script) : that.script != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = script != null ? script.hashCode() : 0;
+ result = 31 * result + (params != null ? params.hashCode() : 0);
+ return result;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ searchScript.setNextReader(context);
+ // LUCENE 4 UPGRADE: we can simply wrap this here since it is not cacheable and if we are not top level we will get a null passed anyway
+ return BitsFilteredDocIdSet.wrap(new ScriptDocSet(context.reader().maxDoc(), acceptDocs, searchScript), acceptDocs);
+ }
+
+ static class ScriptDocSet extends MatchDocIdSet {
+
+ private final SearchScript searchScript;
+
+ public ScriptDocSet(int maxDoc, @Nullable Bits acceptDocs, SearchScript searchScript) {
+ super(maxDoc, acceptDocs);
+ this.searchScript = searchScript;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ searchScript.setNextDocId(doc);
+ Object val = searchScript.run();
+ if (val == null) {
+ return false;
+ }
+ if (val instanceof Boolean) {
+ return (Boolean) val;
+ }
+ if (val instanceof Number) {
+ return ((Number) val).longValue() != 0;
+ }
+ throw new ElasticsearchIllegalArgumentException("Can't handle type [" + val + "] in script filter");
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
new file mode 100644
index 0000000..f5b4784
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * SimpleQueryStringBuilder builds a {@code simple_query_string} query that acts
+ * similarly to a query_string query, but never throws exceptions on malformed syntax.
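+ * <p/>
+ * A minimal usage sketch (illustrative only, not part of the original source):
+ * <pre>
+ * SimpleQueryStringBuilder builder = new SimpleQueryStringBuilder("\"fried eggs\" +(eggplant | potato) -frittata")
+ *         .field("title", 5.0f)
+ *         .field("body")
+ *         .defaultOperator(SimpleQueryStringBuilder.Operator.AND);
+ * </pre>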
+ */
+public class SimpleQueryStringBuilder extends BaseQueryBuilder {
+ private Map<String, Float> fields = new HashMap<String, Float>();
+ private String analyzer;
+ private Operator operator;
+ private final String queryText;
+ private int flags = -1;
+
+ /**
+ * Operators for the default_operator
+ */
+ public static enum Operator {
+ AND,
+ OR
+ }
+
+ /**
+ * Construct a new simple query with the given text
+ */
+ public SimpleQueryStringBuilder(String text) {
+ this.queryText = text;
+ }
+
+ /**
+ * Add a field to run the query against
+ */
+ public SimpleQueryStringBuilder field(String field) {
+ this.fields.put(field, null);
+ return this;
+ }
+
+ /**
+ * Add a field to run the query against with a specific boost
+ */
+ public SimpleQueryStringBuilder field(String field, float boost) {
+ this.fields.put(field, boost);
+ return this;
+ }
+
+ /**
+ * Specify an analyzer to use for the query
+ */
+ public SimpleQueryStringBuilder analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return this;
+ }
+
+ /**
+ * Specify the default operator for the query. Defaults to "OR" if no
+ * operator is specified
+ */
+ public SimpleQueryStringBuilder defaultOperator(Operator defaultOperator) {
+ this.operator = defaultOperator;
+ return this;
+ }
+
+ /**
+ * Specify the enabled features of the SimpleQueryString.
+ */
+ public SimpleQueryStringBuilder flags(SimpleQueryStringFlag... flags) {
+ int value = 0;
+ if (flags.length == 0) {
+ value = SimpleQueryStringFlag.ALL.value;
+ } else {
+ for (SimpleQueryStringFlag flag : flags) {
+ value |= flag.value;
+ }
+ }
+ this.flags = value;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(SimpleQueryStringParser.NAME);
+
+ builder.field("query", queryText);
+
+ if (fields.size() > 0) {
+ builder.startArray("fields");
+ for (Map.Entry<String, Float> entry : fields.entrySet()) {
+ String field = entry.getKey();
+ Float boost = entry.getValue();
+ if (boost != null) {
+ builder.value(field + "^" + boost);
+ } else {
+ builder.value(field);
+ }
+ }
+ builder.endArray();
+ }
+
+ if (flags != -1) {
+ builder.field("flags", flags);
+ }
+
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+
+ if (operator != null) {
+ builder.field("default_operator", operator.name().toLowerCase(Locale.ROOT));
+ }
+
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java
new file mode 100644
index 0000000..425753b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.queryparser.XSimpleQueryParser;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+
+import java.util.Locale;
+
+/**
+ * Flags for the XSimpleQueryString parser
+ */
+public enum SimpleQueryStringFlag {
+ ALL(-1),
+ NONE(0),
+ AND(XSimpleQueryParser.AND_OPERATOR),
+ NOT(XSimpleQueryParser.NOT_OPERATOR),
+ OR(XSimpleQueryParser.OR_OPERATOR),
+ PREFIX(XSimpleQueryParser.PREFIX_OPERATOR),
+ PHRASE(XSimpleQueryParser.PHRASE_OPERATOR),
+ PRECEDENCE(XSimpleQueryParser.PRECEDENCE_OPERATORS),
+ ESCAPE(XSimpleQueryParser.ESCAPE_OPERATOR),
+ WHITESPACE(XSimpleQueryParser.WHITESPACE_OPERATOR);
+
+ final int value;
+
+ private SimpleQueryStringFlag(int value) {
+ this.value = value;
+ }
+
+ public int value() {
+ return value;
+ }
+
+ static int resolveFlags(String flags) {
+ if (!Strings.hasLength(flags)) {
+ return ALL.value();
+ }
+ int magic = NONE.value();
+ for (String s : Strings.delimitedListToStringArray(flags, "|")) {
+ if (s.isEmpty()) {
+ continue;
+ }
+ try {
+ SimpleQueryStringFlag flag = SimpleQueryStringFlag.valueOf(s.toUpperCase(Locale.ROOT));
+ switch (flag) {
+ case NONE:
+ return 0;
+ case ALL:
+ return -1;
+ default:
+ magic |= flag.value();
+ }
+ } catch (IllegalArgumentException iae) {
+ throw new ElasticsearchIllegalArgumentException("Unknown " + SimpleQueryStringParser.NAME + " flag [" + s + "]");
+ }
+ }
+ return magic;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java
new file mode 100644
index 0000000..ca52822
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.queryparser.XSimpleQueryParser;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * SimpleQueryStringParser is a query parser that acts similarly to a query_string
+ * query, but never throws exceptions on malformed syntax. It supports
+ * the following:
+ * <p/>
+ * <ul>
+ * <li>'{@code +}' specifies {@code AND} operation: <tt>token1+token2</tt>
+ * <li>'{@code |}' specifies {@code OR} operation: <tt>token1|token2</tt>
+ * <li>'{@code -}' negates a single token: <tt>-token0</tt>
+ * <li>'{@code "}' creates phrases of terms: <tt>"term1 term2 ..."</tt>
+ * <li>'{@code *}' at the end of terms specifies prefix query: <tt>term*</tt>
+ * <li>'{@code (}' and '{@code)}' specifies precedence: <tt>token1 + (token2 | token3)</tt>
+ * </ul>
+ * <p/>
+ * See: {@link XSimpleQueryParser} for more information.
+ * <p/>
+ * This query supports these options:
+ * <p/>
+ * Required:
+ * {@code query} - query text to be converted into other queries
+ * <p/>
+ * Optional:
+ * {@code analyzer} - analyzer to be used for analyzing tokens to determine
+ * which kind of query they should be converted into, defaults to "standard"
+ * {@code default_operator} - default operator for boolean queries, defaults
+ * to OR
+ * {@code fields} - fields to search, defaults to _all if not set, allows
+ * boosting a field with ^n
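+ * <p/>
+ * An illustrative request body (example only, not part of the original source):
+ * <pre>
+ * {
+ *   "simple_query_string" : {
+ *     "query" : "\"fried eggs\" +(eggplant | potato) -frittata",
+ *     "fields" : ["title^5", "body"],
+ *     "default_operator" : "and"
+ *   }
+ * }
+ * </pre>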
+ */
+public class SimpleQueryStringParser implements QueryParser {
+
+ public static final String NAME = "simple_query_string";
+
+ @Inject
+ public SimpleQueryStringParser(Settings settings) {
+
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ String queryBody = null;
+ String field = null;
+ Map<String, Float> fieldsAndWeights = null;
+ BooleanClause.Occur defaultOperator = null;
+ Analyzer analyzer = null;
+ int flags = -1;
+
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("fields".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String fField = null;
+ float fBoost = 1;
+ char[] text = parser.textCharacters();
+ int end = parser.textOffset() + parser.textLength();
+ for (int i = parser.textOffset(); i < end; i++) {
+ if (text[i] == '^') {
+ int relativeLocation = i - parser.textOffset();
+ fField = new String(text, parser.textOffset(), relativeLocation);
+ fBoost = Float.parseFloat(new String(text, i + 1, parser.textLength() - relativeLocation - 1));
+ break;
+ }
+ }
+ if (fField == null) {
+ fField = parser.text();
+ }
+
+ if (fieldsAndWeights == null) {
+ fieldsAndWeights = new HashMap<String, Float>();
+ }
+
+ if (Regex.isSimpleMatchPattern(fField)) {
+ for (String fieldName : parseContext.mapperService().simpleMatchToIndexNames(fField)) {
+ fieldsAndWeights.put(fieldName, fBoost);
+ }
+ } else {
+ MapperService.SmartNameFieldMappers mappers = parseContext.smartFieldMappers(fField);
+ if (mappers != null && mappers.hasMapper()) {
+ fieldsAndWeights.put(mappers.mapper().names().indexName(), fBoost);
+ } else {
+ fieldsAndWeights.put(fField, fBoost);
+ }
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(),
+ "[" + NAME + "] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("query".equals(currentFieldName)) {
+ queryBody = parser.text();
+ } else if ("analyzer".equals(currentFieldName)) {
+ analyzer = parseContext.analysisService().analyzer(parser.text());
+ if (analyzer == null) {
+ throw new QueryParsingException(parseContext.index(), "[" + NAME + "] analyzer [" + parser.text() + "] not found");
+ }
+ } else if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("default_operator".equals(currentFieldName) || "defaultOperator".equals(currentFieldName)) {
+ String op = parser.text();
+ if ("or".equalsIgnoreCase(op)) {
+ defaultOperator = BooleanClause.Occur.SHOULD;
+ } else if ("and".equalsIgnoreCase(op)) {
+ defaultOperator = BooleanClause.Occur.MUST;
+ } else {
+ throw new QueryParsingException(parseContext.index(),
+ "[" + NAME + "] default operator [" + op + "] is not allowed");
+ }
+ } else if ("flags".equals(currentFieldName)) {
+ if (parser.hasTextCharacters()) {
+ // Possible options are:
+ // ALL, NONE, AND, OR, PREFIX, PHRASE, PRECEDENCE, ESCAPE, WHITESPACE
+ flags = SimpleQueryStringFlag.resolveFlags(parser.text());
+ } else {
+ flags = parser.intValue();
+ if (flags < 0) {
+ flags = SimpleQueryStringFlag.ALL.value();
+ }
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[" + NAME + "] unsupported field [" + parser.currentName() + "]");
+ }
+ }
+ }
+
+ // Query text is required
+ if (queryBody == null) {
+ throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query text missing");
+ }
+
+ // Support specifying only a field instead of a map
+ if (field == null) {
+ field = currentFieldName;
+ }
+
+ // Use the default field (_all) if no fields specified
+ if (fieldsAndWeights == null) {
+ field = parseContext.defaultField();
+ }
+
+ // Fall back to the mapper service's default search analyzer
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+
+ XSimpleQueryParser sqp;
+ if (fieldsAndWeights != null) {
+ sqp = new XSimpleQueryParser(analyzer, fieldsAndWeights, flags);
+ } else {
+ sqp = new XSimpleQueryParser(analyzer, Collections.singletonMap(field, 1.0F), flags);
+ }
+
+ if (defaultOperator != null) {
+ sqp.setDefaultOperator(defaultOperator);
+ }
+
+ return sqp.parse(queryBody);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java
new file mode 100644
index 0000000..5eb429a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ *
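+ * Builds a {@code span_first} query, matching spans that occur within the first
+ * {@code end} positions of a field. A minimal usage sketch (illustrative only;
+ * SpanTermQueryBuilder is assumed to exist elsewhere in this package):
+ * <pre>
+ * SpanFirstQueryBuilder query = new SpanFirstQueryBuilder(new SpanTermQueryBuilder("user", "kimchy"), 3);
+ * </pre>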
+ */
+public class SpanFirstQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanFirstQueryBuilder> {
+
+ private final SpanQueryBuilder matchBuilder;
+
+ private final int end;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ public SpanFirstQueryBuilder(SpanQueryBuilder matchBuilder, int end) {
+ this.matchBuilder = matchBuilder;
+ this.end = end;
+ }
+
+ public SpanFirstQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched_queries per hit.
+ */
+ public SpanFirstQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(SpanFirstQueryParser.NAME);
+ builder.field("match");
+ matchBuilder.toXContent(builder, params);
+ builder.field("end", end);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java
new file mode 100644
index 0000000..ea8ff3d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanFirstQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ *
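+ * Parses the {@code span_first} query. An illustrative request body this
+ * parser accepts (example only; assumes the span_term query is registered):
+ * <pre>
+ * { "span_first" : { "match" : { "span_term" : { "user" : "kimchy" } }, "end" : 3 } }
+ * </pre>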
+ */
+public class SpanFirstQueryParser implements QueryParser {
+
+ public static final String NAME = "span_first";
+
+ @Inject
+ public SpanFirstQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+
+ SpanQuery match = null;
+ int end = -1;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("match".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (!(query instanceof SpanQuery)) {
+ throw new QueryParsingException(parseContext.index(), "spanFirst [match] must be of type span query");
+ }
+ match = (SpanQuery) query;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_first] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("end".equals(currentFieldName)) {
+ end = parser.intValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_first] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (match == null) {
+ throw new QueryParsingException(parseContext.index(), "spanFirst must have [match] span query clause");
+ }
+ if (end == -1) {
+ throw new QueryParsingException(parseContext.index(), "spanFirst must have [end] set for it");
+ }
+
+ SpanFirstQuery query = new SpanFirstQuery(match, end);
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java
new file mode 100644
index 0000000..07991e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import java.io.IOException;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+/**
+ *
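+ * Wraps a multi term query as a span query. A minimal usage sketch
+ * (illustrative only, not part of the original source):
+ * <pre>
+ * SpanMultiTermQueryBuilder query = new SpanMultiTermQueryBuilder(new RegexpQueryBuilder("user", "ki.*"));
+ * </pre>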
+ */
+public class SpanMultiTermQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder{
+
+ private MultiTermQueryBuilder multiTermQueryBuilder;
+
+ public SpanMultiTermQueryBuilder(MultiTermQueryBuilder multiTermQueryBuilder) {
+ this.multiTermQueryBuilder = multiTermQueryBuilder;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params)
+ throws IOException {
+ builder.startObject(SpanMultiTermQueryParser.NAME);
+ builder.field(SpanMultiTermQueryParser.MATCH_NAME);
+ multiTermQueryBuilder.toXContent(builder, params);
+ builder.endObject();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java
new file mode 100644
index 0000000..c79c6f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import java.io.IOException;
+
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+
+/**
+ *
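+ * Parses the {@code span_multi} query. An illustrative request body this
+ * parser accepts (example only; assumes the regexp query parser is registered):
+ * <pre>
+ * { "span_multi" : { "match" : { "regexp" : { "user" : { "value" : "ki.*" } } } } }
+ * </pre>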
+ */
+public class SpanMultiTermQueryParser implements QueryParser {
+
+ public static final String NAME = "span_multi";
+ public static final String MATCH_NAME = "match";
+
+ @Inject
+ public SpanMultiTermQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { NAME, Strings.toCamelCase(NAME) };
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Token token = parser.nextToken();
+ if (!MATCH_NAME.equals(parser.currentName()) || token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause");
+ }
+
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause");
+ }
+
+ Query subQuery = parseContext.parseInnerQuery();
+ if (!(subQuery instanceof MultiTermQuery)) {
+ throw new QueryParsingException(parseContext.index(), "spanMultiTerm [" + MATCH_NAME + "] must be of type multi term query");
+ }
+
+ parser.nextToken();
+ return new SpanMultiTermQueryWrapper<MultiTermQuery>((MultiTermQuery) subQuery);
+ }
+}
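
A minimal usage sketch for the builder/parser pair above; the field name and value are illustrative only, and PrefixQueryBuilder is assumed to be one of this package's MultiTermQueryBuilder implementations. The JSON in the comment is approximately what doXContent emits:

    import org.elasticsearch.index.query.PrefixQueryBuilder;
    import org.elasticsearch.index.query.SpanMultiTermQueryBuilder;

    public class SpanMultiExample {
        public static void main(String[] args) {
            // Wrap a prefix query so it can be combined with other span queries.
            SpanMultiTermQueryBuilder q =
                    new SpanMultiTermQueryBuilder(new PrefixQueryBuilder("user", "ki"));
            // Emits roughly: {"span_multi":{"match":{"prefix":{"user":"ki"}}}}
        }
    }
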
diff --git a/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
new file mode 100644
index 0000000..b0afb4b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * Builder for the span_near query, matching spans that occur within [slop] positions of one another.
+ */
+public class SpanNearQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanNearQueryBuilder> {
+
+ private ArrayList<SpanQueryBuilder> clauses = new ArrayList<SpanQueryBuilder>();
+
+ private Integer slop = null;
+
+ private Boolean inOrder;
+
+ private Boolean collectPayloads;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ public SpanNearQueryBuilder clause(SpanQueryBuilder clause) {
+ clauses.add(clause);
+ return this;
+ }
+
+ public SpanNearQueryBuilder slop(int slop) {
+ this.slop = slop;
+ return this;
+ }
+
+ public SpanNearQueryBuilder inOrder(boolean inOrder) {
+ this.inOrder = inOrder;
+ return this;
+ }
+
+ public SpanNearQueryBuilder collectPayloads(boolean collectPayloads) {
+ this.collectPayloads = collectPayloads;
+ return this;
+ }
+
+ public SpanNearQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public SpanNearQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ if (clauses.isEmpty()) {
+ throw new ElasticsearchIllegalArgumentException("Must have at least one clause when building a spanNear query");
+ }
+ if (slop == null) {
+ throw new ElasticsearchIllegalArgumentException("Must set the slop when building a spanNear query");
+ }
+ builder.startObject(SpanNearQueryParser.NAME);
+ builder.startArray("clauses");
+ for (SpanQueryBuilder clause : clauses) {
+ clause.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.field("slop", slop.intValue());
+ if (inOrder != null) {
+ builder.field("in_order", inOrder);
+ }
+ if (collectPayloads != null) {
+ builder.field("collect_payloads", collectPayloads);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java
new file mode 100644
index 0000000..84283fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Parser for the span_near query.
+ */
+public class SpanNearQueryParser implements QueryParser {
+
+ public static final String NAME = "span_near";
+
+ @Inject
+ public SpanNearQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+ Integer slop = null;
+ boolean inOrder = true;
+ boolean collectPayloads = true;
+ String queryName = null;
+
+ List<SpanQuery> clauses = newArrayList();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("clauses".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Query query = parseContext.parseInnerQuery();
+ if (!(query instanceof SpanQuery)) {
+ throw new QueryParsingException(parseContext.index(), "spanNear [clauses] must be of type span query");
+ }
+ clauses.add((SpanQuery) query);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("in_order".equals(currentFieldName) || "inOrder".equals(currentFieldName)) {
+ inOrder = parser.booleanValue();
+ } else if ("collect_payloads".equals(currentFieldName) || "collectPayloads".equals(currentFieldName)) {
+ collectPayloads = parser.booleanValue();
+ } else if ("slop".equals(currentFieldName)) {
+ slop = Integer.valueOf(parser.intValue());
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]");
+ }
+ }
+ if (clauses.isEmpty()) {
+ throw new QueryParsingException(parseContext.index(), "span_near must include [clauses]");
+ }
+ if (slop == null) {
+ throw new QueryParsingException(parseContext.index(), "span_near must include [slop]");
+ }
+
+ SpanNearQuery query = new SpanNearQuery(clauses.toArray(new SpanQuery[clauses.size()]), slop.intValue(), inOrder, collectPayloads);
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
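
A usage sketch for the span_near builder/parser pair above; "body", "quick" and "fox" are illustrative values, and the JSON in the comments is approximately what the builder serializes:

    import org.elasticsearch.index.query.SpanNearQueryBuilder;
    import org.elasticsearch.index.query.SpanTermQueryBuilder;

    public class SpanNearExample {
        public static void main(String[] args) {
            // Match "quick" and "fox" within two positions of each other, in order.
            SpanNearQueryBuilder q = new SpanNearQueryBuilder()
                    .clause(new SpanTermQueryBuilder("body", "quick"))
                    .clause(new SpanTermQueryBuilder("body", "fox"))
                    .slop(2)
                    .inOrder(true);
            // Emits roughly:
            // {"span_near":{"clauses":[{"span_term":{"body":"quick"}},
            //                          {"span_term":{"body":"fox"}}],
            //  "slop":2,"in_order":true}}
        }
    }
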
diff --git a/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java
new file mode 100644
index 0000000..ce9d53f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for the span_not query, matching spans from the [include] clause that do not overlap spans of the [exclude] clause.
+ */
+public class SpanNotQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanNotQueryBuilder> {
+
+ private SpanQueryBuilder include;
+
+ private SpanQueryBuilder exclude;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ public SpanNotQueryBuilder include(SpanQueryBuilder include) {
+ this.include = include;
+ return this;
+ }
+
+ public SpanNotQueryBuilder exclude(SpanQueryBuilder exclude) {
+ this.exclude = exclude;
+ return this;
+ }
+
+ public SpanNotQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public SpanNotQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ if (include == null) {
+ throw new ElasticsearchIllegalArgumentException("Must specify include when using spanNot query");
+ }
+ if (exclude == null) {
+ throw new ElasticsearchIllegalArgumentException("Must specify exclude when using spanNot query");
+ }
+ builder.startObject(SpanNotQueryParser.NAME);
+ builder.field("include");
+ include.toXContent(builder, params);
+ builder.field("exclude");
+ exclude.toXContent(builder, params);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java
new file mode 100644
index 0000000..269973e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanNotQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for the span_not query.
+ */
+public class SpanNotQueryParser implements QueryParser {
+
+ public static final String NAME = "span_not";
+
+ @Inject
+ public SpanNotQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+
+ SpanQuery include = null;
+ SpanQuery exclude = null;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("include".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (!(query instanceof SpanQuery)) {
+ throw new QueryParsingException(parseContext.index(), "spanNot [include] must be of type span query");
+ }
+ include = (SpanQuery) query;
+ } else if ("exclude".equals(currentFieldName)) {
+ Query query = parseContext.parseInnerQuery();
+ if (!(query instanceof SpanQuery)) {
+ throw new QueryParsingException(parseContext.index(), "spanNot [exclude] must be of type span query");
+ }
+ exclude = (SpanQuery) query;
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_not] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_not] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (include == null) {
+ throw new QueryParsingException(parseContext.index(), "spanNot must have [include] span query clause");
+ }
+ if (exclude == null) {
+ throw new QueryParsingException(parseContext.index(), "spanNot must have [exclude] span query clause");
+ }
+
+ SpanNotQuery query = new SpanNotQuery(include, exclude);
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
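
A usage sketch for span_not; field and term values are illustrative assumptions, and the JSON is approximately what the builder emits:

    import org.elasticsearch.index.query.SpanNotQueryBuilder;
    import org.elasticsearch.index.query.SpanTermQueryBuilder;

    public class SpanNotExample {
        public static void main(String[] args) {
            // Match "quick" spans unless they overlap a "brown" span.
            SpanNotQueryBuilder q = new SpanNotQueryBuilder()
                    .include(new SpanTermQueryBuilder("body", "quick"))
                    .exclude(new SpanTermQueryBuilder("body", "brown"));
            // Emits roughly:
            // {"span_not":{"include":{"span_term":{"body":"quick"}},
            //              "exclude":{"span_term":{"body":"brown"}}}}
        }
    }
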
diff --git a/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java
new file mode 100644
index 0000000..90dc452
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * Builder for the span_or query, matching the union of its span [clauses].
+ */
+public class SpanOrQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanOrQueryBuilder> {
+
+ private ArrayList<SpanQueryBuilder> clauses = new ArrayList<SpanQueryBuilder>();
+
+ private float boost = -1;
+
+ private String queryName;
+
+ public SpanOrQueryBuilder clause(SpanQueryBuilder clause) {
+ clauses.add(clause);
+ return this;
+ }
+
+ public SpanOrQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public SpanOrQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ if (clauses.isEmpty()) {
+ throw new ElasticsearchIllegalArgumentException("Must have at least one clause when building a spanOr query");
+ }
+ builder.startObject(SpanOrQueryParser.NAME);
+ builder.startArray("clauses");
+ for (SpanQueryBuilder clause : clauses) {
+ clause.toXContent(builder, params);
+ }
+ builder.endArray();
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java
new file mode 100644
index 0000000..a9d12f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Parser for the span_or query.
+ */
+public class SpanOrQueryParser implements QueryParser {
+
+ public static final String NAME = "span_or";
+
+ @Inject
+ public SpanOrQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ float boost = 1.0f;
+ String queryName = null;
+
+ List<SpanQuery> clauses = newArrayList();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("clauses".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Query query = parseContext.parseInnerQuery();
+ if (!(query instanceof SpanQuery)) {
+ throw new QueryParsingException(parseContext.index(), "spanOr [clauses] must be of type span query");
+ }
+ clauses.add((SpanQuery) query);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_or] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_or] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (clauses.isEmpty()) {
+ throw new QueryParsingException(parseContext.index(), "spanOr must include [clauses]");
+ }
+
+ SpanOrQuery query = new SpanOrQuery(clauses.toArray(new SpanQuery[clauses.size()]));
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
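
A usage sketch for span_or; field and term values are illustrative assumptions, and the JSON is approximately what the builder emits:

    import org.elasticsearch.index.query.SpanOrQueryBuilder;
    import org.elasticsearch.index.query.SpanTermQueryBuilder;

    public class SpanOrExample {
        public static void main(String[] args) {
            // Match the union of the two span clauses.
            SpanOrQueryBuilder q = new SpanOrQueryBuilder()
                    .clause(new SpanTermQueryBuilder("body", "quick"))
                    .clause(new SpanTermQueryBuilder("body", "fast"));
            // Emits roughly:
            // {"span_or":{"clauses":[{"span_term":{"body":"quick"}},
            //                        {"span_term":{"body":"fast"}}]}}
        }
    }
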
diff --git a/src/main/java/org/elasticsearch/index/query/SpanQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanQueryBuilder.java
new file mode 100644
index 0000000..5c12489
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanQueryBuilder.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+/**
+ * Marker interface for query builders that produce span queries.
+ */
+public interface SpanQueryBuilder extends QueryBuilder {
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java
new file mode 100644
index 0000000..bbda99a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for the span_term query, the span equivalent of the term query.
+ */
+public class SpanTermQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanTermQueryBuilder> {
+
+ private final String name;
+
+ private final Object value;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ public SpanTermQueryBuilder(String name, String value) {
+ this(name, (Object) value);
+ }
+
+ public SpanTermQueryBuilder(String name, int value) {
+ this(name, (Object) value);
+ }
+
+ public SpanTermQueryBuilder(String name, long value) {
+ this(name, (Object) value);
+ }
+
+ public SpanTermQueryBuilder(String name, float value) {
+ this(name, (Object) value);
+ }
+
+ public SpanTermQueryBuilder(String name, double value) {
+ this(name, (Object) value);
+ }
+
+ private SpanTermQueryBuilder(String name, Object value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ public SpanTermQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public SpanTermQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(SpanTermQueryParser.NAME);
+ if (boost == -1 && queryName == null) {
+ builder.field(name, value);
+ } else {
+ builder.startObject(name);
+ builder.field("value", value);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java
new file mode 100644
index 0000000..0203bb2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+/**
+ * Parser for the span_term query; accepts both the short form and an object form with value, boost and _name.
+ */
+public class SpanTermQueryParser implements QueryParser {
+
+ public static final String NAME = "span_term";
+
+ @Inject
+ public SpanTermQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ }
+ assert token == XContentParser.Token.FIELD_NAME;
+ String fieldName = parser.currentName();
+
+
+ String value = null;
+ float boost = 1.0f;
+ String queryName = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("term".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("value".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[span_term] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.text();
+ // move to the next token
+ parser.nextToken();
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for term query");
+ }
+
+ BytesRef valueBytes = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ fieldName = smartNameFieldMappers.mapper().names().indexName();
+ valueBytes = smartNameFieldMappers.mapper().indexedValueForSearch(value);
+ }
+ }
+ if (valueBytes == null) {
+ valueBytes = new BytesRef(value);
+ }
+
+ SpanTermQuery query = new SpanTermQuery(new Term(fieldName, valueBytes));
+ query.setBoost(boost);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
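
A usage sketch for span_term covering the two serialized forms; field and term values are illustrative assumptions:

    import org.elasticsearch.index.query.SpanTermQueryBuilder;

    public class SpanTermExample {
        public static void main(String[] args) {
            // Short form, emitted when neither boost nor _name is set:
            // {"span_term":{"body":"quick"}}
            SpanTermQueryBuilder q = new SpanTermQueryBuilder("body", "quick");

            // Object form, which the parser above also accepts:
            // {"span_term":{"body":{"value":"quick","boost":2.0}}}
            SpanTermQueryBuilder boosted = new SpanTermQueryBuilder("body", "quick").boost(2.0f);
        }
    }
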
diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java
new file mode 100644
index 0000000..74349a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter for a field based on a term.
+ */
+public class TermFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private final Object value;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public TermFilterBuilder(String name, String value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public TermFilterBuilder(String name, int value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public TermFilterBuilder(String name, long value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public TermFilterBuilder(String name, float value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public TermFilterBuilder(String name, double value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * A filter for a field based on a term.
+ *
+ * @param name The field name
+ * @param value The term value
+ */
+ public TermFilterBuilder(String name, Object value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public TermFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. Defaults to <tt>true</tt>.
+ */
+ public TermFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public TermFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TermFilterParser.NAME);
+ builder.field(name, value);
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java
new file mode 100644
index 0000000..5c8c62f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * Parser for the term filter.
+ */
+public class TermFilterParser implements FilterParser {
+
+ public static final String NAME = "term";
+
+ @Inject
+ public TermFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ boolean cache = true; // since usually term filter is on repeating terms, cache it by default
+ CacheKeyFilter.Key cacheKey = null;
+ String fieldName = null;
+ Object value = null;
+
+ String filterName = null;
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ // also support a format of "term" : {"field_name" : { ... }}
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("term".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("value".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[term] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ } else if (token.isValue()) {
+ if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ fieldName = currentFieldName;
+ value = parser.objectBytes();
+ }
+ }
+ }
+
+ if (fieldName == null) {
+ throw new QueryParsingException(parseContext.index(), "No field specified for term filter");
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
+ }
+
+ Filter filter = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ filter = smartNameFieldMappers.mapper().termFilter(value, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ filter = smartNameFieldMappers.mapper().termFilter(value, parseContext);
+ }
+ }
+ if (filter == null) {
+ filter = new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(value)));
+ }
+
+ if (cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ }
+} \ No newline at end of file
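
A usage sketch for the term filter builder/parser pair above; field name, value and filter name are illustrative assumptions, and the JSON is approximately what the builder emits:

    import org.elasticsearch.index.query.TermFilterBuilder;

    public class TermFilterExample {
        public static void main(String[] args) {
            // Named, explicitly cached term filter.
            TermFilterBuilder f = new TermFilterBuilder("user", "kimchy")
                    .filterName("user_is_kimchy")
                    .cache(true);
            // Emits roughly:
            // {"term":{"user":"kimchy","_name":"user_is_kimchy","_cache":true}}
        }
    }
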
diff --git a/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java
new file mode 100644
index 0000000..b8640e8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A Query that matches documents containing a term.
+ */
+public class TermQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<TermQueryBuilder> {
+
+ private final String name;
+
+ private final Object value;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, String value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, int value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, long value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, float value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, double value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, boolean value) {
+ this(name, (Object) value);
+ }
+
+ /**
+ * Constructs a new term query.
+ *
+ * @param name The name of the field
+ * @param value The value of the term
+ */
+ public TermQueryBuilder(String name, Object value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public TermQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the query that can be used when searching for matched queries per hit.
+ */
+ public TermQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TermQueryParser.NAME);
+ if (boost == -1 && queryName == null) {
+ builder.field(name, value);
+ } else {
+ builder.startObject(name);
+ builder.field("value", value);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/TermQueryParser.java b/src/main/java/org/elasticsearch/index/query/TermQueryParser.java
new file mode 100644
index 0000000..1670e6f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermQueryParser.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * Parser for the term query.
+ */
+public class TermQueryParser implements QueryParser {
+
+ public static final String NAME = "term";
+
+ @Inject
+ public TermQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[term] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+
+ String queryName = null;
+ Object value = null;
+ float boost = 1.0f;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("term".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("value".equals(currentFieldName)) {
+ value = parser.objectBytes();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[term] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.text();
+ // move to the next token
+ parser.nextToken();
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for term query");
+ }
+
+ Query query = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ query = smartNameFieldMappers.mapper().termQuery(value, parseContext);
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ query = smartNameFieldMappers.mapper().termQuery(value, parseContext);
+ }
+ }
+ if (query == null) {
+ query = new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)));
+ }
+ query.setBoost(boost);
+ query = wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+}
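
A usage sketch for the term query covering both serialized forms; field and value are illustrative assumptions:

    import org.elasticsearch.index.query.TermQueryBuilder;

    public class TermQueryExample {
        public static void main(String[] args) {
            // Short form: {"term":{"user":"kimchy"}}
            TermQueryBuilder q = new TermQueryBuilder("user", "kimchy");

            // Long form, produced once boost or _name is set:
            // {"term":{"user":{"value":"kimchy","boost":2.0}}}
            TermQueryBuilder boosted = new TermQueryBuilder("user", "kimchy").boost(2.0f);
        }
    }
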
diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java
new file mode 100644
index 0000000..f259a84
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter for a field based on several terms matching on any of them.
+ */
+public class TermsFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+
+ private final Object values;
+
+ private Boolean cache;
+ private String cacheKey;
+
+ private String filterName;
+
+ private String execution;
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, String... values) {
+ this(name, (Object[]) values);
+ }
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, int... values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, long... values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, float... values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, double... values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, Object... values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * A filter for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsFilterBuilder(String name, Iterable values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * Sets the execution mode for the terms filter. Can be either "plain", "fielddata",
+ * "bool" or "and". Defaults to "plain".
+ */
+ public TermsFilterBuilder execution(String execution) {
+ this.execution = execution;
+ return this;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public TermsFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Should the filter be cached or not. When unset, "plain" execution caches by default, while "fielddata" execution caches only when explicitly enabled.
+ */
+ public TermsFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public TermsFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TermsFilterParser.NAME);
+ builder.field(name, values);
+
+ if (execution != null) {
+ builder.field("execution", execution);
+ }
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+
+ builder.endObject();
+ }
+} \ No newline at end of file
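
A usage sketch for the terms filter builder above; field and term values are illustrative assumptions, and the JSON is approximately what the builder emits. The parser that follows also accepts a lookup object (index/type/id/path/routing) in place of the terms array:

    import org.elasticsearch.index.query.TermsFilterBuilder;

    public class TermsFilterExample {
        public static void main(String[] args) {
            // Match documents whose "user" field contains any of the given terms,
            // evaluated as a boolean combination of per-term filters.
            TermsFilterBuilder f = new TermsFilterBuilder("user", "kimchy", "elasticsearch")
                    .execution("bool");
            // Emits roughly:
            // {"terms":{"user":["kimchy","elasticsearch"],"execution":"bool"}}
        }
    }
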
diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java
new file mode 100644
index 0000000..90153db
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.OrFilter;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.indices.cache.filter.terms.IndicesTermsFilterCache;
+import org.elasticsearch.indices.cache.filter.terms.TermsLookup;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;
+
+/**
+ * Parser for the terms filter (also registered under the name "in").
+ */
+public class TermsFilterParser implements FilterParser {
+
+ public static final String NAME = "terms";
+ private IndicesTermsFilterCache termsFilterCache;
+
+ @Inject
+ public TermsFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "in"};
+ }
+
+ @Inject(optional = true)
+ public void setIndicesTermsFilterCache(IndicesTermsFilterCache termsFilterCache) {
+ this.termsFilterCache = termsFilterCache;
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ MapperService.SmartNameFieldMappers smartNameFieldMappers;
+ Boolean cache = null;
+ String filterName = null;
+ String currentFieldName = null;
+
+ String lookupIndex = parseContext.index().name();
+ String lookupType = null;
+ String lookupId = null;
+ String lookupPath = null;
+ String lookupRouting = null;
+ boolean lookupCache = true;
+
+ CacheKeyFilter.Key cacheKey = null;
+ XContentParser.Token token;
+ String execution = "plain";
+ List<Object> terms = Lists.newArrayList();
+ String fieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ fieldName = currentFieldName;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Object value = parser.objectBytes();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for terms filter");
+ }
+ terms.add(value);
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ lookupIndex = parser.text();
+ } else if ("type".equals(currentFieldName)) {
+ lookupType = parser.text();
+ } else if ("id".equals(currentFieldName)) {
+ lookupId = parser.text();
+ } else if ("path".equals(currentFieldName)) {
+ lookupPath = parser.text();
+ } else if ("routing".equals(currentFieldName)) {
+ lookupRouting = parser.textOrNull();
+ } else if ("cache".equals(currentFieldName)) {
+ lookupCache = parser.booleanValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "] within lookup element");
+ }
+ }
+ }
+ if (lookupType == null) {
+ throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the type");
+ }
+ if (lookupId == null) {
+ throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the id");
+ }
+ if (lookupPath == null) {
+ throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the path");
+ }
+ } else if (token.isValue()) {
+ if ("execution".equals(currentFieldName)) {
+ execution = parser.text();
+ } else if ("_name".equals(currentFieldName)) {
+ filterName = parser.text();
+ } else if ("_cache".equals(currentFieldName)) {
+ cache = parser.booleanValue();
+ } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
+ cacheKey = new CacheKeyFilter.Key(parser.text());
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (fieldName == null) {
+ throw new QueryParsingException(parseContext.index(), "terms filter requires a field name, followed by array of terms");
+ }
+
+ FieldMapper fieldMapper = null;
+ smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ String[] previousTypes = null;
+ if (smartNameFieldMappers != null) {
+ if (smartNameFieldMappers.hasMapper()) {
+ fieldMapper = smartNameFieldMappers.mapper();
+ fieldName = fieldMapper.names().indexName();
+ }
+ // if the field name carries an explicit type and we have a doc mapper for it, switch the parse context to that type
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ }
+ }
+
+ if (lookupId != null) {
+ // if there are no mappings, then nothing has been indexed yet against this shard, so we can return
+ // no match (but not cached!), since the Terms Lookup relies on the fact that there are mappings...
+ if (fieldMapper == null) {
+ return Queries.MATCH_NO_FILTER;
+ }
+
+ // external lookup, use it
+ TermsLookup termsLookup = new TermsLookup(fieldMapper, lookupIndex, lookupType, lookupId, lookupRouting, lookupPath, parseContext);
+
+ Filter filter = termsFilterCache.termsFilter(termsLookup, lookupCache, cacheKey);
+ if (filter == null) {
+ return null;
+ }
+
+ // cache the whole filter by default, or if explicitly told to
+ if (cache == null || cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ return filter;
+ }
+
+ if (terms.isEmpty()) {
+ return Queries.MATCH_NO_FILTER;
+ }
+
+ try {
+ Filter filter;
+ if ("plain".equals(execution)) {
+ if (fieldMapper != null) {
+ filter = fieldMapper.termsFilter(terms, parseContext);
+ } else {
+ BytesRef[] filterValues = new BytesRef[terms.size()];
+ for (int i = 0; i < filterValues.length; i++) {
+ filterValues[i] = BytesRefs.toBytesRef(terms.get(i));
+ }
+ filter = new XTermsFilter(fieldName, filterValues);
+ }
+ // cache the whole filter by default, or if explicitly told to
+ if (cache == null || cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("fielddata".equals(execution)) {
+ // if there are no mappings, then nothing has been indexed yet against this shard, so we can return
+ // no match (but not cached!), since the FieldDataTermsFilter relies on a mapping...
+ if (fieldMapper == null) {
+ return Queries.MATCH_NO_FILTER;
+ }
+
+ filter = fieldMapper.termsFilter(parseContext.fieldData(), terms, parseContext);
+ if (cache != null && cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("bool".equals(execution)) {
+ XBooleanFilter boolFilter = new XBooleanFilter();
+ if (fieldMapper != null) {
+ for (Object term : terms) {
+ boolFilter.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null), BooleanClause.Occur.SHOULD);
+ }
+ } else {
+ for (Object term : terms) {
+ boolFilter.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null), BooleanClause.Occur.SHOULD);
+ }
+ }
+ filter = boolFilter;
+ // only cache if explicitly told to, since we cache inner filters
+ if (cache != null && cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("bool_nocache".equals(execution)) {
+ XBooleanFilter boolFilter = new XBooleanFilter();
+ if (fieldMapper != null) {
+ for (Object term : terms) {
+ boolFilter.add(fieldMapper.termFilter(term, parseContext), BooleanClause.Occur.SHOULD);
+ }
+ } else {
+ for (Object term : terms) {
+ boolFilter.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), BooleanClause.Occur.SHOULD);
+ }
+ }
+ filter = boolFilter;
+ // cache the whole filter by default, or if explicitly told to
+ if (cache == null || cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("and".equals(execution)) {
+ List<Filter> filters = Lists.newArrayList();
+ if (fieldMapper != null) {
+ for (Object term : terms) {
+ filters.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null));
+ }
+ } else {
+ for (Object term : terms) {
+ filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null));
+ }
+ }
+ filter = new AndFilter(filters);
+ // only cache if explicitly told to, since we cache inner filters
+ if (cache != null && cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("and_nocache".equals(execution)) {
+ List<Filter> filters = Lists.newArrayList();
+ if (fieldMapper != null) {
+ for (Object term : terms) {
+ filters.add(fieldMapper.termFilter(term, parseContext));
+ }
+ } else {
+ for (Object term : terms) {
+ filters.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))));
+ }
+ }
+ filter = new AndFilter(filters);
+ // cache the whole filter by default, or if explicitly told to
+ if (cache == null || cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("or".equals(execution)) {
+ List<Filter> filters = Lists.newArrayList();
+ if (fieldMapper != null) {
+ for (Object term : terms) {
+ filters.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null));
+ }
+ } else {
+ for (Object term : terms) {
+ filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null));
+ }
+ }
+ filter = new OrFilter(filters);
+ // only cache if explicitly told to, since we cache inner filters
+ if (cache != null && cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else if ("or_nocache".equals(execution)) {
+ List<Filter> filters = Lists.newArrayList();
+ if (fieldMapper != null) {
+ for (Object term : terms) {
+ filters.add(fieldMapper.termFilter(term, parseContext));
+ }
+ } else {
+ for (Object term : terms) {
+ filters.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))));
+ }
+ }
+ filter = new OrFilter(filters);
+ // cache the whole filter by default, or if explicitly told to
+ if (cache == null || cache) {
+ filter = parseContext.cacheFilter(filter, cacheKey);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "terms filter execution value [" + execution + "] not supported");
+ }
+
+ filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
+ if (filterName != null) {
+ parseContext.addNamedFilter(filterName, filter);
+ }
+ return filter;
+ } finally {
+ if (smartNameFieldMappers != null && smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java
new file mode 100644
index 0000000..1c23c8f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter for a field based on several terms matching on any of them, where the
+ * terms are looked up from a field of another document rather than provided inline.
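+ * <p>
+ * For illustration, a usage sketch (index, type, id and path values are hypothetical):
+ * <pre>
+ * {@code
+ * new TermsLookupFilterBuilder("user")
+ *         .lookupIndex("users")
+ *         .lookupType("user")
+ *         .lookupId("2")
+ *         .lookupPath("followers");
+ * }
+ * </pre>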
+ */
+public class TermsLookupFilterBuilder extends BaseFilterBuilder {
+
+ private final String name;
+ private String lookupIndex;
+ private String lookupType;
+ private String lookupId;
+ private String lookupRouting;
+ private String lookupPath;
+ private Boolean lookupCache;
+
+ private Boolean cache;
+ private String cacheKey;
+ private String filterName;
+
+ public TermsLookupFilterBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public TermsLookupFilterBuilder filterName(String filterName) {
+ this.filterName = filterName;
+ return this;
+ }
+
+ /**
+ * Sets the index name to lookup the terms from.
+ */
+ public TermsLookupFilterBuilder lookupIndex(String lookupIndex) {
+ this.lookupIndex = lookupIndex;
+ return this;
+ }
+
+ /**
+ * Sets the index type to lookup the terms from.
+ */
+ public TermsLookupFilterBuilder lookupType(String lookupType) {
+ this.lookupType = lookupType;
+ return this;
+ }
+
+ /**
+ * Sets the doc id to lookup the terms from.
+ */
+ public TermsLookupFilterBuilder lookupId(String lookupId) {
+ this.lookupId = lookupId;
+ return this;
+ }
+
+ /**
+ * Sets the path within the document to lookup the terms from.
+ */
+ public TermsLookupFilterBuilder lookupPath(String lookupPath) {
+ this.lookupPath = lookupPath;
+ return this;
+ }
+
+ public TermsLookupFilterBuilder lookupRouting(String lookupRouting) {
+ this.lookupRouting = lookupRouting;
+ return this;
+ }
+
+ public TermsLookupFilterBuilder lookupCache(boolean lookupCache) {
+ this.lookupCache = lookupCache;
+ return this;
+ }
+
+ public TermsLookupFilterBuilder cache(boolean cache) {
+ this.cache = cache;
+ return this;
+ }
+
+ public TermsLookupFilterBuilder cacheKey(String cacheKey) {
+ this.cacheKey = cacheKey;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TermsFilterParser.NAME);
+
+ builder.startObject(name);
+ if (lookupIndex != null) {
+ builder.field("index", lookupIndex);
+ }
+ builder.field("type", lookupType);
+ builder.field("id", lookupId);
+ if (lookupRouting != null) {
+ builder.field("routing", lookupRouting);
+ }
+ if (lookupCache != null) {
+ builder.field("cache", lookupCache);
+ }
+ builder.field("path", lookupPath);
+ builder.endObject();
+
+ if (filterName != null) {
+ builder.field("_name", filterName);
+ }
+ if (cache != null) {
+ builder.field("_cache", cache);
+ }
+ if (cacheKey != null) {
+ builder.field("_cache_key", cacheKey);
+ }
+
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
new file mode 100644
index 0000000..7c69d4b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * A query for a field based on several terms matching on any of them.
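+ * <p>
+ * For illustration, a usage sketch (field name and values are hypothetical):
+ * <pre>
+ * {@code
+ * new TermsQueryBuilder("tags", "java", "search").minimumShouldMatch("2");
+ * }
+ * </pre>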
+ */
+public class TermsQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<TermsQueryBuilder> {
+
+ private final String name;
+
+ private final Object[] values;
+
+ private String minimumShouldMatch;
+
+ private Boolean disableCoord;
+
+ private float boost = -1;
+
+ private String queryName;
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, String... values) {
+ this(name, (Object[]) values);
+ }
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, int... values) {
+ this.name = name;
+ this.values = new Integer[values.length];
+ for (int i = 0; i < values.length; i++) {
+ this.values[i] = values[i];
+ }
+ }
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, long... values) {
+ this.name = name;
+ this.values = new Long[values.length];
+ for (int i = 0; i < values.length; i++) {
+ this.values[i] = values[i];
+ }
+ }
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, float... values) {
+ this.name = name;
+ this.values = new Float[values.length];
+ for (int i = 0; i < values.length; i++) {
+ this.values[i] = values[i];
+ }
+ }
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, double... values) {
+ this.name = name;
+ this.values = new Double[values.length];
+ for (int i = 0; i < values.length; i++) {
+ this.values[i] = values[i];
+ }
+ }
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, Object... values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ /**
+ * A query for a field based on several terms matching on any of them.
+ *
+ * @param name The field name
+ * @param values The terms
+ */
+ public TermsQueryBuilder(String name, Collection<?> values) {
+ this(name, values.toArray());
+ }
+
+ /**
+ * Sets the minimum number of matches across the provided terms. Defaults to <tt>1</tt>.
+ */
+ public TermsQueryBuilder minimumMatch(int minimumMatch) {
+ this.minimumShouldMatch = Integer.toString(minimumMatch);
+ return this;
+ }
+
+ /**
+ * Sets the minimum-should-match expression across the provided terms, for example <tt>"2"</tt> or <tt>"75%"</tt>.
+ */
+ public TermsQueryBuilder minimumShouldMatch(String minimumShouldMatch) {
+ this.minimumShouldMatch = minimumShouldMatch;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public TermsQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Disables <tt>Similarity#coord(int,int)</tt> in scoring. Defaults to <tt>false</tt>.
+ */
+ public TermsQueryBuilder disableCoord(boolean disableCoord) {
+ this.disableCoord = disableCoord;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public TermsQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TermsQueryParser.NAME);
+ builder.startArray(name);
+ for (Object value : values) {
+ builder.value(value);
+ }
+ builder.endArray();
+
+ if (minimumShouldMatch != null) {
+ builder.field("minimum_should_match", minimumShouldMatch);
+ }
+ if (disableCoord != null) {
+ builder.field("disable_coord", disableCoord);
+ }
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java
new file mode 100644
index 0000000..5159fd2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
+import static org.elasticsearch.common.lucene.search.Queries.optimizeQuery;
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * <pre>
+ * "terms" : {
+ *     "field_name" : [ "value1", "value2" ],
+ *     "minimum_match" : 1
+ * }
+ * </pre>
+ */
+public class TermsQueryParser implements QueryParser {
+
+ public static final String NAME = "terms";
+
+ @Inject
+ public TermsQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, "in"}; // allow both "in" and "terms" (since it's similar to the "terms" filter)
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ String fieldName = null;
+ boolean disableCoord = false;
+ float boost = 1.0f;
+ String minimumShouldMatch = null;
+ List<Object> values = newArrayList();
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ fieldName = currentFieldName;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Object value = parser.objectBytes();
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for terms query");
+ }
+ values.add(value);
+ }
+ } else if (token.isValue()) {
+ if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) {
+ disableCoord = parser.booleanValue();
+ } else if ("minimum_match".equals(currentFieldName) || "minimumMatch".equals(currentFieldName)) {
+ minimumShouldMatch = parser.textOrNull();
+ } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
+ minimumShouldMatch = parser.textOrNull();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[terms] query does not support [" + currentFieldName + "]");
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[terms] query does not support [" + currentFieldName + "]");
+ }
+ }
+
+ if (fieldName == null) {
+ throw new QueryParsingException(parseContext.index(), "No field specified for terms query");
+ }
+
+ FieldMapper mapper = null;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ String[] previousTypes = null;
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ mapper = smartNameFieldMappers.mapper();
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ }
+ }
+
+ try {
+ BooleanQuery booleanQuery = new BooleanQuery(disableCoord);
+ for (Object value : values) {
+ if (mapper != null) {
+ booleanQuery.add(new BooleanClause(mapper.termQuery(value, parseContext), BooleanClause.Occur.SHOULD));
+ } else {
+ booleanQuery.add(new TermQuery(new Term(fieldName, BytesRefs.toString(value))), BooleanClause.Occur.SHOULD);
+ }
+ }
+ booleanQuery.setBoost(boost);
+ Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
+ Query query = wrapSmartNameQuery(optimizeQuery(fixNegativeQueryIfNeeded(booleanQuery)), smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ } finally {
+ if (smartNameFieldMappers != null && smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ }
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java
new file mode 100644
index 0000000..41c49f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query over child documents of a given type that scores each matching parent
+ * document by the <tt>max</tt>, <tt>sum</tt> or <tt>avg</tt> score of its children.
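+ * <p>
+ * For illustration, a usage sketch (type, field and values are hypothetical):
+ * <pre>
+ * {@code
+ * new TopChildrenQueryBuilder("comment", QueryBuilders.termQuery("text", "fast"))
+ *         .score("max")
+ *         .factor(5);
+ * }
+ * </pre>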
+ */
+public class TopChildrenQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<TopChildrenQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+
+ private String childType;
+
+ private String score;
+
+ private float boost = 1.0f;
+
+ private int factor = -1;
+
+ private int incrementalFactor = -1;
+
+ private String queryName;
+
+ public TopChildrenQueryBuilder(String type, QueryBuilder queryBuilder) {
+ this.childType = type;
+ this.queryBuilder = queryBuilder;
+ }
+
+ /**
+ * How to compute the score. Possible values are: <tt>max</tt>, <tt>sum</tt>, or <tt>avg</tt>. Defaults
+ * to <tt>max</tt>.
+ */
+ public TopChildrenQueryBuilder score(String score) {
+ this.score = score;
+ return this;
+ }
+
+ /**
+ * Controls how many hits are asked for from the child query, as a multiplication factor
+ * over the number of hits the main query requests. Defaults to 5.
+ */
+ public TopChildrenQueryBuilder factor(int factor) {
+ this.factor = factor;
+ return this;
+ }
+
+ /**
+ * Sets the incremental factor when the query needs to be re-run in order to fetch more results. Defaults to 2.
+ */
+ public TopChildrenQueryBuilder incrementalFactor(int incrementalFactor) {
+ this.incrementalFactor = incrementalFactor;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public TopChildrenQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public TopChildrenQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TopChildrenQueryParser.NAME);
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ builder.field("type", childType);
+ if (score != null) {
+ builder.field("score", score);
+ }
+ if (boost != 1.0f) { // matches the field's default, so the default boost is not serialized
+ builder.field("boost", boost);
+ }
+ if (factor != -1) {
+ builder.field("factor", factor);
+ }
+ if (incrementalFactor != -1) {
+ builder.field("incremental_factor", incrementalFactor);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java
new file mode 100644
index 0000000..1eaef1e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
+import org.elasticsearch.index.search.child.ScoreType;
+import org.elasticsearch.index.search.child.TopChildrenQuery;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * Parser for <tt>top_children</tt> queries, which run the child query against the
+ * given child type and score parent documents from their matching children.
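+ * <p>
+ * For illustration, a request body sketch (type and inner query are hypothetical):
+ * <pre>
+ * "top_children" : {
+ *     "type" : "comment",
+ *     "query" : { "term" : { "text" : "fast" } },
+ *     "score" : "max",
+ *     "factor" : 5
+ * }
+ * </pre>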
+ */
+public class TopChildrenQueryParser implements QueryParser {
+
+ public static final String NAME = "top_children";
+
+ @Inject
+ public TopChildrenQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME, Strings.toCamelCase(NAME)};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query innerQuery = null;
+ boolean queryFound = false;
+ float boost = 1.0f;
+ String childType = null;
+ ScoreType scoreType = ScoreType.MAX;
+ int factor = 5;
+ int incrementalFactor = 2;
+ String queryName = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("query".equals(currentFieldName)) {
+ queryFound = true;
+ // TODO we need to set the type, but, `query` can come before `type`... (see HasChildFilterParser)
+ // since we switch types, make sure we change the context
+ String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
+ try {
+ innerQuery = parseContext.parseInnerQuery();
+ } finally {
+ QueryParseContext.setTypes(origTypes);
+ }
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[top_children] query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("type".equals(currentFieldName)) {
+ childType = parser.text();
+ } else if ("_scope".equals(currentFieldName)) {
+ throw new QueryParsingException(parseContext.index(), "the [_scope] support in [top_children] query has been removed, use a filter as a facet_filter in the relevant global facet");
+ } else if ("score".equals(currentFieldName)) {
+ scoreType = ScoreType.fromString(parser.text());
+ } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
+ scoreType = ScoreType.fromString(parser.text());
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("factor".equals(currentFieldName)) {
+ factor = parser.intValue();
+ } else if ("incremental_factor".equals(currentFieldName) || "incrementalFactor".equals(currentFieldName)) {
+ incrementalFactor = parser.intValue();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[top_children] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ if (!queryFound) {
+ throw new QueryParsingException(parseContext.index(), "[top_children] requires 'query' field");
+ }
+ if (childType == null) {
+ throw new QueryParsingException(parseContext.index(), "[top_children] requires 'type' field");
+ }
+
+ if (innerQuery == null) {
+ return null;
+ }
+
+ if ("delete_by_query".equals(SearchContext.current().source())) {
+ throw new QueryParsingException(parseContext.index(), "[top_children] unsupported in delete_by_query api");
+ }
+
+ DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
+ if (childDocMapper == null) {
+ throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]");
+ }
+ if (!childDocMapper.parentFieldMapper().active()) {
+ throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping");
+ }
+ String parentType = childDocMapper.parentFieldMapper().type();
+
+ innerQuery.setBoost(boost);
+ // wrap the query with type query
+ innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
+ TopChildrenQuery query = new TopChildrenQuery(innerQuery, childType, parentType, scoreType, factor, incrementalFactor, parseContext.cacheRecycler());
+ if (queryName != null) {
+ parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
+ }
+ return query;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/TypeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TypeFilterBuilder.java
new file mode 100644
index 0000000..fe939d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TypeFilterBuilder.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
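+/**
+ * A filter that matches documents of the given <tt>_type</tt>. For illustration, a
+ * usage sketch (type name hypothetical): {@code new TypeFilterBuilder("my_type")}.
+ */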
+public class TypeFilterBuilder extends BaseFilterBuilder {
+
+ private final String type;
+
+ public TypeFilterBuilder(String type) {
+ this.type = type;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(TypeFilterParser.NAME);
+ builder.field("value", type);
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java
new file mode 100644
index 0000000..8e48867
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+
+import java.io.IOException;
+
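+/**
+ * Parser for the <tt>type</tt> filter, which matches documents by their <tt>_type</tt>,
+ * using the document mapper's type filter when a mapping for the type exists.
+ */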
+public class TypeFilterParser implements FilterParser {
+
+ public static final String NAME = "type";
+
+ @Inject
+ public TypeFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[type] filter should have a 'value' field containing the type name");
+ }
+ String fieldName = parser.currentName();
+ if (!fieldName.equals("value")) {
+ throw new QueryParsingException(parseContext.index(), "[type] filter should have a 'value' field containing the type name");
+ }
+ token = parser.nextToken();
+ if (token != XContentParser.Token.VALUE_STRING) {
+ throw new QueryParsingException(parseContext.index(), "[type] filter should have a 'value' field containing the type name");
+ }
+ BytesRef type = parser.bytes();
+ // move to the next token
+ parser.nextToken();
+
+ Filter filter;
+ // LUCENE 4 UPGRADE: should the document mapper use BytesRef as well?
+ DocumentMapper documentMapper = parseContext.mapperService().documentMapper(type.utf8ToString());
+ if (documentMapper == null) {
+ filter = new TermFilter(new Term(TypeFieldMapper.NAME, type));
+ } else {
+ filter = documentMapper.typeFilter();
+ }
+ return parseContext.cacheFilter(filter, null);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java
new file mode 100644
index 0000000..a2a2d1a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Implements the wildcard search query. Supported wildcards are <tt>*</tt>, which
+ * matches any character sequence (including the empty one), and <tt>?</tt>,
+ * which matches any single character. Note this query can be slow, as it
+ * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries,
+ * a Wildcard term should not start with one of the wildcards <tt>*</tt> or
+ * <tt>?</tt>.
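+ * <p>
+ * For illustration, a usage sketch (field name and pattern are hypothetical):
+ * <pre>
+ * {@code
+ * new WildcardQueryBuilder("user", "ki*y").boost(2.0f);
+ * }
+ * </pre>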
+ */
+public class WildcardQueryBuilder extends BaseQueryBuilder implements MultiTermQueryBuilder, BoostableQueryBuilder<WildcardQueryBuilder> {
+
+ private final String name;
+
+ private final String wildcard;
+
+ private float boost = -1;
+
+ private String rewrite;
+
+ private String queryName;
+
+ /**
+ * Implements the wildcard search query. Supported wildcards are <tt>*</tt>, which
+ * matches any character sequence (including the empty one), and <tt>?</tt>,
+ * which matches any single character. Note this query can be slow, as it
+ * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries,
+ * a Wildcard term should not start with one of the wildcards <tt>*</tt> or
+ * <tt>?</tt>.
+ *
+ * @param name The field name
+ * @param wildcard The wildcard query string
+ */
+ public WildcardQueryBuilder(String name, String wildcard) {
+ this.name = name;
+ this.wildcard = wildcard;
+ }
+
+ public WildcardQueryBuilder rewrite(String rewrite) {
+ this.rewrite = rewrite;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in addition to the normal
+ * weightings) have their score multiplied by the boost provided.
+ */
+ public WildcardQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ /**
+ * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+ */
+ public WildcardQueryBuilder queryName(String queryName) {
+ this.queryName = queryName;
+ return this;
+ }
+
+ @Override
+ public void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(WildcardQueryParser.NAME);
+ if (boost == -1 && rewrite == null && queryName == null) {
+ builder.field(name, wildcard);
+ } else {
+ builder.startObject(name);
+ builder.field("wildcard", wildcard);
+ if (boost != -1) {
+ builder.field("boost", boost);
+ }
+ if (rewrite != null) {
+ builder.field("rewrite", rewrite);
+ }
+ if (queryName != null) {
+ builder.field("_name", queryName);
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java b/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java
new file mode 100644
index 0000000..3f4842f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+/**
+ * Parser for <tt>wildcard</tt> queries, where <tt>*</tt> matches any character
+ * sequence and <tt>?</tt> matches any single character.
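+ * <p>
+ * For illustration, a request body sketch (field name and pattern are hypothetical):
+ * <pre>
+ * "wildcard" : {
+ *     "user" : { "wildcard" : "ki*y", "boost" : 2.0 }
+ * }
+ * </pre>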
+ */
+public class WildcardQueryParser implements QueryParser {
+
+ public static final String NAME = "wildcard";
+
+ @Inject
+ public WildcardQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[wildcard] query malformed, no field");
+ }
+ String fieldName = parser.currentName();
+ String rewriteMethod = null;
+
+ String value = null;
+ float boost = 1.0f;
+ String queryName = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("wildcard".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("value".equals(currentFieldName)) {
+ value = parser.text();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("rewrite".equals(currentFieldName)) {
+ rewriteMethod = parser.textOrNull();
+ } else if ("_name".equals(currentFieldName)) {
+ queryName = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), "[wildcard] query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+ parser.nextToken();
+ } else {
+ value = parser.text();
+ parser.nextToken();
+ }
+
+ if (value == null) {
+ throw new QueryParsingException(parseContext.index(), "No value specified for wildcard query");
+ }
+
+ BytesRef valueBytes;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ fieldName = smartNameFieldMappers.mapper().names().indexName();
+ valueBytes = smartNameFieldMappers.mapper().indexedValueForSearch(value);
+ } else {
+ valueBytes = new BytesRef(value);
+ }
+
+ WildcardQuery wildcardQuery = new WildcardQuery(new Term(fieldName, valueBytes));
+ // parseRewriteMethod falls back to the default rewrite method for a null setting
+ wildcardQuery.setRewriteMethod(QueryParsers.parseRewriteMethod(rewriteMethod));
+ wildcardQuery.setBoost(boost);
+ Query query = wrapSmartNameQuery(wildcardQuery, smartNameFieldMappers, parseContext);
+ if (queryName != null) {
+ parseContext.addNamedQuery(queryName, query);
+ }
+ return query;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/WrapperFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/WrapperFilterBuilder.java
new file mode 100644
index 0000000..8a63c0a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/WrapperFilterBuilder.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+/**
+ * Created by IntelliJ IDEA.
+ * User: cedric
+ * Date: 12/07/11
+ * Time: 11:30
+ */
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A filter builder that wraps a filter given as a JSON string or binary data. This is useful
+ * when you want to use the Java builder API but still have JSON filter strings at hand that
+ * you want to combine with other filter builders.
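+ * <p>
+ * For illustration, a usage sketch (the filter JSON is hypothetical):
+ * <pre>
+ * {@code
+ * new WrapperFilterBuilder("{\"term\": {\"user\":\"kimchy\"}}");
+ * }
+ * </pre>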
+ */
+public class WrapperFilterBuilder extends BaseFilterBuilder {
+
+ private final byte[] source;
+ private final int offset;
+ private final int length;
+
+ public WrapperFilterBuilder(String source) {
+ this.source = source.getBytes(Charsets.UTF_8);
+ this.offset = 0;
+ this.length = this.source.length;
+ }
+
+ public WrapperFilterBuilder(byte[] source, int offset, int length) {
+ this.source = source;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(WrapperFilterParser.NAME);
+ builder.field("filter", source, offset, length);
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java b/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java
new file mode 100644
index 0000000..af6c740
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for the <tt>wrapper</tt> filter, which parses an embedded filter provided
+ * as binary or base64-encoded JSON.
+ */
+public class WrapperFilterParser implements FilterParser {
+
+ public static final String NAME = "wrapper";
+
+ @Inject
+ public WrapperFilterParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[wrapper] filter malformed");
+ }
+ String fieldName = parser.currentName();
+ if (!fieldName.equals("filter")) {
+ throw new QueryParsingException(parseContext.index(), "[wrapper] filter malformed");
+ }
+ parser.nextToken();
+
+ byte[] querySource = parser.binaryValue();
+ XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource);
+ try {
+ final QueryParseContext context = new QueryParseContext(parseContext.index(), parseContext.indexQueryParser);
+ context.reset(qSourceParser);
+ Filter result = context.parseInnerFilter();
+ parser.nextToken();
+ return result;
+ } finally {
+ qSourceParser.close();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java
new file mode 100644
index 0000000..e3688a5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A query builder that wraps a query given as a JSON string or binary data. This is useful
+ * when you want to use the Java builder API but still have JSON query strings at hand that
+ * you want to combine with other query builders.
+ * <p/>
+ * Example usage in a boolean query :
+ * <pre>
+ * {@code
+ * BoolQueryBuilder bool = new BoolQueryBuilder();
+ * bool.must(new WrapperQueryBuilder("{\"term\": {\"field\":\"value\"}}"));
+ * bool.must(new TermQueryBuilder("field2", "value2"));
+ * }
+ * </pre>
+ */
+public class WrapperQueryBuilder extends BaseQueryBuilder {
+
+ private final byte[] source;
+ private final int offset;
+ private final int length;
+
+ /**
+ * Builds a WrapperQueryBuilder using the provided JSON query string.
+ */
+ public WrapperQueryBuilder(String source) {
+ this.source = source.getBytes(Charsets.UTF_8);
+ this.offset = 0;
+ this.length = this.source.length;
+ }
+
+ public WrapperQueryBuilder(byte[] source, int offset, int length) {
+ this.source = source;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(WrapperQueryParser.NAME);
+ builder.field("query", source, offset, length);
+ builder.endObject();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java b/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java
new file mode 100644
index 0000000..0affbce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for the <tt>wrapper</tt> query, which parses an embedded query provided
+ * as binary or base64-encoded JSON.
+ */
+public class WrapperQueryParser implements QueryParser {
+
+ public static final String NAME = "wrapper";
+
+ @Inject
+ public WrapperQueryParser() {
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{NAME};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ XContentParser.Token token = parser.nextToken();
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new QueryParsingException(parseContext.index(), "[wrapper] query malformed");
+ }
+ String fieldName = parser.currentName();
+ if (!fieldName.equals("query")) {
+ throw new QueryParsingException(parseContext.index(), "[wrapper] query malformed");
+ }
+ parser.nextToken();
+
+ byte[] querySource = parser.binaryValue();
+ XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource);
+ try {
+ final QueryParseContext context = new QueryParseContext(parseContext.index(), parseContext.indexQueryParser);
+ context.reset(qSourceParser);
+ Query result = context.parseInnerQuery();
+ parser.nextToken();
+ return result;
+ } finally {
+ qSourceParser.close();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunction.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunction.java
new file mode 100644
index 0000000..44e3763
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunction.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
+
+/**
+ * Implement this interface to provide a decay function that is executed on a
+ * distance. For example, this could be an exponential drop-off, a triangle
+ * function or something of the kind. This is used, for example, by
+ * {@link GaussDecayFunctionParser}.
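+ * <p>
+ * For illustration, a minimal linear implementation sketch (hypothetical, not part
+ * of this interface's contract):
+ * <pre>
+ * {@code
+ * class LinearDecay implements DecayFunction {
+ *     public double evaluate(double value, double scale) {
+ *         // drops linearly from 1.0 at distance 0 to 0.0 at the adjusted scale
+ *         return Math.max(0.0, (scale - value) / scale);
+ *     }
+ *     public Explanation explainFunction(String valueString, double value, double scale) {
+ *         return new Explanation((float) evaluate(value, scale),
+ *                 "max(0, (scale - " + valueString + ")/scale)");
+ *     }
+ *     public double processScale(double scale, double decay) {
+ *         // adjust the scale so that evaluate(scale, adjustedScale) == decay
+ *         return scale / (1.0 - decay);
+ *     }
+ * }
+ * }
+ * </pre>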
+ */
+public interface DecayFunction {
+
+ public double evaluate(double value, double scale);
+
+ public Explanation explainFunction(String valueString, double value, double scale);
+
+ /**
+ * The final scale parameter is computed from the scale parameter given by
+ * the user and a decay value: the value that the decay function should
+ * return once the document distance reaches the user-defined scale. The
+ * scale parameter for the function must be adjusted accordingly in this
+ * function.
+ *
+ * @param scale
+ * the raw scale value given by the user
+ * @param decay
+ * the value which decay function should take once the distance
+ * reaches this scale
+ * */
+ public double processScale(double scale, double decay);
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java
new file mode 100644
index 0000000..3b3f1c4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
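+/**
+ * Base builder for decay function score functions. {@link #toXContent} emits a
+ * body of the following shape; the function name and values are placeholders,
+ * and "decay" and "offset" are emitted only when set:
+ *
+ * <pre>
+ * "gauss" : {
+ *     "fieldname" : {
+ *         "origin" : "someValue",
+ *         "scale" : "someValue",
+ *         "decay" : 0.5,
+ *         "offset" : "someValue"
+ *     }
+ * }
+ * </pre>
+ */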
+public abstract class DecayFunctionBuilder implements ScoreFunctionBuilder {
+
+ protected static final String ORIGIN = "origin";
+ protected static final String SCALE = "scale";
+ protected static final String DECAY = "decay";
+ protected static final String OFFSET = "offset";
+
+ private String fieldName;
+ private Object origin;
+ private Object scale;
+ private double decay = -1;
+ private Object offset;
+
+ public DecayFunctionBuilder(String fieldName, Object origin, Object scale) {
+ this.fieldName = fieldName;
+ this.origin = origin;
+ this.scale = scale;
+ }
+
+ public DecayFunctionBuilder setDecay(double decay) {
+ if (decay <= 0 || decay >= 1.0) {
+ throw new ElasticsearchIllegalStateException("decay parameter must be in the range (0..1)!");
+ }
+ this.decay = decay;
+ return this;
+ }
+
+ public DecayFunctionBuilder setOffset(Object offset) {
+ this.offset = offset;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.startObject(fieldName);
+ if (origin != null) {
+ builder.field(ORIGIN, origin);
+ }
+ builder.field(SCALE, scale);
+ if (decay > 0) {
+ builder.field(DECAY, decay);
+ }
+ if (offset != null) {
+ builder.field(OFFSET, offset);
+ }
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java
new file mode 100644
index 0000000..6321fe6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java
@@ -0,0 +1,418 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * This class provides the basic functionality needed for adding a decay
+ * function.
+ *
+ * This parser parses this kind of input
+ *
+ * <pre>
+ * {
+ *     "fieldname1" : {
+ *         "origin" : "someValue",
+ *         "scale" : "someValue"
+ *     }
+ * }
+ * </pre>
+ *
+ * "origin" here refers to the reference point and "scale" to the level of
+ * uncertainty you have in your origin.
+ * <p>
+ *
+ * For example, you might want to retrieve an event that took place around the
+ * 20 May 2010 somewhere near Berlin. You are mainly interested in events that
+ * are close to the 20 May 2010 but you are unsure about your guess, maybe it
+ * was a week before or after that. Your "origin" for the date field would be
+ * "20 May 2010" and your "scale" would be "7d".
+ *
+ * This class parses the input and creates a scoring function from the
+ * parameters origin and scale.
+ * <p>
+ * To write a new scoring function, create a new class that inherits from this
+ * one and implement {@link #getDecayFunction()}. Furthermore, to create a builder,
+ * override the getName() in {@link DecayFunctionBuilder}.
+ * <p>
+ * See {@link GaussDecayFunctionBuilder} and {@link GaussDecayFunctionParser}
+ * for an example. The parser furthermore needs to be registered in the
+ * {@link org.elasticsearch.index.query.functionscore.FunctionScoreModule
+ * FunctionScoreModule}.
+ *
+ * **/
+
+public abstract class DecayFunctionParser implements ScoreFunctionParser {
+
+ /**
+ * Implement this method to supply the decay function used for scoring.
+ * */
+ public abstract DecayFunction getDecayFunction();
+
+ /**
+ * Parses bodies of the kind
+ *
+ * <pre>
+ * {
+ *     "fieldname1" : {
+ *         "origin" : "someValue",
+ *         "scale" : "someValue"
+ *     }
+ * }
+ * </pre>
+ *
+ * */
+ @Override
+ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
+ String currentFieldName = null;
+ XContentParser.Token token;
+ ScoreFunction scoreFunction = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ // parse per field the origin and scale value
+ scoreFunction = parseVariable(currentFieldName, parser, parseContext);
+ } else {
+ throw new ElasticsearchParseException("Malformed score function score parameters.");
+ }
+ } else {
+ throw new ElasticsearchParseException("Malformed score function score parameters.");
+ }
+ parser.nextToken();
+ return scoreFunction;
+ }
+
+ // parses origin and scale parameter for field "fieldName"
+ private ScoreFunction parseVariable(String fieldName, XContentParser parser, QueryParseContext parseContext) throws IOException {
+
+ // now, the field must exist, else we cannot read the value for
+ // the doc later
+ MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartMappers == null || !smartMappers.hasMapper()) {
+ throw new QueryParsingException(parseContext.index(), "Unknown field [" + fieldName + "]");
+ }
+
+ FieldMapper<?> mapper = smartMappers.fieldMappers().mapper();
+ // dates and time need special handling
+ if (mapper instanceof DateFieldMapper) {
+ return parseDateVariable(fieldName, parser, parseContext, (DateFieldMapper) mapper);
+ } else if (mapper instanceof GeoPointFieldMapper) {
+ return parseGeoVariable(fieldName, parser, parseContext, (GeoPointFieldMapper) mapper);
+ } else if (mapper instanceof NumberFieldMapper<?>) {
+ return parseNumberVariable(fieldName, parser, parseContext, (NumberFieldMapper<?>) mapper);
+ } else {
+ throw new QueryParsingException(parseContext.index(), "Field " + fieldName + " is of type " + mapper.fieldType()
+ + ", but only numeric types are supported.");
+ }
+ }
+
+ private ScoreFunction parseNumberVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
+ NumberFieldMapper<?> mapper) throws IOException {
+ XContentParser.Token token;
+ String parameterName = null;
+ double scale = 0;
+ double origin = 0;
+ double decay = 0.5;
+ double offset = 0.0d;
+ boolean scaleFound = false;
+ boolean refFound = false;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ parameterName = parser.currentName();
+ } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
+ scale = parser.doubleValue();
+ scaleFound = true;
+ } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
+ decay = parser.doubleValue();
+ } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
+ origin = parser.doubleValue();
+ refFound = true;
+ } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
+ offset = parser.doubleValue();
+ } else {
+ throw new ElasticsearchParseException("Parameter " + parameterName + " not supported!");
+ }
+ }
+ if (!scaleFound || !refFound) {
+ throw new ElasticsearchParseException("Both " + DecayFunctionBuilder.SCALE + "and " + DecayFunctionBuilder.ORIGIN
+ + " must be set for numeric fields.");
+ }
+ IndexNumericFieldData<?> numericFieldData = parseContext.fieldData().getForField(mapper);
+ return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData);
+ }
+
+ private ScoreFunction parseGeoVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
+ GeoPointFieldMapper mapper) throws IOException {
+ XContentParser.Token token;
+ String parameterName = null;
+ GeoPoint origin = new GeoPoint();
+ String scaleString = null;
+ String offsetString = "0km";
+ double decay = 0.5;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ parameterName = parser.currentName();
+ } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
+ scaleString = parser.text();
+ } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
+ origin = GeoPoint.parse(parser);
+ } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
+ decay = parser.doubleValue();
+ } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
+ offsetString = parser.text();
+ } else {
+ throw new ElasticsearchParseException("Parameter " + parameterName + " not supported!");
+ }
+ }
+ if (origin == null || scaleString == null) {
+ throw new ElasticsearchParseException(DecayFunctionBuilder.ORIGIN + " and " + DecayFunctionBuilder.SCALE + " must be set for geo fields.");
+ }
+ double scale = DistanceUnit.DEFAULT.parse(scaleString, DistanceUnit.DEFAULT);
+ double offset = DistanceUnit.DEFAULT.parse(offsetString, DistanceUnit.DEFAULT);
+ IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);
+ return new GeoFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), indexFieldData);
+
+ }
+
+ private ScoreFunction parseDateVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
+ DateFieldMapper dateFieldMapper) throws IOException {
+ XContentParser.Token token;
+ String parameterName = null;
+ String scaleString = null;
+ String originString = null;
+ String offsetString = "0d";
+ double decay = 0.5;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ parameterName = parser.currentName();
+ } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
+ scaleString = parser.text();
+ } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
+ originString = parser.text();
+ } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
+ decay = parser.doubleValue();
+ } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
+ offsetString = parser.text();
+ } else {
+ throw new ElasticsearchParseException("Parameter " + parameterName + " not supported!");
+ }
+ }
+ long origin = SearchContext.current().nowInMillis();
+ if (originString != null) {
+ origin = dateFieldMapper.parseToMilliseconds(originString, parseContext);
+ }
+
+ if (scaleString == null) {
+ throw new ElasticsearchParseException(DecayFunctionBuilder.SCALE + " must be set for date fields.");
+ }
+ TimeValue val = TimeValue.parseTimeValue(scaleString, TimeValue.timeValueHours(24));
+ double scale = val.getMillis();
+ val = TimeValue.parseTimeValue(offsetString, TimeValue.timeValueHours(24));
+ double offset = val.getMillis();
+ IndexNumericFieldData<?> numericFieldData = parseContext.fieldData().getForField(dateFieldMapper);
+ return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData);
+ }
+
+ static class GeoFieldDataScoreFunction extends AbstractDistanceScoreFunction {
+
+ private final GeoPoint origin;
+ private final IndexGeoPointFieldData<?> fieldData;
+ private GeoPointValues geoPointValues = null;
+
+ private static final GeoDistance distFunction = GeoDistance.DEFAULT;
+
+ public GeoFieldDataScoreFunction(GeoPoint origin, double scale, double decay, double offset, DecayFunction func,
+ IndexGeoPointFieldData<?> fieldData) {
+ super(scale, decay, offset, func);
+ this.origin = origin;
+ this.fieldData = fieldData;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) {
+ geoPointValues = fieldData.load(context).getGeoPointValues();
+ }
+
+ private final GeoPoint getValue(int doc, GeoPoint missing) {
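+ // multi-valued field: use the first value for this doc, or 'missing' if the doc has none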
+ final int num = geoPointValues.setDocument(doc);
+ for (int i = 0; i < num; i++) {
+ return geoPointValues.nextValue();
+ }
+ return missing;
+ }
+
+ @Override
+ protected double distance(int docId) {
+ GeoPoint other = getValue(docId, origin);
+ double distance = Math.abs(distFunction.calculate(origin.lat(), origin.lon(), other.lat(), other.lon(),
+ DistanceUnit.METERS)) - offset;
+ return Math.max(0.0d, distance);
+ }
+
+ @Override
+ protected String getDistanceString(int docId) {
+ final GeoPoint other = getValue(docId, origin);
+ return "arcDistance(" + other + "(=doc value), " + origin + "(=origin)) - " + offset
+ + "(=offset) < 0.0 ? 0.0: arcDistance(" + other + "(=doc value), " + origin + "(=origin)) - " + offset
+ + "(=offset)";
+ }
+
+ @Override
+ protected String getFieldName() {
+ return fieldData.getFieldNames().fullName();
+ }
+ }
+
+ static class NumericFieldDataScoreFunction extends AbstractDistanceScoreFunction {
+
+ private final IndexNumericFieldData<?> fieldData;
+ private final double origin;
+ private DoubleValues doubleValues;
+
+ public NumericFieldDataScoreFunction(double origin, double scale, double decay, double offset, DecayFunction func,
+ IndexNumericFieldData<?> fieldData) {
+ super(scale, decay, offset, func);
+ this.fieldData = fieldData;
+ this.origin = origin;
+ }
+
+ public void setNextReader(AtomicReaderContext context) {
+ this.doubleValues = this.fieldData.load(context).getDoubleValues();
+ }
+
+ private final double getValue(int doc, double missing) {
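+ // multi-valued field: use the first value, falling back to 'missing' if the doc has none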
+ final int num = doubleValues.setDocument(doc);
+ for (int i = 0; i < num; i++) {
+ return doubleValues.nextValue();
+ }
+ return missing;
+ }
+
+ @Override
+ protected double distance(int docId) {
+ double distance = Math.abs(getValue(docId, origin) - origin) - offset;
+ return Math.max(0.0d, distance);
+ }
+
+ @Override
+ protected String getDistanceString(int docId) {
+ return "Math.abs(" + getValue(docId, origin) + "(=doc value) - " + origin + "(=origin)) - "
+ + offset + "(=offset) < 0.0 ? 0.0: Math.abs(" + getValue(docId, origin) + "(=doc value) - "
+ + origin + ") - " + offset + "(=offset)";
+ }
+
+ @Override
+ protected String getFieldName() {
+ return fieldData.getFieldNames().fullName();
+ }
+ }
+
+ /**
+ * This is the base class for scoring a single field.
+ *
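+ * Concrete subclasses supply {@link #distance(int)}; the final score is then
+ * {@code func.evaluate(distance(docId), scale)} and does not depend on the
+ * sub-query score.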
+ * */
+ public static abstract class AbstractDistanceScoreFunction extends ScoreFunction {
+
+ private final double scale;
+ protected final double offset;
+ private final DecayFunction func;
+
+ public AbstractDistanceScoreFunction(double userSuppliedScale, double decay, double offset, DecayFunction func) {
+ super(CombineFunction.MULT);
+ if (userSuppliedScale <= 0.0) {
+ throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : scale must be > 0.0.");
+ }
+ if (decay <= 0.0 || decay >= 1.0) {
+ throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME
+ + " : decay must be in the range (0..1).");
+ }
+ this.scale = func.processScale(userSuppliedScale, decay);
+ this.func = func;
+ if (offset < 0.0d) {
+ throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : offset must be >= 0.0");
+ }
+ }
+ this.offset = offset;
+ }
+
+ @Override
+ public double score(int docId, float subQueryScore) {
+ double value = distance(docId);
+ return func.evaluate(value, scale);
+ }
+
+ /**
+ * This function computes the distance from a defined origin. Since
+ * the value of the document is read from the index, it cannot be
+ * guaranteed that the value actually exists. If it does not, we assume
+ * the user handles this case in the query and return 0.
+ * */
+ protected abstract double distance(int docId);
+
+ protected abstract String getDistanceString(int docId);
+
+ protected abstract String getFieldName();
+
+ @Override
+ public Explanation explainScore(int docId, Explanation subQueryExpl) {
+ ComplexExplanation ce = new ComplexExplanation();
+ ce.setValue(CombineFunction.toFloat(score(docId, subQueryExpl.getValue())));
+ ce.setMatch(true);
+ ce.setDescription("Function for field " + getFieldName() + ":");
+ ce.addDetail(func.explainFunction(getDistanceString(docId), distance(docId), scale));
+ return ce;
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreModule.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreModule.java
new file mode 100644
index 0000000..ff98eb8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreModule.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query.functionscore;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser;
+import org.elasticsearch.index.query.functionscore.factor.FactorParser;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
+import org.elasticsearch.index.query.functionscore.lin.LinearDecayFunctionParser;
+import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionParser;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionParser;
+
+import java.util.List;
+
+/**
+ *
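+ * Registers all built-in {@link ScoreFunctionParser} implementations with the
+ * injector. As a rough sketch, a custom parser could hypothetically be added
+ * the same way before the module is installed:
+ *
+ * <pre>{@code
+ * FunctionScoreModule module = new FunctionScoreModule();
+ * module.registerParser(MyScoreFunctionParser.class); // MyScoreFunctionParser is illustrative
+ * }</pre>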
+ */
+public class FunctionScoreModule extends AbstractModule {
+
+ private List<Class<? extends ScoreFunctionParser>> parsers = Lists.newArrayList();
+
+ public FunctionScoreModule() {
+ registerParser(FactorParser.class);
+ registerParser(ScriptScoreFunctionParser.class);
+ registerParser(GaussDecayFunctionParser.class);
+ registerParser(LinearDecayFunctionParser.class);
+ registerParser(ExponentialDecayFunctionParser.class);
+ registerParser(RandomScoreFunctionParser.class);
+ }
+
+ public void registerParser(Class<? extends ScoreFunctionParser> parser) {
+ parsers.add(parser);
+ }
+
+ @Override
+ protected void configure() {
+ Multibinder<ScoreFunctionParser> parserMapBinder = Multibinder.newSetBinder(binder(), ScoreFunctionParser.class);
+ for (Class<? extends ScoreFunctionParser> clazz : parsers) {
+ parserMapBinder.addBinding().to(clazz);
+ }
+ bind(ScoreFunctionParserMapper.class);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java
new file mode 100644
index 0000000..60a1dc1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.BaseQueryBuilder;
+import org.elasticsearch.index.query.BoostableQueryBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A query that wraps another query (or filter) and modifies its score using
+ * one or more score functions, each of which may be guarded by a filter.
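+ * <p>
+ * A usage sketch, assuming static imports of QueryBuilders, FilterBuilders and
+ * {@link ScoreFunctionBuilders}; the field names and values are illustrative:
+ *
+ * <pre>{@code
+ * new FunctionScoreQueryBuilder(matchAllQuery())
+ *         .add(termFilter("tag", "sale"), factorFunction(2.0f))
+ *         .add(gaussDecayFunction("date", "2014-01-01", "7d"))
+ *         .scoreMode("sum");
+ * }</pre>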
+ */
+public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder<FunctionScoreQueryBuilder> {
+
+ private final QueryBuilder queryBuilder;
+
+ private final FilterBuilder filterBuilder;
+
+ private Float boost;
+
+ private Float maxBoost;
+
+ private String scoreMode;
+
+ private String boostMode;
+
+ private ArrayList<FilterBuilder> filters = new ArrayList<FilterBuilder>();
+ private ArrayList<ScoreFunctionBuilder> scoreFunctions = new ArrayList<ScoreFunctionBuilder>();
+
+ public FunctionScoreQueryBuilder(QueryBuilder queryBuilder) {
+ this.queryBuilder = queryBuilder;
+ this.filterBuilder = null;
+ }
+
+ public FunctionScoreQueryBuilder(FilterBuilder filterBuilder) {
+ this.filterBuilder = filterBuilder;
+ this.queryBuilder = null;
+ }
+
+ public FunctionScoreQueryBuilder() {
+ this.filterBuilder = null;
+ this.queryBuilder = null;
+ }
+
+ public FunctionScoreQueryBuilder(ScoreFunctionBuilder scoreFunctionBuilder) {
+ queryBuilder = null;
+ filterBuilder = null;
+ this.filters.add(null);
+ this.scoreFunctions.add(scoreFunctionBuilder);
+ }
+
+ public FunctionScoreQueryBuilder add(FilterBuilder filter, ScoreFunctionBuilder scoreFunctionBuilder) {
+ this.filters.add(filter);
+ this.scoreFunctions.add(scoreFunctionBuilder);
+ return this;
+ }
+
+ public FunctionScoreQueryBuilder add(ScoreFunctionBuilder scoreFunctionBuilder) {
+ this.filters.add(null);
+ this.scoreFunctions.add(scoreFunctionBuilder);
+ return this;
+ }
+
+ public FunctionScoreQueryBuilder scoreMode(String scoreMode) {
+ this.scoreMode = scoreMode;
+ return this;
+ }
+
+ public FunctionScoreQueryBuilder boostMode(String boostMode) {
+ this.boostMode = boostMode;
+ return this;
+ }
+
+ public FunctionScoreQueryBuilder boostMode(CombineFunction combineFunction) {
+ this.boostMode = combineFunction.getName();
+ return this;
+ }
+
+ public FunctionScoreQueryBuilder maxBoost(float maxBoost) {
+ this.maxBoost = maxBoost;
+ return this;
+ }
+
+ /**
+ * Sets the boost for this query. Documents matching this query will (in
+ * addition to the normal weightings) have their score multiplied by the
+ * boost provided.
+ */
+ public FunctionScoreQueryBuilder boost(float boost) {
+ this.boost = boost;
+ return this;
+ }
+
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(FunctionScoreQueryParser.NAME);
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ } else if (filterBuilder != null) {
+ builder.field("filter");
+ filterBuilder.toXContent(builder, params);
+ }
+ // If there is only one function and no filter, we later want to create
+ // a plain FunctionScoreQuery. For this, we serialize only the score
+ // function; the parser translates this form back into a FunctionScoreQuery.
+ if (filters.size() == 1 && filters.get(0) == null) {
+ scoreFunctions.get(0).toXContent(builder, params);
+ } else { // in all other cases we build the format needed for a
+ // FiltersFunctionScoreQuery
+ builder.startArray("functions");
+ for (int i = 0; i < filters.size(); i++) {
+ builder.startObject();
+ if (filters.get(i) != null) {
+ builder.field("filter");
+ filters.get(i).toXContent(builder, params);
+ }
+ scoreFunctions.get(i).toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ if (scoreMode != null) {
+ builder.field("score_mode", scoreMode);
+ }
+ if (boostMode != null) {
+ builder.field("boost_mode", boostMode);
+ }
+ if (maxBoost != null) {
+ builder.field("max_boost", maxBoost);
+ }
+ if (boost != null) {
+ builder.field("boost", boost);
+ }
+
+ builder.endObject();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java
new file mode 100644
index 0000000..fa27d2c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ *
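+ * Parser for the {@code function_score} query. A representative body, with
+ * all field names and values below being illustrative, looks like:
+ *
+ * <pre>
+ * "function_score" : {
+ *     "query" : { "match_all" : {} },
+ *     "functions" : [
+ *         { "filter" : { "term" : { "tag" : "sale" } }, "boost_factor" : 2 },
+ *         { "gauss" : { "date" : { "origin" : "2014-01-01", "scale" : "7d" } } }
+ *     ],
+ *     "score_mode" : "sum",
+ *     "boost_mode" : "multiply"
+ * }
+ * </pre>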
+ */
+public class FunctionScoreQueryParser implements QueryParser {
+
+ public static final String NAME = "function_score";
+ ScoreFunctionParserMapper functionParserMapper;
+
+ @Inject
+ public FunctionScoreQueryParser(ScoreFunctionParserMapper functionParserMapper) {
+ this.functionParserMapper = functionParserMapper;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { NAME, Strings.toCamelCase(NAME) };
+ }
+
+ private static final ImmutableMap<String, CombineFunction> combineFunctionsMap;
+
+ static {
+ CombineFunction[] values = CombineFunction.values();
+ Builder<String, CombineFunction> combineFunctionMapBuilder = ImmutableMap.<String, CombineFunction>builder();
+ for (CombineFunction combineFunction : values) {
+ combineFunctionMapBuilder.put(combineFunction.getName(), combineFunction);
+ }
+ combineFunctionsMap = combineFunctionMapBuilder.build();
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ XContentParser parser = parseContext.parser();
+
+ Query query = null;
+ float boost = 1.0f;
+
+ FiltersFunctionScoreQuery.ScoreMode scoreMode = FiltersFunctionScoreQuery.ScoreMode.Multiply;
+ ArrayList<FiltersFunctionScoreQuery.FilterFunction> filterFunctions = new ArrayList<FiltersFunctionScoreQuery.FilterFunction>();
+ float maxBoost = Float.MAX_VALUE;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ CombineFunction combineFunction = CombineFunction.MULT;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if ("query".equals(currentFieldName)) {
+ query = parseContext.parseInnerQuery();
+ } else if ("filter".equals(currentFieldName)) {
+ query = new XConstantScoreQuery(parseContext.parseInnerFilter());
+ } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
+ scoreMode = parseScoreMode(parseContext, parser);
+ } else if ("boost_mode".equals(currentFieldName) || "boostMode".equals(currentFieldName)) {
+ combineFunction = parseBoostMode(parseContext, parser);
+ } else if ("max_boost".equals(currentFieldName) || "maxBoost".equals(currentFieldName)) {
+ maxBoost = parser.floatValue();
+ } else if ("boost".equals(currentFieldName)) {
+ boost = parser.floatValue();
+ } else if ("functions".equals(currentFieldName)) {
+ currentFieldName = parseFiltersAndFunctions(parseContext, parser, filterFunctions, currentFieldName);
+ } else {
+ // we try to parse a score function; if there is no score
+ // function registered for the current field name,
+ // functionParserMapper.get() will throw an exception.
+ filterFunctions.add(new FiltersFunctionScoreQuery.FilterFunction(null, functionParserMapper.get(parseContext.index(),
+ currentFieldName).parse(parseContext, parser)));
+ }
+ }
+ if (query == null) {
+ query = Queries.newMatchAllQuery();
+ }
+ // if there are no functions at all, just return the plain query
+ if (filterFunctions.isEmpty()) {
+ return query;
+ }
+ // handle cases where only one score function and no filter was
+ // provided. In this case we create a FunctionScoreQuery.
+ if (filterFunctions.size() == 1 && filterFunctions.get(0).filter == null) {
+ FunctionScoreQuery theQuery = new FunctionScoreQuery(query, filterFunctions.get(0).function);
+ if (combineFunction != null) {
+ theQuery.setCombineFunction(combineFunction);
+ }
+ theQuery.setBoost(boost);
+ theQuery.setMaxBoost(maxBoost);
+ return theQuery;
+ // in all other cases we create a FiltersFunctionScoreQuery.
+ } else {
+ FiltersFunctionScoreQuery functionScoreQuery = new FiltersFunctionScoreQuery(query, scoreMode,
+ filterFunctions.toArray(new FiltersFunctionScoreQuery.FilterFunction[filterFunctions.size()]), maxBoost);
+ if (combineFunction != null) {
+ functionScoreQuery.setCombineFunction(combineFunction);
+ }
+ functionScoreQuery.setBoost(boost);
+ return functionScoreQuery;
+ }
+ }
+
+ private String parseFiltersAndFunctions(QueryParseContext parseContext, XContentParser parser,
+ ArrayList<FiltersFunctionScoreQuery.FilterFunction> filterFunctions, String currentFieldName) throws IOException {
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ Filter filter = null;
+ ScoreFunction scoreFunction = null;
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new QueryParsingException(parseContext.index(), NAME + ": malformed query, expected a "
+ + XContentParser.Token.START_OBJECT + " while parsing functions but got a " + token);
+ } else {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else {
+ if ("filter".equals(currentFieldName)) {
+ filter = parseContext.parseInnerFilter();
+ } else {
+ // no null check needed here:
+ // functionParserMapper.get() throws if no parser
+ // is registered under this name
+ ScoreFunctionParser functionParser = functionParserMapper.get(parseContext.index(), currentFieldName);
+ scoreFunction = functionParser.parse(parseContext, parser);
+ }
+ }
+ }
+ }
+ if (filter == null) {
+ filter = Queries.MATCH_ALL_FILTER;
+ }
+ filterFunctions.add(new FiltersFunctionScoreQuery.FilterFunction(filter, scoreFunction));
+
+ }
+ return currentFieldName;
+ }
+
+ private FiltersFunctionScoreQuery.ScoreMode parseScoreMode(QueryParseContext parseContext, XContentParser parser) throws IOException {
+ String scoreMode = parser.text();
+ if ("avg".equals(scoreMode)) {
+ return FiltersFunctionScoreQuery.ScoreMode.Avg;
+ } else if ("max".equals(scoreMode)) {
+ return FiltersFunctionScoreQuery.ScoreMode.Max;
+ } else if ("min".equals(scoreMode)) {
+ return FiltersFunctionScoreQuery.ScoreMode.Min;
+ } else if ("sum".equals(scoreMode)) {
+ return FiltersFunctionScoreQuery.ScoreMode.Sum;
+ } else if ("multiply".equals(scoreMode)) {
+ return FiltersFunctionScoreQuery.ScoreMode.Multiply;
+ } else if ("first".equals(scoreMode)) {
+ return FiltersFunctionScoreQuery.ScoreMode.First;
+ } else {
+ throw new QueryParsingException(parseContext.index(), NAME + " illegal score_mode [" + scoreMode + "]");
+ }
+ }
+
+ private CombineFunction parseBoostMode(QueryParseContext parseContext, XContentParser parser) throws IOException {
+ String boostMode = parser.text();
+ CombineFunction cf = combineFunctionsMap.get(boostMode);
+ if (cf == null) {
+ throw new QueryParsingException(parseContext.index(), NAME + " illegal boost_mode [" + boostMode + "]");
+ }
+ return cf;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java
new file mode 100644
index 0000000..c6c5d15
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+
+public interface ScoreFunctionBuilder extends ToXContent {
+
+ public String getName();
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilders.java b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilders.java
new file mode 100644
index 0000000..825be6c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilders.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.lin.LinearDecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+
+import java.util.Map;
+
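+/**
+ * Static factory methods for the {@link ScoreFunctionBuilder}s in this
+ * package. A typical use, sketched here with an illustrative field name and
+ * assuming a static import of this class:
+ *
+ * <pre>{@code
+ * gaussDecayFunction("date", "2014-01-01", "7d").setDecay(0.3).setOffset("1d");
+ * }</pre>
+ */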
+public class ScoreFunctionBuilders {
+
+ public static ExponentialDecayFunctionBuilder exponentialDecayFunction(String fieldName, Object origin, Object scale) {
+ return new ExponentialDecayFunctionBuilder(fieldName, origin, scale);
+ }
+
+ public static ExponentialDecayFunctionBuilder exponentialDecayFunction(String fieldName, Object scale) {
+ return new ExponentialDecayFunctionBuilder(fieldName, null, scale);
+ }
+
+ public static GaussDecayFunctionBuilder gaussDecayFunction(String fieldName, Object origin, Object scale) {
+ return new GaussDecayFunctionBuilder(fieldName, origin, scale);
+ }
+
+ public static GaussDecayFunctionBuilder gaussDecayFunction(String fieldName, Object scale) {
+ return new GaussDecayFunctionBuilder(fieldName, null, scale);
+ }
+
+ public static LinearDecayFunctionBuilder linearDecayFunction(String fieldName, Object origin, Object scale) {
+ return new LinearDecayFunctionBuilder(fieldName, origin, scale);
+ }
+
+ public static LinearDecayFunctionBuilder linearDecayFunction(String fieldName, Object scale) {
+ return new LinearDecayFunctionBuilder(fieldName, null, scale);
+ }
+
+ public static ScriptScoreFunctionBuilder scriptFunction(String script) {
+ return (new ScriptScoreFunctionBuilder()).script(script);
+ }
+
+ public static ScriptScoreFunctionBuilder scriptFunction(String script, String lang) {
+ return (new ScriptScoreFunctionBuilder()).script(script).lang(lang);
+ }
+
+ public static ScriptScoreFunctionBuilder scriptFunction(String script, String lang, Map<String, Object> params) {
+ return (new ScriptScoreFunctionBuilder()).script(script).lang(lang).params(params);
+ }
+
+ public static ScriptScoreFunctionBuilder scriptFunction(String script, Map<String, Object> params) {
+ return (new ScriptScoreFunctionBuilder()).script(script).params(params);
+ }
+
+ public static FactorBuilder factorFunction(float boost) {
+ return (new FactorBuilder()).boostFactor(boost);
+ }
+
+ public static RandomScoreFunctionBuilder randomFunction(long seed) {
+ return (new RandomScoreFunctionBuilder()).seed(seed);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParser.java
new file mode 100644
index 0000000..c4315a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+
+import java.io.IOException;
+
+public interface ScoreFunctionParser {
+
+ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException;
+
+ /**
+ * Returns the names of the function, for example "linear", "gauss" etc.
+ * These names are used to look up the parser via
+ * {@link ScoreFunctionParserMapper} when a query is parsed.
+ * */
+ public String[] getNames();
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java
new file mode 100644
index 0000000..4f7d9de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParsingException;
+
+import java.util.Set;
+
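+/**
+ * Resolves score function names (for example "gauss" or "boost_factor") to the
+ * {@link ScoreFunctionParser} registered for them via {@link FunctionScoreModule}.
+ */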
+public class ScoreFunctionParserMapper {
+
+ protected ImmutableMap<String, ScoreFunctionParser> functionParsers;
+
+ @Inject
+ public ScoreFunctionParserMapper(Set<ScoreFunctionParser> parsers) {
+ MapBuilder<String, ScoreFunctionParser> builder = MapBuilder.newMapBuilder();
+ for (ScoreFunctionParser scoreFunctionParser : parsers) {
+ for (String name : scoreFunctionParser.getNames()) {
+ builder.put(name, scoreFunctionParser);
+ }
+ }
+ this.functionParsers = builder.immutableMap();
+ }
+
+ public ScoreFunctionParser get(Index index, String parserName) {
+ ScoreFunctionParser functionParser = get(parserName);
+ if (functionParser == null) {
+ throw new QueryParsingException(index, "No function with the name [" + parserName + "] is registered.");
+ }
+ return functionParser;
+ }
+
+ private ScoreFunctionParser get(String parserName) {
+ return functionParsers.get(parserName);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionBuilder.java
new file mode 100644
index 0000000..f4a730b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.exp;
+
+
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+
+public class ExponentialDecayFunctionBuilder extends DecayFunctionBuilder {
+
+ public ExponentialDecayFunctionBuilder(String fieldName, Object origin, Object scale) {
+ super(fieldName, origin, scale);
+ }
+
+ @Override
+ public String getName() {
+ return ExponentialDecayFunctionParser.NAMES[0];
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java
new file mode 100644
index 0000000..5412559
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.exp;
+
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.index.query.functionscore.DecayFunction;
+import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
+
+public class ExponentialDecayFunctionParser extends DecayFunctionParser {
+
+ public static final String[] NAMES = { "exp" };
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ static final DecayFunction decayFunction = new ExponentialDecayScoreFunction();
+
+ @Override
+ public DecayFunction getDecayFunction() {
+ return decayFunction;
+ }
+
+ final static class ExponentialDecayScoreFunction implements DecayFunction {
+
+ @Override
+ public double evaluate(double value, double scale) {
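+ // processScale() has already folded log(decay) (a negative number) into
+ // scale, so this expression decays towards 0 as the distance value grows.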
+ return Math.exp(scale * value);
+ }
+
+ @Override
+ public Explanation explainFunction(String valueExpl, double value, double scale) {
+ ComplexExplanation ce = new ComplexExplanation();
+ ce.setValue((float) evaluate(value, scale));
+ ce.setDescription("exp(- abs(" + valueExpl + ") * " + -1 * scale + ")");
+ return ce;
+ }
+
+ @Override
+ public double processScale(double scale, double decay) {
+ return Math.log(decay) / scale;
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java
new file mode 100644
index 0000000..941c216
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.factor;
+
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A score function that simply multiplies the score by a constant boost
+ * factor.
+ */
+public class FactorBuilder implements ScoreFunctionBuilder {
+
+ private Float boostFactor;
+
+ /**
+ * Sets the boost factor for this query.
+ */
+ public FactorBuilder boostFactor(float boost) {
+ this.boostFactor = Float.valueOf(boost);
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (boostFactor != null) {
+ builder.field("boost_factor", boostFactor.floatValue());
+ }
+ return builder;
+ }
+
+ @Override
+ public String getName() {
+ return FactorParser.NAMES[0];
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorParser.java
new file mode 100644
index 0000000..294779c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorParser.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.factor;
+
+import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class FactorParser implements ScoreFunctionParser {
+
+ public static final String[] NAMES = { "boost_factor", "boostFactor" };
+
+ @Inject
+ public FactorParser() {
+ }
+
+ @Override
+ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
+ float boostFactor = parser.floatValue();
+ return new BoostScoreFunction(boostFactor);
+ }
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java
new file mode 100644
index 0000000..b9d6708
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.gauss;
+
+
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+
+public class GaussDecayFunctionBuilder extends DecayFunctionBuilder {
+
+ public GaussDecayFunctionBuilder(String fieldName, Object origin, Object scale) {
+ super(fieldName, origin, scale);
+ }
+
+ @Override
+ public String getName() {
+ return GaussDecayFunctionParser.NAMES[0];
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java
new file mode 100644
index 0000000..374bd6e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.gauss;
+
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.index.query.functionscore.DecayFunction;
+import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
+
+public class GaussDecayFunctionParser extends DecayFunctionParser {
+
+ static final DecayFunction decayFunction = new GaussScoreFunction();
+ public static final String[] NAMES = { "gauss" };
+
+ @Override
+ public DecayFunction getDecayFunction() {
+ return decayFunction;
+ }
+
+ final static class GaussScoreFunction implements DecayFunction {
+
+ @Override
+ public double evaluate(double value, double scale) {
+ // note that we already computed scale^2 in processScale() so we do
+ // not need to square it here.
+ return Math.exp(0.5 * Math.pow(value, 2.0) / scale);
+ }
+
+ @Override
+ public Explanation explainFunction(String valueExpl, double value, double scale) {
+ ComplexExplanation ce = new ComplexExplanation();
+ ce.setValue((float) evaluate(value, scale));
+ ce.setDescription("-exp(-0.5*pow(" + valueExpl + ",2.0)/" + -1 * scale + ")");
+ return ce;
+ }
+
+ @Override
+ public double processScale(double scale, double decay) {
+ return 0.5 * Math.pow(scale, 2.0) / Math.log(decay);
+ }
+ }
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+}
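
To see how processScale() and evaluate() compose: the processed scale 0.5 * scale^2 / ln(decay) is negative for decay < 1, so evaluate() reduces to decay^((value/scale)^2), i.e. the score is exactly decay at distance scale. A self-contained sketch with illustrative numbers:

    public class GaussDecaySketch {
        // Mirrors GaussScoreFunction above; the inputs below are illustrative.
        static double processScale(double scale, double decay) {
            return 0.5 * Math.pow(scale, 2.0) / Math.log(decay);
        }

        static double evaluate(double value, double processedScale) {
            return Math.exp(0.5 * Math.pow(value, 2.0) / processedScale);
        }

        public static void main(String[] args) {
            double p = processScale(10.0, 0.5);
            System.out.println(evaluate(0.0, p));  // 1.0    at the origin
            System.out.println(evaluate(10.0, p)); // 0.5    the decay value at distance == scale
            System.out.println(evaluate(20.0, p)); // 0.0625 == 0.5^4 farther out
        }
    }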
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java
new file mode 100644
index 0000000..dcb4eff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.lin;
+
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+
+public class LinearDecayFunctionBuilder extends DecayFunctionBuilder {
+
+ public LinearDecayFunctionBuilder(String fieldName, Object origin, Object scale) {
+ super(fieldName, origin, scale);
+ }
+
+ @Override
+ public String getName() {
+ return LinearDecayFunctionParser.NAMES[0];
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java
new file mode 100644
index 0000000..f2f1ad5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.lin;
+
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.index.query.functionscore.DecayFunction;
+import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
+
+public class LinearDecayFunctionParser extends DecayFunctionParser {
+
+ public static final String[] NAMES = { "linear" };
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ static final DecayFunction decayFunction = new LinearDecayScoreFunction();
+
+ @Override
+ public DecayFunction getDecayFunction() {
+ return decayFunction;
+ }
+
+ final static class LinearDecayScoreFunction implements DecayFunction {
+
+ @Override
+ public double evaluate(double value, double scale) {
+ return Math.max(0.0, (scale - value) / scale);
+ }
+
+ @Override
+ public Explanation explainFunction(String valueExpl, double value, double scale) {
+ ComplexExplanation ce = new ComplexExplanation();
+ ce.setValue((float) evaluate(value, scale));
+ ce.setDescription("max(0.0, ((" + scale + " - abs(" + valueExpl + "))/" + scale + ")");
+ return ce;
+ }
+
+ @Override
+ public double processScale(double scale, double decay) {
+ return scale / (1.0 - decay);
+ }
+
+ }
+}
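
The linear variant composes the same way: processScale() stretches the user scale to s = scale / (1 - decay), so (s - value) / s equals decay at value == scale and bottoms out at zero once value reaches s. A short sketch with assumed numbers:

    public class LinearDecaySketch {
        public static void main(String[] args) {
            double scale = 10.0, decay = 0.5;                   // illustrative inputs
            double s = scale / (1.0 - decay);                   // processScale -> 20.0
            System.out.println(Math.max(0.0, (s - 10.0) / s));  // 0.5 at distance == scale
            System.out.println(Math.max(0.0, (s - 20.0) / s));  // 0.0 once distance reaches s
        }
    }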
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionBuilder.java
new file mode 100644
index 0000000..628f57e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionBuilder.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query.functionscore.random;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+
+import java.io.IOException;
+
+/**
+ * A function that computes a random score for the matched documents
+ */
+public class RandomScoreFunctionBuilder implements ScoreFunctionBuilder {
+
+ private Long seed = null;
+
+ public RandomScoreFunctionBuilder() {
+ }
+
+ @Override
+ public String getName() {
+ return RandomScoreFunctionParser.NAMES[0];
+ }
+
+ /**
+ * Sets the seed from which the random number is generated. Using the same seed is guaranteed to generate the same
+ * random number for a specific doc.
+ *
+ * @param seed The seed.
+ */
+ public RandomScoreFunctionBuilder seed(long seed) {
+ this.seed = seed;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ if (seed != null) {
+ builder.field("seed", seed.longValue());
+ }
+ return builder.endObject();
+ }
+
+} \ No newline at end of file
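
Going by toXContent() above, a builder with a seed serializes to {"random_score":{"seed":42}}, and to {"random_score":{}} when no seed was set. A hedged usage sketch, assuming the XContentFactory helpers from this codebase:

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionBuilder;

    public class RandomScoreSketch {
        public static void main(String[] args) throws Exception {
            XContentBuilder out = XContentFactory.jsonBuilder();
            out.startObject();
            new RandomScoreFunctionBuilder().seed(42).toXContent(out, ToXContent.EMPTY_PARAMS);
            out.endObject();
            System.out.println(out.string()); // {"random_score":{"seed":42}}
        }
    }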
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java
new file mode 100644
index 0000000..5f3d4aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.index.query.functionscore.random;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.function.RandomScoreFunction;
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class RandomScoreFunctionParser implements ScoreFunctionParser {
+
+ public static String[] NAMES = { "random_score", "randomScore" };
+
+ @Inject
+ public RandomScoreFunctionParser() {
+ }
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ @Override
+ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
+
+ long seed = -1;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("seed".equals(currentFieldName)) {
+ seed = parser.longValue();
+ } else {
+ throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (seed == -1) {
+ seed = parseContext.nowInMillis();
+ }
+
+ ShardId shardId = SearchContext.current().indexShard().shardId();
+ seed = salt(seed, shardId.index().name(), shardId.id());
+
+ return new RandomScoreFunction(seed);
+ }
+
+ public static long salt(long seed, String index, int shardId) {
+ long salt = index.hashCode();
+ salt = salt << 32;
+ salt |= shardId;
+ return salt^seed;
+ }
+
+} \ No newline at end of file
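
salt() folds the index name and shard id into the user-supplied seed, so the same request seed still produces a different random sequence on every shard. A pure-Java sketch of that mixing; the index name and shard ids are made up:

    public class SaltSketch {
        // Mirrors the salt() method above.
        static long salt(long seed, String index, int shardId) {
            long salt = index.hashCode();
            salt = salt << 32;
            salt |= shardId;
            return salt ^ seed;
        }

        public static void main(String[] args) {
            long seed = 42L;
            // Same user seed on two shards of a hypothetical "logs" index:
            System.out.println(salt(seed, "logs", 0) != salt(seed, "logs", 1)); // true
        }
    }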
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java
new file mode 100644
index 0000000..d11a85a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.functionscore.script;
+
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A function that uses a script to compute or influence the score of documents
+ * that match the inner query or filter.
+ */
+public class ScriptScoreFunctionBuilder implements ScoreFunctionBuilder {
+
+ private String script;
+
+ private String lang;
+
+ private Map<String, Object> params = null;
+
+ public ScriptScoreFunctionBuilder() {
+
+ }
+
+ public ScriptScoreFunctionBuilder script(String script) {
+ this.script = script;
+ return this;
+ }
+
+ /**
+ * Sets the language of the script.
+ */
+ public ScriptScoreFunctionBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * Additional parameters that can be provided to the script.
+ */
+ public ScriptScoreFunctionBuilder params(Map<String, Object> params) {
+ if (this.params == null) {
+ this.params = params;
+ } else {
+ this.params.putAll(params);
+ }
+ return this;
+ }
+
+ /**
+ * Additional parameters that can be provided to the script.
+ */
+ public ScriptScoreFunctionBuilder param(String key, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(key, value);
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field("script", script);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ return builder.endObject();
+ }
+
+ @Override
+ public String getName() {
+ return ScriptScoreFunctionParser.NAMES[0];
+ }
+} \ No newline at end of file
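
Symmetric to the parser that follows, toXContent() above emits a fragment of the shape "script_score" : { "script" : ..., "lang" : ..., "params" : ... }. A hedged usage fragment; the script text, language, and parameter are illustrative, not defaults:

    ScriptScoreFunctionBuilder fn = new ScriptScoreFunctionBuilder()
            .script("_score * doc['popularity'].value") // illustrative script
            .lang("mvel")                               // illustrative language
            .param("factor", 2);
    // Serializes as: "script_score" : { "script" : "...", "lang" : "mvel",
    //                                   "params" : { "factor" : 2 } }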
diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java
new file mode 100644
index 0000000..9b0e348
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+
+package org.elasticsearch.index.query.functionscore.script;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.function.ScoreFunction;
+import org.elasticsearch.common.lucene.search.function.ScriptScoreFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
+import org.elasticsearch.script.SearchScript;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class ScriptScoreFunctionParser implements ScoreFunctionParser {
+
+ public static String[] NAMES = { "script_score", "scriptScore" };
+
+ @Inject
+ public ScriptScoreFunctionParser() {
+ }
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ @Override
+ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
+
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> vars = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ vars = parser.map();
+ } else {
+ throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else {
+ throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]");
+ }
+ }
+ }
+
+ if (script == null) {
+ throw new QueryParsingException(parseContext.index(), NAMES[0] + " requires 'script' field");
+ }
+
+ SearchScript searchScript;
+ try {
+ searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptLang, script, vars);
+ return new ScriptScoreFunction(script, vars, searchScript);
+ } catch (Exception e) {
+ throw new QueryParsingException(parseContext.index(), NAMES[0] + " the script could not be loaded", e);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java b/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java
new file mode 100644
index 0000000..830619c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.support;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.QueryParseContext;
+
+/**
+ *
+ */
+public final class QueryParsers {
+
+ private QueryParsers() {
+
+ }
+
+ public static void setRewriteMethod(MultiTermQuery query, @Nullable MultiTermQuery.RewriteMethod rewriteMethod) {
+ if (rewriteMethod == null) {
+ return;
+ }
+ query.setRewriteMethod(rewriteMethod);
+ }
+
+ public static void setRewriteMethod(MultiTermQuery query, @Nullable String rewriteMethod) {
+ if (rewriteMethod == null) {
+ return;
+ }
+ query.setRewriteMethod(parseRewriteMethod(rewriteMethod));
+ }
+
+ public static MultiTermQuery.RewriteMethod parseRewriteMethod(@Nullable String rewriteMethod) {
+ return parseRewriteMethod(rewriteMethod, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
+ }
+
+ public static MultiTermQuery.RewriteMethod parseRewriteMethod(@Nullable String rewriteMethod, @Nullable MultiTermQuery.RewriteMethod defaultRewriteMethod) {
+ if (rewriteMethod == null) {
+ return defaultRewriteMethod;
+ }
+ if ("constant_score_auto".equals(rewriteMethod) || "constant_score_auto".equals(rewriteMethod)) {
+ return MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+ }
+ if ("scoring_boolean".equals(rewriteMethod) || "scoringBoolean".equals(rewriteMethod)) {
+ return MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE;
+ }
+ if ("constant_score_boolean".equals(rewriteMethod) || "constantScoreBoolean".equals(rewriteMethod)) {
+ return MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
+ }
+ if ("constant_score_filter".equals(rewriteMethod) || "constantScoreFilter".equals(rewriteMethod)) {
+ return MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE;
+ }
+ if (rewriteMethod.startsWith("top_terms_boost_")) {
+ int size = Integer.parseInt(rewriteMethod.substring("top_terms_boost_".length()));
+ return new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(size);
+ }
+ if (rewriteMethod.startsWith("topTermsBoost")) {
+ int size = Integer.parseInt(rewriteMethod.substring("topTermsBoost".length()));
+ return new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(size);
+ }
+ if (rewriteMethod.startsWith("top_terms_")) {
+ int size = Integer.parseInt(rewriteMethod.substring("top_terms_".length()));
+ return new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(size);
+ }
+ if (rewriteMethod.startsWith("topTerms")) {
+ int size = Integer.parseInt(rewriteMethod.substring("topTerms".length()));
+ return new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(size);
+ }
+ throw new ElasticsearchIllegalArgumentException("Failed to parse rewrite_method [" + rewriteMethod + "]");
+ }
+
+ public static Query wrapSmartNameQuery(Query query, @Nullable MapperService.SmartNameFieldMappers smartFieldMappers,
+ QueryParseContext parseContext) {
+ if (query == null) {
+ return null;
+ }
+ if (smartFieldMappers == null) {
+ return query;
+ }
+ if (!smartFieldMappers.explicitTypeInNameWithDocMapper()) {
+ return query;
+ }
+ DocumentMapper docMapper = smartFieldMappers.docMapper();
+ return new XFilteredQuery(query, parseContext.cacheFilter(docMapper.typeFilter(), null));
+ }
+
+ public static Filter wrapSmartNameFilter(Filter filter, @Nullable MapperService.SmartNameFieldMappers smartFieldMappers,
+ QueryParseContext parseContext) {
+ if (smartFieldMappers == null) {
+ return filter;
+ }
+ if (!smartFieldMappers.explicitTypeInNameWithDocMapper()) {
+ return filter;
+ }
+ DocumentMapper docMapper = smartFieldMappers.docMapper();
+ return new AndFilter(ImmutableList.of(parseContext.cacheFilter(docMapper.typeFilter(), null), filter));
+ }
+}
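
parseRewriteMethod() accepts both fixed names and size-suffixed families; for the top_terms_* family the trailing integer becomes the rewrite's term cut-off. A small fragment of that suffix parsing, assuming the Lucene 4.x MultiTermQuery rewrite classes:

    String rewrite = "top_terms_10";                                       // illustrative input
    int size = Integer.parseInt(rewrite.substring("top_terms_".length())); // 10
    MultiTermQuery.RewriteMethod method =
            new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(size);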
diff --git a/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java b/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java
new file mode 100644
index 0000000..e98a029
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.refresh;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+public class RefreshStats implements Streamable, ToXContent {
+
+ private long total;
+
+ private long totalTimeInMillis;
+
+ public RefreshStats() {
+
+ }
+
+ public RefreshStats(long total, long totalTimeInMillis) {
+ this.total = total;
+ this.totalTimeInMillis = totalTimeInMillis;
+ }
+
+ public void add(long total, long totalTimeInMillis) {
+ this.total += total;
+ this.totalTimeInMillis += totalTimeInMillis;
+ }
+
+ public void add(RefreshStats refreshStats) {
+ if (refreshStats == null) {
+ return;
+ }
+ this.total += refreshStats.total;
+ this.totalTimeInMillis += refreshStats.totalTimeInMillis;
+ }
+
+ /**
+ * The total number of refreshes executed.
+ */
+ public long getTotal() {
+ return this.total;
+ }
+
+ /**
+ * The total time refreshes have been executed (in milliseconds).
+ */
+ public long getTotalTimeInMillis() {
+ return this.totalTimeInMillis;
+ }
+
+ /**
+ * The total time refreshes have been executed.
+ */
+ public TimeValue getTotalTime() {
+ return new TimeValue(totalTimeInMillis);
+ }
+
+ public static RefreshStats readRefreshStats(StreamInput in) throws IOException {
+ RefreshStats refreshStats = new RefreshStats();
+ refreshStats.readFrom(in);
+ return refreshStats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.REFRESH);
+ builder.field(Fields.TOTAL, total);
+ builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, totalTimeInMillis);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString REFRESH = new XContentBuilderString("refresh");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_TIME = new XContentBuilderString("total_time");
+ static final XContentBuilderString TOTAL_TIME_IN_MILLIS = new XContentBuilderString("total_time_in_millis");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readVLong();
+ totalTimeInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(total);
+ out.writeVLong(totalTimeInMillis);
+ }
+} \ No newline at end of file
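
RefreshStats is an additive pair of counters; add() merges per-shard stats into index- or node-level totals. A minimal fragment with made-up numbers:

    RefreshStats total = new RefreshStats();
    total.add(new RefreshStats(3, 120)); // hypothetical shard A: 3 refreshes, 120 ms
    total.add(new RefreshStats(2, 80));  // hypothetical shard B: 2 refreshes, 80 ms
    // total.getTotal() == 5, total.getTotalTimeInMillis() == 200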
diff --git a/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java b/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java
new file mode 100644
index 0000000..86bf353
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search;
+
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.index.fielddata.*;
+
+import java.io.IOException;
+
+/**
+ * Similar to a {@link org.apache.lucene.queries.TermsFilter} but pulls terms from the fielddata.
+ */
+public abstract class FieldDataTermsFilter extends Filter {
+
+ final IndexFieldData fieldData;
+
+ protected FieldDataTermsFilter(IndexFieldData fieldData) {
+ this.fieldData = fieldData;
+ }
+
+ /**
+ * Get a {@link FieldDataTermsFilter} that filters on non-numeric terms found in a hppc {@link ObjectOpenHashSet} of
+ * {@link BytesRef}.
+ *
+ * @param fieldData The fielddata for the field.
+ * @param terms An {@link ObjectOpenHashSet} of terms.
+ * @return the filter.
+ */
+ public static FieldDataTermsFilter newBytes(IndexFieldData fieldData, ObjectOpenHashSet<BytesRef> terms) {
+ return new BytesFieldDataFilter(fieldData, terms);
+ }
+
+ /**
+ * Get a {@link FieldDataTermsFilter} that filters on non-floating point numeric terms found in a hppc
+ * {@link LongOpenHashSet}.
+ *
+ * @param fieldData The fielddata for the field.
+ * @param terms A {@link LongOpenHashSet} of terms.
+ * @return the filter.
+ */
+ public static FieldDataTermsFilter newLongs(IndexNumericFieldData fieldData, LongOpenHashSet terms) {
+ return new LongsFieldDataFilter(fieldData, terms);
+ }
+
+ /**
+ * Get a {@link FieldDataTermsFilter} that filters on floating point numeric terms found in a hppc
+ * {@link DoubleOpenHashSet}.
+ *
+ * @param fieldData The fielddata for the field.
+ * @param terms A {@link DoubleOpenHashSet} of terms.
+ * @return the filter.
+ */
+ public static FieldDataTermsFilter newDoubles(IndexNumericFieldData fieldData, DoubleOpenHashSet terms) {
+ return new DoublesFieldDataFilter(fieldData, terms);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null || !(obj instanceof FieldDataTermsFilter)) return false;
+
+ FieldDataTermsFilter that = (FieldDataTermsFilter) obj;
+ if (!fieldData.getFieldNames().indexName().equals(that.fieldData.getFieldNames().indexName())) return false;
+ if (this.hashCode() != obj.hashCode()) return false;
+ return true;
+ }
+
+ @Override
+ public abstract int hashCode();
+
+ @Override
+ public abstract String toString();
+
+ /**
+ * Filters on non-numeric fields.
+ */
+ protected static class BytesFieldDataFilter extends FieldDataTermsFilter {
+
+ final ObjectOpenHashSet<BytesRef> terms;
+
+ protected BytesFieldDataFilter(IndexFieldData fieldData, ObjectOpenHashSet<BytesRef> terms) {
+ super(fieldData);
+ this.terms = terms;
+ }
+
+ @Override
+ public int hashCode() {
+ int hashcode = fieldData.getFieldNames().indexName().hashCode();
+ hashcode += terms != null ? terms.hashCode() : 0;
+ return hashcode;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("BytesFieldDataFilter:");
+ return sb
+ .append(fieldData.getFieldNames().indexName())
+ .append(":")
+ .append(terms != null ? terms.toString() : "")
+ .toString();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ // make sure there are terms to filter on
+ if (terms == null || terms.isEmpty()) return null;
+
+ final BytesValues values = fieldData.load(context).getBytesValues(false); // load fielddata
+ return new MatchDocIdSet(context.reader().maxDoc(), acceptDocs) {
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int numVals = values.setDocument(doc);
+ for (int i = 0; i < numVals; i++) {
+ if (terms.contains(values.nextValue())) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ };
+ }
+ }
+
+ /**
+ * Filters on non-floating point numeric fields.
+ */
+ protected static class LongsFieldDataFilter extends FieldDataTermsFilter {
+
+ final LongOpenHashSet terms;
+
+ protected LongsFieldDataFilter(IndexNumericFieldData fieldData, LongOpenHashSet terms) {
+ super(fieldData);
+ this.terms = terms;
+ }
+
+ @Override
+ public int hashCode() {
+ int hashcode = fieldData.getFieldNames().indexName().hashCode();
+ hashcode += terms != null ? terms.hashCode() : 0;
+ return hashcode;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("LongsFieldDataFilter:");
+ return sb
+ .append(fieldData.getFieldNames().indexName())
+ .append(":")
+ .append(terms != null ? terms.toString() : "")
+ .toString();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ // make sure there are terms to filter on
+ if (terms == null || terms.isEmpty()) return null;
+
+ IndexNumericFieldData numericFieldData = (IndexNumericFieldData) fieldData;
+ if (!numericFieldData.getNumericType().isFloatingPoint()) {
+ final LongValues values = numericFieldData.load(context).getLongValues(); // load fielddata
+ return new MatchDocIdSet(context.reader().maxDoc(), acceptDocs) {
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int numVals = values.setDocument(doc);
+ for (int i = 0; i < numVals; i++) {
+ if (terms.contains(values.nextValue())) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ };
+ }
+
+ // only get here if wrong fielddata type in which case
+ // no docs will match so we just return null.
+ return null;
+ }
+ }
+
+ /**
+ * Filters on floating point numeric fields.
+ */
+ protected static class DoublesFieldDataFilter extends FieldDataTermsFilter {
+
+ final DoubleOpenHashSet terms;
+
+ protected DoublesFieldDataFilter(IndexNumericFieldData fieldData, DoubleOpenHashSet terms) {
+ super(fieldData);
+ this.terms = terms;
+ }
+
+ @Override
+ public int hashCode() {
+ int hashcode = fieldData.getFieldNames().indexName().hashCode();
+ hashcode += terms != null ? terms.hashCode() : 0;
+ return hashcode;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("DoublesFieldDataFilter");
+ return sb
+ .append(fieldData.getFieldNames().indexName())
+ .append(":")
+ .append(terms != null ? terms.toString() : "")
+ .toString();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ // make sure there are terms to filter on
+ if (terms == null || terms.isEmpty()) return null;
+
+ // verify we have a floating point numeric fielddata
+ IndexNumericFieldData indexNumericFieldData = (IndexNumericFieldData) fieldData;
+ if (indexNumericFieldData.getNumericType().isFloatingPoint()) {
+ final DoubleValues values = indexNumericFieldData.load(context).getDoubleValues(); // load fielddata
+ return new MatchDocIdSet(context.reader().maxDoc(), acceptDocs) {
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int numVals = values.setDocument(doc);
+
+ for (int i = 0; i < numVals; i++) {
+ if (terms.contains(values.nextValue())) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ };
+ }
+
+ // only get here if wrong fielddata type in which case
+ // no docs will match so we just return null.
+ return null;
+ }
+ }
+}
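
Each matchDoc() above is a set-membership test over the fielddata values of one document. A self-contained sketch of that inner loop using the same hppc set type; the term and document values are illustrative:

    import com.carrotsearch.hppc.LongOpenHashSet;

    public class TermsMatchSketch {
        public static void main(String[] args) {
            LongOpenHashSet terms = new LongOpenHashSet(); // the filter's term set
            terms.add(3L);
            terms.add(17L);
            long[] docValues = {8L, 17L};  // hypothetical fielddata values of one doc
            boolean match = false;
            for (long v : docValues) {     // mirrors matchDoc's setDocument/nextValue loop
                if (terms.contains(v)) {
                    match = true;
                    break;
                }
            }
            System.out.println(match);     // true: the doc holds a value in {3, 17}
        }
    }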
diff --git a/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/src/main/java/org/elasticsearch/index/search/MatchQuery.java
new file mode 100644
index 0000000..b4f7e42
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/MatchQuery.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CachingTokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.ExtendedCommonTermsQuery;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.support.QueryParsers;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameQuery;
+
+public class MatchQuery {
+
+ public static enum Type {
+ BOOLEAN,
+ PHRASE,
+ PHRASE_PREFIX
+ }
+
+ public static enum ZeroTermsQuery {
+ NONE,
+ ALL
+ }
+
+ protected final QueryParseContext parseContext;
+
+ protected String analyzer;
+
+ protected BooleanClause.Occur occur = BooleanClause.Occur.SHOULD;
+
+ protected boolean enablePositionIncrements = true;
+
+ protected int phraseSlop = 0;
+
+ protected Fuzziness fuzziness = null;
+
+ protected int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
+
+ protected int maxExpansions = FuzzyQuery.defaultMaxExpansions;
+
+ //LUCENE 4 UPGRADE we need a default value for this!
+ protected boolean transpositions = false;
+
+ protected MultiTermQuery.RewriteMethod rewriteMethod;
+
+ protected MultiTermQuery.RewriteMethod fuzzyRewriteMethod;
+
+ protected boolean lenient;
+
+ protected ZeroTermsQuery zeroTermsQuery = ZeroTermsQuery.NONE;
+
+ protected Float commonTermsCutoff = null;
+
+ public MatchQuery(QueryParseContext parseContext) {
+ this.parseContext = parseContext;
+ }
+
+ public void setAnalyzer(String analyzer) {
+ this.analyzer = analyzer;
+ }
+
+ public void setOccur(BooleanClause.Occur occur) {
+ this.occur = occur;
+ }
+
+ public void setCommonTermsCutoff(float cutoff) {
+ this.commonTermsCutoff = Float.valueOf(cutoff);
+ }
+
+ public void setEnablePositionIncrements(boolean enablePositionIncrements) {
+ this.enablePositionIncrements = enablePositionIncrements;
+ }
+
+ public void setPhraseSlop(int phraseSlop) {
+ this.phraseSlop = phraseSlop;
+ }
+
+ public void setFuzziness(Fuzziness fuzziness) {
+ this.fuzziness = fuzziness;
+ }
+
+ public void setFuzzyPrefixLength(int fuzzyPrefixLength) {
+ this.fuzzyPrefixLength = fuzzyPrefixLength;
+ }
+
+ public void setMaxExpansions(int maxExpansions) {
+ this.maxExpansions = maxExpansions;
+ }
+
+ public void setTranspositions(boolean transpositions) {
+ this.transpositions = transpositions;
+ }
+
+ public void setRewriteMethod(MultiTermQuery.RewriteMethod rewriteMethod) {
+ this.rewriteMethod = rewriteMethod;
+ }
+
+ public void setFuzzyRewriteMethod(MultiTermQuery.RewriteMethod fuzzyRewriteMethod) {
+ this.fuzzyRewriteMethod = fuzzyRewriteMethod;
+ }
+
+ public void setLenient(boolean lenient) {
+ this.lenient = lenient;
+ }
+
+ public void setZeroTermsQuery(ZeroTermsQuery zeroTermsQuery) {
+ this.zeroTermsQuery = zeroTermsQuery;
+ }
+
+ public Query parse(Type type, String fieldName, Object value) throws IOException {
+ FieldMapper mapper = null;
+ final String field;
+ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
+ if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
+ mapper = smartNameFieldMappers.mapper();
+ field = mapper.names().indexName();
+ } else {
+ field = fieldName;
+ }
+
+ if (mapper != null && mapper.useTermQueryWithQueryString()) {
+ if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
+ String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
+ try {
+ return wrapSmartNameQuery(mapper.termQuery(value, parseContext), smartNameFieldMappers, parseContext);
+ } catch (RuntimeException e) {
+ if (lenient) {
+ return null;
+ }
+ throw e;
+ } finally {
+ QueryParseContext.setTypes(previousTypes);
+ }
+ } else {
+ try {
+ return wrapSmartNameQuery(mapper.termQuery(value, parseContext), smartNameFieldMappers, parseContext);
+ } catch (RuntimeException e) {
+ if (lenient) {
+ return null;
+ }
+ throw e;
+ }
+ }
+ }
+
+ Analyzer analyzer = null;
+ if (this.analyzer == null) {
+ if (mapper != null) {
+ analyzer = mapper.searchAnalyzer();
+ }
+ if (analyzer == null && smartNameFieldMappers != null) {
+ analyzer = smartNameFieldMappers.searchAnalyzer();
+ }
+ if (analyzer == null) {
+ analyzer = parseContext.mapperService().searchAnalyzer();
+ }
+ } else {
+ analyzer = parseContext.mapperService().analysisService().analyzer(this.analyzer);
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + this.analyzer + "]");
+ }
+ }
+
+ // Logic similar to QueryParser#getFieldQuery
+ final TokenStream source = analyzer.tokenStream(field, value.toString());
+ source.reset();
+ int numTokens = 0;
+ int positionCount = 0;
+ boolean severalTokensAtSamePosition = false;
+
+ final CachingTokenFilter buffer = new CachingTokenFilter(source);
+ buffer.reset();
+ final CharTermAttribute termAtt = buffer.addAttribute(CharTermAttribute.class);
+ final PositionIncrementAttribute posIncrAtt = buffer.addAttribute(PositionIncrementAttribute.class);
+ boolean hasMoreTokens = buffer.incrementToken();
+ while (hasMoreTokens) {
+ numTokens++;
+ int positionIncrement = posIncrAtt.getPositionIncrement();
+ if (positionIncrement != 0) {
+ positionCount += positionIncrement;
+ } else {
+ severalTokensAtSamePosition = true;
+ }
+ hasMoreTokens = buffer.incrementToken();
+ }
+ // rewind the buffer stream
+ buffer.reset();
+ source.close();
+
+ if (numTokens == 0) {
+ return zeroTermsQuery();
+ } else if (type == Type.BOOLEAN) {
+ if (numTokens == 1) {
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ final Query q = newTermQuery(mapper, new Term(field, termToByteRef(termAtt)));
+ return wrapSmartNameQuery(q, smartNameFieldMappers, parseContext);
+ }
+ if (commonTermsCutoff != null) {
+ ExtendedCommonTermsQuery q = new ExtendedCommonTermsQuery(occur, occur, commonTermsCutoff, positionCount == 1, mapper);
+ for (int i = 0; i < numTokens; i++) {
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ q.add(new Term(field, termToByteRef(termAtt)));
+ }
+ return wrapSmartNameQuery(q, smartNameFieldMappers, parseContext);
+ } else if (severalTokensAtSamePosition && occur == Occur.MUST) {
+ BooleanQuery q = new BooleanQuery(positionCount == 1);
+ Query currentQuery = null;
+ for (int i = 0; i < numTokens; i++) {
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ if (posIncrAtt != null && posIncrAtt.getPositionIncrement() == 0) {
+ if (!(currentQuery instanceof BooleanQuery)) {
+ Query t = currentQuery;
+ currentQuery = new BooleanQuery(true);
+ ((BooleanQuery)currentQuery).add(t, BooleanClause.Occur.SHOULD);
+ }
+ ((BooleanQuery)currentQuery).add(newTermQuery(mapper, new Term(field, termToByteRef(termAtt))), BooleanClause.Occur.SHOULD);
+ } else {
+ if (currentQuery != null) {
+ q.add(currentQuery, occur);
+ }
+ currentQuery = newTermQuery(mapper, new Term(field, termToByteRef(termAtt)));
+ }
+ }
+ q.add(currentQuery, occur);
+ return wrapSmartNameQuery(q, smartNameFieldMappers, parseContext);
+ } else {
+ BooleanQuery q = new BooleanQuery(positionCount == 1);
+ for (int i = 0; i < numTokens; i++) {
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ final Query currentQuery = newTermQuery(mapper, new Term(field, termToByteRef(termAtt)));
+ q.add(currentQuery, occur);
+ }
+ return wrapSmartNameQuery(q, smartNameFieldMappers, parseContext);
+ }
+ } else if (type == Type.PHRASE) {
+ if (severalTokensAtSamePosition) {
+ final MultiPhraseQuery mpq = new MultiPhraseQuery();
+ mpq.setSlop(phraseSlop);
+ final List<Term> multiTerms = new ArrayList<Term>();
+ int position = -1;
+ for (int i = 0; i < numTokens; i++) {
+ int positionIncrement = 1;
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ positionIncrement = posIncrAtt.getPositionIncrement();
+
+ if (positionIncrement > 0 && multiTerms.size() > 0) {
+ if (enablePositionIncrements) {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]), position);
+ } else {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]));
+ }
+ multiTerms.clear();
+ }
+ position += positionIncrement;
+ //LUCENE 4 UPGRADE instead of string term we can convert directly from utf-16 to utf-8
+ multiTerms.add(new Term(field, termToByteRef(termAtt)));
+ }
+ if (enablePositionIncrements) {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]), position);
+ } else {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]));
+ }
+ return wrapSmartNameQuery(mpq, smartNameFieldMappers, parseContext);
+ } else {
+ PhraseQuery pq = new PhraseQuery();
+ pq.setSlop(phraseSlop);
+ int position = -1;
+ for (int i = 0; i < numTokens; i++) {
+ int positionIncrement = 1;
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ positionIncrement = posIncrAtt.getPositionIncrement();
+
+ if (enablePositionIncrements) {
+ position += positionIncrement;
+ //LUCENE 4 UPGRADE instead of string term we can convert directly from utf-16 to utf-8
+ pq.add(new Term(field, termToByteRef(termAtt)), position);
+ } else {
+ pq.add(new Term(field, termToByteRef(termAtt)));
+ }
+ }
+ return wrapSmartNameQuery(pq, smartNameFieldMappers, parseContext);
+ }
+ } else if (type == Type.PHRASE_PREFIX) {
+ MultiPhrasePrefixQuery mpq = new MultiPhrasePrefixQuery();
+ mpq.setSlop(phraseSlop);
+ mpq.setMaxExpansions(maxExpansions);
+ List<Term> multiTerms = new ArrayList<Term>();
+ int position = -1;
+ for (int i = 0; i < numTokens; i++) {
+ int positionIncrement = 1;
+ boolean hasNext = buffer.incrementToken();
+ assert hasNext == true;
+ positionIncrement = posIncrAtt.getPositionIncrement();
+
+ if (positionIncrement > 0 && multiTerms.size() > 0) {
+ if (enablePositionIncrements) {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]), position);
+ } else {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]));
+ }
+ multiTerms.clear();
+ }
+ position += positionIncrement;
+ multiTerms.add(new Term(field, termToByteRef(termAtt)));
+ }
+ if (enablePositionIncrements) {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]), position);
+ } else {
+ mpq.add(multiTerms.toArray(new Term[multiTerms.size()]));
+ }
+ return wrapSmartNameQuery(mpq, smartNameFieldMappers, parseContext);
+ }
+
+ throw new ElasticsearchIllegalStateException("No type found for [" + type + "]");
+ }
+
+ private Query newTermQuery(@Nullable FieldMapper mapper, Term term) {
+ if (fuzziness != null) {
+ if (mapper != null) {
+ Query query = mapper.fuzzyQuery(term.text(), fuzziness, fuzzyPrefixLength, maxExpansions, transpositions);
+ if (query instanceof FuzzyQuery) {
+ QueryParsers.setRewriteMethod((FuzzyQuery) query, fuzzyRewriteMethod);
+ }
+ return query;
+ }
+ int edits = fuzziness.asDistance(term.text());
+ FuzzyQuery query = new FuzzyQuery(term, edits, fuzzyPrefixLength, maxExpansions, transpositions);
+ QueryParsers.setRewriteMethod(query, rewriteMethod);
+ return query;
+ }
+ if (mapper != null) {
+ Query termQuery = mapper.queryStringTermQuery(term);
+ if (termQuery != null) {
+ return termQuery;
+ }
+ }
+ return new TermQuery(term);
+ }
+
+ private static BytesRef termToByteRef(CharTermAttribute attr) {
+ final BytesRef ref = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(attr.buffer(), 0, attr.length(), ref);
+ return ref;
+ }
+
+ protected Query zeroTermsQuery() {
+ return zeroTermsQuery == ZeroTermsQuery.NONE ? Queries.newMatchNoDocsQuery() : Queries.newMatchAllQuery();
+ }
+} \ No newline at end of file
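
The first pass over the CachingTokenFilter only counts tokens and position increments; those two numbers then select the query shape (single TermQuery, BooleanQuery, MultiPhraseQuery, and so on). A self-contained sketch of that counting pass against plain Lucene; the analyzer choice, field name, and text are assumptions:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.CachingTokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
    import org.apache.lucene.util.Version;

    public class TokenCountSketch {
        public static void main(String[] args) throws Exception {
            Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_46);
            TokenStream source = analyzer.tokenStream("body", "quick brown fox");
            source.reset();
            CachingTokenFilter buffer = new CachingTokenFilter(source);
            buffer.reset();
            PositionIncrementAttribute posIncr = buffer.addAttribute(PositionIncrementAttribute.class);
            int numTokens = 0, positionCount = 0;
            while (buffer.incrementToken()) {
                numTokens++;
                positionCount += posIncr.getPositionIncrement();
            }
            // 3 tokens, 3 positions -> Type.BOOLEAN builds a BooleanQuery of three TermQuery clauses.
            System.out.println(numTokens + " " + positionCount);
        }
    }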
diff --git a/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
new file mode 100644
index 0000000..f13edba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.query.QueryParseContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class MultiMatchQuery extends MatchQuery {
+
+ private boolean useDisMax = true;
+ private float tieBreaker;
+
+ public void setUseDisMax(boolean useDisMax) {
+ this.useDisMax = useDisMax;
+ }
+
+ public void setTieBreaker(float tieBreaker) {
+ this.tieBreaker = tieBreaker;
+ }
+
+ public MultiMatchQuery(QueryParseContext parseContext) {
+ super(parseContext);
+ }
+
+ private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException {
+ Query query = parse(type, fieldName, value);
+ if (query instanceof BooleanQuery) {
+ Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
+ }
+ if (boostValue != null && query != null) {
+ query.setBoost(boostValue);
+ }
+ return query;
+ }
+
+ public Query parse(Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {
+ if (fieldNames.size() == 1) {
+ Map.Entry<String, Float> fieldBoost = fieldNames.entrySet().iterator().next();
+ Float boostValue = fieldBoost.getValue();
+ return parseAndApply(type, fieldBoost.getKey(), value, minimumShouldMatch, boostValue);
+ }
+
+ if (useDisMax) {
+ DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreaker);
+ boolean clauseAdded = false;
+ for (String fieldName : fieldNames.keySet()) {
+ Float boostValue = fieldNames.get(fieldName);
+ Query query = parseAndApply(type, fieldName, value, minimumShouldMatch, boostValue);
+ if (query != null) {
+ clauseAdded = true;
+ disMaxQuery.add(query);
+ }
+ }
+ return clauseAdded ? disMaxQuery : null;
+ } else {
+ BooleanQuery booleanQuery = new BooleanQuery();
+ for (String fieldName : fieldNames.keySet()) {
+ Float boostValue = fieldNames.get(fieldName);
+ Query query = parseAndApply(type, fieldName, value, minimumShouldMatch, boostValue);
+ if (query != null) {
+ booleanQuery.add(query, BooleanClause.Occur.SHOULD);
+ }
+ }
+ return !booleanQuery.clauses().isEmpty() ? booleanQuery : null;
+ }
+ }
+
+} \ No newline at end of file
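
With useDisMax the per-field queries are combined so the best-scoring field dominates and tieBreaker adds that fraction of the remaining fields; the boolean fallback simply sums all matching SHOULD clauses. A fragment of the two shapes in plain Lucene; the terms and tie-breaker value are illustrative:

    // dis_max: score = max(field scores) + 0.3 * (sum of the other field scores)
    DisjunctionMaxQuery disMax = new DisjunctionMaxQuery(0.3f);
    disMax.add(new TermQuery(new Term("title", "quick")));
    disMax.add(new TermQuery(new Term("body", "quick")));

    // boolean fallback: scores of all matching SHOULD clauses are summed
    BooleanQuery bool = new BooleanQuery();
    bool.add(new TermQuery(new Term("title", "quick")), BooleanClause.Occur.SHOULD);
    bool.add(new TermQuery(new Term("body", "quick")), BooleanClause.Occur.SHOULD);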
diff --git a/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java b/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java
new file mode 100644
index 0000000..27cd59d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+
+import java.io.IOException;
+
+/**
+ * A numeric filter that can be much faster than {@link org.apache.lucene.search.NumericRangeFilter} at the
+ * expense of loading the numeric values of the field into memory using {@link org.elasticsearch.index.cache.field.data.FieldDataCache}.
+ */
+public abstract class NumericRangeFieldDataFilter<T> extends Filter {
+ final IndexNumericFieldData indexFieldData;
+ final T lowerVal;
+ final T upperVal;
+ final boolean includeLower;
+ final boolean includeUpper;
+
+ public String getField() {
+ return indexFieldData.getFieldNames().indexName();
+ }
+
+ public T getLowerVal() {
+ return lowerVal;
+ }
+
+ public T getUpperVal() {
+ return upperVal;
+ }
+
+ public boolean isIncludeLower() {
+ return includeLower;
+ }
+
+ public boolean isIncludeUpper() {
+ return includeUpper;
+ }
+
+ protected NumericRangeFieldDataFilter(IndexNumericFieldData indexFieldData, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
+ this.indexFieldData = indexFieldData;
+ this.lowerVal = lowerVal;
+ this.upperVal = upperVal;
+ this.includeLower = includeLower;
+ this.includeUpper = includeUpper;
+ }
+
+ @Override
+ public final String toString() {
+ final StringBuilder sb = new StringBuilder(indexFieldData.getFieldNames().indexName()).append(":");
+ return sb.append(includeLower ? '[' : '{')
+ .append((lowerVal == null) ? "*" : lowerVal.toString())
+ .append(" TO ")
+ .append((upperVal == null) ? "*" : upperVal.toString())
+ .append(includeUpper ? ']' : '}')
+ .toString();
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof NumericRangeFieldDataFilter)) return false;
+ NumericRangeFieldDataFilter other = (NumericRangeFieldDataFilter) o;
+
+ if (!this.indexFieldData.getFieldNames().indexName().equals(other.indexFieldData.getFieldNames().indexName())
+ || this.includeLower != other.includeLower
+ || this.includeUpper != other.includeUpper
+ ) {
+ return false;
+ }
+ if (this.lowerVal != null ? !this.lowerVal.equals(other.lowerVal) : other.lowerVal != null) return false;
+ if (this.upperVal != null ? !this.upperVal.equals(other.upperVal) : other.upperVal != null) return false;
+ return true;
+ }
+
+ @Override
+ public final int hashCode() {
+ int h = indexFieldData.getFieldNames().indexName().hashCode();
+ h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204;
+ h = (h << 1) | (h >>> 31); // rotate to distinguish lower from upper
+ h ^= (upperVal != null) ? upperVal.hashCode() : -1674416163;
+ h ^= (includeLower ? 1549299360 : -365038026) ^ (includeUpper ? 1721088258 : 1948649653);
+ return h;
+ }
+
+ public static NumericRangeFieldDataFilter<Byte> newByteRange(IndexNumericFieldData indexFieldData, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
+ return new NumericRangeFieldDataFilter<Byte>(indexFieldData, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptedDocs) throws IOException {
+ final byte inclusiveLowerPoint, inclusiveUpperPoint;
+ if (lowerVal != null) {
+ byte i = lowerVal.byteValue();
+ if (!includeLower && i == Byte.MAX_VALUE)
+ return null;
+ inclusiveLowerPoint = (byte) (includeLower ? i : (i + 1));
+ } else {
+ inclusiveLowerPoint = Byte.MIN_VALUE;
+ }
+ if (upperVal != null) {
+ byte i = upperVal.byteValue();
+ if (!includeUpper && i == Byte.MIN_VALUE)
+ return null;
+ inclusiveUpperPoint = (byte) (includeUpper ? i : (i - 1));
+ } else {
+ inclusiveUpperPoint = Byte.MAX_VALUE;
+ }
+
+ if (inclusiveLowerPoint > inclusiveUpperPoint)
+ return null;
+
+ final LongValues values = indexFieldData.load(ctx).getLongValues();
+ return new LongRangeMatchDocIdSet(ctx.reader().maxDoc(), acceptedDocs, values, inclusiveLowerPoint, inclusiveUpperPoint);
+ }
+ };
+ }
+
+
+ public static NumericRangeFieldDataFilter<Short> newShortRange(IndexNumericFieldData indexFieldData, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
+ return new NumericRangeFieldDataFilter<Short>(indexFieldData, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptedDocs) throws IOException {
+ final short inclusiveLowerPoint, inclusiveUpperPoint;
+ if (lowerVal != null) {
+ short i = lowerVal.shortValue();
+ if (!includeLower && i == Short.MAX_VALUE)
+ return null;
+ inclusiveLowerPoint = (short) (includeLower ? i : (i + 1));
+ } else {
+ inclusiveLowerPoint = Short.MIN_VALUE;
+ }
+ if (upperVal != null) {
+ short i = upperVal.shortValue();
+ if (!includeUpper && i == Short.MIN_VALUE)
+ return null;
+ inclusiveUpperPoint = (short) (includeUpper ? i : (i - 1));
+ } else {
+ inclusiveUpperPoint = Short.MAX_VALUE;
+ }
+
+ if (inclusiveLowerPoint > inclusiveUpperPoint)
+ return null;
+
+ final LongValues values = indexFieldData.load(ctx).getLongValues();
+ return new LongRangeMatchDocIdSet(ctx.reader().maxDoc(), acceptedDocs, values, inclusiveLowerPoint, inclusiveUpperPoint);
+ }
+ };
+ }
+
+ public static NumericRangeFieldDataFilter<Integer> newIntRange(IndexNumericFieldData indexFieldData, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
+ return new NumericRangeFieldDataFilter<Integer>(indexFieldData, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptedDocs) throws IOException {
+ final int inclusiveLowerPoint, inclusiveUpperPoint;
+ if (lowerVal != null) {
+ int i = lowerVal.intValue();
+ if (!includeLower && i == Integer.MAX_VALUE)
+ return null;
+ inclusiveLowerPoint = includeLower ? i : (i + 1);
+ } else {
+ inclusiveLowerPoint = Integer.MIN_VALUE;
+ }
+ if (upperVal != null) {
+ int i = upperVal.intValue();
+ if (!includeUpper && i == Integer.MIN_VALUE)
+ return null;
+ inclusiveUpperPoint = includeUpper ? i : (i - 1);
+ } else {
+ inclusiveUpperPoint = Integer.MAX_VALUE;
+ }
+
+ if (inclusiveLowerPoint > inclusiveUpperPoint)
+ return null;
+
+ final LongValues values = indexFieldData.load(ctx).getLongValues();
+ return new LongRangeMatchDocIdSet(ctx.reader().maxDoc(), acceptedDocs, values, inclusiveLowerPoint, inclusiveUpperPoint);
+ }
+ };
+ }
+
+ public static NumericRangeFieldDataFilter<Long> newLongRange(IndexNumericFieldData indexFieldData, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
+ return new NumericRangeFieldDataFilter<Long>(indexFieldData, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptedDocs) throws IOException {
+ final long inclusiveLowerPoint, inclusiveUpperPoint;
+ if (lowerVal != null) {
+ long i = lowerVal.longValue();
+ if (!includeLower && i == Long.MAX_VALUE)
+ return null;
+                    inclusiveLowerPoint = includeLower ? i : (i + 1L);
+ } else {
+ inclusiveLowerPoint = Long.MIN_VALUE;
+ }
+ if (upperVal != null) {
+ long i = upperVal.longValue();
+ if (!includeUpper && i == Long.MIN_VALUE)
+ return null;
+                    inclusiveUpperPoint = includeUpper ? i : (i - 1L);
+ } else {
+ inclusiveUpperPoint = Long.MAX_VALUE;
+ }
+
+ if (inclusiveLowerPoint > inclusiveUpperPoint)
+ return null;
+
+ final LongValues values = indexFieldData.load(ctx).getLongValues();
+ return new LongRangeMatchDocIdSet(ctx.reader().maxDoc(), acceptedDocs, values, inclusiveLowerPoint, inclusiveUpperPoint);
+
+ }
+ };
+ }
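+
+    // Note that the byte, short, int and long factories above all read the field data through
+    // getLongValues(), so the single LongRangeMatchDocIdSet below covers every integral type.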
+
+ public static NumericRangeFieldDataFilter<Float> newFloatRange(IndexNumericFieldData indexFieldData, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
+ return new NumericRangeFieldDataFilter<Float>(indexFieldData, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptedDocs) throws IOException {
+                // we transform the floating point numbers to sortable integers
+                // using NumericUtils to make it easier to find the next bigger/lower value
+ final float inclusiveLowerPoint, inclusiveUpperPoint;
+ if (lowerVal != null) {
+ float f = lowerVal.floatValue();
+                    if (!includeLower && f > 0.0f && Float.isInfinite(f))
+ return null;
+ int i = NumericUtils.floatToSortableInt(f);
+ inclusiveLowerPoint = NumericUtils.sortableIntToFloat(includeLower ? i : (i + 1));
+ } else {
+ inclusiveLowerPoint = Float.NEGATIVE_INFINITY;
+ }
+ if (upperVal != null) {
+ float f = upperVal.floatValue();
+ if (!includeUpper && f < 0.0f && Float.isInfinite(f))
+ return null;
+ int i = NumericUtils.floatToSortableInt(f);
+ inclusiveUpperPoint = NumericUtils.sortableIntToFloat(includeUpper ? i : (i - 1));
+ } else {
+ inclusiveUpperPoint = Float.POSITIVE_INFINITY;
+ }
+
+ if (inclusiveLowerPoint > inclusiveUpperPoint)
+ return null;
+
+ final DoubleValues values = indexFieldData.load(ctx).getDoubleValues();
+ return new DoubleRangeMatchDocIdSet(ctx.reader().maxDoc(), acceptedDocs, values, inclusiveLowerPoint, inclusiveUpperPoint);
+ }
+ };
+ }
+
+ public static NumericRangeFieldDataFilter<Double> newDoubleRange(IndexNumericFieldData indexFieldData, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
+ return new NumericRangeFieldDataFilter<Double>(indexFieldData, lowerVal, upperVal, includeLower, includeUpper) {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptedDocs) throws IOException {
+                // we transform the floating point numbers to sortable integers
+                // using NumericUtils to make it easier to find the next bigger/lower value
+ final double inclusiveLowerPoint, inclusiveUpperPoint;
+ if (lowerVal != null) {
+ double f = lowerVal.doubleValue();
+                    if (!includeLower && f > 0.0 && Double.isInfinite(f))
+ return null;
+ long i = NumericUtils.doubleToSortableLong(f);
+ inclusiveLowerPoint = NumericUtils.sortableLongToDouble(includeLower ? i : (i + 1L));
+ } else {
+ inclusiveLowerPoint = Double.NEGATIVE_INFINITY;
+ }
+ if (upperVal != null) {
+ double f = upperVal.doubleValue();
+ if (!includeUpper && f < 0.0 && Double.isInfinite(f))
+ return null;
+ long i = NumericUtils.doubleToSortableLong(f);
+ inclusiveUpperPoint = NumericUtils.sortableLongToDouble(includeUpper ? i : (i - 1L));
+ } else {
+ inclusiveUpperPoint = Double.POSITIVE_INFINITY;
+ }
+
+ if (inclusiveLowerPoint > inclusiveUpperPoint)
+ return null;
+
+ final DoubleValues values = indexFieldData.load(ctx).getDoubleValues();
+ return new DoubleRangeMatchDocIdSet(ctx.reader().maxDoc(), acceptedDocs, values, inclusiveLowerPoint, inclusiveUpperPoint);
+ }
+ };
+ }
+
+ private static final class DoubleRangeMatchDocIdSet extends MatchDocIdSet {
+ private final DoubleValues values;
+ private final double inclusiveLowerPoint;
+ private final double inclusiveUpperPoint;
+
+        protected DoubleRangeMatchDocIdSet(int maxDoc, Bits acceptDocs, final DoubleValues values, final double inclusiveLowerPoint, final double inclusiveUpperPoint) {
+ super(maxDoc, acceptDocs);
+ this.inclusiveLowerPoint = inclusiveLowerPoint;
+ this.inclusiveUpperPoint = inclusiveUpperPoint;
+ this.values = values;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
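+        // A multi-valued document matches if any one of its values falls inside the inclusive
+        // range; documents without values (numValues == 0) never match.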
+ @Override
+ protected boolean matchDoc(int doc) {
+ int numValues = values.setDocument(doc);
+ for (int i = 0; i < numValues; i++) {
+ double value = values.nextValue();
+ if (value >= inclusiveLowerPoint && value <= inclusiveUpperPoint) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ }
+
+ private static final class LongRangeMatchDocIdSet extends MatchDocIdSet {
+ private final LongValues values;
+ private final long inclusiveLowerPoint;
+ private final long inclusiveUpperPoint;
+
+        protected LongRangeMatchDocIdSet(int maxDoc, Bits acceptDocs, final LongValues values, final long inclusiveLowerPoint, final long inclusiveUpperPoint) {
+ super(maxDoc, acceptDocs);
+ this.inclusiveLowerPoint = inclusiveLowerPoint;
+ this.inclusiveUpperPoint = inclusiveUpperPoint;
+ this.values = values;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ int numValues = values.setDocument(doc);
+ for (int i = 0; i < numValues; i++) {
+ long value = values.nextValue();
+ if (value >= inclusiveLowerPoint && value <= inclusiveUpperPoint) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java
new file mode 100644
index 0000000..700f0fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query that returns the parent documents of all child documents matching the wrapped child query, giving
+ * every matching parent the same constant score.
+ */
+public class ChildrenConstantScoreQuery extends Query {
+
+ private final Query originalChildQuery;
+ private final String parentType;
+ private final String childType;
+ private final Filter parentFilter;
+ private final int shortCircuitParentDocSet;
+ private final Filter nonNestedDocsFilter;
+
+ private Query rewrittenChildQuery;
+ private IndexReader rewriteIndexReader;
+
+ public ChildrenConstantScoreQuery(Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
+ this.parentFilter = parentFilter;
+ this.parentType = parentType;
+ this.childType = childType;
+ this.originalChildQuery = childQuery;
+ this.shortCircuitParentDocSet = shortCircuitParentDocSet;
+ this.nonNestedDocsFilter = nonNestedDocsFilter;
+ }
+
+ @Override
+ // See TopChildrenQuery#rewrite
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (rewrittenChildQuery == null) {
+ rewrittenChildQuery = originalChildQuery.rewrite(reader);
+ rewriteIndexReader = reader;
+ }
+ return this;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ rewrittenChildQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ SearchContext searchContext = SearchContext.current();
+ searchContext.idCache().refresh(searcher.getTopReaderContext().leaves());
+ Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids = searchContext.cacheRecycler().hashSet(-1);
+ UidCollector collector = new UidCollector(parentType, searchContext, collectedUids.v());
+ final Query childQuery;
+ if (rewrittenChildQuery == null) {
+ childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
+ } else {
+ assert rewriteIndexReader == searcher.getIndexReader();
+ childQuery = rewrittenChildQuery;
+ }
+ IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
+ indexSearcher.setSimilarity(searcher.getSimilarity());
+ indexSearcher.search(childQuery, collector);
+
+        int remaining = collectedUids.v().size();
+        if (remaining == 0) {
+            // nothing matched the child query, so release the recycled uid set before bailing out
+            Releasables.release(collectedUids);
+            return Queries.newMatchNoDocsQuery().createWeight(searcher);
+        }
+
+ Filter shortCircuitFilter = null;
+ if (remaining == 1) {
+ BytesRef id = collectedUids.v().iterator().next().value.toBytesRef();
+ shortCircuitFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
+ } else if (remaining <= shortCircuitParentDocSet) {
+ shortCircuitFilter = new ParentIdsFilter(parentType, collectedUids.v().keys, collectedUids.v().allocated, nonNestedDocsFilter);
+ }
+
+ ParentWeight parentWeight = new ParentWeight(parentFilter, shortCircuitFilter, searchContext, collectedUids);
+ searchContext.addReleasable(parentWeight);
+ return parentWeight;
+ }
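+
+    // createWeight runs in two phases: the child query is first executed against the whole index
+    // and the parent uids of all matching children are collected; ParentWeight then emits the
+    // parent docs whose uid is in that set. For a single collected uid a plain TermFilter is
+    // used, and for small sets (<= shortCircuitParentDocSet) a ParentIdsFilter, avoiding a scan
+    // of the full parent filter.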
+
+ private final class ParentWeight extends Weight implements Releasable {
+
+ private final Filter parentFilter;
+ private final Filter shortCircuitFilter;
+ private final SearchContext searchContext;
+ private final Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids;
+
+ private int remaining;
+ private float queryNorm;
+ private float queryWeight;
+
+ public ParentWeight(Filter parentFilter, Filter shortCircuitFilter, SearchContext searchContext, Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids) {
+ this.parentFilter = new ApplyAcceptedDocsFilter(parentFilter);
+ this.shortCircuitFilter = shortCircuitFilter;
+ this.searchContext = searchContext;
+ this.collectedUids = collectedUids;
+ this.remaining = collectedUids.v().size();
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return new Explanation(getBoost(), "not implemented yet...");
+ }
+
+ @Override
+ public Query getQuery() {
+ return ChildrenConstantScoreQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ queryWeight = getBoost();
+ return queryWeight * queryWeight;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ this.queryNorm = norm * topLevelBoost;
+ queryWeight *= this.queryNorm;
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ if (remaining == 0) {
+ return null;
+ }
+
+ if (shortCircuitFilter != null) {
+ DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, acceptDocs);
+ if (!DocIdSets.isEmpty(docIdSet)) {
+ DocIdSetIterator iterator = docIdSet.iterator();
+ if (iterator != null) {
+ return ConstantScorer.create(iterator, this, queryWeight);
+ }
+ }
+ return null;
+ }
+
+ DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, acceptDocs);
+ if (!DocIdSets.isEmpty(parentDocIdSet)) {
+ IdReaderTypeCache idReaderTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
+                // We can't be sure that liveDocs have been applied, so we apply them here. The "remaining"
+ // count down (short circuit) logic will then work as expected.
+ parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
+ if (idReaderTypeCache != null) {
+ DocIdSetIterator innerIterator = parentDocIdSet.iterator();
+ if (innerIterator != null) {
+ ParentDocIdIterator parentDocIdIterator = new ParentDocIdIterator(innerIterator, collectedUids.v(), idReaderTypeCache);
+ return ConstantScorer.create(parentDocIdIterator, this, queryWeight);
+ }
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ Releasables.release(collectedUids);
+ return true;
+ }
+
+ private final class ParentDocIdIterator extends FilteredDocIdSetIterator {
+
+ private final ObjectOpenHashSet<HashedBytesArray> parents;
+ private final IdReaderTypeCache typeCache;
+
+ private ParentDocIdIterator(DocIdSetIterator innerIterator, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache typeCache) {
+ super(innerIterator);
+ this.parents = parents;
+ this.typeCache = typeCache;
+ }
+
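+            // Filters the parent doc id stream down to docs whose uid was collected in the
+            // first phase. Once every collected parent has been emitted (remaining == 0), the
+            // iterator fast-forwards to NO_MORE_DOCS so the rest of the parent doc id set is
+            // never scanned.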
+ @Override
+ protected boolean match(int doc) {
+ if (remaining == 0) {
+ try {
+ advance(DocIdSetIterator.NO_MORE_DOCS);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ return false;
+ }
+
+ boolean match = parents.contains(typeCache.idByDoc(doc));
+ if (match) {
+ remaining--;
+ }
+ return match;
+ }
+ }
+ }
+
+ private final static class UidCollector extends ParentIdCollector {
+
+ private final ObjectOpenHashSet<HashedBytesArray> collectedUids;
+
+ UidCollector(String parentType, SearchContext context, ObjectOpenHashSet<HashedBytesArray> collectedUids) {
+ super(parentType, context);
+ this.collectedUids = collectedUids;
+ }
+
+ @Override
+ public void collect(int doc, HashedBytesArray parentIdByDoc) {
+ collectedUids.add(parentIdByDoc);
+ }
+
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || obj.getClass() != this.getClass()) {
+ return false;
+ }
+
+ ChildrenConstantScoreQuery that = (ChildrenConstantScoreQuery) obj;
+ if (!originalChildQuery.equals(that.originalChildQuery)) {
+ return false;
+ }
+ if (!childType.equals(that.childType)) {
+ return false;
+ }
+ if (shortCircuitParentDocSet != that.shortCircuitParentDocSet) {
+ return false;
+ }
+ if (getBoost() != that.getBoost()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = originalChildQuery.hashCode();
+ result = 31 * result + childType.hashCode();
+ result = 31 * result + shortCircuitParentDocSet;
+ result = 31 * result + Float.floatToIntBits(getBoost());
+ return result;
+ }
+
+ @Override
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("child_filter[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery).append(')');
+ return sb.toString();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java
new file mode 100644
index 0000000..98533a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java
@@ -0,0 +1,463 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * A query implementation that executes the wrapped child query and connects all the matching child docs to the related
+ * parent documents using the {@link IdReaderTypeCache}.
+ * <p/>
+ * This query is executed in two rounds. The first round resolves all the matching child documents and groups these
+ * documents by parent uid value; the child scores are also aggregated per parent uid value. During the second round
+ * all parent documents whose uid value was collected in the first phase are emitted as hits, with
+ * a score based on the aggregated child scores and the score type.
+ */
+public class ChildrenQuery extends Query {
+
+ private final String parentType;
+ private final String childType;
+ private final Filter parentFilter;
+ private final ScoreType scoreType;
+ private final Query originalChildQuery;
+ private final int shortCircuitParentDocSet;
+ private final Filter nonNestedDocsFilter;
+
+ private Query rewrittenChildQuery;
+ private IndexReader rewriteIndexReader;
+
+ public ChildrenQuery(String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
+ this.parentType = parentType;
+ this.childType = childType;
+ this.parentFilter = parentFilter;
+ this.originalChildQuery = childQuery;
+ this.scoreType = scoreType;
+ this.shortCircuitParentDocSet = shortCircuitParentDocSet;
+ this.nonNestedDocsFilter = nonNestedDocsFilter;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || obj.getClass() != this.getClass()) {
+ return false;
+ }
+
+ ChildrenQuery that = (ChildrenQuery) obj;
+ if (!originalChildQuery.equals(that.originalChildQuery)) {
+ return false;
+ }
+        if (!childType.equals(that.childType)) {
+            return false;
+        }
+        if (scoreType != that.scoreType) {
+            return false;
+        }
+ if (getBoost() != that.getBoost()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = originalChildQuery.hashCode();
+        result = 31 * result + childType.hashCode();
+        result = 31 * result + scoreType.hashCode();
+ result = 31 * result + Float.floatToIntBits(getBoost());
+ return result;
+ }
+
+ @Override
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("ChildrenQuery[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery
+ .toString(field)).append(')').append(ToStringUtils.boost(getBoost()));
+ return sb.toString();
+ }
+
+ @Override
+ // See TopChildrenQuery#rewrite
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (rewrittenChildQuery == null) {
+ rewriteIndexReader = reader;
+ rewrittenChildQuery = originalChildQuery.rewrite(reader);
+ }
+ return this;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ rewrittenChildQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ SearchContext searchContext = SearchContext.current();
+ searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
+
+ Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1);
+ Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount = null;
+
+ final Collector collector;
+ switch (scoreType) {
+ case AVG:
+ uidToCount = searchContext.cacheRecycler().objectIntMap(-1);
+ collector = new AvgChildUidCollector(scoreType, searchContext, parentType, uidToScore.v(), uidToCount.v());
+ break;
+ default:
+ collector = new ChildUidCollector(scoreType, searchContext, parentType, uidToScore.v());
+ }
+ final Query childQuery;
+ if (rewrittenChildQuery == null) {
+ childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
+ } else {
+ assert rewriteIndexReader == searcher.getIndexReader();
+ childQuery = rewrittenChildQuery;
+ }
+ IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
+ indexSearcher.setSimilarity(searcher.getSimilarity());
+ indexSearcher.search(childQuery, collector);
+
+ int size = uidToScore.v().size();
+ if (size == 0) {
+ uidToScore.release();
+ if (uidToCount != null) {
+ uidToCount.release();
+ }
+ return Queries.newMatchNoDocsQuery().createWeight(searcher);
+ }
+
+ final Filter parentFilter;
+ if (size == 1) {
+ BytesRef id = uidToScore.v().keys().iterator().next().value.toBytesRef();
+ if (nonNestedDocsFilter != null) {
+ List<Filter> filters = Arrays.asList(
+ new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))),
+ nonNestedDocsFilter
+ );
+ parentFilter = new AndFilter(filters);
+ } else {
+ parentFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
+ }
+ } else if (size <= shortCircuitParentDocSet) {
+ parentFilter = new ParentIdsFilter(parentType, uidToScore.v().keys, uidToScore.v().allocated, nonNestedDocsFilter);
+ } else {
+ parentFilter = new ApplyAcceptedDocsFilter(this.parentFilter);
+ }
+ ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentFilter, searchContext, size, uidToScore, uidToCount);
+ searchContext.addReleasable(parentWeight);
+ return parentWeight;
+ }
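+
+    // The parent filter above is chosen by collected result size: a plain TermFilter (AND'ed
+    // with the non-nested docs filter when present) for a single parent uid, a ParentIdsFilter
+    // for small sets (<= shortCircuitParentDocSet), and the full parent filter otherwise.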
+
+ private final class ParentWeight extends Weight implements Releasable {
+
+ private final Weight childWeight;
+ private final Filter parentFilter;
+ private final SearchContext searchContext;
+ private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
+ private final Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount;
+
+ private int remaining;
+
+ private ParentWeight(Weight childWeight, Filter parentFilter, SearchContext searchContext, int remaining, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore, Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount) {
+ this.childWeight = childWeight;
+ this.parentFilter = parentFilter;
+ this.searchContext = searchContext;
+ this.remaining = remaining;
+ this.uidToScore = uidToScore;
+            this.uidToCount = uidToCount;
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return new Explanation(getBoost(), "not implemented yet...");
+ }
+
+ @Override
+ public Query getQuery() {
+ return ChildrenQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ float sum = childWeight.getValueForNormalization();
+ sum *= getBoost() * getBoost();
+ return sum;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(parentsSet) || remaining == 0) {
+ return null;
+ }
+
+ IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
+            // We can't be sure that liveDocs have been applied, so we apply them here. The "remaining"
+ // count down (short circuit) logic will then work as expected.
+ DocIdSetIterator parentsIterator = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();
+ switch (scoreType) {
+ case AVG:
+ return new AvgParentScorer(this, idTypeCache, uidToScore.v(), uidToCount.v(), parentsIterator);
+ default:
+ return new ParentScorer(this, idTypeCache, uidToScore.v(), parentsIterator);
+ }
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ Releasables.release(uidToScore, uidToCount);
+ return true;
+ }
+
+ private class ParentScorer extends Scorer {
+
+ final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
+ final IdReaderTypeCache idTypeCache;
+ final DocIdSetIterator parentsIterator;
+
+ int currentDocId = -1;
+ float currentScore;
+
+ ParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator parentsIterator) {
+ super(weight);
+ this.idTypeCache = idTypeCache;
+ this.parentsIterator = parentsIterator;
+ this.uidToScore = uidToScore;
+ }
+
+ @Override
+ public float score() throws IOException {
+ return currentScore;
+ }
+
+ @Override
+ public int freq() throws IOException {
+                // We don't have the original child query hit info here...
+                // The freq of the children could be collected and returned here, but that would make this Scorer more expensive.
+ return 1;
+ }
+
+ @Override
+ public int docID() {
+ return currentDocId;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ if (remaining == 0) {
+ return currentDocId = NO_MORE_DOCS;
+ }
+
+ while (true) {
+ currentDocId = parentsIterator.nextDoc();
+ if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
+ return currentDocId;
+ }
+
+ HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
+ if (uidToScore.containsKey(uid)) {
+                        // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
+ currentScore = uidToScore.lget();
+ remaining--;
+ return currentDocId;
+ }
+ }
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ if (remaining == 0) {
+ return currentDocId = NO_MORE_DOCS;
+ }
+
+ currentDocId = parentsIterator.advance(target);
+ if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
+ return currentDocId;
+ }
+
+ HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
+ if (uidToScore.containsKey(uid)) {
+                    // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
+ currentScore = uidToScore.lget();
+ remaining--;
+ return currentDocId;
+ } else {
+ return nextDoc();
+ }
+ }
+
+ @Override
+ public long cost() {
+ return parentsIterator.cost();
+ }
+ }
+
+ private final class AvgParentScorer extends ParentScorer {
+
+ final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
+
+ AvgParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount, DocIdSetIterator parentsIterator) {
+ super(weight, idTypeCache, uidToScore, parentsIterator);
+ this.uidToCount = uidToCount;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ if (remaining == 0) {
+ return currentDocId = NO_MORE_DOCS;
+ }
+
+ while (true) {
+ currentDocId = parentsIterator.nextDoc();
+ if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
+ return currentDocId;
+ }
+
+ HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
+ if (uidToScore.containsKey(uid)) {
+                        // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
+ currentScore = uidToScore.lget();
+ currentScore /= uidToCount.get(uid);
+ remaining--;
+ return currentDocId;
+ }
+ }
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ if (remaining == 0) {
+ return currentDocId = NO_MORE_DOCS;
+ }
+
+ currentDocId = parentsIterator.advance(target);
+ if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
+ return currentDocId;
+ }
+
+ HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
+ if (uidToScore.containsKey(uid)) {
+                    // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
+ currentScore = uidToScore.lget();
+ currentScore /= uidToCount.get(uid);
+ remaining--;
+ return currentDocId;
+ } else {
+ return nextDoc();
+ }
+ }
+ }
+
+ }
+
+ private static class ChildUidCollector extends ParentIdCollector {
+
+ protected final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
+ private final ScoreType scoreType;
+ protected Scorer scorer;
+
+ ChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore) {
+ super(childType, searchContext);
+ this.uidToScore = uidToScore;
+ this.scoreType = scoreType;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.scorer = scorer;
+ }
+
+ @Override
+ protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
+ float currentScore = scorer.score();
+ switch (scoreType) {
+ case SUM:
+ uidToScore.addTo(parentUid, currentScore);
+ break;
+ case MAX:
+ if (uidToScore.containsKey(parentUid)) {
+ float previousScore = uidToScore.lget();
+ if (currentScore > previousScore) {
+ uidToScore.lset(currentScore);
+ }
+ } else {
+ uidToScore.put(parentUid, currentScore);
+ }
+ break;
+ case AVG:
+ assert false : "AVG has its own collector";
+ default:
+ assert false : "Are we missing a score type here? -- " + scoreType;
+ break;
+ }
+ }
+
+ }
+
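+    // ScoreType.AVG cannot be folded in a single pass the way SUM and MAX can, so the collector
+    // below keeps a running score sum per parent uid in uidToScore and a hit count per parent
+    // uid in uidToCount; AvgParentScorer divides the sum by the count when the parent doc is
+    // emitted.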
+ private final static class AvgChildUidCollector extends ChildUidCollector {
+
+ private final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
+
+ AvgChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount) {
+ super(scoreType, searchContext, childType, uidToScore);
+ this.uidToCount = uidToCount;
+ assert scoreType == ScoreType.AVG;
+ }
+
+ @Override
+ protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
+ float currentScore = scorer.score();
+ uidToCount.addTo(parentUid, 1);
+ uidToScore.addTo(parentUid, currentScore);
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/ConstantScorer.java b/src/main/java/org/elasticsearch/index/search/child/ConstantScorer.java
new file mode 100644
index 0000000..defae7d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ConstantScorer.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+
+import java.io.IOException;
+
+/**
+ * A scorer that wraps a {@link DocIdSetIterator} and emits a constant score.
+ */
+// Borrowed from ConstantScoreQuery
+class ConstantScorer extends Scorer {
+
+ static ConstantScorer create(DocIdSetIterator iterator, Weight weight, float constantScore) throws IOException {
+ return new ConstantScorer(iterator, weight, constantScore);
+ }
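+
+    // In this package the constant score passed to create(...) is the parent weight's
+    // queryWeight, i.e. the query boost after normalization, so every matching doc receives
+    // the same score.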
+
+ private final DocIdSetIterator docIdSetIterator;
+ private final float constantScore;
+
+ private ConstantScorer(DocIdSetIterator docIdSetIterator, Weight w, float constantScore) {
+ super(w);
+ this.constantScore = constantScore;
+ this.docIdSetIterator = docIdSetIterator;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return docIdSetIterator.nextDoc();
+ }
+
+ @Override
+ public int docID() {
+ return docIdSetIterator.docID();
+ }
+
+ @Override
+ public float score() throws IOException {
+ assert docIdSetIterator.docID() != NO_MORE_DOCS;
+ return constantScore;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 1;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return docIdSetIterator.advance(target);
+ }
+
+ @Override
+ public long cost() {
+ return docIdSetIterator.cost();
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java b/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java
new file mode 100644
index 0000000..d9c1318
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.IdentityHashMap;
+
+/**
+ * Forked from {@link QueryWrapperFilter} to make sure the weight is only created once.
+ * This filter should never be cached! This filter only exists for internal usage.
+ *
+ * @elasticsearch.internal
+ */
+public class CustomQueryWrappingFilter extends NoCacheFilter implements Releasable {
+
+ private final Query query;
+
+ private IndexSearcher searcher;
+ private IdentityHashMap<AtomicReader, DocIdSet> docIdSets;
+
+ /** Constructs a filter which only matches documents matching
+ * <code>query</code>.
+ */
+ public CustomQueryWrappingFilter(Query query) {
+ if (query == null)
+ throw new NullPointerException("Query may not be null");
+ this.query = query;
+ }
+
+ /** returns the inner Query */
+ public final Query getQuery() {
+ return query;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
+ final SearchContext searchContext = SearchContext.current();
+ if (docIdSets == null) {
+ assert searcher == null;
+ IndexSearcher searcher = searchContext.searcher();
+ docIdSets = new IdentityHashMap<AtomicReader, DocIdSet>();
+ this.searcher = searcher;
+ searchContext.addReleasable(this);
+
+ final Weight weight = searcher.createNormalizedWeight(query);
+ for (final AtomicReaderContext leaf : searcher.getTopReaderContext().leaves()) {
+ final DocIdSet set = DocIdSets.toCacheable(leaf.reader(), new DocIdSet() {
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return weight.scorer(leaf, true, false, null);
+ }
+ @Override
+ public boolean isCacheable() { return false; }
+ });
+ docIdSets.put(leaf.reader(), set);
+ }
+ } else {
+ assert searcher == SearchContext.current().searcher();
+ }
+ final DocIdSet set = docIdSets.get(context.reader());
+ if (set != null && acceptDocs != null) {
+ return BitsFilteredDocIdSet.wrap(set, acceptDocs);
+ }
+ return set;
+ }
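+
+    // The weight for the wrapped query is built once per search context and the resulting
+    // per-segment DocIdSets are cached in an IdentityHashMap keyed by segment reader, so later
+    // getDocIdSet calls during the same search reuse them instead of re-creating the weight;
+    // this is why the class was forked from QueryWrapperFilter.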
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+        // We need to clear the docIdSets, otherwise unused DocIdSets are
+        // left around and can potentially become a memory leak.
+ docIdSets = null;
+ searcher = null;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "CustomQueryWrappingFilter(" + query + ")";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ }
+        if (o instanceof CustomQueryWrappingFilter &&
+                this.query.equals(((CustomQueryWrappingFilter) o).query)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return query.hashCode() ^ 0x823D64C9;
+ }
+
+ /** @return Whether {@link CustomQueryWrappingFilter} should be used. */
+ public static boolean shouldUseCustomQueryWrappingFilter(Query query) {
+        return query instanceof TopChildrenQuery || query instanceof ChildrenConstantScoreQuery
+                || query instanceof ChildrenQuery || query instanceof ParentConstantScoreQuery
+                || query instanceof ParentQuery;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/DeleteByQueryWrappingFilter.java b/src/main/java/org/elasticsearch/index/search/child/DeleteByQueryWrappingFilter.java
new file mode 100644
index 0000000..0ead91e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/DeleteByQueryWrappingFilter.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * This filter exists only for wrapping parent/child queries in the delete-by-query API.
+ * Don't use this filter for other purposes.
+ *
+ * @elasticsearch.internal
+ */
+public class DeleteByQueryWrappingFilter extends Filter {
+
+ private final Query query;
+
+ private IndexSearcher searcher;
+ private Weight weight;
+
+ /** Constructs a filter which only matches documents matching
+ * <code>query</code>.
+ */
+ public DeleteByQueryWrappingFilter(Query query) {
+ if (query == null)
+ throw new NullPointerException("Query may not be null");
+ this.query = query;
+ }
+
+ /** returns the inner Query */
+ public final Query getQuery() {
+ return query;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
+ SearchContext searchContext = SearchContext.current();
+ if (weight == null) {
+ assert searcher == null;
+ searcher = searchContext.searcher();
+ IndexReader indexReader = SearchContext.current().searcher().getIndexReader();
+ IndexReader multiReader = null;
+ try {
+ if (!contains(indexReader, context)) {
+ multiReader = new MultiReader(new IndexReader[]{indexReader, context.reader()}, false);
+ Similarity similarity = searcher.getSimilarity();
+                    searcher = new IndexSearcher(multiReader);
+ searcher.setSimilarity(similarity);
+ }
+ weight = searcher.createNormalizedWeight(query);
+ } finally {
+ if (multiReader != null) {
+ multiReader.close();
+ }
+ }
+ } else {
+ IndexReader indexReader = searcher.getIndexReader();
+ if (!contains(indexReader, context)) {
+ IndexReader multiReader = new MultiReader(new IndexReader[]{indexReader, context.reader()}, false);
+ try {
+ Similarity similarity = searcher.getSimilarity();
+ searcher = new IndexSearcher(multiReader);
+ searcher.setSimilarity(similarity);
+ weight = searcher.createNormalizedWeight(query);
+ } finally {
+ multiReader.close();
+ }
+ }
+ }
+
+ return new DocIdSet() {
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return weight.scorer(context, true, false, acceptDocs);
+ }
+ @Override
+ public boolean isCacheable() { return false; }
+ };
+ }
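+
+    // Delete-by-query runs segment by segment, but the wrapped parent/child query needs the
+    // whole index to resolve parent/child links. Whenever the incoming segment is not covered
+    // by the reader the current weight was built against, the searcher is re-wrapped in a
+    // MultiReader that includes that segment and the weight is rebuilt.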
+
+ @Override
+ public String toString() {
+ return "DeleteByQueryWrappingFilter(" + query + ")";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof DeleteByQueryWrappingFilter))
+ return false;
+ return this.query.equals(((DeleteByQueryWrappingFilter)o).query);
+ }
+
+ @Override
+ public int hashCode() {
+ return query.hashCode() ^ 0x823D64CA;
+ }
+
+ static boolean contains(IndexReader indexReader, AtomicReaderContext context) {
+ for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
+ if (context.reader().getCoreCacheKey().equals(atomicReaderContext.reader().getCoreCacheKey())) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java
new file mode 100644
index 0000000..387a8ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
+import org.elasticsearch.common.lucene.search.NoopCollector;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query that only returns child documents linked to parent documents that matched the inner query.
+ */
+public class ParentConstantScoreQuery extends Query {
+
+ private final Query originalParentQuery;
+ private final String parentType;
+ private final Filter childrenFilter;
+
+ private Query rewrittenParentQuery;
+ private IndexReader rewriteIndexReader;
+
+ public ParentConstantScoreQuery(Query parentQuery, String parentType, Filter childrenFilter) {
+ this.originalParentQuery = parentQuery;
+ this.parentType = parentType;
+ this.childrenFilter = childrenFilter;
+ }
+
+ @Override
+ // See TopChildrenQuery#rewrite
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (rewrittenParentQuery == null) {
+ rewrittenParentQuery = originalParentQuery.rewrite(reader);
+ rewriteIndexReader = reader;
+ }
+ return this;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ rewrittenParentQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ SearchContext searchContext = SearchContext.current();
+ searchContext.idCache().refresh(searcher.getTopReaderContext().leaves());
+ Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents = searchContext.cacheRecycler().hashSet(-1);
+ ParentUidsCollector collector = new ParentUidsCollector(parents.v(), searchContext, parentType);
+
+        final Query parentQuery;
+        if (rewrittenParentQuery == null) {
+            parentQuery = rewrittenParentQuery = searcher.rewrite(originalParentQuery);
+        } else {
+            assert rewriteIndexReader == searcher.getIndexReader();
+            parentQuery = rewrittenParentQuery;
+        }
+ IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
+ indexSearcher.setSimilarity(searcher.getSimilarity());
+ indexSearcher.search(parentQuery, collector);
+
+        if (parents.v().isEmpty()) {
+            // nothing matched the parent query, so release the recycled uid set before bailing out
+            Releasables.release(parents);
+            return Queries.newMatchNoDocsQuery().createWeight(searcher);
+        }
+
+ ChildrenWeight childrenWeight = new ChildrenWeight(childrenFilter, searchContext, parents);
+ searchContext.addReleasable(childrenWeight);
+ return childrenWeight;
+ }
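+
+    // Mirror image of ChildrenConstantScoreQuery: the parent query is executed first and the
+    // collected parent uids are then used to filter child documents through their
+    // parentIdByDoc link in the id cache.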
+
+ private final class ChildrenWeight extends Weight implements Releasable {
+
+ private final Filter childrenFilter;
+ private final SearchContext searchContext;
+ private final Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents;
+
+ private float queryNorm;
+ private float queryWeight;
+
+ private ChildrenWeight(Filter childrenFilter, SearchContext searchContext, Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents) {
+ this.childrenFilter = new ApplyAcceptedDocsFilter(childrenFilter);
+ this.searchContext = searchContext;
+ this.parents = parents;
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return new Explanation(getBoost(), "not implemented yet...");
+ }
+
+ @Override
+ public Query getQuery() {
+ return ParentConstantScoreQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ queryWeight = getBoost();
+ return queryWeight * queryWeight;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ this.queryNorm = norm * topLevelBoost;
+ queryWeight *= this.queryNorm;
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(childrenDocIdSet)) {
+ return null;
+ }
+
+ IdReaderTypeCache idReaderTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
+ if (idReaderTypeCache != null) {
+ DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
+ if (innerIterator != null) {
+ ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(innerIterator, parents.v(), idReaderTypeCache);
+ return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ Releasables.release(parents);
+ return true;
+ }
+
+ private final class ChildrenDocIdIterator extends FilteredDocIdSetIterator {
+
+ private final ObjectOpenHashSet<HashedBytesArray> parents;
+ private final IdReaderTypeCache idReaderTypeCache;
+
+ ChildrenDocIdIterator(DocIdSetIterator innerIterator, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache idReaderTypeCache) {
+ super(innerIterator);
+ this.parents = parents;
+ this.idReaderTypeCache = idReaderTypeCache;
+ }
+
+ @Override
+ protected boolean match(int doc) {
+ return parents.contains(idReaderTypeCache.parentIdByDoc(doc));
+ }
+
+ }
+ }
+
+ private final static class ParentUidsCollector extends NoopCollector {
+
+ private final ObjectOpenHashSet<HashedBytesArray> collectedUids;
+ private final SearchContext context;
+ private final String parentType;
+
+ private IdReaderTypeCache typeCache;
+
+ ParentUidsCollector(ObjectOpenHashSet<HashedBytesArray> collectedUids, SearchContext context, String parentType) {
+ this.collectedUids = collectedUids;
+ this.context = context;
+ this.parentType = parentType;
+ }
+
+        @Override
+        public void collect(int doc) throws IOException {
+            // It can happen that a particular segment has no documents for a specific type; the null check prevents an NPE
+ if (typeCache != null) {
+ collectedUids.add(typeCache.idByDoc(doc));
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+ typeCache = context.idCache().reader(readerContext.reader()).type(parentType);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ int result = originalParentQuery.hashCode();
+ result = 31 * result + parentType.hashCode();
+ result = 31 * result + Float.floatToIntBits(getBoost());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || obj.getClass() != this.getClass()) {
+ return false;
+ }
+
+ ParentConstantScoreQuery that = (ParentConstantScoreQuery) obj;
+ if (!originalParentQuery.equals(that.originalParentQuery)) {
+ return false;
+ }
+ if (!parentType.equals(that.parentType)) {
+ return false;
+ }
+ if (this.getBoost() != that.getBoost()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("parent_filter[").append(parentType).append("](").append(originalParentQuery).append(')');
+ return sb.toString();
+ }
+
+}
+
diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentIdCollector.java b/src/main/java/org/elasticsearch/index/search/child/ParentIdCollector.java
new file mode 100644
index 0000000..b6cca71
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ParentIdCollector.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lucene.search.NoopCollector;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * A simple collector that only collects a doc if its parent ID is not
+ * <code>null</code>.
+ */
+abstract class ParentIdCollector extends NoopCollector {
+ protected final String type;
+ protected final SearchContext context;
+ private IdReaderTypeCache typeCache;
+
+ protected ParentIdCollector(String parentType, SearchContext context) {
+ this.type = parentType;
+ this.context = context;
+ }
+
+ @Override
+ public final void collect(int doc) throws IOException {
+ if (typeCache != null) {
+ HashedBytesArray parentIdByDoc = typeCache.parentIdByDoc(doc);
+ if (parentIdByDoc != null) {
+ collect(doc, parentIdByDoc);
+ }
+ }
+ }
+
+ protected abstract void collect(int doc, HashedBytesArray parentId) throws IOException;
+
+ @Override
+ public void setNextReader(AtomicReaderContext readerContext) throws IOException {
+ typeCache = context.idCache().reader(readerContext.reader()).type(type);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java
new file mode 100644
index 0000000..21b59ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+
+/**
+ * Advantages of this filter over Lucene's TermsFilter in the parent/child context:
+ * 1) We don't need to copy all values from the id cache into a list and then copy all the id values
+ * into one contiguous byte array. This should save a lot of object creation and GC pressure.
+ * 2) We filter docs by one field only.
+ * 3) We can directly reference values that originate from the id cache.
+ */
+final class ParentIdsFilter extends Filter {
+
+ private final BytesRef parentTypeBr;
+ private final Object[] keys;
+ private final boolean[] allocated;
+
+ private final Filter nonNestedDocsFilter;
+
+ public ParentIdsFilter(String parentType, Object[] keys, boolean[] allocated, Filter nonNestedDocsFilter) {
+ this.nonNestedDocsFilter = nonNestedDocsFilter;
+ this.parentTypeBr = new BytesRef(parentType);
+ this.keys = keys;
+ this.allocated = allocated;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ Terms terms = context.reader().terms(UidFieldMapper.NAME);
+ if (terms == null) {
+ return null;
+ }
+
+ TermsEnum termsEnum = terms.iterator(null);
+ BytesRef uidSpare = new BytesRef();
+ BytesRef idSpare = new BytesRef();
+
+ if (acceptDocs == null) {
+ acceptDocs = context.reader().getLiveDocs();
+ }
+
+ FixedBitSet nonNestedDocs = null;
+ if (nonNestedDocsFilter != null) {
+ nonNestedDocs = (FixedBitSet) nonNestedDocsFilter.getDocIdSet(context, acceptDocs);
+ }
+
+ DocsEnum docsEnum = null;
+ FixedBitSet result = null;
+ for (int i = 0; i < allocated.length; i++) {
+ if (!allocated[i]) {
+ continue;
+ }
+
+ idSpare.bytes = ((HashedBytesArray) keys[i]).toBytes();
+ idSpare.length = idSpare.bytes.length;
+ Uid.createUidAsBytes(parentTypeBr, idSpare, uidSpare);
+ if (termsEnum.seekExact(uidSpare)) {
+ int docId;
+ docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
+ if (result == null) {
+ docId = docsEnum.nextDoc();
+ if (docId != DocIdSetIterator.NO_MORE_DOCS) {
+ result = new FixedBitSet(context.reader().maxDoc());
+ } else {
+ continue;
+ }
+ } else {
+ docId = docsEnum.nextDoc();
+ if (docId == DocIdSetIterator.NO_MORE_DOCS) {
+ continue;
+ }
+ }
+ if (nonNestedDocs != null && !nonNestedDocs.get(docId)) {
+ docId = nonNestedDocs.nextSetBit(docId);
+ }
+ result.set(docId);
+ assert docsEnum.advance(docId + 1) == DocIdSetIterator.NO_MORE_DOCS : "DocId " + docId + " should have been the last one but docId " + docsEnum.docID() + " exists.";
+ }
+ }
+ return result;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java
new file mode 100644
index 0000000..599a7eb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
+import org.elasticsearch.common.lucene.search.NoopCollector;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.cache.id.IdReaderTypeCache;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query implementation that executes the wrapped parent query and
+ * connects the matching parent docs to the related child documents
+ * using the {@link IdReaderTypeCache}.
+ */
+public class ParentQuery extends Query {
+
+ private final Query originalParentQuery;
+ private final String parentType;
+ private final Filter childrenFilter;
+
+ private Query rewrittenParentQuery;
+ private IndexReader rewriteIndexReader;
+
+ public ParentQuery(Query parentQuery, String parentType, Filter childrenFilter) {
+ this.originalParentQuery = parentQuery;
+ this.parentType = parentType;
+ this.childrenFilter = childrenFilter;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || obj.getClass() != this.getClass()) {
+ return false;
+ }
+
+ ParentQuery that = (ParentQuery) obj;
+ if (!originalParentQuery.equals(that.originalParentQuery)) {
+ return false;
+ }
+ if (!parentType.equals(that.parentType)) {
+ return false;
+ }
+ if (getBoost() != that.getBoost()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = originalParentQuery.hashCode();
+ result = 31 * result + parentType.hashCode();
+ result = 31 * result + Float.floatToIntBits(getBoost());
+ return result;
+ }
+
+ @Override
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("ParentQuery[").append(parentType).append("](")
+ .append(originalParentQuery.toString(field)).append(')')
+ .append(ToStringUtils.boost(getBoost()));
+ return sb.toString();
+ }
+
+ @Override
+ // See TopChildrenQuery#rewrite
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (rewrittenParentQuery == null) {
+ rewriteIndexReader = reader;
+ rewrittenParentQuery = originalParentQuery.rewrite(reader);
+ }
+ return this;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ rewrittenParentQuery.extractTerms(terms);
+ }
+
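+ // Two phases: first the (rewritten) parent query is executed against a private IndexSearcher, collecting each
+ // matching parent's uid and score; then the returned ChildWeight scores child docs by looking up their parent uid.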
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ SearchContext searchContext = SearchContext.current();
+ searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
+ Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1);
+ ParentUidCollector collector = new ParentUidCollector(uidToScore.v(), searchContext, parentType);
+
+ final Query parentQuery;
+ if (rewrittenParentQuery == null) {
+ parentQuery = rewrittenParentQuery = searcher.rewrite(originalParentQuery);
+ } else {
+ assert rewriteIndexReader == searcher.getIndexReader();
+ parentQuery = rewrittenParentQuery;
+ }
+ IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
+ indexSearcher.setSimilarity(searcher.getSimilarity());
+ indexSearcher.search(parentQuery, collector);
+
+ if (uidToScore.v().isEmpty()) {
+ uidToScore.release();
+ return Queries.newMatchNoDocsQuery().createWeight(searcher);
+ }
+
+ ChildWeight childWeight = new ChildWeight(parentQuery.createWeight(searcher), childrenFilter, searchContext, uidToScore);
+ searchContext.addReleasable(childWeight);
+ return childWeight;
+ }
+
+ private static class ParentUidCollector extends NoopCollector {
+
+ private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
+ private final SearchContext searchContext;
+ private final String parentType;
+
+ private Scorer scorer;
+ private IdReaderTypeCache typeCache;
+
+ ParentUidCollector(ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, SearchContext searchContext, String parentType) {
+ this.uidToScore = uidToScore;
+ this.searchContext = searchContext;
+ this.parentType = parentType;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ if (typeCache == null) {
+ return;
+ }
+
+ HashedBytesArray parentUid = typeCache.idByDoc(doc);
+ uidToScore.put(parentUid, scorer.score());
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.scorer = scorer;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ typeCache = searchContext.idCache().reader(context.reader()).type(parentType);
+ }
+ }
+
+ private class ChildWeight extends Weight implements Releasable {
+
+ private final Weight parentWeight;
+ private final Filter childrenFilter;
+ private final SearchContext searchContext;
+ private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
+
+ private ChildWeight(Weight parentWeight, Filter childrenFilter, SearchContext searchContext, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore) {
+ this.parentWeight = parentWeight;
+ this.childrenFilter = new ApplyAcceptedDocsFilter(childrenFilter);
+ this.searchContext = searchContext;
+ this.uidToScore = uidToScore;
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return new Explanation(getBoost(), "not implemented yet...");
+ }
+
+ @Override
+ public Query getQuery() {
+ return ParentQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ float sum = parentWeight.getValueForNormalization();
+ sum *= getBoost() * getBoost();
+ return sum;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(childrenDocSet)) {
+ return null;
+ }
+ IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
+ if (idTypeCache == null) {
+ return null;
+ }
+
+ return new ChildScorer(this, uidToScore.v(), childrenDocSet.iterator(), idTypeCache);
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ Releasables.release(uidToScore);
+ return true;
+ }
+ }
+
+ private static class ChildScorer extends Scorer {
+
+ private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
+ private final DocIdSetIterator childrenIterator;
+ private final IdReaderTypeCache typeCache;
+
+ private int currentChildDoc = -1;
+ private float currentScore;
+
+ ChildScorer(Weight weight, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator childrenIterator, IdReaderTypeCache typeCache) {
+ super(weight);
+ this.uidToScore = uidToScore;
+ this.childrenIterator = childrenIterator;
+ this.typeCache = typeCache;
+ }
+
+ @Override
+ public float score() throws IOException {
+ return currentScore;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ // We don't have the original child query hit info here...
+ // The freq of the children could be collected and returned here, but that would make this Scorer more expensive.
+ return 1;
+ }
+
+ @Override
+ public int docID() {
+ return currentChildDoc;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ while (true) {
+ currentChildDoc = childrenIterator.nextDoc();
+ if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
+ return currentChildDoc;
+ }
+
+ HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
+ if (uid == null) {
+ continue;
+ }
+ if (uidToScore.containsKey(uid)) {
+ // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
+ currentScore = uidToScore.lget();
+ return currentChildDoc;
+ }
+ }
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ currentChildDoc = childrenIterator.advance(target);
+ if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
+ return currentChildDoc;
+ }
+ HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
+ if (uid == null) {
+ return nextDoc();
+ }
+
+ if (uidToScore.containsKey(uid)) {
+ // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
+ currentScore = uidToScore.lget();
+ return currentChildDoc;
+ } else {
+ return nextDoc();
+ }
+ }
+
+ @Override
+ public long cost() {
+ return childrenIterator.cost();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/ScoreType.java b/src/main/java/org/elasticsearch/index/search/child/ScoreType.java
new file mode 100644
index 0000000..5acdcb6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/ScoreType.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * Defines how scores from child documents are mapped into the parent document.
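+ * <p/>
+ * For example, child scores of 1.0 and 3.0 map to 3.0 for MAX, 2.0 for AVG and 4.0 for SUM.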
+ */
+public enum ScoreType {
+
+ /**
+ * Only the highest score of all matching child documents is mapped into the parent.
+ */
+ MAX,
+
+ /**
+ * The average score of all matching child documents is mapped into the parent.
+ */
+ AVG,
+
+ /**
+ * The scores of all matching child documents are summed up and mapped into the parent.
+ */
+ SUM;
+
+ public static ScoreType fromString(String type) {
+ if ("max".equals(type)) {
+ return MAX;
+ } else if ("avg".equals(type)) {
+ return AVG;
+ } else if ("sum".equals(type)) {
+ return SUM;
+ } else if ("total".equals(type)) { // This name is consistent with: ScoreMode.Total
+ return SUM;
+ }
+ throw new ElasticsearchIllegalArgumentException("No score type for child query [" + type + "] found");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java
new file mode 100644
index 0000000..5733057
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java
@@ -0,0 +1,389 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.search.EmptyScorer;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Set;
+
+/**
+ * A query that evaluates the top matching child documents (based on the score) in order to determine which
+ * parent documents to return. This query tries to find just enough child documents to return the requested
+ * number of parent documents (or fewer if no more child documents can be found).
+ * <p/>
+ * This query executes several internal searches. In the first round it tries to find ((request offset + requested size) * factor)
+ * child documents. The resulting child documents are mapped into their parent documents, including the aggregated child scores.
+ * If not enough parent documents could be resolved, then a subsequent round is executed, requesting the previously
+ * requested number of documents multiplied by the incremental factor. This logic repeats until enough parent documents
+ * are resolved or until no more child documents are available.
+ * <p/>
+ * This query is most of the time faster than the {@link ChildrenQuery}. Usually enough parent documents can be returned
+ * in the first child document query round.
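+ * <p/>
+ * For example, with offset 0, size 10 and factor 5, the first round searches for 50 child documents; if fewer
+ * than 10 parent documents resolve and more child hits exist, the next round searches for 50 * incremental_factor
+ * child documents (capped at the total number of child hits).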
+ */
+public class TopChildrenQuery extends Query {
+
+ private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator();
+
+ private final CacheRecycler cacheRecycler;
+ private final String parentType;
+ private final String childType;
+ private final ScoreType scoreType;
+ private final int factor;
+ private final int incrementalFactor;
+ private final Query originalChildQuery;
+
+ // This field will hold the rewritten form of originalChildQuery, so that we can reuse it
+ private Query rewrittenChildQuery;
+ private IndexReader rewriteIndexReader;
+
+ // Note, the query is expected to already be filtered to only child type docs
+ public TopChildrenQuery(Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, CacheRecycler cacheRecycler) {
+ this.originalChildQuery = childQuery;
+ this.childType = childType;
+ this.parentType = parentType;
+ this.scoreType = scoreType;
+ this.factor = factor;
+ this.incrementalFactor = incrementalFactor;
+ this.cacheRecycler = cacheRecycler;
+ }
+
+ // Rewrite invocation logic:
+ // 1) query_then|and_fetch (default): Rewrite is executed as part of the createWeight invocation, when searching for child docs.
+ // 2) dfs_query_then|and_fetch: First rewrite and then createWeight is executed. During the query phase rewrite isn't
+ // executed any more because searchContext#queryRewritten() returns true.
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ if (rewrittenChildQuery == null) {
+ rewrittenChildQuery = originalChildQuery.rewrite(reader);
+ rewriteIndexReader = reader;
+ }
+ // We can always return the current instance, and we can do this b/c the child query is executed separately
+ // before the main query (other scope) in a different IS#search() invocation than the main query.
+ // In fact we only need to override the rewrite method for the dfs phase, in order to also get global document
+ // frequencies for the child query.
+ return this;
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ rewrittenChildQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs = cacheRecycler.hashMap(-1);
+ SearchContext searchContext = SearchContext.current();
+ searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
+
+ int parentHitsResolved;
+ int requestedDocs = (searchContext.from() + searchContext.size());
+ if (requestedDocs <= 0) {
+ requestedDocs = 1;
+ }
+ int numChildDocs = requestedDocs * factor;
+
+ Query childQuery;
+ if (rewrittenChildQuery == null) {
+ childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
+ } else {
+ assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader();
+ childQuery = rewrittenChildQuery;
+ }
+
+ IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
+ indexSearcher.setSimilarity(searcher.getSimilarity());
+ while (true) {
+ parentDocs.v().clear();
+ TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs);
+ parentHitsResolved = resolveParentDocuments(topChildDocs, searchContext, parentDocs);
+
+ // check if we found enough docs, if so, break
+ if (parentHitsResolved >= requestedDocs) {
+ break;
+ }
+ // if we did not find enough docs, check if it makes sense to search further
+ if (topChildDocs.totalHits <= numChildDocs) {
+ break;
+ }
+ // if not, update numDocs, and search again
+ numChildDocs *= incrementalFactor;
+ if (numChildDocs > topChildDocs.totalHits) {
+ numChildDocs = topChildDocs.totalHits;
+ }
+ }
+
+ ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentDocs);
+ searchContext.addReleasable(parentWeight);
+ return parentWeight;
+ }
+
+ int resolveParentDocuments(TopDocs topDocs, SearchContext context, Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) {
+ int parentHitsResolved = 0;
+ Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>> parentDocsPerReader = cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
+ for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+ int readerIndex = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
+ AtomicReaderContext subContext = context.searcher().getIndexReader().leaves().get(readerIndex);
+ int subDoc = scoreDoc.doc - subContext.docBase;
+
+ // find the parent id
+ HashedBytesArray parentId = context.idCache().reader(subContext.reader()).parentIdByDoc(parentType, subDoc);
+ if (parentId == null) {
+ // no parent found
+ continue;
+ }
+ // now go over and find the parent doc Id and reader tuple
+ for (AtomicReaderContext atomicReaderContext : context.searcher().getIndexReader().leaves()) {
+ AtomicReader indexReader = atomicReaderContext.reader();
+ int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
+ Bits liveDocs = indexReader.getLiveDocs();
+ if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
+ // we found a match, add it and break
+
+ Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs = parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
+ if (readerParentDocs == null) {
+ readerParentDocs = cacheRecycler.intObjectMap(indexReader.maxDoc());
+ parentDocsPerReader.v().put(indexReader.getCoreCacheKey(), readerParentDocs);
+ }
+
+ ParentDoc parentDoc = readerParentDocs.v().get(parentDocId);
+ if (parentDoc == null) {
+ parentHitsResolved++; // we have a hit on a parent
+ parentDoc = new ParentDoc();
+ parentDoc.docId = parentDocId;
+ parentDoc.count = 1;
+ parentDoc.maxScore = scoreDoc.score;
+ parentDoc.sumScores = scoreDoc.score;
+ readerParentDocs.v().put(parentDocId, parentDoc);
+ } else {
+ parentDoc.count++;
+ parentDoc.sumScores += scoreDoc.score;
+ if (scoreDoc.score > parentDoc.maxScore) {
+ parentDoc.maxScore = scoreDoc.score;
+ }
+ }
+ }
+ }
+ }
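+ // Walk the hppc map's internal slots directly (allocated/keys/values) to avoid allocating an iterator,
+ // and sort each segment's parent docs by docId so the ParentScorer can emit them in order.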
+ boolean[] states = parentDocsPerReader.v().allocated;
+ Object[] keys = parentDocsPerReader.v().keys;
+ Object[] values = parentDocsPerReader.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ Recycler.V<IntObjectOpenHashMap<ParentDoc>> value = (Recycler.V<IntObjectOpenHashMap<ParentDoc>>) values[i];
+ ParentDoc[] _parentDocs = value.v().values().toArray(ParentDoc.class);
+ Arrays.sort(_parentDocs, PARENT_DOC_COMP);
+ parentDocs.v().put(keys[i], _parentDocs);
+ Releasables.release(value);
+ }
+ }
+ Releasables.release(parentDocsPerReader);
+ return parentHitsResolved;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || obj.getClass() != this.getClass()) {
+ return false;
+ }
+
+ TopChildrenQuery that = (TopChildrenQuery) obj;
+ if (!originalChildQuery.equals(that.originalChildQuery)) {
+ return false;
+ }
+ if (!childType.equals(that.childType)) {
+ return false;
+ }
+ if (incrementalFactor != that.incrementalFactor) {
+ return false;
+ }
+ if (getBoost() != that.getBoost()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = originalChildQuery.hashCode();
+ result = 31 * result + parentType.hashCode();
+ result = 31 * result + incrementalFactor;
+ result = 31 * result + Float.floatToIntBits(getBoost());
+ return result;
+ }
+
+ public String toString(String field) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery.toString(field)).append(')');
+ sb.append(ToStringUtils.boost(getBoost()));
+ return sb.toString();
+ }
+
+ private class ParentWeight extends Weight implements Releasable {
+
+ private final Weight queryWeight;
+ private final Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs;
+
+ public ParentWeight(Weight queryWeight, Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) throws IOException {
+ this.queryWeight = queryWeight;
+ this.parentDocs = parentDocs;
+ }
+
+ public Query getQuery() {
+ return TopChildrenQuery.this;
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ float sum = queryWeight.getValueForNormalization();
+ sum *= getBoost() * getBoost();
+ return sum;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ // Nothing to normalize
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ Releasables.release(parentDocs);
+ return true;
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ ParentDoc[] readerParentDocs = parentDocs.v().get(context.reader().getCoreCacheKey());
+ if (readerParentDocs != null) {
+ if (scoreType == ScoreType.MAX) {
+ return new ParentScorer(this, readerParentDocs) {
+ @Override
+ public float score() throws IOException {
+ assert doc.docId >= 0 && doc.docId < NO_MORE_DOCS;
+ return doc.maxScore;
+ }
+ };
+ } else if (scoreType == ScoreType.AVG) {
+ return new ParentScorer(this, readerParentDocs) {
+ @Override
+ public float score() throws IOException {
+ assert doc.docId >= 0 && doc.docId < NO_MORE_DOCS;
+ return doc.sumScores / doc.count;
+ }
+ };
+ } else if (scoreType == ScoreType.SUM) {
+ return new ParentScorer(this, readerParentDocs) {
+ @Override
+ public float score() throws IOException {
+ assert doc.docId >= 0 && doc.docId < NO_MORE_DOCS;
+ return doc.sumScores;
+ }
+
+ };
+ }
+ throw new ElasticsearchIllegalStateException("No support for score type [" + scoreType + "]");
+ }
+ return new EmptyScorer(this);
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return new Explanation(getBoost(), "not implemented yet...");
+ }
+ }
+
+ private static abstract class ParentScorer extends Scorer {
+
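+ // 'spare' serves as a sentinel ParentDoc: it marks the state before iteration starts (docId = -1) and,
+ // with docId set to NO_MORE_DOCS, the state after the scorer is exhausted.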
+ private final ParentDoc spare = new ParentDoc();
+ protected final ParentDoc[] docs;
+ protected ParentDoc doc = spare;
+ private int index = -1;
+
+ ParentScorer(ParentWeight weight, ParentDoc[] docs) throws IOException {
+ super(weight);
+ this.docs = docs;
+ spare.docId = -1;
+ spare.count = -1;
+ }
+
+ @Override
+ public final int docID() {
+ return doc.docId;
+ }
+
+ @Override
+ public final int advance(int target) throws IOException {
+ return slowAdvance(target);
+ }
+
+ @Override
+ public final int nextDoc() throws IOException {
+ if (++index >= docs.length) {
+ doc = spare;
+ doc.count = 0;
+ return (doc.docId = NO_MORE_DOCS);
+ }
+ return (doc = docs[index]).docId;
+ }
+
+ @Override
+ public final int freq() throws IOException {
+ return doc.count; // The number of matching child documents, propagated to the parent
+ }
+
+ @Override
+ public final long cost() {
+ return docs.length;
+ }
+ }
+
+ private static class ParentDocComparator implements Comparator<ParentDoc> {
+ @Override
+ public int compare(ParentDoc o1, ParentDoc o2) {
+ return o1.docId - o2.docId;
+ }
+ }
+
+ private static class ParentDoc {
+ public int docId;
+ public int count;
+ public float maxScore = Float.NaN;
+ public float sumScores = 0;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java
new file mode 100644
index 0000000..970499c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.docset.AndDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+
+import java.io.IOException;
+
+/**
+ */
+public class GeoDistanceFilter extends Filter {
+
+ private final double lat;
+
+ private final double lon;
+
+ private final double distance; // in the default distance unit (DistanceUnit.DEFAULT)
+
+ private final GeoDistance geoDistance;
+
+ private final IndexGeoPointFieldData indexFieldData;
+
+ private final GeoDistance.FixedSourceDistance fixedSourceDistance;
+ private GeoDistance.DistanceBoundingCheck distanceBoundingCheck;
+ private final Filter boundingBoxFilter;
+
+ public GeoDistanceFilter(double lat, double lon, double distance, GeoDistance geoDistance, IndexGeoPointFieldData indexFieldData, GeoPointFieldMapper mapper,
+ String optimizeBbox) {
+ this.lat = lat;
+ this.lon = lon;
+ this.distance = distance;
+ this.geoDistance = geoDistance;
+ this.indexFieldData = indexFieldData;
+
+ this.fixedSourceDistance = geoDistance.fixedSourceDistance(lat, lon, DistanceUnit.DEFAULT);
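+ // optimizeBbox modes: "none" disables the check (ALWAYS_INSTANCE), "memory" checks each doc against an
+ // in-memory bounding box, and "indexed" delegates to an indexed lat/lon range filter.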
+ if (optimizeBbox != null && !"none".equals(optimizeBbox)) {
+ distanceBoundingCheck = GeoDistance.distanceBoundingCheck(lat, lon, distance, DistanceUnit.DEFAULT);
+ if ("memory".equals(optimizeBbox)) {
+ boundingBoxFilter = null;
+ } else if ("indexed".equals(optimizeBbox)) {
+ boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper);
+ distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter
+ } else {
+ throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported");
+ }
+ } else {
+ distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE;
+ boundingBoxFilter = null;
+ }
+ }
+
+ public double lat() {
+ return lat;
+ }
+
+ public double lon() {
+ return lon;
+ }
+
+ public double distance() {
+ return distance;
+ }
+
+ public GeoDistance geoDistance() {
+ return geoDistance;
+ }
+
+ public String fieldName() {
+ return indexFieldData.getFieldNames().indexName();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ DocIdSet boundingBoxDocSet = null;
+ if (boundingBoxFilter != null) {
+ boundingBoxDocSet = boundingBoxFilter.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(boundingBoxDocSet)) {
+ return null;
+ }
+ }
+ final GeoPointValues values = indexFieldData.load(context).getGeoPointValues();
+ GeoDistanceDocSet distDocSet = new GeoDistanceDocSet(context.reader().maxDoc(), acceptedDocs, values, fixedSourceDistance, distanceBoundingCheck, distance);
+ if (boundingBoxDocSet == null) {
+ return distDocSet;
+ } else {
+ return new AndDocIdSet(new DocIdSet[]{boundingBoxDocSet, distDocSet});
+ }
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ GeoDistanceFilter filter = (GeoDistanceFilter) o;
+
+ if (Double.compare(filter.distance, distance) != 0) return false;
+ if (Double.compare(filter.lat, lat) != 0) return false;
+ if (Double.compare(filter.lon, lon) != 0) return false;
+ if (!indexFieldData.getFieldNames().indexName().equals(filter.indexFieldData.getFieldNames().indexName()))
+ return false;
+ if (geoDistance != filter.geoDistance) return false;
+
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "GeoDistanceFilter(" + indexFieldData.getFieldNames().indexName() + ", " + geoDistance + ", " + distance + ", " + lat + ", " + lon + ")";
+ }
+
+ @Override
+ public int hashCode() {
+ int result;
+ long temp;
+ temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L;
+ result = (int) (temp ^ (temp >>> 32));
+ temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L;
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ temp = distance != +0.0d ? Double.doubleToLongBits(distance) : 0L;
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ result = 31 * result + (geoDistance != null ? geoDistance.hashCode() : 0);
+ result = 31 * result + indexFieldData.getFieldNames().indexName().hashCode();
+ return result;
+ }
+
+ public static class GeoDistanceDocSet extends MatchDocIdSet {
+ private final double distance; // in the default distance unit (DistanceUnit.DEFAULT)
+ private final GeoPointValues values;
+ private final GeoDistance.FixedSourceDistance fixedSourceDistance;
+ private final GeoDistance.DistanceBoundingCheck distanceBoundingCheck;
+
+ public GeoDistanceDocSet(int maxDoc, @Nullable Bits acceptDocs, GeoPointValues values, GeoDistance.FixedSourceDistance fixedSourceDistance, GeoDistance.DistanceBoundingCheck distanceBoundingCheck,
+ double distance) {
+ super(maxDoc, acceptDocs);
+ this.values = values;
+ this.fixedSourceDistance = fixedSourceDistance;
+ this.distanceBoundingCheck = distanceBoundingCheck;
+ this.distance = distance;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+
+ final int length = values.setDocument(doc);
+ for (int i = 0; i < length; i++) {
+ GeoPoint point = values.nextValue();
+ if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
+ double d = fixedSourceDistance.calculate(point.lat(), point.lon());
+ if (d < distance) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java
new file mode 100644
index 0000000..7f0fe4a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.docset.AndDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class GeoDistanceRangeFilter extends Filter {
+
+ private final double lat;
+ private final double lon;
+
+ private final double inclusiveLowerPoint; // in meters
+ private final double inclusiveUpperPoint; // in meters
+
+ private final GeoDistance geoDistance;
+ private final GeoDistance.FixedSourceDistance fixedSourceDistance;
+ private GeoDistance.DistanceBoundingCheck distanceBoundingCheck;
+ private final Filter boundingBoxFilter;
+
+ private final IndexGeoPointFieldData indexFieldData;
+
+ public GeoDistanceRangeFilter(GeoPoint point, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper, GeoDistance geoDistance, GeoPointFieldMapper mapper, IndexGeoPointFieldData indexFieldData,
+ String optimizeBbox) {
+ this.lat = point.lat();
+ this.lon = point.lon();
+ this.geoDistance = geoDistance;
+ this.indexFieldData = indexFieldData;
+
+ this.fixedSourceDistance = geoDistance.fixedSourceDistance(lat, lon, DistanceUnit.DEFAULT);
+
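+ // Turn exclusive bounds into inclusive ones: stepping the sortable-long encoding by one yields the next
+ // (or previous) representable double value.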
+ if (lowerVal != null) {
+ double f = lowerVal.doubleValue();
+ long i = NumericUtils.doubleToSortableLong(f);
+ inclusiveLowerPoint = NumericUtils.sortableLongToDouble(includeLower ? i : (i + 1L));
+ } else {
+ inclusiveLowerPoint = Double.NEGATIVE_INFINITY;
+ }
+ if (upperVal != null) {
+ double f = upperVal.doubleValue();
+ long i = NumericUtils.doubleToSortableLong(f);
+ inclusiveUpperPoint = NumericUtils.sortableLongToDouble(includeUpper ? i : (i - 1L));
+ } else {
+ inclusiveUpperPoint = Double.POSITIVE_INFINITY;
+ // we disable the bounding box optimization in this case: with an unbounded upper point, a bounding box
+ // built around it would effectively include everything
+ // TODO we can create a bounding box up to from and "not" it
+ optimizeBbox = null;
+ }
+
+ if (optimizeBbox != null && !"none".equals(optimizeBbox)) {
+ distanceBoundingCheck = GeoDistance.distanceBoundingCheck(lat, lon, inclusiveUpperPoint, DistanceUnit.DEFAULT);
+ if ("memory".equals(optimizeBbox)) {
+ boundingBoxFilter = null;
+ } else if ("indexed".equals(optimizeBbox)) {
+ boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper);
+ distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter
+ } else {
+ throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported");
+ }
+ } else {
+ distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE;
+ boundingBoxFilter = null;
+ }
+ }
+
+ public double lat() {
+ return lat;
+ }
+
+ public double lon() {
+ return lon;
+ }
+
+ public GeoDistance geoDistance() {
+ return geoDistance;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ DocIdSet boundingBoxDocSet = null;
+ if (boundingBoxFilter != null) {
+ boundingBoxDocSet = boundingBoxFilter.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(boundingBoxDocSet)) {
+ return null;
+ }
+ }
+ GeoPointValues values = indexFieldData.load(context).getGeoPointValues();
+ GeoDistanceRangeDocSet distDocSet = new GeoDistanceRangeDocSet(context.reader().maxDoc(), acceptedDocs, values, fixedSourceDistance, distanceBoundingCheck, inclusiveLowerPoint, inclusiveUpperPoint);
+ if (boundingBoxDocSet == null) {
+ return distDocSet;
+ } else {
+ return new AndDocIdSet(new DocIdSet[]{boundingBoxDocSet, distDocSet});
+ }
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ GeoDistanceRangeFilter filter = (GeoDistanceRangeFilter) o;
+
+ if (Double.compare(filter.inclusiveLowerPoint, inclusiveLowerPoint) != 0) return false;
+ if (Double.compare(filter.inclusiveUpperPoint, inclusiveUpperPoint) != 0) return false;
+ if (Double.compare(filter.lat, lat) != 0) return false;
+ if (Double.compare(filter.lon, lon) != 0) return false;
+ if (!indexFieldData.getFieldNames().indexName().equals(filter.indexFieldData.getFieldNames().indexName()))
+ return false;
+ if (geoDistance != filter.geoDistance) return false;
+
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "GeoDistanceRangeFilter(" + indexFieldData.getFieldNames().indexName() + ", " + geoDistance + ", [" + inclusiveLowerPoint + " - " + inclusiveUpperPoint + "], " + lat + ", " + lon + ")";
+ }
+
+ @Override
+ public int hashCode() {
+ int result;
+ long temp;
+ temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L;
+ result = (int) (temp ^ (temp >>> 32));
+ temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L;
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ temp = inclusiveLowerPoint != +0.0d ? Double.doubleToLongBits(inclusiveLowerPoint) : 0L;
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ temp = inclusiveUpperPoint != +0.0d ? Double.doubleToLongBits(inclusiveUpperPoint) : 0L;
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ result = 31 * result + (geoDistance != null ? geoDistance.hashCode() : 0);
+ result = 31 * result + indexFieldData.getFieldNames().indexName().hashCode();
+ return result;
+ }
+
+ public static class GeoDistanceRangeDocSet extends MatchDocIdSet {
+
+ private final GeoPointValues values;
+ private final GeoDistance.FixedSourceDistance fixedSourceDistance;
+ private final GeoDistance.DistanceBoundingCheck distanceBoundingCheck;
+ private final double inclusiveLowerPoint; // in meters
+ private final double inclusiveUpperPoint; // in meters
+
+ public GeoDistanceRangeDocSet(int maxDoc, @Nullable Bits acceptDocs, GeoPointValues values, GeoDistance.FixedSourceDistance fixedSourceDistance, GeoDistance.DistanceBoundingCheck distanceBoundingCheck,
+ double inclusiveLowerPoint, double inclusiveUpperPoint) {
+ super(maxDoc, acceptDocs);
+ this.values = values;
+ this.fixedSourceDistance = fixedSourceDistance;
+ this.distanceBoundingCheck = distanceBoundingCheck;
+ this.inclusiveLowerPoint = inclusiveLowerPoint;
+ this.inclusiveUpperPoint = inclusiveUpperPoint;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int length = values.setDocument(doc);
+ for (int i = 0; i < length; i++) {
+ GeoPoint point = values.nextValue();
+ if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
+ double d = fixedSourceDistance.calculate(point.lat(), point.lon());
+ if (d >= inclusiveLowerPoint && d <= inclusiveUpperPoint) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonFilter.java
new file mode 100644
index 0000000..23f0ce6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonFilter.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ *
+ */
+public class GeoPolygonFilter extends Filter {
+
+ private final GeoPoint[] points;
+
+ private final IndexGeoPointFieldData indexFieldData;
+
+ public GeoPolygonFilter(IndexGeoPointFieldData indexFieldData, GeoPoint...points) {
+ this.points = points;
+ this.indexFieldData = indexFieldData;
+ }
+
+ public GeoPoint[] points() {
+ return points;
+ }
+
+ public String fieldName() {
+ return indexFieldData.getFieldNames().indexName();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ final GeoPointValues values = indexFieldData.load(context).getGeoPointValues();
+ return new GeoPolygonDocIdSet(context.reader().maxDoc(), acceptedDocs, values, points);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("GeoPolygonFilter(");
+ sb.append(indexFieldData.getFieldNames().indexName());
+ sb.append(", ").append(Arrays.toString(points)).append(')');
+ return sb.toString();
+ }
+
+ public static class GeoPolygonDocIdSet extends MatchDocIdSet {
+ private final GeoPointValues values;
+ private final GeoPoint[] points;
+
+ public GeoPolygonDocIdSet(int maxDoc, @Nullable Bits acceptDocs, GeoPointValues values, GeoPoint[] points) {
+ super(maxDoc, acceptDocs);
+ this.values = values;
+ this.points = points;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int length = values.setDocument(doc);
+ for (int i = 0; i < length; i++) {
+ GeoPoint point = values.nextValue();
+ if (pointInPolygon(points, point.lat(), point.lon())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
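+ // Ray casting (even-odd rule): a point is inside if a ray cast from it crosses the polygon's edges an odd
+ // number of times. The loop skips the edge between the last and first point, so callers are expected to
+ // pass a closed ring (first point repeated at the end).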
+ private static boolean pointInPolygon(GeoPoint[] points, double lat, double lon) {
+ boolean inPoly = false;
+
+ for (int i = 1; i < points.length; i++) {
+ if (points[i].lon() < lon && points[i-1].lon() >= lon
+ || points[i-1].lon() < lon && points[i].lon() >= lon) {
+ if (points[i].lat() + (lon - points[i].lon()) /
+ (points[i-1].lon() - points[i].lon()) * (points[i-1].lat() - points[i].lat()) < lat) {
+ inPoly = !inPoly;
+ }
+ }
+ }
+ return inPoly;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java
new file mode 100644
index 0000000..8bc3ffd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.docset.MatchDocIdSet;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class InMemoryGeoBoundingBoxFilter extends Filter {
+
+ private final GeoPoint topLeft;
+ private final GeoPoint bottomRight;
+
+ private final IndexGeoPointFieldData indexFieldData;
+
+ public InMemoryGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, IndexGeoPointFieldData indexFieldData) {
+ this.topLeft = topLeft;
+ this.bottomRight = bottomRight;
+ this.indexFieldData = indexFieldData;
+ }
+
+ public GeoPoint topLeft() {
+ return topLeft;
+ }
+
+ public GeoPoint bottomRight() {
+ return bottomRight;
+ }
+
+ public String fieldName() {
+ return indexFieldData.getFieldNames().indexName();
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ final GeoPointValues values = indexFieldData.load(context).getGeoPointValues();
+
+ // check whether the bounding box crosses the 180 degree meridian
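+ // (e.g. topLeft.lon = 170, bottomRight.lon = -170 wraps across the antimeridian)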
+ if (topLeft.lon() > bottomRight.lon()) {
+ return new Meridian180GeoBoundingBoxDocSet(context.reader().maxDoc(), acceptedDocs, values, topLeft, bottomRight);
+ } else {
+ return new GeoBoundingBoxDocSet(context.reader().maxDoc(), acceptedDocs, values, topLeft, bottomRight);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "GeoBoundingBoxFilter(" + indexFieldData.getFieldNames().indexName() + ", " + topLeft + ", " + bottomRight + ")";
+ }
+
+ public static class Meridian180GeoBoundingBoxDocSet extends MatchDocIdSet {
+ private final GeoPointValues values;
+ private final GeoPoint topLeft;
+ private final GeoPoint bottomRight;
+
+ public Meridian180GeoBoundingBoxDocSet(int maxDoc, @Nullable Bits acceptDocs, GeoPointValues values, GeoPoint topLeft, GeoPoint bottomRight) {
+ super(maxDoc, acceptDocs);
+ this.values = values;
+ this.topLeft = topLeft;
+ this.bottomRight = bottomRight;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int length = values.setDocument(doc);
+ for (int i = 0; i < length; i++) {
+ GeoPoint point = values.nextValue();
+ if ((topLeft.lon() <= point.lon() || bottomRight.lon() >= point.lon()) &&
+ (topLeft.lat() >= point.lat() && bottomRight.lat() <= point.lat())) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ public static class GeoBoundingBoxDocSet extends MatchDocIdSet {
+ private final GeoPointValues values;
+ private final GeoPoint topLeft;
+ private final GeoPoint bottomRight;
+
+ public GeoBoundingBoxDocSet(int maxDoc, @Nullable Bits acceptDocs, GeoPointValues values, GeoPoint topLeft, GeoPoint bottomRight) {
+ super(maxDoc, acceptDocs);
+ this.values = values;
+ this.topLeft = topLeft;
+ this.bottomRight = bottomRight;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+
+ @Override
+ protected boolean matchDoc(int doc) {
+ final int length = values.setDocument(doc);
+ for (int i = 0; i < length; i++) {
+ GeoPoint point = values.nextValue();
+ if (topLeft.lon() <= point.lon() && bottomRight.lon() >= point.lon()
+ && topLeft.lat() >= point.lat() && bottomRight.lat() <= point.lat()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java
new file mode 100644
index 0000000..cac948f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+
+import java.io.IOException;
+
+/**
+ * Builds geo bounding box filters backed by the indexed .lat/.lon sub-fields
+ * of a geo_point field; requires lat_lon to be enabled in the mapping.
+ */
+public class IndexedGeoBoundingBoxFilter {
+
+ public static Filter create(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) {
+ if (!fieldMapper.isEnableLatLon()) {
+ throw new ElasticsearchIllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldMapper.name() + "], can't use indexed filter on it");
+ }
+ // check whether the bounding box crosses the 180-degree meridian (date line)
+ if (topLeft.lon() > bottomRight.lon()) {
+ return new WestGeoBoundingBoxFilter(topLeft, bottomRight, fieldMapper);
+ } else {
+ return new EastGeoBoundingBoxFilter(topLeft, bottomRight, fieldMapper);
+ }
+ }
+
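+ // A box that crosses the date line is split into two longitude ranges,
+ // (-180, bottomRight.lon] and [topLeft.lon, 180), which are OR'ed
+ // together and then AND'ed with the latitude range.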
+ static class WestGeoBoundingBoxFilter extends Filter {
+
+ final Filter lonFilter1;
+ final Filter lonFilter2;
+ final Filter latFilter;
+
+ public WestGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) {
+ lonFilter1 = fieldMapper.lonMapper().rangeFilter(null, bottomRight.lon(), true, true);
+ lonFilter2 = fieldMapper.lonMapper().rangeFilter(topLeft.lon(), null, true, true);
+ latFilter = fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true);
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ FixedBitSet main;
+ DocIdSet set = lonFilter1.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(set)) {
+ main = null;
+ } else {
+ main = (FixedBitSet) set;
+ }
+
+ set = lonFilter2.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(set)) {
+ if (main == null) {
+ return null;
+ } else {
+ // nothing to do here, we remain with the main one
+ }
+ } else {
+ if (main == null) {
+ main = (FixedBitSet) set;
+ } else {
+ main.or((FixedBitSet) set);
+ }
+ }
+
+ set = latFilter.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(set)) {
+ return null;
+ }
+ main.and(set.iterator());
+ return main;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ WestGeoBoundingBoxFilter that = (WestGeoBoundingBoxFilter) o;
+
+ if (latFilter != null ? !latFilter.equals(that.latFilter) : that.latFilter != null) return false;
+ if (lonFilter1 != null ? !lonFilter1.equals(that.lonFilter1) : that.lonFilter1 != null) return false;
+ if (lonFilter2 != null ? !lonFilter2.equals(that.lonFilter2) : that.lonFilter2 != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = lonFilter1 != null ? lonFilter1.hashCode() : 0;
+ result = 31 * result + (lonFilter2 != null ? lonFilter2.hashCode() : 0);
+ result = 31 * result + (latFilter != null ? latFilter.hashCode() : 0);
+ return result;
+ }
+ }
+
+ static class EastGeoBoundingBoxFilter extends Filter {
+
+ final Filter lonFilter;
+ final Filter latFilter;
+
+ public EastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) {
+ lonFilter = fieldMapper.lonMapper().rangeFilter(topLeft.lon(), bottomRight.lon(), true, true);
+ latFilter = fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true);
+ }
+
+ @Override
+ public FixedBitSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ FixedBitSet main;
+ DocIdSet set = lonFilter.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(set)) {
+ return null;
+ }
+ main = (FixedBitSet) set;
+ set = latFilter.getDocIdSet(context, acceptedDocs);
+ if (DocIdSets.isEmpty(set)) {
+ return null;
+ }
+ main.and(set.iterator());
+ return main;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ EastGeoBoundingBoxFilter that = (EastGeoBoundingBoxFilter) o;
+
+ if (latFilter != null ? !latFilter.equals(that.latFilter) : that.latFilter != null) return false;
+ if (lonFilter != null ? !lonFilter.equals(that.lonFilter) : that.lonFilter != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = lonFilter != null ? lonFilter.hashCode() : 0;
+ result = 31 * result + (latFilter != null ? latFilter.hashCode() : 0);
+ return result;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java b/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java
new file mode 100644
index 0000000..ee1f76c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Set;
+
+/**
+ * A special query that accepts a top level parent matching query, and returns the nested docs of the matching parent
+ * doc as well. This is handy when deleting by query; don't use it for other purposes.
+ *
+ * @elasticsearch.internal
+ */
+public class IncludeNestedDocsQuery extends Query {
+
+ private final Filter parentFilter;
+ private final Query parentQuery;
+
+ // If we are rewritten, this is the original parent query we
+ // were passed; we use it for .equals() and
+ // .hashCode(). This makes the rewritten query equal to the
+ // original, so the user does not have to .rewrite() their
+ // query before searching:
+ private final Query origParentQuery;
+
+
+ public IncludeNestedDocsQuery(Query parentQuery, Filter parentFilter) {
+ this.origParentQuery = parentQuery;
+ this.parentQuery = parentQuery;
+ this.parentFilter = parentFilter;
+ }
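+
+ // Usage sketch (hypothetical; this query is internal): wrap the user query
+ // so that delete-by-query also removes each matching root doc's nested docs:
+ //   Query q = new IncludeNestedDocsQuery(userQuery, NonNestedDocsFilter.INSTANCE);
+ // where NonNestedDocsFilter.INSTANCE identifies the root (non-nested) docs.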
+
+ // For rewriting
+ IncludeNestedDocsQuery(Query rewrite, Query originalQuery, IncludeNestedDocsQuery previousInstance) {
+ this.origParentQuery = originalQuery;
+ this.parentQuery = rewrite;
+ this.parentFilter = previousInstance.parentFilter;
+ setBoost(previousInstance.getBoost());
+ }
+
+ // For cloning
+ IncludeNestedDocsQuery(Query originalQuery, IncludeNestedDocsQuery previousInstance) {
+ this.origParentQuery = originalQuery;
+ this.parentQuery = originalQuery;
+ this.parentFilter = previousInstance.parentFilter;
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
+ return new IncludeNestedDocsWeight(parentQuery, parentQuery.createWeight(searcher), parentFilter);
+ }
+
+ static class IncludeNestedDocsWeight extends Weight {
+
+ private final Query parentQuery;
+ private final Weight parentWeight;
+ private final Filter parentsFilter;
+
+ IncludeNestedDocsWeight(Query parentQuery, Weight parentWeight, Filter parentsFilter) {
+ this.parentQuery = parentQuery;
+ this.parentWeight = parentWeight;
+ this.parentsFilter = parentsFilter;
+ }
+
+ @Override
+ public Query getQuery() {
+ return parentQuery;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ parentWeight.normalize(norm, topLevelBoost);
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ return parentWeight.getValueForNormalization(); // this query is never boosted so just delegate...
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+ final Scorer parentScorer = parentWeight.scorer(context, true, false, acceptDocs);
+
+ // no matches
+ if (parentScorer == null) {
+ return null;
+ }
+
+ DocIdSet parents = parentsFilter.getDocIdSet(context, acceptDocs);
+ if (parents == null) {
+ // No matches
+ return null;
+ }
+ if (!(parents instanceof FixedBitSet)) {
+ throw new IllegalStateException("parentFilter must return FixedBitSet; got " + parents);
+ }
+
+ int firstParentDoc = parentScorer.nextDoc();
+ if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
+ // No matches
+ return null;
+ }
+ return new IncludeNestedDocsScorer(this, parentScorer, (FixedBitSet) parents, firstParentDoc);
+ }
+
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return null; // the query is used internally, never by users, so the explanation can be empty
+ }
+
+ @Override
+ public boolean scoresDocsOutOfOrder() {
+ return false;
+ }
+ }
+
+ static class IncludeNestedDocsScorer extends Scorer {
+
+ final Scorer parentScorer;
+ final FixedBitSet parentBits;
+
+ int currentChildPointer = -1;
+ int currentParentPointer = -1;
+ int currentDoc = -1;
+
+ IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, FixedBitSet parentBits, int currentParentPointer) {
+ super(weight);
+ this.parentScorer = parentScorer;
+ this.parentBits = parentBits;
+ this.currentParentPointer = currentParentPointer;
+ if (currentParentPointer == 0) {
+ currentChildPointer = 0;
+ } else {
+ this.currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
+ if (currentChildPointer == -1) {
+ // no previous parent is set, so we start (delete) from doc 0
+ currentChildPointer = 0;
+ } else {
+ currentChildPointer++; // we only care about children
+ }
+ }
+
+ currentDoc = currentChildPointer;
+ }
+
+ @Override
+ public Collection<ChildScorer> getChildren() {
+ return parentScorer.getChildren();
+ }
+
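+ // Lucene indexes nested (inner) docs immediately before their root doc,
+ // so each block occupies (prevParent, parent]; nextDoc() walks a block's
+ // children first and then returns the parent itself.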
+ public int nextDoc() throws IOException {
+ if (currentParentPointer == NO_MORE_DOCS) {
+ return (currentDoc = NO_MORE_DOCS);
+ }
+
+ if (currentChildPointer == currentParentPointer) {
+ // we need to return the current parent as well, but prepare to return
+ // the next set of children
+ currentDoc = currentParentPointer;
+ currentParentPointer = parentScorer.nextDoc();
+ if (currentParentPointer != NO_MORE_DOCS) {
+ currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
+ if (currentChildPointer == -1) {
+ // no previous set parent, just set the child to the current parent
+ currentChildPointer = currentParentPointer;
+ } else {
+ currentChildPointer++; // we only care about children
+ }
+ }
+ } else {
+ currentDoc = currentChildPointer++;
+ }
+
+ assert currentDoc != -1;
+ return currentDoc;
+ }
+
+ public int advance(int target) throws IOException {
+ if (target == NO_MORE_DOCS) {
+ return (currentDoc = NO_MORE_DOCS);
+ }
+
+ if (target == 0) {
+ return nextDoc();
+ }
+
+ if (target < currentParentPointer) {
+ currentDoc = currentParentPointer = parentScorer.advance(target);
+ if (currentParentPointer == NO_MORE_DOCS) {
+ return (currentDoc = NO_MORE_DOCS);
+ }
+ if (currentParentPointer == 0) {
+ currentChildPointer = 0;
+ } else {
+ currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
+ if (currentChildPointer == -1) {
+ // no previous set parent, just set the child to 0 to delete all up to the parent
+ currentChildPointer = 0;
+ } else {
+ currentChildPointer++; // we only care about children
+ }
+ }
+ } else {
+ currentDoc = currentChildPointer++;
+ }
+
+ return currentDoc;
+ }
+
+ public float score() throws IOException {
+ return parentScorer.score();
+ }
+
+ public int freq() throws IOException {
+ return parentScorer.freq();
+ }
+
+ public int docID() {
+ return currentDoc;
+ }
+
+ @Override
+ public long cost() {
+ return parentScorer.cost();
+ }
+ }
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ parentQuery.extractTerms(terms);
+ }
+
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ final Query parentRewrite = parentQuery.rewrite(reader);
+ if (parentRewrite != parentQuery) {
+ return new IncludeNestedDocsQuery(parentRewrite, parentQuery, this);
+ } else {
+ return this;
+ }
+ }
+
+ @Override
+ public String toString(String field) {
+ return "IncludeNestedDocsQuery (" + parentQuery.toString() + ")";
+ }
+
+ @Override
+ public boolean equals(Object _other) {
+ if (_other instanceof IncludeNestedDocsQuery) {
+ final IncludeNestedDocsQuery other = (IncludeNestedDocsQuery) _other;
+ return origParentQuery.equals(other.origParentQuery) && parentFilter.equals(other.parentFilter);
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int hash = 1;
+ hash = prime * hash + origParentQuery.hashCode();
+ hash = prime * hash + parentFilter.hashCode();
+ return hash;
+ }
+
+ @Override
+ public Query clone() {
+ Query clonedQuery = origParentQuery.clone();
+ return new IncludeNestedDocsQuery(clonedQuery, this);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/nested/NestedDocsFilter.java b/src/main/java/org/elasticsearch/index/search/nested/NestedDocsFilter.java
new file mode 100644
index 0000000..0b46f64
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/nested/NestedDocsFilter.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.PrefixFilter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+
+import java.io.IOException;
+
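+/**
+ * Filter matching only nested (inner) documents, which Elasticsearch marks
+ * with a "__" prefix on the _type field.
+ */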
+public class NestedDocsFilter extends Filter {
+
+ public static final NestedDocsFilter INSTANCE = new NestedDocsFilter();
+
+ private final PrefixFilter filter = new PrefixFilter(new Term(TypeFieldMapper.NAME, new BytesRef("__")));
+
+ private final int hashCode = filter.hashCode();
+
+ private NestedDocsFilter() {
+
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return filter.getDocIdSet(context, acceptDocs);
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj == INSTANCE;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/search/nested/NestedFieldComparatorSource.java b/src/main/java/org/elasticsearch/index/search/nested/NestedFieldComparatorSource.java
new file mode 100644
index 0000000..425d8a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/nested/NestedFieldComparatorSource.java
@@ -0,0 +1,396 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.NestedWrappableComparator;
+import org.elasticsearch.index.fielddata.fieldcomparator.NumberComparatorBase;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ * Comparator source that sorts root documents by a value (min, max, sum or
+ * avg) derived from their nested (inner) documents.
+ */
+public class NestedFieldComparatorSource extends IndexFieldData.XFieldComparatorSource {
+
+ private final SortMode sortMode;
+ private final IndexFieldData.XFieldComparatorSource wrappedSource;
+ private final Filter rootDocumentsFilter;
+ private final Filter innerDocumentsFilter;
+
+ public NestedFieldComparatorSource(SortMode sortMode, IndexFieldData.XFieldComparatorSource wrappedSource, Filter rootDocumentsFilter, Filter innerDocumentsFilter) {
+ this.sortMode = sortMode;
+ this.wrappedSource = wrappedSource;
+ this.rootDocumentsFilter = rootDocumentsFilter;
+ this.innerDocumentsFilter = innerDocumentsFilter;
+ }
+
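+ // Example (hypothetical mapping): sorting root documents by the smallest
+ // "price" among their nested docs uses SortMode.MIN, which selects the
+ // Lowest comparator below.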
+ @Override
+ public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ // +1: have one spare slot for value comparison between inner documents.
+ FieldComparator wrappedComparator = wrappedSource.newComparator(fieldname, numHits + 1, sortPos, reversed);
+ switch (sortMode) {
+ case MAX:
+ return new NestedFieldComparator.Highest(wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, numHits);
+ case MIN:
+ return new NestedFieldComparator.Lowest(wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, numHits);
+ case SUM:
+ return new NestedFieldComparator.Sum((NumberComparatorBase<?>) wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, numHits);
+ case AVG:
+ return new NestedFieldComparator.Avg((NumberComparatorBase<?>) wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, numHits);
+ default:
+ throw new ElasticsearchIllegalArgumentException(
+ String.format(Locale.ROOT, "Unsupported sort_mode[%s] for nested type", sortMode)
+ );
+ }
+ }
+
+ @Override
+ public SortField.Type reducedType() {
+ return wrappedSource.reducedType();
+ }
+
+}
+
+abstract class NestedFieldComparator extends FieldComparator {
+
+ final Filter rootDocumentsFilter;
+ final Filter innerDocumentsFilter;
+ final int spareSlot;
+
+ FieldComparator wrappedComparator;
+ FixedBitSet rootDocuments;
+ FixedBitSet innerDocuments;
+ int bottomSlot;
+
+ NestedFieldComparator(FieldComparator wrappedComparator, Filter rootDocumentsFilter, Filter innerDocumentsFilter, int spareSlot) {
+ this.wrappedComparator = wrappedComparator;
+ this.rootDocumentsFilter = rootDocumentsFilter;
+ this.innerDocumentsFilter = innerDocumentsFilter;
+ this.spareSlot = spareSlot;
+ }
+
+ @Override
+ public final int compare(int slot1, int slot2) {
+ return wrappedComparator.compare(slot1, slot2);
+ }
+
+ @Override
+ public final void setBottom(int slot) {
+ wrappedComparator.setBottom(slot);
+ this.bottomSlot = slot;
+ }
+
+ @Override
+ public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ DocIdSet innerDocuments = innerDocumentsFilter.getDocIdSet(context, null);
+ if (DocIdSets.isEmpty(innerDocuments)) {
+ this.innerDocuments = null;
+ } else if (innerDocuments instanceof FixedBitSet) {
+ this.innerDocuments = (FixedBitSet) innerDocuments;
+ } else {
+ this.innerDocuments = DocIdSets.toFixedBitSet(innerDocuments.iterator(), context.reader().maxDoc());
+ }
+ DocIdSet rootDocuments = rootDocumentsFilter.getDocIdSet(context, null);
+ if (DocIdSets.isEmpty(rootDocuments)) {
+ this.rootDocuments = null;
+ } else if (rootDocuments instanceof FixedBitSet) {
+ this.rootDocuments = (FixedBitSet) rootDocuments;
+ } else {
+ this.rootDocuments = DocIdSets.toFixedBitSet(rootDocuments.iterator(), context.reader().maxDoc());
+ }
+
+ wrappedComparator = wrappedComparator.setNextReader(context);
+ return this;
+ }
+
+ @Override
+ public final Object value(int slot) {
+ return wrappedComparator.value(slot);
+ }
+
+ @Override
+ public final int compareDocToValue(int rootDoc, Object value) throws IOException {
+ throw new UnsupportedOperationException("compareDocToValue() not used for sorting in ES");
+ }
+
+ final static class Lowest extends NestedFieldComparator {
+
+ Lowest(FieldComparator wrappedComparator, Filter parentFilter, Filter childFilter, int spareSlot) {
+ super(wrappedComparator, parentFilter, childFilter, spareSlot);
+ }
+
+ @Override
+ public int compareBottom(int rootDoc) throws IOException {
+ if (rootDoc == 0 || rootDocuments == null || innerDocuments == null) {
+ return compareBottomMissing(wrappedComparator);
+ }
+
+ // Compare the bottom slot against the lowest value among this root doc's nested docs.
+ int prevRootDoc = rootDocuments.prevSetBit(rootDoc - 1);
+ int nestedDoc = innerDocuments.nextSetBit(prevRootDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return compareBottomMissing(wrappedComparator);
+ }
+
+ // If bottom compares greater than any nested value, it also compares greater than the lowest, so short-circuit.
+ int cmp = wrappedComparator.compareBottom(nestedDoc);
+ if (cmp > 0) {
+ return cmp;
+ }
+
+ while (true) {
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return cmp;
+ }
+ int cmp1 = wrappedComparator.compareBottom(nestedDoc);
+ if (cmp1 > 0) {
+ return cmp1;
+ } else {
+ if (cmp1 == 0) {
+ cmp = 0;
+ }
+ }
+ }
+ }
+
+ @Override
+ public void copy(int slot, int rootDoc) throws IOException {
+ if (rootDoc == 0 || rootDocuments == null || innerDocuments == null) {
+ copyMissing(wrappedComparator, slot);
+ return;
+ }
+
+ // We need to copy the lowest value from all nested docs into slot.
+ int prevRootDoc = rootDocuments.prevSetBit(rootDoc - 1);
+ int nestedDoc = innerDocuments.nextSetBit(prevRootDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ copyMissing(wrappedComparator, slot);
+ return;
+ }
+ wrappedComparator.copy(slot, nestedDoc);
+
+ while (true) {
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return;
+ }
+ wrappedComparator.copy(spareSlot, nestedDoc);
+ if (wrappedComparator.compare(spareSlot, slot) < 0) {
+ wrappedComparator.copy(slot, nestedDoc);
+ }
+ }
+ }
+
+ }
+
+ final static class Highest extends NestedFieldComparator {
+
+ Highest(FieldComparator wrappedComparator, Filter parentFilter, Filter childFilter, int spareSlot) {
+ super(wrappedComparator, parentFilter, childFilter, spareSlot);
+ }
+
+ @Override
+ public int compareBottom(int rootDoc) throws IOException {
+ if (rootDoc == 0 || rootDocuments == null || innerDocuments == null) {
+ return compareBottomMissing(wrappedComparator);
+ }
+
+ int prevRootDoc = rootDocuments.prevSetBit(rootDoc - 1);
+ int nestedDoc = innerDocuments.nextSetBit(prevRootDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return compareBottomMissing(wrappedComparator);
+ }
+
+ int cmp = wrappedComparator.compareBottom(nestedDoc);
+ if (cmp < 0) {
+ return cmp;
+ }
+
+ while (true) {
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return cmp;
+ }
+ int cmp1 = wrappedComparator.compareBottom(nestedDoc);
+ if (cmp1 < 0) {
+ return cmp1;
+ } else if (cmp1 == 0) {
+ cmp = 0;
+ }
+ }
+ }
+
+ @Override
+ public void copy(int slot, int rootDoc) throws IOException {
+ if (rootDoc == 0 || rootDocuments == null || innerDocuments == null) {
+ copyMissing(wrappedComparator, slot);
+ return;
+ }
+
+ int prevRootDoc = rootDocuments.prevSetBit(rootDoc - 1);
+ int nestedDoc = innerDocuments.nextSetBit(prevRootDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ copyMissing(wrappedComparator, slot);
+ return;
+ }
+ wrappedComparator.copy(slot, nestedDoc);
+
+ while (true) {
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return;
+ }
+ wrappedComparator.copy(spareSlot, nestedDoc);
+ if (wrappedComparator.compare(spareSlot, slot) > 0) {
+ wrappedComparator.copy(slot, nestedDoc);
+ }
+ }
+ }
+
+ }
+
+ static abstract class NumericNestedFieldComparatorBase extends NestedFieldComparator {
+ protected NumberComparatorBase numberComparator;
+
+ NumericNestedFieldComparatorBase(NumberComparatorBase wrappedComparator, Filter rootDocumentsFilter, Filter innerDocumentsFilter, int spareSlot) {
+ super(wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, spareSlot);
+ this.numberComparator = wrappedComparator;
+ }
+
+ @Override
+ public final int compareBottom(int rootDoc) throws IOException {
+ if (rootDoc == 0 || rootDocuments == null || innerDocuments == null) {
+ return compareBottomMissing(wrappedComparator);
+ }
+
+ final int prevRootDoc = rootDocuments.prevSetBit(rootDoc - 1);
+ int nestedDoc = innerDocuments.nextSetBit(prevRootDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ return compareBottomMissing(wrappedComparator);
+ }
+
+ int counter = 1;
+ wrappedComparator.copy(spareSlot, nestedDoc);
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ while (nestedDoc > prevRootDoc && nestedDoc < rootDoc) {
+ onNested(spareSlot, nestedDoc);
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ counter++;
+ }
+ afterNested(spareSlot, counter);
+ return compare(bottomSlot, spareSlot);
+ }
+
+ @Override
+ public final void copy(int slot, int rootDoc) throws IOException {
+ if (rootDoc == 0 || rootDocuments == null || innerDocuments == null) {
+ copyMissing(wrappedComparator, slot);
+ return;
+ }
+
+ final int prevRootDoc = rootDocuments.prevSetBit(rootDoc - 1);
+ int nestedDoc = innerDocuments.nextSetBit(prevRootDoc + 1);
+ if (nestedDoc >= rootDoc || nestedDoc == -1) {
+ copyMissing(wrappedComparator, slot);
+ return;
+ }
+ int counter = 1;
+ wrappedComparator.copy(slot, nestedDoc);
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ while (nestedDoc > prevRootDoc && nestedDoc < rootDoc) {
+ onNested(slot, nestedDoc);
+ nestedDoc = innerDocuments.nextSetBit(nestedDoc + 1);
+ counter++;
+ }
+ afterNested(slot, counter);
+ }
+
+ protected abstract void onNested(int slot, int nestedDoc);
+
+ protected abstract void afterNested(int slot, int count);
+
+ @Override
+ public final FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ super.setNextReader(context);
+ numberComparator = (NumberComparatorBase) super.wrappedComparator;
+ return this;
+ }
+ }
+
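+ // Sum accumulates each nested value into the slot via add(); Avg does the
+ // same and then divides by the number of nested docs in afterNested().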
+ final static class Sum extends NumericNestedFieldComparatorBase {
+
+ Sum(NumberComparatorBase wrappedComparator, Filter rootDocumentsFilter, Filter innerDocumentsFilter, int spareSlot) {
+ super(wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, spareSlot);
+ }
+
+ @Override
+ protected void onNested(int slot, int nestedDoc) {
+ numberComparator.add(slot, nestedDoc);
+ }
+
+ @Override
+ protected void afterNested(int slot, int count) {
+ }
+
+ }
+
+ final static class Avg extends NumericNestedFieldComparatorBase {
+ Avg(NumberComparatorBase wrappedComparator, Filter rootDocumentsFilter, Filter innerDocumentsFilter, int spareSlot) {
+ super(wrappedComparator, rootDocumentsFilter, innerDocumentsFilter, spareSlot);
+ }
+
+ @Override
+ protected void onNested(int slot, int nestedDoc) {
+ numberComparator.add(slot, nestedDoc);
+ }
+
+ @Override
+ protected void afterNested(int slot, int count) {
+ numberComparator.divide(slot, count);
+ }
+
+
+ }
+
+ static final void copyMissing(FieldComparator<?> comparator, int slot) {
+ if (comparator instanceof NestedWrappableComparator<?>) {
+ ((NestedWrappableComparator<?>) comparator).missing(slot);
+ }
+ }
+
+ static final int compareBottomMissing(FieldComparator<?> comparator) {
+ if (comparator instanceof NestedWrappableComparator<?>) {
+ return ((NestedWrappableComparator<?>) comparator).compareBottomMissing();
+ } else {
+ return 0;
+ }
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java b/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java
new file mode 100644
index 0000000..50e1df9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.PrefixFilter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+
+import java.io.IOException;
+
+public class NonNestedDocsFilter extends Filter {
+
+ public static final NonNestedDocsFilter INSTANCE = new NonNestedDocsFilter();
+
+ private final PrefixFilter filter = new PrefixFilter(new Term(TypeFieldMapper.NAME, new BytesRef("__")));
+
+ private final int hashCode = filter.hashCode();
+
+ private NonNestedDocsFilter() {
+
+ }
+
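+ // The prefix filter matches nested docs (their _type starts with "__");
+ // flipping that bit set therefore yields exactly the non-nested docs.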
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ DocIdSet docSet = filter.getDocIdSet(context, acceptDocs);
+ if (DocIdSets.isEmpty(docSet)) {
+ // will almost never happen; the parent filter in BlockJoinQuery needs a
+ // FixedBitSet, so we create and cache one anyway...
+ docSet = new FixedBitSet(context.reader().maxDoc());
+ }
+ ((FixedBitSet) docSet).flip(0, context.reader().maxDoc());
+ return docSet;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj == INSTANCE;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java b/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java
new file mode 100644
index 0000000..bf8aa8e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.shape;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Service which retrieves pre-indexed Shapes from another index
+ */
+public class ShapeFetchService extends AbstractComponent {
+
+ private final Client client;
+
+ @Inject
+ public ShapeFetchService(Client client, Settings settings) {
+ super(settings);
+ this.client = client;
+ }
+
+ /**
+ * Fetches the Shape with the given ID in the given type and index.
+ *
+ * @param id ID of the Shape to fetch
+ * @param type Index type where the Shape is indexed
+ * @param index Index where the Shape is indexed
+ * @param path Name or path of the field in the Shape Document where the Shape itself is located
+ * @return Shape with the given ID
+ * @throws IOException Can be thrown while parsing the Shape Document and extracting the Shape
+ */
+ public ShapeBuilder fetch(String id, String type, String index, String path) throws IOException {
+ GetResponse response = client.get(new GetRequest(index, type, id).preference("_local").operationThreaded(false)).actionGet();
+ if (!response.isExists()) {
+ throw new ElasticsearchIllegalArgumentException("Shape with ID [" + id + "] in type [" + type + "] not found");
+ }
+
+ String[] pathElements = Strings.splitStringToArray(path, '.');
+ int currentPathSlot = 0;
+
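+ // For example (hypothetical path "location.shape"): every field that is
+ // not "location" is skipped; inside "location", every field that is not
+ // "shape" is skipped; the shape is parsed once the full path has matched.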
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(response.getSourceAsBytesRef());
+ XContentParser.Token currentToken;
+ while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (currentToken == XContentParser.Token.FIELD_NAME) {
+ if (pathElements[currentPathSlot].equals(parser.currentName())) {
+ parser.nextToken();
+ if (++currentPathSlot == pathElements.length) {
+ return ShapeBuilder.parse(parser);
+ }
+ } else {
+ parser.nextToken();
+ parser.skipChildren();
+ }
+ }
+ }
+ throw new ElasticsearchIllegalStateException("Shape with ID [" + id + "] found but missing [" + path + "] field");
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/shape/ShapeModule.java b/src/main/java/org/elasticsearch/index/search/shape/ShapeModule.java
new file mode 100644
index 0000000..510d04f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/shape/ShapeModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.shape;
+
+import org.elasticsearch.common.geo.ShapesAvailability;
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class ShapeModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ // TODO: We could wrap this entire module in a JTS_AVAILABILITY check
+ if (ShapesAvailability.JTS_AVAILABLE) {
+ bind(ShapeFetchService.class).asEagerSingleton();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/slowlog/ShardSlowLogSearchService.java b/src/main/java/org/elasticsearch/index/search/slowlog/ShardSlowLogSearchService.java
new file mode 100644
index 0000000..77ec6ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/slowlog/ShardSlowLogSearchService.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.slowlog;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Per-shard slow log for the query and fetch phases of search requests.
+ */
+public class ShardSlowLogSearchService extends AbstractIndexShardComponent {
+
+ private boolean reformat;
+
+ private long queryWarnThreshold;
+ private long queryInfoThreshold;
+ private long queryDebugThreshold;
+ private long queryTraceThreshold;
+
+ private long fetchWarnThreshold;
+ private long fetchInfoThreshold;
+ private long fetchDebugThreshold;
+ private long fetchTraceThreshold;
+
+ private String level;
+
+ private final ESLogger queryLogger;
+ private final ESLogger fetchLogger;
+
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN = "index.search.slowlog.threshold.query.warn";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO = "index.search.slowlog.threshold.query.info";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG = "index.search.slowlog.threshold.query.debug";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE = "index.search.slowlog.threshold.query.trace";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN = "index.search.slowlog.threshold.fetch.warn";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO = "index.search.slowlog.threshold.fetch.info";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG = "index.search.slowlog.threshold.fetch.debug";
+ public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE = "index.search.slowlog.threshold.fetch.trace";
+ public static final String INDEX_SEARCH_SLOWLOG_REFORMAT = "index.search.slowlog.reformat";
+ public static final String INDEX_SEARCH_SLOWLOG_LEVEL = "index.search.slowlog.level";
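+
+ // Example dynamic settings (values are illustrative):
+ //   index.search.slowlog.threshold.query.warn: 10s
+ //   index.search.slowlog.threshold.fetch.debug: 500ms
+ // A phase is logged at the most severe level whose threshold it exceeds;
+ // -1, the default set in the constructor, disables a level.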
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public synchronized void onRefreshSettings(Settings settings) {
+ long queryWarnThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.queryWarnThreshold)).nanos();
+ if (queryWarnThreshold != ShardSlowLogSearchService.this.queryWarnThreshold) {
+ ShardSlowLogSearchService.this.queryWarnThreshold = queryWarnThreshold;
+ }
+ long queryInfoThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.queryInfoThreshold)).nanos();
+ if (queryInfoThreshold != ShardSlowLogSearchService.this.queryInfoThreshold) {
+ ShardSlowLogSearchService.this.queryInfoThreshold = queryInfoThreshold;
+ }
+ long queryDebugThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.queryDebugThreshold)).nanos();
+ if (queryDebugThreshold != ShardSlowLogSearchService.this.queryDebugThreshold) {
+ ShardSlowLogSearchService.this.queryDebugThreshold = queryDebugThreshold;
+ }
+ long queryTraceThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.queryTraceThreshold)).nanos();
+ if (queryTraceThreshold != ShardSlowLogSearchService.this.queryTraceThreshold) {
+ ShardSlowLogSearchService.this.queryTraceThreshold = queryTraceThreshold;
+ }
+
+ long fetchWarnThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.fetchWarnThreshold)).nanos();
+ if (fetchWarnThreshold != ShardSlowLogSearchService.this.fetchWarnThreshold) {
+ ShardSlowLogSearchService.this.fetchWarnThreshold = fetchWarnThreshold;
+ }
+ long fetchInfoThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.fetchInfoThreshold)).nanos();
+ if (fetchInfoThreshold != ShardSlowLogSearchService.this.fetchInfoThreshold) {
+ ShardSlowLogSearchService.this.fetchInfoThreshold = fetchInfoThreshold;
+ }
+ long fetchDebugThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.fetchDebugThreshold)).nanos();
+ if (fetchDebugThreshold != ShardSlowLogSearchService.this.fetchDebugThreshold) {
+ ShardSlowLogSearchService.this.fetchDebugThreshold = fetchDebugThreshold;
+ }
+ long fetchTraceThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, TimeValue.timeValueNanos(ShardSlowLogSearchService.this.fetchTraceThreshold)).nanos();
+ if (fetchTraceThreshold != ShardSlowLogSearchService.this.fetchTraceThreshold) {
+ ShardSlowLogSearchService.this.fetchTraceThreshold = fetchTraceThreshold;
+ }
+
+ String level = settings.get(INDEX_SEARCH_SLOWLOG_LEVEL, ShardSlowLogSearchService.this.level);
+ if (!level.equals(ShardSlowLogSearchService.this.level)) {
+ ShardSlowLogSearchService.this.queryLogger.setLevel(level.toUpperCase(Locale.ROOT));
+ ShardSlowLogSearchService.this.fetchLogger.setLevel(level.toUpperCase(Locale.ROOT));
+ ShardSlowLogSearchService.this.level = level;
+ }
+
+ boolean reformat = settings.getAsBoolean(INDEX_SEARCH_SLOWLOG_REFORMAT, ShardSlowLogSearchService.this.reformat);
+ if (reformat != ShardSlowLogSearchService.this.reformat) {
+ ShardSlowLogSearchService.this.reformat = reformat;
+ }
+ }
+ }
+
+ @Inject
+ public ShardSlowLogSearchService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService) {
+ super(shardId, indexSettings);
+
+ this.reformat = componentSettings.getAsBoolean("reformat", true);
+
+ this.queryWarnThreshold = componentSettings.getAsTime("threshold.query.warn", TimeValue.timeValueNanos(-1)).nanos();
+ this.queryInfoThreshold = componentSettings.getAsTime("threshold.query.info", TimeValue.timeValueNanos(-1)).nanos();
+ this.queryDebugThreshold = componentSettings.getAsTime("threshold.query.debug", TimeValue.timeValueNanos(-1)).nanos();
+ this.queryTraceThreshold = componentSettings.getAsTime("threshold.query.trace", TimeValue.timeValueNanos(-1)).nanos();
+
+ this.fetchWarnThreshold = componentSettings.getAsTime("threshold.fetch.warn", TimeValue.timeValueNanos(-1)).nanos();
+ this.fetchInfoThreshold = componentSettings.getAsTime("threshold.fetch.info", TimeValue.timeValueNanos(-1)).nanos();
+ this.fetchDebugThreshold = componentSettings.getAsTime("threshold.fetch.debug", TimeValue.timeValueNanos(-1)).nanos();
+ this.fetchTraceThreshold = componentSettings.getAsTime("threshold.fetch.trace", TimeValue.timeValueNanos(-1)).nanos();
+
+ this.level = componentSettings.get("level", "TRACE").toUpperCase(Locale.ROOT);
+
+ this.queryLogger = Loggers.getLogger(logger, ".query");
+ this.fetchLogger = Loggers.getLogger(logger, ".fetch");
+
+ queryLogger.setLevel(level);
+ fetchLogger.setLevel(level);
+
+ indexSettingsService.addListener(new ApplySettings());
+ }
+
+ public void onQueryPhase(SearchContext context, long tookInNanos) {
+ if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {
+ queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ } else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) {
+ queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) {
+ queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) {
+ queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ }
+ }
+
+ public void onFetchPhase(SearchContext context, long tookInNanos) {
+ if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) {
+ fetchLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ } else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) {
+ fetchLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ } else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) {
+ fetchLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ } else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) {
+ fetchLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
+ }
+ }
+
+ public static class SlowLogSearchContextPrinter {
+ private final SearchContext context;
+ private final long tookInNanos;
+ private final boolean reformat;
+
+ public SlowLogSearchContextPrinter(SearchContext context, long tookInNanos, boolean reformat) {
+ this.context = context;
+ this.tookInNanos = tookInNanos;
+ this.reformat = reformat;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ");
+ if (context.types() == null) {
+ sb.append("types[], ");
+ } else {
+ sb.append("types[");
+ Strings.arrayToDelimitedString(context.types(), ",", sb);
+ sb.append("], ");
+ }
+ if (context.groupStats() == null) {
+ sb.append("stats[], ");
+ } else {
+ sb.append("stats[");
+ Strings.collectionToDelimitedString(context.groupStats(), ",", "", "", sb);
+ sb.append("], ");
+ }
+ sb.append("search_type[").append(context.searchType()).append("], total_shards[").append(context.numberOfShards()).append("], ");
+ if (context.request().source() != null && context.request().source().length() > 0) {
+ try {
+ sb.append("source[").append(XContentHelper.convertToJson(context.request().source(), reformat)).append("], ");
+ } catch (IOException e) {
+ sb.append("source[_failed_to_convert_], ");
+ }
+ } else {
+ sb.append("source[], ");
+ }
+ if (context.request().extraSource() != null && context.request().extraSource().length() > 0) {
+ try {
+ sb.append("extra_source[").append(XContentHelper.convertToJson(context.request().extraSource(), reformat)).append("], ");
+ } catch (IOException e) {
+ sb.append("extra_source[_failed_to_convert_], ");
+ }
+ } else {
+ sb.append("extra_source[], ");
+ }
+ return sb.toString();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
new file mode 100644
index 0000000..7e2fda3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Statistics for the query and fetch phases of search, optionally broken
+ * down per stats group.
+ */
+public class SearchStats implements Streamable, ToXContent {
+
+ public static class Stats implements Streamable, ToXContent {
+
+ private long queryCount;
+ private long queryTimeInMillis;
+ private long queryCurrent;
+
+ private long fetchCount;
+ private long fetchTimeInMillis;
+ private long fetchCurrent;
+
+ Stats() {
+
+ }
+
+ public Stats(long queryCount, long queryTimeInMillis, long queryCurrent, long fetchCount, long fetchTimeInMillis, long fetchCurrent) {
+ this.queryCount = queryCount;
+ this.queryTimeInMillis = queryTimeInMillis;
+ this.queryCurrent = queryCurrent;
+ this.fetchCount = fetchCount;
+ this.fetchTimeInMillis = fetchTimeInMillis;
+ this.fetchCurrent = fetchCurrent;
+ }
+
+ public void add(Stats stats) {
+ queryCount += stats.queryCount;
+ queryTimeInMillis += stats.queryTimeInMillis;
+ queryCurrent += stats.queryCurrent;
+
+ fetchCount += stats.fetchCount;
+ fetchTimeInMillis += stats.fetchTimeInMillis;
+ fetchCurrent += stats.fetchCurrent;
+ }
+
+ public long getQueryCount() {
+ return queryCount;
+ }
+
+ public TimeValue getQueryTime() {
+ return new TimeValue(queryTimeInMillis);
+ }
+
+ public long getQueryTimeInMillis() {
+ return queryTimeInMillis;
+ }
+
+ public long getQueryCurrent() {
+ return queryCurrent;
+ }
+
+ public long getFetchCount() {
+ return fetchCount;
+ }
+
+ public TimeValue getFetchTime() {
+ return new TimeValue(fetchTimeInMillis);
+ }
+
+ public long getFetchTimeInMillis() {
+ return fetchTimeInMillis;
+ }
+
+ public long getFetchCurrent() {
+ return fetchCurrent;
+ }
+
+
+ public static Stats readStats(StreamInput in) throws IOException {
+ Stats stats = new Stats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ queryCount = in.readVLong();
+ queryTimeInMillis = in.readVLong();
+ queryCurrent = in.readVLong();
+
+ fetchCount = in.readVLong();
+ fetchTimeInMillis = in.readVLong();
+ fetchCurrent = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(queryCount);
+ out.writeVLong(queryTimeInMillis);
+ out.writeVLong(queryCurrent);
+
+ out.writeVLong(fetchCount);
+ out.writeVLong(fetchTimeInMillis);
+ out.writeVLong(fetchCurrent);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.QUERY_TOTAL, queryCount);
+ builder.timeValueField(Fields.QUERY_TIME_IN_MILLIS, Fields.QUERY_TIME, queryTimeInMillis);
+ builder.field(Fields.QUERY_CURRENT, queryCurrent);
+
+ builder.field(Fields.FETCH_TOTAL, fetchCount);
+ builder.timeValueField(Fields.FETCH_TIME_IN_MILLIS, Fields.FETCH_TIME, fetchTimeInMillis);
+ builder.field(Fields.FETCH_CURRENT, fetchCurrent);
+
+ return builder;
+ }
+ }
+
+ Stats totalStats;
+ long openContexts;
+
+ @Nullable
+ Map<String, Stats> groupStats;
+
+ public SearchStats() {
+ totalStats = new Stats();
+ }
+
+ public SearchStats(Stats totalStats, long openContexts, @Nullable Map<String, Stats> groupStats) {
+ this.totalStats = totalStats;
+ this.openContexts = openContexts;
+ this.groupStats = groupStats;
+ }
+
+ public void add(SearchStats searchStats) {
+ add(searchStats, true);
+ }
+
+ public void add(SearchStats searchStats, boolean includeTypes) {
+ if (searchStats == null) {
+ return;
+ }
+ totalStats.add(searchStats.totalStats);
+ openContexts += searchStats.openContexts;
+ if (includeTypes && searchStats.groupStats != null && !searchStats.groupStats.isEmpty()) {
+ if (groupStats == null) {
+ groupStats = new HashMap<String, Stats>(searchStats.groupStats.size());
+ }
+ for (Map.Entry<String, Stats> entry : searchStats.groupStats.entrySet()) {
+ Stats stats = groupStats.get(entry.getKey());
+ if (stats == null) {
+ groupStats.put(entry.getKey(), entry.getValue());
+ } else {
+ stats.add(entry.getValue());
+ }
+ }
+ }
+ }
+
+ public Stats getTotal() {
+ return this.totalStats;
+ }
+
+ public long getOpenContexts() {
+ return this.openContexts;
+ }
+
+ @Nullable
+ public Map<String, Stats> getGroupStats() {
+ return this.groupStats;
+ }
+
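+ // Illustrative rendered form (values are placeholders):
+ //   "search" : { "open_contexts" : 0, "query_total" : 5,
+ //                "query_time_in_millis" : 12, "query_current" : 0,
+ //                "fetch_total" : 5, "fetch_time_in_millis" : 3,
+ //                "fetch_current" : 0, "groups" : { ... } }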
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(Fields.SEARCH);
+ builder.field(Fields.OPEN_CONTEXTS, openContexts);
+ totalStats.toXContent(builder, params);
+ if (groupStats != null && !groupStats.isEmpty()) {
+ builder.startObject(Fields.GROUPS);
+ for (Map.Entry<String, Stats> entry : groupStats.entrySet()) {
+ builder.startObject(entry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
+ entry.getValue().toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SEARCH = new XContentBuilderString("search");
+ static final XContentBuilderString OPEN_CONTEXTS = new XContentBuilderString("open_contexts");
+ static final XContentBuilderString GROUPS = new XContentBuilderString("groups");
+ static final XContentBuilderString QUERY_TOTAL = new XContentBuilderString("query_total");
+ static final XContentBuilderString QUERY_TIME = new XContentBuilderString("query_time");
+ static final XContentBuilderString QUERY_TIME_IN_MILLIS = new XContentBuilderString("query_time_in_millis");
+ static final XContentBuilderString QUERY_CURRENT = new XContentBuilderString("query_current");
+ static final XContentBuilderString FETCH_TOTAL = new XContentBuilderString("fetch_total");
+ static final XContentBuilderString FETCH_TIME = new XContentBuilderString("fetch_time");
+ static final XContentBuilderString FETCH_TIME_IN_MILLIS = new XContentBuilderString("fetch_time_in_millis");
+ static final XContentBuilderString FETCH_CURRENT = new XContentBuilderString("fetch_current");
+ }
+
+ public static SearchStats readSearchStats(StreamInput in) throws IOException {
+ SearchStats searchStats = new SearchStats();
+ searchStats.readFrom(in);
+ return searchStats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ totalStats = Stats.readStats(in);
+ openContexts = in.readVLong();
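+        // a leading boolean on the wire marks whether per-group stats follow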
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ groupStats = new HashMap<String, Stats>(size);
+ for (int i = 0; i < size; i++) {
+ groupStats.put(in.readString(), Stats.readStats(in));
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ totalStats.writeTo(out);
+ out.writeVLong(openContexts);
+ if (groupStats == null || groupStats.isEmpty()) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(groupStats.size());
+ for (Map.Entry<String, Stats> entry : groupStats.entrySet()) {
+ out.writeString(entry.getKey());
+ entry.getValue().writeTo(out);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/stats/ShardSearchModule.java b/src/main/java/org/elasticsearch/index/search/stats/ShardSearchModule.java
new file mode 100644
index 0000000..28f8c09
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/stats/ShardSearchModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService;
+
+/**
+ */
+public class ShardSearchModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardSearchService.class).asEagerSingleton();
+ bind(ShardSlowLogSearchService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/stats/ShardSearchService.java b/src/main/java/org/elasticsearch/index/search/stats/ShardSearchService.java
new file mode 100644
index 0000000..03b31c8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/stats/ShardSearchService.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public class ShardSearchService extends AbstractIndexShardComponent {
+
+ private final ShardSlowLogSearchService slowLogSearchService;
+
+ private final StatsHolder totalStats = new StatsHolder();
+ private final CounterMetric openContexts = new CounterMetric();
+
+ private volatile Map<String, StatsHolder> groupsStats = ImmutableMap.of();
+
+ @Inject
+ public ShardSearchService(ShardId shardId, @IndexSettings Settings indexSettings, ShardSlowLogSearchService slowLogSearchService) {
+ super(shardId, indexSettings);
+ this.slowLogSearchService = slowLogSearchService;
+ }
+
+    /**
+     * Returns the stats, including group-specific stats. If the groups array is null or
+     * empty, no group stats are returned. Otherwise only the requested groups are
+     * returned, or every group when the single value <tt>_all</tt> is passed.
+     */
+ public SearchStats stats(String... groups) {
+ SearchStats.Stats total = totalStats.stats();
+ Map<String, SearchStats.Stats> groupsSt = null;
+ if (groups != null && groups.length > 0) {
+ if (groups.length == 1 && groups[0].equals("_all")) {
+ groupsSt = new HashMap<String, SearchStats.Stats>(groupsStats.size());
+ for (Map.Entry<String, StatsHolder> entry : groupsStats.entrySet()) {
+ groupsSt.put(entry.getKey(), entry.getValue().stats());
+ }
+ } else {
+ groupsSt = new HashMap<String, SearchStats.Stats>(groups.length);
+ for (String group : groups) {
+ StatsHolder statsHolder = groupsStats.get(group);
+ if (statsHolder != null) {
+ groupsSt.put(group, statsHolder.stats());
+ }
+ }
+ }
+ }
+ return new SearchStats(total, openContexts.count(), groupsSt);
+ }
+
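+    // Phase lifecycle: onPre* increments the corresponding "current" gauge; the matching
+    // on*Phase (success) records timing and decrements it, while onFailed*Phase only decrements.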
+ public void onPreQueryPhase(SearchContext searchContext) {
+ totalStats.queryCurrent.inc();
+ if (searchContext.groupStats() != null) {
+ for (int i = 0; i < searchContext.groupStats().size(); i++) {
+ groupStats(searchContext.groupStats().get(i)).queryCurrent.inc();
+ }
+ }
+ }
+
+ public void onFailedQueryPhase(SearchContext searchContext) {
+ totalStats.queryCurrent.dec();
+ if (searchContext.groupStats() != null) {
+ for (int i = 0; i < searchContext.groupStats().size(); i++) {
+ groupStats(searchContext.groupStats().get(i)).queryCurrent.dec();
+ }
+ }
+ }
+
+ public void onQueryPhase(SearchContext searchContext, long tookInNanos) {
+ totalStats.queryMetric.inc(tookInNanos);
+ totalStats.queryCurrent.dec();
+ if (searchContext.groupStats() != null) {
+ for (int i = 0; i < searchContext.groupStats().size(); i++) {
+ StatsHolder statsHolder = groupStats(searchContext.groupStats().get(i));
+ statsHolder.queryMetric.inc(tookInNanos);
+ statsHolder.queryCurrent.dec();
+ }
+ }
+ slowLogSearchService.onQueryPhase(searchContext, tookInNanos);
+ }
+
+ public void onPreFetchPhase(SearchContext searchContext) {
+ totalStats.fetchCurrent.inc();
+ if (searchContext.groupStats() != null) {
+ for (int i = 0; i < searchContext.groupStats().size(); i++) {
+ groupStats(searchContext.groupStats().get(i)).fetchCurrent.inc();
+ }
+ }
+ }
+
+ public void onFailedFetchPhase(SearchContext searchContext) {
+ totalStats.fetchCurrent.dec();
+ if (searchContext.groupStats() != null) {
+ for (int i = 0; i < searchContext.groupStats().size(); i++) {
+ groupStats(searchContext.groupStats().get(i)).fetchCurrent.dec();
+ }
+ }
+ }
+
+ public void onFetchPhase(SearchContext searchContext, long tookInNanos) {
+ totalStats.fetchMetric.inc(tookInNanos);
+ totalStats.fetchCurrent.dec();
+ if (searchContext.groupStats() != null) {
+ for (int i = 0; i < searchContext.groupStats().size(); i++) {
+ StatsHolder statsHolder = groupStats(searchContext.groupStats().get(i));
+ statsHolder.fetchMetric.inc(tookInNanos);
+ statsHolder.fetchCurrent.dec();
+ }
+ }
+ slowLogSearchService.onFetchPhase(searchContext, tookInNanos);
+ }
+
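+    // Groups with in-flight query/fetch phases are kept (with their timings cleared) so
+    // that the pending "current" counters can still be decremented consistently.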
+ public void clear() {
+ totalStats.clear();
+ synchronized (this) {
+ if (!groupsStats.isEmpty()) {
+ MapBuilder<String, StatsHolder> typesStatsBuilder = MapBuilder.newMapBuilder();
+ for (Map.Entry<String, StatsHolder> typeStats : groupsStats.entrySet()) {
+ if (typeStats.getValue().totalCurrent() > 0) {
+ typeStats.getValue().clear();
+ typesStatsBuilder.put(typeStats.getKey(), typeStats.getValue());
+ }
+ }
+ groupsStats = typesStatsBuilder.immutableMap();
+ }
+ }
+ }
+
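+    /**
+     * Copy-on-write lookup: reads go against the volatile immutable map without locking;
+     * a missing group is added under the monitor, with a double-check to avoid losing
+     * concurrent inserts.
+     */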
+ private StatsHolder groupStats(String group) {
+ StatsHolder stats = groupsStats.get(group);
+ if (stats == null) {
+ synchronized (this) {
+ stats = groupsStats.get(group);
+ if (stats == null) {
+ stats = new StatsHolder();
+ groupsStats = MapBuilder.newMapBuilder(groupsStats).put(group, stats).immutableMap();
+ }
+ }
+ }
+ return stats;
+ }
+
+ public void onNewContext(SearchContext context) {
+ openContexts.inc();
+ }
+
+ public void onFreeContext(SearchContext context) {
+ openContexts.dec();
+ }
+
+ static class StatsHolder {
+ public final MeanMetric queryMetric = new MeanMetric();
+ public final MeanMetric fetchMetric = new MeanMetric();
+ public final CounterMetric queryCurrent = new CounterMetric();
+ public final CounterMetric fetchCurrent = new CounterMetric();
+
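+        // metrics are accumulated in nanoseconds; the snapshot converts sums to milliseconds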
+ public SearchStats.Stats stats() {
+ return new SearchStats.Stats(
+ queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(),
+ fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count());
+ }
+
+ public long totalCurrent() {
+ return queryCurrent.count() + fetchCurrent.count();
+ }
+
+ public void clear() {
+ queryMetric.clear();
+ fetchMetric.clear();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java b/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java
new file mode 100644
index 0000000..4ac3b2c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ */
+public class StatsGroupsParseElement implements SearchParseElement {
+
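+    // Accepts either a single group name or an array of names, e.g. "stats" : ["g1", "g2"].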
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ context.groupStats(ImmutableList.of(parser.text()));
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ List<String> groupStats = new ArrayList<String>(4);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ groupStats.add(parser.text());
+ }
+ context.groupStats(groupStats);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/service/IndexService.java b/src/main/java/org/elasticsearch/index/service/IndexService.java
new file mode 100644
index 0000000..1c08305
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/service/IndexService.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.service;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.index.IndexComponent;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.IndexStore;
+
+/**
+ *
+ */
+public interface IndexService extends IndexComponent, Iterable<IndexShard> {
+
+ Injector injector();
+
+ IndexGateway gateway();
+
+ IndexCache cache();
+
+ IndexFieldDataService fieldData();
+
+ IndexSettingsService settingsService();
+
+ AnalysisService analysisService();
+
+ MapperService mapperService();
+
+ IndexQueryParserService queryParserService();
+
+ SimilarityService similarityService();
+
+ IndexAliasesService aliasesService();
+
+ IndexEngine engine();
+
+ IndexStore store();
+
+ IndexShard createShard(int sShardId) throws ElasticsearchException;
+
+    /**
+     * Removes the shard; local data and the gateway are not deleted.
+     */
+ void removeShard(int shardId, String reason) throws ElasticsearchException;
+
+ int numberOfShards();
+
+ ImmutableSet<Integer> shardIds();
+
+ boolean hasShard(int shardId);
+
+ IndexShard shard(int shardId);
+
+ IndexShard shardSafe(int shardId) throws IndexShardMissingException;
+
+ Injector shardInjector(int shardId);
+
+ Injector shardInjectorSafe(int shardId) throws IndexShardMissingException;
+
+ String indexUUID();
+}
diff --git a/src/main/java/org/elasticsearch/index/service/InternalIndexService.java b/src/main/java/org/elasticsearch/index/service/InternalIndexService.java
new file mode 100644
index 0000000..130dc91
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/service/InternalIndexService.java
@@ -0,0 +1,452 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.service;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.UnmodifiableIterator;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.*;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.filter.ShardFilterCacheModule;
+import org.elasticsearch.index.cache.id.ShardIdCacheModule;
+import org.elasticsearch.index.deletionpolicy.DeletionPolicyModule;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineModule;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.ShardFieldDataModule;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.gateway.IndexShardGatewayModule;
+import org.elasticsearch.index.gateway.IndexShardGatewayService;
+import org.elasticsearch.index.get.ShardGetModule;
+import org.elasticsearch.index.indexing.ShardIndexingModule;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.merge.policy.MergePolicyModule;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule;
+import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
+import org.elasticsearch.index.percolator.PercolatorShardModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.search.stats.ShardSearchModule;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.IndexShardCreationException;
+import org.elasticsearch.index.shard.IndexShardModule;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.snapshots.IndexShardSnapshotModule;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreModule;
+import org.elasticsearch.index.termvectors.ShardTermVectorModule;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogModule;
+import org.elasticsearch.index.translog.TranslogService;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.InternalIndicesLifecycle;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.plugins.ShardsPluginsModule;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+
+/**
+ *
+ */
+public class InternalIndexService extends AbstractIndexComponent implements IndexService {
+
+ private final Injector injector;
+
+ private final Settings indexSettings;
+
+ private final ThreadPool threadPool;
+
+ private final PluginsService pluginsService;
+
+ private final InternalIndicesLifecycle indicesLifecycle;
+
+ private final AnalysisService analysisService;
+
+ private final MapperService mapperService;
+
+ private final IndexQueryParserService queryParserService;
+
+ private final SimilarityService similarityService;
+
+ private final IndexAliasesService aliasesService;
+
+ private final IndexCache indexCache;
+
+ private final IndexFieldDataService indexFieldData;
+
+ private final IndexEngine indexEngine;
+
+ private final IndexGateway indexGateway;
+
+ private final IndexStore indexStore;
+
+ private final IndexSettingsService settingsService;
+
+ private volatile ImmutableMap<Integer, Injector> shardsInjectors = ImmutableMap.of();
+
+ private volatile ImmutableMap<Integer, IndexShard> shards = ImmutableMap.of();
+
+ private volatile boolean closed = false;
+
+ @Inject
+ public InternalIndexService(Injector injector, Index index, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv, ThreadPool threadPool,
+ AnalysisService analysisService, MapperService mapperService, IndexQueryParserService queryParserService,
+ SimilarityService similarityService, IndexAliasesService aliasesService, IndexCache indexCache, IndexEngine indexEngine,
+ IndexGateway indexGateway, IndexStore indexStore, IndexSettingsService settingsService, IndexFieldDataService indexFieldData) {
+ super(index, indexSettings);
+ this.injector = injector;
+ this.threadPool = threadPool;
+ this.indexSettings = indexSettings;
+ this.analysisService = analysisService;
+ this.mapperService = mapperService;
+ this.queryParserService = queryParserService;
+ this.similarityService = similarityService;
+ this.aliasesService = aliasesService;
+ this.indexCache = indexCache;
+ this.indexFieldData = indexFieldData;
+ this.indexEngine = indexEngine;
+ this.indexGateway = indexGateway;
+ this.indexStore = indexStore;
+ this.settingsService = settingsService;
+
+ this.pluginsService = injector.getInstance(PluginsService.class);
+ this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class);
+
+        // inject workarounds for cyclic dependencies
+ indexCache.filter().setIndexService(this);
+ indexCache.idCache().setIndexService(this);
+ indexFieldData.setIndexService(this);
+ }
+
+ @Override
+ public int numberOfShards() {
+ return shards.size();
+ }
+
+ @Override
+ public UnmodifiableIterator<IndexShard> iterator() {
+ return shards.values().iterator();
+ }
+
+ @Override
+ public boolean hasShard(int shardId) {
+ return shards.containsKey(shardId);
+ }
+
+ @Override
+ public IndexShard shard(int shardId) {
+ return shards.get(shardId);
+ }
+
+ @Override
+ public IndexShard shardSafe(int shardId) throws IndexShardMissingException {
+ IndexShard indexShard = shard(shardId);
+ if (indexShard == null) {
+ throw new IndexShardMissingException(new ShardId(index, shardId));
+ }
+ return indexShard;
+ }
+
+ @Override
+ public ImmutableSet<Integer> shardIds() {
+ return shards.keySet();
+ }
+
+ @Override
+ public Injector injector() {
+ return injector;
+ }
+
+ @Override
+ public IndexGateway gateway() {
+ return indexGateway;
+ }
+
+ @Override
+ public IndexSettingsService settingsService() {
+ return this.settingsService;
+ }
+
+ @Override
+ public IndexStore store() {
+ return indexStore;
+ }
+
+ @Override
+ public IndexCache cache() {
+ return indexCache;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return indexFieldData;
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return this.analysisService;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return mapperService;
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return queryParserService;
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return similarityService;
+ }
+
+ @Override
+ public IndexAliasesService aliasesService() {
+ return aliasesService;
+ }
+
+ @Override
+ public IndexEngine engine() {
+ return indexEngine;
+ }
+
+ public void close(final String reason, @Nullable Executor executor) {
+ synchronized (this) {
+ closed = true;
+ }
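+        // close all shards in parallel (on the generic thread pool unless an executor is
+        // given) and block until every shard has been removed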
+ Set<Integer> shardIds = shardIds();
+ final CountDownLatch latch = new CountDownLatch(shardIds.size());
+ for (final int shardId : shardIds) {
+ executor = executor == null ? threadPool.generic() : executor;
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ removeShard(shardId, reason);
+ } catch (Throwable e) {
+ logger.warn("failed to close shard", e);
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ logger.debug("Interrupted closing index [{}]", e, index().name());
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ @Override
+ public Injector shardInjector(int shardId) throws ElasticsearchException {
+ return shardsInjectors.get(shardId);
+ }
+
+ @Override
+ public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException {
+ Injector shardInjector = shardInjector(shardId);
+ if (shardInjector == null) {
+ throw new IndexShardMissingException(new ShardId(index, shardId));
+ }
+ return shardInjector;
+ }
+
+ @Override
+ public String indexUUID() {
+ return indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
+ }
+
+ @Override
+ public synchronized IndexShard createShard(int sShardId) throws ElasticsearchException {
+        /*
+         * TODO: this is executed in parallel, yet it's a synchronized method. We might
+         * be able to serialize the execution via the cluster state in the future; for
+         * now we just keep it synchronized.
+         */
+ if (closed) {
+ throw new ElasticsearchIllegalStateException("Can't create shard [" + index.name() + "][" + sShardId + "], closed");
+ }
+ ShardId shardId = new ShardId(index, sShardId);
+ if (shardsInjectors.containsKey(shardId.id())) {
+ throw new IndexShardAlreadyExistsException(shardId + " already exists");
+ }
+
+ indicesLifecycle.beforeIndexShardCreated(shardId);
+
+ logger.debug("creating shard_id [{}]", shardId.id());
+
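+        // each shard gets its own child injector assembled from these per-shard modules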
+ ModulesBuilder modules = new ModulesBuilder();
+ modules.add(new ShardsPluginsModule(indexSettings, pluginsService));
+ modules.add(new IndexShardModule(indexSettings, shardId));
+ modules.add(new ShardIndexingModule());
+ modules.add(new ShardSearchModule());
+ modules.add(new ShardGetModule());
+ modules.add(new StoreModule(indexSettings, injector.getInstance(IndexStore.class)));
+ modules.add(new DeletionPolicyModule(indexSettings));
+ modules.add(new MergePolicyModule(indexSettings));
+ modules.add(new MergeSchedulerModule(indexSettings));
+ modules.add(new ShardFilterCacheModule());
+ modules.add(new ShardFieldDataModule());
+ modules.add(new ShardIdCacheModule());
+ modules.add(new TranslogModule(indexSettings));
+ modules.add(new EngineModule(indexSettings));
+ modules.add(new IndexShardGatewayModule(injector.getInstance(IndexGateway.class)));
+ modules.add(new PercolatorShardModule());
+ modules.add(new ShardTermVectorModule());
+ modules.add(new IndexShardSnapshotModule());
+
+ Injector shardInjector;
+ try {
+ shardInjector = modules.createChildInjector(injector);
+ } catch (CreationException e) {
+ throw new IndexShardCreationException(shardId, Injectors.getFirstErrorFailure(e));
+ } catch (Throwable e) {
+ throw new IndexShardCreationException(shardId, e);
+ }
+
+ shardsInjectors = newMapBuilder(shardsInjectors).put(shardId.id(), shardInjector).immutableMap();
+
+ IndexShard indexShard = shardInjector.getInstance(IndexShard.class);
+
+ indicesLifecycle.indexShardStateChanged(indexShard, null, "shard created");
+ indicesLifecycle.afterIndexShardCreated(indexShard);
+
+ shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
+
+ return indexShard;
+ }
+
+ @Override
+ public synchronized void removeShard(int shardId, String reason) throws ElasticsearchException {
+ final Injector shardInjector;
+ final IndexShard indexShard;
+ final ShardId sId = new ShardId(index, shardId);
+ Map<Integer, Injector> tmpShardInjectors = newHashMap(shardsInjectors);
+ shardInjector = tmpShardInjectors.remove(shardId);
+ if (shardInjector == null) {
+ return;
+ }
+ shardsInjectors = ImmutableMap.copyOf(tmpShardInjectors);
+ Map<Integer, IndexShard> tmpShardsMap = newHashMap(shards);
+ indexShard = tmpShardsMap.remove(shardId);
+ shards = ImmutableMap.copyOf(tmpShardsMap);
+ indicesLifecycle.beforeIndexShardClosed(sId, indexShard);
+ for (Class<? extends CloseableIndexComponent> closeable : pluginsService.shardServices()) {
+ try {
+ shardInjector.getInstance(closeable).close();
+ } catch (Throwable e) {
+ logger.debug("failed to clean plugin shard service [{}]", e, closeable);
+ }
+ }
+ try {
+            // close the translog service first; it must be closed before we close the shard
+ shardInjector.getInstance(TranslogService.class).close();
+ } catch (Throwable e) {
+ logger.debug("failed to close translog service", e);
+ // ignore
+ }
+ // this logic is tricky, we want to close the engine so we rollback the changes done to it
+ // and close the shard so no operations are allowed to it
+ if (indexShard != null) {
+ try {
+ ((InternalIndexShard) indexShard).close(reason);
+ } catch (Throwable e) {
+ logger.debug("failed to close index shard", e);
+ // ignore
+ }
+ }
+ try {
+ shardInjector.getInstance(Engine.class).close();
+ } catch (Throwable e) {
+ logger.debug("failed to close engine", e);
+ // ignore
+ }
+ try {
+ shardInjector.getInstance(MergePolicyProvider.class).close();
+ } catch (Throwable e) {
+ logger.debug("failed to close merge policy provider", e);
+ // ignore
+ }
+ try {
+ shardInjector.getInstance(IndexShardGatewayService.class).snapshotOnClose();
+ } catch (Throwable e) {
+ logger.debug("failed to snapshot index shard gateway on close", e);
+ // ignore
+ }
+ try {
+ shardInjector.getInstance(IndexShardGatewayService.class).close();
+ } catch (Throwable e) {
+ logger.debug("failed to close index shard gateway", e);
+ // ignore
+ }
+ try {
+ // now we can close the translog
+ shardInjector.getInstance(Translog.class).close();
+ } catch (Throwable e) {
+ logger.debug("failed to close translog", e);
+ // ignore
+ }
+ try {
+            // and the percolator queries registry
+ shardInjector.getInstance(PercolatorQueriesRegistry.class).close();
+ } catch (Throwable e) {
+ logger.debug("failed to close PercolatorQueriesRegistry", e);
+ // ignore
+ }
+
+ // call this before we close the store, so we can release resources for it
+ indicesLifecycle.afterIndexShardClosed(sId);
+        // finally, get the store and close it
+        Store store = shardInjector.getInstance(Store.class);
+ try {
+ store.close();
+ } catch (Throwable e) {
+ logger.warn("failed to close store on shard deletion", e);
+ }
+ Injectors.close(injector);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettings.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettings.java
new file mode 100644
index 0000000..0f686ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettings.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.settings;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.ElementType.PARAMETER;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+
+@BindingAnnotation
+@Target({FIELD, PARAMETER})
+@Retention(RUNTIME)
+@Documented
+public @interface IndexDynamicSettings {
+
+}
diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java
new file mode 100644
index 0000000..39248ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.settings;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
+import org.elasticsearch.cluster.settings.DynamicSettings;
+import org.elasticsearch.cluster.settings.Validator;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.gateway.local.LocalGatewayAllocator;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.engine.internal.InternalEngine;
+import org.elasticsearch.index.gateway.IndexShardGatewayService;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;
+import org.elasticsearch.index.merge.policy.LogDocMergePolicyProvider;
+import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider;
+import org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.store.support.AbstractIndexStore;
+import org.elasticsearch.index.translog.TranslogService;
+import org.elasticsearch.index.translog.fs.FsTranslog;
+import org.elasticsearch.indices.ttl.IndicesTTLService;
+import org.elasticsearch.indices.warmer.InternalIndicesWarmer;
+
+/**
+ */
+public class IndexDynamicSettingsModule extends AbstractModule {
+
+ private final DynamicSettings indexDynamicSettings;
+
+ public IndexDynamicSettingsModule() {
+ indexDynamicSettings = new DynamicSettings();
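+        // settings registered without an explicit Validator accept any value; the rest
+        // are validated before a dynamic update is applied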
+ indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
+ indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE);
+ indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*");
+ indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "*");
+ indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*");
+ indexDynamicSettings.addDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE);
+ indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION);
+ indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION);
+ indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION);
+ indexDynamicSettings.addDynamicSetting(FsTranslog.INDEX_TRANSLOG_FS_TYPE);
+ indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
+ indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
+ indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_READ_ONLY);
+ indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ);
+ indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE);
+ indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA);
+ indexDynamicSettings.addDynamicSetting(IndexShardGatewayService.INDEX_GATEWAY_SNAPSHOT_INTERVAL, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE);
+ indexDynamicSettings.addDynamicSetting(InternalIndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(LocalGatewayAllocator.INDEX_RECOVERY_INITIAL_SHARDS);
+ indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_SIZE, Validator.BYTES_SIZE);
+ indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_SIZE, Validator.BYTES_SIZE);
+ indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, Validator.POSITIVE_INTEGER);
+ indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, Validator.INTEGER_GTE_2);
+ indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_COMPOUND_FORMAT);
+ indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_DOCS, Validator.POSITIVE_INTEGER);
+ indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, Validator.POSITIVE_INTEGER);
+ indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, Validator.INTEGER_GTE_2);
+ indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_COMPOUND_FORMAT);
+ indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_INDEX_CONCURRENCY, Validator.NON_NEGATIVE_INTEGER);
+ indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_COMPOUND_ON_FLUSH, Validator.BOOLEAN);
+ indexDynamicSettings.addDynamicSetting(CodecService.INDEX_CODEC_BLOOM_LOAD, Validator.BOOLEAN);
+ indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_GC_DELETES, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_CODEC);
+ indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_REFORMAT);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_LEVEL);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_REFORMAT);
+ indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_LEVEL);
+ indexDynamicSettings.addDynamicSetting(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, Validator.DOUBLE);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT, Validator.BYTES_SIZE);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, Validator.INTEGER_GTE_2);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, Validator.INTEGER_GTE_2);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, Validator.BYTES_SIZE);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
+ indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT);
+ indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_INTERVAL, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, Validator.INTEGER);
+ indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
+ indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, Validator.TIME);
+ indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH);
+ indexDynamicSettings.addDynamicSetting(InternalIndicesWarmer.INDEX_WARMER_ENABLED);
+ }
+
+ public void addDynamicSettings(String... settings) {
+ indexDynamicSettings.addDynamicSettings(settings);
+ }
+
+ public void addDynamicSetting(String setting, Validator validator) {
+ indexDynamicSettings.addDynamicSetting(setting, validator);
+ }
+
+ @Override
+ protected void configure() {
+ bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/settings/IndexSettings.java b/src/main/java/org/elasticsearch/index/settings/IndexSettings.java
new file mode 100644
index 0000000..b430564
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/settings/IndexSettings.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.settings;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.ElementType.PARAMETER;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ *
+ */
+@BindingAnnotation
+@Target({FIELD, PARAMETER})
+@Retention(RUNTIME)
+@Documented
+public @interface IndexSettings {
+}
diff --git a/src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java
new file mode 100644
index 0000000..d4a0637
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/settings/IndexSettingsModule.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.settings;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+
+/**
+ *
+ */
+public class IndexSettingsModule extends AbstractModule {
+
+ private final Index index;
+
+ private final Settings settings;
+
+ public IndexSettingsModule(Index index, Settings settings) {
+ this.index = index;
+ this.settings = settings;
+ }
+
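+    // @IndexSettings-annotated Settings are bound through a Provider, so shards created
+    // after a settings refresh are injected with the updated values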
+ @Override
+ protected void configure() {
+ IndexSettingsService indexSettingsService = new IndexSettingsService(index, settings);
+ bind(IndexSettingsService.class).toInstance(indexSettingsService);
+ bind(Settings.class).annotatedWith(IndexSettings.class).toProvider(new IndexSettingsProvider(indexSettingsService));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/settings/IndexSettingsProvider.java b/src/main/java/org/elasticsearch/index/settings/IndexSettingsProvider.java
new file mode 100644
index 0000000..f1d5858
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/settings/IndexSettingsProvider.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.settings;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A wrapper around {@link IndexSettingsService} that provides the current settings
+ * associated with an index; those settings can be changed dynamically, and newly
+ * created shards will use the updated values.
+ */
+public class IndexSettingsProvider implements Provider<Settings> {
+
+ private final IndexSettingsService indexSettingsService;
+
+ @Inject
+ public IndexSettingsProvider(IndexSettingsService indexSettingsService) {
+ this.indexSettingsService = indexSettingsService;
+ }
+
+ @Override
+ public Settings get() {
+ return indexSettingsService.getSettings();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/settings/IndexSettingsService.java b/src/main/java/org/elasticsearch/index/settings/IndexSettingsService.java
new file mode 100644
index 0000000..03a0cac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/settings/IndexSettingsService.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.settings;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * Holds the latest, updated settings for an index and notifies registered listeners
+ * when those settings are refreshed.
+ */
+public class IndexSettingsService extends AbstractIndexComponent {
+
+ private volatile Settings settings;
+
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ @Inject
+ public IndexSettingsService(Index index, Settings settings) {
+ super(index, settings);
+ this.settings = settings;
+ }
+
+ public synchronized void refreshSettings(Settings settings) {
+        // this.settings also includes the node settings, so compare only the index.* subset
+ if (this.settings.getByPrefix("index.").getAsMap().equals(settings.getByPrefix("index.").getAsMap())) {
+ // nothing to update, same settings
+ return;
+ }
+ this.settings = ImmutableSettings.settingsBuilder().put(this.settings).put(settings).build();
+ for (Listener listener : listeners) {
+ try {
+ listener.onRefreshSettings(settings);
+ } catch (Exception e) {
+ logger.warn("failed to refresh settings for [{}]", e, listener);
+ }
+ }
+ }
+
+ public Settings getSettings() {
+ return this.settings;
+ }
+
+ /**
+ * Only settings registered in {@link IndexDynamicSettingsModule} can be changed dynamically.
+ */
+ public void addListener(Listener listener) {
+ this.listeners.add(listener);
+ }
+
+ public void removeListener(Listener listener) {
+ this.listeners.remove(listener);
+ }
+
+ public static interface Listener {
+ void onRefreshSettings(Settings settings);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
new file mode 100644
index 0000000..d280cda
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ *
+ */
+public abstract class AbstractIndexShardComponent implements IndexShardComponent {
+
+ protected final ESLogger logger;
+
+ protected final ShardId shardId;
+
+ protected final Settings indexSettings;
+
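+    // the subset of the index settings scoped to this component, resolved from the
+    // class name (or the explicit prefix passed to the second constructor)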
+ protected final Settings componentSettings;
+
+ protected AbstractIndexShardComponent(ShardId shardId, @IndexSettings Settings indexSettings) {
+ this.shardId = shardId;
+ this.indexSettings = indexSettings;
+ this.componentSettings = indexSettings.getComponentSettings(getClass());
+
+ this.logger = Loggers.getLogger(getClass(), indexSettings, shardId);
+ }
+
+ protected AbstractIndexShardComponent(ShardId shardId, @IndexSettings Settings indexSettings, String prefixSettings) {
+ this.shardId = shardId;
+ this.indexSettings = indexSettings;
+ this.componentSettings = indexSettings.getComponentSettings(prefixSettings, getClass());
+
+ this.logger = Loggers.getLogger(getClass(), indexSettings, shardId);
+ }
+
+ @Override
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ @Override
+ public Settings indexSettings() {
+ return this.indexSettings;
+ }
+
+ public String nodeName() {
+ return indexSettings.get("name", "");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/DocsStats.java b/src/main/java/org/elasticsearch/index/shard/DocsStats.java
new file mode 100644
index 0000000..3ce99e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/DocsStats.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ */
+public class DocsStats implements Streamable, ToXContent {
+
+ long count = 0;
+ long deleted = 0;
+
+ public DocsStats() {
+
+ }
+
+ public DocsStats(long count, long deleted) {
+ this.count = count;
+ this.deleted = deleted;
+ }
+
+ public void add(DocsStats docsStats) {
+ if (docsStats == null) {
+ return;
+ }
+ count += docsStats.count;
+ deleted += docsStats.deleted;
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+ public long getDeleted() {
+ return this.deleted;
+ }
+
+ public static DocsStats readDocStats(StreamInput in) throws IOException {
+ DocsStats docsStats = new DocsStats();
+ docsStats.readFrom(in);
+ return docsStats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ count = in.readVLong();
+ deleted = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(count);
+ out.writeVLong(deleted);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.DOCS);
+ builder.field(Fields.COUNT, count);
+ builder.field(Fields.DELETED, deleted);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString DOCS = new XContentBuilderString("docs");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString DELETED = new XContentBuilderString("deleted");
+ }
+}
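
A usage sketch: per-shard document stats are typically folded into a total with add(...), which this class makes null-safe; the numbers below are illustrative:

    DocsStats total = new DocsStats();
    total.add(new DocsStats(100, 3));  // e.g. shard 0: 100 docs, 3 deleted
    total.add(new DocsStats(250, 7));  // e.g. shard 1
    total.add(null);                   // ignored, add() is null-safe
    assert total.getCount() == 350;
    assert total.getDeleted() == 10;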
diff --git a/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java b/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java
new file mode 100644
index 0000000..3f6cf92
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class IllegalIndexShardStateException extends IndexShardException {
+
+ private final IndexShardState currentState;
+
+ public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg) {
+ super(shardId, "CurrentState[" + currentState + "] " + msg);
+ this.currentState = currentState;
+ }
+
+ public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Throwable ex) {
+ super(shardId, "CurrentState[" + currentState + "] ", ex);
+ this.currentState = currentState;
+ }
+
+ public IndexShardState currentState() {
+ return currentState;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java
new file mode 100644
index 0000000..fa68f32
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ *
+ */
+public class IndexShardClosedException extends IllegalIndexShardStateException {
+ public IndexShardClosedException(ShardId shardId) {
+ super(shardId, IndexShardState.CLOSED, "Closed");
+ }
+
+ public IndexShardClosedException(ShardId shardId, Throwable t) {
+ super(shardId, IndexShardState.CLOSED, "Closed", t);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java b/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java
new file mode 100644
index 0000000..3888603
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public interface IndexShardComponent {
+
+ ShardId shardId();
+
+ Settings indexSettings();
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java
new file mode 100644
index 0000000..15835da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ */
+public class IndexShardCreationException extends IndexShardException {
+
+ public IndexShardCreationException(ShardId shardId, Throwable cause) {
+ super(shardId, "failed to create shard", cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java
new file mode 100644
index 0000000..e9217fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.index.IndexException;
+
+/**
+ *
+ */
+public class IndexShardException extends IndexException {
+
+ private final ShardId shardId;
+
+ public IndexShardException(ShardId shardId, String msg) {
+ this(shardId, msg, null);
+ }
+
+ public IndexShardException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId == null ? null : shardId.index(), false, "[" + (shardId == null ? "_na" : shardId.id()) + "] " + msg, cause);
+ this.shardId = shardId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java b/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java
new file mode 100644
index 0000000..3303333
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.warmer.ShardIndexWarmerService;
+
+/**
+ *
+ */
+public class IndexShardModule extends AbstractModule {
+
+ private final Settings settings;
+
+ private final ShardId shardId;
+
+ public IndexShardModule(Settings settings, ShardId shardId) {
+ this.settings = settings;
+ this.shardId = shardId;
+ }
+
+ @Override
+ protected void configure() {
+ bind(ShardId.class).toInstance(shardId);
+ bind(IndexShard.class).to(InternalIndexShard.class).asEagerSingleton();
+ bind(ShardIndexWarmerService.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java
new file mode 100644
index 0000000..abdd1c1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ *
+ */
+public class IndexShardNotRecoveringException extends IllegalIndexShardStateException {
+
+ public IndexShardNotRecoveringException(ShardId shardId, IndexShardState currentState) {
+ super(shardId, currentState, "Shard not in recovering state");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java
new file mode 100644
index 0000000..fa9ba4b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ *
+ */
+public class IndexShardNotStartedException extends IllegalIndexShardStateException {
+
+ public IndexShardNotStartedException(ShardId shardId, IndexShardState currentState) {
+ super(shardId, currentState, "Shard not started");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java
new file mode 100644
index 0000000..37a1773
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ *
+ */
+public class IndexShardRecoveringException extends IllegalIndexShardStateException {
+
+ public IndexShardRecoveringException(ShardId shardId) {
+ super(shardId, IndexShardState.RECOVERING, "Already recovering");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java
new file mode 100644
index 0000000..f678142
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ *
+ */
+public class IndexShardRelocatedException extends IllegalIndexShardStateException {
+
+ public IndexShardRelocatedException(ShardId shardId) {
+ super(shardId, IndexShardState.RELOCATED, "Already relocated");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java
new file mode 100644
index 0000000..c909a9c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+/**
+ *
+ */
+public class IndexShardStartedException extends IllegalIndexShardStateException {
+
+ public IndexShardStartedException(ShardId shardId) {
+ super(shardId, IndexShardState.STARTED, "Already started");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java
new file mode 100644
index 0000000..1b4edcb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ *
+ */
+public enum IndexShardState {
+ CREATED((byte) 0),
+ RECOVERING((byte) 1),
+ POST_RECOVERY((byte) 2),
+ STARTED((byte) 3),
+ RELOCATED((byte) 4),
+ CLOSED((byte) 5);
+
+ private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length];
+
+ static {
+ for (IndexShardState state : IndexShardState.values()) {
+ assert state.id() < IDS.length && state.id() >= 0;
+ IDS[state.id()] = state;
+ }
+ }
+
+ private final byte id;
+
+ IndexShardState(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public static IndexShardState fromId(byte id) throws ElasticsearchIllegalArgumentException {
+ if (id < 0 || id >= IDS.length) {
+ throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]");
+ }
+ return IDS[id];
+ }
+}
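
A sketch of the wire round trip the byte ids enable; ids outside the mapped range are rejected by fromId:

    byte onWire = IndexShardState.STARTED.id();              // 3
    IndexShardState decoded = IndexShardState.fromId(onWire);
    assert decoded == IndexShardState.STARTED;
    // IndexShardState.fromId((byte) 9) throws ElasticsearchIllegalArgumentException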
diff --git a/src/main/java/org/elasticsearch/index/shard/ShardId.java b/src/main/java/org/elasticsearch/index/shard/ShardId.java
new file mode 100644
index 0000000..b7fe363
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/ShardId.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.index.Index;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Allows for shard level components to be injected with the shard id.
+ */
+public class ShardId implements Serializable, Streamable {
+
+ private Index index;
+
+ private int shardId;
+
+ private int hashCode;
+
+ private ShardId() {
+
+ }
+
+ public ShardId(String index, int shardId) {
+ this(new Index(index), shardId);
+ }
+
+ public ShardId(Index index, int shardId) {
+ this.index = index;
+ this.shardId = shardId;
+ this.hashCode = computeHashCode();
+ }
+
+ public Index index() {
+ return this.index;
+ }
+
+ public String getIndex() {
+ return index().name();
+ }
+
+ public int id() {
+ return this.shardId;
+ }
+
+ public int getId() {
+ return id();
+ }
+
+ @Override
+ public String toString() {
+ return "[" + index.name() + "][" + shardId + "]";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ShardId shardId1 = (ShardId) o;
+ return shardId == shardId1.shardId && index.name().equals(shardId1.index.name());
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ private int computeHashCode() {
+ int result = index != null ? index.hashCode() : 0;
+ result = 31 * result + shardId;
+ return result;
+ }
+
+ public static ShardId readShardId(StreamInput in) throws IOException {
+ ShardId shardId = new ShardId();
+ shardId.readFrom(in);
+ return shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ index = Index.readIndexName(in);
+ shardId = in.readVInt();
+ hashCode = computeHashCode();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ index.writeTo(out);
+ out.writeVInt(shardId);
+ }
+} \ No newline at end of file
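
A sketch of the identity semantics: equality and the precomputed hash are based on the index name and the shard number, so the two construction paths below are interchangeable:

    ShardId byName  = new ShardId("twitter", 0);
    ShardId byIndex = new ShardId(new Index("twitter"), 0);
    assert byName.equals(byIndex);
    assert byName.hashCode() == byIndex.hashCode();
    assert "[twitter][0]".equals(byName.toString());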
diff --git a/src/main/java/org/elasticsearch/index/shard/ShardUtils.java b/src/main/java/org/elasticsearch/index/shard/ShardUtils.java
new file mode 100644
index 0000000..811ec4a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/ShardUtils.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.SegmentReader;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.SegmentReaderUtils;
+import org.elasticsearch.index.store.DirectoryUtils;
+import org.elasticsearch.index.store.Store;
+
+/**
+ */
+public class ShardUtils {
+
+ /**
+ * Tries to extract the shard id from a reader; returns null when it cannot.
+ * This method requires the reader to be a {@link SegmentReader} and the
+ * directory backing it to be a {@link org.elasticsearch.index.store.Store.StoreDirectory}.
+ * This holds in almost all cases; the percolator is currently the exception.
+ */
+ @Nullable
+ public static ShardId extractShardId(AtomicReader reader) {
+ return extractShardId(SegmentReaderUtils.segmentReaderOrNull(reader));
+ }
+
+ @Nullable
+ private static ShardId extractShardId(SegmentReader reader) {
+ if (reader != null) {
+ assert reader.getRefCount() > 0 : "SegmentReader is already closed";
+ // reader.directory doesn't call ensureOpen for internal reasons.
+ Store.StoreDirectory storeDir = DirectoryUtils.getStoreDirectory(reader.directory());
+ if (storeDir != null) {
+ return storeDir.shardId();
+ }
+ }
+ return null;
+ }
+
+}
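
Since extraction can fail, callers must handle the null case; a hedged usage sketch, where reader stands for any AtomicReader obtained from a shard's searcher:

    ShardId shardId = ShardUtils.extractShardId(reader);
    if (shardId == null) {
        // not a SegmentReader backed by a Store.StoreDirectory
        // (the percolator's in-memory reader, for example)
    } else {
        // safe to attribute per-shard statistics to shardId
    }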
diff --git a/src/main/java/org/elasticsearch/index/shard/service/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/service/IndexShard.java
new file mode 100644
index 0000000..9bddf00
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/service/IndexShard.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard.service;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.cache.filter.FilterCacheStats;
+import org.elasticsearch.index.cache.filter.ShardFilterCache;
+import org.elasticsearch.index.cache.id.IdCacheStats;
+import org.elasticsearch.index.cache.id.ShardIdCache;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.fielddata.FieldDataStats;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.ShardFieldData;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.get.GetStats;
+import org.elasticsearch.index.get.ShardGetService;
+import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
+import org.elasticsearch.index.percolator.stats.ShardPercolateService;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.stats.SearchStats;
+import org.elasticsearch.index.search.stats.ShardSearchService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.shard.IllegalIndexShardStateException;
+import org.elasticsearch.index.shard.IndexShardComponent;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.index.termvectors.ShardTermVectorService;
+import org.elasticsearch.index.warmer.ShardIndexWarmerService;
+import org.elasticsearch.index.warmer.WarmerStats;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+
+/**
+ *
+ */
+public interface IndexShard extends IndexShardComponent {
+
+ ShardIndexingService indexingService();
+
+ ShardGetService getService();
+
+ ShardSearchService searchService();
+
+ ShardIndexWarmerService warmerService();
+
+ ShardFilterCache filterCache();
+
+ ShardIdCache idCache();
+
+ ShardFieldData fieldData();
+
+ ShardRouting routingEntry();
+
+ DocsStats docStats();
+
+ StoreStats storeStats();
+
+ IndexingStats indexingStats(String... types);
+
+ SearchStats searchStats(String... groups);
+
+ GetStats getStats();
+
+ MergeStats mergeStats();
+
+ SegmentsStats segmentStats();
+
+ RefreshStats refreshStats();
+
+ FlushStats flushStats();
+
+ WarmerStats warmerStats();
+
+ FilterCacheStats filterCacheStats();
+
+ IdCacheStats idCacheStats();
+
+ FieldDataStats fieldDataStats(String... fields);
+
+ CompletionStats completionStats(String... fields);
+
+ TranslogStats translogStats();
+
+ PercolatorQueriesRegistry percolateRegistry();
+
+ ShardPercolateService shardPercolateService();
+
+ ShardTermVectorService termVectorService();
+
+ MapperService mapperService();
+
+ IndexFieldDataService indexFieldDataService();
+
+ IndexService indexService();
+
+ IndexShardState state();
+
+ Engine.Create prepareCreate(SourceToParse source) throws ElasticsearchException;
+
+ ParsedDocument create(Engine.Create create) throws ElasticsearchException;
+
+ Engine.Index prepareIndex(SourceToParse source) throws ElasticsearchException;
+
+ ParsedDocument index(Engine.Index index) throws ElasticsearchException;
+
+ Engine.Delete prepareDelete(String type, String id, long version) throws ElasticsearchException;
+
+ void delete(Engine.Delete delete) throws ElasticsearchException;
+
+ Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, String... types) throws ElasticsearchException;
+
+ void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticsearchException;
+
+ Engine.GetResult get(Engine.Get get) throws ElasticsearchException;
+
+ void refresh(Engine.Refresh refresh) throws ElasticsearchException;
+
+ void flush(Engine.Flush flush) throws ElasticsearchException;
+
+ void optimize(Engine.Optimize optimize) throws ElasticsearchException;
+
+ <T> T snapshot(Engine.SnapshotHandler<T> snapshotHandler) throws EngineException;
+
+ SnapshotIndexCommit snapshotIndex() throws EngineException;
+
+ void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException;
+
+ Engine.Searcher acquireSearcher(String source);
+
+ Engine.Searcher acquireSearcher(String source, Mode mode);
+
+ /**
+ * Returns <tt>true</tt> if this shard can ignore a recovery attempt made to it (since it is already recovering or has already recovered)
+ */
+ public boolean ignoreRecoveryAttempt();
+
+ void readAllowed() throws IllegalIndexShardStateException;
+
+ void readAllowed(Mode mode) throws IllegalIndexShardStateException;
+
+ public enum Mode {
+ READ,
+ WRITE
+ }
+}
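
Searchers returned by acquireSearcher(...) must be released by the caller; a sketch of the acquire/try/finally pattern used throughout this codebase (see docStats() in InternalIndexShard below; "my_source" is an arbitrary diagnostic label):

    Engine.Searcher searcher = indexShard.acquireSearcher("my_source");
    try {
        int liveDocs = searcher.reader().numDocs();
        // run queries via searcher.searcher() ...
    } finally {
        searcher.release();
    }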
diff --git a/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java b/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java
new file mode 100644
index 0000000..e0a34f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java
@@ -0,0 +1,1045 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard.service;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.ThreadInterruptedException;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.filter.FilterCacheStats;
+import org.elasticsearch.index.cache.filter.ShardFilterCache;
+import org.elasticsearch.index.cache.id.IdCacheStats;
+import org.elasticsearch.index.cache.id.ShardIdCache;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.engine.*;
+import org.elasticsearch.index.fielddata.FieldDataStats;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.ShardFieldData;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.get.GetStats;
+import org.elasticsearch.index.get.ShardGetService;
+import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
+import org.elasticsearch.index.percolator.stats.ShardPercolateService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.index.search.stats.SearchStats;
+import org.elasticsearch.index.search.stats.ShardSearchService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.index.termvectors.ShardTermVectorService;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.warmer.ShardIndexWarmerService;
+import org.elasticsearch.index.warmer.WarmerStats;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.InternalIndicesLifecycle;
+import org.elasticsearch.indices.recovery.RecoveryStatus;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.channels.ClosedByInterruptException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.index.mapper.SourceToParse.source;
+
+/**
+ *
+ */
+public class InternalIndexShard extends AbstractIndexShardComponent implements IndexShard {
+
+ private final ThreadPool threadPool;
+ private final IndexSettingsService indexSettingsService;
+ private final MapperService mapperService;
+ private final IndexQueryParserService queryParserService;
+ private final IndexCache indexCache;
+ private final InternalIndicesLifecycle indicesLifecycle;
+ private final Store store;
+ private final MergeSchedulerProvider mergeScheduler;
+ private final Engine engine;
+ private final Translog translog;
+ private final IndexAliasesService indexAliasesService;
+ private final ShardIndexingService indexingService;
+ private final ShardSearchService searchService;
+ private final ShardGetService getService;
+ private final ShardIndexWarmerService shardWarmerService;
+ private final ShardFilterCache shardFilterCache;
+ private final ShardIdCache shardIdCache;
+ private final ShardFieldData shardFieldData;
+ private final PercolatorQueriesRegistry percolatorQueriesRegistry;
+ private final ShardPercolateService shardPercolateService;
+ private final CodecService codecService;
+ private final ShardTermVectorService termVectorService;
+ private final IndexFieldDataService indexFieldDataService;
+ private final IndexService indexService;
+
+ private final Object mutex = new Object();
+ private final String checkIndexOnStartup;
+ private long checkIndexTook = 0;
+ private volatile IndexShardState state;
+
+ private TimeValue refreshInterval;
+ private final TimeValue mergeInterval;
+
+ private volatile ScheduledFuture refreshScheduledFuture;
+ private volatile ScheduledFuture mergeScheduleFuture;
+ private volatile ShardRouting shardRouting;
+
+ private RecoveryStatus peerRecoveryStatus;
+
+ private ApplyRefreshSettings applyRefreshSettings = new ApplyRefreshSettings();
+
+ private final MeanMetric refreshMetric = new MeanMetric();
+ private final MeanMetric flushMetric = new MeanMetric();
+
+ @Inject
+ public InternalIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, Engine engine, MergeSchedulerProvider mergeScheduler, Translog translog,
+ ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, ShardIndexWarmerService shardWarmerService,
+ ShardFilterCache shardFilterCache, ShardIdCache shardIdCache, ShardFieldData shardFieldData,
+ PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService,
+ ShardTermVectorService termVectorService, IndexFieldDataService indexFieldDataService, IndexService indexService) {
+ super(shardId, indexSettings);
+ this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
+ this.indexSettingsService = indexSettingsService;
+ this.store = store;
+ this.engine = engine;
+ this.mergeScheduler = mergeScheduler;
+ this.translog = translog;
+ this.threadPool = threadPool;
+ this.mapperService = mapperService;
+ this.queryParserService = queryParserService;
+ this.indexCache = indexCache;
+ this.indexAliasesService = indexAliasesService;
+ this.indexingService = indexingService;
+ this.getService = getService.setIndexShard(this);
+ this.termVectorService = termVectorService.setIndexShard(this);
+ this.searchService = searchService;
+ this.shardWarmerService = shardWarmerService;
+ this.shardFilterCache = shardFilterCache;
+ this.shardIdCache = shardIdCache;
+ this.shardFieldData = shardFieldData;
+ this.percolatorQueriesRegistry = percolatorQueriesRegistry;
+ this.shardPercolateService = shardPercolateService;
+ this.indexFieldDataService = indexFieldDataService;
+ this.indexService = indexService;
+ this.codecService = codecService;
+ state = IndexShardState.CREATED;
+
+ this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, engine.defaultRefreshInterval());
+ this.mergeInterval = indexSettings.getAsTime("index.merge.async_interval", TimeValue.timeValueSeconds(1));
+
+ indexSettingsService.addListener(applyRefreshSettings);
+
+ logger.debug("state: [CREATED]");
+
+ this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false");
+ }
+
+ public MergeSchedulerProvider mergeScheduler() {
+ return this.mergeScheduler;
+ }
+
+ public Store store() {
+ return this.store;
+ }
+
+ public Engine engine() {
+ return engine;
+ }
+
+ public Translog translog() {
+ return translog;
+ }
+
+ public ShardIndexingService indexingService() {
+ return this.indexingService;
+ }
+
+ @Override
+ public ShardGetService getService() {
+ return this.getService;
+ }
+
+ @Override
+ public ShardTermVectorService termVectorService() {
+ return termVectorService;
+ }
+
+ @Override
+ public IndexFieldDataService indexFieldDataService() {
+ return indexFieldDataService;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return mapperService;
+ }
+
+ @Override
+ public IndexService indexService() {
+ return indexService;
+ }
+
+ @Override
+ public ShardSearchService searchService() {
+ return this.searchService;
+ }
+
+ @Override
+ public ShardIndexWarmerService warmerService() {
+ return this.shardWarmerService;
+ }
+
+ @Override
+ public ShardFilterCache filterCache() {
+ return this.shardFilterCache;
+ }
+
+ @Override
+ public ShardIdCache idCache() {
+ return this.shardIdCache;
+ }
+
+ @Override
+ public ShardFieldData fieldData() {
+ return this.shardFieldData;
+ }
+
+ @Override
+ public ShardRouting routingEntry() {
+ return this.shardRouting;
+ }
+
+ public InternalIndexShard routingEntry(ShardRouting newRouting) {
+ ShardRouting currentRouting = this.shardRouting;
+ if (!newRouting.shardId().equals(shardId())) {
+ throw new ElasticsearchIllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]");
+ }
+ if (currentRouting != null) {
+ if (!newRouting.primary() && currentRouting.primary()) {
+ logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
+ }
+ // if it's the same routing, return
+ if (currentRouting.equals(newRouting)) {
+ return this;
+ }
+ }
+
+ if (state == IndexShardState.POST_RECOVERY) {
+ // if the state is started or relocating (because it might move right away from started to relocating)
+ // then move to STARTED
+ if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
+ // we want to refresh *before* we move to internal STARTED state
+ try {
+ engine.refresh(new Engine.Refresh("cluster_state_started").force(true));
+ } catch (Throwable t) {
+ logger.debug("failed to refresh due to move to cluster wide started", t);
+ }
+
+ boolean movedToStarted = false;
+ synchronized (mutex) {
+ // do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
+ if (state == IndexShardState.POST_RECOVERY) {
+ changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
+ movedToStarted = true;
+ } else {
+ logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
+ }
+ }
+ if (movedToStarted) {
+ indicesLifecycle.afterIndexShardStarted(this);
+ }
+ }
+ }
+
+ this.shardRouting = newRouting;
+ indicesLifecycle.shardRoutingChanged(this, currentRouting, newRouting);
+
+ return this;
+ }
+
+ /**
+ * Marks the shard as recovering, failing with an exception if the shard is not allowed to move into the recovering state.
+ */
+ public IndexShardState recovering(String reason) throws IndexShardStartedException,
+ IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
+ synchronized (mutex) {
+ if (state == IndexShardState.CLOSED) {
+ throw new IndexShardClosedException(shardId);
+ }
+ if (state == IndexShardState.STARTED) {
+ throw new IndexShardStartedException(shardId);
+ }
+ if (state == IndexShardState.RELOCATED) {
+ throw new IndexShardRelocatedException(shardId);
+ }
+ if (state == IndexShardState.RECOVERING) {
+ throw new IndexShardRecoveringException(shardId);
+ }
+ if (state == IndexShardState.POST_RECOVERY) {
+ throw new IndexShardRecoveringException(shardId);
+ }
+ return changeState(IndexShardState.RECOVERING, reason);
+ }
+ }
+
+ public InternalIndexShard relocated(String reason) throws IndexShardNotStartedException {
+ synchronized (mutex) {
+ if (state != IndexShardState.STARTED) {
+ throw new IndexShardNotStartedException(shardId, state);
+ }
+ changeState(IndexShardState.RELOCATED, reason);
+ }
+ return this;
+ }
+
+ @Override
+ public IndexShardState state() {
+ return state;
+ }
+
+ /**
+ * Changes the state of the current shard
+ *
+ * @param newState the new shard state
+ * @param reason the reason for the state change
+ * @return the previous shard state
+ */
+ private IndexShardState changeState(IndexShardState newState, String reason) {
+ logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason);
+ IndexShardState previousState = state;
+ state = newState;
+ this.indicesLifecycle.indexShardStateChanged(this, previousState, reason);
+ return previousState;
+ }
+
+ @Override
+ public Engine.Create prepareCreate(SourceToParse source) throws ElasticsearchException {
+ long startTime = System.nanoTime();
+ DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(source.type());
+ ParsedDocument doc = docMapper.parse(source);
+ return new Engine.Create(docMapper, docMapper.uidMapper().term(doc.uid().stringValue()), doc).startTime(startTime);
+ }
+
+ @Override
+ public ParsedDocument create(Engine.Create create) throws ElasticsearchException {
+ writeAllowed(create.origin());
+ create = indexingService.preCreate(create);
+ if (logger.isTraceEnabled()) {
+ logger.trace("index {}", create.docs());
+ }
+ engine.create(create);
+ create.endTime(System.nanoTime());
+ indexingService.postCreate(create);
+ return create.parsedDoc();
+ }
+
+ @Override
+ public Engine.Index prepareIndex(SourceToParse source) throws ElasticsearchException {
+ long startTime = System.nanoTime();
+ DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(source.type());
+ ParsedDocument doc = docMapper.parse(source);
+ return new Engine.Index(docMapper, docMapper.uidMapper().term(doc.uid().stringValue()), doc).startTime(startTime);
+ }
+
+ @Override
+ public ParsedDocument index(Engine.Index index) throws ElasticsearchException {
+ writeAllowed(index.origin());
+ index = indexingService.preIndex(index);
+ try {
+ if (logger.isTraceEnabled()) {
+ logger.trace("index {}", index.docs());
+ }
+ engine.index(index);
+ index.endTime(System.nanoTime());
+ } catch (RuntimeException ex) {
+ indexingService.failedIndex(index);
+ throw ex;
+ }
+ indexingService.postIndex(index);
+ return index.parsedDoc();
+ }
+
+ @Override
+ public Engine.Delete prepareDelete(String type, String id, long version) throws ElasticsearchException {
+ long startTime = System.nanoTime();
+ DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(type);
+ return new Engine.Delete(type, id, docMapper.uidMapper().term(type, id)).version(version).startTime(startTime);
+ }
+
+ @Override
+ public void delete(Engine.Delete delete) throws ElasticsearchException {
+ writeAllowed(delete.origin());
+ delete = indexingService.preDelete(delete);
+ try {
+ if (logger.isTraceEnabled()) {
+ logger.trace("delete [{}]", delete.uid().text());
+ }
+ engine.delete(delete);
+ delete.endTime(System.nanoTime());
+ } catch (RuntimeException ex) {
+ indexingService.failedDelete(delete);
+ throw ex;
+ }
+ indexingService.postDelete(delete);
+ }
+
+ @Override
+ public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, String... types) throws ElasticsearchException {
+ long startTime = System.nanoTime();
+ if (types == null) {
+ types = Strings.EMPTY_ARRAY;
+ }
+ Query query = queryParserService.parseQuery(source).query();
+ query = filterQueryIfNeeded(query, types);
+
+ Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases);
+ Filter parentFilter = mapperService.hasNested() ? indexCache.filter().cache(NonNestedDocsFilter.INSTANCE) : null;
+ return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, types).startTime(startTime);
+ }
+
+ @Override
+ public void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticsearchException {
+ writeAllowed(deleteByQuery.origin());
+ if (logger.isTraceEnabled()) {
+ logger.trace("delete_by_query [{}]", deleteByQuery.query());
+ }
+ deleteByQuery = indexingService.preDeleteByQuery(deleteByQuery);
+ engine.delete(deleteByQuery);
+ deleteByQuery.endTime(System.nanoTime());
+ indexingService.postDeleteByQuery(deleteByQuery);
+ }
+
+ @Override
+ public Engine.GetResult get(Engine.Get get) throws ElasticsearchException {
+ readAllowed();
+ return engine.get(get);
+ }
+
+ @Override
+ public void refresh(Engine.Refresh refresh) throws ElasticsearchException {
+ verifyNotClosed();
+ if (logger.isTraceEnabled()) {
+ logger.trace("refresh with {}", refresh);
+ }
+ long time = System.nanoTime();
+ engine.refresh(refresh);
+ refreshMetric.inc(System.nanoTime() - time);
+ }
+
+ @Override
+ public RefreshStats refreshStats() {
+ return new RefreshStats(refreshMetric.count(), TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()));
+ }
+
+ @Override
+ public FlushStats flushStats() {
+ return new FlushStats(flushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum()));
+ }
+
+ @Override
+ public DocsStats docStats() {
+ final Engine.Searcher searcher = acquireSearcher("doc_stats");
+ try {
+ return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs());
+ } finally {
+ searcher.release();
+ }
+ }
+
+ @Override
+ public IndexingStats indexingStats(String... types) {
+ return indexingService.stats(types);
+ }
+
+ @Override
+ public SearchStats searchStats(String... groups) {
+ return searchService.stats(groups);
+ }
+
+ @Override
+ public GetStats getStats() {
+ return getService.stats();
+ }
+
+ @Override
+ public StoreStats storeStats() {
+ try {
+ return store.stats();
+ } catch (IOException e) {
+ throw new ElasticsearchException("io exception while building 'store stats'", e);
+ }
+ }
+
+ @Override
+ public MergeStats mergeStats() {
+ return mergeScheduler.stats();
+ }
+
+ @Override
+ public SegmentsStats segmentStats() {
+ return engine.segmentsStats();
+ }
+
+ @Override
+ public WarmerStats warmerStats() {
+ return shardWarmerService.stats();
+ }
+
+ @Override
+ public FilterCacheStats filterCacheStats() {
+ return shardFilterCache.stats();
+ }
+
+ @Override
+ public FieldDataStats fieldDataStats(String... fields) {
+ return shardFieldData.stats(fields);
+ }
+
+ @Override
+ public PercolatorQueriesRegistry percolateRegistry() {
+ return percolatorQueriesRegistry;
+ }
+
+ @Override
+ public ShardPercolateService shardPercolateService() {
+ return shardPercolateService;
+ }
+
+ @Override
+ public IdCacheStats idCacheStats() {
+ return shardIdCache.stats();
+ }
+
+ @Override
+ public TranslogStats translogStats() {
+ return translog.stats();
+ }
+
+ @Override
+ public CompletionStats completionStats(String... fields) {
+ CompletionStats completionStats = new CompletionStats();
+ final Engine.Searcher currentSearcher = acquireSearcher("completion_stats");
+ try {
+ PostingsFormat postingsFormat = this.codecService.postingsFormatService().get(Completion090PostingsFormat.CODEC_NAME).get();
+ if (postingsFormat instanceof Completion090PostingsFormat) {
+ Completion090PostingsFormat completionPostingsFormat = (Completion090PostingsFormat) postingsFormat;
+ completionStats.add(completionPostingsFormat.completionStats(currentSearcher.reader(), fields));
+ }
+ } finally {
+ currentSearcher.release();
+ }
+ return completionStats;
+ }
+
+ @Override
+ public void flush(Engine.Flush flush) throws ElasticsearchException {
+ // we allow flush while recovering, since we allow operations to happen
+ // while recovering, and we want to keep the translog at bay (except for deletes, which
+ // we don't gc).
+ verifyStartedOrRecovering();
+ if (logger.isTraceEnabled()) {
+ logger.trace("flush with {}", flush);
+ }
+ long time = System.nanoTime();
+ engine.flush(flush);
+ flushMetric.inc(System.nanoTime() - time);
+ }
+
+ @Override
+ public void optimize(Engine.Optimize optimize) throws ElasticsearchException {
+ verifyStarted();
+ if (logger.isTraceEnabled()) {
+ logger.trace("optimize with {}", optimize);
+ }
+ engine.optimize(optimize);
+ }
+
+ @Override
+ public <T> T snapshot(Engine.SnapshotHandler<T> snapshotHandler) throws EngineException {
+ IndexShardState state = this.state; // one time volatile read
+ // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
+ if (state == IndexShardState.POST_RECOVERY || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
+ return engine.snapshot(snapshotHandler);
+ } else {
+ throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
+ }
+ }
+
+ @Override
+ public SnapshotIndexCommit snapshotIndex() throws EngineException {
+ IndexShardState state = this.state; // one time volatile read
+ // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
+ if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
+ return engine.snapshotIndex();
+ } else {
+ throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
+ }
+ }
+
+ @Override
+ public void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException {
+ verifyStarted();
+ engine.recover(recoveryHandler);
+ }
+
+ @Override
+ public Engine.Searcher acquireSearcher(String source) {
+ return acquireSearcher(source, Mode.READ);
+ }
+
+ @Override
+ public Engine.Searcher acquireSearcher(String source, Mode mode) {
+ readAllowed(mode);
+ return engine.acquireSearcher(source);
+ }
+
+ public void close(String reason) {
+ synchronized (mutex) {
+ indexSettingsService.removeListener(applyRefreshSettings);
+ if (state != IndexShardState.CLOSED) {
+ if (refreshScheduledFuture != null) {
+ refreshScheduledFuture.cancel(true);
+ refreshScheduledFuture = null;
+ }
+ if (mergeScheduleFuture != null) {
+ mergeScheduleFuture.cancel(true);
+ mergeScheduleFuture = null;
+ }
+ }
+ changeState(IndexShardState.CLOSED, reason);
+ }
+ }
+
+ public long checkIndexTook() {
+ return this.checkIndexTook;
+ }
+
+ public InternalIndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
+ synchronized (mutex) {
+ if (state == IndexShardState.CLOSED) {
+ throw new IndexShardClosedException(shardId);
+ }
+ if (state == IndexShardState.STARTED) {
+ throw new IndexShardStartedException(shardId);
+ }
+ if (state == IndexShardState.RELOCATED) {
+ throw new IndexShardRelocatedException(shardId);
+ }
+ if (Booleans.parseBoolean(checkIndexOnStartup, false)) {
+ checkIndex(true);
+ }
+ engine.start();
+ startScheduledTasksIfNeeded();
+ changeState(IndexShardState.POST_RECOVERY, reason);
+ }
+ indicesLifecycle.afterIndexShardPostRecovery(this);
+ return this;
+ }
+
+ /**
+ * After the store has been recovered, we need to start the engine in order to apply operations
+ */
+ public void performRecoveryPrepareForTranslog() throws ElasticsearchException {
+ if (state != IndexShardState.RECOVERING) {
+ throw new IndexShardNotRecoveringException(shardId, state);
+ }
+ // also check here, before we apply the translog
+ if (Booleans.parseBoolean(checkIndexOnStartup, false)) {
+ checkIndex(true);
+ }
+ // we disable gc of deletes since we allow operations to be executed against the shard while recovering,
+ // but we need to make sure we don't lose deletes until we are done recovering
+ engine.enableGcDeletes(false);
+ engine.start();
+ }
+
+ /**
+ * The peer recovery status if this shard recovered from a peer shard.
+ */
+ public RecoveryStatus peerRecoveryStatus() {
+ return this.peerRecoveryStatus;
+ }
+
+ public void performRecoveryFinalization(boolean withFlush, RecoveryStatus peerRecoveryStatus) throws ElasticsearchException {
+ performRecoveryFinalization(withFlush);
+ this.peerRecoveryStatus = peerRecoveryStatus;
+ }
+
+ public void performRecoveryFinalization(boolean withFlush) throws ElasticsearchException {
+ if (withFlush) {
+ engine.flush(new Engine.Flush());
+ }
+ // clear unreferenced files
+ translog.clearUnreferenced();
+ engine.refresh(new Engine.Refresh("recovery_finalization").force(true));
+ synchronized (mutex) {
+ changeState(IndexShardState.POST_RECOVERY, "post recovery");
+ }
+ indicesLifecycle.afterIndexShardPostRecovery(this);
+ startScheduledTasksIfNeeded();
+ engine.enableGcDeletes(true);
+ }
+
+ public void performRecoveryOperation(Translog.Operation operation) throws ElasticsearchException {
+ if (state != IndexShardState.RECOVERING) {
+ throw new IndexShardNotRecoveringException(shardId, state);
+ }
+ try {
+ switch (operation.opType()) {
+ case CREATE:
+ Translog.Create create = (Translog.Create) operation;
+ engine.create(prepareCreate(source(create.source()).type(create.type()).id(create.id())
+ .routing(create.routing()).parent(create.parent()).timestamp(create.timestamp()).ttl(create.ttl())).version(create.version())
+ .origin(Engine.Operation.Origin.RECOVERY));
+ break;
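+ // SAVE is the translog operation type used for plain index operations (hence the cast to Translog.Index below)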
+ case SAVE:
+ Translog.Index index = (Translog.Index) operation;
+ engine.index(prepareIndex(source(index.source()).type(index.type()).id(index.id())
+ .routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl())).version(index.version())
+ .origin(Engine.Operation.Origin.RECOVERY));
+ break;
+ case DELETE:
+ Translog.Delete delete = (Translog.Delete) operation;
+ Uid uid = Uid.createUid(delete.uid().text());
+ engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid()).version(delete.version())
+ .origin(Engine.Operation.Origin.RECOVERY));
+ break;
+ case DELETE_BY_QUERY:
+ Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
+ engine.delete(prepareDeleteByQuery(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types()).origin(Engine.Operation.Origin.RECOVERY));
+ break;
+ default:
+ throw new ElasticsearchIllegalStateException("No operation defined for [" + operation + "]");
+ }
+ } catch (ElasticsearchException e) {
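+ // walk the cause chain to see whether this failure wraps an exception that may be ignored during recovery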
+ boolean hasIgnoreOnRecoveryException = false;
+ ElasticsearchException current = e;
+ while (true) {
+ if (current instanceof IgnoreOnRecoveryEngineException) {
+ hasIgnoreOnRecoveryException = true;
+ break;
+ }
+ if (current.getCause() instanceof ElasticsearchException) {
+ current = (ElasticsearchException) current.getCause();
+ } else {
+ break;
+ }
+ }
+ if (!hasIgnoreOnRecoveryException) {
+ throw e;
+ }
+ }
+ }
+
+ /**
+ * Returns <tt>true</tt> if this shard can ignore a recovery attempt made to it (since it is already performing, or has already completed, a recovery)
+ */
+ public boolean ignoreRecoveryAttempt() {
+ IndexShardState state = state(); // one time volatile read
+ return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED ||
+ state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED;
+ }
+
+ public void readAllowed() throws IllegalIndexShardStateException {
+ readAllowed(Mode.READ);
+ }
+
+
+ public void readAllowed(Mode mode) throws IllegalIndexShardStateException {
+ IndexShardState state = this.state; // one time volatile read
+ switch (mode) {
+ case READ:
+ if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
+ throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated");
+ }
+ break;
+ case WRITE:
+ if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
+ throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated/recovering/post_recovery");
+ }
+ break;
+ }
+ }
+
+ private void writeAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException {
+ IndexShardState state = this.state; // one time volatile read
+
+ if (origin == Engine.Operation.Origin.PRIMARY) {
+ // for primaries, we only allow writes when actually started (so the cluster has decided we started);
+ // otherwise, the operation needs to be retried. We also still allow indexing while relocated, in case the relocation fails
+ if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
+ throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/relocated, origin [" + origin + "]");
+ }
+ } else {
+ // for replicas, we also allow writes while recovering, since we index into replicas during recovery as well,
+ // and rely on version checks to make sure it stays consistent
+ if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
+ throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
+ }
+ }
+ }
+
+ private void verifyStartedOrRecovering() throws IllegalIndexShardStateException {
+ IndexShardState state = this.state; // one time volatile read
+ if (state != IndexShardState.STARTED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
+ throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering");
+ }
+ }
+
+ private void verifyNotClosed() throws IllegalIndexShardStateException {
+ IndexShardState state = this.state; // one time volatile read
+ if (state == IndexShardState.CLOSED) {
+ throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed");
+ }
+ }
+
+ private void verifyStarted() throws IllegalIndexShardStateException {
+ IndexShardState state = this.state; // one time volatile read
+ if (state != IndexShardState.STARTED) {
+ throw new IndexShardNotStartedException(shardId, state);
+ }
+ }
+
+ private void startScheduledTasksIfNeeded() {
+ if (refreshInterval.millis() > 0) {
+ refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, new EngineRefresher());
+ logger.debug("scheduling refresher every {}", refreshInterval);
+ } else {
+ logger.debug("scheduled refresher disabled");
+ }
+ // since we can do async merging, it will not be called explicitly when indexing (adding / deleting docs), only when flushing.
+ // so, make sure we periodically call it; the interval needs to be small enough so merging will actually
+ // happen and reduce the number of segments
+ if (mergeInterval.millis() > 0) {
+ mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, new EngineMerger());
+ logger.debug("scheduling optimizer / merger every {}", mergeInterval);
+ } else {
+ logger.debug("scheduled optimizer / merger disabled");
+ }
+ }
+
+ private Query filterQueryIfNeeded(Query query, String[] types) {
+ Filter searchFilter = mapperService.searchFilter(types);
+ if (searchFilter != null) {
+ query = new XFilteredQuery(query, indexCache.filter().cache(searchFilter));
+ }
+ return query;
+ }
+
+ public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
+
+ private class ApplyRefreshSettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ synchronized (mutex) {
+ if (state == IndexShardState.CLOSED) {
+ return;
+ }
+ TimeValue refreshInterval = settings.getAsTime(INDEX_REFRESH_INTERVAL, InternalIndexShard.this.refreshInterval);
+ if (!refreshInterval.equals(InternalIndexShard.this.refreshInterval)) {
+ logger.info("updating refresh_interval from [{}] to [{}]", InternalIndexShard.this.refreshInterval, refreshInterval);
+ if (refreshScheduledFuture != null) {
+ refreshScheduledFuture.cancel(false);
+ refreshScheduledFuture = null;
+ }
+ InternalIndexShard.this.refreshInterval = refreshInterval;
+ if (refreshInterval.millis() > 0) {
+ refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, new EngineRefresher());
+ }
+ }
+ }
+ }
+ }
+
+ class EngineRefresher implements Runnable {
+ @Override
+ public void run() {
+ // we first check if a refresh is needed; if not, we reschedule, otherwise we fork, refresh, and then reschedule
+ if (!engine().refreshNeeded()) {
+ synchronized (mutex) {
+ if (state != IndexShardState.CLOSED) {
+ refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, this);
+ }
+ }
+ return;
+ }
+ threadPool.executor(ThreadPool.Names.REFRESH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ if (engine.refreshNeeded()) {
+ refresh(new Engine.Refresh("scheduled").force(false));
+ }
+ } catch (EngineClosedException e) {
+ // we are being closed, ignore
+ } catch (RefreshFailedEngineException e) {
+ if (e.getCause() instanceof InterruptedException) {
+ // ignore, we are being shutdown
+ } else if (e.getCause() instanceof ClosedByInterruptException) {
+ // ignore, we are being shutdown
+ } else if (e.getCause() instanceof ThreadInterruptedException) {
+ // ignore, we are being shutdown
+ } else {
+ if (state != IndexShardState.CLOSED) {
+ logger.warn("Failed to perform scheduled engine refresh", e);
+ }
+ }
+ } catch (Exception e) {
+ if (state != IndexShardState.CLOSED) {
+ logger.warn("Failed to perform scheduled engine refresh", e);
+ }
+ }
+ synchronized (mutex) {
+ if (state != IndexShardState.CLOSED) {
+ refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, EngineRefresher.this);
+ }
+ }
+ }
+ });
+ }
+ }
+
+ class EngineMerger implements Runnable {
+ @Override
+ public void run() {
+ if (!engine().possibleMergeNeeded()) {
+ synchronized (mutex) {
+ if (state != IndexShardState.CLOSED) {
+ mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, this);
+ }
+ }
+ return;
+ }
+ threadPool.executor(ThreadPool.Names.MERGE).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ engine.maybeMerge();
+ } catch (EngineClosedException e) {
+ // we are being closed, ignore
+ } catch (OptimizeFailedEngineException e) {
+ if (e.getCause() instanceof EngineClosedException) {
+ // ignore, we are being shutdown
+ } else if (e.getCause() instanceof InterruptedException) {
+ // ignore, we are being shutdown
+ } else if (e.getCause() instanceof ClosedByInterruptException) {
+ // ignore, we are being shutdown
+ } else if (e.getCause() instanceof ThreadInterruptedException) {
+ // ignore, we are being shutdown
+ } else {
+ if (state != IndexShardState.CLOSED) {
+ logger.warn("Failed to perform scheduled engine optimize/merge", e);
+ }
+ }
+ } catch (Exception e) {
+ if (state != IndexShardState.CLOSED) {
+ logger.warn("Failed to perform scheduled engine optimize/merge", e);
+ }
+ }
+ synchronized (mutex) {
+ if (state != IndexShardState.CLOSED) {
+ mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, EngineMerger.this);
+ }
+ }
+ }
+ });
+ }
+ }
+
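+ // runs Lucene's CheckIndex over the shard's store and, based on the configured checkIndexOnStartup
+ // value, either logs the failure, throws, or (for "fix") rewrites the segments file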
+ private void checkIndex(boolean throwException) throws IndexShardException {
+ try {
+ checkIndexTook = 0;
+ long time = System.currentTimeMillis();
+ if (!Lucene.indexExists(store.directory())) {
+ return;
+ }
+ CheckIndex checkIndex = new CheckIndex(store.directory());
+ BytesStreamOutput os = new BytesStreamOutput();
+ PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
+ checkIndex.setInfoStream(out);
+ CheckIndex.Status status = checkIndex.checkIndex();
+ out.flush(); // flush after the check so the buffered info stream output is fully written before it is read below
+ if (!status.clean) {
+ if (state == IndexShardState.CLOSED) {
+ // ignore if closed....
+ return;
+ }
+ logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
+ if ("fix".equalsIgnoreCase(checkIndexOnStartup)) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("fixing index, writing new segments file ...");
+ }
+ checkIndex.fixIndex(status);
+ if (logger.isDebugEnabled()) {
+ logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName);
+ }
+ } else {
+ // only throw a failure if we are not going to fix the index
+ if (throwException) {
+ throw new IndexShardException(shardId, "index check failure");
+ }
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
+ }
+ }
+ checkIndexTook = System.currentTimeMillis() - time;
+ } catch (Exception e) {
+ logger.warn("failed to check index", e);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java
new file mode 100644
index 0000000..7ae0067
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.*;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Abstract implementation of {@link SimilarityProvider} providing common behaviour
+ */
+public abstract class AbstractSimilarityProvider implements SimilarityProvider {
+
+ protected static final Normalization NO_NORMALIZATION = new Normalization.NoNormalization();
+
+ private final String name;
+
+ /**
+ * Creates a new AbstractSimilarityProvider with the given name
+ *
+ * @param name Name of the Provider
+ */
+ protected AbstractSimilarityProvider(String name) {
+ this.name = name;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String name() {
+ return this.name;
+ }
+
+ /**
+ * Parses the given Settings and creates the appropriate {@link Normalization}
+ *
+ * @param settings Settings to parse
+ * @return {@link Normalization} referred to in the Settings
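+ * <p/>
+ * For example, a (hypothetical) settings fragment for a single similarity (with the
+ * "index.similarity.&lt;name&gt;." prefix already stripped) could contain:
+ * <pre>
+ * normalization: h2
+ * normalization.h2.c: 1.0
+ * </pre>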
+ */
+ protected Normalization parseNormalization(Settings settings) {
+ String normalization = settings.get("normalization");
+
+ if ("no".equals(normalization)) {
+ return NO_NORMALIZATION;
+ } else if ("h1".equals(normalization)) {
+ float c = settings.getAsFloat("normalization.h1.c", 1f);
+ return new NormalizationH1(c);
+ } else if ("h2".equals(normalization)) {
+ float c = settings.getAsFloat("normalization.h2.c", 1f);
+ return new NormalizationH2(c);
+ } else if ("h3".equals(normalization)) {
+ float c = settings.getAsFloat("normalization.h3.c", 800f);
+ return new NormalizationH3(c);
+ } else if ("z".equals(normalization)) {
+ float z = settings.getAsFloat("normalization.z.z", 0.30f);
+ return new NormalizationZ(z);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unsupported Normalization [" + normalization + "]");
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java
new file mode 100644
index 0000000..ca24985
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.BM25Similarity;
+import org.apache.lucene.search.similarities.Similarity;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * {@link SimilarityProvider} for the {@link BM25Similarity}.
+ * <p/>
+ * Configuration options available:
+ * <ul>
+ * <li>k1</li>
+ * <li>b</li>
+ * <li>discount_overlaps</li>
+ * </ul>
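+ * <p/>
+ * For example (a hypothetical configuration sketch; "my_bm25" is an arbitrary name, values shown are the defaults):
+ * <pre>
+ * index.similarity.my_bm25.type: BM25
+ * index.similarity.my_bm25.k1: 1.2
+ * index.similarity.my_bm25.b: 0.75
+ * index.similarity.my_bm25.discount_overlaps: true
+ * </pre>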
+ * @see BM25Similarity For more information about configuration
+ */
+public class BM25SimilarityProvider extends AbstractSimilarityProvider {
+
+ private final BM25Similarity similarity;
+
+ @Inject
+ public BM25SimilarityProvider(@Assisted String name, @Assisted Settings settings) {
+ super(name);
+ float k1 = settings.getAsFloat("k1", 1.2f);
+ float b = settings.getAsFloat("b", 0.75f);
+ boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true);
+
+ this.similarity = new BM25Similarity(k1, b);
+ this.similarity.setDiscountOverlaps(discountOverlaps);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Similarity get() {
+ return similarity;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java
new file mode 100644
index 0000000..c6c6a48
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.similarities.*;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * {@link SimilarityProvider} for {@link DFRSimilarity}.
+ * <p/>
+ * Configuration options available:
+ * <ul>
+ * <li>basic_model</li>
+ * <li>after_effect</li>
+ * <li>normalization</li>
+ * </ul>
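+ * <p/>
+ * For example (a hypothetical configuration sketch; "my_dfr" is an arbitrary name):
+ * <pre>
+ * index.similarity.my_dfr.type: DFR
+ * index.similarity.my_dfr.basic_model: g
+ * index.similarity.my_dfr.after_effect: l
+ * index.similarity.my_dfr.normalization: h2
+ * </pre>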
+ * @see DFRSimilarity For more information about configuration
+ */
+public class DFRSimilarityProvider extends AbstractSimilarityProvider {
+
+ private static final ImmutableMap<String, BasicModel> MODEL_CACHE;
+ private static final ImmutableMap<String, AfterEffect> EFFECT_CACHE;
+
+ static {
+ MapBuilder<String, BasicModel> models = MapBuilder.newMapBuilder();
+ models.put("be", new BasicModelBE());
+ models.put("d", new BasicModelD());
+ models.put("g", new BasicModelG());
+ models.put("if", new BasicModelIF());
+ models.put("in", new BasicModelIn());
+ models.put("ine", new BasicModelIne());
+ models.put("p", new BasicModelP());
+ MODEL_CACHE = models.immutableMap();
+
+ MapBuilder<String, AfterEffect> effects = MapBuilder.newMapBuilder();
+ effects.put("no", new AfterEffect.NoAfterEffect());
+ effects.put("b", new AfterEffectB());
+ effects.put("l", new AfterEffectL());
+ EFFECT_CACHE = effects.immutableMap();
+ }
+
+ private final DFRSimilarity similarity;
+
+ @Inject
+ public DFRSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
+ super(name);
+ BasicModel basicModel = parseBasicModel(settings);
+ AfterEffect afterEffect = parseAfterEffect(settings);
+ Normalization normalization = parseNormalization(settings);
+ this.similarity = new DFRSimilarity(basicModel, afterEffect, normalization);
+ }
+
+ /**
+ * Parses the given Settings and creates the appropriate {@link BasicModel}
+ *
+ * @param settings Settings to parse
+ * @return {@link BasicModel} referred to in the Settings
+ */
+ protected BasicModel parseBasicModel(Settings settings) {
+ String basicModel = settings.get("basic_model");
+ BasicModel model = MODEL_CACHE.get(basicModel);
+ if (model == null) {
+ throw new ElasticsearchIllegalArgumentException("Unsupported BasicModel [" + basicModel + "]");
+ }
+ return model;
+ }
+
+ /**
+ * Parses the given Settings and creates the appropriate {@link AfterEffect}
+ *
+ * @param settings Settings to parse
+ * @return {@link AfterEffect} referred to in the Settings
+ */
+ protected AfterEffect parseAfterEffect(Settings settings) {
+ String afterEffect = settings.get("after_effect");
+ AfterEffect effect = EFFECT_CACHE.get(afterEffect);
+ if (effect == null) {
+ throw new ElasticsearchIllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]");
+ }
+ return effect;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Similarity get() {
+ return similarity;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java
new file mode 100644
index 0000000..d43fa29
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * {@link SimilarityProvider} for {@link DefaultSimilarity}.
+ * <p/>
+ * Configuration options available:
+ * <ul>
+ * <li>discount_overlaps</li>
+ * </ul>
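+ * <p/>
+ * For example (a hypothetical configuration sketch, assuming the type name resolves to this provider):
+ * <pre>
+ * index.similarity.my_default.type: default
+ * index.similarity.my_default.discount_overlaps: false
+ * </pre>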
+ * @see DefaultSimilarity For more information about configuration
+ */
+public class DefaultSimilarityProvider extends AbstractSimilarityProvider {
+
+ private final DefaultSimilarity similarity = new DefaultSimilarity();
+
+ @Inject
+ public DefaultSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
+ super(name);
+ boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true);
+ this.similarity.setDiscountOverlaps(discountOverlaps);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public DefaultSimilarity get() {
+ return similarity;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java
new file mode 100644
index 0000000..c5e9d29
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.similarities.*;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * {@link SimilarityProvider} for {@link IBSimilarity}.
+ * <p/>
+ * Configuration options available:
+ * <ul>
+ * <li>distribution</li>
+ * <li>lambda</li>
+ * <li>normalization</li>
+ * </ul>
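+ * <p/>
+ * For example (a hypothetical configuration sketch; "my_ib" is an arbitrary name):
+ * <pre>
+ * index.similarity.my_ib.type: IB
+ * index.similarity.my_ib.distribution: ll
+ * index.similarity.my_ib.lambda: df
+ * index.similarity.my_ib.normalization: h1
+ * </pre>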
+ * @see IBSimilarity For more information about configuration
+ */
+public class IBSimilarityProvider extends AbstractSimilarityProvider {
+
+ private static final ImmutableMap<String, Distribution> DISTRIBUTION_CACHE;
+ private static final ImmutableMap<String, Lambda> LAMBDA_CACHE;
+
+ static {
+ MapBuilder<String, Distribution> distributions = MapBuilder.newMapBuilder();
+ distributions.put("ll", new DistributionLL());
+ distributions.put("spl", new DistributionSPL());
+ DISTRIBUTION_CACHE = distributions.immutableMap();
+
+ MapBuilder<String, Lambda> lambdas = MapBuilder.newMapBuilder();
+ lambdas.put("df", new LambdaDF());
+ lambdas.put("ttf", new LambdaTTF());
+ LAMBDA_CACHE = lambdas.immutableMap();
+ }
+
+ private final IBSimilarity similarity;
+
+ @Inject
+ public IBSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
+ super(name);
+ Distribution distribution = parseDistribution(settings);
+ Lambda lambda = parseLambda(settings);
+ Normalization normalization = parseNormalization(settings);
+ this.similarity = new IBSimilarity(distribution, lambda, normalization);
+ }
+
+ /**
+ * Parses the given Settings and creates the appropriate {@link Distribution}
+ *
+ * @param settings Settings to parse
+ * @return {@link Distribution} referred to in the Settings
+ */
+ protected Distribution parseDistribution(Settings settings) {
+ String rawDistribution = settings.get("distribution");
+ Distribution distribution = DISTRIBUTION_CACHE.get(rawDistribution);
+ if (distribution == null) {
+ throw new ElasticsearchIllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]");
+ }
+ return distribution;
+ }
+
+ /**
+ * Parses the given Settings and creates the appropriate {@link Lambda}
+ *
+ * @param settings Settings to parse
+ * @return {@link Lambda} referred to in the Settings
+ */
+ protected Lambda parseLambda(Settings settings) {
+ String rawLambda = settings.get("lambda");
+ Lambda lambda = LAMBDA_CACHE.get(rawLambda);
+ if (lambda == null) {
+ throw new ElasticsearchIllegalArgumentException("Unsupported Lambda [" + rawLambda + "]");
+ }
+ return lambda;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Similarity get() {
+ return similarity;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java
new file mode 100644
index 0000000..4b3f0cc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.Similarity;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * {@link SimilarityProvider} for pre-built Similarities
+ */
+public class PreBuiltSimilarityProvider extends AbstractSimilarityProvider {
+
+ public static class Factory implements SimilarityProvider.Factory {
+
+ private final PreBuiltSimilarityProvider similarity;
+
+ public Factory(String name, Similarity similarity) {
+ this.similarity = new PreBuiltSimilarityProvider(name, similarity);
+ }
+
+ @Override
+ public SimilarityProvider create(String name, Settings settings) {
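+ // the wrapped Similarity is pre-built, so the given settings are intentionally ignored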
+ return similarity;
+ }
+
+ public String name() {
+ return similarity.name();
+ }
+
+ public SimilarityProvider get() {
+ return similarity;
+ }
+ }
+
+ private final Similarity similarity;
+
+ /**
+ * Creates a new {@link PreBuiltSimilarityProvider} with the given name and given
+ * pre-built Similarity
+ *
+ * @param name Name of the Provider
+ * @param similarity Pre-built Similarity
+ */
+ public PreBuiltSimilarityProvider(String name, Similarity similarity) {
+ super(name);
+ this.similarity = similarity;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Similarity get() {
+ return similarity;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/Similarities.java b/src/main/java/org/elasticsearch/index/similarity/Similarities.java
new file mode 100644
index 0000000..4dbdca1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/Similarities.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.similarities.BM25Similarity;
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.elasticsearch.common.collect.MapBuilder;
+
+/**
+ * Cache of pre-defined Similarities
+ */
+public class Similarities {
+
+ private static final ImmutableMap<String, PreBuiltSimilarityProvider.Factory> PRE_BUILT_SIMILARITIES;
+
+ static {
+ MapBuilder<String, PreBuiltSimilarityProvider.Factory> similarities = MapBuilder.newMapBuilder();
+ similarities.put(SimilarityLookupService.DEFAULT_SIMILARITY,
+ new PreBuiltSimilarityProvider.Factory(SimilarityLookupService.DEFAULT_SIMILARITY, new DefaultSimilarity()));
+ similarities.put("BM25", new PreBuiltSimilarityProvider.Factory("BM25", new BM25Similarity()));
+
+ PRE_BUILT_SIMILARITIES = similarities.immutableMap();
+ }
+
+ private Similarities() {
+ }
+
+ /**
+ * Returns the list of pre-defined SimilarityProvider Factories
+ *
+ * @return Pre-defined SimilarityProvider Factories
+ */
+ public static ImmutableCollection<PreBuiltSimilarityProvider.Factory> listFactories() {
+ return PRE_BUILT_SIMILARITIES.values();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityLookupService.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityLookupService.java
new file mode 100644
index 0000000..7e049aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityLookupService.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.util.Map;
+
+/**
+ * Service for looking up configured {@link SimilarityProvider} implementations by name.
+ * <p/>
+ * The service instantiates the Providers through their Factories, using configuration
+ * values found under the {@link SimilarityModule#SIMILARITY_SETTINGS_PREFIX} prefix.
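+ * <p/>
+ * For example, a (hypothetical) caller could resolve a configured provider with
+ * <tt>similarityLookupService.similarity("my_similarity")</tt>; the lookup returns
+ * <tt>null</tt> if no provider is registered under that name.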
+ */
+public class SimilarityLookupService extends AbstractIndexComponent {
+
+ public final static String DEFAULT_SIMILARITY = "default";
+
+ private final ImmutableMap<String, SimilarityProvider> similarities;
+
+ public SimilarityLookupService(Index index, Settings indexSettings) {
+ this(index, indexSettings, ImmutableMap.<String, SimilarityProvider.Factory>of());
+ }
+
+ @Inject
+ public SimilarityLookupService(Index index, @IndexSettings Settings indexSettings, Map<String, SimilarityProvider.Factory> similarities) {
+ super(index, indexSettings);
+
+ MapBuilder<String, SimilarityProvider> providers = MapBuilder.newMapBuilder();
+
+ Map<String, Settings> similaritySettings = indexSettings.getGroups(SimilarityModule.SIMILARITY_SETTINGS_PREFIX);
+ for (Map.Entry<String, SimilarityProvider.Factory> entry : similarities.entrySet()) {
+ String name = entry.getKey();
+ SimilarityProvider.Factory factory = entry.getValue();
+
+ Settings settings = similaritySettings.get(name);
+ if (settings == null) {
+ settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+ providers.put(name, factory.create(name, settings));
+ }
+
+ // register the pre-built similarities unless they have been overridden; this also covers
+ // instances created without injected factories (e.g. in tests)
+ for (PreBuiltSimilarityProvider.Factory factory : Similarities.listFactories()) {
+ if (!providers.containsKey(factory.name())) {
+ providers.put(factory.name(), factory.get());
+ }
+ }
+
+ this.similarities = providers.immutableMap();
+ }
+
+ /**
+ * Returns the {@link SimilarityProvider} with the given name
+ *
+ * @param name Name of the SimilarityProvider to find
+ * @return {@link SimilarityProvider} with the given name, or {@code null} if no Provider exists
+ */
+ public SimilarityProvider similarity(String name) {
+ return similarities.get(name);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java
new file mode 100644
index 0000000..cbd7729
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * {@link SimilarityModule} is responsible for gathering the registered and configured {@link SimilarityProvider}
+ * implementations and making them available through the {@link SimilarityLookupService} and {@link SimilarityService}.
+ *
+ * New {@link SimilarityProvider} implementations can be registered through {@link #addSimilarity(String, Class)}
+ * while existing Providers can be referenced through Settings under the {@link #SIMILARITY_SETTINGS_PREFIX} prefix
+ * along with the "type" value. For example, to reference the {@link BM25SimilarityProvider}, the configuration
+ * <tt>"index.similarity.my_similarity.type : "BM25"</tt> can be used.
+ */
+public class SimilarityModule extends AbstractModule {
+
+ public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
+
+ private final Settings settings;
+ private final Map<String, Class<? extends SimilarityProvider>> similarities = Maps.newHashMap();
+
+ public SimilarityModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ /**
+ * Registers the given {@link SimilarityProvider} with the given name
+ *
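+ * For example, a (hypothetical) plugin could register a custom provider with
+ * <tt>module.addSimilarity("my_similarity", MySimilarityProvider.class)</tt>, where
+ * MySimilarityProvider is the plugin's own {@link SimilarityProvider} implementation.
+ *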
+ * @param name Name of the SimilarityProvider
+ * @param similarity SimilarityProvider to register
+ */
+ public void addSimilarity(String name, Class<? extends SimilarityProvider> similarity) {
+ similarities.put(name, similarity);
+ }
+
+ @Override
+ protected void configure() {
+ Map<String, Class<? extends SimilarityProvider>> providers = Maps.newHashMap(similarities);
+
+ Map<String, Settings> similaritySettings = settings.getGroups(SIMILARITY_SETTINGS_PREFIX);
+ for (Map.Entry<String, Settings> entry : similaritySettings.entrySet()) {
+ String name = entry.getKey();
+ Settings settings = entry.getValue();
+
+ Class<? extends SimilarityProvider> type =
+ settings.getAsClass("type", null, "org.elasticsearch.index.similarity.", "SimilarityProvider");
+ if (type == null) {
+ throw new ElasticsearchIllegalArgumentException("SimilarityProvider [" + name + "] must have an associated type");
+ }
+ providers.put(name, type);
+ }
+
+ MapBinder<String, SimilarityProvider.Factory> similarityBinder =
+ MapBinder.newMapBinder(binder(), String.class, SimilarityProvider.Factory.class);
+
+ for (Map.Entry<String, Class<? extends SimilarityProvider>> entry : providers.entrySet()) {
+ similarityBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(SimilarityProvider.Factory.class, entry.getValue())).in(Scopes.SINGLETON);
+ }
+
+ for (PreBuiltSimilarityProvider.Factory factory : Similarities.listFactories()) {
+ if (!providers.containsKey(factory.name())) {
+ similarityBinder.addBinding(factory.name()).toInstance(factory);
+ }
+ }
+
+ bind(SimilarityLookupService.class).asEagerSingleton();
+ bind(SimilarityService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java
new file mode 100644
index 0000000..4dbe3d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.Similarity;
+import org.elasticsearch.common.inject.Provider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexComponent;
+
+/**
+ * Provider for {@link Similarity} instances
+ */
+public interface SimilarityProvider {
+
+ /**
+ * Returns the name associated with the Provider
+ *
+ * @return Name of the Provider
+ */
+ String name();
+
+ /**
+ * Returns the {@link Similarity} the Provider is for
+ *
+ * @return Provided {@link Similarity}
+ */
+ Similarity get();
+
+ /**
+ * Factory for creating {@link SimilarityProvider} instances
+ */
+ public static interface Factory {
+
+ /**
+ * Creates a new {@link SimilarityProvider} instance
+ *
+ * @param name Name of the provider
+ * @param settings Settings to be used by the Provider
+ * @return {@link SimilarityProvider} instance created by the Factory
+ */
+ SimilarityProvider create(String name, Settings settings);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
new file mode 100644
index 0000000..277640b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
+import org.apache.lucene.search.similarities.Similarity;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
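+ * Resolves the {@link Similarity} to use at index and search time. When a {@link MapperService} is
+ * available, similarities are resolved per field through {@link PerFieldSimilarity}, falling back to
+ * the configured default similarity.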
+ *
+ */
+public class SimilarityService extends AbstractIndexComponent {
+
+ private final SimilarityLookupService similarityLookupService;
+ private final MapperService mapperService;
+
+ private final Similarity perFieldSimilarity;
+
+ public SimilarityService(Index index) {
+ this(index, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public SimilarityService(Index index, Settings settings) {
+ this(index, settings, new SimilarityLookupService(index, settings), null);
+ }
+
+ @Inject
+ public SimilarityService(Index index, @IndexSettings Settings indexSettings,
+ final SimilarityLookupService similarityLookupService, final MapperService mapperService) {
+ super(index, indexSettings);
+ this.similarityLookupService = similarityLookupService;
+ this.mapperService = mapperService;
+
+ Similarity defaultSimilarity = similarityLookupService.similarity(SimilarityLookupService.DEFAULT_SIMILARITY).get();
+ // Expert users can configure the base similarity to be different from the default, but out of the box we use the default.
+ Similarity baseSimilarity = (similarityLookupService.similarity("base") != null) ? similarityLookupService.similarity("base").get() :
+ defaultSimilarity;
+
+ this.perFieldSimilarity = (mapperService != null) ? new PerFieldSimilarity(defaultSimilarity, baseSimilarity, mapperService) :
+ defaultSimilarity;
+ }
+
+ public Similarity similarity() {
+ return perFieldSimilarity;
+ }
+
+ public SimilarityLookupService similarityLookupService() {
+ return similarityLookupService;
+ }
+
+ public MapperService mapperService() {
+ return mapperService;
+ }
+
+ static class PerFieldSimilarity extends PerFieldSimilarityWrapper {
+
+ private final Similarity defaultSimilarity;
+ private final Similarity baseSimilarity;
+ private final MapperService mapperService;
+
+ PerFieldSimilarity(Similarity defaultSimilarity, Similarity baseSimilarity, MapperService mapperService) {
+ this.defaultSimilarity = defaultSimilarity;
+ this.baseSimilarity = baseSimilarity;
+ this.mapperService = mapperService;
+ }
+
+ @Override
+ public float coord(int overlap, int maxOverlap) {
+ return baseSimilarity.coord(overlap, maxOverlap);
+ }
+
+ @Override
+ public float queryNorm(float valueForNormalization) {
+ return baseSimilarity.queryNorm(valueForNormalization);
+ }
+
+ @Override
+ public Similarity get(String name) {
+ FieldMapper mapper = mapperService.smartNameFieldMapper(name);
+ return (mapper != null && mapper.similarity() != null) ? mapper.similarity().get() : defaultSimilarity;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java
new file mode 100644
index 0000000..760a397
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.gateway.RecoveryStatus;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Shard-level snapshot repository
+ * <p/>
+ * IndexShardRepository is used on data nodes to create snapshots of individual shards. See {@link org.elasticsearch.repositories.Repository}
+ * for more information.
+ */
+public interface IndexShardRepository {
+
+ /**
+ * Creates a snapshot of the shard based on the index commit point.
+ * <p/>
+ * The index commit point can be obtained by using the {@link org.elasticsearch.index.engine.internal.InternalEngine#snapshotIndex()} method.
+ * IndexShardRepository implementations shouldn't release the snapshot index commit point; that is done by the method caller.
+ * <p/>
+ * As the snapshot process progresses, implementations of this method should update the {@link IndexShardSnapshotStatus} object and check
+ * {@link IndexShardSnapshotStatus#aborted()} to see if the snapshot process should be aborted.
+ *
+ * @param snapshotId snapshot id
+ * @param shardId shard to be snapshotted
+ * @param snapshotIndexCommit commit point
+ * @param snapshotStatus snapshot status
+ */
+ void snapshot(SnapshotId snapshotId, ShardId shardId, SnapshotIndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus);
+
+ /**
+ * Restores snapshot of the shard.
+ * <p/>
+ * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied.
+ *
+ * @param snapshotId snapshot id
+ * @param shardId shard id (in the current index)
+ * @param snapshotShardId shard id (in the snapshot)
+ * @param recoveryStatus recovery status
+ */
+ void restore(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryStatus recoveryStatus);
+
+}
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java
new file mode 100644
index 0000000..705a8ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Generic shard restore exception
+ */
+public class IndexShardRestoreException extends IndexShardException {
+ public IndexShardRestoreException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IndexShardRestoreException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreFailedException.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreFailedException.java
new file mode 100644
index 0000000..784ba57
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreFailedException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Thrown when restore of a shard fails
+ */
+public class IndexShardRestoreFailedException extends IndexShardRestoreException {
+ public IndexShardRestoreFailedException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IndexShardRestoreFailedException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java
new file mode 100644
index 0000000..a862fdc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.engine.SnapshotFailedEngineException;
+import org.elasticsearch.index.gateway.RecoveryStatus;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.RestoreService;
+
+
+/**
+ * Shard level snapshot and restore service
+ * <p/>
+ * Performs snapshot and restore operations on the shard level.
+ */
+public class IndexShardSnapshotAndRestoreService extends AbstractIndexShardComponent {
+
+ private final InternalIndexShard indexShard;
+
+ private final RepositoriesService repositoriesService;
+
+ private final RestoreService restoreService;
+
+ @Inject
+ public IndexShardSnapshotAndRestoreService(ShardId shardId, @IndexSettings Settings indexSettings, IndexShard indexShard, RepositoriesService repositoriesService, RestoreService restoreService) {
+ super(shardId, indexSettings);
+ this.indexShard = (InternalIndexShard) indexShard;
+ this.repositoriesService = repositoriesService;
+ this.restoreService = restoreService;
+ }
+
+ /**
+ * Creates shard snapshot
+ *
+ * @param snapshotId snapshot id
+ * @param snapshotStatus snapshot status
+ */
+ public void snapshot(final SnapshotId snapshotId, final IndexShardSnapshotStatus snapshotStatus) {
+ IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(snapshotId.getRepository());
+ if (!indexShard.routingEntry().primary()) {
+ throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary");
+ }
+ if (indexShard.routingEntry().relocating()) {
+ // do not snapshot a primary that is in the process of relocating, so we won't get conflicts
+ throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating");
+ }
+ if (indexShard.state() == IndexShardState.CREATED || indexShard.state() == IndexShardState.RECOVERING) {
+ // shard has just been created, or still recovering
+ throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet");
+ }
+
+ try {
+ SnapshotIndexCommit snapshotIndexCommit = indexShard.snapshotIndex();
+ try {
+ indexShardRepository.snapshot(snapshotId, shardId, snapshotIndexCommit, snapshotStatus);
+ if (logger.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("snapshot (").append(snapshotId.getSnapshot()).append(") completed to ").append(indexShardRepository).append(", took [").append(TimeValue.timeValueMillis(snapshotStatus.time())).append("]\n");
+ sb.append(" index : version [").append(snapshotStatus.indexVersion()).append("], number_of_files [").append(snapshotStatus.numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(snapshotStatus.totalSize())).append("]\n");
+ logger.debug(sb.toString());
+ }
+ } finally {
+ snapshotIndexCommit.release();
+ }
+ } catch (SnapshotFailedEngineException e) {
+ throw e;
+ } catch (IndexShardSnapshotFailedException e) {
+ throw e;
+ } catch (Throwable e) {
+ throw new IndexShardSnapshotFailedException(shardId, "Failed to snapshot", e);
+ }
+ }
+
+ /**
+ * Restores shard from {@link RestoreSource} associated with this shard in routing table
+ *
+ * @param recoveryStatus recovery status
+ */
+ public void restore(final RecoveryStatus recoveryStatus) {
+ RestoreSource restoreSource = indexShard.routingEntry().restoreSource();
+ if (restoreSource == null) {
+ throw new IndexShardRestoreFailedException(shardId, "empty restore source");
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] restoring shard [{}]", restoreSource.snapshotId(), shardId);
+ }
+ try {
+ IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
+ ShardId snapshotShardId = shardId;
+ if (!shardId.getIndex().equals(restoreSource.index())) {
+ snapshotShardId = new ShardId(restoreSource.index(), shardId.id());
+ }
+ indexShardRepository.restore(restoreSource.snapshotId(), shardId, snapshotShardId, recoveryStatus);
+ restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), shardId);
+ } catch (Throwable t) {
+ throw new IndexShardRestoreFailedException(shardId, "restore failed", t);
+ }
+ }
+
+}
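
Note: snapshot() above is the shard-side entry point for a cluster-wide snapshot: it validates the shard state, pins a Lucene commit point, and hands it to the shard repository. A minimal caller sketch, not part of this commit; shardSnapshotService, snapshotId, and handleFailure are hypothetical stand-ins for objects the coordinating snapshot code would supply:

    // one status object is created per shard and polled while the snapshot runs
    IndexShardSnapshotStatus status = new IndexShardSnapshotStatus();
    try {
        // throws if the shard is not a started primary, or on any copy failure
        shardSnapshotService.snapshot(snapshotId, status);
        assert status.stage() == IndexShardSnapshotStatus.Stage.DONE;
    } catch (IndexShardSnapshotFailedException e) {
        handleFailure(e); // hypothetical error path
    }
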
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java
new file mode 100644
index 0000000..e346654
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Generic shard snapshot exception
+ */
+public class IndexShardSnapshotException extends IndexShardException {
+ public IndexShardSnapshotException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IndexShardSnapshotException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotFailedException.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotFailedException.java
new file mode 100644
index 0000000..9b9495a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotFailedException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Thrown when the snapshot process fails at the shard level
+ */
+public class IndexShardSnapshotFailedException extends IndexShardSnapshotException {
+ public IndexShardSnapshotFailedException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public IndexShardSnapshotFailedException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotModule.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotModule.java
new file mode 100644
index 0000000..c0cf978
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * This shard-level module configures {@link IndexShardSnapshotAndRestoreService}
+ */
+public class IndexShardSnapshotModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexShardSnapshotAndRestoreService.class).asEagerSingleton();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
new file mode 100644
index 0000000..cb9c7be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots;
+
+/**
+ * Represents the status of a shard snapshot
+ */
+public class IndexShardSnapshotStatus {
+
+ /**
+ * Snapshot stage
+ */
+ public static enum Stage {
+ /**
+ * Snapshot hasn't started yet
+ */
+ INIT,
+ /**
+ * Index files are being copied
+ */
+ STARTED,
+ /**
+ * Snapshot metadata is being written
+ */
+ FINALIZE,
+ /**
+ * Snapshot completed successfully
+ */
+ DONE,
+ /**
+ * Snapshot failed
+ */
+ FAILURE
+ }
+
+ private Stage stage = Stage.INIT;
+
+ private long startTime;
+
+ private long time;
+
+ private int numberOfFiles;
+
+ private long totalSize;
+
+ private long indexVersion;
+
+ private boolean aborted;
+
+ /**
+ * Returns current snapshot stage
+ *
+ * @return current snapshot stage
+ */
+ public Stage stage() {
+ return this.stage;
+ }
+
+ /**
+ * Sets new snapshot stage
+ *
+ * @param stage new snapshot stage
+ */
+ public void updateStage(Stage stage) {
+ this.stage = stage;
+ }
+
+ /**
+ * Returns snapshot start time
+ *
+ * @return snapshot start time
+ */
+ public long startTime() {
+ return this.startTime;
+ }
+
+ /**
+ * Sets snapshot start time
+ *
+ * @param startTime snapshot start time
+ */
+ public void startTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ /**
+ * Returns snapshot processing time
+ *
+ * @return processing time
+ */
+ public long time() {
+ return this.time;
+ }
+
+ /**
+ * Sets snapshot processing time
+ *
+ * @param time snapshot processing time
+ */
+ public void time(long time) {
+ this.time = time;
+ }
+
+ /**
+ * Returns true if snapshot process was aborted
+ *
+ * @return true if snapshot process was aborted
+ */
+ public boolean aborted() {
+ return this.aborted;
+ }
+
+ /**
+ * Marks snapshot as aborted
+ */
+ public void abort() {
+ this.aborted = true;
+ }
+
+ /**
+ * Sets file stats
+ *
+ * @param numberOfFiles number of files in this snapshot
+ * @param totalSize total size of files in this snapshot
+ */
+ public void files(int numberOfFiles, long totalSize) {
+ this.numberOfFiles = numberOfFiles;
+ this.totalSize = totalSize;
+ }
+
+ /**
+ * Number of files
+ *
+ * @return number of files
+ */
+ public int numberOfFiles() {
+ return numberOfFiles;
+ }
+
+ /**
+ * Total snapshot size
+ *
+ * @return snapshot size
+ */
+ public long totalSize() {
+ return totalSize;
+ }
+
+ /**
+ * Sets index version
+ *
+ * @param indexVersion index version
+ */
+ public void indexVersion(long indexVersion) {
+ this.indexVersion = indexVersion;
+ }
+
+ /**
+ * Returns index version
+ *
+ * @return index version
+ */
+ public long indexVersion() {
+ return indexVersion;
+ }
+}
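
Note: the status object is pure bookkeeping; the blob store repository below advances it through INIT, STARTED, FINALIZE, and DONE (or FAILURE) and records wall-clock timing. A sketch of the expected update sequence, with illustrative numbers, mirroring what BlobStoreIndexShardRepository does:

    IndexShardSnapshotStatus status = new IndexShardSnapshotStatus();   // stage == INIT
    status.startTime(System.currentTimeMillis());
    status.updateStage(IndexShardSnapshotStatus.Stage.STARTED);         // copying index files
    status.files(12, 1024 * 1024);                                      // 12 files, 1 MB total
    status.indexVersion(42);
    status.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE);        // writing snapshot metadata
    status.time(System.currentTimeMillis() - status.startTime());
    status.updateStage(IndexShardSnapshotStatus.Stage.DONE);
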
diff --git a/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
new file mode 100644
index 0000000..215ff34
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
@@ -0,0 +1,760 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots.blobstore;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RateLimiter;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.blobstore.*;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
+import org.elasticsearch.common.lucene.store.ThreadSafeInputStreamIndexInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.gateway.RecoveryStatus;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.snapshots.*;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.repositories.RepositoryName;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Blob store based implementation of IndexShardRepository
+ */
+public class BlobStoreIndexShardRepository extends AbstractComponent implements IndexShardRepository {
+
+ private BlobStore blobStore;
+
+ private BlobPath basePath;
+
+ private final String repositoryName;
+
+ private ByteSizeValue chunkSize;
+
+ private final IndicesService indicesService;
+
+ private RateLimiter snapshotRateLimiter;
+
+ private RateLimiter restoreRateLimiter;
+
+ private RateLimiterListener rateLimiterListener;
+
+ private RateLimitingInputStream.Listener snapshotThrottleListener;
+
+ private static final String SNAPSHOT_PREFIX = "snapshot-";
+
+ @Inject
+ BlobStoreIndexShardRepository(Settings settings, RepositoryName repositoryName, IndicesService indicesService) {
+ super(settings);
+ this.repositoryName = repositoryName.name();
+ this.indicesService = indicesService;
+ }
+
+ /**
+ * Called by {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} on repository startup
+ *
+ * @param blobStore blob store
+ * @param basePath base path to blob store
+ * @param chunkSize chunk size
+ */
+ public void initialize(BlobStore blobStore, BlobPath basePath, ByteSizeValue chunkSize,
+ RateLimiter snapshotRateLimiter, RateLimiter restoreRateLimiter,
+ final RateLimiterListener rateLimiterListener) {
+ this.blobStore = blobStore;
+ this.basePath = basePath;
+ this.chunkSize = chunkSize;
+ this.snapshotRateLimiter = snapshotRateLimiter;
+ this.restoreRateLimiter = restoreRateLimiter;
+ this.rateLimiterListener = rateLimiterListener;
+ this.snapshotThrottleListener = new RateLimitingInputStream.Listener() {
+ @Override
+ public void onPause(long nanos) {
+ rateLimiterListener.onSnapshotPause(nanos);
+ }
+ };
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void snapshot(SnapshotId snapshotId, ShardId shardId, SnapshotIndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
+ SnapshotContext snapshotContext = new SnapshotContext(snapshotId, shardId, snapshotStatus);
+ snapshotStatus.startTime(System.currentTimeMillis());
+
+ try {
+ snapshotContext.snapshot(snapshotIndexCommit);
+ snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
+ snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE);
+ } catch (Throwable e) {
+ snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
+ snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE);
+ if (e instanceof IndexShardSnapshotFailedException) {
+ throw (IndexShardSnapshotFailedException) e;
+ } else {
+ throw new IndexShardSnapshotFailedException(shardId, e.getMessage(), e);
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void restore(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryStatus recoveryStatus) {
+ RestoreContext snapshotContext = new RestoreContext(snapshotId, shardId, snapshotShardId, recoveryStatus);
+
+ try {
+ recoveryStatus.index().startTime(System.currentTimeMillis());
+ snapshotContext.restore();
+ recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
+ } catch (Throwable e) {
+ throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId.getSnapshot() + "]", e);
+ }
+ }
+
+ /**
+ * Delete shard snapshot
+ *
+ * @param snapshotId snapshot id
+ * @param shardId shard id
+ */
+ public void delete(SnapshotId snapshotId, ShardId shardId) {
+ Context context = new Context(snapshotId, shardId, shardId);
+ context.delete();
+ }
+
+ @Override
+ public String toString() {
+ return "BlobStoreIndexShardRepository[" +
+ "[" + repositoryName +
+ "], [" + blobStore + ']' +
+ ']';
+ }
+
+ /**
+ * Returns shard snapshot metadata file name
+ *
+ * @param snapshotId snapshot id
+ * @return shard snapshot metadata file name
+ */
+ private String snapshotBlobName(SnapshotId snapshotId) {
+ return SNAPSHOT_PREFIX + snapshotId.getSnapshot();
+ }
+
+ /**
+ * Serializes snapshot to JSON
+ *
+ * @param snapshot snapshot
+ * @return JSON representation of the snapshot
+ * @throws IOException
+ */
+ public static byte[] writeSnapshot(BlobStoreIndexShardSnapshot snapshot) throws IOException {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
+ BlobStoreIndexShardSnapshot.toXContent(snapshot, builder, ToXContent.EMPTY_PARAMS);
+ return builder.bytes().toBytes();
+ }
+
+ /**
+ * Parses JSON representation of a snapshot
+ *
+ * @param data JSON
+ * @return snapshot
+ * @throws IOException
+ */
+ public static BlobStoreIndexShardSnapshot readSnapshot(byte[] data) throws IOException {
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(data);
+ try {
+ parser.nextToken();
+ return BlobStoreIndexShardSnapshot.fromXContent(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ /**
+ * Context for snapshot/restore operations
+ */
+ private class Context {
+
+ protected final SnapshotId snapshotId;
+
+ protected final ShardId shardId;
+
+ protected final ImmutableBlobContainer blobContainer;
+
+ public Context(SnapshotId snapshotId, ShardId shardId) {
+ this(snapshotId, shardId, shardId);
+ }
+
+ public Context(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId) {
+ this.snapshotId = snapshotId;
+ this.shardId = shardId;
+ blobContainer = blobStore.immutableBlobContainer(basePath.add("indices").add(snapshotShardId.getIndex()).add(Integer.toString(snapshotShardId.getId())));
+ }
+
+ /**
+ * Delete shard snapshot
+ */
+ public void delete() {
+ final ImmutableMap<String, BlobMetaData> blobs;
+ try {
+ blobs = blobContainer.listBlobs();
+ } catch (IOException e) {
+ throw new IndexShardSnapshotException(shardId, "Failed to list content of gateway", e);
+ }
+
+ BlobStoreIndexShardSnapshots snapshots = buildBlobStoreIndexShardSnapshots(blobs);
+
+ String commitPointName = snapshotBlobName(snapshotId);
+
+ try {
+ blobContainer.deleteBlob(commitPointName);
+ } catch (IOException e) {
+ logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId);
+ }
+
+ // delete all files that are not referenced by any commit point
+ // build a new list of snapshots that excludes the snapshot being deleted
+ List<BlobStoreIndexShardSnapshot> newSnapshotsList = Lists.newArrayList();
+ for (BlobStoreIndexShardSnapshot point : snapshots) {
+ if (!point.snapshot().equals(snapshotId.getSnapshot())) {
+ newSnapshotsList.add(point);
+ }
+ }
+ cleanup(newSnapshotsList, blobs);
+ }
+
+ /**
+ * Removes all unreferenced files from the repository
+ *
+ * @param snapshots list of active snapshots in the container
+ * @param blobs list of blobs in the container
+ */
+ protected void cleanup(List<BlobStoreIndexShardSnapshot> snapshots, ImmutableMap<String, BlobMetaData> blobs) {
+ BlobStoreIndexShardSnapshots newSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
+ // now go over all the blobs, and if they don't exist in a snapshot, delete them
+ for (String blobName : blobs.keySet()) {
+ if (!blobName.startsWith("__")) {
+ continue;
+ }
+ if (newSnapshots.findNameFile(FileInfo.canonicalName(blobName)) == null) {
+ try {
+ blobContainer.deleteBlob(blobName);
+ } catch (IOException e) {
+ logger.debug("[{}] [{}] error deleting blob [{}] during cleanup", e, snapshotId, shardId, blobName);
+ }
+ }
+ }
+ }
+
+ /**
+ * Generates blob name
+ *
+ * @param generation the blob number
+ * @return the blob name
+ */
+ protected String fileNameFromGeneration(long generation) {
+ return "__" + Long.toString(generation, Character.MAX_RADIX);
+ }
+
+ /**
+ * Finds the next available blob number
+ *
+ * @param blobs list of blobs in the repository
+ * @return next available blob number
+ */
+ protected long findLatestFileNameGeneration(ImmutableMap<String, BlobMetaData> blobs) {
+ long generation = -1;
+ for (String name : blobs.keySet()) {
+ if (!name.startsWith("__")) {
+ continue;
+ }
+ name = FileInfo.canonicalName(name);
+ try {
+ long currentGen = Long.parseLong(name.substring(2) /*__*/, Character.MAX_RADIX);
+ if (currentGen > generation) {
+ generation = currentGen;
+ }
+ } catch (NumberFormatException e) {
+ logger.warn("file [{}] does not conform to the '__' schema");
+ }
+ }
+ return generation;
+ }
+
+ /**
+ * Loads all available snapshots in the repository
+ *
+ * @param blobs list of blobs in repository
+ * @return BlobStoreIndexShardSnapshots
+ */
+ protected BlobStoreIndexShardSnapshots buildBlobStoreIndexShardSnapshots(ImmutableMap<String, BlobMetaData> blobs) {
+ List<BlobStoreIndexShardSnapshot> snapshots = Lists.newArrayList();
+ for (String name : blobs.keySet()) {
+ if (name.startsWith(SNAPSHOT_PREFIX)) {
+ try {
+ snapshots.add(readSnapshot(blobContainer.readBlobFully(name)));
+ } catch (IOException e) {
+ logger.warn("failed to read commit point [{}]", e, name);
+ }
+ }
+ }
+ return new BlobStoreIndexShardSnapshots(snapshots);
+ }
+ }
+
+ /**
+ * Context for snapshot operations
+ */
+ private class SnapshotContext extends Context {
+
+ private final Store store;
+
+ private final IndexShardSnapshotStatus snapshotStatus;
+
+ /**
+ * Constructs new context
+ *
+ * @param snapshotId snapshot id
+ * @param shardId shard to be snapshotted
+ * @param snapshotStatus snapshot status to report progress
+ */
+ public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
+ super(snapshotId, shardId);
+ store = indicesService.indexServiceSafe(shardId.getIndex()).shardInjectorSafe(shardId.id()).getInstance(Store.class);
+ this.snapshotStatus = snapshotStatus;
+ }
+
+ /**
+ * Create snapshot from index commit point
+ *
+ * @param snapshotIndexCommit commit point to snapshot
+ */
+ public void snapshot(SnapshotIndexCommit snapshotIndexCommit) {
+ logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, repositoryName);
+
+ final ImmutableMap<String, BlobMetaData> blobs;
+ try {
+ blobs = blobContainer.listBlobs();
+ } catch (IOException e) {
+ throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
+ }
+
+ long generation = findLatestFileNameGeneration(blobs);
+ BlobStoreIndexShardSnapshots snapshots = buildBlobStoreIndexShardSnapshots(blobs);
+
+ snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.STARTED);
+
+ final CountDownLatch indexLatch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
+ final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+ final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = newArrayList();
+
+ int indexNumberOfFiles = 0;
+ long indexTotalFilesSize = 0;
+ for (String fileName : snapshotIndexCommit.getFiles()) {
+ if (snapshotStatus.aborted()) {
+ logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
+ throw new IndexShardSnapshotFailedException(shardId, "Aborted");
+ }
+ logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
+ final StoreFileMetaData md;
+ try {
+ md = store.metaData(fileName);
+ } catch (IOException e) {
+ throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
+ }
+
+ boolean snapshotRequired = false;
+ // TODO: For now segment files are copied on each commit because segment files don't have checksum
+// if (snapshot.indexChanged() && fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
+// snapshotRequired = true; // we want to always snapshot the segment file if the index changed
+// }
+
+ BlobStoreIndexShardSnapshot.FileInfo fileInfo = snapshots.findPhysicalIndexFile(fileName);
+
+ if (fileInfo == null || !fileInfo.isSame(md) || !snapshotFileExistsInBlobs(fileInfo, blobs)) {
+ // the file does not exist in any commit point, has a different length, or is not fully present in the listed blobs
+ snapshotRequired = true;
+ }
+
+ if (snapshotRequired) {
+ indexNumberOfFiles++;
+ indexTotalFilesSize += md.length();
+ // create a new FileInfo
+ try {
+ BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), fileName, md.length(), chunkSize, md.checksum());
+ indexCommitPointFiles.add(snapshotFileInfo);
+ snapshotFile(snapshotFileInfo, indexLatch, failures);
+ } catch (IOException e) {
+ failures.add(e);
+ }
+ } else {
+ indexCommitPointFiles.add(fileInfo);
+ indexLatch.countDown();
+ }
+ }
+
+ snapshotStatus.files(indexNumberOfFiles, indexTotalFilesSize);
+ snapshotStatus.indexVersion(snapshotIndexCommit.getGeneration());
+
+ try {
+ indexLatch.await();
+ } catch (InterruptedException e) {
+ failures.add(e);
+ Thread.currentThread().interrupt();
+ }
+ if (!failures.isEmpty()) {
+ throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", failures.get(0));
+ }
+
+ // now create and write the commit point
+ snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE);
+
+ String commitPointName = snapshotBlobName(snapshotId);
+ BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getSnapshot(), snapshotIndexCommit.getGeneration(), indexCommitPointFiles);
+ try {
+ byte[] snapshotData = writeSnapshot(snapshot);
+ logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
+ blobContainer.writeBlob(commitPointName, new BytesStreamInput(snapshotData, false), snapshotData.length);
+ } catch (IOException e) {
+ throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
+ }
+
+ // delete all files that are not referenced by any commit point
+ // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones
+ List<BlobStoreIndexShardSnapshot> newSnapshotsList = Lists.newArrayList();
+ newSnapshotsList.add(snapshot);
+ for (BlobStoreIndexShardSnapshot point : snapshots) {
+ newSnapshotsList.add(point);
+ }
+ cleanup(newSnapshotsList, blobs);
+ snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE);
+ }
+
+ /**
+ * Snapshot individual file
+ * <p/>
+ * This method is asynchronous. Upon completion of the operation the latch is counted down and any failures are
+ * added to the {@code failures} list
+ *
+ * @param fileInfo file to be snapshotted
+ * @param latch latch that should be counted down once the file is snapshotted
+ * @param failures thread-safe list of failures
+ * @throws IOException
+ */
+ private void snapshotFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) throws IOException {
+ final AtomicLong counter = new AtomicLong(fileInfo.numberOfParts());
+ for (long i = 0; i < fileInfo.numberOfParts(); i++) {
+ IndexInput indexInput = null;
+ try {
+ indexInput = store.openInputRaw(fileInfo.physicalName(), IOContext.READONCE);
+ indexInput.seek(i * fileInfo.partBytes());
+ InputStreamIndexInput inputStreamIndexInput = new ThreadSafeInputStreamIndexInput(indexInput, fileInfo.partBytes());
+
+ final IndexInput fIndexInput = indexInput;
+ long size = inputStreamIndexInput.actualSizeToRead();
+ InputStream inputStream;
+ if (snapshotRateLimiter != null) {
+ inputStream = new RateLimitingInputStream(inputStreamIndexInput, snapshotRateLimiter, snapshotThrottleListener);
+ } else {
+ inputStream = inputStreamIndexInput;
+ }
+ blobContainer.writeBlob(fileInfo.partName(i), inputStream, size, new ImmutableBlobContainer.WriterListener() {
+ @Override
+ public void onCompleted() {
+ IOUtils.closeWhileHandlingException(fIndexInput);
+ if (counter.decrementAndGet() == 0) {
+ latch.countDown();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ IOUtils.closeWhileHandlingException(fIndexInput);
+ failures.add(t);
+ if (counter.decrementAndGet() == 0) {
+ latch.countDown();
+ }
+ }
+ });
+ } catch (Throwable e) {
+ IOUtils.closeWhileHandlingException(indexInput);
+ failures.add(e);
+ latch.countDown();
+ }
+ }
+ }
+
+ /**
+ * Checks if snapshot file already exists in the list of blobs
+ *
+ * @param fileInfo file to check
+ * @param blobs list of blobs
+ * @return true if file exists in the list of blobs
+ */
+ private boolean snapshotFileExistsInBlobs(BlobStoreIndexShardSnapshot.FileInfo fileInfo, ImmutableMap<String, BlobMetaData> blobs) {
+ BlobMetaData blobMetaData = blobs.get(fileInfo.name());
+ if (blobMetaData != null) {
+ return blobMetaData.length() == fileInfo.length();
+ } else if (blobs.containsKey(fileInfo.partName(0))) {
+ // multi-part file: sum up the sizes of all parts and compare
+ int part = 0;
+ long totalSize = 0;
+ while (true) {
+ blobMetaData = blobs.get(fileInfo.partName(part++));
+ if (blobMetaData == null) {
+ break;
+ }
+ totalSize += blobMetaData.length();
+ }
+ return totalSize == fileInfo.length();
+ }
+ // not found as a single blob and not as a multi-part file
+ return false;
+ }
+
+ }
+
+ /**
+ * Context for restore operations
+ */
+ private class RestoreContext extends Context {
+
+ private final Store store;
+
+ private final RecoveryStatus recoveryStatus;
+
+ /**
+ * Constructs new restore context
+ *
+ * @param snapshotId snapshot id
+ * @param shardId shard to be restored
+ * @param snapshotShardId shard in the snapshot that data should be restored from
+ * @param recoveryStatus recovery status to report progress
+ */
+ public RestoreContext(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryStatus recoveryStatus) {
+ super(snapshotId, shardId, snapshotShardId);
+ store = indicesService.indexServiceSafe(shardId.getIndex()).shardInjectorSafe(shardId.id()).getInstance(Store.class);
+ this.recoveryStatus = recoveryStatus;
+ }
+
+ /**
+ * Performs restore operation
+ */
+ public void restore() {
+ logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
+ BlobStoreIndexShardSnapshot snapshot;
+ try {
+ snapshot = readSnapshot(blobContainer.readBlobFully(snapshotBlobName(snapshotId)));
+ } catch (IOException ex) {
+ throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex);
+ }
+
+ recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
+ int numberOfFiles = 0;
+ long totalSize = 0;
+ int numberOfReusedFiles = 0;
+ long reusedTotalSize = 0;
+
+ List<FileInfo> filesToRecover = Lists.newArrayList();
+ for (FileInfo fileInfo : snapshot.indexFiles()) {
+ String fileName = fileInfo.physicalName();
+ StoreFileMetaData md = null;
+ try {
+ md = store.metaData(fileName);
+ } catch (IOException e) {
+ // no file
+ }
+ numberOfFiles++;
+ // we don't compute checksum for segments, so always recover them
+ if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
+ totalSize += md.length();
+ numberOfReusedFiles++;
+ reusedTotalSize += md.length();
+ if (logger.isTraceEnabled()) {
+ logger.trace("not_recovering [{}], exists in local store and is same", fileInfo.physicalName());
+ }
+ } else {
+ totalSize += fileInfo.length();
+ filesToRecover.add(fileInfo);
+ if (logger.isTraceEnabled()) {
+ if (md == null) {
+ logger.trace("recovering [{}], does not exists in local store", fileInfo.physicalName());
+ } else {
+ logger.trace("recovering [{}], exists in local store but is different", fileInfo.physicalName());
+ }
+ }
+ }
+ }
+
+ recoveryStatus.index().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
+ if (filesToRecover.isEmpty()) {
+ logger.trace("no files to recover, all exists within the local store");
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId, numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
+ }
+
+ final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
+ final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+
+ for (final FileInfo fileToRecover : filesToRecover) {
+ logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
+ restoreFile(fileToRecover, latch, failures);
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+
+ if (!failures.isEmpty()) {
+ throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", failures.get(0));
+ }
+
+ // read the snapshot data persisted
+ long version = -1;
+ try {
+ if (Lucene.indexExists(store.directory())) {
+ version = Lucene.readSegmentInfos(store.directory()).getVersion();
+ }
+ } catch (IOException e) {
+ throw new IndexShardRestoreFailedException(shardId, "Failed to fetch index version after copying it over", e);
+ }
+ recoveryStatus.index().updateVersion(version);
+
+ // now, go over and clean files that are in the store, but were not in the snapshot
+ try {
+ for (String storeFile : store.directory().listAll()) {
+ if (!snapshot.containPhysicalIndexFile(storeFile)) {
+ try {
+ store.directory().deleteFile(storeFile);
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+
+ /**
+ * Restores a file.
+ * This method is asynchronous. Upon completion of the operation the latch is counted down and any failures are
+ * added to the {@code failures} list
+ *
+ * @param fileInfo file to be restored
+ * @param latch latch that should be counted down once the file is restored
+ * @param failures thread-safe list of failures
+ */
+ private void restoreFile(final FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) {
+ final IndexOutput indexOutput;
+ try {
+ // we create an output with no checksum, because the file is copied as raw bytes (with seeks) and
+ // a running checksum cannot be computed; the checksum file is written once copying is done
+ indexOutput = store.createOutputRaw(fileInfo.physicalName());
+ } catch (IOException e) {
+ failures.add(e);
+ latch.countDown();
+ return;
+ }
+
+ String firstFileToRecover = fileInfo.partName(0);
+ final AtomicInteger partIndex = new AtomicInteger();
+
+ blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
+ @Override
+ public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
+ recoveryStatus.index().addCurrentFilesSize(size);
+ indexOutput.writeBytes(data, offset, size);
+ if (restoreRateLimiter != null) {
+ rateLimiterListener.onRestorePause(restoreRateLimiter.pause(size));
+ }
+ }
+
+ @Override
+ public synchronized void onCompleted() {
+ int part = partIndex.incrementAndGet();
+ if (part < fileInfo.numberOfParts()) {
+ String partName = fileInfo.partName(part);
+ // continue with the new part
+ blobContainer.readBlob(partName, this);
+ return;
+ } else {
+ // we are done...
+ try {
+ indexOutput.close();
+ // write the checksum
+ if (fileInfo.checksum() != null) {
+ store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
+ }
+ store.directory().sync(Collections.singleton(fileInfo.physicalName()));
+ } catch (IOException e) {
+ onFailure(e);
+ return;
+ }
+ }
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ failures.add(t);
+ latch.countDown();
+ }
+ });
+ }
+
+ }
+
+ public interface RateLimiterListener {
+ void onRestorePause(long nanos);
+
+ void onSnapshotPause(long nanos);
+ }
+
+}
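
Note: two conventions in this repository are easy to miss: data blobs are named "__<generation>" with the generation encoded in base 36 (Character.MAX_RADIX), and a file is re-uploaded only when no existing snapshot already holds an identical copy. A small sketch of the naming round trip, using the same calls as fileNameFromGeneration() and findLatestFileNameGeneration() above:

    long generation = 41;
    String blobName = "__" + Long.toString(generation, Character.MAX_RADIX);     // "__15" in base 36
    long parsed = Long.parseLong(blobName.substring(2), Character.MAX_RADIX);     // 41 again
    // chunked uploads append ".part<N>"; canonicalName() strips it before parsing
    String canonical = BlobStoreIndexShardSnapshot.FileInfo.canonicalName("__15.part0"); // "__15"
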
diff --git a/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
new file mode 100644
index 0000000..67a6ec4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -0,0 +1,413 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots.blobstore;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.store.StoreFileMetaData;
+
+import java.io.IOException;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Shard snapshot metadata
+ */
+public class BlobStoreIndexShardSnapshot {
+
+ /**
+ * Information about a snapshotted file
+ */
+ public static class FileInfo {
+ private final String name;
+ private final String physicalName;
+ private final long length;
+ private final String checksum;
+ private final ByteSizeValue partSize;
+ private final long partBytes;
+ private final long numberOfParts;
+
+ /**
+ * Constructs a new instance of file info
+ *
+ * @param name file name as stored in the blob store
+ * @param physicalName original file name
+ * @param length total length of the file
+ * @param partSize size of the single chunk
+ * @param checksum checksum for the file
+ */
+ public FileInfo(String name, String physicalName, long length, ByteSizeValue partSize, String checksum) {
+ this.name = name;
+ this.physicalName = physicalName;
+ this.length = length;
+ this.checksum = checksum;
+
+ long partBytes = Long.MAX_VALUE;
+ if (partSize != null) {
+ partBytes = partSize.bytes();
+ }
+
+ long totalLength = length;
+ long numberOfParts = totalLength / partBytes;
+ if (totalLength % partBytes > 0) {
+ numberOfParts++;
+ }
+ if (numberOfParts == 0) {
+ numberOfParts++;
+ }
+ this.numberOfParts = numberOfParts;
+ this.partSize = partSize;
+ this.partBytes = partBytes;
+ }
+
+ /**
+ * Returns the base file name
+ *
+ * @return file name
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Returns part name if file is stored as multiple parts
+ *
+ * @param part part number
+ * @return part name
+ */
+ public String partName(long part) {
+ if (numberOfParts > 1) {
+ return name + ".part" + part;
+ } else {
+ return name;
+ }
+ }
+
+ /**
+ * Returns base file name from part name
+ *
+ * @param blobName part name
+ * @return base file name
+ */
+ public static String canonicalName(String blobName) {
+ if (blobName.contains(".part")) {
+ return blobName.substring(0, blobName.indexOf(".part"));
+ }
+ return blobName;
+ }
+
+ /**
+ * Returns original file name
+ *
+ * @return original file name
+ */
+ public String physicalName() {
+ return this.physicalName;
+ }
+
+ /**
+ * File length
+ *
+ * @return file length
+ */
+ public long length() {
+ return length;
+ }
+
+ /**
+ * Returns part size
+ *
+ * @return part size
+ */
+ public ByteSizeValue partSize() {
+ return partSize;
+ }
+
+ /**
+ * Returns the maximum number of bytes in a part
+ *
+ * @return maximum number of bytes in a part
+ */
+ public long partBytes() {
+ return partBytes;
+ }
+
+ /**
+ * Returns number of parts
+ *
+ * @return number of parts
+ */
+ public long numberOfParts() {
+ return numberOfParts;
+ }
+
+ /**
+ * Returns the file checksum provided by {@link org.elasticsearch.index.store.Store}
+ *
+ * @return file checksum
+ */
+ @Nullable
+ public String checksum() {
+ return checksum;
+ }
+
+ /**
+ * Checks if a file in a store is the same file
+ *
+ * @param md file in a store
+ * @return true if the file in the store and this file have the same checksum and length
+ */
+ public boolean isSame(StoreFileMetaData md) {
+ if (checksum == null || md.checksum() == null) {
+ return false;
+ }
+ return length == md.length() && checksum.equals(md.checksum());
+ }
+
+ static final class Fields {
+ static final XContentBuilderString NAME = new XContentBuilderString("name");
+ static final XContentBuilderString PHYSICAL_NAME = new XContentBuilderString("physical_name");
+ static final XContentBuilderString LENGTH = new XContentBuilderString("length");
+ static final XContentBuilderString CHECKSUM = new XContentBuilderString("checksum");
+ static final XContentBuilderString PART_SIZE = new XContentBuilderString("part_size");
+ }
+
+ /**
+ * Serializes file info into JSON
+ *
+ * @param file file info
+ * @param builder XContent builder
+ * @param params parameters
+ * @throws IOException
+ */
+ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field(Fields.NAME, file.name);
+ builder.field(Fields.PHYSICAL_NAME, file.physicalName);
+ builder.field(Fields.LENGTH, file.length);
+ if (file.checksum != null) {
+ builder.field(Fields.CHECKSUM, file.checksum);
+ }
+ if (file.partSize != null) {
+ builder.field(Fields.PART_SIZE, file.partSize.bytes());
+ }
+ builder.endObject();
+ }
+
+ /**
+ * Parses JSON that represents file info
+ *
+ * @param parser parser
+ * @return file info
+ * @throws IOException
+ */
+ public static FileInfo fromXContent(XContentParser parser) throws IOException {
+ XContentParser.Token token = parser.currentToken();
+ String name = null;
+ String physicalName = null;
+ long length = -1;
+ String checksum = null;
+ ByteSizeValue partSize = null;
+ if (token == XContentParser.Token.START_OBJECT) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if (token.isValue()) {
+ if ("name".equals(currentFieldName)) {
+ name = parser.text();
+ } else if ("physical_name".equals(currentFieldName)) {
+ physicalName = parser.text();
+ } else if ("length".equals(currentFieldName)) {
+ length = parser.longValue();
+ } else if ("checksum".equals(currentFieldName)) {
+ checksum = parser.text();
+ } else if ("part_size".equals(currentFieldName)) {
+ partSize = new ByteSizeValue(parser.longValue());
+ } else {
+ throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ }
+ }
+ // TODO: Verify???
+ return new FileInfo(name, physicalName, length, partSize, checksum);
+ }
+
+ }
+
+ private final String snapshot;
+
+ private final long indexVersion;
+
+ private final ImmutableList<FileInfo> indexFiles;
+
+ /**
+ * Constructs new shard snapshot metadata from snapshot metadata
+ *
+ * @param snapshot snapshot id
+ * @param indexVersion index version
+ * @param indexFiles list of files in the shard
+ */
+ public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileInfo> indexFiles) {
+ assert snapshot != null;
+ assert indexVersion >= 0;
+ this.snapshot = snapshot;
+ this.indexVersion = indexVersion;
+ this.indexFiles = ImmutableList.copyOf(indexFiles);
+ }
+
+ /**
+ * Returns index version
+ *
+ * @return index version
+ */
+ public long indexVersion() {
+ return indexVersion;
+ }
+
+ /**
+ * Returns snapshot id
+ *
+ * @return snapshot id
+ */
+ public String snapshot() {
+ return snapshot;
+ }
+
+ /**
+ * Returns list of files in the shard
+ *
+ * @return list of files
+ */
+ public ImmutableList<FileInfo> indexFiles() {
+ return indexFiles;
+ }
+
+ /**
+ * Serializes shard snapshot metadata info into JSON
+ *
+ * @param snapshot shard snapshot metadata
+ * @param builder XContent builder
+ * @param params parameters
+ * @throws IOException
+ */
+ public static void toXContent(BlobStoreIndexShardSnapshot snapshot, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field("name", snapshot.snapshot);
+ builder.field("index-version", snapshot.indexVersion);
+ builder.startArray("files");
+ for (FileInfo fileInfo : snapshot.indexFiles) {
+ FileInfo.toXContent(fileInfo, builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ }
+
+ /**
+ * Parses shard snapshot metadata
+ *
+ * @param parser parser
+ * @return shard snapshot metadata
+ * @throws IOException
+ */
+ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) throws IOException {
+
+ String snapshot = null;
+ long indexVersion = -1;
+
+ List<FileInfo> indexFiles = newArrayList();
+
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if (token.isValue()) {
+ if ("name".equals(currentFieldName)) {
+ snapshot = parser.text();
+ } else if ("index-version".equals(currentFieldName)) {
+ indexVersion = parser.longValue();
+ } else {
+ throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ indexFiles.add(FileInfo.fromXContent(parser));
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ }
+ }
+ return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, ImmutableList.<FileInfo>copyOf(indexFiles));
+ }
+
+ /**
+ * Returns true if this snapshot contains a file with a given original name
+ *
+ * @param physicalName original file name
+ * @return true if the file was found, false otherwise
+ */
+ public boolean containPhysicalIndexFile(String physicalName) {
+ return findPhysicalIndexFile(physicalName) != null;
+ }
+
+ public FileInfo findPhysicalIndexFile(String physicalName) {
+ for (FileInfo file : indexFiles) {
+ if (file.physicalName().equals(physicalName)) {
+ return file;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Finds a file with a given snapshot name
+ *
+ * @param name file name
+ * @return file info or null if the file was not found
+ */
+ public FileInfo findNameFile(String name) {
+ for (FileInfo file : indexFiles) {
+ if (file.name().equals(name)) {
+ return file;
+ }
+ }
+ return null;
+ }
+
+}
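
Note: the part arithmetic in FileInfo's constructor determines how large files are chunked: the number of parts is ceil(length / partBytes) with a minimum of one, and partName(i) only adds a suffix when there is more than one part. A quick illustration with made-up values (the checksum string is an opaque placeholder):

    // a 2.5 GB file chunked at 1 GB yields three parts: __a.part0 .. __a.part2
    BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo(
            "__a", "_0.fdt", 2684354560L, new ByteSizeValue(1073741824L), "dummy-checksum");
    assert info.numberOfParts() == 3;
    assert info.partName(0).equals("__a.part0");
    // with a null partSize the whole file is one part and partName(0) is just "__a"
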
diff --git a/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java
new file mode 100644
index 0000000..a865765
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots.blobstore;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Contains information about all snapshots for the given shard in the repository
+ * <p/>
+ * This class is used to find files that were already snapshotted and to clear out files that are no longer
+ * referenced by any snapshot
+ */
+public class BlobStoreIndexShardSnapshots implements Iterable<BlobStoreIndexShardSnapshot> {
+ private final ImmutableList<BlobStoreIndexShardSnapshot> shardSnapshots;
+
+ public BlobStoreIndexShardSnapshots(List<BlobStoreIndexShardSnapshot> shardSnapshots) {
+ this.shardSnapshots = ImmutableList.copyOf(shardSnapshots);
+ }
+
+ /**
+ * Returns list of snapshots
+ *
+ * @return list of snapshots
+ */
+ public ImmutableList<BlobStoreIndexShardSnapshot> snapshots() {
+ return this.shardSnapshots;
+ }
+
+ /**
+ * Finds reference to a snapshotted file by its original name
+ *
+ * @param physicalName original name
+ * @return file info or null if file is not present in any of snapshots
+ */
+ public FileInfo findPhysicalIndexFile(String physicalName) {
+ for (BlobStoreIndexShardSnapshot snapshot : shardSnapshots) {
+ FileInfo fileInfo = snapshot.findPhysicalIndexFile(physicalName);
+ if (fileInfo != null) {
+ return fileInfo;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Finds reference to a snapshotted file by its snapshot name
+ *
+ * @param name file name
+ * @return file info or null if file is not present in any of snapshots
+ */
+ public FileInfo findNameFile(String name) {
+ for (BlobStoreIndexShardSnapshot snapshot : shardSnapshots) {
+ FileInfo fileInfo = snapshot.findNameFile(name);
+ if (fileInfo != null) {
+ return fileInfo;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public Iterator<BlobStoreIndexShardSnapshot> iterator() {
+ return shardSnapshots.iterator();
+ }
+}
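
Note: this container is what makes snapshots incremental in both directions: findPhysicalIndexFile() lets a new snapshot reuse an already-uploaded blob, and findNameFile() lets cleanup decide whether a data blob is still referenced. A sketch of the cleanup check, matching Context.cleanup() above; survivingList is a hypothetical list of all snapshots except the one being deleted:

    BlobStoreIndexShardSnapshots surviving = new BlobStoreIndexShardSnapshots(survivingList);
    String blobName = "__15.part0";
    boolean stillReferenced =
            surviving.findNameFile(BlobStoreIndexShardSnapshot.FileInfo.canonicalName(blobName)) != null;
    // blobs starting with "__" that are no longer referenced get deleted
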
diff --git a/src/main/java/org/elasticsearch/index/snapshots/blobstore/RateLimitingInputStream.java b/src/main/java/org/elasticsearch/index/snapshots/blobstore/RateLimitingInputStream.java
new file mode 100644
index 0000000..71d1e05
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/snapshots/blobstore/RateLimitingInputStream.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.snapshots.blobstore;
+
+import org.apache.lucene.store.RateLimiter;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Rate limiting wrapper for InputStream
+ */
+public class RateLimitingInputStream extends InputStream {
+
+ private final InputStream delegate;
+
+ private final RateLimiter rateLimiter;
+
+ private final Listener listener;
+
+ public interface Listener {
+ void onPause(long nanos);
+ }
+
+ public RateLimitingInputStream(InputStream delegate, RateLimiter rateLimiter, Listener listener) {
+ this.delegate = delegate;
+ this.rateLimiter = rateLimiter;
+ this.listener = listener;
+ }
+
+ @Override
+ public int read() throws IOException {
+ int b = delegate.read();
+ long pause = b < 0 ? 0 : rateLimiter.pause(1); // don't rate-limit the EOF read
+ if (pause > 0) {
+ listener.onPause(pause);
+ }
+ return b;
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return read(b, 0, b.length);
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ int n = delegate.read(b, off, len);
+ if (n > 0) {
+ listener.onPause(rateLimiter.pause(n));
+ }
+ return n;
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ return delegate.skip(n);
+ }
+
+ @Override
+ public int available() throws IOException {
+ return delegate.available();
+ }
+
+ @Override
+ public void close() throws IOException {
+ delegate.close();
+ }
+
+ @Override
+ public void mark(int readlimit) {
+ delegate.mark(readlimit);
+ }
+
+ @Override
+ public void reset() throws IOException {
+ delegate.reset();
+ }
+
+ @Override
+ public boolean markSupported() {
+ return delegate.markSupported();
+ }
+}
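
Note: the stream delegates everything and only adds a pause per read, which is how initialize() in BlobStoreIndexShardRepository wires its snapshotThrottleListener. A wiring sketch using Lucene's SimpleRateLimiter; fileStream is a stand-in for the real source stream:

    final AtomicLong totalPausedNanos = new AtomicLong();
    RateLimiter limiter = new RateLimiter.SimpleRateLimiter(20.0); // megabytes per second
    InputStream throttled = new RateLimitingInputStream(fileStream, limiter,
            new RateLimitingInputStream.Listener() {
                @Override
                public void onPause(long nanos) {
                    totalPausedNanos.addAndGet(nanos); // accumulate throttle time for stats
                }
            });
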
diff --git a/src/main/java/org/elasticsearch/index/store/DirectoryService.java b/src/main/java/org/elasticsearch/index/store/DirectoryService.java
new file mode 100644
index 0000000..54f996c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/DirectoryService.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.store.Directory;
+
+import java.io.IOException;
+
+/**
+ * Builds the {@link Directory} instances backing a shard store and performs low-level file operations on them.
+ */
+public interface DirectoryService {
+
+ Directory[] build() throws IOException;
+
+ long throttleTimeInNanos();
+
+ void renameFile(Directory dir, String from, String to) throws IOException;
+
+ void fullDelete(Directory dir) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java b/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java
new file mode 100644
index 0000000..942478a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.store.CompoundFileDirectory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
+import org.elasticsearch.common.Nullable;
+
+/**
+ * Utils for working with {@link Directory} classes.
+ */
+public final class DirectoryUtils {
+
+ private DirectoryUtils() {} // no instance
+
+ /**
+ * Tries to extract a store directory out of the given directory, taking into
+ * account that the directory may be a filter directory and/or a compound directory.
+ */
+ @Nullable
+ public static Store.StoreDirectory getStoreDirectory(Directory dir) {
+ Directory current = dir;
+ while (true) {
+ if (current instanceof Store.StoreDirectory) {
+ return (Store.StoreDirectory) current;
+ }
+ if (current instanceof FilterDirectory) {
+ current = ((FilterDirectory) current).getDelegate();
+ } else if (current instanceof CompoundFileDirectory) {
+ current = ((CompoundFileDirectory) current).getDirectory();
+ } else {
+ return null;
+ }
+ }
+ }
+
+ private static final Directory getLeafDirectory(FilterDirectory dir) {
+ Directory current = dir.getDelegate();
+ while ((current instanceof FilterDirectory)) {
+ current = ((FilterDirectory) current).getDelegate();
+ }
+ return current;
+ }
+
+ /**
+ * Tries to extract the leaf of the {@link Directory}, if the directory is a {@link FilterDirectory}, and casts
+ * it to the given target class, or returns <code>null</code> if the leaf is not assignable to the target class.
+ * If the given {@link Directory} is a concrete directory, it will be treated as a leaf and the above applies.
+ */
+ public static <T extends Directory> T getLeaf(Directory dir, Class<T> targetClass) {
+ return getLeaf(dir, targetClass, null);
+ }
+
+ /**
+ * Tries to extract the leaf of the {@link Directory}, if the directory is a {@link FilterDirectory}, and casts
+ * it to the given target class, or returns the given default value if the leaf is not assignable to the target class.
+ * If the given {@link Directory} is a concrete directory, it will be treated as a leaf and the above applies.
+ */
+ public static <T extends Directory> T getLeaf(Directory dir, Class<T> targetClass, T defaultValue) {
+ Directory d = dir;
+ if (dir instanceof FilterDirectory) {
+ d = getLeafDirectory((FilterDirectory) dir);
+ }
+ if (targetClass.isAssignableFrom(d.getClass())) {
+ return targetClass.cast(d);
+ } else {
+ return defaultValue;
+ }
+ }
+
+}
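A minimal sketch of the intended use (assuming only the classes in this patch plus Lucene's FSDirectory): unwrap a possibly filtered directory down to its filesystem leaf before querying the underlying file, the same pattern AbstractDistributor.getUsableSpace() uses later in this patch.

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.elasticsearch.index.store.DirectoryUtils;

    final class LeafUnwrapExample {
        // Returns the usable space of the filesystem backing the directory,
        // or 0 if the directory is not filesystem-based.
        static long usableSpace(Directory dir) {
            FSDirectory leaf = DirectoryUtils.getLeaf(dir, FSDirectory.class);
            return leaf != null ? leaf.getDirectory().getUsableSpace() : 0L;
        }
    }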
diff --git a/src/main/java/org/elasticsearch/index/store/IndexStore.java b/src/main/java/org/elasticsearch/index/store/IndexStore.java
new file mode 100644
index 0000000..743a256
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/IndexStore.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.store.StoreRateLimiting;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.store.IndicesStore;
+
+import java.io.IOException;
+
+/**
+ * The index store holds index-level configuration for the {@link Store} that each shard will use.
+ */
+public interface IndexStore extends CloseableIndexComponent {
+
+ /**
+ * Returns <tt>true</tt> if the store is persistent and can survive full restarts.
+ */
+ boolean persistent();
+
+ IndicesStore indicesStore();
+
+ /**
+ * Returns the rate limiting settings: the index-level ones if explicitly configured,
+ * otherwise the node-level ones (the default).
+ */
+ StoreRateLimiting rateLimiting();
+
+ /**
+ * The shard store class that should be used for each shard.
+ */
+ Class<? extends DirectoryService> shardDirectory();
+
+ /**
+ * Returns the backing store total space, or <tt>-1</tt> if not available.
+ */
+ ByteSizeValue backingStoreTotalSpace();
+
+ /**
+ * Returns the backing store free space, or <tt>-1</tt> if not available.
+ */
+ ByteSizeValue backingStoreFreeSpace();
+
+ /**
+ * Returns <tt>true</tt> if this shard is allocated on this node. Allocated means
+ * that it has storage files that can be deleted using {@link #deleteUnallocated(org.elasticsearch.index.shard.ShardId)}.
+ */
+ boolean canDeleteUnallocated(ShardId shardId);
+
+ /**
+ * Deletes this shard store since it is no longer allocated.
+ */
+ void deleteUnallocated(ShardId shardId) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java
new file mode 100644
index 0000000..9f54dd2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.store.MMapDirectory;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.store.fs.MmapFsIndexStoreModule;
+import org.elasticsearch.index.store.fs.NioFsIndexStoreModule;
+import org.elasticsearch.index.store.fs.SimpleFsIndexStoreModule;
+import org.elasticsearch.index.store.memory.MemoryIndexStoreModule;
+import org.elasticsearch.index.store.ram.RamIndexStoreModule;
+
+/**
+ * Spawns the concrete index store module based on the "index.store.type" setting,
+ * falling back to a sensible platform default.
+ */
+public class IndexStoreModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public IndexStoreModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ Class<? extends Module> indexStoreModule = NioFsIndexStoreModule.class;
+ // Same logic as FSDirectory#open ...
+ if ((Constants.WINDOWS || Constants.SUN_OS || Constants.LINUX)
+ && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
+ indexStoreModule = MmapFsIndexStoreModule.class;
+ } else if (Constants.WINDOWS) {
+ indexStoreModule = SimpleFsIndexStoreModule.class;
+ }
+ String storeType = settings.get("index.store.type");
+ if ("ram".equalsIgnoreCase(storeType)) {
+ indexStoreModule = RamIndexStoreModule.class;
+ } else if ("memory".equalsIgnoreCase(storeType)) {
+ indexStoreModule = MemoryIndexStoreModule.class;
+ } else if ("fs".equalsIgnoreCase(storeType)) {
+ // nothing to set here ... (we default to fs)
+ } else if ("simplefs".equalsIgnoreCase(storeType) || "simple_fs".equals(storeType)) {
+ indexStoreModule = SimpleFsIndexStoreModule.class;
+ } else if ("niofs".equalsIgnoreCase(storeType) || "nio_fs".equalsIgnoreCase(storeType)) {
+ indexStoreModule = NioFsIndexStoreModule.class;
+ } else if ("mmapfs".equalsIgnoreCase(storeType) || "mmap_fs".equalsIgnoreCase(storeType)) {
+ indexStoreModule = MmapFsIndexStoreModule.class;
+ } else if (storeType != null) {
+ indexStoreModule = settings.getAsClass("index.store.type", indexStoreModule, "org.elasticsearch.index.store.", "IndexStoreModule");
+ }
+ return ImmutableList.of(Modules.createModule(indexStoreModule, settings));
+ }
+
+ @Override
+ protected void configure() {
+ }
+} \ No newline at end of file
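A sketch of driving the selection above from settings (ImmutableSettings is the usual builder in this codebase; the chosen value is only an example):

    import org.elasticsearch.common.inject.Module;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.store.IndexStoreModule;

    final class StoreTypeExample {
        static Iterable<? extends Module> mmapStoreModules() {
            // Force the mmapfs store instead of the platform default picked above.
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("index.store.type", "mmapfs") // "niofs", "simplefs", "memory", "ram" also match
                    .build();
            return new IndexStoreModule(settings).spawnModules();
        }
    }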
diff --git a/src/main/java/org/elasticsearch/index/store/Store.java b/src/main/java/org/elasticsearch/index/store/Store.java
new file mode 100644
index 0000000..a5e7ed0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/Store.java
@@ -0,0 +1,662 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Directories;
+import org.elasticsearch.common.lucene.store.ChecksumIndexOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.distributor.Distributor;
+import org.elasticsearch.index.store.support.ForceSyncDirectory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.zip.Adler32;
+
+/**
+ * A shard-level store that manages the shard's directories, file metadata, and checksums.
+ */
+public class Store extends AbstractIndexShardComponent implements CloseableIndexComponent {
+
+ static final String CHECKSUMS_PREFIX = "_checksums-";
+
+ public static final boolean isChecksum(String name) {
+ return name.startsWith(CHECKSUMS_PREFIX);
+ }
+
+ private final IndexStore indexStore;
+ final CodecService codecService;
+ private final DirectoryService directoryService;
+ private final StoreDirectory directory;
+
+ private volatile ImmutableOpenMap<String, StoreFileMetaData> filesMetadata = ImmutableOpenMap.of();
+ private volatile String[] files = Strings.EMPTY_ARRAY;
+ private final Object mutex = new Object();
+
+ private final boolean sync;
+
+ @Inject
+ public Store(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, CodecService codecService, DirectoryService directoryService, Distributor distributor) throws IOException {
+ super(shardId, indexSettings);
+ this.indexStore = indexStore;
+ this.codecService = codecService;
+ this.directoryService = directoryService;
+ this.sync = componentSettings.getAsBoolean("sync", true); // TODO we don't really need to fsync when using shared gateway...
+ this.directory = new StoreDirectory(distributor);
+ }
+
+ public IndexStore indexStore() {
+ return this.indexStore;
+ }
+
+ public Directory directory() {
+ return directory;
+ }
+
+ public ImmutableMap<String, StoreFileMetaData> list() throws IOException {
+ ImmutableMap.Builder<String, StoreFileMetaData> builder = ImmutableMap.builder();
+ for (String name : files) {
+ StoreFileMetaData md = metaData(name);
+ if (md != null) {
+ builder.put(md.name(), md);
+ }
+ }
+ return builder.build();
+ }
+
+ public StoreFileMetaData metaData(String name) throws IOException {
+ StoreFileMetaData md = filesMetadata.get(name);
+ if (md == null) {
+ return null;
+ }
+ // IndexOutput not closed yet, so the file does not exist yet
+ if (md.length() == -1) {
+ return null;
+ }
+ return md;
+ }
+
+ /**
+ * Deletes the content of a shard store. Be careful when calling this!
+ */
+ public void deleteContent() throws IOException {
+ String[] files = directory.listAll();
+ IOException lastException = null;
+ for (String file : files) {
+ if (isChecksum(file)) {
+ try {
+ directory.deleteFileChecksum(file);
+ } catch (IOException e) {
+ lastException = e;
+ }
+ } else {
+ try {
+ directory.deleteFile(file);
+ } catch (FileNotFoundException e) {
+ // ignore
+ } catch (IOException e) {
+ lastException = e;
+ }
+ }
+ }
+ if (lastException != null) {
+ throw lastException;
+ }
+ }
+
+ public StoreStats stats() throws IOException {
+ return new StoreStats(Directories.estimateSize(directory), directoryService.throttleTimeInNanos());
+ }
+
+ public ByteSizeValue estimateSize() throws IOException {
+ return new ByteSizeValue(Directories.estimateSize(directory));
+ }
+
+ public void renameFile(String from, String to) throws IOException {
+ synchronized (mutex) {
+ StoreFileMetaData fromMetaData = filesMetadata.get(from); // we should always find this one
+ if (fromMetaData == null) {
+ throw new FileNotFoundException(from);
+ }
+ directoryService.renameFile(fromMetaData.directory(), from, to);
+ StoreFileMetaData toMetaData = new StoreFileMetaData(to, fromMetaData.length(), fromMetaData.checksum(), fromMetaData.directory());
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(from).fPut(to, toMetaData).build();
+ files = filesMetadata.keys().toArray(String.class);
+ }
+ }
+
+ public static Map<String, String> readChecksums(File[] locations) throws IOException {
+ Directory[] dirs = new Directory[locations.length];
+ try {
+ for (int i = 0; i < locations.length; i++) {
+ dirs[i] = new SimpleFSDirectory(locations[i]);
+ }
+ return readChecksums(dirs, null);
+ } finally {
+ for (Directory dir : dirs) {
+ if (dir != null) {
+ try {
+ dir.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+ }
+ }
+
+ static Map<String, String> readChecksums(Directory[] dirs, Map<String, String> defaultValue) throws IOException {
+ long lastFound = -1;
+ Directory lastDir = null;
+ for (Directory dir : dirs) {
+ for (String name : dir.listAll()) {
+ if (!isChecksum(name)) {
+ continue;
+ }
+ long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
+ if (current > lastFound) {
+ lastFound = current;
+ lastDir = dir;
+ }
+ }
+ }
+ if (lastFound == -1) {
+ return defaultValue;
+ }
+ IndexInput indexInput = lastDir.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE);
+ try {
+ indexInput.readInt(); // version
+ return indexInput.readStringStringMap();
+ } catch (Throwable e) {
+ // failed to load checksums, ignore and return an empty map
+ return defaultValue;
+ } finally {
+ indexInput.close();
+ }
+ }
+
+ public void writeChecksums() throws IOException {
+ ImmutableMap<String, StoreFileMetaData> files = list();
+ String checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
+ synchronized (mutex) {
+ Map<String, String> checksums = new HashMap<String, String>();
+ for (StoreFileMetaData metaData : files.values()) {
+ if (metaData.checksum() != null) {
+ checksums.put(metaData.name(), metaData.checksum());
+ }
+ }
+ while (directory.fileExists(checksumName)) {
+ checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
+ }
+ IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT, true);
+ try {
+ output.writeInt(0); // version
+ output.writeStringStringMap(checksums);
+ } finally {
+ output.close();
+ }
+
+ }
+ for (StoreFileMetaData metaData : files.values()) {
+ if (metaData.name().startsWith(CHECKSUMS_PREFIX) && !checksumName.equals(metaData.name())) {
+ try {
+ directory.deleteFileChecksum(metaData.name());
+ } catch (Throwable e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns <tt>false</tt> by default.
+ */
+ public boolean suggestUseCompoundFile() {
+ return false;
+ }
+
+ public void close() {
+ try {
+ directory.close();
+ } catch (IOException e) {
+ logger.debug("failed to close directory", e);
+ }
+ }
+
+ /**
+ * Creates a raw output: no checksum is computed and no compression is applied, even if enabled.
+ */
+ public IndexOutput createOutputRaw(String name) throws IOException {
+ return directory.createOutput(name, IOContext.DEFAULT, true);
+ }
+
+ /**
+ * Opens an index input in raw form, with no decompression, for example.
+ */
+ public IndexInput openInputRaw(String name, IOContext context) throws IOException {
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData == null) {
+ throw new FileNotFoundException(name);
+ }
+ return metaData.directory().openInput(name, context);
+ }
+
+ public void writeChecksum(String name, String checksum) throws IOException {
+ // update the metadata to include the checksum and write a new checksums file
+ synchronized (mutex) {
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ metaData = new StoreFileMetaData(metaData.name(), metaData.length(), checksum, metaData.directory());
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, metaData).build();
+ writeChecksums();
+ }
+ }
+
+ public void writeChecksums(Map<String, String> checksums) throws IOException {
+ // update the metadata to include the checksum and write a new checksums file
+ synchronized (mutex) {
+ for (Map.Entry<String, String> entry : checksums.entrySet()) {
+ StoreFileMetaData metaData = filesMetadata.get(entry.getKey());
+ metaData = new StoreFileMetaData(metaData.name(), metaData.length(), entry.getValue(), metaData.directory());
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(entry.getKey(), metaData).build();
+ }
+ writeChecksums();
+ }
+ }
+
+ /**
+ * The store directory caches file-level metadata, as well as checksums for the files it manages.
+ */
+ public class StoreDirectory extends BaseDirectory implements ForceSyncDirectory {
+
+ private final Distributor distributor;
+
+ StoreDirectory(Distributor distributor) throws IOException {
+ this.distributor = distributor;
+ synchronized (mutex) {
+ ImmutableOpenMap.Builder<String, StoreFileMetaData> builder = ImmutableOpenMap.builder();
+ Map<String, String> checksums = readChecksums(distributor.all(), new HashMap<String, String>());
+ for (Directory delegate : distributor.all()) {
+ for (String file : delegate.listAll()) {
+ String checksum = checksums.get(file);
+ builder.put(file, new StoreFileMetaData(file, delegate.fileLength(file), checksum, delegate));
+ }
+ }
+ filesMetadata = builder.build();
+ files = filesMetadata.keys().toArray(String.class);
+ }
+ }
+
+ public ShardId shardId() {
+ return Store.this.shardId();
+ }
+
+ public Settings settings() {
+ return Store.this.indexSettings();
+ }
+
+ @Nullable
+ public CodecService codecService() {
+ return Store.this.codecService;
+ }
+
+ public Directory[] delegates() {
+ return distributor.all();
+ }
+
+ @Override
+ public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
+ ensureOpen();
+ // let the default implementation run, so we properly open an input and create an output
+ super.copy(to, src, dest, context);
+ }
+
+ @Override
+ public String[] listAll() throws IOException {
+ ensureOpen();
+ return files;
+ }
+
+ @Override
+ public boolean fileExists(String name) throws IOException {
+ ensureOpen();
+ return filesMetadata.containsKey(name);
+ }
+
+ public void deleteFileChecksum(String name) throws IOException {
+ ensureOpen();
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData != null) {
+ try {
+ metaData.directory().deleteFile(name);
+ } catch (IOException e) {
+ if (metaData.directory().fileExists(name)) {
+ throw e;
+ }
+ }
+ }
+ synchronized (mutex) {
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(name).build();
+ files = filesMetadata.keys().toArray(String.class);
+ }
+ }
+
+ @Override
+ public void deleteFile(String name) throws IOException {
+ ensureOpen();
+ // we don't allow deleting the checksum files, except through the deleteFileChecksum method
+ if (isChecksum(name)) {
+ return;
+ }
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData != null) {
+ try {
+ metaData.directory().deleteFile(name);
+ } catch (IOException e) {
+ if (metaData.directory().fileExists(name)) {
+ throw e;
+ }
+ }
+ }
+ synchronized (mutex) {
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(name).build();
+ files = filesMetadata.keys().toArray(String.class);
+ }
+ }
+
+ /**
+ * Returns the *actual* file length, i.e. the compressed length if compression is enabled. This
+ * messes things up when using the compound file format, but it shouldn't be used in that case anyway...
+ */
+ @Override
+ public long fileLength(String name) throws IOException {
+ ensureOpen();
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData == null) {
+ throw new FileNotFoundException(name);
+ }
+ // not set yet (IndexOutput not closed)
+ if (metaData.length() != -1) {
+ return metaData.length();
+ }
+ return metaData.directory().fileLength(name);
+ }
+
+ @Override
+ public IndexOutput createOutput(String name, IOContext context) throws IOException {
+ return createOutput(name, context, false);
+ }
+
+ public IndexOutput createOutput(String name, IOContext context, boolean raw) throws IOException {
+ ensureOpen();
+ Directory directory;
+ // we want to write the segments gen file to the same directory *all* the time
+ // to make sure we don't create multiple copies of it
+ if (isChecksum(name) || IndexFileNames.SEGMENTS_GEN.equals(name)) {
+ directory = distributor.primary();
+ } else {
+ directory = distributor.any();
+ }
+ IndexOutput out = directory.createOutput(name, context);
+ boolean success = false;
+ try {
+ synchronized (mutex) {
+ StoreFileMetaData metaData = new StoreFileMetaData(name, -1, null, directory);
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, metaData).build();
+ files = filesMetadata.keys().toArray(String.class);
+ boolean computeChecksum = !raw;
+ if (computeChecksum) {
+ // don't compute checksum for segment based files
+ if (IndexFileNames.SEGMENTS_GEN.equals(name) || name.startsWith(IndexFileNames.SEGMENTS)) {
+ computeChecksum = false;
+ }
+ }
+ if (computeChecksum) {
+ out = new BufferedChecksumIndexOutput(out, new Adler32());
+ }
+
+ final StoreIndexOutput storeIndexOutput = new StoreIndexOutput(metaData, out, name);
+ success = true;
+ return storeIndexOutput;
+ }
+ } finally {
+ if (!success) {
+ IOUtils.closeWhileHandlingException(out);
+ }
+ }
+ }
+
+ @Override
+ public IndexInput openInput(String name, IOContext context) throws IOException {
+ ensureOpen();
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData == null) {
+ throw new FileNotFoundException(name);
+ }
+ IndexInput in = metaData.directory().openInput(name, context);
+ boolean success = false;
+ try {
+ // Only for backward compatibility, since we now use Lucene codec compression
+ if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
+ Compressor compressor = CompressorFactory.compressor(in);
+ if (compressor != null) {
+ in = compressor.indexInput(in);
+ }
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ IOUtils.closeWhileHandlingException(in);
+ }
+ }
+ return in;
+ }
+
+ @Override
+ public IndexInputSlicer createSlicer(String name, IOContext context) throws IOException {
+ ensureOpen();
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData == null) {
+ throw new FileNotFoundException(name);
+ }
+ // Only for backward compatibility, since we now use Lucene codec compression
+ if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
+ // rely on the slicer from the base class that uses an input, since they might be compressed...
+ // note, it seems like slicers are only used in compound file format..., so not relevant for now
+ return super.createSlicer(name, context);
+ }
+ return metaData.directory().createSlicer(name, context);
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ isOpen = false;
+ for (Directory delegate : distributor.all()) {
+ delegate.close();
+ }
+ synchronized (mutex) {
+ filesMetadata = ImmutableOpenMap.of();
+ files = Strings.EMPTY_ARRAY;
+ }
+ }
+
+ @Override
+ public Lock makeLock(String name) {
+ return distributor.primary().makeLock(name);
+ }
+
+ @Override
+ public void clearLock(String name) throws IOException {
+ distributor.primary().clearLock(name);
+ }
+
+ @Override
+ public void setLockFactory(LockFactory lockFactory) throws IOException {
+ distributor.primary().setLockFactory(lockFactory);
+ }
+
+ @Override
+ public LockFactory getLockFactory() {
+ return distributor.primary().getLockFactory();
+ }
+
+ @Override
+ public String getLockID() {
+ return distributor.primary().getLockID();
+ }
+
+ @Override
+ public void sync(Collection<String> names) throws IOException {
+ ensureOpen();
+ if (sync) {
+ Map<Directory, Collection<String>> map = Maps.newHashMap();
+ for (String name : names) {
+ StoreFileMetaData metaData = filesMetadata.get(name);
+ if (metaData == null) {
+ throw new FileNotFoundException(name);
+ }
+ Collection<String> dirNames = map.get(metaData.directory());
+ if (dirNames == null) {
+ dirNames = new ArrayList<String>();
+ map.put(metaData.directory(), dirNames);
+ }
+ dirNames.add(name);
+ }
+ for (Map.Entry<Directory, Collection<String>> entry : map.entrySet()) {
+ entry.getKey().sync(entry.getValue());
+ }
+ }
+ for (String name : names) {
+ // write the checksums file when we sync on the segments file (committed)
+ if (!name.equals(IndexFileNames.SEGMENTS_GEN) && name.startsWith(IndexFileNames.SEGMENTS)) {
+ writeChecksums();
+ break;
+ }
+ }
+ }
+
+ @Override
+ public void forceSync(String name) throws IOException {
+ sync(ImmutableList.of(name));
+ }
+
+ @Override
+ public String toString() {
+ return "store(" + distributor.toString() + ")";
+ }
+ }
+
+ class StoreIndexOutput extends IndexOutput {
+
+ private final StoreFileMetaData metaData;
+
+ private final IndexOutput out;
+
+ private final String name;
+
+ StoreIndexOutput(StoreFileMetaData metaData, IndexOutput delegate, String name) {
+ this.metaData = metaData;
+ this.out = delegate;
+ this.name = name;
+ }
+
+ @Override
+ public void close() throws IOException {
+ out.close();
+ String checksum = null;
+ IndexOutput underlying = out;
+ if (underlying instanceof BufferedChecksumIndexOutput) {
+ checksum = Long.toString(((BufferedChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
+ } else if (underlying instanceof ChecksumIndexOutput) {
+ checksum = Long.toString(((ChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
+ }
+ synchronized (mutex) {
+ StoreFileMetaData md = new StoreFileMetaData(name, metaData.directory().fileLength(name), checksum, metaData.directory());
+ filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, md).build();
+ files = filesMetadata.keys().toArray(String.class);
+ }
+ }
+
+ @Override
+ public void copyBytes(DataInput input, long numBytes) throws IOException {
+ out.copyBytes(input, numBytes);
+ }
+
+ @Override
+ public long getFilePointer() {
+ return out.getFilePointer();
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ out.writeByte(b);
+ }
+
+ @Override
+ public void writeBytes(byte[] b, int offset, int length) throws IOException {
+ out.writeBytes(b, offset, length);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ out.seek(pos);
+ }
+
+ @Override
+ public long length() throws IOException {
+ return out.length();
+ }
+
+ @Override
+ public void setLength(long length) throws IOException {
+ out.setLength(length);
+ }
+
+ @Override
+ public String toString() {
+ return out.toString();
+ }
+ }
+}
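A short sketch of the checksum-file naming scheme implemented above (the timestamp is a made-up example): files are named "_checksums-&lt;millis&gt;", the highest suffix wins on read, and the payload is a version int followed by a file-name-to-checksum map.

    import org.elasticsearch.index.store.Store;

    final class ChecksumNameExample {
        static void demo() {
            String name = "_checksums-1401234567890"; // hypothetical generation
            if (Store.isChecksum(name)) {
                long generation = Long.parseLong(name.substring("_checksums-".length()));
                // readChecksums() keeps the file with the largest generation, then reads
                // an int version followed by a string->string map (file name -> checksum).
                System.out.println("generation: " + generation);
            }
        }
    }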
diff --git a/src/main/java/org/elasticsearch/index/store/StoreException.java b/src/main/java/org/elasticsearch/index/store/StoreException.java
new file mode 100644
index 0000000..c5b491e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/StoreException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * An exception related to a shard's store.
+ */
+public class StoreException extends IndexShardException {
+
+ public StoreException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public StoreException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
new file mode 100644
index 0000000..8177cbb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class StoreFileMetaData implements Streamable {
+
+ private String name;
+
+ // the actual file size on "disk"; if the file is compressed, the compressed size
+ private long length;
+
+ private String checksum;
+
+ private transient Directory directory;
+
+ private StoreFileMetaData() {
+ }
+
+ public StoreFileMetaData(String name, long length, String checksum) {
+ this(name, length, checksum, null);
+ }
+
+ public StoreFileMetaData(String name, long length, String checksum, @Nullable Directory directory) {
+ this.name = name;
+ this.length = length;
+ this.checksum = checksum;
+ this.directory = directory;
+ }
+
+ public Directory directory() {
+ return this.directory;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ /**
+ * The actual file size on "disk"; if the file is compressed, this is the compressed size.
+ */
+ public long length() {
+ return length;
+ }
+
+ @Nullable
+ public String checksum() {
+ return this.checksum;
+ }
+
+ public boolean isSame(StoreFileMetaData other) {
+ if (checksum == null || other.checksum == null) {
+ return false;
+ }
+ return length == other.length && checksum.equals(other.checksum);
+ }
+
+ public static StoreFileMetaData readStoreFileMetaData(StreamInput in) throws IOException {
+ StoreFileMetaData md = new StoreFileMetaData();
+ md.readFrom(in);
+ return md;
+ }
+
+ @Override
+ public String toString() {
+ return "name [" + name + "], length [" + length + "], checksum [" + checksum + "]";
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ length = in.readVLong();
+ if (in.readBoolean()) {
+ checksum = in.readString();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVLong(length);
+ if (checksum == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(checksum);
+ }
+ }
+}
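A sketch of the isSame() semantics (values are hypothetical): equality requires both a matching length and a known, equal checksum, so a file with no recorded checksum is never treated as identical.

    import org.elasticsearch.index.store.StoreFileMetaData;

    final class MetaDataCompareExample {
        static void demo() {
            StoreFileMetaData local  = new StoreFileMetaData("_0.cfs", 1024, "1a2b3c");
            StoreFileMetaData remote = new StoreFileMetaData("_0.cfs", 1024, "1a2b3c");
            StoreFileMetaData noSum  = new StoreFileMetaData("_0.cfs", 1024, null);
            boolean same    = local.isSame(remote); // true: same length, equal checksums
            boolean unknown = local.isSame(noSum);  // false: a missing checksum never matches
            System.out.println(same + " " + unknown);
        }
    }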
diff --git a/src/main/java/org/elasticsearch/index/store/StoreModule.java b/src/main/java/org/elasticsearch/index/store/StoreModule.java
new file mode 100644
index 0000000..ca2826b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/StoreModule.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.store.distributor.Distributor;
+import org.elasticsearch.index.store.distributor.LeastUsedDistributor;
+import org.elasticsearch.index.store.distributor.RandomWeightedDistributor;
+
+/**
+ * Binds the {@link Store}, its {@link DirectoryService}, and the directory {@link Distributor} for a shard.
+ */
+public class StoreModule extends AbstractModule {
+
+ private final Settings settings;
+
+ private final IndexStore indexStore;
+
+ private Class<? extends Distributor> distributor;
+
+ public StoreModule(Settings settings, IndexStore indexStore) {
+ this.indexStore = indexStore;
+ this.settings = settings;
+ }
+
+ public void setDistributor(Class<? extends Distributor> distributor) {
+ this.distributor = distributor;
+ }
+
+ @Override
+ protected void configure() {
+ bind(DirectoryService.class).to(indexStore.shardDirectory()).asEagerSingleton();
+ bind(Store.class).asEagerSingleton();
+ if (distributor == null) {
+ distributor = loadDistributor(settings);
+ }
+ bind(Distributor.class).to(distributor).asEagerSingleton();
+ }
+
+ private Class<? extends Distributor> loadDistributor(Settings settings) {
+ final Class<? extends Distributor> distributor;
+ final String type = settings.get("index.store.distributor");
+ if ("least_used".equals(type)) {
+ distributor = LeastUsedDistributor.class;
+ } else if ("random".equals(type)) {
+ distributor = RandomWeightedDistributor.class;
+ } else {
+ distributor = settings.getAsClass("index.store.distributor", LeastUsedDistributor.class,
+ "org.elasticsearch.index.store.distributor.", "Distributor");
+ }
+ return distributor;
+ }
+
+}
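A sketch of picking the distributor through settings rather than setDistributor(); the value shown is one of the two names handled above.

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    final class DistributorSettingExample {
        static Settings randomDistributor() {
            // Select the weighted-random distributor instead of the default least_used.
            return ImmutableSettings.settingsBuilder()
                    .put("index.store.distributor", "random")
                    .build();
        }
    }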
diff --git a/src/main/java/org/elasticsearch/index/store/StoreStats.java b/src/main/java/org/elasticsearch/index/store/StoreStats.java
new file mode 100644
index 0000000..3b7f52a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/StoreStats.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ * Statistics about a store: its size and the time spent throttled.
+ */
+public class StoreStats implements Streamable, ToXContent {
+
+ private long sizeInBytes;
+
+ private long throttleTimeInNanos;
+
+ public StoreStats() {
+
+ }
+
+ public StoreStats(long sizeInBytes, long throttleTimeInNanos) {
+ this.sizeInBytes = sizeInBytes;
+ this.throttleTimeInNanos = throttleTimeInNanos;
+ }
+
+ public void add(StoreStats stats) {
+ if (stats == null) {
+ return;
+ }
+ sizeInBytes += stats.sizeInBytes;
+ throttleTimeInNanos += stats.throttleTimeInNanos;
+ }
+
+
+ public long sizeInBytes() {
+ return sizeInBytes;
+ }
+
+ public long getSizeInBytes() {
+ return sizeInBytes;
+ }
+
+ public ByteSizeValue size() {
+ return new ByteSizeValue(sizeInBytes);
+ }
+
+ public ByteSizeValue getSize() {
+ return size();
+ }
+
+ public TimeValue throttleTime() {
+ return TimeValue.timeValueNanos(throttleTimeInNanos);
+ }
+
+ public TimeValue getThrottleTime() {
+ return throttleTime();
+ }
+
+ public static StoreStats readStoreStats(StreamInput in) throws IOException {
+ StoreStats store = new StoreStats();
+ store.readFrom(in);
+ return store;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ sizeInBytes = in.readVLong();
+ throttleTimeInNanos = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(sizeInBytes);
+ out.writeVLong(throttleTimeInNanos);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.STORE);
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, sizeInBytes);
+ builder.timeValueField(Fields.THROTTLE_TIME_IN_MILLIS, Fields.THROTTLE_TIME, throttleTime());
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString STORE = new XContentBuilderString("store");
+ static final XContentBuilderString SIZE = new XContentBuilderString("size");
+ static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
+
+ static final XContentBuilderString THROTTLE_TIME = new XContentBuilderString("throttle_time");
+ static final XContentBuilderString THROTTLE_TIME_IN_MILLIS = new XContentBuilderString("throttle_time_in_millis");
+ }
+}
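A sketch of serializing the stats above (sizes are invented; the exact human-readable fields depend on whether the builder has humanReadable enabled):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.store.StoreStats;

    import java.io.IOException;

    final class StoreStatsExample {
        static String demo() throws IOException {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();
            new StoreStats(1024, 5000000).toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            // roughly: {"store":{"size_in_bytes":1024,"throttle_time_in_millis":5}}
            return builder.string();
        }
    }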
diff --git a/src/main/java/org/elasticsearch/index/store/distributor/AbstractDistributor.java b/src/main/java/org/elasticsearch/index/store/distributor/AbstractDistributor.java
new file mode 100644
index 0000000..98ef6d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/distributor/AbstractDistributor.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.distributor;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.elasticsearch.index.store.DirectoryUtils;
+import org.elasticsearch.index.store.DirectoryService;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public abstract class AbstractDistributor implements Distributor {
+
+ protected final Directory[] delegates;
+
+ protected AbstractDistributor(DirectoryService directoryService) throws IOException {
+ delegates = directoryService.build();
+ }
+
+ public Directory[] all() {
+ return delegates;
+ }
+
+ @Override
+ public Directory primary() {
+ return delegates[0];
+ }
+
+ @Override
+ public Directory any() {
+ if (delegates.length == 1) {
+ return delegates[0];
+ } else {
+ return doAny();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ protected long getUsableSpace(Directory directory) {
+ final FSDirectory leaf = DirectoryUtils.getLeaf(directory, FSDirectory.class);
+ if (leaf != null) {
+ return leaf.getDirectory().getUsableSpace();
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public String toString() {
+ return name() + Arrays.toString(delegates);
+ }
+
+ protected abstract Directory doAny();
+
+ protected abstract String name();
+
+}
diff --git a/src/main/java/org/elasticsearch/index/store/distributor/Distributor.java b/src/main/java/org/elasticsearch/index/store/distributor/Distributor.java
new file mode 100644
index 0000000..e609479
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/distributor/Distributor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.distributor;
+
+import org.apache.lucene.store.Directory;
+
+/**
+ * Keeps track of the available directories and selects a directory
+ * based on some distribution strategy.
+ */
+public interface Distributor {
+
+ /**
+ * Returns the primary directory (typically the first directory in the list).
+ */
+ Directory primary();
+
+ /**
+ * Returns all directories.
+ */
+ Directory[] all();
+
+ /**
+ * Selects one of the directories based on the distribution strategy.
+ */
+ Directory any();
+}
diff --git a/src/main/java/org/elasticsearch/index/store/distributor/LeastUsedDistributor.java b/src/main/java/org/elasticsearch/index/store/distributor/LeastUsedDistributor.java
new file mode 100644
index 0000000..904bc87
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/distributor/LeastUsedDistributor.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.distributor;
+
+import jsr166y.ThreadLocalRandom;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.store.DirectoryService;
+
+import java.io.IOException;
+
+/**
+ * A directory distributor that always returns the directory with the most usable space.
+ */
+public class LeastUsedDistributor extends AbstractDistributor {
+
+ @Inject
+ public LeastUsedDistributor(DirectoryService directoryService) throws IOException {
+ super(directoryService);
+ }
+
+ @Override
+ public Directory doAny() {
+ Directory directory = null;
+ long size = Long.MIN_VALUE;
+ int sameSize = 0;
+ for (Directory delegate : delegates) {
+ long currentSize = getUsableSpace(delegate);
+ if (currentSize > size) {
+ size = currentSize;
+ directory = delegate;
+ sameSize = 1;
+ } else if (currentSize == size) {
+ sameSize++;
+ // Ensure uniform distribution between all directories with the same size
+ if (ThreadLocalRandom.current().nextDouble() < 1.0 / sameSize) {
+ directory = delegate;
+ }
+ }
+ }
+
+ return directory;
+ }
+
+ @Override
+ public String name() {
+ return "least_used";
+ }
+
+}
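The 1.0/sameSize tie-break above is single-pass reservoir sampling of size one: the k-th directory seen at the current maximum replaces the pick with probability 1/k, which leaves each of n tied directories selected with probability exactly 1/n. A standalone sketch of the same trick:

    import java.util.Random;

    final class ReservoirPickExample {
        // Picks uniformly among candidates in one pass, without counting them first.
        static int pickUniformly(int[] candidates, Random random) {
            int pick = -1;
            int seen = 0;
            for (int candidate : candidates) {
                seen++;
                if (random.nextInt(seen) == 0) { // true with probability 1/seen
                    pick = candidate;
                }
            }
            return pick;
        }
    }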
diff --git a/src/main/java/org/elasticsearch/index/store/distributor/RandomWeightedDistributor.java b/src/main/java/org/elasticsearch/index/store/distributor/RandomWeightedDistributor.java
new file mode 100644
index 0000000..f5e2402
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/distributor/RandomWeightedDistributor.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.distributor;
+
+import jsr166y.ThreadLocalRandom;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.store.DirectoryService;
+
+import java.io.IOException;
+
+/**
+ * A directory distributor that picks a directory at random. The probability of selecting a directory
+ * is proportional to the amount of usable space in that directory.
+ */
+public class RandomWeightedDistributor extends AbstractDistributor {
+
+ @Inject
+ public RandomWeightedDistributor(DirectoryService directoryService) throws IOException {
+ super(directoryService);
+ }
+
+ @Override
+ public Directory doAny() {
+ long[] usableSpace = new long[delegates.length];
+ long size = 0;
+
+ for (int i = 0; i < delegates.length; i++) {
+ size += getUsableSpace(delegates[i]);
+ usableSpace[i] = size;
+ }
+
+ if (size != 0) {
+ long random = ThreadLocalRandom.current().nextLong(size);
+ for (int i = 0; i < delegates.length; i++) {
+ if (usableSpace[i] > random) {
+ return delegates[i];
+ }
+ }
+ }
+
+ // TODO: size is 0 - should we bail out or fall back on random distribution?
+ return delegates[ThreadLocalRandom.current().nextInt(delegates.length)];
+ }
+
+ @Override
+ public String name() {
+ return "random";
+ }
+
+}
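A worked example of the cumulative-sum selection above, with invented sizes: usable space of [30, 70] bytes yields the running totals [30, 100]; a uniform draw r in [0, 100) lands in the first bucket when r &lt; 30 (probability 30/100) and otherwise in the second (probability 70/100).

    final class WeightedPickExample {
        static int pick(long[] usableSpacePerDir, long r) {
            long running = 0;
            for (int i = 0; i < usableSpacePerDir.length; i++) {
                running += usableSpacePerDir[i];
                if (r < running) { // r drawn uniformly from [0, total)
                    return i;
                }
            }
            return usableSpacePerDir.length - 1; // unreachable while r < total
        }
        // pick(new long[]{30, 70}, 42) == 1: 42 falls in the [30, 100) bucket
    }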
diff --git a/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java
new file mode 100644
index 0000000..a78ca99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.apache.lucene.store.*;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.DirectoryUtils;
+import org.elasticsearch.index.store.IndexStore;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+
+/**
+ * Base class for filesystem-backed {@link DirectoryService} implementations.
+ */
+public abstract class FsDirectoryService extends AbstractIndexShardComponent implements DirectoryService, StoreRateLimiting.Listener, StoreRateLimiting.Provider {
+
+ protected final FsIndexStore indexStore;
+
+ private final CounterMetric rateLimitingTimeInNanos = new CounterMetric();
+
+ public FsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
+ super(shardId, indexSettings);
+ this.indexStore = (FsIndexStore) indexStore;
+ }
+
+ @Override
+ public final long throttleTimeInNanos() {
+ return rateLimitingTimeInNanos.count();
+ }
+
+ @Override
+ public final StoreRateLimiting rateLimiting() {
+ return indexStore.rateLimiting();
+ }
+
+ protected final LockFactory buildLockFactory() throws IOException {
+ String fsLock = componentSettings.get("lock", componentSettings.get("fs_lock", "native"));
+ LockFactory lockFactory = NoLockFactory.getNoLockFactory();
+ if (fsLock.equals("native")) {
+ // TODO LUCENE MONITOR: this is not needed in next Lucene version
+ lockFactory = new NativeFSLockFactory();
+ } else if (fsLock.equals("simple")) {
+ lockFactory = new SimpleFSLockFactory();
+ } else if (fsLock.equals("none")) {
+ lockFactory = NoLockFactory.getNoLockFactory();
+ }
+ return lockFactory;
+ }
+
+ @Override
+ public final void renameFile(Directory dir, String from, String to) throws IOException {
+ final FSDirectory fsDirectory = DirectoryUtils.getLeaf(dir, FSDirectory.class);
+ if (fsDirectory == null) {
+ throw new ElasticsearchIllegalArgumentException("Can not rename file on non-filesystem based directory ");
+ }
+ File directory = fsDirectory.getDirectory();
+ File old = new File(directory, from);
+ File nu = new File(directory, to);
+ if (nu.exists()) {
+ if (!nu.delete()) {
+ throw new IOException("Cannot delete " + nu);
+ }
+ }
+
+ if (!old.exists()) {
+ throw new FileNotFoundException("Can't rename from [" + from + "] to [" + to + "], source does not exist");
+ }
+
+ boolean renamed = false;
+ for (int i = 0; i < 3; i++) {
+ if (old.renameTo(nu)) {
+ renamed = true;
+ break;
+ }
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException(e.getMessage());
+ }
+ }
+ if (!renamed) {
+ throw new IOException("Failed to rename, from [" + from + "], to [" + to + "]");
+ }
+ }
+
+ @Override
+ public final void fullDelete(Directory dir) throws IOException {
+ final FSDirectory fsDirectory = DirectoryUtils.getLeaf(dir, FSDirectory.class);
+ if (fsDirectory == null) {
+ throw new ElasticsearchIllegalArgumentException("Can not fully delete on non-filesystem based directory");
+ }
+ FileSystemUtils.deleteRecursively(fsDirectory.getDirectory());
+ // if we are the last ones, delete also the actual index
+ String[] list = fsDirectory.getDirectory().getParentFile().list();
+ if (list == null || list.length == 0) {
+ FileSystemUtils.deleteRecursively(fsDirectory.getDirectory().getParentFile());
+ }
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ File[] locations = indexStore.shardIndexLocations(shardId);
+ Directory[] dirs = new Directory[locations.length];
+ for (int i = 0; i < dirs.length; i++) {
+ FileSystemUtils.mkdirs(locations[i]);
+ FSDirectory wrapped = newFSDirectory(locations[i], buildLockFactory());
+ dirs[i] = new RateLimitedFSDirectory(wrapped, this, this);
+ }
+ return dirs;
+ }
+
+ protected abstract FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException;
+
+ @Override
+ public final void onPause(long nanos) {
+ rateLimitingTimeInNanos.inc(nanos);
+ }
+}
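A sketch of switching the lock factory through settings. The key shown assumes componentSettings for this package resolves to the "index.store.fs" prefix; treat it as an illustration, not a confirmed setting name.

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    final class FsLockSettingExample {
        static Settings simpleLocks() {
            // "native" (default), "simple", or "none", per buildLockFactory() above.
            return ImmutableSettings.settingsBuilder()
                    .put("index.store.fs.lock", "simple") // assumed key; see note above
                    .build();
        }
    }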
diff --git a/src/main/java/org/elasticsearch/index/store/fs/FsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/FsIndexStore.java
new file mode 100644
index 0000000..d4ab729
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/FsIndexStore.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.support.AbstractIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Base class for filesystem based index stores, resolving per-shard data
+ * locations through the {@link NodeEnvironment}.
+ */
+public abstract class FsIndexStore extends AbstractIndexStore {
+
+ private final NodeEnvironment nodeEnv;
+
+ private final File[] locations;
+
+ public FsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) {
+ super(index, indexSettings, indexService, indicesStore);
+ this.nodeEnv = nodeEnv;
+ if (nodeEnv.hasNodeFile()) {
+ this.locations = nodeEnv.indexLocations(index);
+ } else {
+ this.locations = null;
+ }
+ }
+
+ @Override
+ public boolean persistent() {
+ return true;
+ }
+
+ @Override
+ public ByteSizeValue backingStoreTotalSpace() {
+ if (locations == null) {
+ return new ByteSizeValue(0);
+ }
+ long totalSpace = 0;
+ for (File location : locations) {
+ totalSpace += location.getTotalSpace();
+ }
+ return new ByteSizeValue(totalSpace);
+ }
+
+ @Override
+ public ByteSizeValue backingStoreFreeSpace() {
+ if (locations == null) {
+ return new ByteSizeValue(0);
+ }
+ long usableSpace = 0;
+ for (File location : locations) {
+ usableSpace += location.getUsableSpace();
+ }
+ return new ByteSizeValue(usableSpace);
+ }
+
+ @Override
+ public boolean canDeleteUnallocated(ShardId shardId) {
+ if (locations == null) {
+ return false;
+ }
+ if (indexService.hasShard(shardId.id())) {
+ return false;
+ }
+ for (File location : shardLocations(shardId)) {
+ if (location.exists()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void deleteUnallocated(ShardId shardId) throws IOException {
+ if (locations == null) {
+ return;
+ }
+ if (indexService.hasShard(shardId.id())) {
+ throw new ElasticsearchIllegalStateException(shardId + " allocated, can't be deleted");
+ }
+ FileSystemUtils.deleteRecursively(shardLocations(shardId));
+ }
+
+ public File[] shardLocations(ShardId shardId) {
+ return nodeEnv.shardLocations(shardId);
+ }
+
+ public File[] shardIndexLocations(ShardId shardId) {
+ File[] shardLocations = shardLocations(shardId);
+ File[] shardIndexLocations = new File[shardLocations.length];
+ for (int i = 0; i < shardLocations.length; i++) {
+ shardIndexLocations[i] = new File(shardLocations[i], "index");
+ }
+ return shardIndexLocations;
+ }
+
+    // not used currently, but kept to state that this store also defines a file based translog location
+
+ public File[] shardTranslogLocations(ShardId shardId) {
+ File[] shardLocations = shardLocations(shardId);
+ File[] shardTranslogLocations = new File[shardLocations.length];
+ for (int i = 0; i < shardLocations.length; i++) {
+ shardTranslogLocations[i] = new File(shardLocations[i], "translog");
+ }
+ return shardTranslogLocations;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java
new file mode 100644
index 0000000..18d4d10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.MMapDirectory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * A directory service that builds {@link MMapDirectory} (memory mapped) directories.
+ */
+public class MmapFsDirectoryService extends FsDirectoryService {
+
+ @Inject
+ public MmapFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
+ super(shardId, indexSettings, indexStore);
+ }
+
+ @Override
+ protected FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException {
+        return new MMapDirectory(location, lockFactory);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java
new file mode 100644
index 0000000..2784037
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.indices.store.IndicesStore;
+
+/**
+ * An index store backed by memory mapped files.
+ */
+public final class MmapFsIndexStore extends FsIndexStore {
+
+ @Inject
+ public MmapFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) {
+ super(index, indexSettings, indexService, indicesStore, nodeEnv);
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return MmapFsDirectoryService.class;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java
new file mode 100644
index 0000000..7f65590
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+/**
+ * Binds {@link IndexStore} to {@link MmapFsIndexStore}.
+ */
+public class MmapFsIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(MmapFsIndexStore.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java
new file mode 100644
index 0000000..3b9d385
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.NIOFSDirectory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * A directory service that builds {@link NIOFSDirectory} directories.
+ */
+public class NioFsDirectoryService extends FsDirectoryService {
+
+ @Inject
+ public NioFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
+ super(shardId, indexSettings, indexStore);
+ }
+
+ @Override
+ protected FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException {
+ return new NIOFSDirectory(location, lockFactory);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java
new file mode 100644
index 0000000..139f11a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.indices.store.IndicesStore;
+
+/**
+ * An index store backed by NIO filesystem directories.
+ */
+public final class NioFsIndexStore extends FsIndexStore {
+
+ @Inject
+ public NioFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) {
+ super(index, indexSettings, indexService, indicesStore, nodeEnv);
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return NioFsDirectoryService.class;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java
new file mode 100644
index 0000000..9db1cbd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+/**
+ * Binds {@link IndexStore} to {@link NioFsIndexStore}.
+ */
+public class NioFsIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(NioFsIndexStore.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java
new file mode 100644
index 0000000..098d858
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.SimpleFSDirectory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * A directory service that builds {@link SimpleFSDirectory} directories.
+ */
+public class SimpleFsDirectoryService extends FsDirectoryService {
+
+ @Inject
+ public SimpleFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
+ super(shardId, indexSettings, indexStore);
+ }
+
+ @Override
+ protected FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException {
+ return new SimpleFSDirectory(location, lockFactory);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java
new file mode 100644
index 0000000..b7ff6fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.indices.store.IndicesStore;
+
+/**
+ * An index store backed by simple (RandomAccessFile based) filesystem directories.
+ */
+public final class SimpleFsIndexStore extends FsIndexStore {
+
+ @Inject
+ public SimpleFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) {
+ super(index, indexSettings, indexService, indicesStore, nodeEnv);
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return SimpleFsDirectoryService.class;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java
new file mode 100644
index 0000000..c35997a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.fs;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+/**
+ * Binds {@link IndexStore} to {@link SimpleFsIndexStore}.
+ */
+public class SimpleFsIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(SimpleFsIndexStore.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/memory/ByteBufferDirectoryService.java b/src/main/java/org/elasticsearch/index/store/memory/ByteBufferDirectoryService.java
new file mode 100644
index 0000000..caa8f56
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/memory/ByteBufferDirectoryService.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.memory;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.bytebuffer.ByteBufferAllocator;
+import org.apache.lucene.store.bytebuffer.ByteBufferDirectory;
+import org.apache.lucene.store.bytebuffer.ByteBufferFile;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.DirectoryUtils;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+/**
+ * A directory service that keeps the index in byte buffers allocated through the {@link ByteBufferCache}.
+ */
+public final class ByteBufferDirectoryService extends AbstractIndexShardComponent implements DirectoryService {
+
+ private final ByteBufferCache byteBufferCache;
+
+ @Inject
+ public ByteBufferDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, ByteBufferCache byteBufferCache) {
+ super(shardId, indexSettings);
+ this.byteBufferCache = byteBufferCache;
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+
+ @Override
+ public Directory[] build() {
+ return new Directory[]{new CustomByteBufferDirectory(byteBufferCache)};
+ }
+
+ @Override
+ public void renameFile(Directory dir, String from, String to) throws IOException {
+ CustomByteBufferDirectory leaf = DirectoryUtils.getLeaf(dir, CustomByteBufferDirectory.class);
+ assert leaf != null;
+ leaf.renameTo(from, to);
+ }
+
+ @Override
+ public void fullDelete(Directory dir) {
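+        // nothing to do here, the directory contents only live in memory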
+ }
+
+ static class CustomByteBufferDirectory extends ByteBufferDirectory {
+
+ CustomByteBufferDirectory() {
+ }
+
+ CustomByteBufferDirectory(ByteBufferAllocator allocator) {
+ super(allocator);
+ }
+
+ public void renameTo(String from, String to) throws IOException {
+            ByteBufferFile fromFile = files.get(from);
+            if (fromFile == null) {
+                throw new FileNotFoundException(from);
+            }
+            // always drop the source entry; any existing destination entry is simply replaced
+            files.remove(from);
+            files.put(to, fromFile);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/memory/ByteBufferIndexStore.java b/src/main/java/org/elasticsearch/index/store/memory/ByteBufferIndexStore.java
new file mode 100644
index 0000000..df1a7be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/memory/ByteBufferIndexStore.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.memory;
+
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.support.AbstractIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.monitor.jvm.JvmStats;
+
+/**
+ * A non-persistent index store that keeps the index in heap or direct byte buffers.
+ */
+public class ByteBufferIndexStore extends AbstractIndexStore {
+
+ private final boolean direct;
+
+ @Inject
+ public ByteBufferIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService,
+ ByteBufferCache byteBufferCache, IndicesStore indicesStore) {
+ super(index, indexSettings, indexService, indicesStore);
+ this.direct = byteBufferCache.direct();
+ }
+
+ @Override
+ public boolean persistent() {
+ return false;
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return ByteBufferDirectoryService.class;
+ }
+
+ @Override
+ public ByteSizeValue backingStoreTotalSpace() {
+ if (direct) {
+ // TODO, we can use sigar...
+ return new ByteSizeValue(-1, ByteSizeUnit.BYTES);
+ }
+ return JvmInfo.jvmInfo().mem().heapMax();
+ }
+
+ @Override
+ public ByteSizeValue backingStoreFreeSpace() {
+ if (direct) {
+ return new ByteSizeValue(-1, ByteSizeUnit.BYTES);
+ }
+ return JvmStats.jvmStats().mem().heapUsed();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/memory/MemoryIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/memory/MemoryIndexStoreModule.java
new file mode 100644
index 0000000..c2377b5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/memory/MemoryIndexStoreModule.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.memory;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.store.IndexStore;
+
+/**
+ * Binds {@link IndexStore} to {@link ByteBufferIndexStore}.
+ */
+public class MemoryIndexStoreModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public MemoryIndexStoreModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(ByteBufferIndexStore.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/store/ram/RamDirectoryService.java b/src/main/java/org/elasticsearch/index/store/ram/RamDirectoryService.java
new file mode 100644
index 0000000..a0ad08e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/ram/RamDirectoryService.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.ram;
+
+import org.apache.lucene.store.*;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.DirectoryUtils;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+/**
+ * A directory service that keeps the index in a Lucene {@link RAMDirectory}.
+ */
+public final class RamDirectoryService extends AbstractIndexShardComponent implements DirectoryService {
+
+ @Inject
+ public RamDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+
+ @Override
+ public Directory[] build() {
+ return new Directory[]{new CustomRAMDirectory()};
+ }
+
+ @Override
+ public void renameFile(Directory dir, String from, String to) throws IOException {
+ CustomRAMDirectory leaf = DirectoryUtils.getLeaf(dir, CustomRAMDirectory.class);
+ assert leaf != null;
+ leaf.renameTo(from, to);
+ }
+
+ @Override
+ public void fullDelete(Directory dir) {
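+        // nothing to do here, the directory contents only live on the heap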
+ }
+
+ static class CustomRAMDirectory extends RAMDirectory {
+
+ public synchronized void renameTo(String from, String to) throws IOException {
+            RAMFile fromFile = fileMap.get(from);
+            if (fromFile == null) {
+                throw new FileNotFoundException(from);
+            }
+            RAMFile toFile = fileMap.get(to);
+            if (toFile != null) {
+                // the destination file is overwritten, so stop accounting for its bytes
+                sizeInBytes.addAndGet(-fileLength(to));
+            }
+            fileMap.remove(from);
+            fileMap.put(to, fromFile);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/ram/RamIndexStore.java b/src/main/java/org/elasticsearch/index/store/ram/RamIndexStore.java
new file mode 100644
index 0000000..96bb5da
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/ram/RamIndexStore.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.ram;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.support.AbstractIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.monitor.jvm.JvmStats;
+
+/**
+ * A non-persistent index store that keeps the index on the JVM heap.
+ */
+public class RamIndexStore extends AbstractIndexStore {
+
+ @Inject
+ public RamIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore) {
+ super(index, indexSettings, indexService, indicesStore);
+ }
+
+ @Override
+ public boolean persistent() {
+ return false;
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return RamDirectoryService.class;
+ }
+
+ @Override
+ public ByteSizeValue backingStoreTotalSpace() {
+ return JvmInfo.jvmInfo().getMem().heapMax();
+ }
+
+ @Override
+ public ByteSizeValue backingStoreFreeSpace() {
+ return JvmStats.jvmStats().getMem().heapUsed();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/ram/RamIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/ram/RamIndexStoreModule.java
new file mode 100644
index 0000000..24bb356
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/ram/RamIndexStoreModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.ram;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+/**
+ * Binds {@link IndexStore} to {@link RamIndexStore}.
+ */
+public class RamIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(RamIndexStore.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java b/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java
new file mode 100644
index 0000000..5fea4f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.support;
+
+import org.apache.lucene.store.StoreRateLimiting;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+
+import java.io.IOException;
+
+/**
+ * Base class for {@link IndexStore} implementations that handles store level
+ * rate limiting settings and their dynamic updates.
+ */
+public abstract class AbstractIndexStore extends AbstractIndexComponent implements IndexStore {
+
+ public static final String INDEX_STORE_THROTTLE_TYPE = "index.store.throttle.type";
+ public static final String INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC = "index.store.throttle.max_bytes_per_sec";
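+    // example index settings (values are illustrative):
+    //   index.store.throttle.type: merge            (other values: "none", "all", or "node" to use the node level limiter)
+    //   index.store.throttle.max_bytes_per_sec: 20mb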
+
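+    // listens for dynamic index settings updates and applies throttle type/rate changes in place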
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ String rateLimitingType = settings.get(INDEX_STORE_THROTTLE_TYPE, AbstractIndexStore.this.rateLimitingType);
+ if (!rateLimitingType.equals(AbstractIndexStore.this.rateLimitingType)) {
+ logger.info("updating index.store.throttle.type from [{}] to [{}]", AbstractIndexStore.this.rateLimitingType, rateLimitingType);
+ if (rateLimitingType.equalsIgnoreCase("node")) {
+ AbstractIndexStore.this.rateLimitingType = rateLimitingType;
+ AbstractIndexStore.this.nodeRateLimiting = true;
+ } else {
+ StoreRateLimiting.Type.fromString(rateLimitingType);
+ AbstractIndexStore.this.rateLimitingType = rateLimitingType;
+ AbstractIndexStore.this.nodeRateLimiting = false;
+ AbstractIndexStore.this.rateLimiting.setType(rateLimitingType);
+ }
+ }
+
+ ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, AbstractIndexStore.this.rateLimitingThrottle);
+ if (!rateLimitingThrottle.equals(AbstractIndexStore.this.rateLimitingThrottle)) {
+ logger.info("updating index.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", AbstractIndexStore.this.rateLimitingThrottle, rateLimitingThrottle, AbstractIndexStore.this.rateLimitingType);
+ AbstractIndexStore.this.rateLimitingThrottle = rateLimitingThrottle;
+ AbstractIndexStore.this.rateLimiting.setMaxRate(rateLimitingThrottle);
+ }
+ }
+ }
+
+ protected final IndexService indexService;
+
+ protected final IndicesStore indicesStore;
+
+ private volatile String rateLimitingType;
+ private volatile ByteSizeValue rateLimitingThrottle;
+ private volatile boolean nodeRateLimiting;
+
+ private final StoreRateLimiting rateLimiting = new StoreRateLimiting();
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ protected AbstractIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore) {
+ super(index, indexSettings);
+ this.indexService = indexService;
+ this.indicesStore = indicesStore;
+
+ this.rateLimitingType = indexSettings.get(INDEX_STORE_THROTTLE_TYPE, "node");
+ if (rateLimitingType.equalsIgnoreCase("node")) {
+ nodeRateLimiting = true;
+ } else {
+ nodeRateLimiting = false;
+ rateLimiting.setType(rateLimitingType);
+ }
+ this.rateLimitingThrottle = indexSettings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(0));
+ rateLimiting.setMaxRate(rateLimitingThrottle);
+
+ logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle);
+
+ indexService.settingsService().addListener(applySettings);
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ indexService.settingsService().removeListener(applySettings);
+ }
+
+ @Override
+ public boolean canDeleteUnallocated(ShardId shardId) {
+ return false;
+ }
+
+ @Override
+ public void deleteUnallocated(ShardId shardId) throws IOException {
+ // do nothing here...
+ }
+
+ @Override
+ public IndicesStore indicesStore() {
+ return indicesStore;
+ }
+
+ @Override
+ public StoreRateLimiting rateLimiting() {
+ return nodeRateLimiting ? indicesStore.rateLimiting() : this.rateLimiting;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/support/ForceSyncDirectory.java b/src/main/java/org/elasticsearch/index/store/support/ForceSyncDirectory.java
new file mode 100644
index 0000000..e14885d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/store/support/ForceSyncDirectory.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.support;
+
+import java.io.IOException;
+
+/**
+ * A custom directory that allows forcing a sync (since the actual directory might have it disabled).
+ */
+public interface ForceSyncDirectory {
+
+ /**
+     * Similar to {@link org.apache.lucene.store.Directory#sync(java.util.Collection)}, but forces
+     * the sync even if it is disabled.
+ */
+ void forceSync(String name) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorModule.java b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorModule.java
new file mode 100644
index 0000000..a2c94e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.termvectors;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * Binds {@link ShardTermVectorService} as an eager singleton.
+ */
+public class ShardTermVectorModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(ShardTermVectorService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorService.java b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorService.java
new file mode 100644
index 0000000..893b478
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorService.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.termvectors;
+
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.Term;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.termvector.TermVectorRequest;
+import org.elasticsearch.action.termvector.TermVectorResponse;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+
+/**
+ * Computes term vectors for documents stored on a shard.
+ */
+public class ShardTermVectorService extends AbstractIndexShardComponent {
+
+ private IndexShard indexShard;
+ private MapperService mapperService;
+
+ @Inject
+    public ShardTermVectorService(ShardId shardId, @IndexSettings Settings indexSettings, MapperService mapperService) {
+        super(shardId, indexSettings);
+        this.mapperService = mapperService;
+    }
+
+    // sadly, to overcome a cyclic dependency, we need to set this after construction instead of injecting it
+ public ShardTermVectorService setIndexShard(IndexShard indexShard) {
+ this.indexShard = indexShard;
+ return this;
+ }
+
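+    // acquires a searcher, resolves the live document for the uid, and serves its stored
+    // term vectors; sets exists=false on the response when the document is not found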
+ public TermVectorResponse getTermVector(TermVectorRequest request) {
+ final Engine.Searcher searcher = indexShard.acquireSearcher("term_vector");
+ IndexReader topLevelReader = searcher.reader();
+ final TermVectorResponse termVectorResponse = new TermVectorResponse(request.index(), request.type(), request.id());
+ final Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
+ try {
+ Fields topLevelFields = MultiFields.getFields(topLevelReader);
+ Versions.DocIdAndVersion docIdAndVersion = Versions.loadDocIdAndVersion(topLevelReader, uidTerm);
+ if (docIdAndVersion != null) {
+ Fields termVectorsByField = docIdAndVersion.context.reader().getTermVectors(docIdAndVersion.docId);
+ termVectorResponse.setFields(termVectorsByField, request.selectedFields(), request.getFlags(), topLevelFields);
+ termVectorResponse.setExists(true);
+ termVectorResponse.setDocVersion(docIdAndVersion.version);
+ } else {
+ termVectorResponse.setExists(false);
+ }
+ } catch (Throwable ex) {
+ throw new ElasticsearchException("failed to execute term vector request", ex);
+ } finally {
+ searcher.release();
+ }
+ return termVectorResponse;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java
new file mode 100644
index 0000000..6b9d0dc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -0,0 +1,656 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.apache.lucene.index.Term;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.CloseableIndexComponent;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.IndexShardComponent;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A shard level transaction log that records operations so they can be replayed,
+ * for example during recovery.
+ */
+public interface Translog extends IndexShardComponent, CloseableIndexComponent {
+
+ static ByteSizeValue INACTIVE_SHARD_TRANSLOG_BUFFER = ByteSizeValue.parseBytesSizeValue("1kb");
+
+ public static final String TRANSLOG_ID_KEY = "translog_id";
+
+ void updateBuffer(ByteSizeValue bufferSize);
+
+ void closeWithDelete();
+
+ /**
+ * Returns the id of the current transaction log.
+ */
+ long currentId();
+
+ /**
+     * Returns the estimated number of operations in the transaction log.
+ */
+ int estimatedNumberOfOperations();
+
+ /**
+     * The estimated memory size, in bytes, that this translog occupies.
+ */
+ long memorySizeInBytes();
+
+ /**
+ * Returns the size in bytes of the translog.
+ */
+ long translogSizeInBytes();
+
+ /**
+     * Creates a new transaction log with the given id, replacing the current one.
+ * <p/>
+ * <p>Can only be called by one thread.
+ */
+ void newTranslog(long id) throws TranslogException;
+
+ /**
+     * Creates a new transient translog; while it exists, added operations are written
+     * both to the current translog and to the transient one.
+ * <p/>
+ * <p>Can only be called by one thread.
+ */
+ void newTransientTranslog(long id) throws TranslogException;
+
+ /**
+ * Swaps the transient translog to be the current one.
+ * <p/>
+ * <p>Can only be called by one thread.
+ */
+ void makeTransientCurrent();
+
+ /**
+ * Reverts back to not have a transient translog.
+ */
+ void revertTransient();
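+
+    // these three methods support a two-phase flush: newTransientTranslog(id), commit the
+    // Lucene index, then makeTransientCurrent(); revertTransient() is the failure path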
+
+ /**
+     * Adds an operation to the transaction log.
+ */
+ Location add(Operation operation) throws TranslogException;
+
+ byte[] read(Location location);
+
+ /**
+     * Snapshots the current transaction log, allowing safe iteration over the snapshot.
+ */
+ Snapshot snapshot() throws TranslogException;
+
+ /**
+     * Snapshots the delta between the current state of the translog and the state
+     * captured by the provided snapshot. If a new translog has been created after the
+     * provided snapshot was taken, a snapshot of the current translog is returned.
+ */
+ Snapshot snapshot(Snapshot snapshot);
+
+ /**
+     * Deletes translog files that are no longer referenced.
+ */
+ void clearUnreferenced();
+
+ /**
+     * Syncs the translog to its underlying storage.
+ */
+ void sync();
+
+ boolean syncNeeded();
+
+ void syncOnEachOperation(boolean syncOnEachOperation);
+
+ /**
+     * Returns statistics about this translog.
+ */
+ TranslogStats stats();
+
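+    /**
+     * A pointer (translog id, position and size) into the translog, as returned by
+     * {@link #add(Operation)} and consumed by {@link #read(Location)}.
+     */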
+ static class Location {
+ public final long translogId;
+ public final long translogLocation;
+ public final int size;
+
+ public Location(long translogId, long translogLocation, int size) {
+ this.translogId = translogId;
+ this.translogLocation = translogLocation;
+ this.size = size;
+ }
+ }
+
+ /**
+     * A snapshot of the transaction log that allows iterating over all of its operations.
+ */
+ static interface Snapshot extends Releasable {
+
+ /**
+ * The id of the translog the snapshot was taken with.
+ */
+ long translogId();
+
+ long position();
+
+ /**
+ * Returns the internal length (*not* number of operations) of this snapshot.
+ */
+ long length();
+
+ /**
+     * The estimated total number of operations in the translog.
+ */
+ int estimatedTotalOperations();
+
+ boolean hasNext();
+
+ Operation next();
+
+ void seekForward(long length);
+
+ /**
+ * Returns a stream of this snapshot.
+ */
+ InputStream stream() throws IOException;
+
+ /**
+ * The length in bytes of this stream.
+ */
+ long lengthInBytes();
+ }
+
+ /**
+ * A generic interface representing an operation performed on the transaction log.
+     * Each operation is associated with a type.
+ */
+ static interface Operation extends Streamable {
+ static enum Type {
+ CREATE((byte) 1),
+ SAVE((byte) 2),
+ DELETE((byte) 3),
+ DELETE_BY_QUERY((byte) 4);
+
+ private final byte id;
+
+ private Type(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public static Type fromId(byte id) {
+ switch (id) {
+ case 1:
+ return CREATE;
+ case 2:
+ return SAVE;
+ case 3:
+ return DELETE;
+ case 4:
+ return DELETE_BY_QUERY;
+ default:
+ throw new IllegalArgumentException("No type mapped for [" + id + "]");
+ }
+ }
+ }
+
+ Type opType();
+
+ long estimateSize();
+
+ Source readSource(StreamInput in) throws IOException;
+ }
+
+ static class Source {
+ public final BytesReference source;
+ public final String routing;
+ public final String parent;
+ public final long timestamp;
+ public final long ttl;
+
+ public Source(BytesReference source, String routing, String parent, long timestamp, long ttl) {
+ this.source = source;
+ this.routing = routing;
+ this.parent = parent;
+ this.timestamp = timestamp;
+ this.ttl = ttl;
+ }
+ }
+
+ static class Create implements Operation {
+ private String id;
+ private String type;
+ private BytesReference source;
+ private String routing;
+ private String parent;
+ private long timestamp;
+ private long ttl;
+ private long version;
+
+ public Create() {
+ }
+
+ public Create(Engine.Create create) {
+ this.id = create.id();
+ this.type = create.type();
+ this.source = create.source();
+ this.routing = create.routing();
+ this.parent = create.parent();
+ this.timestamp = create.timestamp();
+ this.ttl = create.ttl();
+ this.version = create.version();
+ }
+
+ public Create(String type, String id, byte[] source) {
+ this.id = id;
+ this.type = type;
+ this.source = new BytesArray(source);
+ }
+
+ @Override
+ public Type opType() {
+ return Type.CREATE;
+ }
+
+ @Override
+ public long estimateSize() {
+ return ((id.length() + type.length()) * 2) + source.length() + 12;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String routing() {
+ return this.routing;
+ }
+
+ public String parent() {
+ return this.parent;
+ }
+
+ public long timestamp() {
+ return this.timestamp;
+ }
+
+ public long ttl() {
+ return this.ttl;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public Source readSource(StreamInput in) throws IOException {
+ readFrom(in);
+ return new Source(source, routing, parent, timestamp, ttl);
+ }
+
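+        // stream format: a leading vint format version gates which optional fields follow;
+        // writeTo below always writes the latest version (5) with all fields present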
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int version = in.readVInt(); // version
+ id = in.readString();
+ type = in.readString();
+ source = in.readBytesReference();
+ if (version >= 1) {
+ if (in.readBoolean()) {
+ routing = in.readString();
+ }
+ }
+ if (version >= 2) {
+ if (in.readBoolean()) {
+ parent = in.readString();
+ }
+ }
+ if (version >= 3) {
+ this.version = in.readLong();
+ }
+ if (version >= 4) {
+ this.timestamp = in.readLong();
+ }
+ if (version >= 5) {
+ this.ttl = in.readLong();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(5); // version
+ out.writeString(id);
+ out.writeString(type);
+ out.writeBytesReference(source);
+ if (routing == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(routing);
+ }
+ if (parent == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(parent);
+ }
+ out.writeLong(version);
+ out.writeLong(timestamp);
+ out.writeLong(ttl);
+ }
+ }
+
+ static class Index implements Operation {
+ private String id;
+ private String type;
+ private long version;
+ private BytesReference source;
+ private String routing;
+ private String parent;
+ private long timestamp;
+ private long ttl;
+
+ public Index() {
+ }
+
+ public Index(Engine.Index index) {
+ this.id = index.id();
+ this.type = index.type();
+ this.source = index.source();
+ this.routing = index.routing();
+ this.parent = index.parent();
+ this.version = index.version();
+ this.timestamp = index.timestamp();
+ this.ttl = index.ttl();
+ }
+
+ public Index(String type, String id, byte[] source) {
+ this.type = type;
+ this.id = id;
+ this.source = new BytesArray(source);
+ }
+
+ @Override
+ public Type opType() {
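+ // index operations are recorded under the historical SAVE type id,
+ // which TranslogStreams maps back to Translog.Index when reading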
+ return Type.SAVE;
+ }
+
+ @Override
+ public long estimateSize() {
+ return ((id.length() + type.length()) * 2) + source.length() + 12;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String id() {
+ return this.id;
+ }
+
+ public String routing() {
+ return this.routing;
+ }
+
+ public String parent() {
+ return this.parent;
+ }
+
+ public long timestamp() {
+ return this.timestamp;
+ }
+
+ public long ttl() {
+ return this.ttl;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public Source readSource(StreamInput in) throws IOException {
+ readFrom(in);
+ return new Source(source, routing, parent, timestamp, ttl);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int version = in.readVInt(); // version
+ id = in.readString();
+ type = in.readString();
+ source = in.readBytesReference();
+ if (version >= 1) {
+ if (in.readBoolean()) {
+ routing = in.readString();
+ }
+ }
+ if (version >= 2) {
+ if (in.readBoolean()) {
+ parent = in.readString();
+ }
+ }
+ if (version >= 3) {
+ this.version = in.readLong();
+ }
+ if (version >= 4) {
+ this.timestamp = in.readLong();
+ }
+ if (version >= 5) {
+ this.ttl = in.readLong();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(5); // version
+ out.writeString(id);
+ out.writeString(type);
+ out.writeBytesReference(source);
+ if (routing == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(routing);
+ }
+ if (parent == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(parent);
+ }
+ out.writeLong(version);
+ out.writeLong(timestamp);
+ out.writeLong(ttl);
+ }
+ }
+
+ static class Delete implements Operation {
+ private Term uid;
+ private long version;
+
+ public Delete() {
+ }
+
+ public Delete(Engine.Delete delete) {
+ this(delete.uid());
+ this.version = delete.version();
+ }
+
+ public Delete(Term uid) {
+ this.uid = uid;
+ }
+
+ @Override
+ public Type opType() {
+ return Type.DELETE;
+ }
+
+ @Override
+ public long estimateSize() {
+ return ((uid.field().length() + uid.text().length()) * 2) + 20;
+ }
+
+ public Term uid() {
+ return this.uid;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public Source readSource(StreamInput in) throws IOException {
+ throw new ElasticsearchIllegalStateException("trying to read doc source from delete operation");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int version = in.readVInt(); // version
+ uid = new Term(in.readString(), in.readString());
+ if (version >= 1) {
+ this.version = in.readLong();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(1); // version
+ out.writeString(uid.field());
+ out.writeString(uid.text());
+ out.writeLong(version);
+ }
+ }
+
+ static class DeleteByQuery implements Operation {
+ private BytesReference source;
+ @Nullable
+ private String[] filteringAliases;
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ public DeleteByQuery() {
+ }
+
+ public DeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
+ this(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types());
+ }
+
+ public DeleteByQuery(BytesReference source, String[] filteringAliases, String... types) {
+ this.source = source;
+ this.types = types == null ? Strings.EMPTY_ARRAY : types;
+ this.filteringAliases = filteringAliases;
+ }
+
+ @Override
+ public Type opType() {
+ return Type.DELETE_BY_QUERY;
+ }
+
+ @Override
+ public long estimateSize() {
+ return source.length() + 8;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ public String[] types() {
+ return this.types;
+ }
+
+ @Override
+ public Source readSource(StreamInput in) throws IOException {
+ throw new ElasticsearchIllegalStateException("trying to read doc source from delete_by_query operation");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int version = in.readVInt(); // version
+ source = in.readBytesReference();
+ if (version < 2) {
+ // for query_parser_name, which was removed
+ if (in.readBoolean()) {
+ in.readString();
+ }
+ }
+ int typesSize = in.readVInt();
+ if (typesSize > 0) {
+ types = new String[typesSize];
+ for (int i = 0; i < typesSize; i++) {
+ types[i] = in.readString();
+ }
+ }
+ if (version >= 1) {
+ int aliasesSize = in.readVInt();
+ if (aliasesSize > 0) {
+ filteringAliases = new String[aliasesSize];
+ for (int i = 0; i < aliasesSize; i++) {
+ filteringAliases[i] = in.readString();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(2); // version
+ out.writeBytesReference(source);
+ out.writeVInt(types.length);
+ for (String type : types) {
+ out.writeString(type);
+ }
+ if (filteringAliases != null) {
+ out.writeVInt(filteringAliases.length);
+ for (String alias : filteringAliases) {
+ out.writeString(alias);
+ }
+ } else {
+ out.writeVInt(0);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogException.java b/src/main/java/org/elasticsearch/index/translog/TranslogException.java
new file mode 100644
index 0000000..8608023
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/TranslogException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class TranslogException extends IndexShardException {
+
+ public TranslogException(ShardId shardId, String msg) {
+ super(shardId, msg);
+ }
+
+ public TranslogException(ShardId shardId, String msg, Throwable cause) {
+ super(shardId, msg, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogModule.java b/src/main/java/org/elasticsearch/index/translog/TranslogModule.java
new file mode 100644
index 0000000..aeaff13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/TranslogModule.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.translog.fs.FsTranslog;
+
+/**
+ *
+ */
+public class TranslogModule extends AbstractModule {
+
+ public static class TranslogSettings {
+ public static final String TYPE = "index.translog.type";
+ }
+
+ private final Settings settings;
+
+ public TranslogModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(Translog.class)
+ .to(settings.getAsClass(TranslogSettings.TYPE, FsTranslog.class))
+ .in(Scopes.SINGLETON);
+ bind(TranslogService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogService.java b/src/main/java/org/elasticsearch/index/translog/TranslogService.java
new file mode 100644
index 0000000..14a5e49
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/TranslogService.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.IllegalIndexShardStateException;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.ScheduledFuture;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+
+/**
+ *
+ */
+public class TranslogService extends AbstractIndexShardComponent {
+
+ private final ThreadPool threadPool;
+ private final IndexSettingsService indexSettingsService;
+ private final IndexShard indexShard;
+ private final Translog translog;
+
+ private volatile TimeValue interval;
+ private volatile int flushThresholdOperations;
+ private volatile ByteSizeValue flushThresholdSize;
+ private volatile TimeValue flushThresholdPeriod;
+ private volatile boolean disableFlush;
+ private volatile ScheduledFuture future;
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ @Inject
+ public TranslogService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, ThreadPool threadPool, IndexShard indexShard, Translog translog) {
+ super(shardId, indexSettings);
+ this.threadPool = threadPool;
+ this.indexSettingsService = indexSettingsService;
+ this.indexShard = indexShard;
+ this.translog = translog;
+
+ this.flushThresholdOperations = componentSettings.getAsInt("flush_threshold_ops", componentSettings.getAsInt("flush_threshold", 5000));
+ this.flushThresholdSize = componentSettings.getAsBytesSize("flush_threshold_size", new ByteSizeValue(200, ByteSizeUnit.MB));
+ this.flushThresholdPeriod = componentSettings.getAsTime("flush_threshold_period", TimeValue.timeValueMinutes(30));
+ this.interval = componentSettings.getAsTime("interval", timeValueMillis(5000));
+ this.disableFlush = componentSettings.getAsBoolean("disable_flush", false);
+
+ logger.debug("interval [{}], flush_threshold_ops [{}], flush_threshold_size [{}], flush_threshold_period [{}]", interval, flushThresholdOperations, flushThresholdSize, flushThresholdPeriod);
+
+ this.future = threadPool.schedule(interval, ThreadPool.Names.SAME, new TranslogBasedFlush());
+
+ indexSettingsService.addListener(applySettings);
+ }
+
+
+ public void close() {
+ indexSettingsService.removeListener(applySettings);
+ this.future.cancel(true);
+ }
+
+ public static final String INDEX_TRANSLOG_FLUSH_INTERVAL = "index.translog.interval";
+ public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS = "index.translog.flush_threshold_ops";
+ public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE = "index.translog.flush_threshold_size";
+ public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD = "index.translog.flush_threshold_period";
+ public static final String INDEX_TRANSLOG_DISABLE_FLUSH = "index.translog.disable_flush";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ int flushThresholdOperations = settings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, TranslogService.this.flushThresholdOperations);
+ if (flushThresholdOperations != TranslogService.this.flushThresholdOperations) {
+ logger.info("updating flush_threshold_ops from [{}] to [{}]", TranslogService.this.flushThresholdOperations, flushThresholdOperations);
+ TranslogService.this.flushThresholdOperations = flushThresholdOperations;
+ }
+ ByteSizeValue flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, TranslogService.this.flushThresholdSize);
+ if (!flushThresholdSize.equals(TranslogService.this.flushThresholdSize)) {
+ logger.info("updating flush_threshold_size from [{}] to [{}]", TranslogService.this.flushThresholdSize, flushThresholdSize);
+ TranslogService.this.flushThresholdSize = flushThresholdSize;
+ }
+ TimeValue flushThresholdPeriod = settings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TranslogService.this.flushThresholdPeriod);
+ if (!flushThresholdPeriod.equals(TranslogService.this.flushThresholdPeriod)) {
+ logger.info("updating flush_threshold_period from [{}] to [{}]", TranslogService.this.flushThresholdPeriod, flushThresholdPeriod);
+ TranslogService.this.flushThresholdPeriod = flushThresholdPeriod;
+ }
+ TimeValue interval = settings.getAsTime(INDEX_TRANSLOG_FLUSH_INTERVAL, TranslogService.this.interval);
+ if (!interval.equals(TranslogService.this.interval)) {
+ logger.info("updating interval from [{}] to [{}]", TranslogService.this.interval, interval);
+ TranslogService.this.interval = interval;
+ }
+ boolean disableFlush = settings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, TranslogService.this.disableFlush);
+ if (disableFlush != TranslogService.this.disableFlush) {
+ logger.info("updating disable_flush from [{}] to [{}]", TranslogService.this.disableFlush, disableFlush);
+ TranslogService.this.disableFlush = disableFlush;
+ }
+ }
+ }
+
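+ // schedule the next check at a random point in [interval, 2 * interval) so
+ // that the translog checks of different shards do not all fire at once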
+ private TimeValue computeNextInterval() {
+ return new TimeValue(interval.millis() + (ThreadLocalRandom.current().nextLong(interval.millis())));
+ }
+
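+ // periodic task that triggers a flush once any of the thresholds is breached:
+ // number of operations, translog size in bytes, or time since the last flush;
+ // the flush itself runs asynchronously on the dedicated FLUSH thread pool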
+ private class TranslogBasedFlush implements Runnable {
+
+ private volatile long lastFlushTime = System.currentTimeMillis();
+
+ @Override
+ public void run() {
+ if (indexShard.state() == IndexShardState.CLOSED) {
+ return;
+ }
+
+ // flush is disabled, but still reschedule
+ if (disableFlush) {
+ reschedule();
+ return;
+ }
+
+ if (indexShard.state() == IndexShardState.CREATED) {
+ reschedule();
+ return;
+ }
+
+ int currentNumberOfOperations = translog.estimatedNumberOfOperations();
+ if (currentNumberOfOperations == 0) {
+ reschedule();
+ return;
+ }
+
+ if (flushThresholdOperations > 0) {
+ if (currentNumberOfOperations > flushThresholdOperations) {
+ logger.trace("flushing translog, operations [{}], breached [{}]", currentNumberOfOperations, flushThresholdOperations);
+ asyncFlushAndReschedule();
+ return;
+ }
+ }
+
+ if (flushThresholdSize.bytes() > 0) {
+ long sizeInBytes = translog.translogSizeInBytes();
+ if (sizeInBytes > flushThresholdSize.bytes()) {
+ logger.trace("flushing translog, size [{}], breached [{}]", new ByteSizeValue(sizeInBytes), flushThresholdSize);
+ asyncFlushAndReschedule();
+ return;
+ }
+ }
+
+ if (flushThresholdPeriod.millis() > 0) {
+ if ((threadPool.estimatedTimeInMillis() - lastFlushTime) > flushThresholdPeriod.millis()) {
+ logger.trace("flushing translog, last_flush_time [{}], breached [{}]", lastFlushTime, flushThresholdPeriod);
+ asyncFlushAndReschedule();
+ return;
+ }
+ }
+
+ reschedule();
+ }
+
+ private void reschedule() {
+ future = threadPool.schedule(computeNextInterval(), ThreadPool.Names.SAME, this);
+ }
+
+ private void asyncFlushAndReschedule() {
+ threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ indexShard.flush(new Engine.Flush());
+ } catch (IllegalIndexShardStateException e) {
+ // we are being closed, or in created state, ignore
+ } catch (FlushNotAllowedEngineException e) {
+ // ignore this exception, we are not allowed to perform flush
+ } catch (Throwable e) {
+ logger.warn("failed to flush shard on translog threshold", e);
+ }
+ lastFlushTime = threadPool.estimatedTimeInMillis();
+
+ if (indexShard.state() != IndexShardState.CLOSED) {
+ future = threadPool.schedule(computeNextInterval(), ThreadPool.Names.SAME, TranslogBasedFlush.this);
+ }
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/src/main/java/org/elasticsearch/index/translog/TranslogStats.java
new file mode 100644
index 0000000..ea917ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/TranslogStats.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class TranslogStats implements ToXContent, Streamable {
+
+ private long translogSizeInBytes = 0;
+ private int estimatedNumberOfOperations = 0;
+
+ public TranslogStats() {}
+
+ public TranslogStats(int estimatedNumberOfOperations, long translogSizeInBytes) {
+ this.estimatedNumberOfOperations = estimatedNumberOfOperations;
+ this.translogSizeInBytes = translogSizeInBytes;
+ }
+
+ public void add(TranslogStats translogStats) {
+ if (translogStats == null) {
+ return;
+ }
+
+ this.estimatedNumberOfOperations += translogStats.estimatedNumberOfOperations;
+ this.translogSizeInBytes += translogStats.translogSizeInBytes;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.TRANSLOG);
+ builder.field(Fields.OPERATIONS, estimatedNumberOfOperations);
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, translogSizeInBytes);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString TRANSLOG = new XContentBuilderString("translog");
+ static final XContentBuilderString OPERATIONS = new XContentBuilderString("operations");
+ static final XContentBuilderString SIZE = new XContentBuilderString("size");
+ static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ estimatedNumberOfOperations = in.readVInt();
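+ // translogSizeInBytes is not part of the stream format; stats read
+ // from a remote node only carry the operation count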
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(estimatedNumberOfOperations);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java b/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java
new file mode 100644
index 0000000..b25418a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class TranslogStreams {
+
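+ // on the wire an operation is a single opType byte followed by its body;
+ // the 4-byte size frame that precedes each operation in the translog file
+ // is consumed by the caller, or skipped explicitly in readSource below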
+ public static Translog.Operation readTranslogOperation(StreamInput in) throws IOException {
+ Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte());
+ Translog.Operation operation;
+ switch (type) {
+ case CREATE:
+ operation = new Translog.Create();
+ break;
+ case DELETE:
+ operation = new Translog.Delete();
+ break;
+ case DELETE_BY_QUERY:
+ operation = new Translog.DeleteByQuery();
+ break;
+ case SAVE:
+ operation = new Translog.Index();
+ break;
+ default:
+ throw new IOException("No type for [" + type + "]");
+ }
+ operation.readFrom(in);
+ return operation;
+ }
+
+ public static Translog.Source readSource(byte[] data) throws IOException {
+ BytesStreamInput in = new BytesStreamInput(data, false);
+ in.readInt(); // the size header
+ Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte());
+ Translog.Operation operation;
+ switch (type) {
+ case CREATE:
+ operation = new Translog.Create();
+ break;
+ case DELETE:
+ operation = new Translog.Delete();
+ break;
+ case DELETE_BY_QUERY:
+ operation = new Translog.DeleteByQuery();
+ break;
+ case SAVE:
+ operation = new Translog.Index();
+ break;
+ default:
+ throw new IOException("No type for [" + type + "]");
+ }
+ return operation.readSource(in);
+ }
+
+ public static void writeTranslogOperation(StreamOutput out, Translog.Operation op) throws IOException {
+ out.writeByte(op.opType().id());
+ op.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java
new file mode 100644
index 0000000..3ada5e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogException;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ */
+public class BufferingFsTranslogFile implements FsTranslogFile {
+
+ private final long id;
+ private final ShardId shardId;
+ private final RafReference raf;
+
+ private final ReadWriteLock rwl = new ReentrantReadWriteLock();
+
+ private volatile int operationCounter;
+
+ private long lastPosition;
+ private volatile long lastWrittenPosition;
+
+ private volatile long lastSyncPosition = 0;
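+ // lastPosition counts bytes accepted into the translog (including bytes
+ // still sitting in the buffer), lastWrittenPosition counts bytes flushed to
+ // the channel, and lastSyncPosition remembers lastPosition at the last sync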
+
+ private byte[] buffer;
+ private int bufferCount;
+
+ public BufferingFsTranslogFile(ShardId shardId, long id, RafReference raf, int bufferSize) throws IOException {
+ this.shardId = shardId;
+ this.id = id;
+ this.raf = raf;
+ this.buffer = new byte[bufferSize];
+ raf.raf().setLength(0);
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ public int estimatedNumberOfOperations() {
+ return operationCounter;
+ }
+
+ public long translogSizeInBytes() {
+ return lastWrittenPosition;
+ }
+
+ @Override
+ public Translog.Location add(byte[] data, int from, int size) throws IOException {
+ rwl.writeLock().lock();
+ try {
+ operationCounter++;
+ long position = lastPosition;
+ if (size >= buffer.length) {
+ flushBuffer();
+ // we use the channel to write, since on windows, writing to the RAF might not be reflected
+ // when reading through the channel
+ raf.channel().write(ByteBuffer.wrap(data, from, size));
+ lastWrittenPosition += size;
+ lastPosition += size;
+ return new Translog.Location(id, position, size);
+ }
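+ // the write fits into the buffer; flush first if there is not enough room left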
+ if (size > buffer.length - bufferCount) {
+ flushBuffer();
+ }
+ System.arraycopy(data, from, buffer, bufferCount, size);
+ bufferCount += size;
+ lastPosition += size;
+ return new Translog.Location(id, position, size);
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ private void flushBuffer() throws IOException {
+ if (bufferCount > 0) {
+ // we use the channel to write, since on windows, writing to the RAF might not be reflected
+ // when reading through the channel
+ raf.channel().write(ByteBuffer.wrap(buffer, 0, bufferCount));
+ lastWrittenPosition += bufferCount;
+ bufferCount = 0;
+ }
+ }
+
+ @Override
+ public byte[] read(Translog.Location location) throws IOException {
+ rwl.readLock().lock();
+ try {
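+ // anything at or beyond lastWrittenPosition has not reached the channel
+ // yet and must be served from the in-memory buffer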
+ if (location.translogLocation >= lastWrittenPosition) {
+ byte[] data = new byte[location.size];
+ System.arraycopy(buffer, (int) (location.translogLocation - lastWrittenPosition), data, 0, location.size);
+ return data;
+ }
+ } finally {
+ rwl.readLock().unlock();
+ }
+ ByteBuffer readBuffer = ByteBuffer.allocate(location.size);
+ raf.channel().read(readBuffer, location.translogLocation);
+ return readBuffer.array();
+ }
+
+ @Override
+ public FsChannelSnapshot snapshot() throws TranslogException {
+ rwl.writeLock().lock();
+ try {
+ flushBuffer();
+ if (!raf.increaseRefCount()) {
+ return null;
+ }
+ return new FsChannelSnapshot(this.id, raf, lastWrittenPosition, operationCounter);
+ } catch (IOException e) {
+ throw new TranslogException(shardId, "failed to flush", e);
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public boolean syncNeeded() {
+ return lastPosition != lastSyncPosition;
+ }
+
+ @Override
+ public void sync() {
+ try {
+ // check if we really need to sync here...
+ long last = lastPosition;
+ if (last == lastSyncPosition) {
+ return;
+ }
+ lastSyncPosition = last;
+ rwl.writeLock().lock();
+ try {
+ flushBuffer();
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ raf.channel().force(false);
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ @Override
+ public void close(boolean delete) {
+ if (!delete) {
+ rwl.writeLock().lock();
+ try {
+ flushBuffer();
+ sync();
+ } catch (IOException e) {
+ throw new TranslogException(shardId, "failed to close", e);
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+ raf.decreaseRefCount(delete);
+ }
+
+ @Override
+ public void reuse(FsTranslogFile other) {
+ if (!(other instanceof BufferingFsTranslogFile)) {
+ return;
+ }
+ rwl.writeLock().lock();
+ try {
+ flushBuffer();
+ this.buffer = ((BufferingFsTranslogFile) other).buffer;
+ } catch (IOException e) {
+ throw new TranslogException(shardId, "failed to flush", e);
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public void updateBufferSize(int bufferSize) {
+ rwl.writeLock().lock();
+ try {
+ if (this.buffer.length == bufferSize) {
+ return;
+ }
+ flushBuffer();
+ this.buffer = new byte[bufferSize];
+ } catch (IOException e) {
+ throw new TranslogException(shardId, "failed to flush", e);
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java
new file mode 100644
index 0000000..334bab1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.FileChannelInputStream;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStreams;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+/**
+ *
+ */
+public class FsChannelSnapshot implements Translog.Snapshot {
+
+ private final long id;
+
+ private final int totalOperations;
+
+ private final RafReference raf;
+
+ private final FileChannel channel;
+
+ private final long length;
+
+ private Translog.Operation lastOperationRead = null;
+
+ private int position = 0;
+
+ private ByteBuffer cacheBuffer;
+
+ public FsChannelSnapshot(long id, RafReference raf, long length, int totalOperations) throws FileNotFoundException {
+ this.id = id;
+ this.raf = raf;
+ this.channel = raf.raf().getChannel();
+ this.length = length;
+ this.totalOperations = totalOperations;
+ }
+
+ @Override
+ public long translogId() {
+ return this.id;
+ }
+
+ @Override
+ public long position() {
+ return this.position;
+ }
+
+ @Override
+ public long length() {
+ return this.length;
+ }
+
+ @Override
+ public int estimatedTotalOperations() {
+ return this.totalOperations;
+ }
+
+ @Override
+ public InputStream stream() throws IOException {
+ return new FileChannelInputStream(channel, position, lengthInBytes());
+ }
+
+ @Override
+ public long lengthInBytes() {
+ return length - position;
+ }
+
+ @Override
+ public boolean hasNext() {
+ try {
+ if (position > length) {
+ return false;
+ }
+ if (cacheBuffer == null) {
+ cacheBuffer = ByteBuffer.allocate(1024);
+ }
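+ // each operation is framed as [size:int][body]; read the size first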
+ cacheBuffer.limit(4);
+ int bytesRead = channel.read(cacheBuffer, position);
+ if (bytesRead < 4) {
+ return false;
+ }
+ cacheBuffer.flip();
+ int opSize = cacheBuffer.getInt();
+ position += 4;
+ if ((position + opSize) > length) {
+ // restore the position to before we read the opSize
+ position -= 4;
+ return false;
+ }
+ if (cacheBuffer.capacity() < opSize) {
+ cacheBuffer = ByteBuffer.allocate(opSize);
+ }
+ cacheBuffer.clear();
+ cacheBuffer.limit(opSize);
+ channel.read(cacheBuffer, position);
+ cacheBuffer.flip();
+ position += opSize;
+ lastOperationRead = TranslogStreams.readTranslogOperation(new BytesStreamInput(cacheBuffer.array(), 0, opSize, true));
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ }
+
+ @Override
+ public Translog.Operation next() {
+ return this.lastOperationRead;
+ }
+
+ @Override
+ public void seekForward(long length) {
+ this.position += length;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ raf.decreaseRefCount(true);
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java
new file mode 100644
index 0000000..3c92ca0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogException;
+import org.elasticsearch.index.translog.TranslogStreams;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ *
+ */
+public class FsTranslog extends AbstractIndexShardComponent implements Translog {
+
+ public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type";
+
+ class ApplySettings implements IndexSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ FsTranslogFile.Type type = FsTranslogFile.Type.fromString(settings.get(INDEX_TRANSLOG_FS_TYPE, FsTranslog.this.type.name()));
+ if (type != FsTranslog.this.type) {
+ logger.info("updating type from [{}] to [{}]", FsTranslog.this.type, type);
+ FsTranslog.this.type = type;
+ }
+ }
+ }
+
+ private final IndexSettingsService indexSettingsService;
+
+ private final ReadWriteLock rwl = new ReentrantReadWriteLock();
+ private final File[] locations;
+
+ private volatile FsTranslogFile current;
+ private volatile FsTranslogFile trans;
+
+ private FsTranslogFile.Type type;
+
+ private boolean syncOnEachOperation = false;
+
+ private volatile int bufferSize;
+ private volatile int transientBufferSize;
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ @Inject
+ public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, NodeEnvironment nodeEnv) {
+ super(shardId, indexSettings);
+ this.indexSettingsService = indexSettingsService;
+ File[] shardLocations = nodeEnv.shardLocations(shardId);
+ this.locations = new File[shardLocations.length];
+ for (int i = 0; i < shardLocations.length; i++) {
+ locations[i] = new File(shardLocations[i], "translog");
+ FileSystemUtils.mkdirs(locations[i]);
+ }
+
+ this.type = FsTranslogFile.Type.fromString(componentSettings.get("type", FsTranslogFile.Type.BUFFERED.name()));
+ this.bufferSize = (int) componentSettings.getAsBytesSize("buffer_size", ByteSizeValue.parseBytesSizeValue("64k")).bytes(); // Not really interesting, updated by IndexingMemoryController...
+ this.transientBufferSize = (int) componentSettings.getAsBytesSize("transient_buffer_size", ByteSizeValue.parseBytesSizeValue("8k")).bytes();
+
+ indexSettingsService.addListener(applySettings);
+ }
+
+ public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, File location) {
+ super(shardId, indexSettings);
+ this.indexSettingsService = null;
+ this.locations = new File[]{location};
+ FileSystemUtils.mkdirs(location);
+
+ this.type = FsTranslogFile.Type.fromString(componentSettings.get("type", FsTranslogFile.Type.BUFFERED.name()));
+ }
+
+ @Override
+ public void closeWithDelete() {
+ close(true);
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ close(false);
+ }
+
+ @Override
+ public void updateBuffer(ByteSizeValue bufferSize) {
+ this.bufferSize = bufferSize.bytesAsInt();
+ rwl.writeLock().lock();
+ try {
+ FsTranslogFile current1 = this.current;
+ if (current1 != null) {
+ current1.updateBufferSize(this.bufferSize);
+ }
+ current1 = this.trans;
+ if (current1 != null) {
+ current1.updateBufferSize(this.bufferSize);
+ }
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ private void close(boolean delete) {
+ if (indexSettingsService != null) {
+ indexSettingsService.removeListener(applySettings);
+ }
+ rwl.writeLock().lock();
+ try {
+ FsTranslogFile current1 = this.current;
+ if (current1 != null) {
+ current1.close(delete);
+ }
+ current1 = this.trans;
+ if (current1 != null) {
+ current1.close(delete);
+ }
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ public File[] locations() {
+ return locations;
+ }
+
+ @Override
+ public long currentId() {
+ FsTranslogFile current1 = this.current;
+ if (current1 == null) {
+ return -1;
+ }
+ return current1.id();
+ }
+
+ @Override
+ public int estimatedNumberOfOperations() {
+ FsTranslogFile current1 = this.current;
+ if (current1 == null) {
+ return 0;
+ }
+ return current1.estimatedNumberOfOperations();
+ }
+
+ @Override
+ public long memorySizeInBytes() {
+ return 0;
+ }
+
+ @Override
+ public long translogSizeInBytes() {
+ FsTranslogFile current1 = this.current;
+ if (current1 == null) {
+ return 0;
+ }
+ return current1.translogSizeInBytes();
+ }
+
+ @Override
+ public void clearUnreferenced() {
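+ // delete translog files left behind by previous generations, keeping only
+ // the files backing the current and transient translogs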
+ rwl.writeLock().lock();
+ try {
+ for (File location : locations) {
+ File[] files = location.listFiles();
+ if (files != null) {
+ for (File file : files) {
+ if (file.getName().equals("translog-" + current.id())) {
+ continue;
+ }
+ if (trans != null && file.getName().equals("translog-" + trans.id())) {
+ continue;
+ }
+ try {
+ file.delete();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+ }
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public void newTranslog(long id) throws TranslogException {
+ rwl.writeLock().lock();
+ try {
+ FsTranslogFile newFile;
+ long size = Long.MAX_VALUE;
+ File location = null;
+ for (File file : locations) {
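+ // the Translog implementation is pluggable through the index.translog.type
+ // setting; the filesystem based FsTranslog is the default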
+ long currentFree = file.getFreeSpace();
+ if (currentFree < size) {
+ size = currentFree;
+ location = file;
+ } else if (currentFree == size && ThreadLocalRandom.current().nextBoolean()) {
+ location = file;
+ }
+ }
+ try {
+ newFile = type.create(shardId, id, new RafReference(new File(location, "translog-" + id)), bufferSize);
+ } catch (IOException e) {
+ throw new TranslogException(shardId, "failed to create new translog file", e);
+ }
+ FsTranslogFile old = current;
+ current = newFile;
+ if (old != null) {
+ // we might create a new translog overriding the current translog id
+ boolean delete = true;
+ if (old.id() == id) {
+ delete = false;
+ }
+ old.close(delete);
+ }
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public void newTransientTranslog(long id) throws TranslogException {
+ rwl.writeLock().lock();
+ try {
+ assert this.trans == null;
+ long size = Long.MAX_VALUE;
+ File location = null;
+ for (File file : locations) {
+ long currentFree = file.getFreeSpace();
+ if (currentFree < size) {
+ size = currentFree;
+ location = file;
+ } else if (currentFree == size && ThreadLocalRandom.current().nextBoolean()) {
+ location = file;
+ }
+ }
+ this.trans = type.create(shardId, id, new RafReference(new File(location, "translog-" + id)), transientBufferSize);
+ } catch (IOException e) {
+ throw new TranslogException(shardId, "failed to create new translog file", e);
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public void makeTransientCurrent() {
+ FsTranslogFile old;
+ rwl.writeLock().lock();
+ try {
+ assert this.trans != null;
+ old = current;
+ this.current = this.trans;
+ this.trans = null;
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ old.close(true);
+ current.reuse(old);
+ }
+
+ @Override
+ public void revertTransient() {
+ FsTranslogFile tmpTransient;
+ rwl.writeLock().lock();
+ try {
+ tmpTransient = trans;
+ this.trans = null;
+ } finally {
+ rwl.writeLock().unlock();
+ }
+ // previous transient might be null because it was failed on its creation
+ // for example
+ if (tmpTransient != null) {
+ tmpTransient.close(true);
+ }
+ }
+
+ public byte[] read(Location location) {
+ rwl.readLock().lock();
+ try {
+ FsTranslogFile trans = this.trans;
+ if (trans != null && trans.id() == location.translogId) {
+ try {
+ return trans.read(location);
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ if (current.id() == location.translogId) {
+ try {
+ return current.read(location);
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ return null;
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public Location add(Operation operation) throws TranslogException {
+ rwl.readLock().lock();
+ try {
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.writeInt(0); // marker for the size...
+ TranslogStreams.writeTranslogOperation(out, operation);
+ out.flush();
+
+ int size = out.size();
+ out.seek(0);
+ out.writeInt(size - 4);
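+ // back-patch the size frame written above; the recorded size excludes
+ // the 4-byte header itself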
+
+ Location location = current.add(out.bytes().array(), out.bytes().arrayOffset(), size);
+ if (syncOnEachOperation) {
+ current.sync();
+ }
+ FsTranslogFile trans = this.trans;
+ if (trans != null) {
+ try {
+ location = trans.add(out.bytes().array(), out.bytes().arrayOffset(), size);
+ } catch (ClosedChannelException e) {
+ // ignore
+ }
+ }
+ return location;
+ } catch (Exception e) {
+ throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);
+ } finally {
+ rwl.readLock().unlock();
+ }
+ }
+
+ @Override
+ public FsChannelSnapshot snapshot() throws TranslogException {
+ while (true) {
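+ // a null snapshot means the current file was closed by a concurrent
+ // translog roll; retry against the new current file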
+ FsChannelSnapshot snapshot = current.snapshot();
+ if (snapshot != null) {
+ return snapshot;
+ }
+ Thread.yield();
+ }
+ }
+
+ @Override
+ public Snapshot snapshot(Snapshot snapshot) {
+ FsChannelSnapshot snap = snapshot();
+ if (snap.translogId() == snapshot.translogId()) {
+ snap.seekForward(snapshot.position());
+ }
+ return snap;
+ }
+
+ @Override
+ public void sync() {
+ FsTranslogFile current1 = this.current;
+ if (current1 == null) {
+ return;
+ }
+ current1.sync();
+ }
+
+ @Override
+ public boolean syncNeeded() {
+ FsTranslogFile current1 = this.current;
+ return current1 != null && current1.syncNeeded();
+ }
+
+ @Override
+ public void syncOnEachOperation(boolean syncOnEachOperation) {
+ this.syncOnEachOperation = syncOnEachOperation;
+ if (syncOnEachOperation) {
+ type = FsTranslogFile.Type.SIMPLE;
+ } else {
+ type = FsTranslogFile.Type.BUFFERED;
+ }
+ }
+
+ @Override
+ public TranslogStats stats() {
+ return new TranslogStats(estimatedNumberOfOperations(), translogSizeInBytes());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java
new file mode 100644
index 0000000..7f4e3f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogException;
+
+import java.io.IOException;
+
+public interface FsTranslogFile {
+
+ public static enum Type {
+
+ SIMPLE() {
+ @Override
+ public FsTranslogFile create(ShardId shardId, long id, RafReference raf, int bufferSize) throws IOException {
+ return new SimpleFsTranslogFile(shardId, id, raf);
+ }
+ },
+ BUFFERED() {
+ @Override
+ public FsTranslogFile create(ShardId shardId, long id, RafReference raf, int bufferSize) throws IOException {
+ return new BufferingFsTranslogFile(shardId, id, raf, bufferSize);
+ }
+ };
+
+ public abstract FsTranslogFile create(ShardId shardId, long id, RafReference raf, int bufferSize) throws IOException;
+
+ public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
+ if (SIMPLE.name().equalsIgnoreCase(type)) {
+ return SIMPLE;
+ } else if (BUFFERED.name().equalsIgnoreCase(type)) {
+ return BUFFERED;
+ }
+ throw new ElasticsearchIllegalArgumentException("No translog fs type [" + type + "]");
+ }
+ }
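+ // e.g. FsTranslogFile.Type.fromString("buffered").create(shardId, 1, raf, 64 * 1024)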
+
+ long id();
+
+ int estimatedNumberOfOperations();
+
+ long translogSizeInBytes();
+
+ Translog.Location add(byte[] data, int from, int size) throws IOException;
+
+ byte[] read(Translog.Location location) throws IOException;
+
+ void close(boolean delete) throws TranslogException;
+
+ FsChannelSnapshot snapshot() throws TranslogException;
+
+ void reuse(FsTranslogFile other) throws TranslogException;
+
+ void updateBufferSize(int bufferSize) throws TranslogException;
+
+ void sync();
+
+ boolean syncNeeded();
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/fs/RafReference.java b/src/main/java/org/elasticsearch/index/translog/fs/RafReference.java
new file mode 100644
index 0000000..ee87523
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/fs/RafReference.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ *
+ */
+public class RafReference {
+
+ private final File file;
+
+ private final RandomAccessFile raf;
+
+ private final FileChannel channel;
+
+ private final AtomicInteger refCount = new AtomicInteger();
+
+ public RafReference(File file) throws FileNotFoundException {
+ this.file = file;
+ this.raf = new RandomAccessFile(file, "rw");
+ this.channel = raf.getChannel();
+ this.refCount.incrementAndGet();
+ }
+
+ public File file() {
+ return this.file;
+ }
+
+ public FileChannel channel() {
+ return this.channel;
+ }
+
+ public RandomAccessFile raf() {
+ return this.raf;
+ }
+
+ /**
+ * Increases the ref count, and returns <tt>true</tt> if it managed to
+ * actually increment it.
+ */
+ public boolean increaseRefCount() {
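+ // a result of 1 means the count had already dropped to zero and the file
+ // may have been closed, so the caller must not use this reference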
+ return refCount.incrementAndGet() > 1;
+ }
+
+ public void decreaseRefCount(boolean delete) {
+ if (refCount.decrementAndGet() <= 0) {
+ try {
+ raf.close();
+ if (delete) {
+ file.delete();
+ }
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java
new file mode 100644
index 0000000..35e625b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogException;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class SimpleFsTranslogFile implements FsTranslogFile {
+
+ private final long id;
+ private final ShardId shardId;
+ private final RafReference raf;
+
+ private final AtomicInteger operationCounter = new AtomicInteger();
+
+ private final AtomicLong lastPosition = new AtomicLong(0);
+ private final AtomicLong lastWrittenPosition = new AtomicLong(0);
+
+ private volatile long lastSyncPosition = 0;
+
+ public SimpleFsTranslogFile(ShardId shardId, long id, RafReference raf) throws IOException {
+ this.shardId = shardId;
+ this.id = id;
+ this.raf = raf;
+ raf.raf().setLength(0);
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ public int estimatedNumberOfOperations() {
+ return operationCounter.get();
+ }
+
+ public long translogSizeInBytes() {
+ return lastWrittenPosition.get();
+ }
+
+ public Translog.Location add(byte[] data, int from, int size) throws IOException {
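+ // reserve a region of the file with an atomic add, then write at that
+ // absolute position; concurrent writers never overlap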
+ long position = lastPosition.getAndAdd(size);
+ raf.channel().write(ByteBuffer.wrap(data, from, size), position);
+ lastWrittenPosition.getAndAdd(size);
+ operationCounter.incrementAndGet();
+ return new Translog.Location(id, position, size);
+ }
+
+ public byte[] read(Translog.Location location) throws IOException {
+ ByteBuffer buffer = ByteBuffer.allocate(location.size);
+ raf.channel().read(buffer, location.translogLocation);
+ return buffer.array();
+ }
+
+ public void close(boolean delete) {
+ sync();
+ raf.decreaseRefCount(delete);
+ }
+
+ /**
+ * Returns a snapshot on this file, <tt>null</tt> if it failed to snapshot.
+ */
+ public FsChannelSnapshot snapshot() throws TranslogException {
+ try {
+ if (!raf.increaseRefCount()) {
+ return null;
+ }
+ return new FsChannelSnapshot(this.id, raf, lastWrittenPosition.get(), operationCounter.get());
+ } catch (Exception e) {
+ throw new TranslogException(shardId, "Failed to snapshot", e);
+ }
+ }
+
+ @Override
+ public boolean syncNeeded() {
+ return lastWrittenPosition.get() != lastSyncPosition;
+ }
+
+ public void sync() {
+ try {
+ // check if we really need to sync here...
+ long last = lastWrittenPosition.get();
+ if (last == lastSyncPosition) {
+ return;
+ }
+ lastSyncPosition = last;
+ raf.channel().force(false);
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ @Override
+ public void reuse(FsTranslogFile other) {
+ // nothing to do there
+ }
+
+ @Override
+ public void updateBufferSize(int bufferSize) throws TranslogException {
+ // nothing to do here...
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java b/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java
new file mode 100644
index 0000000..416198e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/warmer/ShardIndexWarmerService.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.warmer;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public class ShardIndexWarmerService extends AbstractIndexShardComponent {
+
+ private final CounterMetric current = new CounterMetric();
+ private final MeanMetric warmerMetric = new MeanMetric();
+
+
+ @Inject
+ public ShardIndexWarmerService(ShardId shardId, @IndexSettings Settings indexSettings) {
+ super(shardId, indexSettings);
+ }
+
+ public ESLogger logger() {
+ return this.logger;
+ }
+
+ public void onPreWarm() {
+ current.inc();
+ }
+
+ public void onPostWarm(long tookInNanos) {
+ current.dec();
+ warmerMetric.inc(tookInNanos);
+ }
+
+ public WarmerStats stats() {
+ return new WarmerStats(current.count(), warmerMetric.count(), TimeUnit.NANOSECONDS.toMillis(warmerMetric.sum()));
+ }
+}
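
ShardIndexWarmerService expects callers to bracket each warmer run with
onPreWarm()/onPostWarm(tookInNanos). A hypothetical call site (not part of this
patch) that measures the run with System.nanoTime():

    // Hypothetical helper: time a warmer run and feed the metrics above.
    void runWarmer(ShardIndexWarmerService service, Runnable warmer) {
        service.onPreWarm();
        long start = System.nanoTime();
        try {
            warmer.run();
        } finally {
            // recorded even if the warmer throws, so `current` never leaks
            service.onPostWarm(System.nanoTime() - start);
        }
    }
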
diff --git a/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java b/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java
new file mode 100644
index 0000000..2e68d14
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.warmer;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+public class WarmerStats implements Streamable, ToXContent {
+
+ private long current;
+
+ private long total;
+
+ private long totalTimeInMillis;
+
+ public WarmerStats() {
+
+ }
+
+ public WarmerStats(long current, long total, long totalTimeInMillis) {
+ this.current = current;
+ this.total = total;
+ this.totalTimeInMillis = totalTimeInMillis;
+ }
+
+ public void add(long current, long total, long totalTimeInMillis) {
+ this.current += current;
+ this.total += total;
+ this.totalTimeInMillis += totalTimeInMillis;
+ }
+
+ public void add(WarmerStats warmerStats) {
+ if (warmerStats == null) {
+ return;
+ }
+ this.current += warmerStats.current;
+ this.total += warmerStats.total;
+ this.totalTimeInMillis += warmerStats.totalTimeInMillis;
+ }
+
+ public long current() {
+ return this.current;
+ }
+
+ /**
+ * The total number of warmers executed.
+ */
+ public long total() {
+ return this.total;
+ }
+
+ /**
+ * The total time warmers have been executed (in milliseconds).
+ */
+ public long totalTimeInMillis() {
+ return this.totalTimeInMillis;
+ }
+
+ /**
+ * The total time warmers have been executed.
+ */
+ public TimeValue totalTime() {
+ return new TimeValue(totalTimeInMillis);
+ }
+
+ public static WarmerStats readWarmerStats(StreamInput in) throws IOException {
+ WarmerStats warmerStats = new WarmerStats();
+ warmerStats.readFrom(in);
+ return warmerStats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.WARMER);
+ builder.field(Fields.CURRENT, current);
+ builder.field(Fields.TOTAL, total);
+ builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, totalTimeInMillis);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString WARMER = new XContentBuilderString("warmer");
+ static final XContentBuilderString CURRENT = new XContentBuilderString("current");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_TIME = new XContentBuilderString("total_time");
+ static final XContentBuilderString TOTAL_TIME_IN_MILLIS = new XContentBuilderString("total_time_in_millis");
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ current = in.readVLong();
+ total = in.readVLong();
+ totalTimeInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(current);
+ out.writeVLong(total);
+ out.writeVLong(totalTimeInMillis);
+ }
+}
\ No newline at end of file
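
WarmerStats from individual shards are meant to be merged upward via add(),
which is null-safe. A small usage sketch (the numbers are illustrative):

    // Merge per-shard warmer stats into a node-level total.
    WarmerStats total = new WarmerStats();
    total.add(new WarmerStats(0, 12, 340)); // shard 0: 12 runs, 340 ms total
    total.add(new WarmerStats(1, 7, 95));   // shard 1: 7 runs, 95 ms, 1 in flight
    total.add(null);                        // ignored: add(WarmerStats) is null-safe
    assert total.current() == 1;
    assert total.total() == 19;
    assert total.totalTimeInMillis() == 435;
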
diff --git a/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java b/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java
new file mode 100644
index 0000000..aa7b559
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+
+/**
+ *
+ */
+public class AliasFilterParsingException extends IndexException {
+
+ public AliasFilterParsingException(Index index, String name, String desc) {
+ super(index, "[" + name + "], " + desc);
+ }
+
+ public AliasFilterParsingException(Index index, String name, String desc, Throwable ex) {
+ super(index, "[" + name + "], " + desc, ex);
+ }
+
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java b/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java
new file mode 100644
index 0000000..4c2df2e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class IndexAlreadyExistsException extends IndexException {
+
+ public IndexAlreadyExistsException(Index index) {
+ this(index, "already exists");
+ }
+
+ public IndexAlreadyExistsException(Index index, String message) {
+ super(index, message);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
\ No newline at end of file
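
Each of these exception types overrides status() so the REST layer can translate
the failure into an HTTP response code. A hypothetical translation step (sendError
is an illustrative helper, not part of this patch):

    try {
        indicesService.createIndex("twitter", settings, localNodeId);
    } catch (IndexAlreadyExistsException e) {
        // e.status() is RestStatus.BAD_REQUEST, i.e. HTTP 400
        sendError(e.status(), e.getMessage());
    }
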
diff --git a/src/main/java/org/elasticsearch/indices/IndexCreationException.java b/src/main/java/org/elasticsearch/indices/IndexCreationException.java
new file mode 100644
index 0000000..9016a45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndexCreationException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+
+/**
+ */
+public class IndexCreationException extends IndexException implements ElasticsearchWrapperException {
+
+ public IndexCreationException(Index index, Throwable cause) {
+ super(index, "failed to create index", cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/IndexMissingException.java b/src/main/java/org/elasticsearch/indices/IndexMissingException.java
new file mode 100644
index 0000000..dbadef5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndexMissingException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class IndexMissingException extends IndexException {
+
+ public IndexMissingException(Index index) {
+ super(index, "missing");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java b/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java
new file mode 100644
index 0000000..e4434a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Thrown when an action cannot be performed because the primary shard of
+ * some shard group in an index has not yet been allocated following an API action.
+ */
+public class IndexPrimaryShardNotAllocatedException extends IndexException {
+
+ public IndexPrimaryShardNotAllocatedException(Index index) {
+ super(index, "primary not allocated post api");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.CONFLICT;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java b/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java
new file mode 100644
index 0000000..5775de0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class IndexTemplateAlreadyExistsException extends ElasticsearchException {
+
+ private final String name;
+
+ public IndexTemplateAlreadyExistsException(String name) {
+ super("index_template [" + name + "] already exists");
+ this.name = name;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java b/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java
new file mode 100644
index 0000000..ba1948e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class IndexTemplateMissingException extends ElasticsearchException {
+
+ private final String name;
+
+ public IndexTemplateMissingException(String name) {
+ super("index_template [" + name + "] missing");
+ this.name = name;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/IndicesLifecycle.java b/src/main/java/org/elasticsearch/indices/IndicesLifecycle.java
new file mode 100644
index 0000000..5a2bbac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndicesLifecycle.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+
+/**
+ * A global component allowing listeners to register for the lifecycle events of an index
+ * (created/closed) and an index shard (created/closed).
+ */
+public interface IndicesLifecycle {
+
+ /**
+ * Add a listener.
+ */
+ void addListener(Listener listener);
+
+ /**
+ * Remove a listener.
+ */
+ void removeListener(Listener listener);
+
+ /**
+ * A listener for index and index shard lifecycle events (create/closed).
+ */
+ public abstract static class Listener {
+
+ /**
+ * Called when the shard routing has changed state.
+ *
+ * @param indexShard The index shard
+ * @param oldRouting The old routing state (can be null)
+ * @param newRouting The new routing state
+ */
+ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) {
+
+ }
+
+ /**
+ * Called before the index gets created.
+ */
+ public void beforeIndexCreated(Index index) {
+
+ }
+
+ /**
+ * Called after the index has been created.
+ */
+ public void afterIndexCreated(IndexService indexService) {
+
+ }
+
+ /**
+ * Called before the index shard gets created.
+ */
+ public void beforeIndexShardCreated(ShardId shardId) {
+
+ }
+
+ /**
+ * Called after the index shard has been created.
+ */
+ public void afterIndexShardCreated(IndexShard indexShard) {
+
+ }
+
+ public void afterIndexShardPostRecovery(IndexShard indexShard) {
+
+ }
+
+ /**
+ * Called after the index shard has been started.
+ */
+ public void afterIndexShardStarted(IndexShard indexShard) {
+
+ }
+
+ /**
+ * Called before the index gets closed.
+ *
+ * @param indexService The index service
+ */
+ public void beforeIndexClosed(IndexService indexService) {
+
+ }
+
+ /**
+ * Called after the index has been closed.
+ *
+ * @param index The index
+ */
+ public void afterIndexClosed(Index index) {
+
+ }
+
+ /**
+ * Called before the index shard gets closed.
+ *
+ * @param shardId The shard id
+ * @param indexShard The index shard (can be <tt>null</tt>)
+ */
+ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard) {
+
+ }
+
+ /**
+ * Called after the index shard has been closed.
+ *
+ * @param shardId The shard id
+ */
+ public void afterIndexShardClosed(ShardId shardId) {
+
+ }
+
+ /**
+ * Called after a shard's {@link org.elasticsearch.index.shard.IndexShardState} changes.
+ * The order of concurrent events is preserved. The execution must be lightweight.
+ *
+ * @param indexShard the shard the new state was applied to
+ * @param previousState the previous index shard state if there was one, null otherwise
+ * @param currentState the new shard state
+ * @param reason the reason for the state change if there is one, null otherwise
+ */
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
+
+ }
+ }
+
+}
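
Because every Listener method has a no-op default body, a consumer overrides only
the callbacks it cares about. A hypothetical listener (not part of this patch) that
logs shard state transitions, registered through addListener():

    IndicesLifecycle.Listener stateLogger = new IndicesLifecycle.Listener() {
        @Override
        public void indexShardStateChanged(IndexShard indexShard,
                                           @Nullable IndexShardState previousState,
                                           IndexShardState currentState,
                                           @Nullable String reason) {
            // previousState and reason may be null, per the contract above
            System.out.println(indexShard.shardId() + ": " + previousState
                    + " -> " + currentState + " (" + reason + ")");
        }
    };
    indicesLifecycle.addListener(stateLogger);
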
diff --git a/src/main/java/org/elasticsearch/indices/IndicesModule.java b/src/main/java/org/elasticsearch/indices/IndicesModule.java
new file mode 100644
index 0000000..76ac7e1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.update.UpdateHelper;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import org.elasticsearch.indices.cache.filter.terms.IndicesTermsFilterCache;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.InternalCircuitBreakerService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.memory.IndexingMemoryController;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.recovery.RecoverySource;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
+import org.elasticsearch.indices.ttl.IndicesTTLService;
+import org.elasticsearch.indices.warmer.IndicesWarmer;
+import org.elasticsearch.indices.warmer.InternalIndicesWarmer;
+
+/**
+ *
+ */
+public class IndicesModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public IndicesModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(new IndicesQueriesModule(), new IndicesAnalysisModule());
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndicesLifecycle.class).to(InternalIndicesLifecycle.class).asEagerSingleton();
+
+ bind(IndicesService.class).to(InternalIndicesService.class).asEagerSingleton();
+
+ bind(RecoverySettings.class).asEagerSingleton();
+ bind(RecoveryTarget.class).asEagerSingleton();
+ bind(RecoverySource.class).asEagerSingleton();
+
+ bind(IndicesStore.class).asEagerSingleton();
+ bind(IndicesClusterStateService.class).asEagerSingleton();
+ bind(IndexingMemoryController.class).asEagerSingleton();
+ bind(IndicesFilterCache.class).asEagerSingleton();
+ bind(IndicesFieldDataCache.class).asEagerSingleton();
+ bind(IndicesTermsFilterCache.class).asEagerSingleton();
+ bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton();
+ bind(IndicesTTLService.class).asEagerSingleton();
+ bind(IndicesWarmer.class).to(InternalIndicesWarmer.class).asEagerSingleton();
+ bind(UpdateHelper.class).asEagerSingleton();
+
+ bind(CircuitBreakerService.class).to(InternalCircuitBreakerService.class).asEagerSingleton();
+ bind(IndicesFieldDataCacheListener.class).asEagerSingleton();
+ }
+}
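
The configure() method follows the standard Guice idiom of binding an interface to
a concrete implementation as an eager singleton (Elasticsearch ships its own copy
of Guice under org.elasticsearch.common.inject). A minimal sketch of the idiom with
illustrative names, not from this patch:

    interface Greeter { String greet(); }

    class ConsoleGreeter implements Greeter {
        public String greet() { return "hello"; }
    }

    class GreeterModule extends AbstractModule {
        @Override
        protected void configure() {
            // eager singletons are constructed when the injector is created,
            // which is how IndicesModule wires its services at node startup
            bind(Greeter.class).to(ConsoleGreeter.class).asEagerSingleton();
        }
    }
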
diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java
new file mode 100644
index 0000000..9365a7a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.service.IndexService;
+
+import java.util.Set;
+
+/**
+ *
+ */
+public interface IndicesService extends Iterable<IndexService>, LifecycleComponent<IndicesService> {
+
+ /**
+ * Returns <tt>true</tt> if changes (adding / removing) to indices, shards, and so on are allowed.
+ */
+ boolean changesAllowed();
+
+ /**
+ * Returns the node-level indices stats. The <tt>includePrevious</tt> flag controls
+ * whether stats of shards that have since been closed are aggregated as well (only for
+ * relevant stats, such as refresh and indexing, not for docs/store).
+ */
+ NodeIndicesStats stats(boolean includePrevious);
+
+ NodeIndicesStats stats(boolean includePrevious, CommonStatsFlags flags);
+
+ boolean hasIndex(String index);
+
+ IndicesLifecycle indicesLifecycle();
+
+ Set<String> indices();
+
+ IndexService indexService(String index);
+
+ IndexService indexServiceSafe(String index) throws IndexMissingException;
+
+ IndexService createIndex(String index, Settings settings, String localNodeId) throws ElasticsearchException;
+
+ void removeIndex(String index, String reason) throws ElasticsearchException;
+}
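
indexService() and indexServiceSafe() differ only in how a missing index is
reported. Hypothetical call sites (not from this patch):

    // tolerant lookup: returns null when the index does not exist
    IndexService maybe = indicesService.indexService("logs");
    if (maybe != null) {
        // use it
    }

    // strict lookup: throws IndexMissingException, which the REST layer
    // maps to HTTP 404 via its status() override
    IndexService required = indicesService.indexServiceSafe("logs");
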
diff --git a/src/main/java/org/elasticsearch/indices/InternalIndicesLifecycle.java b/src/main/java/org/elasticsearch/indices/InternalIndicesLifecycle.java
new file mode 100644
index 0000000..692357a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/InternalIndicesLifecycle.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ *
+ */
+public class InternalIndicesLifecycle extends AbstractComponent implements IndicesLifecycle {
+
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ @Inject
+ public InternalIndicesLifecycle(Settings settings) {
+ super(settings);
+ }
+
+ public void addListener(Listener listener) {
+ listeners.add(listener);
+ }
+
+ public void removeListener(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) {
+ for (Listener listener : listeners) {
+ try {
+ listener.shardRoutingChanged(indexShard, oldRouting, newRouting);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke shard touring changed callback", t, indexShard.shardId());
+ }
+ }
+ }
+
+ public void beforeIndexCreated(Index index) {
+ for (Listener listener : listeners) {
+ try {
+ listener.beforeIndexCreated(index);
+ } catch (Throwable t) {
+ logger.warn("[{}] failed to invoke before index created callback", t, index.name());
+ }
+ }
+ }
+
+ public void afterIndexCreated(IndexService indexService) {
+ for (Listener listener : listeners) {
+ try {
+ listener.afterIndexCreated(indexService);
+ } catch (Throwable t) {
+ logger.warn("[{}] failed to invoke after index created callback", t, indexService.index().name());
+ }
+ }
+ }
+
+ public void beforeIndexShardCreated(ShardId shardId) {
+ for (Listener listener : listeners) {
+ try {
+ listener.beforeIndexShardCreated(shardId);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke before shard created callback", t, shardId);
+ }
+ }
+ }
+
+ public void afterIndexShardCreated(IndexShard indexShard) {
+ for (Listener listener : listeners) {
+ try {
+ listener.afterIndexShardCreated(indexShard);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke after shard created callback", t, indexShard.shardId());
+ }
+ }
+ }
+
+ public void afterIndexShardPostRecovery(IndexShard indexShard) {
+ for (Listener listener : listeners) {
+ try {
+ listener.afterIndexShardPostRecovery(indexShard);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke after shard post recovery callback", t, indexShard.shardId());
+ }
+ }
+ }
+
+ public void afterIndexShardStarted(IndexShard indexShard) {
+ for (Listener listener : listeners) {
+ try {
+ listener.afterIndexShardStarted(indexShard);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke after shard started callback", t, indexShard.shardId());
+ }
+ }
+ }
+
+ public void beforeIndexClosed(IndexService indexService) {
+ for (Listener listener : listeners) {
+ try {
+ listener.beforeIndexClosed(indexService);
+ } catch (Throwable t) {
+ logger.warn("[{}] failed to invoke before index closed callback", t, indexService.index().name());
+ }
+ }
+ }
+
+ public void afterIndexClosed(Index index) {
+ for (Listener listener : listeners) {
+ try {
+ listener.afterIndexClosed(index);
+ } catch (Throwable t) {
+ logger.warn("[{}] failed to invoke after index closed callback", t, index.name());
+ }
+ }
+ }
+
+ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard) {
+ for (Listener listener : listeners) {
+ try {
+ listener.beforeIndexShardClosed(shardId, indexShard);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke before shard closed callback", t, shardId);
+ }
+ }
+ }
+
+ public void afterIndexShardClosed(ShardId shardId) {
+ for (Listener listener : listeners) {
+ try {
+ listener.afterIndexShardClosed(shardId);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke after shard closed callback", t, shardId);
+ }
+ }
+ }
+
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, @Nullable String reason) {
+ for (Listener listener : listeners) {
+ try {
+ listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason);
+ } catch (Throwable t) {
+ logger.warn("{} failed to invoke index shard state changed callback", t, indexShard.shardId());
+ }
+ }
+ }
+}
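
Every dispatcher above has the same shape: iterate a CopyOnWriteArrayList and
isolate each listener, so one throwing listener cannot prevent the others from
running. The pattern reduced to a standalone sketch (Java 6 style, matching the
surrounding code):

    import java.util.concurrent.CopyOnWriteArrayList;

    interface EventListener { void onEvent(String event); }

    class Dispatcher {
        private final CopyOnWriteArrayList<EventListener> listeners =
                new CopyOnWriteArrayList<EventListener>();

        void add(EventListener l) { listeners.add(l); }

        void fire(String event) {
            for (EventListener l : listeners) { // iterates a stable snapshot
                try {
                    l.onEvent(event);
                } catch (Throwable t) {
                    // swallow and continue: a failing listener must not
                    // starve the listeners behind it
                    t.printStackTrace();
                }
            }
        }
    }
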
diff --git a/src/main/java/org/elasticsearch/indices/InternalIndicesService.java b/src/main/java/org/elasticsearch/indices/InternalIndicesService.java
new file mode 100644
index 0000000..8115962
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/InternalIndicesService.java
@@ -0,0 +1,377 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import com.google.common.collect.*;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.index.*;
+import org.elasticsearch.index.aliases.IndexAliasesServiceModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.fielddata.IndexFieldDataModule;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.gateway.IndexGatewayModule;
+import org.elasticsearch.index.get.GetStats;
+import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.stats.SearchStats;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.service.InternalIndexService;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.shard.IllegalIndexShardStateException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.IndexStoreModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.plugins.IndexPluginsModule;
+import org.elasticsearch.plugins.PluginsService;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ *
+ */
+public class InternalIndicesService extends AbstractLifecycleComponent<IndicesService> implements IndicesService {
+
+ private final InternalIndicesLifecycle indicesLifecycle;
+
+ private final IndicesAnalysisService indicesAnalysisService;
+
+ private final IndicesStore indicesStore;
+
+ private final Injector injector;
+
+ private final PluginsService pluginsService;
+
+ private final Map<String, Injector> indicesInjectors = new HashMap<String, Injector>();
+
+ private volatile ImmutableMap<String, IndexService> indices = ImmutableMap.of();
+
+ private final OldShardsStats oldShardsStats = new OldShardsStats();
+
+ @Inject
+ public InternalIndicesService(Settings settings, IndicesLifecycle indicesLifecycle, IndicesAnalysisService indicesAnalysisService, IndicesStore indicesStore, Injector injector) {
+ super(settings);
+ this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
+ this.indicesAnalysisService = indicesAnalysisService;
+ this.indicesStore = indicesStore;
+ this.injector = injector;
+
+ this.pluginsService = injector.getInstance(PluginsService.class);
+
+ this.indicesLifecycle.addListener(oldShardsStats);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ ImmutableSet<String> indices = ImmutableSet.copyOf(this.indices.keySet());
+ final CountDownLatch latch = new CountDownLatch(indices.size());
+
+ final ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown"));
+ final ExecutorService shardsStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("shards_shutdown"));
+
+ for (final String index : indices) {
+ indicesStopExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ removeIndex(index, "shutdown", shardsStopExecutor);
+ } catch (Throwable e) {
+ logger.warn("failed to delete index on stop [" + index + "]", e);
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ // ignore
+ } finally {
+ shardsStopExecutor.shutdown();
+ indicesStopExecutor.shutdown();
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ injector.getInstance(RecoverySettings.class).close();
+ indicesStore.close();
+ indicesAnalysisService.close();
+ }
+
+ @Override
+ public IndicesLifecycle indicesLifecycle() {
+ return this.indicesLifecycle;
+ }
+
+ @Override
+ public NodeIndicesStats stats(boolean includePrevious) {
+ return stats(includePrevious, new CommonStatsFlags().all());
+ }
+
+ @Override
+ public NodeIndicesStats stats(boolean includePrevious, CommonStatsFlags flags) {
+ CommonStats oldStats = new CommonStats(flags);
+
+ if (includePrevious) {
+ Flag[] setFlags = flags.getFlags();
+ for (Flag flag : setFlags) {
+ switch (flag) {
+ case Get:
+ oldStats.get.add(oldShardsStats.getStats);
+ break;
+ case Indexing:
+ oldStats.indexing.add(oldShardsStats.indexingStats);
+ break;
+ case Search:
+ oldStats.search.add(oldShardsStats.searchStats);
+ break;
+ case Merge:
+ oldStats.merge.add(oldShardsStats.mergeStats);
+ break;
+ case Refresh:
+ oldStats.refresh.add(oldShardsStats.refreshStats);
+ break;
+ case Flush:
+ oldStats.flush.add(oldShardsStats.flushStats);
+ break;
+ }
+ }
+ }
+
+ Map<Index, List<IndexShardStats>> statsByShard = Maps.newHashMap();
+ for (IndexService indexService : indices.values()) {
+ for (IndexShard indexShard : indexService) {
+ try {
+ IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard, flags) });
+ if (!statsByShard.containsKey(indexService.index())) {
+ statsByShard.put(indexService.index(), Lists.<IndexShardStats>newArrayList(indexShardStats));
+ } else {
+ statsByShard.get(indexService.index()).add(indexShardStats);
+ }
+ } catch (IllegalIndexShardStateException e) {
+ // we can safely ignore illegal state on ones that are closing for example
+ }
+ }
+ }
+ return new NodeIndicesStats(oldStats, statsByShard);
+ }
+
+ /**
+ * Returns <tt>true</tt> if changes (adding / removing) to indices, shards, and so on are allowed.
+ */
+ public boolean changesAllowed() {
+ // changes are only allowed while started, since indices are removed as part of stop
+ return lifecycle.started();
+ }
+
+ @Override
+ public UnmodifiableIterator<IndexService> iterator() {
+ return indices.values().iterator();
+ }
+
+ public boolean hasIndex(String index) {
+ return indices.containsKey(index);
+ }
+
+ public Set<String> indices() {
+ return newHashSet(indices.keySet());
+ }
+
+ public IndexService indexService(String index) {
+ return indices.get(index);
+ }
+
+ @Override
+ public IndexService indexServiceSafe(String index) throws IndexMissingException {
+ IndexService indexService = indexService(index);
+ if (indexService == null) {
+ throw new IndexMissingException(new Index(index));
+ }
+ return indexService;
+ }
+
+ public synchronized IndexService createIndex(String sIndexName, Settings settings, String localNodeId) throws ElasticsearchException {
+ if (!lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("Can't create an index [" + sIndexName + "], node is closed");
+ }
+ Index index = new Index(sIndexName);
+ if (indicesInjectors.containsKey(index.name())) {
+ throw new IndexAlreadyExistsException(index);
+ }
+
+ indicesLifecycle.beforeIndexCreated(index);
+
+ logger.debug("creating Index [{}], shards [{}]/[{}]", sIndexName, settings.get(SETTING_NUMBER_OF_SHARDS), settings.get(SETTING_NUMBER_OF_REPLICAS));
+
+ Settings indexSettings = settingsBuilder()
+ .put(this.settings)
+ .put(settings)
+ .classLoader(settings.getClassLoader())
+ .build();
+
+ ModulesBuilder modules = new ModulesBuilder();
+ modules.add(new IndexNameModule(index));
+ modules.add(new LocalNodeIdModule(localNodeId));
+ modules.add(new IndexSettingsModule(index, indexSettings));
+ modules.add(new IndexPluginsModule(indexSettings, pluginsService));
+ modules.add(new IndexStoreModule(indexSettings));
+ modules.add(new IndexEngineModule(indexSettings));
+ modules.add(new AnalysisModule(indexSettings, indicesAnalysisService));
+ modules.add(new SimilarityModule(indexSettings));
+ modules.add(new IndexCacheModule(indexSettings));
+ modules.add(new IndexFieldDataModule(indexSettings));
+ modules.add(new CodecModule(indexSettings));
+ modules.add(new MapperServiceModule());
+ modules.add(new IndexQueryParserModule(indexSettings));
+ modules.add(new IndexAliasesServiceModule());
+ modules.add(new IndexGatewayModule(indexSettings, injector.getInstance(Gateway.class)));
+ modules.add(new IndexModule(indexSettings));
+
+ Injector indexInjector;
+ try {
+ indexInjector = modules.createChildInjector(injector);
+ } catch (CreationException e) {
+ throw new IndexCreationException(index, Injectors.getFirstErrorFailure(e));
+ } catch (Throwable e) {
+ throw new IndexCreationException(index, e);
+ }
+
+ indicesInjectors.put(index.name(), indexInjector);
+
+ IndexService indexService = indexInjector.getInstance(IndexService.class);
+
+ indicesLifecycle.afterIndexCreated(indexService);
+
+ indices = newMapBuilder(indices).put(index.name(), indexService).immutableMap();
+
+ return indexService;
+ }
+
+ @Override
+ public void removeIndex(String index, String reason) throws ElasticsearchException {
+ removeIndex(index, reason, null);
+ }
+
+ private synchronized void removeIndex(String index, String reason, @Nullable Executor executor) throws ElasticsearchException {
+ IndexService indexService;
+ Injector indexInjector = indicesInjectors.remove(index);
+ if (indexInjector == null) {
+ return;
+ }
+
+ Map<String, IndexService> tmpMap = newHashMap(indices);
+ indexService = tmpMap.remove(index);
+ indices = ImmutableMap.copyOf(tmpMap);
+
+ indicesLifecycle.beforeIndexClosed(indexService);
+
+ for (Class<? extends CloseableIndexComponent> closeable : pluginsService.indexServices()) {
+ indexInjector.getInstance(closeable).close();
+ }
+
+ ((InternalIndexService) indexService).close(reason, executor);
+
+ indexInjector.getInstance(IndexCache.class).close();
+ indexInjector.getInstance(IndexFieldDataService.class).clear();
+ indexInjector.getInstance(AnalysisService.class).close();
+ indexInjector.getInstance(IndexEngine.class).close();
+
+ indexInjector.getInstance(IndexGateway.class).close();
+ indexInjector.getInstance(MapperService.class).close();
+ indexInjector.getInstance(IndexQueryParserService.class).close();
+
+ indexInjector.getInstance(IndexStore.class).close();
+
+ Injectors.close(injector);
+
+ indicesLifecycle.afterIndexClosed(indexService.index());
+ }
+
+ static class OldShardsStats extends IndicesLifecycle.Listener {
+
+ final SearchStats searchStats = new SearchStats();
+ final GetStats getStats = new GetStats();
+ final IndexingStats indexingStats = new IndexingStats();
+ final MergeStats mergeStats = new MergeStats();
+ final RefreshStats refreshStats = new RefreshStats();
+ final FlushStats flushStats = new FlushStats();
+
+ @Override
+ public synchronized void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard) {
+ if (indexShard != null) {
+ getStats.add(indexShard.getStats());
+ indexingStats.add(indexShard.indexingStats(), false);
+ searchStats.add(indexShard.searchStats(), false);
+ mergeStats.add(indexShard.mergeStats());
+ refreshStats.add(indexShard.refreshStats());
+ flushStats.add(indexShard.flushStats());
+ }
+ }
+ }
+}
\ No newline at end of file
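
createIndex() and removeIndex() never mutate the live `indices` map in place: they
copy it, modify the copy, and swap the volatile reference, so iterator() and
indexService() read a consistent snapshot without locking. The idiom reduced to its
essentials, using the same Guava ImmutableMap the file imports:

    import com.google.common.collect.ImmutableMap;
    import java.util.HashMap;
    import java.util.Map;

    class Registry<V> {
        private volatile ImmutableMap<String, V> map = ImmutableMap.of();

        synchronized void put(String key, V value) {
            Map<String, V> tmp = new HashMap<String, V>(map);
            tmp.put(key, value);
            map = ImmutableMap.copyOf(tmp); // single atomic reference swap
        }

        synchronized void remove(String key) {
            Map<String, V> tmp = new HashMap<String, V>(map);
            tmp.remove(key);
            map = ImmutableMap.copyOf(tmp);
        }

        V get(String key) {
            return map.get(key); // lock-free read of an immutable snapshot
        }
    }
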
diff --git a/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java b/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java
new file mode 100644
index 0000000..9dee3f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class InvalidAliasNameException extends IndexException {
+
+ public InvalidAliasNameException(Index index, String name, String desc) {
+ super(index, "Invalid alias name [" + name + "], " + desc);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java b/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java
new file mode 100644
index 0000000..3f16a3b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class InvalidIndexNameException extends IndexException {
+
+ public InvalidIndexNameException(Index index, String name, String desc) {
+ super(index, "Invalid index name [" + name + "], " + desc);
+ }
+
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java b/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java
new file mode 100644
index 0000000..d9ad8c5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class InvalidIndexTemplateException extends ElasticsearchException {
+
+ private final String name;
+
+ public InvalidIndexTemplateException(String name, String msg) {
+ super("index_template [" + name + "] invalid, cause [" + msg + "]");
+ this.name = name;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java b/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java
new file mode 100644
index 0000000..106e76f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.mapper.MapperException;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ *
+ */
+public class InvalidTypeNameException extends MapperException {
+
+ public InvalidTypeNameException(String message) {
+ super(message);
+ }
+
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java
new file mode 100644
index 0000000..c6b2577
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.FilterCacheStats;
+import org.elasticsearch.index.cache.id.IdCacheStats;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.fielddata.FieldDataStats;
+import org.elasticsearch.index.flush.FlushStats;
+import org.elasticsearch.index.get.GetStats;
+import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.merge.MergeStats;
+import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.stats.SearchStats;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Global view of the indices stats of a specific node.
+ */
+public class NodeIndicesStats implements Streamable, Serializable, ToXContent {
+
+ private CommonStats stats;
+ private Map<Index, List<IndexShardStats>> statsByShard;
+
+ NodeIndicesStats() {
+ }
+
+ public NodeIndicesStats(CommonStats oldStats, Map<Index, List<IndexShardStats>> statsByShard) {
+ this.statsByShard = statsByShard;
+
+ // make a total common stats from old ones and current ones
+ this.stats = oldStats;
+ for (List<IndexShardStats> shardStatsList : statsByShard.values()) {
+ for (IndexShardStats indexShardStats : shardStatsList) {
+ for (ShardStats shardStats : indexShardStats.getShards()) {
+ stats.add(shardStats.getStats());
+ }
+ }
+ }
+ }
+
+ @Nullable
+ public StoreStats getStore() {
+ return stats.getStore();
+ }
+
+ @Nullable
+ public DocsStats getDocs() {
+ return stats.getDocs();
+ }
+
+ @Nullable
+ public IndexingStats getIndexing() {
+ return stats.getIndexing();
+ }
+
+ @Nullable
+ public GetStats getGet() {
+ return stats.getGet();
+ }
+
+ @Nullable
+ public SearchStats getSearch() {
+ return stats.getSearch();
+ }
+
+ @Nullable
+ public PercolateStats getPercolate() {
+ return stats.getPercolate();
+ }
+
+ @Nullable
+ public MergeStats getMerge() {
+ return stats.getMerge();
+ }
+
+ @Nullable
+ public RefreshStats getRefresh() {
+ return stats.getRefresh();
+ }
+
+ @Nullable
+ public FlushStats getFlush() {
+ return stats.getFlush();
+ }
+
+ @Nullable
+ public FieldDataStats getFieldData() {
+ return stats.getFieldData();
+ }
+
+ @Nullable
+ public FilterCacheStats getFilterCache() {
+ return stats.getFilterCache();
+ }
+
+ @Nullable
+ public IdCacheStats getIdCache() {
+ return stats.getIdCache();
+ }
+
+ @Nullable
+ public CompletionStats getCompletion() {
+ return stats.getCompletion();
+ }
+
+ @Nullable
+ public SegmentsStats getSegments() {
+ return stats.getSegments();
+ }
+
+ public static NodeIndicesStats readIndicesStats(StreamInput in) throws IOException {
+ NodeIndicesStats stats = new NodeIndicesStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ stats = CommonStats.readCommonStats(in);
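+ // Wire format (mirrored in writeTo): the aggregated common stats block, then a
+ // boolean presence flag for the per-shard map, then for each index its name, a
+ // vInt entry count and that many serialized IndexShardStats instances.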
+ if (in.readBoolean()) {
+ int entries = in.readVInt();
+ statsByShard = Maps.newHashMap();
+ for (int i = 0; i < entries; i++) {
+ Index index = Index.readIndexName(in);
+ int indexShardListSize = in.readVInt();
+ List<IndexShardStats> indexShardStats = Lists.newArrayListWithCapacity(indexShardListSize);
+ for (int j = 0; j < indexShardListSize; j++) {
+ indexShardStats.add(IndexShardStats.readIndexShardStats(in));
+ }
+ statsByShard.put(index, indexShardStats);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ stats.writeTo(out);
+ out.writeBoolean(statsByShard != null);
+ if (statsByShard != null) {
+ out.writeVInt(statsByShard.size());
+ for (Map.Entry<Index, List<IndexShardStats>> entry : statsByShard.entrySet()) {
+ entry.getKey().writeTo(out);
+ out.writeVInt(entry.getValue().size());
+ for (IndexShardStats indexShardStats : entry.getValue()) {
+ indexShardStats.writeTo(out);
+ }
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ String level = params.param("level", "node");
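+ // "node" emits only the aggregated totals; "indices" additionally emits a
+ // per-index breakdown and "shards" a per-shard one. Any other value leaves
+ // the builder untouched.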
+ boolean isLevelValid = "node".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level);
+ if (!isLevelValid) {
+ return builder;
+ }
+
+ // "node" level
+ builder.startObject(Fields.INDICES);
+ stats.toXContent(builder, params);
+
+ if ("indices".equalsIgnoreCase(level)) {
+ Map<Index, CommonStats> indexStats = createStatsByIndex();
+ builder.startObject(Fields.INDICES);
+ for (Map.Entry<Index, CommonStats> entry : indexStats.entrySet()) {
+ builder.startObject(entry.getKey().name());
+ entry.getValue().toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endObject();
+ } else if ("shards".equalsIgnoreCase(level)) {
+ builder.startObject("shards");
+ for (Map.Entry<Index, List<IndexShardStats>> entry : statsByShard.entrySet()) {
+ builder.startArray(entry.getKey().name());
+ for (IndexShardStats indexShardStats : entry.getValue()) {
+ builder.startObject().startObject(String.valueOf(indexShardStats.getShardId().getId()));
+ for (ShardStats shardStats : indexShardStats.getShards()) {
+ shardStats.toXContent(builder, params);
+ }
+ builder.endObject().endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ private Map<Index, CommonStats> createStatsByIndex() {
+ Map<Index, CommonStats> statsMap = Maps.newHashMap();
+ for (Map.Entry<Index, List<IndexShardStats>> entry : statsByShard.entrySet()) {
+ if (!statsMap.containsKey(entry.getKey())) {
+ statsMap.put(entry.getKey(), new CommonStats());
+ }
+
+ for (IndexShardStats indexShardStats : entry.getValue()) {
+ for (ShardStats shardStats : indexShardStats.getShards()) {
+ statsMap.get(entry.getKey()).add(shardStats.getStats());
+ }
+ }
+ }
+
+ return statsMap;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/src/main/java/org/elasticsearch/indices/TypeMissingException.java
new file mode 100644
index 0000000..ae2830b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/TypeMissingException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.rest.RestStatus;
+
+import java.util.Arrays;
+
+/**
+ * Thrown when a requested mapping type does not exist in the index.
+ */
+public class TypeMissingException extends IndexException {
+
+ public TypeMissingException(Index index, String... types) {
+ super(index, "type[" + Arrays.toString(types) + "] missing");
+ }
+
+ public TypeMissingException(Index index, String[] types, String message) {
+ super(index, "type[" + Arrays.toString(types) + "] missing: " + message);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
new file mode 100644
index 0000000..26e02c6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import org.apache.lucene.analysis.hunspell.HunspellDictionary;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+
+import java.io.*;
+import java.net.MalformedURLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * Serves as a node level registry for hunspell dictionaries. This service expects all dictionaries to be located under
+ * the {@code <path.conf>/hunspell} directory, where each locale has its dedicated sub-directory which holds the dictionary
+ * files. For example, the dictionary files for {@code en_US} locale must be placed under {@code <path.conf>/hunspell/en_US}
+ * directory.
+ * <p/>
+ * The following settings can be set for each dictionary:
+ * <ul>
+ * <li>{@code ignore_case} - If true, dictionary matching will be case insensitive (defaults to {@code false})</li>
+ *   <li>{@code strict_affix_parsing} - Determines whether errors while reading an affix rules file will cause an exception or simply be ignored (defaults to {@code false})</li>
+ * </ul>
+ * <p/>
+ * These settings can either be configured as node level configuration, such as:
+ * <br/><br/>
+ * <pre><code>
+ * indices.analysis.hunspell.dictionary.en_US.ignore_case: true
+ * indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing: false
+ * </code></pre>
+ * <p/>
+ * or, as dedicated configuration per dictionary, placed in a {@code settings.yml} (or {@code settings.json}) file under the dictionary directory. For
+ * example, the following can be the content of the {@code <path.config>/hunspell/en_US/settings.yml} file:
+ * <br/><br/>
+ * <pre><code>
+ * ignore_case: true
+ * strict_affix_parsing: false
+ * </code></pre>
+ *
+ * @see org.elasticsearch.index.analysis.HunspellTokenFilterFactory
+ */
+public class HunspellService extends AbstractComponent {
+
+ private final static DictionaryFileFilter DIC_FILE_FILTER = new DictionaryFileFilter();
+ private final static AffixFileFilter AFFIX_FILE_FILTER = new AffixFileFilter();
+
+ private final LoadingCache<String, HunspellDictionary> dictionaries;
+ private final Map<String, HunspellDictionary> knownDictionaries;
+
+ private final boolean defaultIgnoreCase;
+ private final boolean defaultStrictAffixParsing;
+ private final File hunspellDir;
+
+ public HunspellService(final Settings settings, final Environment env) {
+ this(settings, env, Collections.<String, HunspellDictionary>emptyMap());
+ }
+
+ @Inject
+ public HunspellService(final Settings settings, final Environment env, final Map<String, HunspellDictionary> knownDictionaries) {
+ super(settings);
+ this.knownDictionaries = knownDictionaries;
+ this.hunspellDir = resolveHunspellDirectory(settings, env);
+ this.defaultIgnoreCase = settings.getAsBoolean("indices.analysis.hunspell.dictionary.ignore_case", false);
+ this.defaultStrictAffixParsing = settings.getAsBoolean("indices.analysis.hunspell.dictionary.strict_affix_parsing", false);
+ final Version version = Lucene.parseVersion(settings.get("indices.analysis.hunspell.version"), Lucene.ANALYZER_VERSION, logger);
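+ // Dictionaries are loaded lazily on first access; dictionaries passed in through
+ // the constructor (e.g. bound via IndicesAnalysisModule) take precedence over the
+ // ones found on disk.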
+ dictionaries = CacheBuilder.newBuilder().build(new CacheLoader<String, HunspellDictionary>() {
+ @Override
+ public HunspellDictionary load(String locale) throws Exception {
+ HunspellDictionary dictionary = knownDictionaries.get(locale);
+ if (dictionary == null) {
+ dictionary = loadDictionary(locale, settings, env, version);
+ }
+ return dictionary;
+ }
+ });
+ scanAndLoadDictionaries();
+ }
+
+ /**
+ * Returns the hunspell dictionary for the given locale.
+ *
+ * @param locale The name of the locale
+ */
+ public HunspellDictionary getDictionary(String locale) {
+ return dictionaries.getUnchecked(locale);
+ }
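+
+ // A minimal usage sketch (hypothetical caller): assuming the dictionary files live
+ // under <path.conf>/hunspell/en_US, the locale name passed here must match that
+ // directory name.
+ //
+ //   HunspellDictionary enUS = hunspellService.getDictionary("en_US");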
+
+ private File resolveHunspellDirectory(Settings settings, Environment env) {
+ String location = settings.get("indices.analysis.hunspell.dictionary.location", null);
+ if (location != null) {
+ return new File(location);
+ }
+ return new File(env.configFile(), "hunspell");
+ }
+
+ /**
+ * Scans the hunspell directory and loads all found dictionaries
+ */
+ private void scanAndLoadDictionaries() {
+ if (hunspellDir.exists() && hunspellDir.isDirectory()) {
+ for (File file : hunspellDir.listFiles()) {
+ if (file.isDirectory()) {
+ if (file.list(AFFIX_FILE_FILTER).length > 0) { // just making sure it's indeed a dictionary dir
+ dictionaries.getUnchecked(file.getName());
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Loads the hunspell dictionary for the given locale.
+ *
+ * @param locale The locale of the hunspell dictionary to be loaded.
+ * @param nodeSettings The node level settings
+ * @param env The node environment (from which the conf path will be resolved)
+ * @param version The lucene version
+ * @return The loaded Hunspell dictionary
+ * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
+ */
+ private HunspellDictionary loadDictionary(String locale, Settings nodeSettings, Environment env, Version version) throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Loading hunspell dictionary [{}]...", locale);
+ }
+ File dicDir = new File(hunspellDir, locale);
+ if (!dicDir.exists() || !dicDir.isDirectory()) {
+ throw new ElasticsearchException(String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
+ }
+
+ // merging node settings with hunspell dictionary specific settings
+ nodeSettings = loadDictionarySettings(dicDir, nodeSettings.getByPrefix("indices.analysis.hunspell.dictionary." + locale + "."));
+
+ boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);
+ boolean strictAffixParsing = nodeSettings.getAsBoolean("strict_affix_parsing", defaultStrictAffixParsing);
+
+ File[] affixFiles = dicDir.listFiles(AFFIX_FILE_FILTER);
+ if (affixFiles.length != 1) {
+ throw new ElasticsearchException(String.format(Locale.ROOT, "Expected exactly one affix file for hunspell dictionary [%s]", locale));
+ }
+ InputStream affixStream = null;
+
+ File[] dicFiles = dicDir.listFiles(DIC_FILE_FILTER);
+ List<InputStream> dicStreams = new ArrayList<InputStream>(dicFiles.length);
+ try {
+
+ for (int i = 0; i < dicFiles.length; i++) {
+ dicStreams.add(new FileInputStream(dicFiles[i]));
+ }
+
+ affixStream = new FileInputStream(affixFiles[0]);
+
+ return new HunspellDictionary(affixStream, dicStreams, version, ignoreCase, strictAffixParsing);
+
+ } catch (Exception e) {
+ logger.error("Could not load hunspell dictionary [{}]", e, locale);
+ throw e;
+ } finally {
+ if (affixStream != null) {
+ try {
+ affixStream.close();
+ } catch (IOException e) {
+ // nothing much we can do here
+ }
+ }
+ for (InputStream in : dicStreams) {
+ if (in != null) {
+ try {
+ in.close();
+ } catch (IOException e) {
+ // nothing much we can do here
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Each hunspell dictionary directory may contain a {@code settings.yml} (or {@code settings.json}) file which holds dictionary specific settings. Default
+ * values for these settings are defined in the given default settings.
+ *
+ * @param dir The directory of the dictionary
+ * @param defaults The default settings for this dictionary
+ * @return The resolved settings.
+ */
+ private static Settings loadDictionarySettings(File dir, Settings defaults) {
+ File file = new File(dir, "settings.yml");
+ if (file.exists()) {
+ try {
+ return ImmutableSettings.settingsBuilder().loadFromUrl(file.toURI().toURL()).put(defaults).build();
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException(String.format(Locale.ROOT, "Could not load hunspell dictionary settings from [%s]", file.getAbsolutePath()), e);
+ }
+ }
+
+ file = new File(dir, "settings.json");
+ if (file.exists()) {
+ try {
+ return ImmutableSettings.settingsBuilder().loadFromUrl(file.toURI().toURL()).put(defaults).build();
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException(String.format(Locale.ROOT, "Could not load hunspell dictionary settings from [%s]", file.getAbsolutePath()), e);
+ }
+ }
+
+ return defaults;
+ }
+
+ /**
+ * Only accepts {@code *.dic} files
+ */
+ static class DictionaryFileFilter implements FilenameFilter {
+ @Override
+ public boolean accept(File dir, String name) {
+ return name.toLowerCase(Locale.ROOT).endsWith(".dic");
+ }
+ }
+
+ /**
+ * Only accepts {@code *.aff} files
+ */
+ static class AffixFileFilter implements FilenameFilter {
+ @Override
+ public boolean accept(File dir, String name) {
+ return name.toLowerCase(Locale.ROOT).endsWith(".aff");
+ }
+ }
+
+}
+
diff --git a/src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisModule.java b/src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisModule.java
new file mode 100644
index 0000000..8d3295d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisModule.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.analysis.hunspell.HunspellDictionary;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+
+import java.util.Map;
+
+public class IndicesAnalysisModule extends AbstractModule {
+
+ private final Map<String, HunspellDictionary> hunspellDictionaries = Maps.newHashMap();
+
+ public void addHunspellDictionary(String lang, HunspellDictionary dictionary) {
+ hunspellDictionaries.put(lang, dictionary);
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndicesAnalysisService.class).asEagerSingleton();
+
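+ // Expose programmatically registered dictionaries to HunspellService through a
+ // Guice MapBinder; with no bindings it still yields an empty injectable map.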
+ MapBinder<String, HunspellDictionary> dictionariesBinder = MapBinder.newMapBinder(binder(), String.class, HunspellDictionary.class);
+ for (Map.Entry<String, HunspellDictionary> entry : hunspellDictionaries.entrySet()) {
+ dictionariesBinder.addBinding(entry.getKey()).toInstance(entry.getValue());
+ }
+ bind(HunspellService.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisService.java b/src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisService.java
new file mode 100644
index 0000000..12849c1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/IndicesAnalysisService.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
+import org.apache.lucene.analysis.ar.ArabicStemFilter;
+import org.apache.lucene.analysis.br.BrazilianStemFilter;
+import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
+import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
+import org.apache.lucene.analysis.core.*;
+import org.apache.lucene.analysis.cz.CzechStemFilter;
+import org.apache.lucene.analysis.de.GermanStemFilter;
+import org.apache.lucene.analysis.en.KStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
+import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
+import org.apache.lucene.analysis.fr.FrenchAnalyzer;
+import org.apache.lucene.analysis.fr.FrenchStemFilter;
+import org.apache.lucene.analysis.miscellaneous.*;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
+import org.apache.lucene.analysis.ngram.NGramTokenFilter;
+import org.apache.lucene.analysis.ngram.NGramTokenizer;
+import org.apache.lucene.analysis.nl.DutchStemFilter;
+import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
+import org.apache.lucene.analysis.pattern.PatternTokenizer;
+import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.apache.lucene.analysis.snowball.SnowballFilter;
+import org.apache.lucene.analysis.standard.*;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.ElisionFilter;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.analysis.*;
+
+import java.io.Reader;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ * A node level registry of analyzers, to be reused by different indices which use default analyzers.
+ */
+public class IndicesAnalysisService extends AbstractComponent {
+
+ private final Map<String, PreBuiltAnalyzerProviderFactory> analyzerProviderFactories = ConcurrentCollections.newConcurrentMap();
+ private final Map<String, PreBuiltTokenizerFactoryFactory> tokenizerFactories = ConcurrentCollections.newConcurrentMap();
+ private final Map<String, PreBuiltTokenFilterFactoryFactory> tokenFilterFactories = ConcurrentCollections.newConcurrentMap();
+ private final Map<String, PreBuiltCharFilterFactoryFactory> charFilterFactories = ConcurrentCollections.newConcurrentMap();
+
+ public IndicesAnalysisService() {
+ super(EMPTY_SETTINGS);
+ }
+
+ @Inject
+ public IndicesAnalysisService(Settings settings) {
+ super(settings);
+
+ // Analyzers
+ for (PreBuiltAnalyzers preBuiltAnalyzerEnum : PreBuiltAnalyzers.values()) {
+ String name = preBuiltAnalyzerEnum.name().toLowerCase(Locale.ROOT);
+ analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, AnalyzerScope.INDICES, preBuiltAnalyzerEnum.getAnalyzer(Version.CURRENT)));
+ }
+
+ // Tokenizers
+ for (PreBuiltTokenizers preBuiltTokenizer : PreBuiltTokenizers.values()) {
+ String name = preBuiltTokenizer.name().toLowerCase(Locale.ROOT);
+ tokenizerFactories.put(name, new PreBuiltTokenizerFactoryFactory(preBuiltTokenizer.getTokenizerFactory(Version.CURRENT)));
+ }
+
+ // Tokenizer aliases
+ tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT)));
+ tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT)));
+
+
+ // Token filters
+ for (PreBuiltTokenFilters preBuiltTokenFilter : PreBuiltTokenFilters.values()) {
+ String name = preBuiltTokenFilter.name().toLowerCase(Locale.ROOT);
+ tokenFilterFactories.put(name, new PreBuiltTokenFilterFactoryFactory(preBuiltTokenFilter.getTokenFilterFactory(Version.CURRENT)));
+ }
+ // Token filter aliases
+ tokenFilterFactories.put("nGram", new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.NGRAM.getTokenFilterFactory(Version.CURRENT)));
+ tokenFilterFactories.put("edgeNGram", new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.EDGE_NGRAM.getTokenFilterFactory(Version.CURRENT)));
+
+
+ // Char Filters
+ for (PreBuiltCharFilters preBuiltCharFilter : PreBuiltCharFilters.values()) {
+ String name = preBuiltCharFilter.name().toLowerCase(Locale.ROOT);
+ charFilterFactories.put(name, new PreBuiltCharFilterFactoryFactory(preBuiltCharFilter.getCharFilterFactory(Version.CURRENT)));
+ }
+ // Char filter aliases
+ charFilterFactories.put("htmlStrip", new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT)));
+ }
+
+ public boolean hasCharFilter(String name) {
+ return charFilterFactoryFactory(name) != null;
+ }
+
+ public Map<String, PreBuiltCharFilterFactoryFactory> charFilterFactories() {
+ return charFilterFactories;
+ }
+
+ public CharFilterFactoryFactory charFilterFactoryFactory(String name) {
+ return charFilterFactories.get(name);
+ }
+
+ public boolean hasTokenFilter(String name) {
+ return tokenFilterFactoryFactory(name) != null;
+ }
+
+ public Map<String, PreBuiltTokenFilterFactoryFactory> tokenFilterFactories() {
+ return tokenFilterFactories;
+ }
+
+ public TokenFilterFactoryFactory tokenFilterFactoryFactory(String name) {
+ return tokenFilterFactories.get(name);
+ }
+
+ public boolean hasTokenizer(String name) {
+ return tokenizerFactoryFactory(name) != null;
+ }
+
+ public Map<String, PreBuiltTokenizerFactoryFactory> tokenizerFactories() {
+ return tokenizerFactories;
+ }
+
+ public TokenizerFactoryFactory tokenizerFactoryFactory(String name) {
+ return tokenizerFactories.get(name);
+ }
+
+ public Map<String, PreBuiltAnalyzerProviderFactory> analyzerProviderFactories() {
+ return analyzerProviderFactories;
+ }
+
+ public PreBuiltAnalyzerProviderFactory analyzerProviderFactory(String name) {
+ return analyzerProviderFactories.get(name);
+ }
+
+ public boolean hasAnalyzer(String name) {
+ return analyzerProviderFactories.containsKey(name);
+ }
+
+ public Analyzer analyzer(String name) {
+ PreBuiltAnalyzerProviderFactory analyzerProviderFactory = analyzerProviderFactory(name);
+ if (analyzerProviderFactory == null) {
+ return null;
+ }
+ return analyzerProviderFactory.analyzer();
+ }
+
+ public void close() {
+ for (PreBuiltAnalyzerProviderFactory analyzerProviderFactory : analyzerProviderFactories.values()) {
+ try {
+ analyzerProviderFactory.analyzer().close();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java
new file mode 100644
index 0000000..a5aae82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java
@@ -0,0 +1,419 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.ar.ArabicAnalyzer;
+import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
+import org.apache.lucene.analysis.br.BrazilianAnalyzer;
+import org.apache.lucene.analysis.ca.CatalanAnalyzer;
+import org.apache.lucene.analysis.cjk.CJKAnalyzer;
+import org.apache.lucene.analysis.cn.ChineseAnalyzer;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.core.SimpleAnalyzer;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.cz.CzechAnalyzer;
+import org.apache.lucene.analysis.da.DanishAnalyzer;
+import org.apache.lucene.analysis.de.GermanAnalyzer;
+import org.apache.lucene.analysis.el.GreekAnalyzer;
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
+import org.apache.lucene.analysis.es.SpanishAnalyzer;
+import org.apache.lucene.analysis.eu.BasqueAnalyzer;
+import org.apache.lucene.analysis.fa.PersianAnalyzer;
+import org.apache.lucene.analysis.fi.FinnishAnalyzer;
+import org.apache.lucene.analysis.fr.FrenchAnalyzer;
+import org.apache.lucene.analysis.ga.IrishAnalyzer;
+import org.apache.lucene.analysis.gl.GalicianAnalyzer;
+import org.apache.lucene.analysis.hi.HindiAnalyzer;
+import org.apache.lucene.analysis.hu.HungarianAnalyzer;
+import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
+import org.apache.lucene.analysis.id.IndonesianAnalyzer;
+import org.apache.lucene.analysis.it.ItalianAnalyzer;
+import org.apache.lucene.analysis.lv.LatvianAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PatternAnalyzer;
+import org.apache.lucene.analysis.nl.DutchAnalyzer;
+import org.apache.lucene.analysis.no.NorwegianAnalyzer;
+import org.apache.lucene.analysis.pt.PortugueseAnalyzer;
+import org.apache.lucene.analysis.ro.RomanianAnalyzer;
+import org.apache.lucene.analysis.ru.RussianAnalyzer;
+import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
+import org.apache.lucene.analysis.standard.ClassicAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.sv.SwedishAnalyzer;
+import org.apache.lucene.analysis.th.ThaiAnalyzer;
+import org.apache.lucene.analysis.tr.TurkishAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzer;
+import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
+
+import java.util.Locale;
+
+/**
+ * The analyzers that ship with elasticsearch, instantiated lazily per version and
+ * cached according to each constant's {@link PreBuiltCacheFactory.CachingStrategy}.
+ */
+public enum PreBuiltAnalyzers {
+
+ STANDARD(CachingStrategy.ELASTICSEARCH) { // as of 1.0.0.Beta1 the standard analyzer no longer uses stopwords by default
+ @Override
+ protected Analyzer create(Version version) {
+ if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
+ return new StandardAnalyzer(version.luceneVersion, CharArraySet.EMPTY_SET);
+ }
+ return new StandardAnalyzer(version.luceneVersion);
+ }
+ },
+
+ DEFAULT(CachingStrategy.ELASTICSEARCH){
+ @Override
+ protected Analyzer create(Version version) {
+ // calling getAnalyzer ensures that the same STANDARD analyzer instance is
+ // reused for DEFAULT; this call does not create a new instance
+ return STANDARD.getAnalyzer(version);
+ }
+ },
+
+ KEYWORD(CachingStrategy.ONE) {
+ @Override
+ protected Analyzer create(Version version) {
+ return new KeywordAnalyzer();
+ }
+ },
+
+ STOP {
+ @Override
+ protected Analyzer create(Version version) {
+ return new StopAnalyzer(version.luceneVersion);
+ }
+ },
+
+ WHITESPACE {
+ @Override
+ protected Analyzer create(Version version) {
+ return new WhitespaceAnalyzer(version.luceneVersion);
+ }
+ },
+
+ SIMPLE {
+ @Override
+ protected Analyzer create(Version version) {
+ return new SimpleAnalyzer(version.luceneVersion);
+ }
+ },
+
+ CLASSIC {
+ @Override
+ protected Analyzer create(Version version) {
+ return new ClassicAnalyzer(version.luceneVersion);
+ }
+ },
+
+ SNOWBALL {
+ @Override
+ protected Analyzer create(Version version) {
+ return new SnowballAnalyzer(version.luceneVersion, "English", StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+ },
+
+ PATTERN(CachingStrategy.ELASTICSEARCH) {
+ @Override
+ protected Analyzer create(Version version) {
+ if (version.onOrAfter(Version.V_1_0_0_RC1)) {
+ return new PatternAnalyzer(version.luceneVersion, Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET);
+ }
+ return new PatternAnalyzer(version.luceneVersion, Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+ },
+
+ STANDARD_HTML_STRIP(CachingStrategy.ELASTICSEARCH) {
+ @Override
+ protected Analyzer create(Version version) {
+ if (version.onOrAfter(Version.V_1_0_0_RC1)) {
+ return new StandardHtmlStripAnalyzer(version.luceneVersion, CharArraySet.EMPTY_SET);
+ }
+ return new StandardHtmlStripAnalyzer(version.luceneVersion);
+ }
+ },
+
+ ARABIC {
+ @Override
+ protected Analyzer create(Version version) {
+ return new ArabicAnalyzer(version.luceneVersion);
+ }
+ },
+
+ ARMENIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new ArmenianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ BASQUE {
+ @Override
+ protected Analyzer create(Version version) {
+ return new BasqueAnalyzer(version.luceneVersion);
+ }
+ },
+
+ BRAZILIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new BrazilianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ BULGARIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new BulgarianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ CATALAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new CatalanAnalyzer(version.luceneVersion);
+ }
+ },
+
+ CHINESE(CachingStrategy.ONE) {
+ @Override
+ protected Analyzer create(Version version) {
+ return new ChineseAnalyzer();
+ }
+ },
+
+ CJK {
+ @Override
+ protected Analyzer create(Version version) {
+ return new CJKAnalyzer(version.luceneVersion);
+ }
+ },
+
+ CZECH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new CzechAnalyzer(version.luceneVersion);
+ }
+ },
+
+ DUTCH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new DutchAnalyzer(version.luceneVersion);
+ }
+ },
+
+ DANISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new DanishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ ENGLISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new EnglishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ FINNISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new FinnishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ FRENCH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new FrenchAnalyzer(version.luceneVersion);
+ }
+ },
+
+ GALICIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new GalicianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ GERMAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new GermanAnalyzer(version.luceneVersion);
+ }
+ },
+
+ GREEK {
+ @Override
+ protected Analyzer create(Version version) {
+ return new GreekAnalyzer(version.luceneVersion);
+ }
+ },
+
+ HINDI {
+ @Override
+ protected Analyzer create(Version version) {
+ return new HindiAnalyzer(version.luceneVersion);
+ }
+ },
+
+ HUNGARIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new HungarianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ INDONESIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new IndonesianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ IRISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new IrishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ ITALIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new ItalianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ LATVIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new LatvianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ NORWEGIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new NorwegianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ PERSIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new PersianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ PORTUGUESE {
+ @Override
+ protected Analyzer create(Version version) {
+ return new PortugueseAnalyzer(version.luceneVersion);
+ }
+ },
+
+ ROMANIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new RomanianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ RUSSIAN {
+ @Override
+ protected Analyzer create(Version version) {
+ return new RussianAnalyzer(version.luceneVersion);
+ }
+ },
+
+ SPANISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new SpanishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ SWEDISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new SwedishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ TURKISH {
+ @Override
+ protected Analyzer create(Version version) {
+ return new TurkishAnalyzer(version.luceneVersion);
+ }
+ },
+
+ THAI {
+ @Override
+ protected Analyzer create(Version version) {
+ return new ThaiAnalyzer(version.luceneVersion);
+ }
+ };
+
+ abstract protected Analyzer create(Version version);
+
+ protected final PreBuiltCacheFactory.PreBuiltCache<Analyzer> cache;
+
+ PreBuiltAnalyzers() {
+ this(PreBuiltCacheFactory.CachingStrategy.LUCENE);
+ }
+
+ PreBuiltAnalyzers(PreBuiltCacheFactory.CachingStrategy cachingStrategy) {
+ cache = PreBuiltCacheFactory.getCache(cachingStrategy);
+ }
+
+ PreBuiltCacheFactory.PreBuiltCache<Analyzer> getCache() {
+ return cache;
+ }
+
+ public synchronized Analyzer getAnalyzer(Version version) {
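+ // Memoize per this constant's caching strategy: consult the cache first and
+ // only build a new analyzer instance on a miss.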
+ Analyzer analyzer = cache.get(version);
+ if (analyzer == null) {
+ analyzer = this.create(version);
+ cache.put(version, analyzer);
+ }
+
+ return analyzer;
+ }
+
+ /**
+ * Get a pre-built Analyzer by its name, or fall back to the default one
+ * @param name Analyzer name
+ * @param defaultAnalyzer default Analyzer if name not found
+ */
+ public static PreBuiltAnalyzers getOrDefault(String name, PreBuiltAnalyzers defaultAnalyzer) {
+ try {
+ return valueOf(name.toUpperCase(Locale.ROOT));
+ } catch (IllegalArgumentException e) {
+ return defaultAnalyzer;
+ }
+ }
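+
+ // Lookup is case-insensitive; a hypothetical example:
+ //
+ //   PreBuiltAnalyzers analyzer = PreBuiltAnalyzers.getOrDefault("standard", PreBuiltAnalyzers.STANDARD);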
+
+}
diff --git a/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java
new file mode 100644
index 0000000..a71dc57
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+
+import java.util.Map;
+
+/**
+ * Creates the version-keyed caches used by the pre-built analysis components.
+ */
+public class PreBuiltCacheFactory {
+
+ /**
+ * The strategy used to cache an analysis component:
+ * <ul>
+ *     <li>{@code ONE} - Exactly one instance is stored. Useful for components that carry no version information.</li>
+ *     <li>{@code LUCENE} - One instance per Lucene version is stored, so elasticsearch versions that share a Lucene version reuse the same instance.</li>
+ *     <li>{@code ELASTICSEARCH} - One instance per elasticsearch version is stored. Useful when a component changes between elasticsearch releases while the Lucene version stays the same.</li>
+ * </ul>
+ */
+ enum CachingStrategy { ONE, LUCENE, ELASTICSEARCH }
+
+ public interface PreBuiltCache<T> {
+ T get(Version version);
+ void put(Version version, T t);
+ }
+
+ private PreBuiltCacheFactory() {}
+
+ static <T> PreBuiltCache<T> getCache(CachingStrategy cachingStrategy) {
+ switch (cachingStrategy) {
+ case ONE:
+ return new PreBuiltCacheStrategyOne<T>();
+ case LUCENE:
+ return new PreBuiltCacheStrategyLucene<T>();
+ case ELASTICSEARCH:
+ return new PreBuiltCacheStrategyElasticsearch<T>();
+ default:
+ throw new ElasticsearchException("No cache configured for caching strategy [" + cachingStrategy + "]");
+ }
+ }
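+
+ // A minimal usage sketch (hypothetical caller; see e.g. PreBuiltAnalyzers):
+ //
+ //   PreBuiltCache<Analyzer> cache = PreBuiltCacheFactory.getCache(CachingStrategy.LUCENE);
+ //   cache.put(Version.CURRENT, analyzer); // keyed per the chosen strategy
+ //   Analyzer cached = cache.get(Version.CURRENT);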
+
+ /**
+ * This is a pretty simple cache, it only contains one version
+ */
+ private static class PreBuiltCacheStrategyOne<T> implements PreBuiltCache<T> {
+
+ private T model = null;
+
+ @Override
+ public T get(Version version) {
+ return model;
+ }
+
+ @Override
+ public void put(Version version, T model) {
+ this.model = model;
+ }
+ }
+
+ /**
+ * This cache contains one version for each elasticsearch version object
+ */
+ private static class PreBuiltCacheStrategyElasticsearch<T> implements PreBuiltCache<T> {
+
+ Map<Version, T> mapModel = Maps.newHashMapWithExpectedSize(2);
+
+ @Override
+ public T get(Version version) {
+ return mapModel.get(version);
+ }
+
+ @Override
+ public void put(Version version, T model) {
+ mapModel.put(version, model);
+ }
+ }
+
+ /**
+ * This cache uses the lucene version for caching
+ */
+ private static class PreBuiltCacheStrategyLucene<T> implements PreBuiltCache<T> {
+
+ private Map<org.apache.lucene.util.Version, T> mapModel = Maps.newHashMapWithExpectedSize(2);
+
+ @Override
+ public T get(Version version) {
+ return mapModel.get(version.luceneVersion);
+ }
+
+ @Override
+ public void put(Version version, T model) {
+ mapModel.put(version.luceneVersion, model);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java
new file mode 100644
index 0000000..3e86478
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
+import org.elasticsearch.Version;
+import org.elasticsearch.index.analysis.CharFilterFactory;
+import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
+
+import java.io.Reader;
+import java.util.Locale;
+
+/**
+ * The char filters that ship with elasticsearch, instantiated lazily per version and cached.
+ */
+public enum PreBuiltCharFilters {
+
+ HTML_STRIP(CachingStrategy.ONE) {
+ @Override
+ public Reader create(Reader tokenStream, Version version) {
+ return new HTMLStripCharFilter(tokenStream);
+ }
+ };
+
+ abstract public Reader create(Reader tokenStream, Version version);
+
+ protected final PreBuiltCacheFactory.PreBuiltCache<CharFilterFactory> cache;
+
+ PreBuiltCharFilters(CachingStrategy cachingStrategy) {
+ cache = PreBuiltCacheFactory.getCache(cachingStrategy);
+ }
+
+ public synchronized CharFilterFactory getCharFilterFactory(final Version version) {
+ CharFilterFactory charFilterFactory = cache.get(version);
+ if (charFilterFactory == null) {
+ final String finalName = name();
+
+ charFilterFactory = new CharFilterFactory() {
+ @Override
+ public String name() {
+ return finalName.toLowerCase(Locale.ROOT);
+ }
+
+ @Override
+ public Reader create(Reader tokenStream) {
+ return valueOf(finalName).create(tokenStream, version);
+ }
+ };
+ cache.put(version, charFilterFactory);
+ }
+
+ return charFilterFactory;
+ }
+
+ /**
+ * Get a pre-built CharFilter by its name, or fall back to the default one
+ * @param name CharFilter name
+ * @param defaultCharFilter default CharFilter if name not found
+ */
+ public static PreBuiltCharFilters getOrDefault(String name, PreBuiltCharFilters defaultCharFilter) {
+ try {
+ return valueOf(name.toUpperCase(Locale.ROOT));
+ } catch (IllegalArgumentException e) {
+ return defaultCharFilter;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
new file mode 100644
index 0000000..ce195a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
+import org.apache.lucene.analysis.ar.ArabicStemFilter;
+import org.apache.lucene.analysis.br.BrazilianStemFilter;
+import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.cz.CzechStemFilter;
+import org.apache.lucene.analysis.de.GermanStemFilter;
+import org.apache.lucene.analysis.en.KStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
+import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
+import org.apache.lucene.analysis.fr.FrenchAnalyzer;
+import org.apache.lucene.analysis.fr.FrenchStemFilter;
+import org.apache.lucene.analysis.miscellaneous.*;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
+import org.apache.lucene.analysis.ngram.NGramTokenFilter;
+import org.apache.lucene.analysis.nl.DutchStemFilter;
+import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.snowball.SnowballFilter;
+import org.apache.lucene.analysis.standard.ClassicFilter;
+import org.apache.lucene.analysis.standard.StandardFilter;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.ElisionFilter;
+import org.elasticsearch.Version;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
+
+import java.util.Locale;
+
+/**
+ * The token filters that ship with elasticsearch, instantiated lazily per version and cached.
+ */
+public enum PreBuiltTokenFilters {
+
+ WORD_DELIMITER(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
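+ // Default flags: split on case changes, numerics and intra-word delimiters,
+ // emit the generated word and number parts, and strip English possessives.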
+ return new WordDelimiterFilter(tokenStream,
+ WordDelimiterFilter.GENERATE_WORD_PARTS |
+ WordDelimiterFilter.GENERATE_NUMBER_PARTS |
+ WordDelimiterFilter.SPLIT_ON_CASE_CHANGE |
+ WordDelimiterFilter.SPLIT_ON_NUMERICS |
+ WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
+ }
+ },
+
+ STOP(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new StopFilter(version.luceneVersion, tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+ },
+
+ TRIM(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new TrimFilter(version.luceneVersion, tokenStream);
+ }
+ },
+
+ REVERSE(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ReverseStringFilter(version.luceneVersion, tokenStream);
+ }
+ },
+
+ ASCIIFOLDING(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ASCIIFoldingFilter(tokenStream);
+ }
+ },
+
+ LENGTH(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new LengthFilter(version.luceneVersion, tokenStream, 0, Integer.MAX_VALUE);
+ }
+ },
+
+ COMMON_GRAMS(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new CommonGramsFilter(version.luceneVersion, tokenStream, CharArraySet.EMPTY_SET);
+ }
+ },
+
+ LOWERCASE(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new LowerCaseFilter(version.luceneVersion, tokenStream);
+ }
+ },
+
+ KSTEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new KStemFilter(tokenStream);
+ }
+ },
+
+ PORTER_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new PorterStemFilter(tokenStream);
+ }
+ },
+
+ STANDARD(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new StandardFilter(version.luceneVersion, tokenStream);
+ }
+ },
+
+ CLASSIC(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ClassicFilter(tokenStream);
+ }
+ },
+
+ NGRAM(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new NGramTokenFilter(version.luceneVersion, tokenStream);
+ }
+ },
+
+ EDGE_NGRAM(CachingStrategy.LUCENE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new EdgeNGramTokenFilter(version.luceneVersion, tokenStream, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
+ }
+ },
+
+ UNIQUE(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new UniqueTokenFilter(tokenStream);
+ }
+ },
+
+ TRUNCATE(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new TruncateTokenFilter(tokenStream, 10);
+ }
+ },
+
+ // Extended Token Filters
+ SNOWBALL(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new SnowballFilter(tokenStream, "English");
+ }
+ },
+
+ STEMMER(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new PorterStemFilter(tokenStream);
+ }
+ },
+
+ ELISION(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ElisionFilter(tokenStream, FrenchAnalyzer.DEFAULT_ARTICLES);
+ }
+ },
+
+ ARABIC_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ArabicStemFilter(tokenStream);
+ }
+ },
+
+ BRAZILIAN_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new BrazilianStemFilter(tokenStream);
+ }
+ },
+
+ CZECH_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new CzechStemFilter(tokenStream);
+ }
+ },
+
+ DUTCH_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new DutchStemFilter(tokenStream);
+ }
+ },
+
+ FRENCH_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new FrenchStemFilter(tokenStream);
+ }
+ },
+
+ GERMAN_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new GermanStemFilter(tokenStream);
+ }
+ },
+
+ RUSSIAN_STEM(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new SnowballFilter(tokenStream, "Russian");
+ }
+ },
+
+ KEYWORD_REPEAT(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new KeywordRepeatFilter(tokenStream);
+ }
+ },
+
+ ARABIC_NORMALIZATION(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ArabicNormalizationFilter(tokenStream);
+ }
+ },
+
+ PERSIAN_NORMALIZATION(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new PersianNormalizationFilter(tokenStream);
+ }
+ },
+
+ TYPE_AS_PAYLOAD(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new TypeAsPayloadTokenFilter(tokenStream);
+ }
+ },
+
+ SHINGLE(CachingStrategy.ONE) {
+ @Override
+ public TokenStream create(TokenStream tokenStream, Version version) {
+ return new ShingleFilter(tokenStream);
+ }
+ };
+
+ abstract public TokenStream create(TokenStream tokenStream, Version version);
+
+ protected final PreBuiltCacheFactory.PreBuiltCache<TokenFilterFactory> cache;
+
+
+ PreBuiltTokenFilters(CachingStrategy cachingStrategy) {
+ cache = PreBuiltCacheFactory.getCache(cachingStrategy);
+ }
+
+ public synchronized TokenFilterFactory getTokenFilterFactory(final Version version) {
+ TokenFilterFactory factory = cache.get(version);
+ if (factory == null) {
+ final String finalName = name();
+ factory = new TokenFilterFactory() {
+ @Override
+ public String name() {
+ return finalName.toLowerCase(Locale.ROOT);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return valueOf(finalName).create(tokenStream, version);
+ }
+ };
+ cache.put(version, factory);
+ }
+
+ return factory;
+ }
+
+ /**
+ * Get a pre-built TokenFilter by its name, or fall back to the default one
+ * @param name TokenFilter name
+ * @param defaultTokenFilter default TokenFilter if name not found
+ */
+ public static PreBuiltTokenFilters getOrDefault(String name, PreBuiltTokenFilters defaultTokenFilter) {
+ try {
+ return valueOf(name.toUpperCase(Locale.ROOT));
+ } catch (IllegalArgumentException e) {
+ return defaultTokenFilter;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java
new file mode 100644
index 0000000..96438a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.analysis.core.LetterTokenizer;
+import org.apache.lucene.analysis.core.LowerCaseTokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
+import org.apache.lucene.analysis.ngram.NGramTokenizer;
+import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
+import org.apache.lucene.analysis.pattern.PatternTokenizer;
+import org.apache.lucene.analysis.standard.ClassicTokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.analysis.TokenizerFactory;
+import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
+
+import java.io.Reader;
+import java.util.Locale;
+
+/**
+ *
+ */
+public enum PreBuiltTokenizers {
+
+ STANDARD(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new StandardTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ CLASSIC(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new ClassicTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ UAX_URL_EMAIL(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new UAX29URLEmailTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ PATH_HIERARCHY(CachingStrategy.ONE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new PathHierarchyTokenizer(reader);
+ }
+ },
+
+ KEYWORD(CachingStrategy.ONE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new KeywordTokenizer(reader);
+ }
+ },
+
+ LETTER(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new LetterTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ LOWERCASE(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new LowerCaseTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ WHITESPACE(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new WhitespaceTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ NGRAM(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new NGramTokenizer(version.luceneVersion, reader);
+ }
+ },
+
+ EDGE_NGRAM(CachingStrategy.LUCENE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new EdgeNGramTokenizer(version.luceneVersion, reader, EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
+ }
+ },
+
+ PATTERN(CachingStrategy.ONE) {
+ @Override
+ protected Tokenizer create(Reader reader, Version version) {
+ return new PatternTokenizer(reader, Regex.compile("\\W+", null), -1);
+ }
+ };
+
+ protected abstract Tokenizer create(Reader reader, Version version);
+
+ protected final PreBuiltCacheFactory.PreBuiltCache<TokenizerFactory> cache;
+
+
+ PreBuiltTokenizers(CachingStrategy cachingStrategy) {
+ cache = PreBuiltCacheFactory.getCache(cachingStrategy);
+ }
+
+ public synchronized TokenizerFactory getTokenizerFactory(final Version version) {
+ TokenizerFactory tokenizerFactory = cache.get(version);
+ if (tokenizerFactory == null) {
+ final String finalName = name();
+
+ tokenizerFactory = new TokenizerFactory() {
+ @Override
+ public String name() {
+ return finalName.toLowerCase(Locale.ROOT);
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return valueOf(finalName).create(reader, version);
+ }
+ };
+ cache.put(version, tokenizerFactory);
+ }
+
+ return tokenizerFactory;
+ }
+
+ /**
+ * Get a pre-built Tokenizer by its name, or fall back to the default one
+ * @param name Tokenizer name
+ * @param defaultTokenizer default Tokenizer to use if the name is not found
+ */
+ public static PreBuiltTokenizers getOrDefault(String name, PreBuiltTokenizers defaultTokenizer) {
+ try {
+ return valueOf(name.toUpperCase(Locale.ROOT));
+ } catch (IllegalArgumentException e) {
+ return defaultTokenizer;
+ }
+ }
+}
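
The tokenizers follow the same caching pattern, keyed on a Reader instead of a TokenStream. A hedged sketch (Version.CURRENT and the wrapper class are assumptions):

    import java.io.StringReader;
    import org.apache.lucene.analysis.Tokenizer;
    import org.elasticsearch.Version;
    import org.elasticsearch.index.analysis.TokenizerFactory;
    import org.elasticsearch.indices.analysis.PreBuiltTokenizers;

    class PreBuiltTokenizerSketch {
        // Resolve a tokenizer by name (falling back to STANDARD) and tokenize a string.
        static Tokenizer standard(String text) {
            PreBuiltTokenizers preBuilt =
                    PreBuiltTokenizers.getOrDefault("standard", PreBuiltTokenizers.STANDARD);
            TokenizerFactory factory = preBuilt.getTokenizerFactory(Version.CURRENT);
            return factory.create(new StringReader(text));
        }
    }
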
diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java
new file mode 100644
index 0000000..978d567
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cache.filter;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.google.common.base.Objects;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.lucene.search.DocIdSet;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.MemorySizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+public class IndicesFilterCache extends AbstractComponent implements RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> {
+
+ private final ThreadPool threadPool;
+ private final CacheRecycler cacheRecycler;
+
+ private Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache;
+
+ private volatile String size;
+ private volatile long sizeInBytes;
+ private volatile TimeValue expire;
+
+ private final TimeValue cleanInterval;
+
+ private final Set<Object> readersKeysToClean = ConcurrentCollections.newConcurrentSet();
+
+ private volatile boolean closed;
+
+
+ public static final String INDICES_CACHE_FILTER_SIZE = "indices.cache.filter.size";
+ public static final String INDICES_CACHE_FILTER_EXPIRE = "indices.cache.filter.expire";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ boolean replace = false;
+ String size = settings.get(INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size);
+ if (!size.equals(IndicesFilterCache.this.size)) {
+ logger.info("updating [indices.cache.filter.size] from [{}] to [{}]", IndicesFilterCache.this.size, size);
+ IndicesFilterCache.this.size = size;
+ replace = true;
+ }
+ TimeValue expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire);
+ if (!Objects.equal(expire, IndicesFilterCache.this.expire)) {
+ logger.info("updating [indices.cache.filter.expire] from [{}] to [{}]", IndicesFilterCache.this.expire, expire);
+ IndicesFilterCache.this.expire = expire;
+ replace = true;
+ }
+ if (replace) {
+ Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> oldCache = IndicesFilterCache.this.cache;
+ computeSizeInBytes();
+ buildCache();
+ oldCache.invalidateAll();
+ }
+ }
+ }
+
+ @Inject
+ public IndicesFilterCache(Settings settings, ThreadPool threadPool, CacheRecycler cacheRecycler, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.cacheRecycler = cacheRecycler;
+ this.size = componentSettings.get("size", "20%");
+ this.expire = componentSettings.getAsTime("expire", null);
+ this.cleanInterval = componentSettings.getAsTime("clean_interval", TimeValue.timeValueSeconds(60));
+ computeSizeInBytes();
+ buildCache();
+ logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], expire [{}], clean_interval [{}]",
+ size, new ByteSizeValue(sizeInBytes), expire, cleanInterval);
+
+ nodeSettingsService.addListener(new ApplySettings());
+ threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, new ReaderCleaner());
+ }
+
+ private void buildCache() {
+ CacheBuilder<WeightedFilterCache.FilterCacheKey, DocIdSet> cacheBuilder = CacheBuilder.newBuilder()
+ .removalListener(this)
+ .maximumWeight(sizeInBytes).weigher(new WeightedFilterCache.FilterCacheValueWeigher());
+
+ // defaults to 4, but this is a busy map for all indices, increase it a bit
+ cacheBuilder.concurrencyLevel(16);
+
+ if (expire != null) {
+ cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ cache = cacheBuilder.build();
+ }
+
+ private void computeSizeInBytes() {
+ this.sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes();
+ }
+
+ public void addReaderKeyToClean(Object readerKey) {
+ readersKeysToClean.add(readerKey);
+ }
+
+ public void close() {
+ closed = true;
+ cache.invalidateAll();
+ }
+
+ public Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache() {
+ return this.cache;
+ }
+
+ @Override
+ public void onRemoval(RemovalNotification<WeightedFilterCache.FilterCacheKey, DocIdSet> removalNotification) {
+ WeightedFilterCache.FilterCacheKey key = removalNotification.getKey();
+ if (key == null) {
+ return;
+ }
+ if (key.removalListener != null) {
+ key.removalListener.onRemoval(removalNotification);
+ }
+ }
+
+ /**
+ * We need this class because we have to clean all the filters associated with a reader,
+ * but we don't want to do that every time a reader closes, since iterating over the whole map
+ * is expensive. There doesn't seem to be a nicer way to do it (and maintaining a per-reader
+ * list of filters would cost more).
+ */
+ class ReaderCleaner implements Runnable {
+
+ @Override
+ public void run() {
+ if (closed) {
+ return;
+ }
+ if (readersKeysToClean.isEmpty()) {
+ schedule();
+ return;
+ }
+ try {
+ threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
+ @Override
+ public void run() {
+ Recycler.V<ObjectOpenHashSet<Object>> keys = cacheRecycler.hashSet(-1);
+ try {
+ for (Iterator<Object> it = readersKeysToClean.iterator(); it.hasNext(); ) {
+ keys.v().add(it.next());
+ it.remove();
+ }
+ cache.cleanUp();
+ if (!keys.v().isEmpty()) {
+ for (Iterator<WeightedFilterCache.FilterCacheKey> it = cache.asMap().keySet().iterator(); it.hasNext(); ) {
+ WeightedFilterCache.FilterCacheKey filterCacheKey = it.next();
+ if (keys.v().contains(filterCacheKey.readerKey())) {
+ // same as invalidate
+ it.remove();
+ }
+ }
+ }
+ schedule();
+ } finally {
+ keys.release();
+ }
+ }
+ });
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Can not run ReaderCleaner - execution rejected", ex);
+ }
+ }
+
+ private void schedule() {
+ try {
+ threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this);
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Can not schedule ReaderCleaner - execution rejected", ex);
+ }
+ }
+ }
+} \ No newline at end of file
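
buildCache() above is a standard Guava weight-bounded cache: eviction is driven by the accumulated weight of the entries rather than their count, with FilterCacheValueWeigher supplying per-entry sizes. A standalone sketch of the same construction pattern (the types and the size heuristic here are illustrative, not the ones used above):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.Weigher;

    class WeightedCacheSketch {
        static Cache<String, byte[]> build(long maxBytes) {
            return CacheBuilder.newBuilder()
                    .maximumWeight(maxBytes)                  // budget in "weight units" (bytes here)
                    .weigher(new Weigher<String, byte[]>() {  // weight = approximate entry size
                        @Override
                        public int weigh(String key, byte[] value) {
                            return key.length() + value.length;
                        }
                    })
                    .concurrencyLevel(16)                     // same tuning as the filter cache above
                    .build();
        }
    }
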
diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/terms/IndicesTermsFilterCache.java b/src/main/java/org/elasticsearch/indices/cache/filter/terms/IndicesTermsFilterCache.java
new file mode 100644
index 0000000..bad9a24
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/cache/filter/terms/IndicesTermsFilterCache.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cache.filter.terms;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.Weigher;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public class IndicesTermsFilterCache extends AbstractComponent {
+
+ private static final TermsFilterValue NO_TERMS = new TermsFilterValue(0, Queries.MATCH_NO_FILTER);
+
+ private final Client client;
+
+ private final Cache<BytesRef, TermsFilterValue> cache;
+
+ @Inject
+ public IndicesTermsFilterCache(Settings settings, Client client) {
+ super(settings);
+ this.client = client;
+
+ ByteSizeValue size = componentSettings.getAsBytesSize("size", new ByteSizeValue(10, ByteSizeUnit.MB));
+ TimeValue expireAfterWrite = componentSettings.getAsTime("expire_after_write", null);
+ TimeValue expireAfterAccess = componentSettings.getAsTime("expire_after_access", null);
+
+ CacheBuilder<BytesRef, TermsFilterValue> builder = CacheBuilder.newBuilder()
+ .maximumWeight(size.bytes())
+ .weigher(new TermsFilterValueWeigher());
+
+ if (expireAfterAccess != null) {
+ builder.expireAfterAccess(expireAfterAccess.millis(), TimeUnit.MILLISECONDS);
+ }
+ if (expireAfterWrite != null) {
+ builder.expireAfterWrite(expireAfterWrite.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ this.cache = builder.build();
+ }
+
+ @Nullable
+ public Filter termsFilter(final TermsLookup lookup, boolean cacheLookup, @Nullable CacheKeyFilter.Key cacheKey) throws RuntimeException {
+ if (!cacheLookup) {
+ return buildTermsFilterValue(lookup).filter;
+ }
+
+ BytesRef key;
+ if (cacheKey != null) {
+ key = new BytesRef(cacheKey.bytes());
+ } else {
+ key = new BytesRef(lookup.toString());
+ }
+ try {
+ return cache.get(key, new Callable<TermsFilterValue>() {
+ @Override
+ public TermsFilterValue call() throws Exception {
+ return buildTermsFilterValue(lookup);
+ }
+ }).filter;
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof RuntimeException) {
+ throw (RuntimeException) e.getCause();
+ }
+ throw new ElasticsearchException(e.getMessage(), e.getCause());
+ }
+ }
+
+ TermsFilterValue buildTermsFilterValue(TermsLookup lookup) {
+ GetResponse getResponse = client.get(new GetRequest(lookup.getIndex(), lookup.getType(), lookup.getId()).preference("_local").routing(lookup.getRouting())).actionGet();
+ if (!getResponse.isExists()) {
+ return NO_TERMS;
+ }
+ List<Object> values = XContentMapValues.extractRawValues(lookup.getPath(), getResponse.getSourceAsMap());
+ if (values.isEmpty()) {
+ return NO_TERMS;
+ }
+ Filter filter = lookup.getFieldMapper().termsFilter(values, lookup.getQueryParseContext());
+ return new TermsFilterValue(estimateSizeInBytes(values), filter);
+ }
+
+ long estimateSizeInBytes(List<Object> terms) {
+ long size = 8;
+ for (Object term : terms) {
+ if (term instanceof BytesRef) {
+ size += ((BytesRef) term).length;
+ } else if (term instanceof String) {
+ // approximately two bytes per UTF-16 char
+ size += ((String) term).length() * 2;
+ } else {
+ size += 4;
+ }
+ }
+ return size;
+ }
+
+ public void clear(String reason) {
+ cache.invalidateAll();
+ }
+
+ public void clear(String reason, String[] keys) {
+ for (String key : keys) {
+ cache.invalidate(new BytesRef(key));
+ }
+ }
+
+ static class TermsFilterValueWeigher implements Weigher<BytesRef, TermsFilterValue> {
+
+ @Override
+ public int weigh(BytesRef key, TermsFilterValue value) {
+ return (int) (key.length + value.sizeInBytes);
+ }
+ }
+
+ // TODO: if TermsFilter exposed sizeInBytes, we won't need this wrapper
+ static class TermsFilterValue {
+ public final long sizeInBytes;
+ public final Filter filter;
+
+ TermsFilterValue(long sizeInBytes, Filter filter) {
+ this.sizeInBytes = sizeInBytes;
+ this.filter = filter;
+ }
+ }
+}
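
estimateSizeInBytes() above charges a flat 8 bytes of base overhead, the raw byte length for BytesRef terms, two bytes per char for String terms, and 4 bytes for anything else. Restated as a self-contained sketch with a worked check (the sample values are hypothetical):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.lucene.util.BytesRef;

    class TermsSizeSketch {
        static long estimate(List<Object> terms) {
            long size = 8; // base overhead
            for (Object term : terms) {
                if (term instanceof BytesRef) size += ((BytesRef) term).length;
                else if (term instanceof String) size += ((String) term).length() * 2;
                else size += 4;
            }
            return size;
        }
        // estimate(Arrays.<Object>asList(new BytesRef(new byte[5]), "foo", 42))
        // == 8 + 5 + 6 + 4 == 23 bytes
    }
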
diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java b/src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java
new file mode 100644
index 0000000..0bc72ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cache.filter.terms;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.query.QueryParseContext;
+
+/**
+ */
+public class TermsLookup {
+
+ private final FieldMapper fieldMapper;
+
+ private final String index;
+ private final String type;
+ private final String id;
+ private final String routing;
+ private final String path;
+
+ @Nullable
+ private final QueryParseContext queryParseContext;
+
+ public TermsLookup(FieldMapper fieldMapper, String index, String type, String id, String routing, String path, @Nullable QueryParseContext queryParseContext) {
+ this.fieldMapper = fieldMapper;
+ this.index = index;
+ this.type = type;
+ this.id = id;
+ this.routing = routing;
+ this.path = path;
+ this.queryParseContext = queryParseContext;
+ }
+
+ public FieldMapper getFieldMapper() {
+ return fieldMapper;
+ }
+
+ public String getIndex() {
+ return index;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public String getRouting() {
+ return this.routing;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ @Nullable
+ public QueryParseContext getQueryParseContext() {
+ return queryParseContext;
+ }
+
+ @Override
+ public String toString() {
+ return fieldMapper.names().fullName() + ":" + index + "/" + type + "/" + id + "/" + path;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
new file mode 100644
index 0000000..6b8ff62
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -0,0 +1,842 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cluster;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.ObjectContainer;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
+import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.IndexShardAlreadyExistsException;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.aliases.IndexAlias;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
+import org.elasticsearch.index.gateway.IndexShardGatewayService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryFailedException;
+import org.elasticsearch.indices.recovery.RecoveryStatus;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.indices.recovery.StartRecoveryRequest;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.ExceptionsHelper.detailedMessage;
+
+/**
+ *
+ */
+public class IndicesClusterStateService extends AbstractLifecycleComponent<IndicesClusterStateService> implements ClusterStateListener {
+
+ private final IndicesService indicesService;
+ private final ClusterService clusterService;
+ private final ThreadPool threadPool;
+ private final RecoveryTarget recoveryTarget;
+ private final ShardStateAction shardStateAction;
+ private final NodeIndexDeletedAction nodeIndexDeletedAction;
+ private final NodeMappingRefreshAction nodeMappingRefreshAction;
+
+ // a map of the mapping types we have seen per index via the cluster state
+ // we need it so we won't remove types that were automatically created as part of the indexing process
+ private final ConcurrentMap<Tuple<String, String>, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap();
+
+ // a list of shards that failed during recovery
+ // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
+ private final ConcurrentMap<ShardId, FailedShard> failedShards = ConcurrentCollections.newConcurrentMap();
+
+ static class FailedShard {
+ public final long version;
+ public final long timestamp;
+
+ FailedShard(long version) {
+ this.version = version;
+ this.timestamp = System.currentTimeMillis();
+ }
+ }
+
+ private final Object mutex = new Object();
+ private final FailedEngineHandler failedEngineHandler = new FailedEngineHandler();
+
+ private final boolean sendRefreshMapping;
+
+ @Inject
+ public IndicesClusterStateService(Settings settings, IndicesService indicesService, ClusterService clusterService,
+ ThreadPool threadPool, RecoveryTarget recoveryTarget,
+ ShardStateAction shardStateAction,
+ NodeIndexDeletedAction nodeIndexDeletedAction,
+ NodeMappingRefreshAction nodeMappingRefreshAction) {
+ super(settings);
+ this.indicesService = indicesService;
+ this.clusterService = clusterService;
+ this.threadPool = threadPool;
+ this.recoveryTarget = recoveryTarget;
+ this.shardStateAction = shardStateAction;
+ this.nodeIndexDeletedAction = nodeIndexDeletedAction;
+ this.nodeMappingRefreshAction = nodeMappingRefreshAction;
+
+ this.sendRefreshMapping = componentSettings.getAsBoolean("send_refresh_mapping", true);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ clusterService.addFirst(this);
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ clusterService.remove(this);
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public void clusterChanged(final ClusterChangedEvent event) {
+ if (!indicesService.changesAllowed()) {
+ return;
+ }
+
+ if (!lifecycle.started()) {
+ return;
+ }
+
+ synchronized (mutex) {
+ // we need to clean the shards and indices we have on this node, since we
+ // are going to recover them again once state persistence is disabled (no master / not recovered)
+ // TODO: this feels a bit hacky here, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks?
+ if (event.state().blocks().disableStatePersistence()) {
+ for (final String index : indicesService.indices()) {
+ IndexService indexService = indicesService.indexService(index);
+ for (Integer shardId : indexService.shardIds()) {
+ logger.debug("[{}][{}] removing shard (disabled block persistence)", index, shardId);
+ try {
+ indexService.removeShard(shardId, "removing shard (disabled block persistence)");
+ } catch (Throwable e) {
+ logger.warn("[{}] failed to remove shard (disabled block persistence)", e, index);
+ }
+ }
+ removeIndex(index, "cleaning index (disabled block persistence)");
+ }
+ return;
+ }
+
+ cleanFailedShards(event);
+ cleanMismatchedIndexUUIDs(event);
+ applyNewIndices(event);
+ applyMappings(event);
+ applyAliases(event);
+ applyNewOrUpdatedShards(event);
+ applyDeletedIndices(event);
+ applyDeletedShards(event);
+ applyCleanedIndices(event);
+ applySettings(event);
+ sendIndexLifecycleEvents(event);
+ }
+ }
+
+ private void sendIndexLifecycleEvents(final ClusterChangedEvent event) {
+ String localNodeId = event.state().nodes().localNodeId();
+ assert localNodeId != null;
+ for (String index : event.indicesDeleted()) {
+ try {
+ nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index, localNodeId);
+ } catch (Throwable e) {
+ logger.debug("failed to send to master index {} deleted event", e, index);
+ }
+ }
+ }
+
+ private void cleanMismatchedIndexUUIDs(final ClusterChangedEvent event) {
+ for (IndexService indexService : indicesService) {
+ IndexMetaData indexMetaData = event.state().metaData().index(indexService.index().name());
+ if (indexMetaData == null) {
+ // got deleted on us, will be deleted later
+ continue;
+ }
+ if (!indexMetaData.isSameUUID(indexService.indexUUID())) {
+ logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.index());
+ removeIndex(indexMetaData.index(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
+ }
+ }
+ }
+
+ private void applyCleanedIndices(final ClusterChangedEvent event) {
+ // handle closed indices, since they are not allocated on a node once they are closed
+ // so applyDeletedIndices might not take them into account
+ for (final String index : indicesService.indices()) {
+ IndexMetaData indexMetaData = event.state().metaData().index(index);
+ if (indexMetaData != null && indexMetaData.state() == IndexMetaData.State.CLOSE) {
+ IndexService indexService = indicesService.indexService(index);
+ for (Integer shardId : indexService.shardIds()) {
+ logger.debug("[{}][{}] removing shard (index is closed)", index, shardId);
+ try {
+ indexService.removeShard(shardId, "removing shard (index is closed)");
+ } catch (Throwable e) {
+ logger.warn("[{}] failed to remove shard (index is closed)", e, index);
+ }
+ }
+ }
+ }
+ for (final String index : indicesService.indices()) {
+ if (indicesService.indexService(index).shardIds().isEmpty()) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] cleaning index (no shards allocated)", index);
+ }
+ // clean the index
+ removeIndex(index, "removing index (no shards allocated)");
+ }
+ }
+ }
+
+ private void applyDeletedIndices(final ClusterChangedEvent event) {
+ for (final String index : indicesService.indices()) {
+ if (!event.state().metaData().hasIndex(index)) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] cleaning index, no longer part of the metadata", index);
+ }
+ removeIndex(index, "index no longer part of the metadata");
+ }
+ }
+ }
+
+ private void applyDeletedShards(final ClusterChangedEvent event) {
+ RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+ if (routingNode == null) {
+ return;
+ }
+ IntOpenHashSet newShardIds = new IntOpenHashSet();
+ for (IndexService indexService : indicesService) {
+ String index = indexService.index().name();
+ IndexMetaData indexMetaData = event.state().metaData().index(index);
+ if (indexMetaData == null) {
+ continue;
+ }
+ // now, go over and delete the shards that need to be deleted
+ newShardIds.clear();
+ for (MutableShardRouting shard : routingNode) {
+ if (shard.index().equals(index)) {
+ newShardIds.add(shard.id());
+ }
+ }
+ for (Integer existingShardId : indexService.shardIds()) {
+ if (!newShardIds.contains(existingShardId)) {
+ if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}] removing shard (index is closed)", index, existingShardId);
+ }
+ indexService.removeShard(existingShardId, "removing shard (index is closed)");
+ } else {
+ // we can just remove the shard, without cleaning it locally, since we will clean it
+ // when all shards are allocated in the IndicesStore
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}] removing shard (not allocated)", index, existingShardId);
+ }
+ indexService.removeShard(existingShardId, "removing shard (not allocated)");
+ }
+ }
+ }
+ }
+ }
+
+ private void applyNewIndices(final ClusterChangedEvent event) {
+ // we only create indices for shards that are allocated
+ RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+ if (routingNode == null) {
+ return;
+ }
+ for (MutableShardRouting shard : routingNode) {
+ if (!indicesService.hasIndex(shard.index())) {
+ final IndexMetaData indexMetaData = event.state().metaData().index(shard.index());
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] creating index", indexMetaData.index());
+ }
+ indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), event.state().nodes().localNode().id());
+ }
+ }
+ }
+
+ private void applySettings(ClusterChangedEvent event) {
+ if (!event.metaDataChanged()) {
+ return;
+ }
+ for (IndexMetaData indexMetaData : event.state().metaData()) {
+ if (!indicesService.hasIndex(indexMetaData.index())) {
+ // we only create / update here
+ continue;
+ }
+ // if the index metadata didn't change, there is no need to check for refreshed settings
+ if (!event.indexMetaDataChanged(indexMetaData)) {
+ continue;
+ }
+ String index = indexMetaData.index();
+ IndexService indexService = indicesService.indexServiceSafe(index);
+ IndexSettingsService indexSettingsService = indexService.injector().getInstance(IndexSettingsService.class);
+ indexSettingsService.refreshSettings(indexMetaData.settings());
+ }
+ }
+
+
+ private void applyMappings(ClusterChangedEvent event) {
+ // go over and update mappings
+ for (IndexMetaData indexMetaData : event.state().metaData()) {
+ if (!indicesService.hasIndex(indexMetaData.index())) {
+ // we only create / update here
+ continue;
+ }
+ List<String> typesToRefresh = null;
+ String index = indexMetaData.index();
+ IndexService indexService = indicesService.indexService(index);
+ if (indexService == null) {
+ // got deleted on us, ignore (closing the node)
+ return;
+ }
+ MapperService mapperService = indexService.mapperService();
+ // first, go over and update the _default_ mapping (if exists)
+ if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
+ processMapping(index, mapperService, MapperService.DEFAULT_MAPPING, indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
+ }
+
+ // go over and add the relevant mappings (or update them)
+ for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
+ MappingMetaData mappingMd = cursor.value;
+ String mappingType = mappingMd.type();
+ CompressedString mappingSource = mappingMd.source();
+ if (mappingType.equals(MapperService.DEFAULT_MAPPING)) { // we processed _default_ first
+ continue;
+ }
+ boolean requireRefresh = processMapping(index, mapperService, mappingType, mappingSource);
+ if (requireRefresh) {
+ if (typesToRefresh == null) {
+ typesToRefresh = Lists.newArrayList();
+ }
+ typesToRefresh.add(mappingType);
+ }
+ }
+ if (typesToRefresh != null) {
+ if (sendRefreshMapping) {
+ nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
+ new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.uuid(),
+ typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId()));
+ }
+ }
+ // go over and remove mappings
+ for (DocumentMapper documentMapper : mapperService) {
+ if (seenMappings.containsKey(new Tuple<String, String>(index, documentMapper.type())) && !indexMetaData.mappings().containsKey(documentMapper.type())) {
+ // we have it in our mappings, but not in the metadata, and we have seen it in the cluster state, remove it
+ mapperService.remove(documentMapper.type());
+ seenMappings.remove(new Tuple<String, String>(index, documentMapper.type()));
+ }
+ }
+ }
+ }
+
+ private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedString mappingSource) {
+ if (!seenMappings.containsKey(new Tuple<String, String>(index, mappingType))) {
+ seenMappings.put(new Tuple<String, String>(index, mappingType), true);
+ }
+
+ // a refresh mapping can happen for two reasons. The first is less urgent: the mapping on this
+ // node is ahead of the one in the cluster state (an update-mapping has already been sent,
+ // it just hasn't been processed and published yet). Eventually the mappings will converge, and
+ // the refresh mapping sent is more of a safekeeping measure (in case the update mapping failed
+ // to reach the master). The second case is when parsing/merging the mapping from the metadata
+ // doesn't result in the same mapping; in that case we ask the master to refresh its own version
+ // of the mappings (to conform with the merged version, which it does when refreshing), and log it at warn level.
+ boolean requiresRefresh = false;
+ try {
+ if (!mapperService.hasMapping(mappingType)) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
+ }
+ // we don't apply default, since it has been applied when the mappings were parsed initially
+ mapperService.merge(mappingType, mappingSource, false);
+ if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
+ logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
+ requiresRefresh = true;
+ }
+ } else {
+ DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
+ if (!mappingSource.equals(existingMapper.mappingSource())) {
+ // mapping changed, update it
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
+ }
+ // we don't apply default, since it has been applied when the mappings were parsed initially
+ mapperService.merge(mappingType, mappingSource, false);
+ if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
+ requiresRefresh = true;
+ logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
+ }
+ }
+ }
+ } catch (Throwable e) {
+ logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource);
+ }
+ return requiresRefresh;
+ }
+
+ private boolean aliasesChanged(ClusterChangedEvent event) {
+ return !event.state().metaData().aliases().equals(event.previousState().metaData().aliases()) ||
+ !event.state().routingTable().equals(event.previousState().routingTable());
+ }
+
+ private void applyAliases(ClusterChangedEvent event) {
+ // check if aliases changed
+ if (aliasesChanged(event)) {
+ // go over and update aliases
+ for (IndexMetaData indexMetaData : event.state().metaData()) {
+ if (!indicesService.hasIndex(indexMetaData.index())) {
+ // we only create / update here
+ continue;
+ }
+ String index = indexMetaData.index();
+ IndexService indexService = indicesService.indexService(index);
+ IndexAliasesService indexAliasesService = indexService.aliasesService();
+ processAliases(index, indexMetaData.aliases().values(), indexAliasesService);
+ // go over and remove aliases
+ for (IndexAlias indexAlias : indexAliasesService) {
+ if (!indexMetaData.aliases().containsKey(indexAlias.alias())) {
+ // we have it in our aliases, but not in the metadata, remove it
+ indexAliasesService.remove(indexAlias.alias());
+ }
+ }
+ }
+ }
+ }
+
+ private void processAliases(String index, ObjectContainer<AliasMetaData> aliases, IndexAliasesService indexAliasesService) {
+ HashMap<String, IndexAlias> newAliases = newHashMap();
+ for (ObjectCursor<AliasMetaData> cursor : aliases) {
+ AliasMetaData aliasMd = cursor.value;
+ String alias = aliasMd.alias();
+ CompressedString filter = aliasMd.filter();
+ try {
+ if (!indexAliasesService.hasAlias(alias)) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] adding alias [{}], filter [{}]", index, alias, filter);
+ }
+ newAliases.put(alias, indexAliasesService.create(alias, filter));
+ } else {
+ if ((filter == null && indexAliasesService.alias(alias).filter() != null) ||
+ (filter != null && !filter.equals(indexAliasesService.alias(alias).filter()))) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] updating alias [{}], filter [{}]", index, alias, filter);
+ }
+ newAliases.put(alias, indexAliasesService.create(alias, filter));
+ }
+ }
+ } catch (Throwable e) {
+ logger.warn("[{}] failed to add alias [{}], filter [{}]", e, index, alias, filter);
+ }
+ }
+ indexAliasesService.addAll(newAliases);
+ }
+
+ private void applyNewOrUpdatedShards(final ClusterChangedEvent event) throws ElasticsearchException {
+ if (!indicesService.changesAllowed()) {
+ return;
+ }
+
+ RoutingTable routingTable = event.state().routingTable();
+ RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+ if (routingNode == null) {
+ failedShards.clear();
+ return;
+ }
+ DiscoveryNodes nodes = event.state().nodes();
+
+ for (final ShardRouting shardRouting : routingNode) {
+ final IndexService indexService = indicesService.indexService(shardRouting.index());
+ if (indexService == null) {
+ // got deleted on us, ignore
+ continue;
+ }
+ final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
+ if (indexMetaData == null) {
+ // the index got deleted on the metadata, we will clean it later in the apply deleted method call
+ continue;
+ }
+
+ final int shardId = shardRouting.id();
+
+ if (!indexService.hasShard(shardId) && shardRouting.started()) {
+ if (!failedShards.containsKey(shardRouting.shardId())) {
+ // the master thinks we are started, but we don't have this shard at all, mark it as failed
+ logger.warn("[{}][{}] master [{}] marked shard as started, but shard has not been created, mark shard as failed", shardRouting.index(), shardId, nodes.masterNode());
+ failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
+ shardStateAction.shardFailed(shardRouting, indexMetaData.getUUID(),
+ "master " + nodes.masterNode() + " marked shard as started, but shard has not been created, mark shard as failed");
+ }
+ continue;
+ }
+
+ if (indexService.hasShard(shardId)) {
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId);
+ ShardRouting currentRoutingEntry = indexShard.routingEntry();
+ // if the current and global routing are initializing, but are still not the same, it's a different "shard" being allocated
+ // for example: a shard that recovers from one node and now needs to recover to another node,
+ // or a replica allocated and then allocating a primary because the primary failed on another node
+ if (currentRoutingEntry.initializing() && shardRouting.initializing() && !currentRoutingEntry.equals(shardRouting)) {
+ logger.debug("[{}][{}] removing shard (different instance of it allocated on this node, current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
+ // cancel recovery just in case we are in recovery (it's fine if we are not; the cancel will be a no-op).
+ recoveryTarget.cancelRecovery(indexShard);
+ indexService.removeShard(shardRouting.id(), "removing shard (different instance of it allocated on this node)");
+ }
+ }
+
+ if (indexService.hasShard(shardId)) {
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId);
+ if (!shardRouting.equals(indexShard.routingEntry())) {
+ indexShard.routingEntry(shardRouting);
+ indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class).routingStateChanged();
+ }
+ }
+
+ if (shardRouting.initializing()) {
+ applyInitializingShard(routingTable, nodes, indexMetaData, routingTable.index(shardRouting.index()).shard(shardRouting.id()), shardRouting);
+ }
+ }
+ }
+
+ private void cleanFailedShards(final ClusterChangedEvent event) {
+ RoutingTable routingTable = event.state().routingTable();
+ RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+ if (routingNode == null) {
+ failedShards.clear();
+ return;
+ }
+
+ DiscoveryNodes nodes = event.state().nodes();
+ long now = System.currentTimeMillis();
+ String localNodeId = nodes.localNodeId();
+ Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator();
+ shards:
+ while (iterator.hasNext()) {
+ Map.Entry<ShardId, FailedShard> entry = iterator.next();
+ FailedShard failedShard = entry.getValue();
+ IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
+ if (indexRoutingTable != null) {
+ IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
+ if (shardRoutingTable != null) {
+ for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
+ if (localNodeId.equals(shardRouting.currentNodeId())) {
+ // we have a timeout here just to make sure we don't keep dangling failed shards around for some reason
+ // it's just another safety layer
+ if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
+ // It's the same failed shard - keep it if it hasn't timed out
+ continue shards;
+ } else {
+ // Different version or expired, remove it
+ break;
+ }
+ }
+ }
+ }
+ }
+ iterator.remove();
+ }
+ }
+
+ private void applyInitializingShard(final RoutingTable routingTable, final DiscoveryNodes nodes, final IndexMetaData indexMetaData, final IndexShardRoutingTable indexShardRouting, final ShardRouting shardRouting) throws ElasticsearchException {
+ final IndexService indexService = indicesService.indexService(shardRouting.index());
+ if (indexService == null) {
+ // got deleted on us, ignore
+ return;
+ }
+ final int shardId = shardRouting.id();
+
+ if (indexService.hasShard(shardId)) {
+ IndexShard indexShard = indexService.shardSafe(shardId);
+ if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) {
+ // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting
+ // for master to confirm a shard started message (either master failover, or a cluster event before
+ // we managed to tell the master we started), mark us as started
+ if (logger.isTraceEnabled()) {
+ logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started",
+ indexShard.shardId(), indexShard.state());
+ }
+ shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(),
+ "master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started");
+ return;
+ } else {
+ if (indexShard.ignoreRecoveryAttempt()) {
+ return;
+ }
+ }
+ }
+ // if there is no shard, create it
+ if (!indexService.hasShard(shardId)) {
+ if (failedShards.containsKey(shardRouting.shardId())) {
+ // already tried to create this shard but it failed - ignore
+ logger.trace("[{}][{}] not initializing, this shards failed to recover on this node before, waiting for reassignment", shardRouting.index(), shardRouting.id());
+ return;
+ }
+ try {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}] creating shard", shardRouting.index(), shardId);
+ }
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.createShard(shardId);
+ indexShard.routingEntry(shardRouting);
+ indexShard.engine().addFailedEngineListener(failedEngineHandler);
+ } catch (IndexShardAlreadyExistsException e) {
+ // ignore this, the method call can happen several times
+ } catch (Throwable e) {
+ logger.warn("[{}][{}] failed to create shard", e, shardRouting.index(), shardRouting.id());
+ try {
+ indexService.removeShard(shardId, "failed to create [" + ExceptionsHelper.detailedMessage(e) + "]");
+ } catch (IndexShardMissingException e1) {
+ // ignore
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to remove shard after failed creation", e1, shardRouting.index(), shardRouting.id());
+ }
+ failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
+ shardStateAction.shardFailed(shardRouting, indexMetaData.getUUID(), "Failed to create shard, message [" + detailedMessage(e) + "]");
+ return;
+ }
+ }
+ final InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(shardId);
+
+ if (indexShard.ignoreRecoveryAttempt()) {
+ // we are already recovering (we can get to this state since the cluster event can happen several
+ // times while we recover)
+ return;
+ }
+
+
+ if (!shardRouting.primary()) {
+ // recovery from primary
+ IndexShardRoutingTable shardRoutingTable = routingTable.index(shardRouting.index()).shard(shardRouting.id());
+ for (ShardRouting entry : shardRoutingTable) {
+ if (entry.primary() && entry.started()) {
+ // only recover from started primary, if we can't find one, we will do it next round
+ final DiscoveryNode sourceNode = nodes.get(entry.currentNodeId());
+ try {
+ // we are recovering a replica from the primary, so there is no need to mark it as relocated
+ final StartRecoveryRequest request = new StartRecoveryRequest(indexShard.shardId(), sourceNode, nodes.localNode(), false, indexShard.store().list());
+ recoveryTarget.startRecovery(request, indexShard, new PeerRecoveryListener(request, shardRouting, indexService, indexMetaData));
+ } catch (Throwable e) {
+ handleRecoveryFailure(indexService, indexMetaData, shardRouting, true, e);
+ break;
+ }
+ break;
+ }
+ }
+ } else {
+ if (shardRouting.relocatingNodeId() == null) {
+ // we are the first primary, recover from the gateway
+ // if the primary was allocated post-API, the index should already exist
+ boolean indexShouldExists = indexShardRouting.primaryAllocatedPostApi();
+ IndexShardGatewayService shardGatewayService = indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class);
+ shardGatewayService.recover(indexShouldExists, new IndexShardGatewayService.RecoveryListener() {
+ @Override
+ public void onRecoveryDone() {
+ shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(), "after recovery from gateway");
+ }
+
+ @Override
+ public void onIgnoreRecovery(String reason) {
+ }
+
+ @Override
+ public void onRecoveryFailed(IndexShardGatewayRecoveryException e) {
+ handleRecoveryFailure(indexService, indexMetaData, shardRouting, true, e);
+ }
+ });
+ } else {
+ // relocating primaries, recovery from the relocating shard
+ final DiscoveryNode sourceNode = nodes.get(shardRouting.relocatingNodeId());
+ try {
+ // we don't mark this one as relocated at the end; requests are routed to both nodes while it's relocating anyway,
+ // and that way we handle the edge case where it's marked as relocated but we might need to roll it back...
+ final StartRecoveryRequest request = new StartRecoveryRequest(indexShard.shardId(), sourceNode, nodes.localNode(), false, indexShard.store().list());
+ recoveryTarget.startRecovery(request, indexShard, new PeerRecoveryListener(request, shardRouting, indexService, indexMetaData));
+ } catch (Throwable e) {
+ handleRecoveryFailure(indexService, indexMetaData, shardRouting, true, e);
+ }
+ }
+ }
+ }
+
+ private class PeerRecoveryListener implements RecoveryTarget.RecoveryListener {
+
+ private final StartRecoveryRequest request;
+ private final ShardRouting shardRouting;
+ private final IndexService indexService;
+ private final IndexMetaData indexMetaData;
+
+ private PeerRecoveryListener(StartRecoveryRequest request, ShardRouting shardRouting, IndexService indexService, IndexMetaData indexMetaData) {
+ this.request = request;
+ this.shardRouting = shardRouting;
+ this.indexService = indexService;
+ this.indexMetaData = indexMetaData;
+ }
+
+ @Override
+ public void onRecoveryDone() {
+ shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(), "after recovery (replica) from node [" + request.sourceNode() + "]");
+ }
+
+ @Override
+ public void onRetryRecovery(TimeValue retryAfter, RecoveryStatus recoveryStatus) {
+ recoveryTarget.retryRecovery(request, recoveryStatus, PeerRecoveryListener.this);
+ }
+
+ @Override
+ public void onIgnoreRecovery(boolean removeShard, String reason) {
+ if (!removeShard) {
+ return;
+ }
+ synchronized (mutex) {
+ if (indexService.hasShard(shardRouting.shardId().id())) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}][{}] removing shard on ignored recovery, reason [{}]", shardRouting.index(), shardRouting.shardId().id(), reason);
+ }
+ try {
+ indexService.removeShard(shardRouting.shardId().id(), "ignore recovery: " + reason);
+ } catch (IndexShardMissingException e) {
+ // the node got closed on us, ignore it
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to delete shard after ignore recovery", e1, indexService.index().name(), shardRouting.shardId().id());
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailure) {
+ handleRecoveryFailure(indexService, indexMetaData, shardRouting, sendShardFailure, e);
+ }
+ }
+
+ private void handleRecoveryFailure(IndexService indexService, IndexMetaData indexMetaData, ShardRouting shardRouting, boolean sendShardFailure, Throwable failure) {
+ logger.warn("[{}][{}] failed to start shard", failure, indexService.index().name(), shardRouting.shardId().id());
+ synchronized (mutex) {
+ if (indexService.hasShard(shardRouting.shardId().id())) {
+ try {
+ indexService.removeShard(shardRouting.shardId().id(), "recovery failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
+ } catch (IndexShardMissingException e) {
+ // the node got closed on us, ignore it
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to delete shard after failed startup", e1, indexService.index().name(), shardRouting.shardId().id());
+ }
+ }
+ if (sendShardFailure) {
+ try {
+ failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
+ shardStateAction.shardFailed(shardRouting, indexMetaData.getUUID(), "Failed to start shard, message [" + detailedMessage(failure) + "]");
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to mark shard as failed after a failed start", e1, indexService.index().name(), shardRouting.id());
+ }
+ }
+ }
+ }
+
+ private void removeIndex(String index, String reason) {
+ try {
+ indicesService.removeIndex(index, reason);
+ } catch (Throwable e) {
+ logger.warn("failed to clean index ({})", e, reason);
+ }
+ // clear seen mappings as well
+ for (Tuple<String, String> tuple : seenMappings.keySet()) {
+ if (tuple.v1().equals(index)) {
+ seenMappings.remove(tuple);
+ }
+ }
+ }
+
+ private class FailedEngineHandler implements Engine.FailedEngineListener {
+ @Override
+ public void onFailedEngine(final ShardId shardId, final Throwable failure) {
+ ShardRouting shardRouting = null;
+ final IndexService indexService = indicesService.indexService(shardId.index().name());
+ if (indexService != null) {
+ IndexShard indexShard = indexService.shard(shardId.id());
+ if (indexShard != null) {
+ shardRouting = indexShard.routingEntry();
+ }
+ }
+ if (shardRouting == null) {
+ logger.warn("[{}][{}] engine failed, but can't find index shard", shardId.index().name(), shardId.id());
+ return;
+ }
+ final ShardRouting fShardRouting = shardRouting;
+ final String indexUUID = indexService.indexUUID(); // we know indexService is not null here.
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ synchronized (mutex) {
+ if (indexService.hasShard(shardId.id())) {
+ try {
+ indexService.removeShard(shardId.id(), "engine failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
+ } catch (IndexShardMissingException e) {
+ // the node got closed on us, ignore it
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to delete shard after failed engine", e1, indexService.index().name(), shardId.id());
+ }
+ }
+ try {
+ failedShards.put(fShardRouting.shardId(), new FailedShard(fShardRouting.version()));
+ shardStateAction.shardFailed(fShardRouting, indexUUID, "engine failure, message [" + detailedMessage(failure) + "]");
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to mark shard as failed after a failed engine", e1, indexService.index().name(), shardId.id());
+ }
+ }
+ }
+ });
+ }
+ }
+}
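
cleanFailedShards() above drops a failed-shard marker once its routing version no longer matches or an hour has passed; applyNewOrUpdatedShards() and applyInitializingShard() consult the same map to avoid endlessly re-creating a shard that just failed. The retention predicate, isolated as a sketch (field names follow FailedShard above):

    import java.util.concurrent.TimeUnit;

    class FailedShardRetentionSketch {
        // Keep the marker only for the same allocation attempt (same routing
        // version) and only while it has not aged past the one-hour safety net.
        static boolean keep(long routingVersion, long failedVersion, long failedTimestamp) {
            long ageMillis = System.currentTimeMillis() - failedTimestamp;
            return routingVersion == failedVersion
                    && ageMillis < TimeUnit.MINUTES.toMillis(60);
        }
    }
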
diff --git a/src/main/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerService.java b/src/main/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerService.java
new file mode 100644
index 0000000..c53cc3e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerService.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+
+/**
+ * Interface for Circuit Breaker services, which provide breakers to classes
+ * that load field data.
+ */
+public interface CircuitBreakerService {
+
+ /**
+ * @return the breaker that can be used to register estimates against
+ */
+ MemoryCircuitBreaker getBreaker();
+
+ /**
+ * @return stats about the breaker
+ */
+ FieldDataBreakerStats stats();
+}
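
To make the interface's contract concrete, here is a minimal, hypothetical sketch of a caller guarding a field data load with the breaker. Only getBreaker() and addWithoutBreaking(long) are visible in this patch; addEstimateBytesAndMaybeBreak(long) is assumed from the MemoryCircuitBreaker API, and estimateFieldDataSize()/loadFieldData() are placeholders.

    // sketch only: register an estimate before loading, so the node can
    // refuse work that would push field data memory over the limit
    MemoryCircuitBreaker breaker = circuitBreakerService.getBreaker();
    long estimatedBytes = estimateFieldDataSize();          // hypothetical estimate
    breaker.addEstimateBytesAndMaybeBreak(estimatedBytes);  // assumed API; throws when over the limit
    loadFieldData();                                        // hypothetical load guarded by the estimate
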
diff --git a/src/main/java/org/elasticsearch/indices/fielddata/breaker/FieldDataBreakerStats.java b/src/main/java/org/elasticsearch/indices/fielddata/breaker/FieldDataBreakerStats.java
new file mode 100644
index 0000000..41392ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/fielddata/breaker/FieldDataBreakerStats.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ * Class encapsulating stats about the circuit breaker
+ */
+public class FieldDataBreakerStats implements Streamable, ToXContent {
+
+ private long maximum;
+ private long estimated;
+ private double overhead;
+
+ FieldDataBreakerStats() {
+
+ }
+
+ public FieldDataBreakerStats(long maximum, long estimated, double overhead) {
+ this.maximum = maximum;
+ this.estimated = estimated;
+ this.overhead = overhead;
+ }
+
+ public long getMaximum() {
+ return this.maximum;
+ }
+
+ public long getEstimated() {
+ return this.estimated;
+ }
+
+ public double getOverhead() {
+ return this.overhead;
+ }
+
+ public static FieldDataBreakerStats readOptionalCircuitBreakerStats(StreamInput in) throws IOException {
+ return in.readOptionalStreamable(new FieldDataBreakerStats());
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ maximum = in.readLong();
+ estimated = in.readLong();
+ overhead = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(maximum);
+ out.writeLong(estimated);
+ out.writeDouble(overhead);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.BREAKER);
+ builder.field(Fields.MAX, maximum);
+ builder.field(Fields.MAX_HUMAN, new ByteSizeValue(maximum));
+ builder.field(Fields.ESTIMATED, estimated);
+ builder.field(Fields.ESTIMATED_HUMAN, new ByteSizeValue(estimated));
+ builder.field(Fields.OVERHEAD, overhead);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString BREAKER = new XContentBuilderString("fielddata_breaker");
+ static final XContentBuilderString MAX = new XContentBuilderString("maximum_size_in_bytes");
+ static final XContentBuilderString MAX_HUMAN = new XContentBuilderString("maximum_size");
+ static final XContentBuilderString ESTIMATED = new XContentBuilderString("estimated_size_in_bytes");
+ static final XContentBuilderString ESTIMATED_HUMAN = new XContentBuilderString("estimated_size");
+ static final XContentBuilderString OVERHEAD = new XContentBuilderString("overhead");
+ }
+}
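
Rendered through toXContent above, the stats land under a "fielddata_breaker" object in stats-style output. A sketch of the expected shape, with illustrative values only:

    "fielddata_breaker" : {
      "maximum_size_in_bytes" : 623326003,
      "maximum_size" : "594.4mb",
      "estimated_size_in_bytes" : 0,
      "estimated_size" : "0b",
      "overhead" : 1.03
    }
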
diff --git a/src/main/java/org/elasticsearch/indices/fielddata/breaker/InternalCircuitBreakerService.java b/src/main/java/org/elasticsearch/indices/fielddata/breaker/InternalCircuitBreakerService.java
new file mode 100644
index 0000000..a6bed47
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/fielddata/breaker/InternalCircuitBreakerService.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ * The InternalCircuitBreakerService provides
+ * {@link org.elasticsearch.common.breaker.MemoryCircuitBreaker}s
+ * that can be used to keep track of memory usage across the node, preventing
+ * actions that could cause an {@link OutOfMemoryError} on the node.
+ */
+public class InternalCircuitBreakerService extends AbstractLifecycleComponent<InternalCircuitBreakerService> implements CircuitBreakerService {
+
+ public static final String CIRCUIT_BREAKER_MAX_BYTES_SETTING = "indices.fielddata.breaker.limit";
+ public static final String CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.fielddata.breaker.overhead";
+
+ public static final double DEFAULT_OVERHEAD_CONSTANT = 1.03;
+ private static final String DEFAULT_BREAKER_LIMIT = "80%";
+
+ private volatile MemoryCircuitBreaker breaker;
+ private volatile long maxBytes;
+ private volatile double overhead;
+
+ @Inject
+ public InternalCircuitBreakerService(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+ this.maxBytes = settings.getAsMemory(CIRCUIT_BREAKER_MAX_BYTES_SETTING, DEFAULT_BREAKER_LIMIT).bytes();
+ this.overhead = settings.getAsDouble(CIRCUIT_BREAKER_OVERHEAD_SETTING, DEFAULT_OVERHEAD_CONSTANT);
+
+ this.breaker = new MemoryCircuitBreaker(new ByteSizeValue(maxBytes), overhead, null, logger);
+
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ // rebuild the breaker only if the relevant settings actually changed
+ long newMaxByteSizeValue = settings.getAsMemory(CIRCUIT_BREAKER_MAX_BYTES_SETTING, Long.toString(maxBytes)).bytes();
+ boolean breakerResetNeeded = false;
+
+ if (newMaxByteSizeValue != maxBytes) {
+ logger.info("updating [{}] from [{}]({}) to [{}]({})", CIRCUIT_BREAKER_MAX_BYTES_SETTING,
+ InternalCircuitBreakerService.this.maxBytes, new ByteSizeValue(InternalCircuitBreakerService.this.maxBytes),
+ newMaxByteSizeValue, new ByteSizeValue(newMaxByteSizeValue));
+ maxBytes = newMaxByteSizeValue;
+ breakerResetNeeded = true;
+ }
+
+ double newOverhead = settings.getAsDouble(CIRCUIT_BREAKER_OVERHEAD_SETTING, overhead);
+ if (newOverhead != overhead) {
+ logger.info("updating [{}] from [{}] to [{}]", CIRCUIT_BREAKER_OVERHEAD_SETTING,
+ overhead, newOverhead);
+ overhead = newOverhead;
+ breakerResetNeeded = true;
+ }
+
+ if (breakerResetNeeded) {
+ resetBreaker();
+ }
+ }
+ }
+
+ /**
+ * @return a {@link org.elasticsearch.common.breaker.MemoryCircuitBreaker} that can be used for aggregating memory usage
+ */
+ public MemoryCircuitBreaker getBreaker() {
+ return this.breaker;
+ }
+
+ /**
+ * Reset the breaker, creating a new one and initializing its used value
+ * to the actual field data usage, or the existing estimated usage if the
+ * actual value is not available. Will not trip the breaker even if the
+ * used value is higher than the limit for the breaker.
+ */
+ public synchronized void resetBreaker() {
+ final MemoryCircuitBreaker oldBreaker = this.breaker;
+ // discard old breaker by creating a new one and pre-populating from the current breaker
+ this.breaker = new MemoryCircuitBreaker(new ByteSizeValue(maxBytes), overhead, oldBreaker, logger);
+ }
+
+ @Override
+ public FieldDataBreakerStats stats() {
+ return new FieldDataBreakerStats(breaker.getMaximum(), breaker.getUsed(), breaker.getOverhead());
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+}
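
The two settings read in the constructor can be supplied statically, and the ApplySettings listener suggests they are also honored on dynamic node settings refreshes. Illustrative snippets, not taken from this patch:

    # elasticsearch.yml (static)
    indices.fielddata.breaker.limit: 80%
    indices.fielddata.breaker.overhead: 1.03

    # dynamic update via the cluster settings API (assumed to reach the
    # ApplySettings listener above through NodeSettingsService)
    curl -XPUT localhost:9200/_cluster/settings -d '{
      "persistent" : { "indices.fielddata.breaker.limit" : "70%" }
    }'
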
diff --git a/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
new file mode 100644
index 0000000..f08f514
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.cache;
+
+import com.google.common.cache.*;
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.SegmentReader;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.SegmentReaderUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.index.shard.service.IndexShard;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, AtomicFieldData> {
+
+ private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
+
+ Cache<Key, AtomicFieldData> cache;
+
+ private volatile String size;
+ private volatile long sizeInBytes;
+ private volatile TimeValue expire;
+
+
+ @Inject
+ public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
+ super(settings);
+ this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
+ this.size = componentSettings.get("size", "-1");
+ this.sizeInBytes = componentSettings.getAsMemory("size", "-1").bytes();
+ this.expire = componentSettings.getAsTime("expire", null);
+ buildCache();
+ }
+
+ private void buildCache() {
+ CacheBuilder<Key, AtomicFieldData> cacheBuilder = CacheBuilder.newBuilder()
+ .removalListener(this);
+ if (sizeInBytes > 0) {
+ cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
+ }
+ // defaults to 4, but this is a busy map for all indices, increase it a bit
+ cacheBuilder.concurrencyLevel(16);
+ if (expire != null && expire.millis() > 0) {
+ cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
+ }
+ logger.debug("using size [{}] [{}], expire [{}]", size, new ByteSizeValue(sizeInBytes), expire);
+ cache = cacheBuilder.build();
+ }
+
+ public void close() {
+ cache.invalidateAll();
+ }
+
+ public IndexFieldDataCache buildIndexFieldDataCache(@Nullable IndexService indexService, Index index, FieldMapper.Names fieldNames, FieldDataType fieldDataType) {
+ return new IndexFieldCache(indexService, index, fieldNames, fieldDataType);
+ }
+
+ @Override
+ public void onRemoval(RemovalNotification<Key, AtomicFieldData> notification) {
+ Key key = notification.getKey();
+ assert key != null && key.listeners != null;
+
+ IndexFieldCache indexCache = key.indexCache;
+ long sizeInBytes = key.sizeInBytes;
+ AtomicFieldData value = notification.getValue();
+ if (sizeInBytes == -1 && value != null) {
+ sizeInBytes = value.getMemorySizeInBytes();
+ }
+ for (IndexFieldDataCache.Listener listener : key.listeners) {
+ listener.onUnload(indexCache.fieldNames, indexCache.fieldDataType, notification.wasEvicted(), sizeInBytes, value);
+ }
+ }
+
+ public static class FieldDataWeigher implements Weigher<Key, AtomicFieldData> {
+
+ @Override
+ public int weigh(Key key, AtomicFieldData fieldData) {
+ int weight = (int) Math.min(fieldData.getMemorySizeInBytes(), Integer.MAX_VALUE);
+ return weight == 0 ? 1 : weight;
+ }
+ }
+
+ /**
+ * A specific cache instance for the relevant parameters of it (index, fieldNames, fieldType).
+ */
+ class IndexFieldCache implements IndexFieldDataCache, SegmentReader.CoreClosedListener {
+
+ @Nullable
+ private final IndexService indexService;
+ final Index index;
+ final FieldMapper.Names fieldNames;
+ final FieldDataType fieldDataType;
+
+ IndexFieldCache(@Nullable IndexService indexService, Index index, FieldMapper.Names fieldNames, FieldDataType fieldDataType) {
+ this.indexService = indexService;
+ this.index = index;
+ this.fieldNames = fieldNames;
+ this.fieldDataType = fieldDataType;
+ }
+
+ @Override
+ public <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(final AtomicReaderContext context, final IFD indexFieldData) throws Exception {
+ final Key key = new Key(this, context.reader().getCoreCacheKey());
+ //noinspection unchecked
+ return (FD) cache.get(key, new Callable<AtomicFieldData>() {
+ @Override
+ public AtomicFieldData call() throws Exception {
+ SegmentReaderUtils.registerCoreListener(context.reader(), IndexFieldCache.this);
+ AtomicFieldData fieldData = indexFieldData.loadDirect(context);
+ key.listeners.add(indicesFieldDataCacheListener);
+
+ if (indexService != null) {
+ ShardId shardId = ShardUtils.extractShardId(context.reader());
+ if (shardId != null) {
+ IndexShard shard = indexService.shard(shardId.id());
+ if (shard != null) {
+ key.listeners.add(shard.fieldData());
+ }
+ }
+ }
+ for (Listener listener : key.listeners) {
+ listener.onLoad(fieldNames, fieldDataType, fieldData);
+ }
+ return fieldData;
+ }
+ });
+ }
+
+ @Override
+ public void onClose(Object coreKey) {
+ cache.invalidate(new Key(this, coreKey));
+ }
+
+ @Override
+ public void clear() {
+ for (Key key : cache.asMap().keySet()) {
+ if (key.indexCache.index.equals(index)) {
+ cache.invalidate(key);
+ }
+ }
+ }
+
+ @Override
+ public void clear(String fieldName) {
+ for (Key key : cache.asMap().keySet()) {
+ if (key.indexCache.index.equals(index)) {
+ if (key.indexCache.fieldNames.fullName().equals(fieldName)) {
+ cache.invalidate(key);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void clear(Object coreCacheKey) {
+ cache.invalidate(new Key(this, coreCacheKey));
+ }
+ }
+
+ public static class Key {
+ public final IndexFieldCache indexCache;
+ public final Object readerKey;
+
+ public final List<IndexFieldDataCache.Listener> listeners = Lists.newArrayList();
+ long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references)
+
+
+ Key(IndexFieldCache indexCache, Object readerKey) {
+ this.indexCache = indexCache;
+ this.readerKey = readerKey;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Key key = (Key) o;
+ if (!indexCache.equals(key.indexCache)) return false;
+ if (!readerKey.equals(key.readerKey)) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = indexCache.hashCode();
+ result = 31 * result + readerKey.hashCode();
+ return result;
+ }
+ }
+}
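
Given the component settings read in the constructor (the prefix is assumed to be indices.fielddata.cache), the node-level cache would typically be bounded like this; values are illustrative:

    indices.fielddata.cache.size: 30%    # maximum weight, by entry memory size (default: unbounded)
    indices.fielddata.cache.expire: 10m  # optional expire-after-access (default: none)
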
diff --git a/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java b/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java
new file mode 100644
index 0000000..51c79de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.cache;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+
+/**
+ * An {@link IndexFieldDataCache.Listener} implementation that updates node (indices) level
+ * statistics and services when field data entries are loaded and unloaded.
+ *
+ * Currently it only decrements the memory used in the {@link CircuitBreakerService}.
+ */
+public class IndicesFieldDataCacheListener implements IndexFieldDataCache.Listener {
+
+ private final CircuitBreakerService circuitBreakerService;
+
+ @Inject
+ public IndicesFieldDataCacheListener(CircuitBreakerService circuitBreakerService) {
+ this.circuitBreakerService = circuitBreakerService;
+ }
+
+ @Override
+ public void onLoad(FieldMapper.Names fieldNames, FieldDataType fieldDataType, AtomicFieldData fieldData) {
+ }
+
+ @Override
+ public void onUnload(FieldMapper.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes, @Nullable AtomicFieldData fieldData) {
+ assert sizeInBytes >= 0 : "When reducing the circuit breaker, the adjustment must be greater than or equal to 0, not [" + sizeInBytes + "]";
+ circuitBreakerService.getBreaker().addWithoutBreaking(-sizeInBytes);
+ }
+
+}
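
The effect of onUnload is plain bookkeeping: whatever the entry contributed to the breaker while it was cached is handed back on eviction. A hypothetical trace with illustrative numbers:

    // a 5mb field data entry was loaded; breaker.getUsed() == 5242880
    // the entry is evicted and onUnload(..., sizeInBytes = 5242880, ...) runs:
    circuitBreakerService.getBreaker().addWithoutBreaking(-5242880L);
    // breaker.getUsed() == 0 again; addWithoutBreaking never trips the
    // breaker, so unloading cannot fail even when the node is over its limit
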
diff --git a/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java b/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java
new file mode 100644
index 0000000..5f2da8c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.memory;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineClosedException;
+import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ *
+ */
+public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
+
+ private final ThreadPool threadPool;
+ private final IndicesService indicesService;
+
+ private final ByteSizeValue indexingBuffer;
+ private final ByteSizeValue minShardIndexBufferSize;
+ private final ByteSizeValue maxShardIndexBufferSize;
+
+ private final ByteSizeValue translogBuffer;
+ private final ByteSizeValue minShardTranslogBufferSize;
+ private final ByteSizeValue maxShardTranslogBufferSize;
+
+ private final TimeValue inactiveTime;
+ private final TimeValue interval;
+ private final AtomicBoolean shardsCreatedOrDeleted = new AtomicBoolean();
+
+ private final Listener listener = new Listener();
+
+ private final Map<ShardId, ShardIndexingStatus> shardsIndicesStatus = Maps.newHashMap();
+
+ private volatile ScheduledFuture scheduler;
+
+ private final Object mutex = new Object();
+
+ @Inject
+ public IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.indicesService = indicesService;
+
+ ByteSizeValue indexingBuffer;
+ String indexingBufferSetting = componentSettings.get("index_buffer_size", "10%");
+ if (indexingBufferSetting.endsWith("%")) {
+ double percent = Double.parseDouble(indexingBufferSetting.substring(0, indexingBufferSetting.length() - 1));
+ indexingBuffer = new ByteSizeValue((long) (((double) JvmInfo.jvmInfo().mem().heapMax().bytes()) * (percent / 100)));
+ ByteSizeValue minIndexingBuffer = componentSettings.getAsBytesSize("min_index_buffer_size", new ByteSizeValue(48, ByteSizeUnit.MB));
+ ByteSizeValue maxIndexingBuffer = componentSettings.getAsBytesSize("max_index_buffer_size", null);
+
+ if (indexingBuffer.bytes() < minIndexingBuffer.bytes()) {
+ indexingBuffer = minIndexingBuffer;
+ }
+ if (maxIndexingBuffer != null && indexingBuffer.bytes() > maxIndexingBuffer.bytes()) {
+ indexingBuffer = maxIndexingBuffer;
+ }
+ } else {
+ indexingBuffer = ByteSizeValue.parseBytesSizeValue(indexingBufferSetting, null);
+ }
+ this.indexingBuffer = indexingBuffer;
+ this.minShardIndexBufferSize = componentSettings.getAsBytesSize("min_shard_index_buffer_size", new ByteSizeValue(4, ByteSizeUnit.MB));
+ // LUCENE MONITOR: Based on this thread, currently (based on Mike), having a large buffer does not make a lot of sense: https://issues.apache.org/jira/browse/LUCENE-2324?focusedCommentId=13005155&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13005155
+ this.maxShardIndexBufferSize = componentSettings.getAsBytesSize("max_shard_index_buffer_size", new ByteSizeValue(512, ByteSizeUnit.MB));
+
+ ByteSizeValue translogBuffer;
+ String translogBufferSetting = componentSettings.get("translog_buffer_size", "1%");
+ if (translogBufferSetting.endsWith("%")) {
+ double percent = Double.parseDouble(translogBufferSetting.substring(0, translogBufferSetting.length() - 1));
+ translogBuffer = new ByteSizeValue((long) (((double) JvmInfo.jvmInfo().mem().heapMax().bytes()) * (percent / 100)));
+ ByteSizeValue minTranslogBuffer = componentSettings.getAsBytesSize("min_translog_buffer_size", new ByteSizeValue(256, ByteSizeUnit.KB));
+ ByteSizeValue maxTranslogBuffer = componentSettings.getAsBytesSize("max_translog_buffer_size", null);
+
+ if (translogBuffer.bytes() < minTranslogBuffer.bytes()) {
+ translogBuffer = minTranslogBuffer;
+ }
+ if (maxTranslogBuffer != null && translogBuffer.bytes() > maxTranslogBuffer.bytes()) {
+ translogBuffer = maxTranslogBuffer;
+ }
+ } else {
+ translogBuffer = ByteSizeValue.parseBytesSizeValue(translogBufferSetting, null);
+ }
+ this.translogBuffer = translogBuffer;
+ this.minShardTranslogBufferSize = componentSettings.getAsBytesSize("min_shard_translog_buffer_size", new ByteSizeValue(2, ByteSizeUnit.KB));
+ this.maxShardTranslogBufferSize = componentSettings.getAsBytesSize("max_shard_translog_buffer_size", new ByteSizeValue(64, ByteSizeUnit.KB));
+
+ this.inactiveTime = componentSettings.getAsTime("shard_inactive_time", TimeValue.timeValueMinutes(30));
+ // this needs to be relatively small so that a shard moves from inactive to active quickly enough
+ this.interval = componentSettings.getAsTime("interval", TimeValue.timeValueSeconds(30));
+
+ logger.debug("using index_buffer_size [{}], with min_shard_index_buffer_size [{}], max_shard_index_buffer_size [{}], shard_inactive_time [{}]", this.indexingBuffer, this.minShardIndexBufferSize, this.maxShardIndexBufferSize, this.inactiveTime);
+
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ indicesService.indicesLifecycle().addListener(listener);
+ // it's fine to run it on the scheduler thread, no busy work
+ this.scheduler = threadPool.scheduleWithFixedDelay(new ShardsIndicesStatusChecker(), interval);
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ indicesService.indicesLifecycle().removeListener(listener);
+ if (scheduler != null) {
+ scheduler.cancel(false);
+ scheduler = null;
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ class ShardsIndicesStatusChecker implements Runnable {
+ @Override
+ public void run() {
+ synchronized (mutex) {
+ boolean activeInactiveStatusChanges = false;
+ List<IndexShard> activeToInactiveIndexingShards = Lists.newArrayList();
+ List<IndexShard> inactiveToActiveIndexingShards = Lists.newArrayList();
+ for (IndexService indexService : indicesService) {
+ for (IndexShard indexShard : indexService) {
+ long time = threadPool.estimatedTimeInMillis();
+ Translog translog = ((InternalIndexShard) indexShard).translog();
+ ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
+ if (status == null) { // not added yet
+ continue;
+ }
+ // check if it is deemed to be inactive (same translogId and numberOfOperations over a long period of time)
+ if (status.translogId == translog.currentId() && translog.estimatedNumberOfOperations() == 0) {
+ if (status.time == -1) { // first time
+ status.time = time;
+ }
+ // inactive?
+ if (!status.inactiveIndexing) {
+ // mark it as inactive only if enough time has passed and there are no merges going on
+ if ((time - status.time) > inactiveTime.millis() && indexShard.mergeStats().getCurrent() == 0) {
+ // inactive for this amount of time, mark it
+ activeToInactiveIndexingShards.add(indexShard);
+ status.inactiveIndexing = true;
+ activeInactiveStatusChanges = true;
+ logger.debug("marking shard [{}][{}] as inactive (inactive_time[{}]) indexing wise, setting size to [{}]", indexShard.shardId().index().name(), indexShard.shardId().id(), inactiveTime, Engine.INACTIVE_SHARD_INDEXING_BUFFER);
+ }
+ }
+ } else {
+ if (status.inactiveIndexing) {
+ inactiveToActiveIndexingShards.add(indexShard);
+ status.inactiveIndexing = false;
+ activeInactiveStatusChanges = true;
+ logger.debug("marking shard [{}][{}] as active indexing wise", indexShard.shardId().index().name(), indexShard.shardId().id());
+ }
+ status.time = -1;
+ }
+ status.translogId = translog.currentId();
+ status.translogNumberOfOperations = translog.estimatedNumberOfOperations();
+ }
+ }
+ for (IndexShard indexShard : activeToInactiveIndexingShards) {
+ // update inactive indexing buffer size
+ try {
+ ((InternalIndexShard) indexShard).engine().updateIndexingBufferSize(Engine.INACTIVE_SHARD_INDEXING_BUFFER);
+ ((InternalIndexShard) indexShard).translog().updateBuffer(Translog.INACTIVE_SHARD_TRANSLOG_BUFFER);
+ } catch (EngineClosedException e) {
+ // ignore
+ } catch (FlushNotAllowedEngineException e) {
+ // ignore
+ }
+ }
+ boolean shardsCreatedOrDeleted = IndexingMemoryController.this.shardsCreatedOrDeleted.compareAndSet(true, false);
+ if (shardsCreatedOrDeleted || activeInactiveStatusChanges) {
+ calcAndSetShardBuffers("active/inactive[" + activeInactiveStatusChanges + "] created/deleted[" + shardsCreatedOrDeleted + "]");
+ }
+ }
+ }
+ }
+
+ class Listener extends IndicesLifecycle.Listener {
+
+ @Override
+ public void afterIndexShardCreated(IndexShard indexShard) {
+ synchronized (mutex) {
+ shardsIndicesStatus.put(indexShard.shardId(), new ShardIndexingStatus());
+ shardsCreatedOrDeleted.set(true);
+ }
+ }
+
+ @Override
+ public void afterIndexShardClosed(ShardId shardId) {
+ synchronized (mutex) {
+ shardsIndicesStatus.remove(shardId);
+ shardsCreatedOrDeleted.set(true);
+ }
+ }
+ }
+
+
+ private void calcAndSetShardBuffers(String reason) {
+ int shardsCount = countShards();
+ if (shardsCount == 0) {
+ return;
+ }
+ ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / shardsCount);
+ if (shardIndexingBufferSize.bytes() < minShardIndexBufferSize.bytes()) {
+ shardIndexingBufferSize = minShardIndexBufferSize;
+ }
+ if (shardIndexingBufferSize.bytes() > maxShardIndexBufferSize.bytes()) {
+ shardIndexingBufferSize = maxShardIndexBufferSize;
+ }
+
+ ByteSizeValue shardTranslogBufferSize = new ByteSizeValue(translogBuffer.bytes() / shardsCount);
+ if (shardTranslogBufferSize.bytes() < minShardTranslogBufferSize.bytes()) {
+ shardTranslogBufferSize = minShardTranslogBufferSize;
+ }
+ if (shardTranslogBufferSize.bytes() > maxShardTranslogBufferSize.bytes()) {
+ shardTranslogBufferSize = maxShardTranslogBufferSize;
+ }
+
+ logger.debug("recalculating shard indexing buffer (reason={}), total is [{}] with [{}] active shards, each shard set to indexing=[{}], translog=[{}]", reason, indexingBuffer, shardsCount, shardIndexingBufferSize, shardTranslogBufferSize);
+ for (IndexService indexService : indicesService) {
+ for (IndexShard indexShard : indexService) {
+ ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
+ if (status == null || !status.inactiveIndexing) {
+ try {
+ ((InternalIndexShard) indexShard).engine().updateIndexingBufferSize(shardIndexingBufferSize);
+ ((InternalIndexShard) indexShard).translog().updateBuffer(shardTranslogBufferSize);
+ } catch (EngineClosedException e) {
+ // ignore
+ continue;
+ } catch (FlushNotAllowedEngineException e) {
+ // ignore
+ continue;
+ } catch (Exception e) {
+ logger.warn("failed to set shard [{}][{}] index buffer to [{}]", indexShard.shardId().index().name(), indexShard.shardId().id(), shardIndexingBufferSize);
+ }
+ }
+ }
+ }
+ }
+
+ private int countShards() {
+ int shardsCount = 0;
+ for (IndexService indexService : indicesService) {
+ for (IndexShard indexShard : indexService) {
+ ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
+ if (status == null || !status.inactiveIndexing) {
+ shardsCount++;
+ }
+ }
+ }
+ return shardsCount;
+ }
+
+ static class ShardIndexingStatus {
+ long translogId = -1;
+ int translogNumberOfOperations = -1;
+ boolean inactiveIndexing = false;
+ long time = -1; // contains the first time we saw this shard with no operations done on it
+ }
+}
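
A sketch of the per-shard budget math performed by calcAndSetShardBuffers, using the defaults above (index_buffer_size: 10%, per-shard clamp of 4mb to 512mb) and an assumed 2gb heap; the numbers are illustrative:

    long heapMax = 2L * 1024 * 1024 * 1024;             // assume a 2gb heap
    long indexingBuffer = (long) (heapMax * 0.10);      // index_buffer_size: 10% => ~204mb
    int activeShards = 10;                              // as counted by countShards()
    long perShard = indexingBuffer / activeShards;      // ~20mb each
    long min = 4L << 20, max = 512L << 20;              // min/max_shard_index_buffer_size
    perShard = Math.max(min, Math.min(max, perShard));  // clamp to [4mb, 512mb]
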
diff --git a/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java b/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java
new file mode 100644
index 0000000..d616c90
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.query;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.geo.ShapesAvailability;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser;
+
+import java.util.Set;
+
+public class IndicesQueriesModule extends AbstractModule {
+
+ private Set<Class<QueryParser>> queryParsersClasses = Sets.newHashSet();
+ private Set<QueryParser> queryParsers = Sets.newHashSet();
+ private Set<Class<FilterParser>> filterParsersClasses = Sets.newHashSet();
+ private Set<FilterParser> filterParsers = Sets.newHashSet();
+
+ public synchronized IndicesQueriesModule addQuery(Class<QueryParser> queryParser) {
+ queryParsersClasses.add(queryParser);
+ return this;
+ }
+
+ public synchronized IndicesQueriesModule addQuery(QueryParser queryParser) {
+ queryParsers.add(queryParser);
+ return this;
+ }
+
+ public synchronized IndicesQueriesModule addFilter(Class<FilterParser> filterParser) {
+ filterParsersClasses.add(filterParser);
+ return this;
+ }
+
+ public synchronized IndicesQueriesModule addFilter(FilterParser filterParser) {
+ filterParsers.add(filterParser);
+ return this;
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndicesQueriesRegistry.class).asEagerSingleton();
+
+ Multibinder<QueryParser> qpBinders = Multibinder.newSetBinder(binder(), QueryParser.class);
+ for (Class<QueryParser> queryParser : queryParsersClasses) {
+ qpBinders.addBinding().to(queryParser).asEagerSingleton();
+ }
+ for (QueryParser queryParser : queryParsers) {
+ qpBinders.addBinding().toInstance(queryParser);
+ }
+ qpBinders.addBinding().to(MatchQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(MultiMatchQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(NestedQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(HasChildQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(HasParentQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(TopChildrenQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(DisMaxQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(IdsQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(MatchAllQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(QueryStringQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(BoostingQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(BoolQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(TermQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(TermsQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(FuzzyQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(RegexpQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(RangeQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(PrefixQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(WildcardQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(FilteredQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(ConstantScoreQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(CustomBoostFactorQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(CustomScoreQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(CustomFiltersScoreQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SpanTermQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SpanNotQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(FieldMaskingSpanQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SpanFirstQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SpanNearQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SpanOrQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(MoreLikeThisQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(MoreLikeThisFieldQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(FuzzyLikeThisQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(FuzzyLikeThisFieldQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(WrapperQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(IndicesQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(CommonTermsQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SpanMultiTermQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(FunctionScoreQueryParser.class).asEagerSingleton();
+ qpBinders.addBinding().to(SimpleQueryStringParser.class).asEagerSingleton();
+
+ if (ShapesAvailability.JTS_AVAILABLE) {
+ qpBinders.addBinding().to(GeoShapeQueryParser.class).asEagerSingleton();
+ }
+
+ Multibinder<FilterParser> fpBinders = Multibinder.newSetBinder(binder(), FilterParser.class);
+ for (Class<FilterParser> filterParser : filterParsersClasses) {
+ fpBinders.addBinding().to(filterParser).asEagerSingleton();
+ }
+ for (FilterParser filterParser : filterParsers) {
+ fpBinders.addBinding().toInstance(filterParser);
+ }
+ fpBinders.addBinding().to(HasChildFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(HasParentFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(NestedFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(TypeFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(IdsFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(LimitFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(TermFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(TermsFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(RangeFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(NumericRangeFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(PrefixFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(RegexpFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(ScriptFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(GeoDistanceFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(GeoDistanceRangeFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(GeoBoundingBoxFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(GeohashCellFilter.Parser.class).asEagerSingleton();
+ fpBinders.addBinding().to(GeoPolygonFilterParser.class).asEagerSingleton();
+ if (ShapesAvailability.JTS_AVAILABLE) {
+ fpBinders.addBinding().to(GeoShapeFilterParser.class).asEagerSingleton();
+ }
+ fpBinders.addBinding().to(QueryFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(FQueryFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(BoolFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(AndFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(OrFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(NotFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(MatchAllFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(ExistsFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(MissingFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(IndicesFilterParser.class).asEagerSingleton();
+ fpBinders.addBinding().to(WrapperFilterParser.class).asEagerSingleton();
+ }
+} \ No newline at end of file
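
A plugin or embedding application could extend the default parser sets before the module is installed; MyQueryParser and MyFilterParser below are hypothetical:

    IndicesQueriesModule module = new IndicesQueriesModule();
    module.addQuery(new MyQueryParser());    // bound as an instance
    module.addFilter(new MyFilterParser());  // hypothetical custom filter parser
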
diff --git a/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java
new file mode 100644
index 0000000..dff63a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.query;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterParser;
+import org.elasticsearch.index.query.QueryParser;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class IndicesQueriesRegistry extends AbstractComponent {
+
+ private ImmutableMap<String, QueryParser> queryParsers;
+ private ImmutableMap<String, FilterParser> filterParsers;
+
+ @Inject
+ public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, Set<FilterParser> injectedFilterParsers) {
+ super(settings);
+ Map<String, QueryParser> queryParsers = Maps.newHashMap();
+ for (QueryParser queryParser : injectedQueryParsers) {
+ addQueryParser(queryParsers, queryParser);
+ }
+ this.queryParsers = ImmutableMap.copyOf(queryParsers);
+
+ Map<String, FilterParser> filterParsers = Maps.newHashMap();
+ for (FilterParser filterParser : injectedFilterParsers) {
+ addFilterParser(filterParsers, filterParser);
+ }
+ this.filterParsers = ImmutableMap.copyOf(filterParsers);
+ }
+
+ /**
+ * Adds a global query parser.
+ */
+ public synchronized void addQueryParser(QueryParser queryParser) {
+ Map<String, QueryParser> queryParsers = Maps.newHashMap(this.queryParsers);
+ addQueryParser(queryParsers, queryParser);
+ this.queryParsers = ImmutableMap.copyOf(queryParsers);
+ }
+
+ public synchronized void addFilterParser(FilterParser filterParser) {
+ Map<String, FilterParser> filterParsers = Maps.newHashMap(this.filterParsers);
+ addFilterParser(filterParsers, filterParser);
+ this.filterParsers = ImmutableMap.copyOf(filterParsers);
+ }
+
+ public ImmutableMap<String, QueryParser> queryParsers() {
+ return queryParsers;
+ }
+
+ public ImmutableMap<String, FilterParser> filterParsers() {
+ return filterParsers;
+ }
+
+ private void addQueryParser(Map<String, QueryParser> queryParsers, QueryParser queryParser) {
+ for (String name : queryParser.names()) {
+ queryParsers.put(name, queryParser);
+ }
+ }
+
+ private void addFilterParser(Map<String, FilterParser> filterParsers, FilterParser filterParser) {
+ for (String name : filterParser.names()) {
+ filterParsers.put(name, filterParser);
+ }
+ }
+} \ No newline at end of file
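
Consumers resolve parsers by name from the immutable maps; the names below are assumed from the parser classes bound in IndicesQueriesModule:

    QueryParser termParser = indicesQueriesRegistry.queryParsers().get("term");
    FilterParser rangeFilterParser = indicesQueriesRegistry.filterParsers().get("range");
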
diff --git a/src/main/java/org/elasticsearch/indices/recovery/DelayRecoveryException.java b/src/main/java/org/elasticsearch/indices/recovery/DelayRecoveryException.java
new file mode 100644
index 0000000..f9c47d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/DelayRecoveryException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * An exception marking that this recovery attempt should be delayed and
+ * retried later, for example because the recovery source is not yet ready.
+ *
+ */
+public class DelayRecoveryException extends ElasticsearchException {
+
+ public DelayRecoveryException(String msg) {
+ super(msg);
+ }
+
+ public DelayRecoveryException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/IgnoreRecoveryException.java b/src/main/java/org/elasticsearch/indices/recovery/IgnoreRecoveryException.java
new file mode 100644
index 0000000..2ea2ca5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/IgnoreRecoveryException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * An exception marking that this recovery attempt should be ignored (most likely because we already recovered).
+ *
+ *
+ */
+public class IgnoreRecoveryException extends ElasticsearchException {
+
+ public IgnoreRecoveryException(String msg) {
+ super(msg);
+ }
+
+ public IgnoreRecoveryException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
new file mode 100644
index 0000000..1439208
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class RecoverFilesRecoveryException extends IndexShardException implements ElasticsearchWrapperException {
+
+ private final int numberOfFiles;
+
+ private final ByteSizeValue totalFilesSize;
+
+ public RecoverFilesRecoveryException(ShardId shardId, int numberOfFiles, ByteSizeValue totalFilesSize, Throwable cause) {
+ super(shardId, "Failed to transfer [" + numberOfFiles + "] files with total size of [" + totalFilesSize + "]", cause);
+ this.numberOfFiles = numberOfFiles;
+ this.totalFilesSize = totalFilesSize;
+ }
+
+ public int numberOfFiles() {
+ return numberOfFiles;
+ }
+
+ public ByteSizeValue totalFilesSize() {
+ return totalFilesSize;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java
new file mode 100644
index 0000000..5d506df
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ *
+ */
+class RecoveryCleanFilesRequest extends TransportRequest {
+
+ private long recoveryId;
+ private ShardId shardId;
+
+ private Set<String> snapshotFiles;
+
+ RecoveryCleanFilesRequest() {
+ }
+
+ RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Set<String> snapshotFiles) {
+ this.recoveryId = recoveryId;
+ this.shardId = shardId;
+ this.snapshotFiles = snapshotFiles;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public Set<String> snapshotFiles() {
+ return snapshotFiles;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ int size = in.readVInt();
+ snapshotFiles = Sets.newHashSetWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ snapshotFiles.add(in.readString());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+ out.writeVInt(snapshotFiles.size());
+ for (String snapshotFile : snapshotFiles) {
+ out.writeString(snapshotFile);
+ }
+ }
+}
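
A hypothetical wire round-trip for this request, assuming BytesStreamOutput and BytesStreamInput from org.elasticsearch.common.io.stream (their constructors are not shown in this patch):

    RecoveryCleanFilesRequest original = new RecoveryCleanFilesRequest(
            1L, new ShardId("my_index", 0), Sets.newHashSet("_0.cfs", "segments_1"));
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    RecoveryCleanFilesRequest copy = new RecoveryCleanFilesRequest();
    copy.readFrom(new BytesStreamInput(out.bytes()));  // constructor assumed
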
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java
new file mode 100644
index 0000000..f4e565b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ *
+ */
+public class RecoveryFailedException extends ElasticsearchException {
+
+ public RecoveryFailedException(StartRecoveryRequest request, Throwable cause) {
+ this(request.shardId(), request.sourceNode(), request.targetNode(), cause);
+ }
+
+ public RecoveryFailedException(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Throwable cause) {
+ super(shardId + ": Recovery failed from " + sourceNode + " into " + targetNode, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
new file mode 100644
index 0000000..afd3ded
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * A request carrying a single chunk of an index file streamed from the recovery source
+ * to the target during phase1.
+ */
+class RecoveryFileChunkRequest extends TransportRequest {
+
+ private long recoveryId;
+ private ShardId shardId;
+ private String name;
+ private long position;
+ private long length;
+ private String checksum;
+ private BytesReference content;
+
+ RecoveryFileChunkRequest() {
+ }
+
+ RecoveryFileChunkRequest(long recoveryId, ShardId shardId, String name, long position, long length, String checksum, BytesArray content) {
+ this.recoveryId = recoveryId;
+ this.shardId = shardId;
+ this.name = name;
+ this.position = position;
+ this.length = length;
+ this.checksum = checksum;
+ this.content = content;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public long position() {
+ return position;
+ }
+
+ @Nullable
+ public String checksum() {
+ return this.checksum;
+ }
+
+ public long length() {
+ return length;
+ }
+
+ public BytesReference content() {
+ return content;
+ }
+
+ public RecoveryFileChunkRequest readFileChunk(StreamInput in) throws IOException {
+ RecoveryFileChunkRequest request = new RecoveryFileChunkRequest();
+ request.readFrom(in);
+ return request;
+ }
+
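+    // wire format, in write order: recoveryId as a fixed long, the shard id, the file name,
+    // position and length as vlongs, an optional checksum, and finally the raw chunk bytes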
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ name = in.readString();
+ position = in.readVLong();
+ length = in.readVLong();
+ checksum = in.readOptionalString();
+ content = in.readBytesReference();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+ out.writeString(name);
+ out.writeVLong(position);
+ out.writeVLong(length);
+ out.writeOptionalString(checksum);
+ out.writeBytesReference(content);
+ }
+
+ @Override
+ public String toString() {
+ return shardId + ": name='" + name + '\'' +
+ ", position=" + position +
+ ", length=" + length;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java
new file mode 100644
index 0000000..7500687
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Tells the recovery target which files will be transferred in phase1, which existing
+ * files can be reused, and their sizes.
+ */
+class RecoveryFilesInfoRequest extends TransportRequest {
+
+ private long recoveryId;
+ private ShardId shardId;
+
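+    // the file name and size lists below are parallel: entry i of a sizes list holds the
+    // length in bytes of file i in the corresponding names list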
+ List<String> phase1FileNames;
+ List<Long> phase1FileSizes;
+ List<String> phase1ExistingFileNames;
+ List<Long> phase1ExistingFileSizes;
+ long phase1TotalSize;
+ long phase1ExistingTotalSize;
+
+ RecoveryFilesInfoRequest() {
+ }
+
+ RecoveryFilesInfoRequest(long recoveryId, ShardId shardId, List<String> phase1FileNames, List<Long> phase1FileSizes, List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes, long phase1TotalSize, long phase1ExistingTotalSize) {
+ this.recoveryId = recoveryId;
+ this.shardId = shardId;
+ this.phase1FileNames = phase1FileNames;
+ this.phase1FileSizes = phase1FileSizes;
+ this.phase1ExistingFileNames = phase1ExistingFileNames;
+ this.phase1ExistingFileSizes = phase1ExistingFileSizes;
+ this.phase1TotalSize = phase1TotalSize;
+ this.phase1ExistingTotalSize = phase1ExistingTotalSize;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ int size = in.readVInt();
+ phase1FileNames = new ArrayList<String>(size);
+ for (int i = 0; i < size; i++) {
+ phase1FileNames.add(in.readString());
+ }
+
+ size = in.readVInt();
+ phase1FileSizes = new ArrayList<Long>(size);
+ for (int i = 0; i < size; i++) {
+ phase1FileSizes.add(in.readVLong());
+ }
+
+ size = in.readVInt();
+ phase1ExistingFileNames = new ArrayList<String>(size);
+ for (int i = 0; i < size; i++) {
+ phase1ExistingFileNames.add(in.readString());
+ }
+
+ size = in.readVInt();
+ phase1ExistingFileSizes = new ArrayList<Long>(size);
+ for (int i = 0; i < size; i++) {
+ phase1ExistingFileSizes.add(in.readVLong());
+ }
+
+ phase1TotalSize = in.readVLong();
+ phase1ExistingTotalSize = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+
+ out.writeVInt(phase1FileNames.size());
+ for (String phase1FileName : phase1FileNames) {
+ out.writeString(phase1FileName);
+ }
+
+ out.writeVInt(phase1FileSizes.size());
+ for (Long phase1FileSize : phase1FileSizes) {
+ out.writeVLong(phase1FileSize);
+ }
+
+ out.writeVInt(phase1ExistingFileNames.size());
+ for (String phase1ExistingFileName : phase1ExistingFileNames) {
+ out.writeString(phase1ExistingFileName);
+ }
+
+ out.writeVInt(phase1ExistingFileSizes.size());
+ for (Long phase1ExistingFileSize : phase1ExistingFileSizes) {
+ out.writeVLong(phase1ExistingFileSize);
+ }
+
+ out.writeVLong(phase1TotalSize);
+ out.writeVLong(phase1ExistingTotalSize);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java
new file mode 100644
index 0000000..e3f6ac1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * Tells the recovery target to finalize the recovery and move the shard out of the
+ * recovering state.
+ */
+class RecoveryFinalizeRecoveryRequest extends TransportRequest {
+
+ private long recoveryId;
+
+ private ShardId shardId;
+
+ RecoveryFinalizeRecoveryRequest() {
+ }
+
+ RecoveryFinalizeRecoveryRequest(long recoveryId, ShardId shardId) {
+ this.recoveryId = recoveryId;
+ this.shardId = shardId;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
new file mode 100644
index 0000000..e5a1318
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * Tells the recovery target to prepare its engine for replaying translog operations,
+ * marking the start of phase2.
+ */
+class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
+
+ private long recoveryId;
+ private ShardId shardId;
+
+ RecoveryPrepareForTranslogOperationsRequest() {
+ }
+
+ RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId) {
+ this.recoveryId = recoveryId;
+ this.shardId = shardId;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java
new file mode 100644
index 0000000..86d776e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Carries per-phase file, operation, and timing statistics back to the node that
+ * initiated the recovery.
+ */
+class RecoveryResponse extends TransportResponse {
+
+ List<String> phase1FileNames = Lists.newArrayList();
+ List<Long> phase1FileSizes = Lists.newArrayList();
+ List<String> phase1ExistingFileNames = Lists.newArrayList();
+ List<Long> phase1ExistingFileSizes = Lists.newArrayList();
+ long phase1TotalSize;
+ long phase1ExistingTotalSize;
+ long phase1Time;
+ long phase1ThrottlingWaitTime;
+
+ long startTime;
+
+ int phase2Operations;
+ long phase2Time;
+
+ int phase3Operations;
+ long phase3Time;
+
+ RecoveryResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ int size = in.readVInt();
+ phase1FileNames = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ phase1FileNames.add(in.readString());
+ }
+ size = in.readVInt();
+ phase1FileSizes = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ phase1FileSizes.add(in.readVLong());
+ }
+
+ size = in.readVInt();
+ phase1ExistingFileNames = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ phase1ExistingFileNames.add(in.readString());
+ }
+ size = in.readVInt();
+ phase1ExistingFileSizes = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ phase1ExistingFileSizes.add(in.readVLong());
+ }
+
+ phase1TotalSize = in.readVLong();
+ phase1ExistingTotalSize = in.readVLong();
+ phase1Time = in.readVLong();
+ phase1ThrottlingWaitTime = in.readVLong();
+ startTime = in.readVLong();
+ phase2Operations = in.readVInt();
+ phase2Time = in.readVLong();
+ phase3Operations = in.readVInt();
+ phase3Time = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(phase1FileNames.size());
+ for (String name : phase1FileNames) {
+ out.writeString(name);
+ }
+ out.writeVInt(phase1FileSizes.size());
+ for (long size : phase1FileSizes) {
+ out.writeVLong(size);
+ }
+
+ out.writeVInt(phase1ExistingFileNames.size());
+ for (String name : phase1ExistingFileNames) {
+ out.writeString(name);
+ }
+ out.writeVInt(phase1ExistingFileSizes.size());
+ for (long size : phase1ExistingFileSizes) {
+ out.writeVLong(size);
+ }
+
+ out.writeVLong(phase1TotalSize);
+ out.writeVLong(phase1ExistingTotalSize);
+ out.writeVLong(phase1Time);
+ out.writeVLong(phase1ThrottlingWaitTime);
+ out.writeVLong(startTime);
+ out.writeVInt(phase2Operations);
+ out.writeVLong(phase2Time);
+ out.writeVInt(phase3Operations);
+ out.writeVLong(phase3Time);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
new file mode 100644
index 0000000..88890a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.base.Objects;
+import org.apache.lucene.store.RateLimiter;
+import org.apache.lucene.store.RateLimiter.SimpleRateLimiter;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Node-level settings controlling peer recovery: file chunk size, translog batching,
+ * stream concurrency, and transfer throttling. These can be updated dynamically via
+ * the node settings service.
+ */
+public class RecoverySettings extends AbstractComponent {
+
+ public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size";
+ public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops";
+ public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size";
+ public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress";
+ public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
+ public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
+ public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";
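+
+    // for example, in elasticsearch.yml (the values shown are the defaults applied below):
+    //   indices.recovery.max_bytes_per_sec: 20mb
+    //   indices.recovery.concurrent_streams: 3
+    //   indices.recovery.file_chunk_size: 512kb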
+
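+    // files at or below this size are streamed through the dedicated small-file pool,
+    // so that many small files are not queued behind large segment files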
+ public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb").bytes();
+
+ /**
+ * Use {@link #INDICES_RECOVERY_MAX_BYTES_PER_SEC} instead
+ */
+ @Deprecated
+ public static final String INDICES_RECOVERY_MAX_SIZE_PER_SEC = "indices.recovery.max_size_per_sec";
+
+ private volatile ByteSizeValue fileChunkSize;
+
+ private volatile boolean compress;
+ private volatile int translogOps;
+ private volatile ByteSizeValue translogSize;
+
+ private volatile int concurrentStreams;
+ private volatile int concurrentSmallFileStreams;
+ private final ThreadPoolExecutor concurrentStreamPool;
+ private final ThreadPoolExecutor concurrentSmallFileStreamPool;
+
+ private volatile ByteSizeValue maxBytesPerSec;
+ private volatile SimpleRateLimiter rateLimiter;
+
+ @Inject
+ public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
+ super(settings);
+
+ this.fileChunkSize = componentSettings.getAsBytesSize("file_chunk_size", settings.getAsBytesSize("index.shard.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB)));
+ this.translogOps = componentSettings.getAsInt("translog_ops", settings.getAsInt("index.shard.recovery.translog_ops", 1000));
+ this.translogSize = componentSettings.getAsBytesSize("translog_size", settings.getAsBytesSize("index.shard.recovery.translog_size", new ByteSizeValue(512, ByteSizeUnit.KB)));
+ this.compress = componentSettings.getAsBoolean("compress", true);
+
+ this.concurrentStreams = componentSettings.getAsInt("concurrent_streams", settings.getAsInt("index.shard.recovery.concurrent_streams", 3));
+ this.concurrentStreamPool = EsExecutors.newScaling(0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
+ this.concurrentSmallFileStreams = componentSettings.getAsInt("concurrent_small_file_streams", settings.getAsInt("index.shard.recovery.concurrent_small_file_streams", 2));
+ this.concurrentSmallFileStreamPool = EsExecutors.newScaling(0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
+
+ this.maxBytesPerSec = componentSettings.getAsBytesSize("max_bytes_per_sec", componentSettings.getAsBytesSize("max_size_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB)));
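+        // a max_bytes_per_sec of zero or less disables recovery throttling entirely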
+ if (maxBytesPerSec.bytes() <= 0) {
+ rateLimiter = null;
+ } else {
+ rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
+ }
+
+ logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]",
+ maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress);
+
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ public void close() {
+ concurrentStreamPool.shutdown();
+ try {
+ concurrentStreamPool.awaitTermination(1, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+            // ignore; we force a shutdownNow below anyway
+ }
+ concurrentStreamPool.shutdownNow();
+ }
+
+ public ByteSizeValue fileChunkSize() {
+ return fileChunkSize;
+ }
+
+ public boolean compress() {
+ return compress;
+ }
+
+ public int translogOps() {
+ return translogOps;
+ }
+
+ public ByteSizeValue translogSize() {
+ return translogSize;
+ }
+
+ public int concurrentStreams() {
+ return concurrentStreams;
+ }
+
+ public ThreadPoolExecutor concurrentStreamPool() {
+ return concurrentStreamPool;
+ }
+
+ public ThreadPoolExecutor concurrentSmallFileStreamPool() {
+ return concurrentSmallFileStreamPool;
+ }
+
+ public RateLimiter rateLimiter() {
+ return rateLimiter;
+ }
+
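+    /**
+     * Applies dynamic updates to the recovery settings (throttling, chunk size, translog
+     * batching, and stream concurrency) when the node settings change, without a restart.
+     */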
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, settings.getAsBytesSize(INDICES_RECOVERY_MAX_SIZE_PER_SEC, RecoverySettings.this.maxBytesPerSec));
+ if (!Objects.equal(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) {
+ logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec);
+ RecoverySettings.this.maxBytesPerSec = maxSizePerSec;
+ if (maxSizePerSec.bytes() <= 0) {
+ rateLimiter = null;
+ } else if (rateLimiter != null) {
+ rateLimiter.setMbPerSec(maxSizePerSec.mbFrac());
+ } else {
+ rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac());
+ }
+ }
+
+ ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize);
+ if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) {
+ logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize);
+ RecoverySettings.this.fileChunkSize = fileChunkSize;
+ }
+
+ int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps);
+ if (translogOps != RecoverySettings.this.translogOps) {
+ logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps);
+ RecoverySettings.this.translogOps = translogOps;
+ }
+
+ ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize);
+ if (!translogSize.equals(RecoverySettings.this.translogSize)) {
+ logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize);
+ RecoverySettings.this.translogSize = translogSize;
+ }
+
+ boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress);
+ if (compress != RecoverySettings.this.compress) {
+ logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress);
+ RecoverySettings.this.compress = compress;
+ }
+
+ int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
+ if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
+ logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);
+ RecoverySettings.this.concurrentStreams = concurrentStreams;
+ RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams);
+ }
+
+ int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams);
+ if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) {
+ logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams);
+ RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams;
+ RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java
new file mode 100644
index 0000000..0cedfce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.IllegalIndexShardStateException;
+import org.elasticsearch.index.shard.IndexShardClosedException;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * The recovery source accepts recovery requests from peer shards and starts the recovery
+ * process from this source shard to the target shard.
+ */
+public class RecoverySource extends AbstractComponent {
+
+ public static class Actions {
+ public static final String START_RECOVERY = "index/shard/recovery/startRecovery";
+ }
+
+ private final TransportService transportService;
+ private final IndicesService indicesService;
+ private final RecoverySettings recoverySettings;
+
+ private final ClusterService clusterService;
+
+ private final TimeValue internalActionTimeout;
+ private final TimeValue internalActionLongTimeout;
+
+
+ @Inject
+ public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
+ RecoverySettings recoverySettings, ClusterService clusterService) {
+ super(settings);
+ this.transportService = transportService;
+ this.indicesService = indicesService;
+ this.clusterService = clusterService;
+
+ this.recoverySettings = recoverySettings;
+
+ transportService.registerHandler(Actions.START_RECOVERY, new StartRecoveryTransportRequestHandler());
+ this.internalActionTimeout = componentSettings.getAsTime("internal_action_timeout", TimeValue.timeValueMinutes(15));
+ this.internalActionLongTimeout = new TimeValue(internalActionTimeout.millis() * 2);
+ }
+
+ private RecoveryResponse recover(final StartRecoveryRequest request) {
+ final InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
+
+ // verify that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
+ // the index operations will not be routed to it properly
+ RoutingNode node = clusterService.state().readOnlyRoutingNodes().node(request.targetNode().id());
+ if (node == null) {
+ throw new DelayRecoveryException("source node does not have the node [" + request.targetNode() + "] in its state yet..");
+ }
+ ShardRouting targetShardRouting = null;
+ for (ShardRouting shardRouting : node) {
+ if (shardRouting.shardId().equals(request.shardId())) {
+ targetShardRouting = shardRouting;
+ break;
+ }
+ }
+ if (targetShardRouting == null) {
+ throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node");
+ }
+ if (!targetShardRouting.initializing()) {
+ throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
+ }
+
+ logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
+ final RecoveryResponse response = new RecoveryResponse();
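+        // the recovery runs in three phases: phase1 copies the index files while the shard
+        // keeps indexing, phase2 replays the translog operations that accumulated meanwhile,
+        // and phase3 replays the remaining operations while writes are locked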
+ shard.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
+ long totalSize = 0;
+ long existingTotalSize = 0;
+ try {
+ StopWatch stopWatch = new StopWatch().start();
+
+ for (String name : snapshot.getFiles()) {
+ StoreFileMetaData md = shard.store().metaData(name);
+ boolean useExisting = false;
+ if (request.existingFiles().containsKey(name)) {
+ // we don't compute checksum for segments, so always recover them
+ if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
+ response.phase1ExistingFileNames.add(name);
+ response.phase1ExistingFileSizes.add(md.length());
+ existingTotalSize += md.length();
+ useExisting = true;
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
+ }
+ }
+ }
+ if (!useExisting) {
+ if (request.existingFiles().containsKey(name)) {
+ logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
+ } else {
+ logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
+ }
+ response.phase1FileNames.add(name);
+ response.phase1FileSizes.add(md.length());
+ }
+ totalSize += md.length();
+ }
+ response.phase1TotalSize = totalSize;
+ response.phase1ExistingTotalSize = existingTotalSize;
+
+ logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
+
+ RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes,
+ response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+
+ final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
+ final AtomicReference<Throwable> lastException = new AtomicReference<Throwable>();
+ int fileIndex = 0;
+ for (final String name : response.phase1FileNames) {
+ ThreadPoolExecutor pool;
+ long fileSize = response.phase1FileSizes.get(fileIndex);
+                        if (fileSize > RecoverySettings.SMALL_FILE_CUTOFF_BYTES) {
+ pool = recoverySettings.concurrentStreamPool();
+ } else {
+ pool = recoverySettings.concurrentSmallFileStreamPool();
+ }
+
+ pool.execute(new Runnable() {
+ @Override
+ public void run() {
+ IndexInput indexInput = null;
+ try {
+ final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
+ byte[] buf = new byte[BUFFER_SIZE];
+ StoreFileMetaData md = shard.store().metaData(name);
+ // TODO: maybe use IOContext.READONCE?
+ indexInput = shard.store().openInputRaw(name, IOContext.READ);
+ boolean shouldCompressRequest = recoverySettings.compress();
+ if (CompressorFactory.isCompressed(indexInput)) {
+ shouldCompressRequest = false;
+ }
+
+ long len = indexInput.length();
+ long readCount = 0;
+ while (readCount < len) {
+ if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
+ throw new IndexShardClosedException(shard.shardId());
+ }
+ int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
+ long position = indexInput.getFilePointer();
+
+ if (recoverySettings.rateLimiter() != null) {
+ recoverySettings.rateLimiter().pause(toRead);
+ }
+
+ indexInput.readBytes(buf, 0, toRead, false);
+ BytesArray content = new BytesArray(buf, 0, toRead);
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), name, position, len, md.checksum(), content),
+ TransportRequestOptions.options().withCompress(shouldCompressRequest).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+ readCount += toRead;
+ }
+ } catch (Throwable e) {
+ lastException.set(e);
+ } finally {
+ IOUtils.closeWhileHandlingException(indexInput);
+ latch.countDown();
+ }
+ }
+ });
+ fileIndex++;
+ }
+
+ latch.await();
+
+ if (lastException.get() != null) {
+ throw lastException.get();
+ }
+
+ // now, set the clean files request
+ Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+
+ stopWatch.stop();
+ logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
+ response.phase1Time = stopWatch.totalTime().millis();
+ } catch (Throwable e) {
+ throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
+ }
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
+ if (shard.state() == IndexShardState.CLOSED) {
+ throw new IndexShardClosedException(request.shardId());
+ }
+ logger.trace("[{}][{}] recovery [phase2] to {}: start", request.shardId().index().name(), request.shardId().id(), request.targetNode());
+ StopWatch stopWatch = new StopWatch().start();
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+ stopWatch.stop();
+ response.startTime = stopWatch.totalTime().millis();
+ logger.trace("[{}][{}] recovery [phase2] to {}: start took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
+
+ logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
+ stopWatch = new StopWatch().start();
+ int totalOperations = sendSnapshot(snapshot);
+ stopWatch.stop();
+ logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
+ response.phase2Time = stopWatch.totalTime().millis();
+ response.phase2Operations = totalOperations;
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
+ if (shard.state() == IndexShardState.CLOSED) {
+ throw new IndexShardClosedException(request.shardId());
+ }
+ logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
+ StopWatch stopWatch = new StopWatch().start();
+ int totalOperations = sendSnapshot(snapshot);
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE, new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+ if (request.markAsRelocated()) {
+ // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
+ try {
+ shard.relocated("to " + request.targetNode());
+ } catch (IllegalIndexShardStateException e) {
+                        // we can ignore this exception since, on the other node, when it moved to phase3
+                        // it will also send shard started, which might cause the index shard we work against
+                        // to be closed by the time we get to the relocated method
+ }
+ }
+ stopWatch.stop();
+ logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
+ response.phase3Time = stopWatch.totalTime().millis();
+ response.phase3Operations = totalOperations;
+ }
+
+ private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
+ int ops = 0;
+ long size = 0;
+ int totalOperations = 0;
+ List<Translog.Operation> operations = Lists.newArrayList();
+ while (snapshot.hasNext()) {
+ if (shard.state() == IndexShardState.CLOSED) {
+ throw new IndexShardClosedException(request.shardId());
+ }
+ Translog.Operation operation = snapshot.next();
+ operations.add(operation);
+ ops += 1;
+ size += operation.estimateSize();
+ totalOperations++;
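+                // flush the current batch once it reaches the configured operation count or byte size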
+ if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {
+
+                    // don't throttle the translog, since we lock for phase3 indexing and need to move it as
+                    // fast as possible. Note, since we index docs to replicas while the index files are
+                    // recovered, the lock can potentially be removed, in which case it might make sense to
+                    // re-enable throttling in this phase
+// if (recoverySettings.rateLimiter() != null) {
+// recoverySettings.rateLimiter().pause(size);
+// }
+
+ RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations);
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, TransportRequestOptions.options().withCompress(recoverySettings.compress()).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+ ops = 0;
+ size = 0;
+ operations.clear();
+ }
+ }
+ // send the leftover
+ if (!operations.isEmpty()) {
+ RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations);
+ transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, TransportRequestOptions.options().withCompress(recoverySettings.compress()).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+ }
+ return totalOperations;
+ }
+ });
+ return response;
+ }
+
+ class StartRecoveryTransportRequestHandler extends BaseTransportRequestHandler<StartRecoveryRequest> {
+
+ @Override
+ public StartRecoveryRequest newInstance() {
+ return new StartRecoveryRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel) throws Exception {
+ RecoveryResponse response = recover(request);
+ channel.sendResponse(response);
+ }
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java
new file mode 100644
index 0000000..6d0d1c5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.apache.lucene.store.IndexOutput;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.store.Store;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Tracks the state and progress of an ongoing peer recovery on the target node.
+ */
+public class RecoveryStatus {
+
+    public enum Stage {
+ INIT,
+ INDEX,
+ TRANSLOG,
+ FINALIZE,
+ DONE
+ }
+
+ final ShardId shardId;
+ final long recoveryId;
+ final InternalIndexShard indexShard;
+
+ public RecoveryStatus(long recoveryId, InternalIndexShard indexShard) {
+ this.recoveryId = recoveryId;
+ this.indexShard = indexShard;
+ this.shardId = indexShard.shardId();
+ }
+
+ volatile Thread recoveryThread;
+ private volatile boolean canceled;
+ volatile boolean sentCanceledToSource;
+
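+    // outputs for files currently being written during phase1; nulled out once the recovery
+    // is canceled so that in-flight chunk writes observe the cancellation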
+ private volatile ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();
+ ConcurrentMap<String, String> checksums = ConcurrentCollections.newConcurrentMap();
+
+ final long startTime = System.currentTimeMillis();
+ long time;
+ List<String> phase1FileNames;
+ List<Long> phase1FileSizes;
+ List<String> phase1ExistingFileNames;
+ List<Long> phase1ExistingFileSizes;
+ long phase1TotalSize;
+ long phase1ExistingTotalSize;
+
+ volatile Stage stage = Stage.INIT;
+ volatile long currentTranslogOperations = 0;
+ AtomicLong currentFilesSize = new AtomicLong();
+
+ public long startTime() {
+ return startTime;
+ }
+
+ public long time() {
+ return this.time;
+ }
+
+ public long phase1TotalSize() {
+ return phase1TotalSize;
+ }
+
+ public long phase1ExistingTotalSize() {
+ return phase1ExistingTotalSize;
+ }
+
+ public Stage stage() {
+ return stage;
+ }
+
+ public long currentTranslogOperations() {
+ return currentTranslogOperations;
+ }
+
+ public long currentFilesSize() {
+ return currentFilesSize.get();
+ }
+
+ public boolean isCanceled() {
+ return canceled;
+ }
+
+ public synchronized void cancel() {
+ canceled = true;
+ }
+
+ public IndexOutput getOpenIndexOutput(String key) {
+ final ConcurrentMap<String, IndexOutput> outputs = openIndexOutputs;
+ if (canceled || outputs == null) {
+ return null;
+ }
+ return outputs.get(key);
+ }
+
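+    // cancels the recovery and hands back all still-open outputs so the caller can close
+    // them; later lookups will see a null map and bail out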
+ public synchronized Set<Entry<String, IndexOutput>> cancleAndClearOpenIndexInputs() {
+ cancel();
+ final ConcurrentMap<String, IndexOutput> outputs = openIndexOutputs;
+ openIndexOutputs = null;
+ if (outputs == null) {
+ return null;
+ }
+ Set<Entry<String, IndexOutput>> entrySet = outputs.entrySet();
+ return entrySet;
+ }
+
+
+ public IndexOutput removeOpenIndexOutputs(String name) {
+ final ConcurrentMap<String, IndexOutput> outputs = openIndexOutputs;
+ if (outputs == null) {
+ return null;
+ }
+ return outputs.remove(name);
+ }
+
+ public synchronized IndexOutput openAndPutIndexOutput(String key, String name, Store store) throws IOException {
+ if (isCanceled()) {
+ return null;
+ }
+ final ConcurrentMap<String, IndexOutput> outputs = openIndexOutputs;
+ IndexOutput indexOutput = store.createOutputRaw(name);
+ outputs.put(key, indexOutput);
+ return indexOutput;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
new file mode 100644
index 0000000..1a22fba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -0,0 +1,660 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.engine.RecoveryEngineException;
+import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+
+/**
+ * The recovery target handles peer recoveries on the node that a shard is being recovered to.
+ * <p/>
+ * <p>Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and
+ * not several of them (since we don't allocate several shard replicas to the same node).
+ */
+public class RecoveryTarget extends AbstractComponent {
+
+ public static class Actions {
+ public static final String FILES_INFO = "index/shard/recovery/filesInfo";
+ public static final String FILE_CHUNK = "index/shard/recovery/fileChunk";
+ public static final String CLEAN_FILES = "index/shard/recovery/cleanFiles";
+ public static final String TRANSLOG_OPS = "index/shard/recovery/translogOps";
+ public static final String PREPARE_TRANSLOG = "index/shard/recovery/prepareTranslog";
+ public static final String FINALIZE = "index/shard/recovery/finalize";
+ }
+
+ private final ThreadPool threadPool;
+
+ private final TransportService transportService;
+
+ private final IndicesService indicesService;
+
+ private final RecoverySettings recoverySettings;
+
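+    // recoveries currently running on this node, keyed by their recovery id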
+ private final ConcurrentMapLong<RecoveryStatus> onGoingRecoveries = ConcurrentCollections.newConcurrentMapLong();
+
+ @Inject
+ public RecoveryTarget(Settings settings, ThreadPool threadPool, TransportService transportService, IndicesService indicesService,
+ IndicesLifecycle indicesLifecycle, RecoverySettings recoverySettings) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.transportService = transportService;
+ this.indicesService = indicesService;
+ this.recoverySettings = recoverySettings;
+
+ transportService.registerHandler(Actions.FILES_INFO, new FilesInfoRequestHandler());
+ transportService.registerHandler(Actions.FILE_CHUNK, new FileChunkTransportRequestHandler());
+ transportService.registerHandler(Actions.CLEAN_FILES, new CleanFilesRequestHandler());
+ transportService.registerHandler(Actions.PREPARE_TRANSLOG, new PrepareForTranslogOperationsRequestHandler());
+ transportService.registerHandler(Actions.TRANSLOG_OPS, new TranslogOperationsRequestHandler());
+ transportService.registerHandler(Actions.FINALIZE, new FinalizeRecoveryRequestHandler());
+
+ indicesLifecycle.addListener(new IndicesLifecycle.Listener() {
+ @Override
+ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard) {
+ if (indexShard != null) {
+ removeAndCleanOnGoingRecovery(findRecoveryByShard(indexShard));
+ }
+ }
+ });
+ }
+
+ public RecoveryStatus peerRecoveryStatus(ShardId shardId) {
+ RecoveryStatus peerRecoveryStatus = findRecoveryByShardId(shardId);
+ if (peerRecoveryStatus == null) {
+ return null;
+ }
+ // update how long it takes if we are still recovering...
+ if (peerRecoveryStatus.startTime > 0 && peerRecoveryStatus.stage != RecoveryStatus.Stage.DONE) {
+ peerRecoveryStatus.time = System.currentTimeMillis() - peerRecoveryStatus.startTime;
+ }
+ return peerRecoveryStatus;
+ }
+
+ public void cancelRecovery(IndexShard indexShard) {
+ RecoveryStatus recoveryStatus = findRecoveryByShard(indexShard);
+        // this can happen if the recovery source was canceled first
+ if (recoveryStatus == null) {
+ return;
+ }
+ if (recoveryStatus.sentCanceledToSource) {
+ return;
+ }
+ recoveryStatus.cancel();
+ try {
+ if (recoveryStatus.recoveryThread != null) {
+ recoveryStatus.recoveryThread.interrupt();
+ }
+            // give it a grace period for the cancellation to actually be sent to the source
+ final long sleepTime = 100;
+ final long maxSleepTime = 10000;
+ long rounds = Math.round(maxSleepTime / sleepTime);
+ while (!recoveryStatus.sentCanceledToSource && rounds > 0) {
+ rounds--;
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ break; // interrupted - step out!
+ }
+ }
+ } finally {
+ removeAndCleanOnGoingRecovery(recoveryStatus);
+ }
+
+ }
+
+ public void startRecovery(final StartRecoveryRequest request, final InternalIndexShard indexShard, final RecoveryListener listener) {
+ try {
+ indexShard.recovering("from " + request.sourceNode());
+ } catch (IllegalIndexShardStateException e) {
+ // that's fine, since we might be called concurrently, just ignore this, we are already recovering
+ listener.onIgnoreRecovery(false, "already in recovering process, " + e.getMessage());
+ return;
+ }
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ // create a new recovery status, and process...
+ RecoveryStatus recoveryStatus = new RecoveryStatus(request.recoveryId(), indexShard);
+ onGoingRecoveries.put(recoveryStatus.recoveryId, recoveryStatus);
+ doRecovery(request, recoveryStatus, listener);
+ }
+ });
+ }
+
+ public void retryRecovery(final StartRecoveryRequest request, final RecoveryStatus status, final RecoveryListener listener) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ doRecovery(request, status, listener);
+ }
+ });
+ }
+
+ private void doRecovery(final StartRecoveryRequest request, final RecoveryStatus recoveryStatus, final RecoveryListener listener) {
+ if (request.sourceNode() == null) {
+ listener.onIgnoreRecovery(false, "No node to recover from, retry on next cluster state update");
+ return;
+ }
+ final InternalIndexShard shard = recoveryStatus.indexShard;
+ if (shard == null) {
+ listener.onIgnoreRecovery(false, "shard missing locally, stop recovery");
+ return;
+ }
+ if (shard.state() == IndexShardState.CLOSED) {
+ listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
+ return;
+ }
+ if (recoveryStatus.isCanceled()) {
+ // don't remove it, the cancellation code will remove it...
+ listener.onIgnoreRecovery(false, "canceled recovery");
+ return;
+ }
+
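+        // remember the current thread so cancelRecovery can interrupt a recovery that is
+        // blocked on a network call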
+ recoveryStatus.recoveryThread = Thread.currentThread();
+
+ try {
+ logger.trace("[{}][{}] starting recovery from {}", request.shardId().index().name(), request.shardId().id(), request.sourceNode());
+
+ StopWatch stopWatch = new StopWatch().start();
+ RecoveryResponse recoveryResponse = transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, new FutureTransportResponseHandler<RecoveryResponse>() {
+ @Override
+ public RecoveryResponse newInstance() {
+ return new RecoveryResponse();
+ }
+ }).txGet();
+ if (shard.state() == IndexShardState.CLOSED) {
+ removeAndCleanOnGoingRecovery(recoveryStatus);
+ listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
+ return;
+ }
+ stopWatch.stop();
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[').append(request.shardId().index().name()).append(']').append('[').append(request.shardId().id()).append("] ");
+ sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(stopWatch.totalTime()).append("]\n");
+ sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
+ .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
+ .append("\n");
+ sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
+ sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
+ sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations")
+ .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
+ .append("\n");
+ sb.append(" phase3: recovered [").append(recoveryResponse.phase3Operations).append("]").append(" transaction log operations")
+ .append(", took [").append(timeValueMillis(recoveryResponse.phase3Time)).append("]");
+ logger.trace(sb.toString());
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("recovery completed from [{}], took [{}]", request.shardId(), request.sourceNode(), stopWatch.totalTime());
+ }
+ removeAndCleanOnGoingRecovery(recoveryStatus);
+ listener.onRecoveryDone();
+ } catch (Throwable e) {
+ if (recoveryStatus.isCanceled()) {
+ // don't remove it, the cancellation code will remove it...
+ listener.onIgnoreRecovery(false, "canceled recovery");
+ return;
+ }
+ if (shard.state() == IndexShardState.CLOSED) {
+ removeAndCleanOnGoingRecovery(recoveryStatus);
+ listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
+ return;
+ }
+ Throwable cause = ExceptionsHelper.unwrapCause(e);
+ if (cause instanceof RecoveryEngineException) {
+ // unwrap an exception that was thrown as part of the recovery
+ cause = cause.getCause();
+ }
+ // unwrap a second time, in case the exception was doubly wrapped by the transport layer
+ cause = ExceptionsHelper.unwrapCause(cause);
+ if (cause instanceof RecoveryEngineException) {
+ // unwrap an exception that was thrown as part of the recovery
+ cause = cause.getCause();
+ }
+
+ // here, we would add checks against exception that need to be retried (and not removeAndClean in this case)
+
+ if (cause instanceof IndexShardNotStartedException || cause instanceof IndexMissingException || cause instanceof IndexShardMissingException) {
+ // if the target is not ready yet, retry
+ listener.onRetryRecovery(TimeValue.timeValueMillis(500), recoveryStatus);
+ return;
+ }
+
+ if (cause instanceof DelayRecoveryException) {
+ listener.onRetryRecovery(TimeValue.timeValueMillis(500), recoveryStatus);
+ return;
+ }
+
+ // here, we check against ignore recovery options
+
+ // in general, there is no need to clean the shard on an ignored recovery, since we want to try and reuse it later;
+ // it will get deleted by the IndicesStore if all copies are allocated elsewhere and no shard remains on this node...
+
+ removeAndCleanOnGoingRecovery(recoveryStatus);
+
+ if (cause instanceof ConnectTransportException) {
+ listener.onIgnoreRecovery(true, "source node disconnected (" + request.sourceNode() + ")");
+ return;
+ }
+
+ if (cause instanceof IndexShardClosedException) {
+ listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
+ return;
+ }
+
+ if (cause instanceof AlreadyClosedException) {
+ listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
+ return;
+ }
+
+ logger.trace("[{}][{}] recovery from [{}] failed", e, request.shardId().index().name(), request.shardId().id(), request.sourceNode());
+ listener.onRecoveryFailure(new RecoveryFailedException(request, e), true);
+ }
+ }
+
+ public static interface RecoveryListener {
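+ // Callback contract, as exercised by doRecovery above (illustrative summary, not javadoc
+ // from the original source):
+ // - onRecoveryDone: the recovery finished successfully and its status has been cleaned up.
+ // - onRetryRecovery: the source was not ready (or asked for a delay); the caller is expected
+ //   to re-schedule via retryRecovery after retryAfter.
+ // - onIgnoreRecovery: the recovery was abandoned for the given reason; removeShard hints
+ //   whether the caller should also drop the local shard.
+ // - onRecoveryFailure: an unrecoverable failure; sendShardFailure indicates whether the
+ //   failure should be reported so the shard gets reallocated.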
+ void onRecoveryDone();
+
+ void onRetryRecovery(TimeValue retryAfter, RecoveryStatus status);
+
+ void onIgnoreRecovery(boolean removeShard, String reason);
+
+ void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailure);
+ }
+
+ @Nullable
+ private RecoveryStatus findRecoveryByShardId(ShardId shardId) {
+ for (RecoveryStatus recoveryStatus : onGoingRecoveries.values()) {
+ if (recoveryStatus.shardId.equals(shardId)) {
+ return recoveryStatus;
+ }
+ }
+ return null;
+ }
+
+ @Nullable
+ private RecoveryStatus findRecoveryByShard(IndexShard indexShard) {
+ for (RecoveryStatus recoveryStatus : onGoingRecoveries.values()) {
+ if (recoveryStatus.indexShard == indexShard) {
+ return recoveryStatus;
+ }
+ }
+ return null;
+ }
+
+ private void removeAndCleanOnGoingRecovery(@Nullable RecoveryStatus status) {
+ if (status == null) {
+ return;
+ }
+ // clean it from the on going recoveries since it is being closed
+ status = onGoingRecoveries.remove(status.recoveryId);
+ if (status == null) {
+ return;
+ }
+ // mark it as canceled as well, in case there are still in-flight requests
+ // arriving for this recovery target
+ status.cancel();
+ // clean open index outputs
+ Set<Entry<String, IndexOutput>> entrySet = status.cancleAndClearOpenIndexInputs();
+ Iterator<Entry<String, IndexOutput>> iterator = entrySet.iterator();
+ while (iterator.hasNext()) {
+ Map.Entry<String, IndexOutput> entry = iterator.next();
+ synchronized (entry.getValue()) {
+ IOUtils.closeWhileHandlingException(entry.getValue());
+ }
+ iterator.remove();
+ }
+ status.checksums = null;
+ }
+
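+ // The handlers below implement the target side of the recovery wire protocol. A recovery
+ // walks through them roughly in order: FilesInfo (phase 1 bookkeeping, stage INDEX),
+ // FileChunk (phase 1 payload), CleanFiles (rename temp files, write checksums),
+ // PrepareForTranslogOperations (stage TRANSLOG), TranslogOperations (phase 2/3 replay),
+ // and FinalizeRecovery (stage FINALIZE, then DONE).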
+ class PrepareForTranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {
+
+ @Override
+ public RecoveryPrepareForTranslogOperationsRequest newInstance() {
+ return new RecoveryPrepareForTranslogOperationsRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
+ RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
+ if (onGoingRecovery == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+
+ onGoingRecovery.stage = RecoveryStatus.Stage.TRANSLOG;
+
+ onGoingRecovery.indexShard.performRecoveryPrepareForTranslog();
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ class FinalizeRecoveryRequestHandler extends BaseTransportRequestHandler<RecoveryFinalizeRecoveryRequest> {
+
+ @Override
+ public RecoveryFinalizeRecoveryRequest newInstance() {
+ return new RecoveryFinalizeRecoveryRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
+ RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
+ if (onGoingRecovery == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+
+ onGoingRecovery.stage = RecoveryStatus.Stage.FINALIZE;
+ onGoingRecovery.indexShard.performRecoveryFinalization(false, onGoingRecovery);
+ onGoingRecovery.time = System.currentTimeMillis() - onGoingRecovery.startTime;
+ onGoingRecovery.stage = RecoveryStatus.Stage.DONE;
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ class TranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryTranslogOperationsRequest> {
+
+ @Override
+ public RecoveryTranslogOperationsRequest newInstance() {
+ return new RecoveryTranslogOperationsRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception {
+ RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
+ if (onGoingRecovery == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+
+ InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
+ for (Translog.Operation operation : request.operations()) {
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+ shard.performRecoveryOperation(operation);
+ onGoingRecovery.currentTranslogOperations++;
+ }
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ class FilesInfoRequestHandler extends BaseTransportRequestHandler<RecoveryFilesInfoRequest> {
+
+ @Override
+ public RecoveryFilesInfoRequest newInstance() {
+ return new RecoveryFilesInfoRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception {
+ RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
+ if (onGoingRecovery == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+
+ onGoingRecovery.phase1FileNames = request.phase1FileNames;
+ onGoingRecovery.phase1FileSizes = request.phase1FileSizes;
+ onGoingRecovery.phase1ExistingFileNames = request.phase1ExistingFileNames;
+ onGoingRecovery.phase1ExistingFileSizes = request.phase1ExistingFileSizes;
+ onGoingRecovery.phase1TotalSize = request.phase1TotalSize;
+ onGoingRecovery.phase1ExistingTotalSize = request.phase1ExistingTotalSize;
+ onGoingRecovery.stage = RecoveryStatus.Stage.INDEX;
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ class CleanFilesRequestHandler extends BaseTransportRequestHandler<RecoveryCleanFilesRequest> {
+
+ @Override
+ public RecoveryCleanFilesRequest newInstance() {
+ return new RecoveryCleanFilesRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
+ RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
+ if (onGoingRecovery == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+
+ Store store = onGoingRecovery.indexShard.store();
+ // first, move the files that were created with the recovery-id prefix to their
+ // actual names; it's OK if we have a corrupted index here, since we have replicas
+ // to recover from in case of a full cluster shutdown just as this code executes...
+ String prefix = "recovery." + onGoingRecovery.startTime + ".";
+ Set<String> filesToRename = Sets.newHashSet();
+ for (String existingFile : store.directory().listAll()) {
+ if (existingFile.startsWith(prefix)) {
+ filesToRename.add(existingFile.substring(prefix.length(), existingFile.length()));
+ }
+ }
+ Exception failureToRename = null;
+ if (!filesToRename.isEmpty()) {
+ // first, go and delete the existing ones
+ final Directory directory = store.directory();
+ for (String file : filesToRename) {
+ try {
+ directory.deleteFile(file);
+ } catch (Throwable ex) {
+ logger.debug("failed to delete file [{}]", ex, file);
+ }
+ }
+ for (String fileToRename : filesToRename) {
+ // now, rename the files... and fail if it doesn't work
+ store.renameFile(prefix + fileToRename, fileToRename);
+ }
+ }
+ // now write checksums
+ store.writeChecksums(onGoingRecovery.checksums);
+
+ for (String existingFile : store.directory().listAll()) {
+ // don't delete the files that are part of the snapshot, or the checksums file (note, this is extra protection since the Store won't delete checksums)
+ if (!request.snapshotFiles().contains(existingFile) && !Store.isChecksum(existingFile)) {
+ try {
+ store.directory().deleteFile(existingFile);
+ } catch (Exception e) {
+ // ignore, we don't really care, will get deleted later on
+ }
+ }
+ }
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+
+ class FileChunkTransportRequestHandler extends BaseTransportRequestHandler<RecoveryFileChunkRequest> {
+
+ @Override
+ public RecoveryFileChunkRequest newInstance() {
+ return new RecoveryFileChunkRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
+ RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
+ if (onGoingRecovery == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+
+ Store store = onGoingRecovery.indexShard.store();
+
+ IndexOutput indexOutput;
+ if (request.position() == 0) {
+ // first request
+ onGoingRecovery.checksums.remove(request.name());
+ indexOutput = onGoingRecovery.removeOpenIndexOutputs(request.name());
+ IOUtils.closeWhileHandlingException(indexOutput);
+ // we create an output with no checksum, since the checksum cannot be computed from the
+ // raw bytes as they are written (chunks may seek); the checksum entry is recorded once copying is done
+
+ // also, we check if the file already exists: if it does, we write to a file name based
+ // on the current recovery "id" and make the switch later. The reason is that we only
+ // want to overwrite the index files once everything has been copied over, and not end
+ // up with a half-moved index
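+ // for example (hypothetical file name): if "_0.cfs" already exists locally, the incoming
+ // chunks are written to "recovery.<startTime>._0.cfs", and CleanFilesRequestHandler renames
+ // it back to "_0.cfs" once all files have arrived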
+
+ String fileName = request.name();
+ if (store.directory().fileExists(fileName)) {
+ fileName = "recovery." + onGoingRecovery.startTime + "." + fileName;
+ }
+ indexOutput = onGoingRecovery.openAndPutIndexOutput(request.name(), fileName, store);
+ } else {
+ indexOutput = onGoingRecovery.getOpenIndexOutput(request.name());
+ }
+ if (indexOutput == null) {
+ // shard is getting closed on us
+ throw new IndexShardClosedException(request.shardId());
+ }
+ boolean success = false;
+ synchronized (indexOutput) {
+ try {
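+ // throttle inbound recovery traffic when a rate limiter is configured in the recovery settings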
+ if (recoverySettings.rateLimiter() != null) {
+ recoverySettings.rateLimiter().pause(request.content().length());
+ }
+ BytesReference content = request.content();
+ if (!content.hasArray()) {
+ content = content.toBytesArray();
+ }
+ indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
+ onGoingRecovery.currentFilesSize.addAndGet(request.length());
+ if (indexOutput.getFilePointer() == request.length()) {
+ // we are done
+ indexOutput.close();
+ // write the checksum
+ if (request.checksum() != null) {
+ onGoingRecovery.checksums.put(request.name(), request.checksum());
+ }
+ store.directory().sync(Collections.singleton(request.name()));
+ IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
+ assert remove == indexOutput;
+ }
+ success = true;
+ } finally {
+ if (!success || onGoingRecovery.isCanceled()) {
+ IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
+ assert remove == indexOutput;
+ IOUtils.closeWhileHandlingException(indexOutput);
+ }
+ }
+ }
+ if (onGoingRecovery.isCanceled()) {
+ onGoingRecovery.sentCanceledToSource = true;
+ throw new IndexShardClosedException(request.shardId());
+ }
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java
new file mode 100644
index 0000000..1856732
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogStreams;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A transport request carrying a batch of translog operations to be replayed on the recovery target.
+ */
+class RecoveryTranslogOperationsRequest extends TransportRequest {
+
+ private long recoveryId;
+ private ShardId shardId;
+ private List<Translog.Operation> operations;
+
+ RecoveryTranslogOperationsRequest() {
+ }
+
+ RecoveryTranslogOperationsRequest(long recoveryId, ShardId shardId, List<Translog.Operation> operations) {
+ this.recoveryId = recoveryId;
+ this.shardId = shardId;
+ this.operations = operations;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public List<Translog.Operation> operations() {
+ return operations;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ int size = in.readVInt();
+ operations = Lists.newArrayListWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ operations.add(TranslogStreams.readTranslogOperation(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+ out.writeVInt(operations.size());
+ for (Translog.Operation operation : operations) {
+ TranslogStreams.writeTranslogOperation(out, operation);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java
new file mode 100644
index 0000000..51198c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * A transport request sent to the source node, asking it to start recovering the given shard onto the target node.
+ */
+public class StartRecoveryRequest extends TransportRequest {
+
+ private static final AtomicLong recoveryIdGenerator = new AtomicLong();
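+ // recovery ids are generated locally on the target node; they only need to be unique per
+ // node, since they are used to correlate the transport requests belonging to one recovery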
+
+ private long recoveryId;
+
+ private ShardId shardId;
+
+ private DiscoveryNode sourceNode;
+
+ private DiscoveryNode targetNode;
+
+ private boolean markAsRelocated;
+
+ private Map<String, StoreFileMetaData> existingFiles;
+
+ StartRecoveryRequest() {
+ }
+
+ /**
+ * Start recovery request.
+ *
+ * @param shardId The shard to recover
+ * @param sourceNode The node to recover from
+ * @param targetNode The node to recover to
+ * @param markAsRelocated Whether the source shard should be marked as relocated once the recovery completes
+ * @param existingFiles The metadata of the files already present on the target, keyed by file name
+ */
+ public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Map<String, StoreFileMetaData> existingFiles) {
+ this.recoveryId = recoveryIdGenerator.incrementAndGet();
+ this.shardId = shardId;
+ this.sourceNode = sourceNode;
+ this.targetNode = targetNode;
+ this.markAsRelocated = markAsRelocated;
+ this.existingFiles = existingFiles;
+ }
+
+ public long recoveryId() {
+ return this.recoveryId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public DiscoveryNode sourceNode() {
+ return sourceNode;
+ }
+
+ public DiscoveryNode targetNode() {
+ return targetNode;
+ }
+
+ public boolean markAsRelocated() {
+ return markAsRelocated;
+ }
+
+ public Map<String, StoreFileMetaData> existingFiles() {
+ return existingFiles;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ recoveryId = in.readLong();
+ shardId = ShardId.readShardId(in);
+ sourceNode = DiscoveryNode.readNode(in);
+ targetNode = DiscoveryNode.readNode(in);
+ markAsRelocated = in.readBoolean();
+ int size = in.readVInt();
+ existingFiles = Maps.newHashMapWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
+ existingFiles.put(md.name(), md);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(recoveryId);
+ shardId.writeTo(out);
+ sourceNode.writeTo(out);
+ targetNode.writeTo(out);
+ out.writeBoolean(markAsRelocated);
+ out.writeVInt(existingFiles.size());
+ for (StoreFileMetaData md : existingFiles.values()) {
+ md.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
new file mode 100644
index 0000000..f4985c2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import org.apache.lucene.store.StoreRateLimiting;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.File;
+
+/**
+ * A node-level component that applies store throttling settings and deletes local shard data
+ * that is no longer allocated to this node.
+ */
+public class IndicesStore extends AbstractComponent implements ClusterStateListener {
+
+ public static final String INDICES_STORE_THROTTLE_TYPE = "indices.store.throttle.type";
+ public static final String INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC = "indices.store.throttle.max_bytes_per_sec";
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ String rateLimitingType = settings.get(INDICES_STORE_THROTTLE_TYPE, IndicesStore.this.rateLimitingType);
+ // try and parse the type
+ StoreRateLimiting.Type.fromString(rateLimitingType);
+ if (!rateLimitingType.equals(IndicesStore.this.rateLimitingType)) {
+ logger.info("updating indices.store.throttle.type from [{}] to [{}]", IndicesStore.this.rateLimitingType, rateLimitingType);
+ IndicesStore.this.rateLimitingType = rateLimitingType;
+ IndicesStore.this.rateLimiting.setType(rateLimitingType);
+ }
+
+ ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, IndicesStore.this.rateLimitingThrottle);
+ if (!rateLimitingThrottle.equals(IndicesStore.this.rateLimitingThrottle)) {
+ logger.info("updating indices.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", IndicesStore.this.rateLimitingThrottle, rateLimitingThrottle, IndicesStore.this.rateLimitingType);
+ IndicesStore.this.rateLimitingThrottle = rateLimitingThrottle;
+ IndicesStore.this.rateLimiting.setMaxRate(rateLimitingThrottle);
+ }
+ }
+ }
+
+ private final NodeEnvironment nodeEnv;
+
+ private final NodeSettingsService nodeSettingsService;
+
+ private final IndicesService indicesService;
+
+ private final ClusterService clusterService;
+
+ private volatile String rateLimitingType;
+ private volatile ByteSizeValue rateLimitingThrottle;
+ private final StoreRateLimiting rateLimiting = new StoreRateLimiting();
+
+ private final ApplySettings applySettings = new ApplySettings();
+
+ @Inject
+ public IndicesStore(Settings settings, NodeEnvironment nodeEnv, NodeSettingsService nodeSettingsService, IndicesService indicesService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings);
+ this.nodeEnv = nodeEnv;
+ this.nodeSettingsService = nodeSettingsService;
+ this.indicesService = indicesService;
+ this.clusterService = clusterService;
+ // we limit to 20 MB/sec by default, with the default type set to merge since 0.90.1
+ this.rateLimitingType = componentSettings.get("throttle.type", StoreRateLimiting.Type.MERGE.name());
+ rateLimiting.setType(rateLimitingType);
+ this.rateLimitingThrottle = componentSettings.getAsBytesSize("throttle.max_bytes_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB));
+ rateLimiting.setMaxRate(rateLimitingThrottle);
+
+ logger.debug("using indices.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle);
+
+ nodeSettingsService.addListener(applySettings);
+ clusterService.addLast(this);
+ }
+
+ public StoreRateLimiting rateLimiting() {
+ return this.rateLimiting;
+ }
+
+ public void close() {
+ nodeSettingsService.removeListener(applySettings);
+ clusterService.remove(this);
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (!event.routingTableChanged()) {
+ return;
+ }
+
+ if (event.state().blocks().disableStatePersistence()) {
+ return;
+ }
+
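+ // a local shard copy may only be deleted when every copy in its replication group is
+ // started on a live node and none of the copies is allocated to, or relocating to, this
+ // node; the checks below are deliberately conservative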
+ for (IndexRoutingTable indexRoutingTable : event.state().routingTable()) {
+ // Note, closed indices will not have any routing information, so won't be deleted
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ ShardId shardId = indexShardRoutingTable.shardId();
+ // a shard can be deleted if all its copies are active and it's not allocated on this node
+ boolean shardCanBeDeleted = true;
+ if (indexShardRoutingTable.size() == 0) {
+ // this should not really happen; there should always be at least 1 (primary) shard in a
+ // shard replication group. In any case, this protects us from deleting something by mistake
+ shardCanBeDeleted = false;
+ } else {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ // be conservative here: require started, not merely active
+ if (!shardRouting.started()) {
+ shardCanBeDeleted = false;
+ break;
+ }
+
+ // if the allocated or relocating node id doesn't exist in the cluster state, it's a stale
+ // node; make sure we don't do anything with this shard until the routing table has properly
+ // been rerouted to reflect the fact that the node no longer exists
+ if (!event.state().nodes().nodeExists(shardRouting.currentNodeId())) {
+ shardCanBeDeleted = false;
+ break;
+ }
+ if (shardRouting.relocatingNodeId() != null) {
+ if (!event.state().nodes().nodeExists(shardRouting.relocatingNodeId())) {
+ shardCanBeDeleted = false;
+ break;
+ }
+ }
+
+ // check if the shard is active on the current node or is being relocated to our node
+ String localNodeId = clusterService.localNode().id();
+ if (localNodeId.equals(shardRouting.currentNodeId()) || localNodeId.equals(shardRouting.relocatingNodeId())) {
+ shardCanBeDeleted = false;
+ break;
+ }
+ }
+ }
+ if (shardCanBeDeleted) {
+ IndexService indexService = indicesService.indexService(indexRoutingTable.index());
+ if (indexService == null) {
+ // the index has no physical allocation on this node; delete it from the file system if applicable
+ if (nodeEnv.hasNodeFile()) {
+ File[] shardLocations = nodeEnv.shardLocations(shardId);
+ if (FileSystemUtils.exists(shardLocations)) {
+ logger.debug("[{}][{}] deleting shard that is no longer used", shardId.index().name(), shardId.id());
+ FileSystemUtils.deleteRecursively(shardLocations);
+ }
+ }
+ } else {
+ if (!indexService.hasShard(shardId.id())) {
+ if (indexService.store().canDeleteUnallocated(shardId)) {
+ logger.debug("[{}][{}] deleting shard that is no longer used", shardId.index().name(), shardId.id());
+ try {
+ indexService.store().deleteUnallocated(indexShardRoutingTable.shardId());
+ } catch (Exception e) {
+ logger.debug("[{}][{}] failed to delete unallocated shard, ignoring", e, indexShardRoutingTable.shardId().index().name(), indexShardRoutingTable.shardId().id());
+ }
+ }
+ } else {
+ // this state is weird, should we log?
+ // basically, it means that the shard is not allocated on this node according to the
+ // routing table, but it still physically exists in an IndexService
+ // Note, this listener should run after IndicesClusterStateService...
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
new file mode 100644
index 0000000..2d160de
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.support.nodes.*;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * A transport action that lists the store files metadata (name, length, checksum) of a given
+ * shard on a set of nodes.
+ */
+public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction<TransportNodesListShardStoreMetaData.Request, TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData, TransportNodesListShardStoreMetaData.NodeRequest, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> {
+
+ private final IndicesService indicesService;
+
+ private final NodeEnvironment nodeEnv;
+
+ @Inject
+ public TransportNodesListShardStoreMetaData(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
+ IndicesService indicesService, NodeEnvironment nodeEnv) {
+ super(settings, clusterName, threadPool, clusterService, transportService);
+ this.indicesService = indicesService;
+ this.nodeEnv = nodeEnv;
+ }
+
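+ /**
+ * Lists the store files metadata of the given shard on the given nodes. When
+ * {@code onlyUnallocated} is set, nodes on which the shard is already allocated
+ * answer with an empty (null) metadata response.
+ */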
+ public ActionFuture<NodesStoreFilesMetaData> list(ShardId shardId, boolean onlyUnallocated, String[] nodesIds, @Nullable TimeValue timeout) {
+ return execute(new Request(shardId, onlyUnallocated, nodesIds).timeout(timeout));
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ protected String transportAction() {
+ return "/cluster/nodes/indices/shard/store";
+ }
+
+ @Override
+ protected Request newRequest() {
+ return new Request();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest() {
+ return new NodeRequest();
+ }
+
+ @Override
+ protected NodeRequest newNodeRequest(String nodeId, Request request) {
+ return new NodeRequest(nodeId, request);
+ }
+
+ @Override
+ protected NodeStoreFilesMetaData newNodeResponse() {
+ return new NodeStoreFilesMetaData();
+ }
+
+ @Override
+ protected NodesStoreFilesMetaData newResponse(Request request, AtomicReferenceArray responses) {
+ final List<NodeStoreFilesMetaData> nodeStoreFilesMetaDatas = Lists.newArrayList();
+ final List<FailedNodeException> failures = Lists.newArrayList();
+ for (int i = 0; i < responses.length(); i++) {
+ Object resp = responses.get(i);
+ if (resp instanceof NodeStoreFilesMetaData) { // will also filter out null response for unallocated ones
+ nodeStoreFilesMetaDatas.add((NodeStoreFilesMetaData) resp);
+ } else if (resp instanceof FailedNodeException) {
+ failures.add((FailedNodeException) resp);
+ }
+ }
+ return new NodesStoreFilesMetaData(clusterName, nodeStoreFilesMetaDatas.toArray(new NodeStoreFilesMetaData[nodeStoreFilesMetaDatas.size()]),
+ failures.toArray(new FailedNodeException[failures.size()]));
+ }
+
+ @Override
+ protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) throws ElasticsearchException {
+ if (request.unallocated) {
+ IndexService indexService = indicesService.indexService(request.shardId.index().name());
+ if (indexService == null) {
+ return new NodeStoreFilesMetaData(clusterService.localNode(), null);
+ }
+ if (!indexService.hasShard(request.shardId.id())) {
+ return new NodeStoreFilesMetaData(clusterService.localNode(), null);
+ }
+ }
+ IndexMetaData metaData = clusterService.state().metaData().index(request.shardId.index().name());
+ if (metaData == null) {
+ return new NodeStoreFilesMetaData(clusterService.localNode(), null);
+ }
+ try {
+ return new NodeStoreFilesMetaData(clusterService.localNode(), listStoreMetaData(request.shardId));
+ } catch (IOException e) {
+ throw new ElasticsearchException("Failed to list store metadata for shard [" + request.shardId + "]", e);
+ }
+ }
+
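+ // if the shard is open on this node, list its files through the live Store (which knows the
+ // authoritative checksums); otherwise fall back to scanning the shard's index directories on
+ // disk, which is only meaningful for fs-based store types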
+ private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
+ IndexService indexService = indicesService.indexService(shardId.index().name());
+ if (indexService != null) {
+ InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId.id());
+ if (indexShard != null) {
+ return new StoreFilesMetaData(true, shardId, indexShard.store().list());
+ }
+ }
+ // try and see if we can list the unallocated shard's store
+ IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name());
+ if (metaData == null) {
+ return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
+ }
+ String storeType = metaData.settings().get("index.store.type", "fs");
+ if (!storeType.contains("fs")) {
+ return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
+ }
+ File[] shardLocations = nodeEnv.shardLocations(shardId);
+ File[] shardIndexLocations = new File[shardLocations.length];
+ for (int i = 0; i < shardLocations.length; i++) {
+ shardIndexLocations[i] = new File(shardLocations[i], "index");
+ }
+ boolean exists = false;
+ for (File shardIndexLocation : shardIndexLocations) {
+ if (shardIndexLocation.exists()) {
+ exists = true;
+ break;
+ }
+ }
+ if (!exists) {
+ return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
+ }
+
+ Map<String, String> checksums = Store.readChecksums(shardIndexLocations);
+ if (checksums == null) {
+ checksums = ImmutableMap.of();
+ }
+
+ Map<String, StoreFileMetaData> files = Maps.newHashMap();
+ for (File shardIndexLocation : shardIndexLocations) {
+ File[] listedFiles = shardIndexLocation.listFiles();
+ if (listedFiles == null) {
+ continue;
+ }
+ for (File file : listedFiles) {
+ // backward compatibility: skip legacy ".cks" checksum files
+ if (file.getName().endsWith(".cks")) {
+ continue;
+ }
+ if (Store.isChecksum(file.getName())) {
+ continue;
+ }
+ files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), checksums.get(file.getName())));
+ }
+ }
+
+ return new StoreFilesMetaData(false, shardId, files);
+ }
+
+ @Override
+ protected boolean accumulateExceptions() {
+ return true;
+ }
+
+ public static class StoreFilesMetaData implements Iterable<StoreFileMetaData>, Streamable {
+ private boolean allocated;
+ private ShardId shardId;
+ private Map<String, StoreFileMetaData> files;
+
+ StoreFilesMetaData() {
+ }
+
+ public StoreFilesMetaData(boolean allocated, ShardId shardId, Map<String, StoreFileMetaData> files) {
+ this.allocated = allocated;
+ this.shardId = shardId;
+ this.files = files;
+ }
+
+ public boolean allocated() {
+ return allocated;
+ }
+
+ public ShardId shardId() {
+ return this.shardId;
+ }
+
+ public long totalSizeInBytes() {
+ long totalSizeInBytes = 0;
+ for (StoreFileMetaData file : this) {
+ totalSizeInBytes += file.length();
+ }
+ return totalSizeInBytes;
+ }
+
+ @Override
+ public Iterator<StoreFileMetaData> iterator() {
+ return files.values().iterator();
+ }
+
+ public boolean fileExists(String name) {
+ return files.containsKey(name);
+ }
+
+ public StoreFileMetaData file(String name) {
+ return files.get(name);
+ }
+
+ public static StoreFilesMetaData readStoreFilesMetaData(StreamInput in) throws IOException {
+ StoreFilesMetaData md = new StoreFilesMetaData();
+ md.readFrom(in);
+ return md;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ allocated = in.readBoolean();
+ shardId = ShardId.readShardId(in);
+ int size = in.readVInt();
+ files = Maps.newHashMapWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
+ files.put(md.name(), md);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(allocated);
+ shardId.writeTo(out);
+ out.writeVInt(files.size());
+ for (StoreFileMetaData md : files.values()) {
+ md.writeTo(out);
+ }
+ }
+ }
+
+ static class Request extends NodesOperationRequest<Request> {
+
+ private ShardId shardId;
+
+ private boolean unallocated;
+
+ public Request() {
+ }
+
+ public Request(ShardId shardId, boolean unallocated, Set<String> nodesIds) {
+ super(nodesIds.toArray(new String[nodesIds.size()]));
+ this.shardId = shardId;
+ this.unallocated = unallocated;
+ }
+
+ public Request(ShardId shardId, boolean unallocated, String... nodesIds) {
+ super(nodesIds);
+ this.shardId = shardId;
+ this.unallocated = unallocated;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = ShardId.readShardId(in);
+ unallocated = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardId.writeTo(out);
+ out.writeBoolean(unallocated);
+ }
+ }
+
+ public static class NodesStoreFilesMetaData extends NodesOperationResponse<NodeStoreFilesMetaData> {
+
+ private FailedNodeException[] failures;
+
+ NodesStoreFilesMetaData() {
+ }
+
+ public NodesStoreFilesMetaData(ClusterName clusterName, NodeStoreFilesMetaData[] nodes, FailedNodeException[] failures) {
+ super(clusterName, nodes);
+ this.failures = failures;
+ }
+
+ public FailedNodeException[] failures() {
+ return failures;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ nodes = new NodeStoreFilesMetaData[in.readVInt()];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeStoreFilesMetaData.readListShardStoreNodeOperationResponse(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(nodes.length);
+ for (NodeStoreFilesMetaData response : nodes) {
+ response.writeTo(out);
+ }
+ }
+ }
+
+ static class NodeRequest extends NodeOperationRequest {
+
+ private ShardId shardId;
+
+ private boolean unallocated;
+
+ NodeRequest() {
+ }
+
+ NodeRequest(String nodeId, TransportNodesListShardStoreMetaData.Request request) {
+ super(request, nodeId);
+ this.shardId = request.shardId;
+ this.unallocated = request.unallocated;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = ShardId.readShardId(in);
+ unallocated = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardId.writeTo(out);
+ out.writeBoolean(unallocated);
+ }
+ }
+
+ public static class NodeStoreFilesMetaData extends NodeOperationResponse {
+
+ private StoreFilesMetaData storeFilesMetaData;
+
+ NodeStoreFilesMetaData() {
+ }
+
+ public NodeStoreFilesMetaData(DiscoveryNode node, StoreFilesMetaData storeFilesMetaData) {
+ super(node);
+ this.storeFilesMetaData = storeFilesMetaData;
+ }
+
+ public StoreFilesMetaData storeFilesMetaData() {
+ return storeFilesMetaData;
+ }
+
+ public static NodeStoreFilesMetaData readListShardStoreNodeOperationResponse(StreamInput in) throws IOException {
+ NodeStoreFilesMetaData resp = new NodeStoreFilesMetaData();
+ resp.readFrom(in);
+ return resp;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.readBoolean()) {
+ storeFilesMetaData = StoreFilesMetaData.readStoreFilesMetaData(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (storeFilesMetaData == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ storeFilesMetaData.writeTo(out);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
new file mode 100644
index 0000000..71820f8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.ttl;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.*;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fieldvisitor.UidAndRoutingFieldsVisitor;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+
+/**
+ * A node-level service that deletes expired docs from the primary shards located on this node.
+ */
+public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLService> {
+
+ public static final String INDICES_TTL_INTERVAL = "indices.ttl.interval";
+ public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge";
+
+ private final ClusterService clusterService;
+ private final IndicesService indicesService;
+ private final TransportBulkAction bulkAction;
+
+ private final int bulkSize;
+ private PurgerThread purgerThread;
+
+ @Inject
+ public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeSettingsService nodeSettingsService, TransportBulkAction bulkAction) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ TimeValue interval = componentSettings.getAsTime("interval", TimeValue.timeValueSeconds(60));
+ this.bulkAction = bulkAction;
+ this.bulkSize = componentSettings.getAsInt("bulk_size", 10000);
+ this.purgerThread = new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval);
+
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ this.purgerThread.start();
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ try {
+ this.purgerThread.shutdown();
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ private class PurgerThread extends Thread {
+ private final AtomicBoolean running = new AtomicBoolean(true);
+ private final Notifier notifier;
+ private final CountDownLatch shutdownLatch = new CountDownLatch(1);
+
+
+ public PurgerThread(String name, TimeValue interval) {
+ super(name);
+ setDaemon(true);
+ this.notifier = new Notifier(interval);
+ }
+
+ public void shutdown() throws InterruptedException {
+ if (running.compareAndSet(true, false)) {
+ notifier.doNotify();
+ shutdownLatch.await();
+ }
+ }
+
+ public void resetInterval(TimeValue interval) {
+ notifier.setTimeout(interval);
+ }
+
+ public void run() {
+ try {
+ while (running.get()) {
+ try {
+ List<IndexShard> shardsToPurge = getShardsToPurge();
+ purgeShards(shardsToPurge);
+ } catch (Throwable e) {
+ if (running.get()) {
+ logger.warn("failed to execute ttl purge", e);
+ }
+ }
+ if (running.get()) {
+ notifier.await();
+ }
+ }
+ } finally {
+ shutdownLatch.countDown();
+ }
+ }
+
+ /**
+ * Returns the shards to purge, i.e. the started primary shards on this node that have TTL enabled and disable_purge set to false
+ */
+ private List<IndexShard> getShardsToPurge() {
+ List<IndexShard> shardsToPurge = new ArrayList<IndexShard>();
+ MetaData metaData = clusterService.state().metaData();
+ for (IndexService indexService : indicesService) {
+ // check the value of disable_purge for this index
+ IndexMetaData indexMetaData = metaData.index(indexService.index().name());
+ if (indexMetaData == null) {
+ continue;
+ }
+ boolean disablePurge = indexMetaData.settings().getAsBoolean(INDEX_TTL_DISABLE_PURGE, false);
+ if (disablePurge) {
+ continue;
+ }
+
+ // should be optimized with the hasTTL flag
+ FieldMappers ttlFieldMappers = indexService.mapperService().name(TTLFieldMapper.NAME);
+ if (ttlFieldMappers == null) {
+ continue;
+ }
+ // check if ttl is enabled for at least one type of this index
+ boolean hasTTLEnabled = false;
+ for (FieldMapper ttlFieldMapper : ttlFieldMappers) {
+ if (((TTLFieldMapper) ttlFieldMapper).enabled()) {
+ hasTTLEnabled = true;
+ break;
+ }
+ }
+ if (hasTTLEnabled) {
+ for (IndexShard indexShard : indexService) {
+ if (indexShard.state() == IndexShardState.STARTED && indexShard.routingEntry().primary() && indexShard.routingEntry().started()) {
+ shardsToPurge.add(indexShard);
+ }
+ }
+ }
+ }
+ return shardsToPurge;
+ }
+
+ public TimeValue getInterval() {
+ return notifier.getTimeout();
+ }
+ }
+
+ private void purgeShards(List<IndexShard> shardsToPurge) {
+ for (IndexShard shardToPurge : shardsToPurge) {
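+ // matches all docs whose _ttl expiry timestamp lies in (-infinity, now], i.e. docs that have expired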
+ Query query = NumericRangeQuery.newLongRange(TTLFieldMapper.NAME, null, System.currentTimeMillis(), false, true);
+ Engine.Searcher searcher = shardToPurge.acquireSearcher("indices_ttl");
+ try {
+ logger.debug("[{}][{}] purging shard", shardToPurge.routingEntry().index(), shardToPurge.routingEntry().id());
+ ExpiredDocsCollector expiredDocsCollector = new ExpiredDocsCollector();
+ searcher.searcher().search(query, expiredDocsCollector);
+ List<DocToPurge> docsToPurge = expiredDocsCollector.getDocsToPurge();
+
+ BulkRequest bulkRequest = new BulkRequest();
+ for (DocToPurge docToPurge : docsToPurge) {
+ bulkRequest.add(new DeleteRequest().index(shardToPurge.routingEntry().index()).type(docToPurge.type).id(docToPurge.id).version(docToPurge.version).routing(docToPurge.routing));
+ bulkRequest = processBulkIfNeeded(bulkRequest, false);
+ }
+ processBulkIfNeeded(bulkRequest, true);
+ } catch (Exception e) {
+ logger.warn("failed to purge", e);
+ } finally {
+ searcher.release();
+ }
+ }
+ }
+
+ private static class DocToPurge {
+ public final String type;
+ public final String id;
+ public final long version;
+ public final String routing;
+
+ public DocToPurge(String type, String id, long version, String routing) {
+ this.type = type;
+ this.id = id;
+ this.version = version;
+ this.routing = routing;
+ }
+ }
+
+ private class ExpiredDocsCollector extends Collector {
+ private AtomicReaderContext context;
+ private List<DocToPurge> docsToPurge = new ArrayList<DocToPurge>();
+
+ public ExpiredDocsCollector() {
+ }
+
+ public void setScorer(Scorer scorer) {
+ }
+
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ public void collect(int doc) {
+ try {
+ UidAndRoutingFieldsVisitor fieldsVisitor = new UidAndRoutingFieldsVisitor();
+ context.reader().document(doc, fieldsVisitor);
+ Uid uid = fieldsVisitor.uid();
+ final long version = Versions.loadVersion(context.reader(), new Term(UidFieldMapper.NAME, uid.toBytesRef()));
+ docsToPurge.add(new DocToPurge(uid.type(), uid.id(), version, fieldsVisitor.routing()));
+ } catch (Exception e) {
+ logger.trace("failed to collect doc", e);
+ }
+ }
+
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ this.context = context;
+ }
+
+ public List<DocToPurge> getDocsToPurge() {
+ return this.docsToPurge;
+ }
+ }
+
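+ /**
+ * Sends the accumulated deletes once the batch reaches bulk_size actions, or when force is
+ * set and anything is pending; returns a fresh request after the old one has been handed off.
+ */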
+ private BulkRequest processBulkIfNeeded(BulkRequest bulkRequest, boolean force) {
+ if ((force && bulkRequest.numberOfActions() > 0) || bulkRequest.numberOfActions() >= bulkSize) {
+ try {
+ bulkAction.executeBulk(bulkRequest, new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse bulkResponse) {
+ logger.trace("bulk took " + bulkResponse.getTookInMillis() + "ms");
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.warn("failed to execute bulk");
+ }
+ });
+ } catch (Exception e) {
+ logger.warn("failed to process bulk", e);
+ }
+ bulkRequest = new BulkRequest();
+ }
+ return bulkRequest;
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ final TimeValue currentInterval = IndicesTTLService.this.purgerThread.getInterval();
+ final TimeValue interval = settings.getAsTime(INDICES_TTL_INTERVAL, currentInterval);
+ if (!interval.equals(currentInterval)) {
+ logger.info("updating indices.ttl.interval from [{}] to [{}]",currentInterval, interval);
+ IndicesTTLService.this.purgerThread.resetInterval(interval);
+
+ }
+ }
+ }
+
+
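+ /**
+ * A small interruptible-sleep helper: await() blocks for at most the configured timeout,
+ * while doNotify() wakes all waiters early (used on shutdown and when the interval changes).
+ */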
+ private static final class Notifier {
+
+ private final ReentrantLock lock = new ReentrantLock();
+ private final Condition condition = lock.newCondition();
+ private volatile TimeValue timeout;
+
+ public Notifier(TimeValue timeout) {
+ assert timeout != null;
+ this.timeout = timeout;
+ }
+
+ public void await() {
+ lock.lock();
+ try {
+ condition.await(timeout.millis(), TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ public void setTimeout(TimeValue timeout) {
+ assert timeout != null;
+ this.timeout = timeout;
+ doNotify();
+ }
+
+ public TimeValue getTimeout() {
+ return timeout;
+ }
+
+ public void doNotify() {
+ lock.lock();
+ try {
+ condition.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/indices/warmer/IndicesWarmer.java b/src/main/java/org/elasticsearch/indices/warmer/IndicesWarmer.java
new file mode 100644
index 0000000..67a6c82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/warmer/IndicesWarmer.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ * Warms newly opened searchers for a shard before they are made available to search.
+ */
+public interface IndicesWarmer {
+
+ public abstract class Listener {
+
+ public String executor() {
+ return ThreadPool.Names.WARMER;
+ }
+
+ /** A handle on the execution of warm-up action. */
+ public interface TerminationHandle {
+
+ TerminationHandle NO_WAIT = new TerminationHandle() {
+ @Override
+ public void awaitTermination() {}
+ };
+
+ /** Wait until execution of the warm-up action completes. */
+ void awaitTermination() throws InterruptedException;
+ }
+
+ /** Queue tasks to warm up the given segments and return a handle that allows waiting for those tasks to terminate. */
+ public abstract TerminationHandle warm(IndexShard indexShard, IndexMetaData indexMetaData, WarmerContext context, ThreadPool threadPool);
+ }
+
+ public static class WarmerContext {
+
+ private final ShardId shardId;
+
+ private final Engine.Searcher newSearcher;
+
+ public WarmerContext(ShardId shardId, Engine.Searcher newSearcher) {
+ this.shardId = shardId;
+ this.newSearcher = newSearcher;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ /** Return a searcher instance that only wraps the segments to warm. */
+ public Engine.Searcher newSearcher() {
+ return newSearcher;
+ }
+ }
+
+ void addListener(Listener listener);
+
+ void removeListener(Listener listener);
+}
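
A warmer participates by extending Listener and handing back a TerminationHandle the warming code can block on. A hedged sketch of such a listener, where FieldDataWarmerListener and loadFieldData() are illustrative names rather than part of this patch, and java.util.concurrent.CountDownLatch is assumed imported:

    public class FieldDataWarmerListener extends IndicesWarmer.Listener {

        @Override
        public TerminationHandle warm(IndexShard indexShard, IndexMetaData indexMetaData,
                                      final IndicesWarmer.WarmerContext context, ThreadPool threadPool) {
            final CountDownLatch latch = new CountDownLatch(1);
            threadPool.executor(executor()).execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        loadFieldData(context.newSearcher()); // hypothetical warm-up work
                    } finally {
                        latch.countDown();
                    }
                }
            });
            // the caller blocks on this handle until the warm-up task has run
            return new TerminationHandle() {
                @Override
                public void awaitTermination() throws InterruptedException {
                    latch.await();
                }
            };
        }
    }
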
diff --git a/src/main/java/org/elasticsearch/indices/warmer/InternalIndicesWarmer.java b/src/main/java/org/elasticsearch/indices/warmer/InternalIndicesWarmer.java
new file mode 100644
index 0000000..e3be26c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/indices/warmer/InternalIndicesWarmer.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Default {@link IndicesWarmer} implementation: fans warm-up tasks out to the
+ * registered listeners and then waits on the returned termination handles.
+ */
+public class InternalIndicesWarmer extends AbstractComponent implements IndicesWarmer {
+
+ public static final String INDEX_WARMER_ENABLED = "index.warmer.enabled";
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final IndicesService indicesService;
+
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ @Inject
+ public InternalIndicesWarmer(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ }
+
+ @Override
+ public void addListener(Listener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void removeListener(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ public void warm(final WarmerContext context) {
+ final IndexMetaData indexMetaData = clusterService.state().metaData().index(context.shardId().index().name());
+ if (indexMetaData == null) {
+ return;
+ }
+ if (!indexMetaData.settings().getAsBoolean(INDEX_WARMER_ENABLED, settings.getAsBoolean(INDEX_WARMER_ENABLED, true))) {
+ return;
+ }
+ IndexService indexService = indicesService.indexService(context.shardId().index().name());
+ if (indexService == null) {
+ return;
+ }
+ final IndexShard indexShard = indexService.shard(context.shardId().id());
+ if (indexShard == null) {
+ return;
+ }
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}][{}] warming [{}]", context.shardId().index().name(), context.shardId().id(), context.newSearcher().reader());
+ }
+ indexShard.warmerService().onPreWarm();
+ long time = System.nanoTime();
+ final List<IndicesWarmer.Listener.TerminationHandle> terminationHandles = Lists.newArrayList();
+ // get a handle on pending tasks
+ for (final Listener listener : listeners) {
+ terminationHandles.add(listener.warm(indexShard, indexMetaData, context, threadPool));
+ }
+ // wait for termination
+ for (IndicesWarmer.Listener.TerminationHandle terminationHandle : terminationHandles) {
+ try {
+ terminationHandle.awaitTermination();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ logger.warn("Warming has been interrupted", e);
+ break;
+ }
+ }
+ long took = System.nanoTime() - time;
+ indexShard.warmerService().onPostWarm(took);
+ if (indexShard.warmerService().logger().isTraceEnabled()) {
+ indexShard.warmerService().logger().trace("warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
+ }
+ }
+}
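
warm() is the join point: it checks the per-index index.warmer.enabled setting (falling back to the node default), forks a task per listener, and blocks until every termination handle completes. A hedged wiring sketch, where the injector and the listener class come from the previous example rather than from this patch:

    InternalIndicesWarmer warmer = injector.getInstance(InternalIndicesWarmer.class);
    warmer.addListener(new FieldDataWarmerListener()); // sketch from above

    // typically driven by the engine when a new searcher is about to be published:
    warmer.warm(new IndicesWarmer.WarmerContext(shardId, newSearcher));
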
diff --git a/src/main/java/org/elasticsearch/monitor/MonitorModule.java b/src/main/java/org/elasticsearch/monitor/MonitorModule.java
new file mode 100644
index 0000000..bd573a3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/MonitorModule.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Scopes;
+import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.dump.DumpContributorFactory;
+import org.elasticsearch.monitor.dump.DumpMonitorService;
+import org.elasticsearch.monitor.dump.cluster.ClusterDumpContributor;
+import org.elasticsearch.monitor.dump.heap.HeapDumpContributor;
+import org.elasticsearch.monitor.dump.summary.SummaryDumpContributor;
+import org.elasticsearch.monitor.dump.thread.ThreadDumpContributor;
+import org.elasticsearch.monitor.fs.FsProbe;
+import org.elasticsearch.monitor.fs.FsService;
+import org.elasticsearch.monitor.fs.JmxFsProbe;
+import org.elasticsearch.monitor.fs.SigarFsProbe;
+import org.elasticsearch.monitor.jvm.JvmMonitorService;
+import org.elasticsearch.monitor.jvm.JvmService;
+import org.elasticsearch.monitor.network.JmxNetworkProbe;
+import org.elasticsearch.monitor.network.NetworkProbe;
+import org.elasticsearch.monitor.network.NetworkService;
+import org.elasticsearch.monitor.network.SigarNetworkProbe;
+import org.elasticsearch.monitor.os.JmxOsProbe;
+import org.elasticsearch.monitor.os.OsProbe;
+import org.elasticsearch.monitor.os.OsService;
+import org.elasticsearch.monitor.os.SigarOsProbe;
+import org.elasticsearch.monitor.process.JmxProcessProbe;
+import org.elasticsearch.monitor.process.ProcessProbe;
+import org.elasticsearch.monitor.process.ProcessService;
+import org.elasticsearch.monitor.process.SigarProcessProbe;
+import org.elasticsearch.monitor.sigar.SigarService;
+
+import java.util.Map;
+
+import static org.elasticsearch.monitor.dump.cluster.ClusterDumpContributor.CLUSTER;
+import static org.elasticsearch.monitor.dump.heap.HeapDumpContributor.HEAP_DUMP;
+import static org.elasticsearch.monitor.dump.summary.SummaryDumpContributor.SUMMARY;
+import static org.elasticsearch.monitor.dump.thread.ThreadDumpContributor.THREAD_DUMP;
+
+/**
+ * Binds the monitoring services, preferring sigar-backed probes when the sigar
+ * native library is available and falling back to JMX-based probes otherwise.
+ */
+public class MonitorModule extends AbstractModule {
+
+ public static final class MonitorSettings {
+ public static final String MEMORY_MANAGER_TYPE = "monitor.memory.type";
+ }
+
+ private final Settings settings;
+
+ public MonitorModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ boolean sigarLoaded = false;
+ try {
+ settings.getClassLoader().loadClass("org.hyperic.sigar.Sigar"); // probe: throws if sigar is not on the classpath
+ SigarService sigarService = new SigarService(settings);
+ if (sigarService.sigarAvailable()) {
+ bind(SigarService.class).toInstance(sigarService);
+ bind(ProcessProbe.class).to(SigarProcessProbe.class).asEagerSingleton();
+ bind(OsProbe.class).to(SigarOsProbe.class).asEagerSingleton();
+ bind(NetworkProbe.class).to(SigarNetworkProbe.class).asEagerSingleton();
+ bind(FsProbe.class).to(SigarFsProbe.class).asEagerSingleton();
+ sigarLoaded = true;
+ }
+ } catch (Throwable e) {
+ // no sigar
+ Loggers.getLogger(SigarService.class).trace("failed to load sigar", e);
+ }
+ if (!sigarLoaded) {
+ // bind non sigar implementations
+ bind(ProcessProbe.class).to(JmxProcessProbe.class).asEagerSingleton();
+ bind(OsProbe.class).to(JmxOsProbe.class).asEagerSingleton();
+ bind(NetworkProbe.class).to(JmxNetworkProbe.class).asEagerSingleton();
+ bind(FsProbe.class).to(JmxFsProbe.class).asEagerSingleton();
+ }
+ // bind other services
+ bind(ProcessService.class).asEagerSingleton();
+ bind(OsService.class).asEagerSingleton();
+ bind(NetworkService.class).asEagerSingleton();
+ bind(JvmService.class).asEagerSingleton();
+ bind(FsService.class).asEagerSingleton();
+
+ bind(JvmMonitorService.class).asEagerSingleton();
+
+ MapBinder<String, DumpContributorFactory> tokenFilterBinder
+ = MapBinder.newMapBinder(binder(), String.class, DumpContributorFactory.class);
+
+ Map<String, Settings> dumpContSettings = settings.getGroups("monitor.dump");
+ for (Map.Entry<String, Settings> entry : dumpContSettings.entrySet()) {
+ String dumpContributorName = entry.getKey();
+ Settings dumpContributorSettings = entry.getValue();
+
+ Class<? extends DumpContributorFactory> type = dumpContributorSettings.getAsClass("type", null, "org.elasticsearch.monitor.dump." + dumpContributorName + ".", "DumpContributor");
+ if (type == null) {
+ throw new IllegalArgumentException("Dump Contributor [" + dumpContributorName + "] must have a type associated with it");
+ }
+ dumpContributorBinder.addBinding(dumpContributorName).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, type)).in(Scopes.SINGLETON);
+ }
+ // add default
+ if (!dumpContSettings.containsKey(SUMMARY)) {
+ dumpContributorBinder.addBinding(SUMMARY).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, SummaryDumpContributor.class)).in(Scopes.SINGLETON);
+ }
+ if (!dumpContSettings.containsKey(THREAD_DUMP)) {
+ dumpContributorBinder.addBinding(THREAD_DUMP).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, ThreadDumpContributor.class)).in(Scopes.SINGLETON);
+ }
+ if (!dumpContSettings.containsKey(HEAP_DUMP)) {
+ dumpContributorBinder.addBinding(HEAP_DUMP).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, HeapDumpContributor.class)).in(Scopes.SINGLETON);
+ }
+ if (!dumpContSettings.containsKey(CLUSTER)) {
+ dumpContributorBinder.addBinding(CLUSTER).toProvider(FactoryProvider.newFactory(DumpContributorFactory.class, ClusterDumpContributor.class)).in(Scopes.SINGLETON);
+ }
+
+ bind(DumpMonitorService.class).asEagerSingleton();
+ }
+}
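
Because contributors are resolved from settings groups, a deployment can register its own dump contributor without modifying this module: any monitor.dump.&lt;name&gt;.type entry is bound through the factory provider. A hedged example, where com.example.MyDumpContributor is an illustrative class assumed to implement DumpContributor:

    Settings settings = ImmutableSettings.settingsBuilder()
        .put("monitor.dump.mydump.type", "com.example.MyDumpContributor")
        .build();
    // MonitorModule will now bind "mydump" alongside the summary/thread/heap/cluster defaults
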
diff --git a/src/main/java/org/elasticsearch/monitor/MonitorService.java b/src/main/java/org/elasticsearch/monitor/MonitorService.java
new file mode 100644
index 0000000..96d129b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/MonitorService.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.fs.FsService;
+import org.elasticsearch.monitor.jvm.JvmMonitorService;
+import org.elasticsearch.monitor.jvm.JvmService;
+import org.elasticsearch.monitor.network.NetworkService;
+import org.elasticsearch.monitor.os.OsService;
+import org.elasticsearch.monitor.process.ProcessService;
+
+/**
+ * Entry point to the node-level monitoring services (OS, process, JVM, network
+ * and file system); also drives the lifecycle of the JVM monitor.
+ */
+public class MonitorService extends AbstractLifecycleComponent<MonitorService> {
+
+ private final JvmMonitorService jvmMonitorService;
+
+ private final OsService osService;
+
+ private final ProcessService processService;
+
+ private final JvmService jvmService;
+
+ private final NetworkService networkService;
+
+ private final FsService fsService;
+
+ @Inject
+ public MonitorService(Settings settings, JvmMonitorService jvmMonitorService,
+ OsService osService, ProcessService processService, JvmService jvmService, NetworkService networkService,
+ FsService fsService) {
+ super(settings);
+ this.jvmMonitorService = jvmMonitorService;
+ this.osService = osService;
+ this.processService = processService;
+ this.jvmService = jvmService;
+ this.networkService = networkService;
+ this.fsService = fsService;
+ }
+
+ public OsService osService() {
+ return this.osService;
+ }
+
+ public ProcessService processService() {
+ return this.processService;
+ }
+
+ public JvmService jvmService() {
+ return this.jvmService;
+ }
+
+ public NetworkService networkService() {
+ return this.networkService;
+ }
+
+ public FsService fsService() {
+ return this.fsService;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ jvmMonitorService.start();
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ jvmMonitorService.stop();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ jvmMonitorService.close();
+ }
+}
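
MonitorService is a lifecycle component, so a node starts it once and then hands out the individual services. A hedged usage sketch; the injector is an assumption, and only methods shown above (plus the inherited start()) are used:

    MonitorService monitorService = injector.getInstance(MonitorService.class);
    monitorService.start(); // starts the JVM monitor; the other services are passive accessors
    OsService os = monitorService.osService();
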
diff --git a/src/main/java/org/elasticsearch/monitor/dump/AbstractDump.java b/src/main/java/org/elasticsearch/monitor/dump/AbstractDump.java
new file mode 100644
index 0000000..8a78311
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/AbstractDump.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.Streams;
+
+import java.io.*;
+import java.util.ArrayList;
+import java.util.Map;
+
+/**
+ * Base {@link Dump} implementation that tracks created files and layers
+ * stream/writer helpers on top of {@link #doCreateFile(String)}.
+ */
+public abstract class AbstractDump implements Dump {
+
+ private final long timestamp;
+
+ private final String cause;
+
+ private final Map<String, Object> context;
+
+ private final ArrayList<File> files = new ArrayList<File>();
+
+ protected AbstractDump(long timestamp, String cause, @Nullable Map<String, Object> context) {
+ this.timestamp = timestamp;
+ this.cause = cause;
+ if (context == null) {
+ context = ImmutableMap.of();
+ }
+ this.context = context;
+ }
+
+ @Override
+ public long timestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public Map<String, Object> context() {
+ return this.context;
+ }
+
+ @Override
+ public String cause() {
+ return cause;
+ }
+
+ @Override
+ public File[] files() {
+ return files.toArray(new File[files.size()]);
+ }
+
+ @Override
+ public File createFile(String name) throws DumpException {
+ File file = doCreateFile(name);
+ files.add(file);
+ return file;
+ }
+
+ protected abstract File doCreateFile(String name) throws DumpException;
+
+ @Override
+ public OutputStream createFileOutputStream(String name) throws DumpException {
+ try {
+ return new FileOutputStream(createFile(name));
+ } catch (FileNotFoundException e) {
+ throw new DumpException("Failed to create file [" + name + "]", e);
+ }
+ }
+
+ @Override
+ public Writer createFileWriter(String name) throws DumpException {
+ return new OutputStreamWriter(createFileOutputStream(name), Charsets.UTF_8);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/Dump.java b/src/main/java/org/elasticsearch/monitor/dump/Dump.java
new file mode 100644
index 0000000..dfbb494
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/Dump.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import java.io.File;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.util.Map;
+
+/**
+ * A dump in progress: contributors add files to it via the create* methods.
+ */
+public interface Dump {
+
+ long timestamp();
+
+ Map<String, Object> context();
+
+ String cause();
+
+ File createFile(String name) throws DumpException;
+
+ Writer createFileWriter(String name) throws DumpException;
+
+ OutputStream createFileOutputStream(String name) throws DumpException;
+
+ File[] files();
+
+ void finish() throws DumpException;
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpContributionFailedException.java b/src/main/java/org/elasticsearch/monitor/dump/DumpContributionFailedException.java
new file mode 100644
index 0000000..c5493d6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpContributionFailedException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+/**
+ *
+ */
+public class DumpContributionFailedException extends DumpException {
+
+ private final String name;
+
+ public DumpContributionFailedException(String name, String msg) {
+ this(name, msg, null);
+ }
+
+ public DumpContributionFailedException(String name, String msg, Throwable cause) {
+ super(name + ": " + msg, cause);
+ this.name = name;
+ }
+
+ public String name() {
+ return this.name;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpContributor.java b/src/main/java/org/elasticsearch/monitor/dump/DumpContributor.java
new file mode 100644
index 0000000..8a23c62
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpContributor.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+/**
+ * Contributes one piece of diagnostic output (e.g. a thread or heap dump) to a {@link Dump}.
+ */
+public interface DumpContributor {
+
+ String getName();
+
+ void contribute(Dump dump) throws DumpContributionFailedException;
+}
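
Implementing a contributor only means writing files into the dump it receives. A hedged minimal example; the class name, constant, and output file are illustrative, and java.io.PrintWriter is assumed imported:

    public class SystemPropertiesDumpContributor implements DumpContributor {

        public static final String SYSTEM_PROPERTIES = "system_properties";

        @Override
        public String getName() {
            return SYSTEM_PROPERTIES;
        }

        @Override
        public void contribute(Dump dump) throws DumpContributionFailedException {
            PrintWriter writer = new PrintWriter(dump.createFileWriter("system_properties.txt"));
            try {
                System.getProperties().list(writer); // one property per line
            } finally {
                writer.close();
            }
        }
    }
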
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpContributorFactory.java b/src/main/java/org/elasticsearch/monitor/dump/DumpContributorFactory.java
new file mode 100644
index 0000000..f6d8ac8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpContributorFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public interface DumpContributorFactory {
+
+ DumpContributor create(String name, Settings settings);
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpException.java b/src/main/java/org/elasticsearch/monitor/dump/DumpException.java
new file mode 100644
index 0000000..166e169
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class DumpException extends ElasticsearchException {
+
+ public DumpException(String msg) {
+ super(msg);
+ }
+
+ public DumpException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpGenerationFailedException.java b/src/main/java/org/elasticsearch/monitor/dump/DumpGenerationFailedException.java
new file mode 100644
index 0000000..952fd9c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpGenerationFailedException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+/**
+ *
+ */
+public class DumpGenerationFailedException extends DumpException {
+
+ public DumpGenerationFailedException(String msg) {
+ super(msg);
+ }
+
+ public DumpGenerationFailedException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java b/src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java
new file mode 100644
index 0000000..1f65b81
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpGenerator.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+
+import org.elasticsearch.common.Nullable;
+
+import java.io.File;
+import java.util.Map;
+
+/**
+ * Runs a set of {@link DumpContributor}s and reports where the dump was written
+ * along with any contributors that failed.
+ */
+public interface DumpGenerator {
+
+ Result generateDump(String cause, @Nullable Map<String, Object> context) throws DumpGenerationFailedException;
+
+ Result generateDump(String cause, @Nullable Map<String, Object> context, String... contributors) throws DumpGenerationFailedException;
+
+ static class Result {
+ private final File location;
+ private Iterable<DumpContributionFailedException> failedContributors;
+
+ public Result(File location, Iterable<DumpContributionFailedException> failedContributors) {
+ this.location = location;
+ this.failedContributors = failedContributors;
+ }
+
+ public String location() {
+ return location.toString();
+ }
+
+ public Iterable<DumpContributionFailedException> failedContributors() {
+ return failedContributors;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java b/src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java
new file mode 100644
index 0000000..dd23158
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/DumpMonitorService.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.monitor.dump.heap.HeapDumpContributor;
+import org.elasticsearch.monitor.dump.summary.SummaryDumpContributor;
+import org.elasticsearch.monitor.dump.thread.ThreadDumpContributor;
+
+import java.io.File;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.monitor.dump.heap.HeapDumpContributor.HEAP_DUMP;
+import static org.elasticsearch.monitor.dump.summary.SummaryDumpContributor.SUMMARY;
+import static org.elasticsearch.monitor.dump.thread.ThreadDumpContributor.THREAD_DUMP;
+
+/**
+ * Node-level service for generating diagnostic dumps; wires the configured
+ * contributors (plus the summary, heap and thread defaults) into a generator.
+ */
+public class DumpMonitorService extends AbstractComponent {
+
+ private final String dumpLocation;
+
+ private final DumpGenerator generator;
+
+ private final ClusterService clusterService;
+ private final Map<String, Settings> contSettings;
+ private final Map<String, DumpContributorFactory> contributors;
+ private final File workFile;
+
+ public DumpMonitorService() {
+ this(EMPTY_SETTINGS, new Environment(EMPTY_SETTINGS), null, null);
+ }
+
+ @Inject
+ public DumpMonitorService(Settings settings, Environment environment,
+ @Nullable ClusterService clusterService, @Nullable Map<String, DumpContributorFactory> contributors) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.contributors = contributors;
+ contSettings = settings.getGroups("monitor.dump");
+ workFile = environment.workWithClusterFile();
+
+ this.dumpLocation = settings.get("dump_location");
+
+ File dumpLocationFile;
+ if (dumpLocation != null) {
+ dumpLocationFile = new File(dumpLocation);
+ } else {
+ dumpLocationFile = new File(workFile, "dump");
+ }
+
+ Map<String, DumpContributor> contributorMap = newHashMap();
+ if (contributors != null) {
+ for (Map.Entry<String, DumpContributorFactory> entry : contributors.entrySet()) {
+ String contName = entry.getKey();
+ DumpContributorFactory dumpContributorFactory = entry.getValue();
+
+ Settings contributorSettings = contSettings.get(contName);
+ if (contributorSettings == null) {
+ contributorSettings = EMPTY_SETTINGS;
+ }
+
+ DumpContributor contributor = dumpContributorFactory.create(contName, contributorSettings);
+ contributorMap.put(contName, contributor);
+ }
+ }
+ if (!contributorMap.containsKey(SUMMARY)) {
+ contributorMap.put(SUMMARY, new SummaryDumpContributor(SUMMARY, EMPTY_SETTINGS));
+ }
+ if (!contributorMap.containsKey(HEAP_DUMP)) {
+ contributorMap.put(HEAP_DUMP, new HeapDumpContributor(HEAP_DUMP, EMPTY_SETTINGS));
+ }
+ if (!contributorMap.containsKey(THREAD_DUMP)) {
+ contributorMap.put(THREAD_DUMP, new ThreadDumpContributor(THREAD_DUMP, EMPTY_SETTINGS));
+ }
+ generator = new SimpleDumpGenerator(dumpLocationFile, contributorMap);
+ }
+
+ public DumpGenerator.Result generateDump(String cause, @Nullable Map<String, Object> context) throws DumpGenerationFailedException {
+ return generator.generateDump(cause, fillContextMap(context));
+ }
+
+ public DumpGenerator.Result generateDump(String cause, @Nullable Map<String, Object> context, String... contributors) throws DumpGenerationFailedException {
+ return generator.generateDump(cause, fillContextMap(context), contributors);
+ }
+
+ private Map<String, Object> fillContextMap(Map<String, Object> context) {
+ if (context == null) {
+ context = newHashMap();
+ }
+ if (clusterService != null) {
+ context.put("localNode", clusterService.localNode());
+ }
+ return context;
+ }
+}
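
Callers pass a cause and optional context; the service adds the local node and delegates to the generator. The summary contributor (below) understands a "throwables" context entry, so a crash handler might request a dump along the lines of this hedged sketch, where the "oom" cause string and surrounding variables are illustrative:

    Map<String, Object> context = new HashMap<String, Object>();
    context.put("throwables", new Throwable[]{error}); // rendered by SummaryDumpContributor
    DumpGenerator.Result result = dumpMonitorService.generateDump("oom", context);
    logger.info("dump written to [{}]", result.location());
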
diff --git a/src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java b/src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java
new file mode 100644
index 0000000..0f48e25
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/SimpleDump.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import org.elasticsearch.common.Nullable;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class SimpleDump extends AbstractDump {
+
+ private final File location;
+
+ public SimpleDump(long timestamp, String cause, @Nullable Map<String, Object> context, File location) throws FileNotFoundException {
+ super(timestamp, cause, context);
+ this.location = location;
+ }
+
+ @Override
+ protected File doCreateFile(String name) throws DumpException {
+ return new File(location, name);
+ }
+
+ @Override
+ public void finish() throws DumpException {
+ // nothing to finalize; files were written directly into the dump location
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java b/src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java
new file mode 100644
index 0000000..8b09ede
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/SimpleDumpGenerator.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.FileSystemUtils;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Map;
+
+/**
+ * {@link DumpGenerator} that writes each dump into a timestamped directory
+ * under a fixed location.
+ */
+public class SimpleDumpGenerator implements DumpGenerator {
+
+ private final File dumpLocation;
+
+ private final ImmutableMap<String, DumpContributor> contributors;
+
+ public SimpleDumpGenerator(File dumpLocation, Map<String, DumpContributor> contributors) {
+ this.dumpLocation = dumpLocation;
+ this.contributors = ImmutableMap.copyOf(contributors);
+ }
+
+ public Result generateDump(String cause, @Nullable Map<String, Object> context) throws DumpGenerationFailedException {
+ return generateDump(cause, context, contributors.keySet().toArray(new String[contributors.size()]));
+ }
+
+ public Result generateDump(String cause, @Nullable Map<String, Object> context, String... contributors) throws DumpGenerationFailedException {
+ long timestamp = System.currentTimeMillis();
+ String fileName = "";
+ if (context.containsKey("localNode")) {
+ DiscoveryNode localNode = (DiscoveryNode) context.get("localNode");
+ if (localNode.name() != null) {
+ fileName += localNode.name() + "-";
+ }
+ fileName += localNode.id() + "-";
+ }
+ File file = new File(dumpLocation, fileName + cause + "-" + timestamp);
+ FileSystemUtils.mkdirs(file);
+ SimpleDump dump;
+ try {
+ dump = new SimpleDump(timestamp, cause, context, file);
+ } catch (FileNotFoundException e) {
+ throw new DumpGenerationFailedException("Failed to generate dump", e);
+ }
+ ArrayList<DumpContributionFailedException> failedContributors = new ArrayList<DumpContributionFailedException>();
+ for (String name : contributors) {
+ DumpContributor contributor = this.contributors.get(name);
+ if (contributor == null) {
+ failedContributors.add(new DumpContributionFailedException(name, "No contributor"));
+ continue;
+ }
+ try {
+ contributor.contribute(dump);
+ } catch (DumpContributionFailedException e) {
+ failedContributors.add(e);
+ } catch (Exception e) {
+ failedContributors.add(new DumpContributionFailedException(contributor.getName(), "Failed", e));
+ }
+ }
+ dump.finish();
+ return new Result(file, failedContributors);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java b/src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java
new file mode 100644
index 0000000..86c3963
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/cluster/ClusterDumpContributor.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump.cluster;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.dump.Dump;
+import org.elasticsearch.monitor.dump.DumpContributionFailedException;
+import org.elasticsearch.monitor.dump.DumpContributor;
+
+import java.io.PrintWriter;
+
+/**
+ * Dumps the cluster nodes and routing table to cluster.txt.
+ */
+public class ClusterDumpContributor implements DumpContributor {
+
+ public static final String CLUSTER = "cluster";
+
+ private final String name;
+
+ private final ClusterService clusterService;
+
+ @Inject
+ public ClusterDumpContributor(ClusterService clusterService, @Assisted String name, @Assisted Settings settings) {
+ this.clusterService = clusterService;
+ this.name = name;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public void contribute(Dump dump) throws DumpContributionFailedException {
+ ClusterState clusterState = clusterService.state();
+ DiscoveryNodes nodes = clusterState.nodes();
+ RoutingTable routingTable = clusterState.routingTable();
+
+ PrintWriter writer = new PrintWriter(dump.createFileWriter("cluster.txt"));
+
+ writer.println("===== CLUSTER NODES ======");
+ writer.print(nodes.prettyPrint());
+
+ writer.println("===== ROUTING TABLE ======");
+ writer.print(routingTable.prettyPrint());
+
+ writer.close();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java b/src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java
new file mode 100644
index 0000000..4344d44
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/heap/HeapDumpContributor.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump.heap;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.dump.Dump;
+import org.elasticsearch.monitor.dump.DumpContributionFailedException;
+import org.elasticsearch.monitor.dump.DumpContributor;
+
+import java.lang.reflect.Method;
+
+/**
+ * Writes an hprof heap dump via the HotSpot diagnostic MBean, when available.
+ */
+public class HeapDumpContributor implements DumpContributor {
+
+ public static final String HEAP_DUMP = "heap";
+
+ private final Method heapDumpMethod;
+ private final Object diagnosticMBean;
+
+ private final String name;
+
+ @Inject
+ public HeapDumpContributor(@Assisted String name, @Assisted Settings settings) {
+ this.name = name;
+ Method heapDumpMethod;
+ Object diagnosticMBean;
+ try {
+ Class<?> managementFactoryClass = Class.forName("sun.management.ManagementFactory", true, HeapDumpContributor.class.getClassLoader());
+ Method method = managementFactoryClass.getMethod("getDiagnosticMXBean");
+ diagnosticMBean = method.invoke(null);
+ heapDumpMethod = diagnosticMBean.getClass().getMethod("dumpHeap", String.class, boolean.class);
+ } catch (Exception e) { // not a HotSpot JVM, or the diagnostic bean is unavailable: disable heap dumps
+ heapDumpMethod = null;
+ diagnosticMBean = null;
+ }
+ this.heapDumpMethod = heapDumpMethod;
+ this.diagnosticMBean = diagnosticMBean;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public void contribute(Dump dump) throws DumpContributionFailedException {
+ if (heapDumpMethod == null) {
+ throw new DumpContributionFailedException(getName(), "Heap dump not enabled on this JVM");
+ }
+ try {
+ heapDumpMethod.invoke(diagnosticMBean, dump.createFile("heap.hprof").getAbsolutePath(), true);
+ } catch (Exception e) {
+ throw new DumpContributionFailedException(getName(), "Failed to generate heap dump", e);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java b/src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java
new file mode 100644
index 0000000..3bfc738
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/summary/SummaryDumpContributor.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump.summary;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.dump.Dump;
+import org.elasticsearch.monitor.dump.DumpContributionFailedException;
+import org.elasticsearch.monitor.dump.DumpContributor;
+
+import java.io.PrintWriter;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Locale;
+
+/**
+ * Writes summary.txt with the dump time, cause, and any throwables found in the context.
+ */
+public class SummaryDumpContributor implements DumpContributor {
+
+ private final DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS", Locale.ROOT);
+ private final Object formatterLock = new Object();
+
+ public static final String SUMMARY = "summary";
+
+ private final String name;
+
+ @Inject
+ public SummaryDumpContributor(@Assisted String name, @Assisted Settings settings) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void contribute(Dump dump) throws DumpContributionFailedException {
+ PrintWriter writer = new PrintWriter(dump.createFileWriter("summary.txt"));
+ try {
+ processHeader(writer, dump.timestamp());
+ processCause(writer, dump.cause());
+ processThrowables(writer, dump);
+ } catch (Exception e) {
+ throw new DumpContributionFailedException(getName(), "Failed to generate", e);
+ } finally {
+ try {
+ writer.close();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+
+ private void processHeader(PrintWriter writer, long timestamp) {
+ synchronized (formatterLock) {
+ writer.println("===== TIME =====");
+ writer.println(dateFormat.format(new Date(timestamp)));
+ writer.println();
+ }
+ }
+
+ private void processCause(PrintWriter writer, String cause) {
+ writer.println("===== CAUSE =====");
+ writer.println(cause);
+ writer.println();
+ }
+
+ private void processThrowables(PrintWriter writer, Dump dump) {
+ writer.println("===== EXCEPTIONS =====");
+ Object throwables = dump.context().get("throwables");
+ if (throwables == null) {
+ return;
+ }
+ if (throwables instanceof Throwable[]) {
+ Throwable[] array = (Throwable[]) throwables;
+ for (Throwable t : array) {
+ writer.println();
+ writer.println("---- Exception ----");
+ t.printStackTrace(writer);
+ }
+ } else if (throwables instanceof Collection) {
+ Collection collection = (Collection) throwables;
+ for (Object o : collection) {
+ Throwable t = (Throwable) o;
+ writer.println();
+ writer.println("---- Exception ----");
+ t.printStackTrace(writer);
+ }
+ } else {
+ throw new DumpContributionFailedException(getName(), "Can't handle throwables type [" + throwables.getClass() + "]");
+ }
+ writer.println();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java b/src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java
new file mode 100644
index 0000000..8c0f8ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/dump/thread/ThreadDumpContributor.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.dump.thread;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.dump.Dump;
+import org.elasticsearch.monitor.dump.DumpContributionFailedException;
+import org.elasticsearch.monitor.dump.DumpContributor;
+
+import java.io.PrintWriter;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MonitorInfo;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.util.Locale;
+
+/**
+ * Writes threads.txt with deadlocked threads (if any) followed by a full thread dump.
+ */
+public class ThreadDumpContributor implements DumpContributor {
+
+ private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+
+ public static final String THREAD_DUMP = "thread";
+
+ private final String name;
+
+ @Inject
+ public ThreadDumpContributor(@Assisted String name, @Assisted Settings settings) {
+ this.name = name;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public void contribute(Dump dump) throws DumpContributionFailedException {
+ PrintWriter writer = new PrintWriter(dump.createFileWriter("threads.txt"));
+ try {
+ processDeadlocks(writer);
+ processAllThreads(writer);
+ } catch (Exception e) {
+ throw new DumpContributionFailedException(getName(), "Failed to generate", e);
+ } finally {
+ try {
+ writer.close();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+
+ private void processDeadlocks(PrintWriter dump) {
+ dump.println("===== Deadlocked Threads =====");
+ long[] deadlockedThreadIds = findDeadlockedThreads();
+ if (deadlockedThreadIds != null) {
+ dumpThreads(dump, getThreadInfo(deadlockedThreadIds));
+ }
+ }
+
+ private void processAllThreads(PrintWriter dump) {
+ dump.println();
+ dump.println("===== All Threads =====");
+ dumpThreads(dump, dumpAllThreads());
+ }
+
+ private void dumpThreads(PrintWriter dump, ThreadInfo[] infos) {
+ for (ThreadInfo info : infos) {
+ dump.println();
+ write(info, dump);
+ }
+ }
+
+ private ThreadInfo[] dumpAllThreads() {
+ return threadBean.dumpAllThreads(true, true);
+ }
+
+ public long[] findDeadlockedThreads() {
+ return threadBean.findDeadlockedThreads();
+ }
+
+ public ThreadInfo[] getThreadInfo(long[] threadIds) {
+ return threadBean.getThreadInfo(threadIds, true, true);
+ }
+
+ private void write(ThreadInfo threadInfo, PrintWriter writer) {
+ writer.print(String.format(Locale.ROOT, "\"%s\" Id=%s %s", threadInfo.getThreadName(), threadInfo.getThreadId(), threadInfo.getThreadState()));
+ if (threadInfo.getLockName() != null) {
+ writer.print(String.format(Locale.ROOT, " on %s", threadInfo.getLockName()));
+ if (threadInfo.getLockOwnerName() != null) {
+ writer.print(String.format(Locale.ROOT, " owned by \"%s\" Id=%s", threadInfo.getLockOwnerName(), threadInfo.getLockOwnerId()));
+ }
+ }
+ if (threadInfo.isInNative()) {
+ writer.println(" (in native)");
+ } else {
+ writer.println();
+ }
+ MonitorInfo[] lockedMonitors = threadInfo.getLockedMonitors();
+ StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
+ for (StackTraceElement stackTraceElement : stackTraceElements) {
+ writer.println(" at " + stackTraceElement);
+ MonitorInfo lockedMonitor = findLockedMonitor(stackTraceElement, lockedMonitors);
+ if (lockedMonitor != null) {
+ writer.println(" - locked " + lockedMonitor.getClassName() + "@" + lockedMonitor.getIdentityHashCode());
+ }
+ }
+ }
+
+ private static MonitorInfo findLockedMonitor(StackTraceElement stackTraceElement, MonitorInfo[] lockedMonitors) {
+ for (MonitorInfo monitorInfo : lockedMonitors) {
+ if (stackTraceElement.equals(monitorInfo.getLockedStackFrame())) {
+ return monitorInfo;
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
new file mode 100644
index 0000000..80a65aa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.fs;
+
+/**
+ * Probes the underlying file system for {@link FsStats}.
+ */
+public interface FsProbe {
+
+ FsStats stats();
+}
diff --git a/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/src/main/java/org/elasticsearch/monitor/fs/FsService.java
new file mode 100644
index 0000000..20a96c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/fs/FsService.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.fs;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Samples file system statistics through an {@link FsProbe}, caching the result for refresh_interval.
+ */
+public class FsService extends AbstractComponent {
+
+ private final FsProbe probe;
+
+ private final TimeValue refreshInterval;
+
+ private FsStats cachedStats;
+
+ @Inject
+ public FsService(Settings settings, FsProbe probe) {
+ super(settings);
+ this.probe = probe;
+ this.cachedStats = probe.stats();
+
+ this.refreshInterval = componentSettings.getAsTime("refresh_interval", TimeValue.timeValueSeconds(1));
+
+ logger.debug("Using probe [{}] with refresh_interval [{}]", probe, refreshInterval);
+ }
+
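+    // Returns the cached stats, refreshing them from the probe at most once per refresh_interval.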
+ public synchronized FsStats stats() {
+ if ((System.currentTimeMillis() - cachedStats.getTimestamp()) > refreshInterval.millis()) {
+ cachedStats = probe.stats();
+ }
+ return cachedStats;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/monitor/fs/FsStats.java b/src/main/java/org/elasticsearch/monitor/fs/FsStats.java
new file mode 100644
index 0000000..4b7bede
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/fs/FsStats.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.fs;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * File system statistics for each data location of a node, plus a device-deduplicated total.
+ */
+public class FsStats implements Iterable<FsStats.Info>, Streamable, ToXContent {
+
+ public static class Info implements Streamable, ToXContent {
+
+ String path;
+ @Nullable
+ String mount;
+ @Nullable
+ String dev;
+ long total = -1;
+ long free = -1;
+ long available = -1;
+ long diskReads = -1;
+ long diskWrites = -1;
+ long diskReadBytes = -1;
+ long diskWriteBytes = -1;
+ double diskQueue = -1;
+ double diskServiceTime = -1;
+
+        public static Info readInfoFrom(StreamInput in) throws IOException {
+ Info i = new Info();
+ i.readFrom(in);
+ return i;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ path = in.readOptionalString();
+ mount = in.readOptionalString();
+ dev = in.readOptionalString();
+ total = in.readLong();
+ free = in.readLong();
+ available = in.readLong();
+ diskReads = in.readLong();
+ diskWrites = in.readLong();
+ diskReadBytes = in.readLong();
+ diskWriteBytes = in.readLong();
+ diskQueue = in.readDouble();
+ diskServiceTime = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalString(path); // total aggregates do not have a path
+ out.writeOptionalString(mount);
+ out.writeOptionalString(dev);
+ out.writeLong(total);
+ out.writeLong(free);
+ out.writeLong(available);
+ out.writeLong(diskReads);
+ out.writeLong(diskWrites);
+ out.writeLong(diskReadBytes);
+ out.writeLong(diskWriteBytes);
+ out.writeDouble(diskQueue);
+ out.writeDouble(diskServiceTime);
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ public String getMount() {
+ return mount;
+ }
+
+ public String getDev() {
+ return dev;
+ }
+
+ public ByteSizeValue getTotal() {
+ return new ByteSizeValue(total);
+ }
+
+ public ByteSizeValue getFree() {
+ return new ByteSizeValue(free);
+ }
+
+ public ByteSizeValue getAvailable() {
+ return new ByteSizeValue(available);
+ }
+
+ public long getDiskReads() {
+ return this.diskReads;
+ }
+
+ public long getDiskWrites() {
+ return this.diskWrites;
+ }
+
+ public long getDiskReadSizeInBytes() {
+ return diskReadBytes;
+ }
+
+ public ByteSizeValue getDiskReadSizeSize() {
+ return new ByteSizeValue(diskReadBytes);
+ }
+
+ public long getDiskWriteSizeInBytes() {
+ return diskWriteBytes;
+ }
+
+ public ByteSizeValue getDiskWriteSizeSize() {
+ return new ByteSizeValue(diskWriteBytes);
+ }
+
+ public double getDiskQueue() {
+ return diskQueue;
+ }
+
+ public double getDiskServiceTime() {
+ return diskServiceTime;
+ }
+
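+        // A value of -1 marks "unknown"; treat it as absent when aggregating so known values survive.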
+ private long addLong(long current, long other) {
+ if (other == -1) {
+ return current;
+ }
+ if (current == -1) {
+ return other;
+ }
+ return current + other;
+ }
+
+ private double addDouble(double current, double other) {
+ if (other == -1) {
+ return current;
+ }
+ if (current == -1) {
+ return other;
+ }
+ return current + other;
+ }
+
+ public void add(Info info) {
+ total = addLong(total, info.total);
+ free = addLong(free, info.free);
+ available = addLong(available, info.available);
+ diskReads = addLong(diskReads, info.diskReads);
+ diskWrites = addLong(diskWrites, info.diskWrites);
+ diskReadBytes = addLong(diskReadBytes, info.diskReadBytes);
+ diskWriteBytes = addLong(diskWriteBytes, info.diskWriteBytes);
+ diskQueue = addDouble(diskQueue, info.diskQueue);
+ diskServiceTime = addDouble(diskServiceTime, info.diskServiceTime);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString PATH = new XContentBuilderString("path");
+ static final XContentBuilderString MOUNT = new XContentBuilderString("mount");
+ static final XContentBuilderString DEV = new XContentBuilderString("dev");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
+ static final XContentBuilderString FREE = new XContentBuilderString("free");
+ static final XContentBuilderString FREE_IN_BYTES = new XContentBuilderString("free_in_bytes");
+ static final XContentBuilderString AVAILABLE = new XContentBuilderString("available");
+ static final XContentBuilderString AVAILABLE_IN_BYTES = new XContentBuilderString("available_in_bytes");
+ static final XContentBuilderString DISK_READS = new XContentBuilderString("disk_reads");
+ static final XContentBuilderString DISK_WRITES = new XContentBuilderString("disk_writes");
+ static final XContentBuilderString DISK_IO_OP = new XContentBuilderString("disk_io_op");
+ static final XContentBuilderString DISK_READ_SIZE = new XContentBuilderString("disk_read_size");
+ static final XContentBuilderString DISK_READ_SIZE_IN_BYTES = new XContentBuilderString("disk_read_size_in_bytes");
+ static final XContentBuilderString DISK_WRITE_SIZE = new XContentBuilderString("disk_write_size");
+ static final XContentBuilderString DISK_WRITE_SIZE_IN_BYTES = new XContentBuilderString("disk_write_size_in_bytes");
+ static final XContentBuilderString DISK_IO_SIZE = new XContentBuilderString("disk_io_size");
+ static final XContentBuilderString DISK_IO_IN_BYTES = new XContentBuilderString("disk_io_size_in_bytes");
+ static final XContentBuilderString DISK_QUEUE = new XContentBuilderString("disk_queue");
+ static final XContentBuilderString DISK_SERVICE_TIME = new XContentBuilderString("disk_service_time");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (path != null) {
+ builder.field(Fields.PATH, path, XContentBuilder.FieldCaseConversion.NONE);
+ }
+ if (mount != null) {
+ builder.field(Fields.MOUNT, mount, XContentBuilder.FieldCaseConversion.NONE);
+ }
+ if (dev != null) {
+ builder.field(Fields.DEV, dev, XContentBuilder.FieldCaseConversion.NONE);
+ }
+
+ if (total != -1) {
+ builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, total);
+ }
+ if (free != -1) {
+ builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, free);
+ }
+ if (available != -1) {
+ builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, available);
+ }
+
+ long iop = -1;
+
+ if (diskReads != -1) {
+ iop = diskReads;
+ builder.field(Fields.DISK_READS, diskReads);
+ }
+ if (diskWrites != -1) {
+ if (iop != -1) {
+ iop += diskWrites;
+ } else {
+ iop = diskWrites;
+ }
+ builder.field(Fields.DISK_WRITES, diskWrites);
+ }
+
+ if (iop != -1) {
+ builder.field(Fields.DISK_IO_OP, iop);
+ }
+
+ long ioBytes = -1;
+
+ if (diskReadBytes != -1) {
+ ioBytes = diskReadBytes;
+ builder.byteSizeField(Fields.DISK_READ_SIZE_IN_BYTES, Fields.DISK_READ_SIZE, diskReadBytes);
+ }
+ if (diskWriteBytes != -1) {
+ if (ioBytes != -1) {
+ ioBytes += diskWriteBytes;
+ } else {
+ ioBytes = diskWriteBytes;
+ }
+ builder.byteSizeField(Fields.DISK_WRITE_SIZE_IN_BYTES, Fields.DISK_WRITE_SIZE, diskWriteBytes);
+ }
+
+ if (ioBytes != -1) {
+ builder.byteSizeField(Fields.DISK_IO_IN_BYTES, Fields.DISK_IO_SIZE, ioBytes);
+ }
+
+ if (diskQueue != -1) {
+ builder.field(Fields.DISK_QUEUE, Strings.format1Decimals(diskQueue, ""));
+ }
+ if (diskServiceTime != -1) {
+ builder.field(Fields.DISK_SERVICE_TIME, Strings.format1Decimals(diskServiceTime, ""));
+ }
+
+ builder.endObject();
+ return builder;
+ }
+ }
+
+ long timestamp;
+ Info total;
+ Info[] infos;
+
+ FsStats() {
+
+ }
+
+ FsStats(long timestamp, Info[] infos) {
+ this.timestamp = timestamp;
+ this.infos = infos;
+ this.total = null;
+ }
+
+ public Info getTotal() {
+ return total();
+ }
+
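+    // Aggregates all data locations into a single Info, counting each underlying device only once.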
+ public Info total() {
+ if (total != null) {
+ return total;
+ }
+ Info res = new Info();
+ Set<String> seenDevices = new HashSet<String>(infos.length);
+ for (Info subInfo : infos) {
+ if (subInfo.dev != null) {
+ if (!seenDevices.add(subInfo.dev)) {
+                    continue; // stats for this device were already added
+ }
+ }
+ res.add(subInfo);
+ }
+ total = res;
+ return res;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public Iterator<Info> iterator() {
+ return Iterators.forArray(infos);
+ }
+
+ public static FsStats readFsStats(StreamInput in) throws IOException {
+ FsStats stats = new FsStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ infos = new Info[in.readVInt()];
+ for (int i = 0; i < infos.length; i++) {
+ infos[i] = Info.readInfoFrom(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ out.writeVInt(infos.length);
+ for (Info info : infos) {
+ info.writeTo(out);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString FS = new XContentBuilderString("fs");
+ static final XContentBuilderString TIMESTAMP = new XContentBuilderString("timestamp");
+ static final XContentBuilderString DATA = new XContentBuilderString("data");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.FS);
+ builder.field(Fields.TIMESTAMP, timestamp);
+ builder.field(Fields.TOTAL);
+ total().toXContent(builder, params);
+ builder.startArray(Fields.DATA);
+ for (Info info : infos) {
+ info.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/fs/JmxFsProbe.java b/src/main/java/org/elasticsearch/monitor/fs/JmxFsProbe.java
new file mode 100644
index 0000000..5f0ec33
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/fs/JmxFsProbe.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.fs;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+
+import java.io.File;
+
+/**
+ * {@link FsProbe} implementation based on plain java.io.File space queries.
+ */
+public class JmxFsProbe extends AbstractComponent implements FsProbe {
+
+ private final NodeEnvironment nodeEnv;
+
+ @Inject
+ public JmxFsProbe(Settings settings, NodeEnvironment nodeEnv) {
+ super(settings);
+ this.nodeEnv = nodeEnv;
+ }
+
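+    // Pure-Java probe: only capacity figures are available through java.io.File, so
+    // the disk I/O counters keep their -1 "unknown" defaults.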
+ @Override
+ public FsStats stats() {
+ if (!nodeEnv.hasNodeFile()) {
+ return new FsStats(System.currentTimeMillis(), new FsStats.Info[0]);
+ }
+ File[] dataLocations = nodeEnv.nodeDataLocations();
+ FsStats.Info[] infos = new FsStats.Info[dataLocations.length];
+ for (int i = 0; i < dataLocations.length; i++) {
+ File dataLocation = dataLocations[i];
+ FsStats.Info info = new FsStats.Info();
+ info.path = dataLocation.getAbsolutePath();
+ info.total = dataLocation.getTotalSpace();
+ info.free = dataLocation.getFreeSpace();
+ info.available = dataLocation.getUsableSpace();
+ infos[i] = info;
+ }
+ return new FsStats(System.currentTimeMillis(), infos);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/fs/SigarFsProbe.java b/src/main/java/org/elasticsearch/monitor/fs/SigarFsProbe.java
new file mode 100644
index 0000000..9e01562
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/fs/SigarFsProbe.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.fs;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.hyperic.sigar.FileSystem;
+import org.hyperic.sigar.FileSystemMap;
+import org.hyperic.sigar.FileSystemUsage;
+import org.hyperic.sigar.Sigar;
+import org.hyperic.sigar.SigarException;
+
+import java.io.File;
+import java.util.Map;
+
+/**
+ * {@link FsProbe} implementation backed by the Sigar native library, which also exposes disk I/O counters.
+ */
+public class SigarFsProbe extends AbstractComponent implements FsProbe {
+
+ private final NodeEnvironment nodeEnv;
+
+ private final SigarService sigarService;
+
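+    // Cache the resolved FileSystem per data location so the mount point lookup only happens once.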
+ private Map<File, FileSystem> fileSystems = Maps.newHashMap();
+
+ @Inject
+ public SigarFsProbe(Settings settings, NodeEnvironment nodeEnv, SigarService sigarService) {
+ super(settings);
+ this.nodeEnv = nodeEnv;
+ this.sigarService = sigarService;
+ }
+
+ @Override
+ public synchronized FsStats stats() {
+ if (!nodeEnv.hasNodeFile()) {
+ return new FsStats(System.currentTimeMillis(), new FsStats.Info[0]);
+ }
+ File[] dataLocations = nodeEnv.nodeDataLocations();
+ FsStats.Info[] infos = new FsStats.Info[dataLocations.length];
+ for (int i = 0; i < dataLocations.length; i++) {
+ File dataLocation = dataLocations[i];
+
+ FsStats.Info info = new FsStats.Info();
+ info.path = dataLocation.getAbsolutePath();
+
+ try {
+ FileSystem fileSystem = fileSystems.get(dataLocation);
+ Sigar sigar = sigarService.sigar();
+ if (fileSystem == null) {
+ FileSystemMap fileSystemMap = sigar.getFileSystemMap();
+ if (fileSystemMap != null) {
+ fileSystem = fileSystemMap.getMountPoint(dataLocation.getPath());
+ fileSystems.put(dataLocation, fileSystem);
+ }
+ }
+ if (fileSystem != null) {
+ info.mount = fileSystem.getDirName();
+ info.dev = fileSystem.getDevName();
+ FileSystemUsage fileSystemUsage = sigar.getFileSystemUsage(fileSystem.getDirName());
+ if (fileSystemUsage != null) {
+                        // Sigar reports total/free/available in kilobytes, so multiply by 1024 to get bytes
+ info.total = fileSystemUsage.getTotal() * 1024;
+ info.free = fileSystemUsage.getFree() * 1024;
+ info.available = fileSystemUsage.getAvail() * 1024;
+ info.diskReads = fileSystemUsage.getDiskReads();
+ info.diskWrites = fileSystemUsage.getDiskWrites();
+ info.diskReadBytes = fileSystemUsage.getDiskReadBytes();
+ info.diskWriteBytes = fileSystemUsage.getDiskWriteBytes();
+ info.diskQueue = fileSystemUsage.getDiskQueue();
+ info.diskServiceTime = fileSystemUsage.getDiskServiceTime();
+ }
+ }
+ } catch (SigarException e) {
+                // ignore; report whatever stats were collected for this location
+ }
+
+ infos[i] = info;
+ }
+
+ return new FsStats(System.currentTimeMillis(), infos);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java b/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java
new file mode 100644
index 0000000..98d4b69
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.util.*;
+
+/**
+ * Detects monitor deadlocks between JVM threads and describes each cycle found.
+ */
+public class DeadlockAnalyzer {
+
+    private static final Deadlock[] NULL_RESULT = new Deadlock[0];
+    private final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+
+    private static final DeadlockAnalyzer INSTANCE = new DeadlockAnalyzer();
+
+ public static DeadlockAnalyzer deadlockAnalyzer() {
+ return INSTANCE;
+ }
+
+ private DeadlockAnalyzer() {
+
+ }
+
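+    // Note: findMonitorDeadlockedThreads() only detects deadlocks on object monitors,
+    // not on java.util.concurrent ownable synchronizers.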
+ public Deadlock[] findDeadlocks() {
+        long[] deadlockedThreads = threadBean.findMonitorDeadlockedThreads();
+ if (deadlockedThreads == null || deadlockedThreads.length == 0) {
+ return NULL_RESULT;
+ }
+ ImmutableMap<Long, ThreadInfo> threadInfoMap = createThreadInfoMap(deadlockedThreads);
+ Set<LinkedHashSet<ThreadInfo>> cycles = calculateCycles(threadInfoMap);
+ Set<LinkedHashSet<ThreadInfo>> chains = calculateCycleDeadlockChains(threadInfoMap, cycles);
+ cycles.addAll(chains);
+ return createDeadlockDescriptions(cycles);
+ }
+
+
+ private Deadlock[] createDeadlockDescriptions(Set<LinkedHashSet<ThreadInfo>> cycles) {
+        Deadlock[] result = new Deadlock[cycles.size()];
+ int count = 0;
+ for (LinkedHashSet<ThreadInfo> cycle : cycles) {
+            ThreadInfo[] asArray = cycle.toArray(new ThreadInfo[cycle.size()]);
+ Deadlock d = new Deadlock(asArray);
+ result[count++] = d;
+ }
+ return result;
+ }
+
+
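+    // Follows each thread's lock-owner chain until a thread repeats, yielding the distinct deadlock cycles.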
+ private Set<LinkedHashSet<ThreadInfo>> calculateCycles(ImmutableMap<Long, ThreadInfo> threadInfoMap) {
+ Set<LinkedHashSet<ThreadInfo>> cycles = new HashSet<LinkedHashSet<ThreadInfo>>();
+ for (Map.Entry<Long, ThreadInfo> entry : threadInfoMap.entrySet()) {
+ LinkedHashSet<ThreadInfo> cycle = new LinkedHashSet<ThreadInfo>();
+ for (ThreadInfo t = entry.getValue(); !cycle.contains(t); t = threadInfoMap.get(Long.valueOf(t.getLockOwnerId())))
+ cycle.add(t);
+
+ if (!cycles.contains(cycle))
+ cycles.add(cycle);
+ }
+ return cycles;
+ }
+
+
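+    // Finds BLOCKED threads that are not part of a cycle themselves but whose lock-owner chain leads into one.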
+ private Set<LinkedHashSet<ThreadInfo>> calculateCycleDeadlockChains(ImmutableMap<Long, ThreadInfo> threadInfoMap, Set<LinkedHashSet<ThreadInfo>> cycles) {
+        ThreadInfo[] allThreads = threadBean.getThreadInfo(threadBean.getAllThreadIds());
+ Set<LinkedHashSet<ThreadInfo>> deadlockChain = new HashSet<LinkedHashSet<ThreadInfo>>();
+ Set<Long> knownDeadlockedThreads = threadInfoMap.keySet();
+ for (ThreadInfo threadInfo : allThreads) {
+ Thread.State state = threadInfo.getThreadState();
+ if (state == Thread.State.BLOCKED && !knownDeadlockedThreads.contains(threadInfo.getThreadId())) {
+                for (LinkedHashSet<ThreadInfo> cycle : cycles) {
+ if (cycle.contains(threadInfoMap.get(Long.valueOf(threadInfo.getLockOwnerId())))) {
+ LinkedHashSet<ThreadInfo> chain = new LinkedHashSet<ThreadInfo>();
+ for (ThreadInfo node = threadInfo; !chain.contains(node); node = threadInfoMap.get(Long.valueOf(node.getLockOwnerId())))
+ chain.add(node);
+
+ deadlockChain.add(chain);
+ }
+ }
+
+ }
+ }
+
+ return deadlockChain;
+ }
+
+
+    private ImmutableMap<Long, ThreadInfo> createThreadInfoMap(long[] threadIds) {
+        ThreadInfo[] threadInfos = threadBean.getThreadInfo(threadIds);
+ ImmutableMap.Builder<Long, ThreadInfo> threadInfoMap = ImmutableMap.builder();
+ for (ThreadInfo threadInfo : threadInfos) {
+ threadInfoMap.put(threadInfo.getThreadId(), threadInfo);
+ }
+ return threadInfoMap.build();
+ }
+
+
+ public static class Deadlock {
+
+        private final ThreadInfo[] members;
+ private final String description;
+ private final ImmutableSet<Long> memberIds;
+
+ public Deadlock(ThreadInfo[] members) {
+ this.members = members;
+
+ ImmutableSet.Builder<Long> builder = ImmutableSet.builder();
+ StringBuilder sb = new StringBuilder();
+            for (int x = 0; x < members.length; x++) {
+                ThreadInfo ti = members[x];
+                sb.append(ti.getThreadName());
+                // each member is followed by the next thread in the cycle; the last one
+                // is followed by the lock owner that closes the cycle
+                sb.append(" > ");
+                if (x == members.length - 1)
+                    sb.append(ti.getLockOwnerName());
+                builder.add(ti.getThreadId());
+            }
+ this.description = sb.toString();
+ this.memberIds = builder.build();
+ }
+
+ public ThreadInfo[] members() {
+ return members;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Deadlock deadlock = (Deadlock) o;
+
+ if (memberIds != null ? !memberIds.equals(deadlock.memberIds) : deadlock.memberIds != null) return false;
+
+ return true;
+ }
+
+        @Override
+        public int hashCode() {
+            // kept consistent with equals(), which compares only memberIds
+            return memberIds != null ? memberIds.hashCode() : 0;
+        }
+
+ @Override
+ public String toString() {
+ return description;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java b/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java
new file mode 100644
index 0000000..082f18b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+/**
+ * Maps JVM memory pool and garbage collector names onto the logical generations young, survivor, and old.
+ */
+public class GcNames {
+
+    public static final String YOUNG = "young";
+    public static final String OLD = "old";
+    public static final String SURVIVOR = "survivor";
+
+    /**
+     * Resolves the GC type by its memory pool name ({@link java.lang.management.MemoryPoolMXBean#getName()}).
+     */
+ public static String getByMemoryPoolName(String poolName, String defaultName) {
+ if ("Eden Space".equals(poolName) || "PS Eden Space".equals(poolName) || "Par Eden Space".equals(poolName) || "G1 Eden Space".equals(poolName)) {
+ return YOUNG;
+ }
+ if ("Survivor Space".equals(poolName) || "PS Survivor Space".equals(poolName) || "Par Survivor Space".equals(poolName) || "G1 Survivor Space".equals(poolName)) {
+ return SURVIVOR;
+ }
+ if ("Tenured Gen".equals(poolName) || "PS Old Gen".equals(poolName) || "CMS Old Gen".equals(poolName) || "G1 Old Gen".equals(poolName)) {
+ return OLD;
+ }
+ return defaultName;
+ }
+
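+    /**
+     * Resolves the GC type by the collector name ({@link java.lang.management.GarbageCollectorMXBean#getName()}).
+     */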
+ public static String getByGcName(String gcName, String defaultName) {
+ if ("Copy".equals(gcName) || "PS Scavenge".equals(gcName) || "ParNew".equals(gcName) || "G1 Young Generation".equals(gcName)) {
+ return YOUNG;
+ }
+ if ("MarkSweepCompact".equals(gcName) || "PS MarkSweep".equals(gcName) || "ConcurrentMarkSweep".equals(gcName) || "G1 Old Generation".equals(gcName)) {
+ return OLD;
+ }
+ return defaultName;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java
new file mode 100644
index 0000000..7f69e97
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Captures the busiest threads of the JVM by sampling per-thread CPU, wait, or block time.
+ */
+public class HotThreads {
+
+ private static final Object mutex = new Object();
+
+ private int busiestThreads = 3;
+ private TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS);
+ private TimeValue threadElementsSnapshotDelay = new TimeValue(10);
+ private int threadElementsSnapshotCount = 10;
+ private String type = "cpu";
+
+ public HotThreads interval(TimeValue interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ public HotThreads busiestThreads(int busiestThreads) {
+ this.busiestThreads = busiestThreads;
+ return this;
+ }
+
+ public HotThreads threadElementsSnapshotDelay(TimeValue threadElementsSnapshotDelay) {
+ this.threadElementsSnapshotDelay = threadElementsSnapshotDelay;
+ return this;
+ }
+
+ public HotThreads threadElementsSnapshotCount(int threadElementsSnapshotCount) {
+ this.threadElementsSnapshotCount = threadElementsSnapshotCount;
+ return this;
+ }
+
+ public HotThreads type(String type) {
+ if ("cpu".equals(type) || "wait".equals(type) || "block".equals(type)) {
+ this.type = type;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("type not supported [" + type + "]");
+ }
+ return this;
+ }
+
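+    // Example usage (hypothetical caller):
+    //   String report = new HotThreads().interval(TimeValue.timeValueMillis(500)).type("cpu").detect();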
+ public String detect() throws Exception {
+ synchronized (mutex) {
+ return innerDetect();
+ }
+ }
+
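+    // Samples every thread twice, one interval apart, ranks the threads by the delta of the
+    // selected metric, then takes repeated stack snapshots of the busiest ones.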
+ private String innerDetect() throws Exception {
+ StringBuilder sb = new StringBuilder();
+ ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+ boolean enabledCpu = false;
+ try {
+ if (threadBean.isThreadCpuTimeSupported()) {
+ if (!threadBean.isThreadCpuTimeEnabled()) {
+ enabledCpu = true;
+ threadBean.setThreadCpuTimeEnabled(true);
+ }
+ } else {
+ throw new IllegalStateException("MBean doesn't support thread CPU Time");
+ }
+ Map<Long, MyThreadInfo> threadInfos = new HashMap<Long, MyThreadInfo>();
+ for (long threadId : threadBean.getAllThreadIds()) {
+ // ignore our own thread...
+ if (Thread.currentThread().getId() == threadId) {
+ continue;
+ }
+ long cpu = threadBean.getThreadCpuTime(threadId);
+ if (cpu == -1) {
+ continue;
+ }
+ ThreadInfo info = threadBean.getThreadInfo(threadId, 0);
+ if (info == null) {
+ continue;
+ }
+ threadInfos.put(threadId, new MyThreadInfo(cpu, info));
+ }
+ Thread.sleep(interval.millis());
+ for (long threadId : threadBean.getAllThreadIds()) {
+ // ignore our own thread...
+ if (Thread.currentThread().getId() == threadId) {
+ continue;
+ }
+ long cpu = threadBean.getThreadCpuTime(threadId);
+ if (cpu == -1) {
+ threadInfos.remove(threadId);
+ continue;
+ }
+ ThreadInfo info = threadBean.getThreadInfo(threadId, 0);
+ if (info == null) {
+ threadInfos.remove(threadId);
+ continue;
+ }
+ MyThreadInfo data = threadInfos.get(threadId);
+ if (data != null) {
+ data.setDelta(cpu, info);
+ } else {
+ threadInfos.remove(threadId);
+ }
+ }
+            // sort by the delta of the selected metric, busiest thread first
+            List<MyThreadInfo> hotties = new ArrayList<MyThreadInfo>(threadInfos.values());
+            final int busiestThreads = Math.min(this.busiestThreads, hotties.size());
+            CollectionUtil.introSort(hotties, new Comparator<MyThreadInfo>() {
+                public int compare(MyThreadInfo o1, MyThreadInfo o2) {
+                    // compare the long deltas explicitly; casting the difference to int could overflow
+                    long diff;
+                    if ("cpu".equals(type)) {
+                        diff = o2.cpuTime - o1.cpuTime;
+                    } else if ("wait".equals(type)) {
+                        diff = o2.waitedTime - o1.waitedTime;
+                    } else if ("block".equals(type)) {
+                        diff = o2.blockedTime - o1.blockedTime;
+                    } else {
+                        throw new IllegalArgumentException();
+                    }
+                    return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
+                }
+            });
+ // analyse N stack traces for M busiest threads
+ long[] ids = new long[busiestThreads];
+ for (int i = 0; i < busiestThreads; i++) {
+ MyThreadInfo info = hotties.get(i);
+ ids[i] = info.info.getThreadId();
+ }
+ ThreadInfo[][] allInfos = new ThreadInfo[threadElementsSnapshotCount][];
+ for (int j = 0; j < threadElementsSnapshotCount; j++) {
+ // NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist,
+ // null will be set in the corresponding element in the returned array. A thread is alive if it has
+ // been started and has not yet died.
+ allInfos[j] = threadBean.getThreadInfo(ids, Integer.MAX_VALUE);
+ Thread.sleep(threadElementsSnapshotDelay.millis());
+ }
+ for (int t = 0; t < busiestThreads; t++) {
+ long time = 0;
+ if ("cpu".equals(type)) {
+ time = hotties.get(t).cpuTime;
+ } else if ("wait".equals(type)) {
+ time = hotties.get(t).waitedTime;
+ } else if ("block".equals(type)) {
+ time = hotties.get(t).blockedTime;
+ }
+ String threadName = null;
+ if (allInfos[0][t] == null) {
+ for (ThreadInfo[] info : allInfos) {
+ if (info != null && info[t] != null) {
+ threadName = info[t].getThreadName();
+ break;
+ }
+ }
+ if (threadName == null) {
+ continue; // thread is not alive yet or died before the first snapshot - ignore it!
+ }
+ } else {
+ threadName = allInfos[0][t].getThreadName();
+ }
+                // cpu time is reported in nanoseconds, waited/blocked times in milliseconds
+                double percent = (((double) time) / ("cpu".equals(type) ? interval.nanos() : interval.millis())) * 100;
+ sb.append(String.format(Locale.ROOT, "%n%4.1f%% (%s out of %s) %s usage by thread '%s'%n", percent, TimeValue.timeValueNanos(time), interval, type, threadName));
+ // for each snapshot (2nd array index) find later snapshot for same thread with max number of
+ // identical StackTraceElements (starting from end of each)
+ boolean[] done = new boolean[threadElementsSnapshotCount];
+ for (int i = 0; i < threadElementsSnapshotCount; i++) {
+ if (done[i]) continue;
+ int maxSim = 1;
+ boolean[] similars = new boolean[threadElementsSnapshotCount];
+ for (int j = i + 1; j < threadElementsSnapshotCount; j++) {
+ if (done[j]) continue;
+ int similarity = similarity(allInfos[i][t], allInfos[j][t]);
+ if (similarity > maxSim) {
+ maxSim = similarity;
+ similars = new boolean[threadElementsSnapshotCount];
+ }
+ if (similarity == maxSim) similars[j] = true;
+ }
+ // print out trace maxSim levels of i, and mark similar ones as done
+ int count = 1;
+ for (int j = i + 1; j < threadElementsSnapshotCount; j++) {
+ if (similars[j]) {
+ done[j] = true;
+ count++;
+ }
+ }
+ if (allInfos[i][t] != null) {
+ final StackTraceElement[] show = allInfos[i][t].getStackTrace();
+ if (count == 1) {
+ sb.append(String.format(Locale.ROOT, " unique snapshot%n"));
+ for (int l = 0; l < show.length; l++) {
+ sb.append(String.format(Locale.ROOT, " %s%n", show[l]));
+ }
+ } else {
+ sb.append(String.format(Locale.ROOT, " %d/%d snapshots sharing following %d elements%n", count, threadElementsSnapshotCount, maxSim));
+ for (int l = show.length - maxSim; l < show.length; l++) {
+ sb.append(String.format(Locale.ROOT, " %s%n", show[l]));
+ }
+ }
+ }
+ }
+ }
+ return sb.toString();
+ } finally {
+ if (enabledCpu) {
+ threadBean.setThreadCpuTimeEnabled(false);
+ }
+ }
+ }
+
+ private static final StackTraceElement[] EMPTY = new StackTraceElement[0];
+
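+    // Counts how many outermost stack frames two snapshots share, comparing from the bottom of each stack.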
+ private int similarity(ThreadInfo threadInfo, ThreadInfo threadInfo0) {
+ StackTraceElement[] s1 = threadInfo == null ? EMPTY : threadInfo.getStackTrace();
+ StackTraceElement[] s2 = threadInfo0 == null ? EMPTY : threadInfo0.getStackTrace();
+ int i = s1.length - 1;
+ int j = s2.length - 1;
+ int rslt = 0;
+ while (i >= 0 && j >= 0 && s1[i].equals(s2[j])) {
+ rslt++;
+ i--;
+ j--;
+ }
+ return rslt;
+ }
+
+
+ class MyThreadInfo {
+ long cpuTime;
+ long blockedCount;
+ long blockedTime;
+ long waitedCount;
+ long waitedTime;
+ boolean deltaDone;
+ ThreadInfo info;
+
+ MyThreadInfo(long cpuTime, ThreadInfo info) {
+ blockedCount = info.getBlockedCount();
+ blockedTime = info.getBlockedTime();
+ waitedCount = info.getWaitedCount();
+ waitedTime = info.getWaitedTime();
+ this.cpuTime = cpuTime;
+ this.info = info;
+ }
+
+ void setDelta(long cpuTime, ThreadInfo info) {
+ if (deltaDone) throw new IllegalStateException("setDelta already called once");
+ blockedCount = info.getBlockedCount() - blockedCount;
+ blockedTime = info.getBlockedTime() - blockedTime;
+ waitedCount = info.getWaitedCount() - waitedCount;
+ waitedTime = info.getWaitedTime() - waitedTime;
+ this.cpuTime = cpuTime - this.cpuTime;
+ deltaDone = true;
+ this.info = info;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
new file mode 100644
index 0000000..31cbd67
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
@@ -0,0 +1,462 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.management.*;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Static information about the running JVM: process id, version, memory limits, GC collectors and memory pools.
+ */
+public class JvmInfo implements Streamable, Serializable, ToXContent {
+
+ private static JvmInfo INSTANCE;
+
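+    // Captured once at class load time; this JVM-level information does not change
+    // over the lifetime of the process.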
+ static {
+ RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
+ MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
+
+ // returns the <process id>@<host>
+ long pid;
+ String xPid = runtimeMXBean.getName();
+ try {
+ xPid = xPid.split("@")[0];
+ pid = Long.parseLong(xPid);
+ } catch (Exception e) {
+ pid = -1;
+ }
+ JvmInfo info = new JvmInfo();
+ info.pid = pid;
+ info.startTime = runtimeMXBean.getStartTime();
+ info.version = runtimeMXBean.getSystemProperties().get("java.version");
+ info.vmName = runtimeMXBean.getVmName();
+ info.vmVendor = runtimeMXBean.getVmVendor();
+ info.vmVersion = runtimeMXBean.getVmVersion();
+ info.mem = new Mem();
+ info.mem.heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit();
+ info.mem.heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax();
+ info.mem.nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit();
+ info.mem.nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getMax();
+ try {
+ Class<?> vmClass = Class.forName("sun.misc.VM");
+ info.mem.directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null);
+ } catch (Throwable t) {
+            // not all JVMs expose sun.misc.VM; leave directMemoryMax at its default
+ }
+ info.inputArguments = runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]);
+ info.bootClassPath = runtimeMXBean.getBootClassPath();
+ info.classPath = runtimeMXBean.getClassPath();
+ info.systemProperties = runtimeMXBean.getSystemProperties();
+
+ List<GarbageCollectorMXBean> gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans();
+ info.gcCollectors = new String[gcMxBeans.size()];
+ for (int i = 0; i < gcMxBeans.size(); i++) {
+ GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i);
+ info.gcCollectors[i] = gcMxBean.getName();
+ }
+
+ List<MemoryPoolMXBean> memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans();
+ info.memoryPools = new String[memoryPoolMXBeans.size()];
+ for (int i = 0; i < memoryPoolMXBeans.size(); i++) {
+ MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i);
+ info.memoryPools[i] = memoryPoolMXBean.getName();
+ }
+
+ INSTANCE = info;
+ }
+
+ public static JvmInfo jvmInfo() {
+ return INSTANCE;
+ }
+
+ long pid = -1;
+
+ String version = "";
+ String vmName = "";
+ String vmVersion = "";
+ String vmVendor = "";
+
+ long startTime = -1;
+
+ Mem mem;
+
+ String[] inputArguments;
+
+ String bootClassPath;
+
+ String classPath;
+
+ Map<String, String> systemProperties;
+
+ String[] gcCollectors = Strings.EMPTY_ARRAY;
+ String[] memoryPools = Strings.EMPTY_ARRAY;
+
+ private JvmInfo() {
+ }
+
+ /**
+ * The process id.
+ */
+ public long pid() {
+ return this.pid;
+ }
+
+ /**
+ * The process id.
+ */
+ public long getPid() {
+ return pid;
+ }
+
+ public String version() {
+ return this.version;
+ }
+
+ public String getVersion() {
+ return this.version;
+ }
+
+ public int versionAsInteger() {
+ try {
+ int i = 0;
+ String sVersion = "";
+ for (; i < version.length(); i++) {
+ if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') {
+ break;
+ }
+ if (version.charAt(i) != '.') {
+ sVersion += version.charAt(i);
+ }
+ }
+ if (i == 0) {
+ return -1;
+ }
+ return Integer.parseInt(sVersion);
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public int versionUpdatePack() {
+ try {
+ int i = 0;
+ String sVersion = "";
+ for (; i < version.length(); i++) {
+ if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') {
+ break;
+ }
+ if (version.charAt(i) != '.') {
+ sVersion += version.charAt(i);
+ }
+ }
+ if (i == 0) {
+ return -1;
+ }
+ Integer.parseInt(sVersion);
+ int from;
+ if (version.charAt(i) == '_') {
+ // 1.7.0_4
+ from = ++i;
+ } else if (version.charAt(i) == '-' && version.charAt(i + 1) == 'u') {
+ // 1.7.0-u2-b21
+ i = i + 2;
+ from = i;
+ } else {
+ return -1;
+ }
+ for (; i < version.length(); i++) {
+ if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') {
+ break;
+ }
+ }
+ if (from == i) {
+ return -1;
+ }
+ return Integer.parseInt(version.substring(from, i));
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public String vmName() {
+ return vmName;
+ }
+
+ public String getVmName() {
+ return vmName;
+ }
+
+ public String vmVersion() {
+ return vmVersion;
+ }
+
+ public String getVmVersion() {
+ return vmVersion;
+ }
+
+ public String vmVendor() {
+ return vmVendor;
+ }
+
+ public String getVmVendor() {
+ return vmVendor;
+ }
+
+ public long startTime() {
+ return startTime;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public Mem mem() {
+ return mem;
+ }
+
+ public Mem getMem() {
+ return mem();
+ }
+
+ public String[] inputArguments() {
+ return inputArguments;
+ }
+
+ public String[] getInputArguments() {
+ return inputArguments;
+ }
+
+ public String bootClassPath() {
+ return bootClassPath;
+ }
+
+ public String getBootClassPath() {
+ return bootClassPath;
+ }
+
+ public String classPath() {
+ return classPath;
+ }
+
+ public String getClassPath() {
+ return classPath;
+ }
+
+ public Map<String, String> systemProperties() {
+ return systemProperties;
+ }
+
+ public Map<String, String> getSystemProperties() {
+ return systemProperties;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.JVM);
+ builder.field(Fields.PID, pid);
+ builder.field(Fields.VERSION, version);
+ builder.field(Fields.VM_NAME, vmName);
+ builder.field(Fields.VM_VERSION, vmVersion);
+ builder.field(Fields.VM_VENDOR, vmVendor);
+ builder.field(Fields.START_TIME, startTime);
+
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.HEAP_INIT_IN_BYTES, Fields.HEAP_INIT, mem.heapInit);
+ builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax);
+ builder.byteSizeField(Fields.NON_HEAP_INIT_IN_BYTES, Fields.NON_HEAP_INIT, mem.nonHeapInit);
+ builder.byteSizeField(Fields.NON_HEAP_MAX_IN_BYTES, Fields.NON_HEAP_MAX, mem.nonHeapMax);
+ builder.byteSizeField(Fields.DIRECT_MAX_IN_BYTES, Fields.DIRECT_MAX, mem.directMemoryMax);
+ builder.endObject();
+
+ builder.field(Fields.GC_COLLECTORS, gcCollectors);
+ builder.field(Fields.MEMORY_POOLS, memoryPools);
+
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString JVM = new XContentBuilderString("jvm");
+ static final XContentBuilderString PID = new XContentBuilderString("pid");
+ static final XContentBuilderString VERSION = new XContentBuilderString("version");
+ static final XContentBuilderString VM_NAME = new XContentBuilderString("vm_name");
+ static final XContentBuilderString VM_VERSION = new XContentBuilderString("vm_version");
+ static final XContentBuilderString VM_VENDOR = new XContentBuilderString("vm_vendor");
+ static final XContentBuilderString START_TIME = new XContentBuilderString("start_time");
+
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString HEAP_INIT = new XContentBuilderString("heap_init");
+ static final XContentBuilderString HEAP_INIT_IN_BYTES = new XContentBuilderString("heap_init_in_bytes");
+ static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
+ static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
+ static final XContentBuilderString NON_HEAP_INIT = new XContentBuilderString("non_heap_init");
+ static final XContentBuilderString NON_HEAP_INIT_IN_BYTES = new XContentBuilderString("non_heap_init_in_bytes");
+ static final XContentBuilderString NON_HEAP_MAX = new XContentBuilderString("non_heap_max");
+ static final XContentBuilderString NON_HEAP_MAX_IN_BYTES = new XContentBuilderString("non_heap_max_in_bytes");
+ static final XContentBuilderString DIRECT_MAX = new XContentBuilderString("direct_max");
+ static final XContentBuilderString DIRECT_MAX_IN_BYTES = new XContentBuilderString("direct_max_in_bytes");
+ static final XContentBuilderString GC_COLLECTORS = new XContentBuilderString("gc_collectors");
+ static final XContentBuilderString MEMORY_POOLS = new XContentBuilderString("memory_pools");
+ }
+
+ public static JvmInfo readJvmInfo(StreamInput in) throws IOException {
+ JvmInfo jvmInfo = new JvmInfo();
+ jvmInfo.readFrom(in);
+ return jvmInfo;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ pid = in.readLong();
+ version = in.readString();
+ vmName = in.readString();
+ vmVersion = in.readString();
+ vmVendor = in.readString();
+ startTime = in.readLong();
+ inputArguments = new String[in.readInt()];
+ for (int i = 0; i < inputArguments.length; i++) {
+ inputArguments[i] = in.readString();
+ }
+ bootClassPath = in.readString();
+ classPath = in.readString();
+ systemProperties = new HashMap<String, String>();
+ int size = in.readInt();
+ for (int i = 0; i < size; i++) {
+ systemProperties.put(in.readString(), in.readString());
+ }
+ mem = new Mem();
+ mem.readFrom(in);
+ gcCollectors = in.readStringArray();
+ memoryPools = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(pid);
+ out.writeString(version);
+ out.writeString(vmName);
+ out.writeString(vmVersion);
+ out.writeString(vmVendor);
+ out.writeLong(startTime);
+ out.writeInt(inputArguments.length);
+ for (String inputArgument : inputArguments) {
+ out.writeString(inputArgument);
+ }
+ out.writeString(bootClassPath);
+ out.writeString(classPath);
+ out.writeInt(systemProperties.size());
+ for (Map.Entry<String, String> entry : systemProperties.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeString(entry.getValue());
+ }
+ mem.writeTo(out);
+ out.writeStringArray(gcCollectors);
+ out.writeStringArray(memoryPools);
+ }
+
+ public static class Mem implements Streamable, Serializable {
+
+ long heapInit = 0;
+ long heapMax = 0;
+ long nonHeapInit = 0;
+ long nonHeapMax = 0;
+ long directMemoryMax = 0;
+
+ Mem() {
+ }
+
+ public ByteSizeValue heapInit() {
+ return new ByteSizeValue(heapInit);
+ }
+
+ public ByteSizeValue getHeapInit() {
+ return heapInit();
+ }
+
+ public ByteSizeValue heapMax() {
+ return new ByteSizeValue(heapMax);
+ }
+
+ public ByteSizeValue getHeapMax() {
+ return heapMax();
+ }
+
+ public ByteSizeValue nonHeapInit() {
+ return new ByteSizeValue(nonHeapInit);
+ }
+
+ public ByteSizeValue getNonHeapInit() {
+ return nonHeapInit();
+ }
+
+ public ByteSizeValue nonHeapMax() {
+ return new ByteSizeValue(nonHeapMax);
+ }
+
+ public ByteSizeValue getNonHeapMax() {
+ return nonHeapMax();
+ }
+
+ public ByteSizeValue directMemoryMax() {
+ return new ByteSizeValue(directMemoryMax);
+ }
+
+ public ByteSizeValue getDirectMemoryMax() {
+ return directMemoryMax();
+ }
+
+ public static Mem readMem(StreamInput in) throws IOException {
+ Mem mem = new Mem();
+ mem.readFrom(in);
+ return mem;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ heapInit = in.readVLong();
+ heapMax = in.readVLong();
+ nonHeapInit = in.readVLong();
+ nonHeapMax = in.readVLong();
+ directMemoryMax = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(heapInit);
+ out.writeVLong(heapMax);
+ out.writeVLong(nonHeapInit);
+ out.writeVLong(nonHeapMax);
+ out.writeVLong(directMemoryMax);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java b/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java
new file mode 100644
index 0000000..7214f0b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.monitor.dump.DumpGenerator;
+import org.elasticsearch.monitor.dump.DumpMonitorService;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledFuture;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.elasticsearch.monitor.dump.summary.SummaryDumpContributor.SUMMARY;
+import static org.elasticsearch.monitor.dump.thread.ThreadDumpContributor.THREAD_DUMP;
+import static org.elasticsearch.monitor.jvm.DeadlockAnalyzer.deadlockAnalyzer;
+import static org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
+import static org.elasticsearch.monitor.jvm.JvmStats.jvmStats;
+
+/**
+ * Periodically samples JVM stats and logs garbage collections whose duration exceeds the configured thresholds.
+ */
+public class JvmMonitorService extends AbstractLifecycleComponent<JvmMonitorService> {
+
+ private final ThreadPool threadPool;
+
+ private final DumpMonitorService dumpMonitorService;
+
+ private final boolean enabled;
+
+ private final TimeValue interval;
+
+ private final ImmutableMap<String, GcThreshold> gcThresholds;
+
+ private volatile ScheduledFuture scheduledFuture;
+
+ static class GcThreshold {
+ public final String name;
+ public final long warnThreshold;
+ public final long infoThreshold;
+ public final long debugThreshold;
+
+ GcThreshold(String name, long warnThreshold, long infoThreshold, long debugThreshold) {
+ this.name = name;
+ this.warnThreshold = warnThreshold;
+ this.infoThreshold = infoThreshold;
+ this.debugThreshold = debugThreshold;
+ }
+
+ @Override
+ public String toString() {
+ return "GcThreshold{" +
+ "name='" + name + '\'' +
+ ", warnThreshold=" + warnThreshold +
+ ", infoThreshold=" + infoThreshold +
+ ", debugThreshold=" + debugThreshold +
+ '}';
+ }
+ }
+
+ @Inject
+ public JvmMonitorService(Settings settings, ThreadPool threadPool, DumpMonitorService dumpMonitorService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.dumpMonitorService = dumpMonitorService;
+
+ this.enabled = componentSettings.getAsBoolean("enabled", true);
+ this.interval = componentSettings.getAsTime("interval", timeValueSeconds(1));
+
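+        // Per-collector thresholds come from settings such as (assuming this component's
+        // "monitor.jvm." settings prefix) monitor.jvm.gc.young.warn: 1s, .info: 700ms, .debug: 400ms.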
+ MapBuilder<String, GcThreshold> gcThresholds = MapBuilder.newMapBuilder();
+ Map<String, Settings> gcThresholdGroups = componentSettings.getGroups("gc");
+ for (Map.Entry<String, Settings> entry : gcThresholdGroups.entrySet()) {
+ String name = entry.getKey();
+ TimeValue warn = entry.getValue().getAsTime("warn", null);
+ TimeValue info = entry.getValue().getAsTime("info", null);
+ TimeValue debug = entry.getValue().getAsTime("debug", null);
+ if (warn == null || info == null || debug == null) {
+ logger.warn("ignoring gc_threshold for [{}], missing warn/info/debug values", name);
+ } else {
+ gcThresholds.put(name, new GcThreshold(name, warn.millis(), info.millis(), debug.millis()));
+ }
+ }
+ if (!gcThresholds.containsKey(GcNames.YOUNG)) {
+ gcThresholds.put(GcNames.YOUNG, new GcThreshold(GcNames.YOUNG, 1000, 700, 400));
+ }
+ if (!gcThresholds.containsKey(GcNames.OLD)) {
+ gcThresholds.put(GcNames.OLD, new GcThreshold(GcNames.OLD, 10000, 5000, 2000));
+ }
+ if (!gcThresholds.containsKey("default")) {
+ gcThresholds.put("default", new GcThreshold("default", 10000, 5000, 2000));
+ }
+
+ this.gcThresholds = gcThresholds.immutableMap();
+
+ logger.debug("enabled [{}], last_gc_enabled [{}], interval [{}], gc_threshold [{}]", enabled, JvmStats.isLastGcEnabled(), interval, this.gcThresholds);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ if (!enabled) {
+ return;
+ }
+ scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(), interval);
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ if (!enabled) {
+ return;
+ }
+ scheduledFuture.cancel(true);
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ private class JvmMonitor implements Runnable {
+
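+        // Baseline from the previous run; GC activity is computed as a delta against these stats.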
+ private JvmStats lastJvmStats = jvmStats();
+
+ private long seq = 0;
+
+ private final Set<DeadlockAnalyzer.Deadlock> lastSeenDeadlocks = new HashSet<DeadlockAnalyzer.Deadlock>();
+
+ public JvmMonitor() {
+ }
+
+ @Override
+ public void run() {
+ try {
+// monitorDeadlock();
+ monitorLongGc();
+ } catch (Throwable t) {
+ logger.debug("failed to monitor", t);
+ }
+ }
+
+ private synchronized void monitorLongGc() {
+ seq++;
+ JvmStats currentJvmStats = jvmStats();
+
+ for (int i = 0; i < currentJvmStats.gc().collectors().length; i++) {
+ GarbageCollector gc = currentJvmStats.gc().collectors()[i];
+ GarbageCollector prevGc = lastJvmStats.gc.collectors[i];
+
+                // skip collectors that have not run since the last sample
+ long collections = gc.collectionCount - prevGc.collectionCount;
+ if (collections == 0) {
+ continue;
+ }
+ long collectionTime = gc.collectionTime - prevGc.collectionTime;
+ if (collectionTime == 0) {
+ continue;
+ }
+
+ GcThreshold gcThreshold = gcThresholds.get(gc.name());
+ if (gcThreshold == null) {
+ gcThreshold = gcThresholds.get("default");
+ }
+
+ if (gc.lastGc() != null && prevGc.lastGc() != null) {
+ GarbageCollector.LastGc lastGc = gc.lastGc();
+ if (lastGc.startTime == prevGc.lastGc().startTime()) {
+ // we already handled this one...
+ continue;
+ }
+                    // Ignore any duration > 1hr; getLastGcInfo occasionally returns bogus values
+ if (lastGc.duration().hoursFrac() > 1) {
+ continue;
+ }
+ if (lastGc.duration().millis() > gcThreshold.warnThreshold) {
+ logger.warn("[last_gc][{}][{}][{}] duration [{}], collections [{}], total [{}]/[{}], reclaimed [{}], leaving [{}][{}]/[{}]",
+ gc.name(), seq, gc.getCollectionCount(), lastGc.duration(), collections, TimeValue.timeValueMillis(collectionTime), gc.collectionTime(), lastGc.reclaimed(), lastGc.afterUsed(), lastGc.max());
+ } else if (lastGc.duration().millis() > gcThreshold.infoThreshold) {
+ logger.info("[last_gc][{}][{}][{}] duration [{}], collections [{}], total [{}]/[{}], reclaimed [{}], leaving [{}]/[{}]",
+ gc.name(), seq, gc.getCollectionCount(), lastGc.duration(), collections, TimeValue.timeValueMillis(collectionTime), gc.collectionTime(), lastGc.reclaimed(), lastGc.afterUsed(), lastGc.max());
+ } else if (lastGc.duration().millis() > gcThreshold.debugThreshold && logger.isDebugEnabled()) {
+ logger.debug("[last_gc][{}][{}][{}] duration [{}], collections [{}], total [{}]/[{}], reclaimed [{}], leaving [{}]/[{}]",
+ gc.name(), seq, gc.getCollectionCount(), lastGc.duration(), collections, TimeValue.timeValueMillis(collectionTime), gc.collectionTime(), lastGc.reclaimed(), lastGc.afterUsed(), lastGc.max());
+ }
+ }
+
+ long avgCollectionTime = collectionTime / collections;
+
+ if (avgCollectionTime > gcThreshold.warnThreshold) {
+ logger.warn("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
+ gc.name(), seq, gc.collectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.timestamp() - lastJvmStats.timestamp()), TimeValue.timeValueMillis(collectionTime), gc.collectionTime(), lastJvmStats.mem().heapUsed(), currentJvmStats.mem().heapUsed(), JvmInfo.jvmInfo().mem().heapMax(), buildPools(lastJvmStats, currentJvmStats));
+ } else if (avgCollectionTime > gcThreshold.infoThreshold) {
+ logger.info("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
+ gc.name(), seq, gc.collectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.timestamp() - lastJvmStats.timestamp()), TimeValue.timeValueMillis(collectionTime), gc.collectionTime(), lastJvmStats.mem().heapUsed(), currentJvmStats.mem().heapUsed(), JvmInfo.jvmInfo().mem().heapMax(), buildPools(lastJvmStats, currentJvmStats));
+ } else if (avgCollectionTime > gcThreshold.debugThreshold && logger.isDebugEnabled()) {
+ logger.debug("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
+ gc.name(), seq, gc.collectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.timestamp() - lastJvmStats.timestamp()), TimeValue.timeValueMillis(collectionTime), gc.collectionTime(), lastJvmStats.mem().heapUsed(), currentJvmStats.mem().heapUsed(), JvmInfo.jvmInfo().mem().heapMax(), buildPools(lastJvmStats, currentJvmStats));
+ }
+ }
+ lastJvmStats = currentJvmStats;
+ }
+
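+ // renders per-pool heap movement as {[name] [before]->[after]/[max]}, with "?" when the pool was absent in the previous snapshot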
+ private String buildPools(JvmStats prev, JvmStats current) {
+ StringBuilder sb = new StringBuilder();
+ for (JvmStats.MemoryPool currentPool : current.mem()) {
+ JvmStats.MemoryPool prevPool = null;
+ for (JvmStats.MemoryPool pool : prev.mem()) {
+ if (pool.getName().equals(currentPool.getName())) {
+ prevPool = pool;
+ break;
+ }
+ }
+ sb.append("{[").append(currentPool.name())
+ .append("] [").append(prevPool == null ? "?" : prevPool.used()).append("]->[").append(currentPool.used()).append("]/[").append(currentPool.getMax()).append("]}");
+ }
+ return sb.toString();
+ }
+
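+ // currently not invoked from run(); reports each distinct set of deadlocks once and generates a thread dump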
+ private void monitorDeadlock() {
+ DeadlockAnalyzer.Deadlock[] deadlocks = deadlockAnalyzer().findDeadlocks();
+ if (deadlocks != null && deadlocks.length > 0) {
+ ImmutableSet<DeadlockAnalyzer.Deadlock> asSet = new ImmutableSet.Builder<DeadlockAnalyzer.Deadlock>().add(deadlocks).build();
+ if (!asSet.equals(lastSeenDeadlocks)) {
+ DumpGenerator.Result genResult = dumpMonitorService.generateDump("deadlock", null, SUMMARY, THREAD_DUMP);
+ StringBuilder sb = new StringBuilder("Detected Deadlock(s)");
+ for (DeadlockAnalyzer.Deadlock deadlock : asSet) {
+ sb.append("\n ----> ").append(deadlock);
+ }
+ sb.append("\nDump generated [").append(genResult.location()).append("]");
+ logger.error(sb.toString());
+ lastSeenDeadlocks.clear();
+ lastSeenDeadlocks.addAll(asSet);
+ }
+ } else {
+ lastSeenDeadlocks.clear();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java
new file mode 100644
index 0000000..b182073
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Provides access to static JVM info and to JVM stats that are refreshed at most once per refresh_interval.
+ */
+public class JvmService extends AbstractComponent {
+
+ private final JvmInfo jvmInfo;
+
+ private final TimeValue refreshInterval;
+
+ private JvmStats jvmStats;
+
+ @Inject
+ public JvmService(Settings settings) {
+ super(settings);
+ this.jvmInfo = JvmInfo.jvmInfo();
+ this.jvmStats = JvmStats.jvmStats();
+
+ this.refreshInterval = componentSettings.getAsTime("refresh_interval", TimeValue.timeValueSeconds(1));
+
+ logger.debug("Using refresh_interval [{}]", refreshInterval);
+ }
+
+ public JvmInfo info() {
+ return this.jvmInfo;
+ }
+
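+ // stats are refreshed lazily, at most once per refresh_interval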
+ public synchronized JvmStats stats() {
+ if ((System.currentTimeMillis() - jvmStats.timestamp()) > refreshInterval.millis()) {
+ jvmStats = JvmStats.jvmStats();
+ }
+ return jvmStats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java
new file mode 100644
index 0000000..e53404b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java
@@ -0,0 +1,981 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.jvm;
+
+import com.google.common.collect.Iterators;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.management.*;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A point-in-time snapshot of JVM statistics: heap/non-heap memory, memory pools, threads, garbage collectors and buffer pools.
+ */
+public class JvmStats implements Streamable, Serializable, ToXContent {
+
+ private static boolean enableLastGc;
+
+ public static boolean isLastGcEnabled() {
+ return enableLastGc;
+ }
+
+ private final static RuntimeMXBean runtimeMXBean;
+ private final static MemoryMXBean memoryMXBean;
+ private final static ThreadMXBean threadMXBean;
+
+ private static Method managementFactoryPlatformMXBeansMethod;
+
+ private static Method getLastGcInfoMethod;
+ private static Method getMemoryUsageBeforeGcMethod;
+ private static Method getMemoryUsageAfterGcMethod;
+ private static Method getStartTimeMethod;
+ private static Method getEndTimeMethod;
+ private static Method getDurationMethod;
+
+ private static boolean bufferPoolsEnabled;
+ private static Class<?> bufferPoolMXBeanClass;
+ private static Method bufferPoolMXBeanNameMethod;
+ private static Method bufferPoolMXBeanCountMethod;
+ private static Method bufferPoolMXBeanTotalCapacityMethod;
+ private static Method bufferPoolMXBeanMemoryUsedMethod;
+
+ static {
+ runtimeMXBean = ManagementFactory.getRuntimeMXBean();
+ memoryMXBean = ManagementFactory.getMemoryMXBean();
+ threadMXBean = ManagementFactory.getThreadMXBean();
+
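+ // ManagementFactory.getPlatformMXBeans(Class) and BufferPoolMXBean exist only on Java 7+,
+ // so resolve them reflectively and degrade gracefully on older JVMs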
+ try {
+ managementFactoryPlatformMXBeansMethod = ManagementFactory.class.getMethod("getPlatformMXBeans", Class.class);
+ } catch (Throwable e) {
+ managementFactoryPlatformMXBeansMethod = null;
+ }
+
+
+ try {
+ bufferPoolMXBeanClass = Class.forName("java.lang.management.BufferPoolMXBean");
+ bufferPoolMXBeanNameMethod = bufferPoolMXBeanClass.getMethod("getName");
+ bufferPoolMXBeanCountMethod = bufferPoolMXBeanClass.getMethod("getCount");
+ bufferPoolMXBeanTotalCapacityMethod = bufferPoolMXBeanClass.getMethod("getTotalCapacity");
+ bufferPoolMXBeanMemoryUsedMethod = bufferPoolMXBeanClass.getMethod("getMemoryUsed");
+ bufferPoolsEnabled = true;
+ } catch (Throwable t) {
+ bufferPoolsEnabled = false;
+ }
+
+ JvmInfo info = JvmInfo.jvmInfo();
+ boolean defaultEnableLastGc = false;
+ if (info.versionAsInteger() == 170) {
+ defaultEnableLastGc = info.versionUpdatePack() >= 4;
+ } else if (info.versionAsInteger() > 170) {
+ defaultEnableLastGc = true;
+ }
+ // always disable lastGc for now; some of the reports it gives are strange
+ defaultEnableLastGc = false;
+
+ boolean enableLastGc = Booleans.parseBoolean(System.getProperty("monitor.jvm.enable_last_gc"), defaultEnableLastGc);
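+ // getLastGcInfo lives on com.sun.management.GarbageCollectorMXBean, which is JDK-internal;
+ // access it reflectively and disable last_gc reporting if anything is missing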
+ if (enableLastGc) {
+ try {
+ Class<?> sunGcClass = Class.forName("com.sun.management.GarbageCollectorMXBean");
+ Class<?> gcInfoClass = Class.forName("com.sun.management.GcInfo");
+
+ getLastGcInfoMethod = sunGcClass.getDeclaredMethod("getLastGcInfo");
+ getLastGcInfoMethod.setAccessible(true);
+
+ getMemoryUsageBeforeGcMethod = gcInfoClass.getDeclaredMethod("getMemoryUsageBeforeGc");
+ getMemoryUsageBeforeGcMethod.setAccessible(true);
+ getMemoryUsageAfterGcMethod = gcInfoClass.getDeclaredMethod("getMemoryUsageAfterGc");
+ getMemoryUsageAfterGcMethod.setAccessible(true);
+ getStartTimeMethod = gcInfoClass.getDeclaredMethod("getStartTime");
+ getStartTimeMethod.setAccessible(true);
+ getEndTimeMethod = gcInfoClass.getDeclaredMethod("getEndTime");
+ getEndTimeMethod.setAccessible(true);
+ getDurationMethod = gcInfoClass.getDeclaredMethod("getDuration");
+ getDurationMethod.setAccessible(true);
+
+ } catch (Throwable ex) {
+ enableLastGc = false;
+ }
+ }
+
+ JvmStats.enableLastGc = enableLastGc;
+ }
+
+ public static JvmStats jvmStats() {
+ JvmStats stats = new JvmStats(System.currentTimeMillis(), runtimeMXBean.getUptime());
+ stats.mem = new Mem();
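+ // MemoryUsage can report -1 for undefined values; clamp them to 0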
+ MemoryUsage memUsage = memoryMXBean.getHeapMemoryUsage();
+ stats.mem.heapUsed = memUsage.getUsed() < 0 ? 0 : memUsage.getUsed();
+ stats.mem.heapCommitted = memUsage.getCommitted() < 0 ? 0 : memUsage.getCommitted();
+ stats.mem.heapMax = memUsage.getMax() < 0 ? 0 : memUsage.getMax();
+ memUsage = memoryMXBean.getNonHeapMemoryUsage();
+ stats.mem.nonHeapUsed = memUsage.getUsed() < 0 ? 0 : memUsage.getUsed();
+ stats.mem.nonHeapCommitted = memUsage.getCommitted() < 0 ? 0 : memUsage.getCommitted();
+
+ List<MemoryPoolMXBean> memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans();
+ List<MemoryPool> pools = new ArrayList<MemoryPool>();
+ for (int i = 0; i < memoryPoolMXBeans.size(); i++) {
+ try {
+ MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i);
+ MemoryUsage usage = memoryPoolMXBean.getUsage();
+ MemoryUsage peakUsage = memoryPoolMXBean.getPeakUsage();
+ String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), null);
+ if (name == null) { // if we can't resolve it, it's not interesting (Perm Gen, Code Cache)
+ continue;
+ }
+ pools.add(new MemoryPool(name,
+ usage.getUsed() < 0 ? 0 : usage.getUsed(),
+ usage.getMax() < 0 ? 0 : usage.getMax(),
+ peakUsage.getUsed() < 0 ? 0 : peakUsage.getUsed(),
+ peakUsage.getMax() < 0 ? 0 : peakUsage.getMax()
+ ));
+ } catch (OutOfMemoryError err) {
+ throw err; // rethrow
+ } catch (Throwable ex) {
+ /* Ignore: some JVMs may throw here with
+  * java.lang.InternalError: Memory Pool not found
+  * in which case we simply omit that pool. */
+ }
+ }
+ stats.mem.pools = pools.toArray(new MemoryPool[pools.size()]);
+
+ stats.threads = new Threads();
+ stats.threads.count = threadMXBean.getThreadCount();
+ stats.threads.peakCount = threadMXBean.getPeakThreadCount();
+
+ List<GarbageCollectorMXBean> gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans();
+ stats.gc = new GarbageCollectors();
+ stats.gc.collectors = new GarbageCollector[gcMxBeans.size()];
+ for (int i = 0; i < stats.gc.collectors.length; i++) {
+ GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i);
+ stats.gc.collectors[i] = new GarbageCollector();
+ stats.gc.collectors[i].name = GcNames.getByGcName(gcMxBean.getName(), gcMxBean.getName());
+ stats.gc.collectors[i].collectionCount = gcMxBean.getCollectionCount();
+ stats.gc.collectors[i].collectionTime = gcMxBean.getCollectionTime();
+ if (enableLastGc) {
+ try {
+ Object lastGcInfo = getLastGcInfoMethod.invoke(gcMxBean);
+ if (lastGcInfo != null) {
+ Map<String, MemoryUsage> usageBeforeGc = (Map<String, MemoryUsage>) getMemoryUsageBeforeGcMethod.invoke(lastGcInfo);
+ Map<String, MemoryUsage> usageAfterGc = (Map<String, MemoryUsage>) getMemoryUsageAfterGcMethod.invoke(lastGcInfo);
+ long startTime = (Long) getStartTimeMethod.invoke(lastGcInfo);
+ long endTime = (Long) getEndTimeMethod.invoke(lastGcInfo);
+ long duration = (Long) getDurationMethod.invoke(lastGcInfo);
+
+ long previousMemoryUsed = 0;
+ long memoryUsed = 0;
+ long memoryMax = 0;
+ for (Map.Entry<String, MemoryUsage> entry : usageBeforeGc.entrySet()) {
+ previousMemoryUsed += entry.getValue().getUsed();
+ }
+ for (Map.Entry<String, MemoryUsage> entry : usageAfterGc.entrySet()) {
+ MemoryUsage mu = entry.getValue();
+ memoryUsed += mu.getUsed();
+ memoryMax += mu.getMax();
+ }
+
+ stats.gc.collectors[i].lastGc = new GarbageCollector.LastGc(startTime, endTime, memoryMax, previousMemoryUsed, memoryUsed, duration);
+ }
+ } catch (Exception e) {
+ // ignore; last GC info is best-effort
+ }
+ }
+ }
+
+
+ if (bufferPoolsEnabled) {
+ try {
+ List bufferPools = (List) managementFactoryPlatformMXBeansMethod.invoke(null, bufferPoolMXBeanClass);
+ stats.bufferPools = new ArrayList<BufferPool>(bufferPools.size());
+ for (Object bufferPool : bufferPools) {
+ String name = (String) bufferPoolMXBeanNameMethod.invoke(bufferPool);
+ Long count = (Long) bufferPoolMXBeanCountMethod.invoke(bufferPool);
+ Long totalCapacity = (Long) bufferPoolMXBeanTotalCapacityMethod.invoke(bufferPool);
+ Long memoryUsed = (Long) bufferPoolMXBeanMemoryUsedMethod.invoke(bufferPool);
+ stats.bufferPools.add(new BufferPool(name, count, totalCapacity, memoryUsed));
+ }
+ } catch (Throwable t) {
+ //t.printStackTrace();
+ }
+ }
+
+ return stats;
+ }
+
+ long timestamp = -1;
+
+ long uptime;
+
+ Mem mem;
+
+ Threads threads;
+
+ GarbageCollectors gc;
+
+ List<BufferPool> bufferPools;
+
+ private JvmStats() {
+ }
+
+ public JvmStats(long timestamp, long uptime) {
+ this.timestamp = timestamp;
+ this.uptime = uptime;
+ }
+
+ public long timestamp() {
+ return timestamp;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ public TimeValue uptime() {
+ return new TimeValue(uptime);
+ }
+
+ public TimeValue getUptime() {
+ return uptime();
+ }
+
+ public Mem mem() {
+ return this.mem;
+ }
+
+ public Mem getMem() {
+ return mem();
+ }
+
+ public Threads threads() {
+ return threads;
+ }
+
+ public Threads getThreads() {
+ return threads();
+ }
+
+ public GarbageCollectors gc() {
+ return gc;
+ }
+
+ public GarbageCollectors getGc() {
+ return gc();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.JVM);
+ builder.field(Fields.TIMESTAMP, timestamp);
+ builder.timeValueField(Fields.UPTIME_IN_MILLIS, Fields.UPTIME, uptime);
+ if (mem != null) {
+ builder.startObject(Fields.MEM);
+
+ builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, mem.heapUsed);
+ if (mem.heapUsedPercent() >= 0) {
+ builder.field(Fields.HEAP_USED_PERCENT, mem.heapUsedPercent());
+ }
+ builder.byteSizeField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, mem.heapCommitted);
+ builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax);
+ builder.byteSizeField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, mem.nonHeapUsed);
+ builder.byteSizeField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, mem.nonHeapCommitted);
+
+ builder.startObject(Fields.POOLS);
+ for (MemoryPool pool : mem) {
+ builder.startObject(pool.name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, pool.used);
+ builder.byteSizeField(Fields.MAX_IN_BYTES, Fields.MAX, pool.max);
+
+ builder.byteSizeField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, pool.peakUsed);
+ builder.byteSizeField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, pool.peakMax);
+
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+ if (threads != null) {
+ builder.startObject(Fields.THREADS);
+ builder.field(Fields.COUNT, threads.count());
+ builder.field(Fields.PEAK_COUNT, threads.peakCount());
+ builder.endObject();
+ }
+ if (gc != null) {
+ builder.startObject(Fields.GC);
+
+ builder.startObject(Fields.COLLECTORS);
+ for (GarbageCollector collector : gc) {
+ builder.startObject(collector.name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field(Fields.COLLECTION_COUNT, collector.collectionCount());
+ builder.timeValueField(Fields.COLLECTION_TIME_IN_MILLIS, Fields.COLLECTION_TIME, collector.collectionTime);
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ }
+
+ if (bufferPools != null) {
+ builder.startObject(Fields.BUFFER_POOLS);
+ for (BufferPool bufferPool : bufferPools) {
+ builder.startObject(bufferPool.name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field(Fields.COUNT, bufferPool.count());
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, bufferPool.used);
+ builder.byteSizeField(Fields.TOTAL_CAPACITY_IN_BYTES, Fields.TOTAL_CAPACITY, bufferPool.totalCapacity);
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString JVM = new XContentBuilderString("jvm");
+ static final XContentBuilderString TIMESTAMP = new XContentBuilderString("timestamp");
+ static final XContentBuilderString UPTIME = new XContentBuilderString("uptime");
+ static final XContentBuilderString UPTIME_IN_MILLIS = new XContentBuilderString("uptime_in_millis");
+
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString HEAP_USED = new XContentBuilderString("heap_used");
+ static final XContentBuilderString HEAP_USED_IN_BYTES = new XContentBuilderString("heap_used_in_bytes");
+ static final XContentBuilderString HEAP_USED_PERCENT = new XContentBuilderString("heap_used_percent");
+ static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
+ static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
+ static final XContentBuilderString HEAP_COMMITTED = new XContentBuilderString("heap_committed");
+ static final XContentBuilderString HEAP_COMMITTED_IN_BYTES = new XContentBuilderString("heap_committed_in_bytes");
+
+ static final XContentBuilderString NON_HEAP_USED = new XContentBuilderString("non_heap_used");
+ static final XContentBuilderString NON_HEAP_USED_IN_BYTES = new XContentBuilderString("non_heap_used_in_bytes");
+ static final XContentBuilderString NON_HEAP_COMMITTED = new XContentBuilderString("non_heap_committed");
+ static final XContentBuilderString NON_HEAP_COMMITTED_IN_BYTES = new XContentBuilderString("non_heap_committed_in_bytes");
+
+ static final XContentBuilderString POOLS = new XContentBuilderString("pools");
+ static final XContentBuilderString USED = new XContentBuilderString("used");
+ static final XContentBuilderString USED_IN_BYTES = new XContentBuilderString("used_in_bytes");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString MAX_IN_BYTES = new XContentBuilderString("max_in_bytes");
+ static final XContentBuilderString PEAK_USED = new XContentBuilderString("peak_used");
+ static final XContentBuilderString PEAK_USED_IN_BYTES = new XContentBuilderString("peak_used_in_bytes");
+ static final XContentBuilderString PEAK_MAX = new XContentBuilderString("peak_max");
+ static final XContentBuilderString PEAK_MAX_IN_BYTES = new XContentBuilderString("peak_max_in_bytes");
+
+ static final XContentBuilderString THREADS = new XContentBuilderString("threads");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString PEAK_COUNT = new XContentBuilderString("peak_count");
+
+ static final XContentBuilderString GC = new XContentBuilderString("gc");
+ static final XContentBuilderString COLLECTORS = new XContentBuilderString("collectors");
+ static final XContentBuilderString COLLECTION_COUNT = new XContentBuilderString("collection_count");
+ static final XContentBuilderString COLLECTION_TIME = new XContentBuilderString("collection_time");
+ static final XContentBuilderString COLLECTION_TIME_IN_MILLIS = new XContentBuilderString("collection_time_in_millis");
+
+ static final XContentBuilderString BUFFER_POOLS = new XContentBuilderString("buffer_pools");
+ static final XContentBuilderString NAME = new XContentBuilderString("name");
+ static final XContentBuilderString TOTAL_CAPACITY = new XContentBuilderString("total_capacity");
+ static final XContentBuilderString TOTAL_CAPACITY_IN_BYTES = new XContentBuilderString("total_capacity_in_bytes");
+ }
+
+
+ public static JvmStats readJvmStats(StreamInput in) throws IOException {
+ JvmStats jvmStats = new JvmStats();
+ jvmStats.readFrom(in);
+ return jvmStats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ uptime = in.readVLong();
+
+ mem = Mem.readMem(in);
+ threads = Threads.readThreads(in);
+ gc = GarbageCollectors.readGarbageCollectors(in);
+
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ bufferPools = new ArrayList<BufferPool>(size);
+ for (int i = 0; i < size; i++) {
+ BufferPool bufferPool = new BufferPool();
+ bufferPool.readFrom(in);
+ bufferPools.add(bufferPool);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ out.writeVLong(uptime);
+
+ mem.writeTo(out);
+ threads.writeTo(out);
+ gc.writeTo(out);
+
+ if (bufferPools == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(bufferPools.size());
+ for (BufferPool bufferPool : bufferPools) {
+ bufferPool.writeTo(out);
+ }
+ }
+ }
+
+ public static class GarbageCollectors implements Streamable, Serializable, Iterable<GarbageCollector> {
+
+ GarbageCollector[] collectors;
+
+ GarbageCollectors() {
+ }
+
+ public static GarbageCollectors readGarbageCollectors(StreamInput in) throws IOException {
+ GarbageCollectors collectors = new GarbageCollectors();
+ collectors.readFrom(in);
+ return collectors;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ collectors = new GarbageCollector[in.readVInt()];
+ for (int i = 0; i < collectors.length; i++) {
+ collectors[i] = GarbageCollector.readGarbageCollector(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(collectors.length);
+ for (GarbageCollector gc : collectors) {
+ gc.writeTo(out);
+ }
+ }
+
+ public GarbageCollector[] collectors() {
+ return this.collectors;
+ }
+
+ @Override
+ public Iterator<GarbageCollector> iterator() {
+ return Iterators.forArray(collectors);
+ }
+ }
+
+ public static class GarbageCollector implements Streamable, Serializable {
+
+ public static class LastGc implements Streamable {
+
+ long startTime;
+ long endTime;
+ long max;
+ long beforeUsed;
+ long afterUsed;
+ long duration;
+
+ LastGc() {
+ }
+
+ public LastGc(long startTime, long endTime, long max, long beforeUsed, long afterUsed, long duration) {
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.max = max;
+ this.beforeUsed = beforeUsed;
+ this.afterUsed = afterUsed;
+ this.duration = duration;
+ }
+
+ public long startTime() {
+ return this.startTime;
+ }
+
+ public long getStartTime() {
+ return startTime();
+ }
+
+ public long endTime() {
+ return this.endTime;
+ }
+
+ public long getEndTime() {
+ return endTime();
+ }
+
+ public ByteSizeValue max() {
+ return new ByteSizeValue(max);
+ }
+
+ public ByteSizeValue getMax() {
+ return max();
+ }
+
+ public ByteSizeValue afterUsed() {
+ return new ByteSizeValue(afterUsed);
+ }
+
+ public ByteSizeValue getAfterUsed() {
+ return afterUsed();
+ }
+
+ public ByteSizeValue beforeUsed() {
+ return new ByteSizeValue(beforeUsed);
+ }
+
+ public ByteSizeValue getBeforeUsed() {
+ return beforeUsed();
+ }
+
+ public ByteSizeValue reclaimed() {
+ return new ByteSizeValue(beforeUsed - afterUsed);
+ }
+
+ public ByteSizeValue getReclaimed() {
+ return reclaimed();
+ }
+
+ public TimeValue duration() {
+ return new TimeValue(this.duration);
+ }
+
+ public TimeValue getDuration() {
+ return duration();
+ }
+
+ public static LastGc readLastGc(StreamInput in) throws IOException {
+ LastGc lastGc = new LastGc();
+ lastGc.readFrom(in);
+ return lastGc;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ startTime = in.readVLong();
+ endTime = in.readVLong();
+ max = in.readVLong();
+ beforeUsed = in.readVLong();
+ afterUsed = in.readVLong();
+ duration = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(startTime);
+ out.writeVLong(endTime);
+ out.writeVLong(max);
+ out.writeVLong(beforeUsed);
+ out.writeVLong(afterUsed);
+ out.writeVLong(duration);
+ }
+ }
+
+ String name;
+ long collectionCount;
+ long collectionTime;
+ LastGc lastGc;
+
+ GarbageCollector() {
+ }
+
+ public static GarbageCollector readGarbageCollector(StreamInput in) throws IOException {
+ GarbageCollector gc = new GarbageCollector();
+ gc.readFrom(in);
+ return gc;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ collectionCount = in.readVLong();
+ collectionTime = in.readVLong();
+ if (in.readBoolean()) {
+ lastGc = LastGc.readLastGc(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVLong(collectionCount);
+ out.writeVLong(collectionTime);
+ if (lastGc == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ lastGc.writeTo(out);
+ }
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public String getName() {
+ return name();
+ }
+
+ public long collectionCount() {
+ return collectionCount;
+ }
+
+ public long getCollectionCount() {
+ return collectionCount();
+ }
+
+ public TimeValue collectionTime() {
+ return new TimeValue(collectionTime, TimeUnit.MILLISECONDS);
+ }
+
+ public TimeValue getCollectionTime() {
+ return collectionTime();
+ }
+
+ public LastGc lastGc() {
+ return this.lastGc;
+ }
+
+ public LastGc getLastGc() {
+ return lastGc();
+ }
+ }
+
+ public static class Threads implements Streamable, Serializable {
+
+ int count;
+ int peakCount;
+
+ Threads() {
+ }
+
+ public int count() {
+ return count;
+ }
+
+ public int getCount() {
+ return count();
+ }
+
+ public int peakCount() {
+ return peakCount;
+ }
+
+ public int getPeakCount() {
+ return peakCount();
+ }
+
+ public static Threads readThreads(StreamInput in) throws IOException {
+ Threads threads = new Threads();
+ threads.readFrom(in);
+ return threads;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ count = in.readVInt();
+ peakCount = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(count);
+ out.writeVInt(peakCount);
+ }
+ }
+
+ public static class MemoryPool implements Streamable, Serializable {
+
+ String name;
+ long used;
+ long max;
+
+ long peakUsed;
+ long peakMax;
+
+ MemoryPool() {
+
+ }
+
+ public MemoryPool(String name, long used, long max, long peakUsed, long peakMax) {
+ this.name = name;
+ this.used = used;
+ this.max = max;
+ this.peakUsed = peakUsed;
+ this.peakMax = peakMax;
+ }
+
+ public static MemoryPool readMemoryPool(StreamInput in) throws IOException {
+ MemoryPool pool = new MemoryPool();
+ pool.readFrom(in);
+ return pool;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public ByteSizeValue used() {
+ return new ByteSizeValue(used);
+ }
+
+ public ByteSizeValue getUsed() {
+ return used();
+ }
+
+ public ByteSizeValue max() {
+ return new ByteSizeValue(max);
+ }
+
+ public ByteSizeValue getMax() {
+ return max();
+ }
+
+ public ByteSizeValue peakUsed() {
+ return new ByteSizeValue(peakUsed);
+ }
+
+ public ByteSizeValue getPeakUsed() {
+ return peakUsed();
+ }
+
+ public ByteSizeValue peakMax() {
+ return new ByteSizeValue(peakMax);
+ }
+
+ public ByteSizeValue getPeakMax() {
+ return peakMax();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ used = in.readVLong();
+ max = in.readVLong();
+ peakUsed = in.readVLong();
+ peakMax = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVLong(used);
+ out.writeVLong(max);
+ out.writeVLong(peakUsed);
+ out.writeVLong(peakMax);
+ }
+ }
+
+ public static class Mem implements Streamable, Serializable, Iterable<MemoryPool> {
+
+ long heapCommitted;
+ long heapUsed;
+ long heapMax;
+ long nonHeapCommitted;
+ long nonHeapUsed;
+
+ MemoryPool[] pools = new MemoryPool[0];
+
+ Mem() {
+ }
+
+ public static Mem readMem(StreamInput in) throws IOException {
+ Mem mem = new Mem();
+ mem.readFrom(in);
+ return mem;
+ }
+
+ @Override
+ public Iterator<MemoryPool> iterator() {
+ return Iterators.forArray(pools);
+ }
+
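+ // note: heapMax is (de)serialized after the non-heap values; writeTo below must keep the same wire order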
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ heapCommitted = in.readVLong();
+ heapUsed = in.readVLong();
+ nonHeapCommitted = in.readVLong();
+ nonHeapUsed = in.readVLong();
+ heapMax = in.readVLong();
+ pools = new MemoryPool[in.readVInt()];
+ for (int i = 0; i < pools.length; i++) {
+ pools[i] = MemoryPool.readMemoryPool(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(heapCommitted);
+ out.writeVLong(heapUsed);
+ out.writeVLong(nonHeapCommitted);
+ out.writeVLong(nonHeapUsed);
+ out.writeVLong(heapMax);
+ out.writeVInt(pools.length);
+ for (MemoryPool pool : pools) {
+ pool.writeTo(out);
+ }
+ }
+
+ public ByteSizeValue heapCommitted() {
+ return new ByteSizeValue(heapCommitted);
+ }
+
+ public ByteSizeValue getHeapCommitted() {
+ return heapCommitted();
+ }
+
+ public ByteSizeValue heapUsed() {
+ return new ByteSizeValue(heapUsed);
+ }
+
+ public ByteSizeValue getHeapUsed() {
+ return heapUsed();
+ }
+
+ /**
+ * returns the maximum heap size. 0 bytes signals unknown.
+ */
+ public ByteSizeValue heapMax() {
+ return new ByteSizeValue(heapMax);
+ }
+
+ /**
+ * returns the maximum heap size. 0 bytes signals unknown.
+ */
+ public ByteSizeValue getHeapMax() {
+ return heapMax();
+ }
+
+ /**
+ * returns the heap usage in percent. -1 signals unknown.
+ */
+ public short heapUsedPercent() {
+ if (heapMax == 0) {
+ return -1;
+ }
+ return (short) (heapUsed * 100 / heapMax);
+ }
+
+ /**
+ * returns the heap usage in percent. -1 signals unknown.
+ */
+ public short getHeapUsedPrecent() {
+ return heapUsedPercent();
+ }
+
+ public ByteSizeValue nonHeapCommitted() {
+ return new ByteSizeValue(nonHeapCommitted);
+ }
+
+ public ByteSizeValue getNonHeapCommitted() {
+ return nonHeapCommitted();
+ }
+
+ public ByteSizeValue nonHeapUsed() {
+ return new ByteSizeValue(nonHeapUsed);
+ }
+
+ public ByteSizeValue getNonHeapUsed() {
+ return nonHeapUsed();
+ }
+ }
+
+ public static class BufferPool implements Streamable {
+
+ String name;
+ long count;
+ long totalCapacity;
+ long used;
+
+ BufferPool() {
+ }
+
+ public BufferPool(String name, long count, long totalCapacity, long used) {
+ this.name = name;
+ this.count = count;
+ this.totalCapacity = totalCapacity;
+ this.used = used;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public long count() {
+ return this.count;
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+ public ByteSizeValue totalCapacity() {
+ return new ByteSizeValue(totalCapacity);
+ }
+
+ public ByteSizeValue getTotalCapacity() {
+ return totalCapacity();
+ }
+
+ public ByteSizeValue used() {
+ return new ByteSizeValue(used);
+ }
+
+ public ByteSizeValue getUsed() {
+ return used();
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ count = in.readLong();
+ totalCapacity = in.readLong();
+ used = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeLong(count);
+ out.writeLong(totalCapacity);
+ out.writeLong(used);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/network/JmxNetworkProbe.java b/src/main/java/org/elasticsearch/monitor/network/JmxNetworkProbe.java
new file mode 100644
index 0000000..839bb90
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/network/JmxNetworkProbe.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.network;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A NetworkProbe backed only by the JDK; reports empty network info and timestamp-only stats.
+ */
+public class JmxNetworkProbe extends AbstractComponent implements NetworkProbe {
+
+ @Inject
+ public JmxNetworkProbe(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public NetworkInfo networkInfo() {
+ NetworkInfo info = new NetworkInfo();
+ return info;
+ }
+
+ @Override
+ public NetworkStats networkStats() {
+ NetworkStats stats = new NetworkStats();
+ stats.timestamp = System.currentTimeMillis();
+ return stats;
+ }
+
+ @Override
+ public String ifconfig() {
+ return "NA";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/network/NetworkInfo.java b/src/main/java/org/elasticsearch/monitor/network/NetworkInfo.java
new file mode 100644
index 0000000..b590c45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/network/NetworkInfo.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.network;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Information about the node's primary network interface.
+ */
+public class NetworkInfo implements Streamable, Serializable, ToXContent {
+
+ public static final Interface NA_INTERFACE = new Interface();
+
+ long refreshInterval;
+
+ Interface primary = NA_INTERFACE;
+
+ public long refreshInterval() {
+ return this.refreshInterval;
+ }
+
+ public long getRefreshInterval() {
+ return this.refreshInterval;
+ }
+
+ public Interface primaryInterface() {
+ return primary;
+ }
+
+ public Interface getPrimaryInterface() {
+ return primaryInterface();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString NETWORK = new XContentBuilderString("network");
+ static final XContentBuilderString REFRESH_INTERVAL = new XContentBuilderString("refresh_interval");
+ static final XContentBuilderString PRIMARY_INTERFACE = new XContentBuilderString("primary_interface");
+ static final XContentBuilderString ADDRESS = new XContentBuilderString("address");
+ static final XContentBuilderString NAME = new XContentBuilderString("name");
+ static final XContentBuilderString MAC_ADDRESS = new XContentBuilderString("mac_address");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.NETWORK);
+ builder.field(Fields.REFRESH_INTERVAL, refreshInterval);
+ if (primary != NA_INTERFACE) {
+ builder.startObject(Fields.PRIMARY_INTERFACE);
+ builder.field(Fields.ADDRESS, primary.address());
+ builder.field(Fields.NAME, primary.name());
+ builder.field(Fields.MAC_ADDRESS, primary.macAddress());
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static NetworkInfo readNetworkInfo(StreamInput in) throws IOException {
+ NetworkInfo info = new NetworkInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ refreshInterval = in.readLong();
+ primary = Interface.readNetworkInterface(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(refreshInterval);
+ primary.writeTo(out);
+ }
+
+ public static class Interface implements Streamable, Serializable {
+
+ private String name = "";
+ private String address = "";
+ private String macAddress = "";
+
+ private Interface() {
+ }
+
+ public Interface(String name, String address, String macAddress) {
+ this.name = name;
+ this.address = address;
+ this.macAddress = macAddress;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public String getName() {
+ return name();
+ }
+
+ public String address() {
+ return address;
+ }
+
+ public String getAddress() {
+ return address();
+ }
+
+ public String macAddress() {
+ return macAddress;
+ }
+
+ public String getMacAddress() {
+ return macAddress();
+ }
+
+ public static Interface readNetworkInterface(StreamInput in) throws IOException {
+ Interface inf = new Interface();
+ inf.readFrom(in);
+ return inf;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ address = in.readString();
+ macAddress = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(address);
+ out.writeString(macAddress);
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/network/NetworkProbe.java b/src/main/java/org/elasticsearch/monitor/network/NetworkProbe.java
new file mode 100644
index 0000000..7ff0293
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/network/NetworkProbe.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.network;
+
+/**
+ * Probes the underlying platform for network info and stats.
+ */
+public interface NetworkProbe {
+
+ NetworkInfo networkInfo();
+
+ NetworkStats networkStats();
+
+ String ifconfig();
+}
diff --git a/src/main/java/org/elasticsearch/monitor/network/NetworkService.java b/src/main/java/org/elasticsearch/monitor/network/NetworkService.java
new file mode 100644
index 0000000..47c4fa7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/network/NetworkService.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.network;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.util.Enumeration;
+
+/**
+ * Exposes network info and stats from the configured probe, caching stats for refresh_interval.
+ */
+public class NetworkService extends AbstractComponent {
+
+ private final NetworkProbe probe;
+
+ private final NetworkInfo info;
+
+ private final TimeValue refreshInterval;
+
+ private NetworkStats cachedStats;
+
+ @Inject
+ public NetworkService(Settings settings, NetworkProbe probe) {
+ super(settings);
+ this.probe = probe;
+
+ this.refreshInterval = componentSettings.getAsTime("refresh_interval", TimeValue.timeValueSeconds(5));
+
+ logger.debug("Using probe [{}] with refresh_interval [{}]", probe, refreshInterval);
+
+ this.info = probe.networkInfo();
+ this.info.refreshInterval = refreshInterval.millis();
+ this.cachedStats = probe.networkStats();
+
+ if (logger.isDebugEnabled()) {
+ StringBuilder netDebug = new StringBuilder("net_info");
+ try {
+ Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
+ String hostName = InetAddress.getLocalHost().getHostName();
+ netDebug.append("\nhost [").append(hostName).append("]\n");
+ while (interfaces.hasMoreElements()) {
+ NetworkInterface net = interfaces.nextElement();
+
+ netDebug.append(net.getName()).append('\t').append("display_name [").append(net.getDisplayName()).append("]\n");
+ Enumeration<InetAddress> addresses = net.getInetAddresses();
+ netDebug.append("\t\taddress ");
+ while (addresses.hasMoreElements()) {
+ netDebug.append("[").append(addresses.nextElement()).append("] ");
+ }
+ netDebug.append('\n');
+ netDebug.append("\t\tmtu [").append(net.getMTU()).append("] multicast [").append(net.supportsMulticast()).append("] ptp [").append(net.isPointToPoint())
+ .append("] loopback [").append(net.isLoopback()).append("] up [").append(net.isUp()).append("] virtual [").append(net.isVirtual()).append("]")
+ .append('\n');
+
+ Enumeration<NetworkInterface> subInterfaces = net.getSubInterfaces();
+ if (subInterfaces != null && subInterfaces.hasMoreElements()) {
+ netDebug.append("\t\t\tsub interfaces:\n");
+
+ while (subInterfaces.hasMoreElements()) {
+
+ net = subInterfaces.nextElement();
+
+ netDebug.append("\t\t\t").append(net.getName()).append("\t").append("display_name [").append(net.getDisplayName()).append("]\n");
+ addresses = net.getInetAddresses();
+ netDebug.append("\t\t\t\t\taddress ");
+ while (addresses.hasMoreElements()) {
+ netDebug.append("[").append(addresses.nextElement()).append("] ");
+ }
+ netDebug.append('\n');
+ netDebug.append("\t\t\t\t\tmtu [").append(net.getMTU()).append("] multicast [").append(net.supportsMulticast()).append("] ptp [").append(net.isPointToPoint())
+ .append("] loopback [").append(net.isLoopback()).append("] up [").append(net.isUp()).append("] virtual [").append(net.isVirtual()).append("]")
+ .append('\n');
+ }
+ }
+ }
+ } catch (Exception ex) {
+ netDebug.append("failed to get Network Interface Info [" + ex.getMessage() + "]");
+ }
+ logger.debug(netDebug.toString());
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("ifconfig\n\n" + ifconfig());
+ }
+ }
+
+ public NetworkInfo info() {
+ return this.info;
+ }
+
+ public synchronized NetworkStats stats() {
+ if ((System.currentTimeMillis() - cachedStats.timestamp()) > refreshInterval.millis()) {
+ cachedStats = probe.networkStats();
+ }
+ return cachedStats;
+ }
+
+ public String ifconfig() {
+ return probe.ifconfig();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/network/NetworkStats.java b/src/main/java/org/elasticsearch/monitor/network/NetworkStats.java
new file mode 100644
index 0000000..221a091
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/network/NetworkStats.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.network;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * A point-in-time snapshot of network statistics, currently limited to TCP counters.
+ */
+public class NetworkStats implements Streamable, Serializable, ToXContent {
+
+ long timestamp;
+
+ Tcp tcp = null;
+
+ NetworkStats() {
+
+ }
+
+ static final class Fields {
+ static final XContentBuilderString NETWORK = new XContentBuilderString("network");
+ static final XContentBuilderString TCP = new XContentBuilderString("tcp");
+ static final XContentBuilderString ACTIVE_OPENS = new XContentBuilderString("active_opens");
+ static final XContentBuilderString PASSIVE_OPENS = new XContentBuilderString("passive_opens");
+ static final XContentBuilderString CURR_ESTAB = new XContentBuilderString("curr_estab");
+ static final XContentBuilderString IN_SEGS = new XContentBuilderString("in_segs");
+ static final XContentBuilderString OUT_SEGS = new XContentBuilderString("out_segs");
+ static final XContentBuilderString RETRANS_SEGS = new XContentBuilderString("retrans_segs");
+ static final XContentBuilderString ESTAB_RESETS = new XContentBuilderString("estab_resets");
+ static final XContentBuilderString ATTEMPT_FAILS = new XContentBuilderString("attempt_fails");
+ static final XContentBuilderString IN_ERRS = new XContentBuilderString("in_errs");
+ static final XContentBuilderString OUT_RSTS = new XContentBuilderString("out_rsts");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.NETWORK);
+ if (tcp != null) {
+ builder.startObject(Fields.TCP);
+ builder.field(Fields.ACTIVE_OPENS, tcp.getActiveOpens());
+ builder.field(Fields.PASSIVE_OPENS, tcp.getPassiveOpens());
+ builder.field(Fields.CURR_ESTAB, tcp.getCurrEstab());
+ builder.field(Fields.IN_SEGS, tcp.getInSegs());
+ builder.field(Fields.OUT_SEGS, tcp.getOutSegs());
+ builder.field(Fields.RETRANS_SEGS, tcp.getRetransSegs());
+ builder.field(Fields.ESTAB_RESETS, tcp.getEstabResets());
+ builder.field(Fields.ATTEMPT_FAILS, tcp.getAttemptFails());
+ builder.field(Fields.IN_ERRS, tcp.getInErrs());
+ builder.field(Fields.OUT_RSTS, tcp.getOutRsts());
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static NetworkStats readNetworkStats(StreamInput in) throws IOException {
+ NetworkStats stats = new NetworkStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ if (in.readBoolean()) {
+ tcp = Tcp.readNetworkTcp(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ if (tcp == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ tcp.writeTo(out);
+ }
+ }
+
+ public long timestamp() {
+ return timestamp;
+ }
+
+ public long getTimestamp() {
+ return timestamp();
+ }
+
+ public Tcp tcp() {
+ return tcp;
+ }
+
+ public Tcp getTcp() {
+ return tcp();
+ }
+
+ public static class Tcp implements Serializable, Streamable {
+
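+ // counters mirror the SNMP TCP-MIB group (tcpActiveOpens, tcpCurrEstab, tcpRetransSegs, ...)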
+ long activeOpens;
+ long passiveOpens;
+ long attemptFails;
+ long estabResets;
+ long currEstab;
+ long inSegs;
+ long outSegs;
+ long retransSegs;
+ long inErrs;
+ long outRsts;
+
+ public static Tcp readNetworkTcp(StreamInput in) throws IOException {
+ Tcp tcp = new Tcp();
+ tcp.readFrom(in);
+ return tcp;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ activeOpens = in.readLong();
+ passiveOpens = in.readLong();
+ attemptFails = in.readLong();
+ estabResets = in.readLong();
+ currEstab = in.readLong();
+ inSegs = in.readLong();
+ outSegs = in.readLong();
+ retransSegs = in.readLong();
+ inErrs = in.readLong();
+ outRsts = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(activeOpens);
+ out.writeLong(passiveOpens);
+ out.writeLong(attemptFails);
+ out.writeLong(estabResets);
+ out.writeLong(currEstab);
+ out.writeLong(inSegs);
+ out.writeLong(outSegs);
+ out.writeLong(retransSegs);
+ out.writeLong(inErrs);
+ out.writeLong(outRsts);
+ }
+
+ public long activeOpens() {
+ return this.activeOpens;
+ }
+
+ public long getActiveOpens() {
+ return activeOpens();
+ }
+
+ public long passiveOpens() {
+ return passiveOpens;
+ }
+
+ public long getPassiveOpens() {
+ return passiveOpens();
+ }
+
+ public long attemptFails() {
+ return attemptFails;
+ }
+
+ public long getAttemptFails() {
+ return attemptFails();
+ }
+
+ public long estabResets() {
+ return estabResets;
+ }
+
+ public long getEstabResets() {
+ return estabResets();
+ }
+
+ public long currEstab() {
+ return currEstab;
+ }
+
+ public long getCurrEstab() {
+ return currEstab();
+ }
+
+ public long inSegs() {
+ return inSegs;
+ }
+
+ public long getInSegs() {
+ return inSegs();
+ }
+
+ public long outSegs() {
+ return outSegs;
+ }
+
+ public long getOutSegs() {
+ return outSegs();
+ }
+
+ public long retransSegs() {
+ return retransSegs;
+ }
+
+ public long getRetransSegs() {
+ return retransSegs();
+ }
+
+ public long inErrs() {
+ return inErrs;
+ }
+
+ public long getInErrs() {
+ return inErrs();
+ }
+
+ public long outRsts() {
+ return outRsts;
+ }
+
+ public long getOutRsts() {
+ return outRsts();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/network/SigarNetworkProbe.java b/src/main/java/org/elasticsearch/monitor/network/SigarNetworkProbe.java
new file mode 100644
index 0000000..35be350
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/network/SigarNetworkProbe.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.network;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.hyperic.sigar.*;
+
+/**
+ * A NetworkProbe backed by the Sigar native library.
+ */
+public class SigarNetworkProbe extends AbstractComponent implements NetworkProbe {
+
+ private final SigarService sigarService;
+
+ @Inject
+ public SigarNetworkProbe(Settings settings, SigarService sigarService) {
+ super(settings);
+ this.sigarService = sigarService;
+ }
+
+ @Override
+ public NetworkInfo networkInfo() {
+ Sigar sigar = sigarService.sigar();
+
+ NetworkInfo networkInfo = new NetworkInfo();
+
+ try {
+ NetInterfaceConfig netInterfaceConfig = sigar.getNetInterfaceConfig(null);
+ networkInfo.primary = new NetworkInfo.Interface(netInterfaceConfig.getName(), netInterfaceConfig.getAddress(), netInterfaceConfig.getHwaddr());
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ return networkInfo;
+ }
+
+ @Override
+ public synchronized NetworkStats networkStats() {
+ Sigar sigar = sigarService.sigar();
+
+ NetworkStats stats = new NetworkStats();
+ stats.timestamp = System.currentTimeMillis();
+
+ try {
+ Tcp tcp = sigar.getTcp();
+ stats.tcp = new NetworkStats.Tcp();
+ stats.tcp.activeOpens = tcp.getActiveOpens();
+ stats.tcp.passiveOpens = tcp.getPassiveOpens();
+ stats.tcp.attemptFails = tcp.getAttemptFails();
+ stats.tcp.estabResets = tcp.getEstabResets();
+ stats.tcp.currEstab = tcp.getCurrEstab();
+ stats.tcp.inSegs = tcp.getInSegs();
+ stats.tcp.outSegs = tcp.getOutSegs();
+ stats.tcp.retransSegs = tcp.getRetransSegs();
+ stats.tcp.inErrs = tcp.getInErrs();
+ stats.tcp.outRsts = tcp.getOutRsts();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ return stats;
+ }
+
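+ // best-effort, ifconfig-style rendering of every interface; returns "NA" if Sigar fails outright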
+ @Override
+ public String ifconfig() {
+ Sigar sigar = sigarService.sigar();
+ StringBuilder sb = new StringBuilder();
+ try {
+ for (String ifname : sigar.getNetInterfaceList()) {
+ NetInterfaceConfig ifconfig = null;
+ try {
+ ifconfig = sigar.getNetInterfaceConfig(ifname);
+ } catch (SigarException e) {
+ sb.append(ifname + "\t" + "Not Available [" + e.getMessage() + "]").append('\n');
+ continue;
+ }
+ long flags = ifconfig.getFlags();
+
+ String hwaddr = "";
+ if (!NetFlags.NULL_HWADDR.equals(ifconfig.getHwaddr())) {
+ hwaddr = " HWaddr " + ifconfig.getHwaddr();
+ }
+
+ if (!ifconfig.getName().equals(ifconfig.getDescription())) {
+ sb.append(ifconfig.getDescription()).append('\n');
+ }
+
+ sb.append(ifconfig.getName() + "\t" + "Link encap:" + ifconfig.getType() + hwaddr).append('\n');
+
+ String ptp = "";
+ if ((flags & NetFlags.IFF_POINTOPOINT) > 0) {
+ ptp = " P-t-P:" + ifconfig.getDestination();
+ }
+
+ String bcast = "";
+ if ((flags & NetFlags.IFF_BROADCAST) > 0) {
+ bcast = " Bcast:" + ifconfig.getBroadcast();
+ }
+
+ sb.append("\t" +
+ "inet addr:" + ifconfig.getAddress() +
+ ptp + //unlikely
+ bcast +
+ " Mask:" + ifconfig.getNetmask()).append('\n');
+
+ sb.append("\t" +
+ NetFlags.getIfFlagsString(flags) +
+ " MTU:" + ifconfig.getMtu() +
+ " Metric:" + ifconfig.getMetric()).append('\n');
+ try {
+ NetInterfaceStat ifstat = sigar.getNetInterfaceStat(ifname);
+
+ sb.append("\t" +
+ "RX packets:" + ifstat.getRxPackets() +
+ " errors:" + ifstat.getRxErrors() +
+ " dropped:" + ifstat.getRxDropped() +
+ " overruns:" + ifstat.getRxOverruns() +
+ " frame:" + ifstat.getRxFrame()).append('\n');
+
+ sb.append("\t" +
+ "TX packets:" + ifstat.getTxPackets() +
+ " errors:" + ifstat.getTxErrors() +
+ " dropped:" + ifstat.getTxDropped() +
+ " overruns:" + ifstat.getTxOverruns() +
+ " carrier:" + ifstat.getTxCarrier()).append('\n');
+ sb.append("\t" + "collisions:" +
+ ifstat.getTxCollisions()).append('\n');
+
+ long rxBytes = ifstat.getRxBytes();
+ long txBytes = ifstat.getTxBytes();
+
+ sb.append("\t" +
+ "RX bytes:" + rxBytes +
+ " (" + Sigar.formatSize(rxBytes) + ")" +
+ " " +
+ "TX bytes:" + txBytes +
+ " (" + Sigar.formatSize(txBytes) + ")").append('\n');
+ } catch (SigarException e) {
+ // ignore
+ }
+ }
+ return sb.toString();
+ } catch (SigarException e) {
+ return "NA";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/os/JmxOsProbe.java b/src/main/java/org/elasticsearch/monitor/os/JmxOsProbe.java
new file mode 100644
index 0000000..2036fda
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/os/JmxOsProbe.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Fallback {@link OsProbe} used when Sigar is unavailable; it returns an
+ * empty {@link OsInfo} and an {@link OsStats} carrying only a timestamp.
+ */
+public class JmxOsProbe extends AbstractComponent implements OsProbe {
+
+ @Inject
+ public JmxOsProbe(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public OsInfo osInfo() {
+ return new OsInfo();
+ }
+
+ @Override
+ public OsStats osStats() {
+ OsStats stats = new OsStats();
+ stats.timestamp = System.currentTimeMillis();
+ return stats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
new file mode 100644
index 0000000..74db434
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
@@ -0,0 +1,377 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Static information about the operating system: available processors and,
+ * when a probe can supply them, CPU details plus total memory and swap.
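+ * <p/>
+ * <p>Optional sub-objects are streamed behind a boolean presence flag (see
+ * {@link #writeTo}). A hedged sketch of rendering an instance as JSON
+ * (<tt>osInfo</tt> as returned by <tt>OsService#info()</tt>; names illustrative):
+ * <pre>
+ * XContentBuilder builder = XContentFactory.jsonBuilder();
+ * builder.startObject();
+ * osInfo.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ * builder.endObject();
+ * String json = builder.string(); // {"os":{...}}
+ * </pre>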
+ */
+public class OsInfo implements Streamable, Serializable, ToXContent {
+
+ long refreshInterval;
+
+ int availableProcessors;
+
+ Cpu cpu = null;
+
+ Mem mem = null;
+
+ Swap swap = null;
+
+ OsInfo() {
+ }
+
+ public long refreshInterval() {
+ return this.refreshInterval;
+ }
+
+ public long getRefreshInterval() {
+ return this.refreshInterval;
+ }
+
+ public int availableProcessors() {
+ return this.availableProcessors;
+ }
+
+ public int getAvailableProcessors() {
+ return this.availableProcessors;
+ }
+
+ public Cpu cpu() {
+ return this.cpu;
+ }
+
+ public Cpu getCpu() {
+ return cpu();
+ }
+
+ public Mem mem() {
+ return this.mem;
+ }
+
+ public Mem getMem() {
+ return mem();
+ }
+
+ public Swap swap() {
+ return this.swap;
+ }
+
+ public Swap getSwap() {
+ return swap();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString OS = new XContentBuilderString("os");
+ static final XContentBuilderString REFRESH_INTERVAL = new XContentBuilderString("refresh_interval");
+ static final XContentBuilderString AVAILABLE_PROCESSORS = new XContentBuilderString("available_processors");
+ static final XContentBuilderString CPU = new XContentBuilderString("cpu");
+ static final XContentBuilderString VENDOR = new XContentBuilderString("vendor");
+ static final XContentBuilderString MODEL = new XContentBuilderString("model");
+ static final XContentBuilderString MHZ = new XContentBuilderString("mhz");
+ static final XContentBuilderString TOTAL_CORES = new XContentBuilderString("total_cores");
+ static final XContentBuilderString TOTAL_SOCKETS = new XContentBuilderString("total_sockets");
+ static final XContentBuilderString CORES_PER_SOCKET = new XContentBuilderString("cores_per_socket");
+ static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size");
+ static final XContentBuilderString CACHE_SIZE_IN_BYTES = new XContentBuilderString("cache_size_in_bytes");
+
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString SWAP = new XContentBuilderString("swap");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.OS);
+ builder.field(Fields.REFRESH_INTERVAL, refreshInterval);
+ builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
+ if (cpu != null) {
+ builder.startObject(Fields.CPU);
+ cpu.toXContent(builder, params);
+ builder.endObject();
+ }
+ if (mem != null) {
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, mem.total);
+ builder.endObject();
+ }
+ if (swap != null) {
+ builder.startObject(Fields.SWAP);
+ builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, swap.total);
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static OsInfo readOsInfo(StreamInput in) throws IOException {
+ OsInfo info = new OsInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ refreshInterval = in.readLong();
+ availableProcessors = in.readInt();
+ if (in.readBoolean()) {
+ cpu = Cpu.readCpu(in);
+ }
+ if (in.readBoolean()) {
+ mem = Mem.readMem(in);
+ }
+ if (in.readBoolean()) {
+ swap = Swap.readSwap(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(refreshInterval);
+ out.writeInt(availableProcessors);
+ if (cpu == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ cpu.writeTo(out);
+ }
+ if (mem == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ mem.writeTo(out);
+ }
+ if (swap == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ swap.writeTo(out);
+ }
+ }
+
+ public static class Swap implements Streamable, Serializable {
+
+ long total = -1;
+
+ Swap() {
+
+ }
+
+ public static Swap readSwap(StreamInput in) throws IOException {
+ Swap swap = new Swap();
+ swap.readFrom(in);
+ return swap;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(total);
+ }
+
+ public ByteSizeValue total() {
+ return new ByteSizeValue(total);
+ }
+
+ public ByteSizeValue getTotal() {
+ return total();
+ }
+
+ }
+
+ public static class Mem implements Streamable, Serializable {
+
+ long total = -1;
+
+ Mem() {
+
+ }
+
+ public static Mem readMem(StreamInput in) throws IOException {
+ Mem mem = new Mem();
+ mem.readFrom(in);
+ return mem;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(total);
+ }
+
+ public ByteSizeValue total() {
+ return new ByteSizeValue(total);
+ }
+
+ public ByteSizeValue getTotal() {
+ return total();
+ }
+
+ }
+
+ public static class Cpu implements Streamable, Serializable, ToXContent {
+
+ String vendor = "";
+ String model = "";
+ int mhz = -1;
+ int totalCores = -1;
+ int totalSockets = -1;
+ int coresPerSocket = -1;
+ long cacheSize = -1;
+
+ Cpu() {
+
+ }
+
+ public String vendor() {
+ return this.vendor;
+ }
+
+ public String getVendor() {
+ return vendor();
+ }
+
+ public String model() {
+ return model;
+ }
+
+ public String getModel() {
+ return model;
+ }
+
+ public int mhz() {
+ return mhz;
+ }
+
+ public int getMhz() {
+ return mhz;
+ }
+
+ public int totalCores() {
+ return totalCores;
+ }
+
+ public int getTotalCores() {
+ return totalCores();
+ }
+
+ public int totalSockets() {
+ return totalSockets;
+ }
+
+ public int getTotalSockets() {
+ return totalSockets();
+ }
+
+ public int coresPerSocket() {
+ return coresPerSocket;
+ }
+
+ public int getCoresPerSocket() {
+ return coresPerSocket();
+ }
+
+ public ByteSizeValue cacheSize() {
+ return new ByteSizeValue(cacheSize);
+ }
+
+ public ByteSizeValue getCacheSize() {
+ return cacheSize();
+ }
+
+ public static Cpu readCpu(StreamInput in) throws IOException {
+ Cpu cpu = new Cpu();
+ cpu.readFrom(in);
+ return cpu;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ vendor = in.readString();
+ model = in.readString();
+ mhz = in.readInt();
+ totalCores = in.readInt();
+ totalSockets = in.readInt();
+ coresPerSocket = in.readInt();
+ cacheSize = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(vendor);
+ out.writeString(model);
+ out.writeInt(mhz);
+ out.writeInt(totalCores);
+ out.writeInt(totalSockets);
+ out.writeInt(coresPerSocket);
+ out.writeLong(cacheSize);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ Cpu cpu = (Cpu) o;
+
+ return model.equals(cpu.model) && vendor.equals(cpu.vendor);
+ }
+
+ @Override
+ public int hashCode() {
+ return model.hashCode();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.VENDOR, vendor);
+ builder.field(Fields.MODEL, model);
+ builder.field(Fields.MHZ, mhz);
+ builder.field(Fields.TOTAL_CORES, totalCores);
+ builder.field(Fields.TOTAL_SOCKETS, totalSockets);
+ builder.field(Fields.CORES_PER_SOCKET, coresPerSocket);
+ builder.byteSizeField(Fields.CACHE_SIZE_IN_BYTES, Fields.CACHE_SIZE, cacheSize);
+ return builder;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
new file mode 100644
index 0000000..b956e5f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+/**
+ * Probe for operating system level information and statistics.
+ */
+public interface OsProbe {
+
+ OsInfo osInfo();
+
+ OsStats osStats();
+}
diff --git a/src/main/java/org/elasticsearch/monitor/os/OsService.java b/src/main/java/org/elasticsearch/monitor/os/OsService.java
new file mode 100644
index 0000000..6e7b5f0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/os/OsService.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Service exposing static OS info and periodically refreshed OS stats.
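+ * <p/>
+ * <p>{@link #stats()} serves a cached value and only re-probes once the
+ * configured <tt>refresh_interval</tt> (default 1s) has elapsed, so it is
+ * cheap to poll (sketch; <tt>osService</tt> is normally injected):
+ * <pre>
+ * OsInfo info = osService.info();    // probed once at construction
+ * OsStats stats = osService.stats(); // cached, refreshed lazily
+ * </pre>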
+ */
+public class OsService extends AbstractComponent {
+
+ private final OsProbe probe;
+
+ private final OsInfo info;
+
+ private final TimeValue refreshInterval;
+
+ private OsStats cachedStats;
+
+ @Inject
+ public OsService(Settings settings, OsProbe probe) {
+ super(settings);
+ this.probe = probe;
+
+ this.refreshInterval = componentSettings.getAsTime("refresh_interval", TimeValue.timeValueSeconds(1));
+
+ this.info = probe.osInfo();
+ this.info.refreshInterval = refreshInterval.millis();
+ this.info.availableProcessors = Runtime.getRuntime().availableProcessors();
+ this.cachedStats = probe.osStats();
+
+ logger.debug("Using probe [{}] with refresh_interval [{}]", probe, refreshInterval);
+ }
+
+ public OsInfo info() {
+ return this.info;
+ }
+
+ public synchronized OsStats stats() {
+ if ((System.currentTimeMillis() - cachedStats.timestamp()) > refreshInterval.millis()) {
+ cachedStats = probe.osStats();
+ }
+ return cachedStats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/src/main/java/org/elasticsearch/monitor/os/OsStats.java
new file mode 100644
index 0000000..44f43fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/os/OsStats.java
@@ -0,0 +1,441 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Point-in-time operating system statistics: load average, uptime, CPU,
+ * memory and swap.
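+ * <p/>
+ * <p>The XContent output honours a <tt>load_average_format</tt> param; a
+ * hedged sketch of requesting the hash form (<tt>osStats</tt> and
+ * <tt>builder</tt> illustrative):
+ * <pre>
+ * Map&lt;String, String&gt; map = new HashMap&lt;String, String&gt;();
+ * map.put("load_average_format", "hash");
+ * osStats.toXContent(builder, new ToXContent.MapParams(map));
+ * // load_average is emitted as {"1m":...,"5m":...,"15m":...}
+ * </pre>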
+ */
+public class OsStats implements Streamable, Serializable, ToXContent {
+
+ public static final double[] EMPTY_LOAD = new double[0];
+
+ long timestamp;
+
+ double[] loadAverage = EMPTY_LOAD;
+
+ long uptime = -1;
+
+ Cpu cpu = null;
+
+ Mem mem = null;
+
+ Swap swap = null;
+
+ OsStats() {
+ }
+
+ public long timestamp() {
+ return timestamp;
+ }
+
+ public long getTimestamp() {
+ return timestamp();
+ }
+
+ public double[] loadAverage() {
+ return loadAverage;
+ }
+
+ public double[] getLoadAverage() {
+ return loadAverage();
+ }
+
+ public TimeValue uptime() {
+ return new TimeValue(uptime, TimeUnit.SECONDS);
+ }
+
+ public TimeValue getUptime() {
+ return uptime();
+ }
+
+ public Cpu cpu() {
+ return this.cpu;
+ }
+
+ public Cpu getCpu() {
+ return cpu();
+ }
+
+ public Mem mem() {
+ return this.mem;
+ }
+
+ public Mem getMem() {
+ return mem();
+ }
+
+ public Swap swap() {
+ return this.swap;
+ }
+
+ public Swap getSwap() {
+ return swap();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString OS = new XContentBuilderString("os");
+ static final XContentBuilderString TIMESTAMP = new XContentBuilderString("timestamp");
+ static final XContentBuilderString UPTIME = new XContentBuilderString("uptime");
+ static final XContentBuilderString UPTIME_IN_MILLIS = new XContentBuilderString("uptime_in_millis");
+ static final XContentBuilderString LOAD_AVERAGE = new XContentBuilderString("load_average");
+ static final XContentBuilderString LOAD_AVERAGE_1m = new XContentBuilderString("1m");
+ static final XContentBuilderString LOAD_AVERAGE_5m = new XContentBuilderString("5m");
+ static final XContentBuilderString LOAD_AVERAGE_15m = new XContentBuilderString("15m");
+
+ static final XContentBuilderString CPU = new XContentBuilderString("cpu");
+ static final XContentBuilderString SYS = new XContentBuilderString("sys");
+ static final XContentBuilderString USER = new XContentBuilderString("user");
+ static final XContentBuilderString USAGE = new XContentBuilderString("usage");
+ static final XContentBuilderString IDLE = new XContentBuilderString("idle");
+ static final XContentBuilderString STOLEN = new XContentBuilderString("stolen");
+
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString SWAP = new XContentBuilderString("swap");
+ static final XContentBuilderString FREE = new XContentBuilderString("free");
+ static final XContentBuilderString FREE_IN_BYTES = new XContentBuilderString("free_in_bytes");
+ static final XContentBuilderString USED = new XContentBuilderString("used");
+ static final XContentBuilderString USED_IN_BYTES = new XContentBuilderString("used_in_bytes");
+
+ static final XContentBuilderString FREE_PERCENT = new XContentBuilderString("free_percent");
+ static final XContentBuilderString USED_PERCENT = new XContentBuilderString("used_percent");
+
+ static final XContentBuilderString ACTUAL_FREE = new XContentBuilderString("actual_free");
+ static final XContentBuilderString ACTUAL_FREE_IN_BYTES = new XContentBuilderString("actual_free_in_bytes");
+ static final XContentBuilderString ACTUAL_USED = new XContentBuilderString("actual_used");
+ static final XContentBuilderString ACTUAL_USED_IN_BYTES = new XContentBuilderString("actual_used_in_bytes");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.OS);
+ builder.field(Fields.TIMESTAMP, timestamp);
+
+ if (uptime != -1) {
+ builder.timeValueField(Fields.UPTIME_IN_MILLIS, Fields.UPTIME, uptime);
+ }
+
+ if (loadAverage.length > 0) {
+ if (params.param("load_average_format", "array").equals("hash")) {
+ builder.startObject(Fields.LOAD_AVERAGE);
+ builder.field(Fields.LOAD_AVERAGE_1m, loadAverage[0]);
+ builder.field(Fields.LOAD_AVERAGE_5m, loadAverage[1]);
+ builder.field(Fields.LOAD_AVERAGE_15m, loadAverage[2]);
+ builder.endObject();
+ } else {
+ builder.startArray(Fields.LOAD_AVERAGE);
+ for (double value : loadAverage) {
+ builder.value(value);
+ }
+ builder.endArray();
+ }
+ }
+
+ if (cpu != null) {
+ builder.startObject(Fields.CPU);
+ builder.field(Fields.SYS, cpu.sys());
+ builder.field(Fields.USER, cpu.user());
+ builder.field(Fields.IDLE, cpu.idle());
+ builder.field(Fields.USAGE, cpu.user() + cpu.sys());
+ builder.field(Fields.STOLEN, cpu.stolen());
+ builder.endObject();
+ }
+
+ if (mem != null) {
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, mem.free);
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, mem.used);
+
+ builder.field(Fields.FREE_PERCENT, mem.freePercent());
+ builder.field(Fields.USED_PERCENT, mem.usedPercent());
+
+ builder.byteSizeField(Fields.ACTUAL_FREE_IN_BYTES, Fields.ACTUAL_FREE, mem.actualFree);
+ builder.byteSizeField(Fields.ACTUAL_USED_IN_BYTES, Fields.ACTUAL_USED, mem.actualUsed);
+
+ builder.endObject();
+ }
+
+ if (swap != null) {
+ builder.startObject(Fields.SWAP);
+ builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, swap.used);
+ builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, swap.free);
+ builder.endObject();
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ public static OsStats readOsStats(StreamInput in) throws IOException {
+ OsStats stats = new OsStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ loadAverage = new double[in.readVInt()];
+ for (int i = 0; i < loadAverage.length; i++) {
+ loadAverage[i] = in.readDouble();
+ }
+ uptime = in.readLong();
+ if (in.readBoolean()) {
+ cpu = Cpu.readCpu(in);
+ }
+ if (in.readBoolean()) {
+ mem = Mem.readMem(in);
+ }
+ if (in.readBoolean()) {
+ swap = Swap.readSwap(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ out.writeVInt(loadAverage.length);
+ for (double val : loadAverage) {
+ out.writeDouble(val);
+ }
+ out.writeLong(uptime);
+ if (cpu == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ cpu.writeTo(out);
+ }
+ if (mem == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ mem.writeTo(out);
+ }
+ if (swap == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ swap.writeTo(out);
+ }
+ }
+
+ public static class Swap implements Streamable, Serializable {
+
+ long free = -1;
+ long used = -1;
+
+ public ByteSizeValue free() {
+ return new ByteSizeValue(free);
+ }
+
+ public ByteSizeValue getFree() {
+ return free();
+ }
+
+ public ByteSizeValue used() {
+ return new ByteSizeValue(used);
+ }
+
+ public ByteSizeValue getUsed() {
+ return used();
+ }
+
+ public static Swap readSwap(StreamInput in) throws IOException {
+ Swap swap = new Swap();
+ swap.readFrom(in);
+ return swap;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ free = in.readLong();
+ used = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(free);
+ out.writeLong(used);
+ }
+ }
+
+ public static class Mem implements Streamable, Serializable {
+
+ long free = -1;
+ short freePercent = -1;
+ long used = -1;
+ short usedPercent = -1;
+ long actualFree = -1;
+ long actualUsed = -1;
+
+ public static Mem readMem(StreamInput in) throws IOException {
+ Mem mem = new Mem();
+ mem.readFrom(in);
+ return mem;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ free = in.readLong();
+ freePercent = in.readShort();
+ used = in.readLong();
+ usedPercent = in.readShort();
+ actualFree = in.readLong();
+ actualUsed = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(free);
+ out.writeShort(freePercent);
+ out.writeLong(used);
+ out.writeShort(usedPercent);
+ out.writeLong(actualFree);
+ out.writeLong(actualUsed);
+ }
+
+ public ByteSizeValue used() {
+ return new ByteSizeValue(used);
+ }
+
+ public ByteSizeValue getUsed() {
+ return used();
+ }
+
+ public short usedPercent() {
+ return usedPercent;
+ }
+
+ public short getUsedPercent() {
+ return usedPercent();
+ }
+
+ public ByteSizeValue free() {
+ return new ByteSizeValue(free);
+ }
+
+ public ByteSizeValue getFree() {
+ return free();
+ }
+
+ public short freePercent() {
+ return freePercent;
+ }
+
+ public short getFreePercent() {
+ return freePercent();
+ }
+
+ public ByteSizeValue actualFree() {
+ return new ByteSizeValue(actualFree);
+ }
+
+ public ByteSizeValue getActualFree() {
+ return actualFree();
+ }
+
+ public ByteSizeValue actualUsed() {
+ return new ByteSizeValue(actualUsed);
+ }
+
+ public ByteSizeValue getActualUsed() {
+ return actualUsed();
+ }
+ }
+
+ public static class Cpu implements Streamable, Serializable {
+
+ short sys = -1;
+ short user = -1;
+ short idle = -1;
+ short stolen = -1;
+
+ Cpu() {
+
+ }
+
+ public static Cpu readCpu(StreamInput in) throws IOException {
+ Cpu cpu = new Cpu();
+ cpu.readFrom(in);
+ return cpu;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ sys = in.readShort();
+ user = in.readShort();
+ idle = in.readShort();
+ stolen = in.readShort();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeShort(sys);
+ out.writeShort(user);
+ out.writeShort(idle);
+ out.writeShort(stolen);
+ }
+
+ public short sys() {
+ return sys;
+ }
+
+ public short getSys() {
+ return sys();
+ }
+
+ public short user() {
+ return user;
+ }
+
+ public short getUser() {
+ return user();
+ }
+
+ public short idle() {
+ return idle;
+ }
+
+ public short getIdle() {
+ return idle();
+ }
+
+ public short stolen() {
+ return stolen;
+ }
+
+ public short getStolen() {
+ return stolen();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/os/SigarOsProbe.java b/src/main/java/org/elasticsearch/monitor/os/SigarOsProbe.java
new file mode 100644
index 0000000..87f0019
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/os/SigarOsProbe.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.hyperic.sigar.*;
+
+/**
+ * {@link OsProbe} backed by Sigar. CPU fractions reported by
+ * {@link CpuPerc} are scaled to whole percents and stored as shorts.
+ */
+public class SigarOsProbe extends AbstractComponent implements OsProbe {
+
+ private final SigarService sigarService;
+
+ @Inject
+ public SigarOsProbe(Settings settings, SigarService sigarService) {
+ super(settings);
+ this.sigarService = sigarService;
+ }
+
+ @Override
+ public OsInfo osInfo() {
+ Sigar sigar = sigarService.sigar();
+ OsInfo info = new OsInfo();
+ try {
+ CpuInfo[] infos = sigar.getCpuInfoList();
+ info.cpu = new OsInfo.Cpu();
+ info.cpu.vendor = infos[0].getVendor();
+ info.cpu.model = infos[0].getModel();
+ info.cpu.mhz = infos[0].getMhz();
+ info.cpu.totalCores = infos[0].getTotalCores();
+ info.cpu.totalSockets = infos[0].getTotalSockets();
+ info.cpu.coresPerSocket = infos[0].getCoresPerSocket();
+ if (infos[0].getCacheSize() != Sigar.FIELD_NOTIMPL) {
+ info.cpu.cacheSize = infos[0].getCacheSize();
+ }
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ Mem mem = sigar.getMem();
+ info.mem = new OsInfo.Mem();
+ info.mem.total = mem.getTotal();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ Swap swap = sigar.getSwap();
+ info.swap = new OsInfo.Swap();
+ info.swap.total = swap.getTotal();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ return info;
+ }
+
+ @Override
+ public OsStats osStats() {
+ Sigar sigar = sigarService.sigar();
+ OsStats stats = new OsStats();
+ stats.timestamp = System.currentTimeMillis();
+ try {
+ stats.loadAverage = sigar.getLoadAverage();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ stats.uptime = (long) sigar.getUptime().getUptime();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ CpuPerc cpuPerc = sigar.getCpuPerc();
+ stats.cpu = new OsStats.Cpu();
+ stats.cpu.sys = (short) (cpuPerc.getSys() * 100);
+ stats.cpu.user = (short) (cpuPerc.getUser() * 100);
+ stats.cpu.idle = (short) (cpuPerc.getIdle() * 100);
+ stats.cpu.stolen = (short) (cpuPerc.getStolen() * 100);
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ Mem mem = sigar.getMem();
+ stats.mem = new OsStats.Mem();
+ stats.mem.free = mem.getFree();
+ stats.mem.freePercent = (short) mem.getFreePercent();
+ stats.mem.used = mem.getUsed();
+ stats.mem.usedPercent = (short) mem.getUsedPercent();
+ stats.mem.actualFree = mem.getActualFree();
+ stats.mem.actualUsed = mem.getActualUsed();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ Swap swap = sigar.getSwap();
+ stats.swap = new OsStats.Swap();
+ stats.swap.free = swap.getFree();
+ stats.swap.used = swap.getUsed();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ return stats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/process/JmxProcessProbe.java b/src/main/java/org/elasticsearch/monitor/process/JmxProcessProbe.java
new file mode 100644
index 0000000..f91d782
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/process/JmxProcessProbe.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.process;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.OperatingSystemMXBean;
+import java.lang.reflect.Method;
+
+import static org.elasticsearch.monitor.jvm.JvmInfo.jvmInfo;
+
+/**
+ * Process probe based on JMX. File descriptor counts are read reflectively
+ * from the platform {@link OperatingSystemMXBean}, since the accessors are
+ * vendor extensions; <tt>-1</tt> is reported when they are unavailable.
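+ * <p/>
+ * <p>Both static helpers degrade gracefully:
+ * <pre>
+ * long open = JmxProcessProbe.getOpenFileDescriptorCount(); // -1 if unsupported
+ * long max = JmxProcessProbe.getMaxFileDescriptorCount();   // -1 if unsupported
+ * </pre>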
+ */
+public class JmxProcessProbe extends AbstractComponent implements ProcessProbe {
+
+ private static final OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
+
+ private static final Method getMaxFileDescriptorCountField;
+ private static final Method getOpenFileDescriptorCountField;
+
+ static {
+ Method method = null;
+ try {
+ method = osMxBean.getClass().getDeclaredMethod("getMaxFileDescriptorCount");
+ method.setAccessible(true);
+ } catch (Exception e) {
+ // not available
+ }
+ getMaxFileDescriptorCountField = method;
+
+ method = null;
+ try {
+ method = osMxBean.getClass().getDeclaredMethod("getOpenFileDescriptorCount");
+ method.setAccessible(true);
+ } catch (Exception e) {
+ // not available
+ }
+ getOpenFileDescriptorCountField = method;
+ }
+
+ public static long getMaxFileDescriptorCount() {
+ if (getMaxFileDescriptorCountField == null) {
+ return -1;
+ }
+ try {
+ return (Long) getMaxFileDescriptorCountField.invoke(osMxBean);
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long getOpenFileDescriptorCount() {
+ if (getOpenFileDescriptorCountField == null) {
+ return -1;
+ }
+ try {
+ return (Long) getOpenFileDescriptorCountField.invoke(osMxBean);
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ @Inject
+ public JmxProcessProbe(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public ProcessInfo processInfo() {
+ return new ProcessInfo(jvmInfo().pid(), getMaxFileDescriptorCount());
+ }
+
+ @Override
+ public ProcessStats processStats() {
+ ProcessStats stats = new ProcessStats();
+ stats.timestamp = System.currentTimeMillis();
+ stats.openFileDescriptors = getOpenFileDescriptorCount();
+ return stats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java b/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java
new file mode 100644
index 0000000..8607f94
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/process/ProcessInfo.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.process;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.jna.Natives;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Static information about the Elasticsearch process: process id, file
+ * descriptor limit and mlockall state.
+ */
+public class ProcessInfo implements Streamable, Serializable, ToXContent {
+
+ long refreshInterval;
+
+ private long id;
+
+ private long maxFileDescriptors = -1;
+
+ private boolean mlockall;
+
+ ProcessInfo() {
+
+ }
+
+ public ProcessInfo(long id, long maxFileDescriptors) {
+ this.id = id;
+ this.maxFileDescriptors = maxFileDescriptors;
+ this.mlockall = Natives.LOCAL_MLOCKALL;
+ }
+
+ public long refreshInterval() {
+ return this.refreshInterval;
+ }
+
+ public long getRefreshInterval() {
+ return this.refreshInterval;
+ }
+
+ /**
+ * The process id.
+ */
+ public long id() {
+ return this.id;
+ }
+
+ /**
+ * The process id.
+ */
+ public long getId() {
+ return id();
+ }
+
+ public long maxFileDescriptors() {
+ return this.maxFileDescriptors;
+ }
+
+ public long getMaxFileDescriptors() {
+ return maxFileDescriptors;
+ }
+
+ public boolean mlockAll() {
+ return mlockall;
+ }
+
+ public boolean isMlockall() {
+ return mlockall;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString PROCESS = new XContentBuilderString("process");
+ static final XContentBuilderString REFRESH_INTERVAL = new XContentBuilderString("refresh_interval");
+ static final XContentBuilderString ID = new XContentBuilderString("id");
+ static final XContentBuilderString MAX_FILE_DESCRIPTORS = new XContentBuilderString("max_file_descriptors");
+ static final XContentBuilderString MLOCKALL = new XContentBuilderString("mlockall");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.PROCESS);
+ builder.field(Fields.REFRESH_INTERVAL, refreshInterval);
+ builder.field(Fields.ID, id);
+ builder.field(Fields.MAX_FILE_DESCRIPTORS, maxFileDescriptors);
+ builder.field(Fields.MLOCKALL, mlockall);
+ builder.endObject();
+ return builder;
+ }
+
+ public static ProcessInfo readProcessInfo(StreamInput in) throws IOException {
+ ProcessInfo info = new ProcessInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ refreshInterval = in.readLong();
+ id = in.readLong();
+ maxFileDescriptors = in.readLong();
+ mlockall = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(refreshInterval);
+ out.writeLong(id);
+ out.writeLong(maxFileDescriptors);
+ out.writeBoolean(mlockall);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java b/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java
new file mode 100644
index 0000000..89d56ed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/process/ProcessProbe.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.process;
+
+/**
+ * Probe for process level information and statistics.
+ */
+public interface ProcessProbe {
+
+ ProcessInfo processInfo();
+
+ ProcessStats processStats();
+}
diff --git a/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/src/main/java/org/elasticsearch/monitor/process/ProcessService.java
new file mode 100644
index 0000000..509701e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/process/ProcessService.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.process;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ * Service exposing static process info and periodically refreshed process
+ * stats, mirroring {@link org.elasticsearch.monitor.os.OsService}.
+ */
+public class ProcessService extends AbstractComponent {
+
+ private final ProcessProbe probe;
+
+ private final ProcessInfo info;
+
+ private final TimeValue refreshInterval;
+
+ private ProcessStats cachedStats;
+
+ @Inject
+ public ProcessService(Settings settings, ProcessProbe probe) {
+ super(settings);
+ this.probe = probe;
+
+ this.refreshInterval = componentSettings.getAsTime("refresh_interval", TimeValue.timeValueSeconds(1));
+
+ this.info = probe.processInfo();
+ this.info.refreshInterval = refreshInterval.millis();
+ this.cachedStats = probe.processStats();
+
+ logger.debug("Using probe [{}] with refresh_interval [{}]", probe, refreshInterval);
+ }
+
+ public ProcessInfo info() {
+ return this.info;
+ }
+
+ public synchronized ProcessStats stats() {
+ if ((System.currentTimeMillis() - cachedStats.timestamp()) > refreshInterval.millis()) {
+ cachedStats = probe.processStats();
+ }
+ return cachedStats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java b/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java
new file mode 100644
index 0000000..c286b96
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.process;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Point-in-time statistics for the Elasticsearch process: open file
+ * descriptors, CPU and memory usage.
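+ * <p/>
+ * <p>CPU and memory are optional; a hedged reading sketch
+ * (<tt>processService</tt> illustrative):
+ * <pre>
+ * ProcessStats stats = processService.stats();
+ * if (stats.cpu() != null) {
+ *     short percent = stats.cpu().percent(); // whole percents, -1 when unknown
+ * }
+ * </pre>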
+ */
+public class ProcessStats implements Streamable, Serializable, ToXContent {
+
+ long timestamp = -1;
+
+ long openFileDescriptors;
+
+ Cpu cpu = null;
+
+ Mem mem = null;
+
+ ProcessStats() {
+ }
+
+ public long timestamp() {
+ return this.timestamp;
+ }
+
+ public long getTimestamp() {
+ return timestamp();
+ }
+
+ public long openFileDescriptors() {
+ return this.openFileDescriptors;
+ }
+
+ public long getOpenFileDescriptors() {
+ return openFileDescriptors;
+ }
+
+ public Cpu cpu() {
+ return cpu;
+ }
+
+ public Cpu getCpu() {
+ return cpu();
+ }
+
+ public Mem mem() {
+ return mem;
+ }
+
+ public Mem getMem() {
+ return mem();
+ }
+
+ static final class Fields {
+ static final XContentBuilderString PROCESS = new XContentBuilderString("process");
+ static final XContentBuilderString TIMESTAMP = new XContentBuilderString("timestamp");
+ static final XContentBuilderString OPEN_FILE_DESCRIPTORS = new XContentBuilderString("open_file_descriptors");
+
+ static final XContentBuilderString CPU = new XContentBuilderString("cpu");
+ static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
+ static final XContentBuilderString SYS = new XContentBuilderString("sys");
+ static final XContentBuilderString SYS_IN_MILLIS = new XContentBuilderString("sys_in_millis");
+ static final XContentBuilderString USER = new XContentBuilderString("user");
+ static final XContentBuilderString USER_IN_MILLIS = new XContentBuilderString("user_in_millis");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_IN_MILLIS = new XContentBuilderString("total_in_millis");
+
+ static final XContentBuilderString MEM = new XContentBuilderString("mem");
+ static final XContentBuilderString RESIDENT = new XContentBuilderString("resident");
+ static final XContentBuilderString RESIDENT_IN_BYTES = new XContentBuilderString("resident_in_bytes");
+ static final XContentBuilderString SHARE = new XContentBuilderString("share");
+ static final XContentBuilderString SHARE_IN_BYTES = new XContentBuilderString("share_in_bytes");
+ static final XContentBuilderString TOTAL_VIRTUAL = new XContentBuilderString("total_virtual");
+ static final XContentBuilderString TOTAL_VIRTUAL_IN_BYTES = new XContentBuilderString("total_virtual_in_bytes");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.PROCESS);
+ builder.field(Fields.TIMESTAMP, timestamp);
+ builder.field(Fields.OPEN_FILE_DESCRIPTORS, openFileDescriptors);
+ if (cpu != null) {
+ builder.startObject(Fields.CPU);
+ builder.field(Fields.PERCENT, cpu.percent());
+ builder.timeValueField(Fields.SYS_IN_MILLIS, Fields.SYS, cpu.sys);
+ builder.timeValueField(Fields.USER_IN_MILLIS, Fields.USER, cpu.user);
+ builder.timeValueField(Fields.TOTAL_IN_MILLIS, Fields.TOTAL, cpu.total);
+ builder.endObject();
+ }
+ if (mem != null) {
+ builder.startObject(Fields.MEM);
+ builder.byteSizeField(Fields.RESIDENT_IN_BYTES, Fields.RESIDENT, mem.resident);
+ builder.byteSizeField(Fields.SHARE_IN_BYTES, Fields.SHARE, mem.share);
+ builder.byteSizeField(Fields.TOTAL_VIRTUAL_IN_BYTES, Fields.TOTAL_VIRTUAL, mem.totalVirtual);
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static ProcessStats readProcessStats(StreamInput in) throws IOException {
+ ProcessStats stats = new ProcessStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ timestamp = in.readVLong();
+ openFileDescriptors = in.readLong();
+ if (in.readBoolean()) {
+ cpu = Cpu.readCpu(in);
+ }
+ if (in.readBoolean()) {
+ mem = Mem.readMem(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(timestamp);
+ out.writeLong(openFileDescriptors);
+ if (cpu == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ cpu.writeTo(out);
+ }
+ if (mem == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ mem.writeTo(out);
+ }
+ }
+
+ public static class Mem implements Streamable, Serializable {
+
+ long totalVirtual = -1;
+ long resident = -1;
+ long share = -1;
+
+ Mem() {
+ }
+
+ public static Mem readMem(StreamInput in) throws IOException {
+ Mem mem = new Mem();
+ mem.readFrom(in);
+ return mem;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ totalVirtual = in.readLong();
+ resident = in.readLong();
+ share = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeLong(totalVirtual);
+ out.writeLong(resident);
+ out.writeLong(share);
+ }
+
+ public ByteSizeValue totalVirtual() {
+ return new ByteSizeValue(totalVirtual);
+ }
+
+ public ByteSizeValue getTotalVirtual() {
+ return totalVirtual();
+ }
+
+ public ByteSizeValue resident() {
+ return new ByteSizeValue(resident);
+ }
+
+ public ByteSizeValue getResident() {
+ return resident();
+ }
+
+ public ByteSizeValue share() {
+ return new ByteSizeValue(share);
+ }
+
+ public ByteSizeValue getShare() {
+ return share();
+ }
+ }
+
+ public static class Cpu implements Streamable, Serializable {
+
+ short percent = -1;
+ long sys = -1;
+ long user = -1;
+ long total = -1;
+
+ Cpu() {
+
+ }
+
+ public static Cpu readCpu(StreamInput in) throws IOException {
+ Cpu cpu = new Cpu();
+ cpu.readFrom(in);
+ return cpu;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ percent = in.readShort();
+ sys = in.readLong();
+ user = in.readLong();
+ total = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeShort(percent);
+ out.writeLong(sys);
+ out.writeLong(user);
+ out.writeLong(total);
+ }
+
+ /**
+ * Get the Process cpu usage.
+ * <p/>
+ * <p>Supported Platforms: All.
+ */
+ public short percent() {
+ return percent;
+ }
+
+ /**
+ * Get the Process cpu usage.
+ * <p/>
+ * <p>Supported Platforms: All.
+ */
+ public short getPercent() {
+ return percent();
+ }
+
+ /**
+ * Get the Process cpu kernel time.
+ * <p/>
+ * <p>Supported Platforms: All.
+ */
+ public TimeValue sys() {
+ return new TimeValue(sys);
+ }
+
+ /**
+ * Get the Process cpu kernel time.
+ * <p/>
+ * <p>Supported Platforms: All.
+ */
+ public TimeValue getSys() {
+ return sys();
+ }
+
+ /**
+ * Get the Process cpu user time.
+ * <p/>
+ * <p>Supported Platforms: All.
+ */
+ public TimeValue user() {
+ return new TimeValue(user);
+ }
+
+ /**
+ * Get the Process cpu time (sum of User and Sys).
+ * <p/>
+ * Supported Platforms: All.
+ */
+ public TimeValue total() {
+ return new TimeValue(total);
+ }
+
+ /**
+ * Get the Process cpu time (sum of User and Sys).
+ * <p/>
+ * Supported Platforms: All.
+ */
+ public TimeValue getTotal() {
+ return total();
+ }
+
+ /**
+ * Get the Process cpu user time.
+ * <p/>
+ * <p>Supported Platforms: All.
+ */
+ public TimeValue getUser() {
+ return user();
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/process/SigarProcessProbe.java b/src/main/java/org/elasticsearch/monitor/process/SigarProcessProbe.java
new file mode 100644
index 0000000..8b25917
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/process/SigarProcessProbe.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.process;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.hyperic.sigar.ProcCpu;
+import org.hyperic.sigar.ProcMem;
+import org.hyperic.sigar.Sigar;
+import org.hyperic.sigar.SigarException;
+
+/**
+ * {@link ProcessProbe} backed by Sigar; file descriptor counts are
+ * delegated to {@link JmxProcessProbe}.
+ */
+public class SigarProcessProbe extends AbstractComponent implements ProcessProbe {
+
+ private final SigarService sigarService;
+
+ @Inject
+ public SigarProcessProbe(Settings settings, SigarService sigarService) {
+ super(settings);
+ this.sigarService = sigarService;
+ }
+
+ @Override
+ public synchronized ProcessInfo processInfo() {
+ return new ProcessInfo(sigarService.sigar().getPid(), JmxProcessProbe.getMaxFileDescriptorCount());
+ }
+
+ @Override
+ public synchronized ProcessStats processStats() {
+ Sigar sigar = sigarService.sigar();
+ ProcessStats stats = new ProcessStats();
+ stats.timestamp = System.currentTimeMillis();
+ stats.openFileDescriptors = JmxProcessProbe.getOpenFileDescriptorCount();
+
+ try {
+ ProcCpu cpu = sigar.getProcCpu(sigar.getPid());
+ stats.cpu = new ProcessStats.Cpu();
+ stats.cpu.percent = (short) (cpu.getPercent() * 100);
+ stats.cpu.sys = cpu.getSys();
+ stats.cpu.user = cpu.getUser();
+ stats.cpu.total = cpu.getTotal();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ try {
+ ProcMem mem = sigar.getProcMem(sigar.getPid());
+ stats.mem = new ProcessStats.Mem();
+ stats.mem.totalVirtual = mem.getSize();
+ stats.mem.resident = mem.getResident();
+ stats.mem.share = mem.getShare();
+ } catch (SigarException e) {
+ // ignore
+ }
+
+ return stats;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/monitor/sigar/SigarService.java b/src/main/java/org/elasticsearch/monitor/sigar/SigarService.java
new file mode 100644
index 0000000..047ef13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/monitor/sigar/SigarService.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.sigar;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.hyperic.sigar.Sigar;
+
+/**
+ * Loads the Sigar native library once at construction time;
+ * {@link #sigarAvailable()} reports whether the load succeeded.
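+ * <p/>
+ * <p>Callers should gate on availability (sketch):
+ * <pre>
+ * SigarService sigarService = new SigarService(settings);
+ * if (sigarService.sigarAvailable()) {
+ *     long pid = sigarService.sigar().getPid();
+ * }
+ * </pre>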
+ */
+public class SigarService extends AbstractComponent {
+
+ private final Sigar sigar;
+
+ @Inject
+ public SigarService(Settings settings) {
+ super(settings);
+
+ Sigar sigar = null;
+ try {
+ sigar = new Sigar();
+ // call it to make sure the library was loaded
+ sigar.getPid();
+ logger.trace("sigar loaded successfully");
+ } catch (Throwable t) {
+ logger.trace("failed to load sigar", t);
+ if (sigar != null) {
+ try {
+ sigar.close();
+ } catch (Throwable t1) {
+ // ignore
+ } finally {
+ sigar = null;
+ }
+ }
+ }
+ this.sigar = sigar;
+ }
+
+ public boolean sigarAvailable() {
+ return sigar != null;
+ }
+
+ public Sigar sigar() {
+ return this.sigar;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/node/Node.java b/src/main/java/org/elasticsearch/node/Node.java
new file mode 100644
index 0000000..729633c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/Node.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * A node represents a node within a cluster (<tt>cluster.name</tt>). The {@link #client()} method can be
+ * used to obtain a {@link Client} and perform actions/operations against the cluster.
+ * <p/>
+ * <p>In order to create a node, the {@link NodeBuilder} can be used. When done with it, make sure to
+ * call {@link #close()} on it.
+ *
+ *
+ */
+public interface Node {
+
+ /**
+ * The settings that were used to create the node.
+ */
+ Settings settings();
+
+ /**
+ * A client that can be used to execute actions (operations) against the cluster.
+ */
+ Client client();
+
+ /**
+ * Starts the node. If the node is already started, this method is a no-op.
+ */
+ Node start();
+
+ /**
+ * Stops the node. If the node is already stopped, this method is a no-op.
+ */
+ Node stop();
+
+ /**
+ * Closes the node (and {@link #stop}s it if it's running).
+ */
+ void close();
+
+ /**
+ * Returns <tt>true</tt> if the node is closed.
+ */
+ boolean isClosed();
+}
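
A minimal embedding sketch for the interface above, assuming the NodeBuilder introduced next; the cluster health call is one plausible first use of the client, not part of this change:

    import org.elasticsearch.client.Client;
    import org.elasticsearch.node.Node;
    import org.elasticsearch.node.NodeBuilder;

    public class EmbeddedNodeExample {
        public static void main(String[] args) {
            // build() + start() in one call
            Node node = NodeBuilder.nodeBuilder().node();
            try {
                Client client = node.client();
                // wait until the cluster is at least yellow before using it
                client.admin().cluster().prepareHealth()
                        .setWaitForYellowStatus().execute().actionGet();
            } finally {
                node.close(); // stops the node if running, then closes it
            }
        }
    }
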
diff --git a/src/main/java/org/elasticsearch/node/NodeBuilder.java b/src/main/java/org/elasticsearch/node/NodeBuilder.java
new file mode 100644
index 0000000..1b06a65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/NodeBuilder.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.internal.InternalNode;
+
+/**
+ * A node builder is used to construct a {@link Node} instance.
+ * <p/>
+ * <p>Settings will be loaded relative to the ES home (with or without a <tt>config/</tt> prefix) and, if not found,
+ * from the classpath (with or without a <tt>config/</tt> prefix). The settings file loaded can be named either
+ * <tt>elasticsearch.yml</tt> or <tt>elasticsearch.json</tt>. Loading settings can be disabled by calling
+ * {@link #loadConfigSettings(boolean)} with <tt>false</tt>.
+ * <p/>
+ * <p>Explicit settings can be passed by using the {@link #settings(Settings)} method.
+ * <p/>
+ * <p>In any case, settings will also be resolved from system properties prefixed with either <tt>es.</tt>
+ * or <tt>elasticsearch.</tt>.
+ * <p/>
+ * <p>An example for creating a simple node with optional settings loaded from the classpath:
+ * <p/>
+ * <pre>
+ * Node node = NodeBuilder.nodeBuilder().node();
+ * </pre>
+ * <p/>
+ * <p>An example for creating a node with explicit settings (in this case, a node in the cluster that does not hold
+ * data):
+ * <p/>
+ * <pre>
+ * Node node = NodeBuilder.nodeBuilder()
+ * .settings(ImmutableSettings.settingsBuilder().put("node.data", false))
+ * .node();
+ * </pre>
+ * <p/>
+ * <p>When done with the node, make sure you call {@link Node#close()} on it.
+ *
+ *
+ */
+public class NodeBuilder {
+
+ private final ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder();
+
+ private boolean loadConfigSettings = true;
+
+ /**
+ * A convenient factory method to create a {@link NodeBuilder}.
+ */
+ public static NodeBuilder nodeBuilder() {
+ return new NodeBuilder();
+ }
+
+ /**
+ * Set additional settings by working directly against the settings builder.
+ */
+ public ImmutableSettings.Builder settings() {
+ return settings;
+ }
+
+ /**
+ * Set additional settings by working directly against the settings builder.
+ */
+ public ImmutableSettings.Builder getSettings() {
+ return settings;
+ }
+
+ /**
+ * Explicit node settings to set.
+ */
+ public NodeBuilder settings(Settings.Builder settings) {
+ return settings(settings.build());
+ }
+
+ /**
+ * Explicit node settings to set.
+ */
+ public NodeBuilder settings(Settings settings) {
+ this.settings.put(settings);
+ return this;
+ }
+
+ /**
+ * Should the node builder automatically try to load config settings from the file system / classpath. Defaults
+ * to <tt>true</tt>.
+ */
+ public NodeBuilder loadConfigSettings(boolean loadConfigSettings) {
+ this.loadConfigSettings = loadConfigSettings;
+ return this;
+ }
+
+ /**
+ * Is the node going to be a client node, which means it will hold no data (<tt>node.data</tt> is
+ * set to <tt>false</tt>) and have other optimizations applied by different modules.
+ *
+ * @param client Should the node be just a client node or not.
+ */
+ public NodeBuilder client(boolean client) {
+ settings.put("node.client", client);
+ return this;
+ }
+
+ /**
+ * Is the node going to be allowed to allocate data (shards) to it or not. This maps to
+ * the <tt>node.data</tt> setting. Note, when setting {@link #client(boolean)}, the node will
+ * not hold any data by default.
+ *
+ * @param data Should data be allocated to the node or not.
+ */
+ public NodeBuilder data(boolean data) {
+ settings.put("node.data", data);
+ return this;
+ }
+
+ /**
+ * Is the node a local node. A local node is a node that uses a local (JVM level) discovery and
+ * transport. Other (local) nodes started within the same JVM (actually, class-loader) will be
+ * discovered and communicated with. Nodes outside of the JVM will not be discovered.
+ *
+ * @param local Should the node be local or not
+ */
+ public NodeBuilder local(boolean local) {
+ settings.put("node.local", local);
+ return this;
+ }
+
+ /**
+ * The cluster name this node is part of (maps to the <tt>cluster.name</tt> setting). Defaults
+ * to <tt>elasticsearch</tt>.
+ *
+ * @param clusterName The cluster name this node is part of.
+ */
+ public NodeBuilder clusterName(String clusterName) {
+ settings.put("cluster.name", clusterName);
+ return this;
+ }
+
+ /**
+ * Builds the node without starting it.
+ */
+ public Node build() {
+ return new InternalNode(settings.build(), loadConfigSettings);
+ }
+
+ /**
+ * {@link #build()}s and starts the node.
+ */
+ public Node node() {
+ return build().start();
+ }
+}
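
Putting the builder methods above together, a hedged sketch of configuring a local, data-holding node for in-JVM use (the cluster name and flag values are illustrative):

    Node node = NodeBuilder.nodeBuilder()
            .clusterName("example-cluster") // maps to cluster.name
            .local(true)                    // JVM-level discovery and transport only
            .data(true)                     // allow shards to be allocated here
            .node();                        // build() and start()
    // ... work with node.client() ...
    node.close();
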
diff --git a/src/main/java/org/elasticsearch/node/NodeClosedException.java b/src/main/java/org/elasticsearch/node/NodeClosedException.java
new file mode 100644
index 0000000..280efd5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/NodeClosedException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ * An exception indicating that the node is closed.
+ *
+ *
+ */
+public class NodeClosedException extends ElasticsearchException {
+
+ public NodeClosedException(DiscoveryNode node) {
+ super("node closed " + node);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/node/internal/InternalNode.java b/src/main/java/org/elasticsearch/node/internal/InternalNode.java
new file mode 100644
index 0000000..f9e8ae6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/internal/InternalNode.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.internal;
+
+import org.elasticsearch.Build;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionModule;
+import org.elasticsearch.bulk.udp.BulkUdpModule;
+import org.elasticsearch.bulk.udp.BulkUdpService;
+import org.elasticsearch.cache.NodeCache;
+import org.elasticsearch.cache.NodeCacheModule;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecyclerModule;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.node.NodeClientModule;
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.cluster.ClusterNameModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.routing.RoutingService;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Injectors;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.io.CachedStreams;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.env.NodeEnvironmentModule;
+import org.elasticsearch.gateway.GatewayModule;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.http.HttpServer;
+import org.elasticsearch.http.HttpServerModule;
+import org.elasticsearch.index.search.shape.ShapeModule;
+import org.elasticsearch.indices.IndicesModule;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.memory.IndexingMemoryController;
+import org.elasticsearch.indices.ttl.IndicesTTLService;
+import org.elasticsearch.monitor.MonitorModule;
+import org.elasticsearch.monitor.MonitorService;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.percolator.PercolatorModule;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.plugins.PluginsModule;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.repositories.RepositoriesModule;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestModule;
+import org.elasticsearch.river.RiversManager;
+import org.elasticsearch.river.RiversModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.tribe.TribeModule;
+import org.elasticsearch.tribe.TribeService;
+import org.elasticsearch.watcher.ResourceWatcherModule;
+import org.elasticsearch.watcher.ResourceWatcherService;
+
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public final class InternalNode implements Node {
+
+ private final Lifecycle lifecycle = new Lifecycle();
+
+ private final Injector injector;
+
+ private final Settings settings;
+
+ private final Environment environment;
+
+ private final PluginsService pluginsService;
+
+ private final Client client;
+
+ public InternalNode() throws ElasticsearchException {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS, true);
+ }
+
+ public InternalNode(Settings pSettings, boolean loadConfigSettings) throws ElasticsearchException {
+ Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings);
+ tuple = new Tuple<Settings, Environment>(TribeService.processSettings(tuple.v1()), tuple.v2());
+
+ Version version = Version.CURRENT;
+
+ ESLogger logger = Loggers.getLogger(Node.class, tuple.v1().get("name"));
+ logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.hashShort(), Build.CURRENT.timestamp());
+
+ logger.info("initializing ...");
+
+ if (logger.isDebugEnabled()) {
+ Environment env = tuple.v2();
+ logger.debug("using home [{}], config [{}], data [{}], logs [{}], work [{}], plugins [{}]",
+ env.homeFile(), env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(),
+ env.workFile(), env.pluginsFile());
+ }
+
+ this.pluginsService = new PluginsService(tuple.v1(), tuple.v2());
+ this.settings = pluginsService.updatedSettings();
+ // create the environment based on the finalized (processed) view of the settings
+ this.environment = new Environment(this.settings());
+
+ CompressorFactory.configure(settings);
+
+ NodeEnvironment nodeEnvironment = new NodeEnvironment(this.settings, this.environment);
+
+ ModulesBuilder modules = new ModulesBuilder();
+ modules.add(new Version.Module(version));
+ modules.add(new CacheRecyclerModule(settings));
+ modules.add(new PageCacheRecyclerModule(settings));
+ modules.add(new PluginsModule(settings, pluginsService));
+ modules.add(new SettingsModule(settings));
+ modules.add(new NodeModule(this));
+ modules.add(new NetworkModule());
+ modules.add(new NodeCacheModule(settings));
+ modules.add(new ScriptModule(settings));
+ modules.add(new EnvironmentModule(environment));
+ modules.add(new NodeEnvironmentModule(nodeEnvironment));
+ modules.add(new ClusterNameModule(settings));
+ modules.add(new ThreadPoolModule(settings));
+ modules.add(new DiscoveryModule(settings));
+ modules.add(new ClusterModule(settings));
+ modules.add(new RestModule(settings));
+ modules.add(new TransportModule(settings));
+ if (settings.getAsBoolean("http.enabled", true)) {
+ modules.add(new HttpServerModule(settings));
+ }
+ modules.add(new RiversModule(settings));
+ modules.add(new IndicesModule(settings));
+ modules.add(new SearchModule());
+ modules.add(new ActionModule(false));
+ modules.add(new MonitorModule(settings));
+ modules.add(new GatewayModule(settings));
+ modules.add(new NodeClientModule());
+ modules.add(new BulkUdpModule());
+ modules.add(new ShapeModule());
+ modules.add(new PercolatorModule());
+ modules.add(new ResourceWatcherModule());
+ modules.add(new RepositoriesModule());
+ modules.add(new TribeModule());
+
+ injector = modules.createInjector();
+
+ client = injector.getInstance(Client.class);
+
+ logger.info("initialized");
+ }
+
+ @Override
+ public Settings settings() {
+ return this.settings;
+ }
+
+ @Override
+ public Client client() {
+ return client;
+ }
+
+ public Node start() {
+ if (!lifecycle.moveToStarted()) {
+ return this;
+ }
+
+ ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
+ logger.info("starting ...");
+
+ // hack around dependency injection problem (for now...)
+ injector.getInstance(Discovery.class).setAllocationService(injector.getInstance(AllocationService.class));
+
+ for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
+ injector.getInstance(plugin).start();
+ }
+
+ injector.getInstance(IndicesService.class).start();
+ injector.getInstance(IndexingMemoryController.class).start();
+ injector.getInstance(IndicesClusterStateService.class).start();
+ injector.getInstance(IndicesTTLService.class).start();
+ injector.getInstance(RiversManager.class).start();
+ injector.getInstance(ClusterService.class).start();
+ injector.getInstance(RoutingService.class).start();
+ injector.getInstance(SearchService.class).start();
+ injector.getInstance(MonitorService.class).start();
+ injector.getInstance(RestController.class).start();
+ injector.getInstance(TransportService.class).start();
+ DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
+
+ // gateway should start after discovery, so it can try to recover from the gateway on "start"
+ injector.getInstance(GatewayService.class).start();
+
+ if (settings.getAsBoolean("http.enabled", true)) {
+ injector.getInstance(HttpServer.class).start();
+ }
+ injector.getInstance(BulkUdpService.class).start();
+ injector.getInstance(ResourceWatcherService.class).start();
+ injector.getInstance(TribeService.class).start();
+
+ logger.info("started");
+
+ return this;
+ }
+
+ @Override
+ public Node stop() {
+ if (!lifecycle.moveToStopped()) {
+ return this;
+ }
+ ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
+ logger.info("stopping ...");
+
+ injector.getInstance(TribeService.class).stop();
+ injector.getInstance(BulkUdpService.class).stop();
+ injector.getInstance(ResourceWatcherService.class).stop();
+ if (settings.getAsBoolean("http.enabled", true)) {
+ injector.getInstance(HttpServer.class).stop();
+ }
+
+ injector.getInstance(RiversManager.class).stop();
+
+ // stop any changes happening as a result of cluster state changes
+ injector.getInstance(IndicesClusterStateService.class).stop();
+ // we close indices first, so operations won't be allowed on them
+ injector.getInstance(IndexingMemoryController.class).stop();
+ injector.getInstance(IndicesTTLService.class).stop();
+ injector.getInstance(IndicesService.class).stop();
+ // sleep a bit to let operations finish with indices service
+// try {
+// Thread.sleep(500);
+// } catch (InterruptedException e) {
+// // ignore
+// }
+ injector.getInstance(RoutingService.class).stop();
+ injector.getInstance(ClusterService.class).stop();
+ injector.getInstance(DiscoveryService.class).stop();
+ injector.getInstance(MonitorService.class).stop();
+ injector.getInstance(GatewayService.class).stop();
+ injector.getInstance(SearchService.class).stop();
+ injector.getInstance(RestController.class).stop();
+ injector.getInstance(TransportService.class).stop();
+
+ for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
+ injector.getInstance(plugin).stop();
+ }
+
+ logger.info("stopped");
+
+ return this;
+ }
+
+ public void close() {
+ if (lifecycle.started()) {
+ stop();
+ }
+ if (!lifecycle.moveToClosed()) {
+ return;
+ }
+
+ ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
+ logger.info("closing ...");
+
+ StopWatch stopWatch = new StopWatch("node_close");
+ stopWatch.start("tribe");
+ injector.getInstance(TribeService.class).close();
+ stopWatch.stop().start("bulk.udp");
+ injector.getInstance(BulkUdpService.class).close();
+ stopWatch.stop().start("http");
+ if (settings.getAsBoolean("http.enabled", true)) {
+ injector.getInstance(HttpServer.class).close();
+ }
+
+ stopWatch.stop().start("rivers");
+ injector.getInstance(RiversManager.class).close();
+
+ stopWatch.stop().start("client");
+ injector.getInstance(Client.class).close();
+ stopWatch.stop().start("indices_cluster");
+ injector.getInstance(IndicesClusterStateService.class).close();
+ stopWatch.stop().start("indices");
+ injector.getInstance(IndicesFilterCache.class).close();
+ injector.getInstance(IndicesFieldDataCache.class).close();
+ injector.getInstance(IndexingMemoryController.class).close();
+ injector.getInstance(IndicesTTLService.class).close();
+ injector.getInstance(IndicesService.class).close();
+ stopWatch.stop().start("routing");
+ injector.getInstance(RoutingService.class).close();
+ stopWatch.stop().start("cluster");
+ injector.getInstance(ClusterService.class).close();
+ stopWatch.stop().start("discovery");
+ injector.getInstance(DiscoveryService.class).close();
+ stopWatch.stop().start("monitor");
+ injector.getInstance(MonitorService.class).close();
+ stopWatch.stop().start("gateway");
+ injector.getInstance(GatewayService.class).close();
+ stopWatch.stop().start("search");
+ injector.getInstance(SearchService.class).close();
+ stopWatch.stop().start("rest");
+ injector.getInstance(RestController.class).close();
+ stopWatch.stop().start("transport");
+ injector.getInstance(TransportService.class).close();
+ stopWatch.stop().start("percolator_service");
+ injector.getInstance(PercolatorService.class).close();
+
+ for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
+ stopWatch.stop().start("plugin(" + plugin.getName() + ")");
+ injector.getInstance(plugin).close();
+ }
+
+ stopWatch.stop().start("node_cache");
+ injector.getInstance(NodeCache.class).close();
+
+ stopWatch.stop().start("script");
+ injector.getInstance(ScriptService.class).close();
+
+ stopWatch.stop().start("thread_pool");
+ injector.getInstance(ThreadPool.class).shutdown();
+ try {
+ injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ stopWatch.stop().start("thread_pool_force_shutdown");
+ try {
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ } catch (Exception e) {
+ // ignore
+ }
+ stopWatch.stop();
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
+ }
+
+ injector.getInstance(NodeEnvironment.class).close();
+ injector.getInstance(CacheRecycler.class).close();
+ injector.getInstance(PageCacheRecycler.class).close();
+ Injectors.close(injector);
+
+ CachedStreams.clear();
+
+ logger.info("closed");
+ }
+
+ @Override
+ public boolean isClosed() {
+ return lifecycle.closed();
+ }
+
+ public Injector injector() {
+ return this.injector;
+ }
+
+ public static void main(String[] args) throws Exception {
+ final InternalNode node = new InternalNode();
+ node.start();
+ Runtime.getRuntime().addShutdownHook(new Thread() {
+ @Override
+ public void run() {
+ node.close();
+ }
+ });
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java
new file mode 100644
index 0000000..6ceedbd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.internal;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.Names;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.FailedToResolveConfigException;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.Strings.cleanPath;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ *
+ */
+public class InternalSettingsPreparer {
+
+ public static Tuple<Settings, Environment> prepareSettings(Settings pSettings, boolean loadConfigSettings) {
+ // ignore these prefixes when getting properties from es. and elasticsearch.
+ String[] ignorePrefixes = new String[]{"es.default.", "elasticsearch.default."};
+ boolean useSystemProperties = !pSettings.getAsBoolean("config.ignore_system_properties", false);
+ // just create enough settings to build the environment
+ ImmutableSettings.Builder settingsBuilder = settingsBuilder().put(pSettings);
+ if (useSystemProperties) {
+ settingsBuilder.putProperties("elasticsearch.default.", System.getProperties())
+ .putProperties("es.default.", System.getProperties())
+ .putProperties("elasticsearch.", System.getProperties(), ignorePrefixes)
+ .putProperties("es.", System.getProperties(), ignorePrefixes);
+ }
+ settingsBuilder.replacePropertyPlaceholders();
+
+ Environment environment = new Environment(settingsBuilder.build());
+
+ if (loadConfigSettings) {
+ boolean loadFromEnv = true;
+ if (useSystemProperties) {
+ // if it's the default, then load it, but also load from env
+ if (Strings.hasText(System.getProperty("es.default.config"))) {
+ loadFromEnv = true;
+ settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("es.default.config")));
+ }
+ // if explicit, just load it and don't load from env
+ if (Strings.hasText(System.getProperty("es.config"))) {
+ loadFromEnv = false;
+ settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("es.config")));
+ }
+ if (Strings.hasText(System.getProperty("elasticsearch.config"))) {
+ loadFromEnv = false;
+ settingsBuilder.loadFromUrl(environment.resolveConfig(System.getProperty("elasticsearch.config")));
+ }
+ }
+ if (loadFromEnv) {
+ try {
+ settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.yml"));
+ } catch (FailedToResolveConfigException e) {
+ // ignore
+ } catch (NoClassDefFoundError e) {
+ // ignore, no yaml
+ }
+ try {
+ settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.json"));
+ } catch (FailedToResolveConfigException e) {
+ // ignore
+ }
+ try {
+ settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.properties"));
+ } catch (FailedToResolveConfigException e) {
+ // ignore
+ }
+ }
+ }
+
+ settingsBuilder.put(pSettings);
+ if (useSystemProperties) {
+ settingsBuilder.putProperties("elasticsearch.", System.getProperties(), ignorePrefixes)
+ .putProperties("es.", System.getProperties(), ignorePrefixes);
+ }
+ settingsBuilder.replacePropertyPlaceholders();
+
+ // allow forcing settings based on the configuration of the settings provided
+ for (Map.Entry<String, String> entry : pSettings.getAsMap().entrySet()) {
+ String setting = entry.getKey();
+ if (setting.startsWith("force.")) {
+ settingsBuilder.remove(setting);
+ settingsBuilder.put(setting.substring("force.".length()), entry.getValue());
+ }
+ }
+ settingsBuilder.replacePropertyPlaceholders();
+
+ // generate the name
+ if (settingsBuilder.get("name") == null) {
+ String name = System.getProperty("name");
+ if (name == null || name.isEmpty()) {
+ name = settingsBuilder.get("node.name");
+ if (name == null || name.isEmpty()) {
+ name = Names.randomNodeName(environment.resolveConfig("names.txt"));
+ }
+ }
+
+ if (name != null) {
+ settingsBuilder.put("name", name);
+ }
+ }
+
+ // put the cluster name
+ if (settingsBuilder.get(ClusterName.SETTING) == null) {
+ settingsBuilder.put(ClusterName.SETTING, ClusterName.DEFAULT.value());
+ }
+
+ Settings v1 = settingsBuilder.build();
+ environment = new Environment(v1);
+
+ // put back the env settings
+ settingsBuilder = settingsBuilder().put(v1);
+ // we put back the path.logs so we can use it in the logging configuration file
+ settingsBuilder.put("path.logs", cleanPath(environment.logsFile().getAbsolutePath()));
+
+ v1 = settingsBuilder.build();
+
+ return new Tuple<Settings, Environment>(v1, environment);
+ }
+}
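
A short sketch of the precedence implemented above, assuming a JVM where an es.-prefixed system property was set (the property name and values are illustrative):

    // "es.node.name" is stripped of its "es." prefix and becomes "node.name"
    System.setProperty("es.node.name", "from-sysprop");

    Settings explicit = ImmutableSettings.settingsBuilder()
            .put("cluster.name", "demo")
            .build();

    // explicit settings and es./elasticsearch. system properties are layered
    // over anything found in elasticsearch.yml/json/properties
    Tuple<Settings, Environment> prepared =
            InternalSettingsPreparer.prepareSettings(explicit, true);
    Settings resolved = prepared.v1();
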
diff --git a/src/main/java/org/elasticsearch/node/internal/NodeModule.java b/src/main/java/org/elasticsearch/node/internal/NodeModule.java
new file mode 100644
index 0000000..7ce0f4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/internal/NodeModule.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.internal;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+/**
+ *
+ */
+public class NodeModule extends AbstractModule {
+
+ private final Node node;
+
+ public NodeModule(Node node) {
+ this.node = node;
+ }
+
+ @Override
+ protected void configure() {
+ bind(Node.class).toInstance(node);
+ bind(NodeSettingsService.class).asEagerSingleton();
+ bind(NodeService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/node/package-info.java b/src/main/java/org/elasticsearch/node/package-info.java
new file mode 100644
index 0000000..fa503a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Allows building a {@link org.elasticsearch.node.Node}, a node within the cluster, using
+ * {@link org.elasticsearch.node.NodeBuilder}.
+ */
+package org.elasticsearch.node; \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/node/service/NodeService.java b/src/main/java/org/elasticsearch/node/service/NodeService.java
new file mode 100644
index 0000000..2c2c493
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/service/NodeService.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.service;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Build;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.http.HttpServer;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.monitor.MonitorService;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ */
+public class NodeService extends AbstractComponent {
+
+ private final ThreadPool threadPool;
+ private final MonitorService monitorService;
+ private final TransportService transportService;
+ private final IndicesService indicesService;
+ private final PluginsService pluginService;
+ private final CircuitBreakerService circuitBreakerService;
+ @Nullable
+ private HttpServer httpServer;
+
+ private volatile ImmutableMap<String, String> serviceAttributes = ImmutableMap.of();
+
+ private final Version version;
+
+ private final Discovery discovery;
+
+ @Inject
+ public NodeService(Settings settings, ThreadPool threadPool, MonitorService monitorService, Discovery discovery,
+ TransportService transportService, IndicesService indicesService,
+ PluginsService pluginService, CircuitBreakerService circuitBreakerService, Version version) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.monitorService = monitorService;
+ this.transportService = transportService;
+ this.indicesService = indicesService;
+ this.discovery = discovery;
+ discovery.setNodeService(this);
+ this.version = version;
+ this.pluginService = pluginService;
+ this.circuitBreakerService = circuitBreakerService;
+ }
+
+ public void setHttpServer(@Nullable HttpServer httpServer) {
+ this.httpServer = httpServer;
+ }
+
+ @Deprecated
+ public void putNodeAttribute(String key, String value) {
+ putAttribute(key, value);
+ }
+
+ @Deprecated
+ public void removeNodeAttribute(String key) {
+ removeAttribute(key);
+ }
+
+ public synchronized void putAttribute(String key, String value) {
+ serviceAttributes = new MapBuilder<String, String>(serviceAttributes).put(key, value).immutableMap();
+ }
+
+ public synchronized void removeAttribute(String key) {
+ serviceAttributes = new MapBuilder<String, String>(serviceAttributes).remove(key).immutableMap();
+ }
+
+ /**
+ * Attributes that different services in the node can add, to be reported as part of the node info (for example).
+ */
+ public ImmutableMap<String, String> attributes() {
+ return this.serviceAttributes;
+ }
+
+ public NodeInfo info() {
+ return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes,
+ settings,
+ monitorService.osService().info(),
+ monitorService.processService().info(),
+ monitorService.jvmService().info(),
+ threadPool.info(),
+ monitorService.networkService().info(),
+ transportService.info(),
+ httpServer == null ? null : httpServer.info(),
+ pluginService == null ? null : pluginService.info()
+ );
+ }
+
+ public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool,
+ boolean network, boolean transport, boolean http, boolean plugin) {
+ return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes,
+ settings ? this.settings : null,
+ os ? monitorService.osService().info() : null,
+ process ? monitorService.processService().info() : null,
+ jvm ? monitorService.jvmService().info() : null,
+ threadPool ? this.threadPool.info() : null,
+ network ? monitorService.networkService().info() : null,
+ transport ? transportService.info() : null,
+ http ? (httpServer == null ? null : httpServer.info()) : null,
+ plugin ? (pluginService == null ? null : pluginService.info()) : null
+ );
+ }
+
+ public NodeStats stats() {
+ // for indices stats we want to include previously allocated shards' stats as well (this will
+ // only be applied where it makes sense, like refresh/merge/flush/indexing stats)
+ return new NodeStats(discovery.localNode(), System.currentTimeMillis(),
+ indicesService.stats(true),
+ monitorService.osService().stats(),
+ monitorService.processService().stats(),
+ monitorService.jvmService().stats(),
+ threadPool.stats(),
+ monitorService.networkService().stats(),
+ monitorService.fsService().stats(),
+ transportService.stats(),
+ httpServer == null ? null : httpServer.stats(),
+ circuitBreakerService.stats()
+ );
+ }
+
+ public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool, boolean network,
+ boolean fs, boolean transport, boolean http, boolean circuitBreaker) {
+ // for indices stats we want to include previously allocated shards' stats as well (this will
+ // only be applied where it makes sense, like refresh/merge/flush/indexing stats)
+ return new NodeStats(discovery.localNode(), System.currentTimeMillis(),
+ indices.anySet() ? indicesService.stats(true, indices) : null,
+ os ? monitorService.osService().stats() : null,
+ process ? monitorService.processService().stats() : null,
+ jvm ? monitorService.jvmService().stats() : null,
+ threadPool ? this.threadPool.stats() : null,
+ network ? monitorService.networkService().stats() : null,
+ fs ? monitorService.fsService().stats() : null,
+ transport ? transportService.stats() : null,
+ http ? (httpServer == null ? null : httpServer.stats()) : null,
+ circuitBreaker ? circuitBreakerService.stats() : null
+ );
+ }
+}
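
The boolean-flag overloads above return null for every section that is not requested. A hedged sketch of asking for only the OS and JVM sections of the node info (the flag values mirror the info(...) signature above):

    NodeInfo partial = nodeService.info(
            false, // settings
            true,  // os
            false, // process
            true,  // jvm
            false, // threadPool
            false, // network
            false, // transport
            false, // http
            false  // plugin
    );
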
diff --git a/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java b/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java
new file mode 100644
index 0000000..20724e3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.settings;
+
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * A service that allows registering for node settings changes that can come from cluster
+ * events holding new settings.
+ */
+public class NodeSettingsService extends AbstractComponent implements ClusterStateListener {
+
+ private static volatile Settings globalSettings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ /**
+ * Returns the global (static) settings last updated by a node. Note that if multiple
+ * nodes run in the same JVM, this simply returns whichever settings were set last.
+ */
+ public static Settings getGlobalSettings() {
+ return globalSettings;
+ }
+
+ private volatile Settings lastSettingsApplied;
+
+ private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+
+ @Inject
+ public NodeSettingsService(Settings settings) {
+ super(settings);
+ globalSettings = settings;
+ }
+
+ // inject it as a member, so we won't get into possible cyclic problems
+ public void setClusterService(ClusterService clusterService) {
+ clusterService.add(this);
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistence
+ if (event.state().blocks().disableStatePersistence()) {
+ return;
+ }
+
+ if (!event.metaDataChanged()) {
+ // nothing changed in the metadata, no need to check
+ return;
+ }
+
+ if (lastSettingsApplied != null && event.state().metaData().settings().equals(lastSettingsApplied)) {
+ // nothing changed in the settings, ignore
+ return;
+ }
+
+ for (Listener listener : listeners) {
+ try {
+ listener.onRefreshSettings(event.state().metaData().settings());
+ } catch (Exception e) {
+ logger.warn("failed to refresh settings for [{}]", e, listener);
+ }
+ }
+
+ try {
+ for (Map.Entry<String, String> entry : event.state().metaData().settings().getAsMap().entrySet()) {
+ if (entry.getKey().startsWith("logger.")) {
+ String component = entry.getKey().substring("logger.".length());
+ if ("_root".equals(component)) {
+ ESLoggerFactory.getRootLogger().setLevel(entry.getValue());
+ } else {
+ ESLoggerFactory.getLogger(component).setLevel(entry.getValue());
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to refresh settings for [{}]", e, "logger");
+ }
+
+ lastSettingsApplied = event.state().metaData().settings();
+ globalSettings = lastSettingsApplied;
+ }
+
+ /**
+ * Only settings registered in {@link org.elasticsearch.cluster.settings.ClusterDynamicSettingsModule} can be changed dynamically.
+ */
+ public void addListener(Listener listener) {
+ this.listeners.add(listener);
+ }
+
+ public void removeListener(Listener listener) {
+ this.listeners.remove(listener);
+ }
+
+ public interface Listener {
+ void onRefreshSettings(Settings settings);
+ }
+}
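
A minimal sketch of registering for dynamic settings updates via the listener hook above (the setting key is illustrative, and only settings registered in ClusterDynamicSettingsModule are actually refreshed at runtime):

    nodeSettingsService.addListener(new NodeSettingsService.Listener() {
        @Override
        public void onRefreshSettings(Settings settings) {
            // react only when the key we care about is present
            Boolean enabled = settings.getAsBoolean("example.feature.enabled", null);
            if (enabled != null) {
                // apply the change ...
            }
        }
    });
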
diff --git a/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/src/main/java/org/elasticsearch/percolator/PercolateContext.java
new file mode 100644
index 0000000..811b86e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/percolator/PercolateContext.java
@@ -0,0 +1,732 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.percolate.PercolateShardRequest;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fieldvisitor.JustSourceFieldsVisitor;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.facet.SearchContextFacets;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
+import org.elasticsearch.search.fetch.partial.PartialFieldsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.*;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.scan.ScanContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ */
+public class PercolateContext extends SearchContext {
+
+ public boolean limit;
+ private int size;
+ public boolean doSort;
+ public byte percolatorTypeId;
+ private boolean trackScores;
+
+ private final PercolateShardRequest request;
+ private final SearchShardTarget searchShardTarget;
+ private final IndexService indexService;
+ private final IndexFieldDataService fieldDataService;
+ private final IndexShard indexShard;
+ private final CacheRecycler cacheRecycler;
+ private final PageCacheRecycler pageCacheRecycler;
+ private final ScriptService scriptService;
+ private final ConcurrentMap<HashedBytesRef, Query> percolateQueries;
+ private String[] types;
+
+ private Engine.Searcher docEngineSearcher;
+ private Engine.Searcher engineSearcher;
+ private ContextIndexSearcher searcher;
+
+ private SearchContextHighlight highlight;
+ private SearchLookup searchLookup;
+ private ParsedQuery parsedQuery;
+ private Query query;
+ private boolean queryRewritten;
+ private Query percolateQuery;
+ private FetchSubPhase.HitContext hitContext;
+ private SearchContextFacets facets;
+ private SearchContextAggregations aggregations;
+ private QuerySearchResult querySearchResult;
+ private Sort sort;
+
+ public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard,
+ IndexService indexService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler,
+ ScriptService scriptService) {
+ this.request = request;
+ this.indexShard = indexShard;
+ this.indexService = indexService;
+ this.fieldDataService = indexService.fieldData();
+ this.searchShardTarget = searchShardTarget;
+ this.percolateQueries = indexShard.percolateRegistry().percolateQueries();
+ this.types = new String[]{request.documentType()};
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.querySearchResult = new QuerySearchResult(0, searchShardTarget);
+ this.engineSearcher = indexShard.acquireSearcher("percolate");
+ this.searcher = new ContextIndexSearcher(this, engineSearcher);
+ this.scriptService = scriptService;
+ }
+
+ public void initialize(final MemoryIndex memoryIndex, ParsedDocument parsedDocument) {
+ final IndexSearcher docSearcher = memoryIndex.createSearcher();
+ final IndexReader topLevelReader = docSearcher.getIndexReader();
+ AtomicReaderContext readerContext = topLevelReader.leaves().get(0);
+ docEngineSearcher = new Engine.Searcher() {
+ @Override
+ public String source() {
+ return "percolate";
+ }
+
+ @Override
+ public IndexReader reader() {
+ return topLevelReader;
+ }
+
+ @Override
+ public IndexSearcher searcher() {
+ return docSearcher;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ try {
+ docSearcher.getIndexReader().close();
+ memoryIndex.reset();
+ } catch (IOException e) {
+ throw new ElasticsearchException("failed to close percolator in-memory index", e);
+ }
+ return true;
+ }
+ };
+ lookup().setNextReader(readerContext);
+ lookup().setNextDocId(0);
+ lookup().source().setNextSource(parsedDocument.source());
+
+ Map<String, SearchHitField> fields = new HashMap<String, SearchHitField>();
+ for (IndexableField field : parsedDocument.rootDoc().getFields()) {
+ fields.put(field.name(), new InternalSearchHitField(field.name(), ImmutableList.of()));
+ }
+ hitContext = new FetchSubPhase.HitContext();
+ hitContext.reset(new InternalSearchHit(0, "unknown", new StringText(request.documentType()), fields), readerContext, 0, topLevelReader, 0, new JustSourceFieldsVisitor());
+ }
+
+ public IndexSearcher docSearcher() {
+ return docEngineSearcher.searcher();
+ }
+
+ public IndexShard indexShard() {
+ return indexShard;
+ }
+
+ public IndexService indexService() {
+ return indexService;
+ }
+
+ public ConcurrentMap<HashedBytesRef, Query> percolateQueries() {
+ return percolateQueries;
+ }
+
+ public Query percolateQuery() {
+ return percolateQuery;
+ }
+
+ public void percolateQuery(Query percolateQuery) {
+ this.percolateQuery = percolateQuery;
+ }
+
+ public FetchSubPhase.HitContext hitContext() {
+ return hitContext;
+ }
+
+ @Override
+ public SearchContextHighlight highlight() {
+ return highlight;
+ }
+
+ @Override
+ public void highlight(SearchContextHighlight highlight) {
+ if (highlight != null) {
+ // Enforce highlighting by source, because MemoryIndex doesn't support stored fields.
+ highlight.globalForceSource(true);
+ }
+ this.highlight = highlight;
+ }
+
+ @Override
+ public SearchShardTarget shardTarget() {
+ return searchShardTarget;
+ }
+
+ @Override
+ public SearchLookup lookup() {
+ if (searchLookup == null) {
+ searchLookup = new SearchLookup(mapperService(), fieldData(), types);
+ }
+ return searchLookup;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ try {
+ if (docEngineSearcher != null) {
+ IndexReader indexReader = docEngineSearcher.reader();
+ fieldDataService.clear(indexReader);
+ indexService.cache().clear(indexReader);
+ return docEngineSearcher.release();
+ } else {
+ return false;
+ }
+ } finally {
+ engineSearcher.release();
+ }
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return indexService.mapperService();
+ }
+
+ @Override
+ public SearchContext parsedQuery(ParsedQuery query) {
+ this.parsedQuery = query;
+ this.query = query.query();
+ this.queryRewritten = false;
+ return this;
+ }
+
+ @Override
+ public ParsedQuery parsedQuery() {
+ return parsedQuery;
+ }
+
+ @Override
+ public Query query() {
+ return query;
+ }
+
+ @Override
+ public boolean queryRewritten() {
+ return queryRewritten;
+ }
+
+ @Override
+ public SearchContext updateRewriteQuery(Query rewriteQuery) {
+ queryRewritten = true;
+ query = rewriteQuery;
+ return this;
+ }
+
+ @Override
+ public String[] types() {
+ return types;
+ }
+
+ public void types(String[] types) {
+ this.types = types;
+ searchLookup = new SearchLookup(mapperService(), fieldData(), types);
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return fieldDataService;
+ }
+
+ @Override
+ public SearchContextAggregations aggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public SearchContext aggregations(SearchContextAggregations aggregations) {
+ this.aggregations = aggregations;
+ return this;
+ }
+
+ @Override
+ public SearchContextFacets facets() {
+ return facets;
+ }
+
+ @Override
+ public SearchContext facets(SearchContextFacets facets) {
+ this.facets = facets;
+ return this;
+ }
+
+ // Unused:
+ @Override
+ public boolean clearAndRelease() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void preProcess() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Filter searchFilter(String[] types) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long id() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String source() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ShardSearchRequest request() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchType searchType() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext searchType(SearchType searchType) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int numberOfShards() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasTypes() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public float queryBoost() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext queryBoost(float queryBoost) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long nowInMillis() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Scroll scroll() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext scroll(Scroll scroll) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SuggestionSearchContext suggest() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void suggest(SuggestionSearchContext suggest) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public RescoreSearchContext rescore() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void rescore(RescoreSearchContext rescore) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasFieldDataFields() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FieldDataFieldsContext fieldDataFields() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasScriptFields() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ScriptFieldsContext scriptFields() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasPartialFields() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public PartialFieldsContext partialFields() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean sourceRequested() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasFetchSourceContext() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FetchSourceContext fetchSourceContext() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ContextIndexSearcher searcher() {
+ return searcher;
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return indexService.analysisService();
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return indexService.queryParserService();
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return indexService.similarityService();
+ }
+
+ @Override
+ public ScriptService scriptService() {
+ return scriptService;
+ }
+
+ @Override
+ public CacheRecycler cacheRecycler() {
+ return cacheRecycler;
+ }
+
+ @Override
+ public PageCacheRecycler pageCacheRecycler() {
+ return pageCacheRecycler;
+ }
+
+ @Override
+ public FilterCache filterCache() {
+ return indexService.cache().filter();
+ }
+
+ @Override
+ public DocSetCache docSetCache() {
+ return indexService.cache().docSet();
+ }
+
+ @Override
+ public IdCache idCache() {
+ return indexService.cache().idCache();
+ }
+
+ @Override
+ public long timeoutInMillis() {
+ return -1;
+ }
+
+ @Override
+ public void timeoutInMillis(long timeoutInMillis) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext minimumScore(float minimumScore) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Float minimumScore() {
+ return null;
+ }
+
+ @Override
+ public SearchContext sort(Sort sort) {
+ this.sort = sort;
+ return this;
+ }
+
+ @Override
+ public Sort sort() {
+ return sort;
+ }
+
+ @Override
+ public SearchContext trackScores(boolean trackScores) {
+ this.trackScores = trackScores;
+ return this;
+ }
+
+ @Override
+ public boolean trackScores() {
+ return trackScores;
+ }
+
+ @Override
+ public SearchContext parsedPostFilter(ParsedFilter postFilter) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ParsedFilter parsedPostFilter() {
+ return null;
+ }
+
+ @Override
+ public Filter aliasFilter() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int from() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext from(int from) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int size() {
+ return size;
+ }
+
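+ // Setting an explicit size also flips the limit flag, which the percolator
+ // types consult to cap how many matches they collect.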
+ @Override
+ public SearchContext size(int size) {
+ this.size = size;
+ this.limit = true;
+ return this;
+ }
+
+ @Override
+ public boolean hasFieldNames() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<String> fieldNames() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void emptyFieldNames() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean explain() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void explain(boolean explain) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<String> groupStats() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void groupStats(List<String> groupStats) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean version() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void version(boolean version) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int[] docIdsToLoad() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int docIdsToLoadFrom() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int docIdsToLoadSize() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void accessed(long accessTime) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long lastAccessTime() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long keepAlive() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void keepAlive(long keepAlive) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public DfsSearchResult dfsResult() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public QuerySearchResult queryResult() {
+ return querySearchResult;
+ }
+
+ @Override
+ public FetchSearchResult fetchResult() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void addReleasable(Releasable releasable) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void clearReleasables() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ScanContext scanContext() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FieldMappers smartNameFieldMappers(String name) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FieldMapper smartNameFieldMapper(String name) {
+ return mapperService().smartNameFieldMapper(name, types);
+ }
+
+ @Override
+ public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/percolator/PercolateException.java b/src/main/java/org/elasticsearch/percolator/PercolateException.java
new file mode 100644
index 0000000..c5b7fa7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/percolator/PercolateException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * Exception during percolating document(s) at runtime.
+ */
+public class PercolateException extends ElasticsearchException implements ElasticsearchWrapperException {
+
+ private final ShardId shardId;
+
+ public PercolateException(String msg, ShardId shardId) {
+ super(msg);
+ this.shardId = shardId;
+ }
+
+ public PercolateException(ShardId shardId, String msg, Throwable cause) {
+ super(msg, cause);
+ this.shardId = shardId;
+ }
+
+ public ShardId getShardId() {
+ return shardId;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorModule.java b/src/main/java/org/elasticsearch/percolator/PercolatorModule.java
new file mode 100644
index 0000000..fb18c46
--- /dev/null
+++ b/src/main/java/org/elasticsearch/percolator/PercolatorModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
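+ * Binds {@link PercolatorService} as an eager singleton.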
+ */
+public class PercolatorModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(PercolatorService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java
new file mode 100644
index 0000000..0ff35a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java
@@ -0,0 +1,913 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import com.carrotsearch.hppc.ByteObjectOpenHashMap;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.index.memory.ExtendedMemoryIndex;
+import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CloseableThreadLocal;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.percolate.PercolateShardRequest;
+import org.elasticsearch.action.percolate.PercolateShardResponse;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.XCollector;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.BytesText;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.percolator.stats.ShardPercolateService;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.percolator.QueryCollector.*;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.AggregationPhase;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.FacetPhase;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.InternalFacets;
+import org.elasticsearch.search.highlight.HighlightField;
+import org.elasticsearch.search.highlight.HighlightPhase;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.sort.SortParseElement;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.SourceToParse.source;
+import static org.elasticsearch.percolator.QueryCollector.*;
+
+/**
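+ * Core percolator logic: indexes the document(s) to percolate into a per-thread
+ * MemoryIndex and runs the percolator queries registered in the shard against it,
+ * delegating to a {@link PercolatorType} strategy for counting, matching, scoring
+ * or sorting the results.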
+ */
+public class PercolatorService extends AbstractComponent {
+
+ public final static float NO_SCORE = Float.NEGATIVE_INFINITY;
+ public final static String TYPE_NAME = ".percolator";
+
+ private final CloseableThreadLocal<MemoryIndex> cache;
+ private final IndicesService indicesService;
+ private final ByteObjectOpenHashMap<PercolatorType> percolatorTypes;
+ private final CacheRecycler cacheRecycler;
+ private final PageCacheRecycler pageCacheRecycler;
+ private final ClusterService clusterService;
+
+ private final FacetPhase facetPhase;
+ private final HighlightPhase highlightPhase;
+ private final AggregationPhase aggregationPhase;
+ private final SortParseElement sortParseElement;
+ private final ScriptService scriptService;
+ private final MappingUpdatedAction mappingUpdatedAction;
+
+ @Inject
+ public PercolatorService(Settings settings, IndicesService indicesService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler,
+ HighlightPhase highlightPhase, ClusterService clusterService, FacetPhase facetPhase,
+ AggregationPhase aggregationPhase, ScriptService scriptService,
+ MappingUpdatedAction mappingUpdatedAction) {
+ super(settings);
+ this.indicesService = indicesService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.clusterService = clusterService;
+ this.highlightPhase = highlightPhase;
+ this.facetPhase = facetPhase;
+ this.aggregationPhase = aggregationPhase;
+ this.scriptService = scriptService;
+ this.mappingUpdatedAction = mappingUpdatedAction;
+ this.sortParseElement = new SortParseElement();
+
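+ // One reusable MemoryIndex per thread; the memory retained between percolations
+ // is capped by the indices.memory.memory_index.size_per_thread setting.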
+ final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes();
+ cache = new CloseableThreadLocal<MemoryIndex>() {
+ @Override
+ protected MemoryIndex initialValue() {
+ return new ExtendedMemoryIndex(true, maxReuseBytes);
+ }
+ };
+
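+ // Each percolator strategy registers under its byte id, so the reduce phase
+ // can look up the matching reduce logic from the id carried in each shard response.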
+ percolatorTypes = new ByteObjectOpenHashMap<PercolatorType>(6);
+ percolatorTypes.put(countPercolator.id(), countPercolator);
+ percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
+ percolatorTypes.put(matchPercolator.id(), matchPercolator);
+ percolatorTypes.put(queryPercolator.id(), queryPercolator);
+ percolatorTypes.put(scoringPercolator.id(), scoringPercolator);
+ percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator);
+ }
+
+
+ public ReduceResult reduce(byte percolatorTypeId, List<PercolateShardResponse> shardResults) {
+ PercolatorType percolatorType = percolatorTypes.get(percolatorTypeId);
+ return percolatorType.reduce(shardResults);
+ }
+
+ public PercolateShardResponse percolate(PercolateShardRequest request) {
+ IndexService percolateIndexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = percolateIndexService.shardSafe(request.shardId());
+ indexShard.readAllowed(); // check if we can read the shard...
+
+ ShardPercolateService shardPercolateService = indexShard.shardPercolateService();
+ shardPercolateService.prePercolate();
+ long startTime = System.nanoTime();
+
+ SearchShardTarget searchShardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
+ final PercolateContext context = new PercolateContext(
+ request, searchShardTarget, indexShard, percolateIndexService, cacheRecycler, pageCacheRecycler, scriptService
+ );
+ try {
+
+ ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context);
+ if (context.percolateQueries().isEmpty()) {
+ return new PercolateShardResponse(context, request.index(), request.shardId());
+ }
+
+ if (request.docSource() != null && request.docSource().length() != 0) {
+ parsedDocument = parseFetchedDoc(context, request.docSource(), percolateIndexService, request.documentType());
+ } else if (parsedDocument == null) {
+ throw new ElasticsearchIllegalArgumentException("Nothing to percolate");
+ }
+
+ if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.facets() != null || context.aggregations() != null)) {
+ context.percolateQuery(new MatchAllDocsQuery());
+ }
+
+ if (context.doSort && !context.limit) {
+ throw new ElasticsearchIllegalArgumentException("Can't sort if size isn't specified");
+ }
+
+ if (context.highlight() != null && !context.limit) {
+ throw new ElasticsearchIllegalArgumentException("Can't highlight if size isn't specified");
+ }
+
+ if (context.size() < 0) {
+ context.size(0);
+ }
+
+ // first, parse the source doc into a MemoryIndex
+ final MemoryIndex memoryIndex = cache.get();
+ // TODO: This means percolation does not support nested docs...
+ // So look into: ByteBufferDirectory
+ for (IndexableField field : parsedDocument.rootDoc().getFields()) {
+ if (!field.fieldType().indexed() && field.name().equals(UidFieldMapper.NAME)) {
+ continue;
+ }
+ try {
+ TokenStream tokenStream = field.tokenStream(parsedDocument.analyzer());
+ if (tokenStream != null) {
+ memoryIndex.addField(field.name(), tokenStream, field.boost());
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchException("Failed to create token stream", e);
+ }
+ }
+
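+ // Choose the percolator strategy from the request: count-only vs. matching,
+ // and whether sorting, scoring or a percolate query is involved.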
+ PercolatorType action;
+ if (request.onlyCount()) {
+ action = context.percolateQuery() != null ? queryCountPercolator : countPercolator;
+ } else {
+ if (context.doSort) {
+ action = topMatchingPercolator;
+ } else if (context.percolateQuery() != null) {
+ action = context.trackScores() ? scoringPercolator : queryPercolator;
+ } else {
+ action = matchPercolator;
+ }
+ }
+ context.percolatorTypeId = action.id();
+
+ context.initialize(memoryIndex, parsedDocument);
+ return action.doPercolate(request, context);
+ } finally {
+ context.release();
+ shardPercolateService.postPercolate(System.nanoTime() - startTime);
+ }
+ }
+
+ private ParsedDocument parseRequest(IndexService documentIndexService, PercolateShardRequest request, PercolateContext context) throws ElasticsearchException {
+ BytesReference source = request.source();
+ if (source == null || source.length() == 0) {
+ return null;
+ }
+
+ // TODO: combine all feature parse elements into one map
+ Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
+ Map<String, ? extends SearchParseElement> facetElements = facetPhase.parseElements();
+ Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements();
+
+ ParsedDocument doc = null;
+ XContentParser parser = null;
+
+ // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext being set:
+ // we switch types because this context needs to reflect the percolator queries stored in the shard,
+ // not the in-memory document being percolated.
+ String[] previousTypes = context.types();
+ context.types(new String[]{TYPE_NAME});
+ SearchContext.setCurrent(context);
+ try {
+ parser = XContentFactory.xContent(source).createParser(source);
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ // we need to check for "doc" here, since the next token will be the START_OBJECT
+ // that starts the actual document
+ if ("doc".equals(currentFieldName)) {
+ if (doc != null) {
+ throw new ElasticsearchParseException("Either specify doc or get, not both");
+ }
+
+ MapperService mapperService = documentIndexService.mapperService();
+ DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(request.documentType());
+ doc = docMapper.parse(source(parser).type(request.documentType()).flyweight(true));
+ if (doc.mappingsModified()) {
+ updateMappingOnMaster(docMapper, request, documentIndexService.indexUUID());
+ }
+ // the document parsing exits the "doc" object, so we need to set the new current field.
+ currentFieldName = parser.currentName();
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ SearchParseElement element = hlElements.get(currentFieldName);
+ if (element == null) {
+ element = facetElements.get(currentFieldName);
+ if (element == null) {
+ element = aggregationElements.get(currentFieldName);
+ }
+ }
+
+ if ("query".equals(currentFieldName)) {
+ if (context.percolateQuery() != null) {
+ throw new ElasticsearchParseException("Either specify query or filter, not both");
+ }
+ context.percolateQuery(documentIndexService.queryParserService().parse(parser).query());
+ } else if ("filter".equals(currentFieldName)) {
+ if (context.percolateQuery() != null) {
+ throw new ElasticsearchParseException("Either specify query or filter, not both");
+ }
+ Filter filter = documentIndexService.queryParserService().parseInnerFilter(parser).filter();
+ context.percolateQuery(new XConstantScoreQuery(filter));
+ } else if ("sort".equals(currentFieldName)) {
+ parseSort(parser, context);
+ } else if (element != null) {
+ element.parse(parser, context);
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("sort".equals(currentFieldName)) {
+ parseSort(parser, context);
+ }
+ } else if (token == null) {
+ break;
+ } else if (token.isValue()) {
+ if ("size".equals(currentFieldName)) {
+ context.size(parser.intValue());
+ if (context.size() < 0) {
+ throw new ElasticsearchParseException("size is set to [" + context.size() + "] and is expected to be greater than or equal to 0");
+ }
+ } else if ("sort".equals(currentFieldName)) {
+ parseSort(parser, context);
+ } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) {
+ context.trackScores(parser.booleanValue());
+ }
+ }
+ }
+
+ // We need to get the actual source from the request body for highlighting, so parse the request body again
+ // and only get the doc source.
+ if (context.highlight() != null) {
+ parser.close();
+ currentFieldName = null;
+ parser = XContentFactory.xContent(source).createParser(source);
+ token = parser.nextToken();
+ assert token == XContentParser.Token.START_OBJECT;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("doc".equals(currentFieldName)) {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
+ builder.copyCurrentStructure(parser);
+ builder.close();
+ doc.setSource(bStream.bytes());
+ break;
+ } else {
+ parser.skipChildren();
+ }
+ } else if (token == null) {
+ break;
+ }
+ }
+ }
+
+ } catch (Throwable e) {
+ throw new ElasticsearchParseException("failed to parse request", e);
+ } finally {
+ context.types(previousTypes);
+ SearchContext.removeCurrent();
+ if (parser != null) {
+ parser.close();
+ }
+ }
+
+ return doc;
+ }
+
+ private void parseSort(XContentParser parser, PercolateContext context) throws Exception {
+ sortParseElement.parse(parser, context);
+ // a null sort means the default sort by relevancy (_score desc), the only order supported
+ if (context.sort() == null) {
+ context.doSort = true;
+ } else {
+ throw new ElasticsearchParseException("Only _score desc is supported");
+ }
+ }
+
+ private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, IndexService documentIndexService, String type) {
+ ParsedDocument doc = null;
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc);
+ MapperService mapperService = documentIndexService.mapperService();
+ DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(type);
+ doc = docMapper.parse(source(parser).type(type).flyweight(true));
+
+ if (context.highlight() != null) {
+ doc.setSource(fetchedDoc);
+ }
+ } catch (Throwable e) {
+ throw new ElasticsearchParseException("failed to parse request", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+
+ if (doc == null) {
+ throw new ElasticsearchParseException("No doc to percolate in the request");
+ }
+
+ return doc;
+ }
+
+ public void close() {
+ cache.close();
+ }
+
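+ // Strategy for one flavor of percolation: each implementation percolates a
+ // single shard and knows how to reduce the per-shard responses into one result.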
+ interface PercolatorType {
+
+ // 0x00 is reserved for empty type.
+ byte id();
+
+ ReduceResult reduce(List<PercolateShardResponse> shardResults);
+
+ PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context);
+
+ }
+
+ private final PercolatorType countPercolator = new PercolatorType() {
+
+ @Override
+ public byte id() {
+ return 0x01;
+ }
+
+ @Override
+ public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
+ long finalCount = 0;
+ for (PercolateShardResponse shardResponse : shardResults) {
+ finalCount += shardResponse.count();
+ }
+
+ assert !shardResults.isEmpty();
+ InternalFacets reducedFacets = reduceFacets(shardResults);
+ InternalAggregations reducedAggregations = reduceAggregations(shardResults);
+ return new ReduceResult(finalCount, reducedFacets, reducedAggregations);
+ }
+
+ @Override
+ public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
+ long count = 0;
+ Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
+ for (Map.Entry<HashedBytesRef, Query> entry : context.percolateQueries().entrySet()) {
+ collector.reset();
+ try {
+ context.docSearcher().search(entry.getValue(), collector);
+ } catch (Throwable e) {
+ logger.debug("[" + entry.getKey() + "] failed to execute query", e);
+ throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
+ }
+
+ if (collector.exists()) {
+ count++;
+ }
+ }
+ return new PercolateShardResponse(count, context, request.index(), request.shardId());
+ }
+
+ };
+
+ private final PercolatorType queryCountPercolator = new PercolatorType() {
+
+ @Override
+ public byte id() {
+ return 0x02;
+ }
+
+ @Override
+ public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
+ return countPercolator.reduce(shardResults);
+ }
+
+ @Override
+ public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
+ long count = 0;
+ Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
+ try {
+ Count countCollector = count(logger, context);
+ queryBasedPercolating(percolatorSearcher, context, countCollector);
+ count = countCollector.counter();
+ } catch (Throwable e) {
+ logger.warn("failed to execute", e);
+ } finally {
+ percolatorSearcher.release();
+ }
+ return new PercolateShardResponse(count, context, request.index(), request.shardId());
+ }
+
+ };
+
+ private final PercolatorType matchPercolator = new PercolatorType() {
+
+ @Override
+ public byte id() {
+ return 0x03;
+ }
+
+ @Override
+ public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
+ long foundMatches = 0;
+ int numMatches = 0;
+ for (PercolateShardResponse response : shardResults) {
+ foundMatches += response.count();
+ numMatches += response.matches().length;
+ }
+ int requestedSize = shardResults.get(0).requestedSize();
+
+ // Use a custom impl of AbstractBigArray for Object[]?
+ List<PercolateResponse.Match> finalMatches = new ArrayList<PercolateResponse.Match>(requestedSize == 0 ? numMatches : requestedSize);
+ outer:
+ for (PercolateShardResponse response : shardResults) {
+ Text index = new StringText(response.getIndex());
+ for (int i = 0; i < response.matches().length; i++) {
+ float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];
+ Text match = new BytesText(new BytesArray(response.matches()[i]));
+ Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i);
+ finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
+ if (requestedSize != 0 && finalMatches.size() == requestedSize) {
+ break outer;
+ }
+ }
+ }
+
+ assert !shardResults.isEmpty();
+ InternalFacets reducedFacets = reduceFacets(shardResults);
+ InternalAggregations reducedAggregations = reduceAggregations(shardResults);
+ return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedFacets, reducedAggregations);
+ }
+
+ @Override
+ public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
+ long count = 0;
+ List<BytesRef> matches = new ArrayList<BytesRef>();
+ List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
+ Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
+
+ for (Map.Entry<HashedBytesRef, Query> entry : context.percolateQueries().entrySet()) {
+ collector.reset();
+ if (context.highlight() != null) {
+ context.parsedQuery(new ParsedQuery(entry.getValue(), ImmutableMap.<String, Filter>of()));
+ context.hitContext().cache().clear();
+ }
+ try {
+ context.docSearcher().search(entry.getValue(), collector);
+ } catch (Throwable e) {
+ logger.debug("[" + entry.getKey() + "] failed to execute query", e);
+ throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
+ }
+
+ if (collector.exists()) {
+ if (!context.limit || count < context.size()) {
+ matches.add(entry.getKey().bytes);
+ if (context.highlight() != null) {
+ highlightPhase.hitExecute(context, context.hitContext());
+ hls.add(context.hitContext().hit().getHighlightFields());
+ }
+ }
+ count++;
+ }
+ }
+
+ BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
+ return new PercolateShardResponse(finalMatches, hls, count, context, request.index(), request.shardId());
+ }
+ };
+
+ private final PercolatorType queryPercolator = new PercolatorType() {
+
+ @Override
+ public byte id() {
+ return 0x04;
+ }
+
+ @Override
+ public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
+ return matchPercolator.reduce(shardResults);
+ }
+
+ @Override
+ public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
+ Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
+ try {
+ Match match = match(logger, context, highlightPhase);
+ queryBasedPercolating(percolatorSearcher, context, match);
+ List<BytesRef> matches = match.matches();
+ List<Map<String, HighlightField>> hls = match.hls();
+ long count = match.counter();
+
+ BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
+ return new PercolateShardResponse(finalMatches, hls, count, context, request.index(), request.shardId());
+ } catch (Throwable e) {
+ logger.debug("failed to execute", e);
+ throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
+ } finally {
+ percolatorSearcher.release();
+ }
+ }
+ };
+
+ private final PercolatorType scoringPercolator = new PercolatorType() {
+
+ @Override
+ public byte id() {
+ return 0x05;
+ }
+
+ @Override
+ public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
+ return matchPercolator.reduce(shardResults);
+ }
+
+ @Override
+ public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
+ Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
+ try {
+ MatchAndScore matchAndScore = matchAndScore(logger, context, highlightPhase);
+ queryBasedPercolating(percolatorSearcher, context, matchAndScore);
+ List<BytesRef> matches = matchAndScore.matches();
+ List<Map<String, HighlightField>> hls = matchAndScore.hls();
+ float[] scores = matchAndScore.scores().toArray();
+ long count = matchAndScore.counter();
+
+ BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
+ return new PercolateShardResponse(finalMatches, hls, count, scores, context, request.index(), request.shardId());
+ } catch (Throwable e) {
+ logger.debug("failed to execute", e);
+ throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
+ } finally {
+ percolatorSearcher.release();
+ }
+ }
+ };
+
+ private final PercolatorType topMatchingPercolator = new PercolatorType() {
+
+ @Override
+ public byte id() {
+ return 0x06;
+ }
+
+ @Override
+ public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
+ long foundMatches = 0;
+ int nonEmptyResponses = 0;
+ int firstNonEmptyIndex = 0;
+ for (int i = 0; i < shardResults.size(); i++) {
+ PercolateShardResponse response = shardResults.get(i);
+ foundMatches += response.count();
+ if (response.matches().length != 0) {
+ if (firstNonEmptyIndex == 0) {
+ firstNonEmptyIndex = i;
+ }
+ nonEmptyResponses++;
+ }
+ }
+
+ int requestedSize = shardResults.get(0).requestedSize();
+
+ // Use a custom impl of AbstractBigArray for Object[]?
+ List<PercolateResponse.Match> finalMatches = new ArrayList<PercolateResponse.Match>(requestedSize);
+ if (nonEmptyResponses == 1) {
+ PercolateShardResponse response = shardResults.get(firstNonEmptyIndex);
+ Text index = new StringText(response.getIndex());
+ for (int i = 0; i < response.matches().length; i++) {
+ float score = response.scores().length == 0 ? Float.NaN : response.scores()[i];
+ Text match = new BytesText(new BytesArray(response.matches()[i]));
+ if (!response.hls().isEmpty()) {
+ Map<String, HighlightField> hl = response.hls().get(i);
+ finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
+ } else {
+ finalMatches.add(new PercolateResponse.Match(index, match, score));
+ }
+ }
+ } else {
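+ // Merge the score-sorted per-shard match lists: each iteration picks the
+ // shard whose next unconsumed match has the highest score, until the
+ // requested size is reached or all shard matches are exhausted.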
+ int[] slots = new int[shardResults.size()];
+ while (true) {
+ float lowestScore = Float.NEGATIVE_INFINITY;
+ int requestIndex = -1;
+ int itemIndex = -1;
+ for (int i = 0; i < shardResults.size(); i++) {
+ int scoreIndex = slots[i];
+ float[] scores = shardResults.get(i).scores();
+ if (scoreIndex >= scores.length) {
+ continue;
+ }
+
+ float score = scores[scoreIndex];
+ int cmp = Float.compare(lowestScore, score);
+ // TODO: Maybe add a tie-breaker?
+ if (cmp < 0) {
+ requestIndex = i;
+ itemIndex = scoreIndex;
+ lowestScore = score;
+ }
+ }
+
+ // This means the shard matches have been exhausted and we should bail
+ if (requestIndex == -1) {
+ break;
+ }
+
+ slots[requestIndex]++;
+
+ PercolateShardResponse shardResponse = shardResults.get(requestIndex);
+ Text index = new StringText(shardResponse.getIndex());
+ Text match = new BytesText(new BytesArray(shardResponse.matches()[itemIndex]));
+ float score = shardResponse.scores()[itemIndex];
+ if (!shardResponse.hls().isEmpty()) {
+ Map<String, HighlightField> hl = shardResponse.hls().get(itemIndex);
+ finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
+ } else {
+ finalMatches.add(new PercolateResponse.Match(index, match, score));
+ }
+ if (finalMatches.size() == requestedSize) {
+ break;
+ }
+ }
+ }
+
+ assert !shardResults.isEmpty();
+ InternalFacets reducedFacets = reduceFacets(shardResults);
+ InternalAggregations reducedAggregations = reduceAggregations(shardResults);
+ return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedFacets, reducedAggregations);
+ }
+
+ @Override
+ public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
+ Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
+ try {
+ MatchAndSort matchAndSort = QueryCollector.matchAndSort(logger, context);
+ queryBasedPercolating(percolatorSearcher, context, matchAndSort);
+ TopDocs topDocs = matchAndSort.topDocs();
+ long count = topDocs.totalHits;
+ List<BytesRef> matches = new ArrayList<BytesRef>(topDocs.scoreDocs.length);
+ float[] scores = new float[topDocs.scoreDocs.length];
+ List<Map<String, HighlightField>> hls = null;
+ if (context.highlight() != null) {
+ hls = new ArrayList<Map<String, HighlightField>>(topDocs.scoreDocs.length);
+ }
+
+ final FieldMapper<?> idMapper = context.mapperService().smartNameFieldMapper(IdFieldMapper.NAME);
+ final IndexFieldData<?> idFieldData = context.fieldData().getForField(idMapper);
+ int i = 0;
+ final HashedBytesRef spare = new HashedBytesRef(new BytesRef());
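+ // Map each top doc back to the percolator query it represents: resolve the
+ // global doc id to its segment, then read the single-valued _id via field data.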
+ for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+ int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, percolatorSearcher.reader().leaves());
+ AtomicReaderContext atomicReaderContext = percolatorSearcher.reader().leaves().get(segmentIdx);
+ BytesValues values = idFieldData.load(atomicReaderContext).getBytesValues(true);
+ final int localDocId = scoreDoc.doc - atomicReaderContext.docBase;
+ final int numValues = values.setDocument(localDocId);
+ assert numValues == 1;
+ spare.bytes = values.nextValue();
+ spare.hash = values.currentValueHash();
+ matches.add(values.copyShared());
+ if (hls != null) {
+ Query query = context.percolateQueries().get(spare);
+ context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
+ context.hitContext().cache().clear();
+ highlightPhase.hitExecute(context, context.hitContext());
+ hls.add(i, context.hitContext().hit().getHighlightFields());
+ }
+ scores[i++] = scoreDoc.score;
+ }
+ if (hls != null) {
+ return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), hls, count, scores, context, request.index(), request.shardId());
+ } else {
+ return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), count, scores, context, request.index(), request.shardId());
+ }
+ } catch (Throwable e) {
+ logger.debug("failed to execute", e);
+ throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
+ } finally {
+ percolatorSearcher.release();
+ }
+ }
+
+ };
+
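+ // Runs the percolate query against the shard, restricted to documents of the
+ // internal .percolator type, feeding every hit into the given collector;
+ // afterwards finishes any facet and aggregation collection.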
+ private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException {
+ Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
+ percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter);
+ FilteredQuery query = new FilteredQuery(context.percolateQuery(), percolatorTypeFilter);
+ percolatorSearcher.searcher().search(query, percolateCollector);
+
+ for (Collector queryCollector : percolateCollector.facetAndAggregatorCollector) {
+ if (queryCollector instanceof XCollector) {
+ ((XCollector) queryCollector).postCollection();
+ }
+ }
+ if (context.facets() != null) {
+ facetPhase.execute(context);
+ }
+ if (context.aggregations() != null) {
+ aggregationPhase.execute(context);
+ }
+ }
+
+ public final static class ReduceResult {
+
+ private final long count;
+ private final PercolateResponse.Match[] matches;
+ private final InternalFacets reducedFacets;
+ private final InternalAggregations reducedAggregations;
+
+ ReduceResult(long count, PercolateResponse.Match[] matches, InternalFacets reducedFacets, InternalAggregations reducedAggregations) {
+ this.count = count;
+ this.matches = matches;
+ this.reducedFacets = reducedFacets;
+ this.reducedAggregations = reducedAggregations;
+ }
+
+ public ReduceResult(long count, InternalFacets reducedFacets, InternalAggregations reducedAggregations) {
+ this.count = count;
+ this.matches = null;
+ this.reducedFacets = reducedFacets;
+ this.reducedAggregations = reducedAggregations;
+ }
+
+ public long count() {
+ return count;
+ }
+
+ public PercolateResponse.Match[] matches() {
+ return matches;
+ }
+
+ public InternalFacets reducedFacets() {
+ return reducedFacets;
+ }
+
+ public InternalAggregations reducedAggregations() {
+ return reducedAggregations;
+ }
+ }
+
+ // TODO: maybe move this logic into MappingUpdatedAction? There is similar logic for the index and bulk api now.
+ private void updateMappingOnMaster(DocumentMapper documentMapper, final PercolateShardRequest request, String indexUUID) {
+ // we generate the order id before we get the mapping to send and refresh the source, so that
+ // if two updates happen concurrently, we know the one with the later order id includes the earlier one
+ long orderId = mappingUpdatedAction.generateNextMappingUpdateOrder();
+ documentMapper.refreshSource();
+ DiscoveryNode node = clusterService.localNode();
+ final MappingUpdatedAction.MappingUpdatedRequest mappingRequest = new MappingUpdatedAction.MappingUpdatedRequest(
+ request.index(), indexUUID, request.documentType(), documentMapper.mappingSource(), orderId, node != null ? node.id() : null
+ );
+ logger.trace("Sending mapping updated to master: {}", mappingRequest);
+ mappingUpdatedAction.execute(mappingRequest, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {
+ @Override
+ public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
+ // all is well
+ logger.debug("Successfully updated master with mapping update: {}", mappingRequest);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.warn("Failed to update master on updated mapping for {}", e, mappingRequest);
+ }
+ });
+ }
+
+ private InternalFacets reduceFacets(List<PercolateShardResponse> shardResults) {
+ if (shardResults.get(0).facets() == null) {
+ return null;
+ }
+
+ if (shardResults.size() == 1) {
+ return shardResults.get(0).facets();
+ }
+
+ PercolateShardResponse firstShardResponse = shardResults.get(0);
+ List<Facet> aggregatedFacets = Lists.newArrayList();
+ List<Facet> namedFacets = Lists.newArrayList();
+ for (Facet facet : firstShardResponse.facets()) {
+ // collect the facets with this name from every shard response into one list, then reduce it
+ namedFacets.clear();
+ for (PercolateShardResponse entry : shardResults) {
+ for (Facet facet1 : entry.facets()) {
+ if (facet.getName().equals(facet1.getName())) {
+ namedFacets.add(facet1);
+ }
+ }
+ }
+ if (!namedFacets.isEmpty()) {
+ Facet aggregatedFacet = ((InternalFacet) namedFacets.get(0)).reduce(new InternalFacet.ReduceContext(cacheRecycler, namedFacets));
+ aggregatedFacets.add(aggregatedFacet);
+ }
+ }
+ return new InternalFacets(aggregatedFacets);
+ }
+
+ private InternalAggregations reduceAggregations(List<PercolateShardResponse> shardResults) {
+ if (shardResults.get(0).aggregations() == null) {
+ return null;
+ }
+
+ if (shardResults.size() == 1) {
+ return shardResults.get(0).aggregations();
+ }
+
+ List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(shardResults.size());
+ for (PercolateShardResponse shardResult : shardResults) {
+ aggregationsList.add(shardResult.aggregations());
+ }
+ return InternalAggregations.reduce(aggregationsList, cacheRecycler);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/percolator/QueryCollector.java b/src/main/java/org/elasticsearch/percolator/QueryCollector.java
new file mode 100644
index 0000000..6529341
--- /dev/null
+++ b/src/main/java/org/elasticsearch/percolator/QueryCollector.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.FilteredCollector;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.search.aggregations.AggregationPhase;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.facet.SearchContextFacets;
+import org.elasticsearch.search.facet.nested.NestedFacetExecutor;
+import org.elasticsearch.search.highlight.HighlightField;
+import org.elasticsearch.search.highlight.HighlightPhase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+/**
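+ * Base collector for percolation: resolves each collected percolator document
+ * to its registered query via the id field data, so subclasses can execute it
+ * against the in-memory document, and forwards matches to any facet and
+ * aggregation collectors.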
+ */
+abstract class QueryCollector extends Collector {
+
+ final IndexFieldData<?> idFieldData;
+ final IndexSearcher searcher;
+ final ConcurrentMap<HashedBytesRef, Query> queries;
+ final ESLogger logger;
+
+ final Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
+ final HashedBytesRef spare = new HashedBytesRef(new BytesRef());
+
+ BytesValues values;
+
+ final List<Collector> facetAndAggregatorCollector;
+
+ QueryCollector(ESLogger logger, PercolateContext context) {
+ this.logger = logger;
+ this.queries = context.percolateQueries();
+ this.searcher = context.docSearcher();
+ final FieldMapper<?> idMapper = context.mapperService().smartNameFieldMapper(IdFieldMapper.NAME);
+ this.idFieldData = context.fieldData().getForField(idMapper);
+
+ ImmutableList.Builder<Collector> facetAggCollectorBuilder = ImmutableList.builder();
+ if (context.facets() != null) {
+ for (SearchContextFacets.Entry entry : context.facets().entries()) {
+ if (entry.isGlobal()) {
+ continue; // not supported for now
+ }
+ Collector collector = entry.getFacetExecutor().collector();
+ if (entry.getFilter() != null) {
+ if (collector instanceof NestedFacetExecutor.Collector) {
+ collector = new NestedFacetExecutor.Collector((NestedFacetExecutor.Collector) collector, entry.getFilter());
+ } else {
+ collector = new FilteredCollector(collector, entry.getFilter());
+ }
+ }
+ facetAggCollectorBuilder.add(collector);
+ }
+ }
+
+ if (context.aggregations() != null) {
+ AggregationContext aggregationContext = new AggregationContext(context);
+ context.aggregations().aggregationContext(aggregationContext);
+
+ List<Aggregator> aggregatorCollectors = new ArrayList<Aggregator>();
+ Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext);
+ for (int i = 0; i < aggregators.length; i++) {
+ if (!(aggregators[i] instanceof GlobalAggregator)) {
+ Aggregator aggregator = aggregators[i];
+ if (aggregator.shouldCollect()) {
+ aggregatorCollectors.add(aggregator);
+ }
+ }
+ }
+ context.aggregations().aggregators(aggregators);
+ if (!aggregatorCollectors.isEmpty()) {
+ facetAggCollectorBuilder.add(new AggregationPhase.AggregationsCollector(aggregatorCollectors, aggregationContext));
+ }
+ }
+ facetAndAggregatorCollector = facetAggCollectorBuilder.build();
+ }
+
+ public void postMatch(int doc) throws IOException {
+ for (Collector collector : facetAndAggregatorCollector) {
+ collector.collect(doc);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ for (Collector collector : facetAndAggregatorCollector) {
+ collector.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ // we use the UID because id might not be indexed
+ values = idFieldData.load(context).getBytesValues(true);
+ for (Collector collector : facetAndAggregatorCollector) {
+ collector.setNextReader(context);
+ }
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+
+ static Match match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
+ return new Match(logger, context, highlightPhase);
+ }
+
+ static Count count(ESLogger logger, PercolateContext context) {
+ return new Count(logger, context);
+ }
+
+ static MatchAndScore matchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
+ return new MatchAndScore(logger, context, highlightPhase);
+ }
+
+ static MatchAndSort matchAndSort(ESLogger logger, PercolateContext context) {
+ return new MatchAndSort(logger, context);
+ }
+
+
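+ // Resolves the percolator query belonging to the given doc by reading its
+ // single-valued id from field data and looking it up in the registered queries.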
+ protected final Query getQuery(int doc) {
+ final int numValues = values.setDocument(doc);
+ if (numValues == 0) {
+ return null;
+ }
+ assert numValues == 1;
+ spare.reset(values.nextValue(), values.currentValueHash());
+ return queries.get(spare);
+ }
+
+
+
+ final static class Match extends QueryCollector {
+
+ final PercolateContext context;
+ final HighlightPhase highlightPhase;
+
+ final List<BytesRef> matches = new ArrayList<BytesRef>();
+ final List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
+ final boolean limit;
+ final int size;
+ long counter = 0;
+
+ Match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
+ super(logger, context);
+ this.limit = context.limit;
+ this.size = context.size();
+ this.context = context;
+ this.highlightPhase = highlightPhase;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final Query query = getQuery(doc);
+ if (query == null) {
+ // log???
+ return;
+ }
+ // run the query
+ try {
+ collector.reset();
+ if (context.highlight() != null) {
+ context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
+ context.hitContext().cache().clear();
+ }
+
+ searcher.search(query, collector);
+ if (collector.exists()) {
+ if (!limit || counter < size) {
+ matches.add(values.copyShared());
+ if (context.highlight() != null) {
+ highlightPhase.hitExecute(context, context.hitContext());
+ hls.add(context.hitContext().hit().getHighlightFields());
+ }
+ }
+ counter++;
+ postMatch(doc);
+ }
+ } catch (IOException e) {
+ logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
+ }
+ }
+
+ long counter() {
+ return counter;
+ }
+
+ List<BytesRef> matches() {
+ return matches;
+ }
+
+ List<Map<String, HighlightField>> hls() {
+ return hls;
+ }
+ }
+
+ final static class MatchAndSort extends QueryCollector {
+
+ private final TopScoreDocCollector topDocsCollector;
+
+ MatchAndSort(ESLogger logger, PercolateContext context) {
+ super(logger, context);
+ // TODO: Use TopFieldCollector.create(...) for ascending and descending scoring?
+ topDocsCollector = TopScoreDocCollector.create(context.size(), false);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final Query query = getQuery(doc);
+ if (query == null) {
+ // log???
+ return;
+ }
+ // run the query
+ try {
+ collector.reset();
+ searcher.search(query, collector);
+ if (collector.exists()) {
+ topDocsCollector.collect(doc);
+ postMatch(doc);
+ }
+ } catch (IOException e) {
+ logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ super.setNextReader(context);
+ topDocsCollector.setNextReader(context);
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ topDocsCollector.setScorer(scorer);
+ }
+
+ TopDocs topDocs() {
+ return topDocsCollector.topDocs();
+ }
+
+ }
+
+ final static class MatchAndScore extends QueryCollector {
+
+ final PercolateContext context;
+ final HighlightPhase highlightPhase;
+
+ final List<BytesRef> matches = new ArrayList<BytesRef>();
+ final List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
+ // TODO: Use thread local in order to cache the scores lists?
+ final FloatArrayList scores = new FloatArrayList();
+ final boolean limit;
+ final int size;
+ long counter = 0;
+
+ private Scorer scorer;
+
+ MatchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
+ super(logger, context);
+ this.limit = context.limit;
+ this.size = context.size();
+ this.context = context;
+ this.highlightPhase = highlightPhase;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final Query query = getQuery(doc);
+ if (query == null) {
+ // log???
+ return;
+ }
+ // run the query
+ try {
+ collector.reset();
+ if (context.highlight() != null) {
+ context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
+ context.hitContext().cache().clear();
+ }
+ searcher.search(query, collector);
+ if (collector.exists()) {
+ if (!limit || counter < size) {
+ matches.add(values.copyShared());
+ scores.add(scorer.score());
+ if (context.highlight() != null) {
+ highlightPhase.hitExecute(context, context.hitContext());
+ hls.add(context.hitContext().hit().getHighlightFields());
+ }
+ }
+ counter++;
+ postMatch(doc);
+ }
+ } catch (IOException e) {
+ logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.scorer = scorer;
+ }
+
+ long counter() {
+ return counter;
+ }
+
+ List<BytesRef> matches() {
+ return matches;
+ }
+
+ FloatArrayList scores() {
+ return scores;
+ }
+
+ List<Map<String, HighlightField>> hls() {
+ return hls;
+ }
+ }
+
+ final static class Count extends QueryCollector {
+
+ private long counter = 0;
+
+ Count(ESLogger logger, PercolateContext context) {
+ super(logger, context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final Query query = getQuery(doc);
+ if (query == null) {
+ // log???
+ return;
+ }
+ // run the query
+ try {
+ collector.reset();
+ searcher.search(query, collector);
+ if (collector.exists()) {
+ counter++;
+ postMatch(doc);
+ }
+ } catch (IOException e) {
+ logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
+ }
+ }
+
+ long counter() {
+ return counter;
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/plugins/AbstractPlugin.java b/src/main/java/org/elasticsearch/plugins/AbstractPlugin.java
new file mode 100644
index 0000000..971593f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/AbstractPlugin.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.CloseableIndexComponent;
+
+import java.util.Collection;
+
+/**
+ * A base class for a plugin.
+ * <p/>
+ * A plugin can be dynamically injected with a {@link Module} by implementing an <tt>onModule(AnyModule)</tt> method,
+ * removing the need to override {@link #processModule(org.elasticsearch.common.inject.Module)} and check using
+ * <tt>instanceof</tt>.
+ */
+public abstract class AbstractPlugin implements Plugin {
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Class<? extends Module>> modules() {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Module> modules(Settings settings) {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Class<? extends LifecycleComponent>> services() {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Class<? extends Module>> indexModules() {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Module> indexModules(Settings settings) {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Class<? extends CloseableIndexComponent>> indexServices() {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Class<? extends Module>> shardModules() {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Module> shardModules(Settings settings) {
+ return ImmutableList.of();
+ }
+
+ /**
+ * Defaults to returning an empty list.
+ */
+ @Override
+ public Collection<Class<? extends CloseableIndexComponent>> shardServices() {
+ return ImmutableList.of();
+ }
+
+ @Override
+ public void processModule(Module module) {
+ // nothing to do here
+ }
+
+ @Override
+ public Settings additionalSettings() {
+ return ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+}
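As a usage sketch (all names below are hypothetical and not part of this commit), a plugin built on AbstractPlugin overrides only the extension points it actually uses and inherits the empty defaults for the rest:

import java.util.Collection;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.plugins.AbstractPlugin;

public class ExamplePlugin extends AbstractPlugin {

    // A hypothetical node-level module contributed by this plugin.
    public static class ExampleModule extends AbstractModule {
        @Override
        protected void configure() {
            // bind plugin components here
        }
    }

    @Override
    public String name() {
        return "example";
    }

    @Override
    public String description() {
        return "An example plugin";
    }

    // Contribute one node-level module class (instantiated automatically);
    // all other extension points keep the empty defaults from AbstractPlugin.
    @Override
    public Collection<Class<? extends Module>> modules() {
        return ImmutableList.<Class<? extends Module>>of(ExampleModule.class);
    }
}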
diff --git a/src/main/java/org/elasticsearch/plugins/IndexPluginsModule.java b/src/main/java/org/elasticsearch/plugins/IndexPluginsModule.java
new file mode 100644
index 0000000..d9f35ae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/IndexPluginsModule.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.inject.Modules.createModule;
+
+/**
+ * Spawns the per-index modules contributed by plugins and lets plugins pre-process each module.
+ */
+public class IndexPluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
+
+ private final Settings settings;
+
+ private final PluginsService pluginsService;
+
+ public IndexPluginsModule(Settings settings, PluginsService pluginsService) {
+ this.settings = settings;
+ this.pluginsService = pluginsService;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ List<Module> modules = Lists.newArrayList();
+ Collection<Class<? extends Module>> modulesClasses = pluginsService.indexModules();
+ for (Class<? extends Module> moduleClass : modulesClasses) {
+ modules.add(createModule(moduleClass, settings));
+ }
+ modules.addAll(pluginsService.indexModules(settings));
+ return modules;
+ }
+
+ @Override
+ public void processModule(Module module) {
+ pluginsService.processModule(module);
+ }
+
+ @Override
+ protected void configure() {
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/plugins/Plugin.java b/src/main/java/org/elasticsearch/plugins/Plugin.java
new file mode 100644
index 0000000..eee51d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/Plugin.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.CloseableIndexComponent;
+
+import java.util.Collection;
+
+/**
+ * An extension point that allows plugging in custom functionality.
+ * <p/>
+ * A plugin can be dynamically injected with a {@link Module} by implementing an <tt>onModule(AnyModule)</tt> method,
+ * removing the need to override {@link #processModule(org.elasticsearch.common.inject.Module)} and check using
+ * <tt>instanceof</tt>.
+ */
+public interface Plugin {
+
+ /**
+ * The name of the plugin.
+ */
+ String name();
+
+ /**
+ * The description of the plugin.
+ */
+ String description();
+
+ /**
+ * Node level modules (classes; instances will be created automatically).
+ */
+ Collection<Class<? extends Module>> modules();
+
+ /**
+ * Node level modules (instances).
+ *
+ * @param settings The node level settings.
+ */
+ Collection<? extends Module> modules(Settings settings);
+
+ /**
+ * Node level services that will be automatically started/stopped/closed.
+ */
+ Collection<Class<? extends LifecycleComponent>> services();
+
+ /**
+ * Per index modules.
+ */
+ Collection<Class<? extends Module>> indexModules();
+
+ /**
+ * Per index modules.
+ */
+ Collection<? extends Module> indexModules(Settings settings);
+
+ /**
+ * Per index services that will be automatically closed.
+ */
+ Collection<Class<? extends CloseableIndexComponent>> indexServices();
+
+ /**
+ * Per index shard modules.
+ */
+ Collection<Class<? extends Module>> shardModules();
+
+ /**
+ * Per index shard modules.
+ */
+ Collection<? extends Module> shardModules(Settings settings);
+
+ /**
+ * Per index shard service that will be automatically closed.
+ */
+ Collection<Class<? extends CloseableIndexComponent>> shardServices();
+
+ /**
+ * Process a specific module. Note, it's simpler to implement a custom <tt>onModule(AnyModule module)</tt>
+ * method, which will automatically be called for the relevant module type.
+ */
+ void processModule(Module module);
+
+ /**
+ * Additional node settings loaded by the plugin.
+ */
+ Settings additionalSettings();
+}
diff --git a/src/main/java/org/elasticsearch/plugins/PluginManager.java b/src/main/java/org/elasticsearch/plugins/PluginManager.java
new file mode 100644
index 0000000..8c34eba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/PluginManager.java
@@ -0,0 +1,546 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.base.Strings;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.http.client.HttpDownloadHelper;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.*;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ * A command-line tool that downloads, installs, removes and lists Elasticsearch plugins.
+ */
+public class PluginManager {
+ public static final class ACTION {
+ public static final int NONE = 0;
+ public static final int INSTALL = 1;
+ public static final int REMOVE = 2;
+ public static final int LIST = 3;
+ }
+
+ public enum OutputMode {
+ DEFAULT, SILENT, VERBOSE
+ }
+
+ // By default the timeout is 0, which means no timeout
+ public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMillis(0);
+
+ private final Environment environment;
+
+ private String url;
+ private OutputMode outputMode;
+ private TimeValue timeout;
+
+ public PluginManager(Environment environment, String url, OutputMode outputMode, TimeValue timeout) {
+ this.environment = environment;
+ this.url = url;
+ this.outputMode = outputMode;
+ this.timeout = timeout;
+
+ TrustManager[] trustAllCerts = new TrustManager[]{
+ new X509TrustManager() {
+ public java.security.cert.X509Certificate[] getAcceptedIssuers() {
+ return null;
+ }
+
+ public void checkClientTrusted(
+ java.security.cert.X509Certificate[] certs, String authType) {
+ }
+
+ public void checkServerTrusted(
+ java.security.cert.X509Certificate[] certs, String authType) {
+ }
+ }
+ };
+
+ // Install the all-trusting trust manager
+ try {
+ SSLContext sc = SSLContext.getInstance("SSL");
+ sc.init(null, trustAllCerts, new java.security.SecureRandom());
+ HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void downloadAndExtract(String name) throws IOException {
+ HttpDownloadHelper downloadHelper = new HttpDownloadHelper();
+ boolean downloaded = false;
+ HttpDownloadHelper.DownloadProgress progress;
+ if (outputMode == OutputMode.SILENT) {
+ progress = new HttpDownloadHelper.NullProgress();
+ } else {
+ progress = new HttpDownloadHelper.VerboseProgress(System.out);
+ }
+
+ if (!environment.pluginsFile().canWrite()) {
+ System.err.println();
+ throw new IOException("plugin directory " + environment.pluginsFile() + " is read only");
+ }
+
+ PluginHandle pluginHandle = PluginHandle.parse(name);
+ File pluginFile = pluginHandle.distroFile(environment);
+ // extract the plugin
+ File extractLocation = pluginHandle.extractedDir(environment);
+ if (extractLocation.exists()) {
+ throw new IOException("plugin directory " + extractLocation.getAbsolutePath() + " already exists. To update the plugin, uninstall it first using -remove " + name + " command");
+ }
+
+ // first, try directly from the URL provided
+ if (url != null) {
+ URL pluginUrl = new URL(url);
+ log("Trying " + pluginUrl.toExternalForm() + "...");
+ try {
+ downloadHelper.download(pluginUrl, pluginFile, progress, this.timeout);
+ downloaded = true;
+ } catch (ElasticsearchTimeoutException e) {
+ throw e;
+ } catch (Exception e) {
+ // ignore
+ log("Failed: " + ExceptionsHelper.detailedMessage(e));
+ }
+ }
+
+ if (!downloaded) {
+ // We try all possible locations
+ for (URL url : pluginHandle.urls()) {
+ log("Trying " + url.toExternalForm() + "...");
+ try {
+ downloadHelper.download(url, pluginFile, progress, this.timeout);
+ downloaded = true;
+ break;
+ } catch (ElasticsearchTimeoutException e) {
+ throw e;
+ } catch (Exception e) {
+ debug("Failed: " + ExceptionsHelper.detailedMessage(e));
+ }
+ }
+ }
+
+ if (!downloaded) {
+ throw new IOException("failed to download out of all possible locations..., use -verbose to get detailed information");
+ }
+
+ ZipFile zipFile = null;
+ try {
+ zipFile = new ZipFile(pluginFile);
+ //we check whether we need to remove the top-level folder while extracting
+ //sometimes (e.g. github) the downloaded archive contains a top-level folder which needs to be removed
+ boolean removeTopLevelDir = topLevelDirInExcess(zipFile);
+ Enumeration<? extends ZipEntry> zipEntries = zipFile.entries();
+ while (zipEntries.hasMoreElements()) {
+ ZipEntry zipEntry = zipEntries.nextElement();
+ if (zipEntry.isDirectory()) {
+ continue;
+ }
+ String zipEntryName = zipEntry.getName().replace('\\', '/');
+ if (removeTopLevelDir) {
+ zipEntryName = zipEntryName.substring(zipEntryName.indexOf('/'));
+ }
+ File target = new File(extractLocation, zipEntryName);
+ FileSystemUtils.mkdirs(target.getParentFile());
+ Streams.copy(zipFile.getInputStream(zipEntry), new FileOutputStream(target));
+ }
+ log("Installed " + name + " into " + extractLocation.getAbsolutePath());
+ } catch (Exception e) {
+ log("failed to extract plugin [" + pluginFile + "]: " + ExceptionsHelper.detailedMessage(e));
+ return;
+ } finally {
+ if (zipFile != null) {
+ try {
+ zipFile.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ pluginFile.delete();
+ }
+
+ if (FileSystemUtils.hasExtensions(extractLocation, ".java")) {
+ debug("Plugin installation assumed to be site plugin, but contains source code, aborting installation...");
+ FileSystemUtils.deleteRecursively(extractLocation);
+ throw new IllegalArgumentException("Plugin installation assumed to be site plugin, but contains source code, aborting installation.");
+ }
+
+ File binFile = new File(extractLocation, "bin");
+ if (binFile.exists() && binFile.isDirectory()) {
+ File toLocation = pluginHandle.binDir(environment);
+ debug("Found bin, moving to " + toLocation.getAbsolutePath());
+ FileSystemUtils.deleteRecursively(toLocation);
+ binFile.renameTo(toLocation);
+ debug("Installed " + name + " into " + toLocation.getAbsolutePath());
+ }
+
+ // try to identify the plugin type: if it has no .class or .jar files in it,
+ // it's probably a _site plugin, and if it does not have a _site dir in it, move everything to _site
+ if (!new File(extractLocation, "_site").exists()) {
+ if (!FileSystemUtils.hasExtensions(extractLocation, ".class", ".jar")) {
+ log("Identified as a _site plugin, moving to _site structure ...");
+ File site = new File(extractLocation, "_site");
+ File tmpLocation = new File(environment.pluginsFile(), extractLocation.getName() + ".tmp");
+ if (!extractLocation.renameTo(tmpLocation)) {
+ throw new IOException("failed to rename in order to copy to _site (rename to " + tmpLocation.getAbsolutePath() + "");
+ }
+ FileSystemUtils.mkdirs(extractLocation);
+ if (!tmpLocation.renameTo(site)) {
+ throw new IOException("failed to rename in order to copy to _site (rename to " + site.getAbsolutePath() + "");
+ }
+ debug("Installed " + name + " into " + site.getAbsolutePath());
+ }
+ }
+ }
+
+ public void removePlugin(String name) throws IOException {
+ PluginHandle pluginHandle = PluginHandle.parse(name);
+ boolean removed = false;
+
+ if (Strings.isNullOrEmpty(pluginHandle.name)) {
+ throw new ElasticsearchIllegalArgumentException("plugin name is incorrect");
+ }
+
+ File pluginToDelete = pluginHandle.extractedDir(environment);
+ if (pluginToDelete.exists()) {
+ debug("Removing: " + pluginToDelete.getPath());
+ FileSystemUtils.deleteRecursively(pluginToDelete, true);
+ removed = true;
+ }
+ pluginToDelete = pluginHandle.distroFile(environment);
+ if (pluginToDelete.exists()) {
+ debug("Removing: " + pluginToDelete.getPath());
+ pluginToDelete.delete();
+ removed = true;
+ }
+ File binLocation = pluginHandle.binDir(environment);
+ if (binLocation.exists()) {
+ debug("Removing: " + binLocation.getPath());
+ FileSystemUtils.deleteRecursively(binLocation);
+ removed = true;
+ }
+ if (removed) {
+ log("Removed " + name);
+ } else {
+ log("Plugin " + name + " not found. Run plugin --list to get list of installed plugins.");
+ }
+ }
+
+ public File[] getListInstalledPlugins() {
+ File[] plugins = environment.pluginsFile().listFiles();
+ return plugins;
+ }
+
+ public void listInstalledPlugins() {
+ File[] plugins = getListInstalledPlugins();
+ log("Installed plugins:");
+ if (plugins == null || plugins.length == 0) {
+ log(" - No plugin detected in " + environment.pluginsFile().getAbsolutePath());
+ } else {
+ for (int i = 0; i < plugins.length; i++) {
+ log(" - " + plugins[i].getName());
+ }
+ }
+ }
+
+ private boolean topLevelDirInExcess(ZipFile zipFile) {
+ //We don't rely on ZipEntry#isDirectory because the archive might contain no explicit dir entries,
+ //yet the file paths may contain dirs, so the files would be extracted into sub-folders anyway
+ Enumeration<? extends ZipEntry> zipEntries = zipFile.entries();
+ Set<String> topLevelDirNames = new HashSet<String>();
+ while (zipEntries.hasMoreElements()) {
+ ZipEntry zipEntry = zipEntries.nextElement();
+ String zipEntryName = zipEntry.getName().replace('\\', '/');
+
+ int slash = zipEntryName.indexOf('/');
+ //if there isn't a slash in the entry name it means that we have a file in the top-level
+ if (slash == -1) {
+ return false;
+ }
+
+ topLevelDirNames.add(zipEntryName.substring(0, slash));
+ //if we have more than one top-level folder, there is no single dir in excess to strip
+ if (topLevelDirNames.size() > 1) {
+ return false;
+ }
+ }
+ return topLevelDirNames.size() == 1 && !"_site".equals(topLevelDirNames.iterator().next());
+ }
+
+ private static final int EXIT_CODE_OK = 0;
+ private static final int EXIT_CODE_CMD_USAGE = 64;
+ private static final int EXIT_CODE_IO_ERROR = 74;
+ private static final int EXIT_CODE_ERROR = 70;
+
+ public static void main(String[] args) {
+ Tuple<Settings, Environment> initialSettings = InternalSettingsPreparer.prepareSettings(EMPTY_SETTINGS, true);
+
+ if (!initialSettings.v2().pluginsFile().exists()) {
+ FileSystemUtils.mkdirs(initialSettings.v2().pluginsFile());
+ }
+
+ String url = null;
+ OutputMode outputMode = OutputMode.DEFAULT;
+ String pluginName = null;
+ TimeValue timeout = DEFAULT_TIMEOUT;
+ int action = ACTION.NONE;
+
+ if (args.length < 1) {
+ displayHelp(null);
+ }
+
+ try {
+ for (int c = 0; c < args.length; c++) {
+ String command = args[c];
+ if ("-u".equals(command) || "--url".equals(command)
+ // Deprecated commands
+ || "url".equals(command) || "-url".equals(command)) {
+ url = args[++c];
+ } else if ("-v".equals(command) || "--verbose".equals(command)
+ || "verbose".equals(command) || "-verbose".equals(command)) {
+ outputMode = OutputMode.VERBOSE;
+ } else if ("-s".equals(command) || "--silent".equals(command)
+ || "silent".equals(command) || "-silent".equals(command)) {
+ outputMode = OutputMode.SILENT;
+ } else if (command.equals("-i") || command.equals("--install")
+ // Deprecated commands
+ || command.equals("install") || command.equals("-install")) {
+ pluginName = args[++c];
+ action = ACTION.INSTALL;
+ } else if (command.equals("-t") || command.equals("--timeout")
+ || command.equals("timeout") || command.equals("-timeout")) {
+ timeout = TimeValue.parseTimeValue(args[++c], DEFAULT_TIMEOUT);
+ } else if (command.equals("-r") || command.equals("--remove")
+ // Deprecated commands
+ || command.equals("remove") || command.equals("-remove")) {
+ pluginName = args[++c];
+
+ action = ACTION.REMOVE;
+ } else if (command.equals("-l") || command.equals("--list")) {
+ action = ACTION.LIST;
+ } else if (command.equals("-h") || command.equals("--help")) {
+ displayHelp(null);
+ } else {
+ displayHelp("Command [" + args[c] + "] unknown.");
+ // Unknown command. We break...
+ System.exit(EXIT_CODE_CMD_USAGE);
+ }
+ }
+ } catch (Throwable e) {
+ displayHelp("Error while parsing options: " + e.getClass().getSimpleName() +
+ ": " + e.getMessage());
+ System.exit(EXIT_CODE_CMD_USAGE);
+ }
+
+ if (action > ACTION.NONE) {
+ int exitCode = EXIT_CODE_ERROR; // we fail unless it's reset
+ PluginManager pluginManager = new PluginManager(initialSettings.v2(), url, outputMode, timeout);
+ switch (action) {
+ case ACTION.INSTALL:
+ try {
+ pluginManager.log("-> Installing " + pluginName + "...");
+ pluginManager.downloadAndExtract(pluginName);
+ exitCode = EXIT_CODE_OK;
+ } catch (IOException e) {
+ exitCode = EXIT_CODE_IO_ERROR;
+ pluginManager.log("Failed to install " + pluginName + ", reason: " + e.getMessage());
+ } catch (Throwable e) {
+ exitCode = EXIT_CODE_ERROR;
+ displayHelp("Error while installing plugin, reason: " + e.getClass().getSimpleName() +
+ ": " + e.getMessage());
+ }
+ break;
+ case ACTION.REMOVE:
+ try {
+ pluginManager.log("-> Removing " + pluginName + " ");
+ pluginManager.removePlugin(pluginName);
+ exitCode = EXIT_CODE_OK;
+ } catch (ElasticsearchIllegalArgumentException e) {
+ exitCode = EXIT_CODE_CMD_USAGE;
+ pluginManager.log("Failed to remove " + pluginName + ", reason: " + e.getMessage());
+ } catch (IOException e) {
+ exitCode = EXIT_CODE_IO_ERROR;
+ pluginManager.log("Failed to remove " + pluginName + ", reason: " + e.getMessage());
+ } catch (Throwable e) {
+ exitCode = EXIT_CODE_ERROR;
+ displayHelp("Error while removing plugin, reason: " + e.getClass().getSimpleName() +
+ ": " + e.getMessage());
+ }
+ break;
+ case ACTION.LIST:
+ try {
+ pluginManager.listInstalledPlugins();
+ exitCode = EXIT_CODE_OK;
+ } catch (Throwable e) {
+ displayHelp("Error while listing plugins, reason: " + e.getClass().getSimpleName() +
+ ": " + e.getMessage());
+ }
+ break;
+
+ default:
+ pluginManager.log("Unknown Action [" + action + "]");
+ exitCode = EXIT_CODE_ERROR;
+
+ }
+ System.exit(exitCode); // exit here!
+ }
+ }
+
+ private static void displayHelp(String message) {
+ System.out.println("Usage:");
+ System.out.println(" -u, --url [plugin location] : Set exact URL to download the plugin from");
+ System.out.println(" -i, --install [plugin name] : Downloads and installs listed plugins [*]");
+ System.out.println(" -t, --timeout [duration] : Timeout setting: 30s, 1m, 1h... (infinite by default)");
+ System.out.println(" -r, --remove [plugin name] : Removes listed plugins");
+ System.out.println(" -l, --list : List installed plugins");
+ System.out.println(" -v, --verbose : Prints verbose messages");
+ System.out.println(" -s, --silent : Run in silent mode");
+ System.out.println(" -h, --help : Prints this help message");
+ System.out.println();
+ System.out.println(" [*] Plugin name could be:");
+ System.out.println(" elasticsearch/plugin/version for official elasticsearch plugins (download from download.elasticsearch.org)");
+ System.out.println(" groupId/artifactId/version for community plugins (download from maven central or oss sonatype)");
+ System.out.println(" username/repository for site plugins (download from github master)");
+
+ if (message != null) {
+ System.out.println();
+ System.out.println("Message:");
+ System.out.println(" " + message);
+ }
+ }
+
+ private void debug(String line) {
+ if (outputMode == OutputMode.VERBOSE) System.out.println(line);
+ }
+
+ private void log(String line) {
+ if (outputMode != OutputMode.SILENT) System.out.println(line);
+ }
+
+ /**
+ * Helper class to properly extract the user name, repository name, version and plugin name
+ * from the plugin name given by a user.
+ */
+ static class PluginHandle {
+
+ final String name;
+ final String version;
+ final String user;
+ final String repo;
+
+ PluginHandle(String name, String version, String user, String repo) {
+ this.name = name;
+ this.version = version;
+ this.user = user;
+ this.repo = repo;
+ }
+
+ List<URL> urls() {
+ List<URL> urls = new ArrayList<URL>();
+ if (version != null) {
+ // Elasticsearch download service
+ addUrl(urls, "http://download.elasticsearch.org/" + user + "/" + repo + "/" + repo + "-" + version + ".zip");
+ // Maven central repository
+ addUrl(urls, "http://search.maven.org/remotecontent?filepath=" + user.replace('.', '/') + "/" + repo + "/" + version + "/" + repo + "-" + version + ".zip");
+ // Sonatype repository
+ addUrl(urls, "https://oss.sonatype.org/service/local/repositories/releases/content/" + user.replace('.', '/') + "/" + repo + "/" + version + "/" + repo + "-" + version + ".zip");
+ // Github repository
+ addUrl(urls, "https://github.com/" + user + "/" + repo + "/archive/v" + version + ".zip");
+ }
+ // Github repository for master branch (assumed to be a site plugin)
+ addUrl(urls, "https://github.com/" + user + "/" + repo + "/archive/master.zip");
+ return urls;
+ }
+
+ private static void addUrl(List<URL> urls, String url) {
+ try {
+ urls.add(new URL(url));
+ } catch (MalformedURLException e) {
+ // We simply ignore malformed URL
+ }
+ }
+
+ File distroFile(Environment env) {
+ return new File(env.pluginsFile(), name + ".zip");
+ }
+
+ File extractedDir(Environment env) {
+ return new File(env.pluginsFile(), name);
+ }
+
+ File binDir(Environment env) {
+ return new File(new File(env.homeFile(), "bin"), name);
+ }
+
+ static PluginHandle parse(String name) {
+ String[] elements = name.split("/");
+ // We first consider the simplest form: pluginname
+ String repo = elements[0];
+ String user = null;
+ String version = null;
+
+ // We consider the form: username/pluginname
+ if (elements.length > 1) {
+ user = elements[0];
+ repo = elements[1];
+
+ // We consider the form: username/pluginname/version
+ if (elements.length > 2) {
+ version = elements[2];
+ }
+ }
+
+ if (repo.startsWith("elasticsearch-")) {
+ // remove elasticsearch- prefix
+ String endname = repo.substring("elasticsearch-".length());
+ return new PluginHandle(endname, version, user, repo);
+ }
+
+ if (name.startsWith("es-")) {
+ // remove es- prefix
+ String endname = repo.substring("es-".length());
+ return new PluginHandle(endname, version, user, repo);
+ }
+
+ return new PluginHandle(repo, version, user, repo);
+ }
+ }
+
+}
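For illustration, a sketch of how PluginHandle.parse resolves the three name forms listed in displayHelp; the plugin names below are hypothetical, and since PluginHandle is package-private the sketch assumes code living in the org.elasticsearch.plugins package:

package org.elasticsearch.plugins;

class PluginHandleExamples {

    static void examples() {
        // official plugin, elasticsearch/<repo>/<version>: user = "elasticsearch",
        // repo = "elasticsearch-analysis-icu", version = "2.0.0"; the
        // "elasticsearch-" prefix is stripped, so name = "analysis-icu", and
        // urls() tries download.elasticsearch.org, Maven Central, Sonatype, then GitHub
        PluginManager.PluginHandle official =
                PluginManager.PluginHandle.parse("elasticsearch/elasticsearch-analysis-icu/2.0.0");

        // community plugin, groupId/artifactId/version: dots in the groupId are
        // replaced by '/' in the Maven Central and Sonatype URLs
        PluginManager.PluginHandle community =
                PluginManager.PluginHandle.parse("com.example/my-plugin/1.0.0");

        // site plugin, username/repository with no version: only the GitHub
        // master archive URL is tried
        PluginManager.PluginHandle site =
                PluginManager.PluginHandle.parse("someuser/somerepo");
    }
}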
diff --git a/src/main/java/org/elasticsearch/plugins/PluginsModule.java b/src/main/java/org/elasticsearch/plugins/PluginsModule.java
new file mode 100644
index 0000000..baf9f59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/PluginsModule.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.inject.Modules.createModule;
+
+/**
+ * Spawns the node-level modules contributed by plugins and binds the {@link PluginsService}.
+ */
+public class PluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
+
+ private final Settings settings;
+
+ private final PluginsService pluginsService;
+
+ public PluginsModule(Settings settings, PluginsService pluginsService) {
+ this.settings = settings;
+ this.pluginsService = pluginsService;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ List<Module> modules = Lists.newArrayList();
+ Collection<Class<? extends Module>> modulesClasses = pluginsService.modules();
+ for (Class<? extends Module> moduleClass : modulesClasses) {
+ modules.add(createModule(moduleClass, settings));
+ }
+ modules.addAll(pluginsService.modules(settings));
+ return modules;
+ }
+
+ @Override
+ public void processModule(Module module) {
+ pluginsService.processModule(module);
+ }
+
+ @Override
+ protected void configure() {
+ bind(PluginsService.class).toInstance(pluginsService);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/plugins/PluginsService.java b/src/main/java/org/elasticsearch/plugins/PluginsService.java
new file mode 100644
index 0000000..159ca15
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/PluginsService.java
@@ -0,0 +1,519 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.*;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.CloseableIndexComponent;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.util.*;
+
+/**
+ * Manages the plugins loaded on a node: jvm plugins found in the settings and on the classpath, and site plugins found in the plugins directory.
+ */
+public class PluginsService extends AbstractComponent {
+ private static final String ES_PLUGIN_PROPERTIES = "es-plugin.properties";
+
+ private final Environment environment;
+
+ /**
+ * We keep around a list of jvm plugins
+ */
+ private final ImmutableList<Tuple<PluginInfo, Plugin>> plugins;
+
+ private final ImmutableMap<Plugin, List<OnModuleReference>> onModuleReferences;
+
+ private PluginsInfo cachedPluginsInfo;
+ private final TimeValue refreshInterval;
+ private long lastRefresh;
+
+ static class OnModuleReference {
+ public final Class<? extends Module> moduleClass;
+ public final Method onModuleMethod;
+
+ OnModuleReference(Class<? extends Module> moduleClass, Method onModuleMethod) {
+ this.moduleClass = moduleClass;
+ this.onModuleMethod = onModuleMethod;
+ }
+ }
+
+ /**
+ * Constructs a new PluginsService
+ * @param settings The settings of the system
+ * @param environment The environment of the system
+ */
+ public PluginsService(Settings settings, Environment environment) {
+ super(settings);
+ this.environment = environment;
+
+ ImmutableList.Builder<Tuple<PluginInfo, Plugin>> tupleBuilder = ImmutableList.builder();
+
+ // first we load all the default plugins from the settings
+ String[] defaultPluginsClasses = settings.getAsArray("plugin.types");
+ for (String pluginClass : defaultPluginsClasses) {
+ Plugin plugin = loadPlugin(pluginClass, settings);
+ PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), hasSite(plugin.name()), true, PluginInfo.VERSION_NOT_AVAILABLE);
+ if (logger.isTraceEnabled()) {
+ logger.trace("plugin loaded from settings [{}]", pluginInfo);
+ }
+ tupleBuilder.add(new Tuple<PluginInfo, Plugin>(pluginInfo, plugin));
+ }
+
+ // now, find all the ones that are in the classpath
+ loadPluginsIntoClassLoader();
+ tupleBuilder.addAll(loadPluginsFromClasspath(settings));
+ this.plugins = tupleBuilder.build();
+
+ // We need to build a List of jvm and site plugins for checking mandatory plugins
+ Map<String, Plugin> jvmPlugins = Maps.newHashMap();
+ List<String> sitePlugins = Lists.newArrayList();
+
+ for (Tuple<PluginInfo, Plugin> tuple : this.plugins) {
+ jvmPlugins.put(tuple.v2().name(), tuple.v2());
+ if (tuple.v1().isSite()) {
+ sitePlugins.add(tuple.v1().getName());
+ }
+ }
+
+ // we load site plugins
+ ImmutableList<Tuple<PluginInfo, Plugin>> tuples = loadSitePlugins();
+ for (Tuple<PluginInfo, Plugin> tuple : tuples) {
+ sitePlugins.add(tuple.v1().getName());
+ }
+
+ // Checking expected plugins
+ String[] mandatoryPlugins = settings.getAsArray("plugin.mandatory", null);
+ if (mandatoryPlugins != null) {
+ Set<String> missingPlugins = Sets.newHashSet();
+ for (String mandatoryPlugin : mandatoryPlugins) {
+ if (!jvmPlugins.containsKey(mandatoryPlugin) && !sitePlugins.contains(mandatoryPlugin) && !missingPlugins.contains(mandatoryPlugin)) {
+ missingPlugins.add(mandatoryPlugin);
+ }
+ }
+ if (!missingPlugins.isEmpty()) {
+ throw new ElasticsearchException("Missing mandatory plugins [" + Strings.collectionToDelimitedString(missingPlugins, ", ") + "]");
+ }
+ }
+
+ logger.info("loaded {}, sites {}", jvmPlugins.keySet(), sitePlugins);
+
+ MapBuilder<Plugin, List<OnModuleReference>> onModuleReferences = MapBuilder.newMapBuilder();
+ for (Plugin plugin : jvmPlugins.values()) {
+ List<OnModuleReference> list = Lists.newArrayList();
+ for (Method method : plugin.getClass().getDeclaredMethods()) {
+ if (!method.getName().equals("onModule")) {
+ continue;
+ }
+ if (method.getParameterTypes().length == 0 || method.getParameterTypes().length > 1) {
+ logger.warn("Plugin: {} implementing onModule with no parameters or more than one parameter", plugin.name());
+ continue;
+ }
+ Class moduleClass = method.getParameterTypes()[0];
+ if (!Module.class.isAssignableFrom(moduleClass)) {
+ logger.warn("Plugin: {} implementing onModule by the type is not of Module type {}", plugin.name(), moduleClass);
+ continue;
+ }
+ method.setAccessible(true);
+ list.add(new OnModuleReference(moduleClass, method));
+ }
+ if (!list.isEmpty()) {
+ onModuleReferences.put(plugin, list);
+ }
+ }
+ this.onModuleReferences = onModuleReferences.immutableMap();
+
+ this.refreshInterval = componentSettings.getAsTime("info_refresh_interval", TimeValue.timeValueSeconds(10));
+
+ }
+
+ public ImmutableList<Tuple<PluginInfo, Plugin>> plugins() {
+ return plugins;
+ }
+
+ public void processModules(Iterable<Module> modules) {
+ for (Module module : modules) {
+ processModule(module);
+ }
+ }
+
+ public void processModule(Module module) {
+ for (Tuple<PluginInfo, Plugin> plugin : plugins()) {
+ plugin.v2().processModule(module);
+ // see if there are onModule references
+ List<OnModuleReference> references = onModuleReferences.get(plugin.v2());
+ if (references != null) {
+ for (OnModuleReference reference : references) {
+ if (reference.moduleClass.isAssignableFrom(module.getClass())) {
+ try {
+ reference.onModuleMethod.invoke(plugin.v2(), module);
+ } catch (Exception e) {
+ logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v2().name());
+ }
+ }
+ }
+ }
+ }
+ }
+
+ public Settings updatedSettings() {
+ ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder()
+ .put(this.settings);
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ builder.put(plugin.v2().additionalSettings());
+ }
+ return builder.build();
+ }
+
+ public Collection<Class<? extends Module>> modules() {
+ List<Class<? extends Module>> modules = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ modules.addAll(plugin.v2().modules());
+ }
+ return modules;
+ }
+
+ public Collection<Module> modules(Settings settings) {
+ List<Module> modules = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ modules.addAll(plugin.v2().modules(settings));
+ }
+ return modules;
+ }
+
+ public Collection<Class<? extends LifecycleComponent>> services() {
+ List<Class<? extends LifecycleComponent>> services = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ services.addAll(plugin.v2().services());
+ }
+ return services;
+ }
+
+ public Collection<Class<? extends Module>> indexModules() {
+ List<Class<? extends Module>> modules = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ modules.addAll(plugin.v2().indexModules());
+ }
+ return modules;
+ }
+
+ public Collection<Module> indexModules(Settings settings) {
+ List<Module> modules = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ modules.addAll(plugin.v2().indexModules(settings));
+ }
+ return modules;
+ }
+
+ public Collection<Class<? extends CloseableIndexComponent>> indexServices() {
+ List<Class<? extends CloseableIndexComponent>> services = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ services.addAll(plugin.v2().indexServices());
+ }
+ return services;
+ }
+
+ public Collection<Class<? extends Module>> shardModules() {
+ List<Class<? extends Module>> modules = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ modules.addAll(plugin.v2().shardModules());
+ }
+ return modules;
+ }
+
+ public Collection<Module> shardModules(Settings settings) {
+ List<Module> modules = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ modules.addAll(plugin.v2().shardModules(settings));
+ }
+ return modules;
+ }
+
+ public Collection<Class<? extends CloseableIndexComponent>> shardServices() {
+ List<Class<? extends CloseableIndexComponent>> services = Lists.newArrayList();
+ for (Tuple<PluginInfo, Plugin> plugin : plugins) {
+ services.addAll(plugin.v2().shardServices());
+ }
+ return services;
+ }
+
+ /**
+ * Get information about plugins (jvm and site plugins).
+ * Information is cached for 10 seconds by default. Modify the `plugins.info_refresh_interval` property if needed.
+ * Setting `plugins.info_refresh_interval` to `-1` will cause infinite caching.
+ * Setting `plugins.info_refresh_interval` to `0` will disable caching.
+ * @return List of plugins information
+ */
+ public synchronized PluginsInfo info() {
+ if (refreshInterval.millis() != 0) {
+ if (cachedPluginsInfo != null &&
+ (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("using cache to retrieve plugins info");
+ }
+ return cachedPluginsInfo;
+ }
+ lastRefresh = System.currentTimeMillis();
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("starting to fetch info on plugins");
+ }
+ cachedPluginsInfo = new PluginsInfo();
+
+ // We first add all JvmPlugins
+ for (Tuple<PluginInfo, Plugin> plugin : this.plugins) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("adding jvm plugin [{}]", plugin.v1());
+ }
+ cachedPluginsInfo.add(plugin.v1());
+ }
+
+ // We reload site plugins (in case of some changes)
+ for (Tuple<PluginInfo, Plugin> plugin : loadSitePlugins()) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("adding site plugin [{}]", plugin.v1());
+ }
+ cachedPluginsInfo.add(plugin.v1());
+ }
+
+ return cachedPluginsInfo;
+ }
+
+ private void loadPluginsIntoClassLoader() {
+ File pluginsFile = environment.pluginsFile();
+ if (!pluginsFile.exists()) {
+ return;
+ }
+ if (!pluginsFile.isDirectory()) {
+ return;
+ }
+
+ ClassLoader classLoader = settings.getClassLoader();
+ Class classLoaderClass = classLoader.getClass();
+ Method addURL = null;
+ while (!classLoaderClass.equals(Object.class)) {
+ try {
+ addURL = classLoaderClass.getDeclaredMethod("addURL", URL.class);
+ addURL.setAccessible(true);
+ break;
+ } catch (NoSuchMethodException e) {
+ // no method, try the parent
+ classLoaderClass = classLoaderClass.getSuperclass();
+ }
+ }
+ if (addURL == null) {
+ logger.debug("failed to find addURL method on classLoader [" + classLoader + "] to add methods");
+ return;
+ }
+
+ File[] pluginsFiles = pluginsFile.listFiles();
+ if (pluginsFiles != null) {
+ for (File pluginFile : pluginsFiles) {
+ if (pluginFile.isDirectory()) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("--- adding plugin [" + pluginFile.getAbsolutePath() + "]");
+ }
+ try {
+ // add the root
+ addURL.invoke(classLoader, pluginFile.toURI().toURL());
+ // gather files to add
+ List<File> libFiles = Lists.newArrayList();
+ if (pluginFile.listFiles() != null) {
+ libFiles.addAll(Arrays.asList(pluginFile.listFiles()));
+ }
+ File libLocation = new File(pluginFile, "lib");
+ if (libLocation.exists() && libLocation.isDirectory() && libLocation.listFiles() != null) {
+ libFiles.addAll(Arrays.asList(libLocation.listFiles()));
+ }
+
+ // if there are jars or zips in it, add them as well
+ for (File libFile : libFiles) {
+ if (!(libFile.getName().endsWith(".jar") || libFile.getName().endsWith(".zip"))) {
+ continue;
+ }
+ addURL.invoke(classLoader, libFile.toURI().toURL());
+ }
+ } catch (Throwable e) {
+ logger.warn("failed to add plugin [" + pluginFile + "]", e);
+ }
+ }
+ }
+ } else {
+ logger.debug("failed to list plugins from {}. Check your right access.", pluginsFile.getAbsolutePath());
+ }
+ }
+
+ private ImmutableList<Tuple<PluginInfo,Plugin>> loadPluginsFromClasspath(Settings settings) {
+ ImmutableList.Builder<Tuple<PluginInfo, Plugin>> plugins = ImmutableList.builder();
+
+ // Trying JVM plugins: looking for es-plugin.properties files
+ try {
+ Enumeration<URL> pluginUrls = settings.getClassLoader().getResources(ES_PLUGIN_PROPERTIES);
+ while (pluginUrls.hasMoreElements()) {
+ URL pluginUrl = pluginUrls.nextElement();
+ Properties pluginProps = new Properties();
+ InputStream is = null;
+ try {
+ is = pluginUrl.openStream();
+ pluginProps.load(is);
+ String pluginClassName = pluginProps.getProperty("plugin");
+ String pluginVersion = pluginProps.getProperty("version", PluginInfo.VERSION_NOT_AVAILABLE);
+ Plugin plugin = loadPlugin(pluginClassName, settings);
+
+ // Is it a site plugin as well? Does it also have an embedded _site structure?
+ File siteFile = new File(new File(environment.pluginsFile(), plugin.name()), "_site");
+ boolean isSite = siteFile.exists() && siteFile.isDirectory();
+ if (logger.isTraceEnabled()) {
+ logger.trace("found a jvm plugin [{}], [{}]{}",
+ plugin.name(), plugin.description(), isSite ? ": with _site structure" : "");
+ }
+
+ PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), isSite, true, pluginVersion);
+
+ plugins.add(new Tuple<PluginInfo, Plugin>(pluginInfo, plugin));
+ } catch (Throwable e) {
+ logger.warn("failed to load plugin from [" + pluginUrl + "]", e);
+ } finally {
+ IOUtils.closeWhileHandlingException(is);
+ }
+ }
+ } catch (IOException e) {
+ logger.warn("failed to find jvm plugins from classpath", e);
+ }
+
+ return plugins.build();
+ }
+
+ private ImmutableList<Tuple<PluginInfo,Plugin>> loadSitePlugins() {
+ ImmutableList.Builder<Tuple<PluginInfo, Plugin>> sitePlugins = ImmutableList.builder();
+ List<String> loadedJvmPlugins = new ArrayList<String>();
+
+ // Already known jvm plugins are ignored
+ for(Tuple<PluginInfo, Plugin> tuple : plugins) {
+ if (tuple.v1().isSite()) {
+ loadedJvmPlugins.add(tuple.v1().getName());
+ }
+ }
+
+ // Let's try to find all _site plugins we have not already found
+ File pluginsFile = environment.pluginsFile();
+
+ if (!pluginsFile.exists() || !pluginsFile.isDirectory()) {
+ return sitePlugins.build();
+ }
+
+ for (File pluginFile : pluginsFile.listFiles()) {
+ if (!loadedJvmPlugins.contains(pluginFile.getName())) {
+ File sitePluginDir = new File(pluginFile, "_site");
+ if (sitePluginDir.exists()) {
+ // We have a _site plugin. Let's try to get more information on it
+ String name = pluginFile.getName();
+ String version = PluginInfo.VERSION_NOT_AVAILABLE;
+ String description = PluginInfo.DESCRIPTION_NOT_AVAILABLE;
+
+ // We check if es-plugin.properties exists in plugin/_site dir
+ File pluginPropFile = new File(sitePluginDir, ES_PLUGIN_PROPERTIES);
+ if (pluginPropFile.exists()) {
+
+ Properties pluginProps = new Properties();
+ InputStream is = null;
+ try {
+ is = new FileInputStream(pluginPropFile.getAbsolutePath());
+ pluginProps.load(is);
+ description = pluginProps.getProperty("description", PluginInfo.DESCRIPTION_NOT_AVAILABLE);
+ version = pluginProps.getProperty("version", PluginInfo.VERSION_NOT_AVAILABLE);
+ } catch (Exception e) {
+ // Can not load properties for this site plugin. Ignoring.
+ logger.debug("can not load {} file.", e, ES_PLUGIN_PROPERTIES);
+ } finally {
+ IOUtils.closeWhileHandlingException(is);
+ }
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("found a site plugin name [{}], version [{}], description [{}]",
+ name, version, description);
+ }
+ sitePlugins.add(new Tuple<PluginInfo, Plugin>(new PluginInfo(name, description, true, false, version), null));
+ }
+ }
+ }
+
+ return sitePlugins.build();
+ }
+
+ /**
+ * @param name plugin name
+ * @return true if this jvm plugin also has a _site structure
+ */
+ private boolean hasSite(String name) {
+ // Check whether the plugin's directory contains a _site structure
+ File pluginsFile = environment.pluginsFile();
+
+ if (!pluginsFile.exists() || !pluginsFile.isDirectory()) {
+ return false;
+ }
+
+ File sitePluginDir = new File(pluginsFile, name + "/_site");
+ return sitePluginDir.exists();
+ }
+
+ private Plugin loadPlugin(String className, Settings settings) {
+ try {
+ Class<? extends Plugin> pluginClass = (Class<? extends Plugin>) settings.getClassLoader().loadClass(className);
+ Plugin plugin;
+ try {
+ plugin = pluginClass.getConstructor(Settings.class).newInstance(settings);
+ } catch (NoSuchMethodException e) {
+ try {
+ plugin = pluginClass.getConstructor().newInstance();
+ } catch (NoSuchMethodException e1) {
+ throw new ElasticsearchException("No constructor for [" + pluginClass + "]. A plugin class must " +
+ "have either an empty default constructor or a single argument constructor accepting a " +
+ "Settings instance");
+ }
+ }
+
+ return plugin;
+
+ } catch (Throwable e) {
+ throw new ElasticsearchException("Failed to load plugin class [" + className + "]", e);
+ }
+ }
+}
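A jvm plugin announces itself through an es-plugin.properties resource at the root of its jar; loadPluginsFromClasspath reads the plugin and version keys (description is only read for pure site plugins). A minimal descriptor sketch, with a hypothetical class name:

# es-plugin.properties, at the root of the plugin jar
plugin=org.example.ExamplePlugin
version=1.0.0

Nodes can additionally require plugins through the plugin.mandatory setting; per the constructor above, startup fails with "Missing mandatory plugins [...]" if any listed plugin is absent.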
diff --git a/src/main/java/org/elasticsearch/plugins/ShardsPluginsModule.java b/src/main/java/org/elasticsearch/plugins/ShardsPluginsModule.java
new file mode 100644
index 0000000..b9e2178
--- /dev/null
+++ b/src/main/java/org/elasticsearch/plugins/ShardsPluginsModule.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.inject.Modules.createModule;
+
+/**
+ * Spawns the per-shard modules contributed by plugins and lets plugins pre-process each module.
+ */
+public class ShardsPluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
+
+ private final Settings settings;
+
+ private final PluginsService pluginsService;
+
+ public ShardsPluginsModule(Settings settings, PluginsService pluginsService) {
+ this.settings = settings;
+ this.pluginsService = pluginsService;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ List<Module> modules = Lists.newArrayList();
+ Collection<Class<? extends Module>> modulesClasses = pluginsService.shardModules();
+ for (Class<? extends Module> moduleClass : modulesClasses) {
+ modules.add(createModule(moduleClass, settings));
+ }
+ modules.addAll(pluginsService.shardModules(settings));
+ return modules;
+ }
+
+ @Override
+ public void processModule(Module module) {
+ pluginsService.processModule(module);
+ }
+
+ @Override
+ protected void configure() {
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java
new file mode 100644
index 0000000..11024bb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.repositories.fs.FsRepository;
+import org.elasticsearch.repositories.fs.FsRepositoryModule;
+import org.elasticsearch.repositories.uri.URLRepository;
+import org.elasticsearch.repositories.uri.URLRepositoryModule;
+import org.elasticsearch.snapshots.RestoreService;
+import org.elasticsearch.snapshots.SnapshotsService;
+
+import java.util.Map;
+
+/**
+ * Module responsible for registering repository types.
+ * <p/>
+ * Repositories implemented as plugins should implement an {@code onModule(RepositoriesModule module)} method, in which
+ * they should register the repository type using the {@link #registerRepository(String, Class)} method.
+ */
+public class RepositoriesModule extends AbstractModule {
+
+ private Map<String, Class<? extends Module>> repositoryTypes = Maps.newHashMap();
+
+ public RepositoriesModule() {
+ registerRepository(FsRepository.TYPE, FsRepositoryModule.class);
+ registerRepository(URLRepository.TYPE, URLRepositoryModule.class);
+ }
+
+ /**
+ * Registers a custom repository type name against a module.
+ *
+ * @param type The type
+ * @param module The module
+ */
+ public void registerRepository(String type, Class<? extends Module> module) {
+ repositoryTypes.put(type, module);
+ }
+
+ @Override
+ protected void configure() {
+ bind(RepositoriesService.class).asEagerSingleton();
+ bind(SnapshotsService.class).asEagerSingleton();
+ bind(RestoreService.class).asEagerSingleton();
+ bind(RepositoryTypesRegistry.class).toInstance(new RepositoryTypesRegistry(ImmutableMap.copyOf(repositoryTypes)));
+ }
+}
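Following the Javadoc above, a repository plugin would hook in through an onModule method, which PluginsService invokes reflectively whenever a RepositoriesModule is processed. A sketch with hypothetical class names, not part of this commit:

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.plugins.AbstractPlugin;
import org.elasticsearch.repositories.RepositoriesModule;

public class ExampleRepositoryPlugin extends AbstractPlugin {

    // Hypothetical module that would bind the repository implementation.
    public static class ExampleRepositoryModule extends AbstractModule {
        @Override
        protected void configure() {
            // e.g. bind(Repository.class).to(ExampleRepository.class).asEagerSingleton();
        }
    }

    @Override
    public String name() {
        return "repository-example";
    }

    @Override
    public String description() {
        return "Example snapshot repository";
    }

    // Invoked reflectively for every RepositoriesModule that is processed;
    // registers the "example" repository type against the module class.
    public void onModule(RepositoriesModule module) {
        module.registerRepository("example", ExampleRepositoryModule.class);
    }
}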
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
new file mode 100644
index 0000000..7c9ad72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
@@ -0,0 +1,503 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Injectors;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.snapshots.RestoreService;
+import org.elasticsearch.snapshots.SnapshotsService;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ * Service responsible for maintaining and providing access to snapshot repositories on nodes.
+ */
+public class RepositoriesService extends AbstractComponent implements ClusterStateListener {
+
+ private final RepositoryTypesRegistry typesRegistry;
+
+ private final Injector injector;
+
+ private final ClusterService clusterService;
+
+ private volatile ImmutableMap<String, RepositoryHolder> repositories = ImmutableMap.of();
+
+ @Inject
+ public RepositoriesService(Settings settings, ClusterService clusterService, RepositoryTypesRegistry typesRegistry, Injector injector) {
+ super(settings);
+ this.typesRegistry = typesRegistry;
+ this.injector = injector;
+ this.clusterService = clusterService;
+ // Doesn't make sense to maintain repositories on non-master and non-data nodes
+ // Nothing happens there anyway
+ if (DiscoveryNode.dataNode(settings) || DiscoveryNode.masterNode(settings)) {
+ clusterService.add(this);
+ }
+ }
+
+ /**
+     * Registers a new repository in the cluster
+     * <p/>
+     * This method can be called only on the master node. It tries to create the new repository on the master
+     * and, if successful, adds the new repository to the cluster metadata.
+ *
+ * @param request register repository request
+ * @param listener register repository listener
+ */
+ public void registerRepository(final RegisterRepositoryRequest request, final ActionListener<RegisterRepositoryResponse> listener) {
+ final RepositoryMetaData newRepositoryMetaData = new RepositoryMetaData(request.name, request.type, request.settings);
+
+ clusterService.submitStateUpdateTask(request.cause, new AckedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ ensureRepositoryNotInUse(currentState, request.name);
+ // Trying to create the new repository on master to make sure it works
+ if (!registerRepository(newRepositoryMetaData)) {
+ // The new repository has the same settings as the old one - ignore
+ return currentState;
+ }
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
+ if (repositories == null) {
+ logger.info("put repository [{}]", request.name);
+ repositories = new RepositoriesMetaData(new RepositoryMetaData(request.name, request.type, request.settings));
+ } else {
+ boolean found = false;
+ List<RepositoryMetaData> repositoriesMetaData = new ArrayList<RepositoryMetaData>(repositories.repositories().size() + 1);
+
+ for (RepositoryMetaData repositoryMetaData : repositories.repositories()) {
+ if (repositoryMetaData.name().equals(newRepositoryMetaData.name())) {
+ found = true;
+ repositoriesMetaData.add(newRepositoryMetaData);
+ } else {
+ repositoriesMetaData.add(repositoryMetaData);
+ }
+ }
+ if (!found) {
+ logger.info("put repository [{}]", request.name);
+ repositoriesMetaData.add(new RepositoryMetaData(request.name, request.type, request.settings));
+ } else {
+ logger.info("update repository [{}]", request.name);
+ }
+ repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()]));
+ }
+ mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("failed to create repository [{}]", t, request.name);
+ listener.onFailure(t);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+            }
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return discoveryNode.masterNode();
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new RegisterRepositoryResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new RegisterRepositoryResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+ });
+ }
+
+ /**
+     * Unregisters a repository in the cluster
+     * <p/>
+     * This method can be called only on the master node. It removes the repository information from the cluster metadata.
+ *
+ * @param request unregister repository request
+ * @param listener unregister repository listener
+ */
+ public void unregisterRepository(final UnregisterRepositoryRequest request, final ActionListener<UnregisterRepositoryResponse> listener) {
+ clusterService.submitStateUpdateTask(request.cause, new AckedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ ensureRepositoryNotInUse(currentState, request.name);
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
+ if (repositories != null && repositories.repositories().size() > 0) {
+ List<RepositoryMetaData> repositoriesMetaData = new ArrayList<RepositoryMetaData>(repositories.repositories().size());
+ boolean changed = false;
+ for (RepositoryMetaData repositoryMetaData : repositories.repositories()) {
+ if (Regex.simpleMatch(request.name, repositoryMetaData.name())) {
+ logger.info("delete repository [{}]", repositoryMetaData.name());
+ changed = true;
+ } else {
+ repositoriesMetaData.add(repositoryMetaData);
+ }
+ }
+ if (changed) {
+ repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()]));
+ mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+ }
+ throw new RepositoryMissingException(request.name);
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+                // Since the operation occurs only on masters, it's enough for master-eligible nodes to ack
+ return discoveryNode.masterNode();
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ listener.onResponse(new UnregisterRepositoryResponse(true));
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new UnregisterRepositoryResponse(false));
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return request.ackTimeout();
+ }
+ });
+ }
+
+ /**
+     * Checks if new repositories appeared in or disappeared from the cluster metadata and updates the current list of
+     * repositories accordingly.
+ *
+ * @param event cluster changed event
+ */
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ try {
+ RepositoriesMetaData oldMetaData = event.previousState().getMetaData().custom(RepositoriesMetaData.TYPE);
+ RepositoriesMetaData newMetaData = event.state().getMetaData().custom(RepositoriesMetaData.TYPE);
+
+ // Check if repositories got changed
+ if ((oldMetaData == null && newMetaData == null) || (oldMetaData != null && oldMetaData.equals(newMetaData))) {
+ return;
+ }
+
+ Map<String, RepositoryHolder> survivors = newHashMap();
+ // First, remove repositories that are no longer there
+ for (Map.Entry<String, RepositoryHolder> entry : repositories.entrySet()) {
+ if (newMetaData == null || newMetaData.repository(entry.getKey()) == null) {
+ closeRepository(entry.getKey(), entry.getValue());
+ } else {
+ survivors.put(entry.getKey(), entry.getValue());
+ }
+ }
+
+ ImmutableMap.Builder<String, RepositoryHolder> builder = ImmutableMap.builder();
+ if (newMetaData != null) {
+ // Now go through all repositories and update existing or create missing
+ for (RepositoryMetaData repositoryMetaData : newMetaData.repositories()) {
+ RepositoryHolder holder = survivors.get(repositoryMetaData.name());
+ if (holder != null) {
+ // Found previous version of this repository
+ if (!holder.type.equals(repositoryMetaData.type()) || !holder.settings.equals(repositoryMetaData.settings())) {
+ // Previous version is different from the version in settings
+ closeRepository(repositoryMetaData.name(), holder);
+ holder = createRepositoryHolder(repositoryMetaData);
+ }
+ } else {
+ holder = createRepositoryHolder(repositoryMetaData);
+ }
+ if (holder != null) {
+ builder.put(repositoryMetaData.name(), holder);
+ }
+ }
+ }
+ repositories = builder.build();
+ } catch (Throwable ex) {
+ logger.warn("failure updating cluster state ", ex);
+ }
+ }
+
+ /**
+     * Returns a registered repository
+     * <p/>
+     * This method is called only on the master node
+     *
+     * @param repository repository name
+     * @return registered repository
+     * @throws RepositoryMissingException if no repository with the given name is registered
+ */
+ public Repository repository(String repository) {
+ RepositoryHolder holder = repositories.get(repository);
+ if (holder != null) {
+ return holder.repository;
+ }
+ throw new RepositoryMissingException(repository);
+ }
+
+ /**
+     * Returns a registered index shard repository
+     * <p/>
+     * This method is called only on data nodes
+     *
+     * @param repository repository name
+     * @return registered index shard repository
+     * @throws RepositoryMissingException if no repository with the given name is registered
+ */
+ public IndexShardRepository indexShardRepository(String repository) {
+ RepositoryHolder holder = repositories.get(repository);
+ if (holder != null) {
+ return holder.indexShardRepository;
+ }
+ throw new RepositoryMissingException(repository);
+ }
+
+ /**
+ * Creates a new repository and adds it to the list of registered repositories.
+ * <p/>
+     * If a repository with the same name but a different type or settings already exists, it will be closed and
+     * replaced with the new repository. If a repository with the same name, type and settings already exists,
+     * the new repository is ignored.
+ *
+ * @param repositoryMetaData new repository metadata
+ * @return {@code true} if new repository was added or {@code false} if it was ignored
+ */
+ private boolean registerRepository(RepositoryMetaData repositoryMetaData) {
+ RepositoryHolder previous = repositories.get(repositoryMetaData.name());
+ if (previous != null) {
+            if (previous.type.equals(repositoryMetaData.type()) && previous.settings.equals(repositoryMetaData.settings())) {
+ // Previous version is the same as this one - ignore it
+ return false;
+ }
+ }
+ RepositoryHolder holder = createRepositoryHolder(repositoryMetaData);
+ if (previous != null) {
+ // Closing previous version
+ closeRepository(repositoryMetaData.name(), previous);
+ }
+ Map<String, RepositoryHolder> newRepositories = newHashMap(repositories);
+ newRepositories.put(repositoryMetaData.name(), holder);
+ repositories = ImmutableMap.copyOf(newRepositories);
+ return true;
+ }
+
+ /**
+ * Closes the repository
+ *
+ * @param name repository name
+ * @param holder repository holder
+ */
+ private void closeRepository(String name, RepositoryHolder holder) {
+ logger.debug("closing repository [{}][{}]", holder.type, name);
+ if (holder.injector != null) {
+ Injectors.close(holder.injector);
+ }
+ if (holder.repository != null) {
+ holder.repository.close();
+ }
+ }
+
+ /**
+ * Creates repository holder
+ */
+ private RepositoryHolder createRepositoryHolder(RepositoryMetaData repositoryMetaData) {
+ logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name());
+ Injector repositoryInjector = null;
+ try {
+ ModulesBuilder modules = new ModulesBuilder();
+ RepositoryName name = new RepositoryName(repositoryMetaData.type(), repositoryMetaData.name());
+ modules.add(new RepositoryNameModule(name));
+ modules.add(new RepositoryModule(name, repositoryMetaData.settings(), this.settings, typesRegistry));
+
+ repositoryInjector = modules.createChildInjector(injector);
+ Repository repository = repositoryInjector.getInstance(Repository.class);
+ IndexShardRepository indexShardRepository = repositoryInjector.getInstance(IndexShardRepository.class);
+ repository.start();
+ return new RepositoryHolder(repositoryMetaData.type(), repositoryMetaData.settings(), repositoryInjector, repository, indexShardRepository);
+ } catch (Throwable t) {
+ if (repositoryInjector != null) {
+ Injectors.close(repositoryInjector);
+ }
+ logger.warn("failed to create repository [{}][{}]", t, repositoryMetaData.type(), repositoryMetaData.name());
+ throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", t);
+ }
+ }
+
+ private void ensureRepositoryNotInUse(ClusterState clusterState, String repository) {
+ if (SnapshotsService.isRepositoryInUse(clusterState, repository) || RestoreService.isRepositoryInUse(clusterState, repository)) {
+            throw new ElasticsearchIllegalStateException("trying to modify or unregister repository that is currently in use");
+ }
+ }
+
+ /**
+ * Internal data structure for holding repository with its configuration information and injector
+ */
+ private static class RepositoryHolder {
+
+ private final String type;
+ private final Settings settings;
+ private final Injector injector;
+ private final Repository repository;
+ private final IndexShardRepository indexShardRepository;
+
+ public RepositoryHolder(String type, Settings settings, Injector injector, Repository repository, IndexShardRepository indexShardRepository) {
+ this.type = type;
+ this.settings = settings;
+ this.repository = repository;
+ this.indexShardRepository = indexShardRepository;
+ this.injector = injector;
+ }
+ }
+
+ /**
+ * Register repository request
+ */
+ public static class RegisterRepositoryRequest extends ClusterStateUpdateRequest<RegisterRepositoryRequest> {
+
+ final String cause;
+
+ final String name;
+
+ final String type;
+
+ Settings settings = EMPTY_SETTINGS;
+
+ /**
+ * Constructs new register repository request
+ *
+ * @param cause repository registration cause
+ * @param name repository name
+ * @param type repository type
+ */
+ public RegisterRepositoryRequest(String cause, String name, String type) {
+ this.cause = cause;
+ this.name = name;
+ this.type = type;
+ }
+
+ /**
+ * Sets repository settings
+ *
+ * @param settings repository settings
+ * @return this request
+ */
+ public RegisterRepositoryRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+ }
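+
+    // A minimal usage sketch for the request above, assuming an fs-type repository;
+    // the cause, name and location values are illustrative:
+    //
+    //   RegisterRepositoryRequest request =
+    //           new RegisterRepositoryRequest("api", "my_backup", "fs")
+    //                   .settings(ImmutableSettings.settingsBuilder()
+    //                           .put("location", "/mnt/backups")
+    //                           .build());
+    //   repositoriesService.registerRepository(request, new ActionListener<RegisterRepositoryResponse>() {
+    //       @Override
+    //       public void onResponse(RegisterRepositoryResponse response) { /* response.isAcknowledged() */ }
+    //       @Override
+    //       public void onFailure(Throwable t) { /* handle failure */ }
+    //   });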
+
+ /**
+ * Register repository response
+ */
+ public static class RegisterRepositoryResponse extends ClusterStateUpdateResponse {
+
+ RegisterRepositoryResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+
+ }
+
+ /**
+ * Unregister repository request
+ */
+ public static class UnregisterRepositoryRequest extends ClusterStateUpdateRequest<UnregisterRepositoryRequest> {
+
+ final String cause;
+
+ final String name;
+
+ /**
+ * Creates a new unregister repository request
+ *
+ * @param cause repository unregistration cause
+ * @param name repository name
+ */
+ public UnregisterRepositoryRequest(String cause, String name) {
+ this.cause = cause;
+ this.name = name;
+ }
+
+ }
+
+ /**
+ * Unregister repository response
+ */
+ public static class UnregisterRepositoryResponse extends ClusterStateUpdateResponse {
+ UnregisterRepositoryResponse(boolean acknowledged) {
+ super(acknowledged);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/Repository.java b/src/main/java/org/elasticsearch/repositories/Repository.java
new file mode 100644
index 0000000..44c8762
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/Repository.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotShardFailure;
+
+/**
+ * Snapshot repository interface.
+ * <p/>
+ * Responsible for index- and cluster-level operations. It is called only on the master node.
+ * Shard-level operations are performed using the {@link org.elasticsearch.index.snapshots.IndexShardRepository}
+ * interface on data nodes.
+ * <p/>
+ * Typical snapshot usage pattern:
+ * <ul>
+ * <li>Master calls {@link #initializeSnapshot(org.elasticsearch.cluster.metadata.SnapshotId, com.google.common.collect.ImmutableList, org.elasticsearch.cluster.metadata.MetaData)}
+ * with list of indices that will be included into the snapshot</li>
+ * <li>Data nodes call {@link org.elasticsearch.index.snapshots.IndexShardRepository#snapshot(org.elasticsearch.cluster.metadata.SnapshotId, org.elasticsearch.index.shard.ShardId, org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit, org.elasticsearch.index.snapshots.IndexShardSnapshotStatus)} for each shard</li>
+ * <li>When all shard calls return, the master calls {@link #finalizeSnapshot(org.elasticsearch.cluster.metadata.SnapshotId, String, int, com.google.common.collect.ImmutableList)}
+ * with a possibly empty list of failures</li>
+ * </ul>
+ */
+public interface Repository extends LifecycleComponent<Repository> {
+
+ /**
+ * Reads snapshot description from repository.
+ *
+ * @param snapshotId snapshot ID
+ * @return information about snapshot
+ */
+ Snapshot readSnapshot(SnapshotId snapshotId);
+
+ /**
+     * Returns global metadata associated with the snapshot.
+     * <p/>
+     * The returned metadata contains global metadata as well as metadata for all indices listed in the indices parameter.
+ *
+ * @param snapshotId snapshot ID
+ * @param indices list of indices
+ * @return information about snapshot
+ */
+ MetaData readSnapshotMetaData(SnapshotId snapshotId, ImmutableList<String> indices);
+
+ /**
+ * Returns the list of snapshots currently stored in the repository
+ *
+ * @return snapshot list
+ */
+ ImmutableList<SnapshotId> snapshots();
+
+ /**
+ * Starts snapshotting process
+ *
+ * @param snapshotId snapshot id
+ * @param indices list of indices to be snapshotted
+ * @param metaData cluster metadata
+ */
+ void initializeSnapshot(SnapshotId snapshotId, ImmutableList<String> indices, MetaData metaData);
+
+ /**
+ * Finalizes snapshotting process
+ * <p/>
+ * This method is called on master after all shards are snapshotted.
+ *
+ * @param snapshotId snapshot id
+ * @param failure global failure reason or null
+ * @param totalShards total number of shards
+ * @param shardFailures list of shard failures
+ * @return snapshot description
+ */
+ Snapshot finalizeSnapshot(SnapshotId snapshotId, String failure, int totalShards, ImmutableList<SnapshotShardFailure> shardFailures);
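+
+    // Taken together with initializeSnapshot(...) above, a sketch of the master-side
+    // call order (shard-level snapshots happen on data nodes in between):
+    //
+    //   repository.initializeSnapshot(snapshotId, indices, metaData);
+    //   // ... data nodes snapshot each shard via IndexShardRepository ...
+    //   repository.finalizeSnapshot(snapshotId, null, totalShards, shardFailures);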
+
+ /**
+ * Deletes snapshot
+ *
+ * @param snapshotId snapshot id
+ */
+ void deleteSnapshot(SnapshotId snapshotId);
+
+ /**
+ * Returns snapshot throttle time in nanoseconds
+ */
+ long snapshotThrottleTimeInNanos();
+
+ /**
+ * Returns restore throttle time in nanoseconds
+ */
+ long restoreThrottleTimeInNanos();
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoryException.java b/src/main/java/org/elasticsearch/repositories/RepositoryException.java
new file mode 100644
index 0000000..f2593ce
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoryException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Generic repository exception
+ */
+public class RepositoryException extends ElasticsearchException {
+ private final String repository;
+
+ public RepositoryException(String repository, String msg) {
+ this(repository, msg, null);
+ }
+
+ public RepositoryException(String repository, String msg, Throwable cause) {
+ super("[" + (repository == null ? "_na" : repository) + "] " + msg, cause);
+ this.repository = repository;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String repository() {
+ return repository;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoryMissingException.java b/src/main/java/org/elasticsearch/repositories/RepositoryMissingException.java
new file mode 100644
index 0000000..11d56ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoryMissingException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Repository missing exception
+ */
+public class RepositoryMissingException extends RepositoryException {
+
+ public RepositoryMissingException(String repository) {
+ super(repository, "missing");
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoryModule.java b/src/main/java/org/elasticsearch/repositories/RepositoryModule.java
new file mode 100644
index 0000000..6298284
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoryModule.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.Classes;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.NoClassSettingsException;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Locale;
+
+import static org.elasticsearch.common.Strings.toCamelCase;
+
+/**
+ * This module spawns the type-specific repository module
+ */
+public class RepositoryModule extends AbstractModule implements SpawnModules {
+
+    private final RepositoryName repositoryName;
+
+ private final Settings globalSettings;
+
+ private final Settings settings;
+
+ private final RepositoryTypesRegistry typesRegistry;
+
+ /**
+     * Constructs a module for the repository with the specified name, type and settings
+ *
+ * @param repositoryName repository name and type
+ * @param settings repository settings
+ * @param globalSettings global settings
+ * @param typesRegistry registry of repository types
+ */
+ public RepositoryModule(RepositoryName repositoryName, Settings settings, Settings globalSettings, RepositoryTypesRegistry typesRegistry) {
+ this.repositoryName = repositoryName;
+ this.globalSettings = globalSettings;
+ this.settings = settings;
+ this.typesRegistry = typesRegistry;
+ }
+
+ /**
+     * Returns the repository module.
+     * <p/>
+     * The repository type is first looked up in the typesRegistry; if it's not found there, this module tries to
+     * load the repository module by its class name.
+ *
+ * @return repository module
+ */
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(Modules.createModule(loadTypeModule(repositoryName.type(), "org.elasticsearch.repositories.", "RepositoryModule"), globalSettings));
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void configure() {
+ bind(RepositorySettings.class).toInstance(new RepositorySettings(globalSettings, settings));
+ }
+
+ private Class<? extends Module> loadTypeModule(String type, String prefixPackage, String suffixClassName) {
+ Class<? extends Module> registered = typesRegistry.type(type);
+ if (registered != null) {
+ return registered;
+ }
+ return Classes.loadClass(globalSettings.getClassLoader(), type, prefixPackage, suffixClassName);
+ }
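+
+    // For example: a repository registered as "fs" resolves through the registry to
+    // FsRepositoryModule, while an unregistered type would be resolved by
+    // Classes.loadClass using the "org.elasticsearch.repositories." package prefix
+    // and "RepositoryModule" class-name suffix passed in from spawnModules().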
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoryName.java b/src/main/java/org/elasticsearch/repositories/RepositoryName.java
new file mode 100644
index 0000000..7cef6a6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoryName.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+/**
+ * Combines the name and type of the repository
+ */
+public class RepositoryName {
+
+ private final String type;
+
+ private final String name;
+
+ public RepositoryName(String type, String name) {
+ this.type = type;
+ this.name = name;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String getType() {
+ return type();
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String getName() {
+ return name();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RepositoryName that = (RepositoryName) o;
+
+ if (name != null ? !name.equals(that.name) : that.name != null) return false;
+ if (type != null ? !type.equals(that.type) : that.type != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = type != null ? type.hashCode() : 0;
+ result = 31 * result + (name != null ? name.hashCode() : 0);
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoryNameModule.java b/src/main/java/org/elasticsearch/repositories/RepositoryNameModule.java
new file mode 100644
index 0000000..47be67d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoryNameModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * Binds a specific instance of RepositoryName for injection into the repository module
+ */
+public class RepositoryNameModule extends AbstractModule {
+
+ private final RepositoryName repositoryName;
+
+ public RepositoryNameModule(RepositoryName repositoryName) {
+ this.repositoryName = repositoryName;
+ }
+
+ @Override
+ protected void configure() {
+ bind(RepositoryName.class).toInstance(repositoryName);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositorySettings.java b/src/main/java/org/elasticsearch/repositories/RepositorySettings.java
new file mode 100644
index 0000000..222d606
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositorySettings.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Combines repository-specific settings with global settings
+ */
+public class RepositorySettings {
+
+ private final Settings globalSettings;
+
+ private final Settings settings;
+
+ public RepositorySettings(Settings globalSettings, Settings settings) {
+ this.globalSettings = globalSettings;
+ this.settings = settings;
+ }
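+
+    // A sketch of the intended lookup pattern: a repository implementation reads its
+    // own settings first and may fall back to node-level settings itself. The
+    // "location" and "repositories.fs.location" keys here are illustrative:
+    //
+    //   String location = repositorySettings.settings().get("location",
+    //           repositorySettings.globalSettings().get("repositories.fs.location"));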
+
+ public Settings globalSettings() {
+ return globalSettings;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/RepositoryTypesRegistry.java b/src/main/java/org/elasticsearch/repositories/RepositoryTypesRegistry.java
new file mode 100644
index 0000000..1322b65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/RepositoryTypesRegistry.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.inject.Module;
+
+/**
+ * Map of registered repository types and the modules associated with these types
+ */
+public class RepositoryTypesRegistry {
+ private final ImmutableMap<String, Class<? extends Module>> repositoryTypes;
+
+ /**
+     * Creates a new registry with the given map of types
+     *
+     * @param repositoryTypes map of repository type names to repository module classes
+ */
+ public RepositoryTypesRegistry(ImmutableMap<String, Class<? extends Module>> repositoryTypes) {
+ this.repositoryTypes = repositoryTypes;
+ }
+
+ /**
+ * Returns repository module class for the given type
+ *
+ * @param type repository type
+ * @return repository module class or null if type is not found
+ */
+ public Class<? extends Module> type(String type) {
+ return repositoryTypes.get(type);
+ }
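+
+    // For example: with the default registrations from RepositoriesModule,
+    // type("fs") returns FsRepositoryModule.class, while a type no plugin has
+    // registered returns null and is then resolved by class name (see RepositoryModule).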
+}
diff --git a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
new file mode 100644
index 0000000..362e641
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -0,0 +1,648 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.blobstore;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.store.RateLimiter;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.metrics.CounterMetric;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository.RateLimiterListener;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.RepositoryException;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.snapshots.*;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * BlobStore-based implementation of a snapshot repository
+ * <p/>
+ * This repository works with any {@link BlobStore} implementation. The blobStore should be initialized in the derived
+ * class before {@link #doStart()} is called.
+ * <p/>
+ * BlobStoreRepository maintains the following structure in the blob store
+ * <pre>
+ * {@code
+ * STORE_ROOT
+ * |- index - list of all snapshot names as JSON array
+ * |- snapshot-20131010 - JSON serialized BlobStoreSnapshot for snapshot "20131010"
+ * |- metadata-20131010 - JSON serialized MetaData for snapshot "20131010" (includes only global metadata)
+ * |- snapshot-20131011 - JSON serialized BlobStoreSnapshot for snapshot "20131011"
+ * |- metadata-20131011 - JSON serialized MetaData for snapshot "20131011"
+ * .....
+ * |- indices/ - data for all indices
+ * |- foo/ - data for index "foo"
+ * | |- snapshot-20131010 - JSON serialized IndexMetaData for index "foo"
+ * | |- 0/ - data for shard "0" of index "foo"
+ * | | |- __1 \
+ * | | |- __2 |
+ * | | |- __3 |- files from different segments see snapshot-* for their mappings to real segment files
+ * | | |- __4 |
+ * | | |- __5 /
+ * | | .....
+ * | | |- snapshot-20131010 - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131010"
+ * | | |- snapshot-20131011 - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131011"
+ * | |
+ * | |- 1/ - data for shard "1" of index "foo"
+ * | | |- __1
+ * | | .....
+ * | |
+ * | |-2/
+ * | ......
+ * |
+ * |- bar/ - data for index bar
+ * ......
+ * }
+ * </pre>
+ */
+public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener {
+
+ private ImmutableBlobContainer snapshotsBlobContainer;
+
+ protected final String repositoryName;
+
+ private static final String SNAPSHOT_PREFIX = "snapshot-";
+
+ private static final String SNAPSHOTS_FILE = "index";
+
+ private static final String METADATA_PREFIX = "metadata-";
+
+ private final BlobStoreIndexShardRepository indexShardRepository;
+
+ private final ToXContent.Params globalOnlyFormatParams;
+
+ private final RateLimiter snapshotRateLimiter;
+
+ private final RateLimiter restoreRateLimiter;
+
+ private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric();
+
+ private final CounterMetric restoreRateLimitingTimeInNanos = new CounterMetric();
+
+ /**
+ * Constructs new BlobStoreRepository
+ *
+ * @param repositoryName repository name
+ * @param repositorySettings repository settings
+ * @param indexShardRepository an instance of IndexShardRepository
+ */
+ protected BlobStoreRepository(String repositoryName, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) {
+ super(repositorySettings.globalSettings());
+ this.repositoryName = repositoryName;
+ this.indexShardRepository = (BlobStoreIndexShardRepository) indexShardRepository;
+ Map<String, String> globalOnlyParams = Maps.newHashMap();
+ globalOnlyParams.put(MetaData.GLOBAL_PERSISTENT_ONLY_PARAM, "true");
+ globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
+ snapshotRateLimiter = getRateLimiter(repositorySettings, "max_snapshot_bytes_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB));
+ restoreRateLimiter = getRateLimiter(repositorySettings, "max_restore_bytes_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB));
+ }
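+
+    // Both throttles can be tuned per repository; a sketch of the corresponding
+    // repository settings (the 40mb values are illustrative):
+    //
+    //   max_snapshot_bytes_per_sec: 40mb
+    //   max_restore_bytes_per_sec:  40mb
+    //
+    // A non-positive size disables the corresponding limiter, since
+    // getRateLimiter(...) returns null in that case.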
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ this.snapshotsBlobContainer = blobStore().immutableBlobContainer(basePath());
+ indexShardRepository.initialize(blobStore(), basePath(), chunkSize(), snapshotRateLimiter, restoreRateLimiter, this);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ try {
+ blobStore().close();
+ } catch (Throwable t) {
+ logger.warn("cannot close blob store", t);
+ }
+ }
+
+ /**
+ * Returns initialized and ready to use BlobStore
+ * <p/>
+ * This method is first called in the {@link #doStart()} method.
+ *
+ * @return blob store
+ */
+    protected abstract BlobStore blobStore();
+
+ /**
+ * Returns base path of the repository
+ */
+    protected abstract BlobPath basePath();
+
+ /**
+ * Returns true if metadata and snapshot files should be compressed
+ *
+ * @return true if compression is needed
+ */
+ protected boolean isCompress() {
+ return false;
+ }
+
+ /**
+ * Returns data file chunk size.
+ * <p/>
+ * This method should return null if no chunking is needed.
+ *
+ * @return chunk size
+ */
+ protected ByteSizeValue chunkSize() {
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void initializeSnapshot(SnapshotId snapshotId, ImmutableList<String> indices, MetaData metaData) {
+ try {
+ BlobStoreSnapshot blobStoreSnapshot = BlobStoreSnapshot.builder()
+ .name(snapshotId.getSnapshot())
+ .indices(indices)
+ .startTime(System.currentTimeMillis())
+ .build();
+ BytesStreamOutput bStream = writeSnapshot(blobStoreSnapshot);
+ String snapshotBlobName = snapshotBlobName(snapshotId);
+ if (snapshotsBlobContainer.blobExists(snapshotBlobName)) {
+ // TODO: Can we make it atomic?
+                throw new InvalidSnapshotNameException(snapshotId, "snapshot with the same name already exists");
+ }
+ snapshotsBlobContainer.writeBlob(snapshotBlobName, bStream.bytes().streamInput(), bStream.bytes().length());
+ // Write Global MetaData
+ // TODO: Check if metadata needs to be written
+ bStream = writeGlobalMetaData(metaData);
+ snapshotsBlobContainer.writeBlob(metaDataBlobName(snapshotId), bStream.bytes().streamInput(), bStream.bytes().length());
+ for (String index : indices) {
+ IndexMetaData indexMetaData = metaData.index(index);
+ BlobPath indexPath = basePath().add("indices").add(index);
+ ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
+ bStream = new BytesStreamOutput();
+ StreamOutput stream = bStream;
+ if (isCompress()) {
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ IndexMetaData.Builder.toXContent(indexMetaData, builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.close();
+ indexMetaDataBlobContainer.writeBlob(snapshotBlobName(snapshotId), bStream.bytes().streamInput(), bStream.bytes().length());
+ }
+ } catch (IOException ex) {
+ throw new SnapshotCreationException(snapshotId, ex);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void deleteSnapshot(SnapshotId snapshotId) {
+ Snapshot snapshot = readSnapshot(snapshotId);
+ MetaData metaData = readSnapshotMetaData(snapshotId, snapshot.indices());
+ try {
+ String blobName = snapshotBlobName(snapshotId);
+            // Delete the snapshot file first so we don't end up with a partially deleted snapshot that looks OK
+ snapshotsBlobContainer.deleteBlob(blobName);
+ snapshotsBlobContainer.deleteBlob(metaDataBlobName(snapshotId));
+ // Delete snapshot from the snapshot list
+ ImmutableList<SnapshotId> snapshotIds = snapshots();
+ if (snapshotIds.contains(snapshotId)) {
+ ImmutableList.Builder<SnapshotId> builder = ImmutableList.builder();
+ for (SnapshotId id : snapshotIds) {
+ if (!snapshotId.equals(id)) {
+ builder.add(id);
+ }
+ }
+ snapshotIds = builder.build();
+ }
+ writeSnapshotList(snapshotIds);
+ // Now delete all indices
+ for (String index : snapshot.indices()) {
+ BlobPath indexPath = basePath().add("indices").add(index);
+ ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
+ try {
+ indexMetaDataBlobContainer.deleteBlob(blobName);
+ } catch (IOException ex) {
+ throw new SnapshotException(snapshotId, "failed to delete metadata", ex);
+ }
+ IndexMetaData indexMetaData = metaData.index(index);
+ for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
+ indexShardRepository.delete(snapshotId, new ShardId(index, i));
+ }
+ }
+ } catch (IOException ex) {
+ throw new RepositoryException(this.repositoryName, "failed to update snapshot in repository", ex);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Snapshot finalizeSnapshot(SnapshotId snapshotId, String failure, int totalShards, ImmutableList<SnapshotShardFailure> shardFailures) {
+ BlobStoreSnapshot snapshot = (BlobStoreSnapshot) readSnapshot(snapshotId);
+ if (snapshot == null) {
+ throw new SnapshotMissingException(snapshotId);
+ }
+ if (snapshot.state().completed()) {
+ throw new SnapshotException(snapshotId, "snapshot is already closed");
+ }
+ try {
+ String blobName = snapshotBlobName(snapshotId);
+ BlobStoreSnapshot.Builder updatedSnapshot = BlobStoreSnapshot.builder().snapshot(snapshot);
+ if (failure == null) {
+ updatedSnapshot.success();
+ updatedSnapshot.failures(totalShards, shardFailures);
+ } else {
+ updatedSnapshot.failed(failure);
+ }
+ updatedSnapshot.endTime(System.currentTimeMillis());
+ snapshot = updatedSnapshot.build();
+ BytesStreamOutput bStream = writeSnapshot(snapshot);
+ snapshotsBlobContainer.writeBlob(blobName, bStream.bytes().streamInput(), bStream.bytes().length());
+ ImmutableList<SnapshotId> snapshotIds = snapshots();
+ if (!snapshotIds.contains(snapshotId)) {
+ snapshotIds = ImmutableList.<SnapshotId>builder().addAll(snapshotIds).add(snapshotId).build();
+ }
+ writeSnapshotList(snapshotIds);
+ return snapshot;
+ } catch (IOException ex) {
+ throw new RepositoryException(this.repositoryName, "failed to update snapshot in repository", ex);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public ImmutableList<SnapshotId> snapshots() {
+ try {
+ List<SnapshotId> snapshots = newArrayList();
+ ImmutableMap<String, BlobMetaData> blobs;
+ try {
+ blobs = snapshotsBlobContainer.listBlobsByPrefix(SNAPSHOT_PREFIX);
+ } catch (UnsupportedOperationException ex) {
+ // Fall back in case listBlobsByPrefix isn't supported by the blob store
+ return readSnapshotList();
+ }
+ int prefixLength = SNAPSHOT_PREFIX.length();
+ for (BlobMetaData md : blobs.values()) {
+ String name = md.name().substring(prefixLength);
+ snapshots.add(new SnapshotId(repositoryName, name));
+ }
+ return ImmutableList.copyOf(snapshots);
+ } catch (IOException ex) {
+ throw new RepositoryException(repositoryName, "failed to list snapshots in repository", ex);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public MetaData readSnapshotMetaData(SnapshotId snapshotId, ImmutableList<String> indices) {
+ MetaData metaData;
+ try {
+ byte[] data = snapshotsBlobContainer.readBlobFully(metaDataBlobName(snapshotId));
+ metaData = readMetaData(data);
+ } catch (FileNotFoundException ex) {
+ throw new SnapshotMissingException(snapshotId, ex);
+ } catch (IOException ex) {
+ throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
+ }
+ MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
+ for (String index : indices) {
+ BlobPath indexPath = basePath().add("indices").add(index);
+ ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
+ XContentParser parser = null;
+ try {
+ byte[] data = indexMetaDataBlobContainer.readBlobFully(snapshotBlobName(snapshotId));
+ parser = XContentHelper.createParser(data, 0, data.length);
+ XContentParser.Token token;
+ if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
+ IndexMetaData indexMetaData = IndexMetaData.Builder.fromXContent(parser);
+ if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
+ metaDataBuilder.put(indexMetaData, false);
+ continue;
+ }
+ }
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ } catch (IOException ex) {
+ throw new SnapshotException(snapshotId, "failed to read metadata", ex);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+ return metaDataBuilder.build();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Snapshot readSnapshot(SnapshotId snapshotId) {
+ try {
+ String blobName = snapshotBlobName(snapshotId);
+ int retryCount = 0;
+ while (true) {
+ byte[] data = snapshotsBlobContainer.readBlobFully(blobName);
+                // Because we are overwriting the snapshot during finalization, it's possible that
+                // we can get an empty or incomplete snapshot for a brief moment;
+                // retrying after a short wait can resolve the issue
+ // TODO: switch to atomic update after non-local gateways are removed and we switch to java 1.7
+ try {
+ return readSnapshot(data);
+ } catch (ElasticsearchParseException ex) {
+ if (retryCount++ < 3) {
+ try {
+ Thread.sleep(50);
+ } catch (InterruptedException ex1) {
+ Thread.currentThread().interrupt();
+ }
+ } else {
+ throw ex;
+ }
+ }
+ }
+ } catch (FileNotFoundException ex) {
+ throw new SnapshotMissingException(snapshotId, ex);
+ } catch (IOException ex) {
+ throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
+ }
+ }
+
+ /**
+ * Configures RateLimiter based on repository and global settings
+ *
+ * @param repositorySettings repository settings
+ * @param setting setting to use to configure rate limiter
+ * @param defaultRate default limiting rate
+     * @return rate limiter or null if no throttling is needed
+ */
+ private RateLimiter getRateLimiter(RepositorySettings repositorySettings, String setting, ByteSizeValue defaultRate) {
+        ByteSizeValue maxBytesPerSec = repositorySettings.settings().getAsBytesSize(setting,
+                componentSettings.getAsBytesSize(setting, defaultRate));
+        if (maxBytesPerSec.bytes() <= 0) {
+            return null;
+        } else {
+            return new RateLimiter.SimpleRateLimiter(maxBytesPerSec.mbFrac());
+        }
+ }
+
+ /**
+ * Parses JSON containing snapshot description
+ *
+ * @param data snapshot description in JSON format
+ * @return parsed snapshot description
+ * @throws IOException parse exceptions
+ */
+ private BlobStoreSnapshot readSnapshot(byte[] data) throws IOException {
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ XContentParser.Token token;
+ if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
+ if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {
+ parser.nextToken();
+ BlobStoreSnapshot snapshot = BlobStoreSnapshot.Builder.fromXContent(parser);
+ if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
+ return snapshot;
+ }
+ }
+ }
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ /**
+ * Parses JSON containing cluster metadata
+ *
+ * @param data cluster metadata in JSON format
+ * @return parsed metadata
+ * @throws IOException parse exceptions
+ */
+ private MetaData readMetaData(byte[] data) throws IOException {
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ XContentParser.Token token;
+ if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
+ if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {
+ parser.nextToken();
+ MetaData metaData = MetaData.Builder.fromXContent(parser);
+ if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
+ return metaData;
+ }
+ }
+ }
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ /**
+ * Returns name of snapshot blob
+ *
+ * @param snapshotId snapshot id
+ * @return name of snapshot blob
+ */
+ private String snapshotBlobName(SnapshotId snapshotId) {
+ return SNAPSHOT_PREFIX + snapshotId.getSnapshot();
+ }
+
+ /**
+ * Returns name of metadata blob
+ *
+ * @param snapshotId snapshot id
+ * @return name of metadata blob
+ */
+ private String metaDataBlobName(SnapshotId snapshotId) {
+ return METADATA_PREFIX + snapshotId.getSnapshot();
+ }
+
+ /**
+ * Serializes BlobStoreSnapshot into JSON
+ *
+     * @param snapshot snapshot description
+     * @return BytesStreamOutput representing JSON serialized BlobStoreSnapshot
+     * @throws IOException serialization errors
+ */
+ private BytesStreamOutput writeSnapshot(BlobStoreSnapshot snapshot) throws IOException {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = bStream;
+ if (isCompress()) {
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ BlobStoreSnapshot.Builder.toXContent(snapshot, builder, globalOnlyFormatParams);
+ builder.endObject();
+ builder.close();
+ return bStream;
+ }
+
+ /**
+ * Serializes global MetaData into JSON
+ *
+     * @param metaData global metadata
+     * @return BytesStreamOutput representing JSON serialized global MetaData
+     * @throws IOException serialization errors
+ */
+ private BytesStreamOutput writeGlobalMetaData(MetaData metaData) throws IOException {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = bStream;
+ if (isCompress()) {
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ MetaData.Builder.toXContent(metaData, builder, globalOnlyFormatParams);
+ builder.endObject();
+ builder.close();
+ return bStream;
+ }
+
+ /**
+ * Writes snapshot index file
+ * <p/>
+ * This file can be used by read-only repositories that are unable to list files in the repository
+ *
+ * @param snapshots list of snapshot ids
+ * @throws IOException I/O errors
+ */
+ protected void writeSnapshotList(ImmutableList<SnapshotId> snapshots) throws IOException {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = bStream;
+ if (isCompress()) {
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ builder.startArray("snapshots");
+ for (SnapshotId snapshot : snapshots) {
+ builder.value(snapshot.getSnapshot());
+ }
+ builder.endArray();
+ builder.endObject();
+ builder.close();
+ snapshotsBlobContainer.writeBlob(SNAPSHOTS_FILE, bStream.bytes().streamInput(), bStream.bytes().length());
+ }
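+
+    // The resulting "index" blob is a small JSON document; for two snapshots named
+    // 20131010 and 20131011 (the illustrative names from the class-level layout
+    // diagram) it would contain:
+    //
+    //   {"snapshots":["20131010","20131011"]}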
+
+ /**
+ * Reads snapshot index file
+ * <p/>
+ * This file can be used by read-only repositories that are unable to list files in the repository
+ *
+ * @return list of snapshots in the repository
+ * @throws IOException I/O errors
+ */
+ protected ImmutableList<SnapshotId> readSnapshotList() throws IOException {
+ byte[] data = snapshotsBlobContainer.readBlobFully(SNAPSHOTS_FILE);
+ ArrayList<SnapshotId> snapshots = new ArrayList<SnapshotId>();
+ XContentParser parser = null;
+ try {
+ parser = XContentHelper.createParser(data, 0, data.length);
+ if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
+ if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ if ("snapshots".equals(currentFieldName)) {
+ if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ snapshots.add(new SnapshotId(repositoryName, parser.text()));
+ }
+ }
+ }
+ }
+ }
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ return ImmutableList.copyOf(snapshots);
+ }
+
+ @Override
+ public void onRestorePause(long nanos) {
+ restoreRateLimitingTimeInNanos.inc(nanos);
+ }
+
+ @Override
+ public void onSnapshotPause(long nanos) {
+ snapshotRateLimitingTimeInNanos.inc(nanos);
+ }
+
+ @Override
+ public long snapshotThrottleTimeInNanos() {
+ return snapshotRateLimitingTimeInNanos.count();
+ }
+
+ @Override
+ public long restoreThrottleTimeInNanos() {
+ return restoreRateLimitingTimeInNanos.count();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreSnapshot.java b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreSnapshot.java
new file mode 100644
index 0000000..9d451a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreSnapshot.java
@@ -0,0 +1,505 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.blobstore;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.primitives.Longs;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotShardFailure;
+import org.elasticsearch.snapshots.SnapshotState;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ * Immutable snapshot description for BlobStoreRepository
+ * <p/>
+ * Stored in the root of the repository, see {@link BlobStoreRepository} for more information.
+ */
+public class BlobStoreSnapshot implements Snapshot {
+ private final String name;
+
+ private final Version version;
+
+ private final SnapshotState state;
+
+ private final String reason;
+
+ private final ImmutableList<String> indices;
+
+ private final long startTime;
+
+ private final long endTime;
+
+ private final int totalShard;
+
+ private final int successfulShards;
+
+ private final ImmutableList<SnapshotShardFailure> shardFailures;
+
+ private BlobStoreSnapshot(String name, ImmutableList<String> indices, SnapshotState state, String reason, Version version, long startTime, long endTime,
+ int totalShard, int successfulShards, ImmutableList<SnapshotShardFailure> shardFailures) {
+ assert name != null;
+ assert indices != null;
+ assert state != null;
+ assert shardFailures != null;
+ this.name = name;
+ this.indices = indices;
+ this.state = state;
+ this.reason = reason;
+ this.version = version;
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.totalShard = totalShard;
+ this.successfulShards = successfulShards;
+ this.shardFailures = shardFailures;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String name() {
+ return name;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public SnapshotState state() {
+ return state;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String reason() {
+ return reason;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Version version() {
+ return version;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public ImmutableList<String> indices() {
+ return indices;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public long startTime() {
+ return startTime;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public long endTime() {
+ return endTime;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int totalShard() {
+ return totalShard;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int successfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public ImmutableList<SnapshotShardFailure> shardFailures() {
+ return shardFailures;
+ }
+
+ /**
+ * Creates new BlobStoreSnapshot builder
+ *
+ * @return new builder instance
+ */
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ /**
+ * Compares two snapshots by their start time
+ *
+ * @param o other snapshot
+ * @return the value {@code 0} if snapshots were created at the same time;
+ * a value less than {@code 0} if this snapshot was created before snapshot {@code o}; and
+ * a value greater than {@code 0} if this snapshot was created after snapshot {@code o}.
+ */
+ @Override
+ public int compareTo(Snapshot o) {
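+ // ordered by start time; assumes the other snapshot is also a BlobStoreSnapshot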
+ return Longs.compare(startTime, ((BlobStoreSnapshot) o).startTime);
+ }
+
+ /**
+ * BlobStoreSnapshot builder
+ */
+ public static class Builder {
+
+ private String name;
+
+ private Version version = Version.CURRENT;
+
+ private SnapshotState state = SnapshotState.IN_PROGRESS;
+
+ private String reason;
+
+ private ImmutableList<String> indices;
+
+ private long startTime;
+
+ private long endTime;
+
+ private int totalShard;
+
+ private int successfulShards;
+
+ private ImmutableList<SnapshotShardFailure> shardFailures = ImmutableList.of();
+
+ /**
+ * Copies data from another snapshot into the builder
+ *
+ * @param snapshot another snapshot
+ * @return this builder
+ */
+ public Builder snapshot(BlobStoreSnapshot snapshot) {
+ name = snapshot.name();
+ indices = snapshot.indices();
+ version = snapshot.version();
+ reason = snapshot.reason();
+ state = snapshot.state();
+ startTime = snapshot.startTime();
+ endTime = snapshot.endTime();
+ totalShard = snapshot.totalShard();
+ successfulShards = snapshot.successfulShards();
+ shardFailures = snapshot.shardFailures();
+ return this;
+ }
+
+ /**
+ * Sets snapshot name
+ *
+ * @param name snapshot name
+ * @return this builder
+ */
+ public Builder name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ /**
+ * Sets list of indices in the snapshot
+ *
+ * @param indices list of indices
+ * @return this builder
+ */
+ public Builder indices(Collection<String> indices) {
+ this.indices = ImmutableList.copyOf(indices);
+ return this;
+ }
+
+ /**
+ * Sets list of indices in the snapshot
+ *
+ * @param indices list of indices
+ * @return this builder
+ */
+ public Builder indices(String[] indices) {
+ this.indices = ImmutableList.copyOf(indices);
+ return this;
+ }
+
+ /**
+ * Sets snapshot state
+ *
+ * @param state snapshot state
+ * @return this builder
+ */
+ public Builder state(SnapshotState state) {
+ this.state = state;
+ return this;
+ }
+
+ /**
+ * Sets snapshot failure reason
+ *
+ * @param reason snapshot failure reason
+ * @return this builder
+ */
+ public Builder reason(String reason) {
+ this.reason = reason;
+ return this;
+ }
+
+ /**
+ * Marks snapshot as successful
+ *
+ * @return this builder
+ */
+ public Builder success() {
+ this.state = SnapshotState.SUCCESS;
+ return this;
+ }
+
+ /**
+ * Marks snapshot as failed and saves failure reason
+ *
+ * @param reason failure reason
+ * @return this builder
+ */
+ public Builder failed(String reason) {
+ this.state = SnapshotState.FAILED;
+ this.reason = reason;
+ return this;
+ }
+
+ /**
+ * Sets version of Elasticsearch that created this snapshot
+ *
+ * @param version version
+ * @return this builder
+ */
+ public Builder version(Version version) {
+ this.version = version;
+ return this;
+ }
+
+ /**
+ * Sets snapshot start time
+ *
+ * @param startTime snapshot start time
+ * @return this builder
+ */
+ public Builder startTime(long startTime) {
+ this.startTime = startTime;
+ return this;
+ }
+
+ /**
+ * Sets snapshot end time
+ *
+ * @param endTime snapshot end time
+ * @return this builder
+ */
+ public Builder endTime(long endTime) {
+ this.endTime = endTime;
+ return this;
+ }
+
+ /**
+ * Sets total number of shards across all snapshot indices
+ *
+ * @param totalShard number of shards
+ * @return this builder
+ */
+ public Builder totalShard(int totalShard) {
+ this.totalShard = totalShard;
+ return this;
+ }
+
+ /**
+ * Sets the total number of shards that were successfully snapshotted
+ *
+ * @param successfulShards number of successful shards
+ * @return this builder
+ */
+ public Builder successfulShards(int successfulShards) {
+ this.successfulShards = successfulShards;
+ return this;
+ }
+
+ /**
+ * Sets the list of individual shard failures
+ *
+ * @param shardFailures list of shard failures
+ * @return this builder
+ */
+ public Builder shardFailures(ImmutableList<SnapshotShardFailure> shardFailures) {
+ this.shardFailures = shardFailures;
+ return this;
+ }
+
+ /**
+ * Sets the total number of shards and the list of individual shard failures
+ *
+ * @param totalShard number of shards
+ * @param shardFailures list of shard failures
+ * @return this builder
+ */
+ public Builder failures(int totalShard, ImmutableList<SnapshotShardFailure> shardFailures) {
+ this.totalShard = totalShard;
+ this.successfulShards = totalShard - shardFailures.size();
+ this.shardFailures = shardFailures;
+ return this;
+ }
+
+ /**
+ * Builds new BlobStoreSnapshot
+ *
+ * @return new BlobStoreSnapshot instance
+ */
+ public BlobStoreSnapshot build() {
+ return new BlobStoreSnapshot(name, indices, state, reason, version, startTime, endTime, totalShard, successfulShards, shardFailures);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
+ static final XContentBuilderString NAME = new XContentBuilderString("name");
+ static final XContentBuilderString VERSION_ID = new XContentBuilderString("version_id");
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString STATE = new XContentBuilderString("state");
+ static final XContentBuilderString REASON = new XContentBuilderString("reason");
+ static final XContentBuilderString START_TIME = new XContentBuilderString("start_time");
+ static final XContentBuilderString END_TIME = new XContentBuilderString("end_time");
+ static final XContentBuilderString TOTAL_SHARDS = new XContentBuilderString("total_shards");
+ static final XContentBuilderString SUCCESSFUL_SHARDS = new XContentBuilderString("successful_shards");
+ static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
+ }
+
+ /**
+ * Serializes the snapshot
+ *
+ * @param snapshot snapshot to be serialized
+ * @param builder XContent builder
+ * @param params serialization parameters
+ * @throws IOException if an error occurs while serializing the snapshot
+ */
+ public static void toXContent(BlobStoreSnapshot snapshot, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(Fields.SNAPSHOT);
+ builder.field(Fields.NAME, snapshot.name);
+ builder.field(Fields.VERSION_ID, snapshot.version.id);
+ builder.startArray(Fields.INDICES);
+ for (String index : snapshot.indices) {
+ builder.value(index);
+ }
+ builder.endArray();
+ builder.field(Fields.STATE, snapshot.state);
+ if (snapshot.reason != null) {
+ builder.field(Fields.REASON, snapshot.reason);
+ }
+ builder.field(Fields.START_TIME, snapshot.startTime);
+ builder.field(Fields.END_TIME, snapshot.endTime);
+ builder.field(Fields.TOTAL_SHARDS, snapshot.totalShard);
+ builder.field(Fields.SUCCESSFUL_SHARDS, snapshot.successfulShards);
+ builder.startArray(Fields.FAILURES);
+ for (SnapshotShardFailure shardFailure : snapshot.shardFailures) {
+ SnapshotShardFailure.toXContent(shardFailure, builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ }
+
+ /**
+ * Parses the snapshot
+ *
+ * @param parser XContent parser
+ * @return snapshot
+ * @throws IOException if an error occurs while parsing the snapshot
+ */
+ public static BlobStoreSnapshot fromXContent(XContentParser parser) throws IOException {
+ Builder builder = new Builder();
+
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ String currentFieldName = parser.currentName();
+ if ("snapshot".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if (token.isValue()) {
+ if ("name".equals(currentFieldName)) {
+ builder.name(parser.text());
+ } else if ("state".equals(currentFieldName)) {
+ builder.state(SnapshotState.valueOf(parser.text()));
+ } else if ("reason".equals(currentFieldName)) {
+ builder.reason(parser.text());
+ } else if ("start_time".equals(currentFieldName)) {
+ builder.startTime(parser.longValue());
+ } else if ("end_time".equals(currentFieldName)) {
+ builder.endTime(parser.longValue());
+ } else if ("total_shards".equals(currentFieldName)) {
+ builder.totalShard(parser.intValue());
+ } else if ("successful_shards".equals(currentFieldName)) {
+ builder.successfulShards(parser.intValue());
+ } else if ("version_id".equals(currentFieldName)) {
+ builder.version(Version.fromId(parser.intValue()));
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("indices".equals(currentFieldName)) {
+ ArrayList<String> indices = new ArrayList<String>();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ indices.add(parser.text());
+ }
+ builder.indices(indices);
+ } else if ("failures".equals(currentFieldName)) {
+ ArrayList<SnapshotShardFailure> failures = new ArrayList<SnapshotShardFailure>();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ failures.add(SnapshotShardFailure.fromXContent(parser));
+ }
+ builder.shardFailures(ImmutableList.copyOf(failures));
+ } else {
+ // It was probably created by a newer version - ignoring
+ parser.skipChildren();
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ // It was probably created by a newer version - ignoring
+ parser.skipChildren();
+ }
+ }
+ }
+ }
+ }
+ return builder.build();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
new file mode 100644
index 0000000..eae5972
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.fs;
+
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.repositories.RepositoryException;
+import org.elasticsearch.repositories.RepositoryName;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Shared file system implementation of the BlobStoreRepository
+ * <p/>
+ * Shared file system repository supports the following settings
+ * <dl>
+ * <dt>{@code location}</dt><dd>Path to the root of the repository. This is a mandatory parameter.</dd>
+ * <dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd>
+ * <dt>{@code chunk_size}</dt><dd>Large files can be divided into chunks. This parameter specifies the chunk size. Defaults to not chunked.</dd>
+ * <dt>{@code compress}</dt><dd>If set to true, metadata files will be stored compressed. Defaults to false.</dd>
+ * </dl>
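+ * <p/>
+ * For example (illustrative only; the repository name and path below are hypothetical), a repository
+ * of this type can be registered through the snapshot API:
+ * <pre>
+ * PUT _snapshot/my_backup
+ * {
+ *   "type": "fs",
+ *   "settings": { "location": "/mount/backups/my_backup" }
+ * }
+ * </pre>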
+ */
+public class FsRepository extends BlobStoreRepository {
+
+ public final static String TYPE = "fs";
+
+ private final FsBlobStore blobStore;
+
+ private ByteSizeValue chunkSize;
+
+ private final BlobPath basePath;
+
+ private boolean compress;
+
+ /**
+ * Constructs new shared file system repository
+ *
+ * @param name repository name
+ * @param repositorySettings repository settings
+ * @param indexShardRepository index shard repository
+ * @throws IOException if the repository cannot be initialized
+ */
+ @Inject
+ public FsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
+ super(name.getName(), repositorySettings, indexShardRepository);
+ File locationFile;
+ String location = repositorySettings.settings().get("location", componentSettings.get("location"));
+ if (location == null) {
+ logger.warn("the repository location is missing, it should point to a shared file system location that is available on all master and data nodes");
+ throw new RepositoryException(name.name(), "missing location");
+ } else {
+ locationFile = new File(location);
+ }
+ int concurrentStreams = repositorySettings.settings().getAsInt("concurrent_streams", componentSettings.getAsInt("concurrent_streams", 5));
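+ // bounded scaling pool used for concurrent blob reads/writes against the shared file system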
+ ExecutorService concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[fs_stream]"));
+ blobStore = new FsBlobStore(componentSettings, concurrentStreamPool, locationFile);
+ this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", componentSettings.getAsBytesSize("chunk_size", null));
+ this.compress = repositorySettings.settings().getAsBoolean("compress", componentSettings.getAsBoolean("compress", false));
+ this.basePath = BlobPath.cleanPath();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected BlobStore blobStore() {
+ return blobStore;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected boolean isCompress() {
+ return compress;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected ByteSizeValue chunkSize() {
+ return chunkSize;
+ }
+
+ @Override
+ protected BlobPath basePath() {
+ return basePath;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/fs/FsRepositoryModule.java b/src/main/java/org/elasticsearch/repositories/fs/FsRepositoryModule.java
new file mode 100644
index 0000000..2b8a4f4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/fs/FsRepositoryModule.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.fs;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.repositories.Repository;
+
+/**
+ * File system repository module
+ */
+public class FsRepositoryModule extends AbstractModule {
+
+ public FsRepositoryModule() {
+ super();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void configure() {
+ bind(Repository.class).to(FsRepository.class).asEagerSingleton();
+ bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton();
+ }
+
+}
+
diff --git a/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
new file mode 100644
index 0000000..e934194
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.uri;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.url.URLBlobStore;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.repositories.RepositoryException;
+import org.elasticsearch.repositories.RepositoryName;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+
+import java.io.IOException;
+import java.net.URL;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Read-only URL-based implementation of the BlobStoreRepository
+ * <p/>
+ * This repository supports the following settings
+ * <dl>
+ * <dt>{@code url}</dt><dd>URL to the root of the repository. This is a mandatory parameter.</dd>
+ * <dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd>
+ * </dl>
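+ * <p/>
+ * For example (illustrative only; the repository name and URL below are hypothetical), a read-only
+ * repository of this type can be registered through the snapshot API:
+ * <pre>
+ * PUT _snapshot/my_readonly_backup
+ * {
+ *   "type": "url",
+ *   "settings": { "url": "http://example.com/backups/" }
+ * }
+ * </pre>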
+ */
+public class URLRepository extends BlobStoreRepository {
+
+ public final static String TYPE = "url";
+
+ private final URLBlobStore blobStore;
+
+ private final BlobPath basePath;
+
+ private boolean listDirectories;
+
+ /**
+ * Constructs new read-only URL-based repository
+ *
+ * @param name repository name
+ * @param repositorySettings repository settings
+ * @param indexShardRepository shard repository
+ * @throws IOException if the repository cannot be initialized
+ */
+ @Inject
+ public URLRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
+ super(name.getName(), repositorySettings, indexShardRepository);
+ URL url;
+ String path = repositorySettings.settings().get("url", componentSettings.get("url"));
+ if (path == null) {
+ throw new RepositoryException(name.name(), "missing url");
+ } else {
+ url = new URL(path);
+ }
+ int concurrentStreams = repositorySettings.settings().getAsInt("concurrent_streams", componentSettings.getAsInt("concurrent_streams", 5));
+ ExecutorService concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[url_stream]"));
+ listDirectories = repositorySettings.settings().getAsBoolean("list_directories", componentSettings.getAsBoolean("list_directories", true));
+ blobStore = new URLBlobStore(componentSettings, concurrentStreamPool, url);
+ basePath = BlobPath.cleanPath();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected BlobStore blobStore() {
+ return blobStore;
+ }
+
+ @Override
+ protected BlobPath basePath() {
+ return basePath;
+ }
+
+ @Override
+ public ImmutableList<SnapshotId> snapshots() {
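+ // when directory listing is disabled, fall back to the snapshot index file written by writeSnapshotList(...)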
+ if (listDirectories) {
+ return super.snapshots();
+ } else {
+ try {
+ return readSnapshotList();
+ } catch (IOException ex) {
+ throw new RepositoryException(repositoryName, "failed to get snapshot list in repository", ex);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/repositories/uri/URLRepositoryModule.java b/src/main/java/org/elasticsearch/repositories/uri/URLRepositoryModule.java
new file mode 100644
index 0000000..949a1b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/repositories/uri/URLRepositoryModule.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.uri;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.repositories.Repository;
+
+/**
+ * URL repository module
+ */
+public class URLRepositoryModule extends AbstractModule {
+
+ public URLRepositoryModule() {
+ super();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void configure() {
+ bind(Repository.class).to(URLRepository.class).asEagerSingleton();
+ bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton();
+ }
+
+}
+
diff --git a/src/main/java/org/elasticsearch/rest/AbstractRestResponse.java b/src/main/java/org/elasticsearch/rest/AbstractRestResponse.java
new file mode 100644
index 0000000..46ff821
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/AbstractRestResponse.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Base {@link RestResponse} implementation with no prefix or suffix content and support for custom headers.
+ */
+public abstract class AbstractRestResponse implements RestResponse {
+
+ Map<String, List<String>> customHeaders;
+
+ @Override
+ public byte[] prefixContent() {
+ return null;
+ }
+
+ @Override
+ public int prefixContentLength() {
+ return -1;
+ }
+
+ @Override
+ public int prefixContentOffset() {
+ return 0;
+ }
+
+ @Override
+ public byte[] suffixContent() {
+ return null;
+ }
+
+ @Override
+ public int suffixContentLength() {
+ return -1;
+ }
+
+ @Override
+ public int suffixContentOffset() {
+ return 0;
+ }
+
+ @Override
+ public void addHeader(String name, String value) {
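+ // the headers map is created lazily since most responses never set custom headers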
+ if (customHeaders == null) {
+ customHeaders = new HashMap<String, List<String>>(2);
+ }
+ List<String> header = customHeaders.get(name);
+ if (header == null) {
+ header = new ArrayList<String>();
+ customHeaders.put(name, header);
+ }
+ header.add(value);
+ }
+
+ @Override
+ public Map<String, List<String>> getHeaders() {
+ return customHeaders;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/AbstractRestResponseActionListener.java b/src/main/java/org/elasticsearch/rest/AbstractRestResponseActionListener.java
new file mode 100644
index 0000000..4a9da12
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/AbstractRestResponseActionListener.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.logging.ESLogger;
+
+import java.io.IOException;
+
+/**
+ * Base action listener that sends a failure response back over the REST channel when the action fails.
+ */
+public abstract class AbstractRestResponseActionListener<T extends ActionResponse> implements ActionListener<T> {
+
+ protected final RestChannel channel;
+ protected final RestRequest request;
+ protected final ESLogger logger;
+
+ public AbstractRestResponseActionListener(final RestRequest request, final RestChannel channel, final ESLogger logger) {
+ this.request = request;
+ this.channel = channel;
+ this.logger = logger;
+ }
+
+ @Override
+ public abstract void onResponse(T t);
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/AcknowledgedRestResponseActionListener.java b/src/main/java/org/elasticsearch/rest/AcknowledgedRestResponseActionListener.java
new file mode 100644
index 0000000..e7d6aea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/AcknowledgedRestResponseActionListener.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Action listener that renders an {@code acknowledged} flag in the REST response.
+ */
+public class AcknowledgedRestResponseActionListener<T extends AcknowledgedResponse> extends AbstractRestResponseActionListener<T> {
+
+ public AcknowledgedRestResponseActionListener(RestRequest request, RestChannel channel, ESLogger logger) {
+ super(request, channel, logger);
+ }
+
+ @Override
+ public void onResponse(T response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject()
+ .field(Fields.ACKNOWLEDGED, response.isAcknowledged());
+ addCustomFields(builder, response);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (IOException e) {
+ onFailure(e);
+ }
+ }
+
+ /**
+ * Adds API-specific fields to the REST response.
+ * Does nothing by default but can be overridden by subclasses.
+ */
+ protected void addCustomFields(XContentBuilder builder, T response) throws IOException {
+
+ }
+
+ static final class Fields {
+ static final XContentBuilderString ACKNOWLEDGED = new XContentBuilderString("acknowledged");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java
new file mode 100644
index 0000000..4d024e3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Base class for REST request handlers, holding a reference to the client.
+ */
+public abstract class BaseRestHandler extends AbstractComponent implements RestHandler {
+
+ protected final Client client;
+
+ protected BaseRestHandler(Settings settings, Client client) {
+ super(settings);
+ this.client = client;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
new file mode 100644
index 0000000..ca23412
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import java.io.IOException;
+
+public class BytesRestResponse extends AbstractRestResponse {
+
+ private final byte[] bytes;
+
+ private final String contentType;
+
+ public BytesRestResponse(byte[] bytes, String contentType) {
+ this.bytes = bytes;
+ this.contentType = contentType;
+ }
+
+ @Override
+ public boolean contentThreadSafe() {
+ return true;
+ }
+
+ @Override
+ public String contentType() {
+ return contentType;
+ }
+
+ @Override
+ public byte[] content() throws IOException {
+ return bytes;
+ }
+
+ @Override
+ public int contentLength() throws IOException {
+ return bytes.length;
+ }
+
+ @Override
+ public int contentOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.OK;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/HttpRedirectRestResponse.java b/src/main/java/org/elasticsearch/rest/HttpRedirectRestResponse.java
new file mode 100644
index 0000000..0229448
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/HttpRedirectRestResponse.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+/**
+ * Redirect to another web page
+ */
+public class HttpRedirectRestResponse extends StringRestResponse {
+
+ public HttpRedirectRestResponse(String url) {
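+ // sends a 301 status together with an HTML meta-refresh body pointing at the target URL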
+ super(RestStatus.MOVED_PERMANENTLY, "<head><meta http-equiv=\"refresh\" content=\"0; URL="+url+"\"></head>");
+ }
+
+ @Override
+ public String contentType() {
+ return "text/html";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestChannel.java b/src/main/java/org/elasticsearch/rest/RestChannel.java
new file mode 100644
index 0000000..85345db
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestChannel.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+/**
+ * A channel through which a REST response is sent back to the client.
+ */
+public interface RestChannel {
+
+ void sendResponse(RestResponse response);
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/RestController.java b/src/main/java/org/elasticsearch/rest/RestController.java
new file mode 100644
index 0000000..c90f006
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestController.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.support.RestUtils;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Registers REST handlers and filters and dispatches incoming REST requests to them.
+ */
+public class RestController extends AbstractLifecycleComponent<RestController> {
+
+ private final PathTrie<RestHandler> getHandlers = new PathTrie<RestHandler>(RestUtils.REST_DECODER);
+ private final PathTrie<RestHandler> postHandlers = new PathTrie<RestHandler>(RestUtils.REST_DECODER);
+ private final PathTrie<RestHandler> putHandlers = new PathTrie<RestHandler>(RestUtils.REST_DECODER);
+ private final PathTrie<RestHandler> deleteHandlers = new PathTrie<RestHandler>(RestUtils.REST_DECODER);
+ private final PathTrie<RestHandler> headHandlers = new PathTrie<RestHandler>(RestUtils.REST_DECODER);
+ private final PathTrie<RestHandler> optionsHandlers = new PathTrie<RestHandler>(RestUtils.REST_DECODER);
+
+ private final RestHandlerFilter handlerFilter = new RestHandlerFilter();
+
+ // non-volatile since the assumption is that pre-processors are registered on startup
+ private RestFilter[] filters = new RestFilter[0];
+
+ @Inject
+ public RestController(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ for (RestFilter filter : filters) {
+ filter.close();
+ }
+ }
+
+ /**
+ * Registers a pre-processor to be executed before the rest request is actually handled.
+ */
+ public synchronized void registerFilter(RestFilter preProcessor) {
+ RestFilter[] copy = new RestFilter[filters.length + 1];
+ System.arraycopy(filters, 0, copy, 0, filters.length);
+ copy[filters.length] = preProcessor;
+ Arrays.sort(copy, new Comparator<RestFilter>() {
+ @Override
+ public int compare(RestFilter o1, RestFilter o2) {
+ return o2.order() - o1.order();
+ }
+ });
+ filters = copy;
+ }
+
+ /**
+ * Registers a rest handler to be executed when the provided method and path match the request.
+ */
+ public void registerHandler(RestRequest.Method method, String path, RestHandler handler) {
+ switch (method) {
+ case GET:
+ getHandlers.insert(path, handler);
+ break;
+ case DELETE:
+ deleteHandlers.insert(path, handler);
+ break;
+ case POST:
+ postHandlers.insert(path, handler);
+ break;
+ case PUT:
+ putHandlers.insert(path, handler);
+ break;
+ case OPTIONS:
+ optionsHandlers.insert(path, handler);
+ break;
+ case HEAD:
+ headHandlers.insert(path, handler);
+ break;
+ default:
+ throw new ElasticsearchIllegalArgumentException("Can't handle [" + method + "] for path [" + path + "]");
+ }
+ }
+
+ /**
+ * Returns a filter chain to execute (if any filters are registered). If this method returns null,
+ * the request should simply be executed as usual.
+ */
+ @Nullable
+ public RestFilterChain filterChainOrNull(RestFilter executionFilter) {
+ if (filters.length == 0) {
+ return null;
+ }
+ return new ControllerFilterChain(executionFilter);
+ }
+
+ /**
+ * Returns a filter chain with the final filter being the provided filter.
+ */
+ public RestFilterChain filterChain(RestFilter executionFilter) {
+ return new ControllerFilterChain(executionFilter);
+ }
+
+ public void dispatchRequest(final RestRequest request, final RestChannel channel) {
+ if (filters.length == 0) {
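+ // fast path: no filters registered, invoke the matching handler directly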
+ try {
+ executeHandler(request, channel);
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1);
+ }
+ }
+ } else {
+ ControllerFilterChain filterChain = new ControllerFilterChain(handlerFilter);
+ filterChain.continueProcessing(request, channel);
+ }
+ }
+
+ void executeHandler(RestRequest request, RestChannel channel) {
+ final RestHandler handler = getHandler(request);
+ if (handler != null) {
+ handler.handleRequest(request, channel);
+ } else {
+ if (request.method() == RestRequest.Method.OPTIONS) {
+ // for an OPTIONS request, simply send OK by default (the Access-Control-Allow-Origin header gets added automatically)
+ StringRestResponse response = new StringRestResponse(OK);
+ channel.sendResponse(response);
+ } else {
+ channel.sendResponse(new StringRestResponse(BAD_REQUEST, "No handler found for uri [" + request.uri() + "] and method [" + request.method() + "]"));
+ }
+ }
+ }
+
+ private RestHandler getHandler(RestRequest request) {
+ String path = getPath(request);
+ RestRequest.Method method = request.method();
+ if (method == RestRequest.Method.GET) {
+ return getHandlers.retrieve(path, request.params());
+ } else if (method == RestRequest.Method.POST) {
+ return postHandlers.retrieve(path, request.params());
+ } else if (method == RestRequest.Method.PUT) {
+ return putHandlers.retrieve(path, request.params());
+ } else if (method == RestRequest.Method.DELETE) {
+ return deleteHandlers.retrieve(path, request.params());
+ } else if (method == RestRequest.Method.HEAD) {
+ return headHandlers.retrieve(path, request.params());
+ } else if (method == RestRequest.Method.OPTIONS) {
+ return optionsHandlers.retrieve(path, request.params());
+ } else {
+ return null;
+ }
+ }
+
+ private String getPath(RestRequest request) {
+ // we use rawPath since we don't want to decode it while processing the path resolution
+ // so we can handle things like:
+ // my_index/my_type/http%3A%2F%2Fwww.google.com
+ return request.rawPath();
+ }
+
+ class ControllerFilterChain implements RestFilterChain {
+
+ private final RestFilter executionFilter;
+
+ private volatile int index;
+
+ ControllerFilterChain(RestFilter executionFilter) {
+ this.executionFilter = executionFilter;
+ }
+
+ @Override
+ public void continueProcessing(RestRequest request, RestChannel channel) {
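+ // each call advances the chain by one filter; after the last filter, the execution filter (the actual handler) runs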
+ try {
+ int loc = index;
+ index++;
+ if (loc > filters.length) {
+ throw new ElasticsearchIllegalStateException("filter continueProcessing was called more than expected");
+ } else if (loc == filters.length) {
+ executionFilter.process(request, channel, this);
+ } else {
+ RestFilter preProcessor = filters[loc];
+ preProcessor.process(request, channel, this);
+ }
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1);
+ }
+ }
+ }
+ }
+
+ class RestHandlerFilter extends RestFilter {
+
+ @Override
+ public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) {
+ executeHandler(request, channel);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestFilter.java b/src/main/java/org/elasticsearch/rest/RestFilter.java
new file mode 100644
index 0000000..d9066f4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestFilter.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.CloseableComponent;
+
+/**
+ * A filter that allows filtering of rest operations.
+ */
+public abstract class RestFilter implements CloseableComponent {
+
+ /**
+ * Optionally, the order of the filter. Execution is done from the lowest value to the highest.
+ * It is good practice to make this configurable for the relevant filter.
+ */
+ public int order() {
+ return 0;
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ // a no op
+ }
+
+ /**
+ * Processes the rest request, using the channel to send a response or the filter chain to
+ * continue processing the request.
+ */
+ public abstract void process(RestRequest request, RestChannel channel, RestFilterChain filterChain);
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestFilterChain.java b/src/main/java/org/elasticsearch/rest/RestFilterChain.java
new file mode 100644
index 0000000..be14d25
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestFilterChain.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+/**
+ * A filter chain that allows processing of the rest request to continue.
+ */
+public interface RestFilterChain {
+
+ /**
+ * Continue processing the request. Should only be called if a response has not been sent
+ * through the channel.
+ */
+ void continueProcessing(RestRequest request, RestChannel channel);
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestHandler.java b/src/main/java/org/elasticsearch/rest/RestHandler.java
new file mode 100644
index 0000000..c68e6b6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestHandler.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+/**
+ * Handler for a rest request.
+ */
+public interface RestHandler {
+
+ void handleRequest(RestRequest request, RestChannel channel);
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/RestModule.java b/src/main/java/org/elasticsearch/rest/RestModule.java
new file mode 100644
index 0000000..a5d6e1b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestModule.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.action.RestActionModule;
+
+import java.util.List;
+
+/**
+ * Module that binds the {@link RestController} and all rest actions, including plugin-provided ones.
+ */
+public class RestModule extends AbstractModule {
+
+ private final Settings settings;
+ private List<Class<? extends BaseRestHandler>> restPluginsActions = Lists.newArrayList();
+
+ public void addRestAction(Class<? extends BaseRestHandler> restAction) {
+ restPluginsActions.add(restAction);
+ }
+
+ public RestModule(Settings settings) {
+ this.settings = settings;
+ }
+
+
+ @Override
+ protected void configure() {
+ bind(RestController.class).asEagerSingleton();
+ new RestActionModule(restPluginsActions).configure(binder());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestRequest.java b/src/main/java/org/elasticsearch/rest/RestRequest.java
new file mode 100644
index 0000000..d45013e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestRequest.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.rest.support.RestUtils;
+
+import java.net.SocketAddress;
+import java.util.Map;
+
+import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
+import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
+
+/**
+ * An incoming rest request, with typed access to parameters, headers and content.
+ */
+public abstract class RestRequest implements ToXContent.Params {
+
+ public enum Method {
+ GET, POST, PUT, DELETE, OPTIONS, HEAD
+ }
+
+ public abstract Method method();
+
+ /**
+ * The uri of the rest request, with the query string.
+ */
+ public abstract String uri();
+
+ /**
+ * The non-decoded, raw path provided.
+ */
+ public abstract String rawPath();
+
+ /**
+ * The path part of the URI (without the query string), decoded.
+ */
+ public final String path() {
+ return RestUtils.decodeComponent(rawPath());
+ }
+
+ public abstract boolean hasContent();
+
+ /**
+ * Whether the content byte array is unsafe to use on other threads
+ * (<tt>true</tt>), for example because the underlying buffer may be reused.
+ */
+ public abstract boolean contentUnsafe();
+
+ public abstract BytesReference content();
+
+ public abstract String header(String name);
+
+ public abstract Iterable<Map.Entry<String, String>> headers();
+
+ @Nullable
+ public SocketAddress getRemoteAddress() {
+ return null;
+ }
+
+ @Nullable
+ public SocketAddress getLocalAddress() {
+ return null;
+ }
+
+ public abstract boolean hasParam(String key);
+
+ @Override
+ public abstract String param(String key);
+
+ public abstract Map<String, String> params();
+
+ public float paramAsFloat(String key, float defaultValue) {
+ String sValue = param(key);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Float.parseFloat(sValue);
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to parse float parameter [" + key + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ public int paramAsInt(String key, int defaultValue) {
+ String sValue = param(key);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Integer.parseInt(sValue);
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ public long paramAsLong(String key, long defaultValue) {
+ String sValue = param(key);
+ if (sValue == null) {
+ return defaultValue;
+ }
+ try {
+ return Long.parseLong(sValue);
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchIllegalArgumentException("Failed to parse long parameter [" + key + "] with value [" + sValue + "]", e);
+ }
+ }
+
+ @Override
+ public boolean paramAsBoolean(String key, boolean defaultValue) {
+ return Booleans.parseBoolean(param(key), defaultValue);
+ }
+
+ @Override
+ public Boolean paramAsBoolean(String key, Boolean defaultValue) {
+ return Booleans.parseBoolean(param(key), defaultValue);
+ }
+
+ @Override @Deprecated
+ public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) {
+ return paramAsBoolean(key, defaultValue);
+ }
+
+ public TimeValue paramAsTime(String key, TimeValue defaultValue) {
+ return parseTimeValue(param(key), defaultValue);
+ }
+
+ public ByteSizeValue paramAsSize(String key, ByteSizeValue defaultValue) {
+ return parseBytesSizeValue(param(key), defaultValue);
+ }
+
+ public String[] paramAsStringArray(String key, String[] defaultValue) {
+ String value = param(key);
+ if (value == null) {
+ return defaultValue;
+ }
+ return Strings.splitStringByCommaToArray(value);
+ }
+
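+ /**
+ * Like {@link #paramAsStringArray(String, String[])}, but treats <tt>_all</tt>
+ * or a sole wildcard value as "match everything" and returns an empty array in
+ * that case.
+ */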
+ public String[] paramAsStringArrayOrEmptyIfAll(String key) {
+ String[] params = paramAsStringArray(key, Strings.EMPTY_ARRAY);
+ if (Strings.isAllOrWildcard(params)) {
+ return Strings.EMPTY_ARRAY;
+ }
+ return params;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestResponse.java b/src/main/java/org/elasticsearch/rest/RestResponse.java
new file mode 100644
index 0000000..4b3ca6a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestResponse.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A REST response: HTTP status, content type and body bytes, optionally
+ * surrounded by prefix and suffix content (used, for example, to wrap a JSONP
+ * callback around the body).
+ */
+public interface RestResponse {
+
+ /**
+ * Can the content byte[] be used only with this thread (<tt>false</tt>), or by any thread (<tt>true</tt>).
+ */
+ boolean contentThreadSafe();
+
+ String contentType();
+
+ /**
+ * Returns the actual content. Note: use {@link #contentLength()} to determine
+ * how many bytes of the returned array are valid.
+ */
+ byte[] content() throws IOException;
+
+ /**
+ * The content length.
+ */
+ int contentLength() throws IOException;
+
+ int contentOffset() throws IOException;
+
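+ /**
+ * Optional content emitted before {@link #content()}, e.g. to open a JSONP
+ * callback wrapper; may be <tt>null</tt> when there is no prefix.
+ */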
+ byte[] prefixContent();
+
+ int prefixContentLength();
+
+ int prefixContentOffset();
+
+ byte[] suffixContent();
+
+ int suffixContentLength();
+
+ int suffixContentOffset();
+
+ RestStatus status();
+
+ void addHeader(String name, String value);
+
+ /**
+ * @return The custom headers or null if none have been set
+ */
+ Map<String, List<String>> getHeaders();
+}
diff --git a/src/main/java/org/elasticsearch/rest/RestStatus.java b/src/main/java/org/elasticsearch/rest/RestStatus.java
new file mode 100644
index 0000000..e5a6663
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/RestStatus.java
@@ -0,0 +1,493 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+public enum RestStatus {
+ /**
+ * The client SHOULD continue with its request. This interim response is used to inform the client that the
+ * initial part of the request has been received and has not yet been rejected by the server. The client
+ * SHOULD continue by sending the remainder of the request or, if the request has already been completed,
+ * ignore this response. The server MUST send a final response after the request has been completed.
+ */
+ CONTINUE(100),
+ /**
+ * The server understands and is willing to comply with the client's request, via the Upgrade message header field
+ * (section 14.42), for a change in the application protocol being used on this connection. The server will
+ * switch protocols to those defined by the response's Upgrade header field immediately after the empty line
+ * which terminates the 101 response.
+ */
+ SWITCHING_PROTOCOLS(101),
+ /**
+ * The request has succeeded. The information returned with the response is dependent on the method
+ * used in the request, for example:
+ * <ul>
+ * <li>GET: an entity corresponding to the requested resource is sent in the response;</li>
+ * <li>HEAD: the entity-header fields corresponding to the requested resource are sent in the response without any message-body;</li>
+ * <li>POST: an entity describing or containing the result of the action;</li>
+ * <li>TRACE: an entity containing the request message as received by the end server.</li>
+ * </ul>
+ */
+ OK(200),
+ /**
+ * The request has been fulfilled and resulted in a new resource being created. The newly created resource can
+ * be referenced by the URI(s) returned in the entity of the response, with the most specific URI for the
+ * resource given by a Location header field. The response SHOULD include an entity containing a list of resource
+ * characteristics and location(s) from which the user or user agent can choose the one most appropriate. The
+ * entity format is specified by the media type given in the Content-Type header field. The origin server MUST
+ * create the resource before returning the 201 status code. If the action cannot be carried out immediately, the
+ * server SHOULD respond with 202 (Accepted) response instead.
+ * <p/>
+ * <p>A 201 response MAY contain an ETag response header field indicating the current value of the entity tag
+ * for the requested variant just created, see section 14.19.
+ */
+ CREATED(201),
+ /**
+ * The request has been accepted for processing, but the processing has not been completed. The request might
+ * or might not eventually be acted upon, as it might be disallowed when processing actually takes place. There
+ * is no facility for re-sending a status code from an asynchronous operation such as this.
+ * <p/>
+ * <p>The 202 response is intentionally non-committal. Its purpose is to allow a server to accept a request for
+ * some other process (perhaps a batch-oriented process that is only run once per day) without requiring that
+ * the user agent's connection to the server persist until the process is completed. The entity returned with
+ * this response SHOULD include an indication of the request's current status and either a pointer to a status
+ * monitor or some estimate of when the user can expect the request to be fulfilled.
+ */
+ ACCEPTED(202),
+ /**
+ * The returned meta information in the entity-header is not the definitive set as available from the origin
+ * server, but is gathered from a local or a third-party copy. The set presented MAY be a subset or superset
+ * of the original version. For example, including local annotation information about the resource might
+ * result in a superset of the meta information known by the origin server. Use of this response code
+ * is not required and is only appropriate when the response would otherwise be 200 (OK).
+ */
+ NON_AUTHORITATIVE_INFORMATION(203),
+ /**
+ * The server has fulfilled the request but does not need to return an entity-body, and might want to return
+ * updated meta information. The response MAY include new or updated meta information in the form of
+ * entity-headers, which if present SHOULD be associated with the requested variant.
+ * <p/>
+ * <p>If the client is a user agent, it SHOULD NOT change its document view from that which caused the request
+ * to be sent. This response is primarily intended to allow input for actions to take place without causing a
+ * change to the user agent's active document view, although any new or updated meta information SHOULD be
+ * applied to the document currently in the user agent's active view.
+ * <p/>
+ * <p>The 204 response MUST NOT include a message-body, and thus is always terminated by the first empty
+ * line after the header fields.
+ */
+ NO_CONTENT(204),
+ /**
+ * The server has fulfilled the request and the user agent SHOULD reset the document view which caused the
+ * request to be sent. This response is primarily intended to allow input for actions to take place via user
+ * input, followed by a clearing of the form in which the input is given so that the user can easily initiate
+ * another input action. The response MUST NOT include an entity.
+ */
+ RESET_CONTENT(205),
+ /**
+ * The server has fulfilled the partial GET request for the resource. The request MUST have included a Range
+ * header field (section 14.35) indicating the desired range, and MAY have included an If-Range header
+ * field (section 14.27) to make the request conditional.
+ * <p/>
+ * <p>The response MUST include the following header fields:
+ * <ul>
+ * <li>Either a Content-Range header field (section 14.16) indicating the range included with this response,
+ * or a multipart/byteranges Content-Type including Content-Range fields for each part. If a Content-Length
+ * header field is present in the response, its value MUST match the actual number of OCTETs transmitted in
+ * the message-body.</li>
+ * <li>Date</li>
+ * <li>ETag and/or Content-Location, if the header would have been sent in a 200 response to the same request</li>
+ * <li>Expires, Cache-Control, and/or Vary, if the field-value might differ from that sent in any previous
+ * response for the same variant</li>
+ * </ul>
+ * <p/>
+ * <p>If the 206 response is the result of an If-Range request that used a strong cache validator
+ * (see section 13.3.3), the response SHOULD NOT include other entity-headers. If the response is the result
+ * of an If-Range request that used a weak validator, the response MUST NOT include other entity-headers;
+ * this prevents inconsistencies between cached entity-bodies and updated headers. Otherwise, the response MUST
+ * include all of the entity-headers that would have been returned with a 200 (OK) response to the same request.
+ * <p/>
+ * <p>A cache MUST NOT combine a 206 response with other previously cached content if the ETag or Last-Modified
+ * headers do not match exactly, see 13.5.4.
+ * <p/>
+ * <p>A cache that does not support the Range and Content-Range headers MUST NOT cache 206 (Partial) responses.
+ */
+ PARTIAL_CONTENT(206),
+ /**
+ * The 207 (Multi-Status) status code provides status for multiple independent operations (see Section 13 for
+ * more information).
+ * <p/>
+ * <p>A Multi-Status response conveys information about multiple resources in situations where multiple status
+ * codes might be appropriate. The default Multi-Status response body is a text/xml or application/xml HTTP
+ * entity with a 'multistatus' root element. Further elements contain 200, 300, 400, and 500 series status codes
+ * generated during the method invocation. 100 series status codes SHOULD NOT be recorded in a 'response'
+ * XML element.
+ * <p/>
+ * <p>Although '207' is used as the overall response status code, the recipient needs to consult the contents
+ * of the multistatus response body for further information about the success or failure of the method execution.
+ * The response MAY be used in success, partial success and also in failure situations.
+ * <p/>
+ * <p>The 'multistatus' root element holds zero or more 'response' elements in any order, each with
+ * information about an individual resource. Each 'response' element MUST have an 'href' element
+ * to identify the resource.
+ */
+ MULTI_STATUS(207),
+ /**
+ * The requested resource corresponds to any one of a set of representations, each with its own specific
+ * location, and agent-driven negotiation information (section 12) is being provided so that the user (or user
+ * agent) can select a preferred representation and redirect its request to that location.
+ * <p/>
+ * <p>Unless it was a HEAD request, the response SHOULD include an entity containing a list of resource
+ * characteristics and location(s) from which the user or user agent can choose the one most appropriate.
+ * The entity format is specified by the media type given in the Content-Type header field. Depending upon the
+ * format and the capabilities of the user agent, selection of the most appropriate choice MAY be performed
+ * automatically. However, this specification does not define any standard for such automatic selection.
+ * <p/>
+ * <p>If the server has a preferred choice of representation, it SHOULD include the specific URI for that
+ * representation in the Location field; user agents MAY use the Location field value for automatic redirection.
+ * This response is cacheable unless indicated otherwise.
+ */
+ MULTIPLE_CHOICES(300),
+ /**
+ * The requested resource has been assigned a new permanent URI and any future references to this resource
+ * SHOULD use one of the returned URIs. Clients with link editing capabilities ought to automatically re-link
+ * references to the Request-URI to one or more of the new references returned by the server, where possible.
+ * This response is cacheable unless indicated otherwise.
+ * <p/>
+ * <p>The new permanent URI SHOULD be given by the Location field in the response. Unless the request method
+ * was HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s).
+ * <p/>
+ * <p>If the 301 status code is received in response to a request other than GET or HEAD, the user agent
+ * MUST NOT automatically redirect the request unless it can be confirmed by the user, since this might change
+ * the conditions under which the request was issued.
+ */
+ MOVED_PERMANENTLY(301),
+ /**
+ * The requested resource resides temporarily under a different URI. Since the redirection might be altered on
+ * occasion, the client SHOULD continue to use the Request-URI for future requests. This response is only
+ * cacheable if indicated by a Cache-Control or Expires header field.
+ * <p/>
+ * <p>The temporary URI SHOULD be given by the Location field in the response. Unless the request method was
+ * HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s).
+ * <p/>
+ * <p>If the 302 status code is received in response to a request other than GET or HEAD, the user agent
+ * MUST NOT automatically redirect the request unless it can be confirmed by the user, since this might change
+ * the conditions under which the request was issued.
+ */
+ FOUND(302),
+ /**
+ * The response to the request can be found under a different URI and SHOULD be retrieved using a GET method on
+ * that resource. This method exists primarily to allow the output of a POST-activated script to redirect the
+ * user agent to a selected resource. The new URI is not a substitute reference for the originally requested
+ * resource. The 303 response MUST NOT be cached, but the response to the second (redirected) request might be
+ * cacheable.
+ * <p/>
+ * <p>The different URI SHOULD be given by the Location field in the response. Unless the request method was
+ * HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s).
+ */
+ SEE_OTHER(303),
+ /**
+ * If the client has performed a conditional GET request and access is allowed, but the document has not been
+ * modified, the server SHOULD respond with this status code. The 304 response MUST NOT contain a message-body,
+ * and thus is always terminated by the first empty line after the header fields.
+ * <p/>
+ * <p>The response MUST include the following header fields:
+ * <ul>
+ * <li>Date, unless its omission is required by section 14.18.1
+ * If a clockless origin server obeys these rules, and proxies and clients add their own Date to any
+ * response received without one (as already specified by [RFC 2068], section 14.19), caches will operate
+ * correctly.
+ * </li>
+ * <li>ETag and/or Content-Location, if the header would have been sent in a 200 response to the same request</li>
+ * <li>Expires, Cache-Control, and/or Vary, if the field-value might differ from that sent in any previous
+ * response for the same variant</li>
+ * </ul>
+ * <p/>
+ * <p>If the conditional GET used a strong cache validator (see section 13.3.3), the response SHOULD NOT include
+ * other entity-headers. Otherwise (i.e., the conditional GET used a weak validator), the response MUST NOT
+ * include other entity-headers; this prevents inconsistencies between cached entity-bodies and updated headers.
+ * <p/>
+ * <p>If a 304 response indicates an entity not currently cached, then the cache MUST disregard the response
+ * and repeat the request without the conditional.
+ * <p/>
+ * <p>If a cache uses a received 304 response to update a cache entry, the cache MUST update the entry to
+ * reflect any new field values given in the response.
+ */
+ NOT_MODIFIED(304),
+ /**
+ * The requested resource MUST be accessed through the proxy given by the Location field. The Location field
+ * gives the URI of the proxy. The recipient is expected to repeat this single request via the proxy.
+ * 305 responses MUST only be generated by origin servers.
+ */
+ USE_PROXY(305),
+ /**
+ * The requested resource resides temporarily under a different URI. Since the redirection MAY be altered on
+ * occasion, the client SHOULD continue to use the Request-URI for future requests. This response is only
+ * cacheable if indicated by a Cache-Control or Expires header field.
+ * <p/>
+ * <p>The temporary URI SHOULD be given by the Location field in the response. Unless the request method was
+ * HEAD, the entity of the response SHOULD contain a short hypertext note with a hyperlink to the new URI(s),
+ * since many pre-HTTP/1.1 user agents do not understand the 307 status. Therefore, the note SHOULD contain
+ * the information necessary for a user to repeat the original request on the new URI.
+ * <p/>
+ * <p>If the 307 status code is received in response to a request other than GET or HEAD, the user agent MUST NOT
+ * automatically redirect the request unless it can be confirmed by the user, since this might change the
+ * conditions under which the request was issued.
+ */
+ TEMPORARY_REDIRECT(307),
+ /**
+ * The request could not be understood by the server due to malformed syntax. The client SHOULD NOT repeat the
+ * request without modifications.
+ */
+ BAD_REQUEST(400),
+ /**
+ * The request requires user authentication. The response MUST include a WWW-Authenticate header field
+ * (section 14.47) containing a challenge applicable to the requested resource. The client MAY repeat the request
+ * with a suitable Authorization header field (section 14.8). If the request already included Authorization
+ * credentials, then the 401 response indicates that authorization has been refused for those credentials.
+ * If the 401 response contains the same challenge as the prior response, and the user agent has already attempted
+ * authentication at least once, then the user SHOULD be presented the entity that was given in the response,
+ * since that entity might include relevant diagnostic information. HTTP access authentication is explained in
+ * "HTTP Authentication: Basic and Digest Access Authentication" [43].
+ */
+ UNAUTHORIZED(401),
+ /**
+ * This code is reserved for future use.
+ */
+ PAYMENT_REQUIRED(402),
+ /**
+ * The server understood the request, but is refusing to fulfill it. Authorization will not help and the request
+ * SHOULD NOT be repeated. If the request method was not HEAD and the server wishes to make public why the
+ * request has not been fulfilled, it SHOULD describe the reason for the refusal in the entity. If the server
+ * does not wish to make this information available to the client, the status code 404 (Not Found) can be used
+ * instead.
+ */
+ FORBIDDEN(403),
+ /**
+ * The server has not found anything matching the Request-URI. No indication is given of whether the condition
+ * is temporary or permanent. The 410 (Gone) status code SHOULD be used if the server knows, through some
+ * internally configurable mechanism, that an old resource is permanently unavailable and has no forwarding
+ * address. This status code is commonly used when the server does not wish to reveal exactly why the request
+ * has been refused, or when no other response is applicable.
+ */
+ NOT_FOUND(404),
+ /**
+ * The method specified in the Request-Line is not allowed for the resource identified by the Request-URI.
+ * The response MUST include an Allow header containing a list of valid methods for the requested resource.
+ */
+ METHOD_NOT_ALLOWED(405),
+ /**
+ * The resource identified by the request is only capable of generating response entities which have content
+ * characteristics not acceptable according to the accept headers sent in the request.
+ * <p/>
+ * <p>Unless it was a HEAD request, the response SHOULD include an entity containing a list of available entity
+ * characteristics and location(s) from which the user or user agent can choose the one most appropriate.
+ * The entity format is specified by the media type given in the Content-Type header field. Depending upon the
+ * format and the capabilities of the user agent, selection of the most appropriate choice MAY be performed
+ * automatically. However, this specification does not define any standard for such automatic selection.
+ * <p/>
+ * <p>Note: HTTP/1.1 servers are allowed to return responses which are not acceptable according to the accept
+ * headers sent in the request. In some cases, this may even be preferable to sending a 406 response. User
+ * agents are encouraged to inspect the headers of an incoming response to determine if it is acceptable.
+ * <p/>
+ * <p>If the response could be unacceptable, a user agent SHOULD temporarily stop receipt of more data and query
+ * the user for a decision on further actions.
+ */
+ NOT_ACCEPTABLE(406),
+ /**
+ * This code is similar to 401 (Unauthorized), but indicates that the client must first authenticate itself with
+ * the proxy. The proxy MUST return a Proxy-Authenticate header field (section 14.33) containing a challenge
+ * applicable to the proxy for the requested resource. The client MAY repeat the request with a suitable
+ * Proxy-Authorization header field (section 14.34). HTTP access authentication is explained in
+ * "HTTP Authentication: Basic and Digest Access Authentication" [43].
+ */
+ PROXY_AUTHENTICATION(407),
+ /**
+ * The client did not produce a request within the time that the server was prepared to wait. The client MAY
+ * repeat the request without modifications at any later time.
+ */
+ REQUEST_TIMEOUT(408),
+ /**
+ * The request could not be completed due to a conflict with the current state of the resource. This code is
+ * only allowed in situations where it is expected that the user might be able to resolve the conflict and
+ * resubmit the request. The response body SHOULD include enough information for the user to recognize the
+ * source of the conflict. Ideally, the response entity would include enough information for the user or user
+ * agent to fix the problem; however, that might not be possible and is not required.
+ * <p/>
+ * <p>Conflicts are most likely to occur in response to a PUT request. For example, if versioning were being
+ * used and the entity being PUT included changes to a resource which conflict with those made by an earlier
+ * (third-party) request, the server might use the 409 response to indicate that it can't complete the request.
+ * In this case, the response entity would likely contain a list of the differences between the two versions in
+ * a format defined by the response Content-Type.
+ */
+ CONFLICT(409),
+ /**
+ * The requested resource is no longer available at the server and no forwarding address is known. This condition
+ * is expected to be considered permanent. Clients with link editing capabilities SHOULD delete references to
+ * the Request-URI after user approval. If the server does not know, or has no facility to determine, whether or
+ * not the condition is permanent, the status code 404 (Not Found) SHOULD be used instead. This response is
+ * cacheable unless indicated otherwise.
+ * <p/>
+ * <p>The 410 response is primarily intended to assist the task of web maintenance by notifying the recipient
+ * that the resource is intentionally unavailable and that the server owners desire that remote links to that
+ * resource be removed. Such an event is common for limited-time, promotional services and for resources belonging
+ * to individuals no longer working at the server's site. It is not necessary to mark all permanently unavailable
+ * resources as "gone" or to keep the mark for any length of time -- that is left to the discretion of the server
+ * owner.
+ */
+ GONE(410),
+ /**
+ * The server refuses to accept the request without a defined Content-Length. The client MAY repeat the request
+ * if it adds a valid Content-Length header field containing the length of the message-body in the request message.
+ */
+ LENGTH_REQUIRED(411),
+ /**
+ * The precondition given in one or more of the request-header fields evaluated to false when it was tested on
+ * the server. This response code allows the client to place preconditions on the current resource metainformation
+ * (header field data) and thus prevent the requested method from being applied to a resource other than the one
+ * intended.
+ */
+ PRECONDITION_FAILED(412),
+ /**
+ * The server is refusing to process a request because the request entity is larger than the server is willing
+ * or able to process. The server MAY close the connection to prevent the client from continuing the request.
+ * <p/>
+ * <p>If the condition is temporary, the server SHOULD include a Retry-After header field to indicate that it
+ * is temporary and after what time the client MAY try again.
+ */
+ REQUEST_ENTITY_TOO_LARGE(413),
+ /**
+ * The server is refusing to service the request because the Request-URI is longer than the server is willing
+ * to interpret. This rare condition is only likely to occur when a client has improperly converted a POST
+ * request to a GET request with long query information, when the client has descended into a URI "black hole"
+ * of redirection (e.g., a redirected URI prefix that points to a suffix of itself), or when the server is
+ * under attack by a client attempting to exploit security holes present in some servers using fixed-length
+ * buffers for reading or manipulating the Request-URI.
+ */
+ REQUEST_URI_TOO_LONG(414),
+ /**
+ * The server is refusing to service the request because the entity of the request is in a format not supported
+ * by the requested resource for the requested method.
+ */
+ UNSUPPORTED_MEDIA_TYPE(415),
+ /**
+ * A server SHOULD return a response with this status code if a request included a Range request-header field
+ * (section 14.35), and none of the range-specifier values in this field overlap the current extent of the
+ * selected resource, and the request did not include an If-Range request-header field. (For byte-ranges, this
+ * means that the first-byte-pos of all of the byte-range-spec values were greater than the current length of
+ * the selected resource.)
+ * <p/>
+ * <p>When this status code is returned for a byte-range request, the response SHOULD include a Content-Range
+ * entity-header field specifying the current length of the selected resource (see section 14.16). This
+ * response MUST NOT use the multipart/byteranges content-type.
+ */
+ REQUESTED_RANGE_NOT_SATISFIED(416),
+ /**
+ * The expectation given in an Expect request-header field (see section 14.20) could not be met by this server,
+ * or, if the server is a proxy, the server has unambiguous evidence that the request could not be met by the
+ * next-hop server.
+ */
+ EXPECTATION_FAILED(417),
+ /**
+ * The 422 (Unprocessable Entity) status code means the server understands the content type of the request
+ * entity (hence a 415(Unsupported Media Type) status code is inappropriate), and the syntax of the request
+ * entity is correct (thus a 400 (Bad Request) status code is inappropriate) but was unable to process the
+ * contained instructions. For example, this error condition may occur if an XML request body contains
+ * well-formed (i.e., syntactically correct), but semantically erroneous, XML instructions.
+ */
+ UNPROCESSABLE_ENTITY(422),
+ /**
+ * The 423 (Locked) status code means the source or destination resource of a method is locked. This response
+ * SHOULD contain an appropriate precondition or postcondition code, such as 'lock-token-submitted' or
+ * 'no-conflicting-lock'.
+ */
+ LOCKED(423),
+ /**
+ * The 424 (Failed Dependency) status code means that the method could not be performed on the resource because
+ * the requested action depended on another action and that action failed. For example, if a command in a
+ * PROPPATCH method fails, then, at minimum, the rest of the commands will also fail with 424 (Failed Dependency).
+ */
+ FAILED_DEPENDENCY(424),
+ /**
+ * The server encountered an unexpected condition which prevented it from fulfilling the request.
+ */
+ INTERNAL_SERVER_ERROR(500),
+ /**
+ * The server does not support the functionality required to fulfill the request. This is the appropriate
+ * response when the server does not recognize the request method and is not capable of supporting it for any
+ * resource.
+ */
+ NOT_IMPLEMENTED(501),
+ /**
+ * The server, while acting as a gateway or proxy, received an invalid response from the upstream server it
+ * accessed in attempting to fulfill the request.
+ */
+ BAD_GATEWAY(502),
+ /**
+ * The server is currently unable to handle the request due to a temporary overloading or maintenance of the
+ * server. The implication is that this is a temporary condition which will be alleviated after some delay.
+ * If known, the length of the delay MAY be indicated in a Retry-After header. If no Retry-After is given,
+ * the client SHOULD handle the response as it would for a 500 response.
+ */
+ SERVICE_UNAVAILABLE(503),
+ /**
+ * The server, while acting as a gateway or proxy, did not receive a timely response from the upstream server
+ * specified by the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server (e.g. DNS) it needed to access
+ * in attempting to complete the request.
+ */
+ GATEWAY_TIMEOUT(504),
+ /**
+ * The server does not support, or refuses to support, the HTTP protocol version that was used in the request
+ * message. The server is indicating that it is unable or unwilling to complete the request using the same major
+ * version as the client, as described in section 3.1, other than with this error message. The response SHOULD
+ * contain an entity describing why that version is not supported and what other protocols are supported by
+ * that server.
+ */
+ HTTP_VERSION_NOT_SUPPORTED(505),
+ /**
+ * The 507 (Insufficient Storage) status code means the method could not be performed on the resource because
+ * the server is unable to store the representation needed to successfully complete the request. This condition
+ * is considered to be temporary. If the request that received this status code was the result of a user action,
+ * the request MUST NOT be repeated until it is requested by a separate user action.
+ */
+ INSUFFICIENT_STORAGE(507);
+
+ private final int status;
+
+ RestStatus(int status) {
+ this.status = status;
+ }
+
+ public int getStatus() {
+ return status;
+ }
+
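+ // Status is serialized by enum name rather than by numeric code, so the wire
+ // format stays stable even if a numeric value is corrected or added.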
+ public static RestStatus readFrom(StreamInput in) throws IOException {
+ return RestStatus.valueOf(in.readString());
+ }
+
+ public static void writeTo(StreamOutput out, RestStatus status) throws IOException {
+ out.writeString(status.name());
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/StringRestResponse.java b/src/main/java/org/elasticsearch/rest/StringRestResponse.java
new file mode 100644
index 0000000..ef310ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/StringRestResponse.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+
+/**
+ * A plain-text REST response built from a {@link String}, converted to UTF-8
+ * via Lucene's {@link UnicodeUtil}.
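+ * <p>Illustrative use from a handler (the <tt>channel</tt> is assumed to be a
+ * {@link RestChannel}):
+ * <pre>
+ *     channel.sendResponse(new StringRestResponse(RestStatus.OK, "acknowledged"));
+ * </pre>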
+ */
+public class StringRestResponse extends Utf8RestResponse {
+
+ public StringRestResponse(RestStatus status) {
+ super(status);
+ }
+
+ public StringRestResponse(RestStatus status, String content) {
+ super(status, convert(content));
+ }
+
+ private static BytesRef convert(String content) {
+ BytesRef result = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(content, 0, content.length(), result);
+ return result;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/Utf8RestResponse.java b/src/main/java/org/elasticsearch/rest/Utf8RestResponse.java
new file mode 100644
index 0000000..6d9e8ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/Utf8RestResponse.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * An http response that is built on top of {@link org.apache.lucene.util.BytesRef}.
+ * <p/>
+ * <p>Note: this class assumes exclusive ownership of the utf8 result it is
+ * given; since the bytes are never mutated afterwards, the content is reported
+ * as thread safe.
+ */
+public class Utf8RestResponse extends AbstractRestResponse implements RestResponse {
+
+ public static final BytesRef EMPTY = new BytesRef();
+
+ private final RestStatus status;
+
+ private final BytesRef utf8Result;
+
+ private final BytesRef prefixUtf8Result;
+
+ private final BytesRef suffixUtf8Result;
+
+ public Utf8RestResponse(RestStatus status) {
+ this(status, EMPTY);
+ }
+
+ public Utf8RestResponse(RestStatus status, BytesRef utf8Result) {
+ this(status, utf8Result, null, null);
+ }
+
+ public Utf8RestResponse(RestStatus status, BytesRef utf8Result,
+ BytesRef prefixUtf8Result, BytesRef suffixUtf8Result) {
+ this.status = status;
+ this.utf8Result = utf8Result;
+ this.prefixUtf8Result = prefixUtf8Result;
+ this.suffixUtf8Result = suffixUtf8Result;
+ }
+
+ @Override
+ public boolean contentThreadSafe() {
+ return true;
+ }
+
+ @Override
+ public String contentType() {
+ return "text/plain; charset=UTF-8";
+ }
+
+ @Override
+ public byte[] content() {
+ return utf8Result.bytes;
+ }
+
+ @Override
+ public int contentLength() {
+ return utf8Result.length;
+ }
+
+ @Override
+ public int contentOffset() {
+ return utf8Result.offset;
+ }
+
+ @Override
+ public RestStatus status() {
+ return status;
+ }
+
+ @Override
+ public byte[] prefixContent() {
+ return prefixUtf8Result != null ? prefixUtf8Result.bytes : null;
+ }
+
+ @Override
+ public int prefixContentLength() {
+ return prefixUtf8Result != null ? prefixUtf8Result.length : 0;
+ }
+
+ @Override
+ public int prefixContentOffset() {
+ return prefixUtf8Result != null ? prefixUtf8Result.offset : 0;
+ }
+
+ @Override
+ public byte[] suffixContent() {
+ return suffixUtf8Result != null ? suffixUtf8Result.bytes : null;
+ }
+
+ @Override
+ public int suffixContentLength() {
+ return suffixUtf8Result != null ? suffixUtf8Result.length : 0;
+ }
+
+ @Override
+ public int suffixContentOffset() {
+ return suffixUtf8Result != null ? suffixUtf8Result.offset : 0;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/XContentRestResponse.java b/src/main/java/org/elasticsearch/rest/XContentRestResponse.java
new file mode 100644
index 0000000..ce4cd55
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/XContentRestResponse.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A REST response whose body is produced by an {@link XContentBuilder}; when
+ * the request carries a <tt>callback</tt> parameter the body is wrapped as a
+ * JSONP invocation.
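+ * <p>For example, a request with <tt>?callback=cb</tt> yields a body of the
+ * form <tt>cb({...});</tt>.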
+ */
+public class XContentRestResponse extends AbstractRestResponse {
+
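+ // Pre-encoded ");" used to close a JSONP wrapper; built once because the
+ // suffix never varies.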
+ private static final byte[] END_JSONP;
+
+ static {
+ BytesRef U_END_JSONP = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(");", 0, ");".length(), U_END_JSONP);
+ END_JSONP = new byte[U_END_JSONP.length];
+ System.arraycopy(U_END_JSONP.bytes, U_END_JSONP.offset, END_JSONP, 0, U_END_JSONP.length);
+ }
+
+ private final BytesRef prefixUtf8Result;
+
+ private final RestStatus status;
+
+ private final XContentBuilder builder;
+
+ public XContentRestResponse(RestRequest request, RestStatus status, XContentBuilder builder) throws IOException {
+ if (request == null) {
+ throw new ElasticsearchIllegalArgumentException("request must be set");
+ }
+ this.builder = builder;
+ this.status = status;
+ this.prefixUtf8Result = startJsonp(request);
+ }
+
+ public XContentBuilder builder() {
+ return this.builder;
+ }
+
+ @Override
+ public String contentType() {
+ return builder.contentType().restContentType();
+ }
+
+ @Override
+ public boolean contentThreadSafe() {
+ return true;
+ }
+
+ @Override
+ public byte[] content() throws IOException {
+ return builder.bytes().array();
+ }
+
+ @Override
+ public int contentLength() throws IOException {
+ return builder.bytes().length();
+ }
+
+ @Override
+ public int contentOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public RestStatus status() {
+ return this.status;
+ }
+
+ @Override
+ public byte[] prefixContent() {
+ if (prefixUtf8Result != null) {
+ return prefixUtf8Result.bytes;
+ }
+ return null;
+ }
+
+ @Override
+ public int prefixContentLength() {
+ if (prefixUtf8Result != null) {
+ return prefixUtf8Result.length;
+ }
+ return 0;
+ }
+
+ @Override
+ public int prefixContentOffset() {
+ if (prefixUtf8Result != null) {
+ return prefixUtf8Result.offset;
+ }
+ return 0;
+ }
+
+ @Override
+ public byte[] suffixContent() {
+ if (prefixUtf8Result != null) {
+ return END_JSONP;
+ }
+ return null;
+ }
+
+ @Override
+ public int suffixContentLength() {
+ if (prefixUtf8Result != null) {
+ return END_JSONP.length;
+ }
+ return 0;
+ }
+
+ @Override
+ public int suffixContentOffset() {
+ return 0;
+ }
+
+ private static BytesRef startJsonp(RestRequest request) {
+ String callback = request.param("callback");
+ if (callback == null) {
+ return null;
+ }
+ final BytesRef result = new BytesRef();
+ UnicodeUtil.UTF16toUTF8(callback, 0, callback.length(), result);
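+ // UTF16toUTF8 allocates up to 4 bytes per UTF-16 unit, so there is spare
+ // room to append the opening '(' in place.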
+ result.bytes[result.length] = '(';
+ result.length++;
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/XContentThrowableRestResponse.java b/src/main/java/org/elasticsearch/rest/XContentThrowableRestResponse.java
new file mode 100644
index 0000000..09a0c7a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/XContentThrowableRestResponse.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.ExceptionsHelper.detailedMessage;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * Renders a {@link Throwable} as an XContent error response carrying an
+ * <tt>error</tt> message and the HTTP <tt>status</tt>; when the
+ * <tt>error_trace</tt> parameter is set, a structured stack trace with nested
+ * causes is included.
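+ * <p>Illustrative body shape (with <tt>error_trace=true</tt>):
+ * <pre>
+ *     {"error": "...", "status": 500, "error_trace": {"message": "...", "at": {...}, "cause": {...}}}
+ * </pre>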
+ */
+public class XContentThrowableRestResponse extends XContentRestResponse {
+
+ public XContentThrowableRestResponse(RestRequest request, Throwable t) throws IOException {
+ this(request, ((t instanceof ElasticsearchException) ? ((ElasticsearchException) t).status() : RestStatus.INTERNAL_SERVER_ERROR), t);
+ }
+
+ public XContentThrowableRestResponse(RestRequest request, RestStatus status, Throwable t) throws IOException {
+ super(request, status, convert(request, status, t));
+ }
+
+ private static XContentBuilder convert(RestRequest request, RestStatus status, Throwable t) throws IOException {
+ XContentBuilder builder = restContentBuilder(request).startObject()
+ .field("error", detailedMessage(t))
+ .field("status", status.getStatus());
+ if (t != null && request.paramAsBoolean("error_trace", false)) {
+ builder.startObject("error_trace");
+ boolean first = true;
+ while (t != null) {
+ if (!first) {
+ builder.startObject("cause");
+ }
+ buildThrowable(t, builder);
+ if (!first) {
+ builder.endObject();
+ }
+ t = t.getCause();
+ first = false;
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ private static void buildThrowable(Throwable t, XContentBuilder builder) throws IOException {
+ builder.field("message", t.getMessage());
+ for (StackTraceElement stElement : t.getStackTrace()) {
+ builder.startObject("at")
+ .field("class", stElement.getClassName())
+ .field("method", stElement.getMethodName());
+ if (stElement.getFileName() != null) {
+ builder.field("file", stElement.getFileName());
+ }
+ if (stElement.getLineNumber() >= 0) {
+ builder.field("line", stElement.getLineNumber());
+ }
+ builder.endObject();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java
new file mode 100644
index 0000000..fcb1961
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
+import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
+import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
+import org.elasticsearch.rest.action.admin.cluster.node.restart.RestNodesRestartAction;
+import org.elasticsearch.rest.action.admin.cluster.node.shutdown.RestNodesShutdownAction;
+import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
+import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
+import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
+import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
+import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
+import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
+import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
+import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
+import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
+import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
+import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
+import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
+import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
+import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
+import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
+import org.elasticsearch.rest.action.admin.indices.gateway.snapshot.RestGatewaySnapshotAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.delete.RestDeleteMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
+import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
+import org.elasticsearch.rest.action.admin.indices.optimize.RestOptimizeAction;
+import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
+import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
+import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
+import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
+import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
+import org.elasticsearch.rest.action.admin.indices.status.RestIndicesStatusAction;
+import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
+import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
+import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
+import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
+import org.elasticsearch.rest.action.bulk.RestBulkAction;
+import org.elasticsearch.rest.action.cat.*;
+import org.elasticsearch.rest.action.delete.RestDeleteAction;
+import org.elasticsearch.rest.action.deletebyquery.RestDeleteByQueryAction;
+import org.elasticsearch.rest.action.explain.RestExplainAction;
+import org.elasticsearch.rest.action.get.RestGetAction;
+import org.elasticsearch.rest.action.get.RestGetSourceAction;
+import org.elasticsearch.rest.action.get.RestHeadAction;
+import org.elasticsearch.rest.action.get.RestMultiGetAction;
+import org.elasticsearch.rest.action.index.RestIndexAction;
+import org.elasticsearch.rest.action.main.RestMainAction;
+import org.elasticsearch.rest.action.mlt.RestMoreLikeThisAction;
+import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
+import org.elasticsearch.rest.action.percolate.RestPercolateAction;
+import org.elasticsearch.rest.action.search.RestClearScrollAction;
+import org.elasticsearch.rest.action.search.RestMultiSearchAction;
+import org.elasticsearch.rest.action.search.RestSearchAction;
+import org.elasticsearch.rest.action.search.RestSearchScrollAction;
+import org.elasticsearch.rest.action.suggest.RestSuggestAction;
+import org.elasticsearch.rest.action.termvector.RestMultiTermVectorsAction;
+import org.elasticsearch.rest.action.termvector.RestTermVectorAction;
+import org.elasticsearch.rest.action.update.RestUpdateAction;
+
+import java.util.List;
+
+/**
+ * Binds every built-in REST handler as an eager singleton, registers any
+ * plugin-provided handlers, and collects the cat actions into a
+ * {@link Multibinder} set.
+ */
+public class RestActionModule extends AbstractModule {
+ private final List<Class<? extends BaseRestHandler>> restPluginsActions;
+
+ public RestActionModule(List<Class<? extends BaseRestHandler>> restPluginsActions) {
+ this.restPluginsActions = restPluginsActions;
+ }
+
+ @Override
+ protected void configure() {
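+ // Handlers contributed by plugins via RestModule#addRestAction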
+ for (Class<? extends BaseRestHandler> restAction : restPluginsActions) {
+ bind(restAction).asEagerSingleton();
+ }
+
+ bind(RestMainAction.class).asEagerSingleton();
+
+ bind(RestNodesInfoAction.class).asEagerSingleton();
+ bind(RestNodesStatsAction.class).asEagerSingleton();
+ bind(RestNodesHotThreadsAction.class).asEagerSingleton();
+ bind(RestNodesShutdownAction.class).asEagerSingleton();
+ bind(RestNodesRestartAction.class).asEagerSingleton();
+ bind(RestClusterStatsAction.class).asEagerSingleton();
+ bind(RestClusterStateAction.class).asEagerSingleton();
+ bind(RestClusterHealthAction.class).asEagerSingleton();
+ bind(RestClusterUpdateSettingsAction.class).asEagerSingleton();
+ bind(RestClusterGetSettingsAction.class).asEagerSingleton();
+ bind(RestClusterRerouteAction.class).asEagerSingleton();
+ bind(RestClusterSearchShardsAction.class).asEagerSingleton();
+ bind(RestPendingClusterTasksAction.class).asEagerSingleton();
+ bind(RestPutRepositoryAction.class).asEagerSingleton();
+ bind(RestGetRepositoriesAction.class).asEagerSingleton();
+ bind(RestDeleteRepositoryAction.class).asEagerSingleton();
+ bind(RestGetSnapshotsAction.class).asEagerSingleton();
+ bind(RestCreateSnapshotAction.class).asEagerSingleton();
+ bind(RestRestoreSnapshotAction.class).asEagerSingleton();
+ bind(RestDeleteSnapshotAction.class).asEagerSingleton();
+
+ bind(RestIndicesExistsAction.class).asEagerSingleton();
+ bind(RestTypesExistsAction.class).asEagerSingleton();
+ bind(RestIndicesStatsAction.class).asEagerSingleton();
+ bind(RestIndicesStatusAction.class).asEagerSingleton();
+ bind(RestIndicesSegmentsAction.class).asEagerSingleton();
+ bind(RestGetAliasesAction.class).asEagerSingleton();
+ bind(RestAliasesExistAction.class).asEagerSingleton();
+ bind(RestIndexDeleteAliasesAction.class).asEagerSingleton();
+ bind(RestIndexPutAliasAction.class).asEagerSingleton();
+ bind(RestIndicesAliasesAction.class).asEagerSingleton();
+ bind(RestGetIndicesAliasesAction.class).asEagerSingleton();
+ bind(RestCreateIndexAction.class).asEagerSingleton();
+ bind(RestDeleteIndexAction.class).asEagerSingleton();
+ bind(RestCloseIndexAction.class).asEagerSingleton();
+ bind(RestOpenIndexAction.class).asEagerSingleton();
+
+ bind(RestUpdateSettingsAction.class).asEagerSingleton();
+ bind(RestGetSettingsAction.class).asEagerSingleton();
+
+ bind(RestAnalyzeAction.class).asEagerSingleton();
+ bind(RestGetIndexTemplateAction.class).asEagerSingleton();
+ bind(RestPutIndexTemplateAction.class).asEagerSingleton();
+ bind(RestDeleteIndexTemplateAction.class).asEagerSingleton();
+ bind(RestHeadIndexTemplateAction.class).asEagerSingleton();
+
+ bind(RestPutWarmerAction.class).asEagerSingleton();
+ bind(RestDeleteWarmerAction.class).asEagerSingleton();
+ bind(RestGetWarmerAction.class).asEagerSingleton();
+
+ bind(RestPutMappingAction.class).asEagerSingleton();
+ bind(RestDeleteMappingAction.class).asEagerSingleton();
+ bind(RestGetMappingAction.class).asEagerSingleton();
+ bind(RestGetFieldMappingAction.class).asEagerSingleton();
+
+ bind(RestGatewaySnapshotAction.class).asEagerSingleton();
+
+ bind(RestRefreshAction.class).asEagerSingleton();
+ bind(RestFlushAction.class).asEagerSingleton();
+ bind(RestOptimizeAction.class).asEagerSingleton();
+ bind(RestClearIndicesCacheAction.class).asEagerSingleton();
+
+ bind(RestIndexAction.class).asEagerSingleton();
+ bind(RestGetAction.class).asEagerSingleton();
+ bind(RestGetSourceAction.class).asEagerSingleton();
+ bind(RestHeadAction.class).asEagerSingleton();
+ bind(RestMultiGetAction.class).asEagerSingleton();
+ bind(RestDeleteAction.class).asEagerSingleton();
+ bind(RestDeleteByQueryAction.class).asEagerSingleton();
+ bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton();
+ bind(RestSuggestAction.class).asEagerSingleton();
+ bind(RestTermVectorAction.class).asEagerSingleton();
+ bind(RestMultiTermVectorsAction.class).asEagerSingleton();
+ bind(RestBulkAction.class).asEagerSingleton();
+ bind(RestUpdateAction.class).asEagerSingleton();
+ bind(RestPercolateAction.class).asEagerSingleton();
+ bind(RestMultiPercolateAction.class).asEagerSingleton();
+
+ bind(RestSearchAction.class).asEagerSingleton();
+ bind(RestSearchScrollAction.class).asEagerSingleton();
+ bind(RestClearScrollAction.class).asEagerSingleton();
+ bind(RestMultiSearchAction.class).asEagerSingleton();
+
+ bind(RestValidateQueryAction.class).asEagerSingleton();
+
+ bind(RestMoreLikeThisAction.class).asEagerSingleton();
+
+ bind(RestExplainAction.class).asEagerSingleton();
+
+ // cat API
+ Multibinder<AbstractCatAction> catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class);
+ catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestShardsAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestMasterAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestNodesAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestIndicesAction.class).asEagerSingleton();
+ // Fully qualified to prevent interference with rest.action.count.RestCountAction
+ catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestCountAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestRecoveryAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestHealthAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestAliasAction.class).asEagerSingleton();
+ catActionMultibinder.addBinding().to(RestThreadPoolAction.class).asEagerSingleton();
+ // RestCatAction is not an AbstractCatAction itself, so it is bound directly
+ bind(RestCatAction.class).asEagerSingleton();
+ }
+}
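
A plugin feeds its own handlers into this module through the restPluginsActions list, and each one is bound as an eager singleton exactly like the built-in actions above. A minimal sketch of such a handler (the class name, route, and response text are hypothetical, for illustration only):

    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.rest.*;

    public class RestHelloAction extends BaseRestHandler {

        @Inject
        public RestHelloAction(Settings settings, Client client, RestController controller) {
            super(settings, client);
            // hypothetical route, registered the same way as the built-in actions
            controller.registerHandler(RestRequest.Method.GET, "/_hello", this);
        }

        @Override
        public void handleRequest(final RestRequest request, final RestChannel channel) {
            // reply with a fixed body; a real handler would go through the Client
            channel.sendResponse(new StringRestResponse(RestStatus.OK, "hello\n"));
        }
    }
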
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java
new file mode 100644
index 0000000..97cc2a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.health;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.rest.RestStatus.PRECONDITION_FAILED;
+
+/**
+ * Returns cluster health
+ */
+public class RestClusterHealthAction extends BaseRestHandler {
+
+ @Inject
+ public RestClusterHealthAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/health", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/health/{index}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local()));
+ clusterHealthRequest.listenerThreaded(false);
+ try {
+ clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout()));
+ clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout()));
+ String waitForStatus = request.param("wait_for_status");
+ if (waitForStatus != null) {
+ clusterHealthRequest.waitForStatus(ClusterHealthStatus.valueOf(waitForStatus.toUpperCase(Locale.ROOT)));
+ }
+ clusterHealthRequest.waitForRelocatingShards(request.paramAsInt("wait_for_relocating_shards", clusterHealthRequest.waitForRelocatingShards()));
+ clusterHealthRequest.waitForActiveShards(request.paramAsInt("wait_for_active_shards", clusterHealthRequest.waitForActiveShards()));
+ clusterHealthRequest.waitForNodes(request.param("wait_for_nodes", clusterHealthRequest.waitForNodes()));
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, PRECONDITION_FAILED, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.admin().cluster().health(clusterHealthRequest, new ActionListener<ClusterHealthResponse>() {
+ @Override
+ public void onResponse(ClusterHealthResponse response) {
+ try {
+ RestStatus status = RestStatus.OK;
+ // deliberately report 200 OK even when the cluster status is RED: the health
+ // API itself is available; node availability checks should use "/" instead
+ //if (response.status() == ClusterHealthStatus.RED) {
+ // status = RestStatus.SERVICE_UNAVAILABLE;
+ //}
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+
+ channel.sendResponse(new XContentRestResponse(request, status, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
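
The REST parameters above map one-to-one onto the ClusterHealthRequest used internally. A sketch of the equivalent Java client call, assuming a connected Client named client (index name and wait condition are illustrative):

    import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
    import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
    import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
    import org.elasticsearch.common.unit.TimeValue;

    import static org.elasticsearch.client.Requests.clusterHealthRequest;

    ClusterHealthRequest healthRequest = clusterHealthRequest("my-index"); // no argument checks the whole cluster
    healthRequest.waitForStatus(ClusterHealthStatus.YELLOW);               // same as ?wait_for_status=yellow
    healthRequest.timeout(TimeValue.timeValueSeconds(30));                 // same as ?timeout=30s
    ClusterHealthResponse health = client.admin().cluster().health(healthRequest).actionGet();
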
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java
new file mode 100644
index 0000000..a6873af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.node.hotthreads;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+
+/**
+ * Returns hot threads samples from one or more nodes
+ */
+public class RestNodesHotThreadsAction extends BaseRestHandler {
+
+ @Inject
+ public RestNodesHotThreadsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/hotthreads", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/hot_threads", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/{nodeId}/hotthreads", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/{nodeId}/hot_threads", this);
+
+ controller.registerHandler(RestRequest.Method.GET, "/_nodes/hotthreads", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_nodes/hot_threads", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_nodes/{nodeId}/hotthreads", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_nodes/{nodeId}/hot_threads", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
+ NodesHotThreadsRequest nodesHotThreadsRequest = new NodesHotThreadsRequest(nodesIds);
+ nodesHotThreadsRequest.threads(request.paramAsInt("threads", nodesHotThreadsRequest.threads()));
+ nodesHotThreadsRequest.type(request.param("type", nodesHotThreadsRequest.type()));
+ nodesHotThreadsRequest.interval(TimeValue.parseTimeValue(request.param("interval"), nodesHotThreadsRequest.interval()));
+ nodesHotThreadsRequest.snapshots(request.paramAsInt("snapshots", nodesHotThreadsRequest.snapshots()));
+ client.admin().cluster().nodesHotThreads(nodesHotThreadsRequest, new ActionListener<NodesHotThreadsResponse>() {
+ @Override
+ public void onResponse(NodesHotThreadsResponse response) {
+ try {
+ StringBuilder sb = new StringBuilder();
+ for (NodeHotThreads node : response) {
+ sb.append("::: ").append(node.getNode().toString()).append("\n");
+ Strings.spaceify(3, node.getHotThreads(), sb);
+ sb.append('\n');
+ }
+ channel.sendResponse(new StringRestResponse(RestStatus.OK, sb.toString()));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
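
The same plain-text report can be pulled through the Java client; the node filter and sampling values below are illustrative, and client is an assumed connected Client:

    import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
    import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
    import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
    import org.elasticsearch.common.unit.TimeValue;

    NodesHotThreadsRequest hotThreads = new NodesHotThreadsRequest();  // empty means all nodes
    hotThreads.threads(3);                                             // same as ?threads=3
    hotThreads.interval(TimeValue.timeValueMillis(500));               // same as ?interval=500ms
    NodesHotThreadsResponse hotResponse = client.admin().cluster().nodesHotThreads(hotThreads).actionGet();
    for (NodeHotThreads node : hotResponse) {
        System.out.println(node.getNode() + ": " + node.getHotThreads());
    }
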
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java
new file mode 100644
index 0000000..fe22ffb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.node.info;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+
+/**
+ * Returns node information
+ */
+public class RestNodesInfoAction extends BaseRestHandler {
+
+ private final SettingsFilter settingsFilter;
+ private final static Set<String> ALLOWED_METRICS = Sets.newHashSet("http", "jvm", "network", "os", "plugin", "process", "settings", "thread_pool", "transport");
+
+ @Inject
+ public RestNodesInfoAction(Settings settings, Client client, RestController controller,
+ SettingsFilter settingsFilter) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_nodes", this);
+ // the {nodeId} position may also carry a comma-separated list of metrics, e.g. /_nodes/jvm
+ controller.registerHandler(GET, "/_nodes/{nodeId}", this);
+ controller.registerHandler(GET, "/_nodes/{nodeId}/{metrics}", this);
+ // added this endpoint to be aligned with stats
+ controller.registerHandler(GET, "/_nodes/{nodeId}/info/{metrics}", this);
+
+ this.settingsFilter = settingsFilter;
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] nodeIds;
+ Set<String> metrics;
+
+ // special case like /_nodes/os, where "os" is a metric and not a node id;
+ // still, /_nodes/_local (or any other node id) must work and be treated as usual,
+ // so allowed metrics and arbitrary node ids have to be told apart in the same place
+ if (request.hasParam("nodeId") && !request.hasParam("metrics")) {
+ Set<String> metricsOrNodeIds = Strings.splitStringByCommaToSet(request.param("nodeId", "_all"));
+ boolean isMetricsOnly = ALLOWED_METRICS.containsAll(metricsOrNodeIds);
+ if (isMetricsOnly) {
+ nodeIds = new String[] { "_all" };
+ metrics = metricsOrNodeIds;
+ } else {
+ nodeIds = metricsOrNodeIds.toArray(new String[]{});
+ metrics = Sets.newHashSet("_all");
+ }
+ } else {
+ nodeIds = Strings.splitStringByCommaToArray(request.param("nodeId", "_all"));
+ metrics = Strings.splitStringByCommaToSet(request.param("metrics", "_all"));
+ }
+
+ final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds);
+ nodesInfoRequest.listenerThreaded(false);
+
+ // shortcut: skip the per-metric checks if only "_all" is specified
+ if (metrics.size() == 1 && metrics.contains("_all")) {
+ nodesInfoRequest.all();
+ } else {
+ nodesInfoRequest.clear();
+ nodesInfoRequest.settings(metrics.contains("settings"));
+ nodesInfoRequest.os(metrics.contains("os"));
+ nodesInfoRequest.process(metrics.contains("process"));
+ nodesInfoRequest.jvm(metrics.contains("jvm"));
+ nodesInfoRequest.threadPool(metrics.contains("thread_pool"));
+ nodesInfoRequest.network(metrics.contains("network"));
+ nodesInfoRequest.transport(metrics.contains("transport"));
+ nodesInfoRequest.http(metrics.contains("http"));
+ nodesInfoRequest.plugin(metrics.contains("plugin"));
+ }
+
+ client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
+ @Override
+ public void onResponse(NodesInfoResponse response) {
+ try {
+ response.settingsFilter(settingsFilter);
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
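
The metric names resolved above correspond to boolean setters on NodesInfoRequest. A sketch of the Java client equivalent of GET /_nodes/jvm,os,process, assuming a connected Client named client:

    import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
    import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;

    NodesInfoRequest infoRequest = new NodesInfoRequest("_all");
    infoRequest.clear();        // start from nothing instead of the defaults
    infoRequest.jvm(true);
    infoRequest.os(true);
    infoRequest.process(true);
    NodesInfoResponse info = client.admin().cluster().nodesInfo(infoRequest).actionGet();
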
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/restart/RestNodesRestartAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/restart/RestNodesRestartAction.java
new file mode 100644
index 0000000..1709880
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/restart/RestNodesRestartAction.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.node.restart;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest;
+import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * Restarts nodes
+ */
+public class RestNodesRestartAction extends BaseRestHandler {
+
+ @Inject
+ public RestNodesRestartAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/_restart", this);
+ controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/{nodeId}/_restart", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
+ NodesRestartRequest nodesRestartRequest = new NodesRestartRequest(nodesIds);
+ nodesRestartRequest.listenerThreaded(false);
+ nodesRestartRequest.delay(request.paramAsTime("delay", nodesRestartRequest.delay()));
+ client.admin().cluster().nodesRestart(nodesRestartRequest, new ActionListener<NodesRestartResponse>() {
+ @Override
+ public void onResponse(NodesRestartResponse result) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ builder.field("cluster_name", result.getClusterName().value());
+
+ builder.startObject("nodes");
+ for (NodesRestartResponse.NodeRestartResponse nodeInfo : result) {
+ builder.startObject(nodeInfo.getNode().id());
+ builder.field("name", nodeInfo.getNode().name());
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java
new file mode 100644
index 0000000..a0d2a69
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.node.shutdown;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest;
+import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * Shuts down nodes
+ */
+public class RestNodesShutdownAction extends BaseRestHandler {
+
+ @Inject
+ public RestNodesShutdownAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(RestRequest.Method.POST, "/_shutdown", this);
+ controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/_shutdown", this);
+ controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/{nodeId}/_shutdown", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
+ NodesShutdownRequest nodesShutdownRequest = new NodesShutdownRequest(nodesIds);
+ nodesShutdownRequest.listenerThreaded(false);
+ nodesShutdownRequest.delay(request.paramAsTime("delay", nodesShutdownRequest.delay()));
+ nodesShutdownRequest.exit(request.paramAsBoolean("exit", nodesShutdownRequest.exit()));
+ client.admin().cluster().nodesShutdown(nodesShutdownRequest, new ActionListener<NodesShutdownResponse>() {
+ @Override
+ public void onResponse(NodesShutdownResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ builder.field("cluster_name", response.getClusterName().value());
+
+ builder.startObject("nodes");
+ for (DiscoveryNode node : response.getNodes()) {
+ builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("name", node.name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.endObject();
+ }
+ builder.endObject();
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java
new file mode 100644
index 0000000..477b2bf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.node.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+
+/**
+ * Returns node statistics
+ */
+public class RestNodesStatsAction extends BaseRestHandler {
+
+ @Inject
+ public RestNodesStatsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_nodes/stats", this);
+ controller.registerHandler(GET, "/_nodes/{nodeId}/stats", this);
+
+ controller.registerHandler(GET, "/_nodes/stats/{metric}", this);
+ controller.registerHandler(GET, "/_nodes/{nodeId}/stats/{metric}", this);
+
+ controller.registerHandler(GET, "/_nodes/stats/{metric}/{indexMetric}", this);
+ controller.registerHandler(GET, "/_nodes/stats/{metric}/{indexMetric}/{fields}", this);
+
+ controller.registerHandler(GET, "/_nodes/{nodeId}/stats/{metric}/{indexMetric}", this);
+ controller.registerHandler(GET, "/_nodes/{nodeId}/stats/{metric}/{indexMetric}/{fields}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
+ Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
+
+ NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds);
+ nodesStatsRequest.listenerThreaded(false);
+
+ if (metrics.size() == 1 && metrics.contains("_all")) {
+ nodesStatsRequest.all();
+ nodesStatsRequest.indices(CommonStatsFlags.ALL);
+ } else {
+ nodesStatsRequest.clear();
+ nodesStatsRequest.os(metrics.contains("os"));
+ nodesStatsRequest.jvm(metrics.contains("jvm"));
+ nodesStatsRequest.threadPool(metrics.contains("thread_pool"));
+ nodesStatsRequest.network(metrics.contains("network"));
+ nodesStatsRequest.fs(metrics.contains("fs"));
+ nodesStatsRequest.transport(metrics.contains("transport"));
+ nodesStatsRequest.http(metrics.contains("http"));
+ nodesStatsRequest.indices(metrics.contains("indices"));
+ nodesStatsRequest.process(metrics.contains("process"));
+ nodesStatsRequest.breaker(metrics.contains("breaker"));
+
+ // check for index-specific metrics
+ if (metrics.contains("indices")) {
+ Set<String> indexMetrics = Strings.splitStringByCommaToSet(request.param("indexMetric", "_all"));
+ if (indexMetrics.size() == 1 && indexMetrics.contains("_all")) {
+ nodesStatsRequest.indices(CommonStatsFlags.ALL);
+ } else {
+ CommonStatsFlags flags = new CommonStatsFlags();
+ for (Flag flag : CommonStatsFlags.Flag.values()) {
+ flags.set(flag, indexMetrics.contains(flag.getRestName()));
+ }
+ nodesStatsRequest.indices(flags);
+ }
+ }
+ }
+
+ if (nodesStatsRequest.indices().isSet(Flag.FieldData) && (request.hasParam("fields") || request.hasParam("fielddata_fields"))) {
+ nodesStatsRequest.indices().fieldDataFields(request.paramAsStringArray("fielddata_fields", request.paramAsStringArray("fields", null)));
+ }
+ if (nodesStatsRequest.indices().isSet(Flag.Completion) && (request.hasParam("fields") || request.hasParam("completion_fields"))) {
+ nodesStatsRequest.indices().completionDataFields(request.paramAsStringArray("completion_fields", request.paramAsStringArray("fields", null)));
+ }
+ if (nodesStatsRequest.indices().isSet(Flag.Search) && (request.hasParam("groups"))) {
+ nodesStatsRequest.indices().groups(request.paramAsStringArray("groups", null));
+ }
+ if (nodesStatsRequest.indices().isSet(Flag.Indexing) && (request.hasParam("types"))) {
+ nodesStatsRequest.indices().types(request.paramAsStringArray("types", null));
+ }
+
+ client.admin().cluster().nodesStats(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
+ @Override
+ public void onResponse(NodesStatsResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
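
As with the info API, each stats metric is a flag on the request object, and index-level detail is controlled through CommonStatsFlags. A sketch of the Java client equivalent of GET /_nodes/stats/jvm,indices, assuming a connected Client named client:

    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
    import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;

    NodesStatsRequest statsRequest = new NodesStatsRequest();   // empty means all nodes
    statsRequest.clear();
    statsRequest.jvm(true);
    statsRequest.indices(CommonStatsFlags.ALL);                 // include all index-level stats
    NodesStatsResponse stats = client.admin().cluster().nodesStats(statsRequest).actionGet();
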
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java
new file mode 100644
index 0000000..4df5c1e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.client.Requests.deleteRepositoryRequest;
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+
+/**
+ * Unregisters a repository
+ */
+public class RestDeleteRepositoryAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteRepositoryAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/_snapshot/{repository}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository"));
+ deleteRepositoryRequest.listenerThreaded(false);
+ deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout()));
+ deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout()));
+ client.admin().cluster().deleteRepository(deleteRepositoryRequest, new AcknowledgedRestResponseActionListener<DeleteRepositoryResponse>(request, channel, logger));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java
new file mode 100644
index 0000000..7c94ffd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.repositories.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.getRepositoryRequest;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Returns repository information
+ */
+public class RestGetRepositoriesAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetRepositoriesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_snapshot", this);
+ controller.registerHandler(GET, "/_snapshot/{repository}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY);
+ GetRepositoriesRequest getRepositoriesRequest = getRepositoryRequest(repositories);
+ getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout()));
+ getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local()));
+ client.admin().cluster().getRepositories(getRepositoriesRequest, new ActionListener<GetRepositoriesResponse>() {
+ @Override
+ public void onResponse(GetRepositoriesResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ for (RepositoryMetaData repositoryMetaData : response.repositories()) {
+ RepositoriesMetaData.FACTORY.toXContent(repositoryMetaData, builder, request);
+ }
+ builder.endObject();
+
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java
new file mode 100644
index 0000000..743803a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.client.Requests.putRepositoryRequest;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+
+/**
+ * Registers repositories
+ */
+public class RestPutRepositoryAction extends BaseRestHandler {
+
+ @Inject
+ public RestPutRepositoryAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(PUT, "/_snapshot/{repository}", this);
+ controller.registerHandler(POST, "/_snapshot/{repository}", this);
+ }
+
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ PutRepositoryRequest putRepositoryRequest = putRepositoryRequest(request.param("repository"));
+ putRepositoryRequest.listenerThreaded(false);
+ putRepositoryRequest.source(request.content().toUtf8());
+ putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout()));
+ putRepositoryRequest.timeout(request.paramAsTime("timeout", putRepositoryRequest.timeout()));
+ client.admin().cluster().putRepository(putRepositoryRequest, new AcknowledgedRestResponseActionListener<PutRepositoryResponse>(request, channel, logger));
+ }
+}
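
The request body is passed through verbatim as the repository definition. A sketch of registering a shared-filesystem repository from the Java client (repository name, type, and location are illustrative; client is an assumed connected Client):

    import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
    import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;

    import static org.elasticsearch.client.Requests.putRepositoryRequest;

    PutRepositoryRequest repoRequest = putRepositoryRequest("my_backup");
    repoRequest.source("{\"type\": \"fs\", \"settings\": {\"location\": \"/mount/backups\"}}");
    PutRepositoryResponse repoResponse = client.admin().cluster().putRepository(repoRequest).actionGet();
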
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java
new file mode 100644
index 0000000..4554a04
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.reroute;
+
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+/**
+ * Manually reroutes shard allocation
+ */
+public class RestClusterRerouteAction extends BaseRestHandler {
+
+ private final SettingsFilter settingsFilter;
+
+ @Inject
+ public RestClusterRerouteAction(Settings settings, Client client, RestController controller,
+ SettingsFilter settingsFilter) {
+ super(settings, client);
+ this.settingsFilter = settingsFilter;
+ controller.registerHandler(RestRequest.Method.POST, "/_cluster/reroute", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final ClusterRerouteRequest clusterRerouteRequest = Requests.clusterRerouteRequest();
+ clusterRerouteRequest.listenerThreaded(false);
+ clusterRerouteRequest.dryRun(request.paramAsBoolean("dry_run", clusterRerouteRequest.dryRun()));
+ clusterRerouteRequest.timeout(request.paramAsTime("timeout", clusterRerouteRequest.timeout()));
+ clusterRerouteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRerouteRequest.masterNodeTimeout()));
+ if (request.hasContent()) {
+ try {
+ clusterRerouteRequest.source(request.content());
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+ }
+
+ client.admin().cluster().reroute(clusterRerouteRequest, new AcknowledgedRestResponseActionListener<ClusterRerouteResponse>(request, channel, logger) {
+ @Override
+ protected void addCustomFields(XContentBuilder builder, ClusterRerouteResponse response) throws IOException {
+ builder.startObject("state");
+ // by default, filter metadata
+ if (request.param("filter_metadata") == null) {
+ request.params().put("filter_metadata", "true");
+ }
+ response.getState().settingsFilter(settingsFilter).toXContent(builder, request);
+ builder.endObject();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to handle cluster reroute", e);
+ }
+ super.onFailure(e);
+ }
+ });
+ }
+}
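
A dry run is the safest way to exercise this endpoint, since it computes the new allocation without applying it. A minimal sketch from the Java client, assuming a connected Client named client:

    import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
    import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
    import org.elasticsearch.client.Requests;

    // same as POST /_cluster/reroute?dry_run=true with an empty body
    ClusterRerouteRequest reroute = Requests.clusterRerouteRequest();
    reroute.dryRun(true);
    ClusterRerouteResponse rerouteResponse = client.admin().cluster().reroute(reroute).actionGet();
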
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java
new file mode 100644
index 0000000..0bcd4e1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.settings;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Returns the persistent and transient cluster-wide settings
+ */
+public class RestClusterGetSettingsAction extends BaseRestHandler {
+
+ @Inject
+ public RestClusterGetSettingsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest()
+ .listenerThreaded(false)
+ .routingTable(false)
+ .nodes(false);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(ClusterStateResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ builder.startObject("persistent");
+ response.getState().metaData().persistentSettings().toXContent(builder, request);
+ builder.endObject();
+
+ builder.startObject("transient");
+ response.getState().metaData().transientSettings().toXContent(builder, request);
+ builder.endObject();
+
+ builder.endObject();
+
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java
new file mode 100644
index 0000000..787bcf9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.settings;
+
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Updates the persistent and transient cluster-wide settings
+ */
+public class RestClusterUpdateSettingsAction extends BaseRestHandler {
+
+ @Inject
+ public RestClusterUpdateSettingsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.PUT, "/_cluster/settings", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest();
+ clusterUpdateSettingsRequest.listenerThreaded(false);
+ clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout()));
+ clusterUpdateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout()));
+ try {
+ Map<String, Object> source = XContentFactory.xContent(request.content()).createParser(request.content()).mapAndClose();
+ if (source.containsKey("transient")) {
+ clusterUpdateSettingsRequest.transientSettings((Map) source.get("transient"));
+ }
+ if (source.containsKey("persistent")) {
+ clusterUpdateSettingsRequest.persistentSettings((Map) source.get("persistent"));
+ }
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+
+ client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, new AcknowledgedRestResponseActionListener<ClusterUpdateSettingsResponse>(request, channel, logger) {
+
+ @Override
+ protected void addCustomFields(XContentBuilder builder, ClusterUpdateSettingsResponse response) throws IOException {
+ builder.startObject("persistent");
+ response.getPersistentSettings().toXContent(builder, request);
+ builder.endObject();
+
+ builder.startObject("transient");
+ response.getTransientSettings().toXContent(builder, request);
+ builder.endObject();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to handle cluster state", e);
+ }
+ super.onFailure(e);
+ }
+ });
+ }
+}
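
The transient and persistent maps parsed above can be supplied directly from the Java client; the setting name and value below are illustrative, and client is an assumed connected Client:

    import com.google.common.collect.ImmutableMap;
    import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
    import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
    import org.elasticsearch.client.Requests;

    // same as PUT /_cluster/settings with {"transient": {"indices.recovery.max_bytes_per_sec": "40mb"}}
    ClusterUpdateSettingsRequest settingsRequest = Requests.clusterUpdateSettingsRequest();
    settingsRequest.transientSettings(ImmutableMap.of("indices.recovery.max_bytes_per_sec", "40mb"));
    ClusterUpdateSettingsResponse settingsResponse = client.admin().cluster().updateSettings(settingsRequest).actionGet();
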
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java
new file mode 100644
index 0000000..0fe341f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.shards;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+/**
+ * Returns the shards a search request would execute against
+ */
+public class RestClusterSearchShardsAction extends BaseRestHandler {
+
+ @Inject
+ public RestClusterSearchShardsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_search_shards", this);
+ controller.registerHandler(POST, "/_search_shards", this);
+ controller.registerHandler(GET, "/{index}/_search_shards", this);
+ controller.registerHandler(POST, "/{index}/_search_shards", this);
+ controller.registerHandler(GET, "/{index}/{type}/_search_shards", this);
+ controller.registerHandler(POST, "/{index}/{type}/_search_shards", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final ClusterSearchShardsRequest clusterSearchShardsRequest = Requests.clusterSearchShardsRequest(indices);
+ clusterSearchShardsRequest.local(request.paramAsBoolean("local", clusterSearchShardsRequest.local()));
+ clusterSearchShardsRequest.listenerThreaded(false);
+
+ clusterSearchShardsRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
+ clusterSearchShardsRequest.routing(request.param("routing"));
+ clusterSearchShardsRequest.preference(request.param("preference"));
+ clusterSearchShardsRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions()));
+
+ client.admin().cluster().searchShards(clusterSearchShardsRequest, new ActionListener<ClusterSearchShardsResponse>() {
+ @Override
+ public void onResponse(ClusterSearchShardsResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
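
The handler above is a thin layer: each URL segment and query parameter maps onto a setter of ClusterSearchShardsRequest before the request is handed to the cluster admin client. A minimal sketch of the same call made directly through the Java API, assuming an already-connected Client and the standard future-returning companion of the listener variant used above; the index, type, routing, and preference values here are hypothetical:

    import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
    import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.client.Requests;

    public class SearchShardsExample {
        // Mirrors GET /test/doc/_search_shards?routing=user1&preference=_local
        public static ClusterSearchShardsResponse searchShards(Client client) {
            ClusterSearchShardsRequest request = Requests.clusterSearchShardsRequest("test"); // hypothetical index
            request.types("doc");          // the {type} path segment
            request.routing("user1");      // hypothetical routing value
            request.preference("_local");  // same parameter the REST layer forwards
            return client.admin().cluster().searchShards(request).actionGet();
        }
    }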
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java
new file mode 100644
index 0000000..8adfd39
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.snapshots.create;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.createSnapshotRequest;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+
+/**
+ * Creates a new snapshot
+ */
+public class RestCreateSnapshotAction extends BaseRestHandler {
+
+ @Inject
+ public RestCreateSnapshotAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(PUT, "/_snapshot/{repository}/{snapshot}", this);
+ controller.registerHandler(POST, "/_snapshot/{repository}/{snapshot}/_create", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ CreateSnapshotRequest createSnapshotRequest = createSnapshotRequest(request.param("repository"), request.param("snapshot"));
+ createSnapshotRequest.listenerThreaded(false);
+ createSnapshotRequest.source(request.content().toUtf8());
+ createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout()));
+ createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false));
+ client.admin().cluster().createSnapshot(createSnapshotRequest, new ActionListener<CreateSnapshotResponse>() {
+ @Override
+ public void onResponse(CreateSnapshotResponse response) {
+ try {
+
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, response.status(), builder));
+            } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
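
Note that this handler does not parse the request body at all; it forwards it verbatim as the snapshot source. A sketch of the equivalent Java-client call (repository, snapshot, and index names are hypothetical, and the repository is assumed to be registered already):

    import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
    import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
    import org.elasticsearch.client.Client;

    import static org.elasticsearch.client.Requests.createSnapshotRequest;

    public class CreateSnapshotExample {
        // Mirrors PUT /_snapshot/my_backup/snapshot_1?wait_for_completion=true
        public static CreateSnapshotResponse snapshot(Client client) {
            CreateSnapshotRequest request = createSnapshotRequest("my_backup", "snapshot_1"); // hypothetical names
            request.source("{\"indices\": \"test\"}"); // raw JSON, exactly what the REST handler forwards
            request.waitForCompletion(true);           // block until the snapshot finishes
            return client.admin().cluster().createSnapshot(request).actionGet();
        }
    }

Leaving wait_for_completion at its default of false returns as soon as the snapshot is initialized, which is also the default the handler applies.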
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java
new file mode 100644
index 0000000..9b69fd6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.snapshots.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.deleteSnapshotRequest;
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Deletes a snapshot
+ */
+public class RestDeleteSnapshotAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteSnapshotAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/_snapshot/{repository}/{snapshot}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteSnapshotRequest deleteSnapshotRequest = deleteSnapshotRequest(request.param("repository"), request.param("snapshot"));
+ deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout()));
+ client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new AcknowledgedRestResponseActionListener<DeleteSnapshotResponse>(request, channel, logger));
+ }
+}
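
Deletion needs no body and no listener boilerplate because DeleteSnapshotResponse is a plain acknowledged response, which is also why the handler can delegate to AcknowledgedRestResponseActionListener. A corresponding sketch (repository and snapshot names hypothetical):

    import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
    import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
    import org.elasticsearch.client.Client;

    import static org.elasticsearch.client.Requests.deleteSnapshotRequest;

    public class DeleteSnapshotExample {
        // Mirrors DELETE /_snapshot/my_backup/snapshot_1
        public static boolean delete(Client client) {
            DeleteSnapshotRequest request = deleteSnapshotRequest("my_backup", "snapshot_1"); // hypothetical names
            DeleteSnapshotResponse response = client.admin().cluster().deleteSnapshot(request).actionGet();
            return response.isAcknowledged(); // assumes the standard acknowledged-response accessor
        }
    }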
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java
new file mode 100644
index 0000000..5f7374b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.snapshots.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.getSnapshotsRequest;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Returns information about snapshots in a repository
+ */
+public class RestGetSnapshotsAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetSnapshotsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_snapshot/{repository}/{snapshot}", this);
+ }
+
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String repository = request.param("repository");
+ String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY);
+ if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) {
+ snapshots = Strings.EMPTY_ARRAY;
+ }
+ GetSnapshotsRequest getSnapshotsRequest = getSnapshotsRequest(repository).snapshots(snapshots);
+ getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout()));
+ client.admin().cluster().getSnapshots(getSnapshotsRequest, new ActionListener<GetSnapshotsResponse>() {
+ @Override
+ public void onResponse(GetSnapshotsResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+                } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
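
As the `_all` check above shows, an empty snapshots array means "every snapshot in the repository". The same listing through the Java client (repository name hypothetical):

    import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
    import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.Strings;

    import static org.elasticsearch.client.Requests.getSnapshotsRequest;

    public class GetSnapshotsExample {
        // Mirrors GET /_snapshot/my_backup/_all
        public static GetSnapshotsResponse listAll(Client client) {
            GetSnapshotsRequest request = getSnapshotsRequest("my_backup") // hypothetical repository
                    .snapshots(Strings.EMPTY_ARRAY);                       // empty array == all snapshots
            return client.admin().cluster().getSnapshots(request).actionGet();
        }
    }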
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java
new file mode 100644
index 0000000..e6a7366
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.snapshots.restore;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.restoreSnapshotRequest;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Restores a snapshot
+ */
+public class RestRestoreSnapshotAction extends BaseRestHandler {
+
+ @Inject
+ public RestRestoreSnapshotAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_snapshot/{repository}/{snapshot}/_restore", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ RestoreSnapshotRequest restoreSnapshotRequest = restoreSnapshotRequest(request.param("repository"), request.param("snapshot"));
+ restoreSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", restoreSnapshotRequest.masterNodeTimeout()));
+ restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false));
+ restoreSnapshotRequest.source(request.content().toUtf8());
+ client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new ActionListener<RestoreSnapshotResponse>() {
+ @Override
+ public void onResponse(RestoreSnapshotResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+                } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
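
Restore follows the same pattern as snapshot creation: the body travels through untouched as the request source. A sketch under the same assumptions (names hypothetical):

    import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
    import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
    import org.elasticsearch.client.Client;

    import static org.elasticsearch.client.Requests.restoreSnapshotRequest;

    public class RestoreSnapshotExample {
        // Mirrors POST /_snapshot/my_backup/snapshot_1/_restore?wait_for_completion=true
        public static RestoreSnapshotResponse restore(Client client) {
            RestoreSnapshotRequest request = restoreSnapshotRequest("my_backup", "snapshot_1"); // hypothetical names
            request.source("{\"indices\": \"test\"}"); // raw JSON body, forwarded verbatim by the handler
            request.waitForCompletion(true);
            return client.admin().cluster().restoreSnapshot(request).actionGet();
        }
    }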
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java
new file mode 100644
index 0000000..db89585
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.state;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Set;
+
+
+/**
+ * Handles REST requests for the cluster state API, allowing the response to be filtered by metric and index.
+ */
+public class RestClusterStateAction extends BaseRestHandler {
+
+ private final SettingsFilter settingsFilter;
+
+ @Inject
+ public RestClusterStateAction(Settings settings, Client client, RestController controller,
+ SettingsFilter settingsFilter) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/state", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/state/{metric}", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/state/{metric}/{indices}", this);
+
+ this.settingsFilter = settingsFilter;
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest();
+ clusterStateRequest.listenerThreaded(false);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("indices", "_all"));
+ boolean isAllIndicesOnly = indices.length == 1 && "_all".equals(indices[0]);
+ if (!isAllIndicesOnly) {
+ clusterStateRequest.indices(indices);
+ }
+
+ Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
+ boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
+ if (!isAllMetricsOnly) {
+ clusterStateRequest.nodes(metrics.contains("nodes") || metrics.contains("master_node"));
+ clusterStateRequest.routingTable(metrics.contains("routing_table"));
+ clusterStateRequest.metaData(metrics.contains("metadata"));
+ clusterStateRequest.blocks(metrics.contains("blocks"));
+ clusterStateRequest.indexTemplates(request.paramAsStringArray("index_templates", Strings.EMPTY_ARRAY));
+ }
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(ClusterStateResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ builder.field(Fields.CLUSTER_NAME, response.getClusterName().value());
+ response.getState().settingsFilter(settingsFilter).toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to handle cluster state", e);
+ }
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static final class Fields {
+ static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
+ }
+}
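
The {metric} parsing above translates URL metrics into boolean toggles on ClusterStateRequest, so callers can fetch only the sections they need. The equivalent selective fetch in Java (a sketch; the index name is hypothetical):

    import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
    import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.client.Requests;
    import org.elasticsearch.cluster.metadata.MetaData;

    public class ClusterStateExample {
        // Mirrors GET /_cluster/state/metadata/test
        public static MetaData indexMetaData(Client client) {
            ClusterStateRequest request = Requests.clusterStateRequest()
                    .routingTable(false)   // drop the routing_table section
                    .nodes(false)          // drop the nodes section
                    .indices("test");      // hypothetical index filter
            ClusterStateResponse response = client.admin().cluster().state(request).actionGet();
            return response.getState().metaData();
        }
    }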
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java
new file mode 100644
index 0000000..2363070
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+
+/**
+ * Handles REST requests for cluster-wide statistics, optionally scoped to a set of nodes.
+ */
+public class RestClusterStatsAction extends BaseRestHandler {
+
+ @Inject
+ public RestClusterStatsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/stats", this);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/stats/nodes/{nodeId}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null));
+ clusterStatsRequest.listenerThreaded(false);
+ client.admin().cluster().clusterStats(clusterStatsRequest, new ActionListener<ClusterStatsResponse>() {
+ @Override
+ public void onResponse(ClusterStatsResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
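
A matching Java sketch; passing null node ids (the handler's default when {nodeId} is absent) requests statistics from all nodes:

    import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
    import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
    import org.elasticsearch.client.Client;

    public class ClusterStatsExample {
        // Mirrors GET /_cluster/stats and GET /_cluster/stats/nodes/{nodeId}
        public static ClusterStatsResponse stats(Client client, String... nodeIds) {
            ClusterStatsRequest request = new ClusterStatsRequest().nodesIds(nodeIds);
            return client.admin().cluster().clusterStats(request).actionGet();
        }
    }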
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java
new file mode 100644
index 0000000..cfa010a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.tasks;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+/**
+ */
+public class RestPendingClusterTasksAction extends BaseRestHandler {
+
+ @Inject
+ public RestPendingClusterTasksAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/pending_tasks", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest();
+ pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout()));
+ pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local()));
+ client.admin().cluster().pendingClusterTasks(pendingClusterTasksRequest, new ActionListener<PendingClusterTasksResponse>() {
+
+ @Override
+ public void onResponse(PendingClusterTasksResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to get pending cluster tasks", e);
+ }
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
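
A minimal Java sketch of the same call; the ten-second master timeout is a hypothetical value standing in for the master_timeout query parameter:

    import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
    import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.unit.TimeValue;

    public class PendingTasksExample {
        // Mirrors GET /_cluster/pending_tasks?master_timeout=10s
        public static PendingClusterTasksResponse pendingTasks(Client client) {
            PendingClusterTasksRequest request = new PendingClusterTasksRequest();
            request.masterNodeTimeout(TimeValue.timeValueSeconds(10)); // hypothetical timeout
            return client.admin().cluster().pendingClusterTasks(request).actionGet();
        }
    }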
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java
new file mode 100644
index 0000000..0ed1a67
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.alias;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+/**
+ * Handles add/remove alias actions submitted as a batch to the _aliases endpoint.
+ */
+public class RestIndicesAliasesAction extends BaseRestHandler {
+
+ @Inject
+ public RestIndicesAliasesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_aliases", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
+ indicesAliasesRequest.listenerThreaded(false);
+ indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
+ XContentParser parser = null;
+ try {
+ // {
+ // actions : [
+ // { add : { index : "test1", alias : "alias1", filter : {"user" : "kimchy"} } }
+ // { remove : { index : "test1", alias : "alias1" } }
+ // ]
+ // }
+ indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
+ parser = XContentFactory.xContent(request.content()).createParser(request.content());
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ throw new ElasticsearchIllegalArgumentException("No action is specified");
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String action = parser.currentName();
+ AliasAction.Type type;
+ if ("add".equals(action)) {
+ type = AliasAction.Type.ADD;
+ } else if ("remove".equals(action)) {
+ type = AliasAction.Type.REMOVE;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Alias action [" + action + "] not supported");
+ }
+ String index = null;
+ String alias = null;
+ Map<String, Object> filter = null;
+ String routing = null;
+ boolean routingSet = false;
+ String indexRouting = null;
+ boolean indexRoutingSet = false;
+ String searchRouting = null;
+ boolean searchRoutingSet = false;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("alias".equals(currentFieldName)) {
+ alias = parser.text();
+ } else if ("routing".equals(currentFieldName)) {
+ routing = parser.textOrNull();
+ routingSet = true;
+ } else if ("indexRouting".equals(currentFieldName) || "index-routing".equals(currentFieldName) || "index_routing".equals(currentFieldName)) {
+ indexRouting = parser.textOrNull();
+ indexRoutingSet = true;
+ } else if ("searchRouting".equals(currentFieldName) || "search-routing".equals(currentFieldName) || "search_routing".equals(currentFieldName)) {
+ searchRouting = parser.textOrNull();
+ searchRoutingSet = true;
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filter = parser.mapOrdered();
+ }
+ }
+ }
+
+ if (type == AliasAction.Type.ADD) {
+ AliasAction aliasAction = newAddAliasAction(index, alias).filter(filter);
+ if (routingSet) {
+ aliasAction.routing(routing);
+ }
+ if (indexRoutingSet) {
+ aliasAction.indexRouting(indexRouting);
+ }
+ if (searchRoutingSet) {
+ aliasAction.searchRouting(searchRouting);
+ }
+ indicesAliasesRequest.addAliasAction(aliasAction);
+ } else if (type == AliasAction.Type.REMOVE) {
+ indicesAliasesRequest.removeAlias(index, alias);
+ }
+ }
+ }
+ }
+ }
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+        } finally {
+            if (parser != null) {
+                parser.close();
+            }
+        }
+ client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestResponseActionListener<IndicesAliasesResponse>(request, channel, logger));
+ }
+}
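
Each entry in the parsed actions array becomes one AliasAction, so the whole body maps onto a single IndicesAliasesRequest. A programmatic sketch of the add/remove pair from the comment above (index, alias, and routing values hypothetical):

    import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.metadata.AliasAction;

    import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;

    public class AliasesUpdateExample {
        // Mirrors POST /_aliases with one add action and one remove action
        public static void swapAlias(Client client) {
            IndicesAliasesRequest request = new IndicesAliasesRequest();
            AliasAction add = newAddAliasAction("test2", "alias1"); // hypothetical names
            add.routing("user1");                                   // optional, as in the parsed body
            request.addAliasAction(add);
            request.removeAlias("test1", "alias1");
            client.admin().indices().aliases(request).actionGet();
        }
    }

Because both actions ride in one request, they are applied in a single cluster state update, which is what makes this the usual idiom for flipping an alias between indices.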
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java
new file mode 100644
index 0000000..dfc98b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.alias.delete;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+import java.util.Arrays;
+
+/**
+ * Thrown when one or more of the requested aliases do not exist.
+ */
+public class AliasesMissingException extends ElasticsearchException {
+
+ private final String[] names;
+
+ public AliasesMissingException(String... names) {
+ super("aliases [" + Arrays.toString(names) + "] missing");
+ this.names = names;
+ }
+
+ public String[] names() {
+ return this.names;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java
new file mode 100644
index 0000000..8cbd9d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.alias.delete;
+
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+
+/**
+ */
+public class RestIndexDeleteAliasesAction extends BaseRestHandler {
+
+ @Inject
+ public RestIndexDeleteAliasesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/{index}/_alias/{name}", this);
+ controller.registerHandler(DELETE, "/{index}/_aliases/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final String[] aliases = Strings.splitStringByCommaToArray(request.param("name"));
+ IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
+ indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
+ indicesAliasesRequest.removeAlias(indices, aliases);
+ indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
+
+ client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestResponseActionListener<IndicesAliasesResponse>(request, channel, logger));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java
new file mode 100644
index 0000000..699de1a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.alias.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ */
+public class RestGetAliasesAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetAliasesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_alias/", this);
+ controller.registerHandler(GET, "/_alias/{name}", this);
+ controller.registerHandler(GET, "/{index}/_alias/{name}", this);
+ controller.registerHandler(GET, "/{index}/_alias", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name");
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases);
+ getAliasesRequest.indices(indices);
+ getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
+ getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
+
+ client.admin().indices().getAliases(getAliasesRequest, new ActionListener<GetAliasesResponse>() {
+
+ @Override
+ public void onResponse(GetAliasesResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+                    // send an empty body if indices were specified but no matching aliases were found
+ if (indices.length > 0 && response.getAliases().isEmpty()) {
+ channel.sendResponse(new XContentRestResponse(request, OK, RestXContentBuilder.emptyBuilder(request)));
+ return;
+ } else if (response.getAliases().isEmpty()) {
+ String message = String.format(Locale.ROOT, "alias [%s] missing", toNamesString(getAliasesRequest.aliases()));
+ builder.startObject()
+ .field("error", message)
+ .field("status", RestStatus.NOT_FOUND.getStatus())
+ .endObject();
+ channel.sendResponse(new XContentRestResponse(request, RestStatus.NOT_FOUND, builder));
+ return;
+ }
+
+ builder.startObject();
+ for (ObjectObjectCursor<String, List<AliasMetaData>> entry : response.getAliases()) {
+ builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(Fields.ALIASES);
+ for (AliasMetaData alias : entry.value) {
+ AliasMetaData.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS);
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ private static String toNamesString(String... names) {
+ if (names == null || names.length == 0) {
+ return "";
+ } else if (names.length == 1) {
+ return names[0];
+ } else {
+ StringBuilder builder = new StringBuilder(names[0]);
+ for (int i = 1; i < names.length; i++) {
+ builder.append(',').append(names[i]);
+ }
+ return builder.toString();
+ }
+ }
+
+ static class Fields {
+
+ static final XContentBuilderString ALIASES = new XContentBuilderString("aliases");
+
+ }
+}
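
Consuming GetAliasesResponse directly uses the same HPPC cursor iteration as the handler above. A sketch (index and alias names hypothetical):

    import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
    import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
    import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.metadata.AliasMetaData;

    import java.util.List;

    public class GetAliasesExample {
        // Mirrors GET /test/_alias/alias1
        public static void printAliases(Client client) {
            GetAliasesRequest request = new GetAliasesRequest(new String[]{"alias1"}); // hypothetical alias
            request.indices("test");                                                   // hypothetical index
            GetAliasesResponse response = client.admin().indices().getAliases(request).actionGet();
            for (ObjectObjectCursor<String, List<AliasMetaData>> entry : response.getAliases()) {
                for (AliasMetaData alias : entry.value) {
                    System.out.println(entry.key + " -> " + alias.alias());
                }
            }
        }
    }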
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java
new file mode 100644
index 0000000..39989cd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.alias.get;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.Strings.isAllOrWildcard;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ */
+@Deprecated
+public class RestGetIndicesAliasesAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetIndicesAliasesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_aliases", this);
+ controller.registerHandler(GET, "/{index}/_aliases", this);
+ controller.registerHandler(GET, "/{index}/_aliases/{name}", this);
+ controller.registerHandler(GET, "/_aliases/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final String[] aliases = Strings.splitStringByCommaToArray(request.param("name"));
+
+ ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest()
+ .routingTable(false)
+ .nodes(false)
+ .indices(indices);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.listenerThreaded(false);
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(ClusterStateResponse response) {
+ try {
+ MetaData metaData = response.getState().metaData();
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ final boolean isAllAliasesRequested = isAllOrWildcard(aliases);
+ for (IndexMetaData indexMetaData : metaData) {
+ builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject("aliases");
+
+ for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
+ if (isAllAliasesRequested || Regex.simpleMatch(aliases, cursor.value.alias())) {
+ AliasMetaData.Builder.toXContent(cursor.value, builder, ToXContent.EMPTY_PARAMS);
+ }
+ }
+
+ builder.endObject();
+ builder.endObject();
+ }
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java
new file mode 100644
index 0000000..5b9da08
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.alias.head;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.HEAD;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ */
+public class RestAliasesExistAction extends BaseRestHandler {
+
+ @Inject
+ public RestAliasesExistAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(HEAD, "/_alias/{name}", this);
+ controller.registerHandler(HEAD, "/{index}/_alias/{name}", this);
+ controller.registerHandler(HEAD, "/{index}/_alias", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] aliases = request.paramAsStringArray("name", Strings.EMPTY_ARRAY);
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases);
+ getAliasesRequest.indices(indices);
+ getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
+ getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
+
+ client.admin().indices().aliasesExist(getAliasesRequest, new ActionListener<AliasesExistResponse>() {
+
+ @Override
+ public void onResponse(AliasesExistResponse response) {
+ try {
+ if (response.isExists()) {
+ channel.sendResponse(new StringRestResponse(OK));
+ } else {
+ channel.sendResponse(new StringRestResponse(NOT_FOUND));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new StringRestResponse(ExceptionsHelper.status(e)));
+ } catch (Exception e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
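
The HEAD endpoint carries its answer purely in the status code. Checked programmatically, the same existence test reads (names hypothetical):

    import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
    import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
    import org.elasticsearch.client.Client;

    public class AliasExistsExample {
        // Mirrors HEAD /test/_alias/alias1 (200 when present, 404 otherwise)
        public static boolean aliasExists(Client client) {
            GetAliasesRequest request = new GetAliasesRequest(new String[]{"alias1"}); // hypothetical alias
            request.indices("test");                                                   // hypothetical index
            AliasesExistResponse response = client.admin().indices().aliasesExist(request).actionGet();
            return response.isExists();
        }
    }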
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java
new file mode 100644
index 0000000..c2e3b5d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.alias.put;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+
+/**
+ */
+public class RestIndexPutAliasAction extends BaseRestHandler {
+
+ @Inject
+ public RestIndexPutAliasAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(PUT, "/{index}/_alias/{name}", this);
+ controller.registerHandler(PUT, "/_alias/{name}", this);
+ controller.registerHandler(PUT, "/{index}/_aliases/{name}", this);
+ controller.registerHandler(PUT, "/_aliases/{name}", this);
+ controller.registerHandler(PUT, "/{index}/_alias", this);
+ controller.registerHandler(PUT, "/_alias", this);
+
+ controller.registerHandler(POST, "/{index}/_alias/{name}", this);
+ controller.registerHandler(POST, "/_alias/{name}", this);
+ controller.registerHandler(POST, "/{index}/_aliases/{name}", this);
+ controller.registerHandler(POST, "/_aliases/{name}", this);
+ controller.registerHandler(PUT, "/{index}/_aliases", this);
+ //we cannot add POST for "/_aliases" because this is the _aliases api already defined in RestIndicesAliasesAction
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ String alias = request.param("name");
+ Map<String, Object> filter = null;
+ String routing = null;
+ String indexRouting = null;
+ String searchRouting = null;
+
+ if (request.hasContent()) {
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(request.content()).createParser(request.content());
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
+ throw new ElasticsearchIllegalArgumentException("No index alias is specified");
+ }
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != null) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ indices = Strings.splitStringByCommaToArray(parser.text());
+ } else if ("alias".equals(currentFieldName)) {
+ alias = parser.text();
+ } else if ("routing".equals(currentFieldName)) {
+ routing = parser.textOrNull();
+ } else if ("indexRouting".equals(currentFieldName) || "index-routing".equals(currentFieldName) || "index_routing".equals(currentFieldName)) {
+ indexRouting = parser.textOrNull();
+ } else if ("searchRouting".equals(currentFieldName) || "search-routing".equals(currentFieldName) || "search_routing".equals(currentFieldName)) {
+ searchRouting = parser.textOrNull();
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filter = parser.mapOrdered();
+ }
+ }
+ }
+ } catch (Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
+ indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
+ String[] aliases = new String[] {alias};
+ IndicesAliasesRequest.AliasActions aliasAction = new AliasActions(AliasAction.Type.ADD, indices, aliases);
+ indicesAliasesRequest.addAliasAction(aliasAction);
+ indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
+
+
+ if (routing != null) {
+ aliasAction.routing(routing);
+ }
+ if (searchRouting != null) {
+ aliasAction.searchRouting(searchRouting);
+ }
+ if (indexRouting != null) {
+ aliasAction.indexRouting(indexRouting);
+ }
+ if (filter != null) {
+ aliasAction.filter(filter);
+ }
+ client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestResponseActionListener<IndicesAliasesResponse>(request, channel, logger));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java
new file mode 100644
index 0000000..e7556ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.analyze;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * REST handler for the indices analyze API (_analyze).
+ */
+public class RestAnalyzeAction extends BaseRestHandler {
+
+ @Inject
+ public RestAnalyzeAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_analyze", this);
+ controller.registerHandler(GET, "/{index}/_analyze", this);
+ controller.registerHandler(POST, "/_analyze", this);
+ controller.registerHandler(POST, "/{index}/_analyze", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ String text = request.param("text");
+ if (text == null && request.hasContent()) {
+ text = request.content().toUtf8();
+ }
+ if (text == null) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, new ElasticsearchIllegalArgumentException("text is missing")));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+
+ AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index"), text);
+ analyzeRequest.listenerThreaded(false);
+ analyzeRequest.preferLocal(request.paramAsBoolean("prefer_local", analyzeRequest.preferLocalShard()));
+ analyzeRequest.analyzer(request.param("analyzer"));
+ analyzeRequest.field(request.param("field"));
+ analyzeRequest.tokenizer(request.param("tokenizer"));
+ analyzeRequest.tokenFilters(request.paramAsStringArray("token_filters", request.paramAsStringArray("filters", null)));
+ client.admin().indices().analyze(analyzeRequest, new ActionListener<AnalyzeResponse>() {
+ @Override
+ public void onResponse(AnalyzeResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request, null);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
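The analyze handler accepts the text either as a ?text= parameter or as the raw request body, and returns the tokens the analysis chain produces. A minimal Java-client sketch, assuming a 1.x Client named client; the analyzer name and sample text are illustrative:

    // imports: org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse
    AnalyzeResponse resp = client.admin().indices()
            .prepareAnalyze("this is a test")
            .setAnalyzer("standard")
            .execute().actionGet();
    for (AnalyzeResponse.AnalyzeToken token : resp.getTokens()) {
        System.out.println(token.getTerm());   // this, is, a, test
    }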
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java
new file mode 100644
index 0000000..f1c6584
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.cache.clear;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * REST handler for the clear indices cache API (_cache/clear).
+ */
+public class RestClearIndicesCacheAction extends BaseRestHandler {
+
+ @Inject
+ public RestClearIndicesCacheAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_cache/clear", this);
+ controller.registerHandler(POST, "/{index}/_cache/clear", this);
+
+ controller.registerHandler(GET, "/_cache/clear", this);
+ controller.registerHandler(GET, "/{index}/_cache/clear", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ clearIndicesCacheRequest.listenerThreaded(false);
+ clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions()));
+ try {
+ if (request.hasParam("filter")) {
+ clearIndicesCacheRequest.filterCache(request.paramAsBoolean("filter", clearIndicesCacheRequest.filterCache()));
+ }
+ if (request.hasParam("filter_cache")) {
+ clearIndicesCacheRequest.filterCache(request.paramAsBoolean("filter_cache", clearIndicesCacheRequest.filterCache()));
+ }
+ if (request.hasParam("field_data")) {
+ clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean("field_data", clearIndicesCacheRequest.fieldDataCache()));
+ }
+ if (request.hasParam("fielddata")) {
+ clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean("fielddata", clearIndicesCacheRequest.fieldDataCache()));
+ }
+ if (request.hasParam("id")) {
+ clearIndicesCacheRequest.idCache(request.paramAsBoolean("id", clearIndicesCacheRequest.idCache()));
+ }
+ if (request.hasParam("id_cache")) {
+ clearIndicesCacheRequest.idCache(request.paramAsBoolean("id_cache", clearIndicesCacheRequest.idCache()));
+ }
+ if (request.hasParam("recycler")) {
+ clearIndicesCacheRequest.recycler(request.paramAsBoolean("recycler", clearIndicesCacheRequest.recycler()));
+ }
+ clearIndicesCacheRequest.fields(request.paramAsStringArray("fields", clearIndicesCacheRequest.fields()));
+ clearIndicesCacheRequest.filterKeys(request.paramAsStringArray("filter_keys", clearIndicesCacheRequest.filterKeys()));
+
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operationThreading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = BroadcastOperationThreading.THREAD_PER_SHARD;
+ }
+ clearIndicesCacheRequest.operationThreading(operationThreading);
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+ client.admin().indices().clearCache(clearIndicesCacheRequest, new ActionListener<ClearIndicesCacheResponse>() {
+ @Override
+ public void onResponse(ClearIndicesCacheResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ buildBroadcastShardsHeader(builder, response);
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
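Note that the handler accepts both current and legacy parameter spellings (filter/filter_cache, field_data/fielddata, id/id_cache). A minimal Java-client sketch of the same call, assuming a 1.x Client named client and an illustrative index name:

    // imports: org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse
    ClearIndicesCacheResponse resp = client.admin().indices()
            .prepareClearCache("my_index")
            .setFilterCache(true)      // ?filter=true
            .setFieldDataCache(true)   // ?field_data=true
            .execute().actionGet();
    // resp.getSuccessfulShards()/getFailedShards() mirror the _shards header above.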
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java
new file mode 100644
index 0000000..7d814f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.close;
+
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+/**
+ * REST handler for the close index API (_close).
+ */
+public class RestCloseIndexAction extends BaseRestHandler {
+
+ @Inject
+ public RestCloseIndexAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.POST, "/_close", this);
+ controller.registerHandler(RestRequest.Method.POST, "/{index}/_close", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ closeIndexRequest.listenerThreaded(false);
+ closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout()));
+ closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout()));
+ closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions()));
+ client.admin().indices().close(closeIndexRequest, new AcknowledgedRestResponseActionListener<CloseIndexResponse>(request, channel, logger));
+ }
+}
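A minimal Java-client sketch of the same close call, assuming a 1.x Client named client and an illustrative index name (the open handler later in this patch is its exact mirror via prepareOpen):

    // imports: org.elasticsearch.action.admin.indices.close.CloseIndexResponse
    CloseIndexResponse resp = client.admin().indices()
            .prepareClose("my_index")
            .execute().actionGet();
    assert resp.isAcknowledged();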
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java
new file mode 100644
index 0000000..7d0205c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.create;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+/**
+ * REST handler for the create index API (PUT/POST /{index}).
+ */
+public class RestCreateIndexAction extends BaseRestHandler {
+
+ @Inject
+ public RestCreateIndexAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.PUT, "/{index}", this);
+ controller.registerHandler(RestRequest.Method.POST, "/{index}", this);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index"));
+ createIndexRequest.listenerThreaded(false);
+ if (request.hasContent()) {
+ try {
+ createIndexRequest.source(request.content());
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+ }
+
+ createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout()));
+ createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout()));
+
+ client.admin().indices().create(createIndexRequest, new AcknowledgedRestResponseActionListener<CreateIndexResponse>(request, channel, logger));
+ }
+}
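A minimal Java-client sketch of index creation, assuming a 1.x Client named client; the index name and settings are illustrative. The JSON body accepted by the handler above may carry the same settings plus mappings:

    // imports: org.elasticsearch.action.admin.indices.create.CreateIndexResponse,
    //          org.elasticsearch.common.settings.ImmutableSettings
    CreateIndexResponse resp = client.admin().indices()
            .prepareCreate("my_index")
            .setSettings(ImmutableSettings.settingsBuilder()
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 0)
                    .build())
            .execute().actionGet();
    assert resp.isAcknowledged();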
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java
new file mode 100644
index 0000000..b00478b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.delete;
+
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+/**
+ * REST handler for the delete index API (DELETE /{index}).
+ */
+public class RestDeleteIndexAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteIndexAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.DELETE, "/", this);
+ controller.registerHandler(RestRequest.Method.DELETE, "/{index}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ deleteIndexRequest.listenerThreaded(false);
+ deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout()));
+ deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout()));
+ deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions()));
+ client.admin().indices().delete(deleteIndexRequest, new AcknowledgedRestResponseActionListener<DeleteIndexResponse>(request, channel, logger));
+ }
+}
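A minimal Java-client sketch of the same delete call, assuming a 1.x Client named client and an illustrative index name; note that the handler above also registers DELETE on "/", i.e. deleting all indices:

    // imports: org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse
    DeleteIndexResponse resp = client.admin().indices()
            .prepareDelete("my_index")
            .execute().actionGet();
    assert resp.isAcknowledged();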
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java
new file mode 100644
index 0000000..08ed131
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.exists.indices;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.HEAD;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * REST handler for checking whether an index exists (HEAD /{index}).
+ */
+public class RestIndicesExistsAction extends BaseRestHandler {
+
+ private final SettingsFilter settingsFilter;
+
+ @Inject
+ public RestIndicesExistsAction(Settings settings, Client client, RestController controller,
+ SettingsFilter settingsFilter) {
+ super(settings, client);
+ controller.registerHandler(HEAD, "/{index}", this);
+
+ this.settingsFilter = settingsFilter;
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ IndicesExistsRequest indicesExistsRequest = new IndicesExistsRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ indicesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesExistsRequest.indicesOptions()));
+ indicesExistsRequest.local(request.paramAsBoolean("local", indicesExistsRequest.local()));
+ indicesExistsRequest.listenerThreaded(false);
+ client.admin().indices().exists(indicesExistsRequest, new ActionListener<IndicesExistsResponse>() {
+ @Override
+ public void onResponse(IndicesExistsResponse response) {
+ try {
+ if (response.isExists()) {
+ channel.sendResponse(new StringRestResponse(OK));
+ } else {
+ channel.sendResponse(new StringRestResponse(NOT_FOUND));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new StringRestResponse(ExceptionsHelper.status(e)));
+ } catch (Exception e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
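The exists check maps HEAD /{index} to a bare status code with no body. A minimal Java-client sketch, assuming a 1.x Client named client and an illustrative index name:

    // imports: org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse
    IndicesExistsResponse resp = client.admin().indices()
            .prepareExists("my_index")
            .execute().actionGet();
    boolean present = resp.isExists();   // true -> 200 OK, false -> 404 NOT FOUND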
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java
new file mode 100644
index 0000000..7f4582f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.exists.types;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.HEAD;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * REST handler for checking whether a type exists (HEAD /{index}/{type}).
+ */
+public class RestTypesExistsAction extends BaseRestHandler {
+
+ @Inject
+ public RestTypesExistsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(HEAD, "/{index}/{type}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ TypesExistsRequest typesExistsRequest = new TypesExistsRequest(
+ Strings.splitStringByCommaToArray(request.param("index")), Strings.splitStringByCommaToArray(request.param("type"))
+ );
+ typesExistsRequest.listenerThreaded(false);
+ typesExistsRequest.local(request.paramAsBoolean("local", typesExistsRequest.local()));
+ typesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, typesExistsRequest.indicesOptions()));
+ client.admin().indices().typesExists(typesExistsRequest, new ActionListener<TypesExistsResponse>() {
+ @Override
+ public void onResponse(TypesExistsResponse response) {
+ try {
+ if (response.isExists()) {
+ channel.sendResponse(new StringRestResponse(OK));
+ } else {
+ channel.sendResponse(new StringRestResponse(NOT_FOUND));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new StringRestResponse(ExceptionsHelper.status(e)));
+ } catch (Exception e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java
new file mode 100644
index 0000000..152ffdf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.flush;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * REST handler for the flush API (_flush).
+ */
+public class RestFlushAction extends BaseRestHandler {
+
+ @Inject
+ public RestFlushAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_flush", this);
+ controller.registerHandler(POST, "/{index}/_flush", this);
+
+ controller.registerHandler(GET, "/_flush", this);
+ controller.registerHandler(GET, "/{index}/_flush", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ flushRequest.listenerThreaded(false);
+ flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions()));
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operationThreading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = BroadcastOperationThreading.THREAD_PER_SHARD;
+ }
+ flushRequest.operationThreading(operationThreading);
+ flushRequest.full(request.paramAsBoolean("full", flushRequest.full()));
+ flushRequest.force(request.paramAsBoolean("force", flushRequest.force()));
+ client.admin().indices().flush(flushRequest, new ActionListener<FlushResponse>() {
+ @Override
+ public void onResponse(FlushResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ buildBroadcastShardsHeader(builder, response);
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
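A minimal Java-client sketch of the same flush call, assuming a 1.x Client named client and an illustrative index name; setFull corresponds to the ?full= parameter parsed above:

    // imports: org.elasticsearch.action.admin.indices.flush.FlushResponse
    FlushResponse resp = client.admin().indices()
            .prepareFlush("my_index")
            .setFull(false)
            .execute().actionGet();
    // Shard totals in resp mirror the _shards header built above.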
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/gateway/snapshot/RestGatewaySnapshotAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/gateway/snapshot/RestGatewaySnapshotAction.java
new file mode 100644
index 0000000..a22888a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/gateway/snapshot/RestGatewaySnapshotAction.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.gateway.snapshot;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * REST handler for the deprecated gateway snapshot API (_gateway/snapshot).
+ */
+@Deprecated
+public class RestGatewaySnapshotAction extends BaseRestHandler {
+
+ @Inject
+ public RestGatewaySnapshotAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_gateway/snapshot", this);
+ controller.registerHandler(POST, "/{index}/_gateway/snapshot", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ GatewaySnapshotRequest gatewaySnapshotRequest = new GatewaySnapshotRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ gatewaySnapshotRequest.listenerThreaded(false);
+ gatewaySnapshotRequest.indicesOptions(IndicesOptions.fromRequest(request, gatewaySnapshotRequest.indicesOptions()));
+ client.admin().indices().gatewaySnapshot(gatewaySnapshotRequest, new ActionListener<GatewaySnapshotResponse>() {
+ @Override
+ public void onResponse(GatewaySnapshotResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ buildBroadcastShardsHeader(builder, response);
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/delete/RestDeleteMappingAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/delete/RestDeleteMappingAction.java
new file mode 100644
index 0000000..d915609
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/delete/RestDeleteMappingAction.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.mapping.delete;
+
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.client.Requests.deleteMappingRequest;
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+
+/**
+ * REST handler for the delete mapping API (DELETE /{index}/{type}/_mapping).
+ */
+public class RestDeleteMappingAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteMappingAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/{index}/{type}/_mapping", this);
+ controller.registerHandler(DELETE, "/{index}/{type}", this);
+ controller.registerHandler(DELETE, "/{index}/_mapping/{type}", this);
+
+ // also support the plural form _mappings
+ controller.registerHandler(DELETE, "/{index}/{type}/_mappings", this);
+ controller.registerHandler(DELETE, "/{index}/_mappings/{type}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteMappingRequest deleteMappingRequest = deleteMappingRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ deleteMappingRequest.listenerThreaded(false);
+ deleteMappingRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
+ deleteMappingRequest.timeout(request.paramAsTime("timeout", deleteMappingRequest.timeout()));
+ deleteMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteMappingRequest.masterNodeTimeout()));
+ deleteMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteMappingRequest.indicesOptions()));
+ client.admin().indices().deleteMapping(deleteMappingRequest, new AcknowledgedRestResponseActionListener<DeleteMappingResponse>(request, channel, logger));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java
new file mode 100644
index 0000000..4ac48c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.mapping.get;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.emptyBuilder;
+
+/**
+ * REST handler for the get field mapping API (_mapping/field/{fields}).
+ */
+public class RestGetFieldMappingAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetFieldMappingAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_mapping/field/{fields}", this);
+ controller.registerHandler(GET, "/_mapping/{type}/field/{fields}", this);
+ controller.registerHandler(GET, "/{index}/_mapping/field/{fields}", this);
+ controller.registerHandler(GET, "/{index}/{type}/_mapping/field/{fields}", this);
+ controller.registerHandler(GET, "/{index}/_mapping/{type}/field/{fields}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final String[] types = request.paramAsStringArrayOrEmptyIfAll("type");
+ final String[] fields = Strings.splitStringByCommaToArray(request.param("fields"));
+ GetFieldMappingsRequest getMappingsRequest = new GetFieldMappingsRequest();
+ getMappingsRequest.indices(indices).types(types).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false));
+ getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions()));
+ getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local()));
+ client.admin().indices().getFieldMappings(getMappingsRequest, new ActionListener<GetFieldMappingsResponse>() {
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void onResponse(GetFieldMappingsResponse response) {
+ try {
+ ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappingsByIndex = response.mappings();
+
+ boolean isPossibleSingleFieldRequest = indices.length == 1 && types.length == 1 && fields.length == 1;
+ if (isPossibleSingleFieldRequest && isFieldMappingMissingField(mappingsByIndex)) {
+ channel.sendResponse(new XContentRestResponse(request, OK, emptyBuilder(request)));
+ return;
+ }
+
+ RestStatus status = OK;
+ if (mappingsByIndex.isEmpty() && fields.length > 0) {
+ status = NOT_FOUND;
+ }
+
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, status, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ /**
+ * Helper method to determine whether the only field mapping metadata returned is a null
+ * mapping, which means that the index and type exist but the requested field does not;
+ * in that case the single-field request above answers with an empty body instead of an error.
+ */
+ private boolean isFieldMappingMissingField(ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappingsByIndex) throws IOException {
+ if (mappingsByIndex.size() != 1) {
+ return false;
+ }
+
+ for (ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>> value : mappingsByIndex.values()) {
+ for (ImmutableMap<String, FieldMappingMetaData> fieldValue : value.values()) {
+ for (Map.Entry<String, FieldMappingMetaData> fieldMappingMetaDataEntry : fieldValue.entrySet()) {
+ if (fieldMappingMetaDataEntry.getValue().isNull()) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+}
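A minimal Java-client sketch of the same lookup, assuming a 1.x Client named client and illustrative index/type/field names; the FieldMappingMetaData.isNull() check is the same one the helper above uses to detect a missing field:

    // imports: org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse
    GetFieldMappingsResponse resp = client.admin().indices()
            .prepareGetFieldMappings("my_index")
            .setTypes("tweet")
            .setFields("message")
            .execute().actionGet();
    GetFieldMappingsResponse.FieldMappingMetaData meta =
            resp.fieldMappings("my_index", "tweet", "message");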
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java
new file mode 100644
index 0000000..446525f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.mapping.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.TypeMissingException;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * REST handler for the get mapping API (_mapping).
+ */
+public class RestGetMappingAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetMappingAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_mapping", this);
+ controller.registerHandler(GET, "/{index}/_mapping", this);
+ controller.registerHandler(GET, "/{index}/{type}/_mapping", this);
+ controller.registerHandler(GET, "/{index}/_mappings/{type}", this);
+ controller.registerHandler(GET, "/{index}/_mapping/{type}", this);
+ controller.registerHandler(GET, "/_mapping/{type}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final String[] types = request.paramAsStringArrayOrEmptyIfAll("type");
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest();
+ getMappingsRequest.indices(indices).types(types);
+ getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions()));
+ getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local()));
+ client.admin().indices().getMappings(getMappingsRequest, new ActionListener<GetMappingsResponse>() {
+
+ @Override
+ public void onResponse(GetMappingsResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsByIndex = response.getMappings();
+ if (mappingsByIndex.isEmpty()) {
+ if (indices.length != 0 && types.length != 0) {
+ channel.sendResponse(new XContentRestResponse(request, OK, RestXContentBuilder.emptyBuilder(request)));
+ } else if (indices.length != 0) {
+ channel.sendResponse(new XContentThrowableRestResponse(request, new IndexMissingException(new Index(indices[0]))));
+ } else if (types.length != 0) {
+ channel.sendResponse(new XContentThrowableRestResponse(request, new TypeMissingException(new Index("_all"), types[0])));
+ } else {
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ }
+ return;
+ }
+
+ for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappingsByIndex) {
+ if (indexEntry.value.isEmpty()) {
+ continue;
+ }
+ builder.startObject(indexEntry.key, XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(Fields.MAPPINGS);
+ for (ObjectObjectCursor<String, MappingMetaData> typeEntry : indexEntry.value) {
+ builder.field(typeEntry.key);
+ builder.map(typeEntry.value.sourceAsMap());
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static class Fields {
+ static final XContentBuilderString MAPPINGS = new XContentBuilderString("mappings");
+ }
+}
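A minimal Java-client sketch of fetching mappings, assuming a 1.x Client named client and illustrative index/type names; the nested map is the same index -> type -> MappingMetaData layout the handler serializes above (sourceAsMap() declares IOException):

    // imports: org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse,
    //          org.elasticsearch.cluster.metadata.MappingMetaData
    GetMappingsResponse resp = client.admin().indices()
            .prepareGetMappings("my_index")
            .setTypes("tweet")
            .execute().actionGet();
    MappingMetaData mapping = resp.getMappings().get("my_index").get("tweet");
    Map<String, Object> source = mapping.sourceAsMap();   // throws IOException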
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java
new file mode 100644
index 0000000..b6e9a5e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.mapping.put;
+
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.client.Requests.putMappingRequest;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+
+/**
+ * REST handler for the put mapping API (PUT/POST _mapping).
+ */
+public class RestPutMappingAction extends BaseRestHandler {
+
+
+ @Inject
+ public RestPutMappingAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(PUT, "/{index}/_mapping/", this);
+ controller.registerHandler(PUT, "/{index}/{type}/_mapping", this);
+ controller.registerHandler(PUT, "/{index}/_mapping/{type}", this);
+ controller.registerHandler(PUT, "/_mapping/{type}", this);
+
+ controller.registerHandler(POST, "/{index}/_mapping/", this);
+ controller.registerHandler(POST, "/{index}/{type}/_mapping", this);
+ controller.registerHandler(POST, "/{index}/_mapping/{type}", this);
+ controller.registerHandler(POST, "/_mapping/{type}", this);
+
+ // register the same paths, but with the plural form _mappings
+ controller.registerHandler(PUT, "/{index}/_mappings/", this);
+ controller.registerHandler(PUT, "/{index}/{type}/_mappings", this);
+ controller.registerHandler(PUT, "/{index}/_mappings/{type}", this);
+ controller.registerHandler(PUT, "/_mappings/{type}", this);
+
+ controller.registerHandler(POST, "/{index}/_mappings/", this);
+ controller.registerHandler(POST, "/{index}/{type}/_mappings", this);
+ controller.registerHandler(POST, "/{index}/_mappings/{type}", this);
+ controller.registerHandler(POST, "/_mappings/{type}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ putMappingRequest.listenerThreaded(false);
+ putMappingRequest.type(request.param("type"));
+ putMappingRequest.source(request.content().toUtf8());
+ putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout()));
+ putMappingRequest.ignoreConflicts(request.paramAsBoolean("ignore_conflicts", putMappingRequest.ignoreConflicts()));
+ putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout()));
+ putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions()));
+ client.admin().indices().putMapping(putMappingRequest, new AcknowledgedRestResponseActionListener<PutMappingResponse>(request, channel, logger));
+ }
+}
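A minimal Java-client sketch of the same put-mapping call, assuming a 1.x Client named client; index, type and the JSON mapping source are illustrative:

    // imports: org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse
    PutMappingResponse resp = client.admin().indices()
            .preparePutMapping("my_index")
            .setType("tweet")
            .setSource("{\"tweet\":{\"properties\":{\"message\":{\"type\":\"string\"}}}}")
            .execute().actionGet();
    assert resp.isAcknowledged();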
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java
new file mode 100644
index 0000000..a0a08fa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.open;
+
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+/**
+ * REST handler for the open index API (_open).
+ */
+public class RestOpenIndexAction extends BaseRestHandler {
+
+ @Inject
+ public RestOpenIndexAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.POST, "/_open", this);
+ controller.registerHandler(RestRequest.Method.POST, "/{index}/_open", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ openIndexRequest.listenerThreaded(false);
+ openIndexRequest.timeout(request.paramAsTime("timeout", openIndexRequest.timeout()));
+ openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout()));
+ openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions()));
+ client.admin().indices().open(openIndexRequest, new AcknowledgedRestResponseActionListener<OpenIndexResponse>(request, channel, logger));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java
new file mode 100644
index 0000000..d4897ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.optimize;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Handles the optimize API ({@code GET|POST /{index}/_optimize}), which forces a merge of index segments.
+ */
+public class RestOptimizeAction extends BaseRestHandler {
+
+ @Inject
+ public RestOptimizeAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_optimize", this);
+ controller.registerHandler(POST, "/{index}/_optimize", this);
+
+ controller.registerHandler(GET, "/_optimize", this);
+ controller.registerHandler(GET, "/{index}/_optimize", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ OptimizeRequest optimizeRequest = new OptimizeRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ optimizeRequest.listenerThreaded(false);
+ optimizeRequest.indicesOptions(IndicesOptions.fromRequest(request, optimizeRequest.indicesOptions()));
+ try {
+ optimizeRequest.waitForMerge(request.paramAsBoolean("wait_for_merge", optimizeRequest.waitForMerge()));
+ optimizeRequest.maxNumSegments(request.paramAsInt("max_num_segments", optimizeRequest.maxNumSegments()));
+ optimizeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", optimizeRequest.onlyExpungeDeletes()));
+ optimizeRequest.flush(request.paramAsBoolean("flush", optimizeRequest.flush()));
+
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a thread per shard
+ operationThreading = BroadcastOperationThreading.THREAD_PER_SHARD;
+ }
+ optimizeRequest.operationThreading(operationThreading);
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+ client.admin().indices().optimize(optimizeRequest, new ActionListener<OptimizeResponse>() {
+ @Override
+ public void onResponse(OptimizeResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ buildBroadcastShardsHeader(builder, response);
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
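
A minimal sketch of the equivalent Java-client call, assuming a connected `client` and a hypothetical index; maxNumSegments(1) mirrors the max_num_segments URI parameter parsed above:

    OptimizeRequest optimizeRequest = new OptimizeRequest("logs-2014-06");
    optimizeRequest.maxNumSegments(1);  // merge down to a single segment
    optimizeRequest.flush(true);        // flush once the merge completes
    OptimizeResponse optimizeResponse = client.admin().indices().optimize(optimizeRequest).actionGet();
    int failedShards = optimizeResponse.getFailedShards();
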
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java
new file mode 100644
index 0000000..95d9abc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.refresh;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Handles the refresh API ({@code GET|POST /{index}/_refresh}), making recent changes visible to search.
+ */
+public class RestRefreshAction extends BaseRestHandler {
+
+ @Inject
+ public RestRefreshAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_refresh", this);
+ controller.registerHandler(POST, "/{index}/_refresh", this);
+
+ controller.registerHandler(GET, "/_refresh", this);
+ controller.registerHandler(GET, "/{index}/_refresh", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ refreshRequest.listenerThreaded(false);
+ refreshRequest.force(request.paramAsBoolean("force", refreshRequest.force()));
+ refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions()));
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a thread per shard
+ operationThreading = BroadcastOperationThreading.THREAD_PER_SHARD;
+ }
+ refreshRequest.operationThreading(operationThreading);
+ client.admin().indices().refresh(refreshRequest, new ActionListener<RefreshResponse>() {
+ @Override
+ public void onResponse(RefreshResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+
+ buildBroadcastShardsHeader(builder, response);
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
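
For reference, a sketch of the programmatic equivalent of POST /{index}/_refresh, again assuming a connected `client` and a placeholder index name:

    RefreshRequest refreshRequest = new RefreshRequest("logs-2014-06");
    RefreshResponse refreshResponse = client.admin().indices().refresh(refreshRequest).actionGet();
    int successfulShards = refreshResponse.getSuccessfulShards();
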
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java
new file mode 100644
index 0000000..46bce19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.segments;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Handles the indices segments API ({@code GET /{index}/_segments}).
+ */
+public class RestIndicesSegmentsAction extends BaseRestHandler {
+
+ @Inject
+ public RestIndicesSegmentsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_segments", this);
+ controller.registerHandler(GET, "/{index}/_segments", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ indicesSegmentsRequest.listenerThreaded(false);
+ indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions()));
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = BroadcastOperationThreading.SINGLE_THREAD;
+ }
+ indicesSegmentsRequest.operationThreading(operationThreading);
+ client.admin().indices().segments(indicesSegmentsRequest, new ActionListener<IndicesSegmentResponse>() {
+ @Override
+ public void onResponse(IndicesSegmentResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ buildBroadcastShardsHeader(builder, response);
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
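
A sketch of the same segments lookup via the Java client (index name assumed):

    IndicesSegmentsRequest segmentsRequest = new IndicesSegmentsRequest("logs-2014-06");
    IndicesSegmentResponse segmentsResponse = client.admin().indices().segments(segmentsRequest).actionGet();
    // per-index segment breakdown, keyed by index name
    IndexSegments indexSegments = segmentsResponse.getIndices().get("logs-2014-06");
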
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java
new file mode 100644
index 0000000..14eab63
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.settings;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+public class RestGetSettingsAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetSettingsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_settings", this);
+ controller.registerHandler(GET, "/{index}/_settings", this);
+ controller.registerHandler(GET, "/{index}/_settings/{name}", this);
+ controller.registerHandler(GET, "/_settings/{name}", this);
+ controller.registerHandler(GET, "/{index}/_setting/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] names = request.paramAsStringArrayOrEmptyIfAll("name");
+ GetSettingsRequest getSettingsRequest = new GetSettingsRequest()
+ .indices(Strings.splitStringByCommaToArray(request.param("index")))
+ .indicesOptions(IndicesOptions.fromRequest(request, IndicesOptions.strict()))
+ .names(names);
+ getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local()));
+
+ client.admin().indices().getSettings(getSettingsRequest, new ActionListener<GetSettingsResponse>() {
+
+ @Override
+ public void onResponse(GetSettingsResponse getSettingsResponse) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ for (ObjectObjectCursor<String, Settings> cursor : getSettingsResponse.getIndexToSettings()) {
+ // no settings, jump over it to shorten the response data
+ if (cursor.value.getAsMap().isEmpty()) {
+ continue;
+ }
+ builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(Fields.SETTINGS);
+ cursor.value.toXContent(builder, request);
+ builder.endObject();
+ builder.endObject();
+ }
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (IOException e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static class Fields {
+
+ static final XContentBuilderString SETTINGS = new XContentBuilderString("settings");
+
+ }
+}
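
A sketch of reading a single setting programmatically, mirroring GET /{index}/_settings/{name}; the index and setting names are placeholders:

    GetSettingsRequest getSettingsRequest = new GetSettingsRequest()
            .indices("logs-2014-06")
            .names("index.number_of_shards");
    GetSettingsResponse settingsResponse = client.admin().indices().getSettings(getSettingsRequest).actionGet();
    // the same index-to-settings map the handler above serializes
    String numberOfShards = settingsResponse.getIndexToSettings().get("logs-2014-06").get("index.number_of_shards");
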
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java
new file mode 100644
index 0000000..e3fcfd1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.settings;
+
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.updateSettingsRequest;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+
+/**
+ * Handles the update index settings API ({@code PUT /{index}/_settings}).
+ */
+public class RestUpdateSettingsAction extends BaseRestHandler {
+
+ @Inject
+ public RestUpdateSettingsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.PUT, "/{index}/_settings", this);
+ controller.registerHandler(RestRequest.Method.PUT, "/_settings", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ updateSettingsRequest.listenerThreaded(false);
+ updateSettingsRequest.timeout(request.paramAsTime("timeout", updateSettingsRequest.timeout()));
+ updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout()));
+ updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions()));
+
+ ImmutableSettings.Builder updateSettings = ImmutableSettings.settingsBuilder();
+ String bodySettingsStr = request.content().toUtf8();
+ if (Strings.hasText(bodySettingsStr)) {
+ try {
+ Settings buildSettings = ImmutableSettings.settingsBuilder().loadFromSource(bodySettingsStr).build();
+ for (Map.Entry<String, String> entry : buildSettings.getAsMap().entrySet()) {
+ String key = entry.getKey();
+ String value = entry.getValue();
+ // clean up in case the body is wrapped with "settings" : { ... }
+ if (key.startsWith("settings.")) {
+ key = key.substring("settings.".length());
+ }
+ updateSettings.put(key, value);
+ }
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, BAD_REQUEST, new SettingsException("Failed to parse index settings", e)));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+ }
+ for (Map.Entry<String, String> entry : request.params().entrySet()) {
+ if (entry.getKey().equals("pretty") || entry.getKey().equals("timeout") || entry.getKey().equals("master_timeout")) {
+ continue;
+ }
+ updateSettings.put(entry.getKey(), entry.getValue());
+ }
+ updateSettingsRequest.settings(updateSettings);
+
+ client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestResponseActionListener<UpdateSettingsResponse>(request, channel, logger));
+ }
+}
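
A sketch of the equivalent settings update from the Java client, assuming `client` and an example replica count:

    UpdateSettingsRequest updateRequest = new UpdateSettingsRequest("logs-2014-06");
    updateRequest.settings(ImmutableSettings.settingsBuilder()
            .put("index.number_of_replicas", 2)
            .build());
    client.admin().indices().updateSettings(updateRequest).actionGet();
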
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java
new file mode 100644
index 0000000..6aaa1ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Handles the indices stats API ({@code GET /{index}/_stats/{metric}}).
+ */
+public class RestIndicesStatsAction extends BaseRestHandler {
+
+ @Inject
+ public RestIndicesStatsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_stats", this);
+ controller.registerHandler(GET, "/_stats/{metric}", this);
+ controller.registerHandler(GET, "/_stats/{metric}/{indexMetric}", this);
+ controller.registerHandler(GET, "/{index}/_stats", this);
+ controller.registerHandler(GET, "/{index}/_stats/{metric}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
+ indicesStatsRequest.listenerThreaded(false);
+ indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesStatsRequest.indicesOptions()));
+ indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
+ indicesStatsRequest.types(Strings.splitStringByCommaToArray(request.param("types")));
+
+ if (request.hasParam("groups")) {
+ indicesStatsRequest.groups(Strings.splitStringByCommaToArray(request.param("groups")));
+ }
+
+ Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
+ // short cut, if no metrics have been specified in URI
+ if (metrics.size() == 1 && metrics.contains("_all")) {
+ indicesStatsRequest.all();
+ } else {
+ indicesStatsRequest.clear();
+ indicesStatsRequest.docs(metrics.contains("docs"));
+ indicesStatsRequest.store(metrics.contains("store"));
+ indicesStatsRequest.indexing(metrics.contains("indexing"));
+ indicesStatsRequest.search(metrics.contains("search"));
+ indicesStatsRequest.get(metrics.contains("get"));
+ indicesStatsRequest.merge(metrics.contains("merge"));
+ indicesStatsRequest.refresh(metrics.contains("refresh"));
+ indicesStatsRequest.flush(metrics.contains("flush"));
+ indicesStatsRequest.warmer(metrics.contains("warmer"));
+ indicesStatsRequest.filterCache(metrics.contains("filter_cache"));
+ indicesStatsRequest.idCache(metrics.contains("id_cache"));
+ indicesStatsRequest.percolate(metrics.contains("percolate"));
+ indicesStatsRequest.segments(metrics.contains("segments"));
+ indicesStatsRequest.fieldData(metrics.contains("fielddata"));
+ indicesStatsRequest.completion(metrics.contains("completion"));
+ }
+
+ if (indicesStatsRequest.completion() && (request.hasParam("fields") || request.hasParam("completion_fields"))) {
+ indicesStatsRequest.completionFields(request.paramAsStringArray("completion_fields", request.paramAsStringArray("fields", Strings.EMPTY_ARRAY)));
+ }
+
+ if (indicesStatsRequest.fieldData() && (request.hasParam("fields") || request.hasParam("fielddata_fields"))) {
+ indicesStatsRequest.fieldDataFields(request.paramAsStringArray("fielddata_fields", request.paramAsStringArray("fields", Strings.EMPTY_ARRAY)));
+ }
+
+ client.admin().indices().stats(indicesStatsRequest, new ActionListener<IndicesStatsResponse>() {
+ @Override
+ public void onResponse(IndicesStatsResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ buildBroadcastShardsHeader(builder, response);
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
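
A sketch of requesting a metric subset programmatically, as GET /_stats/docs,store does over REST (index-wide request, names assumed):

    IndicesStatsRequest statsRequest = new IndicesStatsRequest();
    statsRequest.clear();   // start from no metrics, like the handler above
    statsRequest.docs(true);
    statsRequest.store(true);
    IndicesStatsResponse statsResponse = client.admin().indices().stats(statsRequest).actionGet();
    long docCount = statsResponse.getTotal().getDocs().getCount();
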
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/status/RestIndicesStatusAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/status/RestIndicesStatusAction.java
new file mode 100644
index 0000000..1730607
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/status/RestIndicesStatusAction.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.status;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Handles the indices status API ({@code GET /{index}/_status}).
+ */
+public class RestIndicesStatusAction extends BaseRestHandler {
+
+ private final SettingsFilter settingsFilter;
+
+ @Inject
+ public RestIndicesStatusAction(Settings settings, Client client, RestController controller,
+ SettingsFilter settingsFilter) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_status", this);
+ controller.registerHandler(GET, "/{index}/_status", this);
+
+ this.settingsFilter = settingsFilter;
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ IndicesStatusRequest indicesStatusRequest = new IndicesStatusRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ indicesStatusRequest.listenerThreaded(false);
+ indicesStatusRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesStatusRequest.indicesOptions()));
+ indicesStatusRequest.recovery(request.paramAsBoolean("recovery", indicesStatusRequest.recovery()));
+ indicesStatusRequest.snapshot(request.paramAsBoolean("snapshot", indicesStatusRequest.snapshot()));
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = BroadcastOperationThreading.SINGLE_THREAD;
+ }
+ indicesStatusRequest.operationThreading(operationThreading);
+ client.admin().indices().status(indicesStatusRequest, new ActionListener<IndicesStatusResponse>() {
+ @Override
+ public void onResponse(IndicesStatusResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ buildBroadcastShardsHeader(builder, response);
+ response.toXContent(builder, request, settingsFilter);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
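
A sketch of the corresponding Java-client call, with the recovery flag the handler exposes as a URI parameter (index name assumed):

    IndicesStatusRequest statusRequest = new IndicesStatusRequest("logs-2014-06");
    statusRequest.recovery(true);  // include on-going shard recovery details
    IndicesStatusResponse statusResponse = client.admin().indices().status(statusRequest).actionGet();
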
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java
new file mode 100644
index 0000000..eb2b525
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.template.delete;
+
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+/**
+ * Handles the delete index template API ({@code DELETE /_template/{name}}).
+ */
+public class RestDeleteIndexTemplateAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteIndexTemplateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.DELETE, "/_template/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name"));
+ deleteIndexTemplateRequest.listenerThreaded(false);
+ deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout()));
+ client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestResponseActionListener<DeleteIndexTemplateResponse>(request, channel, logger));
+ }
+}
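
A sketch of deleting a template from the Java client; "template_1" is a placeholder name:

    DeleteIndexTemplateRequest deleteTemplateRequest = new DeleteIndexTemplateRequest("template_1");
    client.admin().indices().deleteTemplate(deleteTemplateRequest).actionGet();
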
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java
new file mode 100644
index 0000000..629e80d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.template.get;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.util.Map;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Handles the get index templates API ({@code GET /_template/{name}}).
+ */
+public class RestGetIndexTemplateAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetIndexTemplateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(GET, "/_template", this);
+ controller.registerHandler(GET, "/_template/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] names = Strings.splitStringByCommaToArray(request.param("name"));
+
+ GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names);
+ getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local()));
+ getIndexTemplatesRequest.listenerThreaded(false);
+
+ final boolean implicitAll = getIndexTemplatesRequest.names().length == 0;
+
+ client.admin().indices().getTemplates(getIndexTemplatesRequest, new ActionListener<GetIndexTemplatesResponse>() {
+ @Override
+ public void onResponse(GetIndexTemplatesResponse getIndexTemplatesResponse) {
+ try {
+ boolean templateExists = getIndexTemplatesResponse.getIndexTemplates().size() > 0;
+
+ Map<String, String> paramsMap = Maps.newHashMap();
+ paramsMap.put("reduce_mappings", "true");
+ ToXContent.Params params = new ToXContent.DelegatingMapParams(paramsMap, request);
+
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ for (IndexTemplateMetaData indexTemplateMetaData : getIndexTemplatesResponse.getIndexTemplates()) {
+ IndexTemplateMetaData.Builder.toXContent(indexTemplateMetaData, builder, params);
+ }
+ builder.endObject();
+
+ RestStatus restStatus = (templateExists || implicitAll) ? OK : NOT_FOUND;
+
+ channel.sendResponse(new XContentRestResponse(request, restStatus, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (Exception e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
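
A sketch of fetching templates by name programmatically (placeholder name assumed):

    GetIndexTemplatesResponse templatesResponse =
            client.admin().indices().getTemplates(new GetIndexTemplatesRequest("template_1")).actionGet();
    for (IndexTemplateMetaData template : templatesResponse.getIndexTemplates()) {
        String name = template.name();         // e.g. "template_1"
        String pattern = template.template();  // the index pattern it applies to
    }
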
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java
new file mode 100644
index 0000000..0b18217
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.template.head;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.HEAD;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Handles index template existence checks ({@code HEAD /_template/{name}}).
+ */
+public class RestHeadIndexTemplateAction extends BaseRestHandler {
+
+ @Inject
+ public RestHeadIndexTemplateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(HEAD, "/_template/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(request.param("name"));
+ getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local()));
+ client.admin().indices().getTemplates(getIndexTemplatesRequest, new ActionListener<GetIndexTemplatesResponse>() {
+ @Override
+ public void onResponse(GetIndexTemplatesResponse getIndexTemplatesResponse) {
+ try {
+ boolean templateExists = getIndexTemplatesResponse.getIndexTemplates().size() > 0;
+
+ if (templateExists) {
+ channel.sendResponse(new StringRestResponse(OK));
+ } else {
+ channel.sendResponse(new StringRestResponse(NOT_FOUND));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new StringRestResponse(ExceptionsHelper.status(e)));
+ } catch (Exception e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java
new file mode 100644
index 0000000..d65fa12
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.template.put;
+
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+/**
+ * Handles the put index template API ({@code PUT|POST /_template/{name}}).
+ */
+public class RestPutIndexTemplateAction extends BaseRestHandler {
+
+ @Inject
+ public RestPutIndexTemplateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.PUT, "/_template/{name}", this);
+ controller.registerHandler(RestRequest.Method.POST, "/_template/{name}", new CreateHandler());
+ }
+
+ final class CreateHandler implements RestHandler {
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel) {
+ request.params().put("create", "true");
+ RestPutIndexTemplateAction.this.handleRequest(request, channel);
+ }
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name"));
+ putRequest.listenerThreaded(false);
+ putRequest.template(request.param("template", putRequest.template()));
+ putRequest.order(request.paramAsInt("order", putRequest.order()));
+ putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout()));
+
+ try {
+ putRequest.create(request.paramAsBoolean("create", false));
+ putRequest.cause(request.param("cause", ""));
+ putRequest.source(request.content());
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+
+ client.admin().indices().putTemplate(putRequest, new AcknowledgedRestResponseActionListener<PutIndexTemplateResponse>(request, channel, logger));
+ }
+}
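
A sketch of registering a template via the Java client; the name, pattern, and settings body are placeholders:

    PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest("template_1");
    putTemplateRequest.template("logs-*");  // indices the template applies to
    putTemplateRequest.order(0);
    putTemplateRequest.source("{\"settings\":{\"index.number_of_shards\":1}}");
    client.admin().indices().putTemplate(putTemplateRequest).actionGet();
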
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java
new file mode 100644
index 0000000..a4cf05d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.validate.query;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Handles the validate query API ({@code GET|POST /{index}/_validate/query}).
+ */
+public class RestValidateQueryAction extends BaseRestHandler {
+
+ @Inject
+ public RestValidateQueryAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_validate/query", this);
+ controller.registerHandler(POST, "/_validate/query", this);
+ controller.registerHandler(GET, "/{index}/_validate/query", this);
+ controller.registerHandler(POST, "/{index}/_validate/query", this);
+ controller.registerHandler(GET, "/{index}/{type}/_validate/query", this);
+ controller.registerHandler(POST, "/{index}/{type}/_validate/query", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ validateQueryRequest.listenerThreaded(false);
+ validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions()));
+ try {
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.SINGLE_THREAD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = BroadcastOperationThreading.SINGLE_THREAD;
+ }
+ validateQueryRequest.operationThreading(operationThreading);
+ if (request.hasContent()) {
+ validateQueryRequest.source(request.content(), request.contentUnsafe());
+ } else {
+ String source = request.param("source");
+ if (source != null) {
+ validateQueryRequest.source(source);
+ } else {
+ QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request);
+ if (querySourceBuilder != null) {
+ validateQueryRequest.source(querySourceBuilder);
+ }
+ }
+ }
+ validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
+ validateQueryRequest.explain(request.paramAsBoolean("explain", false));
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.admin().indices().validateQuery(validateQueryRequest, new ActionListener<ValidateQueryResponse>() {
+ @Override
+ public void onResponse(ValidateQueryResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ builder.field("valid", response.isValid());
+
+ buildBroadcastShardsHeader(builder, response);
+
+ if (response.getQueryExplanation() != null && !response.getQueryExplanation().isEmpty()) {
+ builder.startArray("explanations");
+ for (QueryExplanation explanation : response.getQueryExplanation()) {
+ builder.startObject();
+ if (explanation.getIndex() != null) {
+ builder.field("index", explanation.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+ }
+ builder.field("valid", explanation.isValid());
+ if (explanation.getError() != null) {
+ builder.field("error", explanation.getError());
+ }
+ if (explanation.getExplanation() != null) {
+ builder.field("explanation", explanation.getExplanation());
+ }
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
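
A sketch of validating a query with explanations from the Java client, using the same QuerySourceBuilder the handler falls back to; the index and field names are assumptions:

    ValidateQueryRequest validateRequest = new ValidateQueryRequest("logs-2014-06");
    validateRequest.source(new QuerySourceBuilder().setQuery(QueryBuilders.termQuery("user", "kimchy")));
    validateRequest.explain(true);
    ValidateQueryResponse validateResponse = client.admin().indices().validateQuery(validateRequest).actionGet();
    boolean valid = validateResponse.isValid();
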
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java
new file mode 100644
index 0000000..e54e458
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.warmer.delete;
+
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+
+/**
+ * Handles the delete warmer API ({@code DELETE /{index}/_warmer/{name}}).
+ */
+public class RestDeleteWarmerAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteWarmerAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/{index}/_warmer", this);
+ controller.registerHandler(DELETE, "/{index}/_warmer/{name}", this);
+ controller.registerHandler(DELETE, "/{index}/_warmers", this);
+ controller.registerHandler(DELETE, "/{index}/_warmers/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name")))
+ .indices(Strings.splitStringByCommaToArray(request.param("index")));
+ deleteWarmerRequest.listenerThreaded(false);
+ deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout()));
+ deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout()));
+ deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions()));
+ client.admin().indices().deleteWarmer(deleteWarmerRequest, new AcknowledgedRestResponseActionListener<DeleteWarmerResponse>(request, channel, logger));
+ }
+}
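
A sketch of the matching Java-client call (warmer and index names are placeholders):

    DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest("warmer_1").indices("logs-2014-06");
    client.admin().indices().deleteWarmer(deleteWarmerRequest).actionGet();
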
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java
new file mode 100644
index 0000000..3634424
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.warmer.get;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Handles the get warmers API ({@code GET /{index}/_warmer/{name}}).
+ */
+public class RestGetWarmerAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetWarmerAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_warmer", this);
+ controller.registerHandler(GET, "/_warmer/{name}", this);
+ controller.registerHandler(GET, "/{index}/_warmer", this);
+ controller.registerHandler(GET, "/{index}/_warmer/{name}", this);
+ controller.registerHandler(GET, "/{index}/_warmers/{name}", this);
+ controller.registerHandler(GET, "/{index}/{type}/_warmer/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final String[] types = Strings.splitStringByCommaToArray(request.param("type"));
+ final String[] names = request.paramAsStringArray("name", Strings.EMPTY_ARRAY);
+
+ GetWarmersRequest getWarmersRequest = new GetWarmersRequest();
+ getWarmersRequest.indices(indices).types(types).warmers(names);
+ getWarmersRequest.local(request.paramAsBoolean("local", getWarmersRequest.local()));
+ getWarmersRequest.indicesOptions(IndicesOptions.fromRequest(request, getWarmersRequest.indicesOptions()));
+ client.admin().indices().getWarmers(getWarmersRequest, new ActionListener<GetWarmersResponse>() {
+
+ @Override
+ public void onResponse(GetWarmersResponse response) {
+ try {
+ if (indices.length > 0 && response.warmers().isEmpty()) {
+ channel.sendResponse(new XContentRestResponse(request, OK, RestXContentBuilder.emptyBuilder(request)));
+ return;
+ }
+
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ for (ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry : response.warmers()) {
+ builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE);
+ builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE);
+ for (IndexWarmersMetaData.Entry warmerEntry : entry.value) {
+ IndexWarmersMetaData.FACTORY.toXContent(warmerEntry, builder, request);
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+ builder.endObject();
+
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
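
The handler above is a thin wrapper over the Java client API, so the same lookup can be issued programmatically. A minimal sketch, assuming an already-connected org.elasticsearch.client.Client named `client`; the index and warmer names are placeholders, and the listener mirrors the one registered above:

    // Fetch the warmers named "w1" for index "test", as the REST handler does.
    GetWarmersRequest getWarmersRequest = new GetWarmersRequest();
    getWarmersRequest.indices("test").types(Strings.EMPTY_ARRAY).warmers("w1");
    getWarmersRequest.local(false); // false: resolve via the master, not the local cluster state
    client.admin().indices().getWarmers(getWarmersRequest, new ActionListener<GetWarmersResponse>() {
        @Override
        public void onResponse(GetWarmersResponse response) {
            // response.warmers() maps index name -> warmer entries, as iterated above
            for (ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry : response.warmers()) {
                System.out.println(entry.key + ": " + entry.value.size() + " warmer(s)");
            }
        }

        @Override
        public void onFailure(Throwable e) {
            e.printStackTrace();
        }
    });
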
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java
new file mode 100644
index 0000000..b0bc3b1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+
+/**
+ * REST handler that registers an index warmer via PUT or POST on
+ * /_warmer/{name} and its index-, type- and _warmers-scoped variants.
+ */
+public class RestPutWarmerAction extends BaseRestHandler {
+
+ @Inject
+ public RestPutWarmerAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(PUT, "/_warmer/{name}", this);
+ controller.registerHandler(PUT, "/{index}/_warmer/{name}", this);
+ controller.registerHandler(PUT, "/{index}/{type}/_warmer/{name}", this);
+
+ controller.registerHandler(PUT, "/_warmers/{name}", this);
+ controller.registerHandler(PUT, "/{index}/_warmers/{name}", this);
+ controller.registerHandler(PUT, "/{index}/{type}/_warmers/{name}", this);
+
+ controller.registerHandler(POST, "/_warmer/{name}", this);
+ controller.registerHandler(POST, "/{index}/_warmer/{name}", this);
+ controller.registerHandler(POST, "/{index}/{type}/_warmer/{name}", this);
+
+ controller.registerHandler(POST, "/_warmers/{name}", this);
+ controller.registerHandler(POST, "/{index}/_warmers/{name}", this);
+ controller.registerHandler(POST, "/{index}/{type}/_warmers/{name}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name"));
+ putWarmerRequest.listenerThreaded(false);
+ SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")))
+ .types(Strings.splitStringByCommaToArray(request.param("type")))
+ .source(request.content(), request.contentUnsafe());
+ searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
+ putWarmerRequest.searchRequest(searchRequest);
+ putWarmerRequest.timeout(request.paramAsTime("timeout", putWarmerRequest.timeout()));
+ putWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putWarmerRequest.masterNodeTimeout()));
+ client.admin().indices().putWarmer(putWarmerRequest, new AcknowledgedRestResponseActionListener(request, channel, logger));
+ }
+}
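
Registering a warmer goes through the same client call the handler delegates to. A sketch, assuming a connected `client`, that SearchRequest accepts a JSON query body via one of its String source(...) overloads, and that putWarmer's blocking form returns a PutWarmerResponse with isAcknowledged() (consistent with the acknowledged-response listener used above); all names are placeholders:

    PutWarmerRequest putWarmerRequest = new PutWarmerRequest("w1");
    SearchRequest searchRequest = new SearchRequest("test")
            .types("type1")
            .source("{\"query\":{\"match_all\":{}}}"); // the body that would arrive as request.content()
    putWarmerRequest.searchRequest(searchRequest);
    // Blocking variant of the async putWarmer(...) call used by the handler.
    PutWarmerResponse response = client.admin().indices().putWarmer(putWarmerRequest).actionGet();
    System.out.println("acknowledged: " + response.isAcknowledged());
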
diff --git a/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java
new file mode 100644
index 0000000..011f0ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.bulk;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.BulkShardRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * REST handler for the bulk API. The body is newline-delimited JSON: an
+ * action line, followed by a source line for index/create actions, e.g.
+ * <pre>
+ * { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+ * { "type1" : { "field1" : "value1" } }
+ * { "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
+ * { "create" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+ * { "type1" : { "field1" : "value1" } }
+ * </pre>
+ */
+public class RestBulkAction extends BaseRestHandler {
+
+ private final boolean allowExplicitIndex;
+
+ @Inject
+ public RestBulkAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(POST, "/_bulk", this);
+ controller.registerHandler(PUT, "/_bulk", this);
+ controller.registerHandler(POST, "/{index}/_bulk", this);
+ controller.registerHandler(PUT, "/{index}/_bulk", this);
+ controller.registerHandler(POST, "/{index}/{type}/_bulk", this);
+ controller.registerHandler(PUT, "/{index}/{type}/_bulk", this);
+
+ this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ BulkRequest bulkRequest = Requests.bulkRequest();
+ bulkRequest.listenerThreaded(false);
+ String defaultIndex = request.param("index");
+ String defaultType = request.param("type");
+ String defaultRouting = request.param("routing");
+
+ String replicationType = request.param("replication");
+ if (replicationType != null) {
+ bulkRequest.replicationType(ReplicationType.fromString(replicationType));
+ }
+ String consistencyLevel = request.param("consistency");
+ if (consistencyLevel != null) {
+ bulkRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
+ }
+ bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
+ bulkRequest.refresh(request.paramAsBoolean("refresh", bulkRequest.refresh()));
+ try {
+ bulkRequest.add(request.content(), request.contentUnsafe(), defaultIndex, defaultType, defaultRouting, null, allowExplicitIndex);
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ builder.field(Fields.TOOK, response.getTookInMillis());
+ builder.field(Fields.ERRORS, response.hasFailures());
+ builder.startArray(Fields.ITEMS);
+ for (BulkItemResponse itemResponse : response) {
+ builder.startObject();
+ builder.startObject(itemResponse.getOpType());
+ builder.field(Fields._INDEX, itemResponse.getIndex());
+ builder.field(Fields._TYPE, itemResponse.getType());
+ builder.field(Fields._ID, itemResponse.getId());
+ long version = itemResponse.getVersion();
+ if (version != -1) {
+ builder.field(Fields._VERSION, version);
+ }
+ if (itemResponse.isFailed()) {
+ builder.field(Fields.STATUS, itemResponse.getFailure().getStatus().getStatus());
+ builder.field(Fields.ERROR, itemResponse.getFailure().getMessage());
+ } else {
+ if (itemResponse.getResponse() instanceof DeleteResponse) {
+ DeleteResponse deleteResponse = itemResponse.getResponse();
+ if (deleteResponse.isFound()) {
+ builder.field(Fields.STATUS, RestStatus.OK.getStatus());
+ } else {
+ builder.field(Fields.STATUS, RestStatus.NOT_FOUND.getStatus());
+ }
+ builder.field(Fields.FOUND, deleteResponse.isFound());
+ } else if (itemResponse.getResponse() instanceof IndexResponse) {
+ IndexResponse indexResponse = itemResponse.getResponse();
+ if (indexResponse.isCreated()) {
+ builder.field(Fields.STATUS, RestStatus.CREATED.getStatus());
+ } else {
+ builder.field(Fields.STATUS, RestStatus.OK.getStatus());
+ }
+ } else if (itemResponse.getResponse() instanceof UpdateResponse) {
+ UpdateResponse updateResponse = itemResponse.getResponse();
+ if (updateResponse.isCreated()) {
+ builder.field(Fields.STATUS, RestStatus.CREATED.getStatus());
+ } else {
+ builder.field(Fields.STATUS, RestStatus.OK.getStatus());
+ }
+ }
+ }
+ builder.endObject();
+ builder.endObject();
+ }
+ builder.endArray();
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static final class Fields {
+ static final XContentBuilderString ITEMS = new XContentBuilderString("items");
+ static final XContentBuilderString ERRORS = new XContentBuilderString("errors");
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString STATUS = new XContentBuilderString("status");
+ static final XContentBuilderString ERROR = new XContentBuilderString("error");
+ static final XContentBuilderString TOOK = new XContentBuilderString("took");
+ static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ static final XContentBuilderString FOUND = new XContentBuilderString("found");
+ }
+
+}
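
The response fields the handler renders ("took", "errors", "items" with per-item status) map directly onto BulkResponse/BulkItemResponse, so the round trip can be reproduced client-side. A sketch, assuming a connected `client` and a byte[] add(...) overload on BulkRequest parallel to the BytesReference variant used above; run it inside a method that declares throws Exception:

    byte[] body = (
            "{ \"index\" : { \"_index\" : \"test\", \"_type\" : \"type1\", \"_id\" : \"1\" } }\n"
            + "{ \"type1\" : { \"field1\" : \"value1\" } }\n").getBytes("UTF-8");
    BulkRequest bulkRequest = Requests.bulkRequest();
    bulkRequest.add(body, 0, body.length, true);
    BulkResponse response = client.bulk(bulkRequest).actionGet();
    // Same data the handler renders as {"took": ..., "errors": ..., "items": [...]}
    System.out.println("took=" + response.getTookInMillis() + " errors=" + response.hasFailures());
    for (BulkItemResponse itemResponse : response) {
        System.out.println(itemResponse.getOpType() + " " + itemResponse.getIndex() + "/"
                + itemResponse.getType() + "/" + itemResponse.getId()
                + (itemResponse.isFailed() ? " failed: " + itemResponse.getFailure().getMessage() : " ok"));
    }
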
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java b/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java
new file mode 100644
index 0000000..7d5f507
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.action.support.RestTable.buildHelpWidths;
+import static org.elasticsearch.rest.action.support.RestTable.pad;
+
+/**
+ * Base class for _cat API handlers: when the "help" parameter is set it
+ * renders the column help derived from getTableWithHeader(...), otherwise
+ * it delegates to doRequest(...).
+ */
+public abstract class AbstractCatAction extends BaseRestHandler {
+
+ public AbstractCatAction(Settings settings, Client client) {
+ super(settings, client);
+ }
+
+ abstract void doRequest(final RestRequest request, final RestChannel channel);
+ abstract void documentation(StringBuilder sb);
+ abstract Table getTableWithHeader(final RestRequest request);
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ boolean helpWanted = request.paramAsBoolean("help", false);
+ if (helpWanted) {
+ Table table = getTableWithHeader(request);
+ int[] width = buildHelpWidths(table, request, false);
+ StringBuilder out = new StringBuilder();
+ for (Table.Cell cell : table.getHeaders()) {
+ // need to do left-align always, so create new cells
+ pad(new Table.Cell(cell.value), width[0], request, out);
+ out.append(" | ");
+ pad(new Table.Cell(cell.attr.containsKey("alias") ? cell.attr.get("alias") : ""), width[1], request, out);
+ out.append(" | ");
+ pad(new Table.Cell(cell.attr.containsKey("desc") ? cell.attr.get("desc") : "not available"), width[2], request, out);
+ out.append("\n");
+ }
+ channel.sendResponse(new StringRestResponse(RestStatus.OK, out.toString()));
+ } else {
+ doRequest(request, channel);
+ }
+ }
+
+}
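
Concrete _cat actions only fill in three hooks: documentation(...) (one line per endpoint, surfaced by GET /_cat), getTableWithHeader(...) (column metadata, including the alias/desc attributes rendered by the help branch above), and doRequest(...). A hypothetical minimal subclass, placed in the same package so it can see the package-private hooks:

    public class RestHelloAction extends AbstractCatAction {

        @Inject
        public RestHelloAction(Settings settings, Client client, RestController controller) {
            super(settings, client);
            controller.registerHandler(GET, "/_cat/hello", this);
        }

        @Override
        void documentation(StringBuilder sb) {
            sb.append("/_cat/hello\n");
        }

        @Override
        Table getTableWithHeader(RestRequest request) {
            Table table = new Table();
            table.startHeaders();
            table.addCell("greeting", "alias:g;desc:a fixed greeting");
            table.endHeaders();
            return table;
        }

        @Override
        void doRequest(RestRequest request, RestChannel channel) {
            Table table = getTableWithHeader(request);
            table.startRow();
            table.addCell("hello");
            table.endRow();
            try {
                channel.sendResponse(RestTable.buildResponse(table, request, channel));
            } catch (Throwable e) {
                // real actions reply with XContentThrowableRestResponse here
            }
        }
    }
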
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java
new file mode 100644
index 0000000..c204d65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.cat;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+/**
+ * Cat API handler that lists aliases and the indices they point to
+ * (GET /_cat/aliases and /_cat/aliases/{alias}).
+ */
+public class RestAliasAction extends AbstractCatAction {
+
+ @Inject
+ public RestAliasAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/aliases", this);
+ controller.registerHandler(GET, "/_cat/aliases/{alias}", this);
+ }
+
+
+ @Override
+ void doRequest(final RestRequest request, final RestChannel channel) {
+ final GetAliasesRequest getAliasesRequest = request.hasParam("alias") ?
+ new GetAliasesRequest(request.param("alias")) :
+ new GetAliasesRequest();
+ getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
+
+ client.admin().indices().getAliases(getAliasesRequest, new ActionListener<GetAliasesResponse>() {
+ @Override
+ public void onResponse(GetAliasesResponse response) {
+ try {
+ Table tab = buildTable(request, response);
+ channel.sendResponse(RestTable.buildResponse(tab, request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/aliases\n");
+ sb.append("/_cat/aliases/{alias}\n");
+ }
+
+ @Override
+ Table getTableWithHeader(RestRequest request) {
+ final Table table = new Table();
+ table.startHeaders();
+ table.addCell("alias", "alias:a;desc:alias name");
+ table.addCell("index", "alias:i,idx;desc:index alias points to");
+ table.addCell("filter", "alias:f,fi;desc:filter");
+ table.addCell("routing.index", "alias:ri,routingIndex;desc:index routing");
+ table.addCell("routing.search", "alias:rs,routingSearch;desc:search routing");
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest request, GetAliasesResponse response) {
+ Table table = getTableWithHeader(request);
+
+ for (ObjectObjectCursor<String, List<AliasMetaData>> cursor : response.getAliases()) {
+ String indexName = cursor.key;
+ for (AliasMetaData aliasMetaData : cursor.value) {
+ table.startRow();
+ table.addCell(aliasMetaData.alias());
+ table.addCell(indexName);
+ table.addCell(aliasMetaData.filteringRequired() ? "*" : "-");
+ String indexRouting = Strings.hasLength(aliasMetaData.indexRouting()) ? aliasMetaData.indexRouting() : "-";
+ table.addCell(indexRouting);
+ String searchRouting = Strings.hasLength(aliasMetaData.searchRouting()) ? aliasMetaData.searchRouting() : "-";
+ table.addCell(searchRouting);
+ table.endRow();
+ }
+ }
+
+ return table;
+ }
+
+}
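
Given those columns and the row-building above, a cluster holding one filtered alias would render roughly as follows ("*" marks a filtered alias, "-" an unset routing value; the names and values are illustrative):

    alias  index  filter routing.index routing.search
    alias1 test_1 *      -             -
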
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
new file mode 100644
index 0000000..3f488bb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Locale;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+
+public class RestAllocationAction extends AbstractCatAction {
+
+ @Inject
+ public RestAllocationAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/allocation", this);
+ controller.registerHandler(GET, "/_cat/allocation/{nodes}", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/allocation\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes"));
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().routingTable(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(final ClusterStateResponse state) {
+ NodesStatsRequest statsRequest = new NodesStatsRequest(nodes);
+ statsRequest.clear().fs(true);
+
+ client.admin().cluster().nodesStats(statsRequest, new ActionListener<NodesStatsResponse>() {
+ @Override
+ public void onResponse(NodesStatsResponse stats) {
+ try {
+ Table tab = buildTable(request, state, stats);
+ channel.sendResponse(RestTable.buildResponse(tab, request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ final Table table = new Table();
+ table.startHeaders();
+ table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node");
+ table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)");
+ table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available");
+ table.addCell("disk.total", "alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes");
+ table.addCell("disk.percent", "alias:dp,diskPercent;text-align:right;desc:percent disk used");
+ table.addCell("host", "alias:h;desc:host of node");
+ table.addCell("ip", "desc:ip of node");
+ table.addCell("node", "alias:n;desc:name of node");
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) {
+ final ObjectIntOpenHashMap<String> allocs = new ObjectIntOpenHashMap<String>();
+
+ for (ShardRouting shard : state.getState().routingTable().allShards()) {
+ String nodeId = "UNASSIGNED";
+
+ if (shard.assignedToNode()) {
+ nodeId = shard.currentNodeId();
+ }
+
+ allocs.addTo(nodeId, 1);
+ }
+
+ Table table = getTableWithHeader(request);
+
+ for (NodeStats nodeStats : stats.getNodes()) {
+ DiscoveryNode node = nodeStats.getNode();
+
+ int shardCount = 0;
+ if (allocs.containsKey(node.id())) {
+ shardCount = allocs.lget();
+ }
+
+ long used = nodeStats.getFs().getTotal().getTotal().bytes() - nodeStats.getFs().getTotal().getAvailable().bytes();
+ long avail = nodeStats.getFs().getTotal().getAvailable().bytes();
+
+ short diskPercent = -1;
+ if (used >= 0 && avail >= 0 && used + avail > 0) {
+ diskPercent = (short) (used * 100 / (used + avail));
+ }
+
+ table.startRow();
+ table.addCell(shardCount);
+ table.addCell(used < 0 ? null : new ByteSizeValue(used));
+ table.addCell(avail < 0 ? null : new ByteSizeValue(avail));
+ table.addCell(nodeStats.getFs().getTotal().getTotal());
+ table.addCell(diskPercent < 0 ? null : diskPercent);
+ table.addCell(node == null ? null : node.getHostName());
+ table.addCell(node == null ? null : node.getHostAddress());
+ table.addCell(node == null ? "UNASSIGNED" : node.name());
+ table.endRow();
+ }
+
+ if (allocs.containsKey("UNASSIGNED")) {
+ table.startRow();
+ table.addCell(allocs.lget());
+ table.addCell(null);
+ table.addCell(null);
+ table.addCell(null);
+ table.addCell(null);
+ table.addCell(null);
+ table.addCell(null);
+ table.addCell("UNASSIGNED");
+ table.endRow();
+ }
+
+ return table;
+ }
+
+}
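
disk.percent is plain integer arithmetic over the node's FsStats totals. A small worked check of the same formula (the byte counts are hypothetical):

    long used = 75L * 1024 * 1024 * 1024;  // 75 GiB in use (total - available)
    long avail = 25L * 1024 * 1024 * 1024; // 25 GiB free
    short diskPercent = -1;                // negative renders as a null cell
    if (used >= 0 && avail >= 0 && used + avail > 0) {
        diskPercent = (short) (used * 100 / (used + avail)); // (75 * 100) / 100 = 75
    }
    assert diskPercent == 75;
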
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java
new file mode 100644
index 0000000..765b6fb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestCatAction extends BaseRestHandler {
+
+ private static final String CAT = "=^.^=";
+ private static final String CAT_NL = CAT + "\n";
+ private final String HELP;
+
+ @Inject
+ public RestCatAction(Settings settings, Client client, RestController controller, Set<AbstractCatAction> catActions) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat", this);
+ StringBuilder sb = new StringBuilder();
+ sb.append(CAT_NL);
+ for (AbstractCatAction catAction : catActions) {
+ catAction.documentation(sb);
+ }
+ HELP = sb.toString();
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ try {
+ channel.sendResponse(new StringRestResponse(RestStatus.OK, HELP));
+ } catch (Throwable t) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, t));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ }
+}
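
With the cat actions shown in this section registered, GET /_cat therefore answers with the banner followed by one line per documented endpoint, for example (ordering depends on the injected set, and other cat actions in the commit add further lines):

    =^.^=
    /_cat/aliases
    /_cat/aliases/{alias}
    /_cat/allocation
    /_cat/count
    /_cat/count/{index}
    /_cat/health
    /_cat/indices
    /_cat/indices/{index}
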
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java
new file mode 100644
index 0000000..95f1322
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestTable;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestCountAction extends AbstractCatAction {
+
+ @Inject
+ protected RestCountAction(Settings settings, Client client, RestController restController) {
+ super(settings, client);
+ restController.registerHandler(GET, "/_cat/count", this);
+ restController.registerHandler(GET, "/_cat/count/{index}", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/count\n");
+ sb.append("/_cat/count/{index}\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ CountRequest countRequest = new CountRequest(indices);
+ countRequest.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
+
+ String source = request.param("source");
+ if (source != null) {
+ countRequest.source(source);
+ } else {
+ QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request);
+ if (querySourceBuilder != null) {
+ countRequest.source(querySourceBuilder);
+ }
+ }
+
+ client.count(countRequest, new ActionListener<CountResponse>() {
+ @Override
+ public void onResponse(CountResponse countResponse) {
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildTable(request, countResponse), request, channel));
+ } catch (Throwable t) {
+ onFailure(t);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, t));
+ } catch (IOException e) {
+ logger.error("Failed to send failure response", e);
+ }
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("epoch", "alias:t,time;desc:seconds since 1970-01-01 00:00:00, that the count was executed");
+ table.addCell("timestamp", "alias:ts,hms;desc:time that the count was executed");
+ table.addCell("count", "alias:dc,docs.count,docsCount;desc:the document count");
+ table.endHeaders();
+ return table;
+ }
+
+ private DateTimeFormatter dateFormat = DateTimeFormat.forPattern("HH:mm:ss");
+
+ private Table buildTable(RestRequest request, CountResponse response) {
+ Table table = getTableWithHeader(request);
+ long time = System.currentTimeMillis();
+ table.startRow();
+ table.addCell(TimeUnit.SECONDS.convert(time, TimeUnit.MILLISECONDS));
+ table.addCell(dateFormat.print(time));
+ table.addCell(response.getCount());
+ table.endRow();
+
+ return table;
+ }
+}
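
The same count, with the epoch and timestamp cells the row carries, can be produced through the client API. A sketch assuming a connected `client`; the String source(...) call mirrors the handler's "source" parameter branch:

    CountRequest countRequest = new CountRequest("test");
    countRequest.source("{\"query\":{\"match_all\":{}}}");
    CountResponse countResponse = client.count(countRequest).actionGet();

    long time = System.currentTimeMillis();
    System.out.println(TimeUnit.SECONDS.convert(time, TimeUnit.MILLISECONDS) // epoch column
            + " " + DateTimeFormat.forPattern("HH:mm:ss").print(time)        // timestamp column
            + " " + countResponse.getCount());                               // count column
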
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java
new file mode 100644
index 0000000..2d87686
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestTable;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestHealthAction extends AbstractCatAction {
+
+ @Inject
+ public RestHealthAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/health", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/health\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest();
+
+ client.admin().cluster().health(clusterHealthRequest, new ActionListener<ClusterHealthResponse>() {
+ @Override
+ public void onResponse(final ClusterHealthResponse health) {
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildTable(health, request), request, channel));
+ } catch (Throwable t) {
+ onFailure(t);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table t = new Table();
+ t.startHeaders();
+ t.addCell("epoch", "alias:t,time;desc:seconds since 1970-01-01 00:00:00");
+ t.addCell("timestamp", "alias:ts,hms,hhmmss;desc:time in HH:MM:SS");
+ t.addCell("cluster", "alias:cl;desc:cluster name");
+ t.addCell("status", "alias:st;desc:health status");
+ t.addCell("node.total", "alias:nt,nodeTotal;text-align:right;desc:total number of nodes");
+ t.addCell("node.data", "alias:nd,nodeData;text-align:right;desc:number of nodes that can store data");
+ t.addCell("shards", "alias:t,sh,shards.total,shardsTotal;text-align:right;desc:total number of shards");
+ t.addCell("pri", "alias:p,shards.primary,shardsPrimary;text-align:right;desc:number of primary shards");
+ t.addCell("relo", "alias:r,shards.relocating,shardsRelocating;text-align:right;desc:number of relocating nodes");
+ t.addCell("init", "alias:i,shards.initializing,shardsInitializing;text-align:right;desc:number of initializing nodes");
+ t.addCell("unassign", "alias:u,shards.unassigned,shardsUnassigned;text-align:right;desc:number of unassigned shards");
+ t.endHeaders();
+
+ return t;
+ }
+
+ private DateTimeFormatter dateFormat = DateTimeFormat.forPattern("HH:mm:ss");
+
+ private Table buildTable(final ClusterHealthResponse health, final RestRequest request) {
+ long time = System.currentTimeMillis();
+ Table t = getTableWithHeader(request);
+ t.startRow();
+ t.addCell(TimeUnit.SECONDS.convert(time, TimeUnit.MILLISECONDS));
+ t.addCell(dateFormat.print(time));
+ t.addCell(health.getClusterName());
+ t.addCell(health.getStatus().name().toLowerCase(Locale.ROOT));
+ t.addCell(health.getNumberOfNodes());
+ t.addCell(health.getNumberOfDataNodes());
+ t.addCell(health.getActiveShards());
+ t.addCell(health.getActivePrimaryShards());
+ t.addCell(health.getRelocatingShards());
+ t.addCell(health.getInitializingShards());
+ t.addCell(health.getUnassignedShards());
+ t.endRow();
+ return t;
+ }
+}
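
A single-node cluster in green health would render one row along these lines (values are illustrative):

    epoch      timestamp cluster       status node.total node.data shards pri relo init unassign
    1390000000 12:00:00  elasticsearch green           1         1      5   5    0    0        0
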
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
new file mode 100644
index 0000000..c5cb409
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
@@ -0,0 +1,448 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestIndicesAction extends AbstractCatAction {
+
+ @Inject
+ public RestIndicesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/indices", this);
+ controller.registerHandler(GET, "/_cat/indices/{index}", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/indices\n");
+ sb.append("/_cat/indices/{index}\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().indices(indices).metaData(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(final ClusterStateResponse clusterStateResponse) {
+ final String[] concreteIndices = clusterStateResponse.getState().metaData().concreteIndicesIgnoreMissing(indices);
+ ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(concreteIndices);
+ clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local()));
+ client.admin().cluster().health(clusterHealthRequest, new ActionListener<ClusterHealthResponse>() {
+ @Override
+ public void onResponse(final ClusterHealthResponse clusterHealthResponse) {
+ IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
+ indicesStatsRequest.all();
+ client.admin().indices().stats(indicesStatsRequest, new ActionListener<IndicesStatsResponse>() {
+ @Override
+ public void onResponse(IndicesStatsResponse indicesStatsResponse) {
+ try {
+ Table tab = buildTable(request, concreteIndices, clusterHealthResponse, indicesStatsResponse);
+ channel.sendResponse(RestTable.buildResponse(tab, request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("health", "alias:h;desc:current health status");
+ table.addCell("index", "alias:i,idx;desc:index name");
+ table.addCell("pri", "alias:p,shards.primary,shardsPrimary;text-align:right;desc:number of primary shards");
+ table.addCell("rep", "alias:r,shards.replica,shardsReplica;text-align:right;desc:number of replica shards");
+ table.addCell("docs.count", "alias:dc,docsCount;text-align:right;desc:available docs");
+ table.addCell("docs.deleted", "alias:dd,docsDeleted;text-align:right;desc:deleted docs");
+
+ table.addCell("store.size", "sibling:pri;alias:ss,storeSize;text-align:right;desc:store size of primaries & replicas");
+ table.addCell("pri.store.size", "text-align:right;desc:store size of primaries");
+
+ table.addCell("completion.size", "sibling:pri;alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
+ table.addCell("pri.completion.size", "default:false;text-align:right;desc:size of completion");
+
+ table.addCell("fielddata.memory_size", "sibling:pri;alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
+ table.addCell("pri.fielddata.memory_size", "default:false;text-align:right;desc:used fielddata cache");
+
+ table.addCell("fielddata.evictions", "sibling:pri;alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
+ table.addCell("pri.fielddata.evictions", "default:false;text-align:right;desc:fielddata evictions");
+
+ table.addCell("filter_cache.memory_size", "sibling:pri;alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
+ table.addCell("pri.filter_cache.memory_size", "default:false;text-align:right;desc:used filter cache");
+
+ table.addCell("filter_cache.evictions", "sibling:pri;alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
+ table.addCell("pri.filter_cache.evictions", "default:false;text-align:right;desc:filter cache evictions");
+
+ table.addCell("flush.total", "sibling:pri;alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
+ table.addCell("pri.flush.total", "default:false;text-align:right;desc:number of flushes");
+
+ table.addCell("flush.total_time", "sibling:pri;alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
+ table.addCell("pri.flush.total_time", "default:false;text-align:right;desc:time spent in flush");
+
+ table.addCell("get.current", "sibling:pri;alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops");
+ table.addCell("pri.get.current", "default:false;text-align:right;desc:number of current get ops");
+
+ table.addCell("get.time", "sibling:pri;alias:gti,getTime;default:false;text-align:right;desc:time spent in get");
+ table.addCell("pri.get.time", "default:false;text-align:right;desc:time spent in get");
+
+ table.addCell("get.total", "sibling:pri;alias:gto,getTotal;default:false;text-align:right;desc:number of get ops");
+ table.addCell("pri.get.total", "default:false;text-align:right;desc:number of get ops");
+
+ table.addCell("get.exists_time", "sibling:pri;alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets");
+ table.addCell("pri.get.exists_time", "default:false;text-align:right;desc:time spent in successful gets");
+
+ table.addCell("get.exists_total", "sibling:pri;alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets");
+ table.addCell("pri.get.exists_total", "default:false;text-align:right;desc:number of successful gets");
+
+ table.addCell("get.missing_time", "sibling:pri;alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
+ table.addCell("pri.get.missing_time", "default:false;text-align:right;desc:time spent in failed gets");
+
+ table.addCell("get.missing_total", "sibling:pri;alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
+ table.addCell("pri.get.missing_total", "default:false;text-align:right;desc:number of failed gets");
+
+ table.addCell("id_cache.memory_size", "sibling:pri;alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
+ table.addCell("pri.id_cache.memory_size", "default:false;text-align:right;desc:used id cache");
+
+ table.addCell("indexing.delete_current", "sibling:pri;alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
+ table.addCell("pri.indexing.delete_current", "default:false;text-align:right;desc:number of current deletions");
+
+ table.addCell("indexing.delete_time", "sibling:pri;alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
+ table.addCell("pri.indexing.delete_time", "default:false;text-align:right;desc:time spent in deletions");
+
+ table.addCell("indexing.delete_total", "sibling:pri;alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
+ table.addCell("pri.indexing.delete_total", "default:false;text-align:right;desc:number of delete ops");
+
+ table.addCell("indexing.index_current", "sibling:pri;alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
+ table.addCell("pri.indexing.index_current", "default:false;text-align:right;desc:number of current indexing ops");
+
+ table.addCell("indexing.index_time", "sibling:pri;alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
+ table.addCell("pri.indexing.index_time", "default:false;text-align:right;desc:time spent in indexing");
+
+ table.addCell("indexing.index_total", "sibling:pri;alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
+ table.addCell("pri.indexing.index_total", "default:false;text-align:right;desc:number of indexing ops");
+
+ table.addCell("merges.current", "sibling:pri;alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
+ table.addCell("pri.merges.current", "default:false;text-align:right;desc:number of current merges");
+
+ table.addCell("merges.current_docs", "sibling:pri;alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
+ table.addCell("pri.merges.current_docs", "default:false;text-align:right;desc:number of current merging docs");
+
+ table.addCell("merges.current_size", "sibling:pri;alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
+ table.addCell("pri.merges.current_size", "default:false;text-align:right;desc:size of current merges");
+
+ table.addCell("merges.total", "sibling:pri;alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
+ table.addCell("pri.merges.total", "default:false;text-align:right;desc:number of completed merge ops");
+
+ table.addCell("merges.total_docs", "sibling:pri;alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
+ table.addCell("pri.merges.total_docs", "default:false;text-align:right;desc:docs merged");
+
+ table.addCell("merges.total_size", "sibling:pri;alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
+ table.addCell("pri.merges.total_size", "default:false;text-align:right;desc:size merged");
+
+ table.addCell("merges.total_time", "sibling:pri;alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
+ table.addCell("pri.merges.total_time", "default:false;text-align:right;desc:time spent in merges");
+
+ table.addCell("percolate.current", "sibling:pri;alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations");
+ table.addCell("pri.percolate.current", "default:false;text-align:right;desc:number of current percolations");
+
+ table.addCell("percolate.memory_size", "sibling:pri;alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations");
+ table.addCell("pri.percolate.memory_size", "default:false;text-align:right;desc:memory used by percolations");
+
+ table.addCell("percolate.queries", "sibling:pri;alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
+ table.addCell("pri.percolate.queries", "default:false;text-align:right;desc:number of registered percolation queries");
+
+ table.addCell("percolate.time", "sibling:pri;alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating");
+ table.addCell("pri.percolate.time", "default:false;text-align:right;desc:time spent percolating");
+
+ table.addCell("percolate.total", "sibling:pri;alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations");
+ table.addCell("pri.percolate.total", "default:false;text-align:right;desc:total percolations");
+
+ table.addCell("refresh.total", "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
+ table.addCell("pri.refresh.total", "default:false;text-align:right;desc:total refreshes");
+
+ table.addCell("refresh.time", "sibling:pri;alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
+ table.addCell("pri.refresh.time", "default:false;text-align:right;desc:time spent in refreshes");
+
+ table.addCell("search.fetch_current", "sibling:pri;alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
+ table.addCell("pri.search.fetch_current", "default:false;text-align:right;desc:current fetch phase ops");
+
+ table.addCell("search.fetch_time", "sibling:pri;alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
+ table.addCell("pri.search.fetch_time", "default:false;text-align:right;desc:time spent in fetch phase");
+
+ table.addCell("search.fetch_total", "sibling:pri;alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops");
+ table.addCell("pri.search.fetch_total", "default:false;text-align:right;desc:total fetch ops");
+
+ table.addCell("search.open_contexts", "sibling:pri;alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts");
+ table.addCell("pri.search.open_contexts", "default:false;text-align:right;desc:open search contexts");
+
+ table.addCell("search.query_current", "sibling:pri;alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops");
+ table.addCell("pri.search.query_current", "default:false;text-align:right;desc:current query phase ops");
+
+ table.addCell("search.query_time", "sibling:pri;alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
+ table.addCell("pri.search.query_time", "default:false;text-align:right;desc:time spent in query phase");
+
+ table.addCell("search.query_total", "sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
+ table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops");
+
+ table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
+ table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments");
+
+ table.addCell("segments.memory", "sibling:pri;alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
+ table.addCell("pri.segments.memory", "default:false;text-align:right;desc:memory used by segments");
+
+ table.addCell("warmer.current", "sibling:pri;alias:wc,warmerCurrent;default:false;text-align:right;desc:current warmer ops");
+ table.addCell("pri.warmer.current", "default:false;text-align:right;desc:current warmer ops");
+
+ table.addCell("warmer.total", "sibling:pri;alias:wto,warmerTotal;default:false;text-align:right;desc:total warmer ops");
+ table.addCell("pri.warmer.total", "default:false;text-align:right;desc:total warmer ops");
+
+ table.addCell("warmer.total_time", "sibling:pri;alias:wtt,warmerTotalTime;default:false;text-align:right;desc:time spent in warmers");
+ table.addCell("pri.warmer.total_time", "default:false;text-align:right;desc:time spent in warmers");
+
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest request, String[] indices, ClusterHealthResponse health, IndicesStatsResponse stats) {
+ Table table = getTableWithHeader(request);
+
+ for (String index : indices) {
+ ClusterIndexHealth indexHealth = health.getIndices().get(index);
+ IndexStats indexStats = stats.getIndices().get(index);
+
+ table.startRow();
+ table.addCell(indexHealth == null ? "red*" : indexHealth.getStatus().toString().toLowerCase(Locale.ROOT));
+ table.addCell(index);
+ table.addCell(indexHealth == null ? null : indexHealth.getNumberOfShards());
+ table.addCell(indexHealth == null ? null : indexHealth.getNumberOfReplicas());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getDocs().getCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getDocs().getDeleted());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getStore().size());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getStore().size());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getCompletion().getSize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getCompletion().getSize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getFieldData().getMemorySize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFieldData().getMemorySize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getFieldData().getEvictions());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFieldData().getEvictions());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getFilterCache().getMemorySize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFilterCache().getMemorySize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getFilterCache().getEvictions());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFilterCache().getEvictions());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getFlush().getTotal());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFlush().getTotal());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getFlush().getTotalTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFlush().getTotalTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().current());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().current());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getExistsTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getExistsTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getExistsCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getExistsCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getMissingTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getMissingTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getMissingCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getMissingCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIdCache().getMemorySize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIdCache().getMemorySize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getDeleteCurrent());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getDeleteCurrent());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getDeleteTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getDeleteTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getDeleteCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getDeleteCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getIndexCurrent());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getIndexCurrent());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getIndexTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getIndexTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getIndexCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getIndexCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getCurrent());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrent());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getCurrentNumDocs());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrentNumDocs());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getCurrentSize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrentSize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotal());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotal());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalNumDocs());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalNumDocs());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalSize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalSize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getCurrent());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getCurrent());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getMemorySize());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getMemorySize());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getNumQueries());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getNumQueries());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotal());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotal());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotalTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotalTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getFetchCurrent());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getFetchCurrent());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getFetchTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getFetchTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getFetchCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getFetchCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getOpenContexts());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getOpenContexts());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getQueryCurrent());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getQueryCurrent());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getQueryTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getQueryTime());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getQueryCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getQueryCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSegments().getCount());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSegments().getCount());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getSegments().getMemory());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSegments().getMemory());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getWarmer().current());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getWarmer().current());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getWarmer().total());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getWarmer().total());
+
+ table.addCell(indexStats == null ? null : indexStats.getTotal().getWarmer().totalTime());
+ table.addCell(indexStats == null ? null : indexStats.getPrimaries().getWarmer().totalTime());
+
+ table.endRow();
+ }
+
+ return table;
+ }
+}
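
Every header cell above uses the same compact descriptor syntax: semicolon-separated key:value attributes such as sibling:pri, alias:..., default:false, text-align:right and desc:.... As a rough illustration of that format (not the actual org.elasticsearch.common.Table parser, which may differ in detail), such a descriptor can be split like this:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative sketch only: split a cat-API header descriptor into its attributes.
    public class HeaderAttrDemo {
        static Map<String, String> parse(String descriptor) {
            Map<String, String> attrs = new LinkedHashMap<String, String>();
            for (String pair : descriptor.split(";")) {
                int i = pair.indexOf(':');
                if (i > 0) {
                    attrs.put(pair.substring(0, i), pair.substring(i + 1));
                }
            }
            return attrs;
        }

        public static void main(String[] args) {
            // -> {sibling=pri, alias=wtt,warmerTotalTime, default=false, text-align=right, desc=time spent in warmers}
            System.out.println(parse("sibling:pri;alias:wtt,warmerTotalTime;default:false;text-align:right;desc:time spent in warmers"));
        }
    }
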
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java
new file mode 100644
index 0000000..b5d5f86
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestMasterAction extends AbstractCatAction {
+
+ @Inject
+ public RestMasterAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/master", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/master\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().nodes(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(final ClusterStateResponse clusterStateResponse) {
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse), request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table table = new Table();
+ table.startHeaders()
+ .addCell("id", "desc:node id")
+ .addCell("host", "alias:h;desc:host name")
+ .addCell("ip", "desc:ip address ")
+ .addCell("node", "alias:n;desc:node name")
+ .endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest request, ClusterStateResponse state) {
+ Table table = getTableWithHeader(request);
+ DiscoveryNodes nodes = state.getState().nodes();
+
+ table.startRow();
+ DiscoveryNode master = nodes.get(nodes.masterNodeId());
+ if (master == null) {
+ table.addCell("-");
+ table.addCell("-");
+ table.addCell("-");
+ table.addCell("-");
+ } else {
+ table.addCell(master.getId());
+ table.addCell(master.getHostName());
+ table.addCell(master.getHostAddress());
+ table.addCell(master.getName());
+ }
+ table.endRow();
+
+ return table;
+ }
+}
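
RestMasterAction renders a single row identifying the elected master, or dashes when no master is known. A minimal sketch for exercising the endpoint from Java, assuming a node is listening on localhost:9200 (the "v" parameter adds the header row):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    // Minimal sketch: print the /_cat/master table from a local node.
    public class CatMasterDemo {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://localhost:9200/_cat/master?v");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            BufferedReader reader = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), "UTF-8"));
            try {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line); // header row, then one data row
                }
            } finally {
                reader.close();
            }
        }
    }
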
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
new file mode 100644
index 0000000..56d8a4b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestNodesAction extends AbstractCatAction {
+
+ @Inject
+ public RestNodesAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/nodes", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/nodes\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().nodes(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(final ClusterStateResponse clusterStateResponse) {
+ NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
+ nodesInfoRequest.clear().jvm(true).os(true).process(true);
+ client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
+ @Override
+ public void onResponse(final NodesInfoResponse nodesInfoResponse) {
+ NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
+ nodesStatsRequest.clear().jvm(true).os(true).fs(true).indices(true);
+ client.admin().cluster().nodesStats(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
+ @Override
+ public void onResponse(NodesStatsResponse nodesStatsResponse) {
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
+ table.addCell("pid", "default:false;alias:p;desc:process id");
+ table.addCell("host", "alias:h;desc:host name");
+ table.addCell("ip", "alias:i;desc:ip address");
+ table.addCell("port", "default:false;alias:po;desc:bound transport port");
+
+ table.addCell("version", "default:false;alias:v;desc:es version");
+ table.addCell("build", "default:false;alias:b;desc:es build hash");
+ table.addCell("jdk", "default:false;alias:j;desc:jdk version");
+ table.addCell("disk.avail", "default:false;alias:d,disk,diskAvail;text-align:right;desc:available disk space");
+ table.addCell("heap.percent", "alias:hp,heapPercent;text-align:right;desc:used heap ratio");
+ table.addCell("heap.max", "default:false;alias:hm,heapMax;text-align:right;desc:max configured heap");
+ table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio");
+ table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory");
+
+ table.addCell("load", "alias:l;text-align:right;desc:most recent load avg");
+ table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime");
+ table.addCell("node.role", "alias:r,role,dc,nodeRole;desc:d:data node, c:client node");
+ table.addCell("master", "alias:m;desc:m:master-eligible, *:current master");
+ table.addCell("name", "alias:n;desc:node name");
+
+ table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
+
+ table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
+ table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
+
+ table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
+ table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
+
+ table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
+ table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
+
+ table.addCell("get.current", "alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops");
+ table.addCell("get.time", "alias:gti,getTime;default:false;text-align:right;desc:time spent in get");
+ table.addCell("get.total", "alias:gto,getTotal;default:false;text-align:right;desc:number of get ops");
+ table.addCell("get.exists_time", "alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets");
+ table.addCell("get.exists_total", "alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets");
+ table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
+ table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
+
+ table.addCell("id_cache.memory_size", "alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
+
+ table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
+ table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
+ table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
+ table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
+ table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
+ table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
+
+ table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
+ table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
+ table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
+ table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
+ table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
+ table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
+ table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
+
+ table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations");
+ table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations");
+ table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
+ table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating");
+ table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations");
+
+ table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
+ table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
+
+ table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
+ table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
+ table.addCell("search.fetch_total", "alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops");
+ table.addCell("search.open_contexts", "alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts");
+ table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops");
+ table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
+ table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
+
+ table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
+ table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
+
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
+ boolean fullId = req.paramAsBoolean("full_id", false);
+
+ DiscoveryNodes nodes = state.getState().nodes();
+ String masterId = nodes.masterNodeId();
+ Table table = getTableWithHeader(req);
+
+ for (DiscoveryNode node : nodes) {
+ NodeInfo info = nodesInfo.getNodesMap().get(node.id());
+ NodeStats stats = nodesStats.getNodesMap().get(node.id());
+
+ table.startRow();
+
+ table.addCell(fullId ? node.id() : Strings.substring(node.id(), 0, 4));
+ table.addCell(info == null ? null : info.getProcess().id());
+ table.addCell(node.getHostName());
+ table.addCell(node.getHostAddress());
+ if (node.address() instanceof InetSocketTransportAddress) {
+ table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
+ } else {
+ table.addCell("-");
+ }
+
+ table.addCell(info == null ? null : info.getVersion().number());
+ table.addCell(info == null ? null : info.getBuild().hashShort());
+ table.addCell(info == null ? null : info.getJvm().version());
+ table.addCell(stats == null ? null : stats.getFs() == null ? null : stats.getFs().total().getAvailable());
+ table.addCell(stats == null ? null : stats.getJvm().getMem().getHeapUsedPrecent()); // "Precent" is the method's actual (misspelled) name in JvmStats
+ table.addCell(info == null ? null : info.getJvm().getMem().getHeapMax());
+ table.addCell(stats == null ? null : stats.getOs() == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().usedPercent());
+ table.addCell(info == null ? null : info.getOs().mem() == null ? null : info.getOs().mem().total()); // sigar fails to load in IntelliJ
+
+ table.addCell(stats == null ? null : stats.getOs() == null ? null : stats.getOs().getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, "%.2f", stats.getOs().getLoadAverage()[0]));
+ table.addCell(stats == null ? null : stats.getJvm().uptime());
+ table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-");
+ table.addCell(masterId == null ? "x" : masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-");
+ table.addCell(node.name());
+
+ table.addCell(stats == null ? null : stats.getIndices().getCompletion().getSize());
+
+ table.addCell(stats == null ? null : stats.getIndices().getFieldData().getMemorySize());
+ table.addCell(stats == null ? null : stats.getIndices().getFieldData().getEvictions());
+
+ table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getMemorySize());
+ table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getEvictions());
+
+ table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotal());
+ table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotalTime());
+
+ table.addCell(stats == null ? null : stats.getIndices().getGet().current());
+ table.addCell(stats == null ? null : stats.getIndices().getGet().getTime());
+ table.addCell(stats == null ? null : stats.getIndices().getGet().getCount());
+ table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsTime());
+ table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsCount());
+ table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingTime());
+ table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingCount());
+
+ table.addCell(stats == null ? null : stats.getIndices().getIdCache().getMemorySize());
+
+ table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCurrent());
+ table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteTime());
+ table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCount());
+ table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCurrent());
+ table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexTime());
+ table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCount());
+
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrent());
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentNumDocs());
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentSize());
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotal());
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalNumDocs());
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalSize());
+ table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalTime());
+
+ table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCurrent());
+ table.addCell(stats == null ? null : stats.getIndices().getPercolate().getMemorySize());
+ table.addCell(stats == null ? null : stats.getIndices().getPercolate().getNumQueries());
+ table.addCell(stats == null ? null : stats.getIndices().getPercolate().getTime());
+ table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCount());
+
+ table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotal());
+ table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotalTime());
+
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCurrent());
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchTime());
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCount());
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getOpenContexts());
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCurrent());
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryTime());
+ table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCount());
+
+ table.addCell(stats == null ? null : stats.getIndices().getSegments().getCount());
+ table.addCell(stats == null ? null : stats.getIndices().getSegments().getMemory());
+
+ table.endRow();
+ }
+
+ return table;
+ }
+}
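
doRequest() above serializes three admin calls (cluster state, nodes info, nodes stats) by nesting one ActionListener inside the next, each level reporting its own failure. Stripped of the Elasticsearch types, the control flow has this shape; Callback and the step methods are hypothetical stand-ins, not part of this tree:

    // Illustrative only: the callback-chaining pattern used in RestNodesAction.doRequest().
    interface Callback<T> {
        void onResponse(T response);
        void onFailure(Throwable t);
    }

    public class ChainDemo {
        // Hypothetical async steps standing in for state/nodesInfo/nodesStats.
        static void stepOne(Callback<String> cb) { cb.onResponse("state"); }
        static void stepTwo(Callback<String> cb) { cb.onResponse("info"); }
        static void stepThree(Callback<String> cb) { cb.onResponse("stats"); }

        public static void main(String[] args) {
            // Each response triggers the next request; every level handles its own failure.
            stepOne(new Callback<String>() {
                public void onResponse(final String state) {
                    stepTwo(new Callback<String>() {
                        public void onResponse(final String info) {
                            stepThree(new Callback<String>() {
                                public void onResponse(String stats) {
                                    System.out.println(state + " + " + info + " + " + stats);
                                }
                                public void onFailure(Throwable t) { t.printStackTrace(); }
                            });
                        }
                        public void onFailure(Throwable t) { t.printStackTrace(); }
                    });
                }
                public void onFailure(Throwable t) { t.printStackTrace(); }
            });
        }
    }
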
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
new file mode 100644
index 0000000..80e5f18
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestPendingClusterTasksAction extends AbstractCatAction {
+ @Inject
+ public RestPendingClusterTasksAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/pending_tasks", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/pending_tasks\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest();
+ pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout()));
+ pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local()));
+ client.admin().cluster().pendingClusterTasks(pendingClusterTasksRequest, new ActionListener<PendingClusterTasksResponse>() {
+ @Override
+ public void onResponse(PendingClusterTasksResponse pendingClusterTasks) {
+ try {
+ Table tab = buildTable(request, pendingClusterTasks);
+ channel.sendResponse(RestTable.buildResponse(tab, request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table t = new Table();
+ t.startHeaders();
+ t.addCell("insertOrder", "alias:o;text-align:right;desc:task insertion order");
+ t.addCell("timeInQueue", "alias:t;text-align:right;desc:how long task has been in queue");
+ t.addCell("priority", "alias:p;desc:task priority");
+ t.addCell("source", "alias:s;desc:task source");
+ t.endHeaders();
+ return t;
+ }
+
+ private Table buildTable(RestRequest request, PendingClusterTasksResponse tasks) {
+ Table t = getTableWithHeader(request);
+
+ for (PendingClusterTask task : tasks) {
+ t.startRow();
+ t.addCell(task.getInsertOrder());
+ t.addCell(task.getTimeInQueue());
+ t.addCell(task.getPriority());
+ t.addCell(task.getSource());
+ t.endRow();
+ }
+
+ return t;
+ }
+}
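
The same data this handler formats is also reachable programmatically. A sketch against the 1.x Java API, mirroring the handler's own request and assuming a node reachable on localhost:9300:

    import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
    import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
    import org.elasticsearch.client.transport.TransportClient;
    import org.elasticsearch.cluster.service.PendingClusterTask;
    import org.elasticsearch.common.transport.InetSocketTransportAddress;

    // Sketch: fetch pending cluster tasks through the Java API instead of /_cat/pending_tasks.
    public class PendingTasksDemo {
        public static void main(String[] args) {
            TransportClient client = new TransportClient()
                    .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
            try {
                PendingClusterTasksResponse tasks = client.admin().cluster()
                        .pendingClusterTasks(new PendingClusterTasksRequest()).actionGet();
                for (PendingClusterTask task : tasks) {
                    System.out.println(task.getInsertOrder() + " " + task.getTimeInQueue()
                            + " " + task.getPriority() + " " + task.getSource());
                }
            } finally {
                client.close();
            }
        }
    }
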
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
new file mode 100644
index 0000000..c170cbc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequest;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.XContentThrowableRestResponse;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+/**
+ * RestRecoveryAction provides information about the status of replica recovery
+ * in a string format, designed to be used at the command line. An Index can
+ * be specified to limit output to a particular index or indices.
+ */
+public class RestRecoveryAction extends AbstractCatAction {
+
+ @Inject
+ protected RestRecoveryAction(Settings settings, Client client, RestController restController) {
+ super(settings, client);
+ restController.registerHandler(GET, "/_cat/recovery", this);
+ restController.registerHandler(GET, "/_cat/recovery/{index}", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/recovery\n");
+ sb.append("/_cat/recovery/{index}\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().nodes(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(final ClusterStateResponse clusterStateResponse) {
+ IndicesStatusRequest indicesStatusRequest = new IndicesStatusRequest(indices);
+ indicesStatusRequest.recovery(true);
+ indicesStatusRequest.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
+
+ client.admin().indices().status(indicesStatusRequest, new ActionListener<IndicesStatusResponse>() {
+ @Override
+ public void onResponse(IndicesStatusResponse indicesStatusResponse) {
+ Map<String, Long> primarySizes = new HashMap<String, Long>();
+ Set<ShardStatus> replicas = new HashSet<ShardStatus>();
+
+ // Loop through all the shards in the index status, keeping
+ // track of the primary shard size with a Map and the
+ // recovering shards in a Set of ShardStatus objects
+ for (ShardStatus shardStatus : indicesStatusResponse.getShards()) {
+ if (shardStatus.getShardRouting().primary()) {
+ primarySizes.put(shardStatus.getShardRouting().getIndex() + "." + shardStatus.getShardRouting().getId(),
+ shardStatus.getStoreSize().bytes());
+ } else if (shardStatus.getState() == IndexShardState.RECOVERING) {
+ replicas.add(shardStatus);
+ }
+ }
+
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildRecoveryTable(request, clusterStateResponse, primarySizes, replicas), request, channel));
+ } catch (Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e2) {
+ logger.error("Unable to send recovery status response", e2);
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+
+ }
+
+ @Override
+ Table getTableWithHeader(RestRequest request) {
+ Table t = new Table();
+ t.startHeaders().addCell("index", "alias:i,idx;desc:index name")
+ .addCell("shard", "alias:s,sh;desc:shard name")
+ .addCell("target", "alias:t;text-align:right;desc:bytes of source shard")
+ .addCell("recovered", "alias:r;text-align:right;desc:bytes recovered so far")
+ .addCell("percent", "alias:per,ratio;text-align:right;desc:percent recovered so far")
+ .addCell("host", "alias:h;desc:node host where source shard lives")
+ .addCell("ip", "desc:node ip where source shard lives")
+ .addCell("node", "alias:n;desc:node name where source shard lives")
+ .endHeaders();
+ return t;
+ }
+
+ /**
+ * buildRecoveryTable will build a table of recovery information suitable
+ * for displaying at the command line.
+ *
+ * @param request The incoming REST request; passed through to table construction.
+ * @param state Current cluster state.
+ * @param primarySizes A Map of {@code index + "." + shardId} strings to store size for all primary shards.
+ * @param recoveringReplicas A Set of {@link org.elasticsearch.action.admin.indices.status.ShardStatus} objects for each recovering replica to be displayed.
+ * @return A table containing index, shardId, node, target size, recovered size and percentage for each recovering replica
+ */
+ public Table buildRecoveryTable(RestRequest request, ClusterStateResponse state, Map<String, Long> primarySizes, Set<ShardStatus> recoveringReplicas) {
+ Table t = getTableWithHeader(request);
+ for (ShardStatus status : recoveringReplicas) {
+ DiscoveryNode node = state.getState().nodes().get(status.getShardRouting().currentNodeId());
+
+ String index = status.getShardRouting().getIndex();
+ int id = status.getShardId();
+ long replicaSize = status.getStoreSize().bytes();
+ Long primarySize = primarySizes.get(index + "." + id);
+ t.startRow();
+ t.addCell(index);
+ t.addCell(id);
+ t.addCell(primarySize);
+ t.addCell(replicaSize);
+ t.addCell(primarySize == null ? null : String.format(Locale.ROOT, "%1.1f%%", 100.0 * (float) replicaSize / primarySize));
+ t.addCell(node == null ? null : node.getHostName());
+ t.addCell(node == null ? null : node.getHostAddress());
+ t.addCell(node == null ? null : node.name());
+ t.endRow();
+ }
+ return t;
+ }
+}
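
The percent column divides the replica's current store size by the source (primary) shard's size. A worked example, pure JDK: a replica that has copied 128 MB of a 512 MB primary reports 25.0%.

    import java.util.Locale;

    // Worked example of the "percent" computation in buildRecoveryTable().
    public class RecoveryPercentDemo {
        public static void main(String[] args) {
            long primarySize = 512L * 1024 * 1024; // size of the source (primary) shard
            long replicaSize = 128L * 1024 * 1024; // bytes present on the recovering replica
            System.out.println(String.format(Locale.ROOT, "%1.1f%%",
                    100.0 * (float) replicaSize / primarySize)); // prints 25.0%
        }
    }
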
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
new file mode 100644
index 0000000..cedabaf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestTable;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestShardsAction extends AbstractCatAction {
+
+ @Inject
+ public RestShardsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/shards", this);
+ controller.registerHandler(GET, "/_cat/shards/{index}", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/shards\n");
+ sb.append("/_cat/shards/{index}\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+ clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices);
+ client.admin().cluster().state(clusterStateRequest, new AbstractRestResponseActionListener<ClusterStateResponse>(request, channel, logger) {
+ @Override
+ public void onResponse(final ClusterStateResponse clusterStateResponse) {
+ IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
+ indicesStatsRequest.all();
+ client.admin().indices().stats(indicesStatsRequest, new AbstractRestResponseActionListener<IndicesStatsResponse>(request, channel, logger) {
+ @Override
+ public void onResponse(IndicesStatsResponse indicesStatsResponse) {
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse, indicesStatsResponse), request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+ });
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table table = new Table();
+ table.startHeaders()
+ .addCell("index", "default:true;alias:i,idx;desc:index name")
+ .addCell("shard", "default:true;alias:s,sh;desc:shard name")
+ .addCell("prirep", "alias:p,pr,primaryOrReplica;default:true;desc:primary or replica")
+ .addCell("state", "default:true;alias:st;desc:shard state")
+ .addCell("docs", "alias:d,dc;text-align:right;desc:number of docs in shard")
+ .addCell("store", "alias:sto;text-align:right;desc:store size of shard (how much disk it uses)")
+ .addCell("ip", "default:true;desc:ip of node where it lives")
+ .addCell("node", "default:true;alias:n;desc:name of node where it lives");
+
+ table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
+
+ table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
+ table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
+
+ table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
+ table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
+
+ table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
+ table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
+
+ table.addCell("get.current", "alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops");
+ table.addCell("get.time", "alias:gti,getTime;default:false;text-align:right;desc:time spent in get");
+ table.addCell("get.total", "alias:gto,getTotal;default:false;text-align:right;desc:number of get ops");
+ table.addCell("get.exists_time", "alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets");
+ table.addCell("get.exists_total", "alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets");
+ table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
+ table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
+
+ table.addCell("id_cache.memory_size", "alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
+
+ table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
+ table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
+ table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
+ table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
+ table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
+ table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
+
+ table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
+ table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
+ table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
+ table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
+ table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
+ table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
+ table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
+
+ table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations");
+ table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations");
+ table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
+ table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating");
+ table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations");
+
+ table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
+ table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
+
+ table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
+ table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
+ table.addCell("search.fetch_total", "alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops");
+ table.addCell("search.open_contexts", "alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts");
+ table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops");
+ table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
+ table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
+
+ table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
+ table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
+
+ table.addCell("warmer.current", "alias:wc,warmerCurrent;default:false;text-align:right;desc:current warmer ops");
+ table.addCell("warmer.total", "alias:wto,warmerTotal;default:false;text-align:right;desc:total warmer ops");
+ table.addCell("warmer.total_time", "alias:wtt,warmerTotalTime;default:false;text-align:right;desc:time spent in warmers");
+
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsResponse stats) {
+ Table table = getTableWithHeader(request);
+
+ for (ShardRouting shard : state.getState().routingTable().allShards()) {
+ CommonStats shardStats = stats.asMap().get(shard);
+
+ table.startRow();
+
+ table.addCell(shard.index());
+ table.addCell(shard.id());
+ table.addCell(shard.primary() ? "p" : "r");
+ table.addCell(shard.state());
+ table.addCell(shardStats == null ? null : shardStats.getDocs().getCount());
+ table.addCell(shardStats == null ? null : shardStats.getStore().getSize());
+ if (shard.assignedToNode()) {
+ String ip = state.getState().nodes().get(shard.currentNodeId()).getHostAddress();
+ StringBuilder name = new StringBuilder();
+ name.append(state.getState().nodes().get(shard.currentNodeId()).name());
+ if (shard.relocating()) {
+ String reloIp = state.getState().nodes().get(shard.relocatingNodeId()).getHostAddress();
+ String reloNme = state.getState().nodes().get(shard.relocatingNodeId()).name();
+ name.append(" -> ");
+ name.append(reloIp);
+ name.append(" ");
+ name.append(reloNme);
+ }
+ table.addCell(ip);
+ table.addCell(name);
+ } else {
+ table.addCell(null);
+ table.addCell(null);
+ }
+
+ table.addCell(shardStats == null ? null : shardStats.getCompletion().getSize());
+
+ table.addCell(shardStats == null ? null : shardStats.getFieldData().getMemorySize());
+ table.addCell(shardStats == null ? null : shardStats.getFieldData().getEvictions());
+
+ table.addCell(shardStats == null ? null : shardStats.getFilterCache().getMemorySize());
+ table.addCell(shardStats == null ? null : shardStats.getFilterCache().getEvictions());
+
+ table.addCell(shardStats == null ? null : shardStats.getFlush().getTotal());
+ table.addCell(shardStats == null ? null : shardStats.getFlush().getTotalTime());
+
+ table.addCell(shardStats == null ? null : shardStats.getGet().current());
+ table.addCell(shardStats == null ? null : shardStats.getGet().getTime());
+ table.addCell(shardStats == null ? null : shardStats.getGet().getCount());
+ table.addCell(shardStats == null ? null : shardStats.getGet().getExistsTime());
+ table.addCell(shardStats == null ? null : shardStats.getGet().getExistsCount());
+ table.addCell(shardStats == null ? null : shardStats.getGet().getMissingTime());
+ table.addCell(shardStats == null ? null : shardStats.getGet().getMissingCount());
+
+ table.addCell(shardStats == null ? null : shardStats.getIdCache().getMemorySize());
+
+ table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getDeleteCurrent());
+ table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getDeleteTime());
+ table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getDeleteCount());
+ table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getIndexCurrent());
+ table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getIndexTime());
+ table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getIndexCount());
+
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getCurrent());
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getCurrentNumDocs());
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getCurrentSize());
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getTotal());
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getTotalNumDocs());
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getTotalSize());
+ table.addCell(shardStats == null ? null : shardStats.getMerge().getTotalTime());
+
+ table.addCell(shardStats == null ? null : shardStats.getPercolate().getCurrent());
+ table.addCell(shardStats == null ? null : shardStats.getPercolate().getMemorySize());
+ table.addCell(shardStats == null ? null : shardStats.getPercolate().getNumQueries());
+ table.addCell(shardStats == null ? null : shardStats.getPercolate().getTime());
+ table.addCell(shardStats == null ? null : shardStats.getPercolate().getCount());
+
+ table.addCell(shardStats == null ? null : shardStats.getRefresh().getTotal());
+ table.addCell(shardStats == null ? null : shardStats.getRefresh().getTotalTime());
+
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getFetchCurrent());
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getFetchTime());
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getFetchCount());
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getOpenContexts());
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getQueryCurrent());
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getQueryTime());
+ table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getQueryCount());
+
+ table.addCell(shardStats == null ? null : shardStats.getSegments().getCount());
+ table.addCell(shardStats == null ? null : shardStats.getSegments().getMemory());
+
+ table.addCell(shardStats == null ? null : shardStats.getWarmer().current());
+ table.addCell(shardStats == null ? null : shardStats.getWarmer().total());
+ table.addCell(shardStats == null ? null : shardStats.getWarmer().totalTime());
+
+ table.endRow();
+ }
+
+ return table;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java
new file mode 100644
index 0000000..9d352ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.cat;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.rest.AbstractRestResponseActionListener;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.support.RestTable;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolStats;
+
+import java.util.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestThreadPoolAction extends AbstractCatAction {
+
+ private static final String[] SUPPORTED_NAMES = new String[] {
+ ThreadPool.Names.BULK,
+ ThreadPool.Names.FLUSH,
+ ThreadPool.Names.GENERIC,
+ ThreadPool.Names.GET,
+ ThreadPool.Names.INDEX,
+ ThreadPool.Names.MANAGEMENT,
+ ThreadPool.Names.MERGE,
+ ThreadPool.Names.OPTIMIZE,
+ ThreadPool.Names.PERCOLATE,
+ ThreadPool.Names.REFRESH,
+ ThreadPool.Names.SEARCH,
+ ThreadPool.Names.SNAPSHOT,
+ ThreadPool.Names.SUGGEST,
+ ThreadPool.Names.WARMER
+ };
+
+ private static final String[] SUPPORTED_ALIASES = new String[] {
+ "b",
+ "f",
+ "ge",
+ "g",
+ "i",
+ "ma",
+ "m",
+ "o",
+ "p",
+ "r",
+ "s",
+ "sn",
+ "su",
+ "w"
+ };
+
+ private static final String[] DEFAULT_THREAD_POOLS = new String[] {
+ ThreadPool.Names.BULK,
+ ThreadPool.Names.INDEX,
+ ThreadPool.Names.SEARCH,
+ };
+
+ private static final Map<String, String> ALIAS_TO_THREAD_POOL;
+ private static final Map<String, String> THREAD_POOL_TO_ALIAS;
+
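+ // two lookup maps: the first three letters of a pool name resolve bare "h" entries to the full pool name, and each full name maps to the short alias used in column headers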
+ static {
+ ALIAS_TO_THREAD_POOL = Maps.newHashMapWithExpectedSize(SUPPORTED_NAMES.length);
+ for (String supportedThreadPool : SUPPORTED_NAMES) {
+ ALIAS_TO_THREAD_POOL.put(supportedThreadPool.substring(0, 3), supportedThreadPool);
+ }
+ THREAD_POOL_TO_ALIAS = Maps.newHashMapWithExpectedSize(SUPPORTED_NAMES.length);
+ for (int i = 0; i < SUPPORTED_NAMES.length; i++) {
+ THREAD_POOL_TO_ALIAS.put(SUPPORTED_NAMES[i], SUPPORTED_ALIASES[i]);
+ }
+ }
+
+ @Inject
+ public RestThreadPoolAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_cat/thread_pool", this);
+ }
+
+ @Override
+ void documentation(StringBuilder sb) {
+ sb.append("/_cat/thread_pool\n");
+ }
+
+ @Override
+ public void doRequest(final RestRequest request, final RestChannel channel) {
+ final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.clear().nodes(true);
+ clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+ clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
+ final String[] pools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
+
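+ // chain three async requests: cluster state for the node list, nodes info for process ids, and nodes stats for the per-pool counters, then render the table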
+ client.admin().cluster().state(clusterStateRequest, new AbstractRestResponseActionListener<ClusterStateResponse>(request, channel, logger) {
+ @Override
+ public void onResponse(final ClusterStateResponse clusterStateResponse) {
+ NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
+ nodesInfoRequest.clear().process(true);
+ client.admin().cluster().nodesInfo(nodesInfoRequest, new AbstractRestResponseActionListener<NodesInfoResponse>(request, channel, logger) {
+ @Override
+ public void onResponse(final NodesInfoResponse nodesInfoResponse) {
+ NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
+ nodesStatsRequest.clear().threadPool(true);
+ client.admin().cluster().nodesStats(nodesStatsRequest, new AbstractRestResponseActionListener<NodesStatsResponse>(request, channel, logger) {
+ @Override
+ public void onResponse(NodesStatsResponse nodesStatsResponse) {
+ try {
+ channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse, pools), request, channel));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+ });
+ }
+ });
+ }
+ });
+ }
+
+ @Override
+ Table getTableWithHeader(final RestRequest request) {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
+ table.addCell("pid", "default:false;alias:p;desc:process id");
+ table.addCell("host", "alias:h;desc:host name");
+ table.addCell("ip", "alias:i;desc:ip address");
+ table.addCell("port", "default:false;alias:po;desc:bound transport port");
+
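+ // columns are created for every supported pool, but only the requested (or default) pools are displayed unless selected explicitly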
+ final String[] requestedPools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
+ for (String pool : SUPPORTED_NAMES) {
+ String poolAlias = THREAD_POOL_TO_ALIAS.get(pool);
+ boolean display = false;
+ for (String requestedPool : requestedPools) {
+ if (pool.equals(requestedPool)) {
+ display = true;
+ break;
+ }
+ }
+
+ String defaultDisplayVal = Boolean.toString(display);
+ table.addCell(
+ pool + ".active",
+ "alias:" + poolAlias + "a;default:" + defaultDisplayVal + ";text-align:right;desc:number of active " + pool + " threads"
+ );
+ table.addCell(
+ pool + ".size",
+ "alias:" + poolAlias + "s;default:false;text-align:right;desc:number of active " + pool + " threads"
+ );
+ table.addCell(
+ pool + ".queue",
+ "alias:" + poolAlias + "q;default:" + defaultDisplayVal + ";text-align:right;desc:number of " + pool + " threads in queue"
+ );
+ table.addCell(
+ pool + ".rejected",
+ "alias:" + poolAlias + "r;default:" + defaultDisplayVal + ";text-align:right;desc:number of rejected " + pool + " threads"
+ );
+ table.addCell(
+ pool + ".largest",
+ "alias:" + poolAlias + "l;default:false;text-align:right;desc:highest number of seen active " + pool + " threads"
+ );
+ table.addCell(
+ pool + ".completed",
+ "alias:" + poolAlias + "c;default:false;text-align:right;desc:number of completed " + pool + " threads"
+ );
+ }
+
+ table.endHeaders();
+ return table;
+ }
+
+ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats, String[] pools) {
+ boolean fullId = req.paramAsBoolean("full_id", false);
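+ // full_id=true prints the whole node id; otherwise only the first four characters are shown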
+ DiscoveryNodes nodes = state.getState().nodes();
+ Table table = getTableWithHeader(req);
+
+ for (DiscoveryNode node : nodes) {
+ NodeInfo info = nodesInfo.getNodesMap().get(node.id());
+ NodeStats stats = nodesStats.getNodesMap().get(node.id());
+ table.startRow();
+
+ table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
+ table.addCell(info == null ? null : info.getProcess().id());
+ table.addCell(node.getHostName());
+ table.addCell(node.getHostAddress());
+ if (node.address() instanceof InetSocketTransportAddress) {
+ table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
+ } else {
+ table.addCell("-");
+ }
+
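+ // index this node's thread pool stats by pool name so the column loop below can look each pool up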
+ final Map<String, ThreadPoolStats.Stats> poolThreadStats;
+ if (stats == null) {
+ poolThreadStats = Collections.emptyMap();
+ } else {
+ poolThreadStats = new HashMap<String, ThreadPoolStats.Stats>(14);
+ ThreadPoolStats threadPoolStats = stats.getThreadPool();
+ for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) {
+ poolThreadStats.put(threadPoolStat.getName(), threadPoolStat);
+ }
+ }
+ for (String pool : SUPPORTED_NAMES) {
+ ThreadPoolStats.Stats poolStats = poolThreadStats.get(pool);
+ table.addCell(poolStats == null ? null : poolStats.getActive());
+ table.addCell(poolStats == null ? null : poolStats.getThreads());
+ table.addCell(poolStats == null ? null : poolStats.getQueue());
+ table.addCell(poolStats == null ? null : poolStats.getRejected());
+ table.addCell(poolStats == null ? null : poolStats.getLargest());
+ table.addCell(poolStats == null ? null : poolStats.getCompleted());
+ }
+
+ table.endRow();
+ }
+
+ return table;
+ }
+
+ // The thread pool columns should always be in the same order.
+ private String[] fetchSortedPools(RestRequest request, String[] defaults) {
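+ // "h" entries are either qualified column names like "bulk.active" (the part before the dot names the pool) or a pool's three-letter prefix alias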
+ String[] headers = request.paramAsStringArray("h", null);
+ if (headers == null) {
+ return defaults;
+ } else {
+ Set<String> requestedPools = new LinkedHashSet<String>(headers.length);
+ for (String header : headers) {
+ int dotIndex = header.indexOf('.');
+ if (dotIndex != -1) {
+ String headerPrefix = header.substring(0, dotIndex);
+ if (THREAD_POOL_TO_ALIAS.containsKey(headerPrefix)) {
+ requestedPools.add(headerPrefix);
+ }
+ } else if (ALIAS_TO_THREAD_POOL.containsKey(header)) {
+ requestedPools.add(ALIAS_TO_THREAD_POOL.get(header));
+ }
+
+ }
+ return requestedPools.toArray(new String[requestedPools.size()]);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java
new file mode 100644
index 0000000..da6a773
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.count;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.count.CountRequest.DEFAULT_MIN_SCORE;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ * Rest handler exposing the _count API: counts documents matching a query.
+ */
+public class RestCountAction extends BaseRestHandler {
+
+ @Inject
+ public RestCountAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_count", this);
+ controller.registerHandler(GET, "/_count", this);
+ controller.registerHandler(POST, "/{index}/_count", this);
+ controller.registerHandler(GET, "/{index}/_count", this);
+ controller.registerHandler(POST, "/{index}/{type}/_count", this);
+ controller.registerHandler(GET, "/{index}/{type}/_count", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ CountRequest countRequest = new CountRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions()));
+ countRequest.listenerThreaded(false);
+ try {
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn a thread for the listener, NO_THREADS is not allowed; upgrade it to a single thread instead
+ operationThreading = BroadcastOperationThreading.SINGLE_THREAD;
+ }
+ countRequest.operationThreading(operationThreading);
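+ // query source precedence: request body first, then the "source" parameter, then URI query parameters such as "q"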
+ if (request.hasContent()) {
+ countRequest.source(request.content(), request.contentUnsafe());
+ } else {
+ String source = request.param("source");
+ if (source != null) {
+ countRequest.source(source);
+ } else {
+ QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request);
+ if (querySourceBuilder != null) {
+ countRequest.source(querySourceBuilder);
+ }
+ }
+ }
+ countRequest.routing(request.param("routing"));
+ countRequest.minScore(request.paramAsFloat("min_score", DEFAULT_MIN_SCORE));
+ countRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
+ countRequest.preference(request.param("preference"));
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.count(countRequest, new ActionListener<CountResponse>() {
+ @Override
+ public void onResponse(CountResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ builder.field("count", response.getCount());
+
+ buildBroadcastShardsHeader(builder, response);
+
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, response.status(), builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java
new file mode 100644
index 0000000..01d83e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.delete;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Rest handler for deleting a single document by index, type and id.
+ */
+public class RestDeleteAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/{index}/{type}/{id}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id"));
+
+ deleteRequest.listenerThreaded(false);
+ deleteRequest.operationThreaded(true);
+
+ deleteRequest.routing(request.param("routing"));
+ deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+ deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT));
+ deleteRequest.refresh(request.paramAsBoolean("refresh", deleteRequest.refresh()));
+ deleteRequest.version(RestActions.parseVersion(request));
+ deleteRequest.versionType(VersionType.fromString(request.param("version_type"), deleteRequest.versionType()));
+
+ String replicationType = request.param("replication");
+ if (replicationType != null) {
+ deleteRequest.replicationType(ReplicationType.fromString(replicationType));
+ }
+ String consistencyLevel = request.param("consistency");
+ if (consistencyLevel != null) {
+ deleteRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
+ }
+
+ client.delete(deleteRequest, new ActionListener<DeleteResponse>() {
+ @Override
+ public void onResponse(DeleteResponse result) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject()
+ .field(Fields.FOUND, result.isFound())
+ .field(Fields._INDEX, result.getIndex())
+ .field(Fields._TYPE, result.getType())
+ .field(Fields._ID, result.getId())
+ .field(Fields._VERSION, result.getVersion())
+ .endObject();
+ RestStatus status = OK;
+ if (!result.isFound()) {
+ status = NOT_FOUND;
+ }
+ channel.sendResponse(new XContentRestResponse(request, status, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static final class Fields {
+ static final XContentBuilderString FOUND = new XContentBuilderString("found");
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java b/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java
new file mode 100644
index 0000000..a38011d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.deletebyquery;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse;
+import org.elasticsearch.action.deletebyquery.ShardDeleteByQueryRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+import static org.elasticsearch.rest.RestStatus.PRECONDITION_FAILED;
+
+/**
+ * Rest handler for the delete-by-query API.
+ */
+public class RestDeleteByQueryAction extends BaseRestHandler {
+
+ @Inject
+ public RestDeleteByQueryAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(DELETE, "/{index}/_query", this);
+ controller.registerHandler(DELETE, "/{index}/{type}/_query", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ deleteByQueryRequest.listenerThreaded(false);
+ try {
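+ // query source precedence mirrors _count: request body first, then the "source" parameter, then URI query parameters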
+ if (request.hasContent()) {
+ deleteByQueryRequest.source(request.content(), request.contentUnsafe());
+ } else {
+ String source = request.param("source");
+ if (source != null) {
+ deleteByQueryRequest.source(source);
+ } else {
+ QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request);
+ if (querySourceBuilder != null) {
+ deleteByQueryRequest.source(querySourceBuilder);
+ }
+ }
+ }
+ deleteByQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
+ deleteByQueryRequest.timeout(request.paramAsTime("timeout", ShardDeleteByQueryRequest.DEFAULT_TIMEOUT));
+
+ deleteByQueryRequest.routing(request.param("routing"));
+ String replicationType = request.param("replication");
+ if (replicationType != null) {
+ deleteByQueryRequest.replicationType(ReplicationType.fromString(replicationType));
+ }
+ String consistencyLevel = request.param("consistency");
+ if (consistencyLevel != null) {
+ deleteByQueryRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
+ }
+ deleteByQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteByQueryRequest.indicesOptions()));
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, PRECONDITION_FAILED, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+ client.deleteByQuery(deleteByQueryRequest, new ActionListener<DeleteByQueryResponse>() {
+ @Override
+ public void onResponse(DeleteByQueryResponse result) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ RestStatus restStatus = result.status();
+ builder.startObject();
+ builder.startObject("_indices");
+ for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : result.getIndices().values()) {
+ builder.startObject(indexDeleteByQueryResponse.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+
+ builder.startObject("_shards");
+ builder.field("total", indexDeleteByQueryResponse.getTotalShards());
+ builder.field("successful", indexDeleteByQueryResponse.getSuccessfulShards());
+ builder.field("failed", indexDeleteByQueryResponse.getFailedShards());
+ builder.endObject();
+
+ builder.endObject();
+ }
+ builder.endObject();
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, restStatus, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java
new file mode 100644
index 0000000..b6656ff
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.explain;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * Rest action for computing a score explanation for specific documents.
+ */
+public class RestExplainAction extends BaseRestHandler {
+
+ @Inject
+ public RestExplainAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/{index}/{type}/{id}/_explain", this);
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_explain", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final ExplainRequest explainRequest = new ExplainRequest(request.param("index"), request.param("type"), request.param("id"));
+ explainRequest.parent(request.param("parent"));
+ explainRequest.routing(request.param("routing"));
+ explainRequest.preference(request.param("preference"));
+ String sourceString = request.param("source");
+ String queryString = request.param("q");
+ if (request.hasContent()) {
+ explainRequest.source(request.content(), request.contentUnsafe());
+ } else if (sourceString != null) {
+ explainRequest.source(new BytesArray(request.param("source")), false);
+ } else if (queryString != null) {
+ QueryStringQueryBuilder queryStringBuilder = QueryBuilders.queryString(queryString);
+ queryStringBuilder.defaultField(request.param("df"));
+ queryStringBuilder.analyzer(request.param("analyzer"));
+ queryStringBuilder.analyzeWildcard(request.paramAsBoolean("analyze_wildcard", false));
+ queryStringBuilder.lowercaseExpandedTerms(request.paramAsBoolean("lowercase_expanded_terms", true));
+ queryStringBuilder.lenient(request.paramAsBoolean("lenient", null));
+ String defaultOperator = request.param("default_operator");
+ if (defaultOperator != null) {
+ if ("OR".equals(defaultOperator)) {
+ queryStringBuilder.defaultOperator(QueryStringQueryBuilder.Operator.OR);
+ } else if ("AND".equals(defaultOperator)) {
+ queryStringBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]");
+ }
+ }
+
+ QuerySourceBuilder querySourceBuilder = new QuerySourceBuilder();
+ querySourceBuilder.setQuery(queryStringBuilder);
+ explainRequest.source(querySourceBuilder);
+ }
+
+ String sField = request.param("fields");
+ if (sField != null) {
+ String[] sFields = Strings.splitStringByCommaToArray(sField);
+ if (sFields != null) {
+ explainRequest.fields(sFields);
+ }
+ }
+
+ explainRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
+
+ client.explain(explainRequest, new ActionListener<ExplainResponse>() {
+
+ @Override
+ public void onResponse(ExplainResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ builder.field(Fields._INDEX, explainRequest.index())
+ .field(Fields._TYPE, explainRequest.type())
+ .field(Fields._ID, explainRequest.id())
+ .field(Fields.MATCHED, response.isMatch());
+
+ if (response.hasExplanation()) {
+ builder.startObject(Fields.EXPLANATION);
+ buildExplanation(builder, response.getExplanation());
+ builder.endObject();
+ }
+ GetResult getResult = response.getGetResult();
+ if (getResult != null) {
+ builder.startObject(Fields.GET);
+ response.getGetResult().toXContentEmbedded(builder, request);
+ builder.endObject();
+ }
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, response.isExists() ? OK : NOT_FOUND, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ private void buildExplanation(XContentBuilder builder, Explanation explanation) throws IOException {
+ builder.field(Fields.VALUE, explanation.getValue());
+ builder.field(Fields.DESCRIPTION, explanation.getDescription());
+ Explanation[] innerExps = explanation.getDetails();
+ if (innerExps != null) {
+ builder.startArray(Fields.DETAILS);
+ for (Explanation exp : innerExps) {
+ builder.startObject();
+ buildExplanation(builder, exp);
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static class Fields {
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString MATCHED = new XContentBuilderString("matched");
+ static final XContentBuilderString EXPLANATION = new XContentBuilderString("explanation");
+ static final XContentBuilderString VALUE = new XContentBuilderString("value");
+ static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
+ static final XContentBuilderString DETAILS = new XContentBuilderString("details");
+ static final XContentBuilderString GET = new XContentBuilderString("get");
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java
new file mode 100644
index 0000000..046c76c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * Rest handler for fetching a single document by index, type and id.
+ */
+public class RestGetAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/{index}/{type}/{id}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
+ getRequest.listenerThreaded(false);
+ getRequest.operationThreaded(true);
+ getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
+ getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing
+ getRequest.parent(request.param("parent"));
+ getRequest.preference(request.param("preference"));
+ getRequest.realtime(request.paramAsBoolean("realtime", null));
+
+ String sField = request.param("fields");
+ if (sField != null) {
+ String[] sFields = Strings.splitStringByCommaToArray(sField);
+ if (sFields != null) {
+ getRequest.fields(sFields);
+ }
+ }
+
+ getRequest.version(RestActions.parseVersion(request));
+ getRequest.versionType(VersionType.fromString(request.param("version_type"), getRequest.versionType()));
+
+ getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
+
+ client.get(getRequest, new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse response) {
+
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ response.toXContent(builder, request);
+ if (!response.isExists()) {
+ channel.sendResponse(new XContentRestResponse(request, NOT_FOUND, builder));
+ } else {
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java
new file mode 100644
index 0000000..bb9f0e7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * Rest handler that returns only the _source of a document.
+ */
+public class RestGetSourceAction extends BaseRestHandler {
+
+ @Inject
+ public RestGetSourceAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/{index}/{type}/{id}/_source", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
+ getRequest.listenerThreaded(false);
+ getRequest.operationThreaded(true);
+ getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
+ getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing
+ getRequest.parent(request.param("parent"));
+ getRequest.preference(request.param("preference"));
+ getRequest.realtime(request.paramAsBoolean("realtime", null));
+
+ getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
+
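+ // this endpoint returns nothing but the _source, so a request that disables source fetching cannot be served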
+ if (getRequest.fetchSourceContext() != null && !getRequest.fetchSourceContext().fetchSource()) {
+ try {
+ ActionRequestValidationException validationError = new ActionRequestValidationException();
+ validationError.addValidationError("fetching source can not be disabled");
+ channel.sendResponse(new XContentThrowableRestResponse(request, validationError));
+ } catch (IOException e) {
+ logger.error("Failed to send failure response", e);
+ }
+ return; // don't fall through and answer the request a second time
+ }
+
+ client.get(getRequest, new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse response) {
+
+ try {
+ XContentBuilder builder = restContentBuilder(request, response.getSourceInternal());
+ if (!response.isExists()) {
+ channel.sendResponse(new XContentRestResponse(request, NOT_FOUND, builder));
+ } else {
+ RestXContentBuilder.directSource(response.getSourceInternal(), builder, request);
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java
new file mode 100644
index 0000000..dfa74cb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.get;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+import static org.elasticsearch.rest.RestRequest.Method.HEAD;
+import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Rest handler answering HEAD requests to check whether a document exists.
+ */
+public class RestHeadAction extends BaseRestHandler {
+
+ @Inject
+ public RestHeadAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(HEAD, "/{index}/{type}/{id}", this);
+ controller.registerHandler(HEAD, "/{index}/{type}/{id}/_source", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
+ getRequest.listenerThreaded(false);
+ getRequest.operationThreaded(true);
+ getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
+ getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing
+ getRequest.parent(request.param("parent"));
+ getRequest.preference(request.param("preference"));
+ getRequest.realtime(request.paramAsBoolean("realtime", null));
+ // don't get any fields back...
+ getRequest.fields(Strings.EMPTY_ARRAY);
+ // TODO we can also just return the document size as Content-Length
+
+ client.get(getRequest, new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse response) {
+ try {
+ if (!response.isExists()) {
+ channel.sendResponse(new StringRestResponse(NOT_FOUND));
+ } else {
+ channel.sendResponse(new StringRestResponse(OK));
+ }
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new StringRestResponse(ExceptionsHelper.status(e)));
+ } catch (Exception e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java
new file mode 100644
index 0000000..a89385f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.get;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+public class RestMultiGetAction extends BaseRestHandler {
+
+ private final boolean allowExplicitIndex;
+
+ @Inject
+ public RestMultiGetAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_mget", this);
+ controller.registerHandler(POST, "/_mget", this);
+ controller.registerHandler(GET, "/{index}/_mget", this);
+ controller.registerHandler(POST, "/{index}/_mget", this);
+ controller.registerHandler(GET, "/{index}/{type}/_mget", this);
+ controller.registerHandler(POST, "/{index}/{type}/_mget", this);
+
+ this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ MultiGetRequest multiGetRequest = new MultiGetRequest();
+ multiGetRequest.listenerThreaded(false);
+ multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
+ multiGetRequest.preference(request.param("preference"));
+ multiGetRequest.realtime(request.paramAsBoolean("realtime", null));
+
+ String[] sFields = null;
+ String sField = request.param("fields");
+ if (sField != null) {
+ sFields = Strings.splitStringByCommaToArray(sField);
+ }
+
+ FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);
+
+ try {
+ multiGetRequest.add(request.param("index"), request.param("type"), sFields, defaultFetchSource, request.param("routing"), RestActions.getRestContent(request), allowExplicitIndex);
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.multiGet(multiGetRequest, new ActionListener<MultiGetResponse>() {
+ @Override
+ public void onResponse(MultiGetResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ response.toXContent(builder, request);
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java
new file mode 100644
index 0000000..29ba868
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.index;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+import static org.elasticsearch.rest.RestStatus.*;
+
+/**
+ *
+ */
+public class RestIndexAction extends BaseRestHandler {
+
+ @Inject
+ public RestIndexAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/{index}/{type}", this); // auto id creation
+ controller.registerHandler(PUT, "/{index}/{type}/{id}", this);
+ controller.registerHandler(POST, "/{index}/{type}/{id}", this);
+ controller.registerHandler(PUT, "/{index}/{type}/{id}/_create", new CreateHandler());
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_create", new CreateHandler());
+ }
+
+ final class CreateHandler implements RestHandler {
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel) {
+ request.params().put("op_type", "create");
+ RestIndexAction.this.handleRequest(request, channel);
+ }
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id"));
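+ // a hedged note: respond on the calling thread (no listener fork), but run the index operation itself on a worker thread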
+ indexRequest.listenerThreaded(false);
+ indexRequest.operationThreaded(true);
+ indexRequest.routing(request.param("routing"));
+ indexRequest.parent(request.param("parent")); // order matters: parent is set after routing, because parent() falls back to setting the routing
+ indexRequest.timestamp(request.param("timestamp"));
+ if (request.hasParam("ttl")) {
+ indexRequest.ttl(request.paramAsTime("ttl", null).millis());
+ }
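+ // the 'unsafe' flag signals that the content buffer may be recycled, so the request copies it when needed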
+ indexRequest.source(request.content(), request.contentUnsafe());
+ indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
+ indexRequest.refresh(request.paramAsBoolean("refresh", indexRequest.refresh()));
+ indexRequest.version(RestActions.parseVersion(request));
+ indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType()));
+ String sOpType = request.param("op_type");
+ if (sOpType != null) {
+ if ("index".equals(sOpType)) {
+ indexRequest.opType(IndexRequest.OpType.INDEX);
+ } else if ("create".equals(sOpType)) {
+ indexRequest.opType(IndexRequest.OpType.CREATE);
+ } else {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", "opType [" + sOpType + "] not allowed, either [index] or [create] are allowed").endObject()));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ // bail out after rejecting an invalid op_type; otherwise the document would still be indexed below
+ return;
+ }
+ }
+ String replicationType = request.param("replication");
+ if (replicationType != null) {
+ indexRequest.replicationType(ReplicationType.fromString(replicationType));
+ }
+ String consistencyLevel = request.param("consistency");
+ if (consistencyLevel != null) {
+ indexRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
+ }
+ client.index(indexRequest, new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject()
+ .field(Fields._INDEX, response.getIndex())
+ .field(Fields._TYPE, response.getType())
+ .field(Fields._ID, response.getId())
+ .field(Fields._VERSION, response.getVersion())
+ .field(Fields.CREATED, response.isCreated());
+ builder.endObject();
+ RestStatus status = OK;
+ if (response.isCreated()) {
+ status = CREATED;
+ }
+ channel.sendResponse(new XContentRestResponse(request, status, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ static final XContentBuilderString CREATED = new XContentBuilderString("created");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java b/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java
new file mode 100644
index 0000000..8f26c9f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.main;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.Build;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.HEAD;
+
+/**
+ *
+ */
+public class RestMainAction extends BaseRestHandler {
+
+ private final Version version;
+
+ @Inject
+ public RestMainAction(Settings settings, Version version, Client client, RestController controller) {
+ super(settings, client);
+ this.version = version;
+ controller.registerHandler(GET, "/", this);
+ controller.registerHandler(HEAD, "/", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
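+ // read only the cluster blocks from the local node, without waiting for a master, so "/" stays responsive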
+ clusterStateRequest.listenerThreaded(false);
+ clusterStateRequest.masterNodeTimeout(TimeValue.timeValueMillis(0));
+ clusterStateRequest.local(true);
+ clusterStateRequest.clear().blocks(true);
+ client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
+ @Override
+ public void onResponse(ClusterStateResponse response) {
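+ // a global SERVICE_UNAVAILABLE block (e.g. cluster state not recovered yet) is surfaced as the HTTP status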
+ RestStatus status = RestStatus.OK;
+ if (response.getState().blocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE)) {
+ status = RestStatus.SERVICE_UNAVAILABLE;
+ }
+ if (request.method() == RestRequest.Method.HEAD) {
+ channel.sendResponse(new StringRestResponse(status));
+ return;
+ }
+
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+
+ // Default to pretty printing, but allow ?pretty=false to disable
+ if (!request.hasParam("pretty")) {
+ builder.prettyPrint().lfAtEnd();
+ }
+
+ builder.startObject();
+ builder.field("status", status.getStatus());
+ if (settings.get("name") != null) {
+ builder.field("name", settings.get("name"));
+ }
+ builder.startObject("version")
+ .field("number", version.number())
+ .field("build_hash", Build.CURRENT.hash())
+ .field("build_timestamp", Build.CURRENT.timestamp())
+ .field("build_snapshot", version.snapshot)
+ // We use the lucene version from the lucene constants since
+ // it includes the bugfix release version as well and is already in
+ // the right format. We can also be sure the format is maintained,
+ // since it is also recorded in lucene segments and has BW compat.
+ .field("lucene_version", Constants.LUCENE_MAIN_VERSION)
+ .endObject();
+ builder.field("tagline", "You Know, for Search");
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, status, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ if (request.method() == HEAD) {
+ channel.sendResponse(new StringRestResponse(ExceptionsHelper.status(e)));
+ } else {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ }
+ } catch (Exception e1) {
+ logger.warn("Failed to send response", e);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java
new file mode 100644
index 0000000..4dacd0e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.mlt;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.search.Scroll;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.moreLikeThisRequest;
+import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ *
+ */
+public class RestMoreLikeThisAction extends BaseRestHandler {
+
+ @Inject
+ public RestMoreLikeThisAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/{index}/{type}/{id}/_mlt", this);
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_mlt", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ MoreLikeThisRequest mltRequest = moreLikeThisRequest(request.param("index")).type(request.param("type")).id(request.param("id"));
+ mltRequest.routing(request.param("routing"));
+
+ mltRequest.listenerThreaded(false);
+ try {
+ // TODO: the ParseField class that encapsulates the supported names used for an attribute
+ // needs some work if it is to be used in a REST context like this too.
+ // See the MoreLikeThisQueryParser constants that hold the valid syntax.
+ mltRequest.fields(request.paramAsStringArray("mlt_fields", null));
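+ // -1 and null mean "not set", letting the more-like-this defaults apply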
+ mltRequest.percentTermsToMatch(request.paramAsFloat("percent_terms_to_match", -1));
+ mltRequest.minTermFreq(request.paramAsInt("min_term_freq", -1));
+ mltRequest.maxQueryTerms(request.paramAsInt("max_query_terms", -1));
+ mltRequest.stopWords(request.paramAsStringArray("stop_words", null));
+ mltRequest.minDocFreq(request.paramAsInt("min_doc_freq", -1));
+ mltRequest.maxDocFreq(request.paramAsInt("max_doc_freq", -1));
+ mltRequest.minWordLength(request.paramAsInt("min_word_len", request.paramAsInt("min_word_length", -1)));
+ mltRequest.maxWordLength(request.paramAsInt("max_word_len", request.paramAsInt("max_word_length", -1)));
+ mltRequest.boostTerms(request.paramAsFloat("boost_terms", -1));
+
+ mltRequest.searchType(SearchType.fromString(request.param("search_type")));
+ mltRequest.searchIndices(request.paramAsStringArray("search_indices", null));
+ mltRequest.searchTypes(request.paramAsStringArray("search_types", null));
+ mltRequest.searchQueryHint(request.param("search_query_hint"));
+ mltRequest.searchSize(request.paramAsInt("search_size", mltRequest.searchSize()));
+ mltRequest.searchFrom(request.paramAsInt("search_from", mltRequest.searchFrom()));
+ String searchScroll = request.param("search_scroll");
+ if (searchScroll != null) {
+ mltRequest.searchScroll(new Scroll(parseTimeValue(searchScroll, null)));
+ }
+ if (request.hasContent()) {
+ mltRequest.searchSource(request.content(), request.contentUnsafe());
+ } else {
+ String searchSource = request.param("search_source");
+ if (searchSource != null) {
+ mltRequest.searchSource(searchSource);
+ }
+ }
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.moreLikeThis(mltRequest, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java b/src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java
new file mode 100644
index 0000000..b806459
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.percolate;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.percolate.MultiPercolateRequest;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ *
+ */
+public class RestMultiPercolateAction extends BaseRestHandler {
+
+ private final boolean allowExplicitIndex;
+
+ @Inject
+ public RestMultiPercolateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_mpercolate", this);
+ controller.registerHandler(POST, "/{index}/_mpercolate", this);
+ controller.registerHandler(POST, "/{index}/{type}/_mpercolate", this);
+
+ controller.registerHandler(GET, "/_mpercolate", this);
+ controller.registerHandler(GET, "/{index}/_mpercolate", this);
+ controller.registerHandler(GET, "/{index}/{type}/_mpercolate", this);
+
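+ // when false, bodies may not name an index explicitly; the URL alone decides (useful for URL-based access control)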
+ this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest restRequest, final RestChannel restChannel) {
+ MultiPercolateRequest multiPercolateRequest = new MultiPercolateRequest();
+ multiPercolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, multiPercolateRequest.indicesOptions()));
+ multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index")));
+ multiPercolateRequest.documentType(restRequest.param("type"));
+
+ try {
+ multiPercolateRequest.add(RestActions.getRestContent(restRequest), restRequest.contentUnsafe(), allowExplicitIndex);
+ } catch (Exception e) {
+ try {
+ restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.multiPercolate(multiPercolateRequest, new ActionListener<MultiPercolateResponse>() {
+
+ @Override
+ public void onResponse(MultiPercolateResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(restRequest);
+ response.toXContent(builder, restRequest);
+ restChannel.sendResponse(new XContentRestResponse(restRequest, OK, builder));
+ } catch (IOException e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java b/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java
new file mode 100644
index 0000000..0b2f71a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.action.percolate;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.percolate.PercolateRequest;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ *
+ */
+public class RestPercolateAction extends BaseRestHandler {
+
+ @Inject
+ public RestPercolateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/{index}/{type}/_percolate", this);
+ controller.registerHandler(POST, "/{index}/{type}/_percolate", this);
+
+ RestPercolateExistingDocHandler existingDocHandler = new RestPercolateExistingDocHandler();
+ controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate", existingDocHandler);
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate", existingDocHandler);
+
+ RestCountPercolateDocHandler countHandler = new RestCountPercolateDocHandler();
+ controller.registerHandler(GET, "/{index}/{type}/_percolate/count", countHandler);
+ controller.registerHandler(POST, "/{index}/{type}/_percolate/count", countHandler);
+
+ RestCountPercolateExistingDocHandler countExistingDocHandler = new RestCountPercolateExistingDocHandler();
+ controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler);
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler);
+ }
+
+ void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel) {
+ percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index")));
+ percolateRequest.documentType(restRequest.param("type"));
+ percolateRequest.routing(restRequest.param("routing"));
+ percolateRequest.preference(restRequest.param("preference"));
+ percolateRequest.source(RestActions.getRestContent(restRequest), restRequest.contentUnsafe());
+
+ percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));
+ executePercolate(percolateRequest, restRequest, restChannel);
+ }
+
+ void parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel) {
+ String index = restRequest.param("index");
+ String type = restRequest.param("type");
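+ // the document is fetched from {index}/{type}, while the percolator queries may live in a separate percolate_index/percolate_type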
+ percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("percolate_index", index)));
+ percolateRequest.documentType(restRequest.param("percolate_type", type));
+
+ GetRequest getRequest = new GetRequest(index, type,
+ restRequest.param("id"));
+ getRequest.routing(restRequest.param("routing"));
+ getRequest.preference(restRequest.param("preference"));
+ getRequest.refresh(restRequest.paramAsBoolean("refresh", getRequest.refresh()));
+ getRequest.realtime(restRequest.paramAsBoolean("realtime", null));
+ getRequest.version(RestActions.parseVersion(restRequest));
+ getRequest.versionType(VersionType.fromString(restRequest.param("version_type"), getRequest.versionType()));
+
+ percolateRequest.getRequest(getRequest);
+ percolateRequest.routing(restRequest.param("percolate_routing"));
+ percolateRequest.preference(restRequest.param("percolate_preference"));
+ percolateRequest.source(restRequest.content(), restRequest.contentUnsafe());
+
+ percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));
+ executePercolate(percolateRequest, restRequest, restChannel);
+ }
+
+ void executePercolate(final PercolateRequest percolateRequest, final RestRequest restRequest, final RestChannel restChannel) {
+ // we just send a response, no need to fork
+ percolateRequest.listenerThreaded(false);
+
+ if (restRequest.hasParam("operation_threading")) {
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(restRequest.param("operation_threading"), null);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // don't do work on the network thread
+ operationThreading = BroadcastOperationThreading.SINGLE_THREAD;
+ }
+ percolateRequest.operationThreading(operationThreading);
+ }
+
+ client.percolate(percolateRequest, new ActionListener<PercolateResponse>() {
+ @Override
+ public void onResponse(PercolateResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(restRequest);
+ response.toXContent(builder, restRequest);
+ restChannel.sendResponse(new XContentRestResponse(restRequest, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void handleRequest(RestRequest restRequest, RestChannel restChannel) {
+ PercolateRequest percolateRequest = new PercolateRequest();
+ parseDocPercolate(percolateRequest, restRequest, restChannel);
+ }
+
+ final class RestCountPercolateDocHandler implements RestHandler {
+
+ @Override
+ public void handleRequest(RestRequest restRequest, RestChannel restChannel) {
+ PercolateRequest percolateRequest = new PercolateRequest();
+ percolateRequest.onlyCount(true);
+ parseDocPercolate(percolateRequest, restRequest, restChannel);
+ }
+
+ }
+
+ final class RestPercolateExistingDocHandler implements RestHandler {
+
+ @Override
+ public void handleRequest(RestRequest restRequest, RestChannel restChannel) {
+ PercolateRequest percolateRequest = new PercolateRequest();
+ parseExistingDocPercolate(percolateRequest, restRequest, restChannel);
+ }
+
+ }
+
+ final class RestCountPercolateExistingDocHandler implements RestHandler {
+
+ @Override
+ public void handleRequest(RestRequest restRequest, RestChannel restChannel) {
+ PercolateRequest percolateRequest = new PercolateRequest();
+ percolateRequest.onlyCount(true);
+ parseExistingDocPercolate(percolateRequest, restRequest, restChannel);
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
new file mode 100644
index 0000000..9be724e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ */
+public class RestClearScrollAction extends BaseRestHandler {
+
+ @Inject
+ public RestClearScrollAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(DELETE, "/_search/scroll", this);
+ controller.registerHandler(DELETE, "/_search/scroll/{scroll_id}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
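+ // scroll ids are taken from the URL if present, otherwise from the raw request body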
+ String scrollIds = request.param("scroll_id");
+ if (scrollIds == null) {
+ scrollIds = RestActions.getRestContent(request).toUtf8();
+ }
+
+ ClearScrollRequest clearRequest = new ClearScrollRequest();
+ clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds)));
+ client.clearScroll(clearRequest, new ActionListener<ClearScrollResponse>() {
+ @Override
+ public void onResponse(ClearScrollResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
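+ // the clear-scroll response body is intentionally empty; success is conveyed by the 200 status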
+ builder.startObject();
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ public static String[] splitScrollIds(String scrollIds) {
+ if (scrollIds == null) {
+ return Strings.EMPTY_ARRAY;
+ }
+ return Strings.splitStringByCommaToArray(scrollIds);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
new file mode 100644
index 0000000..10a1ec0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.MultiSearchRequest;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ */
+public class RestMultiSearchAction extends BaseRestHandler {
+
+ private final boolean allowExplicitIndex;
+
+ @Inject
+ public RestMultiSearchAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(GET, "/_msearch", this);
+ controller.registerHandler(POST, "/_msearch", this);
+ controller.registerHandler(GET, "/{index}/_msearch", this);
+ controller.registerHandler(POST, "/{index}/_msearch", this);
+ controller.registerHandler(GET, "/{index}/{type}/_msearch", this);
+ controller.registerHandler(POST, "/{index}/{type}/_msearch", this);
+
+ this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+ multiSearchRequest.listenerThreaded(false);
+
+ String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ String[] types = Strings.splitStringByCommaToArray(request.param("type"));
+ IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions());
+
+ try {
+ multiSearchRequest.add(RestActions.getRestContent(request), request.contentUnsafe(), indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex);
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.multiSearch(multiSearchRequest, new ActionListener<MultiSearchResponse>() {
+ @Override
+ public void onResponse(MultiSearchResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
new file mode 100644
index 0000000..17ab668
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.search;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchOperationThreading;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.sort.SortOrder;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+import static org.elasticsearch.search.suggest.SuggestBuilder.termSuggestion;
+
+/**
+ *
+ */
+public class RestSearchAction extends BaseRestHandler {
+
+ @Inject
+ public RestSearchAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_search", this);
+ controller.registerHandler(POST, "/_search", this);
+ controller.registerHandler(GET, "/{index}/_search", this);
+ controller.registerHandler(POST, "/{index}/_search", this);
+ controller.registerHandler(GET, "/{index}/{type}/_search", this);
+ controller.registerHandler(POST, "/{index}/{type}/_search", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ SearchRequest searchRequest;
+ try {
+ searchRequest = RestSearchAction.parseSearchRequest(request);
+ searchRequest.listenerThreaded(false);
+ SearchOperationThreading operationThreading = SearchOperationThreading.fromString(request.param("operation_threading"), null);
+ if (operationThreading != null) {
+ if (operationThreading == SearchOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = SearchOperationThreading.SINGLE_THREAD;
+ }
+ searchRequest.operationThreading(operationThreading);
+ }
+ } catch (Exception e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to parse search request parameters", e);
+ }
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+ client.search(searchRequest, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, response.status(), builder));
+ } catch (Exception e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("failed to execute search (building response)", e);
+ }
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ public static SearchRequest parseSearchRequest(RestRequest request) {
+ String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ SearchRequest searchRequest = new SearchRequest(indices);
+ // get the content, and put it in the body
+ if (request.hasContent()) {
+ searchRequest.source(request.content(), request.contentUnsafe());
+ } else {
+ String source = request.param("source");
+ if (source != null) {
+ searchRequest.source(source);
+ }
+ }
+ // add extra source based on the request parameters
+ searchRequest.extraSource(parseSearchSource(request));
+
+ searchRequest.searchType(request.param("search_type"));
+
+ String scroll = request.param("scroll");
+ if (scroll != null) {
+ searchRequest.scroll(new Scroll(parseTimeValue(scroll, null)));
+ }
+
+ searchRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
+ searchRequest.routing(request.param("routing"));
+ searchRequest.preference(request.param("preference"));
+ searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
+
+ return searchRequest;
+ }
+
+ public static SearchSourceBuilder parseSearchSource(RestRequest request) {
+ SearchSourceBuilder searchSourceBuilder = null;
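+ // created lazily: stays null (adding no extra source) unless at least one source-related parameter is present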
+ String queryString = request.param("q");
+ if (queryString != null) {
+ QueryStringQueryBuilder queryBuilder = QueryBuilders.queryString(queryString);
+ queryBuilder.defaultField(request.param("df"));
+ queryBuilder.analyzer(request.param("analyzer"));
+ queryBuilder.analyzeWildcard(request.paramAsBoolean("analyze_wildcard", false));
+ queryBuilder.lowercaseExpandedTerms(request.paramAsBoolean("lowercase_expanded_terms", true));
+ queryBuilder.lenient(request.paramAsBoolean("lenient", null));
+ String defaultOperator = request.param("default_operator");
+ if (defaultOperator != null) {
+ if ("OR".equals(defaultOperator)) {
+ queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.OR);
+ } else if ("AND".equals(defaultOperator)) {
+ queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]");
+ }
+ }
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.query(queryBuilder);
+ }
+
+ int from = request.paramAsInt("from", -1);
+ if (from != -1) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.from(from);
+ }
+ int size = request.paramAsInt("size", -1);
+ if (size != -1) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.size(size);
+ }
+
+ if (request.hasParam("explain")) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.explain(request.paramAsBoolean("explain", null));
+ }
+ if (request.hasParam("version")) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.version(request.paramAsBoolean("version", null));
+ }
+ if (request.hasParam("timeout")) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.timeout(request.paramAsTime("timeout", null));
+ }
+
+ String sField = request.param("fields");
+ if (sField != null) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ if (!Strings.hasText(sField)) {
+ searchSourceBuilder.noFields();
+ } else {
+ String[] sFields = Strings.splitStringByCommaToArray(sField);
+ if (sFields != null) {
+ for (String field : sFields) {
+ searchSourceBuilder.field(field);
+ }
+ }
+ }
+ }
+ FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
+ if (fetchSourceContext != null) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.fetchSource(fetchSourceContext);
+ }
+
+ if (request.hasParam("track_scores")) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false));
+ }
+
+ String sSorts = request.param("sort");
+ if (sSorts != null) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ String[] sorts = Strings.splitStringByCommaToArray(sSorts);
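+ // each entry is "field:asc", "field:desc", or a bare field name for the default order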
+ for (String sort : sorts) {
+ int delimiter = sort.lastIndexOf(":");
+ if (delimiter != -1) {
+ String sortField = sort.substring(0, delimiter);
+ String reverse = sort.substring(delimiter + 1);
+ if ("asc".equals(reverse)) {
+ searchSourceBuilder.sort(sortField, SortOrder.ASC);
+ } else if ("desc".equals(reverse)) {
+ searchSourceBuilder.sort(sortField, SortOrder.DESC);
+ }
+ } else {
+ searchSourceBuilder.sort(sort);
+ }
+ }
+ }
+
+ String sIndicesBoost = request.param("indices_boost");
+ if (sIndicesBoost != null) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ String[] indicesBoost = Strings.splitStringByCommaToArray(sIndicesBoost);
+ for (String indexBoost : indicesBoost) {
+ // entries are already comma-separated, so the index/boost pair must be delimited by ':'
+ int divisor = indexBoost.indexOf(':');
+ if (divisor == -1) {
+ throw new ElasticsearchIllegalArgumentException("Illegal index boost [" + indexBoost + "], no ':'");
+ }
+ String indexName = indexBoost.substring(0, divisor);
+ String sBoost = indexBoost.substring(divisor + 1);
+ try {
+ searchSourceBuilder.indexBoost(indexName, Float.parseFloat(sBoost));
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchIllegalArgumentException("Illegal index boost [" + indexBoost + "], boost not a float number");
+ }
+ }
+ }
+
+ String sStats = request.param("stats");
+ if (sStats != null) {
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ searchSourceBuilder.stats(Strings.splitStringByCommaToArray(sStats));
+ }
+
+ String suggestField = request.param("suggest_field");
+ if (suggestField != null) {
+ String suggestText = request.param("suggest_text", queryString);
+ int suggestSize = request.paramAsInt("suggest_size", 5);
+ if (searchSourceBuilder == null) {
+ searchSourceBuilder = new SearchSourceBuilder();
+ }
+ String suggestMode = request.param("suggest_mode");
+ searchSourceBuilder.suggest().addSuggestion(
+ termSuggestion(suggestField).field(suggestField).text(suggestText).size(suggestSize)
+ .suggestMode(suggestMode)
+ );
+ }
+
+ return searchSourceBuilder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
new file mode 100644
index 0000000..7de4470
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchOperationThreading;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.search.Scroll;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ *
+ */
+public class RestSearchScrollAction extends BaseRestHandler {
+
+ @Inject
+ public RestSearchScrollAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+
+ controller.registerHandler(GET, "/_search/scroll", this);
+ controller.registerHandler(POST, "/_search/scroll", this);
+ controller.registerHandler(GET, "/_search/scroll/{scroll_id}", this);
+ controller.registerHandler(POST, "/_search/scroll/{scroll_id}", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
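+ // the scroll id may be passed as a path parameter or as the raw request body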
+ String scrollId = request.param("scroll_id");
+ if (scrollId == null) {
+ scrollId = RestActions.getRestContent(request).toUtf8();
+ }
+ SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId);
+ searchScrollRequest.listenerThreaded(false);
+ try {
+ String scroll = request.param("scroll");
+ if (scroll != null) {
+ searchScrollRequest.scroll(new Scroll(parseTimeValue(scroll, null)));
+ }
+ SearchOperationThreading operationThreading = SearchOperationThreading.fromString(request.param("operation_threading"), null);
+ if (operationThreading != null) {
+ if (operationThreading == SearchOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = SearchOperationThreading.SINGLE_THREAD;
+ }
+ searchScrollRequest.operationThreading(operationThreading);
+ }
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.searchScroll(searchScrollRequest, new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ builder.startObject();
+ response.toXContent(builder, request);
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, response.status(), builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java
new file mode 100644
index 0000000..2f3d653
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.suggest;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
+
+/**
+ *
+ */
+public class RestSuggestAction extends BaseRestHandler {
+
+ @Inject
+ public RestSuggestAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/_suggest", this);
+ controller.registerHandler(GET, "/_suggest", this);
+ controller.registerHandler(POST, "/{index}/_suggest", this);
+ controller.registerHandler(GET, "/{index}/_suggest", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ SuggestRequest suggestRequest = new SuggestRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions()));
+ suggestRequest.listenerThreaded(false);
+ try {
+ BroadcastOperationThreading operationThreading = BroadcastOperationThreading.fromString(request.param("operation_threading"), BroadcastOperationThreading.THREAD_PER_SHARD);
+ if (operationThreading == BroadcastOperationThreading.NO_THREADS) {
+ // since we don't spawn, don't allow no_threads, but change it to a single thread
+ operationThreading = BroadcastOperationThreading.SINGLE_THREAD;
+ }
+ suggestRequest.operationThreading(operationThreading);
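+ // the suggest definition may arrive as the request body or via the 'source' query parameter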
+ if (request.hasContent()) {
+ suggestRequest.suggest(request.content(), request.contentUnsafe());
+ } else {
+ String source = request.param("source");
+ if (source != null) {
+ suggestRequest.suggest(source);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("no content or source provided to execute suggestion");
+ }
+ }
+ suggestRequest.routing(request.param("routing"));
+ suggestRequest.preference(request.param("preference"));
+ } catch (Exception e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ return;
+ }
+
+ client.suggest(suggestRequest, new ActionListener<SuggestResponse>() {
+ @Override
+ public void onResponse(SuggestResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject();
+ buildBroadcastShardsHeader(builder, response);
+ Suggest suggest = response.getSuggest();
+ if (suggest != null) {
+ suggest.toXContent(builder, request);
+ }
+ builder.endObject();
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
new file mode 100644
index 0000000..8cc9002
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.support;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+import org.elasticsearch.rest.RestRequest;
+
+import java.io.IOException;
+
+/**
+ * Static helpers shared by the REST action implementations: version parsing,
+ * shard-header rendering, query-source parsing and request-content access.
+ */
+public class RestActions {
+
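+    /**
+     * Extracts the document version from the {@code version} request
+     * parameter, falling back to the {@code If-Match} header; returns 0 when
+     * neither is present.
+     */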
+ public static long parseVersion(RestRequest request) {
+ if (request.hasParam("version")) {
+ return request.paramAsLong("version", 0);
+ }
+ String ifMatch = request.header("If-Match");
+ if (ifMatch != null) {
+ return Long.parseLong(ifMatch);
+ }
+ return 0;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _SHARDS = new XContentBuilderString("_shards");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful");
+ static final XContentBuilderString FAILED = new XContentBuilderString("failed");
+ static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
+ static final XContentBuilderString INDEX = new XContentBuilderString("index");
+ static final XContentBuilderString SHARD = new XContentBuilderString("shard");
+ static final XContentBuilderString REASON = new XContentBuilderString("reason");
+ }
+
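+    // Writes the standard shards header, e.g. (counts hypothetical):
+    //   "_shards" : { "total" : 5, "successful" : 5, "failed" : 0 }
+    // with a "failures" array appended when shard failures are present.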
+ public static void buildBroadcastShardsHeader(XContentBuilder builder, BroadcastOperationResponse response) throws IOException {
+ builder.startObject(Fields._SHARDS);
+ builder.field(Fields.TOTAL, response.getTotalShards());
+ builder.field(Fields.SUCCESSFUL, response.getSuccessfulShards());
+ builder.field(Fields.FAILED, response.getFailedShards());
+ if (response.getShardFailures() != null && response.getShardFailures().length > 0) {
+ builder.startArray(Fields.FAILURES);
+ for (ShardOperationFailedException shardFailure : response.getShardFailures()) {
+ builder.startObject();
+ if (shardFailure.index() != null) {
+ builder.field(Fields.INDEX, shardFailure.index(), XContentBuilder.FieldCaseConversion.NONE);
+ }
+ if (shardFailure.shardId() != -1) {
+ builder.field(Fields.SHARD, shardFailure.shardId());
+ }
+ builder.field(Fields.REASON, shardFailure.reason());
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ }
+
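+    // Builds a query source from URI parameters, e.g. (values hypothetical):
+    //   GET /_search?q=user:kimchy&df=body&default_operator=AND
+    // Returns null when no "q" parameter is present.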
+ public static QuerySourceBuilder parseQuerySource(RestRequest request) {
+ String queryString = request.param("q");
+ if (queryString == null) {
+ return null;
+ }
+ QueryStringQueryBuilder queryBuilder = QueryBuilders.queryString(queryString);
+ queryBuilder.defaultField(request.param("df"));
+ queryBuilder.analyzer(request.param("analyzer"));
+ String defaultOperator = request.param("default_operator");
+ if (defaultOperator != null) {
+ if ("OR".equals(defaultOperator)) {
+ queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.OR);
+ } else if ("AND".equals(defaultOperator)) {
+ queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]");
+ }
+ }
+ return new QuerySourceBuilder().setQuery(queryBuilder);
+ }
+
+ /**
+ * Get Rest content from either payload or source parameter
+ * @param request Rest request
+ * @return rest content
+ */
+ public static BytesReference getRestContent(RestRequest request) {
+ assert request != null;
+
+ BytesReference content = request.content();
+ if (!request.hasContent()) {
+ String source = request.param("source");
+ if (source != null) {
+ content = new BytesArray(source);
+ }
+ }
+
+ return content;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestTable.java b/src/main/java/org/elasticsearch/rest/action/support/RestTable.java
new file mode 100644
index 0000000..21d41eb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/support/RestTable.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.support;
+
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.Table;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.rest.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Renders a {@link Table} either as structured content or as column-padded
+ * plain text, depending on the requested format.
+ */
+public class RestTable {
+
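+    // Picks the rendering: structured content when "format" or the
+    // Content-Type header names a known x-content type, plain text otherwise.
+    // In plain text, "v" adds a header row and "h" selects columns, e.g.
+    // (hypothetical): GET ...?v&h=index,docs.count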
+ public static RestResponse buildResponse(Table table, RestRequest request, RestChannel channel) throws Exception {
+ XContentType xContentType = XContentType.fromRestContentType(request.param("format", request.header("Content-Type")));
+ if (xContentType != null) {
+ return buildXContentBuilder(table, request, channel);
+ }
+ return buildTextPlainResponse(table, request, channel);
+ }
+
+ public static RestResponse buildXContentBuilder(Table table, RestRequest request, RestChannel channel) throws Exception {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ List<DisplayHeader> displayHeaders = buildDisplayHeaders(table, request);
+
+ builder.startArray();
+ for (int row = 0; row < table.getRows().size(); row++) {
+ builder.startObject();
+ for (DisplayHeader header : displayHeaders) {
+ builder.field(header.display, renderValue(request, table.getAsMap().get(header.name).get(row).value));
+ }
+            builder.endObject();
+        }
+ builder.endArray();
+ return new XContentRestResponse(request, RestStatus.OK, builder);
+ }
+
+ public static RestResponse buildTextPlainResponse(Table table, RestRequest request, RestChannel channel) {
+ boolean verbose = request.paramAsBoolean("v", false);
+
+ List<DisplayHeader> headers = buildDisplayHeaders(table, request);
+ int[] width = buildWidths(table, request, verbose, headers);
+
+ StringBuilder out = new StringBuilder();
+ if (verbose) {
+ for (int col = 0; col < headers.size(); col++) {
+ DisplayHeader header = headers.get(col);
+ pad(new Table.Cell(header.display, table.findHeaderByName(header.name)), width[col], request, out);
+ out.append(" ");
+ }
+ out.append("\n");
+ }
+
+ for (int row = 0; row < table.getRows().size(); row++) {
+ for (int col = 0; col < headers.size(); col++) {
+ DisplayHeader header = headers.get(col);
+ pad(table.getAsMap().get(header.name).get(row), width[col], request, out);
+ out.append(" ");
+ }
+ out.append("\n");
+ }
+
+ return new StringRestResponse(RestStatus.OK, out.toString());
+ }
+
+ private static List<DisplayHeader> buildDisplayHeaders(Table table, RestRequest request) {
+ String pHeaders = request.param("h");
+ List<DisplayHeader> display = new ArrayList<DisplayHeader>();
+ if (pHeaders != null) {
+ for (String possibility : Strings.splitStringByCommaToArray(pHeaders)) {
+ DisplayHeader dispHeader = null;
+
+ if (table.getAsMap().containsKey(possibility)) {
+ dispHeader = new DisplayHeader(possibility, possibility);
+ } else {
+ for (Table.Cell headerCell : table.getHeaders()) {
+ String aliases = headerCell.attr.get("alias");
+ if (aliases != null) {
+ for (String alias : Strings.splitStringByCommaToArray(aliases)) {
+ if (possibility.equals(alias)) {
+ dispHeader = new DisplayHeader(headerCell.value.toString(), alias);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (dispHeader != null) {
+ // We know we need the header asked for:
+ display.add(dispHeader);
+
+ // Look for accompanying sibling column
+ Table.Cell hcell = table.getHeaderMap().get(dispHeader.name);
+ String siblingFlag = hcell.attr.get("sibling");
+ if (siblingFlag != null) {
+ // ...link the sibling and check that its flag is set
+ String sibling = siblingFlag + "." + dispHeader.name;
+ Table.Cell c = table.getHeaderMap().get(sibling);
+ if (c != null && request.paramAsBoolean(siblingFlag, false)) {
+ display.add(new DisplayHeader(c.value.toString(), siblingFlag + "." + dispHeader.display));
+ }
+ }
+ }
+ }
+ } else {
+ for (Table.Cell cell : table.getHeaders()) {
+ String d = cell.attr.get("default");
+ if (Booleans.parseBoolean(d, true)) {
+ display.add(new DisplayHeader(cell.value.toString(), cell.value.toString()));
+ }
+ }
+ }
+ return display;
+ }
+
+ public static int[] buildHelpWidths(Table table, RestRequest request, boolean verbose) {
+ int[] width = new int[3];
+ for (Table.Cell cell : table.getHeaders()) {
+ String v = renderValue(request, cell.value);
+ int vWidth = v == null ? 0 : v.length();
+ if (width[0] < vWidth) {
+ width[0] = vWidth;
+ }
+
+ v = renderValue(request, cell.attr.containsKey("alias") ? cell.attr.get("alias") : "");
+ vWidth = v == null ? 0 : v.length();
+ if (width[1] < vWidth) {
+ width[1] = vWidth;
+ }
+
+ v = renderValue(request, cell.attr.containsKey("desc") ? cell.attr.get("desc") : "not available");
+ vWidth = v == null ? 0 : v.length();
+ if (width[2] < vWidth) {
+ width[2] = vWidth;
+ }
+ }
+ return width;
+ }
+
+ private static int[] buildWidths(Table table, RestRequest request, boolean verbose, List<DisplayHeader> headers) {
+ int[] width = new int[headers.size()];
+ int i;
+
+ if (verbose) {
+ i = 0;
+ for (DisplayHeader hdr : headers) {
+ int vWidth = hdr.display.length();
+ if (width[i] < vWidth) {
+ width[i] = vWidth;
+ }
+ i++;
+ }
+ }
+
+ i = 0;
+ for (DisplayHeader hdr : headers) {
+ for (Table.Cell cell : table.getAsMap().get(hdr.name)) {
+ String v = renderValue(request, cell.value);
+ int vWidth = v == null ? 0 : v.length();
+ if (width[i] < vWidth) {
+ width[i] = vWidth;
+ }
+ }
+ i++;
+ }
+ return width;
+ }
+
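+    // Pads the rendered value with spaces to the column width, right-aligning
+    // when the cell's "text-align" attribute is "right".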
+ public static void pad(Table.Cell cell, int width, RestRequest request, StringBuilder out) {
+ String sValue = renderValue(request, cell.value);
+ int length = sValue == null ? 0 : sValue.length();
+ byte leftOver = (byte) (width - length);
+ String textAlign = cell.attr.get("text-align");
+ if (textAlign == null) {
+ textAlign = "left";
+ }
+ if (leftOver > 0 && textAlign.equals("right")) {
+ for (byte i = 0; i < leftOver; i++) {
+ out.append(" ");
+ }
+ if (sValue != null) {
+ out.append(sValue);
+ }
+ } else {
+ if (sValue != null) {
+ out.append(sValue);
+ }
+ for (byte i = 0; i < leftOver; i++) {
+ out.append(" ");
+ }
+ }
+ }
+
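+    // Values honour resolution parameters, e.g. (hypothetical): "?bytes=k"
+    // renders a ByteSizeValue in kilobytes, "?size=m" a SizeValue via mega(),
+    // and "?time=s" a TimeValue in seconds; otherwise toString() is used.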
+ private static String renderValue(RestRequest request, Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof ByteSizeValue) {
+ ByteSizeValue v = (ByteSizeValue) value;
+ String resolution = request.param("bytes");
+ if ("b".equals(resolution)) {
+ return Long.toString(v.bytes());
+ } else if ("k".equals(resolution)) {
+ return Long.toString(v.kb());
+ } else if ("m".equals(resolution)) {
+ return Long.toString(v.mb());
+ } else if ("g".equals(resolution)) {
+ return Long.toString(v.gb());
+ } else if ("t".equals(resolution)) {
+ return Long.toString(v.tb());
+ } else if ("p".equals(resolution)) {
+ return Long.toString(v.pb());
+ } else {
+ return v.toString();
+ }
+ }
+ if (value instanceof SizeValue) {
+ SizeValue v = (SizeValue) value;
+ String resolution = request.param("size");
+ if ("b".equals(resolution)) {
+ return Long.toString(v.singles());
+ } else if ("k".equals(resolution)) {
+ return Long.toString(v.kilo());
+ } else if ("m".equals(resolution)) {
+ return Long.toString(v.mega());
+ } else if ("g".equals(resolution)) {
+ return Long.toString(v.giga());
+ } else if ("t".equals(resolution)) {
+ return Long.toString(v.tera());
+ } else if ("p".equals(resolution)) {
+ return Long.toString(v.peta());
+ } else {
+ return v.toString();
+ }
+ }
+ if (value instanceof TimeValue) {
+ TimeValue v = (TimeValue) value;
+ String resolution = request.param("time");
+ if ("ms".equals(resolution)) {
+ return Long.toString(v.millis());
+ } else if ("s".equals(resolution)) {
+ return Long.toString(v.seconds());
+ } else if ("m".equals(resolution)) {
+ return Long.toString(v.minutes());
+ } else if ("h".equals(resolution)) {
+ return Long.toString(v.hours());
+ } else {
+ return v.toString();
+ }
+ }
+ // Add additional built in data points we can render based on request parameters?
+ return value.toString();
+ }
+
+ static class DisplayHeader {
+ public final String name;
+ public final String display;
+
+ DisplayHeader(String name, String display) {
+ this.name = name;
+ this.display = display;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestXContentBuilder.java b/src/main/java/org/elasticsearch/rest/action/support/RestXContentBuilder.java
new file mode 100644
index 0000000..cff2555
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/support/RestXContentBuilder.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.support;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.rest.RestRequest;
+
+import java.io.IOException;
+
+/**
+ * Helpers for creating {@link XContentBuilder}s for REST responses, honouring
+ * the request's format, pretty, human and case parameters.
+ */
+public class RestXContentBuilder {
+
+ public static XContentBuilder restContentBuilder(RestRequest request) throws IOException {
+ // use the request body as the auto detect source (if it exists)
+ return restContentBuilder(request, request.hasContent() ? request.content() : null);
+ }
+
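+    // The content type comes from the "format" parameter or the Content-Type
+    // header, is otherwise guessed from the body, and defaults to JSON;
+    // "pretty" and "human" toggle pretty-printing and human-readable values,
+    // e.g. (hypothetical): GET /_suggest?pretty&format=yaml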
+ public static XContentBuilder restContentBuilder(RestRequest request, @Nullable BytesReference autoDetectSource) throws IOException {
+ XContentType contentType = XContentType.fromRestContentType(request.param("format", request.header("Content-Type")));
+ if (contentType == null) {
+ // try and guess it from the auto detect source
+ if (autoDetectSource != null) {
+ contentType = XContentFactory.xContentType(autoDetectSource);
+ }
+ }
+ if (contentType == null) {
+ // default to JSON
+ contentType = XContentType.JSON;
+ }
+ XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(contentType), new BytesStreamOutput());
+ if (request.paramAsBoolean("pretty", false)) {
+ builder.prettyPrint().lfAtEnd();
+ }
+
+ builder.humanReadable(request.paramAsBoolean("human", builder.humanReadable()));
+
+ String casing = request.param("case");
+ if (casing != null && "camelCase".equals(casing)) {
+ builder.fieldCaseConversion(XContentBuilder.FieldCaseConversion.CAMELCASE);
+ } else {
+ // we expect all REST interfaces to write results in underscore casing, so
+ // no need for double casing
+ builder.fieldCaseConversion(XContentBuilder.FieldCaseConversion.NONE);
+ }
+ return builder;
+ }
+
+ public static XContentBuilder emptyBuilder(RestRequest request) throws IOException {
+ return restContentBuilder(request, request.hasContent() ? request.content() : null).startObject().endObject();
+ }
+
+ /**
+ * Directly writes the source to the output builder
+ */
+ public static void directSource(BytesReference source, XContentBuilder rawBuilder, ToXContent.Params params) throws IOException {
+ XContentHelper.writeDirect(source, rawBuilder, params);
+ }
+
+ public static void restDocumentSource(BytesReference source, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ XContentHelper.writeRawField("_source", source, builder, params);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java b/src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java
new file mode 100644
index 0000000..e1b0b6f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.termvector;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.termvector.MultiTermVectorsRequest;
+import org.elasticsearch.action.termvector.MultiTermVectorsResponse;
+import org.elasticsearch.action.termvector.TermVectorRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+public class RestMultiTermVectorsAction extends BaseRestHandler {
+
+ @Inject
+ public RestMultiTermVectorsAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/_mtermvectors", this);
+ controller.registerHandler(POST, "/_mtermvectors", this);
+ controller.registerHandler(GET, "/{index}/_mtermvectors", this);
+ controller.registerHandler(POST, "/{index}/_mtermvectors", this);
+ controller.registerHandler(GET, "/{index}/{type}/_mtermvectors", this);
+ controller.registerHandler(POST, "/{index}/{type}/_mtermvectors", this);
+ }
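+
+    // Example (names hypothetical): POST /myindex/mytype/_mtermvectors?ids=1,2
+    // where the index, type and URI options form a template shared by all ids.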
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+
+ MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest();
+ multiTermVectorsRequest.listenerThreaded(false);
+ TermVectorRequest template = new TermVectorRequest();
+ template.index(request.param("index"));
+ template.type(request.param("type"));
+ RestTermVectorAction.readURIParameters(template, request);
+ multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param("ids")));
+
+ try {
+ multiTermVectorsRequest.add(template, RestActions.getRestContent(request));
+ } catch (Throwable t) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, t));
+ } catch (Throwable tIO) {
+ logger.error("Failed to send failure response", tIO);
+ }
+ return;
+ }
+
+ client.multiTermVectors(multiTermVectorsRequest, new ActionListener<MultiTermVectorsResponse>() {
+ @Override
+ public void onResponse(MultiTermVectorsResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ response.toXContent(builder, request);
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable t) {
+ onFailure(t);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (Throwable t) {
+ logger.error("Failed to send failure response", t);
+ }
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/termvector/RestTermVectorAction.java b/src/main/java/org/elasticsearch/rest/action/termvector/RestTermVectorAction.java
new file mode 100644
index 0000000..1aa3702
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/termvector/RestTermVectorAction.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.termvector;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.termvector.TermVectorRequest;
+import org.elasticsearch.action.termvector.TermVectorResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder;
+
+/**
+ * This class parses the JSON request and translates it into a
+ * TermVectorRequest.
+ */
+public class RestTermVectorAction extends BaseRestHandler {
+
+ @Inject
+ public RestTermVectorAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(GET, "/{index}/{type}/{id}/_termvector", this);
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_termvector", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+
+ TermVectorRequest termVectorRequest = new TermVectorRequest(request.param("index"), request.param("type"), request.param("id"));
+ XContentParser parser = null;
+ if (request.hasContent()) {
+ try {
+ parser = XContentFactory.xContent(request.content()).createParser(request.content());
+ TermVectorRequest.parseRequest(termVectorRequest, parser);
+ } catch (IOException e) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
+                } catch (IOException e1) {
+                    logger.warn("Failed to send response", e1);
+                }
+                // do not execute the request once parsing has failed
+                return;
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+ readURIParameters(termVectorRequest, request);
+
+ client.termVector(termVectorRequest, new ActionListener<TermVectorResponse>() {
+ @Override
+ public void onResponse(TermVectorResponse response) {
+ try {
+ XContentBuilder builder = restContentBuilder(request);
+ response.toXContent(builder, request);
+ channel.sendResponse(new XContentRestResponse(request, OK, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+
+ }
+
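+    // Reads URI-level options; both camelCase and snake_case spellings of the
+    // statistics flags are accepted, e.g. (names hypothetical):
+    //   GET /myindex/mytype/1/_termvector?fields=body&positions=false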
+    public static void readURIParameters(TermVectorRequest termVectorRequest, RestRequest request) {
+ String fields = request.param("fields");
+ addFieldStringsFromParameter(termVectorRequest, fields);
+ termVectorRequest.offsets(request.paramAsBoolean("offsets", termVectorRequest.offsets()));
+ termVectorRequest.positions(request.paramAsBoolean("positions", termVectorRequest.positions()));
+ termVectorRequest.payloads(request.paramAsBoolean("payloads", termVectorRequest.payloads()));
+ termVectorRequest.routing(request.param("routing"));
+ termVectorRequest.parent(request.param("parent"));
+ termVectorRequest.preference(request.param("preference"));
+ termVectorRequest.termStatistics(request.paramAsBoolean("termStatistics", termVectorRequest.termStatistics()));
+ termVectorRequest.termStatistics(request.paramAsBoolean("term_statistics", termVectorRequest.termStatistics()));
+ termVectorRequest.fieldStatistics(request.paramAsBoolean("fieldStatistics", termVectorRequest.fieldStatistics()));
+ termVectorRequest.fieldStatistics(request.paramAsBoolean("field_statistics", termVectorRequest.fieldStatistics()));
+ }
+
+    public static void addFieldStringsFromParameter(TermVectorRequest termVectorRequest, String fields) {
+ Set<String> selectedFields = termVectorRequest.selectedFields();
+ if (fields != null) {
+ String[] paramFieldStrings = Strings.commaDelimitedListToStringArray(fields);
+ for (String field : paramFieldStrings) {
+ if (selectedFields == null) {
+ selectedFields = new HashSet<String>();
+ }
+ if (!selectedFields.contains(field)) {
+ field = field.replaceAll("\\s", "");
+ selectedFields.add(field);
+ }
+ }
+ }
+ if (selectedFields != null) {
+ termVectorRequest.selectedFields(selectedFields.toArray(new String[selectedFields.size()]));
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java
new file mode 100644
index 0000000..98f414a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.update;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.*;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.rest.RestStatus.CREATED;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+/**
+ * Rest handler for the update API, registered on
+ * {@code /{index}/{type}/{id}/_update}.
+ */
+public class RestUpdateAction extends BaseRestHandler {
+
+ @Inject
+ public RestUpdateAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(POST, "/{index}/{type}/{id}/_update", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel) {
+ UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id"));
+ updateRequest.listenerThreaded(false);
+ updateRequest.routing(request.param("routing"));
+ updateRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+ updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout()));
+ updateRequest.refresh(request.paramAsBoolean("refresh", updateRequest.refresh()));
+ String replicationType = request.param("replication");
+ if (replicationType != null) {
+ updateRequest.replicationType(ReplicationType.fromString(replicationType));
+ }
+ String consistencyLevel = request.param("consistency");
+ if (consistencyLevel != null) {
+ updateRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
+ }
+ updateRequest.docAsUpsert(request.paramAsBoolean("doc_as_upsert", updateRequest.docAsUpsert()));
+ updateRequest.script(request.param("script"));
+ updateRequest.scriptLang(request.param("lang"));
+ for (Map.Entry<String, String> entry : request.params().entrySet()) {
+ if (entry.getKey().startsWith("sp_")) {
+ updateRequest.addScriptParam(entry.getKey().substring(3), entry.getValue());
+ }
+ }
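+        // e.g. (hypothetical): ?script=ctx._source.counter+=count&sp_count=4
+        // passes the script parameter "count" with value "4".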
+ String sField = request.param("fields");
+ if (sField != null) {
+ String[] sFields = Strings.splitStringByCommaToArray(sField);
+ if (sFields != null) {
+ updateRequest.fields(sFields);
+ }
+ }
+ updateRequest.retryOnConflict(request.paramAsInt("retry_on_conflict", updateRequest.retryOnConflict()));
+ updateRequest.version(RestActions.parseVersion(request));
+ updateRequest.versionType(VersionType.fromString(request.param("version_type"), updateRequest.versionType()));
+
+ // see if we have it in the body
+ if (request.hasContent()) {
+ try {
+ updateRequest.source(request.content());
+ IndexRequest upsertRequest = updateRequest.upsertRequest();
+ if (upsertRequest != null) {
+ upsertRequest.routing(request.param("routing"));
+ upsertRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+ upsertRequest.timestamp(request.param("timestamp"));
+ if (request.hasParam("ttl")) {
+ upsertRequest.ttl(request.paramAsTime("ttl", null).millis());
+ }
+ upsertRequest.version(RestActions.parseVersion(request));
+ upsertRequest.versionType(VersionType.fromString(request.param("version_type"), upsertRequest.versionType()));
+ }
+ IndexRequest doc = updateRequest.doc();
+ if (doc != null) {
+ doc.routing(request.param("routing"));
+ doc.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+ doc.timestamp(request.param("timestamp"));
+ if (request.hasParam("ttl")) {
+ doc.ttl(request.paramAsTime("ttl", null).millis());
+ }
+ doc.version(RestActions.parseVersion(request));
+ doc.versionType(VersionType.fromString(request.param("version_type"), doc.versionType()));
+ }
+ } catch (Exception e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.warn("Failed to send response", e1);
+ }
+ return;
+ }
+ }
+
+ client.update(updateRequest, new ActionListener<UpdateResponse>() {
+ @Override
+ public void onResponse(UpdateResponse response) {
+ try {
+ XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
+ builder.startObject()
+ .field(Fields._INDEX, response.getIndex())
+ .field(Fields._TYPE, response.getType())
+ .field(Fields._ID, response.getId())
+ .field(Fields._VERSION, response.getVersion());
+
+ if (response.getGetResult() != null) {
+ builder.startObject(Fields.GET);
+ response.getGetResult().toXContentEmbedded(builder, request);
+ builder.endObject();
+ }
+
+ builder.endObject();
+ RestStatus status = OK;
+ if (response.isCreated()) {
+ status = CREATED;
+ }
+ channel.sendResponse(new XContentRestResponse(request, status, builder));
+ } catch (Throwable e) {
+ onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ channel.sendResponse(new XContentThrowableRestResponse(request, e));
+ } catch (IOException e1) {
+ logger.error("Failed to send failure response", e1);
+ }
+ }
+ });
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ static final XContentBuilderString MATCHES = new XContentBuilderString("matches");
+ static final XContentBuilderString GET = new XContentBuilderString("get");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/rest/support/RestUtils.java b/src/main/java/org/elasticsearch/rest/support/RestUtils.java
new file mode 100644
index 0000000..8f4ee5f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/rest/support/RestUtils.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.support;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.path.PathTrie;
+
+import java.nio.charset.Charset;
+import java.util.Map;
+
+/**
+ * Utilities for REST request handling: URL component decoding and query
+ * string parsing.
+ */
+public class RestUtils {
+
+ public static PathTrie.Decoder REST_DECODER = new PathTrie.Decoder() {
+ @Override
+ public String decode(String value) {
+ return RestUtils.decodeComponent(value);
+ }
+ };
+
+ public static boolean isBrowser(@Nullable String userAgent) {
+ if (userAgent == null) {
+ return false;
+ }
+ // chrome, safari, firefox, ie
+ if (userAgent.startsWith("Mozilla")) {
+ return true;
+ }
+ return false;
+ }
+
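+    // Parses a query string into the params map, e.g. (hypothetical):
+    //   decodeQueryString("/_search?q=foo&pretty", 9, params)
+    // leaves params = { "q" -> "foo", "pretty" -> "" }.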
+ public static void decodeQueryString(String s, int fromIndex, Map<String, String> params) {
+ if (fromIndex < 0) {
+ return;
+ }
+ if (fromIndex >= s.length()) {
+ return;
+ }
+
+ String name = null;
+ int pos = fromIndex; // Beginning of the unprocessed region
+ int i; // End of the unprocessed region
+ char c = 0; // Current character
+ for (i = fromIndex; i < s.length(); i++) {
+ c = s.charAt(i);
+ if (c == '=' && name == null) {
+ if (pos != i) {
+ name = decodeComponent(s.substring(pos, i));
+ }
+ pos = i + 1;
+ } else if (c == '&') {
+ if (name == null && pos != i) {
+ // We haven't seen an `=' so far but moved forward.
+ // Must be a param of the form '&a&' so add it with
+ // an empty value.
+ addParam(params, decodeComponent(s.substring(pos, i)), "");
+ } else if (name != null) {
+ addParam(params, name, decodeComponent(s.substring(pos, i)));
+ name = null;
+ }
+ pos = i + 1;
+ }
+ }
+
+ if (pos != i) { // Are there characters we haven't dealt with?
+ if (name == null) { // Yes and we haven't seen any `='.
+ addParam(params, decodeComponent(s.substring(pos, i)), "");
+ } else { // Yes and this must be the last value.
+ addParam(params, name, decodeComponent(s.substring(pos, i)));
+ }
+ } else if (name != null) { // Have we seen a name without value?
+ addParam(params, name, "");
+ }
+ }
+
+ private static void addParam(Map<String, String> params, String name, String value) {
+ params.put(name, value);
+ }
+
+ /**
+     * Decodes a bit of a URL encoded by a browser.
+ * <p/>
+ * This is equivalent to calling {@link #decodeComponent(String, Charset)}
+ * with the UTF-8 charset (recommended to comply with RFC 3986, Section 2).
+ *
+ * @param s The string to decode (can be empty).
+ * @return The decoded string, or {@code s} if there's nothing to decode.
+ * If the string to decode is {@code null}, returns an empty string.
+ * @throws IllegalArgumentException if the string contains a malformed
+ * escape sequence.
+ */
+ public static String decodeComponent(final String s) {
+ return decodeComponent(s, Charsets.UTF_8);
+ }
+
+ /**
+     * Decodes a bit of a URL encoded by a browser.
+ * <p/>
+ * The string is expected to be encoded as per RFC 3986, Section 2.
+ * This is the encoding used by JavaScript functions {@code encodeURI}
+ * and {@code encodeURIComponent}, but not {@code escape}. For example
+ * in this encoding, &eacute; (in Unicode {@code U+00E9} or in UTF-8
+ * {@code 0xC3 0xA9}) is encoded as {@code %C3%A9} or {@code %c3%a9}.
+ * <p/>
+ * This is essentially equivalent to calling
+ * <code>{@link java.net.URLDecoder URLDecoder}.{@link
+ * java.net.URLDecoder#decode(String, String)}</code>
+ * except that it's over 2x faster and generates less garbage for the GC.
+ * Actually this function doesn't allocate any memory if there's nothing
+ * to decode, the argument itself is returned.
+ *
+ * @param s The string to decode (can be empty).
+ * @param charset The charset to use to decode the string (should really
+     *                be {@link Charsets#UTF_8}).
+ * @return The decoded string, or {@code s} if there's nothing to decode.
+ * If the string to decode is {@code null}, returns an empty string.
+ * @throws IllegalArgumentException if the string contains a malformed
+ * escape sequence.
+ */
+ @SuppressWarnings("fallthrough")
+ public static String decodeComponent(final String s, final Charset charset) {
+ if (s == null) {
+ return "";
+ }
+ final int size = s.length();
+ boolean modified = false;
+ for (int i = 0; i < size; i++) {
+ final char c = s.charAt(i);
+ switch (c) {
+ case '%':
+ i++; // We can skip at least one char, e.g. `%%'.
+ // Fall through.
+ case '+':
+ modified = true;
+ break;
+ }
+ }
+ if (!modified) {
+ return s;
+ }
+ final byte[] buf = new byte[size];
+ int pos = 0; // position in `buf'.
+ for (int i = 0; i < size; i++) {
+ char c = s.charAt(i);
+ switch (c) {
+ case '+':
+ buf[pos++] = ' '; // "+" -> " "
+ break;
+ case '%':
+ if (i == size - 1) {
+ throw new IllegalArgumentException("unterminated escape"
+ + " sequence at end of string: " + s);
+ }
+ c = s.charAt(++i);
+ if (c == '%') {
+ buf[pos++] = '%'; // "%%" -> "%"
+ break;
+ } else if (i == size - 1) {
+ throw new IllegalArgumentException("partial escape"
+ + " sequence at end of string: " + s);
+ }
+ c = decodeHexNibble(c);
+ final char c2 = decodeHexNibble(s.charAt(++i));
+ if (c == Character.MAX_VALUE || c2 == Character.MAX_VALUE) {
+ throw new IllegalArgumentException(
+ "invalid escape sequence `%" + s.charAt(i - 1)
+ + s.charAt(i) + "' at index " + (i - 2)
+ + " of: " + s);
+ }
+ c = (char) (c * 16 + c2);
+ // Fall through.
+ default:
+ buf[pos++] = (byte) c;
+ break;
+ }
+ }
+ return new String(buf, 0, pos, charset);
+ }
+
+ /**
+ * Helper to decode half of a hexadecimal number from a string.
+ *
+ * @param c The ASCII character of the hexadecimal number to decode.
+ * Must be in the range {@code [0-9a-fA-F]}.
+ * @return The hexadecimal value represented in the ASCII character
+ * given, or {@link Character#MAX_VALUE} if the character is invalid.
+ */
+ private static char decodeHexNibble(final char c) {
+ if ('0' <= c && c <= '9') {
+ return (char) (c - '0');
+ } else if ('a' <= c && c <= 'f') {
+ return (char) (c - 'a' + 10);
+ } else if ('A' <= c && c <= 'F') {
+ return (char) (c - 'A' + 10);
+ } else {
+ return Character.MAX_VALUE;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/AbstractRiverComponent.java b/src/main/java/org/elasticsearch/river/AbstractRiverComponent.java
new file mode 100644
index 0000000..804ffac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/AbstractRiverComponent.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+/**
+ * Convenience base class for river components, providing the river's name,
+ * its settings and a preconfigured logger.
+ */
+public class AbstractRiverComponent implements RiverComponent {
+
+ protected final ESLogger logger;
+
+ protected final RiverName riverName;
+
+ protected final RiverSettings settings;
+
+ protected AbstractRiverComponent(RiverName riverName, RiverSettings settings) {
+ this.riverName = riverName;
+ this.settings = settings;
+
+ this.logger = Loggers.getLogger(getClass(), settings.globalSettings(), riverName);
+ }
+
+ @Override
+ public RiverName riverName() {
+ return riverName;
+ }
+
+ public String nodeName() {
+ return settings.globalSettings().get("name", "");
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/river/River.java b/src/main/java/org/elasticsearch/river/River.java
new file mode 100644
index 0000000..6f9638c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/River.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+/**
+ * Allows data to be imported into elasticsearch via a plugin. A river is
+ * allocated on a node and automatically re-allocated to another node if needed.
+ */
+public interface River extends RiverComponent {
+
+ /**
+ * Called whenever the river is registered on a node, which can happen when:
+ * 1) the river _meta document gets indexed
+ * 2) an already registered river gets started on a node
+ */
+ void start();
+
+ /**
+ * Called when the river is closed on a node, which can happen when:
+ * 1) the river is deleted by deleting its type through the delete mapping api
+ * 2) the node where the river is allocated is shut down or the river gets rerouted to another node
+ */
+ void close();
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverComponent.java b/src/main/java/org/elasticsearch/river/RiverComponent.java
new file mode 100644
index 0000000..f07ac97
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverComponent.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+/**
+ * A component belonging to a river, identified by its {@link RiverName}.
+ */
+public interface RiverComponent {
+
+ RiverName riverName();
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverException.java b/src/main/java/org/elasticsearch/river/RiverException.java
new file mode 100644
index 0000000..4f92439
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * An exception raised by a river, with its message prefixed by the river's
+ * type and name.
+ */
+public class RiverException extends ElasticsearchException {
+
+ private final RiverName river;
+
+ public RiverException(RiverName river, String msg) {
+ this(river, msg, null);
+ }
+
+ public RiverException(RiverName river, String msg, Throwable cause) {
+ this(river, true, msg, cause);
+ }
+
+ protected RiverException(RiverName river, boolean withSpace, String msg, Throwable cause) {
+ super("[" + river.type() + "][" + river.name() + "]" + (withSpace ? " " : "") + msg, cause);
+ this.river = river;
+ }
+
+ public RiverName riverName() {
+ return river;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverIndexName.java b/src/main/java/org/elasticsearch/river/RiverIndexName.java
new file mode 100644
index 0000000..66d2d03
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverIndexName.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.common.inject.BindingAnnotation;
+import org.elasticsearch.common.settings.Settings;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.ElementType.PARAMETER;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+/**
+ * Binding annotation for injecting the name of the index in which river
+ * definitions are stored.
+ */
+@BindingAnnotation
+@Target({FIELD, PARAMETER})
+@Retention(RUNTIME)
+@Documented
+public @interface RiverIndexName {
+
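+    // The rivers index defaults to "_river" and can be overridden with the
+    // "river.index_name" setting.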
+ static class Conf {
+ public static final String DEFAULT_INDEX_NAME = "_river";
+
+ public static String indexName(Settings settings) {
+ return settings.get("river.index_name", DEFAULT_INDEX_NAME);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverModule.java b/src/main/java/org/elasticsearch/river/RiverModule.java
new file mode 100644
index 0000000..5075255
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverModule.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.NoClassSettingsException;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.Strings.toCamelCase;
+
+/**
+ * Module that resolves and spawns the configured river type's module and
+ * binds the river's {@link RiverSettings}.
+ */
+public class RiverModule extends AbstractModule implements SpawnModules {
+
+ private RiverName riverName;
+
+ private final Settings globalSettings;
+
+ private final Map<String, Object> settings;
+
+ private final RiversTypesRegistry typesRegistry;
+
+ public RiverModule(RiverName riverName, Map<String, Object> settings, Settings globalSettings, RiversTypesRegistry typesRegistry) {
+ this.riverName = riverName;
+ this.globalSettings = globalSettings;
+ this.settings = settings;
+ this.typesRegistry = typesRegistry;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(Modules.createModule(loadTypeModule(riverName.type(), "org.elasticsearch.river.", "RiverModule"), globalSettings));
+ }
+
+ @Override
+ protected void configure() {
+ bind(RiverSettings.class).toInstance(new RiverSettings(globalSettings, settings));
+ }
+
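+    // Resolves the module class for a river type, trying in order (for a
+    // hypothetical type "dummy"): "dummy" as a fully qualified class name,
+    // "org.elasticsearch.river.DummyRiverModule",
+    // "org.elasticsearch.river.dummy.DummyRiverModule" and the lower-cased
+    // package variant, before failing with NoClassSettingsException.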
+ private Class<? extends Module> loadTypeModule(String type, String prefixPackage, String suffixClassName) {
+ Class<? extends Module> registered = typesRegistry.type(type);
+ if (registered != null) {
+ return registered;
+ }
+ String fullClassName = type;
+ try {
+ return (Class<? extends Module>) globalSettings.getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e) {
+ fullClassName = prefixPackage + Strings.capitalize(toCamelCase(type)) + suffixClassName;
+ try {
+ return (Class<? extends Module>) globalSettings.getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e1) {
+ fullClassName = prefixPackage + toCamelCase(type) + "." + Strings.capitalize(toCamelCase(type)) + suffixClassName;
+ try {
+ return (Class<? extends Module>) globalSettings.getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e2) {
+ fullClassName = prefixPackage + toCamelCase(type).toLowerCase(Locale.ROOT) + "." + Strings.capitalize(toCamelCase(type)) + suffixClassName;
+ try {
+ return (Class<? extends Module>) globalSettings.getClassLoader().loadClass(fullClassName);
+ } catch (ClassNotFoundException e3) {
+ throw new NoClassSettingsException("Failed to load class with value [" + type + "]", e);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverName.java b/src/main/java/org/elasticsearch/river/RiverName.java
new file mode 100644
index 0000000..e214972
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverName.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import java.io.Serializable;
+
+/**
+ * Identifies a river by its type and name.
+ */
+public class RiverName implements Serializable {
+
+ private final String type;
+
+ private final String name;
+
+ public RiverName(String type, String name) {
+ this.type = type;
+ this.name = name;
+ }
+
+ public String type() {
+ return this.type;
+ }
+
+ public String getType() {
+ return type();
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String getName() {
+ return name();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RiverName that = (RiverName) o;
+
+ if (name != null ? !name.equals(that.name) : that.name != null) return false;
+ if (type != null ? !type.equals(that.type) : that.type != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = type != null ? type.hashCode() : 0;
+ result = 31 * result + (name != null ? name.hashCode() : 0);
+ return result;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverNameModule.java b/src/main/java/org/elasticsearch/river/RiverNameModule.java
new file mode 100644
index 0000000..cc908c0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverNameModule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class RiverNameModule extends AbstractModule {
+
+ private final RiverName riverName;
+
+ public RiverNameModule(RiverName riverName) {
+ this.riverName = riverName;
+ }
+
+ @Override
+ protected void configure() {
+ bind(RiverName.class).toInstance(riverName);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/RiverSettings.java b/src/main/java/org/elasticsearch/river/RiverSettings.java
new file mode 100644
index 0000000..a062148
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiverSettings.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+/**
+ * (shay.banon)
+ */
+
+public class RiverSettings {
+
+ private final Settings globalSettings;
+
+ private final Map<String, Object> settings;
+
+ public RiverSettings(Settings globalSettings, Map<String, Object> settings) {
+ this.globalSettings = globalSettings;
+ this.settings = settings;
+ }
+
+ public Settings globalSettings() {
+ return globalSettings;
+ }
+
+ public Map<String, Object> settings() {
+ return settings;
+ }
+}
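
A river implementation receives this object by constructor injection; a minimal sketch of reading a river-specific value out of the raw settings map, assuming a river created with source {"type": "dummy", "dummy": {"interval": "5s"}} (the "dummy" section and "interval" key are illustrative; XContentMapValues is org.elasticsearch.common.xcontent.support.XContentMapValues, used the same way later in this patch):

    @SuppressWarnings("unchecked")
    Map<String, Object> dummySettings = (Map<String, Object>) riverSettings.settings().get("dummy");
    String interval = dummySettings == null ? "10s"
            : XContentMapValues.nodeStringValue(dummySettings.get("interval"), "10s");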
diff --git a/src/main/java/org/elasticsearch/river/RiversManager.java b/src/main/java/org/elasticsearch/river/RiversManager.java
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiversManager.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.river.cluster.RiverClusterService;
+import org.elasticsearch.river.routing.RiversRouter;
+
+/**
+ *
+ */
+public class RiversManager extends AbstractLifecycleComponent<RiversManager> {
+
+ private final RiversService riversService;
+
+ private final RiverClusterService clusterService;
+
+ private final RiversRouter riversRouter;
+
+ @Inject
+ public RiversManager(Settings settings, RiversService riversService, RiverClusterService clusterService, RiversRouter riversRouter) {
+ super(settings);
+ this.riversService = riversService;
+ this.clusterService = clusterService;
+ this.riversRouter = riversRouter;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ riversRouter.start();
+ riversService.start();
+ clusterService.start();
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ riversRouter.stop();
+ clusterService.stop();
+ riversService.stop();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ riversRouter.close();
+ clusterService.close();
+ riversService.close();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/RiversModule.java b/src/main/java/org/elasticsearch/river/RiversModule.java
new file mode 100644
index 0000000..912874f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiversModule.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.river.cluster.RiverClusterService;
+import org.elasticsearch.river.routing.RiversRouter;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class RiversModule extends AbstractModule {
+
+ private final Settings settings;
+
+ private Map<String, Class<? extends Module>> riverTypes = Maps.newHashMap();
+
+ public RiversModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ /**
+ * Registers a custom river type name against a module.
+ *
+ * @param type The type
+ * @param module The module
+ */
+ public void registerRiver(String type, Class<? extends Module> module) {
+ riverTypes.put(type, module);
+ }
+
+ @Override
+ protected void configure() {
+ bind(String.class).annotatedWith(RiverIndexName.class).toInstance(RiverIndexName.Conf.indexName(settings));
+ bind(RiversService.class).asEagerSingleton();
+ bind(RiverClusterService.class).asEagerSingleton();
+ bind(RiversRouter.class).asEagerSingleton();
+ bind(RiversManager.class).asEagerSingleton();
+ bind(RiversTypesRegistry.class).toInstance(new RiversTypesRegistry(ImmutableMap.copyOf(riverTypes)));
+ }
+}
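
A plugin registers a river type against this module through the usual onModule callback invoked by PluginsService; a minimal sketch wiring up the DummyRiverModule added later in this patch (the plugin class itself is hypothetical):

    public class DummyRiverPlugin extends org.elasticsearch.plugins.AbstractPlugin {
        @Override
        public String name() {
            return "river-dummy";
        }

        @Override
        public String description() {
            return "registers the dummy river type";
        }

        // called reflectively by PluginsService while modules are assembled
        public void onModule(RiversModule module) {
            module.registerRiver("dummy", org.elasticsearch.river.dummy.DummyRiverModule.class);
        }
    }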
diff --git a/src/main/java/org/elasticsearch/river/RiversPluginsModule.java b/src/main/java/org/elasticsearch/river/RiversPluginsModule.java
new file mode 100644
index 0000000..f2059fd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiversPluginsModule.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.PluginsService;
+
+/**
+ * A module that simply calls the {@link PluginsService#processModule(org.elasticsearch.common.inject.Module)}
+ * in order to allow plugins to pre process specific river modules.
+ */
+public class RiversPluginsModule extends AbstractModule implements PreProcessModule {
+
+ private final Settings settings;
+
+ private final PluginsService pluginsService;
+
+ public RiversPluginsModule(Settings settings, PluginsService pluginsService) {
+ this.settings = settings;
+ this.pluginsService = pluginsService;
+ }
+
+ @Override
+ public void processModule(Module module) {
+ pluginsService.processModule(module);
+ }
+
+ @Override
+ protected void configure() {
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/river/RiversService.java b/src/main/java/org/elasticsearch/river/RiversService.java
new file mode 100644
index 0000000..e924158
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiversService.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
+import org.elasticsearch.action.get.GetRequestBuilder;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Injectors;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.river.cluster.RiverClusterChangedEvent;
+import org.elasticsearch.river.cluster.RiverClusterService;
+import org.elasticsearch.river.cluster.RiverClusterState;
+import org.elasticsearch.river.cluster.RiverClusterStateListener;
+import org.elasticsearch.river.routing.RiverRouting;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
+
+/**
+ *
+ */
+public class RiversService extends AbstractLifecycleComponent<RiversService> {
+
+ private final String riverIndexName;
+
+ private Client client;
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final RiversTypesRegistry typesRegistry;
+
+ private final Injector injector;
+
+ private final Map<RiverName, Injector> riversInjectors = Maps.newHashMap();
+
+ private volatile ImmutableMap<RiverName, River> rivers = ImmutableMap.of();
+
+ @Inject
+ public RiversService(Settings settings, Client client, ThreadPool threadPool, ClusterService clusterService, RiversTypesRegistry typesRegistry, RiverClusterService riverClusterService, Injector injector) {
+ super(settings);
+ this.riverIndexName = RiverIndexName.Conf.indexName(settings);
+ this.client = client;
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.typesRegistry = typesRegistry;
+ this.injector = injector;
+ riverClusterService.add(new ApplyRivers());
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ ImmutableSet<RiverName> indices = ImmutableSet.copyOf(this.rivers.keySet());
+ final CountDownLatch latch = new CountDownLatch(indices.size());
+ for (final RiverName riverName : indices) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ closeRiver(riverName);
+ } catch (Exception e) {
+ logger.warn("failed to delete river on stop [{}]/[{}]", e, riverName.type(), riverName.name());
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ public synchronized void createRiver(RiverName riverName, Map<String, Object> settings) throws ElasticsearchException {
+ if (riversInjectors.containsKey(riverName)) {
+ logger.warn("ignoring river [{}][{}] creation, already exists", riverName.type(), riverName.name());
+ return;
+ }
+
+ logger.debug("creating river [{}][{}]", riverName.type(), riverName.name());
+
+ try {
+ ModulesBuilder modules = new ModulesBuilder();
+ modules.add(new RiverNameModule(riverName));
+ modules.add(new RiverModule(riverName, settings, this.settings, typesRegistry));
+ modules.add(new RiversPluginsModule(this.settings, injector.getInstance(PluginsService.class)));
+
+ Injector indexInjector = modules.createChildInjector(injector);
+ riversInjectors.put(riverName, indexInjector);
+ River river = indexInjector.getInstance(River.class);
+ rivers = MapBuilder.newMapBuilder(rivers).put(riverName, river).immutableMap();
+
+
+ // we start the river here so operations that can't run during construction (like
+ // creating an index) can be performed, since Guice can't create two concurrent child injectors
+ river.start();
+
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+
+ builder.startObject("node");
+ builder.field("id", clusterService.localNode().id());
+ builder.field("name", clusterService.localNode().name());
+ builder.field("transport_address", clusterService.localNode().address().toString());
+ builder.endObject();
+
+ builder.endObject();
+
+
+ client.prepareIndex(riverIndexName, riverName.name(), "_status")
+ .setConsistencyLevel(WriteConsistencyLevel.ONE)
+ .setSource(builder).execute().actionGet();
+ } catch (Exception e) {
+ logger.warn("failed to create river [{}][{}]", e, riverName.type(), riverName.name());
+
+ try {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ builder.field("error", ExceptionsHelper.detailedMessage(e));
+
+ builder.startObject("node");
+ builder.field("id", clusterService.localNode().id());
+ builder.field("name", clusterService.localNode().name());
+ builder.field("transport_address", clusterService.localNode().address().toString());
+ builder.endObject();
+ builder.endObject();
+
+ client.prepareIndex(riverIndexName, riverName.name(), "_status")
+ .setConsistencyLevel(WriteConsistencyLevel.ONE)
+ .setSource(builder).execute().actionGet();
+ } catch (Exception e1) {
+ logger.warn("failed to write failed status for river creation", e);
+ }
+ }
+ }
+
+ public synchronized void closeRiver(RiverName riverName) throws ElasticsearchException {
+ Injector riverInjector;
+ River river;
+ synchronized (this) {
+ riverInjector = riversInjectors.remove(riverName);
+ if (riverInjector == null) {
+ throw new RiverException(riverName, "missing");
+ }
+ logger.debug("closing river [{}][{}]", riverName.type(), riverName.name());
+
+ Map<RiverName, River> tmpMap = Maps.newHashMap(rivers);
+ river = tmpMap.remove(riverName);
+ rivers = ImmutableMap.copyOf(tmpMap);
+ }
+
+ river.close();
+
+ Injectors.close(injector);
+ }
+
+ private class ApplyRivers implements RiverClusterStateListener {
+ @Override
+ public void riverClusterChanged(RiverClusterChangedEvent event) {
+ DiscoveryNode localNode = clusterService.localNode();
+ RiverClusterState state = event.state();
+
+ // first, go over and close rivers that either no longer exist or are not allocated to this node
+ for (final RiverName riverName : rivers.keySet()) {
+ RiverRouting routing = state.routing().routing(riverName);
+ if (routing == null || !localNode.equals(routing.node())) {
+ // not routed at all, or not allocated to this node: close it (deleted rivers were already removed from the routing)
+ closeRiver(riverName);
+ // also, double check and delete the river content if it was deleted (_meta does not exist)
+ try {
+ client.prepareGet(riverIndexName, riverName.name(), "_meta").setListenerThreaded(true).execute(new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse getResponse) {
+ if (!getResponse.isExists()) {
+ // verify the river is deleted
+ client.admin().indices().prepareDeleteMapping(riverIndexName).setType(riverName.name()).execute(new ActionListener<DeleteMappingResponse>() {
+ @Override
+ public void onResponse(DeleteMappingResponse deleteMappingResponse) {
+ // all is well...
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.debug("failed to (double) delete river [{}] content", e, riverName.name());
+ }
+ });
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.debug("failed to (double) delete river [{}] content", e, riverName.name());
+ }
+ });
+ } catch (IndexMissingException e) {
+ // all is well, the _river index was deleted
+ } catch (Exception e) {
+ logger.warn("unexpected failure when trying to verify river [{}] deleted", e, riverName.name());
+ }
+ }
+ }
+
+ for (final RiverRouting routing : state.routing()) {
+ // not allocated
+ if (routing.node() == null) {
+ logger.trace("river {} has no routing node", routing.riverName().getName());
+ continue;
+ }
+ // only apply changes to the local node
+ if (!routing.node().equals(localNode)) {
+ logger.trace("river {} belongs to node {}", routing.riverName().getName(), routing.node());
+ continue;
+ }
+ // if it's already created, ignore it
+ if (rivers.containsKey(routing.riverName())) {
+ logger.trace("river {} is already allocated", routing.riverName().getName());
+ continue;
+ }
+ prepareGetMetaDocument(routing.riverName().name()).execute(new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse getResponse) {
+ if (!rivers.containsKey(routing.riverName())) {
+ if (getResponse.isExists()) {
+ // only create the river if its _meta document exists; otherwise the index metadata has not become visible yet...
+ createRiver(routing.riverName(), getResponse.getSourceAsMap());
+ } else {
+ // this should never happen as we've just found the _meta document in RiversRouter
+ logger.warn("{}/{}/_meta document not found", riverIndexName, routing.riverName().getName());
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ // if this is a failure that needs to be retried, then do it
+ // this might happen if the state of the river index has not been propagated yet to this node, which
+ // should happen pretty fast since we managed to get the _meta in the RiversRouter
+ Throwable failure = ExceptionsHelper.unwrapCause(e);
+ if (isShardNotAvailableException(failure)) {
+ logger.debug("failed to get _meta from [{}]/[{}], retrying...", e, routing.riverName().type(), routing.riverName().name());
+ final ActionListener<GetResponse> listener = this;
+ try {
+ threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.SAME, new Runnable() {
+ @Override
+ public void run() {
+ prepareGetMetaDocument(routing.riverName().name()).execute(listener);
+ }
+ });
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Couldn't schedule river start retry, node might be shutting down", ex);
+ }
+ } else {
+ logger.warn("failed to get _meta from [{}]/[{}]", e, routing.riverName().type(), routing.riverName().name());
+ }
+ }
+ });
+ }
+ }
+
+ private GetRequestBuilder prepareGetMetaDocument(String riverName) {
+ return client.prepareGet(riverIndexName, riverName, "_meta").setPreference("_primary").setListenerThreaded(true);
+ }
+ }
+}
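
For reference, the _status document that createRiver writes can be read back like this; a minimal sketch, assuming the default river index name "_river" and a river named "my_river" (both names illustrative here):

    GetResponse status = client.prepareGet("_river", "my_river", "_status").get();
    if (status.isExists()) {
        // on success the source carries a "node" object with "id", "name" and
        // "transport_address"; on failure it additionally carries an "error"
        // field holding the detailed exception message
        Map<String, Object> source = status.getSourceAsMap();
    }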
diff --git a/src/main/java/org/elasticsearch/river/RiversTypesRegistry.java b/src/main/java/org/elasticsearch/river/RiversTypesRegistry.java
new file mode 100644
index 0000000..3400dc4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/RiversTypesRegistry.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.inject.Module;
+
+/**
+ * A type registry for rivers
+ */
+public class RiversTypesRegistry {
+
+ private final ImmutableMap<String, Class<? extends Module>> riverTypes;
+
+ public RiversTypesRegistry(ImmutableMap<String, Class<? extends Module>> riverTypes) {
+ this.riverTypes = riverTypes;
+ }
+
+ public Class<? extends Module> type(String type) {
+ return riverTypes.get(type);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java b/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java
new file mode 100644
index 0000000..2931539
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class PublishRiverClusterStateAction extends AbstractComponent {
+
+ public static interface NewClusterStateListener {
+ void onNewClusterState(RiverClusterState clusterState);
+ }
+
+ private final TransportService transportService;
+
+ private final ClusterService clusterService;
+
+ private final NewClusterStateListener listener;
+
+ public PublishRiverClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ NewClusterStateListener listener) {
+ super(settings);
+ this.transportService = transportService;
+ this.clusterService = clusterService;
+ this.listener = listener;
+ transportService.registerHandler(PublishClusterStateRequestHandler.ACTION, new PublishClusterStateRequestHandler());
+ }
+
+ public void close() {
+ transportService.removeHandler(PublishClusterStateRequestHandler.ACTION);
+ }
+
+ public void publish(RiverClusterState clusterState) {
+ final DiscoveryNodes discoNodes = clusterService.state().nodes();
+ final DiscoveryNode localNode = discoNodes.localNode();
+ for (final DiscoveryNode node : discoNodes) {
+ if (node.equals(localNode)) {
+ // no need to send to ourselves
+ continue;
+ }
+
+ // we only want to send to nodes that are either possible master nodes or river nodes:
+ // master nodes because they will handle the state and the allocation of rivers
+ // and river nodes since they will end up creating indexes
+
+ if (!node.masterNode() && !RiverNodeHelper.isRiverNode(node)) {
+ continue;
+ }
+
+ transportService.sendRequest(node, PublishClusterStateRequestHandler.ACTION, new PublishClusterStateRequest(clusterState), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.debug("failed to send cluster state to [{}], should be detected as failed soon...", exp, node);
+ }
+ });
+ }
+ }
+
+ private class PublishClusterStateRequest extends TransportRequest {
+
+ private RiverClusterState clusterState;
+
+ private PublishClusterStateRequest() {
+ }
+
+ private PublishClusterStateRequest(RiverClusterState clusterState) {
+ this.clusterState = clusterState;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ clusterState = RiverClusterState.Builder.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ RiverClusterState.Builder.writeTo(clusterState, out);
+ }
+ }
+
+ private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler<PublishClusterStateRequest> {
+
+ static final String ACTION = "river/state/publish";
+
+ @Override
+ public PublishClusterStateRequest newInstance() {
+ return new PublishClusterStateRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void messageReceived(PublishClusterStateRequest request, TransportChannel channel) throws Exception {
+ listener.onNewClusterState(request.clusterState);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterChangedEvent.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterChangedEvent.java
new file mode 100644
index 0000000..2863a40
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterChangedEvent.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+/**
+ *
+ */
+public class RiverClusterChangedEvent {
+
+ private final String source;
+
+ private final RiverClusterState previousState;
+
+ private final RiverClusterState state;
+
+ public RiverClusterChangedEvent(String source, RiverClusterState state, RiverClusterState previousState) {
+ this.source = source;
+ this.state = state;
+ this.previousState = previousState;
+ }
+
+ /**
+ * The source that caused this cluster event to be raised.
+ */
+ public String source() {
+ return this.source;
+ }
+
+ public RiverClusterState state() {
+ return this.state;
+ }
+
+ public RiverClusterState previousState() {
+ return this.previousState;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java
new file mode 100644
index 0000000..91dfe93
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.concurrent.Executors.newSingleThreadExecutor;
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ *
+ */
+public class RiverClusterService extends AbstractLifecycleComponent<RiverClusterService> {
+
+ private final ClusterService clusterService;
+
+ private final PublishRiverClusterStateAction publishAction;
+
+ private final List<RiverClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<RiverClusterStateListener>();
+
+ private volatile ExecutorService updateTasksExecutor;
+
+ private volatile RiverClusterState clusterState = RiverClusterState.builder().build();
+
+ @Inject
+ public RiverClusterService(Settings settings, TransportService transportService, ClusterService clusterService) {
+ super(settings);
+ this.clusterService = clusterService;
+
+ this.publishAction = new PublishRiverClusterStateAction(settings, transportService, clusterService, new UpdateClusterStateListener());
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ this.updateTasksExecutor = newSingleThreadExecutor(daemonThreadFactory(settings, "riverClusterService#updateTask"));
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ updateTasksExecutor.shutdown();
+ try {
+ updateTasksExecutor.awaitTermination(10, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ public void add(RiverClusterStateListener listener) {
+ clusterStateListeners.add(listener);
+ }
+
+ public void remove(RiverClusterStateListener listener) {
+ clusterStateListeners.remove(listener);
+ }
+
+ /**
+ * The current state.
+ */
+ public ClusterState state() {
+ return clusterService.state();
+ }
+
+ public void submitStateUpdateTask(final String source, final RiverClusterStateUpdateTask updateTask) {
+ if (!lifecycle.started()) {
+ return;
+ }
+ updateTasksExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ if (!lifecycle.started()) {
+ logger.debug("processing [{}]: ignoring, cluster_service not started", source);
+ return;
+ }
+ logger.debug("processing [{}]: execute", source);
+
+ RiverClusterState previousClusterState = clusterState;
+ try {
+ clusterState = updateTask.execute(previousClusterState);
+ } catch (Exception e) {
+ StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(clusterState.version()).append("], source [").append(source).append("]\n");
+ logger.warn(sb.toString(), e);
+ return;
+ }
+ if (previousClusterState != clusterState) {
+ if (clusterService.state().nodes().localNodeMaster()) {
+ // only the master controls the version numbers
+ clusterState = new RiverClusterState(clusterState.version() + 1, clusterState);
+ } else {
+ // we got this cluster state from the master, filter out based on versions (don't call listeners)
+ if (clusterState.version() < previousClusterState.version()) {
+ logger.debug("got old cluster state [" + clusterState.version() + "<" + previousClusterState.version() + "] from source [" + source + "], ignoring");
+ return;
+ }
+ }
+
+ if (logger.isTraceEnabled()) {
+ StringBuilder sb = new StringBuilder("cluster state updated:\nversion [").append(clusterState.version()).append("], source [").append(source).append("]\n");
+ logger.trace(sb.toString());
+ } else if (logger.isDebugEnabled()) {
+ logger.debug("cluster state updated, version [{}], source [{}]", clusterState.version(), source);
+ }
+
+ RiverClusterChangedEvent clusterChangedEvent = new RiverClusterChangedEvent(source, clusterState, previousClusterState);
+
+ for (RiverClusterStateListener listener : clusterStateListeners) {
+ listener.riverClusterChanged(clusterChangedEvent);
+ }
+
+ // if we are the master, publish the new state to all nodes
+ if (clusterService.state().nodes().localNodeMaster()) {
+ publishAction.publish(clusterState);
+ }
+
+ logger.debug("processing [{}]: done applying updated cluster_state", source);
+ } else {
+ logger.debug("processing [{}]: no change in cluster_state", source);
+ }
+ }
+ });
+ }
+
+ private class UpdateClusterStateListener implements PublishRiverClusterStateAction.NewClusterStateListener {
+ @Override
+ public void onNewClusterState(final RiverClusterState clusterState) {
+ ClusterState state = clusterService.state();
+ if (state.nodes().localNodeMaster()) {
+ logger.warn("master should not receive new cluster state from [{}]", state.nodes().masterNode());
+ return;
+ }
+
+ submitStateUpdateTask("received_state", new RiverClusterStateUpdateTask() {
+ @Override
+ public RiverClusterState execute(RiverClusterState currentState) {
+ return clusterState;
+ }
+ });
+ }
+ }
+}
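
Callers interact with this service through submitStateUpdateTask; a minimal sketch (the source string is illustrative):

    riverClusterService.submitStateUpdateTask("example_task", new RiverClusterStateUpdateTask() {
        @Override
        public RiverClusterState execute(RiverClusterState currentState) {
            // returning the same instance means "no change": nothing is versioned,
            // published, or delivered to listeners; returning a new state makes the
            // master bump the version and publish it to river and master nodes
            return currentState;
        }
    });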
diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterState.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterState.java
new file mode 100644
index 0000000..7be85a6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterState.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.river.routing.RiversRouting;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class RiverClusterState {
+
+ private final long version;
+
+ private final RiversRouting routing;
+
+ public RiverClusterState(long version, RiverClusterState state) {
+ this.version = version;
+ this.routing = state.routing();
+ }
+
+ RiverClusterState(long version, RiversRouting routing) {
+ this.version = version;
+ this.routing = routing;
+ }
+
+ public long version() {
+ return this.version;
+ }
+
+ public RiversRouting routing() {
+ return routing;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static class Builder {
+
+ private long version = 0;
+
+ private RiversRouting routing = RiversRouting.EMPTY;
+
+ public Builder state(RiverClusterState state) {
+ this.version = state.version();
+ this.routing = state.routing();
+ return this;
+ }
+
+ public Builder routing(RiversRouting.Builder builder) {
+ return routing(builder.build());
+ }
+
+ public Builder routing(RiversRouting routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ public RiverClusterState build() {
+ return new RiverClusterState(version, routing);
+ }
+
+ public static RiverClusterState readFrom(StreamInput in) throws IOException {
+ Builder builder = new Builder();
+ builder.version = in.readVLong();
+ builder.routing = RiversRouting.Builder.readFrom(in);
+ return builder.build();
+ }
+
+ public static void writeTo(RiverClusterState clusterState, StreamOutput out) throws IOException {
+ out.writeVLong(clusterState.version);
+ RiversRouting.Builder.writeTo(clusterState.routing, out);
+ }
+ }
+}
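
A minimal round-trip sketch of the Builder serialization helpers above, assuming the in-memory stream classes from org.elasticsearch.common.io.stream (BytesStreamOutput, and BytesReference.streamInput() for reading back):

    BytesStreamOutput out = new BytesStreamOutput();
    RiverClusterState.Builder.writeTo(state, out);
    RiverClusterState copy = RiverClusterState.Builder.readFrom(out.bytes().streamInput());
    assert copy.version() == state.version();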
diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterStateListener.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterStateListener.java
new file mode 100644
index 0000000..c8cac75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterStateListener.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+/**
+ *
+ */
+public interface RiverClusterStateListener {
+
+ void riverClusterChanged(RiverClusterChangedEvent event);
+}
diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterStateUpdateTask.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterStateUpdateTask.java
new file mode 100644
index 0000000..eb7d3fe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterStateUpdateTask.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+/**
+ *
+ */
+public interface RiverClusterStateUpdateTask {
+
+ RiverClusterState execute(RiverClusterState currentState);
+}
diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverNodeHelper.java b/src/main/java/org/elasticsearch/river/cluster/RiverNodeHelper.java
new file mode 100644
index 0000000..6300751
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/cluster/RiverNodeHelper.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.cluster;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.river.RiverName;
+
+/**
+ *
+ */
+public class RiverNodeHelper {
+
+ public static boolean isRiverNode(DiscoveryNode node) {
+ // we don't allocate rivers on client nodes
+ if (node.clientNode()) {
+ return false;
+ }
+ String river = node.attributes().get("river");
+ // by default, if not set, it's a river node (better out-of-the-box experience)
+ if (river == null) {
+ return true;
+ }
+ if ("_none_".equals(river)) {
+ return false;
+ }
+ // the river attribute is set to something other than _none_, so this node can run rivers
+ return true;
+ }
+
+ public static boolean isRiverNode(DiscoveryNode node, RiverName riverName) {
+ if (!isRiverNode(node)) {
+ return false;
+ }
+ String river = node.attributes().get("river");
+ // by default, if not set, it's a river node (better out-of-the-box experience)
+ return river == null || river.contains(riverName.type()) || river.contains(riverName.name());
+ }
+}
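
The "river" attribute checked above comes from the node's settings; a minimal sketch of the recognized configurations, assuming node.* settings surface as node attributes as elsewhere in this codebase (ImmutableSettings is org.elasticsearch.common.settings.ImmutableSettings):

    // never allocate rivers to this node
    Settings none = ImmutableSettings.settingsBuilder().put("node.river", "_none_").build();

    // allocate only rivers whose type or name matches the attribute value
    Settings some = ImmutableSettings.settingsBuilder().put("node.river", "dummy").build();

    // leaving node.river unset means the node accepts any river (the default)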
diff --git a/src/main/java/org/elasticsearch/river/dummy/DummyRiver.java b/src/main/java/org/elasticsearch/river/dummy/DummyRiver.java
new file mode 100644
index 0000000..f90a928
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/dummy/DummyRiver.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.dummy;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.river.AbstractRiverComponent;
+import org.elasticsearch.river.River;
+import org.elasticsearch.river.RiverName;
+import org.elasticsearch.river.RiverSettings;
+
+/**
+ *
+ */
+public class DummyRiver extends AbstractRiverComponent implements River {
+
+ @Inject
+ public DummyRiver(RiverName riverName, RiverSettings settings) {
+ super(riverName, settings);
+ logger.info("create");
+ }
+
+ @Override
+ public void start() {
+ logger.info("start");
+ }
+
+ @Override
+ public void close() {
+ logger.info("close");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/dummy/DummyRiverModule.java b/src/main/java/org/elasticsearch/river/dummy/DummyRiverModule.java
new file mode 100644
index 0000000..a0e3057
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/dummy/DummyRiverModule.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.dummy;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.river.River;
+
+/**
+ *
+ */
+public class DummyRiverModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(River.class).to(DummyRiver.class).asEagerSingleton();
+ }
+}
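
A minimal sketch of starting this dummy river by indexing its _meta document, assuming the default river index name "_river" (the river instance name "my_dummy" is illustrative):

    client.prepareIndex("_river", "my_dummy", "_meta")
            .setSource(XContentFactory.jsonBuilder()
                    .startObject()
                    .field("type", "dummy") // resolved to DummyRiverModule by the river type lookup
                    .endObject())
            .get();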
diff --git a/src/main/java/org/elasticsearch/river/routing/RiverRouting.java b/src/main/java/org/elasticsearch/river/routing/RiverRouting.java
new file mode 100644
index 0000000..0fe41d6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/routing/RiverRouting.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.routing;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.river.RiverName;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class RiverRouting implements Streamable {
+
+ private RiverName riverName;
+
+ private DiscoveryNode node;
+
+ private RiverRouting() {
+ }
+
+ RiverRouting(RiverName riverName, DiscoveryNode node) {
+ this.riverName = riverName;
+ this.node = node;
+ }
+
+ public RiverName riverName() {
+ return riverName;
+ }
+
+ /**
+ * The node the river is allocated to, <tt>null</tt> if it's not allocated.
+ */
+ public DiscoveryNode node() {
+ return node;
+ }
+
+ void node(DiscoveryNode node) {
+ this.node = node;
+ }
+
+ public static RiverRouting readRiverRouting(StreamInput in) throws IOException {
+ RiverRouting routing = new RiverRouting();
+ routing.readFrom(in);
+ return routing;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ riverName = new RiverName(in.readString(), in.readString());
+ if (in.readBoolean()) {
+ node = DiscoveryNode.readNode(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(riverName.type());
+ out.writeString(riverName.name());
+ if (node == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ node.writeTo(out);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/river/routing/RiversRouter.java b/src/main/java/org/elasticsearch/river/routing/RiversRouter.java
new file mode 100644
index 0000000..5984b24
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/routing/RiversRouter.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.routing;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.shard.IllegalIndexShardStateException;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.river.RiverIndexName;
+import org.elasticsearch.river.RiverName;
+import org.elasticsearch.river.cluster.RiverClusterService;
+import org.elasticsearch.river.cluster.RiverClusterState;
+import org.elasticsearch.river.cluster.RiverClusterStateUpdateTask;
+import org.elasticsearch.river.cluster.RiverNodeHelper;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ *
+ */
+public class RiversRouter extends AbstractLifecycleComponent<RiversRouter> implements ClusterStateListener {
+
+ private static final TimeValue RIVER_START_RETRY_INTERVAL = TimeValue.timeValueMillis(1000);
+ private static final int RIVER_START_MAX_RETRIES = 5;
+
+ private final String riverIndexName;
+
+ private final Client client;
+
+ private final RiverClusterService riverClusterService;
+
+ private final ThreadPool threadPool;
+
+ @Inject
+ public RiversRouter(Settings settings, Client client, ClusterService clusterService, RiverClusterService riverClusterService, ThreadPool threadPool) {
+ super(settings);
+ this.riverIndexName = RiverIndexName.Conf.indexName(settings);
+ this.riverClusterService = riverClusterService;
+ this.client = client;
+ this.threadPool = threadPool;
+ clusterService.add(this);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public void clusterChanged(final ClusterChangedEvent event) {
+ if (!event.localNodeMaster()) {
+ return;
+ }
+ final String source = "reroute_rivers_node_changed";
+ // we'll try again a few times if we don't find the river _meta document while the type is there
+ final CountDown countDown = new CountDown(RIVER_START_MAX_RETRIES);
+ riverClusterService.submitStateUpdateTask(source, new RiverClusterStateUpdateTask() {
+ @Override
+ public RiverClusterState execute(RiverClusterState currentState) {
+ return updateRiverClusterState(source, currentState, event.state(), countDown);
+ }
+ });
+ }
+
+ protected RiverClusterState updateRiverClusterState(final String source, final RiverClusterState currentState,
+ ClusterState newClusterState, final CountDown countDown) {
+ if (!newClusterState.metaData().hasIndex(riverIndexName)) {
+ // if there are routings, publish an empty one (so it will be deleted on nodes), otherwise, return the same state
+ if (!currentState.routing().isEmpty()) {
+ return RiverClusterState.builder().state(currentState).routing(RiversRouting.builder()).build();
+ }
+ return currentState;
+ }
+
+ RiversRouting.Builder routingBuilder = RiversRouting.builder().routing(currentState.routing());
+ boolean dirty = false;
+ IndexMetaData indexMetaData = newClusterState.metaData().index(riverIndexName);
+
+ boolean metaFound = true;
+ // go over and create new river routing (with no node) for new types (rivers names)
+ for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
+ String mappingType = cursor.value.type(); // mapping type is the name of the river
+ if (MapperService.DEFAULT_MAPPING.equals(mappingType)) {
+ continue;
+ }
+ if (!currentState.routing().hasRiverByName(mappingType)) {
+ // no river, we need to add it to the routing with no node allocation
+ try {
+ GetResponse getResponse = client.prepareGet(riverIndexName, mappingType, "_meta").setPreference("_primary").get();
+ if (getResponse.isExists()) {
+
+ logger.debug("{}/{}/_meta document found.", riverIndexName, mappingType);
+
+ String riverType = XContentMapValues.nodeStringValue(getResponse.getSourceAsMap().get("type"), null);
+ if (riverType == null) {
+ logger.warn("no river type provided for [{}], ignoring...", riverIndexName);
+ } else {
+ routingBuilder.put(new RiverRouting(new RiverName(riverType, mappingType), null));
+ dirty = true;
+ }
+ } else {
+ // At least one type does not have _meta
+ metaFound = false;
+ }
+ } catch (NoShardAvailableActionException e) {
+ // ignore, we will get it next time...
+ } catch (ClusterBlockException e) {
+ // ignore, we will get it next time
+ } catch (IndexMissingException e) {
+ // ignore, we will get it next time
+ } catch (IllegalIndexShardStateException e) {
+ // ignore, we will get it next time
+ } catch (Exception e) {
+ logger.warn("failed to get/parse _meta for [{}]", e, mappingType);
+ }
+ }
+ }
+
+ // At least one type does not have _meta, so we are
+ // going to reschedule some checks
+ if (!metaFound) {
+ if (countDown.countDown()) {
+ logger.warn("no river _meta document found after {} attempts", RIVER_START_MAX_RETRIES);
+ } else {
+ logger.debug("no river _meta document found retrying in {} ms", RIVER_START_RETRY_INTERVAL.millis());
+ try {
+ threadPool.schedule(RIVER_START_RETRY_INTERVAL, ThreadPool.Names.GENERIC, new Runnable() {
+ @Override
+ public void run() {
+ riverClusterService.submitStateUpdateTask(source, new RiverClusterStateUpdateTask() {
+ @Override
+ public RiverClusterState execute(RiverClusterState currentState) {
+ return updateRiverClusterState(source, currentState, riverClusterService.state(), countDown);
+ }
+ });
+ }
+ });
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Couldn't schedule river start retry, node might be shutting down", ex);
+ }
+ }
+ }
+
+ // now, remove routings that were deleted
+ // also, apply nodes that were removed and rivers were running on
+ for (RiverRouting routing : currentState.routing()) {
+ if (!indexMetaData.mappings().containsKey(routing.riverName().name())) {
+ routingBuilder.remove(routing);
+ dirty = true;
+ } else if (routing.node() != null && !newClusterState.nodes().nodeExists(routing.node().id())) {
+ routingBuilder.remove(routing);
+ routingBuilder.put(new RiverRouting(routing.riverName(), null));
+ dirty = true;
+ }
+ }
+
+ // build a list from nodes to rivers
+ Map<DiscoveryNode, List<RiverRouting>> nodesToRivers = Maps.newHashMap();
+
+ for (DiscoveryNode node : newClusterState.nodes()) {
+ if (RiverNodeHelper.isRiverNode(node)) {
+ nodesToRivers.put(node, Lists.<RiverRouting>newArrayList());
+ }
+ }
+
+ List<RiverRouting> unassigned = Lists.newArrayList();
+ for (RiverRouting routing : routingBuilder.build()) {
+ if (routing.node() == null) {
+ unassigned.add(routing);
+ } else {
+ List<RiverRouting> l = nodesToRivers.get(routing.node());
+ if (l == null) {
+ l = Lists.newArrayList();
+ nodesToRivers.put(routing.node(), l);
+ }
+ l.add(routing);
+ }
+ }
+ for (Iterator<RiverRouting> it = unassigned.iterator(); it.hasNext(); ) {
+ RiverRouting routing = it.next();
+ DiscoveryNode smallest = null;
+ int smallestSize = Integer.MAX_VALUE;
+ for (Map.Entry<DiscoveryNode, List<RiverRouting>> entry : nodesToRivers.entrySet()) {
+ if (RiverNodeHelper.isRiverNode(entry.getKey(), routing.riverName())) {
+ if (entry.getValue().size() < smallestSize) {
+ smallestSize = entry.getValue().size();
+ smallest = entry.getKey();
+ }
+ }
+ }
+ if (smallest != null) {
+ dirty = true;
+ it.remove();
+ routing.node(smallest);
+ nodesToRivers.get(smallest).add(routing);
+ logger.debug("going to allocate river [{}] on node {}", routing.riverName().getName(), smallest);
+ }
+ }
+
+
+ // add relocation logic...
+
+ if (dirty) {
+ return RiverClusterState.builder().state(currentState).routing(routingBuilder).build();
+ }
+ return currentState;
+ }
+}
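
The allocation loop above assigns each unassigned river to the eligible node currently running the fewest rivers. A minimal, self-contained sketch of that least-loaded selection, with plain strings standing in for DiscoveryNode and RiverRouting (hypothetical names, for illustration only):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LeastLoadedDemo {

    // mirrors the selection loop in RiversRouter: pick the node with the fewest rivers
    static String pickLeastLoaded(Map<String, List<String>> nodesToRivers) {
        String smallest = null;
        int smallestSize = Integer.MAX_VALUE;
        for (Map.Entry<String, List<String>> entry : nodesToRivers.entrySet()) {
            if (entry.getValue().size() < smallestSize) {
                smallestSize = entry.getValue().size();
                smallest = entry.getKey();
            }
        }
        return smallest;
    }

    public static void main(String[] args) {
        Map<String, List<String>> nodesToRivers = new HashMap<String, List<String>>();
        nodesToRivers.put("node1", new ArrayList<String>(Arrays.asList("twitter", "couchdb")));
        nodesToRivers.put("node2", new ArrayList<String>(Arrays.asList("rabbitmq")));
        String target = pickLeastLoaded(nodesToRivers);
        nodesToRivers.get(target).add("my_new_river");
        System.out.println(target); // node2, since it runs fewer rivers
    }
}
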
diff --git a/src/main/java/org/elasticsearch/river/routing/RiversRouting.java b/src/main/java/org/elasticsearch/river/routing/RiversRouting.java
new file mode 100644
index 0000000..837e3dd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/river/routing/RiversRouting.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river.routing;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.river.RiverName;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * An immutable routing table mapping river names to their {@link RiverRouting} entries.
+ */
+public class RiversRouting implements Iterable<RiverRouting> {
+
+ public static final RiversRouting EMPTY = RiversRouting.builder().build();
+
+ private final ImmutableMap<RiverName, RiverRouting> rivers;
+
+ private RiversRouting(ImmutableMap<RiverName, RiverRouting> rivers) {
+ this.rivers = rivers;
+ }
+
+ public boolean isEmpty() {
+ return rivers.isEmpty();
+ }
+
+ public RiverRouting routing(RiverName riverName) {
+ return rivers.get(riverName);
+ }
+
+ public boolean hasRiverByName(String name) {
+ for (RiverName riverName : rivers.keySet()) {
+ if (riverName.name().equals(name)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public Iterator<RiverRouting> iterator() {
+ return rivers.values().iterator();
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static class Builder {
+
+ private MapBuilder<RiverName, RiverRouting> rivers = MapBuilder.newMapBuilder();
+
+ public Builder routing(RiversRouting routing) {
+ rivers.putAll(routing.rivers);
+ return this;
+ }
+
+ public Builder put(RiverRouting routing) {
+ rivers.put(routing.riverName(), routing);
+ return this;
+ }
+
+ public Builder remove(RiverRouting routing) {
+ rivers.remove(routing.riverName());
+ return this;
+ }
+
+ public Builder remove(RiverName riverName) {
+ rivers.remove(riverName);
+ return this;
+ }
+
+ public Builder remove(String riverName) {
+ // remove by plain name, iterator-based so the underlying map is not modified during iteration
+ for (Iterator<RiverName> it = rivers.map().keySet().iterator(); it.hasNext(); ) {
+ if (it.next().name().equals(riverName)) {
+ it.remove();
+ }
+ }
+ return this;
+ }
+
+ public RiversRouting build() {
+ return new RiversRouting(rivers.immutableMap());
+ }
+
+ public static RiversRouting readFrom(StreamInput in) throws IOException {
+ Builder builder = new Builder();
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ builder.put(RiverRouting.readRiverRouting(in));
+ }
+ return builder.build();
+ }
+
+ public static void writeTo(RiversRouting routing, StreamOutput out) throws IOException {
+ out.writeVInt(routing.rivers.size());
+ for (RiverRouting riverRouting : routing) {
+ riverRouting.writeTo(out);
+ }
+ }
+ }
+}
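
A hedged usage sketch of the RiversRouting builder above. The RiverRouting constructor signature is inferred from its use in RiversRouter earlier in this patch (a null node means the river is not yet allocated), and the demo sits in the same package since the constructor may be package-private:

package org.elasticsearch.river.routing;

import org.elasticsearch.river.RiverName;

public class RoutingBuilderDemo {
    public static void main(String[] args) {
        // an unallocated river: type "dummy", name "my_river", no node yet
        RiversRouting routing = RiversRouting.builder()
                .put(new RiverRouting(new RiverName("dummy", "my_river"), null))
                .build();
        System.out.println(routing.hasRiverByName("my_river")); // true
        System.out.println(routing.isEmpty());                  // false
    }
}
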
diff --git a/src/main/java/org/elasticsearch/script/AbstractDoubleSearchScript.java b/src/main/java/org/elasticsearch/script/AbstractDoubleSearchScript.java
new file mode 100644
index 0000000..6a803ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/AbstractDoubleSearchScript.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+/**
+ * A simpler base class than {@link AbstractSearchScript} for computations
+ * that return a double.
+ */
+public abstract class AbstractDoubleSearchScript extends AbstractSearchScript {
+
+ @Override
+ public Object run() {
+ return runAsDouble();
+ }
+
+ @Override
+ public abstract double runAsDouble();
+
+ @Override
+ public long runAsLong() {
+ return (long) runAsDouble();
+ }
+
+ @Override
+ public float runAsFloat() {
+ return (float) runAsDouble();
+ }
+}
\ No newline at end of file
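
A hypothetical subclass sketch (the field name and doubling logic are invented): docFieldDoubles() comes from AbstractSearchScript, added later in this patch, and ScriptDocValues.Doubles is assumed to expose getValue():

package org.elasticsearch.script;

// Hypothetical: returns twice the value of the numeric "price" field.
public class DoublePriceScript extends AbstractDoubleSearchScript {
    @Override
    public double runAsDouble() {
        return docFieldDoubles("price").getValue() * 2;
    }
}
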
diff --git a/src/main/java/org/elasticsearch/script/AbstractExecutableScript.java b/src/main/java/org/elasticsearch/script/AbstractExecutableScript.java
new file mode 100644
index 0000000..41ed875
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/AbstractExecutableScript.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+public abstract class AbstractExecutableScript implements ExecutableScript {
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java b/src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java
new file mode 100644
index 0000000..21db0b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+/**
+ * A simpler base class than {@link AbstractSearchScript} for computations
+ * that return a float.
+ */
+public abstract class AbstractFloatSearchScript extends AbstractSearchScript {
+
+ @Override
+ public Object run() {
+ return runAsFloat();
+ }
+
+ @Override
+ public abstract float runAsFloat();
+
+ @Override
+ public double runAsDouble() {
+ return runAsFloat();
+ }
+
+ @Override
+ public long runAsLong() {
+ return (long) runAsFloat();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/script/AbstractLongSearchScript.java b/src/main/java/org/elasticsearch/script/AbstractLongSearchScript.java
new file mode 100644
index 0000000..53a2400
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/AbstractLongSearchScript.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+/**
+ * A simpler base class than {@link AbstractSearchScript} for computations
+ * that return a long.
+ */
+public abstract class AbstractLongSearchScript extends AbstractSearchScript {
+
+ @Override
+ public Object run() {
+ return runAsLong();
+ }
+
+ @Override
+ public abstract long runAsLong();
+
+ @Override
+ public double runAsDouble() {
+ return runAsLong();
+ }
+
+ @Override
+ public float runAsFloat() {
+ return runAsLong();
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/script/AbstractSearchScript.java b/src/main/java/org/elasticsearch/script/AbstractSearchScript.java
new file mode 100644
index 0000000..769aac0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/AbstractSearchScript.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.search.lookup.DocLookup;
+import org.elasticsearch.search.lookup.FieldsLookup;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.lookup.SourceLookup;
+
+import java.util.Map;
+
+/**
+ * A base class for any script type that is used during the search process (custom score, facets, and so on).
+ * <p>
+ * If the script returns a specific numeric type, consider overriding the type-specific base classes
+ * such as {@link AbstractDoubleSearchScript}, {@link AbstractFloatSearchScript} and {@link AbstractLongSearchScript}
+ * for better performance.
+ * <p>
+ * The user is required to implement the {@link #run()} method.
+ */
+public abstract class AbstractSearchScript extends AbstractExecutableScript implements SearchScript {
+
+ private SearchLookup lookup;
+
+ private float score = Float.NaN;
+
+ /**
+ * Returns the current score. Only applicable when used as a scoring script in a custom score query.
+ * For other cases, use {@link #doc()} and get the score from it.
+ */
+ protected final float score() {
+ return score;
+ }
+
+ /**
+ * Returns the doc lookup, allowing access to field data (cached) values as well as the current document score
+ * (where applicable).
+ */
+ protected final DocLookup doc() {
+ return lookup.doc();
+ }
+
+ /**
+ * Returns field data strings access for the provided field.
+ */
+ protected ScriptDocValues.Strings docFieldStrings(String field) {
+ return (ScriptDocValues.Strings) doc().get(field);
+ }
+
+ /**
+ * Returns field data double (floating point) access for the provided field.
+ */
+ protected ScriptDocValues.Doubles docFieldDoubles(String field) {
+ return (ScriptDocValues.Doubles) doc().get(field);
+ }
+
+ /**
+ * Returns field data long (integers) access for the provided field.
+ */
+ protected ScriptDocValues.Longs docFieldLongs(String field) {
+ return (ScriptDocValues.Longs) doc().get(field);
+ }
+
+ /**
+ * Allows access to the actual source (loaded and parsed).
+ */
+ protected final SourceLookup source() {
+ return lookup.source();
+ }
+
+ /**
+ * Allows access to statistics on terms and fields.
+ */
+ protected final IndexLookup indexLookup() {
+ return lookup.indexLookup();
+ }
+
+ /**
+ * Allows access to the *stored* fields.
+ */
+ protected final FieldsLookup fields() {
+ return lookup.fields();
+ }
+
+ void setLookup(SearchLookup lookup) {
+ this.lookup = lookup;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ lookup.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) {
+ lookup.setNextReader(context);
+ }
+
+ @Override
+ public void setNextDocId(int doc) {
+ lookup.setNextDocId(doc);
+ }
+
+ @Override
+ public void setNextSource(Map<String, Object> source) {
+ lookup.source().setNextSource(source);
+ }
+
+ @Override
+ public void setNextScore(float score) {
+ this.score = score;
+ }
+
+ @Override
+ public float runAsFloat() {
+ return ((Number) run()).floatValue();
+ }
+
+ @Override
+ public long runAsLong() {
+ return ((Number) run()).longValue();
+ }
+
+ @Override
+ public double runAsDouble() {
+ return ((Number) run()).doubleValue();
+ }
+}
\ No newline at end of file
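
A hedged sketch tying the helpers above together: a float script that scales the current score by a stored popularity field. The class and field names are invented, and score() is only meaningful when run as a custom-score script:

package org.elasticsearch.script;

// Hypothetical: scale the query score by log(1 + popularity).
public class PopularityBoostScript extends AbstractFloatSearchScript {
    @Override
    public float runAsFloat() {
        double popularity = docFieldDoubles("popularity").getValue();
        return score() * (float) Math.log1p(popularity);
    }
}
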
diff --git a/src/main/java/org/elasticsearch/script/CompiledScript.java b/src/main/java/org/elasticsearch/script/CompiledScript.java
new file mode 100644
index 0000000..54cca31
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/CompiledScript.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+/**
+ * A compiled script, holding the language it was compiled with and the engine-specific compiled representation.
+ */
+public class CompiledScript {
+
+ private final String type;
+
+ private final Object compiled;
+
+ public CompiledScript(String type, Object compiled) {
+ this.type = type;
+ this.compiled = compiled;
+ }
+
+ public String lang() {
+ return type;
+ }
+
+ public Object compiled() {
+ return compiled;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/script/ExecutableScript.java b/src/main/java/org/elasticsearch/script/ExecutableScript.java
new file mode 100644
index 0000000..43c9902
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/ExecutableScript.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+/**
+ * An executable script; a single instance must not be used concurrently.
+ */
+public interface ExecutableScript {
+
+ /**
+ * Sets a runtime variable visible to the script before {@link #run()} is called.
+ */
+ void setNextVar(String name, Object value);
+
+ /**
+ * Executes the script.
+ */
+ Object run();
+
+ /**
+ * Unwraps a possible script value. For example, when passing vars and expecting the returned value to
+ * be part of the vars.
+ */
+ Object unwrap(Object value);
+}
diff --git a/src/main/java/org/elasticsearch/script/ExplainableSearchScript.java b/src/main/java/org/elasticsearch/script/ExplainableSearchScript.java
new file mode 100644
index 0000000..c4a5a65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/ExplainableSearchScript.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.apache.lucene.search.Explanation;
+
+/**
+ * To be implemented by a {@link SearchScript} which can provide an {@link Explanation} of its score.
+ */
+public interface ExplainableSearchScript extends SearchScript {
+
+ /**
+ * Build the explanation of the current document being scored
+ *
+ * @param subQueryExpl the explanation of the subQuery
+ */
+ Explanation explain(Explanation subQueryExpl);
+
+}
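
A sketch of what an implementation might look like, assuming the Lucene 4.x Explanation API (a concrete class with addDetail()); the class and score formula are hypothetical:

package org.elasticsearch.script;

import org.apache.lucene.search.Explanation;

// Hypothetical explainable variant of a custom score script.
public class ExplainedDoublingScript extends AbstractFloatSearchScript implements ExplainableSearchScript {

    @Override
    public float runAsFloat() {
        return score() * 2.0f;
    }

    @Override
    public Explanation explain(Explanation subQueryExpl) {
        Explanation explanation = new Explanation(runAsFloat(), "script score: 2 * subquery score");
        explanation.addDetail(subQueryExpl);
        return explanation;
    }
}
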
diff --git a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
new file mode 100644
index 0000000..1986551
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.util.Map;
+
+/**
+ * A native script engine service.
+ */
+public class NativeScriptEngineService extends AbstractComponent implements ScriptEngineService {
+
+ private final ImmutableMap<String, NativeScriptFactory> scripts;
+
+ @Inject
+ public NativeScriptEngineService(Settings settings, Map<String, NativeScriptFactory> scripts) {
+ super(settings);
+ this.scripts = ImmutableMap.copyOf(scripts);
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{"native"};
+ }
+
+ @Override
+ public String[] extensions() {
+ return new String[0];
+ }
+
+ @Override
+ public Object compile(String script) {
+ NativeScriptFactory scriptFactory = scripts.get(script);
+ if (scriptFactory != null) {
+ return scriptFactory;
+ }
+ throw new ElasticsearchIllegalArgumentException("Native script [" + script + "] not found");
+ }
+
+ @Override
+ public ExecutableScript executable(Object compiledScript, @Nullable Map<String, Object> vars) {
+ NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript;
+ return scriptFactory.newScript(vars);
+ }
+
+ @Override
+ public SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+ NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript;
+ AbstractSearchScript script = (AbstractSearchScript) scriptFactory.newScript(vars);
+ script.setLookup(lookup);
+ return script;
+ }
+
+ @Override
+ public Object execute(Object compiledScript, Map<String, Object> vars) {
+ return executable(compiledScript, vars).run();
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+
+ @Override
+ public void close() {
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/script/NativeScriptFactory.java b/src/main/java/org/elasticsearch/script/NativeScriptFactory.java
new file mode 100644
index 0000000..016103c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/NativeScriptFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.common.Nullable;
+
+import java.util.Map;
+
+/**
+ * A factory to create instances of either {@link ExecutableScript} or {@link SearchScript}. Note,
+ * if this factory creates {@link SearchScript}, it must extend {@link AbstractSearchScript}.
+ *
+ * @see AbstractExecutableScript
+ * @see AbstractSearchScript
+ * @see AbstractFloatSearchScript
+ * @see AbstractLongSearchScript
+ * @see AbstractDoubleSearchScript
+ */
+public interface NativeScriptFactory {
+
+ /**
+ * Creates a new instance of either a {@link ExecutableScript} or a {@link SearchScript}.
+ *
+ * @param params The parameters passed to the script. Can be <tt>null</tt>.
+ */
+ ExecutableScript newScript(@Nullable Map<String, Object> params);
+}
\ No newline at end of file
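
A hedged end-to-end sketch of a native script factory; the script name, field names and summing logic are hypothetical. Because the factory returns a search script, it extends AbstractSearchScript (via AbstractDoubleSearchScript), as the note above requires:

package org.elasticsearch.script;

import java.util.Map;

import org.elasticsearch.common.Nullable;

// Hypothetical factory for a "field_sum" script adding two numeric doc fields.
public class FieldSumScriptFactory implements NativeScriptFactory {
    @Override
    public ExecutableScript newScript(@Nullable Map<String, Object> params) {
        return new AbstractDoubleSearchScript() {
            @Override
            public double runAsDouble() {
                return docFieldDoubles("a").getValue() + docFieldDoubles("b").getValue();
            }
        };
    }
}
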
diff --git a/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/src/main/java/org/elasticsearch/script/ScriptEngineService.java
new file mode 100644
index 0000000..516d89a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/ScriptEngineService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.util.Map;
+
+/**
+ * A pluggable script engine, compiling scripts of the languages it declares via {@link #types()}.
+ */
+public interface ScriptEngineService {
+
+ String[] types();
+
+ String[] extensions();
+
+ Object compile(String script);
+
+ ExecutableScript executable(Object compiledScript, @Nullable Map<String, Object> vars);
+
+ SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars);
+
+ Object execute(Object compiledScript, Map<String, Object> vars);
+
+ Object unwrap(Object value);
+
+ void close();
+}
diff --git a/src/main/java/org/elasticsearch/script/ScriptException.java b/src/main/java/org/elasticsearch/script/ScriptException.java
new file mode 100644
index 0000000..4b031d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/ScriptException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * An exception thrown while compiling or executing a script.
+ */
+public class ScriptException extends ElasticsearchException {
+
+ public ScriptException(String msg) {
+ super(msg);
+ }
+
+ public ScriptException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/script/ScriptModule.java b/src/main/java/org/elasticsearch/script/ScriptModule.java
new file mode 100644
index 0000000..bee40a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/ScriptModule.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.mvel.MvelScriptEngineService;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Module wiring script engines and native script factories into the injector.
+ */
+public class ScriptModule extends AbstractModule {
+
+ private final Settings settings;
+
+ private final List<Class<? extends ScriptEngineService>> scriptEngines = Lists.newArrayList();
+
+ private final Map<String, Class<? extends NativeScriptFactory>> scripts = Maps.newHashMap();
+
+ public ScriptModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ public void addScriptEngine(Class<? extends ScriptEngineService> scriptEngine) {
+ scriptEngines.add(scriptEngine);
+ }
+
+ public void registerScript(String name, Class<? extends NativeScriptFactory> script) {
+ scripts.put(name, script);
+ }
+
+ @Override
+ protected void configure() {
+ MapBinder<String, NativeScriptFactory> scriptsBinder
+ = MapBinder.newMapBinder(binder(), String.class, NativeScriptFactory.class);
+ for (Map.Entry<String, Class<? extends NativeScriptFactory>> entry : scripts.entrySet()) {
+ scriptsBinder.addBinding(entry.getKey()).to(entry.getValue());
+ }
+
+ // now, check for config based ones
+ Map<String, Settings> nativeSettings = settings.getGroups("script.native");
+ for (Map.Entry<String, Settings> entry : nativeSettings.entrySet()) {
+ String name = entry.getKey();
+ Class<? extends NativeScriptFactory> type = entry.getValue().getAsClass("type", NativeScriptFactory.class);
+ if (type == NativeScriptFactory.class) {
+ throw new ElasticsearchIllegalArgumentException("type is missing for native script [" + name + "]");
+ }
+ scriptsBinder.addBinding(name).to(type);
+ }
+
+ Multibinder<ScriptEngineService> multibinder = Multibinder.newSetBinder(binder(), ScriptEngineService.class);
+ multibinder.addBinding().to(NativeScriptEngineService.class);
+ try {
+ multibinder.addBinding().to(MvelScriptEngineService.class);
+ } catch (Throwable t) {
+ // no MVEL
+ }
+ for (Class<? extends ScriptEngineService> scriptEngine : scriptEngines) {
+ multibinder.addBinding().to(scriptEngine);
+ }
+
+ bind(ScriptService.class).asEagerSingleton();
+ }
+}
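
Two registration paths follow from configure() above. A plugin can contribute a factory through this module (the onModule(...) plugin hook is the conventional mechanism and is assumed here, as is the FieldSumScriptFactory name); alternatively, the settings loop picks up entries of the form script.native.field_sum.type set to the factory's fully qualified class name:

// In a plugin class (hypothetical):
public void onModule(ScriptModule module) {
    module.registerScript("field_sum", FieldSumScriptFactory.class);
}
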
diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java
new file mode 100644
index 0000000..a859e13
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/ScriptService.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.base.Charsets;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.watcher.FileChangesListener;
+import org.elasticsearch.watcher.FileWatcher;
+import org.elasticsearch.watcher.ResourceWatcherService;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Manages script compilation, the compiled-script caches, and file-based scripts from the config/scripts directory.
+ */
+public class ScriptService extends AbstractComponent {
+
+ private final String defaultLang;
+
+ private final ImmutableMap<String, ScriptEngineService> scriptEngines;
+
+ private final ConcurrentMap<String, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap();
+
+ private final Cache<CacheKey, CompiledScript> cache;
+ private final File scriptsDirectory;
+
+ private final boolean disableDynamic;
+
+ @Inject
+ public ScriptService(Settings settings, Environment env, Set<ScriptEngineService> scriptEngines,
+ ResourceWatcherService resourceWatcherService) {
+ super(settings);
+
+ int cacheMaxSize = componentSettings.getAsInt("cache.max_size", 500);
+ TimeValue cacheExpire = componentSettings.getAsTime("cache.expire", null);
+ logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire);
+
+ this.defaultLang = componentSettings.get("default_lang", "mvel");
+ this.disableDynamic = componentSettings.getAsBoolean("disable_dynamic", false);
+
+ CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
+ if (cacheMaxSize >= 0) {
+ cacheBuilder.maximumSize(cacheMaxSize);
+ }
+ if (cacheExpire != null) {
+ cacheBuilder.expireAfterAccess(cacheExpire.nanos(), TimeUnit.NANOSECONDS);
+ }
+ this.cache = cacheBuilder.build();
+
+ ImmutableMap.Builder<String, ScriptEngineService> builder = ImmutableMap.builder();
+ for (ScriptEngineService scriptEngine : scriptEngines) {
+ for (String type : scriptEngine.types()) {
+ builder.put(type, scriptEngine);
+ }
+ }
+ this.scriptEngines = builder.build();
+
+ // put some default optimized scripts
+ staticCache.put("doc.score", new CompiledScript("native", new DocScoreNativeScriptFactory()));
+
+ // add file watcher for static scripts
+ scriptsDirectory = new File(env.configFile(), "scripts");
+ FileWatcher fileWatcher = new FileWatcher(scriptsDirectory);
+ fileWatcher.addListener(new ScriptChangesListener());
+
+ if (componentSettings.getAsBoolean("auto_reload_enabled", true)) {
+ // automatic reload is enabled - register scripts
+ resourceWatcherService.add(fileWatcher);
+ } else {
+ // automatic reload is disabled, just load the scripts once
+ fileWatcher.init();
+ }
+ }
+
+ public void close() {
+ for (ScriptEngineService engineService : scriptEngines.values()) {
+ engineService.close();
+ }
+ }
+
+ public CompiledScript compile(String script) {
+ return compile(defaultLang, script);
+ }
+
+ public CompiledScript compile(String lang, String script) {
+ CompiledScript compiled = staticCache.get(script);
+ if (compiled != null) {
+ return compiled;
+ }
+ if (lang == null) {
+ lang = defaultLang;
+ }
+ if (dynamicScriptDisabled(lang)) {
+ throw new ScriptException("dynamic scripting disabled");
+ }
+ CacheKey cacheKey = new CacheKey(lang, script);
+ compiled = cache.getIfPresent(cacheKey);
+ if (compiled != null) {
+ return compiled;
+ }
+ // not the end of the world if we compile it twice...
+ ScriptEngineService service = scriptEngines.get(lang);
+ if (service == null) {
+ throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]");
+ }
+ compiled = new CompiledScript(lang, service.compile(script));
+ cache.put(cacheKey, compiled);
+ return compiled;
+ }
+
+ public ExecutableScript executable(String lang, String script, Map vars) {
+ return executable(compile(lang, script), vars);
+ }
+
+ public ExecutableScript executable(CompiledScript compiledScript, Map vars) {
+ return scriptEngines.get(compiledScript.lang()).executable(compiledScript.compiled(), vars);
+ }
+
+ public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+ return scriptEngines.get(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars);
+ }
+
+ public SearchScript search(SearchLookup lookup, String lang, String script, @Nullable Map<String, Object> vars) {
+ return search(compile(lang, script), lookup, vars);
+ }
+
+ public SearchScript search(MapperService mapperService, IndexFieldDataService fieldDataService, String lang, String script, @Nullable Map<String, Object> vars) {
+ return search(compile(lang, script), new SearchLookup(mapperService, fieldDataService, null), vars);
+ }
+
+ public Object execute(CompiledScript compiledScript, Map vars) {
+ return scriptEngines.get(compiledScript.lang()).execute(compiledScript.compiled(), vars);
+ }
+
+ public void clear() {
+ cache.invalidateAll();
+ }
+
+ private boolean dynamicScriptDisabled(String lang) {
+ if (!disableDynamic) {
+ return false;
+ }
+ // we allow "native" executions since they register through plugins, so they are "allowed"
+ return !"native".equals(lang);
+ }
+
+ private class ScriptChangesListener extends FileChangesListener {
+
+ private Tuple<String, String> scriptNameExt(File file) {
+ String scriptPath = scriptsDirectory.toURI().relativize(file.toURI()).getPath();
+ int extIndex = scriptPath.lastIndexOf('.');
+ if (extIndex != -1) {
+ String ext = scriptPath.substring(extIndex + 1);
+ String scriptName = scriptPath.substring(0, extIndex).replace(File.separatorChar, '_');
+ return new Tuple<String, String>(scriptName, ext);
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public void onFileInit(File file) {
+ Tuple<String, String> scriptNameExt = scriptNameExt(file);
+ if (scriptNameExt != null) {
+ boolean found = false;
+ for (ScriptEngineService engineService : scriptEngines.values()) {
+ for (String s : engineService.extensions()) {
+ if (s.equals(scriptNameExt.v2())) {
+ found = true;
+ try {
+ logger.trace("compiling script file " + file.getAbsolutePath());
+ String script = Streams.copyToString(new InputStreamReader(new FileInputStream(file), Charsets.UTF_8));
+ staticCache.put(scriptNameExt.v1(), new CompiledScript(engineService.types()[0], engineService.compile(script)));
+ } catch (Throwable e) {
+ logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
+ }
+ break;
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ if (!found) {
+ logger.warn("no script engine found for [{}]", scriptNameExt.v2());
+ }
+ }
+ }
+
+ @Override
+ public void onFileCreated(File file) {
+ onFileInit(file);
+ }
+
+ @Override
+ public void onFileDeleted(File file) {
+ Tuple<String, String> scriptNameExt = scriptNameExt(file);
+ // scriptNameExt is null for files without an extension; nothing was registered for those
+ if (scriptNameExt != null) {
+ logger.trace("removing script file " + file.getAbsolutePath());
+ staticCache.remove(scriptNameExt.v1());
+ }
+ }
+
+ @Override
+ public void onFileChanged(File file) {
+ onFileInit(file);
+ }
+
+ }
+
+ public static class CacheKey {
+ public final String lang;
+ public final String script;
+
+ public CacheKey(String lang, String script) {
+ this.lang = lang;
+ this.script = script;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof CacheKey)) {
+ return false;
+ }
+ CacheKey other = (CacheKey) o;
+ return lang.equals(other.lang) && script.equals(other.script);
+ }
+
+ @Override
+ public int hashCode() {
+ return lang.hashCode() + 31 * script.hashCode();
+ }
+ }
+
+ public static class DocScoreNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new DocScoreSearchScript();
+ }
+ }
+
+ public static class DocScoreSearchScript extends AbstractFloatSearchScript {
+ @Override
+ public float runAsFloat() {
+ try {
+ return doc().score();
+ } catch (IOException e) {
+ return 0;
+ }
+ }
+ }
+}
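
A hedged usage sketch against the public methods above; an injected ScriptService instance is assumed to be in scope:

import java.util.HashMap;
import java.util.Map;

// compile once, execute many times with different vars ("mvel" is the default lang above)
CompiledScript compiled = scriptService.compile("mvel", "a + b");
Map<String, Object> vars = new HashMap<String, Object>();
vars.put("a", 1);
vars.put("b", 2);
Object result = scriptService.execute(compiled, vars); // Integer 3
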
diff --git a/src/main/java/org/elasticsearch/script/SearchScript.java b/src/main/java/org/elasticsearch/script/SearchScript.java
new file mode 100644
index 0000000..a02e67e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/SearchScript.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.script;
+
+import org.elasticsearch.common.lucene.ReaderContextAware;
+import org.elasticsearch.common.lucene.ScorerAware;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.util.Map;
+
+/**
+ * A search script.
+ *
+ * @see ExplainableSearchScript for script which can explain a score
+ */
+public interface SearchScript extends ExecutableScript, ReaderContextAware, ScorerAware {
+
+ void setNextDocId(int doc);
+
+ void setNextSource(Map<String, Object> source);
+
+ void setNextScore(float score);
+
+ float runAsFloat();
+
+ long runAsLong();
+
+ double runAsDouble();
+
+ public static class Builder {
+
+ private String script;
+ private String lang;
+ private Map<String, Object> params;
+
+ public Builder script(String script) {
+ this.script = script;
+ return this;
+ }
+
+ public Builder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ public Builder params(Map<String, Object> params) {
+ this.params = params;
+ return this;
+ }
+
+ public SearchScript build(SearchContext context) {
+ return build(context.scriptService(), context.lookup());
+ }
+
+ public SearchScript build(ScriptService service, SearchLookup lookup) {
+ return service.search(lookup, lang, script, params);
+ }
+ }
+}
\ No newline at end of file
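
A hedged sketch of the Builder above; scriptService and lookup are assumed to be in scope, and the doc['...'].value syntax follows the mvel engine added later in this patch:

import java.util.Collections;

SearchScript script = new SearchScript.Builder()
        .lang("mvel")
        .script("doc['price'].value * factor")
        .params(Collections.<String, Object>singletonMap("factor", 1.1))
        .build(scriptService, lookup);
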
diff --git a/src/main/java/org/elasticsearch/script/mvel/MvelScriptEngineService.java b/src/main/java/org/elasticsearch/script/mvel/MvelScriptEngineService.java
new file mode 100644
index 0000000..358f4ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/script/mvel/MvelScriptEngineService.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script.mvel;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.math.UnboxedMathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.ScriptEngineService;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.mvel2.MVEL;
+import org.mvel2.ParserConfiguration;
+import org.mvel2.ParserContext;
+import org.mvel2.compiler.ExecutableStatement;
+import org.mvel2.integration.impl.MapVariableResolverFactory;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link ScriptEngineService} implementation backed by MVEL.
+ */
+public class MvelScriptEngineService extends AbstractComponent implements ScriptEngineService {
+
+ private final ParserConfiguration parserConfiguration;
+
+ @Inject
+ public MvelScriptEngineService(Settings settings) {
+ super(settings);
+
+ parserConfiguration = new ParserConfiguration();
+ parserConfiguration.addPackageImport("java.util");
+ parserConfiguration.addPackageImport("org.joda.time");
+ parserConfiguration.addImport("time", MVEL.getStaticMethod(System.class, "currentTimeMillis", new Class[0]));
+ // unboxed version of Math, better performance since conversion from boxed to unboxed by mvel is not needed
+ for (Method m : UnboxedMathUtils.class.getMethods()) {
+ if ((m.getModifiers() & Modifier.STATIC) > 0) {
+ parserConfiguration.addImport(m.getName(), m);
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ // nothing to do here...
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{"mvel"};
+ }
+
+ @Override
+ public String[] extensions() {
+ return new String[]{"mvel"};
+ }
+
+ @Override
+ public Object compile(String script) {
+ return MVEL.compileExpression(script.trim(), new ParserContext(parserConfiguration));
+ }
+
+ @Override
+ public Object execute(Object compiledScript, Map vars) {
+ return MVEL.executeExpression(compiledScript, vars);
+ }
+
+ @Override
+ public ExecutableScript executable(Object compiledScript, Map vars) {
+ return new MvelExecutableScript(compiledScript, vars);
+ }
+
+ @Override
+ public SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+ return new MvelSearchScript(compiledScript, lookup, vars);
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+
+ public static class MvelExecutableScript implements ExecutableScript {
+
+ private final ExecutableStatement script;
+
+ private final MapVariableResolverFactory resolver;
+
+ public MvelExecutableScript(Object script, Map vars) {
+ this.script = (ExecutableStatement) script;
+ if (vars != null) {
+ this.resolver = new MapVariableResolverFactory(vars);
+ } else {
+ this.resolver = new MapVariableResolverFactory(new HashMap());
+ }
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ resolver.createVariable(name, value);
+ }
+
+ @Override
+ public Object run() {
+ return script.getValue(null, resolver);
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+ }
+
+ public static class MvelSearchScript implements SearchScript {
+
+ private final ExecutableStatement script;
+
+ private final SearchLookup lookup;
+
+ private final MapVariableResolverFactory resolver;
+
+ public MvelSearchScript(Object script, SearchLookup lookup, Map<String, Object> vars) {
+ this.script = (ExecutableStatement) script;
+ this.lookup = lookup;
+ if (vars != null) {
+ this.resolver = new MapVariableResolverFactory(vars);
+ } else {
+ this.resolver = new MapVariableResolverFactory(new HashMap());
+ }
+ for (Map.Entry<String, Object> entry : lookup.asMap().entrySet()) {
+ resolver.createVariable(entry.getKey(), entry.getValue());
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ lookup.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) {
+ lookup.setNextReader(context);
+ }
+
+ @Override
+ public void setNextDocId(int doc) {
+ lookup.setNextDocId(doc);
+ }
+
+ @Override
+ public void setNextScore(float score) {
+ resolver.createVariable("_score", score);
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ resolver.createVariable(name, value);
+ }
+
+ @Override
+ public void setNextSource(Map<String, Object> source) {
+ lookup.source().setNextSource(source);
+ }
+
+ @Override
+ public Object run() {
+ return script.getValue(null, resolver);
+ }
+
+ @Override
+ public float runAsFloat() {
+ return ((Number) run()).floatValue();
+ }
+
+ @Override
+ public long runAsLong() {
+ return ((Number) run()).longValue();
+ }
+
+ @Override
+ public double runAsDouble() {
+ return ((Number) run()).doubleValue();
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+ }
+}
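
The compile()/execute() pair above is a thin wrapper over MVEL itself; a standalone round-trip using only the plain MVEL API:

import java.util.HashMap;
import java.util.Map;

import org.mvel2.MVEL;

public class MvelDemo {
    public static void main(String[] args) {
        // compile once, then evaluate against a variable map
        Object compiled = MVEL.compileExpression("a + b");
        Map<String, Object> vars = new HashMap<String, Object>();
        vars.put("a", 20);
        vars.put("b", 22);
        System.out.println(MVEL.executeExpression(compiled, vars)); // 42
    }
}
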
diff --git a/src/main/java/org/elasticsearch/search/Scroll.java b/src/main/java/org/elasticsearch/search/Scroll.java
new file mode 100644
index 0000000..2d703bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/Scroll.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
+
+/**
+ * A scroll enables scrolling of search requests. It holds a {@link #keepAlive()} time that
+ * controls how long the scrolling resources are kept open.
+ */
+public class Scroll implements Streamable {
+
+ private TimeValue keepAlive;
+
+ private Scroll() {
+
+ }
+
+ /**
+ * Constructs a new scroll with the provided keep alive.
+ */
+ public Scroll(TimeValue keepAlive) {
+ this.keepAlive = keepAlive;
+ }
+
+ /**
+ * How long the resources will be kept open to support the scroll request.
+ */
+ public TimeValue keepAlive() {
+ return keepAlive;
+ }
+
+ public static Scroll readScroll(StreamInput in) throws IOException {
+ Scroll scroll = new Scroll();
+ scroll.readFrom(in);
+ return scroll;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ keepAlive = readTimeValue(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (keepAlive == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ keepAlive.writeTo(out);
+ }
+ }
+}
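
A hedged round-trip sketch for the Streamable implementation above, assuming BytesStreamOutput and BytesReference.streamInput() from the same common.io.stream package:

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.Scroll;

public class ScrollRoundTripDemo {
    public static void main(String[] args) throws IOException {
        // serialize a five-minute scroll and read it back
        Scroll scroll = new Scroll(TimeValue.timeValueMinutes(5));
        BytesStreamOutput out = new BytesStreamOutput();
        scroll.writeTo(out);
        Scroll copy = Scroll.readScroll(out.bytes().streamInput());
        System.out.println(copy.keepAlive()); // 5m
    }
}
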
diff --git a/src/main/java/org/elasticsearch/search/SearchContextException.java b/src/main/java/org/elasticsearch/search/SearchContextException.java
new file mode 100644
index 0000000..39bea64
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchContextException.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * A search exception that enriches the message with details (query, from/size, sort) from the current {@link SearchContext}.
+ */
+public class SearchContextException extends SearchException {
+
+ public SearchContextException(SearchContext context, String msg) {
+ super(context.shardTarget(), buildMessage(context, msg));
+ }
+
+ public SearchContextException(SearchContext context, String msg, Throwable t) {
+ super(context.shardTarget(), buildMessage(context, msg), t);
+ }
+
+ private static String buildMessage(SearchContext context, String msg) {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[').append(context.shardTarget().index()).append("][").append(context.shardTarget().shardId()).append("]: ");
+ if (context.parsedQuery() != null) {
+ try {
+ sb.append("query[").append(context.parsedQuery().query()).append("],");
+ } catch (Exception e) {
+ sb.append("query[_failed_to_string_],");
+ }
+ }
+ sb.append("from[").append(context.from()).append("],size[").append(context.size()).append("]");
+ if (context.sort() != null) {
+ sb.append(",sort[").append(context.sort()).append("]");
+ }
+ return sb.append(": ").append(msg).toString();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchContextMissingException.java b/src/main/java/org/elasticsearch/search/SearchContextMissingException.java
new file mode 100644
index 0000000..b778985
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchContextMissingException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Thrown when no active search context can be found for a given id, e.g. because it expired or was already freed.
+ */
+public class SearchContextMissingException extends ElasticsearchException {
+
+ private final long id;
+
+ public SearchContextMissingException(long id) {
+ super("No search context found for id [" + id + "]");
+ this.id = id;
+ }
+
+ public long id() {
+ return this.id;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchException.java b/src/main/java/org/elasticsearch/search/SearchException.java
new file mode 100644
index 0000000..bc3579b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Base class for search related exceptions, carrying the {@link SearchShardTarget} the failure occurred on.
+ */
+public class SearchException extends ElasticsearchException {
+
+ private final SearchShardTarget shardTarget;
+
+ public SearchException(SearchShardTarget shardTarget, String msg) {
+ super(msg);
+ this.shardTarget = shardTarget;
+ }
+
+ public SearchException(SearchShardTarget shardTarget, String msg, Throwable cause) {
+ super(msg, cause);
+ this.shardTarget = shardTarget;
+ }
+
+ public SearchShardTarget shard() {
+ return this.shardTarget;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchHit.java b/src/main/java/org/elasticsearch/search/SearchHit.java
new file mode 100644
index 0000000..5363ba2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchHit.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.search.highlight.HighlightField;
+
+import java.util.Map;
+
+/**
+ * A single search hit.
+ *
+ * @see SearchHits
+ */
+public interface SearchHit extends Streamable, ToXContent, Iterable<SearchHitField> {
+
+ /**
+ * The score.
+ */
+ float score();
+
+ /**
+ * The score.
+ */
+ float getScore();
+
+ /**
+ * The index of the hit.
+ */
+ String index();
+
+ /**
+ * The index of the hit.
+ */
+ String getIndex();
+
+ /**
+ * The id of the document.
+ */
+ String id();
+
+ /**
+ * The id of the document.
+ */
+ String getId();
+
+ /**
+ * The type of the document.
+ */
+ String type();
+
+ /**
+ * The type of the document.
+ */
+ String getType();
+
+ /**
+ * The version of the hit.
+ */
+ long version();
+
+ /**
+ * The version of the hit.
+ */
+ long getVersion();
+
+ /**
+     * Returns the source as a bytes reference, uncompressing it if needed.
+ */
+ BytesReference sourceRef();
+
+ /**
+     * Returns the source as a bytes reference, uncompressing it if needed.
+ */
+ BytesReference getSourceRef();
+
+ /**
+     * The source of the document (can be <tt>null</tt>). Note, it's a copy of the source
+     * into a byte array; consider using {@link #sourceRef()} to avoid the copy.
+ */
+ byte[] source();
+
+ /**
+     * Returns <tt>true</tt> if the source is empty (not available).
+ */
+ boolean isSourceEmpty();
+
+ /**
+ * The source of the document as a map (can be <tt>null</tt>).
+ */
+ Map<String, Object> getSource();
+
+ /**
+     * The source of the document as a string (can be <tt>null</tt>).
+ */
+ String sourceAsString();
+
+ /**
+     * The source of the document as a string (can be <tt>null</tt>).
+ */
+ String getSourceAsString();
+
+ /**
+ * The source of the document as a map (can be <tt>null</tt>).
+ */
+ Map<String, Object> sourceAsMap() throws ElasticsearchParseException;
+
+ /**
+ * If enabled, the explanation of the search hit.
+ */
+ Explanation explanation();
+
+ /**
+ * If enabled, the explanation of the search hit.
+ */
+ Explanation getExplanation();
+
+ /**
+ * The hit field matching the given field name.
+ */
+    SearchHitField field(String fieldName);
+
+ /**
+ * A map of hit fields (from field name to hit fields) if additional fields
+ * were required to be loaded.
+ */
+ Map<String, SearchHitField> fields();
+
+ /**
+ * A map of hit fields (from field name to hit fields) if additional fields
+ * were required to be loaded.
+ */
+ Map<String, SearchHitField> getFields();
+
+ /**
+ * A map of highlighted fields.
+ */
+ Map<String, HighlightField> highlightFields();
+
+ /**
+ * A map of highlighted fields.
+ */
+ Map<String, HighlightField> getHighlightFields();
+
+ /**
+ * An array of the sort values used.
+ */
+ Object[] sortValues();
+
+ /**
+ * An array of the sort values used.
+ */
+ Object[] getSortValues();
+
+ /**
+ * The set of query and filter names the query matched with. Mainly makes sense for compound filters and queries.
+ */
+ String[] matchedQueries();
+
+ /**
+ * The set of query and filter names the query matched with. Mainly makes sense for compound filters and queries.
+ */
+ String[] getMatchedQueries();
+
+ /**
+ * The shard of the search hit.
+ */
+ SearchShardTarget shard();
+
+ /**
+ * The shard of the search hit.
+ */
+ SearchShardTarget getShard();
+}
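+
+// Usage sketch (illustrative only, not part of this interface): typical read-side access
+// to a hit, assuming a hypothetical variable `hit` obtained from a search response:
+//
+//   String index = hit.index();                      // or hit.getIndex(), bean style
+//   String id = hit.id();
+//   float score = hit.score();
+//   Map<String, Object> source = hit.sourceAsMap();  // may be null if _source is not available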
diff --git a/src/main/java/org/elasticsearch/search/SearchHitField.java b/src/main/java/org/elasticsearch/search/SearchHitField.java
new file mode 100644
index 0000000..5747bbe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchHitField.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.util.List;
+
+/**
+ * A single field name and values part of a {@link SearchHit}.
+ *
+ * @see SearchHit
+ */
+public interface SearchHitField extends Streamable, Iterable<Object> {
+
+ /**
+ * The name of the field.
+ */
+ String name();
+
+ /**
+ * The name of the field.
+ */
+ String getName();
+
+ /**
+ * The first value of the hit.
+ */
+ <V> V value();
+
+ /**
+ * The first value of the hit.
+ */
+ <V> V getValue();
+
+ /**
+ * The field values.
+ */
+ List<Object> values();
+
+ /**
+ * The field values.
+ */
+ List<Object> getValues();
+
+ /**
+     * @return <tt>true</tt> if the field is a metadata field
+ */
+ boolean isMetadataField();
+}
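+
+// Usage sketch (illustrative only, not part of this interface): reading a single field
+// returned for a hit, assuming a hypothetical `hit` and a field named "user":
+//
+//   SearchHitField field = hit.field("user");
+//   String firstValue = field.value();       // first value, generically typed
+//   List<Object> allValues = field.values(); // all values, for multi valued fields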
diff --git a/src/main/java/org/elasticsearch/search/SearchHits.java b/src/main/java/org/elasticsearch/search/SearchHits.java
new file mode 100644
index 0000000..08d13bf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchHits.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+
+/**
+ * The hits of a search request.
+ */
+public interface SearchHits extends Streamable, ToXContent, Iterable<SearchHit> {
+
+ /**
+     * The total number of hits that match the search request.
+ */
+ long totalHits();
+
+ /**
+     * The total number of hits that match the search request.
+ */
+ long getTotalHits();
+
+ /**
+ * The maximum score of this query.
+ */
+ float maxScore();
+
+ /**
+ * The maximum score of this query.
+ */
+ float getMaxScore();
+
+ /**
+ * The hits of the search request (based on the search type, and from / size provided).
+ */
+ SearchHit[] hits();
+
+ /**
+     * Returns the hit at the provided position.
+ */
+ SearchHit getAt(int position);
+
+ /**
+ * The hits of the search request (based on the search type, and from / size provided).
+ */
+    SearchHit[] getHits();
+}
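+
+// Usage sketch (illustrative only, not part of this interface): iterating over the hits
+// of a response, assuming a hypothetical `hits` of type SearchHits:
+//
+//   System.out.println("total: " + hits.totalHits() + ", max score: " + hits.maxScore());
+//   for (SearchHit hit : hits) {   // SearchHits is Iterable<SearchHit>
+//       System.out.println(hit.id());
+//   }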
diff --git a/src/main/java/org/elasticsearch/search/SearchModule.java b/src/main/java/org/elasticsearch/search/SearchModule.java
new file mode 100644
index 0000000..1092e0e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.aggregations.AggregationModule;
+import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.search.dfs.DfsPhase;
+import org.elasticsearch.search.facet.FacetModule;
+import org.elasticsearch.search.fetch.FetchPhase;
+import org.elasticsearch.search.fetch.explain.ExplainFetchSubPhase;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
+import org.elasticsearch.search.fetch.matchedqueries.MatchedQueriesFetchSubPhase;
+import org.elasticsearch.search.fetch.partial.PartialFieldsFetchSubPhase;
+import org.elasticsearch.search.fetch.script.ScriptFieldsFetchSubPhase;
+import org.elasticsearch.search.fetch.source.FetchSourceSubPhase;
+import org.elasticsearch.search.fetch.version.VersionFetchSubPhase;
+import org.elasticsearch.search.highlight.HighlightModule;
+import org.elasticsearch.search.highlight.HighlightPhase;
+import org.elasticsearch.search.query.QueryPhase;
+import org.elasticsearch.search.suggest.SuggestModule;
+
+/**
+ * The main search module, binding the search service, the search phases and their sub phases.
+ */
+public class SearchModule extends AbstractModule implements SpawnModules {
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(new TransportSearchModule(), new FacetModule(), new HighlightModule(), new SuggestModule(), new FunctionScoreModule(), new AggregationModule());
+ }
+
+ @Override
+ protected void configure() {
+ bind(DfsPhase.class).asEagerSingleton();
+ bind(QueryPhase.class).asEagerSingleton();
+ bind(SearchService.class).asEagerSingleton();
+ bind(SearchPhaseController.class).asEagerSingleton();
+
+ bind(FetchPhase.class).asEagerSingleton();
+ bind(ExplainFetchSubPhase.class).asEagerSingleton();
+ bind(FieldDataFieldsFetchSubPhase.class).asEagerSingleton();
+ bind(ScriptFieldsFetchSubPhase.class).asEagerSingleton();
+ bind(PartialFieldsFetchSubPhase.class).asEagerSingleton();
+ bind(FetchSourceSubPhase.class).asEagerSingleton();
+ bind(VersionFetchSubPhase.class).asEagerSingleton();
+ bind(MatchedQueriesFetchSubPhase.class).asEagerSingleton();
+ bind(HighlightPhase.class).asEagerSingleton();
+
+ bind(SearchServiceTransportAction.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchParseElement.java b/src/main/java/org/elasticsearch/search/SearchParseElement.java
new file mode 100644
index 0000000..9bf680d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchParseElement.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses a single top level element of the search request source into the {@link SearchContext}.
+ */
+public interface SearchParseElement {
+
+ void parse(XContentParser parser, SearchContext context) throws Exception;
+}
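+
+// Illustrative sketch (not part of this interface): a hypothetical element parsing a
+// boolean value, in the style of the built-in elements, assuming SearchContext exposes
+// an explain(boolean) setter:
+//
+//   class ExplainElement implements SearchParseElement {
+//       @Override
+//       public void parse(XContentParser parser, SearchContext context) throws Exception {
+//           context.explain(parser.booleanValue());
+//       }
+//   }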
diff --git a/src/main/java/org/elasticsearch/search/SearchParseException.java b/src/main/java/org/elasticsearch/search/SearchParseException.java
new file mode 100644
index 0000000..5f528f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchParseException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Thrown when parsing the source of a search request fails.
+ */
+public class SearchParseException extends SearchContextException {
+
+ public SearchParseException(SearchContext context, String msg) {
+ super(context, "Parse Failure [" + msg + "]");
+ }
+
+ public SearchParseException(SearchContext context, String msg, Throwable cause) {
+ super(context, "Parse Failure [" + msg + "]", cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchPhase.java b/src/main/java/org/elasticsearch/search/SearchPhase.java
new file mode 100644
index 0000000..30013b5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchPhase.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.Map;
+
+/**
+ * A single phase of the search execution (such as dfs, query or fetch), with its own parse elements.
+ */
+public interface SearchPhase {
+
+ Map<String, ? extends SearchParseElement> parseElements();
+
+ /**
+     * Performs pre-processing of the search context before execution.
+ */
+ void preProcess(SearchContext context);
+
+ void execute(SearchContext context) throws ElasticsearchException;
+}
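+
+// Illustrative sketch (not part of this interface): the smallest possible phase, with no
+// parse elements and no pre-processing (hypothetical class, for illustration only):
+//
+//   class NoopPhase implements SearchPhase {
+//       public Map<String, ? extends SearchParseElement> parseElements() {
+//           return java.util.Collections.emptyMap();
+//       }
+//       public void preProcess(SearchContext context) {}
+//       public void execute(SearchContext context) throws ElasticsearchException {}
+//   }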
diff --git a/src/main/java/org/elasticsearch/search/SearchPhaseResult.java b/src/main/java/org/elasticsearch/search/SearchPhaseResult.java
new file mode 100644
index 0000000..067761b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchPhaseResult.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.io.stream.Streamable;
+
+/**
+ * The per shard result of a single search phase, identified by the search context id.
+ */
+public interface SearchPhaseResult extends Streamable {
+
+ long id();
+
+ SearchShardTarget shardTarget();
+
+ void shardTarget(SearchShardTarget shardTarget);
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java
new file mode 100644
index 0000000..ce23565
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchService.java
@@ -0,0 +1,863 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.ObjectSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMapper.Loading;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.search.stats.StatsGroupsParseElement;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.warmer.IndicesWarmer;
+import org.elasticsearch.indices.warmer.IndicesWarmer.WarmerContext;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.dfs.CachedDfSource;
+import org.elasticsearch.search.dfs.DfsPhase;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.fetch.*;
+import org.elasticsearch.search.internal.DefaultSearchContext;
+import org.elasticsearch.search.internal.InternalScrollSearchRequest;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.*;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
+
+/**
+ * The service that executes the different search phases on a node and manages the active search contexts.
+ */
+public class SearchService extends AbstractLifecycleComponent<SearchService> {
+
+ public static final String NORMS_LOADING_KEY = "index.norms.loading";
+    private static final String DEFAULT_KEEPALIVE_COMPONENT_KEY = "default_keep_alive";
+    public static final String DEFAUTL_KEEPALIVE_KEY = "search." + DEFAULT_KEEPALIVE_COMPONENT_KEY;
+    private static final String KEEPALIVE_INTERVAL_COMPONENT_KEY = "keep_alive_interval";
+    public static final String KEEPALIVE_INTERVAL_KEY = "search." + KEEPALIVE_INTERVAL_COMPONENT_KEY;
+
+
+ private final ThreadPool threadPool;
+
+ private final ClusterService clusterService;
+
+ private final IndicesService indicesService;
+
+ private final IndicesWarmer indicesWarmer;
+
+ private final ScriptService scriptService;
+
+ private final CacheRecycler cacheRecycler;
+
+ private final PageCacheRecycler pageCacheRecycler;
+
+ private final DfsPhase dfsPhase;
+
+ private final QueryPhase queryPhase;
+
+ private final FetchPhase fetchPhase;
+
+ private final long defaultKeepAlive;
+
+ private final ScheduledFuture<?> keepAliveReaper;
+
+ private final AtomicLong idGenerator = new AtomicLong();
+
+ private final ConcurrentMapLong<SearchContext> activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
+
+ private final ImmutableMap<String, SearchParseElement> elementParsers;
+
+ @Inject
+ public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesLifecycle indicesLifecycle, IndicesWarmer indicesWarmer, ThreadPool threadPool,
+ ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.indicesService = indicesService;
+ this.indicesWarmer = indicesWarmer;
+ this.scriptService = scriptService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.dfsPhase = dfsPhase;
+ this.queryPhase = queryPhase;
+ this.fetchPhase = fetchPhase;
+
+        TimeValue keepAliveInterval = componentSettings.getAsTime(KEEPALIVE_INTERVAL_COMPONENT_KEY, timeValueMinutes(1));
+        // 5 minutes is a safe default here, since contexts are also cleaned up on search requests and when a shard/index closes
+        this.defaultKeepAlive = componentSettings.getAsTime(DEFAULT_KEEPALIVE_COMPONENT_KEY, timeValueMinutes(5)).millis();
+
+ Map<String, SearchParseElement> elementParsers = new HashMap<String, SearchParseElement>();
+ elementParsers.putAll(dfsPhase.parseElements());
+ elementParsers.putAll(queryPhase.parseElements());
+ elementParsers.putAll(fetchPhase.parseElements());
+ elementParsers.put("stats", new StatsGroupsParseElement());
+ this.elementParsers = ImmutableMap.copyOf(elementParsers);
+
+ this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);
+
+ this.indicesWarmer.addListener(new NormsWarmer());
+ this.indicesWarmer.addListener(new FieldDataWarmer());
+ this.indicesWarmer.addListener(new SearchWarmer());
+ }
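+
+    // Note: the component settings read above resolve to the full keys
+    // "search.keep_alive_interval" (reaper frequency, default 1m) and
+    // "search.default_keep_alive" (context expiry, default 5m); e.g. in elasticsearch.yml:
+    //
+    //   search.default_keep_alive: 10m
+    //   search.keep_alive_interval: 1m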
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ for (SearchContext context : activeContexts.values()) {
+ freeContext(context);
+ }
+ activeContexts.clear();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ keepAliveReaper.cancel(false);
+ }
+
+ public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws ElasticsearchException {
+ SearchContext context = createAndPutContext(request);
+ try {
+ contextProcessing(context);
+ dfsPhase.execute(context);
+ contextProcessedSuccessfully(context);
+ return context.dfsResult();
+ } catch (Throwable e) {
+ logger.trace("Dfs phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public QuerySearchResult executeScan(ShardSearchRequest request) throws ElasticsearchException {
+ SearchContext context = createAndPutContext(request);
+ assert context.searchType() == SearchType.SCAN;
+ context.searchType(SearchType.COUNT); // move to COUNT, and then, when scrolling, move to SCAN
+ assert context.searchType() == SearchType.COUNT;
+ try {
+ if (context.scroll() == null) {
+ throw new ElasticsearchException("Scroll must be provided when scanning...");
+ }
+ contextProcessing(context);
+ queryPhase.execute(context);
+ contextProcessedSuccessfully(context);
+ return context.queryResult();
+ } catch (Throwable e) {
+ logger.trace("Scan phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) throws ElasticsearchException {
+ SearchContext context = findContext(request.id());
+ contextProcessing(context);
+ try {
+ processScroll(request, context);
+ if (context.searchType() == SearchType.COUNT) {
+                // first scroll of the scan, move back to SCAN and reset from to 0
+ context.searchType(SearchType.SCAN);
+ context.from(0);
+ }
+ queryPhase.execute(context);
+ shortcutDocIdsToLoadForScanning(context);
+ fetchPhase.execute(context);
+ if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
+ freeContext(request.id());
+ } else {
+ contextProcessedSuccessfully(context);
+ }
+ return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
+ } catch (Throwable e) {
+ logger.trace("Scan phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public QuerySearchResult executeQueryPhase(ShardSearchRequest request) throws ElasticsearchException {
+ SearchContext context = createAndPutContext(request);
+ try {
+ context.indexShard().searchService().onPreQueryPhase(context);
+ long time = System.nanoTime();
+ contextProcessing(context);
+ queryPhase.execute(context);
+ if (context.searchType() == SearchType.COUNT) {
+ freeContext(context.id());
+ } else {
+ contextProcessedSuccessfully(context);
+ }
+ context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
+ return context.queryResult();
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedQueryPhase(context);
+ logger.trace("Query phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) throws ElasticsearchException {
+ SearchContext context = findContext(request.id());
+ try {
+ context.indexShard().searchService().onPreQueryPhase(context);
+ long time = System.nanoTime();
+ contextProcessing(context);
+ processScroll(request, context);
+ queryPhase.execute(context);
+ contextProcessedSuccessfully(context);
+ context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
+ return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedQueryPhase(context);
+ logger.trace("Query phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticsearchException {
+ SearchContext context = findContext(request.id());
+ contextProcessing(context);
+ try {
+ context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity()));
+ } catch (Throwable e) {
+ freeContext(context);
+ cleanContext(context);
+ throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
+ }
+ try {
+ context.indexShard().searchService().onPreQueryPhase(context);
+ long time = System.nanoTime();
+ queryPhase.execute(context);
+ contextProcessedSuccessfully(context);
+ context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
+ return context.queryResult();
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedQueryPhase(context);
+ logger.trace("Query phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws ElasticsearchException {
+ SearchContext context = createAndPutContext(request);
+ contextProcessing(context);
+ try {
+ context.indexShard().searchService().onPreQueryPhase(context);
+ long time = System.nanoTime();
+ try {
+ queryPhase.execute(context);
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedQueryPhase(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ long time2 = System.nanoTime();
+ context.indexShard().searchService().onQueryPhase(context, time2 - time);
+ context.indexShard().searchService().onPreFetchPhase(context);
+ try {
+ shortcutDocIdsToLoad(context);
+ fetchPhase.execute(context);
+ if (context.scroll() == null) {
+ freeContext(context.id());
+ } else {
+ contextProcessedSuccessfully(context);
+ }
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedFetchPhase(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
+ return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
+ } catch (Throwable e) {
+ logger.trace("Fetch phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) throws ElasticsearchException {
+ SearchContext context = findContext(request.id());
+ contextProcessing(context);
+ try {
+ context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity()));
+ } catch (Throwable e) {
+ freeContext(context);
+ cleanContext(context);
+ throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
+ }
+ try {
+ context.indexShard().searchService().onPreQueryPhase(context);
+ long time = System.nanoTime();
+ try {
+ queryPhase.execute(context);
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedQueryPhase(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ long time2 = System.nanoTime();
+ context.indexShard().searchService().onQueryPhase(context, time2 - time);
+ context.indexShard().searchService().onPreFetchPhase(context);
+ try {
+ shortcutDocIdsToLoad(context);
+ fetchPhase.execute(context);
+ if (context.scroll() == null) {
+ freeContext(request.id());
+ } else {
+ contextProcessedSuccessfully(context);
+ }
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedFetchPhase(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
+ return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
+ } catch (Throwable e) {
+ logger.trace("Fetch phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) throws ElasticsearchException {
+ SearchContext context = findContext(request.id());
+ contextProcessing(context);
+ try {
+ processScroll(request, context);
+ context.indexShard().searchService().onPreQueryPhase(context);
+ long time = System.nanoTime();
+ try {
+ queryPhase.execute(context);
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedQueryPhase(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ long time2 = System.nanoTime();
+ context.indexShard().searchService().onQueryPhase(context, time2 - time);
+ context.indexShard().searchService().onPreFetchPhase(context);
+ try {
+ shortcutDocIdsToLoad(context);
+ fetchPhase.execute(context);
+ if (context.scroll() == null) {
+ freeContext(request.id());
+ } else {
+ contextProcessedSuccessfully(context);
+ }
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedFetchPhase(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
+ return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
+ } catch (Throwable e) {
+ logger.trace("Fetch phase failed", e);
+ freeContext(context);
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ public FetchSearchResult executeFetchPhase(FetchSearchRequest request) throws ElasticsearchException {
+ SearchContext context = findContext(request.id());
+ contextProcessing(context);
+ try {
+ context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
+ context.indexShard().searchService().onPreFetchPhase(context);
+ long time = System.nanoTime();
+ fetchPhase.execute(context);
+ if (context.scroll() == null) {
+ freeContext(request.id());
+ } else {
+ contextProcessedSuccessfully(context);
+ }
+ context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time);
+ return context.fetchResult();
+ } catch (Throwable e) {
+ context.indexShard().searchService().onFailedFetchPhase(context);
+ logger.trace("Fetch phase failed", e);
+ freeContext(context); // we just try to make sure this is freed - rethrow orig exception.
+ throw ExceptionsHelper.convertToRuntime(e);
+ } finally {
+ cleanContext(context);
+ }
+ }
+
+ private SearchContext findContext(long id) throws SearchContextMissingException {
+ SearchContext context = activeContexts.get(id);
+ if (context == null) {
+ throw new SearchContextMissingException(id);
+ }
+ SearchContext.setCurrent(context);
+ return context;
+ }
+
+ final SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException {
+ SearchContext context = createContext(request, null);
+ boolean success = false;
+ try {
+ activeContexts.put(context.id(), context);
+ context.indexShard().searchService().onNewContext(context);
+ success = true;
+ return context;
+ } finally {
+ if (!success) {
+ freeContext(context);
+ }
+ }
+ }
+
+ final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws ElasticsearchException {
+ IndexService indexService = indicesService.indexServiceSafe(request.index());
+ IndexShard indexShard = indexService.shardSafe(request.shardId());
+
+ SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
+
+ Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
+ SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, cacheRecycler, pageCacheRecycler);
+ SearchContext.setCurrent(context);
+ try {
+ context.scroll(request.scroll());
+
+ parseSource(context, request.source());
+ parseSource(context, request.extraSource());
+
+ // if the from and size are still not set, default them
+ if (context.from() == -1) {
+ context.from(0);
+ }
+ if (context.size() == -1) {
+ context.size(10);
+ }
+
+ // pre process
+ dfsPhase.preProcess(context);
+ queryPhase.preProcess(context);
+ fetchPhase.preProcess(context);
+
+ // compute the context keep alive
+ long keepAlive = defaultKeepAlive;
+ if (request.scroll() != null && request.scroll().keepAlive() != null) {
+ keepAlive = request.scroll().keepAlive().millis();
+ }
+ context.keepAlive(keepAlive);
+ } catch (Throwable e) {
+ context.release();
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+
+ return context;
+ }
+
+ public void freeContext(long id) {
+ SearchContext context = activeContexts.remove(id);
+ if (context == null) {
+ return;
+ }
+ context.indexShard().searchService().onFreeContext(context);
+ context.release();
+ }
+
+ private void freeContext(SearchContext context) {
+ SearchContext removed = activeContexts.remove(context.id());
+ if (removed != null) {
+ removed.indexShard().searchService().onFreeContext(removed);
+ }
+ context.release();
+ }
+
+ public void freeAllScrollContexts() {
+ for (SearchContext searchContext : activeContexts.values()) {
+ if (searchContext.scroll() != null) {
+ freeContext(searchContext);
+ }
+ }
+ }
+
+ private void contextProcessing(SearchContext context) {
+ // disable timeout while executing a search
+ context.accessed(-1);
+ }
+
+ private void contextProcessedSuccessfully(SearchContext context) {
+ context.accessed(threadPool.estimatedTimeInMillis());
+ }
+
+ private void cleanContext(SearchContext context) {
+ SearchContext.removeCurrent();
+ }
+
+ private void parseSource(SearchContext context, BytesReference source) throws SearchParseException {
+ // nothing to parse...
+ if (source == null || source.length() == 0) {
+ return;
+ }
+ XContentParser parser = null;
+ try {
+ parser = XContentFactory.xContent(source).createParser(source);
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String fieldName = parser.currentName();
+ parser.nextToken();
+ SearchParseElement element = elementParsers.get(fieldName);
+ if (element == null) {
+ throw new SearchParseException(context, "No parser for element [" + fieldName + "]");
+ }
+ element.parse(parser, context);
+ } else if (token == null) {
+ break;
+ }
+ }
+ } catch (Throwable e) {
+ String sSource = "_na_";
+ try {
+ sSource = XContentHelper.convertToJson(source, false);
+ } catch (Throwable e1) {
+ // ignore
+ }
+ throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", e);
+ } finally {
+ if (parser != null) {
+ parser.close();
+ }
+ }
+ }
+
+ private static final int[] EMPTY_DOC_IDS = new int[0];
+
+ /**
+     * Shortcuts the doc ids to load: we load only the ids from "from" up to "from + size". The phase controller
+     * handles this as well, since the result is always size * shards for QUERY_AND_FETCH
+ */
+ private void shortcutDocIdsToLoad(SearchContext context) {
+ TopDocs topDocs = context.queryResult().topDocs();
+ if (topDocs.scoreDocs.length < context.from()) {
+ // no more docs...
+ context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
+ return;
+ }
+ int totalSize = context.from() + context.size();
+ int[] docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size())];
+ int counter = 0;
+ for (int i = context.from(); i < totalSize; i++) {
+ if (i < topDocs.scoreDocs.length) {
+ docIdsToLoad[counter] = topDocs.scoreDocs[i].doc;
+ } else {
+ break;
+ }
+ counter++;
+ }
+ context.docIdsToLoad(docIdsToLoad, 0, counter);
+ }
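+
+    // Worked example: with from=10, size=10 and 15 scoreDocs, the loop above copies the
+    // doc ids at positions 10..14, so counter ends at 5 and only those 5 ids are handed
+    // to the fetch phase.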
+
+ private void shortcutDocIdsToLoadForScanning(SearchContext context) {
+ TopDocs topDocs = context.queryResult().topDocs();
+ if (topDocs.scoreDocs.length == 0) {
+ // no more docs...
+ context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
+ return;
+ }
+ int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
+ for (int i = 0; i < docIdsToLoad.length; i++) {
+ docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
+ }
+ context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
+ }
+
+ private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
+ // process scroll
+ context.from(context.from() + context.size());
+ context.scroll(request.scroll());
+ // update the context keep alive based on the new scroll value
+ if (request.scroll() != null && request.scroll().keepAlive() != null) {
+ context.keepAlive(request.scroll().keepAlive().millis());
+ }
+ }
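+
+    // Worked example: with size=10, successive scroll requests advance from 0 -> 10 -> 20,
+    // each pass also refreshing the context keep alive from the request's scroll value.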
+
+ static class NormsWarmer extends IndicesWarmer.Listener {
+
+ @Override
+ public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
+ final Loading defaultLoading = Loading.parse(indexMetaData.settings().get(NORMS_LOADING_KEY), Loading.LAZY);
+ final MapperService mapperService = indexShard.mapperService();
+ final ObjectSet<String> warmUp = new ObjectOpenHashSet<String>();
+ for (DocumentMapper docMapper : mapperService) {
+ for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
+ final String indexName = fieldMapper.names().indexName();
+ if (fieldMapper.fieldType().indexed() && !fieldMapper.fieldType().omitNorms() && fieldMapper.normsLoading(defaultLoading) == Loading.EAGER) {
+ warmUp.add(indexName);
+ }
+ }
+ }
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task
+ threadPool.executor(executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (Iterator<ObjectCursor<String>> it = warmUp.iterator(); it.hasNext(); ) {
+ final String indexName = it.next().value;
+ final long start = System.nanoTime();
+ for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
+ final NumericDocValues values = ctx.reader().getNormValues(indexName);
+ if (values != null) {
+ values.get(0);
+ }
+ }
+ if (indexShard.warmerService().logger().isTraceEnabled()) {
+ indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, TimeValue.timeValueNanos(System.nanoTime() - start));
+ }
+ }
+ } catch (Throwable t) {
+ indexShard.warmerService().logger().warn("failed to warm-up norms", t);
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+
+ return new TerminationHandle() {
+ @Override
+ public void awaitTermination() throws InterruptedException {
+ latch.await();
+ }
+ };
+ }
+ }
+
+ static class FieldDataWarmer extends IndicesWarmer.Listener {
+
+ @Override
+ public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
+ final MapperService mapperService = indexShard.mapperService();
+ final Map<String, FieldMapper<?>> warmUp = new HashMap<String, FieldMapper<?>>();
+ boolean parentChild = false;
+ for (DocumentMapper docMapper : mapperService) {
+ for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
+ if (fieldMapper instanceof ParentFieldMapper) {
+ ParentFieldMapper parentFieldMapper = (ParentFieldMapper) fieldMapper;
+ if (parentFieldMapper.active()) {
+ parentChild = true;
+ }
+ }
+ final FieldDataType fieldDataType = fieldMapper.fieldDataType();
+ if (fieldDataType == null) {
+ continue;
+ }
+ if (fieldDataType.getLoading() != Loading.EAGER) {
+ continue;
+ }
+ final String indexName = fieldMapper.names().indexName();
+ if (warmUp.containsKey(indexName)) {
+ continue;
+ }
+ warmUp.put(indexName, fieldMapper);
+ }
+ }
+ final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
+ final Executor executor = threadPool.executor(executor());
+ final CountDownLatch latch = new CountDownLatch(context.newSearcher().reader().leaves().size() * warmUp.size() + (parentChild ? 1 : 0));
+ for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
+ for (final FieldMapper<?> fieldMapper : warmUp.values()) {
+ executor.execute(new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ final long start = System.nanoTime();
+ indexFieldDataService.getForField(fieldMapper).load(ctx);
+ if (indexShard.warmerService().logger().isTraceEnabled()) {
+ indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldMapper.names().name(), TimeValue.timeValueNanos(System.nanoTime() - start));
+ }
+ } catch (Throwable t) {
+ indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldMapper.names().name());
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ });
+ }
+ }
+
+ if (parentChild) {
+ executor.execute(new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ final long start = System.nanoTime();
+ indexShard.indexService().cache().idCache().refresh(context.newSearcher().reader().leaves());
+ if (indexShard.warmerService().logger().isTraceEnabled()) {
+ indexShard.warmerService().logger().trace("warmed id_cache, took [{}]", TimeValue.timeValueNanos(System.nanoTime() - start));
+ }
+ } catch (Throwable t) {
+ indexShard.warmerService().logger().warn("failed to warm-up id cache", t);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ });
+ }
+
+ return new TerminationHandle() {
+ @Override
+ public void awaitTermination() throws InterruptedException {
+ latch.await();
+ }
+ };
+ }
+
+ }
+
+ class SearchWarmer extends IndicesWarmer.Listener {
+
+ @Override
+ public TerminationHandle warm(final IndexShard indexShard, final IndexMetaData indexMetaData, final IndicesWarmer.WarmerContext warmerContext, ThreadPool threadPool) {
+ IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
+ if (custom == null) {
+ return TerminationHandle.NO_WAIT;
+ }
+ final Executor executor = threadPool.executor(executor());
+ final CountDownLatch latch = new CountDownLatch(custom.entries().size());
+ for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
+ executor.execute(new Runnable() {
+
+ @Override
+ public void run() {
+ SearchContext context = null;
+ try {
+ long now = System.nanoTime();
+ ShardSearchRequest request = new ShardSearchRequest(indexShard.shardId().index().name(), indexShard.shardId().id(), indexMetaData.numberOfShards(),
+                                SearchType.QUERY_THEN_FETCH /* we don't use COUNT so that sorting also kicks in as part of the warming logic */)
+ .source(entry.source())
+ .types(entry.types());
+ context = createContext(request, warmerContext.newSearcher());
+ queryPhase.execute(context);
+ long took = System.nanoTime() - now;
+ if (indexShard.warmerService().logger().isTraceEnabled()) {
+ indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
+ }
+ } catch (Throwable t) {
+ indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
+ } finally {
+ try {
+ if (context != null) {
+ freeContext(context);
+ cleanContext(context);
+ }
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ });
+ }
+ return new TerminationHandle() {
+ @Override
+ public void awaitTermination() throws InterruptedException {
+ latch.await();
+ }
+ };
+ }
+ }
+
+ class Reaper implements Runnable {
+ @Override
+ public void run() {
+ final long time = threadPool.estimatedTimeInMillis();
+ for (SearchContext context : activeContexts.values()) {
+ // Use the same value for both checks since lastAccessTime can
+ // be modified by another thread between checks!
+ final long lastAccessTime = context.lastAccessTime();
+                if (lastAccessTime == -1L) { // it's being processed or the timeout is disabled
+ continue;
+ }
+ if ((time - lastAccessTime > context.keepAlive())) {
+ logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time, lastAccessTime, context.keepAlive());
+ freeContext(context);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/src/main/java/org/elasticsearch/search/SearchShardTarget.java
new file mode 100644
index 0000000..ce489a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/SearchShardTarget.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * The target that the search request was executed on.
+ */
+public class SearchShardTarget implements Streamable, Serializable, Comparable<SearchShardTarget> {
+
+ private Text nodeId;
+ private Text index;
+ private int shardId;
+
+ private SearchShardTarget() {
+
+ }
+
+ public SearchShardTarget(String nodeId, String index, int shardId) {
+ this.nodeId = nodeId == null ? null : new StringAndBytesText(nodeId);
+ this.index = new StringAndBytesText(index);
+ this.shardId = shardId;
+ }
+
+    @Nullable
+    public String nodeId() {
+        return nodeId == null ? null : nodeId.string(); // nodeId may be null (see constructor and readFrom)
+    }
+
+ @Nullable
+ public String getNodeId() {
+ return nodeId();
+ }
+
+ public Text nodeIdText() {
+ return this.nodeId;
+ }
+
+ public String index() {
+ return index.string();
+ }
+
+ public String getIndex() {
+ return index();
+ }
+
+ public Text indexText() {
+ return this.index;
+ }
+
+ public int shardId() {
+ return shardId;
+ }
+
+ public int getShardId() {
+ return shardId;
+ }
+
+ public static SearchShardTarget readSearchShardTarget(StreamInput in) throws IOException {
+ SearchShardTarget result = new SearchShardTarget();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public int compareTo(SearchShardTarget o) {
+ int i = index.string().compareTo(o.index());
+ if (i == 0) {
+ i = shardId - o.shardId;
+ }
+ return i;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ nodeId = in.readSharedText();
+ }
+ index = in.readSharedText();
+ shardId = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (nodeId == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeSharedText(nodeId);
+ }
+ out.writeSharedText(index);
+ out.writeVInt(shardId);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ SearchShardTarget that = (SearchShardTarget) o;
+
+ if (shardId != that.shardId) return false;
+ if (index != null ? !index.equals(that.index) : that.index != null) return false;
+ if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = nodeId != null ? nodeId.hashCode() : 0;
+ result = 31 * result + (index != null ? index.hashCode() : 0);
+ result = 31 * result + shardId;
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ if (nodeId == null) {
+ return "[_na_][" + index + "][" + shardId + "]";
+ }
+ return "[" + nodeId + "][" + index + "][" + shardId + "]";
+ }
+}
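+
+// Illustrative sketch (not part of this class): toString() formats as
+// [nodeId][index][shardId], and compareTo orders by index name, then shard id:
+//
+//   new SearchShardTarget("node1", "twitter", 0).toString(); // "[node1][twitter][0]"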
diff --git a/src/main/java/org/elasticsearch/search/TransportSearchModule.java b/src/main/java/org/elasticsearch/search/TransportSearchModule.java
new file mode 100644
index 0000000..ce13006
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/TransportSearchModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.search.aggregations.TransportAggregationModule;
+import org.elasticsearch.search.facet.TransportFacetModule;
+
+/**
+ * Spawns the transport-level modules for facets and aggregations.
+ */
+public class TransportSearchModule extends AbstractModule implements SpawnModules {
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ return ImmutableList.of(new TransportFacetModule(), new TransportAggregationModule());
+ }
+
+ @Override
+ protected void configure() {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/action/SearchServiceListener.java b/src/main/java/org/elasticsearch/search/action/SearchServiceListener.java
new file mode 100644
index 0000000..46fdb91
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/action/SearchServiceListener.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.action;
+
+/**
+ * A callback for asynchronous, shard-level search service operations.
+ */
+public interface SearchServiceListener<T> {
+
+ void onResult(T result);
+
+ void onFailure(Throwable t);
+}
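
The interface above is the callback contract used by the transport action that
follows; a minimal anonymous implementation might look like this (the result
type is chosen for illustration):

    SearchServiceListener<QuerySearchResult> listener = new SearchServiceListener<QuerySearchResult>() {
        @Override
        public void onResult(QuerySearchResult result) {
            // consume the shard-level result
        }

        @Override
        public void onFailure(Throwable t) {
            // record the shard failure and continue with the remaining shards
        }
    };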
diff --git a/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java b/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java
new file mode 100644
index 0000000..a6ce0ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java
@@ -0,0 +1,800 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.fetch.FetchSearchRequest;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.QueryFetchSearchResult;
+import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
+import org.elasticsearch.search.internal.InternalScrollSearchRequest;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.query.QuerySearchRequest;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.query.ScrollQuerySearchResult;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+
+/**
+ * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through
+ * transport.
+ */
+public class SearchServiceTransportAction extends AbstractComponent {
+
+ static final class FreeContextResponseHandler extends EmptyTransportResponseHandler {
+
+ private final ESLogger logger;
+
+ FreeContextResponseHandler(ESLogger logger) {
+ super(ThreadPool.Names.SAME);
+ this.logger = logger;
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("Failed to send release search context", exp);
+ }
+ }
+
+ private final TransportService transportService;
+
+ private final ClusterService clusterService;
+
+ private final SearchService searchService;
+
+ private final FreeContextResponseHandler freeContextResponseHandler = new FreeContextResponseHandler(logger);
+
+ @Inject
+ public SearchServiceTransportAction(Settings settings, TransportService transportService, ClusterService clusterService, SearchService searchService) {
+ super(settings);
+ this.transportService = transportService;
+ this.clusterService = clusterService;
+ this.searchService = searchService;
+
+ transportService.registerHandler(SearchFreeContextTransportHandler.ACTION, new SearchFreeContextTransportHandler());
+ transportService.registerHandler(ClearScrollContextsTransportHandler.ACTION, new ClearScrollContextsTransportHandler());
+ transportService.registerHandler(SearchDfsTransportHandler.ACTION, new SearchDfsTransportHandler());
+ transportService.registerHandler(SearchQueryTransportHandler.ACTION, new SearchQueryTransportHandler());
+ transportService.registerHandler(SearchQueryByIdTransportHandler.ACTION, new SearchQueryByIdTransportHandler());
+ transportService.registerHandler(SearchQueryScrollTransportHandler.ACTION, new SearchQueryScrollTransportHandler());
+ transportService.registerHandler(SearchQueryFetchTransportHandler.ACTION, new SearchQueryFetchTransportHandler());
+ transportService.registerHandler(SearchQueryQueryFetchTransportHandler.ACTION, new SearchQueryQueryFetchTransportHandler());
+ transportService.registerHandler(SearchQueryFetchScrollTransportHandler.ACTION, new SearchQueryFetchScrollTransportHandler());
+ transportService.registerHandler(SearchFetchByIdTransportHandler.ACTION, new SearchFetchByIdTransportHandler());
+ transportService.registerHandler(SearchScanTransportHandler.ACTION, new SearchScanTransportHandler());
+ transportService.registerHandler(SearchScanScrollTransportHandler.ACTION, new SearchScanScrollTransportHandler());
+ }
+
+ public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ searchService.freeContext(contextId);
+ } else {
+ transportService.sendRequest(node, SearchFreeContextTransportHandler.ACTION, new SearchFreeContextRequest(request, contextId), freeContextResponseHandler);
+ }
+ }
+
+ public void sendFreeContext(DiscoveryNode node, long contextId, ClearScrollRequest request, final ActionListener<Boolean> actionListener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ searchService.freeContext(contextId);
+ actionListener.onResponse(true);
+ } else {
+ transportService.sendRequest(node, SearchFreeContextTransportHandler.ACTION, new SearchFreeContextRequest(request, contextId), new TransportResponseHandler<TransportResponse>() {
+ @Override
+ public TransportResponse newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse response) {
+ actionListener.onResponse(true);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ actionListener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendClearAllScrollContexts(DiscoveryNode node, ClearScrollRequest request, final ActionListener<Boolean> actionListener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ searchService.freeAllScrollContexts();
+ actionListener.onResponse(true);
+ } else {
+ transportService.sendRequest(node, ClearScrollContextsTransportHandler.ACTION, new ClearScrollContextsRequest(request), new TransportResponseHandler<TransportResponse>() {
+ @Override
+ public TransportResponse newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse response) {
+ actionListener.onResponse(true);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ actionListener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteDfs(DiscoveryNode node, final ShardSearchRequest request, final SearchServiceListener<DfsSearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ DfsSearchResult result = searchService.executeDfsPhase(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchDfsTransportHandler.ACTION, request, new BaseTransportResponseHandler<DfsSearchResult>() {
+
+ @Override
+ public DfsSearchResult newInstance() {
+ return new DfsSearchResult();
+ }
+
+ @Override
+ public void handleResponse(DfsSearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteQuery(DiscoveryNode node, final ShardSearchRequest request, final SearchServiceListener<QuerySearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ QuerySearchResult result = searchService.executeQueryPhase(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchQueryTransportHandler.ACTION, request, new BaseTransportResponseHandler<QuerySearchResult>() {
+
+ @Override
+ public QuerySearchResult newInstance() {
+ return new QuerySearchResult();
+ }
+
+ @Override
+ public void handleResponse(QuerySearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteQuery(DiscoveryNode node, final QuerySearchRequest request, final SearchServiceListener<QuerySearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ QuerySearchResult result = searchService.executeQueryPhase(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchQueryByIdTransportHandler.ACTION, request, new BaseTransportResponseHandler<QuerySearchResult>() {
+
+ @Override
+ public QuerySearchResult newInstance() {
+ return new QuerySearchResult();
+ }
+
+ @Override
+ public void handleResponse(QuerySearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, final SearchServiceListener<QuerySearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
+ listener.onResult(result.queryResult());
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchQueryScrollTransportHandler.ACTION, request, new BaseTransportResponseHandler<ScrollQuerySearchResult>() {
+
+ @Override
+ public ScrollQuerySearchResult newInstance() {
+ return new ScrollQuerySearchResult();
+ }
+
+ @Override
+ public void handleResponse(ScrollQuerySearchResult response) {
+ listener.onResult(response.queryResult());
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteFetch(DiscoveryNode node, final ShardSearchRequest request, final SearchServiceListener<QueryFetchSearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchQueryFetchTransportHandler.ACTION, request, new BaseTransportResponseHandler<QueryFetchSearchResult>() {
+
+ @Override
+ public QueryFetchSearchResult newInstance() {
+ return new QueryFetchSearchResult();
+ }
+
+ @Override
+ public void handleResponse(QueryFetchSearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request, final SearchServiceListener<QueryFetchSearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchQueryQueryFetchTransportHandler.ACTION, request, new BaseTransportResponseHandler<QueryFetchSearchResult>() {
+
+ @Override
+ public QueryFetchSearchResult newInstance() {
+ return new QueryFetchSearchResult();
+ }
+
+ @Override
+ public void handleResponse(QueryFetchSearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, final SearchServiceListener<QueryFetchSearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ listener.onResult(result.result());
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchQueryFetchScrollTransportHandler.ACTION, request, new BaseTransportResponseHandler<ScrollQueryFetchSearchResult>() {
+
+ @Override
+ public ScrollQueryFetchSearchResult newInstance() {
+ return new ScrollQueryFetchSearchResult();
+ }
+
+ @Override
+ public void handleResponse(ScrollQueryFetchSearchResult response) {
+ listener.onResult(response.result());
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteFetch(DiscoveryNode node, final FetchSearchRequest request, final SearchServiceListener<FetchSearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ FetchSearchResult result = searchService.executeFetchPhase(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchFetchByIdTransportHandler.ACTION, request, new BaseTransportResponseHandler<FetchSearchResult>() {
+
+ @Override
+ public FetchSearchResult newInstance() {
+ return new FetchSearchResult();
+ }
+
+ @Override
+ public void handleResponse(FetchSearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteScan(DiscoveryNode node, final ShardSearchRequest request, final SearchServiceListener<QuerySearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ QuerySearchResult result = searchService.executeScan(request);
+ listener.onResult(result);
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchScanTransportHandler.ACTION, request, new BaseTransportResponseHandler<QuerySearchResult>() {
+
+ @Override
+ public QuerySearchResult newInstance() {
+ return new QuerySearchResult();
+ }
+
+ @Override
+ public void handleResponse(QuerySearchResult response) {
+ listener.onResult(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ public void sendExecuteScan(DiscoveryNode node, final InternalScrollSearchRequest request, final SearchServiceListener<QueryFetchSearchResult> listener) {
+ if (clusterService.state().nodes().localNodeId().equals(node.id())) {
+ try {
+ ScrollQueryFetchSearchResult result = searchService.executeScan(request);
+ listener.onResult(result.result());
+ } catch (Throwable e) {
+ listener.onFailure(e);
+ }
+ } else {
+ transportService.sendRequest(node, SearchScanScrollTransportHandler.ACTION, request, new BaseTransportResponseHandler<ScrollQueryFetchSearchResult>() {
+
+ @Override
+ public ScrollQueryFetchSearchResult newInstance() {
+ return new ScrollQueryFetchSearchResult();
+ }
+
+ @Override
+ public void handleResponse(ScrollQueryFetchSearchResult response) {
+ listener.onResult(response.result());
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+ }
+
+ class SearchFreeContextRequest extends TransportRequest {
+
+ private long id;
+
+ SearchFreeContextRequest() {
+ }
+
+ SearchFreeContextRequest(TransportRequest request, long id) {
+ super(request);
+ this.id = id;
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ }
+ }
+
+ class SearchFreeContextTransportHandler extends BaseTransportRequestHandler<SearchFreeContextRequest> {
+
+ static final String ACTION = "search/freeContext";
+
+ @Override
+ public SearchFreeContextRequest newInstance() {
+ return new SearchFreeContextRequest();
+ }
+
+ @Override
+ public void messageReceived(SearchFreeContextRequest request, TransportChannel channel) throws Exception {
+ searchService.freeContext(request.id());
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ // freeing the context is cheap,
+ // no need to fork it to another thread
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ class ClearScrollContextsRequest extends TransportRequest {
+
+ ClearScrollContextsRequest() {
+ }
+
+ ClearScrollContextsRequest(TransportRequest request) {
+ super(request);
+ }
+
+ }
+
+ class ClearScrollContextsTransportHandler extends BaseTransportRequestHandler<ClearScrollContextsRequest> {
+
+ static final String ACTION = "search/clearScrollContexts";
+
+ @Override
+ public ClearScrollContextsRequest newInstance() {
+ return new ClearScrollContextsRequest();
+ }
+
+ @Override
+ public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception {
+ searchService.freeAllScrollContexts();
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ // freeing the contexts is cheap,
+ // no need to fork it to another thread
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+ private class SearchDfsTransportHandler extends BaseTransportRequestHandler<ShardSearchRequest> {
+
+ static final String ACTION = "search/phase/dfs";
+
+ @Override
+ public ShardSearchRequest newInstance() {
+ return new ShardSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(ShardSearchRequest request, TransportChannel channel) throws Exception {
+ DfsSearchResult result = searchService.executeDfsPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchQueryTransportHandler extends BaseTransportRequestHandler<ShardSearchRequest> {
+
+ static final String ACTION = "search/phase/query";
+
+ @Override
+ public ShardSearchRequest newInstance() {
+ return new ShardSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(ShardSearchRequest request, TransportChannel channel) throws Exception {
+ QuerySearchResult result = searchService.executeQueryPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchQueryByIdTransportHandler extends BaseTransportRequestHandler<QuerySearchRequest> {
+
+ static final String ACTION = "search/phase/query/id";
+
+ @Override
+ public QuerySearchRequest newInstance() {
+ return new QuerySearchRequest();
+ }
+
+ @Override
+ public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
+ QuerySearchResult result = searchService.executeQueryPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchQueryScrollTransportHandler extends BaseTransportRequestHandler<InternalScrollSearchRequest> {
+
+ static final String ACTION = "search/phase/query/scroll";
+
+ @Override
+ public InternalScrollSearchRequest newInstance() {
+ return new InternalScrollSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
+ ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchQueryFetchTransportHandler extends BaseTransportRequestHandler<ShardSearchRequest> {
+
+ static final String ACTION = "search/phase/query+fetch";
+
+ @Override
+ public ShardSearchRequest newInstance() {
+ return new ShardSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(ShardSearchRequest request, TransportChannel channel) throws Exception {
+ QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchQueryQueryFetchTransportHandler extends BaseTransportRequestHandler<QuerySearchRequest> {
+
+ static final String ACTION = "search/phase/query/query+fetch";
+
+ @Override
+ public QuerySearchRequest newInstance() {
+ return new QuerySearchRequest();
+ }
+
+ @Override
+ public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
+ QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchFetchByIdTransportHandler extends BaseTransportRequestHandler<FetchSearchRequest> {
+
+ static final String ACTION = "search/phase/fetch/id";
+
+ @Override
+ public FetchSearchRequest newInstance() {
+ return new FetchSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(FetchSearchRequest request, TransportChannel channel) throws Exception {
+ FetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchQueryFetchScrollTransportHandler extends BaseTransportRequestHandler<InternalScrollSearchRequest> {
+
+ static final String ACTION = "search/phase/query+fetch/scroll";
+
+ @Override
+ public InternalScrollSearchRequest newInstance() {
+ return new InternalScrollSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
+ ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchScanTransportHandler extends BaseTransportRequestHandler<ShardSearchRequest> {
+
+ static final String ACTION = "search/phase/scan";
+
+ @Override
+ public ShardSearchRequest newInstance() {
+ return new ShardSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(ShardSearchRequest request, TransportChannel channel) throws Exception {
+ QuerySearchResult result = searchService.executeScan(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+
+ private class SearchScanScrollTransportHandler extends BaseTransportRequestHandler<InternalScrollSearchRequest> {
+
+ static final String ACTION = "search/phase/scan/scroll";
+
+ @Override
+ public InternalScrollSearchRequest newInstance() {
+ return new InternalScrollSearchRequest();
+ }
+
+ @Override
+ public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
+ ScrollQueryFetchSearchResult result = searchService.executeScan(request);
+ channel.sendResponse(result);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SEARCH;
+ }
+ }
+}
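
Every send* method above follows the same dispatch shape: execute in-process
when the target is the local node (skipping serialization), otherwise send the
request over the transport service with a typed response handler. A distilled,
hypothetical sketch of that shape (not part of the patch):

    <T> void dispatch(DiscoveryNode node, Callable<T> localCall, Runnable remoteCall,
                      SearchServiceListener<T> listener) {
        if (clusterService.state().nodes().localNodeId().equals(node.id())) {
            try {
                listener.onResult(localCall.call()); // local short-circuit, no wire round-trip
            } catch (Throwable t) {
                listener.onFailure(t);
            }
        } else {
            remoteCall.run(); // transportService.sendRequest(node, ACTION, request, handler)
        }
    }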
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java
new file mode 100644
index 0000000..62bd73f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+
+/**
+ * A base class for all aggregation builders, holding the aggregation's name and type.
+ */
+public abstract class AbstractAggregationBuilder implements ToXContent {
+
+ protected final String name;
+ protected final String type;
+
+ protected AbstractAggregationBuilder(String name, String type) {
+ this.name = name;
+ this.type = type;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/Aggregation.java b/src/main/java/org/elasticsearch/search/aggregations/Aggregation.java
new file mode 100644
index 0000000..614fafb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/Aggregation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+/**
+ * An aggregation result, identified by its name.
+ */
+public interface Aggregation {
+
+ /**
+ * @return The name of this aggregation.
+ */
+ String getName();
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationBinaryParseElement.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationBinaryParseElement.java
new file mode 100644
index 0000000..8f929e8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationBinaryParseElement.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses aggregation definitions supplied as a binary (bytes) value by re-parsing the embedded source.
+ */
+public class AggregationBinaryParseElement extends AggregationParseElement {
+
+ @Inject
+ public AggregationBinaryParseElement(AggregatorParsers aggregatorParsers) {
+ super(aggregatorParsers);
+ }
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ byte[] aggSource = parser.binaryValue();
+ XContentParser aSourceParser = XContentFactory.xContent(aggSource).createParser(aggSource);
+ try {
+ aSourceParser.nextToken(); // move past the first START_OBJECT
+ super.parse(aSourceParser, context);
+ } finally {
+ aSourceParser.close();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
new file mode 100644
index 0000000..19f758c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A base class for all bucket aggregation builders.
+ */
+public abstract class AggregationBuilder<B extends AggregationBuilder<B>> extends AbstractAggregationBuilder {
+
+ private List<AbstractAggregationBuilder> aggregations;
+ private BytesReference aggregationsBinary;
+
+ protected AggregationBuilder(String name, String type) {
+ super(name, type);
+ }
+
+ /**
+ * Adds a sub-aggregation to this bucket aggregation.
+ */
+ @SuppressWarnings("unchecked")
+ public B subAggregation(AbstractAggregationBuilder aggregation) {
+ if (aggregations == null) {
+ aggregations = Lists.newArrayList();
+ }
+ aggregations.add(aggregation);
+ return (B) this;
+ }
+
+ /**
+ * Sets a raw (xcontent / json) sub-aggregation.
+ */
+ public B subAggregation(byte[] aggregationsBinary) {
+ return subAggregation(aggregationsBinary, 0, aggregationsBinary.length);
+ }
+
+ /**
+ * Sets a raw (xcontent / json) sub-aggregation.
+ */
+ public B subAggregation(byte[] aggregationsBinary, int aggregationsBinaryOffset, int aggregationsBinaryLength) {
+ return subAggregation(new BytesArray(aggregationsBinary, aggregationsBinaryOffset, aggregationsBinaryLength));
+ }
+
+ /**
+ * Sets a raw (xcontent / json) sub-aggregation.
+ */
+ @SuppressWarnings("unchecked")
+ public B subAggregation(BytesReference aggregationsBinary) {
+ this.aggregationsBinary = aggregationsBinary;
+ return (B) this;
+ }
+
+ /**
+ * Sets a raw (xcontent / json) sub-aggregation.
+ */
+ public B subAggregation(XContentBuilder aggs) {
+ return subAggregation(aggs.bytes());
+ }
+
+ /**
+ * Sets a raw (xcontent / json) sub-aggregation.
+ */
+ public B subAggregation(Map<String, Object> aggs) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
+ builder.map(aggs);
+ return subAggregation(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + aggs + "]", e);
+ }
+ }
+
+ @Override
+ public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+
+ builder.field(type);
+ internalXContent(builder, params);
+
+ if (aggregations != null || aggregationsBinary != null) {
+ builder.startObject("aggregations");
+
+ if (aggregations != null) {
+ for (AbstractAggregationBuilder subAgg : aggregations) {
+ subAgg.toXContent(builder, params);
+ }
+ }
+
+ if (aggregationsBinary != null) {
+ if (XContentFactory.xContentType(aggregationsBinary) == builder.contentType()) {
+ builder.rawField("aggregations", aggregationsBinary);
+ } else {
+ builder.field("aggregations_binary", aggregationsBinary);
+ }
+ }
+
+ builder.endObject();
+ }
+
+ return builder.endObject();
+ }
+
+ protected abstract XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException;
+}
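
Given the toXContent implementation above, a builder with one structured
sub-aggregation serializes along these lines (key names come from the code
above; field values are illustrative):

    {
      "categories" : {
        "terms" : { "field" : "category" },
        "aggregations" : {
          "avg_price" : {
            "avg" : { "field" : "price" }
          }
        }
      }
    }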
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java
new file mode 100644
index 0000000..743d37d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridBuilder;
+import org.elasticsearch.search.aggregations.bucket.global.GlobalBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramBuilder;
+import org.elasticsearch.search.aggregations.bucket.missing.MissingBuilder;
+import org.elasticsearch.search.aggregations.bucket.nested.NestedBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.RangeBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4RangeBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.AvgBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.MaxBuilder;
+import org.elasticsearch.search.aggregations.metrics.min.MinBuilder;
+import org.elasticsearch.search.aggregations.metrics.stats.StatsBuilder;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsBuilder;
+import org.elasticsearch.search.aggregations.metrics.sum.SumBuilder;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountBuilder;
+
+/**
+ * A utility class with static factory methods for all bundled aggregation builders.
+ */
+public class AggregationBuilders {
+
+ protected AggregationBuilders() {
+ }
+
+ public static ValueCountBuilder count(String name) {
+ return new ValueCountBuilder(name);
+ }
+
+ public static AvgBuilder avg(String name) {
+ return new AvgBuilder(name);
+ }
+
+ public static MaxBuilder max(String name) {
+ return new MaxBuilder(name);
+ }
+
+ public static MinBuilder min(String name) {
+ return new MinBuilder(name);
+ }
+
+ public static SumBuilder sum(String name) {
+ return new SumBuilder(name);
+ }
+
+ public static StatsBuilder stats(String name) {
+ return new StatsBuilder(name);
+ }
+
+ public static ExtendedStatsBuilder extendedStats(String name) {
+ return new ExtendedStatsBuilder(name);
+ }
+
+ public static FilterAggregationBuilder filter(String name) {
+ return new FilterAggregationBuilder(name);
+ }
+
+ public static GlobalBuilder global(String name) {
+ return new GlobalBuilder(name);
+ }
+
+ public static MissingBuilder missing(String name) {
+ return new MissingBuilder(name);
+ }
+
+ public static NestedBuilder nested(String name) {
+ return new NestedBuilder(name);
+ }
+
+ public static GeoDistanceBuilder geoDistance(String name) {
+ return new GeoDistanceBuilder(name);
+ }
+
+ public static HistogramBuilder histogram(String name) {
+ return new HistogramBuilder(name);
+ }
+
+ public static GeoHashGridBuilder geohashGrid(String name) {
+ return new GeoHashGridBuilder(name);
+ }
+
+ public static DateHistogramBuilder dateHistogram(String name) {
+ return new DateHistogramBuilder(name);
+ }
+
+ public static RangeBuilder range(String name) {
+ return new RangeBuilder(name);
+ }
+
+ public static DateRangeBuilder dateRange(String name) {
+ return new DateRangeBuilder(name);
+ }
+
+ public static IPv4RangeBuilder ipRange(String name) {
+ return new IPv4RangeBuilder(name);
+ }
+
+ public static TermsBuilder terms(String name) {
+ return new TermsBuilder(name);
+ }
+}
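
The factory methods above compose with subAggregation(...) from the builder
base class; a usage sketch, assuming a connected Java Client and the field(...)
setters defined on the concrete builders elsewhere in this patch:

    SearchResponse response = client.prepareSearch("products")
            .addAggregation(AggregationBuilders.terms("categories").field("category")
                    .subAggregation(AggregationBuilders.avg("avg_price").field("price")))
            .execute().actionGet();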
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationExecutionException.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationExecutionException.java
new file mode 100644
index 0000000..60afa4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationExecutionException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Thrown when failing to execute an aggregation
+ */
+public class AggregationExecutionException extends ElasticsearchException {
+
+ public AggregationExecutionException(String msg) {
+ super(msg);
+ }
+
+ public AggregationExecutionException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java
new file mode 100644
index 0000000..b7f8595
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Thrown when failing to initialize an aggregation
+ */
+public class AggregationInitializationException extends ElasticsearchException {
+
+ public AggregationInitializationException(String msg) {
+ super(msg);
+ }
+
+ public AggregationInitializationException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java
new file mode 100644
index 0000000..25e114e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.search.aggregations.bucket.filter.FilterParser;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridParser;
+import org.elasticsearch.search.aggregations.bucket.global.GlobalParser;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser;
+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramParser;
+import org.elasticsearch.search.aggregations.bucket.missing.MissingParser;
+import org.elasticsearch.search.aggregations.bucket.nested.NestedParser;
+import org.elasticsearch.search.aggregations.bucket.range.RangeParser;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeParser;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IpRangeParser;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsParser;
+import org.elasticsearch.search.aggregations.metrics.avg.AvgParser;
+import org.elasticsearch.search.aggregations.metrics.max.MaxParser;
+import org.elasticsearch.search.aggregations.metrics.min.MinParser;
+import org.elasticsearch.search.aggregations.metrics.stats.StatsParser;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsParser;
+import org.elasticsearch.search.aggregations.metrics.sum.SumParser;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser;
+
+import java.util.List;
+
+/**
+ * The main module for aggregations (binding all aggregation components together)
+ */
+public class AggregationModule extends AbstractModule {
+
+ private List<Class<? extends Aggregator.Parser>> parsers = Lists.newArrayList();
+
+ public AggregationModule() {
+ parsers.add(AvgParser.class);
+ parsers.add(SumParser.class);
+ parsers.add(MinParser.class);
+ parsers.add(MaxParser.class);
+ parsers.add(StatsParser.class);
+ parsers.add(ExtendedStatsParser.class);
+ parsers.add(ValueCountParser.class);
+
+ parsers.add(GlobalParser.class);
+ parsers.add(MissingParser.class);
+ parsers.add(FilterParser.class);
+ parsers.add(TermsParser.class);
+ parsers.add(RangeParser.class);
+ parsers.add(DateRangeParser.class);
+ parsers.add(IpRangeParser.class);
+ parsers.add(HistogramParser.class);
+ parsers.add(DateHistogramParser.class);
+ parsers.add(GeoDistanceParser.class);
+ parsers.add(GeoHashGridParser.class);
+ parsers.add(NestedParser.class);
+ }
+
+ /**
+ * Enables extending the aggregation module with a custom aggregation parser.
+ *
+ * @param parser The parser for the custom aggregator.
+ */
+ public void addAggregatorParser(Class<? extends Aggregator.Parser> parser) {
+ parsers.add(parser);
+ }
+
+ @Override
+ protected void configure() {
+ Multibinder<Aggregator.Parser> multibinder = Multibinder.newSetBinder(binder(), Aggregator.Parser.class);
+ for (Class<? extends Aggregator.Parser> parser : parsers) {
+ multibinder.addBinding().to(parser);
+ }
+ bind(AggregatorParsers.class).asEagerSingleton();
+ bind(AggregationParseElement.class).asEagerSingleton();
+ bind(AggregationPhase.class).asEagerSingleton();
+ }
+
+}
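
Custom aggregations can hook into the parser list above before the module is
configured; a sketch, where MyAggregationParser is a hypothetical
Aggregator.Parser implementation:

    AggregationModule module = new AggregationModule();
    module.addAggregatorParser(MyAggregationParser.class); // hypothetical custom parser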
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationParseElement.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationParseElement.java
new file mode 100644
index 0000000..767cbfb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationParseElement.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * The search parse element that is responsible for parsing the aggregations part of the request.
+ *
+ * For example:
+ * <pre>
+ * curl -XGET 'localhost:9200/_search?search_type=count' -d '{
+ *     query: {
+ *         match_all : {}
+ *     },
+ *     aggs : {
+ *         avg_price: {
+ *             avg : { field : price }
+ *         },
+ *         categories: {
+ *             terms : { field : category, size : 12 },
+ *             aggs: {
+ *                 avg_price : { avg : { field : price }}
+ *             }
+ *         }
+ *     }
+ * }'
+ * </pre>
+ */
+public class AggregationParseElement implements SearchParseElement {
+
+ private final AggregatorParsers aggregatorParsers;
+
+ @Inject
+ public AggregationParseElement(AggregatorParsers aggregatorParsers) {
+ this.aggregatorParsers = aggregatorParsers;
+ }
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ AggregatorFactories factories = aggregatorParsers.parseAggregators(parser, context);
+ context.aggregations(new SearchContextAggregations(factories));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
new file mode 100644
index 0000000..c30b9d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XCollector;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.query.QueryPhaseExecutionException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The search phase that parses, collects, and builds the aggregations of a search request.
+ */
+public class AggregationPhase implements SearchPhase {
+
+ private final AggregationParseElement parseElement;
+
+ private final AggregationBinaryParseElement binaryParseElement;
+
+ @Inject
+ public AggregationPhase(AggregationParseElement parseElement, AggregationBinaryParseElement binaryParseElement) {
+ this.parseElement = parseElement;
+ this.binaryParseElement = binaryParseElement;
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.<String, SearchParseElement>builder()
+ .put("aggregations", parseElement)
+ .put("aggs", parseElement)
+ .put("aggregations_binary", binaryParseElement)
+ .put("aggregationsBinary", binaryParseElement)
+ .put("aggs_binary", binaryParseElement)
+ .put("aggsBinary", binaryParseElement)
+ .build();
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ if (context.aggregations() != null) {
+ AggregationContext aggregationContext = new AggregationContext(context);
+ context.aggregations().aggregationContext(aggregationContext);
+
+ List<Aggregator> collectors = new ArrayList<Aggregator>();
+ Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext);
+ for (int i = 0; i < aggregators.length; i++) {
+ if (!(aggregators[i] instanceof GlobalAggregator)) {
+ Aggregator aggregator = aggregators[i];
+ if (aggregator.shouldCollect()) {
+ collectors.add(aggregator);
+ }
+ }
+ }
+ context.aggregations().aggregators(aggregators);
+ if (!collectors.isEmpty()) {
+ context.searcher().addMainQueryCollector(new AggregationsCollector(collectors, aggregationContext));
+ }
+ }
+ }
+
+ @Override
+ public void execute(SearchContext context) throws ElasticsearchException {
+ if (context.aggregations() == null) {
+ return;
+ }
+
+ if (context.queryResult().aggregations() != null) {
+ // no need to compute the aggregations twice, they are computed on a per-context basis
+ return;
+ }
+
+ Aggregator[] aggregators = context.aggregations().aggregators();
+ boolean success = false;
+ try {
+ List<Aggregator> globals = new ArrayList<Aggregator>();
+ for (int i = 0; i < aggregators.length; i++) {
+ if (aggregators[i] instanceof GlobalAggregator) {
+ globals.add(aggregators[i]);
+ }
+ }
+
+ // optimize the global collector based execution
+ if (!globals.isEmpty()) {
+ AggregationsCollector collector = new AggregationsCollector(globals, context.aggregations().aggregationContext());
+ Query query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
+ Filter searchFilter = context.searchFilter(context.types());
+ if (searchFilter != null) {
+ query = new XFilteredQuery(query, searchFilter);
+ }
+ try {
+ context.searcher().search(query, collector);
+ } catch (Exception e) {
+ throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
+ }
+ collector.postCollection();
+ }
+
+ List<InternalAggregation> aggregations = new ArrayList<InternalAggregation>(aggregators.length);
+ for (Aggregator aggregator : context.aggregations().aggregators()) {
+ aggregations.add(aggregator.buildAggregation(0));
+ }
+ context.queryResult().aggregations(new InternalAggregations(aggregations));
+ success = true;
+ } finally {
+ Releasables.release(success, aggregators);
+ }
+
+ }
+
+
+ public static class AggregationsCollector extends XCollector {
+
+ private final AggregationContext aggregationContext;
+ private final List<Aggregator> collectors;
+
+ public AggregationsCollector(List<Aggregator> collectors, AggregationContext aggregationContext) {
+ this.collectors = collectors;
+ this.aggregationContext = aggregationContext;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ aggregationContext.setScorer(scorer);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ for (int i = 0; i < collectors.size(); i++) {
+ collectors.get(i).collect(doc, 0);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ aggregationContext.setNextReader(context);
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ @Override
+ public void postCollection() {
+ for (int i = 0; i < collectors.size(); i++) {
+ collectors.get(i).postCollection();
+ }
+ }
+ }
+}
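
For orientation, here is a minimal sketch of the per-shard lifecycle this phase implements. It assumes the ES 1.0.x internals introduced in this patch; context, parseElement and binaryParseElement stand for objects supplied by the search module wiring.

    AggregationPhase phase = new AggregationPhase(parseElement, binaryParseElement);
    phase.preProcess(context);  // creates the top-level aggregators; collecting non-global
                                // aggregators are hooked into the main query through an
                                // AggregationsCollector
    // ... the query phase scores documents; collect(doc) fans out to each aggregator ...
    phase.execute(context);     // second pass for global aggregators over a match-all query,
                                // then buildAggregation(0) for every top-level aggregator
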
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationStreams.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationStreams.java
new file mode 100644
index 0000000..97985f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationStreams.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
+/**
+ * A registry for all the dedicated streams in the aggregation module. This is to support dynamically registered
+ * aggregations that know how to stream themselves.
+ */
+public class AggregationStreams {
+
+ private static ImmutableMap<BytesReference, Stream> streams = ImmutableMap.of();
+
+ /**
+ * A stream that knows how to read an aggregation from the input.
+ */
+ public static interface Stream {
+ InternalAggregation readResult(StreamInput in) throws IOException;
+ }
+
+ /**
+ * Registers the given stream and associates it with the given types.
+ *
+ * @param stream The stream to register
+ * @param types The types associated with the stream
+ */
+ public static synchronized void registerStream(Stream stream, BytesReference... types) {
+ MapBuilder<BytesReference, Stream> uStreams = MapBuilder.newMapBuilder(streams);
+ for (BytesReference type : types) {
+ uStreams.put(type, stream);
+ }
+ streams = uStreams.immutableMap();
+ }
+
+ /**
+ * Returns the stream that is registered for the given type
+ *
+ * @param type The given type
+ * @return The associated stream
+ */
+ public static Stream stream(BytesReference type) {
+ return streams.get(type);
+ }
+
+}
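
A concrete aggregation typically registers its stream once at startup (see TransportAggregationModule later in this patch). A minimal sketch of that pattern, where InternalFoo is a hypothetical InternalAggregation subclass:

    // Hypothetical aggregation result class, shown only to illustrate stream registration.
    public class InternalFoo extends InternalAggregation {

        public static final Type TYPE = new Type("foo");

        private static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
            @Override
            public InternalAggregation readResult(StreamInput in) throws IOException {
                InternalFoo result = new InternalFoo();
                result.readFrom(in); // Streamable deserialization
                return result;
            }
        };

        public static void registerStreams() {
            // associate the stream with this aggregation's wire type
            AggregationStreams.registerStream(STREAM, TYPE.stream());
        }

        // type(), reduce(), readFrom(), writeTo() and toXContent() omitted in this sketch
    }
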
diff --git a/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java b/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java
new file mode 100644
index 0000000..a19dad9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Represents a set of computed aggregations.
+ */
+public interface Aggregations extends Iterable<Aggregation> {
+
+ /**
+ * The list of {@link Aggregation}s.
+ */
+ List<Aggregation> asList();
+
+ /**
+ * Returns the {@link Aggregation}s keyed by aggregation name.
+ */
+ Map<String, Aggregation> asMap();
+
+ /**
+ * Returns the {@link Aggregation}s keyed by aggregation name.
+ */
+ Map<String, Aggregation> getAsMap();
+
+ /**
+ * Returns the aggregation that is associated with the specified name.
+ */
+ <A extends Aggregation> A get(String name);
+
+}
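
On the consuming side, a hedged sketch of how response aggregations are typically accessed through this interface (searchResponse and the Avg result interface are assumed to come from the surrounding API):

    Aggregations aggs = searchResponse.getAggregations();
    Avg avgPrice = aggs.get("avg_price");               // typed lookup by name
    Aggregation same = aggs.getAsMap().get("avg_price");
    for (Aggregation aggregation : aggs) {              // Aggregations is Iterable
        System.out.println(aggregation.getName());
    }
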
diff --git a/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java
new file mode 100644
index 0000000..d07f2f4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public abstract class Aggregator implements Releasable {
+
+ /**
+ * Defines how an aggregator executes when nested in other aggregators, relative to the buckets those parents create.
+ */
+ public static enum BucketAggregationMode {
+
+ /**
+ * In this mode, a new aggregator instance will be created per bucket (created by the parent aggregator)
+ */
+ PER_BUCKET,
+
+ /**
+ * In this mode, a single aggregator instance will be created per parent aggregator, and it will handle the aggregations of all the parent's buckets.
+ */
+ MULTI_BUCKETS
+ }
+
+ protected final String name;
+ protected final Aggregator parent;
+ protected final AggregationContext context;
+ protected final int depth;
+ protected final long estimatedBucketCount;
+
+ protected final BucketAggregationMode bucketAggregationMode;
+ protected final AggregatorFactories factories;
+ protected final Aggregator[] subAggregators;
+
+ /**
+ * Constructs a new Aggregator.
+ *
+ * @param name The name of the aggregation
+ * @param bucketAggregationMode The nature of execution as a sub-aggregator (see {@link BucketAggregationMode})
+ * @param factories The factories for all the sub-aggregators under this aggregator
+ * @param estimatedBucketsCount When serving as a sub-aggregator, indicates how many buckets the parent aggregator is expected to generate.
+ * @param context The aggregation context
+ * @param parent The parent aggregator (may be {@code null} for top level aggregators)
+ */
+ protected Aggregator(String name, BucketAggregationMode bucketAggregationMode, AggregatorFactories factories, long estimatedBucketsCount, AggregationContext context, Aggregator parent) {
+ this.name = name;
+ this.parent = parent;
+ this.estimatedBucketCount = estimatedBucketsCount;
+ this.context = context;
+ this.depth = parent == null ? 0 : 1 + parent.depth();
+ this.bucketAggregationMode = bucketAggregationMode;
+ assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggregatorFactories.EMPTY instead";
+ this.factories = factories;
+ this.subAggregators = factories.createSubAggregators(this, estimatedBucketsCount);
+ }
+
+ /**
+ * @return The name of the aggregation.
+ */
+ public String name() {
+ return name;
+ }
+
+ /** Return the estimated number of buckets. */
+ public final long estimatedBucketCount() {
+ return estimatedBucketCount;
+ }
+
+ /** Return the depth of this aggregator in the aggregation tree. */
+ public final int depth() {
+ return depth;
+ }
+
+ /**
+ * @return The parent aggregator of this aggregator. The aggregations are hierarchical in the sense that some can
+ * be composed out of others (more specifically, bucket aggregations can define other aggregations that will
+ * be aggregated per bucket). This method returns the direct parent aggregator that contains this aggregator, or
+ * {@code null} if there is none (meaning, this aggregator is a top level one)
+ */
+ public Aggregator parent() {
+ return parent;
+ }
+
+ public Aggregator[] subAggregators() {
+ return subAggregators;
+ }
+
+ /**
+ * @return The current aggregation context.
+ */
+ public AggregationContext context() {
+ return context;
+ }
+
+ /**
+ * @return The bucket aggregation mode of this aggregator. This mode defines the way the aggregation is executed
+ * @see BucketAggregationMode
+ */
+ public BucketAggregationMode bucketAggregationMode() {
+ return bucketAggregationMode;
+ }
+
+ /**
+ * @return Whether this aggregator is in the state where it can collect documents. Some aggregators can do their aggregations without
+ * actually collecting documents; for example, an aggregator that computes stats over unmapped fields doesn't need to collect
+ * anything as it knows to just return "empty" stats as the aggregation result.
+ */
+ public abstract boolean shouldCollect();
+
+ /**
+ * Called during the query phase, to collect & aggregate the given document.
+ *
+ * @param doc The document to be collected/aggregated
+ * @param owningBucketOrdinal The ordinal of the bucket this aggregator belongs to, assuming this aggregator is not a top level aggregator.
+ * Typically, aggregators with {@code #bucketAggregationMode} set to {@link BucketAggregationMode#MULTI_BUCKETS}
+ * will heavily depend on this ordinal. Other aggregators may or may not use it and can treat this ordinal as just
+ * extra information from the aggregation context. For top level aggregators, the ordinal will always be
+ * equal to 0.
+ * @throws IOException
+ */
+ public abstract void collect(int doc, long owningBucketOrdinal) throws IOException;
+
+ /**
+ * Called after the collection of all documents is done.
+ */
+ public final void postCollection() {
+ for (int i = 0; i < subAggregators.length; i++) {
+ subAggregators[i].postCollection();
+ }
+ doPostCollection();
+ }
+
+ /** Called upon release of the aggregator. */
+ @Override
+ public boolean release() {
+ boolean success = false;
+ try {
+ doRelease();
+ success = true;
+ } finally {
+ Releasables.release(success, subAggregators);
+ }
+ return true;
+ }
+
+ /** Release instance-specific data. */
+ protected void doRelease() {}
+
+ /**
+ * Can be overridden by aggregator implementations to be called back when the collection phase ends.
+ */
+ protected void doPostCollection() {
+ }
+
+ /**
+ * @return The aggregated & built aggregation
+ */
+ public abstract InternalAggregation buildAggregation(long owningBucketOrdinal);
+
+ public abstract InternalAggregation buildEmptyAggregation();
+
+ protected final InternalAggregations buildEmptySubAggregations() {
+ List<InternalAggregation> aggs = new ArrayList<InternalAggregation>();
+ for (Aggregator aggregator : subAggregators) {
+ aggs.add(aggregator.buildEmptyAggregation());
+ }
+ return new InternalAggregations(aggs);
+ }
+
+ /**
+ * Parses the aggregation request and creates the appropriate aggregator factory for it.
+ *
+ * @see AggregatorFactory
+ */
+ public static interface Parser {
+
+ /**
+ * @return The aggregation type this parser is associated with.
+ */
+ String type();
+
+ /**
+ * Returns the aggregator factory with which this parser is associated; may return {@code null}, indicating that the
+ * aggregation should be skipped (e.g. when trying to aggregate on unmapped fields).
+ *
+ * @param aggregationName The name of the aggregation
+ * @param parser The xcontent parser
+ * @param context The search context
+ * @return The resolved aggregator factory or {@code null} in case the aggregation should be skipped
+ * @throws java.io.IOException When parsing fails
+ */
+ AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException;
+
+ }
+}
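
A minimal sketch of a concrete Aggregator that counts collected documents per owning bucket. The BigArrays/LongArray usage mirrors what this patch does elsewhere; InternalFoo is the hypothetical result class from the sketch above, and LongArray.increment is assumed available as in the metrics aggregators:

    public class DocCountAggregator extends Aggregator {

        private LongArray counts;

        public DocCountAggregator(String name, long estimatedBucketsCount,
                                  AggregationContext context, Aggregator parent) {
            super(name, BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY,
                    estimatedBucketsCount, context, parent);
            counts = BigArrays.newLongArray(Math.max(1, estimatedBucketsCount),
                    context.pageCacheRecycler(), true);
        }

        @Override
        public boolean shouldCollect() {
            return true; // nothing can be answered without seeing documents
        }

        @Override
        public void collect(int doc, long owningBucketOrdinal) throws IOException {
            counts = BigArrays.grow(counts, owningBucketOrdinal + 1);
            counts.increment(owningBucketOrdinal, 1);
        }

        @Override
        public InternalAggregation buildAggregation(long owningBucketOrdinal) {
            long count = owningBucketOrdinal < counts.size() ? counts.get(owningBucketOrdinal) : 0;
            return new InternalFoo(name, count); // hypothetical result class
        }

        @Override
        public InternalAggregation buildEmptyAggregation() {
            return new InternalFoo(name, 0);
        }

        @Override
        protected void doRelease() {
            Releasables.release(counts);
        }
    }
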
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
new file mode 100644
index 0000000..fe3be6d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.search.aggregations.Aggregator.BucketAggregationMode;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * An immutable collection of {@link AggregatorFactory}s, able to create the sub-aggregators
+ * or top-level aggregators that they describe.
+ */
+public class AggregatorFactories {
+
+ public static final AggregatorFactories EMPTY = new Empty();
+
+ private final AggregatorFactory[] factories;
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ private AggregatorFactories(AggregatorFactory[] factories) {
+ this.factories = factories;
+ }
+
+ /**
+ * Create all aggregators so that they can be consumed with multiple buckets.
+ */
+ public Aggregator[] createSubAggregators(Aggregator parent, final long estimatedBucketsCount) {
+ Aggregator[] aggregators = new Aggregator[count()];
+ for (int i = 0; i < factories.length; ++i) {
+ final AggregatorFactory factory = factories[i];
+ final Aggregator first = factory.create(parent.context(), parent, estimatedBucketsCount);
+ if (first.bucketAggregationMode() == BucketAggregationMode.MULTI_BUCKETS) {
+ // This aggregator already supports multiple bucket ordinals, can be used directly
+ aggregators[i] = first;
+ continue;
+ }
+ // the aggregator doesn't support multiple ordinals, let's wrap it so that it does.
+ aggregators[i] = new Aggregator(first.name(), BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, 1, first.context(), first.parent()) {
+
+ ObjectArray<Aggregator> aggregators;
+
+ {
+ // if estimated count is zero, we at least create a single aggregator.
+ // The estimated count is only an estimate and we can't rely on how it was computed (that is, an
+ // estimate of 0 should not imply that we'll end up without any buckets)
+ long arraySize = estimatedBucketsCount > 0 ? estimatedBucketsCount : 1;
+ aggregators = BigArrays.newObjectArray(arraySize, context.pageCacheRecycler());
+ aggregators.set(0, first);
+ for (long i = 1; i < arraySize; ++i) {
+ aggregators.set(i, factory.create(parent.context(), parent, estimatedBucketsCount));
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return first.shouldCollect();
+ }
+
+ @Override
+ protected void doPostCollection() {
+ for (long i = 0; i < aggregators.size(); ++i) {
+ final Aggregator aggregator = aggregators.get(i);
+ if (aggregator != null) {
+ aggregator.postCollection();
+ }
+ }
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ aggregators = BigArrays.grow(aggregators, owningBucketOrdinal + 1);
+ Aggregator aggregator = aggregators.get(owningBucketOrdinal);
+ if (aggregator == null) {
+ aggregator = factory.create(parent.context(), parent, estimatedBucketsCount);
+ aggregators.set(owningBucketOrdinal, aggregator);
+ }
+ aggregator.collect(doc, 0);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ // The bucket ordinal may be out of range in case of e.g. a terms/filter/terms where
+ // the filter matches no document in the highest buckets of the first terms agg
+ if (owningBucketOrdinal >= aggregators.size() || aggregators.get(owningBucketOrdinal) == null) {
+ return first.buildEmptyAggregation();
+ } else {
+ return aggregators.get(owningBucketOrdinal).buildAggregation(0);
+ }
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return first.buildEmptyAggregation();
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(aggregators);
+ }
+ };
+ }
+ return aggregators;
+ }
+
+ public Aggregator[] createTopLevelAggregators(AggregationContext ctx) {
+ // These aggregators are going to be used with a single bucket ordinal, no need to wrap the PER_BUCKET ones
+ Aggregator[] aggregators = new Aggregator[factories.length];
+ for (int i = 0; i < factories.length; i++) {
+ aggregators[i] = factories[i].create(ctx, null, 0);
+ }
+ return aggregators;
+ }
+
+ public int count() {
+ return factories.length;
+ }
+
+ void setParent(AggregatorFactory parent) {
+ for (AggregatorFactory factory : factories) {
+ factory.parent = parent;
+ }
+ }
+
+ public void validate() {
+ for (AggregatorFactory factory : factories) {
+ factory.validate();
+ }
+ }
+
+ private final static class Empty extends AggregatorFactories {
+
+ private static final AggregatorFactory[] EMPTY_FACTORIES = new AggregatorFactory[0];
+ private static final Aggregator[] EMPTY_AGGREGATORS = new Aggregator[0];
+
+ private Empty() {
+ super(EMPTY_FACTORIES);
+ }
+
+ @Override
+ public Aggregator[] createSubAggregators(Aggregator parent, long estimatedBucketsCount) {
+ return EMPTY_AGGREGATORS;
+ }
+
+ @Override
+ public Aggregator[] createTopLevelAggregators(AggregationContext ctx) {
+ return EMPTY_AGGREGATORS;
+ }
+
+ }
+
+ public static class Builder {
+
+ private List<AggregatorFactory> factories = new ArrayList<AggregatorFactory>();
+
+ public Builder add(AggregatorFactory factory) {
+ factories.add(factory);
+ return this;
+ }
+
+ public AggregatorFactories build() {
+ if (factories.isEmpty()) {
+ return EMPTY;
+ }
+ return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]));
+ }
+ }
+}
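
How the pieces fit together: a sketch of assembling and using a factory tree, much as AggregatorParsers (next file) does. termsFactory and avgFactory stand for any concrete AggregatorFactory instances; aggregationContext is an AggregationContext from the search execution.

    AggregatorFactories subFactories = AggregatorFactories.builder()
            .add(avgFactory)
            .build();
    termsFactory.subFactories(subFactories);  // nest: terms -> avg
    AggregatorFactories topLevel = AggregatorFactories.builder()
            .add(termsFactory)
            .build();
    topLevel.validate();                      // recursively validates the whole tree
    Aggregator[] aggregators = topLevel.createTopLevelAggregators(aggregationContext);
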
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java
new file mode 100644
index 0000000..e7c14fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+/**
+ * A factory that knows how to create an {@link Aggregator} of a specific type.
+ */
+public abstract class AggregatorFactory {
+
+ protected String name;
+ protected String type;
+ protected AggregatorFactory parent;
+ protected AggregatorFactories factories = AggregatorFactories.EMPTY;
+
+ /**
+ * Constructs a new aggregator factory.
+ *
+ * @param name The aggregation name
+ * @param type The aggregation type
+ */
+ public AggregatorFactory(String name, String type) {
+ this.name = name;
+ this.type = type;
+ }
+
+ /**
+ * Registers sub-factories with this factory. The sub-factories will be responsible for the creation of sub-aggregators under the
+ * aggregator created by this factory.
+ *
+ * @param subFactories The sub-factories
+ * @return this factory (fluent interface)
+ */
+ public AggregatorFactory subFactories(AggregatorFactories subFactories) {
+ this.factories = subFactories;
+ this.factories.setParent(this);
+ return this;
+ }
+
+ /**
+ * Validates the state of this factory (makes sure the factory is properly configured)
+ */
+ public final void validate() {
+ doValidate();
+ factories.validate();
+ }
+
+ /**
+ * @return The parent factory if one exists (will always return {@code null} for top level aggregator factories).
+ */
+ public AggregatorFactory parent() {
+ return parent;
+ }
+
+ /**
+ * Creates the aggregator
+ *
+ * @param context The aggregation context
+ * @param parent The parent aggregator (if this is a top level factory, the parent will be {@code null})
+ * @param expectedBucketsCount If this is a sub-factory of another factory, this will indicate the number of buckets the parent aggregator
+ * may generate (this is an estimation only). For top level factories, this will always be 0
+ *
+ * @return The created aggregator
+ */
+ public abstract Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount);
+
+ public void doValidate() {
+ }
+
+}
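
A matching factory for the DocCountAggregator sketched after Aggregator.java above; the "doc_count" type name is purely illustrative:

    public class DocCountAggregatorFactory extends AggregatorFactory {

        public DocCountAggregatorFactory(String name) {
            super(name, "doc_count"); // illustrative type name
        }

        @Override
        public Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount) {
            return new DocCountAggregator(name, expectedBucketsCount, context, parent);
        }
    }
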
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java
new file mode 100644
index 0000000..372e119
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A registry for all the aggregator parsers; also serves as the main parser for the aggregations module.
+ */
+public class AggregatorParsers {
+
+ private final ImmutableMap<String, Aggregator.Parser> parsers;
+
+ /**
+ * Constructs the AggregatorParsers out of all the given parsers
+ *
+ * @param parsers The available aggregator parsers (dynamically injected by the {@link org.elasticsearch.search.aggregations.AggregationModule}).
+ */
+ @Inject
+ public AggregatorParsers(Set<Aggregator.Parser> parsers) {
+ MapBuilder<String, Aggregator.Parser> builder = MapBuilder.newMapBuilder();
+ for (Aggregator.Parser parser : parsers) {
+ builder.put(parser.type(), parser);
+ }
+ this.parsers = builder.immutableMap();
+ }
+
+ /**
+ * Returns the parser that is registered under the given aggregation type.
+ *
+ * @param type The aggregation type
+ * @return The parser associated with the given aggregation type.
+ */
+ public Aggregator.Parser parser(String type) {
+ return parsers.get(type);
+ }
+
+ /**
+ * Parses the aggregation request, recursively generating the aggregator factories.
+ *
+ * @param parser The input xcontent that will be parsed.
+ * @param context The search context.
+ *
+ * @return The parsed aggregator factories.
+ *
+ * @throws IOException When parsing fails for unknown reasons.
+ */
+ public AggregatorFactories parseAggregators(XContentParser parser, SearchContext context) throws IOException {
+ return parseAggregators(parser, context, 0);
+ }
+
+
+ private AggregatorFactories parseAggregators(XContentParser parser, SearchContext context, int level) throws IOException {
+ XContentParser.Token token = null;
+ String currentFieldName = null;
+
+ AggregatorFactories.Builder factories = new AggregatorFactories.Builder();
+
+ String aggregationName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ aggregationName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String aggregatorType = null;
+ AggregatorFactory factory = null;
+ AggregatorFactories subFactories = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("aggregations".equals(currentFieldName) || "aggs".equals(currentFieldName)) {
+ subFactories = parseAggregators(parser, context, level+1);
+ } else if (aggregatorType != null) {
+ throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + aggregatorType + "] and [" + currentFieldName + "]. Only one type is allowed.");
+ } else {
+ aggregatorType = currentFieldName;
+ Aggregator.Parser aggregatorParser = parser(aggregatorType);
+ if (aggregatorParser == null) {
+ throw new SearchParseException(context, "Could not find aggregator type [" + currentFieldName + "]");
+ }
+ factory = aggregatorParser.parse(aggregationName, parser, context);
+ }
+ }
+ }
+
+ if (factory == null) {
+ // skipping the aggregation
+ continue;
+ }
+
+ if (subFactories != null) {
+ factory.subFactories(subFactories);
+ }
+
+ if (level == 0) {
+ factory.validate();
+ }
+
+ factories.add(factory);
+ }
+ }
+
+ return factories.build();
+ }
+
+}
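
The request shape this parser walks is an object keyed by aggregation name, containing exactly one aggregator-type sub-object plus an optional "aggs"/"aggregations" object that recurses. An illustrative body (aggregation and field names are made up):

    {
      "by_gender" : {
        "terms" : { "field" : "gender" },
        "aggs" : {
          "avg_age" : {
            "avg" : { "field" : "age" }
          }
        }
      }
    }
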
diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
new file mode 100644
index 0000000..6037d02
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.util.List;
+
+/**
+ * An internal implementation of {@link Aggregation}. Serves as a base class for all aggregation implementations.
+ */
+public abstract class InternalAggregation implements Aggregation, ToXContent, Streamable {
+
+ /**
+ * The aggregation type that holds all the string types that are associated with an aggregation:
+ * <ul>
+ * <li>name - used as the parser type</li>
+ * <li>stream - used as the stream type</li>
+ * </ul>
+ */
+ public static class Type {
+
+ private String name;
+ private BytesReference stream;
+
+ public Type(String name) {
+ this(name, new BytesArray(name));
+ }
+
+ public Type(String name, String stream) {
+ this(name, new BytesArray(stream));
+ }
+
+ public Type(String name, BytesReference stream) {
+ this.name = name;
+ this.stream = stream;
+ }
+
+ /**
+ * @return The name of the type (mainly used for registering the parser for the aggregator; see {@link org.elasticsearch.search.aggregations.Aggregator.Parser#type()}).
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * @return The name of the stream type (used for registering the aggregation stream;
+ * see {@link AggregationStreams#registerStream(AggregationStreams.Stream, org.elasticsearch.common.bytes.BytesReference...)}).
+ */
+ public BytesReference stream() {
+ return stream;
+ }
+ }
+
+ protected static class ReduceContext {
+
+ private final List<InternalAggregation> aggregations;
+ private final CacheRecycler cacheRecycler;
+
+ public ReduceContext(List<InternalAggregation> aggregations, CacheRecycler cacheRecycler) {
+ this.aggregations = aggregations;
+ this.cacheRecycler = cacheRecycler;
+ }
+
+ public List<InternalAggregation> aggregations() {
+ return aggregations;
+ }
+
+ public CacheRecycler cacheRecycler() {
+ return cacheRecycler;
+ }
+ }
+
+
+ protected String name;
+
+ /** Constructs an uninitialized aggregation (used for serialization). */
+ protected InternalAggregation() {}
+
+ /**
+ * Constructs an aggregation with the given name.
+ *
+ * @param name The name of the aggregation.
+ */
+ protected InternalAggregation(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return The {@link Type} of this aggregation
+ */
+ public abstract Type type();
+
+ /**
+ * Reduces the given aggregations to a single one and returns it. In <b>most</b> cases, the assumption is that all the given
+ * aggregations are of the same type (the same type as this aggregation). For best efficiency, when implementing,
+ * try reusing an existing aggregation instance (typically the first in the given list) to save on redundant object
+ * construction.
+ */
+ public abstract InternalAggregation reduce(ReduceContext reduceContext);
+
+
+ /**
+ * Common xcontent fields that are shared among aggregations
+ */
+ public static final class CommonFields {
+ public static final XContentBuilderString BUCKETS = new XContentBuilderString("buckets");
+ public static final XContentBuilderString VALUE = new XContentBuilderString("value");
+ public static final XContentBuilderString VALUE_AS_STRING = new XContentBuilderString("value_as_string");
+ public static final XContentBuilderString DOC_COUNT = new XContentBuilderString("doc_count");
+ public static final XContentBuilderString KEY = new XContentBuilderString("key");
+ public static final XContentBuilderString KEY_AS_STRING = new XContentBuilderString("key_as_string");
+ public static final XContentBuilderString FROM = new XContentBuilderString("from");
+ public static final XContentBuilderString FROM_AS_STRING = new XContentBuilderString("from_as_string");
+ public static final XContentBuilderString TO = new XContentBuilderString("to");
+ public static final XContentBuilderString TO_AS_STRING = new XContentBuilderString("to_as_string");
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
new file mode 100644
index 0000000..17d9387
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.base.Function;
+import com.google.common.collect.*;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * An internal implementation of {@link Aggregations}.
+ */
+public class InternalAggregations implements Aggregations, ToXContent, Streamable {
+
+ public final static InternalAggregations EMPTY = new InternalAggregations();
+ private static final Function<InternalAggregation, Aggregation> SUPERTYPE_CAST = new Function<InternalAggregation, Aggregation>() {
+ @Override
+ public Aggregation apply(InternalAggregation input) {
+ return input;
+ }
+ };
+
+ private List<InternalAggregation> aggregations = ImmutableList.of();
+
+ private Map<String, InternalAggregation> aggregationsAsMap;
+
+ private InternalAggregations() {
+ }
+
+ /**
+ * Constructs a new set of aggregations.
+ */
+ public InternalAggregations(List<InternalAggregation> aggregations) {
+ this.aggregations = aggregations;
+ }
+
+ /** Resets the internal aggregations. */
+ void reset(List<InternalAggregation> aggregations) {
+ this.aggregations = aggregations;
+ this.aggregationsAsMap = null;
+ }
+
+ /**
+ * Iterates over the {@link Aggregation}s.
+ */
+ @Override
+ public Iterator<Aggregation> iterator() {
+ return Iterators.transform(aggregations.iterator(), SUPERTYPE_CAST);
+ }
+
+ /**
+ * The list of {@link Aggregation}s.
+ */
+ public List<Aggregation> asList() {
+ return Lists.transform(aggregations, SUPERTYPE_CAST);
+ }
+
+ /**
+ * Returns the {@link Aggregation}s keyed by aggregation name.
+ */
+ public Map<String, Aggregation> asMap() {
+ return getAsMap();
+ }
+
+ /**
+ * Returns the {@link Aggregation}s keyed by aggregation name.
+ */
+ public Map<String, Aggregation> getAsMap() {
+ if (aggregationsAsMap == null) {
+ Map<String, InternalAggregation> aggregationsAsMap = newHashMap();
+ for (InternalAggregation aggregation : aggregations) {
+ aggregationsAsMap.put(aggregation.getName(), aggregation);
+ }
+ this.aggregationsAsMap = aggregationsAsMap;
+ }
+ return Maps.transformValues(aggregationsAsMap, SUPERTYPE_CAST);
+ }
+
+ /**
+ * @return the aggregation of the specified name.
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public <A extends Aggregation> A get(String name) {
+ return (A) asMap().get(name);
+ }
+
+ /**
+ * Reduces the given list of aggregations.
+ *
+ * @param aggregationsList A list of aggregations to reduce
+ * @return The reduced aggregations
+ */
+ public static InternalAggregations reduce(List<InternalAggregations> aggregationsList, CacheRecycler cacheRecycler) {
+ if (aggregationsList.isEmpty()) {
+ return null;
+ }
+
+ // first we collect all aggregations of the same type and list them together
+
+ Map<String, List<InternalAggregation>> aggByName = new HashMap<String, List<InternalAggregation>>();
+ for (InternalAggregations aggregations : aggregationsList) {
+ for (InternalAggregation aggregation : aggregations.aggregations) {
+ List<InternalAggregation> aggs = aggByName.get(aggregation.getName());
+ if (aggs == null) {
+ aggs = new ArrayList<InternalAggregation>(aggregationsList.size());
+ aggByName.put(aggregation.getName(), aggs);
+ }
+ aggs.add(aggregation);
+ }
+ }
+
+ // now we can use the first aggregation of each list to handle the reduce of its list
+
+ List<InternalAggregation> reducedAggregations = new ArrayList<InternalAggregation>();
+ for (Map.Entry<String, List<InternalAggregation>> entry : aggByName.entrySet()) {
+ List<InternalAggregation> aggregations = entry.getValue();
+ InternalAggregation first = aggregations.get(0); // the list can't be empty as it's created on demand
+ reducedAggregations.add(first.reduce(new InternalAggregation.ReduceContext(aggregations, cacheRecycler)));
+ }
+ InternalAggregations result = aggregationsList.get(0);
+ result.reset(reducedAggregations);
+ return result;
+ }
+
+ /**
+ * Reduces these aggregations, effectively propagating the reduce to all the sub-aggregations.
+ *
+ * @param cacheRecycler The cache recycler to use during the reduce
+ */
+ public void reduce(CacheRecycler cacheRecycler) {
+ for (int i = 0; i < aggregations.size(); i++) {
+ InternalAggregation aggregation = aggregations.get(i);
+ aggregations.set(i, aggregation.reduce(new InternalAggregation.ReduceContext(ImmutableList.of(aggregation), cacheRecycler)));
+ }
+ }
+
+ /** The fields required to write these aggregations to xcontent */
+ static class Fields {
+ public static final XContentBuilderString AGGREGATIONS = new XContentBuilderString("aggregations");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (aggregations.isEmpty()) {
+ return builder;
+ }
+ builder.startObject(Fields.AGGREGATIONS);
+ toXContentInternal(builder, params);
+ return builder.endObject();
+ }
+
+ /**
+ * Directly writes all the aggregations without their bounding object. Used by sub-aggregations (non top level aggregations).
+ */
+ public XContentBuilder toXContentInternal(XContentBuilder builder, Params params) throws IOException {
+ for (Aggregation aggregation : aggregations) {
+ ((InternalAggregation) aggregation).toXContent(builder, params);
+ }
+ return builder;
+ }
+
+ public static InternalAggregations readAggregations(StreamInput in) throws IOException {
+ InternalAggregations result = new InternalAggregations();
+ result.readFrom(in);
+ return result;
+ }
+
+ public static InternalAggregations readOptionalAggregations(StreamInput in) throws IOException {
+ return in.readOptionalStreamable(new InternalAggregations());
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ if (size == 0) {
+ aggregations = ImmutableList.of();
+ aggregationsAsMap = ImmutableMap.of();
+ } else {
+ aggregations = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ BytesReference type = in.readBytesReference();
+ InternalAggregation aggregation = AggregationStreams.stream(type).readResult(in);
+ aggregations.add(aggregation);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(aggregations.size());
+ for (Aggregation aggregation : aggregations) {
+ InternalAggregation internal = (InternalAggregation) aggregation;
+ out.writeBytesReference(internal.type().stream());
+ internal.writeTo(out);
+ }
+ }
+
+}
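
A sketch of the distributed reduce this class supports: each shard produces an InternalAggregations tree and the coordinating node merges them. shard1Aggs, shard2Aggs and cacheRecycler stand for inputs provided by the search phase.

    List<InternalAggregations> shardResults = new ArrayList<InternalAggregations>(2);
    shardResults.add(shard1Aggs);
    shardResults.add(shard2Aggs);
    // groups same-named aggregations across shards, then delegates to
    // InternalAggregation.reduce(ReduceContext) once per group
    InternalAggregations reduced = InternalAggregations.reduce(shardResults, cacheRecycler);
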
diff --git a/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java
new file mode 100644
index 0000000..f003519
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+/**
+ * The aggregation context that is part of the search context.
+ */
+public class SearchContextAggregations {
+
+ private final AggregatorFactories factories;
+ private Aggregator[] aggregators;
+ private AggregationContext aggregationContext;
+
+ /**
+ * Creates a new aggregation context with the parsed aggregator factories
+ */
+ public SearchContextAggregations(AggregatorFactories factories) {
+ this.factories = factories;
+ }
+
+ public AggregatorFactories factories() {
+ return factories;
+ }
+
+ public Aggregator[] aggregators() {
+ return aggregators;
+ }
+
+ public AggregationContext aggregationContext() {
+ return aggregationContext;
+ }
+
+ public void aggregationContext(AggregationContext aggregationContext) {
+ this.aggregationContext = aggregationContext;
+ }
+
+ /**
+ * Registers all the created aggregators (top level aggregators) for the search execution context.
+ *
+ * @param aggregators The top level aggregators of the search execution.
+ */
+ public void aggregators(Aggregator[] aggregators) {
+ this.aggregators = aggregators;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java
new file mode 100644
index 0000000..a2a5b25
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
+import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing;
+import org.elasticsearch.search.aggregations.bucket.nested.InternalNested;
+import org.elasticsearch.search.aggregations.bucket.range.InternalRange;
+import org.elasticsearch.search.aggregations.bucket.range.date.InternalDateRange;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.InternalGeoDistance;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.InternalIPv4Range;
+import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms;
+import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg;
+import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.min.InternalMin;
+import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
+import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount;
+
+/**
+ * A module that registers all the transport streams for the aggregations
+ */
+public class TransportAggregationModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+
+ // metrics
+ InternalAvg.registerStreams();
+ InternalSum.registerStreams();
+ InternalMin.registerStreams();
+ InternalMax.registerStreams();
+ InternalStats.registerStreams();
+ InternalExtendedStats.registerStreams();
+ InternalValueCount.registerStreams();
+
+ // buckets
+ InternalGlobal.registerStreams();
+ InternalFilter.registerStreams();
+ InternalMissing.registerStreams();
+ StringTerms.registerStreams();
+ LongTerms.registerStreams();
+ InternalGeoHashGrid.registerStreams();
+ DoubleTerms.registerStreams();
+ UnmappedTerms.registerStreams();
+ InternalRange.registerStream();
+ InternalDateRange.registerStream();
+ InternalIPv4Range.registerStream();
+ InternalHistogram.registerStream();
+ InternalDateHistogram.registerStream();
+ InternalGeoDistance.registerStream();
+ InternalNested.registerStream();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/ValuesSourceAggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/ValuesSourceAggregationBuilder.java
new file mode 100644
index 0000000..c611643
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/ValuesSourceAggregationBuilder.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A base class for all bucket aggregation builders that are based on values (either script-generated or extracted from field data)
+ */
+public abstract class ValuesSourceAggregationBuilder<B extends ValuesSourceAggregationBuilder<B>> extends AggregationBuilder<B> {
+
+ private String field;
+ private String script;
+ private String lang;
+ private Map<String, Object> params;
+
+ /**
+ * Constructs a new builder.
+ *
+ * @param name The name of the aggregation.
+ * @param type The type of the aggregation.
+ */
+ protected ValuesSourceAggregationBuilder(String name, String type) {
+ super(name, type);
+ }
+
+ /**
+ * Sets the field from which the values will be extracted.
+ *
+ * @param field The name of the field
+ * @return This builder (fluent interface support)
+ */
+ @SuppressWarnings("unchecked")
+ public B field(String field) {
+ this.field = field;
+ return (B) this;
+ }
+
+ /**
+ * Sets the script which generates the values. If the script is configured along with the field (as in {@link #field(String)}), then
+ * this script will be treated as a {@code value script}. A <i>value script</i> will be applied on the values that are extracted from
+ * the field data (you can refer to that value in the script using the {@code _value} reserved variable). If only the script is configured
+ * (and no field is configured next to it), then the script will be responsible for generating the values that will be aggregated.
+ *
+ * @param script The configured script.
+ * @return This builder (fluent interface support)
+ */
+ @SuppressWarnings("unchecked")
+ public B script(String script) {
+ this.script = script;
+ return (B) this;
+ }
+
+ /**
+ * Sets the language of the script (if one is defined).
+ * <p/>
+ * Also see {@link #script(String)}.
+ *
+ * @param lang The language of the script.
+ * @return This builder (fluent interface support)
+ */
+ @SuppressWarnings("unchecked")
+ public B lang(String lang) {
+ this.lang = lang;
+ return (B) this;
+ }
+
+ /**
+ * Sets the value of a parameter that is used in the script (if one is configured).
+ *
+ * @param name The name of the parameter.
+ * @param value The value of the parameter.
+ * @return This builder (fluent interface support)
+ */
+ @SuppressWarnings("unchecked")
+ public B param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return (B) this;
+ }
+
+ /**
+ * Sets the values of the parameters that are used in the script (if one is configured).
+ *
+ * @param params The parameters.
+ * @return This builder (fluent interface support)
+ */
+ @SuppressWarnings("unchecked")
+ public B params(Map<String, Object> params) {
+ if (this.params == null) {
+ this.params = Maps.newHashMap();
+ }
+ this.params.putAll(params);
+ return (B) this;
+ }
+
+ @Override
+ protected final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (field != null) {
+ builder.field("field", field);
+ }
+ if (script != null) {
+ builder.field("script", script);
+ }
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params").map(this.params);
+ }
+
+ doInternalXContent(builder, params);
+ return builder.endObject();
+ }
+
+ protected abstract XContentBuilder doInternalXContent(XContentBuilder builder, Params params) throws IOException;
+}
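
In client code the fluent setters above read naturally; a hedged sketch assuming AggregationBuilders.terms(...) from this module (its TermsBuilder extends this class):

    TermsBuilder genders = AggregationBuilders.terms("genders")
            .field("gender")                  // values extracted from field data
            .script("_value.toLowerCase()")   // value script applied to each field value
            .lang("mvel")                     // script language (the 1.x default)
            .param("someParam", 42);          // parameter visible to the script
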
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractHash.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractHash.java
new file mode 100644
index 0000000..5085ef7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractHash.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.base.Preconditions;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
+
+/**
+ * Base implementation for {@link BytesRefHash} and {@link LongHash}.
+ */
+// IDs are internally stored as id + 1 so that 0 encodes for an empty slot
+abstract class AbstractHash implements Releasable {
+
+ // Open addressing typically requires having smaller load factors compared to linked lists because
+ // collisions may result in worse lookup performance.
+ static final float DEFAULT_MAX_LOAD_FACTOR = 0.6f;
+
+ final float maxLoadFactor;
+ long size, maxSize;
+ LongArray ids;
+ long mask;
+
+ AbstractHash(long capacity, float maxLoadFactor, PageCacheRecycler recycler) {
+ Preconditions.checkArgument(capacity >= 0, "capacity must be >= 0");
+ Preconditions.checkArgument(maxLoadFactor > 0 && maxLoadFactor < 1, "maxLoadFactor must be > 0 and < 1");
+ this.maxLoadFactor = maxLoadFactor;
+ long buckets = 1L + (long) (capacity / maxLoadFactor);
+ buckets = Math.max(1, Long.highestOneBit(buckets - 1) << 1); // next power of two
+ assert buckets == Long.highestOneBit(buckets);
+ maxSize = (long) (buckets * maxLoadFactor);
+ assert maxSize >= capacity;
+ size = 0;
+ ids = BigArrays.newLongArray(buckets, recycler, true);
+ mask = buckets - 1;
+ }
+
+ /**
+ * Return the number of allocated slots to store this hash table.
+ */
+ public long capacity() {
+ return ids.size();
+ }
+
+ /**
+ * Return the number of keys in this hash table.
+ */
+ public long size() {
+ return size;
+ }
+
+ static long slot(long hash, long mask) {
+ return hash & mask;
+ }
+
+ static long nextSlot(long curSlot, long mask) {
+ return (curSlot + 1) & mask; // linear probing
+ }
+
+ /**
+ * Get the id associated with the key at <code>0 &lt;= index &lt; capacity()</code> or -1 if this slot is unused.
+ */
+ public long id(long index) {
+ return ids.get(index) - 1;
+ }
+
+ protected final long id(long index, long id) {
+ return ids.set(index, id + 1) - 1;
+ }
+
+ /** Resize keys to the given capacity. */
+ protected void resizeKeys(long capacity) {}
+
+ /** Remove the key at the given index and add it back with the given id (used when re-hashing during {@link #grow()}). */
+ protected abstract void removeAndAdd(long index, long id);
+
+ protected final void grow() {
+ // The difference between this implementation of grow() and standard hash tables is that we grow in-place, which makes
+ // the re-mapping of keys to slots a bit more tricky.
+ assert size == maxSize;
+ final long prevSize = size;
+ final long buckets = capacity();
+ // Resize arrays
+ final long newBuckets = buckets << 1;
+ assert newBuckets == Long.highestOneBit(newBuckets) : newBuckets; // power of 2
+ resizeKeys(newBuckets);
+ ids = BigArrays.resize(ids, newBuckets);
+ mask = newBuckets - 1;
+ // First let's remap in-place: most data will be put in its final position directly
+ for (long i = 0; i < buckets; ++i) {
+ final long id = id(i, -1);
+ if (id != -1) {
+ removeAndAdd(i, id);
+ }
+ }
+ // The only entries that have not been put in their final position by the previous loop are those that were
+ // stored in a slot that is < slot(key, mask). This only happens when slot(key, mask) returned a slot close to
+ // the end of the array and collision resolution wrapped it around to the first slots. This time, collision
+ // resolution will have put them at the beginning of the newly allocated slots. Re-add them to make sure they
+ // end up in the right slot. This second loop will typically exit very early.
+ for (long i = buckets; i < newBuckets; ++i) {
+ final long id = id(i, -1);
+ if (id != -1) {
+ removeAndAdd(i, id); // add it back
+ } else {
+ break;
+ }
+ }
+ assert size == prevSize;
+ maxSize = (long) (newBuckets * maxLoadFactor);
+ assert size < maxSize;
+ }
+
+ @Override
+ public boolean release() {
+ Releasables.release(ids);
+ return true;
+ }
+}
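The slot arithmetic above relies on the bucket count being a power of two, so that masking replaces a modulo and linear probing wraps around for free. A minimal standalone sketch of the sizing and probing scheme, with illustrative names (not part of the patch):

    // Minimal sketch of AbstractHash's sizing and probing arithmetic; names illustrative.
    public class ProbingDemo {
        public static void main(String[] args) {
            long capacity = 10;
            float maxLoadFactor = 0.6f;
            long buckets = 1L + (long) (capacity / maxLoadFactor);        // 17
            buckets = Math.max(1, Long.highestOneBit(buckets - 1) << 1);  // next power of two: 32
            long mask = buckets - 1;                                      // 0b11111

            long hash = 1234567L;
            long slot = hash & mask;        // same as slot(hash, mask): a cheap modulo
            long next = (slot + 1) & mask;  // nextSlot(): linear probing, wraps at the end
            System.out.println("buckets=" + buckets + " slot=" + slot + " next=" + next);
        }
    }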
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java
new file mode 100644
index 0000000..c518f73
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+/**
+ * Helper functions for common Bucketing functions
+ */
+public class BucketUtils {
+
+ /**
+ * Heuristic used to determine the size of shard-side PriorityQueues when
+ * selecting the top N terms from a distributed index.
+ *
+ * @param finalSize
+ * The number of terms required in the final reduce phase.
+ * @param numberOfShards
+ * The number of shards being queried.
+ * @return A suggested default for the size of any shard-side PriorityQueues
+ */
+ public static int suggestShardSideQueueSize(int finalSize, int numberOfShards) {
+ assert numberOfShards >= 1;
+ if (numberOfShards == 1) {
+ return finalSize;
+ }
+ // Cap the multiplier used for shards to avoid excessive data transfer.
+ final int shardSampleSize = finalSize * Math.min(10, numberOfShards);
+ // When finalSize is very small (e.g. 1) and the number of shards is low, we still need to
+ // gather a reasonable sample of statistics from each shard (at low cost) to improve the
+ // chances of the final result being accurate.
+ return Math.max(10, shardSampleSize);
+ }
+
+}
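Plugging a few values into the heuristic shows how the two guards interact; a standalone sketch, not part of the patch:

    // Standalone illustration of BucketUtils.suggestShardSideQueueSize; not part of the patch.
    public class QueueSizeDemo {
        static int suggest(int finalSize, int numberOfShards) {
            if (numberOfShards == 1) {
                return finalSize;                      // a single shard already sees everything
            }
            int shardSampleSize = finalSize * Math.min(10, numberOfShards);
            return Math.max(10, shardSampleSize);      // never ask a shard for fewer than 10 terms
        }

        public static void main(String[] args) {
            System.out.println(suggest(10, 1));   // 10  - single shard, no oversampling needed
            System.out.println(suggest(1, 3));    // 10  - lower bound kicks in for tiny finalSize
            System.out.println(suggest(10, 5));   // 50  - finalSize * numberOfShards
            System.out.println(suggest(10, 50));  // 100 - multiplier capped at 10
        }
    }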
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java
new file mode 100644
index 0000000..9d8a154
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * An aggregator that creates buckets and tracks the number of documents collected into each bucket.
+ */
+public abstract class BucketsAggregator extends Aggregator {
+
+ private LongArray docCounts;
+
+ private final Aggregator[] collectableSubAggregators;
+
+ public BucketsAggregator(String name, BucketAggregationMode bucketAggregationMode, AggregatorFactories factories,
+ long estimatedBucketsCount, AggregationContext context, Aggregator parent) {
+ super(name, bucketAggregationMode, factories, estimatedBucketsCount, context, parent);
+ docCounts = BigArrays.newLongArray(estimatedBucketsCount, context.pageCacheRecycler(), true);
+ List<Aggregator> collectables = new ArrayList<Aggregator>(subAggregators.length);
+ for (int i = 0; i < subAggregators.length; i++) {
+ if (subAggregators[i].shouldCollect()) {
+ collectables.add(subAggregators[i]);
+ }
+ }
+ collectableSubAggregators = collectables.toArray(new Aggregator[collectables.size()]);
+ }
+
+ /**
+ * Utility method to collect the given doc in the given bucket (identified by the bucket ordinal)
+ */
+ protected final void collectBucket(int doc, long bucketOrd) throws IOException {
+ docCounts = BigArrays.grow(docCounts, bucketOrd + 1);
+ docCounts.increment(bucketOrd, 1);
+ for (int i = 0; i < collectableSubAggregators.length; i++) {
+ collectableSubAggregators[i].collect(doc, bucketOrd);
+ }
+ }
+
+ /**
+ * Utility method to collect the given doc in the given bucket but not to update the doc counts of the bucket
+ */
+ protected final void collectBucketNoCounts(int doc, long bucketOrd) throws IOException {
+ for (int i = 0; i < collectableSubAggregators.length; i++) {
+ collectableSubAggregators[i].collect(doc, bucketOrd);
+ }
+ }
+
+ /**
+ * Utility method to increment the doc counts of the given bucket (identified by the bucket ordinal)
+ */
+ protected final void incrementBucketDocCount(int inc, long bucketOrd) throws IOException {
+ docCounts = BigArrays.grow(docCounts, bucketOrd + 1);
+ docCounts.increment(bucketOrd, inc);
+ }
+
+ /**
+ * Utility method to return the number of documents that fell in the given bucket (identified by the bucket ordinal)
+ */
+ protected final long bucketDocCount(long bucketOrd) {
+ if (bucketOrd >= docCounts.size()) {
+ // This may happen, e.g., if no document in the highest buckets is accepted by a sub-aggregator.
+ // For example, given a long terms agg on the 3 terms 1, 2 and 3 with a sub filter aggregator,
+ // if no document with value 3 matches the filter, the filter will never collect bucket ord 3.
+ // However, the long terms agg will still call bucketAggregations(3) on the filter aggregator to
+ // build its sub-aggregations.
+ return 0L;
+ } else {
+ return docCounts.get(bucketOrd);
+ }
+ }
+
+ /**
+ * Utility method to build the aggregations of the given bucket (identified by the bucket ordinal)
+ */
+ protected final InternalAggregations bucketAggregations(long bucketOrd) {
+ final InternalAggregation[] aggregations = new InternalAggregation[subAggregators.length];
+ final long bucketDocCount = bucketDocCount(bucketOrd);
+ for (int i = 0; i < subAggregators.length; i++) {
+ aggregations[i] = bucketDocCount == 0L
+ ? subAggregators[i].buildEmptyAggregation()
+ : subAggregators[i].buildAggregation(bucketOrd);
+ }
+ return new InternalAggregations(Arrays.asList(aggregations));
+ }
+
+ @Override
+ public final boolean release() {
+ boolean success = false;
+ try {
+ super.release();
+ success = true;
+ } finally {
+ Releasables.release(success, docCounts);
+ }
+ return true;
+ }
+
+}
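The per-ordinal doc-count bookkeeping above amounts to a growable array indexed by bucket ordinal; a simplified sketch using a plain long[] where the real code uses a recycled BigArrays LongArray (illustrative only):

    import java.util.Arrays;

    // Simplified sketch of BucketsAggregator's per-ordinal doc counts; illustrative only.
    public class DocCountsDemo {
        private long[] docCounts = new long[4];

        void collectBucket(long bucketOrd) {
            if (bucketOrd >= docCounts.length) {
                // Grow (BigArrays.grow in the real code); over-allocate to amortize copies.
                docCounts = Arrays.copyOf(docCounts, Math.max((int) bucketOrd + 1, docCounts.length * 2));
            }
            docCounts[(int) bucketOrd]++;
        }

        long bucketDocCount(long bucketOrd) {
            // Ordinals past the end were simply never collected, so their count is 0.
            return bucketOrd >= docCounts.length ? 0L : docCounts[(int) bucketOrd];
        }

        public static void main(String[] args) {
            DocCountsDemo d = new DocCountsDemo();
            d.collectBucket(0);
            d.collectBucket(7); // triggers growth
            d.collectBucket(7);
            System.out.println(d.bucketDocCount(7));  // 2
            System.out.println(d.bucketDocCount(42)); // 0, never collected
        }
    }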
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BytesRefHash.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BytesRefHash.java
new file mode 100644
index 0000000..baa51b4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BytesRefHash.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.hash.MurmurHash3;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.*;
+
+/**
+ * Specialized hash table implementation similar to Lucene's BytesRefHash that maps
+ * BytesRef values to ids. Collisions are resolved with open addressing and linear
+ * probing, growth is smooth thanks to {@link BigArrays}, hashes are cached for faster
+ * re-hashing and capacity is always a power of 2 for faster identification of buckets.
+ */
+public final class BytesRefHash extends AbstractHash {
+
+ private LongArray startOffsets;
+ private ByteArray bytes;
+ private IntArray hashes; // we cache hashes for faster re-hashing
+ private final BytesRef spare;
+
+ // Constructor with configurable capacity and default maximum load factor.
+ public BytesRefHash(long capacity, PageCacheRecycler recycler) {
+ this(capacity, DEFAULT_MAX_LOAD_FACTOR, recycler);
+ }
+
+ // Constructor with configurable capacity and load factor.
+ public BytesRefHash(long capacity, float maxLoadFactor, PageCacheRecycler recycler) {
+ super(capacity, maxLoadFactor, recycler);
+ startOffsets = BigArrays.newLongArray(capacity + 1, recycler, false);
+ bytes = BigArrays.newByteArray(capacity * 3, recycler, false);
+ hashes = BigArrays.newIntArray(capacity, recycler, false);
+ spare = new BytesRef();
+ }
+
+ // BytesRef has a weak hashCode function so we try to improve it by rehashing using Murmur3
+ // Feel free to remove rehashing if BytesRef gets a better hash function
+ private static int rehash(int hash) {
+ return MurmurHash3.hash(hash);
+ }
+
+ /**
+ * Return the key at <code>0 &lt;= index &lt; capacity()</code>. The result is undefined if the slot is unused.
+ * <p>Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #release()} is called.</p>
+ */
+ public BytesRef get(long id, BytesRef dest) {
+ final long startOffset = startOffsets.get(id);
+ final int length = (int) (startOffsets.get(id + 1) - startOffset);
+ bytes.get(startOffset, length, dest);
+ return dest;
+ }
+
+ /**
+ * Get the id associated with <code>key</code>
+ */
+ public long find(BytesRef key, int code) {
+ final long slot = slot(rehash(code), mask);
+ for (long index = slot; ; index = nextSlot(index, mask)) {
+ final long id = id(index);
+ if (id == -1L || UnsafeUtils.equals(key, get(id, spare))) {
+ return id;
+ }
+ }
+ }
+
+ /** Sugar for {@link #find(BytesRef, int) find(key, key.hashCode())}. */
+ public long find(BytesRef key) {
+ return find(key, key.hashCode());
+ }
+
+ private long set(BytesRef key, int code, long id) {
+ assert rehash(key.hashCode()) == code;
+ assert size < maxSize;
+ final long slot = slot(code, mask);
+ for (long index = slot; ; index = nextSlot(index, mask)) {
+ final long curId = id(index);
+ if (curId == -1) { // means unset
+ id(index, id);
+ append(id, key, code);
+ ++size;
+ return id;
+ } else if (UnsafeUtils.equals(key, get(curId, spare))) {
+ return -1 - curId;
+ }
+ }
+ }
+
+ private void append(long id, BytesRef key, int code) {
+ assert size == id;
+ final long startOffset = startOffsets.get(size);
+ bytes = BigArrays.grow(bytes, startOffset + key.length);
+ bytes.set(startOffset, key.bytes, key.offset, key.length);
+ startOffsets = BigArrays.grow(startOffsets, size + 2);
+ startOffsets.set(size + 1, startOffset + key.length);
+ hashes = BigArrays.grow(hashes, id + 1);
+ hashes.set(id, code);
+ }
+
+ private boolean assertConsistent(long id, int code) {
+ get(id, spare);
+ return rehash(spare.hashCode()) == code;
+ }
+
+ private void reset(int code, long id) {
+ assert assertConsistent(id, code);
+ final long slot = slot(code, mask);
+ for (long index = slot; ; index = nextSlot(index, mask)) {
+ final long curId = id(index);
+ if (curId == -1) { // means unset
+ id(index, id);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Try to add <code>key</code>. Return its newly allocated id if it wasn't in the hash table yet, or <code>-1-id</code>
+ * if it was already present in the hash table.
+ */
+ public long add(BytesRef key, int code) {
+ if (size >= maxSize) {
+ assert size == maxSize;
+ grow();
+ }
+ assert size < maxSize;
+ return set(key, rehash(code), size);
+ }
+
+ /** Sugar for {@link #add(BytesRef, int) add(key, key.hashCode())}. */
+ public long add(BytesRef key) {
+ return add(key, key.hashCode());
+ }
+
+ @Override
+ protected void removeAndAdd(long index, long id) {
+ final int code = hashes.get(id);
+ reset(code, id);
+ }
+
+ @Override
+ public boolean release() {
+ boolean success = false;
+ try {
+ super.release();
+ success = true;
+ } finally {
+ Releasables.release(success, bytes, hashes, startOffsets);
+ }
+ return true;
+ }
+
+}
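The add()/find() contract above encodes "key already present" as -1 - id, so a caller can distinguish new keys from duplicates without a second lookup; a standalone sketch of the encoding (not part of the patch):

    // Standalone illustration of the -1 - id encoding used by BytesRefHash.add()
    // and LongHash.add(); not part of the patch.
    public class AddReturnDemo {
        public static void main(String[] args) {
            long first = 3L;          // suppose add(key) allocated id 3 for a new key
            long dup = -1L - first;   // a later add(sameKey) would return -4
            System.out.println(dup);  // -4

            long ret = dup;
            if (ret < 0) {
                long existingId = -1L - ret;    // decode back to the existing id
                System.out.println(existingId); // 3
            }
        }
    }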
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
new file mode 100644
index 0000000..638a98a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A base class for all the single bucket aggregations.
+ */
+public abstract class InternalSingleBucketAggregation extends InternalAggregation implements SingleBucketAggregation {
+
+ protected long docCount;
+ protected InternalAggregations aggregations;
+
+ protected InternalSingleBucketAggregation() {} // for serialization
+
+ /**
+ * Creates a single bucket aggregation.
+ *
+ * @param name The aggregation name.
+ * @param docCount The document count in the single bucket.
+ * @param aggregations The already built sub-aggregations that are associated with the bucket.
+ */
+ protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations) {
+ super(name);
+ this.docCount = docCount;
+ this.aggregations = aggregations;
+ }
+
+ @Override
+ public long getDocCount() {
+ return docCount;
+ }
+
+ @Override
+ public InternalAggregations getAggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public InternalAggregation reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ InternalSingleBucketAggregation reduced = ((InternalSingleBucketAggregation) aggregations.get(0));
+ reduced.aggregations.reduce(reduceContext.cacheRecycler());
+ return reduced;
+ }
+ InternalSingleBucketAggregation reduced = null;
+ List<InternalAggregations> subAggregationsList = new ArrayList<InternalAggregations>(aggregations.size());
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ reduced = (InternalSingleBucketAggregation) aggregation;
+ } else {
+ reduced.docCount += ((InternalSingleBucketAggregation) aggregation).docCount;
+ }
+ subAggregationsList.add(((InternalSingleBucketAggregation) aggregation).aggregations);
+ }
+ reduced.aggregations = InternalAggregations.reduce(subAggregationsList, reduceContext.cacheRecycler());
+ return reduced;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ docCount = in.readVLong();
+ aggregations = InternalAggregations.readAggregations(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVLong(docCount);
+ aggregations.writeTo(out);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.field(CommonFields.DOC_COUNT, docCount);
+ aggregations.toXContentInternal(builder, params);
+ return builder.endObject();
+ }
+}
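The multi-shard branch of reduce() keeps the first shard's instance as the target and folds every other shard's doc count into it; a minimal sketch of that shape, with illustrative types (not part of the patch):

    import java.util.Arrays;
    import java.util.List;

    // Minimal sketch of the multi-shard reduce above; types are illustrative.
    public class ReduceDemo {
        static class SingleBucket {
            long docCount;
            SingleBucket(long docCount) { this.docCount = docCount; }
        }

        static SingleBucket reduce(List<SingleBucket> shardResults) {
            SingleBucket reduced = null;
            for (SingleBucket r : shardResults) {
                if (reduced == null) {
                    reduced = r;                    // reuse the first instance as the target
                } else {
                    reduced.docCount += r.docCount; // fold in every other shard's count
                }
            }
            return reduced;
        }

        public static void main(String[] args) {
            List<SingleBucket> perShard = Arrays.asList(new SingleBucket(5), new SingleBucket(2), new SingleBucket(3));
            System.out.println(reduce(perShard).docCount); // 10
        }
    }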
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/LongHash.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/LongHash.java
new file mode 100644
index 0000000..d701b78
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/LongHash.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.hash.MurmurHash3;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
+
+/**
+ * Specialized hash table implementation similar to BytesRefHash that maps
+ * long values to ids. Collisions are resolved with open addressing and linear
+ * probing, growth is smooth thanks to {@link BigArrays} and capacity is always
+ * a power of 2 for faster identification of buckets.
+ */
+// IDs are internally stored as id + 1 so that 0 encodes for an empty slot
+public final class LongHash extends AbstractHash {
+
+ private LongArray keys;
+
+ // Constructor with configurable capacity and default maximum load factor.
+ public LongHash(long capacity, PageCacheRecycler recycler) {
+ this(capacity, DEFAULT_MAX_LOAD_FACTOR, recycler);
+ }
+
+ // Constructor with configurable capacity and load factor.
+ public LongHash(long capacity, float maxLoadFactor, PageCacheRecycler recycler) {
+ super(capacity, maxLoadFactor, recycler);
+ keys = BigArrays.newLongArray(capacity(), recycler, false);
+ }
+
+ private static long hash(long value) {
+ // Don't use the value directly: for some keys, e.g. dates, the low bits carry little
+ // information, and we want all bits of the hash to be as informative as possible.
+ return MurmurHash3.hash(value);
+ }
+
+ /**
+ * Return the key at <code>0 &lt;= index &lt; capacity()</code>. The result is undefined if the slot is unused.
+ */
+ public long key(long index) {
+ return keys.get(index);
+ }
+
+ /**
+ * Get the id associated with <code>key</code>
+ */
+ public long find(long key) {
+ final long slot = slot(hash(key), mask);
+ for (long index = slot; ; index = nextSlot(index, mask)) {
+ final long id = id(index);
+ if (id == -1 || keys.get(index) == key) {
+ return id;
+ }
+ }
+ }
+
+ private long set(long key, long id) {
+ assert size < maxSize;
+ final long slot = slot(hash(key), mask);
+ for (long index = slot; ; index = nextSlot(index, mask)) {
+ final long curId = id(index);
+ if (curId == -1) { // means unset
+ id(index, id);
+ keys.set(index, key);
+ ++size;
+ return id;
+ } else if (keys.get(index) == key) {
+ return -1 - curId;
+ }
+ }
+ }
+
+ private void reset(long key, long id) {
+ final long slot = slot(hash(key), mask);
+ for (long index = slot; ; index = nextSlot(index, mask)) {
+ final long curId = id(index);
+ if (curId == -1) { // means unset
+ id(index, id);
+ keys.set(index, key);
+ break;
+ } else {
+ assert keys.get(index) != key;
+ }
+ }
+ }
+
+ /**
+ * Try to add <code>key</code>. Return its newly allocated id if it wasn't in the hash table yet, or <code>-1-id</code>
+ * if it was already present in the hash table.
+ */
+ public long add(long key) {
+ if (size >= maxSize) {
+ assert size == maxSize;
+ grow();
+ }
+ assert size < maxSize;
+ return set(key, size);
+ }
+
+ @Override
+ protected void resizeKeys(long capacity) {
+ keys = BigArrays.resize(keys, capacity);
+ }
+
+ @Override
+ protected void removeAndAdd(long index, long id) {
+ final long key = keys.set(index, 0);
+ reset(key, id);
+ }
+
+ @Override
+ public boolean release() {
+ boolean success = false;
+ try {
+ super.release();
+ success = true;
+ } finally {
+ Releasables.release(success, keys);
+ }
+ return true;
+ }
+
+}
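The comment in hash(long) above concerns keys whose low bits barely vary, e.g. day-rounded timestamps. The sketch below shows the clustering that motivates mixing; mix64 is a generic 64-bit finalizer standing in for MurmurHash3.hash, not the hppc implementation:

    import java.util.HashSet;
    import java.util.Set;

    // Why LongHash mixes keys before masking: day-rounded timestamps share their low
    // bits, so "key & mask" piles them into a handful of slots. mix64() is a generic
    // 64-bit finalizer used here as a stand-in for MurmurHash3.hash.
    public class LowBitsDemo {
        static long mix64(long z) {
            z = (z ^ (z >>> 33)) * 0xff51afd7ed558ccdL;
            z = (z ^ (z >>> 33)) * 0xc4ceb9fe1a85ec53L;
            return z ^ (z >>> 33);
        }

        public static void main(String[] args) {
            long mask = 63;                  // 64 buckets
            long day = 24L * 60 * 60 * 1000; // keys are multiples of 86,400,000 ms
            Set<Long> raw = new HashSet<Long>();
            Set<Long> mixed = new HashSet<Long>();
            for (int i = 0; i < 1000; i++) {
                long key = i * day;
                raw.add(key & mask);          // the low 10 bits of a whole day are all zero
                mixed.add(mix64(key) & mask);
            }
            System.out.println("distinct slots, raw keys:   " + raw.size());   // 1
            System.out.println("distinct slots, mixed keys: " + mixed.size()); // close to 64
        }
    }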
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java
new file mode 100644
index 0000000..4c7408c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.util.Comparators;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+
+import java.util.Collection;
+
+/**
+ * An aggregation that returns multiple buckets
+ */
+public interface MultiBucketsAggregation extends Aggregation {
+
+
+ /**
+ * A bucket represents a criterion that all documents falling in it adhere to. It is uniquely identified
+ * by a key, and can potentially hold sub-aggregations computed over all documents in it.
+ */
+ public interface Bucket {
+
+ /**
+ * @return The key associated with the bucket as a string
+ */
+ String getKey();
+
+ /**
+ * @return The key associated with the bucket as text (ideal for further streaming this instance)
+ */
+ Text getKeyAsText();
+
+ /**
+ * @return The number of documents that fall within this bucket
+ */
+ long getDocCount();
+
+ /**
+ * @return The sub-aggregations of this bucket
+ */
+ Aggregations getAggregations();
+
+ static class SubAggregationComparator<B extends Bucket> implements java.util.Comparator<B> {
+
+ private final String aggName;
+ private final String valueName;
+ private final boolean asc;
+
+ public SubAggregationComparator(String expression, boolean asc) {
+ this.asc = asc;
+ int i = expression.indexOf('.');
+ if (i < 0) {
+ this.aggName = expression;
+ this.valueName = null;
+ } else {
+ this.aggName = expression.substring(0, i);
+ this.valueName = expression.substring(i + 1);
+ }
+ }
+
+ public SubAggregationComparator(String aggName, String valueName, boolean asc) {
+ this.aggName = aggName;
+ this.valueName = valueName;
+ this.asc = asc;
+ }
+
+ public boolean asc() {
+ return asc;
+ }
+
+ public String aggName() {
+ return aggName;
+ }
+
+ public String valueName() {
+ return valueName;
+ }
+
+ @Override
+ public int compare(B b1, B b2) {
+ double v1 = value(b1);
+ double v2 = value(b2);
+ return Comparators.compareDiscardNaN(v1, v2, asc);
+ }
+
+ private double value(B bucket) {
+ MetricsAggregation aggregation = bucket.getAggregations().get(aggName);
+ if (aggregation == null) {
+ throw new ElasticsearchIllegalArgumentException("Unknown aggregation named [" + aggName + "]");
+ }
+ if (aggregation instanceof MetricsAggregation.SingleValue) {
+ //TODO should we throw an exception if the value name is specified?
+ return ((MetricsAggregation.SingleValue) aggregation).value();
+ }
+ if (aggregation instanceof MetricsAggregation.MultiValue) {
+ if (valueName == null) {
+ throw new ElasticsearchIllegalArgumentException("Cannot sort on multi valued aggregation [" + aggName + "]. A value name is required");
+ }
+ return ((MetricsAggregation.MultiValue) aggregation).value(valueName);
+ }
+
+ throw new ElasticsearchIllegalArgumentException("A mal attempt to sort terms by aggregation [" + aggregation.getName() +
+ "]. Terms can only be ordered by either standard order or direct calc aggregators of the terms");
+ }
+ }
+ }
+
+ /**
+ * @return The buckets of this aggregation.
+ */
+ Collection<? extends Bucket> getBuckets();
+
+ /**
+ * The bucket that is associated with the given key.
+ *
+ * @param key The key of the requested bucket.
+ * @return The bucket
+ */
+ <B extends Bucket> B getBucketByKey(String key);
+}
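The order expression handled by SubAggregationComparator is either a bare aggregation name (single-value metrics) or aggName.valueName (multi-value metrics such as stats); a standalone sketch of the split, with made-up names:

    // Standalone sketch of the "aggName[.valueName]" split performed by
    // SubAggregationComparator's constructor; names are illustrative.
    public class OrderPathDemo {
        public static void main(String[] args) {
            for (String expression : new String[] { "avg_price", "price_stats.max" }) {
                int i = expression.indexOf('.');
                String aggName = i < 0 ? expression : expression.substring(0, i);
                String valueName = i < 0 ? null : expression.substring(i + 1);
                System.out.println(aggName + " -> " + valueName);
                // avg_price -> null
                // price_stats -> max
            }
        }
    }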
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregation.java
new file mode 100644
index 0000000..de02387
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregation.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.Aggregations;
+
+/**
+ * A single bucket aggregation
+ */
+public interface SingleBucketAggregation extends Aggregation {
+
+ /**
+ * @return The number of documents in this bucket
+ */
+ long getDocCount();
+
+ /**
+ * @return The sub-aggregations of this bucket
+ */
+ Aggregations getAggregations();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java
new file mode 100644
index 0000000..1481c53
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+/**
+ * A bucket aggregator that doesn't create new buckets.
+ */
+public abstract class SingleBucketAggregator extends BucketsAggregator {
+
+ protected SingleBucketAggregator(String name, AggregatorFactories factories,
+ AggregationContext aggregationContext, Aggregator parent) {
+ super(name, BucketAggregationMode.MULTI_BUCKETS, factories, parent == null ? 1 : parent.estimatedBucketCount(), aggregationContext, parent);
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return true;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/Filter.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/Filter.java
new file mode 100644
index 0000000..5003aa1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/Filter.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.filter;
+
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;
+
+/**
+ * A {@code filter} aggregation. Defines a single bucket that holds all documents that match a specific filter.
+ */
+public interface Filter extends SingleBucketAggregation {
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java
new file mode 100644
index 0000000..af6f18b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.filter;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+
+/**
+ * Builder for the {@code filter} aggregation.
+ */
+public class FilterAggregationBuilder extends AggregationBuilder<FilterAggregationBuilder> {
+
+ private FilterBuilder filter;
+
+ public FilterAggregationBuilder(String name) {
+ super(name, InternalFilter.TYPE.name());
+ }
+
+ public FilterAggregationBuilder filter(FilterBuilder filter) {
+ this.filter = filter;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ if (filter == null) {
+ throw new SearchSourceBuilderException("filter must be set on filter aggregation [" + name + "]");
+ }
+ filter.toXContent(builder, params);
+ return builder;
+ }
+}
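Request-side usage would look roughly as below; FilterBuilders.termFilter is the standard 1.x filter builder, while the aggregation and field names are made up:

    import org.elasticsearch.index.query.FilterBuilders;
    import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;

    // Hedged usage sketch: builds the request side of a filter aggregation.
    // "errors" and the "level" field are illustrative names.
    public class FilterAggUsage {
        public static void main(String[] args) {
            FilterAggregationBuilder agg = new FilterAggregationBuilder("errors")
                    .filter(FilterBuilders.termFilter("level", "error"));
            // Passed to SearchRequestBuilder.addAggregation(agg); serializes to:
            // "errors" : { "filter" : { "term" : { "level" : "error" } } }
        }
    }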
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
new file mode 100644
index 0000000..e016a98
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.filter;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.ReaderContextAware;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+
+import java.io.IOException;
+
+/**
+ * Aggregate all docs that match a filter.
+ */
+public class FilterAggregator extends SingleBucketAggregator implements ReaderContextAware {
+
+ private final Filter filter;
+
+ private Bits bits;
+
+ public FilterAggregator(String name,
+ org.apache.lucene.search.Filter filter,
+ AggregatorFactories factories,
+ AggregationContext aggregationContext,
+ Aggregator parent) {
+ super(name, factories, aggregationContext, parent);
+ this.filter = filter;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ try {
+ bits = DocIdSets.toSafeBits(reader.reader(), filter.getDocIdSet(reader, reader.reader().getLiveDocs()));
+ } catch (IOException ioe) {
+ throw new AggregationExecutionException("Failed to aggregate filter aggregator [" + name + "]", ioe);
+ }
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ if (bits.get(doc)) {
+ collectBucket(doc, owningBucketOrdinal);
+ }
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ return new InternalFilter(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalFilter(name, 0, buildEmptySubAggregations());
+ }
+
+ public static class Factory extends AggregatorFactory {
+
+ private org.apache.lucene.search.Filter filter;
+
+ public Factory(String name, Filter filter) {
+ super(name, InternalFilter.TYPE.name());
+ this.filter = filter;
+ }
+
+ @Override
+ public Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount) {
+ FilterAggregator aggregator = new FilterAggregator(name, filter, factories, context, parent);
+ context.registerReaderContextAware(aggregator);
+ return aggregator;
+ }
+
+ }
+}
+
+
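The collect() path above is a per-segment bitset gate in front of collectBucket(); a self-contained sketch with a boolean[] standing in for org.apache.lucene.util.Bits (illustrative only):

    // Self-contained sketch of FilterAggregator's collect() gate: a per-segment
    // bitset decides whether a doc is counted into the single bucket.
    public class FilterCollectDemo {
        public static void main(String[] args) {
            boolean[] bits = { true, false, true, true, false }; // docs matching the filter
            long docCount = 0;
            for (int doc = 0; doc < bits.length; doc++) {
                if (bits[doc]) {
                    docCount++; // the real code calls collectBucket(doc, owningBucketOrdinal)
                }
            }
            System.out.println(docCount); // 3
        }
    }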
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterParser.java
new file mode 100644
index 0000000..9c5efed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterParser.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.filter;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * Parses {@code filter} aggregation requests.
+ */
+public class FilterParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalFilter.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+ ParsedFilter filter = context.queryParserService().parseInnerFilter(parser);
+ return new FilterAggregator.Factory(aggregationName, filter.filter());
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java
new file mode 100644
index 0000000..a5db72a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.filter;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+
+import java.io.IOException;
+
+/**
+ * Result of the {@code filter} aggregation.
+ */
+public class InternalFilter extends InternalSingleBucketAggregation implements Filter {
+
+ public final static Type TYPE = new Type("filter");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalFilter readResult(StreamInput in) throws IOException {
+ InternalFilter result = new InternalFilter();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ InternalFilter() {} // for serialization
+
+ InternalFilter(String name, long docCount, InternalAggregations subAggregations) {
+ super(name, docCount, subAggregations);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java
new file mode 100644
index 0000000..c73d24e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+
+import java.util.Collection;
+
+/**
+ * A {@code geohash_grid} aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific
+ * precision.
+ */
+public interface GeoHashGrid extends MultiBucketsAggregation {
+
+ /**
+ * A bucket that is associated with a {@code geohash_grid} cell. The key of the bucket is the {@code geohash} of the cell.
+ */
+ public static interface Bucket extends MultiBucketsAggregation.Bucket {
+
+ /**
+ * @return The geohash of the cell as a geo point
+ */
+ GeoPoint getKeyAsGeoPoint();
+
+ /**
+ * @return A numeric representation of the geohash of the cell
+ */
+ Number getKeyAsNumber();
+
+ }
+
+ /**
+ * @return The buckets of this aggregation (each bucket representing a geohash grid cell)
+ */
+ @Override
+ Collection<Bucket> getBuckets();
+
+ @Override
+ Bucket getBucketByKey(String key);
+
+ Bucket getBucketByKey(Number key);
+
+ Bucket getBucketByKey(GeoPoint key);
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java
new file mode 100644
index 0000000..25889bc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.bucket.LongHash;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+/**
+ * Aggregates data expressed as geohash longs (for efficiency's sake) but formats results as geohash strings.
+ */
+public class GeoHashGridAggregator extends BucketsAggregator {
+
+ private static final int INITIAL_CAPACITY = 50; // TODO sizing
+
+ private final int requiredSize;
+ private final int shardSize;
+ private final NumericValuesSource valuesSource;
+ private final LongHash bucketOrds;
+
+ public GeoHashGridAggregator(String name, AggregatorFactories factories, NumericValuesSource valuesSource,
+ int requiredSize, int shardSize, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, BucketAggregationMode.PER_BUCKET, factories, INITIAL_CAPACITY, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ this.requiredSize = requiredSize;
+ this.shardSize = shardSize;
+ bucketOrds = new LongHash(INITIAL_CAPACITY, aggregationContext.pageCacheRecycler());
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return true;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0;
+ final LongValues values = valuesSource.longValues();
+ final int valuesCount = values.setDocument(doc);
+
+ for (int i = 0; i < valuesCount; ++i) {
+ final long val = values.nextValue();
+ long bucketOrdinal = bucketOrds.add(val);
+ if (bucketOrdinal < 0) { // already seen
+ bucketOrdinal = -1 - bucketOrdinal;
+ }
+ collectBucket(doc, bucketOrdinal);
+ }
+ }
+
+ // Private impl that stores a bucket ord; this allows computing the aggregations lazily.
+ static class OrdinalBucket extends InternalGeoHashGrid.Bucket {
+
+ long bucketOrd;
+
+ public OrdinalBucket() {
+ super(0, 0, (InternalAggregations) null);
+ }
+
+ }
+
+ @Override
+ public InternalGeoHashGrid buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0;
+ final int size = (int) Math.min(bucketOrds.size(), shardSize);
+
+ InternalGeoHashGrid.BucketPriorityQueue ordered = new InternalGeoHashGrid.BucketPriorityQueue(size);
+ OrdinalBucket spare = null;
+ for (long i = 0; i < bucketOrds.capacity(); ++i) {
+ final long ord = bucketOrds.id(i);
+ if (ord < 0) {
+ // slot is not allocated
+ continue;
+ }
+
+ if (spare == null) {
+ spare = new OrdinalBucket();
+ }
+ spare.geohashAsLong = bucketOrds.key(i);
+ spare.docCount = bucketDocCount(ord);
+ spare.bucketOrd = ord;
+ spare = (OrdinalBucket) ordered.insertWithOverflow(spare);
+ }
+
+ final InternalGeoHashGrid.Bucket[] list = new InternalGeoHashGrid.Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; --i) {
+ final OrdinalBucket bucket = (OrdinalBucket) ordered.pop();
+ bucket.aggregations = bucketAggregations(bucket.bucketOrd);
+ list[i] = bucket;
+ }
+ return new InternalGeoHashGrid(name, requiredSize, Arrays.asList(list));
+ }
+
+ @Override
+ public InternalGeoHashGrid buildEmptyAggregation() {
+ return new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket>emptyList());
+ }
+
+
+ @Override
+ public void doRelease() {
+ Releasables.release(bucketOrds);
+ }
+
+ public static class Unmapped extends Aggregator {
+ private int requiredSize;
+ public Unmapped(String name, int requiredSize, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, BucketAggregationMode.PER_BUCKET, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
+ this.requiredSize = requiredSize;
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return false;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ }
+
+ @Override
+ public InternalGeoHashGrid buildAggregation(long owningBucketOrdinal) {
+ return (InternalGeoHashGrid) buildEmptyAggregation();
+ }
+
+ @Override
+ public InternalGeoHashGrid buildEmptyAggregation() {
+ return new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket>emptyList());
+ }
+ }
+
+}
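buildAggregation() above is a bounded top-N selection: a size-limited priority queue ordered by doc count, with a reusable spare bucket to avoid allocating per slot. A simplified standalone sketch using java.util.PriorityQueue in place of Lucene's insertWithOverflow (the spare-reuse trick is elided):

    import java.util.Comparator;
    import java.util.PriorityQueue;

    // Simplified sketch of the shard-side top-N selection: keep at most `size`
    // buckets, evicting the smallest doc count. Uses java.util.PriorityQueue
    // instead of Lucene's insertWithOverflow; illustrative only.
    public class TopNDemo {
        public static void main(String[] args) {
            long[][] buckets = { {101, 7}, {102, 42}, {103, 3}, {104, 19} }; // {geohashAsLong, docCount}
            int size = 2;
            PriorityQueue<long[]> ordered = new PriorityQueue<long[]>(size, new Comparator<long[]>() {
                @Override
                public int compare(long[] a, long[] b) {
                    return Long.compare(a[1], b[1]); // min-heap on docCount
                }
            });
            for (long[] bucket : buckets) {
                ordered.offer(bucket);
                if (ordered.size() > size) {
                    ordered.poll(); // evict the current smallest, like insertWithOverflow
                }
            }
            while (!ordered.isEmpty()) {
                long[] bucket = ordered.poll(); // smallest of the survivors first
                System.out.println("geohash=" + bucket[0] + " docCount=" + bucket[1]); // 104/19, then 102/42
            }
        }
    }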
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java
new file mode 100644
index 0000000..dd79baf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+
+import java.io.IOException;
+
+/**
+ * Creates an aggregation based on bucketing points into GeoHashes
+ */
+public class GeoHashGridBuilder extends AggregationBuilder<GeoHashGridBuilder> {
+
+
+ private String field;
+ private int precision = GeoHashGridParser.DEFAULT_PRECISION;
+ private int requiredSize = GeoHashGridParser.DEFAULT_MAX_NUM_CELLS;
+ private int shardSize = 0;
+
+ public GeoHashGridBuilder(String name) {
+ super(name, InternalGeoHashGrid.TYPE.name());
+ }
+
+ public GeoHashGridBuilder field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ public GeoHashGridBuilder precision(int precision) {
+ if ((precision < 1) || (precision > 12)) {
+ throw new ElasticsearchIllegalArgumentException("Invalid geohash aggregation precision of " + precision
+ + "must be between 1 and 12");
+ }
+ this.precision = precision;
+ return this;
+ }
+
+ public GeoHashGridBuilder size(int requiredSize) {
+ this.requiredSize = requiredSize;
+ return this;
+ }
+
+ public GeoHashGridBuilder shardSize(int shardSize) {
+ this.shardSize = shardSize;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (field != null) {
+ builder.field("field", field);
+ }
+ if (precision != GeoHashGridParser.DEFAULT_PRECISION) {
+ builder.field("precision", precision);
+ }
+ if (requiredSize != GeoHashGridParser.DEFAULT_MAX_NUM_CELLS) {
+ builder.field("size", requiredSize);
+ }
+ if (shardSize != 0) {
+ builder.field("shard_size", shardSize);
+ }
+
+ return builder.endObject();
+ }
+
+}
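Request-side usage of the builder; the field name is made up, and the values are chosen so the optional fields actually serialize (precision differs from the default of 5, size from the default of 10000):

    import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridBuilder;

    // Hedged usage sketch for GeoHashGridBuilder; "location" is an illustrative geo_point field.
    public class GeoGridUsage {
        public static void main(String[] args) {
            GeoHashGridBuilder agg = new GeoHashGridBuilder("cells")
                    .field("location")
                    .precision(6)  // finer cells than the default of 5; must be 1..12
                    .size(100);    // top 100 cells after the final reduce
            // Passed to SearchRequestBuilder.addAggregation(agg); serializes to:
            // "cells" : { "geohash_grid" : { "field" : "location", "precision" : 6, "size" : 100 } }
        }
    }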
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
new file mode 100644
index 0000000..e870444
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.query.GeoBoundingBoxFilterBuilder;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.BucketUtils;
+import org.elasticsearch.search.aggregations.support.*;
+import org.elasticsearch.search.aggregations.support.geopoints.GeoPointValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * Aggregates Geo information into cells determined by geohashes of a given precision.
+ * WARNING - for high-precision geohashes it may prove necessary to use a {@link GeoBoundingBoxFilterBuilder}
+ * filter to focus on a smaller area, to avoid generating too many buckets and using too much RAM.
+ */
+public class GeoHashGridParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalGeoHashGrid.TYPE.name();
+ }
+
+ public static final int DEFAULT_PRECISION = 5;
+ public static final int DEFAULT_MAX_NUM_CELLS = 10000;
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ String field = null;
+ int precision = DEFAULT_PRECISION;
+ int requiredSize = DEFAULT_MAX_NUM_CELLS;
+ int shardSize = 0;
+
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("precision".equals(currentFieldName)) {
+ precision = parser.intValue();
+ } else if ("size".equals(currentFieldName)) {
+ requiredSize = parser.intValue();
+ } else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
+ shardSize = parser.intValue();
+ }
+
+ }
+ }
+ if (shardSize == 0) {
+ // Use the default heuristic to avoid ranking errors caused by distributed counting
+ shardSize = BucketUtils.suggestShardSideQueueSize(requiredSize, context.numberOfShards());
+ }
+
+ ValuesSourceConfig<GeoPointValuesSource> config = new ValuesSourceConfig<GeoPointValuesSource>(GeoPointValuesSource.class);
+ if (field == null) {
+ return new GeoGridFactory(aggregationName, config, precision, requiredSize, shardSize);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new GeoGridFactory(aggregationName, config, precision, requiredSize, shardSize);
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return new GeoGridFactory(aggregationName, config, precision, requiredSize, shardSize);
+ }
+
+
+ private static class GeoGridFactory extends ValueSourceAggregatorFactory<GeoPointValuesSource> {
+
+ private int precision;
+ private int requiredSize;
+ private int shardSize;
+
+ public GeoGridFactory(String name, ValuesSourceConfig<GeoPointValuesSource> valueSourceConfig,
+ int precision, int requiredSize, int shardSize) {
+ super(name, InternalGeoHashGrid.TYPE.name(), valueSourceConfig);
+ this.precision = precision;
+ this.requiredSize = requiredSize;
+ this.shardSize = shardSize;
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new GeoHashGridAggregator.Unmapped(name, requiredSize, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(final GeoPointValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ final CellValues cellIdValues = new CellValues(valuesSource, precision);
+ FieldDataSource.Numeric cellIdSource = new CellIdSource(cellIdValues, valuesSource.metaData());
+ if (cellIdSource.metaData().multiValued()) {
+ // we need to wrap to ensure uniqueness
+ cellIdSource = new FieldDataSource.Numeric.SortedAndUnique(cellIdSource);
+ }
+ final NumericValuesSource geohashIdSource = new NumericValuesSource(cellIdSource, null, null);
+ return new GeoHashGridAggregator(name, factories, geohashIdSource, requiredSize,
+ shardSize, aggregationContext, parent);
+
+ }
+
+ private static class CellValues extends LongValues {
+
+ private GeoPointValuesSource geoPointValues;
+ private GeoPointValues geoValues;
+ private int precision;
+
+ protected CellValues(GeoPointValuesSource geoPointValues, int precision) {
+ super(true);
+ this.geoPointValues = geoPointValues;
+ this.precision = precision;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ geoValues = geoPointValues.values();
+ return geoValues.setDocument(docId);
+ }
+
+ @Override
+ public long nextValue() {
+ GeoPoint target = geoValues.nextValue();
+ return GeoHashUtils.encodeAsLong(target.getLat(), target.getLon(), precision);
+ }
+
+ }
+
+ private static class CellIdSource extends FieldDataSource.Numeric {
+ private final LongValues values;
+ private MetaData metaData;
+
+ public CellIdSource(LongValues values, MetaData delegate) {
+ this.values = values;
+ // different GeoPoints could map to the same or to different geohash cells.
+ this.metaData = MetaData.builder(delegate).uniqueness(MetaData.Uniqueness.UNKNOWN).build();
+ }
+
+ @Override
+ public boolean isFloatingPoint() {
+ return false;
+ }
+
+ @Override
+ public LongValues longValues() {
+ return values;
+ }
+
+ @Override
+ public DoubleValues doubleValues() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ }
+ }
+
+}
\ No newline at end of file
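A standalone sketch of what CellValues computes per value: nearby points share a cell id at coarse precision and split apart as precision grows. The coordinates are arbitrary illustration values; GeoHashUtils.encodeAsLong and GeoHashUtils.toString are the same helpers used above.

    import org.elasticsearch.common.geo.GeoHashUtils;

    public class CellIdExample {
        public static void main(String[] args) {
            double lat1 = 52.5200, lon1 = 13.4050;
            double lat2 = 52.5206, lon2 = 13.4120;
            for (int precision = 1; precision <= 12; precision++) {
                long c1 = GeoHashUtils.encodeAsLong(lat1, lon1, precision);
                long c2 = GeoHashUtils.encodeAsLong(lat2, lon2, precision);
                System.out.println(precision + ": sameCell=" + (c1 == c2)
                        + " key=" + GeoHashUtils.toString(c1));
            }
        }
    }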
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
new file mode 100644
index 0000000..25c621f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Represents a grid of cells where each cell's location is determined by a geohash.
+ * All geohashes in a grid are of the same precision and held internally as a single long
+ * for efficiency's sake.
+ */
+public class InternalGeoHashGrid extends InternalAggregation implements GeoHashGrid {
+
+ public static final Type TYPE = new Type("geohash_grid", "ghcells");
+
+ public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalGeoHashGrid readResult(StreamInput in) throws IOException {
+ InternalGeoHashGrid buckets = new InternalGeoHashGrid();
+ buckets.readFrom(in);
+ return buckets;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+
+ static class Bucket implements GeoHashGrid.Bucket, Comparable<Bucket> {
+
+ protected long geohashAsLong;
+ protected long docCount;
+ protected InternalAggregations aggregations;
+
+ public Bucket(long geohashAsLong, long docCount, InternalAggregations aggregations) {
+ this.docCount = docCount;
+ this.aggregations = aggregations;
+ this.geohashAsLong = geohashAsLong;
+ }
+
+ public String getKey() {
+ return GeoHashUtils.toString(geohashAsLong);
+ }
+
+ @Override
+ public Text getKeyAsText() {
+ return new StringText(getKey());
+ }
+
+ public GeoPoint getKeyAsGeoPoint() {
+ return GeoHashUtils.decode(geohashAsLong);
+ }
+
+ @Override
+ public long getDocCount() {
+ return docCount;
+ }
+
+ @Override
+ public Aggregations getAggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public int compareTo(Bucket other) {
+ if (this.geohashAsLong > other.geohashAsLong) {
+ return 1;
+ }
+ if (this.geohashAsLong < other.geohashAsLong) {
+ return -1;
+ }
+ return 0;
+ }
+
+ public Bucket reduce(List<? extends Bucket> buckets, CacheRecycler cacheRecycler) {
+ if (buckets.size() == 1) {
+ // we still need to reduce the sub aggs
+ Bucket bucket = buckets.get(0);
+ bucket.aggregations.reduce(cacheRecycler);
+ return bucket;
+ }
+ Bucket reduced = null;
+ List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(buckets.size());
+ for (Bucket bucket : buckets) {
+ if (reduced == null) {
+ reduced = bucket;
+ } else {
+ reduced.docCount += bucket.docCount;
+ }
+ aggregationsList.add(bucket.aggregations);
+ }
+ reduced.aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
+ return reduced;
+ }
+
+ @Override
+ public Number getKeyAsNumber() {
+ return geohashAsLong;
+ }
+
+ }
+
+ private int requiredSize;
+ private Collection<Bucket> buckets;
+ protected Map<String, Bucket> bucketMap;
+
+ InternalGeoHashGrid() {
+ } // for serialization
+
+ public InternalGeoHashGrid(String name, int requiredSize, Collection<Bucket> buckets) {
+ super(name);
+ this.requiredSize = requiredSize;
+ this.buckets = buckets;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public Collection<GeoHashGrid.Bucket> getBuckets() {
+ Object o = buckets;
+ return (Collection<GeoHashGrid.Bucket>) o;
+ }
+
+ @Override
+ public GeoHashGrid.Bucket getBucketByKey(String geohash) {
+ if (bucketMap == null) {
+ bucketMap = new HashMap<String, Bucket>(buckets.size());
+ for (Bucket bucket : buckets) {
+ bucketMap.put(bucket.getKey(), bucket);
+ }
+ }
+ return bucketMap.get(geohash);
+ }
+
+ @Override
+ public GeoHashGrid.Bucket getBucketByKey(Number key) {
+ return getBucketByKey(GeoHashUtils.toString(key.longValue()));
+ }
+
+ @Override
+ public GeoHashGrid.Bucket getBucketByKey(GeoPoint key) {
+ return getBucketByKey(key.geohash());
+ }
+
+ @Override
+ public InternalGeoHashGrid reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ InternalGeoHashGrid grid = (InternalGeoHashGrid) aggregations.get(0);
+ grid.reduceAndTrimBuckets(reduceContext.cacheRecycler());
+ return grid;
+ }
+ InternalGeoHashGrid reduced = null;
+
+ Recycler.V<LongObjectOpenHashMap<List<Bucket>>> buckets = null;
+ for (InternalAggregation aggregation : aggregations) {
+ InternalGeoHashGrid grid = (InternalGeoHashGrid) aggregation;
+ if (reduced == null) {
+ reduced = grid;
+ }
+ if (buckets == null) {
+ buckets = reduceContext.cacheRecycler().longObjectMap(grid.buckets.size());
+ }
+ for (Bucket bucket : grid.buckets) {
+ List<Bucket> existingBuckets = buckets.v().get(bucket.geohashAsLong);
+ if (existingBuckets == null) {
+ existingBuckets = new ArrayList<Bucket>(aggregations.size());
+ buckets.v().put(bucket.geohashAsLong, existingBuckets);
+ }
+ existingBuckets.add(bucket);
+ }
+ }
+
+ if (reduced == null) {
+ // there are only unmapped grids, so we just return the first one (no need to reduce)
+ return (InternalGeoHashGrid) aggregations.get(0);
+ }
+
+ // TODO: would it be better to sort the backing array buffer of the hppc map directly instead of using a PQ?
+ final int size = Math.min(requiredSize, buckets.v().size());
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size);
+ Object[] internalBuckets = buckets.v().values;
+ boolean[] states = buckets.v().allocated;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ List<Bucket> sameCellBuckets = (List<Bucket>) internalBuckets[i];
+ ordered.insertWithOverflow(sameCellBuckets.get(0).reduce(sameCellBuckets, reduceContext.cacheRecycler()));
+ }
+ }
+ buckets.release();
+ Bucket[] list = new Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = ordered.pop();
+ }
+ reduced.buckets = Arrays.asList(list);
+ return reduced;
+ }
+
+ protected void reduceAndTrimBuckets(CacheRecycler cacheRecycler) {
+
+ if (requiredSize > buckets.size()) { // nothing to trim
+ for (Bucket bucket : buckets) {
+ bucket.aggregations.reduce(cacheRecycler);
+ }
+ return;
+ }
+
+ List<Bucket> trimmedBuckets = new ArrayList<Bucket>(requiredSize);
+ for (Bucket bucket : buckets) {
+ if (trimmedBuckets.size() >= requiredSize) {
+ break;
+ }
+ bucket.aggregations.reduce(cacheRecycler);
+ trimmedBuckets.add(bucket);
+ }
+ buckets = trimmedBuckets;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.requiredSize = in.readVInt();
+ int size = in.readVInt();
+ List<Bucket> buckets = new ArrayList<Bucket>(size);
+ for (int i = 0; i < size; i++) {
+ buckets.add(new Bucket(in.readLong(), in.readVLong(), InternalAggregations.readAggregations(in)));
+ }
+ this.buckets = buckets;
+ this.bucketMap = null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVInt(requiredSize);
+ out.writeVInt(buckets.size());
+ for (Bucket bucket : buckets) {
+ out.writeLong(bucket.geohashAsLong);
+ out.writeVLong(bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.startArray(CommonFields.BUCKETS);
+ for (Bucket bucket : buckets) {
+ builder.startObject();
+ builder.field(CommonFields.KEY, bucket.getKeyAsText());
+ builder.field(CommonFields.DOC_COUNT, bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).toXContentInternal(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ static class BucketPriorityQueue extends PriorityQueue<Bucket> {
+
+ public BucketPriorityQueue(int size) {
+ super(size);
+ }
+
+ @Override
+ protected boolean lessThan(Bucket o1, Bucket o2) {
+ long i = o2.getDocCount() - o1.getDocCount();
+ if (i == 0) {
+ i = o2.compareTo(o1);
+ if (i == 0) {
+ i = System.identityHashCode(o2) - System.identityHashCode(o1);
+ }
+ }
+ return i > 0;
+ }
+ }
+
+}
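The reduce path above merges same-cell buckets from every shard response and then keeps the top requiredSize cells. A standalone sketch of that strategy using only java.util types (the real code uses LongObjectOpenHashMap, recyclers, and BucketPriorityQueue):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.PriorityQueue;

    public class GridReduceSketch {
        static final class Cell {
            final long geohash;
            long docCount;
            Cell(long geohash, long docCount) { this.geohash = geohash; this.docCount = docCount; }
        }

        static List<Cell> reduce(List<List<Cell>> shardResults, int requiredSize) {
            // 1. merge: the same cell may be reported by several shards
            Map<Long, Cell> merged = new HashMap<Long, Cell>();
            for (List<Cell> shard : shardResults) {
                for (Cell c : shard) {
                    Cell existing = merged.get(c.geohash);
                    if (existing == null) {
                        merged.put(c.geohash, new Cell(c.geohash, c.docCount));
                    } else {
                        existing.docCount += c.docCount;
                    }
                }
            }
            // 2. top-N via a min-heap: the least populated cell is evicted first
            PriorityQueue<Cell> queue = new PriorityQueue<Cell>(Math.max(1, requiredSize),
                    new Comparator<Cell>() {
                        public int compare(Cell a, Cell b) {
                            return a.docCount < b.docCount ? -1 : (a.docCount > b.docCount ? 1 : 0);
                        }
                    });
            for (Cell cell : merged.values()) {
                queue.offer(cell);
                if (queue.size() > requiredSize) {
                    queue.poll();
                }
            }
            List<Cell> ordered = new ArrayList<Cell>(queue);
            Collections.sort(ordered, Collections.reverseOrder(queue.comparator()));
            return ordered; // most populated cells first
        }
    }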
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/Global.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/Global.java
new file mode 100644
index 0000000..8993c10
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/Global.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.global;
+
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;
+
+/**
+ * A {@code global} aggregation. Defines a single bucket that holds all the documents in the search context.
+ */
+public interface Global extends SingleBucketAggregation {
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
new file mode 100644
index 0000000..e50aed3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.global;
+
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class GlobalAggregator extends SingleBucketAggregator {
+
+ public GlobalAggregator(String name, AggregatorFactories subFactories, AggregationContext aggregationContext) {
+ super(name, subFactories, aggregationContext, null);
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0 : "global aggregator can only be a top level aggregator";
+ collectBucket(doc, owningBucketOrdinal);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0 : "global aggregator can only be a top level aggregator";
+ return new InternalGlobal(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ throw new UnsupportedOperationException("global aggregations cannot serve as sub-aggregations, hence should never be called on #buildEmptyAggregations");
+ }
+
+ public static class Factory extends AggregatorFactory {
+
+ public Factory(String name) {
+ super(name, InternalGlobal.TYPE.name());
+ }
+
+ @Override
+ public Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount) {
+ if (parent != null) {
+ throw new AggregationExecutionException("Aggregation [" + parent.name() + "] cannot have a global " +
+ "sub-aggregation [" + name + "]. Global aggregations can only be defined as top level aggregations");
+ }
+ return new GlobalAggregator(name, factories, context);
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalBuilder.java
new file mode 100644
index 0000000..0e7cb2b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalBuilder.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.global;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class GlobalBuilder extends AggregationBuilder<GlobalBuilder> {
+
+ public GlobalBuilder(String name) {
+ super(name, InternalGlobal.TYPE.name());
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject().endObject();
+ }
+}
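A hedged usage sketch: because GlobalAggregator.Factory rejects any parent aggregator, the builder has to be added at the top level of the request. The index, field, and query values are illustrative assumptions:

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.aggregations.bucket.global.GlobalBuilder;

    public class GlobalExample {
        // The query scopes the hits to red products, while the global bucket
        // still covers every document in the search context.
        public static SearchResponse search(Client client) {
            return client.prepareSearch("products")
                    .setQuery(QueryBuilders.termQuery("color", "red"))
                    .addAggregation(new GlobalBuilder("all_products"))
                    .execute().actionGet();
        }
    }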
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalParser.java
new file mode 100644
index 0000000..c70cf0f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalParser.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.global;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class GlobalParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalGlobal.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+ parser.nextToken();
+ return new GlobalAggregator.Factory(aggregationName);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java
new file mode 100644
index 0000000..b71e922
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.global;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+
+import java.io.IOException;
+
+/**
+ * A global scope aggregation (the document set on which we aggregate is all documents in the search context,
+ * i.e. index + type, regardless of the query).
+ */
+public class InternalGlobal extends InternalSingleBucketAggregation implements Global {
+
+ public final static Type TYPE = new Type("global");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalGlobal readResult(StreamInput in) throws IOException {
+ InternalGlobal result = new InternalGlobal();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ InternalGlobal() {} // for serialization
+
+ InternalGlobal(String name, long docCount, InternalAggregations aggregations) {
+ super(name, docCount, aggregations);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogram.java
new file mode 100644
index 0000000..0eb9693
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogram.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.joda.time.DateTime;
+
+import java.util.Collection;
+
+/**
+ * A {@code date_histogram} aggregation.
+ */
+public interface DateHistogram extends Histogram {
+
+ static interface Bucket extends Histogram.Bucket {
+
+ /**
+ * @return the key as a date construct (in UTC timezone).
+ */
+ DateTime getKeyAsDate();
+
+ }
+
+ @Override
+ Collection<? extends DateHistogram.Bucket> getBuckets();
+
+ @Override
+ Bucket getBucketByKey(String key);
+
+ @Override
+ Bucket getBucketByKey(Number key);
+
+ Bucket getBucketByKey(DateTime key);
+
+ /**
+ * The interval the date histogram is based on.
+ */
+ static class Interval {
+
+ public static final Interval SECOND = new Interval("1s");
+ public static final Interval MINUTE = new Interval("1m");
+ public static final Interval HOUR = new Interval("1h");
+ public static final Interval DAY = new Interval("1d");
+ public static final Interval WEEK = new Interval("1w");
+ public static final Interval MONTH = new Interval("1M");
+ public static final Interval QUARTER = new Interval("1q");
+ public static final Interval YEAR = new Interval("1y");
+
+ public static Interval seconds(int sec) {
+ return new Interval(sec + "s");
+ }
+
+ public static Interval minutes(int min) {
+ return new Interval(min + "m");
+ }
+
+ public static Interval hours(int hours) {
+ return new Interval(hours + "h");
+ }
+
+ public static Interval days(int days) {
+ return new Interval(days + "d");
+ }
+
+ public static Interval weeks(int weeks) {
+ return new Interval(weeks + "w");
+ }
+
+ private final String expression;
+
+ public Interval(String expression) {
+ this.expression = expression;
+ }
+
+ @Override
+ public String toString() {
+ return expression;
+ }
+ }
+}
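The Interval constants above are just named interval expressions; a small sketch showing equivalent spellings (toString() returns the raw expression):

    import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;

    public class IntervalExamples {
        public static void main(String[] args) {
            DateHistogram.Interval day = DateHistogram.Interval.DAY;            // "1d" (calendar-aware unit)
            DateHistogram.Interval sameDay = DateHistogram.Interval.days(1);    // also "1d"
            DateHistogram.Interval ninety = DateHistogram.Interval.minutes(90); // "90m" - a plain time value
            System.out.println(day + " " + sameDay + " " + ninety);             // prints: 1d 1d 90m
        }
    }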
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramBuilder.java
new file mode 100644
index 0000000..7ea4581
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramBuilder.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.ValuesSourceAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class DateHistogramBuilder extends ValuesSourceAggregationBuilder<DateHistogramBuilder> {
+
+ private Object interval;
+ private Histogram.Order order;
+ private Long minDocCount;
+ private String preZone;
+ private String postZone;
+ private boolean preZoneAdjustLargeInterval;
+ private String format;
+ long preOffset = 0;
+ long postOffset = 0;
+ float factor = 1.0f;
+
+ public DateHistogramBuilder(String name) {
+ super(name, InternalDateHistogram.TYPE.name());
+ }
+
+ public DateHistogramBuilder interval(long interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ public DateHistogramBuilder interval(DateHistogram.Interval interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ public DateHistogramBuilder order(DateHistogram.Order order) {
+ this.order = order;
+ return this;
+ }
+
+ public DateHistogramBuilder minDocCount(long minDocCount) {
+ this.minDocCount = minDocCount;
+ return this;
+ }
+
+ public DateHistogramBuilder preZone(String preZone) {
+ this.preZone = preZone;
+ return this;
+ }
+
+ public DateHistogramBuilder postZone(String postZone) {
+ this.postZone = postZone;
+ return this;
+ }
+
+ public DateHistogramBuilder preZoneAdjustLargeInterval(boolean preZoneAdjustLargeInterval) {
+ this.preZoneAdjustLargeInterval = preZoneAdjustLargeInterval;
+ return this;
+ }
+
+ public DateHistogramBuilder preOffset(long preOffset) {
+ this.preOffset = preOffset;
+ return this;
+ }
+
+ public DateHistogramBuilder postOffset(long postOffset) {
+ this.postOffset = postOffset;
+ return this;
+ }
+
+ public DateHistogramBuilder factor(float factor) {
+ this.factor = factor;
+ return this;
+ }
+
+ public DateHistogramBuilder format(String format) {
+ this.format = format;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder doInternalXContent(XContentBuilder builder, Params params) throws IOException {
+ if (interval == null) {
+ throw new SearchSourceBuilderException("[interval] must be defined for histogram aggregation [" + name + "]");
+ }
+ if (interval instanceof Number) {
+ interval = TimeValue.timeValueMillis(((Number) interval).longValue()).toString();
+ }
+ builder.field("interval", interval);
+
+ if (minDocCount != null) {
+ builder.field("min_doc_count", minDocCount);
+ }
+
+ if (order != null) {
+ builder.field("order");
+ order.toXContent(builder, params);
+ }
+
+ if (preZone != null) {
+ builder.field("pre_zone", preZone);
+ }
+
+ if (postZone != null) {
+ builder.field("post_zone", postZone);
+ }
+
+ if (preZoneAdjustLargeInterval) {
+ builder.field("pre_zone_adjust_large_interval", true);
+ }
+
+ if (preOffset != 0) {
+ builder.field("pre_offset", preOffset);
+ }
+
+ if (postOffset != 0) {
+ builder.field("post_offset", postOffset);
+ }
+
+ if (factor != 1.0f) {
+ builder.field("factor", factor);
+ }
+
+ if (format != null) {
+ builder.field("format", format);
+ }
+
+ return builder;
+ }
+
+}
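A hedged sketch of the builder above (field(...) is inherited from ValuesSourceAggregationBuilder; index and field names are assumptions for the example):

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
    import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramBuilder;

    public class DateHistogramExample {
        // One bucket per calendar day, keeping empty buckets and
        // formatting bucket keys as dates.
        public static SearchResponse perDay(Client client) {
            return client.prepareSearch("logs")
                    .addAggregation(new DateHistogramBuilder("per_day")
                            .field("timestamp")
                            .interval(DateHistogram.Interval.DAY)
                            .minDocCount(0)
                            .format("yyyy-MM-dd"))
                    .execute().actionGet();
        }
    }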
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
new file mode 100644
index 0000000..428ba72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.joda.DateMathParser;
+import org.elasticsearch.common.rounding.DateTimeUnit;
+import org.elasticsearch.common.rounding.TimeZoneRounding;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueParser;
+import org.elasticsearch.search.internal.SearchContext;
+import org.joda.time.DateTimeZone;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class DateHistogramParser implements Aggregator.Parser {
+
+ private final ImmutableMap<String, DateTimeUnit> dateFieldUnits;
+
+ public DateHistogramParser() {
+ dateFieldUnits = MapBuilder.<String, DateTimeUnit>newMapBuilder()
+ .put("year", DateTimeUnit.YEAR_OF_CENTURY)
+ .put("1y", DateTimeUnit.YEAR_OF_CENTURY)
+ .put("quarter", DateTimeUnit.QUARTER)
+ .put("1q", DateTimeUnit.QUARTER)
+ .put("month", DateTimeUnit.MONTH_OF_YEAR)
+ .put("1M", DateTimeUnit.MONTH_OF_YEAR)
+ .put("week", DateTimeUnit.WEEK_OF_WEEKYEAR)
+ .put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR)
+ .put("day", DateTimeUnit.DAY_OF_MONTH)
+ .put("1d", DateTimeUnit.DAY_OF_MONTH)
+ .put("hour", DateTimeUnit.HOUR_OF_DAY)
+ .put("1h", DateTimeUnit.HOUR_OF_DAY)
+ .put("minute", DateTimeUnit.MINUTES_OF_HOUR)
+ .put("1m", DateTimeUnit.MINUTES_OF_HOUR)
+ .put("second", DateTimeUnit.SECOND_OF_MINUTE)
+ .put("1s", DateTimeUnit.SECOND_OF_MINUTE)
+ .immutableMap();
+ }
+
+ @Override
+ public String type() {
+ return InternalDateHistogram.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+
+ String field = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ boolean keyed = false;
+ long minDocCount = 1;
+ InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
+ String interval = null;
+ boolean preZoneAdjustLargeInterval = false;
+ DateTimeZone preZone = DateTimeZone.UTC;
+ DateTimeZone postZone = DateTimeZone.UTC;
+ String format = null;
+ long preOffset = 0;
+ long postOffset = 0;
+ boolean assumeSorted = false;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("time_zone".equals(currentFieldName) || "timeZone".equals(currentFieldName)) {
+ preZone = parseZone(parser.text());
+ } else if ("pre_zone".equals(currentFieldName) || "preZone".equals(currentFieldName)) {
+ preZone = parseZone(parser.text());
+ } else if ("post_zone".equals(currentFieldName) || "postZone".equals(currentFieldName)) {
+ postZone = parseZone(parser.text());
+ } else if ("pre_offset".equals(currentFieldName) || "preOffset".equals(currentFieldName)) {
+ preOffset = parseOffset(parser.text());
+ } else if ("post_offset".equals(currentFieldName) || "postOffset".equals(currentFieldName)) {
+ postOffset = parseOffset(parser.text());
+ } else if ("interval".equals(currentFieldName)) {
+ interval = parser.text();
+ } else if ("format".equals(currentFieldName)) {
+ format = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("keyed".equals(currentFieldName)) {
+ keyed = parser.booleanValue();
+ } else if ("script_values_sorted".equals(currentFieldName) || "scriptValuesSorted".equals(currentFieldName)) {
+ assumeSorted = parser.booleanValue();
+ } else if ("pre_zone_adjust_large_interval".equals(currentFieldName) || "preZoneAdjustLargeInterval".equals(currentFieldName)) {
+ preZoneAdjustLargeInterval = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
+ minDocCount = parser.longValue();
+ } else if ("time_zone".equals(currentFieldName) || "timeZone".equals(currentFieldName)) {
+ preZone = DateTimeZone.forOffsetHours(parser.intValue());
+ } else if ("pre_zone".equals(currentFieldName) || "preZone".equals(currentFieldName)) {
+ preZone = DateTimeZone.forOffsetHours(parser.intValue());
+ } else if ("post_zone".equals(currentFieldName) || "postZone".equals(currentFieldName)) {
+ postZone = DateTimeZone.forOffsetHours(parser.intValue());
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else if ("order".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ String dir = parser.text();
+ boolean asc = "asc".equals(dir);
+ order = resolveOrder(currentFieldName, asc);
+ // TODO: should we throw an error if the value is not "asc" or "desc"?
+ }
+ }
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (interval == null) {
+ throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
+ }
+
+ SearchScript searchScript = null;
+ if (script != null) {
+ searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptParams);
+ config.script(searchScript);
+ }
+
+ if (!assumeSorted) {
+ // we need values to be sorted and unique for efficiency
+ config.ensureSorted(true);
+ }
+
+ TimeZoneRounding.Builder tzRoundingBuilder;
+ DateTimeUnit dateTimeUnit = dateFieldUnits.get(interval);
+ if (dateTimeUnit != null) {
+ tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit);
+ } else {
+ // not a calendar unit - try to parse the interval as a plain time value
+ tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.parseTimeValue(interval, null));
+ }
+
+ TimeZoneRounding rounding = tzRoundingBuilder
+ .preZone(preZone).postZone(postZone)
+ .preZoneAdjustLargeInterval(preZoneAdjustLargeInterval)
+ .preOffset(preOffset).postOffset(postOffset)
+ .build();
+
+ if (format != null) {
+ config.formatter(new ValueFormatter.DateTime(format));
+ }
+
+ if (field == null) {
+
+ if (searchScript != null) {
+ ValueParser valueParser = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER, DateFieldMapper.Defaults.TIME_UNIT));
+ config.parser(valueParser);
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
+ }
+
+ // falling back on the field data context
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
+ }
+
+ if (!(mapper instanceof DateFieldMapper)) {
+ throw new SearchParseException(context, "date histogram can only be aggregated on date fields but [" + field + "] is not a date field");
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
+ }
+
+ private static InternalOrder resolveOrder(String key, boolean asc) {
+ if ("_key".equals(key) || "_time".equals(key)) {
+ return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);
+ }
+ if ("_count".equals(key)) {
+ return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);
+ }
+ int i = key.indexOf('.');
+ if (i < 0) {
+ return new InternalOrder.Aggregation(key, null, asc);
+ }
+ return new InternalOrder.Aggregation(key.substring(0, i), key.substring(i + 1), asc);
+ }
+
+ private long parseOffset(String offset) throws IOException {
+ if (offset.charAt(0) == '-') {
+ return -TimeValue.parseTimeValue(offset.substring(1), null).millis();
+ }
+ int beginIndex = offset.charAt(0) == '+' ? 1 : 0;
+ return TimeValue.parseTimeValue(offset.substring(beginIndex), null).millis();
+ }
+
+ private DateTimeZone parseZone(String text) throws IOException {
+ int index = text.indexOf(':');
+ if (index != -1) {
+ int beginIndex = text.charAt(0) == '+' ? 1 : 0;
+ // format like -02:30
+ return DateTimeZone.forOffsetHoursMinutes(
+ Integer.parseInt(text.substring(beginIndex, index)),
+ Integer.parseInt(text.substring(index + 1))
+ );
+ } else {
+ // id, listed here: http://joda-time.sourceforge.net/timezones.html
+ return DateTimeZone.forID(text);
+ }
+ }
+
+}
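parseZone(...) above accepts either an hour:minute offset or a Joda time-zone id. A standalone sketch of the two forms using Joda-Time directly (the same calls the parser delegates to):

    import org.joda.time.DateTimeZone;

    public class ZoneForms {
        public static void main(String[] args) {
            // "+05:30" style: hours and minutes split on ':'
            DateTimeZone offset = DateTimeZone.forOffsetHoursMinutes(5, 30);
            // named-id style, as listed on the Joda time zone page
            DateTimeZone id = DateTimeZone.forID("Europe/Berlin");
            System.out.println(offset + " / " + id);
        }
    }

Offsets such as "-30m" or "+1h" for pre_offset/post_offset go through parseOffset(...), which strips the sign and parses the remainder as a TimeValue.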
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java
new file mode 100644
index 0000000..229e807
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import com.google.common.primitives.Longs;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+
+import java.util.Collection;
+import java.util.Comparator;
+
+/**
+ * A {@code histogram} aggregation. Defines multiple buckets, each representing an interval in a histogram.
+ */
+public interface Histogram extends MultiBucketsAggregation {
+
+ /**
+ * A bucket in the histogram into which documents fall.
+ */
+ static interface Bucket extends MultiBucketsAggregation.Bucket {
+
+ /**
+ * @return The key associated with the bucket (all documents that fall in this bucket were rounded to this key)
+ */
+ Number getKeyAsNumber();
+
+ }
+
+ /**
+ * @return The buckets of this histogram (each bucket representing an interval in the histogram)
+ */
+ Collection<? extends Bucket> getBuckets();
+
+ /**
+ * Returns a bucket by the key associated with it.
+ *
+ * @param key The key of the bucket.
+ * @return The bucket that is associated with the given key.
+ */
+ Bucket getBucketByKey(String key);
+
+ /**
+ * Returns a bucket by the key associated with it.
+ *
+ * @param key The key of the bucket.
+ * @return The bucket that is associated with the given key.
+ */
+ Bucket getBucketByKey(Number key);
+
+
+ /**
+ * A strategy defining the order in which the buckets of this histogram are returned.
+ */
+ static abstract class Order implements ToXContent {
+
+ public static final Order KEY_ASC = new InternalOrder((byte) 1, "_key", true, new Comparator<InternalHistogram.Bucket>() {
+ @Override
+ public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
+ return Longs.compare(b1.key, b2.key);
+ }
+ });
+
+ public static final Order KEY_DESC = new InternalOrder((byte) 2, "_key", false, new Comparator<InternalHistogram.Bucket>() {
+ @Override
+ public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
+ return -Longs.compare(b1.key, b2.key);
+ }
+ });
+
+ public static final Order COUNT_ASC = new InternalOrder((byte) 3, "_count", true, new Comparator<InternalHistogram.Bucket>() {
+ @Override
+ public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
+ int cmp = Longs.compare(b1.getDocCount(), b2.getDocCount());
+ if (cmp == 0) {
+ cmp = Longs.compare(b1.key, b2.key);
+ }
+ return cmp;
+ }
+ });
+
+
+ public static final Order COUNT_DESC = new InternalOrder((byte) 4, "_count", false, new Comparator<InternalHistogram.Bucket>() {
+ @Override
+ public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
+ int cmp = -Longs.compare(b1.getDocCount(), b2.getDocCount());
+ if (cmp == 0) {
+ cmp = Longs.compare(b1.key, b2.key);
+ }
+ return cmp;
+ }
+ });
+
+ /**
+ * Creates a bucket ordering strategy that sorts buckets based on a single-valued sub-aggregation
+ *
+ * @param aggregationName the name of the aggregation
+ * @param asc The direction of the order (ascending or descending)
+ */
+ public static Order aggregation(String aggregationName, boolean asc) {
+ return new InternalOrder.Aggregation(aggregationName, null, asc);
+ }
+
+ /**
+ * Creates a bucket ordering strategy that sorts buckets based on a multi-valued sub-aggregation
+ *
+ * @param aggregationName the name of the aggregation
+ * @param valueName The name of the value of the multi-valued aggregation by which the sorting will be applied
+ * @param asc The direction of the order (ascending or descending)
+ */
+ public static Order aggregation(String aggregationName, String valueName, boolean asc) {
+ return new InternalOrder.Aggregation(aggregationName, valueName, asc);
+ }
+
+ /**
+ * @return The bucket comparator by which the order will be applied.
+ */
+ abstract Comparator<InternalHistogram.Bucket> comparator();
+
+ }
+}
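A short sketch of the ordering strategies defined above ("avg_price" is a hypothetical sub-aggregation name):

    import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;

    public class OrderExamples {
        static final Histogram.Order BY_COUNT = Histogram.Order.COUNT_DESC;  // most populated buckets first
        static final Histogram.Order BY_KEY = Histogram.Order.KEY_ASC;       // ascending bucket key
        static final Histogram.Order BY_METRIC =
                Histogram.Order.aggregation("avg_price", true);              // by a sub-aggregation's value, ascending
    }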
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java
new file mode 100644
index 0000000..9c05863
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.inject.internal.Nullable;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.bucket.LongHash;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class HistogramAggregator extends BucketsAggregator {
+
+ private final NumericValuesSource valuesSource;
+ private final Rounding rounding;
+ private final InternalOrder order;
+ private final boolean keyed;
+ private final long minDocCount;
+ private final InternalHistogram.Factory histogramFactory;
+
+ private final LongHash bucketOrds;
+ private LongValues values;
+
+ public HistogramAggregator(String name,
+ AggregatorFactories factories,
+ Rounding rounding,
+ InternalOrder order,
+ boolean keyed,
+ long minDocCount,
+ @Nullable NumericValuesSource valuesSource,
+ long initialCapacity,
+ InternalHistogram.Factory<?> histogramFactory,
+ AggregationContext aggregationContext,
+ Aggregator parent) {
+
+ super(name, BucketAggregationMode.PER_BUCKET, factories, initialCapacity, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ this.rounding = rounding;
+ this.order = order;
+ this.keyed = keyed;
+ this.minDocCount = minDocCount;
+ this.histogramFactory = histogramFactory;
+
+ bucketOrds = new LongHash(initialCapacity, aggregationContext.pageCacheRecycler());
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0;
+ final LongValues values = valuesSource.longValues();
+ final int valuesCount = values.setDocument(doc);
+
+ long previousKey = Long.MIN_VALUE;
+ for (int i = 0; i < valuesCount; ++i) {
+ long value = values.nextValue();
+ long key = rounding.round(value);
+ assert key >= previousKey;
+ if (key == previousKey) {
+ continue;
+ }
+ long bucketOrd = bucketOrds.add(key);
+ if (bucketOrd < 0) { // already seen
+ bucketOrd = -1 - bucketOrd;
+ }
+ collectBucket(doc, bucketOrd);
+ previousKey = key;
+ }
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0;
+ List<InternalHistogram.Bucket> buckets = new ArrayList<InternalHistogram.Bucket>((int) bucketOrds.size());
+ for (long i = 0; i < bucketOrds.capacity(); ++i) {
+ final long ord = bucketOrds.id(i);
+ if (ord < 0) {
+ continue; // slot is not allocated
+ }
+ buckets.add(histogramFactory.createBucket(bucketOrds.key(i), bucketDocCount(ord), bucketAggregations(ord), valuesSource.formatter()));
+ }
+
+ CollectionUtil.introSort(buckets, order.comparator());
+
+ // value source will be null for unmapped fields
+ ValueFormatter formatter = valuesSource != null ? valuesSource.formatter() : null;
+ InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations()) : null;
+ return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed);
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ ValueFormatter formatter = valuesSource != null ? valuesSource.formatter() : null;
+ InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations()) : null;
+ return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed);
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(bucketOrds);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory<NumericValuesSource> {
+
+ private final Rounding rounding;
+ private final InternalOrder order;
+ private final boolean keyed;
+ private final long minDocCount;
+ private final InternalHistogram.Factory<?> histogramFactory;
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valueSourceConfig,
+ Rounding rounding, InternalOrder order, boolean keyed, long minDocCount, InternalHistogram.Factory<?> histogramFactory) {
+ super(name, histogramFactory.type(), valueSourceConfig);
+ this.rounding = rounding;
+ this.order = order;
+ this.keyed = keyed;
+ this.minDocCount = minDocCount;
+ this.histogramFactory = histogramFactory;
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, null, 0, histogramFactory, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ // TODO: if we keep track of min/max values in IndexFieldData, we could use the max here to come up with a better estimate of the bucket count
+ return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, valuesSource, 50, histogramFactory, aggregationContext, parent);
+ }
+
+ }
+}
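
The LongHash above doubles as the key-to-ordinal map: add(key) hands out a fresh, non-negative ordinal the first time a key is seen and returns -1 - existingOrdinal afterwards, which is why collect() flips negative return values. A minimal sketch of that contract (assuming a PageCacheRecycler instance named recycler is at hand):

    LongHash bucketOrds = new LongHash(16, recycler); // recycler: assumed PageCacheRecycler
    long first = bucketOrds.add(42L);                 // new key: fresh ordinal >= 0
    long again = bucketOrds.add(42L);                 // seen key: encodes the existing ordinal
    assert again == -1 - first;
    Releasables.release(bucketOrds);                  // pair with a release, as doRelease() does above
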
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramBuilder.java
new file mode 100644
index 0000000..1dd638d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramBuilder.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.ValuesSourceAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+
+/**
+ * A builder for a histogram aggregation.
+ */
+public class HistogramBuilder extends ValuesSourceAggregationBuilder<HistogramBuilder> {
+
+ private Long interval;
+ private Histogram.Order order;
+ private Long minDocCount;
+
+ /**
+ * Constructs a new histogram aggregation builder.
+ *
+ * @param name The name of the aggregation (will serve as the unique identifier for the aggregation result in the response)
+ */
+ public HistogramBuilder(String name) {
+ super(name, InternalHistogram.TYPE.name());
+ }
+
+ /**
+ * Sets the interval for the histogram.
+ *
+ * @param interval The interval for the histogram
+ * @return This builder
+ */
+ public HistogramBuilder interval(long interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ /**
+ * Sets the order by which the buckets will be returned.
+ *
+ * @param order The order by which the buckets will be returned
+ * @return This builder
+ */
+ public HistogramBuilder order(Histogram.Order order) {
+ this.order = order;
+ return this;
+ }
+
+ /**
+ * Sets the minimum document count per bucket. Buckets with fewer documents than this minimum will not be returned.
+ *
+ * @param minDocCount The minimum document count per bucket
+ * @return This builder
+ */
+ public HistogramBuilder minDocCount(long minDocCount) {
+ this.minDocCount = minDocCount;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder doInternalXContent(XContentBuilder builder, Params params) throws IOException {
+ if (interval == null) {
+ throw new SearchSourceBuilderException("[interval] must be defined for histogram aggregation [" + name + "]");
+ }
+ builder.field("interval", interval);
+
+ if (order != null) {
+ builder.field("order");
+ order.toXContent(builder, params);
+ }
+
+ if (minDocCount != null) {
+ builder.field("min_doc_count", minDocCount);
+ }
+
+ return builder;
+ }
+
+}
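
For orientation, a typical use of this builder through the Java client might look like the sketch below; field(...) is inherited from ValuesSourceAggregationBuilder, and the index and field names are illustrative:

    SearchResponse response = client.prepareSearch("products")    // hypothetical index
            .addAggregation(new HistogramBuilder("prices")
                    .field("price")                               // from ValuesSourceAggregationBuilder
                    .interval(50)
                    .minDocCount(0)
                    .order(Histogram.Order.COUNT_DESC))
            .execute().actionGet();
    Histogram prices = response.getAggregations().get("prices");
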
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
new file mode 100644
index 0000000..7005db5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Parses the histogram request
+ */
+public class HistogramParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalHistogram.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+
+ String field = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ boolean keyed = false;
+ long minDocCount = 1;
+ InternalOrder order = (InternalOrder) InternalOrder.KEY_ASC;
+ long interval = -1;
+ boolean assumeSorted = false;
+ String format = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("format".equals(currentFieldName)) {
+ format = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("interval".equals(currentFieldName)) {
+ interval = parser.longValue();
+ } else if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
+ minDocCount = parser.longValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("keyed".equals(currentFieldName)) {
+ keyed = parser.booleanValue();
+ } else if ("script_values_sorted".equals(currentFieldName) || "scriptValuesSorted".equals(currentFieldName)) {
+ assumeSorted = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else if ("order".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ String dir = parser.text();
+ boolean asc = "asc".equals(dir);
+ if (!asc && !"desc".equals(dir)) {
+ throw new SearchParseException(context, "Unknown order direction [" + dir + "] in aggregation [" + aggregationName + "]. Should be either [asc] or [desc]");
+ }
+ order = resolveOrder(currentFieldName, asc);
+ }
+ }
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in aggregation [" + aggregationName + "].");
+ }
+ }
+
+ if (interval < 0) {
+ throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
+ }
+ Rounding rounding = new Rounding.Interval(interval);
+
+ if (script != null) {
+ config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
+ }
+
+ if (!assumeSorted) {
+ // we need values to be sorted and unique for efficiency
+ config.ensureSorted(true);
+ }
+
+ if (field == null) {
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalHistogram.FACTORY);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalHistogram.FACTORY);
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+
+ if (format != null) {
+ config.formatter(new ValueFormatter.Number.Pattern(format));
+ }
+
+ return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalHistogram.FACTORY);
+
+ }
+
+ static InternalOrder resolveOrder(String key, boolean asc) {
+ if ("_key".equals(key)) {
+ return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);
+ }
+ if ("_count".equals(key)) {
+ return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);
+ }
+ int i = key.indexOf('.');
+ if (i < 0) {
+ return new InternalOrder.Aggregation(key, null, asc);
+ }
+ return new InternalOrder.Aggregation(key.substring(0, i), key.substring(i + 1), asc);
+ }
+}
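
Taken together, the keys handled above describe a request fragment of roughly the following shape (illustrative names, shown as comments). An order key containing a dot, e.g. "avg_price.value", is resolved to a sub-aggregation order by the indexOf('.') branch of resolveOrder:

    // "aggs" : {
    //   "prices" : {
    //     "histogram" : {
    //       "field" : "price",
    //       "interval" : 50,
    //       "min_doc_count" : 0,
    //       "order" : { "_count" : "desc" }   // resolveOrder("_count", false) -> COUNT_DESC
    //     }
    //   }
    // }
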
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
new file mode 100644
index 0000000..065c8d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalDateHistogram extends InternalHistogram<InternalDateHistogram.Bucket> implements DateHistogram {
+
+ final static Type TYPE = new Type("date_histogram", "dhisto");
+ final static Factory FACTORY = new Factory();
+
+ private final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalDateHistogram readResult(StreamInput in) throws IOException {
+ InternalDateHistogram histogram = new InternalDateHistogram();
+ histogram.readFrom(in);
+ return histogram;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ static class Bucket extends InternalHistogram.Bucket implements DateHistogram.Bucket {
+
+ private final ValueFormatter formatter;
+
+ Bucket(long key, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ super(key, docCount, aggregations);
+ this.formatter = formatter;
+ }
+
+ @Override
+ public String getKey() {
+ return formatter != null ? formatter.format(key) : DateFieldMapper.Defaults.DATE_TIME_FORMATTER.printer().print(key);
+ }
+
+ @Override
+ public DateTime getKeyAsDate() {
+ return new DateTime(key, DateTimeZone.UTC);
+ }
+ }
+
+ static class Factory extends InternalHistogram.Factory<InternalDateHistogram.Bucket> {
+
+ private Factory() {
+ }
+
+ @Override
+ public String type() {
+ return TYPE.name();
+ }
+
+ @Override
+ public InternalDateHistogram create(String name, List<InternalDateHistogram.Bucket> buckets, InternalOrder order,
+ long minDocCount, EmptyBucketInfo emptyBucketInfo, ValueFormatter formatter, boolean keyed) {
+ return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed);
+ }
+
+ @Override
+ public InternalDateHistogram.Bucket createBucket(long key, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, docCount, aggregations, formatter);
+ }
+ }
+
+ private ObjectObjectOpenHashMap<String, InternalDateHistogram.Bucket> bucketsMap;
+
+ InternalDateHistogram() {} // for serialization
+
+ InternalDateHistogram(String name, List<InternalDateHistogram.Bucket> buckets, InternalOrder order, long minDocCount,
+ EmptyBucketInfo emptyBucketInfo, ValueFormatter formatter, boolean keyed) {
+ super(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public Bucket getBucketByKey(String key) {
+ try {
+ long time = Long.parseLong(key);
+ return super.getBucketByKey(time);
+ } catch (NumberFormatException nfe) {
+ // it's not a number, so let's try to parse it as a date using the formatter.
+ }
+ if (bucketsMap == null) {
+ bucketsMap = new ObjectObjectOpenHashMap<String, InternalDateHistogram.Bucket>();
+ for (InternalDateHistogram.Bucket bucket : buckets) {
+ bucketsMap.put(bucket.getKey(), bucket);
+ }
+ }
+ return bucketsMap.get(key);
+ }
+
+ @Override
+ public DateHistogram.Bucket getBucketByKey(DateTime key) {
+ return getBucketByKey(key.getMillis());
+ }
+
+ @Override
+ protected InternalDateHistogram.Bucket createBucket(long key, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, docCount, aggregations, formatter);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ bucketsMap = null; // we need to reset this on read (as it's lazily created on demand)
+ }
+
+}
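
Note that getBucketByKey(String) first tries the key as epoch millis and only falls back to the lazily built map of formatted keys when parsing fails, so both lookups in this sketch resolve to the same bucket (the aggregation name is illustrative):

    DateHistogram histo = response.getAggregations().get("per_day");  // hypothetical aggregation
    DateHistogram.Bucket a = histo.getBucketByKey("1388534400000");   // numeric string: parsed as millis
    DateHistogram.Bucket b = histo.getBucketByKey(new DateTime(2014, 1, 1, 0, 0, DateTimeZone.UTC));
    assert a == b;                                                    // same underlying bucket
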
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
new file mode 100644
index 0000000..73eed06
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ * TODO: should this be renamed to InternalNumericHistogram (see the comment on {@link Histogram})?
+ */
+public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalAggregation implements Histogram {
+
+ final static Type TYPE = new Type("histogram", "histo");
+ final static Factory FACTORY = new Factory();
+
+ private final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalHistogram readResult(StreamInput in) throws IOException {
+ InternalHistogram histogram = new InternalHistogram();
+ histogram.readFrom(in);
+ return histogram;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ public static class Bucket implements Histogram.Bucket {
+
+ long key;
+ long docCount;
+ InternalAggregations aggregations;
+
+ public Bucket(long key, long docCount, InternalAggregations aggregations) {
+ this.key = key;
+ this.docCount = docCount;
+ this.aggregations = aggregations;
+ }
+
+ @Override
+ public String getKey() {
+ return String.valueOf(key);
+ }
+
+ @Override
+ public Text getKeyAsText() {
+ return new StringText(getKey());
+ }
+
+ @Override
+ public Number getKeyAsNumber() {
+ return key;
+ }
+
+ @Override
+ public long getDocCount() {
+ return docCount;
+ }
+
+ @Override
+ public Aggregations getAggregations() {
+ return aggregations;
+ }
+
+ <B extends Bucket> B reduce(List<B> buckets, CacheRecycler cacheRecycler) {
+ if (buckets.size() == 1) {
+ // we only need to reduce the sub aggregations
+ Bucket bucket = buckets.get(0);
+ bucket.aggregations.reduce(cacheRecycler);
+ return (B) bucket;
+ }
+ List<InternalAggregations> aggregations = new ArrayList<InternalAggregations>(buckets.size());
+ Bucket reduced = null;
+ for (Bucket bucket : buckets) {
+ if (reduced == null) {
+ reduced = bucket;
+ } else {
+ reduced.docCount += bucket.docCount;
+ }
+ aggregations.add((InternalAggregations) bucket.getAggregations());
+ }
+ reduced.aggregations = InternalAggregations.reduce(aggregations, cacheRecycler);
+ return (B) reduced;
+ }
+ }
+
+ static class EmptyBucketInfo {
+ final Rounding rounding;
+ final InternalAggregations subAggregations;
+
+ EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations) {
+ this.rounding = rounding;
+ this.subAggregations = subAggregations;
+ }
+
+ public static EmptyBucketInfo readFrom(StreamInput in) throws IOException {
+ return new EmptyBucketInfo(Rounding.Streams.read(in), InternalAggregations.readAggregations(in));
+ }
+
+ public static void writeTo(EmptyBucketInfo info, StreamOutput out) throws IOException {
+ Rounding.Streams.write(info.rounding, out);
+ info.subAggregations.writeTo(out);
+ }
+ }
+
+ static class Factory<B extends InternalHistogram.Bucket> {
+
+ protected Factory() {
+ }
+
+ public String type() {
+ return TYPE.name();
+ }
+
+ public InternalHistogram<B> create(String name, List<B> buckets, InternalOrder order, long minDocCount,
+ EmptyBucketInfo emptyBucketInfo, ValueFormatter formatter, boolean keyed) {
+ return new InternalHistogram<B>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed);
+ }
+
+ public B createBucket(long key, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return (B) new Bucket(key, docCount, aggregations);
+ }
+
+ }
+
+ protected List<B> buckets;
+ private LongObjectOpenHashMap<B> bucketsMap;
+ private InternalOrder order;
+ private ValueFormatter formatter;
+ private boolean keyed;
+ private long minDocCount;
+ private EmptyBucketInfo emptyBucketInfo;
+
+ InternalHistogram() {} // for serialization
+
+ InternalHistogram(String name, List<B> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, ValueFormatter formatter, boolean keyed) {
+ super(name);
+ this.buckets = buckets;
+ this.order = order;
+ assert (minDocCount == 0) == (emptyBucketInfo != null);
+ this.minDocCount = minDocCount;
+ this.emptyBucketInfo = emptyBucketInfo;
+ this.formatter = formatter;
+ this.keyed = keyed;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public Collection<B> getBuckets() {
+ return buckets;
+ }
+
+ @Override
+ public B getBucketByKey(String key) {
+ return getBucketByKey(Long.valueOf(key));
+ }
+
+ @Override
+ public B getBucketByKey(Number key) {
+ if (bucketsMap == null) {
+ bucketsMap = new LongObjectOpenHashMap<B>(buckets.size());
+ for (B bucket : buckets) {
+ bucketsMap.put(bucket.key, bucket);
+ }
+ }
+ return bucketsMap.get(key.longValue());
+ }
+
+ @Override
+ public InternalAggregation reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+
+ InternalHistogram<B> histo = (InternalHistogram<B>) aggregations.get(0);
+
+ if (minDocCount == 1) {
+ for (B bucket : histo.buckets) {
+ bucket.aggregations.reduce(reduceContext.cacheRecycler());
+ }
+ return histo;
+ }
+
+
+ CollectionUtil.introSort(histo.buckets, order.asc ? InternalOrder.KEY_ASC.comparator() : InternalOrder.KEY_DESC.comparator());
+ List<B> list = order.asc ? histo.buckets : Lists.reverse(histo.buckets);
+ B prevBucket = null;
+ ListIterator<B> iter = list.listIterator();
+ if (minDocCount == 0) {
+ // we need to fill the gaps with empty buckets
+ while (iter.hasNext()) {
+ // look ahead at the next bucket without advancing the iterator
+ // so we'll be able to insert elements at the right position
+ B nextBucket = list.get(iter.nextIndex());
+ nextBucket.aggregations.reduce(reduceContext.cacheRecycler());
+ if (prevBucket != null) {
+ long key = emptyBucketInfo.rounding.nextRoundingValue(prevBucket.key);
+ while (key != nextBucket.key) {
+ iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
+ key = emptyBucketInfo.rounding.nextRoundingValue(key);
+ }
+ }
+ prevBucket = iter.next();
+ }
+ } else {
+ while (iter.hasNext()) {
+ InternalHistogram.Bucket bucket = iter.next();
+ if (bucket.getDocCount() < minDocCount) {
+ iter.remove();
+ } else {
+ bucket.aggregations.reduce(reduceContext.cacheRecycler());
+ }
+ }
+ }
+
+ if (order != InternalOrder.KEY_ASC && order != InternalOrder.KEY_DESC) {
+ CollectionUtil.introSort(histo.buckets, order.comparator());
+ }
+
+ return histo;
+ }
+
+ InternalHistogram reduced = (InternalHistogram) aggregations.get(0);
+
+ Recycler.V<LongObjectOpenHashMap<List<Histogram.Bucket>>> bucketsByKey = reduceContext.cacheRecycler().longObjectMap(-1);
+ for (InternalAggregation aggregation : aggregations) {
+ InternalHistogram<B> histogram = (InternalHistogram) aggregation;
+ for (B bucket : histogram.buckets) {
+ List<Histogram.Bucket> bucketList = bucketsByKey.v().get(bucket.key);
+ if (bucketList == null) {
+ bucketList = new ArrayList<Histogram.Bucket>(aggregations.size());
+ bucketsByKey.v().put(bucket.key, bucketList);
+ }
+ bucketList.add(bucket);
+ }
+ }
+
+ List<B> reducedBuckets = new ArrayList<B>(bucketsByKey.v().size());
+ Object[] buckets = bucketsByKey.v().values;
+ boolean[] allocated = bucketsByKey.v().allocated;
+ for (int i = 0; i < allocated.length; i++) {
+ if (allocated[i]) {
+ B bucket = ((List<B>) buckets[i]).get(0).reduce(((List<B>) buckets[i]), reduceContext.cacheRecycler());
+ if (bucket.getDocCount() >= minDocCount) {
+ reducedBuckets.add(bucket);
+ }
+ }
+ }
+ bucketsByKey.release();
+
+ // adding empty buckets if needed
+ if (minDocCount == 0) {
+ CollectionUtil.introSort(reducedBuckets, order.asc ? InternalOrder.KEY_ASC.comparator() : InternalOrder.KEY_DESC.comparator());
+ List<B> list = order.asc ? reducedBuckets : Lists.reverse(reducedBuckets);
+ B prevBucket = null;
+ ListIterator<B> iter = list.listIterator();
+ while (iter.hasNext()) {
+ B nextBucket = list.get(iter.nextIndex());
+ if (prevBucket != null) {
+ long key = emptyBucketInfo.rounding.nextRoundingValue(prevBucket.key);
+ while (key != nextBucket.key) {
+ iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
+ key = emptyBucketInfo.rounding.nextRoundingValue(key);
+ }
+ }
+ prevBucket = iter.next();
+ }
+
+ if (order != InternalOrder.KEY_ASC && order != InternalOrder.KEY_DESC) {
+ CollectionUtil.introSort(reducedBuckets, order.comparator());
+ }
+
+ } else {
+ CollectionUtil.introSort(reducedBuckets, order.comparator());
+ }
+
+
+ reduced.buckets = reducedBuckets;
+ return reduced;
+ }
+
+ protected B createBucket(long key, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return (B) new InternalHistogram.Bucket(key, docCount, aggregations);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ order = InternalOrder.Streams.readOrder(in);
+ minDocCount = in.readVLong();
+ if (minDocCount == 0) {
+ emptyBucketInfo = EmptyBucketInfo.readFrom(in);
+ }
+ formatter = ValueFormatterStreams.readOptional(in);
+ keyed = in.readBoolean();
+ int size = in.readVInt();
+ List<B> buckets = new ArrayList<B>(size);
+ for (int i = 0; i < size; i++) {
+ buckets.add(createBucket(in.readLong(), in.readVLong(), InternalAggregations.readAggregations(in), formatter));
+ }
+ this.buckets = buckets;
+ this.bucketsMap = null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ InternalOrder.Streams.writeOrder(order, out);
+ out.writeVLong(minDocCount);
+ if (minDocCount == 0) {
+ EmptyBucketInfo.writeTo(emptyBucketInfo, out);
+ }
+ ValueFormatterStreams.writeOptional(formatter, out);
+ out.writeBoolean(keyed);
+ out.writeVInt(buckets.size());
+ for (B bucket : buckets) {
+ out.writeLong(bucket.key);
+ out.writeVLong(bucket.docCount);
+ bucket.aggregations.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ if (keyed) {
+ builder.startObject(CommonFields.BUCKETS);
+ } else {
+ builder.startArray(CommonFields.BUCKETS);
+ }
+
+ for (B bucket : buckets) {
+ if (formatter != null) {
+ Text keyTxt = new StringText(formatter.format(bucket.key));
+ if (keyed) {
+ builder.startObject(keyTxt.string());
+ } else {
+ builder.startObject();
+ }
+ builder.field(CommonFields.KEY_AS_STRING, keyTxt);
+ } else {
+ if (keyed) {
+ builder.startObject(String.valueOf(bucket.getKeyAsNumber()));
+ } else {
+ builder.startObject();
+ }
+ }
+ builder.field(CommonFields.KEY, bucket.key);
+ builder.field(CommonFields.DOC_COUNT, bucket.docCount);
+ bucket.aggregations.toXContentInternal(builder, params);
+ builder.endObject();
+ }
+
+ if (keyed) {
+ builder.endObject();
+ } else {
+ builder.endArray();
+ }
+ return builder.endObject();
+ }
+
+}
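
The subtlest part of reduce() is the gap-filling pass for minDocCount == 0: between each pair of adjacent buckets it repeatedly applies rounding.nextRoundingValue and inserts one empty bucket per missing key. A worked sketch with the interval rounding used by this aggregation: buckets at keys 0 and 30 get empty buckets inserted at 10 and 20.

    Rounding rounding = new Rounding.Interval(10);
    long prevKey = 0, nextKey = 30;
    for (long key = rounding.nextRoundingValue(prevKey); key != nextKey;
            key = rounding.nextRoundingValue(key)) {
        System.out.println("empty bucket at key " + key);   // prints 10, then 20
    }
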
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java
new file mode 100644
index 0000000..e427499
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+
+import java.io.IOException;
+import java.util.Comparator;
+
+/**
+ * An internal {@link Histogram.Order} strategy which is identified by a unique id.
+ */
+class InternalOrder extends Histogram.Order {
+
+ final byte id;
+ final String key;
+ final boolean asc;
+ final Comparator<InternalHistogram.Bucket> comparator;
+
+ InternalOrder(byte id, String key, boolean asc, Comparator<InternalHistogram.Bucket> comparator) {
+ this.id = id;
+ this.key = key;
+ this.asc = asc;
+ this.comparator = comparator;
+ }
+
+ byte id() {
+ return id;
+ }
+
+ String key() {
+ return key;
+ }
+
+ boolean asc() {
+ return asc;
+ }
+
+ @Override
+ Comparator<InternalHistogram.Bucket> comparator() {
+ return comparator;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject().field(key, asc ? "asc" : "desc").endObject();
+ }
+
+ static class Aggregation extends InternalOrder {
+
+ static final byte ID = 0;
+
+ Aggregation(String key, boolean asc) {
+ super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<InternalHistogram.Bucket>(key, asc));
+ }
+
+ Aggregation(String aggName, String valueName, boolean asc) {
+ super(ID, key(aggName, valueName), asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<InternalHistogram.Bucket>(aggName, valueName, asc));
+ }
+
+ private static String key(String aggName, String valueName) {
+ return (valueName == null) ? aggName : aggName + "." + valueName;
+ }
+
+ }
+
+ static class Streams {
+
+ /**
+ * Writes the given order to the given output (based on the id of the order).
+ */
+ public static void writeOrder(InternalOrder order, StreamOutput out) throws IOException {
+ out.writeByte(order.id());
+ if (order instanceof InternalOrder.Aggregation) {
+ out.writeBoolean(order.asc());
+ out.writeString(order.key());
+ }
+ }
+
+ /**
+ * Reads an order from the given input (based on the id of the order).
+ *
+ * @see Streams#writeOrder(InternalOrder, org.elasticsearch.common.io.stream.StreamOutput)
+ */
+ public static InternalOrder readOrder(StreamInput in) throws IOException {
+ byte id = in.readByte();
+ switch (id) {
+ case 1: return (InternalOrder) Histogram.Order.KEY_ASC;
+ case 2: return (InternalOrder) Histogram.Order.KEY_DESC;
+ case 3: return (InternalOrder) Histogram.Order.COUNT_ASC;
+ case 4: return (InternalOrder) Histogram.Order.COUNT_DESC;
+ case 0:
+ boolean asc = in.readBoolean();
+ String key = in.readString();
+ return new InternalOrder.Aggregation(key, asc);
+ default:
+ throw new RuntimeException("unknown histogram order");
+ }
+ }
+
+ }
+
+
+}
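
Only aggregation-based orders (id 0) carry a payload; the four built-in orders are fully identified by their byte. A round-trip sketch (assuming BytesStreamOutput and BytesReference.streamInput() from the common I/O package):

    BytesStreamOutput out = new BytesStreamOutput();
    InternalOrder.Streams.writeOrder(new InternalOrder.Aggregation("avg_price", "value", true), out);
    InternalOrder order = InternalOrder.Streams.readOrder(out.bytes().streamInput());
    assert order.asc() && "avg_price.value".equals(order.key());
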
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java
new file mode 100644
index 0000000..7e4198c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.missing;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class InternalMissing extends InternalSingleBucketAggregation implements Missing {
+
+ public final static Type TYPE = new Type("missing");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalMissing readResult(StreamInput in) throws IOException {
+ InternalMissing missing = new InternalMissing();
+ missing.readFrom(in);
+ return missing;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+
+ InternalMissing() {
+ }
+
+ InternalMissing(String name, long docCount, InternalAggregations aggregations) {
+ super(name, docCount, aggregations);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/Missing.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/Missing.java
new file mode 100644
index 0000000..66d02b5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/Missing.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.missing;
+
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;
+
+/**
+ * A {@code missing} aggregation. Defines a single bucket of all documents that are missing a specific field.
+ */
+public interface Missing extends SingleBucketAggregation {
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java
new file mode 100644
index 0000000..8cb7811
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.missing;
+
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MissingAggregator extends SingleBucketAggregator {
+
+ private ValuesSource valuesSource;
+
+ public MissingAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource,
+ AggregationContext aggregationContext, Aggregator parent) {
+ super(name, factories, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ if (valuesSource == null || valuesSource.bytesValues().setDocument(doc) == 0) {
+ collectBucket(doc, owningBucketOrdinal);
+ }
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ return new InternalMissing(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalMissing(name, 0, buildEmptySubAggregations());
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory {
+
+ public Factory(String name, ValuesSourceConfig valueSourceConfig) {
+ super(name, InternalMissing.TYPE.name(), valueSourceConfig);
+ }
+
+ @Override
+ protected MissingAggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new MissingAggregator(name, factories, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected MissingAggregator create(ValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new MissingAggregator(name, factories, valuesSource, aggregationContext, parent);
+ }
+ }
+
+}
+
+
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingBuilder.java
new file mode 100644
index 0000000..94ba66c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.missing;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MissingBuilder extends AggregationBuilder<MissingBuilder> {
+
+ private String field;
+
+ public MissingBuilder(String name) {
+ super(name, InternalMissing.TYPE.name());
+ }
+
+ public MissingBuilder field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (field != null) {
+ builder.field("field", field);
+ }
+ return builder.endObject();
+ }
+}
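
A usage sketch: count the documents that have no value for a field (index and field names illustrative):

    SearchResponse response = client.prepareSearch("products")    // hypothetical index
            .addAggregation(new MissingBuilder("no_price").field("price"))
            .execute().actionGet();
    Missing noPrice = response.getAggregations().get("no_price");
    long missingDocs = noPrice.getDocCount();
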
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
new file mode 100644
index 0000000..dc1914d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.missing;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MissingParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalMissing.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<ValuesSource> config = new ValuesSourceConfig<ValuesSource>(ValuesSource.class);
+
+ String field = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (field == null) {
+ return new MissingAggregator.Factory(aggregationName, config);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new MissingAggregator.Factory(aggregationName, config);
+ }
+
+ config.fieldContext(new FieldContext(field, context.fieldData().getForField(mapper)));
+ return new MissingAggregator.Factory(aggregationName, config);
+ }
+}
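
The parser accepts a single field key, i.e. a request fragment like (illustrative names, shown as a comment):

    // "aggs" : { "no_price" : { "missing" : { "field" : "price" } } }
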
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java
new file mode 100644
index 0000000..ed968ad
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.nested;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class InternalNested extends InternalSingleBucketAggregation implements Nested {
+
+ public static final Type TYPE = new Type("nested");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalNested readResult(StreamInput in) throws IOException {
+ InternalNested result = new InternalNested();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ public InternalNested() {
+ }
+
+ public InternalNested(String name, long docCount, InternalAggregations aggregations) {
+ super(name, docCount, aggregations);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/Nested.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/Nested.java
new file mode 100644
index 0000000..7ec3b75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/Nested.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.nested;
+
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;
+
+/**
+ * A {@code nested} aggregation. Defines a single bucket that holds all the nested documents of a specific path.
+ */
+public interface Nested extends SingleBucketAggregation {
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java
new file mode 100644
index 0000000..0e15cd8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.nested;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.ReaderContextAware;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.aggregations.*;
+import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NestedAggregator extends SingleBucketAggregator implements ReaderContextAware {
+
+ private final Aggregator parentAggregator;
+ private Filter parentFilter;
+ private final Filter childFilter;
+
+ private Bits childDocs;
+ private FixedBitSet parentDocs;
+
+ public NestedAggregator(String name, AggregatorFactories factories, String nestedPath, AggregationContext aggregationContext, Aggregator parentAggregator) {
+ super(name, factories, aggregationContext, parentAggregator);
+ this.parentAggregator = parentAggregator;
+ MapperService.SmartNameObjectMapper mapper = aggregationContext.searchContext().smartNameObjectMapper(nestedPath);
+ if (mapper == null) {
+ throw new AggregationExecutionException("facet nested path [" + nestedPath + "] not found");
+ }
+ ObjectMapper objectMapper = mapper.mapper();
+ if (objectMapper == null) {
+ throw new AggregationExecutionException("facet nested path [" + nestedPath + "] not found");
+ }
+ if (!objectMapper.nested().isNested()) {
+ throw new AggregationExecutionException("facet nested path [" + nestedPath + "] is not nested");
+ }
+
+ childFilter = aggregationContext.searchContext().filterCache().cache(objectMapper.nestedTypeFilter());
+ }
+
+ private NestedAggregator findClosestNestedAggregator(Aggregator parent) {
+ for (; parent != null; parent = parent.parent()) {
+ if (parent instanceof NestedAggregator) {
+ return (NestedAggregator) parent;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ if (parentFilter == null) {
+ NestedAggregator closestNestedAggregator = findClosestNestedAggregator(parentAggregator);
+ final Filter parentFilterNotCached;
+ if (closestNestedAggregator == null) {
+ parentFilterNotCached = NonNestedDocsFilter.INSTANCE;
+ } else {
+ // Aggregators are instantiated in reverse order: the innermost nested aggregators first, the top level ones last.
+ // So at the time a nested 'nested' aggregator is parsed, its closest parent nested aggregator hasn't been constructed yet.
+ // The trick is to resolve the parent filter at the last moment, just before it is needed, at which point we can
+ // use that aggregator's child filter as our parent filter.
+ parentFilterNotCached = closestNestedAggregator.childFilter;
+ }
+ parentFilter = SearchContext.current().filterCache().cache(parentFilterNotCached);
+ }
+
+ try {
+ DocIdSet docIdSet = parentFilter.getDocIdSet(reader, null);
+ // In ES, if the parent is deleted its children are deleted as well. Therefore acceptedDocs can also be null here.
+ childDocs = DocIdSets.toSafeBits(reader.reader(), childFilter.getDocIdSet(reader, null));
+ if (DocIdSets.isEmpty(docIdSet)) {
+ parentDocs = null;
+ } else {
+ parentDocs = (FixedBitSet) docIdSet;
+ }
+ } catch (IOException ioe) {
+ throw new AggregationExecutionException("Failed to aggregate [" + name + "]", ioe);
+ }
+ }
+
+ @Override
+ public void collect(int parentDoc, long bucketOrd) throws IOException {
+
+ // here we translate the parent doc to the list of its nested docs, and collect every one of them into
+ // the bucket (the bucket doc count is then incremented once, below, with the total number of children)
+
+ if (parentDoc == 0 || parentDocs == null) {
+ return;
+ }
+ int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
+ int numChildren = 0;
+ for (int i = (parentDoc - 1); i > prevParentDoc; i--) {
+ if (childDocs.get(i)) {
+ ++numChildren;
+ collectBucketNoCounts(i, bucketOrd);
+ }
+ }
+ incrementBucketDocCount(numChildren, bucketOrd);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalNested(name, 0, buildEmptySubAggregations());
+ }
+
+ public static class Factory extends AggregatorFactory {
+
+ private final String path;
+
+ public Factory(String name, String path) {
+ super(name, InternalNested.TYPE.name());
+ this.path = path;
+ }
+
+ @Override
+ public Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount) {
+ NestedAggregator aggregator = new NestedAggregator(name, factories, path, context, parent);
+ context.registerReaderContextAware(aggregator);
+ return aggregator;
+ }
+ }
+}
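
collect() relies on Lucene's block-join document layout: the children of a parent are indexed immediately before it, so walking backwards from parentDoc - 1 down to (but not including) the previous set parent bit enumerates exactly that parent's children. A self-contained sketch:

    // Docs 3 and 7 are parents; docs 0..2 are children of 3, docs 4..6 are children of 7.
    FixedBitSet parentDocs = new FixedBitSet(8);
    parentDocs.set(3);
    parentDocs.set(7);
    int parentDoc = 7;
    int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);   // -> 3
    for (int i = parentDoc - 1; i > prevParentDoc; i--) {
        System.out.println("child doc " + i);                   // prints 6, 5, 4
    }
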
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedBuilder.java
new file mode 100644
index 0000000..6867c4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedBuilder.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.nested;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NestedBuilder extends AggregationBuilder<NestedBuilder> {
+
+ private String path;
+
+ public NestedBuilder(String name) {
+ super(name, InternalNested.TYPE.name());
+ }
+
+ public NestedBuilder path(String path) {
+ this.path = path;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (path == null) {
+ throw new SearchSourceBuilderException("nested path must be set on nested aggregation [" + name + "]");
+ }
+ builder.field("path", path);
+ return builder.endObject();
+ }
+}
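
A hedged usage sketch of the builder above; the index name, path, and sub-aggregation are illustrative, and subAggregation(...) comes from the AggregationBuilder base class:

    // assumes an initialized org.elasticsearch.client.Client named "client"
    SearchResponse response = client.prepareSearch("products")
            .addAggregation(new NestedBuilder("comments_agg")
                    .path("comments")
                    .subAggregation(AggregationBuilders.avg("avg_stars").field("comments.stars")))
            .execute().actionGet();
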
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java
new file mode 100644
index 0000000..c2c08b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.nested;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class NestedParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalNested.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+ String path = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("path".equals(currentFieldName)) {
+ path = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (path == null) {
+ // "field" doesn't exist, so we fall back to the context of the ancestors
+ throw new SearchParseException(context, "Missing [path] field for nested aggregation [" + aggregationName + "]");
+ }
+
+ return new NestedAggregator.Factory(aggregationName, path);
+ }
+}
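
The parser above accepts a single "path" key. A sketch of the request-body fragment it consumes, built with XContentBuilder (a fragment; aggregation and path names are illustrative, and the usual org.elasticsearch.common.xcontent imports are assumed):

    XContentBuilder agg = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("comments_agg")
                    .startObject("nested")
                        .field("path", "comments")
                    .endObject()
                .endObject()
            .endObject();
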
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java
new file mode 100644
index 0000000..19082d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.range;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.ValuesSourceAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public abstract class AbstractRangeBuilder<B extends AbstractRangeBuilder<B>> extends ValuesSourceAggregationBuilder<B> {
+
+ protected static class Range implements ToXContent {
+
+ private String key;
+ private Object from;
+ private Object to;
+
+ public Range(String key, Object from, Object to) {
+ this.key = key;
+ this.from = from;
+ this.to = to;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (key != null) {
+ builder.field("key", key);
+ }
+ if (from != null) {
+ builder.field("from", from);
+ }
+ if (to != null) {
+ builder.field("to", to);
+ }
+ return builder.endObject();
+ }
+ }
+
+ protected List<Range> ranges = Lists.newArrayList();
+
+ protected AbstractRangeBuilder(String name, String type) {
+ super(name, type);
+ }
+
+ @Override
+ protected XContentBuilder doInternalXContent(XContentBuilder builder, Params params) throws IOException {
+ if (ranges.isEmpty()) {
+ throw new SearchSourceBuilderException("at least one range must be defined for range aggregation [" + name + "]");
+ }
+ builder.startArray("ranges");
+ for (Range range : ranges) {
+ range.toXContent(builder, params);
+ }
+ return builder.endArray();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
new file mode 100644
index 0000000..9a7b1ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ *
+ */
+public class InternalRange<B extends InternalRange.Bucket> extends InternalAggregation implements Range {
+
+ static final Factory FACTORY = new Factory();
+
+ public final static Type TYPE = new Type("range");
+
+ private final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalRange readResult(StreamInput in) throws IOException {
+ InternalRange ranges = new InternalRange();
+ ranges.readFrom(in);
+ return ranges;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ public static class Bucket implements Range.Bucket {
+
+ private double from = Double.NEGATIVE_INFINITY;
+ private double to = Double.POSITIVE_INFINITY;
+ private long docCount;
+ InternalAggregations aggregations;
+ private String key;
+ private boolean explicitKey;
+
+ public Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ if (key != null) {
+ this.key = key;
+ explicitKey = true;
+ } else {
+ this.key = key(from, to, formatter);
+ explicitKey = false;
+ }
+ this.from = from;
+ this.to = to;
+ this.docCount = docCount;
+ this.aggregations = aggregations;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ @Override
+ public Text getKeyAsText() {
+ return new StringText(getKey());
+ }
+
+ @Override
+ public Number getFrom() {
+ return from;
+ }
+
+ @Override
+ public Number getTo() {
+ return to;
+ }
+
+ @Override
+ public long getDocCount() {
+ return docCount;
+ }
+
+ @Override
+ public Aggregations getAggregations() {
+ return aggregations;
+ }
+
+ Bucket reduce(List<Bucket> ranges, CacheRecycler cacheRecycler) {
+ if (ranges.size() == 1) {
+ // we still need to call reduce on all the sub aggregations
+ Bucket bucket = ranges.get(0);
+ bucket.aggregations.reduce(cacheRecycler);
+ return bucket;
+ }
+ Bucket reduced = null;
+ List<InternalAggregations> aggregationsList = Lists.newArrayListWithCapacity(ranges.size());
+ for (Bucket range : ranges) {
+ if (reduced == null) {
+ reduced = range;
+ } else {
+ reduced.docCount += range.docCount;
+ }
+ aggregationsList.add(range.aggregations);
+ }
+ reduced.aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
+ return reduced;
+ }
+
+ void toXContent(XContentBuilder builder, Params params, ValueFormatter formatter, boolean keyed) throws IOException {
+ if (keyed) {
+ builder.startObject(key);
+ } else {
+ builder.startObject();
+ if (explicitKey) {
+ builder.field(CommonFields.KEY, key);
+ }
+ }
+ if (!Double.isInfinite(from)) {
+ builder.field(CommonFields.FROM, from);
+ if (formatter != null) {
+ builder.field(CommonFields.FROM_AS_STRING, formatter.format(from));
+ }
+ }
+ if (!Double.isInfinite(to)) {
+ builder.field(CommonFields.TO, to);
+ if (formatter != null) {
+ builder.field(CommonFields.TO_AS_STRING, formatter.format(to));
+ }
+ }
+ builder.field(CommonFields.DOC_COUNT, docCount);
+ aggregations.toXContentInternal(builder, params);
+ builder.endObject();
+ }
+
+ private static String key(double from, double to, ValueFormatter formatter) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(Double.isInfinite(from) ? "*" : formatter != null ? formatter.format(from) : from);
+ sb.append("-");
+ sb.append(Double.isInfinite(to) ? "*" : formatter != null ? formatter.format(to) : to);
+ return sb.toString();
+ }
+
+ }
+
+ public static class Factory<B extends Bucket, R extends InternalRange<B>> {
+
+ public String type() {
+ return TYPE.name();
+ }
+
+ public R create(String name, List<B> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ return (R) new InternalRange<B>(name, ranges, formatter, keyed, unmapped);
+ }
+
+
+ public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return (B) new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+ }
+
+ private List<B> ranges;
+ private Map<String, B> rangeMap;
+ private ValueFormatter formatter;
+ private boolean keyed;
+
+ private boolean unmapped;
+
+ public InternalRange() {} // for serialization
+
+ public InternalRange(String name, List<B> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ super(name);
+ this.ranges = ranges;
+ this.formatter = formatter;
+ this.keyed = keyed;
+ this.unmapped = unmapped;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public Collection<B> getBuckets() {
+ return ranges;
+ }
+
+ @Override
+ public B getBucketByKey(String key) {
+ if (rangeMap == null) {
+ rangeMap = new HashMap<String, B>(ranges.size());
+ for (Range.Bucket bucket : ranges) {
+ rangeMap.put(bucket.getKey(), (B) bucket);
+ }
+ }
+ return rangeMap.get(key);
+ }
+
+ @Override
+ public InternalAggregation reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ InternalRange<B> reduced = (InternalRange<B>) aggregations.get(0);
+ for (B bucket : reduced.ranges) {
+ bucket.aggregations.reduce(reduceContext.cacheRecycler());
+ }
+ return reduced;
+ }
+ List<List<Bucket>> rangesList = null;
+ for (InternalAggregation aggregation : aggregations) {
+ InternalRange<Bucket> ranges = (InternalRange) aggregation;
+ if (ranges.unmapped) {
+ continue;
+ }
+ if (rangesList == null) {
+ rangesList = new ArrayList<List<Bucket>>(ranges.ranges.size());
+ for (Bucket bucket : ranges.ranges) {
+ List<Bucket> sameRangeList = new ArrayList<Bucket>(aggregations.size());
+ sameRangeList.add(bucket);
+ rangesList.add(sameRangeList);
+ }
+ } else {
+ int i = 0;
+ for (Bucket range : ranges.ranges) {
+ rangesList.get(i++).add(range);
+ }
+ }
+ }
+
+ if (rangesList == null) {
+ // unmapped, we can just take the first one
+ return aggregations.get(0);
+ }
+
+ InternalRange reduced = (InternalRange) aggregations.get(0);
+ int i = 0;
+ for (List<Bucket> sameRangeList : rangesList) {
+ reduced.ranges.set(i++, (sameRangeList.get(0)).reduce(sameRangeList, reduceContext.cacheRecycler()));
+ }
+ return reduced;
+ }
+
+ protected B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return (B) new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ formatter = ValueFormatterStreams.readOptional(in);
+ keyed = in.readBoolean();
+ int size = in.readVInt();
+ List<B> ranges = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ String key = in.readOptionalString();
+ ranges.add(createBucket(key, in.readDouble(), in.readDouble(), in.readVLong(), InternalAggregations.readAggregations(in), formatter));
+ }
+ this.ranges = ranges;
+ this.rangeMap = null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ ValueFormatterStreams.writeOptional(formatter, out);
+ out.writeBoolean(keyed);
+ out.writeVInt(ranges.size());
+ for (B bucket : ranges) {
+ out.writeOptionalString(((Bucket) bucket).key);
+ out.writeDouble(((Bucket) bucket).from);
+ out.writeDouble(((Bucket) bucket).to);
+ out.writeVLong(((Bucket) bucket).docCount);
+ bucket.aggregations.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ if (keyed) {
+ builder.startObject(CommonFields.BUCKETS);
+ } else {
+ builder.startArray(CommonFields.BUCKETS);
+ }
+ for (B range : ranges) {
+ range.toXContent(builder, params, formatter, keyed);
+ }
+ if (keyed) {
+ builder.endObject();
+ } else {
+ builder.endArray();
+ }
+ return builder.endObject();
+ }
+
+}
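
The reduce above merges buckets position-wise: every shard builds its buckets from the same parsed ranges in the same sorted order, so the i-th bucket on each shard covers the same range, and only doc counts and sub-aggregations need combining. A plain-Java analogue of the doc-count part, stripped of ES types:

    long[][] shardCounts = { {5, 2, 9}, {1, 0, 4} }; // two shards, three ranges
    long[] reduced = new long[3];
    for (long[] shard : shardCounts) {
        for (int i = 0; i < shard.length; i++) {
            reduced[i] += shard[i]; // bucket i is the same range on every shard
        }
    }
    // reduced == {6, 2, 13}
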
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/Range.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/Range.java
new file mode 100644
index 0000000..6e508a6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/Range.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+
+import java.util.Collection;
+
+/**
+ * A {@code range} aggregation. Defines multiple buckets, each associated with a pre-defined value range of a field;
+ * a document is assigned to a bucket when the value of that field falls within the bucket's range.
+ */
+public interface Range extends MultiBucketsAggregation {
+
+ /**
+ * A bucket associated with a specific range
+ */
+ public static interface Bucket extends MultiBucketsAggregation.Bucket {
+
+ /**
+ * @return The lower bound of the range (inclusive)
+ */
+ Number getFrom();
+
+ /**
+ * @return The upper bound of the range (exclusive)
+ */
+ Number getTo();
+ }
+
+ Collection<? extends Bucket> getBuckets();
+
+ @Override
+ Bucket getBucketByKey(String key);
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java
new file mode 100644
index 0000000..4ddbc58
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java
@@ -0,0 +1,312 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.InPlaceMergeSorter;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ *
+ */
+public class RangeAggregator extends BucketsAggregator {
+
+ public static class Range {
+
+ public String key;
+ public double from = Double.NEGATIVE_INFINITY;
+ String fromAsStr;
+ public double to = Double.POSITIVE_INFINITY;
+ String toAsStr;
+
+ public Range(String key, double from, String fromAsStr, double to, String toAsStr) {
+ this.key = key;
+ this.from = from;
+ this.fromAsStr = fromAsStr;
+ this.to = to;
+ this.toAsStr = toAsStr;
+ }
+
+ boolean matches(double value) {
+ return value >= from && value < to;
+ }
+
+ @Override
+ public String toString() {
+ return "[" + from + " to " + to + ")";
+ }
+
+ public void process(ValueParser parser, AggregationContext aggregationContext) {
+ if (fromAsStr != null) {
+ from = parser != null ? parser.parseDouble(fromAsStr, aggregationContext.searchContext()) : Double.valueOf(fromAsStr);
+ }
+ if (toAsStr != null) {
+ to = parser != null ? parser.parseDouble(toAsStr, aggregationContext.searchContext()) : Double.valueOf(toAsStr);
+ }
+ }
+ }
+
+ private final NumericValuesSource valuesSource;
+ private final Range[] ranges;
+ private final boolean keyed;
+ private final InternalRange.Factory rangeFactory;
+
+ final double[] maxTo;
+
+ public RangeAggregator(String name,
+ AggregatorFactories factories,
+ NumericValuesSource valuesSource,
+ InternalRange.Factory rangeFactory,
+ List<Range> ranges,
+ boolean keyed,
+ AggregationContext aggregationContext,
+ Aggregator parent) {
+
+ super(name, BucketAggregationMode.MULTI_BUCKETS, factories, ranges.size() * (parent == null ? 1 : parent.estimatedBucketCount()), aggregationContext, parent);
+ assert valuesSource != null;
+ this.valuesSource = valuesSource;
+ this.keyed = keyed;
+ this.rangeFactory = rangeFactory;
+ this.ranges = ranges.toArray(new Range[ranges.size()]);
+ for (int i = 0; i < this.ranges.length; i++) {
+ this.ranges[i].process(valuesSource.parser(), context);
+ }
+ sortRanges(this.ranges);
+
+ maxTo = new double[this.ranges.length];
+ maxTo[0] = this.ranges[0].to;
+ for (int i = 1; i < this.ranges.length; ++i) {
+ maxTo[i] = Math.max(this.ranges[i].to,maxTo[i-1]);
+ }
+
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return true;
+ }
+
+ private final long subBucketOrdinal(long owningBucketOrdinal, int rangeOrd) {
+ return owningBucketOrdinal * ranges.length + rangeOrd;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ final DoubleValues values = valuesSource.doubleValues();
+ final int valuesCount = values.setDocument(doc);
+ for (int i = 0, lo = 0; i < valuesCount; ++i) {
+ final double value = values.nextValue();
+ lo = collect(doc, value, owningBucketOrdinal, lo);
+ }
+ }
+
+ private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException {
+ int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes
+ int mid = (lo + hi) >>> 1;
+ while (lo <= hi) {
+ if (value < ranges[mid].from) {
+ hi = mid - 1;
+ } else if (value >= maxTo[mid]) {
+ lo = mid + 1;
+ } else {
+ break;
+ }
+ mid = (lo + hi) >>> 1;
+ }
+ if (lo > hi) return lo; // no potential candidate
+
+ // binary search the lower bound
+ int startLo = lo, startHi = mid;
+ while (startLo <= startHi) {
+ final int startMid = (startLo + startHi) >>> 1;
+ if (value >= maxTo[startMid]) {
+ startLo = startMid + 1;
+ } else {
+ startHi = startMid - 1;
+ }
+ }
+
+ // binary search the upper bound
+ int endLo = mid, endHi = hi;
+ while (endLo <= endHi) {
+ final int endMid = (endLo + endHi) >>> 1;
+ if (value < ranges[endMid].from) {
+ endHi = endMid - 1;
+ } else {
+ endLo = endMid + 1;
+ }
+ }
+
+ assert startLo == lowBound || value >= maxTo[startLo - 1];
+ assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from;
+
+ for (int i = startLo; i <= endHi; ++i) {
+ if (ranges[i].matches(value)) {
+ collectBucket(doc, subBucketOrdinal(owningBucketOrdinal, i));
+ }
+ }
+
+ return endHi + 1;
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets = Lists.newArrayListWithCapacity(ranges.length);
+ for (int i = 0; i < ranges.length; i++) {
+ Range range = ranges[i];
+ final long bucketOrd = subBucketOrdinal(owningBucketOrdinal, i);
+ org.elasticsearch.search.aggregations.bucket.range.Range.Bucket bucket = rangeFactory.createBucket(
+ range.key, range.from, range.to, bucketDocCount(bucketOrd),bucketAggregations(bucketOrd), valuesSource.formatter());
+ buckets.add(bucket);
+ }
+ // value source can be null in the case of unmapped fields
+ ValueFormatter formatter = valuesSource != null ? valuesSource.formatter() : null;
+ return rangeFactory.create(name, buckets, formatter, keyed, false);
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ InternalAggregations subAggs = buildEmptySubAggregations();
+ List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets = Lists.newArrayListWithCapacity(ranges.length);
+ for (int i = 0; i < ranges.length; i++) {
+ Range range = ranges[i];
+ org.elasticsearch.search.aggregations.bucket.range.Range.Bucket bucket = rangeFactory.createBucket(
+ range.key, range.from, range.to, 0, subAggs, valuesSource.formatter());
+ buckets.add(bucket);
+ }
+ // value source can be null in the case of unmapped fields
+ ValueFormatter formatter = valuesSource != null ? valuesSource.formatter() : null;
+ return rangeFactory.create(name, buckets, formatter, keyed, false);
+ }
+
+ private static final void sortRanges(final Range[] ranges) {
+ new InPlaceMergeSorter() {
+
+ @Override
+ protected void swap(int i, int j) {
+ final Range tmp = ranges[i];
+ ranges[i] = ranges[j];
+ ranges[j] = tmp;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ int cmp = Double.compare(ranges[i].from, ranges[j].from);
+ if (cmp == 0) {
+ cmp = Double.compare(ranges[i].to, ranges[j].to);
+ }
+ return cmp;
+ }
+ }.sort(0, ranges.length);
+ }
+
+ public static class Unmapped extends Aggregator {
+
+ private final List<RangeAggregator.Range> ranges;
+ private final boolean keyed;
+ private final InternalRange.Factory factory;
+ private final ValueFormatter formatter;
+ private final ValueParser parser;
+
+ public Unmapped(String name,
+ List<RangeAggregator.Range> ranges,
+ boolean keyed,
+ ValueFormatter formatter,
+ ValueParser parser,
+ AggregationContext aggregationContext,
+ Aggregator parent,
+ InternalRange.Factory factory) {
+
+ super(name, BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
+ this.ranges = ranges;
+ for (Range range : this.ranges) {
+ range.process(parser, context);
+ }
+ this.keyed = keyed;
+ this.formatter = formatter;
+ this.parser = parser;
+ this.factory = factory;
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return false;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ }
+
+ @Override
+ public InternalRange buildAggregation(long owningBucketOrdinal) {
+ return buildEmptyAggregation();
+ }
+
+ @Override
+ public InternalRange buildEmptyAggregation() {
+ InternalAggregations subAggs = buildEmptySubAggregations();
+ List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets =
+ new ArrayList<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket>(ranges.size());
+ for (RangeAggregator.Range range : ranges) {
+ buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, formatter));
+ }
+ return factory.create(name, buckets, formatter, keyed, true);
+ }
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory<NumericValuesSource> {
+
+ private final InternalRange.Factory rangeFactory;
+ private final List<Range> ranges;
+ private final boolean keyed;
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valueSourceConfig, InternalRange.Factory rangeFactory, List<Range> ranges, boolean keyed) {
+ super(name, rangeFactory.type(), valueSourceConfig);
+ this.rangeFactory = rangeFactory;
+ this.ranges = ranges;
+ this.keyed = keyed;
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new Unmapped(name, ranges, keyed, valuesSourceConfig.formatter(), valuesSourceConfig.parser(), aggregationContext, parent, rangeFactory);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new RangeAggregator(name, factories, valuesSource, rangeFactory, ranges, keyed, aggregationContext, parent);
+ }
+ }
+
+}
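
The private collect(doc, value, ...) method above narrows the candidate window with binary searches before testing ranges individually. Because the ranges are sorted by "from" and maxTo[i] holds the running maximum of "to" over indexes 0..i, a value v can match no range at index i with v >= maxTo[i] and no range with v < from. A linear-scan analogue with illustrative overlapping ranges (the aggregator computes the same window in logarithmic time):

    double[] from  = { 0, 10, 10, 50 };
    double[] to    = { 100, 20, 60, 70 };
    double[] maxTo = new double[to.length];
    maxTo[0] = to[0];
    for (int i = 1; i < to.length; i++) {
        maxTo[i] = Math.max(to[i], maxTo[i - 1]); // running max, as in the constructor
    }
    double value = 55;
    for (int i = 0; i < from.length; i++) {
        if (value >= from[i] && value < to[i]) { // same test as Range.matches()
            System.out.println("value falls in [" + from[i] + ", " + to[i] + ")");
        }
    }
    // prints [0.0, 100.0), [10.0, 60.0) and [50.0, 70.0)
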
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeBuilder.java
new file mode 100644
index 0000000..0d7599b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeBuilder.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range;
+
+/**
+ *
+ */
+public class RangeBuilder extends AbstractRangeBuilder<RangeBuilder> {
+
+ public RangeBuilder(String name) {
+ super(name, InternalRange.TYPE.name());
+ }
+
+
+ public RangeBuilder addRange(String key, double from, double to) {
+ ranges.add(new Range(key, from, to));
+ return this;
+ }
+
+ public RangeBuilder addRange(double from, double to) {
+ return addRange(null, from, to);
+ }
+
+ public RangeBuilder addUnboundedTo(String key, double to) {
+ ranges.add(new Range(key, null, to));
+ return this;
+ }
+
+ public RangeBuilder addUnboundedTo(double to) {
+ return addUnboundedTo(null, to);
+ }
+
+ public RangeBuilder addUnboundedFrom(String key, double from) {
+ ranges.add(new Range(key, from, null));
+ return this;
+ }
+
+ public RangeBuilder addUnboundedFrom(double from) {
+ return addUnboundedFrom(null, from);
+ }
+
+}
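
A hedged sketch of the builder above; the aggregation name, field, and bounds are illustrative (field(...) is inherited from ValuesSourceAggregationBuilder):

    RangeBuilder prices = new RangeBuilder("price_ranges");
    prices.field("price")
          .addUnboundedTo("cheap", 50)          // [*, 50)
          .addRange("mid", 50, 100)             // [50, 100)
          .addUnboundedFrom("expensive", 100);  // [100, *)
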
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
new file mode 100644
index 0000000..6745fd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ *
+ */
+public class RangeParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalRange.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+
+ String field = null;
+ List<RangeAggregator.Range> ranges = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ boolean keyed = false;
+ boolean assumeSorted = false;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("ranges".equals(currentFieldName)) {
+ ranges = new ArrayList<RangeAggregator.Range>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ double from = Double.NEGATIVE_INFINITY;
+ String fromAsStr = null;
+ double to = Double.POSITIVE_INFINITY;
+ String toAsStr = null;
+ String key = null;
+ String toOrFromOrKey = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ toOrFromOrKey = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("from".equals(toOrFromOrKey)) {
+ from = parser.doubleValue();
+ } else if ("to".equals(toOrFromOrKey)) {
+ to = parser.doubleValue();
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("from".equals(toOrFromOrKey)) {
+ fromAsStr = parser.text();
+ } else if ("to".equals(toOrFromOrKey)) {
+ toAsStr = parser.text();
+ } else if ("key".equals(toOrFromOrKey)) {
+ key = parser.text();
+ }
+ }
+ }
+ ranges.add(new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr));
+ }
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("keyed".equals(currentFieldName)) {
+ keyed = parser.booleanValue();
+ } else if ("script_values_sorted".equals(currentFieldName) || "scriptValuesSorted".equals(currentFieldName)) {
+ assumeSorted = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (ranges == null) {
+ throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]");
+ }
+
+ if (script != null) {
+ config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
+ }
+
+ if (!assumeSorted) {
+ // we need values to be sorted and unique for efficiency
+ config.ensureSorted(true);
+ }
+
+ if (field == null) {
+ return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
+ }
+
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
+ }
+}
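
For reference, a sketch of a body the parser above accepts (a fragment; names and bounds are illustrative, the usual XContent imports assumed). Note that "from" is inclusive and "to" exclusive, matching RangeAggregator.Range.matches():

    XContentBuilder agg = XContentFactory.jsonBuilder()
            .startObject()
              .startObject("price_ranges")
                .startObject("range")
                  .field("field", "price")
                  .field("keyed", true)
                  .startArray("ranges")
                    .startObject().field("to", 50).endObject()
                    .startObject().field("from", 50).field("to", 100).endObject()
                    .startObject().field("from", 100).endObject()
                  .endArray()
                .endObject()
              .endObject()
            .endObject();
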
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRange.java
new file mode 100644
index 0000000..c479daf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRange.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.date;
+
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.joda.time.DateTime;
+
+import java.util.Collection;
+
+/**
+ *
+ */
+public interface DateRange extends Range {
+
+ static interface Bucket extends Range.Bucket {
+
+ DateTime getFromAsDate();
+
+ DateTime getToAsDate();
+ }
+
+ @Override
+ Collection<? extends DateRange.Bucket> getBuckets();
+
+ @Override
+ DateRange.Bucket getBucketByKey(String key);
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeBuilder.java
new file mode 100644
index 0000000..c3f2d4a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeBuilder.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.date;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.AbstractRangeBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class DateRangeBuilder extends AbstractRangeBuilder<DateRangeBuilder> {
+
+ private String format;
+
+ public DateRangeBuilder(String name) {
+ super(name, InternalDateRange.TYPE.name());
+ }
+
+ public DateRangeBuilder addRange(String key, Object from, Object to) {
+ ranges.add(new Range(key, from, to));
+ return this;
+ }
+
+ public DateRangeBuilder addRange(Object from, Object to) {
+ return addRange(null, from, to);
+ }
+
+ public DateRangeBuilder addUnboundedTo(String key, Object to) {
+ ranges.add(new Range(key, null, to));
+ return this;
+ }
+
+ public DateRangeBuilder addUnboundedTo(Object to) {
+ return addUnboundedTo(null, to);
+ }
+
+ public DateRangeBuilder addUnboundedFrom(String key, Object from) {
+ ranges.add(new Range(key, from, null));
+ return this;
+ }
+
+ public DateRangeBuilder addUnboundedFrom(Object from) {
+ return addUnboundedFrom(null, from);
+ }
+
+ public DateRangeBuilder format(String format) {
+ this.format = format;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder doInternalXContent(XContentBuilder builder, Params params) throws IOException {
+ super.doInternalXContent(builder, params);
+ if (format != null) {
+ builder.field("format", format);
+ }
+ return builder;
+ }
+}
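
A hedged sketch of the builder above with date-math bounds, which the server-side ValueParser.DateMath resolves at execution time (field name, format, and expressions are illustrative):

    DateRangeBuilder signups = new DateRangeBuilder("signup_ranges");
    signups.field("signup_date")
           .format("yyyy-MM-dd")
           .addUnboundedTo("older", "now-1y/y")       // [*, now-1y/y)
           .addRange("last_year", "now-1y/y", "now"); // [now-1y/y, now)
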
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java
new file mode 100644
index 0000000..a58043a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.date;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ *
+ */
+public class DateRangeParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalDateRange.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+
+ String field = null;
+ List<RangeAggregator.Range> ranges = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ boolean keyed = false;
+ String format = null;
+ boolean assumeSorted = false;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("format".equals(currentFieldName)) {
+ format = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("ranges".equals(currentFieldName)) {
+ ranges = new ArrayList<RangeAggregator.Range>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ double from = Double.NEGATIVE_INFINITY;
+ String fromAsStr = null;
+ double to = Double.POSITIVE_INFINITY;
+ String toAsStr = null;
+ String key = null;
+ String toOrFromOrKey = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ toOrFromOrKey = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("from".equals(toOrFromOrKey)) {
+ from = parser.doubleValue();
+ } else if ("to".equals(toOrFromOrKey)) {
+ to = parser.doubleValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("from".equals(toOrFromOrKey)) {
+ fromAsStr = parser.text();
+ } else if ("to".equals(toOrFromOrKey)) {
+ toAsStr = parser.text();
+ } else if ("key".equals(toOrFromOrKey)) {
+ key = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + toOrFromOrKey + "].");
+ }
+ }
+ }
+ ranges.add(new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr));
+ }
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("keyed".equals(currentFieldName)) {
+ keyed = parser.booleanValue();
+ } else if ("script_values_sorted".equals(currentFieldName) || "scriptValuesSorted".equals(currentFieldName)) {
+ assumeSorted = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (ranges == null) {
+ throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]");
+ }
+
+ if (script != null) {
+ config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
+ }
+
+ if (!assumeSorted) {
+ // we need values to be sorted and unique for efficiency
+ config.ensureSorted(true);
+ }
+
+ if (format != null) {
+ config.formatter(new ValueFormatter.DateTime(format));
+ } else {
+ config.formatter(ValueFormatter.DateTime.DEFAULT);
+ }
+
+ config.parser(ValueParser.DateMath.DEFAULT);
+
+ if (field == null) {
+ return new RangeAggregator.Factory(aggregationName, config, InternalDateRange.FACTORY, ranges, keyed);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new RangeAggregator.Factory(aggregationName, config, InternalDateRange.FACTORY, ranges, keyed);
+ }
+
+ if (!(mapper instanceof DateFieldMapper)) {
+ throw new AggregationExecutionException("date_range aggregation can only be applied to date fields, which is not the case with field [" + field + "]");
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ if (format == null) {
+ config.formatter(new ValueFormatter.DateTime(((DateFieldMapper) mapper).dateTimeFormatter()));
+ }
+ config.parser(new ValueParser.DateMath(((DateFieldMapper) mapper).dateMathParser()));
+ return new RangeAggregator.Factory(aggregationName, config, InternalDateRange.FACTORY, ranges, keyed);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java
new file mode 100644
index 0000000..fbed40b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.date;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.range.InternalRange;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalDateRange extends InternalRange<InternalDateRange.Bucket> implements DateRange {
+
+ public final static Type TYPE = new Type("date_range", "drange");
+
+ private final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalDateRange readResult(StreamInput in) throws IOException {
+ InternalDateRange ranges = new InternalDateRange();
+ ranges.readFrom(in);
+ return ranges;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ public static final Factory FACTORY = new Factory();
+
+ public static class Bucket extends InternalRange.Bucket implements DateRange.Bucket {
+
+ public Bucket(String key, double from, double to, long docCount, List<InternalAggregation> aggregations, ValueFormatter formatter) {
+ super(key, from, to, docCount, new InternalAggregations(aggregations), formatter);
+ }
+
+ public Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ super(key, from, to, docCount, aggregations, formatter);
+ }
+
+ @Override
+ public DateTime getFromAsDate() {
+ return Double.isInfinite(getFrom().doubleValue()) ? null : new DateTime(getFrom().longValue(), DateTimeZone.UTC);
+ }
+
+ @Override
+ public DateTime getToAsDate() {
+ return Double.isInfinite(getTo().doubleValue()) ? null : new DateTime(getTo().longValue(), DateTimeZone.UTC);
+ }
+ }
+
+ private static class Factory extends InternalRange.Factory<InternalDateRange.Bucket, InternalDateRange> {
+
+ @Override
+ public String type() {
+ return TYPE.name();
+ }
+
+ @Override
+ public InternalDateRange create(String name, List<InternalDateRange.Bucket> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ return new InternalDateRange(name, ranges, formatter, keyed, unmapped);
+ }
+
+ @Override
+ public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+ }
+
+ InternalDateRange() {} // for serialization
+
+ InternalDateRange(String name, List<InternalDateRange.Bucket> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ super(name, ranges, formatter, keyed, unmapped);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ protected Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistance.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistance.java
new file mode 100644
index 0000000..a8e41d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistance.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.geodistance;
+
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+
+import java.util.Collection;
+
+/**
+ *
+ */
+public interface GeoDistance extends Range {
+
+ public static interface Bucket extends Range.Bucket {
+ }
+
+ @Override
+ Collection<? extends GeoDistance.Bucket> getBuckets();
+
+ @Override
+ GeoDistance.Bucket getBucketByKey(String key);
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceBuilder.java
new file mode 100644
index 0000000..7bb41f7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceBuilder.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.range.geodistance;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Builder for the {@code geo_distance} aggregation.
+ */
+public class GeoDistanceBuilder extends AggregationBuilder<GeoDistanceBuilder> {
+
+ public static class Range implements ToXContent {
+
+ private String key;
+ private Double from;
+ private Double to;
+
+ public Range(String key, Double from, Double to) {
+ this.key = key;
+ this.from = from;
+ this.to = to;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (from != null) {
+ builder.field("from", from.doubleValue());
+ }
+ if (to != null) {
+ builder.field("to", to.doubleValue());
+ }
+ if (key != null) {
+ builder.field("key", key);
+ }
+ return builder.endObject();
+ }
+
+ }
+
+ private String field;
+ private DistanceUnit unit;
+ private GeoDistance distanceType;
+ private GeoPoint point;
+
+ private List<Range> ranges = Lists.newArrayList();
+
+ public GeoDistanceBuilder(String name) {
+ super(name, InternalGeoDistance.TYPE.name());
+ }
+
+ public GeoDistanceBuilder field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ public GeoDistanceBuilder unit(DistanceUnit unit) {
+ this.unit = unit;
+ return this;
+ }
+
+ public GeoDistanceBuilder distanceType(GeoDistance distanceType) {
+ this.distanceType = distanceType;
+ return this;
+ }
+
+ public GeoDistanceBuilder point(String latLon) {
+ return point(GeoPoint.parseFromLatLon(latLon));
+ }
+
+ public GeoDistanceBuilder point(GeoPoint point) {
+ this.point = point;
+ return this;
+ }
+
+ public GeoDistanceBuilder geohash(String geohash) {
+ if (this.point == null) {
+ this.point = new GeoPoint();
+ }
+ this.point.resetFromGeoHash(geohash);
+ return this;
+ }
+
+ public GeoDistanceBuilder lat(double lat) {
+ if (this.point == null) {
+ point = new GeoPoint();
+ }
+ point.resetLat(lat);
+ return this;
+ }
+
+ public GeoDistanceBuilder lon(double lon) {
+ if (this.point == null) {
+ point = new GeoPoint();
+ }
+ point.resetLon(lon);
+ return this;
+ }
+
+ public GeoDistanceBuilder addRange(String key, double from, double to) {
+ ranges.add(new Range(key, from, to));
+ return this;
+ }
+
+ public GeoDistanceBuilder addRange(double from, double to) {
+ return addRange(null, from, to);
+ }
+
+ public GeoDistanceBuilder addUnboundedTo(String key, double to) {
+ ranges.add(new Range(key, null, to));
+ return this;
+ }
+
+ public GeoDistanceBuilder addUnboundedTo(double to) {
+ return addUnboundedTo(null, to);
+ }
+
+ public GeoDistanceBuilder addUnboundedFrom(String key, double from) {
+ ranges.add(new Range(key, from, null));
+ return this;
+ }
+
+ public GeoDistanceBuilder addUnboundedFrom(double from) {
+ return addUnboundedFrom(null, from);
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (ranges.isEmpty()) {
+ throw new SearchSourceBuilderException("at least one range must be defined for geo_distance aggregation [" + name + "]");
+ }
+ if (point == null) {
+ throw new SearchSourceBuilderException("center point must be defined for geo_distance aggregation [" + name + "]");
+ }
+
+ if (field != null) {
+ builder.field("field", field);
+ }
+
+ if (unit != null) {
+ builder.field("unit", unit);
+ }
+
+ if (distanceType != null) {
+ builder.field("distance_type", distanceType.name().toLowerCase(Locale.ROOT));
+ }
+
+ builder.startObject("center")
+ .field("lat", point.lat())
+ .field("lon", point.lon())
+ .endObject();
+
+ builder.startArray("ranges");
+ for (Range range : ranges) {
+ range.toXContent(builder, params);
+ }
+ builder.endArray();
+
+ return builder.endObject();
+ }
+
+}
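
A minimal usage sketch of the builder defined above (the client, index and field names are hypothetical; the builder calls are the ones in this file):

    // assumes a connected org.elasticsearch.client.Client named "client" (ES 1.0 Java API)
    SearchResponse response = client.prepareSearch("stores")
            .addAggregation(new GeoDistanceBuilder("rings")
                    .field("location")                 // a geo_point field
                    .point("40.7143, -74.006")         // "lat, lon" form, parsed by point(String)
                    .unit(DistanceUnit.KILOMETERS)
                    .addUnboundedTo(100)               // ring: * .. 100
                    .addRange(100, 300)                // ring: 100 .. 300
                    .addUnboundedFrom(300))            // ring: 300 .. *
            .execute().actionGet();
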
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
new file mode 100644
index 0000000..7c4575e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.geodistance;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.range.InternalRange;
+import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
+import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Unmapped;
+import org.elasticsearch.search.aggregations.support.*;
+import org.elasticsearch.search.aggregations.support.geopoints.GeoPointValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Parses {@code geo_distance} aggregation requests.
+ */
+public class GeoDistanceParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalGeoDistance.TYPE.name();
+ }
+
+ private static String key(String key, double from, double to) {
+ if (key != null) {
+ return key;
+ }
+ StringBuilder sb = new StringBuilder();
+ sb.append(from == 0 ? "*" : from);
+ sb.append("-");
+ sb.append(Double.isInfinite(to) ? "*" : to);
+ return sb.toString();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ String field = null;
+ List<RangeAggregator.Range> ranges = null;
+ GeoPoint origin = null;
+ DistanceUnit unit = DistanceUnit.DEFAULT;
+ GeoDistance distanceType = GeoDistance.DEFAULT;
+ boolean keyed = false;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("unit".equals(currentFieldName)) {
+ unit = DistanceUnit.fromString(parser.text());
+ } else if ("distance_type".equals(currentFieldName) || "distanceType".equals(currentFieldName)) {
+ distanceType = GeoDistance.fromString(parser.text());
+ } else if ("point".equals(currentFieldName) || "origin".equals(currentFieldName) || "center".equals(currentFieldName)) {
+ origin = new GeoPoint();
+ origin.resetFromString(parser.text());
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("keyed".equals(currentFieldName)) {
+ keyed = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("ranges".equals(currentFieldName)) {
+ ranges = new ArrayList<RangeAggregator.Range>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String fromAsStr = null;
+ String toAsStr = null;
+ double from = 0.0;
+ double to = Double.POSITIVE_INFINITY;
+ String key = null;
+ String toOrFromOrKey = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ toOrFromOrKey = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("from".equals(toOrFromOrKey)) {
+ from = parser.doubleValue();
+ } else if ("to".equals(toOrFromOrKey)) {
+ to = parser.doubleValue();
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("key".equals(toOrFromOrKey)) {
+ key = parser.text();
+ } else if ("from".equals(toOrFromOrKey)) {
+ fromAsStr = parser.text();
+ } else if ("to".equals(toOrFromOrKey)) {
+ toAsStr = parser.text();
+ }
+ }
+ }
+ ranges.add(new RangeAggregator.Range(key(key, from, to), from, fromAsStr, to, toAsStr));
+ }
+ } else if ("point".equals(currentFieldName) || "origin".equals(currentFieldName) || "center".equals(currentFieldName)) {
+ double lat = Double.NaN;
+ double lon = Double.NaN;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (Double.isNaN(lon)) {
+ lon = parser.doubleValue();
+ } else if (Double.isNaN(lat)) {
+ lat = parser.doubleValue();
+ } else {
+ throw new SearchParseException(context, "malformed [origin] geo point array in geo_distance aggregator [" + aggregationName + "]. " +
+ "a geo point array must be of the form [lon, lat]");
+ }
+ }
+ origin = new GeoPoint(lat, lon);
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("point".equals(currentFieldName) || "origin".equals(currentFieldName) || "center".equals(currentFieldName)) {
+ double lat = Double.NaN;
+ double lon = Double.NaN;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("lat".equals(currentFieldName)) {
+ lat = parser.doubleValue();
+ } else if ("lon".equals(currentFieldName)) {
+ lon = parser.doubleValue();
+ }
+ }
+ }
+ if (Double.isNaN(lat) || Double.isNaN(lon)) {
+ throw new SearchParseException(context, "malformed [origin] geo point object. either [lat] or [lon] (or both) are " +
+ "missing in geo_distance aggregator [" + aggregationName + "]");
+ }
+ origin = new GeoPoint(lat, lon);
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (ranges == null) {
+ throw new SearchParseException(context, "Missing [ranges] in geo_distance aggregator [" + aggregationName + "]");
+ }
+
+ if (origin == null) {
+ throw new SearchParseException(context, "Missing [origin] in geo_distance aggregator [" + aggregationName + "]");
+ }
+
+ ValuesSourceConfig<GeoPointValuesSource> config = new ValuesSourceConfig<GeoPointValuesSource>(GeoPointValuesSource.class);
+
+ if (field == null) {
+ return new GeoDistanceFactory(aggregationName, config, InternalGeoDistance.FACTORY, origin, unit, distanceType, ranges, keyed);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new GeoDistanceFactory(aggregationName, config, InternalGeoDistance.FACTORY, origin, unit, distanceType, ranges, keyed);
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return new GeoDistanceFactory(aggregationName, config, InternalGeoDistance.FACTORY, origin, unit, distanceType, ranges, keyed);
+ }
+
+ private static class GeoDistanceFactory extends ValueSourceAggregatorFactory<GeoPointValuesSource> {
+
+ private final GeoPoint origin;
+ private final DistanceUnit unit;
+ private final GeoDistance distanceType;
+ private final InternalRange.Factory rangeFactory;
+ private final List<RangeAggregator.Range> ranges;
+ private final boolean keyed;
+
+ public GeoDistanceFactory(String name, ValuesSourceConfig<GeoPointValuesSource> valueSourceConfig,
+ InternalRange.Factory rangeFactory, GeoPoint origin, DistanceUnit unit, GeoDistance distanceType,
+ List<RangeAggregator.Range> ranges, boolean keyed) {
+ super(name, rangeFactory.type(), valueSourceConfig);
+ this.origin = origin;
+ this.unit = unit;
+ this.distanceType = distanceType;
+ this.rangeFactory = rangeFactory;
+ this.ranges = ranges;
+ this.keyed = keyed;
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new Unmapped(name, ranges, keyed, valuesSourceConfig.formatter(), valuesSourceConfig.parser(), aggregationContext, parent, rangeFactory);
+ }
+
+ @Override
+ protected Aggregator create(final GeoPointValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ final DistanceValues distanceValues = new DistanceValues(valuesSource, distanceType, origin, unit);
+ FieldDataSource.Numeric distanceSource = new DistanceSource(distanceValues, valuesSource.metaData());
+ if (distanceSource.metaData().multiValued()) {
+ // we need to ensure uniqueness
+ distanceSource = new FieldDataSource.Numeric.SortedAndUnique(distanceSource);
+ }
+ final NumericValuesSource numericSource = new NumericValuesSource(distanceSource, null, null);
+ return new RangeAggregator(name, factories, numericSource, rangeFactory, ranges, keyed, aggregationContext, parent);
+ }
+
+ private static class DistanceValues extends DoubleValues {
+
+ private final GeoPointValuesSource geoPointValues;
+ private GeoPointValues geoValues;
+ private final GeoDistance distanceType;
+ private final GeoPoint origin;
+ private final DistanceUnit unit;
+
+ protected DistanceValues(GeoPointValuesSource geoPointValues, GeoDistance distanceType, GeoPoint origin, DistanceUnit unit) {
+ super(true);
+ this.geoPointValues = geoPointValues;
+ this.distanceType = distanceType;
+ this.origin = origin;
+ this.unit = unit;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ geoValues = geoPointValues.values();
+ return geoValues.setDocument(docId);
+ }
+
+ @Override
+ public double nextValue() {
+ final GeoPoint target = geoValues.nextValue();
+ return distanceType.calculate(origin.getLat(), origin.getLon(), target.getLat(), target.getLon(), unit);
+ }
+
+ }
+
+ private static class DistanceSource extends FieldDataSource.Numeric {
+
+ private final DoubleValues values;
+ private final MetaData metaData;
+
+ public DistanceSource(DoubleValues values, MetaData metaData) {
+ this.values = values;
+ // even if the geo points are unique, there's no guarantee the distances are
+ this.metaData = MetaData.builder(metaData).uniqueness(MetaData.Uniqueness.UNKNOWN).build();
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ @Override
+ public boolean isFloatingPoint() {
+ return true;
+ }
+
+ @Override
+ public LongValues longValues() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public DoubleValues doubleValues() {
+ return values;
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ throw new UnsupportedOperationException();
+ }
+
+ }
+
+ }
+
+}
\ No newline at end of file
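
One parser detail worth spelling out: when a range in the request carries no explicit "key", the private key(...) helper above derives one from the bounds. Worked by hand against that helper:

    // key(null, 0, 100)                        -> "*-100.0"      (a zero "from" prints as "*")
    // key(null, 100, 300)                      -> "100.0-300.0"  (doubles print with a decimal)
    // key(null, 300, Double.POSITIVE_INFINITY) -> "300.0-*"      (an infinite "to" prints as "*")
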
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java
new file mode 100644
index 0000000..0493993
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.geodistance;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.range.InternalRange;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Shard-level result of the {@code geo_distance} aggregation.
+ */
+public class InternalGeoDistance extends InternalRange<InternalGeoDistance.Bucket> implements GeoDistance {
+
+ public static final Type TYPE = new Type("geo_distance", "gdist");
+
+ public static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalGeoDistance readResult(StreamInput in) throws IOException {
+ InternalGeoDistance geoDistance = new InternalGeoDistance();
+ geoDistance.readFrom(in);
+ return geoDistance;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ public static final Factory FACTORY = new Factory();
+
+ static class Bucket extends InternalRange.Bucket implements GeoDistance.Bucket {
+
+ Bucket(String key, double from, double to, long docCount, List<InternalAggregation> aggregations, ValueFormatter formatter) {
+ this(key, from, to, docCount, new InternalAggregations(aggregations), formatter);
+ }
+
+ Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ super(key, from, to, docCount, aggregations, formatter);
+ }
+
+ }
+
+ private static class Factory extends InternalRange.Factory<InternalGeoDistance.Bucket, InternalGeoDistance> {
+
+ @Override
+ public String type() {
+ return TYPE.name();
+ }
+
+ @Override
+ public InternalGeoDistance create(String name, List<Bucket> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ return new InternalGeoDistance(name, ranges, formatter, keyed, unmapped);
+ }
+
+ @Override
+ public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+ }
+
+ InternalGeoDistance() {} // for serialization
+
+ public InternalGeoDistance(String name, List<Bucket> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ super(name, ranges, formatter, keyed, unmapped);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ protected Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4Range.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4Range.java
new file mode 100644
index 0000000..9ca5793
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4Range.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.ipv4;
+
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+
+import java.util.Collection;
+
+/**
+ * An {@code ip_range} aggregation over IPv4 addresses, exposing each bucket's bounds as dotted-quad strings.
+ */
+public interface IPv4Range extends Range {
+
+ interface Bucket extends Range.Bucket {
+
+ String getFromAsString();
+
+ String getToAsString();
+
+ }
+
+ @Override
+ Collection<? extends IPv4Range.Bucket> getBuckets();
+
+ @Override
+ IPv4Range.Bucket getBucketByKey(String key);
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4RangeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4RangeBuilder.java
new file mode 100644
index 0000000..a1e424f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IPv4RangeBuilder.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.range.ipv4;
+
+import org.elasticsearch.search.aggregations.bucket.range.AbstractRangeBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+
+import java.util.regex.Pattern;
+
+/**
+ * Builder for the {@code ip_range} aggregation, including CIDR-mask based ranges.
+ */
+public class IPv4RangeBuilder extends AbstractRangeBuilder<IPv4RangeBuilder> {
+
+ private static final Pattern MASK_PATTERN = Pattern.compile("[./]"); // splits "a.b.c.d/e" into its five parts
+
+ public IPv4RangeBuilder(String name) {
+ super(name, InternalIPv4Range.TYPE.name());
+ }
+
+ public IPv4RangeBuilder addRange(String key, String from, String to) {
+ ranges.add(new Range(key, from, to));
+ return this;
+ }
+
+ public IPv4RangeBuilder addMaskRange(String mask) {
+ return addMaskRange(mask, mask);
+ }
+
+ public IPv4RangeBuilder addMaskRange(String key, String mask) {
+ long[] fromTo = cidrMaskToMinMax(mask);
+ if (fromTo == null) {
+ throw new SearchSourceBuilderException("invalid CIDR mask [" + mask + "] in ip_range aggregation [" + name + "]");
+ }
+ ranges.add(new Range(key, fromTo[0] < 0 ? null : fromTo[0], fromTo[1] < 0 ? null : fromTo[1]));
+ return this;
+ }
+
+ public IPv4RangeBuilder addRange(String from, String to) {
+ return addRange(null, from, to);
+ }
+
+ public IPv4RangeBuilder addUnboundedTo(String key, String to) {
+ ranges.add(new Range(key, null, to));
+ return this;
+ }
+
+ public IPv4RangeBuilder addUnboundedTo(String to) {
+ return addUnboundedTo(null, to);
+ }
+
+ public IPv4RangeBuilder addUnboundedFrom(String key, String from) {
+ ranges.add(new Range(key, from, null));
+ return this;
+ }
+
+ public IPv4RangeBuilder addUnboundedFrom(String from) {
+ return addUnboundedFrom(null, from);
+ }
+
+ /**
+ * Computes the min and max IP addresses (represented as long values, the same way they are stored in the index) covered by the
+ * given CIDR mask expression. The returned array has length 2: the first entry is the {@code min} address and the second the {@code max}.
+ * A {@code -1} value for either the {@code min} or the {@code max} represents an unbounded end. In other words:
+ *
+ * <p>
+ * {@code min == -1 == "0.0.0.0" }
+ * </p>
+ *
+ * and
+ *
+ * <p>
+ * {@code max == -1 == "255.255.255.255" }
+ * </p>
+ *
+ * @param cidr the CIDR mask expression, e.g. {@code "10.0.0.0/25"}
+ * @return a two-element {@code [min, max]} array, or {@code null} if the expression is malformed
+ */
+ static long[] cidrMaskToMinMax(String cidr) {
+ String[] parts = MASK_PATTERN.split(cidr);
+ if (parts.length != 5) {
+ return null;
+ }
+ int addr = (( Integer.parseInt(parts[0]) << 24 ) & 0xFF000000)
+ | (( Integer.parseInt(parts[1]) << 16 ) & 0xFF0000)
+ | (( Integer.parseInt(parts[2]) << 8 ) & 0xFF00)
+ | ( Integer.parseInt(parts[3]) & 0xFF);
+
+ // note: Java shift counts are taken mod 32, so (-1) << 32 == -1; a /0 prefix must be special-cased to cover the full space
+ int mask = Integer.parseInt(parts[4]) == 0 ? 0 : (-1) << (32 - Integer.parseInt(parts[4]));
+
+ int from = addr & mask;
+ long longFrom = intIpToLongIp(from);
+ if (longFrom == 0) {
+ longFrom = -1;
+ }
+
+ int to = from + (~mask);
+ long longTo = intIpToLongIp(to) + 1; // we have to +1 here as the range is non-inclusive on the "to" side
+ if (longTo == InternalIPv4Range.MAX_IP) {
+ longTo = -1;
+ }
+
+ return new long[] { longFrom, longTo };
+ }
+
+ public static long intIpToLongIp(int i) {
+ long p1 = ((long) ((i >> 24 ) & 0xFF)) << 24;
+ int p2 = ((i >> 16 ) & 0xFF) << 16;
+ int p3 = ((i >> 8 ) & 0xFF) << 8;
+ int p4 = i & 0xFF;
+ return p1 + p2 + p3 + p4;
+ }
+}
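
A hand-worked sanity check of cidrMaskToMinMax (package-private, so a snippet like this belongs in a test in the same package):

    long[] minMax = IPv4RangeBuilder.cidrMaskToMinMax("10.0.0.0/25");
    // addr = 10 << 24 = 167772160, mask = -1 << (32 - 25) = 0xFFFFFF80
    // minMax[0] == 167772160L   i.e. 10.0.0.0
    // minMax[1] == 167772288L   i.e. one past 10.0.0.127 (the "to" side is exclusive)
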
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java
new file mode 100644
index 0000000..c881aef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.ipv4;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.range.InternalRange;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Shard-level result of the {@code ip_range} aggregation; bounds are rendered with {@link ValueFormatter#IPv4}.
+ */
+public class InternalIPv4Range extends InternalRange<InternalIPv4Range.Bucket> implements IPv4Range {
+
+ public static final long MAX_IP = 4294967296L;
+
+ public final static Type TYPE = new Type("ip_range", "iprange");
+
+ private final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalIPv4Range readResult(StreamInput in) throws IOException {
+ InternalIPv4Range range = new InternalIPv4Range();
+ range.readFrom(in);
+ return range;
+ }
+ };
+
+ public static void registerStream() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ public static final Factory FACTORY = new Factory();
+
+ public static class Bucket extends InternalRange.Bucket implements IPv4Range.Bucket {
+
+ public Bucket(String key, double from, double to, long docCount, List<InternalAggregation> aggregations, ValueFormatter formatter) {
+ super(key, from, to, docCount, new InternalAggregations(aggregations), formatter);
+ }
+
+ public Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ super(key, from, to, docCount, aggregations, formatter);
+ }
+
+ @Override
+ public String getFromAsString() {
+ double from = getFrom().doubleValue();
+ return Double.isInfinite(from) ? null : from == 0 ? null : ValueFormatter.IPv4.format(from);
+ }
+
+ @Override
+ public String getToAsString() {
+ double to = getTo().doubleValue();
+ return Double.isInfinite(to) ? null : MAX_IP == to ? null : ValueFormatter.IPv4.format(to);
+ }
+ }
+
+ private static class Factory extends InternalRange.Factory<InternalIPv4Range.Bucket, InternalIPv4Range> {
+
+ @Override
+ public String type() {
+ return TYPE.name();
+ }
+
+ @Override
+ public InternalIPv4Range create(String name, List<Bucket> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
+ return new InternalIPv4Range(name, ranges, keyed, unmapped);
+ }
+
+ @Override
+ public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+ }
+
+ public InternalIPv4Range() {} // for serialization
+
+ public InternalIPv4Range(String name, List<InternalIPv4Range.Bucket> ranges, boolean keyed, boolean unmapped) {
+ super(name, ranges, ValueFormatter.IPv4, keyed, unmapped);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ protected Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
+ return new Bucket(key, from, to, docCount, aggregations, formatter);
+ }
+
+}
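
On the consuming side, the string accessors above render bounded ends as dotted quads and return null for unbounded ends. A sketch (aggregation name hypothetical):

    IPv4Range range = response.getAggregations().get("ips");
    for (IPv4Range.Bucket bucket : range.getBuckets()) {
        String from = bucket.getFromAsString(); // null when unbounded below (0 or -Infinity)
        String to = bucket.getToAsString();     // null when unbounded above (MAX_IP or +Infinity)
    }
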
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java
new file mode 100644
index 0000000..f7fb1b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.range.ipv4;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Parses {@code ip_range} aggregation requests, expanding CIDR masks into numeric ranges.
+ */
+public class IpRangeParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalIPv4Range.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+
+ String field = null;
+ List<RangeAggregator.Range> ranges = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ boolean keyed = false;
+ boolean assumeSorted = false;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("ranges".equals(currentFieldName)) {
+ ranges = new ArrayList<RangeAggregator.Range>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ double from = Double.NEGATIVE_INFINITY;
+ String fromAsStr = null;
+ double to = Double.POSITIVE_INFINITY;
+ String toAsStr = null;
+ String key = null;
+ String mask = null;
+ String toOrFromOrMaskOrKey = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ toOrFromOrMaskOrKey = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("from".equals(toOrFromOrMaskOrKey)) {
+ from = parser.doubleValue();
+ } else if ("to".equals(toOrFromOrMaskOrKey)) {
+ to = parser.doubleValue();
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("from".equals(toOrFromOrMaskOrKey)) {
+ fromAsStr = parser.text();
+ } else if ("to".equals(toOrFromOrMaskOrKey)) {
+ toAsStr = parser.text();
+ } else if ("key".equals(toOrFromOrMaskOrKey)) {
+ key = parser.text();
+ } else if ("mask".equals(toOrFromOrMaskOrKey)) {
+ mask = parser.text();
+ }
+ }
+ }
+ RangeAggregator.Range range = new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr);
+ if (mask != null) {
+ parseMaskRange(mask, range, aggregationName, context);
+ }
+ ranges.add(range);
+ }
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("keyed".equals(currentFieldName)) {
+ keyed = parser.booleanValue();
+ } else if ("script_values_sorted".equals(currentFieldName) || "scriptValuesSorted".equals(currentFieldName)) {
+ assumeSorted = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (ranges == null) {
+ throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]");
+ }
+
+ if (script != null) {
+ config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
+ }
+
+ if (!assumeSorted) {
+ // we need values to be sorted and unique for efficiency
+ config.ensureSorted(true);
+ }
+
+ config.formatter(ValueFormatter.IPv4);
+ config.parser(ValueParser.IPv4);
+
+ if (field == null) {
+ return new RangeAggregator.Factory(aggregationName, config, InternalIPv4Range.FACTORY, ranges, keyed);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new RangeAggregator.Factory(aggregationName, config, InternalIPv4Range.FACTORY, ranges, keyed);
+ }
+
+ if (!(mapper instanceof IpFieldMapper)) {
+ throw new AggregationExecutionException("ip_range aggregation can only be applied to ip fields which is not the case with field [" + field + "]");
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return new RangeAggregator.Factory(aggregationName, config, InternalIPv4Range.FACTORY, ranges, keyed);
+ }
+
+ private static void parseMaskRange(String cidr, RangeAggregator.Range range, String aggregationName, SearchContext ctx) {
+ long[] fromTo = IPv4RangeBuilder.cidrMaskToMinMax(cidr);
+ if (fromTo == null) {
+ throw new SearchParseException(ctx, "invalid CIDR mask [" + cidr + "] in aggregation [" + aggregationName + "]");
+ }
+ range.from = fromTo[0] < 0 ? Double.NEGATIVE_INFINITY : fromTo[0];
+ range.to = fromTo[1] < 0 ? Double.POSITIVE_INFINITY : fromTo[1];
+ if (range.key == null) {
+ range.key = cidr;
+ }
+ }
+
+}
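
For reference, a request body this parser accepts, sketched with XContentBuilder so it stays in Java (the field name and values are hypothetical; every key appears in the parse loop above):

    XContentBuilder source = XContentFactory.jsonBuilder()
            .startObject()
                .field("field", "ip")
                .field("keyed", true)
                .startArray("ranges")
                    .startObject().field("mask", "10.0.0.0/25").endObject()     // key defaults to the CIDR
                    .startObject().field("from", "10.0.1.0").field("to", "10.0.2.0").endObject()
                .endArray()
            .endObject();
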
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
new file mode 100644
index 0000000..dac5283
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import com.carrotsearch.hppc.DoubleObjectOpenHashMap;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Result of a {@code terms} aggregation running over double values.
+ */
+public class DoubleTerms extends InternalTerms {
+
+ public static final Type TYPE = new Type("terms", "dterms");
+
+ public static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public DoubleTerms readResult(StreamInput in) throws IOException {
+ DoubleTerms buckets = new DoubleTerms();
+ buckets.readFrom(in);
+ return buckets;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ static class Bucket extends InternalTerms.Bucket {
+
+ double term;
+
+ public Bucket(double term, long docCount, InternalAggregations aggregations) {
+ super(docCount, aggregations);
+ this.term = term;
+ }
+
+ @Override
+ public String getKey() {
+ return String.valueOf(term);
+ }
+
+ @Override
+ public Text getKeyAsText() {
+ return new StringText(String.valueOf(term));
+ }
+
+ @Override
+ public Number getKeyAsNumber() {
+ return term;
+ }
+
+ @Override
+ int compareTerm(Terms.Bucket other) {
+ return Double.compare(term, other.getKeyAsNumber().doubleValue());
+ }
+
+ }
+
+ private ValueFormatter valueFormatter;
+
+ DoubleTerms() {} // for serialization
+
+ public DoubleTerms(String name, InternalOrder order, int requiredSize, long minDocCount, Collection<InternalTerms.Bucket> buckets) {
+ this(name, order, null, requiredSize, minDocCount, buckets);
+ }
+
+ public DoubleTerms(String name, InternalOrder order, ValueFormatter valueFormatter, int requiredSize, long minDocCount, Collection<InternalTerms.Bucket> buckets) {
+ super(name, order, requiredSize, minDocCount, buckets);
+ this.valueFormatter = valueFormatter;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalTerms reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ InternalTerms terms = (InternalTerms) aggregations.get(0);
+ terms.trimExcessEntries(reduceContext.cacheRecycler());
+ return terms;
+ }
+ InternalTerms reduced = null;
+
+ Recycler.V<DoubleObjectOpenHashMap<List<Bucket>>> buckets = null;
+ for (InternalAggregation aggregation : aggregations) {
+ InternalTerms terms = (InternalTerms) aggregation;
+ if (terms instanceof UnmappedTerms) {
+ continue;
+ }
+ if (reduced == null) {
+ reduced = terms;
+ }
+ if (buckets == null) {
+ buckets = reduceContext.cacheRecycler().doubleObjectMap(terms.buckets.size());
+ }
+ for (Terms.Bucket bucket : terms.buckets) {
+ List<Bucket> existingBuckets = buckets.v().get(((Bucket) bucket).term);
+ if (existingBuckets == null) {
+ existingBuckets = new ArrayList<Bucket>(aggregations.size());
+ buckets.v().put(((Bucket) bucket).term, existingBuckets);
+ }
+ existingBuckets.add((Bucket) bucket);
+ }
+ }
+
+ if (reduced == null) {
+ // there are only unmapped terms, so we just return the first one (no need to reduce)
+ return (UnmappedTerms) aggregations.get(0);
+ }
+
+ // TODO: would it be better to sort the backing array buffer of hppc map directly instead of using a PQ?
+ final int size = Math.min(requiredSize, buckets.v().size());
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(null));
+ boolean[] states = buckets.v().allocated;
+ Object[] internalBuckets = buckets.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ List<DoubleTerms.Bucket> sameTermBuckets = (List<DoubleTerms.Bucket>) internalBuckets[i];
+ final InternalTerms.Bucket b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext.cacheRecycler());
+ if (b.getDocCount() >= minDocCount) {
+ ordered.insertWithOverflow(b);
+ }
+ }
+ }
+ buckets.release();
+ InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = (Bucket) ordered.pop();
+ }
+ reduced.buckets = Arrays.asList(list);
+ return reduced;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.order = InternalOrder.Streams.readOrder(in);
+ this.valueFormatter = ValueFormatterStreams.readOptional(in);
+ this.requiredSize = in.readVInt();
+ this.minDocCount = in.readVLong();
+ int size = in.readVInt();
+ List<InternalTerms.Bucket> buckets = new ArrayList<InternalTerms.Bucket>(size);
+ for (int i = 0; i < size; i++) {
+ buckets.add(new Bucket(in.readDouble(), in.readVLong(), InternalAggregations.readAggregations(in)));
+ }
+ this.buckets = buckets;
+ this.bucketMap = null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ InternalOrder.Streams.writeOrder(order, out);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeVInt(requiredSize);
+ out.writeVLong(minDocCount);
+ out.writeVInt(buckets.size());
+ for (InternalTerms.Bucket bucket : buckets) {
+ out.writeDouble(((Bucket) bucket).term);
+ out.writeVLong(bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.startArray(CommonFields.BUCKETS);
+ for (InternalTerms.Bucket bucket : buckets) {
+ builder.startObject();
+ builder.field(CommonFields.KEY, ((Bucket) bucket).term);
+ if (valueFormatter != null) {
+ builder.field(CommonFields.KEY_AS_STRING, valueFormatter.format(((Bucket) bucket).term));
+ }
+ builder.field(CommonFields.DOC_COUNT, bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).toXContentInternal(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+}
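
On the consuming side, the double term surfaces through getKeyAsNumber(); a minimal sketch (aggregation name hypothetical):

    DoubleTerms terms = response.getAggregations().get("prices");
    for (Terms.Bucket bucket : terms.getBuckets()) {
        double term = bucket.getKeyAsNumber().doubleValue(); // the raw double term
        long docCount = bucket.getDocCount();
    }
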
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java
new file mode 100644
index 0000000..155afb7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.bucket.LongHash;
+import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+/**
+ * Aggregates a {@code terms} aggregation over double-valued fields, hashing each value's long bit pattern into a bucket ordinal.
+ */
+public class DoubleTermsAggregator extends BucketsAggregator {
+
+ private final InternalOrder order;
+ private final int requiredSize;
+ private final int shardSize;
+ private final long minDocCount;
+ private final NumericValuesSource valuesSource;
+ private final LongHash bucketOrds;
+
+ public DoubleTermsAggregator(String name, AggregatorFactories factories, NumericValuesSource valuesSource, long estimatedBucketCount,
+ InternalOrder order, int requiredSize, int shardSize, long minDocCount, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, BucketAggregationMode.PER_BUCKET, factories, estimatedBucketCount, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ this.order = InternalOrder.validate(order, this);
+ this.requiredSize = requiredSize;
+ this.shardSize = shardSize;
+ this.minDocCount = minDocCount;
+ bucketOrds = new LongHash(estimatedBucketCount, aggregationContext.pageCacheRecycler());
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return true;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0;
+ final DoubleValues values = valuesSource.doubleValues();
+ final int valuesCount = values.setDocument(doc);
+
+ for (int i = 0; i < valuesCount; ++i) {
+ final double val = values.nextValue();
+ final long bits = Double.doubleToRawLongBits(val);
+ long bucketOrdinal = bucketOrds.add(bits);
+ if (bucketOrdinal < 0) { // already seen
+ bucketOrdinal = -1 - bucketOrdinal;
+ }
+ collectBucket(doc, bucketOrdinal);
+ }
+ }
+
+ @Override
+ public DoubleTerms buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0;
+
+ if (minDocCount == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < requiredSize)) {
+ // we need to fill-in the blanks
+ for (AtomicReaderContext ctx : context.searchContext().searcher().getTopReaderContext().leaves()) {
+ context.setNextReader(ctx);
+ final DoubleValues values = valuesSource.doubleValues();
+ for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
+ final int valueCount = values.setDocument(docId);
+ for (int i = 0; i < valueCount; ++i) {
+ bucketOrds.add(Double.doubleToLongBits(values.nextValue()));
+ }
+ }
+ }
+ }
+
+ final int size = (int) Math.min(bucketOrds.size(), shardSize);
+
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(this));
+ DoubleTerms.Bucket spare = null;
+ for (long i = 0; i < bucketOrds.capacity(); ++i) {
+ final long ord = bucketOrds.id(i);
+ if (ord < 0) {
+ // slot is not allocated
+ continue;
+ }
+
+ if (spare == null) {
+ spare = new DoubleTerms.Bucket(0, 0, null);
+ }
+ spare.term = Double.longBitsToDouble(bucketOrds.key(i));
+ spare.docCount = bucketDocCount(ord);
+ spare.bucketOrd = ord;
+ spare = (DoubleTerms.Bucket) ordered.insertWithOverflow(spare);
+ }
+
+ final InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; --i) {
+ final DoubleTerms.Bucket bucket = (DoubleTerms.Bucket) ordered.pop();
+ bucket.aggregations = bucketAggregations(bucket.bucketOrd);
+ list[i] = bucket;
+ }
+ return new DoubleTerms(name, order, valuesSource.formatter(), requiredSize, minDocCount, Arrays.asList(list));
+ }
+
+ @Override
+ public DoubleTerms buildEmptyAggregation() {
+ return new DoubleTerms(name, order, valuesSource.formatter(), requiredSize, minDocCount, Collections.<InternalTerms.Bucket>emptyList());
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(bucketOrds);
+ }
+
+}
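
The aggregator keys its hash on the double's long bit pattern, which is lossless. Note that collect() uses doubleToRawLongBits while the minDocCount == 0 fill-in path uses doubleToLongBits; the two agree for every non-NaN value and differ only in NaN canonicalization. An illustration (not from the source):

    double v = 42.5;
    long raw = Double.doubleToRawLongBits(v);
    assert raw == Double.doubleToLongBits(v);      // identical for non-NaN values
    assert Double.longBitsToDouble(raw) == v;      // lossless round-trip
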
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java
new file mode 100644
index 0000000..73adfe9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import com.google.common.primitives.Longs;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.Comparators;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+
+import java.io.IOException;
+import java.util.Comparator;
+
+/**
+ * Internal representation of a {@code terms} bucket order, carrying a serialization id and the comparator that implements it.
+ */
+class InternalOrder extends Terms.Order {
+
+ /**
+ * Order by the (higher) count of each term.
+ */
+ public static final InternalOrder COUNT_DESC = new InternalOrder((byte) 1, "_count", false, new Comparator<Terms.Bucket>() {
+ @Override
+ public int compare(Terms.Bucket o1, Terms.Bucket o2) {
+ int cmp = - Longs.compare(o1.getDocCount(), o2.getDocCount());
+ if (cmp == 0) {
+ cmp = o1.compareTerm(o2);
+ }
+ return cmp;
+ }
+ });
+
+ /**
+ * Order by the (lower) count of each term.
+ */
+ public static final InternalOrder COUNT_ASC = new InternalOrder((byte) 2, "_count", true, new Comparator<Terms.Bucket>() {
+
+ @Override
+ public int compare(Terms.Bucket o1, Terms.Bucket o2) {
+ int cmp = Longs.compare(o1.getDocCount(), o2.getDocCount());
+ if (cmp == 0) {
+ cmp = o1.compareTerm(o2);
+ }
+ return cmp;
+ }
+ });
+
+ /**
+ * Order by the terms.
+ */
+ public static final InternalOrder TERM_DESC = new InternalOrder((byte) 3, "_term", false, new Comparator<Terms.Bucket>() {
+
+ @Override
+ public int compare(Terms.Bucket o1, Terms.Bucket o2) {
+ return - o1.compareTerm(o2);
+ }
+ });
+
+ /**
+ * Order by the terms.
+ */
+ public static final InternalOrder TERM_ASC = new InternalOrder((byte) 4, "_term", true, new Comparator<Terms.Bucket>() {
+
+ @Override
+ public int compare(Terms.Bucket o1, Terms.Bucket o2) {
+ return o1.compareTerm(o2);
+ }
+ });
+
+
+ final byte id;
+ final String key;
+ final boolean asc;
+ protected final Comparator<Terms.Bucket> comparator;
+
+ InternalOrder(byte id, String key, boolean asc, Comparator<Terms.Bucket> comparator) {
+ this.id = id;
+ this.key = key;
+ this.asc = asc;
+ this.comparator = comparator;
+ }
+
+ byte id() {
+ return id;
+ }
+
+ String key() {
+ return key;
+ }
+
+ boolean asc() {
+ return asc;
+ }
+
+ @Override
+ protected Comparator<Terms.Bucket> comparator(Aggregator aggregator) {
+ return comparator;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject().field(key, asc ? "asc" : "desc").endObject();
+ }
+
+ public static InternalOrder validate(InternalOrder order, Aggregator termsAggregator) {
+ if (!(order instanceof Aggregation)) {
+ return order;
+ }
+ String aggName = ((Aggregation) order).aggName();
+ Aggregator[] subAggregators = termsAggregator.subAggregators();
+ for (int i = 0; i < subAggregators.length; i++) {
+ Aggregator aggregator = subAggregators[i];
+ if (aggregator.name().equals(aggName)) {
+
+ // we can only apply order on metrics sub-aggregators
+ if (!(aggregator instanceof MetricsAggregator)) {
+ throw new AggregationExecutionException("terms aggregation [" + termsAggregator.name() + "] is configured to order by sub-aggregation ["
+ + aggName + "] which is is not a metrics aggregation. Terms aggregation order can only refer to metrics aggregations");
+ }
+
+ if (aggregator instanceof MetricsAggregator.MultiValue) {
+ String valueName = ((Aggregation) order).metricName();
+ if (valueName == null) {
+ throw new AggregationExecutionException("terms aggregation [" + termsAggregator.name() + "] is configured with a sub-aggregation order ["
+ + aggName + "] which is a multi-valued aggregation, yet no metric name was specified");
+ }
+ if (!((MetricsAggregator.MultiValue) aggregator).hasMetric(valueName)) {
+ throw new AggregationExecutionException("terms aggregation [" + termsAggregator.name() + "] is configured with a sub-aggregation order ["
+ + aggName + "] and value [" + valueName + "] yet the referred sub aggregator holds no metric that goes by this name");
+ }
+ return order;
+ }
+
+ // aggregator must be of a single value type
+ // TODO: we could also be really strict here and fail if the user specified a value name for a single-value aggregation
+ return order;
+ }
+ }
+
+ throw new AggregationExecutionException("terms aggregation [" + termsAggregator.name() + "] is configured with a sub-aggregation order ["
+ + aggName + "] but no sub aggregation with this name is configured");
+ }
+
+ static class Aggregation extends InternalOrder {
+
+ static final byte ID = 0;
+
+ Aggregation(String key, boolean asc) {
+ super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<Terms.Bucket>(key, asc));
+ }
+
+ Aggregation(String aggName, String metricName, boolean asc) {
+ super(ID, key(aggName, metricName), asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<Terms.Bucket>(aggName, metricName, asc));
+ }
+
+ String aggName() {
+ int index = key.indexOf('.');
+ return index < 0 ? key : key.substring(0, index);
+ }
+
+ String metricName() {
+ int index = key.indexOf('.');
+ return index < 0 ? null : key.substring(index + 1, key.length());
+ }
+
+ private static String key(String aggName, String valueName) {
+ return (valueName == null) ? aggName : aggName + "." + valueName;
+ }
+
+ @Override
+ protected Comparator<Terms.Bucket> comparator(Aggregator termsAggregator) {
+ if (termsAggregator == null) {
+ return comparator;
+ }
+
+ // Internal Optimization:
+ //
+ // in this phase, if the order is based on sub-aggregations, we need to use a different comparator
+ // to avoid constructing buckets for ordering purposes (we can potentially have a lot of buckets and building
+ // them will cause loads of redundant object constructions). The "special" comparators here will fetch the
+ // sub aggregation values directly from the sub aggregators bypassing bucket creation. Note that the comparator
+ // attached to the order will still be used in the reduce phase of the Aggregation.
+
+ final Aggregator aggregator = subAggregator(aggName(), termsAggregator);
+ assert aggregator != null && aggregator instanceof MetricsAggregator : "this should be picked up before the aggregation is executed";
+ if (aggregator instanceof MetricsAggregator.MultiValue) {
+ final String valueName = metricName();
+ assert valueName != null : "this should be picked up before the aggregation is executed";
+ return new Comparator<Terms.Bucket>() {
+ @Override
+ public int compare(Terms.Bucket o1, Terms.Bucket o2) {
+ double v1 = ((MetricsAggregator.MultiValue) aggregator).metric(valueName, ((InternalTerms.Bucket) o1).bucketOrd);
+ double v2 = ((MetricsAggregator.MultiValue) aggregator).metric(valueName, ((InternalTerms.Bucket) o2).bucketOrd);
+ // some metrics may return NaN (e.g. avg, variance, ...), in which case we'd like to push all of those to
+ // the bottom
+ return Comparators.compareDiscardNaN(v1, v2, asc);
+ }
+ };
+ }
+
+ return new Comparator<Terms.Bucket>() {
+ @Override
+ public int compare(Terms.Bucket o1, Terms.Bucket o2) {
+ double v1 = ((MetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o1).bucketOrd);
+ double v2 = ((MetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o2).bucketOrd);
+ // some metrics may return NaN (e.g. avg, variance, ...), in which case we'd like to push all of those to
+ // the bottom
+ return Comparators.compareDiscardNaN(v1, v2, asc);
+ }
+ };
+ }
+
+ private Aggregator subAggregator(String aggName, Aggregator termsAggregator) {
+ Aggregator[] subAggregators = termsAggregator.subAggregators();
+ for (int i = 0; i < subAggregators.length; i++) {
+ if (subAggregators[i].name().equals(aggName)) {
+ return subAggregators[i];
+ }
+ }
+ return null;
+ }
+ }
+
+ public static class Streams {
+
+ public static void writeOrder(InternalOrder order, StreamOutput out) throws IOException {
+ out.writeByte(order.id());
+ if (order instanceof Aggregation) {
+ out.writeBoolean(((MultiBucketsAggregation.Bucket.SubAggregationComparator) order.comparator).asc());
+ out.writeString(((MultiBucketsAggregation.Bucket.SubAggregationComparator) order.comparator).aggName());
+ boolean hasValueName = ((MultiBucketsAggregation.Bucket.SubAggregationComparator) order.comparator).valueName() != null;
+ out.writeBoolean(hasValueName);
+ if (hasValueName) {
+ out.writeString(((MultiBucketsAggregation.Bucket.SubAggregationComparator) order.comparator).valueName());
+ }
+ }
+ }
+
+ public static InternalOrder readOrder(StreamInput in) throws IOException {
+ byte id = in.readByte();
+ switch (id) {
+ case 1: return InternalOrder.COUNT_DESC;
+ case 2: return InternalOrder.COUNT_ASC;
+ case 3: return InternalOrder.TERM_DESC;
+ case 4: return InternalOrder.TERM_ASC;
+ case 0:
+ boolean asc = in.readBoolean();
+ String key = in.readString();
+ if (in.readBoolean()) {
+ return new InternalOrder.Aggregation(key, in.readString(), asc);
+ }
+ return new InternalOrder.Aggregation(key, asc);
+ default:
+ throw new RuntimeException("unknown terms order");
+ }
+ }
+ }
+}
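
The Streams helper keeps the wire format tiny: one byte identifies the order (1-4 map to the built-in singletons, 0 to a sub-aggregation order, which additionally carries its direction, aggregation name and optional metric name). A minimal round-trip sketch, assuming the BytesStreamOutput / BytesReference#streamInput() pairing used elsewhere in this codebase:

    BytesStreamOutput out = new BytesStreamOutput();
    InternalOrder.Streams.writeOrder((InternalOrder) Terms.Order.aggregation("avg_price", false), out);
    InternalOrder read = InternalOrder.Streams.readOrder(out.bytes().streamInput());
    // all three components survive the trip
    assert read instanceof InternalOrder.Aggregation;
    assert "avg_price".equals(((InternalOrder.Aggregation) read).aggName());
    assert !read.asc();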
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
new file mode 100644
index 0000000..529d564
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+
+import java.util.*;
+
+/**
+ * Base class for the shard-level and reduced results of a {@code terms} aggregation.
+ */
+public abstract class InternalTerms extends InternalAggregation implements Terms, ToXContent, Streamable {
+
+ public static abstract class Bucket extends Terms.Bucket {
+
+ long bucketOrd;
+
+ protected long docCount;
+ protected InternalAggregations aggregations;
+
+ protected Bucket(long docCount, InternalAggregations aggregations) {
+ this.docCount = docCount;
+ this.aggregations = aggregations;
+ }
+
+ @Override
+ public long getDocCount() {
+ return docCount;
+ }
+
+ @Override
+ public Aggregations getAggregations() {
+ return aggregations;
+ }
+
+ public Bucket reduce(List<? extends Bucket> buckets, CacheRecycler cacheRecycler) {
+ if (buckets.size() == 1) {
+ Bucket bucket = buckets.get(0);
+ bucket.aggregations.reduce(cacheRecycler);
+ return bucket;
+ }
+ Bucket reduced = null;
+ List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(buckets.size());
+ for (Bucket bucket : buckets) {
+ if (reduced == null) {
+ reduced = bucket;
+ } else {
+ reduced.docCount += bucket.docCount;
+ }
+ aggregationsList.add(bucket.aggregations);
+ }
+ reduced.aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
+ return reduced;
+ }
+ }
+
+ protected InternalOrder order;
+ protected int requiredSize;
+ protected long minDocCount;
+ protected Collection<Bucket> buckets;
+ protected Map<String, Bucket> bucketMap;
+
+ protected InternalTerms() {} // for serialization
+
+ protected InternalTerms(String name, InternalOrder order, int requiredSize, long minDocCount, Collection<Bucket> buckets) {
+ super(name);
+ this.order = order;
+ this.requiredSize = requiredSize;
+ this.minDocCount = minDocCount;
+ this.buckets = buckets;
+ }
+
+ @Override
+ public Collection<Terms.Bucket> getBuckets() {
+ Object o = buckets;
+ return (Collection<Terms.Bucket>) o;
+ }
+
+ @Override
+ public Terms.Bucket getBucketByKey(String term) {
+ if (bucketMap == null) {
+ bucketMap = Maps.newHashMapWithExpectedSize(buckets.size());
+ for (Bucket bucket : buckets) {
+ bucketMap.put(bucket.getKey(), bucket);
+ }
+ }
+ return bucketMap.get(term);
+ }
+
+ @Override
+ public InternalTerms reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ InternalTerms terms = (InternalTerms) aggregations.get(0);
+ terms.trimExcessEntries(reduceContext.cacheRecycler());
+ return terms;
+ }
+
+ InternalTerms reduced = null;
+
+ Map<Text, List<InternalTerms.Bucket>> buckets = null;
+ for (InternalAggregation aggregation : aggregations) {
+ InternalTerms terms = (InternalTerms) aggregation;
+ if (terms instanceof UnmappedTerms) {
+ continue;
+ }
+ if (reduced == null) {
+ reduced = terms;
+ }
+ if (buckets == null) {
+ buckets = new HashMap<Text, List<Bucket>>(terms.buckets.size());
+ }
+ for (Bucket bucket : terms.buckets) {
+ List<Bucket> existingBuckets = buckets.get(bucket.getKeyAsText());
+ if (existingBuckets == null) {
+ existingBuckets = new ArrayList<Bucket>(aggregations.size());
+ buckets.put(bucket.getKeyAsText(), existingBuckets);
+ }
+ existingBuckets.add(bucket);
+ }
+ }
+
+ if (reduced == null) {
+ // there are only unmapped terms, so we just return the first one (no need to reduce)
+ return (UnmappedTerms) aggregations.get(0);
+ }
+
+ final int size = Math.min(requiredSize, buckets.size());
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(null));
+ for (Map.Entry<Text, List<Bucket>> entry : buckets.entrySet()) {
+ List<Bucket> sameTermBuckets = entry.getValue();
+ final Bucket b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext.cacheRecycler());
+ if (b.docCount >= minDocCount) {
+ ordered.insertWithOverflow(b);
+ }
+ }
+ Bucket[] list = new Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = (Bucket) ordered.pop();
+ }
+ reduced.buckets = Arrays.asList(list);
+ return reduced;
+ }
+
+ final void trimExcessEntries(CacheRecycler cacheRecycler) {
+ final List<Bucket> newBuckets = Lists.newArrayList();
+ for (Bucket b : buckets) {
+ if (newBuckets.size() >= requiredSize) {
+ break;
+ }
+ if (b.docCount >= minDocCount) {
+ newBuckets.add(b);
+ b.aggregations.reduce(cacheRecycler);
+ }
+ }
+ buckets = newBuckets;
+ }
+
+}
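
Note that minDocCount is applied only after same-term buckets from all shards have been merged and their doc counts summed, so a term that is rare on every individual shard can still make the final cut. A self-contained sketch of that merge (assuming java.util.* imports), with plain JDK maps standing in for the Text-keyed map above and made-up shard data:

    Map<String, Long> shard1 = new HashMap<String, Long>(); // term -> docCount
    shard1.put("foo", 3L); shard1.put("bar", 1L);
    Map<String, Long> shard2 = new HashMap<String, Long>();
    shard2.put("foo", 4L); shard2.put("baz", 2L);

    long minDocCount = 2;
    Map<String, Long> merged = new HashMap<String, Long>();
    for (Map<String, Long> shard : Arrays.asList(shard1, shard2)) {
        for (Map.Entry<String, Long> entry : shard.entrySet()) {
            Long count = merged.get(entry.getKey());
            merged.put(entry.getKey(), count == null ? entry.getValue() : count + entry.getValue());
        }
    }
    for (Iterator<Map.Entry<String, Long>> it = merged.entrySet().iterator(); it.hasNext(); ) {
        if (it.next().getValue() < minDocCount) {
            it.remove(); // drops "bar" (1); "foo" (7) and "baz" (2) survive
        }
    }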
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
new file mode 100644
index 0000000..4c11f68
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.google.common.primitives.Longs;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Result of a {@code terms} aggregation on a long-valued field.
+ */
+public class LongTerms extends InternalTerms {
+
+ public static final Type TYPE = new Type("terms", "lterms");
+
+ public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public LongTerms readResult(StreamInput in) throws IOException {
+ LongTerms buckets = new LongTerms();
+ buckets.readFrom(in);
+ return buckets;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+
+ static class Bucket extends InternalTerms.Bucket {
+
+ long term;
+
+ public Bucket(long term, long docCount, InternalAggregations aggregations) {
+ super(docCount, aggregations);
+ this.term = term;
+ }
+
+ @Override
+ public String getKey() {
+ return String.valueOf(term);
+ }
+
+ @Override
+ public Text getKeyAsText() {
+ return new StringText(String.valueOf(term));
+ }
+
+ @Override
+ public Number getKeyAsNumber() {
+ return term;
+ }
+
+ @Override
+ int compareTerm(Terms.Bucket other) {
+ return Longs.compare(term, other.getKeyAsNumber().longValue());
+ }
+ }
+
+ private ValueFormatter valueFormatter;
+
+ LongTerms() {} // for serialization
+
+ public LongTerms(String name, InternalOrder order, ValueFormatter valueFormatter, int requiredSize, long minDocCount, Collection<InternalTerms.Bucket> buckets) {
+ super(name, order, requiredSize, minDocCount, buckets);
+ this.valueFormatter = valueFormatter;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalTerms reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ InternalTerms terms = (InternalTerms) aggregations.get(0);
+ terms.trimExcessEntries(reduceContext.cacheRecycler());
+ return terms;
+ }
+ InternalTerms reduced = null;
+
+ Recycler.V<LongObjectOpenHashMap<List<Bucket>>> buckets = null;
+ for (InternalAggregation aggregation : aggregations) {
+ InternalTerms terms = (InternalTerms) aggregation;
+ if (terms instanceof UnmappedTerms) {
+ continue;
+ }
+ if (reduced == null) {
+ reduced = terms;
+ }
+ if (buckets == null) {
+ buckets = reduceContext.cacheRecycler().longObjectMap(terms.buckets.size());
+ }
+ for (Terms.Bucket bucket : terms.buckets) {
+ List<Bucket> existingBuckets = buckets.v().get(((Bucket) bucket).term);
+ if (existingBuckets == null) {
+ existingBuckets = new ArrayList<Bucket>(aggregations.size());
+ buckets.v().put(((Bucket) bucket).term, existingBuckets);
+ }
+ existingBuckets.add((Bucket) bucket);
+ }
+ }
+
+ if (reduced == null) {
+ // there are only unmapped terms, so we just return the first one (no need to reduce)
+ return (UnmappedTerms) aggregations.get(0);
+ }
+
+ // TODO: would it be better to sort the backing array buffer of the hppc map directly instead of using a PQ?
+ final int size = Math.min(requiredSize, buckets.v().size());
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(null));
+ Object[] internalBuckets = buckets.v().values;
+ boolean[] states = buckets.v().allocated;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ List<LongTerms.Bucket> sameTermBuckets = (List<LongTerms.Bucket>) internalBuckets[i];
+ final InternalTerms.Bucket b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext.cacheRecycler());
+ if (b.getDocCount() >= minDocCount) {
+ ordered.insertWithOverflow(b);
+ }
+ }
+ }
+ buckets.release();
+ InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = (Bucket) ordered.pop();
+ }
+ reduced.buckets = Arrays.asList(list);
+ return reduced;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.order = InternalOrder.Streams.readOrder(in);
+ this.valueFormatter = ValueFormatterStreams.readOptional(in);
+ this.requiredSize = in.readVInt();
+ this.minDocCount = in.readVLong();
+ int size = in.readVInt();
+ List<InternalTerms.Bucket> buckets = new ArrayList<InternalTerms.Bucket>(size);
+ for (int i = 0; i < size; i++) {
+ buckets.add(new Bucket(in.readLong(), in.readVLong(), InternalAggregations.readAggregations(in)));
+ }
+ this.buckets = buckets;
+ this.bucketMap = null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ InternalOrder.Streams.writeOrder(order, out);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeVInt(requiredSize);
+ out.writeVLong(minDocCount);
+ out.writeVInt(buckets.size());
+ for (InternalTerms.Bucket bucket : buckets) {
+ out.writeLong(((Bucket) bucket).term);
+ out.writeVLong(bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.startArray(CommonFields.BUCKETS);
+ for (InternalTerms.Bucket bucket : buckets) {
+ builder.startObject();
+ builder.field(CommonFields.KEY, ((Bucket) bucket).term);
+ if (valueFormatter != null) {
+ builder.field(CommonFields.KEY_AS_STRING, valueFormatter.format(((Bucket) bucket).term));
+ }
+ builder.field(CommonFields.DOC_COUNT, bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).toXContentInternal(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+}
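
The keys of a LongTerms response stay numeric on the wire; the optional ValueFormatter only matters at render time, where toXContent() emits a formatted key_as_string next to the raw key. Illustrative output for a date-valued field (values made up):

    "buckets" : [ {
        "key" : 1399939200000,
        "key_as_string" : "2014-05-13T00:00:00.000Z",
        "doc_count" : 42
    } ]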
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java
new file mode 100644
index 0000000..9bac176
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.bucket.LongHash;
+import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+/**
+ * Aggregates {@code terms} buckets for long values, hashing each value to a bucket ordinal.
+ */
+public class LongTermsAggregator extends BucketsAggregator {
+
+ private final InternalOrder order;
+ private final int requiredSize;
+ private final int shardSize;
+ private final long minDocCount;
+ private final NumericValuesSource valuesSource;
+ private final LongHash bucketOrds;
+
+ public LongTermsAggregator(String name, AggregatorFactories factories, NumericValuesSource valuesSource, long estimatedBucketCount,
+ InternalOrder order, int requiredSize, int shardSize, long minDocCount, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, BucketAggregationMode.PER_BUCKET, factories, estimatedBucketCount, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ this.order = InternalOrder.validate(order, this);
+ this.requiredSize = requiredSize;
+ this.shardSize = shardSize;
+ this.minDocCount = minDocCount;
+ bucketOrds = new LongHash(estimatedBucketCount, aggregationContext.pageCacheRecycler());
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return true;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0;
+ final LongValues values = valuesSource.longValues();
+ final int valuesCount = values.setDocument(doc);
+
+ for (int i = 0; i < valuesCount; ++i) {
+ final long val = values.nextValue();
+ long bucketOrdinal = bucketOrds.add(val);
+ if (bucketOrdinal < 0) { // already seen
+ bucketOrdinal = - 1 - bucketOrdinal;
+ }
+ collectBucket(doc, bucketOrdinal);
+ }
+ }
+
+ @Override
+ public LongTerms buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0;
+
+ if (minDocCount == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < requiredSize)) {
+ // we need to fill-in the blanks
+ for (AtomicReaderContext ctx : context.searchContext().searcher().getTopReaderContext().leaves()) {
+ context.setNextReader(ctx);
+ final LongValues values = valuesSource.longValues();
+ for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
+ final int valueCount = values.setDocument(docId);
+ for (int i = 0; i < valueCount; ++i) {
+ bucketOrds.add(values.nextValue());
+ }
+ }
+ }
+ }
+
+ final int size = (int) Math.min(bucketOrds.size(), shardSize);
+
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(this));
+ LongTerms.Bucket spare = null;
+ for (long i = 0; i < bucketOrds.capacity(); ++i) {
+ final long ord = bucketOrds.id(i);
+ if (ord < 0) {
+ // slot is not allocated
+ continue;
+ }
+
+ if (spare == null) {
+ spare = new LongTerms.Bucket(0, 0, null);
+ }
+ spare.term = bucketOrds.key(i);
+ spare.docCount = bucketDocCount(ord);
+ spare.bucketOrd = ord;
+ spare = (LongTerms.Bucket) ordered.insertWithOverflow(spare);
+ }
+
+ final InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; --i) {
+ final LongTerms.Bucket bucket = (LongTerms.Bucket) ordered.pop();
+ bucket.aggregations = bucketAggregations(bucket.bucketOrd);
+ list[i] = bucket;
+ }
+ return new LongTerms(name, order, valuesSource.formatter(), requiredSize, minDocCount, Arrays.asList(list));
+ }
+
+ @Override
+ public LongTerms buildEmptyAggregation() {
+ return new LongTerms(name, order, valuesSource.formatter(), requiredSize, minDocCount, Collections.<InternalTerms.Bucket>emptyList());
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(bucketOrds);
+ }
+
+}
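
Both collect() and buildAggregation() rely on the hash contract assumed by the code above: LongHash#add returns a fresh ordinal (>= 0) for a new key and -1 - existingOrdinal for a duplicate, so recovering the ordinal of an already-seen term is a single subtraction rather than a second lookup:

    long ord = bucketOrds.add(value); // e.g. returns -3 if the value already sits at ordinal 2
    if (ord < 0) {                    // already seen
        ord = -1 - ord;               // -1 - (-3) == 2
    }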
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
new file mode 100644
index 0000000..6f31eaa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.text.BytesText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Result of a {@code terms} aggregation on a string field.
+ */
+public class StringTerms extends InternalTerms {
+
+ public static final InternalAggregation.Type TYPE = new Type("terms", "sterms");
+
+ public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public StringTerms readResult(StreamInput in) throws IOException {
+ StringTerms buckets = new StringTerms();
+ buckets.readFrom(in);
+ return buckets;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+
+ public static class Bucket extends InternalTerms.Bucket {
+
+ BytesRef termBytes;
+
+ public Bucket(BytesRef term, long docCount, InternalAggregations aggregations) {
+ super(docCount, aggregations);
+ this.termBytes = term;
+ }
+
+ @Override
+ public String getKey() {
+ return termBytes.utf8ToString();
+ }
+
+ @Override
+ public Text getKeyAsText() {
+ return new BytesText(new BytesArray(termBytes));
+ }
+
+ @Override
+ public Number getKeyAsNumber() {
+ // this method is needed for scripted numeric faceting
+ return Double.parseDouble(termBytes.utf8ToString());
+ }
+
+ @Override
+ int compareTerm(Terms.Bucket other) {
+ return BytesRef.getUTF8SortedAsUnicodeComparator().compare(termBytes, ((Bucket) other).termBytes);
+ }
+ }
+
+ StringTerms() {} // for serialization
+
+ public StringTerms(String name, InternalOrder order, int requiredSize, long minDocCount, Collection<InternalTerms.Bucket> buckets) {
+ super(name, order, requiredSize, minDocCount, buckets);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.order = InternalOrder.Streams.readOrder(in);
+ this.requiredSize = in.readVInt();
+ this.minDocCount = in.readVLong();
+ int size = in.readVInt();
+ List<InternalTerms.Bucket> buckets = new ArrayList<InternalTerms.Bucket>(size);
+ for (int i = 0; i < size; i++) {
+ buckets.add(new Bucket(in.readBytesRef(), in.readVLong(), InternalAggregations.readAggregations(in)));
+ }
+ this.buckets = buckets;
+ this.bucketMap = null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ InternalOrder.Streams.writeOrder(order, out);
+ out.writeVInt(requiredSize);
+ out.writeVLong(minDocCount);
+ out.writeVInt(buckets.size());
+ for (InternalTerms.Bucket bucket : buckets) {
+ out.writeBytesRef(((Bucket) bucket).termBytes);
+ out.writeVLong(bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.startArray(CommonFields.BUCKETS);
+ for (InternalTerms.Bucket bucket : buckets) {
+ builder.startObject();
+ builder.field(CommonFields.KEY, ((Bucket) bucket).termBytes);
+ builder.field(CommonFields.DOC_COUNT, bucket.getDocCount());
+ ((InternalAggregations) bucket.getAggregations()).toXContentInternal(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java
new file mode 100644
index 0000000..78c6f24
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java
@@ -0,0 +1,312 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.UnmodifiableIterator;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.collect.Iterators2;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.ReaderContextAware;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.bucket.BytesRefHash;
+import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * An aggregator of string values.
+ */
+public class StringTermsAggregator extends BucketsAggregator {
+
+ private final ValuesSource valuesSource;
+ private final InternalOrder order;
+ private final int requiredSize;
+ private final int shardSize;
+ private final long minDocCount;
+ protected final BytesRefHash bucketOrds;
+ private final IncludeExclude includeExclude;
+
+ public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, long estimatedBucketCount,
+ InternalOrder order, int requiredSize, int shardSize, long minDocCount,
+ IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent) {
+
+ super(name, BucketAggregationMode.PER_BUCKET, factories, estimatedBucketCount, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ this.order = InternalOrder.validate(order, this);
+ this.requiredSize = requiredSize;
+ this.shardSize = shardSize;
+ this.minDocCount = minDocCount;
+ this.includeExclude = includeExclude;
+ bucketOrds = new BytesRefHash(estimatedBucketCount, aggregationContext.pageCacheRecycler());
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return true;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0;
+ final BytesValues values = valuesSource.bytesValues();
+ final int valuesCount = values.setDocument(doc);
+
+ for (int i = 0; i < valuesCount; ++i) {
+ final BytesRef bytes = values.nextValue();
+ if (includeExclude != null && !includeExclude.accept(bytes)) {
+ continue;
+ }
+ final int hash = values.currentValueHash();
+ assert hash == bytes.hashCode();
+ long bucketOrdinal = bucketOrds.add(bytes, hash);
+ if (bucketOrdinal < 0) { // already seen
+ bucketOrdinal = - 1 - bucketOrdinal;
+ }
+ collectBucket(doc, bucketOrdinal);
+ }
+ }
+
+ /** Returns an iterator over the field data terms. */
+ private static Iterator<BytesRef> terms(final BytesValues.WithOrdinals bytesValues, boolean reverse) {
+ final Ordinals.Docs ordinals = bytesValues.ordinals();
+ if (reverse) {
+ return new UnmodifiableIterator<BytesRef>() {
+
+ long i = ordinals.getMaxOrd() - 1;
+
+ @Override
+ public boolean hasNext() {
+ return i >= Ordinals.MIN_ORDINAL;
+ }
+
+ @Override
+ public BytesRef next() {
+ bytesValues.getValueByOrd(i--);
+ return bytesValues.copyShared();
+ }
+
+ };
+ } else {
+ return new UnmodifiableIterator<BytesRef>() {
+
+ long i = Ordinals.MIN_ORDINAL;
+
+ @Override
+ public boolean hasNext() {
+ return i < ordinals.getMaxOrd();
+ }
+
+ @Override
+ public BytesRef next() {
+ bytesValues.getValueByOrd(i++);
+ return bytesValues.copyShared();
+ }
+
+ };
+ }
+ }
+
+ @Override
+ public StringTerms buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0;
+
+ if (minDocCount == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < requiredSize)) {
+ // we need to fill-in the blanks
+ List<BytesValues.WithOrdinals> valuesWithOrdinals = Lists.newArrayList();
+ for (AtomicReaderContext ctx : context.searchContext().searcher().getTopReaderContext().leaves()) {
+ context.setNextReader(ctx);
+ final BytesValues values = valuesSource.bytesValues();
+ if (values instanceof BytesValues.WithOrdinals) {
+ valuesWithOrdinals.add((BytesValues.WithOrdinals) values);
+ } else {
+ // brute force
+ for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
+ final int valueCount = values.setDocument(docId);
+ for (int i = 0; i < valueCount; ++i) {
+ final BytesRef term = values.nextValue();
+ if (includeExclude == null || includeExclude.accept(term)) {
+ bucketOrds.add(term, values.currentValueHash());
+ }
+ }
+ }
+ }
+ }
+
+ // With ordinals we can be smarter and add just as many terms as necessary to the hash table.
+ // For instance, if sorting by term asc, we only need the first `requiredSize` terms, as the remaining terms would
+ // either be excluded by the priority queue or discarded at reduce time.
+ if (valuesWithOrdinals.size() > 0) {
+ final boolean reverse = order == InternalOrder.TERM_DESC;
+ Comparator<BytesRef> comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
+ if (reverse) {
+ comparator = Collections.reverseOrder(comparator);
+ }
+ Iterator<? extends BytesRef>[] iterators = new Iterator[valuesWithOrdinals.size()];
+ for (int i = 0; i < valuesWithOrdinals.size(); ++i) {
+ iterators[i] = terms(valuesWithOrdinals.get(i), reverse);
+ }
+ Iterator<BytesRef> terms = Iterators2.mergeSorted(Arrays.asList(iterators), comparator, true);
+ if (includeExclude != null) {
+ terms = Iterators.filter(terms, new Predicate<BytesRef>() {
+ @Override
+ public boolean apply(BytesRef input) {
+ return includeExclude.accept(input);
+ }
+ });
+ }
+ if (order == InternalOrder.COUNT_ASC) {
+ // let's try to find `shardSize` terms that matched no hit
+ // this one needs shardSize and not requiredSize because even though terms have a count of 0 here,
+ // they might have higher counts on other shards
+ for (int added = 0; added < shardSize && terms.hasNext(); ) {
+ if (bucketOrds.add(terms.next()) >= 0) {
+ ++added;
+ }
+ }
+ } else if (order == InternalOrder.COUNT_DESC) {
+ // add terms until there are enough buckets
+ while (bucketOrds.size() < requiredSize && terms.hasNext()) {
+ bucketOrds.add(terms.next());
+ }
+ } else if (order == InternalOrder.TERM_ASC || order == InternalOrder.TERM_DESC) {
+ // add the `requiredSize` least terms
+ for (int i = 0; i < requiredSize && terms.hasNext(); ++i) {
+ bucketOrds.add(terms.next());
+ }
+ } else {
+ // other orders (aggregations) are not optimizable
+ while (terms.hasNext()) {
+ bucketOrds.add(terms.next());
+ }
+ }
+ }
+ }
+
+ final int size = (int) Math.min(bucketOrds.size(), shardSize);
+
+ BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(this));
+ StringTerms.Bucket spare = null;
+ for (int i = 0; i < bucketOrds.size(); i++) {
+ if (spare == null) {
+ spare = new StringTerms.Bucket(new BytesRef(), 0, null);
+ }
+ bucketOrds.get(i, spare.termBytes);
+ spare.docCount = bucketDocCount(i);
+ spare.bucketOrd = i;
+ spare = (StringTerms.Bucket) ordered.insertWithOverflow(spare);
+ }
+
+ final InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; --i) {
+ final StringTerms.Bucket bucket = (StringTerms.Bucket) ordered.pop();
+ // the terms are owned by the BytesRefHash; we need to pull a copy since the BytesRef hash data may be recycled at some point
+ bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes);
+ bucket.aggregations = bucketAggregations(bucket.bucketOrd);
+ list[i] = bucket;
+ }
+
+ return new StringTerms(name, order, requiredSize, minDocCount, Arrays.asList(list));
+ }
+
+ @Override
+ public StringTerms buildEmptyAggregation() {
+ return new StringTerms(name, order, requiredSize, minDocCount, Collections.<InternalTerms.Bucket>emptyList());
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(bucketOrds);
+ }
+
+ /**
+ * Extension of StringTermsAggregator that caches bucket ords using terms ordinals.
+ */
+ public static class WithOrdinals extends StringTermsAggregator implements ReaderContextAware {
+
+ private final BytesValuesSource.WithOrdinals valuesSource;
+ private BytesValues.WithOrdinals bytesValues;
+ private Ordinals.Docs ordinals;
+ private LongArray ordinalToBucket;
+
+ public WithOrdinals(String name, AggregatorFactories factories, BytesValuesSource.WithOrdinals valuesSource, long estimatedBucketCount,
+ InternalOrder order, int requiredSize, int shardSize, long minDocCount, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, factories, valuesSource, estimatedBucketCount, order, requiredSize, shardSize, minDocCount, null, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ bytesValues = valuesSource.bytesValues();
+ ordinals = bytesValues.ordinals();
+ final long maxOrd = ordinals.getMaxOrd();
+ if (ordinalToBucket == null || ordinalToBucket.size() < maxOrd) {
+ if (ordinalToBucket != null) {
+ ordinalToBucket.release();
+ }
+ ordinalToBucket = BigArrays.newLongArray(BigArrays.overSize(maxOrd), context().pageCacheRecycler(), false);
+ }
+ ordinalToBucket.fill(0, maxOrd, -1L);
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert owningBucketOrdinal == 0 : "this is a per_bucket aggregator";
+ final int valuesCount = ordinals.setDocument(doc);
+
+ for (int i = 0; i < valuesCount; ++i) {
+ final long ord = ordinals.nextOrd();
+ long bucketOrd = ordinalToBucket.get(ord);
+ if (bucketOrd < 0) { // unlikely condition on a low-cardinality field
+ final BytesRef bytes = bytesValues.getValueByOrd(ord);
+ final int hash = bytesValues.currentValueHash();
+ assert hash == bytes.hashCode();
+ bucketOrd = bucketOrds.add(bytes, hash);
+ if (bucketOrd < 0) { // already seen in another segment
+ bucketOrd = - 1 - bucketOrd;
+ }
+ ordinalToBucket.set(ord, bucketOrd);
+ }
+
+ collectBucket(doc, bucketOrd);
+ }
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(bucketOrds, ordinalToBucket);
+ }
+ }
+
+}
+
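The WithOrdinals variant avoids hashing the same term bytes over and over: per segment it keeps a segment-ordinal to bucket-ordinal table, filled with -1 and resolved lazily through the shared BytesRefHash on the first miss. A conceptual sketch with a plain array standing in for the recycled LongArray (resolve(...) is a hypothetical stand-in for the hash lookup performed in collect()):

    long[] ordinalToBucket = new long[(int) maxOrd];
    Arrays.fill(ordinalToBucket, -1L);     // -1 = not resolved for this segment yet

    long bucketOrd = ordinalToBucket[(int) ord];
    if (bucketOrd < 0) {                   // first occurrence in this segment
        bucketOrd = resolve(ord);          // hypothetical: hash the term bytes once
        ordinalToBucket[(int) ord] = bucketOrd;
    }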
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java
new file mode 100644
index 0000000..5547876
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+import org.elasticsearch.search.aggregations.support.ScriptValueType;
+
+import java.util.Collection;
+import java.util.Comparator;
+
+/**
+ * A {@code terms} aggregation. Defines multiple buckets, each associated with a unique term for a specific field.
+ * All documents in a bucket have the bucket's term in that field.
+ */
+public interface Terms extends MultiBucketsAggregation {
+
+ static enum ValueType {
+
+ STRING(ScriptValueType.STRING),
+ LONG(ScriptValueType.LONG),
+ DOUBLE(ScriptValueType.DOUBLE);
+
+ final ScriptValueType scriptValueType;
+
+ private ValueType(ScriptValueType scriptValueType) {
+ this.scriptValueType = scriptValueType;
+ }
+
+ static ValueType resolveType(String type) {
+ if ("string".equals(type)) {
+ return STRING;
+ }
+ if ("double".equals(type) || "float".equals(type)) {
+ return DOUBLE;
+ }
+ if ("long".equals(type) || "integer".equals(type) || "short".equals(type) || "byte".equals(type)) {
+ return LONG;
+ }
+ return null;
+ }
+ }
+
+ /**
+ * A bucket that is associated with a single term
+ */
+ static abstract class Bucket implements MultiBucketsAggregation.Bucket {
+
+ public abstract Number getKeyAsNumber();
+
+ abstract int compareTerm(Terms.Bucket other);
+
+ }
+
+ Collection<Bucket> getBuckets();
+
+ Bucket getBucketByKey(String term);
+
+ /**
+ * Determines the order by which the term buckets will be sorted
+ */
+ static abstract class Order implements ToXContent {
+
+ /**
+ * @return a bucket ordering strategy that sorts buckets by their document counts (ascending or descending)
+ */
+ public static Order count(boolean asc) {
+ return asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC;
+ }
+
+ /**
+ * @return a bucket ordering strategy that sorts buckets by their terms (ascending or descending)
+ */
+ public static Order term(boolean asc) {
+ return asc ? InternalOrder.TERM_ASC : InternalOrder.TERM_DESC;
+ }
+
+ /**
+ * Creates a bucket ordering strategy which sorts buckets based on a single-valued metrics sub-aggregation
+ *
+ * @param aggregationName the name of the metrics sub-aggregation
+ * @param asc The direction of the order (ascending or descending)
+ */
+ public static Order aggregation(String aggregationName, boolean asc) {
+ return new InternalOrder.Aggregation(aggregationName, null, asc);
+ }
+
+ /**
+ * Creates a bucket ordering strategy which sorts buckets based on a multi-valued metrics sub-aggregation
+ *
+ * @param aggregationName the name of the metrics sub-aggregation
+ * @param metricName The name of the metric value of the multi-valued aggregation by which the sorting will be applied
+ * @param asc The direction of the order (ascending or descending)
+ */
+ public static Order aggregation(String aggregationName, String metricName, boolean asc) {
+ return new InternalOrder.Aggregation(aggregationName, metricName, asc);
+ }
+
+ /**
+ * @return A comparator for the bucket based on the given terms aggregator. The comparator is used in two phases:
+ *
+ * - aggregation phase, where each shard builds a list of term buckets to be sent to the coordinating node.
+ * In this phase, the passed in aggregator will be the terms aggregator that aggregates the buckets on the
+ * shard level.
+ *
+ * - reduce phase, where the coordinating node gathers all the buckets from all the shards and reduces them
+ * to a final bucket list. In this case, the passed in aggregator will be {@code null}
+ */
+ protected abstract Comparator<Bucket> comparator(Aggregator aggregator);
+
+ }
+}
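
A typical Java-API usage sketch tying Order to the builder below (the index, fields and client wiring are assumptions, and AggregationBuilders is assumed as the usual factory class):

    SearchResponse response = client.prepareSearch("products")
            .addAggregation(AggregationBuilders.terms("genres")
                    .field("genre")
                    .size(10)
                    .order(Terms.Order.aggregation("avg_price", false))
                    .subAggregation(AggregationBuilders.avg("avg_price").field("price")))
            .execute().actionGet();
    Terms genres = response.getAggregations().get("genres");
    for (Terms.Bucket bucket : genres.getBuckets()) {
        System.out.println(bucket.getKey() + " -> " + bucket.getDocCount());
    }

Ordering by "avg_price" only works because a sub-aggregation of that name is registered on the same terms aggregation; otherwise InternalOrder.validate() rejects the request.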
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
new file mode 100644
index 0000000..0bb55a9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.Aggregator.BucketAggregationMode;
+import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ * Picks the concrete terms aggregator implementation based on the value source and the execution hint.
+ */
+public class TermsAggregatorFactory extends ValueSourceAggregatorFactory {
+
+ public static final String EXECUTION_HINT_VALUE_MAP = "map";
+ public static final String EXECUTION_HINT_VALUE_ORDINALS = "ordinals";
+
+ private final InternalOrder order;
+ private final int requiredSize;
+ private final int shardSize;
+ private final long minDocCount;
+ private final IncludeExclude includeExclude;
+ private final String executionHint;
+
+ public TermsAggregatorFactory(String name, ValuesSourceConfig valueSourceConfig, InternalOrder order, int requiredSize, int shardSize, long minDocCount, IncludeExclude includeExclude, String executionHint) {
+ super(name, StringTerms.TYPE.name(), valueSourceConfig);
+ this.order = order;
+ this.requiredSize = requiredSize;
+ this.shardSize = shardSize;
+ this.minDocCount = minDocCount;
+ this.includeExclude = includeExclude;
+ this.executionHint = executionHint;
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new UnmappedTermsAggregator(name, order, requiredSize, minDocCount, aggregationContext, parent);
+ }
+
+ private static boolean hasParentBucketAggregator(Aggregator parent) {
+ if (parent == null) {
+ return false;
+ } else if (parent.bucketAggregationMode() == BucketAggregationMode.PER_BUCKET) {
+ return true;
+ } else {
+ return hasParentBucketAggregator(parent.parent());
+ }
+ }
+
+ @Override
+ protected Aggregator create(ValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ long estimatedBucketCount = valuesSource.metaData().maxAtomicUniqueValuesCount();
+ if (estimatedBucketCount < 0) {
+ // there is no estimate available; 50 should be a good start
+ estimatedBucketCount = 50;
+ }
+
+ // Cap the estimate: some future atomic field data implementations (e.g. binary doc values) will not
+ // know their exact cardinality and will return upper bounds from AtomicFieldData.getNumberUniqueValues()
+ // that may be largely over-estimated. The value chosen here is arbitrary, just to play nice with typical CPU caches.
+ //
+ // Another reason is that it may be faster to resize upon growth than to start directly with the appropriate size,
+ // and not all values are necessarily visited by the matching documents anyway.
+ estimatedBucketCount = Math.min(estimatedBucketCount, 512);
+
+ if (valuesSource instanceof BytesValuesSource) {
+ if (executionHint != null && !executionHint.equals(EXECUTION_HINT_VALUE_MAP) && !executionHint.equals(EXECUTION_HINT_VALUE_ORDINALS)) {
+ throw new ElasticsearchIllegalArgumentException("execution_hint can only be '" + EXECUTION_HINT_VALUE_MAP + "' or '" + EXECUTION_HINT_VALUE_ORDINALS + "', not " + executionHint);
+ }
+ String execution = executionHint;
+ if (!(valuesSource instanceof BytesValuesSource.WithOrdinals)) {
+ execution = EXECUTION_HINT_VALUE_MAP;
+ } else if (includeExclude != null) {
+ execution = EXECUTION_HINT_VALUE_MAP;
+ }
+ if (execution == null) {
+ if ((valuesSource instanceof BytesValuesSource.WithOrdinals)
+ && !hasParentBucketAggregator(parent)) {
+ execution = EXECUTION_HINT_VALUE_ORDINALS;
+ } else {
+ execution = EXECUTION_HINT_VALUE_MAP;
+ }
+ }
+ assert execution != null;
+
+ if (execution.equals(EXECUTION_HINT_VALUE_ORDINALS)) {
+ assert includeExclude == null;
+ final StringTermsAggregator.WithOrdinals aggregator = new StringTermsAggregator.WithOrdinals(name,
+ factories, (BytesValuesSource.WithOrdinals) valuesSource, estimatedBucketCount, order, requiredSize, shardSize, minDocCount, aggregationContext, parent);
+ aggregationContext.registerReaderContextAware(aggregator);
+ return aggregator;
+ } else {
+ return new StringTermsAggregator(name, factories, valuesSource, estimatedBucketCount, order, requiredSize, shardSize, minDocCount, includeExclude, aggregationContext, parent);
+ }
+ }
+
+ if (includeExclude != null) {
+ throw new AggregationExecutionException("Aggregation [" + name + "] cannot support the include/exclude " +
+ "settings as it can only be applied to string values");
+ }
+
+ if (valuesSource instanceof NumericValuesSource) {
+ if (((NumericValuesSource) valuesSource).isFloatingPoint()) {
+ return new DoubleTermsAggregator(name, factories, (NumericValuesSource) valuesSource, estimatedBucketCount, order, requiredSize, shardSize, minDocCount, aggregationContext, parent);
+ }
+ return new LongTermsAggregator(name, factories, (NumericValuesSource) valuesSource, estimatedBucketCount, order, requiredSize, shardSize, minDocCount, aggregationContext, parent);
+ }
+
+ throw new AggregationExecutionException("terms aggregation cannot be applied to field [" + valuesSourceConfig.fieldContext().field() +
+ "]. It can only be applied to numeric or string fields.");
+ }
+
+}
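
Note that the execution hint is best-effort: it is downgraded to "map" whenever ordinals are unavailable or include/exclude filtering is requested, and ordinals are only picked by default when no ancestor aggregator runs per-bucket. Forcing the hash-map path from the Java API would look like this (field names made up):

    AggregationBuilders.terms("tags")
            .field("tag")
            .executionHint(TermsAggregatorFactory.EXECUTION_HINT_VALUE_MAP);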
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java
new file mode 100644
index 0000000..0a0dac9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.ValuesSourceAggregationBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ * Builds a {@code terms} aggregation
+ */
+public class TermsBuilder extends ValuesSourceAggregationBuilder<TermsBuilder> {
+
+
+ private int size = -1;
+ private int shardSize = -1;
+ private long minDocCount = -1;
+ private Terms.ValueType valueType;
+ private Terms.Order order;
+ private String includePattern;
+ private int includeFlags;
+ private String excludePattern;
+ private int excludeFlags;
+ private String executionHint;
+
+ public TermsBuilder(String name) {
+ super(name, "terms");
+ }
+
+ /**
+ * Sets the size - indicating how many term buckets should be returned (defaults to 10)
+ */
+ public TermsBuilder size(int size) {
+ this.size = size;
+ return this;
+ }
+
+ /**
+ * Sets the shard_size - indicating the number of term buckets each shard will return to the coordinating node (the
+ * node that coordinates the search execution). The higher the shard size is, the more accurate the results are.
+ */
+ public TermsBuilder shardSize(int shardSize) {
+ this.shardSize = shardSize;
+ return this;
+ }
+
+ /**
+ * Set the minimum document count terms should have in order to appear in the response.
+ */
+ public TermsBuilder minDocCount(long minDocCount) {
+ this.minDocCount = minDocCount;
+ return this;
+ }
+
+ /**
+ * Define a regular expression that will determine what terms should be aggregated. The regular expression is based
+ * on the {@link java.util.regex.Pattern} class.
+ *
+ * @see #include(String, int)
+ */
+ public TermsBuilder include(String regex) {
+ return include(regex, 0);
+ }
+
+ /**
+     * Defines a regular expression that determines which terms should be aggregated. The regular expression is based
+     * on the {@link java.util.regex.Pattern} class.
+ *
+ * @see java.util.regex.Pattern#compile(String, int)
+ */
+ public TermsBuilder include(String regex, int flags) {
+ this.includePattern = regex;
+ this.includeFlags = flags;
+ return this;
+ }
+
+ /**
+     * Defines a regular expression that determines which terms should be excluded from the aggregation. The regular
+     * expression is based on the {@link java.util.regex.Pattern} class.
+ *
+ * @see #exclude(String, int)
+ */
+ public TermsBuilder exclude(String regex) {
+ return exclude(regex, 0);
+ }
+
+ /**
+     * Defines a regular expression that determines which terms should be excluded from the aggregation. The regular
+     * expression is based on the {@link java.util.regex.Pattern} class.
+ *
+ * @see java.util.regex.Pattern#compile(String, int)
+ */
+ public TermsBuilder exclude(String regex, int flags) {
+ this.excludePattern = regex;
+ this.excludeFlags = flags;
+ return this;
+ }
+
+ /**
+     * When using scripts, the value type indicates the type of the values the script generates.
+ */
+ public TermsBuilder valueType(Terms.ValueType valueType) {
+ this.valueType = valueType;
+ return this;
+ }
+
+ /**
+ * Defines the order in which the buckets will be returned.
+ */
+ public TermsBuilder order(Terms.Order order) {
+ this.order = order;
+ return this;
+ }
+
+ public TermsBuilder executionHint(String executionHint) {
+ this.executionHint = executionHint;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder doInternalXContent(XContentBuilder builder, Params params) throws IOException {
+        if (size >= 0) {
+ builder.field("size", size);
+ }
+ if (shardSize >= 0) {
+ builder.field("shard_size", shardSize);
+ }
+ if (minDocCount >= 0) {
+ builder.field("min_doc_count", minDocCount);
+ }
+ if (valueType != null) {
+ builder.field("value_type", valueType.name().toLowerCase(Locale.ROOT));
+ }
+ if (order != null) {
+ builder.field("order");
+ order.toXContent(builder, params);
+ }
+ if (includePattern != null) {
+ if (includeFlags == 0) {
+ builder.field("include", includePattern);
+ } else {
+ builder.startObject("include")
+ .field("pattern", includePattern)
+ .field("flags", includeFlags)
+ .endObject();
+ }
+ }
+ if (excludePattern != null) {
+ if (excludeFlags == 0) {
+ builder.field("exclude", excludePattern);
+ } else {
+ builder.startObject("exclude")
+ .field("pattern", excludePattern)
+ .field("flags", excludeFlags)
+ .endObject();
+ }
+ }
+ if (executionHint != null) {
+ builder.field("execution_hint", executionHint);
+ }
+ return builder;
+ }
+}
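A minimal usage sketch of the builder above. The aggregation name, field, and all values are illustrative, and field(String) is assumed to be inherited from ValuesSourceAggregationBuilder:

    // Builds the equivalent of a {"terms": {...}} request body.
    TermsBuilder terms = new TermsBuilder("categories")
            .field("category")              // assumed inherited setter
            .size(20)                       // top 20 buckets in the response
            .shardSize(100)                 // each shard returns 100 candidates
            .minDocCount(2)                 // drop buckets with fewer than 2 docs
            .include("cat_.*")              // keep terms matching this pattern
            .exclude("cat_internal_.*");    // ...minus terms matching this one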
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
new file mode 100644
index 0000000..395e476
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ *
+ */
+public class TermsParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return StringTerms.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ String field = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ Terms.ValueType valueType = null;
+ int requiredSize = 10;
+ int shardSize = -1;
+ String orderKey = "_count";
+ boolean orderAsc = false;
+ String format = null;
+ boolean assumeUnique = false;
+ String include = null;
+ int includeFlags = 0; // 0 means no flags
+ String exclude = null;
+ int excludeFlags = 0; // 0 means no flags
+ String executionHint = null;
+ long minDocCount = 1;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("value_type".equals(currentFieldName) || "valueType".equals(currentFieldName)) {
+ valueType = Terms.ValueType.resolveType(parser.text());
+ } else if ("format".equals(currentFieldName)) {
+ format = parser.text();
+ } else if ("include".equals(currentFieldName)) {
+ include = parser.text();
+ } else if ("exclude".equals(currentFieldName)) {
+ exclude = parser.text();
+ } else if ("execution_hint".equals(currentFieldName) || "executionHint".equals(currentFieldName)) {
+ executionHint = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("script_values_unique".equals(currentFieldName) || "scriptValuesUnique".equals(currentFieldName)) {
+ assumeUnique = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("size".equals(currentFieldName)) {
+ requiredSize = parser.intValue();
+ } else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
+ shardSize = parser.intValue();
+ } else if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
+ minDocCount = parser.intValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else if ("order".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ orderKey = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ String dir = parser.text();
+ if ("asc".equalsIgnoreCase(dir)) {
+ orderAsc = true;
+ } else if ("desc".equalsIgnoreCase(dir)) {
+ orderAsc = false;
+ } else {
+ throw new SearchParseException(context, "Unknown terms order direction [" + dir + "] in terms aggregation [" + aggregationName + "]");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " for [order] in [" + aggregationName + "].");
+ }
+ }
+ } else if ("include".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("pattern".equals(currentFieldName)) {
+ include = parser.text();
+ } else if ("flags".equals(currentFieldName)) {
+ includeFlags = Regex.flagsFromString(parser.text());
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("flags".equals(currentFieldName)) {
+ includeFlags = parser.intValue();
+ }
+ }
+ }
+ } else if ("exclude".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("pattern".equals(currentFieldName)) {
+ exclude = parser.text();
+ } else if ("flags".equals(currentFieldName)) {
+ excludeFlags = Regex.flagsFromString(parser.text());
+ }
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ if ("flags".equals(currentFieldName)) {
+ excludeFlags = parser.intValue();
+ }
+ }
+ }
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+        // shard_size cannot be smaller than size, as we need to fetch at least <size> entries from each shard in order to return <size>
+ if (shardSize < requiredSize) {
+ shardSize = requiredSize;
+ }
+
+ IncludeExclude includeExclude = null;
+ if (include != null || exclude != null) {
+ Pattern includePattern = include != null ? Pattern.compile(include, includeFlags) : null;
+ Pattern excludePattern = exclude != null ? Pattern.compile(exclude, excludeFlags) : null;
+ includeExclude = new IncludeExclude(includePattern, excludePattern);
+ }
+
+ InternalOrder order = resolveOrder(orderKey, orderAsc);
+ SearchScript searchScript = null;
+ if (script != null) {
+ searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptParams);
+ }
+
+ if (field == null) {
+ Class<? extends ValuesSource> valueSourceType = script == null ?
+ ValuesSource.class : // unknown, will inherit whatever is in the context
+ valueType != null ? valueType.scriptValueType.getValuesSourceType() : // the user explicitly defined a value type
+ BytesValuesSource.class; // defaulting to bytes
+
+ ValuesSourceConfig<?> config = new ValuesSourceConfig(valueSourceType);
+ if (valueType != null) {
+ config.scriptValueType(valueType.scriptValueType);
+ }
+ config.script(searchScript);
+ if (!assumeUnique) {
+ config.ensureUnique(true);
+ }
+ return new TermsAggregatorFactory(aggregationName, config, order, requiredSize, shardSize, minDocCount, includeExclude, executionHint);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ ValuesSourceConfig<?> config = new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class);
+ config.unmapped(true);
+ return new TermsAggregatorFactory(aggregationName, config, order, requiredSize, shardSize, minDocCount, includeExclude, executionHint);
+ }
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+
+ ValuesSourceConfig<?> config;
+
+ if (mapper instanceof DateFieldMapper) {
+ DateFieldMapper dateMapper = (DateFieldMapper) mapper;
+ ValueFormatter formatter = format == null ?
+ new ValueFormatter.DateTime(dateMapper.dateTimeFormatter()) :
+ new ValueFormatter.DateTime(format);
+ config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class)
+ .formatter(formatter)
+ .parser(new ValueParser.DateMath(dateMapper.dateMathParser()));
+
+ } else if (mapper instanceof IpFieldMapper) {
+ config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class)
+ .formatter(ValueFormatter.IPv4)
+ .parser(ValueParser.IPv4);
+
+ } else if (indexFieldData instanceof IndexNumericFieldData) {
+ config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+ if (format != null) {
+ config.formatter(new ValueFormatter.Number.Pattern(format));
+ }
+
+ } else {
+ config = new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class);
+ // TODO: it will make sense to set false instead here if the aggregator factory uses
+ // ordinals instead of hash tables
+ config.needsHashes(true);
+ }
+
+ config.script(searchScript);
+
+ config.fieldContext(new FieldContext(field, indexFieldData));
+
+ // We need values to be unique to be able to run terms aggs efficiently
+ if (!assumeUnique) {
+ config.ensureUnique(true);
+ }
+
+ return new TermsAggregatorFactory(aggregationName, config, order, requiredSize, shardSize, minDocCount, includeExclude, executionHint);
+ }
+
+ static InternalOrder resolveOrder(String key, boolean asc) {
+ if ("_term".equals(key)) {
+ return asc ? InternalOrder.TERM_ASC : InternalOrder.TERM_DESC;
+ }
+ if ("_count".equals(key)) {
+ return asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC;
+ }
+ int i = key.indexOf('.');
+ if (i < 0) {
+ return new InternalOrder.Aggregation(key, asc);
+ }
+ return new InternalOrder.Aggregation(key.substring(0, i), key.substring(i+1), asc);
+ }
+
+}
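For reference, a request body that the parser above accepts, reconstructed from the keys it recognizes; the aggregation name, field, and values are illustrative:

    "genders" : {
        "terms" : {
            "field" : "gender",
            "size" : 10,
            "shard_size" : 100,
            "min_doc_count" : 1,
            "order" : { "_count" : "desc" },
            "include" : { "pattern" : "m.*", "flags" : "CASE_INSENSITIVE" },
            "exclude" : "male_internal.*"
        }
    }

Order keys other than _term and _count are resolved by resolveOrder(...) as paths into sub-aggregations, e.g. "avg_height.value" orders buckets by the "value" metric of a nested "avg_height" aggregation.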
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
new file mode 100644
index 0000000..bf04f37
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ *
+ */
+public class UnmappedTerms extends InternalTerms {
+
+ public static final Type TYPE = new Type("terms", "umterms");
+
+ private static final Collection<Bucket> BUCKETS = Collections.emptyList();
+ private static final Map<String, Bucket> BUCKETS_MAP = Collections.emptyMap();
+
+ public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public UnmappedTerms readResult(StreamInput in) throws IOException {
+ UnmappedTerms buckets = new UnmappedTerms();
+ buckets.readFrom(in);
+ return buckets;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ UnmappedTerms() {} // for serialization
+
+ public UnmappedTerms(String name, InternalOrder order, int requiredSize, long minDocCount) {
+ super(name, order, requiredSize, minDocCount, BUCKETS);
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ this.name = in.readString();
+ this.order = InternalOrder.Streams.readOrder(in);
+ this.requiredSize = in.readVInt();
+ this.minDocCount = in.readVLong();
+ this.buckets = BUCKETS;
+ this.bucketMap = BUCKETS_MAP;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ InternalOrder.Streams.writeOrder(order, out);
+ out.writeVInt(requiredSize);
+ out.writeVLong(minDocCount);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.startArray(CommonFields.BUCKETS).endArray();
+ builder.endObject();
+ return builder;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTermsAggregator.java
new file mode 100644
index 0000000..a772687
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTermsAggregator.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class UnmappedTermsAggregator extends Aggregator {
+
+ private final InternalOrder order;
+ private final int requiredSize;
+ private final long minDocCount;
+
+ public UnmappedTermsAggregator(String name, InternalOrder order, int requiredSize, long minDocCount, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, BucketAggregationMode.PER_BUCKET, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
+ this.order = order;
+ this.requiredSize = requiredSize;
+ this.minDocCount = minDocCount;
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return false;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ assert owningBucketOrdinal == 0;
+ return new UnmappedTerms(name, order, requiredSize, minDocCount);
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new UnmappedTerms(name, order, requiredSize, minDocCount);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/BucketPriorityQueue.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/BucketPriorityQueue.java
new file mode 100644
index 0000000..4d2205c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/BucketPriorityQueue.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms.support;
+
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+
+import java.util.Comparator;
+
+public class BucketPriorityQueue extends PriorityQueue<Terms.Bucket> {
+
+ private final Comparator<Terms.Bucket> comparator;
+
+ public BucketPriorityQueue(int size, Comparator<Terms.Bucket> comparator) {
+ super(size);
+ this.comparator = comparator;
+ }
+
+ @Override
+ protected boolean lessThan(Terms.Bucket a, Terms.Bucket b) {
+        return comparator.compare(a, b) > 0; // reversed: the queue evicts its "least" element, so inverting the comparator drops the worst-ranking bucket; callers restore order by filling the result list back to front
+ }
+}
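A sketch of the intended top-N selection. countDescComparator and candidateBuckets are assumed to be in scope; insertWithOverflow and pop are inherited from Lucene's PriorityQueue:

    // Keep only the 3 best buckets under the given comparator. Because
    // lessThan inverts the comparator, insertWithOverflow evicts the
    // worst-ranking bucket whenever the queue is full.
    BucketPriorityQueue queue = new BucketPriorityQueue(3, countDescComparator);
    for (Terms.Bucket bucket : candidateBuckets) {
        queue.insertWithOverflow(bucket);
    }
    // pop() then yields buckets worst-first.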
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
new file mode 100644
index 0000000..936efca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.terms.support;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Defines the include/exclude regular expression filtering for string terms aggregations. Exclusion takes
+ * precedence: the {@code include} pattern is evaluated first, and a term that passes it is then checked
+ * against the {@code exclude} pattern.
+ */
+public class IncludeExclude {
+
+ private final Matcher include;
+ private final Matcher exclude;
+ private final CharsRef scratch = new CharsRef();
+
+ /**
+ * @param include The regular expression pattern for the terms to be included
+     *                (may only be {@code null} if {@code exclude} is not {@code null})
+ * @param exclude The regular expression pattern for the terms to be excluded
+     *                (may only be {@code null} if {@code include} is not {@code null})
+ */
+ public IncludeExclude(Pattern include, Pattern exclude) {
+ assert include != null || exclude != null : "include & exclude cannot both be null"; // otherwise IncludeExclude object should be null
+ this.include = include != null ? include.matcher("") : null;
+ this.exclude = exclude != null ? exclude.matcher("") : null;
+ }
+
+ /**
+ * Returns whether the given value is accepted based on the {@code include} & {@code exclude} patterns.
+ */
+ public boolean accept(BytesRef value) {
+ UnicodeUtil.UTF8toUTF16(value, scratch);
+ if (include == null) {
+ // exclude must not be null
+ return !exclude.reset(scratch).matches();
+ }
+ if (!include.reset(scratch).matches()) {
+ return false;
+ }
+ if (exclude == null) {
+ return true;
+ }
+ return !exclude.reset(scratch).matches();
+ }
+}
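A minimal check of the accept() semantics above, assuming java.util.regex.Pattern and org.apache.lucene.util.BytesRef are imported; the patterns and terms are illustrative:

    IncludeExclude filter = new IncludeExclude(
            Pattern.compile("user_.*"),           // include
            Pattern.compile("user_internal_.*")); // exclude
    filter.accept(new BytesRef("user_alice"));      // true: included, not excluded
    filter.accept(new BytesRef("user_internal_1")); // false: exclusion wins
    filter.accept(new BytesRef("admin_bob"));       // false: fails the include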
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregation.java
new file mode 100644
index 0000000..d1d1c5c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregation.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+
+/**
+ *
+ */
+public abstract class MetricsAggregation extends InternalAggregation {
+
+ protected ValueFormatter valueFormatter;
+
+ public static abstract class SingleValue extends MetricsAggregation {
+
+ protected SingleValue() {}
+
+ protected SingleValue(String name) {
+ super(name);
+ }
+
+ public abstract double value();
+ }
+
+ public static abstract class MultiValue extends MetricsAggregation {
+
+ protected MultiValue() {}
+
+ protected MultiValue(String name) {
+ super(name);
+ }
+
+ public abstract double value(String name);
+
+ }
+
+ protected MetricsAggregation() {} // for serialization
+
+ protected MetricsAggregation(String name) {
+ super(name);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java
new file mode 100644
index 0000000..7e149f6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public abstract class MetricsAggregationBuilder<B extends MetricsAggregationBuilder<B>> extends AbstractAggregationBuilder {
+
+ public MetricsAggregationBuilder(String name, String type) {
+ super(name, type);
+ }
+
+ @Override
+ public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name).startObject(type);
+ internalXContent(builder, params);
+ return builder.endObject().endObject();
+ }
+
+ protected abstract void internalXContent(XContentBuilder builder, Params params) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java
new file mode 100644
index 0000000..e3ab5f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+/**
+ *
+ */
+public abstract class MetricsAggregator extends Aggregator {
+
+ private MetricsAggregator(String name, long estimatedBucketsCount, AggregationContext context, Aggregator parent) {
+ super(name, BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, estimatedBucketsCount, context, parent);
+ }
+
+ public static abstract class SingleValue extends MetricsAggregator {
+
+ protected SingleValue(String name, long estimatedBucketsCount, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ }
+
+ public abstract double metric(long owningBucketOrd);
+ }
+
+ public static abstract class MultiValue extends MetricsAggregator {
+
+ protected MultiValue(String name, long estimatedBucketsCount, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ }
+
+ public abstract boolean hasMetric(String name);
+
+ public abstract double metric(String name, long owningBucketOrd);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java
new file mode 100644
index 0000000..bbf1827
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public abstract class ValuesSourceMetricsAggregationBuilder<B extends ValuesSourceMetricsAggregationBuilder<B>> extends MetricsAggregationBuilder<B> {
+
+ private String field;
+ private String script;
+ private String lang;
+ private Map<String, Object> params;
+
+ protected ValuesSourceMetricsAggregationBuilder(String name, String type) {
+ super(name, type);
+ }
+
+ @SuppressWarnings("unchecked")
+ public B field(String field) {
+ this.field = field;
+ return (B) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public B script(String script) {
+ this.script = script;
+ return (B) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public B lang(String lang) {
+ this.lang = lang;
+ return (B) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public B params(Map<String, Object> params) {
+ if (this.params == null) {
+ this.params = params;
+ } else {
+ this.params.putAll(params);
+ }
+ return (B) this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public B param(String name, Object value) {
+ if (this.params == null) {
+ this.params = Maps.newHashMap();
+ }
+ this.params.put(name, value);
+ return (B) this;
+ }
+
+ @Override
+ protected void internalXContent(XContentBuilder builder, Params params) throws IOException {
+ if (field != null) {
+ builder.field("field", field);
+ }
+
+ if (script != null) {
+ builder.field("script", script);
+ }
+
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+
+ if (this.params != null && !this.params.isEmpty()) {
+ builder.field("params").map(this.params);
+ }
+ }
+}
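A usage sketch of the fluent setters above, using the AvgBuilder subclass added later in this patch; the script, language, and parameter values are illustrative:

    AvgBuilder avg = new AvgBuilder("avg_grade")
            .script("doc['grade'].value * factor")
            .lang("mvel")
            .param("factor", 1.2);
    // serializes to: { "avg_grade" : { "avg" : { "script" : ..., "lang" : ..., "params" : { ... } } } }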
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregatorParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregatorParser.java
new file mode 100644
index 0000000..48636a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregatorParser.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public abstract class ValuesSourceMetricsAggregatorParser<S extends MetricsAggregation> implements Aggregator.Parser {
+
+ protected boolean requiresSortedValues() {
+ return false;
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
+
+ String field = null;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> scriptParams = null;
+ boolean assumeSorted = false;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ scriptParams = parser.map();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ if ("script_values_sorted".equals(currentFieldName) || "scriptValuesSorted".equals(currentFieldName)) {
+ assumeSorted = parser.booleanValue();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (script != null) {
+ config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
+ }
+
+ if (!assumeSorted && requiresSortedValues()) {
+ config.ensureSorted(true);
+ }
+
+ if (field == null) {
+ return createFactory(aggregationName, config);
+ }
+
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return createFactory(aggregationName, config);
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return createFactory(aggregationName, config);
+ }
+
+ protected abstract AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config);
+}
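The concrete metrics parsers built on this class (e.g. AvgParser below) all accept the same source shape, reconstructed here from the keys the parse loop recognizes; values are illustrative:

    "avg_grade" : {
        "avg" : {
            "script" : "doc['grade'].value * factor",
            "lang" : "mvel",
            "params" : { "factor" : 1.2 },
            "script_values_sorted" : false
        }
    }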
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java
new file mode 100644
index 0000000..e53509f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/Avg.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.avg;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+
+/**
+ *
+ */
+public interface Avg extends Aggregation {
+
+ double getValue();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java
new file mode 100644
index 0000000..ccfe48a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.avg;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class AvgAggregator extends MetricsAggregator.SingleValue {
+
+ private final NumericValuesSource valuesSource;
+
+ private LongArray counts;
+ private DoubleArray sums;
+
+ public AvgAggregator(String name, long estimatedBucketsCount, NumericValuesSource valuesSource, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
+ final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
+ counts = BigArrays.newLongArray(initialSize, context.pageCacheRecycler(), true);
+ sums = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), true);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert valuesSource != null : "if value source is null, collect should never be called";
+
+ DoubleValues values = valuesSource.doubleValues();
+ if (values == null) {
+ return;
+ }
+
+ counts = BigArrays.grow(counts, owningBucketOrdinal + 1);
+ sums = BigArrays.grow(sums, owningBucketOrdinal + 1);
+
+ final int valueCount = values.setDocument(doc);
+ counts.increment(owningBucketOrdinal, valueCount);
+ double sum = 0;
+ for (int i = 0; i < valueCount; i++) {
+ sum += values.nextValue();
+ }
+ sums.increment(owningBucketOrdinal, sum);
+ }
+
+ @Override
+ public double metric(long owningBucketOrd) {
+ return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null || owningBucketOrdinal >= counts.size()) {
+            return new InternalAvg(name, 0.0, 0L);
+ }
+ return new InternalAvg(name, sums.get(owningBucketOrdinal), counts.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+        return new InternalAvg(name, 0.0, 0L);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<NumericValuesSource> {
+
+ public Factory(String name, String type, ValuesSourceConfig<NumericValuesSource> valuesSourceConfig) {
+ super(name, type, valuesSourceConfig);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new AvgAggregator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new AvgAggregator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(counts, sums);
+ }
+
+}
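The aggregator above keeps one running (sum, count) pair per bucket ordinal and only materializes the average at build time. A plain-array sketch of the same bookkeeping, without BigArrays paging or null-source handling:

    final class PerBucketAvg {
        private final long[] counts;
        private final double[] sums;

        PerBucketAvg(int numBuckets) {
            counts = new long[numBuckets];
            sums = new double[numBuckets];
        }

        void collect(int bucketOrd, double[] docValues) {
            counts[bucketOrd] += docValues.length;
            for (double v : docValues) {
                sums[bucketOrd] += v;
            }
        }

        double avg(int bucketOrd) {
            return sums[bucketOrd] / counts[bucketOrd]; // NaN when count == 0
        }
    }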
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgBuilder.java
new file mode 100644
index 0000000..8df85ef
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgBuilder.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.avg;
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+
+/**
+ *
+ */
+public class AvgBuilder extends ValuesSourceMetricsAggregationBuilder<AvgBuilder> {
+
+ public AvgBuilder(String name) {
+ super(name, InternalAvg.TYPE.name());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
new file mode 100644
index 0000000..28e247d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.avg;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregatorParser;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ *
+ */
+public class AvgParser extends ValuesSourceMetricsAggregatorParser<InternalAvg> {
+
+ @Override
+ public String type() {
+ return InternalAvg.TYPE.name();
+ }
+
+ @Override
+ protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config) {
+ return new AvgAggregator.Factory(aggregationName, type(), config);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java
new file mode 100644
index 0000000..1500271
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.avg;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalAvg extends MetricsAggregation.SingleValue implements Avg {
+
+ public final static Type TYPE = new Type("avg");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalAvg readResult(StreamInput in) throws IOException {
+ InternalAvg result = new InternalAvg();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ private double sum;
+ private long count;
+
+ InternalAvg() {} // for serialization
+
+ public InternalAvg(String name, double sum, long count) {
+ super(name);
+ this.sum = sum;
+ this.count = count;
+ }
+
+ @Override
+ public double value() {
+ return getValue();
+ }
+
+ public double getValue() {
+ return sum / count;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalAvg reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ return (InternalAvg) aggregations.get(0);
+ }
+ InternalAvg reduced = null;
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ reduced = (InternalAvg) aggregation;
+ } else {
+ reduced.count += ((InternalAvg) aggregation).count;
+ reduced.sum += ((InternalAvg) aggregation).sum;
+ }
+ }
+ return reduced;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ valueFormatter = ValueFormatterStreams.readOptional(in);
+ sum = in.readDouble();
+ count = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeDouble(sum);
+ out.writeVLong(count);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.field(CommonFields.VALUE, count != 0 ? getValue() : null);
+ if (count != 0 && valueFormatter != null) {
+ builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(getValue()));
+ }
+ builder.endObject();
+ return builder;
+ }
+
+}
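A note on reduce() above: it merges raw sums and counts rather than averaging the shard-level averages, which would weight shards of different sizes equally. Illustrative arithmetic:

    double sumA = 10.0; long countA = 1;   // shard A: one doc with value 10
    double sumB = 2.0;  long countB = 2;   // shard B: two docs with value 1
    double correct = (sumA + sumB) / (countA + countB);      // 12 / 3 = 4.0
    double naive   = (sumA / countA + sumB / countB) / 2;    // (10 + 1) / 2 = 5.5, wrong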
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java
new file mode 100644
index 0000000..9a747c7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.max;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalMax extends MetricsAggregation.SingleValue implements Max {
+
+ public final static Type TYPE = new Type("max");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalMax readResult(StreamInput in) throws IOException {
+ InternalMax result = new InternalMax();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ private double max;
+
+ InternalMax() {} // for serialization
+
+ public InternalMax(String name, double max) {
+ super(name);
+ this.max = max;
+ }
+
+ @Override
+ public double value() {
+ return max;
+ }
+
+ public double getValue() {
+ return max;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalMax reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ return (InternalMax) aggregations.get(0);
+ }
+ InternalMax reduced = null;
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ reduced = (InternalMax) aggregation;
+ } else {
+ reduced.max = Math.max(reduced.max, ((InternalMax) aggregation).max);
+ }
+ }
+        return reduced != null ? reduced : (InternalMax) aggregations.get(0);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ valueFormatter = ValueFormatterStreams.readOptional(in);
+ max = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeDouble(max);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ boolean hasValue = !Double.isInfinite(max);
+ builder.field(CommonFields.VALUE, hasValue ? max : null);
+ if (hasValue && valueFormatter != null) {
+ builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(max));
+ }
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java
new file mode 100644
index 0000000..aee0df5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/Max.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.max;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+
+/**
+ * An aggregation that computes the maximum of the aggregated values.
+ */
+public interface Max extends Aggregation {
+
+ double getValue();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java
new file mode 100644
index 0000000..4966b34
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.max;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+
+/**
+ * Collects the per-bucket maximum of a numeric values source.
+ */
+public class MaxAggregator extends MetricsAggregator.SingleValue {
+
+ private final NumericValuesSource valuesSource;
+
+ private DoubleArray maxes;
+
+ public MaxAggregator(String name, long estimatedBucketsCount, NumericValuesSource valuesSource, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
+ final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
+ maxes = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), false);
+ maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert valuesSource != null : "collect should only be called when value source is not null";
+
+ DoubleValues values = valuesSource.doubleValues();
+ if (values == null) {
+ return;
+ }
+
+ if (owningBucketOrdinal >= maxes.size()) {
+ long from = maxes.size();
+ maxes = BigArrays.grow(maxes, owningBucketOrdinal + 1);
+ maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY);
+ }
+
+ final int valueCount = values.setDocument(doc);
+ double max = maxes.get(owningBucketOrdinal);
+ for (int i = 0; i < valueCount; i++) {
+ max = Math.max(max, values.nextValue());
+ }
+ maxes.set(owningBucketOrdinal, max);
+ }
+
+ @Override
+ public double metric(long owningBucketOrd) {
+ return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null) {
+ return new InternalMax(name, Double.NEGATIVE_INFINITY);
+ }
+ assert owningBucketOrdinal < maxes.size();
+ return new InternalMax(name, maxes.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalMax(name, Double.NEGATIVE_INFINITY);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<NumericValuesSource> {
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valuesSourceConfig) {
+ super(name, InternalMax.TYPE.name(), valuesSourceConfig);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new MaxAggregator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new MaxAggregator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(maxes);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxBuilder.java
new file mode 100644
index 0000000..b4dabd3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxBuilder.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.max;
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+
+/**
+ * Builder for the {@code max} aggregation.
+ */
+public class MaxBuilder extends ValuesSourceMetricsAggregationBuilder<MaxBuilder> {
+
+ public MaxBuilder(String name) {
+ super(name, InternalMax.TYPE.name());
+ }
+}
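Illustrative usage (a sketch, not part of the patch): assuming the AggregationBuilders factory and a connected Client from this same tree, with made-up index, field and aggregation names:

    SearchResponse response = client.prepareSearch("products")
            .addAggregation(AggregationBuilders.max("max_price").field("price"))
            .execute().actionGet();
    Max max = response.getAggregations().get("max_price");
    double maxPrice = max.getValue(); // Double.NEGATIVE_INFINITY if no values were collected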
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
new file mode 100644
index 0000000..102131f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.max;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregatorParser;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ * Parses a {@code max} aggregation request into a {@link MaxAggregator.Factory}.
+ */
+public class MaxParser extends ValuesSourceMetricsAggregatorParser<InternalMax> {
+
+ @Override
+ public String type() {
+ return InternalMax.TYPE.name();
+ }
+
+ @Override
+ protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config) {
+ return new MaxAggregator.Factory(aggregationName, config);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java
new file mode 100644
index 0000000..24c41e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.min;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The internal result of the {@code min} aggregation: the minimum of all collected values.
+ */
+public class InternalMin extends MetricsAggregation.SingleValue implements Min {
+
+ public final static Type TYPE = new Type("min");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalMin readResult(StreamInput in) throws IOException {
+ InternalMin result = new InternalMin();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ private double min;
+
+ InternalMin() {} // for serialization
+
+ public InternalMin(String name, double min) {
+ super(name);
+ this.min = min;
+ }
+
+ @Override
+ public double value() {
+ return min;
+ }
+
+ public double getValue() {
+ return min;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalMin reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ return (InternalMin) aggregations.get(0);
+ }
+ InternalMin reduced = null;
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ reduced = (InternalMin) aggregation;
+ } else {
+ reduced.min = Math.min(reduced.min, ((InternalMin) aggregation).min);
+ }
+ }
+ if (reduced != null) {
+ return reduced;
+ }
+ return (InternalMin) aggregations.get(0);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ valueFormatter = ValueFormatterStreams.readOptional(in);
+ min = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeDouble(min);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ boolean hasValue = !Double.isInfinite(min);
+ builder.field(CommonFields.VALUE, hasValue ? min : null);
+ if (hasValue && valueFormatter != null) {
+ builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(min));
+ }
+ builder.endObject();
+ return builder;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java
new file mode 100644
index 0000000..91967a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/Min.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.min;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+
+/**
+ * An aggregation that computes the minimum of the aggregated values.
+ */
+public interface Min extends Aggregation {
+
+ double getValue();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java
new file mode 100644
index 0000000..c5ae9f2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.min;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+
+/**
+ * Collects the per-bucket minimum of a numeric values source.
+ */
+public class MinAggregator extends MetricsAggregator.SingleValue {
+
+ private final NumericValuesSource valuesSource;
+
+ private DoubleArray mins;
+
+ public MinAggregator(String name, long estimatedBucketsCount, NumericValuesSource valuesSource, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
+ final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
+ mins = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), false);
+ mins.fill(0, mins.size(), Double.POSITIVE_INFINITY);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert valuesSource != null : "collect must only be called if #shouldCollect returns true";
+
+ DoubleValues values = valuesSource.doubleValues();
+ if (values == null || values.setDocument(doc) == 0) {
+ return;
+ }
+
+ if (owningBucketOrdinal >= mins.size()) {
+ long from = mins.size();
+ mins = BigArrays.grow(mins, owningBucketOrdinal + 1);
+ mins.fill(from, mins.size(), Double.POSITIVE_INFINITY);
+ }
+
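+ // Values arrive sorted ascending (MinParser#requiresSortedValues() returns true), so the first value is this document's minimum.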
+ mins.set(owningBucketOrdinal, Math.min(values.nextValue(), mins.get(owningBucketOrdinal)));
+ }
+
+ @Override
+ public double metric(long owningBucketOrd) {
+ return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null) {
+ return new InternalMin(name, Double.POSITIVE_INFINITY);
+ }
+ assert owningBucketOrdinal < mins.size();
+ return new InternalMin(name, mins.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalMin(name, Double.POSITIVE_INFINITY);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<NumericValuesSource> {
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valuesSourceConfig) {
+ super(name, InternalMin.TYPE.name(), valuesSourceConfig);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new MinAggregator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new MinAggregator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(mins);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinBuilder.java
new file mode 100644
index 0000000..0cec858
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinBuilder.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.min;
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+
+/**
+ * Builder for the {@code min} aggregation.
+ */
+public class MinBuilder extends ValuesSourceMetricsAggregationBuilder<MinBuilder> {
+
+ public MinBuilder(String name) {
+ super(name, InternalMin.TYPE.name());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
new file mode 100644
index 0000000..33d625e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.min;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregatorParser;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ * Parses a {@code min} aggregation request into a {@link MinAggregator.Factory}.
+ */
+public class MinParser extends ValuesSourceMetricsAggregatorParser<InternalMin> {
+
+ @Override
+ public String type() {
+ return InternalMin.TYPE.name();
+ }
+
+ @Override
+ protected boolean requiresSortedValues() {
+ return true;
+ }
+
+ @Override
+ protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config) {
+ return new MinAggregator.Factory(aggregationName, config);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
new file mode 100644
index 0000000..e5ca2bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The internal result of the {@code stats} aggregation: count, min, max, sum and avg of the collected values.
+ */
+public class InternalStats extends MetricsAggregation.MultiValue implements Stats {
+
+ public final static Type TYPE = new Type("stats");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalStats readResult(StreamInput in) throws IOException {
+ InternalStats result = new InternalStats();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ enum Metrics {
+
+ count, sum, min, max, avg;
+
+ public static Metrics resolve(String name) {
+ return Metrics.valueOf(name);
+ }
+ }
+
+ protected long count;
+ protected double min;
+ protected double max;
+ protected double sum;
+
+ protected InternalStats() {} // for serialization
+
+ public InternalStats(String name, long count, double sum, double min, double max) {
+ super(name);
+ this.count = count;
+ this.sum = sum;
+ this.min = min;
+ this.max = max;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public double getMin() {
+ return min;
+ }
+
+ @Override
+ public double getMax() {
+ return max;
+ }
+
+ @Override
+ public double getAvg() {
+ return sum / count;
+ }
+
+ @Override
+ public double getSum() {
+ return sum;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public double value(String name) {
+ Metrics metrics = Metrics.valueOf(name);
+ switch (metrics) {
+ case min: return this.min;
+ case max: return this.max;
+ case avg: return this.getAvg();
+ case count: return this.count;
+ case sum: return this.sum;
+ default:
+ throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
+ }
+ }
+
+ @Override
+ public InternalStats reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ return (InternalStats) aggregations.get(0);
+ }
+ InternalStats reduced = null;
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ if (((InternalStats) aggregation).count != 0) {
+ reduced = (InternalStats) aggregation;
+ }
+ } else {
+ if (((InternalStats) aggregation).count != 0) {
+ reduced.count += ((InternalStats) aggregation).count;
+ reduced.min = Math.min(reduced.min, ((InternalStats) aggregation).min);
+ reduced.max = Math.max(reduced.max, ((InternalStats) aggregation).max);
+ reduced.sum += ((InternalStats) aggregation).sum;
+ mergeOtherStats(reduced, aggregation);
+ }
+ }
+ }
+ if (reduced != null) {
+ return reduced;
+ }
+ return (InternalStats) aggregations.get(0);
+ }
+
+ protected void mergeOtherStats(InternalStats to, InternalAggregation from) {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ valueFormatter = ValueFormatterStreams.readOptional(in);
+ count = in.readVLong();
+ min = in.readDouble();
+ max = in.readDouble();
+ sum = in.readDouble();
+ readOtherStatsFrom(in);
+ }
+
+ public void readOtherStatsFrom(StreamInput in) throws IOException {
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeVLong(count);
+ out.writeDouble(min);
+ out.writeDouble(max);
+ out.writeDouble(sum);
+ writeOtherStatsTo(out);
+ }
+
+ protected void writeOtherStatsTo(StreamOutput out) throws IOException {
+ }
+
+ static class Fields {
+ public static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ public static final XContentBuilderString MIN = new XContentBuilderString("min");
+ public static final XContentBuilderString MIN_AS_STRING = new XContentBuilderString("min_as_string");
+ public static final XContentBuilderString MAX = new XContentBuilderString("max");
+ public static final XContentBuilderString MAX_AS_STRING = new XContentBuilderString("max_as_string");
+ public static final XContentBuilderString AVG = new XContentBuilderString("avg");
+ public static final XContentBuilderString AVG_AS_STRING = new XContentBuilderString("avg_as_string");
+ public static final XContentBuilderString SUM = new XContentBuilderString("sum");
+ public static final XContentBuilderString SUM_AS_STRING = new XContentBuilderString("sum_as_string");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.field(Fields.COUNT, count);
+ builder.field(Fields.MIN, count != 0 ? min : null);
+ builder.field(Fields.MAX, count != 0 ? max : null);
+ builder.field(Fields.AVG, count != 0 ? getAvg() : null);
+ builder.field(Fields.SUM, count != 0 ? sum : null);
+ if (count != 0 && valueFormatter != null) {
+ builder.field(Fields.MIN_AS_STRING, valueFormatter.format(min));
+ builder.field(Fields.MAX_AS_STRING, valueFormatter.format(max));
+ builder.field(Fields.AVG_AS_STRING, valueFormatter.format(getAvg()));
+ builder.field(Fields.SUM_AS_STRING, valueFormatter.format(sum));
+ }
+ otherStatsToXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ protected XContentBuilder otherStatsToXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java
new file mode 100644
index 0000000..b093c59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/Stats.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+
+/**
+ * Statistics over a set of values (either aggregated over field data or scripts)
+ */
+public interface Stats extends Aggregation {
+
+ /**
+ * @return The number of values that were aggregated
+ */
+ long getCount();
+
+ /**
+ * @return The minimum value of all aggregated values.
+ */
+ double getMin();
+
+ /**
+ * @return The maximum value of all aggregated values.
+ */
+ double getMax();
+
+ /**
+ * @return The average value over all aggregated values.
+ */
+ double getAvg();
+
+ /**
+ * @return The sum of aggregated values.
+ */
+ double getSum();
+
+}
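A sketch of consuming this interface from a search response, under the same assumptions as the max example above (client API from this tree, made-up names):

    SearchResponse response = client.prepareSearch("products")
            .addAggregation(AggregationBuilders.stats("price_stats").field("price"))
            .execute().actionGet();
    Stats stats = response.getAggregations().get("price_stats");
    double avg = stats.getAvg(); // getSum() / getCount(), hence NaN when getCount() == 0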
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java
new file mode 100644
index 0000000..37600cb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+
+/**
+ * Collects per-bucket count, sum, min and max of a numeric values source in a single pass.
+ */
+public class StatsAggegator extends MetricsAggregator.MultiValue {
+
+ private final NumericValuesSource valuesSource;
+
+ private LongArray counts;
+ private DoubleArray sums;
+ private DoubleArray mins;
+ private DoubleArray maxes;
+
+ public StatsAggegator(String name, long estimatedBucketsCount, NumericValuesSource valuesSource, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
+ final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
+ counts = BigArrays.newLongArray(initialSize, context.pageCacheRecycler(), true);
+ sums = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), true);
+ mins = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), false);
+ mins.fill(0, mins.size(), Double.POSITIVE_INFINITY);
+ maxes = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), false);
+ maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert valuesSource != null : "collect must only be called if #shouldCollect returns true";
+
+ DoubleValues values = valuesSource.doubleValues();
+ if (values == null) {
+ return;
+ }
+
+ if (owningBucketOrdinal >= counts.size()) {
+ final long from = counts.size();
+ final long overSize = BigArrays.overSize(owningBucketOrdinal + 1);
+ counts = BigArrays.resize(counts, overSize);
+ sums = BigArrays.resize(sums, overSize);
+ mins = BigArrays.resize(mins, overSize);
+ maxes = BigArrays.resize(maxes, overSize);
+ mins.fill(from, overSize, Double.POSITIVE_INFINITY);
+ maxes.fill(from, overSize, Double.NEGATIVE_INFINITY);
+ }
+
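+ // One pass over this document's values: bump the count, accumulate the sum, and fold in min and max.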
+ final int valuesCount = values.setDocument(doc);
+ counts.increment(owningBucketOrdinal, valuesCount);
+ double sum = 0;
+ double min = mins.get(owningBucketOrdinal);
+ double max = maxes.get(owningBucketOrdinal);
+ for (int i = 0; i < valuesCount; i++) {
+ double value = values.nextValue();
+ sum += value;
+ min = Math.min(min, value);
+ max = Math.max(max, value);
+ }
+ sums.increment(owningBucketOrdinal, sum);
+ mins.set(owningBucketOrdinal, min);
+ maxes.set(owningBucketOrdinal, max);
+ }
+
+ @Override
+ public boolean hasMetric(String name) {
+ try {
+ InternalStats.Metrics.resolve(name);
+ return true;
+ } catch (IllegalArgumentException iae) {
+ return false;
+ }
+ }
+
+ @Override
+ public double metric(String name, long owningBucketOrd) {
+ switch(InternalStats.Metrics.resolve(name)) {
+ case count: return valuesSource == null ? 0 : counts.get(owningBucketOrd);
+ case sum: return valuesSource == null ? 0 : sums.get(owningBucketOrd);
+ case min: return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd);
+ case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
+ case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
+ default:
+ throw new ElasticsearchIllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
+ }
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null) {
+ return new InternalStats(name, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ }
+ assert owningBucketOrdinal < counts.size();
+ return new InternalStats(name, counts.get(owningBucketOrdinal), sums.get(owningBucketOrdinal), mins.get(owningBucketOrdinal), maxes.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalStats(name, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<NumericValuesSource> {
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valuesSourceConfig) {
+ super(name, InternalStats.TYPE.name(), valuesSourceConfig);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new StatsAggegator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new StatsAggegator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(counts, maxes, mins, sums);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsBuilder.java
new file mode 100644
index 0000000..97b5fe5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsBuilder.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.stats;
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+
+/**
+ * Builder for the {@code stats} aggregation.
+ */
+public class StatsBuilder extends ValuesSourceMetricsAggregationBuilder<StatsBuilder> {
+
+ public StatsBuilder(String name) {
+ super(name, InternalStats.TYPE.name());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java
new file mode 100644
index 0000000..60908cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregatorParser;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ * Parses a {@code stats} aggregation request into a {@link StatsAggegator.Factory}.
+ */
+public class StatsParser extends ValuesSourceMetricsAggregatorParser<InternalStats> {
+
+ @Override
+ public String type() {
+ return InternalStats.TYPE.name();
+ }
+
+ @Override
+ protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config) {
+ return new StatsAggegator.Factory(aggregationName, config);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java
new file mode 100644
index 0000000..954b3af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStats.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats.extended;
+
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+
+/**
+ * Statistics over a set of values (either aggregated over field data or scripts),
+ * extended with the sum of squares, variance and standard deviation.
+ */
+public interface ExtendedStats extends Stats {
+
+ double getSumOfSquares();
+
+ double getVariance();
+
+ double getStdDeviation();
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
new file mode 100644
index 0000000..c046288
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats.extended;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+
+/**
+ * Collects per-bucket count, sum, min, max and sum of squares of a numeric values source in a single pass.
+ */
+public class ExtendedStatsAggregator extends MetricsAggregator.MultiValue {
+
+ private final NumericValuesSource valuesSource;
+
+ private LongArray counts;
+ private DoubleArray sums;
+ private DoubleArray mins;
+ private DoubleArray maxes;
+ private DoubleArray sumOfSqrs;
+
+ public ExtendedStatsAggregator(String name, long estimatedBucketsCount, NumericValuesSource valuesSource, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
+ final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
+ counts = BigArrays.newLongArray(initialSize, context.pageCacheRecycler(), true);
+ sums = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), true);
+ mins = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), false);
+ mins.fill(0, mins.size(), Double.POSITIVE_INFINITY);
+ maxes = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), false);
+ maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY);
+ sumOfSqrs = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), true);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert valuesSource != null : "collect must only be called if #shouldCollect returns true";
+
+ DoubleValues values = valuesSource.doubleValues();
+ if (values == null) {
+ return;
+ }
+
+ if (owningBucketOrdinal >= counts.size()) {
+ final long from = counts.size();
+ final long overSize = BigArrays.overSize(owningBucketOrdinal + 1);
+ counts = BigArrays.resize(counts, overSize);
+ sums = BigArrays.resize(sums, overSize);
+ mins = BigArrays.resize(mins, overSize);
+ maxes = BigArrays.resize(maxes, overSize);
+ sumOfSqrs = BigArrays.resize(sumOfSqrs, overSize);
+ mins.fill(from, overSize, Double.POSITIVE_INFINITY);
+ maxes.fill(from, overSize, Double.NEGATIVE_INFINITY);
+ }
+
+ final int valuesCount = values.setDocument(doc);
+ counts.increment(owningBucketOrdinal, valuesCount);
+ double sum = 0;
+ double sumOfSqr = 0;
+ double min = mins.get(owningBucketOrdinal);
+ double max = maxes.get(owningBucketOrdinal);
+ for (int i = 0; i < valuesCount; i++) {
+ double value = values.nextValue();
+ sum += value;
+ sumOfSqr += value * value;
+ min = Math.min(min, value);
+ max = Math.max(max, value);
+ }
+ sums.increment(owningBucketOrdinal, sum);
+ sumOfSqrs.increment(owningBucketOrdinal, sumOfSqr);
+ mins.set(owningBucketOrdinal, min);
+ maxes.set(owningBucketOrdinal, max);
+ }
+
+ @Override
+ public boolean hasMetric(String name) {
+ try {
+ InternalExtendedStats.Metrics.resolve(name);
+ return true;
+ } catch (IllegalArgumentException iae) {
+ return false;
+ }
+ }
+
+ @Override
+ public double metric(String name, long owningBucketOrd) {
+ switch(InternalExtendedStats.Metrics.resolve(name)) {
+ case count: return valuesSource == null ? 0 : counts.get(owningBucketOrd);
+ case sum: return valuesSource == null ? 0 : sums.get(owningBucketOrd);
+ case min: return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd);
+ case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
+ case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
+ case sum_of_squares: return valuesSource == null ? 0 : sumOfSqrs.get(owningBucketOrd);
+ case variance: return valuesSource == null ? Double.NaN : variance(owningBucketOrd);
+ case std_deviation: return valuesSource == null ? Double.NaN : Math.sqrt(variance(owningBucketOrd));
+ default:
+ throw new ElasticsearchIllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
+ }
+ }
+
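+ // Population variance: E[x^2] - (E[x])^2, derived from the running sum and sum of squares.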
+ private double variance(long owningBucketOrd) {
+ double sum = sums.get(owningBucketOrd);
+ long count = counts.get(owningBucketOrd);
+ return (sumOfSqrs.get(owningBucketOrd) - ((sum * sum) / count)) / count;
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null) {
+ return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d);
+ }
+ assert owningBucketOrdinal < counts.size();
+ return new InternalExtendedStats(name, counts.get(owningBucketOrdinal), sums.get(owningBucketOrdinal), mins.get(owningBucketOrdinal),
+ maxes.get(owningBucketOrdinal), sumOfSqrs.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d);
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(counts, maxes, mins, sumOfSqrs, sums);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<NumericValuesSource> {
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valuesSourceConfig) {
+ super(name, InternalExtendedStats.TYPE.name(), valuesSourceConfig);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new ExtendedStatsAggregator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new ExtendedStatsAggregator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsBuilder.java
new file mode 100644
index 0000000..732cd96
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsBuilder.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.stats.extended;
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+
+/**
+ * Builder for the {@code extended_stats} aggregation.
+ */
+public class ExtendedStatsBuilder extends ValuesSourceMetricsAggregationBuilder<ExtendedStatsBuilder> {
+
+ public ExtendedStatsBuilder(String name) {
+ super(name, InternalExtendedStats.TYPE.name());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
new file mode 100644
index 0000000..607efc9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats.extended;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregatorParser;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ * Parses an {@code extended_stats} aggregation request into an {@link ExtendedStatsAggregator.Factory}.
+ */
+public class ExtendedStatsParser extends ValuesSourceMetricsAggregatorParser<InternalExtendedStats> {
+
+ @Override
+ public String type() {
+ return InternalExtendedStats.TYPE.name();
+ }
+
+ @Override
+ protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config) {
+ return new ExtendedStatsAggregator.Factory(aggregationName, config);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
new file mode 100644
index 0000000..c555243
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.stats.extended;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
+
+import java.io.IOException;
+
+/**
+ * The internal result of the {@code extended_stats} aggregation: the basic stats plus sum of squares, variance and standard deviation.
+ */
+public class InternalExtendedStats extends InternalStats implements ExtendedStats {
+
+ public final static Type TYPE = new Type("extended_stats", "estats");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalExtendedStats readResult(StreamInput in) throws IOException {
+ InternalExtendedStats result = new InternalExtendedStats();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ enum Metrics {
+
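+        // constant names mirror the JSON keys of the response (hence the lower-case, underscored naming)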
+ count, sum, min, max, avg, sum_of_squares, variance, std_deviation;
+
+ public static Metrics resolve(String name) {
+ return Metrics.valueOf(name);
+ }
+ }
+
+ private double sumOfSqrs;
+
+ InternalExtendedStats() {} // for serialization
+
+ public InternalExtendedStats(String name, long count, double sum, double min, double max, double sumOfSqrs) {
+ super(name, count, sum, min, max);
+ this.sumOfSqrs = sumOfSqrs;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public double value(String name) {
+ if ("sum_of_squares".equals(name)) {
+ return sumOfSqrs;
+ }
+ if ("variance".equals(name)) {
+ return getVariance();
+ }
+ if ("std_deviation".equals(name)) {
+ return getStdDeviation();
+ }
+ return super.value(name);
+ }
+
+ @Override
+ public double getSumOfSquares() {
+ return sumOfSqrs;
+ }
+
+ @Override
+ public double getVariance() {
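+        // population variance: E[X^2] - E[X]^2, computed as (sumOfSqrs - sum*sum/count) / count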
+ return (sumOfSqrs - ((sum * sum) / count)) / count;
+ }
+
+ @Override
+ public double getStdDeviation() {
+ return Math.sqrt(getVariance());
+ }
+
+ @Override
+ protected void mergeOtherStats(InternalStats to, InternalAggregation from) {
+ ((InternalExtendedStats) to).sumOfSqrs += ((InternalExtendedStats) from).sumOfSqrs;
+ }
+
+ @Override
+ public void readOtherStatsFrom(StreamInput in) throws IOException {
+ sumOfSqrs = in.readDouble();
+ }
+
+ @Override
+ protected void writeOtherStatsTo(StreamOutput out) throws IOException {
+ out.writeDouble(sumOfSqrs);
+ }
+
+ static class Fields {
+ public static final XContentBuilderString SUM_OF_SQRS = new XContentBuilderString("sum_of_squares");
+ public static final XContentBuilderString SUM_OF_SQRS_AS_STRING = new XContentBuilderString("sum_of_squares_as_string");
+ public static final XContentBuilderString VARIANCE = new XContentBuilderString("variance");
+ public static final XContentBuilderString VARIANCE_AS_STRING = new XContentBuilderString("variance_as_string");
+ public static final XContentBuilderString STD_DEVIATION = new XContentBuilderString("std_deviation");
+ public static final XContentBuilderString STD_DEVIATION_AS_STRING = new XContentBuilderString("std_deviation_as_string");
+ }
+
+ @Override
+ protected XContentBuilder otherStatsToXCotent(XContentBuilder builder, Params params) throws IOException {
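+        // when no documents matched, emit explicit nulls instead of NaN for the derived stats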
+ builder.field(Fields.SUM_OF_SQRS, count != 0 ? sumOfSqrs : null);
+ builder.field(Fields.VARIANCE, count != 0 ? getVariance() : null);
+ builder.field(Fields.STD_DEVIATION, count != 0 ? getStdDeviation() : null);
+ if (count != 0 && valueFormatter != null) {
+ builder.field(Fields.SUM_OF_SQRS_AS_STRING, valueFormatter.format(sumOfSqrs));
+ builder.field(Fields.VARIANCE_AS_STRING, valueFormatter.format(getVariance()));
+ builder.field(Fields.STD_DEVIATION_AS_STRING, valueFormatter.format(getStdDeviation()));
+ }
+ return builder;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java
new file mode 100644
index 0000000..f223a1c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.sum;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Result of the {@code sum} aggregation.
+ */
+public class InternalSum extends MetricsAggregation.SingleValue implements Sum {
+
+ public final static Type TYPE = new Type("sum");
+
+ public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalSum readResult(StreamInput in) throws IOException {
+ InternalSum result = new InternalSum();
+ result.readFrom(in);
+ return result;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ private double sum;
+
+ InternalSum() {} // for serialization
+
+ InternalSum(String name, double sum) {
+ super(name);
+ this.sum = sum;
+ }
+
+ @Override
+ public double value() {
+ return sum;
+ }
+
+ public double getValue() {
+ return sum;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalSum reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ return (InternalSum) aggregations.get(0);
+ }
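+        // fold all shard-level sums into the first aggregation, reusing it as the accumulator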
+ InternalSum reduced = null;
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ reduced = (InternalSum) aggregation;
+ } else {
+ reduced.sum += ((InternalSum) aggregation).sum;
+ }
+ }
+ if (reduced != null) {
+ return reduced;
+ }
+ return (InternalSum) aggregations.get(0);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ valueFormatter = ValueFormatterStreams.readOptional(in);
+ sum = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ ValueFormatterStreams.writeOptional(valueFormatter, out);
+ out.writeDouble(sum);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder.field(CommonFields.VALUE, sum);
+ if (valueFormatter != null) {
+ builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(sum));
+ }
+ builder.endObject();
+ return builder;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java
new file mode 100644
index 0000000..81a652b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/Sum.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.sum;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+
+/**
+ * An aggregation that sums up the numeric values gathered from the aggregated documents.
+ */
+public interface Sum extends Aggregation {
+
+ double getValue();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java
new file mode 100644
index 0000000..7219985
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.sum;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+import java.io.IOException;
+
+/**
+ * An aggregator that computes, per bucket, the sum of the values of a numeric field or script.
+ */
+public class SumAggregator extends MetricsAggregator.SingleValue {
+
+ private final NumericValuesSource valuesSource;
+
+ private DoubleArray sums;
+
+ public SumAggregator(String name, long estimatedBucketsCount, NumericValuesSource valuesSource, AggregationContext context, Aggregator parent) {
+ super(name, estimatedBucketsCount, context, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
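+            // estimatedBucketsCount == 0 means this is a top-level aggregator, so allocate at least one slot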
+ final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
+ sums = BigArrays.newDoubleArray(initialSize, context.pageCacheRecycler(), true);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ assert valuesSource != null : "collect must only be called after #shouldCollect returns true";
+
+ DoubleValues values = valuesSource.doubleValues();
+ if (values == null) {
+ return;
+ }
+
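+        // as a sub-aggregator this sees one ordinal per bucket of its parent, so grow the array on demand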
+ sums = BigArrays.grow(sums, owningBucketOrdinal + 1);
+
+ final int valuesCount = values.setDocument(doc);
+ double sum = 0;
+ for (int i = 0; i < valuesCount; i++) {
+ sum += values.nextValue();
+ }
+ sums.increment(owningBucketOrdinal, sum);
+ }
+
+ @Override
+ public double metric(long owningBucketOrd) {
+ return valuesSource == null ? 0 : sums.get(owningBucketOrd);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null) {
+ return new InternalSum(name, 0);
+ }
+ return new InternalSum(name, sums.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return new InternalSum(name, 0.0);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<NumericValuesSource> {
+
+ public Factory(String name, ValuesSourceConfig<NumericValuesSource> valuesSourceConfig) {
+ super(name, InternalSum.TYPE.name(), valuesSourceConfig);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new SumAggregator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(NumericValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new SumAggregator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(sums);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumBuilder.java
new file mode 100644
index 0000000..04beafe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumBuilder.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.sum;
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+
+/**
+ * Builder for the {@code sum} aggregation.
+ */
+public class SumBuilder extends ValuesSourceMetricsAggregationBuilder<SumBuilder> {
+
+ public SumBuilder(String name) {
+ super(name, InternalSum.TYPE.name());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
new file mode 100644
index 0000000..6d468ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.sum;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregatorParser;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ * Parses the {@code sum} aggregation request and creates its aggregator factory.
+ */
+public class SumParser extends ValuesSourceMetricsAggregatorParser<InternalSum> {
+
+ @Override
+ public String type() {
+ return InternalSum.TYPE.name();
+ }
+
+ @Override
+ protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config) {
+ return new SumAggregator.Factory(aggregationName, config);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
new file mode 100644
index 0000000..057866d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.valuecount;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregation;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * An internal implementation of {@link ValueCount}.
+ */
+public class InternalValueCount extends MetricsAggregation implements ValueCount {
+
+ public static final Type TYPE = new Type("value_count", "vcount");
+
+ private static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+ @Override
+ public InternalValueCount readResult(StreamInput in) throws IOException {
+ InternalValueCount count = new InternalValueCount();
+ count.readFrom(in);
+ return count;
+ }
+ };
+
+ public static void registerStreams() {
+ AggregationStreams.registerStream(STREAM, TYPE.stream());
+ }
+
+ private long value;
+
+ InternalValueCount() {} // for serialization
+
+ public InternalValueCount(String name, long value) {
+ super(name);
+ this.value = value;
+ }
+
+ @Override
+ public long getValue() {
+ return value;
+ }
+
+ @Override
+ public Type type() {
+ return TYPE;
+ }
+
+ @Override
+ public InternalAggregation reduce(ReduceContext reduceContext) {
+ List<InternalAggregation> aggregations = reduceContext.aggregations();
+ if (aggregations.size() == 1) {
+ return aggregations.get(0);
+ }
+ InternalValueCount reduced = null;
+ for (InternalAggregation aggregation : aggregations) {
+ if (reduced == null) {
+ reduced = (InternalValueCount) aggregation;
+ } else {
+ reduced.value += ((InternalValueCount) aggregation).value;
+ }
+ }
+ return reduced;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ value = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVLong(value);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject(name)
+ .field(CommonFields.VALUE, value)
+ .endObject();
+ }
+
+ @Override
+ public String toString() {
+ return "count[" + value + "]";
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java
new file mode 100644
index 0000000..1863c7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCount.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.valuecount;
+
+import org.elasticsearch.search.aggregations.Aggregation;
+
+/**
+ * An aggregation that holds the number of <strong>values</strong> that the current document set has for a specific
+ * field.
+ */
+public interface ValueCount extends Aggregation {
+
+ /**
+ * @return The count
+ */
+ long getValue();
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java
new file mode 100644
index 0000000..618bd8c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.valuecount;
+
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+
+import java.io.IOException;
+
+/**
+ * A field data based aggregator that counts the number of values a specific field has within the aggregation context.
+ *
+ * This aggregator works in multi-bucket mode; that is, when it serves as a sub-aggregator, a single aggregator
+ * instance aggregates the counts for all buckets owned by the parent aggregator.
+ */
+public class ValueCountAggregator extends MetricsAggregator.SingleValue {
+
+ private final BytesValuesSource valuesSource;
+
+ // a count per bucket
+ LongArray counts;
+
+ public ValueCountAggregator(String name, long expectedBucketsCount, BytesValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent) {
+ super(name, 0, aggregationContext, parent);
+ this.valuesSource = valuesSource;
+ if (valuesSource != null) {
+ // expectedBucketsCount == 0 means it's a top level bucket
+ final long initialSize = expectedBucketsCount < 2 ? 1 : expectedBucketsCount;
+ counts = BigArrays.newLongArray(initialSize, context.pageCacheRecycler(), true);
+ }
+ }
+
+ @Override
+ public boolean shouldCollect() {
+ return valuesSource != null;
+ }
+
+ @Override
+ public void collect(int doc, long owningBucketOrdinal) throws IOException {
+ BytesValues values = valuesSource.bytesValues();
+ if (values == null) {
+ return;
+ }
+ counts = BigArrays.grow(counts, owningBucketOrdinal + 1);
+ counts.increment(owningBucketOrdinal, values.setDocument(doc));
+ }
+
+ @Override
+ public double metric(long owningBucketOrd) {
+ return valuesSource == null ? 0 : counts.get(owningBucketOrd);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long owningBucketOrdinal) {
+ if (valuesSource == null) {
+ return new InternalValueCount(name, 0);
+ }
+ assert owningBucketOrdinal < counts.size();
+ return new InternalValueCount(name, counts.get(owningBucketOrdinal));
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+        return new InternalValueCount(name, 0L);
+ }
+
+ @Override
+ public void doRelease() {
+ Releasables.release(counts);
+ }
+
+ public static class Factory extends ValueSourceAggregatorFactory.LeafOnly<BytesValuesSource> {
+
+ public Factory(String name, ValuesSourceConfig<BytesValuesSource> valuesSourceBuilder) {
+ super(name, InternalValueCount.TYPE.name(), valuesSourceBuilder);
+ }
+
+ @Override
+ protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
+ return new ValueCountAggregator(name, 0, null, aggregationContext, parent);
+ }
+
+ @Override
+ protected Aggregator create(BytesValuesSource valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent) {
+ return new ValueCountAggregator(name, expectedBucketsCount, valuesSource, aggregationContext, parent);
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountBuilder.java
new file mode 100644
index 0000000..c67cbc8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountBuilder.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.valuecount;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregationBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for the {@code value_count} aggregation.
+ */
+public class ValueCountBuilder extends MetricsAggregationBuilder<ValueCountBuilder> {
+
+ private String field;
+
+ public ValueCountBuilder(String name) {
+ super(name, InternalValueCount.TYPE.name());
+ }
+
+ public ValueCountBuilder field(String field) {
+ this.field = field;
+ return this;
+ }
+
+ @Override
+ protected void internalXContent(XContentBuilder builder, Params params) throws IOException {
+ if (field != null) {
+ builder.field("field", field);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
new file mode 100644
index 0000000..b0006ac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.valuecount;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.support.FieldContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * Parses the {@code value_count} aggregation request and creates its aggregator factory.
+ */
+public class ValueCountParser implements Aggregator.Parser {
+
+ @Override
+ public String type() {
+ return InternalValueCount.TYPE.name();
+ }
+
+ @Override
+ public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
+
+ ValuesSourceConfig<BytesValuesSource> config = new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class);
+
+ String field = null;
+
+ XContentParser.Token token;
+ String currentFieldName = null;
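+        // walk the aggregation body; "field" is the only key this parser accepts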
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else {
+ throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+ }
+ } else {
+ throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+ }
+ }
+
+ if (field == null) {
+ return new ValueCountAggregator.Factory(aggregationName, config);
+ }
+
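+        // resolve the field against the mappings; an unknown field yields an "unmapped" (empty) aggregation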
+ FieldMapper<?> mapper = context.smartNameFieldMapper(field);
+ if (mapper == null) {
+ config.unmapped(true);
+ return new ValueCountAggregator.Factory(aggregationName, config);
+ }
+
+ IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
+ config.fieldContext(new FieldContext(field, indexFieldData));
+ return new ValueCountAggregator.Factory(aggregationName, config);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
new file mode 100644
index 0000000..d5da18a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lucene.ReaderContextAware;
+import org.elasticsearch.common.lucene.ScorerAware;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+import org.elasticsearch.search.aggregations.support.geopoints.GeoPointValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Per-search-request context shared by all aggregators: caches field data sources per tree depth
+ * and propagates reader and scorer changes to every registered consumer.
+ */
+@SuppressWarnings({"unchecked", "ForLoopReplaceableByForEach"})
+public class AggregationContext implements ReaderContextAware, ScorerAware {
+
+ private final SearchContext searchContext;
+
+ private ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource>[] perDepthFieldDataSources = new ObjectObjectOpenHashMap[4];
+ private List<ReaderContextAware> readerAwares = new ArrayList<ReaderContextAware>();
+ private List<ScorerAware> scorerAwares = new ArrayList<ScorerAware>();
+
+ private AtomicReaderContext reader;
+ private Scorer scorer;
+
+ public AggregationContext(SearchContext searchContext) {
+ this.searchContext = searchContext;
+ }
+
+ public SearchContext searchContext() {
+ return searchContext;
+ }
+
+ public CacheRecycler cacheRecycler() {
+ return searchContext.cacheRecycler();
+ }
+
+ public PageCacheRecycler pageCacheRecycler() {
+ return searchContext.pageCacheRecycler();
+ }
+
+ public AtomicReaderContext currentReader() {
+ return reader;
+ }
+
+ public Scorer currentScorer() {
+ return scorer;
+ }
+
+ public void setNextReader(AtomicReaderContext reader) {
+ this.reader = reader;
+ for (ReaderContextAware aware : readerAwares) {
+ aware.setNextReader(reader);
+ }
+ }
+
+ public void setScorer(Scorer scorer) {
+ this.scorer = scorer;
+ for (ScorerAware scorerAware : scorerAwares) {
+ scorerAware.setScorer(scorer);
+ }
+ }
+
+ /** Get a value source given its configuration and the depth of the aggregator in the aggregation tree. */
+ public <VS extends ValuesSource> VS valuesSource(ValuesSourceConfig<VS> config, int depth) {
+ assert config.valid() : "value source config is invalid - must have either a field context or a script or marked as unmapped";
+ assert !config.unmapped : "value source should not be created for unmapped fields";
+
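+        // field data sources are cached per tree depth, so this array grows with the depth of the aggregation tree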
+ if (perDepthFieldDataSources.length <= depth) {
+ perDepthFieldDataSources = Arrays.copyOf(perDepthFieldDataSources, ArrayUtil.oversize(1 + depth, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
+ }
+ if (perDepthFieldDataSources[depth] == null) {
+ perDepthFieldDataSources[depth] = new ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource>();
+ }
+ final ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources = perDepthFieldDataSources[depth];
+
+ if (config.fieldContext == null) {
+ if (NumericValuesSource.class.isAssignableFrom(config.valueSourceType)) {
+ return (VS) numericScript(config);
+ }
+ if (BytesValuesSource.class.isAssignableFrom(config.valueSourceType)) {
+ return (VS) bytesScript(config);
+ }
+ throw new AggregationExecutionException("value source of type [" + config.valueSourceType.getSimpleName() + "] is not supported by scripts");
+ }
+
+ if (NumericValuesSource.class.isAssignableFrom(config.valueSourceType)) {
+ return (VS) numericField(fieldDataSources, config);
+ }
+ if (GeoPointValuesSource.class.isAssignableFrom(config.valueSourceType)) {
+ return (VS) geoPointField(fieldDataSources, config);
+ }
+ // falling back to bytes values
+ return (VS) bytesField(fieldDataSources, config);
+ }
+
+ private NumericValuesSource numericScript(ValuesSourceConfig<?> config) {
+ setScorerIfNeeded(config.script);
+ setReaderIfNeeded(config.script);
+ scorerAwares.add(config.script);
+ readerAwares.add(config.script);
+ FieldDataSource.Numeric source = new FieldDataSource.Numeric.Script(config.script, config.scriptValueType);
+ if (config.ensureUnique || config.ensureSorted) {
+ source = new FieldDataSource.Numeric.SortedAndUnique(source);
+ readerAwares.add((ReaderContextAware) source);
+ }
+ return new NumericValuesSource(source, config.formatter(), config.parser());
+ }
+
+ private NumericValuesSource numericField(ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {
+ final ConfigCacheKey cacheKey = new ConfigCacheKey(config);
+ FieldDataSource.Numeric dataSource = (FieldDataSource.Numeric) fieldDataSources.get(cacheKey);
+ if (dataSource == null) {
+ FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);
+ dataSource = new FieldDataSource.Numeric.FieldData((IndexNumericFieldData<?>) config.fieldContext.indexFieldData(), metaData);
+ setReaderIfNeeded((ReaderContextAware) dataSource);
+ readerAwares.add((ReaderContextAware) dataSource);
+ fieldDataSources.put(cacheKey, dataSource);
+ }
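+        // a value script transforms the field's values, so layer it on top of the field data source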
+ if (config.script != null) {
+ setScorerIfNeeded(config.script);
+ setReaderIfNeeded(config.script);
+ scorerAwares.add(config.script);
+ readerAwares.add(config.script);
+ dataSource = new FieldDataSource.Numeric.WithScript(dataSource, config.script);
+
+ if (config.ensureUnique || config.ensureSorted) {
+ dataSource = new FieldDataSource.Numeric.SortedAndUnique(dataSource);
+ readerAwares.add((ReaderContextAware) dataSource);
+ }
+ }
+ if (config.needsHashes) {
+ dataSource.setNeedsHashes(true);
+ }
+ return new NumericValuesSource(dataSource, config.formatter(), config.parser());
+ }
+
+ private ValuesSource bytesField(ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {
+ final ConfigCacheKey cacheKey = new ConfigCacheKey(config);
+ FieldDataSource dataSource = fieldDataSources.get(cacheKey);
+ if (dataSource == null) {
+ final IndexFieldData<?> indexFieldData = config.fieldContext.indexFieldData();
+ FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);
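+            // prefer the ordinals-based source when the field data exposes ordinals, so consumers can work on ordinals rather than raw bytes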
+ if (indexFieldData instanceof IndexFieldData.WithOrdinals) {
+ dataSource = new FieldDataSource.Bytes.WithOrdinals.FieldData((IndexFieldData.WithOrdinals) indexFieldData, metaData);
+ } else {
+ dataSource = new FieldDataSource.Bytes.FieldData(indexFieldData, metaData);
+ }
+ setReaderIfNeeded((ReaderContextAware) dataSource);
+ readerAwares.add((ReaderContextAware) dataSource);
+ fieldDataSources.put(cacheKey, dataSource);
+ }
+ if (config.script != null) {
+ setScorerIfNeeded(config.script);
+ setReaderIfNeeded(config.script);
+ scorerAwares.add(config.script);
+ readerAwares.add(config.script);
+ dataSource = new FieldDataSource.WithScript(dataSource, config.script);
+ }
+        // Even when we wrap field data we might still need to wrap for sorting, because the wrapped field data might be,
+        // e.g., numeric field data that doesn't sort according to byte order. However, field data values are unique, so
+        // there is no need to wrap for uniqueness.
+ if ((config.ensureUnique && !dataSource.metaData().uniqueness().unique()) || config.ensureSorted) {
+ dataSource = new FieldDataSource.Bytes.SortedAndUnique(dataSource);
+ readerAwares.add((ReaderContextAware) dataSource);
+ }
+
+ if (config.needsHashes) { // the data source needs hash if at least one consumer needs hashes
+ dataSource.setNeedsHashes(true);
+ }
+ if (dataSource instanceof FieldDataSource.Bytes.WithOrdinals) {
+ return new BytesValuesSource.WithOrdinals((FieldDataSource.Bytes.WithOrdinals) dataSource);
+ } else {
+ return new BytesValuesSource(dataSource);
+ }
+ }
+
+ private BytesValuesSource bytesScript(ValuesSourceConfig<?> config) {
+ setScorerIfNeeded(config.script);
+ setReaderIfNeeded(config.script);
+ scorerAwares.add(config.script);
+ readerAwares.add(config.script);
+ FieldDataSource.Bytes source = new FieldDataSource.Bytes.Script(config.script);
+ if (config.ensureUnique || config.ensureSorted) {
+ source = new FieldDataSource.Bytes.SortedAndUnique(source);
+ readerAwares.add((ReaderContextAware) source);
+ }
+ return new BytesValuesSource(source);
+ }
+
+ private GeoPointValuesSource geoPointField(ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {
+ final ConfigCacheKey cacheKey = new ConfigCacheKey(config);
+ FieldDataSource.GeoPoint dataSource = (FieldDataSource.GeoPoint) fieldDataSources.get(cacheKey);
+ if (dataSource == null) {
+ FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);
+ dataSource = new FieldDataSource.GeoPoint((IndexGeoPointFieldData<?>) config.fieldContext.indexFieldData(), metaData);
+ setReaderIfNeeded(dataSource);
+ readerAwares.add(dataSource);
+ fieldDataSources.put(cacheKey, dataSource);
+ }
+ if (config.needsHashes) {
+ dataSource.setNeedsHashes(true);
+ }
+ return new GeoPointValuesSource(dataSource);
+ }
+
+ public void registerReaderContextAware(ReaderContextAware readerContextAware) {
+ setReaderIfNeeded(readerContextAware);
+ readerAwares.add(readerContextAware);
+ }
+
+ public void registerScorerAware(ScorerAware scorerAware) {
+ setScorerIfNeeded(scorerAware);
+ scorerAwares.add(scorerAware);
+ }
+
+ private void setReaderIfNeeded(ReaderContextAware readerAware) {
+ if (reader != null) {
+ readerAware.setNextReader(reader);
+ }
+ }
+
+ private void setScorerIfNeeded(ScorerAware scorerAware) {
+ if (scorer != null) {
+ scorerAware.setScorer(scorer);
+ }
+ }
+
+ private static class ConfigCacheKey {
+
+ private final String field;
+ private final Class<? extends ValuesSource> valueSourceType;
+
+ private ConfigCacheKey(ValuesSourceConfig config) {
+ this.field = config.fieldContext.field();
+ this.valueSourceType = config.valueSourceType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ ConfigCacheKey that = (ConfigCacheKey) o;
+
+ if (!field.equals(that.field)) return false;
+ if (!valueSourceType.equals(that.valueSourceType)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = field.hashCode();
+ result = 31 * result + valueSourceType.hashCode();
+ return result;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java b/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java
new file mode 100644
index 0000000..9a84f4a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.index.fielddata.IndexFieldData;
+
+/**
+ * Used by all field data based aggregators. It determines the context of the field data that the aggregators operate
+ * on, holding both the field name and the index field data associated with it.
+ */
+public class FieldContext {
+
+ private final String field;
+ private final IndexFieldData<?> indexFieldData;
+
+ /**
+ * Constructs a field data context for the given field and its index field data
+ *
+ * @param field The name of the field
+ * @param indexFieldData The index field data of the field
+ */
+ public FieldContext(String field, IndexFieldData<?> indexFieldData) {
+ this.field = field;
+ this.indexFieldData = indexFieldData;
+ }
+
+ public String field() {
+ return field;
+ }
+
+ /**
+     * @return The index field data in this context
+ */
+ public IndexFieldData<?> indexFieldData() {
+ return indexFieldData;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/FieldDataSource.java b/src/main/java/org/elasticsearch/search/aggregations/support/FieldDataSource.java
new file mode 100644
index 0000000..5d1152a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/FieldDataSource.java
@@ -0,0 +1,813 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import com.google.common.primitives.Longs;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash;
+import org.apache.lucene.util.InPlaceMergeSorter;
+import org.elasticsearch.common.lucene.ReaderContextAware;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.AtomicFieldData.Order;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.support.FieldDataSource.Bytes.SortedAndUnique.SortedUniqueBytesValues;
+import org.elasticsearch.search.aggregations.support.bytes.ScriptBytesValues;
+import org.elasticsearch.search.aggregations.support.numeric.ScriptDoubleValues;
+import org.elasticsearch.search.aggregations.support.numeric.ScriptLongValues;
+import org.elasticsearch.search.internal.SearchContext;
+
+public abstract class FieldDataSource {
+
+ public static class MetaData {
+
+ public static final MetaData UNKNOWN = new MetaData();
+
+ public enum Uniqueness {
+ UNIQUE,
+ NOT_UNIQUE,
+ UNKNOWN;
+
+ public boolean unique() {
+ return this == UNIQUE;
+ }
+ }
+
+ private long maxAtomicUniqueValuesCount = -1;
+ private boolean multiValued = true;
+ private Uniqueness uniqueness = Uniqueness.UNKNOWN;
+
+ private MetaData() {}
+
+ private MetaData(MetaData other) {
+ this.maxAtomicUniqueValuesCount = other.maxAtomicUniqueValuesCount;
+ this.multiValued = other.multiValued;
+ this.uniqueness = other.uniqueness;
+ }
+
+ private MetaData(long maxAtomicUniqueValuesCount, boolean multiValued, Uniqueness uniqueness) {
+ this.maxAtomicUniqueValuesCount = maxAtomicUniqueValuesCount;
+ this.multiValued = multiValued;
+ this.uniqueness = uniqueness;
+ }
+
+ public long maxAtomicUniqueValuesCount() {
+ return maxAtomicUniqueValuesCount;
+ }
+
+ public boolean multiValued() {
+ return multiValued;
+ }
+
+ public Uniqueness uniqueness() {
+ return uniqueness;
+ }
+
+ public static MetaData load(IndexFieldData indexFieldData, SearchContext context) {
+            MetaData metaData = new MetaData();
+            metaData.uniqueness = Uniqueness.UNIQUE;
+            metaData.multiValued = false; // reset the conservative default (true) so the per-segment |= below is meaningful
+ for (AtomicReaderContext readerContext : context.searcher().getTopReaderContext().leaves()) {
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ metaData.multiValued |= fieldData.isMultiValued();
+ metaData.maxAtomicUniqueValuesCount = Math.max(metaData.maxAtomicUniqueValuesCount, fieldData.getNumberUniqueValues());
+ }
+ return metaData;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public static Builder builder(MetaData other) {
+ return new Builder(other);
+ }
+
+ public static class Builder {
+
+ private final MetaData metaData;
+
+ private Builder() {
+ metaData = new MetaData();
+ }
+
+ private Builder(MetaData metaData) {
+ this.metaData = new MetaData(metaData);
+ }
+
+ public Builder maxAtomicUniqueValuesCount(long maxAtomicUniqueValuesCount) {
+ metaData.maxAtomicUniqueValuesCount = maxAtomicUniqueValuesCount;
+ return this;
+ }
+
+ public Builder multiValued(boolean multiValued) {
+ metaData.multiValued = multiValued;
+ return this;
+ }
+
+ public Builder uniqueness(Uniqueness uniqueness) {
+ metaData.uniqueness = uniqueness;
+ return this;
+ }
+
+ public MetaData build() {
+ return metaData;
+ }
+ }
+
+ }
+
+ /**
+ * Get the current {@link BytesValues}.
+ */
+ public abstract BytesValues bytesValues();
+
+ /**
+ * Ask the underlying data source to provide pre-computed hashes, optional operation.
+ */
+ public void setNeedsHashes(boolean needsHashes) {}
+
+ public abstract MetaData metaData();
+
+ public static abstract class Bytes extends FieldDataSource {
+
+ public static abstract class WithOrdinals extends Bytes {
+
+ public abstract BytesValues.WithOrdinals bytesValues();
+
+ public static class FieldData extends WithOrdinals implements ReaderContextAware {
+
+ protected boolean needsHashes;
+ protected final IndexFieldData.WithOrdinals<?> indexFieldData;
+ protected final MetaData metaData;
+ protected AtomicFieldData.WithOrdinals<?> atomicFieldData;
+ private BytesValues.WithOrdinals bytesValues;
+
+ public FieldData(IndexFieldData.WithOrdinals<?> indexFieldData, MetaData metaData) {
+ this.indexFieldData = indexFieldData;
+ this.metaData = metaData;
+ needsHashes = false;
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ public final void setNeedsHashes(boolean needsHashes) {
+ this.needsHashes = needsHashes;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ atomicFieldData = indexFieldData.load(reader);
+ if (bytesValues != null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ }
+
+ @Override
+ public BytesValues.WithOrdinals bytesValues() {
+ if (bytesValues == null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ return bytesValues;
+ }
+
+ }
+
+ }
+
+ public static class FieldData extends Bytes implements ReaderContextAware {
+
+ protected boolean needsHashes;
+ protected final IndexFieldData<?> indexFieldData;
+ protected final MetaData metaData;
+ protected AtomicFieldData<?> atomicFieldData;
+ private BytesValues bytesValues;
+
+ public FieldData(IndexFieldData<?> indexFieldData, MetaData metaData) {
+ this.indexFieldData = indexFieldData;
+ this.metaData = metaData;
+ needsHashes = false;
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ public final void setNeedsHashes(boolean needsHashes) {
+ this.needsHashes = needsHashes;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ atomicFieldData = indexFieldData.load(reader);
+ if (bytesValues != null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
+ if (bytesValues == null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ return bytesValues;
+ }
+ }
+
+ public static class Script extends Bytes {
+
+ private final ScriptBytesValues values;
+
+ public Script(SearchScript script) {
+ values = new ScriptBytesValues(script);
+ }
+
+ @Override
+ public MetaData metaData() {
+ return MetaData.UNKNOWN;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
+ return values;
+ }
+ }
+
+ public static class SortedAndUnique extends Bytes implements ReaderContextAware {
+
+ private final FieldDataSource delegate;
+ private final MetaData metaData;
+ private BytesValues bytesValues;
+
+ public SortedAndUnique(FieldDataSource delegate) {
+ this.delegate = delegate;
+ this.metaData = MetaData.builder(delegate.metaData()).uniqueness(MetaData.Uniqueness.UNIQUE).build();
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ bytesValues = null; // order may change per-segment -> reset
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
+ if (bytesValues == null) {
+ bytesValues = delegate.bytesValues();
+ if (bytesValues.isMultiValued() &&
+ (!delegate.metaData().uniqueness.unique() || bytesValues.getOrder() != Order.BYTES)) {
+ bytesValues = new SortedUniqueBytesValues(bytesValues);
+ }
+ }
+ return bytesValues;
+ }
+
+ static class SortedUniqueBytesValues extends BytesValues {
+
+ final BytesValues delegate;
+ int[] sortedIds;
+ final BytesRefHash bytes;
+ int numUniqueValues;
+ int pos = Integer.MAX_VALUE;
+
+ public SortedUniqueBytesValues(BytesValues delegate) {
+ super(delegate.isMultiValued());
+ this.delegate = delegate;
+ bytes = new BytesRefHash();
+ }
+
+ @Override
+ public int setDocument(int docId) {
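+                    // dedupe the raw values through a BytesRefHash, then sort the unique ids in byte order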
+ final int numValues = delegate.setDocument(docId);
+ if (numValues == 0) {
+ sortedIds = null;
+ return 0;
+ }
+ bytes.clear();
+ bytes.reinit();
+ for (int i = 0; i < numValues; ++i) {
+ final BytesRef next = delegate.nextValue();
+ final int hash = delegate.currentValueHash();
+ assert hash == next.hashCode();
+ bytes.add(next, hash);
+ }
+ numUniqueValues = bytes.size();
+ sortedIds = bytes.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
+ pos = 0;
+ return numUniqueValues;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ bytes.get(sortedIds[pos++], scratch);
+ return scratch;
+ }
+
+ @Override
+ public Order getOrder() {
+ return Order.BYTES;
+ }
+
+ }
+
+ }
+
+ }
+
+ public static abstract class Numeric extends FieldDataSource {
+
+ /** Whether the underlying data is floating-point or not. */
+ public abstract boolean isFloatingPoint();
+
+ /** Get the current {@link LongValues}. */
+ public abstract LongValues longValues();
+
+ /** Get the current {@link DoubleValues}. */
+ public abstract DoubleValues doubleValues();
+
+ public static class WithScript extends Numeric {
+
+ private final LongValues longValues;
+ private final DoubleValues doubleValues;
+ private final FieldDataSource.WithScript.BytesValues bytesValues;
+
+ public WithScript(Numeric delegate, SearchScript script) {
+ this.longValues = new LongValues(delegate, script);
+ this.doubleValues = new DoubleValues(delegate, script);
+ this.bytesValues = new FieldDataSource.WithScript.BytesValues(delegate, script);
+ }
+
+ @Override
+ public boolean isFloatingPoint() {
+ return true; // even if the underlying source produces longs, scripts can change them to doubles
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ return bytesValues;
+ }
+
+ @Override
+ public LongValues longValues() {
+ return longValues;
+ }
+
+ @Override
+ public DoubleValues doubleValues() {
+ return doubleValues;
+ }
+
+ @Override
+ public MetaData metaData() {
+ return MetaData.UNKNOWN;
+ }
+
+ static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
+
+ private final Numeric source;
+ private final SearchScript script;
+
+ public LongValues(Numeric source, SearchScript script) {
+ super(true);
+ this.source = source;
+ this.script = script;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return source.longValues().setDocument(docId);
+ }
+
+ @Override
+ public long nextValue() {
+ script.setNextVar("_value", source.longValues().nextValue());
+ return script.runAsLong();
+ }
+ }
+
+ static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
+
+ private final Numeric source;
+ private final SearchScript script;
+
+ public DoubleValues(Numeric source, SearchScript script) {
+ super(true);
+ this.source = source;
+ this.script = script;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return source.doubleValues().setDocument(docId);
+ }
+
+ @Override
+ public double nextValue() {
+ script.setNextVar("_value", source.doubleValues().nextValue());
+ return script.runAsDouble();
+ }
+ }
+ }
+
+ public static class FieldData extends Numeric implements ReaderContextAware {
+
+ protected boolean needsHashes;
+ protected final IndexNumericFieldData<?> indexFieldData;
+ protected final MetaData metaData;
+ protected AtomicNumericFieldData atomicFieldData;
+ private BytesValues bytesValues;
+ private LongValues longValues;
+ private DoubleValues doubleValues;
+
+ public FieldData(IndexNumericFieldData<?> indexFieldData, MetaData metaData) {
+ this.indexFieldData = indexFieldData;
+ this.metaData = metaData;
+ needsHashes = false;
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ @Override
+ public boolean isFloatingPoint() {
+ return indexFieldData.getNumericType().isFloatingPoint();
+ }
+
+ @Override
+ public final void setNeedsHashes(boolean needsHashes) {
+ this.needsHashes = needsHashes;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ atomicFieldData = indexFieldData.load(reader);
+ if (bytesValues != null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ if (longValues != null) {
+ longValues = atomicFieldData.getLongValues();
+ }
+ if (doubleValues != null) {
+ doubleValues = atomicFieldData.getDoubleValues();
+ }
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
+ if (bytesValues == null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ return bytesValues;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.LongValues longValues() {
+ if (longValues == null) {
+ longValues = atomicFieldData.getLongValues();
+ }
+ assert longValues.getOrder() == Order.NUMERIC;
+ return longValues;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.DoubleValues doubleValues() {
+ if (doubleValues == null) {
+ doubleValues = atomicFieldData.getDoubleValues();
+ }
+ assert doubleValues.getOrder() == Order.NUMERIC;
+ return doubleValues;
+ }
+ }
+
+ public static class Script extends Numeric {
+ private final ScriptValueType scriptValueType;
+
+ private final ScriptDoubleValues doubleValues;
+ private final ScriptLongValues longValues;
+ private final ScriptBytesValues bytesValues;
+
+ public Script(SearchScript script, ScriptValueType scriptValueType) {
+ this.scriptValueType = scriptValueType;
+ longValues = new ScriptLongValues(script);
+ doubleValues = new ScriptDoubleValues(script);
+ bytesValues = new ScriptBytesValues(script);
+ }
+
+ @Override
+ public MetaData metaData() {
+ return MetaData.UNKNOWN;
+ }
+
+ @Override
+ public boolean isFloatingPoint() {
+ return scriptValueType != null ? scriptValueType.isFloatingPoint() : true;
+ }
+
+ @Override
+ public LongValues longValues() {
+ return longValues;
+ }
+
+ @Override
+ public DoubleValues doubleValues() {
+ return doubleValues;
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ return bytesValues;
+ }
+
+ }
+
+ public static class SortedAndUnique extends Numeric implements ReaderContextAware {
+
+ private final Numeric delegate;
+ private final MetaData metaData;
+ private LongValues longValues;
+ private DoubleValues doubleValues;
+ private BytesValues bytesValues;
+
+ public SortedAndUnique(Numeric delegate) {
+ this.delegate = delegate;
+ this.metaData = MetaData.builder(delegate.metaData()).uniqueness(MetaData.Uniqueness.UNIQUE).build();
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ @Override
+ public boolean isFloatingPoint() {
+ return delegate.isFloatingPoint();
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ longValues = null; // order may change per-segment -> reset
+ doubleValues = null;
+ bytesValues = null;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.LongValues longValues() {
+ if (longValues == null) {
+ longValues = delegate.longValues();
+ if (longValues.isMultiValued() &&
+ (!delegate.metaData().uniqueness.unique() || longValues.getOrder() != Order.NUMERIC)) {
+ longValues = new SortedUniqueLongValues(longValues);
+ }
+ }
+ return longValues;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.DoubleValues doubleValues() {
+ if (doubleValues == null) {
+ doubleValues = delegate.doubleValues();
+ if (doubleValues.isMultiValued() &&
+ (!delegate.metaData().uniqueness.unique() || doubleValues.getOrder() != Order.NUMERIC)) {
+ doubleValues = new SortedUniqueDoubleValues(doubleValues);
+ }
+ }
+ return doubleValues;
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
+ if (bytesValues == null) {
+ bytesValues = delegate.bytesValues();
+ if (bytesValues.isMultiValued() &&
+ (!delegate.metaData().uniqueness.unique() || bytesValues.getOrder() != Order.BYTES)) {
+ bytesValues = new SortedUniqueBytesValues(bytesValues);
+ }
+ }
+ return bytesValues;
+ }
+
+ private static class SortedUniqueLongValues extends FilterLongValues {
+
+ int numUniqueValues;
+ long[] array = new long[2];
+ int pos = Integer.MAX_VALUE;
+
+ protected SortedUniqueLongValues(LongValues delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ final int numValues = super.setDocument(docId);
+ array = ArrayUtil.grow(array, numValues);
+ for (int i = 0; i < numValues; ++i) {
+ array[i] = super.nextValue();
+ }
+ pos = 0;
+ return numUniqueValues = CollectionUtils.sortAndDedup(array, numValues);
+ }
+
+ @Override
+ public long nextValue() {
+ assert pos < numUniqueValues;
+ return array[pos++];
+ }
+
+ @Override
+ public Order getOrder() {
+ return Order.NUMERIC;
+ }
+
+ }
+
+ private static class SortedUniqueDoubleValues extends FilterDoubleValues {
+
+ int numUniqueValues;
+ double[] array = new double[2];
+ int pos = Integer.MAX_VALUE;
+
+ SortedUniqueDoubleValues(DoubleValues delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ final int numValues = super.setDocument(docId);
+ array = ArrayUtil.grow(array, numValues);
+ for (int i = 0; i < numValues; ++i) {
+ array[i] = super.nextValue();
+ }
+ pos = 0;
+ return numUniqueValues = CollectionUtils.sortAndDedup(array, numValues);
+ }
+
+ @Override
+ public double nextValue() {
+ assert pos < numUniqueValues;
+ return array[pos++];
+ }
+
+ @Override
+ public Order getOrder() {
+ return Order.NUMERIC;
+ }
+
+ }
+
+ }
+
+ }
+
+ // No need to implement ReaderContextAware here, the delegate already takes care of updating data structures
+ public static class WithScript extends Bytes {
+
+ private final BytesValues bytesValues;
+
+ public WithScript(FieldDataSource delegate, SearchScript script) {
+ this.bytesValues = new BytesValues(delegate, script);
+ }
+
+ @Override
+ public MetaData metaData() {
+ return MetaData.UNKNOWN;
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ return bytesValues;
+ }
+
+ static class BytesValues extends org.elasticsearch.index.fielddata.BytesValues {
+
+ private final FieldDataSource source;
+ private final SearchScript script;
+
+ public BytesValues(FieldDataSource source, SearchScript script) {
+ super(true);
+ this.source = source;
+ this.script = script;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ return source.bytesValues().setDocument(docId);
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ BytesRef value = source.bytesValues().nextValue();
+ script.setNextVar("_value", value.utf8ToString());
+ scratch.copyChars(script.run().toString());
+ return scratch;
+ }
+ }
+ }
+
+ public static class GeoPoint extends FieldDataSource implements ReaderContextAware {
+
+ protected boolean needsHashes;
+ protected final IndexGeoPointFieldData<?> indexFieldData;
+ private final MetaData metaData;
+ protected AtomicGeoPointFieldData<?> atomicFieldData;
+ private BytesValues bytesValues;
+ private GeoPointValues geoPointValues;
+
+ public GeoPoint(IndexGeoPointFieldData<?> indexFieldData, MetaData metaData) {
+ this.indexFieldData = indexFieldData;
+ this.metaData = metaData;
+ needsHashes = false;
+ }
+
+ @Override
+ public MetaData metaData() {
+ return metaData;
+ }
+
+ @Override
+ public final void setNeedsHashes(boolean needsHashes) {
+ this.needsHashes = needsHashes;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ atomicFieldData = indexFieldData.load(reader);
+ if (bytesValues != null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ if (geoPointValues != null) {
+ geoPointValues = atomicFieldData.getGeoPointValues();
+ }
+ }
+
+ @Override
+ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
+ if (bytesValues == null) {
+ bytesValues = atomicFieldData.getBytesValues(needsHashes);
+ }
+ return bytesValues;
+ }
+
+ public org.elasticsearch.index.fielddata.GeoPointValues geoPointValues() {
+ if (geoPointValues == null) {
+ geoPointValues = atomicFieldData.getGeoPointValues();
+ }
+ return geoPointValues;
+ }
+ }
+
+}
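
The SortedAndUnique wrappers above buffer each document's values into a reusable array, then sort and deduplicate them in place so downstream aggregators can rely on ordered, distinct values. A minimal, self-contained sketch of that sort-and-dedup step, using only the JDK in place of the Lucene ArrayUtil and Elasticsearch CollectionUtils helpers the patch itself calls (SortAndDedupSketch is a hypothetical harness, not part of the patch):

    import java.util.Arrays;

    public class SortAndDedupSketch {

        // Sorts the first len entries of array in place and returns the
        // number of unique values, which end up in array[0..uniqueCount).
        static int sortAndDedup(long[] array, int len) {
            if (len <= 1) {
                return len;
            }
            Arrays.sort(array, 0, len);
            int uniqueCount = 1;
            for (int i = 1; i < len; ++i) {
                if (array[i] != array[uniqueCount - 1]) {
                    array[uniqueCount++] = array[i];
                }
            }
            return uniqueCount;
        }

        public static void main(String[] args) {
            long[] values = {7, 3, 7, 1, 3};
            int unique = sortAndDedup(values, values.length);
            System.out.println(Arrays.toString(Arrays.copyOf(values, unique))); // [1, 3, 7]
        }
    }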
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ScriptValueType.java b/src/main/java/org/elasticsearch/search/aggregations/support/ScriptValueType.java
new file mode 100644
index 0000000..9ec4da8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/ScriptValueType.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;
+import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
+
+/**
+ *
+ */
+public enum ScriptValueType {
+
+ STRING(BytesValuesSource.class),
+ LONG(NumericValuesSource.class),
+ DOUBLE(NumericValuesSource.class);
+
+ final Class<? extends ValuesSource> valuesSourceType;
+
+ private ScriptValueType(Class<? extends ValuesSource> valuesSourceType) {
+ this.valuesSourceType = valuesSourceType;
+ }
+
+ public Class<? extends ValuesSource> getValuesSourceType() {
+ return valuesSourceType;
+ }
+
+ public boolean isNumeric() {
+ return this != STRING;
+ }
+
+ public boolean isFloatingPoint() {
+ return this == DOUBLE;
+ }
+}
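
ScriptValueType ties each script result type to the values-source class that can consume it; only STRING is non-numeric and only DOUBLE is floating-point. A quick illustration, assuming this patch's classes are on the classpath (ScriptValueTypeSketch is a hypothetical harness):

    import org.elasticsearch.search.aggregations.support.ScriptValueType;

    public class ScriptValueTypeSketch {
        public static void main(String[] args) {
            for (ScriptValueType type : ScriptValueType.values()) {
                // e.g. STRING numeric=false floatingPoint=false source=BytesValuesSource
                System.out.println(type + " numeric=" + type.isNumeric()
                        + " floatingPoint=" + type.isFloatingPoint()
                        + " source=" + type.getValuesSourceType().getSimpleName());
            }
        }
    }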
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ScriptValues.java b/src/main/java/org/elasticsearch/search/aggregations/support/ScriptValues.java
new file mode 100644
index 0000000..6d67aa2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/ScriptValues.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.script.SearchScript;
+
+/**
+ *
+ */
+public interface ScriptValues {
+
+ SearchScript script();
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValueSourceAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValueSourceAggregatorFactory.java
new file mode 100644
index 0000000..4b530e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValueSourceAggregatorFactory.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.search.aggregations.*;
+
+/**
+ *
+ */
+public abstract class ValueSourceAggregatorFactory<VS extends ValuesSource> extends AggregatorFactory implements ValuesSourceBased {
+
+ public static abstract class LeafOnly<VS extends ValuesSource> extends ValueSourceAggregatorFactory<VS> {
+
+ protected LeafOnly(String name, String type, ValuesSourceConfig<VS> valuesSourceConfig) {
+ super(name, type, valuesSourceConfig);
+ }
+
+ @Override
+ public AggregatorFactory subFactories(AggregatorFactories subFactories) {
+ throw new AggregationInitializationException("Aggregator [" + name + "] of type [" + type + "] cannot accept sub-aggregations");
+ }
+ }
+
+ protected ValuesSourceConfig<VS> valuesSourceConfig;
+
+ protected ValueSourceAggregatorFactory(String name, String type, ValuesSourceConfig<VS> valuesSourceConfig) {
+ super(name, type);
+ this.valuesSourceConfig = valuesSourceConfig;
+ }
+
+ @Override
+ public ValuesSourceConfig valuesSourceConfig() {
+ return valuesSourceConfig;
+ }
+
+ @Override
+ public Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount) {
+ if (valuesSourceConfig.unmapped()) {
+ return createUnmapped(context, parent);
+ }
+ VS vs = context.valuesSource(valuesSourceConfig, parent == null ? 0 : 1 + parent.depth());
+ return create(vs, expectedBucketsCount, context, parent);
+ }
+
+ @Override
+ public void doValidate() {
+ if (valuesSourceConfig == null || !valuesSourceConfig.valid()) {
+ // a null config would NPE below, so resolve its type defensively
+ valuesSourceConfig = resolveValuesSourceConfigFromAncestors(name, parent, valuesSourceConfig == null ? null : valuesSourceConfig.valueSourceType());
+ }
+ }
+
+ protected abstract Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent);
+
+ protected abstract Aggregator create(VS valuesSource, long expectedBucketsCount, AggregationContext aggregationContext, Aggregator parent);
+
+ private static <VS extends ValuesSource> ValuesSourceConfig<VS> resolveValuesSourceConfigFromAncestors(String aggName, AggregatorFactory parent, Class<VS> requiredValuesSourceType) {
+ ValuesSourceConfig config;
+ while (parent != null) {
+ if (parent instanceof ValuesSourceBased) {
+ config = ((ValuesSourceBased) parent).valuesSourceConfig();
+ if (config != null && config.valid()) {
+ if (requiredValuesSourceType == null || requiredValuesSourceType.isAssignableFrom(config.valueSourceType())) {
+ return (ValuesSourceConfig<VS>) config;
+ }
+ }
+ }
+ parent = parent.parent();
+ }
+ throw new AggregationExecutionException("could not find the appropriate value context to perform aggregation [" + aggName + "]");
+ }
+}
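
The interesting part of this factory is resolveValuesSourceConfigFromAncestors: an aggregator without a valid config of its own climbs the parent factory chain and inherits the first compatible config it finds. A self-contained sketch of that walk (Node and Config are stand-ins, not Elasticsearch types):

    public class AncestorResolutionSketch {

        static class Config {
            final Class<?> type;
            final boolean valid;
            Config(Class<?> type, boolean valid) { this.type = type; this.valid = valid; }
        }

        static class Node {
            final Node parent;
            final Config config; // may be null
            Node(Node parent, Config config) { this.parent = parent; this.config = config; }
        }

        // Mirrors the loop above: walk up from the parent until a valid
        // config of a compatible values-source type is found.
        static Config resolve(Node parent, Class<?> requiredType) {
            for (Node node = parent; node != null; node = node.parent) {
                Config config = node.config;
                if (config != null && config.valid
                        && (requiredType == null || requiredType.isAssignableFrom(config.type))) {
                    return config;
                }
            }
            throw new IllegalStateException("no ancestor provides a usable values source");
        }

        public static void main(String[] args) {
            Node root = new Node(null, new Config(Number.class, true));
            Node child = new Node(root, null); // no config of its own
            System.out.println(resolve(child, Number.class).type); // class java.lang.Number
        }
    }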
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java
new file mode 100644
index 0000000..256e5c5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.index.fielddata.BytesValues;
+
+/**
+ * An abstraction of a source from which values are resolved per document.
+ */
+public interface ValuesSource {
+
+ FieldDataSource.MetaData metaData();
+
+ /**
+ * @return A {@link org.apache.lucene.util.BytesRef bytesref} view over the values that are resolved from this value source.
+ */
+ BytesValues bytesValues();
+
+}
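
Every values abstraction in this patch follows the same cursor contract: setDocument(docId) positions the source on a document and returns its value count, after which nextValue() may be called exactly that many times. A self-contained sketch of a consumer written against that contract (LongCursor is a stand-in, not an Elasticsearch type):

    public class CursorContractSketch {

        interface LongCursor {
            int setDocument(int docId); // number of values for this doc
            long nextValue();           // callable exactly that many times
        }

        static long sumValues(LongCursor cursor, int docId) {
            long sum = 0;
            final int count = cursor.setDocument(docId);
            for (int i = 0; i < count; ++i) {
                sum += cursor.nextValue();
            }
            return sum;
        }

        public static void main(String[] args) {
            final long[][] docs = {{1, 2, 3}, {}, {42}};
            LongCursor cursor = new LongCursor() {
                long[] current;
                int pos;
                public int setDocument(int docId) {
                    current = docs[docId];
                    pos = 0;
                    return current.length;
                }
                public long nextValue() {
                    return current[pos++];
                }
            };
            System.out.println(sumValues(cursor, 0)); // 6
            System.out.println(sumValues(cursor, 1)); // 0
            System.out.println(sumValues(cursor, 2)); // 42
        }
    }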
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceBased.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceBased.java
new file mode 100644
index 0000000..48a6abb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceBased.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+/**
+ *
+ */
+public interface ValuesSourceBased {
+
+ ValuesSourceConfig valuesSourceConfig();
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java
new file mode 100644
index 0000000..b9d88b2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.numeric.ValueParser;
+
+/**
+ *
+ */
+public class ValuesSourceConfig<VS extends ValuesSource> {
+
+ final Class<VS> valueSourceType;
+ FieldContext fieldContext;
+ SearchScript script;
+ ValueFormatter formatter;
+ ValueParser parser;
+ ScriptValueType scriptValueType;
+ boolean unmapped = false;
+ boolean needsHashes = false;
+ boolean ensureUnique = false;
+ boolean ensureSorted = false;
+
+ public ValuesSourceConfig(Class<VS> valueSourceType) {
+ this.valueSourceType = valueSourceType;
+ }
+
+ public Class<VS> valueSourceType() {
+ return valueSourceType;
+ }
+
+ public FieldContext fieldContext() {
+ return fieldContext;
+ }
+
+ public boolean unmapped() {
+ return unmapped;
+ }
+
+ public boolean valid() {
+ return fieldContext != null || script != null || unmapped;
+ }
+
+ public ValuesSourceConfig<VS> fieldContext(FieldContext fieldContext) {
+ this.fieldContext = fieldContext;
+ return this;
+ }
+
+ public ValuesSourceConfig<VS> script(SearchScript script) {
+ this.script = script;
+ return this;
+ }
+
+ public ValuesSourceConfig<VS> formatter(ValueFormatter formatter) {
+ this.formatter = formatter;
+ return this;
+ }
+
+ public ValueFormatter formatter() {
+ return formatter;
+ }
+
+ public ValuesSourceConfig<VS> parser(ValueParser parser) {
+ this.parser = parser;
+ return this;
+ }
+
+ public ValueParser parser() {
+ return parser;
+ }
+
+ public ValuesSourceConfig<VS> scriptValueType(ScriptValueType scriptValueType) {
+ this.scriptValueType = scriptValueType;
+ return this;
+ }
+
+ public ScriptValueType scriptValueType() {
+ return scriptValueType;
+ }
+
+ public ValuesSourceConfig<VS> unmapped(boolean unmapped) {
+ this.unmapped = unmapped;
+ return this;
+ }
+
+ public ValuesSourceConfig<VS> needsHashes(boolean needsHashes) {
+ this.needsHashes = needsHashes;
+ return this;
+ }
+
+ public ValuesSourceConfig<VS> ensureUnique(boolean unique) {
+ this.ensureUnique = unique;
+ return this;
+ }
+
+ public ValuesSourceConfig<VS> ensureSorted(boolean sorted) {
+ this.ensureSorted = sorted;
+ return this;
+ }
+}
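
ValuesSourceConfig is a plain fluent holder: the aggregation parser instantiates it with the values-source class it needs, flips the post-processing flags, and hands it to a ValueSourceAggregatorFactory. A hedged sketch of that wiring, assuming this patch's classes are on the classpath (the field/script setup is elided, and ValuesSourceConfigSketch is a hypothetical harness):

    import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
    import org.elasticsearch.search.aggregations.support.bytes.BytesValuesSource;

    public class ValuesSourceConfigSketch {
        public static void main(String[] args) {
            ValuesSourceConfig<BytesValuesSource> config =
                    new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class)
                            .needsHashes(true)   // downstream code wants value hashes
                            .ensureUnique(true)  // dedup multi-valued documents
                            .ensureSorted(true); // and sort their values
            // Without a field context, a script, or the unmapped flag the
            // config is not yet valid and would be resolved from ancestors.
            System.out.println(config.valid()); // false
        }
    }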
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/bytes/BytesValuesSource.java b/src/main/java/org/elasticsearch/search/aggregations/support/bytes/BytesValuesSource.java
new file mode 100644
index 0000000..9548d0b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/bytes/BytesValuesSource.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.bytes;
+
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.search.aggregations.support.FieldDataSource;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+
+/**
+ *
+ */
+public class BytesValuesSource implements ValuesSource {
+
+ private final FieldDataSource source;
+
+ public BytesValuesSource(FieldDataSource source) {
+ this.source = source;
+ }
+
+ @Override
+ public FieldDataSource.MetaData metaData() {
+ return source.metaData();
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ return source.bytesValues();
+ }
+
+ public static final class WithOrdinals extends BytesValuesSource {
+
+ private final FieldDataSource.Bytes.WithOrdinals source;
+
+ public WithOrdinals(FieldDataSource.Bytes.WithOrdinals source) {
+ super(source);
+ this.source = source;
+ }
+
+ @Override
+ public BytesValues.WithOrdinals bytesValues() {
+ return source.bytesValues();
+ }
+
+ }
+
+}
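
WithOrdinals narrows the return type of bytesValues() so ordinal-aware callers get a BytesValues.WithOrdinals without casting. A self-contained sketch of that covariant-override pattern (all types here are stand-ins):

    public class CovariantOverrideSketch {

        static class Values { }

        static class ValuesWithOrdinals extends Values {
            int ordinal() { return 0; }
        }

        static class Source {
            Values values() { return new Values(); }
        }

        static class SourceWithOrdinals extends Source {
            @Override
            ValuesWithOrdinals values() { return new ValuesWithOrdinals(); } // narrowed return type
        }

        public static void main(String[] args) {
            // No cast needed: the static type is already the narrow one.
            System.out.println(new SourceWithOrdinals().values().ordinal()); // 0
        }
    }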
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/bytes/ScriptBytesValues.java b/src/main/java/org/elasticsearch/search/aggregations/support/bytes/ScriptBytesValues.java
new file mode 100644
index 0000000..b7d46a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/bytes/ScriptBytesValues.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.bytes;
+
+import com.google.common.collect.Iterators;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.support.ScriptValues;
+
+import java.lang.reflect.Array;
+import java.util.Collection;
+import java.util.Iterator;
+
+/**
+ *
+ */
+public class ScriptBytesValues extends BytesValues implements ScriptValues {
+
+ final SearchScript script;
+
+ private Iterator<?> iter;
+ private Object value;
+
+ public ScriptBytesValues(SearchScript script) {
+ super(true); // assume multi-valued
+ this.script = script;
+ }
+
+ @Override
+ public SearchScript script() {
+ return script;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ script.setNextDocId(docId);
+ value = script.run();
+
+ if (value == null) {
+ iter = Iterators.emptyIterator();
+ return 0;
+ }
+
+ if (value.getClass().isArray()) {
+ final int length = Array.getLength(value);
+ // don't use Arrays.asList: for a primitive array it would yield a single-element list holding the array itself
+ iter = new Iterator<Object>() {
+
+ int i = 0;
+
+ @Override
+ public boolean hasNext() {
+ return i < length;
+ }
+
+ @Override
+ public Object next() {
+ return Array.get(value, i++);
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ return length;
+ }
+
+ if (value instanceof Collection) {
+ final Collection<?> coll = (Collection<?>) value;
+ iter = coll.iterator();
+ return coll.size();
+ }
+
+ iter = Iterators.singletonIterator(value);
+ return 1;
+ }
+
+ @Override
+ public BytesRef nextValue() {
+ final String next = iter.next().toString();
+ scratch.copyChars(next);
+ return scratch;
+ }
+
+}
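
setDocument above has to cope with whatever shape a script returns: null, a single object, an array (possibly of primitives), or a collection. A self-contained sketch of that normalization, showing why reflection is used instead of Arrays.asList (ScriptResultSketch is a hypothetical harness):

    import java.lang.reflect.Array;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    public class ScriptResultSketch {

        static List<Object> normalize(Object value) {
            List<Object> values = new ArrayList<Object>();
            if (value == null) {
                return values;
            }
            if (value.getClass().isArray()) {
                // Arrays.asList(new int[]{...}) would produce a single-element
                // list holding the array itself; java.lang.reflect.Array
                // unpacks primitive arrays element by element.
                final int length = Array.getLength(value);
                for (int i = 0; i < length; ++i) {
                    values.add(Array.get(value, i));
                }
            } else if (value instanceof Collection) {
                values.addAll((Collection<?>) value);
            } else {
                values.add(value);
            }
            return values;
        }

        public static void main(String[] args) {
            System.out.println(normalize(null));                    // []
            System.out.println(normalize("a"));                     // [a]
            System.out.println(normalize(new int[]{1, 2}));         // [1, 2]
            System.out.println(normalize(Arrays.asList("x", "y"))); // [x, y]
        }
    }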
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/geopoints/GeoPointValuesSource.java b/src/main/java/org/elasticsearch/search/aggregations/support/geopoints/GeoPointValuesSource.java
new file mode 100644
index 0000000..0bd405b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/geopoints/GeoPointValuesSource.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.geopoints;
+
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.search.aggregations.support.FieldDataSource;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+
+/**
+ * A source of geo points.
+ */
+public final class GeoPointValuesSource implements ValuesSource {
+
+ private final FieldDataSource.GeoPoint source;
+
+ public GeoPointValuesSource(FieldDataSource.GeoPoint source) {
+ this.source = source;
+ }
+
+ @Override
+ public FieldDataSource.MetaData metaData() {
+ return source.metaData();
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ return source.bytesValues();
+ }
+
+ public final GeoPointValues values() {
+ return source.geoPointValues();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/numeric/NumericValuesSource.java b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/NumericValuesSource.java
new file mode 100644
index 0000000..55572b6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/NumericValuesSource.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.numeric;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.search.aggregations.support.FieldDataSource;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+
+/**
+ * A source of numeric data.
+ */
+public final class NumericValuesSource implements ValuesSource {
+
+ private final FieldDataSource.Numeric source;
+ private final ValueFormatter formatter;
+ private final ValueParser parser;
+
+ public NumericValuesSource(FieldDataSource.Numeric source, @Nullable ValueFormatter formatter, @Nullable ValueParser parser) {
+ this.source = source;
+ this.formatter = formatter;
+ this.parser = parser;
+ }
+
+ @Override
+ public FieldDataSource.MetaData metaData() {
+ return source.metaData();
+ }
+
+ @Override
+ public BytesValues bytesValues() {
+ return source.bytesValues();
+ }
+
+ public boolean isFloatingPoint() {
+ return source.isFloatingPoint();
+ }
+
+ public LongValues longValues() {
+ return source.longValues();
+ }
+
+ public DoubleValues doubleValues() {
+ return source.doubleValues();
+ }
+
+ public ValueFormatter formatter() {
+ return formatter;
+ }
+
+ public ValueParser parser() {
+ return parser;
+ }
+
+}
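
NumericValuesSource deliberately exposes both a long and a double view: integer data above 2^53 cannot round-trip through double, so integer aggregations should stay on the long path and consult isFloatingPoint() before widening. A self-contained demonstration of the precision loss that motivates this:

    public class NumericViewsSketch {
        public static void main(String[] args) {
            long big = (1L << 53) + 1;
            System.out.println(big);                  // 9007199254740993
            System.out.println((long) (double) big);  // 9007199254740992, precision lost
        }
    }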
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptDoubleValues.java b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptDoubleValues.java
new file mode 100644
index 0000000..907daf8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptDoubleValues.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.numeric;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.support.ScriptValues;
+
+import java.lang.reflect.Array;
+import java.util.Collection;
+import java.util.Iterator;
+
+/**
+ * {@link DoubleValues} implementation which is based on a script
+ */
+public class ScriptDoubleValues extends DoubleValues implements ScriptValues {
+
+ final SearchScript script;
+
+ private Object value;
+ private double[] values = new double[1];
+ private int valueCount;
+ private int valueOffset;
+
+ public ScriptDoubleValues(SearchScript script) {
+ super(true); // assume multi-valued
+ this.script = script;
+ }
+
+ @Override
+ public SearchScript script() {
+ return script;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ script.setNextDocId(docId);
+ value = script.run();
+
+ if (value == null) {
+ valueCount = 0;
+ }
+
+ else if (value instanceof Number) {
+ valueCount = 1;
+ values[0] = ((Number) value).doubleValue();
+ }
+
+ else if (value.getClass().isArray()) {
+ valueCount = Array.getLength(value);
+ values = ArrayUtil.grow(values, valueCount);
+ for (int i = 0; i < valueCount; ++i) {
+ values[i] = ((Number) Array.get(value, i)).doubleValue();
+ }
+ }
+
+ else if (value instanceof Collection) {
+ valueCount = ((Collection<?>) value).size();
+ values = ArrayUtil.grow(values, valueCount);
+ int i = 0;
+ for (Iterator<?> it = ((Collection<?>) value).iterator(); it.hasNext(); ++i) {
+ values[i] = ((Number) it.next()).doubleValue();
+ }
+ assert i == valueCount;
+ }
+
+ else {
+ throw new AggregationExecutionException("Unsupported script value [" + value + "]");
+ }
+
+ valueOffset = 0;
+ return valueCount;
+ }
+
+ @Override
+ public double nextValue() {
+ assert valueOffset < valueCount;
+ return values[valueOffset++];
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptLongValues.java b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptLongValues.java
new file mode 100644
index 0000000..373c286
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ScriptLongValues.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.numeric;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.support.ScriptValues;
+
+import java.lang.reflect.Array;
+import java.util.Collection;
+import java.util.Iterator;
+
+/**
+ * {@link LongValues} implementation which is based on a script
+ */
+public class ScriptLongValues extends LongValues implements ScriptValues {
+
+ final SearchScript script;
+
+ private Object value;
+ private long[] values = new long[1];
+ private int valueCount;
+ private int valueOffset;
+
+ public ScriptLongValues(SearchScript script) {
+ super(true); // assume multi-valued
+ this.script = script;
+ }
+
+ @Override
+ public SearchScript script() {
+ return script;
+ }
+
+ @Override
+ public int setDocument(int docId) {
+ this.docId = docId;
+ script.setNextDocId(docId);
+ value = script.run();
+
+ if (value == null) {
+ valueCount = 0;
+ }
+
+ else if (value instanceof Number) {
+ valueCount = 1;
+ values[0] = ((Number) value).longValue();
+ }
+
+ else if (value.getClass().isArray()) {
+ valueCount = Array.getLength(value);
+ values = ArrayUtil.grow(values, valueCount);
+ for (int i = 0; i < valueCount; ++i) {
+ values[i] = ((Number) Array.get(value, i)).longValue();
+ }
+ }
+
+ else if (value instanceof Collection) {
+ valueCount = ((Collection<?>) value).size();
+ values = ArrayUtil.grow(values, valueCount);
+ int i = 0;
+ for (Iterator<?> it = ((Collection<?>) value).iterator(); it.hasNext(); ++i) {
+ values[i] = ((Number) it.next()).longValue();
+ }
+ assert i == valueCount;
+ }
+
+ else {
+ throw new AggregationExecutionException("Unsupported script value [" + value + "]");
+ }
+
+ valueOffset = 0;
+ return valueCount;
+ }
+
+ @Override
+ public long nextValue() {
+ assert valueOffset < valueCount;
+ return values[valueOffset++];
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatter.java b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatter.java
new file mode 100644
index 0000000..fbc735a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatter.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.numeric;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+
+import java.io.IOException;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+import java.text.NumberFormat;
+import java.util.Locale;
+
+/**
+ * A strategy for formatting a numeric value (for example, a time represented as a millis-since-epoch long) as a string
+ */
+public interface ValueFormatter extends Streamable {
+
+ public final static ValueFormatter RAW = new Raw();
+ public final static ValueFormatter IPv4 = new IPv4Formatter();
+
+ /**
+ * Uniquely identifies this formatter (used for efficient serialization)
+ *
+ * @return The id of this formatter
+ */
+ byte id();
+
+ /**
+ * Formats the given millis time value (since the epoch) to string.
+ *
+ * @param value The long value to format.
+ * @return The formatted value as string.
+ */
+ String format(long value);
+
+ /**
+ * Formats the given double value to string.
+ *
+ * @param value The double value to format.
+ * @return The formatted value as string.
+ */
+ String format(double value);
+
+
+ static class Raw implements ValueFormatter {
+
+ static final byte ID = 1;
+
+ @Override
+ public String format(long value) {
+ return String.valueOf(value);
+ }
+
+ @Override
+ public String format(double value) {
+ return String.valueOf(value);
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ }
+ }
+
+ /**
+ * A time formatter which is based on date/time format.
+ */
+ public static class DateTime implements ValueFormatter {
+
+ public static final ValueFormatter DEFAULT = new ValueFormatter.DateTime(DateFieldMapper.Defaults.DATE_TIME_FORMATTER);
+
+ static final byte ID = 2;
+
+ FormatDateTimeFormatter formatter;
+
+ DateTime() {} // for serialization
+
+ public DateTime(String format) {
+ this.formatter = Joda.forPattern(format);
+ }
+
+ public DateTime(FormatDateTimeFormatter formatter) {
+ this.formatter = formatter;
+ }
+
+ @Override
+ public String format(long time) {
+ return formatter.printer().print(time);
+ }
+
+ @Override
+ public String format(double value) {
+ return format((long) value);
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ formatter = Joda.forPattern(in.readString());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(formatter.format());
+ }
+ }
+
+ public static abstract class Number implements ValueFormatter {
+
+ NumberFormat format;
+
+ Number() {} // for serialization
+
+ Number(NumberFormat format) {
+ this.format = format;
+ }
+
+ @Override
+ public String format(long value) {
+ return format.format(value);
+ }
+
+ @Override
+ public String format(double value) {
+ return format.format(value);
+ }
+
+ public static class Pattern extends Number {
+
+ private static final DecimalFormatSymbols SYMBOLS = new DecimalFormatSymbols(Locale.ROOT);
+
+ static final byte ID = 4;
+
+ String pattern;
+
+ Pattern() {} // for serialization
+
+ public Pattern(String pattern) {
+ super(new DecimalFormat(pattern, SYMBOLS));
+ this.pattern = pattern;
+ }
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ pattern = in.readString();
+ format = new DecimalFormat(pattern, SYMBOLS);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(pattern);
+ }
+ }
+ }
+
+ static class IPv4Formatter implements ValueFormatter {
+
+ static final byte ID = 6;
+
+ @Override
+ public byte id() {
+ return ID;
+ }
+
+ @Override
+ public String format(long value) {
+ return IpFieldMapper.longToIp(value);
+ }
+
+ @Override
+ public String format(double value) {
+ return format((long) value);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ }
+ }
+
+
+}
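
The formatters are easiest to see end to end. A hedged usage sketch, assuming an Elasticsearch 1.0.x classpath (ValueFormatterSketch is a hypothetical harness; 2130706433 is the long encoding of 127.0.0.1):

    import org.elasticsearch.search.aggregations.support.numeric.ValueFormatter;

    public class ValueFormatterSketch {
        public static void main(String[] args) {
            // RAW falls back to String.valueOf.
            System.out.println(ValueFormatter.RAW.format(42L));          // 42
            // Pattern is backed by java.text.DecimalFormat with ROOT symbols.
            ValueFormatter pattern = new ValueFormatter.Number.Pattern("0.00");
            System.out.println(pattern.format(0.5));                     // 0.50
            // IPv4 renders the long encoding back into dotted-quad form.
            System.out.println(ValueFormatter.IPv4.format(2130706433L)); // 127.0.0.1
        }
    }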
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatterStreams.java b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatterStreams.java
new file mode 100644
index 0000000..bf2b287
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueFormatterStreams.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.numeric;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class ValueFormatterStreams {
+
+ public static ValueFormatter read(StreamInput in) throws IOException {
+ byte id = in.readByte();
+ ValueFormatter formatter = null;
+ switch (id) {
+ case ValueFormatter.Raw.ID: return ValueFormatter.RAW;
+ case ValueFormatter.IPv4Formatter.ID: return ValueFormatter.IPv4;
+ case ValueFormatter.DateTime.ID: formatter = new ValueFormatter.DateTime(); break;
+ case ValueFormatter.Number.Pattern.ID: formatter = new ValueFormatter.Number.Pattern(); break;
+ default: throw new ElasticsearchIllegalArgumentException("Unknown value formatter with id [" + id + "]");
+ }
+ formatter.readFrom(in);
+ return formatter;
+ }
+
+ public static ValueFormatter readOptional(StreamInput in) throws IOException {
+ if (!in.readBoolean()) {
+ return null;
+ }
+ return read(in);
+ }
+
+ public static void write(ValueFormatter formatter, StreamOutput out) throws IOException {
+ out.writeByte(formatter.id());
+ formatter.writeTo(out);
+ }
+
+ public static void writeOptional(ValueFormatter formatter, StreamOutput out) throws IOException {
+ out.writeBoolean(formatter != null);
+ if (formatter != null) {
+ write(formatter, out);
+ }
+ }
+}
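
ValueFormatterStreams implements a common wire pattern: one id byte tags the concrete formatter, stateless singletons are returned directly, and stateful ones are instantiated empty and then read their own state. A self-contained sketch of that id-dispatch pattern using plain java.io (all types here are stand-ins):

    import java.io.*;

    public class IdDispatchSketch {

        interface Tagged {
            byte id();
            void writeTo(DataOutput out) throws IOException;
            void readFrom(DataInput in) throws IOException;
        }

        static class Raw implements Tagged {
            static final byte ID = 1;
            public byte id() { return ID; }
            public void writeTo(DataOutput out) { } // stateless
            public void readFrom(DataInput in) { }
            public String toString() { return "raw"; }
        }

        static class Pattern implements Tagged {
            static final byte ID = 4;
            String pattern;
            Pattern() { } // for deserialization
            Pattern(String pattern) { this.pattern = pattern; }
            public byte id() { return ID; }
            public void writeTo(DataOutput out) throws IOException { out.writeUTF(pattern); }
            public void readFrom(DataInput in) throws IOException { pattern = in.readUTF(); }
            public String toString() { return "pattern[" + pattern + "]"; }
        }

        static void write(Tagged t, DataOutput out) throws IOException {
            out.writeByte(t.id()); // the id byte drives dispatch on read
            t.writeTo(out);
        }

        static Tagged read(DataInput in) throws IOException {
            byte id = in.readByte();
            Tagged t;
            switch (id) {
                case Raw.ID: return new Raw(); // stateless: nothing further to read
                case Pattern.ID: t = new Pattern(); break;
                default: throw new IOException("unknown id [" + id + "]");
            }
            t.readFrom(in);
            return t;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            write(new Pattern("0.00"), new DataOutputStream(buf));
            Tagged back = read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
            System.out.println(back); // pattern[0.00]
        }
    }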
diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueParser.java b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueParser.java
new file mode 100644
index 0000000..6450e66
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/support/numeric/ValueParser.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.support.numeric;
+
+import org.elasticsearch.common.joda.DateMathParser;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public interface ValueParser {
+
+ static final ValueParser IPv4 = new ValueParser.IPv4();
+
+ long parseLong(String value, SearchContext searchContext);
+
+ double parseDouble(String value, SearchContext searchContext);
+
+
+ /**
+ * Knows how to parse date/time values based on a date/time format
+ */
+ static class DateTime implements ValueParser {
+
+ private FormatDateTimeFormatter formatter;
+
+ public DateTime(String format) {
+ this(Joda.forPattern(format));
+ }
+
+ public DateTime(FormatDateTimeFormatter formatter) {
+ this.formatter = formatter;
+ }
+
+ @Override
+ public long parseLong(String value, SearchContext searchContext) {
+ return formatter.parser().parseMillis(value);
+ }
+
+ @Override
+ public double parseDouble(String value, SearchContext searchContext) {
+ return parseLong(value, searchContext);
+ }
+ }
+
+ /**
+ * Knows how to parse date/time values based on elasticsearch's date math expressions
+ */
+ static class DateMath implements ValueParser {
+
+ public static final DateMath DEFAULT = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER, DateFieldMapper.Defaults.TIME_UNIT));
+
+ private DateMathParser parser;
+
+ public DateMath(String format, TimeUnit timeUnit) {
+ this(new DateMathParser(Joda.forPattern(format), timeUnit));
+ }
+
+ public DateMath(DateMathParser parser) {
+ this.parser = parser;
+ }
+
+ @Override
+ public long parseLong(String value, SearchContext searchContext) {
+ return parser.parse(value, searchContext.nowInMillis());
+ }
+
+ @Override
+ public double parseDouble(String value, SearchContext searchContext) {
+ return parseLong(value, searchContext);
+ }
+ }
+
+ /**
+ * Knows how to parse IPv4 formats
+ */
+ static class IPv4 implements ValueParser {
+
+ private IPv4() {
+ }
+
+ @Override
+ public long parseLong(String value, SearchContext searchContext) {
+ return IpFieldMapper.ipToLong(value);
+ }
+
+ @Override
+ public double parseDouble(String value, SearchContext searchContext) {
+ return parseLong(value, searchContext);
+ }
+ }
+}
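
The parsers are the inverse of the formatters above. A hedged usage sketch, assuming an Elasticsearch 1.0.x classpath (ValueParserSketch is a hypothetical harness; DateTime ignores its SearchContext argument, so null is passed purely for illustration):

    import org.elasticsearch.search.aggregations.support.numeric.ValueParser;

    public class ValueParserSketch {
        public static void main(String[] args) {
            // Dotted-quad IPs map onto the same long space that
            // ValueFormatter.IPv4 renders from.
            System.out.println(ValueParser.IPv4.parseLong("127.0.0.1", null)); // 2130706433

            // A date/time parser built from a Joda pattern.
            ValueParser dates = new ValueParser.DateTime("yyyy-MM-dd");
            // 86400000, assuming the formatter's default UTC time zone.
            System.out.println(dates.parseLong("1970-01-02", null));
        }
    }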
diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
new file mode 100644
index 0000000..fc4f4a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -0,0 +1,976 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.builder;
+
+import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+import org.elasticsearch.search.facet.FacetBuilder;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.rescore.RescoreBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A builder that makes it easy to construct a search source. For simple construction,
+ * use {@link org.elasticsearch.search.builder.SearchSourceBuilder#searchSource()}.
+ *
+ * @see org.elasticsearch.action.search.SearchRequest#source(SearchSourceBuilder)
+ */
+public class SearchSourceBuilder implements ToXContent {
+
+ /**
+ * A static factory method to construct a new search source.
+ */
+ public static SearchSourceBuilder searchSource() {
+ return new SearchSourceBuilder();
+ }
+
+ /**
+ * A static factory method to construct new search highlights.
+ */
+ public static HighlightBuilder highlight() {
+ return new HighlightBuilder();
+ }
+
+ private QueryBuilder queryBuilder;
+
+ private BytesReference queryBinary;
+
+ private FilterBuilder postFilterBuilder;
+
+ private BytesReference filterBinary;
+
+ private int from = -1;
+
+ private int size = -1;
+
+ private Boolean explain;
+
+ private Boolean version;
+
+ private List<SortBuilder> sorts;
+
+ private boolean trackScores = false;
+
+ private Float minScore;
+
+ private long timeoutInMillis = -1;
+
+ private List<String> fieldNames;
+ private List<String> fieldDataFields;
+ private List<ScriptField> scriptFields;
+ private List<PartialField> partialFields;
+ private FetchSourceContext fetchSourceContext;
+
+ private List<FacetBuilder> facets;
+ private BytesReference facetsBinary;
+
+ private List<AbstractAggregationBuilder> aggregations;
+ private BytesReference aggregationsBinary;
+
+ private HighlightBuilder highlightBuilder;
+
+ private SuggestBuilder suggestBuilder;
+
+ private RescoreBuilder rescoreBuilder;
+
+ private ObjectFloatOpenHashMap<String> indexBoost = null;
+
+ private String[] stats;
+
+ /**
+ * Constructs a new search source builder.
+ */
+ public SearchSourceBuilder() {
+ }
+
+ /**
+ * Sets the search query.
+ *
+ * @see org.elasticsearch.index.query.QueryBuilders
+ */
+ public SearchSourceBuilder query(QueryBuilder query) {
+ this.queryBuilder = query;
+ return this;
+ }
+
+ /**
+ * Sets a raw (binary) search query.
+ */
+ public SearchSourceBuilder query(byte[] queryBinary) {
+ return query(queryBinary, 0, queryBinary.length);
+ }
+
+ /**
+ * Sets a raw (binary) search query.
+ */
+ public SearchSourceBuilder query(byte[] queryBinary, int queryBinaryOffset, int queryBinaryLength) {
+ return query(new BytesArray(queryBinary, queryBinaryOffset, queryBinaryLength));
+ }
+
+ /**
+ * Sets a raw (binary) search query.
+ */
+ public SearchSourceBuilder query(BytesReference queryBinary) {
+ this.queryBinary = queryBinary;
+ return this;
+ }
+
+ /**
+ * Sets a raw search query from a string.
+ */
+ public SearchSourceBuilder query(String queryString) {
+ return query(queryString.getBytes(Charsets.UTF_8));
+ }
+
+ /**
+ * Sets the search query from a builder.
+ */
+ public SearchSourceBuilder query(XContentBuilder query) {
+ return query(query.bytes());
+ }
+
+ /**
+ * Sets the search query from a map.
+ */
+ public SearchSourceBuilder query(Map query) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
+ builder.map(query);
+ return query(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + query + "]", e);
+ }
+ }
+
+ /**
+ * Sets a filter that will be executed after the query has been executed and only affects the search hits
+ * (not aggregations or facets). This filter is always executed as the last filtering mechanism.
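+ *
+ * <p>A sketch (the field name and value are hypothetical):
+ * <pre>
+ * searchSource().query(QueryBuilders.matchAllQuery())
+ *         .postFilter(FilterBuilders.termFilter("status", "published"));
+ * </pre>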
+ */
+ public SearchSourceBuilder postFilter(FilterBuilder postFilter) {
+ this.postFilterBuilder = postFilter;
+ return this;
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchSourceBuilder postFilter(String postFilterString) {
+ return postFilter(postFilterString.getBytes(Charsets.UTF_8));
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchSourceBuilder postFilter(byte[] postFilter) {
+ return postFilter(postFilter, 0, postFilter.length);
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchSourceBuilder postFilter(byte[] postFilterBinary, int postFilterBinaryOffset, int postFilterBinaryLength) {
+ return postFilter(new BytesArray(postFilterBinary, postFilterBinaryOffset, postFilterBinaryLength));
+ }
+
+ /**
+ * Sets a filter on the query executed that only applies to the search query
+ * (and not facets for example).
+ */
+ public SearchSourceBuilder postFilter(BytesReference postFilterBinary) {
+ this.filterBinary = postFilterBinary;
+ return this;
+ }
+
+ /**
+ * Sets the post filter from an {@link XContentBuilder}.
+ */
+ public SearchSourceBuilder postFilter(XContentBuilder postFilter) {
+ return postFilter(postFilter.bytes());
+ }
+
+ /**
+ * Sets the post filter from a map.
+ */
+ public SearchSourceBuilder postFilter(Map postFilter) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
+ builder.map(postFilter);
+ return postFilter(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + postFilter + "]", e);
+ }
+ }
+
+ /**
+ * The offset from the first result to start fetching hits from. Defaults to <tt>0</tt>.
+ */
+ public SearchSourceBuilder from(int from) {
+ this.from = from;
+ return this;
+ }
+
+ /**
+ * The number of search hits to return. Defaults to <tt>10</tt>.
+ */
+ public SearchSourceBuilder size(int size) {
+ this.size = size;
+ return this;
+ }
+
+ /**
+ * Sets the minimum score below which docs will be filtered out.
+ */
+ public SearchSourceBuilder minScore(float minScore) {
+ this.minScore = minScore;
+ return this;
+ }
+
+ /**
+ * Should each {@link org.elasticsearch.search.SearchHit} be returned with an
+ * explanation of the hit (ranking).
+ */
+ public SearchSourceBuilder explain(Boolean explain) {
+ this.explain = explain;
+ return this;
+ }
+
+ /**
+ * Should each {@link org.elasticsearch.search.SearchHit} be returned with a version
+ * associated with it.
+ */
+ public SearchSourceBuilder version(Boolean version) {
+ this.version = version;
+ return this;
+ }
+
+ /**
+ * An optional timeout to control how long search is allowed to take.
+ */
+ public SearchSourceBuilder timeout(TimeValue timeout) {
+ this.timeoutInMillis = timeout.millis();
+ return this;
+ }
+
+ /**
+ * An optional timeout to control how long search is allowed to take.
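+ * For example, <code>timeout("5s")</code> sets a five second timeout (the value is illustrative).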
+ */
+ public SearchSourceBuilder timeout(String timeout) {
+ this.timeoutInMillis = TimeValue.parseTimeValue(timeout, null).millis();
+ return this;
+ }
+
+ /**
+ * Adds a sort against the given field name and the sort ordering.
+ *
+ * @param name The name of the field
+ * @param order The sort ordering
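+ *
+ * <p>A sketch (the field names are hypothetical):
+ * <pre>
+ * searchSource().sort("age", SortOrder.DESC).sort("name");
+ * </pre>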
+ */
+ public SearchSourceBuilder sort(String name, SortOrder order) {
+ return sort(SortBuilders.fieldSort(name).order(order));
+ }
+
+ /**
+ * Adds a sort against the given field name.
+ *
+ * @param name The name of the field to sort by
+ */
+ public SearchSourceBuilder sort(String name) {
+ return sort(SortBuilders.fieldSort(name));
+ }
+
+ /**
+ * Adds a sort builder.
+ */
+ public SearchSourceBuilder sort(SortBuilder sort) {
+ if (sorts == null) {
+ sorts = Lists.newArrayList();
+ }
+ sorts.add(sort);
+ return this;
+ }
+
+ /**
+ * Applies when sorting, and controls if scores will be tracked as well. Defaults to
+ * <tt>false</tt>.
+ */
+ public SearchSourceBuilder trackScores(boolean trackScores) {
+ this.trackScores = trackScores;
+ return this;
+ }
+
+ /**
+ * Adds a facet to perform as part of the search.
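+ *
+ * <p>A sketch (assumes {@code FacetBuilders} from the facet package and a hypothetical "tag" field):
+ * <pre>
+ * searchSource().facet(FacetBuilders.termsFacet("tags").field("tag"));
+ * </pre>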
+ */
+ public SearchSourceBuilder facet(FacetBuilder facet) {
+ if (facets == null) {
+ facets = Lists.newArrayList();
+ }
+ facets.add(facet);
+ return this;
+ }
+
+ /**
+ * Sets raw (xcontent / json) facets.
+ */
+ public SearchSourceBuilder facets(byte[] facetsBinary) {
+ return facets(facetsBinary, 0, facetsBinary.length);
+ }
+
+ /**
+ * Sets raw (xcontent / json) facets, read from the given offset and length.
+ */
+ public SearchSourceBuilder facets(byte[] facetsBinary, int facetBinaryOffset, int facetBinaryLength) {
+ return facets(new BytesArray(facetsBinary, facetBinaryOffset, facetBinaryLength));
+ }
+
+ /**
+ * Sets raw (xcontent / json) facets from a {@link BytesReference}.
+ */
+ public SearchSourceBuilder facets(BytesReference facetsBinary) {
+ this.facetsBinary = facetsBinary;
+ return this;
+ }
+
+ /**
+ * Sets raw (xcontent / json) facets from an {@link XContentBuilder}.
+ */
+ public SearchSourceBuilder facets(XContentBuilder facets) {
+ return facets(facets.bytes());
+ }
+
+ /**
+ * Sets raw (xcontent / json) facets from a map.
+ */
+ public SearchSourceBuilder facets(Map facets) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
+ builder.map(facets);
+ return facets(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + facets + "]", e);
+ }
+ }
+
+ /**
+ * Adds an aggregation to perform as part of the search.
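+ *
+ * <p>A sketch (assumes {@code AggregationBuilders} and a hypothetical "tag" field):
+ * <pre>
+ * searchSource().aggregation(AggregationBuilders.terms("by_tag").field("tag"));
+ * </pre>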
+ */
+ public SearchSourceBuilder aggregation(AbstractAggregationBuilder aggregation) {
+ if (aggregations == null) {
+ aggregations = Lists.newArrayList();
+ }
+ aggregations.add(aggregation);
+ return this;
+ }
+
+ /**
+ * Sets raw (xcontent / json) aggregations.
+ */
+ public SearchSourceBuilder aggregations(byte[] aggregationsBinary) {
+ return aggregations(aggregationsBinary, 0, aggregationsBinary.length);
+ }
+
+ /**
+ * Sets raw (xcontent / json) aggregations, read from the given offset and length.
+ */
+ public SearchSourceBuilder aggregations(byte[] aggregationsBinary, int aggregationsBinaryOffset, int aggregationsBinaryLength) {
+ return aggregations(new BytesArray(aggregationsBinary, aggregationsBinaryOffset, aggregationsBinaryLength));
+ }
+
+ /**
+ * Sets raw (xcontent / json) aggregations from a {@link BytesReference}.
+ */
+ public SearchSourceBuilder aggregations(BytesReference aggregationsBinary) {
+ this.aggregationsBinary = aggregationsBinary;
+ return this;
+ }
+
+ /**
+ * Sets raw (xcontent / json) aggregations from an {@link XContentBuilder}.
+ */
+ public SearchSourceBuilder aggregations(XContentBuilder aggregations) {
+ return aggregations(aggregations.bytes());
+ }
+
+ /**
+ * Sets raw (xcontent / json) aggregations from a map.
+ */
+ public SearchSourceBuilder aggregations(Map aggregations) {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
+ builder.map(aggregations);
+ return aggregations(builder);
+ } catch (IOException e) {
+ throw new ElasticsearchGenerationException("Failed to generate [" + aggregations + "]", e);
+ }
+ }
+
+ public HighlightBuilder highlighter() {
+ if (highlightBuilder == null) {
+ highlightBuilder = new HighlightBuilder();
+ }
+ return highlightBuilder;
+ }
+
+ /**
+ * Adds highlight to perform as part of the search.
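+ *
+ * <p>A sketch, using the {@link #highlight()} factory above (the "content" field is hypothetical):
+ * <pre>
+ * searchSource().highlight(highlight().field("content"));
+ * </pre>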
+ */
+ public SearchSourceBuilder highlight(HighlightBuilder highlightBuilder) {
+ this.highlightBuilder = highlightBuilder;
+ return this;
+ }
+
+ public SuggestBuilder suggest() {
+ if (suggestBuilder == null) {
+ suggestBuilder = new SuggestBuilder("suggest");
+ }
+ return suggestBuilder;
+ }
+
+ public RescoreBuilder rescore() {
+ if (rescoreBuilder == null) {
+ rescoreBuilder = new RescoreBuilder();
+ }
+ return rescoreBuilder;
+ }
+
+ /**
+ * Indicates whether the response should contain the stored _source for every hit.
+ *
+ * @param fetch whether the stored _source should be returned for every hit
+ * @return this builder
+ */
+ public SearchSourceBuilder fetchSource(boolean fetch) {
+ if (this.fetchSourceContext == null) {
+ this.fetchSourceContext = new FetchSourceContext(fetch);
+ } else {
+ this.fetchSourceContext.fetchSource(fetch);
+ }
+ return this;
+ }
+
+ /**
+ * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param include An optional include (optionally wildcarded) pattern to filter the returned _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
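+ *
+ * <p>A sketch (the patterns are hypothetical):
+ * <pre>
+ * searchSource().fetchSource("user.*", "user.secret_*");
+ * </pre>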
+ */
+ public SearchSourceBuilder fetchSource(@Nullable String include, @Nullable String exclude) {
+ return fetchSource(include == null ? Strings.EMPTY_ARRAY : new String[]{include}, exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude});
+ }
+
+ /**
+ * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
+ * @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
+ */
+ public SearchSourceBuilder fetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+ fetchSourceContext = new FetchSourceContext(includes, excludes);
+ return this;
+ }
+
+ /**
+ * Indicate how the _source should be fetched.
+ */
+ public SearchSourceBuilder fetchSource(@Nullable FetchSourceContext fetchSourceContext) {
+ this.fetchSourceContext = fetchSourceContext;
+ return this;
+ }
+
+ /**
+ * Sets no fields to be loaded, resulting in only the id and type to be returned per hit.
+ */
+ public SearchSourceBuilder noFields() {
+ this.fieldNames = ImmutableList.of();
+ return this;
+ }
+
+ /**
+ * Sets the fields to load and return as part of the search request. If none are specified,
+ * the source of the document will be returned.
+ */
+ public SearchSourceBuilder fields(List<String> fields) {
+ this.fieldNames = fields;
+ return this;
+ }
+
+ /**
+ * Adds the fields to load and return as part of the search request. If none are specified,
+ * the source of the document will be returned.
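+ * For example, <code>fields("title", "date")</code> (hypothetical stored field names).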
+ */
+ public SearchSourceBuilder fields(String... fields) {
+ if (fieldNames == null) {
+ fieldNames = new ArrayList<String>();
+ }
+ for (String field : fields) {
+ fieldNames.add(field);
+ }
+ return this;
+ }
+
+ /**
+ * Adds a field to load and return (note, it must be stored) as part of the search request.
+ * If none are specified, the source of the document will be returned.
+ */
+ public SearchSourceBuilder field(String name) {
+ if (fieldNames == null) {
+ fieldNames = new ArrayList<String>();
+ }
+ fieldNames.add(name);
+ return this;
+ }
+
+ /**
+ * Adds a field to load from the field data cache and return as part of the search request.
+ */
+ public SearchSourceBuilder fieldDataField(String name) {
+ if (fieldDataFields == null) {
+ fieldDataFields = new ArrayList<String>();
+ }
+ fieldDataFields.add(name);
+ return this;
+ }
+
+ /**
+ * Adds a script field under the given name with the provided script.
+ *
+ * @param name The name of the field
+ * @param script The script
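+ *
+ * <p>A sketch (assumes the node's default script language and a hypothetical "price" field):
+ * <pre>
+ * searchSource().scriptField("price_doubled", "doc['price'].value * 2");
+ * </pre>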
+ */
+ public SearchSourceBuilder scriptField(String name, String script) {
+ return scriptField(name, null, script, null);
+ }
+
+ /**
+ * Adds a script field.
+ *
+ * @param name The name of the field
+ * @param script The script to execute
+ * @param params The script parameters
+ */
+ public SearchSourceBuilder scriptField(String name, String script, Map<String, Object> params) {
+ return scriptField(name, null, script, params);
+ }
+
+ /**
+ * Adds a script field.
+ *
+ * @param name The name of the field
+ * @param lang The language of the script
+ * @param script The script to execute
+ * @param params The script parameters (can be <tt>null</tt>)
+ */
+ public SearchSourceBuilder scriptField(String name, String lang, String script, Map<String, Object> params) {
+ if (scriptFields == null) {
+ scriptFields = Lists.newArrayList();
+ }
+ scriptFields.add(new ScriptField(name, lang, script, params));
+ return this;
+ }
+
+ /**
+ * Adds a partial field based on _source, with an "include" and/or "exclude" set which can include simple wildcard
+ * elements.
+ *
+ * @deprecated since 1.0.0
+ * use {@link SearchSourceBuilder#fetchSource(String, String)} instead
+ *
+ * @param name The name of the field
+ * @param include An optional include (optionally wildcarded) pattern from _source
+ * @param exclude An optional exclude (optionally wildcarded) pattern from _source
+ */
+ @Deprecated
+ public SearchSourceBuilder partialField(String name, @Nullable String include, @Nullable String exclude) {
+ if (partialFields == null) {
+ partialFields = Lists.newArrayList();
+ }
+ partialFields.add(new PartialField(name, include, exclude));
+ return this;
+ }
+
+ /**
+ * Adds a partial field based on _source, with an "includes" and/or "excludes" set which can include simple wildcard
+ * elements.
+ *
+ * @deprecated since 1.0.0
+ * use {@link SearchSourceBuilder#fetchSource(String[], String[])} instead
+ *
+ * @param name The name of the field
+ * @param includes An optional list of includes (optionally wildcarded) patterns from _source
+ * @param excludes An optional list of excludes (optionally wildcarded) patterns from _source
+ */
+ @Deprecated
+ public SearchSourceBuilder partialField(String name, @Nullable String[] includes, @Nullable String[] excludes) {
+ if (partialFields == null) {
+ partialFields = Lists.newArrayList();
+ }
+ partialFields.add(new PartialField(name, includes, excludes));
+ return this;
+ }
+
+ /**
+ * Sets the boost a specific index will receive when the query is executed against it.
+ *
+ * @param index The index to apply the boost against
+ * @param indexBoost The boost to apply to the index
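+ *
+ * <p>For example, <code>indexBoost("index_2014", 1.5f)</code> (the index name is hypothetical).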
+ */
+ public SearchSourceBuilder indexBoost(String index, float indexBoost) {
+ if (this.indexBoost == null) {
+ this.indexBoost = new ObjectFloatOpenHashMap<String>();
+ }
+ this.indexBoost.put(index, indexBoost);
+ return this;
+ }
+
+ /**
+ * The stats groups this request will be aggregated under.
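+ * For example, <code>stats("group1", "group2")</code> (the group names are illustrative).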
+ */
+ public SearchSourceBuilder stats(String... statsGroups) {
+ this.stats = statsGroups;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
+ toXContent(builder, ToXContent.EMPTY_PARAMS);
+ return builder.string();
+ } catch (Exception e) {
+ return "{ \"error\" : \"" + e.getMessage() + "\"}";
+ }
+ }
+
+ public BytesReference buildAsBytes() throws SearchSourceBuilderException {
+ return buildAsBytes(Requests.CONTENT_TYPE);
+ }
+
+ public BytesReference buildAsBytes(XContentType contentType) throws SearchSourceBuilderException {
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType);
+ toXContent(builder, ToXContent.EMPTY_PARAMS);
+ return builder.bytes();
+ } catch (Exception e) {
+ throw new SearchSourceBuilderException("Failed to build search source", e);
+ }
+ }
+
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+
+ if (from != -1) {
+ builder.field("from", from);
+ }
+ if (size != -1) {
+ builder.field("size", size);
+ }
+
+ if (timeoutInMillis != -1) {
+ builder.field("timeout", timeoutInMillis);
+ }
+
+ if (queryBuilder != null) {
+ builder.field("query");
+ queryBuilder.toXContent(builder, params);
+ }
+
+ if (queryBinary != null) {
+ if (XContentFactory.xContentType(queryBinary) == builder.contentType()) {
+ builder.rawField("query", queryBinary);
+ } else {
+ builder.field("query_binary", queryBinary);
+ }
+ }
+
+ if (postFilterBuilder != null) {
+ builder.field("post_filter");
+ postFilterBuilder.toXContent(builder, params);
+ }
+
+ if (filterBinary != null) {
+ if (XContentFactory.xContentType(filterBinary) == builder.contentType()) {
+ builder.rawField("filter", filterBinary);
+ } else {
+ builder.field("filter_binary", filterBinary);
+ }
+ }
+
+ if (minScore != null) {
+ builder.field("min_score", minScore);
+ }
+
+ if (version != null) {
+ builder.field("version", version);
+ }
+
+ if (explain != null) {
+ builder.field("explain", explain);
+ }
+
+ if (fetchSourceContext != null) {
+ if (!fetchSourceContext.fetchSource()) {
+ builder.field("_source", false);
+ } else {
+ builder.startObject("_source");
+ builder.array("includes", fetchSourceContext.includes());
+ builder.array("excludes", fetchSourceContext.excludes());
+ builder.endObject();
+ }
+ }
+
+ if (fieldNames != null) {
+ if (fieldNames.size() == 1) {
+ builder.field("fields", fieldNames.get(0));
+ } else {
+ builder.startArray("fields");
+ for (String fieldName : fieldNames) {
+ builder.value(fieldName);
+ }
+ builder.endArray();
+ }
+ }
+
+ if (fieldDataFields != null) {
+ builder.startArray("fielddata_fields");
+ for (String fieldName : fieldDataFields) {
+ builder.value(fieldName);
+ }
+ builder.endArray();
+ }
+
+ if (partialFields != null) {
+ builder.startObject("partial_fields");
+ for (PartialField partialField : partialFields) {
+ builder.startObject(partialField.name());
+ if (partialField.includes() != null) {
+ if (partialField.includes().length == 1) {
+ builder.field("include", partialField.includes()[0]);
+ } else {
+ builder.field("include", partialField.includes());
+ }
+ }
+ if (partialField.excludes() != null) {
+ if (partialField.excludes().length == 1) {
+ builder.field("exclude", partialField.excludes()[0]);
+ } else {
+ builder.field("exclude", partialField.excludes());
+ }
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ if (scriptFields != null) {
+ builder.startObject("script_fields");
+ for (ScriptField scriptField : scriptFields) {
+ builder.startObject(scriptField.fieldName());
+ builder.field("script", scriptField.script());
+ if (scriptField.lang() != null) {
+ builder.field("lang", scriptField.lang());
+ }
+ if (scriptField.params() != null) {
+ builder.field("params");
+ builder.map(scriptField.params());
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ if (sorts != null) {
+ builder.startArray("sort");
+ for (SortBuilder sort : sorts) {
+ builder.startObject();
+ sort.toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+
+ if (trackScores) {
+ builder.field("track_scores", trackScores);
+ }
+
+ if (indexBoost != null) {
+ builder.startObject("indices_boost");
+ final boolean[] states = indexBoost.allocated;
+ final Object[] keys = indexBoost.keys;
+ final float[] values = indexBoost.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ builder.field((String) keys[i], values[i]);
+ }
+ }
+ builder.endObject();
+ }
+
+ if (facets != null) {
+ builder.field("facets");
+ builder.startObject();
+ for (FacetBuilder facet : facets) {
+ facet.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+
+ if (facetsBinary != null) {
+ if (XContentFactory.xContentType(facetsBinary) == builder.contentType()) {
+ builder.rawField("facets", facetsBinary);
+ } else {
+ builder.field("facets_binary", facetsBinary);
+ }
+ }
+
+ if (aggregations != null) {
+ builder.field("aggregations");
+ builder.startObject();
+ for (AbstractAggregationBuilder aggregation : aggregations) {
+ aggregation.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+
+ if (aggregationsBinary != null) {
+ if (XContentFactory.xContentType(aggregationsBinary) == builder.contentType()) {
+ builder.rawField("aggregations", aggregationsBinary);
+ } else {
+ builder.field("aggregations_binary", aggregationsBinary);
+ }
+ }
+
+ if (highlightBuilder != null) {
+ highlightBuilder.toXContent(builder, params);
+ }
+
+ if (suggestBuilder != null) {
+ suggestBuilder.toXContent(builder, params);
+ }
+
+ if (rescoreBuilder != null) {
+ rescoreBuilder.toXContent(builder, params);
+ }
+
+ if (stats != null) {
+ builder.startArray("stats");
+ for (String stat : stats) {
+ builder.value(stat);
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ private static class ScriptField {
+ private final String fieldName;
+ private final String script;
+ private final String lang;
+ private final Map<String, Object> params;
+
+ private ScriptField(String fieldName, String lang, String script, Map<String, Object> params) {
+ this.fieldName = fieldName;
+ this.lang = lang;
+ this.script = script;
+ this.params = params;
+ }
+
+ public String fieldName() {
+ return fieldName;
+ }
+
+ public String script() {
+ return script;
+ }
+
+ public String lang() {
+ return this.lang;
+ }
+
+ public Map<String, Object> params() {
+ return params;
+ }
+ }
+
+ private static class PartialField {
+ private final String name;
+ private final String[] includes;
+ private final String[] excludes;
+
+ private PartialField(String name, String[] includes, String[] excludes) {
+ this.name = name;
+ this.includes = includes;
+ this.excludes = excludes;
+ }
+
+ private PartialField(String name, String include, String exclude) {
+ this.name = name;
+ this.includes = include == null ? null : new String[]{include};
+ this.excludes = exclude == null ? null : new String[]{exclude};
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public String[] includes() {
+ return includes;
+ }
+
+ public String[] excludes() {
+ return excludes;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java
new file mode 100644
index 0000000..d0ecd55
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.builder;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class SearchSourceBuilderException extends ElasticsearchException {
+
+ public SearchSourceBuilderException(String msg) {
+ super(msg);
+ }
+
+ public SearchSourceBuilderException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java b/src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java
new file mode 100644
index 0000000..68ebe0c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/controller/ScoreDocQueue.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.controller;
+
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.util.PriorityQueue;
+
+/**
+ * <p>Same as Lucene's {@link org.apache.lucene.search.HitQueue}, with a shard-aware tie break.
+ */
+public class ScoreDocQueue extends PriorityQueue<ScoreDoc> {
+
+ public ScoreDocQueue(int size) {
+ super(size);
+ }
+
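+ // on equal scores the tie is broken first by shard index and then by doc id,
+ // which keeps the merged ordering deterministic across shards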
+ protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) {
+ if (hitA.score == hitB.score) {
+ int c = hitA.shardIndex - hitB.shardIndex;
+ if (c == 0) {
+ return hitA.doc > hitB.doc;
+ }
+ return c > 0;
+ } else {
+ return hitA.score < hitB.score;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
new file mode 100644
index 0000000..76d2c3d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
@@ -0,0 +1,443 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.controller;
+
+import com.carrotsearch.hppc.IntArrayList;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.InternalFacets;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.FetchSearchResultProvider;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.query.QuerySearchResultProvider;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.util.*;
+
+/**
+ *
+ */
+public class SearchPhaseController extends AbstractComponent {
+
+ public static Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = new Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>>() {
+ @Override
+ public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) {
+ int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
+ if (i == 0) {
+ i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId();
+ }
+ return i;
+ }
+ };
+
+ public static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
+
+ private final CacheRecycler cacheRecycler;
+ private final boolean optimizeSingleShard;
+
+ @Inject
+ public SearchPhaseController(Settings settings, CacheRecycler cacheRecycler) {
+ super(settings);
+ this.cacheRecycler = cacheRecycler;
+ this.optimizeSingleShard = componentSettings.getAsBoolean("optimize_single_shard", true);
+ }
+
+ public boolean optimizeSingleShard() {
+ return optimizeSingleShard;
+ }
+
+ public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
+ ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap();
+ ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
+ long aggMaxDoc = 0;
+ for (AtomicArray.Entry<DfsSearchResult> lEntry : results.asList()) {
+ final Term[] terms = lEntry.value.terms();
+ final TermStatistics[] stats = lEntry.value.termStatistics();
+ assert terms.length == stats.length;
+ for (int i = 0; i < terms.length; i++) {
+ assert terms[i] != null;
+ TermStatistics existing = termStatistics.get(terms[i]);
+ if (existing != null) {
+ assert terms[i].bytes().equals(existing.term());
+ // totalTermFrequency is an optional statistic we need to check if either one or both
+ // are set to -1 which means not present and then set it globally to -1
+ termStatistics.put(terms[i], new TermStatistics(existing.term(),
+ existing.docFreq() + stats[i].docFreq(),
+ optionalSum(existing.totalTermFreq(), stats[i].totalTermFreq())));
+ } else {
+ termStatistics.put(terms[i], stats[i]);
+ }
+
+ }
+ final boolean[] states = lEntry.value.fieldStatistics().allocated;
+ final Object[] keys = lEntry.value.fieldStatistics().keys;
+ final Object[] values = lEntry.value.fieldStatistics().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ String key = (String) keys[i];
+ CollectionStatistics value = (CollectionStatistics) values[i];
+ assert key != null;
+ CollectionStatistics existing = fieldStatistics.get(key);
+ if (existing != null) {
+ CollectionStatistics merged = new CollectionStatistics(
+ key, existing.maxDoc() + value.maxDoc(),
+ optionalSum(existing.docCount(), value.docCount()),
+ optionalSum(existing.sumTotalTermFreq(), value.sumTotalTermFreq()),
+ optionalSum(existing.sumDocFreq(), value.sumDocFreq())
+ );
+ fieldStatistics.put(key, merged);
+ } else {
+ fieldStatistics.put(key, value);
+ }
+ }
+ }
+ aggMaxDoc += lEntry.value.maxDoc();
+ }
+ return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
+ }
+
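+ // sums two "optional" statistics where -1 means not available: if either side
+ // is -1 the result stays -1, otherwise it is the regular sum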
+ private static long optionalSum(long left, long right) {
+ return Math.min(left, right) == -1 ? -1 : left + right;
+ }
+
+ public ScoreDoc[] sortDocs(AtomicArray<? extends QuerySearchResultProvider> resultsArr) {
+ List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results = resultsArr.asList();
+ if (results.isEmpty()) {
+ return EMPTY_DOCS;
+ }
+
+ if (optimizeSingleShard) {
+ boolean canOptimize = false;
+ QuerySearchResult result = null;
+ int shardIndex = -1;
+ if (results.size() == 1) {
+ canOptimize = true;
+ result = results.get(0).value.queryResult();
+ shardIndex = results.get(0).index;
+ } else {
+ // let's see if we only got hits from a single shard; if so, we can optimize...
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
+ if (entry.value.queryResult().topDocs().scoreDocs.length > 0) {
+ if (result != null) { // we already have one, can't really optimize
+ canOptimize = false;
+ break;
+ }
+ canOptimize = true;
+ result = entry.value.queryResult();
+ shardIndex = entry.index;
+ }
+ }
+ }
+ if (canOptimize) {
+ ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
+ if (scoreDocs.length < result.from()) {
+ return EMPTY_DOCS;
+ }
+ int resultDocsSize = result.size();
+ if ((scoreDocs.length - result.from()) < resultDocsSize) {
+ resultDocsSize = scoreDocs.length - result.from();
+ }
+ // the copy is identical whether or not the top docs are field sorted; only
+ // the shard index needs to be stamped onto each returned doc
+ ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
+ for (int i = 0; i < resultDocsSize; i++) {
+ ScoreDoc scoreDoc = scoreDocs[result.from() + i];
+ scoreDoc.shardIndex = shardIndex;
+ docs[i] = scoreDoc;
+ }
+ return docs;
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ AtomicArray.Entry<? extends QuerySearchResultProvider>[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]);
+ Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
+ QuerySearchResultProvider firstResult = sortedResults[0].value;
+
+ int totalNumDocs = 0;
+
+ int queueSize = firstResult.queryResult().from() + firstResult.queryResult().size();
+ if (firstResult.includeFetch()) {
+ // if we did both query and fetch on the same go, we have fetched all the docs from each shard already, use them...
+ // this is also important since we shortcut and fetch only docs from "from" and up to "size"
+ queueSize *= sortedResults.length;
+ }
+
+ // we don't use TopDocs#merge here because when paginating it must be asked for "from + size" topN
+ // hits, which ends up creating a "from + size" ScoreDoc[], while in our implementation we can get away
+ // with creating just "size" ScoreDocs (in reverse order in the queue). it would be nice to improve
+ // TopDocs#merge to allow for this, in which case we won't need this logic...
+
+ PriorityQueue queue;
+ if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) {
+ // sorting; first, if the type is a String, change CUSTOM to STRING so we handle nulls properly (since our CUSTOM String sorting might return null)
+ TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
+ for (int i = 0; i < fieldDocs.fields.length; i++) {
+ boolean allValuesAreNull = true;
+ boolean resolvedField = false;
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
+ for (ScoreDoc doc : entry.value.queryResult().topDocs().scoreDocs) {
+ FieldDoc fDoc = (FieldDoc) doc;
+ if (fDoc.fields[i] != null) {
+ allValuesAreNull = false;
+ if (fDoc.fields[i] instanceof String) {
+ fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.Type.STRING, fieldDocs.fields[i].getReverse());
+ }
+ resolvedField = true;
+ break;
+ }
+ }
+ if (resolvedField) {
+ break;
+ }
+ }
+ if (!resolvedField && allValuesAreNull && fieldDocs.fields[i].getField() != null) {
+ // we did not manage to resolve a field (and it's not score or doc, which have no field), and all the values are null (which can only happen for STRING), so make it a STRING
+ fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.Type.STRING, fieldDocs.fields[i].getReverse());
+ }
+ }
+ queue = new ShardFieldDocSortedHitQueue(fieldDocs.fields, queueSize);
+
+ // we need to accumulate for all and then filter the from
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
+ QuerySearchResult result = entry.value.queryResult();
+ ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
+ totalNumDocs += scoreDocs.length;
+ for (ScoreDoc doc : scoreDocs) {
+ doc.shardIndex = entry.index;
+ if (queue.insertWithOverflow(doc) == doc) {
+ // filled the queue, break
+ break;
+ }
+ }
+ }
+ } else {
+ queue = new ScoreDocQueue(queueSize); // we need to accumulate for all and then filter the from
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
+ QuerySearchResult result = entry.value.queryResult();
+ ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
+ totalNumDocs += scoreDocs.length;
+ for (ScoreDoc doc : scoreDocs) {
+ doc.shardIndex = entry.index;
+ if (queue.insertWithOverflow(doc) == doc) {
+ // filled the queue, break
+ break;
+ }
+ }
+ }
+
+ }
+
+ int resultDocsSize = firstResult.queryResult().size();
+ if (firstResult.includeFetch()) {
+ // if we did both query and fetch on the same go, we have fetched all the docs from each shard already, use them...
+ resultDocsSize *= sortedResults.length;
+ }
+ if (totalNumDocs < queueSize) {
+ resultDocsSize = totalNumDocs - firstResult.queryResult().from();
+ }
+
+ if (resultDocsSize <= 0) {
+ return EMPTY_DOCS;
+ }
+
+ // we only pop the first, this handles "from" nicely since the "from" are down the queue
+ // that we already fetched, so we are actually popping the "from" and up to "size"
+ ScoreDoc[] shardDocs = new ScoreDoc[resultDocsSize];
+ for (int i = resultDocsSize - 1; i >= 0; i--) // put docs in array
+ shardDocs[i] = (ScoreDoc) queue.pop();
+ return shardDocs;
+ }
+
+ /**
+ * Fills the given array with the doc ids to load per shard; entries for shards with no docs to load remain null.
+ */
+ public void fillDocIdsToLoad(AtomicArray<IntArrayList> docsIdsToLoad, ScoreDoc[] shardDocs) {
+ for (ScoreDoc shardDoc : shardDocs) {
+ IntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex);
+ if (list == null) {
+ list = new IntArrayList(); // can't be shared! unsafe access is used on it later on
+ docsIdsToLoad.set(shardDoc.shardIndex, list);
+ }
+ list.add(shardDoc.doc);
+ }
+ }
+
+ public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray<? extends QuerySearchResultProvider> queryResultsArr, AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {
+
+ List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults = queryResultsArr.asList();
+ List<? extends AtomicArray.Entry<? extends FetchSearchResultProvider>> fetchResults = fetchResultsArr.asList();
+
+ if (queryResults.isEmpty()) {
+ return InternalSearchResponse.empty();
+ }
+
+ QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
+
+ boolean sorted = false;
+ int sortScoreIndex = -1;
+ if (firstResult.topDocs() instanceof TopFieldDocs) {
+ sorted = true;
+ TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
+ for (int i = 0; i < fieldDocs.fields.length; i++) {
+ if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) {
+ sortScoreIndex = i;
+ }
+ }
+ }
+
+ // merge facets
+ InternalFacets facets = null;
+ if (!queryResults.isEmpty()) {
+ // we rely on the fact that the order of facets is the same on all query results
+ if (firstResult.facets() != null && firstResult.facets().facets() != null && !firstResult.facets().facets().isEmpty()) {
+ List<Facet> aggregatedFacets = Lists.newArrayList();
+ List<Facet> namedFacets = Lists.newArrayList();
+ for (Facet facet : firstResult.facets()) {
+ // aggregate each facet name into a single list, and aggregate it
+ namedFacets.clear();
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+ for (Facet facet1 : entry.value.queryResult().facets()) {
+ if (facet.getName().equals(facet1.getName())) {
+ namedFacets.add(facet1);
+ }
+ }
+ }
+ if (!namedFacets.isEmpty()) {
+ Facet aggregatedFacet = ((InternalFacet) namedFacets.get(0)).reduce(new InternalFacet.ReduceContext(cacheRecycler, namedFacets));
+ aggregatedFacets.add(aggregatedFacet);
+ }
+ }
+ facets = new InternalFacets(aggregatedFacets);
+ }
+ }
+
+ // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
+ long totalHits = 0;
+ float maxScore = Float.NEGATIVE_INFINITY;
+ boolean timedOut = false;
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+ QuerySearchResult result = entry.value.queryResult();
+ if (result.searchTimedOut()) {
+ timedOut = true;
+ }
+ totalHits += result.topDocs().totalHits;
+ if (!Float.isNaN(result.topDocs().getMaxScore())) {
+ maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
+ }
+ }
+ if (Float.isInfinite(maxScore)) {
+ maxScore = Float.NaN;
+ }
+
+ // clean the fetch counter
+ for (AtomicArray.Entry<? extends FetchSearchResultProvider> entry : fetchResults) {
+ entry.value.fetchResult().initCounter();
+ }
+
+ // merge hits
+ List<InternalSearchHit> hits = new ArrayList<InternalSearchHit>();
+ if (!fetchResults.isEmpty()) {
+ for (ScoreDoc shardDoc : sortedDocs) {
+ FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
+ if (fetchResultProvider == null) {
+ continue;
+ }
+ FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
+ int index = fetchResult.counterGetAndIncrement();
+ if (index < fetchResult.hits().internalHits().length) {
+ InternalSearchHit searchHit = fetchResult.hits().internalHits()[index];
+ searchHit.score(shardDoc.score);
+ searchHit.shard(fetchResult.shardTarget());
+
+ if (sorted) {
+ FieldDoc fieldDoc = (FieldDoc) shardDoc;
+ searchHit.sortValues(fieldDoc.fields);
+ if (sortScoreIndex != -1) {
+ searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
+ }
+ }
+
+ hits.add(searchHit);
+ }
+ }
+ }
+
+ // merge suggest results
+ Suggest suggest = null;
+ if (!queryResults.isEmpty()) {
+ final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<String, List<Suggest.Suggestion>>();
+ boolean hasSuggestions = false;
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+ Suggest shardResult = entry.value.queryResult().queryResult().suggest();
+
+ if (shardResult == null) {
+ continue;
+ }
+ hasSuggestions = true;
+ Suggest.group(groupedSuggestions, shardResult);
+ }
+
+ suggest = hasSuggestions ? new Suggest(Suggest.Fields.SUGGEST, Suggest.reduce(groupedSuggestions)) : null;
+ }
+
+ // merge addAggregation
+ InternalAggregations aggregations = null;
+ if (!queryResults.isEmpty()) {
+ if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
+ List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(queryResults.size());
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+ aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
+ }
+ aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
+ }
+ }
+
+ InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
+
+ return new InternalSearchResponse(searchHits, facets, aggregations, suggest, timedOut);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/controller/ShardFieldDocSortedHitQueue.java b/src/main/java/org/elasticsearch/search/controller/ShardFieldDocSortedHitQueue.java
new file mode 100644
index 0000000..ffbcb0c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/controller/ShardFieldDocSortedHitQueue.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.controller;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+// LUCENE TRACK: had to copy this over in order to improve the same-order tie break to take shards into account
+public class ShardFieldDocSortedHitQueue extends PriorityQueue<FieldDoc> {
+
+ volatile SortField[] fields = null;
+
+ // used in the case where the fields are sorted by locale
+ // based strings
+ //volatile Collator[] collators = null;
+
+ FieldComparator[] comparators = null;
+
+ /**
+ * Creates a hit queue sorted by the given list of fields.
+ *
+ * @param fields Fieldable names, in priority order (highest priority first).
+ * @param size The number of hits to retain. Must be greater than zero.
+ */
+ public ShardFieldDocSortedHitQueue(SortField[] fields, int size) {
+ super(size);
+ setFields(fields);
+ }
+
+
+ /**
+ * Allows redefinition of sort fields if they are <code>null</code>.
+ * This is to handle the case using ParallelMultiSearcher where the
+ * original list contains AUTO and we don't know the actual sort
+ * type until the values come back. The fields can only be set once.
+ * This method should be synchronized externally like all other PQ methods.
+ *
+ * @param fields the sort fields to use
+ */
+ public void setFields(SortField[] fields) {
+ this.fields = fields;
+ //this.collators = hasCollators(fields);
+ try {
+ comparators = new FieldComparator[fields.length];
+ for (int fieldIDX = 0; fieldIDX < fields.length; fieldIDX++) {
+ comparators[fieldIDX] = fields[fieldIDX].getComparator(1, fieldIDX);
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchIllegalStateException("failed to get comparator", e);
+ }
+ }
+
+
+ /**
+ * Returns the fields being used to sort.
+ */
+ SortField[] getFields() {
+ return fields;
+ }
+
+ /**
+ * Returns whether <code>a</code> is less relevant than <code>b</code>.
+ *
+ * @param docA ScoreDoc
+ * @param docB ScoreDoc
+ * @return <code>true</code> if document <code>a</code> should be sorted after document <code>b</code>.
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ protected final boolean lessThan(final FieldDoc docA, final FieldDoc docB) {
+ final int n = fields.length;
+ int c = 0;
+ for (int i = 0; i < n && c == 0; ++i) {
+ final SortField.Type type = fields[i].getType();
+ if (type == SortField.Type.STRING) {
+ final BytesRef s1 = (BytesRef) docA.fields[i];
+ final BytesRef s2 = (BytesRef) docB.fields[i];
+ // null values need to be sorted first, because of how FieldCache.getStringIndex()
+ // works - in that routine, any documents without a value in the given field are
+ // put first. If both are null, the next SortField is used
+ if (s1 == null) {
+ c = (s2 == null) ? 0 : -1;
+ } else if (s2 == null) {
+ c = 1;
+ } else { //if (fields[i].getLocale() == null) {
+ c = s1.compareTo(s2);
+ }
+// } else {
+// c = collators[i].compare(s1, s2);
+// }
+ } else {
+ c = comparators[i].compareValues(docA.fields[i], docB.fields[i]);
+ }
+ // reverse sort
+ if (fields[i].getReverse()) {
+ c = -c;
+ }
+ }
+
+ // avoid random sort order that could lead to duplicates (bug #31241):
+ if (c == 0) {
+ // CHANGE: add shard-based tie breaking
+ c = docA.shardIndex - docB.shardIndex;
+ if (c == 0) {
+ return docA.doc > docB.doc;
+ }
+ }
+
+ return c > 0;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java b/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
new file mode 100644
index 0000000..9a817a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.dfs;
+
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.TermStatistics;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+
+public class AggregatedDfs implements Streamable {
+
+ private ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics;
+ private ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics;
+ private long maxDoc;
+
+ private AggregatedDfs() {
+ }
+
+ public AggregatedDfs(ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics, ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics, long maxDoc) {
+ this.termStatistics = termStatistics;
+ this.fieldStatistics = fieldStatistics;
+ this.maxDoc = maxDoc;
+ }
+
+ public ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics() {
+ return termStatistics;
+ }
+
+ public ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics() {
+ return fieldStatistics;
+ }
+
+ public long maxDoc() {
+ return maxDoc;
+ }
+
+ public static AggregatedDfs readAggregatedDfs(StreamInput in) throws IOException {
+ AggregatedDfs result = new AggregatedDfs();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ termStatistics = HppcMaps.newMap(size);
+ for (int i = 0; i < size; i++) {
+ Term term = new Term(in.readString(), in.readBytesRef());
+ TermStatistics stats = new TermStatistics(in.readBytesRef(),
+ in.readVLong(),
+ DfsSearchResult.subOne(in.readVLong()));
+ termStatistics.put(term, stats);
+ }
+ fieldStatistics = DfsSearchResult.readFieldStats(in);
+ maxDoc = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(final StreamOutput out) throws IOException {
+ out.writeVInt(termStatistics.size());
+ final boolean[] states = termStatistics.allocated;
+ final Object[] keys = termStatistics.keys;
+ final Object[] values = termStatistics.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ Term term = (Term) keys[i];
+ out.writeString(term.field());
+ out.writeBytesRef(term.bytes());
+ TermStatistics stats = (TermStatistics) values[i];
+ out.writeBytesRef(stats.term());
+ out.writeVLong(stats.docFreq());
+ out.writeVLong(DfsSearchResult.addOne(stats.totalTermFreq()));
+ }
+ }
+ DfsSearchResult.writeFieldStats(out, fieldStatistics);
+ out.writeVLong(maxDoc);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java
new file mode 100644
index 0000000..70795a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.dfs;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.similarities.Similarity;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class CachedDfSource extends IndexSearcher {
+
+ private final AggregatedDfs aggregatedDfs;
+
+ private final int maxDoc;
+
+ public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity) throws IOException {
+ super(reader);
+ this.aggregatedDfs = aggregatedDfs;
+ setSimilarity(similarity);
+ if (aggregatedDfs.maxDoc() > Integer.MAX_VALUE) {
+ maxDoc = Integer.MAX_VALUE;
+ } else {
+ maxDoc = (int) aggregatedDfs.maxDoc();
+ }
+ }
+
+
+ @Override
+ public TermStatistics termStatistics(Term term, TermContext context) throws IOException {
+ TermStatistics termStatistics = aggregatedDfs.termStatistics().get(term);
+ if (termStatistics == null) {
+ // we don't have stats for this term - this might be a must_not clause etc. that doesn't allow extracting terms from the query
+ return super.termStatistics(term, context);
+ }
+ return termStatistics;
+ }
+
+ @Override
+ public CollectionStatistics collectionStatistics(String field) throws IOException {
+ CollectionStatistics collectionStatistics = aggregatedDfs.fieldStatistics().get(field);
+ if (collectionStatistics == null) {
+ // we don't have stats for this field - this might be a must_not clause etc. that doesn't allow extracting terms from the query
+ return super.collectionStatistics(field);
+ }
+ return collectionStatistics;
+ }
+
+ public int maxDoc() {
+ return this.maxDoc;
+ }
+
+ public Query rewrite(Query query) {
+ // this is a bit of a hack. We know that a query which
+ // creates a Weight based on this Dummy-Searcher is
+ // always already rewritten (see preparedWeight()).
+ // Therefore we just return the unmodified query here
+ return query;
+ }
+
+ public Document doc(int i) {
+ throw new UnsupportedOperationException();
+ }
+
+ public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ public Explanation explain(Weight weight, int doc) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TopDocs search(Weight weight, ScoreDoc after, int nDocs) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TopDocs search(List<AtomicReaderContext> leaves, Weight weight, ScoreDoc after, int nDocs) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TopFieldDocs search(Weight weight, int nDocs, Sort sort, boolean doDocScores, boolean doMaxScore) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TopFieldDocs search(Weight weight, FieldDoc after, int nDocs, Sort sort, boolean fillFields, boolean doDocScores, boolean doMaxScore) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TopFieldDocs search(List<AtomicReaderContext> leaves, Weight weight, FieldDoc after, int nDocs, Sort sort, boolean fillFields, boolean doDocScores, boolean doMaxScore) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
new file mode 100644
index 0000000..635a927
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.dfs;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.TermStatistics;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * The dfs phase of a search request: collects the term and field statistics for
+ * the query terms so that scoring can later be based on index-wide (distributed)
+ * frequencies rather than per-shard ones.
+ */
+public class DfsPhase implements SearchPhase {
+
+ private static ThreadLocal<ObjectOpenHashSet<Term>> cachedTermsSet = new ThreadLocal<ObjectOpenHashSet<Term>>() {
+ @Override
+ protected ObjectOpenHashSet<Term> initialValue() {
+ return new ObjectOpenHashSet<Term>();
+ }
+ };
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.of();
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ }
+
+ public void execute(SearchContext context) {
+ final ObjectOpenHashSet<Term> termsSet = cachedTermsSet.get();
+ try {
+ if (!context.queryRewritten()) {
+ context.updateRewriteQuery(context.searcher().rewrite(context.query()));
+ }
+
+ if (!termsSet.isEmpty()) {
+ termsSet.clear();
+ }
+ context.query().extractTerms(new DelegateSet(termsSet));
+ if (context.rescore() != null) {
+ context.rescore().rescorer().extractTerms(context, context.rescore(), new DelegateSet(termsSet));
+ }
+
+ Term[] terms = termsSet.toArray(Term.class);
+ TermStatistics[] termStatistics = new TermStatistics[terms.length];
+ IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
+ for (int i = 0; i < terms.length; i++) {
+ // LUCENE 4 UPGRADE: cache TermContext?
+ TermContext termContext = TermContext.build(indexReaderContext, terms[i]);
+ termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
+ }
+
+ ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
+ for (Term term : terms) {
+ assert term.field() != null : "field is null";
+ if (!fieldStatistics.containsKey(term.field())) {
+ final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field());
+ fieldStatistics.put(term.field(), collectionStatistics);
+ }
+ }
+
+ context.dfsResult().termsStatistics(terms, termStatistics)
+ .fieldStatistics(fieldStatistics)
+ .maxDoc(context.searcher().getIndexReader().maxDoc());
+ } catch (Exception e) {
+ throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);
+ } finally {
+ termsSet.clear(); // don't hold on to terms
+ }
+ }
+
+ // We need to bridge to the JCF world because Query#extractTerms expects a java.util.Set<Term>
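+ //
+ // A usage sketch, mirroring execute() above:
+ //
+ //   ObjectOpenHashSet<Term> terms = new ObjectOpenHashSet<Term>();
+ //   query.extractTerms(new DelegateSet(terms));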
+ private static class DelegateSet extends AbstractSet<Term> {
+
+ private final ObjectOpenHashSet<Term> delegate;
+
+ private DelegateSet(ObjectOpenHashSet<Term> delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public boolean add(Term term) {
+ return delegate.add(term);
+ }
+
+ @Override
+ public boolean addAll(Collection<? extends Term> terms) {
+ boolean result = false;
+ for (Term term : terms) {
+ result |= delegate.add(term); // |= so a later unchanged add doesn't mask earlier changes
+ }
+ return result;
+ }
+
+ @Override
+ public Iterator<Term> iterator() {
+ final Iterator<ObjectCursor<Term>> iterator = delegate.iterator();
+ return new Iterator<Term>() {
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Term next() {
+ return iterator.next().value;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+
+ @Override
+ public int size() {
+ return delegate.size();
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java b/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java
new file mode 100644
index 0000000..2490c8b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.dfs;
+
+import org.elasticsearch.search.SearchContextException;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class DfsPhaseExecutionException extends SearchContextException {
+
+ public DfsPhaseExecutionException(SearchContext context, String msg, Throwable t) {
+ super(context, "Dfs Failed [" + msg + "]", t);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java
new file mode 100644
index 0000000..4456bea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.dfs;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchPhaseResult;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ * The per-shard result of the dfs phase: the query terms with their term
+ * statistics, the per-field collection statistics, and the shard's maxDoc.
+ */
+public class DfsSearchResult extends TransportResponse implements SearchPhaseResult {
+
+ private static final Term[] EMPTY_TERMS = new Term[0];
+ private static final TermStatistics[] EMPTY_TERM_STATS = new TermStatistics[0];
+
+ private SearchShardTarget shardTarget;
+ private long id;
+ private Term[] terms;
+ private TermStatistics[] termStatistics;
+ private ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
+ private int maxDoc;
+
+ public DfsSearchResult() {
+
+ }
+
+ public DfsSearchResult(long id, SearchShardTarget shardTarget) {
+ this.id = id;
+ this.shardTarget = shardTarget;
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ public SearchShardTarget shardTarget() {
+ return shardTarget;
+ }
+
+ @Override
+ public void shardTarget(SearchShardTarget shardTarget) {
+ this.shardTarget = shardTarget;
+ }
+
+ public DfsSearchResult maxDoc(int maxDoc) {
+ this.maxDoc = maxDoc;
+ return this;
+ }
+
+ public int maxDoc() {
+ return maxDoc;
+ }
+
+ public DfsSearchResult termsStatistics(Term[] terms, TermStatistics[] termStatistics) {
+ this.terms = terms;
+ this.termStatistics = termStatistics;
+ return this;
+ }
+
+ public DfsSearchResult fieldStatistics(ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics) {
+ this.fieldStatistics = fieldStatistics;
+ return this;
+ }
+
+ public Term[] terms() {
+ return terms;
+ }
+
+ public TermStatistics[] termStatistics() {
+ return termStatistics;
+ }
+
+ public ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics() {
+ return fieldStatistics;
+ }
+
+ public static DfsSearchResult readDfsSearchResult(StreamInput in) throws IOException, ClassNotFoundException {
+ DfsSearchResult result = new DfsSearchResult();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ int termsSize = in.readVInt();
+ if (termsSize == 0) {
+ terms = EMPTY_TERMS;
+ } else {
+ terms = new Term[termsSize];
+ for (int i = 0; i < terms.length; i++) {
+ terms[i] = new Term(in.readString(), in.readBytesRef());
+ }
+ }
+ this.termStatistics = readTermStats(in, terms);
+ readFieldStats(in, fieldStatistics);
+
+ maxDoc = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(terms.length);
+ for (Term term : terms) {
+ out.writeString(term.field());
+ out.writeBytesRef(term.bytes());
+ }
+ writeTermStats(out, termStatistics);
+ writeFieldStats(out, fieldStatistics);
+ out.writeVInt(maxDoc);
+ }
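+
+ /*
+ * A serialization round-trip sketch, for illustration (assumes the
+ * BytesStreamOutput and BytesStreamInput helpers from
+ * org.elasticsearch.common.io.stream):
+ *
+ *   BytesStreamOutput out = new BytesStreamOutput();
+ *   result.writeTo(out);
+ *   DfsSearchResult copy = readDfsSearchResult(new BytesStreamInput(out.bytes()));
+ */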
+
+ public static void writeFieldStats(StreamOutput out, ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics) throws IOException {
+ out.writeVInt(fieldStatistics.size());
+ final boolean[] states = fieldStatistics.allocated;
+ Object[] keys = fieldStatistics.keys;
+ Object[] values = fieldStatistics.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ out.writeString((String) keys[i]);
+ CollectionStatistics statistics = (CollectionStatistics) values[i];
+ assert statistics.maxDoc() >= 0;
+ out.writeVLong(statistics.maxDoc());
+ out.writeVLong(addOne(statistics.docCount()));
+ out.writeVLong(addOne(statistics.sumTotalTermFreq()));
+ out.writeVLong(addOne(statistics.sumDocFreq()));
+ }
+ }
+ }
+
+ public static void writeTermStats(StreamOutput out, TermStatistics[] termStatistics) throws IOException {
+ out.writeVInt(termStatistics.length);
+ for (TermStatistics termStatistic : termStatistics) {
+ writeSingleTermStats(out, termStatistic);
+ }
+ }
+
+ public static void writeSingleTermStats(StreamOutput out, TermStatistics termStatistic) throws IOException {
+ assert termStatistic.docFreq() >= 0;
+ out.writeVLong(termStatistic.docFreq());
+ out.writeVLong(addOne(termStatistic.totalTermFreq()));
+ }
+
+ public static ObjectObjectOpenHashMap<String, CollectionStatistics> readFieldStats(StreamInput in) throws IOException {
+ return readFieldStats(in, null);
+ }
+
+ public static ObjectObjectOpenHashMap<String, CollectionStatistics> readFieldStats(StreamInput in, ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics) throws IOException {
+ final int numFieldStatistics = in.readVInt();
+ if (fieldStatistics == null) {
+ fieldStatistics = HppcMaps.newNoNullKeysMap(numFieldStatistics);
+ }
+ for (int i = 0; i < numFieldStatistics; i++) {
+ final String field = in.readString();
+ assert field != null;
+ final long maxDoc = in.readVLong();
+ final long docCount = subOne(in.readVLong());
+ final long sumTotalTermFreq = subOne(in.readVLong());
+ final long sumDocFreq = subOne(in.readVLong());
+ CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq);
+ fieldStatistics.put(field, stats);
+ }
+ return fieldStatistics;
+ }
+
+ public static TermStatistics[] readTermStats(StreamInput in, Term[] terms) throws IOException {
+ int termsStatsSize = in.readVInt();
+ final TermStatistics[] termStatistics;
+ if (termsStatsSize == 0) {
+ termStatistics = EMPTY_TERM_STATS;
+ } else {
+ termStatistics = new TermStatistics[termsStatsSize];
+ assert terms.length == termsStatsSize;
+ for (int i = 0; i < termStatistics.length; i++) {
+ BytesRef term = terms[i].bytes();
+ final long docFreq = in.readVLong();
+ assert docFreq >= 0;
+ final long totalTermFreq = subOne(in.readVLong());
+ termStatistics[i] = new TermStatistics(term, docFreq, totalTermFreq);
+ }
+ }
+ return termStatistics;
+ }
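+
+ /*
+ * Wire format written/read above (every numeric value is a vlong, with
+ * "+ 1" applied per addOne so that Lucene's -1 "not available" marker
+ * encodes as 0):
+ *
+ *   field stats: count, then per field:
+ *     name, maxDoc, docCount + 1, sumTotalTermFreq + 1, sumDocFreq + 1
+ *   term stats: count, then per term: docFreq, totalTermFreq + 1
+ */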
+
+ /*
+ * Optional statistics are set to -1 in Lucene by default. Since we encode
+ * values as vlongs, we add one to each value so that -1 maps to 0 and we
+ * never write a negative value (which would waste space in the encoding).
+ */
+ public static long addOne(long value) {
+ assert value + 1 >= 0;
+ return value + 1;
+ }
+
+ /*
+ * See #addOne. This just subtracts one again, after asserting that the
+ * encoded value read off the wire is non-negative.
+ */
+ public static long subOne(long value) {
+ assert value >= 0;
+ return value - 1;
+ }
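+
+ /*
+ * Worked example: a field without the needed index statistics reports
+ * sumTotalTermFreq() == -1; addOne maps that to 0, which a vlong encodes
+ * in a single byte, and subOne(0) restores -1 on the reading side.
+ */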
+
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/DoubleFacetAggregatorBase.java b/src/main/java/org/elasticsearch/search/facet/DoubleFacetAggregatorBase.java
new file mode 100644
index 0000000..4f00ddf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/DoubleFacetAggregatorBase.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.index.fielddata.DoubleValues;
+
+/**
+ * Simple Facet aggregator base class for {@link DoubleValues}
+ */
+public abstract class DoubleFacetAggregatorBase {
+ private int total;
+ private int missing;
+
+ public void onDoc(int docId, DoubleValues values) {
+ int numValues = values.setDocument(docId);
+ int tempMissing = 1; // assume the doc is missing until we see at least one value
+ for (int i = 0; i < numValues; i++) {
+ tempMissing = 0;
+ onValue(docId, values.nextValue());
+ total++;
+ }
+ missing += tempMissing;
+ }
+
+ protected abstract void onValue(int docId, double next);
+
+ public final int total() {
+ return total;
+ }
+
+ public final int missing() {
+ return missing;
+ }
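+
+ /*
+ * A minimal concrete aggregator, for illustration only (SumAggregator is
+ * a hypothetical name, not part of this code base):
+ *
+ *   class SumAggregator extends DoubleFacetAggregatorBase {
+ *       double sum;
+ *       protected void onValue(int docId, double next) {
+ *           sum += next;
+ *       }
+ *   }
+ */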
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/Facet.java b/src/main/java/org/elasticsearch/search/facet/Facet.java
new file mode 100644
index 0000000..eb728b8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/Facet.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+/**
+ * A search facet.
+ */
+public interface Facet {
+
+ /**
+ * The "logical" name of the search facet.
+ */
+ String getName();
+
+ /**
+ * The type of the facet.
+ */
+ String getType();
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetBinaryParseElement.java b/src/main/java/org/elasticsearch/search/facet/FacetBinaryParseElement.java
new file mode 100644
index 0000000..49e9a1d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetBinaryParseElement.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses a binary facets element (e.g. one provided base64 encoded) by
+ * re-parsing its bytes as regular facet source.
+ */
+public class FacetBinaryParseElement extends FacetParseElement {
+
+ @Inject
+ public FacetBinaryParseElement(FacetParsers facetParsers) {
+ super(facetParsers);
+ }
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ byte[] facetSource = parser.binaryValue();
+ XContentParser fSourceParser = XContentFactory.xContent(facetSource).createParser(facetSource);
+ try {
+ fSourceParser.nextToken(); // move past the first START_OBJECT
+ super.parse(fSourceParser, context);
+ } finally {
+ fSourceParser.close();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/FacetBuilder.java
new file mode 100644
index 0000000..eaf1f8d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetBuilder.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+
+import java.io.IOException;
+
+/**
+ * Base class for facet builders, handling the settings common to all facets:
+ * facet_filter, global, nested and mode.
+ */
+public abstract class FacetBuilder implements ToXContent {
+
+ public static enum Mode {
+ COLLECTOR() {
+ @Override
+ public String toString() {
+ return "collector";
+ }
+ },
+ POST() {
+ @Override
+ public String toString() {
+ return "post";
+ }
+ };
+
+ public abstract String toString();
+ }
+
+ protected final String name;
+ protected FilterBuilder facetFilter;
+ protected Boolean global;
+ protected String nested;
+ protected Mode mode;
+
+ protected FacetBuilder(String name) {
+ this.name = name;
+ }
+
+ public FacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path the facet will execute on. For each matching root
+ * document, all of its nested objects under the path are then aggregated
+ * into the facet.
+ */
+ public FacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ public FacetBuilder global(boolean global) {
+ this.global = global;
+ return this;
+ }
+
+ public FacetBuilder mode(Mode mode) {
+ this.mode = mode;
+ return this;
+ }
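+
+ /*
+ * Typical chained usage, for illustration (TermsFacetBuilder and
+ * FilterBuilders are assumed from the standard Java facet API):
+ *
+ *   FacetBuilders.termsFacet("tags")
+ *       .facetFilter(FilterBuilders.termFilter("user", "kimchy"))
+ *       .global(true);
+ */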
+
+ protected void addFilterFacetAndGlobal(XContentBuilder builder, Params params) throws IOException {
+ if (facetFilter != null) {
+ builder.field("facet_filter");
+ facetFilter.toXContent(builder, params);
+ }
+ if (nested != null) {
+ builder.field("nested", nested);
+ }
+ if (global != null) {
+ builder.field("global", global);
+ }
+ if (mode != null) {
+ builder.field("mode", mode.toString());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetBuilders.java b/src/main/java/org/elasticsearch/search/facet/FacetBuilders.java
new file mode 100644
index 0000000..dc32ccd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetBuilders.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.facet.datehistogram.DateHistogramFacetBuilder;
+import org.elasticsearch.search.facet.filter.FilterFacetBuilder;
+import org.elasticsearch.search.facet.geodistance.GeoDistanceFacetBuilder;
+import org.elasticsearch.search.facet.histogram.HistogramFacetBuilder;
+import org.elasticsearch.search.facet.histogram.HistogramScriptFacetBuilder;
+import org.elasticsearch.search.facet.query.QueryFacetBuilder;
+import org.elasticsearch.search.facet.range.RangeFacetBuilder;
+import org.elasticsearch.search.facet.range.RangeScriptFacetBuilder;
+import org.elasticsearch.search.facet.statistical.StatisticalFacetBuilder;
+import org.elasticsearch.search.facet.statistical.StatisticalScriptFacetBuilder;
+import org.elasticsearch.search.facet.terms.TermsFacetBuilder;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacetBuilder;
+
+/**
+ * Static factory methods for all the built-in {@link FacetBuilder}s.
+ */
+public class FacetBuilders {
+
+ public static QueryFacetBuilder queryFacet(String facetName) {
+ return new QueryFacetBuilder(facetName);
+ }
+
+ public static QueryFacetBuilder queryFacet(String facetName, QueryBuilder query) {
+ return new QueryFacetBuilder(facetName).query(query);
+ }
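+
+ /*
+ * For example, attaching a facet to a search request (a sketch; assumes
+ * the standard Client and QueryBuilders APIs):
+ *
+ *   client.prepareSearch("index")
+ *       .setQuery(QueryBuilders.matchAllQuery())
+ *       .addFacet(FacetBuilders.queryFacet("all", QueryBuilders.matchAllQuery()))
+ *       .execute().actionGet();
+ */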
+
+ public static FilterFacetBuilder filterFacet(String facetName) {
+ return new FilterFacetBuilder(facetName);
+ }
+
+ public static FilterFacetBuilder filterFacet(String facetName, FilterBuilder filter) {
+ return new FilterFacetBuilder(facetName).filter(filter);
+ }
+
+ public static TermsFacetBuilder termsFacet(String facetName) {
+ return new TermsFacetBuilder(facetName);
+ }
+
+ public static TermsStatsFacetBuilder termsStatsFacet(String facetName) {
+ return new TermsStatsFacetBuilder(facetName);
+ }
+
+ public static StatisticalFacetBuilder statisticalFacet(String facetName) {
+ return new StatisticalFacetBuilder(facetName);
+ }
+
+ public static StatisticalScriptFacetBuilder statisticalScriptFacet(String facetName) {
+ return new StatisticalScriptFacetBuilder(facetName);
+ }
+
+ public static HistogramFacetBuilder histogramFacet(String facetName) {
+ return new HistogramFacetBuilder(facetName);
+ }
+
+ public static DateHistogramFacetBuilder dateHistogramFacet(String facetName) {
+ return new DateHistogramFacetBuilder(facetName);
+ }
+
+ public static HistogramScriptFacetBuilder histogramScriptFacet(String facetName) {
+ return new HistogramScriptFacetBuilder(facetName);
+ }
+
+ public static RangeFacetBuilder rangeFacet(String facetName) {
+ return new RangeFacetBuilder(facetName);
+ }
+
+ public static RangeScriptFacetBuilder rangeScriptFacet(String facetName) {
+ return new RangeScriptFacetBuilder(facetName);
+ }
+
+ public static GeoDistanceFacetBuilder geoDistanceFacet(String facetName) {
+ return new GeoDistanceFacetBuilder(facetName);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/FacetExecutor.java
new file mode 100644
index 0000000..73c2f8f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetExecutor.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.lucene.docset.AndDocIdSet;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.lucene.search.XCollector;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A facet executor ends up actually executing the relevant facet for a specific
+ * search request.
+ * <p/>
+ * The facet executor requires at least the {@link #collector()} method to be implemented,
+ * with an optional {@link #post()} implementation if specific optimizations can be done.
+ */
+public abstract class FacetExecutor {
+
+ /**
+ * A post class extends this class to implement post hits processing.
+ */
+ public static abstract class Post {
+
+ public abstract void executePost(List<ContextDocIdSet> docSets) throws IOException;
+
+ /**
+ * A filtered post execution.
+ */
+ public static class Filtered extends Post {
+
+ private final Post post;
+ private final Filter filter;
+
+ public Filtered(Post post, Filter filter) {
+ this.post = post;
+ this.filter = filter;
+ }
+
+ @Override
+ public void executePost(List<ContextDocIdSet> docSets) throws IOException {
+ List<ContextDocIdSet> filteredEntries = new ArrayList<ContextDocIdSet>(docSets.size());
+ for (int i = 0; i < docSets.size(); i++) {
+ ContextDocIdSet entry = docSets.get(i);
+ DocIdSet filteredSet = filter.getDocIdSet(entry.context, null);
+ if (filteredSet != null) {
+ filteredEntries.add(new ContextDocIdSet(
+ entry.context,
+ // TODO: can we be smart here, maybe AndDocIdSet is not always fastest?
+ new AndDocIdSet(new DocIdSet[]{entry.docSet, filteredSet})
+ ));
+ }
+ }
+ post.executePost(filteredEntries);
+ }
+ }
+
+ /**
+ * A {@link FacetExecutor.Collector} based post.
+ */
+ public static class Collector extends Post {
+
+ private final FacetExecutor.Collector collector;
+
+ public Collector(FacetExecutor.Collector collector) {
+ this.collector = collector;
+ }
+
+ @Override
+ public void executePost(List<ContextDocIdSet> docSets) throws IOException {
+ for (int i = 0; i < docSets.size(); i++) {
+ ContextDocIdSet docSet = docSets.get(i);
+ collector.setNextReader(docSet.context);
+ DocIdSetIterator it = docSet.docSet.iterator();
+ int doc;
+ while ((doc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+ collector.collect(doc);
+ }
+ }
+ collector.postCollection();
+ }
+ }
+ }
+
+ /**
+ * Simple extension to {@link XCollector} that implements methods that are typically
+ * not needed when doing collector based faceting.
+ */
+ public static abstract class Collector extends XCollector {
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ @Override
+ public abstract void postCollection();
+ }
+
+ /**
+ * The mode of the execution.
+ */
+ public static enum Mode {
+ /**
+ * Collector mode, maps to {@link #collector()}.
+ */
+ COLLECTOR,
+ /**
+ * Post mode, maps to {@link #post()}.
+ */
+ POST
+ }
+
+ /**
+ * Builds the facet.
+ */
+ public abstract InternalFacet buildFacet(String facetName);
+
+ /**
+ * A collector based facet implementation, collecting the facet as hits match.
+ */
+ public abstract Collector collector();
+
+ /**
+ * A post based facet that executes the facet using the aggregated docs. By default
+ * uses the {@link Post.Collector} based implementation.
+ * <p/>
+ * Can be overridden when a more optimized, non collector based implementation is possible.
+ */
+ public Post post() {
+ return new Post.Collector(collector());
+ }
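+
+ /*
+ * A minimal executor sketch, for illustration only (CountExecutor is a
+ * hypothetical name; setNextReader/collect are inherited from Lucene's
+ * Collector via XCollector):
+ *
+ *   class CountExecutor extends FacetExecutor {
+ *       long count;
+ *       public Collector collector() {
+ *           return new Collector() {
+ *               public void setNextReader(AtomicReaderContext ctx) {}
+ *               public void collect(int doc) { count++; }
+ *               public void postCollection() {}
+ *           };
+ *       }
+ *       public InternalFacet buildFacet(String facetName) {
+ *           return null; // would build e.g. a filter facet carrying the count
+ *       }
+ *   }
+ */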
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetModule.java b/src/main/java/org/elasticsearch/search/facet/FacetModule.java
new file mode 100644
index 0000000..0713557
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetModule.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.search.facet.datehistogram.DateHistogramFacetParser;
+import org.elasticsearch.search.facet.filter.FilterFacetParser;
+import org.elasticsearch.search.facet.geodistance.GeoDistanceFacetParser;
+import org.elasticsearch.search.facet.histogram.HistogramFacetParser;
+import org.elasticsearch.search.facet.query.QueryFacetParser;
+import org.elasticsearch.search.facet.range.RangeFacetParser;
+import org.elasticsearch.search.facet.statistical.StatisticalFacetParser;
+import org.elasticsearch.search.facet.terms.TermsFacetParser;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacetParser;
+
+import java.util.List;
+
+/**
+ * Binds the built-in facet parsers, the facet parse elements and the facet
+ * phase. Plugins can register additional parsers via {@link #addFacetProcessor}.
+ */
+public class FacetModule extends AbstractModule {
+
+ private List<Class<? extends FacetParser>> processors = Lists.newArrayList();
+
+ public FacetModule() {
+ processors.add(FilterFacetParser.class);
+ processors.add(QueryFacetParser.class);
+ processors.add(GeoDistanceFacetParser.class);
+ processors.add(HistogramFacetParser.class);
+ processors.add(DateHistogramFacetParser.class);
+ processors.add(RangeFacetParser.class);
+ processors.add(StatisticalFacetParser.class);
+ processors.add(TermsFacetParser.class);
+ processors.add(TermsStatsFacetParser.class);
+ }
+
+ public void addFacetProcessor(Class<? extends FacetParser> facetProcessor) {
+ processors.add(facetProcessor);
+ }
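+
+ /*
+ * For example, a plugin registering a custom facet type before the module
+ * is configured (MyFacetParser is a hypothetical class):
+ *
+ *   facetModule.addFacetProcessor(MyFacetParser.class);
+ */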
+
+ @Override
+ protected void configure() {
+ Multibinder<FacetParser> multibinder = Multibinder.newSetBinder(binder(), FacetParser.class);
+ for (Class<? extends FacetParser> processor : processors) {
+ multibinder.addBinding().to(processor);
+ }
+ bind(FacetParsers.class).asEagerSingleton();
+ bind(FacetParseElement.class).asEagerSingleton();
+ bind(FacetPhase.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetParseElement.java b/src/main/java/org/elasticsearch/search/facet/FacetParseElement.java
new file mode 100644
index 0000000..bd33347
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetParseElement.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.facet.nested.NestedFacetExecutor;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * <pre>
+ * facets : {
+ * facet1: {
+ * query : { ... },
+ * global : false
+ * },
+ * facet2: {
+ * terms : {
+ * name : "myfield",
+ * size : 12
+ * },
+ * global : false
+ * }
+ * }
+ * </pre>
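+ * <pre>
+ * Next to its type element, each facet entry also accepts:
+ *   facet_filter : { ... }       (filter hits before they are facetted)
+ *   global : true | false        (run over all docs, not just query hits)
+ *   mode : "collector" | "post"
+ *   nested : "path.to.nested"
+ *   cache_filter : true | false
+ * </pre>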
+ */
+public class FacetParseElement implements SearchParseElement {
+
+ private final FacetParsers facetParsers;
+
+ @Inject
+ public FacetParseElement(FacetParsers facetParsers) {
+ this.facetParsers = facetParsers;
+ }
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+
+ List<SearchContextFacets.Entry> entries = new ArrayList<SearchContextFacets.Entry>();
+
+ String facetName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ facetName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ FacetExecutor facetExecutor = null;
+ boolean global = false;
+ FacetExecutor.Mode defaultMainMode = null;
+ FacetExecutor.Mode defaultGlobalMode = null;
+ FacetExecutor.Mode mode = null;
+ Filter filter = null;
+ boolean cacheFilter = false;
+ String nestedPath = null;
+
+ String fieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("facet_filter".equals(fieldName) || "facetFilter".equals(fieldName)) {
+ ParsedFilter parsedFilter = context.queryParserService().parseInnerFilter(parser);
+ filter = parsedFilter == null ? null : parsedFilter.filter();
+ } else {
+ FacetParser facetParser = facetParsers.parser(fieldName);
+ if (facetParser == null) {
+ throw new SearchParseException(context, "No facet type found for [" + fieldName + "]");
+ }
+ facetExecutor = facetParser.parse(facetName, parser, context);
+ defaultMainMode = facetParser.defaultMainMode();
+ defaultGlobalMode = facetParser.defaultGlobalMode();
+ }
+ } else if (token.isValue()) {
+ if ("global".equals(fieldName)) {
+ global = parser.booleanValue();
+ } else if ("mode".equals(fieldName)) {
+ String modeAsText = parser.text();
+ if ("collector".equals(modeAsText)) {
+ mode = FacetExecutor.Mode.COLLECTOR;
+ } else if ("post".equals(modeAsText)) {
+ mode = FacetExecutor.Mode.POST;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("failed to parse facet mode [" + modeAsText + "]");
+ }
+ } else if ("scope".equals(fieldName) || "_scope".equals(fieldName)) {
+ throw new SearchParseException(context, "the [scope] support in facets have been removed");
+ } else if ("cache_filter".equals(fieldName) || "cacheFilter".equals(fieldName)) {
+ cacheFilter = parser.booleanValue();
+ } else if ("nested".equals(fieldName)) {
+ nestedPath = parser.text();
+ }
+ }
+ }
+
+ if (filter != null && cacheFilter) {
+ filter = context.filterCache().cache(filter);
+ }
+
+ if (facetExecutor == null) {
+ throw new SearchParseException(context, "no facet type found for facet named [" + facetName + "]");
+ }
+
+ if (nestedPath != null) {
+ facetExecutor = new NestedFacetExecutor(facetExecutor, context, nestedPath);
+ }
+
+ if (mode == null) {
+ mode = global ? defaultGlobalMode : defaultMainMode;
+ }
+ entries.add(new SearchContextFacets.Entry(facetName, mode, facetExecutor, global, filter));
+ }
+ }
+
+ context.facets(new SearchContextFacets(entries));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetParser.java b/src/main/java/org/elasticsearch/search/facet/FacetParser.java
new file mode 100644
index 0000000..ff630e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetParser.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A facet parser parses the relevant matching "type" of facet into a {@link FacetExecutor}.
+ * <p/>
+ * The parser also suggests the default {@link FacetExecutor.Mode} for both global and main executions.
+ */
+public interface FacetParser {
+
+ /**
+ * The type names this parser handles, for example "terms".
+ */
+ String[] types();
+
+ /**
+ * The default mode to use when executed as a "main" (query level) facet.
+ */
+ FacetExecutor.Mode defaultMainMode();
+
+ /**
+ * The default mode to use when executed as a "global" (all docs) facet.
+ */
+ FacetExecutor.Mode defaultGlobalMode();
+
+ /**
+ * Parses the facet into a {@link FacetExecutor}.
+ */
+ FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetParsers.java b/src/main/java/org/elasticsearch/search/facet/FacetParsers.java
new file mode 100644
index 0000000..fbe3fe1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetParsers.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+
+import java.util.Set;
+
+/**
+ * Holds all registered {@link FacetParser}s, keyed by facet type name.
+ */
+public class FacetParsers {
+
+ private final ImmutableMap<String, FacetParser> parsers;
+
+ @Inject
+ public FacetParsers(Set<FacetParser> parsers) {
+ MapBuilder<String, FacetParser> builder = MapBuilder.newMapBuilder();
+ for (FacetParser parser : parsers) {
+ for (String type : parser.types()) {
+ builder.put(type, parser);
+ }
+ }
+ this.parsers = builder.immutableMap();
+ }
+
+ public FacetParser parser(String type) {
+ return parsers.get(type);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetPhase.java b/src/main/java/org/elasticsearch/search/facet/FacetPhase.java
new file mode 100644
index 0000000..b038ca7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetPhase.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.*;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.docset.AllDocIdSet;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.lucene.search.*;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.facet.nested.NestedFacetExecutor;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.query.QueryPhaseExecutionException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The search phase that executes facets, either as collectors that run as
+ * part of the main query, or as a post phase over the collected doc id sets.
+ */
+public class FacetPhase implements SearchPhase {
+
+ private final FacetParseElement facetParseElement;
+
+ private final FacetBinaryParseElement facetBinaryParseElement;
+
+ @Inject
+ public FacetPhase(FacetParseElement facetParseElement, FacetBinaryParseElement facetBinaryParseElement) {
+ this.facetParseElement = facetParseElement;
+ this.facetBinaryParseElement = facetBinaryParseElement;
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.of("facets", facetParseElement, "facets_binary", facetBinaryParseElement, "facetsBinary", facetBinaryParseElement);
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ if (context.facets() != null && context.facets().hasQuery()) {
+ for (SearchContextFacets.Entry entry : context.facets().entries()) {
+ if (entry.isGlobal()) {
+ continue;
+ }
+ if (entry.getMode() == FacetExecutor.Mode.COLLECTOR) {
+ // TODO: We can pass the filter as param to collector method, then this filter wrapper logic can
+ // be moved to NestedFacetExecutor impl, the other implementations would just wrap it into
+ // FilteredCollector.
+ Collector collector = entry.getFacetExecutor().collector();
+
+ if (entry.getFilter() != null) {
+ if (collector instanceof NestedFacetExecutor.Collector) {
+ // We get rootDoc ids as hits in the collect method, so we first need to translate each
+ // rootDoc hit into its nested doc hits and only then apply the filter.
+ collector = new NestedFacetExecutor.Collector((NestedFacetExecutor.Collector) collector, entry.getFilter());
+ // If we applied the filter at the rootDoc level first and then translated to nested docs,
+ // the facet filter would be ignored and all nested docs would be passed to the facet collector.
+ } else {
+ collector = new FilteredCollector(collector, entry.getFilter());
+ }
+ }
+ context.searcher().addMainQueryCollector(collector);
+ } else if (entry.getMode() == FacetExecutor.Mode.POST) {
+ context.searcher().enableMainDocIdSetCollector();
+ } else {
+ throw new ElasticsearchIllegalStateException("unknown facet mode [" + entry.getMode() + "]");
+ }
+ }
+ }
+ }
+
+ @Override
+ public void execute(SearchContext context) throws ElasticsearchException {
+ if (context.facets() == null) {
+ return;
+ }
+
+ if (context.queryResult().facets() != null) {
+ // no need to compute the facets twice, they should be computed on a per context basis
+ return;
+ }
+
+ Map<Filter, List<Collector>> filtersByCollector = null;
+ List<ContextDocIdSet> globalDocSets = null;
+ for (SearchContextFacets.Entry entry : context.facets().entries()) {
+ if (!entry.isGlobal()) {
+ if (entry.getMode() == FacetExecutor.Mode.POST) {
+ FacetExecutor.Post post = entry.getFacetExecutor().post();
+ if (entry.getFilter() != null) {
+ if (post instanceof NestedFacetExecutor.Post) {
+ post = new NestedFacetExecutor.Post((NestedFacetExecutor.Post) post, entry.getFilter());
+ } else {
+ post = new FacetExecutor.Post.Filtered(post, entry.getFilter());
+ }
+ }
+ try {
+ post.executePost(context.searcher().mainDocIdSetCollector().docSets());
+ } catch (Exception e) {
+ throw new QueryPhaseExecutionException(context, "failed to execute facet [" + entry.getFacetName() + "]", e);
+ }
+ }
+ } else {
+ if (entry.getMode() == FacetExecutor.Mode.POST) {
+ if (globalDocSets == null) {
+ // build global post entries, map a reader context to a live docs docIdSet
+ List<AtomicReaderContext> leaves = context.searcher().getIndexReader().leaves();
+ globalDocSets = new ArrayList<ContextDocIdSet>(leaves.size());
+ for (AtomicReaderContext leaf : leaves) {
+ globalDocSets.add(new ContextDocIdSet(
+ leaf,
+ BitsFilteredDocIdSet.wrap(new AllDocIdSet(leaf.reader().maxDoc()), leaf.reader().getLiveDocs())) // need to only include live docs
+ );
+ }
+ }
+ try {
+ FacetExecutor.Post post = entry.getFacetExecutor().post();
+ if (entry.getFilter() != null) {
+ if (post instanceof NestedFacetExecutor.Post) {
+ post = new NestedFacetExecutor.Post((NestedFacetExecutor.Post) post, entry.getFilter());
+ } else {
+ post = new FacetExecutor.Post.Filtered(post, entry.getFilter());
+ }
+ }
+ post.executePost(globalDocSets);
+ } catch (Exception e) {
+ throw new QueryPhaseExecutionException(context, "Failed to execute facet [" + entry.getFacetName() + "]", e);
+ }
+ } else if (entry.getMode() == FacetExecutor.Mode.COLLECTOR) {
+ Filter filter = Queries.MATCH_ALL_FILTER;
+ Collector collector = entry.getFacetExecutor().collector();
+ if (entry.getFilter() != null) {
+ if (collector instanceof NestedFacetExecutor.Collector) {
+ collector = new NestedFacetExecutor.Collector((NestedFacetExecutor.Collector) collector, entry.getFilter());
+ } else {
+ collector = new FilteredCollector(collector, entry.getFilter());
+ }
+ }
+ if (filtersByCollector == null) {
+ filtersByCollector = Maps.newHashMap();
+ }
+ List<Collector> list = filtersByCollector.get(filter);
+ if (list == null) {
+ list = new ArrayList<Collector>();
+ filtersByCollector.put(filter, list);
+ }
+ list.add(collector);
+ }
+ }
+ }
+
+ // optimize the global collector based execution
+ if (filtersByCollector != null) {
+ // now, go and execute the filters->collector ones
+ for (Map.Entry<Filter, List<Collector>> entry : filtersByCollector.entrySet()) {
+ Filter filter = entry.getKey();
+ Query query = new XConstantScoreQuery(filter);
+ Filter searchFilter = context.searchFilter(context.types());
+ if (searchFilter != null) {
+ query = new XFilteredQuery(query, searchFilter);
+ }
+ try {
+ context.searcher().search(query, MultiCollector.wrap(entry.getValue().toArray(new Collector[entry.getValue().size()])));
+ } catch (Exception e) {
+ throw new QueryPhaseExecutionException(context, "Failed to execute global facets", e);
+ }
+ for (Collector collector : entry.getValue()) {
+ if (collector instanceof XCollector) {
+ ((XCollector) collector).postCollection();
+ }
+ }
+ }
+ }
+
+ List<Facet> facets = new ArrayList<Facet>(context.facets().entries().size());
+ for (SearchContextFacets.Entry entry : context.facets().entries()) {
+ facets.add(entry.getFacetExecutor().buildFacet(entry.getFacetName()));
+ }
+ context.queryResult().facets(new InternalFacets(facets));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/FacetPhaseExecutionException.java b/src/main/java/org/elasticsearch/search/facet/FacetPhaseExecutionException.java
new file mode 100644
index 0000000..70176be
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/FacetPhaseExecutionException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * Thrown when building or executing a facet fails.
+ */
+public class FacetPhaseExecutionException extends ElasticsearchException {
+
+ public FacetPhaseExecutionException(String facetName, String msg) {
+ super("Facet [" + facetName + "]: " + msg);
+ }
+
+ public FacetPhaseExecutionException(String facetName, String msg, Throwable t) {
+ super("Facet [" + facetName + "]: " + msg, t);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/Facets.java b/src/main/java/org/elasticsearch/search/facet/Facets.java
new file mode 100644
index 0000000..db3d270
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/Facets.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The facets of a search action.
+ */
+public interface Facets extends Iterable<Facet> {
+
+ /**
+ * The list of {@link Facet}s.
+ */
+ List<Facet> facets();
+
+ /**
+ * Returns the {@link Facet}s keyed by facet name.
+ */
+ Map<String, Facet> getFacets();
+
+ /**
+ * Returns the {@link Facet}s keyed by facet name.
+ */
+ Map<String, Facet> facetsAsMap();
+
+ /**
+ * Returns the facet by name, already cast to the specified type.
+ */
+ <T extends Facet> T facet(Class<T> facetType, String name);
+
+ /**
+ * A facet of the specified name.
+ */
+ <T extends Facet> T facet(String name);
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/InternalFacet.java b/src/main/java/org/elasticsearch/search/facet/InternalFacet.java
new file mode 100644
index 0000000..92d2cd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/InternalFacet.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Base class for facet implementations, handling the facet name, stream
+ * registration and the reduction of per-shard facets into a single result.
+ */
+public abstract class InternalFacet implements Facet, Streamable, ToXContent {
+
+ public static class ReduceContext {
+ private final CacheRecycler cacheRecycler;
+ private final List<Facet> facets;
+
+ public ReduceContext(CacheRecycler cacheRecycler, List<Facet> facets) {
+ this.cacheRecycler = cacheRecycler;
+ this.facets = facets;
+ }
+
+ public CacheRecycler cacheRecycler() {
+ return cacheRecycler;
+ }
+
+ public List<Facet> facets() {
+ return facets;
+ }
+ }
+
+ private String facetName;
+
+ /**
+ * Here just for streams...
+ */
+ protected InternalFacet() {
+
+ }
+
+ protected InternalFacet(String facetName) {
+ this.facetName = facetName;
+ }
+
+ public abstract BytesReference streamType();
+
+ public abstract Facet reduce(ReduceContext context);
+
+ public static interface Stream {
+ Facet readFacet(StreamInput in) throws IOException;
+ }
+
+ public static class Streams {
+
+ private static ImmutableMap<BytesReference, Stream> streams = ImmutableMap.of();
+
+ public static synchronized void registerStream(Stream stream, BytesReference... types) {
+ MapBuilder<BytesReference, Stream> uStreams = MapBuilder.newMapBuilder(streams);
+ for (BytesReference type : types) {
+ uStreams.put(type, stream);
+ }
+ streams = uStreams.immutableMap();
+ }
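+
+ /*
+ * A facet implementation typically registers its stream once, in a
+ * static registerStreams() hook, for example (STREAM and STREAM_TYPE are
+ * hypothetical constants of the implementing class):
+ *
+ *   InternalFacet.Streams.registerStream(STREAM, STREAM_TYPE);
+ */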
+
+ public static Stream stream(BytesReference type) {
+ return streams.get(type);
+ }
+ }
+
+ @Override
+ public final String getName() {
+ return facetName;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ facetName = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(facetName);
+ }
+}
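
The Streams registry above is the transport hook: a concrete facet registers a Stream under its stream type once, and deserialization later routes bytes back through that key. Below is a minimal sketch of a facet plugging into it, assuming only the API in this file; the class name, the stream id "myFacet" and the trivial reduce are made up for illustration.

package org.elasticsearch.search.facet;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

public class InternalMyFacet extends InternalFacet {

    // the wire identifier this facet type is registered under (made-up id)
    private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("myFacet"));

    // called once at startup (see TransportFacetModule below) so the transport
    // layer can route serialized facets of this type back to readFacet(...)
    public static void registerStreams() {
        Streams.registerStream(STREAM, STREAM_TYPE);
    }

    static final Stream STREAM = new Stream() {
        @Override
        public Facet readFacet(StreamInput in) throws IOException {
            InternalMyFacet facet = new InternalMyFacet();
            facet.readFrom(in); // restores the facet name written by InternalFacet.writeTo
            return facet;
        }
    };

    InternalMyFacet() {
        // here just for streams
    }

    public InternalMyFacet(String name) {
        super(name);
    }

    @Override
    public BytesReference streamType() {
        return STREAM_TYPE;
    }

    @Override
    public String getType() {
        return "my_facet";
    }

    @Override
    public Facet reduce(ReduceContext context) {
        return context.facets().get(0); // trivial reduce: keep the first shard result
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.startObject(getName()).field("_type", getType()).endObject();
    }
}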
diff --git a/src/main/java/org/elasticsearch/search/facet/InternalFacets.java b/src/main/java/org/elasticsearch/search/facet/InternalFacets.java
new file mode 100644
index 0000000..9a2c5ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/InternalFacets.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ *
+ */
+public class InternalFacets implements Facets, Streamable, ToXContent, Iterable<Facet> {
+
+ private List<Facet> facets = ImmutableList.of();
+
+ private Map<String, Facet> facetsAsMap;
+
+ private InternalFacets() {
+
+ }
+
+ /**
+ * Constructs a new facets instance.
+ */
+ public InternalFacets(List<Facet> facets) {
+ this.facets = facets;
+ }
+
+ /**
+ * Iterates over the {@link Facet}s.
+ */
+ @Override
+ public Iterator<Facet> iterator() {
+ return facets.iterator();
+ }
+
+ /**
+ * The list of {@link Facet}s.
+ */
+ public List<Facet> facets() {
+ return facets;
+ }
+
+ /**
+ * Returns the {@link Facet}s keyed by facet name.
+ */
+ public Map<String, Facet> getFacets() {
+ return facetsAsMap();
+ }
+
+ /**
+ * Returns the {@link Facet}s keyed by facet name.
+ */
+ public Map<String, Facet> facetsAsMap() {
+ if (facetsAsMap != null) {
+ return facetsAsMap;
+ }
+ Map<String, Facet> facetsAsMap = newHashMap();
+ for (Facet facet : facets) {
+ facetsAsMap.put(facet.getName(), facet);
+ }
+ this.facetsAsMap = facetsAsMap;
+ return facetsAsMap;
+ }
+
+ /**
+ * Returns the facet by name, already cast to the specified type.
+ */
+ @Override
+ public <T extends Facet> T facet(Class<T> facetType, String name) {
+ return facetType.cast(facet(name));
+ }
+
+ /**
+ * Returns the facet with the specified name.
+ */
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public <T extends Facet> T facet(String name) {
+ return (T) facetsAsMap().get(name);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString FACETS = new XContentBuilderString("facets");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.FACETS);
+ for (Facet facet : facets) {
+ ((InternalFacet) facet).toXContent(builder, params);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalFacets readFacets(StreamInput in) throws IOException {
+ InternalFacets result = new InternalFacets();
+ result.readFrom(in);
+ return result;
+ }
+
+ public static InternalFacets readOptionalFacets(StreamInput in) throws IOException {
+ return in.readOptionalStreamable(new InternalFacets());
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ if (size == 0) {
+ facets = ImmutableList.of();
+ facetsAsMap = ImmutableMap.of();
+ } else {
+ facets = Lists.newArrayListWithCapacity(size);
+ for (int i = 0; i < size; i++) {
+ BytesReference type = in.readBytesReference();
+ Facet facet = InternalFacet.Streams.stream(type).readFacet(in);
+ facets.add(facet);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(facets.size());
+ for (Facet facet : facets) {
+ InternalFacet internalFacet = (InternalFacet) facet;
+ out.writeBytesReference(internalFacet.streamType());
+ internalFacet.writeTo(out);
+ }
+ }
+}
+
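A usage sketch of the two lookup flavours above, assuming a Facets instance pulled from a search response; the facet name "per_day" is made up.

import org.elasticsearch.search.facet.Facets;
import org.elasticsearch.search.facet.datehistogram.DateHistogramFacet;

public class FacetLookupExample {

    static void printPerDay(Facets facets) {
        // checked variant: Class.cast fails fast on a type mismatch
        DateHistogramFacet checked = facets.facet(DateHistogramFacet.class, "per_day");
        // unchecked variant: the target type is inferred at the call site
        DateHistogramFacet inferred = facets.facet("per_day");
        assert checked == inferred; // both resolve through facetsAsMap()
        for (DateHistogramFacet.Entry entry : checked) {
            System.out.println(entry.getTime() + " -> " + entry.getCount());
        }
    }
}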
diff --git a/src/main/java/org/elasticsearch/search/facet/LongFacetAggregatorBase.java b/src/main/java/org/elasticsearch/search/facet/LongFacetAggregatorBase.java
new file mode 100644
index 0000000..ab6c892
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/LongFacetAggregatorBase.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.index.fielddata.LongValues;
+
+/**
+ * Simple Facet aggregator base class for {@link LongValues}
+ */
+public abstract class LongFacetAggregatorBase {
+ private int total;
+ private int missing;
+
+ public void onDoc(int docId, LongValues values) {
+ final int numValues = values.setDocument(docId);
+ int tempMissing = 1;
+ for (int i = 0; i < numValues; i++) {
+ tempMissing = 0;
+ onValue(docId, values.nextValue());
+ total++;
+ }
+ missing += tempMissing;
+ }
+
+ protected abstract void onValue(int docId, long next);
+
+ public final int total() {
+ return total;
+ }
+
+ public final int missing() {
+ return missing;
+ }
+}
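
onDoc above walks a document's values through LongValues.setDocument/nextValue, counting every value in total and counting the document as missing only when it yields no values at all. A minimal concrete aggregator in the same shape as the histogram procs below; the class itself is hypothetical.

import com.carrotsearch.hppc.LongLongOpenHashMap;
import org.elasticsearch.search.facet.LongFacetAggregatorBase;

public class ValueCountAggregator extends LongFacetAggregatorBase {

    // per-value hit counts, keyed by the raw long value
    final LongLongOpenHashMap counts = new LongLongOpenHashMap();

    @Override
    protected void onValue(int docId, long value) {
        counts.addTo(value, 1);
    }
}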
diff --git a/src/main/java/org/elasticsearch/search/facet/SearchContextFacets.java b/src/main/java/org/elasticsearch/search/facet/SearchContextFacets.java
new file mode 100644
index 0000000..ef60de3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/SearchContextFacets.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.Nullable;
+
+import java.util.List;
+
+/**
+ *
+ */
+public class SearchContextFacets {
+
+ public static class Entry {
+ private final String facetName;
+ private final FacetExecutor.Mode mode;
+ private final FacetExecutor facetExecutor;
+ private final boolean global;
+ @Nullable
+ private final Filter filter;
+
+ public Entry(String facetName, FacetExecutor.Mode mode, FacetExecutor facetExecutor, boolean global, @Nullable Filter filter) {
+ this.facetName = facetName;
+ this.mode = mode;
+ this.facetExecutor = facetExecutor;
+ this.global = global;
+ this.filter = filter;
+ }
+
+ public String getFacetName() {
+ return facetName;
+ }
+
+ public FacetExecutor.Mode getMode() {
+ return mode;
+ }
+
+ public FacetExecutor getFacetExecutor() {
+ return facetExecutor;
+ }
+
+ public boolean isGlobal() {
+ return global;
+ }
+
+ public Filter getFilter() {
+ return filter;
+ }
+ }
+
+ private final List<Entry> entries;
+
+ private boolean hasQuery;
+ private boolean hasGlobal;
+
+ public SearchContextFacets(List<Entry> entries) {
+ this.entries = entries;
+ for (Entry entry : entries) {
+ if (entry.global) {
+ hasGlobal = true;
+ } else {
+ hasQuery = true;
+ }
+ }
+ }
+
+ public List<Entry> entries() {
+ return this.entries;
+ }
+
+ /**
+ * Are there facets that need to be computed on the query hits?
+ */
+ public boolean hasQuery() {
+ return hasQuery;
+ }
+
+ /**
+ * Are there global facets that need to be computed on all the docs?
+ */
+ public boolean hasGlobal() {
+ return hasGlobal;
+ }
+}
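
A small usage sketch of the flags above: the facet phase wires per-hit collectors for query-bound entries and schedules a second, index-wide pass for global ones. The printout is illustrative only.

import org.elasticsearch.search.facet.SearchContextFacets;

public class FacetPhaseSketch {

    static void describe(SearchContextFacets facets) {
        // hasQuery()/hasGlobal() summarize the per-entry flags set in the constructor
        System.out.println("query-bound: " + facets.hasQuery() + ", global: " + facets.hasGlobal());
        for (SearchContextFacets.Entry entry : facets.entries()) {
            System.out.println(entry.getFacetName()
                    + (entry.isGlobal() ? " [global]" : " [query]")
                    + (entry.getFilter() != null ? " [filtered]" : ""));
        }
    }
}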
diff --git a/src/main/java/org/elasticsearch/search/facet/TransportFacetModule.java b/src/main/java/org/elasticsearch/search/facet/TransportFacetModule.java
new file mode 100644
index 0000000..92bd906
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/TransportFacetModule.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.search.facet.datehistogram.InternalDateHistogramFacet;
+import org.elasticsearch.search.facet.filter.InternalFilterFacet;
+import org.elasticsearch.search.facet.geodistance.InternalGeoDistanceFacet;
+import org.elasticsearch.search.facet.histogram.InternalHistogramFacet;
+import org.elasticsearch.search.facet.query.InternalQueryFacet;
+import org.elasticsearch.search.facet.range.InternalRangeFacet;
+import org.elasticsearch.search.facet.statistical.InternalStatisticalFacet;
+import org.elasticsearch.search.facet.terms.InternalTermsFacet;
+import org.elasticsearch.search.facet.termsstats.InternalTermsStatsFacet;
+
+/**
+ *
+ */
+public class TransportFacetModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ InternalFilterFacet.registerStreams();
+ InternalQueryFacet.registerStreams();
+ InternalGeoDistanceFacet.registerStreams();
+ InternalHistogramFacet.registerStreams();
+ InternalDateHistogramFacet.registerStreams();
+ InternalRangeFacet.registerStreams();
+ InternalStatisticalFacet.registerStreams();
+ InternalTermsFacet.registerStreams();
+ InternalTermsStatsFacet.registerStreams();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/CountDateHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/CountDateHistogramFacetExecutor.java
new file mode 100644
index 0000000..47a76af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/CountDateHistogramFacetExecutor.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.carrotsearch.hppc.LongLongOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.rounding.TimeZoneRounding;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.LongFacetAggregatorBase;
+
+import java.io.IOException;
+
+/**
+ * A date histogram facet collector that uses the same field for both the key and the
+ * value.
+ */
+public class CountDateHistogramFacetExecutor extends FacetExecutor {
+
+ private final TimeZoneRounding tzRounding;
+ private final IndexNumericFieldData indexFieldData;
+ final DateHistogramFacet.ComparatorType comparatorType;
+
+ final Recycler.V<LongLongOpenHashMap> counts;
+
+ public CountDateHistogramFacetExecutor(IndexNumericFieldData indexFieldData, TimeZoneRounding tzRounding, DateHistogramFacet.ComparatorType comparatorType, CacheRecycler cacheRecycler) {
+ this.comparatorType = comparatorType;
+ this.indexFieldData = indexFieldData;
+ this.tzRounding = tzRounding;
+
+ this.counts = cacheRecycler.longLongMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ InternalCountDateHistogramFacet.CountEntry[] countEntries = new InternalCountDateHistogramFacet.CountEntry[counts.v().size()];
+ final boolean[] states = counts.v().allocated;
+ final long[] keys = counts.v().keys;
+ final long[] values = counts.v().values;
+
+ int entryIndex = 0;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ countEntries[entryIndex++] = new InternalCountDateHistogramFacet.CountEntry(keys[i], values[i]);
+ }
+ }
+ counts.release();
+ return new InternalCountDateHistogramFacet(facetName, comparatorType, countEntries);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private LongValues values;
+ private final DateHistogramProc histoProc;
+
+ public Collector() {
+ this.histoProc = new DateHistogramProc(counts.v(), tzRounding);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getLongValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public static class DateHistogramProc extends LongFacetAggregatorBase {
+
+ private final LongLongOpenHashMap counts;
+ private final TimeZoneRounding tzRounding;
+
+ public DateHistogramProc(LongLongOpenHashMap counts, TimeZoneRounding tzRounding) {
+ this.counts = counts;
+ this.tzRounding = tzRounding;
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ counts.addTo(tzRounding.round(value), 1);
+ }
+
+ public LongLongOpenHashMap counts() {
+ return counts;
+ }
+ }
+}
\ No newline at end of file
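
buildFacet above drains the recycled hppc map by scanning its backing arrays: a slot holds a live entry only where allocated[i] is true. The same traversal as a standalone sketch, assuming the hppc 0.x API used throughout this patch, where keys, values and allocated are public fields.

import com.carrotsearch.hppc.LongLongOpenHashMap;

public class HppcDrainSketch {

    // copy the live (key, value) pairs out of an open-addressed hppc map
    static long[][] drain(LongLongOpenHashMap map) {
        final boolean[] states = map.allocated;
        final long[] keys = map.keys;
        final long[] values = map.values;
        long[] outKeys = new long[map.size()];
        long[] outValues = new long[map.size()];
        int out = 0;
        for (int i = 0; i < states.length; i++) {
            if (states[i]) { // only allocated slots hold real entries
                outKeys[out] = keys[i];
                outValues[out] = values[i];
                out++;
            }
        }
        return new long[][]{outKeys, outValues};
    }
}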
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacet.java
new file mode 100644
index 0000000..8916206
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacet.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.facet.Facet;
+
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * A date histogram facet.
+ */
+public interface DateHistogramFacet extends Facet, Iterable<DateHistogramFacet.Entry> {
+
+ /**
+ * The type of the date histogram facet.
+ */
+ public static final String TYPE = "date_histogram";
+
+ /**
+ * An ordered list of histogram facet entries.
+ */
+ List<? extends Entry> getEntries();
+
+ public static enum ComparatorType {
+ TIME((byte) 0, "time", new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return (o1.getTime() < o2.getTime() ? -1 : (o1.getTime() == o2.getTime() ? 0 : 1));
+ }
+ }),
+ COUNT((byte) 1, "count", new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return (o1.getCount() < o2.getCount() ? -1 : (o1.getCount() == o2.getCount() ? 0 : 1));
+ }
+ }),
+ TOTAL((byte) 2, "total", new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return (o1.getTotal() < o2.getTotal() ? -1 : (o1.getTotal() == o2.getTotal() ? 0 : 1));
+ }
+ });
+
+ private final byte id;
+
+ private final String description;
+
+ private final Comparator<Entry> comparator;
+
+ ComparatorType(byte id, String description, Comparator<Entry> comparator) {
+ this.id = id;
+ this.description = description;
+ this.comparator = comparator;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public String description() {
+ return this.description;
+ }
+
+ public Comparator<Entry> comparator() {
+ return comparator;
+ }
+
+ public static ComparatorType fromId(byte id) {
+ if (id == 0) {
+ return TIME;
+ } else if (id == 1) {
+ return COUNT;
+ } else if (id == 2) {
+ return TOTAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for histogram comparator [" + id + "]");
+ }
+
+ public static ComparatorType fromString(String type) {
+ if ("time".equals(type)) {
+ return TIME;
+ } else if ("count".equals(type)) {
+ return COUNT;
+ } else if ("total".equals(type)) {
+ return TOTAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for histogram comparator [" + type + "]");
+ }
+ }
+
+ public interface Entry {
+
+ /**
+ * The time bucket start (in milliseconds).
+ */
+ long getTime();
+
+ /**
+ * The number of hits that fall within that key "range" or "interval".
+ */
+ long getCount();
+
+ /**
+ * The total count of values aggregated to compute the total.
+ */
+ long getTotalCount();
+
+ /**
+ * The sum / total of the value field values that fall within this key "interval".
+ */
+ double getTotal();
+
+ /**
+ * The mean of this facet interval.
+ */
+ double getMean();
+
+ /**
+ * The minimum value.
+ */
+ double getMin();
+
+ /**
+ * The maximum value.
+ */
+ double getMax();
+ }
+}
\ No newline at end of file
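
ComparatorType above pairs a stable wire id with the comparator used to order buckets, and the same comparators are usable client-side. A usage sketch re-sorting a facet's entries by count; "count" resolves to ComparatorType.COUNT via fromString.

import java.util.Arrays;
import java.util.List;

import org.elasticsearch.search.facet.datehistogram.DateHistogramFacet;

public class EntrySortSketch {

    static List<DateHistogramFacet.Entry> byCount(DateHistogramFacet facet) {
        DateHistogramFacet.Entry[] entries =
                facet.getEntries().toArray(new DateHistogramFacet.Entry[0]);
        Arrays.sort(entries, DateHistogramFacet.ComparatorType.fromString("count").comparator());
        return Arrays.asList(entries);
    }
}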
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetBuilder.java
new file mode 100644
index 0000000..5f50e22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetBuilder.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A facet builder of date histogram facets.
+ */
+public class DateHistogramFacetBuilder extends FacetBuilder {
+ private String keyFieldName;
+ private String valueFieldName;
+ private String interval = null;
+ private String preZone = null;
+ private String postZone = null;
+ private Boolean preZoneAdjustLargeInterval;
+ long preOffset = 0;
+ long postOffset = 0;
+ float factor = 1.0f;
+ private DateHistogramFacet.ComparatorType comparatorType;
+
+ private String valueScript;
+ private Map<String, Object> params;
+ private String lang;
+
+ /**
+ * Constructs a new date histogram facet with the provided facet logical name.
+ *
+ * @param name The logical name of the facet
+ */
+ public DateHistogramFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * The field name to perform the histogram facet on. Translates to using the provided
+ * field as both the {@link #keyField(String)} and {@link #valueField(String)}.
+ */
+ public DateHistogramFacetBuilder field(String field) {
+ this.keyFieldName = field;
+ return this;
+ }
+
+ /**
+ * The field name to use in order to control where the hit will "fall into" within the histogram
+ * entries. Essentially, using the key field numeric value, the hit will be "rounded" into the relevant
+ * bucket controlled by the interval.
+ */
+ public DateHistogramFacetBuilder keyField(String keyField) {
+ this.keyFieldName = keyField;
+ return this;
+ }
+
+ /**
+ * The field name to use as the value of the hit to compute data based on values within the interval
+ * (for example, total).
+ */
+ public DateHistogramFacetBuilder valueField(String valueField) {
+ this.valueFieldName = valueField;
+ return this;
+ }
+
+ public DateHistogramFacetBuilder valueScript(String valueScript) {
+ this.valueScript = valueScript;
+ return this;
+ }
+
+ public DateHistogramFacetBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ /**
+ * The language of the value script.
+ */
+ public DateHistogramFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * The interval used to control the bucket "size" that each key value of a hit will fall into.
+ * Check the docs for all available values.
+ */
+ public DateHistogramFacetBuilder interval(String interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ /**
+ * Should the pre zone be adjusted for large (day and above) intervals? Defaults to <tt>false</tt>.
+ */
+ public DateHistogramFacetBuilder preZoneAdjustLargeInterval(boolean preZoneAdjustLargeInterval) {
+ this.preZoneAdjustLargeInterval = preZoneAdjustLargeInterval;
+ return this;
+ }
+
+ /**
+ * Sets the pre time zone to use when bucketing the values. This timezone will be applied before
+ * rounding off the result.
+ * <p/>
+ * Can either be in the form of "-10:00" or
+ * one of the values listed here: http://joda-time.sourceforge.net/timezones.html.
+ */
+ public DateHistogramFacetBuilder preZone(String preZone) {
+ this.preZone = preZone;
+ return this;
+ }
+
+ /**
+ * Sets the post time zone to use when bucketing the values. This timezone will be applied after
+ * rounding off the result.
+ * <p/>
+ * Can either be in the form of "-10:00" or
+ * one of the values listed here: http://joda-time.sourceforge.net/timezones.html.
+ */
+ public DateHistogramFacetBuilder postZone(String postZone) {
+ this.postZone = postZone;
+ return this;
+ }
+
+ /**
+ * Sets a pre offset that will be applied before rounding the results.
+ */
+ public DateHistogramFacetBuilder preOffset(TimeValue preOffset) {
+ this.preOffset = preOffset.millis();
+ return this;
+ }
+
+ /**
+ * Sets a post offset that will be applied after rounding the results.
+ */
+ public DateHistogramFacetBuilder postOffset(TimeValue postOffset) {
+ this.postOffset = postOffset.millis();
+ return this;
+ }
+
+ /**
+ * Sets the factor that the value will be multiplied by before the rounding of the
+ * results, and divided by after it.
+ */
+ public DateHistogramFacetBuilder factor(float factor) {
+ this.factor = factor;
+ return this;
+ }
+
+ public DateHistogramFacetBuilder comparator(DateHistogramFacet.ComparatorType comparatorType) {
+ this.comparatorType = comparatorType;
+ return this;
+ }
+
+ /**
+ * Should the facet run in global mode (not bounded by the search query) or not (bounded by
+ * the search query)? Defaults to <tt>false</tt>.
+ */
+ @Override
+ public DateHistogramFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ /**
+ * An additional filter used to further filter down the set of documents the facet will run on.
+ */
+ @Override
+ public DateHistogramFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path the facet will execute on. A match (root object) will then cause all the
+ * nested objects matching the path to be computed into the facet.
+ */
+ public DateHistogramFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (keyFieldName == null) {
+ throw new SearchSourceBuilderException("field must be set on date histogram facet for facet [" + name + "]");
+ }
+ if (interval == null) {
+ throw new SearchSourceBuilderException("interval must be set on date histogram facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(DateHistogramFacet.TYPE);
+ if (valueFieldName != null) {
+ builder.field("key_field", keyFieldName);
+ builder.field("value_field", valueFieldName);
+ } else {
+ builder.field("field", keyFieldName);
+ }
+ if (valueScript != null) {
+ builder.field("value_script", valueScript);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ }
+ builder.field("interval", interval);
+ if (preZone != null) {
+ builder.field("pre_zone", preZone);
+ }
+ if (preZoneAdjustLargeInterval != null) {
+ builder.field("pre_zone_adjust_large_interval", preZoneAdjustLargeInterval);
+ }
+ if (postZone != null) {
+ builder.field("post_zone", postZone);
+ }
+ if (preOffset != 0) {
+ builder.field("pre_offset", preOffset);
+ }
+ if (postOffset != 0) {
+ builder.field("post_offset", postOffset);
+ }
+ if (factor != 1.0f) {
+ builder.field("factor", factor);
+ }
+ if (comparatorType != null) {
+ builder.field("comparator", comparatorType.description());
+ }
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
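
A usage sketch of the builder via FacetBuilders, assuming a connected Client and a "logs" index with a date field "timestamp" and a numeric field "bytes"; all of those names are made up.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.facet.FacetBuilders;
import org.elasticsearch.search.facet.datehistogram.DateHistogramFacet;

public class DateHistogramRequestSketch {

    static SearchResponse perDay(Client client) {
        return client.prepareSearch("logs")
                .addFacet(FacetBuilders.dateHistogramFacet("per_day")
                        .keyField("timestamp")   // buckets come from this field
                        .valueField("bytes")     // emits key_field/value_field in toXContent above
                        .interval("day")
                        .preZone("-02:00")
                        .comparator(DateHistogramFacet.ComparatorType.TIME))
                .execute().actionGet();
    }
}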
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetParser.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetParser.java
new file mode 100644
index 0000000..3a54baa
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/DateHistogramFacetParser.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.rounding.DateTimeUnit;
+import org.elasticsearch.common.rounding.TimeZoneRounding;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.FacetPhaseExecutionException;
+import org.elasticsearch.search.internal.SearchContext;
+import org.joda.time.Chronology;
+import org.joda.time.DateTimeZone;
+import org.joda.time.chrono.ISOChronology;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class DateHistogramFacetParser extends AbstractComponent implements FacetParser {
+
+ private final ImmutableMap<String, DateTimeUnit> dateTimeUnits;
+
+ @Inject
+ public DateHistogramFacetParser(Settings settings) {
+ super(settings);
+ InternalDateHistogramFacet.registerStreams();
+
+ dateTimeUnits = MapBuilder.<String, DateTimeUnit>newMapBuilder()
+ .put("year", DateTimeUnit.YEAR_OF_CENTURY)
+ .put("1y", DateTimeUnit.YEAR_OF_CENTURY)
+ .put("quarter", DateTimeUnit.QUARTER)
+ .put("1q", DateTimeUnit.QUARTER)
+ .put("month", DateTimeUnit.MONTH_OF_YEAR)
+ .put("1M", DateTimeUnit.MONTH_OF_YEAR)
+ .put("week", DateTimeUnit.WEEK_OF_WEEKYEAR)
+ .put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR)
+ .put("day", DateTimeUnit.DAY_OF_MONTH)
+ .put("1d", DateTimeUnit.DAY_OF_MONTH)
+ .put("hour", DateTimeUnit.HOUR_OF_DAY)
+ .put("1h", DateTimeUnit.HOUR_OF_DAY)
+ .put("minute", DateTimeUnit.MINUTES_OF_HOUR)
+ .put("1m", DateTimeUnit.MINUTES_OF_HOUR)
+ .put("second", DateTimeUnit.SECOND_OF_MINUTE)
+ .put("1s", DateTimeUnit.SECOND_OF_MINUTE)
+ .immutableMap();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{DateHistogramFacet.TYPE, "dateHistogram"};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String keyField = null;
+ String valueField = null;
+ String valueScript = null;
+ String scriptLang = null;
+ Map<String, Object> params = null;
+ String interval = null;
+ DateTimeZone preZone = DateTimeZone.UTC;
+ DateTimeZone postZone = DateTimeZone.UTC;
+ boolean preZoneAdjustLargeInterval = false;
+ long preOffset = 0;
+ long postOffset = 0;
+ float factor = 1.0f;
+ Chronology chronology = ISOChronology.getInstanceUTC();
+ DateHistogramFacet.ComparatorType comparatorType = DateHistogramFacet.ComparatorType.TIME;
+ XContentParser.Token token;
+ String fieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(fieldName)) {
+ params = parser.map();
+ }
+ } else if (token.isValue()) {
+ if ("field".equals(fieldName)) {
+ keyField = parser.text();
+ } else if ("key_field".equals(fieldName) || "keyField".equals(fieldName)) {
+ keyField = parser.text();
+ } else if ("value_field".equals(fieldName) || "valueField".equals(fieldName)) {
+ valueField = parser.text();
+ } else if ("interval".equals(fieldName)) {
+ interval = parser.text();
+ } else if ("time_zone".equals(fieldName) || "timeZone".equals(fieldName)) {
+ preZone = parseZone(parser, token);
+ } else if ("pre_zone".equals(fieldName) || "preZone".equals(fieldName)) {
+ preZone = parseZone(parser, token);
+ } else if ("pre_zone_adjust_large_interval".equals(fieldName) || "preZoneAdjustLargeInterval".equals(fieldName)) {
+ preZoneAdjustLargeInterval = parser.booleanValue();
+ } else if ("post_zone".equals(fieldName) || "postZone".equals(fieldName)) {
+ postZone = parseZone(parser, token);
+ } else if ("pre_offset".equals(fieldName) || "preOffset".equals(fieldName)) {
+ preOffset = parseOffset(parser.text());
+ } else if ("post_offset".equals(fieldName) || "postOffset".equals(fieldName)) {
+ postOffset = parseOffset(parser.text());
+ } else if ("factor".equals(fieldName)) {
+ factor = parser.floatValue();
+ } else if ("value_script".equals(fieldName) || "valueScript".equals(fieldName)) {
+ valueScript = parser.text();
+ } else if ("order".equals(fieldName) || "comparator".equals(fieldName)) {
+ comparatorType = DateHistogramFacet.ComparatorType.fromString(parser.text());
+ } else if ("lang".equals(fieldName)) {
+ scriptLang = parser.text();
+ }
+ }
+ }
+
+ if (interval == null) {
+ throw new FacetPhaseExecutionException(facetName, "[interval] is required to be set for histogram facet");
+ }
+
+ if (keyField == null) {
+ throw new FacetPhaseExecutionException(facetName, "key field is required to be set for histogram facet, either using [field] or using [key_field]");
+ }
+
+ FieldMapper keyMapper = context.smartNameFieldMapper(keyField);
+ if (keyMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "(key) field [" + keyField + "] not found");
+ }
+ IndexNumericFieldData keyIndexFieldData = context.fieldData().getForField(keyMapper);
+
+ TimeZoneRounding.Builder tzRoundingBuilder;
+ DateTimeUnit dateTimeUnit = dateTimeUnits.get(interval);
+ if (dateTimeUnit != null) {
+ tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit);
+ } else {
+ // not a named unit, so try to parse the interval as a time value
+ tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.parseTimeValue(interval, null));
+ }
+
+ TimeZoneRounding tzRounding = tzRoundingBuilder
+ .preZone(preZone).postZone(postZone)
+ .preZoneAdjustLargeInterval(preZoneAdjustLargeInterval)
+ .preOffset(preOffset).postOffset(postOffset)
+ .factor(factor)
+ .build();
+
+ if (valueScript != null) {
+ SearchScript script = context.scriptService().search(context.lookup(), scriptLang, valueScript, params);
+ return new ValueScriptDateHistogramFacetExecutor(keyIndexFieldData, script, tzRounding, comparatorType, context.cacheRecycler());
+ } else if (valueField != null) {
+ FieldMapper valueMapper = context.smartNameFieldMapper(valueField);
+ if (valueMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "(value) field [" + valueField + "] not found");
+ }
+ IndexNumericFieldData valueIndexFieldData = context.fieldData().getForField(valueMapper);
+ return new ValueDateHistogramFacetExecutor(keyIndexFieldData, valueIndexFieldData, tzRounding, comparatorType, context.cacheRecycler());
+ } else {
+ return new CountDateHistogramFacetExecutor(keyIndexFieldData, tzRounding, comparatorType, context.cacheRecycler());
+ }
+ }
+
+ private long parseOffset(String offset) throws IOException {
+ if (offset.charAt(0) == '-') {
+ return -TimeValue.parseTimeValue(offset.substring(1), null).millis();
+ }
+ int beginIndex = offset.charAt(0) == '+' ? 1 : 0;
+ return TimeValue.parseTimeValue(offset.substring(beginIndex), null).millis();
+ }
+
+ private DateTimeZone parseZone(XContentParser parser, XContentParser.Token token) throws IOException {
+ if (token == XContentParser.Token.VALUE_NUMBER) {
+ return DateTimeZone.forOffsetHours(parser.intValue());
+ } else {
+ String text = parser.text();
+ int index = text.indexOf(':');
+ if (index != -1) {
+ int beginIndex = text.charAt(0) == '+' ? 1 : 0;
+ // format like -02:30
+ return DateTimeZone.forOffsetHoursMinutes(
+ Integer.parseInt(text.substring(beginIndex, index)),
+ Integer.parseInt(text.substring(index + 1))
+ );
+ } else {
+ // id, listed here: http://joda-time.sourceforge.net/timezones.html
+ return DateTimeZone.forID(text);
+ }
+ }
+ }
+
+}
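
A standalone sketch mirroring the zone dispatch in parseZone above: numeric values become whole-hour offsets, strings containing a colon are split into hours and minutes, and anything else is treated as a Joda zone id. The inputs in the comments are illustrative.

import org.joda.time.DateTimeZone;

public class ZoneParseSketch {

    static DateTimeZone zoneOf(Object value) {
        if (value instanceof Number) {                    // e.g. "pre_zone": -2
            return DateTimeZone.forOffsetHours(((Number) value).intValue());
        }
        String text = value.toString();
        int colon = text.indexOf(':');
        if (colon != -1) {                                // e.g. "pre_zone": "-02:30"
            int begin = text.charAt(0) == '+' ? 1 : 0;
            return DateTimeZone.forOffsetHoursMinutes(
                    Integer.parseInt(text.substring(begin, colon)),
                    Integer.parseInt(text.substring(colon + 1)));
        }
        return DateTimeZone.forID(text);                  // e.g. "pre_zone": "CET"
    }
}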
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalCountDateHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalCountDateHistogramFacet.java
new file mode 100644
index 0000000..9f50ef6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalCountDateHistogramFacet.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.carrotsearch.hppc.LongLongOpenHashMap;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalCountDateHistogramFacet extends InternalDateHistogramFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("cdHistogram"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readHistogramFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+
+ /**
+ * A histogram entry representing a single bucket within the result of a histogram facet.
+ */
+ public static class CountEntry implements Entry {
+ private final long time;
+ private final long count;
+
+ public CountEntry(long time, long count) {
+ this.time = time;
+ this.count = count;
+ }
+
+ @Override
+ public long getTime() {
+ return time;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return 0;
+ }
+
+ @Override
+ public double getTotal() {
+ return Double.NaN;
+ }
+
+ @Override
+ public double getMean() {
+ return Double.NaN;
+ }
+
+ @Override
+ public double getMin() {
+ return Double.NaN;
+ }
+
+ @Override
+ public double getMax() {
+ return Double.NaN;
+ }
+ }
+
+ ComparatorType comparatorType;
+ CountEntry[] entries = null;
+
+ InternalCountDateHistogramFacet() {
+ }
+
+ public InternalCountDateHistogramFacet(String name, ComparatorType comparatorType, CountEntry[] entries) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.entries = entries;
+ }
+
+ @Override
+ public List<CountEntry> getEntries() {
+ return Arrays.asList(entries);
+ }
+
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) getEntries().iterator();
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ InternalCountDateHistogramFacet histoFacet = (InternalCountDateHistogramFacet) facets.get(0);
+ Arrays.sort(histoFacet.entries, histoFacet.comparatorType.comparator());
+ return facets.get(0);
+ }
+
+ Recycler.V<LongLongOpenHashMap> counts = context.cacheRecycler().longLongMap(-1);
+ for (Facet facet : facets) {
+ InternalCountDateHistogramFacet histoFacet = (InternalCountDateHistogramFacet) facet;
+ for (CountEntry entry : histoFacet.entries) {
+ counts.v().addTo(entry.getTime(), entry.getCount());
+ }
+ }
+
+ CountEntry[] countEntries = new CountEntry[counts.v().size()];
+ final boolean[] states = counts.v().allocated;
+ final long[] keys = counts.v().keys;
+ final long[] values = counts.v().values;
+ int entriesIndex = 0;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ countEntries[entriesIndex++] = new CountEntry(keys[i], values[i]);
+ }
+ }
+ counts.release();
+
+ Arrays.sort(countEntries, comparatorType.comparator());
+
+ return new InternalCountDateHistogramFacet(getName(), comparatorType, countEntries);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString ENTRIES = new XContentBuilderString("entries");
+ static final XContentBuilderString TIME = new XContentBuilderString("time");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, TYPE);
+ builder.startArray(Fields.ENTRIES);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TIME, entry.getTime());
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalCountDateHistogramFacet readHistogramFacet(StreamInput in) throws IOException {
+ InternalCountDateHistogramFacet facet = new InternalCountDateHistogramFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+
+ int size = in.readVInt();
+ entries = new CountEntry[size];
+ for (int i = 0; i < size; i++) {
+ entries[i] = new CountEntry(in.readLong(), in.readVLong());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(entries.length);
+ for (CountEntry entry : entries) {
+ out.writeLong(entry.getTime());
+ out.writeVLong(entry.getCount());
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalDateHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalDateHistogramFacet.java
new file mode 100644
index 0000000..a44d4e0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalDateHistogramFacet.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import org.elasticsearch.search.facet.InternalFacet;
+
+/**
+ *
+ */
+public abstract class InternalDateHistogramFacet extends InternalFacet implements DateHistogramFacet {
+
+ public static void registerStreams() {
+ InternalCountDateHistogramFacet.registerStreams();
+ InternalFullDateHistogramFacet.registerStreams();
+ }
+
+ protected InternalDateHistogramFacet() {
+ }
+
+ protected InternalDateHistogramFacet(String facetName) {
+ super(facetName);
+ }
+
+ @Override
+ public final String getType() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalFullDateHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalFullDateHistogramFacet.java
new file mode 100644
index 0000000..5a911c3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/InternalFullDateHistogramFacet.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ *
+ */
+public class InternalFullDateHistogramFacet extends InternalDateHistogramFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("fdHistogram"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readHistogramFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+
+ /**
+ * A histogram entry representing a single bucket within the result of a histogram facet.
+ */
+ public static class FullEntry implements Entry {
+ private final long time;
+ long count;
+ long totalCount;
+ double total;
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+
+ public FullEntry(long time, long count, double min, double max, long totalCount, double total) {
+ this.time = time;
+ this.count = count;
+ this.min = min;
+ this.max = max;
+ this.totalCount = totalCount;
+ this.total = total;
+ }
+
+ @Override
+ public long getTime() {
+ return time;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public double getTotal() {
+ return total;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return totalCount;
+ }
+
+ @Override
+ public double getMean() {
+ if (totalCount == 0) {
+ return totalCount;
+ }
+ return total / totalCount;
+ }
+
+ @Override
+ public double getMin() {
+ return this.min;
+ }
+
+ @Override
+ public double getMax() {
+ return this.max;
+ }
+ }
+
+ private ComparatorType comparatorType;
+ List<FullEntry> entries;
+
+ InternalFullDateHistogramFacet() {
+ }
+
+ InternalFullDateHistogramFacet(String name) {
+ super(name);
+ }
+
+ public InternalFullDateHistogramFacet(String name, ComparatorType comparatorType, List<FullEntry> entries) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.entries = entries;
+ }
+
+ @Override
+ public List<FullEntry> getEntries() {
+ return entries;
+ }
+
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) getEntries().iterator();
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ // we need to sort it
+ InternalFullDateHistogramFacet internalFacet = (InternalFullDateHistogramFacet) facets.get(0);
+ List<FullEntry> entries = internalFacet.getEntries();
+ CollectionUtil.timSort(entries, comparatorType.comparator());
+ return internalFacet;
+ }
+
+ Recycler.V<LongObjectOpenHashMap<FullEntry>> map = context.cacheRecycler().longObjectMap(-1);
+
+ for (Facet facet : facets) {
+ InternalFullDateHistogramFacet histoFacet = (InternalFullDateHistogramFacet) facet;
+ for (FullEntry fullEntry : histoFacet.entries) {
+ FullEntry current = map.v().get(fullEntry.time);
+ if (current != null) {
+ current.count += fullEntry.count;
+ current.total += fullEntry.total;
+ current.totalCount += fullEntry.totalCount;
+ if (fullEntry.min < current.min) {
+ current.min = fullEntry.min;
+ }
+ if (fullEntry.max > current.max) {
+ current.max = fullEntry.max;
+ }
+ } else {
+ map.v().put(fullEntry.time, fullEntry);
+ }
+ }
+ }
+
+ // sort
+ // TODO: hppc - not happy with toArray
+ Object[] values = map.v().values().toArray();
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+ List<FullEntry> ordered = new ArrayList<FullEntry>(map.v().size());
+ for (int i = 0; i < map.v().size(); i++) {
+ FullEntry value = (FullEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+
+ map.release();
+
+ // initialize the result as an already-ordered facet
+ InternalFullDateHistogramFacet ret = new InternalFullDateHistogramFacet(getName());
+ ret.comparatorType = comparatorType;
+ ret.entries = ordered;
+ return ret;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString ENTRIES = new XContentBuilderString("entries");
+ static final XContentBuilderString TIME = new XContentBuilderString("time");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, TYPE);
+ builder.startArray(Fields.ENTRIES);
+ for (Entry entry : getEntries()) {
+ builder.startObject();
+ builder.field(Fields.TIME, entry.getTime());
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalFullDateHistogramFacet readHistogramFacet(StreamInput in) throws IOException {
+ InternalFullDateHistogramFacet facet = new InternalFullDateHistogramFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ int size = in.readVInt();
+ entries = new ArrayList<FullEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new FullEntry(in.readLong(), in.readVLong(), in.readDouble(), in.readDouble(), in.readVLong(), in.readDouble()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(entries.size());
+ for (FullEntry entry : entries) {
+ out.writeLong(entry.time);
+ out.writeVLong(entry.count);
+ out.writeDouble(entry.min);
+ out.writeDouble(entry.max);
+ out.writeVLong(entry.totalCount);
+ out.writeDouble(entry.total);
+ }
+ }
+}
\ No newline at end of file
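
The per-bucket merge rule inside reduce above, restated as a minimal sketch; the Bucket class is hypothetical and mirrors FullEntry's fields. Counts and totals add up across shards, extremes keep the smaller or larger side, and the mean is always derived rather than merged.

public class BucketMergeSketch {

    static final class Bucket {
        long count, totalCount;
        double total;
        double min = Double.POSITIVE_INFINITY;
        double max = Double.NEGATIVE_INFINITY;

        void merge(Bucket other) {
            count += other.count;           // hits add up across shards
            total += other.total;           // so does the running sum...
            totalCount += other.totalCount; // ...and the number of aggregated values
            min = Math.min(min, other.min);
            max = Math.max(max, other.max);
        }

        double mean() {
            return totalCount == 0 ? 0 : total / totalCount; // derived, never merged
        }
    }
}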
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/ValueDateHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/ValueDateHistogramFacetExecutor.java
new file mode 100644
index 0000000..3d626d9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/ValueDateHistogramFacetExecutor.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.rounding.TimeZoneRounding;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.LongFacetAggregatorBase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A histogram facet collector that uses different fields for the key and the value.
+ */
+public class ValueDateHistogramFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData keyIndexFieldData;
+ private final IndexNumericFieldData valueIndexFieldData;
+ private final DateHistogramFacet.ComparatorType comparatorType;
+ final TimeZoneRounding tzRounding;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry>> entries;
+
+ public ValueDateHistogramFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, TimeZoneRounding tzRounding, DateHistogramFacet.ComparatorType comparatorType, CacheRecycler cacheRecycler) {
+ this.comparatorType = comparatorType;
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueIndexFieldData = valueIndexFieldData;
+ this.tzRounding = tzRounding;
+
+ this.entries = cacheRecycler.longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ ArrayList<InternalFullDateHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullDateHistogramFacet.FullEntry>(entries.v().size());
+ final boolean[] states = entries.v().allocated;
+ final Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ InternalFullDateHistogramFacet.FullEntry value = (InternalFullDateHistogramFacet.FullEntry) values[i];
+ entries1.add(value);
+ }
+ }
+
+ entries.release();
+ return new InternalFullDateHistogramFacet(facetName, comparatorType, entries1);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final DateHistogramProc histoProc;
+ private LongValues keyValues;
+
+ public Collector() {
+ this.histoProc = new DateHistogramProc(tzRounding, entries.v());
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getLongValues();
+ histoProc.valueValues = valueIndexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public static class DateHistogramProc extends LongFacetAggregatorBase {
+
+ final LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries;
+ private final TimeZoneRounding tzRounding;
+
+ DoubleValues valueValues;
+
+ final ValueAggregator valueAggregator = new ValueAggregator();
+
+ public DateHistogramProc(TimeZoneRounding tzRounding, LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries) {
+ this.tzRounding = tzRounding;
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ long time = tzRounding.round(value);
+
+ InternalFullDateHistogramFacet.FullEntry entry = entries.get(time);
+ if (entry == null) {
+ entry = new InternalFullDateHistogramFacet.FullEntry(time, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0);
+ entries.put(time, entry);
+ }
+ entry.count++;
+ valueAggregator.entry = entry;
+ valueAggregator.onDoc(docId, valueValues);
+ }
+
+ public static final class ValueAggregator extends DoubleFacetAggregatorBase {
+
+ InternalFullDateHistogramFacet.FullEntry entry;
+
+ @Override
+ public void onValue(int docId, double value) {
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+ }
+} \ No newline at end of file
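For context, this executor backs a date_histogram facet whose key and value come from different fields: key values are rounded into buckets by TimeZoneRounding, and count/min/max/total/mean are accumulated per bucket from the value field. A minimal usage sketch with the Java client follows; the index name, field names, and interval are illustrative assumptions, not part of this change.

    // Sketch: a date_histogram facet keyed on "timestamp", aggregating "price" per day.
    SearchResponse response = client.prepareSearch("logs")
            .setQuery(QueryBuilders.matchAllQuery())
            .addFacet(FacetBuilders.dateHistogramFacet("sales_per_day")
                    .field("timestamp")     // key field, rounded into buckets
                    .valueField("price")    // value field feeding min/max/total/mean
                    .interval("day"))
            .execute().actionGet();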
diff --git a/src/main/java/org/elasticsearch/search/facet/datehistogram/ValueScriptDateHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/datehistogram/ValueScriptDateHistogramFacetExecutor.java
new file mode 100644
index 0000000..bf3b2b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/datehistogram/ValueScriptDateHistogramFacetExecutor.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.datehistogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.rounding.TimeZoneRounding;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.LongFacetAggregatorBase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A date histogram facet collector that uses a field for the key and a script to
+ * compute the value.
+ */
+public class ValueScriptDateHistogramFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData keyIndexFieldData;
+ private final DateHistogramFacet.ComparatorType comparatorType;
+ final SearchScript valueScript;
+ final TimeZoneRounding tzRounding;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry>> entries;
+
+ public ValueScriptDateHistogramFacetExecutor(IndexNumericFieldData keyIndexFieldData, SearchScript valueScript, TimeZoneRounding tzRounding, DateHistogramFacet.ComparatorType comparatorType, CacheRecycler cacheRecycler) {
+ this.comparatorType = comparatorType;
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueScript = valueScript;
+ this.tzRounding = tzRounding;
+
+ this.entries = cacheRecycler.longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ ArrayList<InternalFullDateHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullDateHistogramFacet.FullEntry>(entries.v().size());
+ final boolean[] states = entries.v().allocated;
+ final Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ InternalFullDateHistogramFacet.FullEntry value = (InternalFullDateHistogramFacet.FullEntry) values[i];
+ entries1.add(value);
+ }
+ }
+
+ entries.release();
+ return new InternalFullDateHistogramFacet(facetName, comparatorType, entries1);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final DateHistogramProc histoProc;
+ private LongValues keyValues;
+
+ public Collector() {
+ histoProc = new DateHistogramProc(tzRounding, valueScript, entries.v());
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ valueScript.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getLongValues();
+ valueScript.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public static class DateHistogramProc extends LongFacetAggregatorBase {
+
+ private final TimeZoneRounding tzRounding;
+ protected final SearchScript valueScript;
+
+ final LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries;
+
+ public DateHistogramProc(TimeZoneRounding tzRounding, SearchScript valueScript, final LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries) {
+ this.tzRounding = tzRounding;
+ this.valueScript = valueScript;
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ valueScript.setNextDocId(docId);
+ long time = tzRounding.round(value);
+ double scriptValue = valueScript.runAsDouble();
+
+ InternalFullDateHistogramFacet.FullEntry entry = entries.get(time);
+ if (entry == null) {
+ entry = new InternalFullDateHistogramFacet.FullEntry(time, 1, scriptValue, scriptValue, 1, scriptValue);
+ entries.put(time, entry);
+ } else {
+ entry.count++;
+ entry.totalCount++;
+ entry.total += scriptValue;
+ if (scriptValue < entry.min) {
+ entry.min = scriptValue;
+ }
+ if (scriptValue > entry.max) {
+ entry.max = scriptValue;
+ }
+ }
+ }
+ }
+} \ No newline at end of file
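This executor is chosen when the request supplies a value_script instead of a value_field: the script runs once per key value (runAsDouble()) and its result feeds the same count/min/max/total accumulation. A hedged sketch; the doc[...] accessor assumes the 1.x default script language, and all names are illustrative.

    // Sketch: bucket by "timestamp", aggregate a script-computed value per bucket.
    SearchResponse response = client.prepareSearch("logs")
            .addFacet(FacetBuilders.dateHistogramFacet("discounted_sales")
                    .field("timestamp")
                    .valueScript("doc['price'].value * 0.9")  // evaluated per document
                    .interval("week"))
            .execute().actionGet();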
diff --git a/src/main/java/org/elasticsearch/search/facet/filter/FilterFacet.java b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacet.java
new file mode 100644
index 0000000..16522d0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacet.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.filter;
+
+import org.elasticsearch.search.facet.Facet;
+
+/**
+ * A filter facet returns the count (number of hits) for a facet based on a filter.
+ */
+public interface FilterFacet extends Facet {
+
+ /**
+ * The type of the filter facet.
+ */
+ public static final String TYPE = "filter";
+
+ /**
+ * The count of the facet.
+ */
+ long getCount();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetBuilder.java
new file mode 100644
index 0000000..9f6c6af
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetBuilder.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.filter;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class FilterFacetBuilder extends FacetBuilder {
+
+ private FilterBuilder filter;
+
+ public FilterFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ @Override
+ public FilterFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public FilterFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path the facet will execute on. A match (root object) will then cause all the
+ * nested objects matching the path to be computed into the facet.
+ */
+ public FilterFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ public FilterFacetBuilder filter(FilterBuilder filter) {
+ this.filter = filter;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (filter == null) {
+ throw new SearchSourceBuilderException("filter must be set on filter facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+ builder.field(FilterFacet.TYPE);
+ filter.toXContent(builder, params);
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
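A filter facet simply counts the documents in the result set that also match the given filter. A usage sketch grounded in the builder above; the term filter, index, and field names are illustrative.

    // Sketch: count hits tagged "elasticsearch" alongside the main query.
    FilterFacetBuilder facet = new FilterFacetBuilder("tagged")
            .filter(FilterBuilders.termFilter("tag", "elasticsearch"));
    SearchResponse response = client.prepareSearch("posts")
            .setQuery(QueryBuilders.matchAllQuery())
            .addFacet(facet)
            .execute().actionGet();
    FilterFacet result = (FilterFacet) response.getFacets().facetsAsMap().get("tagged");
    long count = result.getCount();  // number of hits matching the facet filter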
diff --git a/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetExecutor.java
new file mode 100644
index 0000000..6401306
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetExecutor.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.filter;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.AndDocIdSet;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class FilterFacetExecutor extends FacetExecutor {
+
+ private final Filter filter;
+
+ long count = -1;
+
+ public FilterFacetExecutor(Filter filter) {
+ this.filter = filter;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public Post post() {
+ return new Post();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalFilterFacet(facetName, count);
+ }
+
+ class Post extends FacetExecutor.Post {
+
+ @Override
+ public void executePost(List<ContextDocIdSet> docSets) throws IOException {
+ int count = 0;
+ for (ContextDocIdSet docSet : docSets) {
+ DocIdSet filteredDocIdSet = filter.getDocIdSet(docSet.context, docSet.context.reader().getLiveDocs());
+ if (filteredDocIdSet == null || docSet.docSet == null) {
+ continue;
+ }
+ DocIdSetIterator iter = new AndDocIdSet(new DocIdSet[]{docSet.docSet, filteredDocIdSet}).iterator();
+ while (iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ count++;
+ }
+ }
+ FilterFacetExecutor.this.count = count;
+ }
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private long count = 0;
+ private Bits bits;
+
+ @Override
+ public void collect(int doc) throws IOException {
+ if (bits.get(doc)) {
+ count++;
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ bits = DocIdSets.toSafeBits(context.reader(), filter.getDocIdSet(context, context.reader().getLiveDocs()));
+ }
+
+ @Override
+ public void postCollection() {
+ bits = null;
+ FilterFacetExecutor.this.count = count;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetParser.java b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetParser.java
new file mode 100644
index 0000000..34d226a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/filter/FilterFacetParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.filter;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class FilterFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public FilterFacetParser(Settings settings) {
+ super(settings);
+ InternalFilterFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{FilterFacet.TYPE};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.POST;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ ParsedFilter parsedFilter = context.queryParserService().parseInnerFilter(parser);
+ return new FilterFacetExecutor(parsedFilter == null ? Queries.MATCH_ALL_FILTER : parsedFilter.filter());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/filter/InternalFilterFacet.java b/src/main/java/org/elasticsearch/search/facet/filter/InternalFilterFacet.java
new file mode 100644
index 0000000..efc964e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/filter/InternalFilterFacet.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.filter;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalFilterFacet extends InternalFacet implements FilterFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("filter"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readFilterFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ private long count;
+
+ InternalFilterFacet() {
+ }
+
+ public InternalFilterFacet(String name, long count) {
+ super(name);
+ this.count = count;
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ /**
+ * The count of the facet.
+ */
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ return facets.get(0);
+ }
+ long count = 0;
+ for (Facet facet : facets) {
+ count += ((FilterFacet) facet).getCount();
+ }
+ return new InternalFilterFacet(getName(), count);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, FilterFacet.TYPE);
+ builder.field(Fields.COUNT, count);
+ builder.endObject();
+ return builder;
+ }
+
+ public static FilterFacet readFilterFacet(StreamInput in) throws IOException {
+ InternalFilterFacet result = new InternalFilterFacet();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ count = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(count);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacet.java b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacet.java
new file mode 100644
index 0000000..5057815
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacet.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.geodistance;
+
+import org.elasticsearch.search.facet.Facet;
+
+import java.util.List;
+
+/**
+ *
+ */
+public interface GeoDistanceFacet extends Facet, Iterable<GeoDistanceFacet.Entry> {
+
+ /**
+ * The type of the geo distance facet.
+ */
+ public static final String TYPE = "geo_distance";
+
+ /**
+ * An ordered list of geo distance facet entries.
+ */
+ List<Entry> getEntries();
+
+ public class Entry {
+
+ double from = Double.NEGATIVE_INFINITY;
+ double to = Double.POSITIVE_INFINITY;
+ long count;
+ long totalCount;
+ double total;
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+
+ /**
+ * internal field used to see if this entry was already found for a doc
+ */
+ boolean foundInDoc = false;
+
+ Entry() {
+ }
+
+ public Entry(double from, double to, long count, long totalCount, double total, double min, double max) {
+ this.from = from;
+ this.to = to;
+ this.count = count;
+ this.totalCount = totalCount;
+ this.total = total;
+ this.min = min;
+ this.max = max;
+ }
+
+ public double getFrom() {
+ return this.from;
+ }
+
+ public double getTo() {
+ return this.to;
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+ public long getTotalCount() {
+ return this.totalCount;
+ }
+
+ public double getTotal() {
+ return this.total;
+ }
+
+ /**
+ * The mean of this facet interval.
+ */
+ public double getMean() {
+ if (totalCount == 0) {
+ return 0;
+ }
+ return total / totalCount;
+ }
+
+ public double getMin() {
+ return this.min;
+ }
+
+ public double getMax() {
+ return this.max;
+ }
+ }
+}
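Reading the entries back follows the usual facet pattern (cast from the facets map); this assumes a SearchResponse from a search that added a geo_distance facet named "nearby", which is illustrative.

    // Sketch: iterate the distance rings of a geo_distance facet.
    GeoDistanceFacet facet = (GeoDistanceFacet) response.getFacets().facetsAsMap().get("nearby");
    for (GeoDistanceFacet.Entry entry : facet) {
        System.out.printf("[%s, %s): count=%d, mean=%.2f%n",
                entry.getFrom(), entry.getTo(), entry.getCount(), entry.getMean());
    }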
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetBuilder.java
new file mode 100644
index 0000000..875400b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetBuilder.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.geodistance;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A geo distance facet builder that creates a facet of distances from a specific location, including the
+ * number of hits within each distance range and aggregated data (like totals of either the distance or
+ * custom value fields).
+ */
+public class GeoDistanceFacetBuilder extends FacetBuilder {
+
+ private String fieldName;
+
+ private String valueFieldName;
+
+ private double lat;
+
+ private double lon;
+
+ private String geohash;
+
+ private GeoDistance geoDistance;
+
+ private DistanceUnit unit;
+
+ private Map<String, Object> params;
+
+ private String valueScript;
+
+ private String lang;
+
+ private List<Entry> entries = Lists.newArrayList();
+
+ /**
+ * Constructs a new geo distance facet builder with the provided facet name.
+ */
+ public GeoDistanceFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * The geo point field that will be used to extract the document location(s).
+ */
+ public GeoDistanceFacetBuilder field(String fieldName) {
+ this.fieldName = fieldName;
+ return this;
+ }
+
+ /**
+ * A custom value field (numeric) that will be used to provide aggregated data for each facet (for example, total).
+ */
+ public GeoDistanceFacetBuilder valueField(String valueFieldName) {
+ this.valueFieldName = valueFieldName;
+ return this;
+ }
+
+ /**
+ * A custom value script (result is numeric) that will be used to provide aggregated data for each facet (for example, total).
+ */
+ public GeoDistanceFacetBuilder valueScript(String valueScript) {
+ this.valueScript = valueScript;
+ return this;
+ }
+
+ /**
+ * The language of the {@link #valueScript(String)} script.
+ */
+ public GeoDistanceFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * Parameters for {@link #valueScript(String)} to improve performance when executing the same script with different parameters.
+ */
+ public GeoDistanceFacetBuilder scriptParam(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ /**
+ * The point to create the range distance facets from.
+ *
+ * @param lat latitude.
+ * @param lon longitude.
+ */
+ public GeoDistanceFacetBuilder point(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ return this;
+ }
+
+ /**
+ * The latitude to create the range distance facets from.
+ */
+ public GeoDistanceFacetBuilder lat(double lat) {
+ this.lat = lat;
+ return this;
+ }
+
+ /**
+ * The longitude to create the range distance facets from.
+ */
+ public GeoDistanceFacetBuilder lon(double lon) {
+ this.lon = lon;
+ return this;
+ }
+
+ /**
+ * The geohash of the geo point to create the range distance facets from.
+ */
+ public GeoDistanceFacetBuilder geohash(String geohash) {
+ this.geohash = geohash;
+ return this;
+ }
+
+ /**
+ * The geo distance type used to compute the distance.
+ */
+ public GeoDistanceFacetBuilder geoDistance(GeoDistance geoDistance) {
+ this.geoDistance = geoDistance;
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit from and to.
+ *
+ * @param from The from distance limit
+ * @param to The to distance limit
+ */
+ public GeoDistanceFacetBuilder addRange(double from, double to) {
+ entries.add(new Entry(from, to));
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit from and unbounded to.
+ *
+ * @param from the from distance limit, to is unbounded.
+ */
+ public GeoDistanceFacetBuilder addUnboundedTo(double from) {
+ entries.add(new Entry(from, Double.POSITIVE_INFINITY));
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit to and unbounded from.
+ *
+ * @param to the to distance limit, from is unbounded.
+ */
+ public GeoDistanceFacetBuilder addUnboundedFrom(double to) {
+ entries.add(new Entry(Double.NEGATIVE_INFINITY, to));
+ return this;
+ }
+
+ /**
+ * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS}
+ */
+ public GeoDistanceFacetBuilder unit(DistanceUnit unit) {
+ this.unit = unit;
+ return this;
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ public GeoDistanceFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public GeoDistanceFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path the facet will execute on. A match (root object) will then cause all the
+ * nested objects matching the path to be computed into the facet.
+ */
+ public GeoDistanceFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (fieldName == null) {
+ throw new SearchSourceBuilderException("field must be set on geo_distance facet for facet [" + name + "]");
+ }
+ if (entries.isEmpty()) {
+ throw new SearchSourceBuilderException("at least one range must be defined for geo_distance facet [" + name + "]");
+ }
+
+ builder.startObject(name);
+
+ builder.startObject(GeoDistanceFacet.TYPE);
+
+ if (geohash != null) {
+ builder.field(fieldName, geohash);
+ } else {
+ builder.startArray(fieldName).value(lon).value(lat).endArray();
+ }
+
+ if (valueFieldName != null) {
+ builder.field("value_field", valueFieldName);
+ }
+
+ if (valueScript != null) {
+ builder.field("value_script", valueScript);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ }
+
+ builder.startArray("ranges");
+ for (Entry entry : entries) {
+ builder.startObject();
+ if (!Double.isInfinite(entry.from)) {
+ builder.field("from", entry.from);
+ }
+ if (!Double.isInfinite(entry.to)) {
+ builder.field("to", entry.to);
+ }
+ builder.endObject();
+ }
+ builder.endArray();
+
+ if (unit != null) {
+ builder.field("unit", unit);
+ }
+ if (geoDistance != null) {
+ builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
+ }
+
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+
+ private static class Entry {
+ final double from;
+ final double to;
+
+ private Entry(double from, double to) {
+ this.from = from;
+ this.to = to;
+ }
+ }
+}
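A builder usage sketch; the field name and coordinates are illustrative. Per the executor that follows, a document falls into an entry when from <= distance < to, so the three calls below produce non-overlapping rings.

    // Sketch: three distance rings around a point, in kilometers.
    GeoDistanceFacetBuilder facet = new GeoDistanceFacetBuilder("nearby")
            .field("location")
            .point(40.7143528, -74.0059731)  // lat, lon
            .unit(DistanceUnit.KILOMETERS)
            .addUnboundedFrom(1)             // (-inf, 1 km)
            .addRange(1, 5)                  // [1 km, 5 km)
            .addUnboundedTo(5);              // [5 km, +inf)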
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetExecutor.java
new file mode 100644
index 0000000..822c4a0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetExecutor.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.geodistance;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.GeoPointValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class GeoDistanceFacetExecutor extends FacetExecutor {
+
+ final IndexGeoPointFieldData indexFieldData;
+ final double lat;
+ final double lon;
+ final DistanceUnit unit;
+ final GeoDistance geoDistance;
+ final GeoDistance.FixedSourceDistance fixedSourceDistance;
+
+ final GeoDistanceFacet.Entry[] entries;
+
+ public GeoDistanceFacetExecutor(IndexGeoPointFieldData indexFieldData, double lat, double lon, DistanceUnit unit, GeoDistance geoDistance,
+ GeoDistanceFacet.Entry[] entries, SearchContext context) {
+ this.lat = lat;
+ this.lon = lon;
+ this.unit = unit;
+ this.entries = entries;
+ this.geoDistance = geoDistance;
+ this.indexFieldData = indexFieldData;
+ this.fixedSourceDistance = geoDistance.fixedSourceDistance(lat, lon, unit);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(new Aggregator(fixedSourceDistance, entries));
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalGeoDistanceFacet(facetName, entries);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ protected GeoPointValues values;
+ protected final Aggregator aggregator;
+
+ Collector(Aggregator aggregator) {
+ this.aggregator = aggregator;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getGeoPointValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ for (GeoDistanceFacet.Entry entry : entries) {
+ entry.foundInDoc = false;
+ }
+ this.aggregator.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public static class Aggregator {
+
+ protected final GeoDistance.FixedSourceDistance fixedSourceDistance;
+
+ protected final GeoDistanceFacet.Entry[] entries;
+
+ public Aggregator(GeoDistance.FixedSourceDistance fixedSourceDistance, GeoDistanceFacet.Entry[] entries) {
+ this.fixedSourceDistance = fixedSourceDistance;
+ this.entries = entries;
+ }
+
+ public void onDoc(int docId, GeoPointValues values) {
+ final int length = values.setDocument(docId);
+ for (int i = 0; i < length; i++) {
+ final GeoPoint next = values.nextValue();
+ double distance = fixedSourceDistance.calculate(next.getLat(), next.getLon());
+ for (GeoDistanceFacet.Entry entry : entries) {
+ if (entry.foundInDoc) {
+ continue;
+ }
+ if (distance >= entry.getFrom() && distance < entry.getTo()) {
+ entry.foundInDoc = true;
+ collectGeoPoint(entry, docId, distance);
+ }
+ }
+ }
+ }
+
+ protected void collectGeoPoint(GeoDistanceFacet.Entry entry, int docId, double distance) {
+ entry.count++;
+ entry.totalCount++;
+ entry.total += distance;
+ if (distance < entry.min) {
+ entry.min = distance;
+ }
+ if (distance > entry.max) {
+ entry.max = distance;
+ }
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetParser.java b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetParser.java
new file mode 100644
index 0000000..4cbcbbe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/GeoDistanceFacetParser.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.geodistance;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.FacetPhaseExecutionException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ *
+ */
+public class GeoDistanceFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public GeoDistanceFacetParser(Settings settings) {
+ super(settings);
+ InternalGeoDistanceFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{GeoDistanceFacet.TYPE, "geoDistance"};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String fieldName = null;
+ String valueFieldName = null;
+ String valueScript = null;
+ String scriptLang = null;
+ Map<String, Object> params = null;
+ GeoPoint point = new GeoPoint();
+ DistanceUnit unit = DistanceUnit.DEFAULT;
+ GeoDistance geoDistance = GeoDistance.DEFAULT;
+ List<GeoDistanceFacet.Entry> entries = Lists.newArrayList();
+
+ boolean normalizeLon = true;
+ boolean normalizeLat = true;
+
+ XContentParser.Token token;
+ String currentName = parser.currentName();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("ranges".equals(currentName) || "entries".equals(currentName)) {
+ // "ranges" : [
+ // { "from" : 0, "to" : 12.5 }
+ // { "from" : 12.5 }
+ // ]
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ double from = Double.NEGATIVE_INFINITY;
+ double to = Double.POSITIVE_INFINITY;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("from".equals(currentName)) {
+ from = parser.doubleValue();
+ } else if ("to".equals(currentName)) {
+ to = parser.doubleValue();
+ }
+ }
+ }
+ entries.add(new GeoDistanceFacet.Entry(from, to, 0, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ }
+ } else {
+ GeoPoint.parse(parser, point);
+ fieldName = currentName;
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentName)) {
+ params = parser.map();
+ } else {
+ // the json is in the format of -> field : { lat : 30, lon : 12 }
+ fieldName = currentName;
+ GeoPoint.parse(parser, point);
+ }
+ } else if (token.isValue()) {
+ if (currentName.equals("unit")) {
+ unit = DistanceUnit.fromString(parser.text());
+ } else if (currentName.equals("distance_type") || currentName.equals("distanceType")) {
+ geoDistance = GeoDistance.fromString(parser.text());
+ } else if ("value_field".equals(currentName) || "valueField".equals(currentName)) {
+ valueFieldName = parser.text();
+ } else if ("value_script".equals(currentName) || "valueScript".equals(currentName)) {
+ valueScript = parser.text();
+ } else if ("lang".equals(currentName)) {
+ scriptLang = parser.text();
+ } else if ("normalize".equals(currentName)) {
+ normalizeLat = parser.booleanValue();
+ normalizeLon = parser.booleanValue();
+ } else {
+ // otherwise, assume the string value is the geo point itself
+ point.resetFromString(parser.text());
+
+ fieldName = currentName;
+ }
+ }
+ }
+
+ if (entries.isEmpty()) {
+ throw new FacetPhaseExecutionException(facetName, "no ranges defined for geo_distance facet");
+ }
+
+ if (normalizeLat || normalizeLon) {
+ GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+ }
+
+ FieldMapper keyFieldMapper = context.smartNameFieldMapper(fieldName);
+ if (keyFieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "failed to find mapping for [" + fieldName + "]");
+ }
+ IndexGeoPointFieldData keyIndexFieldData = context.fieldData().getForField(keyFieldMapper);
+
+ if (valueFieldName != null) {
+ FieldMapper valueFieldMapper = context.smartNameFieldMapper(valueFieldName);
+ if (valueFieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "failed to find mapping for [" + valueFieldName + "]");
+ }
+ IndexNumericFieldData valueIndexFieldData = context.fieldData().getForField(valueFieldMapper);
+ return new ValueGeoDistanceFacetExecutor(keyIndexFieldData, point.lat(), point.lon(), unit, geoDistance, entries.toArray(new GeoDistanceFacet.Entry[entries.size()]),
+ context, valueIndexFieldData);
+ }
+
+ if (valueScript != null) {
+ return new ScriptGeoDistanceFacetExecutor(keyIndexFieldData, point.lat(), point.lon(), unit, geoDistance, entries.toArray(new GeoDistanceFacet.Entry[entries.size()]),
+ context, scriptLang, valueScript, params);
+ }
+
+ return new GeoDistanceFacetExecutor(keyIndexFieldData, point.lat(), point.lon(), unit, geoDistance, entries.toArray(new GeoDistanceFacet.Entry[entries.size()]),
+ context);
+ }
+}
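To tie the parser branches together, here is a hedged sketch of the facet source it consumes, built with XContentBuilder; values are illustrative, and the point may equally be given as an object ({ "lat" : ..., "lon" : ... }), a "lat,lon" string, or a geohash.

    // Sketch: the geo_distance facet body this parser accepts.
    XContentBuilder source = XContentFactory.jsonBuilder()
            .startObject()
                .startArray("location").value(-74.0059731).value(40.7143528).endArray()  // [lon, lat]
                .startArray("ranges")
                    .startObject().field("to", 1).endObject()
                    .startObject().field("from", 1).field("to", 5).endObject()
                    .startObject().field("from", 5).endObject()
                .endArray()
                .field("unit", "km")
                .field("distance_type", "arc")
            .endObject();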
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/InternalGeoDistanceFacet.java b/src/main/java/org/elasticsearch/search/facet/geodistance/InternalGeoDistanceFacet.java
new file mode 100644
index 0000000..7ef8639
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/InternalGeoDistanceFacet.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.geodistance;
+
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalGeoDistanceFacet extends InternalFacet implements GeoDistanceFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("geoDistance"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readGeoDistanceFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ Entry[] entries;
+
+ InternalGeoDistanceFacet() {
+ }
+
+ public InternalGeoDistanceFacet(String name, Entry[] entries) {
+ super(name);
+ this.entries = entries;
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ @Override
+ public List<Entry> getEntries() {
+ return ImmutableList.copyOf(entries);
+ }
+
+ @Override
+ public Iterator<Entry> iterator() {
+ return getEntries().iterator();
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ return facets.get(0);
+ }
+ InternalGeoDistanceFacet agg = (InternalGeoDistanceFacet) facets.get(0);
+ for (int i = 1; i < facets.size(); i++) {
+ InternalGeoDistanceFacet geoDistanceFacet = (InternalGeoDistanceFacet) facets.get(i);
+ for (int j = 0; j < geoDistanceFacet.entries.length; j++) {
+ GeoDistanceFacet.Entry aggEntry = agg.entries[j];
+ GeoDistanceFacet.Entry currentEntry = geoDistanceFacet.entries[j];
+ aggEntry.count += currentEntry.count;
+ aggEntry.totalCount += currentEntry.totalCount;
+ aggEntry.total += currentEntry.total;
+ if (currentEntry.min < aggEntry.min) {
+ aggEntry.min = currentEntry.min;
+ }
+ if (currentEntry.max > aggEntry.max) {
+ aggEntry.max = currentEntry.max;
+ }
+ }
+ }
+ return agg;
+ }
+
+ public static InternalGeoDistanceFacet readGeoDistanceFacet(StreamInput in) throws IOException {
+ InternalGeoDistanceFacet facet = new InternalGeoDistanceFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ entries = new Entry[in.readVInt()];
+ for (int i = 0; i < entries.length; i++) {
+ entries[i] = new Entry(in.readDouble(), in.readDouble(), in.readVLong(), in.readVLong(), in.readDouble(), in.readDouble(), in.readDouble());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(entries.length);
+ for (Entry entry : entries) {
+ out.writeDouble(entry.from);
+ out.writeDouble(entry.to);
+ out.writeVLong(entry.count);
+ out.writeVLong(entry.totalCount);
+ out.writeDouble(entry.total);
+ out.writeDouble(entry.min);
+ out.writeDouble(entry.max);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString RANGES = new XContentBuilderString("ranges");
+ static final XContentBuilderString FROM = new XContentBuilderString("from");
+ static final XContentBuilderString TO = new XContentBuilderString("to");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, GeoDistanceFacet.TYPE);
+ builder.startArray(Fields.RANGES);
+ for (Entry entry : entries) {
+ builder.startObject();
+ if (!Double.isInfinite(entry.from)) {
+ builder.field(Fields.FROM, entry.from);
+ }
+ if (!Double.isInfinite(entry.to)) {
+ builder.field(Fields.TO, entry.to);
+ }
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/ScriptGeoDistanceFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/geodistance/ScriptGeoDistanceFacetExecutor.java
new file mode 100644
index 0000000..61f6183
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/ScriptGeoDistanceFacetExecutor.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.geodistance;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class ScriptGeoDistanceFacetExecutor extends GeoDistanceFacetExecutor {
+
+ private final SearchScript script;
+
+ public ScriptGeoDistanceFacetExecutor(IndexGeoPointFieldData indexFieldData, double lat, double lon, DistanceUnit unit, GeoDistance geoDistance,
+ GeoDistanceFacet.Entry[] entries, SearchContext context,
+ String scriptLang, String script, Map<String, Object> params) {
+ super(indexFieldData, lat, lon, unit, geoDistance, entries, context);
+ this.script = context.scriptService().search(context.lookup(), scriptLang, script, params);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(new ScriptAggregator(fixedSourceDistance, entries, script));
+ }
+
+ class Collector extends GeoDistanceFacetExecutor.Collector {
+
+ private ScriptAggregator scriptAggregator;
+
+ Collector(ScriptAggregator aggregator) {
+ super(aggregator);
+ this.scriptAggregator = aggregator;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ script.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ super.setNextReader(context);
+ script.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ script.setNextDocId(doc);
+ scriptAggregator.scriptValue = script.runAsDouble();
+ super.collect(doc);
+ }
+ }
+
+ public static final class ScriptAggregator extends GeoDistanceFacetExecutor.Aggregator {
+
+ private double scriptValue;
+
+ public ScriptAggregator(GeoDistance.FixedSourceDistance fixedSourceDistance, GeoDistanceFacet.Entry[] entries, SearchScript script) {
+ super(fixedSourceDistance, entries);
+ }
+
+ @Override
+ protected void collectGeoPoint(GeoDistanceFacet.Entry entry, int docId, double distance) {
+ final double scriptValue = this.scriptValue;
+ entry.count++;
+ entry.totalCount++;
+ entry.total += scriptValue;
+ if (scriptValue < entry.min) {
+ entry.min = scriptValue;
+ }
+ if (scriptValue > entry.max) {
+ entry.max = scriptValue;
+ }
+ }
+
+ }
+}
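As with the date histogram, the script variant substitutes a per-document script result for the raw distance when accumulating totals; the distance itself still decides which ring the document lands in. A hedged builder sketch, names illustrative.

    // Sketch: per-ring totals of a script-computed value instead of the distance.
    GeoDistanceFacetBuilder facet = new GeoDistanceFacetBuilder("nearby_value")
            .field("location")
            .lat(40.7143528).lon(-74.0059731)
            .valueScript("doc['price'].value")
            .addRange(0, 50);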
diff --git a/src/main/java/org/elasticsearch/search/facet/geodistance/ValueGeoDistanceFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/geodistance/ValueGeoDistanceFacetExecutor.java
new file mode 100644
index 0000000..13e92c3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/geodistance/ValueGeoDistanceFacetExecutor.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.geodistance;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A geo distance facet executor that aggregates a separate numeric value field
+ * for each document falling into a distance range.
+ */
+public class ValueGeoDistanceFacetExecutor extends GeoDistanceFacetExecutor {
+
+ private final IndexNumericFieldData valueIndexFieldData;
+
+ public ValueGeoDistanceFacetExecutor(IndexGeoPointFieldData indexFieldData, double lat, double lon, DistanceUnit unit, GeoDistance geoDistance,
+ GeoDistanceFacet.Entry[] entries, SearchContext context, IndexNumericFieldData valueIndexFieldData) {
+ super(indexFieldData, lat, lon, unit, geoDistance, entries, context);
+ this.valueIndexFieldData = valueIndexFieldData;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(new Aggregator(fixedSourceDistance, entries));
+ }
+
+ class Collector extends GeoDistanceFacetExecutor.Collector {
+
+ Collector(Aggregator aggregator) {
+ super(aggregator);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ super.setNextReader(context);
+ ((Aggregator) this.aggregator).valueValues = valueIndexFieldData.load(context).getDoubleValues();
+ }
+ }
+
+ public static class Aggregator extends GeoDistanceFacetExecutor.Aggregator {
+
+ DoubleValues valueValues;
+
+ public Aggregator(GeoDistance.FixedSourceDistance fixedSourceDistance, GeoDistanceFacet.Entry[] entries) {
+ super(fixedSourceDistance, entries);
+ }
+
+ @Override
+ protected void collectGeoPoint(GeoDistanceFacet.Entry entry, int docId, double distance) {
+ entry.foundInDoc = true;
+ entry.count++;
+ int seek = valueValues.setDocument(docId);
+ for (int i = 0; i < seek; i++) {
+ double value = valueValues.nextValue();
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+
+ }
+}
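
The Aggregator above walks a multi-valued numeric field with the ES 1.x DoubleValues cursor: setDocument(docId) returns the number of values the document carries and each nextValue() call yields the next one. The same pattern in isolation, as a hedged sketch:

    import org.elasticsearch.index.fielddata.DoubleValues;

    // Sketch only: sum every value of a multi-valued numeric field for one
    // document, mirroring the setDocument/nextValue loop in collectGeoPoint above.
    final class MultiValueSum {
        static double sumFor(DoubleValues values, int docId) {
            final int numValues = values.setDocument(docId); // value count for this doc
            double sum = 0;
            for (int i = 0; i < numValues; i++) {
                sum += values.nextValue();
            }
            return sum;
        }
    }
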
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/CountHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/histogram/CountHistogramFacetExecutor.java
new file mode 100644
index 0000000..99793f0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/CountHistogramFacetExecutor.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongLongOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A histogram facet collector that buckets the values of a single field and
+ * tracks only the number of hits per bucket.
+ */
+public class CountHistogramFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+ private final HistogramFacet.ComparatorType comparatorType;
+ final long interval;
+
+ final Recycler.V<LongLongOpenHashMap> counts;
+
+ public CountHistogramFacetExecutor(IndexNumericFieldData indexFieldData, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
+ this.comparatorType = comparatorType;
+ this.indexFieldData = indexFieldData;
+ this.interval = interval;
+
+ this.counts = context.cacheRecycler().longLongMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ InternalCountHistogramFacet.CountEntry[] entries = new InternalCountHistogramFacet.CountEntry[counts.v().size()];
+ final boolean[] states = counts.v().allocated;
+ final long[] keys = counts.v().keys;
+ final long[] values = counts.v().values;
+ int entryIndex = 0;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ entries[entryIndex++] = new InternalCountHistogramFacet.CountEntry(keys[i], values[i]);
+ }
+ }
+ counts.release();
+ return new InternalCountHistogramFacet(facetName, comparatorType, entries);
+ }
+
+ public static long bucket(double value, long interval) {
+ return (((long) (value / interval)) * interval);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final HistogramProc histoProc;
+ private DoubleValues values;
+
+ public Collector() {
+ histoProc = new HistogramProc(interval, counts.v());
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public final static class HistogramProc extends DoubleFacetAggregatorBase {
+
+ private final long interval;
+ private final LongLongOpenHashMap counts;
+
+ public HistogramProc(long interval, LongLongOpenHashMap counts) {
+ this.interval = interval;
+ this.counts = counts;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ long bucket = bucket(value, interval);
+ counts.addTo(bucket, 1);
+ }
+
+ public LongLongOpenHashMap counts() {
+ return counts;
+ }
+ }
+}
\ No newline at end of file
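
The static bucket(value, interval) helper above snaps a value to the lower edge of its bucket via integer truncation. A standalone demonstration of the arithmetic; note that truncation, unlike flooring, pulls negative keys toward zero:

    public final class BucketDemo {
        // Same arithmetic as CountHistogramFacetExecutor.bucket(double, long).
        static long bucket(double value, long interval) {
            return ((long) (value / interval)) * interval;
        }

        public static void main(String[] args) {
            System.out.println(bucket(7.5, 5));  // 5
            System.out.println(bucket(12.0, 5)); // 10
            System.out.println(bucket(-7.0, 5)); // -5 (truncation, not floor)
        }
    }
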
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/FullHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/histogram/FullHistogramFacetExecutor.java
new file mode 100644
index 0000000..9fb0d9b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/FullHistogramFacetExecutor.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A histogram facet collector that uses the same field as the key as well as the
+ * value.
+ */
+public class FullHistogramFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+ private final HistogramFacet.ComparatorType comparatorType;
+ final long interval;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
+
+ public FullHistogramFacetExecutor(IndexNumericFieldData indexFieldData, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
+ this.comparatorType = comparatorType;
+ this.indexFieldData = indexFieldData;
+ this.interval = interval;
+
+ this.entries = context.cacheRecycler().longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ List<InternalFullHistogramFacet.FullEntry> fullEntries = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
+ boolean[] states = entries.v().allocated;
+ Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ fullEntries.add((InternalFullHistogramFacet.FullEntry) values[i]);
+ }
+ }
+ entries.release();
+ return new InternalFullHistogramFacet(facetName, comparatorType, fullEntries);
+ }
+
+ public static long bucket(double value, long interval) {
+ return (((long) (value / interval)) * interval);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final HistogramProc histoProc;
+ private DoubleValues values;
+
+ Collector() {
+ this.histoProc = new HistogramProc(interval, entries.v());
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public final static class HistogramProc extends DoubleFacetAggregatorBase {
+
+ final long interval;
+ final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
+
+ public HistogramProc(long interval, LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
+ this.interval = interval;
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ long bucket = bucket(value, interval);
+ InternalFullHistogramFacet.FullEntry entry = entries.get(bucket);
+ if (entry == null) {
+ entry = new InternalFullHistogramFacet.FullEntry(bucket, 1, value, value, 1, value);
+ entries.put(bucket, entry);
+ } else {
+ entry.count++;
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
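
HistogramProc.onValue above is a create-or-update fold over a primitive hash map: the first value seen for a bucket seeds a FullEntry, later values are folded into it. A hedged, JDK-only restatement of the same fold (BucketStats is illustrative, not part of this patch):

    import java.util.HashMap;
    import java.util.Map;

    final class BucketStats {
        long count, totalCount;
        double total;
        double min = Double.POSITIVE_INFINITY;
        double max = Double.NEGATIVE_INFINITY;

        // Fold one value into its bucket, creating the stats record on first
        // sight, as onValue(...) above does with FullEntry.
        static void onValue(Map<Long, BucketStats> buckets, long bucket, double value) {
            BucketStats s = buckets.computeIfAbsent(bucket, k -> new BucketStats());
            s.count++;
            s.totalCount++;
            s.total += value;
            s.min = Math.min(s.min, value);
            s.max = Math.max(s.max, value);
        }
    }
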
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacet.java
new file mode 100644
index 0000000..6ec1d2d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacet.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.facet.Facet;
+
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * Numeric histogram facet.
+ */
+public interface HistogramFacet extends Facet, Iterable<HistogramFacet.Entry> {
+
+ /**
+ * The type of the histogram facet.
+ */
+ public static final String TYPE = "histogram";
+
+ /**
+ * An ordered list of histogram facet entries.
+ */
+ List<? extends Entry> getEntries();
+
+ public static enum ComparatorType {
+ KEY((byte) 0, "key", new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return (o1.getKey() < o2.getKey() ? -1 : (o1.getKey() == o2.getKey() ? 0 : 1));
+ }
+ }),
+ COUNT((byte) 1, "count", new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return (o1.getCount() < o2.getCount() ? -1 : (o1.getCount() == o2.getCount() ? 0 : 1));
+ }
+ }),
+ TOTAL((byte) 2, "total", new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return (o1.getTotal() < o2.getTotal() ? -1 : (o1.getTotal() == o2.getTotal() ? 0 : 1));
+ }
+ });
+
+ private final byte id;
+
+ private final String description;
+
+ private final Comparator<Entry> comparator;
+
+ ComparatorType(byte id, String description, Comparator<Entry> comparator) {
+ this.id = id;
+ this.description = description;
+ this.comparator = comparator;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public String description() {
+ return this.description;
+ }
+
+ public Comparator<Entry> comparator() {
+ return comparator;
+ }
+
+ public static ComparatorType fromId(byte id) {
+ if (id == 0) {
+ return KEY;
+ } else if (id == 1) {
+ return COUNT;
+ } else if (id == 2) {
+ return TOTAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for histogram comparator [" + id + "]");
+ }
+
+ public static ComparatorType fromString(String type) {
+ if ("key".equals(type)) {
+ return KEY;
+ } else if ("count".equals(type)) {
+ return COUNT;
+ } else if ("total".equals(type)) {
+ return TOTAL;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for histogram comparator [" + type + "]");
+ }
+ }
+
+ public interface Entry {
+
+ /**
+ * The key value of the histogram.
+ */
+ long getKey();
+
+ /**
+ * The number of hits that fall within that key "range" or "interval".
+ */
+ long getCount();
+
+ /**
+ * The total count of values aggregated to compute the total.
+ */
+ long getTotalCount();
+
+ /**
+ * The sum / total of the values of the value field that fall within this key "interval".
+ */
+ double getTotal();
+
+ /**
+ * The mean of this facet interval.
+ */
+ double getMean();
+
+ /**
+ * The minimum value.
+ */
+ double getMin();
+
+ /**
+ * The maximum value.
+ */
+ double getMax();
+ }
+}
\ No newline at end of file
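
The comparators above deliberately push null entries to the end: the reduce path of InternalFullHistogramFacet (later in this patch) sorts an array that may carry trailing nulls and stops at the first one. Applying a comparator client-side, as a sketch:

    import java.util.Arrays;

    import org.elasticsearch.search.facet.histogram.HistogramFacet;

    final class SortEntries {
        // Sketch only: order histogram entries by ascending count using the
        // enum comparator defined above.
        static void sortByCount(HistogramFacet.Entry[] entries) {
            Arrays.sort(entries, HistogramFacet.ComparatorType.COUNT.comparator());
        }
    }
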
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetBuilder.java
new file mode 100644
index 0000000..2bd1ab1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetBuilder.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A facet builder of histogram facets.
+ */
+public class HistogramFacetBuilder extends FacetBuilder {
+ private String keyFieldName;
+ private String valueFieldName;
+ private long interval = -1;
+ private HistogramFacet.ComparatorType comparatorType;
+
+ /**
+ * Constructs a new histogram facet with the provided facet logical name.
+ *
+ * @param name The logical name of the facet
+ */
+ public HistogramFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * The field name to run the histogram facet on. Equivalent to using the provided field
+ * as both the {@link #keyField(String)} and {@link #valueField(String)}.
+ */
+ public HistogramFacetBuilder field(String field) {
+ this.keyFieldName = field;
+ return this;
+ }
+
+ /**
+ * The field name to use in order to control where the hit will "fall into" within the histogram
+ * entries. Essentially, using the key field numeric value, the hit will be "rounded" into the relevant
+ * bucket controlled by the interval.
+ */
+ public HistogramFacetBuilder keyField(String keyField) {
+ this.keyFieldName = keyField;
+ return this;
+ }
+
+ /**
+ * The field name to use as the value of the hit to compute data based on values within the interval
+ * (for example, total).
+ */
+ public HistogramFacetBuilder valueField(String valueField) {
+ this.valueFieldName = valueField;
+ return this;
+ }
+
+ /**
+ * The interval used to control the "size" of the bucket into which each key value of a hit falls.
+ */
+ public HistogramFacetBuilder interval(long interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ /**
+ * Same as {@link #interval(long)}, with the interval given as a time duration and converted to milliseconds.
+ */
+ public HistogramFacetBuilder interval(long interval, TimeUnit unit) {
+ return interval(unit.toMillis(interval));
+ }
+
+ public HistogramFacetBuilder comparator(HistogramFacet.ComparatorType comparatorType) {
+ this.comparatorType = comparatorType;
+ return this;
+ }
+
+ /**
+ * Should the facet run in global mode (not bounded by the search query) or not (bounded by
+ * the search query). Defaults to <tt>false</tt>.
+ */
+ public HistogramFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ /**
+ * An additional filter used to further filter down the set of documents the facet will run on.
+ */
+ public HistogramFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path the facet will execute on. A match (root object) will then cause all the
+ * nested objects matching the path to be computed into the facet.
+ */
+ public HistogramFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (keyFieldName == null) {
+ throw new SearchSourceBuilderException("field must be set on histogram facet for facet [" + name + "]");
+ }
+ if (interval < 0) {
+ throw new SearchSourceBuilderException("interval must be set on histogram facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(HistogramFacet.TYPE);
+ if (valueFieldName != null) {
+ builder.field("key_field", keyFieldName);
+ builder.field("value_field", valueFieldName);
+ } else {
+ builder.field("field", keyFieldName);
+ }
+ builder.field("interval", interval);
+
+ if (comparatorType != null) {
+ builder.field("comparator", comparatorType.description());
+ }
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
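
A hedged usage sketch of the builder above, with the request body its toXContent(...) writes; the facet and field names ("price_histo", "price", "quantity") are hypothetical:

    import org.elasticsearch.search.facet.histogram.HistogramFacet;
    import org.elasticsearch.search.facet.histogram.HistogramFacetBuilder;

    final class PriceHistogramFacet {
        static HistogramFacetBuilder build() {
            return new HistogramFacetBuilder("price_histo")
                    .keyField("price")      // decides which bucket a hit falls into
                    .valueField("quantity") // aggregated (total/min/max/mean) per bucket
                    .interval(100)
                    .comparator(HistogramFacet.ComparatorType.KEY);
            // toXContent then writes, per the method above:
            // { "price_histo": { "histogram": {
            //     "key_field": "price", "value_field": "quantity",
            //     "interval": 100, "comparator": "key" } } }
        }
    }
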
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetParser.java b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetParser.java
new file mode 100644
index 0000000..58cb447
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramFacetParser.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.histogram;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.FacetPhaseExecutionException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Parses <tt>histogram</tt> facet requests and selects the matching facet executor.
+ */
+public class HistogramFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public HistogramFacetParser(Settings settings) {
+ super(settings);
+ InternalHistogramFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{HistogramFacet.TYPE};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String keyField = null;
+ String valueField = null;
+ String keyScript = null;
+ String valueScript = null;
+ String scriptLang = null;
+ Map<String, Object> params = null;
+ long interval = 0;
+ HistogramFacet.ComparatorType comparatorType = HistogramFacet.ComparatorType.KEY;
+ XContentParser.Token token;
+ String fieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(fieldName)) {
+ params = parser.map();
+ }
+ } else if (token.isValue()) {
+ if ("field".equals(fieldName)) {
+ keyField = parser.text();
+ } else if ("key_field".equals(fieldName) || "keyField".equals(fieldName)) {
+ keyField = parser.text();
+ } else if ("value_field".equals(fieldName) || "valueField".equals(fieldName)) {
+ valueField = parser.text();
+ } else if ("interval".equals(fieldName)) {
+ interval = parser.longValue();
+ } else if ("time_interval".equals(fieldName) || "timeInterval".equals(fieldName)) {
+ interval = TimeValue.parseTimeValue(parser.text(), null).millis();
+ } else if ("key_script".equals(fieldName) || "keyScript".equals(fieldName)) {
+ keyScript = parser.text();
+ } else if ("value_script".equals(fieldName) || "valueScript".equals(fieldName)) {
+ valueScript = parser.text();
+ } else if ("order".equals(fieldName) || "comparator".equals(fieldName)) {
+ comparatorType = HistogramFacet.ComparatorType.fromString(parser.text());
+ } else if ("lang".equals(fieldName)) {
+ scriptLang = parser.text();
+ }
+ }
+ }
+
+ if (keyScript != null && valueScript != null) {
+ return new ScriptHistogramFacetExecutor(scriptLang, keyScript, valueScript, params, interval, comparatorType, context);
+ }
+
+ if (keyField == null) {
+ throw new FacetPhaseExecutionException(facetName, "key field is required to be set for histogram facet, either using [field] or using [key_field]");
+ }
+
+ if (interval <= 0) {
+ throw new FacetPhaseExecutionException(facetName, "[interval] is required to be set for histogram facet");
+ }
+
+ FieldMapper keyMapper = context.smartNameFieldMapper(keyField);
+ if (keyMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "No mapping found for key_field [" + keyField + "]");
+ }
+ IndexNumericFieldData keyIndexFieldData = context.fieldData().getForField(keyMapper);
+
+ IndexNumericFieldData valueIndexFieldData = null;
+ if (valueField != null) {
+ FieldMapper valueMapper = context.smartNameFieldMapper(valueField);
+ if (valueMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "No mapping found for value_field [" + valueField + "]");
+ }
+ valueIndexFieldData = context.fieldData().getForField(valueMapper);
+ }
+
+ if (valueScript != null) {
+ return new ValueScriptHistogramFacetExecutor(keyIndexFieldData, scriptLang, valueScript, params, interval, comparatorType, context);
+ } else if (valueField == null) {
+ return new CountHistogramFacetExecutor(keyIndexFieldData, interval, comparatorType, context);
+ } else if (keyField.equals(valueField)) {
+ return new FullHistogramFacetExecutor(keyIndexFieldData, interval, comparatorType, context);
+ } else {
+ // we have a value field, and it's different from the key field
+ return new ValueHistogramFacetExecutor(keyIndexFieldData, valueIndexFieldData, interval, comparatorType, context);
+ }
+ }
+}
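
The tail of parse(...) above dispatches to one of five executors. The histogram object bodies below summarize which combination selects which executor; field names and script bodies are hypothetical:

    // Sketch only: body of the "histogram" object in a facet request, and the
    // executor parse(...) above selects for it.
    final class HistogramRequestBodies {
        static final String COUNT_ONLY =   // -> CountHistogramFacetExecutor
                "{\"field\": \"price\", \"interval\": 10}";
        static final String FULL =         // -> FullHistogramFacetExecutor (key == value field)
                "{\"key_field\": \"price\", \"value_field\": \"price\", \"interval\": 10}";
        static final String KEY_VALUE =    // -> ValueHistogramFacetExecutor
                "{\"key_field\": \"price\", \"value_field\": \"qty\", \"interval\": 10}";
        static final String VALUE_SCRIPT = // -> ValueScriptHistogramFacetExecutor
                "{\"field\": \"price\", \"interval\": 10, \"value_script\": \"doc['qty'].value\"}";
        static final String SCRIPTED =     // -> ScriptHistogramFacetExecutor (interval optional)
                "{\"key_script\": \"doc['price'].value\", \"value_script\": \"doc['qty'].value\"}";
    }
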
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/HistogramScriptFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramScriptFacetBuilder.java
new file mode 100644
index 0000000..699d06d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/HistogramScriptFacetBuilder.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A facet builder for histogram facets whose key and/or value are computed by scripts.
+ */
+public class HistogramScriptFacetBuilder extends FacetBuilder {
+ private String lang;
+ private String keyFieldName;
+ private String keyScript;
+ private String valueScript;
+ private Map<String, Object> params;
+ private long interval = -1;
+ private HistogramFacet.ComparatorType comparatorType;
+
+ public HistogramScriptFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * The language of the script.
+ */
+ public HistogramScriptFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder keyField(String keyFieldName) {
+ this.keyFieldName = keyFieldName;
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder keyScript(String keyScript) {
+ this.keyScript = keyScript;
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder valueScript(String valueScript) {
+ this.valueScript = valueScript;
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder interval(long interval) {
+ this.interval = interval;
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder comparator(HistogramFacet.ComparatorType comparatorType) {
+ this.comparatorType = comparatorType;
+ return this;
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ @Override
+ public HistogramScriptFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public HistogramScriptFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path the facet will execute on. A match (root object) will then cause all the
+ * nested objects matching the path to be computed into the facet.
+ */
+ public HistogramScriptFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (keyScript == null && keyFieldName == null) {
+ throw new SearchSourceBuilderException("key_script or key_field must be set on histogram script facet for facet [" + name + "]");
+ }
+ if (valueScript == null) {
+ throw new SearchSourceBuilderException("value_script must be set on histogram script facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(HistogramFacet.TYPE);
+ if (keyFieldName != null) {
+ builder.field("key_field", keyFieldName);
+ } else if (keyScript != null) {
+ builder.field("key_script", keyScript);
+ }
+ builder.field("value_script", valueScript);
+
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (interval > 0) { // interval is optional in script facet, can be defined by the key script
+ builder.field("interval", interval);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ if (comparatorType != null) {
+ builder.field("comparator", comparatorType.description());
+ }
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
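
A hedged usage sketch of the script variant; the script bodies are hypothetical. No interval is set because, as the parser above and ScriptHistogramFacetExecutor later in this patch show, a key script may compute the bucket key directly:

    import org.elasticsearch.search.facet.histogram.HistogramScriptFacetBuilder;

    final class ScriptedHistogramFacet {
        static HistogramScriptFacetBuilder build() {
            return new HistogramScriptFacetBuilder("histo")
                    // the key script computes the bucket key itself
                    .keyScript("(long) (doc['price'].value / bucketSize) * bucketSize")
                    .valueScript("doc['qty'].value")
                    .param("bucketSize", 10);
        }
    }
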
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/InternalCountHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/histogram/InternalCountHistogramFacet.java
new file mode 100644
index 0000000..2f934c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/InternalCountHistogramFacet.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongLongOpenHashMap;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * A histogram facet implementation that carries only per-bucket counts.
+ */
+public class InternalCountHistogramFacet extends InternalHistogramFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("cHistogram"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readHistogramFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ /**
+ * A single count-only entry within the result of a histogram facet.
+ */
+ public static class CountEntry implements Entry {
+ private final long key;
+ private final long count;
+
+ public CountEntry(long key, long count) {
+ this.key = key;
+ this.count = count;
+ }
+
+ @Override
+ public long getKey() {
+ return key;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public double getTotal() {
+ return Double.NaN;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return 0;
+ }
+
+ @Override
+ public double getMean() {
+ return Double.NaN;
+ }
+
+ @Override
+ public double getMin() {
+ return Double.NaN;
+ }
+
+ @Override
+ public double getMax() {
+ return Double.NaN;
+ }
+ }
+
+ ComparatorType comparatorType;
+ CountEntry[] entries = null;
+
+ InternalCountHistogramFacet() {
+ }
+
+ public InternalCountHistogramFacet(String name, ComparatorType comparatorType, CountEntry[] entries) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.entries = entries;
+ }
+
+ @Override
+ public List<CountEntry> getEntries() {
+ return Arrays.asList(entries);
+ }
+
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) getEntries().iterator();
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ // need to sort here...
+ InternalCountHistogramFacet histoFacet = (InternalCountHistogramFacet) facets.get(0);
+ Arrays.sort(histoFacet.entries, histoFacet.comparatorType.comparator());
+ return facets.get(0);
+ }
+
+ Recycler.V<LongLongOpenHashMap> counts = context.cacheRecycler().longLongMap(-1);
+ for (Facet facet : facets) {
+ InternalCountHistogramFacet histoFacet = (InternalCountHistogramFacet) facet;
+ for (Entry entry : histoFacet.entries) {
+ counts.v().addTo(entry.getKey(), entry.getCount());
+ }
+ }
+ final boolean[] states = counts.v().allocated;
+ final long[] keys = counts.v().keys;
+ final long[] values = counts.v().values;
+ CountEntry[] entries = new CountEntry[counts.v().size()];
+ int entryIndex = 0;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ entries[entryIndex++] = new CountEntry(keys[i], values[i]);
+ }
+ }
+ counts.release();
+
+ Arrays.sort(entries, comparatorType.comparator());
+
+ return new InternalCountHistogramFacet(getName(), comparatorType, entries);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString ENTRIES = new XContentBuilderString("entries");
+ static final XContentBuilderString KEY = new XContentBuilderString("key");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, HistogramFacet.TYPE);
+ builder.startArray(Fields.ENTRIES);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.KEY, entry.getKey());
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalCountHistogramFacet readHistogramFacet(StreamInput in) throws IOException {
+ InternalCountHistogramFacet facet = new InternalCountHistogramFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ int size = in.readVInt();
+ entries = new CountEntry[size];
+ for (int i = 0; i < size; i++) {
+ entries[i] = new CountEntry(in.readLong(), in.readVLong());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(entries.length);
+ for (CountEntry entry : entries) {
+ out.writeLong(entry.getKey());
+ out.writeVLong(entry.getCount());
+ }
+ }
+}
\ No newline at end of file
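
reduce(...) above merges per-shard facets by summing counts per bucket key through the recycled LongLongOpenHashMap. A hedged, JDK-only restatement of that merge:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class MergeShardCounts {
        // Sum bucket counts across shard results, as reduce(...) above does
        // with counts.v().addTo(key, count).
        static Map<Long, Long> merge(List<Map<Long, Long>> shardCounts) {
            Map<Long, Long> merged = new HashMap<>();
            for (Map<Long, Long> counts : shardCounts) {
                for (Map.Entry<Long, Long> e : counts.entrySet()) {
                    merged.merge(e.getKey(), e.getValue(), Long::sum);
                }
            }
            return merged;
        }
    }
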
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/InternalFullHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/histogram/InternalFullHistogramFacet.java
new file mode 100644
index 0000000..abefc6d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/InternalFullHistogramFacet.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * A histogram facet implementation that carries full per-bucket statistics
+ * (count, total, min, max, mean).
+ */
+public class InternalFullHistogramFacet extends InternalHistogramFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("fHistogram"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readHistogramFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ /**
+ * A single full-stats entry within the result of a histogram facet.
+ */
+ public static class FullEntry implements Entry {
+ long key;
+ long count;
+ long totalCount;
+ double total;
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+
+ public FullEntry(long key, long count, double min, double max, long totalCount, double total) {
+ this.key = key;
+ this.count = count;
+ this.min = min;
+ this.max = max;
+ this.totalCount = totalCount;
+ this.total = total;
+ }
+
+ @Override
+ public long getKey() {
+ return key;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public double getTotal() {
+ return total;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.totalCount;
+ }
+
+ @Override
+ public double getMean() {
+ return total / totalCount;
+ }
+
+ @Override
+ public double getMin() {
+ return this.min;
+ }
+
+ @Override
+ public double getMax() {
+ return this.max;
+ }
+ }
+
+ private ComparatorType comparatorType;
+ List<FullEntry> entries;
+
+ InternalFullHistogramFacet() {
+ }
+
+ InternalFullHistogramFacet(String name) {
+ super(name);
+ }
+
+ public InternalFullHistogramFacet(String name, ComparatorType comparatorType, List<FullEntry> entries) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.entries = entries;
+ }
+
+ @Override
+ public List<FullEntry> getEntries() {
+ return entries;
+ }
+
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) getEntries().iterator();
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ // we need to sort it
+ InternalFullHistogramFacet internalFacet = (InternalFullHistogramFacet) facets.get(0);
+ List<FullEntry> entries = internalFacet.getEntries();
+ CollectionUtil.timSort(entries, comparatorType.comparator());
+ return internalFacet;
+ }
+
+ Recycler.V<LongObjectOpenHashMap<FullEntry>> map = context.cacheRecycler().longObjectMap(-1);
+
+ for (Facet facet : facets) {
+ InternalFullHistogramFacet histoFacet = (InternalFullHistogramFacet) facet;
+ for (FullEntry fullEntry : histoFacet.entries) {
+ FullEntry current = map.v().get(fullEntry.key);
+ if (current != null) {
+ current.count += fullEntry.count;
+ current.total += fullEntry.total;
+ current.totalCount += fullEntry.totalCount;
+ if (fullEntry.min < current.min) {
+ current.min = fullEntry.min;
+ }
+ if (fullEntry.max > current.max) {
+ current.max = fullEntry.max;
+ }
+ } else {
+ map.v().put(fullEntry.key, fullEntry);
+ }
+ }
+ }
+
+ // sort
+ // TODO: hppc - toArray?
+ Object[] values = map.v().values().toArray();
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+ List<FullEntry> ordered = new ArrayList<FullEntry>(map.v().size());
+ for (int i = 0; i < map.v().size(); i++) {
+ FullEntry value = (FullEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+
+ map.release();
+
+ // just initialize it as an already-ordered facet
+ InternalFullHistogramFacet ret = new InternalFullHistogramFacet(getName());
+ ret.comparatorType = comparatorType;
+ ret.entries = ordered;
+ return ret;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString ENTRIES = new XContentBuilderString("entries");
+ static final XContentBuilderString KEY = new XContentBuilderString("key");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, HistogramFacet.TYPE);
+ builder.startArray(Fields.ENTRIES);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.KEY, entry.getKey());
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalFullHistogramFacet readHistogramFacet(StreamInput in) throws IOException {
+ InternalFullHistogramFacet facet = new InternalFullHistogramFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ int size = in.readVInt();
+ entries = new ArrayList<FullEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new FullEntry(in.readLong(), in.readVLong(), in.readDouble(), in.readDouble(), in.readVLong(), in.readDouble()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(entries.size());
+ for (FullEntry entry : entries) {
+ out.writeLong(entry.key);
+ out.writeVLong(entry.count);
+ out.writeDouble(entry.min);
+ out.writeDouble(entry.max);
+ out.writeVLong(entry.totalCount);
+ out.writeDouble(entry.total);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/InternalHistogramFacet.java b/src/main/java/org/elasticsearch/search/facet/histogram/InternalHistogramFacet.java
new file mode 100644
index 0000000..3b56430
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/InternalHistogramFacet.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.histogram;
+
+import org.elasticsearch.search.facet.InternalFacet;
+
+/**
+ * Base class for the internal histogram facet implementations.
+ */
+public abstract class InternalHistogramFacet extends InternalFacet implements HistogramFacet {
+
+ public static void registerStreams() {
+ InternalFullHistogramFacet.registerStreams();
+ InternalCountHistogramFacet.registerStreams();
+ }
+
+ protected InternalHistogramFacet() {
+ }
+
+ protected InternalHistogramFacet(String facetName) {
+ super(facetName);
+ }
+
+ @Override
+ public final String getType() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/ScriptHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/histogram/ScriptHistogramFacetExecutor.java
new file mode 100644
index 0000000..64e954d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/ScriptHistogramFacetExecutor.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A histogram facet collector where both the bucket key and the value are computed by scripts.
+ */
+public class ScriptHistogramFacetExecutor extends FacetExecutor {
+
+ final SearchScript keyScript;
+ final SearchScript valueScript;
+ final long interval;
+ private final HistogramFacet.ComparatorType comparatorType;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
+
+ public ScriptHistogramFacetExecutor(String scriptLang, String keyScript, String valueScript, Map<String, Object> params, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
+ this.keyScript = context.scriptService().search(context.lookup(), scriptLang, keyScript, params);
+ this.valueScript = context.scriptService().search(context.lookup(), scriptLang, valueScript, params);
+ this.interval = interval > 0 ? interval : 0;
+ this.comparatorType = comparatorType;
+
+ this.entries = context.cacheRecycler().longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(entries.v());
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
+ final boolean[] states = entries.v().allocated;
+ final Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ InternalFullHistogramFacet.FullEntry value = (InternalFullHistogramFacet.FullEntry) values[i];
+ entries1.add(value);
+ }
+ }
+
+ entries.release();
+ return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
+ }
+
+ public static long bucket(double value, long interval) {
+ return (((long) (value / interval)) * interval);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
+
+ Collector(LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
+ this.entries = entries;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ keyScript.setScorer(scorer);
+ valueScript.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyScript.setNextReader(context);
+ valueScript.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ keyScript.setNextDocId(doc);
+ valueScript.setNextDocId(doc);
+ long bucket;
+ if (interval == 0) {
+ bucket = keyScript.runAsLong();
+ } else {
+ bucket = bucket(keyScript.runAsDouble(), interval);
+ }
+ double value = valueScript.runAsDouble();
+
+ InternalFullHistogramFacet.FullEntry entry = entries.get(bucket);
+ if (entry == null) {
+ entry = new InternalFullHistogramFacet.FullEntry(bucket, 1, value, value, 1, value);
+ entries.put(bucket, entry);
+ } else {
+ entry.count++;
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+}
\ No newline at end of file
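
One detail worth noting in collect(...) above: when interval == 0 (no interval in the request) the key script's long result is used as the bucket key directly; otherwise its double result is snapped to the interval grid. Restated in isolation as a sketch:

    import org.elasticsearch.script.SearchScript;

    final class KeyScriptBucket {
        // Sketch of the bucket selection in Collector.collect(int) above;
        // assumes the script was already positioned with setNextDocId(doc).
        static long bucketFor(SearchScript keyScript, long interval) {
            if (interval == 0) {
                return keyScript.runAsLong(); // the script computes the bucket itself
            }
            return ((long) (keyScript.runAsDouble() / interval)) * interval;
        }
    }
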
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/ValueHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/histogram/ValueHistogramFacetExecutor.java
new file mode 100644
index 0000000..750d32a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/ValueHistogramFacetExecutor.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A histogram facet collector that uses different fields for the key and the value.
+ */
+public class ValueHistogramFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData keyIndexFieldData;
+ private final IndexNumericFieldData valueIndexFieldData;
+ private final HistogramFacet.ComparatorType comparatorType;
+ private final long interval;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
+
+ public ValueHistogramFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
+ this.comparatorType = comparatorType;
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueIndexFieldData = valueIndexFieldData;
+ this.interval = interval;
+ this.entries = context.cacheRecycler().longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
+ final boolean[] states = entries.v().allocated;
+ final Object[] values = entries.v().values;
+
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ InternalFullHistogramFacet.FullEntry value = (InternalFullHistogramFacet.FullEntry) values[i];
+ entries1.add(value);
+ }
+ }
+ entries.release();
+ return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final HistogramProc histoProc;
+ private DoubleValues keyValues;
+
+ public Collector() {
+ this.histoProc = new HistogramProc(interval, entries.v());
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getDoubleValues();
+ histoProc.valueValues = valueIndexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public final static class HistogramProc extends DoubleFacetAggregatorBase {
+
+ final long interval;
+ final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
+
+ DoubleValues valueValues;
+
+ final ValueAggregator valueAggregator = new ValueAggregator();
+
+ public HistogramProc(long interval, LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
+ this.interval = interval;
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ long bucket = FullHistogramFacetExecutor.bucket(value, interval);
+ InternalFullHistogramFacet.FullEntry entry = entries.get(bucket);
+ if (entry == null) {
+ entry = new InternalFullHistogramFacet.FullEntry(bucket, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0);
+ entries.put(bucket, entry);
+ }
+ entry.count++;
+ valueAggregator.entry = entry;
+ valueAggregator.onDoc(docId, valueValues);
+ }
+
+ public final static class ValueAggregator extends DoubleFacetAggregatorBase {
+
+ InternalFullHistogramFacet.FullEntry entry;
+
+ @Override
+ public void onValue(int docId, double value) {
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/histogram/ValueScriptHistogramFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/histogram/ValueScriptHistogramFacetExecutor.java
new file mode 100644
index 0000000..e4e5323
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/histogram/ValueScriptHistogramFacetExecutor.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.histogram;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A histogram facet collector that uses a field for the key and a script to
+ * compute the value aggregated per bucket.
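+ *
+ * <p>A minimal usage sketch of the builder side that ends up here (the field and
+ * script are illustrative, assuming the key-field form of
+ * HistogramScriptFacetBuilder):
+ * <pre>
+ * FacetBuilders.histogramScriptFacet("weighted")
+ *         .keyField("timestamp")
+ *         .valueScript("doc['bytes'].value * 2")
+ *         .interval(1000);
+ * </pre>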
+ */
+public class ValueScriptHistogramFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+ private final HistogramFacet.ComparatorType comparatorType;
+ final SearchScript valueScript;
+ final long interval;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
+
+ public ValueScriptHistogramFacetExecutor(IndexNumericFieldData indexFieldData, String scriptLang, String valueScript, Map<String, Object> params, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
+ this.comparatorType = comparatorType;
+ this.indexFieldData = indexFieldData;
+ this.interval = interval;
+ this.valueScript = context.scriptService().search(context.lookup(), scriptLang, valueScript, params);
+
+ this.entries = context.cacheRecycler().longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
+ final boolean[] states = entries.v().allocated;
+ final Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ InternalFullHistogramFacet.FullEntry value = (InternalFullHistogramFacet.FullEntry) values[i];
+ entries1.add(value);
+ }
+ }
+
+ entries.release();
+ return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
+ }
+
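+    // e.g. bucket(7.3, 5) == 5 while bucket(-7.3, 5) == -5: the cast truncates
+    // toward zero, so negative keys land one bucket above what flooring would give.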
+ public static long bucket(double value, long interval) {
+ return (((long) (value / interval)) * interval);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private DoubleValues values;
+ private final HistogramProc histoProc;
+
+ public Collector() {
+ histoProc = new HistogramProc(interval, valueScript, entries.v());
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ valueScript.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getDoubleValues();
+ valueScript.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ histoProc.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public static class HistogramProc extends DoubleFacetAggregatorBase {
+
+ private final long interval;
+
+ private final SearchScript valueScript;
+
+ final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
+
+ public HistogramProc(long interval, SearchScript valueScript, LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
+ this.interval = interval;
+ this.valueScript = valueScript;
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ valueScript.setNextDocId(docId);
+ long bucket = bucket(value, interval);
+ double scriptValue = valueScript.runAsDouble();
+
+ InternalFullHistogramFacet.FullEntry entry = entries.get(bucket);
+ if (entry == null) {
+ entry = new InternalFullHistogramFacet.FullEntry(bucket, 1, scriptValue, scriptValue, 1, scriptValue);
+ entries.put(bucket, entry);
+ } else {
+ entry.count++;
+ entry.totalCount++;
+ entry.total += scriptValue;
+ if (scriptValue < entry.min) {
+ entry.min = scriptValue;
+ }
+ if (scriptValue > entry.max) {
+ entry.max = scriptValue;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/nested/NestedFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/nested/NestedFacetExecutor.java
new file mode 100644
index 0000000..76350cc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/nested/NestedFacetExecutor.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.nested;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.FilteredCollector;
+import org.elasticsearch.common.lucene.search.XCollector;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A facet executor that wraps another executor so that, for every matching root
+ * (parent) document, the facet is computed over its nested (child) documents.
+ */
+public class NestedFacetExecutor extends FacetExecutor {
+
+ private final FacetExecutor facetExecutor;
+ private final Filter parentFilter;
+ private final Filter childFilter;
+
+ public NestedFacetExecutor(FacetExecutor facetExecutor, SearchContext context, String nestedPath) {
+ this.facetExecutor = facetExecutor;
+ MapperService.SmartNameObjectMapper mapper = context.smartNameObjectMapper(nestedPath);
+ if (mapper == null) {
+ throw new SearchParseException(context, "facet nested path [" + nestedPath + "] not found");
+ }
+ ObjectMapper objectMapper = mapper.mapper();
+ if (objectMapper == null) {
+ throw new SearchParseException(context, "facet nested path [" + nestedPath + "] not found");
+ }
+ if (!objectMapper.nested().isNested()) {
+ throw new SearchParseException(context, "facet nested path [" + nestedPath + "] is not nested");
+ }
+ parentFilter = context.filterCache().cache(NonNestedDocsFilter.INSTANCE);
+ childFilter = context.filterCache().cache(objectMapper.nestedTypeFilter());
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return facetExecutor.buildFacet(facetName);
+ }
+
+ @Override
+ public Collector collector() {
+ XCollector collector = facetExecutor.collector();
+ if (collector == null) {
+ return null;
+ }
+ return new Collector(collector, parentFilter, childFilter);
+ }
+
+ @Override
+ public Post post() {
+ FacetExecutor.Post post = facetExecutor.post();
+ if (post == null) {
+ return null;
+ }
+ return new Post(post, parentFilter, childFilter);
+ }
+
+ public static class Post extends FacetExecutor.Post {
+
+ private final FacetExecutor.Post post;
+ private final Filter parentFilter;
+ private final Filter childFilter;
+
+ public Post(FacetExecutor.Post post, Filter parentFilter, Filter childFilter) {
+ this.post = post;
+ this.parentFilter = parentFilter;
+ this.childFilter = childFilter;
+ }
+
+ public Post(Post post, Filter filter) {
+ this.post = new FacetExecutor.Post.Filtered(post.post, filter);
+ this.parentFilter = post.parentFilter;
+ this.childFilter = post.childFilter;
+ }
+
+ @Override
+ public void executePost(List<ContextDocIdSet> docSets) throws IOException {
+ List<ContextDocIdSet> nestedEntries = new ArrayList<ContextDocIdSet>(docSets.size());
+ for (int i = 0; i < docSets.size(); i++) {
+ ContextDocIdSet entry = docSets.get(i);
+ AtomicReaderContext context = entry.context;
+                // Can use null as acceptedDocs here, since only live doc ids are pushed to the collect method.
+ DocIdSet docIdSet = parentFilter.getDocIdSet(context, null);
+ if (DocIdSets.isEmpty(docIdSet)) {
+ continue;
+ }
+                // In ES, if a parent doc is deleted its children are deleted as well, so acceptedDocs can also be null here.
+ Bits childDocs = DocIdSets.toSafeBits(context.reader(), childFilter.getDocIdSet(context, null));
+ FixedBitSet parentDocs = (FixedBitSet) docIdSet;
+
+ DocIdSetIterator iter = entry.docSet.iterator();
+ int parentDoc = iter.nextDoc();
+ if (parentDoc == DocIdSetIterator.NO_MORE_DOCS) {
+ continue;
+ }
+ if (parentDoc == 0) {
+ parentDoc = iter.nextDoc();
+ }
+ if (parentDoc == DocIdSetIterator.NO_MORE_DOCS) {
+ continue;
+ }
+ FixedBitSet childSet = new FixedBitSet(context.reader().maxDoc());
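+                // Nested (child) docs are indexed immediately before their parent doc,
+                // so walk back from each matched parent to the previous parent, marking children.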
+ do {
+ int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
+ for (int childDocId = (parentDoc - 1); childDocId > prevParentDoc; childDocId--) {
+ if (childDocs.get(childDocId)) {
+ childSet.set(childDocId);
+ }
+ }
+ } while ((parentDoc = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS);
+ nestedEntries.add(new ContextDocIdSet(entry.context, childSet));
+ }
+ post.executePost(nestedEntries);
+ }
+ }
+
+
+ public static class Collector extends FacetExecutor.Collector {
+
+ private final org.apache.lucene.search.Collector collector;
+ private final Filter parentFilter;
+ private final Filter childFilter;
+ private Bits childDocs;
+ private FixedBitSet parentDocs;
+
+        // Wraps an existing nested collector so its hits are further restricted by the given filter.
+ public Collector(Collector collector, Filter filter) {
+ this.collector = new FilteredCollector(collector.collector, filter);
+ this.parentFilter = collector.parentFilter;
+ this.childFilter = collector.childFilter;
+ }
+
+ public Collector(org.apache.lucene.search.Collector collector, Filter parentFilter, Filter childFilter) {
+ this.collector = collector;
+ this.parentFilter = parentFilter;
+ this.childFilter = childFilter;
+ }
+
+ @Override
+ public void postCollection() {
+ if (collector instanceof XCollector) {
+ ((XCollector) collector).postCollection();
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ collector.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ collector.setNextReader(context);
+            // Can use null as acceptedDocs here, since only live doc ids are pushed to the collect method.
+            DocIdSet docIdSet = parentFilter.getDocIdSet(context, null);
+            // In ES, if a parent doc is deleted its children are deleted as well, so acceptedDocs can also be null here.
+ childDocs = DocIdSets.toSafeBits(context.reader(), childFilter.getDocIdSet(context, null));
+ if (DocIdSets.isEmpty(docIdSet)) {
+ parentDocs = null;
+ } else {
+ parentDocs = (FixedBitSet) docIdSet;
+ }
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return collector.acceptsDocsOutOfOrder();
+ }
+
+ @Override
+ public void collect(int parentDoc) throws IOException {
+ if (parentDoc == 0 || parentDocs == null) {
+ return;
+ }
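+            // Children are indexed immediately before their parent doc: collect every
+            // child doc between the previous parent and this one.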
+ int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
+ for (int i = (parentDoc - 1); i > prevParentDoc; i--) {
+ if (childDocs.get(i)) {
+ collector.collect(i);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/query/InternalQueryFacet.java b/src/main/java/org/elasticsearch/search/facet/query/InternalQueryFacet.java
new file mode 100644
index 0000000..73dcb8b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/query/InternalQueryFacet.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.query;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The streamable, reducible result of a query facet: a single hit count.
+ */
+public class InternalQueryFacet extends InternalFacet implements QueryFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("query"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readQueryFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ private long count;
+
+ private InternalQueryFacet() {
+
+ }
+
+ public InternalQueryFacet(String name, long count) {
+ super(name);
+ this.count = count;
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ return facets.get(0);
+ }
+ long count = 0;
+ for (Facet facet : facets) {
+ count += ((QueryFacet) facet).getCount();
+ }
+ return new InternalQueryFacet(getName(), count);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, QueryFacet.TYPE);
+ builder.field(Fields.COUNT, count);
+ builder.endObject();
+ return builder;
+ }
+
+ public static QueryFacet readQueryFacet(StreamInput in) throws IOException {
+ InternalQueryFacet result = new InternalQueryFacet();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ count = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(count);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/query/QueryFacet.java b/src/main/java/org/elasticsearch/search/facet/query/QueryFacet.java
new file mode 100644
index 0000000..817e801
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/query/QueryFacet.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.query;
+
+import org.elasticsearch.search.facet.Facet;
+
+/**
+ * A query facet returns the count (number of hits) of documents matching a query.
+ */
+public interface QueryFacet extends Facet {
+
+ /**
+     * The type of the query facet.
+ */
+ public static final String TYPE = "query";
+
+ /**
+ * The count of the facet.
+ */
+ long getCount();
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/query/QueryFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/query/QueryFacetBuilder.java
new file mode 100644
index 0000000..524b8d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/query/QueryFacetBuilder.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+
+/**
+ * A facet builder for query facets: counts the documents matching the provided query.
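+ *
+ * <p>A minimal usage sketch (field and value are illustrative):
+ * <pre>
+ * FacetBuilders.queryFacet("admins")
+ *         .query(QueryBuilders.termQuery("role", "admin"));
+ * </pre>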
+ */
+public class QueryFacetBuilder extends FacetBuilder {
+
+ private QueryBuilder query;
+
+ public QueryFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ @Override
+ public QueryFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public QueryFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+     * Sets the nested path the facet will execute on. For every matching root document,
+     * the nested objects under that path are the ones aggregated into the facet.
+ */
+ public QueryFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ public QueryFacetBuilder query(QueryBuilder query) {
+ this.query = query;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (query == null) {
+ throw new SearchSourceBuilderException("query must be set on query facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+ builder.field(QueryFacet.TYPE);
+ query.toXContent(builder, params);
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/query/QueryFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/query/QueryFacetExecutor.java
new file mode 100644
index 0000000..1a3bcc7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/query/QueryFacetExecutor.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.query;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.AndDocIdSet;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class QueryFacetExecutor extends FacetExecutor {
+
+ private final Query query;
+ private final Filter filter;
+
+ // default to not initialized
+ long count = -1;
+
+ public QueryFacetExecutor(Query query) {
+ this.query = query;
+ Filter possibleFilter = extractFilterIfApplicable(query);
+ if (possibleFilter != null) {
+ this.filter = possibleFilter;
+ } else {
+ this.filter = Queries.wrap(query);
+ }
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public Post post() {
+ return new Post();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalQueryFacet(facetName, count);
+ }
+
+ class Post extends FacetExecutor.Post {
+
+ @Override
+ public void executePost(List<ContextDocIdSet> docSets) throws IOException {
+ int count = 0;
+ for (ContextDocIdSet entry : docSets) {
+ DocIdSet filteredDocIdSet = filter.getDocIdSet(entry.context, entry.context.reader().getLiveDocs());
+ if (filteredDocIdSet == null || entry.docSet == null) {
+ continue;
+ }
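+                // Count the docs in the intersection of the facet's doc set and the query's filter.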
+ DocIdSetIterator iter = new AndDocIdSet(new DocIdSet[]{entry.docSet, filteredDocIdSet}).iterator();
+ while (iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ count++;
+ }
+ }
+ QueryFacetExecutor.this.count = count;
+ }
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private long count = 0;
+ private Bits bits;
+
+ @Override
+ public void collect(int doc) throws IOException {
+ if (bits.get(doc)) {
+ count++;
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ bits = DocIdSets.toSafeBits(context.reader(), filter.getDocIdSet(context, context.reader().getLiveDocs()));
+ }
+
+ @Override
+ public void postCollection() {
+ bits = null;
+ QueryFacetExecutor.this.count = count;
+ }
+ }
+
+ /**
+     * If the query is (or wraps) a pure filter, extract that filter; otherwise return null.
+ */
+ private Filter extractFilterIfApplicable(Query query) {
+ if (query instanceof XFilteredQuery) {
+ XFilteredQuery fQuery = (XFilteredQuery) query;
+ if (Queries.isConstantMatchAllQuery(fQuery.getQuery())) {
+ return fQuery.getFilter();
+ }
+ } else if (query instanceof XConstantScoreQuery) {
+ return ((XConstantScoreQuery) query).getFilter();
+ } else if (query instanceof ConstantScoreQuery) {
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query;
+ if (constantScoreQuery.getFilter() != null) {
+ return constantScoreQuery.getFilter();
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/query/QueryFacetParser.java b/src/main/java/org/elasticsearch/search/facet/query/QueryFacetParser.java
new file mode 100644
index 0000000..42649ba
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/query/QueryFacetParser.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.query;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * Parses the {@code query} facet definition and creates the matching {@link QueryFacetExecutor}.
+ */
+public class QueryFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public QueryFacetParser(Settings settings) {
+ super(settings);
+ InternalQueryFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{QueryFacet.TYPE};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.POST;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ Query facetQuery = context.queryParserService().parse(parser).query();
+ return new QueryFacetExecutor(facetQuery);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/InternalRangeFacet.java b/src/main/java/org/elasticsearch/search/facet/range/InternalRangeFacet.java
new file mode 100644
index 0000000..7b16859
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/InternalRangeFacet.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * The streamable, reducible result of a range facet.
+ */
+public class InternalRangeFacet extends InternalFacet implements RangeFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("range"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readRangeFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ Entry[] entries;
+
+ InternalRangeFacet() {
+ }
+
+ public InternalRangeFacet(String name, Entry[] entries) {
+ super(name);
+ this.entries = entries;
+ }
+
+ @Override
+ public String getType() {
+ return RangeFacet.TYPE;
+ }
+
+ @Override
+ public List<Entry> getEntries() {
+ return ImmutableList.copyOf(entries);
+ }
+
+ @Override
+ public Iterator<Entry> iterator() {
+ return getEntries().iterator();
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ return facets.get(0);
+ }
+ InternalRangeFacet agg = null;
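+        // Merge per-shard facets entry by entry; entries are positionally aligned across shards.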
+ for (Facet facet : facets) {
+            InternalRangeFacet rangeFacet = (InternalRangeFacet) facet;
+            if (agg == null) {
+                agg = rangeFacet;
+            } else {
+                for (int i = 0; i < rangeFacet.entries.length; i++) {
+                    RangeFacet.Entry aggEntry = agg.entries[i];
+                    RangeFacet.Entry currentEntry = rangeFacet.entries[i];
+ aggEntry.count += currentEntry.count;
+ aggEntry.totalCount += currentEntry.totalCount;
+ aggEntry.total += currentEntry.total;
+ if (currentEntry.min < aggEntry.min) {
+ aggEntry.min = currentEntry.min;
+ }
+ if (currentEntry.max > aggEntry.max) {
+ aggEntry.max = currentEntry.max;
+ }
+ }
+ }
+ }
+ return agg;
+ }
+
+ public static InternalRangeFacet readRangeFacet(StreamInput in) throws IOException {
+ InternalRangeFacet facet = new InternalRangeFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ entries = new Entry[in.readVInt()];
+ for (int i = 0; i < entries.length; i++) {
+ Entry entry = new Entry();
+ entry.from = in.readDouble();
+ entry.to = in.readDouble();
+ if (in.readBoolean()) {
+ entry.fromAsString = in.readString();
+ }
+ if (in.readBoolean()) {
+ entry.toAsString = in.readString();
+ }
+ entry.count = in.readVLong();
+ entry.totalCount = in.readVLong();
+ entry.total = in.readDouble();
+ entry.min = in.readDouble();
+ entry.max = in.readDouble();
+ entries[i] = entry;
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(entries.length);
+ for (Entry entry : entries) {
+ out.writeDouble(entry.from);
+ out.writeDouble(entry.to);
+ if (entry.fromAsString == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(entry.fromAsString);
+ }
+ if (entry.toAsString == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeString(entry.toAsString);
+ }
+ out.writeVLong(entry.count);
+ out.writeVLong(entry.totalCount);
+ out.writeDouble(entry.total);
+ out.writeDouble(entry.min);
+ out.writeDouble(entry.max);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString RANGES = new XContentBuilderString("ranges");
+ static final XContentBuilderString FROM = new XContentBuilderString("from");
+ static final XContentBuilderString FROM_STR = new XContentBuilderString("from_str");
+ static final XContentBuilderString TO = new XContentBuilderString("to");
+ static final XContentBuilderString TO_STR = new XContentBuilderString("to_str");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, "range");
+ builder.startArray(Fields.RANGES);
+ for (Entry entry : entries) {
+ builder.startObject();
+ if (!Double.isInfinite(entry.from)) {
+ builder.field(Fields.FROM, entry.from);
+ }
+ if (entry.fromAsString != null) {
+ builder.field(Fields.FROM_STR, entry.fromAsString);
+ }
+ if (!Double.isInfinite(entry.to)) {
+ builder.field(Fields.TO, entry.to);
+ }
+ if (entry.toAsString != null) {
+ builder.field(Fields.TO_STR, entry.toAsString);
+ }
+ builder.field(Fields.COUNT, entry.getCount());
+ // only output min and max if there are actually documents matching this range...
+ if (entry.getTotalCount() > 0) {
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ }
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/KeyValueRangeFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/range/KeyValueRangeFacetExecutor.java
new file mode 100644
index 0000000..5a3976d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/KeyValueRangeFacetExecutor.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A range facet executor that uses one field to decide which ranges a doc falls
+ * into, and a different field for the values aggregated per range.
+ */
+public class KeyValueRangeFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData keyIndexFieldData;
+ private final IndexNumericFieldData valueIndexFieldData;
+
+ private final RangeFacet.Entry[] entries;
+
+ public KeyValueRangeFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, RangeFacet.Entry[] entries, SearchContext context) {
+ this.entries = entries;
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueIndexFieldData = valueIndexFieldData;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalRangeFacet(facetName, entries);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final RangeProc rangeProc;
+ private DoubleValues keyValues;
+
+ public Collector() {
+ this.rangeProc = new RangeProc(entries);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getDoubleValues();
+ rangeProc.valueValues = valueIndexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ for (RangeFacet.Entry entry : entries) {
+ entry.foundInDoc = false;
+ }
+ rangeProc.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+ public static class RangeProc extends DoubleFacetAggregatorBase {
+
+ private final RangeFacet.Entry[] entries;
+
+ DoubleValues valueValues;
+
+ public RangeProc(RangeFacet.Entry[] entries) {
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ for (RangeFacet.Entry entry : entries) {
+ if (entry.foundInDoc) {
+ continue;
+ }
+ if (value >= entry.getFrom() && value < entry.getTo()) {
+ entry.foundInDoc = true;
+ entry.count++;
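+                    // setDocument returns how many values this doc has for the value field.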
+ int seek = valueValues.setDocument(docId);
+ for (int i = 0; i < seek; i++) {
+ double valueValue = valueValues.nextValue();
+ entry.total += valueValue;
+ entry.min = Math.min(entry.min, valueValue);
+ entry.max = Math.max(entry.max, valueValue);
+ }
+                    entry.totalCount += seek;
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/RangeFacet.java b/src/main/java/org/elasticsearch/search/facet/range/RangeFacet.java
new file mode 100644
index 0000000..e8ea288
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/RangeFacet.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import org.elasticsearch.search.facet.Facet;
+
+import java.util.List;
+
+/**
+ * A range facet holds an ordered list of range entries, each carrying count,
+ * total, min, max and mean statistics.
+ */
+public interface RangeFacet extends Facet, Iterable<RangeFacet.Entry> {
+
+ /**
+     * The type of the range facet.
+ */
+ public static final String TYPE = "range";
+
+ /**
+ * An ordered list of range facet entries.
+ */
+ List<Entry> getEntries();
+
+ public class Entry {
+
+ double from = Double.NEGATIVE_INFINITY;
+ double to = Double.POSITIVE_INFINITY;
+ String fromAsString;
+ String toAsString;
+ long count;
+ long totalCount;
+ double total;
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+
+ /**
+ * Internal field used in facet collection
+ */
+ boolean foundInDoc;
+
+ Entry() {
+ }
+
+ public double getFrom() {
+ return this.from;
+ }
+
+ public String getFromAsString() {
+ if (fromAsString != null) {
+ return fromAsString;
+ }
+ return Double.toString(from);
+ }
+
+ public double getTo() {
+ return this.to;
+ }
+
+ public String getToAsString() {
+ if (toAsString != null) {
+ return toAsString;
+ }
+ return Double.toString(to);
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+ public long getTotalCount() {
+ return this.totalCount;
+ }
+
+ public double getTotal() {
+ return this.total;
+ }
+
+ /**
+ * The mean of this facet interval.
+ */
+ public double getMean() {
+ if (totalCount == 0) {
+ return 0;
+ }
+ return total / totalCount;
+ }
+
+ public double getMin() {
+ return this.min;
+ }
+
+ public double getMax() {
+ return this.max;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/RangeFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/range/RangeFacetBuilder.java
new file mode 100644
index 0000000..04b4eb5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/RangeFacetBuilder.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A facet builder of range facets.
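+ *
+ * <p>A minimal usage sketch (the field name is illustrative):
+ * <pre>
+ * FacetBuilders.rangeFacet("ages")
+ *         .field("age")
+ *         .addUnboundedFrom(18)
+ *         .addRange(18, 65)
+ *         .addUnboundedTo(65);
+ * </pre>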
+ */
+public class RangeFacetBuilder extends FacetBuilder {
+
+ private String keyFieldName;
+ private String valueFieldName;
+
+ private List<Entry> entries = Lists.newArrayList();
+
+ /**
+ * Constructs a new range facet with the provided facet logical name.
+ *
+ * @param name The logical name of the facet
+ */
+ public RangeFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+     * The field name to run the range facet on. The provided field is used as both
+     * the {@link #keyField(String)} and the {@link #valueField(String)}.
+ */
+ public RangeFacetBuilder field(String field) {
+ this.keyFieldName = field;
+ this.valueFieldName = field;
+ return this;
+ }
+
+ /**
+     * The field name whose numeric value controls which range entries a hit "falls
+     * into": each entry whose [from, to) bounds contain the value is incremented.
+ */
+ public RangeFacetBuilder keyField(String keyField) {
+ this.keyFieldName = keyField;
+ return this;
+ }
+
+ /**
+ * The field name to use as the value of the hit to compute data based on values within the interval
+ * (for example, total).
+ */
+ public RangeFacetBuilder valueField(String valueField) {
+ this.valueFieldName = valueField;
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit from and to.
+ *
+ * @param from The from range limit
+ * @param to The to range limit
+ */
+ public RangeFacetBuilder addRange(double from, double to) {
+ entries.add(new Entry(from, to));
+ return this;
+ }
+
+ public RangeFacetBuilder addRange(String from, String to) {
+ entries.add(new Entry(from, to));
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit from and unbounded to.
+ *
+ * @param from the from range limit, to is unbounded.
+ */
+ public RangeFacetBuilder addUnboundedTo(double from) {
+ entries.add(new Entry(from, Double.POSITIVE_INFINITY));
+ return this;
+ }
+
+ public RangeFacetBuilder addUnboundedTo(String from) {
+ entries.add(new Entry(from, null));
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit to and unbounded from.
+ *
+ * @param to the to range limit, from is unbounded.
+ */
+ public RangeFacetBuilder addUnboundedFrom(double to) {
+ entries.add(new Entry(Double.NEGATIVE_INFINITY, to));
+ return this;
+ }
+
+ public RangeFacetBuilder addUnboundedFrom(String to) {
+ entries.add(new Entry(null, to));
+ return this;
+ }
+
+ /**
+ * Should the facet run in global mode (not bounded by the search query) or not (bounded by
+ * the search query). Defaults to <tt>false</tt>.
+ */
+ public RangeFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ /**
+ * An additional filter used to further filter down the set of documents the facet will run on.
+ */
+ public RangeFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+     * Sets the nested path the facet will execute on. For every matching root document,
+     * the nested objects under that path are the ones aggregated into the facet.
+ */
+ public RangeFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (keyFieldName == null) {
+ throw new SearchSourceBuilderException("field must be set on range facet for facet [" + name + "]");
+ }
+
+ if (entries.isEmpty()) {
+ throw new SearchSourceBuilderException("at least one range must be defined for range facet [" + name + "]");
+ }
+
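+        // Emits { "<name>": { "range": { <field spec>, "ranges": [ { "from": ..., "to": ... }, ... ] } } },
+        // plus any facet filter / global / nested settings.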
+ builder.startObject(name);
+
+ builder.startObject(RangeFacet.TYPE);
+ if (valueFieldName != null && !keyFieldName.equals(valueFieldName)) {
+ builder.field("key_field", keyFieldName);
+ builder.field("value_field", valueFieldName);
+ } else {
+ builder.field("field", keyFieldName);
+ }
+
+ builder.startArray("ranges");
+ for (Entry entry : entries) {
+ builder.startObject();
+ if (entry.fromAsString != null) {
+ builder.field("from", entry.fromAsString);
+ } else if (!Double.isInfinite(entry.from)) {
+ builder.field("from", entry.from);
+ }
+ if (entry.toAsString != null) {
+ builder.field("to", entry.toAsString);
+ } else if (!Double.isInfinite(entry.to)) {
+ builder.field("to", entry.to);
+ }
+ builder.endObject();
+ }
+ builder.endArray();
+
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+
+ static class Entry {
+ double from = Double.NEGATIVE_INFINITY;
+ double to = Double.POSITIVE_INFINITY;
+
+ String fromAsString;
+ String toAsString;
+
+ Entry(String fromAsString, String toAsString) {
+ this.fromAsString = fromAsString;
+ this.toAsString = toAsString;
+ }
+
+ Entry(double from, double to) {
+ this.from = from;
+ this.to = to;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/RangeFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/range/RangeFacetExecutor.java
new file mode 100644
index 0000000..693dd96
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/RangeFacetExecutor.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A range facet executor that uses a single field as both the key and the value.
+ */
+public class RangeFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+
+ private final RangeFacet.Entry[] entries;
+
+ public RangeFacetExecutor(IndexNumericFieldData indexFieldData, RangeFacet.Entry[] entries, SearchContext context) {
+ this.indexFieldData = indexFieldData;
+ this.entries = entries;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalRangeFacet(facetName, entries);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final RangeProc rangeProc;
+ private DoubleValues values;
+
+ public Collector() {
+ rangeProc = new RangeProc(entries);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ for (RangeFacet.Entry entry : entries) {
+ entry.foundInDoc = false;
+ }
+ rangeProc.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+
+    public static class RangeProc extends DoubleFacetAggregatorBase {
+
+ private final RangeFacet.Entry[] entries;
+
+ public RangeProc(RangeFacet.Entry[] entries) {
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ for (RangeFacet.Entry entry : entries) {
+ if (entry.foundInDoc) {
+ continue;
+ }
+ if (value >= entry.getFrom() && value < entry.getTo()) {
+ entry.foundInDoc = true;
+ entry.count++;
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/RangeFacetParser.java b/src/main/java/org/elasticsearch/search/facet/range/RangeFacetParser.java
new file mode 100644
index 0000000..9e3a853
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/RangeFacetParser.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.range;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.FacetPhaseExecutionException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Parses the {@code range} facet definition and selects the matching executor
+ * (single field, key/value fields, or script based).
+ */
+public class RangeFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public RangeFacetParser(Settings settings) {
+ super(settings);
+ InternalRangeFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{RangeFacet.TYPE};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String keyField = null;
+ String valueField = null;
+ String scriptLang = null;
+ String keyScript = null;
+ String valueScript = null;
+ Map<String, Object> params = null;
+ XContentParser.Token token;
+ String fieldName = null;
+ List<RangeFacet.Entry> entries = Lists.newArrayList();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if (!"ranges".equals(fieldName)) {
+ // this is the actual field name, so also update the keyField
+ keyField = fieldName;
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ RangeFacet.Entry entry = new RangeFacet.Entry();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ if ("from".equals(fieldName)) {
+ entry.fromAsString = parser.text();
+ } else if ("to".equals(fieldName)) {
+ entry.toAsString = parser.text();
+ }
+ } else if (token.isValue()) {
+ if ("from".equals(fieldName)) {
+ entry.from = parser.doubleValue();
+ } else if ("to".equals(fieldName)) {
+ entry.to = parser.doubleValue();
+ }
+ }
+ }
+ entries.add(entry);
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(fieldName)) {
+ params = parser.map();
+ }
+ } else if (token.isValue()) {
+ if ("field".equals(fieldName)) {
+ keyField = parser.text();
+ } else if ("key_field".equals(fieldName) || "keyField".equals(fieldName)) {
+ keyField = parser.text();
+ } else if ("value_field".equals(fieldName) || "valueField".equals(fieldName)) {
+ valueField = parser.text();
+ } else if ("key_script".equals(fieldName) || "keyScript".equals(fieldName)) {
+ keyScript = parser.text();
+ } else if ("value_script".equals(fieldName) || "valueScript".equals(fieldName)) {
+ valueScript = parser.text();
+ } else if ("lang".equals(fieldName)) {
+ scriptLang = parser.text();
+ }
+ }
+ }
+
+ if (entries.isEmpty()) {
+ throw new FacetPhaseExecutionException(facetName, "no ranges defined for range facet");
+ }
+
+ RangeFacet.Entry[] rangeEntries = entries.toArray(new RangeFacet.Entry[entries.size()]);
+
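+        // Scripted ranges take precedence; otherwise a key field is mandatory.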
+ if (keyScript != null && valueScript != null) {
+ return new ScriptRangeFacetExecutor(scriptLang, keyScript, valueScript, params, rangeEntries, context);
+ }
+
+ if (keyField == null) {
+ throw new FacetPhaseExecutionException(facetName, "key field is required to be set for range facet, either using [field] or using [key_field]");
+ }
+
+ // we have a keyField
+ FieldMapper keyFieldMapper = context.smartNameFieldMapper(keyField);
+ if (keyFieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "No mapping found for key_field [" + keyField + "]");
+ }
+ for (RangeFacet.Entry entry : rangeEntries) {
+ if (entry.fromAsString != null) {
+ entry.from = ((Number) keyFieldMapper.value(entry.fromAsString)).doubleValue();
+ }
+ if (entry.toAsString != null) {
+ entry.to = ((Number) keyFieldMapper.value(entry.toAsString)).doubleValue();
+ }
+ }
+
+ IndexNumericFieldData keyIndexFieldData = context.fieldData().getForField(keyFieldMapper);
+
+ if (valueField == null || keyField.equals(valueField)) {
+ return new RangeFacetExecutor(keyIndexFieldData, rangeEntries, context);
+ } else {
+ FieldMapper valueFieldMapper = context.smartNameFieldMapper(valueField);
+ if (valueFieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "No mapping found for value_field [" + valueField + "]");
+ }
+ IndexNumericFieldData valueIndexFieldData = context.fieldData().getForField(valueFieldMapper);
+            // we have a value field, and it's different from the key
+ return new KeyValueRangeFacetExecutor(keyIndexFieldData, valueIndexFieldData, rangeEntries, context);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/RangeScriptFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/range/RangeScriptFacetBuilder.java
new file mode 100644
index 0000000..db2a2a1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/RangeScriptFacetBuilder.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A facet builder for range facets whose bucket keys and aggregated values are produced by scripts.
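+ * <p>
+ * A minimal usage sketch (the <tt>doc['price'].value</tt> scripts and the range bounds are
+ * assumptions, not part of this class):
+ * <pre>
+ * FacetBuilder facet = new RangeScriptFacetBuilder("prices")
+ *         .keyScript("doc['price'].value")
+ *         .valueScript("doc['price'].value")
+ *         .addUnboundedFrom(50)
+ *         .addRange(50, 100)
+ *         .addUnboundedTo(100);
+ * </pre>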
+ */
+public class RangeScriptFacetBuilder extends FacetBuilder {
+
+ private String lang;
+ private String keyScript;
+ private String valueScript;
+ private Map<String, Object> params;
+ private List<Entry> entries = Lists.newArrayList();
+
+ public RangeScriptFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * The language of the script.
+ */
+ public RangeScriptFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ public RangeScriptFacetBuilder keyScript(String keyScript) {
+ this.keyScript = keyScript;
+ return this;
+ }
+
+ public RangeScriptFacetBuilder valueScript(String valueScript) {
+ this.valueScript = valueScript;
+ return this;
+ }
+
+ public RangeScriptFacetBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit from and to.
+ *
+ * @param from The from range limit
+ * @param to The to range limit
+ */
+ public RangeScriptFacetBuilder addRange(double from, double to) {
+ entries.add(new Entry(from, to));
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit from and unbounded to.
+ *
+ * @param from the from range limit, to is unbounded.
+ */
+ public RangeScriptFacetBuilder addUnboundedTo(double from) {
+ entries.add(new Entry(from, Double.POSITIVE_INFINITY));
+ return this;
+ }
+
+ /**
+ * Adds a range entry with explicit to and unbounded from.
+ *
+ * @param to the to range limit, from is unbounded.
+ */
+ public RangeScriptFacetBuilder addUnboundedFrom(double to) {
+ entries.add(new Entry(Double.NEGATIVE_INFINITY, to));
+ return this;
+ }
+
+
+ /**
+ * Should the facet run in global mode (not bounded by the search query) or not (bounded by
+ * the search query). Defaults to <tt>false</tt>.
+ */
+ public RangeScriptFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public RangeScriptFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+     * Sets the nested path the facet will execute on. A match on the root object will then cause
+     * all the nested objects matching the path to be aggregated into the facet.
+ */
+ public RangeScriptFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (keyScript == null) {
+ throw new SearchSourceBuilderException("key_script must be set on range script facet for facet [" + name + "]");
+ }
+ if (valueScript == null) {
+ throw new SearchSourceBuilderException("value_script must be set on range script facet for facet [" + name + "]");
+ }
+
+ if (entries.isEmpty()) {
+ throw new SearchSourceBuilderException("at least one range must be defined for range facet [" + name + "]");
+ }
+
+ builder.startObject(name);
+
+ builder.startObject(RangeFacet.TYPE);
+ builder.field("key_script", keyScript);
+ builder.field("value_script", valueScript);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+
+ builder.startArray("ranges");
+ for (Entry entry : entries) {
+ builder.startObject();
+ if (!Double.isInfinite(entry.from)) {
+ builder.field("from", entry.from);
+ }
+ if (!Double.isInfinite(entry.to)) {
+ builder.field("to", entry.to);
+ }
+ builder.endObject();
+ }
+ builder.endArray();
+
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+
+ private static class Entry {
+ final double from;
+ final double to;
+
+ private Entry(double from, double to) {
+ this.from = from;
+ this.to = to;
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/range/ScriptRangeFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/range/ScriptRangeFacetExecutor.java
new file mode 100644
index 0000000..613cca3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/range/ScriptRangeFacetExecutor.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.range;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A {@link FacetExecutor} for range facets whose bucket keys and aggregated values are computed by scripts.
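+ * <p>
+ * For each document the key script result is bucketed into half-open ranges: a key falls into
+ * an entry when <tt>key &gt;= from</tt> and <tt>key &lt; to</tt>, and the value script result
+ * is then added to that entry's total, min and max.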
+ */
+public class ScriptRangeFacetExecutor extends FacetExecutor {
+
+ final SearchScript keyScript;
+ final SearchScript valueScript;
+
+ private final RangeFacet.Entry[] entries;
+
+ public ScriptRangeFacetExecutor(String scriptLang, String keyScript, String valueScript, Map<String, Object> params, RangeFacet.Entry[] entries, SearchContext context) {
+ this.keyScript = context.scriptService().search(context.lookup(), scriptLang, keyScript, params);
+ this.valueScript = context.scriptService().search(context.lookup(), scriptLang, valueScript, params);
+ this.entries = entries;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalRangeFacet(facetName, entries);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ keyScript.setScorer(scorer);
+ valueScript.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyScript.setNextReader(context);
+ valueScript.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ keyScript.setNextDocId(doc);
+ valueScript.setNextDocId(doc);
+ double key = keyScript.runAsDouble();
+ double value = valueScript.runAsDouble();
+
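+            // ranges are checked independently and may overlap, so one key can fall into several entries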
+ for (RangeFacet.Entry entry : entries) {
+ if (key >= entry.getFrom() && key < entry.getTo()) {
+ entry.count++;
+ entry.totalCount++;
+ entry.total += value;
+ if (value < entry.min) {
+ entry.min = value;
+ }
+ if (value > entry.max) {
+ entry.max = value;
+ }
+ }
+ }
+ }
+
+ @Override
+ public void postCollection() {
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/InternalStatisticalFacet.java b/src/main/java/org/elasticsearch/search/facet/statistical/InternalStatisticalFacet.java
new file mode 100644
index 0000000..e7fb322
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/InternalStatisticalFacet.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.InternalFacet;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The shard-level statistical facet result, including its stream serialization and reduce logic.
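+ * <p>
+ * Variance is derived from the streamed running sums rather than from the raw values: with
+ * <tt>total</tt> the sum of values and <tt>sumOfSquares</tt> the sum of squared values over
+ * <tt>count</tt> documents, {@link #getVariance()} returns the population variance
+ * <tt>(sumOfSquares - total * total / count) / count</tt>.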
+ */
+public class InternalStatisticalFacet extends InternalFacet implements StatisticalFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("statistical"));
+
+ public static void registerStreams() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readStatisticalFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ private double min;
+ private double max;
+ private double total;
+ private double sumOfSquares;
+ private long count;
+
+ private InternalStatisticalFacet() {
+ }
+
+ public InternalStatisticalFacet(String name, double min, double max, double total, double sumOfSquares, long count) {
+ super(name);
+ this.min = min;
+ this.max = max;
+ this.total = total;
+ this.sumOfSquares = sumOfSquares;
+ this.count = count;
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ @Override
+ public long getCount() {
+ return this.count;
+ }
+
+ @Override
+ public double getTotal() {
+ return this.total;
+ }
+
+ @Override
+ public double getSumOfSquares() {
+ return this.sumOfSquares;
+ }
+
+ @Override
+ public double getMean() {
+ if (count == 0) {
+ return 0;
+ }
+ return total / count;
+ }
+
+ @Override
+ public double getMin() {
+ return this.min;
+ }
+
+ @Override
+ public double getMax() {
+ return this.max;
+ }
+
+ public double getVariance() {
+ return (sumOfSquares - ((total * total) / count)) / count;
+ }
+
+ public double getStdDeviation() {
+ return Math.sqrt(getVariance());
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ return facets.get(0);
+ }
+ double min = Double.NaN;
+ double max = Double.NaN;
+ double total = 0;
+ double sumOfSquares = 0;
+ long count = 0;
+
+ for (Facet facet : facets) {
+ InternalStatisticalFacet statsFacet = (InternalStatisticalFacet) facet;
+ if (statsFacet.getMin() < min || Double.isNaN(min)) {
+ min = statsFacet.getMin();
+ }
+ if (statsFacet.getMax() > max || Double.isNaN(max)) {
+ max = statsFacet.getMax();
+ }
+ total += statsFacet.getTotal();
+ sumOfSquares += statsFacet.getSumOfSquares();
+ count += statsFacet.getCount();
+ }
+
+ return new InternalStatisticalFacet(getName(), min, max, total, sumOfSquares, count);
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ static final XContentBuilderString SUM_OF_SQUARES = new XContentBuilderString("sum_of_squares");
+ static final XContentBuilderString VARIANCE = new XContentBuilderString("variance");
+ static final XContentBuilderString STD_DEVIATION = new XContentBuilderString("std_deviation");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, StatisticalFacet.TYPE);
+ builder.field(Fields.COUNT, getCount());
+ builder.field(Fields.TOTAL, getTotal());
+ builder.field(Fields.MIN, getMin());
+ builder.field(Fields.MAX, getMax());
+ builder.field(Fields.MEAN, getMean());
+ builder.field(Fields.SUM_OF_SQUARES, getSumOfSquares());
+ builder.field(Fields.VARIANCE, getVariance());
+ builder.field(Fields.STD_DEVIATION, getStdDeviation());
+ builder.endObject();
+ return builder;
+ }
+
+ public static StatisticalFacet readStatisticalFacet(StreamInput in) throws IOException {
+ InternalStatisticalFacet facet = new InternalStatisticalFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ count = in.readVLong();
+ total = in.readDouble();
+ min = in.readDouble();
+ max = in.readDouble();
+ sumOfSquares = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVLong(count);
+ out.writeDouble(total);
+ out.writeDouble(min);
+ out.writeDouble(max);
+ out.writeDouble(sumOfSquares);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/ScriptStatisticalFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/statistical/ScriptStatisticalFacetExecutor.java
new file mode 100644
index 0000000..2e323f9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/ScriptStatisticalFacetExecutor.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A {@link FacetExecutor} that computes statistical values from a script run against each document.
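+ * <p>
+ * The collector accumulates min, max, total, sum of squares and count locally and copies them
+ * back to the executor in <tt>postCollection()</tt>, from which the final facet is built.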
+ */
+public class ScriptStatisticalFacetExecutor extends FacetExecutor {
+
+ private final SearchScript script;
+
+ private double min = Double.POSITIVE_INFINITY;
+ private double max = Double.NEGATIVE_INFINITY;
+ private double total = 0;
+ private double sumOfSquares = 0.0;
+ private long count;
+
+ public ScriptStatisticalFacetExecutor(String scriptLang, String script, Map<String, Object> params, SearchContext context) {
+ this.script = context.scriptService().search(context.lookup(), scriptLang, script, params);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalStatisticalFacet(facetName, min, max, total, sumOfSquares, count);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private double min = Double.POSITIVE_INFINITY;
+ private double max = Double.NEGATIVE_INFINITY;
+ private double total = 0;
+ private double sumOfSquares = 0.0;
+ private long count;
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ script.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ script.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ script.setNextDocId(doc);
+ double value = script.runAsDouble();
+ if (value < min) {
+ min = value;
+ }
+ if (value > max) {
+ max = value;
+ }
+ sumOfSquares += value * value;
+ total += value;
+ count++;
+ }
+
+ @Override
+ public void postCollection() {
+ ScriptStatisticalFacetExecutor.this.min = min;
+ ScriptStatisticalFacetExecutor.this.max = max;
+ ScriptStatisticalFacetExecutor.this.total = total;
+ ScriptStatisticalFacetExecutor.this.sumOfSquares = sumOfSquares;
+ ScriptStatisticalFacetExecutor.this.count = count;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacet.java b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacet.java
new file mode 100644
index 0000000..ee2cd88
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacet.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import org.elasticsearch.search.facet.Facet;
+
+/**
+ * Numeric statistical information.
+ */
+public interface StatisticalFacet extends Facet {
+
+ /**
+     * The type of the statistical facet.
+ */
+ public static final String TYPE = "statistical";
+
+ /**
+ * The number of values counted.
+ */
+ long getCount();
+
+ /**
+ * The total (sum) of values.
+ */
+ double getTotal();
+
+ /**
+ * The sum of squares of the values.
+ */
+ double getSumOfSquares();
+
+ /**
+ * The mean (average) of the values.
+ */
+ double getMean();
+
+ /**
+ * The minimum value.
+ */
+ double getMin();
+
+ /**
+ * The maximum value.
+ */
+ double getMax();
+
+ /**
+ * Variance of the values.
+ */
+ double getVariance();
+
+ /**
+ * Standard deviation of the values.
+ */
+ double getStdDeviation();
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetBuilder.java
new file mode 100644
index 0000000..8e69297
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetBuilder.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+
+/**
+ * A facet builder for statistical facets over one or more numeric fields.
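+ * <p>
+ * A minimal usage sketch (the <tt>price</tt> field name is an assumption):
+ * <pre>
+ * FacetBuilder facet = new StatisticalFacetBuilder("price_stats").field("price");
+ * </pre>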
+ */
+public class StatisticalFacetBuilder extends FacetBuilder {
+ private String[] fieldsNames;
+ private String fieldName;
+
+ public StatisticalFacetBuilder(String name) {
+ super(name);
+ }
+
+ public StatisticalFacetBuilder field(String field) {
+ this.fieldName = field;
+ return this;
+ }
+
+ /**
+     * The fields the statistics will be computed on.
+ */
+ public StatisticalFacetBuilder fields(String... fields) {
+ this.fieldsNames = fields;
+ return this;
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ public StatisticalFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public StatisticalFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+     * Sets the nested path the facet will execute on. A match on the root object will then cause
+     * all the nested objects matching the path to be aggregated into the facet.
+ */
+ public StatisticalFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (fieldName == null && fieldsNames == null) {
+ throw new SearchSourceBuilderException("field must be set on statistical facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(StatisticalFacet.TYPE);
+ if (fieldsNames != null) {
+ if (fieldsNames.length == 1) {
+ builder.field("field", fieldsNames[0]);
+ } else {
+ builder.field("fields", fieldsNames);
+ }
+ } else {
+ builder.field("field", fieldName);
+ }
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetExecutor.java
new file mode 100644
index 0000000..8cc9537
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetExecutor.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A {@link FacetExecutor} that computes statistical values from a single numeric field.
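+ * <p>
+ * Per segment, values are loaded through the field data API and folded into a {@link StatsProc};
+ * its running sums are copied back to the executor once collection ends.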
+ */
+public class StatisticalFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ double total = 0;
+ double sumOfSquares = 0.0;
+ long count;
+ int missing;
+
+ public StatisticalFacetExecutor(IndexNumericFieldData indexFieldData, SearchContext context) {
+ this.indexFieldData = indexFieldData;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalStatisticalFacet(facetName, min, max, total, sumOfSquares, count);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final StatsProc statsProc = new StatsProc();
+ private DoubleValues values;
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getDoubleValues();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ statsProc.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ StatisticalFacetExecutor.this.min = statsProc.min;
+ StatisticalFacetExecutor.this.max = statsProc.max;
+ StatisticalFacetExecutor.this.total = statsProc.total;
+ StatisticalFacetExecutor.this.sumOfSquares = statsProc.sumOfSquares;
+ StatisticalFacetExecutor.this.count = statsProc.count;
+ StatisticalFacetExecutor.this.missing = statsProc.missing;
+ }
+ }
+
+ public static class StatsProc extends DoubleFacetAggregatorBase {
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ double total = 0;
+ double sumOfSquares = 0.0;
+ long count;
+ int missing;
+
+ @Override
+ public void onValue(int docId, double value) {
+ if (value < min) {
+ min = value;
+ }
+ if (value > max) {
+ max = value;
+ }
+ sumOfSquares += value * value;
+ total += value;
+ count++;
+ }
+
+ public final double min() {
+ return min;
+ }
+
+ public final double max() {
+ return max;
+ }
+
+ public final long count() {
+ return count;
+ }
+
+ public final double sumOfSquares() {
+ return sumOfSquares;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetParser.java b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetParser.java
new file mode 100644
index 0000000..1fa31b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFacetParser.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.statistical;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.FacetPhaseExecutionException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Parses the <tt>statistical</tt> facet request source and selects the matching {@link FacetExecutor}.
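+ * <p>
+ * An illustrative sketch of the request source this parser accepts (the <tt>price</tt> field
+ * name is an assumption):
+ * <pre>
+ * "price_stats" : {
+ *     "statistical" : {
+ *         "field" : "price"
+ *     }
+ * }
+ * </pre>
+ * A <tt>fields</tt> array or a <tt>script</tt> (with optional <tt>lang</tt> and <tt>params</tt>)
+ * may be given instead of <tt>field</tt>.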
+ */
+public class StatisticalFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public StatisticalFacetParser(Settings settings) {
+ super(settings);
+ InternalStatisticalFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{StatisticalFacet.TYPE};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String field = null;
+ String[] fieldsNames = null;
+
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> params = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ params = parser.map();
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("fields".equals(currentFieldName)) {
+ List<String> fields = Lists.newArrayListWithCapacity(4);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ fields.add(parser.text());
+ }
+ fieldsNames = fields.toArray(new String[fields.size()]);
+ }
+ } else if (token.isValue()) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ }
+ }
+ }
+ if (fieldsNames != null) {
+ IndexNumericFieldData[] indexFieldDatas = new IndexNumericFieldData[fieldsNames.length];
+ for (int i = 0; i < fieldsNames.length; i++) {
+ FieldMapper fieldMapper = context.smartNameFieldMapper(fieldsNames[i]);
+ if (fieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "No mapping found for field [" + fieldsNames[i] + "]");
+ }
+ if (!(fieldMapper instanceof NumberFieldMapper)) {
+ throw new FacetPhaseExecutionException(facetName, "field [" + field + "] isn't a number field, but a " + fieldMapper.fieldDataType().getType());
+ }
+ indexFieldDatas[i] = context.fieldData().getForField(fieldMapper);
+ }
+ return new StatisticalFieldsFacetExecutor(indexFieldDatas, context);
+ }
+ if (script == null && field == null) {
+ throw new FacetPhaseExecutionException(facetName, "statistical facet requires either [script] or [field] to be set");
+ }
+ if (field != null) {
+ FieldMapper fieldMapper = context.smartNameFieldMapper(field);
+ if (fieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "No mapping found for field [" + field + "]");
+ }
+ if (!(fieldMapper instanceof NumberFieldMapper)) {
+ throw new FacetPhaseExecutionException(facetName, "field [" + field + "] isn't a number field, but a " + fieldMapper.fieldDataType().getType());
+ }
+ IndexNumericFieldData indexFieldData = context.fieldData().getForField(fieldMapper);
+ return new StatisticalFacetExecutor(indexFieldData, context);
+ } else {
+ return new ScriptStatisticalFacetExecutor(scriptLang, script, params, context);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFieldsFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFieldsFacetExecutor.java
new file mode 100644
index 0000000..26d405e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalFieldsFacetExecutor.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+
+/**
+ * A {@link FacetExecutor} that computes statistical values across several numeric fields.
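+ * <p>
+ * Works like {@link StatisticalFacetExecutor}, except that each collected document contributes
+ * the values of every configured field to a single shared {@link StatsProc}.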
+ */
+public class StatisticalFieldsFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData[] indexFieldDatas;
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ double total = 0;
+ double sumOfSquares = 0.0;
+ long count;
+ int missing;
+
+ public StatisticalFieldsFacetExecutor(IndexNumericFieldData[] indexFieldDatas, SearchContext context) {
+ this.indexFieldDatas = indexFieldDatas;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalStatisticalFacet(facetName, min, max, total, sumOfSquares, count);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final StatsProc statsProc = new StatsProc();
+ private DoubleValues[] values;
+
+ public Collector() {
+ this.values = new DoubleValues[indexFieldDatas.length];
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ for (int i = 0; i < indexFieldDatas.length; i++) {
+ values[i] = indexFieldDatas[i].load(context).getDoubleValues();
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ for (DoubleValues value : values) {
+ statsProc.onDoc(doc, value);
+ }
+ }
+
+ @Override
+ public void postCollection() {
+ StatisticalFieldsFacetExecutor.this.min = statsProc.min;
+ StatisticalFieldsFacetExecutor.this.max = statsProc.max;
+ StatisticalFieldsFacetExecutor.this.total = statsProc.sum;
+ StatisticalFieldsFacetExecutor.this.sumOfSquares = statsProc.sumOfSquares;
+ StatisticalFieldsFacetExecutor.this.count = statsProc.count;
+ StatisticalFieldsFacetExecutor.this.missing = statsProc.missing;
+ }
+ }
+
+ public static class StatsProc extends DoubleFacetAggregatorBase {
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ double sum = 0;
+ double sumOfSquares = 0.0;
+ long count;
+ int missing;
+
+ @Override
+ public void onValue(int docId, double value) {
+ if (value < min) {
+ min = value;
+ }
+ if (value > max) {
+ max = value;
+ }
+ sumOfSquares += value * value;
+ sum += value;
+ count++;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalScriptFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalScriptFacetBuilder.java
new file mode 100644
index 0000000..da9b11a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/statistical/StatisticalScriptFacetBuilder.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.statistical;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A facet builder for statistical facets computed by a script.
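+ * <p>
+ * A minimal usage sketch (the script body and parameter are assumptions):
+ * <pre>
+ * FacetBuilder facet = new StatisticalScriptFacetBuilder("price_stats")
+ *         .script("doc['price'].value * factor")
+ *         .param("factor", 1.1);
+ * </pre>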
+ */
+public class StatisticalScriptFacetBuilder extends FacetBuilder {
+ private String lang;
+ private String script;
+ private Map<String, Object> params;
+
+ public StatisticalScriptFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+ * Marks the facet to run in a global scope, not bounded by any query.
+ */
+ public StatisticalScriptFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ public StatisticalScriptFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+     * Sets the nested path the facet will execute on. A match on the root object will then cause
+     * all the nested objects matching the path to be aggregated into the facet.
+ */
+ public StatisticalScriptFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ /**
+ * The language of the script.
+ */
+ public StatisticalScriptFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ public StatisticalScriptFacetBuilder script(String script) {
+ this.script = script;
+ return this;
+ }
+
+ public StatisticalScriptFacetBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (script == null) {
+ throw new SearchSourceBuilderException("script must be set on statistical script facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(StatisticalFacet.TYPE);
+ builder.field("script", script);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/InternalTermsFacet.java b/src/main/java/org/elasticsearch/search/facet/terms/InternalTermsFacet.java
new file mode 100644
index 0000000..a54cff9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/InternalTermsFacet.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms;
+
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.doubles.InternalDoubleTermsFacet;
+import org.elasticsearch.search.facet.terms.longs.InternalLongTermsFacet;
+import org.elasticsearch.search.facet.terms.strings.InternalStringTermsFacet;
+
+/**
+ * Base class for the shard-level terms facet results (string, long and double variants).
+ */
+public abstract class InternalTermsFacet extends InternalFacet implements TermsFacet {
+
+ public static void registerStreams() {
+ InternalStringTermsFacet.registerStream();
+ InternalLongTermsFacet.registerStream();
+ InternalDoubleTermsFacet.registerStream();
+ }
+
+ protected InternalTermsFacet() {
+ }
+
+ protected InternalTermsFacet(String facetName) {
+ super(facetName);
+ }
+
+ @Override
+ public final String getType() {
+ return TYPE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/TermsFacet.java b/src/main/java/org/elasticsearch/search/facet/terms/TermsFacet.java
new file mode 100644
index 0000000..f5516ed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/TermsFacet.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.search.facet.Facet;
+
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * A terms facet returns the most popular terms matched by the search query, together with their counts.
+ */
+public interface TermsFacet extends Facet, Iterable<TermsFacet.Entry> {
+
+ /**
+ * The type of the filter facet.
+ */
+ public static final String TYPE = "terms";
+
+ public interface Entry extends Comparable<Entry> {
+
+ Text getTerm();
+
+ Number getTermAsNumber();
+
+ int getCount();
+ }
+
+ /**
+ * Controls how the terms facets are ordered.
+ */
+ public static enum ComparatorType {
+ /**
+ * Order by the (higher) count of each term.
+ */
+ COUNT((byte) 0, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ int i = o2.getCount() - o1.getCount();
+ if (i == 0) {
+ i = o2.compareTo(o1);
+ if (i == 0) {
+ i = System.identityHashCode(o2) - System.identityHashCode(o1);
+ }
+ }
+ return i;
+ }
+ }),
+ /**
+ * Order by the (lower) count of each term.
+ */
+ REVERSE_COUNT((byte) 1, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ return -COUNT.comparator().compare(o1, o2);
+ }
+ }),
+ /**
+ * Order by the terms.
+ */
+ TERM((byte) 2, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ return o1.compareTo(o2);
+ }
+ }),
+ /**
+         * Reverse order by the terms.
+ */
+ REVERSE_TERM((byte) 3, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ return -TERM.comparator().compare(o1, o2);
+ }
+ });
+
+ private final byte id;
+
+ private final Comparator<Entry> comparator;
+
+ ComparatorType(byte id, Comparator<Entry> comparator) {
+ this.id = id;
+ this.comparator = comparator;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public Comparator<Entry> comparator() {
+ return comparator;
+ }
+
+ public static ComparatorType fromId(byte id) {
+ if (id == COUNT.id()) {
+ return COUNT;
+ } else if (id == REVERSE_COUNT.id()) {
+ return REVERSE_COUNT;
+ } else if (id == TERM.id()) {
+ return TERM;
+ } else if (id == REVERSE_TERM.id()) {
+ return REVERSE_TERM;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for terms facet comparator [" + id + "]");
+ }
+
+ public static ComparatorType fromString(String type) {
+ if ("count".equals(type)) {
+ return COUNT;
+ } else if ("term".equals(type)) {
+ return TERM;
+ } else if ("reverse_count".equals(type) || "reverseCount".equals(type)) {
+ return REVERSE_COUNT;
+ } else if ("reverse_term".equals(type) || "reverseTerm".equals(type)) {
+ return REVERSE_TERM;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for terms facet comparator [" + type + "]");
+ }
+ }
+
+ /**
+ * The number of docs missing a value.
+ */
+ long getMissingCount();
+
+ /**
+ * The total count of terms.
+ */
+ long getTotalCount();
+
+ /**
+ * The count of terms other than the one provided by the entries.
+ */
+ long getOtherCount();
+
+ /**
+ * The terms and counts.
+ */
+ List<? extends TermsFacet.Entry> getEntries();
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/TermsFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/terms/TermsFacetBuilder.java
new file mode 100644
index 0000000..5898e86
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/TermsFacetBuilder.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A builder for terms facets, which collect the frequency of terms within one or more fields.
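+ * <p>
+ * A minimal usage sketch (the <tt>tags</tt> field name is an assumption):
+ * <pre>
+ * FacetBuilder facet = new TermsFacetBuilder("popular_tags")
+ *         .field("tags")
+ *         .size(20)
+ *         .order(TermsFacet.ComparatorType.COUNT);
+ * </pre>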
+ */
+public class TermsFacetBuilder extends FacetBuilder {
+ private String fieldName;
+ private String[] fieldsNames;
+ private int size = 10;
+ private int shardSize = -1;
+ private Boolean allTerms;
+ private Object[] exclude;
+ private String regex;
+ private int regexFlags = 0;
+ private TermsFacet.ComparatorType comparatorType;
+ private String script;
+ private String lang;
+ private Map<String, Object> params;
+ String executionHint;
+
+ /**
+     * Constructs a new terms facet builder with the provided facet name.
+ *
+ * @param name The facet name.
+ */
+ public TermsFacetBuilder(String name) {
+ super(name);
+ }
+
+ /**
+     * Should the facet run in global mode (not bounded by the search query) or not. Defaults
+ * to <tt>false</tt>.
+ */
+ public TermsFacetBuilder global(boolean global) {
+ super.global(global);
+ return this;
+ }
+
+ /**
+ * An additional facet filter that will further filter the documents the facet will be
+ * executed on.
+ */
+ public TermsFacetBuilder facetFilter(FilterBuilder filter) {
+ this.facetFilter = filter;
+ return this;
+ }
+
+ /**
+     * Sets the nested path the facet will execute on. A match on the root object will then cause
+     * all the nested objects matching the path to be aggregated into the facet.
+ */
+ public TermsFacetBuilder nested(String nested) {
+ this.nested = nested;
+ return this;
+ }
+
+ /**
+ * The field the terms will be collected from.
+ */
+ public TermsFacetBuilder field(String field) {
+ this.fieldName = field;
+ return this;
+ }
+
+ /**
+ * The fields the terms will be collected from.
+ */
+ public TermsFacetBuilder fields(String... fields) {
+ this.fieldsNames = fields;
+ return this;
+ }
+
+ /**
+     * Defines a script whose result provides the terms directly (rather than filtering or modifying
+     * terms, which is the behavior when a script is provided on top of field / fields).
+ */
+ public TermsFacetBuilder scriptField(String scriptField) {
+ this.script = scriptField;
+ return this;
+ }
+
+ /**
+ * A set of terms that will be excluded.
+ */
+ public TermsFacetBuilder exclude(Object... exclude) {
+ this.exclude = exclude;
+ return this;
+ }
+
+ /**
+ * The number of terms (and frequencies) to return. Defaults to 10.
+ */
+ public TermsFacetBuilder size(int size) {
+ this.size = size;
+ return this;
+ }
+
+ /**
+     * Sets the number of terms that will be returned from each shard. The higher the number, the more accurate the results. The
+     * shard size cannot be smaller than {@link #size(int) size}; if a smaller value is set, it falls back to being equal to
+     * size.
+ */
+ public TermsFacetBuilder shardSize(int shardSize) {
+ this.shardSize = shardSize;
+ return this;
+ }
+
+ /**
+ * A regular expression to use in order to further filter terms.
+ */
+ public TermsFacetBuilder regex(String regex) {
+ return regex(regex, 0);
+ }
+
+ /**
+ * A regular expression (with flags) to use in order to further filter terms.
+ */
+ public TermsFacetBuilder regex(String regex, int flags) {
+ this.regex = regex;
+ this.regexFlags = flags;
+ return this;
+ }
+
+ /**
+     * The order in which to return the facet entries. Defaults to {@link TermsFacet.ComparatorType#COUNT}.
+ */
+ public TermsFacetBuilder order(TermsFacet.ComparatorType comparatorType) {
+ this.comparatorType = comparatorType;
+ return this;
+ }
+
+ /**
+     * A script allowing each collected term to be modified or ignored (the term can be accessed through the <tt>term</tt> variable).
+ */
+ public TermsFacetBuilder script(String script) {
+ this.script = script;
+ return this;
+ }
+
+ /**
+ * The language of the script.
+ */
+ public TermsFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+     * An execution hint for how the facet is computed.
+ */
+ public TermsFacetBuilder executionHint(String executionHint) {
+ this.executionHint = executionHint;
+ return this;
+ }
+
+ /**
+ * A parameter that will be passed to the script.
+ *
+ * @param name The name of the script parameter.
+ * @param value The value of the script parameter.
+ */
+ public TermsFacetBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ /**
+ * Sets all possible terms to be loaded, even ones with 0 count. Note, this *should not* be used
+ * with a field that has many possible terms.
+ */
+ public TermsFacetBuilder allTerms(boolean allTerms) {
+ this.allTerms = allTerms;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (fieldName == null && fieldsNames == null && script == null) {
+ throw new SearchSourceBuilderException("field/fields/script must be set on terms facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(TermsFacet.TYPE);
+ if (fieldsNames != null) {
+ if (fieldsNames.length == 1) {
+ builder.field("field", fieldsNames[0]);
+ } else {
+ builder.field("fields", fieldsNames);
+ }
+ } else if (fieldName != null) {
+ builder.field("field", fieldName);
+ }
+ builder.field("size", size);
+
+ // no point in sending shard size if it's not greater than size
+ if (shardSize > size) {
+ builder.field("shard_size", shardSize);
+ }
+
+ if (exclude != null) {
+ builder.startArray("exclude");
+ for (Object ex : exclude) {
+ builder.value(ex);
+ }
+ builder.endArray();
+ }
+ if (regex != null) {
+ builder.field("regex", regex);
+ if (regexFlags != 0) {
+ builder.field("regex_flags", Regex.flagsToString(regexFlags));
+ }
+ }
+ if (comparatorType != null) {
+ builder.field("order", comparatorType.name().toLowerCase(Locale.ROOT));
+ }
+ if (allTerms != null) {
+ builder.field("all_terms", allTerms);
+ }
+
+ if (script != null) {
+ builder.field("script", script);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ }
+
+ if (executionHint != null) {
+ builder.field("execution_hint", executionHint);
+ }
+
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/TermsFacetParser.java b/src/main/java/org/elasticsearch/search/facet/terms/TermsFacetParser.java
new file mode 100644
index 0000000..d913f33
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/TermsFacetParser.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.terms.doubles.TermsDoubleFacetExecutor;
+import org.elasticsearch.search.facet.terms.index.IndexNameFacetExecutor;
+import org.elasticsearch.search.facet.terms.longs.TermsLongFacetExecutor;
+import org.elasticsearch.search.facet.terms.strings.FieldsTermsStringFacetExecutor;
+import org.elasticsearch.search.facet.terms.strings.ScriptTermsStringFieldFacetExecutor;
+import org.elasticsearch.search.facet.terms.strings.TermsStringFacetExecutor;
+import org.elasticsearch.search.facet.terms.strings.TermsStringOrdinalsFacetExecutor;
+import org.elasticsearch.search.facet.terms.unmapped.UnmappedFieldExecutor;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ *
+ */
+public class TermsFacetParser extends AbstractComponent implements FacetParser {
+
+ private final int ordinalsCacheAbove;
+
+ @Inject
+ public TermsFacetParser(Settings settings) {
+ super(settings);
+ InternalTermsFacet.registerStreams();
+ this.ordinalsCacheAbove = componentSettings.getAsInt("ordinals_cache_above", 10000); // above 10k we want to cache
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{TermsFacet.TYPE};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String field = null;
+ int size = 10;
+ int shardSize = -1;
+
+ String[] fieldsNames = null;
+ ImmutableSet<BytesRef> excluded = ImmutableSet.of();
+ String regex = null;
+ String regexFlags = null;
+ TermsFacet.ComparatorType comparatorType = TermsFacet.ComparatorType.COUNT;
+ String scriptLang = null;
+ String script = null;
+ Map<String, Object> params = null;
+ boolean allTerms = false;
+ String executionHint = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ params = parser.map();
+ } else {
+ throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "] while parsing terms facet [" + facetName + "]");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("exclude".equals(currentFieldName)) {
+ ImmutableSet.Builder<BytesRef> builder = ImmutableSet.builder();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ builder.add(parser.bytes());
+ }
+ excluded = builder.build();
+ } else if ("fields".equals(currentFieldName)) {
+ List<String> fields = Lists.newArrayListWithCapacity(4);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ fields.add(parser.text());
+ }
+ fieldsNames = fields.toArray(new String[fields.size()]);
+ } else {
+ throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "] while parsing terms facet [" + facetName + "]");
+ }
+ } else if (token.isValue()) {
+ if ("field".equals(currentFieldName)) {
+ field = parser.text();
+ } else if ("script_field".equals(currentFieldName) || "scriptField".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("size".equals(currentFieldName)) {
+ size = parser.intValue();
+ } else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
+ shardSize = parser.intValue();
+ } else if ("all_terms".equals(currentFieldName) || "allTerms".equals(currentFieldName)) {
+ allTerms = parser.booleanValue();
+ } else if ("regex".equals(currentFieldName)) {
+ regex = parser.text();
+ } else if ("regex_flags".equals(currentFieldName) || "regexFlags".equals(currentFieldName)) {
+ regexFlags = parser.text();
+ } else if ("order".equals(currentFieldName) || "comparator".equals(currentFieldName)) {
+ comparatorType = TermsFacet.ComparatorType.fromString(parser.text());
+ } else if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("execution_hint".equals(currentFieldName) || "executionHint".equals(currentFieldName)) {
+ executionHint = parser.textOrNull();
+ } else {
+ throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "] while parsing terms facet [" + facetName + "]");
+ }
+ }
+ }
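+
+ // A facet body this loop accepts might look like (sketch):
+ // "tags" : { "terms" : { "field" : "tag", "size" : 10,
+ //                        "exclude" : ["misc"], "regex" : "t.*" } }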
+
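+ // faceting on _index needs no field data: each shard simply counts the docs
+ // it collects under its own index name (see IndexNameFacetExecutor)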
+ if ("_index".equals(field)) {
+ return new IndexNameFacetExecutor(context.shardTarget().index(), comparatorType, size);
+ }
+
+ if (fieldsNames != null && fieldsNames.length == 1) {
+ field = fieldsNames[0];
+ fieldsNames = null;
+ }
+
+ Pattern pattern = null;
+ if (regex != null) {
+ pattern = Regex.compile(regex, regexFlags);
+ }
+
+ SearchScript searchScript = null;
+ if (script != null) {
+ searchScript = context.scriptService().search(context.lookup(), scriptLang, script, params);
+ }
+
+ // shard_size cannot be smaller than size, as we need to fetch at least <size> entries from every shard in order to return <size>
+ if (shardSize < size) {
+ shardSize = size;
+ }
+
+ if (fieldsNames != null) {
+
+ // when multiple fields are given, only collect the ones that are mapped and facet on those.
+ ArrayList<FieldMapper> mappers = new ArrayList<FieldMapper>(fieldsNames.length);
+ for (int i = 0; i < fieldsNames.length; i++) {
+ FieldMapper mapper = context.smartNameFieldMapper(fieldsNames[i]);
+ if (mapper != null) {
+ mappers.add(mapper);
+ }
+ }
+ if (mappers.isEmpty()) {
+ // none of the fields is mapped
+ return new UnmappedFieldExecutor(size, comparatorType);
+ }
+ return new FieldsTermsStringFacetExecutor(mappers.toArray(new FieldMapper[mappers.size()]), size, shardSize, comparatorType, allTerms, context, excluded, pattern, searchScript);
+ }
+ if (field == null && script != null) {
+ return new ScriptTermsStringFieldFacetExecutor(size, shardSize, comparatorType, context, excluded, pattern, scriptLang, script, params, context.cacheRecycler());
+ }
+
+ if (field == null) {
+ throw new ElasticsearchParseException("terms facet [" + facetName + "] must have a field, fields or script parameter");
+ }
+
+ FieldMapper fieldMapper = context.smartNameFieldMapper(field);
+ if (fieldMapper == null) {
+ return new UnmappedFieldExecutor(size, comparatorType);
+ }
+
+ IndexFieldData indexFieldData = context.fieldData().getForField(fieldMapper);
+ if (indexFieldData instanceof IndexNumericFieldData) {
+ IndexNumericFieldData indexNumericFieldData = (IndexNumericFieldData) indexFieldData;
+ if (indexNumericFieldData.getNumericType().isFloatingPoint()) {
+ return new TermsDoubleFacetExecutor(indexNumericFieldData, size, shardSize, comparatorType, allTerms, context, excluded, searchScript, context.cacheRecycler());
+ } else {
+ return new TermsLongFacetExecutor(indexNumericFieldData, size, shardSize, comparatorType, allTerms, context, excluded, searchScript, context.cacheRecycler());
+ }
+ } else {
+ if (script != null || "map".equals(executionHint)) {
+ return new TermsStringFacetExecutor(indexFieldData, size, shardSize, comparatorType, allTerms, context, excluded, pattern, searchScript);
+ } else if (indexFieldData instanceof IndexFieldData.WithOrdinals) {
+ return new TermsStringOrdinalsFacetExecutor((IndexFieldData.WithOrdinals) indexFieldData, size, shardSize, comparatorType, allTerms, context, excluded, pattern, ordinalsCacheAbove);
+ } else {
+ return new TermsStringFacetExecutor(indexFieldData, size, shardSize, comparatorType, allTerms, context, excluded, pattern, searchScript);
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/doubles/InternalDoubleTermsFacet.java b/src/main/java/org/elasticsearch/search/facet/terms/doubles/InternalDoubleTermsFacet.java
new file mode 100644
index 0000000..ad60f54
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/doubles/InternalDoubleTermsFacet.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.doubles;
+
+import com.carrotsearch.hppc.DoubleIntOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.terms.InternalTermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalDoubleTermsFacet extends InternalTermsFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("dTerms"));
+
+ public static void registerStream() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readTermsFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ public static class DoubleEntry implements Entry {
+
+ double term;
+ int count;
+
+ public DoubleEntry(double term, int count) {
+ this.term = term;
+ this.count = count;
+ }
+
+ @Override
+ public Text getTerm() {
+ return new StringText(Double.toString(term));
+ }
+
+ @Override
+ public Number getTermAsNumber() {
+ return term;
+ }
+
+ @Override
+ public int getCount() {
+ return count;
+ }
+
+ @Override
+ public int compareTo(Entry o) {
+ double anotherVal = ((DoubleEntry) o).term;
+ if (term < anotherVal) {
+ return -1;
+ }
+ if (term == anotherVal) {
+ int i = count - o.getCount();
+ if (i == 0) {
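+ // the identity hash gives a stable, non-zero tie-break so sorted sets never
+ // treat two distinct entries as duplicates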
+ i = System.identityHashCode(this) - System.identityHashCode(o);
+ }
+ return i;
+ }
+ return 1;
+ }
+ }
+
+ int requiredSize;
+ long missing;
+ long total;
+ Collection<DoubleEntry> entries = ImmutableList.of();
+ ComparatorType comparatorType;
+
+ InternalDoubleTermsFacet() {
+ }
+
+ public InternalDoubleTermsFacet(String name, ComparatorType comparatorType, int requiredSize, Collection<DoubleEntry> entries, long missing, long total) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.requiredSize = requiredSize;
+ this.entries = entries;
+ this.missing = missing;
+ this.total = total;
+ }
+
+ @Override
+ public List<DoubleEntry> getEntries() {
+ if (!(entries instanceof List)) {
+ entries = ImmutableList.copyOf(entries);
+ }
+ return (List<DoubleEntry>) entries;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) entries.iterator();
+ }
+
+ @Override
+ public long getMissingCount() {
+ return this.missing;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.total;
+ }
+
+ @Override
+ public long getOtherCount() {
+ long other = total;
+ for (Entry entry : entries) {
+ other -= entry.getCount();
+ }
+ return other;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ Facet facet = facets.get(0);
+
+ // can be of type InternalStringTermsFacet representing unmapped fields
+ if (facet instanceof InternalDoubleTermsFacet) {
+ ((InternalDoubleTermsFacet) facet).trimExcessEntries();
+ }
+ return facet;
+ }
+
+ InternalDoubleTermsFacet first = null;
+
+ Recycler.V<DoubleIntOpenHashMap> aggregated = context.cacheRecycler().doubleIntMap(-1);
+ long missing = 0;
+ long total = 0;
+ for (Facet facet : facets) {
+ TermsFacet termsFacet = (TermsFacet) facet;
+ // termsFacet could be of type InternalStringTermsFacet representing unmapped fields
+ if (first == null && termsFacet instanceof InternalDoubleTermsFacet) {
+ first = (InternalDoubleTermsFacet) termsFacet;
+ }
+ missing += termsFacet.getMissingCount();
+ total += termsFacet.getTotalCount();
+ for (Entry entry : termsFacet.getEntries()) {
+ aggregated.v().addTo(((DoubleEntry) entry).term, entry.getCount());
+ }
+ }
+
+ BoundedTreeSet<DoubleEntry> ordered = new BoundedTreeSet<DoubleEntry>(first.comparatorType.comparator(), first.requiredSize);
+ final boolean[] states = aggregated.v().allocated;
+ final double[] keys = aggregated.v().keys;
+ final int[] values = aggregated.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ ordered.add(new DoubleEntry(keys[i], values[i]));
+ }
+ }
+
+ first.entries = ordered;
+ first.missing = missing;
+ first.total = total;
+
+ aggregated.release();
+
+ return first;
+ }
+
+ private void trimExcessEntries() {
+ if (requiredSize >= entries.size()) {
+ return;
+ }
+
+ if (entries instanceof List) {
+ entries = ((List) entries).subList(0, requiredSize);
+ return;
+ }
+
+ int i = 0;
+ for (Iterator<DoubleEntry> iter = entries.iterator(); iter.hasNext();) {
+ iter.next();
+ if (i++ >= requiredSize) {
+ iter.remove();
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString MISSING = new XContentBuilderString("missing");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString OTHER = new XContentBuilderString("other");
+ static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ static final XContentBuilderString TERM = new XContentBuilderString("term");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, TermsFacet.TYPE);
+ builder.field(Fields.MISSING, missing);
+ builder.field(Fields.TOTAL, total);
+ builder.field(Fields.OTHER, getOtherCount());
+ builder.startArray(Fields.TERMS);
+ for (DoubleEntry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TERM, entry.term);
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalDoubleTermsFacet readTermsFacet(StreamInput in) throws IOException {
+ InternalDoubleTermsFacet facet = new InternalDoubleTermsFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ requiredSize = in.readVInt();
+ missing = in.readVLong();
+ total = in.readVLong();
+
+ int size = in.readVInt();
+ entries = new ArrayList<DoubleEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new DoubleEntry(in.readDouble(), in.readVInt()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(requiredSize);
+ out.writeVLong(missing);
+ out.writeVLong(total);
+
+ out.writeVInt(entries.size());
+ for (DoubleEntry entry : entries) {
+ out.writeDouble(entry.term);
+ out.writeVInt(entry.getCount());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/doubles/TermsDoubleFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/doubles/TermsDoubleFacetExecutor.java
new file mode 100644
index 0000000..bb21e32
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/doubles/TermsDoubleFacetExecutor.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.doubles;
+
+import com.carrotsearch.hppc.DoubleIntOpenHashMap;
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.support.EntryPriorityQueue;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+/**
+ *
+ */
+public class TermsDoubleFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+ private final TermsFacet.ComparatorType comparatorType;
+ private final int size;
+ private final int shardSize;
+ private final SearchScript script;
+ private final ImmutableSet<BytesRef> excluded;
+
+ final Recycler.V<DoubleIntOpenHashMap> facets;
+ long missing;
+ long total;
+
+ public TermsDoubleFacetExecutor(IndexNumericFieldData indexFieldData, int size, int shardSize, TermsFacet.ComparatorType comparatorType, boolean allTerms, SearchContext context,
+ ImmutableSet<BytesRef> excluded, SearchScript script, CacheRecycler cacheRecycler) {
+ this.indexFieldData = indexFieldData;
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.script = script;
+ this.excluded = excluded;
+
+ this.facets = cacheRecycler.doubleIntMap(-1);
+
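+ // allTerms: pre-seed the map with every distinct value at count 0 so terms
+ // matching no hit still show up in the facet response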
+ if (allTerms) {
+ for (AtomicReaderContext readerContext : context.searcher().getTopReaderContext().leaves()) {
+ int maxDoc = readerContext.reader().maxDoc();
+ DoubleValues values = indexFieldData.load(readerContext).getDoubleValues();
+ if (values instanceof DoubleValues.WithOrdinals) {
+ DoubleValues.WithOrdinals valuesWithOrds = (DoubleValues.WithOrdinals) values;
+ Ordinals.Docs ordinals = valuesWithOrds.ordinals();
+ for (long ord = Ordinals.MIN_ORDINAL; ord < ordinals.getMaxOrd(); ord++) {
+ facets.v().putIfAbsent(valuesWithOrds.getValueByOrd(ord), 0);
+ }
+ } else {
+ for (int docId = 0; docId < maxDoc; docId++) {
+ int numValues = values.setDocument(docId);
+ DoubleIntOpenHashMap map = facets.v();
+ for (int i = 0; i < numValues; i++) {
+ map.putIfAbsent(values.nextValue(), 0);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ if (facets.v().isEmpty()) {
+ facets.release();
+ return new InternalDoubleTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalDoubleTermsFacet.DoubleEntry>of(), missing, total);
+ } else {
+ final boolean[] states = facets.v().allocated;
+ final double[] keys = facets.v().keys;
+ final int[] values = facets.v().values;
+ if (size < EntryPriorityQueue.LIMIT) {
+ EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ ordered.insertWithOverflow(new InternalDoubleTermsFacet.DoubleEntry(keys[i], values[i]));
+ }
+ }
+ InternalDoubleTermsFacet.DoubleEntry[] list = new InternalDoubleTermsFacet.DoubleEntry[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = (InternalDoubleTermsFacet.DoubleEntry) ordered.pop();
+ }
+ facets.release();
+ return new InternalDoubleTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
+ } else {
+ BoundedTreeSet<InternalDoubleTermsFacet.DoubleEntry> ordered = new BoundedTreeSet<InternalDoubleTermsFacet.DoubleEntry>(comparatorType.comparator(), shardSize);
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ ordered.add(new InternalDoubleTermsFacet.DoubleEntry(keys[i], values[i]));
+ }
+ }
+ facets.release();
+ return new InternalDoubleTermsFacet(facetName, comparatorType, size, ordered, missing, total);
+ }
+ }
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final StaticAggregatorValueProc aggregator;
+ private DoubleValues values;
+
+ public Collector() {
+ if (script == null && excluded.isEmpty()) {
+ aggregator = new StaticAggregatorValueProc(facets.v());
+ } else {
+ aggregator = new AggregatorValueProc(facets.v(), excluded, script);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getDoubleValues();
+ if (script != null) {
+ script.setNextReader(context);
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ aggregator.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ TermsDoubleFacetExecutor.this.missing = aggregator.missing();
+ TermsDoubleFacetExecutor.this.total = aggregator.total();
+ }
+ }
+
+ public static class AggregatorValueProc extends StaticAggregatorValueProc {
+
+ private final SearchScript script;
+
+ private final DoubleOpenHashSet excluded;
+
+ public AggregatorValueProc(DoubleIntOpenHashMap facets, Set<BytesRef> excluded, SearchScript script) {
+ super(facets);
+ this.script = script;
+ if (excluded == null || excluded.isEmpty()) {
+ this.excluded = null;
+ } else {
+ this.excluded = new DoubleOpenHashSet(excluded.size());
+ for (BytesRef s : excluded) {
+ this.excluded.add(Double.parseDouble(s.utf8ToString()));
+ }
+ }
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ if (excluded != null && excluded.contains(value)) {
+ return;
+ }
+ if (script != null) {
+ script.setNextDocId(docId);
+ script.setNextVar("term", value);
+ Object scriptValue = script.run();
+ if (scriptValue == null) {
+ return;
+ }
+ if (scriptValue instanceof Boolean) {
+ if (!((Boolean) scriptValue)) {
+ return;
+ }
+ } else {
+ value = ((Number) scriptValue).doubleValue();
+ }
+ }
+ super.onValue(docId, value);
+ }
+ }
+
+ public static class StaticAggregatorValueProc extends DoubleFacetAggregatorBase {
+
+ private final DoubleIntOpenHashMap facets;
+
+ public StaticAggregatorValueProc(DoubleIntOpenHashMap facets) {
+ this.facets = facets;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ facets.addTo(value, 1);
+ }
+
+ public final DoubleIntOpenHashMap facets() {
+ return facets;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/index/IndexNameFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/index/IndexNameFacetExecutor.java
new file mode 100644
index 0000000..d0ad19f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/index/IndexNameFacetExecutor.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.index;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.strings.InternalStringTermsFacet;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class IndexNameFacetExecutor extends FacetExecutor {
+
+ private final String indexName;
+ private final InternalStringTermsFacet.ComparatorType comparatorType;
+ private final int size;
+
+ private int count = 0;
+
+ public IndexNameFacetExecutor(String indexName, TermsFacet.ComparatorType comparatorType, int size) {
+ this.indexName = indexName;
+ this.comparatorType = comparatorType;
+ this.size = size;
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ return new InternalStringTermsFacet(facetName, comparatorType, size, Sets.newHashSet(new InternalStringTermsFacet.TermEntry(indexName, count)), 0, count);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private int count;
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ count++;
+ }
+
+ @Override
+ public void postCollection() {
+ IndexNameFacetExecutor.this.count = count;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/longs/InternalLongTermsFacet.java b/src/main/java/org/elasticsearch/search/facet/terms/longs/InternalLongTermsFacet.java
new file mode 100644
index 0000000..37382c9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/longs/InternalLongTermsFacet.java
@@ -0,0 +1,293 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.longs;
+
+import com.carrotsearch.hppc.LongIntOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.terms.InternalTermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalLongTermsFacet extends InternalTermsFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("lTerms"));
+
+ public static void registerStream() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readTermsFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ public static class LongEntry implements Entry {
+
+ long term;
+ int count;
+
+ public LongEntry(long term, int count) {
+ this.term = term;
+ this.count = count;
+ }
+
+ @Override
+ public Text getTerm() {
+ return new StringText(Long.toString(term));
+ }
+
+ @Override
+ public Number getTermAsNumber() {
+ return term;
+ }
+
+ @Override
+ public int getCount() {
+ return count;
+ }
+
+ @Override
+ public int compareTo(Entry o) {
+ long anotherVal = ((LongEntry) o).term;
+ if (term < anotherVal) {
+ return -1;
+ }
+ if (term == anotherVal) {
+ int i = count - o.getCount();
+ if (i == 0) {
+ i = System.identityHashCode(this) - System.identityHashCode(o);
+ }
+ return i;
+ }
+ return 1;
+ }
+ }
+
+ int requiredSize;
+ long missing;
+ long total;
+ Collection<LongEntry> entries = ImmutableList.of();
+ ComparatorType comparatorType;
+
+ InternalLongTermsFacet() {
+ }
+
+ public InternalLongTermsFacet(String name, ComparatorType comparatorType, int requiredSize, Collection<LongEntry> entries, long missing, long total) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.requiredSize = requiredSize;
+ this.entries = entries;
+ this.missing = missing;
+ this.total = total;
+ }
+
+ @Override
+ public List<LongEntry> getEntries() {
+ if (!(entries instanceof List)) {
+ entries = ImmutableList.copyOf(entries);
+ }
+ return (List<LongEntry>) entries;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) entries.iterator();
+ }
+
+ @Override
+ public long getMissingCount() {
+ return this.missing;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.total;
+ }
+
+ @Override
+ public long getOtherCount() {
+ long other = total;
+ for (Entry entry : entries) {
+ other -= entry.getCount();
+ }
+ return other;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ Facet facet = facets.get(0);
+
+ // facet could be InternalStringTermsFacet representing unmapped fields
+ if (facet instanceof InternalLongTermsFacet) {
+ ((InternalLongTermsFacet) facet).trimExcessEntries();
+ }
+ return facet;
+ }
+
+ InternalLongTermsFacet first = null;
+
+ Recycler.V<LongIntOpenHashMap> aggregated = context.cacheRecycler().longIntMap(-1);
+ long missing = 0;
+ long total = 0;
+ for (Facet facet : facets) {
+ TermsFacet termsFacet = (TermsFacet) facet;
+ // termsFacet could be of type InternalStringTermsFacet representing unmapped fields
+ if (first == null && termsFacet instanceof InternalLongTermsFacet) {
+ first = (InternalLongTermsFacet) termsFacet;
+ }
+ missing += termsFacet.getMissingCount();
+ total += termsFacet.getTotalCount();
+ for (Entry entry : termsFacet.getEntries()) {
+ aggregated.v().addTo(((LongEntry) entry).term, entry.getCount());
+ }
+ }
+
+ BoundedTreeSet<LongEntry> ordered = new BoundedTreeSet<LongEntry>(first.comparatorType.comparator(), first.requiredSize);
+ final boolean[] states = aggregated.v().allocated;
+ final long[] keys = aggregated.v().keys;
+ final int[] values = aggregated.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ ordered.add(new LongEntry(keys[i], values[i]));
+ }
+ }
+ first.entries = ordered;
+ first.missing = missing;
+ first.total = total;
+
+ aggregated.release();
+
+ return first;
+ }
+
+ private void trimExcessEntries() {
+ if (requiredSize >= entries.size()) {
+ return;
+ }
+
+ if (entries instanceof List) {
+ entries = ((List) entries).subList(0, requiredSize);
+ return;
+ }
+
+ int i = 0;
+ for (Iterator<LongEntry> iter = entries.iterator(); iter.hasNext();) {
+ iter.next();
+ if (i++ >= requiredSize) {
+ iter.remove();
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString MISSING = new XContentBuilderString("missing");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString OTHER = new XContentBuilderString("other");
+ static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ static final XContentBuilderString TERM = new XContentBuilderString("term");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, TermsFacet.TYPE);
+ builder.field(Fields.MISSING, missing);
+ builder.field(Fields.TOTAL, total);
+ builder.field(Fields.OTHER, getOtherCount());
+ builder.startArray(Fields.TERMS);
+ for (LongEntry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TERM, entry.term);
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalLongTermsFacet readTermsFacet(StreamInput in) throws IOException {
+ InternalLongTermsFacet facet = new InternalLongTermsFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ requiredSize = in.readVInt();
+ missing = in.readVLong();
+ total = in.readVLong();
+
+ int size = in.readVInt();
+ entries = new ArrayList<LongEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new LongEntry(in.readLong(), in.readVInt()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(requiredSize);
+ out.writeVLong(missing);
+ out.writeVLong(total);
+
+ out.writeVInt(entries.size());
+ for (LongEntry entry : entries) {
+ out.writeLong(entry.term);
+ out.writeVInt(entry.getCount());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/longs/TermsLongFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/longs/TermsLongFacetExecutor.java
new file mode 100644
index 0000000..8f60a51
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/longs/TermsLongFacetExecutor.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.longs;
+
+import com.carrotsearch.hppc.LongIntOpenHashMap;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.LongFacetAggregatorBase;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.support.EntryPriorityQueue;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+/**
+ *
+ */
+public class TermsLongFacetExecutor extends FacetExecutor {
+
+ private final IndexNumericFieldData indexFieldData;
+ private final TermsFacet.ComparatorType comparatorType;
+ private final int shardSize;
+ private final int size;
+ private final SearchScript script;
+ private final ImmutableSet<BytesRef> excluded;
+
+ final Recycler.V<LongIntOpenHashMap> facets;
+ long missing;
+ long total;
+
+ public TermsLongFacetExecutor(IndexNumericFieldData indexFieldData, int size, int shardSize, TermsFacet.ComparatorType comparatorType, boolean allTerms, SearchContext context,
+ ImmutableSet<BytesRef> excluded, SearchScript script, CacheRecycler cacheRecycler) {
+ this.indexFieldData = indexFieldData;
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.script = script;
+ this.excluded = excluded;
+ this.facets = cacheRecycler.longIntMap(-1);
+
+ if (allTerms) {
+ for (AtomicReaderContext readerContext : context.searcher().getTopReaderContext().leaves()) {
+ int maxDoc = readerContext.reader().maxDoc();
+ LongValues values = indexFieldData.load(readerContext).getLongValues();
+ if (values instanceof LongValues.WithOrdinals) {
+ LongValues.WithOrdinals valuesWithOrds = (LongValues.WithOrdinals) values;
+ Ordinals.Docs ordinals = valuesWithOrds.ordinals();
+ for (long ord = Ordinals.MIN_ORDINAL; ord < ordinals.getMaxOrd(); ord++) {
+ facets.v().putIfAbsent(valuesWithOrds.getValueByOrd(ord), 0);
+ }
+ } else {
+ for (int docId = 0; docId < maxDoc; docId++) {
+ final int numValues = values.setDocument(docId);
+ final LongIntOpenHashMap v = facets.v();
+ for (int i = 0; i < numValues; i++) {
+ v.putIfAbsent(values.nextValue(), 0);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ if (facets.v().isEmpty()) {
+ facets.release();
+ return new InternalLongTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalLongTermsFacet.LongEntry>of(), missing, total);
+ } else {
+ LongIntOpenHashMap facetEntries = facets.v();
+ final boolean[] states = facets.v().allocated;
+ final long[] keys = facets.v().keys;
+ final int[] values = facets.v().values;
+ if (size < EntryPriorityQueue.LIMIT) {
+ EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ ordered.insertWithOverflow(new InternalLongTermsFacet.LongEntry(keys[i], values[i]));
+ }
+ }
+ InternalLongTermsFacet.LongEntry[] list = new InternalLongTermsFacet.LongEntry[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = (InternalLongTermsFacet.LongEntry) ordered.pop();
+ }
+ facets.release();
+ return new InternalLongTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
+ } else {
+ BoundedTreeSet<InternalLongTermsFacet.LongEntry> ordered = new BoundedTreeSet<InternalLongTermsFacet.LongEntry>(comparatorType.comparator(), shardSize);
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ ordered.add(new InternalLongTermsFacet.LongEntry(keys[i], values[i]));
+ }
+ }
+ facets.release();
+ return new InternalLongTermsFacet(facetName, comparatorType, size, ordered, missing, total);
+ }
+ }
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final StaticAggregatorValueProc aggregator;
+ private LongValues values;
+
+ public Collector() {
+ if (script == null && excluded.isEmpty()) {
+ aggregator = new StaticAggregatorValueProc(facets.v());
+ } else {
+ aggregator = new AggregatorValueProc(facets.v(), excluded, script);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getLongValues();
+ if (script != null) {
+ script.setNextReader(context);
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ aggregator.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ TermsLongFacetExecutor.this.missing = aggregator.missing();
+ TermsLongFacetExecutor.this.total = aggregator.total();
+ }
+ }
+
+ public static class AggregatorValueProc extends StaticAggregatorValueProc {
+
+ private final SearchScript script;
+
+ private final LongOpenHashSet excluded;
+
+ public AggregatorValueProc(LongIntOpenHashMap facets, Set<BytesRef> excluded, SearchScript script) {
+ super(facets);
+ this.script = script;
+ if (excluded == null || excluded.isEmpty()) {
+ this.excluded = null;
+ } else {
+ this.excluded = new LongOpenHashSet(excluded.size());
+ for (BytesRef s : excluded) {
+ this.excluded.add(Long.parseLong(s.utf8ToString()));
+ }
+ }
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ if (excluded != null && excluded.contains(value)) {
+ return;
+ }
+ if (script != null) {
+ script.setNextDocId(docId);
+ script.setNextVar("term", value);
+ Object scriptValue = script.run();
+ if (scriptValue == null) {
+ return;
+ }
+ if (scriptValue instanceof Boolean) {
+ if (!((Boolean) scriptValue)) {
+ return;
+ }
+ } else {
+ value = ((Number) scriptValue).longValue();
+ }
+ }
+ super.onValue(docId, value);
+ }
+ }
+
+ public static class StaticAggregatorValueProc extends LongFacetAggregatorBase {
+
+ private final LongIntOpenHashMap facets;
+
+ public StaticAggregatorValueProc(LongIntOpenHashMap facets) {
+ this.facets = facets;
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ facets.addTo(value, 1);
+ }
+
+ public final LongIntOpenHashMap facets() {
+ return facets;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/FieldsTermsStringFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/FieldsTermsStringFacetExecutor.java
new file mode 100644
index 0000000..df8dc8b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/FieldsTermsStringFacetExecutor.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+/**
+ *
+ */
+public class FieldsTermsStringFacetExecutor extends FacetExecutor {
+
+ private final InternalStringTermsFacet.ComparatorType comparatorType;
+ private final int size;
+ private final int shardSize;
+ private final IndexFieldData[] indexFieldDatas;
+ private final SearchScript script;
+ private final HashedAggregator aggregator;
+ long missing;
+ long total;
+
+ public FieldsTermsStringFacetExecutor(FieldMapper[] fieldMappers, int size, int shardSize, InternalStringTermsFacet.ComparatorType comparatorType,
+ boolean allTerms, SearchContext context, ImmutableSet<BytesRef> excluded, Pattern pattern, SearchScript script) {
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.script = script;
+ this.indexFieldDatas = new IndexFieldData[fieldMappers.length];
+ for (int i = 0; i < fieldMappers.length; i++) {
+ FieldMapper mapper = fieldMappers[i];
+ indexFieldDatas[i] = context.fieldData().getForField(mapper);
+ }
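+ // use the plain aggregator when nothing filters the terms; otherwise every
+ // value is routed through the regex/script-aware variant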
+ if (excluded.isEmpty() && pattern == null && script == null) {
+ aggregator = new HashedAggregator();
+ } else {
+ aggregator = new HashedScriptAggregator(excluded, pattern, script);
+ }
+
+ if (allTerms) {
+ for (int i = 0; i < fieldMappers.length; i++) {
+ TermsStringFacetExecutor.loadAllTerms(context, indexFieldDatas[i], aggregator);
+ }
+ }
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(aggregator);
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ try {
+ return HashedAggregator.buildFacet(facetName, size, shardSize, missing, total, comparatorType, aggregator);
+ } finally {
+ aggregator.release();
+ }
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final HashedAggregator aggregator;
+ private BytesValues[] values;
+
+ public Collector(HashedAggregator aggregator) {
+ values = new BytesValues[indexFieldDatas.length];
+ this.aggregator = aggregator;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ for (int i = 0; i < indexFieldDatas.length; i++) {
+ values[i] = indexFieldDatas[i].load(context).getBytesValues(true);
+ }
+ if (script != null) {
+ script.setNextReader(context);
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ for (int i = 0; i < values.length; i++) {
+ aggregator.onDoc(doc, values[i]);
+ }
+ }
+
+ @Override
+ public void postCollection() {
+ FieldsTermsStringFacetExecutor.this.missing = aggregator.missing();
+ FieldsTermsStringFacetExecutor.this.total = aggregator.total();
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/HashedAggregator.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/HashedAggregator.java
new file mode 100644
index 0000000..5e46dd4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/HashedAggregator.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.support.EntryPriorityQueue;
+
+import java.util.Arrays;
+
+public class HashedAggregator {
+ private int missing;
+ private int total;
+ private final HashCount hash;
+ private final HashCount assertHash = getAssertHash();
+
+ public HashedAggregator() {
+ hash = new BytesRefHashHashCount(new BytesRefHash());
+ }
+
+ public void onDoc(int docId, BytesValues values) {
+ final int length = values.setDocument(docId);
+ int pendingMissing = 1;
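+ // pendingMissing flips to 0 once the doc yields a value, so a doc with no
+ // values at all is counted as missing exactly once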
+ total += length;
+ for (int i = 0; i < length; i++) {
+ final BytesRef value = values.nextValue();
+ onValue(docId, value, values.currentValueHash(), values);
+ pendingMissing = 0;
+ }
+ missing += pendingMissing;
+ }
+
+ public void addValue(BytesRef value, int hashCode, BytesValues values) {
+ final boolean added = hash.addNoCount(value, hashCode, values);
+ assert assertHash.addNoCount(value, hashCode, values) == added : "asserting counter diverged from current counter - value: "
+ + value + " hash: " + hashCode;
+ }
+
+ protected void onValue(int docId, BytesRef value, int hashCode, BytesValues values) {
+ final boolean added = hash.add(value, hashCode, values);
+ // note: we must make a deep copy here, since the incoming value could have
+ // been modified by a script
+ assert assertHash.add(BytesRef.deepCopyOf(value), hashCode, values) == added : "asserting counter diverged from current counter - value: "
+ + value + " hash: " + hashCode;
+ }
+
+ public final int missing() {
+ return missing;
+ }
+
+ public final int total() {
+ return total;
+ }
+
+ public final boolean isEmpty() {
+ return hash.size() == 0;
+ }
+
+ public BytesRefCountIterator getIter() {
+ assert hash.size() == assertHash.size();
+ return hash.iter();
+ }
+
+ public void release() {
+ hash.release();
+ }
+
+ public interface BytesRefCountIterator {
+ BytesRef next();
+
+ BytesRef makeSafe();
+
+ int count();
+
+ boolean shared();
+ }
+
+ public static InternalFacet buildFacet(String facetName, int size, int shardSize, long missing, long total, TermsFacet.ComparatorType comparatorType,
+ HashedAggregator aggregator) {
+ if (aggregator.isEmpty()) {
+ return new InternalStringTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalStringTermsFacet.TermEntry>of(), missing, total);
+ } else {
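+ // below LIMIT a fixed-capacity priority queue is used, presumably because
+ // its backing array is cheap to allocate at small sizes; larger shard sizes
+ // fall back to a bounded tree set that grows on demand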
+ if (shardSize < EntryPriorityQueue.LIMIT) {
+ EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
+ BytesRefCountIterator iter = aggregator.getIter();
+ while (iter.next() != null) {
+ ordered.insertWithOverflow(new InternalStringTermsFacet.TermEntry(iter.makeSafe(), iter.count()));
+ // maybe we can survive with a 0-copy here if we keep the
+ // bytes ref hash around?
+ }
+ InternalStringTermsFacet.TermEntry[] list = new InternalStringTermsFacet.TermEntry[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = ((InternalStringTermsFacet.TermEntry) ordered.pop());
+ }
+ return new InternalStringTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
+ } else {
+ BoundedTreeSet<InternalStringTermsFacet.TermEntry> ordered = new BoundedTreeSet<InternalStringTermsFacet.TermEntry>(comparatorType.comparator(), shardSize);
+ BytesRefCountIterator iter = aggregator.getIter();
+ while (iter.next() != null) {
+ ordered.add(new InternalStringTermsFacet.TermEntry(iter.makeSafe(), iter.count()));
+ // maybe we can survive with a 0-copy here if we keep the
+ // bytes ref hash around?
+ }
+ return new InternalStringTermsFacet(facetName, comparatorType, size, ordered, missing, total);
+ }
+ }
+ }
+
+ private HashCount getAssertHash() {
+ HashCount count = null;
+ assert (count = new AssertingHashCount()) != null;
+ return count;
+ }
+
+ private interface HashCount {
+ boolean add(BytesRef value, int hashCode, BytesValues values);
+
+ boolean addNoCount(BytesRef value, int hashCode, BytesValues values);
+
+ void release();
+
+ int size();
+
+ BytesRefCountIterator iter();
+ }
+
+ private static final class BytesRefHashHashCount implements HashCount {
+ private final BytesRefHash hash;
+ private int[] counts = new int[10];
+
+ public BytesRefHashHashCount(BytesRefHash hash) {
+ this.hash = hash;
+ }
+
+ @Override
+ public boolean add(BytesRef value, int hashCode, BytesValues values) {
+ int key = hash.add(value, hashCode);
+ if (key < 0) {
+ key = ((-key) - 1);
+ } else if (key >= counts.length) {
+ counts = ArrayUtil.grow(counts, key + 1);
+ }
+ return (counts[key]++) == 0;
+ }
+
+ @Override
+ public boolean addNoCount(BytesRef value, int hashCode, BytesValues values) {
+ int key = hash.add(value, hashCode);
+ final boolean added = key >= 0;
+ if (key < 0) {
+ key = ((-key) - 1);
+ } else if (key >= counts.length) {
+ counts = ArrayUtil.grow(counts, key + 1);
+ }
+ return added;
+ }
+
+ @Override
+ public BytesRefCountIterator iter() {
+ return new BytesRefCountIteratorImpl();
+ }
+
+ public final class BytesRefCountIteratorImpl implements BytesRefCountIterator {
+ final BytesRef spare = new BytesRef();
+ private final int size;
+ private int current = 0;
+ private int currentCount = -1;
+
+ BytesRefCountIteratorImpl() {
+ this.size = hash.size();
+ }
+
+ @Override
+ public BytesRef next() {
+ if (current < size) {
+ currentCount = counts[current];
+ hash.get(current++, spare);
+ return spare;
+ }
+ currentCount = -1;
+ return null;
+ }
+
+ @Override
+ public BytesRef makeSafe() {
+ return BytesRef.deepCopyOf(spare);
+ }
+
+ @Override
+ public int count() {
+ return currentCount;
+ }
+
+ @Override
+ public boolean shared() {
+ return true;
+ }
+ }
+
+ @Override
+ public int size() {
+ return hash.size();
+ }
+
+ @Override
+ public void release() {
+ hash.close();
+ }
+
+ }
+
+ // simple implementation used only for assertions
+ private static final class AssertingHashCount implements HashCount {
+ private final ObjectIntOpenHashMap<HashedBytesRef> valuesAndCount = new ObjectIntOpenHashMap<HashedBytesRef>();
+ private HashedBytesRef spare = new HashedBytesRef();
+
+ @Override
+ public boolean add(BytesRef value, int hashCode, BytesValues values) {
+ int adjustedValue = valuesAndCount.addTo(spare.reset(value, hashCode), 1);
+ assert adjustedValue >= 1;
+            if (adjustedValue == 1) { // only when the spare was newly added do we allocate a fresh instance
+ spare.bytes = BytesRef.deepCopyOf(value);
+ spare = new HashedBytesRef();
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public int size() {
+ return valuesAndCount.size();
+ }
+
+ @Override
+ public BytesRefCountIterator iter() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void release() {
+ }
+
+ @Override
+ public boolean addNoCount(BytesRef value, int hashCode, BytesValues values) {
+ if (!valuesAndCount.containsKey(spare.reset(value, hashCode))) {
+ valuesAndCount.addTo(spare.reset(BytesRef.deepCopyOf(value), hashCode), 0);
+ spare = new HashedBytesRef(); // reset the reference since we just added to the hash
+ return true;
+ }
+ return false;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/HashedScriptAggregator.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/HashedScriptAggregator.java
new file mode 100644
index 0000000..e7bc5a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/HashedScriptAggregator.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.script.SearchScript;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public final class HashedScriptAggregator extends HashedAggregator {
+
+ private final ImmutableSet<BytesRef> excluded;
+ private final Matcher matcher;
+ private final SearchScript script;
+ private final CharsRef spare = new CharsRef();
+ private final BytesRef scriptSpare = new BytesRef();
+ private final boolean convert;
+
+ public HashedScriptAggregator(ImmutableSet<BytesRef> excluded, Pattern pattern, SearchScript script) {
+ this.excluded = excluded;
+ this.matcher = pattern != null ? pattern.matcher("") : null;
+ this.script = script;
+ this.convert = script != null || matcher != null;
+ }
+
+ @Override
+ public void addValue(BytesRef value, int hashCode, BytesValues values) {
+ if (accept(value)) {
+ super.addValue(value, hashCode, values);
+ }
+ }
+
+ private boolean accept(BytesRef value) {
+ if (excluded != null && excluded.contains(value)) {
+ return false;
+ }
+ if(convert) {
+ // only convert if we need to and only once per doc...
+ UnicodeUtil.UTF8toUTF16(value, spare);
+ if (matcher != null) {
+ assert convert : "regexp: [convert == false] but should be true";
+ assert value.utf8ToString().equals(spare.toString()) : "not converted";
+ return matcher.reset(spare).matches();
+ }
+ }
+ return true;
+ }
+
+ @Override
+ protected void onValue(int docId, BytesRef value, int hashCode, BytesValues values) {
+ if (accept(value)) {
+ if (script != null) {
+ assert convert : "script: [convert == false] but should be true";
+ assert value.utf8ToString().equals(spare.toString()) : "not converted";
+ script.setNextDocId(docId);
+ // LUCENE 4 UPGRADE: needs optimization -- maybe a CharSequence
+ // does the job here?
+ // we only create that string if we really need
+ script.setNextVar("term", spare.toString());
+ Object scriptValue = script.run();
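+                // a null result drops the value, a Boolean acts as an
+                // accept/reject filter on the original term, and any other
+                // result replaces the term with its string form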
+ if (scriptValue == null) {
+ return;
+ }
+ if (scriptValue instanceof Boolean) {
+ if (!((Boolean) scriptValue)) {
+ return;
+ }
+ } else {
+ scriptSpare.copyChars(scriptValue.toString());
+ hashCode = scriptSpare.hashCode();
+ super.onValue(docId, scriptSpare, hashCode, values);
+ return;
+ }
+ }
+ assert convert || (matcher == null && script == null);
+ super.onValue(docId, value, hashCode, values);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/InternalStringTermsFacet.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/InternalStringTermsFacet.java
new file mode 100644
index 0000000..414808b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/InternalStringTermsFacet.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.BytesText;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.terms.InternalTermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * A terms facet over string values, holding per-shard term/count entries that
+ * are reduced across shards on the coordinating node.
+ */
+public class InternalStringTermsFacet extends InternalTermsFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("tTerms"));
+
+ public static void registerStream() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readTermsFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ public static class TermEntry implements Entry {
+
+ private Text term;
+ private int count;
+
+ public TermEntry(String term, int count) {
+ this.term = new StringText(term);
+ this.count = count;
+ }
+
+ public TermEntry(BytesRef term, int count) {
+ this.term = new BytesText(new BytesArray(term));
+ this.count = count;
+ }
+
+ public TermEntry(Text term, int count) {
+ this.term = term;
+ this.count = count;
+ }
+
+ @Override
+ public Text getTerm() {
+ return term;
+ }
+
+ @Override
+ public Number getTermAsNumber() {
+ return Double.parseDouble(term.string());
+ }
+
+ @Override
+ public int getCount() {
+ return count;
+ }
+
+ @Override
+ public int compareTo(Entry o) {
+ int i = this.term.compareTo(o.getTerm());
+ if (i == 0) {
+ i = count - o.getCount();
+ if (i == 0) {
+                    // identity-based tie-break so distinct entries with equal term and count are not collapsed by a TreeSet
+                    i = System.identityHashCode(this) - System.identityHashCode(o);
+ }
+ }
+ return i;
+ }
+ }
+
+ int requiredSize;
+ long missing;
+ long total;
+ Collection<TermEntry> entries = ImmutableList.of();
+ ComparatorType comparatorType;
+
+ InternalStringTermsFacet() {
+ }
+
+ public InternalStringTermsFacet(String name, ComparatorType comparatorType, int requiredSize, Collection<TermEntry> entries, long missing, long total) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.requiredSize = requiredSize;
+ this.entries = entries;
+ this.missing = missing;
+ this.total = total;
+ }
+
+ @Override
+ public List<TermEntry> getEntries() {
+ if (!(entries instanceof List)) {
+ entries = ImmutableList.copyOf(entries);
+ }
+ return (List<TermEntry>) entries;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) entries.iterator();
+ }
+
+ @Override
+ public long getMissingCount() {
+ return this.missing;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.total;
+ }
+
+ @Override
+ public long getOtherCount() {
+ long other = total;
+ for (Entry entry : entries) {
+ other -= entry.getCount();
+ }
+ return other;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ InternalStringTermsFacet facet = (InternalStringTermsFacet) facets.get(0);
+ facet.trimExcessEntries();
+ return facet;
+ }
+
+ InternalStringTermsFacet first = null;
+
+ Recycler.V<ObjectIntOpenHashMap<Text>> aggregated = context.cacheRecycler().objectIntMap(-1);
+ long missing = 0;
+ long total = 0;
+ for (Facet facet : facets) {
+ InternalTermsFacet termsFacet = (InternalTermsFacet) facet;
+ missing += termsFacet.getMissingCount();
+ total += termsFacet.getTotalCount();
+
+ if (!(termsFacet instanceof InternalStringTermsFacet)) {
+            // if one of the facets is of a different type, let it drive the reduction
+            // (the facets iterated so far most likely represent unmapped fields;
+            // if not, a class cast exception will be thrown)
+ return termsFacet.reduce(context);
+ }
+
+ if (first == null) {
+ first = (InternalStringTermsFacet) termsFacet;
+ }
+
+ for (Entry entry : termsFacet.getEntries()) {
+ aggregated.v().addTo(entry.getTerm(), entry.getCount());
+ }
+ }
+
+ BoundedTreeSet<TermEntry> ordered = new BoundedTreeSet<TermEntry>(first.comparatorType.comparator(), first.requiredSize);
+ ObjectIntOpenHashMap<Text> aggregatedEntries = aggregated.v();
+
+ final boolean[] states = aggregatedEntries.allocated;
+ Object[] keys = aggregatedEntries.keys;
+ int[] values = aggregatedEntries.values;
+ for (int i = 0; i < aggregatedEntries.allocated.length; i++) {
+ if (states[i]) {
+ Text key = (Text) keys[i];
+ ordered.add(new TermEntry(key, values[i]));
+ }
+ }
+ first.entries = ordered;
+ first.missing = missing;
+ first.total = total;
+
+ aggregated.release();
+
+ return first;
+ }
+
+ private void trimExcessEntries() {
+ if (requiredSize >= entries.size()) {
+ return;
+ }
+
+ if (entries instanceof List) {
+ entries = ((List) entries).subList(0, requiredSize);
+ return;
+ }
+
+ int i = 0;
+ for (Iterator<TermEntry> iter = entries.iterator(); iter.hasNext();) {
+ iter.next();
+ if (i++ >= requiredSize) {
+ iter.remove();
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString MISSING = new XContentBuilderString("missing");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString OTHER = new XContentBuilderString("other");
+ static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ static final XContentBuilderString TERM = new XContentBuilderString("term");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, TermsFacet.TYPE);
+ builder.field(Fields.MISSING, missing);
+ builder.field(Fields.TOTAL, total);
+ builder.field(Fields.OTHER, getOtherCount());
+ builder.startArray(Fields.TERMS);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TERM, entry.getTerm());
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalStringTermsFacet readTermsFacet(StreamInput in) throws IOException {
+ InternalStringTermsFacet facet = new InternalStringTermsFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ requiredSize = in.readVInt();
+ missing = in.readVLong();
+ total = in.readVLong();
+
+ int size = in.readVInt();
+ entries = new ArrayList<TermEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new TermEntry(in.readText(), in.readVInt()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(requiredSize);
+ out.writeVLong(missing);
+ out.writeVLong(total);
+
+ out.writeVInt(entries.size());
+ for (Entry entry : entries) {
+ out.writeText(entry.getTerm());
+ out.writeVInt(entry.getCount());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/ScriptTermsStringFieldFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/ScriptTermsStringFieldFacetExecutor.java
new file mode 100644
index 0000000..0cb9353
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/ScriptTermsStringFieldFacetExecutor.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.support.EntryPriorityQueue;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * A terms facet executor that derives terms from a script rather than from a field.
+ */
+public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
+
+ private final InternalStringTermsFacet.ComparatorType comparatorType;
+ private final int size;
+ private final int shardSize;
+ private final SearchScript script;
+ private final Matcher matcher;
+ private final ImmutableSet<BytesRef> excluded;
+ private final int numberOfShards;
+
+ final Recycler.V<ObjectIntOpenHashMap<BytesRef>> facets;
+ long missing;
+ long total;
+
+ public ScriptTermsStringFieldFacetExecutor(int size, int shardSize, InternalStringTermsFacet.ComparatorType comparatorType, SearchContext context,
+ ImmutableSet<BytesRef> excluded, Pattern pattern, String scriptLang, String script, Map<String, Object> params,
+ CacheRecycler cacheRecycler) {
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.numberOfShards = context.numberOfShards();
+ this.script = context.scriptService().search(context.lookup(), scriptLang, script, params);
+
+ this.excluded = excluded;
+ this.matcher = pattern != null ? pattern.matcher("") : null;
+
+ this.facets = cacheRecycler.objectIntMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(matcher, excluded, script, facets.v());
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ if (facets.v().isEmpty()) {
+ facets.release();
+ return new InternalStringTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalStringTermsFacet.TermEntry>of(), missing, total);
+ } else {
+ final boolean[] states = facets.v().allocated;
+ final Object[] keys = facets.v().keys;
+ final int[] values = facets.v().values;
+ if (shardSize < EntryPriorityQueue.LIMIT) {
+ EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ BytesRef key = (BytesRef) keys[i];
+ ordered.insertWithOverflow(new InternalStringTermsFacet.TermEntry(key, values[i]));
+ }
+ }
+ InternalStringTermsFacet.TermEntry[] list = new InternalStringTermsFacet.TermEntry[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = ((InternalStringTermsFacet.TermEntry) ordered.pop());
+ }
+ facets.release();
+ return new InternalStringTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
+ } else {
+ BoundedTreeSet<InternalStringTermsFacet.TermEntry> ordered = new BoundedTreeSet<InternalStringTermsFacet.TermEntry>(comparatorType.comparator(), shardSize);
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ BytesRef key = (BytesRef) keys[i];
+ ordered.add(new InternalStringTermsFacet.TermEntry(key, values[i]));
+ }
+ }
+ facets.release();
+ return new InternalStringTermsFacet(facetName, comparatorType, size, ordered, missing, total);
+ }
+ }
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final Matcher matcher;
+ private final ImmutableSet<BytesRef> excluded;
+ private final SearchScript script;
+ private final ObjectIntOpenHashMap<BytesRef> facets;
+
+ long missing;
+ long total;
+
+ Collector(Matcher matcher, ImmutableSet<BytesRef> excluded, SearchScript script, ObjectIntOpenHashMap<BytesRef> facets) {
+ this.matcher = matcher;
+ this.excluded = excluded;
+ this.script = script;
+ this.facets = facets;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ script.setScorer(scorer);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ script.setNextReader(context);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ script.setNextDocId(doc);
+ Object o = script.run();
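+            // the script may return null (counted as missing), a single
+            // value, an Iterable or an array; each matching value counts once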
+ if (o == null) {
+ missing++;
+ return;
+ }
+ if (o instanceof Iterable) {
+ boolean found = false;
+ for (Object o1 : ((Iterable) o)) {
+ String value = o1.toString();
+ if (match(value)) {
+ found = true;
+ // LUCENE 4 UPGRADE: should be possible to convert directly to BR
+ facets.addTo(new BytesRef(value), 1);
+ total++;
+ }
+ }
+ if (!found) {
+ missing++;
+ }
+ } else if (o instanceof Object[]) {
+ boolean found = false;
+ for (Object o1 : ((Object[]) o)) {
+ String value = o1.toString();
+ if (match(value)) {
+ found = true;
+ // LUCENE 4 UPGRADE: should be possible to convert directly to BR
+ facets.addTo(new BytesRef(value), 1);
+ total++;
+ }
+ }
+ if (!found) {
+ missing++;
+ }
+ } else {
+ String value = o.toString();
+ if (match(value)) {
+ // LUCENE 4 UPGRADE: should be possible to convert directly to BR
+ facets.addTo(new BytesRef(value), 1);
+ total++;
+ } else {
+ missing++;
+ }
+ }
+ }
+
+ @Override
+ public void postCollection() {
+ ScriptTermsStringFieldFacetExecutor.this.missing = missing;
+ ScriptTermsStringFieldFacetExecutor.this.total = total;
+ }
+
+ private boolean match(String value) {
+ if (excluded != null && excluded.contains(new BytesRef(value))) {
+ return false;
+ }
+ if (matcher != null && !matcher.reset(value).matches()) {
+ return false;
+ }
+ return true;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringFacetExecutor.java
new file mode 100644
index 0000000..f51640f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringFacetExecutor.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+/**
+ * A terms facet executor for string fields, backed by a {@link HashedAggregator}.
+ */
+public class TermsStringFacetExecutor extends FacetExecutor {
+
+ private final IndexFieldData indexFieldData;
+ private final TermsFacet.ComparatorType comparatorType;
+ private final SearchScript script;
+
+ private final int shardSize;
+ private final int size;
+
+ // the aggregation map
+ long missing;
+ long total;
+ private final boolean allTerms;
+ private final HashedAggregator aggregator;
+
+ public TermsStringFacetExecutor(IndexFieldData indexFieldData, int size, int shardSize, TermsFacet.ComparatorType comparatorType, boolean allTerms, SearchContext context,
+ ImmutableSet<BytesRef> excluded, Pattern pattern, SearchScript script) {
+ this.indexFieldData = indexFieldData;
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.script = script;
+ this.allTerms = allTerms;
+
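+        // the plain aggregator is the fast path; exclusions, patterns and
+        // scripts all need the per-value checks in HashedScriptAggregator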
+ if (excluded.isEmpty() && pattern == null && script == null) {
+ aggregator = new HashedAggregator();
+ } else {
+ aggregator = new HashedScriptAggregator(excluded, pattern, script);
+ }
+
+ if (allTerms) {
+ loadAllTerms(context, indexFieldData, aggregator);
+ }
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector(aggregator, allTerms);
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ try {
+ return HashedAggregator.buildFacet(facetName, size, shardSize, missing, total, comparatorType, aggregator);
+ } finally {
+ aggregator.release();
+ }
+ }
+
+ final class Collector extends FacetExecutor.Collector {
+
+ private final HashedAggregator aggregator;
+ private final boolean allTerms;
+ private BytesValues values;
+
+ Collector(HashedAggregator aggregator, boolean allTerms) {
+ this.aggregator = aggregator;
+ this.allTerms = allTerms;
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ values = indexFieldData.load(context).getBytesValues(true);
+ if (script != null) {
+ script.setNextReader(context);
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ aggregator.onDoc(doc, values);
+ }
+
+ @Override
+ public void postCollection() {
+ TermsStringFacetExecutor.this.missing = aggregator.missing();
+ TermsStringFacetExecutor.this.total = aggregator.total();
+ }
+ }
+
+ static void loadAllTerms(SearchContext context, IndexFieldData indexFieldData, HashedAggregator aggregator) {
+
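+        // pre-seed the aggregator with every term of each segment so that
+        // zero-count entries survive when allTerms is requested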
+ for (AtomicReaderContext readerContext : context.searcher().getTopReaderContext().leaves()) {
+ int maxDoc = readerContext.reader().maxDoc();
+ if (indexFieldData instanceof IndexFieldData.WithOrdinals) {
+ BytesValues.WithOrdinals values = ((IndexFieldData.WithOrdinals) indexFieldData).load(readerContext).getBytesValues(false);
+ Ordinals.Docs ordinals = values.ordinals();
+ // 0 = docs with no value for field, so start from 1 instead
+ for (long ord = Ordinals.MIN_ORDINAL; ord < ordinals.getMaxOrd(); ord++) {
+ BytesRef value = values.getValueByOrd(ord);
+ aggregator.addValue(value, value.hashCode(), values);
+ }
+ } else {
+ BytesValues values = indexFieldData.load(readerContext).getBytesValues(true);
+ for (int docId = 0; docId < maxDoc; docId++) {
+ final int size = values.setDocument(docId);
+ for (int i = 0; i < size; i++) {
+ final BytesRef value = values.nextValue();
+ aggregator.addValue(value, values.currentValueHash(), values);
+ }
+ }
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringOrdinalsFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringOrdinalsFacetExecutor.java
new file mode 100644
index 0000000..c89dfca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/strings/TermsStringOrdinalsFacetExecutor.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.strings;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.common.collect.BoundedTreeSet;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.IntArray;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.support.EntryPriorityQueue;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * A terms facet executor for string fields with ordinals, counting per-segment
+ * ordinals and merging the per-reader aggregators when the facet is built.
+ */
+public class TermsStringOrdinalsFacetExecutor extends FacetExecutor {
+
+ private final IndexFieldData.WithOrdinals indexFieldData;
+
+ final CacheRecycler cacheRecycler;
+ private final TermsFacet.ComparatorType comparatorType;
+ private final int size;
+ private final int shardSize;
+ private final int minCount;
+ private final ImmutableSet<BytesRef> excluded;
+ private final Matcher matcher;
+ final int ordinalsCacheAbove;
+
+ final List<ReaderAggregator> aggregators;
+ long missing;
+ long total;
+
+ public TermsStringOrdinalsFacetExecutor(IndexFieldData.WithOrdinals indexFieldData, int size, int shardSize, TermsFacet.ComparatorType comparatorType, boolean allTerms, SearchContext context,
+ ImmutableSet<BytesRef> excluded, Pattern pattern, int ordinalsCacheAbove) {
+ this.indexFieldData = indexFieldData;
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.ordinalsCacheAbove = ordinalsCacheAbove;
+
+ if (excluded == null || excluded.isEmpty()) {
+ this.excluded = null;
+ } else {
+ this.excluded = excluded;
+ }
+ this.matcher = pattern != null ? pattern.matcher("") : null;
+
+ // minCount is offset by -1
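+        // with allTerms, minCount == -1 lets zero-count terms pass the
+        // "count > minCount" checks when the facet is built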
+ if (allTerms) {
+ minCount = -1;
+ } else {
+ minCount = 0;
+ }
+
+ this.cacheRecycler = context.cacheRecycler();
+
+ this.aggregators = new ArrayList<ReaderAggregator>(context.searcher().getIndexReader().leaves().size());
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ final CharsRef spare = new CharsRef();
+ AggregatorPriorityQueue queue = new AggregatorPriorityQueue(aggregators.size());
+ for (ReaderAggregator aggregator : aggregators) {
+ if (aggregator.nextPosition()) {
+ queue.add(aggregator);
+ }
+ }
+
+        // ugly: the same merge logic is repeated below, once with an optimized priority queue for smaller shard sizes
+ if (shardSize < EntryPriorityQueue.LIMIT) {
+            // optimize by using a size-bounded priority queue
+ EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
+
+ while (queue.size() > 0) {
+ ReaderAggregator agg = queue.top();
+                BytesRef value = agg.copyCurrent(); // copy the current value, since it ends up being pushed into the queue (can we get around this?)
+ int count = 0;
+ do {
+ count += agg.counts.get(agg.position);
+ if (agg.nextPosition()) {
+ agg = queue.updateTop();
+ } else {
+ // we are done with this reader
+ queue.pop();
+ agg = queue.top();
+ }
+ } while (agg != null && value.equals(agg.current));
+
+ if (count > minCount) {
+ if (excluded != null && excluded.contains(value)) {
+ continue;
+ }
+ if (matcher != null) {
+ UnicodeUtil.UTF8toUTF16(value, spare);
+ assert spare.toString().equals(value.utf8ToString());
+ if (!matcher.reset(spare).matches()) {
+ continue;
+ }
+ }
+ InternalStringTermsFacet.TermEntry entry = new InternalStringTermsFacet.TermEntry(value, count);
+ ordered.insertWithOverflow(entry);
+ }
+ }
+ InternalStringTermsFacet.TermEntry[] list = new InternalStringTermsFacet.TermEntry[ordered.size()];
+ for (int i = ordered.size() - 1; i >= 0; i--) {
+ list[i] = (InternalStringTermsFacet.TermEntry) ordered.pop();
+ }
+
+ return new InternalStringTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
+ }
+
+ BoundedTreeSet<InternalStringTermsFacet.TermEntry> ordered = new BoundedTreeSet<InternalStringTermsFacet.TermEntry>(comparatorType.comparator(), shardSize);
+
+ while (queue.size() > 0) {
+ ReaderAggregator agg = queue.top();
+            BytesRef value = agg.copyCurrent(); // copy the current value, since it ends up being pushed into the queue (can we work around that?)
+ int count = 0;
+ do {
+ count += agg.counts.get(agg.position);
+ if (agg.nextPosition()) {
+ agg = queue.updateTop();
+ } else {
+ // we are done with this reader
+ queue.pop();
+ agg = queue.top();
+ }
+ } while (agg != null && value.equals(agg.current));
+
+ if (count > minCount) {
+ if (excluded != null && excluded.contains(value)) {
+ continue;
+ }
+ if (matcher != null) {
+ UnicodeUtil.UTF8toUTF16(value, spare);
+ assert spare.toString().equals(value.utf8ToString());
+ if (!matcher.reset(spare).matches()) {
+ continue;
+ }
+ }
+ InternalStringTermsFacet.TermEntry entry = new InternalStringTermsFacet.TermEntry(value, count);
+ ordered.add(entry);
+ }
+ }
+
+ return new InternalStringTermsFacet(facetName, comparatorType, size, ordered, missing, total);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private long missing;
+ private long total;
+ private BytesValues.WithOrdinals values;
+ private ReaderAggregator current;
+ private Ordinals.Docs ordinals;
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ if (current != null) {
+ missing += current.counts.get(0);
+ total += current.total - current.counts.get(0);
+ if (current.values.ordinals().getNumOrds() > 0) {
+ aggregators.add(current);
+ }
+ }
+ values = indexFieldData.load(context).getBytesValues(false);
+ current = new ReaderAggregator(values, ordinalsCacheAbove, cacheRecycler);
+ ordinals = values.ordinals();
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final int length = ordinals.setDocument(doc);
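+            // ordinal 0 is reserved for "missing": if the doc yields no
+            // ordinal we credit one missing value, otherwise none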
+ int missing = 1;
+ for (int i = 0; i < length; i++) {
+ current.onOrdinal(doc, ordinals.nextOrd());
+ missing = 0;
+ }
+ current.incrementMissing(missing);
+ }
+
+ @Override
+ public void postCollection() {
+ if (current != null) {
+ missing += current.counts.get(0);
+ total += current.total - current.counts.get(0);
+ // if we have values for this one, add it
+ if (current.values.ordinals().getNumOrds() > 0) {
+ aggregators.add(current);
+ }
+ current = null;
+ }
+ TermsStringOrdinalsFacetExecutor.this.missing = missing;
+ TermsStringOrdinalsFacetExecutor.this.total = total;
+ }
+ }
+
+ public static final class ReaderAggregator {
+
+ private final long maxOrd;
+
+ final BytesValues.WithOrdinals values;
+ final IntArray counts;
+ long position = 0;
+ BytesRef current;
+ int total;
+
+
+ public ReaderAggregator(BytesValues.WithOrdinals values, int ordinalsCacheLimit, CacheRecycler cacheRecycler) {
+ this.values = values;
+ this.maxOrd = values.ordinals().getMaxOrd();
+ this.counts = BigArrays.newIntArray(maxOrd);
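+            // slot 0 of counts doubles as the missing counter (ordinal 0 = no value)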
+ }
+
+ final void onOrdinal(int docId, long ordinal) {
+ counts.increment(ordinal, 1);
+ total++;
+ }
+
+ final void incrementMissing(int numMissing) {
+ counts.increment(0, numMissing);
+ total += numMissing;
+ }
+
+ public boolean nextPosition() {
+ if (++position >= maxOrd) {
+ return false;
+ }
+ current = values.getValueByOrd(position);
+ return true;
+ }
+
+ public BytesRef copyCurrent() {
+ return values.copyShared();
+ }
+ }
+
+ public static class AggregatorPriorityQueue extends PriorityQueue<ReaderAggregator> {
+
+ public AggregatorPriorityQueue(int size) {
+ super(size);
+ }
+
+ @Override
+ protected boolean lessThan(ReaderAggregator a, ReaderAggregator b) {
+ return a.current.compareTo(b.current) < 0;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/support/EntryPriorityQueue.java b/src/main/java/org/elasticsearch/search/facet/terms/support/EntryPriorityQueue.java
new file mode 100644
index 0000000..f39906b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/support/EntryPriorityQueue.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.terms.support;
+
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+
+import java.util.Comparator;
+
+public class EntryPriorityQueue extends PriorityQueue<TermsFacet.Entry> {
+
+ public static final int LIMIT = 5000;
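+    // executors use this queue only when shardSize < LIMIT; larger shard
+    // sizes fall back to a BoundedTreeSet instead of preallocating a big heap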
+
+ private final Comparator<TermsFacet.Entry> comparator;
+
+ public EntryPriorityQueue(int size, Comparator<TermsFacet.Entry> comparator) {
+ super(size);
+ this.comparator = comparator;
+ }
+
+ @Override
+ protected boolean lessThan(TermsFacet.Entry a, TermsFacet.Entry b) {
+ return comparator.compare(a, b) > 0; // reverse, since we reverse again when adding to a list
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/terms/unmapped/UnmappedFieldExecutor.java b/src/main/java/org/elasticsearch/search/facet/terms/unmapped/UnmappedFieldExecutor.java
new file mode 100644
index 0000000..623a2cd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/terms/unmapped/UnmappedFieldExecutor.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms.unmapped;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.strings.InternalStringTermsFacet;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * A facet executor that only aggregates the missing count for unmapped fields and builds a terms facet over it
+ */
+public class UnmappedFieldExecutor extends FacetExecutor {
+
+ final int size;
+ final TermsFacet.ComparatorType comparatorType;
+
+ long missing;
+
+ public UnmappedFieldExecutor(int size, TermsFacet.ComparatorType comparatorType) {
+ this.size = size;
+ this.comparatorType = comparatorType;
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ Collection<InternalStringTermsFacet.TermEntry> entries = ImmutableList.of();
+ return new InternalStringTermsFacet(facetName, comparatorType, size, entries, missing, 0);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+
+ final class Collector extends FacetExecutor.Collector {
+
+ private long missing;
+
+ @Override
+ public void postCollection() {
+ UnmappedFieldExecutor.this.missing = missing;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ missing++;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/InternalTermsStatsFacet.java b/src/main/java/org/elasticsearch/search/facet/termsstats/InternalTermsStatsFacet.java
new file mode 100644
index 0000000..42ea009
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/InternalTermsStatsFacet.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats;
+
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.termsstats.doubles.InternalTermsStatsDoubleFacet;
+import org.elasticsearch.search.facet.termsstats.longs.InternalTermsStatsLongFacet;
+import org.elasticsearch.search.facet.termsstats.strings.InternalTermsStatsStringFacet;
+
+public abstract class InternalTermsStatsFacet extends InternalFacet implements TermsStatsFacet {
+
+ public static void registerStreams() {
+ InternalTermsStatsStringFacet.registerStream();
+ InternalTermsStatsLongFacet.registerStream();
+ InternalTermsStatsDoubleFacet.registerStream();
+ }
+
+ protected InternalTermsStatsFacet() {
+ }
+
+ protected InternalTermsStatsFacet(String facetName) {
+ super(facetName);
+ }
+
+ @Override
+ public final String getType() {
+ return TYPE;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacet.java b/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacet.java
new file mode 100644
index 0000000..c64cb1a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacet.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.search.facet.Facet;
+
+import java.util.Comparator;
+import java.util.List;
+
+public interface TermsStatsFacet extends Facet, Iterable<TermsStatsFacet.Entry> {
+
+ public static final String TYPE = "terms_stats";
+
+
+ /**
+ * The number of docs missing a value.
+ */
+ long getMissingCount();
+
+ /**
+ * The terms and counts.
+ */
+ List<? extends TermsStatsFacet.Entry> getEntries();
+
+ /**
+ * Controls how the terms facets are ordered.
+ */
+ public static enum ComparatorType {
+ /**
+ * Order by the (higher) count of each term.
+ */
+ COUNT((byte) 0, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ int i = (o2.getCount() < o1.getCount() ? -1 : (o1.getCount() == o2.getCount() ? 0 : 1));
+ if (i == 0) {
+ i = o2.compareTo(o1);
+ }
+ return i;
+ }
+ }),
+ /**
+ * Order by the (lower) count of each term.
+ */
+ REVERSE_COUNT((byte) 1, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return -COUNT.comparator().compare(o1, o2);
+ }
+ }),
+ /**
+ * Order by the terms.
+ */
+ TERM((byte) 2, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ int i = o1.compareTo(o2);
+ if (i == 0) {
+ i = COUNT.comparator().compare(o1, o2);
+ }
+ return i;
+ }
+ }),
+ /**
+         * Order by the terms, in reverse order.
+ */
+ REVERSE_TERM((byte) 3, new Comparator<Entry>() {
+
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return -TERM.comparator().compare(o1, o2);
+ }
+ }),
+
+ /**
+ * Order by the (higher) total of each term.
+ */
+ TOTAL((byte) 4, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ int i = -Double.compare(o1.getTotal(), o2.getTotal());
+ if (i == 0) {
+ i = COUNT.comparator().compare(o1, o2);
+ }
+ return i;
+ }
+ }),
+
+ /**
+ * Order by the (lower) total of each term.
+ */
+ REVERSE_TOTAL((byte) 5, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return -TOTAL.comparator().compare(o1, o2);
+ }
+ }),
+
+ /**
+ * Order by the (lower) min of each term.
+ */
+ MIN((byte) 6, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ int i = Double.compare(o1.getMin(), o2.getMin());
+ if (i == 0) {
+ i = COUNT.comparator().compare(o1, o2);
+ }
+ return i;
+ }
+ }),
+ /**
+ * Order by the (higher) min of each term.
+ */
+ REVERSE_MIN((byte) 7, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return -MIN.comparator().compare(o1, o2);
+ }
+ }),
+ /**
+ * Order by the (higher) max of each term.
+ */
+ MAX((byte) 8, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ int i = -Double.compare(o1.getMax(), o2.getMax());
+ if (i == 0) {
+ i = COUNT.comparator().compare(o1, o2);
+ }
+ return i;
+ }
+ }),
+ /**
+ * Order by the (lower) max of each term.
+ */
+ REVERSE_MAX((byte) 9, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return -MAX.comparator().compare(o1, o2);
+ }
+ }),
+ /**
+ * Order by the (higher) mean of each term.
+ */
+ MEAN((byte) 10, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ int i = -Double.compare(o1.getMean(), o2.getMean());
+ if (i == 0) {
+ i = COUNT.comparator().compare(o1, o2);
+ }
+ return i;
+ }
+ }),
+ /**
+ * Order by the (lower) mean of each term.
+ */
+ REVERSE_MEAN((byte) 11, new Comparator<Entry>() {
+ @Override
+ public int compare(Entry o1, Entry o2) {
+ // push nulls to the end
+ if (o1 == null) {
+ if (o2 == null) {
+ return 0;
+ }
+ return 1;
+ }
+ if (o2 == null) {
+ return -1;
+ }
+ return -MEAN.comparator().compare(o1, o2);
+ }
+        });
+
+ private final byte id;
+
+ private final Comparator<Entry> comparator;
+
+ ComparatorType(byte id, Comparator<Entry> comparator) {
+ this.id = id;
+ this.comparator = comparator;
+ }
+
+ public byte id() {
+ return this.id;
+ }
+
+ public Comparator<Entry> comparator() {
+ return comparator;
+ }
+
+ public static ComparatorType fromId(byte id) {
+ if (id == COUNT.id()) {
+ return COUNT;
+ } else if (id == REVERSE_COUNT.id()) {
+ return REVERSE_COUNT;
+ } else if (id == TERM.id()) {
+ return TERM;
+ } else if (id == REVERSE_TERM.id()) {
+ return REVERSE_TERM;
+ } else if (id == TOTAL.id()) {
+ return TOTAL;
+ } else if (id == REVERSE_TOTAL.id()) {
+ return REVERSE_TOTAL;
+ } else if (id == MIN.id()) {
+ return MIN;
+ } else if (id == REVERSE_MIN.id()) {
+ return REVERSE_MIN;
+ } else if (id == MAX.id()) {
+ return MAX;
+ } else if (id == REVERSE_MAX.id()) {
+ return REVERSE_MAX;
+ } else if (id == MEAN.id()) {
+ return MEAN;
+ } else if (id == REVERSE_MEAN.id()) {
+ return REVERSE_MEAN;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for terms facet comparator [" + id + "]");
+ }
+
+ public static ComparatorType fromString(String type) {
+ if ("count".equals(type)) {
+ return COUNT;
+ } else if ("term".equals(type)) {
+ return TERM;
+ } else if ("reverse_count".equals(type) || "reverseCount".equals(type)) {
+ return REVERSE_COUNT;
+ } else if ("reverse_term".equals(type) || "reverseTerm".equals(type)) {
+ return REVERSE_TERM;
+ } else if ("total".equals(type)) {
+ return TOTAL;
+ } else if ("reverse_total".equals(type) || "reverseTotal".equals(type)) {
+ return REVERSE_TOTAL;
+ } else if ("min".equals(type)) {
+ return MIN;
+ } else if ("reverse_min".equals(type) || "reverseMin".equals(type)) {
+ return REVERSE_MIN;
+ } else if ("max".equals(type)) {
+ return MAX;
+ } else if ("reverse_max".equals(type) || "reverseMax".equals(type)) {
+ return REVERSE_MAX;
+ } else if ("mean".equals(type)) {
+ return MEAN;
+ } else if ("reverse_mean".equals(type) || "reverseMean".equals(type)) {
+ return REVERSE_MEAN;
+ }
+ throw new ElasticsearchIllegalArgumentException("No type argument match for terms stats facet comparator [" + type + "]");
+ }
+ }
+
+ public interface Entry extends Comparable<Entry> {
+
+ Text getTerm();
+
+ Number getTermAsNumber();
+
+ long getCount();
+
+ long getTotalCount();
+
+ double getMin();
+
+ double getMax();
+
+ double getTotal();
+
+ double getMean();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetBuilder.java b/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetBuilder.java
new file mode 100644
index 0000000..4df6f57
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetBuilder.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilderException;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A builder for {@code terms_stats} facets, which compute statistics over a value
+ * field (or a value script) for each term of a key field.
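+ * <p>
+ * A minimal usage sketch (field names are illustrative):
+ * <pre>
+ * new TermsStatsFacetBuilder("price_by_tag")
+ *         .keyField("tag")
+ *         .valueField("price")
+ *         .order(TermsStatsFacet.ComparatorType.TOTAL)
+ *         .size(10);
+ * </pre>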
+ */
+public class TermsStatsFacetBuilder extends FacetBuilder {
+
+ private String keyField;
+ private String valueField;
+ private int size = -1;
+ private int shardSize = -1;
+ private TermsStatsFacet.ComparatorType comparatorType;
+
+ private String script;
+ private String lang;
+ private Map<String, Object> params;
+
+ /**
+ * Constructs a new terms stats facet builder under the provided facet name.
+ */
+ public TermsStatsFacetBuilder(String name) {
+ super(name);
+ }
+
+ public TermsStatsFacetBuilder keyField(String keyField) {
+ this.keyField = keyField;
+ return this;
+ }
+
+ public TermsStatsFacetBuilder valueField(String valueField) {
+ this.valueField = valueField;
+ return this;
+ }
+
+ /**
+     * The order in which to return the facets. Defaults to {@link TermsStatsFacet.ComparatorType#COUNT}.
+ */
+ public TermsStatsFacetBuilder order(TermsStatsFacet.ComparatorType comparatorType) {
+ this.comparatorType = comparatorType;
+ return this;
+ }
+
+ /**
+ * Sets the size of the result.
+ */
+ public TermsStatsFacetBuilder size(int size) {
+ this.size = size;
+ return this;
+ }
+
+ /**
+     * Sets the number of terms returned from each shard. The higher the number, the more
+     * accurate the results. The shard size cannot be smaller than {@link #size(int) size};
+     * if it is, it falls back to being treated as equal to size.
+ */
+ public TermsStatsFacetBuilder shardSize(int shardSize) {
+ this.shardSize = shardSize;
+ return this;
+ }
+
+ /**
+ * Marks all terms to be returned, even ones with 0 counts.
+ */
+ public TermsStatsFacetBuilder allTerms() {
+ this.size = 0;
+ return this;
+ }
+
+ /**
+     * A value script to be executed (instead of a value field) whose numeric results
+     * will be used to compute the totals.
+ */
+ public TermsStatsFacetBuilder valueScript(String script) {
+ this.script = script;
+ return this;
+ }
+
+ /**
+ * The language of the script.
+ */
+ public TermsStatsFacetBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * A parameter that will be passed to the script.
+ *
+ * @param name The name of the script parameter.
+ * @param value The value of the script parameter.
+ */
+ public TermsStatsFacetBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (keyField == null) {
+ throw new SearchSourceBuilderException("key field must be set on terms facet for facet [" + name + "]");
+ }
+ if (valueField == null && script == null) {
+ throw new SearchSourceBuilderException("value field or value script must be set on terms facet for facet [" + name + "]");
+ }
+ builder.startObject(name);
+
+ builder.startObject(TermsStatsFacet.TYPE);
+ builder.field("key_field", keyField);
+ if (valueField != null) {
+ builder.field("value_field", valueField);
+ }
+ if (script != null) {
+ builder.field("value_script", script);
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ }
+
+ if (comparatorType != null) {
+ builder.field("order", comparatorType.name().toLowerCase(Locale.ROOT));
+ }
+
+ if (size != -1) {
+ builder.field("size", size);
+ }
+ if (shardSize > size) {
+ builder.field("shard_size", shardSize);
+ }
+
+ builder.endObject();
+
+ addFilterFacetAndGlobal(builder, params);
+
+ builder.endObject();
+
+ return builder;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetParser.java b/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetParser.java
new file mode 100644
index 0000000..80634ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/TermsStatsFacetParser.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.FacetParser;
+import org.elasticsearch.search.facet.FacetPhaseExecutionException;
+import org.elasticsearch.search.facet.termsstats.doubles.TermsStatsDoubleFacetExecutor;
+import org.elasticsearch.search.facet.termsstats.longs.TermsStatsLongFacetExecutor;
+import org.elasticsearch.search.facet.termsstats.strings.TermsStatsStringFacetExecutor;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class TermsStatsFacetParser extends AbstractComponent implements FacetParser {
+
+ @Inject
+ public TermsStatsFacetParser(Settings settings) {
+ super(settings);
+ InternalTermsStatsFacet.registerStreams();
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{TermsStatsFacet.TYPE, "termsStats"};
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultMainMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor.Mode defaultGlobalMode() {
+ return FacetExecutor.Mode.COLLECTOR;
+ }
+
+ @Override
+ public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
+ String keyField = null;
+ String valueField = null;
+ int size = 10;
+ int shardSize = -1;
+ TermsStatsFacet.ComparatorType comparatorType = TermsStatsFacet.ComparatorType.COUNT;
+ String scriptLang = null;
+ String script = null;
+ Map<String, Object> params = null;
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ params = parser.map();
+ }
+ } else if (token.isValue()) {
+ if ("key_field".equals(currentFieldName) || "keyField".equals(currentFieldName)) {
+ keyField = parser.text();
+ } else if ("value_field".equals(currentFieldName) || "valueField".equals(currentFieldName)) {
+ valueField = parser.text();
+ } else if ("script_field".equals(currentFieldName) || "scriptField".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("value_script".equals(currentFieldName) || "valueScript".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("size".equals(currentFieldName)) {
+ size = parser.intValue();
+ } else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
+ shardSize = parser.intValue();
+ } else if ("all_terms".equals(currentFieldName) || "allTerms".equals(currentFieldName)) {
+ if (parser.booleanValue()) {
+ size = 0; // indicates all terms
+ }
+ } else if ("order".equals(currentFieldName) || "comparator".equals(currentFieldName)) {
+ comparatorType = TermsStatsFacet.ComparatorType.fromString(parser.text());
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ }
+ }
+ }
+
+ if (keyField == null) {
+ throw new FacetPhaseExecutionException(facetName, "[key_field] is required to be set for terms stats facet");
+ }
+ if (valueField == null && script == null) {
+ throw new FacetPhaseExecutionException(facetName, "either [value_field] or [script] are required to be set for terms stats facet");
+ }
+
+ FieldMapper keyMapper = context.smartNameFieldMapper(keyField);
+ if (keyMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "failed to find mapping for " + keyField);
+ }
+ IndexFieldData keyIndexFieldData = context.fieldData().getForField(keyMapper);
+
+ if (shardSize < size) {
+ shardSize = size;
+ }
+
+ IndexNumericFieldData valueIndexFieldData = null;
+ SearchScript valueScript = null;
+ if (valueField != null) {
+ FieldMapper fieldMapper = context.smartNameFieldMapper(valueField);
+ if (fieldMapper == null) {
+ throw new FacetPhaseExecutionException(facetName, "failed to find mapping for " + valueField);
+ } else if (!(fieldMapper instanceof NumberFieldMapper)) {
+ throw new FacetPhaseExecutionException(facetName, "value_field [" + valueField + "] isn't a number field, but a " + fieldMapper.fieldDataType().getType());
+ }
+ valueIndexFieldData = context.fieldData().getForField(fieldMapper);
+ } else {
+ valueScript = context.scriptService().search(context.lookup(), scriptLang, script, params);
+ }
+
+ if (keyIndexFieldData instanceof IndexNumericFieldData) {
+ IndexNumericFieldData keyIndexNumericFieldData = (IndexNumericFieldData) keyIndexFieldData;
+ if (keyIndexNumericFieldData.getNumericType().isFloatingPoint()) {
+ return new TermsStatsDoubleFacetExecutor(keyIndexNumericFieldData, valueIndexFieldData, valueScript, size, shardSize, comparatorType, context);
+ } else {
+ return new TermsStatsLongFacetExecutor(keyIndexNumericFieldData, valueIndexFieldData, valueScript, size, shardSize, comparatorType, context);
+ }
+ }
+ return new TermsStatsStringFacetExecutor(keyIndexFieldData, valueIndexFieldData, valueScript, size, shardSize, comparatorType, context);
+ }
+} \ No newline at end of file
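
On the wire, the parser above consumes a facet body like the one sketched below with XContentBuilder (facet and field names are illustrative). Note that key_field is mandatory, value_field and value_script are alternatives, and a shard_size smaller than size is silently raised to size:

// Sketch of a facet definition TermsStatsFacetParser can consume; produces:
// {"category_stats":{"terms_stats":{"key_field":"category","value_field":"price","order":"total","size":10}}}
// All names are hypothetical.
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class TermsStatsRequestSketch {
    public static String facetBody() throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject()
                .startObject("category_stats")
                .startObject("terms_stats")
                .field("key_field", "category") // required; the parser throws without it
                .field("value_field", "price")  // or "value_script" plus optional "lang"/"params"
                .field("order", "total")
                .field("size", 10)              // 0 (or all_terms: true) means return every term
                .endObject()
                .endObject()
                .endObject();
        return builder.string();
    }
}
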
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/doubles/InternalTermsStatsDoubleFacet.java b/src/main/java/org/elasticsearch/search/facet/termsstats/doubles/InternalTermsStatsDoubleFacet.java
new file mode 100644
index 0000000..262376f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/doubles/InternalTermsStatsDoubleFacet.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats.doubles;
+
+import com.carrotsearch.hppc.DoubleObjectOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.termsstats.InternalTermsStatsFacet;
+
+import java.io.IOException;
+import java.util.*;
+
+public class InternalTermsStatsDoubleFacet extends InternalTermsStatsFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("dTS"));
+
+ public static void registerStream() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readTermsStatsFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ public InternalTermsStatsDoubleFacet() {
+ }
+
+ public static class DoubleEntry implements Entry {
+
+ double term;
+ long count;
+ long totalCount;
+ double total;
+ double min;
+ double max;
+
+ public DoubleEntry(double term, long count, long totalCount, double total, double min, double max) {
+ this.term = term;
+ this.count = count;
+ this.total = total;
+ this.totalCount = totalCount;
+ this.min = min;
+ this.max = max;
+ }
+
+ @Override
+ public Text getTerm() {
+ return new StringText(Double.toString(term));
+ }
+
+ @Override
+ public Number getTermAsNumber() {
+ return term;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.totalCount;
+ }
+
+ @Override
+ public double getMin() {
+ return this.min;
+ }
+
+ @Override
+ public double getMax() {
+ return max;
+ }
+
+ @Override
+ public double getTotal() {
+ return total;
+ }
+
+ @Override
+ public double getMean() {
+ if (totalCount == 0) {
+ return 0;
+ }
+ return total / totalCount;
+ }
+
+ @Override
+ public int compareTo(Entry o) {
+ DoubleEntry other = (DoubleEntry) o;
+ return (term < other.term ? -1 : (term == other.term ? 0 : 1));
+ }
+ }
+
+ int requiredSize;
+ long missing;
+ Collection<DoubleEntry> entries = ImmutableList.of();
+ ComparatorType comparatorType;
+
+ public InternalTermsStatsDoubleFacet(String name, ComparatorType comparatorType, int requiredSize, Collection<DoubleEntry> entries, long missing) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.requiredSize = requiredSize;
+ this.entries = entries;
+ this.missing = missing;
+ }
+
+ @Override
+ public List<DoubleEntry> getEntries() {
+ if (!(entries instanceof List)) {
+ entries = ImmutableList.copyOf(entries);
+ }
+ return (List<DoubleEntry>) entries;
+ }
+
+ List<DoubleEntry> mutableList() {
+ if (!(entries instanceof List)) {
+ entries = new ArrayList<DoubleEntry>(entries);
+ }
+ return (List<DoubleEntry>) entries;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) entries.iterator();
+ }
+
+ @Override
+ public long getMissingCount() {
+ return this.missing;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ InternalTermsStatsDoubleFacet tsFacet = (InternalTermsStatsDoubleFacet) facets.get(0);
+ if (requiredSize == 0) {
+ // we need to sort it here!
+ if (!tsFacet.entries.isEmpty()) {
+ List<DoubleEntry> entries = tsFacet.mutableList();
+ CollectionUtil.timSort(entries, comparatorType.comparator());
+ }
+ }
+ tsFacet.trimExcessEntries();
+ return facets.get(0);
+ }
+ long missing = 0;
+ Recycler.V<DoubleObjectOpenHashMap<DoubleEntry>> map = context.cacheRecycler().doubleObjectMap(-1);
+ for (Facet facet : facets) {
+ InternalTermsStatsDoubleFacet tsFacet = (InternalTermsStatsDoubleFacet) facet;
+ missing += tsFacet.missing;
+ for (Entry entry : tsFacet) {
+ DoubleEntry doubleEntry = (DoubleEntry) entry;
+ DoubleEntry current = map.v().get(doubleEntry.term);
+ if (current != null) {
+ current.count += doubleEntry.count;
+ current.totalCount += doubleEntry.totalCount;
+ current.total += doubleEntry.total;
+ if (doubleEntry.min < current.min) {
+ current.min = doubleEntry.min;
+ }
+ if (doubleEntry.max > current.max) {
+ current.max = doubleEntry.max;
+ }
+ } else {
+ map.v().put(doubleEntry.term, doubleEntry);
+ }
+ }
+ }
+
+ // sort
+ if (requiredSize == 0) { // all terms
+ DoubleEntry[] entries1 = map.v().values().toArray(DoubleEntry.class);
+ Arrays.sort(entries1, comparatorType.comparator());
+ map.release();
+ return new InternalTermsStatsDoubleFacet(getName(), comparatorType, requiredSize, Arrays.asList(entries1), missing);
+ } else {
+ Object[] values = map.v().values;
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+ List<DoubleEntry> ordered = new ArrayList<DoubleEntry>(map.v().size());
+ for (int i = 0; i < requiredSize; i++) {
+ DoubleEntry value = (DoubleEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+ map.release();
+ return new InternalTermsStatsDoubleFacet(getName(), comparatorType, requiredSize, ordered, missing);
+ }
+ }
+
+ private void trimExcessEntries() {
+ if (requiredSize == 0 || requiredSize >= entries.size()) {
+ return;
+ }
+
+ if (entries instanceof List) {
+ entries = ((List) entries).subList(0, requiredSize);
+ return;
+ }
+
+ int i = 0;
+ for (Iterator<DoubleEntry> iter = entries.iterator(); iter.hasNext();) {
+ iter.next();
+ if (i++ >= requiredSize) {
+ iter.remove();
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString MISSING = new XContentBuilderString("missing");
+ static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ static final XContentBuilderString TERM = new XContentBuilderString("term");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, InternalTermsStatsFacet.TYPE);
+ builder.field(Fields.MISSING, missing);
+ builder.startArray(Fields.TERMS);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TERM, ((DoubleEntry) entry).term);
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalTermsStatsDoubleFacet readTermsStatsFacet(StreamInput in) throws IOException {
+ InternalTermsStatsDoubleFacet facet = new InternalTermsStatsDoubleFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ requiredSize = in.readVInt();
+ missing = in.readVLong();
+
+ int size = in.readVInt();
+ entries = new ArrayList<DoubleEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new DoubleEntry(in.readDouble(), in.readVLong(), in.readVLong(), in.readDouble(), in.readDouble(), in.readDouble()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(requiredSize);
+ out.writeVLong(missing);
+
+ out.writeVInt(entries.size());
+ for (Entry entry : entries) {
+ out.writeDouble(((DoubleEntry) entry).term);
+ out.writeVLong(entry.getCount());
+ out.writeVLong(entry.getTotalCount());
+ out.writeDouble(entry.getTotal());
+ out.writeDouble(entry.getMin());
+ out.writeDouble(entry.getMax());
+ }
+ }
+} \ No newline at end of file
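
The reduce step above merges per-shard entries that share a term: count, totalCount, and total accumulate, min and max take the extremes, and the mean is derived on read as total / totalCount. A self-contained sketch of that combination rule, in plain Java and independent of the Elasticsearch types:

// Minimal sketch of the per-term merge rule used in reduce().
public class EntryMergeSketch {
    static final class Entry {
        long count, totalCount;
        double total, min, max;
        Entry(long count, long totalCount, double total, double min, double max) {
            this.count = count; this.totalCount = totalCount;
            this.total = total; this.min = min; this.max = max;
        }
    }

    static void mergeInto(Entry current, Entry other) {
        current.count += other.count;           // counts accumulate
        current.totalCount += other.totalCount;
        current.total += other.total;           // totals accumulate
        if (other.min < current.min) current.min = other.min; // extremes win
        if (other.max > current.max) current.max = other.max;
    }

    public static void main(String[] args) {
        Entry shard1 = new Entry(2, 2, 30.0, 10.0, 20.0);
        Entry shard2 = new Entry(1, 1, 5.0, 5.0, 5.0);
        mergeInto(shard1, shard2);
        // count=3, totalCount=3, total=35.0, min=5.0, max=20.0, mean=35.0/3
        System.out.println(shard1.total / shard1.totalCount);
    }
}
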
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/doubles/TermsStatsDoubleFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/termsstats/doubles/TermsStatsDoubleFacetExecutor.java
new file mode 100644
index 0000000..1dabf89
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/doubles/TermsStatsDoubleFacetExecutor.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats.doubles;
+
+import com.carrotsearch.hppc.DoubleObjectOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+public class TermsStatsDoubleFacetExecutor extends FacetExecutor {
+
+ private final TermsStatsFacet.ComparatorType comparatorType;
+
+ final IndexNumericFieldData keyIndexFieldData;
+ final IndexNumericFieldData valueIndexFieldData;
+ final SearchScript script;
+
+ private final int size;
+ private final int shardSize;
+
+ final Recycler.V<DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry>> entries;
+ long missing;
+
+ public TermsStatsDoubleFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, SearchScript script,
+ int size, int shardSize, TermsStatsFacet.ComparatorType comparatorType, SearchContext context) {
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueIndexFieldData = valueIndexFieldData;
+ this.script = script;
+
+ this.entries = context.cacheRecycler().doubleObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ if (entries.v().isEmpty()) {
+ entries.release();
+ return new InternalTermsStatsDoubleFacet(facetName, comparatorType, size, ImmutableList.<InternalTermsStatsDoubleFacet.DoubleEntry>of(), missing);
+ }
+ if (size == 0) { // all terms
+ // all terms, just return the collection, we will sort it on the way back
+ List<InternalTermsStatsDoubleFacet.DoubleEntry> doubleEntries = new ArrayList<InternalTermsStatsDoubleFacet.DoubleEntry>(entries.v().size());
+ boolean[] states = entries.v().allocated;
+ Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ doubleEntries.add((InternalTermsStatsDoubleFacet.DoubleEntry) values[i]);
+ }
+ }
+ entries.release();
+ return new InternalTermsStatsDoubleFacet(facetName, comparatorType, 0 /* indicates all terms*/, doubleEntries, missing);
+ }
+ Object[] values = entries.v().values;
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+
+ int limit = shardSize;
+ List<InternalTermsStatsDoubleFacet.DoubleEntry> ordered = Lists.newArrayList();
+ for (int i = 0; i < limit; i++) {
+ InternalTermsStatsDoubleFacet.DoubleEntry value = (InternalTermsStatsDoubleFacet.DoubleEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+
+ entries.release();
+ return new InternalTermsStatsDoubleFacet(facetName, comparatorType, size, ordered, missing);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final Aggregator aggregator;
+ private DoubleValues keyValues;
+
+ public Collector() {
+ if (script == null) {
+ this.aggregator = new Aggregator(entries.v());
+ } else {
+ this.aggregator = new ScriptAggregator(entries.v(), script);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getDoubleValues();
+ if (script != null) {
+ script.setNextReader(context);
+ } else {
+ aggregator.valueFieldData = valueIndexFieldData.load(context).getDoubleValues();
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ aggregator.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ TermsStatsDoubleFacetExecutor.this.missing = aggregator.missing;
+ }
+ }
+
+ public static class Aggregator extends DoubleFacetAggregatorBase {
+
+ final DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries;
+ int missing;
+ DoubleValues valueFieldData;
+ final ValueAggregator valueAggregator = new ValueAggregator();
+
+ public Aggregator(DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries) {
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ InternalTermsStatsDoubleFacet.DoubleEntry doubleEntry = entries.get(value);
+ if (doubleEntry == null) {
+ doubleEntry = new InternalTermsStatsDoubleFacet.DoubleEntry(value, 0, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ entries.put(value, doubleEntry);
+ }
+ doubleEntry.count++;
+ valueAggregator.doubleEntry = doubleEntry;
+ valueAggregator.onDoc(docId, valueFieldData);
+ }
+
+ public static class ValueAggregator extends DoubleFacetAggregatorBase {
+
+ InternalTermsStatsDoubleFacet.DoubleEntry doubleEntry;
+
+ @Override
+ public void onValue(int docId, double value) {
+ if (value < doubleEntry.min) {
+ doubleEntry.min = value;
+ }
+ if (value > doubleEntry.max) {
+ doubleEntry.max = value;
+ }
+ doubleEntry.total += value;
+ doubleEntry.totalCount++;
+ }
+ }
+ }
+
+ public static class ScriptAggregator extends Aggregator {
+
+ private final SearchScript script;
+
+ public ScriptAggregator(DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries, SearchScript script) {
+ super(entries);
+ this.script = script;
+ }
+
+ @Override
+ public void onValue(int docId, double value) {
+ InternalTermsStatsDoubleFacet.DoubleEntry doubleEntry = entries.get(value);
+ if (doubleEntry == null) {
+ doubleEntry = new InternalTermsStatsDoubleFacet.DoubleEntry(value, 1, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ entries.put(value, doubleEntry);
+ } else {
+ doubleEntry.count++;
+ }
+ script.setNextDocId(docId);
+ double valueValue = script.runAsDouble();
+ if (valueValue < doubleEntry.min) {
+ doubleEntry.min = valueValue;
+ }
+ if (valueValue > doubleEntry.max) {
+ doubleEntry.max = valueValue;
+ }
+ doubleEntry.totalCount++;
+ doubleEntry.total += valueValue;
+ }
+ }
+} \ No newline at end of file
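
The aggregators above seed new entries with min = +Infinity and max = -Infinity, so the first value observed for a term becomes both bounds without any special-casing. A small standalone sketch of that invariant:

// Sketch of the min/max seeding used by the aggregators above: starting at
// +Infinity/-Infinity means the first observed value sets both bounds.
public class MinMaxSeedSketch {
    public static void main(String[] args) {
        double min = Double.POSITIVE_INFINITY;
        double max = Double.NEGATIVE_INFINITY;
        for (double value : new double[]{4.5, -1.0, 9.25}) {
            if (value < min) min = value; // first iteration always updates min
            if (value > max) max = value; // first iteration always updates max
        }
        System.out.println("min=" + min + " max=" + max); // min=-1.0 max=9.25
    }
}
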
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/longs/InternalTermsStatsLongFacet.java b/src/main/java/org/elasticsearch/search/facet/termsstats/longs/InternalTermsStatsLongFacet.java
new file mode 100644
index 0000000..79ec6e4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/longs/InternalTermsStatsLongFacet.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats.longs;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.termsstats.InternalTermsStatsFacet;
+
+import java.io.IOException;
+import java.util.*;
+
+public class InternalTermsStatsLongFacet extends InternalTermsStatsFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("lTS"));
+
+ public static void registerStream() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readTermsStatsFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ public InternalTermsStatsLongFacet() {
+ }
+
+ public static class LongEntry implements Entry {
+
+ long term;
+ long count;
+ long totalCount;
+ double total;
+ double min;
+ double max;
+
+ public LongEntry(long term, long count, long totalCount, double total, double min, double max) {
+ this.term = term;
+ this.count = count;
+ this.totalCount = totalCount;
+ this.total = total;
+ this.min = min;
+ this.max = max;
+ }
+
+ @Override
+ public Text getTerm() {
+ return new StringText(Long.toString(term));
+ }
+
+ @Override
+ public Number getTermAsNumber() {
+ return term;
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.totalCount;
+ }
+
+ @Override
+ public double getMin() {
+ return this.min;
+ }
+
+ @Override
+ public double getMax() {
+ return max;
+ }
+
+ @Override
+ public double getTotal() {
+ return total;
+ }
+
+ @Override
+ public double getMean() {
+ if (totalCount == 0) {
+ return 0;
+ }
+ return total / totalCount;
+ }
+
+ @Override
+ public int compareTo(Entry o) {
+ LongEntry other = (LongEntry) o;
+ return (term < other.term ? -1 : (term == other.term ? 0 : 1));
+ }
+ }
+
+ int requiredSize;
+ long missing;
+ Collection<LongEntry> entries = ImmutableList.of();
+ ComparatorType comparatorType;
+
+ public InternalTermsStatsLongFacet(String name, ComparatorType comparatorType, int requiredSize, Collection<LongEntry> entries, long missing) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.requiredSize = requiredSize;
+ this.entries = entries;
+ this.missing = missing;
+ }
+
+ @Override
+ public List<LongEntry> getEntries() {
+ if (!(entries instanceof List)) {
+ entries = ImmutableList.copyOf(entries);
+ }
+ return (List<LongEntry>) entries;
+ }
+
+ List<LongEntry> mutableList() {
+ if (!(entries instanceof List)) {
+ entries = new ArrayList<LongEntry>(entries);
+ }
+ return (List<LongEntry>) entries;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) entries.iterator();
+ }
+
+ @Override
+ public long getMissingCount() {
+ return this.missing;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ InternalTermsStatsLongFacet tsFacet = (InternalTermsStatsLongFacet) facets.get(0);
+ if (requiredSize == 0) {
+ // we need to sort it here!
+ if (!tsFacet.entries.isEmpty()) {
+ List<LongEntry> entries = tsFacet.mutableList();
+ CollectionUtil.timSort(entries, comparatorType.comparator());
+ }
+ }
+ tsFacet.trimExcessEntries();
+ return facets.get(0);
+ }
+ long missing = 0;
+ Recycler.V<LongObjectOpenHashMap<LongEntry>> map = context.cacheRecycler().longObjectMap(-1);
+ for (Facet facet : facets) {
+ InternalTermsStatsLongFacet tsFacet = (InternalTermsStatsLongFacet) facet;
+ missing += tsFacet.missing;
+ for (Entry entry : tsFacet) {
+ LongEntry longEntry = (LongEntry) entry;
+ LongEntry current = map.v().get(longEntry.term);
+ if (current != null) {
+ current.count += longEntry.count;
+ current.totalCount += longEntry.totalCount;
+ current.total += longEntry.total;
+ if (longEntry.min < current.min) {
+ current.min = longEntry.min;
+ }
+ if (longEntry.max > current.max) {
+ current.max = longEntry.max;
+ }
+ } else {
+ map.v().put(longEntry.term, longEntry);
+ }
+ }
+ }
+
+ // sort
+ if (requiredSize == 0) { // all terms
+ LongEntry[] entries1 = map.v().values().toArray(LongEntry.class);
+ Arrays.sort(entries1, comparatorType.comparator());
+ map.release();
+ return new InternalTermsStatsLongFacet(getName(), comparatorType, requiredSize, Arrays.asList(entries1), missing);
+ } else {
+ Object[] values = map.v().values;
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+ List<LongEntry> ordered = new ArrayList<LongEntry>(map.v().size());
+ for (int i = 0; i < requiredSize; i++) {
+ LongEntry value = (LongEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+ map.release();
+ return new InternalTermsStatsLongFacet(getName(), comparatorType, requiredSize, ordered, missing);
+ }
+ }
+
+ private void trimExcessEntries() {
+ if (requiredSize == 0 || requiredSize >= entries.size()) {
+ return;
+ }
+
+ if (entries instanceof List) {
+ entries = ((List) entries).subList(0, requiredSize);
+ return;
+ }
+
+ int i = 0;
+ for (Iterator<LongEntry> iter = entries.iterator(); iter.hasNext();) {
+ iter.next();
+ if (i++ >= requiredSize) {
+ iter.remove();
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString MISSING = new XContentBuilderString("missing");
+ static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ static final XContentBuilderString TERM = new XContentBuilderString("term");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, InternalTermsStatsFacet.TYPE);
+ builder.field(Fields.MISSING, missing);
+ builder.startArray(Fields.TERMS);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TERM, ((LongEntry) entry).term);
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalTermsStatsLongFacet readTermsStatsFacet(StreamInput in) throws IOException {
+ InternalTermsStatsLongFacet facet = new InternalTermsStatsLongFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ requiredSize = in.readVInt();
+ missing = in.readVLong();
+
+ int size = in.readVInt();
+ entries = new ArrayList<LongEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new LongEntry(in.readLong(), in.readVLong(), in.readVLong(), in.readDouble(), in.readDouble(), in.readDouble()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(requiredSize);
+ out.writeVLong(missing);
+
+ out.writeVInt(entries.size());
+ for (Entry entry : entries) {
+ out.writeLong(((LongEntry) entry).term);
+ out.writeVLong(entry.getCount());
+ out.writeVLong(entry.getTotalCount());
+ out.writeDouble(entry.getTotal());
+ out.writeDouble(entry.getMin());
+ out.writeDouble(entry.getMax());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/longs/TermsStatsLongFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/termsstats/longs/TermsStatsLongFacetExecutor.java
new file mode 100644
index 0000000..9f655b2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/longs/TermsStatsLongFacetExecutor.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats.longs;
+
+import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.fielddata.LongValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.LongFacetAggregatorBase;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+public class TermsStatsLongFacetExecutor extends FacetExecutor {
+
+ private final TermsStatsFacet.ComparatorType comparatorType;
+ final IndexNumericFieldData keyIndexFieldData;
+ final IndexNumericFieldData valueIndexFieldData;
+ final SearchScript script;
+
+ private final int size;
+ private final int shardSize;
+
+ final Recycler.V<LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry>> entries;
+ long missing;
+
+ public TermsStatsLongFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, SearchScript script,
+ int size, int shardSize, TermsStatsFacet.ComparatorType comparatorType, SearchContext context) {
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueIndexFieldData = valueIndexFieldData;
+ this.script = script;
+
+ this.entries = context.cacheRecycler().longObjectMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ if (entries.v().isEmpty()) {
+ entries.release();
+ return new InternalTermsStatsLongFacet(facetName, comparatorType, size, ImmutableList.<InternalTermsStatsLongFacet.LongEntry>of(), missing);
+ }
+ if (size == 0) { // all terms
+ // all terms, just return the collection, we will sort it on the way back
+ List<InternalTermsStatsLongFacet.LongEntry> longEntries = new ArrayList<InternalTermsStatsLongFacet.LongEntry>(entries.v().size());
+ boolean[] states = entries.v().allocated;
+ Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ longEntries.add((InternalTermsStatsLongFacet.LongEntry) values[i]);
+ }
+ }
+
+ entries.release();
+ return new InternalTermsStatsLongFacet(facetName, comparatorType, 0 /* indicates all terms*/, longEntries, missing);
+ }
+
+ // over-fetch: each shard returns up to shardSize entries, because terms may be distributed unevenly across shards and a larger per-shard sample improves accuracy
+ Object[] values = entries.v().values;
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+
+ int limit = shardSize;
+ List<InternalTermsStatsLongFacet.LongEntry> ordered = Lists.newArrayList();
+ for (int i = 0; i < limit; i++) {
+ InternalTermsStatsLongFacet.LongEntry value = (InternalTermsStatsLongFacet.LongEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+ entries.release();
+ return new InternalTermsStatsLongFacet(facetName, comparatorType, size, ordered, missing);
+ }
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final Aggregator aggregator;
+ private LongValues keyValues;
+
+ public Collector() {
+ if (script == null) {
+ this.aggregator = new Aggregator(entries.v());
+ } else {
+ this.aggregator = new ScriptAggregator(entries.v(), script);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getLongValues();
+ if (script != null) {
+ script.setNextReader(context);
+ } else {
+ aggregator.valueValues = valueIndexFieldData.load(context).getDoubleValues();
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ aggregator.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ TermsStatsLongFacetExecutor.this.missing = aggregator.missing();
+ }
+ }
+
+ public static class Aggregator extends LongFacetAggregatorBase {
+
+ final LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry> entries;
+ DoubleValues valueValues;
+ final ValueAggregator valueAggregator = new ValueAggregator();
+
+ public Aggregator(LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry> entries) {
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ InternalTermsStatsLongFacet.LongEntry longEntry = entries.get(value);
+ if (longEntry == null) {
+ longEntry = new InternalTermsStatsLongFacet.LongEntry(value, 0, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ entries.put(value, longEntry);
+ }
+ longEntry.count++;
+ valueAggregator.longEntry = longEntry;
+ valueAggregator.onDoc(docId, valueValues);
+ }
+
+ public static final class ValueAggregator extends DoubleFacetAggregatorBase {
+
+ InternalTermsStatsLongFacet.LongEntry longEntry;
+
+ @Override
+ public void onValue(int docId, double value) {
+ if (value < longEntry.min) {
+ longEntry.min = value;
+ }
+ if (value > longEntry.max) {
+ longEntry.max = value;
+ }
+ longEntry.total += value;
+ longEntry.totalCount++;
+ }
+ }
+ }
+
+ public static class ScriptAggregator extends Aggregator {
+
+ private final SearchScript script;
+
+ public ScriptAggregator(LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry> entries, SearchScript script) {
+ super(entries);
+ this.script = script;
+ }
+
+ @Override
+ public void onValue(int docId, long value) {
+ InternalTermsStatsLongFacet.LongEntry longEntry = entries.get(value);
+ if (longEntry == null) {
+ longEntry = new InternalTermsStatsLongFacet.LongEntry(value, 1, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ entries.put(value, longEntry);
+ } else {
+ longEntry.count++;
+ }
+ script.setNextDocId(docId);
+ double valueValue = script.runAsDouble();
+ if (valueValue < longEntry.min) {
+ longEntry.min = valueValue;
+ }
+ if (valueValue > longEntry.max) {
+ longEntry.max = valueValue;
+ }
+ longEntry.totalCount++;
+ longEntry.total += valueValue;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/strings/InternalTermsStatsStringFacet.java b/src/main/java/org/elasticsearch/search/facet/termsstats/strings/InternalTermsStatsStringFacet.java
new file mode 100644
index 0000000..285a92c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/strings/InternalTermsStatsStringFacet.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats.strings;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.text.BytesText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.facet.Facet;
+import org.elasticsearch.search.facet.termsstats.InternalTermsStatsFacet;
+
+import java.io.IOException;
+import java.util.*;
+
+public class InternalTermsStatsStringFacet extends InternalTermsStatsFacet {
+
+ private static final BytesReference STREAM_TYPE = new HashedBytesArray(Strings.toUTF8Bytes("tTS"));
+
+ public static void registerStream() {
+ Streams.registerStream(STREAM, STREAM_TYPE);
+ }
+
+ static Stream STREAM = new Stream() {
+ @Override
+ public Facet readFacet(StreamInput in) throws IOException {
+ return readTermsStatsFacet(in);
+ }
+ };
+
+ @Override
+ public BytesReference streamType() {
+ return STREAM_TYPE;
+ }
+
+ public InternalTermsStatsStringFacet() {
+ }
+
+ public static class StringEntry implements Entry {
+
+ Text term;
+ long count;
+ long totalCount;
+ double total;
+ double min;
+ double max;
+
+ public StringEntry(HashedBytesRef term, long count, long totalCount, double total, double min, double max) {
+ this(new BytesText(new BytesArray(term.bytes)), count, totalCount, total, min, max);
+ }
+
+ public StringEntry(Text term, long count, long totalCount, double total, double min, double max) {
+ this.term = term;
+ this.count = count;
+ this.totalCount = totalCount;
+ this.total = total;
+ this.min = min;
+ this.max = max;
+ }
+
+ @Override
+ public Text getTerm() {
+ return term;
+ }
+
+ @Override
+ public Number getTermAsNumber() {
+ return Double.parseDouble(term.string());
+ }
+
+ @Override
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public long getTotalCount() {
+ return this.totalCount;
+ }
+
+ @Override
+ public double getMin() {
+ return this.min;
+ }
+
+ @Override
+ public double getMax() {
+ return this.max;
+ }
+
+ @Override
+ public double getTotal() {
+ return total;
+ }
+
+ @Override
+ public double getMean() {
+ if (totalCount == 0) {
+ return 0;
+ }
+ return total / totalCount;
+ }
+
+ @Override
+ public int compareTo(Entry other) {
+ return term.compareTo(other.getTerm());
+ }
+ }
+
+ int requiredSize;
+ long missing;
+ Collection<StringEntry> entries = ImmutableList.of();
+ ComparatorType comparatorType;
+
+ public InternalTermsStatsStringFacet(String name, ComparatorType comparatorType, int requiredSize, Collection<StringEntry> entries, long missing) {
+ super(name);
+ this.comparatorType = comparatorType;
+ this.requiredSize = requiredSize;
+ this.entries = entries;
+ this.missing = missing;
+ }
+
+ @Override
+ public List<StringEntry> getEntries() {
+ if (!(entries instanceof List)) {
+ entries = ImmutableList.copyOf(entries);
+ }
+ return (List<StringEntry>) entries;
+ }
+
+ List<StringEntry> mutableList() {
+ if (!(entries instanceof List)) {
+ entries = new ArrayList<StringEntry>(entries);
+ }
+ return (List<StringEntry>) entries;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Iterator<Entry> iterator() {
+ return (Iterator) entries.iterator();
+ }
+
+ @Override
+ public long getMissingCount() {
+ return this.missing;
+ }
+
+ @Override
+ public Facet reduce(ReduceContext context) {
+ List<Facet> facets = context.facets();
+ if (facets.size() == 1) {
+ InternalTermsStatsStringFacet tsFacet = (InternalTermsStatsStringFacet) facets.get(0);
+ if (requiredSize == 0) {
+ // we need to sort it here!
+ if (!tsFacet.entries.isEmpty()) {
+ List<StringEntry> entries = tsFacet.mutableList();
+ CollectionUtil.timSort(entries, comparatorType.comparator());
+ }
+ }
+ tsFacet.trimExcessEntries();
+ return tsFacet;
+ }
+ long missing = 0;
+ Recycler.V<ObjectObjectOpenHashMap<Text, StringEntry>> map = context.cacheRecycler().hashMap(-1);
+ for (Facet facet : facets) {
+ InternalTermsStatsStringFacet tsFacet = (InternalTermsStatsStringFacet) facet;
+ missing += tsFacet.missing;
+ for (Entry entry : tsFacet) {
+ StringEntry stringEntry = (StringEntry) entry;
+ StringEntry current = map.v().get(stringEntry.getTerm());
+ if (current != null) {
+ current.count += stringEntry.count;
+ current.totalCount += stringEntry.totalCount;
+ current.total += stringEntry.total;
+ if (stringEntry.min < current.min) {
+ current.min = stringEntry.min;
+ }
+ if (stringEntry.max > current.max) {
+ current.max = stringEntry.max;
+ }
+ } else {
+ map.v().put(stringEntry.getTerm(), stringEntry);
+ }
+ }
+ }
+
+ // sort
+ if (requiredSize == 0) { // all terms
+ StringEntry[] entries1 = map.v().values().toArray(StringEntry.class);
+ Arrays.sort(entries1, comparatorType.comparator());
+ map.release();
+ return new InternalTermsStatsStringFacet(getName(), comparatorType, requiredSize, Arrays.asList(entries1), missing);
+ } else {
+ Object[] values = map.v().values;
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+ List<StringEntry> ordered = new ArrayList<StringEntry>(Math.min(map.v().size(), requiredSize));
+ for (int i = 0; i < requiredSize; i++) {
+ StringEntry value = (StringEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+ map.release();
+ return new InternalTermsStatsStringFacet(getName(), comparatorType, requiredSize, ordered, missing);
+ }
+ }
+
+ private void trimExcessEntries() {
+ if (requiredSize == 0 || requiredSize >= entries.size()) {
+ return;
+ }
+
+ if (entries instanceof List) {
+ entries = ((List) entries).subList(0, requiredSize);
+ return;
+ }
+
+ int i = 0;
+ for (Iterator<StringEntry> iter = entries.iterator(); iter.hasNext();) {
+ iter.next();
+ if (i++ >= requiredSize) {
+ iter.remove();
+ }
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString MISSING = new XContentBuilderString("missing");
+ static final XContentBuilderString TERMS = new XContentBuilderString("terms");
+ static final XContentBuilderString TERM = new XContentBuilderString("term");
+ static final XContentBuilderString COUNT = new XContentBuilderString("count");
+ static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString MEAN = new XContentBuilderString("mean");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(getName());
+ builder.field(Fields._TYPE, InternalTermsStatsFacet.TYPE);
+ builder.field(Fields.MISSING, missing);
+ builder.startArray(Fields.TERMS);
+ for (Entry entry : entries) {
+ builder.startObject();
+ builder.field(Fields.TERM, entry.getTerm());
+ builder.field(Fields.COUNT, entry.getCount());
+ builder.field(Fields.TOTAL_COUNT, entry.getTotalCount());
+ builder.field(Fields.MIN, entry.getMin());
+ builder.field(Fields.MAX, entry.getMax());
+ builder.field(Fields.TOTAL, entry.getTotal());
+ builder.field(Fields.MEAN, entry.getMean());
+ builder.endObject();
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalTermsStatsStringFacet readTermsStatsFacet(StreamInput in) throws IOException {
+ InternalTermsStatsStringFacet facet = new InternalTermsStatsStringFacet();
+ facet.readFrom(in);
+ return facet;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ comparatorType = ComparatorType.fromId(in.readByte());
+ requiredSize = in.readVInt();
+ missing = in.readVLong();
+
+ int size = in.readVInt();
+ entries = new ArrayList<StringEntry>(size);
+ for (int i = 0; i < size; i++) {
+ entries.add(new StringEntry(in.readText(), in.readVLong(), in.readVLong(), in.readDouble(), in.readDouble(), in.readDouble()));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeByte(comparatorType.id());
+ out.writeVInt(requiredSize);
+ out.writeVLong(missing);
+
+ out.writeVInt(entries.size());
+ for (Entry entry : entries) {
+ out.writeText(entry.getTerm());
+ out.writeVLong(entry.getCount());
+ out.writeVLong(entry.getTotalCount());
+ out.writeDouble(entry.getTotal());
+ out.writeDouble(entry.getMin());
+ out.writeDouble(entry.getMax());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/facet/termsstats/strings/TermsStatsStringFacetExecutor.java b/src/main/java/org/elasticsearch/search/facet/termsstats/strings/TermsStatsStringFacetExecutor.java
new file mode 100644
index 0000000..5be9dbc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/facet/termsstats/strings/TermsStatsStringFacetExecutor.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet.termsstats.strings;
+
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.index.fielddata.DoubleValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
+import org.elasticsearch.search.facet.FacetExecutor;
+import org.elasticsearch.search.facet.InternalFacet;
+import org.elasticsearch.search.facet.terms.strings.HashedAggregator;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+public class TermsStatsStringFacetExecutor extends FacetExecutor {
+
+ private final TermsStatsFacet.ComparatorType comparatorType;
+ final IndexFieldData keyIndexFieldData;
+ final IndexNumericFieldData valueIndexFieldData;
+ final SearchScript script;
+ private final int size;
+ private final int shardSize;
+
+ final Recycler.V<ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry>> entries;
+ long missing;
+
+ public TermsStatsStringFacetExecutor(IndexFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, SearchScript valueScript,
+ int size, int shardSize, TermsStatsFacet.ComparatorType comparatorType, SearchContext context) {
+ this.keyIndexFieldData = keyIndexFieldData;
+ this.valueIndexFieldData = valueIndexFieldData;
+ this.script = valueScript;
+ this.size = size;
+ this.shardSize = shardSize;
+ this.comparatorType = comparatorType;
+
+ this.entries = context.cacheRecycler().hashMap(-1);
+ }
+
+ @Override
+ public Collector collector() {
+ return new Collector();
+ }
+
+ @Override
+ public InternalFacet buildFacet(String facetName) {
+ if (entries.v().isEmpty()) {
+ entries.release();
+ return new InternalTermsStatsStringFacet(facetName, comparatorType, size, ImmutableList.<InternalTermsStatsStringFacet.StringEntry>of(), missing);
+ }
+ if (size == 0) { // all terms
+ // all terms, just return the collection, we will sort it on the way back
+ List<InternalTermsStatsStringFacet.StringEntry> stringEntries = new ArrayList<InternalTermsStatsStringFacet.StringEntry>();
+ final boolean[] states = entries.v().allocated;
+ final Object[] values = entries.v().values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ stringEntries.add((InternalTermsStatsStringFacet.StringEntry) values[i]);
+ }
+ }
+            return new InternalTermsStatsStringFacet(facetName, comparatorType, 0 /* indicates all terms */, stringEntries, missing);
+ }
+ Object[] values = entries.v().values;
+ Arrays.sort(values, (Comparator) comparatorType.comparator());
+
+ List<InternalTermsStatsStringFacet.StringEntry> ordered = Lists.newArrayList();
+        int limit = Math.min(shardSize, values.length); // defensive: never walk past the slot array
+        for (int i = 0; i < limit; i++) {
+ InternalTermsStatsStringFacet.StringEntry value = (InternalTermsStatsStringFacet.StringEntry) values[i];
+ if (value == null) {
+ break;
+ }
+ ordered.add(value);
+ }
+
+ entries.release();
+ return new InternalTermsStatsStringFacet(facetName, comparatorType, size, ordered, missing);
+ }
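+    // Illustrative summary (not upstream code) of the trimming performed in buildFacet():
+    //   size == 0  -> every collected StringEntry is returned; sorting happens when the
+    //                 shard results are merged on the coordinating node.
+    //   size > 0   -> the hash-map value slots are sorted with comparatorType.comparator()
+    //                 (which pushes null slots to the end) and at most shardSize non-null
+    //                 entries are kept for this shard.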
+
+ class Collector extends FacetExecutor.Collector {
+
+ private final Aggregator aggregator;
+ private BytesValues keyValues;
+
+ public Collector() {
+ if (script != null) {
+ this.aggregator = new ScriptAggregator(entries.v(), script);
+ } else {
+ this.aggregator = new Aggregator(entries.v());
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ if (script != null) {
+ script.setScorer(scorer);
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ keyValues = keyIndexFieldData.load(context).getBytesValues(true);
+ if (script != null) {
+ script.setNextReader(context);
+ } else {
+ aggregator.valueValues = valueIndexFieldData.load(context).getDoubleValues();
+ }
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ aggregator.onDoc(doc, keyValues);
+ }
+
+ @Override
+ public void postCollection() {
+ TermsStatsStringFacetExecutor.this.missing = aggregator.missing;
+ aggregator.release();
+ }
+ }
+
+ public static class Aggregator extends HashedAggregator {
+
+ final ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries;
+ final HashedBytesRef spare = new HashedBytesRef();
+ int missing = 0;
+
+ DoubleValues valueValues;
+
+ ValueAggregator valueAggregator = new ValueAggregator();
+
+ public Aggregator(ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries) {
+ this.entries = entries;
+ }
+
+ @Override
+ public void onValue(int docId, BytesRef value, int hashCode, BytesValues values) {
+ spare.reset(value, hashCode);
+ InternalTermsStatsStringFacet.StringEntry stringEntry = entries.get(spare);
+ if (stringEntry == null) {
+ HashedBytesRef theValue = new HashedBytesRef(values.copyShared(), hashCode);
+ stringEntry = new InternalTermsStatsStringFacet.StringEntry(theValue, 0, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ entries.put(theValue, stringEntry);
+ }
+ stringEntry.count++;
+ valueAggregator.stringEntry = stringEntry;
+ valueAggregator.onDoc(docId, valueValues);
+ }
+
+ public static class ValueAggregator extends DoubleFacetAggregatorBase {
+
+ InternalTermsStatsStringFacet.StringEntry stringEntry;
+
+ @Override
+ public void onValue(int docId, double value) {
+ if (value < stringEntry.min) {
+ stringEntry.min = value;
+ }
+ if (value > stringEntry.max) {
+ stringEntry.max = value;
+ }
+ stringEntry.total += value;
+ stringEntry.totalCount++;
+ }
+ }
+ }
+
+ public static class ScriptAggregator extends Aggregator {
+ private final SearchScript script;
+
+ public ScriptAggregator(ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries, SearchScript script) {
+ super(entries);
+ this.script = script;
+ }
+
+ @Override
+ public void onValue(int docId, BytesRef value, int hashCode, BytesValues values) {
+ spare.reset(value, hashCode);
+ InternalTermsStatsStringFacet.StringEntry stringEntry = entries.get(spare);
+ if (stringEntry == null) {
+ HashedBytesRef theValue = new HashedBytesRef(values.copyShared(), hashCode);
+ stringEntry = new InternalTermsStatsStringFacet.StringEntry(theValue, 1, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ entries.put(theValue, stringEntry);
+ } else {
+ stringEntry.count++;
+ }
+
+ script.setNextDocId(docId);
+ double valueValue = script.runAsDouble();
+ if (valueValue < stringEntry.min) {
+ stringEntry.min = valueValue;
+ }
+ if (valueValue > stringEntry.max) {
+ stringEntry.max = valueValue;
+ }
+ stringEntry.total += valueValue;
+ stringEntry.totalCount++;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
new file mode 100644
index 0000000..7dc1929
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.ReaderUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.fieldvisitor.*;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.fetch.explain.ExplainFetchSubPhase;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
+import org.elasticsearch.search.fetch.matchedqueries.MatchedQueriesFetchSubPhase;
+import org.elasticsearch.search.fetch.partial.PartialFieldsFetchSubPhase;
+import org.elasticsearch.search.fetch.script.ScriptFieldsFetchSubPhase;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.fetch.source.FetchSourceSubPhase;
+import org.elasticsearch.search.fetch.version.VersionFetchSubPhase;
+import org.elasticsearch.search.highlight.HighlightPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.InternalSearchHitField;
+import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * The fetch phase of a search request: loads the requested stored fields and/or _source for each doc id selected by the query phase and runs the registered fetch sub phases on every hit.
+ */
+public class FetchPhase implements SearchPhase {
+
+ private final FetchSubPhase[] fetchSubPhases;
+
+ @Inject
+ public FetchPhase(HighlightPhase highlightPhase, ScriptFieldsFetchSubPhase scriptFieldsPhase, PartialFieldsFetchSubPhase partialFieldsPhase,
+ MatchedQueriesFetchSubPhase matchedQueriesPhase, ExplainFetchSubPhase explainPhase, VersionFetchSubPhase versionPhase,
+ FetchSourceSubPhase fetchSourceSubPhase, FieldDataFieldsFetchSubPhase fieldDataFieldsFetchSubPhase) {
+ this.fetchSubPhases = new FetchSubPhase[]{scriptFieldsPhase, partialFieldsPhase, matchedQueriesPhase, explainPhase, highlightPhase,
+ fetchSourceSubPhase, versionPhase, fieldDataFieldsFetchSubPhase};
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("fields", new FieldsParseElement());
+ for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
+ parseElements.putAll(fetchSubPhase.parseElements());
+ }
+ return parseElements.build();
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ }
+
+ public void execute(SearchContext context) {
+ FieldsVisitor fieldsVisitor;
+ List<String> extractFieldNames = null;
+
+ if (!context.hasFieldNames()) {
+ if (context.hasPartialFields()) {
+ // partial fields need the source, so fetch it
+ fieldsVisitor = new UidAndSourceFieldsVisitor();
+ } else {
+ // no fields specified, default to return source if no explicit indication
+ if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
+ context.fetchSourceContext(new FetchSourceContext(true));
+ }
+ fieldsVisitor = context.sourceRequested() ? new UidAndSourceFieldsVisitor() : new JustUidFieldsVisitor();
+ }
+ } else if (context.fieldNames().isEmpty()) {
+ if (context.sourceRequested()) {
+ fieldsVisitor = new UidAndSourceFieldsVisitor();
+ } else {
+ fieldsVisitor = new JustUidFieldsVisitor();
+ }
+ } else {
+ boolean loadAllStored = false;
+ Set<String> fieldNames = null;
+ for (String fieldName : context.fieldNames()) {
+ if (fieldName.equals("*")) {
+ loadAllStored = true;
+ continue;
+ }
+ if (fieldName.equals(SourceFieldMapper.NAME)) {
+ if (context.hasFetchSourceContext()) {
+ context.fetchSourceContext().fetchSource(true);
+ } else {
+ context.fetchSourceContext(new FetchSourceContext(true));
+ }
+ continue;
+ }
+ FieldMappers x = context.smartNameFieldMappers(fieldName);
+ if (x == null) {
+                    // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
+ if (context.smartNameObjectMapper(fieldName) != null) {
+ throw new ElasticsearchIllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
+ }
+ } else if (x.mapper().fieldType().stored()) {
+ if (fieldNames == null) {
+ fieldNames = new HashSet<String>();
+ }
+ fieldNames.add(x.mapper().names().indexName());
+ } else {
+ if (extractFieldNames == null) {
+ extractFieldNames = newArrayList();
+ }
+ extractFieldNames.add(fieldName);
+ }
+ }
+ if (loadAllStored) {
+ fieldsVisitor = new AllFieldsVisitor(); // load everything, including _source
+ } else if (fieldNames != null) {
+ boolean loadSource = extractFieldNames != null || context.sourceRequested();
+ fieldsVisitor = new CustomFieldsVisitor(fieldNames, loadSource);
+ } else if (extractFieldNames != null || context.sourceRequested()) {
+ fieldsVisitor = new UidAndSourceFieldsVisitor();
+ } else {
+ fieldsVisitor = new JustUidFieldsVisitor();
+ }
+ }
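+        // Summary of the visitor selection above:
+        //   no "fields" element   -> uid-only or uid+source visitor, depending on whether _source is needed
+        //   "fields" : []         -> JustUidFieldsVisitor (or UidAndSourceFieldsVisitor if _source was requested)
+        //   "fields" : ["*"]      -> AllFieldsVisitor, loading every stored field including _source
+        //   stored field names    -> CustomFieldsVisitor over those names, loading _source only when required
+        //   unstored field names  -> gathered in extractFieldNames and pulled out of _source per hit below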
+
+ InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
+ FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
+ for (int index = 0; index < context.docIdsToLoadSize(); index++) {
+ int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
+
+ loadStoredFields(context, fieldsVisitor, docId);
+ fieldsVisitor.postProcess(context.mapperService());
+
+ Map<String, SearchHitField> searchFields = null;
+ if (!fieldsVisitor.fields().isEmpty()) {
+ searchFields = new HashMap<String, SearchHitField>(fieldsVisitor.fields().size());
+ for (Map.Entry<String, List<Object>> entry : fieldsVisitor.fields().entrySet()) {
+ searchFields.put(entry.getKey(), new InternalSearchHitField(entry.getKey(), entry.getValue()));
+ }
+ }
+
+ DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
+ Text typeText;
+ if (documentMapper == null) {
+ typeText = new StringAndBytesText(fieldsVisitor.uid().type());
+ } else {
+ typeText = documentMapper.typeText();
+ }
+ InternalSearchHit searchHit = new InternalSearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields);
+
+ hits[index] = searchHit;
+
+ int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
+ AtomicReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
+ int subDoc = docId - subReaderContext.docBase;
+
+ // go over and extract fields that are not mapped / stored
+ context.lookup().setNextReader(subReaderContext);
+ context.lookup().setNextDocId(subDoc);
+ if (fieldsVisitor.source() != null) {
+ context.lookup().source().setNextSource(fieldsVisitor.source());
+ }
+ if (extractFieldNames != null) {
+ for (String extractFieldName : extractFieldNames) {
+ List<Object> values = context.lookup().source().extractRawValues(extractFieldName);
+ if (!values.isEmpty()) {
+ if (searchHit.fieldsOrNull() == null) {
+ searchHit.fields(new HashMap<String, SearchHitField>(2));
+ }
+
+ SearchHitField hitField = searchHit.fields().get(extractFieldName);
+ if (hitField == null) {
+ hitField = new InternalSearchHitField(extractFieldName, new ArrayList<Object>(2));
+ searchHit.fields().put(extractFieldName, hitField);
+ }
+ for (Object value : values) {
+ hitField.values().add(value);
+ }
+ }
+ }
+ }
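+            // e.g. an unstored field such as "user.name" (placeholder name) is resolved here:
+            // extractRawValues(...) reads the value(s) directly out of the loaded _source
+            // and attaches them to the hit as a regular SearchHitField.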
+
+ hitContext.reset(searchHit, subReaderContext, subDoc, context.searcher().getIndexReader(), docId, fieldsVisitor);
+ for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
+ if (fetchSubPhase.hitExecutionNeeded(context)) {
+ fetchSubPhase.hitExecute(context, hitContext);
+ }
+ }
+ }
+
+ for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
+ if (fetchSubPhase.hitsExecutionNeeded(context)) {
+ fetchSubPhase.hitsExecute(context, hits);
+ }
+ }
+
+ context.fetchResult().hits(new InternalSearchHits(hits, context.queryResult().topDocs().totalHits, context.queryResult().topDocs().getMaxScore()));
+ }
+
+ private void loadStoredFields(SearchContext context, FieldsVisitor fieldVisitor, int docId) {
+ fieldVisitor.reset();
+ try {
+ context.searcher().doc(docId, fieldVisitor);
+ } catch (IOException e) {
+ throw new FetchPhaseExecutionException(context, "Failed to fetch doc id [" + docId + "]", e);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java
new file mode 100644
index 0000000..ae27555
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.search.SearchContextException;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Thrown when the fetch phase fails for a search context, e.g. when a document cannot be loaded.
+ */
+public class FetchPhaseExecutionException extends SearchContextException {
+
+ public FetchPhaseExecutionException(SearchContext context, String msg) {
+ super(context, "Fetch Failed [" + msg + "]");
+ }
+
+ public FetchPhaseExecutionException(SearchContext context, String msg, Throwable t) {
+ super(context, "Fetch Failed [" + msg + "]", t);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java b/src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java
new file mode 100644
index 0000000..fec5c45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FetchSearchRequest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import com.carrotsearch.hppc.IntArrayList;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * Shard-level transport request asking a shard to fetch the documents (by doc id) selected by a previous query phase.
+ */
+public class FetchSearchRequest extends TransportRequest {
+
+ private long id;
+
+ private int[] docIds;
+
+ private int size;
+
+ public FetchSearchRequest() {
+ }
+
+ public FetchSearchRequest(TransportRequest request, long id, IntArrayList list) {
+ super(request);
+ this.id = id;
+ this.docIds = list.buffer;
+ this.size = list.size();
+ }
+
+ public long id() {
+ return id;
+ }
+
+ public int[] docIds() {
+ return docIds;
+ }
+
+ public int docIdsSize() {
+ return size;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ size = in.readVInt();
+ docIds = new int[size];
+ for (int i = 0; i < size; i++) {
+ docIds[i] = in.readVInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(size);
+ for (int i = 0; i < size; i++) {
+ out.writeVInt(docIds[i]);
+ }
+ }
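+    // Serialization round-trip sketch (BytesStreamOutput / BytesStreamInput are assumed
+    // here purely for illustration; this snippet is not part of the upstream file):
+    //
+    //     BytesStreamOutput out = new BytesStreamOutput();
+    //     request.writeTo(out);                  // id as long, size as vint, each doc id as vint
+    //     FetchSearchRequest copy = new FetchSearchRequest();
+    //     copy.readFrom(new BytesStreamInput(out.bytes()));
+    //     assert copy.id() == request.id() && copy.docIdsSize() == request.docIdsSize();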
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
new file mode 100644
index 0000000..0db1096
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.internal.InternalSearchHits.StreamContext;
+
+/**
+ * Shard-level result of the fetch phase, carrying the fetched hits back to the coordinating node.
+ */
+public class FetchSearchResult extends TransportResponse implements FetchSearchResultProvider {
+
+ private long id;
+ private SearchShardTarget shardTarget;
+ private InternalSearchHits hits;
+ // client side counter
+ private transient int counter;
+
+ public FetchSearchResult() {
+
+ }
+
+ public FetchSearchResult(long id, SearchShardTarget shardTarget) {
+ this.id = id;
+ this.shardTarget = shardTarget;
+ }
+
+ @Override
+ public FetchSearchResult fetchResult() {
+ return this;
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ public SearchShardTarget shardTarget() {
+ return this.shardTarget;
+ }
+
+ @Override
+ public void shardTarget(SearchShardTarget shardTarget) {
+ this.shardTarget = shardTarget;
+ }
+
+ public void hits(InternalSearchHits hits) {
+ this.hits = hits;
+ }
+
+ public InternalSearchHits hits() {
+ return hits;
+ }
+
+ public FetchSearchResult initCounter() {
+ counter = 0;
+ return this;
+ }
+
+ public int counterGetAndIncrement() {
+ return counter++;
+ }
+
+ public static FetchSearchResult readFetchSearchResult(StreamInput in) throws IOException {
+ FetchSearchResult result = new FetchSearchResult();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ hits = InternalSearchHits.readSearchHits(in, InternalSearchHits.streamContext().streamShardTarget(StreamContext.ShardTargetType.NO_STREAM));
+ }
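+    // Note: the shard target is deliberately not streamed (ShardTargetType.NO_STREAM);
+    // the receiving side already knows which shard it asked and sets it via shardTarget(...).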
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ hits.writeTo(out, InternalSearchHits.streamContext().streamShardTarget(StreamContext.ShardTargetType.NO_STREAM));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java b/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java
new file mode 100644
index 0000000..5f4b810
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.search.SearchPhaseResult;
+
+/**
+ * Implemented by shard-level results that can expose a {@link FetchSearchResult}.
+ */
+public interface FetchSearchResultProvider extends SearchPhaseResult {
+
+ FetchSearchResult fetchResult();
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
new file mode 100644
index 0000000..b0e39bb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.Map;
+
+/**
+ * A fetch sub phase: an optional step of the fetch phase that can enrich each hit, or all hits at once.
+ */
+public interface FetchSubPhase {
+
+ public static class HitContext {
+ private InternalSearchHit hit;
+ private IndexReader topLevelReader;
+ private int topLevelDocId;
+ private AtomicReaderContext readerContext;
+ private int docId;
+ private FieldsVisitor fieldVisitor;
+ private Map<String, Object> cache;
+ private IndexSearcher atomicIndexSearcher;
+
+ public void reset(InternalSearchHit hit, AtomicReaderContext context, int docId, IndexReader topLevelReader, int topLevelDocId, FieldsVisitor fieldVisitor) {
+ this.hit = hit;
+ this.readerContext = context;
+ this.docId = docId;
+ this.topLevelReader = topLevelReader;
+ this.topLevelDocId = topLevelDocId;
+ this.fieldVisitor = fieldVisitor;
+ this.atomicIndexSearcher = null;
+ }
+
+ public InternalSearchHit hit() {
+ return hit;
+ }
+
+ public AtomicReader reader() {
+ return readerContext.reader();
+ }
+
+ public AtomicReaderContext readerContext() {
+ return readerContext;
+ }
+
+ public IndexSearcher searcher() {
+ if (atomicIndexSearcher == null) {
+ // Use the reader directly otherwise the IndexSearcher assertion will trip because it expects a top level
+ // reader context.
+ atomicIndexSearcher = new IndexSearcher(readerContext.reader());
+ }
+ return atomicIndexSearcher;
+ }
+
+ public int docId() {
+ return docId;
+ }
+
+ public IndexReader topLevelReader() {
+ return topLevelReader;
+ }
+
+ public int topLevelDocId() {
+ return topLevelDocId;
+ }
+
+ public FieldsVisitor fieldVisitor() {
+ return fieldVisitor;
+ }
+
+ public Map<String, Object> cache() {
+ if (cache == null) {
+ cache = Maps.newHashMap();
+ }
+ return cache;
+ }
+ }
+
+ Map<String, ? extends SearchParseElement> parseElements();
+
+ boolean hitExecutionNeeded(SearchContext context);
+
+ /**
+     * Executes the hit level phase, with a reader and doc id (note, it's a low-level reader, and the matching doc).
+ */
+ void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException;
+
+ boolean hitsExecutionNeeded(SearchContext context);
+
+ void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException;
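+    // A minimal implementation of this contract might look like the following
+    // (hypothetical class, shown for illustration only):
+    //
+    //     public class NoOpFetchSubPhase implements FetchSubPhase {
+    //         public Map<String, ? extends SearchParseElement> parseElements() { return ImmutableMap.of(); }
+    //         public boolean hitExecutionNeeded(SearchContext context) { return false; }
+    //         public void hitExecute(SearchContext context, HitContext hitContext) {}
+    //         public boolean hitsExecutionNeeded(SearchContext context) { return false; }
+    //         public void hitsExecute(SearchContext context, InternalSearchHit[] hits) {}
+    //     }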
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java
new file mode 100644
index 0000000..140d60d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/FieldsParseElement.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses the "fields" element of a search request into the context's field name list.
+ */
+public class FieldsParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.START_ARRAY) {
+ boolean added = false;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String name = parser.text();
+ added = true;
+ context.fieldNames().add(name);
+ }
+ if (!added) {
+ context.emptyFieldNames();
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ String name = parser.text();
+ context.fieldNames().add(name);
+        }
+ }
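+    // Accepted forms (illustrative request snippets; field names are placeholders):
+    //
+    //     { "fields" : "title" }              // a single field name
+    //     { "fields" : ["title", "date"] }    // a list of field names
+    //     { "fields" : [] }                   // explicitly no stored fields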
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
new file mode 100644
index 0000000..e43ec50
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.query.QuerySearchResultProvider;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.fetch.FetchSearchResult.readFetchSearchResult;
+import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult;
+
+/**
+ * Combined shard-level result holding both the query phase and the fetch phase results.
+ */
+public class QueryFetchSearchResult extends TransportResponse implements QuerySearchResultProvider, FetchSearchResultProvider {
+
+ private QuerySearchResult queryResult;
+ private FetchSearchResult fetchResult;
+
+ public QueryFetchSearchResult() {
+
+ }
+
+ public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) {
+ this.queryResult = queryResult;
+ this.fetchResult = fetchResult;
+ }
+
+ public long id() {
+ return queryResult.id();
+ }
+
+ public SearchShardTarget shardTarget() {
+ return queryResult.shardTarget();
+ }
+
+ @Override
+ public void shardTarget(SearchShardTarget shardTarget) {
+ queryResult.shardTarget(shardTarget);
+ fetchResult.shardTarget(shardTarget);
+ }
+
+ @Override
+ public boolean includeFetch() {
+ return true;
+ }
+
+ public QuerySearchResult queryResult() {
+ return queryResult;
+ }
+
+ public FetchSearchResult fetchResult() {
+ return fetchResult;
+ }
+
+ public static QueryFetchSearchResult readQueryFetchSearchResult(StreamInput in) throws IOException {
+ QueryFetchSearchResult result = new QueryFetchSearchResult();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ queryResult = readQuerySearchResult(in);
+ fetchResult = readFetchSearchResult(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ queryResult.writeTo(out);
+ fetchResult.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java
new file mode 100644
index 0000000..fb0fc75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
+import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult;
+
+/**
+ * Shard-level query+fetch result for a scroll request, together with its shard target.
+ */
+public class ScrollQueryFetchSearchResult extends TransportResponse {
+
+ private QueryFetchSearchResult result;
+ private SearchShardTarget shardTarget;
+
+ public ScrollQueryFetchSearchResult() {
+ }
+
+ public ScrollQueryFetchSearchResult(QueryFetchSearchResult result, SearchShardTarget shardTarget) {
+ this.result = result;
+ this.shardTarget = shardTarget;
+ }
+
+ public QueryFetchSearchResult result() {
+ return result;
+ }
+
+ public SearchShardTarget shardTarget() {
+ return shardTarget;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardTarget = readSearchShardTarget(in);
+ result = readQueryFetchSearchResult(in);
+ result.shardTarget(shardTarget);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardTarget.writeTo(out);
+ result.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java
new file mode 100644
index 0000000..2697f5d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.explain;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.rescore.Rescorer;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Fetch sub phase that attaches a Lucene {@link Explanation} to each hit when explain is requested.
+ */
+public class ExplainFetchSubPhase implements FetchSubPhase {
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.of("explain", new ExplainParseElement());
+ }
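+    // Triggered by "explain" : true in the request body, e.g. (illustrative):
+    //
+    //     { "query" : { "match_all" : {} }, "explain" : true }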
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.explain();
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ try {
+ final int topLevelDocId = hitContext.hit().docId();
+ Explanation explanation;
+
+ if (context.rescore() != null) {
+ RescoreSearchContext ctx = context.rescore();
+ Rescorer rescorer = ctx.rescorer();
+ explanation = rescorer.explain(topLevelDocId, context, ctx);
+ } else {
+ explanation = context.searcher().explain(context.query(), topLevelDocId);
+ }
+ // we use the top level doc id, since we work with the top level searcher
+ hitContext.hit().explanation(explanation);
+ } catch (IOException e) {
+ throw new FetchPhaseExecutionException(context, "Failed to explain doc [" + hitContext.hit().type() + "#" + hitContext.hit().id() + "]", e);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/explain/ExplainParseElement.java b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainParseElement.java
new file mode 100644
index 0000000..7d44a9d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainParseElement.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.explain;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses the "explain" element of a search request.
+ */
+public class ExplainParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ context.explain(parser.booleanValue());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsContext.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsContext.java
new file mode 100644
index 0000000..aebed39
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsContext.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.fielddata;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * All the required context to pull a field from the field data cache.
+ */
+public class FieldDataFieldsContext {
+
+ public static class FieldDataField {
+ private final String name;
+
+ public FieldDataField(String name) {
+ this.name = name;
+ }
+
+ public String name() {
+ return name;
+ }
+ }
+
+ private List<FieldDataField> fields = Lists.newArrayList();
+
+ public FieldDataFieldsContext() {
+ }
+
+ public void add(FieldDataField field) {
+ this.fields.add(field);
+ }
+
+ public List<FieldDataField> fields() {
+ return this.fields;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java
new file mode 100644
index 0000000..6dc1cb1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.fielddata;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.fielddata.AtomicFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.InternalSearchHitField;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Query sub phase which pulls data from field data (using the cache if
+ * available, building it if not).
+ *
+ * Specifying {@code "fielddata_fields": ["field1", "field2"]}
+ */
+public class FieldDataFieldsFetchSubPhase implements FetchSubPhase {
+
+ @Inject
+ public FieldDataFieldsFetchSubPhase() {
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("fielddata_fields", new FieldDataFieldsParseElement())
+ .put("fielddataFields", new FieldDataFieldsParseElement());
+ return parseElements.build();
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.hasFieldDataFields();
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ for (FieldDataFieldsContext.FieldDataField field : context.fieldDataFields().fields()) {
+ if (hitContext.hit().fieldsOrNull() == null) {
+ hitContext.hit().fields(new HashMap<String, SearchHitField>(2));
+ }
+ SearchHitField hitField = hitContext.hit().fields().get(field.name());
+ if (hitField == null) {
+ hitField = new InternalSearchHitField(field.name(), new ArrayList<Object>(2));
+ hitContext.hit().fields().put(field.name(), hitField);
+ }
+ FieldMapper mapper = context.mapperService().smartNameFieldMapper(field.name());
+ if (mapper != null) {
+ AtomicFieldData data = context.fieldData().getForField(mapper).load(hitContext.readerContext());
+ ScriptDocValues values = data.getScriptValues();
+ values.setNextDocId(hitContext.docId());
+ hitField.values().addAll(values.getValues());
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java
new file mode 100644
index 0000000..f264864
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.fielddata;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses field name values from the {@code fielddata_fields} parameter in a
+ * search request.
+ *
+ * <pre>
+ * {
+ * "query": {...},
+ * "fielddata_fields" : ["field1", "field2"]
+ * }
+ * </pre>
+ */
+public class FieldDataFieldsParseElement implements SearchParseElement {
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ String fieldName = parser.text();
+ context.fieldDataFields().add(new FieldDataFieldsContext.FieldDataField(fieldName));
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java
new file mode 100644
index 0000000..913d52e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.matchedqueries;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Fetch sub phase that records, per hit, the names of the named queries and filters it matched.
+ */
+public class MatchedQueriesFetchSubPhase implements FetchSubPhase {
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.of();
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+        return !context.parsedQuery().namedFilters().isEmpty()
+                || (context.parsedPostFilter() != null && !context.parsedPostFilter().namedFilters().isEmpty());
+ }
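+    // Illustrative request using a named query ("user_query" is a placeholder):
+    //
+    //     { "query" : { "term" : { "user" : { "value" : "kimchy", "_name" : "user_query" } } } }
+    //
+    // A hit matching that clause gets "user_query" in its matched_queries array.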
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ List<String> matchedQueries = Lists.newArrayListWithCapacity(2);
+
+ addMatchedQueries(hitContext, context.parsedQuery().namedFilters(), matchedQueries);
+
+ if (context.parsedPostFilter() != null) {
+ addMatchedQueries(hitContext, context.parsedPostFilter().namedFilters(), matchedQueries);
+ }
+
+ hitContext.hit().matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()]));
+ }
+
+ private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Filter> namedFiltersAndQueries, List<String> matchedQueries) {
+ for (Map.Entry<String, Filter> entry : namedFiltersAndQueries.entrySet()) {
+ String name = entry.getKey();
+ Filter filter = entry.getValue();
+ try {
+ DocIdSet docIdSet = filter.getDocIdSet(hitContext.readerContext(), null); // null is fine, since we filter by hitContext.docId()
+ if (!DocIdSets.isEmpty(docIdSet)) {
+ Bits bits = docIdSet.bits();
+ if (bits != null) {
+ if (bits.get(hitContext.docId())) {
+ matchedQueries.add(name);
+ }
+ } else {
+ DocIdSetIterator iterator = docIdSet.iterator();
+ if (iterator != null) {
+ if (iterator.advance(hitContext.docId()) == hitContext.docId()) {
+ matchedQueries.add(name);
+ }
+ }
+ }
+ }
+ } catch (IOException e) {
+ // ignore
+ } finally {
+ SearchContext.current().clearReleasables();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsContext.java b/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsContext.java
new file mode 100644
index 0000000..8f1d5b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsContext.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.partial;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Holds the partial field definitions (include / exclude source patterns) of a search request.
+ */
+public class PartialFieldsContext {
+
+ public static class PartialField {
+ private final String name;
+ private final String[] includes;
+ private final String[] excludes;
+
+ public PartialField(String name, String[] includes, String[] excludes) {
+ this.name = name;
+ this.includes = includes;
+ this.excludes = excludes;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String[] includes() {
+ return this.includes;
+ }
+
+ public String[] excludes() {
+ return this.excludes;
+ }
+ }
+
+ private final List<PartialField> fields = Lists.newArrayList();
+
+ public PartialFieldsContext() {
+
+ }
+
+ public void add(PartialField field) {
+ fields.add(field);
+ }
+
+ public List<PartialField> fields() {
+ return this.fields;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsFetchSubPhase.java
new file mode 100644
index 0000000..d5a780d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsFetchSubPhase.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.partial;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.InternalSearchHitField;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Fetch sub phase that filters each hit's _source through the requested partial field patterns.
+ */
+public class PartialFieldsFetchSubPhase implements FetchSubPhase {
+
+ @Inject
+ public PartialFieldsFetchSubPhase() {
+
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("partial_fields", new PartialFieldsParseElement())
+ .put("partialFields", new PartialFieldsParseElement());
+ return parseElements.build();
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.hasPartialFields();
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ for (PartialFieldsContext.PartialField field : context.partialFields().fields()) {
+ Object value = context.lookup().source().filter(field.includes(), field.excludes());
+
+ if (hitContext.hit().fieldsOrNull() == null) {
+ hitContext.hit().fields(new HashMap<String, SearchHitField>(2));
+ }
+
+ SearchHitField hitField = hitContext.hit().fields().get(field.name());
+ if (hitField == null) {
+ hitField = new InternalSearchHitField(field.name(), new ArrayList<Object>(2));
+ hitContext.hit().fields().put(field.name(), hitField);
+ }
+ hitField.values().add(value);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsParseElement.java
new file mode 100644
index 0000000..60e2542
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/partial/PartialFieldsParseElement.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.partial;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * <pre>
+ * "partial_fields" : {
+ * "test1" : {
+ * "includes" : "doc['field_name'].value"
+ * },
+ * "test2" : {
+ * "excludes" : "..."
+ * }
+ * }
+ * </pre>
+ */
+public class PartialFieldsParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String fieldName = currentFieldName;
+ List<String> includes = null;
+ List<String> excludes = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("includes".equals(currentFieldName) || "include".equals(currentFieldName)) {
+ if (includes == null) {
+ includes = new ArrayList<String>(2);
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ includes.add(parser.text());
+ }
+ } else if ("excludes".equals(currentFieldName) || "exclude".equals(currentFieldName)) {
+ if (excludes == null) {
+ excludes = new ArrayList<String>(2);
+ }
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ excludes.add(parser.text());
+ }
+ }
+ } else if (token.isValue()) {
+ if ("include".equals(currentFieldName)) {
+ if (includes == null) {
+ includes = new ArrayList<String>(2);
+ }
+ includes.add(parser.text());
+ } else if ("exclude".equals(currentFieldName)) {
+ if (excludes == null) {
+ excludes = new ArrayList<String>(2);
+ }
+ excludes.add(parser.text());
+ }
+ }
+ }
+ PartialFieldsContext.PartialField field = new PartialFieldsContext.PartialField(fieldName,
+ includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[includes.size()]),
+ excludes == null ? Strings.EMPTY_ARRAY : excludes.toArray(new String[excludes.size()]));
+ context.partialFields().add(field);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsContext.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsContext.java
new file mode 100644
index 0000000..27a7b2a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsContext.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.script;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.script.SearchScript;
+
+import java.util.List;
+
+/**
+ * Holds the script fields requested for the fetch phase.
+ */
+public class ScriptFieldsContext {
+
+ public static class ScriptField {
+ private final String name;
+ private final SearchScript script;
+ private final boolean ignoreException;
+
+ public ScriptField(String name, SearchScript script, boolean ignoreException) {
+ this.name = name;
+ this.script = script;
+ this.ignoreException = ignoreException;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public SearchScript script() {
+ return this.script;
+ }
+
+ public boolean ignoreException() {
+ return ignoreException;
+ }
+ }
+
+ private List<ScriptField> fields = Lists.newArrayList();
+
+ public ScriptFieldsContext() {
+ }
+
+ public void add(ScriptField field) {
+ this.fields.add(field);
+ }
+
+ public List<ScriptField> fields() {
+ return this.fields;
+ }
+}
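A minimal sketch of the holder above; in the real flow the SearchScript is compiled by ScriptService (see ScriptFieldsParseElement below), so null stands in here purely to exercise the container.

    import org.elasticsearch.search.fetch.script.ScriptFieldsContext;

    public class ScriptFieldsContextExample {
        public static void main(String[] args) {
            ScriptFieldsContext ctx = new ScriptFieldsContext();
            // A real SearchScript comes from ScriptService; null is a stand-in.
            ctx.add(new ScriptFieldsContext.ScriptField("discounted_price", null, true));
            for (ScriptFieldsContext.ScriptField f : ctx.fields()) {
                System.out.println(f.name() + " ignoreException=" + f.ignoreException());
            }
        }
    }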
diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java
new file mode 100644
index 0000000..54321a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.script;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.InternalSearchHitField;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Fetch sub-phase that evaluates the requested script fields against each hit.
+ */
+public class ScriptFieldsFetchSubPhase implements FetchSubPhase {
+
+ @Inject
+ public ScriptFieldsFetchSubPhase() {
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("script_fields", new ScriptFieldsParseElement())
+ .put("scriptFields", new ScriptFieldsParseElement());
+ return parseElements.build();
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.hasScriptFields();
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ for (ScriptFieldsContext.ScriptField scriptField : context.scriptFields().fields()) {
+ scriptField.script().setNextReader(hitContext.readerContext());
+ scriptField.script().setNextDocId(hitContext.docId());
+
+ Object value;
+ try {
+ value = scriptField.script().run();
+ value = scriptField.script().unwrap(value);
+ } catch (RuntimeException e) {
+ if (scriptField.ignoreException()) {
+ continue;
+ }
+ throw e;
+ }
+
+ if (hitContext.hit().fieldsOrNull() == null) {
+ hitContext.hit().fields(new HashMap<String, SearchHitField>(2));
+ }
+
+ SearchHitField hitField = hitContext.hit().fields().get(scriptField.name());
+ if (hitField == null) {
+ hitField = new InternalSearchHitField(scriptField.name(), new ArrayList<Object>(2));
+ hitContext.hit().fields().put(scriptField.name(), hitField);
+ }
+ hitField.values().add(value);
+ }
+ }
+}
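A hedged sketch of the same feature from the Java client side; it assumes the 1.x SearchRequestBuilder#addScriptField(name, script, params) signature and a connected Client, and the index and field names are illustrative.

    import com.google.common.collect.ImmutableMap;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;

    public class ScriptFieldsClientExample {
        // Sketch only: assumes a connected Client; "products" is hypothetical.
        static SearchResponse searchWithScriptField(Client client) {
            return client.prepareSearch("products")
                    .addScriptField("discounted",
                            "doc['price'].value * factor",
                            ImmutableMap.<String, Object>of("factor", 0.9))
                    .execute().actionGet();
        }
    }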
diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
new file mode 100644
index 0000000..2bdafb1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.script;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.Map;
+
+/**
+ * <pre>
+ * "script_fields" : {
+ * "test1" : {
+ * "script" : "doc['field_name'].value"
+ * },
+ * "test2" : {
+ * "script" : "..."
+ * }
+ * }
+ * </pre>
+ *
+ *
+ */
+public class ScriptFieldsParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+ String currentFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String fieldName = currentFieldName;
+ String script = null;
+ String scriptLang = null;
+ Map<String, Object> params = null;
+ boolean ignoreException = false;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ params = parser.map();
+ } else if (token.isValue()) {
+ if ("script".equals(currentFieldName)) {
+ script = parser.text();
+ } else if ("lang".equals(currentFieldName)) {
+ scriptLang = parser.text();
+ } else if ("ignore_failure".equals(currentFieldName)) {
+ ignoreException = parser.booleanValue();
+ }
+ }
+ }
+ SearchScript searchScript = context.scriptService().search(context.lookup(), scriptLang, script, params);
+ context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException));
+ }
+ }
+ }
+}
\ No newline at end of file
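For reference, a body that exercises every key this parse element understands can be sketched with XContentBuilder; the script, the params, and the assumed 1.0 default lang of mvel are illustrative.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class ScriptFieldsBodyExample {
        public static void main(String[] args) throws Exception {
            // Exercises "script", "lang", a params object, and "ignore_failure",
            // all of which the parse element above accepts.
            XContentBuilder body = XContentFactory.jsonBuilder()
                    .startObject()
                        .startObject("script_fields")
                            .startObject("discounted")
                                .field("script", "doc['price'].value * factor")
                                .field("lang", "mvel")            // assumed default lang in 1.0
                                .startObject("params")
                                    .field("factor", 0.9)
                                .endObject()
                                .field("ignore_failure", true)
                            .endObject()
                        .endObject()
                    .endObject();
            System.out.println(body.string());
        }
    }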
diff --git a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceContext.java b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceContext.java
new file mode 100644
index 0000000..2fa7314
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceContext.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.source;
+
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.rest.RestRequest;
+
+import java.io.IOException;
+
+/**
+ * Controls whether and how the document <tt>_source</tt> is fetched and filtered.
+ */
+public class FetchSourceContext implements Streamable {
+
+ public static final FetchSourceContext FETCH_SOURCE = new FetchSourceContext(true);
+ public static final FetchSourceContext DO_NOT_FETCH_SOURCE = new FetchSourceContext(false);
+ private boolean fetchSource;
+ private String[] includes;
+ private String[] excludes;
+
+
+ FetchSourceContext() {
+
+ }
+
+ public FetchSourceContext(boolean fetchSource) {
+ this(fetchSource, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY);
+ }
+
+ public FetchSourceContext(String include) {
+ this(include, null);
+ }
+
+ public FetchSourceContext(String include, String exclude) {
+ this(true,
+ include == null ? Strings.EMPTY_ARRAY : new String[]{include},
+ exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude});
+ }
+
+ public FetchSourceContext(String[] includes) {
+ this(true, includes, Strings.EMPTY_ARRAY);
+ }
+
+ public FetchSourceContext(String[] includes, String[] excludes) {
+ this(true, includes, excludes);
+ }
+
+ public FetchSourceContext(boolean fetchSource, String[] includes, String[] excludes) {
+ this.fetchSource = fetchSource;
+ this.includes = includes == null ? Strings.EMPTY_ARRAY : includes;
+ this.excludes = excludes == null ? Strings.EMPTY_ARRAY : excludes;
+ }
+
+ public boolean fetchSource() {
+ return this.fetchSource;
+ }
+
+ public FetchSourceContext fetchSource(boolean fetchSource) {
+ this.fetchSource = fetchSource;
+ return this;
+ }
+
+ public String[] includes() {
+ return this.includes;
+ }
+
+ public FetchSourceContext includes(String[] includes) {
+ this.includes = includes;
+ return this;
+ }
+
+ public String[] excludes() {
+ return this.excludes;
+ }
+
+ public FetchSourceContext excludes(String[] excludes) {
+ this.excludes = excludes;
+ return this;
+ }
+
+ public static FetchSourceContext optionalReadFromStream(StreamInput in) throws IOException {
+ if (!in.readBoolean()) {
+ return null;
+ }
+ FetchSourceContext context = new FetchSourceContext();
+ context.readFrom(in);
+ return context;
+ }
+
+ public static void optionalWriteToStream(FetchSourceContext context, StreamOutput out) throws IOException {
+ if (context == null) {
+ out.writeBoolean(false);
+ return;
+ }
+ out.writeBoolean(true);
+ context.writeTo(out);
+ }
+
+ public static FetchSourceContext parseFromRestRequest(RestRequest request) {
+ Boolean fetchSource = null;
+ String[] source_excludes = null;
+ String[] source_includes = null;
+
+ String source = request.param("_source");
+ if (source != null) {
+ if (Booleans.isExplicitTrue(source)) {
+ fetchSource = true;
+ } else if (Booleans.isExplicitFalse(source)) {
+ fetchSource = false;
+ } else {
+ source_includes = Strings.splitStringByCommaToArray(source);
+ }
+ }
+ String sIncludes = request.param("_source_includes");
+ sIncludes = request.param("_source_include", sIncludes);
+ if (sIncludes != null) {
+ source_includes = Strings.splitStringByCommaToArray(sIncludes);
+ }
+
+ String sExcludes = request.param("_source_excludes");
+ sExcludes = request.param("_source_exclude", sExcludes);
+ if (sExcludes != null) {
+ source_excludes = Strings.splitStringByCommaToArray(sExcludes);
+ }
+
+ if (fetchSource != null || source_includes != null || source_excludes != null) {
+ return new FetchSourceContext(fetchSource == null ? true : fetchSource, source_includes, source_excludes);
+ }
+ return null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ fetchSource = in.readBoolean();
+ includes = in.readStringArray();
+ excludes = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(fetchSource);
+ out.writeStringArray(includes);
+ out.writeStringArray(excludes);
+ }
+}
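The context above can be driven programmatically as well as from REST parameters; a small sketch using only the constructors and accessors defined in this file, with made-up patterns.

    import org.elasticsearch.search.fetch.source.FetchSourceContext;

    public class FetchSourceContextExample {
        public static void main(String[] args) {
            // Equivalent of "_source" : false on the wire.
            FetchSourceContext off = FetchSourceContext.DO_NOT_FETCH_SOURCE;

            // Filtered fetch: what parseFromRestRequest builds from
            // ?_source_include=obj.*&_source_exclude=obj.secret (names illustrative).
            FetchSourceContext filtered = new FetchSourceContext(
                    new String[]{"obj.*"}, new String[]{"obj.secret"});

            System.out.println(off.fetchSource());      // false
            System.out.println(filtered.includes()[0]); // obj.*
        }
    }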
diff --git a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceParseElement.java b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceParseElement.java
new file mode 100644
index 0000000..61c2ebd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceParseElement.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.source;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * <pre>
+ * "source" : true/false
+ * "source" : "field"
+ * "source" : [ "include", "include" ]
+ * "source" : {
+ * "include" : ["obj"]
+ * "exclude" : ["obj"]
+ * }
+ * </pre>
+ */
+public class FetchSourceParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+
+ List<String> includes = null, excludes = null;
+ String currentFieldName = null;
+ token = parser.currentToken(); // the parser is already positioned on the value token
+ if (parser.isBooleanValue()) {
+ context.fetchSourceContext(new FetchSourceContext(parser.booleanValue()));
+ return;
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ context.fetchSourceContext(new FetchSourceContext(new String[]{parser.text()}));
+ return;
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ includes = new ArrayList<String>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ includes.add(parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+
+ List<String> currentList = null;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ if ("includes".equals(currentFieldName) || "include".equals(currentFieldName)) {
+ currentList = includes != null ? includes : (includes = new ArrayList<String>(2));
+ } else if ("excludes".equals(currentFieldName) || "exclude".equals(currentFieldName)) {
+ currentList = excludes != null ? excludes : (excludes = new ArrayList<String>(2));
+ } else {
+ throw new ElasticsearchParseException("Source definition may not contain " + parser.text());
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ currentList.add(parser.text());
+ }
+ } else if (token.isValue()) {
+ currentList.add(parser.text());
+ } else {
+ throw new ElasticsearchParseException("unexpected token while parsing source settings");
+ }
+ }
+ } else {
+ throw new ElasticsearchParseException("source element value can be of type " + token.name());
+ }
+
+
+ context.fetchSourceContext(new FetchSourceContext(
+ includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[includes.size()]),
+ excludes == null ? Strings.EMPTY_ARRAY : excludes.toArray(new String[excludes.size()])));
+
+ }
+}
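A sketch of the object form this element parses (it equally accepts a boolean, a single string, or an array of include patterns); the patterns are made up.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class FetchSourceBodyExample {
        public static void main(String[] args) throws Exception {
            // The object form with include and exclude arrays; singular
            // "include"/"exclude" spellings are accepted as well.
            XContentBuilder body = XContentFactory.jsonBuilder()
                    .startObject()
                        .startObject("_source")
                            .array("includes", "obj1.*", "obj2.*")
                            .array("excludes", "*.secret")
                        .endObject()
                    .endObject();
            System.out.println(body.string());
        }
    }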
diff --git a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java
new file mode 100644
index 0000000..d4d208a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.source;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Fetch sub-phase that loads and, if requested, filters each hit's _source.
+ */
+public class FetchSourceSubPhase implements FetchSubPhase {
+
+ @Inject
+ public FetchSourceSubPhase() {
+
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("_source", new FetchSourceParseElement());
+ return parseElements.build();
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.sourceRequested();
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ FetchSourceContext fetchSourceContext = context.fetchSourceContext();
+ assert fetchSourceContext.fetchSource();
+ if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
+ hitContext.hit().sourceRef(context.lookup().source().internalSourceRef());
+ return;
+ }
+
+ Object value = context.lookup().source().filter(fetchSourceContext.includes(), fetchSourceContext.excludes());
+ try {
+ XContentBuilder builder = XContentFactory.contentBuilder(context.lookup().source().sourceContentType());
+ builder.value(value);
+ hitContext.hit().sourceRef(builder.bytes());
+ } catch (IOException e) {
+ throw new ElasticsearchException("Error filtering source", e);
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java
new file mode 100644
index 0000000..f0d8a44
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.version;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.Term;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Fetch sub-phase that loads each hit's <tt>_version</tt> when requested.
+ */
+public class VersionFetchSubPhase implements FetchSubPhase {
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.of("version", new VersionParseElement());
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.version();
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ // it might make sense to cache the TermDocs on a shared fetch context and just skip here;
+ // it is going to mean we work on the high level multi reader and not the lower level reader as is
+ // the case below...
+ long version;
+ try {
+ version = Versions.loadVersion(
+ hitContext.readerContext().reader(),
+ new Term(UidFieldMapper.NAME, hitContext.fieldVisitor().uid().toBytesRef())
+ );
+ } catch (IOException e) {
+ throw new ElasticsearchException("Could not query index for _version", e);
+ }
+
+ if (version < 0) {
+ version = -1;
+ }
+ hitContext.hit().version(version);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/fetch/version/VersionParseElement.java b/src/main/java/org/elasticsearch/search/fetch/version/VersionParseElement.java
new file mode 100644
index 0000000..bbcf967
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/fetch/version/VersionParseElement.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.fetch.version;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses the boolean <tt>version</tt> element of a search request.
+ */
+public class VersionParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ context.version(parser.booleanValue());
+ }
+ }
+}
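The whole version feature is driven by a single boolean in the search body; a minimal sketch.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class VersionBodyExample {
        public static void main(String[] args) throws Exception {
            // "version" : true makes VersionFetchSubPhase load each hit's _version.
            XContentBuilder body = XContentFactory.jsonBuilder()
                    .startObject()
                        .field("version", true)
                    .endObject();
            System.out.println(body.string());
        }
    }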
diff --git a/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
new file mode 100644
index 0000000..45f656d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.highlight.QueryScorer;
+import org.apache.lucene.search.highlight.WeightedSpanTerm;
+import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+
+public final class CustomQueryScorer extends QueryScorer {
+
+ public CustomQueryScorer(Query query, IndexReader reader, String field,
+ String defaultField) {
+ super(query, reader, field, defaultField);
+ }
+
+ public CustomQueryScorer(Query query, IndexReader reader, String field) {
+ super(query, reader, field);
+ }
+
+ public CustomQueryScorer(Query query, String field, String defaultField) {
+ super(query, field, defaultField);
+ }
+
+ public CustomQueryScorer(Query query, String field) {
+ super(query, field);
+ }
+
+ public CustomQueryScorer(Query query) {
+ super(query);
+ }
+
+ public CustomQueryScorer(WeightedSpanTerm[] weightedTerms) {
+ super(weightedTerms);
+ }
+
+ @Override
+ protected WeightedSpanTermExtractor newTermExtractor(String defaultField) {
+ return defaultField == null ? new CustomWeightedSpanTermExtractor()
+ : new CustomWeightedSpanTermExtractor(defaultField);
+ }
+
+ private static class CustomWeightedSpanTermExtractor extends WeightedSpanTermExtractor {
+
+ public CustomWeightedSpanTermExtractor() {
+ super();
+ }
+
+ public CustomWeightedSpanTermExtractor(String defaultField) {
+ super(defaultField);
+ }
+
+ @Override
+ protected void extractUnknownQuery(Query query,
+ Map<String, WeightedSpanTerm> terms) throws IOException {
+ if (query instanceof FunctionScoreQuery) {
+ query = ((FunctionScoreQuery) query).getSubQuery();
+ extract(query, terms);
+ } else if (query instanceof FiltersFunctionScoreQuery) {
+ query = ((FiltersFunctionScoreQuery) query).getSubQuery();
+ extract(query, terms);
+ } else if (query instanceof XFilteredQuery) {
+ query = ((XFilteredQuery) query).getQuery();
+ extract(query, terms);
+ }
+ }
+
+ }
+
+}
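CustomQueryScorer drops in wherever Lucene's QueryScorer fits, while additionally unwrapping (Filters)FunctionScoreQuery and XFilteredQuery; a hedged sketch against the Lucene 4.6 highlighter API that ES 1.0 ships, with illustrative analyzer, field, and text.

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.highlight.Highlighter;
    import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
    import org.apache.lucene.util.Version;
    import org.elasticsearch.search.highlight.CustomQueryScorer;

    public class CustomQueryScorerExample {
        public static void main(String[] args) throws Exception {
            CustomQueryScorer scorer = new CustomQueryScorer(
                    new TermQuery(new Term("body", "search")), "body");
            Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), scorer);
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_46); // ES 1.0 ships Lucene 4.6
            String fragment = highlighter.getBestFragment(
                    analyzer, "body", "full text search is fun");
            System.out.println(fragment); // full text <B>search</B> is fun
        }
    }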
diff --git a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
new file mode 100644
index 0000000..0852578
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.search.highlight.Encoder;
+import org.apache.lucene.search.vectorhighlight.*;
+import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.highlight.vectorhighlight.SimpleFragmentsBuilder;
+import org.elasticsearch.search.highlight.vectorhighlight.SourceScoreOrderFragmentsBuilder;
+import org.elasticsearch.search.highlight.vectorhighlight.SourceSimpleFragmentsBuilder;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Highlighter backed by Lucene's FastVectorHighlighter; requires term vectors
+ * with positions and offsets on the highlighted field.
+ */
+public class FastVectorHighlighter implements Highlighter {
+
+ private static final SimpleBoundaryScanner DEFAULT_BOUNDARY_SCANNER = new SimpleBoundaryScanner();
+
+ private static final String CACHE_KEY = "highlight-fsv";
+ private final Boolean termVectorMultiValue;
+
+ @Inject
+ public FastVectorHighlighter(Settings settings) {
+ this.termVectorMultiValue = settings.getAsBoolean("search.highlight.term_vector_multi_value", true);
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{"fvh", "fast-vector-highlighter"};
+ }
+
+ @Override
+ public HighlightField highlight(HighlighterContext highlighterContext) {
+ SearchContextHighlight.Field field = highlighterContext.field;
+ SearchContext context = highlighterContext.context;
+ FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
+ FieldMapper<?> mapper = highlighterContext.mapper;
+
+ if (!(mapper.fieldType().storeTermVectors() && mapper.fieldType().storeTermVectorOffsets() && mapper.fieldType().storeTermVectorPositions())) {
+ throw new ElasticsearchIllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
+ }
+
+ Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
+
+ if (!hitContext.cache().containsKey(CACHE_KEY)) {
+ hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
+ }
+ HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
+
+ try {
+ FieldQuery fieldQuery;
+ if (field.fieldOptions().requireFieldMatch()) {
+ if (cache.fieldMatchFieldQuery == null) {
+ // we use the top level reader to rewrite the query against all readers, while caching it across hits (and across readers...)
+ cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query.originalQuery(), hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
+ }
+ fieldQuery = cache.fieldMatchFieldQuery;
+ } else {
+ if (cache.noFieldMatchFieldQuery == null) {
+ // we use the top level reader to rewrite the query against all readers, while caching it across hits (and across readers...)
+ cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query.originalQuery(), hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
+ }
+ fieldQuery = cache.noFieldMatchFieldQuery;
+ }
+
+ MapperHighlightEntry entry = cache.mappers.get(mapper);
+ if (entry == null) {
+ FragListBuilder fragListBuilder;
+ BaseFragmentsBuilder fragmentsBuilder;
+
+ BoundaryScanner boundaryScanner = DEFAULT_BOUNDARY_SCANNER;
+ if (field.fieldOptions().boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN || field.fieldOptions().boundaryChars() != SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) {
+ boundaryScanner = new SimpleBoundaryScanner(field.fieldOptions().boundaryMaxScan(), field.fieldOptions().boundaryChars());
+ }
+ boolean forceSource = context.highlight().forceSource(field);
+ if (field.fieldOptions().numberOfFragments() == 0) {
+ fragListBuilder = new SingleFragListBuilder();
+
+ if (!forceSource && mapper.fieldType().stored()) {
+ fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
+ } else {
+ fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
+ }
+ } else {
+ fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ? new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
+ if (field.fieldOptions().scoreOrdered()) {
+ if (!forceSource && mapper.fieldType().stored()) {
+ fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
+ } else {
+ fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
+ }
+ } else {
+ if (!forceSource && mapper.fieldType().stored()) {
+ fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
+ } else {
+ fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
+ }
+ }
+ }
+ fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue);
+ entry = new MapperHighlightEntry();
+ entry.fragListBuilder = fragListBuilder;
+ entry.fragmentsBuilder = fragmentsBuilder;
+ if (cache.fvh == null) {
+ // parameters to FVH are not required since:
+ // first two booleans are not relevant since they are set on the CustomFieldQuery (phrase and fieldMatch)
+ // fragment builders are used explicitly
+ cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
+ }
+ CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter());
+ cache.mappers.put(mapper, entry);
+ }
+ cache.fvh.setPhraseLimit(field.fieldOptions().phraseLimit());
+
+ String[] fragments;
+
+ // a HACK to make the highlighter do highlighting, even though it's using the single frag list builder
+ int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fieldOptions().numberOfFragments();
+ int fragmentCharSize = field.fieldOptions().numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fieldOptions().fragmentCharSize();
+ // we highlight against the low level reader and docId, because if we load source, we want to reuse it if possible
+ // Only send matched fields if they were requested to save time.
+ if (field.fieldOptions().matchedFields() != null && !field.fieldOptions().matchedFields().isEmpty()) {
+ fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.names().indexName(), field.fieldOptions().matchedFields(), fragmentCharSize,
+ numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
+ } else {
+ fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.names().indexName(), fragmentCharSize,
+ numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
+ }
+
+ if (fragments != null && fragments.length > 0) {
+ return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ }
+
+ int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
+ if (noMatchSize > 0) {
+ // Essentially we just request that a fragment is built from 0 to noMatchSize using the normal fragmentsBuilder
+ FieldFragList fieldFragList = new SimpleFieldFragList(-1 /*ignored*/);
+ fieldFragList.add(0, noMatchSize, Collections.<WeightedPhraseInfo>emptyList());
+ fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.names().indexName(),
+ fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
+ if (fragments != null && fragments.length > 0) {
+ return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ }
+ }
+
+ return null;
+
+ } catch (Exception e) {
+ throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
+ }
+ }
+
+ private class MapperHighlightEntry {
+ public FragListBuilder fragListBuilder;
+ public FragmentsBuilder fragmentsBuilder;
+
+ public org.apache.lucene.search.highlight.Highlighter highlighter;
+ }
+
+ private class HighlighterEntry {
+ public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh;
+ public FieldQuery noFieldMatchFieldQuery;
+ public FieldQuery fieldMatchFieldQuery;
+ public Map<FieldMapper, MapperHighlightEntry> mappers = Maps.newHashMap();
+ }
+}
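The fvh refuses fields indexed without term vectors carrying positions and offsets (see the check at the top of highlight()); a sketch of a compatible mapping, with made-up type and field names.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class FvhMappingExample {
        public static void main(String[] args) throws Exception {
            // "term_vector" : "with_positions_offsets" is what the guard in
            // FastVectorHighlighter#highlight() above demands.
            XContentBuilder mapping = XContentFactory.jsonBuilder()
                    .startObject()
                        .startObject("my_type")                  // hypothetical type
                            .startObject("properties")
                                .startObject("body")
                                    .field("type", "string")
                                    .field("term_vector", "with_positions_offsets")
                                .endObject()
                            .endObject()
                        .endObject()
                    .endObject();
            System.out.println(mapping.string());
        }
    }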
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
new file mode 100644
index 0000000..4382a60
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
@@ -0,0 +1,539 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * A builder for search highlighting.
+ *
+ * @see org.elasticsearch.search.builder.SearchSourceBuilder#highlight()
+ */
+public class HighlightBuilder implements ToXContent {
+
+ private List<Field> fields;
+
+ private String tagsSchema;
+
+ private String[] preTags;
+
+ private String[] postTags;
+
+ private String order;
+
+ private String encoder;
+
+ private Boolean requireFieldMatch;
+
+ private String highlighterType;
+
+ private String fragmenter;
+
+ private QueryBuilder highlightQuery;
+
+ private Integer noMatchSize;
+
+ private Integer phraseLimit;
+
+ private Map<String, Object> options;
+
+ private Boolean forceSource;
+
+ /**
+ * Adds a field to be highlighted with a default fragment size of 100 characters and
+ * a default number of fragments of 5, using the default encoder.
+ *
+ * @param name The field to highlight
+ */
+ public HighlightBuilder field(String name) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(new Field(name));
+ return this;
+ }
+
+
+ /**
+ * Adds a field to be highlighted with a provided fragment size (in characters), and
+ * default number of fragments of 5.
+ *
+ * @param name The field to highlight
+ * @param fragmentSize The size of a fragment in characters
+ */
+ public HighlightBuilder field(String name, int fragmentSize) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(new Field(name).fragmentSize(fragmentSize));
+ return this;
+ }
+
+
+ /**
+ * Adds a field to be highlighted with a provided fragment size (in characters), and
+ * a provided (maximum) number of fragments.
+ *
+ * @param name The field to highlight
+ * @param fragmentSize The size of a fragment in characters
+ * @param numberOfFragments The (maximum) number of fragments
+ */
+ public HighlightBuilder field(String name, int fragmentSize, int numberOfFragments) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments));
+ return this;
+ }
+
+
+ /**
+ * Adds a field to be highlighted with a provided fragment size (in characters), and
+ * a provided (maximum) number of fragments.
+ *
+ * @param name The field to highlight
+ * @param fragmentSize The size of a fragment in characters
+ * @param numberOfFragments The (maximum) number of fragments
+ * @param fragmentOffset The offset from the start of the fragment to the start of the highlight
+ */
+ public HighlightBuilder field(String name, int fragmentSize, int numberOfFragments, int fragmentOffset) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments)
+ .fragmentOffset(fragmentOffset));
+ return this;
+ }
+
+ public HighlightBuilder field(Field field) {
+ if (fields == null) {
+ fields = newArrayList();
+ }
+ fields.add(field);
+ return this;
+ }
+
+ /**
+ * Set a tag scheme that encapsulates built-in pre and post tags. The allowed schemes
+ * are <tt>styled</tt> and <tt>default</tt>.
+ *
+ * @param schemaName The tag scheme name
+ */
+ public HighlightBuilder tagsSchema(String schemaName) {
+ this.tagsSchema = schemaName;
+ return this;
+ }
+
+
+ /**
+ * Set the encoder for the highlighting. The supported encoders
+ * are <tt>default</tt> and <tt>html</tt>.
+ *
+ * @param encoder The encoder name
+ */
+ public HighlightBuilder encoder(String encoder) {
+ this.encoder = encoder;
+ return this;
+ }
+
+ /**
+ * Explicitly set the pre tags that will be used for highlighting.
+ */
+ public HighlightBuilder preTags(String... preTags) {
+ this.preTags = preTags;
+ return this;
+ }
+
+ /**
+ * Explicitly set the post tags that will be used for highlighting.
+ */
+ public HighlightBuilder postTags(String... postTags) {
+ this.postTags = postTags;
+ return this;
+ }
+
+ /**
+ * The order of fragments per field. By default, ordered by the order in the
+ * highlighted text. Can be <tt>score</tt>, in which case the fragments are
+ * ordered by their score.
+ */
+ public HighlightBuilder order(String order) {
+ this.order = order;
+ return this;
+ }
+
+ public HighlightBuilder requireFieldMatch(boolean requireFieldMatch) {
+ this.requireFieldMatch = requireFieldMatch;
+ return this;
+ }
+
+ /**
+ * Set type of highlighter to use. Supported types
+ * are <tt>highlighter</tt>, <tt>fast-vector-highlighter</tt> and <tt>postings-highlighter</tt>.
+ */
+ public HighlightBuilder highlighterType(String highlighterType) {
+ this.highlighterType = highlighterType;
+ return this;
+ }
+
+ /**
+ * Sets what fragmenter to use to break up text that is eligible for highlighting.
+ * This option is only applicable when using plain / normal highlighter.
+ */
+ public HighlightBuilder fragmenter(String fragmenter) {
+ this.fragmenter = fragmenter;
+ return this;
+ }
+
+ /**
+ * Sets a query to be used for highlighting all fields instead of the search query.
+ */
+ public HighlightBuilder highlightQuery(QueryBuilder highlightQuery) {
+ this.highlightQuery = highlightQuery;
+ return this;
+ }
+
+ /**
+ * Sets the size of the fragment to return from the beginning of the field if there are no matches to
+ * highlight and the field doesn't also define noMatchSize.
+ * @param noMatchSize integer to set or null to leave out of request. default is null.
+ * @return this for chaining
+ */
+ public HighlightBuilder noMatchSize(Integer noMatchSize) {
+ this.noMatchSize = noMatchSize;
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of phrases the fvh will consider if the field doesn't also define phraseLimit.
+ * @param phraseLimit maximum number of phrases the fvh will consider
+ * @return this for chaining
+ */
+ public HighlightBuilder phraseLimit(Integer phraseLimit) {
+ this.phraseLimit = phraseLimit;
+ return this;
+ }
+
+ /**
+ * Allows to set custom options for custom highlighters.
+ */
+ public HighlightBuilder options(Map<String, Object> options) {
+ this.options = options;
+ return this;
+ }
+
+ /**
+ * Forces the highlighting to highlight fields based on the source even if fields are stored separately.
+ */
+ public HighlightBuilder forceSource(boolean forceSource) {
+ this.forceSource = forceSource;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("highlight");
+ if (tagsSchema != null) {
+ builder.field("tags_schema", tagsSchema);
+ }
+ if (preTags != null) {
+ builder.array("pre_tags", preTags);
+ }
+ if (postTags != null) {
+ builder.array("post_tags", postTags);
+ }
+ if (order != null) {
+ builder.field("order", order);
+ }
+ if (encoder != null) {
+ builder.field("encoder", encoder);
+ }
+ if (requireFieldMatch != null) {
+ builder.field("require_field_match", requireFieldMatch);
+ }
+ if (highlighterType != null) {
+ builder.field("type", highlighterType);
+ }
+ if (fragmenter != null) {
+ builder.field("fragmenter", fragmenter);
+ }
+ if (highlightQuery != null) {
+ builder.field("highlight_query", highlightQuery);
+ }
+ if (noMatchSize != null) {
+ builder.field("no_match_size", noMatchSize);
+ }
+ if (phraseLimit != null) {
+ builder.field("phrase_limit", phraseLimit);
+ }
+ if (options != null && options.size() > 0) {
+ builder.field("options", options);
+ }
+ if (forceSource != null) {
+ builder.field("force_source", forceSource);
+ }
+ if (fields != null) {
+ builder.startObject("fields");
+ for (Field field : fields) {
+ builder.startObject(field.name());
+ if (field.preTags != null) {
+ builder.field("pre_tags", field.preTags);
+ }
+ if (field.postTags != null) {
+ builder.field("post_tags", field.postTags);
+ }
+ if (field.fragmentSize != -1) {
+ builder.field("fragment_size", field.fragmentSize);
+ }
+ if (field.numOfFragments != -1) {
+ builder.field("number_of_fragments", field.numOfFragments);
+ }
+ if (field.fragmentOffset != -1) {
+ builder.field("fragment_offset", field.fragmentOffset);
+ }
+ if (field.highlightFilter != null) {
+ builder.field("highlight_filter", field.highlightFilter);
+ }
+ if (field.order != null) {
+ builder.field("order", field.order);
+ }
+ if (field.requireFieldMatch != null) {
+ builder.field("require_field_match", field.requireFieldMatch);
+ }
+ if (field.boundaryMaxScan != -1) {
+ builder.field("boundary_max_scan", field.boundaryMaxScan);
+ }
+ if (field.boundaryChars != null) {
+ builder.field("boundary_chars", field.boundaryChars);
+ }
+ if (field.highlighterType != null) {
+ builder.field("type", field.highlighterType);
+ }
+ if (field.fragmenter != null) {
+ builder.field("fragmenter", field.fragmenter);
+ }
+ if (field.highlightQuery != null) {
+ builder.field("highlight_query", field.highlightQuery);
+ }
+ if (field.noMatchSize != null) {
+ builder.field("no_match_size", field.noMatchSize);
+ }
+ if (field.matchedFields != null) {
+ builder.field("matched_fields", field.matchedFields);
+ }
+ if (field.phraseLimit != null) {
+ builder.field("phrase_limit", field.phraseLimit);
+ }
+ if (field.options != null && field.options.size() > 0) {
+ builder.field("options", field.options);
+ }
+ if (field.forceSource != null) {
+ builder.field("force_source", field.forceSource);
+ }
+
+ builder.endObject();
+ }
+ builder.endObject();
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
+ public static class Field {
+ final String name;
+ String[] preTags;
+ String[] postTags;
+ int fragmentSize = -1;
+ int fragmentOffset = -1;
+ int numOfFragments = -1;
+ Boolean highlightFilter;
+ String order;
+ Boolean requireFieldMatch;
+ int boundaryMaxScan = -1;
+ char[] boundaryChars;
+ String highlighterType;
+ String fragmenter;
+ QueryBuilder highlightQuery;
+ Integer noMatchSize;
+ String[] matchedFields;
+ Integer phraseLimit;
+ Map<String, Object> options;
+ Boolean forceSource;
+
+ public Field(String name) {
+ this.name = name;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Explicitly set the pre tags for this field that will be used for highlighting.
+ * This overrides global settings set by {@link HighlightBuilder#preTags(String...)}.
+ */
+ public Field preTags(String... preTags) {
+ this.preTags = preTags;
+ return this;
+ }
+
+ /**
+ * Explicitly set the post tags for this field that will be used for highlighting.
+ * This overrides global settings set by {@link HighlightBuilder#postTags(String...)}.
+ */
+ public Field postTags(String... postTags) {
+ this.postTags = postTags;
+ return this;
+ }
+
+ public Field fragmentSize(int fragmentSize) {
+ this.fragmentSize = fragmentSize;
+ return this;
+ }
+
+ public Field fragmentOffset(int fragmentOffset) {
+ this.fragmentOffset = fragmentOffset;
+ return this;
+ }
+
+ public Field numOfFragments(int numOfFragments) {
+ this.numOfFragments = numOfFragments;
+ return this;
+ }
+
+ public Field highlightFilter(boolean highlightFilter) {
+ this.highlightFilter = highlightFilter;
+ return this;
+ }
+
+ /**
+ * The order of fragments per field. By default, ordered by the order in the
+ * highlighted text. Can be <tt>score</tt>, in which case the fragments are
+ * ordered by their score.
+ * This overrides global settings set by {@link HighlightBuilder#order(String)}.
+ */
+ public Field order(String order) {
+ this.order = order;
+ return this;
+ }
+
+ public Field requireFieldMatch(boolean requireFieldMatch) {
+ this.requireFieldMatch = requireFieldMatch;
+ return this;
+ }
+
+ public Field boundaryMaxScan(int boundaryMaxScan) {
+ this.boundaryMaxScan = boundaryMaxScan;
+ return this;
+ }
+
+ public Field boundaryChars(char[] boundaryChars) {
+ this.boundaryChars = boundaryChars;
+ return this;
+ }
+
+ /**
+ * Set type of highlighter to use. Supported types
+ * are <tt>highlighter</tt>, <tt>fast-vector-highlighter</tt> and <tt>postings-highlighter</tt>.
+ * This overrides global settings set by {@link HighlightBuilder#highlighterType(String)}.
+ */
+ public Field highlighterType(String highlighterType) {
+ this.highlighterType = highlighterType;
+ return this;
+ }
+
+ /**
+ * Sets what fragmenter to use to break up text that is eligible for highlighting.
+ * This option is only applicable when using plain / normal highlighter.
+ * This overrides global settings set by {@link HighlightBuilder#fragmenter(String)}.
+ */
+ public Field fragmenter(String fragmenter) {
+ this.fragmenter = fragmenter;
+ return this;
+ }
+
+ /**
+ * Sets a query to use for highlighting this field instead of the search query.
+ */
+ public Field highlightQuery(QueryBuilder highlightQuery) {
+ this.highlightQuery = highlightQuery;
+ return this;
+ }
+
+ /**
+ * Sets the size of the fragment to return from the beginning of the field if there are no matches to
+ * highlight.
+ * @param noMatchSize integer to set or null to leave out of request. default is null.
+ * @return this for chaining
+ */
+ public Field noMatchSize(Integer noMatchSize) {
+ this.noMatchSize = noMatchSize;
+ return this;
+ }
+
+ /**
+ * Allows to set custom options for custom highlighters.
+ * This overrides global settings set by {@link HighlightBuilder#options(Map)}.
+ */
+ public Field options(Map<String, Object> options) {
+ this.options = options;
+ return this;
+ }
+
+ /**
+ * Set the matched fields to highlight against this field data. Defaults to null, meaning just
+ * the named field. If you provide a list of fields here, don't forget to include this field's
+ * name, as it is not automatically included.
+ */
+ public Field matchedFields(String... matchedFields) {
+ this.matchedFields = matchedFields;
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of phrases the fvh will consider.
+ * @param phraseLimit maximum number of phrases the fvh will consider
+ * @return this for chaining
+ */
+ public Field phraseLimit(Integer phraseLimit) {
+ this.phraseLimit = phraseLimit;
+ return this;
+ }
+
+ /**
+ * Forces the highlighting to highlight this field based on the source even if this field is stored separately.
+ */
+ public Field forceSource(boolean forceSource) {
+ this.forceSource = forceSource;
+ return this;
+ }
+
+ }
+}
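
A minimal usage sketch (not part of the patch): the fluent per-field API above is typically attached to a search request. This assumes a connected Java API Client and that SearchRequestBuilder#addHighlightedField accepts a HighlightBuilder.Field, as in the 1.x Java API; the index and field names are hypothetical.

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.highlight.HighlightBuilder;

    // Per-field configuration built with the fluent HighlightBuilder.Field API.
    HighlightBuilder.Field bodyField = new HighlightBuilder.Field("body")
            .preTags("<mark>")
            .postTags("</mark>")
            .fragmentSize(120)
            .numOfFragments(3)
            .order("score");

    SearchResponse response = client.prepareSearch("articles") // hypothetical index
            .setQuery(QueryBuilders.matchQuery("body", "elasticsearch"))
            .addHighlightedField(bodyField)
            .execute().actionGet();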
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightField.java b/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
new file mode 100644
index 0000000..30b2ae1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * A field highlighted with its highlighted fragments.
+ */
+public class HighlightField implements Streamable {
+
+ private String name;
+
+ private Text[] fragments;
+
+ HighlightField() {
+ }
+
+ public HighlightField(String name, Text[] fragments) {
+ this.name = name;
+ this.fragments = fragments;
+ }
+
+ /**
+ * The name of the field highlighted.
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * The name of the field highlighted.
+ */
+ public String getName() {
+ return name();
+ }
+
+ /**
+ * The highlighted fragments, or <tt>null</tt> if highlighting failed (for example, because the field is not stored).
+ */
+ public Text[] fragments() {
+ return fragments;
+ }
+
+ /**
+ * The highlighted fragments, or <tt>null</tt> if highlighting failed (for example, because the field is not stored).
+ */
+ public Text[] getFragments() {
+ return fragments();
+ }
+
+ @Override
+ public String toString() {
+ return "[" + name + "], fragments[" + Arrays.toString(fragments) + "]";
+ }
+
+ public static HighlightField readHighlightField(StreamInput in) throws IOException {
+ HighlightField field = new HighlightField();
+ field.readFrom(in);
+ return field;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readSharedString();
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ if (size == 0) {
+ fragments = StringText.EMPTY_ARRAY;
+ } else {
+ fragments = new Text[size];
+ for (int i = 0; i < size; i++) {
+ fragments[i] = in.readText();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeSharedString(name);
+ if (fragments == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(fragments.length);
+ for (Text fragment : fragments) {
+ out.writeText(fragment);
+ }
+ }
+ }
+}
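
A consumption sketch (not part of the patch): once a search response comes back, the highlighted fragments for each hit can be read through the accessors above. Assumes a SearchResponse named response from the Java API; the field name is hypothetical.

    import org.elasticsearch.common.text.Text;
    import org.elasticsearch.search.SearchHit;
    import org.elasticsearch.search.highlight.HighlightField;

    for (SearchHit hit : response.getHits()) {
        HighlightField field = hit.getHighlightFields().get("body");
        // fragments() may be null if the field could not be highlighted
        if (field != null && field.getFragments() != null) {
            for (Text fragment : field.getFragments()) {
                System.out.println(fragment.string());
            }
        }
    }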
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightModule.java b/src/main/java/org/elasticsearch/search/highlight/HighlightModule.java
new file mode 100644
index 0000000..7772fe1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlightModule.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+
+import java.util.List;
+
+/**
+ * Module that binds the built-in highlighter implementations and lets plugins
+ * register additional ones via {@link #registerHighlighter(Class)}.
+ */
+public class HighlightModule extends AbstractModule {
+
+ private List<Class<? extends Highlighter>> highlighters = Lists.newArrayList();
+
+ public HighlightModule() {
+ registerHighlighter(FastVectorHighlighter.class);
+ registerHighlighter(PlainHighlighter.class);
+ registerHighlighter(PostingsHighlighter.class);
+ }
+
+ public void registerHighlighter(Class<? extends Highlighter> clazz) {
+ highlighters.add(clazz);
+ }
+
+ @Override
+ protected void configure() {
+ Multibinder<Highlighter> multibinder = Multibinder.newSetBinder(binder(), Highlighter.class);
+ for (Class<? extends Highlighter> highlighter : highlighters) {
+ multibinder.addBinding().to(highlighter);
+ }
+ bind(Highlighters.class).asEagerSingleton();
+ }
+}
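
Because registerHighlighter is public and the module is created per node, a plugin can contribute its own implementation. A sketch under the 1.x plugin model, where module callbacks are discovered reflectively through public onModule(...) methods; MyHighlighter is a hypothetical Highlighter implementation.

    import org.elasticsearch.plugins.AbstractPlugin;
    import org.elasticsearch.search.highlight.HighlightModule;

    public class MyHighlighterPlugin extends AbstractPlugin {
        @Override
        public String name() {
            return "my-highlighter";
        }

        @Override
        public String description() {
            return "Registers a custom highlighter";
        }

        // Invoked by the plugin service when the HighlightModule is constructed.
        public void onModule(HighlightModule module) {
            module.registerHighlighter(MyHighlighter.class); // hypothetical implementation
        }
    }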
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
new file mode 100644
index 0000000..4cc9033
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * Fetch sub phase that runs the configured highlighters over each hit.
+ */
+public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
+
+ private final Highlighters highlighters;
+
+ @Inject
+ public HighlightPhase(Settings settings, Highlighters highlighters) {
+ super(settings);
+ this.highlighters = highlighters;
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ return ImmutableMap.of("highlight", new HighlighterParseElement());
+ }
+
+ @Override
+ public boolean hitsExecutionNeeded(SearchContext context) {
+ return false;
+ }
+
+ @Override
+ public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+ }
+
+ @Override
+ public boolean hitExecutionNeeded(SearchContext context) {
+ return context.highlight() != null;
+ }
+
+ @Override
+ public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+ Map<String, HighlightField> highlightFields = newHashMap();
+ for (SearchContextHighlight.Field field : context.highlight().fields()) {
+ Set<String> fieldNamesToHighlight;
+ if (Regex.isSimpleMatchPattern(field.field())) {
+ DocumentMapper documentMapper = context.mapperService().documentMapper(hitContext.hit().type());
+ fieldNamesToHighlight = documentMapper.mappers().simpleMatchToFullName(field.field());
+ } else {
+ fieldNamesToHighlight = ImmutableSet.of(field.field());
+ }
+
+ if (context.highlight().forceSource(field)) {
+ SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().type()).sourceMapper();
+ if (!sourceFieldMapper.enabled()) {
+ throw new ElasticsearchIllegalArgumentException("source is forced for fields " + fieldNamesToHighlight + " but type [" + hitContext.hit().type() + "] has disabled _source");
+ }
+ }
+
+ for (String fieldName : fieldNamesToHighlight) {
+ FieldMapper<?> fieldMapper = getMapperForField(fieldName, context, hitContext);
+ if (fieldMapper == null) {
+ continue;
+ }
+
+ String highlighterType = field.fieldOptions().highlighterType();
+ if (highlighterType == null) {
+ boolean useFastVectorHighlighter = fieldMapper.fieldType().storeTermVectors() && fieldMapper.fieldType().storeTermVectorOffsets() && fieldMapper.fieldType().storeTermVectorPositions();
+ if (useFastVectorHighlighter) {
+ highlighterType = "fvh";
+ } else if (fieldMapper.fieldType().indexOptions() == FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
+ highlighterType = "postings";
+ } else {
+ highlighterType = "plain";
+ }
+ }
+
+ Highlighter highlighter = highlighters.get(highlighterType);
+ if (highlighter == null) {
+ throw new ElasticsearchIllegalArgumentException("unknown highlighter type [" + highlighterType + "] for the field [" + fieldName + "]");
+ }
+
+ HighlighterContext.HighlightQuery highlightQuery;
+ if (field.fieldOptions().highlightQuery() == null) {
+ highlightQuery = new HighlighterContext.HighlightQuery(context.parsedQuery().query(), context.query(), context.queryRewritten());
+ } else {
+ highlightQuery = new HighlighterContext.HighlightQuery(field.fieldOptions().highlightQuery(), field.fieldOptions().highlightQuery(), false);
+ }
+ HighlighterContext highlighterContext = new HighlighterContext(fieldName, field, fieldMapper, context, hitContext, highlightQuery);
+ HighlightField highlightField = highlighter.highlight(highlighterContext);
+ if (highlightField != null) {
+ highlightFields.put(highlightField.name(), highlightField);
+ }
+ }
+ }
+
+ hitContext.hit().highlightFields(highlightFields);
+ }
+
+ private FieldMapper<?> getMapperForField(String fieldName, SearchContext searchContext, HitContext hitContext) {
+ DocumentMapper documentMapper = searchContext.mapperService().documentMapper(hitContext.hit().type());
+ FieldMapper<?> mapper = documentMapper.mappers().smartNameFieldMapper(fieldName);
+ if (mapper == null) {
+ MapperService.SmartNameFieldMappers fullMapper = searchContext.mapperService().smartName(fieldName);
+ if (fullMapper == null || !fullMapper.hasDocMapper() || fullMapper.docMapper().type().equals(hitContext.hit().type())) {
+ return null;
+ }
+ mapper = fullMapper.mapper();
+ }
+
+ return mapper;
+ }
+}
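
The default-selection branch above keys off the field mapping: term vectors stored with positions and offsets select the fast vector highlighter ("fvh"), index_options of offsets selects the postings highlighter, and everything else falls back to "plain". A mapping sketch that would exercise each branch; the index, type and field names are hypothetical.

    import org.elasticsearch.client.Client;

    String mapping =
            "{\"doc\":{\"properties\":{"
            + "\"fvh_field\":{\"type\":\"string\",\"term_vector\":\"with_positions_offsets\"}," // -> fvh
            + "\"postings_field\":{\"type\":\"string\",\"index_options\":\"offsets\"},"         // -> postings
            + "\"plain_field\":{\"type\":\"string\"}"                                           // -> plain
            + "}}}";

    client.admin().indices().prepareCreate("articles") // hypothetical index
            .addMapping("doc", mapping)
            .execute().actionGet();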
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java b/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java
new file mode 100644
index 0000000..042c243
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.search.highlight.Encoder;
+import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
+import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.List;
+
+public final class HighlightUtils {
+
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting (postings highlighter)
+ public static final char PARAGRAPH_SEPARATOR = 8233;
+
+ private HighlightUtils() {
+
+ }
+
+ static List<Object> loadFieldValues(SearchContextHighlight.Field field, FieldMapper<?> mapper, SearchContext searchContext, FetchSubPhase.HitContext hitContext) throws IOException {
+ //percolator needs to always load from source, thus it sets the global force source to true
+ boolean forceSource = searchContext.highlight().forceSource(field);
+ List<Object> textsToHighlight;
+ if (!forceSource && mapper.fieldType().stored()) {
+ CustomFieldsVisitor fieldVisitor = new CustomFieldsVisitor(ImmutableSet.of(mapper.names().indexName()), false);
+ hitContext.reader().document(hitContext.docId(), fieldVisitor);
+ textsToHighlight = fieldVisitor.fields().get(mapper.names().indexName());
+ if (textsToHighlight == null) {
+ // Can happen if the document doesn't have the field to highlight
+ textsToHighlight = ImmutableList.of();
+ }
+ } else {
+ SearchLookup lookup = searchContext.lookup();
+ lookup.setNextReader(hitContext.readerContext());
+ lookup.setNextDocId(hitContext.docId());
+ textsToHighlight = lookup.source().extractRawValues(mapper.names().sourcePath());
+ }
+ assert textsToHighlight != null;
+ return textsToHighlight;
+ }
+
+ static class Encoders {
+ static Encoder DEFAULT = new DefaultEncoder();
+ static Encoder HTML = new SimpleHTMLEncoder();
+ }
+}
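
A small sketch of what PARAGRAPH_SEPARATOR is for: the postings highlighter joins the values of a multi-valued field with U+2029 before highlighting, so each value stays a discrete passage. A consumer holding such merged text could split it back apart like this (the sample strings are hypothetical):

    String merged = "first value" + HighlightUtils.PARAGRAPH_SEPARATOR + "second value";
    // U+2029 is not a regex metacharacter, so a plain split works
    String[] values = merged.split(String.valueOf(HighlightUtils.PARAGRAPH_SEPARATOR));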
diff --git a/src/main/java/org/elasticsearch/search/highlight/Highlighter.java b/src/main/java/org/elasticsearch/search/highlight/Highlighter.java
new file mode 100644
index 0000000..407cdc7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/Highlighter.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+/**
+ * Extension point for highlighter implementations. Each implementation exposes
+ * one or more names under which it can be requested via the <tt>type</tt> option.
+ */
+public interface Highlighter {
+
+ String[] names();
+
+ HighlightField highlight(HighlighterContext highlighterContext);
+}
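
A toy implementation sketch showing the contract: names() declares the lookup keys and highlight() turns the per-field context into a HighlightField (or null to skip the field). The class and its "toy" name are hypothetical.

    import org.elasticsearch.common.text.StringText;
    import org.elasticsearch.common.text.Text;

    public class ToyHighlighter implements Highlighter {

        @Override
        public String[] names() {
            return new String[] { "toy" }; // requested via "type" : "toy"
        }

        @Override
        public HighlightField highlight(HighlighterContext context) {
            // Echo the field name wrapped in the configured tags instead of real fragments.
            String pre = context.field.fieldOptions().preTags()[0];
            String post = context.field.fieldOptions().postTags()[0];
            Text[] fragments = new Text[] { new StringText(pre + context.fieldName + post) };
            return new HighlightField(context.fieldName, fragments);
        }
    }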
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlighterContext.java b/src/main/java/org/elasticsearch/search/highlight/HighlighterContext.java
new file mode 100644
index 0000000..b0cf5bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlighterContext.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Per-field state handed to a {@link Highlighter} implementation.
+ */
+public class HighlighterContext {
+
+ public final String fieldName;
+ public final SearchContextHighlight.Field field;
+ public final FieldMapper<?> mapper;
+ public final SearchContext context;
+ public final FetchSubPhase.HitContext hitContext;
+ public final HighlightQuery query;
+
+ public HighlighterContext(String fieldName, SearchContextHighlight.Field field, FieldMapper<?> mapper, SearchContext context,
+ FetchSubPhase.HitContext hitContext, HighlightQuery query) {
+ this.fieldName = fieldName;
+ this.field = field;
+ this.mapper = mapper;
+ this.context = context;
+ this.hitContext = hitContext;
+ this.query = query;
+ }
+
+ static class HighlightQuery {
+ private final Query originalQuery;
+ private final Query query;
+ private final boolean queryRewritten;
+
+ HighlightQuery(Query originalQuery, Query query, boolean queryRewritten) {
+ this.originalQuery = originalQuery;
+ this.query = query;
+ this.queryRewritten = queryRewritten;
+ }
+
+ public boolean queryRewritten() {
+ return queryRewritten;
+ }
+
+ public Query originalQuery() {
+ return originalQuery;
+ }
+
+ public Query query() {
+ return query;
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java
new file mode 100644
index 0000000..4188b30
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * <pre>
+ * highlight : {
+ * tags_schema : "styled",
+ * pre_tags : ["tag1", "tag2"],
+ * post_tags : ["tag1", "tag2"],
+ * order : "score",
+ * highlight_filter : true,
+ * fields : {
+ * field1 : { },
+ * field2 : { fragment_size : 100, number_of_fragments : 2 },
+ * field3 : { number_of_fragments : 5, order : "simple", tags_schema : "styled" },
+ * field4 : { number_of_fragments: 0, pre_tags : ["openingTagA", "openingTagB"], post_tags : ["closingTag"] }
+ * }
+ * }
+ * </pre>
+ */
+public class HighlighterParseElement implements SearchParseElement {
+
+ private static final String[] DEFAULT_PRE_TAGS = new String[]{"<em>"};
+ private static final String[] DEFAULT_POST_TAGS = new String[]{"</em>"};
+
+ private static final String[] STYLED_PRE_TAGS = {
+ "<em class=\"hlt1\">", "<em class=\"hlt2\">", "<em class=\"hlt3\">",
+ "<em class=\"hlt4\">", "<em class=\"hlt5\">", "<em class=\"hlt6\">",
+ "<em class=\"hlt7\">", "<em class=\"hlt8\">", "<em class=\"hlt9\">",
+ "<em class=\"hlt10\">"
+ };
+ private static final String[] STYLED_POST_TAGS = {"</em>"};
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+ String topLevelFieldName = null;
+ List<Tuple<String, SearchContextHighlight.FieldOptions.Builder>> fieldsOptions = newArrayList();
+
+ SearchContextHighlight.FieldOptions.Builder globalOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder()
+ .preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(false).highlightFilter(false)
+ .requireFieldMatch(false).forceSource(false).fragmentCharSize(100).numberOfFragments(5)
+ .encoder("default").boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN)
+ .boundaryChars(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS)
+ .noMatchSize(0).phraseLimit(256);
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ topLevelFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("pre_tags".equals(topLevelFieldName) || "preTags".equals(topLevelFieldName)) {
+ List<String> preTagsList = Lists.newArrayList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ preTagsList.add(parser.text());
+ }
+ globalOptionsBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
+ } else if ("post_tags".equals(topLevelFieldName) || "postTags".equals(topLevelFieldName)) {
+ List<String> postTagsList = Lists.newArrayList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ postTagsList.add(parser.text());
+ }
+ globalOptionsBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
+ }
+ } else if (token.isValue()) {
+ if ("order".equals(topLevelFieldName)) {
+ globalOptionsBuilder.scoreOrdered("score".equals(parser.text()));
+ } else if ("tags_schema".equals(topLevelFieldName) || "tagsSchema".equals(topLevelFieldName)) {
+ String schema = parser.text();
+ if ("styled".equals(schema)) {
+ globalOptionsBuilder.preTags(STYLED_PRE_TAGS);
+ globalOptionsBuilder.postTags(STYLED_POST_TAGS);
+ }
+ } else if ("highlight_filter".equals(topLevelFieldName) || "highlightFilter".equals(topLevelFieldName)) {
+ globalOptionsBuilder.highlightFilter(parser.booleanValue());
+ } else if ("fragment_size".equals(topLevelFieldName) || "fragmentSize".equals(topLevelFieldName)) {
+ globalOptionsBuilder.fragmentCharSize(parser.intValue());
+ } else if ("number_of_fragments".equals(topLevelFieldName) || "numberOfFragments".equals(topLevelFieldName)) {
+ globalOptionsBuilder.numberOfFragments(parser.intValue());
+ } else if ("encoder".equals(topLevelFieldName)) {
+ globalOptionsBuilder.encoder(parser.text());
+ } else if ("require_field_match".equals(topLevelFieldName) || "requireFieldMatch".equals(topLevelFieldName)) {
+ globalOptionsBuilder.requireFieldMatch(parser.booleanValue());
+ } else if ("boundary_max_scan".equals(topLevelFieldName) || "boundaryMaxScan".equals(topLevelFieldName)) {
+ globalOptionsBuilder.boundaryMaxScan(parser.intValue());
+ } else if ("boundary_chars".equals(topLevelFieldName) || "boundaryChars".equals(topLevelFieldName)) {
+ char[] charsArr = parser.text().toCharArray();
+ Character[] globalBoundaryChars = new Character[charsArr.length];
+ for (int i = 0; i < charsArr.length; i++) {
+ globalBoundaryChars[i] = charsArr[i];
+ }
+ globalOptionsBuilder.boundaryChars(globalBoundaryChars);
+ } else if ("type".equals(topLevelFieldName)) {
+ globalOptionsBuilder.highlighterType(parser.text());
+ } else if ("fragmenter".equals(topLevelFieldName)) {
+ globalOptionsBuilder.fragmenter(parser.text());
+ } else if ("no_match_size".equals(topLevelFieldName) || "noMatchSize".equals(topLevelFieldName)) {
+ globalOptionsBuilder.noMatchSize(parser.intValue());
+ } else if ("force_source".equals(topLevelFieldName) || "forceSource".equals(topLevelFieldName)) {
+ globalOptionsBuilder.forceSource(parser.booleanValue());
+ } else if ("phrase_limit".equals(topLevelFieldName) || "phraseLimit".equals(topLevelFieldName)) {
+ globalOptionsBuilder.phraseLimit(parser.intValue());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT && "options".equals(topLevelFieldName)) {
+ globalOptionsBuilder.options(parser.map());
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("fields".equals(topLevelFieldName)) {
+ String highlightFieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ highlightFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ SearchContextHighlight.FieldOptions.Builder fieldOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder();
+ String fieldName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("pre_tags".equals(fieldName) || "preTags".equals(fieldName)) {
+ List<String> preTagsList = Lists.newArrayList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ preTagsList.add(parser.text());
+ }
+ fieldOptionsBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
+ } else if ("post_tags".equals(fieldName) || "postTags".equals(fieldName)) {
+ List<String> postTagsList = Lists.newArrayList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ postTagsList.add(parser.text());
+ }
+ fieldOptionsBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
+ } else if ("matched_fields".equals(fieldName) || "matchedFields".equals(fieldName)) {
+ Set<String> matchedFields = Sets.newHashSet();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ matchedFields.add(parser.text());
+ }
+ fieldOptionsBuilder.matchedFields(matchedFields);
+ }
+ } else if (token.isValue()) {
+ if ("fragment_size".equals(fieldName) || "fragmentSize".equals(fieldName)) {
+ fieldOptionsBuilder.fragmentCharSize(parser.intValue());
+ } else if ("number_of_fragments".equals(fieldName) || "numberOfFragments".equals(fieldName)) {
+ fieldOptionsBuilder.numberOfFragments(parser.intValue());
+ } else if ("fragment_offset".equals(fieldName) || "fragmentOffset".equals(fieldName)) {
+ fieldOptionsBuilder.fragmentOffset(parser.intValue());
+ } else if ("highlight_filter".equals(fieldName) || "highlightFilter".equals(fieldName)) {
+ fieldOptionsBuilder.highlightFilter(parser.booleanValue());
+ } else if ("order".equals(fieldName)) {
+ fieldOptionsBuilder.scoreOrdered("score".equals(parser.text()));
+ } else if ("require_field_match".equals(fieldName) || "requireFieldMatch".equals(fieldName)) {
+ fieldOptionsBuilder.requireFieldMatch(parser.booleanValue());
+ } else if ("boundary_max_scan".equals(topLevelFieldName) || "boundaryMaxScan".equals(topLevelFieldName)) {
+ fieldOptionsBuilder.boundaryMaxScan(parser.intValue());
+ } else if ("boundary_chars".equals(topLevelFieldName) || "boundaryChars".equals(topLevelFieldName)) {
+ char[] charsArr = parser.text().toCharArray();
+ Character[] boundaryChars = new Character[charsArr.length];
+ for (int i = 0; i < charsArr.length; i++) {
+ boundaryChars[i] = charsArr[i];
+ }
+ fieldOptionsBuilder.boundaryChars(boundaryChars);
+ } else if ("type".equals(fieldName)) {
+ fieldOptionsBuilder.highlighterType(parser.text());
+ } else if ("fragmenter".equals(fieldName)) {
+ fieldOptionsBuilder.fragmenter(parser.text());
+ } else if ("no_match_size".equals(fieldName) || "noMatchSize".equals(fieldName)) {
+ fieldOptionsBuilder.noMatchSize(parser.intValue());
+ } else if ("force_source".equals(fieldName) || "forceSource".equals(fieldName)) {
+ fieldOptionsBuilder.forceSource(parser.booleanValue());
+ } else if ("phrase_limit".equals(fieldName) || "phraseLimit".equals(fieldName)) {
+ fieldOptionsBuilder.phraseLimit(parser.intValue());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("highlight_query".equals(fieldName) || "highlightQuery".equals(fieldName)) {
+ fieldOptionsBuilder.highlightQuery(context.queryParserService().parse(parser).query());
+ } else if ("options".equals(fieldName)) {
+ fieldOptionsBuilder.options(parser.map());
+ }
+ }
+ }
+ fieldsOptions.add(Tuple.tuple(highlightFieldName, fieldOptionsBuilder));
+ }
+ }
+ } else if ("highlight_query".equals(topLevelFieldName) || "highlightQuery".equals(topLevelFieldName)) {
+ globalOptionsBuilder.highlightQuery(context.queryParserService().parse(parser).query());
+ }
+ }
+ }
+
+ SearchContextHighlight.FieldOptions globalOptions = globalOptionsBuilder.build();
+ if (globalOptions.preTags() != null && globalOptions.postTags() == null) {
+ throw new SearchParseException(context, "Highlighter global preTags are set, but global postTags are not set");
+ }
+
+ List<SearchContextHighlight.Field> fields = Lists.newArrayList();
+ // now, go over and fill all fieldsOptions with default values from the global state
+ for (Tuple<String, SearchContextHighlight.FieldOptions.Builder> tuple : fieldsOptions) {
+ fields.add(new SearchContextHighlight.Field(tuple.v1(), tuple.v2().merge(globalOptions).build()));
+ }
+
+ context.highlight(new SearchContextHighlight(fields));
+ }
+}
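
A request-body sketch exercising the field-level options parsed above, in the same style as the class javadoc sample (field names hypothetical; each snake_case key also has the camelCase variant shown in the parser):

    highlight : {
        fields : {
            body : {
                type : "fvh",
                matched_fields : ["body", "body.plain"],
                boundary_max_scan : 25,
                no_match_size : 100,
                highlight_query : { match : { body : "quick fox" } }
            }
        }
    }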
diff --git a/src/main/java/org/elasticsearch/search/highlight/Highlighters.java b/src/main/java/org/elasticsearch/search/highlight/Highlighters.java
new file mode 100644
index 0000000..34e165d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/Highlighters.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+
+import java.util.Set;
+
+/**
+ * Registry of the available {@link Highlighter} implementations, keyed by name.
+ */
+public class Highlighters {
+ private final ImmutableMap<String, Highlighter> parsers;
+
+ @Inject
+ public Highlighters(Set<Highlighter> parsers) {
+ MapBuilder<String, Highlighter> builder = MapBuilder.newMapBuilder();
+ for (Highlighter parser : parsers) {
+ for (String type : parser.names()) {
+ builder.put(type, parser);
+ }
+ }
+ this.parsers = builder.immutableMap();
+ }
+
+ public Highlighter get(String type) {
+ return parsers.get(type);
+ }
+
+}
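
Since each Highlighter is registered under every name it reports, aliases resolve to the same instance. A sketch, assuming an injected Highlighters registry:

    Highlighter plain = highlighters.get("plain");
    Highlighter alias = highlighters.get("highlighter");
    assert plain == alias; // both names map to the single PlainHighlighter binding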
diff --git a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
new file mode 100644
index 0000000..aad0f91
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.highlight.*;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Highlighter that re-analyzes the field text and highlights it with Lucene's classic highlighter.
+ */
+public class PlainHighlighter implements Highlighter {
+
+ private static final String CACHE_KEY = "highlight-plain";
+
+ @Override
+ public String[] names() {
+ return new String[] { "plain", "highlighter" };
+ }
+
+ @Override
+ public HighlightField highlight(HighlighterContext highlighterContext) {
+ SearchContextHighlight.Field field = highlighterContext.field;
+ SearchContext context = highlighterContext.context;
+ FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
+ FieldMapper<?> mapper = highlighterContext.mapper;
+
+ Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
+
+ if (!hitContext.cache().containsKey(CACHE_KEY)) {
+ Map<FieldMapper<?>, org.apache.lucene.search.highlight.Highlighter> mappers = Maps.newHashMap();
+ hitContext.cache().put(CACHE_KEY, mappers);
+ }
+ @SuppressWarnings("unchecked")
+ Map<FieldMapper<?>, org.apache.lucene.search.highlight.Highlighter> cache = (Map<FieldMapper<?>, org.apache.lucene.search.highlight.Highlighter>) hitContext.cache().get(CACHE_KEY);
+
+ org.apache.lucene.search.highlight.Highlighter entry = cache.get(mapper);
+ if (entry == null) {
+ Query query = highlighterContext.query.originalQuery();
+ QueryScorer queryScorer = new CustomQueryScorer(query, field.fieldOptions().requireFieldMatch() ? mapper.names().indexName() : null);
+ queryScorer.setExpandMultiTermQuery(true);
+ Fragmenter fragmenter;
+ if (field.fieldOptions().numberOfFragments() == 0) {
+ fragmenter = new NullFragmenter();
+ } else if (field.fieldOptions().fragmenter() == null) {
+ fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
+ } else if ("simple".equals(field.fieldOptions().fragmenter())) {
+ fragmenter = new SimpleFragmenter(field.fieldOptions().fragmentCharSize());
+ } else if ("span".equals(field.fieldOptions().fragmenter())) {
+ fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
+ } else {
+ throw new ElasticsearchIllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() + "] for the field [" + highlighterContext.fieldName + "]");
+ }
+ Formatter formatter = new SimpleHTMLFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0]);
+
+ entry = new org.apache.lucene.search.highlight.Highlighter(formatter, encoder, queryScorer);
+ entry.setTextFragmenter(fragmenter);
+ // always highlight across all data
+ entry.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
+
+ cache.put(mapper, entry);
+ }
+
+ // a HACK to make the highlighter produce highlights even though it is using the single frag list builder
+ int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 1 : field.fieldOptions().numberOfFragments();
+ ArrayList<TextFragment> fragsList = new ArrayList<TextFragment>();
+ List<Object> textsToHighlight;
+
+ try {
+ textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext);
+
+ for (Object textToHighlight : textsToHighlight) {
+ String text = textToHighlight.toString();
+ Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
+ TokenStream tokenStream = analyzer.tokenStream(mapper.names().indexName(), text);
+ if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
+ // can't perform highlighting if the stream has no terms (binary token stream) or no offsets
+ continue;
+ }
+ TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
+ for (TextFragment bestTextFragment : bestTextFragments) {
+ if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
+ fragsList.add(bestTextFragment);
+ }
+ }
+ }
+ } catch (Exception e) {
+ throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
+ }
+ if (field.fieldOptions().scoreOrdered()) {
+ CollectionUtil.introSort(fragsList, new Comparator<TextFragment>() {
+ public int compare(TextFragment o1, TextFragment o2) {
+ return Math.round(o2.getScore() - o1.getScore());
+ }
+ });
+ }
+ String[] fragments;
+ // number_of_fragments is set to 0 but we have a multivalued field
+ if (field.fieldOptions().numberOfFragments() == 0 && textsToHighlight.size() > 1 && fragsList.size() > 0) {
+ fragments = new String[fragsList.size()];
+ for (int i = 0; i < fragsList.size(); i++) {
+ fragments[i] = fragsList.get(i).toString();
+ }
+ } else {
+ // refine numberOfFragments if needed
+ numberOfFragments = fragsList.size() < numberOfFragments ? fragsList.size() : numberOfFragments;
+ fragments = new String[numberOfFragments];
+ for (int i = 0; i < fragments.length; i++) {
+ fragments[i] = fragsList.get(i).toString();
+ }
+ }
+
+ if (fragments.length > 0) {
+ return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ }
+
+ int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
+ if (noMatchSize > 0 && textsToHighlight.size() > 0) {
+ // Pull an excerpt from the beginning of the string but make sure to split the string on a term boundary.
+ String fieldContents = textsToHighlight.get(0).toString();
+ Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
+ int end;
+ try {
+ end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer.tokenStream(mapper.names().indexName(), fieldContents));
+ } catch (Exception e) {
+ throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
+ }
+ if (end > 0) {
+ return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) });
+ }
+ }
+ return null;
+ }
+
+ private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, TokenStream tokenStream) throws IOException {
+ try {
+ if (!tokenStream.hasAttribute(OffsetAttribute.class)) {
+ // Can't split on term boundaries without offsets
+ return -1;
+ }
+ int end = -1;
+ tokenStream.reset();
+ while (tokenStream.incrementToken()) {
+ OffsetAttribute attr = tokenStream.getAttribute(OffsetAttribute.class);
+ if (attr.endOffset() >= noMatchSize) {
+ // Jump to the end of this token if it wouldn't put us past the boundary
+ if (attr.endOffset() == noMatchSize) {
+ end = noMatchSize;
+ }
+ return end;
+ }
+ end = attr.endOffset();
+ }
+ // We've exhausted the token stream so we should just highlight everything.
+ return end;
+ } finally {
+ tokenStream.end();
+ tokenStream.close();
+ }
+ }
+}
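
A request-side sketch tying the options above together: selecting the plain highlighter explicitly, choosing a fragmenter, and asking for a leading excerpt when nothing matches (field name hypothetical).

    import org.elasticsearch.search.highlight.HighlightBuilder;

    HighlightBuilder.Field body = new HighlightBuilder.Field("body")
            .highlighterType("plain")
            .fragmenter("span") // the default; "simple" is the other supported option
            .noMatchSize(150);  // excerpt is cut on a term boundary at or before 150 chars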
diff --git a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
new file mode 100644
index 0000000..f033fd0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
@@ -0,0 +1,293 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.highlight.Encoder;
+import org.apache.lucene.search.postingshighlight.CustomPassageFormatter;
+import org.apache.lucene.search.postingshighlight.CustomPostingsHighlighter;
+import org.apache.lucene.search.postingshighlight.Snippet;
+import org.apache.lucene.search.postingshighlight.WholeBreakIterator;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CollectionUtil;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.text.BreakIterator;
+import java.util.*;
+
+public class PostingsHighlighter implements Highlighter {
+
+ private static final String CACHE_KEY = "highlight-postings";
+
+ @Override
+ public String[] names() {
+ return new String[]{"postings", "postings-highlighter"};
+ }
+
+ @Override
+ public HighlightField highlight(HighlighterContext highlighterContext) {
+
+ FieldMapper<?> fieldMapper = highlighterContext.mapper;
+ SearchContextHighlight.Field field = highlighterContext.field;
+ if (fieldMapper.fieldType().indexOptions() != FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
+ throw new ElasticsearchIllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
+ }
+
+ SearchContext context = highlighterContext.context;
+ FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
+
+ if (!hitContext.cache().containsKey(CACHE_KEY)) {
+ //get the non rewritten query and rewrite it
+ Query query;
+ try {
+ query = rewrite(highlighterContext, hitContext.topLevelReader());
+ } catch (IOException e) {
+ throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
+ }
+ SortedSet<Term> queryTerms = extractTerms(query);
+ hitContext.cache().put(CACHE_KEY, new HighlighterEntry(queryTerms));
+ }
+
+ HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
+ MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
+
+ if (mapperHighlighterEntry == null) {
+ Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder);
+ BytesRef[] filteredQueryTerms = filterTerms(highlighterEntry.queryTerms, fieldMapper.names().indexName(), field.fieldOptions().requireFieldMatch());
+ mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter, filteredQueryTerms);
+ }
+
+ //we merge back multiple values into a single value using the paragraph separator, unless we have to highlight every single value separately (number_of_fragments=0).
+ boolean mergeValues = field.fieldOptions().numberOfFragments() != 0;
+ List<Snippet> snippets = new ArrayList<Snippet>();
+ int numberOfFragments;
+
+ try {
+ //we manually load the field values (from source if needed)
+ List<Object> textsToHighlight = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(mapperHighlighterEntry.passageFormatter, textsToHighlight, mergeValues, Integer.MAX_VALUE-1, field.fieldOptions().noMatchSize());
+
+ if (field.fieldOptions().numberOfFragments() == 0) {
+ highlighter.setBreakIterator(new WholeBreakIterator());
+ numberOfFragments = 1; //1 per value since we highlight per value
+ } else {
+ numberOfFragments = field.fieldOptions().numberOfFragments();
+ }
+
+ //only when we need one snippet back per value (whole value) do we highlight each value separately, calling the highlight method multiple times
+ int values = mergeValues ? 1 : textsToHighlight.size();
+ for (int i = 0; i < values; i++) {
+ Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.searcher(), hitContext.docId(), numberOfFragments);
+ if (fieldSnippets != null) {
+ for (Snippet fieldSnippet : fieldSnippets) {
+ if (Strings.hasText(fieldSnippet.getText())) {
+ snippets.add(fieldSnippet);
+ }
+ }
+ }
+ }
+
+ } catch(IOException e) {
+ throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
+ }
+
+ snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
+
+ if (field.fieldOptions().scoreOrdered()) {
+ //let's sort the snippets by score if needed
+ CollectionUtil.introSort(snippets, new Comparator<Snippet>() {
+ public int compare(Snippet o1, Snippet o2) {
+ return (int) Math.signum(o2.getScore() - o1.getScore());
+ }
+ });
+ }
+
+ String[] fragments = new String[snippets.size()];
+ for (int i = 0; i < fragments.length; i++) {
+ fragments[i] = snippets.get(i).getText();
+ }
+
+ if (fragments.length > 0) {
+ return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ }
+
+ return null;
+ }
+
+ private static Query rewrite(HighlighterContext highlighterContext, IndexReader reader) throws IOException {
+
+ Query original = highlighterContext.query.originalQuery();
+
+ //we walk the query tree and when we encounter multi term queries we need to make sure the rewrite method
+ //supports multi term extraction. If not we temporarily override it (and restore it after the rewrite).
+ List<Tuple<MultiTermQuery, MultiTermQuery.RewriteMethod>> modifiedMultiTermQueries = Lists.newArrayList();
+ overrideMultiTermRewriteMethod(original, modifiedMultiTermQueries);
+
+ //rewrite is expensive: if the query was already rewritten we try not to rewrite it again
+ if (highlighterContext.query.queryRewritten() && modifiedMultiTermQueries.size() == 0) {
+ //return the already rewritten query
+ return highlighterContext.query.query();
+ }
+
+ Query query = original;
+ for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
+ rewrittenQuery = query.rewrite(reader)) {
+ query = rewrittenQuery;
+ }
+
+ //set back the original rewrite method after the rewrite is done
+ for (Tuple<MultiTermQuery, MultiTermQuery.RewriteMethod> modifiedMultiTermQuery : modifiedMultiTermQueries) {
+ modifiedMultiTermQuery.v1().setRewriteMethod(modifiedMultiTermQuery.v2());
+ }
+
+ return query;
+ }
+
+ private static void overrideMultiTermRewriteMethod(Query query, List<Tuple<MultiTermQuery, MultiTermQuery.RewriteMethod>> modifiedMultiTermQueries) {
+
+ if (query instanceof MultiTermQuery) {
+ MultiTermQuery originalMultiTermQuery = (MultiTermQuery) query;
+ if (!allowsForTermExtraction(originalMultiTermQuery.getRewriteMethod())) {
+ MultiTermQuery.RewriteMethod originalRewriteMethod = originalMultiTermQuery.getRewriteMethod();
+ originalMultiTermQuery.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
+ //we need to rewrite anyway if it is a multi term query which was rewritten with the wrong rewrite method
+ modifiedMultiTermQueries.add(Tuple.tuple(originalMultiTermQuery, originalRewriteMethod));
+ }
+ }
+
+ if (query instanceof BooleanQuery) {
+ BooleanQuery booleanQuery = (BooleanQuery) query;
+ for (BooleanClause booleanClause : booleanQuery) {
+ overrideMultiTermRewriteMethod(booleanClause.getQuery(), modifiedMultiTermQueries);
+ }
+ }
+
+ if (query instanceof XFilteredQuery) {
+ overrideMultiTermRewriteMethod(((XFilteredQuery) query).getQuery(), modifiedMultiTermQueries);
+ }
+
+ if (query instanceof FilteredQuery) {
+ overrideMultiTermRewriteMethod(((FilteredQuery) query).getQuery(), modifiedMultiTermQueries);
+ }
+
+ if (query instanceof ConstantScoreQuery) {
+ overrideMultiTermRewriteMethod(((ConstantScoreQuery) query).getQuery(), modifiedMultiTermQueries);
+ }
+ }
+
+ private static boolean allowsForTermExtraction(MultiTermQuery.RewriteMethod rewriteMethod) {
+ return rewriteMethod instanceof TopTermsRewrite || rewriteMethod instanceof ScoringRewrite;
+ }
+
+ private static SortedSet<Term> extractTerms(Query query) {
+ SortedSet<Term> queryTerms = new TreeSet<Term>();
+ query.extractTerms(queryTerms);
+ return queryTerms;
+ }
+
+ private static BytesRef[] filterTerms(SortedSet<Term> queryTerms, String field, boolean requireFieldMatch) {
+ SortedSet<Term> fieldTerms;
+ if (requireFieldMatch) {
+ Term floor = new Term(field, "");
+ Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
+ fieldTerms = queryTerms.subSet(floor, ceiling);
+ } else {
+ fieldTerms = queryTerms;
+ }
+
+ BytesRef[] terms = new BytesRef[fieldTerms.size()];
+ int termUpto = 0;
+ for(Term term : fieldTerms) {
+ terms[termUpto++] = term.bytes();
+ }
+
+ return terms;
+ }
+
+ private static List<Snippet> filterSnippets(List<Snippet> snippets, int numberOfFragments) {
+
+ //We need to filter the snippets because, due to no_match_size, we could have
+ //highlighted snippets mixed with non-highlighted ones, and we don't want to
+ //mix those up
+ List<Snippet> filteredSnippets = new ArrayList<Snippet>(snippets.size());
+ for (Snippet snippet : snippets) {
+ if (snippet.isHighlighted()) {
+ filteredSnippets.add(snippet);
+ }
+ }
+
+ //if there's at least one highlighted snippet, we return all the highlighted ones
+ //otherwise we return the first non highlighted one if available
+ if (filteredSnippets.size() == 0) {
+ if (snippets.size() > 0) {
+ Snippet snippet = snippets.get(0);
+ //if we did discrete per value highlighting using whole break iterator (as number_of_fragments was 0)
+ //we need to obtain the first sentence of the first value
+ if (numberOfFragments == 0) {
+ BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT);
+ String text = snippet.getText();
+ bi.setText(text);
+ int next = bi.next();
+ if (next != BreakIterator.DONE) {
+ String newText = text.substring(0, next).trim();
+ snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted());
+ }
+ }
+ filteredSnippets.add(snippet);
+ }
+ }
+
+ return filteredSnippets;
+ }
+
+ private static class HighlighterEntry {
+ final SortedSet<Term> queryTerms;
+ Map<FieldMapper<?>, MapperHighlighterEntry> mappers = Maps.newHashMap();
+
+ private HighlighterEntry(SortedSet<Term> queryTerms) {
+ this.queryTerms = queryTerms;
+ }
+ }
+
+ private static class MapperHighlighterEntry {
+ final CustomPassageFormatter passageFormatter;
+ final BytesRef[] filteredQueryTerms;
+
+ private MapperHighlighterEntry(CustomPassageFormatter passageFormatter, BytesRef[] filteredQueryTerms) {
+ this.passageFormatter = passageFormatter;
+ this.filteredQueryTerms = filteredQueryTerms;
+ }
+ }
+}
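
A request-side sketch for the postings highlighter: the target field must be indexed with index_options "offsets" (see the mapping sketch after HighlightPhase above), and numOfFragments(0) switches to the WholeBreakIterator path, returning one snippet per value. The field name is hypothetical.

    import org.elasticsearch.search.highlight.HighlightBuilder;

    HighlightBuilder.Field title = new HighlightBuilder.Field("title")
            .highlighterType("postings")
            .numOfFragments(0); // whole values highlighted, one snippet per value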
diff --git a/src/main/java/org/elasticsearch/search/highlight/SearchContextHighlight.java b/src/main/java/org/elasticsearch/search/highlight/SearchContextHighlight.java
new file mode 100644
index 0000000..7b0f15f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/SearchContextHighlight.java
@@ -0,0 +1,354 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.search.Query;
+
+import java.util.*;
+
+/**
+ * Holds the parsed highlight options of a search request.
+ */
+public class SearchContextHighlight {
+
+ private final Map<String, Field> fields;
+
+ private boolean globalForceSource = false;
+
+ public SearchContextHighlight(Collection<Field> fields) {
+ assert fields != null;
+ this.fields = Maps.newHashMap();
+ for (Field field : fields) {
+ this.fields.put(field.field, field);
+ }
+ }
+
+ public Collection<Field> fields() {
+ return fields.values();
+ }
+
+ public void globalForceSource(boolean globalForceSource) {
+ this.globalForceSource = globalForceSource;
+ }
+
+ public boolean forceSource(Field field) {
+ if (globalForceSource) {
+ return true;
+ }
+
+ Field _field = fields.get(field.field);
+ return _field == null ? false : _field.fieldOptions.forceSource;
+ }
+
+ public static class Field {
+ private final String field;
+ private final FieldOptions fieldOptions;
+
+ Field(String field, FieldOptions fieldOptions) {
+ assert field != null;
+ assert fieldOptions != null;
+ this.field = field;
+ this.fieldOptions = fieldOptions;
+ }
+
+ public String field() {
+ return field;
+ }
+
+ public FieldOptions fieldOptions() {
+ return fieldOptions;
+ }
+ }
+
+ public static class FieldOptions {
+
+ // Field options that default to null or -1 are often set to their real default in HighlighterParseElement#parse
+ private int fragmentCharSize = -1;
+
+ private int numberOfFragments = -1;
+
+ private int fragmentOffset = -1;
+
+ private String encoder;
+
+ private String[] preTags;
+
+ private String[] postTags;
+
+ private Boolean scoreOrdered;
+
+ private Boolean highlightFilter;
+
+ private Boolean requireFieldMatch;
+
+ private String highlighterType;
+
+ private Boolean forceSource;
+
+ private String fragmenter;
+
+ private int boundaryMaxScan = -1;
+
+ private Character[] boundaryChars = null;
+
+ private Query highlightQuery;
+
+ private int noMatchSize = -1;
+
+ private Set<String> matchedFields;
+
+ private Map<String, Object> options;
+
+ private int phraseLimit = -1;
+
+ public int fragmentCharSize() {
+ return fragmentCharSize;
+ }
+
+ public int numberOfFragments() {
+ return numberOfFragments;
+ }
+
+ public int fragmentOffset() {
+ return fragmentOffset;
+ }
+
+ public String encoder() {
+ return encoder;
+ }
+
+ public String[] preTags() {
+ return preTags;
+ }
+
+ public String[] postTags() {
+ return postTags;
+ }
+
+ public Boolean scoreOrdered() {
+ return scoreOrdered;
+ }
+
+ public Boolean highlightFilter() {
+ return highlightFilter;
+ }
+
+ public Boolean requireFieldMatch() {
+ return requireFieldMatch;
+ }
+
+ public String highlighterType() {
+ return highlighterType;
+ }
+
+ public String fragmenter() {
+ return fragmenter;
+ }
+
+ public int boundaryMaxScan() {
+ return boundaryMaxScan;
+ }
+
+ public Character[] boundaryChars() {
+ return boundaryChars;
+ }
+
+ public Query highlightQuery() {
+ return highlightQuery;
+ }
+
+ public int noMatchSize() {
+ return noMatchSize;
+ }
+
+ public int phraseLimit() {
+ return phraseLimit;
+ }
+
+ public Set<String> matchedFields() {
+ return matchedFields;
+ }
+
+ public Map<String, Object> options() {
+ return options;
+ }
+
+ static class Builder {
+
+ private final FieldOptions fieldOptions = new FieldOptions();
+
+ Builder fragmentCharSize(int fragmentCharSize) {
+ fieldOptions.fragmentCharSize = fragmentCharSize;
+ return this;
+ }
+
+ Builder numberOfFragments(int numberOfFragments) {
+ fieldOptions.numberOfFragments = numberOfFragments;
+ return this;
+ }
+
+ Builder fragmentOffset(int fragmentOffset) {
+ fieldOptions.fragmentOffset = fragmentOffset;
+ return this;
+ }
+
+ Builder encoder(String encoder) {
+ fieldOptions.encoder = encoder;
+ return this;
+ }
+
+ Builder preTags(String[] preTags) {
+ fieldOptions.preTags = preTags;
+ return this;
+ }
+
+ Builder postTags(String[] postTags) {
+ fieldOptions.postTags = postTags;
+ return this;
+ }
+
+ Builder scoreOrdered(boolean scoreOrdered) {
+ fieldOptions.scoreOrdered = scoreOrdered;
+ return this;
+ }
+
+ Builder highlightFilter(boolean highlightFilter) {
+ fieldOptions.highlightFilter = highlightFilter;
+ return this;
+ }
+
+ Builder requireFieldMatch(boolean requireFieldMatch) {
+ fieldOptions.requireFieldMatch = requireFieldMatch;
+ return this;
+ }
+
+ Builder highlighterType(String type) {
+ fieldOptions.highlighterType = type;
+ return this;
+ }
+
+ Builder forceSource(boolean forceSource) {
+ fieldOptions.forceSource = forceSource;
+ return this;
+ }
+
+ Builder fragmenter(String fragmenter) {
+ fieldOptions.fragmenter = fragmenter;
+ return this;
+ }
+
+ Builder boundaryMaxScan(int boundaryMaxScan) {
+ fieldOptions.boundaryMaxScan = boundaryMaxScan;
+ return this;
+ }
+
+ Builder boundaryChars(Character[] boundaryChars) {
+ fieldOptions.boundaryChars = boundaryChars;
+ return this;
+ }
+
+ Builder highlightQuery(Query highlightQuery) {
+ fieldOptions.highlightQuery = highlightQuery;
+ return this;
+ }
+
+ Builder noMatchSize(int noMatchSize) {
+ fieldOptions.noMatchSize = noMatchSize;
+ return this;
+ }
+
+ Builder phraseLimit(int phraseLimit) {
+ fieldOptions.phraseLimit = phraseLimit;
+ return this;
+ }
+
+ Builder matchedFields(Set<String> matchedFields) {
+ fieldOptions.matchedFields = matchedFields;
+ return this;
+ }
+
+ Builder options(Map<String, Object> options) {
+ fieldOptions.options = options;
+ return this;
+ }
+
+ FieldOptions build() {
+ return fieldOptions;
+ }
+
+ Builder merge(FieldOptions globalOptions) {
+ if (fieldOptions.preTags == null && globalOptions.preTags != null) {
+ fieldOptions.preTags = Arrays.copyOf(globalOptions.preTags, globalOptions.preTags.length);
+ }
+ if (fieldOptions.postTags == null && globalOptions.postTags != null) {
+ fieldOptions.postTags = Arrays.copyOf(globalOptions.postTags, globalOptions.postTags.length);
+ }
+ if (fieldOptions.highlightFilter == null) {
+ fieldOptions.highlightFilter = globalOptions.highlightFilter;
+ }
+ if (fieldOptions.scoreOrdered == null) {
+ fieldOptions.scoreOrdered = globalOptions.scoreOrdered;
+ }
+ if (fieldOptions.fragmentCharSize == -1) {
+ fieldOptions.fragmentCharSize = globalOptions.fragmentCharSize;
+ }
+ if (fieldOptions.numberOfFragments == -1) {
+ fieldOptions.numberOfFragments = globalOptions.numberOfFragments;
+ }
+ if (fieldOptions.encoder == null) {
+ fieldOptions.encoder = globalOptions.encoder;
+ }
+ if (fieldOptions.requireFieldMatch == null) {
+ fieldOptions.requireFieldMatch = globalOptions.requireFieldMatch;
+ }
+ if (fieldOptions.boundaryMaxScan == -1) {
+ fieldOptions.boundaryMaxScan = globalOptions.boundaryMaxScan;
+ }
+ if (fieldOptions.boundaryChars == null && globalOptions.boundaryChars != null) {
+ fieldOptions.boundaryChars = Arrays.copyOf(globalOptions.boundaryChars, globalOptions.boundaryChars.length);
+ }
+ if (fieldOptions.highlighterType == null) {
+ fieldOptions.highlighterType = globalOptions.highlighterType;
+ }
+ if (fieldOptions.fragmenter == null) {
+ fieldOptions.fragmenter = globalOptions.fragmenter;
+ }
+ if ((fieldOptions.options == null || fieldOptions.options.size() == 0) && globalOptions.options != null) {
+ fieldOptions.options = Maps.newHashMap(globalOptions.options);
+ }
+ if (fieldOptions.highlightQuery == null && globalOptions.highlightQuery != null) {
+ fieldOptions.highlightQuery = globalOptions.highlightQuery;
+ }
+ if (fieldOptions.noMatchSize == -1) {
+ fieldOptions.noMatchSize = globalOptions.noMatchSize;
+ }
+ if (fieldOptions.forceSource == null) {
+ fieldOptions.forceSource = globalOptions.forceSource;
+ }
+ if (fieldOptions.phraseLimit == -1) {
+ fieldOptions.phraseLimit = globalOptions.phraseLimit;
+ }
+
+ return this;
+ }
+ }
+ }
+}
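A hedged, self-contained sketch of the merge pattern used by FieldOptions.Builder above: per-field settings use null or -1 as "unset" sentinels and inherit only those values from the global options. All names here are illustrative.

```java
public class OptionsMergeDemo {
    static class Options {
        int numberOfFragments = -1; // -1 = unset, as in FieldOptions
        String highlighterType;     // null = unset
    }

    // Copy a global value only where the field-level value was never set.
    static Options merge(Options field, Options global) {
        if (field.numberOfFragments == -1) field.numberOfFragments = global.numberOfFragments;
        if (field.highlighterType == null) field.highlighterType = global.highlighterType;
        return field;
    }

    public static void main(String[] args) {
        Options global = new Options();
        global.numberOfFragments = 5;
        global.highlighterType = "plain";
        Options field = new Options();
        field.highlighterType = "fvh"; // explicitly set, must survive the merge
        Options merged = merge(field, global);
        System.out.println(merged.numberOfFragments + " " + merged.highlighterType); // 5 fvh
    }
}
```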
diff --git a/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java
new file mode 100644
index 0000000..1530045
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight.vectorhighlight;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
+import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
+import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo;
+import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;
+import org.apache.lucene.util.CollectionUtil;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.index.analysis.*;
+import org.elasticsearch.index.mapper.FieldMapper;
+
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implementations.
+ */
+public final class FragmentBuilderHelper {
+
+ private FragmentBuilderHelper() {
+ // no instance
+ }
+
+ /**
+ * Fixes problems with broken analysis chains where positions and offsets are messed up, which can lead to a
+ * {@link StringIndexOutOfBoundsException} in the {@link FastVectorHighlighter}.
+ */
+ public static WeightedFragInfo fixWeightedFragInfo(FieldMapper<?> mapper, Field[] values, WeightedFragInfo fragInfo) {
+ assert fragInfo != null : "FragInfo must not be null";
+ assert mapper.names().indexName().equals(values[0].name()) : "Expected FieldMapper for field " + values[0].name();
+ if (!fragInfo.getSubInfos().isEmpty() && (containsBrokenAnalysis(mapper.indexAnalyzer()))) {
+ /* This is a special case where broken analysis like WDF is used for term-vector creation at index-time
+ * which can potentially mess up the offsets. To prevent a StringIndexOutOfBoundsException we need to re-sort
+ * the fragments based on their offsets rather than using solely the positions, as is done in
+ * the FastVectorHighlighter. Yet, this is really a Lucene problem and should be fixed in Lucene rather
+ * than in this hack... aka. "we are working on it!" */
+ final List<SubInfo> subInfos = fragInfo.getSubInfos();
+ CollectionUtil.introSort(subInfos, new Comparator<SubInfo>() {
+ @Override
+ public int compare(SubInfo o1, SubInfo o2) {
+ int startOffset = o1.getTermsOffsets().get(0).getStartOffset();
+ int startOffset2 = o2.getTermsOffsets().get(0).getStartOffset();
+ return FragmentBuilderHelper.compare(startOffset, startOffset2);
+ }
+ });
+ return new WeightedFragInfo(Math.min(fragInfo.getSubInfos().get(0).getTermsOffsets().get(0).getStartOffset(),
+ fragInfo.getStartOffset()), fragInfo.getEndOffset(), subInfos, fragInfo.getTotalBoost());
+ } else {
+ return fragInfo;
+ }
+ }
+
+ private static int compare(int x, int y) {
+ return (x < y) ? -1 : ((x == y) ? 0 : 1);
+ }
+
+ private static boolean containsBrokenAnalysis(Analyzer analyzer) {
+ // TODO maybe we need a getter on NamedAnalyzer that tells whether it uses broken analysis
+ if (analyzer instanceof NamedAnalyzer) {
+ analyzer = ((NamedAnalyzer) analyzer).analyzer();
+ }
+ if (analyzer instanceof CustomAnalyzer) {
+ final CustomAnalyzer a = (CustomAnalyzer) analyzer;
+ if (a.tokenizerFactory() instanceof EdgeNGramTokenizerFactory
+ || (a.tokenizerFactory() instanceof NGramTokenizerFactory
+ && !((NGramTokenizerFactory)a.tokenizerFactory()).version().onOrAfter(Version.LUCENE_42))) {
+ // ngram tokenizer is broken before 4.2
+ return true;
+ }
+ TokenFilterFactory[] tokenFilters = a.tokenFilters();
+ for (TokenFilterFactory tokenFilterFactory : tokenFilters) {
+ if (tokenFilterFactory instanceof WordDelimiterTokenFilterFactory
+ || tokenFilterFactory instanceof EdgeNGramTokenFilterFactory) {
+ return true;
+ }
+ if (tokenFilterFactory instanceof NGramTokenFilterFactory
+ && !((NGramTokenFilterFactory)tokenFilterFactory).version().onOrAfter(Version.LUCENE_42)) {
+ // ngram token filter is broken before 4.2
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+}
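The re-sort in fixWeightedFragInfo can be illustrated with a JDK-only sketch: sub-infos whose offsets got out of order (as a broken WordDelimiterFilter chain can produce) are simply re-ordered by their first start offset. SubInfo here is a simplified stand-in for Lucene's WeightedFragInfo.SubInfo.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class OffsetSortDemo {
    // Simplified stand-in: only the first term's start offset matters for the sort.
    static class SubInfo {
        final String text;
        final int startOffset;
        SubInfo(String text, int startOffset) { this.text = text; this.startOffset = startOffset; }
        @Override public String toString() { return text + "@" + startOffset; }
    }

    public static void main(String[] args) {
        List<SubInfo> subInfos = new ArrayList<>();
        subInfos.add(new SubInfo("foo-bar", 8)); // emitted out of offset order
        subInfos.add(new SubInfo("foo", 0));
        subInfos.add(new SubInfo("bar", 4));
        // Same idea as the introSort(...) above: order by start offset, not position.
        subInfos.sort(Comparator.comparingInt((SubInfo s) -> s.startOffset));
        System.out.println(subInfos); // [foo@0, bar@4, foo-bar@8]
    }
}
```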
diff --git a/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SimpleFragmentsBuilder.java b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SimpleFragmentsBuilder.java
new file mode 100644
index 0000000..5fe5f19
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SimpleFragmentsBuilder.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight.vectorhighlight;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.highlight.Encoder;
+import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
+import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
+import org.elasticsearch.index.mapper.FieldMapper;
+
+/**
+ * Direct Subclass of Lucene's org.apache.lucene.search.vectorhighlight.SimpleFragmentsBuilder
+ * that corrects offsets for broken analysis chains.
+ */
+public class SimpleFragmentsBuilder extends org.apache.lucene.search.vectorhighlight.SimpleFragmentsBuilder {
+ protected final FieldMapper<?> mapper;
+
+ public SimpleFragmentsBuilder(FieldMapper<?> mapper,
+ String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
+ super(preTags, postTags, boundaryScanner);
+ this.mapper = mapper;
+ }
+
+ @Override
+ protected String makeFragment(StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
+ String[] preTags, String[] postTags, Encoder encoder) {
+ return super.makeFragment(buffer, index, values, FragmentBuilderHelper.fixWeightedFragInfo(mapper, values, fragInfo), preTags, postTags, encoder);
+ }
+}
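SimpleFragmentsBuilder above is a thin decorator: it overrides a single template method to repair its input and then delegates to the superclass. A minimal sketch of that pattern, with purely illustrative names:

```java
public class OverrideAndDelegateDemo {
    static class BaseBuilder {
        String makeFragment(String raw) { return "<em>" + raw + "</em>"; }
    }

    // Mirrors SimpleFragmentsBuilder: fix the input first, then let super do the work.
    static class FixingBuilder extends BaseBuilder {
        @Override
        String makeFragment(String raw) {
            return super.makeFragment(raw.trim()); // "fixWeightedFragInfo" stand-in
        }
    }

    public static void main(String[] args) {
        System.out.println(new FixingBuilder().makeFragment("  quick fox  ")); // <em>quick fox</em>
    }
}
```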
diff --git a/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceScoreOrderFragmentsBuilder.java b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceScoreOrderFragmentsBuilder.java
new file mode 100644
index 0000000..7404a72
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceScoreOrderFragmentsBuilder.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight.vectorhighlight;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.highlight.Encoder;
+import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
+import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
+import org.apache.lucene.search.vectorhighlight.ScoreOrderFragmentsBuilder;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+/**
+ *
+ */
+public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder {
+
+ private final FieldMapper<?> mapper;
+
+ private final SearchContext searchContext;
+
+ public SourceScoreOrderFragmentsBuilder(FieldMapper<?> mapper, SearchContext searchContext,
+ String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
+ super(preTags, postTags, boundaryScanner);
+ this.mapper = mapper;
+ this.searchContext = searchContext;
+ }
+
+ @Override
+ protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
+ // we know it's a low-level (atomic) reader with a matching docId, since that's how we invoke the highlighter
+ SearchLookup lookup = searchContext.lookup();
+ lookup.setNextReader((AtomicReaderContext) reader.getContext());
+ lookup.setNextDocId(docId);
+
+ List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath());
+ Field[] fields = new Field[values.size()];
+ for (int i = 0; i < values.size(); i++) {
+ fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
+ }
+ return fields;
+ }
+
+ @Override
+ protected String makeFragment(StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
+ String[] preTags, String[] postTags, Encoder encoder) {
+ return super.makeFragment(buffer, index, values, FragmentBuilderHelper.fixWeightedFragInfo(mapper, values, fragInfo), preTags, postTags, encoder);
+ }
+}
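getFields above rebuilds in-memory, unstored field values from the raw values extracted out of _source, so the highlighter can run against fields that were never stored. A hedged, JDK-only sketch of that conversion (names are illustrative):

```java
import java.util.Arrays;
import java.util.List;

public class SourceFieldsDemo {
    // One synthetic value per raw _source entry, mirroring the Field[] loop above.
    static String[] toFieldValues(List<Object> rawValues) {
        String[] values = new String[rawValues.size()];
        for (int i = 0; i < rawValues.size(); i++) {
            values[i] = String.valueOf(rawValues.get(i));
        }
        return values;
    }

    public static void main(String[] args) {
        List<Object> raw = Arrays.asList("quick brown fox", 42);
        System.out.println(Arrays.toString(toFieldValues(raw))); // [quick brown fox, 42]
    }
}
```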
diff --git a/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceSimpleFragmentsBuilder.java b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceSimpleFragmentsBuilder.java
new file mode 100644
index 0000000..abfd907
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/SourceSimpleFragmentsBuilder.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight.vectorhighlight;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ *
+ */
+public class SourceSimpleFragmentsBuilder extends SimpleFragmentsBuilder {
+
+ private final SearchContext searchContext;
+
+ public SourceSimpleFragmentsBuilder(FieldMapper<?> mapper, SearchContext searchContext,
+ String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
+ super(mapper, preTags, postTags, boundaryScanner);
+ this.searchContext = searchContext;
+ }
+
+ public static final Field[] EMPTY_FIELDS = new Field[0];
+
+ @Override
+ protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
+ // we know it's a low-level (atomic) reader with a matching docId, since that's how we invoke the highlighter
+ SearchLookup lookup = searchContext.lookup();
+ lookup.setNextReader((AtomicReaderContext) reader.getContext());
+ lookup.setNextDocId(docId);
+
+ List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath());
+ if (values.isEmpty()) {
+ return EMPTY_FIELDS;
+ }
+ Field[] fields = new Field[values.size()];
+ for (int i = 0; i < values.size(); i++) {
+ fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
+ }
+ return fields;
+ }
+
+}
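One small difference from the score-ordered variant above is the shared EMPTY_FIELDS constant, which avoids allocating a fresh empty array for every document without values. The same micro-pattern in isolation (illustrative names):

```java
import java.util.List;

public class EmptyArrayDemo {
    // Shared immutable sentinel, like EMPTY_FIELDS above.
    static final String[] EMPTY = new String[0];

    static String[] values(List<String> raw) {
        return raw.isEmpty() ? EMPTY : raw.toArray(new String[0]);
    }

    public static void main(String[] args) {
        System.out.println(values(List.of()) == values(List.of())); // true: same instance
    }
}
```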
diff --git a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
new file mode 100644
index 0000000..231d6d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.*;
+import org.elasticsearch.common.lucene.MinimumScoreCollector;
+import org.elasticsearch.common.lucene.MultiCollector;
+import org.elasticsearch.common.lucene.search.FilteredCollector;
+import org.elasticsearch.common.lucene.search.XCollector;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.search.dfs.CachedDfSource;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Context-aware extension of {@link IndexSearcher}.
+ */
+public class ContextIndexSearcher extends IndexSearcher {
+
+ public static enum Stage {
+ NA,
+ MAIN_QUERY
+ }
+
+ /** The wrapped {@link IndexSearcher}. The reason why we sometimes prefer delegating to this searcher instead of <tt>super</tt> is that
+ * this instance may have more assertions, for example if it comes from MockInternalEngine which wraps the IndexSearcher into an
+ * AssertingIndexSearcher. */
+ private final IndexSearcher in;
+
+ private final SearchContext searchContext;
+
+ private CachedDfSource dfSource;
+
+ private List<Collector> queryCollectors;
+
+ private Stage currentState = Stage.NA;
+
+ private boolean enableMainDocIdSetCollector;
+ private DocIdSetCollector mainDocIdSetCollector;
+
+ public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
+ super(searcher.reader());
+ in = searcher.searcher();
+ this.searchContext = searchContext;
+ setSimilarity(searcher.searcher().getSimilarity());
+ }
+
+ public void release() {
+ if (mainDocIdSetCollector != null) {
+ mainDocIdSetCollector.release();
+ }
+ }
+
+ public void dfSource(CachedDfSource dfSource) {
+ this.dfSource = dfSource;
+ }
+
+ /**
+ * Adds a query level collector that runs at {@link Stage#MAIN_QUERY}. Note, supports
+ * {@link org.elasticsearch.common.lucene.search.XCollector} allowing for a callback
+ * when collection is done.
+ */
+ public void addMainQueryCollector(Collector collector) {
+ if (queryCollectors == null) {
+ queryCollectors = new ArrayList<Collector>();
+ }
+ queryCollectors.add(collector);
+ }
+
+ public DocIdSetCollector mainDocIdSetCollector() {
+ return this.mainDocIdSetCollector;
+ }
+
+ public void enableMainDocIdSetCollector() {
+ this.enableMainDocIdSetCollector = true;
+ }
+
+ public void inStage(Stage stage) {
+ this.currentState = stage;
+ }
+
+ public void finishStage(Stage stage) {
+ assert currentState == stage : "Expected stage " + stage + " but was stage " + currentState;
+ this.currentState = Stage.NA;
+ }
+
+ @Override
+ public Query rewrite(Query original) throws IOException {
+ if (original == searchContext.query() || original == searchContext.parsedQuery().query()) {
+ // optimize in case it's the top-level search query and we already rewrote it...
+ if (searchContext.queryRewritten()) {
+ return searchContext.query();
+ }
+ Query rewriteQuery = in.rewrite(original);
+ searchContext.updateRewriteQuery(rewriteQuery);
+ return rewriteQuery;
+ } else {
+ return in.rewrite(original);
+ }
+ }
+
+ @Override
+ public Weight createNormalizedWeight(Query query) throws IOException {
+ try {
+ // if it's the main query and we have dfs data, only then use it
+ if (dfSource != null && (query == searchContext.query() || query == searchContext.parsedQuery().query())) {
+ return dfSource.createNormalizedWeight(query);
+ }
+ return in.createNormalizedWeight(query);
+ } catch (Throwable t) {
+ searchContext.clearReleasables();
+ throw new RuntimeException(t);
+ }
+ }
+
+ @Override
+ public void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ if (searchContext.timeoutInMillis() != -1) {
+ // TODO: change to use our own counter that uses the scheduler in ThreadPool
+ collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), searchContext.timeoutInMillis());
+ }
+ if (currentState == Stage.MAIN_QUERY) {
+ if (enableMainDocIdSetCollector) {
+ // TODO should we create a cache of segment->docIdSets so we won't create one each time?
+ collector = this.mainDocIdSetCollector = new DocIdSetCollector(searchContext.docSetCache(), collector);
+ }
+ if (searchContext.parsedPostFilter() != null) {
+ // this will only get applied to the actual search collector and not
+ // to any scoped collectors; it is applied to the main collector only,
+ // since that is the only place the post filter should take effect
+ collector = new FilteredCollector(collector, searchContext.parsedPostFilter().filter());
+ }
+ if (queryCollectors != null && !queryCollectors.isEmpty()) {
+ collector = new MultiCollector(collector, queryCollectors.toArray(new Collector[queryCollectors.size()]));
+ }
+
+ // apply the minimum score after multi collector so we filter facets as well
+ if (searchContext.minimumScore() != null) {
+ collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
+ }
+ }
+
+ // we only compute the doc id set once, since within a context we always execute the same query...
+ try {
+ if (searchContext.timeoutInMillis() != -1) {
+ try {
+ super.search(leaves, weight, collector);
+ } catch (TimeLimitingCollector.TimeExceededException e) {
+ searchContext.queryResult().searchTimedOut(true);
+ }
+ } else {
+ super.search(leaves, weight, collector);
+ }
+
+ if (currentState == Stage.MAIN_QUERY) {
+ if (enableMainDocIdSetCollector) {
+ enableMainDocIdSetCollector = false;
+ mainDocIdSetCollector.postCollection();
+ }
+ if (queryCollectors != null && !queryCollectors.isEmpty()) {
+ for (Collector queryCollector : queryCollectors) {
+ if (queryCollector instanceof XCollector) {
+ ((XCollector) queryCollector).postCollection();
+ }
+ }
+ }
+ }
+ } finally {
+ searchContext.clearReleasables();
+ }
+ }
+
+ @Override
+ public Explanation explain(Query query, int doc) throws IOException {
+ try {
+ if (searchContext.aliasFilter() == null) {
+ return super.explain(query, doc);
+ }
+ XFilteredQuery filteredQuery = new XFilteredQuery(query, searchContext.aliasFilter());
+ return super.explain(filteredQuery, doc);
+ } finally {
+ searchContext.clearReleasables();
+ }
+ }
+} \ No newline at end of file
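The search(...) method above builds its collector by decoration: timeout, doc-id-set recording, post filter, extra query collectors, and finally minimum score wrap one another, and the comments explain the ordering (the post filter must not affect scoped collectors; the minimum score must filter facets too). A hedged, JDK-only sketch of that decoration idea, with a toy Collector interface standing in for Lucene's:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.IntPredicate;

public class CollectorChainDemo {
    interface Collector { void collect(int doc); } // toy stand-in for Lucene's Collector

    static Collector filtered(Collector in, IntPredicate keep) {
        return doc -> { if (keep.test(doc)) in.collect(doc); };
    }

    static Collector fanOut(Collector main, Collector extra) {
        return doc -> { main.collect(doc); extra.collect(doc); };
    }

    public static void main(String[] args) {
        List<Integer> hits = new ArrayList<>();
        List<Integer> facetDocs = new ArrayList<>();
        // post filter wraps only the main collector...
        Collector main = filtered(hits::add, doc -> doc % 2 == 0);
        // ...extra (scoped) collectors sit outside it...
        Collector multi = fanOut(main, facetDocs::add);
        // ...and the "minimum score" gate is outermost, so it filters facets as well.
        Collector top = filtered(multi, doc -> doc >= 2);
        for (int doc = 0; doc < 6; doc++) top.collect(doc);
        System.out.println(hits);      // [2, 4]
        System.out.println(facetDocs); // [2, 3, 4, 5]
    }
}
```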
diff --git a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
new file mode 100644
index 0000000..060f33c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
@@ -0,0 +1,705 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.facet.SearchContextFacets;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
+import org.elasticsearch.search.fetch.partial.PartialFieldsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.scan.ScanContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ *
+ */
+public class DefaultSearchContext extends SearchContext {
+
+ private final long id;
+
+ private final ShardSearchRequest request;
+
+ private final SearchShardTarget shardTarget;
+
+ private SearchType searchType;
+
+ private final Engine.Searcher engineSearcher;
+
+ private final ScriptService scriptService;
+
+ private final CacheRecycler cacheRecycler;
+
+ private final PageCacheRecycler pageCacheRecycler;
+
+ private final IndexShard indexShard;
+
+ private final IndexService indexService;
+
+ private final ContextIndexSearcher searcher;
+
+ private final DfsSearchResult dfsResult;
+
+ private final QuerySearchResult queryResult;
+
+ private final FetchSearchResult fetchResult;
+
+ // lazily initialized only if needed
+ private ScanContext scanContext;
+
+ private float queryBoost = 1.0f;
+
+ // timeout in millis
+ private long timeoutInMillis = -1;
+
+
+ private List<String> groupStats;
+
+ private Scroll scroll;
+
+ private boolean explain;
+
+ private boolean version = false; // by default, we don't return versions
+
+ private List<String> fieldNames;
+ private FieldDataFieldsContext fieldDataFields;
+ private ScriptFieldsContext scriptFields;
+ private PartialFieldsContext partialFields;
+ private FetchSourceContext fetchSourceContext;
+
+ private int from = -1;
+
+ private int size = -1;
+
+ private Sort sort;
+
+ private Float minimumScore;
+
+ private boolean trackScores = false; // when sorting, track scores as well...
+
+ private ParsedQuery originalQuery;
+
+ private Query query;
+
+ private ParsedFilter postFilter;
+
+ private Filter aliasFilter;
+
+ private int[] docIdsToLoad;
+
+ private int docsIdsToLoadFrom;
+
+ private int docsIdsToLoadSize;
+
+ private SearchContextAggregations aggregations;
+
+ private SearchContextFacets facets;
+
+ private SearchContextHighlight highlight;
+
+ private SuggestionSearchContext suggest;
+
+ private RescoreSearchContext rescore;
+
+ private SearchLookup searchLookup;
+
+ private boolean queryRewritten;
+
+ private volatile long keepAlive;
+
+ private volatile long lastAccessTime = -1;
+
+ private List<Releasable> clearables = null;
+
+
+ public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
+ Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
+ ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
+ this.id = id;
+ this.request = request;
+ this.searchType = request.searchType();
+ this.shardTarget = shardTarget;
+ this.engineSearcher = engineSearcher;
+ this.scriptService = scriptService;
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.dfsResult = new DfsSearchResult(id, shardTarget);
+ this.queryResult = new QuerySearchResult(id, shardTarget);
+ this.fetchResult = new FetchSearchResult(id, shardTarget);
+ this.indexShard = indexShard;
+ this.indexService = indexService;
+
+ this.searcher = new ContextIndexSearcher(this, engineSearcher);
+
+ // initialize the filtering alias based on the provided filters
+ aliasFilter = indexService.aliasesService().aliasFilter(request.filteringAliases());
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ if (scanContext != null) {
+ scanContext.clear();
+ }
+ // clear and scope phase we have
+ searcher.release();
+ engineSearcher.release();
+ return true;
+ }
+
+ public boolean clearAndRelease() {
+ clearReleasables();
+ return release();
+ }
+
+ /**
+ * Should be called before executing the main query and after all other parameters have been set.
+ */
+ public void preProcess() {
+ if (query() == null) {
+ parsedQuery(ParsedQuery.parsedMatchAllQuery());
+ }
+ if (queryBoost() != 1.0f) {
+ parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new BoostScoreFunction(queryBoost)), parsedQuery()));
+ }
+ Filter searchFilter = searchFilter(types());
+ if (searchFilter != null) {
+ if (Queries.isConstantMatchAllQuery(query())) {
+ Query q = new XConstantScoreQuery(searchFilter);
+ q.setBoost(query().getBoost());
+ parsedQuery(new ParsedQuery(q, parsedQuery()));
+ } else {
+ parsedQuery(new ParsedQuery(new XFilteredQuery(query(), searchFilter), parsedQuery()));
+ }
+ }
+ }
+
+ public Filter searchFilter(String[] types) {
+ Filter filter = mapperService().searchFilter(types);
+ if (filter == null) {
+ return aliasFilter;
+ } else {
+ filter = filterCache().cache(filter);
+ if (aliasFilter != null) {
+ return new AndFilter(ImmutableList.of(filter, aliasFilter));
+ }
+ return filter;
+ }
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ public String source() {
+ return engineSearcher.source();
+ }
+
+ public ShardSearchRequest request() {
+ return this.request;
+ }
+
+ public SearchType searchType() {
+ return this.searchType;
+ }
+
+ public SearchContext searchType(SearchType searchType) {
+ this.searchType = searchType;
+ return this;
+ }
+
+ public SearchShardTarget shardTarget() {
+ return this.shardTarget;
+ }
+
+ public int numberOfShards() {
+ return request.numberOfShards();
+ }
+
+ public boolean hasTypes() {
+ return request.types() != null && request.types().length > 0;
+ }
+
+ public String[] types() {
+ return request.types();
+ }
+
+ public float queryBoost() {
+ return queryBoost;
+ }
+
+ public SearchContext queryBoost(float queryBoost) {
+ this.queryBoost = queryBoost;
+ return this;
+ }
+
+ public long nowInMillis() {
+ return request.nowInMillis();
+ }
+
+ public Scroll scroll() {
+ return this.scroll;
+ }
+
+ public SearchContext scroll(Scroll scroll) {
+ this.scroll = scroll;
+ return this;
+ }
+
+ @Override
+ public SearchContextAggregations aggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public SearchContext aggregations(SearchContextAggregations aggregations) {
+ this.aggregations = aggregations;
+ return this;
+ }
+
+ public SearchContextFacets facets() {
+ return facets;
+ }
+
+ public SearchContext facets(SearchContextFacets facets) {
+ this.facets = facets;
+ return this;
+ }
+
+ public SearchContextHighlight highlight() {
+ return highlight;
+ }
+
+ public void highlight(SearchContextHighlight highlight) {
+ this.highlight = highlight;
+ }
+
+ public SuggestionSearchContext suggest() {
+ return suggest;
+ }
+
+ public void suggest(SuggestionSearchContext suggest) {
+ this.suggest = suggest;
+ }
+
+ public RescoreSearchContext rescore() {
+ return this.rescore;
+ }
+
+ public void rescore(RescoreSearchContext rescore) {
+ this.rescore = rescore;
+ }
+
+ public boolean hasFieldDataFields() {
+ return fieldDataFields != null;
+ }
+
+ public FieldDataFieldsContext fieldDataFields() {
+ if (fieldDataFields == null) {
+ fieldDataFields = new FieldDataFieldsContext();
+ }
+ return this.fieldDataFields;
+ }
+
+ public boolean hasScriptFields() {
+ return scriptFields != null;
+ }
+
+ public ScriptFieldsContext scriptFields() {
+ if (scriptFields == null) {
+ scriptFields = new ScriptFieldsContext();
+ }
+ return this.scriptFields;
+ }
+
+ public boolean hasPartialFields() {
+ return partialFields != null;
+ }
+
+ public PartialFieldsContext partialFields() {
+ if (partialFields == null) {
+ partialFields = new PartialFieldsContext();
+ }
+ return this.partialFields;
+ }
+
+ /**
+ * A shortcut function to see whether there is a fetchSourceContext and whether it says the source is requested.
+ *
+ * @return true if a fetchSourceContext is set and it requests the source
+ */
+ public boolean sourceRequested() {
+ return fetchSourceContext != null && fetchSourceContext.fetchSource();
+ }
+
+ public boolean hasFetchSourceContext() {
+ return fetchSourceContext != null;
+ }
+
+ public FetchSourceContext fetchSourceContext() {
+ return this.fetchSourceContext;
+ }
+
+ public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ this.fetchSourceContext = fetchSourceContext;
+ return this;
+ }
+
+ public ContextIndexSearcher searcher() {
+ return this.searcher;
+ }
+
+ public IndexShard indexShard() {
+ return this.indexShard;
+ }
+
+ public MapperService mapperService() {
+ return indexService.mapperService();
+ }
+
+ public AnalysisService analysisService() {
+ return indexService.analysisService();
+ }
+
+ public IndexQueryParserService queryParserService() {
+ return indexService.queryParserService();
+ }
+
+ public SimilarityService similarityService() {
+ return indexService.similarityService();
+ }
+
+ public ScriptService scriptService() {
+ return scriptService;
+ }
+
+ public CacheRecycler cacheRecycler() {
+ return cacheRecycler;
+ }
+
+ public PageCacheRecycler pageCacheRecycler() {
+ return pageCacheRecycler;
+ }
+
+ public FilterCache filterCache() {
+ return indexService.cache().filter();
+ }
+
+ public DocSetCache docSetCache() {
+ return indexService.cache().docSet();
+ }
+
+ public IndexFieldDataService fieldData() {
+ return indexService.fieldData();
+ }
+
+ public IdCache idCache() {
+ return indexService.cache().idCache();
+ }
+
+ public long timeoutInMillis() {
+ return timeoutInMillis;
+ }
+
+ public void timeoutInMillis(long timeoutInMillis) {
+ this.timeoutInMillis = timeoutInMillis;
+ }
+
+ public SearchContext minimumScore(float minimumScore) {
+ this.minimumScore = minimumScore;
+ return this;
+ }
+
+ public Float minimumScore() {
+ return this.minimumScore;
+ }
+
+ public SearchContext sort(Sort sort) {
+ this.sort = sort;
+ return this;
+ }
+
+ public Sort sort() {
+ return this.sort;
+ }
+
+ public SearchContext trackScores(boolean trackScores) {
+ this.trackScores = trackScores;
+ return this;
+ }
+
+ public boolean trackScores() {
+ return this.trackScores;
+ }
+
+ public SearchContext parsedPostFilter(ParsedFilter postFilter) {
+ this.postFilter = postFilter;
+ return this;
+ }
+
+ public ParsedFilter parsedPostFilter() {
+ return this.postFilter;
+ }
+
+ public Filter aliasFilter() {
+ return aliasFilter;
+ }
+
+ public SearchContext parsedQuery(ParsedQuery query) {
+ queryRewritten = false;
+ this.originalQuery = query;
+ this.query = query.query();
+ return this;
+ }
+
+ public ParsedQuery parsedQuery() {
+ return this.originalQuery;
+ }
+
+ /**
+ * The query to execute, might be rewritten.
+ */
+ public Query query() {
+ return this.query;
+ }
+
+ /**
+ * Has the query been rewritten already?
+ */
+ public boolean queryRewritten() {
+ return queryRewritten;
+ }
+
+ /**
+ * Rewrites the query and updates it. Only happens once.
+ */
+ public SearchContext updateRewriteQuery(Query rewriteQuery) {
+ query = rewriteQuery;
+ queryRewritten = true;
+ return this;
+ }
+
+ public int from() {
+ return from;
+ }
+
+ public SearchContext from(int from) {
+ this.from = from;
+ return this;
+ }
+
+ public int size() {
+ return size;
+ }
+
+ public SearchContext size(int size) {
+ this.size = size;
+ return this;
+ }
+
+ public boolean hasFieldNames() {
+ return fieldNames != null;
+ }
+
+ public List<String> fieldNames() {
+ if (fieldNames == null) {
+ fieldNames = Lists.newArrayList();
+ }
+ return fieldNames;
+ }
+
+ public void emptyFieldNames() {
+ this.fieldNames = ImmutableList.of();
+ }
+
+ public boolean explain() {
+ return explain;
+ }
+
+ public void explain(boolean explain) {
+ this.explain = explain;
+ }
+
+ @Nullable
+ public List<String> groupStats() {
+ return this.groupStats;
+ }
+
+ public void groupStats(List<String> groupStats) {
+ this.groupStats = groupStats;
+ }
+
+ public boolean version() {
+ return version;
+ }
+
+ public void version(boolean version) {
+ this.version = version;
+ }
+
+ public int[] docIdsToLoad() {
+ return docIdsToLoad;
+ }
+
+ public int docIdsToLoadFrom() {
+ return docsIdsToLoadFrom;
+ }
+
+ public int docIdsToLoadSize() {
+ return docsIdsToLoadSize;
+ }
+
+ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
+ this.docIdsToLoad = docIdsToLoad;
+ this.docsIdsToLoadFrom = docsIdsToLoadFrom;
+ this.docsIdsToLoadSize = docsIdsToLoadSize;
+ return this;
+ }
+
+ public void accessed(long accessTime) {
+ this.lastAccessTime = accessTime;
+ }
+
+ public long lastAccessTime() {
+ return this.lastAccessTime;
+ }
+
+ public long keepAlive() {
+ return this.keepAlive;
+ }
+
+ public void keepAlive(long keepAlive) {
+ this.keepAlive = keepAlive;
+ }
+
+ public SearchLookup lookup() {
+ // TODO: The types should take into account the parsing context in QueryParserContext...
+ if (searchLookup == null) {
+ searchLookup = new SearchLookup(mapperService(), fieldData(), request.types());
+ }
+ return searchLookup;
+ }
+
+ public DfsSearchResult dfsResult() {
+ return dfsResult;
+ }
+
+ public QuerySearchResult queryResult() {
+ return queryResult;
+ }
+
+ public FetchSearchResult fetchResult() {
+ return fetchResult;
+ }
+
+ @Override
+ public void addReleasable(Releasable releasable) {
+ if (clearables == null) {
+ clearables = new ArrayList<Releasable>();
+ }
+ clearables.add(releasable);
+ }
+
+ @Override
+ public void clearReleasables() {
+ if (clearables != null) {
+ Throwable th = null;
+ for (Releasable releasable : clearables) {
+ try {
+ releasable.release();
+ } catch (Throwable t) {
+ if (th == null) {
+ th = t;
+ }
+ }
+ }
+ clearables.clear();
+ if (th != null) {
+ throw new RuntimeException(th);
+ }
+ }
+ }
+
+ public ScanContext scanContext() {
+ if (scanContext == null) {
+ scanContext = new ScanContext();
+ }
+ return scanContext;
+ }
+
+ public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
+ return mapperService().smartName(name, request.types());
+ }
+
+ public FieldMappers smartNameFieldMappers(String name) {
+ return mapperService().smartNameFieldMappers(name, request.types());
+ }
+
+ public FieldMapper smartNameFieldMapper(String name) {
+ return mapperService().smartNameFieldMapper(name, request.types());
+ }
+
+ public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
+ return mapperService().smartNameObjectMapper(name, request.types());
+ }
+}
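preProcess() above normalizes the query exactly once before execution: a missing query becomes match_all, a non-default boost wraps it in a function-score query, and the type/alias filter is folded in (constant-score when the query is a match-all, filtered-query otherwise). A hedged sketch of that decision tree using plain strings in place of Lucene queries:

```java
public class PreProcessDemo {
    static String preProcess(String query, float boost, String filter) {
        if (query == null) query = "match_all";                       // parsedMatchAllQuery()
        if (boost != 1.0f) query = "function_score(" + query + ", boost=" + boost + ")";
        if (filter != null) {                                         // searchFilter(types())
            query = query.equals("match_all")
                    ? "constant_score(" + filter + ")"                // XConstantScoreQuery
                    : "filtered(" + query + ", " + filter + ")";      // XFilteredQuery
        }
        return query;
    }

    public static void main(String[] args) {
        System.out.println(preProcess(null, 1.0f, "type:doc"));  // constant_score(type:doc)
        System.out.println(preProcess("title:es", 2.0f, null));  // function_score(title:es, boost=2.0)
    }
}
```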
diff --git a/src/main/java/org/elasticsearch/search/internal/DocIdSetCollector.java b/src/main/java/org/elasticsearch/search/internal/DocIdSetCollector.java
new file mode 100644
index 0000000..5ffab22
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/DocIdSetCollector.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
+import org.elasticsearch.common.lucene.search.XCollector;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ */
+public class DocIdSetCollector extends XCollector {
+
+ private final DocSetCache docSetCache;
+ private final Collector collector;
+
+ private final List<ContextDocIdSet> docSets;
+ private boolean currentHasDocs;
+ private ContextDocIdSet currentContext;
+ private FixedBitSet currentSet;
+
+ public DocIdSetCollector(DocSetCache docSetCache, Collector collector) {
+ this.docSetCache = docSetCache;
+ this.collector = collector;
+ this.docSets = new ArrayList<ContextDocIdSet>();
+ }
+
+ public List<ContextDocIdSet> docSets() {
+ return docSets;
+ }
+
+ public void release() {
+ for (ContextDocIdSet docSet : docSets) {
+ docSetCache.release(docSet);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ collector.setScorer(scorer);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ collector.collect(doc);
+ currentHasDocs = true;
+ currentSet.set(doc);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ collector.setNextReader(context);
+ if (currentContext != null) {
+ if (currentHasDocs) {
+ docSets.add(currentContext);
+ } else {
+ docSetCache.release(currentContext);
+ }
+ }
+ currentContext = docSetCache.obtain(context);
+ currentSet = (FixedBitSet) currentContext.docSet;
+ currentHasDocs = false;
+ }
+
+ @Override
+ public void postCollection() {
+ if (collector instanceof XCollector) {
+ ((XCollector) collector).postCollection();
+ }
+ if (currentContext != null) {
+ if (currentHasDocs) {
+ docSets.add(currentContext);
+ } else {
+ docSetCache.release(currentContext);
+ }
+ currentContext = null;
+ currentSet = null;
+ currentHasDocs = false;
+ }
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+}
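DocIdSetCollector above records, per segment, a bit set of every doc the wrapped collector saw, and keeps a segment's set only if at least one doc matched. The same bookkeeping in a JDK-only sketch (java.util.BitSet standing in for Lucene's FixedBitSet):

```java
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class PerSegmentBitSetDemo {
    static List<BitSet> collect(int[][] matchesPerSegment) {
        List<BitSet> docSets = new ArrayList<>();
        for (int[] segmentMatches : matchesPerSegment) {
            BitSet current = new BitSet();          // obtained per segment in setNextReader
            for (int doc : segmentMatches) {
                current.set(doc);                   // collect(doc)
            }
            if (!current.isEmpty()) {               // currentHasDocs
                docSets.add(current);
            }                                       // else: released back to the cache
        }
        return docSets;
    }

    public static void main(String[] args) {
        int[][] segments = { {0, 3}, {}, {1} };
        System.out.println(collect(segments).size()); // 2 (the empty segment is dropped)
    }
}
```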
diff --git a/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java
new file mode 100644
index 0000000..2cd4e45
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.Scroll.readScroll;
+
+/**
+ *
+ */
+public class InternalScrollSearchRequest extends TransportRequest {
+
+ private long id;
+
+ private Scroll scroll;
+
+ public InternalScrollSearchRequest() {
+ }
+
+ public InternalScrollSearchRequest(SearchScrollRequest request, long id) {
+ super(request);
+ this.id = id;
+ this.scroll = request.scroll();
+ }
+
+ public InternalScrollSearchRequest(long id) {
+ this.id = id;
+ }
+
+ public long id() {
+ return id;
+ }
+
+ public Scroll scroll() {
+ return scroll;
+ }
+
+ public InternalScrollSearchRequest scroll(Scroll scroll) {
+ this.scroll = scroll;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ if (in.readBoolean()) {
+ scroll = readScroll(in);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ if (scroll == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ scroll.writeTo(out);
+ }
+ }
+}
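The readFrom/writeTo pair above uses a common wire-format idiom: a boolean presence flag written before an optional field. A self-contained sketch with the JDK's data streams (field names are illustrative):

```java
import java.io.*;
import java.util.Arrays;

public class OptionalFieldWireDemo {
    static byte[] write(long id, Long scrollMillis) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeLong(id);
        out.writeBoolean(scrollMillis != null); // presence flag, as in writeTo above
        if (scrollMillis != null) {
            out.writeLong(scrollMillis);
        }
        return bytes.toByteArray();
    }

    static long[] read(byte[] data) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        long id = in.readLong();
        long scroll = in.readBoolean() ? in.readLong() : -1; // -1 = absent
        return new long[]{id, scroll};
    }

    public static void main(String[] args) throws IOException {
        System.out.println(Arrays.toString(read(write(7L, 60_000L)))); // [7, 60000]
        System.out.println(Arrays.toString(read(write(7L, null))));    // [7, -1]
    }
}
```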
diff --git a/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
new file mode 100644
index 0000000..0d68961
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
@@ -0,0 +1,732 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.rest.action.support.RestXContentBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.highlight.HighlightField;
+import org.elasticsearch.search.lookup.SourceLookup;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.elasticsearch.common.lucene.Lucene.readExplanation;
+import static org.elasticsearch.common.lucene.Lucene.writeExplanation;
+import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
+import static org.elasticsearch.search.highlight.HighlightField.readHighlightField;
+import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField;
+
+/**
+ *
+ */
+public class InternalSearchHit implements SearchHit {
+
+ private static final Object[] EMPTY_SORT_VALUES = new Object[0];
+ private static final Text MAX_TERM_AS_TEXT = new StringAndBytesText(BytesRefFieldComparatorSource.MAX_TERM.utf8ToString());
+
+ private transient int docId;
+
+ private float score = Float.NEGATIVE_INFINITY;
+
+ private Text id;
+ private Text type;
+
+ private long version = -1;
+
+ private BytesReference source;
+
+ private Map<String, SearchHitField> fields = ImmutableMap.of();
+
+ private Map<String, HighlightField> highlightFields = null;
+
+ private Object[] sortValues = EMPTY_SORT_VALUES;
+
+ private String[] matchedQueries = Strings.EMPTY_ARRAY;
+
+ private Explanation explanation;
+
+ @Nullable
+ private SearchShardTarget shard;
+
+ private Map<String, Object> sourceAsMap;
+ private byte[] sourceAsBytes;
+
+ private InternalSearchHit() {
+
+ }
+
+ public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) {
+ this.docId = docId;
+ this.id = new StringAndBytesText(id);
+ this.type = type;
+ this.fields = fields;
+ }
+
+ public int docId() {
+ return this.docId;
+ }
+
+ public void shardTarget(SearchShardTarget shardTarget) {
+ this.shard = shardTarget;
+ }
+
+ public void score(float score) {
+ this.score = score;
+ }
+
+ @Override
+ public float score() {
+ return this.score;
+ }
+
+ @Override
+ public float getScore() {
+ return score();
+ }
+
+ public void version(long version) {
+ this.version = version;
+ }
+
+ @Override
+ public long version() {
+ return this.version;
+ }
+
+ @Override
+ public long getVersion() {
+ return this.version;
+ }
+
+ @Override
+ public String index() {
+ return shard.index();
+ }
+
+ @Override
+ public String getIndex() {
+ return index();
+ }
+
+ @Override
+ public String id() {
+ return id.string();
+ }
+
+ @Override
+ public String getId() {
+ return id();
+ }
+
+ @Override
+ public String type() {
+ return type.string();
+ }
+
+ @Override
+ public String getType() {
+ return type();
+ }
+
+
+ /**
+ * Returns the bytes reference, uncompressing the source first if needed.
+ */
+ public BytesReference sourceRef() {
+ try {
+ this.source = CompressorFactory.uncompressIfNeeded(this.source);
+ return this.source;
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to decompress source", e);
+ }
+ }
+
+ /**
+ * Sets the source representation; it might be compressed.
+ */
+ public InternalSearchHit sourceRef(BytesReference source) {
+ this.source = source;
+ this.sourceAsBytes = null;
+ this.sourceAsMap = null;
+ return this;
+ }
+
+ @Override
+ public BytesReference getSourceRef() {
+ return sourceRef();
+ }
+
+ /**
+ * Returns the internal source representation, which might still be compressed.
+ */
+ public BytesReference internalSourceRef() {
+ return source;
+ }
+
+
+ @Override
+ public byte[] source() {
+ if (source == null) {
+ return null;
+ }
+ if (sourceAsBytes != null) {
+ return sourceAsBytes;
+ }
+ this.sourceAsBytes = sourceRef().toBytes();
+ return this.sourceAsBytes;
+ }
+
+ @Override
+ public boolean isSourceEmpty() {
+ return source == null;
+ }
+
+ @Override
+ public Map<String, Object> getSource() {
+ return sourceAsMap();
+ }
+
+ @Override
+ public String sourceAsString() {
+ if (source == null) {
+ return null;
+ }
+ try {
+ return XContentHelper.convertToJson(sourceRef(), false);
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to convert source to a json string", e);
+ }
+ }
+
+ @Override
+ public String getSourceAsString() {
+ return sourceAsString();
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Map<String, Object> sourceAsMap() throws ElasticsearchParseException {
+ if (source == null) {
+ return null;
+ }
+ if (sourceAsMap != null) {
+ return sourceAsMap;
+ }
+
+ sourceAsMap = SourceLookup.sourceAsMap(source);
+ return sourceAsMap;
+ }
+
+ @Override
+ public Iterator<SearchHitField> iterator() {
+ return fields.values().iterator();
+ }
+
+ @Override
+ public SearchHitField field(String fieldName) {
+ return fields().get(fieldName);
+ }
+
+ @Override
+ public Map<String, SearchHitField> fields() {
+ if (fields == null) {
+ return ImmutableMap.of();
+ }
+ return fields;
+ }
+
+ // returns the fields without handling null cases
+ public Map<String, SearchHitField> fieldsOrNull() {
+ return this.fields;
+ }
+
+ @Override
+ public Map<String, SearchHitField> getFields() {
+ return fields();
+ }
+
+ public void fields(Map<String, SearchHitField> fields) {
+ this.fields = fields;
+ }
+
+ public Map<String, HighlightField> internalHighlightFields() {
+ return highlightFields;
+ }
+
+ @Override
+ public Map<String, HighlightField> highlightFields() {
+ if (highlightFields == null) {
+ return ImmutableMap.of();
+ }
+ return this.highlightFields;
+ }
+
+ @Override
+ public Map<String, HighlightField> getHighlightFields() {
+ return highlightFields();
+ }
+
+ public void highlightFields(Map<String, HighlightField> highlightFields) {
+ this.highlightFields = highlightFields;
+ }
+
+ public void sortValues(Object[] sortValues) {
+ // LUCENE 4 UPGRADE: there must be a better way;
+ // we want to convert to a Text object here, not keep the raw BytesRef
+ if (sortValues != null) {
+ for (int i = 0; i < sortValues.length; i++) {
+ if (sortValues[i] instanceof BytesRef) {
+ sortValues[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i]));
+ }
+ }
+ }
+ this.sortValues = sortValues;
+ }
+
+ @Override
+ public Object[] sortValues() {
+ return sortValues;
+ }
+
+ @Override
+ public Object[] getSortValues() {
+ return sortValues();
+ }
+
+ @Override
+ public Explanation explanation() {
+ return explanation;
+ }
+
+ @Override
+ public Explanation getExplanation() {
+ return explanation();
+ }
+
+ public void explanation(Explanation explanation) {
+ this.explanation = explanation;
+ }
+
+ @Override
+ public SearchShardTarget shard() {
+ return shard;
+ }
+
+ @Override
+ public SearchShardTarget getShard() {
+ return shard();
+ }
+
+ public void shard(SearchShardTarget target) {
+ this.shard = target;
+ }
+
+ public void matchedQueries(String[] matchedQueries) {
+ this.matchedQueries = matchedQueries;
+ }
+
+ @Override
+ public String[] matchedQueries() {
+ return this.matchedQueries;
+ }
+
+ @Override
+ public String[] getMatchedQueries() {
+ return this.matchedQueries;
+ }
+
+ public static class Fields {
+ static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+ static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+ static final XContentBuilderString _ID = new XContentBuilderString("_id");
+ static final XContentBuilderString _VERSION = new XContentBuilderString("_version");
+ static final XContentBuilderString _SCORE = new XContentBuilderString("_score");
+ static final XContentBuilderString FIELDS = new XContentBuilderString("fields");
+ static final XContentBuilderString HIGHLIGHT = new XContentBuilderString("highlight");
+ static final XContentBuilderString SORT = new XContentBuilderString("sort");
+ static final XContentBuilderString MATCHED_QUERIES = new XContentBuilderString("matched_queries");
+ static final XContentBuilderString _EXPLANATION = new XContentBuilderString("_explanation");
+ static final XContentBuilderString VALUE = new XContentBuilderString("value");
+ static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
+ static final XContentBuilderString DETAILS = new XContentBuilderString("details");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (explanation() != null) {
+ builder.field("_shard", shard.shardId());
+ builder.field("_node", shard.nodeIdText());
+ }
+ builder.field(Fields._INDEX, shard.indexText());
+ builder.field(Fields._TYPE, type);
+ builder.field(Fields._ID, id);
+ if (version != -1) {
+ builder.field(Fields._VERSION, version);
+ }
+ if (Float.isNaN(score)) {
+ builder.nullField(Fields._SCORE);
+ } else {
+ builder.field(Fields._SCORE, score);
+ }
+ if (source != null) {
+ RestXContentBuilder.restDocumentSource(source, builder, params);
+ }
+ if (fields != null && !fields.isEmpty()) {
+ builder.startObject(Fields.FIELDS);
+ for (SearchHitField field : fields.values()) {
+ if (field.values().isEmpty()) {
+ continue;
+ }
+ String fieldName = field.getName();
+ if (field.isMetadataField()) {
+ builder.field(fieldName, field.value());
+ } else {
+ builder.startArray(fieldName);
+ for (Object value : field.getValues()) {
+ builder.value(value);
+ }
+ builder.endArray();
+ }
+ }
+ builder.endObject();
+ }
+ if (highlightFields != null && !highlightFields.isEmpty()) {
+ builder.startObject(Fields.HIGHLIGHT);
+ for (HighlightField field : highlightFields.values()) {
+ builder.field(field.name());
+ if (field.fragments() == null) {
+ builder.nullValue();
+ } else {
+ builder.startArray();
+ for (Text fragment : field.fragments()) {
+ builder.value(fragment);
+ }
+ builder.endArray();
+ }
+ }
+ builder.endObject();
+ }
+ if (sortValues != null && sortValues.length > 0) {
+ builder.startArray(Fields.SORT);
+ for (Object sortValue : sortValues) {
+ if (sortValue != null && sortValue.equals(MAX_TERM_AS_TEXT)) {
+ // We don't display MAX_TERM in JSON responses in case some clients have UTF-8 parsers that wouldn't accept a
+ // non-character in the response, even though this is valid UTF-8
+ builder.nullValue();
+ } else {
+ builder.value(sortValue);
+ }
+ }
+ builder.endArray();
+ }
+ if (matchedQueries.length > 0) {
+ builder.startArray(Fields.MATCHED_QUERIES);
+ for (String matchedFilter : matchedQueries) {
+ builder.value(matchedFilter);
+ }
+ builder.endArray();
+ }
+ if (explanation() != null) {
+ builder.field(Fields._EXPLANATION);
+ buildExplanation(builder, explanation());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ private void buildExplanation(XContentBuilder builder, Explanation explanation) throws IOException {
+ builder.startObject();
+ builder.field(Fields.VALUE, explanation.getValue());
+ builder.field(Fields.DESCRIPTION, explanation.getDescription());
+ Explanation[] innerExps = explanation.getDetails();
+ if (innerExps != null) {
+ builder.startArray(Fields.DETAILS);
+ for (Explanation exp : innerExps) {
+ buildExplanation(builder, exp);
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ }
+
+ public static InternalSearchHit readSearchHit(StreamInput in, InternalSearchHits.StreamContext context) throws IOException {
+ InternalSearchHit hit = new InternalSearchHit();
+ hit.readFrom(in, context);
+ return hit;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ readFrom(in, InternalSearchHits.streamContext().streamShardTarget(InternalSearchHits.StreamContext.ShardTargetType.STREAM));
+ }
+
+ public void readFrom(StreamInput in, InternalSearchHits.StreamContext context) throws IOException {
+ score = in.readFloat();
+ id = in.readText();
+ type = in.readSharedText();
+ version = in.readLong();
+ source = in.readBytesReference();
+ if (source.length() == 0) {
+ source = null;
+ }
+ if (in.readBoolean()) {
+ explanation = readExplanation(in);
+ }
+ int size = in.readVInt();
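+ // small field counts are materialized with ImmutableMap.of(...) directly; larger counts fall back to the builder below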
+ if (size == 0) {
+ fields = ImmutableMap.of();
+ } else if (size == 1) {
+ SearchHitField hitField = readSearchHitField(in);
+ fields = ImmutableMap.of(hitField.name(), hitField);
+ } else if (size == 2) {
+ SearchHitField hitField1 = readSearchHitField(in);
+ SearchHitField hitField2 = readSearchHitField(in);
+ fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2);
+ } else if (size == 3) {
+ SearchHitField hitField1 = readSearchHitField(in);
+ SearchHitField hitField2 = readSearchHitField(in);
+ SearchHitField hitField3 = readSearchHitField(in);
+ fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2, hitField3.name(), hitField3);
+ } else if (size == 4) {
+ SearchHitField hitField1 = readSearchHitField(in);
+ SearchHitField hitField2 = readSearchHitField(in);
+ SearchHitField hitField3 = readSearchHitField(in);
+ SearchHitField hitField4 = readSearchHitField(in);
+ fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2, hitField3.name(), hitField3, hitField4.name(), hitField4);
+ } else if (size == 5) {
+ SearchHitField hitField1 = readSearchHitField(in);
+ SearchHitField hitField2 = readSearchHitField(in);
+ SearchHitField hitField3 = readSearchHitField(in);
+ SearchHitField hitField4 = readSearchHitField(in);
+ SearchHitField hitField5 = readSearchHitField(in);
+ fields = ImmutableMap.of(hitField1.name(), hitField1, hitField2.name(), hitField2, hitField3.name(), hitField3, hitField4.name(), hitField4, hitField5.name(), hitField5);
+ } else {
+ ImmutableMap.Builder<String, SearchHitField> builder = ImmutableMap.builder();
+ for (int i = 0; i < size; i++) {
+ SearchHitField hitField = readSearchHitField(in);
+ builder.put(hitField.name(), hitField);
+ }
+ fields = builder.build();
+ }
+
+ size = in.readVInt();
+ if (size == 0) {
+ highlightFields = ImmutableMap.of();
+ } else if (size == 1) {
+ HighlightField field = readHighlightField(in);
+ highlightFields = ImmutableMap.of(field.name(), field);
+ } else if (size == 2) {
+ HighlightField field1 = readHighlightField(in);
+ HighlightField field2 = readHighlightField(in);
+ highlightFields = ImmutableMap.of(field1.name(), field1, field2.name(), field2);
+ } else if (size == 3) {
+ HighlightField field1 = readHighlightField(in);
+ HighlightField field2 = readHighlightField(in);
+ HighlightField field3 = readHighlightField(in);
+ highlightFields = ImmutableMap.of(field1.name(), field1, field2.name(), field2, field3.name(), field3);
+ } else if (size == 4) {
+ HighlightField field1 = readHighlightField(in);
+ HighlightField field2 = readHighlightField(in);
+ HighlightField field3 = readHighlightField(in);
+ HighlightField field4 = readHighlightField(in);
+ highlightFields = ImmutableMap.of(field1.name(), field1, field2.name(), field2, field3.name(), field3, field4.name(), field4);
+ } else {
+ ImmutableMap.Builder<String, HighlightField> builder = ImmutableMap.builder();
+ for (int i = 0; i < size; i++) {
+ HighlightField field = readHighlightField(in);
+ builder.put(field.name(), field);
+ }
+ highlightFields = builder.build();
+ }
+
+ size = in.readVInt();
+ if (size > 0) {
+ sortValues = new Object[size];
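+ // each value was written as a one-byte type tag followed by the typed payload (see writeTo); tag 0 marks null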
+ for (int i = 0; i < sortValues.length; i++) {
+ byte type = in.readByte();
+ if (type == 0) {
+ sortValues[i] = null;
+ } else if (type == 1) {
+ sortValues[i] = in.readString();
+ } else if (type == 2) {
+ sortValues[i] = in.readInt();
+ } else if (type == 3) {
+ sortValues[i] = in.readLong();
+ } else if (type == 4) {
+ sortValues[i] = in.readFloat();
+ } else if (type == 5) {
+ sortValues[i] = in.readDouble();
+ } else if (type == 6) {
+ sortValues[i] = in.readByte();
+ } else if (type == 7) {
+ sortValues[i] = in.readShort();
+ } else if (type == 8) {
+ sortValues[i] = in.readBoolean();
+ } else if (type == 9) {
+ sortValues[i] = in.readText();
+ } else {
+ throw new IOException("Can't match type [" + type + "]");
+ }
+ }
+ }
+
+ size = in.readVInt();
+ if (size > 0) {
+ matchedQueries = new String[size];
+ for (int i = 0; i < size; i++) {
+ matchedQueries[i] = in.readString();
+ }
+ }
+
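+ // the shard target is either streamed inline (STREAM) or resolved from the lookup table that InternalSearchHits read earlier (LOOKUP)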
+ if (context.streamShardTarget() == InternalSearchHits.StreamContext.ShardTargetType.STREAM) {
+ if (in.readBoolean()) {
+ shard = readSearchShardTarget(in);
+ }
+ } else if (context.streamShardTarget() == InternalSearchHits.StreamContext.ShardTargetType.LOOKUP) {
+ int lookupId = in.readVInt();
+ if (lookupId > 0) {
+ shard = context.handleShardLookup().get(lookupId);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ writeTo(out, InternalSearchHits.streamContext().streamShardTarget(InternalSearchHits.StreamContext.ShardTargetType.STREAM));
+ }
+
+ public void writeTo(StreamOutput out, InternalSearchHits.StreamContext context) throws IOException {
+ out.writeFloat(score);
+ out.writeText(id);
+ out.writeSharedText(type);
+ out.writeLong(version);
+ out.writeBytesReference(source);
+ if (explanation == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ writeExplanation(out, explanation);
+ }
+ if (fields == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(fields.size());
+ for (SearchHitField hitField : fields().values()) {
+ hitField.writeTo(out);
+ }
+ }
+ if (highlightFields == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(highlightFields.size());
+ for (HighlightField highlightField : highlightFields.values()) {
+ highlightField.writeTo(out);
+ }
+ }
+
+ if (sortValues.length == 0) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(sortValues.length);
+ for (Object sortValue : sortValues) {
+ if (sortValue == null) {
+ out.writeByte((byte) 0);
+ } else {
+ Class type = sortValue.getClass();
+ if (type == String.class) {
+ out.writeByte((byte) 1);
+ out.writeString((String) sortValue);
+ } else if (type == Integer.class) {
+ out.writeByte((byte) 2);
+ out.writeInt((Integer) sortValue);
+ } else if (type == Long.class) {
+ out.writeByte((byte) 3);
+ out.writeLong((Long) sortValue);
+ } else if (type == Float.class) {
+ out.writeByte((byte) 4);
+ out.writeFloat((Float) sortValue);
+ } else if (type == Double.class) {
+ out.writeByte((byte) 5);
+ out.writeDouble((Double) sortValue);
+ } else if (type == Byte.class) {
+ out.writeByte((byte) 6);
+ out.writeByte((Byte) sortValue);
+ } else if (type == Short.class) {
+ out.writeByte((byte) 7);
+ out.writeShort((Short) sortValue);
+ } else if (type == Boolean.class) {
+ out.writeByte((byte) 8);
+ out.writeBoolean((Boolean) sortValue);
+ } else if (sortValue instanceof Text) {
+ out.writeByte((byte) 9);
+ out.writeText((Text) sortValue);
+ } else {
+ throw new IOException("Can't handle sort field value of type [" + type + "]");
+ }
+ }
+ }
+ }
+
+ if (matchedQueries.length == 0) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(matchedQueries.length);
+ for (String matchedFilter : matchedQueries) {
+ out.writeString(matchedFilter);
+ }
+ }
+
+ if (context.streamShardTarget() == InternalSearchHits.StreamContext.ShardTargetType.STREAM) {
+ if (shard == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ shard.writeTo(out);
+ }
+ } else if (context.streamShardTarget() == InternalSearchHits.StreamContext.ShardTargetType.LOOKUP) {
+ if (shard == null) {
+ out.writeVInt(0);
+ } else {
+ out.writeVInt(context.shardHandleLookup().get(shard));
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java b/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java
new file mode 100644
index 0000000..7f8fcdd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.SearchHitField;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ *
+ */
+public class InternalSearchHitField implements SearchHitField {
+
+ private String name;
+ private List<Object> values;
+
+ private InternalSearchHitField() {
+ }
+
+ public InternalSearchHitField(String name, List<Object> values) {
+ this.name = name;
+ this.values = values;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ @Override
+ public String getName() {
+ return name();
+ }
+
+ @Override
+ public Object value() {
+ if (values == null || values.isEmpty()) {
+ return null;
+ }
+ return values.get(0);
+ }
+
+ @Override
+ public Object getValue() {
+ return value();
+ }
+
+ public List<Object> values() {
+ return values;
+ }
+
+ @Override
+ public List<Object> getValues() {
+ return values();
+ }
+
+ @Override
+ public boolean isMetadataField() {
+ return MapperService.isMetadataField(name);
+ }
+
+ @Override
+ public Iterator<Object> iterator() {
+ return values.iterator();
+ }
+
+ public static InternalSearchHitField readSearchHitField(StreamInput in) throws IOException {
+ InternalSearchHitField result = new InternalSearchHitField();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readSharedString();
+ int size = in.readVInt();
+ values = new ArrayList<Object>(size);
+ for (int i = 0; i < size; i++) {
+ values.add(in.readGenericValue());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeSharedString(name);
+ out.writeVInt(values.size());
+ for (Object value : values) {
+ out.writeGenericValue(value);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
new file mode 100644
index 0000000..7073305
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import com.google.common.collect.Iterators;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.SearchShardTarget;
+
+import java.io.IOException;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
+import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit;
+
+/**
+ *
+ */
+public class InternalSearchHits implements SearchHits {
+
+ public static class StreamContext {
+
+ public static enum ShardTargetType {
+ STREAM,
+ LOOKUP,
+ NO_STREAM
+ }
+
+ private IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup = new IdentityHashMap<SearchShardTarget, Integer>();
+ private IntObjectOpenHashMap<SearchShardTarget> handleShardLookup = new IntObjectOpenHashMap<SearchShardTarget>();
+ private ShardTargetType streamShardTarget = ShardTargetType.STREAM;
+
+ public StreamContext reset() {
+ shardHandleLookup.clear();
+ handleShardLookup.clear();
+ streamShardTarget = ShardTargetType.STREAM;
+ return this;
+ }
+
+ public IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup() {
+ return shardHandleLookup;
+ }
+
+ public IntObjectOpenHashMap<SearchShardTarget> handleShardLookup() {
+ return handleShardLookup;
+ }
+
+ public ShardTargetType streamShardTarget() {
+ return streamShardTarget;
+ }
+
+ public StreamContext streamShardTarget(ShardTargetType streamShardTarget) {
+ this.streamShardTarget = streamShardTarget;
+ return this;
+ }
+ }
+
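+ // a single StreamContext is cached per thread and reset before each use, so the lookup maps are reused rather than re-allocated for every request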
+ private static final ThreadLocal<StreamContext> cache = new ThreadLocal<StreamContext>() {
+ @Override
+ protected StreamContext initialValue() {
+ return new StreamContext();
+ }
+ };
+
+ public static StreamContext streamContext() {
+ return cache.get().reset();
+ }
+
+
+ public static final InternalSearchHit[] EMPTY = new InternalSearchHit[0];
+
+ private InternalSearchHit[] hits;
+
+ public long totalHits;
+
+ private float maxScore;
+
+ InternalSearchHits() {
+
+ }
+
+ public InternalSearchHits(InternalSearchHit[] hits, long totalHits, float maxScore) {
+ this.hits = hits;
+ this.totalHits = totalHits;
+ this.maxScore = maxScore;
+ }
+
+ public void shardTarget(SearchShardTarget shardTarget) {
+ for (InternalSearchHit hit : hits) {
+ hit.shardTarget(shardTarget);
+ }
+ }
+
+ public long totalHits() {
+ return totalHits;
+ }
+
+ @Override
+ public long getTotalHits() {
+ return totalHits();
+ }
+
+ @Override
+ public float maxScore() {
+ return this.maxScore;
+ }
+
+ @Override
+ public float getMaxScore() {
+ return maxScore();
+ }
+
+ public SearchHit[] hits() {
+ return this.hits;
+ }
+
+ @Override
+ public SearchHit getAt(int position) {
+ return hits[position];
+ }
+
+ @Override
+ public SearchHit[] getHits() {
+ return hits();
+ }
+
+ @Override
+ public Iterator<SearchHit> iterator() {
+ return Iterators.forArray(hits());
+ }
+
+ public InternalSearchHit[] internalHits() {
+ return this.hits;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString HITS = new XContentBuilderString("hits");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString MAX_SCORE = new XContentBuilderString("max_score");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.HITS);
+ builder.field(Fields.TOTAL, totalHits);
+ if (Float.isNaN(maxScore)) {
+ builder.nullField(Fields.MAX_SCORE);
+ } else {
+ builder.field(Fields.MAX_SCORE, maxScore);
+ }
+ builder.field(Fields.HITS);
+ builder.startArray();
+ for (SearchHit hit : hits) {
+ hit.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ public static InternalSearchHits readSearchHits(StreamInput in, StreamContext context) throws IOException {
+ InternalSearchHits hits = new InternalSearchHits();
+ hits.readFrom(in, context);
+ return hits;
+ }
+
+ public static InternalSearchHits readSearchHits(StreamInput in) throws IOException {
+ InternalSearchHits hits = new InternalSearchHits();
+ hits.readFrom(in);
+ return hits;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ readFrom(in, streamContext().streamShardTarget(StreamContext.ShardTargetType.LOOKUP));
+ }
+
+ public void readFrom(StreamInput in, StreamContext context) throws IOException {
+ totalHits = in.readVLong();
+ maxScore = in.readFloat();
+ int size = in.readVInt();
+ if (size == 0) {
+ hits = EMPTY;
+ } else {
+ if (context.streamShardTarget() == StreamContext.ShardTargetType.LOOKUP) {
+ // read the lookup table first
+ int lookupSize = in.readVInt();
+ for (int i = 0; i < lookupSize; i++) {
+ context.handleShardLookup().put(in.readVInt(), readSearchShardTarget(in));
+ }
+ }
+
+ hits = new InternalSearchHit[size];
+ for (int i = 0; i < hits.length; i++) {
+ hits[i] = readSearchHit(in, context);
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ writeTo(out, streamContext().streamShardTarget(StreamContext.ShardTargetType.LOOKUP));
+ }
+
+ public void writeTo(StreamOutput out, StreamContext context) throws IOException {
+ out.writeVLong(totalHits);
+ out.writeFloat(maxScore);
+ out.writeVInt(hits.length);
+ if (hits.length > 0) {
+ if (context.streamShardTarget() == StreamContext.ShardTargetType.LOOKUP) {
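+ // LOOKUP mode de-duplicates shard targets: each distinct target is written once under a numeric handle, and every hit then references the handle instead of re-serializing the target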
+ // start from 1, 0 is for null!
+ int counter = 1;
+ for (InternalSearchHit hit : hits) {
+ if (hit.shard() != null) {
+ Integer handle = context.shardHandleLookup().get(hit.shard());
+ if (handle == null) {
+ context.shardHandleLookup().put(hit.shard(), counter++);
+ }
+ }
+ }
+ out.writeVInt(context.shardHandleLookup().size());
+ if (!context.shardHandleLookup().isEmpty()) {
+ for (Map.Entry<SearchShardTarget, Integer> entry : context.shardHandleLookup().entrySet()) {
+ out.writeVInt(entry.getValue());
+ entry.getKey().writeTo(out);
+ }
+ }
+ }
+
+ for (InternalSearchHit hit : hits) {
+ hit.writeTo(out, context);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
new file mode 100644
index 0000000..c48ad7c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.search.facet.InternalFacets;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits;
+
+/**
+ *
+ */
+public class InternalSearchResponse implements Streamable, ToXContent {
+
+ public static InternalSearchResponse empty() {
+ return new InternalSearchResponse(new InternalSearchHits(new InternalSearchHit[0], 0, 0), null, null, null, false);
+ }
+
+ private InternalSearchHits hits;
+
+ private InternalFacets facets;
+
+ private InternalAggregations aggregations;
+
+ private Suggest suggest;
+
+ private boolean timedOut;
+
+ private InternalSearchResponse() {
+ }
+
+ public InternalSearchResponse(InternalSearchHits hits, InternalFacets facets, InternalAggregations aggregations, Suggest suggest, boolean timedOut) {
+ this.hits = hits;
+ this.facets = facets;
+ this.aggregations = aggregations;
+ this.suggest = suggest;
+ this.timedOut = timedOut;
+ }
+
+ public boolean timedOut() {
+ return this.timedOut;
+ }
+
+ public SearchHits hits() {
+ return hits;
+ }
+
+ public Facets facets() {
+ return facets;
+ }
+
+ public Aggregations aggregations() {
+ return aggregations;
+ }
+
+ public Suggest suggest() {
+ return suggest;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ hits.toXContent(builder, params);
+ if (facets != null) {
+ facets.toXContent(builder, params);
+ }
+ if (aggregations != null) {
+ aggregations.toXContent(builder, params);
+ }
+ if (suggest != null) {
+ suggest.toXContent(builder, params);
+ }
+ return builder;
+ }
+
+ public static InternalSearchResponse readInternalSearchResponse(StreamInput in) throws IOException {
+ InternalSearchResponse response = new InternalSearchResponse();
+ response.readFrom(in);
+ return response;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ hits = readSearchHits(in);
+ if (in.readBoolean()) {
+ facets = InternalFacets.readFacets(in);
+ }
+ if (in.readBoolean()) {
+ aggregations = InternalAggregations.readAggregations(in);
+ }
+ if (in.readBoolean()) {
+ suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in);
+ }
+ timedOut = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
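+ // each optional section is preceded by a boolean presence flag so readFrom can skip the ones that are absent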
+ hits.writeTo(out);
+ if (facets == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ facets.writeTo(out);
+ }
+ if (aggregations == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ aggregations.writeTo(out);
+ }
+ if (suggest == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ suggest.writeTo(out);
+ }
+ out.writeBoolean(timedOut);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/src/main/java/org/elasticsearch/search/internal/SearchContext.java
new file mode 100644
index 0000000..6ec1cfb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.internal;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.facet.SearchContextFacets;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
+import org.elasticsearch.search.fetch.partial.PartialFieldsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.scan.ScanContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+import java.util.List;
+
+/**
+ */
+public abstract class SearchContext implements Releasable {
+
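+ // the SearchContext executing on this thread; setCurrent also propagates its types to the QueryParseContext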
+ private static ThreadLocal<SearchContext> current = new ThreadLocal<SearchContext>();
+
+ public static void setCurrent(SearchContext value) {
+ current.set(value);
+ QueryParseContext.setTypes(value.types());
+ }
+
+ public static void removeCurrent() {
+ current.remove();
+ QueryParseContext.removeTypes();
+ }
+
+ public static SearchContext current() {
+ return current.get();
+ }
+
+ public abstract boolean clearAndRelease();
+
+ /**
+ * Should be called before executing the main query and after all other parameters have been set.
+ */
+ public abstract void preProcess();
+
+ public abstract Filter searchFilter(String[] types);
+
+ public abstract long id();
+
+ public abstract String source();
+
+ public abstract ShardSearchRequest request();
+
+ public abstract SearchType searchType();
+
+ public abstract SearchContext searchType(SearchType searchType);
+
+ public abstract SearchShardTarget shardTarget();
+
+ public abstract int numberOfShards();
+
+ public abstract boolean hasTypes();
+
+ public abstract String[] types();
+
+ public abstract float queryBoost();
+
+ public abstract SearchContext queryBoost(float queryBoost);
+
+ public abstract long nowInMillis();
+
+ public abstract Scroll scroll();
+
+ public abstract SearchContext scroll(Scroll scroll);
+
+ public abstract SearchContextAggregations aggregations();
+
+ public abstract SearchContext aggregations(SearchContextAggregations aggregations);
+
+ public abstract SearchContextFacets facets();
+
+ public abstract SearchContext facets(SearchContextFacets facets);
+
+ public abstract SearchContextHighlight highlight();
+
+ public abstract void highlight(SearchContextHighlight highlight);
+
+ public abstract SuggestionSearchContext suggest();
+
+ public abstract void suggest(SuggestionSearchContext suggest);
+
+ /**
+ * @return the rescore context or null if rescoring wasn't specified or isn't supported
+ */
+ public abstract RescoreSearchContext rescore();
+
+ public abstract void rescore(RescoreSearchContext rescore);
+
+ public abstract boolean hasFieldDataFields();
+
+ public abstract FieldDataFieldsContext fieldDataFields();
+
+ public abstract boolean hasScriptFields();
+
+ public abstract ScriptFieldsContext scriptFields();
+
+ public abstract boolean hasPartialFields();
+
+ public abstract PartialFieldsContext partialFields();
+
+ /**
+ * A shortcut to check whether a fetchSourceContext exists and requests the source.
+ *
+ * @return {@code true} if the source should be fetched
+ */
+ public abstract boolean sourceRequested();
+
+ public abstract boolean hasFetchSourceContext();
+
+ public abstract FetchSourceContext fetchSourceContext();
+
+ public abstract SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext);
+
+ public abstract ContextIndexSearcher searcher();
+
+ public abstract IndexShard indexShard();
+
+ public abstract MapperService mapperService();
+
+ public abstract AnalysisService analysisService();
+
+ public abstract IndexQueryParserService queryParserService();
+
+ public abstract SimilarityService similarityService();
+
+ public abstract ScriptService scriptService();
+
+ public abstract CacheRecycler cacheRecycler();
+
+ public abstract PageCacheRecycler pageCacheRecycler();
+
+ public abstract FilterCache filterCache();
+
+ public abstract DocSetCache docSetCache();
+
+ public abstract IndexFieldDataService fieldData();
+
+ public abstract IdCache idCache();
+
+ public abstract long timeoutInMillis();
+
+ public abstract void timeoutInMillis(long timeoutInMillis);
+
+ public abstract SearchContext minimumScore(float minimumScore);
+
+ public abstract Float minimumScore();
+
+ public abstract SearchContext sort(Sort sort);
+
+ public abstract Sort sort();
+
+ public abstract SearchContext trackScores(boolean trackScores);
+
+ public abstract boolean trackScores();
+
+ public abstract SearchContext parsedPostFilter(ParsedFilter postFilter);
+
+ public abstract ParsedFilter parsedPostFilter();
+
+ public abstract Filter aliasFilter();
+
+ public abstract SearchContext parsedQuery(ParsedQuery query);
+
+ public abstract ParsedQuery parsedQuery();
+
+ /**
+ * The query to execute, might be rewritten.
+ */
+ public abstract Query query();
+
+ /**
+ * Has the query been rewritten already?
+ */
+ public abstract boolean queryRewritten();
+
+ /**
+ * Rewrites the query and updates it. Only happens once.
+ */
+ public abstract SearchContext updateRewriteQuery(Query rewriteQuery);
+
+ public abstract int from();
+
+ public abstract SearchContext from(int from);
+
+ public abstract int size();
+
+ public abstract SearchContext size(int size);
+
+ public abstract boolean hasFieldNames();
+
+ public abstract List<String> fieldNames();
+
+ public abstract void emptyFieldNames();
+
+ public abstract boolean explain();
+
+ public abstract void explain(boolean explain);
+
+ @Nullable
+ public abstract List<String> groupStats();
+
+ public abstract void groupStats(List<String> groupStats);
+
+ public abstract boolean version();
+
+ public abstract void version(boolean version);
+
+ public abstract int[] docIdsToLoad();
+
+ public abstract int docIdsToLoadFrom();
+
+ public abstract int docIdsToLoadSize();
+
+ public abstract SearchContext docIdsToLoad(int[] docIdsToLoad, int docIdsToLoadFrom, int docIdsToLoadSize);
+
+ public abstract void accessed(long accessTime);
+
+ public abstract long lastAccessTime();
+
+ public abstract long keepAlive();
+
+ public abstract void keepAlive(long keepAlive);
+
+ public abstract SearchLookup lookup();
+
+ public abstract DfsSearchResult dfsResult();
+
+ public abstract QuerySearchResult queryResult();
+
+ public abstract FetchSearchResult fetchResult();
+
+ public abstract void addReleasable(Releasable releasable);
+
+ public abstract void clearReleasables();
+
+ public abstract ScanContext scanContext();
+
+ public abstract MapperService.SmartNameFieldMappers smartFieldMappers(String name);
+
+ public abstract FieldMappers smartNameFieldMappers(String name);
+
+ public abstract FieldMapper smartNameFieldMapper(String name);
+
+ public abstract MapperService.SmartNameObjectMapper smartNameObjectMapper(String name);
+
+}
diff --git a/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
new file mode 100644
index 0000000..d95f424
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.Scroll.readScroll;
+
+/**
+ * Source structure:
+ * <p/>
+ * <pre>
+ * {
+ * from : 0, size : 20, (optional, can be set on the request)
+ * sort : { "name.first" : {}, "name.last" : { reverse : true } }
+ * fields : [ "name.first", "name.last" ]
+ * query : { ... }
+ * facets : {
+ * "facet1" : {
+ * query : { ... }
+ * }
+ * }
+ * }
+ * </pre>
+ */
+public class ShardSearchRequest extends TransportRequest {
+
+ private String index;
+
+ private int shardId;
+
+ private int numberOfShards;
+
+ private SearchType searchType;
+
+ private Scroll scroll;
+
+ private String[] types = Strings.EMPTY_ARRAY;
+
+ private String[] filteringAliases;
+
+ private BytesReference source;
+ private BytesReference extraSource;
+
+ private long nowInMillis;
+
+ public ShardSearchRequest() {
+ }
+
+ public ShardSearchRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards) {
+ super(searchRequest);
+ this.index = shardRouting.index();
+ this.shardId = shardRouting.id();
+ this.numberOfShards = numberOfShards;
+ this.searchType = searchRequest.searchType();
+ this.source = searchRequest.source();
+ this.extraSource = searchRequest.extraSource();
+ this.scroll = searchRequest.scroll();
+ this.types = searchRequest.types();
+
+ }
+
+ public ShardSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchType searchType) {
+ this(shardRouting.index(), shardRouting.id(), numberOfShards, searchType);
+ }
+
+ public ShardSearchRequest(String index, int shardId, int numberOfShards, SearchType searchType) {
+ this.index = index;
+ this.shardId = shardId;
+ this.numberOfShards = numberOfShards;
+ this.searchType = searchType;
+ }
+
+ public String index() {
+ return index;
+ }
+
+ public int shardId() {
+ return shardId;
+ }
+
+ public SearchType searchType() {
+ return this.searchType;
+ }
+
+ public int numberOfShards() {
+ return numberOfShards;
+ }
+
+ public BytesReference source() {
+ return this.source;
+ }
+
+ public BytesReference extraSource() {
+ return this.extraSource;
+ }
+
+ public ShardSearchRequest source(BytesReference source) {
+ this.source = source;
+ return this;
+ }
+
+ public ShardSearchRequest extraSource(BytesReference extraSource) {
+ this.extraSource = extraSource;
+ return this;
+ }
+
+ public ShardSearchRequest nowInMillis(long nowInMillis) {
+ this.nowInMillis = nowInMillis;
+ return this;
+ }
+
+ public long nowInMillis() {
+ return this.nowInMillis;
+ }
+
+ public Scroll scroll() {
+ return scroll;
+ }
+
+ public ShardSearchRequest scroll(Scroll scroll) {
+ this.scroll = scroll;
+ return this;
+ }
+
+ public String[] filteringAliases() {
+ return filteringAliases;
+ }
+
+ public ShardSearchRequest filteringAliases(String[] filteringAliases) {
+ this.filteringAliases = filteringAliases;
+ return this;
+ }
+
+ public String[] types() {
+ return types;
+ }
+
+ public ShardSearchRequest types(String[] types) {
+ this.types = types;
+ return this;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ index = in.readString();
+ shardId = in.readVInt();
+ searchType = SearchType.fromId(in.readByte());
+ numberOfShards = in.readVInt();
+ if (in.readBoolean()) {
+ scroll = readScroll(in);
+ }
+
+ source = in.readBytesReference();
+ extraSource = in.readBytesReference();
+
+ types = in.readStringArray();
+ filteringAliases = in.readStringArray();
+ nowInMillis = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(index);
+ out.writeVInt(shardId);
+ out.writeByte(searchType.id());
+ out.writeVInt(numberOfShards);
+ if (scroll == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ scroll.writeTo(out);
+ }
+ out.writeBytesReference(source);
+ out.writeBytesReference(extraSource);
+ out.writeStringArray(types);
+ out.writeStringArrayNullable(filteringAliases);
+ out.writeVLong(nowInMillis);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java b/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java
new file mode 100644
index 0000000..1dd21cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.lookup;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/*
+ * Can iterate over the positions of a term an arbitrary number of times.
+ */
+public class CachedPositionIterator extends PositionIterator {
+
+ public CachedPositionIterator(IndexFieldTerm indexFieldTerm) {
+ super(indexFieldTerm);
+ }
+
+ // all payloads of the term in the current document in one bytes array.
+ // payloadsStarts and payloadsLengths mark the start and length of each payload.
+ final BytesRef payloads = new BytesRef();
+
+ final IntsRef payloadsLengths = new IntsRef(0);
+
+ final IntsRef payloadsStarts = new IntsRef(0);
+
+ final IntsRef positions = new IntsRef(0);
+
+ final IntsRef startOffsets = new IntsRef(0);
+
+ final IntsRef endOffsets = new IntsRef(0);
+
+ @Override
+ public Iterator<TermPosition> reset() {
+ return new Iterator<TermPosition>() {
+ private int pos = 0;
+ private final TermPosition termPosition = new TermPosition();
+
+ @Override
+ public boolean hasNext() {
+ return pos < freq;
+ }
+
+ @Override
+ public TermPosition next() {
+ termPosition.position = positions.ints[pos];
+ termPosition.startOffset = startOffsets.ints[pos];
+ termPosition.endOffset = endOffsets.ints[pos];
+ termPosition.payload = payloads;
+ payloads.offset = payloadsStarts.ints[pos];
+ payloads.length = payloadsLengths.ints[pos];
+ pos++;
+ return termPosition;
+ }
+
+ @Override
+ public void remove() {
+ }
+ };
+ }
+
+
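+ // drains the underlying PositionIterator once and caches positions, offsets and payloads so reset() can replay them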
+ private void record() throws IOException {
+ TermPosition termPosition;
+ for (int i = 0; i < freq; i++) {
+ termPosition = super.next();
+ positions.ints[i] = termPosition.position;
+ addPayload(i, termPosition.payload);
+ startOffsets.ints[i] = termPosition.startOffset;
+ endOffsets.ints[i] = termPosition.endOffset;
+ }
+ }
+ private void ensureSize(int freq) {
+ if (freq == 0) {
+ return;
+ }
+ if (startOffsets.ints.length < freq) {
+ startOffsets.grow(freq);
+ endOffsets.grow(freq);
+ positions.grow(freq);
+ payloadsLengths.grow(freq);
+ payloadsStarts.grow(freq);
+ }
+ payloads.offset = 0;
+ payloadsLengths.offset = 0;
+ payloadsStarts.offset = 0;
+ payloads.grow(freq * 8); // an initial size guess; addPayload() grows the array further if needed
+
+ }
+
+ private void addPayload(int i, BytesRef currPayload) {
+ if (currPayload != null) {
+ payloadsLengths.ints[i] = currPayload.length;
+ payloadsStarts.ints[i] = i == 0 ? 0 : payloadsStarts.ints[i - 1] + payloadsLengths.ints[i - 1];
+ if (payloads.bytes.length < payloadsStarts.ints[i] + payloadsLengths.ints[i]) {
+ payloads.offset = 0; // the offset serves no purpose here, but
+ // we must ensure that it is 0 before
+ // grow() is called
+ payloads.grow(payloads.bytes.length * 2); // just a guess
+ }
+ System.arraycopy(currPayload.bytes, currPayload.offset, payloads.bytes, payloadsStarts.ints[i], currPayload.length);
+ } else {
+ payloadsLengths.ints[i] = 0;
+ payloadsStarts.ints[i] = i == 0 ? 0 : payloadsStarts.ints[i - 1] + payloadsLengths.ints[i - 1];
+ }
+ }
+
+
+ @Override
+ public void nextDoc() throws IOException {
+ super.nextDoc();
+ ensureSize(freq);
+ record();
+ }
+
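+ // direct iteration is not supported; consumers must go through reset(), which replays the cached positions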
+ @Override
+ public TermPosition next() {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/lookup/DocLookup.java b/src/main/java/org/elasticsearch/search/lookup/DocLookup.java
new file mode 100644
index 0000000..94fe9cb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/DocLookup.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.lookup;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class DocLookup implements Map {
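+ // exposes per-document field values to scripts through the Map interface; only get() and containsKey() are functional, the remaining Map methods throw UnsupportedOperationException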
+
+ private final Map<String, ScriptDocValues> localCacheFieldData = Maps.newHashMapWithExpectedSize(4);
+
+ private final MapperService mapperService;
+ private final IndexFieldDataService fieldDataService;
+
+ @Nullable
+ private final String[] types;
+
+ private AtomicReaderContext reader;
+
+ private Scorer scorer;
+
+ private int docId = -1;
+
+ DocLookup(MapperService mapperService, IndexFieldDataService fieldDataService, @Nullable String[] types) {
+ this.mapperService = mapperService;
+ this.fieldDataService = fieldDataService;
+ this.types = types;
+ }
+
+ public MapperService mapperService() {
+ return this.mapperService;
+ }
+
+ public void setNextReader(AtomicReaderContext context) {
+ if (this.reader == context) { // if we are called with the same reader, keep the cached field data
+ return;
+ }
+ this.reader = context;
+ this.docId = -1;
+ localCacheFieldData.clear();
+ }
+
+ public void setScorer(Scorer scorer) {
+ this.scorer = scorer;
+ }
+
+ public void setNextDocId(int docId) {
+ this.docId = docId;
+ }
+
+ public float score() throws IOException {
+ return scorer.score();
+ }
+
+ public float getScore() throws IOException {
+ return scorer.score();
+ }
+
+ @Override
+ public Object get(Object key) {
+ // assume the key is a string
+ String fieldName = key.toString();
+ ScriptDocValues scriptValues = localCacheFieldData.get(fieldName);
+ if (scriptValues == null) {
+ FieldMapper mapper = mapperService.smartNameFieldMapper(fieldName, types);
+ if (mapper == null) {
+ throw new ElasticsearchIllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + "");
+ }
+ scriptValues = fieldDataService.getForField(mapper).load(reader).getScriptValues();
+ localCacheFieldData.put(fieldName, scriptValues);
+ }
+ scriptValues.setNextDocId(docId);
+ return scriptValues;
+ }
+
+ public boolean containsKey(Object key) {
+ // assume the key is a string
+ String fieldName = key.toString();
+ ScriptDocValues scriptValues = localCacheFieldData.get(fieldName);
+ if (scriptValues == null) {
+ FieldMapper mapper = mapperService.smartNameFieldMapper(fieldName, types);
+ if (mapper == null) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public int size() {
+ throw new UnsupportedOperationException();
+ }
+
+ public boolean isEmpty() {
+ throw new UnsupportedOperationException();
+ }
+
+ public boolean containsValue(Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Object put(Object key, Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Object remove(Object key) {
+ throw new UnsupportedOperationException();
+ }
+
+ public void putAll(Map m) {
+ throw new UnsupportedOperationException();
+ }
+
+ public void clear() {
+ throw new UnsupportedOperationException();
+ }
+
+ public Set keySet() {
+ throw new UnsupportedOperationException();
+ }
+
+ public Collection values() {
+ throw new UnsupportedOperationException();
+ }
+
+ public Set entrySet() {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java b/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java
new file mode 100644
index 0000000..5d4af11
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.lookup;
+
+import org.elasticsearch.index.mapper.FieldMapper;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ *
+ */
+public class FieldLookup {
+
+ // we can cache the mapper completely per name, since it's at an index/shard level (the lookup does not change within the scope of a search request)
+ private final FieldMapper mapper;
+
+ private Map<String, List<Object>> fields;
+
+ private Object value;
+
+ private boolean valueLoaded = false;
+
+ private List<Object> values = new ArrayList<Object>();
+
+ private boolean valuesLoaded = false;
+
+ FieldLookup(FieldMapper mapper) {
+ this.mapper = mapper;
+ }
+
+ public FieldMapper mapper() {
+ return mapper;
+ }
+
+ public Map<String, List<Object>> fields() {
+ return fields;
+ }
+
+ /**
+ * Sets the post-processed values.
+ */
+ public void fields(Map<String, List<Object>> fields) {
+ this.fields = fields;
+ }
+
+ public void clear() {
+ value = null;
+ valueLoaded = false;
+ values.clear();
+ valuesLoaded = false;
+ fields = null;
+ }
+
+ public boolean isEmpty() {
+ if (valueLoaded) {
+ return value == null;
+ }
+ if (valuesLoaded) {
+ return values.isEmpty();
+ }
+ return getValue() == null;
+ }
+
+ public Object getValue() {
+ if (valueLoaded) {
+ return value;
+ }
+ valueLoaded = true;
+ value = null;
+ List<Object> values = fields.get(mapper.names().indexName());
+ return values != null ? value = values.get(0) : null;
+ }
+
+ public List<Object> getValues() {
+ if (valuesLoaded) {
+ return values;
+ }
+ valuesLoaded = true;
+ values.clear();
+ return values = fields().get(mapper.names().indexName());
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java b/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java
new file mode 100644
index 0000000..906a63e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.lookup;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class FieldsLookup implements Map {
+
+ private final MapperService mapperService;
+
+ @Nullable
+ private final String[] types;
+
+ private AtomicReader reader;
+
+ private int docId = -1;
+
+ private final Map<String, FieldLookup> cachedFieldData = Maps.newHashMap();
+
+ private final SingleFieldsVisitor fieldVisitor;
+
+ FieldsLookup(MapperService mapperService, @Nullable String[] types) {
+ this.mapperService = mapperService;
+ this.types = types;
+ this.fieldVisitor = new SingleFieldsVisitor(null);
+ }
+
+ public void setNextReader(AtomicReaderContext context) {
+ if (this.reader == context.reader()) { // if we are called with the same reader, don't invalidate source
+ return;
+ }
+ this.reader = context.reader();
+ clearCache();
+ this.docId = -1;
+ }
+
+ public void setNextDocId(int docId) {
+ if (this.docId == docId) { // if we are called with the same docId, don't invalidate source
+ return;
+ }
+ this.docId = docId;
+ clearCache();
+ }
+
+
+ @Override
+ public Object get(Object key) {
+ return loadFieldData(key.toString());
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ try {
+ loadFieldData(key.toString());
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ }
+
+ @Override
+ public int size() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Set keySet() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Collection values() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Set entrySet() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Object put(Object key, Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Object remove(Object key) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void clear() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void putAll(Map m) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ private FieldLookup loadFieldData(String name) {
+ FieldLookup data = cachedFieldData.get(name);
+ if (data == null) {
+ FieldMapper mapper = mapperService.smartNameFieldMapper(name, types);
+ if (mapper == null) {
+ throw new ElasticsearchIllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types));
+ }
+ data = new FieldLookup(mapper);
+ cachedFieldData.put(name, data);
+ }
+ if (data.fields() == null) {
+ String fieldName = data.mapper().names().indexName();
+ fieldVisitor.reset(fieldName);
+ try {
+ reader.document(docId, fieldVisitor);
+ fieldVisitor.postProcess(data.mapper());
+ data.fields(ImmutableMap.of(name, fieldVisitor.fields().get(data.mapper().names().indexName())));
+ } catch (IOException e) {
+ throw new ElasticsearchParseException("failed to load field [" + name + "]", e);
+ }
+ }
+ return data;
+ }
+
+ private void clearCache() {
+ for (Entry<String, FieldLookup> entry : cachedFieldData.entrySet()) {
+ entry.getValue().clear();
+ }
+ }
+
+}
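
Stored-field access through _fields follows the same pattern; on first access
FieldsLookup loads the single stored field with a SingleFieldsVisitor and caches
the resulting FieldLookup per name. A sketch under the same assumptions as above
(helper and field name hypothetical):

    import org.apache.lucene.index.AtomicReaderContext;
    import org.elasticsearch.search.lookup.FieldLookup;
    import org.elasticsearch.search.lookup.SearchLookup;

    class FieldsLookupSketch {
        static Object storedValue(SearchLookup lookup, AtomicReaderContext leaf, int segmentDocId, String field) {
            lookup.setNextReader(leaf);
            lookup.setNextDocId(segmentDocId);
            FieldLookup data = (FieldLookup) lookup.fields().get(field); // visits the stored document on first access
            return data.getValue(); // first stored value, or null if the field was not stored
        }
    }
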
diff --git a/src/main/java/org/elasticsearch/search/lookup/IndexField.java b/src/main/java/org/elasticsearch/search/lookup/IndexField.java
new file mode 100644
index 0000000..cc488ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/IndexField.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.lookup;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.search.CollectionStatistics;
+import org.elasticsearch.common.util.MinimalMap;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Script interface to all information regarding a field.
+ * */
+public class IndexField extends MinimalMap<String, IndexFieldTerm> {
+
+ /*
+ * IndexFieldTerm objects that represent the terms are stored in this map
+ * when requested. Information such as frequency, doc frequency and
+ * positions can be retrieved from the IndexFieldTerm objects in this map.
+ */
+ private final Map<String, IndexFieldTerm> terms = new HashMap<String, IndexFieldTerm>();
+
+ // the name of this field
+ private final String fieldName;
+
+ /*
+ * This holds the current reader. We need it to populate the field
+ * statistics; we just delegate all requests to it.
+ */
+ private IndexLookup indexLookup;
+
+ /*
+ * General field statistics such as number of documents containing the
+ * field.
+ */
+ private final CollectionStatistics fieldStats;
+
+ /*
+ * Update posting lists in all IndexFieldTerm objects
+ */
+ void setReader(AtomicReader reader) {
+ for (IndexFieldTerm ti : terms.values()) {
+ ti.setNextReader(reader);
+ }
+ }
+
+ /*
+ * Represents a field in a document. Can be used to return information on
+ * statistics of this field. Information on specific terms in this field can
+ * be accessed by calling get(String term).
+ */
+ public IndexField(String fieldName, IndexLookup indexLookup) throws IOException {
+
+ assert fieldName != null;
+ this.fieldName = fieldName;
+
+ assert indexLookup != null;
+ this.indexLookup = indexLookup;
+
+ fieldStats = this.indexLookup.getIndexSearcher().collectionStatistics(fieldName);
+ }
+
+ /* get number of documents containing the field */
+ public long docCount() throws IOException {
+ return fieldStats.docCount();
+ }
+
+ /* get the total number of words over all documents that were indexed */
+ public long sumttf() throws IOException {
+ return fieldStats.sumTotalTermFreq();
+ }
+
+ /*
+ * get the sum of doc frequencies over all words that appear in any document
+ * that has the field.
+ */
+ public long sumdf() throws IOException {
+ return fieldStats.sumDocFreq();
+ }
+
+ // TODO: might be good to get the field lengths here somewhere?
+
+ /*
+ * Returns an IndexFieldTerm object that can be used to access information on
+ * specific terms. Flags can be set as described in IndexLookup.
+ *
+ * TODO: here might be potential for running time improvement? If we knew in
+ * advance which terms are requested, we could provide an array which the
+ * user could then iterate over.
+ */
+ public IndexFieldTerm get(Object key, int flags) {
+ String termString = (String) key;
+ IndexFieldTerm indexFieldTerm = terms.get(termString);
+ // see if we initialized already...
+ if (indexFieldTerm == null) {
+ indexFieldTerm = new IndexFieldTerm(termString, fieldName, indexLookup, flags);
+ terms.put(termString, indexFieldTerm);
+ }
+ indexFieldTerm.validateFlags(flags);
+ return indexFieldTerm;
+ }
+
+ /*
+ * Returns an IndexFieldTerm object that can be used to access information on
+ * specific terms. Flags can be set as described in IndexLookup.
+ */
+ public IndexFieldTerm get(Object key) {
+ // per default, do not initialize any positions info
+ return get(key, IndexLookup.FLAG_FREQUENCIES);
+ }
+
+ public void setDocIdInTerms(int docId) {
+ for (IndexFieldTerm ti : terms.values()) {
+ ti.setNextDoc(docId);
+ }
+ }
+
+}
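
A sketch of reading shard-level statistics through this class (the field and
term are hypothetical; the SearchLookup is assumed to be positioned on a
segment and document already):

    import java.io.IOException;
    import org.elasticsearch.search.lookup.IndexField;
    import org.elasticsearch.search.lookup.IndexFieldTerm;
    import org.elasticsearch.search.lookup.SearchLookup;

    class IndexFieldSketch {
        static long[] stats(SearchLookup lookup) throws IOException {
            IndexField body = lookup.indexLookup().get("body"); // cached per field name
            IndexFieldTerm term = body.get("lucene");           // defaults to FLAG_FREQUENCIES
            // docs containing the field, total tokens in the field, df and ttf of the term
            return new long[] { body.docCount(), body.sumttf(), term.df(), term.ttf() };
        }
    }
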
diff --git a/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java b/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java
new file mode 100644
index 0000000..72dd8d6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java
@@ -0,0 +1,284 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.lookup;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.TermStatistics;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.lucene.search.EmptyScorer;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Holds all information on a particular term in a field.
+ * */
+public class IndexFieldTerm implements Iterable<TermPosition> {
+
+ // The posting list for this term. Is null if the term or field does not
+ // exist. Can be DocsEnum or DocsAndPositionsEnum.
+ DocsEnum docsEnum;
+
+ // Stores if positions, offsets and payloads are requested.
+ private final int flags;
+
+ private final String fieldName;
+
+ private final String term;
+
+ private final PositionIterator iterator;
+
+ // for lucene calls
+ private final Term identifier;
+
+ private final TermStatistics termStats;
+
+ private static final EmptyScorer EMPTY_DOCS_ENUM = new EmptyScorer(null);
+
+ // get the document frequency of the term
+ public long df() throws IOException {
+ return termStats.docFreq();
+ }
+
+ // get the total term frequency of the term, that is, how often does the
+ // term appear in any document?
+ public long ttf() throws IOException {
+ return termStats.totalTermFreq();
+ }
+
+ // when the reader changes, we have to get the posting list for this term
+ // and reader
+ void setNextReader(AtomicReader reader) {
+ try {
+ // Get the posting list for a specific term. Depending on the flags,
+ // this will either get a DocsEnum or a DocsAndPositionsEnum if
+ // available.
+
+
+ // get lucene frequency flag
+ int luceneFrequencyFlag = getLuceneFrequencyFlag(flags);
+ if (shouldRetrieveFrequenciesOnly()) {
+ docsEnum = getOnlyDocsEnum(luceneFrequencyFlag, reader);
+ } else {
+ int lucenePositionsFlags = getLucenePositionsFlags(flags);
+ docsEnum = getDocsAndPosEnum(lucenePositionsFlags, reader);
+ if (docsEnum == null) { // no positions available
+ docsEnum = getOnlyDocsEnum(luceneFrequencyFlag, reader);
+ }
+ }
+ } catch (IOException e) {
+ throw new ElasticsearchException("Unable to get posting list for field " + fieldName + " and term " + term, e);
+ }
+
+ }
+
+ private boolean shouldRetrieveFrequenciesOnly() {
+ return (flags & ~IndexLookup.FLAG_FREQUENCIES) == 0;
+ }
+
+ private int getLuceneFrequencyFlag(int flags) {
+ return (flags & IndexLookup.FLAG_FREQUENCIES) > 0 ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE;
+ }
+
+ private int getLucenePositionsFlags(int flags) {
+ int lucenePositionsFlags = (flags & IndexLookup.FLAG_PAYLOADS) > 0 ? DocsAndPositionsEnum.FLAG_PAYLOADS : 0x0;
+ lucenePositionsFlags |= (flags & IndexLookup.FLAG_OFFSETS) > 0 ? DocsAndPositionsEnum.FLAG_OFFSETS : 0x0;
+ return lucenePositionsFlags;
+ }
+
+ // get the DocsAndPositionsEnum from the reader.
+ private DocsEnum getDocsAndPosEnum(int luceneFlags, AtomicReader reader) throws IOException {
+ assert identifier.field() != null;
+ assert identifier.bytes() != null;
+ final Fields fields = reader.fields();
+ DocsEnum newDocsEnum = null;
+ if (fields != null) {
+ final Terms terms = fields.terms(identifier.field());
+ if (terms != null) {
+ if (terms.hasPositions()) {
+ final TermsEnum termsEnum = terms.iterator(null);
+ if (termsEnum.seekExact(identifier.bytes())) {
+ newDocsEnum = termsEnum.docsAndPositions(reader.getLiveDocs(),
+ docsEnum instanceof DocsAndPositionsEnum ? (DocsAndPositionsEnum) docsEnum : null, luceneFlags);
+ }
+ }
+ }
+ }
+ return newDocsEnum;
+ }
+
+ // get the DocsEnum from the reader.
+ private DocsEnum getOnlyDocsEnum(int luceneFlags, AtomicReader reader) throws IOException {
+ assert identifier.field() != null;
+ assert identifier.bytes() != null;
+ final Fields fields = reader.fields();
+ DocsEnum newDocsEnum = null;
+ if (fields != null) {
+ final Terms terms = fields.terms(identifier.field());
+ if (terms != null) {
+ TermsEnum termsEnum = terms.iterator(null);
+ if (termsEnum.seekExact(identifier.bytes())) {
+ newDocsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, luceneFlags);
+ }
+ }
+ }
+ if (newDocsEnum == null) {
+ newDocsEnum = EMPTY_DOCS_ENUM;
+ }
+ return newDocsEnum;
+ }
+
+ private int freq = 0;
+
+ public void setNextDoc(int docId) {
+ assert (docsEnum != null);
+ try {
+ // we try to advance to the current document.
+ int currentDocPos = docsEnum.docID();
+ if (currentDocPos < docId) {
+ currentDocPos = docsEnum.advance(docId);
+ }
+ if (currentDocPos == docId) {
+ freq = docsEnum.freq();
+ } else {
+ freq = 0;
+ }
+ iterator.nextDoc();
+ } catch (IOException e) {
+ throw new ElasticsearchException("While trying to initialize term positions in IndexFieldTerm.setNextDoc() ", e);
+ }
+ }
+
+ public IndexFieldTerm(String term, String fieldName, IndexLookup indexLookup, int flags) {
+ assert fieldName != null;
+ this.fieldName = fieldName;
+ assert term != null;
+ this.term = term;
+ assert indexLookup != null;
+ identifier = new Term(fieldName, term);
+ this.flags = flags;
+ boolean doRecord = ((flags & IndexLookup.FLAG_CACHE) > 0);
+ if (withPositions()) {
+ if (!doRecord) {
+ iterator = new PositionIterator(this);
+ } else {
+ iterator = new CachedPositionIterator(this);
+ }
+ } else {
+ iterator = new PositionIterator(this);
+ }
+ setNextReader(indexLookup.getReader());
+ setNextDoc(indexLookup.getDocId());
+ try {
+ termStats = indexLookup.getIndexSearcher().termStatistics(identifier,
+ TermContext.build(indexLookup.getReaderContext(), identifier));
+ } catch (IOException e) {
+ throw new ElasticsearchException("Cannot get term statistics: ", e);
+ }
+ }
+
+ private boolean withPositions() {
+ return shouldRetrievePositions() || shouldRetrieveOffsets() || shouldRetrievePayloads();
+ }
+
+ protected boolean shouldRetrievePositions() {
+ return (flags & IndexLookup.FLAG_POSITIONS) > 0;
+ }
+
+ protected boolean shouldRetrieveOffsets() {
+ return (flags & IndexLookup.FLAG_OFFSETS) > 0;
+ }
+
+ protected boolean shouldRetrievePayloads() {
+ return (flags & IndexLookup.FLAG_PAYLOADS) > 0;
+ }
+
+ public int tf() throws IOException {
+ return freq;
+ }
+
+ @Override
+ public Iterator<TermPosition> iterator() {
+ return iterator.reset();
+ }
+
+ /*
+ * A user might decide inside a script to call get with _POSITIONS and then
+ * a second time with _PAYLOADS. If the positions were recorded but the
+ * payloads were not, the user will not have access to them. Therefore, throw
+ * an exception here explaining how to call get().
+ */
+ public void validateFlags(int flags2) {
+ if ((this.flags & flags2) < flags2) {
+ throw new ElasticsearchException("You must call get with all required flags! Instead of " + getCalledStatement(flags2)
+ + "call " + getCallStatement(flags2 | this.flags) + " once");
+ }
+ }
+
+ private String getCalledStatement(int flags2) {
+ String calledFlagsCall1 = getFlagsString(flags);
+ String calledFlagsCall2 = getFlagsString(flags2);
+ String callStatement1 = getCallStatement(calledFlagsCall1);
+ String callStatement2 = getCallStatement(calledFlagsCall2);
+ return " " + callStatement1 + " and " + callStatement2 + " ";
+ }
+
+ private String getCallStatement(String calledFlags) {
+ return "_index['" + this.fieldName + "'].get('" + this.term + "', " + calledFlags + ")";
+ }
+
+ private String getFlagsString(int flags2) {
+ String flagsString = null;
+ if ((flags2 & IndexLookup.FLAG_FREQUENCIES) != 0) {
+ flagsString = anddToFlagsString(flagsString, "_FREQUENCIES");
+ }
+ if ((flags2 & IndexLookup.FLAG_POSITIONS) != 0) {
+ flagsString = anddToFlagsString(flagsString, "_POSITIONS");
+ }
+ if ((flags2 & IndexLookup.FLAG_OFFSETS) != 0) {
+ flagsString = anddToFlagsString(flagsString, "_OFFSETS");
+ }
+ if ((flags2 & IndexLookup.FLAG_PAYLOADS) != 0) {
+ flagsString = anddToFlagsString(flagsString, "_PAYLOADS");
+ }
+ if ((flags2 & IndexLookup.FLAG_CACHE) != 0) {
+ flagsString = anddToFlagsString(flagsString, "_CACHE");
+ }
+ return flagsString;
+ }
+
+ private String addToFlagsString(String flagsString, String flag) {
+ if (flagsString != null) {
+ flagsString += " | ";
+ } else {
+ flagsString = "";
+ }
+ flagsString += flag;
+ return flagsString;
+ }
+
+ private String getCallStatement(int flags2) {
+ String calledFlags = getFlagsString(flags2);
+ String callStatement = getCallStatement(calledFlags);
+ return " " + callStatement + " ";
+
+ }
+
+}
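
Because validateFlags() rejects any get() whose flags were not recorded on the
first access to a term, a script has to request the union of everything it will
later need up front. A sketch (hypothetical field and term):

    import org.elasticsearch.search.lookup.IndexField;
    import org.elasticsearch.search.lookup.IndexFieldTerm;
    import org.elasticsearch.search.lookup.IndexLookup;

    class FlagsSketch {
        static IndexFieldTerm positionsAwareTerm(IndexField field) {
            // field.get("lucene") alone would record only FLAG_FREQUENCIES, and a
            // later get("lucene", FLAG_POSITIONS) would make validateFlags() throw.
            return field.get("lucene",
                    IndexLookup.FLAG_FREQUENCIES | IndexLookup.FLAG_POSITIONS);
        }
    }
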
diff --git a/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java b/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java
new file mode 100644
index 0000000..647c794
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.lookup;
+
+import com.google.common.collect.ImmutableMap.Builder;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.util.MinimalMap;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class IndexLookup extends MinimalMap<String, IndexField> {
+
+ /**
+ * Flag to pass to {@link IndexField#get(Object, int)} if you require
+ * offsets in the returned {@link IndexFieldTerm}.
+ */
+ public static final int FLAG_OFFSETS = 2;
+
+ /**
+ * Flag to pass to {@link IndexField#get(Object, int)} if you require
+ * payloads in the returned {@link IndexFieldTerm}.
+ */
+ public static final int FLAG_PAYLOADS = 4;
+
+ /**
+ * Flag to pass to {@link IndexField#get(Object, int)} if you require
+ * frequencies in the returned {@link IndexFieldTerm}. Frequencies might be
+ * returned anyway for some lucene codecs even if this flag is not set.
+ */
+ public static final int FLAG_FREQUENCIES = 8;
+
+ /**
+ * Flag to pass to {@link IndexField#get(Object, int)} if you require
+ * positions in the returned {@link IndexFieldTerm}.
+ */
+ public static final int FLAG_POSITIONS = 16;
+
+ /**
+ * Flag to pass to {@link IndexField#get(Object, int)} if you want the position
+ * iterator of the returned {@link IndexFieldTerm} to be cached for repeated iteration.
+ */
+ public static final int FLAG_CACHE = 32;
+
+ // Current reader from which we can get the term vectors. No info on term
+ // and field statistics.
+ private AtomicReader reader;
+
+ // The parent reader from which we can get proper field and term
+ // statistics
+ private CompositeReader parentReader;
+
+ // we need this later to get the field and term statistics of the shard
+ private IndexSearcher indexSearcher;
+
+ // we need this later to get the term statistics of the shard
+ private IndexReaderContext indexReaderContext;
+
+ // current docId
+ private int docId = -1;
+
+ // stores the objects that are used in the script. we maintain this map
+ // because we do not want to re-initialize the objects each time a field is
+ // accessed
+ private final Map<String, IndexField> indexFields = new HashMap<String, IndexField>();
+
+ // number of documents per shard. cached here because the computation is
+ // expensive
+ private int numDocs = -1;
+
+ // the maximum doc number of the shard.
+ private int maxDoc = -1;
+
+ // number of deleted documents per shard. cached here because the
+ // computation is expensive
+ private int numDeletedDocs = -1;
+
+ public int numDocs() {
+ if (numDocs == -1) {
+ numDocs = parentReader.numDocs();
+ }
+ return numDocs;
+ }
+
+ public int maxDoc() {
+ if (maxDoc == -1) {
+ maxDoc = parentReader.maxDoc();
+ }
+ return maxDoc;
+ }
+
+ public int numDeletedDocs() {
+ if (numDeletedDocs == -1) {
+ numDeletedDocs = parentReader.numDeletedDocs();
+ }
+ return numDeletedDocs;
+ }
+
+ public IndexLookup(Builder<String, Object> builder) {
+ builder.put("_FREQUENCIES", IndexLookup.FLAG_FREQUENCIES);
+ builder.put("_POSITIONS", IndexLookup.FLAG_POSITIONS);
+ builder.put("_OFFSETS", IndexLookup.FLAG_OFFSETS);
+ builder.put("_PAYLOADS", IndexLookup.FLAG_PAYLOADS);
+ builder.put("_CACHE", IndexLookup.FLAG_CACHE);
+ }
+
+ public void setNextReader(AtomicReaderContext context) {
+ if (reader == context.reader()) { // if we are called with the same
+ // reader, nothing to do
+ return;
+ }
+ // check if we have to invalidate all field and shard stats - only if
+ // parent reader changed
+ if (context.parent != null) {
+ if (parentReader == null) {
+ parentReader = context.parent.reader();
+ indexSearcher = new IndexSearcher(parentReader);
+ indexReaderContext = context.parent;
+ } else {
+ // parent reader may only be set once. TODO we could also call
+ // indexFields.clear() here instead of the assertion, just to be on
+ // the safe side
+ assert (parentReader == context.parent.reader());
+ }
+ } else {
+ assert parentReader == null;
+ }
+ reader = context.reader();
+ docId = -1;
+ setReaderInFields();
+ }
+
+ protected void setReaderInFields() {
+ for (IndexField stat : indexFields.values()) {
+ stat.setReader(reader);
+ }
+ }
+
+ public void setNextDocId(int docId) {
+ if (this.docId == docId) { // if we are called with the same docId,
+ // nothing to do
+ return;
+ }
+ // We assume that docs are processed in ascending order of id. If this
+ // is not the case, we would have to re-initialize all posting lists in
+ // IndexFieldTerm. TODO: Instead of the assert we could also call
+ // setReaderInFields(); here?
+ if (this.docId > docId) {
+ // This might happen if the same SearchLookup is used in different
+ // phases, such as score and fetch phase.
+ // In this case we do not want to re-initialize posting lists etc.
+ // because we do not even know if term and field statistics will be
+ // needed in this new phase.
+ // Therefore we just remove all IndexFieldTerms.
+ indexFields.clear();
+ }
+ this.docId = docId;
+ setNextDocIdInFields();
+ }
+
+ protected void setNextDocIdInFields() {
+ for (IndexField stat : indexFields.values()) {
+ stat.setDocIdInTerms(this.docId);
+ }
+ }
+
+ /*
+ * TODO: here might be potential for running time improvement? If we knew in
+ * advance which terms are requested, we could provide an array which the
+ * user could then iterate over.
+ */
+ @Override
+ public IndexField get(Object key) {
+ String stringField = (String) key;
+ IndexField indexField = indexFields.get(key);
+ if (indexField == null) {
+ try {
+ indexField = new IndexField(stringField, this);
+ indexFields.put(stringField, indexField);
+ } catch (IOException e) {
+ throw new ElasticsearchException(e.getMessage(), e);
+ }
+ }
+ return indexField;
+ }
+
+ /*
+ * Get the lucene term vectors. See
+ * https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html
+ *
+ */
+ public Fields termVectors() throws IOException {
+ assert reader != null;
+ return reader.getTermVectors(docId);
+ }
+
+ AtomicReader getReader() {
+ return reader;
+ }
+
+ public int getDocId() {
+ return docId;
+ }
+
+ public IndexReader getParentReader() {
+ if (parentReader == null) {
+ return reader;
+ }
+ return parentReader;
+ }
+
+ public IndexSearcher getIndexSearcher() {
+ if (indexSearcher == null) {
+ return new IndexSearcher(reader);
+ }
+ return indexSearcher;
+ }
+
+ public IndexReaderContext getReaderContext() {
+ return indexReaderContext;
+ }
+}
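
Once a reader and doc id are set, shard-level counters come from the cached
parent (composite) reader, while term vectors come from the current segment
reader. A sketch (assumes a composite parent reader has been seen and a doc id
is set):

    import java.io.IOException;
    import org.apache.lucene.index.Fields;
    import org.elasticsearch.search.lookup.IndexLookup;

    class IndexLookupSketch {
        static void shardInfo(IndexLookup index) throws IOException {
            int live = index.numDocs();           // cached, from the parent reader
            int deleted = index.numDeletedDocs(); // cached, from the parent reader
            Fields tv = index.termVectors();      // term vectors of the current doc, may be null
        }
    }
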
diff --git a/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java b/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java
new file mode 100644
index 0000000..24cc74e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.lookup;
+
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+public class PositionIterator implements Iterator<TermPosition> {
+
+ private static final DocsAndPositionsEnum EMPTY = new EmptyDocsAndPosEnum();
+
+ private boolean wasReset = false;
+
+ protected IndexFieldTerm indexFieldTerm;
+
+ protected int freq = -1;
+
+ // current position of iterator
+ private int currentPos;
+
+ protected final TermPosition termPosition = new TermPosition();
+
+ private DocsAndPositionsEnum docsAndPos;
+
+ public PositionIterator(IndexFieldTerm indexFieldTerm) {
+ this.indexFieldTerm = indexFieldTerm;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("Cannot remove anything from TermPosition iterator.");
+ }
+
+ @Override
+ public boolean hasNext() {
+ return currentPos < freq;
+ }
+
+
+ @Override
+ public TermPosition next() {
+ try {
+ termPosition.position = docsAndPos.nextPosition();
+ termPosition.startOffset = docsAndPos.startOffset();
+ termPosition.endOffset = docsAndPos.endOffset();
+ termPosition.payload = docsAndPos.getPayload();
+ } catch (IOException ex) {
+ throw new ElasticsearchException("can not advance iterator", ex);
+ }
+ currentPos++;
+ return termPosition;
+ }
+
+ public void nextDoc() throws IOException {
+ wasReset = false;
+ currentPos = 0;
+ freq = indexFieldTerm.tf();
+ if (indexFieldTerm.docsEnum instanceof DocsAndPositionsEnum) {
+ docsAndPos = (DocsAndPositionsEnum) indexFieldTerm.docsEnum;
+ } else {
+ docsAndPos = EMPTY;
+ }
+ }
+
+ public Iterator<TermPosition> reset() {
+ if (wasReset) {
+ throw new ElasticsearchException(
+ "Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely.");
+ }
+ wasReset = true;
+ return this;
+ }
+
+ // we use this to make sure we can also iterate if there are no positions
+ private static final class EmptyDocsAndPosEnum extends DocsAndPositionsEnum {
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int docID() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long cost() {
+ throw new UnsupportedOperationException();
+ }
+ }
+}
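
This single-pass restriction is what _CACHE lifts: without FLAG_CACHE, a second
for-loop over the same term hits the reset() exception above. A sketch of one
legal pass (hypothetical term):

    import org.elasticsearch.search.lookup.IndexFieldTerm;
    import org.elasticsearch.search.lookup.TermPosition;

    class PositionsSketch {
        static int lastPosition(IndexFieldTerm term) {
            int last = -1;
            for (TermPosition p : term) { // iterator() delegates to PositionIterator.reset()
                last = p.position;        // stays -1 when positions were not indexed
            }
            return last;
        }
    }
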
diff --git a/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
new file mode 100644
index 0000000..781e24a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.lookup;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.MapperService;
+
+/**
+ *
+ */
+public class SearchLookup {
+
+ final DocLookup docMap;
+
+ final SourceLookup sourceLookup;
+
+ final FieldsLookup fieldsLookup;
+
+ final IndexLookup indexLookup;
+
+ final ImmutableMap<String, Object> asMap;
+
+ public SearchLookup(MapperService mapperService, IndexFieldDataService fieldDataService, @Nullable String[] types) {
+ ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+ docMap = new DocLookup(mapperService, fieldDataService, types);
+ sourceLookup = new SourceLookup();
+ fieldsLookup = new FieldsLookup(mapperService, types);
+ indexLookup = new IndexLookup(builder);
+
+ builder.put("doc", docMap);
+ builder.put("_doc", docMap);
+ builder.put("_source", sourceLookup);
+ builder.put("_fields", fieldsLookup);
+ builder.put("_index", indexLookup);
+ asMap = builder.build();
+ }
+
+ public ImmutableMap<String, Object> asMap() {
+ return this.asMap;
+ }
+
+ public SourceLookup source() {
+ return this.sourceLookup;
+ }
+
+ public IndexLookup indexLookup() {
+ return this.indexLookup;
+ }
+
+ public FieldsLookup fields() {
+ return this.fieldsLookup;
+ }
+
+ public DocLookup doc() {
+ return this.docMap;
+ }
+
+ public void setScorer(Scorer scorer) {
+ docMap.setScorer(scorer);
+ }
+
+ public void setNextReader(AtomicReaderContext context) {
+ docMap.setNextReader(context);
+ sourceLookup.setNextReader(context);
+ fieldsLookup.setNextReader(context);
+ indexLookup.setNextReader(context);
+ }
+
+ public void setNextDocId(int docId) {
+ docMap.setNextDocId(docId);
+ sourceLookup.setNextDocId(docId);
+ fieldsLookup.setNextDocId(docId);
+ indexLookup.setNextDocId(docId);
+ }
+}
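
How the per-segment, per-document contract fits together: a driving loop of the
kind a script executor runs. A minimal sketch assuming an open IndexReader;
segment-relative doc ids are used throughout:

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader;
    import org.elasticsearch.search.lookup.SearchLookup;

    class SearchLookupSketch {
        static void visitAll(SearchLookup lookup, IndexReader reader) {
            for (AtomicReaderContext leaf : reader.leaves()) {
                lookup.setNextReader(leaf); // propagated to doc, _source, _fields, _index
                for (int i = 0; i < leaf.reader().maxDoc(); i++) {
                    lookup.setNextDocId(i); // segment-relative doc id
                    // lookup.asMap() now resolves "doc", "_source", "_fields" and
                    // "_index" against segment leaf, document i
                }
            }
        }
    }
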
diff --git a/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java b/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java
new file mode 100644
index 0000000..add63b4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.lookup;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.fieldvisitor.JustSourceFieldsVisitor;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ */
+public class SourceLookup implements Map {
+
+ private AtomicReader reader;
+
+ private int docId = -1;
+
+ private BytesReference sourceAsBytes;
+ private Map<String, Object> source;
+ private XContentType sourceContentType;
+
+ public Map<String, Object> source() {
+ return source;
+ }
+
+ public XContentType sourceContentType() {
+ return sourceContentType;
+ }
+
+ private Map<String, Object> loadSourceIfNeeded() {
+ if (source != null) {
+ return source;
+ }
+ if (sourceAsBytes != null) {
+ Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(sourceAsBytes);
+ sourceContentType = tuple.v1();
+ source = tuple.v2();
+ return source;
+ }
+ try {
+ JustSourceFieldsVisitor sourceFieldVisitor = new JustSourceFieldsVisitor();
+ reader.document(docId, sourceFieldVisitor);
+ BytesReference source = sourceFieldVisitor.source();
+ if (source == null) {
+ this.source = ImmutableMap.of();
+ this.sourceContentType = null;
+ } else {
+ Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(source);
+ this.sourceContentType = tuple.v1();
+ this.source = tuple.v2();
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchParseException("failed to parse / load source", e);
+ }
+ return this.source;
+ }
+
+ public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(BytesReference source) throws ElasticsearchParseException {
+ return XContentHelper.convertToMap(source, false);
+ }
+
+ public static Map<String, Object> sourceAsMap(BytesReference source) throws ElasticsearchParseException {
+ return sourceAsMapAndType(source).v2();
+ }
+
+ public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
+ return XContentHelper.convertToMap(bytes, offset, length, false);
+ }
+
+ public static Map<String, Object> sourceAsMap(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
+ return sourceAsMapAndType(bytes, offset, length).v2();
+ }
+
+ public void setNextReader(AtomicReaderContext context) {
+ if (this.reader == context.reader()) { // if we are called with the same reader, don't invalidate source
+ return;
+ }
+ this.reader = context.reader();
+ this.source = null;
+ this.sourceAsBytes = null;
+ this.docId = -1;
+ }
+
+ public void setNextDocId(int docId) {
+ if (this.docId == docId) { // if we are called with the same docId, don't invalidate source
+ return;
+ }
+ this.docId = docId;
+ this.sourceAsBytes = null;
+ this.source = null;
+ }
+
+ public void setNextSource(BytesReference source) {
+ this.sourceAsBytes = source;
+ }
+
+ public void setNextSource(Map<String, Object> source) {
+ this.source = source;
+ }
+
+ /**
+ * Internal source representation; might be compressed.
+ */
+ public BytesReference internalSourceRef() {
+ return sourceAsBytes;
+ }
+
+ /**
+ * Returns the values associated with the path. These are "low-level" values, and the
+ * path expression can navigate within an array/list.
+ */
+ public List<Object> extractRawValues(String path) {
+ return XContentMapValues.extractRawValues(path, loadSourceIfNeeded());
+ }
+
+ public Object filter(String[] includes, String[] excludes) {
+ return XContentMapValues.filter(loadSourceIfNeeded(), includes, excludes);
+ }
+
+ public Object extractValue(String path) {
+ return XContentMapValues.extractValue(path, loadSourceIfNeeded());
+ }
+
+ @Override
+ public Object get(Object key) {
+ return loadSourceIfNeeded().get(key);
+ }
+
+ @Override
+ public int size() {
+ return loadSourceIfNeeded().size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return loadSourceIfNeeded().isEmpty();
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ return loadSourceIfNeeded().containsKey(key);
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ return loadSourceIfNeeded().containsValue(value);
+ }
+
+ @Override
+ public Set keySet() {
+ return loadSourceIfNeeded().keySet();
+ }
+
+ @Override
+ public Collection values() {
+ return loadSourceIfNeeded().values();
+ }
+
+ @Override
+ public Set entrySet() {
+ return loadSourceIfNeeded().entrySet();
+ }
+
+ @Override
+ public Object put(Object key, Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Object remove(Object key) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void putAll(Map m) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void clear() {
+ throw new UnsupportedOperationException();
+ }
+}
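
The _source map is parsed lazily: nothing is read from the index until a lookup
forces loadSourceIfNeeded(). A sketch (hypothetical paths; assumes the lookup is
positioned on a document):

    import java.util.List;
    import org.elasticsearch.search.lookup.SourceLookup;

    class SourceSketch {
        static void read(SourceLookup source) {
            Object title = source.get("title");                  // forces the parse on first access
            Object name = source.extractValue("author.name");    // dot-path into the parsed map
            List<Object> tags = source.extractRawValues("tags"); // path-aware, handles arrays/lists
        }
    }
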
diff --git a/src/main/java/org/elasticsearch/search/lookup/TermPosition.java b/src/main/java/org/elasticsearch/search/lookup/TermPosition.java
new file mode 100644
index 0000000..1ffe9ea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/lookup/TermPosition.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.lookup;
+
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+
+public class TermPosition {
+
+ public int position = -1;
+ public int startOffset = -1;
+ public int endOffset = -1;
+ public BytesRef payload;
+ private CharsRef spare = new CharsRef(0);
+
+ public String payloadAsString() {
+ if (payload != null && payload.length != 0) {
+ UnicodeUtil.UTF8toUTF16(payload.bytes, payload.offset, payload.length, spare);
+ return spare.toString();
+ } else {
+ return null;
+ }
+ }
+
+ public float payloadAsFloat(float defaultMissing) {
+ if (payload != null && payload.length != 0) {
+ return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
+ } else {
+ return defaultMissing;
+ }
+ }
+
+ public int payloadAsInt(int defaultMissing) {
+ if (payload != null && payload.length != 0) {
+ return PayloadHelper.decodeInt(payload.bytes, payload.offset);
+ } else {
+ return defaultMissing;
+ }
+ }
+}
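
Payloads are raw bytes, so the decode method has to match how the payload was
indexed (for example a float written by a payload-encoding token filter). A
sketch:

    import org.elasticsearch.search.lookup.TermPosition;

    class PayloadSketch {
        static float weightOf(TermPosition p) {
            // decodes the first four payload bytes as a float; 1.0f when absent
            return p.payloadAsFloat(1.0f);
        }
    }
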
diff --git a/src/main/java/org/elasticsearch/search/query/FilterBinaryParseElement.java b/src/main/java/org/elasticsearch/search/query/FilterBinaryParseElement.java
new file mode 100644
index 0000000..1a256e3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/FilterBinaryParseElement.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class FilterBinaryParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ byte[] filterSource = parser.binaryValue();
+ XContentParser fSourceParser = XContentFactory.xContent(filterSource).createParser(filterSource);
+ try {
+ ParsedFilter filter = context.queryParserService().parseInnerFilter(fSourceParser);
+ if (filter != null) {
+ context.parsedPostFilter(filter);
+ }
+ } finally {
+ fSourceParser.close();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/query/FromParseElement.java b/src/main/java/org/elasticsearch/search/query/FromParseElement.java
new file mode 100644
index 0000000..13e58ca
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/FromParseElement.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class FromParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ int from = parser.intValue();
+ if (from < 0) {
+ throw new SearchParseException(context, "from is set to [" + from + "] and is expected to be higher or equal to 0");
+ }
+ context.from(from);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/query/IndicesBoostParseElement.java b/src/main/java/org/elasticsearch/search/query/IndicesBoostParseElement.java
new file mode 100644
index 0000000..eeb70eb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/IndicesBoostParseElement.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * <pre>
+ * {
+ * indicesBoost : {
+ * "index1" : 1.4,
+ * "index2" : 1.5
+ * }
+ * }
+ * </pre>
+ *
+ *
+ */
+public class IndicesBoostParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String indexName = parser.currentName();
+ if (indexName.equals(context.shardTarget().index())) {
+ parser.nextToken(); // move to the value
+ // we found our query boost
+ context.queryBoost(parser.floatValue());
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/query/MinScoreParseElement.java b/src/main/java/org/elasticsearch/search/query/MinScoreParseElement.java
new file mode 100644
index 0000000..4904871
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/MinScoreParseElement.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class MinScoreParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ context.minimumScore(parser.floatValue());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/query/PostFilterParseElement.java b/src/main/java/org/elasticsearch/search/query/PostFilterParseElement.java
new file mode 100644
index 0000000..dcd30a7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/PostFilterParseElement.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class PostFilterParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ ParsedFilter postFilter = context.queryParserService().parseInnerFilter(parser);
+ if (postFilter != null) {
+ context.parsedPostFilter(postFilter);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/query/QueryBinaryParseElement.java b/src/main/java/org/elasticsearch/search/query/QueryBinaryParseElement.java
new file mode 100644
index 0000000..c0a38d5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QueryBinaryParseElement.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class QueryBinaryParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ byte[] querySource = parser.binaryValue();
+ XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource);
+ try {
+ context.parsedQuery(context.queryParserService().parse(qSourceParser));
+ } finally {
+ qSourceParser.close();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/query/QueryParseElement.java b/src/main/java/org/elasticsearch/search/query/QueryParseElement.java
new file mode 100644
index 0000000..8db4cb3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QueryParseElement.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class QueryParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ context.parsedQuery(context.queryParserService().parse(parser));
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/src/main/java/org/elasticsearch/search/query/QueryPhase.java
new file mode 100644
index 0000000..0e6aacd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.aggregations.AggregationPhase;
+import org.elasticsearch.search.facet.FacetPhase;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.rescore.RescorePhase;
+import org.elasticsearch.search.sort.SortParseElement;
+import org.elasticsearch.search.sort.TrackScoresParseElement;
+import org.elasticsearch.search.suggest.SuggestPhase;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class QueryPhase implements SearchPhase {
+
+ private final FacetPhase facetPhase;
+ private final AggregationPhase aggregationPhase;
+ private final SuggestPhase suggestPhase;
+ private RescorePhase rescorePhase;
+
+ @Inject
+ public QueryPhase(FacetPhase facetPhase, AggregationPhase aggregationPhase, SuggestPhase suggestPhase, RescorePhase rescorePhase) {
+ this.facetPhase = facetPhase;
+ this.aggregationPhase = aggregationPhase;
+ this.suggestPhase = suggestPhase;
+ this.rescorePhase = rescorePhase;
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("from", new FromParseElement()).put("size", new SizeParseElement())
+ .put("indices_boost", new IndicesBoostParseElement())
+ .put("indicesBoost", new IndicesBoostParseElement())
+ .put("query", new QueryParseElement())
+ .put("queryBinary", new QueryBinaryParseElement())
+ .put("query_binary", new QueryBinaryParseElement())
+ .put("filter", new PostFilterParseElement()) // For bw comp reason, should be removed in version 1.1
+ .put("post_filter", new PostFilterParseElement())
+ .put("postFilter", new PostFilterParseElement())
+ .put("filterBinary", new FilterBinaryParseElement())
+ .put("filter_binary", new FilterBinaryParseElement())
+ .put("sort", new SortParseElement())
+ .put("trackScores", new TrackScoresParseElement())
+ .put("track_scores", new TrackScoresParseElement())
+ .put("min_score", new MinScoreParseElement())
+ .put("minScore", new MinScoreParseElement())
+ .put("timeout", new TimeoutParseElement())
+ .putAll(facetPhase.parseElements())
+ .putAll(aggregationPhase.parseElements())
+ .putAll(suggestPhase.parseElements())
+ .putAll(rescorePhase.parseElements());
+ return parseElements.build();
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ context.preProcess();
+ facetPhase.preProcess(context);
+ aggregationPhase.preProcess(context);
+ }
+
+ public void execute(SearchContext searchContext) throws QueryPhaseExecutionException {
+ searchContext.queryResult().searchTimedOut(false);
+
+ searchContext.searcher().inStage(ContextIndexSearcher.Stage.MAIN_QUERY);
+ boolean rescore = false;
+ try {
+ searchContext.queryResult().from(searchContext.from());
+ searchContext.queryResult().size(searchContext.size());
+
+ Query query = searchContext.query();
+
+ TopDocs topDocs;
+ int numDocs = searchContext.from() + searchContext.size();
+
+ if (searchContext.searchType() == SearchType.COUNT || numDocs == 0) {
+ TotalHitCountCollector collector = new TotalHitCountCollector();
+ searchContext.searcher().search(query, collector);
+ topDocs = new TopDocs(collector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
+ } else if (searchContext.searchType() == SearchType.SCAN) {
+ topDocs = searchContext.scanContext().execute(searchContext);
+ } else if (searchContext.sort() != null) {
+ topDocs = searchContext.searcher().search(query, null, numDocs, searchContext.sort(),
+ searchContext.trackScores(), searchContext.trackScores());
+ } else {
+ if (searchContext.rescore() != null) {
+ rescore = true;
+ numDocs = Math.max(searchContext.rescore().window(), numDocs);
+ }
+ topDocs = searchContext.searcher().search(query, numDocs);
+ }
+ searchContext.queryResult().topDocs(topDocs);
+ } catch (Throwable e) {
+ throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e);
+ } finally {
+ searchContext.searcher().finishStage(ContextIndexSearcher.Stage.MAIN_QUERY);
+ }
+ if (rescore) { // only if we do a regular search
+ rescorePhase.execute(searchContext);
+ }
+ suggestPhase.execute(searchContext);
+ facetPhase.execute(searchContext);
+ aggregationPhase.execute(searchContext);
+ }
+}
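
parseElements() above is a dispatch table: each top-level key of the search source is routed to the SearchParseElement registered for it, with aliases such as "track_scores" and "trackScores" sharing one element. A stand-alone sketch of that pattern under illustrative names (Handler and ParseDispatchSketch are not from this tree):

    import java.util.HashMap;
    import java.util.Map;

    public class ParseDispatchSketch {

        interface Handler {
            void parse(String value) throws Exception;
        }

        public static void main(String[] args) throws Exception {
            // One handler per source key; aliases share a handler instance,
            // mirroring the parseElements() registration above.
            Map<String, Handler> elements = new HashMap<>();
            elements.put("size", value -> System.out.println("size = " + Integer.parseInt(value)));
            Handler trackScores = value -> System.out.println("trackScores = " + Boolean.parseBoolean(value));
            elements.put("track_scores", trackScores);
            elements.put("trackScores", trackScores);

            String[][] source = { { "size", "10" }, { "track_scores", "true" } };
            for (String[] entry : source) {
                elements.get(entry[0]).parse(entry[1]); // dispatch by key
            }
        }
    }
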
diff --git a/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java b/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java
new file mode 100644
index 0000000..233f733
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.search.SearchContextException;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class QueryPhaseExecutionException extends SearchContextException {
+
+ public QueryPhaseExecutionException(SearchContext context, String msg, Throwable cause) {
+ super(context, "Query Failed [" + msg + "]", cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java b/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java
new file mode 100644
index 0000000..27ca7a6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.dfs.AggregatedDfs.readAggregatedDfs;
+
+/**
+ *
+ */
+public class QuerySearchRequest extends TransportRequest {
+
+ private long id;
+
+ private AggregatedDfs dfs;
+
+ public QuerySearchRequest() {
+ }
+
+ public QuerySearchRequest(SearchRequest request, long id, AggregatedDfs dfs) {
+ super(request);
+ this.id = id;
+ this.dfs = dfs;
+ }
+
+ public long id() {
+ return id;
+ }
+
+ public AggregatedDfs dfs() {
+ return dfs;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ dfs = readAggregatedDfs(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ dfs.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
new file mode 100644
index 0000000..70df605
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.search.facet.InternalFacets;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.lucene.Lucene.readTopDocs;
+import static org.elasticsearch.common.lucene.Lucene.writeTopDocs;
+
+/**
+ *
+ */
+public class QuerySearchResult extends TransportResponse implements QuerySearchResultProvider {
+
+ private long id;
+ private SearchShardTarget shardTarget;
+ private int from;
+ private int size;
+ private TopDocs topDocs;
+ private InternalFacets facets;
+ private InternalAggregations aggregations;
+ private Suggest suggest;
+ private boolean searchTimedOut;
+
+ public QuerySearchResult() {
+
+ }
+
+ public QuerySearchResult(long id, SearchShardTarget shardTarget) {
+ this.id = id;
+ this.shardTarget = shardTarget;
+ }
+
+ @Override
+ public boolean includeFetch() {
+ return false;
+ }
+
+ @Override
+ public QuerySearchResult queryResult() {
+ return this;
+ }
+
+ public long id() {
+ return this.id;
+ }
+
+ public SearchShardTarget shardTarget() {
+ return shardTarget;
+ }
+
+ @Override
+ public void shardTarget(SearchShardTarget shardTarget) {
+ this.shardTarget = shardTarget;
+ }
+
+ public void searchTimedOut(boolean searchTimedOut) {
+ this.searchTimedOut = searchTimedOut;
+ }
+
+ public boolean searchTimedOut() {
+ return searchTimedOut;
+ }
+
+ public TopDocs topDocs() {
+ return topDocs;
+ }
+
+ public void topDocs(TopDocs topDocs) {
+ this.topDocs = topDocs;
+ }
+
+ public Facets facets() {
+ return facets;
+ }
+
+ public void facets(InternalFacets facets) {
+ this.facets = facets;
+ }
+
+ public Aggregations aggregations() {
+ return aggregations;
+ }
+
+ public void aggregations(InternalAggregations aggregations) {
+ this.aggregations = aggregations;
+ }
+
+ public Suggest suggest() {
+ return suggest;
+ }
+
+ public void suggest(Suggest suggest) {
+ this.suggest = suggest;
+ }
+
+ public int from() {
+ return from;
+ }
+
+ public QuerySearchResult from(int from) {
+ this.from = from;
+ return this;
+ }
+
+ public int size() {
+ return size;
+ }
+
+ public QuerySearchResult size(int size) {
+ this.size = size;
+ return this;
+ }
+
+ public static QuerySearchResult readQuerySearchResult(StreamInput in) throws IOException {
+ QuerySearchResult result = new QuerySearchResult();
+ result.readFrom(in);
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+// shardTarget = readSearchShardTarget(in);
+ from = in.readVInt();
+ size = in.readVInt();
+ topDocs = readTopDocs(in);
+ if (in.readBoolean()) {
+ facets = InternalFacets.readFacets(in);
+ }
+ if (in.readBoolean()) {
+ aggregations = InternalAggregations.readAggregations(in);
+ }
+ if (in.readBoolean()) {
+ suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in);
+ }
+ searchTimedOut = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+// shardTarget.writeTo(out);
+ out.writeVInt(from);
+ out.writeVInt(size);
+ writeTopDocs(out, topDocs, 0);
+ if (facets == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ facets.writeTo(out);
+ }
+ if (aggregations == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ aggregations.writeTo(out);
+ }
+ if (suggest == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ suggest.writeTo(out);
+ }
+ out.writeBoolean(searchTimedOut);
+ }
+}
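
The readFrom/writeTo pair above keeps the optional sections in lockstep with a presence-flag convention: a boolean is written first, and the facets, aggregations, and suggest payloads follow only when the flag is true. A minimal sketch of that convention over the JDK's DataOutput/DataInput (illustrative names; this tree uses its own StreamOutput/StreamInput):

    import java.io.*;

    public class PresenceFlagSketch {

        static void writeOptional(DataOutput out, String value) throws IOException {
            if (value == null) {
                out.writeBoolean(false);      // absent: flag only
            } else {
                out.writeBoolean(true);       // present: flag, then payload
                out.writeUTF(value);
            }
        }

        static String readOptional(DataInput in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            writeOptional(out, null);                 // e.g. no facets
            writeOptional(out, "suggest-section");    // e.g. suggest present
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readOptional(in));     // null
            System.out.println(readOptional(in));     // suggest-section
        }
    }
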
diff --git a/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java b/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java
new file mode 100644
index 0000000..61b85b6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.search.SearchPhaseResult;
+
+/**
+ *
+ */
+public interface QuerySearchResultProvider extends SearchPhaseResult {
+
+ /**
+ * Returns <code>true</code> if both query and fetch happened in the same call.
+ */
+ boolean includeFetch();
+
+ QuerySearchResult queryResult();
+}
diff --git a/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java
new file mode 100644
index 0000000..ebb7615
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;
+import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult;
+
+/**
+ *
+ */
+public class ScrollQuerySearchResult extends TransportResponse {
+
+ private QuerySearchResult queryResult;
+ private SearchShardTarget shardTarget;
+
+ public ScrollQuerySearchResult() {
+ }
+
+ public ScrollQuerySearchResult(QuerySearchResult queryResult, SearchShardTarget shardTarget) {
+ this.queryResult = queryResult;
+ this.shardTarget = shardTarget;
+ }
+
+ public QuerySearchResult queryResult() {
+ return queryResult;
+ }
+
+ public SearchShardTarget shardTarget() {
+ return shardTarget;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardTarget = readSearchShardTarget(in);
+ queryResult = readQuerySearchResult(in);
+ queryResult.shardTarget(shardTarget);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardTarget.writeTo(out);
+ queryResult.writeTo(out);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/query/SizeParseElement.java b/src/main/java/org/elasticsearch/search/query/SizeParseElement.java
new file mode 100644
index 0000000..b729ea4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/SizeParseElement.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class SizeParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ int size = parser.intValue();
+ if (size < 0) {
+ throw new SearchParseException(context, "size is set to [" + size + "] and is expected to be greater than or equal to 0");
+ }
+ context.size(size);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/query/TimeoutParseElement.java b/src/main/java/org/elasticsearch/search/query/TimeoutParseElement.java
new file mode 100644
index 0000000..d5b0420
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/query/TimeoutParseElement.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ */
+public class TimeoutParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.VALUE_NUMBER) {
+ context.timeoutInMillis(parser.longValue());
+ } else {
+ context.timeoutInMillis(TimeValue.parseTimeValue(parser.text(), null).millis());
+ }
+ }
+}
+
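
The element above accepts the timeout either as a bare number, taken as milliseconds, or as a duration string handed to TimeValue.parseTimeValue (for example "5s"). A stand-alone sketch of that dual handling, with a simplified suffix parser standing in for TimeValue (the helper below is illustrative only and supports fewer units):

    public class TimeoutValueSketch {

        // Simplified stand-in for TimeValue.parseTimeValue: bare numbers are
        // taken as milliseconds, otherwise a numeric value plus a unit suffix.
        static long toMillis(String value) {
            try {
                return Long.parseLong(value);   // bare number: already millis
            } catch (NumberFormatException ignored) {
                long n = Long.parseLong(value.substring(0, value.length() - 1));
                switch (value.charAt(value.length() - 1)) {
                    case 's': return n * 1000L;
                    case 'm': return n * 60L * 1000L;
                    default: throw new IllegalArgumentException("unknown unit in " + value);
                }
            }
        }

        public static void main(String[] args) {
            System.out.println(toMillis("500")); // 500
            System.out.println(toMillis("5s"));  // 5000
            System.out.println(toMillis("2m"));  // 120000
        }
    }
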
diff --git a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java
new file mode 100644
index 0000000..b8e26a8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java
@@ -0,0 +1,417 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IntroSorter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+public final class QueryRescorer implements Rescorer {
+
+ private static enum ScoreMode {
+ Avg {
+ @Override
+ public float combine(float primary, float secondary) {
+ return (primary + secondary) / 2;
+ }
+
+ @Override
+ public String toString() {
+ return "avg";
+ }
+ },
+ Max {
+ @Override
+ public float combine(float primary, float secondary) {
+ return Math.max(primary, secondary);
+ }
+
+ @Override
+ public String toString() {
+ return "max";
+ }
+ },
+ Min {
+ @Override
+ public float combine(float primary, float secondary) {
+ return Math.min(primary, secondary);
+ }
+
+ @Override
+ public String toString() {
+ return "min";
+ }
+ },
+ Total {
+ @Override
+ public float combine(float primary, float secondary) {
+ return primary + secondary;
+ }
+
+ @Override
+ public String toString() {
+ return "sum";
+ }
+ },
+ Multiply {
+ @Override
+ public float combine(float primary, float secondary) {
+ return primary * secondary;
+ }
+
+ @Override
+ public String toString() {
+ return "product";
+ }
+ };
+
+ public abstract float combine(float primary, float secondary);
+ }
+
+ public static final Rescorer INSTANCE = new QueryRescorer();
+ public static final String NAME = "query";
+
+ @Override
+ public String name() {
+ return NAME;
+ }
+
+ @Override
+ public void rescore(TopDocs topDocs, SearchContext context, RescoreSearchContext rescoreContext) throws IOException {
+ assert rescoreContext != null;
+ QueryRescoreContext rescore = ((QueryRescoreContext) rescoreContext);
+ TopDocs queryTopDocs = context.queryResult().topDocs();
+ if (queryTopDocs == null || queryTopDocs.totalHits == 0 || queryTopDocs.scoreDocs.length == 0) {
+ return;
+ }
+
+ ContextIndexSearcher searcher = context.searcher();
+ topDocs = searcher.search(rescore.query(), new TopDocsFilter(queryTopDocs), queryTopDocs.scoreDocs.length);
+ context.queryResult().topDocs(merge(queryTopDocs, topDocs, rescore));
+ }
+
+ @Override
+ public Explanation explain(int topLevelDocId, SearchContext context, RescoreSearchContext rescoreContext) throws IOException {
+ QueryRescoreContext rescore = ((QueryRescoreContext) context.rescore());
+ ContextIndexSearcher searcher = context.searcher();
+ Explanation primaryExplain = searcher.explain(context.query(), topLevelDocId);
+ if (primaryExplain == null) {
+ // this should not happen but just in case
+ return new ComplexExplanation(false, 0.0f, "nothing matched");
+ }
+ Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId);
+ float primaryWeight = rescore.queryWeight();
+ ComplexExplanation prim = new ComplexExplanation(primaryExplain.isMatch(),
+ primaryExplain.getValue() * primaryWeight,
+ "product of:");
+ prim.addDetail(primaryExplain);
+ prim.addDetail(new Explanation(primaryWeight, "primaryWeight"));
+ if (rescoreExplain != null && rescoreExplain.isMatch()) {
+ float secondaryWeight = rescore.rescoreQueryWeight();
+ ComplexExplanation sec = new ComplexExplanation(rescoreExplain.isMatch(),
+ rescoreExplain.getValue() * secondaryWeight,
+ "product of:");
+ sec.addDetail(rescoreExplain);
+ sec.addDetail(new Explanation(secondaryWeight, "secondaryWeight"));
+ ScoreMode scoreMode = rescore.scoreMode();
+ ComplexExplanation calcExpl = new ComplexExplanation();
+ calcExpl.setDescription(scoreMode + " of:");
+ calcExpl.addDetail(prim);
+ calcExpl.setMatch(prim.isMatch());
+ calcExpl.addDetail(sec);
+ calcExpl.setValue(scoreMode.combine(prim.getValue(), sec.getValue()));
+ return calcExpl;
+ } else {
+ return prim;
+ }
+ }
+
+ @Override
+ public RescoreSearchContext parse(XContentParser parser, SearchContext context) throws IOException {
+ Token token;
+ String fieldName = null;
+ QueryRescoreContext rescoreContext = new QueryRescoreContext(this);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ if ("rescore_query".equals(fieldName)) {
+ ParsedQuery parsedQuery = context.queryParserService().parse(parser);
+ rescoreContext.setParsedQuery(parsedQuery);
+ }
+ } else if (token.isValue()) {
+ if ("query_weight".equals(fieldName)) {
+ rescoreContext.setQueryWeight(parser.floatValue());
+ } else if ("rescore_query_weight".equals(fieldName)) {
+ rescoreContext.setRescoreQueryWeight(parser.floatValue());
+ } else if ("score_mode".equals(fieldName)) {
+ String sScoreMode = parser.text();
+ if ("avg".equals(sScoreMode)) {
+ rescoreContext.setScoreMode(ScoreMode.Avg);
+ } else if ("max".equals(sScoreMode)) {
+ rescoreContext.setScoreMode(ScoreMode.Max);
+ } else if ("min".equals(sScoreMode)) {
+ rescoreContext.setScoreMode(ScoreMode.Min);
+ } else if ("total".equals(sScoreMode)) {
+ rescoreContext.setScoreMode(ScoreMode.Total);
+ } else if ("multiply".equals(sScoreMode)) {
+ rescoreContext.setScoreMode(ScoreMode.Multiply);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("[rescore] illegal score_mode [" + sScoreMode + "]");
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException("rescore doesn't support [" + fieldName + "]");
+ }
+ }
+ }
+ return rescoreContext;
+ }
+
+ public static class QueryRescoreContext extends RescoreSearchContext {
+
+ public QueryRescoreContext(QueryRescorer rescorer) {
+ super(NAME, 10, rescorer);
+ this.scoreMode = ScoreMode.Total;
+ }
+
+ private ParsedQuery parsedQuery;
+ private float queryWeight = 1.0f;
+ private float rescoreQueryWeight = 1.0f;
+ private ScoreMode scoreMode;
+
+ public void setParsedQuery(ParsedQuery parsedQuery) {
+ this.parsedQuery = parsedQuery;
+ }
+
+ public Query query() {
+ return parsedQuery.query();
+ }
+
+ public float queryWeight() {
+ return queryWeight;
+ }
+
+ public float rescoreQueryWeight() {
+ return rescoreQueryWeight;
+ }
+
+ public ScoreMode scoreMode() {
+ return scoreMode;
+ }
+
+ public void setRescoreQueryWeight(float rescoreQueryWeight) {
+ this.rescoreQueryWeight = rescoreQueryWeight;
+ }
+
+ public void setQueryWeight(float queryWeight) {
+ this.queryWeight = queryWeight;
+ }
+
+ public void setScoreMode(ScoreMode scoreMode) {
+ this.scoreMode = scoreMode;
+ }
+
+ }
+
+
+ private TopDocs merge(TopDocs primary, TopDocs secondary, QueryRescoreContext context) {
+ DocIdSorter sorter = new DocIdSorter();
+ sorter.array = primary.scoreDocs;
+ sorter.sort(0, sorter.array.length);
+ ScoreDoc[] primaryDocs = sorter.array;
+ sorter.array = secondary.scoreDocs;
+ sorter.sort(0, sorter.array.length);
+ ScoreDoc[] secondaryDocs = sorter.array;
+ int j = 0;
+ float primaryWeight = context.queryWeight();
+ float secondaryWeight = context.rescoreQueryWeight();
+ ScoreMode scoreMode = context.scoreMode();
+ for (int i = 0; i < primaryDocs.length; i++) {
+ if (j < secondaryDocs.length && primaryDocs[i].doc == secondaryDocs[j].doc) {
+ primaryDocs[i].score = scoreMode.combine(primaryDocs[i].score * primaryWeight, secondaryDocs[j++].score * secondaryWeight);
+ } else {
+ primaryDocs[i].score *= primaryWeight;
+ }
+ }
+ ScoreSorter scoreSorter = new ScoreSorter();
+ scoreSorter.array = primaryDocs;
+ scoreSorter.sort(0, primaryDocs.length);
+ primary.setMaxScore(primaryDocs[0].score);
+ return primary;
+ }
+
+ private static final class DocIdSorter extends IntroSorter {
+ private ScoreDoc[] array;
+ private ScoreDoc pivot;
+
+ @Override
+ protected void swap(int i, int j) {
+ ScoreDoc scoreDoc = array[i];
+ array[i] = array[j];
+ array[j] = scoreDoc;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return compareDocId(array[i], array[j]);
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivot = array[i];
+
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return compareDocId(pivot, array[j]);
+ }
+
+ }
+
+ private static final int compareDocId(ScoreDoc left, ScoreDoc right) {
+ if (left.doc < right.doc) {
+ return 1;
+ } else if (left.doc == right.doc) {
+ return 0;
+ }
+ return -1;
+ }
+
+ private static final class ScoreSorter extends IntroSorter {
+ private ScoreDoc[] array;
+ private ScoreDoc pivot;
+
+ @Override
+ protected void swap(int i, int j) {
+ ScoreDoc scoreDoc = array[i];
+ array[i] = array[j];
+ array[j] = scoreDoc;
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ int cmp = Float.compare(array[j].score, array[i].score);
+ return cmp == 0 ? compareDocId(array[i], array[j]) : cmp;
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivot = array[i];
+
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ int cmp = Float.compare(array[j].score, pivot.score);
+ return cmp == 0 ? compareDocId(pivot, array[j]) : cmp;
+ }
+
+ }
+
+ private static final class TopDocsFilter extends Filter {
+
+ private final int[] docIds;
+
+ public TopDocsFilter(TopDocs topDocs) {
+ this.docIds = new int[topDocs.scoreDocs.length];
+ ScoreDoc[] scoreDocs = topDocs.scoreDocs;
+ for (int i = 0; i < scoreDocs.length; i++) {
+ docIds[i] = scoreDocs[i].doc;
+ }
+ Arrays.sort(docIds);
+
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ final int docBase = context.docBase;
+ int limit = docBase + context.reader().maxDoc();
+ int offset = Arrays.binarySearch(docIds, docBase);
+ if (offset < 0) {
+ offset = (-offset) - 1;
+ }
+ int end = Arrays.binarySearch(docIds, limit);
+ if (end < 0) {
+ end = (-end) - 1;
+ }
+ final int start = offset;
+ final int stop = end;
+
+ return new DocIdSet() {
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return new DocIdSetIterator() {
+ private int current = start;
+ private int docId = NO_MORE_DOCS;
+
+ @Override
+ public int nextDoc() throws IOException {
+ if (current < stop) {
+ return docId = docIds[current++] - docBase;
+ }
+ return docId = NO_MORE_DOCS;
+ }
+
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ if (target == NO_MORE_DOCS) {
+ current = stop;
+ return docId = NO_MORE_DOCS;
+ }
+ while (nextDoc() < target) {
+ }
+ return docId;
+ }
+
+ @Override
+ public long cost() {
+ return docIds.length;
+ }
+ };
+ }
+ };
+ }
+
+ }
+
+ @Override
+ public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet) {
+ ((QueryRescoreContext) context.rescore()).query().extractTerms(termsSet);
+ }
+
+}
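
merge(...) above walks two doc-id-sorted ScoreDoc arrays in parallel: hits that also matched the rescore query get both weighted scores combined by the selected ScoreMode, every other hit keeps its weighted primary score, and the array is then re-sorted by score. A self-contained sketch of that combine step on plain arrays, using sum as the score mode (Hit and the class name are illustrative):

    import java.util.Arrays;

    public class RescoreMergeSketch {

        static final class Hit {
            final int doc;
            float score;
            Hit(int doc, float score) { this.doc = doc; this.score = score; }
        }

        // Both arrays must be sorted by doc id, as in merge() above.
        static void merge(Hit[] primary, Hit[] secondary, float queryWeight, float rescoreWeight) {
            int j = 0;
            for (Hit hit : primary) {
                if (j < secondary.length && hit.doc == secondary[j].doc) {
                    // matched the rescore query: combine both weighted scores (ScoreMode.Total)
                    hit.score = hit.score * queryWeight + secondary[j++].score * rescoreWeight;
                } else {
                    hit.score *= queryWeight;   // untouched by the rescore query
                }
            }
            Arrays.sort(primary, (a, b) -> Float.compare(b.score, a.score)); // best first
        }

        public static void main(String[] args) {
            Hit[] primary = { new Hit(1, 2.0f), new Hit(4, 1.0f) };
            Hit[] secondary = { new Hit(4, 3.0f) };
            merge(primary, secondary, 1.0f, 2.0f);
            for (Hit h : primary) {
                System.out.println(h.doc + " -> " + h.score); // 4 -> 7.0, then 1 -> 2.0
            }
        }
    }
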
diff --git a/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java b/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java
new file mode 100644
index 0000000..04af439
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import java.io.IOException;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+
+public class RescoreBuilder implements ToXContent {
+
+ private Rescorer rescorer;
+ private Integer windowSize;
+
+ public static QueryRescorer queryRescorer(QueryBuilder queryBuilder) {
+ return new QueryRescorer(queryBuilder);
+ }
+
+ public RescoreBuilder rescorer(Rescorer rescorer) {
+ this.rescorer = rescorer;
+ return this;
+ }
+
+ public RescoreBuilder windowSize(int windowSize) {
+ this.windowSize = windowSize;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (rescorer != null) {
+ builder.startObject("rescore");
+ if (windowSize != null) {
+ builder.field("window_size", windowSize);
+ }
+ rescorer.toXContent(builder, params);
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ public static abstract class Rescorer implements ToXContent {
+
+ private String name;
+
+ public Rescorer(String name) {
+ this.name = name;
+ }
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ builder = innerToXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException;
+
+ }
+
+ public static class QueryRescorer extends Rescorer {
+ private static final String NAME = "query";
+ private QueryBuilder queryBuilder;
+ private Float rescoreQueryWeight;
+ private Float queryWeight;
+ private String scoreMode;
+
+ /**
+ * Creates a new {@link QueryRescorer} instance
+ * @param builder the query builder to build the rescore query from
+ */
+ public QueryRescorer(QueryBuilder builder) {
+ super(NAME);
+ this.queryBuilder = builder;
+ }
+ /**
+ * Sets the original query weight for rescoring. The default is <tt>1.0</tt>
+ */
+ public QueryRescorer setQueryWeight(float queryWeight) {
+ this.queryWeight = queryWeight;
+ return this;
+ }
+
+ /**
+ * Sets the rescore query weight. The default is <tt>1.0</tt>
+ */
+ public QueryRescorer setRescoreQueryWeight(float rescoreQueryWeight) {
+ this.rescoreQueryWeight = rescoreQueryWeight;
+ return this;
+ }
+
+ /**
+ * Sets the score mode used to combine the original and rescore query scores. The default is <tt>total</tt>
+ */
+ public QueryRescorer setScoreMode(String scoreMode) {
+ this.scoreMode = scoreMode;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("rescore_query", queryBuilder);
+ if (queryWeight != null) {
+ builder.field("query_weight", queryWeight);
+ }
+ if (rescoreQueryWeight != null) {
+ builder.field("rescore_query_weight", rescoreQueryWeight);
+ }
+ if (scoreMode != null) {
+ builder.field("score_mode", scoreMode);
+ }
+ return builder;
+ }
+ }
+
+}
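
A usage sketch for the builder above, rendering the fragment it contributes to the search source; it assumes QueryBuilders.termQuery and XContentFactory.jsonBuilder from this tree, but the snippet itself is illustrative rather than taken from the source:

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.rescore.RescoreBuilder;

    public class RescoreBuilderExample {
        public static void main(String[] args) throws Exception {
            RescoreBuilder rescore = new RescoreBuilder()
                    .windowSize(50)
                    .rescorer(RescoreBuilder.queryRescorer(QueryBuilders.termQuery("title", "quick"))
                            .setQueryWeight(0.7f)
                            .setRescoreQueryWeight(1.2f)
                            .setScoreMode("total"));

            // The builder emits a "rescore" object, so wrap it in an enclosing object.
            XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
            rescore.toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            System.out.println(builder.string());
            // {"rescore":{"window_size":50,"query":{"rescore_query":{...},"query_weight":0.7,...}}}
        }
    }
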
diff --git a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java
new file mode 100644
index 0000000..a1d0e5d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ *
+ */
+public class RescoreParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ String fieldName = null;
+ RescoreSearchContext rescoreContext = null;
+ Integer windowSize = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ if (QueryRescorer.NAME.equals(fieldName)) {
+ // we only have one at this point
+ Rescorer rescorer = QueryRescorer.INSTANCE;
+ token = parser.nextToken();
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new ElasticsearchParseException("rescore type malformed, must start with start_object");
+ }
+ rescoreContext = rescorer.parse(parser, context);
+ }
+ } else if (token.isValue()) {
+ if ("window_size".equals(fieldName)) {
+ windowSize = parser.intValue();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("rescore doesn't support [" + fieldName + "]");
+ }
+ }
+ }
+ if (rescoreContext == null) {
+ throw new ElasticsearchIllegalArgumentException("missing rescore type");
+ }
+ if (windowSize != null) {
+ rescoreContext.setWindowSize(windowSize.intValue());
+ }
+ context.rescore(rescoreContext);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
new file mode 100644
index 0000000..baf4600
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.internal.SearchContext;
+
+import com.google.common.collect.ImmutableMap;
+
+/**
+ */
+public class RescorePhase extends AbstractComponent implements SearchPhase {
+
+ @Inject
+ public RescorePhase(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("rescore", new RescoreParseElement());
+ return parseElements.build();
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ }
+
+ @Override
+ public void execute(SearchContext context) throws ElasticsearchException {
+ final RescoreSearchContext ctx = context.rescore();
+ final Rescorer rescorer = ctx.rescorer();
+ try {
+ rescorer.rescore(context.queryResult().topDocs(), context, ctx);
+ } catch (IOException e) {
+ throw new ElasticsearchException("Rescore Phase Failed", e);
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java b/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java
new file mode 100644
index 0000000..6e3722f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+
+
+/**
+ */
+public class RescoreSearchContext {
+
+ private int windowSize;
+
+ private final String type;
+
+ private final Rescorer rescorer;
+
+ public RescoreSearchContext(String type, int windowSize, Rescorer rescorer) {
+ super();
+ this.type = type;
+ this.windowSize = windowSize;
+ this.rescorer = rescorer;
+ }
+
+ public Rescorer rescorer() {
+ return rescorer;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setWindowSize(int windowSize) {
+ this.windowSize = windowSize;
+ }
+
+ public int window() {
+ return windowSize;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/rescore/Rescorer.java b/src/main/java/org/elasticsearch/search/rescore/Rescorer.java
new file mode 100644
index 0000000..5ff6c56
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/rescore/Rescorer.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query rescorer interface used to re-rank the Top-K results of a previously
+ * executed search.
+ */
+public interface Rescorer {
+
+ /**
+ * Returns the name of this rescorer
+ */
+ public String name();
+
+ /**
+ * Modifies the result of the previously executed search ({@link TopDocs})
+ * in place based on the given {@link RescoreSearchContext}.
+ *
+ * @param topDocs the result of the previously executed search
+ * @param context the current {@link SearchContext}. This will never be <code>null</code>.
+ * @param rescoreContext the {@link RescoreSearchContext}. This will never be <code>null</code>
+ * @throws IOException if an {@link IOException} occurs during rescoring
+ */
+ public void rescore(TopDocs topDocs, SearchContext context, RescoreSearchContext rescoreContext) throws IOException;
+
+ /**
+ * Executes an {@link Explanation} phase on the rescorer.
+ *
+ * @param topLevelDocId the global / top-level document ID to explain
+ * @param context the current {@link SearchContext}
+ * @param rescoreContext the {@link RescoreSearchContext} for this rescorer
+ * @return the explanation for the given top-level document ID.
+ * @throws IOException if an {@link IOException} occurs
+ */
+ public Explanation explain(int topLevelDocId, SearchContext context, RescoreSearchContext rescoreContext) throws IOException;
+
+ /**
+ * Parses the {@link RescoreSearchContext} for this implementation
+ *
+ * @param parser the parser to read the context from
+ * @param context the current search context
+ * @return the parsed {@link RescoreSearchContext}
+ * @throws IOException if an {@link IOException} occurs while parsing the context
+ */
+ public RescoreSearchContext parse(XContentParser parser, SearchContext context) throws IOException;
+
+ /**
+ * Extracts all terms needed to execute this {@link Rescorer}. This method
+ * is executed in a distributed frequency collection roundtrip for
+ * {@link SearchType#DFS_QUERY_AND_FETCH} and
+ * {@link SearchType#DFS_QUERY_THEN_FETCH}
+ */
+ public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet);
+
+ /*
+ * TODO: At this point we only have one implementation, which modifies the
+ * given TopDocs. Future implementations might return actual results that
+ * contain information about the rescore context. For example, a pair-wise
+ * reranker might return the feature vector for the top-N window in order
+ * to merge results on the caller's side. For now we don't have a return
+ * type at all, since something like this requires a more general
+ * refactoring of how documents are merged: in such a case we don't really
+ * have a score per document but rather an "X is more relevant than Y"
+ * relation.
+
+}
diff --git a/src/main/java/org/elasticsearch/search/scan/ScanContext.java b/src/main/java/org/elasticsearch/search/scan/ScanContext.java
new file mode 100644
index 0000000..76d30ae
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/scan/ScanContext.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.common.lucene.docset.AllDocIdSet;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+/**
+ * The scan context allows skipping readers we have already processed during scanning. We do that by keeping track
+ * of the count per reader, and once we are done with a reader, we no longer process it, using a filter that returns
+ * a null docIdSet for that reader.
+ */
+public class ScanContext {
+
+ private final Map<IndexReader, ReaderState> readerStates = Maps.newHashMap();
+
+ public void clear() {
+ readerStates.clear();
+ }
+
+ public TopDocs execute(SearchContext context) throws IOException {
+ ScanCollector collector = new ScanCollector(readerStates, context.from(), context.size(), context.trackScores());
+ Query query = new XFilteredQuery(context.query(), new ScanFilter(readerStates, collector));
+ try {
+ context.searcher().search(query, collector);
+ } catch (ScanCollector.StopCollectingException e) {
+ // all is well
+ }
+ return collector.topDocs();
+ }
+
+ static class ScanCollector extends Collector {
+
+ private final Map<IndexReader, ReaderState> readerStates;
+
+ private final int from;
+
+ private final int to;
+
+ private final ArrayList<ScoreDoc> docs;
+
+ private final boolean trackScores;
+
+ private Scorer scorer;
+
+ private int docBase;
+
+ private int counter;
+
+ private IndexReader currentReader;
+ private ReaderState readerState;
+
+ ScanCollector(Map<IndexReader, ReaderState> readerStates, int from, int size, boolean trackScores) {
+ this.readerStates = readerStates;
+ this.from = from;
+ this.to = from + size;
+ this.trackScores = trackScores;
+ this.docs = new ArrayList<ScoreDoc>(size);
+ }
+
+ void incCounter(int count) {
+ this.counter += count;
+ }
+
+ public TopDocs topDocs() {
+ return new TopDocs(docs.size(), docs.toArray(new ScoreDoc[docs.size()]), 0f);
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.scorer = scorer;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ if (counter >= from) {
+ docs.add(new ScoreDoc(docBase + doc, trackScores ? scorer.score() : 0f));
+ }
+ readerState.count++;
+ counter++;
+ if (counter >= to) {
+ throw StopCollectingException;
+ }
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ // if we have a reader state, and we haven't registered one already, register it
+ // we need to check in readersState since even when the filter return null, setNextReader is still
+ // called for that reader (before)
+ if (currentReader != null && !readerStates.containsKey(currentReader)) {
+ assert readerState != null;
+ readerState.done = true;
+ readerStates.put(currentReader, readerState);
+ }
+ this.currentReader = context.reader();
+ this.docBase = context.docBase;
+ this.readerState = new ReaderState();
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ public static final RuntimeException StopCollectingException = new StopCollectingException();
+
+ static class StopCollectingException extends RuntimeException {
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+ }
+ }
+
+ public static class ScanFilter extends Filter {
+
+ private final Map<IndexReader, ReaderState> readerStates;
+
+ private final ScanCollector scanCollector;
+
+ public ScanFilter(Map<IndexReader, ReaderState> readerStates, ScanCollector scanCollector) {
+ this.readerStates = readerStates;
+ this.scanCollector = scanCollector;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
+ ReaderState readerState = readerStates.get(context.reader());
+ if (readerState != null && readerState.done) {
+ scanCollector.incCounter(readerState.count);
+ return null;
+ }
+ return new AllDocIdSet(context.reader().maxDoc());
+ }
+ }
+
+ static class ReaderState {
+ public int count;
+ public boolean done;
+ }
+}
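
StopCollectingException above is the usual early-termination idiom for Lucene collectors: collect() cannot return "stop", so a preallocated exception whose fillInStackTrace() returns null aborts the search cheaply and the caller swallows it. A stripped-down sketch of the same idiom, independent of Lucene (names are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class EarlyTerminationSketch {

        // Preallocated and stack-trace-free, so throwing it costs almost nothing.
        static final RuntimeException STOP = new RuntimeException() {
            @Override
            public Throwable fillInStackTrace() {
                return null;
            }
        };

        public static void main(String[] args) {
            List<Integer> collected = new ArrayList<>();
            try {
                for (int doc = 0; doc < 1_000_000; doc++) {
                    collected.add(doc);
                    if (collected.size() >= 10) {
                        throw STOP;         // abort the scan once the page is full
                    }
                }
            } catch (RuntimeException e) {
                if (e != STOP) throw e;     // only swallow our own marker
            }
            System.out.println("collected " + collected.size() + " docs");
        }
    }
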
diff --git a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
new file mode 100644
index 0000000..9e4e4a6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+import java.io.IOException;
+
+/**
+ * A sort builder to sort based on a document field.
+ */
+public class FieldSortBuilder extends SortBuilder {
+
+ private final String fieldName;
+
+ private SortOrder order;
+
+ private Object missing;
+
+ private Boolean ignoreUnmapped;
+
+ private String sortMode;
+
+ private FilterBuilder nestedFilter;
+
+ private String nestedPath;
+
+ /**
+ * Constructs a new sort based on a document field.
+ *
+ * @param fieldName The field name.
+ */
+ public FieldSortBuilder(String fieldName) {
+ if (fieldName == null) {
+ throw new ElasticsearchIllegalArgumentException("fieldName must not be null");
+ }
+ this.fieldName = fieldName;
+ }
+
+ /**
+ * The order of sorting. Defaults to {@link SortOrder#ASC}.
+ */
+ @Override
+ public FieldSortBuilder order(SortOrder order) {
+ this.order = order;
+ return this;
+ }
+
+ /**
+ * Sets the value when a field is missing in a doc. Can also be set to <tt>_last</tt> or
+ * <tt>_first</tt> to sort missing last or first respectively.
+ */
+ @Override
+ public FieldSortBuilder missing(Object missing) {
+ this.missing = missing;
+ return this;
+ }
+
+ /**
+ * Sets whether an unmapped field should be ignored (skipped) rather than causing the sort to fail. Defaults
+ * to <tt>false</tt> (not ignoring).
+ */
+ public FieldSortBuilder ignoreUnmapped(boolean ignoreUnmapped) {
+ this.ignoreUnmapped = ignoreUnmapped;
+ return this;
+ }
+
+ /**
+ * Defines which value to pick when a document contains multiple values for the targeted sort field.
+ * Possible values: min, max, sum and avg.
+ * <p/>
+ * The last two are only applicable to number based fields.
+ */
+ public FieldSortBuilder sortMode(String sortMode) {
+ this.sortMode = sortMode;
+ return this;
+ }
+
+ /**
+ * Sets the nested filter that the nested objects should match with in order to be taken into account
+ * for sorting.
+ */
+ public FieldSortBuilder setNestedFilter(FilterBuilder nestedFilter) {
+ this.nestedFilter = nestedFilter;
+ return this;
+ }
+
+
+ /**
+ * Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a
+ * field inside a nested object, the nearest upper nested object is selected as nested path.
+ */
+ public FieldSortBuilder setNestedPath(String nestedPath) {
+ this.nestedPath = nestedPath;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(fieldName);
+ if (order != null) {
+ builder.field("order", order.toString());
+ }
+ if (missing != null) {
+ builder.field("missing", missing);
+ }
+ if (ignoreUnmapped != null) {
+ builder.field("ignore_unmapped", ignoreUnmapped);
+ }
+ if (sortMode != null) {
+ builder.field("mode", sortMode);
+ }
+ if (nestedFilter != null) {
+ builder.field("nested_filter", nestedFilter, params);
+ }
+ if (nestedPath != null) {
+ builder.field("nested_path", nestedPath);
+ }
+ builder.endObject();
+ return builder;
+ }
+}
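
A usage sketch for the builder above (the wrapper class is illustrative, and the enclosing startObject/endObject stands in for the surrounding search source, where the builder is normally embedded):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.search.sort.FieldSortBuilder;
    import org.elasticsearch.search.sort.SortOrder;

    public class FieldSortExample {
        public static void main(String[] args) throws Exception {
            FieldSortBuilder sort = new FieldSortBuilder("age")
                    .order(SortOrder.DESC)
                    .missing("_last")       // documents without the field sort last
                    .ignoreUnmapped(true);  // skip instead of failing when the field is unmapped
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();          // stands in for the enclosing sort element
            sort.toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            // prints: {"age":{"order":"desc","missing":"_last","ignore_unmapped":true}}
            System.out.println(builder.string());
        }
    }
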
diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
new file mode 100644
index 0000000..d99e715
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ * A geo distance based sort on a geo point like field.
+ */
+public class GeoDistanceSortBuilder extends SortBuilder {
+
+ final String fieldName;
+
+ private double lat;
+ private double lon;
+ private String geohash;
+
+ private GeoDistance geoDistance;
+ private DistanceUnit unit;
+ private SortOrder order;
+ private String sortMode;
+ private FilterBuilder nestedFilter;
+ private String nestedPath;
+
+ /**
+ * Constructs a new distance based sort on a geo point like field.
+ *
+ * @param fieldName The geo point like field name.
+ */
+ public GeoDistanceSortBuilder(String fieldName) {
+ this.fieldName = fieldName;
+ }
+
+ /**
+ * The point to compute the distance from.
+ *
+ * @param lat latitude.
+ * @param lon longitude.
+ */
+ public GeoDistanceSortBuilder point(double lat, double lon) {
+ this.lat = lat;
+ this.lon = lon;
+ return this;
+ }
+
+ /**
+ * The geohash of the geo point to compute the distance from.
+ */
+ public GeoDistanceSortBuilder geohash(String geohash) {
+ this.geohash = geohash;
+ return this;
+ }
+
+ /**
+ * The geo distance type used to compute the distance.
+ */
+ public GeoDistanceSortBuilder geoDistance(GeoDistance geoDistance) {
+ this.geoDistance = geoDistance;
+ return this;
+ }
+
+ /**
+ * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS}
+ */
+ public GeoDistanceSortBuilder unit(DistanceUnit unit) {
+ this.unit = unit;
+ return this;
+ }
+
+ /**
+ * The order of sorting. Defaults to {@link SortOrder#ASC}.
+ */
+ @Override
+ public GeoDistanceSortBuilder order(SortOrder order) {
+ this.order = order;
+ return this;
+ }
+
+ /**
+ * Not relevant for geo distance sorting; setting a missing value is a no-op.
+ */
+ @Override
+ public SortBuilder missing(Object missing) {
+ return this;
+ }
+
+ /**
+ * Defines which distance to use for sorting in the case a document contains multiple geo points.
+ * Possible values: min and max
+ */
+ public GeoDistanceSortBuilder sortMode(String sortMode) {
+ this.sortMode = sortMode;
+ return this;
+ }
+
+ /**
+ * Sets the nested filter that the nested objects should match with in order to be taken into account
+ * for sorting.
+ */
+ public GeoDistanceSortBuilder setNestedFilter(FilterBuilder nestedFilter) {
+ this.nestedFilter = nestedFilter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a
+ * field inside a nested object, the nearest upper nested object is selected as nested path.
+ */
+ public GeoDistanceSortBuilder setNestedPath(String nestedPath) {
+ this.nestedPath = nestedPath;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("_geo_distance");
+
+ if (geohash != null) {
+ builder.field(fieldName, geohash);
+ } else {
+ builder.startArray(fieldName).value(lon).value(lat).endArray();
+ }
+
+ if (unit != null) {
+ builder.field("unit", unit);
+ }
+ if (geoDistance != null) {
+ builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
+ }
+ if (order == SortOrder.DESC) {
+ builder.field("reverse", true);
+ }
+ if (sortMode != null) {
+ builder.field("mode", sortMode);
+ }
+
+ if (nestedPath != null) {
+ builder.field("nested_path", nestedPath);
+ }
+ if (nestedFilter != null) {
+ builder.field("nested_filter", nestedFilter, params);
+ }
+
+ builder.endObject();
+ return builder;
+ }
+}
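
A sketch of the builder above in use; note from toXContent that a point given as (lat, lon) is serialized in [lon, lat] order. The field name and coordinates are illustrative:

    import org.elasticsearch.common.geo.GeoDistance;
    import org.elasticsearch.common.unit.DistanceUnit;
    import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
    import org.elasticsearch.search.sort.SortOrder;

    public class GeoDistanceSortExample {
        public static GeoDistanceSortBuilder newYorkSort() {
            return new GeoDistanceSortBuilder("pin.location")
                    .point(40.7143528, -74.0059731)  // lat, lon
                    .unit(DistanceUnit.KILOMETERS)
                    .geoDistance(GeoDistance.PLANE)  // faster but less accurate than ARC
                    .order(SortOrder.ASC);
            // serializes to:
            // {"_geo_distance":{"pin.location":[-74.0059731,40.7143528],"unit":"km","distance_type":"plane"}}
        }
    }
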
diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java
new file mode 100644
index 0000000..49b1d8a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.GeoDistanceComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.ObjectMappers;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.search.nested.NestedFieldComparatorSource;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses the <tt>_geo_distance</tt> sort element into a geo distance based {@link SortField}.
+ */
+public class GeoDistanceSortParser implements SortParser {
+
+ @Override
+ public String[] names() {
+ return new String[]{"_geo_distance", "_geoDistance"};
+ }
+
+ @Override
+ public SortField parse(XContentParser parser, SearchContext context) throws Exception {
+ String fieldName = null;
+ GeoPoint point = new GeoPoint();
+ DistanceUnit unit = DistanceUnit.DEFAULT;
+ GeoDistance geoDistance = GeoDistance.DEFAULT;
+ boolean reverse = false;
+ SortMode sortMode = null;
+ String nestedPath = null;
+ Filter nestedFilter = null;
+
+ boolean normalizeLon = true;
+ boolean normalizeLat = true;
+
+ XContentParser.Token token;
+ String currentName = parser.currentName();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ GeoPoint.parse(parser, point);
+ fieldName = currentName;
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ // the json in the format of -> field : { lat : 30, lon : 12 }
+ if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) {
+ ParsedFilter parsedFilter = context.queryParserService().parseInnerFilter(parser);
+ nestedFilter = parsedFilter == null ? null : parsedFilter.filter();
+ } else {
+ fieldName = currentName;
+ GeoPoint.parse(parser, point);
+ }
+ } else if (token.isValue()) {
+ if ("reverse".equals(currentName)) {
+ reverse = parser.booleanValue();
+ } else if ("order".equals(currentName)) {
+ reverse = "desc".equals(parser.text());
+ } else if (currentName.equals("unit")) {
+ unit = DistanceUnit.fromString(parser.text());
+ } else if (currentName.equals("distance_type") || currentName.equals("distanceType")) {
+ geoDistance = GeoDistance.fromString(parser.text());
+ } else if ("normalize".equals(currentName)) {
+ normalizeLat = parser.booleanValue();
+ normalizeLon = parser.booleanValue();
+ } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) {
+ sortMode = SortMode.fromString(parser.text());
+ } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) {
+ nestedPath = parser.text();
+ } else {
+ point.resetFromString(parser.text());
+ fieldName = currentName;
+ }
+ }
+ }
+
+ if (normalizeLat || normalizeLon) {
+ GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+ }
+
+ if (sortMode == null) {
+ sortMode = reverse ? SortMode.MAX : SortMode.MIN;
+ }
+
+ if (sortMode == SortMode.SUM) {
+ throw new ElasticsearchIllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance");
+ }
+
+ FieldMapper mapper = context.smartNameFieldMapper(fieldName);
+ if (mapper == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort");
+ }
+ IndexGeoPointFieldData indexFieldData = context.fieldData().getForField(mapper);
+
+ IndexFieldData.XFieldComparatorSource geoDistanceComparatorSource = new GeoDistanceComparatorSource(
+ indexFieldData, point.lat(), point.lon(), unit, geoDistance, sortMode
+ );
+ ObjectMapper objectMapper;
+ if (nestedPath != null) {
+ ObjectMappers objectMappers = context.mapperService().objectMapper(nestedPath);
+ if (objectMappers == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find nested object mapping for explicit nested path [" + nestedPath + "]");
+ }
+ objectMapper = objectMappers.mapper();
+ if (!objectMapper.nested().isNested()) {
+ throw new ElasticsearchIllegalArgumentException("mapping for explicit nested path is not mapped as nested: [" + nestedPath + "]");
+ }
+ } else {
+ objectMapper = context.mapperService().resolveClosestNestedObjectMapper(fieldName);
+ }
+ if (objectMapper != null && objectMapper.nested().isNested()) {
+ Filter rootDocumentsFilter = context.filterCache().cache(NonNestedDocsFilter.INSTANCE);
+ Filter innerDocumentsFilter;
+ if (nestedFilter != null) {
+ innerDocumentsFilter = context.filterCache().cache(nestedFilter);
+ } else {
+ innerDocumentsFilter = context.filterCache().cache(objectMapper.nestedTypeFilter());
+ }
+ geoDistanceComparatorSource = new NestedFieldComparatorSource(
+ sortMode, geoDistanceComparatorSource, rootDocumentsFilter, innerDocumentsFilter
+ );
+ }
+
+ return new SortField(fieldName, geoDistanceComparatorSource, reverse);
+ }
+}
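
For reference, the parser above consumes a sort element of the following shape; any key that is not one of the recognized option names is taken as the field carrying the point, and the array form is [lon, lat]:

    "sort" : [
        {
            "_geo_distance" : {
                "pin.location" : [ -74.0059731, 40.7143528 ],
                "order" : "asc",
                "unit" : "km",
                "distance_type" : "plane",
                "mode" : "min"
            }
        }
    ]
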
diff --git a/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java
new file mode 100644
index 0000000..7435ff9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A sort builder that sorts by score.
+ */
+public class ScoreSortBuilder extends SortBuilder {
+
+ private SortOrder order;
+
+ /**
+ * The order of score sorting. Defaults to {@link SortOrder#DESC}.
+ */
+ @Override
+ public ScoreSortBuilder order(SortOrder order) {
+ this.order = order;
+ return this;
+ }
+
+ @Override
+ public SortBuilder missing(Object missing) {
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("_score");
+ if (order == SortOrder.ASC) {
+ builder.field("reverse", true);
+ }
+ builder.endObject();
+ return builder;
+ }
+}
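
Since descending score is the default, the builder above only emits anything for the ascending case, expressed via the internal "reverse" flag:

    // new ScoreSortBuilder()                       -> {"_score":{}}
    // new ScoreSortBuilder().order(SortOrder.ASC)  -> {"_score":{"reverse":true}}
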
diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
new file mode 100644
index 0000000..9798f94
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A sort builder that sorts based on a custom script expression.
+ */
+public class ScriptSortBuilder extends SortBuilder {
+
+ private String lang;
+
+ private final String script;
+
+ private final String type;
+
+ private SortOrder order;
+
+ private Map<String, Object> params;
+
+ private String sortMode;
+
+ private FilterBuilder nestedFilter;
+
+ private String nestedPath;
+
+ /**
+ * Constructs a script sort builder with the script and the type.
+ *
+ * @param script The script to use.
+ * @param type The type, can either be "string" or "number".
+ */
+ public ScriptSortBuilder(String script, String type) {
+ this.script = script;
+ this.type = type;
+ }
+
+ /**
+ * Adds a parameter to the script.
+ *
+ * @param name The name of the parameter.
+ * @param value The value of the parameter.
+ */
+ public ScriptSortBuilder param(String name, Object value) {
+ if (params == null) {
+ params = Maps.newHashMap();
+ }
+ params.put(name, value);
+ return this;
+ }
+
+ /**
+ * Sets parameters for the script.
+ *
+ * @param params The script parameters
+ */
+ public ScriptSortBuilder setParams(Map<String, Object> params) {
+ this.params = params;
+ return this;
+ }
+
+ /**
+ * Sets the sort order.
+ */
+ @Override
+ public ScriptSortBuilder order(SortOrder order) {
+ this.order = order;
+ return this;
+ }
+
+ /**
+ * Not relevant for script sorting; setting a missing value is a no-op.
+ */
+ @Override
+ public SortBuilder missing(Object missing) {
+ return this;
+ }
+
+ /**
+ * The language of the script.
+ */
+ public ScriptSortBuilder lang(String lang) {
+ this.lang = lang;
+ return this;
+ }
+
+ /**
+ * Defines which value to use for sorting when the script produces multiple values for a document.
+ * Possible values: min and max; for a number based script, sum and avg are also supported.
+ */
+ public ScriptSortBuilder sortMode(String sortMode) {
+ this.sortMode = sortMode;
+ return this;
+ }
+
+ /**
+ * Sets the nested filter that the nested objects should match with in order to be taken into account
+ * for sorting.
+ */
+ public ScriptSortBuilder setNestedFilter(FilterBuilder nestedFilter) {
+ this.nestedFilter = nestedFilter;
+ return this;
+ }
+
+ /**
+ * Sets the nested path if sorting occurs on a field that is inside a nested object. For sorting by script this
+ * needs to be specified.
+ */
+ public ScriptSortBuilder setNestedPath(String nestedPath) {
+ this.nestedPath = nestedPath;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("_script");
+ builder.field("script", script);
+ builder.field("type", type);
+ if (order == SortOrder.DESC) {
+ builder.field("reverse", true);
+ }
+ if (lang != null) {
+ builder.field("lang", lang);
+ }
+ if (this.params != null) {
+ builder.field("params", this.params);
+ }
+ if (sortMode != null) {
+ builder.field("mode", sortMode);
+ }
+ if (nestedPath != null) {
+ builder.field("nested_path", nestedPath);
+ }
+ if (nestedFilter != null) {
+ builder.field("nested_filter", nestedFilter, params);
+ }
+ builder.endObject();
+ return builder;
+ }
+}
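
A sketch of a numeric script sort built with the class above (the script body and the "mvel" language, the default in this release, are illustrative):

    import org.elasticsearch.search.sort.ScriptSortBuilder;
    import org.elasticsearch.search.sort.SortOrder;

    public class ScriptSortExample {
        public static ScriptSortBuilder priceSort() {
            return new ScriptSortBuilder("doc['price'].value * factor", "number")
                    .lang("mvel")
                    .param("factor", 1.1)
                    .order(SortOrder.ASC);
            // serializes to:
            // {"_script":{"script":"doc['price'].value * factor","type":"number","lang":"mvel","params":{"factor":1.1}}}
        }
    }
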
diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
new file mode 100644
index 0000000..19095bd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.DoubleScriptDataComparator;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.fieldcomparator.StringScriptDataComparator;
+import org.elasticsearch.index.mapper.ObjectMappers;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.search.nested.NestedFieldComparatorSource;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.Map;
+
+/**
+ * Parses the <tt>_script</tt> sort element into a script based {@link SortField}.
+ */
+public class ScriptSortParser implements SortParser {
+
+ @Override
+ public String[] names() {
+ return new String[]{"_script"};
+ }
+
+ @Override
+ public SortField parse(XContentParser parser, SearchContext context) throws Exception {
+ String script = null;
+ String scriptLang = null;
+ String type = null;
+ Map<String, Object> params = null;
+ boolean reverse = false;
+ SortMode sortMode = null;
+ String nestedPath = null;
+ Filter nestedFilter = null;
+
+ XContentParser.Token token;
+ String currentName = parser.currentName();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentName)) {
+ params = parser.map();
+ } else if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) {
+ ParsedFilter parsedFilter = context.queryParserService().parseInnerFilter(parser);
+ nestedFilter = parsedFilter == null ? null : parsedFilter.filter();
+ }
+ } else if (token.isValue()) {
+ if ("reverse".equals(currentName)) {
+ reverse = parser.booleanValue();
+ } else if ("order".equals(currentName)) {
+ reverse = "desc".equals(parser.text());
+ } else if ("script".equals(currentName)) {
+ script = parser.text();
+ } else if ("type".equals(currentName)) {
+ type = parser.text();
+ } else if ("lang".equals(currentName)) {
+ scriptLang = parser.text();
+ } else if ("mode".equals(currentName)) {
+ sortMode = SortMode.fromString(parser.text());
+ } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) {
+ nestedPath = parser.text();
+ }
+ }
+ }
+
+ if (script == null) {
+ throw new SearchParseException(context, "_script sorting requires setting the script to sort by");
+ }
+ if (type == null) {
+ throw new SearchParseException(context, "_script sorting requires setting the type of the script");
+ }
+ SearchScript searchScript = context.scriptService().search(context.lookup(), scriptLang, script, params);
+ IndexFieldData.XFieldComparatorSource fieldComparatorSource;
+ if ("string".equals(type)) {
+ fieldComparatorSource = StringScriptDataComparator.comparatorSource(searchScript);
+ } else if ("number".equals(type)) {
+ fieldComparatorSource = DoubleScriptDataComparator.comparatorSource(searchScript);
+ } else {
+ throw new SearchParseException(context, "custom script sort type [" + type + "] not supported");
+ }
+
+ if ("string".equals(type) && (sortMode == SortMode.SUM || sortMode == SortMode.AVG)) {
+ throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]");
+ }
+
+ if (sortMode == null) {
+ sortMode = reverse ? SortMode.MAX : SortMode.MIN;
+ }
+
+ // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource`
+ ObjectMapper objectMapper;
+ if (nestedPath != null) {
+ ObjectMappers objectMappers = context.mapperService().objectMapper(nestedPath);
+ if (objectMappers == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find nested object mapping for explicit nested path [" + nestedPath + "]");
+ }
+ objectMapper = objectMappers.mapper();
+ if (!objectMapper.nested().isNested()) {
+ throw new ElasticsearchIllegalArgumentException("mapping for explicit nested path is not mapped as nested: [" + nestedPath + "]");
+ }
+
+ Filter rootDocumentsFilter = context.filterCache().cache(NonNestedDocsFilter.INSTANCE);
+ Filter innerDocumentsFilter;
+ if (nestedFilter != null) {
+ innerDocumentsFilter = context.filterCache().cache(nestedFilter);
+ } else {
+ innerDocumentsFilter = context.filterCache().cache(objectMapper.nestedTypeFilter());
+ }
+ fieldComparatorSource = new NestedFieldComparatorSource(sortMode, fieldComparatorSource, rootDocumentsFilter, innerDocumentsFilter);
+ }
+
+ return new SortField("_script", fieldComparatorSource, reverse);
+ }
+}
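
On the wire, the element parsed above looks like this (both "order" and the older "reverse" flag are accepted, as the token loop shows):

    "sort" : {
        "_script" : {
            "script" : "doc['price'].value * factor",
            "type" : "number",
            "params" : { "factor" : 1.1 },
            "order" : "asc"
        }
    }
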
diff --git a/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
new file mode 100644
index 0000000..58f57fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+
+/**
+ * The base class for all sort builders.
+ */
+public abstract class SortBuilder implements ToXContent {
+
+ /**
+ * The order of sorting. Defaults to {@link SortOrder#ASC}.
+ */
+ public abstract SortBuilder order(SortOrder order);
+
+ /**
+ * Sets the value when a field is missing in a doc. Can also be set to <tt>_last</tt> or
+ * <tt>_first</tt> to sort missing last or first respectively.
+ */
+ public abstract SortBuilder missing(Object missing);
+}
diff --git a/src/main/java/org/elasticsearch/search/sort/SortBuilders.java b/src/main/java/org/elasticsearch/search/sort/SortBuilders.java
new file mode 100644
index 0000000..ea5f31f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/SortBuilders.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+/**
+ * A set of static factory methods for {@link SortBuilder}s.
+ *
+ *
+ */
+public class SortBuilders {
+
+ /**
+ * Constructs a new score sort.
+ */
+ public static ScoreSortBuilder scoreSort() {
+ return new ScoreSortBuilder();
+ }
+
+ /**
+ * Constructs a new field based sort.
+ *
+ * @param field The field name.
+ */
+ public static FieldSortBuilder fieldSort(String field) {
+ return new FieldSortBuilder(field);
+ }
+
+ /**
+ * Constructs a new script based sort.
+ *
+ * @param script The script to use.
+ * @param type The type, can either be "string" or "number".
+ */
+ public static ScriptSortBuilder scriptSort(String script, String type) {
+ return new ScriptSortBuilder(script, type);
+ }
+
+ /**
+ * Constructs a new geo distance based sort.
+ *
+ * @param fieldName The geo point like field name.
+ */
+ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName) {
+ return new GeoDistanceSortBuilder(fieldName);
+ }
+}
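
A sketch combining these factory methods with the Java client (assumes a connected Client; index and field names are illustrative):

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.sort.SortBuilders;
    import org.elasticsearch.search.sort.SortOrder;

    public class SortBuildersExample {
        public static SearchResponse search(Client client) {
            return client.prepareSearch("products")
                    .setQuery(QueryBuilders.matchAllQuery())
                    .addSort(SortBuilders.fieldSort("price").order(SortOrder.ASC))
                    .addSort(SortBuilders.scoreSort())  // tie-break equal prices by score
                    .execute().actionGet();
        }
    }
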
diff --git a/src/main/java/org/elasticsearch/search/sort/SortOrder.java b/src/main/java/org/elasticsearch/search/sort/SortOrder.java
new file mode 100644
index 0000000..cb2bca2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/SortOrder.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+/**
+ * A sorting order.
+ *
+ *
+ */
+public enum SortOrder {
+ /**
+ * Ascending order.
+ */
+ ASC {
+ @Override
+ public String toString() {
+ return "asc";
+ }
+ },
+ /**
+ * Descending order.
+ */
+ DESC {
+ @Override
+ public String toString() {
+ return "desc";
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java
new file mode 100644
index 0000000..2017413
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.ObjectMappers;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.search.nested.NestedFieldComparatorSource;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.List;
+
+/**
+ * Parses the <tt>sort</tt> element of a search request and sets the resulting {@link Sort} on the search context.
+ */
+public class SortParseElement implements SearchParseElement {
+
+ public static final SortField SORT_SCORE = new SortField(null, SortField.Type.SCORE);
+ private static final SortField SORT_SCORE_REVERSE = new SortField(null, SortField.Type.SCORE, true);
+ private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC);
+ private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true);
+
+ public static final String SCORE_FIELD_NAME = "_score";
+ public static final String DOC_FIELD_NAME = "_doc";
+
+ private final ImmutableMap<String, SortParser> parsers;
+
+ public SortParseElement() {
+ ImmutableMap.Builder<String, SortParser> builder = ImmutableMap.builder();
+ addParser(builder, new ScriptSortParser());
+ addParser(builder, new GeoDistanceSortParser());
+ this.parsers = builder.build();
+ }
+
+ private void addParser(ImmutableMap.Builder<String, SortParser> parsers, SortParser parser) {
+ for (String name : parser.names()) {
+ parsers.put(name, parser);
+ }
+ }
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ List<SortField> sortFields = Lists.newArrayListWithCapacity(2);
+ if (token == XContentParser.Token.START_ARRAY) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
+ addCompoundSortField(parser, context, sortFields);
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ addSortField(context, sortFields, parser.text(), false, false, null, null, null, null);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed");
+ }
+ }
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ addSortField(context, sortFields, parser.text(), false, false, null, null, null, null);
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ addCompoundSortField(parser, context, sortFields);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("malformed sort format, either start with array, object, or an actual string");
+ }
+ if (!sortFields.isEmpty()) {
+ // optimization: if we only sort on score, non reversed, we don't really need sorting
+ boolean sort;
+ if (sortFields.size() > 1) {
+ sort = true;
+ } else {
+ SortField sortField = sortFields.get(0);
+ if (sortField.getType() == SortField.Type.SCORE && !sortField.getReverse()) {
+ sort = false;
+ } else {
+ sort = true;
+ }
+ }
+ if (sort) {
+ context.sort(new Sort(sortFields.toArray(new SortField[sortFields.size()])));
+ }
+ }
+ }
+
+ private void addCompoundSortField(XContentParser parser, SearchContext context, List<SortField> sortFields) throws Exception {
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String fieldName = parser.currentName();
+ boolean reverse = false;
+ String missing = null;
+ String innerJsonName = null;
+ boolean ignoreUnmapped = false;
+ SortMode sortMode = null;
+ Filter nestedFilter = null;
+ String nestedPath = null;
+ token = parser.nextToken();
+ if (token == XContentParser.Token.VALUE_STRING) {
+ String direction = parser.text();
+ if (direction.equals("asc")) {
+ reverse = SCORE_FIELD_NAME.equals(fieldName);
+ } else if (direction.equals("desc")) {
+ reverse = !SCORE_FIELD_NAME.equals(fieldName);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("sort direction [" + fieldName + "] not supported");
+ }
+ addSortField(context, sortFields, fieldName, reverse, ignoreUnmapped, missing, sortMode, nestedPath, nestedFilter);
+ } else {
+ if (parsers.containsKey(fieldName)) {
+ sortFields.add(parsers.get(fieldName).parse(parser, context));
+ } else {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ innerJsonName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("reverse".equals(innerJsonName)) {
+ reverse = parser.booleanValue();
+ } else if ("order".equals(innerJsonName)) {
+ if ("asc".equals(parser.text())) {
+ reverse = SCORE_FIELD_NAME.equals(fieldName);
+ } else if ("desc".equals(parser.text())) {
+ reverse = !SCORE_FIELD_NAME.equals(fieldName);
+ }
+ } else if ("missing".equals(innerJsonName)) {
+ missing = parser.textOrNull();
+ } else if ("ignore_unmapped".equals(innerJsonName) || "ignoreUnmapped".equals(innerJsonName)) {
+ ignoreUnmapped = parser.booleanValue();
+ } else if ("mode".equals(innerJsonName)) {
+ sortMode = SortMode.fromString(parser.text());
+ } else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) {
+ nestedPath = parser.text();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) {
+ ParsedFilter parsedFilter = context.queryParserService().parseInnerFilter(parser);
+ nestedFilter = parsedFilter == null ? null : parsedFilter.filter();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported");
+ }
+ }
+ }
+ addSortField(context, sortFields, fieldName, reverse, ignoreUnmapped, missing, sortMode, nestedPath, nestedFilter);
+ }
+ }
+ }
+ }
+ }
+
+ private void addSortField(SearchContext context, List<SortField> sortFields, String fieldName, boolean reverse, boolean ignoreUnmapped, @Nullable final String missing, SortMode sortMode, String nestedPath, Filter nestedFilter) {
+ if (SCORE_FIELD_NAME.equals(fieldName)) {
+ if (reverse) {
+ sortFields.add(SORT_SCORE_REVERSE);
+ } else {
+ sortFields.add(SORT_SCORE);
+ }
+ } else if (DOC_FIELD_NAME.equals(fieldName)) {
+ if (reverse) {
+ sortFields.add(SORT_DOC_REVERSE);
+ } else {
+ sortFields.add(SORT_DOC);
+ }
+ } else {
+ FieldMapper fieldMapper = context.smartNameFieldMapper(fieldName);
+ if (fieldMapper == null) {
+ if (ignoreUnmapped) {
+ return;
+ }
+ throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on");
+ }
+
+ if (!fieldMapper.isSortable()) {
+ throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]");
+ }
+
+ // Enable when we also know how to detect fields that do tokenize, but only emit one token
+ /*if (fieldMapper instanceof StringFieldMapper) {
+ StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper;
+ if (stringFieldMapper.fieldType().tokenized()) {
+ // Fail early
+ throw new SearchParseException(context, "Can't sort on tokenized string field[" + fieldName + "]");
+ }
+ }*/
+
+ // We only support AVG and SUM on number based fields
+ if (!(fieldMapper instanceof NumberFieldMapper) && (sortMode == SortMode.SUM || sortMode == SortMode.AVG)) {
+ sortMode = null;
+ }
+ if (sortMode == null) {
+ sortMode = resolveDefaultSortMode(reverse);
+ }
+
+ IndexFieldData.XFieldComparatorSource fieldComparatorSource = context.fieldData().getForField(fieldMapper)
+ .comparatorSource(missing, sortMode);
+ ObjectMapper objectMapper;
+ if (nestedPath != null) {
+ ObjectMappers objectMappers = context.mapperService().objectMapper(nestedPath);
+ if (objectMappers == null) {
+ throw new ElasticsearchIllegalArgumentException("failed to find nested object mapping for explicit nested path [" + nestedPath + "]");
+ }
+ objectMapper = objectMappers.mapper();
+ if (!objectMapper.nested().isNested()) {
+ throw new ElasticsearchIllegalArgumentException("mapping for explicit nested path is not mapped as nested: [" + nestedPath + "]");
+ }
+ } else {
+ objectMapper = context.mapperService().resolveClosestNestedObjectMapper(fieldName);
+ }
+ if (objectMapper != null && objectMapper.nested().isNested()) {
+ Filter rootDocumentsFilter = context.filterCache().cache(NonNestedDocsFilter.INSTANCE);
+ Filter innerDocumentsFilter;
+ if (nestedFilter != null) {
+ innerDocumentsFilter = context.filterCache().cache(nestedFilter);
+ } else {
+ innerDocumentsFilter = context.filterCache().cache(objectMapper.nestedTypeFilter());
+ }
+ fieldComparatorSource = new NestedFieldComparatorSource(sortMode, fieldComparatorSource, rootDocumentsFilter, innerDocumentsFilter);
+ }
+ sortFields.add(new SortField(fieldMapper.names().indexName(), fieldComparatorSource, reverse));
+ }
+ }
+
+ private static SortMode resolveDefaultSortMode(boolean reverse) {
+ return reverse ? SortMode.MAX : SortMode.MIN;
+ }
+
+}
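
The parse method above accepts a plain string, a single object, or an array mixing both, and skips sorting entirely when the request reduces to a single non-reversed score sort. All of the following are accepted, for example:

    "sort" : "age"
    "sort" : { "age" : "desc" }
    "sort" : [
        "age",
        { "name" : { "order" : "asc", "missing" : "_last" } },
        "_score"
    ]
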
diff --git a/src/main/java/org/elasticsearch/search/sort/SortParser.java b/src/main/java/org/elasticsearch/search/sort/SortParser.java
new file mode 100644
index 0000000..6383afd
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/SortParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Parses a custom sort element into a {@link SortField}.
+ */
+public interface SortParser {
+
+ String[] names();
+
+ SortField parse(XContentParser parser, SearchContext context) throws Exception;
+}
diff --git a/src/main/java/org/elasticsearch/search/sort/TrackScoresParseElement.java b/src/main/java/org/elasticsearch/search/sort/TrackScoresParseElement.java
new file mode 100644
index 0000000..a2eaf54
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/sort/TrackScoresParseElement.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.sort;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+
+/**
+ * Enables score tracking when sorting on a field (the <tt>track_scores</tt> element).
+ */
+public class TrackScoresParseElement implements SearchParseElement {
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ XContentParser.Token token = parser.currentToken();
+ if (token.isValue()) {
+ context.trackScores(parser.booleanValue());
+ }
+ }
+} \ No newline at end of file
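
This element is a single boolean in the request; it keeps scores computed while sorting on a field. The element name under which it is registered (conventionally "track_scores") lives outside this file:

    "track_scores" : true
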
diff --git a/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java b/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java
new file mode 100644
index 0000000..2b4687c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.search.spell.StringDistance;
+import org.apache.lucene.search.spell.SuggestMode;
+import org.apache.lucene.util.automaton.LevenshteinAutomata;
+
+public class DirectSpellcheckerSettings {
+
+ private SuggestMode suggestMode = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
+ private float accuracy = 0.5f;
+ private Suggest.Suggestion.Sort sort = Suggest.Suggestion.Sort.SCORE;
+ private StringDistance stringDistance = DirectSpellChecker.INTERNAL_LEVENSHTEIN;
+ private int maxEdits = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE;
+ private int maxInspections = 5;
+ private float maxTermFreq = 0.01f;
+ private int prefixLength = 1;
+ private int minWordLength = 4;
+ private float minDocFreq = 0f;
+
+ public SuggestMode suggestMode() {
+ return suggestMode;
+ }
+
+ public void suggestMode(SuggestMode suggestMode) {
+ this.suggestMode = suggestMode;
+ }
+
+ public float accuracy() {
+ return accuracy;
+ }
+
+ public void accuracy(float accuracy) {
+ this.accuracy = accuracy;
+ }
+
+ public Suggest.Suggestion.Sort sort() {
+ return sort;
+ }
+
+ public void sort(Suggest.Suggestion.Sort sort) {
+ this.sort = sort;
+ }
+
+ public StringDistance stringDistance() {
+ return stringDistance;
+ }
+
+ public void stringDistance(StringDistance distance) {
+ this.stringDistance = distance;
+ }
+
+ public int maxEdits() {
+ return maxEdits;
+ }
+
+ public void maxEdits(int maxEdits) {
+ this.maxEdits = maxEdits;
+ }
+
+ public int maxInspections() {
+ return maxInspections;
+ }
+
+ public void maxInspections(int maxInspections) {
+ this.maxInspections = maxInspections;
+ }
+
+ public float maxTermFreq() {
+ return maxTermFreq;
+ }
+
+ public void maxTermFreq(float maxTermFreq) {
+ this.maxTermFreq = maxTermFreq;
+ }
+
+ public int prefixLength() {
+ return prefixLength;
+ }
+
+ public void prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ }
+
+ public int minWordLength() {
+ return minWordLength;
+ }
+
+ public void minQueryLength(int minQueryLength) {
+ this.minWordLength = minQueryLength;
+ }
+
+ public float minDocFreq() {
+ return minDocFreq;
+ }
+
+ public void minDocFreq(float minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ }
+
+} \ No newline at end of file
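
A sketch of how these settings map onto a Lucene DirectSpellChecker; the wiring shown is illustrative (the actual hookup happens in the suggester implementations), but each setter is a real Lucene API:

    import org.apache.lucene.search.spell.DirectSpellChecker;
    import org.elasticsearch.search.suggest.DirectSpellcheckerSettings;

    public class SpellcheckerSettingsExample {
        public static DirectSpellChecker build(DirectSpellcheckerSettings settings) {
            DirectSpellChecker checker = new DirectSpellChecker();
            checker.setAccuracy(settings.accuracy());
            checker.setMaxEdits(settings.maxEdits());  // capped at 2 by LevenshteinAutomata
            checker.setMaxInspections(settings.maxInspections());
            checker.setMaxQueryFrequency(settings.maxTermFreq());
            checker.setMinPrefix(settings.prefixLength());
            checker.setMinQueryLength(settings.minWordLength());
            checker.setThresholdFrequency(settings.minDocFreq());
            checker.setDistance(settings.stringDistance());
            return checker;
        }
    }
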
diff --git a/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/src/main/java/org/elasticsearch/search/suggest/Suggest.java
new file mode 100644
index 0000000..6eafdfe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/Suggest.java
@@ -0,0 +1,654 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestion;
+import org.elasticsearch.search.suggest.term.TermSuggestion;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Top level suggest result, containing the result for each suggestion.
+ */
+public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>, Streamable, ToXContent {
+
+ public static class Fields {
+ public static final XContentBuilderString SUGGEST = new XContentBuilderString("suggest");
+ }
+
+ private static final Comparator<Option> COMPARATOR = new Comparator<Suggest.Suggestion.Entry.Option>() {
+ @Override
+ public int compare(Option first, Option second) {
+ int cmp = Float.compare(second.getScore(), first.getScore());
+ if (cmp != 0) {
+ return cmp;
+ }
+ return first.getText().compareTo(second.getText());
+ }
+ };
+
+ private final XContentBuilderString name;
+
+ private List<Suggestion<? extends Entry<? extends Option>>> suggestions;
+
+ private Map<String, Suggestion<? extends Entry<? extends Option>>> suggestMap;
+
+ public Suggest() {
+ this.name = null;
+ }
+
+ public Suggest(XContentBuilderString name) {
+ this.name = name;
+ }
+
+ public Suggest(List<Suggestion<? extends Entry<? extends Option>>> suggestions) {
+ this(null, suggestions);
+ }
+
+ public Suggest(XContentBuilderString name, List<Suggestion<? extends Entry<? extends Option>>> suggestions) {
+ this.name = name;
+ this.suggestions = suggestions;
+ }
+
+ @Override
+ public Iterator<Suggestion<? extends Entry<? extends Option>>> iterator() {
+ return suggestions.iterator();
+ }
+
+ /**
+ * The number of suggestions in this {@link Suggest} result
+ */
+ public int size() {
+ return suggestions.size();
+ }
+
+ public <T extends Suggestion<? extends Entry<? extends Option>>> T getSuggestion(String name) {
+ if (suggestions.isEmpty() || name == null) {
+ return null;
+ } else if (suggestions.size() == 1) {
+ return (T) (name.equals(suggestions.get(0).name) ? suggestions.get(0) : null);
+ } else if (this.suggestMap == null) {
+ suggestMap = new HashMap<String, Suggestion<? extends Entry<? extends Option>>>();
+ for (Suggest.Suggestion<? extends Entry<? extends Option>> item : suggestions) {
+ suggestMap.put(item.getName(), item);
+ }
+ }
+ return (T) suggestMap.get(name);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ final int size = in.readVInt();
+ suggestions = new ArrayList<Suggestion<? extends Entry<? extends Option>>>(size);
+ for (int i = 0; i < size; i++) {
+ Suggestion<? extends Entry<? extends Option>> suggestion;
+ final int type = in.readVInt();
+ switch (type) {
+ case TermSuggestion.TYPE:
+ suggestion = new TermSuggestion();
+ break;
+ case CompletionSuggestion.TYPE:
+ suggestion = new CompletionSuggestion();
+ break;
+ case PhraseSuggestion.TYPE:
+ suggestion = new PhraseSuggestion();
+ break;
+ default:
+ suggestion = new Suggestion<Entry<Option>>();
+ break;
+ }
+ suggestion.readFrom(in);
+ suggestions.add(suggestion);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(suggestions.size());
+ for (Suggestion<?> command : suggestions) {
+ out.writeVInt(command.getType());
+ command.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if(name == null) {
+ for (Suggestion<?> suggestion : suggestions) {
+ suggestion.toXContent(builder, params);
+ }
+ } else {
+ builder.startObject(name);
+ for (Suggestion<?> suggestion : suggestions) {
+ suggestion.toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+
+ return builder;
+ }
+
+ public static Suggest readSuggest(XContentBuilderString name, StreamInput in) throws IOException {
+ Suggest result = new Suggest(name);
+ result.readFrom(in);
+ return result;
+ }
+
+ public static Map<String, List<Suggest.Suggestion>> group(Map<String, List<Suggest.Suggestion>> groupedSuggestions, Suggest suggest) {
+ for (Suggestion<? extends Entry<? extends Option>> suggestion : suggest) {
+ List<Suggestion> list = groupedSuggestions.get(suggestion.getName());
+ if (list == null) {
+ list = new ArrayList<Suggest.Suggestion>();
+ groupedSuggestions.put(suggestion.getName(), list);
+ }
+ list.add(suggestion);
+ }
+ return groupedSuggestions;
+ }
+
+ public static List<Suggestion<? extends Entry<? extends Option>>> reduce(Map<String, List<Suggest.Suggestion>> groupedSuggestions) {
+ List<Suggestion<? extends Entry<? extends Option>>> reduced = new ArrayList<Suggestion<? extends Entry<? extends Option>>>(groupedSuggestions.size());
+ for (java.util.Map.Entry<String, List<Suggestion>> unmergedResults : groupedSuggestions.entrySet()) {
+ List<Suggestion> value = unmergedResults.getValue();
+ Suggestion reduce = value.get(0).reduce(value);
+ reduce.trim();
+ reduced.add(reduce);
+ }
+ return reduced;
+ }
+
+ /**
+ * The suggestion responses corresponding to the suggestions in the request.
+ */
+ public static class Suggestion<T extends Suggestion.Entry> implements Iterable<T>, Streamable, ToXContent {
+
+
+ public static final int TYPE = 0;
+ protected String name;
+ protected int size;
+ protected final List<T> entries = new ArrayList<T>(5);
+
+ public Suggestion() {
+ }
+
+ public Suggestion(String name, int size) {
+ this.name = name;
+ this.size = size; // The suggested term size specified in request, only used for merging shard responses
+ }
+
+ public void addTerm(T entry) {
+ entries.add(entry);
+ }
+
+ public int getType() {
+ return TYPE;
+ }
+
+ @Override
+ public Iterator<T> iterator() {
+ return entries.iterator();
+ }
+
+ /**
+ * @return The entries for this suggestion.
+ */
+ public List<T> getEntries() {
+ return entries;
+ }
+
+ /**
+ * @return The name of the suggestion as is defined in the request.
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Merges the results of several shard-level suggestions into a single suggestion.
+ * For internal use.
+ */
+ public Suggestion<T> reduce(List<Suggestion<T>> toReduce) {
+ if (toReduce.size() == 1) {
+ return toReduce.get(0);
+ } else if (toReduce.isEmpty()) {
+ return null;
+ }
+ Suggestion<T> leader = toReduce.get(0);
+ List<T> entries = leader.entries;
+ final int size = entries.size();
+ Comparator<Option> sortComparator = sortComparator();
+ List<T> currentEntries = new ArrayList<T>();
+ for (int i = 0; i < size; i++) {
+ for (Suggestion<T> suggestion : toReduce) {
+ if(suggestion.entries.size() != size) {
+ throw new ElasticsearchIllegalStateException("Can't merge suggest result, this might be caused by suggest calls " +
+ "across multiple indices with different analysis chains. Suggest entries have different sizes actual [" +
+ suggestion.entries.size() + "] expected [" + size +"]");
+ }
+ assert suggestion.name.equals(leader.name);
+ currentEntries.add(suggestion.entries.get(i));
+ }
+ T entry = (T) entries.get(i).reduce(currentEntries);
+ entry.sort(sortComparator);
+ entries.set(i, entry);
+ currentEntries.clear();
+ }
+ return leader;
+ }
+
+ protected Comparator<Option> sortComparator() {
+ return COMPARATOR;
+ }
+
+ /**
+ * Trims the number of options per suggest text term to the requested size.
+ * For internal use.
+ */
+ public void trim() {
+ for (Entry<?> entry : entries) {
+ entry.trim(size);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ innerReadFrom(in);
+ int size = in.readVInt();
+ entries.clear();
+ for (int i = 0; i < size; i++) {
+ T newEntry = newEntry();
+ newEntry.readFrom(in);
+ entries.add(newEntry);
+ }
+ }
+
+ protected T newEntry() {
+ return (T)new Entry();
+ }
+
+
+ protected void innerReadFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ size = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ innerWriteTo(out);
+ out.writeVInt(entries.size());
+ for (Entry<?> entry : entries) {
+ entry.writeTo(out);
+ }
+ }
+
+ public void innerWriteTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVInt(size);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray(name);
+ for (Entry<?> entry : entries) {
+ entry.toXContent(builder, params);
+ }
+ builder.endArray();
+ return builder;
+ }
+
+
+ /**
+ * Represents a part from the suggest text with suggested options.
+ */
+ public static class Entry<O extends Entry.Option> implements Iterable<O>, Streamable, ToXContent {
+
+ static class Fields {
+
+ static final XContentBuilderString TEXT = new XContentBuilderString("text");
+ static final XContentBuilderString OFFSET = new XContentBuilderString("offset");
+ static final XContentBuilderString LENGTH = new XContentBuilderString("length");
+ static final XContentBuilderString OPTIONS = new XContentBuilderString("options");
+
+ }
+
+ protected Text text;
+ protected int offset;
+ protected int length;
+
+ protected List<O> options;
+
+ public Entry(Text text, int offset, int length) {
+ this.text = text;
+ this.offset = offset;
+ this.length = length;
+ this.options = new ArrayList<O>(5);
+ }
+
+ public Entry() {
+ }
+
+ public void addOption(O option) {
+ options.add(option);
+ }
+
+ protected void sort(Comparator<O> comparator) {
+ CollectionUtil.timSort(options, comparator);
+ }
+
+ protected <T extends Entry<O>> Entry<O> reduce(List<T> toReduce) {
+ if (toReduce.size() == 1) {
+ return toReduce.get(0);
+ }
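+ // options are deduplicated by text (Option.equals/hashCode below compare text only); a duplicate
+ // coming from another shard is folded into the first occurrence via mergeInto(), keeping the higher score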
+ final Map<O, O> entries = new HashMap<O, O>();
+ Entry<O> leader = toReduce.get(0);
+ for (Entry<O> entry : toReduce) {
+ if (!leader.text.equals(entry.text)) {
+ throw new ElasticsearchIllegalStateException("Can't merge suggest entries, this might be caused by suggest calls " +
+ "across multiple indices with different analysis chains. Suggest entries have different text actual [" +
+ entry.text + "] expected [" + leader.text +"]");
+ }
+ assert leader.offset == entry.offset;
+ assert leader.length == entry.length;
+ leader.merge(entry);
+ for (O option : entry) {
+ O merger = entries.get(option);
+ if (merger == null) {
+ entries.put(option, option);
+ } else {
+ merger.mergeInto(option);
+ }
+ }
+ }
+ leader.options.clear();
+ for (O option: entries.keySet()) {
+ leader.addOption(option);
+ }
+ return leader;
+ }
+
+ /**
+ * Merge any extra fields for this subtype.
+ */
+ protected void merge(Entry<O> other) {
+ }
+
+ /**
+ * @return the text (analyzed by the suggest analyzer) originating from the suggest text. Usually this is a
+ * single term.
+ */
+ public Text getText() {
+ return text;
+ }
+
+ /**
+ * @return the start offset (not analyzed) for this entry in the suggest text.
+ */
+ public int getOffset() {
+ return offset;
+ }
+
+ /**
+ * @return the length (not analyzed) for this entry in the suggest text.
+ */
+ public int getLength() {
+ return length;
+ }
+
+ @Override
+ public Iterator<O> iterator() {
+ return options.iterator();
+ }
+
+ /**
+ * @return The suggested options for this particular suggest entry. If there are no suggested terms then
+ * an empty list is returned.
+ */
+ public List<O> getOptions() {
+ return options;
+ }
+
+ void trim(int size) {
+ int optionsToRemove = Math.max(0, options.size() - size);
+ for (int i = 0; i < optionsToRemove; i++) {
+ options.remove(options.size() - 1);
+ }
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Entry<?> entry = (Entry<?>) o;
+
+ if (length != entry.length) return false;
+ if (offset != entry.offset) return false;
+ if (!this.text.equals(entry.text)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = text.hashCode();
+ result = 31 * result + offset;
+ result = 31 * result + length;
+ return result;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ text = in.readText();
+ offset = in.readVInt();
+ length = in.readVInt();
+ int suggestedWords = in.readVInt();
+ options = new ArrayList<O>(suggestedWords);
+ for (int j = 0; j < suggestedWords; j++) {
+ O newOption = newOption();
+ newOption.readFrom(in);
+ options.add(newOption);
+ }
+ }
+
+ protected O newOption(){
+ return (O) new Option();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeText(text);
+ out.writeVInt(offset);
+ out.writeVInt(length);
+ out.writeVInt(options.size());
+ for (Option option : options) {
+ option.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(Fields.TEXT, text);
+ builder.field(Fields.OFFSET, offset);
+ builder.field(Fields.LENGTH, length);
+ builder.startArray(Fields.OPTIONS);
+ for (Option option : options) {
+ option.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ return builder;
+ }
+
+ /**
+ * Contains the suggested text with its document frequency and score.
+ */
+ public static class Option implements Streamable, ToXContent {
+
+ static class Fields {
+
+ static final XContentBuilderString TEXT = new XContentBuilderString("text");
+ static final XContentBuilderString HIGHLIGHTED = new XContentBuilderString("highlighted");
+ static final XContentBuilderString SCORE = new XContentBuilderString("score");
+
+ }
+
+ private Text text;
+ private Text highlighted;
+ private float score;
+
+ public Option(Text text, Text highlighted, float score) {
+ this.text = text;
+ this.highlighted = highlighted;
+ this.score = score;
+ }
+
+ public Option(Text text, float score) {
+ this(text, null, score);
+ }
+
+ public Option() {
+ }
+
+ /**
+ * @return The actual suggested text.
+ */
+ public Text getText() {
+ return text;
+ }
+
+ /**
+ * @return A copy of the suggested text with the changes relative to the user-supplied text highlighted.
+ */
+ public Text getHighlighted() {
+ return highlighted;
+ }
+
+ /**
+ * @return The score based on the edit distance difference between the suggested term and the
+ * term in the suggest text.
+ */
+ public float getScore() {
+ return score;
+ }
+
+ protected void setScore(float score) {
+ this.score = score;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ text = in.readText();
+ score = in.readFloat();
+ highlighted = in.readOptionalText();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeText(text);
+ out.writeFloat(score);
+ out.writeOptionalText(highlighted);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ innerToXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(Fields.TEXT, text);
+ if (highlighted != null) {
+ builder.field(Fields.HIGHLIGHTED, highlighted);
+ }
+ builder.field(Fields.SCORE, score);
+ return builder;
+ }
+
+ protected void mergeInto(Option otherOption) {
+ score = Math.max(score, otherOption.score);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ Option that = (Option) o;
+ return text.equals(that.text);
+
+ }
+
+ @Override
+ public int hashCode() {
+ return text.hashCode();
+ }
+ }
+ }
+
+ public enum Sort {
+
+ /**
+ * Sort should first be based on score.
+ */
+ SCORE((byte) 0x0),
+
+ /**
+ * Sort should first be based on document frequency.
+ */
+ FREQUENCY((byte) 0x1);
+
+ private byte id;
+
+ private Sort(byte id) {
+ this.id = id;
+ }
+
+ public byte id() {
+ return id;
+ }
+
+ public static Sort fromId(byte id) {
+ if (id == 0) {
+ return SCORE;
+ } else if (id == 1) {
+ return FREQUENCY;
+ } else {
+ throw new ElasticsearchException("Illegal suggest sort " + id);
+ }
+ }
+ }
+ }
+}
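
For orientation, the group()/reduce() pair above implements a scatter-gather merge: per-shard suggestions are bucketed by suggestion name, and each bucket is then collapsed into a single result. Below is a minimal, self-contained sketch of that pattern; it assumes nothing from the Elasticsearch API, and the ShardSuggestion class and all values in it are illustrative stand-ins.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ShardMergeSketch {

    static class ShardSuggestion {
        final String name;
        final List<String> options;

        ShardSuggestion(String name, List<String> options) {
            this.name = name;
            this.options = options;
        }
    }

    public static void main(String[] args) {
        // results from two shards for the same named suggestion
        List<ShardSuggestion> shardResults = Arrays.asList(
                new ShardSuggestion("spelling", Arrays.asList("developer")),
                new ShardSuggestion("spelling", Arrays.asList("developed")));

        // group: bucket the shard results by suggestion name
        Map<String, List<ShardSuggestion>> grouped = new HashMap<String, List<ShardSuggestion>>();
        for (ShardSuggestion suggestion : shardResults) {
            List<ShardSuggestion> bucket = grouped.get(suggestion.name);
            if (bucket == null) {
                bucket = new ArrayList<ShardSuggestion>();
                grouped.put(suggestion.name, bucket);
            }
            bucket.add(suggestion);
        }

        // reduce: collapse each bucket into one merged suggestion
        for (Map.Entry<String, List<ShardSuggestion>> entry : grouped.entrySet()) {
            List<String> merged = new ArrayList<String>();
            for (ShardSuggestion suggestion : entry.getValue()) {
                merged.addAll(suggestion.options);
            }
            System.out.println(entry.getKey() + " -> " + merged); // spelling -> [developer, developed]
        }
    }
}
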
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java
new file mode 100644
index 0000000..a4609e8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
+
+/**
+ * Defines how to perform suggesting. This builder allows a number of global options to be specified and
+ * an arbitrary number of {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder} instances.
+ * <p/>
+ * Suggesting works by proposing terms that are similar to the terms in the suggest text. How these
+ * suggestions are generated is controlled by the options described in this class.
+ */
+public class SuggestBuilder implements ToXContent {
+
+ private final String name;
+ private String globalText;
+
+ private final List<SuggestionBuilder<?>> suggestions = new ArrayList<SuggestionBuilder<?>>();
+
+ public SuggestBuilder() {
+ this.name = null;
+ }
+
+ public SuggestBuilder(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Sets the text to provide suggestions for. The suggest text is a required option that needs
+ * to be set either via this setter or via the {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder#text(String)} method.
+ * <p/>
+ * The suggest text gets analyzed by the suggest analyzer or the suggest field search analyzer.
+ * For each analyzed token, suggestions are generated where possible.
+ */
+ public SuggestBuilder setText(String globalText) {
+ this.globalText = globalText;
+ return this;
+ }
+
+ /**
+ * Adds a {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder} instance under a user defined name.
+ * The suggestions appear in the response in the same order in which they were added.
+ */
+ public SuggestBuilder addSuggestion(SuggestionBuilder<?> suggestion) {
+ suggestions.add(suggestion);
+ return this;
+ }
+
+ /**
+ * Returns all suggestions that have been added to this builder.
+ */
+ public List<SuggestionBuilder<?>> getSuggestion() {
+ return suggestions;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if(name == null) {
+ builder.startObject();
+ } else {
+ builder.startObject(name);
+ }
+
+ if (globalText != null) {
+ builder.field("text", globalText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder = suggestion.toXContent(builder, params);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ /**
+ * Convenience factory method.
+ *
+ * @param name The name of this suggestion. This is a required parameter.
+ */
+ public static TermSuggestionBuilder termSuggestion(String name) {
+ return new TermSuggestionBuilder(name);
+ }
+
+ /**
+ * Convenience factory method.
+ *
+ * @param name The name of this suggestion. This is a required parameter.
+ */
+ public static PhraseSuggestionBuilder phraseSuggestion(String name) {
+ return new PhraseSuggestionBuilder(name);
+ }
+
+ public static abstract class SuggestionBuilder<T> implements ToXContent {
+
+ private String name;
+ private String suggester;
+ private String text;
+ private String field;
+ private String analyzer;
+ private Integer size;
+ private Integer shardSize;
+
+ public SuggestionBuilder(String name, String suggester) {
+ this.name = name;
+ this.suggester = suggester;
+ }
+
+ /**
+ * Same as in {@link SuggestBuilder#setText(String)}, but in the suggestion scope.
+ */
+ @SuppressWarnings("unchecked")
+ public T text(String text) {
+ this.text = text;
+ return (T) this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+ if (text != null) {
+ builder.field("text", text);
+ }
+ builder.startObject(suggester);
+ if (analyzer != null) {
+ builder.field("analyzer", analyzer);
+ }
+ if (field != null) {
+ builder.field("field", field);
+ }
+ if (size != null) {
+ builder.field("size", size);
+ }
+ if (shardSize != null) {
+ builder.field("shard_size", shardSize);
+ }
+ builder = innerToXContent(builder, params);
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+
+ protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException;
+
+ /**
+ * Sets the field to fetch the candidate suggestions from. This is a
+ * required option that needs to be set via this setter or via
+ * {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder#field(String)}.
+ */
+ @SuppressWarnings("unchecked")
+ public T field(String field) {
+ this.field = field;
+ return (T)this;
+ }
+
+ /**
+ * Sets the analyzer used to analyze the suggest text. Defaults to the search
+ * analyzer of the suggest field.
+ */
+ @SuppressWarnings("unchecked")
+ public T analyzer(String analyzer) {
+ this.analyzer = analyzer;
+ return (T)this;
+ }
+
+ /**
+ * Sets the maximum suggestions to be returned per suggest text term.
+ */
+ @SuppressWarnings("unchecked")
+ public T size(int size) {
+ if (size <= 0) {
+ throw new ElasticsearchIllegalArgumentException("Size must be positive");
+ }
+ this.size = size;
+ return (T)this;
+ }
+
+ /**
+ * Sets the maximum number of suggested terms to be retrieved from each
+ * individual shard. During the reduce phase only the top N suggestions
+ * are returned, based on the <code>size</code> option. Defaults to the
+ * <code>size</code> option.
+ * <p/>
+ * Setting this to a value higher than <code>size</code> can be useful in order to
+ * get a more accurate document frequency for suggested terms. Because
+ * terms are partitioned amongst shards, the shard level document
+ * frequencies of suggestions may not be precise. Increasing this makes
+ * these document frequencies more precise.
+ */
+ @SuppressWarnings("unchecked")
+ public T shardSize(Integer shardSize) {
+ this.shardSize = shardSize;
+ return (T)this;
+ }
+ }
+}
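
A minimal usage sketch of the builder defined above, rendered to JSON via the XContent API. The suggestion name ("fix-typo"), field ("body") and suggest text are illustrative only; the calls themselves are the ones declared in this file, assuming the 1.x XContent helpers.

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.search.suggest.SuggestBuilder;

public class SuggestBuilderSketch {
    public static void main(String[] args) throws Exception {
        SuggestBuilder suggest = new SuggestBuilder("suggest")
                .setText("devloper")                                     // global suggest text
                .addSuggestion(SuggestBuilder.termSuggestion("fix-typo") // user defined name
                        .field("body")                                   // field to draw candidates from
                        .size(3));                                       // max options per suggest term

        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        builder.startObject();
        suggest.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        System.out.println(builder.string()); // {"suggest": {"text": "devloper", "fix-typo": {...}}}
    }
}
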
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java b/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java
new file mode 100644
index 0000000..ce16542
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import java.io.IOException;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+public interface SuggestContextParser {
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService) throws IOException;
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestModule.java b/src/main/java/org/elasticsearch/search/suggest/SuggestModule.java
new file mode 100644
index 0000000..c01ab3a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestModule.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggester;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggester;
+import org.elasticsearch.search.suggest.term.TermSuggester;
+
+import java.util.List;
+
+/**
+ *
+ */
+public class SuggestModule extends AbstractModule {
+
+ private List<Class<? extends Suggester>> suggesters = Lists.newArrayList();
+
+ public SuggestModule() {
+ registerSuggester(PhraseSuggester.class);
+ registerSuggester(TermSuggester.class);
+ registerSuggester(CompletionSuggester.class);
+ }
+
+ public void registerSuggester(Class<? extends Suggester> suggester) {
+ suggesters.add(suggester);
+ }
+
+ @Override
+ protected void configure() {
+ Multibinder<Suggester> suggesterMultibinder = Multibinder.newSetBinder(binder(), Suggester.class);
+ for (Class<? extends Suggester> clazz : suggesters) {
+ suggesterMultibinder.addBinding().to(clazz);
+ }
+
+ bind(SuggestParseElement.class).asEagerSingleton();
+ bind(SuggestPhase.class).asEagerSingleton();
+ bind(Suggesters.class).asEagerSingleton();
+ }
+}
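
configure() above uses Guice's Multibinder, so each registered class contributes one element to an injected Set<Suggester>. A plugin-style registration would therefore look roughly like the fragment below; MySuggester is a hypothetical class extending Suggester, and the surrounding node bootstrap is assumed to supply the other bindings (Settings, MapperService, ...).

SuggestModule module = new SuggestModule();   // phrase, term and completion are pre-registered
module.registerSuggester(MySuggester.class);  // contributes one more element to the Set<Suggester>
// once the injector is built, Suggesters receives the complete set in its @Inject constructor
// and exposes each suggester under the names it reports via Suggester.names()
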
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
new file mode 100644
index 0000000..a27e1f1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ *
+ */
+public final class SuggestParseElement implements SearchParseElement {
+ private Suggesters suggesters;
+
+ @Inject
+ public SuggestParseElement(Suggesters suggesters) {
+ this.suggesters = suggesters;
+ }
+
+ @Override
+ public void parse(XContentParser parser, SearchContext context) throws Exception {
+ SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.shardTarget().index(), context.shardTarget().shardId());
+ context.suggest(suggestionSearchContext);
+ }
+
+ public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, String index, int shardId) throws IOException {
+ SuggestionSearchContext suggestionSearchContext = new SuggestionSearchContext();
+ BytesRef globalText = null;
+ String fieldName = null;
+ Map<String, SuggestionContext> suggestionContexts = newHashMap();
+
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("text".equals(fieldName)) {
+ globalText = parser.bytes();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("[suggest] does not support [" + fieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String suggestionName = fieldName;
+ BytesRef suggestText = null;
+ SuggestionContext suggestionContext = null;
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("text".equals(fieldName)) {
+ suggestText = parser.bytes();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("[suggest] does not support [" + fieldName + "]");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if (suggestionName == null) {
+ throw new ElasticsearchIllegalArgumentException("Suggestion must have a name");
+ }
+ if (suggesters.get(fieldName) == null) {
+ throw new ElasticsearchIllegalArgumentException("Suggester[" + fieldName + "] not supported");
+ }
+ final SuggestContextParser contextParser = suggesters.get(fieldName).getContextParser();
+ suggestionContext = contextParser.parse(parser, mapperService);
+ }
+ }
+ if (suggestionContext != null) {
+ suggestionContext.setText(suggestText);
+ suggestionContexts.put(suggestionName, suggestionContext);
+ }
+
+ }
+ }
+
+ for (Map.Entry<String, SuggestionContext> entry : suggestionContexts.entrySet()) {
+ String suggestionName = entry.getKey();
+ SuggestionContext suggestionContext = entry.getValue();
+
+ suggestionContext.setShard(shardId);
+ suggestionContext.setIndex(index);
+ SuggestUtils.verifySuggestion(mapperService, globalText, suggestionContext);
+ suggestionSearchContext.addSuggestion(suggestionName, suggestionContext);
+ }
+
+ return suggestionSearchContext;
+ }
+}
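
parseInternal() above expects the following shape: an optional global "text" field, then one object per named suggestion, each holding an optional "text" plus exactly one inner object keyed by the suggester type. A minimal sketch that builds such a body with the XContent API; the names and values are illustrative.

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class SuggestBodySketch {
    public static void main(String[] args) throws Exception {
        XContentBuilder body = XContentFactory.jsonBuilder()
                .startObject()
                    .field("text", "devloper")          // global suggest text
                    .startObject("fix-typo")            // user defined suggestion name
                        .startObject("term")            // suggester type, resolved via Suggesters.get("term")
                            .field("field", "body")     // handled by SuggestUtils.parseSuggestContext
                        .endObject()
                    .endObject()
                .endObject();
        System.out.println(body.string());
    }
}
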
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java b/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
new file mode 100644
index 0000000..77c45cb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.CharsRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.SearchPhase;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.suggest.Suggest.Suggestion;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ */
+public class SuggestPhase extends AbstractComponent implements SearchPhase {
+
+ private final SuggestParseElement parseElement;
+
+ @Inject
+ public SuggestPhase(Settings settings, SuggestParseElement suggestParseElement) {
+ super(settings);
+ this.parseElement = suggestParseElement;
+ }
+
+ @Override
+ public Map<String, ? extends SearchParseElement> parseElements() {
+ ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
+ parseElements.put("suggest", parseElement);
+ return parseElements.build();
+ }
+
+ public SuggestParseElement parseElement() {
+ return parseElement;
+ }
+
+ @Override
+ public void preProcess(SearchContext context) {
+ }
+
+ @Override
+ public void execute(SearchContext context) throws ElasticsearchException {
+ final SuggestionSearchContext suggest = context.suggest();
+ if (suggest == null) {
+ return;
+ }
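+ // run every configured suggestion against this shard's reader and attach the result to the shard query result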
+ context.queryResult().suggest(execute(suggest, context.searcher().getIndexReader()));
+ }
+
+ public Suggest execute(SuggestionSearchContext suggest, IndexReader reader) {
+ try {
+ CharsRef spare = new CharsRef(); // Maybe add CharsRef to CacheRecycler?
+ final List<Suggestion<? extends Entry<? extends Option>>> suggestions = new ArrayList<Suggestion<? extends Entry<? extends Option>>>(suggest.suggestions().size());
+
+ for (Map.Entry<String, SuggestionSearchContext.SuggestionContext> entry : suggest.suggestions().entrySet()) {
+ SuggestionSearchContext.SuggestionContext suggestion = entry.getValue();
+ Suggester<SuggestionContext> suggester = suggestion.getSuggester();
+ Suggestion<? extends Entry<? extends Option>> result = suggester.execute(entry.getKey(), suggestion, reader, spare);
+ if (result != null) {
+ assert entry.getKey().equals(result.name);
+ suggestions.add(result);
+ }
+ }
+
+ return new Suggest(Suggest.Fields.SUGGEST, suggestions);
+ } catch (IOException e) {
+ throw new ElasticsearchException("I/O exception during suggest phase", e);
+ }
+ }
+}
+
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java
new file mode 100644
index 0000000..c61fa04
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.search.spell.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util.automaton.LevenshteinAutomata;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.FastCharArrayReader;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.analysis.CustomAnalyzer;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.ShingleTokenFilterFactory;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Locale;
+
+public final class SuggestUtils {
+ public static Comparator<SuggestWord> LUCENE_FREQUENCY = new SuggestWordFrequencyComparator();
+ public static Comparator<SuggestWord> SCORE_COMPARATOR = SuggestWordQueue.DEFAULT_COMPARATOR;
+
+ private SuggestUtils() {
+ // utils!!
+ }
+
+ public static DirectSpellChecker getDirectSpellChecker(DirectSpellcheckerSettings suggestion) {
+
+ DirectSpellChecker directSpellChecker = new DirectSpellChecker();
+ directSpellChecker.setAccuracy(suggestion.accuracy());
+ Comparator<SuggestWord> comparator;
+ switch (suggestion.sort()) {
+ case SCORE:
+ comparator = SCORE_COMPARATOR;
+ break;
+ case FREQUENCY:
+ comparator = LUCENE_FREQUENCY;
+ break;
+ default:
+ throw new ElasticsearchIllegalArgumentException("Illegal suggest sort: " + suggestion.sort());
+ }
+ directSpellChecker.setComparator(comparator);
+ directSpellChecker.setDistance(suggestion.stringDistance());
+ directSpellChecker.setMaxEdits(suggestion.maxEdits());
+ directSpellChecker.setMaxInspections(suggestion.maxInspections());
+ directSpellChecker.setMaxQueryFrequency(suggestion.maxTermFreq());
+ directSpellChecker.setMinPrefix(suggestion.prefixLength());
+ directSpellChecker.setMinQueryLength(suggestion.minWordLength());
+ directSpellChecker.setThresholdFrequency(suggestion.minDocFreq());
+ directSpellChecker.setLowerCaseTerms(false);
+ return directSpellChecker;
+ }
+
+ public static BytesRef join(BytesRef separator, BytesRef result, BytesRef... toJoin) {
+ int len = separator.length * toJoin.length - 1;
+ for (BytesRef br : toJoin) {
+ len += br.length;
+ }
+
+ result.grow(len);
+ return joinPreAllocated(separator, result, toJoin);
+ }
+
+ public static BytesRef joinPreAllocated(BytesRef separator, BytesRef result, BytesRef... toJoin) {
+ result.length = 0;
+ result.offset = 0;
+ for (int i = 0; i < toJoin.length - 1; i++) {
+ BytesRef br = toJoin[i];
+ System.arraycopy(br.bytes, br.offset, result.bytes, result.offset, br.length);
+ result.offset += br.length;
+ System.arraycopy(separator.bytes, separator.offset, result.bytes, result.offset, separator.length);
+ result.offset += separator.length;
+ }
+ final BytesRef br = toJoin[toJoin.length-1];
+ System.arraycopy(br.bytes, br.offset, result.bytes, result.offset, br.length);
+
+ result.length = result.offset + br.length;
+ result.offset = 0;
+ return result;
+ }
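+
+ // Example: join(new BytesRef("_"), new BytesRef(), new BytesRef("foo"), new BytesRef("bar"))
+ // fills the result with "foo_bar". Note that join() grows the buffer to
+ // separator.length * toJoin.length - 1 plus the operand lengths, slightly more than the exact
+ // separator.length * (toJoin.length - 1); joinPreAllocated() assumes the capacity is sufficient.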
+
+ public static abstract class TokenConsumer {
+ protected CharTermAttribute charTermAttr;
+ protected PositionIncrementAttribute posIncAttr;
+ protected OffsetAttribute offsetAttr;
+
+ public void reset(TokenStream stream) {
+ charTermAttr = stream.addAttribute(CharTermAttribute.class);
+ posIncAttr = stream.addAttribute(PositionIncrementAttribute.class);
+ offsetAttr = stream.addAttribute(OffsetAttribute.class);
+ }
+
+ protected BytesRef fillBytesRef(BytesRef spare) {
+ spare.offset = 0;
+ spare.length = spare.bytes.length;
+ char[] source = charTermAttr.buffer();
+ UnicodeUtil.UTF16toUTF8(source, 0, charTermAttr.length(), spare);
+ return spare;
+ }
+
+ public abstract void nextToken() throws IOException;
+
+ public void end() {}
+ }
+
+ public static int analyze(Analyzer analyzer, BytesRef toAnalyze, String field, TokenConsumer consumer, CharsRef spare) throws IOException {
+ UnicodeUtil.UTF8toUTF16(toAnalyze, spare);
+ return analyze(analyzer, spare, field, consumer);
+ }
+
+ public static int analyze(Analyzer analyzer, CharsRef toAnalyze, String field, TokenConsumer consumer) throws IOException {
+ TokenStream ts = analyzer.tokenStream(
+ field, new FastCharArrayReader(toAnalyze.chars, toAnalyze.offset, toAnalyze.length)
+ );
+ return analyze(ts, consumer);
+ }
+
+ public static int analyze(TokenStream stream, TokenConsumer consumer) throws IOException {
+ stream.reset();
+ consumer.reset(stream);
+ int numTokens = 0;
+ while (stream.incrementToken()) {
+ consumer.nextToken();
+ numTokens++;
+ }
+ consumer.end();
+ stream.close();
+ return numTokens;
+ }
+
+ public static SuggestMode resolveSuggestMode(String suggestMode) {
+ suggestMode = suggestMode.toLowerCase(Locale.US);
+ if ("missing".equals(suggestMode)) {
+ return SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
+ } else if ("popular".equals(suggestMode)) {
+ return SuggestMode.SUGGEST_MORE_POPULAR;
+ } else if ("always".equals(suggestMode)) {
+ return SuggestMode.SUGGEST_ALWAYS;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Illegal suggest mode " + suggestMode);
+ }
+ }
+
+ public static Suggest.Suggestion.Sort resolveSort(String sortVal) {
+ if ("score".equals(sortVal)) {
+ return Suggest.Suggestion.Sort.SCORE;
+ } else if ("frequency".equals(sortVal)) {
+ return Suggest.Suggestion.Sort.FREQUENCY;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Illegal suggest sort " + sortVal);
+ }
+ }
+
+ public static StringDistance resolveDistance(String distanceVal) {
+ if ("internal".equals(distanceVal)) {
+ return DirectSpellChecker.INTERNAL_LEVENSHTEIN;
+ } else if ("damerau_levenshtein".equals(distanceVal) || "damerauLevenshtein".equals(distanceVal)) {
+ return new LuceneLevenshteinDistance();
+ } else if ("levenstein".equals(distanceVal)) {
+ return new LevensteinDistance();
+ // TODO: Jaro and Winkler are two people, so apply the same naming logic as damerau_levenshtein
+ } else if ("jarowinkler".equals(distanceVal)) {
+ return new JaroWinklerDistance();
+ } else if ("ngram".equals(distanceVal)) {
+ return new NGramDistance();
+ } else {
+ throw new ElasticsearchIllegalArgumentException("Illegal distance option " + distanceVal);
+ }
+ }
+
+ public static class Fields {
+ public static final ParseField STRING_DISTANCE = new ParseField("string_distance");
+ public static final ParseField SUGGEST_MODE = new ParseField("suggest_mode");
+ public static final ParseField MAX_EDITS = new ParseField("max_edits");
+ public static final ParseField MAX_INSPECTIONS = new ParseField("max_inspections");
+ // TODO some of these constants are the same as MLT constants and
+ // could be moved to a shared class for maintaining consistency across
+ // the platform
+ public static final ParseField MAX_TERM_FREQ = new ParseField("max_term_freq");
+ public static final ParseField PREFIX_LENGTH = new ParseField("prefix_length", "prefix_len");
+ public static final ParseField MIN_WORD_LENGTH = new ParseField("min_word_length", "min_word_len");
+ public static final ParseField MIN_DOC_FREQ = new ParseField("min_doc_freq");
+ public static final ParseField SHARD_SIZE = new ParseField("shard_size");
+ }
+
+ public static boolean parseDirectSpellcheckerSettings(XContentParser parser, String fieldName,
+ DirectSpellcheckerSettings suggestion) throws IOException {
+ if ("accuracy".equals(fieldName)) {
+ suggestion.accuracy(parser.floatValue());
+ } else if (Fields.SUGGEST_MODE.match(fieldName)) {
+ suggestion.suggestMode(SuggestUtils.resolveSuggestMode(parser.text()));
+ } else if ("sort".equals(fieldName)) {
+ suggestion.sort(SuggestUtils.resolveSort(parser.text()));
+ } else if (Fields.STRING_DISTANCE.match(fieldName)) {
+ suggestion.stringDistance(SuggestUtils.resolveDistance(parser.text()));
+ } else if (Fields.MAX_EDITS.match(fieldName)) {
+ suggestion.maxEdits(parser.intValue());
+ if (suggestion.maxEdits() < 1 || suggestion.maxEdits() > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
+ throw new ElasticsearchIllegalArgumentException("Illegal max_edits value " + suggestion.maxEdits());
+ }
+ } else if (Fields.MAX_INSPECTIONS.match(fieldName)) {
+ suggestion.maxInspections(parser.intValue());
+ } else if (Fields.MAX_TERM_FREQ.match(fieldName)) {
+ suggestion.maxTermFreq(parser.floatValue());
+ } else if (Fields.PREFIX_LENGTH.match(fieldName)) {
+ suggestion.prefixLength(parser.intValue());
+ } else if (Fields.MIN_WORD_LENGTH.match(fieldName)) {
+ suggestion.minQueryLength(parser.intValue());
+ } else if (Fields.MIN_DOC_FREQ.match(fieldName)) {
+ suggestion.minDocFreq(parser.floatValue());
+ } else {
+ return false;
+ }
+ return true;
+ }
+
+ public static boolean parseSuggestContext(XContentParser parser, MapperService mapperService, String fieldName,
+ SuggestionSearchContext.SuggestionContext suggestion) throws IOException {
+
+ if ("analyzer".equals(fieldName)) {
+ String analyzerName = parser.text();
+ Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
+ }
+ suggestion.setAnalyzer(analyzer);
+ } else if ("field".equals(fieldName)) {
+ suggestion.setField(parser.text());
+ } else if ("size".equals(fieldName)) {
+ suggestion.setSize(parser.intValue());
+ } else if (Fields.SHARD_SIZE.match(fieldName)) {
+ suggestion.setShardSize(parser.intValue());
+ } else {
+ return false;
+ }
+ return true;
+
+ }
+
+
+ public static void verifySuggestion(MapperService mapperService, BytesRef globalText, SuggestionContext suggestion) {
+ // Verify options and set defaults
+ if (suggestion.getField() == null) {
+ throw new ElasticsearchIllegalArgumentException("The required field option is missing");
+ }
+ if (suggestion.getText() == null) {
+ if (globalText == null) {
+ throw new ElasticsearchIllegalArgumentException("The required text option is missing");
+ }
+ suggestion.setText(globalText);
+ }
+ if (suggestion.getAnalyzer() == null) {
+ suggestion.setAnalyzer(mapperService.searchAnalyzer());
+ }
+ if (suggestion.getShardSize() == -1) {
+ suggestion.setShardSize(Math.max(suggestion.getSize(), 5));
+ }
+ }
+
+
+ public static ShingleTokenFilterFactory.Factory getShingleFilterFactory(Analyzer analyzer) {
+ if (analyzer instanceof NamedAnalyzer) {
+ analyzer = ((NamedAnalyzer)analyzer).analyzer();
+ }
+ if (analyzer instanceof CustomAnalyzer) {
+ final CustomAnalyzer a = (CustomAnalyzer) analyzer;
+ final TokenFilterFactory[] tokenFilters = a.tokenFilters();
+ for (TokenFilterFactory tokenFilterFactory : tokenFilters) {
+ if (tokenFilterFactory instanceof ShingleTokenFilterFactory) {
+ return ((ShingleTokenFilterFactory)tokenFilterFactory).getInnerFactory();
+ } else if (tokenFilterFactory instanceof ShingleTokenFilterFactory.Factory) {
+ return (ShingleTokenFilterFactory.Factory) tokenFilterFactory;
+ }
+ }
+ }
+ return null;
+ }
+}
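
The TokenConsumer/analyze() helpers above drive an analyzer and hand each token to a callback. A minimal, self-contained sketch follows; the analyzer choice and input text are illustrative, and Lucene 4.6 is assumed on the classpath, as in this codebase.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.Version;
import org.elasticsearch.search.suggest.SuggestUtils;

public class TokenConsumerSketch {
    public static void main(String[] args) throws IOException {
        final List<String> tokens = new ArrayList<String>();
        // reset() wires charTermAttr/posIncAttr/offsetAttr to the stream before iteration starts
        int count = SuggestUtils.analyze(new StandardAnalyzer(Version.LUCENE_46),
                new CharsRef("Foo BAR"), "body", new SuggestUtils.TokenConsumer() {
                    @Override
                    public void nextToken() throws IOException {
                        tokens.add(charTermAttr.toString());
                    }
                });
        System.out.println(count + " tokens: " + tokens); // 2 tokens: [foo, bar]
    }
}
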
diff --git a/src/main/java/org/elasticsearch/search/suggest/Suggester.java b/src/main/java/org/elasticsearch/search/suggest/Suggester.java
new file mode 100644
index 0000000..0630219
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/Suggester.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.CharsRef;
+
+import java.io.IOException;
+
+public abstract class Suggester<T extends SuggestionSearchContext.SuggestionContext> {
+
+ protected abstract Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>
+ innerExecute(String name, T suggestion, IndexReader indexReader, CharsRef spare) throws IOException;
+
+ public abstract String[] names();
+
+ public abstract SuggestContextParser getContextParser();
+
+ public Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>
+ execute(String name, T suggestion, IndexReader indexReader, CharsRef spare) throws IOException {
+ // #3469 We want to ignore empty shards
+ if (indexReader.numDocs() == 0) {
+ return null;
+ }
+ return innerExecute(name, suggestion, indexReader, spare);
+ }
+
+}
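
Suggester is the shard-level extension point: innerExecute() produces one Suggestion per shard, and execute() short-circuits empty shards (see #3469 above). Below is a hypothetical skeleton implementation for illustration only; EchoSuggester and the "echo" name are not part of Elasticsearch.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.CharsRef;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestContextParser;
import org.elasticsearch.search.suggest.Suggester;
import org.elasticsearch.search.suggest.SuggestionSearchContext;

public class EchoSuggester extends Suggester<SuggestionSearchContext.SuggestionContext> {

    @Override
    protected Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> innerExecute(
            String name, SuggestionSearchContext.SuggestionContext suggestion,
            IndexReader indexReader, CharsRef spare) throws IOException {
        String text = suggestion.getText().utf8ToString();
        Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> result =
                new Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>>(name, suggestion.getSize());
        // one entry spanning the whole suggest text, echoed back as the only option
        Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> entry =
                new Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>(new StringText(text), 0, text.length());
        entry.addOption(new Suggest.Suggestion.Entry.Option(new StringText(text), 1.0f));
        result.addTerm(entry);
        return result;
    }

    @Override
    public String[] names() {
        return new String[] { "echo" }; // the key looked up via Suggesters.get()
    }

    @Override
    public SuggestContextParser getContextParser() {
        // parsing of request options is out of scope for this sketch
        throw new UnsupportedOperationException("sketch only");
    }
}
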
diff --git a/src/main/java/org/elasticsearch/search/suggest/Suggesters.java b/src/main/java/org/elasticsearch/search/suggest/Suggesters.java
new file mode 100644
index 0000000..356bf20
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/Suggesters.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+
+import java.util.Set;
+
+/**
+ *
+ */
+public class Suggesters {
+ private final ImmutableMap<String, Suggester> parsers;
+
+ @Inject
+ public Suggesters(Set<Suggester> suggesters) {
+ MapBuilder<String, Suggester> builder = MapBuilder.newMapBuilder();
+ for (Suggester suggester : suggesters) {
+ for (String type : suggester.names()) {
+ builder.put(type, suggester);
+ }
+ }
+ this.parsers = builder.immutableMap();
+ }
+
+ public Suggester get(String type) {
+ return parsers.get(type);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java
new file mode 100644
index 0000000..a45f979
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ */
+public class SuggestionSearchContext {
+
+ private final Map<String, SuggestionContext> suggestions = new LinkedHashMap<String, SuggestionContext>(4);
+
+ public void addSuggestion(String name, SuggestionContext suggestion) {
+ suggestions.put(name, suggestion);
+ }
+
+ public Map<String, SuggestionContext> suggestions() {
+ return suggestions;
+ }
+
+ public static class SuggestionContext {
+
+ private BytesRef text;
+ private final Suggester suggester;
+ private String field;
+ private Analyzer analyzer;
+ private int size = 5;
+ private int shardSize = -1;
+ private int shardId;
+ private String index;
+
+ public BytesRef getText() {
+ return text;
+ }
+
+ public void setText(BytesRef text) {
+ this.text = text;
+ }
+
+ public SuggestionContext(Suggester suggester) {
+ this.suggester = suggester;
+ }
+
+ public Suggester<SuggestionContext> getSuggester() {
+ return this.suggester;
+ }
+
+ public Analyzer getAnalyzer() {
+ return analyzer;
+ }
+
+ public void setAnalyzer(Analyzer analyzer) {
+ this.analyzer = analyzer;
+ }
+
+ public String getField() {
+ return field;
+ }
+
+ public void setField(String field) {
+ this.field = field;
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ if (size <= 0) {
+ throw new ElasticsearchIllegalArgumentException("Size must be positive but was: " + size);
+ }
+ this.size = size;
+ }
+
+ public Integer getShardSize() {
+ return shardSize;
+ }
+
+ public void setShardSize(int shardSize) {
+ if (shardSize <= 0) {
+ throw new ElasticsearchIllegalArgumentException("ShardSize must be positive but was: " + shardSize);
+ }
+ this.shardSize = shardSize;
+ }
+
+ public void setShard(int shardId) {
+ this.shardId = shardId;
+ }
+
+ public void setIndex(String index) {
+ this.index = index;
+ }
+
+ public String getIndex() {
+ return index;
+ }
+
+ public int getShard() {
+ return shardId;
+ }
+ }
+
+
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java b/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java
new file mode 100644
index 0000000..6714eed
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.fst.ByteSequenceOutputs;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.PairOutputs;
+import org.apache.lucene.util.fst.PairOutputs.Pair;
+import org.apache.lucene.util.fst.PositiveIntOutputs;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.CompletionLookupProvider;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+
+import java.io.IOException;
+import java.util.*;
+
+public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider {
+
+ // for serialization
+ public static final int SERIALIZE_PRESERVE_SEPERATORS = 1;
+ public static final int SERIALIZE_HAS_PAYLOADS = 2;
+ public static final int SERIALIZE_PRESERVE_POSITION_INCREMENTS = 4;
+
+ private static final int MAX_SURFACE_FORMS_PER_ANALYZED_FORM = 256;
+ private static final int MAX_GRAPH_EXPANSIONS = -1;
+
+ public static final String CODEC_NAME = "analyzing";
+ public static final int CODEC_VERSION_START = 1;
+ public static final int CODEC_VERSION_LATEST = 2;
+
+ private boolean preserveSep;
+ private boolean preservePositionIncrements;
+ private int maxSurfaceFormsPerAnalyzedForm;
+ private int maxGraphExpansions;
+ private boolean hasPayloads;
+ private final XAnalyzingSuggester prototype;
+
+ public AnalyzingCompletionLookupProvider(boolean preserveSep, boolean exactFirst, boolean preservePositionIncrements, boolean hasPayloads) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.hasPayloads = hasPayloads;
+ this.maxSurfaceFormsPerAnalyzedForm = MAX_SURFACE_FORMS_PER_ANALYZED_FORM;
+ this.maxGraphExpansions = MAX_GRAPH_EXPANSIONS;
+ int options = preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+ // needs to be fixed in the suggester first before it can be supported
+ //options |= exactFirst ? XAnalyzingSuggester.EXACT_FIRST : 0;
+ prototype = new XAnalyzingSuggester(null, null, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, preservePositionIncrements, null, false, 1, XAnalyzingSuggester.SEP_LABEL, XAnalyzingSuggester.PAYLOAD_SEP, XAnalyzingSuggester.END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+
+ @Override
+ public String getName() {
+ return "analyzing";
+ }
+
+ @Override
+ public FieldsConsumer consumer(final IndexOutput output) throws IOException {
+ CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION_LATEST);
+ return new FieldsConsumer() {
+ private Map<FieldInfo, Long> fieldOffsets = new HashMap<FieldInfo, Long>();
+
+ @Override
+ public void close() throws IOException {
+ try {
+ /*
+ * write the offsets per field such that we know where
+ * we need to load the FSTs from
+ */
+ long pointer = output.getFilePointer();
+ output.writeVInt(fieldOffsets.size());
+ for (Map.Entry<FieldInfo, Long> entry : fieldOffsets.entrySet()) {
+ output.writeString(entry.getKey().name);
+ output.writeVLong(entry.getValue());
+ }
+ output.writeLong(pointer);
+ output.flush();
+ } finally {
+ IOUtils.close(output);
+ }
+ }
+
+ @Override
+ public TermsConsumer addField(final FieldInfo field) throws IOException {
+
+ return new TermsConsumer() {
+ final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP);
+ final CompletionPostingsConsumer postingsConsumer = new CompletionPostingsConsumer(AnalyzingCompletionLookupProvider.this, builder);
+
+ @Override
+ public PostingsConsumer startTerm(BytesRef text) throws IOException {
+ builder.startTerm(text);
+ return postingsConsumer;
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() throws IOException {
+ return BytesRef.getUTF8SortedAsUnicodeComparator();
+ }
+
+ @Override
+ public void finishTerm(BytesRef text, TermStats stats) throws IOException {
+ builder.finishTerm(stats.docFreq); // use doc freq as a fallback
+ }
+
+ @Override
+ public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount) throws IOException {
+ /*
+ * Here we are done processing the field and we can
+ * build the FST and write it to disk.
+ */
+ FST<Pair<Long, BytesRef>> build = builder.build();
+ assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: [" + docCount + "]";
+ /*
+ * it's possible that the FST is null if we have 2 segments that get merged
+ * and all docs that have a value in this field are deleted. This will cause
+ * a consumer to be created but it doesn't consume any values causing the FSTBuilder
+ * to return null.
+ */
+ if (build != null) {
+ fieldOffsets.put(field, output.getFilePointer());
+ build.save(output);
+ /* write some more meta-info */
+ output.writeVInt(postingsConsumer.getMaxAnalyzedPathsForOneInput());
+ output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
+ output.writeInt(maxGraphExpansions); // can be negative
+ int options = 0;
+ options |= preserveSep ? SERIALIZE_PRESERVE_SEPERATORS : 0;
+ options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
+ options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
+ output.writeVInt(options);
+ output.writeVInt(XAnalyzingSuggester.SEP_LABEL);
+ output.writeVInt(XAnalyzingSuggester.END_BYTE);
+ output.writeVInt(XAnalyzingSuggester.PAYLOAD_SEP);
+ output.writeVInt(XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ private static final class CompletionPostingsConsumer extends PostingsConsumer {
+ private final SuggestPayload spare = new SuggestPayload();
+ private AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider;
+ private XAnalyzingSuggester.XBuilder builder;
+ private int maxAnalyzedPathsForOneInput = 0;
+
+ public CompletionPostingsConsumer(AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider, XAnalyzingSuggester.XBuilder builder) {
+ this.analyzingSuggestLookupProvider = analyzingSuggestLookupProvider;
+ this.builder = builder;
+ }
+
+ @Override
+ public void startDoc(int docID, int freq) throws IOException {
+ }
+
+ @Override
+ public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException {
+ analyzingSuggestLookupProvider.parsePayload(payload, spare);
+ builder.addSurface(spare.surfaceForm, spare.payload, spare.weight);
+ // multi-fields share the same surface form, so their analyzed-path counts
+ // accumulate in the position increment; position + 1 is the running total
+ maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
+ }
+
+ @Override
+ public void finishDoc() throws IOException {
+ }
+
+ public int getMaxAnalyzedPathsForOneInput() {
+ return maxAnalyzedPathsForOneInput;
+ }
+ }
+
+ @Override
+ public LookupFactory load(IndexInput input) throws IOException {
+ long sizeInBytes = 0;
+ int version = CodecUtil.checkHeader(input, CODEC_NAME, CODEC_VERSION_START, CODEC_VERSION_LATEST);
+ final Map<String, AnalyzingSuggestHolder> lookupMap = new HashMap<String, AnalyzingSuggestHolder>();
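+ // the writer ends the file with a fixed-width long pointing at the
+ // field-name/offset table, so resolve that pointer first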
+ input.seek(input.length() - 8);
+ long metaPointer = input.readLong();
+ input.seek(metaPointer);
+ int numFields = input.readVInt();
+
+ Map<Long, String> meta = new TreeMap<Long, String>();
+ for (int i = 0; i < numFields; i++) {
+ String name = input.readString();
+ long offset = input.readVLong();
+ meta.put(offset, name);
+ }
+
+ for (Map.Entry<Long, String> entry : meta.entrySet()) {
+ input.seek(entry.getKey());
+ FST<Pair<Long, BytesRef>> fst = new FST<Pair<Long, BytesRef>>(input, new PairOutputs<Long, BytesRef>(
+ PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
+ int maxAnalyzedPathsForOneInput = input.readVInt();
+ int maxSurfaceFormsPerAnalyzedForm = input.readVInt();
+ int maxGraphExpansions = input.readInt();
+ int options = input.readVInt();
+ boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPERATORS) != 0;
+ boolean hasPayloads = (options & SERIALIZE_HAS_PAYLOADS) != 0;
+ boolean preservePositionIncrements = (options & SERIALIZE_PRESERVE_POSITION_INCREMENTS) != 0;
+
+ // the first version did not include these four fields, so fall back to the old
+ // defaults (before the AnalyzingSuggester was updated in Lucene, so we cannot use
+ // the suggester defaults)
+ int sepLabel, payloadSep, endByte, holeCharacter;
+ switch (version) {
+ case CODEC_VERSION_START:
+ sepLabel = 0xFF;
+ payloadSep = '\u001f';
+ endByte = 0x0;
+ holeCharacter = '\u001E';
+ break;
+ default:
+ sepLabel = input.readVInt();
+ endByte = input.readVInt();
+ payloadSep = input.readVInt();
+ holeCharacter = input.readVInt();
+ }
+
+ AnalyzingSuggestHolder holder = new AnalyzingSuggestHolder(preserveSep, preservePositionIncrements, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions,
+ hasPayloads, maxAnalyzedPathsForOneInput, fst, sepLabel, payloadSep, endByte, holeCharacter);
+ sizeInBytes += fst.sizeInBytes();
+ lookupMap.put(entry.getValue(), holder);
+ }
+ final long ramBytesUsed = sizeInBytes;
+ return new LookupFactory() {
+ @Override
+ public Lookup getLookup(FieldMapper<?> mapper, CompletionSuggestionContext suggestionContext) {
+ AnalyzingSuggestHolder analyzingSuggestHolder = lookupMap.get(mapper.names().indexName());
+ if (analyzingSuggestHolder == null) {
+ return null;
+ }
+ int flags = analyzingSuggestHolder.preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+
+ XAnalyzingSuggester suggester;
+ if (suggestionContext.isFuzzy()) {
+ suggester = new XFuzzySuggester(mapper.indexAnalyzer(), mapper.searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ suggestionContext.getFuzzyEditDistance(), suggestionContext.isFuzzyTranspositions(),
+ suggestionContext.getFuzzyPrefixLength(), suggestionContext.getFuzzyMinLength(), suggestionContext.isFuzzyUnicodeAware(),
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, analyzingSuggestHolder.sepLabel, analyzingSuggestHolder.payloadSep, analyzingSuggestHolder.endByte,
+ analyzingSuggestHolder.holeCharacter);
+
+ } else {
+ suggester = new XAnalyzingSuggester(mapper.indexAnalyzer(), mapper.searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ analyzingSuggestHolder.preservePositionIncrements, analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, analyzingSuggestHolder.sepLabel, analyzingSuggestHolder.payloadSep, analyzingSuggestHolder.endByte,
+ analyzingSuggestHolder.holeCharacter);
+ }
+ return suggester;
+ }
+
+ @Override
+ public CompletionStats stats(String... fields) {
+ long sizeInBytes = 0;
+ ObjectLongOpenHashMap<String> completionFields = null;
+ if (fields != null && fields.length > 0) {
+ completionFields = new ObjectLongOpenHashMap<String>(fields.length);
+ }
+
+ for (Map.Entry<String, AnalyzingSuggestHolder> entry : lookupMap.entrySet()) {
+ sizeInBytes += entry.getValue().fst.sizeInBytes();
+ if (fields == null || fields.length == 0) {
+ continue;
+ }
+ for (String field : fields) {
+ // support for getting fields by regex as in fielddata
+ if (Regex.simpleMatch(field, entry.getKey())) {
+ long fstSize = entry.getValue().fst.sizeInBytes();
+ completionFields.addTo(field, fstSize);
+ }
+ }
+ }
+
+ return new CompletionStats(sizeInBytes, completionFields);
+ }
+
+ @Override
+ AnalyzingSuggestHolder getAnalyzingSuggestHolder(FieldMapper<?> mapper) {
+ return lookupMap.get(mapper.names().indexName());
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return ramBytesUsed;
+ }
+ };
+ }
+
+ static class AnalyzingSuggestHolder {
+ final boolean preserveSep;
+ final boolean preservePositionIncrements;
+ final int maxSurfaceFormsPerAnalyzedForm;
+ final int maxGraphExpansions;
+ final boolean hasPayloads;
+ final int maxAnalyzedPathsForOneInput;
+ final FST<Pair<Long, BytesRef>> fst;
+ final int sepLabel;
+ final int payloadSep;
+ final int endByte;
+ final int holeCharacter;
+
+ public AnalyzingSuggestHolder(boolean preserveSep, boolean preservePositionIncrements, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+ boolean hasPayloads, int maxAnalyzedPathsForOneInput, FST<Pair<Long, BytesRef>> fst) {
+ this(preserveSep, preservePositionIncrements, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, hasPayloads, maxAnalyzedPathsForOneInput, fst, XAnalyzingSuggester.SEP_LABEL, XAnalyzingSuggester.PAYLOAD_SEP, XAnalyzingSuggester.END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+
+ public AnalyzingSuggestHolder(boolean preserveSep, boolean preservePositionIncrements, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions, boolean hasPayloads, int maxAnalyzedPathsForOneInput, FST<Pair<Long, BytesRef>> fst, int sepLabel, int payloadSep, int endByte, int holeCharacter) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
+ this.maxGraphExpansions = maxGraphExpansions;
+ this.hasPayloads = hasPayloads;
+ this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
+ this.fst = fst;
+ this.sepLabel = sepLabel;
+ this.payloadSep = payloadSep;
+ this.endByte = endByte;
+ this.holeCharacter = holeCharacter;
+ }
+ }
+
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java
new file mode 100644
index 0000000..8162160
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java
@@ -0,0 +1,372 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.FilterAtomicReader.FilterTerms;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.store.IOContext.Context;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream.ToFiniteStrings;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * This {@link PostingsFormat} is essentially a tee sink for a default postings
+ * format: postings are stored on disk through the standard Lucene APIs while a
+ * suggest FST is built as an auxiliary data structure next to the actual
+ * postings. For simplicity, the delegate postings format handles all merge
+ * operations. The auxiliary suggest FST is only loaded when a FieldsProducer is
+ * requested for reading; for merging, the low-memory delegate postings format
+ * is used on its own.
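+ * <p>
+ * A write-time instance must be constructed with an explicit delegate, e.g.
+ * {@code new Completion090PostingsFormat(PostingsFormat.forName("Lucene41"), provider)}
+ * (the delegate name "Lucene41" is illustrative); the no-arg constructor exists
+ * only for SPI-based instantiation at read time.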
+ */
+public class Completion090PostingsFormat extends PostingsFormat {
+
+ public static final String CODEC_NAME = "completion090";
+ public static final int SUGGEST_CODEC_VERSION = 1;
+ public static final String EXTENSION = "cmp";
+
+ private final static ESLogger logger = Loggers.getLogger(Completion090PostingsFormat.class);
+ private PostingsFormat delegatePostingsFormat;
+ private final static Map<String, CompletionLookupProvider> providers;
+ private CompletionLookupProvider writeProvider;
+
+
+ static {
+ final CompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, false);
+ final Builder<String, CompletionLookupProvider> builder = ImmutableMap.builder();
+ providers = builder.put(provider.getName(), provider).build();
+ }
+
+ public Completion090PostingsFormat(PostingsFormat delegatePostingsFormat, CompletionLookupProvider provider) {
+ super(CODEC_NAME);
+ this.delegatePostingsFormat = delegatePostingsFormat;
+ this.writeProvider = provider;
+ assert delegatePostingsFormat != null && writeProvider != null;
+ }
+
+ /*
+ * Used only by core Lucene at read time via Service Provider instantiation;
+ * do not use at write time in application code.
+ */
+ public Completion090PostingsFormat() {
+ super(CODEC_NAME);
+ }
+
+ @Override
+ public CompletionFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
+ if (delegatePostingsFormat == null) {
+ throw new UnsupportedOperationException("Error - " + getClass().getName()
+ + " has been constructed without a choice of PostingsFormat");
+ }
+ assert writeProvider != null;
+ return new CompletionFieldsConsumer(state);
+ }
+
+ @Override
+ public CompletionFieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
+ return new CompletionFieldsProducer(state);
+ }
+
+ private class CompletionFieldsConsumer extends FieldsConsumer {
+
+ private FieldsConsumer delegatesFieldsConsumer;
+ private FieldsConsumer suggestFieldsConsumer;
+
+ public CompletionFieldsConsumer(SegmentWriteState state) throws IOException {
+ this.delegatesFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
+ String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
+ IndexOutput output = null;
+ boolean success = false;
+ try {
+ output = state.directory.createOutput(suggestFSTFile, state.context);
+ CodecUtil.writeHeader(output, CODEC_NAME, SUGGEST_CODEC_VERSION);
+ /*
+ * we write the delegate postings format name so we can load it
+ * without getting an instance in the ctor
+ */
+ output.writeString(delegatePostingsFormat.getName());
+ output.writeString(writeProvider.getName());
+ this.suggestFieldsConsumer = writeProvider.consumer(output);
+ success = true;
+ } finally {
+ if (!success) {
+ IOUtils.closeWhileHandlingException(output);
+ }
+ }
+ }
+
+ @Override
+ public TermsConsumer addField(final FieldInfo field) throws IOException {
+ final TermsConsumer delegateConsumer = delegatesFieldsConsumer.addField(field);
+ final TermsConsumer suggestTermConsumer = suggestFieldsConsumer.addField(field);
+ final GroupedPostingsConsumer groupedPostingsConsumer = new GroupedPostingsConsumer(delegateConsumer, suggestTermConsumer);
+
+ return new TermsConsumer() {
+ @Override
+ public PostingsConsumer startTerm(BytesRef text) throws IOException {
+ groupedPostingsConsumer.startTerm(text);
+ return groupedPostingsConsumer;
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() throws IOException {
+ return delegateConsumer.getComparator();
+ }
+
+ @Override
+ public void finishTerm(BytesRef text, TermStats stats) throws IOException {
+ suggestTermConsumer.finishTerm(text, stats);
+ delegateConsumer.finishTerm(text, stats);
+ }
+
+ @Override
+ public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount) throws IOException {
+ suggestTermConsumer.finish(sumTotalTermFreq, sumDocFreq, docCount);
+ delegateConsumer.finish(sumTotalTermFreq, sumDocFreq, docCount);
+ }
+ };
+ }
+
+ @Override
+ public void close() throws IOException {
+ IOUtils.close(delegatesFieldsConsumer, suggestFieldsConsumer);
+ }
+ }
+
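+ /*
+ * Fans every postings call out to both the delegate consumer and the
+ * suggest consumer, so each term is written once to the regular postings
+ * and once into the suggest FST structure.
+ */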
+ private class GroupedPostingsConsumer extends PostingsConsumer {
+
+ private TermsConsumer[] termsConsumers;
+ private PostingsConsumer[] postingsConsumers;
+
+ public GroupedPostingsConsumer(TermsConsumer... termsConsumersArgs) {
+ termsConsumers = termsConsumersArgs;
+ postingsConsumers = new PostingsConsumer[termsConsumersArgs.length];
+ }
+
+ @Override
+ public void startDoc(int docID, int freq) throws IOException {
+ for (PostingsConsumer postingsConsumer : postingsConsumers) {
+ postingsConsumer.startDoc(docID, freq);
+ }
+ }
+
+ @Override
+ public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException {
+ for (PostingsConsumer postingsConsumer : postingsConsumers) {
+ postingsConsumer.addPosition(position, payload, startOffset, endOffset);
+ }
+ }
+
+ @Override
+ public void finishDoc() throws IOException {
+ for (PostingsConsumer postingsConsumer : postingsConsumers) {
+ postingsConsumer.finishDoc();
+ }
+ }
+
+ public void startTerm(BytesRef text) throws IOException {
+ for (int i = 0; i < termsConsumers.length; i++) {
+ postingsConsumers[i] = termsConsumers[i].startTerm(text);
+ }
+ }
+ }
+
+ private static class CompletionFieldsProducer extends FieldsProducer {
+
+ private final FieldsProducer delegateProducer;
+ private final LookupFactory lookupFactory;
+
+ public CompletionFieldsProducer(SegmentReadState state) throws IOException {
+ String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
+ IndexInput input = state.directory.openInput(suggestFSTFile, state.context);
+ CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_CODEC_VERSION);
+ FieldsProducer delegateProducer = null;
+ boolean success = false;
+ try {
+ PostingsFormat delegatePostingsFormat = PostingsFormat.forName(input.readString());
+ String providerName = input.readString();
+ CompletionLookupProvider completionLookupProvider = providers.get(providerName);
+ if (completionLookupProvider == null) {
+ throw new ElasticsearchIllegalStateException("no provider with name [" + providerName + "] registered");
+ }
+ // TODO: we could clone the ReadState and make it always forward IOContext.MERGE to prevent unnecessary heap usage?
+ delegateProducer = delegatePostingsFormat.fieldsProducer(state);
+ /*
+ * If we are merging, we don't load the FSTs at all, so that we don't
+ * consume as much memory during the merge
+ */
+ if (state.context.context != Context.MERGE) {
+ // TODO: maybe we can do this in a fully lazy fashion based on some configuration;
+ // eventually we should have some kind of circuit breaker that prevents us from
+ // going OOM here
+ this.lookupFactory = completionLookupProvider.load(input);
+ } else {
+ this.lookupFactory = null;
+ }
+ this.delegateProducer = delegateProducer;
+ success = true;
+ } finally {
+ if (!success) {
+ IOUtils.closeWhileHandlingException(delegateProducer, input);
+ } else {
+ IOUtils.close(input);
+ }
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ IOUtils.close(delegateProducer);
+ }
+
+ @Override
+ public Iterator<String> iterator() {
+ return delegateProducer.iterator();
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ final Terms terms = delegateProducer.terms(field);
+ if (terms == null || lookupFactory == null) {
+ return terms;
+ }
+ return new CompletionTerms(terms, lookupFactory);
+ }
+
+ @Override
+ public int size() {
+ return delegateProducer.size();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return (lookupFactory == null ? 0 : lookupFactory.ramBytesUsed()) + delegateProducer.ramBytesUsed();
+ }
+ }
+
+ public static final class CompletionTerms extends FilterTerms {
+ private final LookupFactory lookup;
+
+ public CompletionTerms(Terms delegate, LookupFactory lookup) {
+ super(delegate);
+ this.lookup = lookup;
+ }
+
+ public Lookup getLookup(FieldMapper<?> mapper, CompletionSuggestionContext suggestionContext) {
+ return lookup.getLookup(mapper, suggestionContext);
+ }
+
+ public CompletionStats stats(String ... fields) {
+ return lookup.stats(fields);
+ }
+ }
+
+ public static abstract class CompletionLookupProvider implements PayloadProcessor, ToFiniteStrings {
+
+ public static final char UNIT_SEPARATOR = '\u001f';
+
+ public abstract FieldsConsumer consumer(IndexOutput output) throws IOException;
+
+ public abstract String getName();
+
+ public abstract LookupFactory load(IndexInput input) throws IOException;
+
+ @Override
+ public BytesRef buildPayload(BytesRef surfaceForm, long weight, BytesRef payload) throws IOException {
+ if (weight < -1 || weight > Integer.MAX_VALUE) {
+ throw new IllegalArgumentException("weight must be >= -1 && <= Integer.MAX_VALUE");
+ }
+ for (int i = 0; i < surfaceForm.length; i++) {
+ if (surfaceForm.bytes[i] == UNIT_SEPARATOR) {
+ throw new IllegalArgumentException(
+ "surface form cannot contain unit separator character U+001F; this character is reserved");
+ }
+ }
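+ /*
+ * payload encoding (mirrored exactly by parsePayload below):
+ * vLong(weight + 1) so that a weight of -1 maps to 0, then
+ * vInt(surface length) + surface bytes, then vInt(payload length) + payload bytes
+ */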
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ OutputStreamDataOutput output = new OutputStreamDataOutput(byteArrayOutputStream);
+ output.writeVLong(weight + 1);
+ output.writeVInt(surfaceForm.length);
+ output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
+ output.writeVInt(payload.length);
+ output.writeBytes(payload.bytes, 0, payload.length);
+
+ output.close();
+ return new BytesRef(byteArrayOutputStream.toByteArray());
+ }
+
+ @Override
+ public void parsePayload(BytesRef payload, SuggestPayload ref) throws IOException {
+ ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(payload.bytes, payload.offset, payload.length);
+ InputStreamDataInput input = new InputStreamDataInput(byteArrayInputStream);
+ ref.weight = input.readVLong() - 1;
+ int len = input.readVInt();
+ ref.surfaceForm.grow(len);
+ ref.surfaceForm.length = len;
+ input.readBytes(ref.surfaceForm.bytes, ref.surfaceForm.offset, ref.surfaceForm.length);
+ len = input.readVInt();
+ ref.payload.grow(len);
+ ref.payload.length = len;
+ input.readBytes(ref.payload.bytes, ref.payload.offset, ref.payload.length);
+ input.close();
+ }
+ }
+
+ public CompletionStats completionStats(IndexReader indexReader, String ... fields) {
+ CompletionStats completionStats = new CompletionStats();
+ for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
+ AtomicReader atomicReader = atomicReaderContext.reader();
+ try {
+ for (String fieldName : atomicReader.fields()) {
+ Terms terms = atomicReader.fields().terms(fieldName);
+ if (terms instanceof CompletionTerms) {
+ CompletionTerms completionTerms = (CompletionTerms) terms;
+ completionStats.add(completionTerms.stats(fields));
+ }
+ }
+ } catch (IOException e) {
+ logger.error("Could not get completion stats: {}", e, e.getMessage());
+ }
+ }
+
+ return completionStats;
+ }
+
+ public static abstract class LookupFactory {
+ public abstract Lookup getLookup(FieldMapper<?> mapper, CompletionSuggestionContext suggestionContext);
+ public abstract CompletionStats stats(String ... fields);
+ abstract AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder getAnalyzingSuggestHolder(FieldMapper<?> mapper);
+ public abstract long ramBytesUsed();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatProvider.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatProvider.java
new file mode 100644
index 0000000..05299b7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatProvider.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.apache.lucene.codecs.PostingsFormat;
+import org.elasticsearch.index.codec.postingsformat.AbstractPostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+
+/**
+ *
+ */
+public final class CompletionPostingsFormatProvider extends AbstractPostingsFormatProvider {
+
+ private final Completion090PostingsFormat postingsFormat;
+
+ public CompletionPostingsFormatProvider(String name, PostingsFormatProvider delegate, Completion090PostingsFormat.CompletionLookupProvider provider) {
+ super(name);
+ this.postingsFormat = new Completion090PostingsFormat(delegate.get(), provider);
+ }
+
+ @Override
+ public PostingsFormat get() {
+ return postingsFormat;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java
new file mode 100644
index 0000000..6a535ab
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class CompletionStats implements Streamable, ToXContent {
+
+ private long sizeInBytes;
+
+ @Nullable
+ private ObjectLongOpenHashMap<String> fields;
+
+ public CompletionStats() {
+ }
+
+ public CompletionStats(long size, @Nullable ObjectLongOpenHashMap<String> fields) {
+ this.sizeInBytes = size;
+ this.fields = fields;
+ }
+
+ public long getSizeInBytes() {
+ return sizeInBytes;
+ }
+
+ public ByteSizeValue getSize() {
+ return new ByteSizeValue(sizeInBytes);
+ }
+
+ public ObjectLongOpenHashMap<String> getFields() {
+ return fields;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ sizeInBytes = in.readVLong();
+ if (in.readBoolean()) {
+ int size = in.readVInt();
+ fields = new ObjectLongOpenHashMap<String>(size);
+ for (int i = 0; i < size; i++) {
+ fields.put(in.readString(), in.readVLong());
+ }
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(sizeInBytes);
+ if (fields == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(fields.size());
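+ // hppc open-addressing map: iterate the backing arrays directly and
+ // emit only the occupied slots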
+ final boolean[] states = fields.allocated;
+ final Object[] keys = fields.keys;
+ final long[] values = fields.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ out.writeString((String) keys[i]);
+ out.writeVLong(values[i]);
+ }
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.COMPLETION);
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, sizeInBytes);
+ if (fields != null) {
+ builder.startObject(Fields.FIELDS);
+ final boolean[] states = fields.allocated;
+ final Object[] keys = fields.keys;
+ final long[] values = fields.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ builder.startObject((String) keys[i], XContentBuilder.FieldCaseConversion.NONE);
+ builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, values[i]);
+ builder.endObject();
+ }
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ public static CompletionStats readCompletionStats(StreamInput in) throws IOException {
+ CompletionStats stats = new CompletionStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString COMPLETION = new XContentBuilderString("completion");
+ static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
+ static final XContentBuilderString SIZE = new XContentBuilderString("size");
+ static final XContentBuilderString FIELDS = new XContentBuilderString("fields");
+ }
+
+ public void add(CompletionStats completion) {
+ if (completion == null) {
+ return;
+ }
+
+ sizeInBytes += completion.getSizeInBytes();
+
+ if (completion.fields != null) {
+ if (fields == null) fields = new ObjectLongOpenHashMap<String>();
+
+ final boolean[] states = completion.fields.allocated;
+ final Object[] keys = completion.fields.keys;
+ final long[] values = completion.fields.values;
+ for (int i = 0; i < states.length; i++) {
+ if (states[i]) {
+ fields.addTo((String) keys[i], values[i]);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java
new file mode 100644
index 0000000..2be279c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.suggest.SuggestContextParser;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+import java.io.IOException;
+
+import static org.elasticsearch.search.suggest.SuggestUtils.parseSuggestContext;
+
+/**
+ *
+ */
+public class CompletionSuggestParser implements SuggestContextParser {
+
+ private CompletionSuggester completionSuggester;
+ private static final ParseField FUZZINESS = Fuzziness.FIELD.withDeprecation("edit_distance");
+
+ public CompletionSuggestParser(CompletionSuggester completionSuggester) {
+ this.completionSuggester = completionSuggester;
+ }
+
+ @Override
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService) throws IOException {
+ XContentParser.Token token;
+ String fieldName = null;
+ CompletionSuggestionContext suggestion = new CompletionSuggestionContext(completionSuggester);
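+ // "fuzzy" is accepted either as a bare boolean or as an object carrying
+ // fuzziness (deprecated: edit_distance), transpositions, min_length,
+ // prefix_length and unicode_aware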
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if (!parseSuggestContext(parser, mapperService, fieldName, suggestion)) {
+ if (token == XContentParser.Token.VALUE_BOOLEAN && "fuzzy".equals(fieldName)) {
+ suggestion.setFuzzy(parser.booleanValue());
+ }
+ }
+ } else if (token == XContentParser.Token.START_OBJECT && "fuzzy".equals(fieldName)) {
+ suggestion.setFuzzy(true);
+ String fuzzyConfigName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fuzzyConfigName = parser.currentName();
+ } else if (token.isValue()) {
+ if (FUZZINESS.match(fuzzyConfigName, ParseField.EMPTY_FLAGS)) {
+ suggestion.setFuzzyEditDistance(Fuzziness.parse(parser).asDistance());
+ } else if ("transpositions".equals(fuzzyConfigName)) {
+ suggestion.setFuzzyTranspositions(parser.booleanValue());
+ } else if ("min_length".equals(fuzzyConfigName) || "minLength".equals(fuzzyConfigName)) {
+ suggestion.setFuzzyMinLength(parser.intValue());
+ } else if ("prefix_length".equals(fuzzyConfigName) || "prefixLength".equals(fuzzyConfigName)) {
+ suggestion.setFuzzyPrefixLength(parser.intValue());
+ } else if ("unicode_aware".equals(fuzzyConfigName) || "unicodeAware".equals(fuzzyConfigName)) {
+ suggestion.setFuzzyUnicodeAware(parser.booleanValue());
+ }
+ }
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]");
+ }
+ }
+ suggestion.mapper(mapperService.smartNameFieldMapper(suggestion.getField()));
+
+ return suggestion;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
new file mode 100644
index 0000000..67ded5e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import com.google.common.collect.Maps;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.CollectionUtil;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.SuggestContextParser;
+import org.elasticsearch.search.suggest.Suggester;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion.Entry.Option;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+
+public class CompletionSuggester extends Suggester<CompletionSuggestionContext> {
+
+ private static final ScoreComparator scoreComparator = new ScoreComparator();
+
+
+ @Override
+ protected Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute(String name,
+ CompletionSuggestionContext suggestionContext, IndexReader indexReader, CharsRef spare) throws IOException {
+ if (suggestionContext.mapper() == null || !(suggestionContext.mapper() instanceof CompletionFieldMapper)) {
+ throw new ElasticsearchException("Field [" + suggestionContext.getField() + "] is not a completion suggest field");
+ }
+
+ CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize());
+ UnicodeUtil.UTF8toUTF16(suggestionContext.getText(), spare);
+
+ CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length());
+ completionSuggestion.addTerm(completionSuggestEntry);
+
+ String fieldName = suggestionContext.getField();
+ Map<String, CompletionSuggestion.Entry.Option> results = Maps.newHashMapWithExpectedSize(indexReader.leaves().size() * suggestionContext.getSize());
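+ // collect options across all segments, de-duplicating by surface form and
+ // keeping the best score; the result is sorted and trimmed to size below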
+ for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
+ AtomicReader atomicReader = atomicReaderContext.reader();
+ Terms terms = atomicReader.fields().terms(fieldName);
+ if (terms instanceof Completion090PostingsFormat.CompletionTerms) {
+ final Completion090PostingsFormat.CompletionTerms lookupTerms = (Completion090PostingsFormat.CompletionTerms) terms;
+ final Lookup lookup = lookupTerms.getLookup(suggestionContext.mapper(), suggestionContext);
+ if (lookup == null) {
+ // we don't have a lookup for this segment; this can happen if a merge
+ // dropped all docs from this segment that had a value in this field.
+ continue;
+ }
+ List<Lookup.LookupResult> lookupResults = lookup.lookup(spare, false, suggestionContext.getSize());
+ for (Lookup.LookupResult res : lookupResults) {
+
+ final String key = res.key.toString();
+ final float score = res.value;
+ final Option value = results.get(key);
+ if (value == null) {
+ final Option option = new CompletionSuggestion.Entry.Option(new StringText(key), score, res.payload == null ? null
+ : new BytesArray(res.payload));
+ results.put(key, option);
+ } else if (value.getScore() < score) {
+ value.setScore(score);
+ value.setPayload(res.payload == null ? null : new BytesArray(res.payload));
+ }
+ }
+ }
+ }
+ final List<CompletionSuggestion.Entry.Option> options = new ArrayList<CompletionSuggestion.Entry.Option>(results.values());
+ CollectionUtil.introSort(options, scoreComparator);
+
+ int optionCount = Math.min(suggestionContext.getSize(), options.size());
+ for (int i = 0 ; i < optionCount ; i++) {
+ completionSuggestEntry.addOption(options.get(i));
+ }
+
+ return completionSuggestion;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] { "completion" };
+ }
+
+ @Override
+ public SuggestContextParser getContextParser() {
+ return new CompletionSuggestParser(this);
+ }
+
+ public static class ScoreComparator implements Comparator<CompletionSuggestion.Entry.Option> {
+ @Override
+ public int compare(Option o1, Option o2) {
+ return Float.compare(o2.getScore(), o1.getScore());
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java
new file mode 100644
index 0000000..653e64c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ *
+ */
+public class CompletionSuggestion extends Suggest.Suggestion<CompletionSuggestion.Entry> {
+
+ public static final int TYPE = 2;
+
+ public CompletionSuggestion() {
+ }
+
+ public CompletionSuggestion(String name, int size) {
+ super(name, size);
+ }
+
+ @Override
+ public int getType() {
+ return TYPE;
+ }
+
+ @Override
+ protected Entry newEntry() {
+ return new Entry();
+ }
+
+ public static class Entry extends org.elasticsearch.search.suggest.Suggest.Suggestion.Entry<CompletionSuggestion.Entry.Option> {
+
+ public Entry(Text text, int offset, int length) {
+ super(text, offset, length);
+ }
+
+ protected Entry() {
+ super();
+ }
+
+ @Override
+ protected Option newOption() {
+ return new Option();
+ }
+
+ public static class Option extends org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option {
+ private BytesReference payload;
+
+ public Option(Text text, float score, BytesReference payload) {
+ super(text, score);
+ this.payload = payload;
+ }
+
+
+ protected Option() {
+ super();
+ }
+
+ public void setPayload(BytesReference payload) {
+ this.payload = payload;
+ }
+
+ public BytesReference getPayload() {
+ return payload;
+ }
+
+ public String getPayloadAsString() {
+ return payload.toUtf8();
+ }
+
+ public long getPayloadAsLong() {
+ return Long.parseLong(payload.toUtf8());
+ }
+
+ public double getPayloadAsDouble() {
+ return Double.parseDouble(payload.toUtf8());
+ }
+
+ public Map<String, Object> getPayloadAsMap() {
+ return XContentHelper.convertToMap(payload, false).v2();
+ }
+
+ public void setScore(float score) {
+ super.setScore(score);
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ super.innerToXContent(builder, params);
+ if (payload != null && payload.length() > 0) {
+ builder.rawField("payload", payload);
+ }
+ return builder;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ payload = in.readBytesReference();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(payload);
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java
new file mode 100644
index 0000000..d757e82
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class CompletionSuggestionBuilder extends SuggestBuilder.SuggestionBuilder<CompletionSuggestionBuilder> {
+
+ public CompletionSuggestionBuilder(String name) {
+ super(name, "completion");
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java
new file mode 100644
index 0000000..adf6911
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.suggest.Suggester;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+/**
+ *
+ */
+public class CompletionSuggestionContext extends SuggestionSearchContext.SuggestionContext {
+
+ private FieldMapper<?> mapper;
+ private int fuzzyEditDistance = XFuzzySuggester.DEFAULT_MAX_EDITS;
+ private boolean fuzzyTranspositions = XFuzzySuggester.DEFAULT_TRANSPOSITIONS;
+ private int fuzzyMinLength = XFuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH;
+ private int fuzzyPrefixLength = XFuzzySuggester.DEFAULT_NON_FUZZY_PREFIX;
+ private boolean fuzzy = false;
+ private boolean fuzzyUnicodeAware = XFuzzySuggester.DEFAULT_UNICODE_AWARE;
+
+ public CompletionSuggestionContext(Suggester suggester) {
+ super(suggester);
+ }
+
+ public FieldMapper<?> mapper() {
+ return this.mapper;
+ }
+
+ public void mapper(FieldMapper<?> mapper) {
+ this.mapper = mapper;
+ }
+
+ public void setFuzzyEditDistance(int fuzzyEditDistance) {
+ this.fuzzyEditDistance = fuzzyEditDistance;
+ }
+
+ public int getFuzzyEditDistance() {
+ return fuzzyEditDistance;
+ }
+
+ public void setFuzzyTranspositions(boolean fuzzyTranspositions) {
+ this.fuzzyTranspositions = fuzzyTranspositions;
+ }
+
+ public boolean isFuzzyTranspositions() {
+ return fuzzyTranspositions;
+ }
+
+ public void setFuzzyMinLength(int fuzzyMinPrefixLength) {
+ this.fuzzyMinLength = fuzzyMinPrefixLength;
+ }
+
+ public int getFuzzyMinLength() {
+ return fuzzyMinLength;
+ }
+
+ public void setFuzzyPrefixLength(int fuzzyNonPrefixLength) {
+ this.fuzzyPrefixLength = fuzzyNonPrefixLength;
+ }
+
+ public int getFuzzyPrefixLength() {
+ return fuzzyPrefixLength;
+ }
+
+ public void setFuzzy(boolean fuzzy) {
+ this.fuzzy = fuzzy;
+ }
+
+ public boolean isFuzzy() {
+ return fuzzy;
+ }
+
+ public void setFuzzyUnicodeAware(boolean fuzzyUnicodeAware) {
+ this.fuzzyUnicodeAware = fuzzyUnicodeAware;
+ }
+
+ public boolean isFuzzyUnicodeAware() {
+ return fuzzyUnicodeAware;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionFuzzyBuilder.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionFuzzyBuilder.java
new file mode 100644
index 0000000..2059a28
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionFuzzyBuilder.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class CompletionSuggestionFuzzyBuilder extends SuggestBuilder.SuggestionBuilder<CompletionSuggestionFuzzyBuilder> {
+
+ private Fuzziness fuzziness = Fuzziness.ONE;
+ private boolean fuzzyTranspositions = XFuzzySuggester.DEFAULT_TRANSPOSITIONS;
+ private int fuzzyMinLength = XFuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH;
+ private int fuzzyPrefixLength = XFuzzySuggester.DEFAULT_NON_FUZZY_PREFIX;
+ private boolean unicodeAware = XFuzzySuggester.DEFAULT_UNICODE_AWARE;
+
+ public CompletionSuggestionFuzzyBuilder(String name) {
+ super(name, "completion");
+ }
+
+ public Fuzziness getFuzziness() {
+ return fuzziness;
+ }
+
+ public CompletionSuggestionFuzzyBuilder setFuzziness(Fuzziness fuzziness) {
+ this.fuzziness = fuzziness;
+ return this;
+ }
+
+ public boolean isFuzzyTranspositions() {
+ return fuzzyTranspositions;
+ }
+
+ public CompletionSuggestionFuzzyBuilder setFuzzyTranspositions(boolean fuzzyTranspositions) {
+ this.fuzzyTranspositions = fuzzyTranspositions;
+ return this;
+ }
+
+ public int getFuzzyMinLength() {
+ return fuzzyMinLength;
+ }
+
+ public CompletionSuggestionFuzzyBuilder setFuzzyMinLength(int fuzzyMinLength) {
+ this.fuzzyMinLength = fuzzyMinLength;
+ return this;
+ }
+
+ public int getFuzzyPrefixLength() {
+ return fuzzyPrefixLength;
+ }
+
+ public CompletionSuggestionFuzzyBuilder setFuzzyPrefixLength(int fuzzyPrefixLength) {
+ this.fuzzyPrefixLength = fuzzyPrefixLength;
+ return this;
+ }
+
+ public boolean isUnicodeAware() {
+ return unicodeAware;
+ }
+
+ public CompletionSuggestionFuzzyBuilder setUnicodeAware(boolean unicodeAware) {
+ this.unicodeAware = unicodeAware;
+ return this;
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject("fuzzy");
+
+ if (fuzziness != Fuzziness.ONE) {
+ fuzziness.toXContent(builder, params);
+ }
+ if (fuzzyTranspositions != XFuzzySuggester.DEFAULT_TRANSPOSITIONS) {
+ builder.field("transpositions", fuzzyTranspositions);
+ }
+ if (fuzzyMinLength != XFuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH) {
+ builder.field("min_length", fuzzyMinLength);
+ }
+ if (fuzzyPrefixLength != XFuzzySuggester.DEFAULT_NON_FUZZY_PREFIX) {
+ builder.field("prefix_length", fuzzyPrefixLength);
+ }
+ if (unicodeAware != XFuzzySuggester.DEFAULT_UNICODE_AWARE) {
+ builder.field("unicode_aware", unicodeAware);
+ }
+
+ builder.endObject();
+ return builder;
+ }
+}
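A minimal usage sketch for the builder above (the suggestion name and option
values are illustrative; the inherited text/field setters live on
SuggestBuilder.SuggestionBuilder and are not part of this diff):

    CompletionSuggestionFuzzyBuilder fuzzy =
            new CompletionSuggestionFuzzyBuilder("song-suggest")
                    .setFuzziness(Fuzziness.TWO)      // allow up to two edits
                    .setFuzzyTranspositions(true)     // a transposition counts as one edit
                    .setFuzzyMinLength(3)             // serialized as "min_length"
                    .setFuzzyPrefixLength(1)          // serialized as "prefix_length"
                    .setUnicodeAware(true);           // serialized as "unicode_aware"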
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java
new file mode 100644
index 0000000..1830678
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.completion;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.util.*;
+import org.apache.lucene.util.fst.Util;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ *
+ */
+public final class CompletionTokenStream extends TokenStream {
+
+ private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class);
+ private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class);
+ private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);
+
+
+ private final TokenStream input;
+ private BytesRef payload;
+ private Iterator<IntsRef> finiteStrings;
+ private ToFiniteStrings toFiniteStrings;
+ private int posInc = -1;
+ private static final int MAX_PATHS = 256;
+ private CharTermAttribute charTermAttribute;
+
+ public CompletionTokenStream(TokenStream input, BytesRef payload, ToFiniteStrings toFiniteStrings) throws IOException {
+ // Don't call the super(input) ctor - this is a true delegate and has a new attribute source since we consume
+ // the input stream entirely in toFiniteStrings(input)
+ this.input = input;
+ this.payload = payload;
+ this.toFiniteStrings = toFiniteStrings;
+ }
+
+ @Override
+ public boolean incrementToken() throws IOException {
+ clearAttributes();
+ if (finiteStrings == null) {
+ Set<IntsRef> strings = toFiniteStrings.toFiniteStrings(input);
+
+ if (strings.size() > MAX_PATHS) {
+ throw new IllegalArgumentException("TokenStream expanded to " + strings.size() + " finite strings. Only <= " + MAX_PATHS
+ + " finite strings are supported");
+ }
+ posInc = strings.size();
+ finiteStrings = strings.iterator();
+ }
+ if (finiteStrings.hasNext()) {
+ posAttr.setPositionIncrement(posInc);
+ /*
+ * this posInc encodes the number of analyzed paths this surface form
+ * produced; multi-fields share the same surface form, so their path
+ * counts sum up
+ */
+ posInc = 0;
+ Util.toBytesRef(finiteStrings.next(), bytesAtt.getBytesRef()); // now we have UTF-8
+ if (charTermAttribute != null) {
+ charTermAttribute.setLength(0);
+ charTermAttribute.append(bytesAtt.toUTF16());
+ }
+ if (payload != null) {
+ payloadAttr.setPayload(this.payload);
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public void end() throws IOException {
+ super.end();
+ if (posInc == -1) {
+ input.end();
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (posInc == -1) {
+ input.close();
+ }
+ }
+
+ public interface ToFiniteStrings {
+ Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException;
+ }
+
+ @Override
+ public void reset() throws IOException {
+ super.reset();
+ if (hasAttribute(CharTermAttribute.class)) {
+ // we only create this attribute if it is really needed, to avoid the UTF-8 to UTF-16 conversion otherwise
+ charTermAttribute = getAttribute(CharTermAttribute.class);
+ }
+ finiteStrings = null;
+ posInc = -1;
+ }
+
+ public interface ByteTermAttribute extends TermToBytesRefAttribute {
+ // marker interface
+
+ public CharSequence toUTF16();
+ }
+
+ public static final class ByteTermAttributeImpl extends AttributeImpl implements ByteTermAttribute, TermToBytesRefAttribute {
+ private final BytesRef bytes = new BytesRef();
+ private CharsRef charsRef;
+
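+ // TermToBytesRefAttribute contract: fillBytesRef() returns the hash of the
+ // final term bytes; the bytes themselves are filled in externally via
+ // getBytesRef() (see Util.toBytesRef in incrementToken above)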
+ @Override
+ public int fillBytesRef() {
+ return bytes.hashCode();
+ }
+
+ @Override
+ public BytesRef getBytesRef() {
+ return bytes;
+ }
+
+ @Override
+ public void clear() {
+ bytes.length = 0;
+ }
+
+ @Override
+ public void copyTo(AttributeImpl target) {
+ ByteTermAttributeImpl other = (ByteTermAttributeImpl) target;
+ other.bytes.copyBytes(bytes);
+ }
+
+ @Override
+ public CharSequence toUTF16() {
+ if (charsRef == null) {
+ charsRef = new CharsRef();
+ }
+ UnicodeUtil.UTF8toUTF16(bytes, charsRef);
+ return charsRef;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/PayloadProcessor.java b/src/main/java/org/elasticsearch/search/suggest/completion/PayloadProcessor.java
new file mode 100644
index 0000000..4ee79b6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/PayloadProcessor.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+interface PayloadProcessor {
+
+ BytesRef buildPayload(BytesRef surfaceForm, long weight, BytesRef payload) throws IOException;
+
+ void parsePayload(BytesRef payload, SuggestPayload ref) throws IOException;
+
+ static class SuggestPayload {
+ final BytesRef payload = new BytesRef();
+ long weight = 0;
+ final BytesRef surfaceForm = new BytesRef();
+ }
+}
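The contract here is a symmetric pack/unpack: whatever buildPayload encodes for a surface form, weight and user payload, parsePayload must recover into a SuggestPayload. A naive length-prefixed sketch of an implementation (purely illustrative; this is not the wire format the completion codec actually uses, and it assumes a non-negative weight):

    import java.io.IOException;
    import org.apache.lucene.store.ByteArrayDataInput;
    import org.apache.lucene.store.ByteArrayDataOutput;
    import org.apache.lucene.util.BytesRef;

    class LengthPrefixedPayloadProcessor implements PayloadProcessor { // hypothetical name
        @Override
        public BytesRef buildPayload(BytesRef surfaceForm, long weight, BytesRef payload) throws IOException {
            byte[] buf = new byte[surfaceForm.length + payload.length + 20]; // vint/vlong headroom
            ByteArrayDataOutput out = new ByteArrayDataOutput(buf);
            out.writeVInt(surfaceForm.length);
            out.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
            out.writeVLong(weight); // assumes weight >= 0
            out.writeVInt(payload.length);
            out.writeBytes(payload.bytes, payload.offset, payload.length);
            return new BytesRef(buf, 0, out.getPosition());
        }

        @Override
        public void parsePayload(BytesRef payload, SuggestPayload ref) throws IOException {
            ByteArrayDataInput in = new ByteArrayDataInput(payload.bytes, payload.offset, payload.length);
            int surfaceLen = in.readVInt();
            ref.surfaceForm.grow(surfaceLen);
            in.readBytes(ref.surfaceForm.bytes, 0, surfaceLen);
            ref.surfaceForm.length = surfaceLen;
            ref.weight = in.readVLong();
            int payloadLen = in.readVInt();
            ref.payload.grow(payloadLen);
            in.readBytes(ref.payload.bytes, 0, payloadLen);
            ref.payload.length = payloadLen;
        }
    }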
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java b/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java
new file mode 100644
index 0000000..56c6181
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import java.io.IOException;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet;
+
+//TODO public for tests
+public abstract class CandidateGenerator {
+
+ public abstract boolean isKnownWord(BytesRef term) throws IOException;
+
+ public abstract long frequency(BytesRef term) throws IOException;
+
+ public CandidateSet drawCandidates(BytesRef term) throws IOException {
+ CandidateSet set = new CandidateSet(Candidate.EMPTY, createCandidate(term, true));
+ return drawCandidates(set);
+ }
+
+ public Candidate createCandidate(BytesRef term, boolean userInput) throws IOException {
+ return createCandidate(term, frequency(term), 1.0, userInput);
+ }
+ public Candidate createCandidate(BytesRef term, long frequency, double channelScore) throws IOException {
+ return createCandidate(term, frequency, channelScore, false);
+ }
+
+ public abstract Candidate createCandidate(BytesRef term, long frequency, double channelScore, boolean userInput) throws IOException;
+
+ public abstract CandidateSet drawCandidates(CandidateSet set) throws IOException;
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java b/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java
new file mode 100644
index 0000000..122d3f5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+import java.io.IOException;
+
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet;
+
+final class CandidateScorer {
+ private final WordScorer scorer;
+ private final int maxNumCorrections;
+ private final int gramSize;
+
+ public CandidateScorer(WordScorer scorer, int maxNumCorrections, int gramSize) {
+ this.scorer = scorer;
+ this.maxNumCorrections = maxNumCorrections;
+ this.gramSize = gramSize;
+ }
+
+
+ public Correction[] findBestCandiates(CandidateSet[] sets, float errorFraction, double cutoffScore) throws IOException {
+ if (sets.length == 0) {
+ return Correction.EMPTY;
+ }
+ PriorityQueue<Correction> corrections = new PriorityQueue<Correction>(maxNumCorrections) {
+ @Override
+ protected boolean lessThan(Correction a, Correction b) {
+ return a.score < b.score;
+ }
+ };
+ int numMissspellings = 1;
+ if (errorFraction >= 1.0) {
+ numMissspellings = (int) errorFraction;
+ } else {
+ numMissspellings = Math.round(errorFraction * sets.length);
+ }
+ findCandidates(sets, new Candidate[sets.length], 0, Math.max(1, numMissspellings), corrections, cutoffScore, 0.0);
+ Correction[] result = new Correction[corrections.size()];
+ for (int i = result.length - 1; i >= 0; i--) {
+ result[i] = corrections.pop();
+ }
+ assert corrections.size() == 0;
+ return result;
+
+ }
+
+ public void findCandidates(CandidateSet[] candidates, Candidate[] path, int ord, int numMissspellingsLeft,
+ PriorityQueue<Correction> corrections, double cutoffScore, final double pathScore) throws IOException {
+ CandidateSet current = candidates[ord];
+ if (ord == candidates.length - 1) {
+ path[ord] = current.originalTerm;
+ updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+ if (numMissspellingsLeft > 0) {
+ for (int i = 0; i < current.candidates.length; i++) {
+ path[ord] = current.candidates[i];
+ updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+ }
+ }
+ } else {
+ if (numMissspellingsLeft > 0) {
+ path[ord] = current.originalTerm;
+ findCandidates(candidates, path, ord + 1, numMissspellingsLeft, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+ for (int i = 0; i < current.candidates.length; i++) {
+ path[ord] = current.candidates[i];
+ findCandidates(candidates, path, ord + 1, numMissspellingsLeft - 1, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+ }
+ } else {
+ path[ord] = current.originalTerm;
+ findCandidates(candidates, path, ord + 1, 0, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+ }
+ }
+
+ }
+
+ private void updateTop(CandidateSet[] candidates, Candidate[] path, PriorityQueue<Correction> corrections, double cutoffScore, double score)
+ throws IOException {
+ score = Math.exp(score);
+ assert Math.abs(score - score(path, candidates)) < 0.00001;
+ if (score > cutoffScore) {
+ if (corrections.size() < maxNumCorrections) {
+ Candidate[] c = new Candidate[candidates.length];
+ System.arraycopy(path, 0, c, 0, path.length);
+ corrections.add(new Correction(score, c));
+ } else if (corrections.top().score < score) {
+ Correction top = corrections.top();
+ System.arraycopy(path, 0, top.candidates, 0, path.length);
+ top.score = score;
+ corrections.updateTop();
+ }
+ }
+ }
+
+ public double score(Candidate[] path, CandidateSet[] candidates) throws IOException {
+ double score = 0.0d;
+ for (int i = 0; i < candidates.length; i++) {
+ score += scorer.score(path, candidates, i, gramSize);
+ }
+ return Math.exp(score);
+ }
+}
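How the errorFraction argument translates into the number of terms that may be corrected, per the branch at the top of findBestCandiates: values >= 1 are taken as an absolute count, values < 1 as a fraction of the query terms, with a floor of one. For example, with sets.length = 5:

    errorFraction = 2.0   ->  numMissspellings = (int) 2.0 = 2
    errorFraction = 0.5   ->  Math.round(0.5f * 5) = 3
    errorFraction = 0.05  ->  Math.round(0.05f * 5) = 0, and Math.max(1, ...) keeps at least one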
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/Correction.java b/src/main/java/org/elasticsearch/search/suggest/phrase/Correction.java
new file mode 100644
index 0000000..f0be477
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/Correction.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+
+import java.util.Arrays;
+//TODO public for tests
+public final class Correction {
+
+ public static final Correction[] EMPTY = new Correction[0];
+ public double score;
+ public final Candidate[] candidates;
+
+ public Correction(double score, Candidate[] candidates) {
+ this.score = score;
+ this.candidates = candidates;
+ }
+
+ @Override
+ public String toString() {
+ return "Correction [score=" + score + ", candidates=" + Arrays.toString(candidates) + "]";
+ }
+
+ public BytesRef join(BytesRef separator) {
+ return join(separator, null, null);
+ }
+
+ public BytesRef join(BytesRef separator, BytesRef preTag, BytesRef postTag) {
+ return join(separator, new BytesRef(), preTag, postTag);
+ }
+
+ public BytesRef join(BytesRef separator, BytesRef result, BytesRef preTag, BytesRef postTag) {
+ BytesRef[] toJoin = new BytesRef[this.candidates.length];
+ int len = separator.length * this.candidates.length - 1;
+ for (int i = 0; i < toJoin.length; i++) {
+ Candidate candidate = candidates[i];
+ if (preTag == null || candidate.userInput) {
+ toJoin[i] = candidate.term;
+ } else {
+ final int maxLen = preTag.length + postTag.length + candidate.term.length;
+ final BytesRef highlighted = new BytesRef(maxLen);// just allocate once
+ if (i == 0 || candidates[i-1].userInput) {
+ highlighted.append(preTag);
+ }
+ highlighted.append(candidate.term);
+ if (toJoin.length == i + 1 || candidates[i+1].userInput) {
+ highlighted.append(postTag);
+ }
+ toJoin[i] = highlighted;
+ }
+ len += toJoin[i].length;
+ }
+ result.offset = 0;
+ result.grow(len);
+ return SuggestUtils.joinPreAllocated(separator, result, toJoin);
+ }
+} \ No newline at end of file
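A small usage sketch of the highlighting behaviour (illustrative values; Candidate is DirectCandidateGenerator.Candidate from this package): user-input terms pass through untagged, and a run of corrected terms shares a single pre/post tag pair.

    Candidate the   = new Candidate(new BytesRef("the"),   100, 1.0d, 1.0d, true);  // user input
    Candidate quick = new Candidate(new BytesRef("quick"),  80, 0.9d, 0.8d, false); // corrected
    Candidate brown = new Candidate(new BytesRef("brown"),  60, 1.0d, 1.0d, true);  // user input
    Correction correction = new Correction(0.8d, new Candidate[] { the, quick, brown });
    BytesRef joined = correction.join(new BytesRef(" "), new BytesRef(),
            new BytesRef("<em>"), new BytesRef("</em>"));
    // joined.utf8ToString() -> "the <em>quick</em> brown"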
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
new file mode 100644
index 0000000..041b92d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.search.spell.SuggestMode;
+import org.apache.lucene.search.spell.SuggestWord;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.suggest.SuggestUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+//TODO public for tests
+public final class DirectCandidateGenerator extends CandidateGenerator {
+
+ private final DirectSpellChecker spellchecker;
+ private final String field;
+ private final SuggestMode suggestMode;
+ private final TermsEnum termsEnum;
+ private final IndexReader reader;
+ private final long dictSize;
+ private final double logBase = 5;
+ private final long frequencyPlateau;
+ private final Analyzer preFilter;
+ private final Analyzer postFilter;
+ private final double nonErrorLikelihood;
+ private final boolean useTotalTermFrequency;
+ private final CharsRef spare = new CharsRef();
+ private final BytesRef byteSpare = new BytesRef();
+ private final int numCandidates;
+
+ public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates) throws IOException {
+ this(spellchecker, field, suggestMode, reader, nonErrorLikelihood, numCandidates, null, null, MultiFields.getTerms(reader, field));
+ }
+
+
+ public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates, Analyzer preFilter, Analyzer postFilter, Terms terms) throws IOException {
+ if (terms == null) {
+ throw new ElasticsearchIllegalArgumentException("generator field [" + field + "] doesn't exist");
+ }
+ this.spellchecker = spellchecker;
+ this.field = field;
+ this.numCandidates = numCandidates;
+ this.suggestMode = suggestMode;
+ this.reader = reader;
+ final long dictSize = terms.getSumTotalTermFreq();
+ this.useTotalTermFrequency = dictSize != -1;
+ this.dictSize = dictSize == -1 ? reader.maxDoc() : dictSize;
+ this.preFilter = preFilter;
+ this.postFilter = postFilter;
+ this.nonErrorLikelihood = nonErrorLikelihood;
+ float thresholdFrequency = spellchecker.getThresholdFrequency();
+ this.frequencyPlateau = thresholdFrequency >= 1.0f ? (int) thresholdFrequency: (int)(dictSize * thresholdFrequency);
+ termsEnum = terms.iterator(null);
+ }
+
+ /* (non-Javadoc)
+ * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#isKnownWord(org.apache.lucene.util.BytesRef)
+ */
+ @Override
+ public boolean isKnownWord(BytesRef term) throws IOException {
+ return frequency(term) > 0;
+ }
+
+ /* (non-Javadoc)
+ * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#frequency(org.apache.lucene.util.BytesRef)
+ */
+ @Override
+ public long frequency(BytesRef term) throws IOException {
+ term = preFilter(term, spare, byteSpare);
+ return internalFrequency(term);
+ }
+
+
+ public long internalFrequency(BytesRef term) throws IOException {
+ if (termsEnum.seekExact(term)) {
+ return useTotalTermFrequency ? termsEnum.totalTermFreq() : termsEnum.docFreq();
+ }
+ return 0;
+ }
+
+ public String getField() {
+ return field;
+ }
+
+ /* (non-Javadoc)
+ * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#drawCandidates(org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet, int)
+ */
+ @Override
+ public CandidateSet drawCandidates(CandidateSet set) throws IOException {
+ Candidate original = set.originalTerm;
+ BytesRef term = preFilter(original.term, spare, byteSpare);
+ final long frequency = original.frequency;
+ spellchecker.setThresholdFrequency(this.suggestMode == SuggestMode.SUGGEST_ALWAYS ? 0 : thresholdFrequency(frequency, dictSize));
+ SuggestWord[] suggestSimilar = spellchecker.suggestSimilar(new Term(field, term), numCandidates, reader, this.suggestMode);
+ List<Candidate> candidates = new ArrayList<Candidate>(suggestSimilar.length);
+ for (int i = 0; i < suggestSimilar.length; i++) {
+ SuggestWord suggestWord = suggestSimilar[i];
+ BytesRef candidate = new BytesRef(suggestWord.string);
+ postFilter(new Candidate(candidate, internalFrequency(candidate), suggestWord.score, score(suggestWord.freq, suggestWord.score, dictSize), false), spare, byteSpare, candidates);
+ }
+ set.addCandidates(candidates);
+ return set;
+ }
+
+ protected BytesRef preFilter(final BytesRef term, final CharsRef spare, final BytesRef byteSpare) throws IOException {
+ if (preFilter == null) {
+ return term;
+ }
+ final BytesRef result = byteSpare;
+ SuggestUtils.analyze(preFilter, term, field, new SuggestUtils.TokenConsumer() {
+
+ @Override
+ public void nextToken() throws IOException {
+ this.fillBytesRef(result);
+ }
+ }, spare);
+ return result;
+ }
+
+ protected void postFilter(final Candidate candidate, final CharsRef spare, BytesRef byteSpare, final List<Candidate> candidates) throws IOException {
+ if (postFilter == null) {
+ candidates.add(candidate);
+ } else {
+ final BytesRef result = byteSpare;
+ SuggestUtils.analyze(postFilter, candidate.term, field, new SuggestUtils.TokenConsumer() {
+ @Override
+ public void nextToken() throws IOException {
+ this.fillBytesRef(result);
+
+ if (posIncAttr.getPositionIncrement() > 0 && result.bytesEquals(candidate.term)) {
+ BytesRef term = BytesRef.deepCopyOf(result);
+ long freq = frequency(term);
+ candidates.add(new Candidate(BytesRef.deepCopyOf(term), freq, candidate.stringDistance, score(candidate.frequency, candidate.stringDistance, dictSize), false));
+ } else {
+ candidates.add(new Candidate(BytesRef.deepCopyOf(result), candidate.frequency, nonErrorLikelihood, score(candidate.frequency, candidate.stringDistance, dictSize), false));
+ }
+ }
+ }, spare);
+ }
+ }
+
+ private double score(long frequency, double errorScore, long dictionarySize) {
+ return errorScore * (((double)frequency + 1) / ((double)dictionarySize +1));
+ }
+
+ protected long thresholdFrequency(long termFrequency, long dictionarySize) {
+ if (termFrequency > 0) {
+ return (long) Math.max(0, Math.round(termFrequency * (Math.log10(termFrequency - frequencyPlateau) * (1.0 / Math.log10(logBase))) + 1));
+ }
+ return 0;
+
+ }
+
+ public static class CandidateSet {
+ public Candidate[] candidates;
+ public final Candidate originalTerm;
+
+ public CandidateSet(Candidate[] candidates, Candidate originalTerm) {
+ this.candidates = candidates;
+ this.originalTerm = originalTerm;
+ }
+
+ public void addCandidates(List<Candidate> candidates) {
+ final Set<Candidate> set = new HashSet<DirectCandidateGenerator.Candidate>(candidates);
+ for (int i = 0; i < this.candidates.length; i++) {
+ set.add(this.candidates[i]);
+ }
+ this.candidates = set.toArray(new Candidate[set.size()]);
+ }
+
+ public void addOneCandidate(Candidate candidate) {
+ Candidate[] candidates = new Candidate[this.candidates.length + 1];
+ System.arraycopy(this.candidates, 0, candidates, 0, this.candidates.length);
+ candidates[candidates.length-1] = candidate;
+ this.candidates = candidates;
+ }
+
+ }
+
+ public static class Candidate {
+ public static final Candidate[] EMPTY = new Candidate[0];
+ public final BytesRef term;
+ public final double stringDistance;
+ public final long frequency;
+ public final double score;
+ public final boolean userInput;
+
+ public Candidate(BytesRef term, long frequency, double stringDistance, double score, boolean userInput) {
+ this.frequency = frequency;
+ this.term = term;
+ this.stringDistance = stringDistance;
+ this.score = score;
+ this.userInput = userInput;
+ }
+
+ @Override
+ public String toString() {
+ return "Candidate [term=" + term.utf8ToString() + ", stringDistance=" + stringDistance + ", frequency=" + frequency +
+ (userInput ? ", userInput" : "" ) + "]";
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((term == null) ? 0 : term.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ Candidate other = (Candidate) obj;
+ if (term == null) {
+ if (other.term != null)
+ return false;
+ } else if (!term.equals(other.term))
+ return false;
+ return true;
+ }
+ }
+
+ @Override
+ public Candidate createCandidate(BytesRef term, long frequency, double channelScore, boolean userInput) throws IOException {
+ return new Candidate(term, frequency, channelScore, score(frequency, channelScore, dictSize), userInput);
+ }
+
+}
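The candidate score computed by score(frequency, errorScore, dictionarySize) above combines the error model with an add-one unigram prior:

    score(c) = errorScore(c) \cdot \frac{freq(c) + 1}{dictSize + 1}

so candidates that never occur in the dictionary still keep a small non-zero prior rather than a hard zero.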
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java b/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java
new file mode 100644
index 0000000..9725d7b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+
+import java.io.IOException;
+//TODO public for tests
+public final class LaplaceScorer extends WordScorer {
+
+ public static final WordScorerFactory FACTORY = new WordScorer.WordScorerFactory() {
+ @Override
+ public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) throws IOException {
+ return new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, 0.5);
+ }
+ };
+
+ private double alpha;
+
+ public LaplaceScorer(IndexReader reader, Terms terms, String field,
+ double realWordLikelyhood, BytesRef separator, double alpha) throws IOException {
+ super(reader, terms, field, realWordLikelyhood, separator);
+ this.alpha = alpha;
+ }
+
+ @Override
+ protected double scoreBigram(Candidate word, Candidate w_1) throws IOException {
+ SuggestUtils.join(separator, spare, w_1.term, word.term);
+ return (alpha + frequency(spare)) / (alpha + w_1.frequency + vocabluarySize);
+ }
+
+ @Override
+ protected double scoreTrigram(Candidate word, Candidate w_1, Candidate w_2) throws IOException {
+ SuggestUtils.join(separator, spare, w_2.term, w_1.term, word.term);
+ long trigramCount = frequency(spare);
+ SuggestUtils.join(separator, spare, w_1.term, word.term);
+ return (alpha + trigramCount) / (alpha + frequency(spare) + vocabluarySize);
+ }
+
+
+}
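In formula form, the bigram score implemented above is additive (Laplace) smoothing with pseudo-count \alpha (0.5 via the default factory):

    P(w_i \mid w_{i-1}) = \frac{\alpha + count(w_{i-1} w_i)}{\alpha + count(w_{i-1}) + |V|}

and scoreTrigram conditions on the preceding bigram in the same way, with the joined bigram count in the denominator.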
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpoatingScorer.java b/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpoatingScorer.java
new file mode 100644
index 0000000..2d46ec9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpoatingScorer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+
+import java.io.IOException;
+
+//TODO public for tests
+public final class LinearInterpoatingScorer extends WordScorer {
+
+ private final double unigramLambda;
+ private final double bigramLambda;
+ private final double trigramLambda;
+
+ public LinearInterpoatingScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator, double trigramLambda, double bigramLambda, double unigramLambda)
+ throws IOException {
+ super(reader, terms, field, realWordLikelyhood, separator);
+ double sum = unigramLambda + bigramLambda + trigramLambda;
+ this.unigramLambda = unigramLambda / sum;
+ this.bigramLambda = bigramLambda / sum;
+ this.trigramLambda = trigramLambda / sum;
+ }
+
+ @Override
+ protected double scoreBigram(Candidate word, Candidate w_1) throws IOException {
+ SuggestUtils.join(separator, spare, w_1.term, word.term);
+ final long count = frequency(spare);
+ if (count < 1) {
+ return unigramLambda * scoreUnigram(word);
+ }
+ return bigramLambda * (count / (0.5d+w_1.frequency)) + unigramLambda * scoreUnigram(word);
+ }
+
+ @Override
+ protected double scoreTrigram(Candidate w, Candidate w_1, Candidate w_2) throws IOException {
+ SuggestUtils.join(separator, spare, w.term, w_1.term, w_2.term);
+ final long count = frequency(spare);
+ if (count < 1) {
+ return scoreBigram(w, w_1);
+ }
+ SuggestUtils.join(separator, spare, w.term, w_1.term);
+ return trigramLambda * (count / (1.d+frequency(spare))) + scoreBigram(w, w_1);
+ }
+}
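The score implemented above is a linear interpolation of the maximum-likelihood n-gram estimates, with the supplied weights normalized in the constructor so that

    \lambda_{trigram} + \lambda_{bigram} + \lambda_{unigram} = 1
    P(w_i \mid w_{i-2} w_{i-1}) = \lambda_{trigram} \hat{P}(w_i \mid w_{i-2} w_{i-1}) + \lambda_{bigram} \hat{P}(w_i \mid w_{i-1}) + \lambda_{unigram} \hat{P}(w_i)

When a higher-order count is zero, the corresponding term is dropped entirely and only the lower orders contribute (see the count < 1 branches).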
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java b/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java
new file mode 100644
index 0000000..0d6f893
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet;
+//TODO public for tests
+public final class MultiCandidateGeneratorWrapper extends CandidateGenerator {
+
+
+ private final CandidateGenerator[] candidateGenerator;
+ private int numCandidates;
+
+ public MultiCandidateGeneratorWrapper(int numCandidates, CandidateGenerator...candidateGenerators) {
+ this.candidateGenerator = candidateGenerators;
+ this.numCandidates = numCandidates;
+ }
+ @Override
+ public boolean isKnownWord(BytesRef term) throws IOException {
+ return candidateGenerator[0].isKnownWord(term);
+ }
+
+ @Override
+ public long frequency(BytesRef term) throws IOException {
+ return candidateGenerator[0].frequency(term);
+ }
+
+ @Override
+ public CandidateSet drawCandidates(CandidateSet set) throws IOException {
+ for (CandidateGenerator generator : candidateGenerator) {
+ generator.drawCandidates(set);
+ }
+ return reduce(set, numCandidates);
+ }
+
+ private final CandidateSet reduce(CandidateSet set, int numCandidates) {
+ if (set.candidates.length > numCandidates) {
+ Candidate[] candidates = set.candidates;
+ Arrays.sort(candidates, new Comparator<Candidate>() {
+
+ @Override
+ public int compare(Candidate left, Candidate right) {
+ return Double.compare(right.score, left.score);
+ }
+ });
+ Candidate[] newSet = new Candidate[numCandidates];
+ System.arraycopy(candidates, 0, newSet, 0, numCandidates);
+ set.candidates = newSet;
+ }
+
+ return set;
+ }
+ @Override
+ public Candidate createCandidate(BytesRef term, long frequency, double channelScore, boolean userInput) throws IOException {
+ return candidateGenerator[0].createCandidate(term, frequency, channelScore, userInput);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java b/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java
new file mode 100644
index 0000000..47cd29d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.common.io.FastCharArrayReader;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+//TODO public for tests
+public final class NoisyChannelSpellChecker {
+ public static final double REAL_WORD_LIKELYHOOD = 0.95d;
+ public static final int DEFAULT_TOKEN_LIMIT = 10;
+ private final double realWordLikelihood;
+ private final boolean requireUnigram;
+ private final int tokenLimit;
+
+ public NoisyChannelSpellChecker() {
+ this(REAL_WORD_LIKELYHOOD);
+ }
+
+ public NoisyChannelSpellChecker(double nonErrorLikelihood) {
+ this(nonErrorLikelihood, true, DEFAULT_TOKEN_LIMIT);
+ }
+
+ public NoisyChannelSpellChecker(double nonErrorLikelihood, boolean requireUnigram, int tokenLimit) {
+ this.realWordLikelihood = nonErrorLikelihood;
+ this.requireUnigram = requireUnigram;
+ this.tokenLimit = tokenLimit;
+
+ }
+
+ public Result getCorrections(TokenStream stream, final CandidateGenerator generator,
+ float maxErrors, int numCorrections, IndexReader reader, WordScorer wordScorer, BytesRef separator, float confidence, int gramSize) throws IOException {
+
+ final List<CandidateSet> candidateSetsList = new ArrayList<DirectCandidateGenerator.CandidateSet>();
+ SuggestUtils.analyze(stream, new SuggestUtils.TokenConsumer() {
+ CandidateSet currentSet = null;
+ private TypeAttribute typeAttribute;
+ private final BytesRef termsRef = new BytesRef();
+ private boolean anyUnigram = false;
+ private boolean anyTokens = false;
+ @Override
+ public void reset(TokenStream stream) {
+ super.reset(stream);
+ typeAttribute = stream.addAttribute(TypeAttribute.class);
+ }
+
+ @Override
+ public void nextToken() throws IOException {
+ anyTokens = true;
+ BytesRef term = fillBytesRef(termsRef);
+ if (requireUnigram && typeAttribute.type() == ShingleFilter.DEFAULT_TOKEN_TYPE) {
+ return;
+ }
+ anyUnigram = true;
+ if (posIncAttr.getPositionIncrement() == 0 && typeAttribute.type() == SynonymFilter.TYPE_SYNONYM) {
+ assert currentSet != null;
+ long freq = 0;
+ if ((freq = generator.frequency(term)) > 0) {
+ currentSet.addOneCandidate(generator.createCandidate(BytesRef.deepCopyOf(term), freq, realWordLikelihood));
+ }
+ } else {
+ if (currentSet != null) {
+ candidateSetsList.add(currentSet);
+ }
+ currentSet = new CandidateSet(Candidate.EMPTY, generator.createCandidate(BytesRef.deepCopyOf(term), true));
+ }
+ }
+
+ @Override
+ public void end() {
+ if (currentSet != null) {
+ candidateSetsList.add(currentSet);
+ }
+ if (requireUnigram && !anyUnigram && anyTokens) {
+ throw new IllegalStateException("At least one unigram is required but all tokens were ngrams");
+ }
+ }
+ });
+
+ if (candidateSetsList.isEmpty() || candidateSetsList.size() >= tokenLimit) {
+ return Result.EMPTY;
+ }
+
+ for (CandidateSet candidateSet : candidateSetsList) {
+ generator.drawCandidates(candidateSet);
+ }
+ double cutoffScore = Double.MIN_VALUE;
+ CandidateScorer scorer = new CandidateScorer(wordScorer, numCorrections, gramSize);
+ CandidateSet[] candidateSets = candidateSetsList.toArray(new CandidateSet[candidateSetsList.size()]);
+ if (confidence > 0.0) {
+ Candidate[] candidates = new Candidate[candidateSets.length];
+ for (int i = 0; i < candidates.length; i++) {
+ candidates[i] = candidateSets[i].originalTerm;
+ }
+ double inputPhraseScore = scorer.score(candidates, candidateSets);
+ cutoffScore = inputPhraseScore * confidence;
+ }
+ Correction[] findBestCandiates = scorer.findBestCandiates(candidateSets, maxErrors, cutoffScore);
+
+ return new Result(findBestCandiates, cutoffScore);
+ }
+
+ public Result getCorrections(Analyzer analyzer, BytesRef query, CandidateGenerator generator,
+ float maxErrors, int numCorrections, IndexReader reader, String analysisField, WordScorer scorer, float confidence, int gramSize) throws IOException {
+
+ return getCorrections(tokenStream(analyzer, query, new CharsRef(), analysisField), generator, maxErrors, numCorrections, reader, scorer, new BytesRef(" "), confidence, gramSize);
+
+ }
+
+ public TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRef spare, String field) throws IOException {
+ UnicodeUtil.UTF8toUTF16(query, spare);
+ return analyzer.tokenStream(field, new FastCharArrayReader(spare.chars, spare.offset, spare.length));
+ }
+
+ public static class Result {
+ public static final Result EMPTY = new Result(Correction.EMPTY, Double.MIN_VALUE);
+ public final Correction[] corrections;
+ public final double cutoffScore;
+
+ public Result(Correction[] corrections, double cutoffScore) {
+ this.corrections = corrections;
+ this.cutoffScore = cutoffScore;
+ }
+ }
+}
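The class name refers to the classic noisy-channel decomposition: among all candidate chains c for the observed input w, the checker searches for

    c^{*} = \arg\max_{c} P(c) \cdot P(w \mid c)

where the WordScorer supplies the (smoothed) language model P(c) and the candidate generator's string-distance score stands in for the channel model P(w \mid c). When confidence > 0, the score of the unmodified input phrase times confidence becomes the cutoff that any returned correction must exceed.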
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
new file mode 100644
index 0000000..a5d8596
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.index.analysis.ShingleTokenFilterFactory;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.suggest.SuggestContextParser;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator;
+
+import java.io.IOException;
+
+public final class PhraseSuggestParser implements SuggestContextParser {
+
+ private PhraseSuggester suggester;
+
+ public PhraseSuggestParser(PhraseSuggester suggester) {
+ this.suggester = suggester;
+ }
+
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService) throws IOException {
+ PhraseSuggestionContext suggestion = new PhraseSuggestionContext(suggester);
+ XContentParser.Token token;
+ String fieldName = null;
+ boolean gramSizeSet = false;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if (!SuggestUtils.parseSuggestContext(parser, mapperService, fieldName, suggestion)) {
+ if ("real_word_error_likelihood".equals(fieldName) || "realWorldErrorLikelihood".equals(fieldName)) {
+ suggestion.setRealWordErrorLikelihood(parser.floatValue());
+ if (suggestion.realworldErrorLikelyhood() <= 0.0) {
+ throw new ElasticsearchIllegalArgumentException("real_word_error_likelihood must be > 0.0");
+ }
+ } else if ("confidence".equals(fieldName)) {
+ suggestion.setConfidence(parser.floatValue());
+ if (suggestion.confidence() < 0.0) {
+ throw new ElasticsearchIllegalArgumentException("confidence must be >= 0.0");
+ }
+ } else if ("separator".equals(fieldName)) {
+ suggestion.setSeparator(new BytesRef(parser.text()));
+ } else if ("max_errors".equals(fieldName) || "maxErrors".equals(fieldName)) {
+ suggestion.setMaxErrors(parser.floatValue());
+ if (suggestion.maxErrors() <= 0.0) {
+ throw new ElasticsearchIllegalArgumentException("max_errors must be > 0.0");
+ }
+ } else if ("gram_size".equals(fieldName) || "gramSize".equals(fieldName)) {
+ suggestion.setGramSize(parser.intValue());
+ if (suggestion.gramSize() < 1) {
+ throw new ElasticsearchIllegalArgumentException("gram_size must be >= 1");
+ }
+ gramSizeSet = true;
+ } else if ("force_unigrams".equals(fieldName) || "forceUnigrams".equals(fieldName)) {
+ suggestion.setRequireUnigram(parser.booleanValue());
+ } else if ("token_limit".equals(fieldName) || "tokenLimit".equals(fieldName)) {
+ int tokenLimit = parser.intValue();
+ if (tokenLimit <= 0) {
+ throw new ElasticsearchIllegalArgumentException("token_limit must be >= 1");
+ }
+ suggestion.setTokenLimit(tokenLimit);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]");
+ }
+ }
+ } else if (token == Token.START_ARRAY) {
+ if ("direct_generator".equals(fieldName) || "directGenerator".equals(fieldName)) {
+ // for now we only have a single type of generator
+ while ((token = parser.nextToken()) == Token.START_OBJECT) {
+ PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
+ while ((token = parser.nextToken()) != Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ }
+ if (token.isValue()) {
+ parseCandidateGenerator(parser, mapperService, fieldName, generator);
+ }
+ }
+ verifyGenerator(generator);
+ suggestion.addGenerator(generator);
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]");
+ }
+ } else if (token == Token.START_OBJECT) {
+ if ("smoothing".equals(fieldName)) {
+ parseSmoothingModel(parser, suggestion, fieldName);
+ } else if ("highlight".equals(fieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("pre_tag".equals(fieldName) || "preTag".equals(fieldName)) {
+ suggestion.setPreTag(parser.bytes());
+ } else if ("post_tag".equals(fieldName) || "postTag".equals(fieldName)) {
+ suggestion.setPostTag(parser.bytes());
+ } else {
+ throw new ElasticsearchIllegalArgumentException(
+ "suggester[phrase][highlight] doesn't support field [" + fieldName + "]");
+ }
+ }
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]");
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]");
+ }
+ }
+
+ if (suggestion.getField() == null) {
+ throw new ElasticsearchIllegalArgumentException("The required field option is missing");
+ }
+
+ if (mapperService.smartNameFieldMapper(suggestion.getField()) == null) {
+ throw new ElasticsearchIllegalArgumentException("No mapping found for field [" + suggestion.getField() + "]");
+ }
+
+ if (suggestion.model() == null) {
+ suggestion.setModel(StupidBackoffScorer.FACTORY);
+ }
+
+ if (!gramSizeSet || suggestion.generators().isEmpty()) {
+ final ShingleTokenFilterFactory.Factory shingleFilterFactory = SuggestUtils.getShingleFilterFactory(suggestion.getAnalyzer() == null ? mapperService.fieldSearchAnalyzer(suggestion.getField()) : suggestion.getAnalyzer());
+ if (!gramSizeSet) {
+ // try to detect the shingle size
+ if (shingleFilterFactory != null) {
+ suggestion.setGramSize(shingleFilterFactory.getMaxShingleSize());
+ if (suggestion.getAnalyzer() == null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams()) {
+ throw new ElasticsearchIllegalArgumentException("The default analyzer for field: [" + suggestion.getField() + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly");
+ }
+ }
+ }
+ if (suggestion.generators().isEmpty()) {
+ if (shingleFilterFactory != null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams() && suggestion.getRequireUnigram()) {
+ throw new ElasticsearchIllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + suggestion.getField() + "] since it doesn't emit unigrams. If this is intentional try to set the candidate generator field explicitly");
+ }
+ // use a default generator on the same field
+ DirectCandidateGenerator generator = new DirectCandidateGenerator();
+ generator.setField(suggestion.getField());
+ suggestion.addGenerator(generator);
+ }
+ }
+
+ return suggestion;
+ }
+
+ public void parseSmoothingModel(XContentParser parser, PhraseSuggestionContext suggestion, String fieldName) throws IOException {
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ if ("linear".equals(fieldName)) {
+ ensureNoSmoothing(suggestion);
+ final double[] lambdas = new double[3];
+ while ((token = parser.nextToken()) != Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ }
+ if (token.isValue()) {
+ if ("trigram_lambda".equals(fieldName) || "trigramLambda".equals(fieldName)) {
+ lambdas[0] = parser.doubleValue();
+ if (lambdas[0] < 0) {
+ throw new ElasticsearchIllegalArgumentException("trigram_lambda must be positive");
+ }
+ } else if ("bigram_lambda".equals(fieldName) || "bigramLambda".equals(fieldName)) {
+ lambdas[1] = parser.doubleValue();
+ if (lambdas[1] < 0) {
+ throw new ElasticsearchIllegalArgumentException("bigram_lambda must be positive");
+ }
+ } else if ("unigram_lambda".equals(fieldName) || "unigramLambda".equals(fieldName)) {
+ lambdas[2] = parser.doubleValue();
+ if (lambdas[2] < 0) {
+ throw new ElasticsearchIllegalArgumentException("unigram_lambda must be positive");
+ }
+ } else {
+ throw new ElasticsearchIllegalArgumentException(
+ "suggester[phrase][smoothing][linear] doesn't support field [" + fieldName + "]");
+ }
+ }
+ }
+ double sum = 0.0d;
+ for (int i = 0; i < lambdas.length; i++) {
+ sum += lambdas[i];
+ }
+ if (Math.abs(sum - 1.0) > 0.001) {
+ throw new ElasticsearchIllegalArgumentException("linear smoothing lambdas must sum to 1");
+ }
+ suggestion.setModel(new WordScorer.WordScorerFactory() {
+ @Override
+ public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator)
+ throws IOException {
+ return new LinearInterpoatingScorer(reader, terms, field, realWordLikelyhood, separator, lambdas[0], lambdas[1],
+ lambdas[2]);
+ }
+ });
+ } else if ("laplace".equals(fieldName)) {
+ ensureNoSmoothing(suggestion);
+ double theAlpha = 0.5;
+
+ while ((token = parser.nextToken()) != Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ }
+ if (token.isValue() && "alpha".equals(fieldName)) {
+ theAlpha = parser.doubleValue();
+ }
+ }
+ final double alpha = theAlpha;
+ suggestion.setModel(new WordScorer.WordScorerFactory() {
+ @Override
+ public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator)
+ throws IOException {
+ return new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, alpha);
+ }
+ });
+
+ } else if ("stupid_backoff".equals(fieldName) || "stupidBackoff".equals(fieldName)) {
+ ensureNoSmoothing(suggestion);
+ double theDiscount = 0.4;
+ while ((token = parser.nextToken()) != Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ }
+ if (token.isValue() && "discount".equals(fieldName)) {
+ theDiscount = parser.doubleValue();
+ }
+ }
+ final double discount = theDiscount;
+ suggestion.setModel(new WordScorer.WordScorerFactory() {
+ @Override
+ public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator)
+ throws IOException {
+ return new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, discount);
+ }
+ });
+
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]");
+ }
+ }
+ }
+ }
+
+ private void ensureNoSmoothing(PhraseSuggestionContext suggestion) {
+ if (suggestion.model() != null) {
+ throw new ElasticsearchIllegalArgumentException("only one smoothing model supported");
+ }
+ }
+
+ private void verifyGenerator(PhraseSuggestionContext.DirectCandidateGenerator suggestion) {
+ // Verify options and set defaults
+ if (suggestion.field() == null) {
+ throw new ElasticsearchIllegalArgumentException("The required field option is missing");
+ }
+ }
+
+ private void parseCandidateGenerator(XContentParser parser, MapperService mapperService, String fieldName,
+ PhraseSuggestionContext.DirectCandidateGenerator generator) throws IOException {
+ if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator)) {
+ if ("field".equals(fieldName)) {
+ generator.setField(parser.text());
+ if (mapperService.smartNameFieldMapper(generator.field()) == null) {
+ throw new ElasticsearchIllegalArgumentException("No mapping found for field [" + generator.field() + "]");
+ }
+ } else if ("size".equals(fieldName)) {
+ generator.size(parser.intValue());
+ } else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) {
+ String analyzerName = parser.text();
+ Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
+ }
+ generator.preFilter(analyzer);
+ } else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) {
+ String analyzerName = parser.text();
+ Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
+ if (analyzer == null) {
+ throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
+ }
+ generator.postFilter(analyzer);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]");
+ }
+ }
+ }
+
+}
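For reference, a representative phrase-suggest body exercising the options this parser understands (values are illustrative, and "reverse" assumes an analyzer registered under that name; the suggest text and outer wrapper are consumed by the generic suggest parser before control reaches this class):

    "phrase" : {
      "field" : "title.trigram",
      "gram_size" : 3,
      "max_errors" : 2,
      "confidence" : 1.0,
      "real_word_error_likelihood" : 0.95,
      "token_limit" : 10,
      "force_unigrams" : true,
      "separator" : " ",
      "smoothing" : { "laplace" : { "alpha" : 0.7 } },
      "direct_generator" : [ { "field" : "title.trigram", "size" : 10, "pre_filter" : "reverse", "post_filter" : "reverse" } ],
      "highlight" : { "pre_tag" : "<em>", "post_tag" : "</em>" }
    }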
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
new file mode 100644
index 0000000..15f7148
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.search.suggest.Suggest.Suggestion;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.*;
+import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
+ private final BytesRef SEPARATOR = new BytesRef(" ");
+
+ /*
+ * More Ideas:
+ * - add ability to find whitespace problems -> we could build a poor man's decompounder with our index, based on an automaton?
+ * - add ability to build different error models maybe based on a confusion matrix?
+ * - try to combine a token with its subsequent token to find / detect word splits (optional)
+ * - for this to work we need some way to define the position length of a candidate
+ * - phonetic filters could be interesting here too for candidate selection
+ */
+ @Override
+ public Suggestion<? extends Entry<? extends Option>> innerExecute(String name, PhraseSuggestionContext suggestion,
+ IndexReader indexReader, CharsRef spare) throws IOException {
+ double realWordErrorLikelihood = suggestion.realworldErrorLikelyhood();
+ final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize());
+
+ List<PhraseSuggestionContext.DirectCandidateGenerator> generators = suggestion.generators();
+ final int numGenerators = generators.size();
+ final List<CandidateGenerator> gens = new ArrayList<CandidateGenerator>(generators.size());
+ for (int i = 0; i < numGenerators; i++) {
+ PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i);
+ DirectSpellChecker directSpellChecker = SuggestUtils.getDirectSpellChecker(generator);
+ Terms terms = MultiFields.getTerms(indexReader, generator.field());
+ if (terms != null) {
+ gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(),
+ indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms));
+ }
+ }
+ final String suggestField = suggestion.getField();
+ final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField);
+ if (gens.size() > 0 && suggestTerms != null) {
+ final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit());
+ final BytesRef separator = suggestion.separator();
+ TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField());
+
+ WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator);
+ Result checkerResult = checker.getCorrections(stream, new MultiCandidateGeneratorWrapper(suggestion.getShardSize(),
+ gens.toArray(new CandidateGenerator[gens.size()])), suggestion.maxErrors(),
+ suggestion.getShardSize(), indexReader, wordScorer, separator, suggestion.confidence(), suggestion.gramSize());
+
+ PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
+ response.addTerm(resultEntry);
+
+ BytesRef byteSpare = new BytesRef();
+ for (Correction correction : checkerResult.corrections) {
+ UnicodeUtil.UTF8toUTF16(correction.join(SEPARATOR, byteSpare, null, null), spare);
+ Text phrase = new StringText(spare.toString());
+ Text highlighted = null;
+ if (suggestion.getPreTag() != null) {
+ UnicodeUtil.UTF8toUTF16(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()), spare);
+ highlighted = new StringText(spare.toString());
+ }
+ resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score)));
+ }
+ } else {
+ response.addTerm(buildResultEntry(suggestion, spare, Double.MIN_VALUE));
+ }
+ return response;
+ }
+
+ private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRef spare, double cutoffScore) {
+ UnicodeUtil.UTF8toUTF16(suggestion.getText(), spare);
+ return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length, cutoffScore);
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] {"phrase"};
+ }
+
+ @Override
+ public SuggestContextParser getContextParser() {
+ return new PhraseSuggestParser(this);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java
new file mode 100644
index 0000000..6435c37
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.phrase;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.Suggest.Suggestion;
+
+import java.io.IOException;
+
+/**
+ * Suggestion entry returned from the {@link PhraseSuggester}.
+ */
+public class PhraseSuggestion extends Suggest.Suggestion<PhraseSuggestion.Entry> {
+ public static final int TYPE = 3;
+
+ public PhraseSuggestion() {
+ }
+
+ public PhraseSuggestion(String name, int size) {
+ super(name, size);
+ }
+
+ @Override
+ public int getType() {
+ return TYPE;
+ }
+
+ @Override
+ protected Entry newEntry() {
+ return new Entry();
+ }
+
+ public static class Entry extends Suggestion.Entry<Suggestion.Entry.Option> {
+ static class Fields {
+ static final XContentBuilderString CUTOFF_SCORE = new XContentBuilderString("cutoff_score");
+ }
+
+ protected double cutoffScore = Double.MIN_VALUE;
+
+ public Entry(Text text, int offset, int length, double cutoffScore) {
+ super(text, offset, length);
+ this.cutoffScore = cutoffScore;
+ }
+
+ public Entry() {
+ }
+
+ /**
+         * @return the cutoff score for suggestions: input phrase score * confidence for the phrase suggester, 0 otherwise
+ */
+ public double getCutoffScore() {
+ return cutoffScore;
+ }
+
+ @Override
+ protected void merge(Suggestion.Entry<Suggestion.Entry.Option> other) {
+ super.merge(other);
+ // If the cluster contains both pre 0.90.4 and post 0.90.4 nodes then we'll see Suggestion.Entry
+ // objects being merged with PhraseSuggestion.Entry objects. We merge Suggestion.Entry objects
+ // by assuming they had a low cutoff score rather than a high one as that is the more common scenario
+ // and the simplest one for us to implement.
+ if (!(other instanceof PhraseSuggestion.Entry)) {
+ return;
+ }
+ PhraseSuggestion.Entry otherSuggestionEntry = (PhraseSuggestion.Entry) other;
+ this.cutoffScore = Math.max(this.cutoffScore, otherSuggestionEntry.cutoffScore);
+ }
+
+ @Override
+ public void addOption(Suggestion.Entry.Option option) {
+ if (option.getScore() > this.cutoffScore) {
+ this.options.add(option);
+ }
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ cutoffScore = in.readDouble();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeDouble(cutoffScore);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
new file mode 100644
index 0000000..9e5dfb5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
@@ -0,0 +1,613 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
+
+/**
+ * Defines the actual suggest command for phrase suggestions (<tt>phrase</tt>).
+ */
+public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSuggestionBuilder> {
+ private Float maxErrors;
+ private String separator;
+ private Float realWordErrorLikelihood;
+ private Float confidence;
+ private final Map<String, List<CandidateGenerator>> generators = new HashMap<String, List<PhraseSuggestionBuilder.CandidateGenerator>>();
+ private Integer gramSize;
+ private SmoothingModel model;
+ private Boolean forceUnigrams;
+ private Integer tokenLimit;
+ private String preTag;
+ private String postTag;
+
+ public PhraseSuggestionBuilder(String name) {
+ super(name, "phrase");
+ }
+
+ /**
+ * Sets the gram size for the n-gram model used for this suggester. The
+ * default value is <tt>1</tt> corresponding to <tt>unigrams</tt>. Use
+ * <tt>2</tt> for <tt>bigrams</tt> and <tt>3</tt> for <tt>trigrams</tt>.
+ */
+ public PhraseSuggestionBuilder gramSize(int gramSize) {
+ if (gramSize < 1) {
+ throw new ElasticsearchIllegalArgumentException("gramSize must be >= 1");
+ }
+ this.gramSize = gramSize;
+ return this;
+ }
+
+ /**
+     * Sets the maximum percentage of the terms considered to be
+     * misspellings in order to form a correction. This method accepts a float
+     * value in the range [0..1) as a fraction of the actual query terms, or a
+     * number <tt>&gt;=1</tt> as an absolute number of query terms.
+     *
+     * The default is <tt>1.0</tt>, which corresponds to returning only
+     * corrections with at most one misspelled term.
+ */
+ public PhraseSuggestionBuilder maxErrors(Float maxErrors) {
+ this.maxErrors = maxErrors;
+ return this;
+ }
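+    // Illustrative only: maxErrors(0.5f) lets up to half of the query terms be
+    // corrected, while maxErrors(2f) allows at most two corrected terms
+    // regardless of the phrase length.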
+
+ /**
+ * Sets the separator that is used to separate terms in the bigram field. If
+     * not set, the whitespace character is used as the separator.
+ */
+ public PhraseSuggestionBuilder separator(String separator) {
+ this.separator = separator;
+ return this;
+ }
+
+ /**
+     * Sets the likelihood of a term being misspelled even if the term exists
+     * in the dictionary. The default is <tt>0.95</tt>, corresponding to 5% of
+     * the real words being misspelled.
+ */
+ public PhraseSuggestionBuilder realWordErrorLikelihood(Float realWordErrorLikelihood) {
+ this.realWordErrorLikelihood = realWordErrorLikelihood;
+ return this;
+ }
+
+ /**
+ * Sets the confidence level for this suggester. The confidence level
+     * defines a factor applied to the input phrase score, which is used as a
+ * threshold for other suggest candidates. Only candidates that score higher
+ * than the threshold will be included in the result. For instance a
+ * confidence level of <tt>1.0</tt> will only return suggestions that score
+ * higher than the input phrase. If set to <tt>0.0</tt> the top N candidates
+     * are returned. The default is <tt>1.0</tt>.
+ */
+ public PhraseSuggestionBuilder confidence(Float confidence) {
+ this.confidence = confidence;
+ return this;
+ }
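+    // Illustrative only: with confidence(1.0f) a candidate is kept only if it
+    // scores higher than the input phrase itself; confidence(0.0f) always
+    // returns the top N candidates, even if they score worse than the input.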
+
+ /**
+ * Adds a {@link CandidateGenerator} to this suggester. The
+ * {@link CandidateGenerator} is used to draw candidates for each individual
+ * phrase term before the candidates are scored.
+ */
+ public PhraseSuggestionBuilder addCandidateGenerator(CandidateGenerator generator) {
+ List<CandidateGenerator> list = this.generators.get(generator.getType());
+ if (list == null) {
+ list = new ArrayList<PhraseSuggestionBuilder.CandidateGenerator>();
+ this.generators.put(generator.getType(), list);
+ }
+ list.add(generator);
+ return this;
+ }
+
+ /**
+ * Clear the candidate generators.
+ */
+ public PhraseSuggestionBuilder clearCandidateGenerators() {
+ this.generators.clear();
+ return this;
+ }
+
+ /**
+ * If set to <code>true</code> the phrase suggester will fail if the analyzer only
+     * produces ngrams. The default is <code>true</code>.
+ */
+ public PhraseSuggestionBuilder forceUnigrams(boolean forceUnigrams) {
+ this.forceUnigrams = forceUnigrams;
+ return this;
+ }
+
+ /**
+ * Sets an explicit smoothing model used for this suggester. The default is
+     * {@link StupidBackoff}.
+ */
+ public PhraseSuggestionBuilder smoothingModel(SmoothingModel model) {
+ this.model = model;
+ return this;
+ }
+
+ public PhraseSuggestionBuilder tokenLimit(int tokenLimit) {
+ this.tokenLimit = tokenLimit;
+ return this;
+ }
+
+ /**
+     * Sets up highlighting for suggestions. If this is called, a highlight field
+ * is returned with suggestions wrapping changed tokens with preTag and postTag.
+ */
+ public PhraseSuggestionBuilder highlight(String preTag, String postTag) {
+ if ((preTag == null) != (postTag == null)) {
+ throw new ElasticsearchIllegalArgumentException("Pre and post tag must both be null or both not be null.");
+ }
+ this.preTag = preTag;
+ this.postTag = postTag;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ if (realWordErrorLikelihood != null) {
+ builder.field("real_word_error_likelihood", realWordErrorLikelihood);
+ }
+ if (confidence != null) {
+ builder.field("confidence", confidence);
+ }
+ if (separator != null) {
+ builder.field("separator", separator);
+ }
+ if (maxErrors != null) {
+ builder.field("max_errors", maxErrors);
+ }
+ if (gramSize != null) {
+ builder.field("gram_size", gramSize);
+ }
+ if (forceUnigrams != null) {
+ builder.field("force_unigrams", forceUnigrams);
+ }
+ if (tokenLimit != null) {
+ builder.field("token_limit", tokenLimit);
+ }
+ if (!generators.isEmpty()) {
+ Set<Entry<String, List<CandidateGenerator>>> entrySet = generators.entrySet();
+ for (Entry<String, List<CandidateGenerator>> entry : entrySet) {
+ builder.startArray(entry.getKey());
+ for (CandidateGenerator generator : entry.getValue()) {
+ generator.toXContent(builder, params);
+ }
+ builder.endArray();
+ }
+ }
+ if (model != null) {
+ builder.startObject("smoothing");
+ model.toXContent(builder, params);
+ builder.endObject();
+ }
+ if (preTag != null) {
+ builder.startObject("highlight");
+ builder.field("pre_tag", preTag);
+ builder.field("post_tag", postTag);
+ builder.endObject();
+ }
+ return builder;
+ }
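+    // A sketch of the source this method emits (values are examples only):
+    //   "real_word_error_likelihood": 0.95, "confidence": 1.0, "max_errors": 1.0,
+    //   "gram_size": 2, "smoothing": { "stupid_backoff": { "discount": 0.4 } },
+    //   "highlight": { "pre_tag": "<em>", "post_tag": "</em>" }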
+
+ /**
+ * Creates a new {@link DirectCandidateGenerator}
+ *
+ * @param field
+ * the field this candidate generator operates on.
+ */
+ public static DirectCandidateGenerator candidateGenerator(String field) {
+ return new DirectCandidateGenerator(field);
+ }
+
+ /**
+ * A "stupid-backoff" smoothing model simialr to <a
+ * href="http://en.wikipedia.org/wiki/Katz's_back-off_model"> Katz's
+ * Backoff</a>. This model is used as the default if no model is configured.
+ * <p>
+ * See <a
+ * href="http://en.wikipedia.org/wiki/N-gram#Smoothing_techniques">N-Gram
+ * Smoothing</a> for details.
+ * </p>
+ */
+ public static final class StupidBackoff extends SmoothingModel {
+ private final double discount;
+
+ /**
+ * Creates a Stupid-Backoff smoothing model.
+ *
+ * @param discount
+         *            the discount given to lower order ngrams if the higher order ngram doesn't exist
+ */
+ public StupidBackoff(double discount) {
+ super("stupid_backoff");
+ this.discount = discount;
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("discount", discount);
+ return builder;
+ }
+ }
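+    // Illustrative only: this configures the model the suggester also uses by
+    // default (see StupidBackoffScorer, discount 0.4):
+    //   new PhraseSuggestionBuilder("sugg").smoothingModel(new StupidBackoff(0.4));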
+
+ /**
+ * An <a href="http://en.wikipedia.org/wiki/Additive_smoothing">additive
+ * smoothing</a> model.
+ * <p>
+ * See <a
+ * href="http://en.wikipedia.org/wiki/N-gram#Smoothing_techniques">N-Gram
+ * Smoothing</a> for details.
+ * </p>
+ */
+ public static final class Laplace extends SmoothingModel {
+ private final double alpha;
+ /**
+ * Creates a Laplace smoothing model.
+ *
+         * @param alpha
+         *            the constant added to all counts (the additive smoothing parameter)
+ */
+ public Laplace(double alpha) {
+ super("laplace");
+ this.alpha = alpha;
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("alpha", alpha);
+ return builder;
+ }
+ }
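+    // In the usual additive-smoothing formulation (an assumption here, the
+    // scorer itself is not part of this file) a count c out of N observations
+    // over a vocabulary of size V is smoothed to P = (c + alpha) / (N + alpha * V).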
+
+
+ public static abstract class SmoothingModel implements ToXContent {
+ private final String type;
+
+ protected SmoothingModel(String type) {
+ this.type = type;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(type);
+            innerToXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException;
+ }
+
+ /**
+ * Linear interpolation smoothing model.
+ * <p>
+ * See <a
+ * href="http://en.wikipedia.org/wiki/N-gram#Smoothing_techniques">N-Gram
+ * Smoothing</a> for details.
+ * </p>
+ */
+ public static final class LinearInterpolation extends SmoothingModel {
+ private final double trigramLambda;
+ private final double bigramLambda;
+ private final double unigramLambda;
+
+ /**
+ * Creates a linear interpolation smoothing model.
+ *
+ * Note: the lambdas must sum up to one.
+ *
+ * @param trigramLambda
+ * the trigram lambda
+ * @param bigramLambda
+ * the bigram lambda
+ * @param unigramLambda
+ * the unigram lambda
+ */
+ public LinearInterpolation(double trigramLambda, double bigramLambda, double unigramLambda) {
+ super("linear");
+ this.trigramLambda = trigramLambda;
+ this.bigramLambda = bigramLambda;
+ this.unigramLambda = unigramLambda;
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("trigram_lambda", trigramLambda);
+ builder.field("bigram_lambda", bigramLambda);
+ builder.field("unigram_lambda", unigramLambda);
+ return builder;
+ }
+ }
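+    // The interpolated probability takes the standard form
+    //   P(w | w_-2, w_-1) = trigramLambda * P(w | w_-2, w_-1)
+    //                     + bigramLambda  * P(w | w_-1)
+    //                     + unigramLambda * P(w)
+    // which is why the three lambdas must sum up to one.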
+
+ /**
+ * {@link CandidateGenerator} base class.
+ */
+ public static abstract class CandidateGenerator implements ToXContent {
+ private final String type;
+
+ public CandidateGenerator(String type) {
+ this.type = type;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ }
+
+    /**
+     * A {@link CandidateGenerator} that fetches candidate suggestions directly
+     * from the terms of an index field.
+     */
+ public static final class DirectCandidateGenerator extends CandidateGenerator {
+ private final String field;
+ private String preFilter;
+ private String postFilter;
+ private String suggestMode;
+ private Float accuracy;
+ private Integer size;
+ private String sort;
+ private String stringDistance;
+ private Integer maxEdits;
+ private Integer maxInspections;
+ private Float maxTermFreq;
+ private Integer prefixLength;
+ private Integer minWordLength;
+ private Float minDocFreq;
+
+ /**
+         * The field to fetch candidate suggestions from. This is a required
+         * option and can only be set through this constructor.
+ */
+ public DirectCandidateGenerator(String field) {
+ super("direct_generator");
+ this.field = field;
+ }
+
+ /**
+         * The suggest mode controls which suggested terms are included, i.e.
+         * for which suggest text tokens terms should be suggested.
+         * Three possible values can be specified:
+         * <ol>
+         * <li><code>missing</code> - Only suggest terms in the suggest text
+         * that aren't in the index. This is the default.
+         * <li><code>popular</code> - Only suggest terms that occur in more docs
+         * than the original suggest text term.
+ * <li><code>always</code> - Suggest any matching suggest terms based on
+ * tokens in the suggest text.
+ * </ol>
+ */
+ public DirectCandidateGenerator suggestMode(String suggestMode) {
+ this.suggestMode = suggestMode;
+ return this;
+ }
+
+ /**
+         * Sets how similar the suggested terms need to be, at minimum, to the
+         * original suggest text tokens. A value between 0 and 1 can be
+ * specified. This value will be compared to the string distance result
+ * of each candidate spelling correction.
+ * <p/>
+ * Default is <tt>0.5</tt>
+ */
+ public DirectCandidateGenerator accuracy(float accuracy) {
+ this.accuracy = accuracy;
+ return this;
+ }
+
+ /**
+ * Sets the maximum suggestions to be returned per suggest text term.
+ */
+ public DirectCandidateGenerator size(int size) {
+ if (size <= 0) {
+ throw new ElasticsearchIllegalArgumentException("Size must be positive");
+ }
+ this.size = size;
+ return this;
+ }
+
+ /**
+ * Sets how to sort the suggest terms per suggest text token. Two
+ * possible values:
+ * <ol>
+ * <li><code>score</code> - Sort should first be based on score, then
+ * document frequency and then the term itself.
+ * <li><code>frequency</code> - Sort should first be based on document
+         * frequency, then score and then the term itself.
+ * </ol>
+ * <p/>
+ * What the score is depends on the suggester being used.
+ */
+ public DirectCandidateGenerator sort(String sort) {
+ this.sort = sort;
+ return this;
+ }
+
+ /**
+ * Sets what string distance implementation to use for comparing how
+         * similar suggested terms are. Five possible values can be specified:
+ * <ol>
+ * <li><code>internal</code> - This is the default and is based on
+ * <code>damerau_levenshtein</code>, but highly optimized for comparing
+ * string distance for terms inside the index.
+ * <li><code>damerau_levenshtein</code> - String distance algorithm
+ * based on Damerau-Levenshtein algorithm.
+ * <li><code>levenstein</code> - String distance algorithm based on
+         * Levenshtein edit distance algorithm.
+ * <li><code>jarowinkler</code> - String distance algorithm based on
+ * Jaro-Winkler algorithm.
+ * <li><code>ngram</code> - String distance algorithm based on character
+ * n-grams.
+ * </ol>
+ */
+ public DirectCandidateGenerator stringDistance(String stringDistance) {
+ this.stringDistance = stringDistance;
+ return this;
+ }
+
+ /**
+ * Sets the maximum edit distance candidate suggestions can have in
+ * order to be considered as a suggestion. Can only be a value between 1
+         * and 2. Any other value results in a bad request error being thrown.
+ * Defaults to <tt>2</tt>.
+ */
+ public DirectCandidateGenerator maxEdits(Integer maxEdits) {
+ this.maxEdits = maxEdits;
+ return this;
+ }
+
+ /**
+         * A factor that is multiplied with the size in order to inspect
+ * more candidate suggestions. Can improve accuracy at the cost of
+ * performance. Defaults to <tt>5</tt>.
+ */
+ public DirectCandidateGenerator maxInspections(Integer maxInspections) {
+ this.maxInspections = maxInspections;
+ return this;
+ }
+
+ /**
+         * Sets a maximum threshold on the number of documents in which a
+         * suggest text token can exist and still be considered for correction.
+         * Can be a relative percentage (e.g. 0.4) or an absolute number
+         * representing document frequencies. If a value higher than 1 is
+         * specified, the number cannot be fractional. Defaults to <tt>0.01</tt>.
+         * <p/>
+         * This can be used to exclude high frequency terms from being
+         * suggested. High frequency terms are usually spelled correctly; on
+         * top of that, this also improves suggest performance.
+ */
+ public DirectCandidateGenerator maxTermFreq(float maxTermFreq) {
+ this.maxTermFreq = maxTermFreq;
+ return this;
+ }
+
+ /**
+         * Sets the minimal number of prefix characters that must match in order
+         * for a term to be a candidate suggestion. Defaults to 1. Increasing
+         * this number improves suggest performance, since misspellings usually
+         * don't occur at the beginning of terms.
+ */
+ public DirectCandidateGenerator prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ /**
+ * The minimum length a suggest text term must have in order to be
+ * corrected. Defaults to <tt>4</tt>.
+ */
+ public DirectCandidateGenerator minWordLength(int minWordLength) {
+ this.minWordLength = minWordLength;
+ return this;
+ }
+
+ /**
+ * Sets a minimal threshold in number of documents a suggested term
+ * should appear in. This can be specified as an absolute number or as a
+ * relative percentage of number of documents. This can improve quality
+         * by only suggesting high frequency terms. Defaults to <tt>0f</tt>,
+         * which disables the check. If a value higher than 1 is specified then
+         * the number cannot be fractional.
+ */
+ public DirectCandidateGenerator minDocFreq(float minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ return this;
+ }
+
+ /**
+ * Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator.
+ * This filter is applied to the original token before candidates are generated.
+ */
+ public DirectCandidateGenerator preFilter(String preFilter) {
+ this.preFilter = preFilter;
+ return this;
+ }
+
+ /**
+ * Sets a filter (analyzer) that is applied to each of the generated tokens
+ * before they are passed to the actual phrase scorer.
+ */
+ public DirectCandidateGenerator postFilter(String postFilter) {
+ this.postFilter = postFilter;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ if (field != null) {
+ builder.field("field", field);
+ }
+ if (suggestMode != null) {
+ builder.field("suggest_mode", suggestMode);
+ }
+ if (accuracy != null) {
+ builder.field("accuracy", accuracy);
+ }
+ if (size != null) {
+ builder.field("size", size);
+ }
+ if (sort != null) {
+ builder.field("sort", sort);
+ }
+ if (stringDistance != null) {
+ builder.field("string_distance", stringDistance);
+ }
+ if (maxEdits != null) {
+ builder.field("max_edits", maxEdits);
+ }
+ if (maxInspections != null) {
+ builder.field("max_inspections", maxInspections);
+ }
+ if (maxTermFreq != null) {
+ builder.field("max_term_freq", maxTermFreq);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (minWordLength != null) {
+ builder.field("min_word_length", minWordLength);
+ }
+ if (minDocFreq != null) {
+ builder.field("min_doc_freq", minDocFreq);
+ }
+ if (preFilter != null) {
+ builder.field("pre_filter", preFilter);
+ }
+ if (postFilter != null) {
+ builder.field("post_filter", postFilter);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java
new file mode 100644
index 0000000..81de10d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.suggest.DirectSpellcheckerSettings;
+import org.elasticsearch.search.suggest.Suggester;
+import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
+
+class PhraseSuggestionContext extends SuggestionContext {
+ private final BytesRef SEPARATOR = new BytesRef(" ");
+
+ private float maxErrors = 0.5f;
+ private BytesRef separator = SEPARATOR;
+ private float realworldErrorLikelihood = 0.95f;
+ private List<DirectCandidateGenerator> generators = new ArrayList<PhraseSuggestionContext.DirectCandidateGenerator>();
+ private int gramSize = 1;
+ private float confidence = 1.0f;
+ private int tokenLimit = NoisyChannelSpellChecker.DEFAULT_TOKEN_LIMIT;
+ private BytesRef preTag;
+ private BytesRef postTag;
+
+ private WordScorer.WordScorerFactory scorer;
+
+ private boolean requireUnigram = true;
+
+ public PhraseSuggestionContext(Suggester<? extends PhraseSuggestionContext> suggester) {
+ super(suggester);
+ }
+
+ public float maxErrors() {
+ return maxErrors;
+ }
+
+ public void setMaxErrors(Float maxErrors) {
+ this.maxErrors = maxErrors;
+ }
+
+ public BytesRef separator() {
+ return separator;
+ }
+
+ public void setSeparator(BytesRef separator) {
+ this.separator = separator;
+ }
+
+ public Float realworldErrorLikelyhood() {
+ return realworldErrorLikelihood;
+ }
+
+ public void setRealWordErrorLikelihood(Float realworldErrorLikelihood) {
+ this.realworldErrorLikelihood = realworldErrorLikelihood;
+ }
+
+ public void addGenerator(DirectCandidateGenerator generator) {
+ this.generators.add(generator);
+ }
+
+ public List<DirectCandidateGenerator> generators() {
+        return this.generators;
+ }
+
+ public void setGramSize(int gramSize) {
+ this.gramSize = gramSize;
+ }
+
+ public int gramSize() {
+ return gramSize;
+ }
+
+ public float confidence() {
+ return confidence;
+ }
+
+ public void setConfidence(float confidence) {
+ this.confidence = confidence;
+ }
+
+ public void setModel(WordScorer.WordScorerFactory scorer) {
+ this.scorer = scorer;
+ }
+
+ public WordScorer.WordScorerFactory model() {
+ return scorer;
+ }
+
+ static class DirectCandidateGenerator extends DirectSpellcheckerSettings {
+ private Analyzer preFilter;
+ private Analyzer postFilter;
+ private String field;
+ private int size = 5;
+
+ public String field() {
+ return field;
+ }
+
+ public void setField(String field) {
+ this.field = field;
+ }
+
+ public int size() {
+ return size;
+ }
+
+ public void size(int size) {
+ if (size <= 0) {
+ throw new ElasticsearchIllegalArgumentException("Size must be positive");
+ }
+ this.size = size;
+ }
+
+ public Analyzer preFilter() {
+ return preFilter;
+ }
+
+ public void preFilter(Analyzer preFilter) {
+ this.preFilter = preFilter;
+ }
+
+ public Analyzer postFilter() {
+ return postFilter;
+ }
+
+ public void postFilter(Analyzer postFilter) {
+ this.postFilter = postFilter;
+ }
+
+
+ }
+
+ public void setRequireUnigram(boolean requireUnigram) {
+ this.requireUnigram = requireUnigram;
+ }
+
+ public boolean getRequireUnigram() {
+ return requireUnigram;
+ }
+
+ public void setTokenLimit(int tokenLimit) {
+ this.tokenLimit = tokenLimit;
+ }
+
+ public int getTokenLimit() {
+ return tokenLimit;
+ }
+
+ public void setPreTag(BytesRef preTag) {
+ this.preTag = preTag;
+ }
+
+ public BytesRef getPreTag() {
+ return preTag;
+ }
+
+ public void setPostTag(BytesRef postTag) {
+ this.postTag = postTag;
+ }
+
+ public BytesRef getPostTag() {
+ return postTag;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java b/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java
new file mode 100644
index 0000000..72d2b97
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+
+import java.io.IOException;
+
+public class StupidBackoffScorer extends WordScorer {
+ public static final WordScorerFactory FACTORY = new WordScorer.WordScorerFactory() {
+ @Override
+ public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) throws IOException {
+ return new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, 0.4f);
+ }
+ };
+
+ private final double discount;
+
+ public StupidBackoffScorer(IndexReader reader, Terms terms,String field, double realWordLikelyhood, BytesRef separator, double discount)
+ throws IOException {
+ super(reader, terms, field, realWordLikelyhood, separator);
+ this.discount = discount;
+ }
+
+ @Override
+ protected double scoreBigram(Candidate word, Candidate w_1) throws IOException {
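+        // "Stupid backoff": if the bigram was never observed, back off to the
+        // discounted unigram score instead of a normalized probability; the
+        // tiny additive constant below guards against division by zero.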
+ SuggestUtils.join(separator, spare, w_1.term, word.term);
+ final long count = frequency(spare);
+ if (count < 1) {
+ return discount * scoreUnigram(word);
+ }
+ return count / (w_1.frequency + 0.00000000001d);
+ }
+
+ @Override
+ protected double scoreTrigram(Candidate w, Candidate w_1, Candidate w_2) throws IOException {
+ SuggestUtils.join(separator, spare, w_2.term, w_1.term, w.term);
+ final long trigramCount = frequency(spare);
+ if (trigramCount < 1) {
+ SuggestUtils.join(separator, spare, w_1.term, w.term);
+ final long count = frequency(spare);
+ if (count < 1) {
+ return discount * scoreUnigram(w);
+ }
+ return discount * (count / (w_1.frequency + 0.00000000001d));
+ }
+ SuggestUtils.join(separator, spare, w_1.term, w.term);
+ final long bigramCount = frequency(spare);
+ return trigramCount / (bigramCount + 0.00000000001d);
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java
new file mode 100644
index 0000000..96d3f2f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
+import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet;
+
+import java.io.IOException;
+
+// TODO: public for tests
+public abstract class WordScorer {
+ protected final IndexReader reader;
+ protected final String field;
+ protected final Terms terms;
+ protected final long vocabluarySize;
+ protected double realWordLikelyhood;
+ protected final BytesRef spare = new BytesRef();
+ protected final BytesRef separator;
+ protected final TermsEnum termsEnum;
+ private final long numTerms;
+ private final boolean useTotalTermFreq;
+
+ public WordScorer(IndexReader reader, String field, double realWordLikelyHood, BytesRef separator) throws IOException {
+ this(reader, MultiFields.getTerms(reader, field), field, realWordLikelyHood, separator);
+ }
+
+ public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelyHood, BytesRef separator) throws IOException {
+ this.field = field;
+ if (terms == null) {
+ throw new ElasticsearchIllegalArgumentException("Field: [" + field + "] does not exist");
+ }
+ this.terms = terms;
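+        // Not every field stores total term frequencies (getSumTotalTermFreq()
+        // returns -1 in that case); fall back to document-based statistics,
+        // i.e. maxDoc() as the vocabulary size and docFreq() in frequency().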
+ final long vocSize = terms.getSumTotalTermFreq();
+ this.vocabluarySize = vocSize == -1 ? reader.maxDoc() : vocSize;
+ this.useTotalTermFreq = vocSize != -1;
+ this.numTerms = terms.size();
+ this.termsEnum = terms.iterator(null);
+ this.reader = reader;
+ this.realWordLikelyhood = realWordLikelyHood;
+ this.separator = separator;
+ }
+
+ public long frequency(BytesRef term) throws IOException {
+ if (termsEnum.seekExact(term)) {
+ return useTotalTermFreq ? termsEnum.totalTermFreq() : termsEnum.docFreq();
+ }
+ return 0;
+ }
+
+ protected double channelScore(Candidate candidate, Candidate original) throws IOException {
+ if (candidate.stringDistance == 1.0d) {
+ return realWordLikelyhood;
+ }
+ return candidate.stringDistance;
+ }
+
+ public double score(Candidate[] path, CandidateSet[] candidateSet, int at, int gramSize) throws IOException {
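+        // Use the highest-order n-gram the token position and the configured
+        // gram size allow, and score in log space so that per-token scores can
+        // be summed into a score for the whole candidate phrase.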
+ if (at == 0 || gramSize == 1) {
+ return Math.log10(channelScore(path[at], candidateSet[at].originalTerm) * scoreUnigram(path[at]));
+ } else if (at == 1 || gramSize == 2) {
+ return Math.log10(channelScore(path[at], candidateSet[at].originalTerm) * scoreBigram(path[at], path[at - 1]));
+ } else {
+ return Math.log10(channelScore(path[at], candidateSet[at].originalTerm) * scoreTrigram(path[at], path[at - 1], path[at - 2]));
+ }
+ }
+
+ protected double scoreUnigram(Candidate word) throws IOException {
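+        // Add-one (Laplace) smoothed unigram probability: unseen candidate
+        // terms still receive a small non-zero score.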
+ return (1.0 + frequency(word.term)) / (vocabluarySize + numTerms);
+ }
+
+ protected double scoreBigram(Candidate word, Candidate w_1) throws IOException {
+ return scoreUnigram(word);
+ }
+
+ protected double scoreTrigram(Candidate word, Candidate w_1, Candidate w_2) throws IOException {
+ return scoreBigram(word, w_1);
+ }
+
+ public static interface WordScorerFactory {
+ public WordScorer newScorer(IndexReader reader, Terms terms,
+ String field, double realWordLikelyhood, BytesRef separator) throws IOException;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java
new file mode 100644
index 0000000..1e2ed4f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.term;
+
+import java.io.IOException;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.search.suggest.DirectSpellcheckerSettings;
+import org.elasticsearch.search.suggest.SuggestContextParser;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+public final class TermSuggestParser implements SuggestContextParser {
+
+ private TermSuggester suggester;
+
+ public TermSuggestParser(TermSuggester suggester) {
+ this.suggester = suggester;
+ }
+
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService) throws IOException {
+ XContentParser.Token token;
+ String fieldName = null;
+ TermSuggestionContext suggestion = new TermSuggestionContext(suggester);
+ DirectSpellcheckerSettings settings = suggestion.getDirectSpellCheckerSettings();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ parseTokenValue(parser, mapperService, fieldName, suggestion, settings);
+ } else {
+ throw new ElasticsearchIllegalArgumentException("suggester[term] doesn't support field [" + fieldName + "]");
+ }
+ }
+ return suggestion;
+ }
+
+ private void parseTokenValue(XContentParser parser, MapperService mapperService, String fieldName, TermSuggestionContext suggestion,
+ DirectSpellcheckerSettings settings) throws IOException {
+ if (!(SuggestUtils.parseSuggestContext(parser, mapperService, fieldName, suggestion) || SuggestUtils.parseDirectSpellcheckerSettings(
+ parser, fieldName, settings))) {
+ throw new ElasticsearchIllegalArgumentException("suggester[term] doesn't support [" + fieldName + "]");
+
+ }
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
new file mode 100644
index 0000000..515d375
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.term;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.search.spell.SuggestWord;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.text.BytesText;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.search.suggest.SuggestContextParser;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.Suggester;
+import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public final class TermSuggester extends Suggester<TermSuggestionContext> {
+
+ @Override
+ public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexReader indexReader, CharsRef spare) throws IOException {
+ DirectSpellChecker directSpellChecker = SuggestUtils.getDirectSpellChecker(suggestion.getDirectSpellCheckerSettings());
+
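+        // Each analyzed token of the suggest text is spell checked on its own;
+        // every token becomes one entry in the response, holding up to
+        // shard size suggested words.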
+ TermSuggestion response = new TermSuggestion(
+ name, suggestion.getSize(), suggestion.getDirectSpellCheckerSettings().sort()
+ );
+ List<Token> tokens = queryTerms(suggestion, spare);
+ for (Token token : tokens) {
+ // TODO: Extend DirectSpellChecker in 4.1, to get the raw suggested words as BytesRef
+ SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
+ token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
+ );
+ Text key = new BytesText(new BytesArray(token.term.bytes()));
+ TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
+ for (SuggestWord suggestWord : suggestedWords) {
+ Text word = new StringText(suggestWord.string);
+ resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
+ }
+ response.addTerm(resultEntry);
+ }
+ return response;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] {"term"};
+ }
+
+ @Override
+ public SuggestContextParser getContextParser() {
+ return new TermSuggestParser(this);
+ }
+
+
+ private List<Token> queryTerms(SuggestionContext suggestion, CharsRef spare) throws IOException {
+ final List<Token> result = new ArrayList<TermSuggester.Token>();
+ final String field = suggestion.getField();
+ SuggestUtils.analyze(suggestion.getAnalyzer(), suggestion.getText(), field, new SuggestUtils.TokenConsumer() {
+ @Override
+ public void nextToken() {
+ Term term = new Term(field, BytesRef.deepCopyOf(fillBytesRef(new BytesRef())));
+ result.add(new Token(term, offsetAttr.startOffset(), offsetAttr.endOffset()));
+ }
+ }, spare);
+ return result;
+ }
+
+ private static class Token {
+
+ public final Term term;
+ public final int startOffset;
+ public final int endOffset;
+
+ private Token(Term term, int startOffset, int endOffset) {
+ this.term = term;
+ this.startOffset = startOffset;
+ this.endOffset = endOffset;
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java
new file mode 100644
index 0000000..33c995e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.term;
+
+import java.io.IOException;
+import java.util.Comparator;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.search.suggest.Suggest.Suggestion;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+
+/**
+ * The suggestion response corresponding to the term suggestion in the request.
+ */
+public class TermSuggestion extends Suggestion<TermSuggestion.Entry> {
+
+ public static Comparator<Suggestion.Entry.Option> SCORE = new Score();
+ public static Comparator<Suggestion.Entry.Option> FREQUENCY = new Frequency();
+
+ // Same behaviour as comparators in suggest module, but for SuggestedWord
+ // Highest score first, then highest freq first, then lowest term first
+ public static class Score implements Comparator<Suggestion.Entry.Option> {
+
+ @Override
+ public int compare(Suggestion.Entry.Option first, Suggestion.Entry.Option second) {
+ // first criteria: the distance
+ int cmp = Float.compare(second.getScore(), first.getScore());
+ if (cmp != 0) {
+ return cmp;
+ }
+ return FREQUENCY.compare(first, second);
+ }
+
+ }
+
+ // Same behaviour as comparators in suggest module, but for SuggestedWord
+ // Highest freq first, then highest score first, then lowest term first
+ public static class Frequency implements Comparator<Suggestion.Entry.Option> {
+
+ @Override
+ public int compare(Suggestion.Entry.Option first, Suggestion.Entry.Option second) {
+
+ // first criteria: the popularity
+ int cmp = ((TermSuggestion.Entry.Option) second).getFreq() - ((TermSuggestion.Entry.Option) first).getFreq();
+ if (cmp != 0) {
+ return cmp;
+ }
+
+ // second criteria (if first criteria is equal): the distance
+ cmp = Float.compare(second.getScore(), first.getScore());
+ if (cmp != 0) {
+ return cmp;
+ }
+
+ // third criteria: term text
+ return first.getText().compareTo(second.getText());
+ }
+
+ }
+
+ public static final int TYPE = 1;
+ private Sort sort;
+
+ public TermSuggestion() {
+ }
+
+ public TermSuggestion(String name, int size, Sort sort) {
+ super(name, size);
+ this.sort = sort;
+ }
+
+ public int getType() {
+ return TYPE;
+ }
+
+ @Override
+ protected Comparator<Option> sortComparator() {
+ switch (sort) {
+ case SCORE:
+ return SCORE;
+ case FREQUENCY:
+ return FREQUENCY;
+ default:
+ throw new ElasticsearchException("Could not resolve comparator for sort key: [" + sort + "]");
+ }
+ }
+
+ @Override
+ protected void innerReadFrom(StreamInput in) throws IOException {
+ super.innerReadFrom(in);
+ sort = Sort.fromId(in.readByte());
+ }
+
+ @Override
+ public void innerWriteTo(StreamOutput out) throws IOException {
+ super.innerWriteTo(out);
+ out.writeByte(sort.id());
+ }
+
+ protected Entry newEntry() {
+ return new Entry();
+ }
+
+ /**
+ * Represents a part from the suggest text with suggested options.
+ */
+ public static class Entry extends
+ org.elasticsearch.search.suggest.Suggest.Suggestion.Entry<TermSuggestion.Entry.Option> {
+
+ Entry(Text text, int offset, int length) {
+ super(text, offset, length);
+ }
+
+ Entry() {
+ }
+
+ @Override
+ protected Option newOption() {
+ return new Option();
+ }
+
+ /**
+ * Contains the suggested text with its document frequency and score.
+ */
+ public static class Option extends org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option {
+
+ static class Fields {
+ static final XContentBuilderString FREQ = new XContentBuilderString("freq");
+ }
+
+ private int freq;
+
+ protected Option(Text text, int freq, float score) {
+ super(text, score);
+ this.freq = freq;
+ }
+
+ @Override
+ protected void mergeInto(Suggestion.Entry.Option otherOption) {
+ super.mergeInto(otherOption);
+ freq += ((Option) otherOption).freq;
+ }
+
+ protected Option() {
+ super();
+ }
+
+ public void setFreq(int freq) {
+ this.freq = freq;
+ }
+
+ /**
+ * @return How often this suggested text appears in the index.
+ */
+ public int getFreq() {
+ return freq;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ freq = in.readVInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(freq);
+ }
+
+ @Override
+ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ builder = super.innerToXContent(builder, params);
+ builder.field(Fields.FREQ, freq);
+ return builder;
+ }
+ }
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java
new file mode 100644
index 0000000..0fd5ae1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.term;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+
+import java.io.IOException;
+
+/**
+ * Defines the actual suggest command. Each command uses the global options
+ * unless defined in the suggestion itself. All options are the same as the
+ * global options, but are only applicable for this suggestion.
+ */
+public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuilder> {
+
+ private String suggestMode;
+ private Float accuracy;
+ private String sort;
+ private String stringDistance;
+ private Integer maxEdits;
+ private Integer maxInspections;
+ private Float maxTermFreq;
+ private Integer prefixLength;
+ private Integer minWordLength;
+ private Float minDocFreq;
+
+ /**
+ * @param name
+ * The name of this suggestion. This is a required parameter.
+ */
+ public TermSuggestionBuilder(String name) {
+ super(name, "term");
+ }
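+    // Illustrative only (all setters shown exist on this builder):
+    //   new TermSuggestionBuilder("spelling")
+    //       .suggestMode("popular").maxEdits(2).prefixLength(1).minWordLength(4);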
+
+ /**
+     * The suggest mode controls which suggested terms are included, i.e. for
+     * which suggest text tokens terms should be suggested. Three possible
+     * values can be specified:
+     * <ol>
+     * <li><code>missing</code> - Only suggest terms in the suggest text that
+     * aren't in the index. This is the default.
+     * <li><code>popular</code> - Only suggest terms that occur in more docs
+     * than the original suggest text term.
+ * <li><code>always</code> - Suggest any matching suggest terms based on
+ * tokens in the suggest text.
+ * </ol>
+ */
+ public TermSuggestionBuilder suggestMode(String suggestMode) {
+ this.suggestMode = suggestMode;
+ return this;
+ }
+
+ /**
+     * Sets how similar the suggested terms need to be, at minimum, to the
+     * original suggest text tokens. A value between 0 and 1 can be specified.
+ * This value will be compared to the string distance result of each
+ * candidate spelling correction.
+ * <p/>
+ * Default is <tt>0.5</tt>
+ */
+ public TermSuggestionBuilder setAccuracy(float accuracy) {
+ this.accuracy = accuracy;
+ return this;
+ }
+
+ /**
+ * Sets how to sort the suggest terms per suggest text token. Two possible
+ * values:
+ * <ol>
+ * <li><code>score</code> - Sort should first be based on score, then
+ * document frequency and then the term itself.
+ * <li><code>frequency</code> - Sort should first be based on document
+     * frequency, then score and then the term itself.
+ * </ol>
+ * <p/>
+ * What the score is depends on the suggester being used.
+ */
+ public TermSuggestionBuilder sort(String sort) {
+ this.sort = sort;
+ return this;
+ }
+
+ /**
+ * Sets what string distance implementation to use for comparing how similar
+     * suggested terms are. Five possible values can be specified:
+ * <ol>
+ * <li><code>internal</code> - This is the default and is based on
+ * <code>damerau_levenshtein</code>, but highly optimized for comparing
+ * string distance for terms inside the index.
+ * <li><code>damerau_levenshtein</code> - String distance algorithm based on
+ * Damerau-Levenshtein algorithm.
+ * <li><code>levenstein</code> - String distance algorithm based on
+     * Levenshtein edit distance algorithm.
+ * <li><code>jarowinkler</code> - String distance algorithm based on
+ * Jaro-Winkler algorithm.
+ * <li><code>ngram</code> - String distance algorithm based on character
+ * n-grams.
+ * </ol>
+ */
+ public TermSuggestionBuilder stringDistance(String stringDistance) {
+ this.stringDistance = stringDistance;
+ return this;
+ }
+
+ /**
+ * Sets the maximum edit distance candidate suggestions can have in order to
+ * be considered as a suggestion. Can only be a value between 1 and 2. Any
+     * other value results in a bad request error being thrown. Defaults to
+ * <tt>2</tt>.
+ */
+ public TermSuggestionBuilder maxEdits(Integer maxEdits) {
+ this.maxEdits = maxEdits;
+ return this;
+ }
+
+ /**
+     * A factor that is multiplied with the size in order to inspect more
+ * candidate suggestions. Can improve accuracy at the cost of performance.
+ * Defaults to <tt>5</tt>.
+ */
+ public TermSuggestionBuilder maxInspections(Integer maxInspections) {
+ this.maxInspections = maxInspections;
+ return this;
+ }
+
+ /**
+     * Sets a maximum threshold on the number of documents in which a suggest
+     * text token can exist and still be considered for correction. Can be a
+     * relative percentage (e.g. 0.4) or an absolute number representing
+     * document frequencies. If a value higher than 1 is specified, the number
+     * cannot be fractional. Defaults to <tt>0.01</tt>.
+     * <p/>
+     * This can be used to exclude high frequency terms from being suggested.
+     * High frequency terms are usually spelled correctly; on top of that, this
+     * also improves the suggest performance.
+ */
+ public TermSuggestionBuilder maxTermFreq(float maxTermFreq) {
+ this.maxTermFreq = maxTermFreq;
+ return this;
+ }
+
+ /**
+     * Sets the minimal number of prefix characters that must match in order for
+     * a term to be a candidate suggestion. Defaults to 1. Increasing this number
+     * improves suggest performance, since misspellings usually don't occur at
+     * the beginning of terms.
+ */
+ public TermSuggestionBuilder prefixLength(int prefixLength) {
+ this.prefixLength = prefixLength;
+ return this;
+ }
+
+ /**
+ * The minimum length a suggest text term must have in order to be
+ * corrected. Defaults to <tt>4</tt>.
+ */
+ public TermSuggestionBuilder minWordLength(int minWordLength) {
+ this.minWordLength = minWordLength;
+ return this;
+ }
+
+ /**
+ * Sets a minimal threshold in number of documents a suggested term should
+ * appear in. This can be specified as an absolute number or as a relative
+ * percentage of number of documents. This can improve quality by only
+     * suggesting high frequency terms. Defaults to <tt>0f</tt>, which disables
+     * the check. If a value higher than 1 is specified then the number cannot
+     * be fractional.
+ */
+ public TermSuggestionBuilder minDocFreq(float minDocFreq) {
+ this.minDocFreq = minDocFreq;
+ return this;
+ }
+
+ @Override
+ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+ if (suggestMode != null) {
+ builder.field("suggest_mode", suggestMode);
+ }
+ if (accuracy != null) {
+ builder.field("accuracy", accuracy);
+ }
+ if (sort != null) {
+ builder.field("sort", sort);
+ }
+ if (stringDistance != null) {
+ builder.field("string_distance", stringDistance);
+ }
+ if (maxEdits != null) {
+ builder.field("max_edits", maxEdits);
+ }
+ if (maxInspections != null) {
+ builder.field("max_inspections", maxInspections);
+ }
+ if (maxTermFreq != null) {
+ builder.field("max_term_freq", maxTermFreq);
+ }
+ if (prefixLength != null) {
+ builder.field("prefix_length", prefixLength);
+ }
+ if (minWordLength != null) {
+ builder.field("min_word_length", minWordLength);
+ }
+ if (minDocFreq != null) {
+ builder.field("min_doc_freq", minDocFreq);
+ }
+ return builder;
+ }
+}
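
For illustration, a minimal sketch of how these fluent setters might be chained from
client code, assuming the builder's name-taking constructor; the suggestion name and
option values are hypothetical, and each setter maps onto the corresponding field
written by innerToXContent():

    // Hypothetical usage sketch (values are illustrative only).
    TermSuggestionBuilder suggestion = new TermSuggestionBuilder("spelling")
            .stringDistance("damerau_levenshtein") // serialized as "string_distance"
            .maxEdits(2)                           // serialized as "max_edits"
            .prefixLength(1)                       // serialized as "prefix_length"
            .minWordLength(4)                      // serialized as "min_word_length"
            .maxTermFreq(0.01f)                    // serialized as "max_term_freq"
            .minDocFreq(0f);                       // serialized as "min_doc_freq"
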
diff --git a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java
new file mode 100644
index 0000000..4ff32d7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.term;
+
+import org.elasticsearch.search.suggest.DirectSpellcheckerSettings;
+import org.elasticsearch.search.suggest.Suggester;
+import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
+
+final class TermSuggestionContext extends SuggestionContext {
+
+ private final DirectSpellcheckerSettings settings = new DirectSpellcheckerSettings();
+
+ public TermSuggestionContext(Suggester<? extends TermSuggestionContext> suggester) {
+ super(suggester);
+ }
+
+ public DirectSpellcheckerSettings getDirectSpellCheckerSettings() {
+ return settings;
+ }
+
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java b/src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java
new file mode 100644
index 0000000..3892714
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.warmer;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.rest.RestStatus;
+
+import java.util.Arrays;
+
+/**
+ * Thrown when one or more requested index warmers are missing
+ */
+public class IndexWarmerMissingException extends ElasticsearchException {
+
+ private final String[] names;
+
+ public IndexWarmerMissingException(String... names) {
+ super("index_warmer [" + Arrays.toString(names) + "] missing");
+ this.names = names;
+ }
+
+ public String[] names() {
+ return this.names;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java
new file mode 100644
index 0000000..b626e59
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.warmer;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Custom index metadata that holds the search warmers registered for an index
+ */
+public class IndexWarmersMetaData implements IndexMetaData.Custom {
+
+ public static final String TYPE = "warmers";
+
+ public static final Factory FACTORY = new Factory();
+
+ public static class Entry {
+ private final String name;
+ private final String[] types;
+ private final BytesReference source;
+
+ public Entry(String name, String[] types, BytesReference source) {
+ this.name = name;
+ this.types = types == null ? Strings.EMPTY_ARRAY : types;
+ this.source = source;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String[] types() {
+ return this.types;
+ }
+
+ @Nullable
+ public BytesReference source() {
+ return this.source;
+ }
+ }
+
+ private final ImmutableList<Entry> entries;
+
+
+ public IndexWarmersMetaData(Entry... entries) {
+ this.entries = ImmutableList.copyOf(entries);
+ }
+
+ public ImmutableList<Entry> entries() {
+ return this.entries;
+ }
+
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ public static class Factory implements IndexMetaData.Custom.Factory<IndexWarmersMetaData> {
+
+ @Override
+ public String type() {
+ return TYPE;
+ }
+
+ @Override
+ public IndexWarmersMetaData readFrom(StreamInput in) throws IOException {
+ Entry[] entries = new Entry[in.readVInt()];
+ for (int i = 0; i < entries.length; i++) {
+ entries[i] = new Entry(in.readString(), in.readStringArray(), in.readBoolean() ? in.readBytesReference() : null);
+ }
+ return new IndexWarmersMetaData(entries);
+ }
+
+ @Override
+ public void writeTo(IndexWarmersMetaData warmers, StreamOutput out) throws IOException {
+ out.writeVInt(warmers.entries().size());
+ for (Entry entry : warmers.entries()) {
+ out.writeString(entry.name());
+ out.writeStringArray(entry.types());
+ if (entry.source() == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeBytesReference(entry.source());
+ }
+ }
+ }
+
+ @Override
+ public IndexWarmersMetaData fromMap(Map<String, Object> map) throws IOException {
+ // if it starts with the type, remove it
+ if (map.size() == 1 && map.containsKey(TYPE)) {
+ map = (Map<String, Object>) map.values().iterator().next();
+ }
+ XContentBuilder builder = XContentFactory.smileBuilder().map(map);
+ XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes());
+ try {
+ // move to START_OBJECT
+ parser.nextToken();
+ return fromXContent(parser);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException {
+ // we get here after we are at warmers token
+ String currentFieldName = null;
+ XContentParser.Token token;
+ List<Entry> entries = new ArrayList<Entry>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ String name = currentFieldName;
+ List<String> types = new ArrayList<String>(2);
+ BytesReference source = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("types".equals(currentFieldName)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ types.add(parser.text());
+ }
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("source".equals(currentFieldName)) {
+ XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered());
+ source = builder.bytes();
+ }
+ } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
+ if ("source".equals(currentFieldName)) {
+ source = new BytesArray(parser.binaryValue());
+ }
+ }
+ }
+ entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), source));
+ }
+ }
+ return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()]));
+ }
+
+ @Override
+ public void toXContent(IndexWarmersMetaData warmers, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ //No need, IndexMetaData already writes it
+ //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE);
+ for (Entry entry : warmers.entries()) {
+ toXContent(entry, builder, params);
+ }
+ //No need, IndexMetaData already writes it
+ //builder.endObject();
+ }
+
+ public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ boolean binary = params.paramAsBoolean("binary", false);
+ builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE);
+ builder.field("types", entry.types());
+ builder.field("source");
+ if (binary) {
+ builder.value(entry.source());
+ } else {
+ Map<String, Object> mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose();
+ builder.map(mapping);
+ }
+ builder.endObject();
+ }
+
+ @Override
+ public IndexWarmersMetaData merge(IndexWarmersMetaData first, IndexWarmersMetaData second) {
+ List<Entry> entries = Lists.newArrayList();
+ entries.addAll(first.entries());
+ for (Entry secondEntry : second.entries()) {
+ boolean found = false;
+ for (Entry firstEntry : first.entries()) {
+ if (firstEntry.name().equals(secondEntry.name())) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ entries.add(secondEntry);
+ }
+ }
+ return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()]));
+ }
+ }
+}
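
As a sketch of how this metadata is assembled, using only the constructors visible in
this patch; the warmer name, type, and source JSON are hypothetical:

    // Illustrative only: warmers metadata holding one warmer named "warmer_1".
    BytesReference source = new BytesArray("{\"query\":{\"match_all\":{}}}");
    IndexWarmersMetaData.Entry entry =
            new IndexWarmersMetaData.Entry("warmer_1", new String[] { "my_type" }, source);
    IndexWarmersMetaData warmers = new IndexWarmersMetaData(entry);
    // fromXContent() above parses the inverse shape:
    // { "warmer_1": { "types": ["my_type"], "source": { ... } } }
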
diff --git a/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java b/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java
new file mode 100644
index 0000000..6214ca5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Thrown when a user tries to start multiple snapshots at the same time
+ */
+public class ConcurrentSnapshotExecutionException extends SnapshotException {
+ public ConcurrentSnapshotExecutionException(SnapshotId snapshot, String msg) {
+ super(snapshot, msg);
+ }
+
+ public ConcurrentSnapshotExecutionException(SnapshotId snapshot, String msg, Throwable cause) {
+ super(snapshot, msg, cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.SERVICE_UNAVAILABLE;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java b/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java
new file mode 100644
index 0000000..9ac5743
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Thrown on the attempt to create a snapshot with invalid name
+ */
+public class InvalidSnapshotNameException extends SnapshotException implements ElasticsearchWrapperException {
+
+ public InvalidSnapshotNameException(SnapshotId snapshot, String desc) {
+ super(snapshot, "Invalid snapshot name [" + snapshot.getSnapshot() + "], " + desc);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.BAD_REQUEST;
+ }
+
+}
+
diff --git a/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
new file mode 100644
index 0000000..b6ac239
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+
+/**
+ * Information about a successfully completed restore operation.
+ * <p/>
+ * Returned as part of {@link org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse}
+ */
+public class RestoreInfo implements ToXContent, Streamable {
+
+ private String name;
+
+ private ImmutableList<String> indices;
+
+ private int totalShards;
+
+ private int successfulShards;
+
+ RestoreInfo() {
+
+ }
+
+ public RestoreInfo(String name, ImmutableList<String> indices, int totalShards, int successfulShards) {
+ this.name = name;
+ this.indices = indices;
+ this.totalShards = totalShards;
+ this.successfulShards = successfulShards;
+ }
+
+ /**
+ * Snapshot name
+ *
+ * @return snapshot name
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * List of restored indices
+ *
+ * @return list of restored indices
+ */
+ public ImmutableList<String> indices() {
+ return indices;
+ }
+
+ /**
+ * Number of shards being restored
+ *
+ * @return number of shards being restored
+ */
+ public int totalShards() {
+ return totalShards;
+ }
+
+ /**
+ * Number of failed shards
+ *
+ * @return number of failed shards
+ */
+ public int failedShards() {
+ return totalShards - successfulShards;
+ }
+
+ /**
+ * Number of successful shards
+ *
+ * @return number of successful shards
+ */
+ public int successfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * REST status of the operation
+ *
+ * @return REST status
+ */
+ public RestStatus status() {
+ return RestStatus.OK;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString FAILED = new XContentBuilderString("failed");
+ static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(Fields.SNAPSHOT, name);
+ builder.startArray(Fields.INDICES);
+ for (String index : indices) {
+ builder.value(index);
+ }
+ builder.endArray();
+ builder.startObject(Fields.SHARDS);
+ builder.field(Fields.TOTAL, totalShards);
+ builder.field(Fields.FAILED, failedShards());
+ builder.field(Fields.SUCCESSFUL, successfulShards);
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ int size = in.readVInt();
+ ImmutableList.Builder<String> indicesListBuilder = ImmutableList.builder();
+ for (int i = 0; i < size; i++) {
+ indicesListBuilder.add(in.readString());
+ }
+ indices = indicesListBuilder.build();
+ totalShards = in.readVInt();
+ successfulShards = in.readVInt();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVInt(indices.size());
+ for (String index : indices) {
+ out.writeString(index);
+ }
+ out.writeVInt(totalShards);
+ out.writeVInt(successfulShards);
+ }
+
+ /**
+ * Reads restore info from {@link StreamInput}
+ *
+ * @param in stream input
+ * @return restore info
+ * @throws IOException
+ */
+ public static RestoreInfo readRestoreInfo(StreamInput in) throws IOException {
+ RestoreInfo snapshotInfo = new RestoreInfo();
+ snapshotInfo.readFrom(in);
+ return snapshotInfo;
+ }
+
+ /**
+ * Reads optional restore info from {@link StreamInput}
+ *
+ * @param in stream input
+ * @return restore info
+ * @throws IOException
+ */
+ public static RestoreInfo readOptionalRestoreInfo(StreamInput in) throws IOException {
+ return in.readOptionalStreamable(new RestoreInfo());
+ }
+
+} \ No newline at end of file
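
A brief sketch of the JSON shape toXContent() produces for a fully successful restore;
the snapshot and index names are hypothetical and exception handling is elided:

    // Illustrative only: render a RestoreInfo to JSON.
    RestoreInfo info = new RestoreInfo("snapshot_1", ImmutableList.of("index_1"), 5, 5);
    XContentBuilder builder = XContentFactory.jsonBuilder();
    info.toXContent(builder, ToXContent.EMPTY_PARAMS);
    // yields: {"snapshot":"snapshot_1","indices":["index_1"],
    //          "shards":{"total":5,"failed":0,"successful":5}}
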
diff --git a/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/src/main/java/org/elasticsearch/snapshots/RestoreService.java
new file mode 100644
index 0000000..6fea6d3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -0,0 +1,787 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.*;
+import org.elasticsearch.cluster.metadata.RestoreMetaData.ShardRestoreStatus;
+import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.cluster.metadata.MetaDataIndexStateService.INDEX_CLOSED_BLOCK;
+
+/**
+ * Service responsible for restoring snapshots
+ * <p/>
+ * The restore operation is performed in several stages.
+ * <p/>
+ * First, the {@link #restoreSnapshot(RestoreRequest, RestoreSnapshotListener)}
+ * method reads information about the snapshot and metadata from the repository. In an update cluster state task it checks restore
+ * preconditions, restores the global state if needed, creates a {@link RestoreMetaData} record with the list of shards that need
+ * to be restored and adds these shards to the routing table using the {@link RoutingTable.Builder#addAsRestore(IndexMetaData, RestoreSource)}
+ * method.
+ * <p/>
+ * Individual shards are restored as part of the normal recovery process in the
+ * {@link org.elasticsearch.index.gateway.IndexShardGatewayService#recover(boolean, org.elasticsearch.index.gateway.IndexShardGatewayService.RecoveryListener)}
+ * method, which detects that a shard should be restored from a snapshot rather than recovered from the gateway by looking
+ * at the {@link org.elasticsearch.cluster.routing.ShardRouting#restoreSource()} property. If this property is not null, the
+ * {@code recover} method uses the {@link org.elasticsearch.index.snapshots.IndexShardSnapshotAndRestoreService#restore(org.elasticsearch.index.gateway.RecoveryStatus)}
+ * method to start the shard restore process.
+ * <p/>
+ * At the end of a successful restore process, {@code IndexShardSnapshotAndRestoreService} calls {@link #indexShardRestoreCompleted(SnapshotId, ShardId)},
+ * which updates {@link RestoreMetaData} in the cluster state or removes it when all shards are completed. In case of a
+ * restore failure, the normal recovery fail-over process kicks in.
+ */
+public class RestoreService extends AbstractComponent implements ClusterStateListener {
+
+ private final ClusterService clusterService;
+
+ private final RepositoriesService repositoriesService;
+
+ private final TransportService transportService;
+
+ private final AllocationService allocationService;
+
+ private final MetaDataCreateIndexService createIndexService;
+
+ private final CopyOnWriteArrayList<RestoreCompletionListener> listeners = new CopyOnWriteArrayList<RestoreCompletionListener>();
+
+ @Inject
+ public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService, AllocationService allocationService, MetaDataCreateIndexService createIndexService) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.repositoriesService = repositoriesService;
+ this.transportService = transportService;
+ this.allocationService = allocationService;
+ this.createIndexService = createIndexService;
+ transportService.registerHandler(UpdateRestoreStateRequestHandler.ACTION, new UpdateRestoreStateRequestHandler());
+ clusterService.add(this);
+ }
+
+ /**
+ * Restores snapshot specified in the restore request.
+ *
+ * @param request restore request
+ * @param listener restore listener
+ */
+ public void restoreSnapshot(final RestoreRequest request, final RestoreSnapshotListener listener) {
+ try {
+ // Read snapshot info and metadata from the repository
+ Repository repository = repositoriesService.repository(request.repository());
+ final SnapshotId snapshotId = new SnapshotId(request.repository(), request.name());
+ final Snapshot snapshot = repository.readSnapshot(snapshotId);
+ ImmutableList<String> filteredIndices = SnapshotUtils.filterIndices(snapshot.indices(), request.indices(), request.indicesOptions());
+ final MetaData metaData = repository.readSnapshotMetaData(snapshotId, filteredIndices);
+
+ // Make sure that we can restore from this snapshot
+ if (snapshot.state() != SnapshotState.SUCCESS) {
+ throw new SnapshotRestoreException(snapshotId, "unsupported snapshot state [" + snapshot.state() + "]");
+ }
+ if (Version.CURRENT.before(snapshot.version())) {
+ throw new SnapshotRestoreException(snapshotId, "incompatible snapshot version [" + snapshot.version() + "]");
+ }
+
+ // Find list of indices that we need to restore
+ final Map<String, String> renamedIndices = newHashMap();
+ for (String index : filteredIndices) {
+ String renamedIndex = index;
+ if (request.renameReplacement() != null && request.renamePattern() != null) {
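+ // e.g. renamePattern "index_(.+)" with renameReplacement "restored_$1" maps
+ // a snapshotted "index_1" to a restored "restored_1" (values illustrative)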
+ renamedIndex = index.replaceAll(request.renamePattern(), request.renameReplacement());
+ }
+ String previousIndex = renamedIndices.put(renamedIndex, index);
+ if (previousIndex != null) {
+ throw new SnapshotRestoreException(snapshotId, "indices [" + index + "] and [" + previousIndex + "] are renamed into the same index [" + renamedIndex + "]");
+ }
+ }
+
+ // Now we can start the actual restore process by adding shards to be recovered in the cluster state
+ // and updating cluster metadata (global and index) as needed
+ clusterService.submitStateUpdateTask(request.cause(), new TimeoutClusterStateUpdateTask() {
+ RestoreInfo restoreInfo = null;
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ // Check if another restore process is already running - cannot run two restore processes at the
+ // same time
+ RestoreMetaData restoreMetaData = currentState.metaData().custom(RestoreMetaData.TYPE);
+ if (restoreMetaData != null && !restoreMetaData.entries().isEmpty()) {
+ throw new ConcurrentSnapshotExecutionException(snapshotId, "Restore process is already running in this cluster");
+ }
+
+ // Updating cluster state
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
+ if (!metaData.indices().isEmpty()) {
+ // We have some indices to restore
+ ImmutableMap.Builder<ShardId, RestoreMetaData.ShardRestoreStatus> shards = ImmutableMap.builder();
+ for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
+ String index = indexEntry.getValue();
+ // Make sure that index was fully snapshotted - don't restore
+ if (failed(snapshot, index)) {
+ throw new SnapshotRestoreException(snapshotId, "index [" + index + "] wasn't fully snapshotted - cannot restore");
+ }
+ RestoreSource restoreSource = new RestoreSource(snapshotId, index);
+ String renamedIndex = indexEntry.getKey();
+ IndexMetaData snapshotIndexMetaData = metaData.index(index);
+ // Check that the index is closed or doesn't exist
+ IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndex);
+ if (currentIndexMetaData == null) {
+ // Index doesn't exist - create it and start recovery
+ // Make sure that the index we are about to create has a valid name
+ createIndexService.validateIndexName(renamedIndex, currentState);
+ IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex);
+ IndexMetaData updatedIndexMetaData = indexMdBuilder.build();
+ rtBuilder.addAsNewRestore(updatedIndexMetaData, restoreSource);
+ mdBuilder.put(updatedIndexMetaData, true);
+ } else {
+ // Index exists - check that it's closed
+ if (currentIndexMetaData.state() != IndexMetaData.State.CLOSE) {
+ // TODO: Enable restore for open indices
+ throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] because it's open");
+ }
+ // Make sure that the number of shards is the same. That's the only thing that we cannot change
+ if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
+ throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
+ "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
+ }
+ // Index exists and it's closed - open it in metadata and start recovery
+ IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
+ indexMdBuilder.version(Math.max(snapshotIndexMetaData.version(), currentIndexMetaData.version() + 1));
+ IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndex).build();
+ rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource);
+ blocks.removeIndexBlock(renamedIndex, INDEX_CLOSED_BLOCK);
+ mdBuilder.put(updatedIndexMetaData, true);
+ }
+ for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
+ shards.put(new ShardId(renamedIndex, shard), new RestoreMetaData.ShardRestoreStatus(clusterService.state().nodes().localNodeId()));
+ }
+ }
+
+ RestoreMetaData.Entry restoreEntry = new RestoreMetaData.Entry(snapshotId, RestoreMetaData.State.INIT, ImmutableList.copyOf(renamedIndices.keySet()), shards.build());
+ mdBuilder.putCustom(RestoreMetaData.TYPE, new RestoreMetaData(restoreEntry));
+ }
+
+ // Restore global state if needed
+ if (request.includeGlobalState()) {
+ if (metaData.persistentSettings() != null) {
+ mdBuilder.persistentSettings(metaData.persistentSettings());
+ }
+ if (metaData.templates() != null) {
+ // TODO: Should all existing templates be deleted first?
+ for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
+ mdBuilder.put(cursor.value);
+ }
+ }
+ if (metaData.customs() != null) {
+ for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
+ if (!RepositoriesMetaData.TYPE.equals(cursor.key)) {
+ // Don't restore repositories while we are working with them
+ // TODO: Should we restore them at the end?
+ mdBuilder.putCustom(cursor.key, cursor.value);
+ }
+ }
+ }
+ }
+
+ if (metaData.indices().isEmpty()) {
+ // We don't have any indices to restore - we are done
+ restoreInfo = new RestoreInfo(request.name(), ImmutableList.<String>of(), 0, 0);
+ }
+
+ ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocks).routingTable(rtBuilder).build();
+ RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build());
+ return ClusterState.builder(updatedState).routingResult(routingResult).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}] failed to restore snapshot", t, snapshotId);
+ listener.onFailure(t);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ listener.onResponse(restoreInfo);
+ }
+ });
+
+
+ } catch (Throwable e) {
+ logger.warn("[{}][{}] failed to restore snapshot", e, request.repository(), request.name());
+ listener.onFailure(e);
+ }
+ }
+
+ /**
+ * This method is used by {@link org.elasticsearch.index.snapshots.IndexShardSnapshotAndRestoreService} to notify
+ * {@code RestoreService} about shard restore completion.
+ *
+ * @param snapshotId snapshot id
+ * @param shardId shard id
+ */
+ public void indexShardRestoreCompleted(SnapshotId snapshotId, ShardId shardId) {
+ logger.trace("[{}] successfully restored shard [{}]", snapshotId, shardId);
+ UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshotId, shardId,
+ new ShardRestoreStatus(clusterService.state().nodes().localNodeId(), RestoreMetaData.State.SUCCESS));
+ if (clusterService.state().nodes().localNodeMaster()) {
+ innerUpdateRestoreState(request);
+ } else {
+ transportService.sendRequest(clusterService.state().nodes().masterNode(),
+ UpdateRestoreStateRequestHandler.ACTION, request, EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+ }
+
+ /**
+ * Updates shard restore record in the cluster state.
+ *
+ * @param request update shard status request
+ */
+ private void innerUpdateRestoreState(final UpdateIndexShardRestoreStatusRequest request) {
+ clusterService.submitStateUpdateTask("update snapshot state", new ProcessedClusterStateUpdateTask() {
+
+ private boolean completed = true;
+
+ private RestoreInfo restoreInfo = null;
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ RestoreMetaData restore = metaData.custom(RestoreMetaData.TYPE);
+ if (restore != null) {
+ boolean changed = false;
+ ArrayList<RestoreMetaData.Entry> entries = newArrayList();
+ for (RestoreMetaData.Entry entry : restore.entries()) {
+ if (entry.snapshotId().equals(request.snapshotId())) {
+ HashMap<ShardId, ShardRestoreStatus> shards = newHashMap(entry.shards());
+ logger.trace("[{}] Updating shard [{}] with status [{}]", request.snapshotId(), request.shardId(), request.status().state());
+ shards.put(request.shardId(), request.status());
+ for (RestoreMetaData.ShardRestoreStatus status : shards.values()) {
+ if (!status.state().completed()) {
+ completed = false;
+ break;
+ }
+ }
+ if (!completed) {
+ entries.add(new RestoreMetaData.Entry(entry.snapshotId(), RestoreMetaData.State.STARTED, entry.indices(), ImmutableMap.copyOf(shards)));
+ } else {
+ logger.info("restore [{}] is done", request.snapshotId());
+ int failedShards = 0;
+ for (RestoreMetaData.ShardRestoreStatus status : shards.values()) {
+ if (status.state() == RestoreMetaData.State.FAILURE) {
+ failedShards++;
+ }
+ }
+ restoreInfo = new RestoreInfo(entry.snapshotId().getSnapshot(), entry.indices(), shards.size(), shards.size() - failedShards);
+ }
+ changed = true;
+ } else {
+ entries.add(entry);
+ }
+ }
+ if (changed) {
+ restore = new RestoreMetaData(entries.toArray(new RestoreMetaData.Entry[entries.size()]));
+ mdBuilder.putCustom(RestoreMetaData.TYPE, restore);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}][{}] failed to update snapshot status to [{}]", t, request.snapshotId(), request.shardId(), request.status());
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ if (restoreInfo != null) {
+ for (RestoreCompletionListener listener : listeners) {
+ try {
+ listener.onRestoreCompletion(request.snapshotId, restoreInfo);
+ } catch (Throwable e) {
+ logger.warn("failed to update snapshot status for [{}]", e, listener);
+ }
+ }
+ }
+ }
+ });
+ }
+
+ /**
+ * Checks if any of the deleted indices are still recovering and fails recovery on the shards of these indices
+ *
+ * @param event cluster changed event
+ */
+ private void processDeletedIndices(ClusterChangedEvent event) {
+ MetaData metaData = event.state().metaData();
+ RestoreMetaData restore = metaData.custom(RestoreMetaData.TYPE);
+ if (restore == null) {
+ // Not restoring - nothing to do
+ return;
+ }
+
+ if (!event.indicesDeleted().isEmpty()) {
+ // Some indices were deleted, let's make sure all indices that we are restoring still exist
+ for (RestoreMetaData.Entry entry : restore.entries()) {
+ List<ShardId> shardsToFail = null;
+ for (ImmutableMap.Entry<ShardId, ShardRestoreStatus> shard : entry.shards().entrySet()) {
+ if (!shard.getValue().state().completed()) {
+ if (!event.state().metaData().hasIndex(shard.getKey().getIndex())) {
+ if (shardsToFail == null) {
+ shardsToFail = newArrayList();
+ }
+ shardsToFail.add(shard.getKey());
+ }
+ }
+ }
+ if (shardsToFail != null) {
+ for (ShardId shardId : shardsToFail) {
+ logger.trace("[{}] failing running shard restore [{}]", entry.snapshotId(), shardId);
+ innerUpdateRestoreState(new UpdateIndexShardRestoreStatusRequest(entry.snapshotId(), shardId, new ShardRestoreStatus(null, RestoreMetaData.State.FAILURE, "index was deleted")));
+ }
+ }
+ }
+ }
+ }
+
+ private boolean failed(Snapshot snapshot, String index) {
+ for (SnapshotShardFailure failure : snapshot.shardFailures()) {
+ if (index.equals(failure.index())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private boolean failed(Snapshot snapshot, String index, int shard) {
+ for (SnapshotShardFailure failure : snapshot.shardFailures()) {
+ if (index.equals(failure.index()) && shard == failure.shardId()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Adds restore completion listener
+ * <p/>
+ * This listener is called for each snapshot that finishes its restore operation in the cluster. It is the
+ * listener's responsibility to decide whether it was called for the appropriate snapshot.
+ *
+ * @param listener restore completion listener
+ */
+ public void addListener(RestoreCompletionListener listener) {
+ this.listeners.add(listener);
+ }
+
+ /**
+ * Removes restore completion listener
+ * <p/>
+ * This listener is called for each snapshot that finishes its restore operation in the cluster.
+ *
+ * @param listener restore completion listener
+ */
+ public void removeListener(RestoreCompletionListener listener) {
+ this.listeners.remove(listener);
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ try {
+ if (event.localNodeMaster()) {
+ processDeletedIndices(event);
+ }
+ } catch (Throwable t) {
+ logger.warn("Failed to update restore state ", t);
+ }
+ }
+
+ /**
+ * Checks if a repository is currently in use by one of the snapshots
+ *
+ * @param clusterState cluster state
+ * @param repository repository id
+ * @return true if repository is currently in use by one of the running snapshots
+ */
+ public static boolean isRepositoryInUse(ClusterState clusterState, String repository) {
+ MetaData metaData = clusterState.metaData();
+ RestoreMetaData snapshots = metaData.custom(RestoreMetaData.TYPE);
+ if (snapshots != null) {
+ for (RestoreMetaData.Entry snapshot : snapshots.entries()) {
+ if (repository.equals(snapshot.snapshotId().getRepository())) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Restore snapshot request
+ */
+ public static class RestoreRequest {
+
+ private String cause;
+
+ private String name;
+
+ private String repository;
+
+ private String[] indices;
+
+ private String renamePattern;
+
+ private String renameReplacement;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ private Settings settings;
+
+ private TimeValue masterNodeTimeout;
+
+ private boolean includeGlobalState = false;
+
+ /**
+ * Constructs new restore request
+ *
+ * @param cause cause for restoring the snapshot
+ * @param repository repository name
+ * @param name snapshot name
+ */
+ public RestoreRequest(String cause, String repository, String name) {
+ this.cause = cause;
+ this.name = name;
+ this.repository = repository;
+ }
+
+ /**
+ * Sets list of indices to restore
+ *
+ * @param indices list of indices
+ * @return this request
+ */
+ public RestoreRequest indices(String[] indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Sets indices options flags
+ *
+ * @param indicesOptions indices options flags
+ * @return this request
+ */
+ public RestoreRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * If true, the global cluster state will be restored as part of the restore operation
+ *
+ * @param includeGlobalState restore global state flag
+ * @return this request
+ */
+ public RestoreRequest includeGlobalState(boolean includeGlobalState) {
+ this.includeGlobalState = includeGlobalState;
+ return this;
+ }
+
+ /**
+ * Sets repository-specific restore settings
+ *
+ * @param settings restore settings
+ * @return this request
+ */
+ public RestoreRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Sets master node timeout
+ * <p/>
+ * This timeout affects only the start of the restore process. Once the restore process has started, this timeout
+ * has no effect on the duration of the restore.
+ *
+ * @param masterNodeTimeout master node timeout
+ * @return this request
+ */
+ public RestoreRequest masterNodeTimeout(TimeValue masterNodeTimeout) {
+ this.masterNodeTimeout = masterNodeTimeout;
+ return this;
+ }
+
+ /**
+ * Sets index rename pattern
+ *
+ * @param renamePattern rename pattern
+ * @return this request
+ */
+ public RestoreRequest renamePattern(String renamePattern) {
+ this.renamePattern = renamePattern;
+ return this;
+ }
+
+ /**
+ * Sets index rename replacement
+ *
+ * @param renameReplacement rename replacement
+ * @return this request
+ */
+ public RestoreRequest renameReplacement(String renameReplacement) {
+ this.renameReplacement = renameReplacement;
+ return this;
+ }
+
+ /**
+ * Returns restore operation cause
+ *
+ * @return restore operation cause
+ */
+ public String cause() {
+ return cause;
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Returns repository name
+ *
+ * @return repository name
+ */
+ public String repository() {
+ return repository;
+ }
+
+ /**
+ * Return the list of indices to be restored
+ *
+ * @return the list of indices
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Returns indices option flags
+ *
+ * @return indices options flags
+ */
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /**
+ * Returns rename pattern
+ *
+ * @return rename pattern
+ */
+ public String renamePattern() {
+ return renamePattern;
+ }
+
+ /**
+ * Returns replacement pattern
+ *
+ * @return replacement pattern
+ */
+ public String renameReplacement() {
+ return renameReplacement;
+ }
+
+ /**
+ * Returns repository-specific restore settings
+ *
+ * @return restore settings
+ */
+ public Settings settings() {
+ return settings;
+ }
+
+ /**
+ * Returns true if the global state should be restored during this restore operation
+ *
+ * @return restore global state flag
+ */
+ public boolean includeGlobalState() {
+ return includeGlobalState;
+ }
+
+ /**
+ * Return master node timeout
+ *
+ * @return master node timeout
+ */
+ public TimeValue masterNodeTimeout() {
+ return masterNodeTimeout;
+ }
+
+ }
+
+
+ /**
+ * This listener is called as soon as the restore operation starts in the cluster.
+ * <p/>
+ * To receive notifications about when the operation ends in the cluster, use {@link RestoreCompletionListener}
+ */
+ public interface RestoreSnapshotListener {
+ /**
+ * Called when the restore operation successfully starts in the cluster. A non-null value of the {@code restoreInfo}
+ * parameter means that the restore operation didn't involve any shards and therefore has already completed.
+ *
+ * @param restoreInfo if the restore operation finished, contains information about the operation; null otherwise
+ */
+ void onResponse(RestoreInfo restoreInfo);
+
+ /**
+ * Called when restore operation failed to start
+ *
+ * @param t exception that prevented the restore operation from starting
+ */
+ void onFailure(Throwable t);
+ }
+
+ /**
+ * This listener is called every time a snapshot is restored in the cluster
+ */
+ public interface RestoreCompletionListener {
+ /**
+ * Called for every snapshot that is completed in the cluster
+ *
+ * @param snapshotId snapshot id
+ * @param restoreInfo restore completion information
+ */
+ void onRestoreCompletion(SnapshotId snapshotId, RestoreInfo restoreInfo);
+ }
+
+ /**
+ * Internal class that is used to send notifications about finished shard restore operations to master node
+ */
+ private static class UpdateIndexShardRestoreStatusRequest extends TransportRequest {
+ private SnapshotId snapshotId;
+ private ShardId shardId;
+ private ShardRestoreStatus status;
+
+ private UpdateIndexShardRestoreStatusRequest() {
+
+ }
+
+ private UpdateIndexShardRestoreStatusRequest(SnapshotId snapshotId, ShardId shardId, ShardRestoreStatus status) {
+ this.snapshotId = snapshotId;
+ this.shardId = shardId;
+ this.status = status;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ snapshotId = SnapshotId.readSnapshotId(in);
+ shardId = ShardId.readShardId(in);
+ status = ShardRestoreStatus.readShardRestoreStatus(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ snapshotId.writeTo(out);
+ shardId.writeTo(out);
+ status.writeTo(out);
+ }
+
+ public SnapshotId snapshotId() {
+ return snapshotId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public ShardRestoreStatus status() {
+ return status;
+ }
+ }
+
+ /**
+ * Internal class that is used to send notifications about finished shard restore operations to master node
+ */
+ private class UpdateRestoreStateRequestHandler extends BaseTransportRequestHandler<UpdateIndexShardRestoreStatusRequest> {
+
+ static final String ACTION = "cluster/snapshot/update_restore";
+
+ @Override
+ public UpdateIndexShardRestoreStatusRequest newInstance() {
+ return new UpdateIndexShardRestoreStatusRequest();
+ }
+
+ @Override
+ public void messageReceived(UpdateIndexShardRestoreStatusRequest request, final TransportChannel channel) throws Exception {
+ innerUpdateRestoreState(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+}
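
A hedged usage sketch tying the request builder and the listener together; the
repository, snapshot, and index names are hypothetical, and restoreService stands in
for an injected RestoreService instance:

    // Illustrative only: start a restore of "my_snap" from repository "my_repo".
    RestoreService.RestoreRequest request =
            new RestoreService.RestoreRequest("restore_snapshot[my_snap]", "my_repo", "my_snap")
                    .indices(new String[]{"index_1"})
                    .renamePattern("index_(.+)")
                    .renameReplacement("restored_$1")
                    .masterNodeTimeout(TimeValue.timeValueSeconds(30));

    restoreService.restoreSnapshot(request, new RestoreService.RestoreSnapshotListener() {
        @Override
        public void onResponse(RestoreInfo restoreInfo) {
            // non-null restoreInfo: no shards were involved and the restore already completed;
            // null: shard recovery is in progress, track it via a RestoreCompletionListener
        }

        @Override
        public void onFailure(Throwable t) {
            // the restore could not be started
        }
    });
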
diff --git a/src/main/java/org/elasticsearch/snapshots/Snapshot.java b/src/main/java/org/elasticsearch/snapshots/Snapshot.java
new file mode 100644
index 0000000..80d6571
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/Snapshot.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.Version;
+
+/**
+ * Represents information about a snapshot
+ */
+public interface Snapshot extends Comparable<Snapshot> {
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ String name();
+
+ /**
+ * Returns current snapshot state
+ *
+ * @return snapshot state
+ */
+ SnapshotState state();
+
+ /**
+ * Returns the reason for a complete snapshot failure
+ *
+ * @return snapshot failure reason
+ */
+ String reason();
+
+ /**
+ * Returns version of Elasticsearch that was used to create this snapshot
+ *
+ * @return Elasticsearch version
+ */
+ Version version();
+
+ /**
+ * Returns indices that were included into this snapshot
+ *
+ * @return list of indices
+ */
+ ImmutableList<String> indices();
+
+ /**
+ * Returns time when snapshot started
+ *
+ * @return snapshot start time
+ */
+ long startTime();
+
+ /**
+ * Returns time when snapshot ended
+ * <p/>
+ * Can be 0L if the snapshot is still running
+ *
+ * @return snapshot end time
+ */
+ long endTime();
+
+ /**
+ * Returns total number of shards that were snapshotted
+ *
+ * @return number of shards
+ */
+ int totalShard();
+
+ /**
+ * Returns total number of shards that were successfully snapshotted
+ *
+ * @return number of successful shards
+ */
+ int successfulShards();
+
+ /**
+ * Returns shard failures
+ *
+ * @return shard failures
+ */
+ ImmutableList<SnapshotShardFailure> shardFailures();
+
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java b/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java
new file mode 100644
index 0000000..a4d8654
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+
+/**
+ * Thrown when snapshot creation fails completely
+ */
+public class SnapshotCreationException extends SnapshotException implements ElasticsearchWrapperException {
+
+ public SnapshotCreationException(SnapshotId snapshot, String message) {
+ super(snapshot, message);
+ }
+
+ public SnapshotCreationException(SnapshotId snapshot, Throwable cause) {
+ super(snapshot, "failed to create snapshot", cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotException.java b/src/main/java/org/elasticsearch/snapshots/SnapshotException.java
new file mode 100644
index 0000000..7a47d90
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotException.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+
+/**
+ * Generic snapshot exception
+ */
+public class SnapshotException extends ElasticsearchException {
+ private final SnapshotId snapshot;
+
+ public SnapshotException(SnapshotId snapshot, String msg) {
+ this(snapshot, msg, null);
+ }
+
+ public SnapshotException(SnapshotId snapshot, String msg, Throwable cause) {
+ super("[" + (snapshot == null ? "_na" : snapshot) + "] " + msg, cause);
+ this.snapshot = snapshot;
+ }
+
+ public SnapshotId snapshot() {
+ return snapshot;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
new file mode 100644
index 0000000..768a015
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+
+/**
+ * Information about a snapshot
+ */
+public class SnapshotInfo implements ToXContent, Streamable {
+
+ private static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
+
+ private String name;
+
+ private SnapshotState state;
+
+ private String reason;
+
+ private ImmutableList<String> indices;
+
+ private long startTime;
+
+ private long endTime;
+
+ private int totalShards;
+
+ private int successfulShards;
+
+ private ImmutableList<SnapshotShardFailure> shardFailures;
+
+ SnapshotInfo() {
+
+ }
+
+ /**
+ * Creates a new snapshot information from a {@link Snapshot}
+ *
+ * @param snapshot snapshot information returned by repository
+ */
+ public SnapshotInfo(Snapshot snapshot) {
+ name = snapshot.name();
+ state = snapshot.state();
+ reason = snapshot.reason();
+ indices = snapshot.indices();
+ startTime = snapshot.startTime();
+ endTime = snapshot.endTime();
+ totalShards = snapshot.totalShard();
+ successfulShards = snapshot.successfulShards();
+ shardFailures = snapshot.shardFailures();
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Returns snapshot state
+ *
+ * @return snapshot state
+ */
+ public SnapshotState state() {
+ return state;
+ }
+
+ /**
+ * Returns snapshot failure reason
+ *
+ * @return snapshot failure reason
+ */
+ public String reason() {
+ return reason;
+ }
+
+ /**
+ * Returns indices that were included in this snapshot
+ *
+ * @return list of indices
+ */
+ public ImmutableList<String> indices() {
+ return indices;
+ }
+
+ /**
+ * Returns time when snapshot started
+ *
+ * @return snapshot start time
+ */
+ public long startTime() {
+ return startTime;
+ }
+
+ /**
+ * Returns time when snapshot ended
+ * <p/>
+ * Can be 0L if snapshot is still running
+ *
+ * @return snapshot end time
+ */
+ public long endTime() {
+ return endTime;
+ }
+
+ /**
+ * Returns total number of shards that were snapshotted
+ *
+ * @return number of shards
+ */
+ public int totalShards() {
+ return totalShards;
+ }
+
+ /**
+ * Number of failed shards
+ *
+ * @return number of failed shards
+ */
+ public int failedShards() {
+ return totalShards - successfulShards;
+ }
+
+ /**
+ * Returns total number of shards that were successfully snapshotted
+ *
+ * @return number of successful shards
+ */
+ public int successfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * Returns shard failures
+ *
+ * @return shard failures
+ */
+ public ImmutableList<SnapshotShardFailure> shardFailures() {
+ return shardFailures;
+ }
+
+ /**
+ * Returns snapshot REST status
+ */
+ public RestStatus status() {
+ if (state == SnapshotState.FAILED) {
+ return RestStatus.INTERNAL_SERVER_ERROR;
+ }
+ if (shardFailures.size() == 0) {
+ return RestStatus.OK;
+ }
+ RestStatus status = RestStatus.OK;
+ if (successfulShards == 0 && totalShards > 0) {
+ for (SnapshotShardFailure shardFailure : shardFailures) {
+ // compare against the local variable; calling status() here would recurse infinitely
+ if (shardFailure.status().getStatus() > status.getStatus()) {
+ status = shardFailure.status();
+ }
+ }
+ return status;
+ }
+ return status;
+ }
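+ // Resolution above, illustrated: FAILED state -> INTERNAL_SERVER_ERROR (500);
+ // no shard failures -> OK (200); partial success (some shards succeeded) -> OK (200)
+ // with per-shard failures reported separately; all shards failed -> the highest
+ // RestStatus found among the shard failures.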
+
+ static final class Fields {
+ static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+ static final XContentBuilderString STATE = new XContentBuilderString("state");
+ static final XContentBuilderString REASON = new XContentBuilderString("reason");
+ static final XContentBuilderString START_TIME = new XContentBuilderString("start_time");
+ static final XContentBuilderString START_TIME_IN_MILLIS = new XContentBuilderString("start_time_in_millis");
+ static final XContentBuilderString END_TIME = new XContentBuilderString("end_time");
+ static final XContentBuilderString END_TIME_IN_MILLIS = new XContentBuilderString("end_time_in_millis");
+ static final XContentBuilderString DURATION = new XContentBuilderString("duration");
+ static final XContentBuilderString DURATION_IN_MILLIS = new XContentBuilderString("duration_in_millis");
+ static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
+ static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
+ static final XContentBuilderString TOTAL = new XContentBuilderString("total");
+ static final XContentBuilderString FAILED = new XContentBuilderString("failed");
+ static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field("snapshot", name);
+ builder.startArray(Fields.INDICES);
+ for (String index : indices) {
+ builder.value(index);
+ }
+ builder.endArray();
+ builder.field(Fields.STATE, state);
+ if (reason != null) {
+ builder.field(Fields.REASON, reason);
+ }
+ if (startTime != 0) {
+ builder.field(Fields.START_TIME, DATE_TIME_FORMATTER.printer().print(startTime));
+ builder.field(Fields.START_TIME_IN_MILLIS, startTime);
+ }
+ if (endTime != 0) {
+ builder.field(Fields.END_TIME, DATE_TIME_FORMATTER.printer().print(endTime));
+ builder.field(Fields.END_TIME_IN_MILLIS, endTime);
+ builder.timeValueField(Fields.DURATION_IN_MILLIS, Fields.DURATION, endTime - startTime);
+ }
+ builder.startArray(Fields.FAILURES);
+ for (SnapshotShardFailure shardFailure : shardFailures) {
+ SnapshotShardFailure.toXContent(shardFailure, builder, params);
+ }
+ builder.endArray();
+ builder.startObject(Fields.SHARDS);
+ builder.field(Fields.TOTAL, totalShards);
+ builder.field(Fields.FAILED, failedShards());
+ builder.field(Fields.SUCCESSFUL, successfulShards);
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
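+ // Sketch of the JSON produced above for a completed snapshot (field values are
+ // illustrative assumptions, not taken from a real response):
+ //
+ // {
+ //   "snapshot": "snap_1",
+ //   "indices": ["index_1"],
+ //   "state": "SUCCESS",
+ //   "start_time": "2014-01-01T00:00:00.000Z", "start_time_in_millis": 1388534400000,
+ //   "end_time": "2014-01-01T00:00:10.000Z", "end_time_in_millis": 1388534410000,
+ //   "duration": "10s", "duration_in_millis": 10000,
+ //   "failures": [],
+ //   "shards": { "total": 5, "failed": 0, "successful": 5 }
+ // }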
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ int size = in.readVInt();
+ ImmutableList.Builder<String> indicesListBuilder = ImmutableList.builder();
+ for (int i = 0; i < size; i++) {
+ indicesListBuilder.add(in.readString());
+ }
+ indices = indicesListBuilder.build();
+ state = SnapshotState.fromValue(in.readByte());
+ reason = in.readOptionalString();
+ startTime = in.readVLong();
+ endTime = in.readVLong();
+ totalShards = in.readVInt();
+ successfulShards = in.readVInt();
+ size = in.readVInt();
+ if (size > 0) {
+ ImmutableList.Builder<SnapshotShardFailure> failureBuilder = ImmutableList.builder();
+ for (int i = 0; i < size; i++) {
+ failureBuilder.add(SnapshotShardFailure.readSnapshotShardFailure(in));
+ }
+ shardFailures = failureBuilder.build();
+ } else {
+ shardFailures = ImmutableList.of();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeVInt(indices.size());
+ for (String index : indices) {
+ out.writeString(index);
+ }
+ out.writeByte(state.value());
+ out.writeOptionalString(reason);
+ out.writeVLong(startTime);
+ out.writeVLong(endTime);
+ out.writeVInt(totalShards);
+ out.writeVInt(successfulShards);
+ out.writeVInt(shardFailures.size());
+ for (SnapshotShardFailure failure : shardFailures) {
+ failure.writeTo(out);
+ }
+ }
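+ // Wire layout written above (and read symmetrically in readFrom): name, vint index
+ // count followed by the index names, state byte, optional reason, start/end times as
+ // vlongs, total/successful shard counts as vints, and a vint failure count followed
+ // by the serialized shard failures.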
+
+ /**
+ * Reads snapshot information from stream input
+ *
+ * @param in stream input
+ * @return deserialized snapshot info
+ * @throws IOException
+ */
+ public static SnapshotInfo readSnapshotInfo(StreamInput in) throws IOException {
+ SnapshotInfo snapshotInfo = new SnapshotInfo();
+ snapshotInfo.readFrom(in);
+ return snapshotInfo;
+ }
+
+ /**
+ * Reads optional snapshot information from stream input
+ *
+ * @param in stream input
+ * @return deserialized snapshot info or null
+ * @throws IOException
+ */
+ public static SnapshotInfo readOptionalSnapshotInfo(StreamInput in) throws IOException {
+ return in.readOptionalStreamable(new SnapshotInfo());
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java b/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
new file mode 100644
index 0000000..1a8ead0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.rest.RestStatus;
+
+/**
+ * Thrown if the requested snapshot doesn't exist
+ */
+public class SnapshotMissingException extends SnapshotException {
+
+ public SnapshotMissingException(SnapshotId snapshot) {
+ super(snapshot, "is missing");
+ }
+
+ public SnapshotMissingException(SnapshotId snapshot, Throwable cause) {
+ super(snapshot, "is missing", cause);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.NOT_FOUND;
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java b/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java
new file mode 100644
index 0000000..0b9280e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.cluster.metadata.SnapshotId;
+
+/**
+ * Snapshot restore exception
+ */
+public class SnapshotRestoreException extends SnapshotException {
+ public SnapshotRestoreException(SnapshotId snapshot, String message) {
+ super(snapshot, message);
+ }
+
+ public SnapshotRestoreException(SnapshotId snapshot, String message, Throwable cause) {
+ super(snapshot, message, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java b/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java
new file mode 100644
index 0000000..83c9032
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+
+/**
+ * Stores information about failures that occurred during the shard snapshotting process
+ */
+public class SnapshotShardFailure implements ShardOperationFailedException {
+ private String index;
+
+ private int shardId;
+
+ private String reason;
+
+ @Nullable
+ private String nodeId;
+
+ private RestStatus status;
+
+ private SnapshotShardFailure() {
+
+ }
+
+ /**
+ * Constructs new snapshot shard failure object
+ *
+ * @param nodeId node where failure occurred
+ * @param index index which the shard belongs to
+ * @param shardId shard id
+ * @param reason failure reason
+ */
+ public SnapshotShardFailure(@Nullable String nodeId, String index, int shardId, String reason) {
+ this.nodeId = nodeId;
+ this.index = index;
+ this.shardId = shardId;
+ this.reason = reason;
+ status = RestStatus.INTERNAL_SERVER_ERROR;
+ }
+
+ /**
+ * Returns index where failure occurred
+ *
+ * @return index
+ */
+ @Override
+ public String index() {
+ return this.index;
+ }
+
+ /**
+ * Returns shard id where failure occurred
+ *
+ * @return shard id
+ */
+ @Override
+ public int shardId() {
+ return this.shardId;
+ }
+
+ /**
+ * Returns reason for the failure
+ *
+ * @return reason for the failure
+ */
+ @Override
+ public String reason() {
+ return this.reason;
+ }
+
+ /**
+ * Returns REST status corresponding to this failure
+ *
+ * @return REST status
+ */
+ @Override
+ public RestStatus status() {
+ return status;
+ }
+
+ /**
+ * Returns node id where failure occurred
+ *
+ * @return node id
+ */
+ @Nullable
+ public String nodeId() {
+ return nodeId;
+ }
+
+ /**
+ * Reads shard failure information from stream input
+ *
+ * @param in stream input
+ * @return shard failure information
+ * @throws IOException
+ */
+ public static SnapshotShardFailure readSnapshotShardFailure(StreamInput in) throws IOException {
+ SnapshotShardFailure exp = new SnapshotShardFailure();
+ exp.readFrom(in);
+ return exp;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ nodeId = in.readOptionalString();
+ index = in.readString();
+ shardId = in.readVInt();
+ reason = in.readString();
+ status = RestStatus.readFrom(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalString(nodeId);
+ out.writeString(index);
+ out.writeVInt(shardId);
+ out.writeString(reason);
+ RestStatus.writeTo(out, status);
+ }
+
+ @Override
+ public String toString() {
+ return "[" + index + "][" + shardId + "] failed, reason [" + reason + "]";
+ }
+
+ /**
+ * Serializes snapshot failure information into JSON
+ *
+ * @param snapshotShardFailure snapshot failure information
+ * @param builder XContent builder
+ * @param params additional parameters
+ * @throws IOException
+ */
+ public static void toXContent(SnapshotShardFailure snapshotShardFailure, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ if (snapshotShardFailure.nodeId != null) {
+ builder.field("node_id", snapshotShardFailure.nodeId);
+ }
+ builder.field("index", snapshotShardFailure.index);
+ builder.field("reason", snapshotShardFailure.reason);
+ builder.field("shard_id", snapshotShardFailure.shardId);
+ builder.field("status", snapshotShardFailure.status.name());
+ builder.endObject();
+ }
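+ // Sketch of the JSON produced above (values are illustrative assumptions):
+ //
+ // {
+ //   "node_id": "node_1",
+ //   "index": "index_1",
+ //   "reason": "disk full",
+ //   "shard_id": 0,
+ //   "status": "INTERNAL_SERVER_ERROR"
+ // }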
+
+ /**
+ * Deserializes snapshot failure information from JSON
+ *
+ * @param parser JSON parser
+ * @return snapshot failure information
+ * @throws IOException
+ */
+ public static SnapshotShardFailure fromXContent(XContentParser parser) throws IOException {
+ SnapshotShardFailure snapshotShardFailure = new SnapshotShardFailure();
+
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ String currentFieldName = parser.currentName();
+ token = parser.nextToken();
+ if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ snapshotShardFailure.index = parser.text();
+ } else if ("node_id".equals(currentFieldName)) {
+ snapshotShardFailure.nodeId = parser.text();
+ } else if ("reason".equals(currentFieldName)) {
+ snapshotShardFailure.reason = parser.text();
+ } else if ("shard_id".equals(currentFieldName)) {
+ snapshotShardFailure.shardId = parser.intValue();
+ } else if ("status".equals(currentFieldName)) {
+ snapshotShardFailure.status = RestStatus.valueOf(parser.text());
+ } else {
+ throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "]");
+ }
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ }
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "]");
+ }
+ return snapshotShardFailure;
+ }
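+ // Note: fromXContent() accepts exactly the object shape emitted by toXContent() above;
+ // an unknown field name raises ElasticsearchParseException, as does any non-value token
+ // inside the object.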
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotState.java b/src/main/java/org/elasticsearch/snapshots/SnapshotState.java
new file mode 100644
index 0000000..88a2a98
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotState.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+
+/**
+ * Represents the state that a snapshot can be in
+ */
+public enum SnapshotState {
+ /**
+ * Snapshot process has started
+ */
+ IN_PROGRESS((byte) 0),
+ /**
+ * Snapshot process completed successfully
+ */
+ SUCCESS((byte) 1),
+ /**
+ * Snapshot failed
+ */
+ FAILED((byte) 2);
+
+ private final byte value;
+
+ private SnapshotState(byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns code that represents the snapshot state
+ *
+ * @return code for the state
+ */
+ public byte value() {
+ return value;
+ }
+
+ /**
+ * Returns true if snapshot completed (successfully or not)
+ *
+ * @return true if snapshot completed, false otherwise
+ */
+ public boolean completed() {
+ return this == SUCCESS || this == FAILED;
+ }
+
+ /**
+ * Generate snapshot state from code
+ *
+ * @param value the state code
+ * @return state
+ */
+ public static SnapshotState fromValue(byte value) {
+ switch (value) {
+ case 0:
+ return IN_PROGRESS;
+ case 1:
+ return SUCCESS;
+ case 2:
+ return FAILED;
+ default:
+ throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]");
+ }
+ }
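+ // Round-trip sketch (illustrative): value() and fromValue() are inverses, e.g.
+ // SnapshotState.fromValue(SnapshotState.SUCCESS.value()) == SnapshotState.SUCCESS;
+ // any byte outside 0..2 raises ElasticsearchIllegalArgumentException.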
+}
+
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java b/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java
new file mode 100644
index 0000000..53080f3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Snapshot utilities
+ */
+public class SnapshotUtils {
+
+ /**
+ * Filters the list of available indices based on the list of selected indices.
+ *
+ * @param availableIndices list of available indices
+ * @param selectedIndices list of selected indices
+ * @param indicesOptions options that control how unavailable indices are handled
+ * @return the filtered list of indices
+ */
+ public static ImmutableList<String> filterIndices(ImmutableList<String> availableIndices, String[] selectedIndices, IndicesOptions indicesOptions) {
+ if (selectedIndices == null || selectedIndices.length == 0) {
+ return availableIndices;
+ }
+ Set<String> result = null;
+ for (int i = 0; i < selectedIndices.length; i++) {
+ String indexOrPattern = selectedIndices[i];
+ boolean add = true;
+ if (!indexOrPattern.isEmpty()) {
+ if (availableIndices.contains(indexOrPattern)) {
+ if (result != null) {
+ result.add(indexOrPattern);
+ }
+ continue;
+ }
+ if (indexOrPattern.charAt(0) == '+') {
+ add = true;
+ indexOrPattern = indexOrPattern.substring(1);
+ // if its the first, add empty set
+ if (i == 0) {
+ result = new HashSet<String>();
+ }
+ } else if (indexOrPattern.charAt(0) == '-') {
+ // if its the first, fill it with all the indices...
+ if (i == 0) {
+ result = new HashSet<String>(availableIndices);
+ }
+ add = false;
+ indexOrPattern = indexOrPattern.substring(1);
+ }
+ }
+ if (indexOrPattern.isEmpty() || !Regex.isSimpleMatchPattern(indexOrPattern)) {
+ if (!availableIndices.contains(indexOrPattern)) {
+ if (!indicesOptions.ignoreUnavailable()) {
+ throw new IndexMissingException(new Index(indexOrPattern));
+ } else {
+ if (result == null) {
+ // add all the previous ones...
+ result = new HashSet<String>();
+ result.addAll(availableIndices.subList(0, i));
+ }
+ }
+ } else {
+ if (result != null) {
+ if (add) {
+ result.add(indexOrPattern);
+ } else {
+ result.remove(indexOrPattern);
+ }
+ }
+ }
+ continue;
+ }
+ if (result == null) {
+ // add all the previous ones...
+ result = new HashSet<String>();
+ result.addAll(availableIndices.subList(0, i));
+ }
+ boolean found = false;
+ for (String index : availableIndices) {
+ if (Regex.simpleMatch(indexOrPattern, index)) {
+ found = true;
+ if (add) {
+ result.add(index);
+ } else {
+ result.remove(index);
+ }
+ }
+ }
+ if (!found && !indicesOptions.allowNoIndices()) {
+ throw new IndexMissingException(new Index(indexOrPattern));
+ }
+ }
+ if (result == null) {
+ return ImmutableList.copyOf(selectedIndices);
+ }
+ return ImmutableList.copyOf(result);
+ }
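+ // Illustrative examples (assumed inputs), with availableIndices = [test1, test2, foo];
+ // result order is not guaranteed since the matches are collected in a HashSet:
+ //   filterIndices(available, new String[]{"test*"}, opts)           -> [test1, test2]
+ //   filterIndices(available, new String[]{"test*", "-test2"}, opts) -> [test1]
+ //   filterIndices(available, new String[]{"-foo"}, opts)            -> [test1, test2]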
+}
diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
new file mode 100644
index 0000000..641d5c2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -0,0 +1,1280 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.metadata.*;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData.ShardSnapshotStatus;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData.State;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.snapshots.IndexShardSnapshotAndRestoreService;
+import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static com.google.common.collect.Maps.newHashMapWithExpectedSize;
+import static com.google.common.collect.Sets.newHashSet;
+
+/**
+ * Service responsible for creating snapshots
+ * <p/>
+ * A typical snapshot creation process looks like this:
+ * <ul>
+ * <li>On the master node, {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called; it makes sure that no snapshot is currently running
+ * and registers the new snapshot in the cluster state</li>
+ * <li>When the cluster state is updated, the {@link #beginSnapshot(ClusterState, SnapshotMetaData.Entry, boolean, CreateSnapshotListener)} method
+ * kicks in, initializes the snapshot in the repository, and then populates the list of shards that need to be snapshotted in the cluster state</li>
+ * <li>Each data node watches for these shards, and when new shards scheduled for snapshotting appear in the cluster state, the data nodes
+ * start processing them through the {@link #processIndexShardSnapshots(SnapshotMetaData)} method</li>
+ * <li>Once a shard snapshot is created, the data node updates the state of the shard in the cluster state using the {@link #updateIndexShardSnapshotStatus(UpdateIndexShardSnapshotStatusRequest)} method</li>
+ * <li>When the last shard is completed, the master node marks the snapshot as completed in the {@link #innerUpdateSnapshotState} method</li>
+ * <li>After the cluster state is updated, {@link #endSnapshot(SnapshotMetaData.Entry)} finalizes the snapshot in the repository,
+ * notifies all {@link #snapshotCompletionListeners} that the snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(SnapshotId, SnapshotInfo, Throwable)} to remove the snapshot from the cluster state</li>
+ * </ul>
+ */
+public class SnapshotsService extends AbstractComponent implements ClusterStateListener {
+
+ private final ClusterService clusterService;
+
+ private final RepositoriesService repositoriesService;
+
+ private final ThreadPool threadPool;
+
+ private final IndicesService indicesService;
+
+ private final TransportService transportService;
+
+ private volatile ImmutableMap<SnapshotId, SnapshotShards> shardSnapshots = ImmutableMap.of();
+
+ private final CopyOnWriteArrayList<SnapshotCompletionListener> snapshotCompletionListeners = new CopyOnWriteArrayList<SnapshotCompletionListener>();
+
+
+ @Inject
+ public SnapshotsService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, ThreadPool threadPool,
+ IndicesService indicesService, TransportService transportService) {
+ super(settings);
+ this.clusterService = clusterService;
+ this.repositoriesService = repositoriesService;
+ this.threadPool = threadPool;
+ this.indicesService = indicesService;
+ this.transportService = transportService;
+
+ transportService.registerHandler(UpdateSnapshotStateRequestHandler.ACTION, new UpdateSnapshotStateRequestHandler());
+
+ // addLast to make sure that Repository will be created before snapshot
+ clusterService.addLast(this);
+ }
+
+ /**
+ * Retrieves snapshot from repository
+ *
+ * @param snapshotId snapshot id
+ * @return snapshot
+ * @throws SnapshotMissingException if snapshot is not found
+ */
+ public Snapshot snapshot(SnapshotId snapshotId) {
+ return repositoriesService.repository(snapshotId.getRepository()).readSnapshot(snapshotId);
+ }
+
+ /**
+ * Returns a list of snapshots from the repository sorted by snapshot creation date
+ *
+ * @param repositoryName repository name
+ * @return list of snapshots
+ */
+ public ImmutableList<Snapshot> snapshots(String repositoryName) {
+ ArrayList<Snapshot> snapshotList = newArrayList();
+ Repository repository = repositoriesService.repository(repositoryName);
+ ImmutableList<SnapshotId> snapshotIds = repository.snapshots();
+ for (SnapshotId snapshotId : snapshotIds) {
+ snapshotList.add(repository.readSnapshot(snapshotId));
+ }
+ CollectionUtil.timSort(snapshotList);
+ return ImmutableList.copyOf(snapshotList);
+ }
+
+ /**
+ * Initializes the snapshotting process.
+ * <p/>
+ * This method is used by clients to start a snapshot. It makes sure that no snapshots are currently running and
+ * creates a snapshot record in the cluster state metadata.
+ *
+ * @param request snapshot request
+ * @param listener snapshot creation listener
+ */
+ public void createSnapshot(final SnapshotRequest request, final CreateSnapshotListener listener) {
+ final SnapshotId snapshotId = new SnapshotId(request.repository(), request.name());
+ clusterService.submitStateUpdateTask(request.cause(), new TimeoutClusterStateUpdateTask() {
+
+ private SnapshotMetaData.Entry newSnapshot = null;
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ validate(request, currentState);
+
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ if (snapshots == null || snapshots.entries().isEmpty()) {
+ // Store newSnapshot here to be processed in clusterStateProcessed
+ ImmutableList<String> indices = ImmutableList.copyOf(metaData.concreteIndices(request.indices(), request.indicesOptions()));
+ logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices);
+ newSnapshot = new SnapshotMetaData.Entry(snapshotId, request.includeGlobalState(), State.INIT, indices, null);
+ snapshots = new SnapshotMetaData(newSnapshot);
+ } else {
+ // TODO: What should we do if a snapshot is already running?
+ throw new ConcurrentSnapshotExecutionException(snapshotId, "a snapshot is already running");
+ }
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}][{}] failed to create snapshot", t, request.repository(), request.name());
+ newSnapshot = null;
+ listener.onFailure(t);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) {
+ if (newSnapshot != null) {
+ threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
+ @Override
+ public void run() {
+ beginSnapshot(newState, newSnapshot, request.partial, listener);
+ }
+ });
+ }
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return request.masterNodeTimeout();
+ }
+
+ });
+ }
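+ // Illustrative caller sketch (the request wiring below is an assumption, not part of
+ // this change):
+ //
+ //   snapshotsService.createSnapshot(
+ //       new SnapshotRequest("user request", "snap_1", "my_repo").indices("index_1"),
+ //       new CreateSnapshotListener() {
+ //           @Override
+ //           public void onResponse() { /* snapshot accepted for processing */ }
+ //
+ //           @Override
+ //           public void onFailure(Throwable t) { /* validation or registration failed */ }
+ //       });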
+
+ /**
+ * Validates snapshot request
+ *
+ * @param request snapshot request
+ * @param state current cluster state
+ * @throws org.elasticsearch.ElasticsearchException
+ */
+ private void validate(SnapshotRequest request, ClusterState state) throws ElasticsearchException {
+ RepositoriesMetaData repositoriesMetaData = state.getMetaData().custom(RepositoriesMetaData.TYPE);
+ if (repositoriesMetaData == null || repositoriesMetaData.repository(request.repository()) == null) {
+ throw new RepositoryMissingException(request.repository());
+ }
+ if (!Strings.hasLength(request.name())) {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "cannot be empty");
+ }
+ if (request.name().contains(" ")) {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain whitespace");
+ }
+ if (request.name().contains(",")) {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain ','");
+ }
+ if (request.name().contains("#")) {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain '#'");
+ }
+ if (request.name().charAt(0) == '_') {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not start with '_'");
+ }
+ if (!request.name().toLowerCase(Locale.ROOT).equals(request.name())) {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must be lowercase");
+ }
+ if (!Strings.validFileName(request.name())) {
+ throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
+ }
+ }
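+ // Examples of snapshot names rejected by the checks above: "" (empty), "my snap"
+ // (whitespace), "a,b", "snap#1", "_snap" (leading underscore), "Snap1" (not lowercase),
+ // and any name containing a character from Strings.INVALID_FILENAME_CHARS.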
+
+ /**
+ * Starts snapshot.
+ * <p/>
+ * Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed.
+ *
+ * @param clusterState cluster state
+ * @param snapshot snapshot meta data
+ * @param partial allow partial snapshots
+ * @param userCreateSnapshotListener listener
+ */
+ private void beginSnapshot(ClusterState clusterState, final SnapshotMetaData.Entry snapshot, final boolean partial, final CreateSnapshotListener userCreateSnapshotListener) {
+ boolean snapshotCreated = false;
+ try {
+ Repository repository = repositoriesService.repository(snapshot.snapshotId().getRepository());
+
+ MetaData metaData = clusterState.metaData();
+ if (!snapshot.includeGlobalState()) {
+ // Remove global state from the cluster state
+ MetaData.Builder builder = MetaData.builder();
+ for (String index : snapshot.indices()) {
+ builder.put(metaData.index(index), false);
+ }
+ metaData = builder.build();
+ }
+
+ repository.initializeSnapshot(snapshot.snapshotId(), snapshot.indices(), metaData);
+ snapshotCreated = true;
+ if (snapshot.indices().isEmpty()) {
+ // No indices in this snapshot - we are done
+ userCreateSnapshotListener.onResponse();
+ endSnapshot(snapshot);
+ return;
+ }
+ clusterService.submitStateUpdateTask("update_snapshot [" + snapshot + "]", new ProcessedClusterStateUpdateTask() {
+ boolean accepted = false;
+ SnapshotMetaData.Entry updatedSnapshot;
+ String failure = null;
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ ImmutableList.Builder<SnapshotMetaData.Entry> entries = ImmutableList.builder();
+ for (SnapshotMetaData.Entry entry : snapshots.entries()) {
+ if (entry.snapshotId().equals(snapshot.snapshotId())) {
+ // Replace the snapshot that was just created
+ ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards = shards(snapshot.snapshotId(), currentState, snapshot.indices());
+ if (!partial) {
+ Set<String> indicesWithMissingShards = indicesWithMissingShards(shards);
+ if (indicesWithMissingShards != null) {
+ updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), State.FAILED, snapshot.indices(), shards);
+ entries.add(updatedSnapshot);
+ failure = "Indices don't have primary shards [" + indicesWithMissingShards + "]";
+ continue;
+ }
+ }
+ updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), State.STARTED, snapshot.indices(), shards);
+ entries.add(updatedSnapshot);
+ if (!completed(shards.values())) {
+ accepted = true;
+ }
+ } else {
+ entries.add(entry);
+ }
+ }
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, new SnapshotMetaData(entries.build()));
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}] failed to create snapshot", t, snapshot.snapshotId());
+ userCreateSnapshotListener.onFailure(t);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
+ // for processing. If client wants to wait for the snapshot completion, it can register snapshot
+ // completion listener in this method. For the snapshot completion to work properly, the snapshot
+ // should still exist when listener is registered.
+ userCreateSnapshotListener.onResponse();
+
+ // Now that snapshot completion listener is registered we can end the snapshot if needed
+ // We should end snapshot only if 1) we didn't accept it for processing (which happens when there
+ // is nothing to do) and 2) there was a snapshot in metadata that we should end. Otherwise we should
+ // go ahead and continue working on this snapshot rather then end here.
+ if (!accepted && updatedSnapshot != null) {
+ endSnapshot(updatedSnapshot, failure);
+ }
+ }
+ });
+ } catch (Throwable t) {
+ logger.warn("failed to create snapshot [{}]", t, snapshot.snapshotId());
+ clusterService.submitStateUpdateTask("fail_snapshot [" + snapshot.snapshotId() + "]", new ClusterStateUpdateTask() {
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ ImmutableList.Builder<SnapshotMetaData.Entry> entries = ImmutableList.builder();
+ for (SnapshotMetaData.Entry entry : snapshots.entries()) {
+ if (!entry.snapshotId().equals(snapshot.snapshotId())) {
+ entries.add(entry);
+ }
+ }
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, new SnapshotMetaData(entries.build()));
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}] failed to delete snapshot", t, snapshot.snapshotId());
+ }
+ });
+ if (snapshotCreated) {
+ try {
+ repositoriesService.repository(snapshot.snapshotId().getRepository()).finalizeSnapshot(snapshot.snapshotId(), ExceptionsHelper.detailedMessage(t), 0, ImmutableList.<SnapshotShardFailure>of());
+ } catch (Throwable t2) {
+ logger.warn("[{}] failed to close snapshot in repository", t2, snapshot.snapshotId());
+ }
+ }
+ userCreateSnapshotListener.onFailure(t);
+ }
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ try {
+ if (event.localNodeMaster()) {
+ if (event.nodesRemoved()) {
+ processSnapshotsOnRemovedNodes(event);
+ }
+ }
+ SnapshotMetaData prev = event.previousState().metaData().custom(SnapshotMetaData.TYPE);
+ SnapshotMetaData curr = event.state().metaData().custom(SnapshotMetaData.TYPE);
+
+ if (prev == null) {
+ if (curr != null) {
+ processIndexShardSnapshots(curr);
+ }
+ } else {
+ if (!prev.equals(curr)) {
+ processIndexShardSnapshots(curr);
+ }
+ }
+ } catch (Throwable t) {
+ logger.warn("failed to update snapshot state", t);
+ }
+ }
+
+ /**
+ * Cleans up shard snapshots that were running on removed nodes
+ *
+ * @param event cluster changed event
+ */
+ private void processSnapshotsOnRemovedNodes(ClusterChangedEvent event) {
+ if (removedNodesCleanupNeeded(event)) {
+ // Check if we just became the master
+ final boolean newMaster = !event.previousState().nodes().localNodeMaster();
+ clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ DiscoveryNodes nodes = currentState.nodes();
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ if (snapshots == null) {
+ return currentState;
+ }
+ boolean changed = false;
+ ArrayList<SnapshotMetaData.Entry> entries = newArrayList();
+ for (final SnapshotMetaData.Entry snapshot : snapshots.entries()) {
+ SnapshotMetaData.Entry updatedSnapshot = snapshot;
+ boolean snapshotChanged = false;
+ if (snapshot.state() == State.STARTED) {
+ ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableMap.builder();
+ for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards().entrySet()) {
+ ShardSnapshotStatus shardStatus = shardEntry.getValue();
+ if (!shardStatus.state().completed() && shardStatus.nodeId() != null) {
+ if (nodes.nodeExists(shardStatus.nodeId())) {
+ shards.put(shardEntry);
+ } else {
+ // TODO: Restart snapshot on another node?
+ snapshotChanged = true;
+ logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.getKey(), shardStatus.nodeId());
+ shards.put(shardEntry.getKey(), new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
+ }
+ }
+ }
+ if (snapshotChanged) {
+ changed = true;
+ ImmutableMap<ShardId, ShardSnapshotStatus> shardsMap = shards.build();
+ if (!snapshot.state().completed() && completed(shardsMap.values())) {
+ updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), State.SUCCESS, snapshot.indices(), shardsMap);
+ endSnapshot(updatedSnapshot);
+ } else {
+ updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), snapshot.state(), snapshot.indices(), shardsMap);
+ }
+ }
+ entries.add(updatedSnapshot);
+ } else if (snapshot.state() == State.INIT && newMaster) {
+ // Clean up the snapshot that failed to start from the old master
+ deleteSnapshot(snapshot.snapshotId(), new DeleteSnapshotListener() {
+ @Override
+ public void onResponse() {
+ logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshotId());
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.warn("failed to clean up abandoned snapshot {} in INIT state", t, snapshot.snapshotId());
+ }
+ });
+ } else if (snapshot.state() == State.SUCCESS && newMaster) {
+ // Finalize the snapshot
+ endSnapshot(snapshot);
+ }
+ }
+ if (changed) {
+ snapshots = new SnapshotMetaData(entries.toArray(new SnapshotMetaData.Entry[entries.size()]));
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("failed to update snapshot state after node removal", t);
+ }
+ });
+ }
+ }
+
+ private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) {
+ // Check if we just became the master
+ boolean newMaster = !event.previousState().nodes().localNodeMaster();
+ SnapshotMetaData snapshotMetaData = event.state().getMetaData().custom(SnapshotMetaData.TYPE);
+ if (snapshotMetaData == null) {
+ return false;
+ }
+ for (SnapshotMetaData.Entry snapshot : snapshotMetaData.entries()) {
+ if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) {
+ // We just replaced the old master and snapshots in intermediate states need to be cleaned up
+ return true;
+ }
+ for (DiscoveryNode node : event.nodesDelta().removedNodes()) {
+ for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards().entrySet()) {
+ ShardSnapshotStatus shardStatus = shardEntry.getValue();
+ if (!shardStatus.state().completed() && node.getId().equals(shardStatus.nodeId())) {
+ // At least one shard was running on the removed node - we need to fail it
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Checks if any new shards should be snapshotted on this node
+ *
+ * @param snapshotMetaData snapshot metadata to be processed
+ */
+ private void processIndexShardSnapshots(SnapshotMetaData snapshotMetaData) {
+ Map<SnapshotId, SnapshotShards> survivors = newHashMap();
+ // First, remove snapshots that are no longer there
+ for (Map.Entry<SnapshotId, SnapshotShards> entry : shardSnapshots.entrySet()) {
+ if (snapshotMetaData != null && snapshotMetaData.snapshot(entry.getKey()) != null) {
+ survivors.put(entry.getKey(), entry.getValue());
+ }
+ }
+
+ // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running
+ // snapshots in the future
+ HashMap<SnapshotId, SnapshotShards> newSnapshots = null;
+ // Now go through all snapshots and update existing or create missing
+ final String localNodeId = clusterService.localNode().id();
+ for (SnapshotMetaData.Entry entry : snapshotMetaData.entries()) {
+ HashMap<ShardId, IndexShardSnapshotStatus> startedShards = null;
+ for (Map.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> shard : entry.shards().entrySet()) {
+ // Check if we have new shards to start processing on
+ if (localNodeId.equals(shard.getValue().nodeId())) {
+ if (entry.state() == State.STARTED) {
+ if (startedShards == null) {
+ startedShards = newHashMap();
+ }
+ startedShards.put(shard.getKey(), new IndexShardSnapshotStatus());
+ } else if (entry.state() == State.ABORTED) {
+ SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshotId());
+ if (snapshotShards != null) {
+ IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.getKey());
+ if (snapshotStatus != null) {
+ snapshotStatus.abort();
+ }
+ }
+ }
+ }
+ }
+ if (startedShards != null) {
+ if (!survivors.containsKey(entry.snapshotId())) {
+ if (newSnapshots == null) {
+ newSnapshots = newHashMapWithExpectedSize(2);
+ }
+ newSnapshots.put(entry.snapshotId(), new SnapshotShards(ImmutableMap.copyOf(startedShards)));
+ }
+ }
+ }
+
+ if (newSnapshots != null) {
+ survivors.putAll(newSnapshots);
+ }
+
+ // Update the list of snapshots that we saw and tried to start
+ // If startup of these shards fails later, we don't want to try starting these shards again
+ shardSnapshots = ImmutableMap.copyOf(survivors);
+
+ // We have new snapshots to process - start them
+ if (newSnapshots != null) {
+ for (final Map.Entry<SnapshotId, SnapshotShards> entry : newSnapshots.entrySet()) {
+ for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().shards.entrySet()) {
+ try {
+ final IndexShardSnapshotAndRestoreService shardSnapshotService = indicesService.indexServiceSafe(shardEntry.getKey().getIndex()).shardInjectorSafe(shardEntry.getKey().id())
+ .getInstance(IndexShardSnapshotAndRestoreService.class);
+ threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ shardSnapshotService.snapshot(entry.getKey(), shardEntry.getValue());
+ updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.SUCCESS)));
+ } catch (Throwable t) {
+ logger.warn("[{}] [{}] failed to create snapshot", t, shardEntry.getKey(), entry.getKey());
+ updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.FAILED, ExceptionsHelper.detailedMessage(t))));
+ }
+ }
+ });
+ } catch (Throwable t) {
+ updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.FAILED, ExceptionsHelper.detailedMessage(t))));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Updates the shard status
+ *
+ * @param request update shard status request
+ */
+ private void updateIndexShardSnapshotStatus(UpdateIndexShardSnapshotStatusRequest request) {
+ try {
+ if (clusterService.state().nodes().localNodeMaster()) {
+ innerUpdateSnapshotState(request);
+ } else {
+ transportService.sendRequest(clusterService.state().nodes().masterNode(),
+ UpdateSnapshotStateRequestHandler.ACTION, request, EmptyTransportResponseHandler.INSTANCE_SAME);
+ }
+ } catch (Throwable t) {
+ logger.warn("[{}] [{}] failed to update snapshot state", t, request.snapshotId(), request.status());
+ }
+ }
+
+ /**
+ * Checks if all shards in the list have completed
+ *
+ * @param shards list of shard statuses
+ * @return true if all shards have completed (either successfully or failed), false otherwise
+ */
+ private boolean completed(Collection<SnapshotMetaData.ShardSnapshotStatus> shards) {
+ for (ShardSnapshotStatus status : shards) {
+ if (!status.state().completed()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns the set of indices with missing shards
+ *
+ * @param shards map of shard statuses
+ * @return set of indices with missing shards, or null if there are none
+ */
+ private Set<String> indicesWithMissingShards(ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards) {
+ Set<String> indices = null;
+ for (ImmutableMap.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> entry : shards.entrySet()) {
+ if (entry.getValue().state() == State.MISSING) {
+ if (indices == null) {
+ indices = newHashSet();
+ }
+ indices.add(entry.getKey().getIndex());
+ }
+ }
+ return indices;
+ }
+
+ /**
+ * Updates the shard status on the master node
+ *
+ * @param request update shard status request
+ */
+ private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request) {
+ clusterService.submitStateUpdateTask("update snapshot state", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ if (snapshots != null) {
+ boolean changed = false;
+ ArrayList<SnapshotMetaData.Entry> entries = newArrayList();
+ for (SnapshotMetaData.Entry entry : snapshots.entries()) {
+ if (entry.snapshotId().equals(request.snapshotId())) {
+ HashMap<ShardId, ShardSnapshotStatus> shards = newHashMap(entry.shards());
+ logger.trace("[{}] Updating shard [{}] with status [{}]", request.snapshotId(), request.shardId(), request.status().state());
+ shards.put(request.shardId(), request.status());
+ if (!completed(shards.values())) {
+ entries.add(new SnapshotMetaData.Entry(entry.snapshotId(), entry.includeGlobalState(), entry.state(), entry.indices(), ImmutableMap.copyOf(shards)));
+ } else {
+ // Snapshot is finished - mark it as done
+ // TODO: Add PARTIAL_SUCCESS status?
+ SnapshotMetaData.Entry updatedEntry = new SnapshotMetaData.Entry(entry.snapshotId(), entry.includeGlobalState(), State.SUCCESS, entry.indices(), ImmutableMap.copyOf(shards));
+ entries.add(updatedEntry);
+ // Finalize snapshot in the repository
+ endSnapshot(updatedEntry);
+ logger.info("snapshot [{}] is done", updatedEntry.snapshotId());
+ }
+ changed = true;
+ } else {
+ entries.add(entry);
+ }
+ }
+ if (changed) {
+ snapshots = new SnapshotMetaData(entries.toArray(new SnapshotMetaData.Entry[entries.size()]));
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}][{}] failed to update snapshot status to [{}]", t, request.snapshotId(), request.shardId(), request.status());
+ }
+ });
+ }
+
+ /**
+ * Finalizes the snapshot in the repository and then removes it from the cluster state
+ * <p/>
+ * This is a non-blocking method that runs on a thread from the SNAPSHOT thread pool
+ *
+ * @param entry snapshot
+ */
+ private void endSnapshot(SnapshotMetaData.Entry entry) {
+ endSnapshot(entry, null);
+ }
+
+
+ /**
+ * Finalizes the snapshot in the repository and then removes it from the cluster state
+ * <p/>
+ * This is a non-blocking method that runs on a thread from the SNAPSHOT thread pool
+ *
+ * @param entry snapshot
+ * @param failure failure reason or null if snapshot was successful
+ */
+ private void endSnapshot(final SnapshotMetaData.Entry entry, final String failure) {
+ threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
+ @Override
+ public void run() {
+ SnapshotId snapshotId = entry.snapshotId();
+ try {
+ final Repository repository = repositoriesService.repository(snapshotId.getRepository());
+ logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshotId, entry.state(), failure);
+ ArrayList<ShardSearchFailure> failures = newArrayList();
+ ArrayList<SnapshotShardFailure> shardFailures = newArrayList();
+ for (Map.Entry<ShardId, ShardSnapshotStatus> shardStatus : entry.shards().entrySet()) {
+ ShardId shardId = shardStatus.getKey();
+ ShardSnapshotStatus status = shardStatus.getValue();
+ if (status.state().failed()) {
+ failures.add(new ShardSearchFailure(status.reason(), new SearchShardTarget(status.nodeId(), shardId.getIndex(), shardId.id())));
+ shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId.getIndex(), shardId.id(), status.reason()));
+ }
+ }
+ Snapshot snapshot = repository.finalizeSnapshot(snapshotId, failure, entry.shards().size(), ImmutableList.copyOf(shardFailures));
+ removeSnapshotFromClusterState(snapshotId, new SnapshotInfo(snapshot), null);
+ } catch (Throwable t) {
+ logger.warn("[{}] failed to finalize snapshot", t, snapshotId);
+ removeSnapshotFromClusterState(snapshotId, null, t);
+ }
+ }
+ });
+ }
+
+ /**
+ * Removes record of running snapshot from cluster state
+ *
+ * @param snapshotId snapshot id
+ * @param snapshot snapshot info if snapshot was successful
+ * @param t exception if snapshot failed
+ */
+ private void removeSnapshotFromClusterState(final SnapshotId snapshotId, final SnapshotInfo snapshot, final Throwable t) {
+ clusterService.submitStateUpdateTask("remove snapshot metadata", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ if (snapshots != null) {
+ boolean changed = false;
+ ArrayList<SnapshotMetaData.Entry> entries = newArrayList();
+ for (SnapshotMetaData.Entry entry : snapshots.entries()) {
+ if (entry.snapshotId().equals(snapshotId)) {
+ changed = true;
+ } else {
+ entries.add(entry);
+ }
+ }
+ if (changed) {
+ snapshots = new SnapshotMetaData(entries.toArray(new SnapshotMetaData.Entry[entries.size()]));
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("[{}] failed to remove snapshot metadata", t, snapshotId);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ for (SnapshotCompletionListener listener : snapshotCompletionListeners) {
+ try {
+ if (snapshot != null) {
+ listener.onSnapshotCompletion(snapshotId, snapshot);
+ } else {
+ listener.onSnapshotFailure(snapshotId, t);
+ }
+ } catch (Throwable t) {
+ logger.warn("failed to refresh settings for [{}]", t, listener);
+ }
+ }
+
+ }
+ });
+ }
+
+ /**
+ * Deletes snapshot from repository.
+ * <p/>
+ * If the snapshot is still running, this cancels the snapshot first and then deletes it from the repository.
+ *
+ * @param snapshotId snapshot id
+ * @param listener listener
+ */
+ public void deleteSnapshot(final SnapshotId snapshotId, final DeleteSnapshotListener listener) {
+ clusterService.submitStateUpdateTask("delete snapshot", new ProcessedClusterStateUpdateTask() {
+
+ boolean waitForSnapshot = false;
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ MetaData metaData = currentState.metaData();
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ if (snapshots == null) {
+ // No snapshots running - we can continue
+ return currentState;
+ }
+ SnapshotMetaData.Entry snapshot = snapshots.snapshot(snapshotId);
+ if (snapshot == null) {
+ // This snapshot is not running - continue
+ if (!snapshots.entries().isEmpty()) {
+ // However other snapshots are running - cannot continue
+ throw new ConcurrentSnapshotExecutionException(snapshotId, "another snapshot is currently running, cannot delete");
+ }
+ return currentState;
+ } else {
+ // This snapshot is currently running - stopping shards first
+ waitForSnapshot = true;
+ ImmutableMap<ShardId, ShardSnapshotStatus> shards;
+ if (snapshot.state() == State.STARTED && snapshot.shards() != null) {
+ // snapshot is currently running - stop started shards
+ ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shardsBuilder = ImmutableMap.builder();
+ for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards().entrySet()) {
+ ShardSnapshotStatus status = shardEntry.getValue();
+ if (!status.state().completed()) {
+ shardsBuilder.put(shardEntry.getKey(), new ShardSnapshotStatus(status.nodeId(), State.ABORTED));
+ } else {
+ shardsBuilder.put(shardEntry.getKey(), status);
+ }
+ }
+ shards = shardsBuilder.build();
+ } else if (snapshot.state() == State.INIT) {
+ // snapshot hasn't started yet - end it
+ shards = snapshot.shards();
+ endSnapshot(snapshot);
+ } else {
+ // snapshot is being finalized - wait for it
+ logger.trace("trying to delete completed snapshot - save to delete");
+ return currentState;
+ }
+ SnapshotMetaData.Entry newSnapshot = new SnapshotMetaData.Entry(snapshotId, snapshot.includeGlobalState(), State.ABORTED, snapshot.indices(), shards);
+ snapshots = new SnapshotMetaData(newSnapshot);
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ listener.onFailure(t);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ if (waitForSnapshot) {
+ logger.trace("adding snapshot completion listener to wait for deleted snapshot to finish");
+ addListener(new SnapshotCompletionListener() {
+ @Override
+ public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
+ logger.trace("deleted snapshot completed - deleting files");
+ removeListener(this);
+ deleteSnapshotFromRepository(snapshotId, listener);
+ }
+
+ @Override
+ public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
+ logger.trace("deleted snapshot failed - deleting files", t);
+ removeListener(this);
+ deleteSnapshotFromRepository(snapshotId, listener);
+ }
+ });
+ } else {
+ logger.trace("deleted snapshot is not running - deleting files");
+ deleteSnapshotFromRepository(snapshotId, listener);
+ }
+ }
+ });
+ }
+
+ /**
+ * Checks if a repository is currently in use by one of the snapshots
+ *
+ * @param clusterState cluster state
+ * @param repository repository id
+ * @return true if repository is currently in use by one of the running snapshots
+ */
+ public static boolean isRepositoryInUse(ClusterState clusterState, String repository) {
+ MetaData metaData = clusterState.metaData();
+ SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
+ if (snapshots != null) {
+ for (SnapshotMetaData.Entry snapshot : snapshots.entries()) {
+ if (repository.equals(snapshot.snapshotId().getRepository())) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Deletes snapshot from repository
+ *
+ * @param snapshotId snapshot id
+ * @param listener listener
+ */
+ private void deleteSnapshotFromRepository(final SnapshotId snapshotId, final DeleteSnapshotListener listener) {
+ threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Repository repository = repositoriesService.repository(snapshotId.getRepository());
+ repository.deleteSnapshot(snapshotId);
+ listener.onResponse();
+ } catch (Throwable t) {
+ listener.onFailure(t);
+ }
+ }
+ });
+ }
+
+ /**
+ * Calculates the list of shards that should be included in the current snapshot
+ *
+ * @param snapshotId snapshot id
+ * @param clusterState cluster state
+ * @param indices list of indices to be snapshotted
+ * @return map of shards to be included in the current snapshot
+ */
+ private ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards(SnapshotId snapshotId, ClusterState clusterState, ImmutableList<String> indices) {
+ ImmutableMap.Builder<ShardId, SnapshotMetaData.ShardSnapshotStatus> builder = ImmutableMap.builder();
+ MetaData metaData = clusterState.metaData();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = metaData.index(index);
+ IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index);
+ if (indexRoutingTable == null) {
+ throw new SnapshotCreationException(snapshotId, "Missing routing table for index [" + index + "]");
+ }
+ for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
+ ShardId shardId = new ShardId(index, i);
+ ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
+ if (primary == null || !primary.assignedToNode()) {
+ builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
+ } else if (!primary.started()) {
+ builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, "primary shard hasn't been started yet"));
+ } else {
+ builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(primary.currentNodeId()));
+ }
+ }
+ }
+
+ return builder.build();
+ }
+
+ /**
+ * Adds snapshot completion listener
+ *
+ * @param listener listener
+ */
+ public void addListener(SnapshotCompletionListener listener) {
+ this.snapshotCompletionListeners.add(listener);
+ }
+
+ /**
+ * Removes snapshot completion listener
+ *
+ * @param listener listener
+ */
+ public void removeListener(SnapshotCompletionListener listener) {
+ this.snapshotCompletionListeners.remove(listener);
+ }
+
+ /**
+ * Listener for create snapshot operation
+ */
+ public interface CreateSnapshotListener {
+
+ /**
+ * Called when snapshot has successfully started
+ */
+ void onResponse();
+
+ /**
+ * Called if a snapshot operation couldn't start
+ */
+ void onFailure(Throwable t);
+ }
+
+ /**
+ * Listener for delete snapshot operation
+ */
+ public interface DeleteSnapshotListener {
+
+ /**
+ * Called if delete operation was successful
+ */
+ void onResponse();
+
+ /**
+ * Called if delete operation failed
+ */
+ void onFailure(Throwable t);
+ }
+
+ public interface SnapshotCompletionListener {
+
+ void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot);
+
+ void onSnapshotFailure(SnapshotId snapshotId, Throwable t);
+ }
+
+ /**
+ * Snapshot creation request
+ */
+ public static class SnapshotRequest {
+
+ private String cause;
+
+ private String name;
+
+ private String repository;
+
+ private String[] indices;
+
+ private IndicesOptions indicesOptions = IndicesOptions.strict();
+
+ private boolean partial;
+
+ private Settings settings;
+
+ private boolean includeGlobalState;
+
+ private TimeValue masterNodeTimeout;
+
+ /**
+ * Constructs new snapshot creation request
+ *
+ * @param cause cause for snapshot operation
+ * @param name name of the snapshot
+ * @param repository name of the repository
+ */
+ public SnapshotRequest(String cause, String name, String repository) {
+ this.cause = cause;
+ this.name = name;
+ this.repository = repository;
+ }
+
+ /**
+ * Sets the list of indices to be snapshotted
+ *
+ * @param indices list of indices
+ * @return this request
+ */
+ public SnapshotRequest indices(String[] indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * Sets repository-specific snapshot settings
+ *
+ * @param settings snapshot settings
+ * @return this request
+ */
+ public SnapshotRequest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ /**
+ * Set to true if global state should be stored as part of the snapshot
+ *
+ * @param includeGlobalState true if global state should be stored as part of the snapshot
+ * @return this request
+ */
+ public SnapshotRequest includeGlobalState(boolean includeGlobalState) {
+ this.includeGlobalState = includeGlobalState;
+ return this;
+ }
+
+ /**
+ * Sets master node timeout
+ *
+ * @param masterNodeTimeout master node timeout
+ * @return this request
+ */
+ public SnapshotRequest masterNodeTimeout(TimeValue masterNodeTimeout) {
+ this.masterNodeTimeout = masterNodeTimeout;
+ return this;
+ }
+
+ /**
+ * Sets the indices options
+ *
+ * @param indicesOptions indices options
+ * @return this request
+ */
+ public SnapshotRequest indicesOptions(IndicesOptions indicesOptions) {
+ this.indicesOptions = indicesOptions;
+ return this;
+ }
+
+ /**
+ * Set to true if partial snapshot should be allowed
+ *
+ * @param partial true if partial snapshots should be allowed
+ * @return this request
+ */
+ public SnapshotRequest partial(boolean partial) {
+ this.partial = partial;
+ return this;
+ }
+
+ /**
+ * Returns cause for snapshot operation
+ *
+ * @return cause for snapshot operation
+ */
+ public String cause() {
+ return cause;
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Returns snapshot repository
+ *
+ * @return snapshot repository
+ */
+ public String repository() {
+ return repository;
+ }
+
+ /**
+ * Returns the list of indices to be snapshotted
+ *
+ * @return the list of indices
+ */
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * Returns indices options
+ *
+ * @return indices options
+ */
+ public IndicesOptions indicesOptions() {
+ return indicesOptions;
+ }
+
+ /**
+ * Returns repository-specific settings for the snapshot operation
+ *
+ * @return repository-specific settings
+ */
+ public Settings settings() {
+ return settings;
+ }
+
+ /**
+ * Returns true if global state should be stored as part of the snapshot
+ *
+ * @return true if global state should be stored as part of the snapshot
+ */
+ public boolean includeGlobalState() {
+ return includeGlobalState;
+ }
+
+ /**
+ * Returns master node timeout
+ *
+ * @return master node timeout
+ */
+ public TimeValue masterNodeTimeout() {
+ return masterNodeTimeout;
+ }
+
+ }
+
+ /**
+ * Stores the list of shards that have to be snapshotted on this node
+ */
+ private static class SnapshotShards {
+ private final ImmutableMap<ShardId, IndexShardSnapshotStatus> shards;
+
+ private SnapshotShards(ImmutableMap<ShardId, IndexShardSnapshotStatus> shards) {
+ this.shards = shards;
+ }
+ }
+
+ /**
+ * Internal request that is used to send changes in snapshot status to master
+ */
+ private static class UpdateIndexShardSnapshotStatusRequest extends TransportRequest {
+ private SnapshotId snapshotId;
+ private ShardId shardId;
+ private SnapshotMetaData.ShardSnapshotStatus status;
+
+ private UpdateIndexShardSnapshotStatusRequest() {
+
+ }
+
+ private UpdateIndexShardSnapshotStatusRequest(SnapshotId snapshotId, ShardId shardId, SnapshotMetaData.ShardSnapshotStatus status) {
+ this.snapshotId = snapshotId;
+ this.shardId = shardId;
+ this.status = status;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ snapshotId = SnapshotId.readSnapshotId(in);
+ shardId = ShardId.readShardId(in);
+ status = SnapshotMetaData.ShardSnapshotStatus.readShardSnapshotStatus(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ snapshotId.writeTo(out);
+ shardId.writeTo(out);
+ status.writeTo(out);
+ }
+
+ public SnapshotId snapshotId() {
+ return snapshotId;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public SnapshotMetaData.ShardSnapshotStatus status() {
+ return status;
+ }
+ }
+
+ /**
+ * Transport request handler that is used to send changes in snapshot status to master
+ */
+ private class UpdateSnapshotStateRequestHandler extends BaseTransportRequestHandler<UpdateIndexShardSnapshotStatusRequest> {
+
+ static final String ACTION = "cluster/snapshot/update_snapshot";
+
+ @Override
+ public UpdateIndexShardSnapshotStatusRequest newInstance() {
+ return new UpdateIndexShardSnapshotStatusRequest();
+ }
+
+ @Override
+ public void messageReceived(UpdateIndexShardSnapshotStatusRequest request, final TransportChannel channel) throws Exception {
+ innerUpdateSnapshotState(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }
+
+
+}
+
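
For illustration, a minimal sketch of building a snapshot creation request with the
fluent setters defined above; the snapshot, repository, and index names are
hypothetical, the enclosing class is assumed to be
org.elasticsearch.snapshots.SnapshotsService, and the 1.0.x artifacts are assumed
on the classpath.

    import org.elasticsearch.action.support.IndicesOptions;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.snapshots.SnapshotsService.SnapshotRequest;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class SnapshotRequestSketch {
        public static SnapshotRequest build() {
            // "api" is the cause string; every other value here is made up.
            return new SnapshotRequest("api", "snapshot_1", "my_backup")
                    .indices(new String[]{"index_1", "index_2"})
                    .indicesOptions(IndicesOptions.strict())
                    .includeGlobalState(true)
                    .partial(false)
                    .settings(settingsBuilder().put("compress", true).build())
                    .masterNodeTimeout(TimeValue.timeValueSeconds(30));
        }
    }
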
diff --git a/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
new file mode 100644
index 0000000..4f65523
--- /dev/null
+++ b/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -0,0 +1,670 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
+import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.*;
+
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
+
+/**
+ * Holds the node's named thread pools, building them from the threadpool.*
+ * settings and applying dynamic updates to them at runtime.
+ */
+public class ThreadPool extends AbstractComponent {
+
+ public static class Names {
+ public static final String SAME = "same";
+ public static final String GENERIC = "generic";
+ public static final String GET = "get";
+ public static final String INDEX = "index";
+ public static final String BULK = "bulk";
+ public static final String SEARCH = "search";
+ public static final String SUGGEST = "suggest";
+ public static final String PERCOLATE = "percolate";
+ public static final String MANAGEMENT = "management";
+ public static final String FLUSH = "flush";
+ public static final String MERGE = "merge";
+ public static final String REFRESH = "refresh";
+ public static final String WARMER = "warmer";
+ public static final String SNAPSHOT = "snapshot";
+ public static final String OPTIMIZE = "optimize";
+ }
+
+ public static final String THREADPOOL_GROUP = "threadpool.";
+
+ private volatile ImmutableMap<String, ExecutorHolder> executors;
+
+ private final ImmutableMap<String, Settings> defaultExecutorTypeSettings;
+
+ private final Queue<ExecutorHolder> retiredExecutors = new ConcurrentLinkedQueue<ExecutorHolder>();
+
+ private final ScheduledThreadPoolExecutor scheduler;
+
+ private final EstimatedTimeThread estimatedTimeThread;
+
+ public ThreadPool() {
+ this(ImmutableSettings.Builder.EMPTY_SETTINGS, null);
+ }
+
+ @Inject
+ public ThreadPool(Settings settings, @Nullable NodeSettingsService nodeSettingsService) {
+ super(settings);
+
+ Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);
+
+ int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
+ int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
+ int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
+ defaultExecutorTypeSettings = ImmutableMap.<String, Settings>builder()
+ .put(Names.GENERIC, settingsBuilder().put("type", "cached").put("keep_alive", "30s").build())
+ .put(Names.INDEX, settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 200).build())
+ .put(Names.BULK, settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 50).build())
+ .put(Names.GET, settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 1000).build())
+ .put(Names.SEARCH, settingsBuilder().put("type", "fixed").put("size", availableProcessors * 3).put("queue_size", 1000).build())
+ .put(Names.SUGGEST, settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 1000).build())
+ .put(Names.PERCOLATE, settingsBuilder().put("type", "fixed").put("size", availableProcessors).put("queue_size", 1000).build())
+ .put(Names.MANAGEMENT, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", 5).build())
+ .put(Names.FLUSH, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
+ .put(Names.MERGE, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
+ .put(Names.REFRESH, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt10).build())
+ .put(Names.WARMER, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
+ .put(Names.SNAPSHOT, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
+ .put(Names.OPTIMIZE, settingsBuilder().put("type", "fixed").put("size", 1).build())
+ .build();
+
+ Map<String, ExecutorHolder> executors = Maps.newHashMap();
+ for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {
+ executors.put(executor.getKey(), build(executor.getKey(), groupSettings.get(executor.getKey()), executor.getValue()));
+ }
+ executors.put(Names.SAME, new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(Names.SAME, "same")));
+ if (!executors.get(Names.GENERIC).info.getType().equals("cached")) {
+ throw new ElasticsearchIllegalArgumentException("generic thread pool must be of type cached");
+ }
+ this.executors = ImmutableMap.copyOf(executors);
+ this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
+ this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
+ this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
+ if (nodeSettingsService != null) {
+ nodeSettingsService.addListener(new ApplySettings());
+ }
+
+ TimeValue estimatedTimeInterval = componentSettings.getAsTime("estimated_time_interval", TimeValue.timeValueMillis(200));
+ this.estimatedTimeThread = new EstimatedTimeThread(EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis());
+ this.estimatedTimeThread.start();
+ }
+
+ public long estimatedTimeInMillis() {
+ return estimatedTimeThread.estimatedTimeInMillis();
+ }
+
+ public ThreadPoolInfo info() {
+ List<Info> infos = new ArrayList<Info>();
+ for (ExecutorHolder holder : executors.values()) {
+ String name = holder.info.getName();
+ // no need to have info on "same" thread pool
+ if ("same".equals(name)) {
+ continue;
+ }
+ infos.add(holder.info);
+ }
+ return new ThreadPoolInfo(infos);
+ }
+
+ public Info info(String name) {
+ ExecutorHolder holder = executors.get(name);
+ if (holder == null) {
+ return null;
+ }
+ return holder.info;
+ }
+
+ public ThreadPoolStats stats() {
+ List<ThreadPoolStats.Stats> stats = new ArrayList<ThreadPoolStats.Stats>();
+ for (ExecutorHolder holder : executors.values()) {
+ String name = holder.info.getName();
+ // no need to have info on "same" thread pool
+ if ("same".equals(name)) {
+ continue;
+ }
+ int threads = -1;
+ int queue = -1;
+ int active = -1;
+ long rejected = -1;
+ int largest = -1;
+ long completed = -1;
+ if (holder.executor instanceof ThreadPoolExecutor) {
+ ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor;
+ threads = threadPoolExecutor.getPoolSize();
+ queue = threadPoolExecutor.getQueue().size();
+ active = threadPoolExecutor.getActiveCount();
+ largest = threadPoolExecutor.getLargestPoolSize();
+ completed = threadPoolExecutor.getCompletedTaskCount();
+ RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler();
+ if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) {
+ rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected();
+ }
+ }
+ stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed));
+ }
+ return new ThreadPoolStats(stats);
+ }
+
+ public Executor generic() {
+ return executor(Names.GENERIC);
+ }
+
+ public Executor executor(String name) {
+ ExecutorHolder holder = executors.get(name);
+ if (holder == null) {
+ throw new ElasticsearchIllegalArgumentException("No executor found for [" + name + "]");
+ }
+ return holder.executor;
+ }
+
+ public ScheduledExecutorService scheduler() {
+ return this.scheduler;
+ }
+
+ public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, TimeValue interval) {
+ return scheduler.scheduleWithFixedDelay(new LoggingRunnable(command), interval.millis(), interval.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
+ if (!Names.SAME.equals(name)) {
+ command = new ThreadedRunnable(command, executor(name));
+ }
+ return scheduler.schedule(command, delay.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ public void shutdown() {
+ estimatedTimeThread.running = false;
+ estimatedTimeThread.interrupt();
+ scheduler.shutdown();
+ for (ExecutorHolder executor : executors.values()) {
+ if (executor.executor instanceof ThreadPoolExecutor) {
+ ((ThreadPoolExecutor) executor.executor).shutdown();
+ }
+ }
+ }
+
+ public void shutdownNow() {
+ estimatedTimeThread.running = false;
+ estimatedTimeThread.interrupt();
+ scheduler.shutdownNow();
+ for (ExecutorHolder executor : executors.values()) {
+ if (executor.executor instanceof ThreadPoolExecutor) {
+ ((ThreadPoolExecutor) executor.executor).shutdownNow();
+ }
+ }
+ while (!retiredExecutors.isEmpty()) {
+ ((ThreadPoolExecutor) retiredExecutors.remove().executor).shutdownNow();
+ }
+ }
+
+ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
+ boolean result = scheduler.awaitTermination(timeout, unit);
+ for (ExecutorHolder executor : executors.values()) {
+ if (executor.executor instanceof ThreadPoolExecutor) {
+ result &= ((ThreadPoolExecutor) executor.executor).awaitTermination(timeout, unit);
+ }
+ }
+ while (!retiredExecutors.isEmpty()) {
+ result &= ((ThreadPoolExecutor) retiredExecutors.remove().executor).awaitTermination(timeout, unit);
+ }
+ return result;
+ }
+
+ private ExecutorHolder build(String name, @Nullable Settings settings, Settings defaultSettings) {
+ return rebuild(name, null, settings, defaultSettings);
+ }
+
+ private ExecutorHolder rebuild(String name, ExecutorHolder previousExecutorHolder, @Nullable Settings settings, Settings defaultSettings) {
+ if (Names.SAME.equals(name)) {
+ // Don't allow changing the "same" thread executor
+ return previousExecutorHolder;
+ }
+ if (settings == null) {
+ settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+ Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;
+ String type = settings.get("type", previousInfo != null ? previousInfo.getType() : defaultSettings.get("type"));
+ ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
+ if ("same".equals(type)) {
+ if (previousExecutorHolder != null) {
+ logger.debug("updating thread_pool [{}], type [{}]", name, type);
+ } else {
+ logger.debug("creating thread_pool [{}], type [{}]", name, type);
+ }
+ return new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(name, type));
+ } else if ("cached".equals(type)) {
+ TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
+ if (previousExecutorHolder != null) {
+ if ("cached".equals(previousInfo.getType())) {
+ TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
+ if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
+ logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
+ ((EsThreadPoolExecutor) previousExecutorHolder.executor).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
+ return new ExecutorHolder(previousExecutorHolder.executor, new Info(name, type, -1, -1, updatedKeepAlive, null));
+ }
+ return previousExecutorHolder;
+ }
+ if (previousInfo.getKeepAlive() != null) {
+ defaultKeepAlive = previousInfo.getKeepAlive();
+ }
+ }
+ TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
+ if (previousExecutorHolder != null) {
+ logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
+ } else {
+ logger.debug("creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
+ }
+ Executor executor = EsExecutors.newCached(keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
+ return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
+ } else if ("fixed".equals(type)) {
+ int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
+ SizeValue defaultQueueSize = defaultSettings.getAsSize("queue", defaultSettings.getAsSize("queue_size", null));
+
+ if (previousExecutorHolder != null) {
+ if ("fixed".equals(previousInfo.getType())) {
+ SizeValue updatedQueueSize = settings.getAsSize("capacity", settings.getAsSize("queue", settings.getAsSize("queue_size", previousInfo.getQueueSize())));
+ if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize)) {
+ int updatedSize = settings.getAsInt("size", previousInfo.getMax());
+ if (previousInfo.getMax() != updatedSize) {
+ logger.debug("updating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, updatedSize, updatedQueueSize);
+ ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedSize);
+ ((EsThreadPoolExecutor) previousExecutorHolder.executor).setMaximumPoolSize(updatedSize);
+ return new ExecutorHolder(previousExecutorHolder.executor, new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));
+ }
+ return previousExecutorHolder;
+ }
+ }
+ if (previousInfo.getMax() >= 0) {
+ defaultSize = previousInfo.getMax();
+ }
+ defaultQueueSize = previousInfo.getQueueSize();
+ }
+
+ int size = settings.getAsInt("size", defaultSize);
+ SizeValue queueSize = settings.getAsSize("capacity", settings.getAsSize("queue", settings.getAsSize("queue_size", defaultQueueSize)));
+ logger.debug("creating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize);
+ Executor executor = EsExecutors.newFixed(size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
+ return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
+ } else if ("scaling".equals(type)) {
+ TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
+ int defaultMin = defaultSettings.getAsInt("min", 1);
+ int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
+ if (previousExecutorHolder != null) {
+ if ("scaling".equals(previousInfo.getType())) {
+ TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
+ int updatedMin = settings.getAsInt("min", previousInfo.getMin());
+ int updatedSize = settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
+ if (!previousInfo.getKeepAlive().equals(updatedKeepAlive) || previousInfo.getMin() != updatedMin || previousInfo.getMax() != updatedSize) {
+ logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
+ if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
+ ((EsThreadPoolExecutor) previousExecutorHolder.executor).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
+ }
+ if (previousInfo.getMin() != updatedMin) {
+ ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedMin);
+ }
+ if (previousInfo.getMax() != updatedSize) {
+ ((EsThreadPoolExecutor) previousExecutorHolder.executor).setMaximumPoolSize(updatedSize);
+ }
+ return new ExecutorHolder(previousExecutorHolder.executor, new Info(name, type, updatedMin, updatedSize, updatedKeepAlive, null));
+ }
+ return previousExecutorHolder;
+ }
+ if (previousInfo.getKeepAlive() != null) {
+ defaultKeepAlive = previousInfo.getKeepAlive();
+ }
+ if (previousInfo.getMin() >= 0) {
+ defaultMin = previousInfo.getMin();
+ }
+ if (previousInfo.getMax() >= 0) {
+ defaultSize = previousInfo.getMax();
+ }
+ }
+ TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
+ int min = settings.getAsInt("min", defaultMin);
+ int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize));
+ if (previousExecutorHolder != null) {
+ logger.debug("updating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
+ } else {
+ logger.debug("creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
+ }
+ Executor executor = EsExecutors.newScaling(min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
+ return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
+ }
+ throw new ElasticsearchIllegalArgumentException("No type found [" + type + "], for [" + name + "]");
+ }
+
+ public void updateSettings(Settings settings) {
+ Map<String, Settings> groupSettings = settings.getGroups("threadpool");
+ if (groupSettings.isEmpty()) {
+ return;
+ }
+
+ for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {
+ Settings updatedSettings = groupSettings.get(executor.getKey());
+ if (updatedSettings == null) {
+ continue;
+ }
+
+ ExecutorHolder oldExecutorHolder = executors.get(executor.getKey());
+ ExecutorHolder newExecutorHolder = rebuild(executor.getKey(), oldExecutorHolder, updatedSettings, executor.getValue());
+ if (!oldExecutorHolder.equals(newExecutorHolder)) {
+ executors = newMapBuilder(executors).put(executor.getKey(), newExecutorHolder).immutableMap();
+ if (!oldExecutorHolder.executor.equals(newExecutorHolder.executor) && oldExecutorHolder.executor instanceof EsThreadPoolExecutor) {
+ retiredExecutors.add(oldExecutorHolder);
+ ((EsThreadPoolExecutor) oldExecutorHolder.executor).shutdown(new ExecutorShutdownListener(oldExecutorHolder));
+ }
+ }
+ }
+ }
+
+ class ExecutorShutdownListener implements EsThreadPoolExecutor.ShutdownListener {
+
+ private ExecutorHolder holder;
+
+ public ExecutorShutdownListener(ExecutorHolder holder) {
+ this.holder = holder;
+ }
+
+ @Override
+ public void onTerminated() {
+ retiredExecutors.remove(holder);
+ }
+ }
+
+ class LoggingRunnable implements Runnable {
+
+ private final Runnable runnable;
+
+ LoggingRunnable(Runnable runnable) {
+ this.runnable = runnable;
+ }
+
+ @Override
+ public void run() {
+ try {
+ runnable.run();
+ } catch (Exception e) {
+ logger.warn("failed to run {}", e, runnable.toString());
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return runnable.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return runnable.equals(obj);
+ }
+
+ @Override
+ public String toString() {
+ return "[threaded] " + runnable.toString();
+ }
+ }
+
+ class ThreadedRunnable implements Runnable {
+
+ private final Runnable runnable;
+
+ private final Executor executor;
+
+ ThreadedRunnable(Runnable runnable, Executor executor) {
+ this.runnable = runnable;
+ this.executor = executor;
+ }
+
+ @Override
+ public void run() {
+ executor.execute(runnable);
+ }
+
+ @Override
+ public int hashCode() {
+ return runnable.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return runnable.equals(obj);
+ }
+
+ @Override
+ public String toString() {
+ return "[threaded] " + runnable.toString();
+ }
+ }
+
+ static class EstimatedTimeThread extends Thread {
+
+ final long interval;
+
+ volatile boolean running = true;
+
+ volatile long estimatedTimeInMillis;
+
+ EstimatedTimeThread(String name, long interval) {
+ super(name);
+ this.interval = interval;
+ setDaemon(true);
+ }
+
+ public long estimatedTimeInMillis() {
+ return this.estimatedTimeInMillis;
+ }
+
+ @Override
+ public void run() {
+ while (running) {
+ estimatedTimeInMillis = System.currentTimeMillis();
+ try {
+ Thread.sleep(interval);
+ } catch (InterruptedException e) {
+ running = false;
+ return;
+ }
+ try {
+ FileSystemUtils.checkMkdirsStall(estimatedTimeInMillis);
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ static class ExecutorHolder {
+ public final Executor executor;
+ public final Info info;
+
+ ExecutorHolder(Executor executor, Info info) {
+ this.executor = executor;
+ this.info = info;
+ }
+ }
+
+ public static class Info implements Streamable, ToXContent {
+
+ private String name;
+ private String type;
+ private int min;
+ private int max;
+ private TimeValue keepAlive;
+ private SizeValue queueSize;
+
+ Info() {
+
+ }
+
+ public Info(String name, String type) {
+ this(name, type, -1);
+ }
+
+ public Info(String name, String type, int size) {
+ this(name, type, size, size, null, null);
+ }
+
+ public Info(String name, String type, int min, int max, @Nullable TimeValue keepAlive, @Nullable SizeValue queueSize) {
+ this.name = name;
+ this.type = type;
+ this.min = min;
+ this.max = max;
+ this.keepAlive = keepAlive;
+ this.queueSize = queueSize;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public String getType() {
+ return this.type;
+ }
+
+ public int getMin() {
+ return this.min;
+ }
+
+ public int getMax() {
+ return this.max;
+ }
+
+ @Nullable
+ public TimeValue getKeepAlive() {
+ return this.keepAlive;
+ }
+
+ @Nullable
+ public SizeValue getQueueSize() {
+ return this.queueSize;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ type = in.readString();
+ min = in.readInt();
+ max = in.readInt();
+ if (in.readBoolean()) {
+ keepAlive = TimeValue.readTimeValue(in);
+ }
+ if (in.readBoolean()) {
+ queueSize = SizeValue.readSizeValue(in);
+ }
+ in.readBoolean(); // here to conform with removed waitTime
+ in.readBoolean(); // here to conform with removed rejected setting
+ in.readBoolean(); // here to conform with queue type
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(type);
+ out.writeInt(min);
+ out.writeInt(max);
+ if (keepAlive == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ keepAlive.writeTo(out);
+ }
+ if (queueSize == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ queueSize.writeTo(out);
+ }
+ out.writeBoolean(false); // here to conform with removed waitTime
+ out.writeBoolean(false); // here to conform with removed rejected setting
+ out.writeBoolean(false); // here to conform with queue type
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name, XContentBuilder.FieldCaseConversion.NONE);
+ builder.field(Fields.TYPE, type);
+ if (min != -1) {
+ builder.field(Fields.MIN, min);
+ }
+ if (max != -1) {
+ builder.field(Fields.MAX, max);
+ }
+ if (keepAlive != null) {
+ builder.field(Fields.KEEP_ALIVE, keepAlive.toString());
+ }
+ if (queueSize != null) {
+ builder.field(Fields.QUEUE_SIZE, queueSize.toString());
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString TYPE = new XContentBuilderString("type");
+ static final XContentBuilderString MIN = new XContentBuilderString("min");
+ static final XContentBuilderString MAX = new XContentBuilderString("max");
+ static final XContentBuilderString KEEP_ALIVE = new XContentBuilderString("keep_alive");
+ static final XContentBuilderString QUEUE_SIZE = new XContentBuilderString("queue_size");
+ }
+
+ }
+
+ class ApplySettings implements NodeSettingsService.Listener {
+ @Override
+ public void onRefreshSettings(Settings settings) {
+ updateSettings(settings);
+ }
+ }
+}
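
A minimal usage sketch for the ThreadPool above. The threadpool.* keys mirror the
group settings parsed in the constructor, and passing a null NodeSettingsService
merely skips registration of the dynamic-update listener, as the constructor
shows; the pool sizes are illustrative.

    import java.util.concurrent.TimeUnit;

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.threadpool.ThreadPool;

    public class ThreadPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            // Override the "search" pool; pools left unset keep the defaults built above.
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("threadpool.search.type", "fixed")
                    .put("threadpool.search.size", 8)
                    .put("threadpool.search.queue_size", 500)
                    .build();
            ThreadPool pool = new ThreadPool(settings, null);
            pool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("ran on a search thread");
                }
            });
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
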
diff --git a/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java b/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
new file mode 100644
index 0000000..2e7dd99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ */
+public class ThreadPoolInfo implements Streamable, Iterable<ThreadPool.Info>, ToXContent {
+
+ private List<ThreadPool.Info> infos;
+
+ ThreadPoolInfo() {
+ }
+
+
+ public ThreadPoolInfo(List<ThreadPool.Info> infos) {
+ this.infos = infos;
+ }
+
+ @Override
+ public Iterator<ThreadPool.Info> iterator() {
+ return infos.iterator();
+ }
+
+ public static ThreadPoolInfo readThreadPoolInfo(StreamInput in) throws IOException {
+ ThreadPoolInfo info = new ThreadPoolInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ infos = new ArrayList<ThreadPool.Info>(size);
+ for (int i = 0; i < size; i++) {
+ ThreadPool.Info info = new ThreadPool.Info();
+ info.readFrom(in);
+ infos.add(info);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(infos.size());
+ for (ThreadPool.Info info : infos) {
+ info.writeTo(out);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString THREAD_POOL = new XContentBuilderString("thread_pool");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.THREAD_POOL);
+ for (ThreadPool.Info info : infos) {
+ info.toXContent(builder, params);
+ }
+ builder.endObject();
+ return builder;
+ }
+}
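
A round-trip sketch for the Streamable contract above, assuming BytesStreamOutput
and BytesReference.streamInput() from the common.io.stream and common.bytes
packages of this same tree; the pool name and size are made up.

    import java.util.ArrayList;
    import java.util.List;

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.threadpool.ThreadPoolInfo;

    public class ThreadPoolInfoSketch {
        public static void main(String[] args) throws Exception {
            List<ThreadPool.Info> infos = new ArrayList<ThreadPool.Info>();
            infos.add(new ThreadPool.Info("search", "fixed", 8));

            BytesStreamOutput out = new BytesStreamOutput();
            new ThreadPoolInfo(infos).writeTo(out);

            // Read it back through the static factory defined above.
            ThreadPoolInfo copy = ThreadPoolInfo.readThreadPoolInfo(out.bytes().streamInput());
            System.out.println(copy.iterator().next().getName()); // search
        }
    }
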
diff --git a/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java b/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java
new file mode 100644
index 0000000..6dc8c03
--- /dev/null
+++ b/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class ThreadPoolModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public ThreadPoolModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(ThreadPool.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java b/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
new file mode 100644
index 0000000..eb65db7
--- /dev/null
+++ b/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ */
+public class ThreadPoolStats implements Streamable, ToXContent, Iterable<ThreadPoolStats.Stats> {
+
+ public static class Stats implements Streamable, ToXContent {
+
+ private String name;
+ private int threads;
+ private int queue;
+ private int active;
+ private long rejected;
+ private int largest;
+ private long completed;
+
+ Stats() {
+
+ }
+
+ public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) {
+ this.name = name;
+ this.threads = threads;
+ this.queue = queue;
+ this.active = active;
+ this.rejected = rejected;
+ this.largest = largest;
+ this.completed = completed;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public int getThreads() {
+ return this.threads;
+ }
+
+ public int getQueue() {
+ return this.queue;
+ }
+
+ public int getActive() {
+ return this.active;
+ }
+
+ public long getRejected() {
+ return rejected;
+ }
+
+ public int getLargest() {
+ return largest;
+ }
+
+ public long getCompleted() {
+ return this.completed;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ name = in.readString();
+ threads = in.readInt();
+ queue = in.readInt();
+ active = in.readInt();
+ rejected = in.readLong();
+ largest = in.readInt();
+ completed = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeInt(threads);
+ out.writeInt(queue);
+ out.writeInt(active);
+ out.writeLong(rejected);
+ out.writeInt(largest);
+ out.writeLong(completed);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name, XContentBuilder.FieldCaseConversion.NONE);
+ if (threads != -1) {
+ builder.field(Fields.THREADS, threads);
+ }
+ if (queue != -1) {
+ builder.field(Fields.QUEUE, queue);
+ }
+ if (active != -1) {
+ builder.field(Fields.ACTIVE, active);
+ }
+ if (rejected != -1) {
+ builder.field(Fields.REJECTED, rejected);
+ }
+ if (largest != -1) {
+ builder.field(Fields.LARGEST, largest);
+ }
+ if (completed != -1) {
+ builder.field(Fields.COMPLETED, completed);
+ }
+ builder.endObject();
+ return builder;
+ }
+ }
+
+ private List<Stats> stats;
+
+ ThreadPoolStats() {
+
+ }
+
+ public ThreadPoolStats(List<Stats> stats) {
+ this.stats = stats;
+ }
+
+ @Override
+ public Iterator<Stats> iterator() {
+ return stats.iterator();
+ }
+
+ public static ThreadPoolStats readThreadPoolStats(StreamInput in) throws IOException {
+ ThreadPoolStats stats = new ThreadPoolStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ stats = new ArrayList<Stats>(size);
+ for (int i = 0; i < size; i++) {
+ Stats stats1 = new Stats();
+ stats1.readFrom(in);
+ stats.add(stats1);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(stats.size());
+ for (Stats stat : stats) {
+ stat.writeTo(out);
+ }
+ }
+
+ static final class Fields {
+ static final XContentBuilderString THREAD_POOL = new XContentBuilderString("thread_pool");
+ static final XContentBuilderString THREADS = new XContentBuilderString("threads");
+ static final XContentBuilderString QUEUE = new XContentBuilderString("queue");
+ static final XContentBuilderString ACTIVE = new XContentBuilderString("active");
+ static final XContentBuilderString REJECTED = new XContentBuilderString("rejected");
+ static final XContentBuilderString LARGEST = new XContentBuilderString("largest");
+ static final XContentBuilderString COMPLETED = new XContentBuilderString("completed");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject(Fields.THREAD_POOL);
+ for (Stats stat : stats) {
+ stat.toXContent(builder, params);
+ }
+ builder.endObject();
+ return builder;
+ }
+}
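
A sketch of rendering the stats above as JSON through the ToXContent contract,
assuming XContentFactory and ToXContent.EMPTY_PARAMS from the common.xcontent
package; the numbers are made up.

    import java.util.ArrayList;
    import java.util.List;

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.threadpool.ThreadPoolStats;

    public class ThreadPoolStatsSketch {
        public static void main(String[] args) throws Exception {
            List<ThreadPoolStats.Stats> list = new ArrayList<ThreadPoolStats.Stats>();
            list.add(new ThreadPoolStats.Stats("search", 8, 0, 2, 0L, 8, 1234L));

            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
            builder.startObject();
            new ThreadPoolStats(list).toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            System.out.println(builder.string()); // {"thread_pool":{"search":{...}}}
        }
    }
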
diff --git a/src/main/java/org/elasticsearch/transport/ActionNotFoundTransportException.java b/src/main/java/org/elasticsearch/transport/ActionNotFoundTransportException.java
new file mode 100644
index 0000000..b53af40
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/ActionNotFoundTransportException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ * An exception indicating that a transport action was not found.
+ *
+ *
+ */
+public class ActionNotFoundTransportException extends TransportException {
+
+ private final String action;
+
+ public ActionNotFoundTransportException(String action) {
+ super("No handler for action [" + action + "]");
+ this.action = action;
+ }
+
+ public String action() {
+ return this.action;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/ActionTransportException.java b/src/main/java/org/elasticsearch/transport/ActionTransportException.java
new file mode 100644
index 0000000..bc430a4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/ActionTransportException.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.common.transport.TransportAddress;
+
+/**
+ * An action invocation failure.
+ *
+ *
+ */
+public class ActionTransportException extends TransportException {
+
+ private TransportAddress address;
+
+ private String action;
+
+ public ActionTransportException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ public ActionTransportException(String name, TransportAddress address, String action, Throwable cause) {
+ super(buildMessage(name, address, action, null), cause);
+ this.address = address;
+ this.action = action;
+ }
+
+ public ActionTransportException(String name, TransportAddress address, String action, String msg, Throwable cause) {
+ super(buildMessage(name, address, action, msg), cause);
+ this.address = address;
+ this.action = action;
+ }
+
+ /**
+ * The target address to invoke the action on.
+ */
+ public TransportAddress address() {
+ return address;
+ }
+
+ /**
+ * The action to invoke.
+ */
+ public String action() {
+ return action;
+ }
+
+ private static String buildMessage(String name, TransportAddress address, String action, String msg) {
+ StringBuilder sb = new StringBuilder();
+ if (name != null) {
+ sb.append('[').append(name).append(']');
+ }
+ if (address != null) {
+ sb.append('[').append(address).append(']');
+ }
+ if (action != null) {
+ sb.append('[').append(action).append(']');
+ }
+ if (msg != null) {
+ sb.append(" ").append(msg);
+ }
+ return sb.toString();
+ }
+}
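
For reference, a sketch of the "[name][address][action] msg" format that
buildMessage above assembles; LocalTransportAddress is assumed from
common.transport, and every value is hypothetical.

    import org.elasticsearch.common.transport.LocalTransportAddress;
    import org.elasticsearch.transport.ActionTransportException;

    public class ActionTransportExceptionSketch {
        public static void main(String[] args) {
            ActionTransportException e = new ActionTransportException(
                    "node_1", new LocalTransportAddress("1"),
                    "cluster/snapshot/update_snapshot", "disconnected", null);
            // Prints something like:
            // [node_1][local[1]][cluster/snapshot/update_snapshot] disconnected
            System.out.println(e.getMessage());
        }
    }
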
diff --git a/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java b/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java
new file mode 100644
index 0000000..98c6f78
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ * A simple base class for transport request handlers; force execution defaults to false.
+ */
+public abstract class BaseTransportRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T> {
+
+ /**
+ * Default force execution to false.
+ */
+ @Override
+ public boolean isForceExecution() {
+ return false;
+ }
+}
\ No newline at end of file
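
A minimal concrete handler over the base class above, mirroring the
UpdateSnapshotStateRequestHandler pattern from the snapshot service earlier in
this patch; the action name and the choice of BytesTransportRequest are
illustrative.

    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.BaseTransportRequestHandler;
    import org.elasticsearch.transport.BytesTransportRequest;
    import org.elasticsearch.transport.TransportChannel;
    import org.elasticsearch.transport.TransportResponse;

    public class EchoRequestHandler extends BaseTransportRequestHandler<BytesTransportRequest> {

        public static final String ACTION = "sketch/echo"; // illustrative action name

        @Override
        public BytesTransportRequest newInstance() {
            return new BytesTransportRequest();
        }

        @Override
        public void messageReceived(BytesTransportRequest request, TransportChannel channel) throws Exception {
            // Acknowledge without a payload, as the snapshot-status handler above does.
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
        }

        @Override
        public String executor() {
            return ThreadPool.Names.GENERIC;
        }
    }
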
diff --git a/src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java b/src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java
new file mode 100644
index 0000000..df2362d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/BaseTransportResponseHandler.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ * A simple base class for transport response handlers.
+ */
+public abstract class BaseTransportResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
+
+}
\ No newline at end of file
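
And the receiving side: a minimal response handler over the base class above,
assuming the newInstance/handleResponse/handleException/executor contract of
TransportResponseHandler matches the request-handler pattern shown in this patch.

    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.BaseTransportResponseHandler;
    import org.elasticsearch.transport.TransportException;
    import org.elasticsearch.transport.TransportResponse;

    public class EmptyResponseHandler extends BaseTransportResponseHandler<TransportResponse.Empty> {

        @Override
        public TransportResponse.Empty newInstance() {
            return TransportResponse.Empty.INSTANCE;
        }

        @Override
        public void handleResponse(TransportResponse.Empty response) {
            // Acknowledged; an empty response carries nothing to unpack.
        }

        @Override
        public void handleException(TransportException exp) {
            exp.printStackTrace(); // a real caller would log or propagate
        }

        @Override
        public String executor() {
            return ThreadPool.Names.SAME;
        }
    }
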
diff --git a/src/main/java/org/elasticsearch/transport/BindTransportException.java b/src/main/java/org/elasticsearch/transport/BindTransportException.java
new file mode 100644
index 0000000..41738ec
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/BindTransportException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public class BindTransportException extends TransportException {
+
+ public BindTransportException(String message) {
+ super(message);
+ }
+
+ public BindTransportException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java b/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java
new file mode 100644
index 0000000..1a6100c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A specialized, bytes-only request that can potentially be optimized on the network
+ * layer, specifically when the same large buffer is sent to several nodes.
+ */
+public class BytesTransportRequest extends TransportRequest {
+
+ BytesReference bytes;
+ Version version;
+
+ public BytesTransportRequest() {
+
+ }
+
+ public BytesTransportRequest(BytesReference bytes, Version version) {
+ this.bytes = bytes;
+ this.version = version;
+ }
+
+ public Version version() {
+ return this.version;
+ }
+
+ public BytesReference bytes() {
+ return this.bytes;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ bytes = in.readBytesReference();
+ version = in.getVersion();
+ }
+
+ /**
+ * Writes the data in a "thin" manner, without the actual bytes; assumes
+ * the actual bytes will be appended right after this content.
+ */
+ public void writeThin(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(bytes.length());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeBytesReference(bytes);
+ }
+}
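
A rough sketch of how a caller might honor the writeThin() contract above, reusing one large buffer across several node sends; the method name is hypothetical and it relies on StreamOutput extending OutputStream in this codebase:

    // Serialize the thin form (headers + payload length), then append the shared
    // payload once, instead of re-copying it through writeTo for every node.
    static void writeThinWithPayload(BytesTransportRequest request, StreamOutput out) throws IOException {
        request.writeThin(out);       // everything except the payload bytes
        request.bytes().writeTo(out); // the payload is appended right after
    }
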
diff --git a/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/src/main/java/org/elasticsearch/transport/ConnectTransportException.java
new file mode 100644
index 0000000..e3f25fc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/ConnectTransportException.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ *
+ */
+public class ConnectTransportException extends ActionTransportException {
+
+ private final DiscoveryNode node;
+
+ public ConnectTransportException(DiscoveryNode node, String msg) {
+ this(node, msg, null, null);
+ }
+
+ public ConnectTransportException(DiscoveryNode node, String msg, String action) {
+ this(node, msg, action, null);
+ }
+
+ public ConnectTransportException(DiscoveryNode node, String msg, Throwable cause) {
+ this(node, msg, null, cause);
+ }
+
+ public ConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) {
+ super(node.name(), node.address(), action, msg, cause);
+ this.node = node;
+ }
+
+ public DiscoveryNode node() {
+ return node;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java b/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java
new file mode 100644
index 0000000..41c2b9b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ *
+ */
+public class EmptyTransportResponseHandler implements TransportResponseHandler<TransportResponse.Empty> {
+
+ public static final EmptyTransportResponseHandler INSTANCE_SAME = new EmptyTransportResponseHandler(ThreadPool.Names.SAME);
+
+ private final String executor;
+
+ public EmptyTransportResponseHandler(String executor) {
+ this.executor = executor;
+ }
+
+ @Override
+ public TransportResponse.Empty newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+}
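
Typical usage sketch: fire a request whose only meaningful reply is an acknowledgement. The action name is made up for illustration, and sendRequest is assumed to be the TransportService variant that takes a response handler:

    // No payload comes back, so the shared INSTANCE_SAME handler is enough.
    transportService.sendRequest(node, "internal/ack-only", request,
            EmptyTransportResponseHandler.INSTANCE_SAME);
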
diff --git a/src/main/java/org/elasticsearch/transport/FailedCommunicationException.java b/src/main/java/org/elasticsearch/transport/FailedCommunicationException.java
new file mode 100644
index 0000000..ccb6d81
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/FailedCommunicationException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public class FailedCommunicationException extends TransportException {
+
+ public FailedCommunicationException(String message) {
+ super(message);
+ }
+
+ public FailedCommunicationException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java b/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java
new file mode 100644
index 0000000..57679ac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ * A response handler to be used when all interaction will be done through the {@link TransportFuture}.
+ */
+public abstract class FutureTransportResponseHandler<T extends TransportResponse> extends BaseTransportResponseHandler<T> {
+
+ @Override
+ public void handleResponse(T response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+}
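
Since handleResponse and handleException are already no-ops here, a concrete subclass only has to say how to create the response object. A sketch, with MyResponse standing in for any TransportResponse subclass:

    FutureTransportResponseHandler<MyResponse> handler =
            new FutureTransportResponseHandler<MyResponse>() {
                @Override
                public MyResponse newInstance() {
                    return new MyResponse(); // deserialization target for the reply
                }
            };
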
diff --git a/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java b/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java
new file mode 100644
index 0000000..ed94aa4
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ *
+ */
+public class NodeDisconnectedException extends ConnectTransportException {
+
+ public NodeDisconnectedException(DiscoveryNode node, String action) {
+ super(node, "disconnected", action, null);
+ }
+
+ // stack trace is meaningless...
+
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java b/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java
new file mode 100644
index 0000000..7813383
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ * An exception indicating that a message was sent to a node that is not connected.
+ *
+ *
+ */
+public class NodeNotConnectedException extends ConnectTransportException {
+
+ public NodeNotConnectedException(DiscoveryNode node, String msg) {
+ super(node, msg);
+ }
+
+ public NodeNotConnectedException(DiscoveryNode node, String msg, Throwable cause) {
+ super(node, msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java b/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java
new file mode 100644
index 0000000..5792e58
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ */
+public class NodeShouldNotConnectException extends NodeNotConnectedException {
+
+ public NodeShouldNotConnectException(DiscoveryNode fromNode, DiscoveryNode node) {
+ super(node, "node should not connect from [" + fromNode + "]");
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java b/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java
new file mode 100644
index 0000000..9f75312
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public class NotSerializableTransportException extends TransportException {
+
+ public NotSerializableTransportException(Throwable t) {
+ super(buildMessage(t));
+ }
+
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+
+ private static String buildMessage(Throwable t) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("[").append(t.getClass().getName()).append("] ");
+ while (t != null) {
+ sb.append(t.getMessage()).append("; ");
+ t = t.getCause();
+ }
+ return sb.toString();
+ }
+}
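
Worked example of buildMessage: the class name of the top-level throwable, followed by each message down the cause chain:

    Throwable t = new IllegalStateException("boom", new java.io.IOException("disk full"));
    new NotSerializableTransportException(t).getMessage();
    // -> "[java.lang.IllegalStateException] boom; disk full; "
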
diff --git a/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java b/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java
new file mode 100644
index 0000000..812eec9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.common.util.concurrent.BaseFuture;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ *
+ */
+public class PlainTransportFuture<V extends TransportResponse> extends BaseFuture<V> implements TransportFuture<V>, TransportResponseHandler<V> {
+
+ private final TransportResponseHandler<V> handler;
+
+ public PlainTransportFuture(TransportResponseHandler<V> handler) {
+ this.handler = handler;
+ }
+
+ @Override
+ public V txGet() throws ElasticsearchException {
+ try {
+ return get();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ElasticsearchIllegalStateException("Future got interrupted", e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof ElasticsearchException) {
+ throw (ElasticsearchException) e.getCause();
+ } else {
+ throw new TransportException("Failed execution", e);
+ }
+ }
+ }
+
+ @Override
+ public V txGet(long timeout, TimeUnit unit) throws ElasticsearchException {
+ try {
+ return get(timeout, unit);
+ } catch (TimeoutException e) {
+ throw new ElasticsearchTimeoutException(e.getMessage());
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ElasticsearchIllegalStateException("Future got interrupted", e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof ElasticsearchException) {
+ throw (ElasticsearchException) e.getCause();
+ } else {
+ throw new TransportException("Failed execution", e);
+ }
+ }
+ }
+
+ @Override
+ public V newInstance() {
+ return handler.newInstance();
+ }
+
+ @Override
+ public String executor() {
+ return handler.executor();
+ }
+
+ @Override
+ public void handleResponse(V response) {
+ try {
+ handler.handleResponse(response);
+ set(response);
+ } catch (Throwable t) {
+ handleException(new ResponseHandlerFailureTransportException(t));
+ }
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ try {
+ handler.handleException(exp);
+ } finally {
+ setException(exp);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "future(" + handler.toString() + ")";
+ }
+}
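
Usage sketch: because the future is also the response handler, a single object is handed to sendRequest and then blocked on. The action name and MyResponse type are illustrative:

    PlainTransportFuture<MyResponse> future = new PlainTransportFuture<MyResponse>(
            new FutureTransportResponseHandler<MyResponse>() {
                @Override
                public MyResponse newInstance() {
                    return new MyResponse();
                }
            });
    transportService.sendRequest(node, "my/illustrative/action", request, future);
    MyResponse response = future.txGet(5, TimeUnit.SECONDS); // rethrows the cause instead of ExecutionException
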
diff --git a/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java b/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java
new file mode 100644
index 0000000..a790fc0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ *
+ */
+public class ReceiveTimeoutTransportException extends ActionTransportException {
+
+ public ReceiveTimeoutTransportException(DiscoveryNode node, String action, String msg) {
+ super(node.name(), node.address(), action, msg, null);
+ }
+
+// @Override public Throwable fillInStackTrace() {
+// return fillStack();
+// }
+}
diff --git a/src/main/java/org/elasticsearch/transport/RemoteTransportException.java b/src/main/java/org/elasticsearch/transport/RemoteTransportException.java
new file mode 100644
index 0000000..eaca648
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/RemoteTransportException.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.common.transport.TransportAddress;
+
+/**
+ * A remote exception for an action. It wraps the actual remote cause and does not fill in the
+ * stack trace.
+ *
+ *
+ */
+public class RemoteTransportException extends ActionTransportException implements ElasticsearchWrapperException {
+
+ public RemoteTransportException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ public RemoteTransportException(String name, TransportAddress address, String action, Throwable cause) {
+ super(name, address, action, cause);
+ }
+
+ @Override
+ public Throwable fillInStackTrace() {
+ // no need for stack trace here, we always have cause
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java b/src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java
new file mode 100644
index 0000000..4ba707a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/ResponseHandlerFailureTransportException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ * A failure to handle the response of a transport action.
+ *
+ *
+ */
+public class ResponseHandlerFailureTransportException extends TransportException {
+
+ public ResponseHandlerFailureTransportException(Throwable cause) {
+ super(cause.getMessage(), cause);
+ }
+
+ @Override
+ public Throwable fillInStackTrace() {
+ return null;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java b/src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java
new file mode 100644
index 0000000..381ce51
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/ResponseHandlerNotFoundTransportException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public class ResponseHandlerNotFoundTransportException extends TransportException {
+
+ private final long requestId;
+
+ public ResponseHandlerNotFoundTransportException(long requestId) {
+ super("Transport response handler not found of id [" + requestId + "]");
+ this.requestId = requestId;
+ }
+
+ public long requestId() {
+ return requestId;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java b/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java
new file mode 100644
index 0000000..a62d980
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.ElasticsearchWrapperException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ *
+ */
+public class SendRequestTransportException extends ActionTransportException implements ElasticsearchWrapperException {
+
+ public SendRequestTransportException(DiscoveryNode node, String action, Throwable cause) {
+ super(node == null ? null : node.name(), node == null ? null : node.address(), action, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/Transport.java b/src/main/java/org/elasticsearch/transport/Transport.java
new file mode 100644
index 0000000..35eb71d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/Transport.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public interface Transport extends LifecycleComponent<Transport> {
+
+ public static class TransportSettings {
+ public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress";
+ }
+
+ void transportServiceAdapter(TransportServiceAdapter service);
+
+ /**
+ * The address the transport is bound on.
+ */
+ BoundTransportAddress boundAddress();
+
+ /**
+ * Returns the addresses parsed from the given string representation.
+ */
+ TransportAddress[] addressesFromString(String address) throws Exception;
+
+ /**
+ * Returns <tt>true</tt> if the given address type is supported.
+ */
+ boolean addressSupported(Class<? extends TransportAddress> address);
+
+ /**
+ * Returns <tt>true</tt> if the node is connected.
+ */
+ boolean nodeConnected(DiscoveryNode node);
+
+ /**
+ * Connects to the given node; does nothing if already connected.
+ */
+ void connectToNode(DiscoveryNode node) throws ConnectTransportException;
+
+ /**
+ * Connects to a node in a lightweight manner. Used when connecting just for a ping and then
+ * disconnecting.
+ */
+ void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException;
+
+ /**
+ * Disconnects from the given node; does nothing if not connected.
+ */
+ void disconnectFromNode(DiscoveryNode node);
+
+ /**
+ * Sends the request to the node.
+ */
+ void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException;
+
+ long serverOpen();
+}
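
A sketch of the call sequence a client of this interface goes through; in practice TransportService generates the request id, shown here as a literal:

    if (!transport.nodeConnected(node)) {
        transport.connectToNode(node); // no-op when already connected
    }
    transport.sendRequest(node, 42L, "my/illustrative/action", request,
            TransportRequestOptions.EMPTY);
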
diff --git a/src/main/java/org/elasticsearch/transport/TransportChannel.java b/src/main/java/org/elasticsearch/transport/TransportChannel.java
new file mode 100644
index 0000000..4810fb1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportChannel.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import java.io.IOException;
+
+/**
+ * A transport channel allows sending a response to a request on the channel.
+ */
+public interface TransportChannel {
+
+ String action();
+
+ void sendResponse(TransportResponse response) throws IOException;
+
+ void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException;
+
+ void sendResponse(Throwable error) throws IOException;
+}
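
On the receiving side, a handler answers on the channel, sending back either the response or the failure itself. A sketch with hypothetical MyRequest/MyResponse types:

    public void messageReceived(MyRequest request, TransportChannel channel) throws Exception {
        try {
            channel.sendResponse(new MyResponse());
        } catch (Throwable t) {
            channel.sendResponse(t); // ship the failure back instead of swallowing it
        }
    }
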
diff --git a/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java b/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java
new file mode 100644
index 0000000..ad9c799
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ *
+ */
+public interface TransportConnectionListener {
+
+ void onNodeConnected(DiscoveryNode node);
+
+ void onNodeDisconnected(DiscoveryNode node);
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportException.java b/src/main/java/org/elasticsearch/transport/TransportException.java
new file mode 100644
index 0000000..66a22c5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ *
+ */
+public class TransportException extends ElasticsearchException {
+
+ public TransportException(String msg) {
+ super(msg);
+ }
+
+ public TransportException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/transport/TransportFuture.java b/src/main/java/org/elasticsearch/transport/TransportFuture.java
new file mode 100644
index 0000000..1b330d2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportFuture.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.ElasticsearchException;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public interface TransportFuture<V> extends Future<V> {
+
+ /**
+ * Waits if necessary for the computation to complete, and then
+ * retrieves its result.
+ */
+ V txGet() throws ElasticsearchException;
+
+ /**
+ * Waits if necessary for at most the given time for the computation
+ * to complete, and then retrieves its result, if available.
+ */
+ V txGet(long timeout, TimeUnit unit) throws ElasticsearchException;
+}
+
diff --git a/src/main/java/org/elasticsearch/transport/TransportInfo.java b/src/main/java/org/elasticsearch/transport/TransportInfo.java
new file mode 100644
index 0000000..fec3a95
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportInfo.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ *
+ */
+public class TransportInfo implements Streamable, Serializable, ToXContent {
+
+ private BoundTransportAddress address;
+
+ TransportInfo() {
+ }
+
+ public TransportInfo(BoundTransportAddress address) {
+ this.address = address;
+ }
+
+ static final class Fields {
+ static final XContentBuilderString TRANSPORT = new XContentBuilderString("transport");
+ static final XContentBuilderString BOUND_ADDRESS = new XContentBuilderString("bound_address");
+ static final XContentBuilderString PUBLISH_ADDRESS = new XContentBuilderString("publish_address");
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.TRANSPORT);
+ builder.field(Fields.BOUND_ADDRESS, address.boundAddress().toString());
+ builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
+ builder.endObject();
+ return builder;
+ }
+
+ public static TransportInfo readTransportInfo(StreamInput in) throws IOException {
+ TransportInfo info = new TransportInfo();
+ info.readFrom(in);
+ return info;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ address = BoundTransportAddress.readBoundTransportAddress(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ address.writeTo(out);
+ }
+
+ public BoundTransportAddress address() {
+ return address;
+ }
+
+ public BoundTransportAddress getAddress() {
+ return address();
+ }
+}
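
For reference, toXContent above renders a fragment of the node-info output along these lines (addresses illustrative):

    "transport" : {
        "bound_address" : "inet[/192.168.1.10:9300]",
        "publish_address" : "inet[/192.168.1.10:9300]"
    }
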
diff --git a/src/main/java/org/elasticsearch/transport/TransportModule.java b/src/main/java/org/elasticsearch/transport/TransportModule.java
new file mode 100644
index 0000000..b4a7b69
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportModule.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Modules;
+import org.elasticsearch.common.inject.SpawnModules;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.local.LocalTransportModule;
+import org.elasticsearch.transport.netty.NettyTransportModule;
+
+/**
+ *
+ */
+public class TransportModule extends AbstractModule implements SpawnModules {
+
+ private final Settings settings;
+
+ public static final String TRANSPORT_TYPE_KEY = "transport.type";
+
+ public TransportModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public Iterable<? extends Module> spawnModules() {
+ Class<? extends Module> defaultTransportModule;
+ if (DiscoveryNode.localNode(settings)) {
+ defaultTransportModule = LocalTransportModule.class;
+ } else {
+ defaultTransportModule = NettyTransportModule.class;
+ }
+ return ImmutableList.of(Modules.createModule(settings.getAsClass(TRANSPORT_TYPE_KEY, defaultTransportModule, "org.elasticsearch.transport.", "TransportModule"), settings));
+ }
+
+ @Override
+ protected void configure() {
+ bind(TransportService.class).asEagerSingleton();
+ }
+} \ No newline at end of file
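
spawnModules resolves transport.type through the prefix/suffix arguments to getAsClass, so a short name like "local" is expected to resolve to org.elasticsearch.transport.local.LocalTransportModule. A sketch, assuming ImmutableSettings from this codebase:

    Settings settings = ImmutableSettings.settingsBuilder()
            .put("transport.type", "local") // omit to get the node-type based default
            .build();
    Iterable<? extends Module> spawned = new TransportModule(settings).spawnModules();
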
diff --git a/src/main/java/org/elasticsearch/transport/TransportRequest.java b/src/main/java/org/elasticsearch/transport/TransportRequest.java
new file mode 100644
index 0000000..6adc8e3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportRequest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ */
+public abstract class TransportRequest implements Streamable {
+
+ public static class Empty extends TransportRequest {
+
+ public static final Empty INSTANCE = new Empty();
+
+ public Empty() {
+ super();
+ }
+
+ public Empty(TransportRequest request) {
+ super(request);
+ }
+ }
+
+ private Map<String, Object> headers;
+
+ protected TransportRequest() {
+
+ }
+
+ protected TransportRequest(TransportRequest request) {
+ // create a new copy of the headers, since we are creating a new request which might have
+ // its headers changed in the context of that specific request
+ if (request.getHeaders() != null) {
+ this.headers = new HashMap<String, Object>(request.getHeaders());
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public final TransportRequest putHeader(String key, Object value) {
+ if (headers == null) {
+ headers = Maps.newHashMap();
+ }
+ headers.put(key, value);
+ return this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final <V> V getHeader(String key) {
+ if (headers == null) {
+ return null;
+ }
+ return (V) headers.get(key);
+ }
+
+ public Map<String, Object> getHeaders() {
+ return this.headers;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ headers = in.readMap();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (headers == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeMap(headers);
+ }
+ }
+}
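
The header plumbing above gives each request copy an independent map. A sketch with a hypothetical MyRequest whose copy constructor delegates to super(request):

    MyRequest original = new MyRequest();
    original.putHeader("user", "kimchy");

    MyRequest copy = new MyRequest(original); // headers are copied, not shared
    copy.putHeader("trace", Boolean.TRUE);

    original.getHeader("trace");    // -> null, the copy's map is independent
    copy.<String>getHeader("user"); // -> "kimchy", inherited at copy time
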
diff --git a/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java b/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java
new file mode 100644
index 0000000..fd62f30
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public interface TransportRequestHandler<T extends TransportRequest> {
+
+ T newInstance();
+
+ void messageReceived(T request, TransportChannel channel) throws Exception;
+
+ String executor();
+
+ /**
+ * See {@link org.elasticsearch.common.util.concurrent.AbstractRunnable#isForceExecution()}.
+ */
+ boolean isForceExecution();
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java
new file mode 100644
index 0000000..5452b02
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.unit.TimeValue;
+
+/**
+ *
+ */
+public class TransportRequestOptions {
+
+ public static final TransportRequestOptions EMPTY = options();
+
+ public static TransportRequestOptions options() {
+ return new TransportRequestOptions();
+ }
+
+ public static enum Type {
+ RECOVERY,
+ BULK,
+ REG,
+ STATE,
+ PING;
+
+ public static Type fromString(String type) {
+ if ("bulk".equalsIgnoreCase(type)) {
+ return BULK;
+ } else if ("reg".equalsIgnoreCase(type)) {
+ return REG;
+ } else if ("state".equalsIgnoreCase(type)) {
+ return STATE;
+ } else if ("recovery".equalsIgnoreCase(type)) {
+ return RECOVERY;
+ } else if ("ping".equalsIgnoreCase(type)) {
+ return PING;
+ } else {
+ throw new ElasticsearchIllegalArgumentException("failed to match transport type for [" + type + "]");
+ }
+ }
+ }
+
+ private TimeValue timeout;
+
+ private boolean compress;
+
+ private Type type = Type.REG;
+
+ public TransportRequestOptions withTimeout(long timeout) {
+ return withTimeout(TimeValue.timeValueMillis(timeout));
+ }
+
+ public TransportRequestOptions withTimeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return this;
+ }
+
+ public TransportRequestOptions withCompress(boolean compress) {
+ this.compress = compress;
+ return this;
+ }
+
+ public TransportRequestOptions withType(Type type) {
+ this.type = type;
+ return this;
+ }
+
+ public TimeValue timeout() {
+ return this.timeout;
+ }
+
+ public boolean compress() {
+ return this.compress;
+ }
+
+ public Type type() {
+ return this.type;
+ }
+}
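
Usage sketch: the withX methods mutate and return this, so options read as a builder chain; note that EMPTY is shared and should not be mutated:

    TransportRequestOptions options = TransportRequestOptions.options()
            .withType(TransportRequestOptions.Type.BULK)
            .withCompress(true)
            .withTimeout(TimeValue.timeValueSeconds(30));
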
diff --git a/src/main/java/org/elasticsearch/transport/TransportResponse.java b/src/main/java/org/elasticsearch/transport/TransportResponse.java
new file mode 100644
index 0000000..4b22d11
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportResponse.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ */
+public abstract class TransportResponse implements Streamable {
+
+ public static class Empty extends TransportResponse {
+
+ public static final Empty INSTANCE = new Empty();
+
+ public Empty() {
+ super();
+ }
+
+ public Empty(TransportResponse request) {
+ super(request);
+ }
+ }
+
+ private Map<String, Object> headers;
+
+ protected TransportResponse() {
+
+ }
+
+ protected TransportResponse(TransportResponse request) {
+ // create a new copy of the headers, since we are creating a new response which might have
+ // its headers changed in the context of that specific response
+ if (request.getHeaders() != null) {
+ this.headers = new HashMap<String, Object>(request.getHeaders());
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public final TransportResponse putHeader(String key, Object value) {
+ if (headers == null) {
+ headers = Maps.newHashMap();
+ }
+ headers.put(key, value);
+ return this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public final <V> V getHeader(String key) {
+ if (headers == null) {
+ return null;
+ }
+ return (V) headers.get(key);
+ }
+
+ public Map<String, Object> getHeaders() {
+ return this.headers;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ if (in.readBoolean()) {
+ headers = in.readMap();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ if (headers == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeMap(headers);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java b/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java
new file mode 100644
index 0000000..2602a3e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public interface TransportResponseHandler<T extends TransportResponse> {
+
+ /**
+ * Creates a new instance of the return type for the remote call.
+ * Called by the infrastructure before de-serializing the response.
+ *
+ * @return a new response copy.
+ */
+ T newInstance();
+
+ void handleResponse(T response);
+
+ void handleException(TransportException exp);
+
+ String executor();
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java b/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java
new file mode 100644
index 0000000..32dbf52
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public class TransportResponseOptions {
+
+ public static final TransportResponseOptions EMPTY = options();
+
+ public static TransportResponseOptions options() {
+ return new TransportResponseOptions();
+ }
+
+ private boolean compress;
+
+ public TransportResponseOptions withCompress(boolean compress) {
+ this.compress = compress;
+ return this;
+ }
+
+ public boolean compress() {
+ return this.compress;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportSerializationException.java b/src/main/java/org/elasticsearch/transport/TransportSerializationException.java
new file mode 100644
index 0000000..ee92fe6
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportSerializationException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+/**
+ *
+ */
+public class TransportSerializationException extends TransportException {
+
+ public TransportSerializationException(String msg) {
+ super(msg);
+ }
+
+ public TransportSerializationException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportService.java b/src/main/java/org/elasticsearch/transport/TransportService.java
new file mode 100644
index 0000000..6cc2513
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportService.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.metrics.MeanMetric;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+/**
+ *
+ */
+public class TransportService extends AbstractLifecycleComponent<TransportService> {
+
+ private final Transport transport;
+
+ private final ThreadPool threadPool;
+
+ volatile ImmutableMap<String, TransportRequestHandler> serverHandlers = ImmutableMap.of();
+ final Object serverHandlersMutex = new Object();
+
+ final ConcurrentMapLong<RequestHolder> clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
+
+ final AtomicLong requestIds = new AtomicLong();
+
+ final CopyOnWriteArrayList<TransportConnectionListener> connectionListeners = new CopyOnWriteArrayList<TransportConnectionListener>();
+
+ // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they
+ // do show up, we can print more descriptive information about them
+ final Map<Long, TimeoutInfoHolder> timeoutInfoHandlers = Collections.synchronizedMap(new LinkedHashMap<Long, TimeoutInfoHolder>(100, .75F, true) {
+ protected boolean removeEldestEntry(Map.Entry eldest) {
+ return size() > 100;
+ }
+ });
+
+ private boolean throwConnectException = false;
+ private final TransportService.Adapter adapter = new Adapter();
+
+ public TransportService(Transport transport, ThreadPool threadPool) {
+ this(EMPTY_SETTINGS, transport, threadPool);
+ }
+
+ @Inject
+ public TransportService(Settings settings, Transport transport, ThreadPool threadPool) {
+ super(settings);
+ this.transport = transport;
+ this.threadPool = threadPool;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ adapter.rxMetric.clear();
+ adapter.txMetric.clear();
+ transport.transportServiceAdapter(adapter);
+ transport.start();
+ if (transport.boundAddress() != null && logger.isInfoEnabled()) {
+ logger.info("{}", transport.boundAddress());
+ }
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ transport.stop();
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ transport.close();
+ }
+
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return transport.addressSupported(address);
+ }
+
+ public TransportInfo info() {
+ return new TransportInfo(boundAddress());
+ }
+
+ public TransportStats stats() {
+ return new TransportStats(transport.serverOpen(), adapter.rxMetric.count(), adapter.rxMetric.sum(), adapter.txMetric.count(), adapter.txMetric.sum());
+ }
+
+ public BoundTransportAddress boundAddress() {
+ return transport.boundAddress();
+ }
+
+ public boolean nodeConnected(DiscoveryNode node) {
+ return transport.nodeConnected(node);
+ }
+
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ transport.connectToNode(node);
+ }
+
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ transport.connectToNodeLight(node);
+ }
+
+ public void disconnectFromNode(DiscoveryNode node) {
+ transport.disconnectFromNode(node);
+ }
+
+ public void addConnectionListener(TransportConnectionListener listener) {
+ connectionListeners.add(listener);
+ }
+
+ public void removeConnectionListener(TransportConnectionListener listener) {
+ connectionListeners.remove(listener);
+ }
+
+ /**
+ * Set to <tt>true</tt> to indicate that a {@link ConnectTransportException} should be thrown when
+ * sending a message (otherwise, it will be passed to the response handler). Defaults to <tt>false</tt>.
+ * <p/>
+ * <p>This is useful when logic based on connect failure is needed without having to wrap the handler,
+ * for example, in case of retries across several nodes.
+ */
+ public void throwConnectException(boolean throwConnectException) {
+ this.throwConnectException = throwConnectException;
+ }
+
+ public <T extends TransportResponse> TransportFuture<T> submitRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportResponseHandler<T> handler) throws TransportException {
+ return submitRequest(node, action, request, TransportRequestOptions.EMPTY, handler);
+ }
+
+ public <T extends TransportResponse> TransportFuture<T> submitRequest(DiscoveryNode node, String action, TransportRequest request,
+ TransportRequestOptions options, TransportResponseHandler<T> handler) throws TransportException {
+ PlainTransportFuture<T> futureHandler = new PlainTransportFuture<T>(handler);
+ sendRequest(node, action, request, options, futureHandler);
+ return futureHandler;
+ }
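+ // submitRequest is the blocking-friendly variant: it parks the handler inside a
+ // PlainTransportFuture, so callers can either react via the handler callbacks or
+ // wait on the returned future for the response.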
+
+ public <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
+ final TransportResponseHandler<T> handler) throws TransportException {
+ sendRequest(node, action, request, TransportRequestOptions.EMPTY, handler);
+ }
+
+ public <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
+ final TransportRequestOptions options, TransportResponseHandler<T> handler) throws TransportException {
+ if (node == null) {
+ throw new ElasticsearchIllegalStateException("can't send request to a null node");
+ }
+ final long requestId = newRequestId();
+ TimeoutHandler timeoutHandler = null;
+ try {
+ if (options.timeout() != null) {
+ timeoutHandler = new TimeoutHandler(requestId);
+ timeoutHandler.future = threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler);
+ }
+ clientHandlers.put(requestId, new RequestHolder<T>(handler, node, action, timeoutHandler));
+ transport.sendRequest(node, requestId, action, request, options);
+ } catch (final Throwable e) {
+ // this usually happens either because we failed to connect to the node
+ // or because we failed to serialize the message
+ final RequestHolder holderToNotify = clientHandlers.remove(requestId);
+ if (timeoutHandler != null) {
+ timeoutHandler.future.cancel(false);
+ }
+
+ // If holderToNotify == null then handler has already been taken care of.
+ if (holderToNotify != null) {
+ // callback that an exception happened, but on a different thread since we don't
+ // want handlers to worry about stack overflows
+ final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e);
+ threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
+ @Override
+ public void run() {
+ holderToNotify.handler().handleException(sendRequestException);
+ }
+ });
+ }
+
+ if (throwConnectException) {
+ if (e instanceof ConnectTransportException) {
+ throw (ConnectTransportException) e;
+ }
+ }
+ }
+ }
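+ // Request lifecycle sketch: the handler is parked in clientHandlers under the
+ // generated requestId before the wire send, and is removed again by exactly one
+ // of: a response or error arriving (Adapter.remove), the timeout firing
+ // (TimeoutHandler.run), the node disconnecting (raiseNodeDisconnected), or a
+ // send failure handled in the catch block above.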
+
+ private long newRequestId() {
+ return requestIds.getAndIncrement();
+ }
+
+ public TransportAddress[] addressesFromString(String address) throws Exception {
+ return transport.addressesFromString(address);
+ }
+
+ public void registerHandler(String action, TransportRequestHandler handler) {
+ synchronized (serverHandlersMutex) {
+ TransportRequestHandler handlerReplaced = serverHandlers.get(action);
+ serverHandlers = MapBuilder.newMapBuilder(serverHandlers).put(action, handler).immutableMap();
+ if (handlerReplaced != null) {
+ logger.warn("Registered two transport handlers for action {}, handlers: {}, {}", action, handler, handlerReplaced);
+ }
+ }
+ }
+
+ public void removeHandler(String action) {
+ synchronized (serverHandlersMutex) {
+ serverHandlers = MapBuilder.newMapBuilder(serverHandlers).remove(action).immutableMap();
+ }
+ }
+
+ class Adapter implements TransportServiceAdapter {
+
+ final MeanMetric rxMetric = new MeanMetric();
+ final MeanMetric txMetric = new MeanMetric();
+
+ @Override
+ public void received(long size) {
+ rxMetric.inc(size);
+ }
+
+ @Override
+ public void sent(long size) {
+ txMetric.inc(size);
+ }
+
+ @Override
+ public TransportRequestHandler handler(String action) {
+ return serverHandlers.get(action);
+ }
+
+ @Override
+ public TransportResponseHandler remove(long requestId) {
+ RequestHolder holder = clientHandlers.remove(requestId);
+ if (holder == null) {
+ // let's see if it's in the timeout holder
+ TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId);
+ if (timeoutInfoHolder != null) {
+ long time = System.currentTimeMillis();
+ logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(), time - timeoutInfoHolder.timeoutTime(), timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId);
+ } else {
+ logger.warn("Transport response handler not found of id [{}]", requestId);
+ }
+ return null;
+ }
+ holder.cancel();
+ return holder.handler();
+ }
+
+ @Override
+ public void raiseNodeConnected(final DiscoveryNode node) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ for (TransportConnectionListener connectionListener : connectionListeners) {
+ connectionListener.onNodeConnected(node);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void raiseNodeDisconnected(final DiscoveryNode node) {
+ if (lifecycle.stoppedOrClosed()) {
+ return;
+ }
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (TransportConnectionListener connectionListener : connectionListeners) {
+ connectionListener.onNodeDisconnected(node);
+ }
+ // the node got disconnected, raise a disconnect exception on any ongoing handlers
+ for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
+ RequestHolder holder = entry.getValue();
+ if (holder.node().equals(node)) {
+ final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey());
+ if (holderToNotify != null) {
+ // callback that an exception happened, but on a different thread since we don't
+ // want handlers to worry about stack overflows
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ holderToNotify.handler().handleException(new NodeDisconnectedException(node, holderToNotify.action()));
+ }
+ });
+ }
+ }
+ }
+ } catch (EsRejectedExecutionException ex) {
+ logger.debug("Rejected execution on NodeDisconnected", ex);
+ }
+ }
+ });
+ }
+ }
+
+ class TimeoutHandler implements Runnable {
+
+ private final long requestId;
+
+ private final long sentTime = System.currentTimeMillis();
+
+ ScheduledFuture future;
+
+ TimeoutHandler(long requestId) {
+ this.requestId = requestId;
+ }
+
+ public long sentTime() {
+ return sentTime;
+ }
+
+ @Override
+ public void run() {
+ if (future.isCancelled()) {
+ return;
+ }
+ final RequestHolder holder = clientHandlers.remove(requestId);
+ if (holder != null) {
+ // add it to the timeout information holder, in case we are going to get a response later
+ long timeoutTime = System.currentTimeMillis();
+ timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.node(), holder.action(), sentTime, timeoutTime));
+ holder.handler().handleException(new ReceiveTimeoutTransportException(holder.node(), holder.action(), "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]"));
+ }
+ }
+ }
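+ // If a response races in after the timeout has already fired, Adapter.remove
+ // finds no RequestHolder and falls back to timeoutInfoHandlers to log how long
+ // ago the request was sent and when it timed out.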
+
+
+ static class TimeoutInfoHolder {
+
+ private final DiscoveryNode node;
+
+ private final String action;
+
+ private final long sentTime;
+
+ private final long timeoutTime;
+
+ TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) {
+ this.node = node;
+ this.action = action;
+ this.sentTime = sentTime;
+ this.timeoutTime = timeoutTime;
+ }
+
+ public DiscoveryNode node() {
+ return node;
+ }
+
+ public String action() {
+ return action;
+ }
+
+ public long sentTime() {
+ return sentTime;
+ }
+
+ public long timeoutTime() {
+ return timeoutTime;
+ }
+ }
+
+ static class RequestHolder<T extends TransportResponse> {
+
+ private final TransportResponseHandler<T> handler;
+
+ private final DiscoveryNode node;
+
+ private final String action;
+
+ private final TimeoutHandler timeout;
+
+ RequestHolder(TransportResponseHandler<T> handler, DiscoveryNode node, String action, TimeoutHandler timeout) {
+ this.handler = handler;
+ this.node = node;
+ this.action = action;
+ this.timeout = timeout;
+ }
+
+ public TransportResponseHandler<T> handler() {
+ return handler;
+ }
+
+ public DiscoveryNode node() {
+ return this.node;
+ }
+
+ public String action() {
+ return this.action;
+ }
+
+ public void cancel() {
+ if (timeout != null) {
+ timeout.future.cancel(false);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java b/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java
new file mode 100644
index 0000000..a0ed558
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+
+/**
+ *
+ */
+public interface TransportServiceAdapter {
+
+ void received(long size);
+
+ void sent(long size);
+
+ TransportRequestHandler handler(String action);
+
+ TransportResponseHandler remove(long requestId);
+
+ void raiseNodeConnected(DiscoveryNode node);
+
+ void raiseNodeDisconnected(DiscoveryNode node);
+}
diff --git a/src/main/java/org/elasticsearch/transport/TransportStats.java b/src/main/java/org/elasticsearch/transport/TransportStats.java
new file mode 100644
index 0000000..36624b3
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/TransportStats.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+
+import java.io.IOException;
+
+public class TransportStats implements Streamable, ToXContent {
+
+ private long serverOpen;
+ private long rxCount;
+ private long rxSize;
+ private long txCount;
+ private long txSize;
+
+ TransportStats() {
+
+ }
+
+ public TransportStats(long serverOpen, long rxCount, long rxSize, long txCount, long txSize) {
+ this.serverOpen = serverOpen;
+ this.rxCount = rxCount;
+ this.rxSize = rxSize;
+ this.txCount = txCount;
+ this.txSize = txSize;
+ }
+
+ public long serverOpen() {
+ return this.serverOpen;
+ }
+
+ public long getServerOpen() {
+ return serverOpen();
+ }
+
+ public long rxCount() {
+ return rxCount;
+ }
+
+ public long getRxCount() {
+ return rxCount();
+ }
+
+ public ByteSizeValue rxSize() {
+ return new ByteSizeValue(rxSize);
+ }
+
+ public ByteSizeValue getRxSize() {
+ return rxSize();
+ }
+
+ public long txCount() {
+ return txCount;
+ }
+
+ public long getTxCount() {
+ return txCount();
+ }
+
+ public ByteSizeValue txSize() {
+ return new ByteSizeValue(txSize);
+ }
+
+ public ByteSizeValue getTxSize() {
+ return txSize();
+ }
+
+ public static TransportStats readTransportStats(StreamInput in) throws IOException {
+ TransportStats stats = new TransportStats();
+ stats.readFrom(in);
+ return stats;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ serverOpen = in.readVLong();
+ rxCount = in.readVLong();
+ rxSize = in.readVLong();
+ txCount = in.readVLong();
+ txSize = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(serverOpen);
+ out.writeVLong(rxCount);
+ out.writeVLong(rxSize);
+ out.writeVLong(txCount);
+ out.writeVLong(txSize);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(Fields.TRANSPORT);
+ builder.field(Fields.SERVER_OPEN, serverOpen);
+ builder.field(Fields.RX_COUNT, rxCount);
+ builder.byteSizeField(Fields.RX_SIZE_IN_BYTES, Fields.RX_SIZE, rxSize);
+ builder.field(Fields.TX_COUNT, txCount);
+ builder.byteSizeField(Fields.TX_SIZE_IN_BYTES, Fields.TX_SIZE, txSize);
+ builder.endObject();
+ return builder;
+ }
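+ // Rendered form (illustrative; the human-readable size fields depend on builder
+ // settings): "transport" : { "server_open" : 3, "rx_count" : 12,
+ //   "rx_size_in_bytes" : 5324, "tx_count" : 9, "tx_size_in_bytes" : 2048 }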
+
+ static final class Fields {
+ static final XContentBuilderString TRANSPORT = new XContentBuilderString("transport");
+ static final XContentBuilderString SERVER_OPEN = new XContentBuilderString("server_open");
+ static final XContentBuilderString RX_COUNT = new XContentBuilderString("rx_count");
+ static final XContentBuilderString RX_SIZE = new XContentBuilderString("rx_size");
+ static final XContentBuilderString RX_SIZE_IN_BYTES = new XContentBuilderString("rx_size_in_bytes");
+ static final XContentBuilderString TX_COUNT = new XContentBuilderString("tx_count");
+ static final XContentBuilderString TX_SIZE = new XContentBuilderString("tx_size");
+ static final XContentBuilderString TX_SIZE_IN_BYTES = new XContentBuilderString("tx_size_in_bytes");
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
new file mode 100644
index 0000000..ffa1ecb
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.local;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.ThrowableObjectInputStream;
+import org.elasticsearch.common.io.stream.*;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.support.TransportStatus;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
+
+/**
+ *
+ */
+public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {
+
+ private final ThreadPool threadPool;
+ private final Version version;
+ private volatile TransportServiceAdapter transportServiceAdapter;
+ private volatile BoundTransportAddress boundAddress;
+ private volatile LocalTransportAddress localAddress;
+ private final static ConcurrentMap<TransportAddress, LocalTransport> transports = newConcurrentMap();
+ private static final AtomicLong transportAddressIdGenerator = new AtomicLong();
+ private final ConcurrentMap<DiscoveryNode, LocalTransport> connectedNodes = newConcurrentMap();
+
+ @Inject
+ public LocalTransport(Settings settings, ThreadPool threadPool, Version version) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.version = version;
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address) {
+ return new TransportAddress[]{new LocalTransportAddress(address)};
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return LocalTransportAddress.class.equals(address);
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ localAddress = new LocalTransportAddress(Long.toString(transportAddressIdGenerator.incrementAndGet()));
+ transports.put(localAddress, this);
+ boundAddress = new BoundTransportAddress(localAddress, localAddress);
+ }
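+ // Registration is JVM-wide: the static transports map is what lets two local
+ // transports in the same process "connect" to each other by address lookup.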
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ transports.remove(localAddress);
+ // now, go over all the transports connected to me, and raise disconnected event
+ for (final LocalTransport targetTransport : transports.values()) {
+ for (final Map.Entry<DiscoveryNode, LocalTransport> entry : targetTransport.connectedNodes.entrySet()) {
+ if (entry.getValue() == this) {
+ targetTransport.disconnectFromNode(entry.getKey());
+ }
+ }
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter transportServiceAdapter) {
+ this.transportServiceAdapter = transportServiceAdapter;
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return boundAddress;
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return connectedNodes.containsKey(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ connectToNode(node);
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ synchronized (this) {
+ if (connectedNodes.containsKey(node)) {
+ return;
+ }
+ final LocalTransport targetTransport = transports.get(node.address());
+ if (targetTransport == null) {
+ throw new ConnectTransportException(node, "Failed to connect");
+ }
+ connectedNodes.put(node, targetTransport);
+ transportServiceAdapter.raiseNodeConnected(node);
+ }
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+ synchronized (this) {
+ LocalTransport removed = connectedNodes.remove(node);
+ if (removed != null) {
+ transportServiceAdapter.raiseNodeDisconnected(node);
+ }
+ }
+ }
+
+ @Override
+ public long serverOpen() {
+ return 0;
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ final Version version = Version.smallest(node.version(), this.version);
+
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = new HandlesStreamOutput(bStream);
+ stream.setVersion(version);
+
+ stream.writeLong(requestId);
+ byte status = 0;
+ status = TransportStatus.setRequest(status);
+ stream.writeByte(status); // 0 for request, 1 for response.
+
+ stream.writeString(action);
+ request.writeTo(stream);
+
+ stream.close();
+
+ final LocalTransport targetTransport = connectedNodes.get(node);
+ if (targetTransport == null) {
+ throw new NodeNotConnectedException(node, "Node not connected");
+ }
+
+ final byte[] data = bStream.bytes().toBytes();
+
+ transportServiceAdapter.sent(data.length);
+
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ targetTransport.messageReceived(data, action, LocalTransport.this, version, requestId);
+ }
+ });
+ }
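+ // The local wire layout mirrors the netty payload without framing: request id
+ // (long) + status byte + action name (string) + request body, delivered to the
+ // target transport as a raw byte[] on the generic thread pool.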
+
+ ThreadPool threadPool() {
+ return this.threadPool;
+ }
+
+ protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version, @Nullable final Long sendRequestId) {
+ try {
+ transportServiceAdapter.received(data.length);
+ StreamInput stream = new BytesStreamInput(data, false);
+ stream = CachedStreamInput.cachedHandles(stream);
+ stream.setVersion(version);
+
+ long requestId = stream.readLong();
+ byte status = stream.readByte();
+ boolean isRequest = TransportStatus.isRequest(status);
+
+ if (isRequest) {
+ handleRequest(stream, requestId, sourceTransport, version);
+ } else {
+ final TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
+ // ignore if it's null, the adapter logs it
+ if (handler != null) {
+ if (TransportStatus.isError(status)) {
+ handleResponseError(stream, handler);
+ } else {
+ handleResponse(stream, handler);
+ }
+ }
+ }
+ } catch (Throwable e) {
+ if (sendRequestId != null) {
+ TransportResponseHandler handler = transportServiceAdapter.remove(sendRequestId);
+ if (handler != null) {
+ handleException(handler, new RemoteTransportException(nodeName(), localAddress, action, e));
+ }
+ } else {
+ logger.warn("Failed to receive message for action [" + action + "]", e);
+ }
+ }
+ }
+
+ private void handleRequest(StreamInput stream, long requestId, LocalTransport sourceTransport, Version version) throws Exception {
+ final String action = stream.readString();
+ final LocalTransportChannel transportChannel = new LocalTransportChannel(this, sourceTransport, action, requestId, version);
+ try {
+ final TransportRequestHandler handler = transportServiceAdapter.handler(action);
+ if (handler == null) {
+ throw new ActionNotFoundTransportException("Action [" + action + "] not found");
+ }
+ final TransportRequest request = handler.newInstance();
+ request.readFrom(stream);
+ if (handler.executor() == ThreadPool.Names.SAME) {
+ //noinspection unchecked
+ handler.messageReceived(request, transportChannel);
+ } else {
+ threadPool.executor(handler.executor()).execute(new AbstractRunnable() {
+ @Override
+ public void run() {
+ try {
+ //noinspection unchecked
+ handler.messageReceived(request, transportChannel);
+ } catch (Throwable e) {
+ if (lifecycleState() == Lifecycle.State.STARTED) {
+ // we can only send a response if the transport is started
+ try {
+ transportChannel.sendResponse(e);
+ } catch (Throwable e1) {
+ logger.warn("Failed to send error message back to client for action [" + action + "]", e1);
+ logger.warn("Actual Exception", e);
+ }
+ }
+ }
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return handler.isForceExecution();
+ }
+ });
+ }
+ } catch (Throwable e) {
+ try {
+ transportChannel.sendResponse(e);
+ } catch (Throwable e1) {
+ logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+ logger.warn("Actual Exception", e1);
+ }
+ }
+ }
+
+
+ protected void handleResponse(StreamInput buffer, final TransportResponseHandler handler) {
+ final TransportResponse response = handler.newInstance();
+ try {
+ response.readFrom(buffer);
+ } catch (Throwable e) {
+ handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
+ return;
+ }
+ handleParsedResponse(response, handler);
+ }
+
+ protected void handleParsedResponse(final TransportResponse response, final TransportResponseHandler handler) {
+ threadPool.executor(handler.executor()).execute(new Runnable() {
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public void run() {
+ try {
+ handler.handleResponse(response);
+ } catch (Throwable e) {
+ handleException(handler, new ResponseHandlerFailureTransportException(e));
+ }
+ }
+ });
+ }
+
+ private void handleResponseError(StreamInput buffer, final TransportResponseHandler handler) {
+ Throwable error;
+ try {
+ ThrowableObjectInputStream ois = new ThrowableObjectInputStream(buffer, settings.getClassLoader());
+ error = (Throwable) ois.readObject();
+ } catch (Throwable e) {
+ error = new TransportSerializationException("Failed to deserialize exception response from stream", e);
+ }
+ handleException(handler, error);
+ }
+
+ private void handleException(final TransportResponseHandler handler, Throwable error) {
+ if (!(error instanceof RemoteTransportException)) {
+ error = new RemoteTransportException("None remote transport exception", error);
+ }
+ final RemoteTransportException rtx = (RemoteTransportException) error;
+ try {
+ handler.handleException(rtx);
+ } catch (Throwable t) {
+ logger.error("failed to handle exception response [{}]", t, handler);
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java b/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java
new file mode 100644
index 0000000..f4d5e83
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.local;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.ThrowableObjectOutputStream;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.HandlesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.support.TransportStatus;
+
+import java.io.IOException;
+import java.io.NotSerializableException;
+
+/**
+ *
+ */
+public class LocalTransportChannel implements TransportChannel {
+
+ private final LocalTransport sourceTransport;
+ // the transport we will *send to*
+ private final LocalTransport targetTransport;
+ private final String action;
+ private final long requestId;
+ private final Version version;
+
+ public LocalTransportChannel(LocalTransport sourceTransport, LocalTransport targetTransport, String action, long requestId, Version version) {
+ this.sourceTransport = sourceTransport;
+ this.targetTransport = targetTransport;
+ this.action = action;
+ this.requestId = requestId;
+ this.version = version;
+ }
+
+ @Override
+ public String action() {
+ return action;
+ }
+
+ @Override
+ public void sendResponse(TransportResponse response) throws IOException {
+ sendResponse(response, TransportResponseOptions.EMPTY);
+ }
+
+ @Override
+ public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ StreamOutput stream = new HandlesStreamOutput(bStream);
+ stream.setVersion(version);
+ stream.writeLong(requestId);
+ byte status = 0;
+ status = TransportStatus.setResponse(status);
+ stream.writeByte(status); // 0 for request, 1 for response.
+ response.writeTo(stream);
+ stream.close();
+ final byte[] data = bStream.bytes().toBytes();
+ targetTransport.threadPool().generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ targetTransport.messageReceived(data, action, sourceTransport, version, null);
+ }
+ });
+ }
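+ // The trailing null sendRequestId means a deserialization failure of this
+ // response can only be logged on the target, not routed back to a handler.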
+
+ @Override
+ public void sendResponse(Throwable error) throws IOException {
+ BytesStreamOutput stream = new BytesStreamOutput();
+ try {
+ writeResponseExceptionHeader(stream);
+ RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddress(), action, error);
+ ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(stream);
+ too.writeObject(tx);
+ too.close();
+ } catch (NotSerializableException e) {
+ stream.reset();
+ writeResponseExceptionHeader(stream);
+ RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddress(), action, new NotSerializableTransportException(error));
+ ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(stream);
+ too.writeObject(tx);
+ too.close();
+ }
+ final byte[] data = stream.bytes().toBytes();
+ targetTransport.threadPool().generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ targetTransport.messageReceived(data, action, sourceTransport, version, null);
+ }
+ });
+ }
+
+ private void writeResponseExceptionHeader(BytesStreamOutput stream) throws IOException {
+ stream.writeLong(requestId);
+ byte status = 0;
+ status = TransportStatus.setResponse(status);
+ status = TransportStatus.setError(status);
+ stream.writeByte(status);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/local/LocalTransportModule.java b/src/main/java/org/elasticsearch/transport/local/LocalTransportModule.java
new file mode 100644
index 0000000..ea0a44b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/local/LocalTransportModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.local;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.Transport;
+
+/**
+ *
+ */
+public class LocalTransportModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public LocalTransportModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(LocalTransport.class).asEagerSingleton();
+ bind(Transport.class).to(LocalTransport.class).asEagerSingleton();
+ }
+} \ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java b/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java
new file mode 100644
index 0000000..ce0eea8
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ChannelBufferBytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+/**
+ * A Netty {@link org.jboss.netty.buffer.ChannelBuffer} based {@link org.elasticsearch.common.io.stream.StreamInput}.
+ */
+public class ChannelBufferStreamInput extends StreamInput {
+
+ private final ChannelBuffer buffer;
+ private final int startIndex;
+ private final int endIndex;
+
+ public ChannelBufferStreamInput(ChannelBuffer buffer) {
+ this(buffer, buffer.readableBytes());
+ }
+
+ public ChannelBufferStreamInput(ChannelBuffer buffer, int length) {
+ if (length > buffer.readableBytes()) {
+ throw new IndexOutOfBoundsException();
+ }
+ this.buffer = buffer;
+ startIndex = buffer.readerIndex();
+ endIndex = startIndex + length;
+ buffer.markReaderIndex();
+ }
+
+ @Override
+ public BytesReference readBytesReference(int length) throws IOException {
+ ChannelBufferBytesReference ref = new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex(), length));
+ buffer.skipBytes(length);
+ return ref;
+ }
+
+ @Override
+ public BytesRef readBytesRef(int length) throws IOException {
+ if (!buffer.hasArray()) {
+ return super.readBytesRef(length);
+ }
+ BytesRef bytesRef = new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), length);
+ buffer.skipBytes(length);
+ return bytesRef;
+ }
+
+ @Override
+ public int available() throws IOException {
+ return endIndex - buffer.readerIndex();
+ }
+
+ @Override
+ public void mark(int readlimit) {
+ buffer.markReaderIndex();
+ }
+
+ @Override
+ public boolean markSupported() {
+ return true;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (available() == 0) {
+ return -1;
+ }
+ return buffer.readByte() & 0xff;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if (len == 0) {
+ return 0;
+ }
+ int available = available();
+ if (available == 0) {
+ return -1;
+ }
+
+ len = Math.min(available, len);
+ buffer.readBytes(b, off, len);
+ return len;
+ }
+
+ @Override
+ public void reset() throws IOException {
+ buffer.resetReaderIndex();
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (n > Integer.MAX_VALUE) {
+ return skipBytes(Integer.MAX_VALUE);
+ } else {
+ return skipBytes((int) n);
+ }
+ }
+
+ public int skipBytes(int n) throws IOException {
+ int nBytes = Math.min(available(), n);
+ buffer.skipBytes(nBytes);
+ return nBytes;
+ }
+
+
+ @Override
+ public byte readByte() throws IOException {
+ return buffer.readByte();
+ }
+
+ @Override
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ int read = read(b, offset, len);
+ if (read < len) {
+ throw new EOFException();
+ }
+ }
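+ // Unlike read(), readBytes is all-or-nothing: fewer than len available bytes is
+ // reported as an EOFException rather than a short read.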
+
+ @Override
+ public void close() throws IOException {
+ // nothing to do here
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java b/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java
new file mode 100644
index 0000000..554f710
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+/**
+ */
+public class ChannelBufferStreamInputFactory {
+
+ public static StreamInput create(ChannelBuffer buffer) {
+ return new ChannelBufferStreamInput(buffer, buffer.readableBytes());
+ }
+
+ public static StreamInput create(ChannelBuffer buffer, int size) {
+ return new ChannelBufferStreamInput(buffer, size);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java
new file mode 100644
index 0000000..70c4d75
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.ThrowableObjectInputStream;
+import org.elasticsearch.common.io.stream.CachedStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.support.TransportStatus;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.*;
+
+import java.io.IOException;
+
+/**
+ * A handler (must be the last one!) that does size-based frame decoding and forwards the actual message
+ * to the relevant action.
+ */
+public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
+
+ private final ESLogger logger;
+ private final ThreadPool threadPool;
+ private final TransportServiceAdapter transportServiceAdapter;
+ private final NettyTransport transport;
+
+ public MessageChannelHandler(NettyTransport transport, ESLogger logger) {
+ this.threadPool = transport.threadPool();
+ this.transportServiceAdapter = transport.transportServiceAdapter();
+ this.transport = transport;
+ this.logger = logger;
+ }
+
+ @Override
+ public void writeComplete(ChannelHandlerContext ctx, WriteCompletionEvent e) throws Exception {
+ transportServiceAdapter.sent(e.getWrittenAmount());
+ super.writeComplete(ctx, e);
+ }
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+ Object m = e.getMessage();
+ if (!(m instanceof ChannelBuffer)) {
+ ctx.sendUpstream(e);
+ return;
+ }
+ ChannelBuffer buffer = (ChannelBuffer) m;
+ int size = buffer.getInt(buffer.readerIndex() - 4);
+ transportServiceAdapter.received(size + 6);
+
+ // we have additional bytes to read, outside of the header
+ boolean hasMessageBytesToRead = (size - (NettyHeader.HEADER_SIZE - 6)) != 0;
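+ // size is the value written by NettyHeader: the frame length minus the 2-byte
+ // 'ES' marker and the 4-byte size int (hence received(size + 6) above), so the
+ // header bytes still counted inside it are HEADER_SIZE - 6 = 13.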
+
+ int markedReaderIndex = buffer.readerIndex();
+ int expectedIndexReader = markedReaderIndex + size;
+
+ // netty always copies a buffer, either in NioWorker's read handler, where it copies into a fresh
+ // buffer, or in the cumulation buffer, which is cleaned each time
+ StreamInput streamIn = ChannelBufferStreamInputFactory.create(buffer, size);
+
+ long requestId = buffer.readLong();
+ byte status = buffer.readByte();
+ Version version = Version.fromId(buffer.readInt());
+
+ StreamInput wrappedStream;
+ if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
+ Compressor compressor = CompressorFactory.compressor(buffer);
+ if (compressor == null) {
+ int maxToRead = Math.min(buffer.readableBytes(), 10);
+ int offset = buffer.readerIndex();
+ StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are [");
+ for (int i = 0; i < maxToRead; i++) {
+ sb.append(buffer.getByte(offset + i)).append(",");
+ }
+ sb.append("]");
+ throw new ElasticsearchIllegalStateException(sb.toString());
+ }
+ wrappedStream = CachedStreamInput.cachedHandlesCompressed(compressor, streamIn);
+ } else {
+ wrappedStream = CachedStreamInput.cachedHandles(streamIn);
+ }
+ wrappedStream.setVersion(version);
+
+ if (TransportStatus.isRequest(status)) {
+ String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
+ if (buffer.readerIndex() != expectedIndexReader) {
+ if (buffer.readerIndex() < expectedIndexReader) {
+ logger.warn("Message not fully read (request) for [{}] and action [{}], resetting", requestId, action);
+ } else {
+ logger.warn("Message read past expected size (request) for [{}] and action [{}], resetting", requestId, action);
+ }
+ buffer.readerIndex(expectedIndexReader);
+ }
+ } else {
+ TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
+ // ignore if it's null, the adapter logs it
+ if (handler != null) {
+ if (TransportStatus.isError(status)) {
+ handleResponseError(wrappedStream, handler);
+ } else {
+ handleResponse(wrappedStream, handler);
+ }
+ } else {
+ // if it's null, skip those bytes
+ buffer.readerIndex(markedReaderIndex + size);
+ }
+ if (buffer.readerIndex() != expectedIndexReader) {
+ if (buffer.readerIndex() < expectedIndexReader) {
+ logger.warn("Message not fully read (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
+ } else {
+ logger.warn("Message read past expected size (response) for [{}] handler {}, error [{}], resetting", requestId, handler, TransportStatus.isError(status));
+ }
+ buffer.readerIndex(expectedIndexReader);
+ }
+ }
+ wrappedStream.close();
+ }
+
+ private void handleResponse(StreamInput buffer, final TransportResponseHandler handler) {
+ final TransportResponse response = handler.newInstance();
+ try {
+ response.readFrom(buffer);
+ } catch (Throwable e) {
+ handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
+ return;
+ }
+ try {
+ if (handler.executor() == ThreadPool.Names.SAME) {
+ //noinspection unchecked
+ handler.handleResponse(response);
+ } else {
+ threadPool.executor(handler.executor()).execute(new ResponseHandler(handler, response));
+ }
+ } catch (Throwable e) {
+ handleException(handler, new ResponseHandlerFailureTransportException(e));
+ }
+ }
+
+ private void handleResponseError(StreamInput buffer, final TransportResponseHandler handler) {
+ Throwable error;
+ try {
+ ThrowableObjectInputStream ois = new ThrowableObjectInputStream(buffer, transport.settings().getClassLoader());
+ error = (Throwable) ois.readObject();
+ } catch (Throwable e) {
+ error = new TransportSerializationException("Failed to deserialize exception response from stream", e);
+ }
+ handleException(handler, error);
+ }
+
+ private void handleException(final TransportResponseHandler handler, Throwable error) {
+ if (!(error instanceof RemoteTransportException)) {
+ error = new RemoteTransportException(error.getMessage(), error);
+ }
+ final RemoteTransportException rtx = (RemoteTransportException) error;
+ if (handler.executor() == ThreadPool.Names.SAME) {
+ try {
+ handler.handleException(rtx);
+ } catch (Throwable e) {
+ logger.error("failed to handle exception response [{}]", e, handler);
+ }
+ } else {
+ threadPool.executor(handler.executor()).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ handler.handleException(rtx);
+ } catch (Throwable e) {
+ logger.error("failed to handle exception response [{}]", e, handler);
+ }
+ }
+ });
+ }
+ }
+
+ private String handleRequest(Channel channel, StreamInput buffer, long requestId, Version version) throws IOException {
+ final String action = buffer.readString();
+
+ final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, action, channel, requestId, version);
+ try {
+ final TransportRequestHandler handler = transportServiceAdapter.handler(action);
+ if (handler == null) {
+ throw new ActionNotFoundTransportException(action);
+ }
+ final TransportRequest request = handler.newInstance();
+ request.readFrom(buffer);
+ if (handler.executor() == ThreadPool.Names.SAME) {
+ //noinspection unchecked
+ handler.messageReceived(request, transportChannel);
+ } else {
+ threadPool.executor(handler.executor()).execute(new RequestHandler(handler, request, transportChannel, action));
+ }
+ } catch (Throwable e) {
+ try {
+ transportChannel.sendResponse(e);
+ } catch (IOException e1) {
+ logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+ logger.warn("Actual Exception", e1);
+ }
+ }
+ return action;
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+ transport.exceptionCaught(ctx, e);
+ }
+
+ class ResponseHandler implements Runnable {
+
+ private final TransportResponseHandler handler;
+ private final TransportResponse response;
+
+ public ResponseHandler(TransportResponseHandler handler, TransportResponse response) {
+ this.handler = handler;
+ this.response = response;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public void run() {
+ try {
+ handler.handleResponse(response);
+ } catch (Throwable e) {
+ handleException(handler, new ResponseHandlerFailureTransportException(e));
+ }
+ }
+ }
+
+ class RequestHandler extends AbstractRunnable {
+ private final TransportRequestHandler handler;
+ private final TransportRequest request;
+ private final NettyTransportChannel transportChannel;
+ private final String action;
+
+ public RequestHandler(TransportRequestHandler handler, TransportRequest request, NettyTransportChannel transportChannel, String action) {
+ this.handler = handler;
+ this.request = request;
+ this.transportChannel = transportChannel;
+ this.action = action;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public void run() {
+ try {
+ handler.messageReceived(request, transportChannel);
+ } catch (Throwable e) {
+ if (transport.lifecycleState() == Lifecycle.State.STARTED) {
+ // we can only send a response if the transport is started
+ try {
+ transportChannel.sendResponse(e);
+ } catch (Throwable e1) {
+ logger.warn("Failed to send error message back to client for action [" + action + "]", e1);
+ logger.warn("Actual Exception", e);
+ }
+ }
+ }
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return handler.isForceExecution();
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java b/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java
new file mode 100644
index 0000000..aa06dc1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.Version;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+/**
+ */
+public class NettyHeader {
+
+ public static final int HEADER_SIZE = 2 + 4 + 8 + 1 + 4;
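+ // Layout: 'E' 'S' marker (2) + message size int (4) + request id long (8) +
+ // status byte (1) + version id int (4). writeHeader back-fills these into space
+ // reserved at the front of the buffer; the size field excludes the first 6 bytes.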
+
+ public static void writeHeader(ChannelBuffer buffer, long requestId, byte status, Version version) {
+ int index = buffer.readerIndex();
+ buffer.setByte(index, 'E');
+ index += 1;
+ buffer.setByte(index, 'S');
+ index += 1;
+ // write the size, the size indicates the remaining message size, not including the size int
+ buffer.setInt(index, buffer.readableBytes() - 6);
+ index += 4;
+ buffer.setLong(index, requestId);
+ index += 8;
+ buffer.setByte(index, status);
+ index += 1;
+ buffer.setInt(index, version.id);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java b/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java
new file mode 100644
index 0000000..ed92aa2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.jboss.netty.logging.AbstractInternalLogger;
+
+/**
+ *
+ */
+public class NettyInternalESLogger extends AbstractInternalLogger {
+
+ private final ESLogger logger;
+
+ public NettyInternalESLogger(ESLogger logger) {
+ this.logger = logger;
+ }
+
+ @Override
+ public boolean isDebugEnabled() {
+ return logger.isDebugEnabled();
+ }
+
+ @Override
+ public boolean isInfoEnabled() {
+ return logger.isInfoEnabled();
+ }
+
+ @Override
+ public boolean isWarnEnabled() {
+ return logger.isWarnEnabled();
+ }
+
+ @Override
+ public boolean isErrorEnabled() {
+ return logger.isErrorEnabled();
+ }
+
+ @Override
+ public void debug(String msg) {
+ logger.debug(msg);
+ }
+
+ @Override
+ public void debug(String msg, Throwable cause) {
+ logger.debug(msg, cause);
+ }
+
+ @Override
+ public void info(String msg) {
+ logger.info(msg);
+ }
+
+ @Override
+ public void info(String msg, Throwable cause) {
+ logger.info(msg, cause);
+ }
+
+ @Override
+ public void warn(String msg) {
+ logger.warn(msg);
+ }
+
+ @Override
+ public void warn(String msg, Throwable cause) {
+ logger.warn(msg, cause);
+ }
+
+ @Override
+ public void error(String msg) {
+ logger.error(msg);
+ }
+
+ @Override
+ public void error(String msg, Throwable cause) {
+ logger.error(msg, cause);
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java b/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java
new file mode 100644
index 0000000..70c5e65
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.logging.Loggers;
+import org.jboss.netty.logging.InternalLogger;
+import org.jboss.netty.logging.InternalLoggerFactory;
+
+/**
+ *
+ */
+public class NettyInternalESLoggerFactory extends InternalLoggerFactory {
+
+ @Override
+ public InternalLogger newInstance(String name) {
+ return new NettyInternalESLogger(Loggers.getLogger(name));
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
new file mode 100644
index 0000000..b689da9
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
@@ -0,0 +1,952 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.HandlesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.netty.NettyStaticSetup;
+import org.elasticsearch.common.netty.OpenChannelsHandler;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.PortsRange;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.KeyedLock;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.support.TransportStatus;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.CompositeChannelBuffer;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioWorkerPool;
+import org.jboss.netty.channel.socket.oio.OioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
+import org.jboss.netty.util.HashedWheelTimer;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.nio.channels.CancelledKeyException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.elasticsearch.common.network.NetworkService.TcpSettings.*;
+import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException;
+import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
+import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
+import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
+
+/**
+ * There are four types of connections per node: low, med, high, and ping. Low is for batch oriented APIs (like
+ * recovery or bulk) with high payloads that would cause regular requests (like search or single doc indexing) to take
+ * longer. Med is for the typical search / single doc index operation, and High is for things like cluster state. Ping is reserved for
+ * sending out ping requests to other nodes.
+ */
+public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
+
+ static {
+ NettyStaticSetup.setup();
+ }
+
+ private final NetworkService networkService;
+ final Version version;
+
+ final int workerCount;
+ final int bossCount;
+
+ final boolean blockingServer;
+
+ final boolean blockingClient;
+
+ final String port;
+
+ final String bindHost;
+
+ final String publishHost;
+
+ final int publishPort;
+
+ final boolean compress;
+
+ final TimeValue connectTimeout;
+
+ final Boolean tcpNoDelay;
+
+ final Boolean tcpKeepAlive;
+
+ final Boolean reuseAddress;
+
+ final ByteSizeValue tcpSendBufferSize;
+ final ByteSizeValue tcpReceiveBufferSize;
+ final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory;
+
+ final int connectionsPerNodeRecovery;
+ final int connectionsPerNodeBulk;
+ final int connectionsPerNodeReg;
+ final int connectionsPerNodeState;
+ final int connectionsPerNodePing;
+
+ final ByteSizeValue maxCumulationBufferCapacity;
+ final int maxCompositeBufferComponents;
+
+ private final ThreadPool threadPool;
+
+ private volatile OpenChannelsHandler serverOpenChannels;
+
+ private volatile ClientBootstrap clientBootstrap;
+
+ private volatile ServerBootstrap serverBootstrap;
+
+ // node to the channels connected to it
+ final ConcurrentMap<DiscoveryNode, NodeChannels> connectedNodes = newConcurrentMap();
+
+
+ private volatile Channel serverChannel;
+
+ private volatile TransportServiceAdapter transportServiceAdapter;
+
+ private volatile BoundTransportAddress boundAddress;
+
+ private final KeyedLock<String> connectionLock = new KeyedLock<String>();
+
+ // this lock is here to make sure we close this transport and disconnect all the client node
+ // connections while no connect operation is going on (this might help with 100% CPU when stopping the transport?)
+ private final ReadWriteLock globalLock = new ReentrantReadWriteLock();
+
+ @Inject
+ public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, Version version) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.networkService = networkService;
+ this.version = version;
+
+ if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
+ System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
+ }
+
+ this.workerCount = componentSettings.getAsInt("worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
+ this.bossCount = componentSettings.getAsInt("boss_count", 1);
+ this.blockingServer = settings.getAsBoolean("transport.tcp.blocking_server", settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
+ this.blockingClient = settings.getAsBoolean("transport.tcp.blocking_client", settings.getAsBoolean(TCP_BLOCKING_CLIENT, settings.getAsBoolean(TCP_BLOCKING, false)));
+ this.port = componentSettings.get("port", settings.get("transport.tcp.port", "9300-9400"));
+ this.bindHost = componentSettings.get("bind_host", settings.get("transport.bind_host", settings.get("transport.host")));
+ this.publishHost = componentSettings.get("publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
+ this.publishPort = componentSettings.getAsInt("publish_port", settings.getAsInt("transport.publish_port", 0));
+ this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false);
+ this.connectTimeout = componentSettings.getAsTime("connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT)));
+ this.tcpNoDelay = componentSettings.getAsBoolean("tcp_no_delay", settings.getAsBoolean(TCP_NO_DELAY, true));
+ this.tcpKeepAlive = componentSettings.getAsBoolean("tcp_keep_alive", settings.getAsBoolean(TCP_KEEP_ALIVE, true));
+ this.reuseAddress = componentSettings.getAsBoolean("reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
+ this.tcpSendBufferSize = componentSettings.getAsBytesSize("tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
+ this.tcpReceiveBufferSize = componentSettings.getAsBytesSize("tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
+ this.connectionsPerNodeRecovery = componentSettings.getAsInt("connections_per_node.recovery", settings.getAsInt("transport.connections_per_node.recovery", 2));
+ this.connectionsPerNodeBulk = componentSettings.getAsInt("connections_per_node.bulk", settings.getAsInt("transport.connections_per_node.bulk", 3));
+ this.connectionsPerNodeReg = componentSettings.getAsInt("connections_per_node.reg", settings.getAsInt("transport.connections_per_node.reg", 6));
+ this.connectionsPerNodeState = componentSettings.getAsInt("connections_per_node.high", settings.getAsInt("transport.connections_per_node.state", 1));
+ this.connectionsPerNodePing = componentSettings.getAsInt("connections_per_node.ping", settings.getAsInt("transport.connections_per_node.ping", 1));
+
+ // we want to have at least 1 for reg/state/ping
+ if (this.connectionsPerNodeReg == 0) {
+ throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.reg] to 0");
+ }
+ if (this.connectionsPerNodePing == 0) {
+ throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.ping] to 0");
+ }
+ if (this.connectionsPerNodeState == 0) {
+ throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.state] to 0");
+ }
+
+ this.maxCumulationBufferCapacity = componentSettings.getAsBytesSize("max_cumulation_buffer_capacity", null);
+ this.maxCompositeBufferComponents = componentSettings.getAsInt("max_composite_buffer_components", -1);
+
+ long defaultReceiverPredictor = 512 * 1024;
+ if (JvmInfo.jvmInfo().mem().directMemoryMax().bytes() > 0) {
+ // we can guess a better default...
+ long l = (long) ((0.3 * JvmInfo.jvmInfo().mem().directMemoryMax().bytes()) / workerCount);
+ defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
+ }
+
+ // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for the default values in netty; we can use higher ones, or even a fixed size
+ ByteSizeValue receivePredictorMin = componentSettings.getAsBytesSize("receive_predictor_min", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
+ ByteSizeValue receivePredictorMax = componentSettings.getAsBytesSize("receive_predictor_max", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
+ if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
+ receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
+ } else {
+ receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
+ }
+
+ logger.debug("using worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
+ workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax);
+ }
+
+ public Settings settings() {
+ return this.settings;
+ }
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter service) {
+ this.transportServiceAdapter = service;
+ }
+
+ TransportServiceAdapter transportServiceAdapter() {
+ return transportServiceAdapter;
+ }
+
+ ThreadPool threadPool() {
+ return threadPool;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ if (blockingClient) {
+ clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_client_worker"))));
+ } else {
+ clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_client_boss")),
+ bossCount,
+ new NioWorkerPool(Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_client_worker")), workerCount),
+ new HashedWheelTimer(daemonThreadFactory(settings, "transport_client_timer"))));
+ }
+ ChannelPipelineFactory clientPipelineFactory = new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ ChannelPipeline pipeline = Channels.pipeline();
+ SizeHeaderFrameDecoder sizeHeader = new SizeHeaderFrameDecoder();
+ if (maxCumulationBufferCapacity != null) {
+ if (maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
+ sizeHeader.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
+ } else {
+ sizeHeader.setMaxCumulationBufferCapacity((int) maxCumulationBufferCapacity.bytes());
+ }
+ }
+ if (maxCompositeBufferComponents != -1) {
+ sizeHeader.setMaxCumulationBufferComponents(maxCompositeBufferComponents);
+ }
+ pipeline.addLast("size", sizeHeader);
+ pipeline.addLast("dispatcher", new MessageChannelHandler(NettyTransport.this, logger));
+ return pipeline;
+ }
+ };
+ clientBootstrap.setPipelineFactory(clientPipelineFactory);
+ clientBootstrap.setOption("connectTimeoutMillis", connectTimeout.millis());
+ if (tcpNoDelay != null) {
+ clientBootstrap.setOption("tcpNoDelay", tcpNoDelay);
+ }
+ if (tcpKeepAlive != null) {
+ clientBootstrap.setOption("keepAlive", tcpKeepAlive);
+ }
+ if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
+ clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.bytes());
+ }
+ if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
+ clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.bytes());
+ }
+ clientBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+ if (reuseAddress != null) {
+ clientBootstrap.setOption("reuseAddress", reuseAddress);
+ }
+
+ if (!settings.getAsBoolean("network.server", true)) {
+ return;
+ }
+
+ serverOpenChannels = new OpenChannelsHandler(logger);
+ if (blockingServer) {
+ serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_server_boss")),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_server_worker"))
+ ));
+ } else {
+ serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_server_boss")),
+ Executors.newCachedThreadPool(daemonThreadFactory(settings, "transport_server_worker")),
+ workerCount));
+ }
+ ChannelPipelineFactory serverPipelineFactory = new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ ChannelPipeline pipeline = Channels.pipeline();
+ pipeline.addLast("openChannels", serverOpenChannels);
+ SizeHeaderFrameDecoder sizeHeader = new SizeHeaderFrameDecoder();
+ if (maxCumulationBufferCapacity != null) {
+ if (maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
+ sizeHeader.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
+ } else {
+ sizeHeader.setMaxCumulationBufferCapacity((int) maxCumulationBufferCapacity.bytes());
+ }
+ }
+ if (maxCompositeBufferComponents != -1) {
+ sizeHeader.setMaxCumulationBufferComponents(maxCompositeBufferComponents);
+ }
+ pipeline.addLast("size", sizeHeader);
+ pipeline.addLast("dispatcher", new MessageChannelHandler(NettyTransport.this, logger));
+ return pipeline;
+ }
+ };
+ serverBootstrap.setPipelineFactory(serverPipelineFactory);
+ if (tcpNoDelay != null) {
+ serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
+ }
+ if (tcpKeepAlive != null) {
+ serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
+ }
+ if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
+ serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
+ }
+ if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
+ serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
+ }
+ serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+ serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
+ if (reuseAddress != null) {
+ serverBootstrap.setOption("reuseAddress", reuseAddress);
+ serverBootstrap.setOption("child.reuseAddress", reuseAddress);
+ }
+
+ // Bind and start to accept incoming connections.
+ InetAddress hostAddressX;
+ try {
+ hostAddressX = networkService.resolveBindHostAddress(bindHost);
+ } catch (IOException e) {
+ throw new BindTransportException("Failed to resolve host [" + bindHost + "]", e);
+ }
+ final InetAddress hostAddress = hostAddressX;
+
+ PortsRange portsRange = new PortsRange(port);
+ final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
+ boolean success = portsRange.iterate(new PortsRange.PortCallback() {
+ @Override
+ public boolean onPortNumber(int portNumber) {
+ try {
+ serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
+ } catch (Exception e) {
+ lastException.set(e);
+ return false;
+ }
+ return true;
+ }
+ });
+ if (!success) {
+ throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get());
+ }
+
+ logger.debug("Bound to address [{}]", serverChannel.getLocalAddress());
+
+ InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress();
+ InetSocketAddress publishAddress;
+ int publishPort = this.publishPort;
+ if (0 == publishPort) {
+ publishPort = boundAddress.getPort();
+ }
+ try {
+ publishAddress = new InetSocketAddress(networkService.resolvePublishHostAddress(publishHost), publishPort);
+ } catch (Exception e) {
+ throw new BindTransportException("Failed to resolve publish address", e);
+ }
+ this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ final CountDownLatch latch = new CountDownLatch(1);
+ // make sure we run it on another thread than a possible IO handler thread
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ globalLock.writeLock().lock();
+ try {
+ for (Iterator<NodeChannels> it = connectedNodes.values().iterator(); it.hasNext(); ) {
+ NodeChannels nodeChannels = it.next();
+ it.remove();
+ nodeChannels.close();
+ }
+
+ if (serverChannel != null) {
+ try {
+ serverChannel.close().awaitUninterruptibly();
+ } finally {
+ serverChannel = null;
+ }
+ }
+
+ if (serverOpenChannels != null) {
+ serverOpenChannels.close();
+ serverOpenChannels = null;
+ }
+
+ if (serverBootstrap != null) {
+ serverBootstrap.releaseExternalResources();
+ serverBootstrap = null;
+ }
+
+ for (Iterator<NodeChannels> it = connectedNodes.values().iterator(); it.hasNext(); ) {
+ NodeChannels nodeChannels = it.next();
+ it.remove();
+ nodeChannels.close();
+ }
+
+ if (clientBootstrap != null) {
+ clientBootstrap.releaseExternalResources();
+ clientBootstrap = null;
+ }
+ } finally {
+ globalLock.writeLock().unlock();
+ latch.countDown();
+ }
+ }
+ });
+
+ try {
+ latch.await(30, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address) throws Exception {
+ int index = address.indexOf('[');
+ if (index != -1) {
+ String host = address.substring(0, index);
+ Set<String> ports = Strings.commaDelimitedListToSet(address.substring(index + 1, address.indexOf(']')));
+ List<TransportAddress> addresses = Lists.newArrayList();
+ for (String port : ports) {
+ int[] iPorts = new PortsRange(port).ports();
+ for (int iPort : iPorts) {
+ addresses.add(new InetSocketTransportAddress(host, iPort));
+ }
+ }
+ return addresses.toArray(new TransportAddress[addresses.size()]);
+ } else {
+ index = address.lastIndexOf(':');
+ if (index == -1) {
+ List<TransportAddress> addresses = Lists.newArrayList();
+ int[] iPorts = new PortsRange(this.port).ports();
+ for (int iPort : iPorts) {
+ addresses.add(new InetSocketTransportAddress(address, iPort));
+ }
+ return addresses.toArray(new TransportAddress[addresses.size()]);
+ } else {
+ String host = address.substring(0, index);
+ int port = Integer.parseInt(address.substring(index + 1));
+ return new TransportAddress[]{new InetSocketTransportAddress(host, port)};
+ }
+ }
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return InetSocketTransportAddress.class.equals(address);
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return this.boundAddress;
+ }
+
+ void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+ if (!lifecycle.started()) {
+ return; // ignore exceptions raised while the transport is not started
+ }
+ if (isCloseConnectionException(e.getCause())) {
+ logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel());
+ // close the channel, which will cause a node to be disconnected if relevant
+ ctx.getChannel().close();
+ disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
+ } else if (isConnectException(e.getCause())) {
+ logger.trace("connect exception caught on transport layer [{}]", e.getCause(), ctx.getChannel());
+ // close the channel as safe measure, which will cause a node to be disconnected if relevant
+ ctx.getChannel().close();
+ disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
+ } else if (e.getCause() instanceof CancelledKeyException) {
+ logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel());
+ // close the channel as safe measure, which will cause a node to be disconnected if relevant
+ ctx.getChannel().close();
+ disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
+ } else {
+ logger.warn("exception caught on transport layer [{}], closing connection", e.getCause(), ctx.getChannel());
+ // close the channel, which will cause a node to be disconnected if relevant
+ ctx.getChannel().close();
+ disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
+ }
+ }
+
+ TransportAddress wrapAddress(SocketAddress socketAddress) {
+ return new InetSocketTransportAddress((InetSocketAddress) socketAddress);
+ }
+
+ @Override
+ public long serverOpen() {
+ OpenChannelsHandler channels = serverOpenChannels;
+ return channels == null ? 0 : channels.numberOfOpenChannels();
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ Channel targetChannel = nodeChannel(node, options);
+
+ if (compress) {
+ options.withCompress(true);
+ }
+
+ byte status = 0;
+ status = TransportStatus.setRequest(status);
+
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ bStream.skip(NettyHeader.HEADER_SIZE);
+ StreamOutput stream = bStream;
+ // only compress if asked to, and the request is not a bytes request, since in that case only
+ // the header part would be compressed, and the "body" could not be extracted as compressed
+ if (options.compress() && (!(request instanceof BytesTransportRequest))) {
+ status = TransportStatus.setCompress(status);
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ stream = new HandlesStreamOutput(stream);
+
+ // we pick the smaller of the two versions, to support both backward and forward compatibility
+ // note, this is the only place we need to do this, since from here on we use the serialized version,
+ // which the receiving node will also use when it sends back the response
+ Version version = Version.smallest(this.version, node.version());
+
+ stream.setVersion(version);
+ stream.writeString(action);
+
+ ChannelBuffer buffer;
+ // it might be nice to somehow generalize this optimization, maybe a smart "paged" bytes output
+ // that creates paged channel buffers, but it's tricky to know when to do it (here the option is
+ // explicit)
+ if (request instanceof BytesTransportRequest) {
+ BytesTransportRequest bRequest = (BytesTransportRequest) request;
+ assert node.version().equals(bRequest.version());
+ bRequest.writeThin(stream);
+ stream.close();
+ ChannelBuffer headerBuffer = bStream.bytes().toChannelBuffer();
+ ChannelBuffer contentBuffer = bRequest.bytes().toChannelBuffer();
+ // gathering is set to false, because gathering causes the NIO layer to combine the buffers into a single direct buffer
+ buffer = new CompositeChannelBuffer(headerBuffer.order(), ImmutableList.<ChannelBuffer>of(headerBuffer, contentBuffer), false);
+ } else {
+ request.writeTo(stream);
+ stream.close();
+ buffer = bStream.bytes().toChannelBuffer();
+ }
+ NettyHeader.writeHeader(buffer, requestId, status, version);
+ targetChannel.write(buffer);
+
+ // We handle close connection exceptions in the #exceptionCaught method, which is the main reason this future listener is left commented out
+// channelFuture.addListener(new ChannelFutureListener() {
+// @Override public void operationComplete(ChannelFuture future) throws Exception {
+// if (!future.isSuccess()) {
+// // maybe add back the retry?
+// TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
+// if (handler != null) {
+// handler.handleException(new RemoteTransportException("Failed write request", new SendRequestTransportException(node, action, future.getCause())));
+// }
+// }
+// }
+// });
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return connectedNodes.containsKey(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ connectToNode(node, true);
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) {
+ connectToNode(node, false);
+ }
+
+ public void connectToNode(DiscoveryNode node, boolean light) {
+ if (!lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport");
+ }
+ if (node == null) {
+ throw new ConnectTransportException(null, "can't connect to a null node");
+ }
+ globalLock.readLock().lock();
+ try {
+ if (!lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport");
+ }
+ NodeChannels nodeChannels = connectedNodes.get(node);
+ if (nodeChannels != null) {
+ return;
+ }
+ connectionLock.acquire(node.id());
+ try {
+ if (!lifecycle.started()) {
+ throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport");
+ }
+ try {
+
+
+ if (light) {
+ nodeChannels = connectToChannelsLight(node);
+ } else {
+ nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], new Channel[connectionsPerNodePing]);
+ try {
+ connectToChannels(nodeChannels, node);
+ } catch (Exception e) {
+ nodeChannels.close();
+ throw e;
+ }
+ }
+
+ NodeChannels existing = connectedNodes.putIfAbsent(node, nodeChannels);
+ if (existing != null) {
+ // we are already connected to this node, close the newly created channels
+ nodeChannels.close();
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("connected to node [{}]", node);
+ }
+ transportServiceAdapter.raiseNodeConnected(node);
+ }
+
+ } catch (ConnectTransportException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new ConnectTransportException(node, "General node connection failure", e);
+ }
+ } finally {
+ connectionLock.release(node.id());
+ }
+ } finally {
+ globalLock.readLock().unlock();
+ }
+ }
+
+ private NodeChannels connectToChannelsLight(DiscoveryNode node) {
+ InetSocketAddress address = ((InetSocketTransportAddress) node.address()).address();
+ ChannelFuture connect = clientBootstrap.connect(address);
+ connect.awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
+ if (!connect.isSuccess()) {
+ throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connect.getCause());
+ }
+ Channel[] channels = new Channel[1];
+ channels[0] = connect.getChannel();
+ channels[0].getCloseFuture().addListener(new ChannelCloseListener(node));
+ return new NodeChannels(channels, channels, channels, channels, channels);
+ }
+
+ private void connectToChannels(NodeChannels nodeChannels, DiscoveryNode node) {
+ ChannelFuture[] connectRecovery = new ChannelFuture[nodeChannels.recovery.length];
+ ChannelFuture[] connectBulk = new ChannelFuture[nodeChannels.bulk.length];
+ ChannelFuture[] connectReg = new ChannelFuture[nodeChannels.reg.length];
+ ChannelFuture[] connectState = new ChannelFuture[nodeChannels.state.length];
+ ChannelFuture[] connectPing = new ChannelFuture[nodeChannels.ping.length];
+ InetSocketAddress address = ((InetSocketTransportAddress) node.address()).address();
+ for (int i = 0; i < connectRecovery.length; i++) {
+ connectRecovery[i] = clientBootstrap.connect(address);
+ }
+ for (int i = 0; i < connectBulk.length; i++) {
+ connectBulk[i] = clientBootstrap.connect(address);
+ }
+ for (int i = 0; i < connectReg.length; i++) {
+ connectReg[i] = clientBootstrap.connect(address);
+ }
+ for (int i = 0; i < connectState.length; i++) {
+ connectState[i] = clientBootstrap.connect(address);
+ }
+ for (int i = 0; i < connectPing.length; i++) {
+ connectPing[i] = clientBootstrap.connect(address);
+ }
+
+ try {
+ for (int i = 0; i < connectRecovery.length; i++) {
+ connectRecovery[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
+ if (!connectRecovery[i].isSuccess()) {
+ throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectRecovery[i].getCause());
+ }
+ nodeChannels.recovery[i] = connectRecovery[i].getChannel();
+ nodeChannels.recovery[i].getCloseFuture().addListener(new ChannelCloseListener(node));
+ }
+
+ for (int i = 0; i < connectBulk.length; i++) {
+ connectBulk[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
+ if (!connectBulk[i].isSuccess()) {
+ throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectBulk[i].getCause());
+ }
+ nodeChannels.bulk[i] = connectBulk[i].getChannel();
+ nodeChannels.bulk[i].getCloseFuture().addListener(new ChannelCloseListener(node));
+ }
+
+ for (int i = 0; i < connectReg.length; i++) {
+ connectReg[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
+ if (!connectReg[i].isSuccess()) {
+ throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectReg[i].getCause());
+ }
+ nodeChannels.reg[i] = connectReg[i].getChannel();
+ nodeChannels.reg[i].getCloseFuture().addListener(new ChannelCloseListener(node));
+ }
+
+ for (int i = 0; i < connectState.length; i++) {
+ connectState[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
+ if (!connectState[i].isSuccess()) {
+ throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectState[i].getCause());
+ }
+ nodeChannels.state[i] = connectState[i].getChannel();
+ nodeChannels.state[i].getCloseFuture().addListener(new ChannelCloseListener(node));
+ }
+
+ for (int i = 0; i < connectPing.length; i++) {
+ connectPing[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
+ if (!connectPing[i].isSuccess()) {
+ throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectPing[i].getCause());
+ }
+ nodeChannels.ping[i] = connectPing[i].getChannel();
+ nodeChannels.ping[i].getCloseFuture().addListener(new ChannelCloseListener(node));
+ }
+
+ if (nodeChannels.recovery.length == 0) {
+ if (nodeChannels.bulk.length > 0) {
+ nodeChannels.recovery = nodeChannels.bulk;
+ } else {
+ nodeChannels.recovery = nodeChannels.reg;
+ }
+ }
+ if (nodeChannels.bulk.length == 0) {
+ nodeChannels.bulk = nodeChannels.reg;
+ }
+ } catch (RuntimeException e) {
+ // clean the futures
+ for (ChannelFuture future : ImmutableList.<ChannelFuture>builder().add(connectRecovery).add(connectBulk).add(connectReg).add(connectState).add(connectPing).build()) {
+ future.cancel();
+ if (future.getChannel() != null && future.getChannel().isOpen()) {
+ try {
+ future.getChannel().close();
+ } catch (Exception e1) {
+ // ignore
+ }
+ }
+ }
+ throw e;
+ }
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+ NodeChannels nodeChannels = connectedNodes.remove(node);
+ if (nodeChannels != null) {
+ connectionLock.acquire(node.id());
+ try {
+ try {
+ nodeChannels.close();
+ } finally {
+ logger.debug("disconnected from [{}]", node);
+ transportServiceAdapter.raiseNodeDisconnected(node);
+ }
+ } finally {
+ connectionLock.release(node.id());
+ }
+ }
+ }
+
+ /**
+ * Disconnects from a node, only if the relevant channel is found to be part of the node channels.
+ */
+ private void disconnectFromNode(DiscoveryNode node, Channel channel, String reason) {
+ NodeChannels nodeChannels = connectedNodes.get(node);
+ if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
+ connectionLock.acquire(node.id());
+ if (!nodeChannels.hasChannel(channel)) { // might have been removed in the meantime, safety check
+ assert !connectedNodes.containsKey(node);
+ } else {
+ try {
+ connectedNodes.remove(node);
+ try {
+ nodeChannels.close();
+ } finally {
+ logger.debug("disconnected from [{}], {}", node, reason);
+ transportServiceAdapter.raiseNodeDisconnected(node);
+ }
+ } finally {
+ connectionLock.release(node.id());
+ }
+ }
+ }
+ }
+
+ /**
+ * Disconnects from a node if a channel is found as part of that nodes channels.
+ */
+ private void disconnectFromNodeChannel(Channel channel, Throwable failure) {
+ for (DiscoveryNode node : connectedNodes.keySet()) {
+ NodeChannels nodeChannels = connectedNodes.get(node);
+ if (nodeChannels != null && nodeChannels.hasChannel(channel)) {
+ connectionLock.acquire(node.id());
+ if (!nodeChannels.hasChannel(channel)) { // might have been removed in the meantime, safety check
+ assert !connectedNodes.containsKey(node);
+ } else {
+ try {
+ connectedNodes.remove(node);
+ try {
+ nodeChannels.close();
+ } finally {
+ logger.debug("disconnected from [{}] on channel failure", failure, node);
+ transportServiceAdapter.raiseNodeDisconnected(node);
+ }
+ } finally {
+ connectionLock.release(node.id());
+ }
+ }
+ }
+ }
+ }
+
+ private Channel nodeChannel(DiscoveryNode node, TransportRequestOptions options) throws ConnectTransportException {
+ NodeChannels nodeChannels = connectedNodes.get(node);
+ if (nodeChannels == null) {
+ throw new NodeNotConnectedException(node, "Node not connected");
+ }
+ return nodeChannels.channel(options.type());
+ }
+
+ private class ChannelCloseListener implements ChannelFutureListener {
+
+ private final DiscoveryNode node;
+
+ private ChannelCloseListener(DiscoveryNode node) {
+ this.node = node;
+ }
+
+ @Override
+ public void operationComplete(ChannelFuture future) throws Exception {
+ disconnectFromNode(node, future.getChannel(), "channel closed event");
+ }
+ }
+
+ public static class NodeChannels {
+
+ private Channel[] recovery;
+ private final AtomicInteger recoveryCounter = new AtomicInteger();
+ private Channel[] bulk;
+ private final AtomicInteger bulkCounter = new AtomicInteger();
+ private Channel[] reg;
+ private final AtomicInteger regCounter = new AtomicInteger();
+ private Channel[] state;
+ private final AtomicInteger stateCounter = new AtomicInteger();
+ private Channel[] ping;
+ private final AtomicInteger pingCounter = new AtomicInteger();
+
+ public NodeChannels(Channel[] recovery, Channel[] bulk, Channel[] reg, Channel[] state, Channel[] ping) {
+ this.recovery = recovery;
+ this.bulk = bulk;
+ this.reg = reg;
+ this.state = state;
+ this.ping = ping;
+ }
+
+ public boolean hasChannel(Channel channel) {
+ return hasChannel(channel, recovery) || hasChannel(channel, bulk) || hasChannel(channel, reg) || hasChannel(channel, state) || hasChannel(channel, ping);
+ }
+
+ private boolean hasChannel(Channel channel, Channel[] channels) {
+ for (Channel channel1 : channels) {
+ if (channel.equals(channel1)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public Channel channel(TransportRequestOptions.Type type) {
+ if (type == TransportRequestOptions.Type.REG) {
+ return reg[Math.abs(regCounter.incrementAndGet()) % reg.length];
+ } else if (type == TransportRequestOptions.Type.STATE) {
+ return state[Math.abs(stateCounter.incrementAndGet()) % state.length];
+ } else if (type == TransportRequestOptions.Type.PING) {
+ return ping[Math.abs(pingCounter.incrementAndGet()) % ping.length];
+ } else if (type == TransportRequestOptions.Type.BULK) {
+ return bulk[Math.abs(bulkCounter.incrementAndGet()) % bulk.length];
+ } else if (type == TransportRequestOptions.Type.RECOVERY) {
+ return recovery[Math.abs(recoveryCounter.incrementAndGet()) % recovery.length];
+ } else {
+ throw new ElasticsearchIllegalArgumentException("no type channel for [" + type + "]");
+ }
+ }
+
+ public synchronized void close() {
+ List<ChannelFuture> futures = new ArrayList<ChannelFuture>();
+ closeChannelsAndWait(recovery, futures);
+ closeChannelsAndWait(bulk, futures);
+ closeChannelsAndWait(reg, futures);
+ closeChannelsAndWait(state, futures);
+ closeChannelsAndWait(ping, futures);
+ for (ChannelFuture future : futures) {
+ future.awaitUninterruptibly();
+ }
+ }
+
+ private void closeChannelsAndWait(Channel[] channels, List<ChannelFuture> futures) {
+ for (Channel channel : channels) {
+ try {
+ if (channel != null && channel.isOpen()) {
+ futures.add(channel.close());
+ }
+ } catch (Exception e) {
+ //ignore
+ }
+ }
+ }
+ }
+}
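One detail worth calling out from NodeChannels.channel(TransportRequestOptions.Type) above: each connection type round-robins over its channel array with an atomic counter. A self-contained sketch of that selection strategy (names are illustrative, not part of the patch):

import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinSketch {
    private final String[] channels;                       // stands in for one Channel[] type
    private final AtomicInteger counter = new AtomicInteger();

    public RoundRobinSketch(String... channels) {
        this.channels = channels;
    }

    public String next() {
        // Math.abs guards against the counter wrapping negative; note that
        // Math.abs(Integer.MIN_VALUE) is still negative, a once-per-2^32 edge
        // the original code shares
        return channels[Math.abs(counter.incrementAndGet()) % channels.length];
    }

    public static void main(String[] args) {
        RoundRobinSketch reg = new RoundRobinSketch("reg-0", "reg-1", "reg-2");
        for (int i = 0; i < 5; i++) {
            System.out.println(reg.next());                // reg-1, reg-2, reg-0, reg-1, reg-2
        }
    }
}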
diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java
new file mode 100644
index 0000000..5d50d4e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.ThrowableObjectOutputStream;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.HandlesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.support.TransportStatus;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+
+import java.io.IOException;
+import java.io.NotSerializableException;
+
+/**
+ *
+ */
+public class NettyTransportChannel implements TransportChannel {
+
+ private final NettyTransport transport;
+ private final Version version;
+ private final String action;
+ private final Channel channel;
+ private final long requestId;
+
+ public NettyTransportChannel(NettyTransport transport, String action, Channel channel, long requestId, Version version) {
+ this.version = version;
+ this.transport = transport;
+ this.action = action;
+ this.channel = channel;
+ this.requestId = requestId;
+ }
+
+ @Override
+ public String action() {
+ return this.action;
+ }
+
+ @Override
+ public void sendResponse(TransportResponse response) throws IOException {
+ sendResponse(response, TransportResponseOptions.EMPTY);
+ }
+
+ @Override
+ public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
+ if (transport.compress) {
+ options.withCompress(true);
+ }
+
+ byte status = 0;
+ status = TransportStatus.setResponse(status);
+
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ bStream.skip(NettyHeader.HEADER_SIZE);
+ StreamOutput stream = bStream;
+ if (options.compress()) {
+ status = TransportStatus.setCompress(status);
+ stream = CompressorFactory.defaultCompressor().streamOutput(stream);
+ }
+ stream = new HandlesStreamOutput(stream);
+ stream.setVersion(version);
+ response.writeTo(stream);
+ stream.close();
+
+ ChannelBuffer buffer = bStream.bytes().toChannelBuffer();
+ NettyHeader.writeHeader(buffer, requestId, status, version);
+ channel.write(buffer);
+ }
+
+ @Override
+ public void sendResponse(Throwable error) throws IOException {
+ BytesStreamOutput stream = new BytesStreamOutput();
+ try {
+ stream.skip(NettyHeader.HEADER_SIZE);
+ RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, error);
+ ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(stream);
+ too.writeObject(tx);
+ too.close();
+ } catch (NotSerializableException e) {
+ stream.reset();
+ stream.skip(NettyHeader.HEADER_SIZE);
+ RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, new NotSerializableTransportException(error));
+ ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(stream);
+ too.writeObject(tx);
+ too.close();
+ }
+
+ byte status = 0;
+ status = TransportStatus.setResponse(status);
+ status = TransportStatus.setError(status);
+
+ ChannelBuffer buffer = stream.bytes().toChannelBuffer();
+ NettyHeader.writeHeader(buffer, requestId, status, version);
+ channel.write(buffer);
+ }
+}
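Both sendResponse variants above, like sendRequest in NettyTransport, reserve NettyHeader.HEADER_SIZE bytes up front, serialize the body, and only then backfill the header, because the frame length is not known until serialization finishes. A reserve-then-backfill sketch in plain java.nio (standalone and illustrative, not part of the patch; a 6-byte header is used for brevity):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class BackfillSketch {
    private static final int HEADER_SIZE = 6;              // 'E', 'S', and a 4-byte length

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.position(HEADER_SIZE);                         // reserve space, like bStream.skip(HEADER_SIZE)
        buf.put("hello".getBytes(StandardCharsets.UTF_8)); // "serialize" the body
        int end = buf.position();

        // backfill the header now that the body size is known
        buf.put(0, (byte) 'E');
        buf.put(1, (byte) 'S');
        buf.putInt(2, end - HEADER_SIZE);                  // remaining size, excluding the 6-byte prefix

        buf.flip();
        System.out.println("frame length field = " + buf.getInt(2));   // prints 5
    }
}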
diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java b/src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java
new file mode 100644
index 0000000..b96e9ee
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/NettyTransportModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.Transport;
+
+/**
+ *
+ */
+public class NettyTransportModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public NettyTransportModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(NettyTransport.class).asEagerSingleton();
+ bind(Transport.class).to(NettyTransport.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java b/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java
new file mode 100644
index 0000000..6f1befe
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import org.jboss.netty.handler.codec.frame.TooLongFrameException;
+
+import java.io.StreamCorruptedException;
+
+/**
+ */
+public class SizeHeaderFrameDecoder extends FrameDecoder {
+
+ private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().mem().heapMax().bytes() * 0.9);
+
+ @Override
+ protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception {
+ if (buffer.readableBytes() < 6) {
+ return null;
+ }
+
+ int readerIndex = buffer.readerIndex();
+ if (buffer.getByte(readerIndex) != 'E' || buffer.getByte(readerIndex + 1) != 'S') {
+ throw new StreamCorruptedException("invalid internal transport message format");
+ }
+
+ int dataLen = buffer.getInt(buffer.readerIndex() + 2);
+ if (dataLen <= 0) {
+ throw new StreamCorruptedException("invalid data length: " + dataLen);
+ }
+ // safety against too large frames being sent
+ if (dataLen > NINETY_PER_HEAP_SIZE) {
+ throw new TooLongFrameException(
+ "transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]");
+ }
+
+ if (buffer.readableBytes() < dataLen + 6) {
+ return null;
+ }
+ buffer.skipBytes(6);
+ return buffer;
+ }
+}
\ No newline at end of file
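The decoder above returns null until a complete frame has arrived, which is FrameDecoder's signal to wait for more bytes before trying again. A JDK-only sketch of the same framing check (standalone and illustrative, not part of the patch):

import java.io.StreamCorruptedException;
import java.nio.ByteBuffer;

public class FrameSketch {
    // returns the payload length if a whole frame is buffered, or -1 to wait for more bytes
    static int tryDecode(ByteBuffer buf) throws StreamCorruptedException {
        if (buf.remaining() < 6) {
            return -1;                                     // not even the 6-byte prefix yet
        }
        int base = buf.position();
        if (buf.get(base) != 'E' || buf.get(base + 1) != 'S') {
            throw new StreamCorruptedException("invalid internal transport message format");
        }
        int dataLen = buf.getInt(base + 2);
        if (dataLen <= 0) {
            throw new StreamCorruptedException("invalid data length: " + dataLen);
        }
        if (buf.remaining() < dataLen + 6) {
            return -1;                                     // frame not fully arrived yet
        }
        buf.position(base + 6);                            // consume the prefix, leave the payload
        return dataLen;
    }

    public static void main(String[] args) throws Exception {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put((byte) 'E').put((byte) 'S').putInt(4).putInt(0xBEEF);
        buf.flip();
        System.out.println("payload bytes: " + tryDecode(buf));   // prints 4
    }
}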
diff --git a/src/main/java/org/elasticsearch/transport/support/TransportStatus.java b/src/main/java/org/elasticsearch/transport/support/TransportStatus.java
new file mode 100644
index 0000000..8749445
--- /dev/null
+++ b/src/main/java/org/elasticsearch/transport/support/TransportStatus.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.support;
+
+/**
+ */
+public class TransportStatus {
+
+ private static final byte STATUS_REQRES = 1 << 0;
+ private static final byte STATUS_ERROR = 1 << 1;
+ private static final byte STATUS_COMPRESS = 1 << 2;
+
+ public static boolean isRequest(byte value) {
+ return (value & STATUS_REQRES) == 0;
+ }
+
+ public static byte setRequest(byte value) {
+ value &= ~STATUS_REQRES;
+ return value;
+ }
+
+ public static byte setResponse(byte value) {
+ value |= STATUS_REQRES;
+ return value;
+ }
+
+ public static boolean isError(byte value) {
+ return (value & STATUS_ERROR) != 0;
+ }
+
+ public static byte setError(byte value) {
+ value |= STATUS_ERROR;
+ return value;
+ }
+
+ public static boolean isCompress(byte value) {
+ return (value & STATUS_COMPRESS) != 0;
+ }
+
+ public static byte setCompress(byte value) {
+ value |= STATUS_COMPRESS;
+ return value;
+ }
+}
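The three flags above are what NettyTransport and NettyTransportChannel fold into the status byte before handing it to NettyHeader.writeHeader. A short usage sketch, relying only on TransportStatus as defined in this patch (the wrapper class is illustrative):

import org.elasticsearch.transport.support.TransportStatus;

public class StatusSketch {
    public static void main(String[] args) {
        byte status = 0;
        status = TransportStatus.setRequest(status);       // clears bit 0: this is a request
        status = TransportStatus.setCompress(status);      // sets bit 2: the body is compressed

        System.out.println(TransportStatus.isRequest(status));  // true
        System.out.println(TransportStatus.isError(status));    // false
        System.out.println(TransportStatus.isCompress(status)); // true
    }
}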
diff --git a/src/main/java/org/elasticsearch/tribe/TribeModule.java b/src/main/java/org/elasticsearch/tribe/TribeModule.java
new file mode 100644
index 0000000..fb642d1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/tribe/TribeModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ */
+public class TribeModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(TribeService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/tribe/TribeService.java b/src/main/java/org/elasticsearch/tribe/TribeService.java
new file mode 100644
index 0000000..c3dc23e
--- /dev/null
+++ b/src/main/java/org/elasticsearch/tribe/TribeService.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+import org.elasticsearch.rest.RestStatus;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * The tribe service holds a list of node clients connected to a list of tribe members, and uses their
+ * cluster state events to update this local node's cluster state with a merged view of them.
+ * <p/>
+ * The {@link #processSettings(org.elasticsearch.common.settings.Settings)} method should be called before
+ * starting the node, so that the current node is properly configured with the relevant tribe node settings.
+ * <p/>
+ * The tribe node settings make sure the discovery used is "local", but with no master elected. This means no
+ * write level master node operations will work ({@link org.elasticsearch.discovery.MasterNotDiscoveredException}
+ * will be thrown), and state level metadata operations will automatically use the local flag.
+ * <p/>
+ * The state merged from the different clusters includes the list of nodes, the metadata, and the routing table.
+ * Each merged node records in its attributes which tribe member it came from, and each merged index records in its
+ * settings which tribe member it came from. If an index has already been merged from one cluster and an index with
+ * the same name is discovered in another cluster, the conflicting one is discarded, since the index name must
+ * unambiguously identify the cluster its operations should be propagated to.
+ */
+public class TribeService extends AbstractLifecycleComponent<TribeService> {
+
+ public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, ClusterBlockLevel.METADATA);
+ public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, ClusterBlockLevel.WRITE);
+
+ public static Settings processSettings(Settings settings) {
+ if (settings.get(TRIBE_NAME) != null) {
+ // if it's a node client started by this service as a tribe client, remove any tribe group setting
+ // to avoid recursive configuration
+ ImmutableSettings.Builder sb = ImmutableSettings.builder().put(settings);
+ for (String s : settings.getAsMap().keySet()) {
+ if (s.startsWith("tribe.") && !s.equals(TRIBE_NAME)) {
+ sb.remove(s);
+ }
+ }
+ return sb.build();
+ }
+ Map<String, Settings> nodesSettings = settings.getGroups("tribe", true);
+ if (nodesSettings.isEmpty()) {
+ return settings;
+ }
+ // it's a tribe-configured node; force the settings it requires
+ ImmutableSettings.Builder sb = ImmutableSettings.builder().put(settings);
+ sb.put("node.client", true); // this node should just act as a node client
+ sb.put("discovery.type", "local"); // a tribe node should not use zen discovery
+ sb.put("discovery.initial_state_timeout", 0); // nothing is going to be discovered, since no master will be elected
+ if (sb.get("cluster.name") == null) {
+ sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM
+ }
+ sb.put("gateway.type", "none"); // we shouldn't store anything locally...
+ sb.put(TransportMasterNodeReadOperationAction.FORCE_LOCAL_SETTING, true);
+ return sb.build();
+ }
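+
+ // Illustrative sketch (editor's note, not upstream code): given a hypothetical
+ // elasticsearch.yml such as
+ //
+ // tribe:
+ // t1:
+ // cluster.name: cluster_one
+ // t2:
+ // cluster.name: cluster_two
+ //
+ // processSettings(...) keeps the tribe.* groups on the tribe node itself while
+ // forcing node.client, discovery.type and gateway.type as above; each inner node
+ // client created in the constructor below carries TRIBE_NAME, so its own pass
+ // through processSettings(...) takes the first branch and strips the other
+ // tribe.* groups.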
+
+ public static final String TRIBE_NAME = "tribe.name";
+
+ private final ClusterService clusterService;
+
+ private final List<InternalNode> nodes = Lists.newCopyOnWriteArrayList();
+
+ @Inject
+ public TribeService(Settings settings, ClusterService clusterService) {
+ super(settings);
+ this.clusterService = clusterService;
+ Map<String, Settings> nodesSettings = Maps.newHashMap(settings.getGroups("tribe", true));
+ nodesSettings.remove("blocks"); // "tribe.blocks.*" is tribe-level configuration, not a tribe client definition
+ for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) {
+ ImmutableSettings.Builder sb = ImmutableSettings.builder().put(entry.getValue());
+ sb.put("node.name", settings.get("name") + "/" + entry.getKey());
+ sb.put(TRIBE_NAME, entry.getKey());
+ if (sb.get("http.enabled") == null) {
+ sb.put("http.enabled", false);
+ }
+ nodes.add((InternalNode) NodeBuilder.nodeBuilder().settings(sb).client(true).build());
+ }
+
+ if (!nodes.isEmpty()) {
+ // remove the initial election / recovery blocks since we are not going to have a
+ // master elected in this single tribe node local "cluster"
+ clusterService.removeInitialStateBlock(Discovery.NO_MASTER_BLOCK);
+ clusterService.removeInitialStateBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
+ if (settings.getAsBoolean("tribe.blocks.write", false)) {
+ clusterService.addInitialStateBlock(TRIBE_WRITE_BLOCK);
+ }
+ if (settings.getAsBoolean("tribe.blocks.metadata", false)) {
+ clusterService.addInitialStateBlock(TRIBE_METADATA_BLOCK);
+ }
+ for (InternalNode node : nodes) {
+ node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
+ }
+ }
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ final CountDownLatch latch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("updating local node id", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ // add our local node to the mix...
+ return ClusterState.builder(currentState)
+ .nodes(DiscoveryNodes.builder(currentState.nodes()).put(clusterService.localNode()).localNodeId(clusterService.localNode().id()))
+ .build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ try {
+ logger.error("{}", t, source);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ latch.countDown();
+ }
+ });
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new ElasticsearchIllegalStateException("Interrupted while starting [" + this.getClass().getSimpleName() + "]", e);
+ }
+ for (InternalNode node : nodes) {
+ try {
+ node.start();
+ } catch (Throwable e) {
+ // calling close is safe on nodes that were never started, so we can simply iterate over all of them
+ for (InternalNode otherNode : nodes) {
+ try {
+ otherNode.close();
+ } catch (Throwable t) {
+ logger.warn("failed to close node {} on failed start", otherNode, t);
+ }
+ }
+ if (e instanceof RuntimeException) {
+ throw (RuntimeException) e;
+ }
+ throw new ElasticsearchException(e.getMessage(), e);
+ }
+ }
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ for (InternalNode node : nodes) {
+ try {
+ node.stop();
+ } catch (Throwable t) {
+ logger.warn("failed to stop node {}", t, node);
+ }
+ }
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ for (InternalNode node : nodes) {
+ try {
+ node.close();
+ } catch (Throwable t) {
+ logger.warn("failed to close node {}", t, node);
+ }
+ }
+ }
+
+ class TribeClusterStateListener implements ClusterStateListener {
+
+ private final InternalNode tribeNode;
+ private final String tribeName;
+
+ TribeClusterStateListener(InternalNode tribeNode) {
+ this.tribeNode = tribeNode;
+ this.tribeName = tribeNode.settings().get(TRIBE_NAME);
+ }
+
+ @Override
+ public void clusterChanged(final ClusterChangedEvent event) {
+ logger.debug("[{}] received cluster event, [{}]", tribeName, event.source());
+ clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ ClusterState tribeState = event.state();
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
+ // -- merge nodes
+ // go over existing nodes, and see if they need to be removed
+ for (DiscoveryNode discoNode : currentState.nodes()) {
+ String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
+ if (markedTribeName != null && markedTribeName.equals(tribeName)) {
+ if (tribeState.nodes().get(discoNode.id()) == null) {
+ logger.info("[{}] removing node [{}]", tribeName, discoNode);
+ nodes.remove(discoNode.id());
+ }
+ }
+ }
+ // go over tribe nodes, and see if they need to be added
+ for (DiscoveryNode tribe : tribeState.nodes()) {
+ if (currentState.nodes().get(tribe.id()) == null) {
+ // a new node, add it, but also add the tribe name to the attributes
+ ImmutableMap<String, String> tribeAttr = MapBuilder.newMapBuilder(tribe.attributes()).put(TRIBE_NAME, tribeName).immutableMap();
+ DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), tribeAttr, tribe.version());
+ logger.info("[{}] adding node [{}]", tribeName, discoNode);
+ nodes.put(discoNode);
+ }
+ }
+
+ // -- merge metadata
+ MetaData.Builder metaData = MetaData.builder(currentState.metaData());
+ RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
+ // go over existing indices, and see if they need to be removed
+ for (IndexMetaData index : currentState.metaData()) {
+ String markedTribeName = index.settings().get(TRIBE_NAME);
+ if (markedTribeName != null && markedTribeName.equals(tribeName)) {
+ IndexMetaData tribeIndex = tribeState.metaData().index(index.index());
+ if (tribeIndex == null) {
+ logger.info("[{}] removing index [{}]", tribeName, index.index());
+ metaData.remove(index.index());
+ routingTable.remove(index.index());
+ } else {
+ // always make sure to update the metadata and routing table, in case
+ // there are changes in them (new mapping, shards moving from initializing to started)
+ routingTable.add(tribeState.routingTable().index(index.index()));
+ Settings tribeSettings = ImmutableSettings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
+ metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
+ }
+ }
+ }
+ // go over the tribe indices, and see if they need to be added
+ for (IndexMetaData tribeIndex : tribeState.metaData()) {
+ if (!currentState.metaData().hasIndex(tribeIndex.index())) {
+ // a new index, add it, and add the tribe name as a setting
+ logger.info("[{}] adding index [{}]", tribeName, tribeIndex.index());
+ Settings tribeSettings = ImmutableSettings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
+ metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
+ routingTable.add(tribeState.routingTable().index(tribeIndex.index()));
+ }
+ }
+
+ return ClusterState.builder(currentState).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("failed to process [{}]", t, source);
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/elasticsearch/watcher/AbstractResourceWatcher.java b/src/main/java/org/elasticsearch/watcher/AbstractResourceWatcher.java
new file mode 100644
index 0000000..55f0440
--- /dev/null
+++ b/src/main/java/org/elasticsearch/watcher/AbstractResourceWatcher.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * Abstract resource watcher framework, which handles adding and removing listeners
+ * and delegates the actual resource checks to its subclasses.
+ */
+public abstract class AbstractResourceWatcher<Listener> implements ResourceWatcher {
+ private final List<Listener> listeners = new CopyOnWriteArrayList<Listener>();
+ private boolean initialized = false;
+
+ @Override
+ public void init() {
+ if (!initialized) {
+ doInit();
+ initialized = true;
+ }
+ }
+
+ @Override
+ public void checkAndNotify() {
+ init();
+ doCheckAndNotify();
+ }
+
+ /**
+ * Registers a new listener
+ */
+ public void addListener(Listener listener) {
+ listeners.add(listener);
+ }
+
+ /**
+ * Unregisters a listener
+ */
+ public void remove(Listener listener) {
+ listeners.remove(listener);
+ }
+
+ /**
+ * Returns the list of registered listeners
+ */
+ protected List<Listener> listeners() {
+ return listeners;
+ }
+
+ /**
+ * Will be called once on initialization
+ */
+ protected abstract void doInit();
+
+ /**
+ * Will be called periodically
+ * <p/>
+ * The implementing watcher should check the resource and notify all {@link #listeners()}.
+ */
+ protected abstract void doCheckAndNotify();
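+
+ // Illustrative sketch (editor's note, not upstream code): a minimal subclass only
+ // implements the two hooks above; e.g. a hypothetical watcher polling a flag
+ // (assumes java.util.concurrent.atomic.AtomicBoolean):
+ //
+ // class FlagWatcher extends AbstractResourceWatcher<Runnable> {
+ // private final AtomicBoolean changed = new AtomicBoolean();
+ // public void mark() { changed.set(true); }
+ // protected void doInit() { changed.set(false); }
+ // protected void doCheckAndNotify() {
+ // if (changed.getAndSet(false)) {
+ // for (Runnable listener : listeners()) listener.run();
+ // }
+ // }
+ // }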
+
+}
diff --git a/src/main/java/org/elasticsearch/watcher/FileChangesListener.java b/src/main/java/org/elasticsearch/watcher/FileChangesListener.java
new file mode 100644
index 0000000..f680fac
--- /dev/null
+++ b/src/main/java/org/elasticsearch/watcher/FileChangesListener.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import java.io.File;
+
+/**
+ * Callback class that {@link FileWatcher} uses to notify its listeners about file changes.
+ */
+public class FileChangesListener {
+ /**
+ * Called for every file found in the watched directory during initialization
+ */
+ public void onFileInit(File file) {
+
+ }
+
+ /**
+ * Called for every subdirectory found in the watched directory during initialization
+ */
+ public void onDirectoryInit(File file) {
+
+ }
+
+ /**
+ * Called for every new file found in the watched directory
+ */
+ public void onFileCreated(File file) {
+
+ }
+
+ /**
+ * Called for every file that disappeared in the watched directory
+ */
+ public void onFileDeleted(File file) {
+
+ }
+
+ /**
+ * Called for every file that was changed in the watched directory
+ */
+ public void onFileChanged(File file) {
+
+ }
+
+ /**
+ * Called for every new subdirectory found in the watched directory
+ */
+ public void onDirectoryCreated(File file) {
+
+ }
+
+ /**
+ * Called for every subdirectory that disappeared in the watched directory
+ */
+ public void onDirectoryDeleted(File file) {
+
+ }
+}
diff --git a/src/main/java/org/elasticsearch/watcher/FileWatcher.java b/src/main/java/org/elasticsearch/watcher/FileWatcher.java
new file mode 100644
index 0000000..da84907
--- /dev/null
+++ b/src/main/java/org/elasticsearch/watcher/FileWatcher.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import java.io.File;
+import java.util.Arrays;
+
+/**
+ * File resources watcher
+ *
+ * The file watcher checks a directory and all of its subdirectories for file changes and notifies its listeners accordingly.
+ */
+public class FileWatcher extends AbstractResourceWatcher<FileChangesListener> {
+
+ private FileObserver rootFileObserver;
+
+ /**
+ * Creates new file watcher on the given directory
+ */
+ public FileWatcher(File file) {
+ rootFileObserver = new FileObserver(file);
+ }
+
+ @Override
+ protected void doInit() {
+ rootFileObserver.init(true);
+ }
+
+ @Override
+ protected void doCheckAndNotify() {
+ rootFileObserver.checkAndNotify();
+ }
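+
+ // Illustrative sketch (editor's note, not upstream code): typical wiring with a
+ // hypothetical path and a listener that only reacts to changed files:
+ //
+ // FileWatcher watcher = new FileWatcher(new File("/path/to/config"));
+ // watcher.addListener(new FileChangesListener() {
+ // @Override
+ // public void onFileChanged(File file) {
+ // // reload whatever resource is backed by this file
+ // }
+ // });
+ // watcher.init(); // walks the tree once, firing onFileInit/onDirectoryInit
+ // watcher.checkAndNotify(); // later calls diff against the recorded state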
+
+ private static final FileObserver[] EMPTY_DIRECTORY = new FileObserver[0];
+
+ private class FileObserver {
+ private File file;
+ private boolean exists;
+ private long length;
+ private long lastModified;
+ private boolean isDirectory;
+ private FileObserver[] children;
+
+ public FileObserver(File file) {
+ this.file = file;
+ }
+
+ public void checkAndNotify() {
+ boolean prevExists = exists;
+ boolean prevIsDirectory = isDirectory;
+ long prevLength = length;
+ long prevLastModified = lastModified;
+
+ exists = file.exists();
+
+ if (exists) {
+ isDirectory = file.isDirectory();
+ if (isDirectory) {
+ length = 0;
+ lastModified = 0;
+ } else {
+ length = file.length();
+ lastModified = file.lastModified();
+ }
+ } else {
+ isDirectory = false;
+ length = 0;
+ lastModified = 0;
+ }
+
+ // Perform notifications and update children for the current file
+ if (prevExists) {
+ if (exists) {
+ if (isDirectory) {
+ if (prevIsDirectory) {
+ // Remained a directory
+ updateChildren();
+ } else {
+ // File replaced by directory
+ onFileDeleted();
+ onDirectoryCreated(false);
+ }
+ } else {
+ if (prevIsDirectory) {
+ // Directory replaced by file
+ onDirectoryDeleted();
+ onFileCreated(false);
+ } else {
+ // Remained a file
+ if (prevLastModified != lastModified || prevLength != length) {
+ onFileChanged();
+ }
+ }
+ }
+ } else {
+ // Deleted
+ if (prevIsDirectory) {
+ onDirectoryDeleted();
+ } else {
+ onFileDeleted();
+ }
+ }
+ } else {
+ // Created
+ if (exists) {
+ if (isDirectory) {
+ onDirectoryCreated(false);
+ } else {
+ onFileCreated(false);
+ }
+ }
+ }
+
+ }
+
+ private void init(boolean initial) {
+ exists = file.exists();
+ if (exists) {
+ isDirectory = file.isDirectory();
+ if (isDirectory) {
+ onDirectoryCreated(initial);
+ } else {
+ length = file.length();
+ lastModified = file.lastModified();
+ onFileCreated(initial);
+ }
+ }
+ }
+
+ private FileObserver createChild(File file, boolean initial) {
+ FileObserver child = new FileObserver(file);
+ child.init(initial);
+ return child;
+ }
+
+ private File[] listFiles() {
+ File[] files = file.listFiles();
+ if (files != null) {
+ Arrays.sort(files);
+ }
+ return files;
+ }
+
+ private FileObserver[] listChildren(boolean initial) {
+ File[] files = listFiles();
+ if (files != null && files.length > 0) {
+ FileObserver[] children = new FileObserver[files.length];
+ for (int i = 0; i < files.length; i++) {
+ children[i] = createChild(files[i], initial);
+ }
+ return children;
+ } else {
+ return EMPTY_DIRECTORY;
+ }
+ }
+
+ private void updateChildren() {
+ File[] files = listFiles();
+ if (files != null && files.length > 0) {
+ FileObserver[] newChildren = new FileObserver[files.length];
+ int child = 0;
+ int file = 0;
+ while (file < files.length || child < children.length ) {
+ int compare;
+
+ if (file >= files.length) {
+ compare = -1;
+ } else if (child >= children.length) {
+ compare = 1;
+ } else {
+ compare = children[child].file.compareTo(files[file]);
+ }
+
+ if (compare == 0) {
+ // Same file: check it for changes and carry it over
+ children[child].checkAndNotify();
+ newChildren[file] = children[child];
+ file++;
+ child++;
+ } else {
+ if (compare > 0) {
+ // This child doesn't appear in the old list - init it
+ newChildren[file] = createChild(files[file], false);
+ file++;
+ } else {
+ // The child from the old list is missing in the new list
+ // Delete it
+ deleteChild(child);
+ child++;
+ }
+ }
+ }
+ children = newChildren;
+ } else {
+ // No files - delete all children
+ for (int child = 0; child < children.length; child++) {
+ deleteChild(child);
+ }
+ children = EMPTY_DIRECTORY;
+ }
+ }
+
+ private void deleteChild(int child) {
+ if (children[child].exists) {
+ if (children[child].isDirectory) {
+ children[child].onDirectoryDeleted();
+ } else {
+ children[child].onFileDeleted();
+ }
+ }
+ }
+
+ private void onFileCreated(boolean initial) {
+ for (FileChangesListener listener : listeners()) {
+ if (initial) {
+ listener.onFileInit(file);
+ } else {
+ listener.onFileCreated(file);
+ }
+ }
+ }
+
+ private void onFileDeleted() {
+ for (FileChangesListener listener : listeners()) {
+ listener.onFileDeleted(file);
+ }
+ }
+
+ private void onFileChanged() {
+ for (FileChangesListener listener : listeners()) {
+ listener.onFileChanged(file);
+ }
+ }
+
+ private void onDirectoryCreated(boolean initial) {
+ for (FileChangesListener listener : listeners()) {
+ if (initial) {
+ listener.onDirectoryInit(file);
+ } else {
+ listener.onDirectoryCreated(file);
+ }
+ }
+ children = listChildren(initial);
+ }
+
+ private void onDirectoryDeleted() {
+ // First delete all children
+ for (int child = 0; child < children.length; child++) {
+ deleteChild(child);
+ }
+ for (FileChangesListener listener : listeners()) {
+ listener.onDirectoryDeleted(file);
+ }
+ }
+
+ }
+
+}
diff --git a/src/main/java/org/elasticsearch/watcher/ResourceWatcher.java b/src/main/java/org/elasticsearch/watcher/ResourceWatcher.java
new file mode 100644
index 0000000..fa13dea
--- /dev/null
+++ b/src/main/java/org/elasticsearch/watcher/ResourceWatcher.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+/**
+ * Abstract resource watcher interface.
+ * <p/>
+ * Different resource watchers can be registered with {@link ResourceWatcherService} to be called
+ * periodically in order to check for changes in different external resources.
+ */
+public interface ResourceWatcher {
+ /**
+ * Called once when the resource watcher is added to {@link ResourceWatcherService}
+ */
+ void init();
+
+ /**
+ * Called periodically by {@link ResourceWatcherService} so resource watcher can check the resource
+ */
+ void checkAndNotify();
+}
diff --git a/src/main/java/org/elasticsearch/watcher/ResourceWatcherModule.java b/src/main/java/org/elasticsearch/watcher/ResourceWatcherModule.java
new file mode 100644
index 0000000..a3ae84a
--- /dev/null
+++ b/src/main/java/org/elasticsearch/watcher/ResourceWatcherModule.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ * Module that binds the {@link ResourceWatcherService} as an eager singleton.
+ */
+public class ResourceWatcherModule extends AbstractModule {
+ @Override
+ protected void configure() {
+ bind(ResourceWatcherService.class).asEagerSingleton();
+ }
+}
diff --git a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java
new file mode 100644
index 0000000..2ad53cf
--- /dev/null
+++ b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ScheduledFuture;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+
+/**
+ * Generic resource watcher service
+ *
+ * Other elasticsearch services can register their resource watchers with this service using the
+ * {@link #add(ResourceWatcher)} method. This service will periodically call the
+ * {@link org.elasticsearch.watcher.ResourceWatcher#checkAndNotify()} method of all registered watchers. The
+ * frequency of the checks can be specified using the {@code watcher.interval} setting, which defaults to
+ * {@code 60s}. The service can be disabled by setting {@code watcher.enabled} to {@code false}.
+ */
+public class ResourceWatcherService extends AbstractLifecycleComponent<ResourceWatcherService> {
+
+ private final List<ResourceWatcher> watchers = new CopyOnWriteArrayList<ResourceWatcher>();
+
+ private volatile ScheduledFuture<?> scheduledFuture;
+
+ private final boolean enabled;
+
+ private final TimeValue interval;
+
+ private final ThreadPool threadPool;
+
+ @Inject
+ public ResourceWatcherService(Settings settings, ThreadPool threadPool) {
+ super(settings);
+ this.enabled = componentSettings.getAsBoolean("enabled", true);
+ this.interval = componentSettings.getAsTime("interval", timeValueSeconds(60));
+ this.threadPool = threadPool;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ if (!enabled) {
+ return;
+ }
+ scheduledFuture = threadPool.scheduleWithFixedDelay(new ResourceMonitor(), interval);
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ if (!enabled) {
+ return;
+ }
+ scheduledFuture.cancel(true);
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ /**
+ * Register new resource watcher
+ */
+ public void add(ResourceWatcher watcher) {
+ watcher.init();
+ watchers.add(watcher);
+ }
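+
+ // Illustrative sketch (editor's note, not upstream code): a service would typically
+ // register a FileWatcher here and rely on the periodic ResourceMonitor below:
+ //
+ // resourceWatcherService.add(new FileWatcher(new File("/path/to/scripts")));
+ //
+ // Note that add(...) calls ResourceWatcher#init() up front, so the watcher records
+ // its initial state before the first scheduled checkAndNotify() pass.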
+
+ /**
+ * Unregister a resource watcher
+ */
+ public void remove(ResourceWatcher watcher) {
+ watchers.remove(watcher);
+ }
+
+ private class ResourceMonitor implements Runnable {
+
+ @Override
+ public void run() {
+ for(ResourceWatcher watcher : watchers) {
+ watcher.checkAndNotify();
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/joda/time/base/BaseDateTime.java b/src/main/java/org/joda/time/base/BaseDateTime.java
new file mode 100644
index 0000000..c1f5427
--- /dev/null
+++ b/src/main/java/org/joda/time/base/BaseDateTime.java
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2001-2011 Stephen Colebourne
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.joda.time.base;
+
+import org.joda.time.Chronology;
+import org.joda.time.DateTimeUtils;
+import org.joda.time.DateTimeZone;
+import org.joda.time.ReadableDateTime;
+import org.joda.time.chrono.ISOChronology;
+import org.joda.time.convert.ConverterManager;
+import org.joda.time.convert.InstantConverter;
+
+import java.io.Serializable;
+
+/**
+ * BaseDateTime is an abstract implementation of ReadableDateTime that stores
+ * data in <code>long</code> and <code>Chronology</code> fields.
+ * <p/>
+ * This class should generally not be used directly by API users.
+ * The {@link ReadableDateTime} interface should be used when different
+ * kinds of date/time objects are to be referenced.
+ * <p/>
+ * BaseDateTime subclasses may be mutable and not thread-safe.
+ *
+ * @author Stephen Colebourne
+ * @author Kandarp Shah
+ * @author Brian S O'Neill
+ * @since 1.0
+ */
+public abstract class BaseDateTime
+ extends AbstractDateTime
+ implements ReadableDateTime, Serializable {
+
+ /**
+ * Serialization lock
+ */
+ private static final long serialVersionUID = -6728882245981L;
+
+ /**
+ * The millis from 1970-01-01T00:00:00Z
+ */
+ // ES change: unlike upstream Joda-Time, this field is deliberately not volatile
+ private long iMillis;
+ /**
+ * The chronology to use
+ */
+ private volatile Chronology iChronology;
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Constructs an instance set to the current system millisecond time
+ * using <code>ISOChronology</code> in the default time zone.
+ */
+ public BaseDateTime() {
+ this(DateTimeUtils.currentTimeMillis(), ISOChronology.getInstance());
+ }
+
+ /**
+ * Constructs an instance set to the current system millisecond time
+ * using <code>ISOChronology</code> in the specified time zone.
+ * <p/>
+ * If the specified time zone is null, the default zone is used.
+ *
+ * @param zone the time zone, null means default zone
+ */
+ public BaseDateTime(DateTimeZone zone) {
+ this(DateTimeUtils.currentTimeMillis(), ISOChronology.getInstance(zone));
+ }
+
+ /**
+ * Constructs an instance set to the current system millisecond time
+ * using the specified chronology.
+ * <p/>
+ * If the chronology is null, <code>ISOChronology</code>
+ * in the default time zone is used.
+ *
+ * @param chronology the chronology, null means ISOChronology in default zone
+ */
+ public BaseDateTime(Chronology chronology) {
+ this(DateTimeUtils.currentTimeMillis(), chronology);
+ }
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Constructs an instance set to the milliseconds from 1970-01-01T00:00:00Z
+ * using <code>ISOChronology</code> in the default time zone.
+ *
+ * @param instant the milliseconds from 1970-01-01T00:00:00Z
+ */
+ public BaseDateTime(long instant) {
+ this(instant, ISOChronology.getInstance());
+ }
+
+ /**
+ * Constructs an instance set to the milliseconds from 1970-01-01T00:00:00Z
+ * using <code>ISOChronology</code> in the specified time zone.
+ * <p/>
+ * If the specified time zone is null, the default zone is used.
+ *
+ * @param instant the milliseconds from 1970-01-01T00:00:00Z
+ * @param zone the time zone, null means default zone
+ */
+ public BaseDateTime(long instant, DateTimeZone zone) {
+ this(instant, ISOChronology.getInstance(zone));
+ }
+
+ /**
+ * Constructs an instance set to the milliseconds from 1970-01-01T00:00:00Z
+ * using the specified chronology.
+ * <p/>
+ * If the chronology is null, <code>ISOChronology</code>
+ * in the default time zone is used.
+ *
+ * @param instant the milliseconds from 1970-01-01T00:00:00Z
+ * @param chronology the chronology, null means ISOChronology in default zone
+ */
+ public BaseDateTime(long instant, Chronology chronology) {
+ super();
+ iChronology = checkChronology(chronology);
+ iMillis = checkInstant(instant, iChronology);
+ }
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Constructs an instance from an Object that represents a datetime,
+ * forcing the time zone to that specified.
+ * <p/>
+ * If the object contains no chronology, <code>ISOChronology</code> is used.
+ * If the specified time zone is null, the default zone is used.
+ * <p/>
+ * The recognised object types are defined in
+ * {@link org.joda.time.convert.ConverterManager ConverterManager} and
+ * include ReadableInstant, String, Calendar and Date.
+ *
+ * @param instant the datetime object
+ * @param zone the time zone
+ * @throws IllegalArgumentException if the instant is invalid
+ */
+ public BaseDateTime(Object instant, DateTimeZone zone) {
+ super();
+ InstantConverter converter = ConverterManager.getInstance().getInstantConverter(instant);
+ Chronology chrono = checkChronology(converter.getChronology(instant, zone));
+ iChronology = chrono;
+ iMillis = checkInstant(converter.getInstantMillis(instant, chrono), chrono);
+ }
+
+ /**
+ * Constructs an instance from an Object that represents a datetime,
+ * using the specified chronology.
+ * <p/>
+ * If the chronology is null, ISO in the default time zone is used.
+ * <p/>
+ * The recognised object types are defined in
+ * {@link org.joda.time.convert.ConverterManager ConverterManager} and
+ * include ReadableInstant, String, Calendar and Date.
+ *
+ * @param instant the datetime object
+ * @param chronology the chronology
+ * @throws IllegalArgumentException if the instant is invalid
+ */
+ public BaseDateTime(Object instant, Chronology chronology) {
+ super();
+ InstantConverter converter = ConverterManager.getInstance().getInstantConverter(instant);
+ iChronology = checkChronology(converter.getChronology(instant, chronology));
+ iMillis = checkInstant(converter.getInstantMillis(instant, chronology), iChronology);
+ }
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Constructs an instance from datetime field values
+ * using <code>ISOChronology</code> in the default time zone.
+ *
+ * @param year the year
+ * @param monthOfYear the month of the year
+ * @param dayOfMonth the day of the month
+ * @param hourOfDay the hour of the day
+ * @param minuteOfHour the minute of the hour
+ * @param secondOfMinute the second of the minute
+ * @param millisOfSecond the millisecond of the second
+ */
+ public BaseDateTime(
+ int year,
+ int monthOfYear,
+ int dayOfMonth,
+ int hourOfDay,
+ int minuteOfHour,
+ int secondOfMinute,
+ int millisOfSecond) {
+ this(year, monthOfYear, dayOfMonth, hourOfDay,
+ minuteOfHour, secondOfMinute, millisOfSecond, ISOChronology.getInstance());
+ }
+
+ /**
+ * Constructs an instance from datetime field values
+ * using <code>ISOChronology</code> in the specified time zone.
+ * <p/>
+ * If the specified time zone is null, the default zone is used.
+ *
+ * @param year the year
+ * @param monthOfYear the month of the year
+ * @param dayOfMonth the day of the month
+ * @param hourOfDay the hour of the day
+ * @param minuteOfHour the minute of the hour
+ * @param secondOfMinute the second of the minute
+ * @param millisOfSecond the millisecond of the second
+ * @param zone the time zone, null means default time zone
+ */
+ public BaseDateTime(
+ int year,
+ int monthOfYear,
+ int dayOfMonth,
+ int hourOfDay,
+ int minuteOfHour,
+ int secondOfMinute,
+ int millisOfSecond,
+ DateTimeZone zone) {
+ this(year, monthOfYear, dayOfMonth, hourOfDay,
+ minuteOfHour, secondOfMinute, millisOfSecond, ISOChronology.getInstance(zone));
+ }
+
+ /**
+ * Constructs an instance from datetime field values
+ * using the specified chronology.
+ * <p/>
+ * If the chronology is null, <code>ISOChronology</code>
+ * in the default time zone is used.
+ *
+ * @param year the year
+ * @param monthOfYear the month of the year
+ * @param dayOfMonth the day of the month
+ * @param hourOfDay the hour of the day
+ * @param minuteOfHour the minute of the hour
+ * @param secondOfMinute the second of the minute
+ * @param millisOfSecond the millisecond of the second
+ * @param chronology the chronology, null means ISOChronology in default zone
+ */
+ public BaseDateTime(
+ int year,
+ int monthOfYear,
+ int dayOfMonth,
+ int hourOfDay,
+ int minuteOfHour,
+ int secondOfMinute,
+ int millisOfSecond,
+ Chronology chronology) {
+ super();
+ iChronology = checkChronology(chronology);
+ long instant = iChronology.getDateTimeMillis(year, monthOfYear, dayOfMonth,
+ hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
+ iMillis = checkInstant(instant, iChronology);
+ }
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Checks the specified chronology before storing it, potentially altering it.
+ * This method must not access any instance variables.
+ * <p/>
+ * This implementation converts nulls to ISOChronology in the default zone.
+ *
+ * @param chronology the chronology to use, may be null
+ * @return the chronology to store in this datetime, not null
+ */
+ protected Chronology checkChronology(Chronology chronology) {
+ return DateTimeUtils.getChronology(chronology);
+ }
+
+ /**
+ * Checks the specified instant before storing it, potentially altering it.
+ * This method must not access any instance variables.
+ * <p/>
+ * This implementation simply returns the instant.
+ *
+ * @param instant the milliseconds from 1970-01-01T00:00:00Z to round
+ * @param chronology the chronology to use, not null
+ * @return the instant to store in this datetime
+ */
+ protected long checkInstant(long instant, Chronology chronology) {
+ return instant;
+ }
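+
+ // Illustrative sketch (editor's note, not upstream code): subclasses use this hook
+ // to normalize the instant before it is stored; e.g. a hypothetical subclass that
+ // truncates to whole seconds could override it as:
+ //
+ // protected long checkInstant(long instant, Chronology chronology) {
+ // return chronology.secondOfMinute().roundFloor(instant);
+ // }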
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Gets the milliseconds of the datetime instant from the Java epoch
+ * of 1970-01-01T00:00:00Z.
+ *
+ * @return the number of milliseconds since 1970-01-01T00:00:00Z
+ */
+ public long getMillis() {
+ return iMillis;
+ }
+
+ /**
+ * Gets the chronology of the datetime.
+ *
+ * @return the Chronology that the datetime is using
+ */
+ public Chronology getChronology() {
+ return iChronology;
+ }
+
+ //-----------------------------------------------------------------------
+
+ /**
+ * Sets the milliseconds of the datetime.
+ * <p/>
+ * All changes to the millisecond field occur via this method.
+ * Override and block this method to make a subclass immutable.
+ *
+ * @param instant the milliseconds since 1970-01-01T00:00:00Z to set the datetime to
+ */
+ protected void setMillis(long instant) {
+ iMillis = checkInstant(instant, iChronology);
+ }
+
+ /**
+ * Sets the chronology of the datetime.
+ * <p/>
+ * All changes to the chronology field occur via this method.
+ * Override and block this method to make a subclass immutable.
+ *
+ * @param chronology the chronology to set
+ */
+ protected void setChronology(Chronology chronology) {
+ iChronology = checkChronology(chronology);
+ }
+
+}
diff --git a/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
diff --git a/src/main/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat b/src/main/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/main/resources/META-INF/services/org.apache.lucene.codecs.DocValuesFormat
diff --git a/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat
new file mode 100644
index 0000000..38e980a
--- /dev/null
+++ b/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat
@@ -0,0 +1,3 @@
+org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat
+org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat
+org.elasticsearch.search.suggest.completion.Completion090PostingsFormat
diff --git a/src/main/resources/config/names.txt b/src/main/resources/config/names.txt
new file mode 100644
index 0000000..f47a35c
--- /dev/null
+++ b/src/main/resources/config/names.txt
@@ -0,0 +1,2826 @@
+3-D Man
+A'lars
+Aardwolf
+Abdul Alhazred
+Abe Brown
+Abigail Brand
+Abner Jenkins
+Abner Little
+Abominable Snowman
+Abomination
+Abominatrix
+Abraham Cornelius
+Abraxas
+Absalom
+Absorbing Man
+Abyss
+Access
+Achebe
+Achelous
+Achilles
+Acrobat
+Adam II
+Adam Warlock
+Adam X
+Adaptoid
+Administrator
+Adonis
+Adrenazon
+Adri Nital
+Adrian Corbo
+Adrian Toomes
+Adrienne Frost
+Adversary
+Advisor
+Aegis
+Aelfyre Whitemane
+Aero
+Aftershock
+Agamemnon
+Agamotto
+Agatha Harkness
+Aged Genghis
+Agent
+Agent Axis
+Agent Cheesecake
+Agent X
+Agent Zero
+Aggamon
+Aginar
+Agon
+Agony
+Agron
+Aguja
+Ahab
+Ahmet Abdol
+Ahura
+Air-Walker
+Airborne
+Aireo
+Airstrike
+Ajak
+Ajax
+Ajaxis
+Akasha
+Akhenaten
+Al MacKenzie
+Alaris
+Albert
+Albino
+Albion
+Alchemy
+Alcmena
+Aldebron
+Aleksander Lukin
+Aleksei Sytsevich
+Aleta Ogord
+Alex
+Alex Hayden
+Alex Power
+Alex Wilder
+Alexander Bont
+Alexander Goodwin Pierce
+Alexander Lexington
+Alexander Summers
+Alfie O'Meggan
+Algrim the Strong
+Alibar
+Alicia Masters
+Alistair Smythe
+Alistaire Stuart
+Aliyah Bishop
+Alkhema
+All-American
+Allatou
+Allison Blaire
+Alpha Ray
+Alpha the Ultimate Mutant
+Alyosha Kravinoff
+Alysande Stuart
+Alyssa Moy
+Amahl Farouk
+Amalgam
+Amanda Sefton
+Amatsu-Mikaboshi
+Amazon
+Amber Hunt
+Amelia Voght
+Amergin
+American Ace
+American Dream
+American Eagle
+American Samurai
+Americop
+Ameridroid
+Amiko Kobayashi
+Amina Synge
+Aminedi
+Ammo
+Amphibian
+Amphibion
+Amphibius
+Amun
+Anaconda
+Anais
+Analyzer
+Anarchist
+Ancient One
+Andreas von Strucker
+Andrew Chord
+Andrew Gervais
+Android Man
+Andromeda
+Anelle
+Angar the Screamer
+Angel
+Angel Dust
+Angel Face
+Angel Salvadore
+Angela Cairn
+Angela Del Toro
+Angelica Jones
+Angelo Unuscione
+Angler
+Ani-Mator
+Animus
+Ankhi
+Annalee
+Anne-Marie Cortez
+Annex
+Annie Ghazikhanian
+Annihilus
+Anole
+Anomalito
+Anomaloco
+Anomaly
+Answer
+Ant-Man
+Anthropomorpho
+Anti-Cap
+Anti-Phoenix Force
+Anti-Venom
+Anti-Vision
+Antimatter
+Antiphon the Overseer
+Antonio
+Anubis
+Anvil
+Anything
+Apache Kid
+Apalla
+Ape
+Ape-Man
+Ape-X
+Apocalypse
+Apollo
+Apryll
+Aquarian
+Aquarius
+Aqueduct
+Arabian Knight
+Arachne
+Aragorn
+Araki
+Aralune
+Araña
+Arc
+Arcade
+Arcademan
+Arcanna
+Archangel
+Archenemy
+Archer
+Archie Corrigan
+Archimage
+Architect
+Arclight
+Arcturus Rann
+Ardina
+Ardroman
+Arena
+Ares
+Argo
+Argus
+Ariann
+Arides
+Ariel
+Aries
+Arishem the Judge
+Arize
+Arizona Annie
+Arkady Rossovich
+Arkon
+Arkus
+Arlette Truffaut
+Arlok
+Armadillo
+Armageddon
+Armand Martel
+Armor
+Armory
+Arnim Zola
+Arno Stark
+Arranger
+Arsenal
+Arsenic
+Artemis
+Arthur Parks
+Artie
+Artie Maddicks
+Arturo Falcones
+Asbestos Lady
+Asbestos Man
+Ashcan
+Asmodeus
+Asp
+Assassin
+Asteroth
+Astra
+Astrid Bloom
+Astron
+Astronomer
+Asylum
+Atalanta
+Atalon
+Athena
+Atlas
+Atleza
+Atom Bob
+Atom-Smasher
+Att-Lass
+Attuma
+Atum
+Aunt May Parker
+Auntie Freeze
+Auric
+Aurora
+Authority
+Autolycus
+Avalanche
+Avarrish
+Awesome Android
+Axum
+Azazel
+Baal
+Balder
+Balor
+Balthakk
+Bandit
+Banshee
+Bantam
+Baphomet
+Barbarus
+Barnacle
+Baron Blood
+Baron Brimstone
+Baron Macabre
+Baron Mordo
+Baron Samedi
+Baron Strucker
+Baron Von Blitzschlag
+Baron Zemo
+Baroness Blood
+Barracuda
+Bart Hamilton
+Base
+Basil Sandhurst
+Basilisk
+Bast
+Bastion
+Batragon
+Batroc the Leaper
+Battering Ram
+Battleaxe
+Battlestar
+Battletide
+Batwing
+Beast
+Beautiful Dreamer
+Bedlam
+Bedlam II
+Beetle
+Beetle II
+Behemoth
+Bela
+Belasco
+Belathauzer
+Bella Donna
+Ben Parker
+Ben Reilly
+Ben Urich
+Benazir Kaur
+Benedict Kine
+Bengal
+Benjamin Jacob Grimm
+Bennet du Paris
+Benny Beckley
+Bentley Wittman
+Bereet
+Berzerker
+Bes
+Beta Ray Bill
+Bethany Cabe
+Betty Brant
+Betty Brant Leeds
+Betty Ross Banner
+Bevatron
+Beyonder
+Bi-Beast
+Bible John
+Big Bertha
+Big Man
+Big Wheel
+Bill Foster
+Binary
+Bird-Brain
+Bird-Man
+Bishop
+Bison
+Bizarnage
+Black Bolt
+Black Box
+Black Cat
+Black Crow
+Black Death
+Black Dragon
+Black Fox
+Black Goliath
+Black Jack Tarr
+Black King
+Black Knight
+Black Lama
+Black Mamba
+Black Marvel
+Black Panther
+Black Queen
+Black Talon
+Black Tarantula
+Black Tom Cassidy
+Black Widow
+Blackbird
+Blackheart
+Blackheath
+Blacklash
+Blackout
+Blackwing
+Blackwulf
+Blade
+Blaquesmith
+Blastaar
+Blaze
+Blazing Skull
+Blind Faith
+Blind Justice
+Blindside
+Blindspot
+Bling
+Blink
+Blistik
+Blitziana
+Blitzkrieger
+Blizzard
+Blizzard II
+Blob
+Blockbuster
+Bloke
+Blonde Phantom
+Blood Brothers
+Blood Rose
+Blood Spider
+Bloodaxe
+Bloodhawk
+Bloodlust
+Bloodlust II
+Bloodscream
+Bloodshed
+Bloodsport
+Bloodstorm
+Bloodtide
+Bloodwraith
+Blowhard
+Blue Bullet
+Blue Diamond
+Blue Marvel
+Blue Shield
+Blue Streak
+Blur
+Bob
+Bob Diamond
+Bobster
+Bogeyman
+Bombshell
+Boneyard
+Bonita Juarez
+Boobytrap
+Book
+Boom Boom
+Boom Boy
+Boomer
+Boomerang
+Boomslang
+Boost
+Bora
+Bounty
+Bounty Hunter
+Bova
+Box
+Box IV
+Brain Cell
+Brain Drain
+Brain-Child
+Brainchild
+Bram Velsing
+Brass
+Bres
+Brian Braddock
+Brian Falsworth
+Brigade
+Briquette
+Brother Nature
+Brother Tode
+Brother Voodoo
+Brothers Grimm
+Bruiser
+Brunnhilda
+Brutacus
+Brute I
+Brute II
+Brute III
+Brynocki
+Bucky
+Bucky III
+Bug
+Bulldozer
+Bullet
+Bullseye
+Burner
+Burstarr
+Bushman
+Bushmaster
+Bushwacker
+Butterball
+Buzz
+Buzzard
+Byrrah
+Caber
+Cable
+Cadaver
+Cagliostro
+Caiera
+Caiman
+Cain
+Cain Marko
+Caleb Alexander
+Caliban
+Callisto
+Calvin Rankin
+Calypso
+Cameron Hodge
+Canasta
+Cancer
+Candra
+Cannonball
+Cannonball I
+Cap 'N Hawk
+Caprice
+Capricorn
+Captain America
+Captain Atlas
+Captain Barracuda
+Captain Britain
+Captain Fate
+Captain Germany
+Captain Marvel
+Captain Omen
+Captain Savage
+Captain UK
+Captain Ultra
+Captain Universe
+Captain Wings
+Captain Zero
+Cardiac
+Cardinal
+Caregiver
+Caretaker
+Carl "Crusher" Creel
+Carlos Lobo
+Carmella Unuscione
+Carmilla Black
+Carnage
+Carnivore
+Carolyn Parmenter
+Carolyn Trainer
+Carrie Alexander
+Carrion
+Carter Ghazikhanian
+Cassandra Nova
+Cassie Lang
+Cassiopea
+Cat
+Cat-Man
+Catiana
+Cayman
+Cecelia Reyes
+Cecilia Reyes
+Celestial Madonna
+Centennial
+Centurion
+Centurious
+Centurius
+Cerberus
+Cerebra
+Cerise
+Cessily Kincaid
+Cethlann
+Ch'od
+Chaka
+Challenger
+Chamber
+Chameleon
+Champion of the Universe
+Chan Luichow
+Chance
+Changeling
+Chaos
+Charcoal
+Charles Xavier
+Charlie-27
+Charon
+Chase Stein
+Cheetah
+Chemistro
+Chen Lu
+Chi Demon
+Chief Examiner
+Chimera
+Chloe Tran
+Choice
+Chondu the Mystic
+Christopher Summers
+Chrome
+Chronos
+Chthon
+Chtylok
+Citizen V
+Claire Voyant
+Claudette St. Croix
+Clea
+Clearcut
+Cletus Kasady
+Clint Barton
+Clive
+Cloak
+Cloud
+Cloud 9
+Clown
+Coach
+Coachwhip
+Cobalt Man
+Cobra
+Cody Mushumanski Gun Man
+Cold War
+Coldblood
+Coldfire
+Collective Man
+Collector
+Colleen Wing
+Colonel
+Colonel America
+Colossus
+Comet
+Comet Man
+Commander Kraken
+Commando
+Conan the Barbarian
+Condor
+Conquer Lord
+Conquest
+Conquistador
+Conrad Josten
+Constrictor
+Contemplator
+Contessa
+Contrary
+Controller
+Copperhead
+Copycat
+Coral
+Cordelia Frost
+Cornelius van Lunt
+Corona
+Corruptor
+Corsair
+Cottonmouth
+Count Abyss
+Count Nefaria
+Courier
+Cowgirl
+Crazy Eight
+Crime Master
+Crime-Buster
+Crimebuster
+Crimson
+Crimson Cavalier
+Crimson Commando
+Crimson Cowl
+Crimson Craig
+Crimson Daffodil
+Crimson Dynamo
+Crimson Dynamo V
+Crimson and the Raven
+Crippler
+Crooked Man
+Crossbones
+Crossfire
+Crown
+Crucible
+Crusader
+Crusher
+Crystal
+Curtis Connors
+Cutthroat
+Cybele
+Cybelle
+Cyber
+Cyborg X
+Cyclone
+Cyclops
+Cypher
+D'Ken
+D'Spayre
+D-Man
+DJ
+Dagger
+Daisy Johnson
+Dakimh the Enchanter
+Dakota North
+Damballah
+Damion Hellstrom
+Damon Dran
+Dan Ketch
+Danger
+Daniel Rand
+Danielle Moonstar
+Dansen Macabre
+Danvers Carol
+Daredevil
+Dark Angel
+Dark Beast
+Dark Phoenix
+Dark-Crawler
+Darkdevil
+Darkhawk
+Darkoth
+Darkstar
+David Cannon
+Daytripper
+Dazzler
+Deacon Frost
+Dead Girl
+Deadhead
+Deadly Ernest
+Deadpool
+Death
+Death Adder
+Death's Head
+Death's Head II
+Death-Stalker
+Deathbird
+Deathlok
+Deathstroke
+Deathurge
+Deathwatch
+Deborah Ritter
+Debra Whitman
+Decay
+Decay II
+Defensor
+Delilah
+Delphi
+Delphine Courtney
+Dementia
+Demiurge
+Demogoblin
+Demogorge the God-Eater
+Demolition Man
+Derrick Slegers Speed
+Desmond Pitt
+Destiny
+Destroyer
+Destroyer of Demons
+Devastator
+Devil Dinosaur
+Devil Hunter Gabriel
+Devil-Slayer
+Devos the Devastator
+Diablo
+Diamanda Nero
+Diamond Lil
+Diamondback
+Diamondhead
+Digitek
+Dionysus
+Dirtnap
+Discus
+Dittomaster
+Dmitri Bukharin
+Dmitri Smerdyakov
+Doc Samson
+Doctor Anthony Droom
+Doctor Arthur Nagan
+Doctor Bong
+Doctor Demonicus
+Doctor Doom
+Doctor Dorcas
+Doctor Droom
+Doctor Druid
+Doctor Faustus
+Doctor Glitternight
+Doctor Leery
+Doctor Minerva
+Doctor Octopus
+Doctor Spectrum
+Doctor Strange
+Doctor Sun
+Domina
+Dominic Fortune
+Dominic Petros
+Domino
+Dominus
+Domo
+Don Fortunato
+Donald "Donny" Gill
+Donald Pierce
+Donald Ritter
+Doop
+Doorman
+Doppelganger
+Doppleganger
+Dorma
+Dormammu
+Double Helix
+Doug Ramsey
+Doug and Jerry
+Dougboy
+Doughboy
+Douglas Birely
+Douglas Ramsey
+Douglock
+Dr. John Grey
+Dr. Lemuel Dorcas
+Dr. Marla Jameson
+Dr. Otto Octavius
+Dracula
+Dragon Lord
+Dragon Man
+Dragon of the Moon
+Dragoness
+Dragonfly
+Dragonwing
+Drax the Destroyer
+Dreadknight
+Dreadnought
+Dream Weaver
+Dreaming Celestial
+Dreamqueen
+Dredmund Druid
+Dromedan
+Druid
+Druig
+Dum-Dum Dugan
+Dusk
+Dust
+Dweller-in-Darkness
+Dyna-Mite
+Earth Lord
+Earthquake
+Ebenezer Laughton
+Ebon Seeker
+Echo
+Ecstasy
+Ectokid
+Eddie Brock
+Edward "Ned" Buckman
+Edwin Jarvis
+Eel
+Egghead
+Ego the Living Planet
+El Aguila
+El Muerto
+Elaine Grey
+Elathan
+Electric Eve
+Electro
+ElectroCute
+Electron
+Eleggua
+Elektra
+Elektra Natchios
+Elektro
+Elf With A Gun
+Elfqueen
+Elias Bogan
+Eliminator
+Elixir
+Elizabeth "Betsy" Braddock
+Elizabeth Twoyoungmen
+Ellie Phimster
+Elsie-Dee
+Elven
+Elysius
+Emil Blonsky
+Emma Frost
+Empath
+Empathoid
+Emplate
+En Sabah Nur
+Enchantress
+Energizer
+Enforcer
+Enigma
+Ent
+Entropic Man
+Eon
+Epoch
+Equilibrius
+Equinox
+Ereshkigal
+Erg
+Eric Slaughter
+Eric Williams
+Eric the Red
+Erik Josten
+Erik Killmonger
+Erik Magnus Lehnsherr
+Ernst
+Eros
+Eshu
+Eson the Searcher
+Eternal Brain
+Eternity
+Ethan Edwards
+Eugene Judd
+Ev Teel Urizen
+Evangeline Whedon
+Ever
+Everett Thomas
+Everyman
+Evilhawk
+Executioner
+Exodus
+Exploding Man
+Exterminator
+Ezekiel
+Ezekiel Sims
+Ezekiel Stane
+Fabian Cortez
+Fafnir
+Fagin
+Falcon
+Fallen One
+Famine
+Fan Boy
+Fandral
+Fang
+Fantasia
+Fantastic Four
+Fantomex
+Farallah
+Fasaud
+Fashima
+Fatale
+Fateball
+Father Time
+Fault Zone
+Fearmaster
+Feedback
+Felicia Hardy
+Feline
+Fenris
+Fenris Wolf
+Fer-de-Lance
+Feral
+Feron
+Fever Pitch
+Fight-Man
+Fin
+Fin Fang Foom
+Firearm
+Firebird
+Firebolt
+Firebrand
+Firefrost
+Firelord
+Firepower
+Firestar
+Fixer
+Fixx
+Flag-Smasher
+Flambe
+Flash Thompson
+Flatman
+Flex
+Flint Marko
+Flubber
+Fly
+Flygirl
+Flying Tiger
+Foggy Nelson
+Fontanelle
+Foolkiller
+Forbush Man
+Force
+Forearm
+Foreigner
+Forge
+Forgotten One
+Foxfire
+Frank Castle
+Frank Drake
+Frank Payne
+Frank Simpson
+Frankenstein's Monster
+Frankie Raye
+Frankie and Victoria
+Franklin Hall
+Franklin Richards
+Franklin Storm
+Freak
+Freak of Science
+Freakmaster
+Freakshow
+Fred Myers
+Frederick Slade
+Free Spirit
+Freedom Ring
+Frenzy
+Frey
+Frigga
+Frog-Man
+Fury
+Fusion
+Futurist
+G-Force
+Gabe Jones
+Gabriel Summers
+Gabriel the Air-Walker
+Gaea
+Gaia
+Gailyn Bailey
+Galactus
+Galaxy Master
+Gambit
+Gammenon the Gatherer
+Gamora
+Ganymede
+Gardener
+Gargantua
+Gargantus
+Gargouille
+Gargoyle
+Garokk the Petrified Man
+Garrison Kane
+Gatecrasher
+Gateway
+Gauntlet
+Gavel
+Gaza
+Gazelle
+Gazer
+Geb
+Gee
+Geiger
+Geirrodur
+Gemini
+General Orwell Taylor
+Genis-Vell
+George Stacy
+George Tarleton
+George Washington Bridge
+Georgianna Castleberry
+Gertrude Yorkes
+Ghaur
+Ghost
+Ghost Dancer
+Ghost Girl
+Ghost Maker
+Ghost Rider
+Ghost Rider 2099
+Ghoul
+Giant-Man
+Gibbon
+Gibborim
+Gideon
+Gideon Mace
+Giganto
+Gigantus
+Gin Genie
+Gladiator
+Gladiatrix
+Glamor
+Glenn Talbot
+Glitch
+Glob
+Glob Herman
+Gloom
+Glorian
+Goblin Queen
+Goblyn
+Godfrey Calthrop
+Gog
+Goldbug
+Golden Archer
+Golden Girl
+Golden Oldie
+Goldeneye
+Golem
+Goliath
+Gomi
+Googam
+Gorgeous George
+Gorgilla
+Gorgon
+Gorilla Girl
+Gorilla-Man
+Gorr
+Gosamyr
+Grand Director
+Grandmaster
+Grappler
+Grasshopper
+Grasshopper II
+Graviton
+Gravity
+Graydon Creed
+Great Gambonnos
+Great Video
+Green Goblin
+Green Goblin IV
+Greer Grant
+Greer Grant Nelson
+Gregor Shapanka
+Gregory Gideon
+Gremlin
+Grenade
+Grey Gargoyle
+Grey King
+Griffin
+Grim Hunter
+Grim Reaper
+Grizzly
+Grog the God-Crusher
+Gronk
+Grotesk
+Groundhog
+Growing Man
+Guardsman
+Guido Carosella
+Gunthar of Rigel
+Gwen Stacy
+Gypsy Moth
+H.E.R.B.I.E.
+Hack
+Hag
+Hairbag
+Halflife
+Halloween Jack
+Hamilton Slade
+Hammer Harrison
+Hammer and Anvil
+Hammerhead
+Hangman
+Hank McCoy
+Hank Pym
+Hanna Levy
+Hannah Levy
+Hannibal King
+Harald Jaekelsson
+Hardcase
+Hardcore
+Hardnose
+Hardshell
+Hardwire
+Hargen the Measurer
+Harmonica
+Harness
+Harold "Happy" Hogan
+Harold H. Harold
+Harpoon
+Harpy
+Harrier
+Harry Leland
+Harry Osborn
+Hate-Monger
+Haven
+Havok
+Hawkeye
+Hawkeye II
+Hawkshaw
+Haywire
+Hazard
+Hazmat
+Headknocker
+Headlok
+Heart Attack
+Heather Cameron
+Hebe
+Hecate
+Hector
+Heimdall
+Heinrich Zemo
+Hela
+Helio
+Hellcat
+Helleyes
+Hellfire
+Hellion
+Hellrazor
+Helmut Zemo
+Henry "Hank" McCoy
+Henry Peter Gyrich
+Hensley Fargus
+Hephaestus
+Hepzibah
+Her
+Hera
+Herbert Edgar Wyndham
+Hercules
+Herman Schultz
+Hermes
+Hermod
+Hero
+Hero for Hire
+Herr Kleiser
+Hideko Takata
+High Evolutionary
+High-Tech
+Hijacker
+Hildegarde
+Him
+Hindsight Lad
+Hippolyta
+Hisako Ichiki
+Hit-Maker
+Hitman
+Hobgoblin
+Hobgoblin II
+Hoder
+Hogun
+Holly
+Honcho
+Honey Lemon
+Hood
+Hornet
+Horus
+Howard the Duck
+Hrimhari
+Hub
+Hugh Jones
+Hulk
+Hulk 2099
+Hulkling
+Human Cannonball
+Human Fly
+Human Robot
+Human Top
+Human Top II
+Human Torch
+Human Torch II
+Humbug
+Humus Sapien
+Huntara
+Hurricane
+Husk
+Hussar
+Hybrid
+Hybrid II
+Hyde
+Hydro
+Hydro-Man
+Hydron
+Hyperion
+Hyperkind
+Hyperstorm
+Hypnotia
+Hyppokri
+ISAAC
+Icarus
+Iceman
+Icemaster
+Idunn
+Iguana
+Ikaris
+Ikonn
+Ikthalon
+Illusion
+Illyana Rasputin
+Immortus
+Impala
+Imperial Hydra
+Impossible Man
+Impulse
+In-Betweener
+Indech
+Indra
+Inertia
+Infamnia
+Infant Terrible
+Infectia
+Inferno
+Infinity
+Interloper
+Invisible Girl
+Invisible Woman
+Inza
+Ion
+Iridia
+Iron Cross
+Iron Fist
+Iron Lad
+Iron Maiden
+Iron Man
+Iron Man 2020
+Iron Monger
+Ironclad
+Isaac Christians
+Isaiah Bradley
+Isbisa
+Isis
+Ivan Kragoff
+J. Jonah Jameson
+J2
+Jack Flag
+Jack Frost
+Jack Kirby
+Jack O'Lantern
+Jack Power
+Jack of Hearts
+Jack-in-the-Box
+Jackal
+Jackdaw
+Jackhammer
+Jackpot
+Jackson Arvad
+Jacob "Jake" Fury
+Jacqueline Falsworth
+Jacques DuQuesne
+Jade Dragon
+Jaeger
+Jaguar
+Jamal Afari
+James "Jimmy" Marks
+James Dr. Power
+James Howlett
+James Jaspers
+James Madrox
+James Proudstar
+James Rhodes
+James Sanders
+Jamie Braddock
+Jane Foster
+Jane Kincaid
+Janet van Dyne
+Jann
+Janus
+Jared Corbo
+Jarella
+Jaren
+Jason
+Jawynn Dueck the Iron Christian of Faith
+Jazz
+Jean DeWolff
+Jean Grey
+Jean Grey-Summers
+Jean-Paul Beaubier
+Jeanne-Marie Beaubier
+Jebediah Guthrie
+Jeffrey Mace
+Jekyll
+Jennifer Kale
+Jennifer Walters
+Jens Meilleur Slap Shot
+Jericho Drumm
+Jerome Beechman
+Jerry Jaxon
+Jessica Drew
+Jessica Jones
+Jester
+Jigsaw
+Jim Hammond
+Jimaine Szardos
+Jimmy Woo
+Jocasta
+Joe Cartelli
+Joe Fixit
+Joey Bailey
+Johann Schmidt
+John Doe
+John Falsworth
+John Jameson
+John Proudstar
+John Ryker
+John Sublime
+John Walker
+Johnny Blaze
+Johnny Ohm
+Johnny Storm
+Jolt
+Jon Spectre
+Jonas Harrow
+Jonathan "John" Garrett
+Jonathan Richards
+Jonothon Starsmore
+Jordan Seberius
+Joseph
+Joshua Guthrie
+Joystick
+Jubilee
+Judas Traveller
+Jude the Entropic Man
+Juggernaut
+Julie Power
+Jumbo Carnation
+Junkpile
+Junta
+Justice
+Justin Hammer
+Justine Hammer
+Ka-Zar
+Kaine
+Kala
+Kaluu
+Kamal
+Kamo Tharnn
+Kamuu
+Kang the Conqueror
+Kangaroo
+Karen Page
+Karima Shapandar
+Karkas
+Karl Lykos
+Karl Malus
+Karl Mordo
+Karla Sofen
+Karma
+Karnak
+Karnilla
+Karolina Dean
+Karthon the Quester
+Kasper Cole
+Kate Bishop
+Kate Neville
+Katherine "Kitty" Pryde
+Katherine Reynolds
+Katie Power
+Katrina Luisa van Horne
+Katu
+Keen Marlow
+Kehl of Tauran
+Keith Kilham
+Kem Horkus
+Kenneth Crichton
+Key
+Khaos
+Khonshu
+Khoryphos
+Kiber the Cruel
+Kick-Ass
+Kid Colt
+Kid Nova
+Kiden Nixon
+Kierrok
+Killer Shrike
+Killpower
+Killraven
+Kilmer
+Kimura
+King Bedlam
+Kingo Sunen
+Kingpin
+Kirigi
+Kirtsyn Perrin Short Stop
+Kismet
+Kismet Deadly
+Kiss
+Kiwi Black
+Kkallakku
+Kl'rt
+Klaatu
+Klaw
+Kleinstocks
+Knickknack
+Kofi Whitemane
+Kogar
+Kohl Harder Boulder Man
+Korath the Pursuer
+Korg
+Kormok
+Korrek
+Korvac
+Korvus
+Kosmos
+Kraken
+Krakkan
+Krang
+Kraven the Hunter
+Krista Marwan
+Kristoff Vernard
+Kristoff von Doom
+Kro
+Krystalin
+Kubik
+Kukulcan
+Kurse
+Kurt Wagner
+Kwannon
+Kyle Gibney
+Kylun
+Kymaera
+La Lunatica
+La Nuit
+Lacuna
+Lady Deathstrike
+Lady Jacqueline Falsworth Crichton
+Lady Killer
+Lady Lark
+Lady Lotus
+Lady Mandarin
+Lady Mastermind
+Lady Octopus
+Lament
+Lancer
+Landslide
+Larry Bodine
+Lasher
+Laura Dean
+Layla Miller
+Lazarus
+Leader
+Leap-Frog
+Leash
+Lee Forrester
+Leech
+Left Hand
+Left-Winger
+Legacy
+Legion
+Leila Davis
+Leir
+Lemuel Dorcas
+Leo
+Leonard Samson
+Leonus
+Letha
+Levan
+Lianda
+Libra
+Lifeforce
+Lifeguard
+Lifter
+Lightbright
+Lighting Rod
+Lightmaster
+Lightspeed
+Lila Cheney
+Lilandra Neramani
+Lilith, the Daughter of Dracula
+Lin Sun
+Link
+Lionheart
+Live Wire
+Living Brain
+Living Colossus
+Living Diamond
+Living Eraser
+Living Hulk
+Living Laser
+Living Lightning
+Living Monolith
+Living Mummy
+Living Pharaoh
+Living Planet
+Living Totem
+Living Tribunal
+Liz Allan
+Lizard
+Llan the Sorcerer
+Lloigoroth
+Llyra
+Llyron
+Loa
+Lockdown
+Lockheed
+Lockjaw
+Locksmith
+Locus
+Locust
+Lodestone
+Logan
+Loki
+Longneck
+Longshot
+Lonnie Thompson Lincoln
+Looter
+Lord Chaos
+Lord Dark Wind
+Lord Pumpkin
+Lorelei
+Lorelei II
+Lorelei Travis
+Lorna Dane
+Lorvex
+Loss
+Louise Mason
+Lucas Brand
+Luchino Nefaria
+Lucifer
+Ludi
+Luke Cage
+Luna
+Lunatica
+Lunatik
+Lupa
+Lupo
+Lurking Unknown
+Lyja
+Lynx
+M
+M-Twins
+MN-E (Ultraverse)
+MODAM
+MODOK
+Mac Gargan
+Mach-IV
+Machine Man
+Machine Teen
+Machinesmith
+Mad Dog Rassitano
+Mad Jack
+Mad Jim Jaspers
+Mad Thinker
+Mad Thinker’s Awesome Android
+Mad-Dog
+Madam Slay
+Madame Hydra
+Madame MacEvil
+Madame Masque
+Madame Menace
+Madame Web
+Madcap
+Madeline Joyce
+Madelyne Pryor
+Madison Jeffries
+Maelstrom
+Maestro
+Magdalena
+Magdalene
+Maggott
+Magician
+Magik
+Magilla
+Magma
+Magneto
+Magnum
+Magnus
+Magus
+Maha Yogi
+Mahkizmo
+Major Mapleleaf
+Makkari
+Malekith the Accursed
+Malice
+Mammomax
+Man Mountain Marko
+Man-Ape
+Man-Beast
+Man-Brute
+Man-Bull
+Man-Eater
+Man-Elephant
+Man-Killer
+Man-Spider
+Man-Thing
+Man-Wolf
+Manbot
+Mandarin
+Mandrill
+Mandroid
+Mangle
+Mangog
+Manikin
+Manslaughter
+Manta
+Mantis
+Mantra
+Mar-Vell
+Marc Spector
+Marduk Kurios
+Margali Szardos
+Margaret Power
+Margo Damian
+Maria Hill
+Mariko Yashida
+Marius St. Croix
+Mark Gervaisnight Shade
+Mark Raxton
+Mark Scarlotti
+Mark Todd
+Marlene Alraune
+Marrina
+Marrina Smallwood
+Marrow
+Marsha Rosenberg
+Martha Johansson
+Martin Gold
+Martin Preston
+Martinex
+Marvel Boy
+Marvel Girl
+Marvel Man
+Marvin Flumm
+Mary "Skeeter" MacPherran
+Mary Jane Parker
+Mary Jane Watson
+Mary Walker
+Mary Zero
+Masked Marauder
+Masked Marvel
+Masked Rose
+Masque
+Mass Master
+Master
+Master Khan
+Master Man
+Master Menace
+Master Mold
+Master Order
+Master Pandemonium
+Master of Vengeance
+Mastermind
+Mastermind of the UK
+Matador
+Match
+Matsu'o Tsurayaba
+Matt Murdock
+Mauler
+Maur-Konn
+Mauvais
+Maverick
+Max
+Maxam
+Maximus
+Maxwell Dillon
+May "Mayday" Parker
+May Parker
+Mayhem
+Maynard Tiboldt
+Meanstreak
+Meathook
+Mechamage
+Medusa
+Meggan
+Meggan Braddock
+Mekano
+Meld
+Melee
+Melissa Gold
+Melody Guthrie
+Meltdown
+Melter
+Mentallo
+Mentor
+Mentus
+Mephisto
+Mercurio
+Mercury
+Mercy
+Merlin
+Mesmero
+Metal Master
+Metalhead
+Meteor Man
+Meteorite
+Meteorite II
+Michael Nowman
+Michael Twoyoungmen
+Micro
+Microchip
+Micromax
+Midas
+Midgard Serpent
+Midnight
+Midnight Man
+Midnight Sun
+Miek
+Miguel Espinosa
+Miguel O'Hara
+Miguel Santos
+Mikado
+Mikey
+Mikhail Rasputin
+Mikula Golubev
+Milan
+Miles Warren
+Milos Masaryk
+Mimic
+Mimir
+Mindmeld
+Mindworm
+Miracle Man
+Mirage
+Mirage II
+Misfit
+Miss America
+Missing Link
+Mist Mistress
+Mister Buda
+Mister Doll
+Mister Fear
+Mister Hyde
+Mister Jip
+Mister Machine
+Mister One
+Mister Sensitive
+Mister Sinister
+Mister Two
+Mister X
+Misty Knight
+Mockingbird
+Modred the Mystic
+Mogul of the Mystic Mountain
+Moira Brandon
+Moira MacTaggert
+Mojo
+Mole Man
+Molecule Man
+Molly Hayes
+Molten Man
+Mondo
+Monet St. Croix
+Mongoose
+Monica Rappaccini
+Monsoon
+Monstra
+Monstro the Mighty
+Moon Knight
+Moon-Boy
+Moondark
+Moondragon
+Moonhunter
+Moonstone
+Mop Man
+Morbius
+Mordred
+Morg
+Morgan Le Fay
+Morlun
+Morning Star
+Morph
+Morpheus
+Morris Bench
+Mortimer Toynbee
+Moses Magnum
+Mosha
+Mother Earth
+Mother Nature
+Mother Night
+Mother Superior
+Motormouth
+Mountjoy
+Mr. Fish
+Mr. Justice
+Mr. M
+Mr. Wu
+Ms. MODOK
+Ms. Marvel
+Ms. Steed
+Multiple Man
+Murmur
+Murmur II
+Mutant Master
+Mutant X
+Myron MacLain
+Mys-Tech
+Mysterio
+Mystique
+N'Gabthoth
+N'Garai
+N'astirh
+NFL Superpro
+Naga
+Nameless One
+Namor McKenzie
+Namor the Sub-Mariner
+Namora
+Namorita
+Nanny
+Nate Grey
+Nathaniel Essex
+Nathaniel Richards
+Native
+Nebula
+Nebulo
+Nebulon
+Nebulos
+Necrodamus
+Necromantra
+Ned Horrocks
+Ned Leeds
+Needle
+Nefarius
+Negasonic Teenage Warhead
+Nekra
+Nekra Sinclar
+Nemesis
+Neophyte
+Neptune
+Network
+Neuronne
+Neurotap
+New Goblin
+Nezarr the Calculator
+Nicholas Maunder
+Nicholas Scratch
+Nick Fury
+Nico Minoru
+Nicole St. Croix
+Night Nurse
+Night Rider
+Night Thrasher
+Nightcrawler
+Nighthawk
+Nightmare
+Nightshade
+Nightside
+Nightwatch
+Nightwind
+Nikki
+Niles Van Roekel
+Nimrod
+Ningal
+Nitro
+Nobilus
+Nocturne
+Noh-Varr
+Nomad
+Norman Osborn
+Norns
+Norrin Radd
+Northstar
+Nosferata
+Nova
+Nova-Prime
+Novs
+Nox
+Nth Man
+Nth Man: the Ultimate Ninja
+Nuke - Frank Simpson
+Nuke - Squadron Supreme Member
+Nuklo
+Numinus
+Nut
+Obadiah Stane
+Obituary
+Obliterator
+Oblivion
+Occulus
+Ocean
+Ocelot
+Oddball
+Odin
+Ogre
+Ogress
+Omega
+Omega Red
+Omega the Unknown
+Omen
+Omerta
+One Above All
+Oneg the Prober
+Onslaught
+Onyxx
+Ooze
+Optoman
+Oracle
+Orator
+Orb
+Orbit
+Orchid
+Ord
+Order
+Orikal
+Orka
+Ororo Munroe
+Orphan
+Orphan-Maker
+Osiris
+Outlaw
+Outrage
+Overkill
+Overmind
+Overrider
+Owl
+Ox
+Ozone
+Ozymandias
+Paibo
+Paige Guthrie
+Paladin
+Paradigm
+Paragon
+Paralyzer
+Paris
+Pasco
+Paste-Pot Pete
+Patch
+Pathway
+Patriot
+Patriot II
+Patsy Hellstrom
+Patsy Walker
+Paul Bailey
+Paul Norbert Ebersol
+Paul Patterson
+Payback
+Peace Monger
+Peepers
+Peggy Carter
+Penance
+Penance II
+Peregrine
+Perfection
+Perseus
+Persuader
+Persuasion
+Perun
+Pete Wisdom
+Peter Criss
+Peter Noble
+Peter Parker
+Peter Petruski
+Phade
+Phage
+Phalanx
+Phantazia
+Phantom Blonde
+Phantom Eagle
+Phantom Rider
+Phastos
+Phat
+Phil Urich
+Philip Fetter
+Phineas T. Horton
+Phoenix
+Photon
+Phyla-Vell
+Pietro Maximoff
+Piledriver
+Piotr Rasputin
+Pip the Troll
+Pipeline
+Piper
+Piranha
+Pisces
+Pistol
+Pixie
+Pixx
+Plague
+Plantman
+Plasma
+Plazm
+Plug
+Plunderer
+Pluto
+Poison
+Polaris
+Poltergeist
+Porcupine
+Portal
+Possessor
+Postman
+Postmortem
+Poundcakes
+Powderkeg
+Power Broker
+Power Man
+Power Princess
+Power Skrull
+Powerhouse
+Powerpax
+Presence
+Pressure
+Prester John
+Pretty Persuasions
+Preview
+Primal
+Prime
+Prime Mover
+Primevil
+Primus
+Princess Python
+Proctor
+Prodigy
+Professor Power
+Professor X
+Projector
+Prometheus
+Protector
+Proteus
+Prototype
+Prowler
+Psi-Lord
+Psyche
+Psycho-Man
+Psyklop
+Psylocke
+Puck
+Puff Adder
+Puishannt
+Pulse
+Puma
+Punchout
+Punisher
+Punisher 2099
+Puppet Master
+Purge
+Purple Girl
+Purple Man
+Pyre
+Pyro
+Quagmire
+Quantum
+Quasar
+Quasar II
+Quasimodo
+Quentin Beck
+Quentin Quire
+Quicksand
+Quicksilver
+Quincy Harker
+Raa of the Caves
+Rachel Grey
+Rachel Summers
+Rachel van Helsing
+Radian
+Radioactive Man
+Radion the Atomic Man
+Radius
+Rafferty
+Rage
+Raggadorr
+Rahne Sinclair
+Rainbow
+Rama-Tut
+Raman
+Ramrod
+Ramshot
+Rancor
+Randall Shire
+Random
+Ranger
+Ransak the Reject
+Rattler
+Ravage 2099
+Raving Beauty
+Rawhide Kid
+Rax
+Raymond Sikorsky
+Raza
+Razor Fist
+Razorback
+Reaper
+Rebel
+Recorder
+Red Claw
+Red Ghost
+Red Guardian
+Red Lotus
+Red Nine
+Red Raven
+Red Ronin
+Red Shift
+Red Skull
+Red Skull II
+Red Wolf
+Redeemer
+Redneck
+Redwing
+Reeva Payge
+Reignfire
+Reject
+Remnant
+Remy LeBeau
+Reptyl
+Revanche
+Rex Mundi
+Rhiannon
+Rhino
+Ricadonna
+Richard Fisk
+Richard Parker
+Richard Rider
+Rick Jones
+Ricochet
+Rictor
+Rigellian Recorder
+Right-Winger
+Ringer
+Ringleader
+Ringmaster
+Ringo Kid
+Rintrah
+Riot
+Riot Grrl
+Ripfire
+Ritchie Gilmore
+Rl'nnd
+Robbie Robertson
+Robert "Bobby" Drake
+Robert Bruce Banner
+Robert Hunter
+Robert Kelly
+Robert da Costa
+Rock
+Rock Python
+Rocket Raccoon
+Rocket Racer
+Rodstvow
+Rogue
+Rom the Spaceknight
+Roma
+Romany Wisdom
+Ronan the Accuser
+Rose
+Roughhouse
+Roulette
+Royal Roy
+Ruby Thursday
+Ruckus
+Rumiko Fujikawa
+Rune
+Runner
+Rush
+Rusty Collins
+Ruth Bat-Seraph
+Ryder
+S'byll
+S'ym
+Sabra
+Sabreclaw
+Sabretooth
+Sack
+Sage
+Sagittarius
+Saint Anna
+Saint Elmo
+Sally Blevins
+Sally Floyd
+Salvo
+Sam Sawyer
+Sam Wilson
+Samuel "Starr" Saxon
+Samuel Guthrie
+Samuel Silke
+Samuel Smithers
+Sandman
+Sangre
+Sara Grey
+Sasquatch
+Satana
+Satannish
+Saturnyne
+Sauron
+Savage Steel
+Sayge
+Scaleface
+Scalphunter
+Scanner
+Scarecrow
+Scarecrow II
+Scarlet Beetle
+Scarlet Centurion
+Scarlet Scarab
+Scarlet Spider
+Scarlet Spiders
+Scarlet Witch
+Schemer
+Scimitar
+Scintilla
+Scorcher
+Scorpia
+Scorpio
+Scorpion
+Scott Summers
+Scott Washington
+Scourge of the Underworld
+Scrambler
+Scream
+Screaming Mimi
+Screech
+Scrier
+Sea Urchin
+Seamus Mellencamp
+Sean Cassidy
+Sean Garrison
+Sebastian Shaw
+Seeker
+Sekhmet
+Selene
+Senator Robert Kelly
+Senor Muerte
+Sentry
+Sepulchre
+Sergeant Fury
+Sergei Kravinoff
+Serpentina
+Sersi
+Set
+Seth
+Shadow King
+Shadow Slasher
+Shadow-Hunter
+Shadowcat
+Shadowmage
+Shadrac
+Shalla-Bal
+Shaman
+Shamrock
+Shang-Chi
+Shanga
+Shanna the She-Devil
+Shaper of Worlds
+Shard
+Sharon Carter
+Sharon Friedlander
+Sharon Ventura
+Shathra
+Shatter
+Shatterfist
+Shatterstar
+She-Hulk
+She-Thing
+She-Venom
+Shellshock
+Shen Kuei
+Shi'ar Gladiator
+Shinchuko Lotus
+Shingen Harada
+Shinobi Shaw
+Shirow Ishihara
+Shiva
+Shiver Man
+Shocker
+Shockwave
+Shola Inkosi
+Shooting Star
+Shotgun
+Shriek
+Shriker
+Shroud
+Shrunken Bones
+Shuma-Gorath
+Sidewinder
+Siege
+Siena Blaze
+Sif
+Sigmar
+Sigyn
+Sikorsky
+Silhouette
+Silly Seal
+Silver
+Silver Dagger
+Silver Fox
+Silver Sable
+Silver Samurai
+Silver Scorpion
+Silver Squire
+Silver Surfer
+Silverclaw
+Silvermane
+Simon Williams
+Sin
+Sin-Eater
+Sinister
+Sir Steel
+Siryn
+Sise-Neg
+Skein
+Skids
+Skin
+Skinhead
+Skull the Slayer
+Skullcrusher
+Skullfire
+Skunge the Laxidazian Troll
+Skyhawk
+Skywalker
+Slab
+Slapstick
+Sleek
+Sleeper
+Sleepwalker
+Slick
+Sligguth
+Slipstream
+Slither
+Sludge
+Slug
+Sluggo
+Sluk
+Slyde
+Smart Alec
+Smartship Friday
+Smasher
+Smuggler
+Smuggler II
+Snowbird
+Snowfall
+Solara
+Solarman
+Solarr
+Soldier X
+Solitaire
+Solo
+Solomon O'Sullivan
+Son of Satan
+Songbird
+Soulfire
+Space Phantom
+Space Turnip
+Specialist
+Spectra
+Spectral
+Speed
+Speed Demon
+Speedball
+Speedo
+Spellbinder
+Spellcheck
+Spencer Smythe
+Sphinx
+Sphinxor
+Spider Doppelganger
+Spider-Girl
+Spider-Ham
+Spider-Man
+Spider-Slayer
+Spider-Woman
+Spidercide
+Spike
+Spike Freeman
+Spinnerette
+Spiral
+Spirit of '76
+Spitfire
+Spoilsport
+Spoor
+Spot
+Sprite
+Sputnik
+Spyder
+Spymaster
+Spyne
+Squidboy
+Squirrel Girl
+St. John Allerdyce
+Stacy X
+Stained Glass Scarlet
+Stakar
+Stallior
+Stanley Stewart
+Star Stalker
+Star Thief
+Star-Dancer
+Star-Lord
+Starbolt
+Stardust
+Starfox
+Starhawk
+Starlight
+Starr the Slayer
+Starshine
+Starstreak
+Stature
+Steel Raven
+Steel Serpent
+Steel Spider
+Stegron
+Stellaris
+Stem Cell
+Stentor
+Stephen Colbert
+Stephen Strange
+Steve Rogers
+Steven Lang
+Stevie Hunter
+Stick
+Stiletto
+Stilt-Man
+Stinger
+Stingray
+Stitch
+Stone
+Stonecutter
+Stonewall
+Storm
+Stranger
+Stratosfire
+Straw Man
+Strobe
+Strong Guy
+Strongarm
+Stryfe
+Stunner
+Stuntmaster
+Stygorr
+Stygyro
+Styx and Stone
+Sub-Mariner
+Sugar Man
+Suicide
+Sultan
+Sun Girl
+Sunder
+Sundragon
+Sunfire
+Sunpyre
+Sunset Bain
+Sunspot
+Sunstreak
+Sunstroke
+Sunturion
+Super Rabbit
+Super Sabre
+Super-Adaptoid
+Super-Nova
+Super-Skrull
+SuperPro
+Supercharger
+Superia
+Supernalia
+Suprema
+Supreme Intelligence
+Supremor
+Surge
+Surtur
+Susan Richards
+Susan Storm
+Sushi
+Svarog
+Swarm
+Sweetface
+Swordsman
+Sybil Dorn
+Sybil Dvorak
+Synch
+T-Ray
+Tabitha Smith
+Tag
+Tagak the Leopard Lord
+Tailhook
+Taj Nital
+Talia Josephine Wagner
+Talisman
+Tamara Rahn
+Tana Nile
+Tantra
+Tanya Anderssen
+Tarantula
+Tarot
+Tartarus
+Taskmaster
+Tatterdemalion
+Tattletale
+Tattoo
+Taurus
+Techno
+Tefral the Surveyor
+Tempest
+Tempo
+Tempus
+Temugin
+Tenpin
+Termagaira
+Terminator
+Terminatrix
+Terminus
+Terrax the Tamer
+Terraxia
+Terror
+Tess-One
+Tessa
+Tether
+Tethlam
+Tex Dawson
+Texas Twister
+Thakos
+Thane Ector
+Thanos
+The Amazing Tanwir Ahmed
+The Angel
+The Blank
+The Destroyer
+The Entity
+The Grip
+The Night Man
+The Profile
+The Russian
+The Stepford Cuckoos
+The Symbiote
+The Wink
+Thena
+Theresa Cassidy
+Thermo
+Thin Man
+Thing
+Thinker
+Thirty-Three
+Thog
+Thomas Halloway
+Thor
+Thor Girl
+Thornn
+Threnody
+Thumbelina
+Thunderball
+Thunderbird
+Thunderbolt
+Thunderclap
+Thunderfist
+Thunderstrike
+Thundra
+Tiboro
+Tiger Shark
+Tigra
+Timberius
+Time Bomb
+Timeshadow
+Timeslip
+Tinkerer
+Titan
+Titania
+Titanium Man
+Tito Bohusk
+Toad
+Toad-In-Waiting
+Todd Arliss
+Tom Cassidy
+Tom Corsi
+Tom Foster
+Tom Thumb
+Tomazooma
+Tombstone
+Tommy
+Tommy Lightning
+Tomorrow Man
+Tony Stark
+Topaz
+Topspin
+Torgo of Mekka
+Torgo the Vampire
+Toro
+Torpedo
+Torrent
+Torso
+Tower
+Toxin
+Trader
+Trapper
+Trapster
+Tremolo
+Trevor Fitzroy
+Tri-Man
+Triathlon
+Trick Shot
+Trioccula
+Trip Monroe
+Triton
+Troll
+Trump
+Tuc
+Tugun
+Tumbler
+Tundra
+Turac
+Turbo
+Turner Century
+Turner D. Century
+Tusk
+Tutinax the Mountain-Mover
+Two-Gun Kid
+Tyger Tiger
+Typeface
+Typhoid
+Typhoid Mary
+Typhon
+Tyr
+Tyrak
+Tyrannosaur
+Tyrannus
+Tyrant
+Tzabaoth
+U-Go Girl
+U-Man
+USAgent
+Uatu
+Ulik
+Ultimo
+Ultimus
+Ultra-Marine
+Ultragirl
+Ultron
+Ulysses
+Umar
+Umbo
+Uncle Ben Parker
+Uni-Mind
+Unicorn
+Union Jack
+Unseen
+Unthinnk
+Unus the Untouchable
+Unuscione
+Ursa Major
+Urthona
+Utgard-Loki
+Vagabond
+Vague
+Vakume
+Valentina Allegra de La Fontaine
+Valerie Cooper
+Valinor
+Valkin
+Valkyrie
+Valtorr
+Vamp
+Vampire by Night
+Vance Astro
+Vance Astrovik
+Vanguard
+Vanisher
+Vapor
+Vargas
+Varnae
+Vashti
+Vavavoom
+Vector
+Vegas
+Veil
+Vengeance
+Venom
+Venomm
+Venus
+Venus Dee Milo
+Veritas
+Vermin
+Vertigo
+Vesta
+Vibraxas
+Vibro
+Victor Creed
+Victor Mancha
+Victor Strange
+Victor von Doom
+Victorius
+Vidar
+Vincente
+Vindaloo
+Vindicator
+Viper
+Virako
+Virginia "Pepper" Potts
+Virgo
+Vishanti
+Visimajoris
+Vision
+Vivisector
+Vixen
+Volcana
+Volla
+Volpan
+Volstagg
+Vulcan
+Vulture
+Wade Wilson
+Wallflower
+Walter Newell
+Wanda Maximoff
+War
+War Eagle
+War Machine
+War V
+Warbird
+Warhawk
+Warlock
+Warpath
+Warren III Worthington
+Warrior Woman
+Warstar
+Warstrike
+Warwolves
+Washout
+Wasp
+Watcher
+Water Wizard
+Watoomb
+Weapon X
+Wendell Vaughn
+Wendigo
+Werewolf by Night
+Western Kid
+Whiplash
+Whirlwind
+Whistler
+White Fang
+White Pilgrim
+White Queen
+White Rabbit
+White Tiger
+Whiteout
+Whizzer
+Wiccan
+Wicked
+Widget
+Wilbur Day
+Wild Child
+Wild Thing
+Wildboys
+Wildpride
+Wildside
+Will o' the Wisp
+William Baker
+William Stryker
+Willie Lumpkin
+Wilson Fisk
+Wind Dancer
+Wind Warrior
+Windeagle
+Windshear
+Winky Man
+Winter Soldier
+Witchfire
+Wiz Kid
+Wizard
+Wolf
+Wolfsbane
+Wolverine
+Wonder Man
+Wong
+Woodgod
+Worm
+Wraith
+Wrath
+Wreckage
+Wrecker
+Wundarr the Aquarian
+Wyatt Wingfoot
+Wysper
+X-23
+X-Cutioner
+X-Man
+X-Ray
+X-Treme
+Xandu
+Xavin
+Xemnu the Titan
+Xemu
+Xi'an Chi Xan
+Xorn
+Xorr the God-Jewel
+Y'Garon
+Yandroth
+Yellow Claw
+Yellowjacket
+Yeti
+Yith
+Ymir
+Yondu
+Yrial
+Yukio
+Yukon Jack
+Yuri Topolov
+Yuriko Oyama
+Zabu
+Zach
+Zaladane
+Zarathos
+Zarek
+Zartra
+Zebediah Killgrave
+Zeitgeist
+Zero
+Zero-G
+Zeus
+Ziggy Pig
+Zip-Zap
+Zodiak
+Zom
+Zombie
+Zuras
+Zzzax
+gen Harada
+the Living Colossus It
+the Living Darkness Null
+the Renegade Watcher Aron
+the Tomorrow Man Zarrko
diff --git a/src/main/resources/es-build.properties b/src/main/resources/es-build.properties
new file mode 100644
index 0000000..563ecdd
--- /dev/null
+++ b/src/main/resources/es-build.properties
@@ -0,0 +1,3 @@
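+# build metadata; the ${...} placeholders are filled in at build time by Maven
+# resource filtering (the hash presumably comes from the buildnumber plugin)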
+version=${project.version}
+hash=${buildNumber}
+timestamp=${timestamp}
diff --git a/src/rpm/init.d/elasticsearch b/src/rpm/init.d/elasticsearch
new file mode 100644
index 0000000..a5ba651
--- /dev/null
+++ b/src/rpm/init.d/elasticsearch
@@ -0,0 +1,158 @@
+#!/bin/sh
+#
+# elasticsearch <summary>
+#
+# chkconfig: 2345 80 20
+# description: Starts and stops a single elasticsearch instance on this system
+#
+
+### BEGIN INIT INFO
+# Provides: elasticsearch
+# Required-Start: $network $named
+# Required-Stop: $network $named
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: This service manages the elasticsearch daemon
+# Description: Elasticsearch is a scalable, schema-free, high-performance search solution supporting multi-tenancy and near real-time search.
+### END INIT INFO
+
+#
+# init.d / servicectl compatibility (openSUSE)
+#
+if [ -f /etc/rc.status ]; then
+ . /etc/rc.status
+ rc_reset
+fi
+
+#
+# Source function library.
+#
+if [ -f /etc/rc.d/init.d/functions ]; then
+ . /etc/rc.d/init.d/functions
+fi
+
+exec="/usr/share/elasticsearch/bin/elasticsearch"
+prog="elasticsearch"
+pidfile=/var/run/elasticsearch/${prog}.pid
+
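+# pull in user settings (heap size, directories, limits) from /etc/sysconfig/elasticsearch, if present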
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+export ES_HEAP_SIZE
+export ES_HEAP_NEWSIZE
+export ES_DIRECT_SIZE
+export ES_JAVA_OPTS
+
+lockfile=/var/lock/subsys/$prog
+
+# backwards compatibility for old sysconfig files, pre 0.90.1
+if [ -n "$USER" ] && [ -z "$ES_USER" ] ; then
+ ES_USER=$USER
+fi
+
+checkJava() {
+ if [ -x "$JAVA_HOME/bin/java" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+ else
+ JAVA=`which java`
+ fi
+
+ if [ ! -x "$JAVA" ]; then
+ echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
+ exit 1
+ fi
+}
+
+start() {
+ checkJava
+ [ -x "$exec" ] || exit 5
+ [ -f "$CONF_FILE" ] || exit 6
+ if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then
+ echo "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
+ return 7
+ fi
+ if [ -n "$MAX_OPEN_FILES" ]; then
+ ulimit -n $MAX_OPEN_FILES
+ fi
+ if [ -n "$MAX_LOCKED_MEMORY" ]; then
+ ulimit -l $MAX_LOCKED_MEMORY
+ fi
+ if [ -n "$MAX_MAP_COUNT" ]; then
+ sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
+ fi
+ if [ -n "$WORK_DIR" ]; then
+ mkdir -p "$WORK_DIR"
+ chown "$ES_USER":"$ES_GROUP" "$WORK_DIR"
+ fi
+ echo -n $"Starting $prog: "
+ # if not running, start it up here, usually something like "daemon $exec"
+ daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d \
+ -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR \
+ -Des.default.path.data=$DATA_DIR -Des.default.path.work=$WORK_DIR \
+ -Des.default.path.conf=$CONF_DIR
+ retval=$?
+ echo
+ [ $retval -eq 0 ] && touch $lockfile
+ return $retval
+}
+
+stop() {
+ echo -n $"Stopping $prog: "
+ # stop it here, often "killproc $prog"
+ killproc -p $pidfile -d 20 $prog
+ retval=$?
+ echo
+ [ $retval -eq 0 ] && rm -f $lockfile
+ return $retval
+}
+
+restart() {
+ stop
+ start
+}
+
+reload() {
+ restart
+}
+
+force_reload() {
+ restart
+}
+
+rh_status() {
+ # run checks to determine if the service is running or use generic status
+ status -p $pidfile $prog
+}
+
+rh_status_q() {
+ rh_status >/dev/null 2>&1
+}
+
+
+case "$1" in
+ start)
+ rh_status_q && exit 0
+ $1
+ ;;
+ stop)
+ rh_status_q || exit 0
+ $1
+ ;;
+ restart)
+ $1
+ ;;
+ reload)
+ rh_status_q || exit 7
+ $1
+ ;;
+ force-reload)
+ force_reload
+ ;;
+ status)
+ rh_status
+ ;;
+ condrestart|try-restart)
+ rh_status_q || exit 0
+ restart
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+ exit 2
+esac
+exit $?
diff --git a/src/rpm/scripts/postinstall b/src/rpm/scripts/postinstall
new file mode 100644
index 0000000..317f7fe
--- /dev/null
+++ b/src/rpm/scripts/postinstall
@@ -0,0 +1,50 @@
+
+[ -f /etc/sysconfig/elasticsearch ] && . /etc/sysconfig/elasticsearch
+
+startElasticsearch() {
+ if [ -x /bin/systemctl ] ; then
+ /bin/systemctl start elasticsearch.service
+ elif [ -x /etc/init.d/elasticsearch ] ; then
+ /etc/init.d/elasticsearch start
+ # older suse linux distributions do not ship with systemd,
+ # but also do not have an /etc/init.d/ directory (they use /etc/rc.d/init.d/ instead);
+ # this tries to start elasticsearch on those as well without failing this script
+ elif [ -x /etc/rc.d/init.d/elasticsearch ] ; then
+ /etc/rc.d/init.d/elasticsearch start
+ fi
+}
+
+stopElasticsearch() {
+ if [ -x /bin/systemctl ] ; then
+ /bin/systemctl stop elasticsearch.service > /dev/null 2>&1 || :
+ elif [ -x /etc/init.d/elasticsearch ] ; then
+ /etc/init.d/elasticsearch stop
+ elif [ -x /etc/rc.d/init.d/elasticsearch ] ; then
+ /etc/rc.d/init.d/elasticsearch stop
+ fi
+}
+
+# Initial installation: $1 == 1
+# Upgrade: $1 == 2, and configured to restart on upgrade
+if [ $1 -eq 1 ] ; then
+
+ if [ -x /bin/systemctl ] ; then
+ echo "### NOT starting on installation, please execute the following statements to configure elasticsearch to start automatically using systemd"
+ echo " sudo /bin/systemctl daemon-reload"
+ echo " sudo /bin/systemctl enable elasticsearch.service"
+ echo "### You can start elasticsearch by executing"
+ echo " sudo /bin/systemctl start elasticsearch.service"
+
+
+ elif [ -x /sbin/chkconfig ] ; then
+ echo "### NOT starting on installation, please execute the following statements to configure elasticsearch to start automatically using chkconfig"
+ echo " sudo /sbin/chkconfig --add elasticsearch"
+ echo "### You can start elasticsearch by executing"
+ echo " sudo service elasticsearch start"
+ fi
+
+elif [ $1 -ge 2 ] && [ "$RESTART_ON_UPGRADE" = "true" ] ; then
+ stopElasticsearch
+ startElasticsearch
+fi
+
diff --git a/src/rpm/scripts/postremove b/src/rpm/scripts/postremove
new file mode 100644
index 0000000..6dedc29
--- /dev/null
+++ b/src/rpm/scripts/postremove
@@ -0,0 +1,15 @@
+# only execute in case of package removal, not on upgrade
+if [ $1 -eq 0 ] ; then
+
+ getent passwd elasticsearch > /dev/null
+ if [ "$?" == "0" ] ; then
+ userdel elasticsearch
+ fi
+
+ getent group elasticsearch >/dev/null
+ if [ "$?" == "0" ] ; then
+ groupdel elasticsearch
+ fi
+fi
+
+exit 0
diff --git a/src/rpm/scripts/preinstall b/src/rpm/scripts/preinstall
new file mode 100644
index 0000000..327c8d6
--- /dev/null
+++ b/src/rpm/scripts/preinstall
@@ -0,0 +1,4 @@
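+# create a dedicated system group and user on first install
+# (-r: system account; /sbin/nologin: no interactive login)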
+getent group elasticsearch >/dev/null || groupadd -r elasticsearch
+getent passwd elasticsearch >/dev/null || \
+ useradd -r -g elasticsearch -d /usr/share/elasticsearch -s /sbin/nologin \
+ -c "elasticsearch user" elasticsearch
diff --git a/src/rpm/scripts/preremove b/src/rpm/scripts/preremove
new file mode 100644
index 0000000..1627c19
--- /dev/null
+++ b/src/rpm/scripts/preremove
@@ -0,0 +1,29 @@
+
+[ -f /etc/sysconfig/elasticsearch ] && . /etc/sysconfig/elasticsearch
+
+stopElasticsearch() {
+ if [ -x /bin/systemctl ] ; then
+ /bin/systemctl stop elasticsearch.service > /dev/null 2>&1 || :
+ elif [ -x /etc/init.d/elasticsearch ] ; then
+ /etc/init.d/elasticsearch stop
+ elif [ -x /etc/rc.d/init.d/elasticsearch ] ; then
+ /etc/rc.d/init.d/elasticsearch stop
+ fi
+}
+
+# Removal: $1 == 0
+# Don't do anything on upgrade, because the preun script in redhat gets executed after the postinst (madness!)
+if [ $1 -eq 0 ] ; then
+
+ if [ -x /bin/systemctl ] ; then
+ /bin/systemctl --no-reload disable elasticsearch.service > /dev/null 2>&1 || :
+ fi
+
+ if [ -x /sbin/chkconfig ] ; then
+ /sbin/chkconfig --del elasticsearch 2> /dev/null
+ fi
+
+ stopElasticsearch
+fi
+
+exit 0
diff --git a/src/rpm/sysconfig/elasticsearch b/src/rpm/sysconfig/elasticsearch
new file mode 100644
index 0000000..f27c497
--- /dev/null
+++ b/src/rpm/sysconfig/elasticsearch
@@ -0,0 +1,46 @@
+# Directory where the Elasticsearch binary distribution resides
+ES_HOME=/usr/share/elasticsearch
+
+# Heap Size (defaults to 256m min, 1g max)
+#ES_HEAP_SIZE=2g
+
+# Heap new generation
+#ES_HEAP_NEWSIZE=
+
+# max direct memory
+#ES_DIRECT_SIZE=
+
+# Additional Java OPTS
+#ES_JAVA_OPTS=
+
+# Maximum number of open files
+MAX_OPEN_FILES=65535
+
+# Maximum amount of locked memory
+#MAX_LOCKED_MEMORY=
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+MAX_MAP_COUNT=262144
+
+# Elasticsearch log directory
+LOG_DIR=/var/log/elasticsearch
+
+# Elasticsearch data directory
+DATA_DIR=/var/lib/elasticsearch
+
+# Elasticsearch work directory
+WORK_DIR=/tmp/elasticsearch
+
+# Elasticsearch conf directory
+CONF_DIR=/etc/elasticsearch
+
+# Elasticsearch configuration file (elasticsearch.yml)
+CONF_FILE=/etc/elasticsearch/elasticsearch.yml
+
+# User to run as; change this to a dedicated elasticsearch user if possible.
+# Also make sure this user can write to the log directories in case you change them.
+# This setting only works for the init script; it has to be configured separately for systemd startup.
+ES_USER=elasticsearch
+
+# Configure restart on package upgrade (only "true" restarts; every other value will lead to not restarting)
+#RESTART_ON_UPGRADE=true
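+#
+# Example (hypothetical values): to lock the whole heap in RAM you could set
+#   ES_HEAP_SIZE=2g
+#   MAX_LOCKED_MEMORY=unlimited
+# together with "bootstrap.mlockall: true" in elasticsearch.yml; note that the
+# init script refuses MAX_LOCKED_MEMORY without ES_HEAP_SIZE.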
diff --git a/src/rpm/systemd/elasticsearch.conf b/src/rpm/systemd/elasticsearch.conf
new file mode 100644
index 0000000..9db225e
--- /dev/null
+++ b/src/rpm/systemd/elasticsearch.conf
@@ -0,0 +1 @@
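+# tmpfiles.d entry: create /run/elasticsearch at boot, mode 0755, owned by elasticsearch:elasticsearch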
+d /run/elasticsearch 0755 elasticsearch elasticsearch - -
diff --git a/src/rpm/systemd/elasticsearch.service b/src/rpm/systemd/elasticsearch.service
new file mode 100644
index 0000000..5642018
--- /dev/null
+++ b/src/rpm/systemd/elasticsearch.service
@@ -0,0 +1,20 @@
+[Unit]
+Description=Starts and stops a single elasticsearch instance on this system
+Documentation=http://www.elasticsearch.org
+
+[Service]
+Type=forking
+EnvironmentFile=/etc/sysconfig/elasticsearch
+User=elasticsearch
+Group=elasticsearch
+PIDFile=/var/run/elasticsearch/elasticsearch.pid
+ExecStart=/usr/share/elasticsearch/bin/elasticsearch -d -p /var/run/elasticsearch/elasticsearch.pid -Des.default.config=$CONF_FILE -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.work=$WORK_DIR -Des.default.path.conf=$CONF_DIR
+# See MAX_OPEN_FILES in sysconfig
+LimitNOFILE=65535
+# See MAX_LOCKED_MEMORY in sysconfig, use "infinity" when MAX_LOCKED_MEMORY=unlimited and using bootstrap.mlockall: true
+#LimitMEMLOCK=infinity
+# Shutdown delay in seconds before the process is killed with SIGKILL (if it did not stop in time)
+TimeoutStopSec=20
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/rpm/systemd/sysctl.d/elasticsearch.conf b/src/rpm/systemd/sysctl.d/elasticsearch.conf
new file mode 100644
index 0000000..62ea54d
--- /dev/null
+++ b/src/rpm/systemd/sysctl.d/elasticsearch.conf
@@ -0,0 +1 @@
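+# raise the per-process limit on memory-mapped areas; Elasticsearch needs a high
+# vm.max_map_count for its memory-mapped index files (matches MAX_MAP_COUNT in the sysconfig file)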
+vm.max_map_count=262144
diff --git a/src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java b/src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java
new file mode 100644
index 0000000..bf890e8
--- /dev/null
+++ b/src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.carrotsearch.randomizedtesting;
+
+/**
+ * Exposes methods that make it possible to use a {@link RandomizedContext} without a {@link RandomizedRunner}.
+ * This is specifically needed by the REST tests, since they run with a custom junit runner ({@link org.elasticsearch.test.rest.junit.RestTestSuiteRunner}).
+ */
+public final class StandaloneRandomizedContext {
+
+ private StandaloneRandomizedContext() {
+
+ }
+
+ /**
+ * Creates a new {@link RandomizedContext} associated with the current thread
+ */
+ public static void createRandomizedContext(Class<?> testClass, Randomness runnerRandomness) {
+ //the randomized runner is passed in as null, which is fine as long as we don't try to access it afterwards
+ RandomizedContext randomizedContext = RandomizedContext.create(Thread.currentThread().getThreadGroup(), testClass, null);
+ randomizedContext.push(runnerRandomness.clone(Thread.currentThread()));
+ }
+
+ /**
+ * Destroys the {@link RandomizedContext} associated with the current thread
+ */
+ public static void disposeRandomizedContext() {
+ RandomizedContext.current().dispose();
+ }
+
+ public static void pushRandomness(Randomness randomness) {
+ RandomizedContext.current().push(randomness);
+ }
+
+ public static void popAndDestroy() {
+ RandomizedContext.current().popAndDestroy();
+ }
+
+ /**
+ * Returns the string-formatted seed associated with the current thread's randomized context
+ */
+ public static String getSeedAsString() {
+ return SeedUtils.formatSeed(RandomizedContext.current().getRandomness().getSeed());
+ }
+
+ /**
+ * Utility method to extract the seed from a {@link Randomness} instance
+ */
+ public static long getSeed(Randomness randomness) {
+ return randomness.getSeed();
+ }
+}
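+/*
+ * A minimal usage sketch (hypothetical; the class and variable names below are
+ * illustrative, not part of this file): a custom runner would typically bracket
+ * each test roughly like this:
+ *
+ *   StandaloneRandomizedContext.createRandomizedContext(MyTests.class, runnerRandomness);
+ *   try {
+ *       // run the test body against RandomizedContext.current()
+ *   } finally {
+ *       StandaloneRandomizedContext.disposeRandomizedContext();
+ *   }
+ */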
diff --git a/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java b/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java
new file mode 100644
index 0000000..893020f
--- /dev/null
+++ b/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.Reader;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class TruncateTokenFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTest() throws IOException {
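+ // analyzer whose TruncateTokenFilter caps every emitted token at 3 characters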
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName,
+ Reader reader) {
+ Tokenizer t = new WhitespaceTokenizer(Lucene.VERSION, reader);
+ return new TokenStreamComponents(t, new TruncateTokenFilter(t, 3));
+ }
+ };
+
+ TokenStream test = analyzer.tokenStream("test", "a bb ccc dddd eeeee");
+ test.reset();
+ CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("a"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("bb"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("ccc"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("ddd"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("eee"));
+
+ assertThat(test.incrementToken(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java b/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java
new file mode 100644
index 0000000..e1d49f3
--- /dev/null
+++ b/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.Reader;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class UniqueTokenFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTest() throws IOException {
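+ // UniqueTokenFilter emits each term only once per stream, so the repeated "test" must be dropped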
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName,
+ Reader reader) {
+ Tokenizer t = new WhitespaceTokenizer(Lucene.VERSION, reader);
+ return new TokenStreamComponents(t, new UniqueTokenFilter(t));
+ }
+ };
+
+ TokenStream test = analyzer.tokenStream("test", "this test with test");
+ test.reset();
+ CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("this"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("test"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("with"));
+
+ assertThat(test.incrementToken(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java
new file mode 100644
index 0000000..1bdf4e1
--- /dev/null
+++ b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
+import org.apache.lucene.util.BytesRef;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+
+public class CustomPassageFormatterTests {
+
+ @Test
+ public void testSimpleFormat() {
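+ // three passages over one string: two contain a match (to be wrapped in <em> tags),
+ // the last has none and must come back unhighlighted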
+ String content = "This is a really cool highlighter. Postings highlighter gives nice snippets back. No matches here.";
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new DefaultEncoder());
+
+ Passage[] passages = new Passage[3];
+ String match = "highlighter";
+ BytesRef matchBytesRef = new BytesRef(match);
+
+ Passage passage1 = new Passage();
+ int start = content.indexOf(match);
+ int end = start + match.length();
+ passage1.startOffset = 0;
+ passage1.endOffset = end + 2; //let's include the whitespace at the end to make sure we trim it
+ passage1.addMatch(start, end, matchBytesRef);
+ passages[0] = passage1;
+
+ Passage passage2 = new Passage();
+ start = content.lastIndexOf(match);
+ end = start + match.length();
+ passage2.startOffset = passage1.endOffset;
+ passage2.endOffset = end + 26;
+ passage2.addMatch(start, end, matchBytesRef);
+ passages[1] = passage2;
+
+ Passage passage3 = new Passage();
+ passage3.startOffset = passage2.endOffset;
+ passage3.endOffset = content.length();
+ passages[2] = passage3;
+
+ Snippet[] fragments = passageFormatter.format(passages, content);
+ assertThat(fragments, notNullValue());
+ assertThat(fragments.length, equalTo(3));
+ assertThat(fragments[0].getText(), equalTo("This is a really cool <em>highlighter</em>."));
+ assertThat(fragments[0].isHighlighted(), equalTo(true));
+ assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
+ assertThat(fragments[1].isHighlighted(), equalTo(true));
+ assertThat(fragments[2].getText(), equalTo("No matches here."));
+ assertThat(fragments[2].isHighlighted(), equalTo(false));
+ }
+
+ @Test
+ public void testHtmlEncodeFormat() {
+ String content = "<b>This is a really cool highlighter.</b> Postings highlighter gives nice snippets back.";
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new SimpleHTMLEncoder());
+
+ Passage[] passages = new Passage[2];
+ String match = "highlighter";
+ BytesRef matchBytesRef = new BytesRef(match);
+
+ Passage passage1 = new Passage();
+ int start = content.indexOf(match);
+ int end = start + match.length();
+ passage1.startOffset = 0;
+ passage1.endOffset = end + 6; //let's include the whitespace at the end to make sure we trim it
+ passage1.addMatch(start, end, matchBytesRef);
+ passages[0] = passage1;
+
+ Passage passage2 = new Passage();
+ start = content.lastIndexOf(match);
+ end = start + match.length();
+ passage2.startOffset = passage1.endOffset;
+ passage2.endOffset = content.length();
+ passage2.addMatch(start, end, matchBytesRef);
+ passages[1] = passage2;
+
+ Snippet[] fragments = passageFormatter.format(passages, content);
+ assertThat(fragments, notNullValue());
+ assertThat(fragments.length, equalTo(2));
+ assertThat(fragments[0].getText(), equalTo("&lt;b&gt;This is a really cool <em>highlighter</em>.&lt;&#x2F;b&gt;"));
+ assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
+ }
+}
diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java
new file mode 100644
index 0000000..d33ece1
--- /dev/null
+++ b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java
@@ -0,0 +1,487 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.search.highlight.HighlightUtils;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+@LuceneTestCase.SuppressCodecs({"MockFixedIntBlock", "MockVariableIntBlock", "MockSep", "MockRandom", "Lucene3x"})
+public class CustomPostingsHighlighterTests extends ElasticsearchLuceneTestCase {
+
+ @Test
+ public void testDiscreteHighlightingPerValue() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second value to perform highlighting on.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the third value to test highlighting with postings.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ List<Object> fieldValues = new ArrayList<Object>();
+ fieldValues.add(firstValue);
+ fieldValues.add(secondValue);
+ fieldValues.add(thirdValue);
+
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ //highlighting per value, considering whole values (simulating number_of_fragments=0)
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0);
+ highlighter.setBreakIterator(new WholeBreakIterator());
+
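+ // with mergeValues=false each highlightDoc call advances to the next stored value,
+ // hence the three identical-looking calls below return different snippets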
+ Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is a test. Just a test <b>highlighting</b> from postings highlighter."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the second value to perform <b>highlighting</b> on."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the third value to test <b>highlighting</b> with postings."));
+
+
+ //let's also try without the whole break iterator, to prove that highlighting works the same when run per value (though not optimized)
+ highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0);
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("Just a test <b>highlighting</b> from postings highlighter."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the second value to perform <b>highlighting</b> on."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the third value to test <b>highlighting</b> with postings."));
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that scoring works properly even when using discrete per-value highlighting
+ */
+ @Test
+ public void testDiscreteHighlightingScoring() throws Exception {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ //good position but only one match
+ final String firstValue = "This is a test. Just a test1 highlighting from postings highlighter.";
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ //two matches, not the best snippet due to its length though
+ final String secondValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ //two matches and short, will be scored highest
+ final String thirdValue = "This is highlighting the third short highlighting value.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ //one match, same as first but at the end, will be scored lower due to its position
+ final String fourthValue = "Just a test4 highlighting from postings highlighter.";
+ Field body4 = new Field("body", "", offsetsType);
+ doc.add(body4);
+ body4.setStringValue(fourthValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+
+ String firstHlValue = "Just a test1 <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to perform <b>highlighting</b> on a longer text that gets scored lower.";
+ String thirdHlValue = "This is <b>highlighting</b> the third short <b>highlighting</b> value.";
+ String fourthHlValue = "Just a test4 <b>highlighting</b> from postings highlighter.";
+
+
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> fieldValues = new ArrayList<Object>();
+ fieldValues.add(firstValue);
+ fieldValues.add(secondValue);
+ fieldValues.add(thirdValue);
+ fieldValues.add(fourthValue);
+
+ boolean mergeValues = true;
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0);
+ Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+
+ assertThat(snippets.length, equalTo(4));
+
+ assertThat(snippets[0].getText(), equalTo(firstHlValue));
+ assertThat(snippets[1].getText(), equalTo(secondHlValue));
+ assertThat(snippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(snippets[3].getText(), equalTo(fourthHlValue));
+
+
+ //Let's highlight each separate value and check how the snippets are scored
+ mergeValues = false;
+ highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0);
+ List<Snippet> snippets2 = new ArrayList<Snippet>();
+ for (int i = 0; i < fieldValues.size(); i++) {
+ snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, searcher, docId, 5)));
+ }
+
+ assertThat(snippets2.size(), equalTo(4));
+ assertThat(snippets2.get(0).getText(), equalTo(firstHlValue));
+ assertThat(snippets2.get(1).getText(), equalTo(secondHlValue));
+ assertThat(snippets2.get(2).getText(), equalTo(thirdHlValue));
+ assertThat(snippets2.get(3).getText(), equalTo(fourthHlValue));
+
+ Comparator<Snippet> comparator = new Comparator<Snippet>() {
+ @Override
+ public int compare(Snippet o1, Snippet o2) {
+ return (int)Math.signum(o1.getScore() - o2.getScore());
+ }
+ };
+
+ //sorting both groups of snippets
+ Arrays.sort(snippets, comparator);
+ Collections.sort(snippets2, comparator);
+
+ //check that the snippets come back in the same order, regardless of whether we used per-value discrete highlighting or not.
+ //We can't compare the scores directly: they differ slightly because of the multiValued separator added when merging values together,
+ //which leads to slightly different lengths and start offsets, and thus slightly different scores.
+ //That's not an issue; what matters is that the score is computed the same way, so the produced order is always the same.
+ for (int i = 0; i < snippets.length; i++) {
+ assertThat(snippets[i].getText(), equalTo(snippets2.get(i).getText()));
+ }
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that we produce the same snippets and scores when manually merging values in our own custom highlighter rather than using the built-in code
+ */
+ @Test
+ public void testMergeValuesScoring() throws Exception {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ //good position but only one match
+ final String firstValue = "This is a test. Just a test1 highlighting from postings highlighter.";
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ //two matches, not the best snippet due to its length though
+ final String secondValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ //two matches and short, will be scored highest
+ final String thirdValue = "This is highlighting the third short highlighting value.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ //one match, same as first but at the end, will be scored lower due to its position
+ final String fourthValue = "Just a test4 highlighting from postings highlighter.";
+ Field body4 = new Field("body", "", offsetsType);
+ doc.add(body4);
+ body4.setStringValue(fourthValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+
+ String firstHlValue = "Just a test1 <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to perform <b>highlighting</b> on a longer text that gets scored lower.";
+ String thirdHlValue = "This is <b>highlighting</b> the third short <b>highlighting</b> value.";
+ String fourthHlValue = "Just a test4 <b>highlighting</b> from postings highlighter.";
+
+
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> fieldValues = new ArrayList<Object>();
+ fieldValues.add(firstValue);
+ fieldValues.add(secondValue);
+ fieldValues.add(thirdValue);
+ fieldValues.add(fourthValue);
+
+ boolean mergeValues = true;
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0);
+ Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+
+ assertThat(snippets.length, equalTo(4));
+
+ assertThat(snippets[0].getText(), equalTo(firstHlValue));
+ assertThat(snippets[1].getText(), equalTo(secondHlValue));
+ assertThat(snippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(snippets[3].getText(), equalTo(fourthHlValue));
+
+
+ //now test XPostingsHighlighter, our fork of the stock postings highlighter, which merges multiple values together using the paragraph separator
+ XPostingsHighlighter highlighter2 = new XPostingsHighlighter(Integer.MAX_VALUE - 1) {
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ return HighlightUtils.PARAGRAPH_SEPARATOR;
+ }
+
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+ }
+ };
+
+ Map<String, Object[]> highlightMap = highlighter2.highlightFieldsAsObjects(new String[]{"body"}, query, searcher, new int[]{docId}, new int[]{5});
+ Object[] objects = highlightMap.get("body");
+ assertThat(objects, notNullValue());
+ assertThat(objects.length, equalTo(1));
+ Snippet[] normalSnippets = (Snippet[])objects[0];
+
+ assertThat(normalSnippets.length, equalTo(4));
+
+ assertThat(normalSnippets[0].getText(), equalTo(firstHlValue));
+ assertThat(normalSnippets[1].getText(), equalTo(secondHlValue));
+ assertThat(normalSnippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(normalSnippets[3].getText(), equalTo(fourthHlValue));
+
+
+ for (int i = 0; i < normalSnippets.length; i++) {
+ Snippet customSnippet = snippets[i];
+ Snippet normalSnippet = normalSnippets[i];
+ assertThat(customSnippet.getText(), equalTo(normalSnippet.getText()));
+ assertThat(customSnippet.getScore(), equalTo(normalSnippet.getScore()));
+ }
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testRequireFieldMatch() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ String firstValue = "This is a test. Just a test highlighting from postings. Feel free to ignore.";
+ body.setStringValue(firstValue);
+ none.setStringValue(firstValue);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ Query query = new TermQuery(new Term("none", "highlighting"));
+ SortedSet<Term> queryTerms = extractTerms(query);
+
+ IndexSearcher searcher = newSearcher(ir);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> values = new ArrayList<Object>();
+ values.add(firstValue);
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0);
+
+ //no snippets with simulated require field match (we filter the terms ourselves)
+ boolean requireFieldMatch = true;
+ BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch);
+ Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(0));
+
+
+ highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0);
+ //one snippet without require field match, just passing in the query terms with no filtering on our side
+ requireFieldMatch = false;
+ filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch);
+ snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("Just a test <b>highlighting</b> from postings."));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testNoMatchSize() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ String firstValue = "This is a test. Just a test highlighting from postings. Feel free to ignore.";
+ body.setStringValue(firstValue);
+ none.setStringValue(firstValue);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ Query query = new TermQuery(new Term("none", "highlighting"));
+ SortedSet<Term> queryTerms = extractTerms(query);
+
+ IndexSearcher searcher = newSearcher(ir);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> values = new ArrayList<Object>();
+ values.add(firstValue);
+
+ BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", true);
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0);
+ Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(0));
+
+ highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, atLeast(1));
+ snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is a test."));
+
+ ir.close();
+ dir.close();
+ }
+
+ private static SortedSet<Term> extractTerms(Query query) {
+ SortedSet<Term> queryTerms = new TreeSet<Term>();
+ query.extractTerms(queryTerms);
+ return queryTerms;
+ }
+
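+ /*
+ Builds the BytesRef term array that the custom highlighter consumes. When requireFieldMatch is true
+ we take the subrange [new Term(field, ""), new Term(field, UnicodeUtil.BIG_TERM)) of the sorted term
+ set, i.e. only the terms belonging to the target field; this is how the tests above simulate
+ require_field_match without any help from the highlighter itself.
+ */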
+ private static BytesRef[] filterTerms(SortedSet<Term> queryTerms, String field, boolean requireFieldMatch) {
+ SortedSet<Term> fieldTerms;
+ if (requireFieldMatch) {
+ Term floor = new Term(field, "");
+ Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
+ fieldTerms = queryTerms.subSet(floor, ceiling);
+ } else {
+ fieldTerms = queryTerms;
+ }
+
+ BytesRef terms[] = new BytesRef[fieldTerms.size()];
+ int termUpto = 0;
+ for(Term term : fieldTerms) {
+ terms[termUpto++] = term.bytes();
+ }
+
+ return terms;
+ }
+}
diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java
new file mode 100644
index 0000000..6f63b72
--- /dev/null
+++ b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java
@@ -0,0 +1,1693 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.text.BreakIterator;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.*;
+
+@LuceneTestCase.SuppressCodecs({"MockFixedIntBlock", "MockVariableIntBlock", "MockSep", "MockRandom", "Lucene3x"})
+public class XPostingsHighlighterTests extends ElasticsearchLuceneTestCase {
+
+ /*
+ Tests the changes needed to make discrete highlighting possible.
+ We want to highlight every field value separately in the multi-valued case, at least when the whole field content needs to be returned.
+ This is needed to be able to get back a single snippet per value when number_of_fragments=0.
+ */
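+ //For example (sketch): with values {"a test", "no match here"} and number_of_fragments=0 we want
+ //["a <b>test</b>", null] back (one snippet per value) rather than one concatenated string.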
+ @Test
+ public void testDiscreteHighlightingPerValue() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second value to perform highlighting on.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the third value to test highlighting with postings.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return 8233;
+ }
+ };
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+
+ String firstHlValue = "This is a test. Just a test <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second value to perform <b>highlighting</b> on.";
+ String thirdHlValue = "This is the third value to test <b>highlighting</b> with postings.";
+
+ //default behaviour: using the WholeBreakIterator, despite the multi valued paragraph separator we get back a single snippet for multiple values
+ assertThat(snippets[0], equalTo(firstHlValue + (char)8233 + secondHlValue + (char)8233 + thirdHlValue));
+
+
+
+ highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
+
+ //first call using the WholeBreakIterator: we now get only the first value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(firstHlValue));
+
+ //second call using the WholeBreakIterator: we now get only the second value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(secondHlValue));
+
+ //third call using the WholeBreakIterator: we now get only the third value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(thirdHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testDiscreteHighlightingPerValue_secondValueWithoutMatches() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second value without matches.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the third value to test highlighting with postings.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return 8233;
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ String firstHlValue = "This is a test. Just a test <b>highlighting</b> from postings highlighter.";
+ String thirdHlValue = "This is the third value to test <b>highlighting</b> with postings.";
+ //default behaviour: using the WholeBreakIterator, despite the multi valued paragraph separator we get back a single snippet for multiple values
+ //the second value is included as-is, without highlighting, since it contains no matches.
+ assertThat(snippets[0], equalTo(firstHlValue + (char)8233 + secondValue + (char)8233 + thirdHlValue));
+
+
+ highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+
+ //first call using the WholeBreakIterator: we now get only the first value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(firstHlValue));
+
+ //second call using the WholeBreakIterator: we now get nothing back, because there's nothing to highlight in the second value
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], nullValue());
+
+ //third call using the WholeBreakIterator: we now get only the third value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(thirdHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testDiscreteHighlightingPerValue_MultipleMatches() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a highlighting test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second highlighting value to test highlighting with postings.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ String firstHlValue = "This is a <b>highlighting</b> test. Just a test <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to test <b>highlighting</b> with postings.";
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+
+ //first call using the WholeBreakIterator: we now get only the first value, properly highlighted as we wish
+ String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(firstHlValue));
+
+ //second call using the WholeBreakIterator: we now get only the second value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(secondHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testDiscreteHighlightingPerValue_MultipleQueryTerms() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is the first sentence. This is the second sentence.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the third sentence. This is the fourth sentence.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the fifth sentence";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ BooleanQuery query = new BooleanQuery();
+ query.add(new BooleanClause(new TermQuery(new Term("body", "third")), BooleanClause.Occur.SHOULD));
+ query.add(new BooleanClause(new TermQuery(new Term("body", "seventh")), BooleanClause.Occur.SHOULD));
+ query.add(new BooleanClause(new TermQuery(new Term("body", "fifth")), BooleanClause.Occur.SHOULD));
+ query.setMinimumNumberShouldMatch(1);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ String secondHlValue = "This is the <b>third</b> sentence. This is the fourth sentence.";
+ String thirdHlValue = "This is the <b>fifth</b> sentence";
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+
+ //first call using the WholeBreakIterator: we now get null, as the first value doesn't hold any match
+ String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], nullValue());
+
+ //second call using the WholeBreakIterator: we now get only the second value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(secondHlValue));
+
+ //third call using the WholeBreakIterator: we now get only the third value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(thirdHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ The following are tests that we added to make sure that certain behaviours are possible using the postings highlighter.
+ They don't require our forked version, only custom overrides of methods that are already exposed to subclasses.
+ */
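+ //Concretely, the extension points exercised below are: getFormatter(String) to plug in our own
+ //PassageFormatter, getEmptyHighlight(String, BreakIterator, int) to control the no-match output,
+ //and getMultiValuedSeparator(String) to keep passages from spanning multiple values.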
+
+ /*
+ Tests that it's possible to obtain different fragments per document instead of a big string of concatenated fragments.
+ We use our own PassageFormatter for that and override the getFormatter method.
+ */
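+ //Rough idea: the default formatter concatenates passages into a single String per document, while
+ //our CustomPassageFormatter is assumed to emit a Snippet[] (one entry per passage), which
+ //highlightFieldsAsObjects then hands back untouched inside the Object[] values.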
+ @Test
+ public void testCustomPassageFormatterMultipleFragments() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 5);
+ assertThat(snippets.length, equalTo(1));
+ //default behaviour that we want to change
+ assertThat(snippets[0], equalTo("This <b>test</b> is another test. ... <b>Test</b> <b>test</b> <b>test</b> test."));
+
+
+ final CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+ highlighter = new XPostingsHighlighter() {
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return passageFormatter;
+ }
+ };
+
+ final ScoreDoc scoreDocs[] = topDocs.scoreDocs;
+ int docids[] = new int[scoreDocs.length];
+ int maxPassages[] = new int[scoreDocs.length];
+ for (int i = 0; i < docids.length; i++) {
+ docids[i] = scoreDocs[i].doc;
+ maxPassages[i] = 5;
+ }
+ Map<String, Object[]> highlights = highlighter.highlightFieldsAsObjects(new String[]{"body"}, query, searcher, docids, maxPassages);
+ assertThat(highlights, notNullValue());
+ assertThat(highlights.size(), equalTo(1));
+ Object[] objectSnippets = highlights.get("body");
+ assertThat(objectSnippets, notNullValue());
+ assertThat(objectSnippets.length, equalTo(1));
+ assertThat(objectSnippets[0], instanceOf(Snippet[].class));
+
+ Snippet[] snippetsSnippet = (Snippet[]) objectSnippets[0];
+ assertThat(snippetsSnippet.length, equalTo(2));
+ //multiple fragments as we wish
+ assertThat(snippetsSnippet[0].getText(), equalTo("This <b>test</b> is another test."));
+ assertThat(snippetsSnippet[1].getText(), equalTo("<b>Test</b> <b>test</b> <b>test</b> test."));
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that it's possible to return no fragments when there's nothing to highlight.
+ We do that by overriding the getEmptyHighlight method.
+ */
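+ //Minimal sketch: returning new Passage[0] from getEmptyHighlight turns the default
+ //"first sentences" fallback into a null snippet for documents without matches.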
+ @Test
+ public void testHighlightWithNoMatches() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ none.setStringValue(body.stringValue());
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ none.setStringValue(body.stringValue());
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("none", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(2));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 1);
+ //without overriding getEmptyHighlight we still get two snippets back, even though there are no matches
+ assertThat(snippets.length, equalTo(2));
+ //default behaviour: returns the first sentence with num passages = 1
+ assertThat(snippets[0], equalTo("This is a test. "));
+ assertThat(snippets[1], equalTo("Highlighting the first term. "));
+
+ highlighter = new XPostingsHighlighter() {
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ //Two null snippets if there are no matches, as we wish
+ assertThat(snippets.length, equalTo(2));
+ assertThat(snippets[0], nullValue());
+ assertThat(snippets[1], nullValue());
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that it's possible to avoid having fragments that span across different values.
+ We do that by overriding the getMultiValuedSeparator method and using a proper separator between values.
+ */
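+ //e.g. returning U+2029 (PARAGRAPH SEPARATOR) makes the sentence BreakIterator see a boundary
+ //between values, so no passage can start in one value and end in the next.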
+ @Test
+ public void testCustomMultiValuedSeparator() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings");
+
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue("highlighter.");
+ iw.addDocument(doc);
+
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ //default behaviour: getting a fragment that spans across different values
+ assertThat(snippets[0], equalTo("Just a test <b>highlighting</b> from postings highlighter."));
+
+
+ highlighter = new XPostingsHighlighter() {
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return 8233;
+ }
+ };
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ //getting a fragment that doesn't span across different values since we used the paragraph separator between the different values
+ assertThat(snippets[0], equalTo("Just a test <b>highlighting</b> from postings" + (char)8233));
+
+ ir.close();
+ dir.close();
+ }
+
+
+
+
+ /*
+ The following are all the existing postings highlighter tests, to make sure we don't have regressions in our own fork
+ */
+
+ @Test
+ public void testBasics() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(2, snippets.length);
+ assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
+ assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ public void testFormatWithMatchExceedingContentLength2() throws Exception {
+
+ String bodyText = "123 TEST 01234 TEST";
+
+ String[] snippets = formatWithMatchExceedingContentLength(bodyText);
+
+ assertEquals(1, snippets.length);
+ assertEquals("123 <b>TEST</b> 01234 TE", snippets[0]);
+ }
+
+ public void testFormatWithMatchExceedingContentLength3() throws Exception {
+
+ String bodyText = "123 5678 01234 TEST TEST";
+
+ String[] snippets = formatWithMatchExceedingContentLength(bodyText);
+
+ assertEquals(1, snippets.length);
+ assertEquals("123 5678 01234 TE", snippets[0]);
+ }
+
+ public void testFormatWithMatchExceedingContentLength() throws Exception {
+
+ String bodyText = "123 5678 01234 TEST";
+
+ String[] snippets = formatWithMatchExceedingContentLength(bodyText);
+
+ assertEquals(1, snippets.length);
+ // LUCENE-5166: the match extends past the truncated content (maxLength=17), so nothing is highlighted
+ assertEquals("123 5678 01234 TE", snippets[0]);
+ }
+
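+ //In all three cases above the stored text is cut at maxLength=17 ("123 5678 01234 TE"), and a
+ //match is only wrapped in <b> tags when its offsets fall entirely inside the truncated content.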
+ private String[] formatWithMatchExceedingContentLength(String bodyText) throws IOException {
+
+ int maxLength = 17;
+
+ final Analyzer analyzer = new MockAnalyzer(random());
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ final FieldType fieldType = new FieldType(TextField.TYPE_STORED);
+ fieldType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ final Field body = new Field("body", bodyText, fieldType);
+
+ Document doc = new Document();
+ doc.add(body);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "test"));
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(maxLength);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+
+
+ ir.close();
+ dir.close();
+ return snippets;
+ }
+
+ // simple test highlighting last word.
+ public void testHighlightLastWord() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ // simple test with one sentence documents.
+ @Test
+ public void testOneSentence() throws Exception {
+ Directory dir = newDirectory();
+ // use simpleanalyzer for more natural tokenization (else "test." is a token)
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test.");
+ iw.addDocument(doc);
+ body.setStringValue("Test a one sentence document.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(2, snippets.length);
+ assertEquals("This is a <b>test</b>.", snippets[0]);
+ assertEquals("<b>Test</b> a one sentence document.", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ // simple test with multiple values that make a result longer than maxLength.
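+ // (each value is 27 characters long, so three values plus separators exceed maxLength=40 and the
+ // highlighted content is expected to be cut at exactly 40 characters plus the <b></b> markup)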
+ @Test
+ public void testMaxLengthWithMultivalue() throws Exception {
+ Directory dir = newDirectory();
+ // use simpleanalyzer for more natural tokenization (else "test." is a token)
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ for(int i = 0; i < 3 ; i++) {
+ Field body = new Field("body", "", offsetsType);
+ body.setStringValue("This is a multivalued field");
+ doc.add(body);
+ }
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(40);
+ Query query = new TermQuery(new Term("body", "field"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertTrue("Snippet should be exactly 40 characters plus the pre and post tags",
+ snippets[0].length() == (40 + "<b></b>".length()));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleFields() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field title = new Field("title", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(title);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ title.setStringValue("I am hoping for the best.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ title.setStringValue("But best may not be good enough.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "highlighting")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("title", "best")), BooleanClause.Occur.SHOULD);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ Map<String,String[]> snippets = highlighter.highlightFields(new String [] { "body", "title" }, query, searcher, topDocs);
+ assertEquals(2, snippets.size());
+ assertEquals("Just a test <b>highlighting</b> from postings. ", snippets.get("body")[0]);
+ assertEquals("<b>Highlighting</b> the first term. ", snippets.get("body")[1]);
+ assertEquals("I am hoping for the <b>best</b>.", snippets.get("title")[0]);
+ assertEquals("But <b>best</b> may not be good enough.", snippets.get("title")[1]);
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleTerms() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "highlighting")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "just")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "first")), BooleanClause.Occur.SHOULD);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(2, snippets.length);
+ assertEquals("<b>Just</b> a test <b>highlighting</b> from postings. ", snippets[0]);
+ assertEquals("<b>Highlighting</b> the <b>first</b> term. ", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultiplePassages() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(2, snippets.length);
+ assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", snippets[0]);
+ assertEquals("This <b>test</b> is another <b>test</b>. ... <b>Test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testUserFailedToIndexOffsets() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ Field body = new Field("body", "", positionsType);
+ Field title = new StringField("title", "", Field.Store.YES);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(title);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ title.setStringValue("test");
+ iw.addDocument(doc);
+ body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+ title.setStringValue("test");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ try {
+ highlighter.highlight("body", query, searcher, topDocs, 2);
+ fail("did not hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ }
+
+ try {
+ highlighter.highlight("title", new TermQuery(new Term("title", "test")), searcher, topDocs, 2);
+ fail("did not hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ }
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBuddhism() throws Exception {
+ String text = "This eight-volume set brings together seminal papers in Buddhist studies from a vast " +
+ "range of academic disciplines published over the last forty years. With a new introduction " +
+ "by the editor, this collection is a unique and unrivalled research resource for both " +
+ "student and scholar. Coverage includes: - Buddhist origins; early history of Buddhism in " +
+ "South and Southeast Asia - early Buddhist Schools and Doctrinal History; Theravada Doctrine " +
+ "- the Origins and nature of Mahayana Buddhism; some Mahayana religious topics - Abhidharma " +
+ "and Madhyamaka - Yogacara, the Epistemological tradition, and Tathagatagarbha - Tantric " +
+ "Buddhism (Including China and Japan); Buddhism in Nepal and Tibet - Buddhism in South and " +
+ "Southeast Asia, and - Buddhism in China, East Asia, and Japan.";
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", text, positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ PhraseQuery query = new PhraseQuery();
+ query.add(new Term("body", "buddhist"));
+ query.add(new Term("body", "origins"));
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertTrue(snippets[0].contains("<b>Buddhist</b> <b>origins</b>"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testCuriousGeorge() throws Exception {
+ String text = "It’s the formula for success for preschoolers—Curious George and fire trucks! " +
+ "Curious George and the Firefighters is a story based on H. A. and Margret Rey’s " +
+ "popular primate and painted in the original watercolor and charcoal style. " +
+ "Firefighters are a famously brave lot, but can they withstand a visit from one curious monkey?";
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", text, positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ PhraseQuery query = new PhraseQuery();
+ query.add(new Term("body", "curious"));
+ query.add(new Term("body", "george"));
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertFalse(snippets[0].contains("<b>Curious</b>Curious"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testCambridgeMA() throws Exception {
+ BufferedReader r = new BufferedReader(new InputStreamReader(
+ this.getClass().getResourceAsStream("CambridgeMA.utf8"), "UTF-8"));
+ String text = r.readLine();
+ r.close();
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", text, positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "porter")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "square")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "massachusetts")), BooleanClause.Occur.SHOULD);
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(Integer.MAX_VALUE-1);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertTrue(snippets[0].contains("<b>Square</b>"));
+ assertTrue(snippets[0].contains("<b>Porter</b>"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testPassageRanking() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>. ... Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBooleanMustNot() throws Exception {
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "This sentence has both terms. This sentence has only terms.", positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "terms")), BooleanClause.Occur.SHOULD);
+ BooleanQuery query2 = new BooleanQuery();
+ query.add(query2, BooleanClause.Occur.SHOULD);
+ query2.add(new TermQuery(new Term("body", "both")), BooleanClause.Occur.MUST_NOT);
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(Integer.MAX_VALUE-1);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertFalse(snippets[0].contains("<b>both</b>"));
+ ir.close();
+ dir.close();
+ }
+
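+ //WholeBreakIterator treats the whole (possibly truncated) field content as a single passage, which
+ //is how number_of_fragments=0 style highlighting is obtained from the postings highlighter.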
+ @Test
+ public void testHighlightAllText() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>. Just highlighting from postings. This is also a much sillier <b>test</b>. Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testSpecificDocIDs() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ ScoreDoc[] hits = topDocs.scoreDocs;
+ int[] docIDs = new int[2];
+ docIDs[0] = hits[0].doc;
+ docIDs[1] = hits[1].doc;
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 1 }).get("body");
+ assertEquals(2, snippets.length);
+ assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
+ assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testCustomFieldValueSource() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ Document doc = new Document();
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_NOT_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ final String text = "This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.";
+ Field body = new Field("body", text, offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
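+      // loadFieldValues is overridden to hand the highlighter the content
+      // directly, which is why the field above can be TYPE_NOT_STORED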
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ assertThat(fields.length, equalTo(1));
+ assertThat(docids.length, equalTo(1));
+ String[][] contents = new String[1][1];
+ contents[0][0] = text;
+ return contents;
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
+
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>. Just highlighting from postings. This is also a much sillier <b>test</b>. Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure the highlighter returns the first N
+     * sentences when there are no hits. */
+ @Test
+ public void testEmptyHighlights() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertEquals("test this is. another sentence this test has. ", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure we can customize how an empty
+     * highlight is returned. */
+ @Test
+ public void testCustomEmptyHighlights() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
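+            // returning an empty Passage[] instead of the default first-sentences
+            // passage makes the highlighter emit null for docs without hits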
+ @Override
+ public Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertNull(snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure highlighter returns the whole text when there
+     * are no hits and a WholeBreakIterator is used. */
+ @Test
+ public void testEmptyHighlightsWhole() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertEquals("test this is. another sentence this test has. far away is that planet.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure the highlighter is OK with an entirely
+     * missing field. */
+ @Test
+ public void testFieldIsMissing() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("bogus", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"bogus"}, query, searcher, docIDs, new int[] { 2 }).get("bogus");
+ assertEquals(1, snippets.length);
+ assertNull(snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testFieldIsJustSpace() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ Document doc = new Document();
+ doc.add(new Field("body", " ", offsetsType));
+ doc.add(new Field("id", "id", offsetsType));
+ iw.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("body", "something", offsetsType));
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ int docID = searcher.search(new TermQuery(new Term("id", "id")), 1).scoreDocs[0].doc;
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[1];
+ docIDs[0] = docID;
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertEquals(" ", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testFieldIsEmptyString() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ Document doc = new Document();
+ doc.add(new Field("body", "", offsetsType));
+ doc.add(new Field("id", "id", offsetsType));
+ iw.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("body", "something", offsetsType));
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ int docID = searcher.search(new TermQuery(new Term("id", "id")), 1).scoreDocs[0].doc;
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[1];
+ docIDs[0] = docID;
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertNull(snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleDocs() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ int numDocs = atLeast(100);
+ for(int i=0;i<numDocs;i++) {
+ Document doc = new Document();
+ String content = "the answer is " + i;
+ if ((i & 1) == 0) {
+ content += " some more terms";
+ }
+ doc.add(new Field("body", content, offsetsType));
+ doc.add(newStringField("id", ""+i, Field.Store.YES));
+ iw.addDocument(doc);
+
+ if (random().nextInt(10) == 2) {
+ iw.commit();
+ }
+ }
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "answer"));
+ TopDocs hits = searcher.search(query, numDocs);
+ assertEquals(numDocs, hits.totalHits);
+
+ String snippets[] = highlighter.highlight("body", query, searcher, hits);
+ assertEquals(numDocs, snippets.length);
+ for(int hit=0;hit<numDocs;hit++) {
+ Document doc = searcher.doc(hits.scoreDocs[hit].doc);
+ int id = Integer.parseInt(doc.get("id"));
+ String expected = "the <b>answer</b> is " + id;
+ if ((id & 1) == 0) {
+ expected += " some more terms";
+ }
+ assertEquals(expected, snippets[hit]);
+ }
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleSnippetSizes() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field title = new Field("title", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(title);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ title.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "test")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("title", "test")), BooleanClause.Occur.SHOULD);
+ Map<String,String[]> snippets = highlighter.highlightFields(new String[] { "title", "body" }, query, searcher, new int[] { 0 }, new int[] { 1, 2 });
+ String titleHighlight = snippets.get("title")[0];
+ String bodyHighlight = snippets.get("body")[0];
+ assertEquals("This is a <b>test</b>. ", titleHighlight);
+ assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", bodyHighlight);
+ ir.close();
+ dir.close();
+ }
+
+ public void testEncode() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from <i>postings</i>. Feel free to ignore.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ PostingsHighlighter highlighter = new PostingsHighlighter() {
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return new DefaultPassageFormatter("<b>", "</b>", "... ", true);
+ }
+ };
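+    // the final 'true' constructor argument turns on escaping, so snippet text
+    // outside the <b> tags is HTML-encoded (see the assertion below)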
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertEquals("Just&#32;a&#32;test&#32;<b>highlighting</b>&#32;from&#32;&lt;i&gt;postings&lt;&#x2F;i&gt;&#46;&#32;", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ /** customizing the gap separator to force a sentence break */
+ public void testGapSeparator() throws Exception {
+ Directory dir = newDirectory();
+ // use simpleanalyzer for more natural tokenization (else "test." is a token)
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body1 = new Field("body", "", offsetsType);
+ body1.setStringValue("This is a multivalued field");
+ doc.add(body1);
+
+ Field body2 = new Field("body", "", offsetsType);
+ body2.setStringValue("This is something different");
+ doc.add(body2);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ PostingsHighlighter highlighter = new PostingsHighlighter() {
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ assert field.equals("body");
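+        // U+2029 PARAGRAPH SEPARATOR: the sentence BreakIterator treats it as a
+        // break, keeping the two field values in separate passages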
+ return '\u2029';
+ }
+ };
+ Query query = new TermQuery(new Term("body", "field"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a multivalued <b>field</b>\u2029", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ // LUCENE-4906
+ public void testObjectFormatter() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return new PassageFormatter() {
+ PassageFormatter defaultFormatter = new DefaultPassageFormatter();
+
+ @Override
+ public String[] format(Passage passages[], String content) {
+ // Just turns the String snippet into a length 2
+ // array of String
+ return new String[] {"blah blah", defaultFormatter.format(passages, content).toString()};
+ }
+ };
+ }
+ };
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ int[] docIDs = new int[1];
+ docIDs[0] = topDocs.scoreDocs[0].doc;
+ Map<String,Object[]> snippets = highlighter.highlightFieldsAsObjects(new String[]{"body"}, query, searcher, docIDs, new int[] {1});
+ Object[] bodySnippets = snippets.get("body");
+ assertEquals(1, bodySnippets.length);
+ assertTrue(Arrays.equals(new String[] {"blah blah", "Just a test <b>highlighting</b> from postings. "}, (String[]) bodySnippets[0]));
+
+ ir.close();
+ dir.close();
+ }
+}
diff --git a/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java b/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
new file mode 100644
index 0000000..b778b73
--- /dev/null
+++ b/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.util;
+
+import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
+import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
+import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;
+import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule;
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+
+import java.io.Closeable;
+import java.io.File;
+import java.lang.annotation.*;
+import java.lang.reflect.Method;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.logging.Logger;
+
+@TestMethodProviders({
+ LuceneJUnit3MethodProvider.class,
+ JUnit4MethodProvider.class
+})
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@RunWith(value = com.carrotsearch.randomizedtesting.RandomizedRunner.class)
+@SuppressCodecs(value = "Lucene3x")
+
+// NOTE: this class is in o.a.lucene.util since it uses some package-private
+// test-framework classes that did not make sense to copy
+public abstract class AbstractRandomizedTest extends RandomizedTest {
+ /**
+ * Annotation for integration tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = true, sysProperty = SYSPROP_INTEGRATION)
+ public @interface IntegrationTests {
+ }
+
+ // --------------------------------------------------------------------
+ // Test groups, system properties and other annotations modifying tests
+ // --------------------------------------------------------------------
+
+ /**
+ * @see #ignoreAfterMaxFailures
+ */
+ public static final String SYSPROP_MAXFAILURES = "tests.maxfailures";
+
+ /**
+ * @see #ignoreAfterMaxFailures
+ */
+ public static final String SYSPROP_FAILFAST = "tests.failfast";
+
+ public static final String SYSPROP_INTEGRATION = "tests.integration";
+
+ // -----------------------------------------------------------------
+ // Truly immutable fields and constants, initialized once and valid
+ // for all suites ever since.
+ // -----------------------------------------------------------------
+
+ /**
+ * Use this constant when creating Analyzers and any other version-dependent stuff.
+ * <p><b>NOTE:</b> Change this when development starts for new Lucene version:
+ */
+ public static final Version TEST_VERSION_CURRENT = Lucene.VERSION;
+
+ /**
+ * True if and only if tests are run in verbose mode. If this flag is false
+ * tests are not expected to print any messages.
+ */
+ public static final boolean VERBOSE = systemPropertyAsBoolean("tests.verbose", false);
+
+ /**
+ * A random multiplier which you should use when writing random tests:
+ * multiply it by the number of iterations to scale your tests (for nightly builds).
+ */
+ public static final int RANDOM_MULTIPLIER = systemPropertyAsInt("tests.multiplier", 1);
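+
+    // A hedged usage sketch (helper name hypothetical): scale a test's iteration
+    // count by the multiplier so nightly runs with -Dtests.multiplier=N do more work:
+    //
+    //   int iters = 20 * RANDOM_MULTIPLIER;
+    //   for (int i = 0; i < iters; i++) {
+    //       runOneRandomizedCheck(); // hypothetical
+    //   }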
+
+ /**
+ * TODO: javadoc?
+ */
+ public static final String DEFAULT_LINE_DOCS_FILE = "europarl.lines.txt.gz";
+
+ /**
+ * the line file used by LineFileDocs
+ */
+ public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", DEFAULT_LINE_DOCS_FILE);
+
+ /**
+     * Create indexes in this directory; ideally use a subdirectory named after the test
+ */
+ public static final File TEMP_DIR;
+
+ static {
+ String s = System.getProperty("tempDir", System.getProperty("java.io.tmpdir"));
+ if (s == null)
+ throw new RuntimeException("To run tests, you need to define system property 'tempDir' or 'java.io.tmpdir'.");
+ TEMP_DIR = new File(s);
+ TEMP_DIR.mkdirs();
+ }
+
+ /**
+ * These property keys will be ignored in verification of altered properties.
+ *
+ * @see SystemPropertiesInvariantRule
+ * @see #ruleChain
+ * @see #classRules
+ */
+ private static final String[] IGNORED_INVARIANT_PROPERTIES = {
+ "user.timezone", "java.rmi.server.randomIDs", "sun.nio.ch.bugLevel",
+ "solr.directoryFactory", "solr.solr.home", "solr.data.dir" // these might be set by the LuceneTestCase -- ignore
+ };
+
+ // -----------------------------------------------------------------
+ // Fields initialized in class or instance rules.
+ // -----------------------------------------------------------------
+
+
+ // -----------------------------------------------------------------
+ // Class level (suite) rules.
+ // -----------------------------------------------------------------
+
+ /**
+     * Stores the class currently under test.
+ */
+ private static final TestRuleStoreClassName classNameRule;
+
+ /**
+ * Class environment setup rule.
+ */
+ static final TestRuleSetupAndRestoreClassEnv classEnvRule;
+
+ /**
+ * Suite failure marker (any error in the test or suite scope).
+ */
+ public final static TestRuleMarkFailure suiteFailureMarker =
+ new TestRuleMarkFailure();
+
+ /**
+ * Ignore tests after hitting a designated number of initial failures. This
+ * is truly a "static" global singleton since it needs to span the lifetime of all
+ * test classes running inside this JVM (it cannot be part of a class rule).
+ * <p/>
+ * <p>This poses some problems for the test framework's tests because these sometimes
+ * trigger intentional failures which add up to the global count. This field contains
+ * a (possibly) changing reference to {@link TestRuleIgnoreAfterMaxFailures} and we
+ * dispatch to its current value from the {@link #classRules} chain using {@link TestRuleDelegate}.
+ */
+ private static final AtomicReference<TestRuleIgnoreAfterMaxFailures> ignoreAfterMaxFailuresDelegate;
+ private static final TestRule ignoreAfterMaxFailures;
+
+ static {
+ int maxFailures = systemPropertyAsInt(SYSPROP_MAXFAILURES, Integer.MAX_VALUE);
+ boolean failFast = systemPropertyAsBoolean(SYSPROP_FAILFAST, false);
+
+ if (failFast) {
+ if (maxFailures == Integer.MAX_VALUE) {
+ maxFailures = 1;
+ } else {
+ Logger.getLogger(LuceneTestCase.class.getSimpleName()).warning(
+ "Property '" + SYSPROP_MAXFAILURES + "'=" + maxFailures + ", 'failfast' is" +
+ " ignored.");
+ }
+ }
+
+ ignoreAfterMaxFailuresDelegate =
+ new AtomicReference<TestRuleIgnoreAfterMaxFailures>(
+ new TestRuleIgnoreAfterMaxFailures(maxFailures));
+ ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate);
+ }
+
+ /**
+ * Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See
+ * {@link #ignoreAfterMaxFailuresDelegate} for some explanation why this method
+ * is needed.
+ */
+ public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule(TestRuleIgnoreAfterMaxFailures newValue) {
+ return ignoreAfterMaxFailuresDelegate.getAndSet(newValue);
+ }
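+
+    // A hedged usage sketch: temporarily lower the failure cap, then restore the
+    // previous rule (the return value is the rule that was replaced):
+    //
+    //   TestRuleIgnoreAfterMaxFailures previous =
+    //       replaceMaxFailureRule(new TestRuleIgnoreAfterMaxFailures(1));
+    //   try {
+    //       // ... run tests that may fail intentionally ...
+    //   } finally {
+    //       replaceMaxFailureRule(previous);
+    //   }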
+
+ /**
+ * Max 10mb of static data stored in a test suite class after the suite is complete.
+ * Prevents static data structures leaking and causing OOMs in subsequent tests.
+ */
+ private final static long STATIC_LEAK_THRESHOLD = 10 * 1024 * 1024;
+
+ /**
+ * By-name list of ignored types like loggers etc.
+ */
+ private final static Set<String> STATIC_LEAK_IGNORED_TYPES =
+ Collections.unmodifiableSet(new HashSet<String>(Arrays.asList(
+ EnumSet.class.getName())));
+
+ private final static Set<Class<?>> TOP_LEVEL_CLASSES =
+ Collections.unmodifiableSet(new HashSet<Class<?>>(Arrays.asList(
+ AbstractRandomizedTest.class, LuceneTestCase.class,
+ ElasticsearchIntegrationTest.class, ElasticsearchTestCase.class)));
+
+ /**
+ * This controls how suite-level rules are nested. It is important that _all_ rules declared
+ * in {@link LuceneTestCase} are executed in proper order if they depend on each
+ * other.
+ */
+ @ClassRule
+ public static TestRule classRules = RuleChain
+ .outerRule(new TestRuleIgnoreTestSuites())
+ .around(ignoreAfterMaxFailures)
+ .around(suiteFailureMarker)
+ .around(new TestRuleAssertionsRequired())
+ .around(new StaticFieldsInvariantRule(STATIC_LEAK_THRESHOLD, true) {
+ @Override
+ protected boolean accept(java.lang.reflect.Field field) {
+ // Don't count known classes that consume memory once.
+ if (STATIC_LEAK_IGNORED_TYPES.contains(field.getType().getName())) {
+ return false;
+ }
+ // Don't count references from ourselves, we're top-level.
+ if (TOP_LEVEL_CLASSES.contains(field.getDeclaringClass())) {
+ return false;
+ }
+ return super.accept(field);
+ }
+ })
+ .around(new NoClassHooksShadowingRule())
+ .around(new NoInstanceHooksOverridesRule() {
+ @Override
+ protected boolean verify(Method key) {
+ String name = key.getName();
+ return !(name.equals("setUp") || name.equals("tearDown"));
+ }
+ })
+ .around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
+ .around(classNameRule = new TestRuleStoreClassName())
+ .around(classEnvRule = new TestRuleSetupAndRestoreClassEnv());
+
+
+ // -----------------------------------------------------------------
+ // Test level rules.
+ // -----------------------------------------------------------------
+
+ /**
+ * Enforces {@link #setUp()} and {@link #tearDown()} calls are chained.
+ */
+ private TestRuleSetupTeardownChained parentChainCallRule = new TestRuleSetupTeardownChained();
+
+ /**
+ * Save test thread and name.
+ */
+ private TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName();
+
+ /**
+ * Taint suite result with individual test failures.
+ */
+ private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker);
+
+ /**
+ * This controls how individual test rules are nested. It is important that
+ * _all_ rules declared in {@link LuceneTestCase} are executed in proper order
+ * if they depend on each other.
+ */
+ @Rule
+ public final TestRule ruleChain = RuleChain
+ .outerRule(testFailureMarker)
+ .around(ignoreAfterMaxFailures)
+ .around(threadAndTestNameRule)
+ .around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
+ .around(new TestRuleSetupAndRestoreInstanceEnv())
+ .around(new TestRuleFieldCacheSanity())
+ .around(parentChainCallRule);
+
+ // -----------------------------------------------------------------
+ // Suite and test case setup/ cleanup.
+ // -----------------------------------------------------------------
+
+ /**
+ * For subclasses to override. Overrides must call {@code super.setUp()}.
+ */
+ @Before
+ public void setUp() throws Exception {
+ parentChainCallRule.setupCalled = true;
+ }
+
+ /**
+ * For subclasses to override. Overrides must call {@code super.tearDown()}.
+ */
+ @After
+ public void tearDown() throws Exception {
+ parentChainCallRule.teardownCalled = true;
+ }
+
+
+ // -----------------------------------------------------------------
+ // Test facilities and facades for subclasses.
+ // -----------------------------------------------------------------
+
+ /**
+ * Registers a {@link Closeable} resource that should be closed after the test
+ * completes.
+ *
+ * @return <code>resource</code> (for call chaining).
+ */
+ public <T extends Closeable> T closeAfterTest(T resource) {
+ return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.TEST);
+ }
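+
+    // A hedged usage sketch: wrap resource creation so cleanup happens even if
+    // the test fails (newDirectory() here stands in for any Closeable factory;
+    // closeAfterSuite works the same way at suite scope):
+    //
+    //   Directory dir = closeAfterTest(newDirectory());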
+
+ /**
+ * Registers a {@link Closeable} resource that should be closed after the suite
+ * completes.
+ *
+ * @return <code>resource</code> (for call chaining).
+ */
+ public static <T extends Closeable> T closeAfterSuite(T resource) {
+ return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.SUITE);
+ }
+
+ /**
+ * Return the current class being tested.
+ */
+ public static Class<?> getTestClass() {
+ return classNameRule.getTestClass();
+ }
+
+ /**
+ * Return the name of the currently executing test case.
+ */
+ public String getTestName() {
+ return threadAndTestNameRule.testMethodName;
+ }
+
+}
diff --git a/src/test/java/org/apache/lucene/util/SloppyMathTests.java b/src/test/java/org/apache/lucene/util/SloppyMathTests.java
new file mode 100644
index 0000000..81a2fb5
--- /dev/null
+++ b/src/test/java/org/apache/lucene/util/SloppyMathTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.util;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.number.IsCloseTo.closeTo;
+
+public class SloppyMathTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAccuracy() {
+ for (double lat1 = -89; lat1 <= 89; lat1+=1) {
+ final double lon1 = randomLongitude();
+
+ for (double i = -180; i <= 180; i+=1) {
+ final double lon2 = i;
+ final double lat2 = randomLatitude();
+
+ assertAccurate(lat1, lon1, lat2, lon2);
+ }
+ }
+ }
+
+ @Test
+ public void testSloppyMath() {
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(-46.645, -171.057, -46.644, -171.058, DistanceUnit.METERS), closeTo(134.87709, maxError(134.87709)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(-77.912, -81.173, -77.912, -81.171, DistanceUnit.METERS), closeTo(46.57161, maxError(46.57161)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(65.75, -20.708, 65.75, -20.709, DistanceUnit.METERS), closeTo(45.66996, maxError(45.66996)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(-86.9, 53.738, -86.9, 53.741, DistanceUnit.METERS), closeTo(18.03998, maxError(18.03998)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(89.041, 115.93, 89.04, 115.946, DistanceUnit.METERS), closeTo(115.11711, maxError(115.11711)));
+
+ testSloppyMath(DistanceUnit.METERS, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.KILOMETERS, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.INCH, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.MILES, 0.01, 5, 45, 90);
+ }
+
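+    // Allowed error is distance / 1000, i.e. the sloppy arc distance must stay
+    // within 0.1% of the accurate one.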
+ private static double maxError(double distance) {
+ return distance / 1000.0;
+ }
+
+ private void testSloppyMath(DistanceUnit unit, double...deltaDeg) {
+ final double lat1 = randomLatitude();
+ final double lon1 = randomLongitude();
+ logger.info("testing SloppyMath with {} at \"{}, {}\"", unit, lat1, lon1);
+
+ for (int test = 0; test < deltaDeg.length; test++) {
+ for (int i = 0; i < 100; i++) {
+                // crop pole areas, since we know the function
+                // is not accurate around lat(89°, 90°) and lat(-90°, -89°)
+ final double lat2 = Math.max(-89.0, Math.min(+89.0, lat1 + (randomDouble() - 0.5) * 2 * deltaDeg[test]));
+ final double lon2 = lon1 + (randomDouble() - 0.5) * 2 * deltaDeg[test];
+
+ final double accurate = GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, unit);
+ final double dist = GeoDistance.SLOPPY_ARC.calculate(lat1, lon1, lat2, lon2, unit);
+
+ assertThat("distance between("+lat1+", "+lon1+") and ("+lat2+", "+lon2+"))", dist, closeTo(accurate, maxError(accurate)));
+ }
+ }
+ }
+
+ private static void assertAccurate(double lat1, double lon1, double lat2, double lon2) {
+ double accurate = GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.METERS);
+ double sloppy = GeoDistance.SLOPPY_ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.METERS);
+ assertThat("distance between("+lat1+", "+lon1+") and ("+lat2+", "+lon2+"))", sloppy, closeTo(accurate, maxError(accurate)));
+ }
+
+ private static final double randomLatitude() {
+        // crop pole areas, since we know the function
+        // is not accurate around lat(89°, 90°) and lat(-90°, -89°)
+ return (getRandom().nextDouble() - 0.5) * 178.0;
+ }
+
+ private static final double randomLongitude() {
+ return (getRandom().nextDouble() - 0.5) * 360.0;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
new file mode 100644
index 0000000..2c605f8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ElasticsearchExceptionTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testStatus() {
+ ElasticsearchException exception = new ElasticsearchException("test");
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new ElasticsearchException("test", new RuntimeException());
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new ElasticsearchException("test", new IndexMissingException(new Index("test")));
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new RemoteTransportException("test", new IndexMissingException(new Index("test")));
+ assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/VersionTests.java b/src/test/java/org/elasticsearch/VersionTests.java
new file mode 100644
index 0000000..54df22d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/VersionTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.Version.V_0_20_0;
+import static org.elasticsearch.Version.V_0_90_0;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+
+public class VersionTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVersions() throws Exception {
+ assertThat(V_0_20_0.before(V_0_90_0), is(true));
+ assertThat(V_0_20_0.before(V_0_20_0), is(false));
+ assertThat(V_0_90_0.before(V_0_20_0), is(false));
+
+ assertThat(V_0_20_0.onOrBefore(V_0_90_0), is(true));
+ assertThat(V_0_20_0.onOrBefore(V_0_20_0), is(true));
+ assertThat(V_0_90_0.onOrBefore(V_0_20_0), is(false));
+
+ assertThat(V_0_20_0.after(V_0_90_0), is(false));
+ assertThat(V_0_20_0.after(V_0_20_0), is(false));
+ assertThat(V_0_90_0.after(V_0_20_0), is(true));
+
+ assertThat(V_0_20_0.onOrAfter(V_0_90_0), is(false));
+ assertThat(V_0_20_0.onOrAfter(V_0_20_0), is(true));
+ assertThat(V_0_90_0.onOrAfter(V_0_20_0), is(true));
+ }
+
+ @Test
+ public void testVersionConstantPresent() {
+ assertThat(Version.CURRENT, sameInstance(Version.fromId(Version.CURRENT.id)));
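+        // LUCENE_CURRENT is the last enum constant, so the newest concrete Lucene
+        // version sits one ordinal below it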
+ assertThat(Version.CURRENT.luceneVersion.ordinal(), equalTo(org.apache.lucene.util.Version.LUCENE_CURRENT.ordinal() - 1));
+ final int iters = atLeast(20);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion();
+ assertThat(version, sameInstance(Version.fromId(version.id)));
+ assertThat(version.luceneVersion, sameInstance(Version.fromId(version.id).luceneVersion));
+ }
+ }
+ @Test
+ public void testCURRENTIsLatest() {
+ final int iters = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion();
+ if (version != Version.CURRENT) {
+ assertThat("Version: " + version + " should be before: " + Version.CURRENT + " but wasn't", version.before(Version.CURRENT), is(true));
+ }
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java b/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java
new file mode 100644
index 0000000..215553d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.index.query.FilterBuilders.andFilter;
+import static org.elasticsearch.index.query.FilterBuilders.notFilter;
+import static org.elasticsearch.index.query.FilterBuilders.queryFilter;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+/**
+ */
+public class HotThreadsTest extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testHotThreadsDontFail() throws ExecutionException, InterruptedException {
+ /**
+         * This test just checks that nothing crashes or gets stuck.
+ */
+ createIndex("test");
+ final int iters = atLeast(2);
+ final AtomicBoolean hasErrors = new AtomicBoolean(false);
+ for (int i = 0; i < iters; i++) {
+ final String type;
+ NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = client().admin().cluster().prepareNodesHotThreads();
+ if (randomBoolean()) {
+ TimeValue timeValue = new TimeValue(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(20, 500));
+ nodesHotThreadsRequestBuilder.setInterval(timeValue);
+ }
+ if (randomBoolean()) {
+ nodesHotThreadsRequestBuilder.setThreads(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(1, 500));
+ }
+ if (randomBoolean()) {
+ switch (randomIntBetween(0, 2)) {
+ case 2:
+ type = "cpu";
+ break;
+ case 1:
+ type = "wait";
+ break;
+ default:
+ type = "block";
+ break;
+ }
+ assertThat(type, notNullValue());
+ nodesHotThreadsRequestBuilder.setType(type);
+ } else {
+ type = null;
+ }
+ final CountDownLatch latch = new CountDownLatch(1);
+ nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() {
+ @Override
+ public void onResponse(NodesHotThreadsResponse nodeHotThreads) {
+ boolean success = false;
+ try {
+ assertThat(nodeHotThreads, notNullValue());
+ Map<String,NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
+ assertThat(nodesMap.size(), equalTo(cluster().size()));
+ for (NodeHotThreads ht : nodeHotThreads) {
+ assertNotNull(ht.getHotThreads());
+ //logger.info(ht.getHotThreads());
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ hasErrors.set(true);
+ }
+ latch.countDown();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.error("FAILED", e);
+ hasErrors.set(true);
+ latch.countDown();
+ fail();
+ }
+ });
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+ ensureSearchable();
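+            // keep issuing searches until the hot-threads response arrives, so the
+            // API has live threads to sample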
+ while(latch.getCount() > 0) {
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(
+ andFilter(
+ queryFilter(matchAllQuery()),
+ notFilter(andFilter(queryFilter(termQuery("field1", "value1")),
+ queryFilter(termQuery("field1", "value2")))))).get(),
+ 3l);
+ }
+ latch.await();
+ assertThat(hasErrors.get(), is(false));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java
new file mode 100644
index 0000000..87edd65
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class ClusterStatsTests extends ElasticsearchIntegrationTest {
+
+ private void assertCounts(ClusterStatsNodes.Counts counts, int total, int masterOnly, int dataOnly, int masterData, int client) {
+ assertThat(counts.getTotal(), Matchers.equalTo(total));
+ assertThat(counts.getMasterOnly(), Matchers.equalTo(masterOnly));
+ assertThat(counts.getDataOnly(), Matchers.equalTo(dataOnly));
+ assertThat(counts.getMasterData(), Matchers.equalTo(masterData));
+ assertThat(counts.getClient(), Matchers.equalTo(client));
+ }
+
+ @Test
+ public void testNodeCounts() {
+ cluster().startNode();
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 1, 0, 0, 1, 0);
+
+ cluster().startNode(ImmutableSettings.builder().put("node.data", false));
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 2, 1, 0, 1, 0);
+
+ cluster().startNode(ImmutableSettings.builder().put("node.master", false));
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 3, 1, 1, 1, 0);
+
+ cluster().startNode(ImmutableSettings.builder().put("node.client", true));
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 4, 1, 1, 1, 1);
+ }
+
+
+ private void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, int total, int primaries, double replicationFactor) {
+ assertThat(stats.getIndices(), Matchers.equalTo(indices));
+ assertThat(stats.getTotal(), Matchers.equalTo(total));
+ assertThat(stats.getPrimaries(), Matchers.equalTo(primaries));
+ assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor));
+ }
+
+ @Test
+ public void testIndicesShardStats() {
+ cluster().startNode();
+
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+
+
+ prepareCreate("test1").setSettings("number_of_shards", 2, "number_of_replicas", 1).get();
+ ensureYellow();
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
+ assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
+ assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
+
+ // add another node, replicas should get assigned
+ cluster().startNode();
+ ensureGreen();
+ index("test1", "type", "1", "f", "f");
+ refresh(); // make the doc visible
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+ assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
+ assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
+
+ prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
+ ensureGreen();
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+ assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2));
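+        // 4 shards for test1 plus 3 for test2 = 7 total with 5 primaries,
+        // so replication = (7 - 5) / 5 = 2.0 / 5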
+ assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5);
+
+ assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5));
+ assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2));
+ assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3));
+
+ assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5));
+ assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3));
+ assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4));
+
+ assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5));
+ assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0));
+ assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0));
+
+ }
+
+ @Test
+ public void testValuesSmokeScreen() {
+ cluster().ensureAtMostNumNodes(5);
+ cluster().ensureAtLeastNumNodes(1);
+ SigarService sigarService = cluster().getInstance(SigarService.class);
+ index("test1", "type", "1", "f", "f");
+
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getTimestamp(), Matchers.greaterThan(946681200000l)); // 1 Jan 2000
+ assertThat(response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
+
+ assertThat(response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
+ assertThat(response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
+ if (sigarService.sigarAvailable()) {
+ // We only get those if we have sigar
+ assertThat(response.nodesStats.getOs().getAvailableProcessors(), Matchers.greaterThan(0));
+ assertThat(response.nodesStats.getOs().getAvailableMemory().bytes(), Matchers.greaterThan(0l));
+ assertThat(response.nodesStats.getOs().getCpus().size(), Matchers.greaterThan(0));
+ }
+ assertThat(response.nodesStats.getVersions().size(), Matchers.greaterThan(0));
+ assertThat(response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true));
+ assertThat(response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0));
+
+ assertThat(response.nodesStats.getProcess().count, Matchers.greaterThan(0));
+ // 0 happens when not supported on platform
+ assertThat(response.nodesStats.getProcess().getAvgOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(0L));
+ // these can be -1 if not supported on platform
+ assertThat(response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
+ assertThat(response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java b/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java
new file mode 100644
index 0000000..67ed2be
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.hasSize;
+
+public class PutWarmerRequestTests extends ElasticsearchTestCase {
+
+ @Test // issue 4196
+ public void testThatValidationWithoutSpecifyingSearchRequestFails() {
+ PutWarmerRequest putWarmerRequest = new PutWarmerRequest("foo");
+ ActionRequestValidationException validationException = putWarmerRequest.validate();
+ assertThat(validationException.validationErrors(), hasSize(1));
+ assertThat(validationException.getMessage(), containsString("search request is missing"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java
new file mode 100644
index 0000000..5a3c318
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.*;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numNodes = 1)
+public class BulkIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBulkIndexCreatesMapping() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/bulk-log.json");
+ BulkRequestBuilder bulkBuilder = new BulkRequestBuilder(client());
+ bulkBuilder.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ bulkBuilder.execute().actionGet();
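+        // Index auto-creation and dynamic mapping updates are asynchronous, so
+        // poll the cluster state until the index and its type mapping appear.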
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ try {
+ GetMappingsResponse mappingsResponse = client().admin().indices().getMappings(new GetMappingsRequest()).get();
+ return mappingsResponse.getMappings().containsKey("logstash-2014.03.30");
+ } catch (Throwable t) {
+ return false;
+ }
+ }
+ });
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ try {
+ GetMappingsResponse mappingsResponse = client().admin().indices().getMappings(new GetMappingsRequest()).get();
+ return mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs");
+ } catch (Throwable t) {
+ return false;
+ }
+ }
+ });
+ ensureYellow();
+ GetMappingsResponse mappingsResponse = client().admin().indices().getMappings(new GetMappingsRequest()).get();
+ assertThat(mappingsResponse.mappings().size(), equalTo(1));
+ assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30"));
+ assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
new file mode 100644
index 0000000..9171fc4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class BulkRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleBulk1() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
+ // translate Windows line endings (\r\n) to standard ones (\n)
+ if (Constants.WINDOWS) {
+ bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
+ }
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(3));
+ assertThat(((IndexRequest) bulkRequest.requests().get(0)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }").toBytes()));
+ assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class));
+ assertThat(((IndexRequest) bulkRequest.requests().get(2)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value3\" }").toBytes()));
+ }
+
+ @Test
+ public void testSimpleBulk2() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk2.json");
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(3));
+ }
+
+ @Test
+ public void testSimpleBulk3() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk3.json");
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(3));
+ }
+
+ @Test
+ public void testSimpleBulk4() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk4.json");
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(4));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(0)).id(), equalTo("1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().toUtf8(), equalTo("{\"field\":\"value\"}"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).id(), equalTo("0"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).type(), equalTo("type1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).index(), equalTo("index1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).script(), equalTo("counter += param1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).scriptLang(), equalTo("js"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).scriptParams().size(), equalTo(1));
+ assertThat(((Integer) ((UpdateRequest) bulkRequest.requests().get(1)).scriptParams().get("param1")), equalTo(1));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().toUtf8(), equalTo("{\"counter\":1}"));
+ }
+
+ @Test
+ public void testBulkAllowExplicitIndex() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
+ try {
+ new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), true, null, null, false);
+ fail();
+ } catch (Exception e) {
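+            // expected: simple-bulk.json addresses explicit indices, which
+            // this request disallows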
+ }
+
+ bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json");
+ new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), true, "test", null, false);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/bulk/bulk-log.json b/src/test/java/org/elasticsearch/action/bulk/bulk-log.json
new file mode 100644
index 0000000..9c3663c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/bulk-log.json
@@ -0,0 +1,24 @@
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json
new file mode 100644
index 0000000..cf76477
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json
@@ -0,0 +1,5 @@
+{ "index":{"_index":"test","_type":"type1","_id":"1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json
new file mode 100644
index 0000000..7cd4f99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json
@@ -0,0 +1,5 @@
+{ "index":{ } }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json
new file mode 100644
index 0000000..7cd4f99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json
@@ -0,0 +1,5 @@
+{ "index":{ } }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json
new file mode 100644
index 0000000..8b916b8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json
@@ -0,0 +1,7 @@
+{ "update" : {"_id" : "1", "_retry_on_conflict" : 2} }
+{ "doc" : {"field" : "value"} }
+{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1" } }
+{ "script" : "counter += param1", "lang" : "js", "params" : {"param1" : 1}, "upsert" : {"counter" : 1}}
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json
new file mode 100644
index 0000000..6ad5ff3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json
@@ -0,0 +1,5 @@
+{ "index": {"_type": "type1","_id": "1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java
new file mode 100644
index 0000000..3995061
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiPercolatorRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseBulkRequests() throws Exception {
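+        // mpercolate bodies use the bulk-style line format: a header line
+        // ({"percolate" : ...} or {"count" : ...}) followed by a source line,
+        // which is empty ({}) when an existing document is referenced by id.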
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json");
+ MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length, false);
+
+ assertThat(request.requests().size(), equalTo(6));
+ PercolateRequest percolateRequest = request.requests().get(0);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strict()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+        Map<String, Object> sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map()));
+
+ percolateRequest = request.requests().get(1);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index2"));
+ assertThat(percolateRequest.indices()[1], equalTo("my-index3"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map()));
+
+ percolateRequest = request.requests().get(2);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index4"));
+ assertThat(percolateRequest.indices()[1], equalTo("my-index5"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
+ assertThat(percolateRequest.onlyCount(), equalTo(true));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map()));
+
+ percolateRequest = request.requests().get(3);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index6"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().id(), equalTo("1"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().index(), equalTo("my-index6"));
+ assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.getRequest().preference(), equalTo("_local"));
+
+ percolateRequest = request.requests().get(4);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index7"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strict()));
+ assertThat(percolateRequest.onlyCount(), equalTo(true));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().id(), equalTo("2"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().index(), equalTo("my-index7"));
+ assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.getRequest().preference(), equalTo("_local"));
+
+ percolateRequest = request.requests().get(5);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index8"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("primary"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strict()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value4").map()));
+ }
+
+ @Test
+ public void testParseBulkRequests_defaults() throws Exception {
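+        // The headers in mpercolate2.json omit index and type, so each
+        // sub-request should inherit the defaults set on the enclosing request below.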
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate2.json");
+ MultiPercolateRequest request = new MultiPercolateRequest();
+ request.indices("my-index1").documentType("my-type1").indicesOptions(IndicesOptions.lenient());
+ request.add(data, 0, data.length, false);
+
+ assertThat(request.requests().size(), equalTo(3));
+ PercolateRequest percolateRequest = request.requests().get(0);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+        Map<String, Object> sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map()));
+
+ percolateRequest = request.requests().get(1);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map()));
+
+ percolateRequest = request.requests().get(2);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json
new file mode 100644
index 0000000..ceb4aca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json
@@ -0,0 +1,12 @@
+{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : false}}
+{"doc" : {"field1" : "value1"}}
+{"percolate" : {"indices" : ["my-index2", "my-index3"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}}
+{"doc" : {"field1" : "value2"}}
+{"count" : {"indices" : ["my-index4", "my-index5"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : "open,closed"}}
+{"doc" : {"field1" : "value3"}}
+{"percolate" : {"id" : "1", "index" : "my-index6", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : ["open", "closed"]}}
+{}
+{"count" : {"id" : "2", "index" : "my-index7", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local"}}
+{}
+{"percolate" : {"index" : "my-index8", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "primary"}}
+{"doc" : {"field1" : "value4"}}
diff --git a/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json b/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json
new file mode 100644
index 0000000..fa676cf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json
@@ -0,0 +1,6 @@
+{"percolate" : {"routing" : "my-routing-1", "preference" : "_local"}}
+{"doc" : {"field1" : "value1"}}
+{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}}
+{"doc" : {"field1" : "value2"}}
+{"percolate" : {}}
+{"doc" : {"field1" : "value3"}}
diff --git a/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
new file mode 100644
index 0000000..103ab7d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class MultiSearchRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleAdd() throws Exception {
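+        // msearch bodies alternate header and search-body lines; an empty or
+        // blank header line yields a request with default indices and types.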
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
+ assertThat(request.requests().size(), equalTo(5));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true)));
+ assertThat(request.requests().get(0).types().length, equalTo(0));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices(), nullValue());
+ assertThat(request.requests().get(2).types().length, equalTo(0));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
+ assertThat(request.requests().get(4).indices(), nullValue());
+ assertThat(request.requests().get(4).types().length, equalTo(0));
+ }
+
+ @Test
+ public void simpleAdd2() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
+ assertThat(request.requests().size(), equalTo(5));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(0).types().length, equalTo(0));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices(), nullValue());
+ assertThat(request.requests().get(2).types().length, equalTo(0));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
+ assertThat(request.requests().get(4).indices(), nullValue());
+ assertThat(request.requests().get(4).types().length, equalTo(0));
+ }
+
+ @Test
+ public void simpleAdd3() throws Exception {
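+        // Indices and types may be given either as JSON arrays or as
+        // comma-separated strings; both forms should parse identically.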
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
+ assertThat(request.requests().size(), equalTo(4));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test0"));
+ assertThat(request.requests().get(0).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test2"));
+ assertThat(request.requests().get(1).indices()[1], equalTo("test3"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices()[0], equalTo("test4"));
+ assertThat(request.requests().get(2).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(2).types()[0], equalTo("type2"));
+ assertThat(request.requests().get(2).types()[1], equalTo("type1"));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json
new file mode 100644
index 0000000..dd2f582
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json
@@ -0,0 +1,10 @@
+{"index":"test", "ignore_unavailable" : true, "expand_wildcards" : "open,closed"}}
+{"query" : {"match_all" {}}}
+{"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]}
+{"query" : {"match_all" {}}}
+{}
+{"query" : {"match_all" {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" {}}}
+
+{"query" : {"match_all" {}}}
diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch2.json b/src/test/java/org/elasticsearch/action/search/simple-msearch2.json
new file mode 100644
index 0000000..a9aca8b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/simple-msearch2.json
@@ -0,0 +1,10 @@
+{"index":"test"}
+{"query" : {"match_all" {}}}
+{"index" : "test", "type" : "type1"}
+{"query" : {"match_all" {}}}
+{}
+{"query" : {"match_all" {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" {}}}
+
+{"query" : {"match_all" {}}}
diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch3.json b/src/test/java/org/elasticsearch/action/search/simple-msearch3.json
new file mode 100644
index 0000000..9cdff90
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/simple-msearch3.json
@@ -0,0 +1,8 @@
+{"index":["test0", "test1"]}
+{"query" : {"match_all" {}}}
+{"index" : "test2,test3", "type" : "type1"}
+{"query" : {"match_all" {}}}
+{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ]}
+{"query" : {"match_all" {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" {}}}
diff --git a/src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java b/src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java
new file mode 100644
index 0000000..cfc81e7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+import org.elasticsearch.search.suggest.SuggestSearchTests;
+
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SuggestActionTests extends SuggestSearchTests {
+
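+    // Reuses the assertions from SuggestSearchTests, but routes them through
+    // the dedicated suggest action instead of the search API.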
+ protected Suggest searchSuggest(Client client, String suggestText, int expectShardsFailed, SuggestionBuilder<?>... suggestions) {
+ SuggestRequestBuilder builder = client.prepareSuggest();
+
+ if (suggestText != null) {
+ builder.setSuggestText(suggestText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder.addSuggestion(suggestion);
+ }
+
+ SuggestResponse actionGet = builder.execute().actionGet();
+ assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed));
+ if (expectShardsFailed > 0) {
+ throw new SearchPhaseExecutionException("suggest", "Suggest execution failed", new ShardSearchFailure[0]);
+ }
+ return actionGet.getSuggest();
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java b/src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java
new file mode 100644
index 0000000..6f897df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java
@@ -0,0 +1,418 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.inject.internal.Join;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public abstract class AbstractTermVectorTests extends ElasticsearchIntegrationTest {
+
+ protected static class TestFieldSetting {
+        public final String name;
+        public final boolean storedOffset;
+        public final boolean storedPayloads;
+        public final boolean storedPositions;
+
+ public TestFieldSetting(String name, boolean storedOffset, boolean storedPayloads, boolean storedPositions) {
+ this.name = name;
+ this.storedOffset = storedOffset;
+ this.storedPayloads = storedPayloads;
+ this.storedPositions = storedPositions;
+ }
+
+ public void addToMappings(XContentBuilder mappingsBuilder) throws IOException {
+ mappingsBuilder.startObject(name);
+ mappingsBuilder.field("type", "string");
+ String tv_settings;
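+            // Map the flag combination onto the mapping's term_vector value;
+            // payloads can only be stored together with positions.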
+ if (storedPositions && storedOffset && storedPayloads) {
+ tv_settings = "with_positions_offsets_payloads";
+ } else if (storedPositions && storedOffset) {
+ tv_settings = "with_positions_offsets";
+ } else if (storedPayloads) {
+ tv_settings = "with_positions_payloads";
+ } else if (storedPositions) {
+ tv_settings = "with_positions";
+ } else if (storedOffset) {
+ tv_settings = "with_offsets";
+ } else {
+ tv_settings = "yes";
+ }
+
+ mappingsBuilder.field("term_vector", tv_settings);
+
+ if (storedPayloads) {
+ mappingsBuilder.field("analyzer", "tv_test");
+ }
+
+ mappingsBuilder.endObject();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("name: ").append(name).append(" tv_with:");
+ if (storedPayloads) {
+ sb.append("payloads,");
+ }
+ if (storedOffset) {
+ sb.append("offsets,");
+ }
+ if (storedPositions) {
+ sb.append("positions,");
+ }
+ return sb.toString();
+ }
+ }
+
+ protected static class TestDoc {
+        public final String id;
+        public final TestFieldSetting[] fieldSettings;
+        public final String[] fieldContent;
+ public String index = "test";
+ public String type = "type1";
+
+ public TestDoc(String id, TestFieldSetting[] fieldSettings, String[] fieldContent) {
+ this.id = id;
+ assertEquals(fieldSettings.length, fieldContent.length);
+ this.fieldSettings = fieldSettings;
+ this.fieldContent = fieldContent;
+ }
+
+ public TestDoc index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+
+ StringBuilder sb = new StringBuilder("index:").append(index).append(" type:").append(type).append(" id:").append(id);
+ for (int i = 0; i < fieldSettings.length; i++) {
+ TestFieldSetting f = fieldSettings[i];
+ sb.append("\n").append("Field: ").append(f).append("\n content:").append(fieldContent[i]);
+ }
+ sb.append("\n");
+
+ return sb.toString();
+ }
+ }
+
+ protected static class TestConfig {
+        public final TestDoc doc;
+        public final String[] selectedFields;
+        public final boolean requestPositions;
+        public final boolean requestOffsets;
+        public final boolean requestPayloads;
+        public Class<?> expectedException = null;
+
+ public TestConfig(TestDoc doc, String[] selectedFields, boolean requestPositions, boolean requestOffsets, boolean requestPayloads) {
+ this.doc = doc;
+ this.selectedFields = selectedFields;
+ this.requestPositions = requestPositions;
+ this.requestOffsets = requestOffsets;
+ this.requestPayloads = requestPayloads;
+ }
+
+        public TestConfig expectedException(Class<?> exceptionClass) {
+ this.expectedException = exceptionClass;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ String requested = "";
+ if (requestOffsets) {
+ requested += "offsets,";
+ }
+ if (requestPositions) {
+ requested += "position,";
+ }
+ if (requestPayloads) {
+ requested += "payload,";
+ }
+ Locale aLocale = new Locale("en", "US");
+ return String.format(aLocale, "(doc: %s\n requested: %s, fields: %s)", doc, requested,
+ selectedFields == null ? "NULL" : Join.join(",", selectedFields));
+ }
+ }
+
+ protected void createIndexBasedOnFieldSettings(TestFieldSetting[] fieldSettings, int number_of_shards) throws IOException {
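+        // The tv_test analyzer attaches each token's type as its payload via
+        // the type_as_payload filter, giving payload assertions a known value.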
+ cluster().wipeIndices("test");
+ XContentBuilder mappingBuilder = jsonBuilder();
+ mappingBuilder.startObject().startObject("type1").startObject("properties");
+ for (TestFieldSetting field : fieldSettings) {
+ field.addToMappings(mappingBuilder);
+ }
+ ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase");
+ if (number_of_shards > 0) {
+ settings.put("number_of_shards", number_of_shards);
+ }
+ mappingBuilder.endObject().endObject().endObject();
+ prepareCreate("test").addMapping("type1", mappingBuilder).setSettings(settings).get();
+
+ ensureYellow();
+ }
+
+ /**
+     * Generates test documents with random field content. The returned documents are already indexed.
+ */
+ protected TestDoc[] generateTestDocs(int numberOfDocs, TestFieldSetting[] fieldSettings) {
+ String[] fieldContentOptions = new String[]{"Generating a random permutation of a sequence (such as when shuffling cards).",
+ "Selecting a random sample of a population (important in statistical sampling).",
+ "Allocating experimental units via random assignment to a treatment or control condition.",
+ "Generating random numbers: see Random number generation.",
+ "Transforming a data stream (such as when using a scrambler in telecommunications)."};
+
+ String[] contentArray = new String[fieldSettings.length];
+ Map<String, Object> docSource = new HashMap<String, Object>();
+ TestDoc[] testDocs = new TestDoc[numberOfDocs];
+ for (int docId = 0; docId < numberOfDocs; docId++) {
+ docSource.clear();
+ for (int i = 0; i < contentArray.length; i++) {
+ contentArray[i] = fieldContentOptions[randomInt(fieldContentOptions.length - 1)];
+ docSource.put(fieldSettings[i].name, contentArray[i]);
+ }
+ TestDoc doc = new TestDoc(Integer.toString(docId), fieldSettings, contentArray.clone());
+ index(doc.index, doc.type, doc.id, docSource);
+ testDocs[docId] = doc;
+ }
+
+ refresh();
+ return testDocs;
+
+ }
+
+ protected TestConfig[] generateTestConfigs(int numberOfTests, TestDoc[] testDocs, TestFieldSetting[] fieldSettings) {
+ ArrayList<TestConfig> configs = new ArrayList<TestConfig>();
+ for (int i = 0; i < numberOfTests; i++) {
+
+            ArrayList<String> selectedFields = null;
+            if (randomBoolean()) {
+                // randomly restrict the request to a subset of the fields
+                selectedFields = new ArrayList<String>();
+                if (randomBoolean()) {
+                    selectedFields.add("Doesnt_exist"); // this will be ignored.
+                }
+                for (TestFieldSetting field : fieldSettings) {
+                    if (randomBoolean()) {
+                        selectedFields.add(field.name);
+                    }
+                }
+                if (selectedFields.isEmpty()) {
+                    selectedFields = null; // an empty selection is not supported.
+                }
+            }
+ TestConfig config = new TestConfig(testDocs[randomInt(testDocs.length - 1)], selectedFields == null ? null
+ : selectedFields.toArray(new String[]{}), randomBoolean(), randomBoolean(), randomBoolean());
+
+ configs.add(config);
+ }
+ // always adds a test that fails
+ configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist"),
+ new String[]{"doesnt_exist"}, true, true, true).expectedException(IndexMissingException.class));
+
+ refresh();
+
+ return configs.toArray(new TestConfig[]{});
+ }
+
+ protected TestFieldSetting[] getFieldSettings() {
+ return new TestFieldSetting[]{new TestFieldSetting("field_with_positions", false, false, true),
+ new TestFieldSetting("field_with_offsets", true, false, false),
+ new TestFieldSetting("field_with_only_tv", false, false, false),
+                new TestFieldSetting("field_with_positions_offsets", true, false, true),
+                new TestFieldSetting("field_with_positions_payloads", false, true, true)
+        };
+ }
+
+ protected DirectoryReader indexDocsWithLucene(TestDoc[] testDocs) throws IOException {
+
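+        // Build an equivalent in-memory Lucene index whose term vectors serve
+        // as the reference the Elasticsearch responses are compared against.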
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ for (TestFieldSetting field : testDocs[0].fieldSettings) {
+ if (field.storedPayloads) {
+ mapping.put(field.name, new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new StandardTokenizer(Version.CURRENT.luceneVersion, reader);
+ TokenFilter filter = new LowerCaseFilter(Version.CURRENT.luceneVersion, tokenizer);
+ filter = new TypeAsPayloadTokenFilter(filter);
+ return new TokenStreamComponents(tokenizer, filter);
+ }
+
+ });
+ }
+ }
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.CURRENT.luceneVersion, CharArraySet.EMPTY_SET), mapping);
+
+ Directory dir = new RAMDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(Version.CURRENT.luceneVersion, wrapper);
+
+ conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ for (TestDoc doc : testDocs) {
+ Document d = new Document();
+ d.add(new Field("id", doc.id, StringField.TYPE_STORED));
+ for (int i = 0; i < doc.fieldContent.length; i++) {
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ TestFieldSetting fieldSetting = doc.fieldSettings[i];
+
+ type.setStoreTermVectorOffsets(fieldSetting.storedOffset);
+ type.setStoreTermVectorPayloads(fieldSetting.storedPayloads);
+ type.setStoreTermVectorPositions(fieldSetting.storedPositions || fieldSetting.storedPayloads || fieldSetting.storedOffset);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ d.add(new Field(fieldSetting.name, doc.fieldContent[i], type));
+ }
+ writer.updateDocument(new Term("id", doc.id), d);
+ writer.commit();
+ }
+ writer.close();
+
+ return DirectoryReader.open(dir);
+ }
+
+ protected void validateResponse(TermVectorResponse esResponse, Fields luceneFields, TestConfig testConfig) throws IOException {
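+        // Walk the ES and Lucene term enumerations in lock step, comparing
+        // frequencies, positions, offsets and payloads per the request flags.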
+ TestDoc testDoc = testConfig.doc;
+ HashSet<String> selectedFields = testConfig.selectedFields == null ? null : new HashSet<String>(
+ Arrays.asList(testConfig.selectedFields));
+ Fields esTermVectorFields = esResponse.getFields();
+ for (TestFieldSetting field : testDoc.fieldSettings) {
+ Terms esTerms = esTermVectorFields.terms(field.name);
+ if (selectedFields != null && !selectedFields.contains(field.name)) {
+ assertNull(esTerms);
+ continue;
+ }
+
+ assertNotNull(esTerms);
+
+ Terms luceneTerms = luceneFields.terms(field.name);
+ TermsEnum esTermEnum = esTerms.iterator(null);
+ TermsEnum luceneTermEnum = luceneTerms.iterator(null);
+
+ while (esTermEnum.next() != null) {
+ assertNotNull(luceneTermEnum.next());
+
+ assertThat(esTermEnum.totalTermFreq(), equalTo(luceneTermEnum.totalTermFreq()));
+ DocsAndPositionsEnum esDocsPosEnum = esTermEnum.docsAndPositions(null, null, 0);
+ DocsAndPositionsEnum luceneDocsPosEnum = luceneTermEnum.docsAndPositions(null, null, 0);
+ if (luceneDocsPosEnum == null) {
+                    // Lucene stored no positions data for this term; that should only
+                    // happen when neither positions, offsets nor payloads were stored.
+ assertFalse(field.storedOffset);
+ assertFalse(field.storedPayloads);
+ assertFalse(field.storedPositions);
+ continue;
+ }
+
+ String currentTerm = esTermEnum.term().utf8ToString();
+
+ assertThat("Token mismatch for field: " + field.name, currentTerm, equalTo(luceneTermEnum.term().utf8ToString()));
+
+ esDocsPosEnum.nextDoc();
+ luceneDocsPosEnum.nextDoc();
+
+ int freq = esDocsPosEnum.freq();
+ assertThat(freq, equalTo(luceneDocsPosEnum.freq()));
+ for (int i = 0; i < freq; i++) {
+ String failDesc = " (field:" + field.name + " term:" + currentTerm + ")";
+ int lucenePos = luceneDocsPosEnum.nextPosition();
+ int esPos = esDocsPosEnum.nextPosition();
+ if (field.storedPositions && testConfig.requestPositions) {
+ assertThat("Position test failed" + failDesc, lucenePos, equalTo(esPos));
+ } else {
+ assertThat("Missing position test failed" + failDesc, esPos, equalTo(-1));
+ }
+ if (field.storedOffset && testConfig.requestOffsets) {
+ assertThat("Offset test failed" + failDesc, luceneDocsPosEnum.startOffset(), equalTo(esDocsPosEnum.startOffset()));
+ assertThat("Offset test failed" + failDesc, luceneDocsPosEnum.endOffset(), equalTo(esDocsPosEnum.endOffset()));
+ } else {
+ assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.startOffset(), equalTo(-1));
+ assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.endOffset(), equalTo(-1));
+ }
+ if (field.storedPayloads && testConfig.requestPayloads) {
+ assertThat("Payload test failed" + failDesc, luceneDocsPosEnum.getPayload(), equalTo(esDocsPosEnum.getPayload()));
+ } else {
+ assertThat("Missing payload test failed" + failDesc, esDocsPosEnum.getPayload(), equalTo(null));
+ }
+
+ }
+ }
+
+            assertNull("ES term enumeration is exhausted but Lucene's is not", luceneTermEnum.next());
+
+ }
+
+ }
+
+ protected TermVectorRequestBuilder getRequestForConfig(TestConfig config) {
+ return client().prepareTermVector(config.doc.index, config.doc.type, config.doc.id).setPayloads(config.requestPayloads)
+ .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true)
+ .setSelectedFields(config.selectedFields);
+
+ }
+
+ protected Fields getTermVectorsFromLucene(DirectoryReader directoryReader, TestDoc doc) throws IOException {
+ IndexSearcher searcher = new IndexSearcher(directoryReader);
+ TopDocs search = searcher.search(new TermQuery(new Term("id", doc.id)), 1);
+
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(1, scoreDocs.length);
+ return directoryReader.getTermVectors(scoreDocs[0].doc);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java
new file mode 100644
index 0000000..3908e85
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.BytesStream;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class GetTermVectorCheckDocFreqTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleTermVectors() throws ElasticsearchException, IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .put("index.number_of_replicas", 0)
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ ensureGreen();
+ int numDocs = 15;
+ for (int i = 0; i < numDocs; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+ // 31the34 35lazy39 40dog43
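+                            // (the numbers annotate each token's expected start and end character offsets)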
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+ String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" };
+ int[] freq = { 1, 1, 1, 1, 1, 1, 1, 2 };
+ int[][] pos = { { 2 }, { 8 }, { 3 }, { 4 }, { 7 }, { 5 }, { 1 }, { 0, 6 } };
+ int[][] startOffset = { { 10 }, { 40 }, { 16 }, { 20 }, { 35 }, { 26 }, { 4 }, { 0, 31 } };
+ int[][] endOffset = { { 15 }, { 43 }, { 19 }, { 25 }, { 39 }, { 30 }, { 9 }, { 3, 34 } };
+ for (int i = 0; i < numDocs; i++) {
+ checkAllInfo(numDocs, values, freq, pos, startOffset, endOffset, i);
+ checkWithoutTermStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
+ checkWithoutFieldStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
+ }
+ }
+
+ private void checkWithoutFieldStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
+ int i) throws IOException {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setTermStatistics(true).setFieldStatistics(false).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+        assertThat(terms.size(), equalTo(8L));
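+        // field statistics were not requested, so the aggregate stats come back as -1 sentinels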
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
+ assertThat(terms.getDocCount(), Matchers.equalTo(-1));
+ assertThat(terms.getSumDocFreq(), equalTo((long) -1));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ if (string.equals("the")) {
+ assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
+ } else {
+ assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
+ }
+
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(numDocs));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+        XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+
+ response.toXContent(xBuilder, null);
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8();
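+        // the expected payload "d29yZA==" is the base64 encoding of "word", the
+        // token type recorded by the type_as_payload filter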
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+
+ }
+
+ private void checkWithoutTermStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
+ int i) throws IOException {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setTermStatistics(false).setFieldStatistics(true).setSelectedFields();
+ assertThat(resp.request().termStatistics(), equalTo(false));
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+        assertThat(terms.size(), equalTo(8L));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
+ assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
+ assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+
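+            // term statistics were not requested, so ttf and docFreq come back as -1 sentinels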
+ assertThat("expected ttf of " + string, -1, equalTo((int) iterator.totalTermFreq()));
+
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(-1));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+        XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+
+ response.toXContent(xBuilder, null);
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8();
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+
+ }
+
+ private void checkAllInfo(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset, int i)
+ throws IOException {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setFieldStatistics(true).setTermStatistics(true).setSelectedFields();
+ assertThat(resp.request().fieldStatistics(), equalTo(true));
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+        assertThat(terms.size(), equalTo(8L));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
+ assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
+ assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ if (string.equals("the")) {
+ assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
+ } else {
+ assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
+ }
+
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(numDocs));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+        XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+
+ response.toXContent(xBuilder, null);
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8();
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java
new file mode 100644
index 0000000..f3dbb41
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java
@@ -0,0 +1,544 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+
+public class GetTermVectorTests extends AbstractTermVectorTests {
+
+ @Test
+ public void testNoSuchDoc() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping));
+
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "666").setSource("field", "foo bar").execute().actionGet();
+ refresh();
+ for (int i = 0; i < 20; i++) {
+ ActionFuture<TermVectorResponse> termVector = client().termVector(new TermVectorRequest("test", "type1", "" + i));
+ TermVectorResponse actionGet = termVector.actionGet();
+ assertThat(actionGet, Matchers.notNullValue());
+ assertThat(actionGet.isExists(), Matchers.equalTo(false));
+
+ }
+
+ }
+
+ @Test
+ public void testExistingFieldWithNoTermVectorsNoNPE() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("existingfield")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping));
+
+ ensureYellow();
+        // when indexing a field whose only content is a question mark, the
+        // analyzer produces no tokens, so the stored term vectors will be null
+ client().prepareIndex("test", "type1", "0").setSource("existingfield", "?").execute().actionGet();
+ refresh();
+ String[] selectedFields = { "existingfield" };
+ ActionFuture<TermVectorResponse> termVector = client().termVector(
+ new TermVectorRequest("test", "type1", "0").selectedFields(selectedFields));
+        // let's see whether the null term vectors are handled gracefully...
+        TermVectorResponse actionGet = termVector.actionGet();
+ assertThat(actionGet.isExists(), Matchers.equalTo(true));
+
+ }
+
+ @Test
+ public void testExistingFieldButNotInDocNPE() throws Exception {
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("existingfield")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureYellow();
+        // the mapped field "existingfield" is absent from the indexed document,
+        // so its term vectors will be null
+ client().prepareIndex("test", "type1", "0").setSource("anotherexistingfield", 1).execute().actionGet();
+ refresh();
+ String[] selectedFields = { "existingfield" };
+ ActionFuture<TermVectorResponse> termVector = client().termVector(
+ new TermVectorRequest("test", "type1", "0").selectedFields(selectedFields));
+        // let's see whether the null term vectors are handled gracefully...
+ TermVectorResponse actionGet = termVector.actionGet();
+ assertThat(actionGet.isExists(), Matchers.equalTo(true));
+
+ }
+
+ @Test
+ public void testSimpleTermVectors() throws ElasticsearchException, IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping)
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ ensureYellow();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+ // 31the34 35lazy39 40dog43
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+ String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"};
+ int[] freq = {1, 1, 1, 1, 1, 1, 1, 2};
+ int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}};
+ int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
+ int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
+ for (int i = 0; i < 10; i++) {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true)
+ .setOffsets(true).setPositions(true).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+            assertThat(terms.size(), equalTo(8L));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ // do not test ttf or doc frequency, because here we have many
+ // shards and do not know how documents are distributed
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+ }
+ }
+
+ @Test
+ public void testRandomSingleTermVectors() throws ElasticsearchException, IOException {
+ FieldType ft = new FieldType();
+ int config = randomInt(6);
+ boolean storePositions = false;
+ boolean storeOffsets = false;
+ boolean storePayloads = false;
+ boolean storeTermVectors = false;
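+        // each case below selects one self-contained combination of term vector
+        // options; the flags are not cumulative across cases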
+        switch (config) {
+        case 0: {
+            // do nothing
+            break;
+        }
+        case 1: {
+            storeTermVectors = true;
+            break;
+        }
+        case 2: {
+            storeTermVectors = true;
+            storePositions = true;
+            break;
+        }
+        case 3: {
+            storeTermVectors = true;
+            storeOffsets = true;
+            break;
+        }
+        case 4: {
+            storeTermVectors = true;
+            storePositions = true;
+            storeOffsets = true;
+            break;
+        }
+        case 5: {
+            storeTermVectors = true;
+            storePositions = true;
+            storePayloads = true;
+            break;
+        }
+        case 6: {
+            storeTermVectors = true;
+            storePositions = true;
+            storeOffsets = true;
+            storePayloads = true;
+            break;
+        }
+        }
+ ft.setStoreTermVectors(storeTermVectors);
+ ft.setStoreTermVectorOffsets(storeOffsets);
+ ft.setStoreTermVectorPayloads(storePayloads);
+ ft.setStoreTermVectorPositions(storePositions);
+
+ String optionString = AbstractFieldMapper.termVectorOptionsToString(ft);
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", optionString)
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping)
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ ensureYellow();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+ // 31the34 35lazy39 40dog43
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+ String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"};
+ int[] freq = {1, 1, 1, 1, 1, 1, 1, 2};
+ int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}};
+ int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
+ int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
+
+ boolean isPayloadRequested = randomBoolean();
+ boolean isOffsetRequested = randomBoolean();
+ boolean isPositionsRequested = randomBoolean();
+ String infoString = createInfoString(isPositionsRequested, isOffsetRequested, isPayloadRequested, optionString);
+ for (int i = 0; i < 10; i++) {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i))
+ .setPayloads(isPayloadRequested).setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat(infoString + "doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
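+            // if no term vectors were stored at all (config 0), the response contains no fields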
+ assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
+ if (ft.storeTermVectors()) {
+ Terms terms = fields.terms("field");
+                assertThat(terms.size(), equalTo(8L));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(infoString, next, Matchers.notNullValue());
+ assertThat(infoString + "expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(infoString, next, Matchers.notNullValue());
+ // do not test ttf or doc frequency, because here we have
+ // many shards and do not know how documents are distributed
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+                    // docsAndPositions only returns something if positions,
+                    // payloads, or offsets are stored / requested; otherwise
+                    // a plain DocsEnum would be used
+ assertThat(infoString, docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(infoString, freq[j], equalTo(docsAndPositions.freq()));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ if (isPositionsRequested && storePositions) {
+ assertThat(infoString, termPos.length, equalTo(freq[j]));
+ }
+ if (isOffsetRequested && storeOffsets) {
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ }
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ // only return something useful if requested and stored
+ if (isPositionsRequested && storePositions) {
+ assertThat(infoString + "positions for term: " + string, nextPosition, equalTo(termPos[k]));
+ } else {
+ assertThat(infoString + "positions for term: ", nextPosition, equalTo(-1));
+ }
+
+ // only return something useful if requested and stored
+ if (isPayloadRequested && storePayloads) {
+ assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef(
+ "word")));
+ } else {
+ assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(null));
+ }
+ // only return something useful if requested and stored
+ if (isOffsetRequested && storeOffsets) {
+
+ assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(),
+ equalTo(termStartOffset[k]));
+ assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ } else {
+ assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(), equalTo(-1));
+ assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(-1));
+ }
+
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+ }
+
+ }
+ }
+
+ private String createInfoString(boolean isPositionsRequested, boolean isOffsetRequested, boolean isPayloadRequested,
+ String optionString) {
+ String ret = "Store config: " + optionString + "\n" + "Requested: pos-"
+ + (isPositionsRequested ? "yes" : "no") + ", offsets-" + (isOffsetRequested ? "yes" : "no") + ", payload- "
+ + (isPayloadRequested ? "yes" : "no") + "\n";
+ return ret;
+ }
+
+ @Test
+ public void testDuelESLucene() throws Exception {
+ TestFieldSetting[] testFieldSettings = getFieldSettings();
+ createIndexBasedOnFieldSettings(testFieldSettings, -1);
+ TestDoc[] testDocs = generateTestDocs(5, testFieldSettings);
+
+ DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
+ TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
+
+ for (TestConfig test : testConfigs) {
+ try {
+ TermVectorRequestBuilder request = getRequestForConfig(test);
+ if (test.expectedException != null) {
+ assertThrows(request, test.expectedException);
+ continue;
+ }
+
+ TermVectorResponse response = request.get();
+ Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
+ validateResponse(response, luceneTermVectors, test);
+ } catch (Throwable t) {
+ throw new Exception("Test exception while running " + test.toString(), t);
+ }
+ }
+ }
+
+ @Test
+ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws ElasticsearchException, IOException {
+
+ //create the test document
+ int encoding = randomIntBetween(0, 2);
+ String encodingString = "";
+ if (encoding == 0) {
+ encodingString = "float";
+ }
+ if (encoding == 1) {
+ encodingString = "int";
+ }
+ if (encoding == 2) {
+ encodingString = "identity";
+ }
+        String[] tokens = createRandomTokens();
+ Map<String, List<BytesRef>> payloads = createPayloads(tokens, encoding);
+ String delimiter = createRandomDelimiter(tokens);
+ String queryString = createString(tokens, payloads, encoding, delimiter.charAt(0));
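+        // queryString now interleaves tokens with their encoded payloads, e.g. with
+        // delimiter '|' and float encoding something like "the|0.1532 quick|0.8 fox "
+        // (illustrative values only)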
+ //create the mapping
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field").field("type", "string").field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_test.filter", "my_delimited_payload_filter")
+ .put("index.analysis.filter.my_delimited_payload_filter.delimiter", delimiter)
+ .put("index.analysis.filter.my_delimited_payload_filter.encoding", encodingString)
+ .put("index.analysis.filter.my_delimited_payload_filter.type", "delimited_payload_filter")));
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", Integer.toString(1))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", queryString).endObject()).execute().actionGet();
+ refresh();
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(1)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id 1 doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ TermsEnum iterator = terms.iterator(null);
+ while (iterator.next() != null) {
+ String term = iterator.term().utf8ToString();
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ List<BytesRef> curPayloads = payloads.get(term);
+ assertThat(term, curPayloads, Matchers.notNullValue());
+ assertNotNull(docsAndPositions);
+ for (int k = 0; k < docsAndPositions.freq(); k++) {
+ docsAndPositions.nextPosition();
+                if (docsAndPositions.getPayload() != null) {
+                    String infoString = "\nterm: " + term + " has payload \n" + docsAndPositions.getPayload().toString() + "\n but should have payload \n" + curPayloads.get(k).toString();
+                    assertThat(infoString, docsAndPositions.getPayload(), equalTo(curPayloads.get(k)));
+                } else {
+                    String infoString = "\nterm: " + term + " has no payload but should have payload \n" + curPayloads.get(k).toString();
+ assertThat(infoString, curPayloads.get(k).length, equalTo(0));
+ }
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+ }
+
+    private String createRandomDelimiter(String[] tokens) {
+ String delimiter = "";
+ boolean isTokenOrWhitespace = true;
+        while (isTokenOrWhitespace) {
+            isTokenOrWhitespace = false;
+            delimiter = randomUnicodeOfLength(1);
+            for (String token : tokens) {
+                if (token.contains(delimiter)) {
+                    isTokenOrWhitespace = true;
+                }
+            }
+            if (Character.isWhitespace(delimiter.charAt(0))) {
+                isTokenOrWhitespace = true;
+            }
+ }
+ return delimiter;
+ }
+
+    private String createString(String[] tokens, Map<String, List<BytesRef>> payloads, int encoding, char delimiter) {
+ String resultString = "";
+ ObjectIntOpenHashMap<String> payloadCounter = new ObjectIntOpenHashMap<String>();
+ for (String token : tokens) {
+ if (!payloadCounter.containsKey(token)) {
+ payloadCounter.putIfAbsent(token, 0);
+ } else {
+ payloadCounter.put(token, payloadCounter.get(token) + 1);
+ }
+ resultString = resultString + token;
+ BytesRef payload = payloads.get(token).get(payloadCounter.get(token));
+ if (payload.length > 0) {
+ resultString = resultString + delimiter;
+ switch (encoding) {
+ case 0: {
+ resultString = resultString + Float.toString(PayloadHelper.decodeFloat(payload.bytes, payload.offset));
+ break;
+ }
+ case 1: {
+ resultString = resultString + Integer.toString(PayloadHelper.decodeInt(payload.bytes, payload.offset));
+ break;
+ }
+ case 2: {
+ resultString = resultString + payload.utf8ToString();
+ break;
+ }
+ default: {
+ throw new ElasticsearchException("unsupported encoding type");
+ }
+ }
+ }
+ resultString = resultString + " ";
+ }
+ return resultString;
+ }
+
+ private Map<String, List<BytesRef>> createPayloads(String[] tokens, int encoding) {
+ Map<String, List<BytesRef>> payloads = new HashMap<String, List<BytesRef>>();
+ for (String token : tokens) {
+ if (payloads.get(token) == null) {
+ payloads.put(token, new ArrayList<BytesRef>());
+ }
+ boolean createPayload = randomBoolean();
+ if (createPayload) {
+ switch (encoding) {
+ case 0: {
+ float theFloat = randomFloat();
+ payloads.get(token).add(new BytesRef(PayloadHelper.encodeFloat(theFloat)));
+ break;
+ }
+ case 1: {
+ payloads.get(token).add(new BytesRef(PayloadHelper.encodeInt(randomInt())));
+ break;
+ }
+ case 2: {
+ String payload = randomUnicodeOfLengthBetween(50, 100);
+ for (int c = 0; c < payload.length(); c++) {
+ if (Character.isWhitespace(payload.charAt(c))) {
+ payload = payload.replace(payload.charAt(c), 'w');
+ }
+ }
+ payloads.get(token).add(new BytesRef(payload));
+ break;
+ }
+ default: {
+ throw new ElasticsearchException("unsupported encoding type");
+ }
+ }
+ } else {
+ payloads.get(token).add(new BytesRef());
+ }
+ }
+ return payloads;
+ }
+
+    private String[] createRandomTokens() {
+ String[] tokens = { "the", "quick", "brown", "fox" };
+ int numTokensWithDuplicates = randomIntBetween(3, 15);
+ String[] finalTokens = new String[numTokensWithDuplicates];
+ for (int i = 0; i < numTokensWithDuplicates; i++) {
+ finalTokens[i] = tokens[randomIntBetween(0, tokens.length - 1)];
+ }
+ return finalTokens;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java b/src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java
new file mode 100644
index 0000000..8ce9a95
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Fields;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MultiTermVectorsTests extends AbstractTermVectorTests {
+
+ @Test
+ public void testDuelESLucene() throws Exception {
+        TestFieldSetting[] testFieldSettings = getFieldSettings();
+        createIndexBasedOnFieldSettings(testFieldSettings, -1);
+        TestDoc[] testDocs = generateTestDocs(5, testFieldSettings);
+
+        DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
+        TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
+
+        MultiTermVectorsRequestBuilder requestBuilder = client().prepareMultiTermVectors();
+        for (TestConfig test : testConfigs) {
+ requestBuilder.add(getRequestForConfig(test).request());
+ }
+
+ MultiTermVectorsItemResponse[] responseItems = requestBuilder.get().getResponses();
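+        // the responses array preserves the order in which the requests were added,
+        // so responseItems[i] corresponds to testConfigs[i]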
+
+ for (int i = 0; i < testConfigs.length; i++) {
+ TestConfig test = testConfigs[i];
+ try {
+ MultiTermVectorsItemResponse item = responseItems[i];
+ if (test.expectedException != null) {
+ assertTrue(item.isFailed());
+ continue;
+ } else if (item.isFailed()) {
+ fail(item.getFailure().getMessage());
+ }
+ Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
+ validateResponse(item.getResponse(), luceneTermVectors, test);
+ } catch (Throwable t) {
+ throw new Exception("Test exception while running " + test.toString(), t);
+ }
+ }
+
+ }
+
+    @Test
+    public void testMissingIndexThrowsMissingIndex() throws Exception {
+ TermVectorRequestBuilder requestBuilder = client().prepareTermVector("testX", "typeX", Integer.toString(1));
+ MultiTermVectorsRequestBuilder mtvBuilder = new MultiTermVectorsRequestBuilder(client());
+ mtvBuilder.add(requestBuilder.request());
+ MultiTermVectorsResponse response = mtvBuilder.execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getFailure().getMessage(), equalTo("[" + response.getResponses()[0].getIndex() + "] missing"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java b/src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java
new file mode 100644
index 0000000..36ee3cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.action.termvector.TermVectorRequest.Flag;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.TypeParsers;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.rest.action.termvector.RestTermVectorAction;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class TermVectorUnitTests extends ElasticsearchLuceneTestCase {
+
+ @Test
+ public void streamResponse() throws Exception {
+
+ TermVectorResponse outResponse = new TermVectorResponse("a", "b", "c");
+ outResponse.setExists(true);
+ writeStandardTermVector(outResponse);
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ outResponse.writeTo(out);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+ TermVectorResponse inResponse = new TermVectorResponse("a", "b", "c");
+ inResponse.readFrom(esBuffer);
+
+ // see if correct
+ checkIfStandardTermVector(inResponse);
+
+ outResponse = new TermVectorResponse("a", "b", "c");
+ writeEmptyTermVector(outResponse);
+ // write
+ outBuffer = new ByteArrayOutputStream();
+ out = new OutputStreamStreamOutput(outBuffer);
+ outResponse.writeTo(out);
+
+ // read
+ esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ esBuffer = new InputStreamStreamInput(esInBuffer);
+ inResponse = new TermVectorResponse("a", "b", "c");
+ inResponse.readFrom(esBuffer);
+ assertTrue(inResponse.isExists());
+
+ }
+
+ private void writeEmptyTermVector(TermVectorResponse outResponse) throws IOException {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT));
+ conf.setOpenMode(OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ type.setStoreTermVectorOffsets(true);
+ type.setStoreTermVectorPayloads(false);
+ type.setStoreTermVectorPositions(true);
+ type.setStoreTermVectors(true);
+ type.freeze();
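+        // note: this FieldType is never attached to a field; the document only gets
+        // an id, so the resulting term vectors are intentionally empty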
+ Document d = new Document();
+ d.add(new Field("id", "abc", StringField.TYPE_STORED));
+
+ writer.updateDocument(new Term("id", "abc"), d);
+ writer.commit();
+ writer.close();
+ DirectoryReader dr = DirectoryReader.open(dir);
+ IndexSearcher s = new IndexSearcher(dr);
+ TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ int doc = scoreDocs[0].doc;
+ Fields fields = dr.getTermVectors(doc);
+ EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
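+        // setFields arguments are presumably (term vectors to serialize, selected
+        // field names, requested flags, top-level fields used for statistics);
+        // no field selection is applied here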
+ outResponse.setFields(fields, null, flags, fields);
+ outResponse.setExists(true);
+ dr.close();
+ dir.close();
+
+ }
+
+ private void writeStandardTermVector(TermVectorResponse outResponse) throws IOException {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT));
+
+ conf.setOpenMode(OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ type.setStoreTermVectorOffsets(true);
+ type.setStoreTermVectorPayloads(false);
+ type.setStoreTermVectorPositions(true);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ Document d = new Document();
+ d.add(new Field("id", "abc", StringField.TYPE_STORED));
+ d.add(new Field("title", "the1 quick brown fox jumps over the1 lazy dog", type));
+ d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog", type));
+
+ writer.updateDocument(new Term("id", "abc"), d);
+ writer.commit();
+ writer.close();
+ DirectoryReader dr = DirectoryReader.open(dir);
+ IndexSearcher s = new IndexSearcher(dr);
+ TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ int doc = scoreDocs[0].doc;
+ Fields termVectors = dr.getTermVectors(doc);
+ EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
+ outResponse.setFields(termVectors, null, flags, termVectors);
+ dr.close();
+ dir.close();
+
+ }
+
+ private void checkIfStandardTermVector(TermVectorResponse inResponse) throws IOException {
+
+ Fields fields = inResponse.getFields();
+ assertThat(fields.terms("title"), Matchers.notNullValue());
+ assertThat(fields.terms("desc"), Matchers.notNullValue());
+ assertThat(fields.size(), equalTo(2));
+ }
+
+ @Test
+ public void testRestRequestParsing() throws Exception {
+ BytesReference inputBytes = new BytesArray(
+ " {\"fields\" : [\"a\", \"b\",\"c\"], \"offsets\":false, \"positions\":false, \"payloads\":true}");
+
+ TermVectorRequest tvr = new TermVectorRequest(null, null, null);
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorRequest.parseRequest(tvr, parser);
+
+ Set<String> fields = tvr.selectedFields();
+ assertThat(fields.contains("a"), equalTo(true));
+ assertThat(fields.contains("b"), equalTo(true));
+ assertThat(fields.contains("c"), equalTo(true));
+ assertThat(tvr.offsets(), equalTo(false));
+ assertThat(tvr.positions(), equalTo(false));
+ assertThat(tvr.payloads(), equalTo(true));
+ String additionalFields = "b,c ,d, e ";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
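+        // "b" and "c" are already selected, so after trimming whitespace the
+        // selection becomes {a, b, c, d, e}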
+ assertThat(tvr.selectedFields().size(), equalTo(5));
+ assertThat(fields.contains("d"), equalTo(true));
+ assertThat(fields.contains("e"), equalTo(true));
+
+ additionalFields = "";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+
+ inputBytes = new BytesArray(" {\"offsets\":false, \"positions\":false, \"payloads\":true}");
+ tvr = new TermVectorRequest(null, null, null);
+ parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorRequest.parseRequest(tvr, parser);
+ additionalFields = "";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields(), equalTo(null));
+ additionalFields = "b,c ,d, e ";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields().size(), equalTo(4));
+
+ }
+
+ @Test
+ public void testRequestParsingThrowsException() throws Exception {
+ BytesReference inputBytes = new BytesArray(
+ " {\"fields\" : \"a, b,c \", \"offsets\":false, \"positions\":false, \"payloads\":true, \"meaningless_term\":2}");
+ TermVectorRequest tvr = new TermVectorRequest(null, null, null);
+ boolean threwException = false;
+ try {
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorRequest.parseRequest(tvr, parser);
+ } catch (Exception e) {
+ threwException = true;
+ }
+ assertThat(threwException, equalTo(true));
+
+ }
+
+ @Test
+ public void streamRequest() throws IOException {
+
+ for (int i = 0; i < 10; i++) {
+ TermVectorRequest request = new TermVectorRequest("index", "type", "id");
+ request.offsets(random().nextBoolean());
+ request.fieldStatistics(random().nextBoolean());
+ request.payloads(random().nextBoolean());
+ request.positions(random().nextBoolean());
+ request.termStatistics(random().nextBoolean());
+ String parent = random().nextBoolean() ? "someParent" : null;
+ request.parent(parent);
+ String pref = random().nextBoolean() ? "somePreference" : null;
+ request.preference(pref);
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ request.writeTo(out);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+ TermVectorRequest req2 = new TermVectorRequest(null, null, null);
+ req2.readFrom(esBuffer);
+
+ assertThat(request.offsets(), equalTo(req2.offsets()));
+ assertThat(request.fieldStatistics(), equalTo(req2.fieldStatistics()));
+ assertThat(request.payloads(), equalTo(req2.payloads()));
+ assertThat(request.positions(), equalTo(req2.positions()));
+ assertThat(request.termStatistics(), equalTo(req2.termStatistics()));
+            assertThat(req2.preference(), equalTo(pref));
+            assertThat(req2.routing(), equalTo(parent));
+
+ }
+ }
+
+ @Test
+ public void testFieldTypeToTermVectorString() throws Exception {
+ FieldType ft = new FieldType();
+ ft.setStoreTermVectorOffsets(false);
+ ft.setStoreTermVectorPayloads(true);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorPositions(true);
+ String ftOpts = AbstractFieldMapper.termVectorOptionsToString(ft);
+ assertThat("with_positions_payloads", equalTo(ftOpts));
+ AllFieldMapper.Builder builder = new AllFieldMapper.Builder();
+        boolean exceptionThrown = false;
+        try {
+            TypeParsers.parseTermVector("", ftOpts, builder);
+        } catch (MapperParsingException e) {
+            exceptionThrown = true;
+        }
+        assertThat("TypeParsers.parseTermVector should accept the string with_positions_payloads but does not.", exceptionThrown, equalTo(false));
+ }
+
+ @Test
+ public void testTermVectorStringGenerationWithoutPositions() throws Exception {
+ FieldType ft = new FieldType();
+ ft.setStoreTermVectorOffsets(true);
+ ft.setStoreTermVectorPayloads(true);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorPositions(false);
+ String ftOpts = AbstractFieldMapper.termVectorOptionsToString(ft);
+ assertThat(ftOpts, equalTo("with_offsets"));
+ }
+
+ @Test
+ public void testMultiParser() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvector/multiRequest1.json");
+ BytesReference bytes = new BytesArray(data);
+ MultiTermVectorsRequest request = new MultiTermVectorsRequest();
+ request.add(new TermVectorRequest(), bytes);
+ checkParsedParameters(request);
+
+ data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvector/multiRequest2.json");
+ bytes = new BytesArray(data);
+ request = new MultiTermVectorsRequest();
+ request.add(new TermVectorRequest(), bytes);
+ checkParsedParameters(request);
+
+ }
+
+    void checkParsedParameters(MultiTermVectorsRequest request) {
+ Set<String> ids = new HashSet<String>();
+ ids.add("1");
+ ids.add("2");
+ Set<String> fields = new HashSet<String>();
+ fields.add("a");
+ fields.add("b");
+ fields.add("c");
+ for (TermVectorRequest singleRequest : request.requests) {
+ assertThat(singleRequest.index(), equalTo("testidx"));
+ assertThat(singleRequest.type(), equalTo("test"));
+ assertThat(singleRequest.payloads(), equalTo(false));
+ assertThat(singleRequest.positions(), equalTo(false));
+ assertThat(singleRequest.offsets(), equalTo(false));
+ assertThat(singleRequest.termStatistics(), equalTo(true));
+ assertThat(singleRequest.fieldStatistics(), equalTo(false));
+            assertThat(ids, Matchers.hasItem(singleRequest.id()));
+ assertThat(singleRequest.selectedFields(), equalTo(fields));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/multiRequest1.json b/src/test/java/org/elasticsearch/action/termvector/multiRequest1.json
new file mode 100644
index 0000000..fcb5e3a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/multiRequest1.json
@@ -0,0 +1,13 @@
+{
+ "ids": ["1","2"],
+ "parameters": {
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads":false,
+ "offsets":false,
+ "positions":false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type":"test"
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/action/termvector/multiRequest2.json b/src/test/java/org/elasticsearch/action/termvector/multiRequest2.json
new file mode 100644
index 0000000..a0709ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/multiRequest2.json
@@ -0,0 +1,26 @@
+{
+ "docs": [
+ {
+ "_id": "1",
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads": false,
+ "offsets": false,
+ "positions": false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type": "test"
+ },
+ {
+ "_id": "2",
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads": false,
+ "offsets": false,
+ "positions": false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type": "test"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java
new file mode 100644
index 0000000..3b7cd27
--- /dev/null
+++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java
@@ -0,0 +1,834 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.aliases;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.test.hamcrest.CollectionAssertions.hasKey;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class IndexAliasesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testAliases() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
+
+ logger.info("--> indexing against [alias1], should work now");
+ IndexResponse indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getIndex(), equalTo("test"));
+
+ logger.info("--> creating index [test_x]");
+ createIndex("test_x");
+
+ ensureGreen();
+
+ logger.info("--> remove [alias1], Aliasing index [test_x] with [alias1]");
+ assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1").addAlias("test_x", "alias1"));
+
+ logger.info("--> indexing against [alias1], should work against [test_x]");
+ indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getIndex(), equalTo("test_x"));
+ }
+
+ @Test
+ public void testFailedFilter() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ try {
+ logger.info("--> aliasing index [test] with [alias1] and filter [t]");
+ admin().indices().prepareAliases().addAlias("test", "alias1", "{ t }").get();
+ fail();
+ } catch (Exception e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testFilteringAliases() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1] and filter [user:kimchy]");
+ FilterBuilder filter = termFilter("user", "kimchy");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", filter));
+
+ // For now just making sure that filter was stored with the alias
+ logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]");
+ ClusterState clusterState = admin().cluster().prepareState().get().getState();
+ IndexMetaData indexMd = clusterState.metaData().index("test");
+ assertThat(indexMd.aliases().get("alias1").filter().string(), equalTo("{\"term\":{\"user\":\"kimchy\"}}"));
+
+ }
+
+ @Test
+ public void testEmptyFilter() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1] and empty filter");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", "{}"));
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesSingleIndex() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "bars", termFilter("name", "bar")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "tests", termFilter("name", "test")));
+
+ logger.info("--> indexing against [test]");
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).refresh(true)).actionGet();
+
+ logger.info("--> checking single filtering alias search");
+ SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1");
+
+ logger.info("--> checking single filtering alias wildcard search");
+ searchResponse = client().prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1");
+
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3");
+
+ logger.info("--> checking single filtering alias search with sort");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_uid", SortOrder.ASC).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3");
+
+ logger.info("--> checking single filtering alias search with global facets");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addFacet(FacetBuilders.termsFacet("test").field("name").global(true))
+ .get();
+ assertThat(((TermsFacet) searchResponse.getFacets().facet("test")).getEntries().size(), equalTo(4));
+
+ logger.info("--> checking single filtering alias search with global facets and sort");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addFacet(FacetBuilders.termsFacet("test").field("name").global(true))
+ .addSort("_uid", SortOrder.ASC).get();
+ assertThat(((TermsFacet) searchResponse.getFacets().facet("test")).getEntries().size(), equalTo(4));
+
+ logger.info("--> checking single filtering alias search with non-global facets");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addFacet(FacetBuilders.termsFacet("test").field("name").global(false))
+ .addSort("_uid", SortOrder.ASC).get();
+ assertThat(((TermsFacet) searchResponse.getFacets().facet("test")).getEntries().size(), equalTo(2));
+
+ searchResponse = client().prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2");
+
+ logger.info("--> checking single non-filtering alias search");
+ searchResponse = client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
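+ // when the same index is also matched without a filter (directly or via an unfiltered alias), no alias filter is applied to it, so all four docs come back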
+ logger.info("--> checking non-filtering alias and filtering alias search");
+ searchResponse = client().prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking index and filtering alias search");
+ searchResponse = client().prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking index and alias wildcard search");
+ searchResponse = client().prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesTwoIndices() throws Exception {
+ logger.info("--> creating index [test1]");
+ createIndex("test1");
+
+ logger.info("--> creating index [test2]");
+ createIndex("test2");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "bars", termFilter("name", "bar")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "foos", termFilter("name", "foo")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"))).get();
+ client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"))).get();
+ client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"))).get();
+ client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"))).get();
+
+ logger.info("--> indexing against [test2]");
+ client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"))).get();
+ client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"))).get();
+ client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"))).get();
+ client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"))).get();
+
+ refresh();
+
+ logger.info("--> checking filtering alias for two indices");
+ SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "5");
+ assertThat(client().prepareCount("foos").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(2L));
+
+ logger.info("--> checking filtering alias for one index");
+ searchResponse = client().prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "2");
+ assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L));
+
+ logger.info("--> checking filtering alias for two indices and one complete index");
+ searchResponse = client().prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5");
+ assertThat(client().prepareCount("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for one index");
+ searchResponse = client().prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5");
+ assertThat(client().prepareCount("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices");
+ searchResponse = client().prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(8L));
+ assertThat(client().prepareCount("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(8L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices");
+ searchResponse = client().prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get();
+ assertHits(searchResponse.getHits(), "4", "8");
+ assertThat(client().prepareCount("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get().getCount(), equalTo(2L));
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesMultipleIndices() throws Exception {
+ logger.info("--> creating indices");
+ createIndex("test1", "test2", "test3");
+
+ ensureGreen();
+
+ logger.info("--> adding aliases to indices");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "alias12"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "alias12"));
+
+ logger.info("--> adding filtering aliases to indices");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "filter1", termFilter("name", "test1")));
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "filter23", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test3", "filter23", termFilter("name", "foo")));
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "filter13", termFilter("name", "baz")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test3", "filter13", termFilter("name", "baz")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("11").source(source("11", "foo test1"))).get();
+ client().index(indexRequest("test1").type("type1").id("12").source(source("12", "bar test1"))).get();
+ client().index(indexRequest("test1").type("type1").id("13").source(source("13", "baz test1"))).get();
+
+ client().index(indexRequest("test2").type("type1").id("21").source(source("21", "foo test2"))).get();
+ client().index(indexRequest("test2").type("type1").id("22").source(source("22", "bar test2"))).get();
+ client().index(indexRequest("test2").type("type1").id("23").source(source("23", "baz test2"))).get();
+
+ client().index(indexRequest("test3").type("type1").id("31").source(source("31", "foo test3"))).get();
+ client().index(indexRequest("test3").type("type1").id("32").source(source("32", "bar test3"))).get();
+ client().index(indexRequest("test3").type("type1").id("33").source(source("33", "baz test3"))).get();
+
+ refresh();
+
+ logger.info("--> checking filtering alias for multiple indices");
+ SearchResponse searchResponse = client().prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "31", "13", "33");
+ assertThat(client().prepareCount("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(4L));
+
+ searchResponse = client().prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13");
+ assertThat(client().prepareCount("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ searchResponse = client().prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "33");
+ assertThat(client().prepareCount("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(4L));
+
+ searchResponse = client().prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33");
+ assertThat(client().prepareCount("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(6L));
+
+ searchResponse = client().prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33");
+ assertThat(client().prepareCount("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(6L));
+
+ searchResponse = client().prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33");
+ assertThat(client().prepareCount("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(8L));
+ }
+
+ @Test
+ public void testDeletingByQueryFilteringAliases() throws Exception {
+ logger.info("--> creating index [test1] and [test2");
+ createIndex("test1", "test2");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "bars", termFilter("name", "bar")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "tests", termFilter("name", "test")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "tests", termFilter("name", "test")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"))).get();
+ client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"))).get();
+ client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"))).get();
+ client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"))).get();
+
+ logger.info("--> indexing against [test2]");
+ client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"))).get();
+ client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"))).get();
+ client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"))).get();
+ client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"))).get();
+
+ refresh();
+
+ logger.info("--> checking counts before delete");
+ assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L));
+
+ logger.info("--> delete by query from a single alias");
+ client().prepareDeleteByQuery("bars").setQuery(QueryBuilders.termQuery("name", "test")).get();
+
+ logger.info("--> verify that only one record was deleted");
+ assertThat(client().prepareCount("test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(3L));
+
+ logger.info("--> delete by query from an aliases pointing to two indices");
+ client().prepareDeleteByQuery("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ logger.info("--> verify that proper records were deleted");
+ SearchResponse searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "3", "4", "6", "7", "8");
+
+ logger.info("--> delete by query from an aliases and an index");
+ client().prepareDeleteByQuery("tests", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ logger.info("--> verify that proper records were deleted");
+ searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "4");
+ }
+
+ @Test
+ public void testDeleteAliases() throws Exception {
+ logger.info("--> creating index [test1] and [test2]");
+ createIndex("test1", "test2");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1")
+ .addAlias("test1", "aliasToTests")
+ .addAlias("test1", "foos", termFilter("name", "foo"))
+ .addAlias("test1", "bars", termFilter("name", "bar"))
+ .addAlias("test1", "tests", termFilter("name", "test")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2")
+ .addAlias("test2", "aliasToTests")
+ .addAlias("test2", "foos", termFilter("name", "foo"))
+ .addAlias("test2", "tests", termFilter("name", "test")));
+
+ String[] indices = {"test1", "test2"};
+ String[] aliases = {"aliasToTest1", "foos", "bars", "tests", "aliasToTest2", "aliasToTests"};
+
+ admin().indices().prepareAliases().removeAlias(indices, aliases).get();
+
+ AliasesExistResponse response = admin().indices().prepareAliasesExist(aliases).get();
+ assertThat(response.exists(), equalTo(false));
+ }
+
+ @Test
+ public void testWaitForAliasCreationMultipleShards() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias" + i));
+ client().index(indexRequest("alias" + i).type("type1").id("1").source(source("1", "test"))).get();
+ }
+ }
+
+ @Test
+ public void testWaitForAliasCreationSingleShard() throws Exception {
+ logger.info("--> creating index [test]");
+ assertAcked(admin().indices().create(createIndexRequest("test").settings(settingsBuilder().put("index.numberOfReplicas", 0).put("index.numberOfShards", 1))).get());
+
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias" + i));
+ client().index(indexRequest("alias" + i).type("type1").id("1").source(source("1", "test"))).get();
+ }
+ }
+
+ @Test
+ public void testWaitForAliasSimultaneousUpdate() throws Exception {
+ final int aliasCount = 10;
+
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
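+ // each thread adds an alias and immediately indexes through it; this only works if the add-alias call blocks until the new alias is visible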
+ ExecutorService executor = Executors.newFixedThreadPool(aliasCount);
+ for (int i = 0; i < aliasCount; i++) {
+ final String aliasName = "alias" + i;
+ executor.submit(new Runnable() {
+ @Override
+ public void run() {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName));
+ client().index(indexRequest(aliasName).type("type1").id("1").source(source("1", "test"))).actionGet();
+ }
+ });
+ }
+ executor.shutdown();
+ boolean done = executor.awaitTermination(10, TimeUnit.SECONDS);
+ if (!done) {
+ executor.shutdownNow();
+ }
+ assertThat(done, equalTo(true));
+ }
+
+ @Test
+ public void testSameAlias() throws Exception {
+
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> creating alias1 ");
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1")));
+ TimeValue timeout = TimeValue.timeValueSeconds(2);
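+ // re-adding an identical alias is a no-op, so each call below must ack well inside the timeout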
+ logger.info("--> recreating alias1 ");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1").setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> modifying alias1 to have a filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termFilter("name", "foo")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> recreating alias1 with the same filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termFilter("name", "foo")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> recreating alias1 with a different filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termFilter("name", "bar")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> verify that filter was updated");
+ AliasMetaData aliasMetaData = cluster().clusterService().state().metaData().aliases().get("alias1").get("test");
+ assertThat(aliasMetaData.getFilter().toString(), equalTo("{\"term\":{\"name\":\"bar\"}}"));
+
+ logger.info("--> deleting alias1");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().removeAlias("test", "alias1").setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+ }
+
+ @Test(expected = AliasesMissingException.class)
+ public void testIndicesRemoveNonExistingAliasResponds404() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+ logger.info("--> deleting alias1 which does not exist");
+ assertAcked((admin().indices().prepareAliases().removeAlias("test", "alias1")));
+ }
+
+ @Test
+ public void testIndicesGetAliases() throws Exception {
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build();
+ logger.info("--> creating indices [foobar, test, test123, foobarbaz, bazbar]");
+ assertAcked(prepareCreate("foobar").setSettings(indexSettings));
+ assertAcked(prepareCreate("test").setSettings(indexSettings));
+ assertAcked(prepareCreate("test123").setSettings(indexSettings));
+ assertAcked(prepareCreate("foobarbaz").setSettings(indexSettings));
+ assertAcked(prepareCreate("bazbar").setSettings(indexSettings));
+
+ ensureGreen();
+
+ logger.info("--> creating aliases [alias1, alias2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("foobar", "alias1").addAlias("foobar", "alias2"));
+
+ logger.info("--> getting alias1");
+ GetAliasesResponse getResponse = admin().indices().prepareGetAliases("alias1").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting all aliases that start with alias*");
+ getResponse = admin().indices().prepareGetAliases("alias*").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias2"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).alias(), equalTo("alias1"));
+ assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("alias*").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> creating aliases [bar, baz, foo]");
+ assertAcked(admin().indices().prepareAliases()
+ .addAlias("bazbar", "bar")
+ .addAlias("bazbar", "bac", termFilter("field", "value"))
+ .addAlias("foobar", "foo"));
+
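+ // routing("bla") sets both the index routing and the search routing of the alias, asserted further below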
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(new AliasAction(AliasAction.Type.ADD, "foobar", "bac").routing("bla")));
+
+ logger.info("--> getting bar and baz for index bazbar");
+ getResponse = admin().indices().prepareGetAliases("bar", "bac").addIndices("bazbar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("bar", "bac")
+ .addIndices("bazbar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting *b* for index baz*");
+ getResponse = admin().indices().prepareGetAliases("*b*").addIndices("baz*").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("*b*")
+ .addIndices("baz*").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting *b* for index *bar");
+ getResponse = admin().indices().prepareGetAliases("b*").addIndices("*bar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), equalTo("bla"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), equalTo("bla"));
+ existsResponse = admin().indices().prepareAliasesExist("b*")
+ .addIndices("*bar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting f* for index *bar");
+ getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("f*")
+ .addIndices("*bar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ // the index expression [*bac] resolves through the alias [bac]
+ logger.info("--> getting foo for index *bac");
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("*bac").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("foo")
+ .addIndices("*bac").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting foo for index foobar");
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("foo")
+ .addIndices("foobar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ // the index expression [*bac] again resolves through the alias [bac]
+ logger.info("--> getting * for index *bac");
+ getResponse = admin().indices().prepareGetAliases("*").addIndices("*bac").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(4));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ existsResponse = admin().indices().prepareAliasesExist("*")
+ .addIndices("*bac").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ assertAcked(admin().indices().prepareAliases()
+ .removeAlias("foobar", "foo"));
+
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get();
+ assertThat(getResponse.getAliases().isEmpty(), equalTo(true));
+ existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("foobar").get();
+ assertThat(existsResponse.exists(), equalTo(false));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testAddAliasNullIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasEmptyIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasNullAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", null)).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasEmptyAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", "")).get();
+ }
+
+ @Test
+ public void testAddAliasNullAliasNullIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, null)).get();
+ assertTrue("Should throw " + ActionRequestValidationException.class.getSimpleName(), false);
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testAddAliasEmptyAliasEmptyIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "")).get();
+ assertTrue("Should throw " + ActionRequestValidationException.class.getSimpleName(), false);
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasNullIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasEmptyIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasNullAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", null)).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasEmptyAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", "")).get();
+ }
+
+ @Test
+ public void testRemoveAliasNullAliasNullIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, null)).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testRemoveAliasEmptyAliasEmptyIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "")).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testGetAllAliasesWorks() {
+ createIndex("index1");
+ createIndex("index2");
+
+ ensureYellow();
+
+ assertAcked(admin().indices().prepareAliases().addAlias("index1", "alias1").addAlias("index2", "alias2"));
+
+ GetAliasesResponse response = admin().indices().prepareGetAliases().get();
+ assertThat(response.getAliases(), hasKey("index1"));
+ assertThat(response.getAliases(), hasKey("index1"));
+ }
+
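+ // asserts that the hits contain exactly the given ids, in any order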
+ private void assertHits(SearchHits hits, String... ids) {
+ assertThat(hits.totalHits(), equalTo((long) ids.length));
+ Set<String> hitIds = newHashSet();
+ for (SearchHit hit : hits.getHits()) {
+ hitIds.add(hit.id());
+ }
+ assertThat(hitIds, containsInAnyOrder(ids));
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" }";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java b/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
new file mode 100644
index 0000000..42f9686
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.aliases;
+
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ */
+public class AliasesBenchmark {
+
+ private static final String INDEX_NAME = "my-index";
+
+ public static void main(String[] args) throws IOException {
+ int NUM_ADDITIONAL_NODES = 0;
+ int BASE_ALIAS_COUNT = 100000;
+ int NUM_ADD_ALIAS_REQUEST = 1000;
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("gateway.type", "local")
+ .put("node.master", false).build();
+ Node node1 = NodeBuilder.nodeBuilder().settings(
+ ImmutableSettings.settingsBuilder().put(settings).put("node.master", true)
+ ).node();
+
+ Node[] otherNodes = new Node[NUM_ADDITIONAL_NODES];
+ for (int i = 0; i < otherNodes.length; i++) {
+ otherNodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ Client client = node1.client();
+ try {
+ client.admin().indices().prepareCreate(INDEX_NAME).execute().actionGet();
+ } catch (IndexAlreadyExistsException e) {}
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+ int numberOfAliases = countAliases(client);
+ System.out.println("Number of aliases: " + numberOfAliases);
+
+ if (numberOfAliases < BASE_ALIAS_COUNT) {
+ int diff = BASE_ALIAS_COUNT - numberOfAliases;
+ System.out.println("Adding " + diff + " more aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
+ IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
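+ // flush the builder every 1000 actions so a single request does not grow unbounded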
+ for (int i = 1; i <= diff; i++) {
+ builder.addAlias(INDEX_NAME, Strings.randomBase64UUID());
+ if (i % 1000 == 0) {
+ builder.execute().actionGet();
+ builder = client.admin().indices().prepareAliases();
+ }
+ }
+ if (!builder.request().getAliasActions().isEmpty()) {
+ builder.execute().actionGet();
+ }
+ } else if (numberOfAliases > BASE_ALIAS_COUNT) {
+ IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
+ int diff = numberOfAliases - BASE_ALIAS_COUNT;
+ System.out.println("Removing " + diff + " aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
+ List<AliasMetaData> aliases= client.admin().indices().prepareGetAliases("*")
+ .addIndices(INDEX_NAME)
+ .execute().actionGet().getAliases().get(INDEX_NAME);
+ for (int i = 0; i <= diff; i++) {
+ builder.removeAlias(INDEX_NAME, aliases.get(i).alias());
+ if (i % 1000 == 0) {
+ builder.execute().actionGet();
+ builder = client.admin().indices().prepareAliases();
+ }
+ }
+ if (!builder.request().getAliasActions().isEmpty()) {
+ builder.execute().actionGet();
+ }
+ }
+
+ numberOfAliases = countAliases(client);
+ System.out.println("Number of aliases: " + numberOfAliases);
+
+ long totalTime = 0;
+ int max = numberOfAliases + NUM_ADD_ALIAS_REQUEST;
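+ // time each addAlias request individually and report a running average every 100 additions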
+ for (int i = numberOfAliases; i <= max; i++) {
+ if (i != numberOfAliases && i % 100 == 0) {
+ long avgTime = totalTime / 100;
+ System.out.println("Added [" + (i - numberOfAliases) + "] aliases. Avg create time: " + avgTime + " ms");
+ totalTime = 0;
+ }
+
+ long time = System.currentTimeMillis();
+// String filter = termFilter("field" + i, "value" + i).toXContent(XContentFactory.jsonBuilder(), null).string();
+ client.admin().indices().prepareAliases().addAlias(INDEX_NAME, Strings.randomBase64UUID()/*, filter*/)
+ .execute().actionGet();
+ totalTime += System.currentTimeMillis() - time;
+ }
+ System.out.println("Number of aliases: " + countAliases(client));
+
+ client.close();
+ node1.close();
+ for (Node otherNode : otherNodes) {
+ otherNode.close();
+ }
+ }
+
+ private static int countAliases(Client client) {
+ GetAliasesResponse response = client.admin().indices().prepareGetAliases("*")
+ .addIndices(INDEX_NAME)
+ .execute().actionGet();
+ if (response.getAliases().isEmpty()) {
+ return 0;
+ } else {
+ return response.getAliases().get(INDEX_NAME).size();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java b/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java
new file mode 100644
index 0000000..15745fc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.bloom;
+
+import org.apache.lucene.codecs.bloom.FuzzySet;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.util.BloomFilter;
+
+import java.security.SecureRandom;
+
+/**
+ */
+public class BloomBench {
+
+ public static void main(String[] args) throws Exception {
+ SecureRandom random = new SecureRandom();
+ final int ELEMENTS = (int) SizeValue.parseSizeValue("1m").singles();
+ final double fpp = 0.01;
+ BloomFilter gFilter = BloomFilter.create(ELEMENTS, fpp);
+ System.out.println("G SIZE: " + new ByteSizeValue(gFilter.getSizeInBytes()));
+
+ FuzzySet lFilter = FuzzySet.createSetBasedOnMaxMemory((int) gFilter.getSizeInBytes());
+ //FuzzySet lFilter = FuzzySet.createSetBasedOnQuality(ELEMENTS, 0.97f);
+
+ for (int i = 0; i < ELEMENTS; i++) {
+ BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
+ gFilter.put(bytesRef);
+ lFilter.addValue(bytesRef);
+ }
+
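+ // probe with fresh random UUIDs that were never inserted; every "might contain" answer is a false positive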
+ int lFalse = 0;
+ int gFalse = 0;
+ for (int i = 0; i < ELEMENTS; i++) {
+ BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
+ if (gFilter.mightContain(bytesRef)) {
+ gFalse++;
+ }
+ if (lFilter.contains(bytesRef) == FuzzySet.ContainsResult.MAYBE) {
+ lFalse++;
+ }
+ }
+ System.out.println("Failed positives, g[" + gFalse + "], l[" + lFalse + "]");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java b/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
new file mode 100644
index 0000000..660d042
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.checksum;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.security.MessageDigest;
+import java.util.zip.Adler32;
+import java.util.zip.CRC32;
+
+/**
+ *
+ */
+public class ChecksumBenchmark {
+
+ public static final int BATCH_SIZE = 16 * 1024;
+
+ public static void main(String[] args) throws Exception {
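+ // run each digest over 1g first so the JIT has compiled the hot loops before the timed 10g pass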
+ System.out.println("Warning up");
+ long warmSize = ByteSizeValue.parseBytesSizeValue("1g", null).bytes();
+ crc(warmSize);
+ adler(warmSize);
+ md5(warmSize);
+
+ long dataSize = ByteSizeValue.parseBytesSizeValue("10g", null).bytes();
+ System.out.println("Running size: " + dataSize);
+ crc(dataSize);
+ adler(dataSize);
+ md5(dataSize);
+ }
+
+ private static void crc(long dataSize) {
+ long start = System.currentTimeMillis();
+ CRC32 crc = new CRC32();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ for (long i = 0; i < iter; i++) {
+ crc.update(data);
+ }
+ crc.getValue();
+ System.out.println("CRC took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+
+ private static void adler(long dataSize) {
+ long start = System.currentTimeMillis();
+ Adler32 crc = new Adler32();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ for (long i = 0; i < iter; i++) {
+ crc.update(data);
+ }
+ crc.getValue();
+ System.out.println("Adler took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+
+ private static void md5(long dataSize) throws Exception {
+ long start = System.currentTimeMillis();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ MessageDigest digest = MessageDigest.getInstance("MD5");
+ for (long i = 0; i < iter; i++) {
+ digest.update(data);
+ }
+ digest.digest();
+ System.out.println("md5 took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java b/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
new file mode 100644
index 0000000..855744b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.cluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+
+public class ClusterAllocationRerouteBenchmark {
+
+ private static final ESLogger logger = Loggers.getLogger(ClusterAllocationRerouteBenchmark.class);
+
+ public static void main(String[] args) {
+ final int numberOfRuns = 1;
+ final int numIndices = 5 * 365; // five years
+ final int numShards = 6;
+ final int numReplicas = 2;
+ final int numberOfNodes = 30;
+ final int numberOfTags = 2;
+ AllocationService strategy = ElasticsearchAllocationTestCase.createAllocationService(ImmutableSettings.builder()
+ .put("cluster.routing.allocation.awareness.attributes", "tag")
+ .build(), new Random(1));
+
+ MetaData.Builder mb = MetaData.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ mb.put(IndexMetaData.builder("test_" + i).numberOfShards(numShards).numberOfReplicas(numReplicas));
+ }
+ MetaData metaData = mb.build();
+ RoutingTable.Builder rb = RoutingTable.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ rb.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rb.build();
+ DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
+ for (int i = 1; i <= numberOfNodes; i++) {
+ nb.put(ElasticsearchAllocationTestCase.newNode("node" + i, numberOfTags == 0 ? ImmutableMap.<String, String>of() : ImmutableMap.of("tag", "tag_" + (i % numberOfTags))));
+ }
+ ClusterState initialClusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).nodes(nb).build();
+
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < numberOfRuns; i++) {
+ logger.info("[{}] starting... ", i);
+ long runStart = System.currentTimeMillis();
+ ClusterState clusterState = initialClusterState;
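+ // keep applying started shards and rerouting until every shard is assigned, simulating full allocation convergence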
+ while (clusterState.readOnlyRoutingNodes().hasUnassignedShards()) {
+ logger.info("[{}] remaining unassigned {}", i, clusterState.readOnlyRoutingNodes().unassigned().size());
+ RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ result = strategy.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ }
+ logger.info("[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
+ }
+ long took = System.currentTimeMillis() - start;
+ logger.info("total took {}, AVG {}", TimeValue.timeValueMillis(took), TimeValue.timeValueMillis(took / numberOfRuns));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java b/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
new file mode 100644
index 0000000..5858908
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.common.lucene.uidscan;
+
+import jsr166y.ThreadLocalRandom;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.FSDirectory;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.io.File;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Multi-threaded micro-benchmark of uid-to-version lookups against a Lucene index.
+ */
+public class LuceneUidScanBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ FSDirectory dir = FSDirectory.open(new File("work/test"));
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ final int NUMBER_OF_THREADS = 2;
+ final long INDEX_COUNT = SizeValue.parseSizeValue("1m").singles();
+ final long SCAN_COUNT = SizeValue.parseSizeValue("100k").singles();
+ final long startUid = 1000000;
+
+ long LIMIT = startUid + INDEX_COUNT;
+ StopWatch watch = new StopWatch().start();
+ System.out.println("Indexing " + INDEX_COUNT + " docs...");
+ for (long i = startUid; i < LIMIT; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("_uid", Long.toString(i), Store.NO));
+ doc.add(new NumericDocValuesField("_version", i));
+ writer.addDocument(doc);
+ }
+ System.out.println("Done indexing, took " + watch.stop().lastTaskTime());
+
+ final IndexReader reader = DirectoryReader.open(writer, true);
+
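+ // Each worker thread resolves a random uid back to its _version doc value; since documents were
+ // indexed with version == uid, any mismatch indicates a broken lookup.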
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (long i = 0; i < SCAN_COUNT; i++) {
+ // note: Math.abs(Integer.MIN_VALUE) is negative, so mask the sign bit instead of using abs
+ long id = startUid + (ThreadLocalRandom.current().nextLong() & Long.MAX_VALUE) % INDEX_COUNT;
+ final long version = Versions.loadVersion(reader, new Term("_uid", Long.toString(id)));
+ if (version != id) {
+ System.err.println("wrong id...");
+ break;
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+
+ watch = new StopWatch().start();
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ latch.await();
+ watch.stop();
+ System.out.println("Scanned in " + watch.totalTime() + " TP Seconds " + ((SCAN_COUNT * NUMBER_OF_THREADS) / watch.totalTime().secondsFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java b/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
new file mode 100644
index 0000000..e8cdc91
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.common.recycler;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.recycler.Recycler;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.recycler.Recyclers.*;
+
+/** Benchmark that tries to measure the overhead of object recycling depending on concurrent access. */
+public class RecyclerBenchmark {
+
+ private static final long NUM_RECYCLES = 5000000L;
+ private static final Random RANDOM = new Random(0);
+
+ private static long bench(final Recycler<?> recycler, long numRecycles, int numThreads) throws InterruptedException {
+ final AtomicLong recycles = new AtomicLong(numRecycles);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final Thread[] threads = new Thread[numThreads];
+ for (int i = 0; i < numThreads; ++i){
+ // Thread ids happen to be generated sequentially, so also create a random number of throwaway
+ // threads so that the distribution of ids is not perfect for the concurrent recycler
+ for (int j = RANDOM.nextInt(5); j >= 0; --j) {
+ new Thread();
+ }
+
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ return;
+ }
+ while (recycles.getAndDecrement() > 0) {
+ final Recycler.V<?> v = recycler.obtain();
+ v.release();
+ }
+ }
+ };
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
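+ // start timing only once all threads are running; the latch releases them together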
+ final long start = System.nanoTime();
+ latch.countDown();
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ return System.nanoTime() - start;
+ }
+
+ public static void main(String[] args) throws InterruptedException {
+ final int limit = 100;
+ final Recycler.C<Object> c = new Recycler.C<Object>() {
+
+ @Override
+ public Object newInstance(int sizing) {
+ return new Object();
+ }
+
+ @Override
+ public void clear(Object value) {}
+ };
+
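+ // candidate recycler strategies under test, keyed by name: no recycling, a shared concurrent
+ // deque, per-thread deques (hard and soft references), a lock-guarded deque, and striped
+ // concurrent variants sized to the number of available processors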
+ final ImmutableMap<String, Recycler<Object>> recyclers = ImmutableMap.<String, Recycler<Object>>builder()
+ .put("none", none(c))
+ .put("concurrent-queue", concurrentDeque(c, limit))
+ .put("thread-local", threadLocal(dequeFactory(c, limit)))
+ .put("soft-thread-local", threadLocal(softFactory(dequeFactory(c, limit))))
+ .put("locked", locked(deque(c, limit)))
+ .put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors()))
+ .put("soft-concurrent", concurrent(softFactory(dequeFactory(c, limit)), Runtime.getRuntime().availableProcessors())).build();
+
+ // warmup
+ final long start = System.nanoTime();
+ while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
+ for (Recycler<?> recycler : recyclers.values()) {
+ bench(recycler, NUM_RECYCLES, 2);
+ }
+ }
+
+ // run
+ for (int numThreads = 1; numThreads <= 4 * Runtime.getRuntime().availableProcessors(); numThreads *= 2) {
+ System.out.println("## " + numThreads + " threads\n");
+ System.gc();
+ Thread.sleep(1000);
+ for (Recycler<?> recycler : recyclers.values()) {
+ bench(recycler, NUM_RECYCLES, numThreads);
+ }
+ for (int i = 0; i < 5; ++i) {
+ for (Map.Entry<String, Recycler<Object>> entry : recyclers.entrySet()) {
+ System.out.println(entry.getKey() + "\t" + TimeUnit.NANOSECONDS.toMillis(bench(entry.getValue(), NUM_RECYCLES, numThreads)));
+ }
+ System.out.println();
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java b/src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java
new file mode 100644
index 0000000..7ff45eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.common.util;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.UnsafeUtils;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+public class BytesRefComparisonsBenchmark {
+
+ private static final Random R = new Random(0);
+ private static final int ITERS = 100;
+
+ // To avoid JVM optimizations
+ @SuppressWarnings("unused")
+ private static boolean DUMMY;
+
+ enum Comparator {
+ SAFE {
+ @Override
+ boolean compare(BytesRef b1, BytesRef b2) {
+ return b1.bytesEquals(b2);
+ }
+ },
+ UNSAFE {
+ @Override
+ boolean compare(BytesRef b1, BytesRef b2) {
+ return UnsafeUtils.equals(b1, b2);
+ }
+ };
+ abstract boolean compare(BytesRef b1, BytesRef b2);
+ }
+
+ private static BytesRef[] buildBytesRefs(int minLen, int maxLen, int count, int uniqueCount) {
+ final BytesRef[] uniqueRefs = new BytesRef[uniqueCount];
+ for (int i = 0; i < uniqueCount; ++i) {
+ final int len = RandomInts.randomIntBetween(R, minLen, maxLen);
+ final byte[] bytes = new byte[len];
+ for (int j = 0; j < bytes.length; ++j) {
+ bytes[j] = (byte) R.nextInt(2); // so that some instances have common prefixes
+ }
+ uniqueRefs[i] = new BytesRef(bytes);
+ }
+ final BytesRef[] result = new BytesRef[count];
+ for (int i = 0; i < count; ++i) {
+ result[i] = RandomPicks.randomFrom(R, uniqueRefs);
+ }
+ int totalLen = 0;
+ for (BytesRef b : result) {
+ totalLen += b.length;
+ }
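+ // pack all refs into one contiguous byte[] so every BytesRef shares a single backing array and
+ // equality checks compare slices of the same array rather than independently allocated buffers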
+ final byte[] data = new byte[totalLen];
+ int offset = 0;
+ for (int i = 0; i < count; ++i) {
+ final BytesRef b = result[i];
+ System.arraycopy(b.bytes, b.offset, data, offset, b.length);
+ result[i] = new BytesRef(data, offset, b.length);
+ offset += b.length;
+ }
+ if (offset != totalLen) {
+ throw new AssertionError();
+ }
+ return result;
+ }
+
+ private static long bench(Comparator comparator, BytesRef[] refs, int iters) {
+ boolean xor = false;
+ final long start = System.nanoTime();
+ for (int iter = 0; iter < iters; ++iter) {
+ for (int i = 0; i < refs.length; ++i) {
+ for (int j = i + 1; j < refs.length; ++j) {
+ xor ^= comparator.compare(refs[i], refs[j]);
+ }
+ }
+ }
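+ // publish the accumulated result so the JIT cannot eliminate the comparison loop as dead code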
+ DUMMY = xor;
+ return System.nanoTime() - start;
+ }
+
+ public static void main(String[] args) throws InterruptedException {
+ // warmup
+ BytesRef[] bytes = buildBytesRefs(2, 20, 1000, 100);
+ final long start = System.nanoTime();
+ while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
+ for (Comparator comparator : Comparator.values()) {
+ bench(comparator, bytes, 1);
+ }
+ }
+
+ System.out.println("## Various lengths");
+ // make sure GC doesn't hurt results
+ System.gc();
+ Thread.sleep(2000);
+ for (Comparator comparator : Comparator.values()) {
+ bench(comparator, bytes, ITERS);
+ }
+ for (int i = 0; i < 3; ++i) {
+ for (Comparator comparator : Comparator.values()) {
+ System.out.println(comparator + " " + new TimeValue(bench(comparator, bytes, ITERS), TimeUnit.NANOSECONDS));
+ }
+ }
+
+ for (int len = 2; len <= 20; ++len) {
+ System.out.println("## Length = " + len);
+ bytes = buildBytesRefs(len, len, 1000, 100);
+ System.gc();
+ Thread.sleep(2000);
+ for (Comparator comparator : Comparator.values()) {
+ bench(comparator, bytes, ITERS);
+ }
+ for (int i = 0; i < 3; ++i) {
+ for (Comparator comparator : Comparator.values()) {
+ System.out.println(comparator + " " + new TimeValue(bench(comparator, bytes, ITERS), TimeUnit.NANOSECONDS));
+ }
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java b/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
new file mode 100644
index 0000000..ea1e589
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.counter;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Measures AtomicLong increment throughput, first single-threaded and then under heavy contention.
+ */
+public class SimpleCounterBenchmark {
+
+ private static long NUMBER_OF_ITERATIONS = 10000000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ final AtomicLong counter = new AtomicLong();
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ counter.incrementAndGet();
+ }
+ System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ counter.incrementAndGet();
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java b/src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java
new file mode 100644
index 0000000..72a7f7c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.fielddata;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+
+import java.util.Random;
+
+public class LongFieldDataBenchmark {
+
+ private static final Random RANDOM = new Random();
+ private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365;
+
+ public enum Data {
+ SINGLE_VALUED_DENSE_ENUM {
+ public int numValues() {
+ return 1;
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextInt(16);
+ }
+ },
+ SINGLE_VALUED_DENSE_DATE {
+ public int numValues() {
+ return 1;
+ }
+
+ @Override
+ public long nextValue() {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_DATE {
+ public int numValues() {
+ return RANDOM.nextInt(3);
+ }
+
+ @Override
+ public long nextValue() {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_ENUM {
+ public int numValues() {
+ return RANDOM.nextInt(3);
+ }
+
+ @Override
+ public long nextValue() {
+ return 3 + RANDOM.nextInt(8);
+ }
+ },
+ SINGLE_VALUED_SPARSE_RANDOM {
+ public int numValues() {
+ return RANDOM.nextFloat() < 0.1f ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextLong();
+ }
+ },
+ MULTI_VALUED_SPARSE_RANDOM {
+ public int numValues() {
+ return RANDOM.nextFloat() < 0.1f ? 1 + RANDOM.nextInt(5) : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextLong();
+ }
+ },
+ MULTI_VALUED_DENSE_RANDOM {
+ public int numValues() {
+ return 1 + RANDOM.nextInt(3);
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextLong();
+ }
+ };
+
+ public abstract int numValues();
+
+ public abstract long nextValue();
+ }
+
+ public static void main(String[] args) throws Exception {
+ final IndexWriterConfig iwc = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
+ final String fieldName = "f";
+ final int numDocs = 1000000;
+ System.out.println("Data\tLoading time\tImplementation\tActual size\tExpected size");
+ for (Data data : Data.values()) {
+ final RAMDirectory dir = new RAMDirectory();
+ final IndexWriter indexWriter = new IndexWriter(dir, iwc);
+ for (int i = 0; i < numDocs; ++i) {
+ final Document doc = new Document();
+ final int numFields = data.numValues();
+ for (int j = 0; j < numFields; ++j) {
+ doc.add(new LongField(fieldName, data.nextValue(), Store.NO));
+ }
+ indexWriter.addDocument(doc);
+ }
+ Merges.forceMerge(indexWriter, 1);
+ indexWriter.close();
+
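+ // load field data for the single force-merged segment and report the load time, the chosen
+ // in-memory implementation, and the actual vs estimated memory usage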
+ final DirectoryReader dr = DirectoryReader.open(dir);
+ final IndexFieldDataService fds = new IndexFieldDataService(new Index("dummy"), new DummyCircuitBreakerService());
+ final LongFieldMapper mapper = new LongFieldMapper.Builder(fieldName).build(new BuilderContext(null, new ContentPath(1)));
+ final IndexNumericFieldData<AtomicNumericFieldData> fd = fds.getForField(mapper);
+ final long start = System.nanoTime();
+ final AtomicNumericFieldData afd = fd.loadDirect(SlowCompositeReaderWrapper.wrap(dr).getContext());
+ final long loadingTimeMs = (System.nanoTime() - start) / 1000 / 1000;
+ System.out.println(data + "\t" + loadingTimeMs + "\t" + afd.getClass().getSimpleName() + "\t" + RamUsageEstimator.humanSizeOf(afd.getLongValues()) + "\t" + RamUsageEstimator.humanReadableUnits(afd.getMemorySizeInBytes()));
+ dr.close();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java b/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
new file mode 100644
index 0000000..f8ec2cf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.fs;
+
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+
+/**
+ * Compares sequential append throughput of positional FileChannel writes vs RandomAccessFile writes.
+ */
+public class FsAppendBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ new File("work/test.log").delete();
+ RandomAccessFile raf = new RandomAccessFile("work/test.log", "rw");
+ raf.setLength(0);
+
+ boolean CHANNEL = true;
+ int CHUNK = (int) ByteSizeValue.parseBytesSizeValue("1k").bytes();
+ long DATA = ByteSizeValue.parseBytesSizeValue("10gb").bytes();
+
+ byte[] data = new byte[CHUNK];
+ new Random().nextBytes(data);
+
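+ // CHANNEL=true appends via positional FileChannel writes; false uses RandomAccessFile#write.
+ // Both paths finish with a flush (force/sync), so the timings include the fsync cost.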
+ StopWatch watch = new StopWatch().start("write");
+ if (CHANNEL) {
+ FileChannel channel = raf.getChannel();
+ long position = 0;
+ while (position < DATA) {
+ channel.write(ByteBuffer.wrap(data), position);
+ position += data.length;
+ }
+ watch.stop().start("flush");
+ channel.force(true);
+ } else {
+ long position = 0;
+ while (position < DATA) {
+ raf.write(data);
+ position += data.length;
+ }
+ watch.stop().start("flush");
+ raf.getFD().sync();
+ }
+ raf.close();
+ watch.stop();
+ System.out.println("Wrote [" + (new ByteSizeValue(DATA)) + "], chunk [" + (new ByteSizeValue(CHUNK)) + "], in " + watch);
+ }
+
+ private static final ByteBuffer fill = ByteBuffer.allocateDirect(1);
+
+// public static long padLogFile(long position, long currentSize, long preAllocSize) throws IOException {
+// if (position + 4096 >= currentSize) {
+// currentSize = currentSize + preAllocSize;
+// fill.position(0);
+// f.getChannel().write(fill, currentSize - fill.remaining());
+// }
+// return currentSize;
+// }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java b/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
new file mode 100644
index 0000000..d78df7f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.get;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+// simple test for embedded / single remote lookup
+public class SimpleGetActionBenchmark {
+
+ public static void main(String[] args) {
+ long OPERATIONS = SizeValue.parseSizeValue("300k").singles();
+
+ Node node = NodeBuilder.nodeBuilder().node();
+
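+ // flip the condition below to true to benchmark through a separate client node instead of the local node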
+ Client client;
+ if (false) {
+ client = NodeBuilder.nodeBuilder().client(true).node().client();
+ } else {
+ client = node.client();
+ }
+
+ client.prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (long i = 0; i < OPERATIONS; i++) {
+ client.prepareGet("test", "type1", "1").execute().actionGet();
+ }
+ stopWatch.stop();
+
+ System.out.println("Ran in " + stopWatch.totalTime() + ", per second: " + (((double) OPERATIONS) / stopWatch.totalTime().secondsFrac()));
+
+ node.close();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java b/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
new file mode 100644
index 0000000..ed90a5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.hppc;
+
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+
+public class StringMapAdjustOrPutBenchmark {
+
+ public static void main(String[] args) {
+
+ int NUMBER_OF_KEYS = (int) SizeValue.parseSizeValue("20").singles();
+ int STRING_SIZE = 5;
+ long PUT_OPERATIONS = SizeValue.parseSizeValue("5m").singles();
+ long ITERATIONS = 10;
+ boolean REUSE = true;
+
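+ // REUSE=true clears and reuses the same map across iterations; false allocates a fresh map each time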
+
+ String[] values = new String[NUMBER_OF_KEYS];
+ for (int i = 0; i < values.length; i++) {
+ values[i] = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), STRING_SIZE);
+ }
+
+ StopWatch stopWatch;
+
+ stopWatch = new StopWatch().start();
+ ObjectIntOpenHashMap<String> map = new ObjectIntOpenHashMap<String>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ map.clear();
+ } else {
+ map = new ObjectIntOpenHashMap<String>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ map.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ map.clear();
+ map = null;
+
+ stopWatch.stop();
+ System.out.println("TObjectIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ stopWatch = new StopWatch().start();
+// TObjectIntCustomHashMap<String> iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
+ ObjectIntOpenHashMap<String> iMap = new ObjectIntOpenHashMap<String>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ iMap.clear();
+ } else {
+ iMap = new ObjectIntOpenHashMap<String>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TObjectIntCustomHashMap(StringIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ iMap.clear();
+ iMap = null;
+
+ stopWatch = new StopWatch().start();
+ iMap = new ObjectIntOpenHashMap<String>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ iMap.clear();
+ } else {
+ iMap = new ObjectIntOpenHashMap<String>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TObjectIntCustomHashMap(PureIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ iMap.clear();
+ iMap = null;
+
+ // now test with an object-to-entry map (ObjectObjectOpenHashMap)
+ stopWatch = new StopWatch().start();
+ ObjectObjectOpenHashMap<String, StringEntry> tMap = new ObjectObjectOpenHashMap<String, StringEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ tMap.clear();
+ } else {
+ tMap = new ObjectObjectOpenHashMap<String, StringEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = tMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ tMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+
+ tMap.clear();
+ tMap = null;
+
+ stopWatch.stop();
+ System.out.println("THashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ stopWatch = new StopWatch().start();
+ HashMap<String, StringEntry> hMap = new HashMap<String, StringEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ hMap.clear();
+ } else {
+ hMap = new HashMap<String, StringEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = hMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ hMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+
+ hMap.clear();
+ hMap = null;
+
+ stopWatch.stop();
+ System.out.println("HashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+
+ stopWatch = new StopWatch().start();
+ IdentityHashMap<String, StringEntry> ihMap = new IdentityHashMap<String, StringEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ ihMap.clear();
+ } else {
+ ihMap = new IdentityHashMap<String, StringEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = ihMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ ihMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+ stopWatch.stop();
+ System.out.println("IdentityHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ ihMap.clear();
+ ihMap = null;
+
+ int[] iValues = new int[NUMBER_OF_KEYS];
+ for (int i = 0; i < values.length; i++) {
+ iValues[i] = ThreadLocalRandom.current().nextInt();
+ }
+
+ stopWatch = new StopWatch().start();
+ IntIntOpenHashMap intMap = new IntIntOpenHashMap();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ intMap.clear();
+ } else {
+ intMap = new IntIntOpenHashMap();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ int key = iValues[(int) (i % NUMBER_OF_KEYS)];
+ intMap.addTo(key, 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TIntIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ intMap.clear();
+ intMap = null;
+
+ // now test with an int-to-entry map (IntObjectOpenHashMap)
+ stopWatch = new StopWatch().start();
+ IntObjectOpenHashMap<IntEntry> tIntMap = new IntObjectOpenHashMap<IntEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ tIntMap.clear();
+ } else {
+ tIntMap = new IntObjectOpenHashMap<IntEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ int key = iValues[(int) (i % NUMBER_OF_KEYS)];
+ IntEntry intEntry = tIntMap.get(key);
+ if (intEntry == null) {
+ intEntry = new IntEntry(key, 1);
+ tIntMap.put(key, intEntry);
+ } else {
+ intEntry.counter++;
+ }
+ }
+ }
+
+ tIntMap.clear();
+ tIntMap = null;
+
+ stopWatch.stop();
+ System.out.println("TIntObjectHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ }
+
+
+ static class StringEntry {
+ String key;
+ int counter;
+
+ StringEntry(String key, int counter) {
+ this.key = key;
+ this.counter = counter;
+ }
+ }
+
+ static class IntEntry {
+ int key;
+ int counter;
+
+ IntEntry(int key, int counter) {
+ this.key = key;
+ this.counter = counter;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java b/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
new file mode 100644
index 0000000..7d6096c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.percolator;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.percolator.PercolatorService;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Stress-tests the percolator by registering term and range queries and percolating documents against them.
+ */
+public class PercolatorStressBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("cluster.routing.schedule", 200, TimeUnit.MILLISECONDS)
+ .put("gateway.type", "none")
+ .put(SETTING_NUMBER_OF_SHARDS, 4)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Node clientNode = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+ Client client = clientNode.client();
+
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth("test")
+ .setWaitForGreenStatus()
+ .execute().actionGet();
+ if (healthResponse.isTimedOut()) {
+ System.err.println("Quiting, because cluster health requested timed out...");
+ return;
+ } else if (healthResponse.getStatus() != ClusterHealthStatus.GREEN) {
+ System.err.println("Quiting, because cluster state isn't green...");
+ return;
+ }
+
+ int COUNT = 200000;
+ int QUERIES = 100;
+ int TERM_QUERIES = QUERIES / 2;
+ int RANGE_QUERIES = QUERIES - TERM_QUERIES;
+
+ client.prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("numeric1", 1).endObject()).execute().actionGet();
+
+ // register queries
+ int i = 0;
+ for (; i < TERM_QUERIES; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", termQuery("name", "value"))
+ .endObject())
+ .execute().actionGet();
+ }
+
+ int[] numbers = new int[RANGE_QUERIES];
+ for (; i < QUERIES; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", rangeQuery("numeric1").from(i).to(i))
+ .endObject())
+ .execute().actionGet();
+ numbers[i - TERM_QUERIES] = i;
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Percolating [" + COUNT + "] ...");
+ for (i = 1; i <= COUNT; i++) {
+ XContentBuilder source;
+ int expectedMatches;
+ if (i % 2 == 0) {
+ source = source(Integer.toString(i), "value");
+ expectedMatches = TERM_QUERIES;
+ } else {
+ int number = numbers[i % RANGE_QUERIES];
+ source = source(Integer.toString(i), number);
+ expectedMatches = 1;
+ }
+ PercolateResponse percolate = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(source)
+ .execute().actionGet();
+ if (percolate.getMatches().length != expectedMatches) {
+ System.err.println("No matching number of queries");
+ }
+
+ if ((i % 10000) == 0) {
+ System.out.println("Percolated " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Percolation took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ clientNode.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().startObject("doc")
+ .field("id", id)
+ .field("name", nameValue)
+ .endObject().endObject();
+ }
+
+ private static XContentBuilder source(String id, int number) throws IOException {
+ return jsonBuilder().startObject().startObject("doc")
+ .field("id", id)
+ .field("numeric1", number)
+ .field("numeric2", number)
+ .field("numeric3", number)
+ .field("numeric4", number)
+ .field("numeric5", number)
+ .field("numeric6", number)
+ .field("numeric7", number)
+ .field("numeric8", number)
+ .field("numeric9", number)
+ .field("numeric10", number)
+ .endObject().endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java
new file mode 100644
index 0000000..61b2074
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score;
+
+import com.google.common.base.Charsets;
+import com.google.common.io.Files;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+import org.joda.time.DateTime;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.security.SecureRandom;
+import java.util.*;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+public class BasicScriptBenchmark {
+
+ public static class RequestInfo {
+ public RequestInfo(SearchRequest source, int i) {
+ request = source;
+ numTerms = i;
+ }
+
+ SearchRequest request;
+ int numTerms;
+ }
+
+ public static class Results {
+ public static final String TIME_PER_DOC_IN_SEC = "timePerDocInSec";
+ public static final String NUM_TERMS = "numTerms";
+ public static final String NUM_DOCS = "numDocs";
+ public static final String TIME_PER_QUERY_IN_SEC = "timePerQueryInSec";
+ public static final String TOTAL_TIME_IN_SEC = "totalTimeInSec";
+ Double[] resultSeconds;
+ Double[] resultSecondsPerQuery;
+ Long[] numDocs;
+ Integer[] numTerms;
+ Double[] timePerDoc;
+ String label;
+ String description;
+ public String lineStyle;
+ public String color;
+
+ void init(int numVariations, String label, String description, String color, String lineStyle) {
+ resultSeconds = new Double[numVariations];
+ resultSecondsPerQuery = new Double[numVariations];
+ numDocs = new Long[numVariations];
+ numTerms = new Integer[numVariations];
+ timePerDoc = new Double[numVariations];
+ this.label = label;
+ this.description = description;
+ this.color = color;
+ this.lineStyle = lineStyle;
+ }
+
+ void set(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter, int which, int numTerms) {
+ resultSeconds[which] = stopWatch.lastTaskTime().getMillis() / 1000.0;
+ resultSecondsPerQuery[which] = stopWatch.lastTaskTime().secondsFrac() / maxIter;
+ numDocs[which] = searchResponse.getHits().totalHits();
+ this.numTerms[which] = numTerms;
+ timePerDoc[which] = resultSecondsPerQuery[which] / numDocs[which];
+ }
+
+ public void printResults(BufferedWriter writer) throws IOException {
+ String comma = (writer == null) ? "" : ";";
+ String results = description + "\n" + Results.TOTAL_TIME_IN_SEC + " = " + getResultArray(resultSeconds) + comma + "\n"
+ + Results.TIME_PER_QUERY_IN_SEC + " = " + getResultArray(resultSecondsPerQuery) + comma + "\n" + Results.NUM_DOCS + " = "
+ + getResultArray(numDocs) + comma + "\n" + Results.NUM_TERMS + " = " + getResultArray(numTerms) + comma + "\n"
+ + Results.TIME_PER_DOC_IN_SEC + " = " + getResultArray(timePerDoc) + comma + "\n";
+ if (writer != null) {
+ writer.write(results);
+ } else {
+ System.out.println(results);
+ }
+
+ }
+
+ private String getResultArray(Object[] resultArray) {
+ String result = "[";
+ for (int i = 0; i < resultArray.length; i++) {
+ result += resultArray[i].toString();
+ if (i != resultArray.length - 1) {
+ result += ",";
+ }
+ }
+ result += "]";
+ return result;
+ }
+ }
+
+ public BasicScriptBenchmark() {
+ }
+
+ static List<String> termsList = new ArrayList<String>();
+
+ static void init(int numTerms) {
+ SecureRandom random = new SecureRandom();
+ random.setSeed(1);
+ termsList.clear();
+ for (int i = 0; i < numTerms; i++) {
+ String term = new BigInteger(512, random).toString(32);
+ termsList.add(term);
+ }
+
+ }
+
+ static String[] getTerms(int numTerms) {
+ String[] terms = new String[numTerms];
+ for (int i = 0; i < numTerms; i++) {
+ terms[i] = termsList.get(i);
+ }
+ return terms;
+ }
+
+ public static void writeHelperFunction() throws IOException {
+ File file = new File("addToPlot.m");
+ BufferedWriter out = Files.newWriter(file, Charsets.UTF_8);
+
+ out.write("function handle = addToPlot(numTerms, perDoc, color, linestyle, linewidth)\n" + "handle = line(numTerms, perDoc);\n"
+ + "set(handle, 'color', color);\n" + "set(handle, 'linestyle',linestyle);\n" + "set(handle, 'LineWidth',linewidth);\n"
+ + "end\n");
+ out.close();
+ }
+
+ public static void printOctaveScript(List<Results> allResults, String[] args) throws IOException {
+ if (args.length == 0) {
+ return;
+ }
+ BufferedWriter out = null;
+ try {
+ File file = new File(args[0]);
+ out = Files.newWriter(file, Charsets.UTF_8);
+
+ out.write("#! /usr/local/bin/octave -qf");
+ out.write("\n\n\n\n");
+ out.write("######################################\n");
+ out.write("# Octave script for plotting results\n");
+ String filename = "scriptScoreBenchmark" + new DateTime().toString();
+ out.write("#Call '" + args[0] + "' from the command line. The plot is then in " + filename + "\n\n");
+
+ out.write("handleArray = [];\n tagArray = [];\n plot([]);\n hold on;\n");
+ for (Results result : allResults) {
+ out.write("\n");
+ out.write("# " + result.description);
+ result.printResults(out);
+ out.write("handleArray = [handleArray, addToPlot(" + Results.NUM_TERMS + ", " + Results.TIME_PER_DOCIN_MILLIS + ", '"
+ + result.color + "','" + result.lineStyle + "',5)];\n");
+ out.write("tagArray = [tagArray; '" + result.label + "'];\n");
+ out.write("\n");
+ }
+
+ out.write("xlabel(\'number of query terms');");
+ out.write("ylabel(\'query time per document');");
+
+ out.write("legend(handleArray,tagArray);\n");
+
+ out.write("saveas(gcf,'" + filename + ".png','png')\n");
+ out.write("hold off;\n\n");
+ } catch (IOException e) {
+ System.err.println("Error: " + e.getMessage());
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ writeHelperFunction();
+ }
+
+ static void printResult(SearchResponse searchResponse, StopWatch stopWatch, String queryInfo) {
+ System.out.println("--> Searching with " + queryInfo + " took " + stopWatch.lastTaskTime() + ", per query "
+ + (stopWatch.lastTaskTime().secondsFrac() / 100) + " for " + searchResponse.getHits().totalHits() + " docs");
+ }
+
+ static void indexData(long numDocs, Client client, boolean randomizeTerms) throws IOException {
+ try {
+ client.admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Throwable t) {
+ // the index may not exist yet, in which case the delete fails and we simply ignore it
+ // TODO: handle this more safely in general
+ }
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").field("index_options", "offsets").field("analyzer", "payload_float")
+ .endObject().endObject().endObject().endObject();
+ client.admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping("type1", mapping)
+ .setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.number_of_replicas", 0).put("index.number_of_shards", 1)).execute().actionGet();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ Random random = new Random(1);
+ for (int i = 0; i < numDocs; i++) {
+
+ bulkRequest.add(client.prepareIndex().setType("type1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("text", randomText(random, randomizeTerms)).endObject()));
+ if (i % 1000 == 0) {
+ bulkRequest.execute().actionGet();
+ bulkRequest = client.prepareBulk();
+ }
+ }
+ bulkRequest.execute().actionGet();
+ client.admin().indices().prepareRefresh("test").execute().actionGet();
+ client.admin().indices().prepareFlush("test").setFull(true).execute().actionGet();
+ System.out.println("Done indexing " + numDocs + " documents");
+
+ }
+
+ private static String randomText(Random random, boolean randomizeTerms) {
+ StringBuilder text = new StringBuilder();
+ for (int i = 0; i < termsList.size(); i++) {
+ if (random.nextInt(5) == 3 || !randomizeTerms) {
+ text.append(' ').append(termsList.get(i)).append("|1");
+ }
+ }
+ return text.toString();
+ }
+
+ static void printTimings(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter) {
+ System.out.println(message);
+ System.out.println(stopWatch.lastTaskTime() + ", " + (stopWatch.lastTaskTime().secondsFrac() / maxIter) + ", "
+ + searchResponse.getHits().totalHits() + ", "
+ + (stopWatch.lastTaskTime().secondsFrac() / (maxIter * searchResponse.getHits().totalHits())));
+ }
+
+ static List<Entry<String, RequestInfo>> initTermQueries(int minTerms, int maxTerms) {
+ List<Entry<String, RequestInfo>> termSearchRequests = new ArrayList<Entry<String, RequestInfo>>();
+ for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
+ Map<String, Object> params = new HashMap<String, Object>();
+ String[] terms = getTerms(nTerms + 1);
+ params.put("text", terms);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).size(0).query(QueryBuilders.termsQuery("text", terms)));
+ String infoString = "Results for term query with " + (nTerms + 1) + " terms:";
+ termSearchRequests.add(new AbstractMap.SimpleEntry<String, RequestInfo>(infoString, new RequestInfo(request, nTerms + 1)));
+ }
+ return termSearchRequests;
+ }
+
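+ // builds one function_score request per term count: a terms filter selects the matching docs and
+ // the (native or mvel) script supplies the score, so runtime differences isolate script overhead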
+ static List<Entry<String, RequestInfo>> initNativeSearchRequests(int minTerms, int maxTerms, String script, boolean langNative) {
+ List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<Entry<String, RequestInfo>>();
+ for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
+ Map<String, Object> params = new HashMap<String, Object>();
+ String[] terms = getTerms(nTerms + 1);
+ params.put("text", terms);
+ String infoString = "Results for native script with " + (nTerms + 1) + " terms:";
+ ScriptScoreFunctionBuilder scriptFunction = langNative ? scriptFunction(script, "native", params) : scriptFunction(
+ script, params);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(false)
+ .size(0)
+ .query(functionScoreQuery(FilterBuilders.termsFilter("text", terms), scriptFunction).boostMode(
+ CombineFunction.REPLACE)));
+ nativeSearchRequests.add(new AbstractMap.SimpleEntry<String, RequestInfo>(infoString, new RequestInfo(request, nTerms + 1)));
+ }
+ return nativeSearchRequests;
+ }
+
+ static List<Entry<String, RequestInfo>> initScriptMatchAllSearchRequests(String script, boolean langNative) {
+ List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<Entry<String, RequestInfo>>();
+ String infoString = "Results for constant score script:";
+ ScriptScoreFunctionBuilder scriptFunction = langNative ? scriptFunction(script, "native") : scriptFunction(script);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).size(0)
+ .query(functionScoreQuery(FilterBuilders.matchAllFilter(), scriptFunction).boostMode(CombineFunction.REPLACE)));
+ nativeSearchRequests.add(new AbstractMap.SimpleEntry<String, RequestInfo>(infoString, new RequestInfo(request, 0)));
+
+ return nativeSearchRequests;
+ }
+
+ static void runBenchmark(Client client, int maxIter, Results results, List<Entry<String, RequestInfo>> nativeSearchRequests,
+ int minTerms, int warmerIter) throws IOException {
+ int counter = 0;
+ for (Entry<String, RequestInfo> entry : nativeSearchRequests) {
+ SearchResponse searchResponse = null;
+ // warm up
+ for (int i = 0; i < warmerIter; i++) {
+ searchResponse = client.search(entry.getValue().request).actionGet();
+ }
+ System.gc();
+ // run benchmark
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ for (int i = 0; i < maxIter; i++) {
+ searchResponse = client.search(entry.getValue().request).actionGet();
+ }
+ stopWatch.stop();
+ results.set(searchResponse, stopWatch, entry.getKey(), maxIter, counter, entry.getValue().numTerms);
+ counter++;
+ }
+ results.printResults(null);
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java
new file mode 100644
index 0000000..774e295
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativeConstantForLoopScoreScript;
+import org.elasticsearch.benchmark.scripts.score.script.NativeConstantScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmarks constant-score scripts, comparing native script implementations against mvel scripts.
+ */
+public class ScriptsConstantScoreBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 49;
+ int maxTerms = 50;
+ int maxIter = 1000;
+ int warmerIter = 1000;
+
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+ String clusterName = ScriptsConstantScoreBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, true);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+
+ results.init(maxTerms - minTerms, "native const script score (log(2) 10X)",
+ "Results for native const script score with score = log(2) 10X:", "black", "-.");
+ // init script searches
+ List<Entry<String, RequestInfo>> searchRequests = initScriptMatchAllSearchRequests(
+ NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ // init mvel script searches
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel const (log(2) 10X)", "Results for mvel const score = log(2) 10X:", "red", "-.");
+ searchRequests = initScriptMatchAllSearchRequests("score = 0; for (int i=0; i<10;i++) {score = score + log(2);} return score",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "native const script score (2)", "Results for native const script score with score = 2:",
+ "black", ":");
+ // init native script searches
+ searchRequests = initScriptMatchAllSearchRequests(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel const (2)", "Results for mvel const score = 2:", "red", "--");
+ // init mvel script searches
+ searchRequests = initScriptMatchAllSearchRequests("2", false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java
new file mode 100644
index 0000000..a78830e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativeNaiveTFIDFScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ScriptsScoreBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 1;
+ int maxTerms = 50;
+ int maxIter = 100;
+ int warmerIter = 10;
+
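+ // Compares native tf-idf script scoring against plain term queries, first
+ // on dense and then on sparse posting lists; set runMVEL below to also
+ // time the mvel equivalents.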
+ boolean runMVEL = false;
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+ String clusterName = ScriptsScoreBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, false);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+ results.init(maxTerms - minTerms, "native tfidf script score dense posting list",
+ "Results for native script score with dense posting list:", "black", "--");
+ // init native script searches
+ List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+ NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+
+ results.init(maxTerms - minTerms, "term query dense posting list", "Results for term query with dense posting lists:", "green",
+ "--");
+ // init term queries
+ searchRequests = initTermQueries(minTerms, maxTerms);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ if (runMVEL) {
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel tfidf dense posting list", "Results for mvel score with dense posting list:", "red",
+ "--");
+ // init mvel script searches
+ searchRequests = initNativeSearchRequests(
+ minTerms,
+ maxTerms,
+ "score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+ }
+
+ indexData(10000, client, true);
+ results = new Results();
+ results.init(maxTerms - minTerms, "native tfidf script score sparse posting list",
+ "Results for native script scorewith sparse posting list:", "black", "-.");
+ // init native script searches
+ searchRequests = initNativeSearchRequests(minTerms, maxTerms, NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+
+ results.init(maxTerms - minTerms, "term query sparse posting list", "Results for term query with sparse posting lists:", "green",
+ "-.");
+ // init term queries
+ searchRequests = initTermQueries(minTerms, maxTerms);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ if (runMVEL) {
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel tfidf sparse posting list", "Results for mvel score with sparse posting list:", "red",
+ "-.");
+ // init mvel script searches
+ searchRequests = initNativeSearchRequests(
+ minTerms,
+ maxTerms,
+ "score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+ }
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java
new file mode 100644
index 0000000..592ffe1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumNoRecordScoreScript;
+import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ScriptsScorePayloadSumBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 1;
+ int maxTerms = 50;
+ int maxIter = 100;
+ int warmerIter = 10;
+
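+ // Times two native payload-sum scripts that differ only in whether the
+ // index lookup caches (records) the term data it reads.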
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+ String clusterName = ScriptsScorePayloadSumBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, false);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+ // init script searches
+ results.init(maxTerms - minTerms, "native payload sum script score", "Results for native script score:", "green", ":");
+ List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+ NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ // init script searches
+ results.init(maxTerms - minTerms, "native payload sum script score no record", "Results for native script score:", "black", ":");
+ searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+ NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java
new file mode 100644
index 0000000..0d90d9f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score.plugin;
+
+import org.elasticsearch.benchmark.scripts.score.script.*;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.ScriptModule;
+
+public class NativeScriptExamplesPlugin extends AbstractPlugin {
+
+
+ @Override
+ public String name() {
+ return "native-script-example";
+ }
+
+ @Override
+ public String description() {
+ return "Native script examples";
+ }
+
+ public void onModule(ScriptModule module) {
+ module.registerScript(NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, NativeNaiveTFIDFScoreScript.Factory.class);
+ module.registerScript(NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, NativeConstantForLoopScoreScript.Factory.class);
+ module.registerScript(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, NativeConstantScoreScript.Factory.class);
+ module.registerScript(NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, NativePayloadSumScoreScript.Factory.class);
+ module.registerScript(NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, NativePayloadSumNoRecordScoreScript.Factory.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java
new file mode 100644
index 0000000..c61a40d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeConstantForLoopScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE = "native_constant_for_loop_script_score";
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeConstantForLoopScoreScript(params);
+ }
+ }
+
+ private NativeConstantForLoopScoreScript(Map<String, Object> params) {
+
+ }
+
+ @Override
+ public Object run() {
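+ // returns 10 * log(2) for every document; the loop only gives the
+ // native script a fixed amount of work per invocation to measure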
+ float score = 0;
+ for (int i = 0; i < 10; i++) {
+ score += Math.log(2);
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java
new file mode 100644
index 0000000..6d07242
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeConstantScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_CONSTANT_SCRIPT_SCORE = "native_constant_script_score";
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeConstantScoreScript();
+ }
+ }
+
+ private NativeConstantScoreScript() {
+ }
+
+ @Override
+ public Object run() {
+ return 2;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java
new file mode 100644
index 0000000..1f88e66
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativeNaiveTFIDFScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_NAIVE_TFIDF_SCRIPT_SCORE = "native_naive_tfidf_script_score";
+ String field = null;
+ String[] terms = null;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeNaiveTFIDFScoreScript(params);
+ }
+ }
+
+ private NativeNaiveTFIDFScoreScript(Map<String, Object> params) {
+ // params carry a single entry mapping the field name to the list of query terms
+ field = params.keySet().iterator().next();
+ @SuppressWarnings("unchecked")
+ ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+ terms = arrayList.toArray(new String[arrayList.size()]);
+
+ }
+
+ @Override
+ public Object run() {
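+ // naive tf-idf: for each query term present in the document, add
+ // tf * docCount / df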
+ float score = 0;
+ IndexField indexField = indexLookup().get(field);
+ for (int i = 0; i < terms.length; i++) {
+ IndexFieldTerm indexFieldTerm = indexField.get(terms[i]);
+ try {
+ if (indexFieldTerm.tf() != 0) {
+ score += indexFieldTerm.tf() * indexField.docCount() / indexFieldTerm.df();
+ }
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java
new file mode 100644
index 0000000..825b31e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.TermPosition;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativePayloadSumNoRecordScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE = "native_payload_sum_no_record_script_score";
+ String field = null;
+ String[] terms = null;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativePayloadSumNoRecordScoreScript(params);
+ }
+ }
+
+ private NativePayloadSumNoRecordScoreScript(Map<String, Object> params) {
+ // params carry a single entry mapping the field name to the list of query terms
+ field = params.keySet().iterator().next();
+ @SuppressWarnings("unchecked")
+ ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+ terms = arrayList.toArray(new String[arrayList.size()]);
+
+ }
+
+ @Override
+ public Object run() {
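+ // sums the first payload at each position of every query term; only
+ // FLAG_PAYLOADS is requested, so nothing is cached ("no record")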
+ float score = 0;
+ IndexField indexField = indexLookup().get(field);
+ for (int i = 0; i < terms.length; i++) {
+ IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS);
+ for (TermPosition pos : indexFieldTerm) {
+ score += pos.payloadAsFloat(0);
+ }
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java
new file mode 100644
index 0000000..0172c56
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.TermPosition;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativePayloadSumScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_PAYLOAD_SUM_SCRIPT_SCORE = "native_payload_sum_script_score";
+ String field = null;
+ String[] terms = null;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativePayloadSumScoreScript(params);
+ }
+ }
+
+ private NativePayloadSumScoreScript(Map<String, Object> params) {
+ // params carry a single entry mapping the field name to the list of query terms
+ field = params.keySet().iterator().next();
+ @SuppressWarnings("unchecked")
+ ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+ terms = arrayList.toArray(new String[arrayList.size()]);
+
+ }
+
+ @Override
+ public Object run() {
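+ // same payload sum as the "no record" variant, but FLAG_CACHE also asks
+ // the lookup to record what it reads so repeated access is cheaper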
+ float score = 0;
+ IndexField indexField = indexLookup().get(field);
+ for (int i = 0; i < terms.length; i++) {
+ IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS | IndexLookup.FLAG_CACHE);
+ for (TermPosition pos : indexFieldTerm) {
+ score += pos.payloadAsFloat(0);
+ }
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java b/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java
new file mode 100644
index 0000000..3f7811c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ */
+public class SuggestSearchBenchMark {
+
+ public static void main(String[] args) throws Exception {
+ int SEARCH_ITERS = 200;
+
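+ // Indexes up to 10m small docs with a single "prefix..." field value,
+ // then benchmarks term suggestions (suggestMode "always") on that field.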
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Client client = nodes[0].client();
+ try {
+ client.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("_all").field("enabled", false).endObject()
+ .startObject("_type").field("index", "no").endObject()
+ .startObject("_id").field("index", "no").endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ long COUNT = SizeValue.parseSizeValue("10m").singles();
+ int BATCH = 100;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ char character = 'a';
+ int idCounter = 0;
+ for (; i <= ITERS; i++) {
+ int termCounter = 0;
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(idCounter++)).source(source("prefix" + character + termCounter++)));
+ }
+ character++;
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("failures...");
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime());
+
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+ }
+
+
+ System.out.println("Warming up...");
+ char startChar = 'a';
+ for (int i = 0; i <= 20; i++) {
+ String term = "prefix" + startChar;
+ SearchResponse response = client.prepareSearch()
+ .setQuery(prefixQuery("field", term))
+ .addSuggestion(SuggestBuilder.termSuggestion("field").field("field").text(term).suggestMode("always"))
+ .execute().actionGet();
+ if (response.getHits().totalHits() == 0) {
+ System.err.println("No hits");
+ continue;
+ }
+ startChar++;
+ }
+
+
+ System.out.println("Starting benchmarking suggestions.");
+ startChar = 'a';
+ long timeTaken = 0;
+ for (int i = 0; i <= SEARCH_ITERS; i++) {
+ String term = "prefix" + startChar;
+ SearchResponse response = client.prepareSearch()
+ .setQuery(matchQuery("field", term))
+ .addSuggestion(SuggestBuilder.termSuggestion("field").text(term).field("field").suggestMode("always"))
+ .execute().actionGet();
+ timeTaken += response.getTookInMillis();
+ if (response.getSuggest() == null) {
+ System.err.println("No suggestions");
+ continue;
+ }
+ List<? extends Option> options = response.getSuggest().getSuggestion("field").getEntries().get(0).getOptions();
+ if (options == null || options.isEmpty()) {
+ System.err.println("No suggestions");
+ }
+ startChar++;
+ }
+
+ System.out.println("Avg time taken without filter " + (timeTaken / SEARCH_ITERS));
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String nameValue) throws IOException {
+ return jsonBuilder().startObject()
+ .field("field", nameValue)
+ .endObject();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java
new file mode 100644
index 0000000..ae01a9e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.util.Date;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.search.facet.FacetBuilders.dateHistogramFacet;
+import static org.elasticsearch.search.facet.FacetBuilders.histogramFacet;
+
+/**
+ *
+ */
+public class HistogramAggregationSearchBenchmark {
+
+ static final long COUNT = SizeValue.parseSizeValue("20m").singles();
+ static final int BATCH = 1000;
+ static final int QUERY_WARMUP = 5;
+ static final int QUERY_COUNT = 20;
+ static final int NUMBER_OF_TERMS = 1000;
+
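+ // Compares histogram facets with the equivalent histogram and date
+ // histogram aggregations across numeric fields of decreasing width
+ // (long, int, short, byte) and a date field.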
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = HistogramAggregationSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+
+ //Node clientNode = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client = node1.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = i;
+ }
+
+ Random r = new Random();
+ try {
+ client.admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put(settings))
+ .addMapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("l_value")
+ .field("type", "long")
+ .endObject()
+ .startObject("i_value")
+ .field("type", "integer")
+ .endObject()
+ .startObject("s_value")
+ .field("type", "short")
+ .endObject()
+ .startObject("b_value")
+ .field("type", "byte")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long iters = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= iters; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ final long value = lValues[r.nextInt(lValues.length)];
+ XContentBuilder source = jsonBuilder().startObject()
+ .field("id", Integer.valueOf(counter))
+ .field("l_value", value)
+ .field("i_value", (int) value)
+ .field("s_value", (short) value)
+ .field("b_value", (byte) value)
+ .field("date", new Date())
+ .endObject();
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(source));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
+ throw new Error("--> count mismatch after indexing");
+ }
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ System.out.println("--> Warmup...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("l_value").field("l_value").interval(4))
+ .addFacet(histogramFacet("i_value").field("i_value").interval(4))
+ .addFacet(histogramFacet("s_value").field("s_value").interval(4))
+ .addFacet(histogramFacet("b_value").field("b_value").interval(4))
+ .addFacet(histogramFacet("date").field("date").interval(1000))
+ .addAggregation(histogram("l_value").field("l_value").interval(4))
+ .addAggregation(histogram("i_value").field("i_value").interval(4))
+ .addAggregation(histogram("s_value").field("s_value").interval(4))
+ .addAggregation(histogram("b_value").field("b_value").interval(4))
+ .addAggregation(histogram("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup DONE");
+
+ long totalQueryTime = 0;
+ for (String field : new String[] {"b_value", "s_value", "i_value", "l_value"}) {
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet(field).field(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram(field).field(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet(field).field(field).valueField(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram(field).field(field).subAggregation(stats(field).field(field)).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("date").field("date").valueField("l_value").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date").field("date").interval(1000).subAggregation(stats("stats").field("l_value")))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("date").field("date").interval("day").mode(FacetBuilder.Mode.COLLECTOR))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Date Histogram Facet (mode/collector) (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("date").field("date").interval("day").mode(FacetBuilder.Mode.POST))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Date Histogram Facet (mode/post) (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ node1.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java
new file mode 100644
index 0000000..40345fe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.facet.FacetBuilder;
+import org.elasticsearch.search.facet.FacetBuilders;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+public class QueryFilterAggregationSearchBenchmark {
+
+ static final long COUNT = SizeValue.parseSizeValue("5m").singles();
+ static final int BATCH = 1000;
+ static final int QUERY_COUNT = 200;
+ static final int NUMBER_OF_TERMS = 200;
+
+ static Client client;
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = QueryFilterAggregationSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ client = node1.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = ThreadLocalRandom.current().nextLong();
+ }
+
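+ // crude wait to let the freshly started node settle before indexing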
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ builder.field("l_value", lValues[ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS)]);
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 100000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
+ throw new Error("--> count mismatch after indexing");
+ }
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ final long anyValue = ((Number) client.prepareSearch().execute().actionGet().getHits().hits()[0].sourceAsMap().get("l_value")).longValue();
+
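+ // The timed runs below issue the same term query bare, with a redundant
+ // query facet, with an equivalent filter aggregation, and with global
+ // query facets in collector and post modes, to compare their overhead.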
+ long totalQueryTime = 0;
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Simple Query on first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addFacet(FacetBuilders.queryFacet("query").query(termQuery("l_value", anyValue)))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Query facet first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addAggregation(AggregationBuilders.filter("filter").filter(FilterBuilders.termFilter("l_value", anyValue)))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Filter agg first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addFacet(FacetBuilders.queryFacet("query").query(termQuery("l_value", anyValue)).global(true).mode(FacetBuilder.Mode.COLLECTOR))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Query facet first l_value (global) (mode/collector) " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addFacet(FacetBuilders.queryFacet("query").query(termQuery("l_value", anyValue)).global(true).mode(FacetBuilder.Mode.POST))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Query facet first l_value (global) (mode/post) " + totalQueryTime + "ms");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java
new file mode 100644
index 0000000..c12f24f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+
+import java.util.List;
+import java.util.Random;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.search.facet.FacetBuilders.termsStatsFacet;
+
+/**
+ *
+ */
+public class TermsAggregationSearchBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("2m").singles();
+ static int BATCH = 1000;
+ static int QUERY_WARMUP = 10;
+ static int QUERY_COUNT = 100;
+ static int NUMBER_OF_TERMS = 200;
+ static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
+ static int STRING_TERM_SIZE = 5;
+
+ static Client client;
+
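+    // Each Method issues the same logical request through a different API:
+    // FACET goes through the legacy facet builders, AGGREGATION through the
+    // aggregation framework, so the two code paths can be timed side by side.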
+ private enum Method {
+ FACET {
+ @Override
+ SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
+ return builder.addFacet(termsFacet(name).field(field).executionHint(executionHint));
+ }
+
+ @Override
+ SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
+ return builder.addFacet(termsStatsFacet(name).keyField(keyField).valueField(valueField));
+ }
+ },
+ AGGREGATION {
+ @Override
+ SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
+ return builder.addAggregation(AggregationBuilders.terms(name).executionHint(executionHint).field(field));
+ }
+
+ @Override
+ SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
+ return builder.addAggregation(AggregationBuilders.terms(name).field(keyField).subAggregation(AggregationBuilders.stats("stats").field(valueField)));
+ }
+ };
+ abstract SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint);
+ abstract SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField);
+ }
+
+ public static void main(String[] args) throws Exception {
+ Random random = new Random();
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = TermsAggregationSearchBenchmark.class.getSimpleName();
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ client = clientNode.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = ThreadLocalRandom.current().nextLong();
+ }
+ String[] sValues = new String[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ sValues[i] = RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE);
+ }
+
+ Thread.sleep(10000);
+ try {
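+            // The *_dv variants below are not indexed and keep their fielddata in
+            // the doc_values format, so runs against them measure on-disk doc
+            // values rather than in-memory fielddata.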
+ client.admin().indices().create(createIndexRequest("test").mapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("s_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("sm_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("l_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("lm_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())).actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ final String sValue = sValues[counter % sValues.length];
+ final long lValue = lValues[counter % lValues.length];
+ builder.field("s_value", sValue);
+ builder.field("l_value", lValue);
+ builder.field("s_value_dv", sValue);
+ builder.field("l_value_dv", lValue);
+
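+                    // Multi-valued variants: each document gets
+                    // NUMBER_OF_MULTI_VALUE_TERMS randomly drawn terms per field.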
+ for (String field : new String[] {"sm_value", "sm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ for (String field : new String[] {"lm_value", "lm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+                            builder.value(lValues[ThreadLocalRandom.current().nextInt(lValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ COUNT = client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+
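+        // Benchmark matrix: facet vs. aggregation, string vs. long values,
+        // single- vs. multi-valued fields, fielddata vs. doc values (_dv),
+        // and the default vs. "map" execution hint.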
+ List<StatsResult> stats = Lists.newArrayList();
+ stats.add(terms("terms_facet_s", Method.FACET, "s_value", null));
+ stats.add(terms("terms_facet_s_dv", Method.FACET, "s_value_dv", null));
+ stats.add(terms("terms_facet_map_s", Method.FACET, "s_value", "map"));
+ stats.add(terms("terms_facet_map_s_dv", Method.FACET, "s_value_dv", "map"));
+ stats.add(terms("terms_agg_s", Method.AGGREGATION, "s_value", null));
+ stats.add(terms("terms_agg_s_dv", Method.AGGREGATION, "s_value_dv", null));
+ stats.add(terms("terms_agg_map_s", Method.AGGREGATION, "s_value", "map"));
+ stats.add(terms("terms_agg_map_s_dv", Method.AGGREGATION, "s_value_dv", "map"));
+ stats.add(terms("terms_facet_l", Method.FACET, "l_value", null));
+ stats.add(terms("terms_facet_l_dv", Method.FACET, "l_value_dv", null));
+ stats.add(terms("terms_agg_l", Method.AGGREGATION, "l_value", null));
+ stats.add(terms("terms_agg_l_dv", Method.AGGREGATION, "l_value_dv", null));
+ stats.add(terms("terms_facet_sm", Method.FACET, "sm_value", null));
+ stats.add(terms("terms_facet_sm_dv", Method.FACET, "sm_value_dv", null));
+ stats.add(terms("terms_facet_map_sm", Method.FACET, "sm_value", "map"));
+ stats.add(terms("terms_facet_map_sm_dv", Method.FACET, "sm_value_dv", "map"));
+ stats.add(terms("terms_agg_sm", Method.AGGREGATION, "sm_value", null));
+ stats.add(terms("terms_agg_sm_dv", Method.AGGREGATION, "sm_value_dv", null));
+ stats.add(terms("terms_agg_map_sm", Method.AGGREGATION, "sm_value", "map"));
+ stats.add(terms("terms_agg_map_sm_dv", Method.AGGREGATION, "sm_value_dv", "map"));
+ stats.add(terms("terms_facet_lm", Method.FACET, "lm_value", null));
+ stats.add(terms("terms_facet_lm_dv", Method.FACET, "lm_value_dv", null));
+ stats.add(terms("terms_agg_lm", Method.AGGREGATION, "lm_value", null));
+ stats.add(terms("terms_agg_lm_dv", Method.AGGREGATION, "lm_value_dv", null));
+
+ stats.add(termsStats("terms_stats_facet_s_l", Method.FACET, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_facet_s_l_dv", Method.FACET, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_s_l", Method.AGGREGATION, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_s_l_dv", Method.AGGREGATION, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_facet_s_lm", Method.FACET, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_facet_s_lm_dv", Method.FACET, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_s_lm", Method.AGGREGATION, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_agg_s_lm_dv", Method.AGGREGATION, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_facet_sm_l", Method.FACET, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_facet_sm_l_dv", Method.FACET, "sm_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_sm_l", Method.AGGREGATION, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_sm_l_dv", Method.AGGREGATION, "sm_value_dv", "l_value_dv", null));
+
+ System.out.println("------------------ SUMMARY -------------------------------");
+        System.out.format("%25s%10s%10s\n", "name", "took", "avg ms");
+ for (StatsResult stat : stats) {
+ System.out.format("%25s%10s%10d\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT));
+ }
+ System.out.println("------------------ SUMMARY -------------------------------");
+
+ clientNode.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ static class StatsResult {
+ final String name;
+ final long took;
+
+ StatsResult(String name, long took) {
+ this.name = name;
+ this.took = took;
+ }
+ }
+
+ private static StatsResult terms(String name, Method method, String field, String executionHint) {
+        long totalQueryTime;
+
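+        // Start from a cold field data cache so the first warmup iteration
+        // exposes the load time printed below.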
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, field, executionHint)
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, field, executionHint)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+ return new StatsResult(name, totalQueryTime);
+ }
+
+ private static StatsResult termsStats(String name, Method method, String keyField, String valueField, String executionHint) {
+ long totalQueryTime;
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, keyField, valueField)
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading (" + name + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, keyField, valueField)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms stats agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+ return new StatsResult(name, totalQueryTime);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java
new file mode 100644
index 0000000..ed75ddb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.hasChildFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ChildSearchAndIndexingBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("1m").singles();
+ static int CHILD_COUNT = 5;
+ static int BATCH = 100;
+ static int QUERY_COUNT = 50;
+ static String indexName = "test";
+ static Random random = new Random();
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+                .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchAndIndexingBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node1"))
+ .clusterName(clusterName)
+ .node();
+ Client client = node1.client();
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+            System.out.println("--> Indexing [" + COUNT + "] parent documents and [" + (COUNT * CHILD_COUNT) + "] child documents");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(Integer.toString(counter), "test" + counter)));
+ for (int k = 0; k < CHILD_COUNT; k++) {
+ request.add(Requests.indexRequest(indexName).type("child").id(Integer.toString(counter) + "_" + k)
+ .parent(Integer.toString(counter))
+ .source(childSource(Integer.toString(counter), "tag" + k)));
+ }
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) * (1 + CHILD_COUNT) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT * (1 + CHILD_COUNT))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
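+        // Run searching and indexing concurrently: the index thread keeps
+        // re-writing existing parent/child documents (creating deleted docs),
+        // while the search thread runs parent/child queries. Press enter to stop.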
+ SearchThread searchThread = new SearchThread(client);
+ new Thread(searchThread).start();
+ IndexThread indexThread = new IndexThread(client);
+ new Thread(indexThread).start();
+
+ System.in.read();
+
+ indexThread.stop();
+ searchThread.stop();
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
+ }
+
+ private static XContentBuilder childSource(String id, String tag) throws IOException {
+ return jsonBuilder().startObject().field("id", id).field("tag", tag).endObject();
+ }
+
+ static class IndexThread implements Runnable {
+
+ private final Client client;
+ private volatile boolean run = true;
+
+ IndexThread(Client client) {
+ this.client = client;
+ }
+
+ @Override
+ public void run() {
+ while (run) {
+ for (int i = 1; run && i < COUNT; i++) {
+ try {
+ client.prepareIndex(indexName, "parent", Integer.toString(i))
+ .setSource(parentSource(Integer.toString(i), "test" + i)).execute().actionGet();
+ for (int j = 0; j < CHILD_COUNT; j++) {
+ client.prepareIndex(indexName, "child", Integer.toString(i) + "_" + j)
+ .setParent(Integer.toString(i))
+                                .setSource(childSource(Integer.toString(i), "tag" + j)).execute().actionGet();
+ }
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ Thread.sleep(100);
+ if (i % 500 == 0) {
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .clear().setIndices(true).execute().actionGet();
+ System.out.println("Deleted docs: " + statsResponse.getAt(0).getIndices().getDocs().getDeleted());
+ }
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ public void stop() {
+ run = false;
+ }
+
+ }
+
+ static class SearchThread implements Runnable {
+
+ private final Client client;
+ private volatile boolean run = true;
+
+ SearchThread(Client client) {
+ this.client = client;
+ }
+
+ @Override
+ public void run() {
+ while (run) {
+ try {
+ long totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", termQuery("tag", "tag" + random.nextInt(CHILD_COUNT)))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+// System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + COUNT + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with term filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", matchAllQuery())
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ long expected = (COUNT / BATCH) * BATCH;
+ if (searchResponse.getHits().totalHits() != expected) {
+// System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + expected + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+ Thread.sleep(1000);
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void stop() {
+ run = false;
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java
new file mode 100644
index 0000000..b7432bf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java
@@ -0,0 +1,434 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ChildSearchBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+
+ long COUNT = SizeValue.parseSizeValue("10m").singles();
+ int CHILD_COUNT = 5;
+ int BATCH = 100;
+ int QUERY_WARMUP = 20;
+ int QUERY_COUNT = 50;
+ String indexName = "test";
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+            System.out.println("--> Indexing [" + COUNT + "] parent documents and [" + (COUNT * CHILD_COUNT) + "] child documents");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(counter, "test" + counter)));
+ for (int k = 0; k < CHILD_COUNT; k++) {
+ request.add(Requests.indexRequest(indexName).type("child").id(Integer.toString(counter) + "_" + k)
+ .parent(Integer.toString(counter))
+ .source(childSource(counter, "tag" + k)));
+ }
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) * (1 + CHILD_COUNT) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT * (1 + CHILD_COUNT))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ System.out.println("--> Running just child query");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
+ long totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Just Child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ // run parent child constant query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", termQuery("tag", "tag1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + COUNT + "]");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", termQuery("tag", "tag1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + COUNT + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_child filter with match_all child query");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", matchAllQuery())
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ long expected = (COUNT / BATCH) * BATCH;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + expected + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(
+ filteredQuery(matchAllQuery(), hasChildFilter("child", termQuery("id", Integer.toString(j + 1))))
+ ).execute().actionGet();
+ long expected = 1;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with single parent match Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
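+        // The expected hit count grows cubically in j ((j + 1)^3 * CHILD_COUNT),
+        // so successive queries match progressively larger sets of parents.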
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ double expected = Math.pow((j + 1), 3) * CHILD_COUNT;
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", constantScoreQuery(rangeFilter("num").lte(expected)))))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits: " + searchResponse.getHits().totalHits() + " != " + expected);
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with exponential parent results Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ // run parent child constant query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasParentFilter("parent", termQuery("name", "test1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + CHILD_COUNT + "]");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasParentFilter("parent", termQuery("name", "test1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + CHILD_COUNT + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_parent filter with match_all parent query ");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(
+ matchAllQuery(),
+ hasParentFilter("parent", matchAllQuery())
+ ))
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+            if (searchResponse.getHits().totalHits() != COUNT * CHILD_COUNT) {
+                System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + (COUNT * CHILD_COUNT) + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent filter with match_all parent query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ System.out.println("--> Running top_children query");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> top_children Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running top_children query, with match_all as child query");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", matchAllQuery())).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", matchAllQuery())).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> top_children, with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ System.out.println("--> Running has_child query with score type");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("tag", "tag1")).scoreType("max")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("tag", "tag1")).scoreType("max")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", matchAllQuery()).scoreType("max")).execute().actionGet();
+ long expected = (COUNT / BATCH) * BATCH;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("id", Integer.toString(j + 1))).scoreType("max")).execute().actionGet();
+ long expected = 1;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with single parent match Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ double expected = Math.pow((j + 1), 3) * CHILD_COUNT;
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", constantScoreQuery(rangeFilter("num").lte(expected))).scoreType("max")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits: " + searchResponse.getHits().totalHits() + " != " + expected);
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with exponential parent results Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ /*System.out.println("--> Running has_parent query with score type");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("name", "test1")).scoreType("score")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("name", "test1")).scoreType("score")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", matchAllQuery()).scoreType("score")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != 5000000) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");*/
+
+ System.gc();
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(int id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("id", Integer.toString(id)).field("num", id).field("name", nameValue).endObject();
+ }
+
+ private static XContentBuilder childSource(int id, String tag) throws IOException {
+ return jsonBuilder().startObject().field("id", Integer.toString(id)).field("num", id).field("tag", tag).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java
new file mode 100644
index 0000000..398c9a9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.hasChildFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ChildSearchShortCircuitBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchShortCircuitBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1"))
+ .node();
+ Client client = node1.client();
+
+ long PARENT_COUNT = SizeValue.parseSizeValue("10M").singles();
+ int BATCH = 100;
+ int QUERY_WARMUP = 5;
+ int QUERY_COUNT = 25;
+ String indexName = "test";
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+            System.out.println("--> Indexing [" + PARENT_COUNT + "] parent documents and some child documents");
+ long ITERS = PARENT_COUNT / BATCH;
+ int i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(counter)));
+
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+                    System.out.println("--> Indexed " + (i * BATCH) + " parent docs; took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+
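+            // Children are indexed in doubling batches: batch i carries
+            // field2 = i and is spread over parents 1..i, so a query on
+            // field2 = i matches exactly i parents.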
+ int id = 0;
+ for (i = 1; i <= PARENT_COUNT; i *= 2) {
+ int parentId = 1;
+ for (int j = 0; j < i; j++) {
+ client.prepareIndex(indexName, "child", Integer.toString(id++))
+ .setParent(Integer.toString(parentId++))
+ .setSource(childSource(i))
+ .execute().actionGet();
+ }
+ }
+
+ System.out.println("--> Indexing took " + stopWatch.totalTime());
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ System.out.println("--> Running just child query");
+ // run just the child query, warm up first
+ for (int i = 1; i <= 10000; i *= 2) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(matchQuery("child.field2", i)).execute().actionGet();
+            System.out.println("--> Warmup took [" + i + "]: " + searchResponse.getTook());
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ // run parent child constant query
+ for (int j = 1; j < QUERY_WARMUP; j *= 2) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ hasChildQuery("child", matchQuery("field2", j))
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != j) {
+                System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + j + "]");
+ }
+ }
+
+        long totalQueryTime;
+        for (int i = 1; i < PARENT_COUNT; i *= 2) {
+            totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", matchQuery("field2", i))))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+            System.out.println("--> has_child filter " + i + " Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+        for (int i = 1; i < PARENT_COUNT; i *= 2) {
+            totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(hasChildQuery("child", matchQuery("field2", i)).scoreType("max"))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+            System.out.println("--> has_child query " + i + " Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ System.gc();
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(int val) throws IOException {
+ return jsonBuilder().startObject().field("field1", Integer.toString(val)).endObject();
+ }
+
+ private static XContentBuilder childSource(int val) throws IOException {
+ return jsonBuilder().startObject().field("field2", Integer.toString(val)).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java
new file mode 100644
index 0000000..c39bf8f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.geo;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.geoDistanceFilter;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ */
+public class GeoDistanceSearchBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ Node node = NodeBuilder.nodeBuilder().clusterName(GeoDistanceSearchBenchmark.class.getSimpleName()).node();
+ Client client = node.client();
+
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("Failed to wait for green status, bailing");
+ System.exit(1);
+ }
+
+ final long NUM_DOCS = SizeValue.parseSizeValue("1m").singles();
+ final long NUM_WARM = 50;
+ final long NUM_RUNS = 100;
+
+ if (client.admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ System.out.println("Found an index, count: " + client.prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount());
+ } else {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client.admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+
+ System.err.println("--> Indexing [" + NUM_DOCS + "]");
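+            // i is advanced inside the loop body: five nearby places are
+            // indexed per pass.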
+ for (long i = 0; i < NUM_DOCS; ) {
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ if ((i % 10000) == 0) {
+ System.err.println("--> indexed " + i);
+ }
+ }
+ System.err.println("Done indexed");
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ }
+
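+ // For each distance algorithm, warm up NUM_WARM times, then report the average
+ // latency over NUM_RUNS timed searches.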
+ System.err.println("--> Warming up (ARC) - optimize_bbox");
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "memory");
+ }
+ long totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - optimize_bbox (memory) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - optimize_bbox (memory)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (ARC) - optimize_bbox (indexed)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "indexed");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - optimize_bbox (indexed)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "indexed");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_RUNS) + "ms");
+
+
+ System.err.println("--> Warming up (ARC) - no optimize_bbox");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "none");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - no optimize_bbox " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - no optimize_bbox");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "none");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - no optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (SLOPPY_ARC)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.SLOPPY_ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (SLOPPY_ARC) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (SLOPPY_ARC)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.SLOPPY_ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (SLOPPY_ARC) " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (PLANE)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.PLANE, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (PLANE) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (PLANE)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.PLANE, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (PLANE) " + (totalTime / NUM_RUNS) + "ms");
+
+ node.close();
+ }
+
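+ // Runs a single count search filtered to points within 2km of lower Manhattan,
+ // using the given distance algorithm and optimize_bbox mode.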
+ public static void run(Client client, GeoDistance geoDistance, String optimizeBbox) {
+ client.prepareSearch() // from NY
+ .setSearchType(SearchType.COUNT)
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location")
+ .distance("2km")
+ .optimizeBbox(optimizeBbox)
+ .geoDistance(geoDistance)
+ .point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java b/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java
new file mode 100644
index 0000000..d084261
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.nested;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmarks sorting on a field inside nested documents: bulk-indexes root
+ * documents that each carry an array of nested objects, then repeatedly sorts
+ * match_all results on the nested field.
+ */
+public class NestedSearchBenchMark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node node1 = nodeBuilder()
+ .settings(settingsBuilder().put(settings).put("name", "node1"))
+ .node();
+ Client client = node1.client();
+
+ int count = (int) SizeValue.parseSizeValue("1m").singles();
+ int nestedCount = 10;
+ int rootDocs = count / nestedCount;
+ int batch = 100;
+ int queryWarmup = 5;
+ int queryCount = 500;
+ String indexName = "test";
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth()
+ .setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
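+ // Create the index with a nested "field2" mapping and bulk-index the documents;
+ // if the index already exists, the create call throws and indexing is skipped.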
+ try {
+ client.admin().indices().prepareCreate(indexName)
+ .addMapping("type", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "integer")
+ .endObject()
+ .startObject("field2")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field3")
+ .field("type", "integer")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + rootDocs + "] root documents and [" + (rootDocs * nestedCount) + "] nested objects");
+ long ITERS = rootDocs / batch;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < batch; j++) {
+ counter++;
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
+ .field("field1", counter)
+ .startArray("field2");
+ for (int k = 0; k < nestedCount; k++) {
+ doc = doc.startObject()
+ .field("field3", k)
+ .endObject();
+ }
+ doc = doc.endArray();
+ request.add(
+ Requests.indexRequest(indexName).type("type").id(Integer.toString(counter)).source(doc)
+ );
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * batch) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * batch) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (count * (1 + nestedCount))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ System.out.println("--> Running match_all with sorting on nested field");
+ // run just the child query, warm up first
+ for (int j = 0; j < queryWarmup; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("field2.field3")
+ .setNestedPath("field2")
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != rootDocs) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
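+ // Timed run: alternate sort direction between iterations and average the
+ // reported query times.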
+ long totalQueryTime = 0;
+ for (int j = 0; j < queryCount; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("field2.field3")
+ .setNestedPath("field2")
+ .sortMode("avg")
+ .order(j % 2 == 0 ? SortOrder.ASC : SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ if (searchResponse.getHits().totalHits() != rootDocs) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Sorting by nested fields took: " + (totalQueryTime / queryCount) + "ms");
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java b/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java
new file mode 100644
index 0000000..fab0166
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.node.Node;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.queryFilter;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+/**
+ * Concurrent stress test that runs configurable numbers of searcher and
+ * indexer threads against a small cluster of in-process nodes.
+ */
+public class NodesStressTest {
+
+ private Node[] nodes;
+
+ private int numberOfNodes = 2;
+
+ private Client[] clients;
+
+ private AtomicLong idGenerator = new AtomicLong();
+
+ private int fieldNumLimit = 50;
+
+ private long searcherIterations = 10;
+ private Searcher[] searcherThreads = new Searcher[1];
+
+ private long indexIterations = 10;
+ private Indexer[] indexThreads = new Indexer[1];
+
+ private TimeValue sleepAfterDone = TimeValue.timeValueMillis(0);
+ private TimeValue sleepBeforeClose = TimeValue.timeValueMillis(0);
+
+ private CountDownLatch latch;
+ private CyclicBarrier barrier1;
+ private CyclicBarrier barrier2;
+
+ public NodesStressTest() {
+ }
+
+ public NodesStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public NodesStressTest fieldNumLimit(int fieldNumLimit) {
+ this.fieldNumLimit = fieldNumLimit;
+ return this;
+ }
+
+ public NodesStressTest searchIterations(int searchIterations) {
+ this.searcherIterations = searchIterations;
+ return this;
+ }
+
+ public NodesStressTest searcherThreads(int numberOfSearcherThreads) {
+ searcherThreads = new Searcher[numberOfSearcherThreads];
+ return this;
+ }
+
+ public NodesStressTest indexIterations(long indexIterations) {
+ this.indexIterations = indexIterations;
+ return this;
+ }
+
+ public NodesStressTest indexThreads(int numberOfWriterThreads) {
+ indexThreads = new Indexer[numberOfWriterThreads];
+ return this;
+ }
+
+ public NodesStressTest sleepAfterDone(TimeValue time) {
+ this.sleepAfterDone = time;
+ return this;
+ }
+
+ public NodesStressTest sleepBeforeClose(TimeValue time) {
+ this.sleepBeforeClose = time;
+ return this;
+ }
+
+ public NodesStressTest build(Settings settings) throws Exception {
+ settings = settingsBuilder()
+// .put("index.refresh_interval", 1, TimeUnit.SECONDS)
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(settings)
+ .build();
+
+ nodes = new Node[numberOfNodes];
+ clients = new Client[numberOfNodes];
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ clients[i] = nodes[i].client();
+ }
+
+ for (int i = 0; i < searcherThreads.length; i++) {
+ searcherThreads[i] = new Searcher(i);
+ }
+ for (int i = 0; i < indexThreads.length; i++) {
+ indexThreads[i] = new Indexer(i);
+ }
+
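+ // Rendezvous protocol: each worker waits on barrier1 (ready) and barrier2
+ // (start), and counts the latch down when it finishes.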
+ latch = new CountDownLatch(1);
+ barrier1 = new CyclicBarrier(2);
+ barrier2 = new CyclicBarrier(2);
+ // warmup
+ StopWatch stopWatch = new StopWatch().start();
+ Indexer warmup = new Indexer(-1).max(10000);
+ warmup.start();
+ barrier1.await();
+ barrier2.await();
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Done Warmup, took [" + stopWatch.totalTime() + "]");
+
+ latch = new CountDownLatch(searcherThreads.length + indexThreads.length);
+ barrier1 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
+ barrier2 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
+
+ return this;
+ }
+
+ public void start() throws Exception {
+ for (Thread t : searcherThreads) {
+ t.start();
+ }
+ for (Thread t : indexThreads) {
+ t.start();
+ }
+ barrier1.await();
+
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+
+ barrier2.await();
+
+ latch.await();
+ stopWatch.stop();
+
+ System.out.println("Done, took [" + stopWatch.totalTime() + "]");
+ System.out.println("Sleeping before close: " + sleepBeforeClose);
+ Thread.sleep(sleepBeforeClose.millis());
+
+ for (Client client : clients) {
+ client.close();
+ }
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("Sleeping before exit: " + sleepBeforeClose);
+ Thread.sleep(sleepAfterDone.millis());
+ }
+
+ class Searcher extends Thread {
+ final int id;
+ long counter = 0;
+ long max = searcherIterations;
+
+ Searcher(int id) {
+ super("Searcher" + id);
+ this.id = id;
+ }
+
+ @Override
+ public void run() {
+ try {
+ barrier1.await();
+ barrier2.await();
+ for (; counter < max; counter++) {
+ Client client = client(counter);
+ QueryBuilder query = termQuery("num", counter % fieldNumLimit);
+ query = constantScoreQuery(queryFilter(query));
+
+ SearchResponse search = client.search(searchRequest()
+ .source(searchSource().query(query)))
+ .actionGet();
+// System.out.println("Got search response, hits [" + search.hits().totalHits() + "]");
+ }
+ } catch (Exception e) {
+ System.err.println("Failed to search:");
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ class Indexer extends Thread {
+
+ final int id;
+ long counter = 0;
+ long max = indexIterations;
+
+ Indexer(int id) {
+ super("Indexer" + id);
+ this.id = id;
+ }
+
+ Indexer max(int max) {
+ this.max = max;
+ return this;
+ }
+
+ @Override
+ public void run() {
+ try {
+ barrier1.await();
+ barrier2.await();
+ for (; counter < max; counter++) {
+ Client client = client(counter);
+ long id = idGenerator.incrementAndGet();
+ client.index(Requests.indexRequest().index("test").type("type1").id(Long.toString(id))
+ .source(XContentFactory.jsonBuilder().startObject()
+ .field("num", id % fieldNumLimit)
+ .endObject()))
+ .actionGet();
+ }
+ System.out.println("Indexer [" + id + "]: Done");
+ } catch (Exception e) {
+ System.err.println("Failed to index:");
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ private Client client(long i) {
+ return clients[((int) (i % clients.length))];
+ }
+
+ public static void main(String[] args) throws Exception {
+ NodesStressTest test = new NodesStressTest()
+ .numberOfNodes(2)
+ .indexThreads(5)
+ .indexIterations(10 * 1000)
+ .searcherThreads(5)
+ .searchIterations(10 * 1000)
+ .sleepBeforeClose(TimeValue.timeValueMinutes(10))
+ .sleepAfterDone(TimeValue.timeValueMinutes(10))
+ .build(EMPTY_SETTINGS);
+
+ test.start();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java
new file mode 100644
index 0000000..e36f645
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Single-threaded bulk indexing stress test: indexes documents in bulk
+ * batches and reports overall throughput.
+ */
+public class SingleThreadBulkStress {
+
+ public static void main(String[] args) throws Exception {
+ Random random = new Random();
+
+ int shardsCount = Integer.parseInt(System.getProperty("es.shards", "1"));
+ int replicaCount = Integer.parseInt(System.getProperty("es.replica", "1"));
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "1s")
+ .put("index.merge.async", true)
+ .put("index.translog.flush_threshold_ops", 5000)
+ .put("gateway.type", "none")
+ .put(SETTING_NUMBER_OF_SHARDS, shardsCount)
+ .put(SETTING_NUMBER_OF_REPLICAS, replicaCount)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ //Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+ Node client = nodes[0];
+
+ Client client1 = client.client();
+
+ Thread.sleep(1000);
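+ // Minimal mapping: _source and _all disabled, _type and _id not indexed, and
+ // norms omitted, so the benchmark measures raw indexing throughput.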
+ client1.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("_all").field("enabled", false).endObject()
+ .startObject("_type").field("index", "no").endObject()
+ .startObject("_id").field("index", "no").endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
+// .startObject("field").field("index", "analyzed").field("omit_norms", false).endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+ long COUNT = SizeValue.parseSizeValue("2m").singles();
+ int BATCH = 500;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client1.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter)).source(source(Integer.toString(counter), "test" + counter)));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ client.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("field", nameValue).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java
new file mode 100644
index 0000000..8250f88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Single-threaded indexing stress test that cycles through a small range of
+ * document ids, so it mostly exercises the overwrite path.
+ */
+public class SingleThreadIndexingStress {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "1s")
+ .put("index.merge.async", true)
+ .put("index.translog.flush_threshold_ops", 5000)
+ .put("gateway.type", "none")
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client1 = client.client();
+
+ Thread.sleep(1000);
+ client1.admin().indices().create(createIndexRequest("test")).actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+ int COUNT = 200000;
+ int ID_RANGE = 100;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ int i = 1;
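+ // Ids cycle through ID_RANGE values, so after the first pass every request
+ // overwrites an existing document.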
+ for (; i <= COUNT; i++) {
+// client1.admin().cluster().preparePingSingle("test", "type1", Integer.toString(i)).execute().actionGet();
+ client1.prepareIndex("test", "type1").setId(Integer.toString(i % ID_RANGE)).setSource(source(Integer.toString(i), "test" + i))
+ .setCreate(false).execute().actionGet();
+ if ((i % 10000) == 0) {
+ System.out.println("Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ client.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ long time = System.currentTimeMillis();
+ return jsonBuilder().startObject()
+ .field("id", id)
+// .field("numeric1", time)
+// .field("numeric2", time)
+// .field("numeric3", time)
+// .field("numeric4", time)
+// .field("numeric5", time)
+// .field("numeric6", time)
+// .field("numeric7", time)
+// .field("numeric8", time)
+// .field("numeric9", time)
+// .field("numeric10", time)
+ .field("name", nameValue)
+ .endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java b/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java
new file mode 100644
index 0000000..37b20bc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.time;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Measures the cost of System.currentTimeMillis() (or System.nanoTime() when
+ * USE_NANO_TIME is set), first single-threaded and then from many threads.
+ */
+public class SimpleTimeBenchmark {
+
+ private static boolean USE_NANO_TIME = false;
+ private static long NUMBER_OF_ITERATIONS = 1000000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.currentTimeMillis();
+ }
+ System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
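+ // Now read the clock from NUMBER_OF_THREADS threads at once to see how the
+ // time source behaves under contention.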
+ System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ if (USE_NANO_TIME) {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.nanoTime();
+ }
+ } else {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.currentTimeMillis();
+ }
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java
new file mode 100644
index 0000000..2978c5c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * Simple transport request carrying an id and an opaque byte[] payload.
+ */
+public class BenchmarkMessageRequest extends TransportRequest {
+
+ long id;
+ byte[] payload;
+
+ public BenchmarkMessageRequest(long id, byte[] payload) {
+ this.id = id;
+ this.payload = payload;
+ }
+
+ public BenchmarkMessageRequest() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ payload = new byte[in.readVInt()];
+ in.readFully(payload);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(payload.length);
+ out.writeBytes(payload);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java
new file mode 100644
index 0000000..7a7e3d9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ * Transport response that echoes back the request's id and payload.
+ */
+public class BenchmarkMessageResponse extends TransportResponse {
+
+ long id;
+ byte[] payload;
+
+ public BenchmarkMessageResponse(BenchmarkMessageRequest request) {
+ this.id = request.id;
+ this.payload = request.payload;
+ }
+
+ public BenchmarkMessageResponse(long id, byte[] payload) {
+ this.id = id;
+ this.payload = payload;
+ }
+
+ public BenchmarkMessageResponse() {
+ }
+
+ public long id() {
+ return id;
+ }
+
+ public byte[] payload() {
+ return payload;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ payload = new byte[in.readVInt()];
+ in.readFully(payload);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(payload.length);
+ out.writeBytes(payload);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java
new file mode 100644
index 0000000..84bf8d9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+
+/**
+ * Stresses the netty transport with large (10MB) BULK-channel payloads while
+ * a separate thread times small STATE-channel requests.
+ */
+public class BenchmarkNettyLargeMessages {
+
+ public static void main(String[] args) throws InterruptedException {
+ final ByteSizeValue payloadSize = new ByteSizeValue(10, ByteSizeUnit.MB);
+ final int NUMBER_OF_ITERATIONS = 100000;
+ final int NUMBER_OF_CLIENTS = 5;
+ final byte[] payload = new byte[(int) payloadSize.bytes()];
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .build();
+
+ NetworkService networkService = new NetworkService(settings);
+
+ final ThreadPool threadPool = new ThreadPool();
+ final TransportService transportServiceServer = new TransportService(new NettyTransport(settings, threadPool, networkService, Version.CURRENT), threadPool).start();
+ final TransportService transportServiceClient = new TransportService(new NettyTransport(settings, threadPool, networkService, Version.CURRENT), threadPool).start();
+
+ final DiscoveryNode bigNode = new DiscoveryNode("big", new InetSocketTransportAddress("localhost", 9300), Version.CURRENT);
+// final DiscoveryNode smallNode = new DiscoveryNode("small", new InetSocketTransportAddress("localhost", 9300));
+ final DiscoveryNode smallNode = bigNode;
+
+ transportServiceClient.connectToNode(bigNode);
+ transportServiceClient.connectToNode(smallNode);
+
+ transportServiceServer.registerHandler("benchmark", new BaseTransportRequestHandler<BenchmarkMessageRequest>() {
+ @Override
+ public BenchmarkMessageRequest newInstance() {
+ return new BenchmarkMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new BenchmarkMessageResponse(request));
+ }
+ });
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS);
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
+ transportServiceClient.submitRequest(bigNode, "benchmark", message, options().withType(TransportRequestOptions.Type.BULK), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ }
+ latch.countDown();
+ }
+ }).start();
+ }
+
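+ // Meanwhile, time a small STATE-channel request to see how it fares while the
+ // BULK channel is saturated with 10MB payloads.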
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < 1; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(2, BytesRef.EMPTY_BYTES);
+ long start = System.currentTimeMillis();
+ transportServiceClient.submitRequest(smallNode, "benchmark", message, options().withType(TransportRequestOptions.Type.STATE), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ long took = System.currentTimeMillis() - start;
+ System.out.println("Took " + took + "ms");
+ }
+ }
+ }).start();
+
+ latch.await();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java
new file mode 100644
index 0000000..3c206ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Round-trip throughput benchmark for the local and netty transports: many
+ * client threads issue request/response pairs and overall TPS is reported.
+ */
+public class TransportBenchmark {
+
+ static enum Type {
+ LOCAL {
+ @Override
+ public Transport newTransport(Settings settings, ThreadPool threadPool) {
+ return new LocalTransport(settings, threadPool, Version.CURRENT);
+ }
+ },
+ NETTY {
+ @Override
+ public Transport newTransport(Settings settings, ThreadPool threadPool) {
+ return new NettyTransport(settings, threadPool, new NetworkService(ImmutableSettings.EMPTY), Version.CURRENT);
+ }
+ };
+
+ public abstract Transport newTransport(Settings settings, ThreadPool threadPool);
+ }
+
+ public static void main(String[] args) {
+ final String executor = ThreadPool.Names.GENERIC;
+ final boolean waitForRequest = true;
+ final ByteSizeValue payloadSize = new ByteSizeValue(100, ByteSizeUnit.BYTES);
+ final int NUMBER_OF_CLIENTS = 10;
+ final int NUMBER_OF_ITERATIONS = 100000;
+ final byte[] payload = new byte[(int) payloadSize.bytes()];
+ final AtomicLong idGenerator = new AtomicLong();
+ final Type type = Type.NETTY;
+
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .build();
+
+ final ThreadPool serverThreadPool = new ThreadPool();
+ final TransportService serverTransportService = new TransportService(type.newTransport(settings, serverThreadPool), serverThreadPool).start();
+
+ final ThreadPool clientThreadPool = new ThreadPool();
+ final TransportService clientTransportService = new TransportService(type.newTransport(settings, clientThreadPool), clientThreadPool).start();
+
+ final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT);
+
+ serverTransportService.registerHandler("benchmark", new BaseTransportRequestHandler<BenchmarkMessageRequest>() {
+ @Override
+ public BenchmarkMessageRequest newInstance() {
+ return new BenchmarkMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new BenchmarkMessageResponse(request));
+ }
+ });
+
+ clientTransportService.connectToNode(node);
+
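+ // Warm up with 10,000 synchronous round trips before the timed run.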
+ for (int i = 0; i < 10000; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
+ clientTransportService.submitRequest(node, "benchmark", message, new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ }
+
+
+ Thread[] clients = new Thread[NUMBER_OF_CLIENTS];
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS);
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ clients[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) {
+ final long id = idGenerator.incrementAndGet();
+ BenchmarkMessageRequest request = new BenchmarkMessageRequest(id, payload);
+ BaseTransportResponseHandler<BenchmarkMessageResponse> handler = new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ if (response.id() != id) {
+ System.out.println("NO ID MATCH [" + response.id() + "] and [" + id + "]");
+ }
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ latch.countDown();
+ }
+ };
+
+ if (waitForRequest) {
+ clientTransportService.submitRequest(node, "benchmark", request, handler).txGet();
+ } else {
+ clientTransportService.sendRequest(node, "benchmark", request, handler);
+ }
+ }
+ }
+ });
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ clients[i].start();
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ stopWatch.stop();
+
+ System.out.println("Ran [" + NUMBER_OF_CLIENTS + "], each with [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + stopWatch.totalTime() + "], TPS: " + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac());
+
+ clientTransportService.close();
+ clientThreadPool.shutdownNow();
+
+ serverTransportService.close();
+ serverThreadPool.shutdownNow();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java b/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java
new file mode 100644
index 0000000..61686eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport.netty;
+
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+
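+/**
+ * Raw netty echo round-trip benchmark, independent of the Elasticsearch
+ * transport layer: a server echoes each message and the client measures TPS.
+ */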
+public class NettyEchoBenchmark {
+
+ public static void main(String[] args) {
+ final int payloadSize = 100;
+ int CYCLE_SIZE = 50000;
+ final long NUMBER_OF_ITERATIONS = 500000;
+
+ ChannelBuffer message = ChannelBuffers.buffer(payloadSize);
+ for (int i = 0; i < message.capacity(); i++) {
+ message.writeByte((byte) i);
+ }
+
+ // Configure the server.
+ ServerBootstrap serverBootstrap = new ServerBootstrap(
+ new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool()));
+
+ // Set up the pipeline factory.
+ serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(new EchoServerHandler());
+ }
+ });
+
+ // Bind and start to accept incoming connections.
+ serverBootstrap.bind(new InetSocketAddress(9000));
+
+ ClientBootstrap clientBootstrap = new ClientBootstrap(
+ new NioClientSocketChannelFactory(
+ Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool()));
+
+// ClientBootstrap clientBootstrap = new ClientBootstrap(
+// new OioClientSocketChannelFactory(Executors.newCachedThreadPool()));
+
+ // Set up the pipeline factory.
+ final EchoClientHandler clientHandler = new EchoClientHandler();
+ clientBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(clientHandler);
+ }
+ });
+
+ // Start the connection attempt.
+ ChannelFuture future = clientBootstrap.connect(new InetSocketAddress("localhost", 9000));
+ future.awaitUninterruptibly();
+ Channel clientChannel = future.getChannel();
+
+ System.out.println("Warming up...");
+ for (long i = 0; i < 10000; i++) {
+ clientHandler.latch = new CountDownLatch(1);
+ clientChannel.write(message);
+ try {
+ clientHandler.latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ System.out.println("Warmed up");
+
+
+ long start = System.currentTimeMillis();
+ long cycleStart = System.currentTimeMillis();
+ for (long i = 1; i < NUMBER_OF_ITERATIONS; i++) {
+ clientHandler.latch = new CountDownLatch(1);
+ clientChannel.write(message);
+ try {
+ clientHandler.latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ if ((i % CYCLE_SIZE) == 0) {
+ long cycleEnd = System.currentTimeMillis();
+ System.out.println("Ran 50000, TPS " + (CYCLE_SIZE / ((double) (cycleEnd - cycleStart) / 1000)));
+ cycleStart = cycleEnd;
+ }
+ }
+ long end = System.currentTimeMillis();
+ double seconds = (end - start) / 1000.0;
+ System.out.println("Ran [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + seconds + "s], TPS: " + (NUMBER_OF_ITERATIONS / seconds));
+
+ clientChannel.close().awaitUninterruptibly();
+ clientBootstrap.releaseExternalResources();
+ serverBootstrap.releaseExternalResources();
+ }
+
+ public static class EchoClientHandler extends SimpleChannelUpstreamHandler {
+
+ public volatile CountDownLatch latch;
+
+ public EchoClientHandler() {
+ }
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ latch.countDown();
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+ e.getCause().printStackTrace();
+ e.getChannel().close();
+ }
+ }
+
+
+ public static class EchoServerHandler extends SimpleChannelUpstreamHandler {
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ e.getChannel().write(e.getMessage());
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+ // Close the connection when an exception is raised.
+ e.getCause().printStackTrace();
+ e.getChannel().close();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java b/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java
new file mode 100644
index 0000000..d9995e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.uuid;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Measures random UUID generation throughput, single-threaded and then across
+ * NUMBER_OF_THREADS concurrent threads.
+ */
+public class SimpleUuidBenchmark {
+
+ private static long NUMBER_OF_ITERATIONS = 10000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ UUID.randomUUID().toString();
+ }
+ System.out.println("Generated in " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Generating using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ UUID.randomUUID().toString();
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Generate in " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java
new file mode 100644
index 0000000..3e4bea1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.blocks;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class SimpleBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void verifyIndexAndClusterReadOnly() throws Exception {
+ // cluster.read_only = null: write and metadata not blocked
+ canCreateIndex("test1");
+ canIndexDocument("test1");
+ setIndexReadOnly("test1", "false");
+ canIndexExists("test1");
+
+ // cluster.read_only = true: block write and metadata
+ setClusterReadOnly("true");
+ canNotCreateIndex("test2");
+ // even if index has index.read_only = false
+ canNotIndexDocument("test1");
+ canNotIndexExists("test1");
+
+ // cluster.read_only = false: removes the block
+ setClusterReadOnly("false");
+ canCreateIndex("test2");
+ canIndexDocument("test2");
+ canIndexDocument("test1");
+ canIndexExists("test1");
+
+        // a newly created index has no blocks
+ canCreateIndex("ro");
+ canIndexDocument("ro");
+ canIndexExists("ro");
+
+ // adds index write and metadata block
+ setIndexReadOnly( "ro", "true");
+ canNotIndexDocument("ro");
+ canNotIndexExists("ro");
+
+ // other indices not blocked
+ canCreateIndex("rw");
+ canIndexDocument("rw");
+ canIndexExists("rw");
+
+ // blocks can be removed
+ setIndexReadOnly("ro", "false");
+ canIndexDocument("ro");
+ canIndexExists("ro");
+ }
+
+ @Test
+ public void testIndexReadWriteMetaDataBlocks() {
+ canCreateIndex("test1");
+ canIndexDocument("test1");
+ client().admin().indices().prepareUpdateSettings("test1")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true))
+ .execute().actionGet();
+ canNotIndexDocument("test1");
+ client().admin().indices().prepareUpdateSettings("test1")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_BLOCKS_WRITE, false))
+ .execute().actionGet();
+ canIndexDocument("test1");
+ }
+
+ private void canCreateIndex(String index) {
+ try {
+ CreateIndexResponse r = client().admin().indices().prepareCreate(index).execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotCreateIndex(String index) {
+ try {
+ client().admin().indices().prepareCreate(index).execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void canIndexDocument(String index) {
+ try {
+ IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
+ builder.setSource("foo", "bar");
+ IndexResponse r = builder.execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotIndexDocument(String index) {
+ try {
+ IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
+ builder.setSource("foo", "bar");
+ builder.execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void canIndexExists(String index) {
+ try {
+ IndicesExistsResponse r = client().admin().indices().prepareExists(index).execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotIndexExists(String index) {
+ try {
+            client().admin().indices().prepareExists(index).execute().actionGet();
+            fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void setClusterReadOnly(String value) {
+ Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+ }
+
+ private void setIndexReadOnly(String index, Object value) {
+ HashMap<String, Object> newSettings = new HashMap<String, Object>();
+ newSettings.put(IndexMetaData.SETTING_READ_ONLY, value);
+
+ UpdateSettingsRequestBuilder settingsRequest = client().admin().indices().prepareUpdateSettings(index);
+ settingsRequest.setSettings(newSettings);
+ UpdateSettingsResponse settingsResponse = settingsRequest.execute().actionGet();
+ assertThat(settingsResponse, notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java b/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java
new file mode 100644
index 0000000..4d841c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.broadcast;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests for broadcast operations (count), exercising the available
+ * broadcast operation threading models and per-shard failure reporting.
+ */
+public class BroadcastActionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBroadcastOperations() throws IOException {
+ prepareCreate("test", 1).execute().actionGet(5000);
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ flush();
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+ refresh();
+
+ logger.info("Count");
+        // check count under each broadcast operation threading model
+ for (int i = 0; i < 5; i++) {
+            // successful case: every shard responds
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.NO_THREADS).get();
+            assertThat(countResponse.getCount(), equalTo(2L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.SINGLE_THREAD).get();
+            assertThat(countResponse.getCount(), equalTo(2L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.THREAD_PER_SHARD).get();
+            assertThat(countResponse.getCount(), equalTo(2L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+            // failure case (simply a query that can't be parsed)
+ CountResponse countResponse = client().count(countRequest("test").source("{ term : { _type : \"type1 } }".getBytes(Charsets.UTF_8))).actionGet();
+
+            assertThat(countResponse.getCount(), equalTo(0L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(countResponse.getFailedShards(), equalTo(5));
+ for (ShardOperationFailedException exp : countResponse.getShardFailures()) {
+ assertThat(exp.reason(), containsString("QueryParsingException"));
+ }
+ }
+
+ }
+
+ private XContentBuilder source(String id, String nameValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java b/src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
new file mode 100644
index 0000000..082be6d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler.V;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.lang.reflect.Array;
+import java.util.Random;
+import java.util.concurrent.ConcurrentMap;
+
+public class MockPageCacheRecycler extends PageCacheRecycler {
+
+ private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = Maps.newConcurrentMap();
+
+ public static void ensureAllPagesAreReleased() {
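+        // Fail, with the recorded acquisition stack trace as the cause, if any page is still outstanding.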
+ if (ACQUIRED_PAGES.size() > 0) {
+ final Throwable t = ACQUIRED_PAGES.entrySet().iterator().next().getValue();
+ throw new RuntimeException(ACQUIRED_PAGES.size() + " pages have not been released", t);
+ }
+ ACQUIRED_PAGES.clear();
+ }
+
+ private final Random random;
+
+ @Inject
+ public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
+ super(settings, threadPool);
+ final long seed = settings.getAsLong(TestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
+ random = new Random(seed);
+ }
+
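+    // Records the acquisition stack trace of a page and returns a wrapper that, on release, verifies the
+    // releasing thread and poisons the page content so that use-after-release bugs surface quickly.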
+ private <T> V<T> wrap(final V<T> v) {
+ ACQUIRED_PAGES.put(v, new Throwable());
+ final Thread t = Thread.currentThread();
+ return new V<T>() {
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ if (t != Thread.currentThread()) {
+ // Releasing from a different thread doesn't break anything but this is bad practice as pages should be acquired
+ // as late as possible and released as soon as possible in a try/finally fashion
+ throw new RuntimeException("Page was allocated in " + t + " but released in " + Thread.currentThread());
+ }
+                final Throwable acquired = ACQUIRED_PAGES.remove(v);
+                if (acquired == null) {
+ throw new IllegalStateException("Releasing a page that has not been acquired");
+ }
+ final T ref = v();
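+                // Poison the released page: null out object pages and fill primitive pages with random
+                // byte values, so that any read after release is likely to fail loudly.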
+ for (int i = 0; i < Array.getLength(ref); ++i) {
+ if (ref instanceof Object[]) {
+ Array.set(ref, i, null);
+ } else {
+ Array.set(ref, i, (byte) random.nextInt(256));
+ }
+ }
+ return v.release();
+ }
+
+ @Override
+ public T v() {
+ return v.v();
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return v.isRecycled();
+ }
+
+ };
+ }
+
+ @Override
+ public V<byte[]> bytePage(boolean clear) {
+ final V<byte[]> page = super.bytePage(clear);
+ if (!clear) {
+ random.nextBytes(page.v());
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<int[]> intPage(boolean clear) {
+ final V<int[]> page = super.intPage(clear);
+ if (!clear) {
+ for (int i = 0; i < page.v().length; ++i) {
+ page.v()[i] = random.nextInt();
+ }
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<long[]> longPage(boolean clear) {
+ final V<long[]> page = super.longPage(clear);
+ if (!clear) {
+ for (int i = 0; i < page.v().length; ++i) {
+ page.v()[i] = random.nextLong();
+ }
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<double[]> doublePage(boolean clear) {
+ final V<double[]> page = super.doublePage(clear);
+ if (!clear) {
+ for (int i = 0; i < page.v().length; ++i) {
+ page.v()[i] = random.nextDouble() - 0.5;
+ }
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<Object[]> objectPage() {
+ return wrap(super.objectPage());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
new file mode 100644
index 0000000..087b7d7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0, transportClientRatio = 1.0)
+public class TransportClientTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testPickingUpChangesInDiscoveryNode() {
+ String nodeName = cluster().startNode(ImmutableSettings.builder().put("node.data", false));
+
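+        // transportClientRatio = 1.0 in the @ClusterScope above makes the test cluster hand out transport clients, so this cast is safe.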
+ TransportClient client = (TransportClient) cluster().client(nodeName);
+ assertThat(client.connectedNodes().get(0).dataNode(), Matchers.equalTo(false));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java b/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java
new file mode 100644
index 0000000..06d4569
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Scoped as TEST, because if the test with the cluster read-only block fails, all other tests fail as well, as the block is not cleaned up properly
+ */
+@ClusterScope(scope=Scope.TEST)
+public class BlockClusterStatsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBlocks() throws Exception {
+ createIndex("foo");
+ ClusterUpdateSettingsResponse updateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ ImmutableSettings.settingsBuilder().put("cluster.blocks.read_only", true).build()).get();
+ assertThat(updateSettingsResponse.isAcknowledged(), is(true));
+ UpdateSettingsResponse indexSettingsResponse = client().admin().indices().prepareUpdateSettings("foo").setSettings(
+ ImmutableSettings.settingsBuilder().put("index.blocks.read_only", true)).get();
+ assertThat(indexSettingsResponse.isAcknowledged(), is(true));
+
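+        // setBlocks(true) requests the blocks section explicitly; the default request further below should omit it.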
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setBlocks(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().blocks().global(), hasSize(1));
+ assertThat(clusterStateResponseUnfiltered.getState().blocks().indices().size(), is(1));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().blocks().global(), hasSize(0));
+ assertThat(clusterStateResponse.getState().blocks().indices().size(), is(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java b/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java
new file mode 100644
index 0000000..4212eba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.action.admin.cluster.health.ClusterShardHealth;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.empty;
+
+public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
+
+
+ assertThat(indexHealth.getStatus(), equalTo(counter.status()));
+ assertThat(indexHealth.getNumberOfShards(), equalTo(indexMetaData.getNumberOfShards()));
+ assertThat(indexHealth.getNumberOfReplicas(), equalTo(indexMetaData.getNumberOfReplicas()));
+ assertThat(indexHealth.getActiveShards(), equalTo(counter.active));
+ assertThat(indexHealth.getRelocatingShards(), equalTo(counter.relocating));
+ assertThat(indexHealth.getInitializingShards(), equalTo(counter.initializing));
+ assertThat(indexHealth.getUnassignedShards(), equalTo(counter.unassigned));
+ assertThat(indexHealth.getShards().size(), equalTo(indexMetaData.getNumberOfShards()));
+ assertThat(indexHealth.getValidationFailures(), empty());
+ int totalShards = 0;
+ for (ClusterShardHealth shardHealth : indexHealth.getShards().values()) {
+ totalShards += shardHealth.getActiveShards() + shardHealth.getInitializingShards() + shardHealth.getUnassignedShards();
+ }
+
+ assertThat(totalShards, equalTo(indexMetaData.getNumberOfShards() * (1 + indexMetaData.getNumberOfReplicas())));
+ }
+
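+    // Tallies shard states while routing tables are generated and derives the health status the responses are expected to report.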
+ protected class ShardCounter {
+ public int active;
+ public int relocating;
+ public int initializing;
+ public int unassigned;
+ public int primaryActive;
+ public int primaryInactive;
+
+ public ClusterHealthStatus status() {
+ if (primaryInactive > 0) {
+ return ClusterHealthStatus.RED;
+ }
+ if (unassigned > 0 || initializing > 0) {
+ return ClusterHealthStatus.YELLOW;
+ }
+ return ClusterHealthStatus.GREEN;
+ }
+
+ public void update(ShardRouting shardRouting) {
+ if (shardRouting.active()) {
+ active++;
+ if (shardRouting.primary()) {
+ primaryActive++;
+ }
+ if (shardRouting.relocating()) {
+ relocating++;
+ }
+ return;
+ }
+
+ if (shardRouting.primary()) {
+ primaryInactive++;
+ }
+ if (shardRouting.initializing()) {
+ initializing++;
+ } else {
+ unassigned++;
+ }
+ }
+ }
+
+ static int node_id = 1;
+
+ private ImmutableShardRouting genShardRouting(String index, int shardId, boolean primary) {
+
+ ShardRoutingState state;
+
+ int i = randomInt(40);
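+        // Heavily biased towards STARTED (i in [6, 40]); RELOCATING, INITIALIZING and UNASSIGNED are rare.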
+ if (i > 5) {
+ state = ShardRoutingState.STARTED;
+ } else if (i > 3) {
+ state = ShardRoutingState.RELOCATING;
+ } else if (i > 1) {
+ state = ShardRoutingState.INITIALIZING;
+ } else {
+ state = ShardRoutingState.UNASSIGNED;
+ }
+
+ switch (state) {
+ case UNASSIGNED:
+ return new MutableShardRouting(index, shardId, null, primary, ShardRoutingState.UNASSIGNED, 1);
+ case STARTED:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.STARTED, 1);
+ case INITIALIZING:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.INITIALIZING, 1);
+ case RELOCATING:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), "node_" + Integer.toString(node_id++), primary, ShardRoutingState.RELOCATING, 1);
+ default:
+ throw new ElasticsearchException("Unknown state: " + state.name());
+ }
+
+ }
+
+ private IndexShardRoutingTable genShardRoutingTable(String index, int shardId, int replicas, ShardCounter counter) {
+ IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId), true);
+ ImmutableShardRouting shardRouting = genShardRouting(index, shardId, true);
+ counter.update(shardRouting);
+ builder.addShard(shardRouting);
+ for (; replicas > 0; replicas--) {
+ shardRouting = genShardRouting(index, shardId, false);
+ counter.update(shardRouting);
+ builder.addShard(shardRouting);
+ }
+
+ return builder.build();
+ }
+
+ IndexRoutingTable genIndexRoutingTable(IndexMetaData indexMetaData, ShardCounter counter) {
+ IndexRoutingTable.Builder builder = IndexRoutingTable.builder(indexMetaData.index());
+ for (int shard = 0; shard < indexMetaData.numberOfShards(); shard++) {
+ builder.addIndexShard(genShardRoutingTable(indexMetaData.index(), shard, indexMetaData.getNumberOfReplicas(), counter));
+ }
+ return builder.build();
+ }
+
+ @Test
+ public void testClusterIndexHealth() {
+ int numberOfShards = randomInt(3) + 1;
+ int numberOfReplicas = randomInt(4);
+ IndexMetaData indexMetaData = IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
+ ShardCounter counter = new ShardCounter();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ logger.info("index status: {}, expected {}", indexHealth.getStatus(), counter.status());
+ assertIndexHealth(indexHealth, counter, indexMetaData);
+ }
+
+ private void assertClusterHealth(ClusterHealthResponse clusterHealth, ShardCounter counter) {
+ assertThat(clusterHealth.getStatus(), equalTo(counter.status()));
+ assertThat(clusterHealth.getActiveShards(), equalTo(counter.active));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(counter.primaryActive));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(counter.initializing));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(counter.relocating));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(counter.unassigned));
+ assertThat(clusterHealth.getValidationFailures(), empty());
+ }
+
+ @Test
+ public void testClusterHealth() {
+ ShardCounter counter = new ShardCounter();
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ MetaData.Builder metaData = MetaData.builder();
+ for (int i = randomInt(4); i >= 0; i--) {
+ int numberOfShards = randomInt(3) + 1;
+ int numberOfReplicas = randomInt(4);
+ IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+ metaData.put(indexMetaData, true);
+ routingTable.add(indexRoutingTable);
+ }
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(null), clusterState);
+ logger.info("cluster status: {}, expected {}", clusterHealth.getStatus(), counter.status());
+
+ assertClusterHealth(clusterHealth, counter);
+ }
+
+ @Test
+ public void testValidations() {
+ IndexMetaData indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(2).build();
+ ShardCounter counter = new ShardCounter();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+ indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(3).build();
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ assertThat(indexHealth.getValidationFailures(), Matchers.hasSize(2));
+
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ MetaData.Builder metaData = MetaData.builder();
+ metaData.put(indexMetaData, true);
+ routingTable.add(indexRoutingTable);
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(null), clusterState);
+ // currently we have no cluster level validation failures as index validation issues are reported per index.
+ assertThat(clusterHealth.getValidationFailures(), Matchers.hasSize(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java b/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java
new file mode 100644
index 0000000..2dc906f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterHealthTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testHealth() {
+ logger.info("--> running cluster health on an index that does not exists");
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(true));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
+
+ logger.info("--> running cluster wide health");
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout("10s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
+
+ logger.info("--> Creating index test1 with zero replicas");
+ client().admin().indices().prepareCreate("test1")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ logger.info("--> running cluster health on an index that does exists");
+ healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("10s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> running cluster health on an index that does exists and an index that doesn't exists");
+ healthResponse = client().admin().cluster().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(true));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().size(), equalTo(1));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java
new file mode 100644
index 0000000..4789d6b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java
@@ -0,0 +1,658 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Singleton;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for cluster state update task submission: timeouts, acknowledgements,
+ * pending task reporting and master election callbacks.
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class ClusterServiceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testTimeoutUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService1 = cluster().getInstance(ClusterService.class);
+ final CountDownLatch block = new CountDownLatch(1);
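+        // Occupy the single cluster state update thread with a blocking task so the next task times out while still queued.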
+ clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ try {
+ block.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+
+ final CountDownLatch timedOut = new CountDownLatch(1);
+ final AtomicBoolean executeCalled = new AtomicBoolean();
+ clusterService1.submitStateUpdateTask("test2", new TimeoutClusterStateUpdateTask() {
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueMillis(2);
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ timedOut.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ executeCalled.set(true);
+ return currentState;
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+
+ assertThat(timedOut.await(500, TimeUnit.MILLISECONDS), equalTo(true));
+ block.countDown();
+ Thread.sleep(100); // sleep a bit to double check that execute on the timed out update task is not called...
+ assertThat(executeCalled.get(), equalTo(false));
+ }
+
+ @Test
+ public void testAckedUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testAckedUpdateTaskSameClusterState() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testAckedUpdateTaskNoAckExpected() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return false;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+ }
+
+ @Test
+ public void testAckedUpdateTaskTimeoutZero() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return false;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(0);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(false));
+ assertThat(ackTimeout.get(), equalTo(true));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testPendingUpdateTask() throws Exception {
+ Settings zenSettings = settingsBuilder()
+ .put("discovery.type", "zen").build();
+ String node_0 = cluster().startNode(zenSettings);
+ cluster().startNodeClient(zenSettings);
+
+ ClusterService clusterService = cluster().getInstance(ClusterService.class, node_0);
+ final CountDownLatch block1 = new CountDownLatch(1);
+ final CountDownLatch invoked1 = new CountDownLatch(1);
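+        // Task "1" blocks the update thread, keeping tasks "2".."10" submitted below visible in the pending list.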
+ clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked1.countDown();
+ try {
+ block1.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ invoked1.countDown();
+ fail();
+ }
+ });
+ invoked1.await();
+ final CountDownLatch invoked2 = new CountDownLatch(9);
+ for (int i = 2; i <= 10; i++) {
+ clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked2.countDown();
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+ }
+
+ // The tasks can be re-ordered, so we need to check out-of-order
+ Set<String> controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5", "6", "7", "8", "9", "10"));
+ List<PendingClusterTask> pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks.size(), equalTo(9));
+ for (PendingClusterTask task : pendingClusterTasks) {
+ assertTrue(controlSources.remove(task.source().string()));
+ }
+ assertTrue(controlSources.isEmpty());
+
+ controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5", "6", "7", "8", "9", "10"));
+ PendingClusterTasksResponse response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks().size(), equalTo(9));
+ for (PendingClusterTask task : response) {
+ assertTrue(controlSources.remove(task.source().string()));
+ }
+ assertTrue(controlSources.isEmpty());
+ block1.countDown();
+ invoked2.await();
+
+ pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks, empty());
+ response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks(), empty());
+
+ final CountDownLatch block2 = new CountDownLatch(1);
+ final CountDownLatch invoked3 = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked3.countDown();
+ try {
+ block2.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ invoked3.countDown();
+ fail();
+ }
+ });
+ invoked3.await();
+
+ for (int i = 2; i <= 5; i++) {
+ clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+ }
+ Thread.sleep(100);
+
+ pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks.size(), equalTo(4));
+ controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5"));
+ for (PendingClusterTask task : pendingClusterTasks) {
+ assertTrue(controlSources.remove(task.source().string()));
+ }
+ assertTrue(controlSources.isEmpty());
+
+ response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks().size(), equalTo(4));
+ controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5"));
+ for (PendingClusterTask task : response) {
+ assertTrue(controlSources.remove(task.source().string()));
+            assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
+ }
+ assertTrue(controlSources.isEmpty());
+ block2.countDown();
+ }
+
+ @Test
+ public void testListenerCallbacks() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 1)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("plugin.types", TestPlugin.class.getName())
+ .build();
+
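+        // plugin.types above loads TestPlugin, whose MasterAwareService records onMaster/offMaster callbacks.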
+ cluster().startNode(settings);
+ ClusterService clusterService1 = cluster().getInstance(ClusterService.class);
+ MasterAwareService testService1 = cluster().getInstance(MasterAwareService.class);
+
+ // the first node should be a master as the minimum required is 1
+ assertThat(clusterService1.state().nodes().masterNode(), notNullValue());
+ assertThat(clusterService1.state().nodes().localNodeMaster(), is(true));
+ assertThat(testService1.master(), is(true));
+
+ String node_1 = cluster().startNode(settings);
+ final ClusterService clusterService2 = cluster().getInstance(ClusterService.class, node_1);
+ MasterAwareService testService2 = cluster().getInstance(MasterAwareService.class, node_1);
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // the second node should not be the master as node1 is already the master.
+ assertThat(clusterService2.state().nodes().localNodeMaster(), is(false));
+ assertThat(testService2.master(), is(false));
+
+ cluster().stopCurrentMasterNode();
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // now that node1 is closed, node2 should be elected as master
+ assertThat(clusterService2.state().nodes().localNodeMaster(), is(true));
+ assertThat(testService2.master(), is(true));
+
+ Settings newSettings = settingsBuilder()
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.type", "zen")
+ .build();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet();
+
+ // there should not be any master as the minimum number of required eligible masters is not met
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ return clusterService2.state().nodes().masterNode() == null;
+ }
+ });
+ assertThat(testService2.master(), is(false));
+
+ String node_2 = cluster().startNode(settings);
+ clusterService1 = cluster().getInstance(ClusterService.class, node_2);
+ testService1 = cluster().getInstance(MasterAwareService.class, node_2);
+
+        // make sure both nodes see each other, otherwise the master node below could be null if node 2 is master and node 1 didn't receive the updated cluster state...
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").execute().actionGet().isTimedOut(), is(false));
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").execute().actionGet().isTimedOut(), is(false));
+
+        // now that a second master-eligible node has been started, a new master should be elected
+ assertThat(clusterService1.state().nodes().masterNode(), is(notNullValue()));
+ if (node_2.equals(clusterService1.state().nodes().masterNode().name())) {
+ assertThat(testService1.master(), is(true));
+ assertThat(testService2.master(), is(false));
+ } else {
+ assertThat(testService1.master(), is(false));
+ assertThat(testService2.master(), is(true));
+ }
+
+ }
+
+ public static class TestPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test plugin";
+ }
+
+ @Override
+ public String description() {
+ return "test plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends LifecycleComponent>> services() {
+ List<Class<? extends LifecycleComponent>> services = new ArrayList<Class<? extends LifecycleComponent>>(1);
+ services.add(MasterAwareService.class);
+ return services;
+ }
+ }
+
+ @Singleton
+ public static class MasterAwareService extends AbstractLifecycleComponent<MasterAwareService> implements LocalNodeMasterListener {
+
+ private final ClusterService clusterService;
+ private volatile boolean master;
+
+ @Inject
+ public MasterAwareService(Settings settings, ClusterService clusterService) {
+ super(settings);
+ clusterService.add(this);
+ this.clusterService = clusterService;
+ logger.info("initialized test service");
+ }
+
+ @Override
+ public void onMaster() {
+ logger.info("on master [" + clusterService.localNode() + "]");
+ master = true;
+ }
+
+ @Override
+ public void offMaster() {
+ logger.info("off master [" + clusterService.localNode() + "]");
+ master = false;
+ }
+
+ public boolean master() {
+ return master;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public String executorName() {
+ return ThreadPool.Names.SAME;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
new file mode 100644
index 0000000..443debf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class DiskUsageTests extends ElasticsearchTestCase {
+
+ @Test
+ public void diskUsageCalcTest() {
+ DiskUsage du = new DiskUsage("node1", 100, 40);
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(40.0));
+ assertThat(du.getFreeBytes(), equalTo(40L));
+ assertThat(du.getUsedBytes(), equalTo(60L));
+ assertThat(du.getTotalBytes(), equalTo(100L));
+
+ }
+
+ @Test
+ public void randomDiskUsageTest() {
+ int iters = atLeast(1000);
+        for (int i = 0; i < iters; i++) {
+ long total = between(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ long free = between(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ if (free > total || total <= 0) {
+ try {
+ new DiskUsage("random", total, free);
+ fail("should never reach this");
+                } catch (IllegalStateException e) { // expected: invalid total/free combinations are rejected
+ }
+ } else {
+ DiskUsage du = new DiskUsage("random", total, free);
+ assertThat(du.getFreeBytes(), equalTo(free));
+ assertThat(du.getTotalBytes(), equalTo(total));
+ assertThat(du.getUsedBytes(), equalTo(total - free));
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0 * ((double)free / total)));
+ }
+ }
+ }
+}
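
The arithmetic being pinned down here is small: used = total - free, free percentage = 100 * free / total, with non-positive totals and free > total rejected up front. A self-contained sketch of those invariants (a hypothetical stand-in, not the actual org.elasticsearch.cluster.DiskUsage source):

    // Reproduces exactly the invariants the two tests above assert.
    public class DiskUsageSketch {
        private final long totalBytes;
        private final long freeBytes;

        public DiskUsageSketch(long totalBytes, long freeBytes) {
            if (totalBytes <= 0 || freeBytes > totalBytes) {
                throw new IllegalStateException(
                        "invalid disk usage: total=" + totalBytes + " free=" + freeBytes);
            }
            this.totalBytes = totalBytes;
            this.freeBytes = freeBytes;
        }

        public long getUsedBytes() {
            return totalBytes - freeBytes;
        }

        public double getFreeDiskAsPercentage() {
            return 100.0 * ((double) freeBytes / totalBytes);
        }

        public static void main(String[] args) {
            DiskUsageSketch du = new DiskUsageSketch(100, 40);
            System.out.println(du.getUsedBytes());            // 60
            System.out.println(du.getFreeDiskAsPercentage()); // 40.0
        }
    }
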
diff --git a/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java b/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java
new file mode 100644
index 0000000..9747704
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.TEST, numNodes=0)
+public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMinimumMasterNodes() throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("gateway.type", "local")
+ .put("index.number_of_shards", 1)
+ .build();
+
+ logger.info("--> start first node");
+ cluster().startNode(settings);
+
+ logger.info("--> should be blocked, no master...");
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+
+ logger.info("--> start second node, cluster should be formed");
+ cluster().startNode(settings);
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+        // client() picks a random node per call, so the local no-master check is repeated to cover both nodes
+        state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+        assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+        state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+        assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(false));
+
+ createIndex("test");
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ // make sure that all shards recovered before trying to flush
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(2).execute().actionGet().getActiveShards(), equalTo(2));
+ // flush for simpler debugging
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+
+ cluster().stopCurrentMasterNode();
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ });
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+
+ logger.info("--> starting the previous master node again...");
+ cluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+
+ cluster().stopRandomNonMasterNode();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ }), equalTo(true));
+
+ logger.info("--> starting the previous master node again...");
+ cluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+ }
+
+ @Test
+ public void multipleNodesShutdownNonMasterNodes() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 3)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("gateway.type", "local")
+ .build();
+
+ logger.info("--> start first 2 nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ ClusterState state;
+
+        // client() picks a random node per call, so wait for the no-master block twice to cover both nodes
+        awaitBusy(new Predicate<Object>() {
+            public boolean apply(Object obj) {
+                ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+                return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+            }
+        });
+
+        awaitBusy(new Predicate<Object>() {
+            public boolean apply(Object obj) {
+                ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+                return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+            }
+        });
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+
+ logger.info("--> start two more nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(4));
+
+ createIndex("test");
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ // make sure that all shards recovered before trying to flush
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(10).execute().actionGet().isTimedOut(), equalTo(false));
+ // flush for simpler debugging
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+
+ cluster().stopRandomNonMasterNode();
+ cluster().stopRandomNonMasterNode();
+
+ logger.info("--> verify that there is no master anymore on remaining nodes");
+ // spin here to wait till the state is set
+ assertNoMasterBlockOnAllNodes();
+
+ logger.info("--> start back the 2 nodes ");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(4));
+
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+ }
+
+ @Test
+ public void dynamicUpdateMinimumMasterNodes() throws InterruptedException {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("gateway.type", "local")
+ .build();
+
+ logger.info("--> start 2 nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+        // wait until the second node has joined the cluster
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> setting minimum master node to 2");
+ setMinimumMasterNodes(2);
+
+ // make sure it has been processed on all nodes (master node spawns a secondary cluster state update task)
+ for (Client client : cluster()) {
+ assertThat(client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).get().isTimedOut(),
+ equalTo(false));
+ }
+
+ logger.info("--> stopping a node");
+ cluster().stopRandomNode();
+ logger.info("--> verifying min master node has effect");
+ assertNoMasterBlockOnAllNodes();
+
+ logger.info("--> bringing another node up");
+ cluster().startNode(settings);
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ }
+
+ private void assertNoMasterBlockOnAllNodes() throws InterruptedException {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ boolean success = true;
+ for (Client client : cluster()) {
+ ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ success &= state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", client, state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK));
+ }
+ }
+ return success;
+ }
+ }, 20, TimeUnit.SECONDS), equalTo(true));
+ }
+}
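
All three tests hand-pick discovery.zen.minimum_master_nodes; the value they converge on is the usual quorum of master-eligible nodes, floor(n/2) + 1, which is what prevents the split-brain scenarios simulated above. A tiny helper making that rule explicit (hypothetical, not part of the patch):

    // Quorum rule behind the minimum_master_nodes values used above.
    public final class Quorum {
        private Quorum() {}

        public static int minimumMasterNodes(int masterEligibleNodes) {
            return masterEligibleNodes / 2 + 1;
        }

        public static void main(String[] args) {
            // 2 -> 2, 3 -> 2, 4 -> 3; with four nodes and a quorum of three,
            // a surviving pair stays read-only, exactly as the second test expects.
            for (int n = 2; n <= 4; n++) {
                System.out.println(n + " -> " + minimumMasterNodes(n));
            }
        }
    }
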
diff --git a/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java
new file mode 100644
index 0000000..94eb635
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class NoMasterNodeTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testNoMasterActions() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("action.auto_create_index", false)
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("index.number_of_shards", 1)
+ .build();
+
+ TimeValue timeout = TimeValue.timeValueMillis(200);
+
+ cluster().startNode(settings);
+ // start a second node, create an index, and then shut it down so we have no master block
+ cluster().startNode(settings);
+ createIndex("test");
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+ cluster().stopRandomNode();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ }), equalTo(true));
+
+
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ client().prepareMultiGet().add("test", "type1", "1").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ PercolateSourceBuilder percolateSource = new PercolateSourceBuilder();
+            percolateSource.percolateDocument().setDoc(new HashMap<String, Object>());
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(percolateSource).execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ long now = System.currentTimeMillis();
+ try {
+ client().prepareUpdate("test", "type1", "1").setScript("test script").setTimeout(timeout).execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ client().admin().indices().prepareAnalyze("test", "this is a test").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ client().prepareCount("test").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ now = System.currentTimeMillis();
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+ }
+}
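
The contract exercised here is uniform: while the NO_MASTER block is installed, reads fail fast and writes wait out their timeout, but either way the caller sees a ClusterBlockException carrying SERVICE_UNAVAILABLE. A sketch of handling that on the client side (the helper name is hypothetical; the exception API is the one asserted above):

    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.block.ClusterBlockException;
    import org.elasticsearch.rest.RestStatus;

    public class NoMasterAwareReader {

        /** Returns the document source, or null if the cluster currently has no master. */
        public static String tryGet(Client client, String index, String type, String id) {
            try {
                return client.prepareGet(index, type, id).execute().actionGet().getSourceAsString();
            } catch (ClusterBlockException e) {
                if (e.status() == RestStatus.SERVICE_UNAVAILABLE) {
                    // no elected master; safe to retry once the cluster regains quorum
                    return null;
                }
                throw e; // some other block, rethrow
            }
        }
    }
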
diff --git a/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java b/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java
new file mode 100644
index 0000000..9a3f9dd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.CollectionAssertions;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Checking simple filtering capabilities of the cluster state
+ *
+ */
+public class SimpleClusterStateTests extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void indexData() throws Exception {
+ index("foo", "bar", "1", XContentFactory.jsonBuilder().startObject().field("foo", "foo").endObject());
+ index("fuu", "buu", "1", XContentFactory.jsonBuilder().startObject().field("fuu", "fuu").endObject());
+ index("baz", "baz", "1", XContentFactory.jsonBuilder().startObject().field("baz", "baz").endObject());
+ refresh();
+ }
+
+ @Test
+ public void testRoutingTable() throws Exception {
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setRoutingTable(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("foo"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("fuu"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("baz"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("non-existent"), is(false));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("foo"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("fuu"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("baz"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("non-existent"), is(false));
+ }
+
+ @Test
+ public void testNodes() throws Exception {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setNodes(true).get();
+ assertThat(clusterStateResponse.getState().nodes().nodes().size(), is(cluster().size()));
+
+ ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponseFiltered.getState().nodes().nodes().size(), is(0));
+ }
+
+ @Test
+ public void testMetadata() throws Exception {
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setMetaData(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().metaData().indices().size(), is(3));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().metaData().indices().size(), is(0));
+ }
+
+ @Test
+ public void testIndexTemplates() throws Exception {
+ client().admin().indices().preparePutTemplate("foo_template")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ client().admin().indices().preparePutTemplate("fuu_template")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponseUnfiltered.getState().metaData().templates().size(), is(greaterThanOrEqualTo(2)));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndexTemplates("foo_template").get();
+ assertThat(clusterStateResponse.getState().metaData().templates().size(), is(1));
+ }
+
+ @Test
+ public void testThatFilteringByIndexWorksForMetadataAndRoutingTable() throws Exception {
+ ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear()
+ .setMetaData(true).setRoutingTable(true).setIndices("foo", "fuu", "non-existent").get();
+
+ // metadata
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices().size(), is(2));
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices(), CollectionAssertions.hasKey("foo"));
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices(), CollectionAssertions.hasKey("fuu"));
+
+ // routing table
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("foo"), is(true));
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("fuu"), is(true));
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("baz"), is(false));
+ }
+}
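
The response-filtering API used throughout this test follows one pattern: clear() empties the selection, each setter opts one part of the cluster state back in, and setIndices() narrows metadata and routing table to specific indices. Condensed into a helper (hypothetical name, same builder calls as above):

    import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
    import org.elasticsearch.client.Client;

    public class FilteredStateFetch {

        public static ClusterStateResponse metaDataAndRouting(Client client, String... indices) {
            return client.admin().cluster().prepareState()
                    .clear()               // start from an empty selection
                    .setMetaData(true)     // opt the metadata back in
                    .setRoutingTable(true) // opt the routing table back in
                    .setIndices(indices)   // restrict both parts to these indices
                    .get();
        }
    }
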
diff --git a/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java b/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java
new file mode 100644
index 0000000..cb23803
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDataNodes() throws Exception {
+ cluster().startNode(settingsBuilder().put("node.data", false).build());
+ client().admin().indices().create(createIndexRequest("test")).actionGet();
+ try {
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
+ fail("no allocation should happen");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ cluster().startNode(settingsBuilder().put("node.data", false).build());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
+
+ // still no shard should be allocated
+ try {
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
+ fail("no allocation should happen");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+        // now, start a data node, and see that shards get allocated to it
+ cluster().startNode(settingsBuilder().put("node.data", true).build());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
+
+ IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getId(), equalTo("1"));
+ assertThat(indexResponse.getType(), equalTo("type1"));
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
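
The behaviour this test leans on is that an index request does not hang when no shard can be allocated: it waits up to its timeout and then surfaces UnavailableShardsException. A sketch of wrapping that into a boolean outcome (the helper name is hypothetical; the calls are taken from the test):

    import org.elasticsearch.action.UnavailableShardsException;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.client.Requests;

    import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

    public class GuardedIndexer {

        /** Returns true if the document was indexed before the timeout expired. */
        public static boolean tryIndex(Client client, String index, String type, String id, String source) {
            try {
                client.index(Requests.indexRequest(index).type(type).id(id)
                        .source(source).timeout(timeValueSeconds(1))).actionGet();
                return true;
            } catch (UnavailableShardsException e) {
                // no data node could host the primary shard within the timeout
                return false;
            }
        }
    }
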
diff --git a/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java b/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java
new file mode 100644
index 0000000..521c2fc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
+
+ protected final ImmutableSettings.Builder settingsBuilder() {
+ return ImmutableSettings.builder().put("discovery.type", "zen");
+ }
+
+ @Test
+ public void simpleOnlyMasterNodeElection() {
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+ logger.info("--> start master node");
+ final String masterNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> stop master node");
+ cluster().stopCurrentMasterNode();
+
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+
+ logger.info("--> start master node");
+        final String nextMasterEligibleNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+        assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+        assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+ }
+
+ @Test
+ public void electOnlyBetweenMasterNodes() {
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+ logger.info("--> start master node (1)");
+ final String masterNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> start master node (2)");
+        final String nextMasterEligibleNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> closing master node (1)");
+ cluster().stopCurrentMasterNode();
+        assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+        assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+ }
+
+ /**
+ * Tests that putting custom default mapping and then putting a type mapping will have the default mapping merged
+ * to the type mapping.
+ */
+ @Test
+ public void testCustomDefaultMapping() throws Exception {
+ logger.info("--> start master node / non data");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
+
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings("number_of_shards", 1).get());
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
+
+ MappingMetaData defaultMapping = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test").getMappings().get("_default_");
+ assertThat(defaultMapping.getSourceAsMap().get("_timestamp"), notNullValue());
+
+        // re-putting the identical _default_ mapping should still be acknowledged
+        assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type1").setSource("foo", "enabled=true"));
+ MappingMetaData type1Mapping = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test").getMappings().get("type1");
+ assertThat(type1Mapping.getSourceAsMap().get("_timestamp"), notNullValue());
+ }
+
+ @Test
+ public void testAliasFilterValidation() throws Exception {
+ logger.info("--> start master node / non data");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
+
+ assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"string\" },\"field_b\" :{ \"type\" : \"string\" }}}}}}"));
+ client().admin().indices().prepareAliases().addAlias("test", "a_test", FilterBuilders.nestedFilter("table_a", FilterBuilders.termFilter("table_a.field_b", "y"))).get();
+ }
+}
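
These tests combine the two node flavours that node.master/node.data yield: dedicated masters that hold no data, and data nodes that never stand for election, so a master can only ever be elected among the former. The two settings blocks, factored out for reference (the class name is hypothetical):

    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class DedicatedRoles {

        // master-eligible, holds no shards
        public static Settings dedicatedMaster() {
            return ImmutableSettings.builder()
                    .put("discovery.type", "zen")
                    .put("node.master", true)
                    .put("node.data", false)
                    .build();
        }

        // holds shards, never participates in master election
        public static Settings dataOnly() {
            return ImmutableSettings.builder()
                    .put("discovery.type", "zen")
                    .put("node.master", false)
                    .put("node.data", true)
                    .build();
        }
    }
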
diff --git a/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
new file mode 100644
index 0000000..1e1274f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class UpdateSettingsValidationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testUpdateSettingsValidation() throws Exception {
+ String master = cluster().startNode(settingsBuilder().put("node.data", false).build());
+ String node_1 = cluster().startNode(settingsBuilder().put("node.master", false).build());
+ String node_2 = cluster().startNode(settingsBuilder().put("node.master", false).build());
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(10));
+
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).execute().actionGet();
+ healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(5));
+
+ try {
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.refresh_interval", "")).execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ logger.info("Error message: [{}]", ex.getMessage());
+ }
+ }
+}
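
The validation path asserted at the end generalizes: a dynamic index-settings update is checked on the master and rejected wholesale with ElasticsearchIllegalArgumentException when a value is malformed, rather than being partially applied. A sketch of a guarded update built from the same calls (the helper name is hypothetical):

    import org.elasticsearch.ElasticsearchIllegalArgumentException;
    import org.elasticsearch.client.Client;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class SafeSettingsUpdate {

        /** Returns true if the update was accepted, false if validation rejected it. */
        public static boolean setRefreshInterval(Client client, String index, String interval) {
            try {
                client.admin().indices().prepareUpdateSettings(index)
                        .setSettings(settingsBuilder().put("index.refresh_interval", interval))
                        .execute().actionGet();
                return true;
            } catch (ElasticsearchIllegalArgumentException e) {
                // the master validated the new value and refused to apply it
                return false;
            }
        }
    }
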
diff --git a/src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java
new file mode 100644
index 0000000..b05e410
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@LuceneTestCase.Slow
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class ZenUnicastDiscoveryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @TestLogging("discovery.zen:TRACE")
+    // The zen unicast ping override bug manifests itself only rarely; it is very timing dependent.
+ // Without the fix in UnicastZenPing, this test fails roughly 1 out of 10 runs from the command line.
+ public void testMasterElectionNotMissed() throws Exception {
+ final Settings settings = settingsBuilder()
+ // Failure only manifests if multicast ping is disabled!
+ .put("discovery.zen.ping.multicast.ping.enabled", false)
+ .put("discovery.zen.minimum_master_nodes", 2)
+            // Can't use this, b/c at the moment all nodes will only ping localhost:9300
+// .put("discovery.zen.ping.unicast.hosts", "localhost")
+ .put("discovery.zen.ping.unicast.hosts", "localhost:15300,localhost:15301,localhost:15302")
+ .put("transport.tcp.port", "15300-15400")
+ .build();
+
+ final CountDownLatch latch = new CountDownLatch(3);
+ final AtomicArray<String> nodes = new AtomicArray<String>(3);
+ Runnable r1 = new Runnable() {
+
+ @Override
+ public void run() {
+ logger.info("--> start first node");
+ nodes.set(0, cluster().startNode(settings));
+ latch.countDown();
+ }
+ };
+ new Thread(r1).start();
+
+ sleep(between(500, 3000));
+ Runnable r2 = new Runnable() {
+
+ @Override
+ public void run() {
+ logger.info("--> start second node");
+ nodes.set(1, cluster().startNode(settings));
+ latch.countDown();
+ }
+ };
+ new Thread(r2).start();
+
+
+ sleep(between(500, 3000));
+ Runnable r3 = new Runnable() {
+
+ @Override
+ public void run() {
+ logger.info("--> start third node");
+ nodes.set(2, cluster().startNode(settings));
+ latch.countDown();
+ }
+ };
+ new Thread(r3).start();
+ latch.await();
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ DiscoveryNode masterDiscoNode = null;
+ for (String node : nodes.toArray(new String[3])) {
+ ClusterState state = cluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(3));
+ if (masterDiscoNode == null) {
+ masterDiscoNode = state.nodes().masterNode();
+ } else {
+ assertThat(masterDiscoNode.equals(state.nodes().masterNode()), equalTo(true));
+ }
+ }
+ }
+
+}
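
The settings block at the top of the test is the whole unicast recipe: multicast ping off, an explicit host list, and a pinned transport port range so the three nodes land on predictable addresses. Factored out for reference (the class name is hypothetical; the values are verbatim from the test):

    import org.elasticsearch.common.settings.Settings;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class UnicastDiscoverySettings {

        public static Settings threeNodeUnicast() {
            return settingsBuilder()
                    // rely on the explicit host list only
                    .put("discovery.zen.ping.multicast.ping.enabled", false)
                    // quorum of the three master-eligible nodes
                    .put("discovery.zen.minimum_master_nodes", 2)
                    // every node pings these three addresses...
                    .put("discovery.zen.ping.unicast.hosts",
                            "localhost:15300,localhost:15301,localhost:15302")
                    // ...so the nodes must bind inside this range
                    .put("transport.tcp.port", "15300-15400")
                    .build();
        }
    }
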
diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java
new file mode 100644
index 0000000..a2e9d6d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = TEST)
+public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+        // to test that the acknowledgement mechanism is working, we disable the wait for publish;
+        // otherwise the operation would most likely be acknowledged even if it didn't support acks
+ return ImmutableSettings.builder().put("discovery.zen.publish_timeout", 0).build();
+ }
+
+ @Test
+ public void testClusterUpdateSettingsAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get();
+ String excludedNodeId = null;
+ for (NodeInfo nodeInfo : nodesInfo) {
+ if (nodeInfo.getNode().isDataNode()) {
+                excludedNodeId = nodeInfo.getNode().id(); // exclude the data node we just found
+ break;
+ }
+ }
+ assertNotNull(excludedNodeId);
+
+ ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get();
+ assertAcked(clusterUpdateSettingsResponse);
+ assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+
+ for (Client client : clients()) {
+ ClusterState clusterState = getLocalClusterState(client);
+ assertThat(clusterState.routingNodes().metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (clusterState.nodes().get(shardRouting.currentNodeId()).id().equals(excludedNodeId)) {
+ //if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged
+ assertThat(shardRouting.relocating(), equalTo(true));
+ }
+ }
+ }
+ }
+ }
+
+ //let's wait for the relocation to be completed, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ waitForRelocation();
+
+ //removes the allocation exclude settings
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", "")).get();
+ }
+
+ @Test
+ public void testClusterUpdateSettingsNoAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get();
+ String excludedNodeId = null;
+ for (NodeInfo nodeInfo : nodesInfo) {
+ if (nodeInfo.getNode().isDataNode()) {
+                excludedNodeId = nodeInfo.getNode().id(); // exclude the data node we just found
+ break;
+ }
+ }
+ assertNotNull(excludedNodeId);
+
+ ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTimeout("0s")
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get();
+ assertThat(clusterUpdateSettingsResponse.isAcknowledged(), equalTo(false));
+ assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+
+ //let's wait for the relocation to be completed, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ waitForRelocation();
+
+ //removes the allocation exclude settings
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", "")).get();
+ }
+
+ private static ClusterState getLocalClusterState(Client client) {
+ return client.admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+}
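
Both tests separate two things that are easy to conflate: whether a cluster-settings update was applied, and whether every node acknowledged it before the ack timeout. With setTimeout("0s") the second can never happen, which is how the unacknowledged path is forced. A sketch of probing that flag (the helper name is hypothetical; the builder calls are the ones used above):

    import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
    import org.elasticsearch.client.Client;

    import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

    public class AckProbe {

        public static boolean excludeNode(Client client, String nodeId, String timeout) {
            ClusterUpdateSettingsResponse response = client.admin().cluster().prepareUpdateSettings()
                    .setTimeout(timeout) // "0s" forces the unacknowledged path
                    .setTransientSettings(settingsBuilder()
                            .put("cluster.routing.allocation.exclude._id", nodeId))
                    .get();
            // the setting is applied either way; the flag only records whether
            // all nodes acknowledged the new cluster state in time
            return response.isAcknowledged();
        }
    }
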
diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java
new file mode 100644
index 0000000..f13fd70
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = SUITE)
+public class AckTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ //to test that the acknowledgement mechanism works, we'd better disable the wait for publish;
+ //otherwise the operation would most likely be acknowledged even if it doesn't support acking
+ return ImmutableSettings.builder().put("discovery.zen.publish_timeout", 0).build();
+ }
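+
+ // illustrative sketch, grounded only in the APIs used in this class: each positive test below
+ // boils down to asserting the acknowledged flag on the response, e.g.
+ //
+ //   UpdateSettingsResponse response = client().admin().indices().prepareUpdateSettings("test")
+ //           .setTimeout("5s").setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)).get();
+ //   assertThat(response.isAcknowledged(), equalTo(true));
+ //
+ // while the negative tests use a 0s timeout, so the master reports not-acknowledged immediately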
+
+ @Test
+ public void testUpdateSettingsAcknowledgement() {
+ createIndex("test");
+
+ assertAcked(client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)));
+
+ for (Client client : clients()) {
+ String refreshInterval = getLocalClusterState(client).metaData().index("test").settings().get("index.refresh_interval");
+ assertThat(refreshInterval, equalTo("9999"));
+ }
+ }
+
+ @Test
+ public void testUpdateSettingsNoAcknowledgement() {
+ createIndex("test");
+
+ UpdateSettingsResponse updateSettingsResponse = client().admin().indices().prepareUpdateSettings("test").setTimeout("0s")
+ .setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)).get();
+ assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testPutWarmerAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(1));
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.get(0).name(), equalTo("custom_warmer"));
+ }
+ }
+
+ @Test
+ public void testPutWarmerNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteWarmerAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
+
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testDeleteWarmerNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteMappingAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string").get();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1");
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").addTypes("type1").get();
+ assertThat(getMappingsResponse.mappings().get("test").get("type1"), notNullValue());
+
+ assertAcked(client().admin().indices().prepareDeleteMapping("test").setType("type1"));
+
+ for (Client client : clients()) {
+ getMappingsResponse = client.admin().indices().prepareGetMappings("test").addTypes("type1").setLocal(true).get();
+ assertThat(getMappingsResponse.mappings().size(), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testDeleteMappingNoAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string").get();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1");
+
+ DeleteMappingResponse deleteMappingResponse = client().admin().indices().prepareDeleteMapping("test").setTimeout("0s").setType("type1").get();
+ assertThat(deleteMappingResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testClusterRerouteAcknowledgement() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ assertAcked(client().admin().cluster().prepareReroute().add(moveAllocationCommand));
+
+ for (Client client : clients()) {
+ ClusterState clusterState = getLocalClusterState(client);
+ for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
+ //if the shard that we wanted to move is still on the same node, it must be relocating
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.relocating(), equalTo(true));
+ }
+ }
+
+ boolean found = false;
+ for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.state(), anyOf(equalTo(ShardRoutingState.INITIALIZING), equalTo(ShardRoutingState.STARTED)));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+ }
+ //let's wait for the relocation to complete, otherwise the after-test checks (mock directory wrapper etc.) can run into issues
+ waitForRelocation();
+ }
+
+ @Test
+ public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").add(moveAllocationCommand).get();
+ assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand));
+
+ //we check only the master with the latest cluster state, as the dry run made no change and thus we cannot guarantee
+ //that all nodes hold the same cluster state version. We only know nothing needed changing, hence no ack was needed for this update.
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ boolean found = false;
+ for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
+ //the shard that we wanted to move must still be on its original node, since the dryRun flag was set
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.started(), equalTo(true));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+
+ for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ fail("shard [" + mutableShardRouting + "] shouldn't be on node [" + moveAllocationCommand.toString() + "]");
+ }
+ }
+ }
+
+ @Test
+ public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get();
+ //acknowledged anyway as no changes were made
+ assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true));
+ }
+
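+ /**
+ * Picks a random shard from the first data node that owns any shards, chooses another data
+ * node as the relocation target, and builds the corresponding move command.
+ */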
+ private MoveAllocationCommand getAllocationCommand() {
+ String fromNodeId = null;
+ String toNodeId = null;
+ MutableShardRouting shardToBeMoved = null;
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ for (RoutingNode routingNode : clusterStateResponse.getState().routingNodes()) {
+ if (routingNode.node().isDataNode()) {
+ if (fromNodeId == null && routingNode.numberOfOwningShards() > 0) {
+ fromNodeId = routingNode.nodeId();
+ shardToBeMoved = routingNode.get(randomInt(routingNode.size() - 1));
+ } else {
+ toNodeId = routingNode.nodeId();
+ }
+
+ if (toNodeId != null && fromNodeId != null) {
+ break;
+ }
+ }
+ }
+
+ assertNotNull(fromNodeId);
+ assertNotNull(toNodeId);
+ assertNotNull(shardToBeMoved);
+
+ logger.info("==> going to move shard [{}] from [{}] to [{}]", shardToBeMoved, fromNodeId, toNodeId);
+ return new MoveAllocationCommand(shardToBeMoved.shardId(), fromNodeId, toNodeId);
+ }
+
+ @Test
+ public void testIndicesAliasesAcknowledgement() {
+ createIndex("test");
+
+ //also test acknowledgement when submitting an alias that already exists:
+ //the second request makes no change, but we can rely on the cluster state
+ //because the previous operation was acknowledged
+ for (int i = 0; i < 2; i++) {
+ assertAcked(client().admin().indices().prepareAliases().addAlias("test", "alias"));
+
+ for (Client client : clients()) {
+ AliasMetaData aliasMetaData = getLocalClusterState(client).metaData().aliases().get("alias").get("test");
+ assertThat(aliasMetaData.alias(), equalTo("alias"));
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesAliasesNoAcknowledgement() {
+ createIndex("test");
+
+ IndicesAliasesResponse indicesAliasesResponse = client().admin().indices().prepareAliases().addAlias("test", "alias").setTimeout("0s").get();
+ assertThat(indicesAliasesResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testCloseIndexAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ for (Client client : clients()) {
+ IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
+ assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.CLOSE));
+ }
+ }
+
+ @Test
+ public void testCloseIndexNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").setTimeout("0s").get();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testOpenIndexAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ assertAcked(client().admin().indices().prepareOpen("test"));
+
+ for (Client client : clients()) {
+ IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
+ assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.OPEN));
+ }
+ }
+
+ @Test
+ public void testOpenIndexNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").setTimeout("0s").get();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testPutMappingAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed"));
+
+ for (Client client : clients()) {
+ assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
+ }
+ }
+
+ @Test
+ public void testPutMappingNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed").setTimeout("0s").get();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testCreateIndexAcknowledgement() {
+ createIndex("test");
+
+ for (Client client : clients()) {
+ assertThat(getLocalClusterState(client).metaData().indices().containsKey("test"), equalTo(true));
+ }
+
+ //let's wait for green, otherwise the after-test checks (mock directory wrapper etc.) can run into issues,
+ //but we do want to verify that the new index is in every node's cluster state even before green
+ ensureGreen();
+ }
+
+ @Test
+ public void testCreateIndexNoAcknowledgement() {
+ CreateIndexResponse createIndexResponse = client().admin().indices().prepareCreate("test").setTimeout("0s").get();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(false));
+
+ //let's wait for green, otherwise the after-test checks (mock directory wrapper etc.) can run into issues
+ ensureGreen();
+ }
+
+ private static ClusterState getLocalClusterState(Client client) {
+ return client.admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java
new file mode 100644
index 0000000..a63d885
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+
+ @Test
+ public void testSimpleAwareness() throws Exception {
+ Settings commonSettings = ImmutableSettings.settingsBuilder()
+ .put("cluster.routing.schedule", "10ms")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build();
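+
+ // the same awareness setup could be given statically in elasticsearch.yml (illustrative,
+ // assuming the yml spelling of the same keys used above):
+ //   cluster.routing.allocation.awareness.attributes: rack_id
+ //   node.rack_id: rack_1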
+
+ logger.info("--> starting 2 nodes on the same rack");
+ cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build());
+ cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build());
+
+ createIndex("test1");
+ createIndex("test2");
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> starting 1 node on a different rack");
+ String node3 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
+
+ long start = System.currentTimeMillis();
+ ObjectIntOpenHashMap<String> counts;
+ // On slow machines the initial relocation might be delayed
+ do {
+ Thread.sleep(100);
+ logger.info("--> waiting for no relocation");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> checking current state");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ // verify that we have 10 shards on node3: two indices with the default 5 shards and 1 replica
+ // make 20 shard copies, and rack awareness puts one full copy set (10 shards) on rack_2,
+ // which contains only node3
+ counts = new ObjectIntOpenHashMap<String>();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ } while (counts.get(node3) != 10 && (System.currentTimeMillis() - start) < 10000);
+ assertThat(counts.get(node3), equalTo(10));
+ }
+
+ @Test
+ @Slow
+ public void testAwarenessZones() throws InterruptedException {
+ Settings commonSettings = ImmutableSettings.settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .build();
+
+ logger.info("--> starting 6 nodes on different zones");
+ String A_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
+ String B_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ String A_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5)
+ .put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
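+ // 5 shards with 1 replica make 10 shard copies; forced awareness places 5 in each zone, so
+ // each of the four nodes (two per zone) ends up with 2 or 3 copies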
+ assertThat(counts.get(A_1), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(B_1), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(A_0), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(B_0), anyOf(equalTo(2),equalTo(3)));
+ }
+
+ @Test
+ @Slow
+ public void testAwarenessZonesIncrementalNodes() throws InterruptedException {
+ Settings commonSettings = ImmutableSettings.settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .build();
+
+ logger.info("--> starting 2 nodes on zones 'a' & 'b'");
+ String A_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
+ String B_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5)
+ .put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(5));
+ logger.info("--> starting another node in zone 'b'");
+
+ String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ client().admin().cluster().prepareReroute().get();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
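+ // zone b now has two nodes, so its 5 copies rebalance into a 3/2 split, while zone a still
+ // holds all 5 of its copies on its single node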
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+
+ String noZoneNode = cluster().startNode();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ client().admin().cluster().prepareReroute().get();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+ assertThat(counts.containsKey(noZoneNode), equalTo(false));
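+ // forced zone awareness keeps every copy on a zone-tagged node, so the unzoned node stays
+ // empty until the awareness attributes are cleared below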
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(ImmutableSettings.settingsBuilder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
+
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+
+ assertThat(counts.get(A_0), equalTo(3));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+ assertThat(counts.get(noZoneNode), equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java
new file mode 100644
index 0000000..3b96965
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(ClusterRerouteTests.class);
+
+ @Test
+ public void rerouteWithCommands_disableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build();
+ rerouteWithCommands(commonSettings);
+ }
+
+ @Test
+ public void rerouteWithCommands_enableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name())
+ .put("gateway.type", "local")
+ .build();
+ rerouteWithCommands(commonSettings);
+ }
+
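+ /**
+ * With allocation disabled by the given settings, verifies that an explicit allocate command
+ * is a no-op under dry run, actually assigns the primary without the dry run flag, and that
+ * a subsequent move command relocates the started shard from node_1 to node_2.
+ */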
+ private void rerouteWithCommands(Settings commonSettings) throws Exception {
+ String node_1 = cluster().startNode(commonSettings);
+ String node_2 = cluster().startNode(commonSettings);
+
+ logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, *under dry_run*");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .setDryRun(true)
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ logger.info("--> get the state, verify nothing changed because of the dry run");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ logger.info("--> move shard 1 primary from node1 to node2");
+ state = client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2))
+ .execute().actionGet().getState();
+
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+ }
+
+ @Test
+ public void rerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .put("gateway.type", "local")
+ .build();
+ rerouteWithAllocateLocalGateway(commonSettings);
+ }
+
+ @Test
+ public void rerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name())
+ .put("gateway.type", "local")
+ .build();
+ rerouteWithAllocateLocalGateway(commonSettings);
+ }
+
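+ /**
+ * With the local gateway, index metadata survives a full cluster restart even after the shard
+ * data on disk has been deleted; the primary then stays unassigned (cluster RED) until it is
+ * explicitly allocated again through a reroute command.
+ */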
+ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
+ logger.info("--> starting 2 nodes");
+ String node_1 = cluster().startNode(commonSettings);
+ cluster().startNode(commonSettings);
+ assertThat(cluster().size(), equalTo(2));
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
+
+ logger.info("--> closing all nodes");
+ File[] shardLocation = cluster().getInstance(NodeEnvironment.class, node_1).shardLocations(new ShardId("test", 0));
+ assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there!
+ cluster().closeNonSharedNodes(false); // don't wipe data directories; the index needs to still be there!
+
+ logger.info("--> deleting the shard data [{}] ", Arrays.toString(shardLocation));
+ assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // verify again after cluster was shut down
+ assertThat(FileSystemUtils.deleteRecursively(shardLocation), equalTo(true));
+
+ logger.info("--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
+ node_1 = cluster().startNode(commonSettings);
+ cluster().startNode(commonSettings);
+ // wait a bit for the cluster to realize that the shard is not there...
+ // TODO can we get around this? the cluster is RED, so what do we wait for?
+ client().admin().cluster().prepareReroute().get();
+ assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
+ logger.info("--> explicitly allocate primary");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java
new file mode 100644
index 0000000..448276c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(FilteringAllocationTests.class);
+
+ @Test
+ public void testDecommissionNodeNoReplicas() throws Exception {
+ logger.info("--> starting 2 nodes");
+ final String node_0 = cluster().startNode();
+ final String node_1 = cluster().startNode();
+ assertThat(cluster().size(), equalTo(2));
+
+ logger.info("--> creating an index with no replicas");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+ logger.info("--> index some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+
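+ // excluding a node by name via cluster.routing.allocation.exclude._name drains every shard
+ // from it; waitForRelocation() below blocks until the triggered moves have completed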
+ logger.info("--> decommission the second node");
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._name", node_1))
+ .execute().actionGet();
+ waitForRelocation();
+
+ logger.info("--> verify all are allocated on node1 now");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_0));
+ }
+ }
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void testDisablingAllocationFiltering() throws Exception {
+ logger.info("--> starting 2 nodes");
+ final String node_0 = cluster().startNode();
+ final String node_1 = cluster().startNode();
+ assertThat(cluster().size(), equalTo(2));
+
+ logger.info("--> creating an index with no replicas");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ ensureGreen();
+
+ logger.info("--> index some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
+ int numShardsOnNode0 = 0;
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (node_0.equals(clusterState.nodes().get(shardRouting.currentNodeId()).name())) {
+ numShardsOnNode0++;
+ }
+ }
+ }
+
+ if (numShardsOnNode0 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) {
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode0)).execute().actionGet();
+ // make sure we can recover all the shards at once, otherwise we might run into a state where one of the shards has not yet started relocating
+ // but we have already fired up the request that waits for 0 relocating shards.
+ }
+ logger.info("--> remove index from the first node");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", node_0))
+ .execute().actionGet();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ logger.info("--> verify all shards are allocated on node_1 now");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ indexRoutingTable = clusterState.routingTable().index("test");
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_1));
+ }
+ }
+
+ logger.info("--> disable allocation filtering ");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", ""))
+ .execute().actionGet();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ logger.info("--> verify that there are shards allocated on both nodes now");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2));
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java
new file mode 100644
index 0000000..7f537ea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest {
+
+ public void testLoadDefaultShardsAllocator() {
+ assertAllocatorInstance(ImmutableSettings.Builder.EMPTY_SETTINGS, BalancedShardsAllocator.class);
+ }
+
+ public void testLoadByShortKeyShardsAllocator() {
+ Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY)
+ .build();
+ assertAllocatorInstance(build, EvenShardsCountAllocator.class);
+ build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.BALANCED_ALLOCATOR_KEY).build();
+ assertAllocatorInstance(build, BalancedShardsAllocator.class);
+ }
+
+ public void testLoadByClassNameShardsAllocator() {
+ Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "EvenShardsCount").build();
+ assertAllocatorInstance(build, EvenShardsCountAllocator.class);
+
+ build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY,
+ "org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator").build();
+ assertAllocatorInstance(build, EvenShardsCountAllocator.class);
+ }
+
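+ /**
+ * Restarts the cluster from scratch with the given settings so the ShardsAllocator binding is
+ * rebuilt, then asserts that the injected instance is of the expected type.
+ */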
+ private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) {
+ while (cluster().size() != 0) {
+ cluster().stopRandomNode();
+ }
+ cluster().startNode(settings);
+ ShardsAllocator instance = cluster().getInstance(ShardsAllocator.class);
+ assertThat(instance, instanceOf(clazz));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java
new file mode 100644
index 0000000..c886519
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SimpleAllocationTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * Test for
+ * https://groups.google.com/d/msg/elasticsearch/y-SY_HyoB-8/EZdfNt9VO44J
+ */
+ @Test
+ public void testSaneAllocation() {
+ prepareCreate("test", 3,
+ settingsBuilder().put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 1))
+ .execute().actionGet();
+ ensureGreen();
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(2));
+ }
+ }
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(1));
+ }
+ }
+
+ // create another index
+ prepareCreate("test2", 3,
+ settingsBuilder()
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 1))
+ .execute()
+ .actionGet();
+ ensureGreen();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(4));
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java
new file mode 100644
index 0000000..18fbb5e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class MappingMetaDataParserTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseIdAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.idResolved(), equalTo(true));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
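+
+ // note: values handed to createParseContext() count as already provided, so parsing only
+ // resolves what is still missing; routing and timestamp were supplied up front here, which is
+ // why only the id gets extracted from the document above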
+
+ @Test
+ public void testFailIfIdIsNoValue() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startArray("id").value("id").endArray().field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
+ try {
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ fail();
+ } catch (MapperParsingException ex) {
+ // bogus: it's an array
+ }
+
+ bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("id").field("x", "id").endObject().field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ parseContext = md.createParseContext(null, "routing_value", "1");
+ try {
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ fail();
+ } catch (MapperParsingException ex) {
+ // bogus: it's an object
+ }
+ }
+
+ @Test
+ public void testParseRoutingAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "1");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTimestampAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", "routing_value1", null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ assertThat(parseContext.timestampResolved(), equalTo(true));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestamp() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "2");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.idResolved(), equalTo(true));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseRoutingWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "2");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTimestampWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value1", null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ assertThat(parseContext.timestampResolved(), equalTo(true));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithinSamePath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject()
+ .startObject("obj2").field("field1", "value1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithinSamePathAndMoreLevels() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.obj0.id"),
+ new MappingMetaData.Routing(true, "obj1.obj2.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.obj3.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1")
+ .startObject("obj0")
+ .field("id", "id")
+ .endObject()
+ .startObject("obj2")
+ .field("routing", "routing_value")
+ .endObject()
+ .startObject("obj3")
+ .field("timestamp", "1")
+ .endObject()
+ .endObject()
+ .startObject("obj2").field("field1", "value1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+
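+ // JSON permits repeated keys: obj1 appears three times and the parser must
+ // pick up id, routing, and timestamp from the separate occurrences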
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithSameRepeatedObject() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").endObject()
+ .startObject("obj1").field("routing", "routing_value").endObject()
+ .startObject("obj1").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ // "field1" appears twice at the top level: id and timestamp resolve to its
+ // first scalar value, while the routing path field1.field1 never matches
+ @Test
+ public void testParseIdRoutingTimestampWithRepeatedField() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("field1"),
+ new MappingMetaData.Routing(true, "field1.field1"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .field("field1", "bar")
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("foo"));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+
+ @Test
+ public void testParseNoIdRoutingWithRepeatedFieldAndObject() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "field1.field1.field2"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
+
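+ // the routing path field1.field1.field2 matches nothing: the scalar field1 is
+ // seen first and the later object only contains field2, so routing stays null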
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .startObject("field1").field("field2", "bar").endObject()
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+
+ @Test
+ public void testParseRoutingWithRepeatedFieldAndValidRouting() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id(null),
+ new MappingMetaData.Routing(true, "field1.field2"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .startObject("field1").field("field2", "bar").endObject()
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.routing(), equalTo("bar"));
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
new file mode 100644
index 0000000..8ff8c26
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link MetaData}'s index name resolution: concrete index lookup,
+ * wildcard expansion, and the behavior of the various {@link IndicesOptions}.
+ */
+public class MetaDataTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIndexOptions_strict() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions options = IndicesOptions.strict();
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ try {
+ md.concreteIndices(new String[]{"bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ results = md.concreteIndices(new String[]{"foofoo", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ try {
+ md.concreteIndices(new String[]{"foo", "bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ results = md.concreteIndices(new String[]{"barbaz", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ try {
+ md.concreteIndices(new String[]{"barbaz", "bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ results = md.concreteIndices(new String[]{"baz*"}, options);
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(new String[]{"foo", "baz*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+ }
+
+ @Test
+ public void testIndexOptions_lenient() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions options = IndicesOptions.lenient();
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(new String[]{"foofoo", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(new String[]{"foo", "bar"}, options);
+ assertEquals(1, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo"));
+
+ results = md.concreteIndices(new String[]{"barbaz", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(new String[]{"barbaz", "bar"}, options);
+ assertEquals(1, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo"));
+
+ results = md.concreteIndices(new String[]{"baz*"}, options);
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(new String[]{"foo", "baz*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+ }
+
+ @Test
+ public void testIndexOptions_allowUnavailableExpandOpenDisAllowEmpty() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
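+ // fromOptions argument order, as inferred from its use throughout these tests:
+ // (ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed)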
+ IndicesOptions options = IndicesOptions.fromOptions(true, false, true, false);
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertThat(results, emptyArray());
+
+ try {
+ md.concreteIndices(new String[]{"baz*"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ try {
+ md.concreteIndices(new String[]{"foo", "baz*"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ }
+
+ @Test
+ public void testIndexOptions_wildcardExpansion() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo").state(IndexMetaData.State.CLOSE))
+ .put(indexBuilder("bar"))
+ .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ // Only closed
+ IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true);
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ // no wildcards, so wildcard expansion doesn't apply
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ // Only open
+ options = IndicesOptions.fromOptions(false, true, true, false);
+ results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("bar", "foobar"));
+
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foobar", results[0]);
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ // Open and closed
+ options = IndicesOptions.fromOptions(false, true, true, true);
+ results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+ assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo"));
+
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foobar", "foo"));
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ results = md.concreteIndices(new String[]{"-foo*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ results = md.concreteIndices(new String[]{"-*"}, options);
+ assertEquals(0, results.length);
+
+ options = IndicesOptions.fromOptions(false, false, true, true);
+ try {
+ md.concreteIndices(new String[]{"-*"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ }
+
+ @Test
+ public void testIndexOptions_emptyCluster() {
+ MetaData md = MetaData.builder().build();
+ IndicesOptions options = IndicesOptions.strict();
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertThat(results, emptyArray());
+ try {
+ md.concreteIndices(new String[]{"foo"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertThat(results, emptyArray());
+ try {
+ md.concreteIndices(new String[]{"foo*", "bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+
+ options = IndicesOptions.lenient();
+ results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertThat(results, emptyArray());
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertThat(results, emptyArray());
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertThat(results, emptyArray());
+ results = md.concreteIndices(new String[]{"foo*", "bar"}, options);
+ assertThat(results, emptyArray());
+
+ // allowNoIndices is false, so resolving _all on an empty cluster should throw
+ options = IndicesOptions.fromOptions(true, false, true, false);
+ try {
+ md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ }
+
+ @Test
+ public void convertWildcardsJustIndicesTests() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("testXYY"))
+ .put(indexBuilder("testYYY"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "testYYY"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "ku*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "kuku")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"test*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*", "kuku"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
+ }
+
+ @Test
+ public void convertWildcardsTests() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX").putAlias(AliasMetaData.builder("alias1")).putAlias(AliasMetaData.builder("alias2")))
+ .put(indexBuilder("testXYY").putAlias(AliasMetaData.builder("alias2")))
+ .put(indexBuilder("testYYY").putAlias(AliasMetaData.builder("alias3")))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testYY*", "alias*"}, IndicesOptions.lenient())), equalTo(newHashSet("alias1", "alias2", "alias3", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"-kuku"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"+test*", "-testYYY"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testX*", "+testYYY"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testYYY", "+testX*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ }
+
+ private IndexMetaData.Builder indexBuilder(String index) {
+ return IndexMetaData.builder(index).settings(ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void concreteIndicesIgnoreIndicesOneMissingIndex() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ md.concreteIndices(new String[]{"testZZZ"}, IndicesOptions.strict());
+ }
+
+ @Test
+ public void concreteIndicesIgnoreIndicesOneMissingIndexOtherFound() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.concreteIndices(new String[]{"testXXX", "testZZZ"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX")));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void concreteIndicesIgnoreIndicesAllMissing() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.concreteIndices(new String[]{"testMo", "testMahdy"}, IndicesOptions.strict())), equalTo(newHashSet("testXXX")));
+ }
+
+ @Test
+ public void concreteIndicesIgnoreIndicesEmptyRequest() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.concreteIndices(new String[]{}, IndicesOptions.lenient())), equalTo(Sets.<String>newHashSet("kuku", "testXXX")));
+ }
+
+ @Test
+ public void testIsAllIndices_null() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(null), equalTo(true));
+ }
+
+ @Test
+ public void testIsAllIndices_empty() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[0]), equalTo(true));
+ }
+
+ @Test
+ public void testIsAllIndices_explicitAll() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"_all"}), equalTo(true));
+ }
+
+ @Test
+ public void testIsAllIndices_explicitAllPlusOther() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"_all", "other"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsAllIndices_normalIndexes() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"index1", "index2", "index3"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsAllIndices_wildcard() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"*"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_null() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(null), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_empty() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[0]), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_explicitAll() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"_all"}), equalTo(true));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_explicitAllPlusOther() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"_all", "other"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_normalIndexes() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"index1", "index2", "index3"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_wildcard() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"*"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_explicitList() throws Exception {
+ // even though it identifies all indices, it is an explicit list, not a pattern
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] indicesOrAliases = concreteIndices;
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_onlyWildcard() throws Exception {
+ String[] indicesOrAliases = new String[]{"*"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_matchingTrailingWildcard() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcard() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = new String[]{"index1", "index2", "index3", "a", "b"};
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_matchingSingleExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"-index1", "+index1"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_nonMatchingSingleExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"-index1"};
+ String[] concreteIndices = new String[]{"index2", "index3"};
+ String[] allConcreteIndices = new String[]{"index1", "index2", "index3"};
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_matchingTrailingWildcardAndExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*", "-index1", "+index1"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcardAndExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*", "-index1"};
+ String[] concreteIndices = new String[]{"index2", "index3"};
+ String[] allConcreteIndices = new String[]{"index1", "index2", "index3"};
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ private MetaData metaDataBuilder(String... indices) {
+ MetaData.Builder mdBuilder = MetaData.builder();
+ for (String concreteIndex : indices) {
+ mdBuilder.put(indexBuilder(concreteIndex));
+ }
+ return mdBuilder.build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
new file mode 100644
index 0000000..013c765
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Round-trips a {@link MetaData} instance through its JSON representation and
+ * verifies that settings, mappings, aliases, and templates survive intact.
+ */
+public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleJsonFromAndTo() throws IOException {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1")
+ .numberOfShards(1)
+ .numberOfReplicas(2))
+ .put(IndexMetaData.builder("test2")
+ .settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(2)
+ .numberOfReplicas(3))
+ .put(IndexMetaData.builder("test3")
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1))
+ .put(IndexMetaData.builder("test4")
+ .settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2))
+ .put(IndexMetaData.builder("test5")
+ .settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test6")
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2")
+ .put("index.aliases.0", "alias3")
+ .put("index.aliases.1", "alias1"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test7")
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2")
+ .put("index.aliases.0", "alias3")
+ .put("index.aliases.1", "alias1"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1))
+ .putAlias(newAliasMetaDataBuilder("alias2"))
+ .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2)))
+ .put(IndexTemplateMetaData.builder("foo")
+ .template("bar")
+ .order(1).settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2")))
+ .build();
+
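+ // serialize the cluster metadata to JSON, then parse it back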
+ String metaDataSource = MetaData.Builder.toXContent(metaData);
+
+ MetaData parsedMetaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.JSON).createParser(metaDataSource));
+
+ IndexMetaData indexMetaData = parsedMetaData.index("test1");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test2");
+ assertThat(indexMetaData.numberOfShards(), equalTo(2));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(3));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test3");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().size(), equalTo(1));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+
+ indexMetaData = parsedMetaData.index("test4");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+
+ indexMetaData = parsedMetaData.index("test5");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(2));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+
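+ // test6 declares alias1/alias2 via putAlias and alias3/alias1 via the
+ // index.aliases.* settings; the duplicate merges, leaving three aliases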
+ indexMetaData = parsedMetaData.index("test6");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(3));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+ assertThat(indexMetaData.aliases().get("alias3").alias(), equalTo("alias3"));
+
+ indexMetaData = parsedMetaData.index("test7");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(4));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+ assertThat(indexMetaData.aliases().get("alias2").filter(), nullValue());
+ assertThat(indexMetaData.aliases().get("alias3").alias(), equalTo("alias3"));
+ assertThat(indexMetaData.aliases().get("alias3").filter(), nullValue());
+ assertThat(indexMetaData.aliases().get("alias4").alias(), equalTo("alias4"));
+ assertThat(indexMetaData.aliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
+
+ // templates
+ assertThat(parsedMetaData.templates().get("foo").name(), is("foo"));
+ assertThat(parsedMetaData.templates().get("foo").template(), is("bar"));
+ assertThat(parsedMetaData.templates().get("foo").settings().get("index.setting1"), is("value1"));
+ assertThat(parsedMetaData.templates().get("foo").settings().getByPrefix("index.").get("setting2"), is("value2"));
+ }
+
+ private static final String MAPPING_SOURCE1 = "{\"mapping1\":{\"text1\":{\"type\":\"string\"}}}";
+ private static final String MAPPING_SOURCE2 = "{\"mapping2\":{\"text2\":{\"type\":\"string\"}}}";
+ private static final String ALIAS_FILTER1 = "{\"field1\":\"value1\"}";
+ private static final String ALIAS_FILTER2 = "{\"field2\":\"value2\"}";
+}
diff --git a/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
new file mode 100644
index 0000000..8b6ebff
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link DiscoveryNodeFilters}: matching nodes by name, id, and
+ * custom attributes under OR and AND semantics, including wildcard patterns.
+ */
+public class DiscoveryNodeFiltersTests extends ElasticsearchTestCase {
+
+ @Test
+ public void nameMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx.name", "name1")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void idMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx._id", "id1")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void idOrNameMatch() {
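+ // filter values are comma-separated lists; with OR semantics a node matches
+ // if either its id or its name appears in the corresponding list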
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx._id", "id1,blah")
+ .put("xxx.name", "blah,name2")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void tagAndGroupMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx.tag", "A")
+ .put("xxx.group", "B")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(AND, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "B"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "B", "name", "X"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "F", "name", "X"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+
+ node = new DiscoveryNode("name4", "id4", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void starMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx.name", "*")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
new file mode 100644
index 0000000..9563cd0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -0,0 +1,435 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Lists;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
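+/**
+ * Tests that incrementally add (and remove) nodes and indices, verifying that
+ * the allocator keeps shards balanced while performing minimal relocations.
+ */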
+public class AddIncrementallyTests extends ElasticsearchAllocationTestCase {
+ private final ESLogger logger = Loggers.getLogger(AddIncrementallyTests.class);
+
+ @Test
+ public void testAddNodesAndIndices() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ AllocationService service = createAllocationService(settings.build());
+
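+ // a single node with 3 indices of 3 shards and 1 replica each: the 9
+ // primaries start on node0 and the 9 replica copies remain unassigned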
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertAtLeastOneIndexShardPerNode(clusterState);
+ clusterState = removeNodes(clusterState, service, 1);
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+
+ clusterState = addIndex(clusterState, service, 3, 2, 3);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, "test3", Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+
+ clusterState = addIndex(clusterState, service, 4, 2, 3);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, "test4", Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ clusterState = removeNodes(clusterState, service, 1);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ @Test
+ public void testMinimalRelocations() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 2);
+ AllocationService service = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+
+ logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ nodes.put(newNode("node2"));
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
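+ // node_concurrent_recoveries is 2, so each round relocates at most two shards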
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ RoutingTable prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.sameInstance(routingTable));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ @Test
+ public void testMinimalRelocationsNoLimit() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 100)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100);
+ AllocationService service = createAllocationService(settings.build());
+
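+        // Even with the recovery throttles effectively lifted (set to 100), the
+        // assertions below still observe only two relocations per round: the balancer
+        // itself keeps the number of relocations minimal.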
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+
+ logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ nodes.put(newNode("node2"));
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ RoutingTable prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.sameInstance(routingTable));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+
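+    // Asserts that on every node the number of started shards of each index
+    // matches the given matcher.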
+ private void assertNumIndexShardsPerNode(ClusterState state, Matcher<Integer> matcher) {
+ for (String index : state.routingTable().indicesRouting().keySet()) {
+ assertNumIndexShardsPerNode(state, index, matcher);
+ }
+ }
+
+ private void assertNumIndexShardsPerNode(ClusterState state, String index, Matcher<Integer> matcher) {
+ for (RoutingNode node : state.routingNodes()) {
+ assertThat(node.shardsWithState(index, STARTED).size(), matcher);
+ }
+ }
+
+
+ private void assertAtLeastOneIndexShardPerNode(ClusterState state) {
+ for (String index : state.routingTable().indicesRouting().keySet()) {
+
+ for (RoutingNode node : state.routingNodes()) {
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(1));
+ }
+ }
+
+ }
+
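+    // Adds numNodes nodes (named from nodeOffset onwards), reroutes, and applies
+    // started shards until the routing table converges.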
+ private ClusterState addNodes(ClusterState clusterState, AllocationService service, int numNodes, int nodeOffset) {
+ logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numNodes);
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ for (int i = 0; i < numNodes; i++) {
+ nodes.put(newNode("node" + (i + nodeOffset)));
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+        // keep applying started shards until the routing table reaches a fixed point
+
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
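+    // Builds a fresh cluster state with the given numbers of nodes, indices,
+    // shards and replicas, then starts primaries and replicas and rebalances
+    // until the routing table stops changing.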
+ private ClusterState initCluster(AllocationService service, int numberOfNodes, int numberOfIndices, int numberOfShards,
+ int numberOfReplicas) {
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(
+ numberOfReplicas);
+ metaDataBuilder = metaDataBuilder.put(index);
+ }
+
+ MetaData metaData = metaDataBuilder.build();
+
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ logger.info("start " + numberOfNodes + " nodes");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes.put(newNode("node" + i));
+ }
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
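+    // Adds a single new index ("test" + indexOrdinal) to the existing cluster
+    // state and drives its allocation to convergence, mirroring initCluster.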
+ private ClusterState addIndex(ClusterState clusterState, AllocationService service, int indexOrdinal, int numberOfShards,
+ int numberOfReplicas) {
+ MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.getMetaData());
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable());
+
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).numberOfShards(numberOfShards).numberOfReplicas(
+ numberOfReplicas);
+ IndexMetaData imd = index.build();
+ metaDataBuilder = metaDataBuilder.put(imd, true);
+ routingTableBuilder.addAsNew(imd);
+
+ MetaData metaData = metaDataBuilder.build();
+ RoutingTable routingTable = routingTableBuilder.build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
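+    // Removes numNodes randomly picked nodes from the cluster state, then
+    // reroutes until the displaced shards are re-allocated on the survivors.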
+ private ClusterState removeNodes(ClusterState clusterState, AllocationService service, int numNodes) {
+ logger.info("Removing [{}] nodes", numNodes);
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ ArrayList<DiscoveryNode> discoveryNodes = Lists.newArrayList(clusterState.nodes());
+ Collections.shuffle(discoveryNodes, getRandom());
+ for (DiscoveryNode node : discoveryNodes) {
+ nodes.remove(node.id());
+ numNodes--;
+ if (numNodes <= 0) {
+ break;
+ }
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingTable routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("rebalancing");
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java
new file mode 100644
index 0000000..a749535
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that the primaryAllocatedPostApi flag on a shard's routing table entry
+ * flips to true once the primary shard has been started.
+ */
+public class AllocatePostApiFlagTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AllocatePostApiFlagTests.class);
+
+ @Test
+ public void simpleFlagTests() {
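+        // primaryAllocatedPostApi should remain false until the primary shard has
+        // actually been started on a node; the assertions below trace that transition.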
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("creating an index with 1 shard, no replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(false));
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(false));
+
+ logger.info("start primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
new file mode 100644
index 0000000..2e76b20
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the explicit allocation commands (allocate, move, cancel) used by the
+ * cluster reroute API, including their transport serialization and XContent parsing.
+ */
+public class AllocationCommandsTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AllocationCommandsTests.class);
+
+ @Test
+ public void moveShardCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("creating an index with 1 shard, no replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("start primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
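+        // Moving a started shard leaves it RELOCATING on the source node while an
+        // INITIALIZING copy is built on the target node.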
+ logger.info("move the shard");
+ String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String toNodeId;
+ if ("node1".equals(existingNodeId)) {
+ toNodeId = "node2";
+ } else {
+ toNodeId = "node1";
+ }
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(new ShardId("test", 0), existingNodeId, toNodeId)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(existingNodeId).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+ assertThat(clusterState.routingNodes().node(toNodeId).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ logger.info("finish moving the shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(existingNodeId).isEmpty(), equalTo(true));
+ assertThat(clusterState.routingNodes().node(toNodeId).get(0).state(), equalTo(ShardRoutingState.STARTED));
+ }
+
+ @Test
+ public void allocateCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 3 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ .put(newNode("node4", ImmutableMap.of("data", Boolean.FALSE.toString())))
+ ).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> allocating with primary flag set to false, should fail");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
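+            // expected: a replica cannot be allocated while its primary is unassigned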
+ }
+
+ logger.info("--> allocating to non-data node, should fail");
+ try {
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node4", true)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
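+            // expected: shards may not be allocated to a non-data node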
+ }
+
+ logger.info("--> allocating with primary flag set to true");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", true)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> start the primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on the primary shard node, should fail");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
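+            // expected: a replica may not be allocated to the same node as its primary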
+ }
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> verify that we fail when there are no unassigned shards");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node3", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
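+            // expected: no unassigned copy of the shard is left to allocate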
+ }
+ }
+
+ @Test
+ public void cancelCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 3 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ ).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> allocating with primary flag set to true");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", true)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> cancel primary allocation, make sure it fails...");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
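+            // expected: cancelling a primary requires allow_primary to be set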
+ }
+
+ logger.info("--> start the primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> cancel primary allocation, make sure it fails...");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
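+            // expected: cancelling a started primary still requires allow_primary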
+ }
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the relocation allocation");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the primary being replicated, make sure it fails");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
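+            // expected: the primary cannot be cancelled without allow_primary while its replica initializes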
+ }
+
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> cancel allocation of the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(rerouteResult.changed(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> move the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(new ShardId("test", 0), "node2", "node3")));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node3").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the move of the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node3", false)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+
+ logger.info("--> cancel the primary allocation (with allow_primary set to true)");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", true)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(rerouteResult.changed(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+ }
+
+ @Test
+ public void serialization() throws Exception {
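+        // Round-trips the commands through the stream (transport) format and checks
+        // that every field survives intact.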
+ AllocationCommands commands = new AllocationCommands(
+ new AllocateAllocationCommand(new ShardId("test", 1), "node1", true),
+ new MoveAllocationCommand(new ShardId("test", 3), "node2", "node3"),
+ new CancelAllocationCommand(new ShardId("test", 4), "node5", true)
+ );
+ BytesStreamOutput bytes = new BytesStreamOutput();
+ AllocationCommands.writeTo(commands, bytes);
+ AllocationCommands sCommands = AllocationCommands.readFrom(new BytesStreamInput(bytes.bytes()));
+
+ assertThat(sCommands.commands().size(), equalTo(3));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(new ShardId("test", 1)));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1"));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).allowPrimary(), equalTo(true));
+
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(new ShardId("test", 3)));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).fromNode(), equalTo("node2"));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).toNode(), equalTo("node3"));
+
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(new ShardId("test", 4)));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node5"));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).allowPrimary(), equalTo(true));
+ }
+
+ @Test
+ public void xContent() throws Exception {
+ String commands = "{\n" +
+ " \"commands\" : [\n" +
+ " {\"allocate\" : {\"index\" : \"test\", \"shard\" : 1, \"node\" : \"node1\", \"allow_primary\" : true}}\n" +
+ " ,{\"move\" : {\"index\" : \"test\", \"shard\" : 3, \"from_node\" : \"node2\", \"to_node\" : \"node3\"}} \n" +
+ " ,{\"cancel\" : {\"index\" : \"test\", \"shard\" : 4, \"node\" : \"node5\", \"allow_primary\" : true}} \n" +
+ " ]\n" +
+ "}\n";
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(commands);
+        // advance two tokens so the parser is positioned on the "commands" field
+ parser.nextToken();
+ parser.nextToken();
+ AllocationCommands sCommands = AllocationCommands.fromXContent(parser);
+
+ assertThat(sCommands.commands().size(), equalTo(3));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(new ShardId("test", 1)));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1"));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).allowPrimary(), equalTo(true));
+
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(new ShardId("test", 3)));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).fromNode(), equalTo("node2"));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).toNode(), equalTo("node3"));
+
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(new ShardId("test", 4)));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node5"));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).allowPrimary(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
new file mode 100644
index 0000000..929fb54
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
@@ -0,0 +1,827 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for allocation awareness: shard copies should be spread across the values
+ * of the configured awareness attributes (here, rack_id).
+ */
+public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded1() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
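+        // With awareness on rack_id, the two copies of the shard should end up on
+        // different racks once a second rack becomes available.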
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded1'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded2() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded2'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded3() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded3'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(INITIALIZING)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(STARTED)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(RELOCATING)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(UNASSIGNED)) {
+ logger.info(shard.toString());
+ }
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> run it again, since we still might have relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded4() {
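+ // Same flow as moveShardOnceNewNodeWithAttributeAdded3, but with two indices, so every shard count doubles.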
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded4'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(5).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ for (int i = 0; i < 2; i++) {
+ logger.info("--> complete initializing round: [{}]", i);
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(10));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(5));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ for (int i = 0; i < 2; i++) {
+ logger.info("--> complete initializing round: [{}]", i);
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(5));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded5() {
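+ // A single shard with two replicas: awareness should spread the three copies across the racks as they appear.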
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded5'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, we will have another relocation");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+
+ logger.info("--> make sure another reroute does not move things");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded6() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded6'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(3))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node4", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node5"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, we will have another relocation");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node6", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node6"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> make sure another reroute does not move things");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void fullAwareness1() {
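+ // Forced awareness: rack values "1,2" are declared up front, so the replica stays unassigned until a node from the missing rack joins.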
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness1'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> replica will not start because we have only one rack value");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void fullAwareness2() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness2'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> replica will not start because we have only one rack value");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void fullAwareness3() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness3'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(5).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> run it again, since we still might have relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void testUnbalancedZones() {
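+ // Zone "a" has two nodes while zone "b" has one: awareness keeps five copies in each zone, so B-0 ends up holding all five shards of its zone.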
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table for 'testUnbalancedZones'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("A-0", ImmutableMap.of("zone", "a")))
+ .put(newNode("B-0", ImmutableMap.of("zone", "b")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> all replicas are allocated and started since we have on node in each zone");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node in zone 'a' and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("A-1", ImmutableMap.of("zone", "a")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("A-1"));
+ logger.info("--> starting initializing shards on the new node");
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.getRoutingNodes().node("A-1").size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("A-0").size(), equalTo(3));
+ assertThat(clusterState.getRoutingNodes().node("B-0").size(), equalTo(5));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
new file mode 100644
index 0000000..a7c29af
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.gateway.none.NoneGatewayAllocator;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(BalanceConfigurationTests.class);
+ // TODO maybe we can randomize these numbers somehow
+ final int numberOfNodes = 25;
+ final int numberOfIndices = 12;
+ final int numberOfShards = 2;
+ final int numberOfReplicas = 2;
+
+ @Test
+ public void testIndexBalance() {
+ /* Tests balance over indices only */
+ final float indexBalance = 1.0f;
+ final float replicaBalance = 0.0f;
+ final float primaryBalance = 0.0f;
+ final float balanceThreshold = 1.0f;
+
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceThreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+ assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ clusterState = addNode(clusterState, strategy);
+ assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ clusterState = removeNodes(clusterState, strategy);
+ assertIndexBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ }
+
+ @Test
+ public void testReplicaBalance() {
+ /* Tests balance over replicas only */
+ final float indexBalance = 0.0f;
+ final float replicaBalance = 1.0f;
+ final float primaryBalance = 0.0f;
+ final float balanceThreshold = 1.0f;
+
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceThreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+ assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ clusterState = addNode(clusterState, strategy);
+ assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ clusterState = removeNodes(clusterState, strategy);
+ assertReplicaBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ }
+
+ @Test
+ public void testPrimaryBalance() {
+ /* Tests balance over primaries only */
+ final float indexBalance = 0.0f;
+ final float replicaBalance = 0.0f;
+ final float primaryBalance = 1.0f;
+ final float balanceThreshold = 1.0f;
+
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceThreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+ assertPrimaryBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ clusterState = addNode(clusterState, strategy);
+ assertPrimaryBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ clusterState = removeNodes(clusterState, strategy);
+ assertPrimaryBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1 - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+ }
+
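+ // Builds the initial cluster, starts all primaries and replicas, and applies started shards until the routing table stops changing.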
+ private ClusterState initCluster(AllocationService strategy) {
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
+ metaDataBuilder = metaDataBuilder.put(index);
+ }
+
+ MetaData metaData = metaDataBuilder.build();
+
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+
+
+ logger.info("start " + numberOfNodes + " nodes");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes.put(newNode("node" + i));
+ }
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
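+ // Adds one more node and applies started shards until the routing table stabilizes again.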
+ private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
+ logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node" + numberOfNodes)))
+ .build();
+
+ RoutingTable routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // move initializing to started
+
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
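+ // Removes half the nodes, restarts the displaced shards, reroutes, and waits for the routing table to stabilize.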
+ private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) {
+ logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+
+ for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) {
+ nodes.remove("node" + i);
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("rebalancing");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+
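+ // Every node must hold the per-node average number of shard copies, within the balance threshold.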
+ private void assertReplicaBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float threshold) {
+ final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1);
+ final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
+ final int minAvgNumberOfShards = (int) Math.floor(avgNumShards - threshold);
+ final int maxAvgNumberOfShards = (int) Math.ceil(avgNumShards + threshold);
+
+ for (RoutingNode node : nodes) {
+// logger.info(node.nodeId() + ": " + node.shardsWithState(INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(node.shardsWithState(STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(node.shardsWithState(STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+
+ private void assertIndexBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float threshold) {
+
+ final int numShards = numberOfShards * (numberOfReplicas + 1);
+ final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
+ final int minAvgNumberOfShards = (int) Math.floor(avgNumShards - threshold);
+ final int maxAvgNumberOfShards = (int) Math.ceil(avgNumShards + threshold);
+
+ for (String index : nodes.getRoutingTable().indicesRouting().keySet()) {
+ for (RoutingNode node : nodes) {
+// logger.info(node.nodeId() +":"+index+ ": " + node.shardsWithState(index, INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+ }
+
+ private void assertPrimaryBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float threshold) {
+
+ final int numShards = numberOfShards;
+ final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
+ final int minAvgNumberOfShards = (int) Math.floor(avgNumShards - threshold);
+ final int maxAvgNumberOfShards = (int) Math.ceil(avgNumShards + threshold);
+
+ for (String index : nodes.getRoutingTable().indicesRouting().keySet()) {
+ for (RoutingNode node : nodes) {
+ int primaries = 0;
+ for (ShardRouting shard : node.shardsWithState(index, STARTED)) {
+ primaries += shard.primary() ? 1 : 0;
+ }
+// logger.info(node.nodeId() + ": " + primaries + " primaries ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(primaries, Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(primaries, Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+ }
+
+ @Test
+ public void testPersistedSettings() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, 0.5);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0);
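+ // Capture the listener the allocator registers so refreshed settings can be pushed to it directly.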
+ final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1];
+ NodeSettingsService service = new NodeSettingsService(settingsBuilder().build()) {
+
+ @Override
+ public void addListener(Listener listener) {
+ assertNull("addListener was called twice while only one time was expected", listeners[0]);
+ listeners[0] = listener;
+ }
+
+ };
+ BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service);
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
+ assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));
+
+ settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ listeners[0].onRefreshSettings(settings.build());
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
+ assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));
+
+ settings = settingsBuilder();
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, 0.4);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0);
+ listeners[0].onRefreshSettings(settings.build());
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f));
+ assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.4f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f));
+ }
+
+ @Test
+ public void testNoRebalanceOnPrimaryOverload() {
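+ // A hand-rolled allocator pins shards into the primary-heavy layout documented below; the test then checks that no rebalance is triggered by primary overload alone.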
+ ImmutableSettings.Builder settings = settingsBuilder();
+ AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
+ new NodeSettingsService(ImmutableSettings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(),
+ new NoneGatewayAllocator(), new ShardsAllocator() {
+
+ @Override
+ public boolean rebalance(RoutingAllocation allocation) {
+ return false;
+ }
+
+ @Override
+ public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return false;
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ }
+
+ /*
+ * // this allocator rebuilds the scenario below, in which a rebalance is
+ * // triggered solely by the primary overload on node [1] and a shard
+ * // is rebalanced to node [0]
+ routing_nodes:
+ -----node_id[0][V]
+ --------[test][0], node[0], [R], s[STARTED]
+ --------[test][4], node[0], [R], s[STARTED]
+ -----node_id[1][V]
+ --------[test][0], node[1], [P], s[STARTED]
+ --------[test][1], node[1], [P], s[STARTED]
+ --------[test][3], node[1], [R], s[STARTED]
+ -----node_id[2][V]
+ --------[test][1], node[2], [R], s[STARTED]
+ --------[test][2], node[2], [R], s[STARTED]
+ --------[test][4], node[2], [P], s[STARTED]
+ -----node_id[3][V]
+ --------[test][2], node[3], [P], s[STARTED]
+ --------[test][3], node[3], [P], s[STARTED]
+ ---- unassigned
+ */
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
+ boolean changed = !unassigned.isEmpty();
+ for (MutableShardRouting sr : unassigned) {
+ switch (sr.id()) {
+ case 0:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node1");
+ } else {
+ allocation.routingNodes().assign(sr, "node0");
+ }
+ break;
+ case 1:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node1");
+ } else {
+ allocation.routingNodes().assign(sr, "node2");
+ }
+ break;
+ case 2:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node3");
+ } else {
+ allocation.routingNodes().assign(sr, "node2");
+ }
+ break;
+ case 3:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node3");
+ } else {
+ allocation.routingNodes().assign(sr, "node1");
+ }
+ break;
+ case 4:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node2");
+ } else {
+ allocation.routingNodes().assign(sr, "node0");
+ }
+ break;
+ }
+
+ }
+ unassigned.clear();
+ return changed;
+ }
+ }), ClusterInfoService.EMPTY);
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1);
+ metaDataBuilder = metaDataBuilder.put(indexMeta);
+ MetaData metaData = metaDataBuilder.build();
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < 4; i++) {
+ DiscoveryNode node = newNode("node" + i);
+ nodes.put(node);
+ }
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING));
+ }
+ }
+ strategy = createAllocationService(settings.build());
+
+ logger.info("use the new allocator and check if it moves shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+ }
+ }
+
+ logger.info("start the replica shards");
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+ }
+ }
+
+ logger.info("rebalancing");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+ }
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
new file mode 100644
index 0000000..89b0721
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -0,0 +1,626 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);
+
+ @Test
+ public void testAlways() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+// assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
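+ // ALWAYS permits rebalancing even though test2 is still initializing, so one test1 shard moves to node3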
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
+ }
+
+
+ @Test
+ public void testClusterPrimariesActive1() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
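+ // both indices' primaries are active, which is all INDICES_PRIMARIES_ACTIVE requires, so one test1 shard moves to node3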
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
+ }
+
+ @Test
+ public void testClusterPrimariesActive2() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
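+ // test2's primary is still initializing, so INDICES_PRIMARIES_ACTIVE blocks rebalancing and node3 stays empty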
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testClusterAllActive1() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the test2 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
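+ // with every primary and replica active, INDICES_ALL_ACTIVE allows one shard (from either index) to move to node3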
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), anyOf(equalTo("test1"), equalTo("test2")));
+ }
+
+ @Test
+ public void testClusterAllActive2() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
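+ // test2's primary is still initializing and its replica unassigned, so INDICES_ALL_ACTIVE blocks rebalancing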
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testClusterAllActive3() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
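+ // test2's replicas are still initializing, so INDICES_ALL_ACTIVE keeps blocking rebalancing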
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
new file mode 100644
index 0000000..2f685b6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class ConcurrentRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class);
+
+ @Test
+ public void testClusterConcurrentRebalance() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
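+ // plan: pile all 10 shard copies onto two nodes, then add 8 more nodes so 8 copies must rebalance in stages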
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the replica shards, rebalancing should start, but, only 3 should be rebalancing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // 8 of the 10 copies must move to balance one per node; cluster_concurrent_rebalance caps this at 3 relocations at a time
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("finalize this session relocation, 3 more should relocate now");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // the first 3 relocations have completed, so the next 3 begin, still capped at 3 concurrent
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("finalize this session relocation, 2 more should relocate now");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // 6 moves are done; only 2 remain, below the cap of 3
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(2));
+
+ logger.info("finalize this session relocation, no more relocation");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // the final 2 relocations have completed; all 10 copies are started and balanced one per node
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(0));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
new file mode 100644
index 0000000..fee7858
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(DeadNodesAllocationTests.class);
+
+ @Test
+ public void simpleDeadNodeOnStartedPrimaryShard() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> fail node with primary");
+ String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(nodeIdRemaining))
+ ).build();
+
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
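+ // the surviving replica is promoted: the remaining node now holds a started primary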
+ assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).state(), equalTo(STARTED));
+ }
+
+ @Test
+ public void deadNodeWhileRelocatingOnToNode() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(origPrimaryNodeId))
+ .put(newNode(origReplicaNodeId))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
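+ // killing the relocation target cancels the move: both original copies are started again on their old nodes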
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ }
+
+ @Test
+ public void deadNodeWhileRelocatingOnFromNode() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node3"))
+ .put(newNode(origReplicaNodeId))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+ }
+}
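Note: every test in this patch drives allocation the same way: build a ClusterState by hand, reroute, then repeatedly mark INITIALIZING shards as started. A condensed sketch of one round of that cycle, assuming the 1.0.x test APIs used above (the helper name is illustrative, not part of the patch):

    // Illustrative helper: one reroute/start round as repeated throughout these tests.
    private ClusterState startInitializingShards(AllocationService allocation, ClusterState clusterState) {
        RoutingAllocation.Result result = allocation.applyStartedShards(
                clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
        return ClusterState.builder(clusterState).routingTable(result.routingTable()).build();
    }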
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java
new file mode 100644
index 0000000..f58b3cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DisableAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(DisableAllocationTests.class);
+
+ @Test
+ public void testClusterDisableAllocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ }
+
+ @Test
+ public void testClusterDisableReplicaAllocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.disable_replica_allocation", true)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ }
+
+ @Test
+ public void testIndexDisableAllocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("disabled").settings(ImmutableSettings.builder().put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("enabled").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("disabled"))
+ .addAsNew(metaData.index("enabled"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> verify only enabled index has been routed");
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("disabled", STARTED).size(), equalTo(0));
+ }
+
+}
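For reference, the three disable settings this file exercises, combined into a single builder. Combining them is illustrative only (the tests above set them separately); the constant-based and string-based keys name cluster-level settings in the same family:

    // Illustrative combination of the settings tested above.
    Settings allDisabled = ImmutableSettings.settingsBuilder()
            .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
            .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
            .put("cluster.routing.allocation.disable_replica_allocation", true)
            .build();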
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
new file mode 100644
index 0000000..80d1a15
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ElectReplicaAsPrimaryDuringRelocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class);
+
+ @Test
+ public void testElectReplicaAsPrimaryDuringRelocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+
+ logger.info("Start another node and perform rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("find the replica shard that gets relocated");
+ IndexShardRoutingTable indexShardRoutingTable = null;
+ if (routingTable.index("test").shard(0).replicaShards().get(0).relocating()) {
+ indexShardRoutingTable = routingTable.index("test").shard(0);
+ } else if (routingTable.index("test").shard(1).replicaShards().get(0).relocating()) {
+ indexShardRoutingTable = routingTable.index("test").shard(1);
+ }
+
+ // a primary may be the shard that relocates; this test only covers replicas, so proceed only when a replica is relocating
+ if (indexShardRoutingTable != null) {
+ logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId());
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("make sure all the primary shards are active");
+ assertThat(routingTable.index("test").shard(0).primaryShard().active(), equalTo(true));
+ assertThat(routingTable.index("test").shard(1).primaryShard().active(), equalTo(true));
+ }
+ }
+}
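The two hard-coded shard(0)/shard(1) checks above generalize to a loop. A sketch, assuming IndexRoutingTable is iterable over its IndexShardRoutingTable groups as in the 1.0.x API:

    // Illustrative: find any shard group whose first replica is relocating.
    IndexShardRoutingTable relocatingGroup = null;
    for (IndexShardRoutingTable group : routingTable.index("test")) {
        if (group.replicaShards().get(0).relocating()) {
            relocatingGroup = group;
            break;
        }
    }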
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
new file mode 100644
index 0000000..e5ac333
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class FailedNodeRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class);
+
+ @Test
+ public void simpleFailedNodeTest() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start 4 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
+
+
+ logger.info("remove 2 nodes where primaries are allocated, reroute");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
+ .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+ )
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNode.numberOfShardsWithState(INITIALIZING), equalTo(1));
+ }
+ }
+
+ @Test
+ public void simpleFailedNodeTestNoReassign() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start 4 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
+
+
+ logger.info("remove 2 nodes where primaries are allocated, reroute");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
+ .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+ )
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.rerouteWithNoReassign(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
+ }
+ assertThat(routingNodes.unassigned().size(), equalTo(2));
+ }
+}
\ No newline at end of file
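The two tests above differ only in the final reroute call. The contrast they assert, sketched under the same 1.0.x API (calling both on one state is illustrative):

    // reroute(...) reassigns the copies orphaned by the removed nodes: every
    // surviving node ends up with one STARTED and one INITIALIZING shard.
    // rerouteWithNoReassign(...) only promotes surviving copies; the orphaned
    // copies stay in the unassigned list (size 2 in the assertion above).
    RoutingTable reassigned = strategy.reroute(clusterState).routingTable();
    RoutingTable notReassigned = strategy.rerouteWithNoReassign(clusterState).routingTable();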
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
new file mode 100644
index 0000000..ca8bf49
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FailedShardsRoutingTests.class);
+
+ @Test
+ public void testFailedShardPrimaryRelocatingToAndFrom() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node3 being initialized");
+ rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node("node3").get(0)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node1 being relocated");
+ rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node(origPrimaryNodeId).get(0)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // check promotion of replica to primary
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(origReplicaNodeId));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(origPrimaryNodeId), equalTo("node3")));
+ }
+
+ @Test
+ public void failPrimaryStartedCheckReplicaElected() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the shards (primaries)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Start the shards (backups)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned");
+ ShardRouting shardToFail = new ImmutableShardRouting(routingTable.index("test").shard(0).primaryShard());
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, shardToFail).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId())));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+
+ logger.info("fail the shard again, check that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, shardToFail).changed(), equalTo(false));
+ }
+
+ @Test
+ public void firstAllocationFailureSingleNode() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding single node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the shard again, see that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).changed(), equalTo(false));
+ }
+
+ @Test
+ public void singleShardMultipleAllocationFailures() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+ int numberOfReplicas = scaledRandomIntBetween(2, 10);
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(numberOfReplicas))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1);
+ DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfReplicas + 1; i++) {
+ nodeBuilder.put(newNode("node" + Integer.toString(i)));
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
+ while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) {
+ // start all initializing
+ clusterState = ClusterState.builder(clusterState)
+ .routingTable(strategy
+ .applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)).routingTable()
+ )
+ .build();
+ // and assign more unassigned
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+ }
+
+ int shardsToFail = randomIntBetween(1, numberOfReplicas);
+ ArrayList<ShardRouting> failedShards = new ArrayList<ShardRouting>();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ for (int i = 0; i < shardsToFail; i++) {
+ String n = "node" + Integer.toString(randomInt(numberOfReplicas));
+ logger.info("failing shard on node [{}]", n);
+ ShardRouting shardToFail = routingNodes.node(n).get(0);
+ failedShards.add(new MutableShardRouting(shardToFail));
+ }
+
+ routingTable = strategy.applyFailedShards(clusterState, failedShards).routingTable();
+
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ for (ShardRouting failedShard : failedShards) {
+ if (!routingNodes.node(failedShard.currentNodeId()).isEmpty()) {
+ fail("shard " + failedShard + " was re-assigned to it's node");
+ }
+ }
+ }
+
+ @Test
+ public void firstAllocationFailureTwoNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the first shard, will start INITIALIZING on the second node");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), not(equalTo(nodeHoldingPrimary)));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the shard again, see that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).changed(), equalTo(false));
+ }
+
+ @Test
+ public void rebalanceFailure() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the shards (primaries)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Start the shards (backups)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Adding third node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+
+
+ logger.info("Fail the shards on node 3");
+ ShardRouting shardToFail = routingNodes.node("node3").get(0);
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting(shardToFail)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ // make sure the failedShard is not INITIALIZING again on node3
+ assertThat(routingNodes.node("node3").get(0).shardId(), not(equalTo(shardToFail.shardId())));
+ }
+}
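A pattern worth noting across the tests above: failing a shard routing entry that is no longer allocated is a no-op, so each test closes by asserting that a second failure reports no change. A sketch under the same API:

    // Illustrative: a second applyFailedShard(...) with the same routing entry
    // must leave the routing table untouched.
    RoutingAllocation.Result result = strategy.applyFailedShard(clusterState, shardToFail);
    clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build();
    assertThat(strategy.applyFailedShard(clusterState, shardToFail).changed(), equalTo(false));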
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
new file mode 100644
index 0000000..b9e11a8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FilterRoutingTests.class);
+
+ @Test
+ public void testClusterFilters() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.include.tag1", "value1,value2")
+ .put("cluster.routing.allocation.exclude.tag1", "value3,value4")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding four nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))
+ .put(newNode("node3", ImmutableMap.of("tag1", "value3")))
+ .put(newNode("node4", ImmutableMap.of("tag1", "value4")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
+ List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
+ }
+ }
+
+ @Test
+ public void testIndexFilters() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .put("index.routing.allocation.include.tag1", "value1,value2")
+ .put("index.routing.allocation.exclude.tag1", "value3,value4")
+ .build()))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))
+ .put(newNode("node3", ImmutableMap.of("tag1", "value3")))
+ .put(newNode("node4", ImmutableMap.of("tag1", "value4")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
+ List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
+ }
+
+ logger.info("--> switch between value2 and value4, shards should be relocating");
+
+ metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .put("index.routing.allocation.include.tag1", "value1,value4")
+ .put("index.routing.allocation.exclude.tag1", "value2,value3")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(2));
+
+ logger.info("--> finish relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node4")));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
new file mode 100644
index 0000000..1536a12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
@@ -0,0 +1,538 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class);
+
+ @Test
+ public void testBalanceAllNodesStarted() {
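+ // Recovery limits of 10 plus an unlimited rebalance budget (cluster_concurrent_rebalance
+ // of -1, rebalancing allowed "always") keep throttling from interfering with the
+ // balance assertions below.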
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
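+ // Second applyStartedShards pass: the primaries were started above, so the
+ // INITIALIZING set now holds the replicas, which this call marks as started.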
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceIncrementallyStartNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceAllNodesStartedAddIndex() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ logger.info("Add new index 3 shards 1 replica");
+
+ prevRoutingTable = routingTable;
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
new file mode 100644
index 0000000..a8239eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class);
+
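+ // These tests exercise the NodeVersionAllocationDecider: in a mixed-version cluster
+ // (e.g. during a rolling upgrade) a shard may only recover onto a node whose version
+ // is the same as or newer than the node it recovers from.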
+ @Test
+ public void testDoNotAllocateFromPrimary() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
+
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", getPreviousVersion())))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ }
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
+ }
+ }
+
+
+ @Test
+ public void testRandom() {
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
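+ // Randomized scenario: repeatedly grow or shrink a cluster of mixed-version nodes and
+ // let stabilize() drive allocation to a fixed point, asserting after every step that
+ // recoveries respect the version ordering.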
+ logger.info("Building initial routing table");
+ MetaData.Builder builder = MetaData.builder();
+ RoutingTable.Builder rtBuilder = RoutingTable.builder();
+ int numIndices = between(1, 20);
+ for (int i = 0; i < numIndices; i++) {
+ builder.put(IndexMetaData.builder("test_" + i).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2)));
+ }
+ MetaData metaData = builder.build();
+
+ for (int i = 0; i < numIndices; i++) {
+ rtBuilder.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rtBuilder.build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(routingTable.allShards().size()));
+ List<DiscoveryNode> nodes = new ArrayList<DiscoveryNode>();
+ int nodeIdx = 0;
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ int numNodes = between(1, 20);
+ if (nodes.size() > numNodes) {
+ Collections.shuffle(nodes, getRandom());
+ nodes = nodes.subList(0, numNodes);
+ } else {
+ for (int j = nodes.size(); j < numNodes; j++) {
+ if (frequently()) {
+ nodes.add(newNode("node" + (nodeIdx++), randomBoolean() ? getPreviousVersion() : Version.CURRENT));
+ } else {
+ nodes.add(newNode("node" + (nodeIdx++), randomVersion()));
+ }
+ }
+ }
+ for (DiscoveryNode node : nodes) {
+ nodesBuilder.put(node);
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ clusterState = stabilize(clusterState, service);
+ }
+ }
+
+ @Test
+ public void testRollingRestart() {
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ }
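+ // Simulate a rolling restart: start on three previous-version nodes, then replace them
+ // one at a time with current-version nodes, stabilizing the cluster between each swap.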
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("old0", getPreviousVersion()))
+ .put(newNode("old1", getPreviousVersion()))
+ .put(newNode("old2", getPreviousVersion()))).build();
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("old0", getPreviousVersion()))
+ .put(newNode("old1", getPreviousVersion()))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node0", getPreviousVersion()))
+ .put(newNode("new1"))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("new2"))
+ .put(newNode("new1"))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+ routingTable = clusterState.routingTable();
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), notNullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), notNullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), notNullValue());
+ }
+ }
+
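+ // Drives the cluster to a steady state: starts INITIALIZING shards and reroutes in a
+ // loop until the routing table stops changing, checking version constraints each round.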
+ private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
+ logger.trace("RoutingNodes: {}", clusterState.routingNodes().prettyPrint());
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertRecoveryNodeVersions(routingNodes);
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ boolean stable = false;
+ for (int i = 0; i < 1000; i++) { // at most 1000 iters - this should be enough for all tests
+ logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (stable = (routingTable == prev)) {
+ break;
+ }
+ assertRecoveryNodeVersions(routingNodes);
+ prev = routingTable;
+ }
+ logger.info("stabilized success [{}]", stable);
+ assertThat(stable, is(true));
+ return clusterState;
+ }
+
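+ // Asserts the decider's invariant for both recovery flavours: a relocating shard's
+ // target node must be on the same or a newer version than its source node, and an
+ // initializing replica must sit on a node at least as new as its primary's node.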
+ private void assertRecoveryNodeVersions(RoutingNodes routingNodes) {
+ logger.trace("RoutingNodes: {}", routingNodes.prettyPrint());
+
+ List<MutableShardRouting> mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.RELOCATING);
+ for (MutableShardRouting r : mutableShardRoutings) {
+ String toId = r.relocatingNodeId();
+ String fromId = r.currentNodeId();
+ assertThat(fromId, notNullValue());
+ assertThat(toId, notNullValue());
+ logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+ assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
+ }
+
+ mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.INITIALIZING);
+ for (MutableShardRouting r : mutableShardRoutings) {
+ if (r.initializing() && r.relocatingNodeId() == null && !r.primary()) {
+ MutableShardRouting primary = routingNodes.activePrimary(r);
+ assertThat(primary, notNullValue());
+ String fromId = primary.currentNodeId();
+ String toId = r.currentNodeId();
+ logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+ assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
+ }
+ }
+
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
new file mode 100644
index 0000000..2630527
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class PreferLocalPrimariesToRelocatingPrimariesTests extends ElasticsearchAllocationTestCase {
+ @Test
+ public void testPreferLocalPrimaryAllocationOverFiltered() {
+ int concurrentRecoveries = randomIntBetween(1, 10);
+ int primaryRecoveries = randomIntBetween(1, 10);
+ int numberOfShards = randomIntBetween(5, 20);
+ int totalNumberOfShards = numberOfShards * 2;
+
+ logger.info("create an allocation with [{}] initial primary recoveries and [{}] concurrent recoveries", primaryRecoveries, concurrentRecoveries);
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", concurrentRecoveries)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", primaryRecoveries)
+ .build());
+
+ logger.info("create 2 indices with [{}] no replicas, and wait till all are allocated", numberOfShards);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(0))
+ .put(IndexMetaData.builder("test2").numberOfShards(numberOfShards).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting till all are allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))).build();
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ while (!clusterState.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ logger.info("remove one of the nodes and apply filter to move everything from another node");
+
+ metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .put("index.routing.allocation.exclude.tag1", "value2")
+ .build()))
+ .put(IndexMetaData.builder("test2").settings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .put("index.routing.allocation.exclude.tag1", "value2")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards);
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(numberOfShards));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(numberOfShards));
+
+ logger.info("start node back up");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
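+ // The returning node should recover its unassigned local primaries first: each round,
+ // the number of non-relocating (local) initializations must equal the outstanding work,
+ // capped by the node_initial_primaries_recoveries budget, rather than waiting on the
+ // throttled relocations.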
+ while (clusterState.routingNodes().shardsWithState(STARTED).size() < totalNumberOfShards) {
+ int localInitializations = 0;
+ int relocatingInitializations = 0;
+ for (MutableShardRouting routing : clusterState.routingNodes().shardsWithState(INITIALIZING)) {
+ if (routing.relocatingNodeId() == null) {
+ localInitializations++;
+ } else {
+ relocatingInitializations++;
+ }
+ }
+ int needToInitialize = totalNumberOfShards - clusterState.routingNodes().shardsWithState(STARTED).size() - clusterState.routingNodes().shardsWithState(RELOCATING).size();
+ logger.info("local initializations: [{}], relocating: [{}], need to initialize: {}", localInitializations, relocatingInitializations, needToInitialize);
+ assertThat(localInitializations, equalTo(Math.min(primaryRecoveries, needToInitialize)));
+ clusterState = startRandomInitializingShard(clusterState, strategy);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
new file mode 100644
index 0000000..f1cd54a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
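+ * Tests that, when recoveries are throttled, primary shards of a newly created
+ * index are allocated before pending replica recoveries of existing indices.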
+ */
+public class PreferPrimaryAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class);
+
+ @Test
+ public void testPreferPrimaryAllocationOverReplicas() {
+ logger.info("create an allocation with 1 initial recoveries");
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 1)
+ .build());
+
+ logger.info("create several indices with no replicas, and wait till all are allocated");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(10).numberOfReplicas(0))
+ .put(IndexMetaData.builder("test2").numberOfShards(10).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting till all are allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ while (!clusterState.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ logger.info("increasing the number of replicas to 1, and perform a reroute (to get the replicas allocation going)");
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("2 replicas should be initializing now for the existing indices (we throttle to 1)");
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("create a new index");
+ metaData = MetaData.builder(clusterState.metaData())
+ .put(IndexMetaData.builder("new_index").numberOfShards(4).numberOfReplicas(0))
+ .build();
+
+ routingTable = RoutingTable.builder(clusterState.routingTable())
+ .addAsNew(metaData.index("new_index"))
+ .build();
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("reroute, verify that primaries for the new index primary shards are allocated");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingTable().index("new_index").shardsWithState(INITIALIZING).size(), equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
new file mode 100644
index 0000000..30ad415
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
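+ * Tests that a started replica is promoted to primary when the node holding the
+ * primary leaves the cluster, and that initializing replicas are removed when
+ * their primary fails.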
+ *
+ */
+public class PrimaryElectionRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class);
+
+ @Test
+ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the backup shard (on node2)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Adding third node and reroute and kill first node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingNodes.node("node1"), nullValue());
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ // verify where the primary is
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ }
+
+ @Test
+ public void testRemovingInitializingReplicasIfPrimariesFails() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ rerouteResult = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ // now, fail one node, while the replica is initializing, and it also holds a primary
+ logger.info("--> fail node with primary");
+ String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(nodeIdRemaining))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(INITIALIZING).get(0).primary(), equalTo(true));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
new file mode 100644
index 0000000..ee47827
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
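+ * Tests that a primary shard is not relocated to another node while a replica
+ * is still recovering from it.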
+ *
+ */
+public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class);
+
+
+ @Test
+ public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+
+ logger.info("start another node, replica will start recovering form primary");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("start another node, make sure the primary is not relocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
new file mode 100644
index 0000000..5cbd22d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaData.Builder;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCase {
+
+ /* This test makes random allocation decisions on a growing and shrinking
+ * cluster, leading to a random distribution of the shards. After a certain
+ * number of iterations the test allows every allocation (unless the same shard
+ * is already allocated on a node) and balances the cluster to reach an
+ * optimal balance. */
+ @Test
+ public void testRandomDecisions() {
+ RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(getRandom());
+ AllocationService strategy = new AllocationService(settingsBuilder().build(), new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ randomAllocationDecider))), new ShardsAllocators(), ClusterInfoService.EMPTY);
+ int indices = between(1, 20);
+ Builder metaBuilder = MetaData.builder();
+ int maxNumReplicas = 1;
+ int totalNumShards = 0;
+ for (int i = 0; i < indices; i++) {
+ int replicas = between(0, 6);
+ maxNumReplicas = Math.max(maxNumReplicas, replicas + 1);
+ int numShards = between(1, 20);
+ totalNumShards += numShards * (replicas + 1);
+ metaBuilder.put(IndexMetaData.builder("INDEX_" + i).numberOfShards(numShards).numberOfReplicas(replicas));
+
+ }
+ MetaData metaData = metaBuilder.build();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < indices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("INDEX_" + i));
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ int numIters = atLeast(20);
+ int nodeIdCounter = 0;
+ int atMostNodes = between(Math.max(1, maxNumReplicas), numIters);
+ final boolean frequentNodes = randomBoolean();
+ for (int i = 0; i < numIters; i++) {
+ ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
+ DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+
+ if (clusterState.nodes().size() <= atMostNodes &&
+ (nodeIdCounter == 0 || (frequentNodes ? frequently() : rarely()))) {
+ int numNodes = atLeast(1);
+ for (int j = 0; j < numNodes; j++) {
+ logger.info("adding node [{}]", nodeIdCounter);
+ newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
+ }
+ }
+
+ if (nodeIdCounter > 1 && rarely()) {
+ int nodeId = between(0, nodeIdCounter - 2);
+ logger.info("removing node [{}]", nodeId);
+ newNodesBuilder.remove("NODE_" + nodeId);
+ }
+
+ stateBuilder.nodes(newNodesBuilder.build());
+ clusterState = stateBuilder.build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
+ .routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ }
+ logger.info("Fill up nodes such that every shard can be allocated");
+ if (clusterState.nodes().size() < maxNumReplicas) {
+ ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
+ DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ for (int j = 0; j < (maxNumReplicas - clusterState.nodes().size()); j++) {
+ logger.info("adding node [{}]", nodeIdCounter);
+ newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
+ }
+ stateBuilder.nodes(newNodesBuilder.build());
+ clusterState = stateBuilder.build();
+ }
+
+
+ randomAllocationDecider.alwaysSayYes = true;
+ logger.info("now say YES to everything");
+ int iterations = 0;
+ do {
+ iterations++;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
+ .routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ } while ((clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 ||
+ clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0) && iterations < 200);
+ logger.info("Done Balancing after [{}] iterations", iterations);
+ // we stop after 200 iterations; if it didn't stabilize by then, something is likely to be wrong
+ assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
+ int shards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size();
+ assertThat(shards, equalTo(totalNumShards));
+ final int numNodes = clusterState.nodes().size();
+ final int upperBound = (int) Math.round(((shards / numNodes) * 1.10));
+ final int lowerBound = (int) Math.round(((shards / numNodes) * 0.90));
+ for (int i = 0; i < nodeIdCounter; i++) {
+ if (clusterState.getRoutingNodes().node("NODE_" + i) == null) {
+ continue;
+ }
+ assertThat(clusterState.getRoutingNodes().node("NODE_" + i).size(), Matchers.anyOf(
+ Matchers.anyOf(equalTo((shards / numNodes) + 1), equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))),
+ Matchers.allOf(Matchers.greaterThanOrEqualTo(lowerBound), Matchers.lessThanOrEqualTo(upperBound))));
+ }
+ }
+
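+ /**
+ * An {@link AllocationDecider} that answers canAllocate/canRebalance/canRemain
+ * randomly until {@code alwaysSayYes} is flipped, at which point it always
+ * returns a YES decision.
+ */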
+ private static final class RandomAllocationDecider extends AllocationDecider {
+
+ private final Random random;
+
+ public RandomAllocationDecider(Random random) {
+ super(ImmutableSettings.EMPTY);
+ this.random = random;
+ }
+
+ public boolean alwaysSayYes = false;
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ private Decision getRandomDecision() {
+ if (alwaysSayYes) {
+ return Decision.YES;
+ }
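+ // nextInt(10): 5-9 -> NO (50%), 4 -> THROTTLE (10%), 1-3 -> YES (30%), 0 -> ALWAYS (10%)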
+ switch (random.nextInt(10)) {
+ case 9:
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ return Decision.NO;
+ case 4:
+ return Decision.THROTTLE;
+ case 3:
+ case 2:
+ case 1:
+ return Decision.YES;
+ default:
+ return Decision.ALWAYS;
+ }
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
new file mode 100644
index 0000000..c3f9173
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
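+ * Tests that rebalancing only kicks in once all shards (primaries and replicas)
+ * are active.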
+ *
+ */
+public class RebalanceAfterActiveTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(RebalanceAfterActiveTests.class);
+
+ @Test
+ public void testRebalanceOnlyAfterAllShardsAreActive() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the replica shards, rebalancing should start");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // rebalancing kicks in: 5 shards stay started while the other 5 relocate towards the empty nodes
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(5));
+
+ logger.info("complete relocation, other half of relocation should happen");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // we now only relocate 3, since 2 remain where they are!
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("complete relocation, thats it!");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ // make sure the relocation ended up even: one shard per node
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.size(), equalTo(1));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
new file mode 100644
index 0000000..ad11807
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
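+ * Tests that a replica is only allocated once its primary has been started.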
+ *
+ */
+public class ReplicaAllocatedAfterPrimaryTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);
+
+ @Test
+ public void testBackupIsAllocatedAfterPrimary() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Start all the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
+ assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
new file mode 100644
index 0000000..c545428
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
@@ -0,0 +1,415 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
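+ * Tests that the internal bookkeeping of {@link RoutingNodes} (inactive shards,
+ * inactive primaries, unassigned primaries) stays consistent while nodes are
+ * added, shards are started, indices are created, and nodes are killed.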
+ *
+ */
+public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(RoutingNodesIntegrityTests.class);
+
+ @Test
+ public void testBalanceAllNodesStarted() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ // all shards are unassigned, so there are no inactive shards or primaries
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ }
+
+ @Test
+ public void testBalanceIncrementallyStartNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceAllNodesStartedAddIndex() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ logger.info("Add new index 3 shards 1 replica");
+
+ prevRoutingTable = routingTable;
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, assign");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Reroute, start the primaries");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Reroute, start the replicas");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+
+ logger.info("kill one node");
+ IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0);
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ // replica got promoted to primary
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Start Recovering shards round 1");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Start Recovering shards round 2");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ }
+
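+ /**
+ * Delegates to {@link RoutingNodes#assertShardStats(RoutingNodes)} so the internal shard-count
+ * bookkeeping of the routing nodes can be verified inside an assertThat call.
+ */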
+ private boolean assertShardStats(RoutingNodes routingNodes) {
+ return RoutingNodes.assertShardStats(routingNodes);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java
new file mode 100644
index 0000000..74106e9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+
+/**
+ * Test helpers for counting shard states across the nodes of a {@link RoutingNodes}.
+ */
+public class RoutingNodesUtils {
+
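+ /** Counts the shards in the given state across all routing nodes. */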
+ public static int numberOfShardsOfType(RoutingNodes nodes, ShardRoutingState state) {
+ int count = 0;
+ for (RoutingNode routingNode : nodes) {
+ count += routingNode.numberOfShardsWithState(state);
+ }
+ return count;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
new file mode 100644
index 0000000..c35959c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link SameShardAllocationDecider}, which prevents copies of the same shard
+ * from being allocated to the same node (and, optionally, to the same host).
+ */
+public class SameShardRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SameShardRoutingTests.class);
+
+ @Test
+ @TestLogging("cluster.routing.allocation:TRACE")
+ public void sameHost() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(SameShardAllocationDecider.SAME_HOST_SETTING, true).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes with the same host");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(new DiscoveryNode("node1", "node1", "test1", "test1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))
+ .put(new DiscoveryNode("node2", "node2", "test1", "test1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
+
+ logger.info("--> start all primary shards, no replica will be started since its on the same host");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(0));
+
+ logger.info("--> add another node, with a different host, replicas will be allocating");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(new DiscoveryNode("node3", "node3", "test2", "test2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING)) {
+ assertThat(shardRouting.currentNodeId(), equalTo("node3"));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
new file mode 100644
index 0000000..e21246b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShardVersioningTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class);
+
+ @Test
+ public void simple() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
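+ // starting a shard bumps its routing version: the test1 primaries move from version 1 to 2,
+ // and the newly initializing replicas are created at that same version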
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(2l));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).version(), equalTo(2l));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).version(), equalTo(1l));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
new file mode 100644
index 0000000..1086670
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link ShardsLimitAllocationDecider} and its per-index total-shards-per-node limit.
+ */
+public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ShardsLimitAllocationTests.class);
+
+ @Test
+ public void indexLevelShardsLimitAllocate() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 2)))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
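+ // 4 primaries plus 4 replicas make 8 shards, but a limit of 2 shards per node on two nodes
+ // caps allocation at 4, leaving all 4 replicas unassigned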
+ assertThat(clusterState.readOnlyRoutingNodes().unassigned().size(), equalTo(4));
+
+ logger.info("Do another reroute, make sure its still not allocated");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ @Test
+ public void indexLevelShardsLimitRemain() {
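+ // the index and primary balance factors are zeroed out so that, presumably, only the
+ // per-node shard count drives rebalancing decisions in this test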
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("Adding one node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), STARTED), equalTo(5));
+
+ logger.info("add another index with 5 shards");
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Add another one node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), STARTED), equalTo(10));
+
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node1")) {
+ assertThat(shardRouting.index(), equalTo("test"));
+ }
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node2")) {
+ assertThat(shardRouting.index(), equalTo("test1"));
+ }
+
+ logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE + " for test, see that things move");
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 3)
+ ))
+ .build();
+
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
+
+ logger.info("reroute after setting");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(3));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(RELOCATING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(RELOCATING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(3));
+ // satisfying the new limit moves two "test" shards off node1, which breaks the balance; the
+ // balancer then relocates two shards from node2 back to node1, since INITIALIZING shards
+ // are taken into account during rebalancing
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // unlike the EvenShardCountAllocator we are now done, since the Balancer is not solely based on the average
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(5));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(5));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
new file mode 100644
index 0000000..a72fd5b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Allocation tests for indices with a single shard and no replicas.
+ */
+public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class);
+
+ @Test
+ public void testSingleIndexStartedShard() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Rerouting again, nothing should change");
+ prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Marking the shard as started");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Starting another node and making sure nothing changed");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Killing node1 where the shard is, checking the shard is relocated");
+
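+ // node1 held the only copy of the shard, so it is reallocated to node2 and has to initialize there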
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+
+ logger.info("Start the shard on node 2");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+ }
+
+ @Test
+ public void testSingleIndexShardFailed() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Marking the shard as failed");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
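+ // the failed copy is removed from node1 and the shard returns to unassigned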
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ }
+
+ @Test
+ public void testMultiIndexEvenDistribution() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ final int numberOfIndices = 50;
+ logger.info("Building initial routing table with " + numberOfIndices + " indices");
+
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ metaDataBuilder.put(IndexMetaData.builder("test" + i).numberOfShards(1).numberOfReplicas(0));
+ }
+ MetaData metaData = metaDataBuilder.build();
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("test" + i));
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding " + (numberOfIndices / 2) + " nodes");
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ List<DiscoveryNode> nodes = newArrayList();
+ for (int i = 0; i < (numberOfIndices / 2); i++) {
+ nodesBuilder.put(newNode("node" + i));
+ }
+ RoutingTable prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+ // make sure each shard is allocated to one of the first 25 nodes (2 shards per node)
+ String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ }
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ Set<String> encounteredIndices = newHashSet();
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(0));
+ assertThat(routingNode.size(), equalTo(2));
+ // make sure we have 2 shards initializing per node on each of the 25 existing nodes
+ int nodeIndex = Integer.parseInt(routingNode.nodeId().substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ // check that no node holds more than one shard of the same index (each index has a single shard)
+ for (MutableShardRouting shardRoutingEntry : routingNode) {
+ assertThat(encounteredIndices, not(hasItem(shardRoutingEntry.index())));
+ encounteredIndices.add(shardRoutingEntry.index());
+ }
+ }
+
+ logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change");
+ nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
+ nodesBuilder.put(newNode("node" + i));
+ }
+ prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
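+ // the shards on the original 25 nodes are still initializing, so adding the empty nodes
+ // does not change the routing table yet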
+ assertThat(prevRoutingTable != routingTable, equalTo(false));
+
+ logger.info("Marking the shard as started");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ int numberOfRelocatingShards = 0;
+ int numberOfStartedShards = 0;
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING)));
+ if (routingTable.index("test" + i).shard(0).shards().get(0).state() == STARTED) {
+ numberOfStartedShards++;
+ } else if (routingTable.index("test" + i).shard(0).shards().get(0).state() == RELOCATING) {
+ numberOfRelocatingShards++;
+ }
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+ // make sure we still have 2 shards, either relocating or started, on the first 25 nodes
+ String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ }
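+ // with 50 shards on 50 nodes, half the shards stay where they started and half relocate
+ // toward the new, empty nodes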
+ assertThat(numberOfRelocatingShards, equalTo(25));
+ assertThat(numberOfStartedShards, equalTo(25));
+ }
+
+ @Test
+ public void testMultiIndexUnevenNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ final int numberOfIndices = 10;
+ logger.info("Building initial routing table with " + numberOfIndices + " indices");
+
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ metaDataBuilder.put(IndexMetaData.builder("test" + i).numberOfShards(1).numberOfReplicas(0));
+ }
+ MetaData metaData = metaDataBuilder.build();
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("test" + i));
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+
+ logger.info("Starting 3 nodes and rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")))
+ .build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ }
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertThat(numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(numberOfIndices));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+
+ logger.info("Start two more nodes, things should remain the same");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4")).put(newNode("node5")))
+ .build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ }
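+ // 10 shards rebalance to 2 per node across 5 nodes: the original nodes held 4/3/3 shards,
+ // so 4 shards relocate toward the 2 new nodes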
+ routingNodes = clusterState.routingNodes();
+ assertThat("4 source shard routing are relocating", numberOfShardsOfType(routingNodes, RELOCATING), equalTo(4));
+ assertThat("4 target shard routing are initializing", numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(4));
+
+ logger.info("Now, mark the relocated as started");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ }
+ routingNodes = clusterState.routingNodes();
+ assertThat(numberOfShardsOfType(routingNodes, STARTED), equalTo(numberOfIndices));
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(2));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
new file mode 100644
index 0000000..82dab0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Allocation tests for an index with a single shard and one replica.
+ */
+public class SingleShardOneReplicaRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class);
+
+ @Test
+ public void testSingleIndexFirstStartPrimaryThenBackups() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well; we make sure they recover from *started* primary shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Kill node1, backup shard should become primary");
+
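+ // Removing node1 promotes the started replica on node2 to primary; the replacement replica has nowhere to go and stays unassigned.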
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ // with only node2 left, which holds the promoted primary, the replica cannot be allocated and stays unassigned
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Start another node, backup shard should start initializing");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well; we make sure that they recover from *started* primary shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
new file mode 100644
index 0000000..897ae5f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class TenShardsOneReplicaRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class);
+
+ @Test
+ public void testSingleIndexFirstStartPrimaryThenBackups() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well; we make sure that they recover from *started* primary shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ }
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10));
+
+ logger.info("Add another node and perform rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(10));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(6));
+
+ logger.info("Start the shards on node 3");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(7));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(7));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(6));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
new file mode 100644
index 0000000..64859c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ThrottlingAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ThrottlingAllocationTests.class);
+
+ @Test
+ public void testPrimaryRecoveryThrottling() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 3)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start one node, do reroute, only 3 should initialize");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(17));
+
+ logger.info("start initializing, another 3 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(14));
+
+ logger.info("start initializing, another 3 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(11));
+
+ logger.info("start initializing, another 1 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+
+ logger.info("start initializing, all primaries should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+ }
+
+ @Test
+ public void testReplicaAndPrimaryRecoveryThrottling() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 3)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start one node, do reroute, only 3 should initialize");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(7));
+
+ logger.info("start initializing, another 2 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+
+ logger.info("start initializing, all primaries should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+
+ logger.info("start another node, replicas should start being allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
+
+ logger.info("start initializing replicas");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+
+ logger.info("start initializing replicas, all should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
new file mode 100644
index 0000000..fd79255
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class UpdateNumberOfReplicasTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class);
+
+ @Test
+ public void testUpdateNumberOfReplicas() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start all the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start all the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+ final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
+ assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+
+ logger.info("add another replica");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(2).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(2).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(2));
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
+
+ logger.info("Add another node and start the added replica");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+
+ logger.info("now remove a replica");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(1));
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+
+ logger.info("do a reroute, should remain the same");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
new file mode 100644
index 0000000..674d849
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -0,0 +1,602 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
+
+ @Test
+ public void diskThresholdTest() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build();
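+ // The watermarks are fractions of used disk: above the 0.7 low watermark a node receives no new shards, above the 0.8 high watermark shards are moved off the node.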
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node1", new DiskUsage("node1", 100, 10)); // 90% used
+ usages.put("node2", new DiskUsage("node2", 100, 35)); // 65% used
+ usages.put("node3", new DiskUsage("node3", 100, 60)); // 40% used
+ usages.put("node4", new DiskUsage("node4", 100, 80)); // 20% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ shardSizes.put("[test][0][r]", 10L);
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
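+ // A stub ClusterInfoService that always hands the decider the canned disk usages and shard sizes above.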
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Primary shard should be initializing, replica should not
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that we're able to start the primary
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ // Assert that node1 didn't get any shards because its disk usage is too high
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica couldn't be started since node1 doesn't have enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+
+ logger.info("--> adding node3");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica is initialized now that node3 is available with enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that both the primary and the replica are started now, and that node1 still holds no shards
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing decider settings");
+
+ // Set the low threshold to 60 instead of 70
+ // Set the high threshold to 70 instead of 80
+ // node2 now should not have new shards allocated to it, but shards can remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.6)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing settings again");
+
+ // Set the low threshold to 50 instead of 60
+ // Set the high threshold to 60 instead of 70
+ // node2 now should not have new shards allocated to it, and shards cannot remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> adding node4");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // The shard on node2 is relocating to node4: its target copy is initializing while the other shard stays started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> apply INITIALIZING shards");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Node4 is available now, so the shard is moved off of node2
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+ }
+
+ @Test
+ public void diskThresholdWithAbsoluteSizesTest() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "20b").build();
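+ // With absolute values the watermarks are minimum free space: nodes with less than 30 bytes free take no new shards, and shards are moved off nodes with less than 20 bytes free.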
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node1", new DiskUsage("node1", 100, 10)); // 90% used
+ usages.put("node2", new DiskUsage("node2", 100, 35)); // 65% used
+ usages.put("node3", new DiskUsage("node3", 100, 60)); // 40% used
+ usages.put("node4", new DiskUsage("node4", 100, 80)); // 20% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ shardSizes.put("[test][0][r]", 10L);
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Primary shard should be initializing, replica should not
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that we're able to start the primary
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ // Assert that node1 didn't get any shards because its disk usage is too high
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica couldn't be started since node1 doesn't have enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+
+ logger.info("--> adding node3");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica is initialized now that node3 is available with enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that both the primary and the replica are started now, and that node1 still holds no shards
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing decider settings");
+
+ // Set the low watermark to 40 bytes free instead of 30
+ // Set the high watermark to 30 bytes free instead of 20
+ // node2 now should not have new shards allocated to it, but shards can remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing settings again");
+
+ // Set the low watermark to 50 bytes free instead of 40
+ // Set the high watermark to 40 bytes free instead of 30
+ // node2 now should not have new shards allocated to it, and shards cannot remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> adding node4");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // The shard on node2 is relocating to node4: its target copy is initializing while the other shard stays started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> apply INITIALIZING shards");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Node4 is available now, so the shard is moved off of node2
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+ }
+
+ @Test
+ public void diskThresholdWithShardSizes() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.71).build();
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node1", new DiskUsage("node1", 100, 31)); // 69% used
+ usages.put("node2", new DiskUsage("node2", 100, 1)); // 99% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
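+ // node1 is only 69% used, but adding the 10-byte shard would push it to 79%, past the 70% low watermark, so even it cannot take the shard.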
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("--> adding node1");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Neither node can take the shard: node2 is already 99% used, and on node1 the 10-byte shard would push usage from 69% to 79%, past the 0.71 high watermark
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ // No shards are started because no node has enough free disk for the allocation
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ }
+
+ @Test
+ public void unknownDiskUsageTest() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build();
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ shardSizes.put("[test][0][r]", 10L); // 10 bytes
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("--> adding node1");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // The shard can't be allocated to node1 (or node3): node1 has no usage data, so it is estimated at the 75% average, above the 70% low watermark, and node3 is 100% used
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ // No shards are started: node1 doesn't have enough (estimated) free disk
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ }
+
+ @Test
+ public void averageUsageUnitTest() {
+ RoutingNode rn = new RoutingNode("node1", newNode("node1"));
+ DiskThresholdDecider decider = new DiskThresholdDecider(ImmutableSettings.EMPTY);
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
+
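+ // node1 has no reported usage, so averageUsage presumably falls back to the mean of
+ // the known nodes: total = (100 + 100) / 2 = 100, free = (50 + 0) / 2 = 25.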
+ DiskUsage node1Usage = decider.averageUsage(rn, usages);
+ assertThat(node1Usage.getTotalBytes(), equalTo(100L));
+ assertThat(node1Usage.getFreeBytes(), equalTo(25L));
+ }
+
+ @Test
+ public void freeDiskPercentageAfterShardAssignedUnitTest() {
+ RoutingNode rn = new RoutingNode("node1", newNode("node1"));
+ DiskThresholdDecider decider = new DiskThresholdDecider(ImmutableSettings.EMPTY);
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
+
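+ // Expected arithmetic: a node with 30 of 100 bytes free that takes an 11-byte shard
+ // is left with 19 bytes free, i.e. 19.0% free disk.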
+ Double after = decider.freeDiskPercentageAfterShardAssigned(new DiskUsage("node2", 100, 30), 11L);
+ assertThat(after, equalTo(19.0));
+ }
+
+ public void logShardStates(ClusterState state) {
+ RoutingNodes rn = state.routingNodes();
+ logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shards(new Predicate<MutableShardRouting>() {
+ @Override
+ public boolean apply(org.elasticsearch.cluster.routing.MutableShardRouting input) {
+ return true;
+ }
+ }).size(),
+ rn.shardsWithState(UNASSIGNED).size(),
+ rn.shardsWithState(INITIALIZING).size(),
+ rn.shardsWithState(RELOCATING).size(),
+ rn.shardsWithState(STARTED).size());
+ logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shardsWithState(UNASSIGNED),
+ rn.shardsWithState(INITIALIZING),
+ rn.shardsWithState(RELOCATING),
+ rn.shardsWithState(STARTED));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
new file mode 100644
index 0000000..595b258
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(EnableAllocationTests.class);
+
+ @Test
+ public void testClusterEnableNone() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name())
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ }
+
+ @Test
+ public void testClusterEnableOnlyPrimaries() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.PRIMARIES.name())
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
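+ // With allocation restricted to PRIMARIES, the replica should never be assigned, so
+ // nothing is left initializing once the primary has started.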
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ }
+
+ @Test
+ public void testIndexEnableNone() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("disabled").settings(ImmutableSettings.builder()
+ .put(INDEX_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()))
+ .numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("enabled").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("disabled"))
+ .addAsNew(metaData.index("enabled"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> verify only enabled index has been routed");
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("disabled", STARTED).size(), equalTo(0));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
new file mode 100644
index 0000000..505ccf2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.serialization;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ClusterSerializationTests extends ElasticsearchAllocationTestCase {
+
+ @Test
+ public void testClusterStateSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+
+ ClusterState serializedClusterState = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), newNode("node1"));
+
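+ // Comparing prettyPrint() output verifies the routing table survives the byte round
+ // trip without relying on equals(), which these classes may not override.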
+ assertThat(serializedClusterState.routingTable().prettyPrint(), equalTo(clusterState.routingTable().prettyPrint()));
+ }
+
+
+ @Test
+ public void testRoutingTableSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build();
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ RoutingTable source = strategy.reroute(clusterState).routingTable();
+
+ BytesStreamOutput outStream = new BytesStreamOutput();
+ RoutingTable.Builder.writeTo(source, outStream);
+ BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes(), false);
+ RoutingTable target = RoutingTable.Builder.readFrom(inStream);
+
+ assertThat(target.prettyPrint(), equalTo(source.prettyPrint()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
new file mode 100644
index 0000000..4504b6b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.serialization;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ *
+ */
+public class ClusterStateToStringTests extends ElasticsearchAllocationTestCase {
+ @Test
+ public void testClusterStateSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test_idx").numberOfShards(10).numberOfReplicas(1))
+ .put(IndexTemplateMetaData.builder("test_template").build())
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test_idx"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", DummyTransportAddress.INSTANCE, Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build();
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+
+ String clusterStateString = clusterState.toString();
+ assertNotNull(clusterStateString);
+
+ assertThat(clusterStateString, containsString("test_idx"));
+ assertThat(clusterStateString, containsString("test_template"));
+ assertThat(clusterStateString, containsString("node_foo"));
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java
new file mode 100644
index 0000000..f801530
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+@ClusterScope(scope = TEST)
+public class ClusterSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void clusterNonExistingSettingsUpdate() {
+ String key1 = "no_idea_what_you_are_talking_about";
+ int value1 = 10;
+
+ ClusterUpdateSettingsResponse response = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(ImmutableSettings.builder().put(key1, value1).build())
+ .get();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable());
+ }
+
+ @Test
+ public void clusterSettingsUpdateResponse() {
+ String key1 = "indices.cache.filter.size";
+ int value1 = 10;
+
+ String key2 = DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION;
+ boolean value2 = true;
+
+ Settings transientSettings1 = ImmutableSettings.builder().put(key1, value1).build();
+ Settings persistentSettings1 = ImmutableSettings.builder().put(key2, value2).build();
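+ // Each key should be echoed back only in the scope it was submitted with: key1 only
+ // among the transient settings, key2 only among the persistent ones.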
+
+ ClusterUpdateSettingsResponse response1 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings1)
+ .setPersistentSettings(persistentSettings1)
+ .execute()
+ .actionGet();
+
+ assertAcked(response1);
+ assertThat(response1.getTransientSettings().get(key1), notNullValue());
+ assertThat(response1.getTransientSettings().get(key2), nullValue());
+ assertThat(response1.getPersistentSettings().get(key1), nullValue());
+ assertThat(response1.getPersistentSettings().get(key2), notNullValue());
+
+ Settings transientSettings2 = ImmutableSettings.builder().put(key1, value1).put(key2, value2).build();
+ Settings persistentSettings2 = ImmutableSettings.EMPTY;
+
+ ClusterUpdateSettingsResponse response2 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings2)
+ .setPersistentSettings(persistentSettings2)
+ .execute()
+ .actionGet();
+
+ assertAcked(response2);
+ assertThat(response2.getTransientSettings().get(key1), notNullValue());
+ assertThat(response2.getTransientSettings().get(key2), notNullValue());
+ assertThat(response2.getPersistentSettings().get(key1), nullValue());
+ assertThat(response2.getPersistentSettings().get(key2), nullValue());
+
+ Settings transientSettings3 = ImmutableSettings.EMPTY;
+ Settings persistentSettings3 = ImmutableSettings.builder().put(key1, value1).put(key2, value2).build();
+
+ ClusterUpdateSettingsResponse response3 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings3)
+ .setPersistentSettings(persistentSettings3)
+ .execute()
+ .actionGet();
+
+ assertAcked(response3);
+ assertThat(response3.getTransientSettings().get(key1), nullValue());
+ assertThat(response3.getTransientSettings().get(key2), nullValue());
+ assertThat(response3.getPersistentSettings().get(key1), notNullValue());
+ assertThat(response3.getPersistentSettings().get(key2), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java b/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java
new file mode 100644
index 0000000..e87211f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SettingsValidatorTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testValidators() throws Exception {
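+ // Contract exercised below: validate() returns null for an acceptable value and a
+ // non-null error message for a rejected one.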
+ assertThat(Validator.EMPTY.validate("", "anything goes"), nullValue());
+
+ assertThat(Validator.TIME.validate("", "10m"), nullValue());
+ assertThat(Validator.TIME.validate("", "10g"), notNullValue());
+ assertThat(Validator.TIME.validate("", "bad timing"), notNullValue());
+
+ assertThat(Validator.BYTES_SIZE.validate("", "10m"), nullValue());
+ assertThat(Validator.BYTES_SIZE.validate("", "10g"), nullValue());
+ assertThat(Validator.BYTES_SIZE.validate("", "bad"), notNullValue());
+
+ assertThat(Validator.FLOAT.validate("", "10.2"), nullValue());
+ assertThat(Validator.FLOAT.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "0.0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "-1.0"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.DOUBLE.validate("", "10.2"), nullValue());
+ assertThat(Validator.DOUBLE.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2"), nullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "2.0"), nullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "1.0"), notNullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "0.0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "-1.0"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.INTEGER.validate("", "10"), nullValue());
+ assertThat(Validator.INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.INTEGER_GTE_2.validate("", "2"), nullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "1"), notNullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "0"), notNullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "1"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "-1"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "2"), nullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "1"), nullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "0"), notNullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "-1"), notNullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "10.2"), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java
new file mode 100644
index 0000000..2233059
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.shards;
+
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=2)
+public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ switch(nodeOrdinal) {
+ case 1:
+ return settingsBuilder().put("node.tag", "B").build();
+ case 0:
+ return settingsBuilder().put("node.tag", "A").build();
+ }
+ return super.nodeSettings(nodeOrdinal);
+ }
+
+ @Test
+ public void testSingleShardAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "1").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
+ ensureGreen();
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(0));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(1));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ response = client().admin().cluster().prepareSearchShards("test").setRouting("A").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(0));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(1));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ }
+
+ @Test
+ public void testMultipleShardsSingleNodeAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
+ ensureGreen();
+
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(4));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ response = client().admin().cluster().prepareSearchShards("test").setRouting("ABC").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+
+ response = client().admin().cluster().prepareSearchShards("test").setPreference("_shards:2").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(2));
+ }
+
+ @Test
+ public void testMultipleIndicesAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test1").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().indices().prepareCreate("test2").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().indices().prepareAliases()
+ .addAliasAction(AliasAction.newAddAliasAction("test1", "routing_alias").routing("ABC"))
+ .addAliasAction(AliasAction.newAddAliasAction("test2", "routing_alias").routing("EFG"))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("routing_alias").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(2));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(2));
+ assertThat(response.getGroups()[1].getShards().length, equalTo(2));
+ boolean seenTest1 = false;
+ boolean seenTest2 = false;
+ for (ClusterSearchShardsGroup group : response.getGroups()) {
+ if (group.getIndex().equals("test1")) {
+ seenTest1 = true;
+ assertThat(group.getShards().length, equalTo(2));
+ } else if (group.getIndex().equals("test2")) {
+ seenTest2 = true;
+ assertThat(group.getShards().length, equalTo(2));
+ } else {
+ fail("unexpected index in search shards response: " + group.getIndex());
+ }
+ }
+ assertThat(seenTest1, equalTo(true));
+ assertThat(seenTest2, equalTo(true));
+ assertThat(response.getNodes().length, equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
new file mode 100644
index 0000000..137a8bc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
@@ -0,0 +1,361 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.structure;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
+import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
+import org.elasticsearch.cluster.routing.operation.plain.PlainOperationRouting;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
+
+ @Test
+ public void testEmptyIterator() {
+ ShardShuffler shuffler = new RotationShardShuffler(0);
+ ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.<ShardRouting>of()));
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.firstOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+
+ shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.<ShardRouting>of()));
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.firstOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+
+ shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.<ShardRouting>of()));
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.firstOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+
+ shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.<ShardRouting>of()));
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.firstOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ }
+
+ @Test
+ public void testIterator1() {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(2))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .build();
+
+ ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsIt(0);
+ assertThat(shardIterator.size(), equalTo(3));
+ ShardRouting firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(3));
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ assertThat(shardIterator.remaining(), equalTo(3));
+ ShardRouting shardRouting1 = shardIterator.nextOrNull();
+ assertThat(firstRouting, sameInstance(shardRouting1));
+ assertThat(shardRouting1, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(2));
+ ShardRouting shardRouting2 = shardIterator.nextOrNull();
+ assertThat(shardRouting2, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(1));
+ assertThat(shardRouting2, not(sameInstance(shardRouting1)));
+ ShardRouting shardRouting3 = shardIterator.nextOrNull();
+ assertThat(shardRouting3, notNullValue());
+ assertThat(shardRouting3, not(sameInstance(shardRouting1)));
+ assertThat(shardRouting3, not(sameInstance(shardRouting2)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ }
+
+ @Test
+ public void testIterator2() {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
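+ // shardsIt(seed) presumably rotates the copy list by the seed, so even seeds should
+ // yield one ordering of the two copies and odd seeds the reversed one.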
+ ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsIt(0);
+ assertThat(shardIterator.size(), equalTo(2));
+ ShardRouting firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(2));
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ assertThat(shardIterator.remaining(), equalTo(2));
+ ShardRouting shardRouting1 = shardIterator.nextOrNull();
+ assertThat(shardRouting1, sameInstance(firstRouting));
+ assertThat(shardRouting1, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(1));
+ ShardRouting shardRouting2 = shardIterator.nextOrNull();
+ assertThat(shardRouting2, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardRouting2, not(sameInstance(shardRouting1)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(1);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting3 = shardIterator.nextOrNull();
+ assertThat(firstRouting, sameInstance(shardRouting3));
+ assertThat(shardRouting3, notNullValue());
+ ShardRouting shardRouting4 = shardIterator.nextOrNull();
+ assertThat(shardRouting4, notNullValue());
+ assertThat(shardRouting4, not(sameInstance(shardRouting3)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting1, not(sameInstance(shardRouting3)));
+ assertThat(shardRouting2, not(sameInstance(shardRouting4)));
+ assertThat(shardRouting1, sameInstance(shardRouting4));
+ assertThat(shardRouting2, sameInstance(shardRouting3));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(2);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting5 = shardIterator.nextOrNull();
+ assertThat(shardRouting5, sameInstance(firstRouting));
+ assertThat(shardRouting5, notNullValue());
+ ShardRouting shardRouting6 = shardIterator.nextOrNull();
+ assertThat(shardRouting6, notNullValue());
+ assertThat(shardRouting6, not(sameInstance(shardRouting5)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting5, sameInstance(shardRouting1));
+ assertThat(shardRouting6, sameInstance(shardRouting2));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(3);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting7 = shardIterator.nextOrNull();
+ assertThat(shardRouting7, sameInstance(firstRouting));
+ assertThat(shardRouting7, notNullValue());
+ ShardRouting shardRouting8 = shardIterator.nextOrNull();
+ assertThat(shardRouting8, notNullValue());
+ assertThat(shardRouting8, not(sameInstance(shardRouting7)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting7, sameInstance(shardRouting3));
+ assertThat(shardRouting8, sameInstance(shardRouting4));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(4);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting9 = shardIterator.nextOrNull();
+ assertThat(shardRouting9, sameInstance(firstRouting));
+ assertThat(shardRouting9, notNullValue());
+ ShardRouting shardRouting10 = shardIterator.nextOrNull();
+ assertThat(shardRouting10, notNullValue());
+ assertThat(shardRouting10, not(sameInstance(shardRouting9)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting9, sameInstance(shardRouting5));
+ assertThat(shardRouting10, sameInstance(shardRouting6));
+ }
+
+ @Test
+ public void testRandomRouting() {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsRandomIt();
+ ShardRouting shardRouting1 = shardIterator.nextOrNull();
+ assertThat(shardRouting1, notNullValue());
+ assertThat(shardIterator.nextOrNull(), notNullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ shardIterator = routingTable.index("test1").shard(0).shardsRandomIt();
+ ShardRouting shardRouting2 = shardIterator.nextOrNull();
+ assertThat(shardRouting2, notNullValue());
+ ShardRouting shardRouting3 = shardIterator.nextOrNull();
+ assertThat(shardRouting3, notNullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardRouting1, not(sameInstance(shardRouting2)));
+ assertThat(shardRouting1, sameInstance(shardRouting3));
+ }
+
+ @Test
+ public void testAttributePreferenceRouting() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone")
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "rack_1", "zone", "zone1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "rack_2", "zone", "zone2")))
+ .localNodeId("node1")
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // after all are started, check routing iteration
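+ // The local node is node1 (rack_1), so an iterator preferring rack_id should return
+ // node1's copy first and node2's copy second on every pass.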
+ ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
+ ShardRouting shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node1"));
+ shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node2"));
+
+ shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
+ shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node1"));
+ shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node2"));
+ }
+
+
+ @Test
+ public void testShardsAndPreferNodeRouting() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .localNodeId("node1")
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ PlainOperationRouting operationRouting = new PlainOperationRouting(ImmutableSettings.Builder.EMPTY_SETTINGS, new DjbHashFunction(), new AwarenessAllocationDecider());
+
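+ // Preference syntax used below: "_shards:N" restricts the search to shard N, and a
+ // ";_prefer_node:nodeX" suffix asks for that node's copy first.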
+ GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:1");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(1));
+
+ // Check node preference: first query without a preferred node to see that the chosen node switches between requests
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ String firstRoundNodeId = shardIterators.iterator().next().nextOrNull().currentNodeId();
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), not(equalTo(firstRoundNodeId)));
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1"));
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1"));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/codecs/CodecTests.java b/src/test/java/org/elasticsearch/codecs/CodecTests.java
new file mode 100644
index 0000000..02c6f69
--- /dev/null
+++ b/src/test/java/org/elasticsearch/codecs/CodecTests.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.codecs;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class CodecTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testFieldsWithCustomPostingsFormat() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
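+ // field1 is mapped to a custom postings format named "test1", which the index settings below bind to Lucene's "pulsing" format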
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("postings_format", "test1").field("index_options", "docs").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.codec.postings_format.test1.type", "pulsing")
+ ).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ try {
+ client().prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.getMessage(), endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ }
+
+ @Test
+ public void testIndexingWithSimpleTextCodec() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
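+ // SimpleText is Lucene's plain-text codec (intended for debugging); indexing and phrase queries should behave as with the default codec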
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.codec", "SimpleText")
+ ).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ try {
+ client().prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.getMessage(), endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ }
+
+ @Test
+ public void testCustomDocValuesFormat() throws IOException {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
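+ // _version uses the built-in "disk" doc values format directly, while "field" goes through the custom name "dvf" resolved via the index settings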
+ client().admin().indices().prepareCreate("test")
+ .addMapping("test", jsonBuilder().startObject().startObject("test")
+ .startObject("_version").field("doc_values_format", "disk").endObject()
+ .startObject("properties").startObject("field").field("type", "long").field("doc_values_format", "dvf").endObject().endObject()
+ .endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.dvf.type", "disk")
+ .build());
+
+ for (int i = 10; i >= 0; --i) {
+ client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", randomLong()).setRefresh(i == 0 || rarely()).execute().actionGet();
+ }
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).addSort(new FieldSortBuilder("field")).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/BooleansTests.java b/src/test/java/org/elasticsearch/common/BooleansTests.java
new file mode 100644
index 0000000..7440d8c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/BooleansTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+public class BooleansTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIsBoolean() {
+ String[] booleans = new String[]{"true", "false", "on", "off", "yes", "no", "0", "1"};
+ String[] notBooleans = new String[]{"11", "00", "sdfsdfsf", "F", "T"};
+
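+ // each value is embedded between a prefix and a suffix to exercise the offset/length based parsing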
+ for (String b : booleans) {
+ String t = "prefix" + b + "suffix";
+ assertThat("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), b.length()), Matchers.equalTo(true));
+ }
+
+ for (String nb : notBooleans) {
+ String t = "prefix" + nb + "suffix";
+ assertThat("recognized [" + nb + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), nb.length()), Matchers.equalTo(false));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/src/test/java/org/elasticsearch/common/ParseFieldTests.java
new file mode 100644
index 0000000..d1dca3a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/ParseFieldTests.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.EnumSet;
+
+import static org.hamcrest.CoreMatchers.*;
+
+public class ParseFieldTests extends ElasticsearchTestCase {
+
+ public void testParse() {
+ String[] values = new String[]{"foo_bar", "fooBar"};
+ ParseField field = new ParseField(randomFrom(values));
+ String[] deprecated = new String[]{"barFoo", "bar_foo"};
+ ParseField withDeprecations = field.withDeprecation("Foobar", randomFrom(deprecated));
+ assertThat(field, not(sameInstance(withDeprecations)));
+ assertThat(field.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(field.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
+ assertThat(field.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(false));
+ assertThat(field.match("barFoo", ParseField.EMPTY_FLAGS), is(false));
+
+
+ assertThat(withDeprecations.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(withDeprecations.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
+ assertThat(withDeprecations.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(withDeprecations.match("barFoo", ParseField.EMPTY_FLAGS), is(true));
+
+ // now with strict mode
+ EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT);
+ assertThat(field.match(randomFrom(values), flags), is(true));
+ assertThat(field.match("foo bar", flags), is(false));
+ assertThat(field.match(randomFrom(deprecated), flags), is(false));
+ assertThat(field.match("barFoo", flags), is(false));
+
+
+ assertThat(withDeprecations.match(randomFrom(values), flags), is(true));
+ assertThat(withDeprecations.match("foo bar", flags), is(false));
+ try {
+ withDeprecations.match(randomFrom(deprecated), flags);
+ fail("expected the deprecated name to be rejected in strict mode");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+
+ try {
+ withDepredcations.match("barFoo", flags);
+ fail();
+ } catch (ElasticsearchIllegalArgumentException ex) {
+
+ }
+
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/StringsTests.java b/src/test/java/org/elasticsearch/common/StringsTests.java
new file mode 100644
index 0000000..e6f75aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/StringsTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+public class StringsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testToCamelCase() {
+ assertEquals("foo", Strings.toCamelCase("foo"));
+ assertEquals("fooBar", Strings.toCamelCase("fooBar"));
+ assertEquals("FooBar", Strings.toCamelCase("FooBar"));
+ assertEquals("fooBar", Strings.toCamelCase("foo_bar"));
+ assertEquals("fooBarFooBar", Strings.toCamelCase("foo_bar_foo_bar"));
+ assertEquals("fooBar", Strings.toCamelCase("foo_bar_"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/TableTests.java b/src/test/java/org/elasticsearch/common/TableTests.java
new file mode 100644
index 0000000..919e1c4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/TableTests.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+public class TableTests extends ElasticsearchTestCase {
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnStartRowWithoutHeader() {
+ Table table = new Table();
+ table.startRow();
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnEndHeadersWithoutStart() {
+ Table table = new Table();
+ table.endHeaders();
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnAddCellWithoutHeader() {
+ Table table = new Table();
+ table.addCell("error");
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnAddCellWithoutRow() {
+ Table table = this.getTableWithHeaders();
+ table.addCell("error");
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnEndRowWithoutStart() {
+ Table table = this.getTableWithHeaders();
+ table.endRow();
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnLessCellsThanDeclared() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.endRow(true);
+ }
+
+ @Test
+ public void testOnLessCellsThanDeclaredUnchecked() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.endRow(false);
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnMoreCellsThanDeclared() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.addCell("bar");
+ table.addCell("foobar");
+ }
+
+ @Test
+ public void testSimple() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo1");
+ table.addCell("bar1");
+ table.endRow();
+ table.startRow();
+ table.addCell("foo2");
+ table.addCell("bar2");
+ table.endRow();
+
+ // Check headers
+ List<Table.Cell> headers = table.getHeaders();
+ assertEquals(2, headers.size());
+ assertEquals("foo", headers.get(0).value.toString());
+ assertEquals(2, headers.get(0).attr.size());
+ assertEquals("f", headers.get(0).attr.get("alias"));
+ assertEquals("foo", headers.get(0).attr.get("desc"));
+ assertEquals("bar", headers.get(1).value.toString());
+ assertEquals(2, headers.get(1).attr.size());
+ assertEquals("b", headers.get(1).attr.get("alias"));
+ assertEquals("bar", headers.get(1).attr.get("desc"));
+
+ // Check rows
+ List<List<Table.Cell>> rows = table.getRows();
+ assertEquals(2, rows.size());
+ List<Table.Cell> row = rows.get(0);
+ assertEquals("foo1", row.get(0).value.toString());
+ assertEquals("bar1", row.get(1).value.toString());
+ row = rows.get(1);
+ assertEquals("foo2", row.get(0).value.toString());
+ assertEquals("bar2", row.get(1).value.toString());
+
+ // Check getAsMap
+ Map<String, List<Table.Cell>> map = table.getAsMap();
+ assertEquals(2, map.size());
+ row = map.get("foo");
+ assertEquals("foo1", row.get(0).value.toString());
+ assertEquals("foo2", row.get(1).value.toString());
+ row = map.get("bar");
+ assertEquals("bar1", row.get(0).value.toString());
+ assertEquals("bar2", row.get(1).value.toString());
+
+ // Check getHeaderMap
+ Map<String, Table.Cell> headerMap = table.getHeaderMap();
+ assertEquals(2, headerMap.size());
+ Table.Cell cell = headerMap.get("foo");
+ assertEquals("foo", cell.value.toString());
+ cell = headerMap.get("bar");
+ assertEquals("bar", cell.value.toString());
+
+ // Check findHeaderByName
+ cell = table.findHeaderByName("foo");
+ assertEquals("foo", cell.value.toString());
+ cell = table.findHeaderByName("missing");
+ assertNull(cell);
+ }
+
+ private Table getTableWithHeaders() {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("foo", "alias:f;desc:foo");
+ table.addCell("bar", "alias:b;desc:bar");
+ table.endHeaders();
+ return table;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java
new file mode 100644
index 0000000..5565cd4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.breaker;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the Memory Aggregating Circuit Breaker
+ */
+public class MemoryCircuitBreakerTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThreadedUpdatesToBreaker() throws Exception {
+ final int NUM_THREADS = 5;
+ final int BYTES_PER_THREAD = 1000;
+ final Thread[] threads = new Thread[NUM_THREADS];
+ final AtomicBoolean tripped = new AtomicBoolean(false);
+ final AtomicReference<Throwable> lastException = new AtomicReference<Throwable>(null);
+
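+ // the limit is one byte less than the total bytes added across threads, so the breaker should trip exactly once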
+ final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue((BYTES_PER_THREAD * NUM_THREADS) - 1), 1.0, logger);
+
+ for (int i = 0; i < NUM_THREADS; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < BYTES_PER_THREAD; j++) {
+ try {
+ breaker.addEstimateBytesAndMaybeBreak(1L);
+ } catch (CircuitBreakingException e) {
+ if (tripped.get()) {
+ assertThat("tripped too many times", true, equalTo(false));
+ } else {
+ assertThat(tripped.compareAndSet(false, true), equalTo(true));
+ }
+ } catch (Throwable e2) {
+ lastException.set(e2);
+ }
+ }
+ }
+ });
+
+ threads[i].start();
+ }
+
+ for (Thread t : threads) {
+ t.join();
+ }
+
+ assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
+ assertThat("breaker was tripped exactly once", tripped.get(), equalTo(true));
+ }
+
+ @Test
+ public void testConstantFactor() throws Exception {
+ final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue(15), 1.6, logger);
+
+ // add only 7 bytes
+ breaker.addWithoutBreaking(7);
+
+ try {
+ // this won't actually add it because it trips the breaker
+ breaker.addEstimateBytesAndMaybeBreak(3);
+ fail("should never reach this");
+ } catch (CircuitBreakingException cbe) {
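+ // expected: with the 1.6 overhead factor, (7 + 3) * 1.6 = 16 exceeds the 15 byte limit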
+ }
+
+ // shouldn't throw an exception
+ breaker.addEstimateBytesAndMaybeBreak(2);
+
+ assertThat(breaker.getUsed(), equalTo(9L));
+
+ // adding 3 more bytes (now at 12)
+ breaker.addWithoutBreaking(3);
+
+ try {
+ // Adding no bytes still breaks
+ breaker.addEstimateBytesAndMaybeBreak(0);
+ fail("should never reach this");
+ } catch (CircuitBreakingException cbe) {
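+ // expected: 12 * 1.6 = 19.2 already exceeds the 15 byte limit, so even adding zero bytes trips the breaker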
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java b/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java
new file mode 100644
index 0000000..65aa51c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Ordering;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Iterator;
+import java.util.List;
+
+public class Iterators2Tests extends ElasticsearchTestCase {
+
+ public void testDeduplicateSorted() {
+ final List<String> list = Lists.newArrayList();
+ for (int i = randomInt(100); i >= 0; --i) {
+ final int frequency = randomIntBetween(1, 10);
+ final String s = randomAsciiOfLength(randomIntBetween(2, 20));
+ for (int j = 0; j < frequency; ++j) {
+ list.add(s);
+ }
+ }
+ CollectionUtil.introSort(list);
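+ // deduplicateSorted requires sorted input; a sorted set of the same values yields the expected unique ordering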
+ final List<String> deduplicated = Lists.newArrayList();
+ for (Iterator<String> it = Iterators2.deduplicateSorted(list.iterator(), Ordering.natural()); it.hasNext(); ) {
+ deduplicated.add(it.next());
+ }
+ assertEquals(Lists.newArrayList(Sets.newTreeSet(list)), deduplicated);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java b/src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java
new file mode 100644
index 0000000..9b63cbc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+/**
+ *
+ */
+public class CompressedStringTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTestsLZF() throws IOException {
+ simpleTests("lzf");
+ }
+
+ public void simpleTests(String compressor) throws IOException {
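+ // configure the given compressor as the default, then check that compression round-trips and preserves equality semantics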
+ CompressorFactory.configure(ImmutableSettings.settingsBuilder().put("compress.default.type", compressor).build());
+ String str = "this is a simple string";
+ CompressedString cstr = new CompressedString(str);
+ assertThat(cstr.string(), equalTo(str));
+ assertThat(new CompressedString(str), equalTo(cstr));
+
+ String str2 = "this is a simple string 2";
+ CompressedString cstr2 = new CompressedString(str2);
+ assertThat(cstr2.string(), not(equalTo(str)));
+ assertThat(new CompressedString(str2), not(equalTo(cstr)));
+ assertThat(new CompressedString(str2), equalTo(cstr2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java
new file mode 100644
index 0000000..55ff0bd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+
+
+/**
+ * Tests for {@link GeoHashUtils}
+ */
+public class GeoHashTests extends ElasticsearchTestCase {
+
+
+ @Test
+ public void testGeohashAsLongRoutines() {
+ // Ensure that, for all points at all supported levels of precision,
+ // the long encoding of a geohash is compatible with its
+ // String-based counterpart
+ for (double lat = -90; lat < 90; lat++) {
+ for (double lng = -180; lng < 180; lng++) {
+ for (int p = 1; p <= 12; p++) {
+ long geoAsLong = GeoHashUtils.encodeAsLong(lat, lng, p);
+ String geohash = GeoHashUtils.encode(lat, lng, p);
+
+ String geohashFromLong = GeoHashUtils.toString(geoAsLong);
+ assertEquals(geohash, geohashFromLong);
+ GeoPoint pos = GeoHashUtils.decode(geohash);
+ GeoPoint pos2 = GeoHashUtils.decode(geoAsLong);
+ assertEquals(pos, pos2);
+ }
+ }
+ }
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
new file mode 100644
index 0000000..31b2900
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.spatial4j.core.shape.jts.JtsPoint;
+import com.vividsolutions.jts.geom.*;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+
+/**
+ * Tests for {@link GeoJSONShapeParser}
+ */
+public class GeoJSONShapeParserTests extends ElasticsearchTestCase {
+
+ private final static GeometryFactory GEOMETRY_FACTORY = new GeometryFactory();
+
+ @Test
+ public void testParse_simplePoint() throws IOException {
+ String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
+ .startArray("coordinates").value(100.0).value(0.0).endArray()
+ .endObject().string();
+
+ Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+ assertGeometryEquals(new JtsPoint(expected, ShapeBuilder.SPATIAL_CONTEXT), pointGeoJson);
+ }
+
+ @Test
+ public void testParse_lineString() throws IOException {
+ String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString")
+ .startArray("coordinates")
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> lineCoordinates = new ArrayList<Coordinate>();
+ lineCoordinates.add(new Coordinate(100, 0));
+ lineCoordinates.add(new Coordinate(101, 1));
+
+ LineString expected = GEOMETRY_FACTORY.createLineString(
+ lineCoordinates.toArray(new Coordinate[lineCoordinates.size()]));
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), lineGeoJson);
+ }
+
+ @Test
+ public void testParse_polygonNoHoles() throws IOException {
+ String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null);
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), polygonGeoJson);
+ }
+
+ @Test
+ public void testParse_polygonWithHole() throws IOException {
+ String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .endArray()
+ .startArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .startArray().value(100.2).value(0.2).endArray()
+ .startArray().value(100.8).value(0.2).endArray()
+ .startArray().value(100.8).value(0.8).endArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ List<Coordinate> holeCoordinates = new ArrayList<Coordinate>();
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(
+ shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ LinearRing[] holes = new LinearRing[1];
+ holes[0] = GEOMETRY_FACTORY.createLinearRing(
+ holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+ Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes);
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), polygonGeoJson);
+ }
+
+ @Test
+ public void testParse_multiPoint() throws IOException {
+ String multiPointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPoint")
+ .startArray("coordinates")
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> multiPointCoordinates = new ArrayList<Coordinate>();
+ multiPointCoordinates.add(new Coordinate(100, 0));
+ multiPointCoordinates.add(new Coordinate(101, 1));
+
+ MultiPoint expected = GEOMETRY_FACTORY.createMultiPoint(
+ multiPointCoordinates.toArray(new Coordinate[multiPointCoordinates.size()]));
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), multiPointGeoJson);
+ }
+
+ @Test
+ public void testParse_multiPolygon() throws IOException {
+ String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .startArray().value(103.0).value(2.0).endArray()
+ .startArray().value(103.0).value(3.0).endArray()
+ .startArray().value(102.0).value(3.0).endArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .endArray()
+ .endArray()
+ .startArray()
+ .startArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .endArray()
+ .startArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .startArray().value(100.2).value(0.2).endArray()
+ .startArray().value(100.8).value(0.2).endArray()
+ .startArray().value(100.8).value(0.8).endArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ List<Coordinate> holeCoordinates = new ArrayList<Coordinate>();
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ LinearRing[] holes = new LinearRing[1];
+ holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+ Polygon withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes);
+
+ shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(102, 3));
+ shellCoordinates.add(new Coordinate(103, 3));
+ shellCoordinates.add(new Coordinate(103, 2));
+ shellCoordinates.add(new Coordinate(102, 2));
+ shellCoordinates.add(new Coordinate(102, 3));
+
+
+ shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ Polygon withoutHoles = GEOMETRY_FACTORY.createPolygon(shell, null);
+
+ MultiPolygon expected = GEOMETRY_FACTORY.createMultiPolygon(new Polygon[] {withoutHoles, withHoles});
+
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), multiPolygonGeoJson);
+ }
+
+ @Test
+ public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException {
+ String pointGeoJson = XContentFactory.jsonBuilder().startObject()
+ .startObject("crs")
+ .field("type", "name")
+ .startObject("properties")
+ .field("name", "urn:ogc:def:crs:OGC:1.3:CRS84")
+ .endObject()
+ .endObject()
+ .field("bbox", "foobar")
+ .field("type", "point")
+ .field("bubu", "foobar")
+ .startArray("coordinates").value(100.0).value(0.0).endArray()
+ .startObject("nested").startArray("coordinates").value(200.0).value(0.0).endArray().endObject()
+ .startObject("lala").field("type", "NotAPoint").endObject()
+ .endObject().string();
+
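+ // the parser should skip crs, bbox and any unknown or nested fields, extracting only the top-level type and coordinates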
+ Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+ assertGeometryEquals(new JtsPoint(expected, ShapeBuilder.SPATIAL_CONTEXT), pointGeoJson);
+ }
+
+ private void assertGeometryEquals(Shape expected, String geoJson) throws IOException {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(geoJson);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertEquals(ShapeBuilder.parse(parser).build(), expected);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
new file mode 100644
index 0000000..9bb1505
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.LineString;
+import com.vividsolutions.jts.geom.Polygon;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiLineString;
+import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiPolygon;
+/**
+ * Tests for {@link ShapeBuilder}
+ */
+public class ShapeBuilderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNewPoint() {
+ Point point = ShapeBuilder.newPoint(-100, 45).build();
+ assertEquals(-100D, point.getX(), 0.0d);
+ assertEquals(45D, point.getY(), 0.0d);
+ }
+
+ @Test
+ public void testNewRectangle() {
+ Rectangle rectangle = ShapeBuilder.newEnvelope().topLeft(-45, 30).bottomRight(45, -30).build();
+ assertEquals(-45D, rectangle.getMinX(), 0.0d);
+ assertEquals(-30D, rectangle.getMinY(), 0.0d);
+ assertEquals(45D, rectangle.getMaxX(), 0.0d);
+ assertEquals(30D, rectangle.getMaxY(), 0.0d);
+ }
+
+ @Test
+ public void testNewPolygon() {
+ Polygon polygon = ShapeBuilder.newPolygon()
+ .point(-45, 30)
+ .point(45, 30)
+ .point(45, -30)
+ .point(-45, -30)
+ .point(-45, 30).toPolygon();
+
+ LineString exterior = polygon.getExteriorRing();
+ assertEquals(exterior.getCoordinateN(0), new Coordinate(-45, 30));
+ assertEquals(exterior.getCoordinateN(1), new Coordinate(45, 30));
+ assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30));
+ assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30));
+ }
+
+ @Test
+ public void testLineStringBuilder() {
+ // Building a simple LineString
+ ShapeBuilder.newLineString()
+ .point(-130.0, 55.0)
+ .point(-130.0, -40.0)
+ .point(-15.0, -40.0)
+ .point(-20.0, 50.0)
+ .point(-45.0, 50.0)
+ .point(-45.0, -15.0)
+ .point(-110.0, -15.0)
+ .point(-110.0, 55.0).build();
+
+ // Building a linestring that needs to be wrapped
+ ShapeBuilder.newLineString()
+ .point(100.0, 50.0)
+ .point(110.0, -40.0)
+ .point(240.0, -40.0)
+ .point(230.0, 60.0)
+ .point(200.0, 60.0)
+ .point(200.0, -30.0)
+ .point(130.0, -30.0)
+ .point(130.0, 60.0)
+ .build();
+
+ // Building a linestring on the dateline at -180
+ ShapeBuilder.newLineString()
+ .point(-180.0, 80.0)
+ .point(-180.0, 40.0)
+ .point(-180.0, -40.0)
+ .point(-180.0, -80.0)
+ .build();
+
+ // Building a linestring on the dateline at +180
+ ShapeBuilder.newLineString()
+ .point(180.0, 80.0)
+ .point(180.0, 40.0)
+ .point(180.0, -40.0)
+ .point(180.0, -80.0)
+ .build();
+ }
+
+ @Test
+ public void testMultiLineString() {
+ ShapeBuilder.newMultiLinestring()
+ .linestring()
+ .point(-100.0, 50.0)
+ .point(50.0, 50.0)
+ .point(50.0, 20.0)
+ .point(-100.0, 20.0)
+ .end()
+ .linestring()
+ .point(-100.0, 20.0)
+ .point(50.0, 20.0)
+ .point(50.0, 0.0)
+ .point(-100.0, 0.0)
+ .end()
+ .build();
+
+
+ // LineString that needs to be wrapped
+ ShapeBuilder.newMultiLinestring()
+ .linestring()
+ .point(150.0, 60.0)
+ .point(200.0, 60.0)
+ .point(200.0, 40.0)
+ .point(150.0, 40.0)
+ .end()
+ .linestring()
+ .point(150.0, 20.0)
+ .point(200.0, 20.0)
+ .point(200.0, 0.0)
+ .point(150.0, 0.0)
+ .end()
+ .build();
+ }
+
+ @Test
+ public void testPolygonSelfIntersection() {
+ try {
+ ShapeBuilder.newPolygon()
+ .point(-40.0, 50.0)
+ .point(40.0, 50.0)
+ .point(-40.0, -50.0)
+ .point(40.0, -50.0)
+ .close().build();
+ fail("Polygon self-intersection");
+ } catch (Throwable e) {}
+
+ }
+
+ @Test
+ public void testGeoCircle() {
+ ShapeBuilder.newCircleBuilder().center(0, 0).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(+180, 0).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(-180, 0).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(0, 90).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(0, -90).radius("100m").build();
+ }
+
+ @Test
+ public void testPolygonWrapping() {
+ Shape shape = ShapeBuilder.newPolygon()
+ .point(-150.0, 65.0)
+ .point(-250.0, 65.0)
+ .point(-250.0, -65.0)
+ .point(-150.0, -65.0)
+ .close().build();
+
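+ // longitudes below -180 cross the dateline, so the polygon is expected to be split into a multi-polygon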
+ assertMultiPolygon(shape);
+ }
+
+ @Test
+ public void testLineStringWrapping() {
+ Shape shape = ShapeBuilder.newLineString()
+ .point(-150.0, 65.0)
+ .point(-250.0, 65.0)
+ .point(-250.0, -65.0)
+ .point(-150.0, -65.0)
+ .build();
+
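+ // the same dateline crossing splits the line string into a multi-linestring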
+ assertMultiLineString(shape);
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java b/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java
new file mode 100644
index 0000000..a562131
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.hppc;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class HppcMapsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIntersection() throws Exception {
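+ // the null-argument checks at the end rely on Java assertions, so skip this test unless -ea is enabled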
+ assumeTrue(ASSERTIONS_ENABLED);
+ ObjectOpenHashSet<String> set1 = ObjectOpenHashSet.from("1", "2", "3");
+ ObjectOpenHashSet<String> set2 = ObjectOpenHashSet.from("1", "2", "3");
+ List<String> values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(3));
+ assertThat(values.contains("1"), equalTo(true));
+ assertThat(values.contains("2"), equalTo(true));
+ assertThat(values.contains("3"), equalTo(true));
+
+ set1 = ObjectOpenHashSet.from("1", "2", "3");
+ set2 = ObjectOpenHashSet.from("3", "4", "5");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(1));
+ assertThat(values.get(0), equalTo("3"));
+
+ set1 = ObjectOpenHashSet.from("1", "2", "3");
+ set2 = ObjectOpenHashSet.from("4", "5", "6");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectOpenHashSet.from();
+ set2 = ObjectOpenHashSet.from("3", "4", "5");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectOpenHashSet.from("1", "2", "3");
+ set2 = ObjectOpenHashSet.from();
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectOpenHashSet.from();
+ set2 = ObjectOpenHashSet.from();
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = null;
+ set2 = ObjectOpenHashSet.from();
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+
+ set1 = ObjectOpenHashSet.from();
+ set2 = null;
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+
+ set1 = null;
+ set2 = null;
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+ }
+
+ private List<String> toList(Iterable<String> iterable) {
+ List<String> list = new ArrayList<String>();
+ for (String s : iterable) {
+ list.add(s);
+ }
+ return list;
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/io/StreamsTests.java b/src/test/java/org/elasticsearch/common/io/StreamsTests.java
new file mode 100644
index 0000000..cde97c5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/io/StreamsTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.io.Streams.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Unit tests for {@link org.elasticsearch.common.io.Streams}.
+ */
+public class StreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCopyFromInputStream() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayInputStream in = new ByteArrayInputStream(content);
+ ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
+ long count = copy(in, out);
+
+ assertThat(count, equalTo((long) content.length));
+ assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true));
+ }
+
+ @Test
+ public void testCopyFromByteArray() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
+ copy(content, out);
+ assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true));
+ }
+
+ @Test
+ public void testCopyToByteArray() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayInputStream in = new ByteArrayInputStream(content);
+ byte[] result = copyToByteArray(in);
+ assertThat(Arrays.equals(content, result), equalTo(true));
+ }
+
+ @Test
+ public void testCopyFromReader() throws IOException {
+ String content = "content";
+ StringReader in = new StringReader(content);
+ StringWriter out = new StringWriter();
+ int count = copy(in, out);
+ assertThat(content.length(), equalTo(count));
+ assertThat(out.toString(), equalTo(content));
+ }
+
+ @Test
+ public void testCopyFromString() throws IOException {
+ String content = "content";
+ StringWriter out = new StringWriter();
+ copy(content, out);
+ assertThat(out.toString(), equalTo(content));
+ }
+
+ @Test
+ public void testCopyToString() throws IOException {
+ String content = "content";
+ StringReader in = new StringReader(content);
+ String result = copyToString(in);
+ assertThat(result, equalTo(content));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java
new file mode 100644
index 0000000..3a34b17
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.streams;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class BytesStreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleStreams() throws Exception {
+ assumeTrue(Constants.JRE_IS_64BIT);
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.writeBoolean(false);
+ out.writeByte((byte) 1);
+ out.writeShort((short) -1);
+ out.writeInt(-1);
+ out.writeVInt(2);
+ out.writeLong(-3);
+ out.writeVLong(4);
+ out.writeFloat(1.1f);
+ out.writeDouble(2.2);
+ int[] intArray = {1, 2, 3};
+ out.writeGenericValue(intArray);
+ long[] longArray = {1, 2, 3};
+ out.writeGenericValue(longArray);
+ float[] floatArray = {1.1f, 2.2f, 3.3f};
+ out.writeGenericValue(floatArray);
+ double[] doubleArray = {1.1, 2.2, 3.3};
+ out.writeGenericValue(doubleArray);
+ out.writeString("hello");
+ out.writeString("goodbye");
+ BytesStreamInput in = new BytesStreamInput(out.bytes().toBytes(), false);
+ assertThat(in.readBoolean(), equalTo(false));
+ assertThat(in.readByte(), equalTo((byte) 1));
+ assertThat(in.readShort(), equalTo((short) -1));
+ assertThat(in.readInt(), equalTo(-1));
+ assertThat(in.readVInt(), equalTo(2));
+ assertThat(in.readLong(), equalTo((long) -3));
+ assertThat(in.readVLong(), equalTo((long) 4));
+ assertThat((double) in.readFloat(), closeTo(1.1, 0.0001));
+ assertThat(in.readDouble(), closeTo(2.2, 0.0001));
+ assertThat(in.readGenericValue(), equalTo((Object)intArray));
+ assertThat(in.readGenericValue(), equalTo((Object)longArray));
+ assertThat(in.readGenericValue(), equalTo((Object)floatArray));
+ assertThat(in.readGenericValue(), equalTo((Object)doubleArray));
+ assertThat(in.readString(), equalTo("hello"));
+ assertThat(in.readString(), equalTo("goodbye"));
+ }
+
+ @Test
+ public void testGrowLogic() throws Exception {
+ assumeTrue(Constants.JRE_IS_64BIT);
+ BytesStreamOutput out = new BytesStreamOutput();
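+ // the backing buffer starts at DEFAULT_SIZE and grows in oversized steps; the sizes asserted below document the growth policy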
+ out.writeBytes(new byte[BytesStreamOutput.DEFAULT_SIZE - 5]);
+ assertThat(out.bufferSize(), equalTo(2048)); // remains the default
+ out.writeBytes(new byte[1 * 1024]);
+ assertThat(out.bufferSize(), equalTo(4608));
+ out.writeBytes(new byte[32 * 1024]);
+ assertThat(out.bufferSize(), equalTo(40320));
+ out.writeBytes(new byte[32 * 1024]);
+ assertThat(out.bufferSize(), equalTo(90720));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java b/src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java
new file mode 100644
index 0000000..a28082f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.streams;
+
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.HandlesStreamInput;
+import org.elasticsearch.common.io.stream.HandlesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class HandlesStreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSharedStringHandles() throws Exception {
+ String test1 = "test1";
+ String test2 = "test2";
+ String test3 = "test3";
+ String test4 = "test4";
+ String test5 = "test5";
+ String test6 = "test6";
+
+ BytesStreamOutput bout = new BytesStreamOutput();
+ HandlesStreamOutput out = new HandlesStreamOutput(bout);
+ out.writeString(test1);
+ out.writeString(test1);
+ out.writeString(test2);
+ out.writeString(test3);
+ out.writeSharedString(test4);
+ out.writeSharedString(test4);
+ out.writeSharedString(test5);
+ out.writeSharedString(test6);
+
+ BytesStreamInput bin = new BytesStreamInput(bout.bytes());
+ HandlesStreamInput in = new HandlesStreamInput(bin);
+ String s1 = in.readString();
+ String s2 = in.readString();
+ String s3 = in.readString();
+ String s4 = in.readString();
+ String s5 = in.readSharedString();
+ String s6 = in.readSharedString();
+ String s7 = in.readSharedString();
+ String s8 = in.readSharedString();
+
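+ // plain strings round-trip as distinct instances, while repeated shared strings should deserialize to the same instance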
+ assertThat(s1, equalTo(test1));
+ assertThat(s2, equalTo(test1));
+ assertThat(s3, equalTo(test2));
+ assertThat(s4, equalTo(test3));
+ assertThat(s5, equalTo(test4));
+ assertThat(s6, equalTo(test4));
+ assertThat(s7, equalTo(test5));
+ assertThat(s8, equalTo(test6));
+
+ assertThat(s1, not(sameInstance(s2)));
+ assertThat(s5, sameInstance(s6));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java b/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java
new file mode 100644
index 0000000..6c0ab25
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.joda;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for DateMathParser date math expressions and rounding.
+ */
+public class DateMathParserTests extends ElasticsearchTestCase {
+
+ @Test
+ public void dateMathTests() {
+ DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"), TimeUnit.MILLISECONDS);
+
+ assertThat(parser.parse("now", 0), equalTo(0l));
+ assertThat(parser.parse("now+m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parse("now+1m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parse("now+11m", 0), equalTo(TimeUnit.MINUTES.toMillis(11)));
+
+ assertThat(parser.parse("now+1d", 0), equalTo(TimeUnit.DAYS.toMillis(1)));
+
+ assertThat(parser.parse("now+1m+1s", 0), equalTo(TimeUnit.MINUTES.toMillis(1) + TimeUnit.SECONDS.toMillis(1)));
+ assertThat(parser.parse("now+1m-1s", 0), equalTo(TimeUnit.MINUTES.toMillis(1) - TimeUnit.SECONDS.toMillis(1)));
+
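+ // "/m" truncates to the start of the minute, while parseRoundCeil rounds up to the next minute boundary.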
+ assertThat(parser.parse("now+1m+1s/m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parseRoundCeil("now+1m+1s/m", 0), equalTo(TimeUnit.MINUTES.toMillis(2)));
+
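+ // Four years from the 1970 epoch spans one leap year (1972), hence 4*365 + 1 days.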
+ assertThat(parser.parse("now+4y", 0), equalTo(TimeUnit.DAYS.toMillis(4*365 + 1)));
+ }
+
+ @Test
+ public void actualDateTests() {
+ DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"), TimeUnit.MILLISECONDS);
+
+ assertThat(parser.parse("1970-01-01", 0), equalTo(0l));
+ assertThat(parser.parse("1970-01-01||+1m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parse("1970-01-01||+1m+1s", 0), equalTo(TimeUnit.MINUTES.toMillis(1) + TimeUnit.SECONDS.toMillis(1)));
+
+ assertThat(parser.parse("2013-01-01||+1y", 0), equalTo(parser.parse("2013-01-01", 0) + TimeUnit.DAYS.toMillis(365)));
+ assertThat(parser.parse("2013-03-03||/y", 0), equalTo(parser.parse("2013-01-01", 0)));
+ assertThat(parser.parseRoundCeil("2013-03-03||/y", 0), equalTo(parser.parse("2014-01-01", 0)));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
new file mode 100644
index 0000000..0ae9781
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.junit.Test;
+
+/**
+ * Sanity checks for the Lucene version constants in Lucene.java.
+ */
+public class LuceneTest {
+
+ /*
+ * Simple test that ensures that we bump the Lucene version constants on upgrade.
+ */
+ @Test
+ public void testVersion() {
+ ESLogger logger = ESLoggerFactory.getLogger(LuceneTest.class.getName());
+ Version[] values = Version.values();
+ assertThat(Version.LUCENE_CURRENT, equalTo(values[values.length-1]));
+ assertThat("Latest Lucene Version is not set after upgrade", Lucene.VERSION, equalTo(values[values.length-2]));
+ assertThat(Lucene.parseVersion(null, Lucene.VERSION, null), equalTo(Lucene.VERSION));
+ for (int i = 0; i < values.length-1; i++) {
+ // this should fail if the lucene version is not mapped as a string in Lucene.java
+ assertThat(Lucene.parseVersion(values[i].name().replaceFirst("^LUCENE_(\\d)(\\d)$", "$1.$2"), Version.LUCENE_CURRENT, logger), equalTo(values[i]));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
new file mode 100644
index 0000000..bb7de12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for _all field token streams, boosts, and AllTermQuery scoring.
+ */
+public class SimpleAllTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testBoostOnEagerTokenizer() throws Exception {
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "all", 2.0f);
+ allEntries.addText("field2", "your", 1.0f);
+ allEntries.addText("field1", "boosts", 0.5f);
+ allEntries.reset();
+ // the whitespace analyzer's tokenizer reads characters eagerly, unlike the standard tokenizer
+ final TokenStream ts = AllTokenStream.allTokenStream("any", allEntries, new WhitespaceAnalyzer(Lucene.VERSION));
+ final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ final PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
+ ts.reset();
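+ // Boosts are carried as a 4-byte float payload on each token; a missing or
+ // empty payload means the default boost of 1, which the loop below checks.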
+ for (int i = 0; i < 3; ++i) {
+ assertTrue(ts.incrementToken());
+ final String term;
+ final float boost;
+ switch (i) {
+ case 0:
+ term = "all";
+ boost = 2;
+ break;
+ case 1:
+ term = "your";
+ boost = 1;
+ break;
+ case 2:
+ term = "boosts";
+ boost = 0.5f;
+ break;
+ default:
+ throw new AssertionError();
+ }
+ assertEquals(term, termAtt.toString());
+ final BytesRef payload = payloadAtt.getPayload();
+ if (payload == null || payload.length == 0) {
+ assertEquals(boost, 1f, 0.001f);
+ } else {
+ assertEquals(4, payload.length);
+ final float b = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
+ assertEquals(boost, b, 0.001f);
+ }
+ }
+ assertFalse(ts.incrementToken());
+ }
+
+ @Test
+ public void testAllEntriesRead() throws Exception {
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something", 1.0f);
+ allEntries.addText("field2", "else", 1.0f);
+
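+ // Read the concatenated entries through buffers of every size from 1 to 29 to exercise read() boundary handling.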
+ for (int i = 1; i < 30; i++) {
+ allEntries.reset();
+ char[] data = new char[i];
+ String value = slurpToString(allEntries, data);
+ assertThat("failed for " + i, value, equalTo("something else"));
+ }
+ }
+
+ private String slurpToString(AllEntries allEntries, char[] data) throws IOException {
+ StringBuilder sb = new StringBuilder();
+ while (true) {
+ int read = allEntries.read(data, 0, data.length);
+ if (read == -1) {
+ break;
+ }
+ sb.append(data, 0, read);
+ }
+ return sb.toString();
+ }
+
+ private void assertExplanationScore(IndexSearcher searcher, Query query, ScoreDoc scoreDoc) throws IOException {
+ final Explanation expl = searcher.explain(query, scoreDoc.doc);
+ assertEquals(scoreDoc.score, expl.getValue(), 0.00001f);
+ }
+
+ @Test
+ public void testSimpleAllNoBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something", 1.0f);
+ allEntries.addText("field2", "else", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else", 1.0f);
+ allEntries.addText("field2", "something", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ Query query = new AllTermQuery(new Term("_all", "else"));
+ TopDocs docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ query = new AllTermQuery(new Term("_all", "something"));
+ docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testSimpleAllWithBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something", 1.0f);
+ allEntries.addText("field2", "else", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else", 2.0f);
+ allEntries.addText("field2", "something", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // this one is boosted. so the second doc is more relevant
+ Query query = new AllTermQuery(new Term("_all", "else"));
+ TopDocs docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ query = new AllTermQuery(new Term("_all", "something"));
+ docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testMultipleTokensAllNoBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something moo", 1.0f);
+ allEntries.addText("field2", "else koo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else koo", 1.0f);
+ allEntries.addText("field2", "something moo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testMultipleTokensAllWithBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something moo", 1.0f);
+ allEntries.addText("field2", "else koo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else koo", 2.0f);
+ allEntries.addText("field2", "something moo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(1));
+ assertThat(docs.scoreDocs[1].doc, equalTo(0));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(1));
+ assertThat(docs.scoreDocs[1].doc, equalTo(0));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testNoTokensWithKeywordAnalyzer() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.KEYWORD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.KEYWORD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
+ assertThat(docs.totalHits, equalTo(1));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+
+ indexWriter.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java
new file mode 100644
index 0000000..0b85525
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that the match-all filter matches every indexed document.
+ */
+public class MatchAllDocsFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMatchAllDocsFilter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("text", "lucene", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ document.add(new TextField("text", "lucene release", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
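+ // Queries.MATCH_ALL_FILTER accepts every live document; wrapping it in a constant-score query lets it be counted like any other query.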
+ XConstantScoreQuery query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
+ long count = Lucene.count(searcher, query);
+ assertThat(count, equalTo(2L));
+
+ reader.close();
+ indexWriter.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java
new file mode 100644
index 0000000..c2b51f6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Basic MoreLikeThisQuery sanity test over a two-document index.
+ */
+public class MoreLikeThisQueryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ indexWriter.commit();
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("text", "lucene", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ document.add(new TextField("text", "lucene release", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
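+ // Lower the term- and doc-frequency cut-offs so that terms from this tiny two-document index are not pruned.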
+ MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER);
+ mltQuery.setLikeText("lucene");
+ mltQuery.setMinTermFrequency(1);
+ mltQuery.setMinDocFreq(1);
+ long count = Lucene.count(searcher, mltQuery);
+ assertThat(count, equalTo(2L));
+
+ reader.close();
+ indexWriter.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
new file mode 100644
index 0000000..cc2fac4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class MultiPhrasePrefixQueryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTests() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ Document doc = new Document();
+ doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ IndexReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
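+ // Only the last term position acts as a prefix; earlier terms must match whole tokens in phrase order.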
+ MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
+ query.add(new Term("field", "aa"));
+ assertThat(Lucene.count(searcher, query), equalTo(1L));
+
+ query = new MultiPhrasePrefixQuery();
+ query.add(new Term("field", "aaa"));
+ query.add(new Term("field", "bb"));
+ assertThat(Lucene.count(searcher, query), equalTo(1L));
+
+ query = new MultiPhrasePrefixQuery();
+ query.setSlop(1);
+ query.add(new Term("field", "aaa"));
+ query.add(new Term("field", "cc"));
+ assertThat(Lucene.count(searcher, query), equalTo(1L));
+
+ query = new MultiPhrasePrefixQuery();
+ query.setSlop(1);
+ query.add(new Term("field", "xxx"));
+ assertThat(Lucene.count(searcher, query), equalTo(0L));
+
+ reader.close();
+ writer.close();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
new file mode 100644
index 0000000..1c84a0f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for TermFilter and XTermsFilter.
+ */
+public class TermsFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testTermFilter() throws Exception {
+ String fieldName = "field1";
+ Directory rd = new RAMDirectory();
+ IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
+ for (int i = 0; i < 100; i++) {
+ Document doc = new Document();
+ int term = i * 10; // terms are multiples of 10
+ doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
+ doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
+ w.addDocument(doc);
+ if ((i % 40) == 0) {
+ w.commit();
+ }
+ }
+ AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
+ w.close();
+
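+ // "19" is never indexed (all terms are multiples of 10); a filter with no matches may return null instead of an empty set.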
+ TermFilter tf = new TermFilter(new Term(fieldName, "19"));
+ FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits, nullValue());
+
+ tf = new TermFilter(new Term(fieldName, "20"));
+ DocIdSet result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
+ assertThat(bits.cardinality(), equalTo(1));
+
+ tf = new TermFilter(new Term("all", "xxx"));
+ result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
+ assertThat(bits.cardinality(), equalTo(100));
+
+ reader.close();
+ rd.close();
+ }
+
+ @Test
+ public void testTermsFilter() throws Exception {
+ String fieldName = "field1";
+ Directory rd = new RAMDirectory();
+ IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
+ for (int i = 0; i < 100; i++) {
+ Document doc = new Document();
+ int term = i * 10; // terms are multiples of 10
+ doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
+ doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
+ w.addDocument(doc);
+ if ((i % 40) == 0) {
+ w.commit();
+ }
+ }
+ AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
+ w.close();
+
+ XTermsFilter tf = new XTermsFilter(new Term[]{new Term(fieldName, "19")});
+ FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits, nullValue());
+
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
+ bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits.cardinality(), equalTo(1));
+
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
+ bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits.cardinality(), equalTo(2));
+
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
+ bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits.cardinality(), equalTo(2));
+
+ reader.close();
+ rd.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
new file mode 100644
index 0000000..5aa4424
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
@@ -0,0 +1,391 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ * Tests ported from Lucene.
+ */
+public class XBooleanFilterLuceneTests extends ElasticsearchTestCase {
+
+ private Directory directory;
+ private AtomicReader reader;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ directory = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.VERSION, new WhitespaceAnalyzer(Lucene.VERSION)));
+
+ // Add a series of docs with filterable fields: access rights, prices, dates and "in-stock" flags
+ addDoc(writer, "admin guest", "010", "20040101", "Y");
+ addDoc(writer, "guest", "020", "20040101", "Y");
+ addDoc(writer, "guest", "020", "20050101", "Y");
+ addDoc(writer, "admin", "020", "20050101", "Maybe");
+ addDoc(writer, "admin guest", "030", "20050101", "N");
+ writer.close();
+ reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(directory));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ reader.close();
+ directory.close();
+ }
+
+ private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException {
+ Document doc = new Document();
+ doc.add(new TextField("accessRights", accessRights, Field.Store.YES));
+ doc.add(new TextField("price", price, Field.Store.YES));
+ doc.add(new TextField("date", date, Field.Store.YES));
+ doc.add(new TextField("inStock", inStock, Field.Store.YES));
+ writer.addDocument(doc);
+ }
+
+ private Filter getRangeFilter(String field, String lowerPrice, String upperPrice) {
+ return TermRangeFilter.newStringRange(field, lowerPrice, upperPrice, true, true);
+ }
+
+ private Filter getTermsFilter(String field, String text) {
+ return new XTermsFilter(new Term(field, text));
+ }
+
+ private Filter getWrappedTermQuery(String field, String text) {
+ return new QueryWrapperFilter(new TermQuery(new Term(field, text)));
+ }
+
+ private Filter getEmptyFilter() {
+ return new Filter() {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
+ return new FixedBitSet(context.reader().maxDoc());
+ }
+ };
+ }
+
+ private Filter getNullDISFilter() {
+ return new Filter() {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
+ return null;
+ }
+ };
+ }
+
+ private Filter getNullDISIFilter() {
+ return new Filter() {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
+ return new DocIdSet() {
+ @Override
+ public DocIdSetIterator iterator() {
+ return null;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+ };
+ }
+ };
+ }
+
+ private void tstFilterCard(String mes, int expected, Filter filt) throws Exception {
+ int actual = 0;
+ DocIdSet docIdSet = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ if (docIdSet != null) {
+ DocIdSetIterator disi = docIdSet.iterator();
+ if (disi != null) {
+ while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ actual++;
+ }
+ }
+ }
+ assertThat(mes, actual, equalTo(expected));
+ }
+
+ @Test
+ public void testShould() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.SHOULD);
+ tstFilterCard("Should retrieves only 1 doc", 1, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("price", "030"), BooleanClause.Occur.SHOULD);
+ tstFilterCard("Should retrieves only 1 doc", 1, booleanFilter);
+ }
+
+ @Test
+ public void testShoulds() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ tstFilterCard("Shoulds are Ored together", 5, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNot", 4, booleanFilter);
+
+ booleanFilter.add(getTermsFilter("inStock", "Maybe"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNots", 3, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNot", 4, booleanFilter);
+
+ booleanFilter.add(getWrappedTermQuery("inStock", "Maybe"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNots", 3, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMust() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("Shoulds Ored but MUST", 3, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getWrappedTermQuery("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("Shoulds Ored but MUST", 3, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMusts() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getRangeFilter("date", "20040101", "20041231"), BooleanClause.Occur.MUST);
+ tstFilterCard("Shoulds Ored but MUSTs ANDED", 1, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMustsAndMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "030", "040"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getRangeFilter("date", "20050101", "20051231"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but MUSTs ANDED and MustNot", 0, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "030", "040"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getWrappedTermQuery("accessRights", "admin"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getRangeFilter("date", "20050101", "20051231"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but MUSTs ANDED and MustNot", 0, booleanFilter);
+ }
+
+ @Test
+ public void testJustMust() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("MUST", 3, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("MUST", 3, booleanFilter);
+ }
+
+ @Test
+ public void testJustMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT", 4, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT", 4, booleanFilter);
+ }
+
+ @Test
+ public void testMustAndMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT wins over MUST for same docs", 0, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getWrappedTermQuery("price", "030"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT wins over MUST for same docs", 0, booleanFilter);
+ }
+
+ @Test
+ public void testEmpty() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ tstFilterCard("empty XBooleanFilter returns no results", 0, booleanFilter);
+ }
+
+ @Test
+ public void testCombinedNullDocIdSets() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DIS should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DISI should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A SHOULD filter that returns a null DIS should be invisible", 1, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A SHOULD filter that returns a null DISI should be invisible", 1, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A MUST_NOT filter that returns a null DIS should be invisible", 1, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A MUST_NOT filter that returns a null DISI should be invisible", 1, booleanFilter);
+ }
+
+ @Test
+ public void testJustNullDocIdSets() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DIS should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DISI should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A single SHOULD filter that returns a null DIS should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A single SHOULD filter that returns a null DISI should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A single MUST_NOT filter that returns a null DIS should be invisible", 5, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A single MUST_NOT filter that returns a null DIS should be invisible", 5, booleanFilter);
+ }
+
+ @Test
+ public void testNonMatchingShouldsAndMusts() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getEmptyFilter(), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard(">0 shoulds with no matches should return no docs", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard(">0 shoulds with no matches should return no docs", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard(">0 shoulds with no matches should return no docs", 0, booleanFilter);
+ }
+
+ @Test
+ public void testToStringOfBooleanFilterContainingTermsFilter() {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getTermsFilter("isFragile", "Y"), BooleanClause.Occur.MUST);
+
+ assertThat("BooleanFilter(+inStock:N +isFragile:Y)", equalTo(booleanFilter.toString()));
+ }
+
+ @Test
+ public void testToStringOfWrappedBooleanFilters() {
+ XBooleanFilter orFilter = new XBooleanFilter();
+
+ XBooleanFilter stockFilter = new XBooleanFilter();
+ stockFilter.add(new FilterClause(getTermsFilter("inStock", "Y"), BooleanClause.Occur.MUST));
+ stockFilter.add(new FilterClause(getTermsFilter("barCode", "12345678"), BooleanClause.Occur.MUST));
+
+ orFilter.add(new FilterClause(stockFilter, BooleanClause.Occur.SHOULD));
+
+ XBooleanFilter productPropertyFilter = new XBooleanFilter();
+ productPropertyFilter.add(new FilterClause(getTermsFilter("isHeavy", "N"), BooleanClause.Occur.MUST));
+ productPropertyFilter.add(new FilterClause(getTermsFilter("isDamaged", "Y"), BooleanClause.Occur.MUST));
+
+ orFilter.add(new FilterClause(productPropertyFilter, BooleanClause.Occur.SHOULD));
+
+ XBooleanFilter composedFilter = new XBooleanFilter();
+ composedFilter.add(new FilterClause(orFilter, BooleanClause.Occur.MUST));
+
+ assertThat(
+ "BooleanFilter(+BooleanFilter(BooleanFilter(+inStock:Y +barCode:12345678) BooleanFilter(+isHeavy:N +isDamaged:Y)))",
+ equalTo(composedFilter.toString())
+ );
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java
new file mode 100644
index 0000000..32c08af
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java
@@ -0,0 +1,567 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.lucene.search.BooleanClause.Occur.*;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ * Tests XBooleanFilter clause combinations across filter types and clause orders.
+ */
+public class XBooleanFilterTests extends ElasticsearchLuceneTestCase {
+
+ private Directory directory;
+ private AtomicReader reader;
+ private static final char[] distinctValues = new char[] {'a', 'b', 'c', 'd', 'v', 'z', 'y'};
+
+ @Before
+ public void setup() throws Exception {
+ super.setUp();
+ char[][] documentMatrix = new char[][] {
+ {'a', 'b', 'c', 'd', 'v'},
+ {'a', 'b', 'c', 'd', 'z'},
+ {'a', 'a', 'a', 'a', 'x'}
+ };
+
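+ // Each row becomes one document; column i is indexed as a StringField named Integer.toString(i).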
+ List<Document> documents = new ArrayList<Document>(documentMatrix.length);
+ for (char[] fields : documentMatrix) {
+ Document document = new Document();
+ for (int i = 0; i < fields.length; i++) {
+ document.add(new StringField(Integer.toString(i), String.valueOf(fields[i]), Field.Store.NO));
+ }
+ documents.add(document);
+ }
+ directory = newDirectory();
+ IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
+ w.addDocuments(documents);
+ w.close();
+ reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(directory));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_allFixedBitsetFilters() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, false)
+ ));
+
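+ // All three filters contain the same clauses in different orders and must select the same documents: docs 0 and 1, but not doc 2.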
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_allBitsBasedFilters() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, true)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_allFilterTypes() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, false)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+
+ booleanFilters.clear();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, true)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testSingleClauseOptimisation() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'b', MUST, true)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+
+ booleanFilters.clear();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'c', MUST_NOT, true)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(3));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ booleanFilters.clear();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, true)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testOnlyShouldClauses() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ // 2 slow filters
+ // This case caused: https://github.com/elasticsearch/elasticsearch/issues/2826
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', SHOULD, true),
+ newFilterClause(1, 'b', SHOULD, true)
+ ));
+ // 2 fast filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', SHOULD, false),
+ newFilterClause(1, 'b', SHOULD, false)
+ ));
+ // 1 fast filter, 1 slow filter
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', SHOULD, true),
+ newFilterClause(1, 'b', SHOULD, false)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(3));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testOnlyMustClauses() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ // 2 slow filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(3, 'd', MUST, true),
+ newFilterClause(3, 'd', MUST, true)
+ ));
+ // 2 fast filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(3, 'd', MUST, false),
+ newFilterClause(3, 'd', MUST, false)
+ ));
+ // 1 fast filter, 1 slow filter
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(3, 'd', MUST, true),
+ newFilterClause(3, 'd', MUST, false)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testOnlyMustNotClauses() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ // 2 slow filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', MUST_NOT, true),
+ newFilterClause(1, 'a', MUST_NOT, true)
+ ));
+ // 2 fast filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', MUST_NOT, false),
+ newFilterClause(1, 'a', MUST_NOT, false)
+ ));
+ // 1 fast filter, 1 slow filter
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', MUST_NOT, true),
+ newFilterClause(1, 'a', MUST_NOT, false)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testNonMatchingSlowShouldWithMatchingMust() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(0, 'b', SHOULD, true)
+ );
+
+ DocIdSet docIdSet = booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(docIdSet, equalTo(null));
+ }
+
+ @Test
+ public void testSlowShouldClause_atLeastOneShouldMustMatch() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(1, 'a', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(1));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(false));
+ assertThat(result.get(2), equalTo(true));
+
+ booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(1, 'a', SHOULD, true),
+ newFilterClause(4, 'z', SHOULD, true)
+ );
+
+ result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ @Test
+ // See issue: https://github.com/elasticsearch/elasticsearch/issues/4130
+ public void testOneFastMustNotOneFastShouldAndOneSlowShould() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(4, 'v', MUST_NOT, false),
+ newFilterClause(4, 'z', SHOULD, false),
+ newFilterClause(4, 'x', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ @Test
+ public void testOneFastShouldClauseAndOneSlowShouldClause() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(4, 'z', SHOULD, false),
+ newFilterClause(4, 'x', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ @Test
+ public void testOneMustClauseOneFastShouldClauseAndOneSlowShouldClause() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(4, 'z', SHOULD, false),
+ newFilterClause(4, 'x', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
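+ // Builds a filter clause that is either "slow" (field-cache backed, random
+ // access via bits) or "fast" (a TermFilter whose DocIdSet supports iteration).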
+ private static FilterClause newFilterClause(int field, char character, BooleanClause.Occur occur, boolean slowerBitsBackedFilter) {
+ Filter filter;
+ if (slowerBitsBackedFilter) {
+ filter = new PrettyPrintFieldCacheTermsFilter(String.valueOf(field), String.valueOf(character));
+ } else {
+ Term term = new Term(String.valueOf(field), String.valueOf(character));
+ filter = new TermFilter(term);
+ }
+ return new FilterClause(filter, occur);
+ }
+
+ private static XBooleanFilter createBooleanFilter(FilterClause... clauses) {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ for (FilterClause clause : clauses) {
+ booleanFilter.add(clause);
+ }
+ return booleanFilter;
+ }
+
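+ // Randomized equivalence test: a randomly built XBooleanFilter must match
+ // exactly the same documents as the equivalent BooleanQuery wrapped in a
+ // QueryWrapperFilter.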
+ @Test
+ public void testRandom() throws IOException {
+ int iterations = atLeast(400); // don't worry, this is fast!
+ for (int iter = 0; iter < iterations; iter++) {
+ int numClauses = 1 + random().nextInt(10);
+ FilterClause[] clauses = new FilterClause[numClauses];
+ BooleanQuery topLevel = new BooleanQuery();
+ BooleanQuery orQuery = new BooleanQuery();
+ boolean hasMust = false;
+ boolean hasShould = false;
+ boolean hasMustNot = false;
+ for(int i = 0; i < numClauses; i++) {
+ int field = random().nextInt(5);
+ char value = distinctValues[random().nextInt(distinctValues.length)];
+ switch(random().nextInt(10)) {
+ case 9:
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ hasMust = true;
+ if (rarely()) {
+ clauses[i] = new FilterClause(new EmptyFilter(), MUST);
+ topLevel.add(new BooleanClause(new MatchNoDocsQuery(), MUST));
+ } else {
+ clauses[i] = newFilterClause(field, value, MUST, random().nextBoolean());
+ topLevel.add(new BooleanClause(new TermQuery(new Term(String.valueOf(field), String.valueOf(value))), MUST));
+ }
+ break;
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ hasShould = true;
+ if (rarely()) {
+ clauses[i] = new FilterClause(new EmptyFilter(), SHOULD);
+ orQuery.add(new BooleanClause(new MatchNoDocsQuery(), SHOULD));
+ } else {
+ clauses[i] = newFilterClause(field, value, SHOULD, random().nextBoolean());
+ orQuery.add(new BooleanClause(new TermQuery(new Term(String.valueOf(field), String.valueOf(value))), SHOULD));
+ }
+ break;
+ case 0:
+ hasMustNot = true;
+ if (rarely()) {
+ clauses[i] = new FilterClause(new EmptyFilter(), MUST_NOT);
+ topLevel.add(new BooleanClause(new MatchNoDocsQuery(), MUST_NOT));
+ } else {
+ clauses[i] = newFilterClause(field, value, MUST_NOT, random().nextBoolean());
+ topLevel.add(new BooleanClause(new TermQuery(new Term(String.valueOf(field), String.valueOf(value))), MUST_NOT));
+ }
+ break;
+
+ }
+ }
+ if (orQuery.getClauses().length > 0) {
+ topLevel.add(new BooleanClause(orQuery, MUST));
+ }
+ if (hasMustNot && !hasMust && !hasShould) { // pure negative
+ topLevel.add(new BooleanClause(new MatchAllDocsQuery(), MUST));
+ }
+ XBooleanFilter booleanFilter = createBooleanFilter(clauses);
+
+ FixedBitSet leftResult = new FixedBitSet(reader.maxDoc());
+ FixedBitSet rightResult = new FixedBitSet(reader.maxDoc());
+ DocIdSet left = booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ DocIdSet right = new QueryWrapperFilter(topLevel).getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ if (left == null || right == null) {
+ if (left == null && right != null) {
+ assertThat(errorMsg(clauses, topLevel), (right.iterator() == null ? DocIdSetIterator.NO_MORE_DOCS : right.iterator().nextDoc()), equalTo(DocIdSetIterator.NO_MORE_DOCS));
+ }
+ if (left != null && right == null) {
+ assertThat(errorMsg(clauses, topLevel), (left.iterator() == null ? DocIdSetIterator.NO_MORE_DOCS : left.iterator().nextDoc()), equalTo(DocIdSetIterator.NO_MORE_DOCS));
+ }
+ } else {
+ DocIdSetIterator leftIter = left.iterator();
+ DocIdSetIterator rightIter = right.iterator();
+ if (leftIter != null) {
+ leftResult.or(leftIter);
+ }
+
+ if (rightIter != null) {
+ rightResult.or(rightIter);
+ }
+
+ assertThat(leftResult.cardinality(), equalTo(rightResult.cardinality()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(errorMsg(clauses, topLevel) + " -- failed at index " + i, leftResult.get(i), equalTo(rightResult.get(i)));
+ }
+ }
+ }
+ }
+
+ private String errorMsg(FilterClause[] clauses, BooleanQuery query) {
+ return query.toString() + " vs. " + Arrays.toString(clauses);
+ }
+
+
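+ // Field-cache backed term filter whose toString() is prefixed with SLOW so
+ // that assertion messages show which clauses were bits-backed.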
+ public static final class PrettyPrintFieldCacheTermsFilter extends FieldCacheTermsFilter {
+
+ private final String value;
+ private final String field;
+
+ public PrettyPrintFieldCacheTermsFilter(String field, String value) {
+ super(field, value);
+ this.field = field;
+ this.value = value;
+ }
+
+ @Override
+ public String toString() {
+ return "SLOW(" + field + ":" + value + ")";
+ }
+ }
+
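+ // Randomly yields either a null DocIdSet or a DocIdSet with a null iterator,
+ // exercising XBooleanFilter's handling of empty clauses.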
+ public final class EmptyFilter extends Filter {
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return random().nextBoolean() ? new Empty() : null;
+ }
+
+ private class Empty extends DocIdSet {
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return null;
+ }
+ }
+ }
+
+}
+
diff --git a/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java b/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java
new file mode 100644
index 0000000..315b93e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ * Tests for {@link InputStreamIndexInput}, an InputStream view over an
+ * {@link IndexInput} that is limited to a fixed number of bytes.
+ */
+public class InputStreamIndexInputTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSingleReadSingleByteLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
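+ // write three 1-bytes followed by three 2-bytes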
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ for (int i = 0; i < 3; i++) {
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiSingleByteLimit1() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[2];
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(read), equalTo(1));
+ assertThat(read[0], equalTo((byte) 1));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(read), equalTo(1));
+ assertThat(read[0], equalTo((byte) 2));
+ }
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testSingleReadTwoBytesLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiTwoBytesLimit1() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[2];
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 2));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiFourBytesLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[4];
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(4l));
+ assertThat(is.read(read), equalTo(4));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 1));
+ assertThat(read[2], equalTo((byte) 1));
+ assertThat(read[3], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 2));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testMarkReset() throws Exception {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
+ assertThat(is.markSupported(), equalTo(true));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(1));
+ is.mark(0);
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ is.reset();
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
new file mode 100644
index 0000000..8556058
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.uid;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.*;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.index.merge.policy.IndexUpgraderMergePolicy;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class VersionsTests extends ElasticsearchLuceneTestCase {
+
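+ // Reopens the reader if the index has changed, closing the old one; fails if
+ // no new reader was produced when one was expected.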
+ public static DirectoryReader reopen(DirectoryReader reader) throws IOException {
+ return reopen(reader, true);
+ }
+
+ public static DirectoryReader reopen(DirectoryReader reader, boolean newReaderExpected) throws IOException {
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ if (newReader != null) {
+ reader.close();
+ } else {
+ assertFalse(newReaderExpected);
+ }
+ return newReader;
+ }
+
+ @Test
+ public void testVersions() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ writer.addDocument(doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_SET));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(Versions.NOT_SET));
+
+ doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1l));
+
+ doc = new Document();
+ Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
+ Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 2);
+ doc.add(uid);
+ doc.add(version);
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2l));
+
+ // test reuse of uid field
+ doc = new Document();
+ version.setLongValue(3);
+ doc.add(uid);
+ doc.add(version);
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3l));
+
+ writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ @Test
+ public void testNestedDocuments() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ List<Document> docs = new ArrayList<Document>();
+ for (int i = 0; i < 4; ++i) {
+ // Nested
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ docs.add(doc);
+ }
+ // Root
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ NumericDocValuesField version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
+ doc.add(version);
+ docs.add(doc);
+
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5l));
+
+ version.setLongValue(6L);
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ version.setLongValue(7L);
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7l));
+
+ writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBackwardCompatibility() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+
+ Document doc = new Document();
+ UidField uidAndVersion = new UidField("1", 1L);
+ doc.add(uidAndVersion);
+ writer.addDocument(doc);
+
+ uidAndVersion.uid = "2";
+ uidAndVersion.version = 2;
+ writer.addDocument(doc);
+ writer.commit();
+
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2l));
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "3")), equalTo(Versions.NOT_FOUND));
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ // This is how versions used to be encoded
+ private static class UidField extends Field {
+ private static final FieldType FIELD_TYPE = new FieldType();
+ static {
+ FIELD_TYPE.setTokenized(true);
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.freeze();
+ }
+ String uid;
+ long version;
+ UidField(String uid, long version) {
+ super(UidFieldMapper.NAME, uid, FIELD_TYPE);
+ this.uid = uid;
+ this.version = version;
+ }
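+ // Emits the uid as a single token whose payload carries the version,
+ // matching the legacy payload-based version encoding.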
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ return new TokenStream() {
+ boolean finished = true;
+ final CharTermAttribute term = addAttribute(CharTermAttribute.class);
+ final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+ @Override
+ public boolean incrementToken() throws IOException {
+ if (finished) {
+ return false;
+ }
+ term.setEmpty().append(uid);
+ payload.setPayload(new BytesRef(Numbers.longToBytes(version)));
+ finished = true;
+ return true;
+ }
+ @Override
+ public void reset() throws IOException {
+ finished = false;
+ }
+ };
+ }
+ }
+
+ @Test
+ public void testMergingOldIndices() throws Exception {
+ final IndexWriterConfig iwConf = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
+ iwConf.setMergePolicy(new IndexUpgraderMergePolicy(iwConf.getMergePolicy()));
+ final Directory dir = newDirectory();
+ final IndexWriter iw = new IndexWriter(dir, iwConf);
+
+ // 1st segment, no _version
+ Document document = new Document();
+ // Add a dummy field (enough to trigger #3237)
+ document.add(new StringField("a", "b", Store.NO));
+ StringField uid = new StringField(UidFieldMapper.NAME, "1", Store.YES);
+ document.add(uid);
+ iw.addDocument(document);
+ uid.setStringValue("2");
+ iw.addDocument(document);
+ iw.commit();
+
+ // 2nd segment, old layout
+ document = new Document();
+ UidField uidAndVersion = new UidField("3", 3L);
+ document.add(uidAndVersion);
+ iw.addDocument(document);
+ uidAndVersion.uid = "4";
+ uidAndVersion.version = 4L;
+ iw.addDocument(document);
+ iw.commit();
+
+ // 3rd segment new layout
+ document = new Document();
+ uid.setStringValue("5");
+ Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
+ document.add(uid);
+ document.add(version);
+ iw.addDocument(document);
+ uid.setStringValue("6");
+ version.setLongValue(6L);
+ iw.addDocument(document);
+ iw.commit();
+
+ final Map<String, Long> expectedVersions = ImmutableMap.<String, Long>builder()
+ .put("1", 0L).put("2", 0L).put("3", 0L).put("4", 4L).put("5", 5L).put("6", 6L).build();
+
+ // Force merge and check versions
+ Merges.forceMerge(iw, 1);
+ final AtomicReader ir = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(iw.getDirectory()));
+ final NumericDocValues versions = ir.getNumericDocValues(VersionFieldMapper.NAME);
+ assertThat(versions, notNullValue());
+ for (int i = 0; i < ir.maxDoc(); ++i) {
+ final String uidValue = ir.document(i).get(UidFieldMapper.NAME);
+ final long expectedVersion = expectedVersions.get(uidValue);
+ assertThat(versions.get(i), equalTo(expectedVersion));
+ }
+
+ iw.close();
+ assertThat(IndexWriter.isLocked(iw.getDirectory()), is(false));
+ ir.close();
+ dir.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/path/PathTrieTests.java b/src/test/java/org/elasticsearch/common/path/PathTrieTests.java
new file mode 100644
index 0000000..6b60a45
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/path/PathTrieTests.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.path;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for {@link PathTrie} path routing: literal segments, wildcards and
+ * named parameters such as {index}.
+ */
+public class PathTrieTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/a/b/c", "walla");
+ trie.insert("a/d/g", "kuku");
+ trie.insert("x/b/c", "lala");
+ trie.insert("a/x/*", "one");
+ trie.insert("a/b/*", "two");
+ trie.insert("*/*/x", "three");
+ trie.insert("{index}/insert/{docId}", "bingo");
+
+ assertThat(trie.retrieve("a/b/c"), equalTo("walla"));
+ assertThat(trie.retrieve("a/d/g"), equalTo("kuku"));
+ assertThat(trie.retrieve("x/b/c"), equalTo("lala"));
+ assertThat(trie.retrieve("a/x/b"), equalTo("one"));
+ assertThat(trie.retrieve("a/b/d"), equalTo("two"));
+
+ assertThat(trie.retrieve("a/b"), nullValue());
+ assertThat(trie.retrieve("a/b/c/d"), nullValue());
+ assertThat(trie.retrieve("g/t/x"), equalTo("three"));
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("index1/insert/12", params), equalTo("bingo"));
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("index"), equalTo("index1"));
+ assertThat(params.get("docId"), equalTo("12"));
+ }
+
+ @Test
+ public void testEmptyPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/", "walla");
+ assertThat(trie.retrieve(""), equalTo("walla"));
+ }
+
+ @Test
+ public void testDifferentNamesOnDifferentPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/a/{type}", "test1");
+ trie.insert("/b/{name}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/test", params), equalTo("test1"));
+ assertThat(params.get("type"), equalTo("test"));
+
+ params.clear();
+ assertThat(trie.retrieve("/b/testX", params), equalTo("test2"));
+ assertThat(params.get("name"), equalTo("testX"));
+ }
+
+ @Test
+ public void testSameNameOnDifferentPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/a/c/{name}", "test1");
+ trie.insert("/b/{name}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/c/test", params), equalTo("test1"));
+ assertThat(params.get("name"), equalTo("test"));
+
+ params.clear();
+ assertThat(trie.retrieve("/b/testX", params), equalTo("test2"));
+ assertThat(params.get("name"), equalTo("testX"));
+ }
+
+ @Test
+ public void testPreferNonWildcardExecution() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("{test}", "test1");
+ trie.insert("b", "test2");
+ trie.insert("{test}/a", "test3");
+ trie.insert("b/a", "test4");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/b", params), equalTo("test2"));
+ assertThat(trie.retrieve("/b/a", params), equalTo("test4"));
+ }
+
+ @Test
+ public void testSamePathConcreteResolution() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("{x}/{y}/{z}", "test1");
+ trie.insert("{x}/_y/{k}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/b/c", params), equalTo("test1"));
+ assertThat(params.get("x"), equalTo("a"));
+ assertThat(params.get("y"), equalTo("b"));
+ assertThat(params.get("z"), equalTo("c"));
+ params.clear();
+ assertThat(trie.retrieve("/a/_y/c", params), equalTo("test2"));
+ assertThat(params.get("x"), equalTo("a"));
+ assertThat(params.get("k"), equalTo("c"));
+ }
+
+ @Test
+ public void testNamedWildcardAndLookupWithWildcard() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("x/{test}", "test1");
+ trie.insert("{test}/a", "test2");
+ trie.insert("/{test}", "test3");
+ trie.insert("/{test}/_endpoint", "test4");
+ trie.insert("/*/{test}/_endpoint", "test5");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/x/*", params), equalTo("test1"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/b/a", params), equalTo("test2"));
+ assertThat(params.get("test"), equalTo("b"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/*", params), equalTo("test3"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/*/_endpoint", params), equalTo("test4"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("a/*/_endpoint", params), equalTo("test5"));
+ assertThat(params.get("test"), equalTo("*"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java
new file mode 100644
index 0000000..74c491b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+public abstract class AbstractRecyclerTests extends ElasticsearchTestCase {
+
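+ // Recycles fixed-size 10-byte arrays; clear() zeroes an array before reuse.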
+ protected static final Recycler.C<byte[]> RECYCLER_C = new Recycler.C<byte[]>() {
+
+ @Override
+ public byte[] newInstance(int sizing) {
+ return new byte[10];
+ }
+
+ @Override
+ public void clear(byte[] value) {
+ Arrays.fill(value, (byte) 0);
+ }
+
+ };
+
+ protected abstract Recycler<byte[]> newRecycler();
+
+ public void testReuse() {
+ Recycler<byte[]> r = newRecycler();
+ Recycler.V<byte[]> o = r.obtain();
+ assertFalse(o.isRecycled());
+ final byte[] b1 = o.v();
+ o.release();
+ o = r.obtain();
+ final byte[] b2 = o.v();
+ if (o.isRecycled()) {
+ assertSame(b1, b2);
+ } else {
+ assertNotSame(b1, b2);
+ }
+ o.release();
+ r.close();
+ }
+
+ public void testClear() {
+ Recycler<byte[]> r = newRecycler();
+ Recycler.V<byte[]> o = r.obtain();
+ getRandom().nextBytes(o.v());
+ o.release();
+ o = r.obtain();
+ for (int i = 0; i < o.v().length; ++i) {
+ assertEquals(0, o.v()[i]);
+ }
+ o.release();
+ r.close();
+ }
+
+ public void testDoubleRelease() {
+ final Recycler<byte[]> r = newRecycler();
+ final Recycler.V<byte[]> v1 = r.obtain();
+ v1.release();
+ try {
+ v1.release();
+ } catch (ElasticsearchIllegalStateException e) {
+ // impl has protection against double release: ok
+ return;
+ }
+ // otherwise ensure that the released value is not handed out twice
+ final Recycler.V<byte[]> v2 = r.obtain();
+ final Recycler.V<byte[]> v3 = r.obtain();
+ assertNotSame(v2.v(), v3.v());
+ r.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java
new file mode 100644
index 0000000..758041d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class ConcurrentRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.concurrent(Recyclers.dequeFactory(RECYCLER_C, randomIntBetween(5, 10)), randomIntBetween(1,5));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java
new file mode 100644
index 0000000..9ffdf7a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class LockedRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.locked(Recyclers.deque(RECYCLER_C, randomIntBetween(5, 10)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java
new file mode 100644
index 0000000..a60c0ba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class NoneRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.none(RECYCLER_C);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java
new file mode 100644
index 0000000..f693c30
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class QueueRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.concurrentDeque(RECYCLER_C, randomIntBetween(5, 10));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java
new file mode 100644
index 0000000..0320ff5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class SoftConcurrentRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.concurrent(Recyclers.softFactory(Recyclers.dequeFactory(RECYCLER_C, randomIntBetween(5, 10))), randomIntBetween(1, 5));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java
new file mode 100644
index 0000000..2a5d253
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class SoftThreadLocalRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.threadLocal(Recyclers.softFactory(Recyclers.dequeFactory(RECYCLER_C, 10)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java
new file mode 100644
index 0000000..5ab6892
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class ThreadLocalRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.threadLocal(Recyclers.dequeFactory(RECYCLER_C, 10));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/regex/RegexTests.java b/src/test/java/org/elasticsearch/common/regex/RegexTests.java
new file mode 100644
index 0000000..380bf90
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/regex/RegexTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.regex;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Random;
+import java.util.regex.Pattern;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class RegexTests extends ElasticsearchTestCase {
+
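+ // Round-trips random flag combinations through flagsFromString/flagsToString
+ // and verifies that Pattern.compile accepts the resulting bit mask.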
+ @Test
+ public void testFlags() {
+ String[] supportedFlags = new String[]{"CASE_INSENSITIVE", "MULTILINE", "DOTALL", "UNICODE_CASE", "CANON_EQ", "UNIX_LINES",
+ "LITERAL", "COMMENTS", "UNICODE_CHAR_CLASS"};
+ int[] flags = new int[]{Pattern.CASE_INSENSITIVE, Pattern.MULTILINE, Pattern.DOTALL, Pattern.UNICODE_CASE, Pattern.CANON_EQ,
+ Pattern.UNIX_LINES, Pattern.LITERAL, Pattern.COMMENTS, Regex.UNICODE_CHARACTER_CLASS};
+ Random random = getRandom();
+ int num = 10 + random.nextInt(100);
+ for (int i = 0; i < num; i++) {
+ int numFlags = random.nextInt(flags.length + 1);
+ int current = 0;
+ StringBuilder builder = new StringBuilder();
+ for (int j = 0; j < numFlags; j++) {
+ int index = random.nextInt(flags.length);
+ current |= flags[index];
+ builder.append(supportedFlags[index]);
+ if (j < numFlags - 1) {
+ builder.append("|");
+ }
+ }
+ String flagsToString = Regex.flagsToString(current);
+ assertThat(Regex.flagsFromString(builder.toString()), equalTo(current));
+ assertThat(Regex.flagsFromString(builder.toString()), equalTo(Regex.flagsFromString(flagsToString)));
+ Pattern.compile("\\w\\d{1,2}", current); // make sure Pattern accepts the combined flags
+ }
+ }
+
+ @Test(timeout = 1000)
+ public void testDoubleWildcardMatch() {
+ assertTrue(Regex.simpleMatch("ddd", "ddd"));
+ assertTrue(Regex.simpleMatch("d*d*d", "dadd"));
+ assertTrue(Regex.simpleMatch("**ddd", "dddd"));
+ assertFalse(Regex.simpleMatch("**ddd", "fff"));
+ assertTrue(Regex.simpleMatch("fff*ddd", "fffabcddd"));
+ assertTrue(Regex.simpleMatch("fff**ddd", "fffabcddd"));
+ assertFalse(Regex.simpleMatch("fff**ddd", "fffabcdd"));
+ assertTrue(Regex.simpleMatch("fff*******ddd", "fffabcddd"));
+ assertFalse(Regex.simpleMatch("fff******ddd", "fffabcdd"));
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java b/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java
new file mode 100644
index 0000000..cd77bfc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class RoundingTests extends ElasticsearchTestCase {
+
+ public void testInterval() {
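+ // invariants: round(l) is a multiple of the interval, round(l) <= l, and round(l) + interval > l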
+ final long interval = randomIntBetween(1, 100);
+ Rounding.Interval rounding = new Rounding.Interval(interval);
+ for (int i = 0; i < 1000; ++i) {
+ long l = Math.max(randomLong(), Long.MIN_VALUE + interval);
+ final long r = rounding.round(l);
+ String message = "round(" + l + ", interval=" + interval + ") = " + r;
+ assertEquals(message, 0, r % interval);
+ assertThat(message, r, lessThanOrEqualTo(l));
+ assertThat(message, r + interval, greaterThan(l));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java
new file mode 100644
index 0000000..f57f4e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class TimeZoneRoundingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testUTCMonthRounding() {
+ TimeZoneRounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-01T00:00:00.000Z")));
+ assertThat(tzRounding.nextRoundingValue(utc("2009-02-01T00:00:00.000Z")), equalTo(utc("2009-03-01T00:00:00.000Z")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build();
+ assertThat(tzRounding.round(utc("2012-01-10T01:01:01")), equalTo(utc("2012-01-09T00:00:00.000Z")));
+ assertThat(tzRounding.nextRoundingValue(utc("2012-01-09T00:00:00.000Z")), equalTo(utc("2012-01-16T00:00:00.000Z")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).postOffset(-TimeValue.timeValueHours(24).millis()).build();
+ assertThat(tzRounding.round(utc("2012-01-10T01:01:01")), equalTo(utc("2012-01-08T00:00:00.000Z")));
+ assertThat(tzRounding.nextRoundingValue(utc("2012-01-08T00:00:00.000Z")), equalTo(utc("2012-01-15T00:00:00.000Z")));
+ }
+
+ @Test
+ public void testDayTimeZoneRounding() {
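+ // preZone shifts the input into the given zone before rounding; postZone shifts the rounded result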
+ TimeZoneRounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(24).millis()));
+ assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(24).millis()), equalTo(0l));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(26).millis()));
+ assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(26).millis()), equalTo(-TimeValue.timeValueHours(2).millis()));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-02T00:00:00")));
+ assertThat(tzRounding.nextRoundingValue(utc("2009-02-02T00:00:00")), equalTo(utc("2009-02-03T00:00:00")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(time("2009-02-02T00:00:00", DateTimeZone.forOffsetHours(+2))));
+ assertThat(tzRounding.nextRoundingValue(time("2009-02-02T00:00:00", DateTimeZone.forOffsetHours(+2))), equalTo(time("2009-02-03T00:00:00", DateTimeZone.forOffsetHours(+2))));
+ }
+
+ @Test
+ public void testTimeTimeZoneRounding() {
+ TimeZoneRounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l));
+ assertThat(tzRounding.nextRoundingValue(0l), equalTo(TimeValue.timeValueHours(1l).getMillis()));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(2).millis()));
+ assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(2).millis()), equalTo(0l - TimeValue.timeValueHours(1).millis()));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-03T01:00:00")));
+ assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T01:00:00")), equalTo(utc("2009-02-03T02:00:00")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(time("2009-02-03T01:00:00", DateTimeZone.forOffsetHours(+2))));
+ assertThat(tzRounding.nextRoundingValue(time("2009-02-03T01:00:00", DateTimeZone.forOffsetHours(+2))), equalTo(time("2009-02-03T02:00:00", DateTimeZone.forOffsetHours(+2))));
+ }
+
+ private long utc(String time) {
+ return time(time, DateTimeZone.UTC);
+ }
+
+ private long time(String time, DateTimeZone zone) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java b/src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java
new file mode 100644
index 0000000..845f7e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.settings.bar.BarTestClass;
+import org.elasticsearch.common.settings.foo.FooTestClass;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class ImmutableSettingsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testGetAsClass() {
+ Settings settings = settingsBuilder()
+ .put("test.class", "bar")
+ .put("test.class.package", "org.elasticsearch.common.settings.bar")
+ .build();
+
+ // Assert that defaultClazz is loaded if the setting is not specified
+ assertThat(settings.getAsClass("no.settings", FooTestClass.class, "org.elasticsearch.common.settings.", "TestClass").getName(),
+ equalTo(FooTestClass.class.getName()));
+
+ // Assert that the correct class is loaded if the setting contains a name without a package
+ assertThat(settings.getAsClass("test.class", FooTestClass.class, "org.elasticsearch.common.settings.", "TestClass").getName(),
+ equalTo(BarTestClass.class.getName()));
+
+ // Assert that class cannot be loaded if wrong packagePrefix is specified
+ try {
+ settings.getAsClass("test.class", FooTestClass.class, "com.example.elasticsearch.test.unit..common.settings.", "TestClass");
+ fail("Class with wrong package name shouldn't be loaded");
+ } catch (NoClassSettingsException ex) {
+ // Ignore
+ }
+
+ // Assert that the package name from the settings is applied correctly
+ assertThat(settings.getAsClass("test.class.package", FooTestClass.class, "com.example.elasticsearch.test.unit.common.settings.", "TestClass").getName(),
+ equalTo(BarTestClass.class.getName()));
+
+ }
+
+ @Test
+ public void testLoadFromDelimitedString() {
+ Settings settings = settingsBuilder()
+ .loadFromDelimitedString("key1=value1;key2=value2", ';')
+ .build();
+ assertThat(settings.get("key1"), equalTo("value1"));
+ assertThat(settings.get("key2"), equalTo("value2"));
+ assertThat(settings.getAsMap().size(), equalTo(2));
+ assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
+
+ settings = settingsBuilder()
+ .loadFromDelimitedString("key1=value1;key2=value2;", ';')
+ .build();
+ assertThat(settings.get("key1"), equalTo("value1"));
+ assertThat(settings.get("key2"), equalTo("value2"));
+ assertThat(settings.getAsMap().size(), equalTo(2));
+ assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
+ }
+
+ @Test(expected = NoClassSettingsException.class)
+ public void testThatAllClassNotFoundExceptionsAreCaught() {
+ // this should be nGram in order to really work, but it must for sure not throw a NoClassDefFoundError
+ Settings settings = settingsBuilder().put("type", "ngram").build();
+ settings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenFilterFactory");
+ }
+
+ @Test
+ public void testReplacePropertiesPlaceholderSystemProperty() {
+ System.setProperty("sysProp1", "sysVal1");
+ try {
+ Settings settings = settingsBuilder()
+ .put("setting1", "${sysProp1}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), equalTo("sysVal1"));
+ } finally {
+ System.clearProperty("sysProp1");
+ }
+
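+ // a ":" inside the placeholder separates the property name from a default value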
+ Settings settings = settingsBuilder()
+ .put("setting1", "${sysProp1:defaultVal1}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), equalTo("defaultVal1"));
+
+ settings = settingsBuilder()
+ .put("setting1", "${sysProp1:}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), is(nullValue()));
+ }
+
+ @Test
+ public void testReplacePropertiesPlaceholderIgnoreEnvUnset() {
+ Settings settings = settingsBuilder()
+ .put("setting1", "${env.UNSET_ENV_VAR}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), is(nullValue()));
+ }
+
+ @Test
+ public void testUnFlattenedSettings() {
+ Settings settings = settingsBuilder()
+ .put("foo", "abc")
+ .put("bar", "def")
+ .put("baz.foo", "ghi")
+ .put("baz.bar", "jkl")
+ .putArray("baz.arr", "a", "b", "c")
+ .build();
+ Map<String, Object> map = settings.getAsStructuredMap();
+ assertThat(map.keySet(), Matchers.<String>hasSize(3));
+ assertThat(map, allOf(
+ Matchers.<String, Object>hasEntry("foo", "abc"),
+ Matchers.<String, Object>hasEntry("bar", "def")));
+
+ @SuppressWarnings("unchecked") Map<String, Object> bazMap = (Map<String, Object>) map.get("baz");
+ assertThat(bazMap.keySet(), Matchers.<String>hasSize(3));
+ assertThat(bazMap, allOf(
+ Matchers.<String, Object>hasEntry("foo", "ghi"),
+ Matchers.<String, Object>hasEntry("bar", "jkl")));
+ @SuppressWarnings("unchecked") List<String> bazArr = (List<String>) bazMap.get("arr");
+ assertThat(bazArr, contains("a", "b", "c"));
+
+ }
+
+ @Test
+ public void testFallbackToFlattenedSettings() {
+ Settings settings = settingsBuilder()
+ .put("foo", "abc")
+ .put("foo.bar", "def")
+ .put("foo.baz", "ghi").build();
+ Map<String, Object> map = settings.getAsStructuredMap();
+ assertThat(map.keySet(), Matchers.<String>hasSize(3));
+ assertThat(map, allOf(
+ Matchers.<String, Object>hasEntry("foo", "abc"),
+ Matchers.<String, Object>hasEntry("foo.bar", "def"),
+ Matchers.<String, Object>hasEntry("foo.baz", "ghi")));
+
+ settings = settingsBuilder()
+ .put("foo.bar", "def")
+ .put("foo", "abc")
+ .put("foo.baz", "ghi")
+ .build();
+ map = settings.getAsStructuredMap();
+ assertThat(map.keySet(), Matchers.<String>hasSize(3));
+ assertThat(map, allOf(
+ Matchers.<String, Object>hasEntry("foo", "abc"),
+ Matchers.<String, Object>hasEntry("foo.bar", "def"),
+ Matchers.<String, Object>hasEntry("foo.baz", "ghi")));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java b/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java
new file mode 100644
index 0000000..d4d5d14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.bar;
+
+// used in ImmutableSettingsTests
+public class BarTestClass {
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java b/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java
new file mode 100644
index 0000000..36f1527
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.foo;
+
+// used in ImmutableSettingsTests
+public class FooTestClass {
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java b/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
new file mode 100644
index 0000000..c237b96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class JsonSettingsLoaderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleJsonSettings() throws Exception {
+ Settings settings = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.json")
+ .build();
+
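+ // nested JSON objects flatten into dot-notation keys; arrays get numeric key suffixes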
+ assertThat(settings.get("test1.value1"), equalTo("value1"));
+ assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
+ assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
+
+ // check array
+ assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
+ assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
+ assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
+ assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
+ assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
new file mode 100644
index 0000000..d541d15
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class YamlSettingsLoaderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleYamlSettings() throws Exception {
+ Settings settings = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.yml")
+ .build();
+
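+ // the YAML tree flattens the same way as the JSON version: dot-notation keys, numeric array suffixes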
+ assertThat(settings.get("test1.value1"), equalTo("value1"));
+ assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
+ assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
+
+ // check array
+ assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
+ assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
+ assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
+ assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
+ assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json
new file mode 100644
index 0000000..7190648
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json
@@ -0,0 +1,10 @@
+{
+ test1:{
+ value1:"value1",
+ test2:{
+ value2:"value2",
+ value3:2
+ },
+ test3:["test3-1", "test3-2"]
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml
new file mode 100644
index 0000000..b533ae0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml
@@ -0,0 +1,8 @@
+test1:
+ value1: value1
+ test2:
+ value2: value2
+ value3: 2
+ test3:
+ - test3-1
+ - test3-2
diff --git a/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java b/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
new file mode 100644
index 0000000..8b39e4e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.ByteSizeUnit.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ByteSizeUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testBytes() {
+ assertThat(BYTES.toBytes(1), equalTo(1l));
+ assertThat(BYTES.toKB(1024), equalTo(1l));
+ assertThat(BYTES.toMB(1024 * 1024), equalTo(1l));
+ assertThat(BYTES.toGB(1024 * 1024 * 1024), equalTo(1l));
+ }
+
+ @Test
+ public void testKB() {
+ assertThat(KB.toBytes(1), equalTo(1024l));
+ assertThat(KB.toKB(1), equalTo(1l));
+ assertThat(KB.toMB(1024), equalTo(1l));
+ assertThat(KB.toGB(1024 * 1024), equalTo(1l));
+ }
+
+ @Test
+ public void testMB() {
+ assertThat(MB.toBytes(1), equalTo(1024l * 1024));
+ assertThat(MB.toKB(1), equalTo(1024l));
+ assertThat(MB.toMB(1), equalTo(1l));
+ assertThat(MB.toGB(1024), equalTo(1l));
+ }
+
+ @Test
+ public void testGB() {
+ assertThat(GB.toBytes(1), equalTo(1024l * 1024 * 1024));
+ assertThat(GB.toKB(1), equalTo(1024l * 1024));
+ assertThat(GB.toMB(1), equalTo(1024l));
+ assertThat(GB.toGB(1), equalTo(1l));
+ }
+
+ @Test
+ public void testTB() {
+ assertThat(TB.toBytes(1), equalTo(1024l * 1024 * 1024 * 1024));
+ assertThat(TB.toKB(1), equalTo(1024l * 1024 * 1024));
+ assertThat(TB.toMB(1), equalTo(1024l * 1024));
+ assertThat(TB.toGB(1), equalTo(1024l));
+ assertThat(TB.toTB(1), equalTo(1l));
+ }
+
+ @Test
+ public void testPB() {
+ assertThat(PB.toBytes(1), equalTo(1024l * 1024 * 1024 * 1024 * 1024));
+ assertThat(PB.toKB(1), equalTo(1024l * 1024 * 1024 * 1024));
+ assertThat(PB.toMB(1), equalTo(1024l * 1024 * 1024));
+ assertThat(PB.toGB(1), equalTo(1024l * 1024));
+ assertThat(PB.toTB(1), equalTo(1024l));
+ assertThat(PB.toPB(1), equalTo(1l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
new file mode 100644
index 0000000..0522f87
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class ByteSizeValueTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testActualPeta() {
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).bytes(), equalTo(4503599627370496l));
+ }
+
+ @Test
+ public void testActualTera() {
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).bytes(), equalTo(4398046511104l));
+ }
+
+ @Test
+ public void testActual() {
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296l));
+ }
+
+ @Test
+ public void testSimple() {
+ assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).bytes()));
+ assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).kb()));
+ assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).mb()));
+ assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).gb()));
+ assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, ByteSizeUnit.TB).tb()));
+ assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).pb()));
+ }
+
+ @Test
+ public void testToString() {
+ assertThat("10b", is(new ByteSizeValue(10, ByteSizeUnit.BYTES).toString()));
+ assertThat("1.5kb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.BYTES).toString()));
+ assertThat("1.5mb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.KB).toString()));
+ assertThat("1.5gb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.MB).toString()));
+ assertThat("1.5tb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.GB).toString()));
+ assertThat("1.5pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.TB).toString()));
+ assertThat("1536pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.PB).toString()));
+ }
+
+ @Test
+ public void testParsing() {
+ assertThat(ByteSizeValue.parseBytesSizeValue("42pb").toString(), is("42pb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("42P").toString(), is("42pb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("42PB").toString(), is("42pb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("54tb").toString(), is("54tb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("54T").toString(), is("54tb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("54TB").toString(), is("54tb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12gb").toString(), is("12gb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12G").toString(), is("12gb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12GB").toString(), is("12gb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12M").toString(), is("12mb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("1b").toString(), is("1b"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("23kb").toString(), is("23kb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("23k").toString(), is("23kb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("23").toString(), is("23b"));
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testFailOnEmptyParsing() {
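+ // parsing the empty string should throw; the assertion below is never reached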
+ assertThat(ByteSizeValue.parseBytesSizeValue("").toString(), is("23kb"));
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testFailOnEmptyNumberParsing() {
+ assertThat(ByteSizeValue.parseBytesSizeValue("g").toString(), is("23b"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
new file mode 100644
index 0000000..d84107e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class DistanceUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleDistanceUnit() {
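+ // convert(value, from) reads the value in the "from" unit and returns it expressed in this unit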
+ assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.MILES), closeTo(16.09344, 0.001));
+ assertThat(DistanceUnit.MILES.convert(10, DistanceUnit.MILES), closeTo(10, 0.001));
+ assertThat(DistanceUnit.MILES.convert(10, DistanceUnit.KILOMETERS), closeTo(6.21371192, 0.001));
+ assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.KILOMETERS), closeTo(10, 0.001));
+ assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.METERS), closeTo(0.01, 0.00001));
+ assertThat(DistanceUnit.KILOMETERS.convert(1000, DistanceUnit.METERS), closeTo(1, 0.001));
+ assertThat(DistanceUnit.METERS.convert(1, DistanceUnit.KILOMETERS), closeTo(1000, 0.001));
+ }
+
+ @Test
+ public void testDistanceUnitParsing() {
+ assertThat(DistanceUnit.Distance.parseDistance("50km").unit, equalTo(DistanceUnit.KILOMETERS));
+ assertThat(DistanceUnit.Distance.parseDistance("500m").unit, equalTo(DistanceUnit.METERS));
+ assertThat(DistanceUnit.Distance.parseDistance("51mi").unit, equalTo(DistanceUnit.MILES));
+ assertThat(DistanceUnit.Distance.parseDistance("52yd").unit, equalTo(DistanceUnit.YARD));
+ assertThat(DistanceUnit.Distance.parseDistance("12in").unit, equalTo(DistanceUnit.INCH));
+ assertThat(DistanceUnit.Distance.parseDistance("23mm").unit, equalTo(DistanceUnit.MILLIMETERS));
+ assertThat(DistanceUnit.Distance.parseDistance("23cm").unit, equalTo(DistanceUnit.CENTIMETERS));
+
+ double testValue = 12345.678;
+ for (DistanceUnit unit : DistanceUnit.values()) {
+ assertThat("Unit can be parsed from '" + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
+ assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
+ assertThat("Value can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
new file mode 100644
index 0000000..448d052
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.number.IsCloseTo.closeTo;
+
+public class FuzzinessTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNumerics() {
+ String[] options = new String[]{"1.0", "1", "1.000000"};
+ assertThat(Fuzziness.build(randomFrom(options)).asByte(), equalTo((byte) 1));
+ assertThat(Fuzziness.build(randomFrom(options)).asInt(), equalTo(1));
+ assertThat(Fuzziness.build(randomFrom(options)).asFloat(), equalTo(1f));
+ assertThat(Fuzziness.build(randomFrom(options)).asDouble(), equalTo(1d));
+ assertThat(Fuzziness.build(randomFrom(options)).asLong(), equalTo(1l));
+ assertThat(Fuzziness.build(randomFrom(options)).asShort(), equalTo((short) 1));
+ }
+
+ @Test
+ public void testParseFromXContent() throws IOException {
+ final int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ {
+ XContent xcontent = XContentType.JSON.xContent();
+ float floatValue = randomFloat();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, floatValue)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asFloat(), equalTo(floatValue));
+ assertThat(parse.asDouble(), closeTo((double) floatValue, 0.000001));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+
+ {
+ XContent xcontent = XContentType.JSON.xContent();
+ Integer intValue = frequently() ? randomIntBetween(0, 2) : randomIntBetween(0, 100);
+ Float floatRep = randomFloat();
+ Number value = intValue;
+ if (randomBoolean()) {
+ value = new Float(floatRep += intValue);
+ }
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? value.toString() : value)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER), equalTo(XContentParser.Token.VALUE_STRING)));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asInt(), equalTo(intValue));
+ assertThat((int) parse.asShort(), equalTo(intValue));
+ assertThat((int) parse.asByte(), equalTo(intValue));
+ assertThat(parse.asLong(), equalTo((long) intValue));
+ if (value.intValue() >= 1) {
+ assertThat(parse.asDistance(), equalTo(Math.min(2, intValue)));
+ }
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ if (intValue.equals(value)) {
+ switch (intValue) {
+ case 1:
+ assertThat(parse, sameInstance(Fuzziness.ONE));
+ break;
+ case 2:
+ assertThat(parse, sameInstance(Fuzziness.TWO));
+ break;
+ case 0:
+ assertThat(parse, sameInstance(Fuzziness.ZERO));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ {
+ XContent xcontent = XContentType.JSON.xContent();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? "AUTO" : "auto")
+ .endObject().string();
+ if (randomBoolean()) {
+ json = Fuzziness.AUTO.toXContent(jsonBuilder().startObject(), null).endObject().string();
+ }
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse, sameInstance(Fuzziness.AUTO));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+
+ {
+ String[] values = new String[]{"d", "H", "ms", "s", "S", "w"};
+ String actual = randomIntBetween(1, 3) + randomFrom(values);
+ XContent xcontent = XContentType.JSON.xContent();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, actual)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asTimeValue(), equalTo(TimeValue.parseTimeValue(actual, null)));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+ }
+
+ }
+
+ @Test
+ public void testAuto() {
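+ // AUTO scales the edit distance with term length: 0 edits up to 2 code points, 1 up to 5, 2 beyond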
+ final int codePoints = randomIntBetween(0, 10);
+ String string = randomRealisticUnicodeOfCodepointLength(codePoints);
+ if (codePoints <= 2) {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(0));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(0));
+ } else if (codePoints > 5) {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(2));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(2));
+ } else {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(1));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(1));
+ }
+ assertThat(Fuzziness.AUTO.asByte(), equalTo((byte) 1));
+ assertThat(Fuzziness.AUTO.asInt(), equalTo(1));
+ assertThat(Fuzziness.AUTO.asFloat(), equalTo(1f));
+ assertThat(Fuzziness.AUTO.asDouble(), equalTo(1d));
+ assertThat(Fuzziness.AUTO.asLong(), equalTo(1l));
+ assertThat(Fuzziness.AUTO.asShort(), equalTo((short) 1));
+ assertThat(Fuzziness.AUTO.asTimeValue(), equalTo(TimeValue.parseTimeValue("1", TimeValue.timeValueMillis(1))));
+
+ }
+
+ @Test
+ public void testAsDistance() {
+ final int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ Integer integer = Integer.valueOf(randomIntBetween(0, 10));
+ String value = "" + (randomBoolean() ? integer.intValue() : integer.floatValue());
+ assertThat(Fuzziness.build(value).asDistance(), equalTo(Math.min(2, integer.intValue())));
+ }
+ }
+
+ @Test
+ public void testSimilarityToDistance() {
+ assertThat(Fuzziness.fromSimilarity(0.5f).asDistance("ab"), equalTo(1));
+ assertThat(Fuzziness.fromSimilarity(0.66f).asDistance("abcefg"), equalTo(2));
+ assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("ab"), equalTo(0));
+ assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("abcefg"), equalTo(1));
+ assertThat((double) Fuzziness.ONE.asSimilarity("abcefg"), closeTo(0.8f, 0.05));
+ assertThat((double) Fuzziness.TWO.asSimilarity("abcefg"), closeTo(0.66f, 0.05));
+ assertThat((double) Fuzziness.ONE.asSimilarity("ab"), closeTo(0.5f, 0.05));
+
+ int iters = atLeast(100);
+ for (int i = 0; i < iters; i++) {
+ Fuzziness fuzziness = Fuzziness.fromEdits(between(1, 2));
+ String string = rarely() ? randomRealisticUnicodeOfLengthBetween(2, 4) :
+ randomRealisticUnicodeOfLengthBetween(4, 10);
+ float similarity = fuzziness.asSimilarity(string);
+ if (similarity != 0.0f) {
+ Fuzziness similarityBased = Fuzziness.build(similarity);
+ assertThat((double) similarityBased.asSimilarity(string), closeTo(similarity, 0.05));
+ assertThat(similarityBased.asDistance(string), equalTo(Math.min(2, fuzziness.asDistance(string))));
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
new file mode 100644
index 0000000..6ca424a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.PeriodType;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ *
+ */
+public class TimeValueTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() {
+ assertThat(TimeUnit.MILLISECONDS.toMillis(10), equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).millis()));
+ assertThat(TimeUnit.MICROSECONDS.toMicros(10), equalTo(new TimeValue(10, TimeUnit.MICROSECONDS).micros()));
+ assertThat(TimeUnit.SECONDS.toSeconds(10), equalTo(new TimeValue(10, TimeUnit.SECONDS).seconds()));
+ assertThat(TimeUnit.MINUTES.toMinutes(10), equalTo(new TimeValue(10, TimeUnit.MINUTES).minutes()));
+ assertThat(TimeUnit.HOURS.toHours(10), equalTo(new TimeValue(10, TimeUnit.HOURS).hours()));
+ assertThat(TimeUnit.DAYS.toDays(10), equalTo(new TimeValue(10, TimeUnit.DAYS).days()));
+ }
+
+ @Test
+ public void testToString() {
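+ // toString picks the coarsest unit the value fits and prints at most one decimal place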
+ assertThat("10ms", equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).toString()));
+ assertThat("1.5s", equalTo(new TimeValue(1533, TimeUnit.MILLISECONDS).toString()));
+ assertThat("1.5m", equalTo(new TimeValue(90, TimeUnit.SECONDS).toString()));
+ assertThat("1.5h", equalTo(new TimeValue(90, TimeUnit.MINUTES).toString()));
+ assertThat("1.5d", equalTo(new TimeValue(36, TimeUnit.HOURS).toString()));
+ assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).toString()));
+ }
+
+ @Test
+ public void testFormat() {
+ assertThat(new TimeValue(1025, TimeUnit.MILLISECONDS).format(PeriodType.dayTime()), equalTo("1 second and 25 milliseconds"));
+ assertThat(new TimeValue(1, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 minute"));
+ assertThat(new TimeValue(65, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 hour and 5 minutes"));
+ assertThat(new TimeValue(24 * 600 + 85, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("241 hours and 25 minutes"));
+ }
+
+ @Test
+ public void testMinusOne() {
+ assertThat(new TimeValue(-1).nanos(), lessThan(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java
new file mode 100644
index 0000000..514caad
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+
+public class BigArraysTests extends ElasticsearchTestCase {
+
+ public static PageCacheRecycler randomCacheRecycler() {
+ return randomBoolean() ? null : new MockPageCacheRecycler(ImmutableSettings.EMPTY, new ThreadPool());
+ }
+
+ public void testByteArrayGrowth() {
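+ // grow() resizes in place or reallocates; everything written so far must survive each resize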
+ final int totalLen = randomIntBetween(1, 4000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ ByteArray array = BigArrays.newByteArray(startLen, randomCacheRecycler(), randomBoolean());
+ byte[] ref = new byte[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomByte();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testIntArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ IntArray array = BigArrays.newIntArray(startLen, randomCacheRecycler(), randomBoolean());
+ int[] ref = new int[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomInt();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testLongArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ LongArray array = BigArrays.newLongArray(startLen, randomCacheRecycler(), randomBoolean());
+ long[] ref = new long[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomLong();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testDoubleArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ DoubleArray array = BigArrays.newDoubleArray(startLen, randomCacheRecycler(), randomBoolean());
+ double[] ref = new double[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomDouble();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i), 0.001d);
+ }
+ array.release();
+ }
+
+ public void testObjectArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ ObjectArray<Object> array = BigArrays.newObjectArray(startLen, randomCacheRecycler());
+ final Object[] pool = new Object[100];
+ for (int i = 0; i < pool.length; ++i) {
+ pool[i] = new Object();
+ }
+ Object[] ref = new Object[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomFrom(pool);
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertSame(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testDoubleArrayFill() {
+ final int len = randomIntBetween(1, 100000);
+ final int fromIndex = randomIntBetween(0, len - 1);
+ final int toIndex = randomBoolean()
+ ? Math.min(fromIndex + randomInt(100), len) // single page
+ : randomIntBetween(fromIndex, len); // likely multiple pages
+ final DoubleArray array2 = BigArrays.newDoubleArray(len, randomCacheRecycler(), randomBoolean());
+ final double[] array1 = new double[len];
+ for (int i = 0; i < len; ++i) {
+ array1[i] = randomDouble();
+ array2.set(i, array1[i]);
+ }
+ final double rand = randomDouble();
+ Arrays.fill(array1, fromIndex, toIndex, rand);
+ array2.fill(fromIndex, toIndex, rand);
+ for (int i = 0; i < len; ++i) {
+ assertEquals(array1[i], array2.get(i), 0.001d);
+ }
+ array2.release();
+ }
+
+ public void testLongArrayFill() {
+ final int len = randomIntBetween(1, 100000);
+ final int fromIndex = randomIntBetween(0, len - 1);
+ final int toIndex = randomBoolean()
+ ? Math.min(fromIndex + randomInt(100), len) // single page
+ : randomIntBetween(fromIndex, len); // likely multiple pages
+ final LongArray array2 = BigArrays.newLongArray(len, randomCacheRecycler(), randomBoolean());
+ final long[] array1 = new long[len];
+ for (int i = 0; i < len; ++i) {
+ array1[i] = randomLong();
+ array2.set(i, array1[i]);
+ }
+ final long rand = randomLong();
+ Arrays.fill(array1, fromIndex, toIndex, rand);
+ array2.fill(fromIndex, toIndex, rand);
+ for (int i = 0; i < len; ++i) {
+ assertEquals(array1[i], array2.get(i));
+ }
+ array2.release();
+ }
+
+ public void testByteArrayBulkGet() {
+ final byte[] array1 = new byte[randomIntBetween(1, 4000000)];
+ getRandom().nextBytes(array1);
+ final ByteArray array2 = BigArrays.newByteArray(array1.length, randomCacheRecycler(), randomBoolean());
+ for (int i = 0; i < array1.length; ++i) {
+ array2.set(i, array1[i]);
+ }
+ final BytesRef ref = new BytesRef();
+ for (int i = 0; i < 1000; ++i) {
+ final int offset = randomInt(array1.length - 1);
+ final int len = randomInt(Math.min(randomBoolean() ? 10 : Integer.MAX_VALUE, array1.length - offset));
+ array2.get(offset, len, ref);
+ assertEquals(new BytesRef(array1, offset, len), ref);
+ }
+ array2.release();
+ }
+
+ public void testByteArrayBulkSet() {
+ final byte[] array1 = new byte[randomIntBetween(1, 4000000)];
+ getRandom().nextBytes(array1);
+ final ByteArray array2 = BigArrays.newByteArray(array1.length, randomCacheRecycler(), randomBoolean());
+ for (int i = 0; i < array1.length; ) {
+ final int len = Math.min(array1.length - i, randomBoolean() ? randomInt(10) : randomInt(3 * BigArrays.BYTE_PAGE_SIZE));
+ array2.set(i, array1, i, len);
+ i += len;
+ }
+ for (int i = 0; i < array1.length; ++i) {
+ assertEquals(array1[i], array2.get(i));
+ }
+ array2.release();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java b/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java
new file mode 100644
index 0000000..7331962
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+
+public class ByteUtilsTests extends ElasticsearchTestCase {
+
+ public void testZigZag(long l) {
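+ // round-trip check: zig-zag decode of the encoded value must give back the input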
+ assertEquals(l, ByteUtils.zigZagDecode(ByteUtils.zigZagEncode(l)));
+ }
+
+ public void testZigZag() {
+ testZigZag(0);
+ testZigZag(1);
+ testZigZag(-1);
+ testZigZag(Long.MAX_VALUE);
+ testZigZag(Long.MIN_VALUE);
+ for (int i = 0; i < 1000; ++i) {
+ testZigZag(randomLong());
+ assertTrue(ByteUtils.zigZagEncode(randomInt(1000)) >= 0);
+ assertTrue(ByteUtils.zigZagEncode(-randomInt(1000)) >= 0);
+ }
+ }
+
+ public void testFloat() throws IOException {
+ final float[] data = new float[atLeast(1000)];
+ final byte[] encoded = new byte[data.length * 4];
+ for (int i = 0; i < data.length; ++i) {
+ data[i] = randomFloat();
+ ByteUtils.writeFloatLE(data[i], encoded, i * 4);
+ }
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(data[i], ByteUtils.readFloatLE(encoded, i * 4), Float.MIN_VALUE);
+ }
+ }
+
+ public void testDouble() throws IOException {
+ final double[] data = new double[atLeast(1000)];
+ final byte[] encoded = new byte[data.length * 8];
+ for (int i = 0; i < data.length; ++i) {
+ data[i] = randomDouble();
+ ByteUtils.writeDoubleLE(data[i], encoded, i * 8);
+ }
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(data[i], ByteUtils.readDoubleLE(encoded, i * 8), Double.MIN_VALUE);
+ }
+ }
+
+ public void testVLong() throws IOException {
+ final long[] data = new long[atLeast(1000)];
+ for (int i = 0; i < data.length; ++i) {
+ switch (randomInt(4)) {
+ case 0:
+ data[i] = 0;
+ break;
+ case 1:
+ data[i] = Long.MAX_VALUE;
+ break;
+ case 2:
+ data[i] = Long.MIN_VALUE;
+ break;
+ case 3:
+ data[i] = randomInt(1 << randomIntBetween(2, 30));
+ break;
+ case 4:
+ data[i] = randomLong();
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+ final byte[] encoded = new byte[ByteUtils.MAX_BYTES_VLONG * data.length];
+ ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+ for (int i = 0; i < data.length; ++i) {
+ final int pos = out.getPosition();
+ ByteUtils.writeVLong(out, data[i]);
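+ // a negative long always has its high bit set, so it must occupy the maximum encoded width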
+ if (data[i] < 0) {
+ assertEquals(ByteUtils.MAX_BYTES_VLONG, out.getPosition() - pos);
+ }
+ }
+ final ByteArrayDataInput in = new ByteArrayDataInput(encoded);
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(data[i], ByteUtils.readVLong(in));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java
new file mode 100644
index 0000000..c541c3e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
+public class CollectionUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void rotateEmpty() {
+ assertTrue(CollectionUtils.rotate(ImmutableList.of(), randomInt()).isEmpty());
+ }
+
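+ // A hedged reading of the contract under test: rotate(list, distance) is
+ // expected to behave like java.util.Collections.rotate, taking the distance
+ // modulo list.size() and producing a stable view. Rotating by -distance
+ // undoes it, except for Integer.MIN_VALUE, whose negation overflows back to
+ // itself; hence the guard at the end of this test.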
+ @Test
+ public void rotate() {
+ final int iters = scaledRandomIntBetween(10, 100);
+ for (int k = 0; k < iters; ++k) {
+ final int size = randomIntBetween(1, 100);
+ final int distance = randomInt();
+ List<Object> list = new ArrayList<Object>();
+ for (int i = 0; i < size; ++i) {
+ list.add(new Object());
+ }
+ final List<Object> rotated = CollectionUtils.rotate(list, distance);
+ // check content is the same
+ assertEquals(rotated.size(), list.size());
+ assertEquals(Iterables.size(rotated), list.size());
+ assertEquals(new HashSet<Object>(rotated), new HashSet<Object>(list));
+ // check stability
+ for (int j = randomInt(4); j >= 0; --j) {
+ assertEquals(rotated, CollectionUtils.rotate(list, distance));
+ }
+ // reverse
+ if (distance != Integer.MIN_VALUE) {
+ assertEquals(list, CollectionUtils.rotate(CollectionUtils.rotate(list, distance), -distance));
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java b/src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java
new file mode 100644
index 0000000..e17725b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link SlicedDoubleList}
+ */
+public class SlicedDoubleListTests extends ElasticsearchTestCase {
+
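+ // SlicedDoubleList is a list view over a backing double[] with an offset and
+ // a length. The single-int constructor is assumed to allocate a fresh backing
+ // array of exactly that size at offset 0; the (values, offset, length)
+ // constructor wraps a slice of an existing array.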
+ @Test
+ public void testCapacity() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(0));
+ assertThat(list.values.length, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+
+ list = new SlicedDoubleList(new double[10], 5, 5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+ assertThat(list.values.length, equalTo(10));
+ }
+
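+ // grow(n) is assumed to guarantee only that values.length >= n; the length
+ // field is managed by the caller, so the loop grows the backing array one
+ // element ahead of each write.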
+ @Test
+ public void testGrow() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ int expected = 0;
+ for (Double d : list) {
+ assertThat(d, equalTo((double) expected++));
+ }
+
+ for (int i = 0; i < list.length; i++) {
+ assertThat(list.get(i), equalTo((double) i));
+ }
+
+ int count = 0;
+ for (int i = list.offset; i < list.offset + list.length; i++) {
+ assertThat(list.values[i], equalTo((double) count++));
+ }
+ }
+
+ @Test
+ public void testIndexOf() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i%100);
+ }
+
+ assertThat(list.lastIndexOf(99.0d), equalTo(999));
+ assertThat(list.indexOf(99.0d), equalTo(99));
+
+ assertThat(list.lastIndexOf(100.0d), equalTo(-1));
+ assertThat(list.indexOf(100.0d), equalTo(-1));
+ }
+
+ @Test
+ public void testIsEmpty() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ assertThat(list.isEmpty(), equalTo(false));
+ list.length = 0;
+ assertThat(list.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testSet() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ try {
+ list.set(0, (double) 4);
+ fail("set() should not be supported");
+ } catch (UnsupportedOperationException ex) {
+ // expected: the sliced list is read-only
+ }
+ try {
+ list.add((double) 4);
+ fail("add() should not be supported");
+ } catch (UnsupportedOperationException ex) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testToString() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ assertThat("[0.0, 0.0, 0.0, 0.0, 0.0]", equalTo(list.toString()));
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ assertThat("[0.0, 1.0, 2.0, 3.0, 4.0]", equalTo(list.toString()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java b/src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java
new file mode 100644
index 0000000..2669501
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link SlicedLongList}
+ */
+public class SlicedLongListTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCapacity() {
+ SlicedLongList list = new SlicedLongList(5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(0));
+ assertThat(list.values.length, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+
+ list = new SlicedLongList(new long[10], 5, 5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+ assertThat(list.values.length, equalTo(10));
+ }
+
+ @Test
+ public void testGrow() {
+ SlicedLongList list = new SlicedLongList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((long)i);
+ }
+ int expected = 0;
+ for (Long d : list) {
+ assertThat(d, equalTo((long) expected++));
+ }
+
+ for (int i = 0; i < list.length; i++) {
+ assertThat(list.get(i), equalTo((long) i));
+ }
+
+ int count = 0;
+ for (int i = list.offset; i < list.offset + list.length; i++) {
+ assertThat(list.values[i], equalTo((long) count++));
+ }
+ }
+
+ @Test
+ public void testSet() {
+ SlicedLongList list = new SlicedLongList(5);
+ try {
+ list.set(0, (long) 4);
+ fail("set() should not be supported");
+ } catch (UnsupportedOperationException ex) {
+ // expected: the sliced list is read-only
+ }
+ try {
+ list.add((long) 4);
+ fail("add() should not be supported");
+ } catch (UnsupportedOperationException ex) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testIndexOf() {
+ SlicedLongList list = new SlicedLongList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((long)i%100);
+ }
+
+ assertThat(list.lastIndexOf(99L), equalTo(999));
+ assertThat(list.indexOf(99L), equalTo(99));
+
+ assertThat(list.lastIndexOf(100L), equalTo(-1));
+ assertThat(list.indexOf(100L), equalTo(-1));
+ }
+
+ @Test
+ public void testIsEmpty() {
+ SlicedLongList list = new SlicedLongList(5);
+ assertThat(list.isEmpty(), equalTo(false));
+ list.length = 0;
+ assertThat(list.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testToString() {
+ SlicedLongList list = new SlicedLongList(5);
+ assertThat("[0, 0, 0, 0, 0]", equalTo(list.toString()));
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((long)i);
+ }
+ assertThat("[0, 1, 2, 3, 4]", equalTo(list.toString()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java b/src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java
new file mode 100644
index 0000000..7e98073
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+/**
+ * Tests for {@link SlicedObjectList}
+ */
+public class SlicedObjectListTests extends ElasticsearchTestCase {
+
+ public class TestList extends SlicedObjectList<Double> {
+
+ public TestList(int capacity) {
+ this(new Double[capacity], 0, capacity);
+ }
+
+ public TestList(Double[] values, int offset, int length) {
+ super(values, offset, length);
+ }
+
+ public TestList(Double[] values) {
+ super(values);
+ }
+
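+ // ArrayUtil.oversize over-allocates relative to the requested size (guided
+ // by the per-element footprint, here NUM_BYTES_OBJECT_REF) so that repeated
+ // grow() calls amortize to linear copying cost rather than quadratic.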
+ @Override
+ public void grow(int newLength) {
+ assertThat(offset, equalTo(0)); // this grow() implementation only supports offset == 0
+ if (values.length >= newLength) {
+ return;
+ }
+ final Double[] current = values;
+ values = new Double[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+ System.arraycopy(current, 0, values, 0, current.length);
+ }
+
+ }
+ @Test
+ public void testCapacity() {
+ TestList list = new TestList(5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(0));
+ assertThat(list.values.length, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+
+ list = new TestList(new Double[10], 5, 5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+ assertThat(list.values.length, equalTo(10));
+ }
+
+ @Test
+ public void testGrow() {
+ TestList list = new TestList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ int expected = 0;
+ for (Double d : list) {
+ assertThat(d, equalTo((double) expected++));
+ }
+
+ for (int i = 0; i < list.length; i++) {
+ assertThat(list.get(i), equalTo((double) i));
+ }
+
+ int count = 0;
+ for (int i = list.offset; i < list.offset + list.length; i++) {
+ assertThat(list.values[i], equalTo((double) count++));
+ }
+ }
+
+ @Test
+ public void testIndexOf() {
+ TestList list = new TestList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i%100);
+ }
+
+ assertThat(list.lastIndexOf(99.0d), equalTo(999));
+ assertThat(list.indexOf(99.0d), equalTo(99));
+
+ assertThat(list.lastIndexOf(100.0d), equalTo(-1));
+ assertThat(list.indexOf(100.0d), equalTo(-1));
+ }
+
+ @Test
+ public void testIsEmpty() {
+ TestList list = new TestList(5);
+ assertThat(list.isEmpty(), equalTo(false));
+ list.length = 0;
+ assertThat(list.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testSet() {
+ TestList list = new TestList(5);
+ try {
+ list.set(0, (double) 4);
+ fail("set() should not be supported");
+ } catch (UnsupportedOperationException ex) {
+ // expected: the sliced list is read-only
+ }
+ try {
+ list.add((double) 4);
+ fail("add() should not be supported");
+ } catch (UnsupportedOperationException ex) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testToString() {
+ TestList list = new TestList(5);
+ assertThat("[null, null, null, null, null]", equalTo(list.toString()));
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ assertThat("[0.0, 1.0, 2.0, 3.0, 4.0]", equalTo(list.toString()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java b/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java
new file mode 100644
index 0000000..8997969
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+
+public class CountDownTest extends ElasticsearchTestCase {
+
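+ // The contract exercised here (inferred from the assertions below): CountDown
+ // is a non-blocking countdown where countDown() returns true for exactly one
+ // caller -- the one that moves the counter to zero -- and fastForward() forces
+ // the counter to zero, returning true only if it was not already there. Hence
+ // `count` must end up at exactly 1 no matter how the threads interleave.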
+ @Test @Repeat(iterations = 1000)
+ public void testConcurrent() throws InterruptedException {
+ final AtomicInteger count = new AtomicInteger(0);
+ final CountDown countDown = new CountDown(atLeast(10));
+ Thread[] threads = new Thread[atLeast(3)];
+ final CountDownLatch latch = new CountDownLatch(1);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ while (true) {
+ if (frequently()) {
+ if (countDown.isCountedDown()) {
+ break;
+ }
+ }
+ if (countDown.countDown()) {
+ count.incrementAndGet();
+ break;
+ }
+ }
+ }
+ };
+ threads[i].start();
+ }
+ latch.countDown();
+ Thread.yield();
+ if (rarely()) {
+ if (countDown.fastForward()) {
+ count.incrementAndGet();
+ }
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+
+ }
+
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(count.get(), Matchers.equalTo(1));
+ }
+
+ @Test
+ public void testSingleThreaded() {
+ int remaining = atLeast(10);
+ final CountDown countDown = new CountDown(remaining);
+ while (!countDown.isCountedDown()) {
+ remaining--;
+ if (countDown.countDown()) {
+ assertThat(remaining, equalTo(0));
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+ break;
+ }
+ if (rarely()) {
+ assertThat(countDown.fastForward(), equalTo(true));
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+ }
+ assertThat(remaining, greaterThan(0));
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
new file mode 100644
index 0000000..d4d46db
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ * Tests for the {@link EsExecutors} thread pool factories.
+ */
+public class EsExecutorsTests extends ElasticsearchTestCase {
+
+ private TimeUnit randomTimeUnit() {
+ return TimeUnit.values()[between(0, TimeUnit.values().length - 1)];
+ }
+
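+ // newFixed(1, 1, ...) gives one worker thread and a bounded queue of size 1:
+ // task 1 occupies the worker, task 2 fills the queue, and a plain Runnable as
+ // task 3 would be rejected. An AbstractRunnable whose isForceExecution()
+ // returns true is expected to bypass the queue bound, which this test
+ // verifies (testFixedRejected below verifies the rejection case).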
+ @Test
+ public void testFixedForcedExecution() throws Exception {
+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+ final CountDownLatch wait = new CountDownLatch(1);
+
+ final CountDownLatch exec1Wait = new CountDownLatch(1);
+ final AtomicBoolean executed1 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ wait.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ executed1.set(true);
+ exec1Wait.countDown();
+ }
+ });
+
+ final CountDownLatch exec2Wait = new CountDownLatch(1);
+ final AtomicBoolean executed2 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed2.set(true);
+ exec2Wait.countDown();
+ }
+ });
+
+ final AtomicBoolean executed3 = new AtomicBoolean();
+ final CountDownLatch exec3Wait = new CountDownLatch(1);
+ executor.execute(new AbstractRunnable() {
+ @Override
+ public void run() {
+ executed3.set(true);
+ exec3Wait.countDown();
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+ });
+
+ wait.countDown();
+
+ exec1Wait.await();
+ exec2Wait.await();
+ exec3Wait.await();
+
+ assertThat(executed1.get(), equalTo(true));
+ assertThat(executed2.get(), equalTo(true));
+ assertThat(executed3.get(), equalTo(true));
+
+ executor.shutdownNow();
+ }
+
+ @Test
+ public void testFixedRejected() throws Exception {
+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+ final CountDownLatch wait = new CountDownLatch(1);
+
+ final CountDownLatch exec1Wait = new CountDownLatch(1);
+ final AtomicBoolean executed1 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ wait.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ executed1.set(true);
+ exec1Wait.countDown();
+ }
+ });
+
+ final CountDownLatch exec2Wait = new CountDownLatch(1);
+ final AtomicBoolean executed2 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed2.set(true);
+ exec2Wait.countDown();
+ }
+ });
+
+ final AtomicBoolean executed3 = new AtomicBoolean();
+ try {
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed3.set(true);
+ }
+ });
+ fail("should be rejected...");
+ } catch (EsRejectedExecutionException e) {
+ // all is well
+ }
+
+ wait.countDown();
+
+ exec1Wait.await();
+ exec2Wait.await();
+
+ assertThat(executed1.get(), equalTo(true));
+ assertThat(executed2.get(), equalTo(true));
+ assertThat(executed3.get(), equalTo(false));
+
+ executor.shutdownNow();
+ }
+
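+ // Scaling pools are expected to prefer spawning threads (up to max) over
+ // queueing: each submitted task below is held on a barrier, so after max
+ // submissions the pool size and active count should both equal max.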
+ @Test
+ public void testScaleUp() throws Exception {
+ final int min = between(1, 3);
+ final int max = between(min + 1, 6);
+ final ThreadBarrier barrier = new ThreadBarrier(max + 1);
+
+ ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"));
+ assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
+ assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
+
+ for (int i = 0; i < max; ++i) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ pool.execute(new Runnable() {
+ public void run() {
+ latch.countDown();
+ try {
+ barrier.await();
+ barrier.await();
+ } catch (Throwable e) {
+ barrier.reset(e);
+ }
+ }
+ });
+
+ // wait until a worker actually runs this task,
+ // otherwise it might merely be sitting in the queue
+ latch.await();
+ }
+
+ barrier.await();
+ assertThat("wrong pool size", pool.getPoolSize(), equalTo(max));
+ assertThat("wrong active size", pool.getActiveCount(), equalTo(max));
+ barrier.await();
+ pool.shutdown();
+ }
+
+ @Test
+ public void testScaleDown() throws Exception {
+ final int min = between(1, 3);
+ final int max = between(min + 1, 6);
+ final ThreadBarrier barrier = new ThreadBarrier(max + 1);
+
+ final ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"));
+ assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
+ assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
+
+ for (int i = 0; i < max; ++i) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ pool.execute(new Runnable() {
+ public void run() {
+ latch.countDown();
+ try {
+ barrier.await();
+ barrier.await();
+ } catch (Throwable e) {
+ barrier.reset(e);
+ }
+ }
+ });
+
+ // wait until a worker actually runs this task,
+ // otherwise it might merely be sitting in the queue
+ latch.await();
+ }
+
+ barrier.await();
+ assertThat("wrong pool size", pool.getPoolSize(), equalTo(max));
+ assertThat("wrong active size", pool.getActiveCount(), equalTo(max));
+ barrier.await();
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ return pool.getActiveCount() == 0 && pool.getPoolSize() < max;
+ }
+ });
+ //assertThat("not all tasks completed", pool.getCompletedTaskCount(), equalTo((long) max));
+ assertThat("wrong active count", pool.getActiveCount(), equalTo(0));
+ //assertThat("wrong pool size. ", min, equalTo(pool.getPoolSize())); //BUG in ThreadPool - Bug ID: 6458662
+ //assertThat("idle threads didn't stay above min (" + pool.getPoolSize() + ")", pool.getPoolSize(), greaterThan(0));
+ assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), lessThan(max));
+ pool.shutdown();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
new file mode 100644
index 0000000..bd17217
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the single-threaded prioritized executors.
+ */
+public class PrioritizedExecutorsTests extends ElasticsearchTestCase {
+
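+ // Ordering assumed throughout: URGENT drains first, then HIGH, NORMAL, LOW
+ // and LANGUID, with FIFO ordering among tasks of equal priority. The
+ // AwaitingJob blocks the single worker first so that all jobs are queued
+ // before any of them runs, making the drain order deterministic.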
+ @Test
+ public void testPriorityQueue() throws Exception {
+ PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<Priority>();
+ queue.add(Priority.LANGUID);
+ queue.add(Priority.NORMAL);
+ queue.add(Priority.HIGH);
+ queue.add(Priority.LOW);
+ queue.add(Priority.URGENT);
+
+ assertThat(queue.poll(), equalTo(Priority.URGENT));
+ assertThat(queue.poll(), equalTo(Priority.HIGH));
+ assertThat(queue.poll(), equalTo(Priority.NORMAL));
+ assertThat(queue.poll(), equalTo(Priority.LOW));
+ assertThat(queue.poll(), equalTo(Priority.LANGUID));
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithRunnables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new Job(6, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new Job(4, Priority.LOW, results, finishedLatch));
+ executor.submit(new Job(1, Priority.HIGH, results, finishedLatch));
+ executor.submit(new Job(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new Job(0, Priority.URGENT, results, finishedLatch));
+ executor.submit(new Job(3, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new Job(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testExecutePrioritizedExecutorWithRunnables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.execute(new AwaitingJob(awaitingLatch));
+ executor.execute(new Job(6, Priority.LANGUID, results, finishedLatch));
+ executor.execute(new Job(4, Priority.LOW, results, finishedLatch));
+ executor.execute(new Job(1, Priority.HIGH, results, finishedLatch));
+ executor.execute(new Job(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.execute(new Job(0, Priority.URGENT, results, finishedLatch));
+ executor.execute(new Job(3, Priority.NORMAL, results, finishedLatch));
+ executor.execute(new Job(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithCallables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new CallableJob(6, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new CallableJob(4, Priority.LOW, results, finishedLatch));
+ executor.submit(new CallableJob(1, Priority.HIGH, results, finishedLatch));
+ executor.submit(new CallableJob(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new CallableJob(0, Priority.URGENT, results, finishedLatch));
+ executor.submit(new CallableJob(3, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new CallableJob(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithMixed() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new CallableJob(6, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new Job(4, Priority.LOW, results, finishedLatch));
+ executor.submit(new CallableJob(1, Priority.HIGH, results, finishedLatch));
+ executor.submit(new Job(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new CallableJob(0, Priority.URGENT, results, finishedLatch));
+ executor.submit(new Job(3, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new CallableJob(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
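+ // The timeout variant of execute(...) takes a scheduler, a timeout and a
+ // timeout callback: if the task is still pending when the timeout fires, the
+ // callback runs and the task must never execute, which is what the
+ // executeCalled flag checks once the blocking task is released.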
+ @Test
+ public void testTimeout() throws Exception {
+ ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
+ PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ final CountDownLatch block = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ block.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "the blocking";
+ }
+ });
+
+ final AtomicBoolean executeCalled = new AtomicBoolean();
+ final CountDownLatch timedOut = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executeCalled.set(true);
+ }
+
+ @Override
+ public String toString() {
+ return "the waiting";
+ }
+ }, timer, TimeValue.timeValueMillis(100) /* enough timeout to catch them in the pending list... */, new Runnable() {
+ @Override
+ public void run() {
+ timedOut.countDown();
+ }
+ }
+ );
+
+ PrioritizedEsThreadPoolExecutor.Pending[] pending = executor.getPending();
+ assertThat(pending.length, equalTo(1));
+ assertThat(pending[0].task.toString(), equalTo("the waiting"));
+
+ assertThat(timedOut.await(2, TimeUnit.SECONDS), equalTo(true));
+ block.countDown();
+ Thread.sleep(100); // give a wrongly-scheduled task a chance to run; executeCalled must stay false
+ assertThat(executeCalled.get(), equalTo(false));
+
+ timer.shutdownNow();
+ executor.shutdownNow();
+ }
+
+ static class AwaitingJob extends PrioritizedRunnable {
+
+ private final CountDownLatch latch;
+
+ private AwaitingJob(CountDownLatch latch) {
+ super(Priority.URGENT);
+ this.latch = latch;
+ }
+
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ static class Job extends PrioritizedRunnable {
+
+ private final int result;
+ private final List<Integer> results;
+ private final CountDownLatch latch;
+
+ Job(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
+ super(priority);
+ this.result = result;
+ this.results = results;
+ this.latch = latch;
+ }
+
+ @Override
+ public void run() {
+ results.add(result);
+ latch.countDown();
+ }
+ }
+
+ static class CallableJob extends PrioritizedCallable<Integer> {
+
+ private final int result;
+ private final List<Integer> results;
+ private final CountDownLatch latch;
+
+ CallableJob(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
+ super(priority);
+ this.result = result;
+ this.results = results;
+ this.latch = latch;
+ }
+
+ @Override
+ public Integer call() throws Exception {
+ results.add(result);
+ latch.countDown();
+ return result;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java b/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java
new file mode 100644
index 0000000..c8911c6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.builder;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that raw, pre-encoded fields survive a build/parse round trip.
+ */
+public class BuilderRawFieldTests extends ElasticsearchTestCase {
+
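+ // rawField(...) is expected to splice already-encoded bytes directly into the
+ // builder's output stream. The bytes here are produced with the same
+ // XContentType as the enclosing builder, so parsing the result back should
+ // yield the embedded object, numbers and strings as ordinary tokens.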
+ @Test
+ public void testJsonRawField() throws IOException {
+ testRawField(XContentType.JSON);
+ }
+
+ @Test
+ public void testSmileRawField() throws IOException {
+ testRawField(XContentType.SMILE);
+ }
+
+ @Test
+ public void testYamlRawField() throws IOException {
+ testRawField(XContentType.YAML);
+ }
+
+ private void testRawField(XContentType type) throws IOException {
+ XContentBuilder builder = XContentFactory.contentBuilder(type);
+ builder.startObject();
+ builder.field("field1", "value1");
+ builder.rawField("_source", XContentFactory.contentBuilder(type).startObject().field("s_field", "s_value").endObject().bytes());
+ builder.field("field2", "value2");
+ builder.rawField("payload_i", new BytesArray(Long.toString(1)));
+ builder.field("field3", "value3");
+ builder.rawField("payload_d", new BytesArray(Double.toString(1.1)));
+ builder.field("field4", "value4");
+ builder.rawField("payload_s", new BytesArray("test"));
+ builder.field("field5", "value5");
+ builder.endObject();
+
+ XContentParser parser = XContentFactory.xContent(type).createParser(builder.bytes());
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field1"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value1"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("_source"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("s_field"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("s_value"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field2"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value2"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_i"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ assertThat(parser.numberType(), equalTo(XContentParser.NumberType.INT));
+ assertThat(parser.longValue(), equalTo(1L));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field3"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value3"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_d"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ assertThat(parser.numberType(), equalTo(XContentParser.NumberType.DOUBLE));
+ assertThat(parser.doubleValue(), equalTo(1.1d));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field4"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value4"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_s"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("test"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field5"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value5"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
new file mode 100644
index 0000000..67bb99f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.builder;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.io.FastCharArrayWriter;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.CAMELCASE;
+import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link XContentBuilder}.
+ */
+public class XContentBuilderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPrettyWithLfAtEnd() throws Exception {
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(writer);
+ generator.usePrettyPrint();
+ generator.usePrintLineFeedAtEnd();
+
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+
+ generator.close();
+ // close a second time to check that a double close does not fail
+ generator.close();
+
+ assertThat(writer.unsafeCharArray()[writer.size() - 1], equalTo('\n'));
+ }
+
+ @Test
+ public void verifyReuseJsonGenerator() throws Exception {
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(writer);
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+
+ assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}"));
+
+ // try again...
+ writer.reset();
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+ // the reused generator emits a leading space because it no longer considers
+ // itself at the root object; toStringTrim() strips it, as the real callers do
+ assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}"));
+ }
+
+ @Test
+ public void testSimpleGenerator() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test\":\"value\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test\":\"value\"}"));
+ }
+
+ @Test
+ public void testOverloadedList() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", Lists.newArrayList("1", "2")).endObject();
+ assertThat(builder.string(), equalTo("{\"test\":[\"1\",\"2\"]}"));
+ }
+
+ @Test
+ public void testWritingBinaryToStream() throws Exception {
+ BytesStreamOutput bos = new BytesStreamOutput();
+
+ XContentGenerator gen = XContentFactory.xContent(XContentType.JSON).createGenerator(bos);
+ gen.writeStartObject();
+ gen.writeStringField("name", "something");
+ gen.flush();
+ bos.write(", source : { test : \"value\" }".getBytes("UTF8"));
+ gen.writeStringField("name2", "something2");
+ gen.writeEndObject();
+ gen.close();
+
+ byte[] data = bos.bytes().toBytes();
+ String sData = new String(data, "UTF8");
+ // no assertions here: this is a smoke test that interleaving raw bytes with
+ // generator output neither throws nor corrupts the stream; the payload is
+ // printed for manual inspection
+ System.out.println("DATA: " + sData);
+ }
+
+ @Test
+ public void testFieldCaseConversion() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).fieldCaseConversion(CAMELCASE);
+ builder.startObject().field("test_name", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"testName\":\"value\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON).fieldCaseConversion(UNDERSCORE);
+ builder.startObject().field("testName", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test_name\":\"value\"}"));
+ }
+
+ @Test
+ public void testDateTypesConversion() throws Exception {
+ Date date = new Date();
+ String expectedDate = XContentBuilder.defaultDatePrinter.print(date.getTime());
+ Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT);
+ String expectedCalendar = XContentBuilder.defaultDatePrinter.print(calendar.getTimeInMillis());
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("date", date).endObject();
+ assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("calendar", calendar).endObject();
+ assertThat(builder.string(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("date", date);
+ builder.map(map);
+ assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ map = new HashMap<String, Object>();
+ map.put("calendar", calendar);
+ builder.map(map);
+ assertThat(builder.string(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java b/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
new file mode 100644
index 0000000..0a57adf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.smile;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Verifies that JSON and SMILE generate equivalent token streams.
+ */
+public class JsonVsSmileTests extends ElasticsearchTestCase {
+
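+ // SMILE is a binary encoding of the JSON data model, so generating the same
+ // logical structure through both XContentTypes should yield byte streams
+ // that parse back into identical token sequences; verifySameTokens walks the
+ // two parsers in lockstep to check this.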
+// @Test public void testBinarySmileField() throws Exception {
+// JsonGenerator gen = new SmileFactory().createJsonGenerator(new ByteArrayOutputStream());
+//// JsonGenerator gen = new JsonFactory().createJsonGenerator(new ByteArrayOutputStream(), JsonEncoding.UTF8);
+// gen.writeStartObject();
+// gen.writeFieldName("field1");
+// gen.writeBinary(new byte[]{1, 2, 3});
+// gen.writeEndObject();
+// }
+
+ @Test
+ public void compareParsingTokens() throws IOException {
+ BytesStreamOutput xsonOs = new BytesStreamOutput();
+ XContentGenerator xsonGen = XContentFactory.xContent(XContentType.SMILE).createGenerator(xsonOs);
+
+ BytesStreamOutput jsonOs = new BytesStreamOutput();
+ XContentGenerator jsonGen = XContentFactory.xContent(XContentType.JSON).createGenerator(jsonOs);
+
+ xsonGen.writeStartObject();
+ jsonGen.writeStartObject();
+
+ xsonGen.writeStringField("test", "value");
+ jsonGen.writeStringField("test", "value");
+
+ xsonGen.writeArrayFieldStart("arr");
+ jsonGen.writeArrayFieldStart("arr");
+ xsonGen.writeNumber(1);
+ jsonGen.writeNumber(1);
+ xsonGen.writeNull();
+ jsonGen.writeNull();
+ xsonGen.writeEndArray();
+ jsonGen.writeEndArray();
+
+ xsonGen.writeEndObject();
+ jsonGen.writeEndObject();
+
+ xsonGen.close();
+ jsonGen.close();
+
+ verifySameTokens(XContentFactory.xContent(XContentType.JSON).createParser(jsonOs.bytes().toBytes()), XContentFactory.xContent(XContentType.SMILE).createParser(xsonOs.bytes().toBytes()));
+ }
+
+ private void verifySameTokens(XContentParser parser1, XContentParser parser2) throws IOException {
+ while (true) {
+ XContentParser.Token token1 = parser1.nextToken();
+ XContentParser.Token token2 = parser2.nextToken();
+ if (token1 == null) {
+ assertThat(token2, nullValue());
+ return;
+ }
+ assertThat(token1, equalTo(token2));
+ switch (token1) {
+ case FIELD_NAME:
+ assertThat(parser1.currentName(), equalTo(parser2.currentName()));
+ break;
+ case VALUE_STRING:
+ assertThat(parser1.text(), equalTo(parser2.text()));
+ break;
+ case VALUE_NUMBER:
+ assertThat(parser1.numberType(), equalTo(parser2.numberType()));
+ assertThat(parser1.numberValue(), equalTo(parser2.numberValue()));
+ break;
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java b/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
new file mode 100644
index 0000000..0f9e4ba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class XContentHelperTests extends ElasticsearchTestCase {
+
+ Map<String, Object> getMap(Object... keyValues) {
+ Map<String, Object> map = new HashMap<String, Object>();
+ for (int i = 0; i < keyValues.length; i++) {
+ map.put((String) keyValues[i], keyValues[++i]);
+ }
+ return map;
+ }
+
+ Map<String, Object> getNamedMap(String name, Object... keyValues) {
+ Map<String, Object> map = getMap(keyValues);
+
+ Map<String, Object> namedMap = new HashMap<String, Object>(1);
+ namedMap.put(name, map);
+ return namedMap;
+ }
+
+ List<Object> getList(Object... values) {
+ return Arrays.asList(values);
+ }
+
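+ // mergeDefaults folds `defaults` into `content`, with content winning where
+ // keys collide. The case below checks list merging where entries are
+ // single-key maps: entries are matched by that key (name2 merges), unmatched
+ // defaults (name1) are appended, and content-only entries (name4) survive.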
+ @Test
+ public void testMergingListValuesAreMapsOfOne() {
+
+ Map<String, Object> defaults = getMap("test", getList(getNamedMap("name1", "t1", "1"), getNamedMap("name2", "t2", "2")));
+ Map<String, Object> content = getMap("test", getList(getNamedMap("name2", "t3", "3"), getNamedMap("name4", "t4", "4")));
+ Map<String, Object> expected = getMap("test",
+ getList(getNamedMap("name2", "t2", "2", "t3", "3"), getNamedMap("name4", "t4", "4"), getNamedMap("name1", "t1", "1")));
+
+ XContentHelper.mergeDefaults(content, defaults);
+
+ assertThat(content, Matchers.equalTo(expected));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java b/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
new file mode 100644
index 0000000..e2fba5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ * Tests for {@link XContentMapValues}.
+ */
+public class XContentMapValuesTests extends ElasticsearchTestCase {
+
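+ // filter(source, includes, excludes) prunes a source map with glob patterns,
+ // as used by source filtering: includes whitelist whole subtrees ("test*",
+ // "path1.path2.*"), excludes blacklist them, and an empty includes array
+ // means "everything not excluded".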
+ @Test
+ public void testFilter() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .field("something_else", "value3")
+ .endObject();
+
+ Map<String, Object> source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ Map<String, Object> filter = XContentMapValues.filter(source, new String[]{"test1"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.size(), equalTo(1));
+ assertThat(filter.get("test1").toString(), equalTo("value1"));
+
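+ // a trailing '*' in an include pattern acts as a prefix wildcard, so "test*" keeps test1 and test2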
+ filter = XContentMapValues.filter(source, new String[]{"test*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.size(), equalTo(2));
+ assertThat(filter.get("test1").toString(), equalTo("value1"));
+ assertThat(filter.get("test2").toString(), equalTo("value2"));
+
+ filter = XContentMapValues.filter(source, Strings.EMPTY_ARRAY, new String[]{"test1"});
+ assertThat(filter.size(), equalTo(2));
+ assertThat(filter.get("test2").toString(), equalTo("value2"));
+ assertThat(filter.get("something_else").toString(), equalTo("value3"));
+
+ // more complex object...
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1")
+ .startArray("path2")
+ .startObject().field("test", "value1").endObject()
+ .startObject().field("test", "value2").endObject()
+ .endArray()
+ .endObject()
+ .field("test1", "value1")
+ .endObject();
+
+ source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ filter = XContentMapValues.filter(source, new String[]{"path1"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.size(), equalTo(1));
+
+ filter = XContentMapValues.filter(source, new String[]{"path1*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.get("path1"), equalTo(source.get("path1")));
+ assertThat(filter.containsKey("test1"), equalTo(false));
+
+ filter = XContentMapValues.filter(source, new String[]{"test1*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.get("test1"), equalTo(source.get("test1")));
+ assertThat(filter.containsKey("path1"), equalTo(false));
+
+ filter = XContentMapValues.filter(source, new String[]{"path1.path2.*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.get("path1"), equalTo(source.get("path1")));
+ assertThat(filter.containsKey("test1"), equalTo(false));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testExtractValue() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .field("test", "value")
+ .endObject();
+
+ Map<String, Object> map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("test", map).toString(), equalTo("value"));
+ assertThat(XContentMapValues.extractValue("test.me", map), nullValue());
+ assertThat(XContentMapValues.extractValue("something.else.2", map), nullValue());
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").startObject("path2").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("path1.path2.test", map).toString(), equalTo("value"));
+ assertThat(XContentMapValues.extractValue("path1.path2.test_me", map), nullValue());
+ assertThat(XContentMapValues.extractValue("path1.non_path2.test", map), nullValue());
+
+ Object extValue = XContentMapValues.extractValue("path1.path2", map);
+ assertThat(extValue, instanceOf(Map.class));
+ Map<String, Object> extMapValue = (Map<String, Object>) extValue;
+ assertThat(extMapValue, hasEntry("test", (Object) "value"));
+
+ extValue = XContentMapValues.extractValue("path1", map);
+ assertThat(extValue, instanceOf(Map.class));
+ extMapValue = (Map<String, Object>) extValue;
+ assertThat(extMapValue.containsKey("path2"), equalTo(true));
+
+ // lists
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").field("test", "value1", "value2").endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+
+ extValue = XContentMapValues.extractValue("path1.test", map);
+ assertThat(extValue, instanceOf(List.class));
+
+ List extListValue = (List) extValue;
+ assertThat(extListValue.size(), equalTo(2));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1")
+ .startArray("path2")
+ .startObject().field("test", "value1").endObject()
+ .startObject().field("test", "value2").endObject()
+ .endArray()
+ .endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+
+ extValue = XContentMapValues.extractValue("path1.path2.test", map);
+ assertThat(extValue, instanceOf(List.class));
+
+ extListValue = (List) extValue;
+ assertThat(extListValue.size(), equalTo(2));
+ assertThat(extListValue.get(0).toString(), equalTo("value1"));
+ assertThat(extListValue.get(1).toString(), equalTo("value2"));
+
+ // fields with . in them
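+ // (presumably extractValue falls back to literal keys containing dots when the
+ // segment-by-segment path lookup fails, as the cases below suggest)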
+ builder = XContentFactory.jsonBuilder().startObject()
+ .field("xxx.yyy", "value")
+ .endObject();
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("xxx.yyy", map).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1.xxx").startObject("path2.yyy").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("path1.xxx.path2.yyy.test", map).toString(), equalTo("value"));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testExtractRawValue() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .field("test", "value")
+ .endObject();
+
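+ // unlike extractValue, extractRawValues always returns a list, even for a single scalar value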
+ Map<String, Object> map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("test", map).get(0).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .field("test.me", "value")
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("test.me", map).get(0).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").startObject("path2").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("path1.path2.test", map).get(0).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1.xxx").startObject("path2.yyy").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("path1.xxx.path2.yyy.test", map).get(0).toString(), equalTo("value"));
+ }
+
+ @Test
+ public void prefixedNamesFilteringTest() {
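+ // guards against prefix matching: including "obj_name" must match the literal key,
+ // not be interpreted as "obj" plus a sub-field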
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("obj", "value");
+ map.put("obj_name", "value_name");
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"obj_name"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat((String) filteredMap.get("obj_name"), equalTo("value_name"));
+ }
+
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void nestedFilteringTest() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ map.put("array",
+ Arrays.asList(
+ 1,
+ new HashMap<String, Object>() {{
+ put("nested", 2);
+ put("nested_2", 3);
+ }}));
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"array.nested"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+
+ // Selecting members of objects within arrays (e.g. [ 1, { "nested": "value" } ]) always returns all values in the array (the 1 in the example);
+ // this is expected behavior, as these types of objects are not supported in ES
+ assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(1));
+ assertThat((Integer) ((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).get("nested"), equalTo(2));
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"array.*"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(2));
+
+ map.clear();
+ map.put("field", "value");
+ map.put("obj",
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }});
+ filteredMap = XContentMapValues.filter(map, new String[]{"obj.field"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat((String) ((Map<String, Object>) filteredMap.get("obj")).get("field"), equalTo("value"));
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"obj.*"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+ assertThat((String) ((Map<String, Object>) filteredMap.get("obj")).get("field"), equalTo("value"));
+ assertThat((String) ((Map<String, Object>) filteredMap.get("obj")).get("field2"), equalTo("value2"));
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void completeObjectFilteringTest() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ map.put("obj",
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }});
+ map.put("array",
+ Arrays.asList(
+ 1,
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }}));
+
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"obj"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field").toString(), equalTo("value"));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field2").toString(), equalTo("value2"));
+
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"obj"}, new String[]{"*.field2"});
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field").toString(), equalTo("value"));
+
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"array"}, new String[]{});
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((List) filteredMap.get("array")).size(), equalTo(2));
+ assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(2));
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"array"}, new String[]{"*.field2"});
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((List) filteredMap.get("array")).size(), equalTo(2));
+ assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).get("field").toString(), equalTo("value"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void filterIncludesUsingStarPrefix() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ map.put("obj",
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }});
+ map.put("n_obj",
+ new HashMap<String, Object>() {{
+ put("n_field", "value");
+ put("n_field2", "value2");
+ }});
+
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"*.field2"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(filteredMap, hasKey("obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")), hasKey("field2"));
+
+ // only objects
+ filteredMap = XContentMapValues.filter(map, new String[]{"*.*"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(2));
+ assertThat(filteredMap, hasKey("obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+ assertThat(filteredMap, hasKey("n_obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("n_obj")).size(), equalTo(2));
+
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"*"}, new String[]{"*.*2"});
+ assertThat(filteredMap.size(), equalTo(3));
+ assertThat(filteredMap, hasKey("field"));
+ assertThat(filteredMap, hasKey("obj"));
+ assertThat(((Map) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")), hasKey("field"));
+ assertThat(filteredMap, hasKey("n_obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("n_obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("n_obj")), hasKey("n_field"));
+
+ }
+
+ @Test
+ public void filterWithEmptyIncludesExcludes() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(filteredMap.get("field").toString(), equalTo("value"));
+
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testThatFilterIncludesEmptyObjectWhenUsingIncludes() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj")
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"obj"}, Strings.EMPTY_ARRAY);
+
+ assertThat(mapTuple.v2(), equalTo(filteredSource));
+ }
+
+ @Test
+ public void testThatFilterIncludesEmptyObjectWhenUsingExcludes() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj")
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"nonExistingField"});
+
+ assertThat(mapTuple.v2(), equalTo(filteredSource));
+ }
+
+ @Test
+ public void testNotOmittingObjectsWithExcludedProperties() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj")
+ .field("f1", "v1")
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"obj.f1"});
+
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj"));
+ assertThat(((Map) filteredSource.get("obj")).size(), equalTo(0));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testNotOmittingObjectWithNestedExcludedObject() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj1")
+ .startObject("obj2")
+ .startObject("obj3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ // implicit include
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"*.obj2"});
+
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map) filteredSource.get("obj1")).size(), Matchers.equalTo(0));
+
+ // explicit include
+ filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"obj1"}, new String[]{"*.obj2"});
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map) filteredSource.get("obj1")).size(), Matchers.equalTo(0));
+
+ // wild card include
+ filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"*.obj2"}, new String[]{"*.obj3"});
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map<String, Object>) filteredSource.get("obj1")), hasKey("obj2"));
+ assertThat(((Map) ((Map) filteredSource.get("obj1")).get("obj2")).size(), Matchers.equalTo(0));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testIncludingObjectWithNestedIncludedObject() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj1")
+ .startObject("obj2")
+ .endObject()
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"*.obj2"}, Strings.EMPTY_ARRAY);
+
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map) filteredSource.get("obj1")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredSource.get("obj1")), hasKey("obj2"));
+ assertThat(((Map) ((Map) filteredSource.get("obj1")).get("obj2")).size(), equalTo(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java b/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java
new file mode 100644
index 0000000..689107a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.consistencylevel;
+
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class WriteConsistencyLevelTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testWriteConsistencyLevelReplication2() throws Exception {
+ prepareCreate("test", 1, ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // indexing, by default, will work (ONE consistency level)
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test")).setConsistencyLevel(WriteConsistencyLevel.ONE).execute().actionGet();
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
+ .setTimeout(timeValueMillis(100)).execute().actionGet();
+ fail("can't index, does not match consistency");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ allowNodes("test", 2);
+
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // this should work, since we now have 2 active copies, satisfying QUORUM (2 of 3)
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
+ .setTimeout(timeValueSeconds(1)).execute().actionGet();
+
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.ALL)
+ .setTimeout(timeValueMillis(100)).execute().actionGet();
+ fail("can't index, does not match consistency");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ allowNodes("test", 3);
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ // this should work, since we now have all 3 copies active, satisfying ALL
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.ALL)
+ .setTimeout(timeValueSeconds(1)).execute().actionGet();
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java b/src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java
new file mode 100644
index 0000000..ecb1cf7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java
@@ -0,0 +1,827 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.count.query;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void passQueryAsStringTest() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
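+ // the count API also accepts the query as a raw JSON body via setSource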
+ CountResponse countResponse = client().prepareCount().setSource(new BytesArray("{ \"query\" : { \"term\" : { \"field1\" : \"value1_1\" }}}").array()).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testIndexOptions() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=docs")
+ .setSettings("index.number_of_shards", 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).get();
+
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field2", "quick brown").type(Type.PHRASE).slop(0)).get();
+ assertHitCount(countResponse, 1l);
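+ // field1 was indexed with index_options=docs (no positions), so a phrase query against it must fail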
+ try {
+ client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "quick brown").type(Type.PHRASE).slop(0)).get();
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue("wrong exception message " + e.getMessage(), e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ }
+
+ @Test
+ public void testCommonTermsQuery() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"),
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") );
+
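+ // cutoffFrequency(3) is an absolute document-frequency cutoff; no term here reaches it
+ // (even "the" occurs in only 2 docs), so all terms follow the low_freq operator:
+ // OR matches 3 docs, AND requires every term and matches 2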
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get();
+ assertHitCount(countResponse, 3l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.AND)).get();
+ assertHitCount(countResponse, 2l);
+
+ // Default low_freq operator is OR
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3)).get();
+ assertHitCount(countResponse, 3l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("3")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("4")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setSource(new BytesArray("{ \"query\" : { \"common\" : { \"field1\" : { \"query\" : \"the lazy fox brown\", \"cutoff_frequency\" : 1, \"minimum_should_match\" : { \"high_freq\" : 4 } } } } }").array()).get();
+ assertHitCount(countResponse, 1l);
+
+ // Default: with cutoffFrequency(1) every term is high frequency, so the query runs as an AND of all terms
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the lazy fox brown").cutoffFrequency(1)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3).analyzer("standard")).get();
+ assertHitCount(countResponse, 3l);
+ // the standard analyzer drops "the" since it's a stopword
+
+ // try the same with match query
+ countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.OR)).get();
+ assertHitCount(countResponse, 3l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND).analyzer("stop")).get();
+ assertHitCount(countResponse, 3l);
+ // the stop analyzer drops "the" since it's a stopword
+
+ // try the same with multi match query
+ countResponse = client().prepareCount().setQuery(QueryBuilders.multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(countResponse, 3l);
+ }
+
+ @Test
+ public void queryStringAnalyzedWildcard() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("value*").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("*ue*").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("*ue_1").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("val*e_1").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("v?l*e?1").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testLowercaseExpandedTerms() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
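+ // lowercaseExpandedTerms controls whether wildcard, fuzzy and range terms are lowercased
+ // before matching (presumably defaulting to true, since the unparameterized query below also matches)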
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(true)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(false)).get();
+ assertHitCount(countResponse, 0l);
+ countResponse = client().prepareCount().setQuery(queryString("ValUE_*").lowercaseExpandedTerms(true)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("vAl*E_1")).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("[VALUE_1 TO VALUE_3]")).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void testDateRangeInQueryString() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+ refresh();
+
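+ // date math: now-2M/d subtracts two months and rounds down to the start of the day,
+ // so the "past" value (one month ago) falls inside [now-2M/d TO now/d]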
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("past:[now-2M/d TO now/d]")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ // D is an unsupported unit in date math
+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(countResponse.getFailedShards(), equalTo(1));
+ assertThat(countResponse.getShardFailures().length, equalTo(1));
+ assertThat(countResponse.getShardFailures()[0].reason(), allOf(containsString("Failed to parse"), containsString("unit [D] not supported for date math")));
+ }
+
+ @Test
+ public void typeFilterTypeIndexedTests() throws Exception {
+ typeFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void typeFilterTypeNotIndexedTests() throws Exception {
+ typeFilterTests("no");
+ }
+
+ private void typeFilterTests(String index) throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "3").setSource("field1", "value1"));
+
+ assertHitCount(client().prepareCount().setQuery(filteredQuery(matchAllQuery(), typeFilter("type1"))).get(), 2l);
+ assertHitCount(client().prepareCount().setQuery(filteredQuery(matchAllQuery(), typeFilter("type2"))).get(), 3l);
+
+ assertHitCount(client().prepareCount().setTypes("type1").setQuery(matchAllQuery()).get(), 2l);
+ assertHitCount(client().prepareCount().setTypes("type2").setQuery(matchAllQuery()).get(), 3l);
+
+ assertHitCount(client().prepareCount().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 5l);
+ }
+
+ @Test
+ public void idsFilterTestsIdIndexed() throws Exception {
+ idsFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void idsFilterTestsIdNotIndexed() throws Exception {
+ idsFilterTests("no");
+ }
+
+ private void idsFilterTests(String index) throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_id").field("index", index).endObject()
+ .endObject().endObject()));
+
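+ // whether _id is indexed or not should not matter here (ids are presumably resolved via _uid)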
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+
+ CountResponse countResponse = client().prepareCount().setQuery(constantScoreQuery(idsFilter("type1").ids("1", "3"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // no type
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(idsFilter().ids("1", "3"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(idsQuery("type1").ids("1", "3")).get();
+ assertHitCount(countResponse, 2l);
+
+ // no type
+ countResponse = client().prepareCount().setQuery(idsQuery().ids("1", "3")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(idsQuery("type1").ids("7", "10")).get();
+ assertHitCount(countResponse, 0l);
+
+ // repeat the same check, this time with a terms filter on _id
+ countResponse = client().prepareCount().setTypes("type1").setQuery(constantScoreQuery(termsFilter("_id", "1", "3"))).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1_2"),
+ client().prepareIndex("test", "type1", "3").setSource("field2", "value2_3"),
+ client().prepareIndex("test", "type1", "4").setSource("field3", "value3_4"));
+
+ CountResponse countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), limitFilter(2))).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void filterExistsMissingTests() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()));
+
+ CountResponse countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(existsFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("_exists_:field1")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("field2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("field3"))).get();
+ assertHitCount(countResponse, 1l);
+
+ // wildcard check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("x*"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // object check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("obj1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(missingFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("_missing_:field1")).get();
+ assertHitCount(countResponse, 2l);
+
+ // wildcard check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("x*"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // object check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("obj1"))).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void passQueryAsJSONStringTest() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareCount().setQuery(wrapper).get(), 1l);
+
+ BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"));
+ assertHitCount(client().prepareCount().setQuery(bool).get(), 1l);
+ }
+
+ @Test
+ public void testFiltersWithCustomCacheKey() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ refresh();
+
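+ // cacheKey supplies an explicit filter-cache key; counts must match with and without it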
+ CountResponse countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testMatchQueryNumeric() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("long", 1l, "double", 1.0d).get();
+ client().prepareIndex("test", "type1", "2").setSource("long", 2l, "double", 2.0d).get();
+ client().prepareIndex("test", "type1", "3").setSource("long", 3l, "double", 3.0d).get();
+ refresh();
+ CountResponse countResponse = client().prepareCount().setQuery(matchQuery("long", "1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(matchQuery("double", "2")).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value4", "field3", "value3").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value5", "field3", "value2").get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3", "field2", "value6", "field3", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder builder = QueryBuilders.multiMatchQuery("value1 value2 value4", "field1", "field2");
+ CountResponse countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 2l);
+
+ refresh();
+ builder = QueryBuilders.multiMatchQuery("value1", "field1", "field2")
+ .operator(MatchQueryBuilder.Operator.AND); // The operator only applies to terms inside a single field; fields are always OR-ed together.
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 1l);
+
+ refresh();
+ builder = QueryBuilders.multiMatchQuery("value1", "field1", "field3^1.5")
+ .operator(MatchQueryBuilder.Operator.AND); // The operator only applies to terms inside a single field; fields are always OR-ed together.
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 2l);
+
+ refresh();
+ builder = QueryBuilders.multiMatchQuery("value1").field("field1").field("field3", 1.5f)
+ .operator(MatchQueryBuilder.Operator.AND); // The operator only applies to terms inside a single field; fields are always OR-ed together.
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 2l);
+
+ // Test lenient
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value7", "field2", "value8", "field4", 5).get();
+ refresh();
+
+ builder = QueryBuilders.multiMatchQuery("value1", "field1", "field2", "field4");
+ builder.lenient(true);
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testMatchQueryZeroTermsQuery() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ refresh();
+
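+ // zeroTermsQuery decides what a match query does when analysis strips every term
+ // ("a" is a stopword for the classic analyzer): NONE matches nothing, ALL matches everything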
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE));
+ CountResponse countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testMultiMatchQueryZeroTermsQuery() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value3", "field2", "value4").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(multiMatchQuery("value1", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); // Fields are ORed together
+ CountResponse countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(multiMatchQuery("value4", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ boolQuery = boolQuery().must(multiMatchQuery("a", "field1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testMultiMatchQueryMinShouldMatch() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("field1", new String[]{"value1", "value2", "value3"}).get();
+ client().prepareIndex("test", "type1", "2").setSource("field2", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2");
+
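+ // minimumShouldMatch("70%") of the 3 query terms rounds down to 2 required term matches;
+ // with "30%" a single matching term suffices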
+ multiMatchQuery.useDisMax(true);
+ multiMatchQuery.minimumShouldMatch("70%");
+ CountResponse countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 2l);
+
+ multiMatchQuery.useDisMax(false);
+ multiMatchQuery.minimumShouldMatch("70%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 2l);
+
+ multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1");
+ multiMatchQuery.minimumShouldMatch("100%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 0l);
+
+ multiMatchQuery.minimumShouldMatch("70%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testFuzzyQueryString() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
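+ // query_string fuzzy syntax: "kimcy~1" allows edit distance 1; on numeric and date fields
+ // the ~ suffix presumably expands to a +/- range (11~1 matches 12, 2012-02-02~1d matches 2012-02-01)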
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("str:kimcy~1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:11~1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("date:2012-02-02~1d")).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testSpecialRangeSyntaxInQueryString() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("num:>19")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:>20")).get();
+ assertHitCount(countResponse, 0l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:>=20")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:>11")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:<20")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:<=20")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("+num:>11 +num:<20")).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testEmptyTermsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "terms", "type=string"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("term", "1").get();
+ client().prepareIndex("test", "type", "2").setSource("term", "2").get();
+ client().prepareIndex("test", "type", "3").setSource("term", "3").get();
+ client().prepareIndex("test", "type", "4").setSource("term", "4").get();
+ refresh();
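+ // an empty terms filter (and an empty ids filter) matches no documents rather than failing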
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("term", new String[0]))).get();
+ assertHitCount(countResponse, 0l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), idsFilter())).get();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void testTermsLookupFilter() throws Exception {
+ assertAcked(prepareCreate("lookup").addMapping("type", "terms", "type=string", "other", "type=string"));
+ assertAcked(prepareCreate("lookup2").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("arr").startObject("properties").startObject("term").field("type", "string")
+ .endObject().endObject().endObject().endObject().endObject().endObject()));
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+ ensureGreen();
+
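+ // terms lookup: the filter fetches its term list from another document
+ // (lookupIndex/lookupType/lookupId) at the given lookupPath instead of inlining the terms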
+ indexRandom(true, client().prepareIndex("lookup", "type", "1").setSource("terms", new String[]{"1", "3"}),
+ client().prepareIndex("lookup", "type", "2").setSource("terms", new String[]{"2"}),
+ client().prepareIndex("lookup", "type", "3").setSource("terms", new String[]{"2", "4"}),
+ client().prepareIndex("lookup", "type", "4").setSource("other", "value"),
+ client().prepareIndex("lookup2", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "1").endObject()
+ .startObject().field("term", "3").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "2").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "3").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .startObject().field("term", "4").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4"));
+
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // same as above, just on the _id...
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("_id").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // another count with the same parameters...
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("2").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("3").lookupPath("terms"))
+ ).get();
+ assertNoFailures(countResponse);
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("4").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 0l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("1").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("2").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("not_exists").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void testBasicFilterById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
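+ // types on an ids filter are optional; omitting them (or passing null) matches ids across all types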
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter("type1", "type2").ids("1", "2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter().ids("1", "2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter("type1").ids("1", "2"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter().ids("1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter(null).ids("1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter("type1", "type2", "type3").ids("1", "2", "3", "4"))).get();
+ assertHitCount(countResponse, 2l);
+ }
+
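+ // A sketch of the equivalent REST-level ids filter, assuming the 1.x wire format:
+ //
+ //   { "ids" : { "type" : ["type1", "type2"], "values" : ["1", "2"] } }
+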
+ @Test
+ public void testBasicQueryById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery("type1", "type2").ids("1", "2")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery().ids("1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery().ids("1", "2")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery("type1").ids("1", "2")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery().ids("1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery(null).ids("1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery("type1", "type2", "type3").ids("1", "2", "3", "4")).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testNumericTermsAndRanges() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("num_byte", 1, "num_short", 1, "num_integer", 1,
+ "num_long", 1, "num_float", 1, "num_double", 1).get();
+
+ client().prepareIndex("test", "type1", "2").setSource("num_byte", 2, "num_short", 2, "num_integer", 2,
+ "num_long", 2, "num_float", 2, "num_double", 2).get();
+
+ client().prepareIndex("test", "type1", "17").setSource("num_byte", 17, "num_short", 17, "num_integer", 17,
+ "num_long", 17, "num_float", 17, "num_double", 17).get();
+ refresh();
+
+ CountResponse countResponse;
+ logger.info("--> term query on 1");
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_byte", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_short", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_integer", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_long", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_float", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_double", 1)).get();
+ assertHitCount(countResponse, 1l);
+
+ logger.info("--> terms query on 1");
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_byte", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_short", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_integer", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_long", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_float", new double[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_double", new double[]{1})).get();
+ assertHitCount(countResponse, 1l);
+
+ logger.info("--> term filter on 1");
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_byte", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_short", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_integer", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_long", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_float", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_double", 1))).get();
+ assertHitCount(countResponse, 1l);
+
+ logger.info("--> terms filter on 1");
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_byte", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_short", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_integer", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_long", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_float", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_double", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test // see #2994
+ public void testSimpleSpan() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar").get();
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything").get();
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other").get();
+ client().prepareIndex("test", "test", "4").setSource("description", "foo").get();
+ refresh();
+
+ CountResponse response = client().prepareCount("test")
+ .setQuery(QueryBuilders.spanOrQuery().clause(QueryBuilders.spanTermQuery("description", "bar"))).get();
+ assertHitCount(response, 1l);
+ response = client().prepareCount("test")
+ .setQuery(QueryBuilders.spanOrQuery().clause(QueryBuilders.spanTermQuery("test.description", "bar"))).get();
+ assertHitCount(response, 1l);
+
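+ // slop(3) allows up to three intervening positions between the span clauses,
+ // so documents 1, 2 and 3 ("foo other ...") all match below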
+ response = client().prepareCount("test").setQuery(
+ QueryBuilders.spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("description", "foo"))
+ .clause(QueryBuilders.spanTermQuery("test.description", "other"))
+ .slop(3)).get();
+ assertHitCount(response, 3l);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java b/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
new file mode 100644
index 0000000..0c3f18b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.count.simple;
+
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class SimpleCountTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testCountRandomPreference() throws InterruptedException, ExecutionException {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 3))).get();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ // a random preference string must not change the result; every shard copy holds the same six docs
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomUnicodeOfLengthBetween(0, 4)).get();
+ assertHitCount(countResponse, 6l);
+ }
+ }
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
+ CountResponse countResponse = client().prepareCount()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7")))
+ .execute().actionGet();
+
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+ // _id is not indexed, but let's see that term queries on it still work automatically
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.queryString("_id:XXX1")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ // _id is not indexed, but prefix queries on it are supported automatically as well
+ countResponse = client().prepareCount().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.queryString("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+ }
+
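+ // The _id field itself is not indexed; queries on _id are presumably rewritten
+ // against the indexed _uid ("type#id") terms, which is why the term, query_string
+ // and prefix variants above all match.
+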
+ @Test
+ public void simpleDateMathTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
+ CountResponse countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d")).execute().actionGet();
+ assertNoFailures(countResponse);
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test").setQuery(QueryBuilders.queryString("field:[2010-01-03||+2d TO 2010-01-04||+2d]")).execute().actionGet();
+ assertHitCount(countResponse, 2l);
+ }
+
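+ // Date math recap, hedged from the expressions above: an anchor date followed by
+ // "||" and operations, so "2010-01-03||+2d" resolves to 2010-01-05; relative
+ // anchors like "now+1h" and rounding such as "2010-01-03||+2d/d" are accepted by
+ // the range query parser as well.
+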
+ @Test
+ public void localeDependentDateTests() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("date_field")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "de")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", "" + i).setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800").execute().actionGet();
+ client().prepareIndex("test", "type1", "" + (10 + i)).setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800").execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(countResponse, 10l);
+
+
+ countResponse = client().prepareCount("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(countResponse, 20l);
+
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java
new file mode 100644
index 0000000..ef394bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deleteByQuery;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class DeleteByQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDeleteAllNoIndices() {
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery();
+ deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+ deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false));
+ DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet();
+ assertThat(actionGet.getIndices().size(), equalTo(0));
+ }
+
+ @Test
+ public void testDeleteAllOneIndex() {
+
+ String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}";
+
+ client().prepareIndex("twitter", "tweet").setSource(json).setRefresh(true).execute().actionGet();
+
+ SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(1l));
+ DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery();
+ deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+
+ DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet();
+ assertThat(actionGet.status(), equalTo(RestStatus.OK));
+ assertThat(actionGet.getIndex("twitter"), notNullValue());
+ assertThat(actionGet.getIndex("twitter").getFailedShards(), equalTo(0));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testMissing() {
+
+ String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}";
+
+ client().prepareIndex("twitter", "tweet").setSource(json).setRefresh(true).execute().actionGet();
+
+ SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(1l));
+ DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery();
+ deleteByQueryRequestBuilder.setIndices("twitter", "missing");
+ deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+
+ try {
+ deleteByQueryRequestBuilder.execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
+ // expected: the "missing" index does not exist
+ }
+
+ deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.lenient());
+ DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet();
+ assertThat(actionGet.status(), equalTo(RestStatus.OK));
+ assertThat(actionGet.getIndex("twitter").getFailedShards(), equalTo(0));
+ assertThat(actionGet.getIndex("twitter"), notNullValue());
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testFailure() throws Exception {
+ client().admin().indices().prepareCreate("twitter").execute().actionGet();
+
+ DeleteByQueryResponse response = client().prepareDeleteByQuery("twitter")
+ .setQuery(QueryBuilders.hasChildQuery("type", QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+
+ assertThat(response.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(response.getIndex("twitter").getSuccessfulShards(), equalTo(0));
+ assertThat(response.getIndex("twitter").getFailedShards(), equalTo(5));
+ assertThat(response.getIndices().size(), equalTo(1));
+ assertThat(response.getIndices().get("twitter").getFailedShards(), equalTo(5));
+ assertThat(response.getIndices().get("twitter").getFailures().length, equalTo(5));
+ for (ShardOperationFailedException failure : response.getIndices().get("twitter").getFailures()) {
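+ // the doubled "for for" is not a typo in this test: it mirrors the parser's
+ // error message verbatim, typo included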
+ assertThat(failure.reason(), containsString("[twitter] [has_child] No mapping for for type [type]"));
+ assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(failure.shardId(), greaterThan(-1));
+ }
+ }
+
+ @Test
+ public void testDeleteByFieldQuery() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ int numDocs = atLeast(10);
+ for (int i = 0; i < numDocs; i++) {
+ client().prepareIndex("test", "test", Integer.toString(i))
+ .setRouting(randomAsciiOfLengthBetween(1, 5))
+ .setSource("foo", "bar").get();
+ }
+ refresh();
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1)))).get(), 1);
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs);
+ client().prepareDeleteByQuery("test")
+ .setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1))))
+ .execute().actionGet();
+ refresh();
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs - 1);
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java b/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java
new file mode 100644
index 0000000..2ba71a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.jackson;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class JacksonLocationTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLocationExtraction() throws IOException {
+ // {
+ // "index" : "test",
+ // "source" : {
+ // "value" : "something"
+ // }
+ // }
+ BytesStreamOutput os = new BytesStreamOutput();
+ JsonGenerator gen = new JsonFactory().createGenerator(os);
+ gen.writeStartObject();
+
+ gen.writeStringField("index", "test");
+
+ gen.writeFieldName("source");
+ gen.writeStartObject();
+ gen.writeStringField("value", "something");
+ gen.writeEndObject();
+
+ gen.writeEndObject();
+
+ gen.close();
+
+ byte[] data = os.bytes().toBytes();
+ JsonParser parser = new JsonFactory().createParser(data);
+
+ assertThat(parser.nextToken(), equalTo(JsonToken.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "index"
+ assertThat(parser.nextToken(), equalTo(JsonToken.VALUE_STRING));
+ assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "source"
+// JsonLocation location1 = parser.getCurrentLocation();
+// parser.skipChildren();
+// JsonLocation location2 = parser.getCurrentLocation();
+//
+// byte[] sourceData = new byte[(int) (location2.getByteOffset() - location1.getByteOffset())];
+// System.arraycopy(data, (int) location1.getByteOffset(), sourceData, 0, sourceData.length);
+//
+// JsonParser sourceParser = new JsonFactory().createJsonParser(new FastByteArrayInputStream(sourceData));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.START_OBJECT));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "value"
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.VALUE_STRING));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.END_OBJECT));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java b/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java
new file mode 100644
index 0000000..c306b55
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.joda;
+
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTimeZone;
+import org.joda.time.MutableDateTime;
+import org.joda.time.format.*;
+import org.junit.Test;
+
+import java.util.Date;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleJodaTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMultiParsers() {
+ DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
+ DateTimeParser[] parsers = new DateTimeParser[3];
+ parsers[0] = DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getParser();
+ parsers[1] = DateTimeFormat.forPattern("MM-dd-yyyy").withZone(DateTimeZone.UTC).getParser();
+ parsers[2] = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(DateTimeZone.UTC).getParser();
+ builder.append(DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getPrinter(), parsers);
+
+ DateTimeFormatter formatter = builder.toFormatter();
+
+ formatter.parseMillis("2009-11-15 14:12:12");
+ }
+
+ @Test
+ public void testIsoDateFormatDateTimeNoMillisUTC() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
+
+ assertThat(millis, equalTo(0l));
+ }
+
+ @Test
+ public void testUpperBound() {
+ MutableDateTime dateTime = new MutableDateTime(3000, 12, 31, 23, 59, 59, 999, DateTimeZone.UTC);
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+
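+ // parseInto only overwrites the fields present in the input, so the pre-set
+ // 23:59:59.999 time-of-day survives the date-only parse below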
+ String value = "2000-01-01";
+ int i = formatter.parseInto(dateTime, value, 0);
+ assertThat(i, equalTo(value.length()));
+ assertThat(dateTime.toString(), equalTo("2000-01-01T23:59:59.999Z"));
+ }
+
+ @Test
+ public void testIsoDateFormatDateOptionalTimeUTC() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
+ assertThat(millis, equalTo(0l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.001Z");
+ assertThat(millis, equalTo(1l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.1Z");
+ assertThat(millis, equalTo(100l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.1");
+ assertThat(millis, equalTo(100l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00");
+ assertThat(millis, equalTo(0l));
+ millis = formatter.parseMillis("1970-01-01");
+ assertThat(millis, equalTo(0l));
+
+ millis = formatter.parseMillis("1970");
+ assertThat(millis, equalTo(0l));
+
+ try {
+ formatter.parseMillis("1970 kuku");
+ fail("formatting should fail");
+ } catch (IllegalArgumentException e) {
+ // all is well
+ }
+
+ // test offset in format
+ millis = formatter.parseMillis("1970-01-01T00:00:00-02:00");
+ assertThat(millis, equalTo(TimeValue.timeValueHours(2).millis()));
+ }
+
+ @Test
+ public void testIsoVsCustom() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00");
+ assertThat(millis, equalTo(0l));
+
+ formatter = DateTimeFormat.forPattern("yyyy/MM/dd HH:mm:ss").withZone(DateTimeZone.UTC);
+ millis = formatter.parseMillis("1970/01/01 00:00:00");
+ assertThat(millis, equalTo(0l));
+
+ FormatDateTimeFormatter formatter2 = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
+ millis = formatter2.parser().parseMillis("1970/01/01 00:00:00");
+ assertThat(millis, equalTo(0l));
+ }
+
+ @Test
+ public void testWriteAndParse() {
+ DateTimeFormatter dateTimeWriter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ Date date = new Date();
+ assertThat(formatter.parseMillis(dateTimeWriter.print(date.getTime())), equalTo(date.getTime()));
+ }
+
+ @Test
+ public void testSlashInFormat() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("MM/yyyy");
+ formatter.parser().parseMillis("01/2001");
+
+ formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
+ long millis = formatter.parser().parseMillis("1970/01/01 00:00:00");
+ formatter.printer().print(millis);
+
+ try {
+ millis = formatter.parser().parseMillis("1970/01/01");
+ fail();
+ } catch (IllegalArgumentException e) {
+ // it really can't parse this one
+ }
+ }
+
+ @Test
+ public void testMultipleFormats() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ long millis = formatter.parser().parseMillis("1970/01/01 00:00:00");
+ assertThat("1970/01/01 00:00:00", is(formatter.printer().print(millis)));
+ }
+
+ @Test
+ public void testMultipleDifferentFormats() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ String input = "1970/01/01 00:00:00";
+ long millis = formatter.parser().parseMillis(input);
+ assertThat(input, is(formatter.printer().print(millis)));
+
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||dateOptionalTime");
+ Joda.forPattern("dateOptionalTime||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||dateOptionalTime||yyyy/MM/dd");
+ Joda.forPattern("date_time||date_time_no_millis");
+ Joda.forPattern(" date_time || date_time_no_millis");
+ }
+
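+ // Joda.forPattern splits on "||": judging by the tests above, the first pattern
+ // is used for printing and all of them are tried in order when parsing, so named
+ // formats like dateOptionalTime can be mixed with custom patterns.
+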
+ @Test
+ public void testInvalidPatterns() {
+ expectInvalidPattern("does_not_exist_pattern", "Invalid format: [does_not_exist_pattern]: Illegal pattern component: o");
+ expectInvalidPattern("OOOOO", "Invalid format: [OOOOO]: Illegal pattern component: OOOOO");
+ expectInvalidPattern(null, "No date pattern provided");
+ expectInvalidPattern("", "No date pattern provided");
+ expectInvalidPattern(" ", "No date pattern provided");
+ expectInvalidPattern("||date_time_no_millis", "No date pattern provided");
+ expectInvalidPattern("date_time_no_millis||", "No date pattern provided");
+ }
+
+ private void expectInvalidPattern(String pattern, String errorMessage) {
+ try {
+ Joda.forPattern(pattern);
+ fail("Pattern " + pattern + " should have thrown an exception but did not");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString(errorMessage));
+ }
+ }
+
+ @Test
+ public void testRounding() {
+ long TIME = utcTimeInMillis("2009-02-03T01:01:01");
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setMillis(TIME);
+ assertThat(time.monthOfYear().roundFloor().toString(), equalTo("2009-02-01T00:00:00.000Z"));
+ time.setMillis(TIME);
+ assertThat(time.hourOfDay().roundFloor().toString(), equalTo("2009-02-03T01:00:00.000Z"));
+ time.setMillis(TIME);
+ assertThat(time.dayOfMonth().roundFloor().toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ }
+
+ @Test
+ public void testRoundingSetOnTime() {
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().monthOfYear(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-01T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-01T00:00:00.000Z")));
+
+ time.setMillis(utcTimeInMillis("2009-05-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-05-01T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-05-01T00:00:00.000Z")));
+
+ time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-03T00:00:00.000Z")));
+
+ time.setMillis(utcTimeInMillis("2009-02-02T23:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-02T00:00:00.000Z")));
+
+ time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().weekOfWeekyear(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2011-05-05T01:01:01"));
+ assertThat(time.toString(), equalTo("2011-05-02T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2011-05-02T00:00:00.000Z")));
+ }
+
+ @Test
+ public void testRoundingWithTimeZone() {
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setZone(DateTimeZone.forOffsetHours(-2));
+ time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+
+ MutableDateTime utcTime = new MutableDateTime(DateTimeZone.UTC);
+ utcTime.setRounding(utcTime.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ utcTime.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+
+ assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000-02:00"));
+ assertThat(utcTime.toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ // time rounds to midnight on the 2nd at -02:00 (02:00 UTC on the 2nd), while utcTime
+ // rounds to midnight on the 3rd UTC, so the millis diff is 22 hours rather than 24
+ assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
+
+ time.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
+ utcTime.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000-02:00"));
+ assertThat(utcTime.toString(), equalTo("2009-02-04T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
+ }
+
+ private long utcTimeInMillis(String time) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC).parseMillis(time);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java
new file mode 100644
index 0000000..e8fa166
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.lucene;
+
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleLuceneTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSortValues() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ for (int i = 0; i < 10; i++) {
+ Document document = new Document();
+ document.add(new TextField("str", new String(new char[]{(char) (97 + i), (char) (97 + i)}), Field.Store.YES));
+ indexWriter.addDocument(document);
+ }
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, 10, new Sort(new SortField("str", SortField.Type.STRING)));
+ for (int i = 0; i < 10; i++) {
+ FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
+ assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
+ }
+ }
+
+ @Test
+ public void testAddDocAfterPrepareCommit() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ indexWriter.addDocument(document);
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+ assertThat(reader.numDocs(), equalTo(1));
+
+ indexWriter.prepareCommit();
+ // openIfChanged returns null because the NRT reader already sees the current state
+ assertThat(DirectoryReader.openIfChanged(reader), equalTo(null));
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ indexWriter.addDocument(document);
+ indexWriter.commit();
+ reader = DirectoryReader.openIfChanged(reader);
+ assertThat(reader.numDocs(), equalTo(2));
+ }
+
+ @Test
+ public void testSimpleNumericOps() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new IntField("test", 2, IntField.TYPE_STORED));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+ Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
+ IndexableField f = doc.getField("test");
+ assertThat(f.stringValue(), equalTo("2"));
+
+ BytesRef bytes = new BytesRef();
+ NumericUtils.intToPrefixCoded(2, 0, bytes);
+ topDocs = searcher.search(new TermQuery(new Term("test", bytes)), 1);
+ doc = searcher.doc(topDocs.scoreDocs[0].doc);
+ f = doc.getField("test");
+ assertThat(f.stringValue(), equalTo("2"));
+
+ indexWriter.close();
+ }
+
+ /**
+ * Here, we verify that the order in which fields are added to a document matters, not their
+ * lexicographic order. This means that heavily accessed fields read through a field selector
+ * should be added first, so a load-and-break visitor can stop early.
+ */
+ @Test
+ public void testOrdering() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("#id", "1", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+ final ArrayList<String> fieldsOrder = new ArrayList<String>();
+ searcher.doc(topDocs.scoreDocs[0].doc, new StoredFieldVisitor() {
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ fieldsOrder.add(fieldInfo.name);
+ return Status.YES;
+ }
+ });
+
+ assertThat(fieldsOrder.size(), equalTo(2));
+ assertThat(fieldsOrder.get(0), equalTo("_id"));
+ assertThat(fieldsOrder.get(1), equalTo("#id"));
+
+ indexWriter.close();
+ }
+
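+ // A minimal sketch (hypothetical, not used by this test) of the load-and-break
+ // pattern the javadoc above refers to: stop visiting as soon as the one
+ // interesting field has been loaded, which only works if it was added first.
+ //
+ //   searcher.doc(docId, new StoredFieldVisitor() {
+ //       @Override
+ //       public Status needsField(FieldInfo fieldInfo) throws IOException {
+ //           return "_id".equals(fieldInfo.name) ? Status.YES : Status.STOP;
+ //       }
+ //   });
+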
+ @Test
+ public void testBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ for (int i = 0; i < 100; i++) {
+ // TODO (just setting the boost value does not seem to work...)
+ StringBuilder value = new StringBuilder().append("value");
+ for (int j = 0; j < i; j++) {
+ value.append(" ").append("value");
+ }
+ Document document = new Document();
+ TextField textField = new TextField("_id", Integer.toString(i), Field.Store.YES);
+ textField.setBoost(i);
+ document.add(textField);
+ textField = new TextField("value", value.toString(), Field.Store.YES);
+ textField.setBoost(i);
+ document.add(textField);
+ indexWriter.addDocument(document);
+ }
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TermQuery query = new TermQuery(new Term("value", "value"));
+ TopDocs topDocs = searcher.search(query, 100);
+ assertThat(topDocs.totalHits, equalTo(100));
+ for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+ Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
+// System.out.println(doc.get("id") + ": " + searcher.explain(query, topDocs.scoreDocs[i].doc));
+ assertThat(doc.get("_id"), equalTo(Integer.toString(100 - i - 1)));
+ }
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testNRTSearchOnClosedWriter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+
+ for (int i = 0; i < 100; i++) {
+ Document document = new Document();
+ TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES);
+ field.setBoost(i);
+ document.add(field);
+ indexWriter.addDocument(document);
+ }
+ reader = refreshReader(reader);
+
+ indexWriter.close();
+
+ TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator(null);
+ termDocs.next();
+ }
+
+ /**
+ * A test just to verify how term freqs are stored for numeric fields: <tt>int1</tt> does not
+ * store term freqs while <tt>int2</tt> does.
+ */
+ @Test
+ public void testNumericTermDocsFreqs() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ FieldType type = IntField.TYPE_NOT_STORED;
+ IntField field = new IntField("int1", 1, type);
+ doc.add(field);
+
+ type = new FieldType(IntField.TYPE_NOT_STORED);
+ type.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS);
+ type.freeze();
+
+ field = new IntField("int1", 1, type);
+ doc.add(field);
+
+ field = new IntField("int2", 1, type);
+ doc.add(field);
+
+ field = new IntField("int2", 1, type);
+ doc.add(field);
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(reader);
+
+ Terms terms = atomicReader.terms("int1");
+ TermsEnum termsEnum = terms.iterator(null);
+ termsEnum.next();
+
+ DocsEnum termDocs = termsEnum.docs(atomicReader.getLiveDocs(), null);
+ assertThat(termDocs.nextDoc(), equalTo(0));
+ assertThat(termDocs.docID(), equalTo(0));
+ assertThat(termDocs.freq(), equalTo(1));
+
+ terms = atomicReader.terms("int2");
+ termsEnum = terms.iterator(termsEnum);
+ termsEnum.next();
+ termDocs = termsEnum.docs(atomicReader.getLiveDocs(), termDocs);
+ assertThat(termDocs.nextDoc(), equalTo(0));
+ assertThat(termDocs.docID(), equalTo(0));
+ assertThat(termDocs.freq(), equalTo(2));
+
+ reader.close();
+ indexWriter.close();
+ }
+
+ private DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
+ DirectoryReader oldReader = reader;
+ reader = DirectoryReader.openIfChanged(reader);
+ if (reader != oldReader) {
+ oldReader.close();
+ }
+ return reader;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java
new file mode 100644
index 0000000..1d5e7cc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
+import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class VectorHighlighterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVectorHighlighter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+ assertThat(fragment, equalTo("the big <b>bad</b> dog"));
+ }
+
+ @Test
+ public void testVectorHighlighterPrefixQuery() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+
+ PrefixQuery prefixQuery = new PrefixQuery(new Term("content", "ba"));
+ assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT.getClass().getName()));
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(prefixQuery),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+
+ prefixQuery.setRewriteMethod(PrefixQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+ Query rewriteQuery = prefixQuery.rewrite(reader);
+ fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+
+ // now check with the custom field query
+ prefixQuery = new PrefixQuery(new Term("content", "ba"));
+ assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT.getClass().getName()));
+ fragment = highlighter.getBestFragment(new CustomFieldQuery(prefixQuery, reader, highlighter),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+ }
+
+ @Test
+ public void testVectorHighlighterNoStore() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+ }
+
+ @Test
+ public void testVectorHighlighterNoTermVector() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/discovery/DiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/DiscoveryTests.java
new file mode 100644
index 0000000..6a3fc26
--- /dev/null
+++ b/src/test/java/org/elasticsearch/discovery/DiscoveryTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope=Scope.SUITE, numNodes=2)
+public class DiscoveryTests extends ElasticsearchIntegrationTest {
+
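+ // multicast is disabled and unicast pinging is pointed at localhost, so the two
+ // suite nodes can only discover each other through the configured hosts list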
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder().put("discovery.zen.ping.multicast.enabled", false)
+ .put("discovery.zen.ping.unicast.hosts", "localhost").put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "Proposed fix: Each node maintains a list of endpoints that have pinged it " +
+ "(UnicastZenPing#temporalResponses), a node will remove entries that are old. We can use this list to extend " +
+ "'discovery.zen.ping.unicast.hosts' list of nodes to ping. If we do this then in the test both nodes will ping each " +
+ "other, like in solution 1. The upside compared to solution 1, is that it won't go and ping 100 endpoints (based on the default port range), " +
+ "just other nodes that have pinged it in addition to the already configured nodes in the 'discovery.zen.ping.unicast.hosts' list.")
+ public void testUnicastDiscovery() {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java b/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java
new file mode 100644
index 0000000..ad98754
--- /dev/null
+++ b/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.multicast;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.junit.Test;
+
+import java.net.DatagramPacket;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link MulticastZenPing}: multicast discovery between two local
+ * nodes, and handling of a ping request sent by an external (non-node) client.
+ */
+public class MulticastZenPingTests extends ElasticsearchTestCase {
+
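+ // randomize the multicast group and port so concurrently running test JVMs
+ // do not see each other's pings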
+ private Settings buildRandomMulticast(Settings settings) {
+ ImmutableSettings.Builder builder = ImmutableSettings.builder().put(settings);
+ builder.put("discovery.zen.ping.multicast.group", "224.2.3." + randomIntBetween(0, 255));
+ builder.put("discovery.zen.ping.multicast.port", randomIntBetween(55000, 56000));
+ return builder.build();
+ }
+
+ @Test
+ public void testSimplePings() {
+ Settings settings = ImmutableSettings.EMPTY;
+ settings = buildRandomMulticast(settings);
+
+ ThreadPool threadPool = new ThreadPool();
+ ClusterName clusterName = new ClusterName("test");
+ final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ final TransportService transportServiceB = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeB = new DiscoveryNode("B", transportServiceB.boundAddress().publishAddress(), Version.CURRENT);
+
+ MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT);
+ zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingA.start();
+
+ MulticastZenPing zenPingB = new MulticastZenPing(threadPool, transportServiceB, clusterName, Version.CURRENT);
+ zenPingB.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeB).localNodeId("B").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingB.start();
+
+ try {
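+ // collect ping responses for one second; node A should see exactly one
+ // other node, B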
+ ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].target().id(), equalTo("B"));
+ } finally {
+ zenPingA.close();
+ zenPingB.close();
+ transportServiceA.close();
+ transportServiceB.close();
+ threadPool.shutdown();
+ }
+ }
+
+ @Test
+ public void testExternalPing() throws Exception {
+ Settings settings = ImmutableSettings.EMPTY;
+ settings = buildRandomMulticast(settings);
+
+ ThreadPool threadPool = new ThreadPool();
+ ClusterName clusterName = new ClusterName("test");
+ final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT);
+ zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingA.start();
+
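+ // act like an external (non-node) client: send a raw JSON ping request to
+ // the default multicast group/port. There are no assertions here; the test
+ // only checks that sending such a request does not trip anything up.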
+ MulticastSocket multicastSocket = null;
+ try {
+ Loggers.getLogger(MulticastZenPing.class).setLevel("TRACE");
+ multicastSocket = new MulticastSocket(54328);
+ multicastSocket.setReceiveBufferSize(2048);
+ multicastSocket.setSendBufferSize(2048);
+ multicastSocket.setSoTimeout(60000);
+
+ DatagramPacket datagramPacket = new DatagramPacket(new byte[2048], 2048, InetAddress.getByName("224.2.2.4"), 54328);
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("request").field("cluster_name", "test").endObject().endObject();
+ datagramPacket.setData(builder.bytes().toBytes());
+ multicastSocket.send(datagramPacket);
+ Thread.sleep(100);
+ } finally {
+ Loggers.getLogger(MulticastZenPing.class).setLevel("INFO");
+ if (multicastSocket != null) multicastSocket.close();
+ zenPingA.close();
+ threadPool.shutdown();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java b/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
new file mode 100644
index 0000000..7a553e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.unicast;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link UnicastZenPing}: two nodes that list each other's concrete
+ * transport addresses as unicast hosts should discover one another.
+ */
+public class UnicastZenPingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimplePings() {
+ Settings settings = ImmutableSettings.EMPTY;
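+ // restrict both transports to a small random port range so the unicast
+ // hosts list below points at predictable, distinct endpoints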
+ int startPort = 11000 + randomIntBetween(0, 1000);
+ int endPort = startPort + 10;
+ settings = ImmutableSettings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+
+ ThreadPool threadPool = new ThreadPool();
+ ClusterName clusterName = new ClusterName("test");
+ NetworkService networkService = new NetworkService(settings);
+
+ NettyTransport transportA = new NettyTransport(settings, threadPool, networkService, Version.CURRENT);
+ final TransportService transportServiceA = new TransportService(transportA, threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("UZP_A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ InetSocketTransportAddress addressA = (InetSocketTransportAddress) transportA.boundAddress().publishAddress();
+
+ NettyTransport transportB = new NettyTransport(settings, threadPool, networkService, Version.CURRENT);
+ final TransportService transportServiceB = new TransportService(transportB, threadPool).start();
+ final DiscoveryNode nodeB = new DiscoveryNode("UZP_B", transportServiceB.boundAddress().publishAddress(), Version.CURRENT);
+
+ InetSocketTransportAddress addressB = (InetSocketTransportAddress) transportB.boundAddress().publishAddress();
+
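+ // configure both concrete transport addresses as the unicast hosts list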
+ Settings hostsSettings = ImmutableSettings.settingsBuilder().putArray("discovery.zen.ping.unicast.hosts",
+ addressA.address().getAddress().getHostAddress() + ":" + addressA.address().getPort(),
+ addressB.address().getAddress().getHostAddress() + ":" + addressB.address().getPort())
+ .build();
+
+ UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, transportServiceA, clusterName, Version.CURRENT, null);
+ zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("UZP_A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingA.start();
+
+ UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, transportServiceB, clusterName, Version.CURRENT, null);
+ zenPingB.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingB.start();
+
+ try {
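+ // node A pings for one second and should get exactly one response, from B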
+ ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].target().id(), equalTo("UZP_B"));
+ } finally {
+ zenPingA.close();
+ zenPingB.close();
+ transportServiceA.close();
+ transportServiceB.close();
+ threadPool.shutdown();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java b/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java
new file mode 100644
index 0000000..727649e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ * Runs {@link DocumentActionsTests} against an index ("test1") that is only
+ * addressed through its alias ("test").
+ */
+public class AliasedIndexDocumentActionsTests extends DocumentActionsTests {
+
+ protected void createIndex() {
+ logger.info("Creating index [test1] with alias [test]");
+ try {
+ client().admin().indices().prepareDelete("test1").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ logger.info("--> creating index test");
+ client().admin().indices().create(createIndexRequest("test1").settings(settingsBuilder().putArray("index.aliases", "test"))).actionGet();
+ }
+
+ @Override
+ protected String getConcreteIndexName() {
+ return "test1";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/document/BulkTests.java b/src/test/java/org/elasticsearch/document/BulkTests.java
new file mode 100644
index 0000000..53f9472
--- /dev/null
+++ b/src/test/java/org/elasticsearch/document/BulkTests.java
@@ -0,0 +1,593 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.CyclicBarrier;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the bulk API: mixed index/update/delete requests,
+ * versioning, upserts, parent/child routing and per-item failures.
+ */
+public class BulkTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBulkUpdate_simple() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource("field", 2).setCreate(true))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("3").setSource("field", 3))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("4").setSource("field", 4))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("5").setSource("field", 5))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setScript("ctx._source.field += 1"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setScript("ctx._source.field += 1").setRetryOnConflict(3))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setDoc(jsonBuilder().startObject().field("field1", "test").endObject()))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("1"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("3"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
+ GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(2l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").setFields("field1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("test"));
+
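+ // id 6 has an upsert and gets created; id 7 has no upsert and fails with
+ // DocumentMissingException; id 2 exists and is incremented a second time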
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("6").setScript("ctx._source.field += 1")
+ .setUpsert(jsonBuilder().startObject().field("field", 0).endObject()))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("7").setScript("ctx._source.field += 1"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setScript("ctx._source.field += 1"))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("6"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertThat(bulkResponse.getItems()[1].getResponse(), nullValue());
+ assertThat(bulkResponse.getItems()[1].getFailure().getId(), equalTo("7"));
+ assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("DocumentMissingException"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(0l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(3l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(4l));
+ }
+
+ @Test
+ public void testBulkVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field", "1"))
+ .add(client().prepareIndex("test", "type", "2").setCreate(true).setSource("field", "1"))
+ .add(client().prepareIndex("test", "type", "1").setSource("field", "2")).get();
+
+ assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertTrue(((IndexResponse) bulkResponse.getItems()[1].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(1l));
+ assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate("test", "type", "1").setVersion(4l).setDoc("field", "2"))
+ .add(client().prepareUpdate("test", "type", "2").setDoc("field", "2"))
+ .add(client().prepareUpdate("test", "type", "1").setVersion(2l).setDoc("field", "3")).get();
+
+ assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
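+ // with external versioning the supplied version is stored verbatim: the
+ // "create" carries version 10 and the later index with version 12 wins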
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex("test", "type", "e1").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareIndex("test", "type", "e2").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareIndex("test", "type", "e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)).get();
+
+ assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(10l));
+ assertTrue(((IndexResponse) bulkResponse.getItems()[1].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(10l));
+ assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(12l));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate("test", "type", "e1").setVersion(4l).setDoc("field", "2").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareUpdate("test", "type", "e2").setDoc("field", "2").setVersion(15).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareUpdate("test", "type", "e1").setVersion(2l).setDoc("field", "3").setVersion(15).setVersionType(VersionType.EXTERNAL)).get();
+
+ assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(15l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(15l));
+ }
+
+ @Test
+ public void testBulkUpdate_malformedScripts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("3").setSource("field", 1))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+
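+ // "a" is undefined in the default (mvel) script language, so items 1 and 3
+ // fail at script execution time while item 2 succeeds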
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setScript("ctx._source.field += a").setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setScript("ctx._source.field += 1").setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setScript("ctx._source.field += a").setFields("field"))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(bulkResponse.getItems()[0].getFailure().getId(), equalTo("1"));
+ assertThat(bulkResponse.getItems()[0].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[0].getResponse(), nullValue());
+
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getGetResult().field("field").getValue()), equalTo(2));
+ assertThat(bulkResponse.getItems()[1].getFailure(), nullValue());
+
+ assertThat(bulkResponse.getItems()[2].getFailure().getId(), equalTo("3"));
+ assertThat(bulkResponse.getItems()[2].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[2].getResponse(), nullValue());
+ }
+
+ @Test
+ public void testBulkUpdate_largerVolume() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ int numDocs = 2000;
+ BulkRequestBuilder builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript("ctx._source.counter += 1").setFields("counter")
+ .setUpsert(jsonBuilder().startObject().field("counter", 1).endObject())
+ );
+ }
+
+ BulkResponse response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(1l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(1l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()), equalTo(1));
+
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat((Long) getResponse.getField("counter").getValue(), equalTo(1l));
+ }
+ }
+
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ UpdateRequestBuilder updateBuilder = client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setFields("counter");
+ if (i % 2 == 0) {
+ updateBuilder.setScript("ctx._source.counter += 1");
+ } else {
+ updateBuilder.setDoc(jsonBuilder().startObject().field("counter", 2).endObject());
+ }
+ if (i % 3 == 0) {
+ updateBuilder.setRetryOnConflict(3);
+ }
+
+ builder.add(updateBuilder);
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(2l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()), equalTo(2));
+ }
+
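+ // update ids [numDocs/2, numDocs/2 + numDocs): the lower half exist and go
+ // from version 2 to 3, the upper half were never indexed and fail with
+ // DocumentMissingException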
+ builder = client().prepareBulk();
+ int maxDocs = numDocs / 2 + numDocs;
+ for (int i = (numDocs / 2); i < maxDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx._source.counter += 1")
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(true));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ int id = i + (numDocs / 2);
+ if (i >= (numDocs / 2)) {
+ assertThat(response.getItems()[i].getFailure().getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getFailure().getMessage(), containsString("DocumentMissingException"));
+ } else {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(3l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+ }
+
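+ // "ctx.op = none" turns every update into a no-op: the items still report
+ // opType "update" but the documents are left untouched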
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx.op = \"none\"")
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+
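+ // "ctx.op = delete" makes each update delete its target document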
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx.op = \"delete\"")
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+ }
+ }
+
+ @Test
+ public void testBulkIndexingWhileInitializing() throws Exception {
+
+ int shards = 1 + randomInt(10);
+ int replica = randomInt(2);
+
+ cluster().ensureAtLeastNumNodes(1 + replica);
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", shards)
+ .put("index.number_of_replicas", replica)
+ ).execute().actionGet();
+
+ int numDocs = 5000;
+ int bulk = 50;
+ for (int i = 0; i < numDocs; ) {
+ BulkRequestBuilder builder = client().prepareBulk();
+ for (int j = 0; j < bulk; j++, i++) {
+ builder.add(client().prepareIndex("test", "type1", Integer.toString(i)).setSource("val", i));
+ }
+ logger.info("bulk indexing {}-{}", i - bulk, i - 1);
+ BulkResponse response = builder.get();
+ if (response.hasFailures()) {
+ fail(response.buildFailureMessage());
+ }
+ }
+
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+
+ /*
+ Test for https://github.com/elasticsearch/elasticsearch/issues/3444
+ */
+ @Test
+ public void testBulkUpdateDocAsUpsertWithParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent", "{\"parent\":{}}")
+ .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n" +
+ "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChild = new BytesArray("{ \"update\" : { \"_index\" : \"test\", \"_type\" : \"child\", \"_id\" : \"child1\", \"parent\" : \"parent1\"}}\n" +
+ "{\"doc\" : { \"field1\" : \"value1\"}, \"doc_as_upsert\" : \"true\"}\n").array();
+
+ builder.add(addParent, 0, addParent.length, false);
+ builder.add(addChild, 0, addChild.length, false);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+
+ client().admin().indices().prepareRefresh("test").get();
+
+ //we check that the _parent field was set on the child document by using the has parent query
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery()))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSearchHits(searchResponse, "child1");
+ }
+
+ /*
+ Test for https://github.com/elasticsearch/elasticsearch/issues/3444
+ */
+ @Test
+ public void testBulkUpdateUpsertWithParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent", "{\"parent\":{}}")
+ .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n" +
+ "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChild = new BytesArray("{\"update\" : { \"_id\" : \"child1\", \"_type\" : \"child\", \"_index\" : \"test\", \"parent\" : \"parent1\"} }\n" +
+ "{ \"script\" : \"ctx._source.field2 = 'value2'\", \"upsert\" : {\"field1\" : \"value1\"}}\n").array();
+
+ builder.add(addParent, 0, addParent.length, false);
+ builder.add(addChild, 0, addChild.length, false);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery()))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSearchHits(searchResponse, "child1");
+ }
+
+ @Test
+ public void testFailingVersionedUpdatedOnBulk() throws Exception {
+ createIndex("test");
+ index("test","type","1","field","1");
+ final BulkResponse[] responses = new BulkResponse[30];
+ final CyclicBarrier cyclicBarrier = new CyclicBarrier(responses.length);
+ Thread[] threads = new Thread[responses.length];
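+
+ // all threads issue the same versioned update (expected version 1) at once;
+ // exactly one of them can win the optimistic concurrency check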
+ for (int i = 0; i < responses.length; i++) {
+ final int threadID = i;
+ threads[threadID] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cyclicBarrier.await();
+ } catch (Exception e) {
+ return;
+ }
+ BulkRequestBuilder requestBuilder = client().prepareBulk();
+ requestBuilder.add(client().prepareUpdate("test", "type", "1").setVersion(1).setDoc("field", threadID));
+ responses[threadID] = requestBuilder.get();
+ }
+ });
+ threads[threadID].start();
+ }
+
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+
+ int successes = 0;
+ for (BulkResponse response : responses) {
+ if (!response.hasFailures()) successes++;
+ }
+
+ assertThat(successes, equalTo(1));
+ }
+
+ @Test // issue 4745
+ public void preParsingSourceDueToMappingShouldNotBreakCompleteBulkRequest() throws Exception {
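+ // a document whose source fails to parse (here while extracting _timestamp
+ // from the mapping-declared path) must fail only its own bulk item, not the
+ // whole request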
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", true)
+ .field("path", "last_modified")
+ .endObject()
+ .endObject()
+ .endObject();
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type", builder).get();
+ assertAcked(createIndexResponse);
+
+ String brokenBuildRequestData = "{\"index\": {\"_id\": \"1\"}}\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": {\"_id\": \"2\"}}\n" +
+ "{\"name\": \"Good\", \"last_modified\" : \"2013-04-05\"}\n";
+
+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), false, "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(get("test", "type", "2"));
+ }
+
+ @Test // issue 4745
+ public void preParsingSourceDueToRoutingShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_routing")
+ .field("required", true)
+ .field("path", "my_routing")
+ .endObject()
+ .endObject()
+ .endObject();
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type", builder).get();
+ assertAcked(createIndexResponse);
+ ensureYellow("test");
+
+ String brokenBuildRequestData = "{\"index\": {} }\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": { \"_id\" : \"24000\" } }\n" +
+ "{\"name\": \"Good\", \"my_routing\" : \"48000\"}\n";
+
+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), false, "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(client().prepareGet("test", "type", "24000").setRouting("48000").get());
+ }
+
+ @Test // issue 4745
+ public void preParsingSourceDueToIdShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_id")
+ .field("path", "my_id")
+ .endObject()
+ .endObject()
+ .endObject();
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type", builder).get();
+ assertAcked(createIndexResponse);
+ ensureYellow("test");
+
+ String brokenBuildRequestData = "{\"index\": {} }\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": {} }\n" +
+ "{\"name\": \"Good\", \"my_id\" : \"48\"}\n";
+
+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), false, "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(get("test", "type", "48"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/document/DocumentActionsTests.java b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java
new file mode 100644
index 0000000..ee9eb8d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Exercises the basic document APIs (index, get, delete, count, delete-by-query
+ * and bulk) against a single index.
+ */
+public class DocumentActionsTests extends ElasticsearchIntegrationTest {
+
+ protected void createIndex() {
+ cluster().wipeIndices(getConcreteIndexName());
+ createIndex(getConcreteIndexName());
+ }
+
+
+ protected String getConcreteIndexName() {
+ return "test";
+ }
+
+ @Test
+ public void testIndexActions() throws Exception {
+ createIndex();
+ logger.info("Running Cluster Health");
+ ensureGreen();
+ logger.info("Indexing [type1/1]");
+ IndexResponse indexResponse = client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")).setRefresh(true).execute().actionGet();
+ assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(indexResponse.getId(), equalTo("1"));
+ assertThat(indexResponse.getType(), equalTo("type1"));
+ logger.info("Refreshing");
+ RefreshResponse refreshResponse = refresh();
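+ // 10 shard copies = 5 primaries + 5 replicas with the default index settings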
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(10));
+
+ logger.info("--> index exists?");
+ assertThat(indexExists(getConcreteIndexName()), equalTo(true));
+ logger.info("--> index exists?, fake index");
+ assertThat(indexExists("test1234565"), equalTo(false));
+
+ logger.info("Clearing cache");
+ ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true).idCache(true)).actionGet();
+ assertNoFailures(clearIndicesCacheResponse);
+ assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(10));
+
+ logger.info("Optimizing");
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ OptimizeResponse optimizeResponse = optimize();
+ assertThat(optimizeResponse.getSuccessfulShards(), equalTo(10));
+
+ GetResponse getResult;
+
+ logger.info("Get [type1/1]");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().prepareGet("test", "type1", "1").setOperationThreaded(false).execute().actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ assertThat("cycle(map) #" + i, (String) ((Map) getResult.getSourceAsMap().get("type1")).get("name"), equalTo("test"));
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(true)).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+
+ logger.info("Get [type1/1] with script");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().prepareGet("test", "type1", "1").setFields("type1.name").execute().actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(getResult.isExists(), equalTo(true));
+ assertThat(getResult.getSourceAsBytes(), nullValue());
+ assertThat(getResult.getField("type1.name").getValues().get(0).toString(), equalTo("test"));
+ }
+
+ logger.info("Get [type1/2] (should be empty)");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResult.isExists(), equalTo(false));
+ }
+
+ logger.info("Delete [type1/1]");
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").setReplicationType(ReplicationType.SYNC).execute().actionGet();
+ assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(deleteResponse.getId(), equalTo("1"));
+ assertThat(deleteResponse.getType(), equalTo("type1"));
+ logger.info("Refreshing");
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] (should be empty)");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.isExists(), equalTo(false));
+ }
+
+ logger.info("Index [type1/1]");
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ logger.info("Index [type1/2]");
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test2"))).actionGet();
+
+ logger.info("Flushing");
+ FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet();
+ assertThat(flushResult.getSuccessfulShards(), equalTo(10));
+ assertThat(flushResult.getFailedShards(), equalTo(0));
+ logger.info("Refreshing");
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] and [type1/2]");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ String ste1 = getResult.getSourceAsString();
+ String ste2 = source("2", "test2").string();
+ assertThat("cycle #" + i, ste1, equalTo(ste2));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+
+ logger.info("Count");
+ // check count
+ for (int i = 0; i < 5; i++) {
+ // test successful
+ CountResponse countResponse = client().prepareCount("test").setQuery(termQuery("_type", "type1")).setOperationThreading(BroadcastOperationThreading.NO_THREADS).execute().actionGet();
+ assertNoFailures(countResponse);
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.SINGLE_THREAD)
+ .get();
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.THREAD_PER_SHARD).get();
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ // test failed (simply query that can't be parsed)
+ countResponse = client().count(countRequest("test").source("{ term : { _type : \"type1 } }".getBytes(Charsets.UTF_8))).actionGet();
+
+ assertThat(countResponse.getCount(), equalTo(0l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(countResponse.getFailedShards(), equalTo(5));
+
+ // count with no query is a match all one
+ countResponse = client().prepareCount("test").execute().actionGet();
+ assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ logger.info("Delete by query");
+ DeleteByQueryResponse queryResponse = client().prepareDeleteByQuery().setIndices("test").setQuery(termQuery("name", "test2")).execute().actionGet();
+ assertThat(queryResponse.getIndex(getConcreteIndexName()).getSuccessfulShards(), equalTo(5));
+ assertThat(queryResponse.getIndex(getConcreteIndexName()).getFailedShards(), equalTo(0));
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] and [type1/2], should be empty");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat("cycle #" + i, getResult.isExists(), equalTo(false));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+ }
+
+ @Test
+ public void testBulk() throws Exception {
+ createIndex();
+ logger.info("-> running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource(source("2", "test")).setCreate(true))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setSource(source("3", "test")))
+ .add(client().prepareDelete().setIndex("test").setType("type1").setId("1"))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setSource("{ xxx }")) // failure
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index"));
+ assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[0].getId(), equalTo("1"));
+
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[1].getId(), equalTo("2"));
+
+ assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1"));
+ String generatedId3 = bulkResponse.getItems()[2].getId();
+
+ assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete"));
+ assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[3].getId(), equalTo("1"));
+
+ assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true));
+ assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1"));
+
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
+ assertNoFailures(refreshResponse);
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(10));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.isExists(), equalTo(false));
+
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("2", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+
+ getResult = client().get(getRequest("test").type("type1").id(generatedId3)).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("3", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+ }
+
+ private XContentBuilder source(String id, String nameValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1").field("id", id).field("name", nameValue).endObject().endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/explain/ExplainActionTests.java b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java
new file mode 100644
index 0000000..31473ee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.explain;
+
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.queryString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class ExplainActionTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testSimple() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(
+ ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)
+ ).execute().actionGet();
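+ // refresh_interval of -1 disables automatic refresh, so the doc stays invisible until an explicit refresh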
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "test", "1")
+ .setSource("field", "value1")
+ .execute().actionGet();
+
+ ExplainResponse response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertFalse(response.isExists()); // doc not visible yet b/c explain is not realtime
+ assertFalse(response.isMatch()); // and therefore not a match either
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
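+ // match_all assigns every document a constant score of 1.0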
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.termQuery("field", "value2"))
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertFalse(response.getExplanation().isMatch());
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.boolQuery()
+ .must(QueryBuilders.termQuery("field", "value1"))
+ .must(QueryBuilders.termQuery("field", "value2"))
+ )
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertFalse(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getDetails().length, equalTo(2));
+
+ response = client().prepareExplain("test", "test", "2")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertFalse(response.isExists());
+ assertFalse(response.isMatch());
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testExplainWithFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "test", "1")
+ .setSource(
+ jsonBuilder().startObject()
+ .startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ ExplainResponse response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1")
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getFields().size(), equalTo(1));
+ assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1"));
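+ // _source was not requested, so only the explicitly listed fields are fetched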
+ assertThat(response.getGetResult().isSourceEmpty(), equalTo(true));
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1")
+ .setFetchSource(true)
+ .get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getFields().size(), equalTo(1));
+ assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1"));
+ assertThat(response.getGetResult().isSourceEmpty(), equalTo(false));
+
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1", "obj1.field2")
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ String v1 = (String) response.getGetResult().field("obj1.field1").getValue();
+ String v2 = (String) response.getGetResult().field("obj1.field2").getValue();
+ assertThat(v1, equalTo("value1"));
+ assertThat(v2, equalTo("value2"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testExplainWithSource() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "test", "1")
+ .setSource(
+ jsonBuilder().startObject()
+ .startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ ExplainResponse response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFetchSource("obj1.field1", null)
+ .get();
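+ // setFetchSource(include, exclude): only obj1.field1 should survive the include filter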
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getSource().size(), equalTo(1));
+ assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1"));
+
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFetchSource(null, "obj1.field2")
+ .execute().actionGet();
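+ // with obj1.field2 excluded, obj1.field1 is the only field left under obj1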
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1"));
+ }
+
+
+ @Test
+ public void testExplainWithAlias() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAlias("test", "alias1", FilterBuilders.termFilter("field2", "value2"))
+ .execute().actionGet();
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1", "field2", "value1").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
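+ // the alias filter requires field2 == value2 but the doc has field2 == value1, so it exists yet should not match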
+ ExplainResponse response = client().prepareExplain("alias1", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ }
+
+ @Test
+ public void explainDateRangeInQueryString() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).get();
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+
+ refresh();
+
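+ // date math: now-2M/d is two months ago rounded down to the day, so "past" (one month ago) falls inside the range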
+ ExplainResponse explainResponse = client().prepareExplain("test", "type", "1").setQuery(queryString("past:[now-2M/d TO now/d]")).get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.isMatch(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java b/src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java
new file mode 100644
index 0000000..c0b4930
--- /dev/null
+++ b/src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.flt;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.fuzzyLikeThisFieldQuery;
+import static org.elasticsearch.index.query.QueryBuilders.fuzzyLikeThisQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class FuzzyLikeThisActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // See issue https://github.com/elasticsearch/elasticsearch/issues/3252
+ public void testNumericField() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)))
+ .addMapping("type", "int_value", "type=integer"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", "type", "2")
+ .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ // flt query with no field -> OK
+ SearchResponse searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisQuery().likeText("index")).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+
+ // flt query with string fields
+ searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value").likeText("index")).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+
+ // flt query with at least a numeric field -> fail by default
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value", "int_value").likeText("index")), SearchPhaseExecutionException.class);
+
+ // flt query with at least a numeric field -> fail when explicitly requested
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+
+ // flt query with at least a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(false)).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+
+ // flt field query on a numeric field -> failure by default
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisFieldQuery("int_value").likeText("42")), SearchPhaseExecutionException.class);
+
+ // flt field query on a numeric field -> failure when explicitly requested
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisFieldQuery("int_value").likeText("42").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+ // flt field query on a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisFieldQuery("int_value").likeText("42").failOnUnsupportedField(false)).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0L));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java b/src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java
new file mode 100644
index 0000000..57b8a28
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.fs;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class IndexGatewayTests extends ElasticsearchIntegrationTest {
+
+ private String storeType;
+ private final SetOnce<Settings> settings = new SetOnce<Settings>();
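+ // built once via SetOnce so every node start and restart within a test uses identical settings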
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ if (settings.get() == null) {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("cluster.routing.schedule", "100ms");
+ builder.put("gateway.type", "fs");
+ if (between(0, 5) == 0) {
+ builder.put("gateway.fs.buffer_size", between(1, 100) + "kb");
+ }
+ if (between(0, 5) == 0) {
+ builder.put("gateway.fs.chunk_size", between(1, 100) + "kb");
+ }
+ builder.put("index.number_of_replicas", "1");
+ builder.put("index.number_of_shards", rarely() ? Integer.toString(between(2, 6)) : "1");
+ storeType = rarely() ? "ram" : "fs";
+ builder.put("index.store.type", storeType);
+ settings.set(builder.build());
+ }
+ return settings.get();
+ }
+
+
+ protected boolean isPersistentStorage() {
+ assertNotNull(storeType);
+ return "fs".equals(settings.get().get("index.store.type"));
+ }
+
+ @Test
+ @Slow
+ public void testSnapshotOperations() throws Exception {
+ cluster().startNode(nodeSettings(0));
+
+ // get the environment, so we can clear the work dir when needed
+ Environment environment = cluster().getInstance(Environment.class);
+
+
+ logger.info("Running Cluster Health (waiting for node to startup properly)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ // Translog tests
+
+ logger.info("Creating index [{}]", "test");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ // create a mapping
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type1").setSource(mappingSource()).execute().actionGet();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ // verify that mapping is there
+ ClusterStateResponse clusterState = client().admin().cluster().state(clusterStateRequest()).actionGet();
+ assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());
+
+ // create two and delete the first
+ logger.info("Indexing #1");
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ logger.info("Indexing #2");
+ client().index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+
+ // perform snapshot to the index
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Deleting #1");
+ client().delete(deleteRequest("test").type("type1").id("1")).actionGet();
+
+ // perform snapshot to the index
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+ logger.info("Gateway Snapshot (should be a no op)");
+ // do it again, it should be a no op
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Starting the server, should recover from the gateway (only translog should be populated)");
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // verify that mapping is there
+ clusterState = client().admin().cluster().state(clusterStateRequest()).actionGet();
+ assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());
+
+ logger.info("Getting #1, should not exists");
+ GetResponse getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+
+ // Now flush and add some data (so we have index recovery as well)
+ logger.info("Flushing, so we have actual content in the index files (#2 should be in the index)");
+ client().admin().indices().flush(flushRequest("test")).actionGet();
+ logger.info("Indexing #3, so we have something in the translog as well");
+ client().index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test"))).actionGet();
+
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+ logger.info("Gateway Snapshot (should be a no op)");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ logger.info("Getting #1, should not exists");
+ getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+ logger.info("Getting #3 (from the translog)");
+ getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
+ FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
+ logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ logger.info("Getting #1, should not exists");
+ getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+ logger.info("Getting #3 (from the translog)");
+ getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
+
+
+ logger.info("Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
+ client().admin().indices().flush(flushRequest("test")).actionGet();
+
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+ logger.info("Gateway Snapshot (should be a no op)");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ logger.info("Getting #1, should not exists");
+ getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+ logger.info("Getting #3 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
+
+ logger.info("Deleting the index");
+ client().admin().indices().delete(deleteIndexRequest("test")).actionGet();
+ }
+
+ @Test
+ @Nightly
+ public void testLoadWithFullRecovery() {
+ testLoad(true);
+ }
+
+ @Test
+ @Nightly
+ public void testLoadWithReuseRecovery() {
+ testLoad(false);
+ }
+
+ private void testLoad(boolean fullRecovery) {
+ logger.info("Running with fullRecover [{}]", fullRecovery);
+
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("Running Cluster Health (waiting for node to startup properly)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ // get the environment, so we can clear the work dir when needed
+ Environment environment = cluster().getInstance(Environment.class);
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+
+ logger.info("--> refreshing and checking count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+
+ logger.info("--> indexing 1234 docs");
+ for (long i = 0; i < 1234; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setCreate(true) // make sure we use create, so if we recover wrongly, we will get increments...
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+
+ // snapshot every 11 docs so we get some actions going on in the gateway
+ if ((i % 11) == 0) {
+ client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
+ }
+ // flush every once in a while, so we get different data
+ if ((i % 55) == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ logger.info("--> refreshing and checking count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1234l));
+
+
+ logger.info("--> closing the server");
+ cluster().stopRandomNode();
+ if (fullRecovery) {
+ logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
+ FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
+ logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
+ }
+
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("--> running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("--> done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ logger.info("--> checking count");
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1234l));
+
+ logger.info("--> checking reuse / recovery status");
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().setRecovery(true).execute().actionGet();
+ for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
+ for (ShardStatus shardStatus : indexShardStatus) {
+ if (shardStatus.getShardRouting().primary()) {
+ if (fullRecovery || !isPersistentStorage()) {
+ assertThat(shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), equalTo(0l));
+ } else {
+ assertThat(shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(shardStatus.getGatewayRecoveryStatus().getIndexSize().bytes() - 8196 /* segments file and others */));
+ }
+ }
+ }
+ }
+ }
+
+ private String mappingSource() {
+ return "{ type1 : { properties : { name : { type : \"string\" } } } }";
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+
+ @Test
+ @Slow
+ public void testRandom() {
+ testLoad(randomBoolean());
+ }
+
+ @Test
+ @Slow
+ public void testIndexActions() throws Exception {
+ cluster().startNode(nodeSettings(0));
+
+ logger.info("Running Cluster Health (waiting for node to startup properly)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ assertAcked(client().admin().indices().create(createIndexRequest("test")).actionGet());
+
+ cluster().stopRandomNode();
+ cluster().startNode(nodeSettings(0));
+ assertTrue("index should exists", awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ try {
+ client().admin().indices().create(createIndexRequest("test")).actionGet();
+ return false;
+ } catch (IndexAlreadyExistsException e) {
+ // all is well
+ return true;
+ }
+ }
+ }));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java b/src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java
new file mode 100644
index 0000000..5688e02
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java
@@ -0,0 +1,535 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.test.*;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(LocalGatewayIndexStateTests.class);
+
+ @Test
+ public void testMappingMetaDataParsed() throws Exception {
+
+ logger.info("--> starting 1 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+
+ logger.info("--> creating test index, with meta routing");
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> waiting for yellow status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(5).setWaitForYellowStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ ClusterStateResponse response = client().admin().cluster().prepareState().execute().actionGet();
+ System.out.println("" + response);
+ }
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify meta _routing required exists");
+ MappingMetaData mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
+ assertThat(mappingMd.routing().required(), equalTo(true));
+
+ logger.info("--> restarting nodes...");
+ cluster().fullRestart();
+
+ logger.info("--> waiting for yellow status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(5).setWaitForYellowStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ ClusterStateResponse response = client().admin().cluster().prepareState().execute().actionGet();
+ System.out.println("" + response);
+ }
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify meta _routing required exists");
+ mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
+ assertThat(mappingMd.routing().required(), equalTo(true));
+ }
+
+ @Test
+ public void testSimpleOpenClose() throws Exception {
+
+ logger.info("--> starting 2 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> creating test index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(2));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> verifying that the state is green");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+
+ logger.info("--> creating another index (test2) by indexing into it");
+ client().prepareIndex("test2", "type1", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> verifying that the state is green");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> opening the first index again...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> verifying that the state is green");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(2));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> trying to get the indexed document on the first index");
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> restarting nodes...");
+ cluster().fullRestart();
+ logger.info("--> waiting for two nodes and green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+
+ logger.info("--> opening index...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(2));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> trying to get the indexed document on the first round (before close and shutdown)");
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1").execute().actionGet();
+ }
+
+ @Test
+ public void testJustMasterNode() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 1 master node non data");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> create an index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> closing master node");
+ cluster().closeNonSharedNodes(false);
+
+ logger.info("--> starting 1 master node non data again");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> waiting for test index to be created");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify we have an index");
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().setIndices("test").execute().actionGet();
+ assertThat(clusterStateResponse.getState().metaData().hasIndex("test"), equalTo(true));
+ }
+
+ @Test
+ public void testJustMasterNodeAndJustDataNode() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 1 master node non data");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+ cluster().startNode(settingsBuilder().put("node.master", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> create an index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> waiting for test index to be created");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").setWaitForYellowStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").setTimeout("100ms").execute().actionGet();
+ }
+
+ @Test
+ public void testTwoNodesSingleDoc() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 2 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> opening the index...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testDanglingIndicesAutoImportYes() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "yes")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
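+ // resetting node_1's gateway wipes its on-disk state, so the index survives only on the second node and becomes dangling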
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ // spin a bit waiting for the index to exist
+ long time = System.currentTimeMillis();
+ while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) {
+ if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ break;
+ }
+ }
+
+ logger.info("--> verify that the dangling index exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testDanglingIndicesAutoImportClose() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "closed")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
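+ // "closed" imports dangling indices back into the cluster state, but in the CLOSE state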
+
+
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ // spin a bit waiting for the index to exist
+ long time = System.currentTimeMillis();
+ while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) {
+ if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ break;
+ }
+ }
+
+ logger.info("--> verify that the dangling index exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify the index state is closed");
+ assertThat(client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ logger.info("--> open the index");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testDanglingIndicesNoAutoImport() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "no")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
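+ // "no" leaves dangling indices on disk without importing them into the cluster state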
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ // we need to wait for the allocate-dangled action to kick in (even though in this
+ // case it's disabled), just to make sure it really does nothing
+ Thread.sleep(500);
+
+ logger.info("--> verify that the dangling index does not exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
+
+ logger.info("--> restart start the nodes, but make sure we do recovery only after we have 2 nodes in the cluster");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify that the dangling index does exists now!");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
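+ // Even with auto-import disabled, creating a new index with the same name on
+ // the node that still holds the dangling copy must not resurrect the old docs.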
+ @Test
+ public void testDanglingIndicesNoAutoImportStillDanglingAndCreatingSameIndex() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "no")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
+
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1L));
+ }
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify that the dangling index does not exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
+
+ logger.info("--> close the first node, so we remain with the second that has the dangling index");
+ cluster().stopRandomNode(TestCluster.nameFilter(node_1));
+
+ logger.info("--> index a different doc");
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").setRefresh(true).execute().actionGet();
+
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "2").execute().actionGet().isExists(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java b/src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java
new file mode 100644
index 0000000..2eb286c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests local gateway recovery when only part of the cluster comes back,
+ * exercising quorum-based primary allocation and the recovery.initial_shards setting.
+ */
+@ClusterScope(numNodes=0, scope=Scope.TEST)
+public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
+
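+ // Restart only one of the three nodes: with the default quorum requirement the
+ // primaries cannot be allocated until recovery.initial_shards is lowered to 1.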
+ @Test
+ @Slow
+ public void testChangeInitialShardsRecovery() throws Exception {
+ logger.info("--> starting 3 nodes");
+ final String[] nodes = new String[3];
+ nodes[0] = cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ nodes[1] = cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ nodes[2] = cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+
+ logger.info("--> indexing...");
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+ // We don't check for failures in the flush response: if we did, we might get the following:
+ // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
+ assertNoFailures(client().admin().indices().prepareRefresh().execute().get());
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(6)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2L);
+ }
+
+ final String nodeToRemove = nodes[between(0,2)];
+ logger.info("--> restarting 1 nodes -- kill 2");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.type", "local").build();
+ }
+
+ @Override
+ public boolean doRestart(String nodeName) {
+ return nodeToRemove.equals(nodeName);
+ }
+ });
+ if (randomBoolean()) {
+ Thread.sleep(between(1, 400)); // wait a bit and give it a chance to try to allocate
+ }
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ ClusterStateResponse clusterStateResponse = cluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
+ return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null;
+ }}), equalTo(true)); // wait until we get a cluster state - it could be null if we are quick enough
+ final ClusterStateResponse clusterStateResponse = cluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
+ assertThat(clusterStateResponse.getState(), notNullValue());
+ assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue());
+ assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false));
+ logger.info("--> change the recovery.initial_shards setting, and make sure its recovered");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get();
+
+ logger.info("--> running cluster_health (wait for the shards to startup), 2 shards since we only have 1 node");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(2)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2L);
+ }
+ }
+
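+ // Full cluster restart with minimum_master_nodes=2: once a quorum of nodes is
+ // back the index recovers to yellow, and a document indexed at that point must
+ // still be there after the last node rejoins.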
+ @Test
+ @Slow
+ public void testQuorumRecovery() throws Exception {
+
+ logger.info("--> starting 3 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ // we are shutting down nodes - make sure we don't have 2 clusters if we test network
+ setMinimumMasterNodes(2);
+
+ logger.info("--> indexing...");
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+ // We don't check for failures in the flush response: if we did, we might get the following:
+ // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
+ assertNoFailures(client().admin().indices().prepareRefresh().get());
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(6)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2L);
+ }
+ logger.info("--> restart all nodes");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return null;
+ }
+
+ @Override
+ public void doAfterNodes(int numNodes, final Client activeClient) throws Exception {
+ if (numNodes == 1) {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(4)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW;
+ }
+ }, 30, TimeUnit.SECONDS), equalTo(true));
+ logger.info("--> one node is closed -- index 1 document into the remaining nodes");
+ activeClient.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get();
+ assertNoFailures(activeClient.admin().indices().prepareRefresh().get());
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(activeClient.prepareCount().setQuery(matchAllQuery()).get(), 3L);
+ }
+ }
+ }
+
+ });
+ logger.info("--> all nodes are started back, verifying we got the latest version");
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(6)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 3L);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java b/src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java
new file mode 100644
index 0000000..4be01d0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java
@@ -0,0 +1,428 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Basic recovery tests for the local gateway: single-node and multi-node full
+ * cluster restarts, with and without flushing, cleared node data, and reuse of
+ * existing files during peer recovery.
+ */
+@ClusterScope(numNodes = 0, scope = Scope.TEST)
+public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTest {
+
+
+ private ImmutableSettings.Builder settingsBuilder() {
+ return ImmutableSettings.settingsBuilder().put("gateway.type", "local");
+ }
+
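+ // Index documents with array-valued fields, then run two full restarts of a
+ // single-node cluster; the data must remain searchable after each recovery.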
+ @Test
+ @Slow
+ public void testX() throws Exception {
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("appAccountIds").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "10990239").setSource(jsonBuilder().startObject()
+ .field("_id", "10990239")
+ .startArray("appAccountIds").value(14).value(179).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "10990473").setSource(jsonBuilder().startObject()
+ .field("_id", "10990473")
+ .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "10990513").setSource(jsonBuilder().startObject()
+ .field("_id", "10990513")
+ .startArray("appAccountIds").value(14).value(179).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "10990695").setSource(jsonBuilder().startObject()
+ .field("_id", "10990695")
+ .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "11026351").setSource(jsonBuilder().startObject()
+ .field("_id", "11026351")
+ .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+ }
+
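+ // Restart a single node without ever flushing, so recovery has to replay all
+ // operations from the translog (the same two docs are re-indexed 100 times).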
+ @Test
+ @Slow
+ public void testSingleNodeNoFlush() throws Exception {
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("field").field("type", "string").endObject().startObject("num").field("type", "integer").endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("_id", "1").field("field", "value1").startArray("num").value(14).value(179).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("_id", "2").field("field", "value2").startArray("num").value(14).endArray().endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).execute().actionGet(), 1);
+ }
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).execute().actionGet(), 1);
+ }
+
+ cluster().fullRestart();
+
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).execute().actionGet(), 1);
+ }
+ }
+
+
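+ // Same idea, but with a flush in between, so recovery combines a Lucene commit
+ // point with translog replay.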
+ @Test
+ @Slow
+ public void testSingleNodeWithFlush() throws Exception {
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+ }
+
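+ // Wipe the first node's data on restart; with gateway.recover_after_nodes=2 the
+ // shards must still be fully recoverable from the second node's copy.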
+ @Test
+ @Slow
+ public void testTwoNodeFirstNodeCleared() throws Exception {
+
+ final String firstNode = cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
+ }
+
+ @Override
+ public boolean clearData(String nodeName) {
+ return firstNode.equals(nodeName);
+ }
+
+ });
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+ }
+
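+ // Index more data and add a mapping, a template and an alias while one node is
+ // down, then verify the rejoining node picks up the latest cluster state (same
+ // metadata UUID, new type/template/alias all present).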
+ @Test
+ @Slow
+ public void testLatestVersionLoaded() throws Exception {
+ // start with two clean nodes
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).put("gateway.recover_after_nodes", 2).build());
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).put("gateway.recover_after_nodes", 2).build());
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+
+ String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid();
+ assertThat(metaDataUuid, not(equalTo("_na_")));
+
+ logger.info("--> closing first node, and indexing more data to the second node");
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public void doAfterNodes(int numNodes, Client client) throws Exception {
+ if (numNodes == 1) {
+ logger.info("--> one node is closed - start indexing data into the second one");
+ client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
+ client.admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client.prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
+ }
+
+ logger.info("--> add some metadata, additional type and template");
+ client.admin().indices().preparePutMapping("test").setType("type2")
+ .setSource(jsonBuilder().startObject().startObject("type2").startObject("_source").field("enabled", false).endObject().endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().prepareAliases().addAlias("test", "test_alias", FilterBuilders.termFilter("field", "value")).execute().actionGet();
+ logger.info("--> starting two nodes back, verifying we got the latest version");
+ }
+
+ }
+
+ });
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid(), equalTo(metaDataUuid));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
+ }
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.metaData().index("test").mapping("type2"), notNullValue());
+ assertThat(state.metaData().templates().get("template_1").template(), equalTo("te*"));
+ assertThat(state.metaData().index("test").aliases().get("test_alias"), notNullValue());
+ assertThat(state.metaData().index("test").aliases().get("test_alias").filter(), notNullValue());
+ }
+
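+ // After a full restart, replicas should mostly reuse the index files already on
+ // disk instead of copying them again from the primary, so the reused size must
+ // exceed the freshly recovered size.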
+ @Test
+ @Slow
+ public void testReusePeerRecovery() throws Exception {
+
+
+ ImmutableSettings.Builder settings = settingsBuilder()
+ .put("action.admin.cluster.node.shutdown.delay", "10ms")
+ .put("gateway.recover_after_nodes", 4)
+
+ .put(BalancedShardsAllocator.SETTING_THRESHOLD, 1.1f); // use less aggressive settings
+
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing docs");
+ for (int i = 0; i < 1000; i++) {
+ client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
+ if ((i % 200) == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> shutting down the nodes");
+ // Disable allocations while we are closing nodes
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)).execute().actionGet();
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(10)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> shutting down the nodes");
+ // Disable allocations while we are closing nodes
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)).execute().actionGet();
+ cluster().fullRestart();
+
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(10)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus("test").setRecovery(true).execute().actionGet();
+ for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
+ for (ShardStatus shardStatus : indexShardStatus) {
+ if (!shardStatus.getShardRouting().primary()) {
+ logger.info("--> shard {}, recovered {}, reuse {}", shardStatus.getShardId(), shardStatus.getPeerRecoveryStatus().getRecoveredIndexSize(), shardStatus.getPeerRecoveryStatus().getReusedIndexSize());
+ assertThat(shardStatus.getPeerRecoveryStatus().getRecoveredIndexSize().bytes(), greaterThan(0L));
+ assertThat(shardStatus.getPeerRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(0L));
+ assertThat(shardStatus.getPeerRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(shardStatus.getPeerRecoveryStatus().getRecoveredIndexSize().bytes()));
+ }
+ }
+ }
+ }
+
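+ // Only the second node is restarted (the first stays down); the index must still
+ // be recovered from the second node's local copy, whatever the startup order.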
+ @Test
+ @Slow
+ public void testRecoveryDifferentNodeOrderStartup() throws Exception {
+ // we need different data paths so we make sure we start the second node fresh
+
+ final String node_1 = cluster().startNode(settingsBuilder().put("path.data", "data/data1").build());
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
+
+ cluster().startNode(settingsBuilder().put("path.data", "data/data2").build());
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public boolean doRestart(String nodeName) {
+ return !node_1.equals(nodeName);
+ }
+ });
+
+
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 1);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java b/src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java
new file mode 100644
index 0000000..351ed19
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.none;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+
+/**
+ * Tests the gateway.recover_after_nodes, recover_after_master_nodes and
+ * recover_after_data_nodes settings: state recovery stays blocked until enough
+ * nodes of the required type have joined the cluster.
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest {
+
+ private final static TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(1);
+
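+ // Poll the local cluster state until the global METADATA blocks clear or the
+ // timeout elapses; returns whatever blocks remain.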
+ public ImmutableSet<ClusterBlock> waitForNoBlocksOnNode(TimeValue timeout, Client nodeClient) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ ImmutableSet<ClusterBlock> blocks;
+ do {
+ blocks = nodeClient.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA);
+ }
+ while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis());
+ return blocks;
+ }
+
+ public Client startNode(Settings.Builder settings) {
+ String name = cluster().startNode(settings);
+ return cluster().client(name);
+ }
+
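+ // State recovery must stay blocked (STATE_NOT_RECOVERED_BLOCK) until the third
+ // node joins, since gateway.recover_after_nodes is set to 3.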
+ @Test
+ public void testRecoverAfterNodes() throws Exception {
+ logger.info("--> start node (1)");
+ Client clientNode1 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+ assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start node (2)");
+ Client clientNode2 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+ Thread.sleep(BLOCK_WAIT_TIMEOUT.millis());
+ assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(clientNode2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start node (3)");
+ Client clientNode3 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode2).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode3).isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testRecoverAfterMasterNodes() throws Exception {
+ logger.info("--> start master_node (1)");
+ Client master1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (1)");
+ Client data1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (2)");
+ Client data2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start master_node (2)");
+ Client master2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testRecoverAfterDataNodes() throws Exception {
+ logger.info("--> start master_node (1)");
+ Client master1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (1)");
+ Client data1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start master_node (2)");
+ Client master2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (2)");
+ Client data2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java
new file mode 100644
index 0000000..e01f533
--- /dev/null
+++ b/src/test/java/org/elasticsearch/get/GetActionTests.java
@@ -0,0 +1,870 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.get;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class GetActionTests extends ElasticsearchIntegrationTest {
+
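+ // Realtime get must see a document immediately after indexing, even though the
+ // index effectively never refreshes (refresh_interval of -1); a non-realtime
+ // get only sees it after a flush.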
+ @Test
+ public void simpleGetTests() {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+
+ ensureGreen();
+
+ GetResponse response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ logger.info("--> realtime get 1");
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> realtime get 1 (no source, implicit)");
+ response = client().prepareGet("test", "type1", "1").setFields(Strings.EMPTY_ARRAY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getFields().size(), equalTo(0));
+ assertThat(response.getSourceAsBytes(), nullValue());
+
+ logger.info("--> realtime get 1 (no source, explicit)");
+ response = client().prepareGet("test", "type1", "1").setFetchSource(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getFields().size(), equalTo(0));
+ assertThat(response.getSourceAsBytes(), nullValue());
+
+ logger.info("--> realtime get 1 (no type)");
+ response = client().prepareGet("test", null, "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> non realtime get 1");
+ response = client().prepareGet("test", "type1", "1").setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> realtime fetch of field (requires fetching parsing source)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsBytes(), nullValue());
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> realtime fetch of field & source (requires fetching parsing source)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").setFetchSource("field1", null).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap(), hasKey("field1"));
+ assertThat(response.getSourceAsMap(), not(hasKey("field2")));
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> flush the index, so we load it from it");
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> realtime get 1 (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> non realtime get 1 (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> realtime fetch of field (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsBytes(), nullValue());
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> realtime fetch of field & source (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").setFetchSource(true).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsBytes(), not(nullValue()));
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> update doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").execute().actionGet();
+
+ logger.info("--> realtime get 1");
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_1"));
+
+ logger.info("--> update doc 1 again");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_2", "field2", "value2_2").execute().actionGet();
+
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_2"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_2"));
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ }
+
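+ // Multi-get mixes existing and missing ids and must report one response per
+ // item, in request order.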
+ @Test
+ public void simpleMultiGetTests() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // fine
+ }
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+
+ ensureGreen();
+
+ MultiGetResponse response = client().prepareMultiGet().add("test", "type1", "1").execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+
+ response = client().prepareMultiGet()
+ .add("test", "type1", "1")
+ .add("test", "type1", "15")
+ .add("test", "type1", "3")
+ .add("test", "type1", "9")
+ .add("test", "type1", "11")
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(5));
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("15"));
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(false));
+ assertThat(response.getResponses()[2].getId(), equalTo("3"));
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[3].getId(), equalTo("9"));
+ assertThat(response.getResponses()[3].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[4].getId(), equalTo("11"));
+ assertThat(response.getResponses()[4].getResponse().isExists(), equalTo(false));
+
+ // multi get with specific field
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").fields("field"))
+ .add(new MultiGetRequest.Item("test", "type1", "3").fields("field"))
+ .execute().actionGet();
+
+ assertThat(response.getResponses().length, equalTo(2));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsBytes(), nullValue());
+ assertThat(response.getResponses()[0].getResponse().getField("field").getValues().get(0).toString(), equalTo("value1"));
+ }
+
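+ // With _source compression enabled, a realtime get must transparently
+ // decompress the source and return the original field value.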
+ @Test
+ public void realtimeGetWithCompress() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("_source").field("compress", true).endObject().endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+
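+ // build a ~10k character value so the compressed _source path is actually exercised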
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 10000; i++) {
+ sb.append((char) i);
+ }
+ String fieldValue = sb.toString();
+ client().prepareIndex("test", "type", "1").setSource("field", fieldValue).execute().actionGet();
+
+ // realtime get
+ GetResponse getResponse = client().prepareGet("test", "type", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo(fieldValue));
+ }
+
+ @Test
+ public void getFieldsWithDifferentTypes() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", true).endObject().endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("str").field("type", "string").field("store", "yes").endObject()
+ .startObject("strs").field("type", "string").field("store", "yes").endObject()
+ .startObject("int").field("type", "integer").field("store", "yes").endObject()
+ .startObject("ints").field("type", "integer").field("store", "yes").endObject()
+ .startObject("date").field("type", "date").field("store", "yes").endObject()
+ .startObject("binary").field("type", "binary").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(
+ jsonBuilder().startObject()
+ .field("str", "test")
+ .field("strs", new String[]{"A", "B", "C"})
+ .field("int", 42)
+ .field("ints", new int[]{1, 2, 3, 4})
+ .field("date", "2012-11-13T15:26:14.000Z")
+ .field("binary", Base64.encodeBytes(new byte[]{1, 2, 3}))
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type2", "1").setSource(
+ jsonBuilder().startObject()
+ .field("str", "test")
+ .field("strs", new String[]{"A", "B", "C"})
+ .field("int", 42)
+ .field("ints", new int[]{1, 2, 3, 4})
+ .field("date", "2012-11-13T15:26:14.000Z")
+ .field("binary", Base64.encodeBytes(new byte[]{1, 2, 3}))
+ .endObject()).execute().actionGet();
+
+ // realtime get with stored source
+ logger.info("--> realtime get (from source)");
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Long) getResponse.getField("int").getValue(), equalTo(42l));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1L, 2L, 3L, 4L));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat(getResponse.getField("binary").getValue(), instanceOf(String.class)); // its a String..., not binary mapped
+
+ logger.info("--> realtime get (from stored fields)");
+ getResponse = client().prepareGet("test", "type2", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Integer) getResponse.getField("int").getValue(), equalTo(42));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1, 2, 3, 4));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat((BytesReference) getResponse.getField("binary").getValue(), equalTo((BytesReference) new BytesArray(new byte[]{1, 2, 3})));
+
+ logger.info("--> flush the index, so we load it from it");
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> non realtime get (from source)");
+ getResponse = client().prepareGet("test", "type1", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Long) getResponse.getField("int").getValue(), equalTo(42l));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1L, 2L, 3L, 4L));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat(getResponse.getField("binary").getValue(), instanceOf(String.class)); // its a String..., not binary mapped
+
+ logger.info("--> non realtime get (from stored fields)");
+ getResponse = client().prepareGet("test", "type2", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Integer) getResponse.getField("int").getValue(), equalTo(42));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1, 2, 3, 4));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat((BytesReference) getResponse.getField("binary").getValue(), equalTo((BytesReference) new BytesArray(new byte[]{1, 2, 3})));
+ }
+
+ @Test
+ public void testGetDocWithMultivaluedFields() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore, the index may not exist yet
+ }
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2")
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", mapping1)
+ .addMapping("type2", mapping2)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ ensureGreen();
+
+ GetResponse response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ response = client().prepareGet("test", "type2", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type2", "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject())
+ .execute().actionGet();
+
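+ // realtime gets, served from the translog since no refresh has happened yet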
+ response = client().prepareGet("test", "type1", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getType(), equalTo("type1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+
+ response = client().prepareGet("test", "type2", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getType(), equalTo("type2"));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+ // Now test values being fetched from stored fields.
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareGet("test", "type1", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+
+ response = client().prepareGet("test", "type2", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+ }
+
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithExclude() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("excludes", "excluded")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").field("excluded", "should not be seen").endObject())
+ .execute().actionGet();
+
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").execute().actionGet();
+ client().admin().indices().prepareFlush(index).execute().actionGet();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").execute().actionGet();
+
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("field"));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+ }
+
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithInclude() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("includes", "included")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").field("included", "should be seen").endObject())
+ .execute().actionGet();
+
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").execute().actionGet();
+ client().admin().indices().prepareFlush(index).execute().actionGet();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").execute().actionGet();
+
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included"));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithIncludeExcludeAndFields() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("includes", "included")
+ .array("exlcudes", "excluded")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("field", "1", "2")
+ .startObject("included").field("field", "should be seen").field("field2", "extra field to remove").endObject()
+ .startObject("excluded").field("field", "should not be seen").field("field2", "should not be seen").endObject()
+ .endObject())
+ .execute().actionGet();
+
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").execute().actionGet();
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included"));
+
+ // now test that extra source filtering works as expected
+ GetResponse responseBeforeFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field")
+ .setFetchSource(new String[]{"field", "*.field"}, new String[]{"*.field2"}).get();
+ assertThat(responseBeforeFlushWithExtraFilters.isExists(), is(true));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), hasKey("included"));
+ assertThat((Map<String, Object>) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), hasKey("field"));
+ assertThat((Map<String, Object>) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), not(hasKey("field2")));
+
+ client().admin().indices().prepareFlush(index).execute().actionGet();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").execute().actionGet();
+ GetResponse responseAfterFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field")
+ .setFetchSource("*.field", "*.field2").get();
+
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+
+ assertThat(responseAfterFlushWithExtraFilters.isExists(), is(true));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsString(), is(responseAfterFlushWithExtraFilters.getSourceAsString()));
+ }
+
+ @Test
+ public void testGetWithVersion() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ GetResponse response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").execute().actionGet();
+
+ // From translog:
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ response = client().prepareGet("test", "type1", "1").setVersion(1).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
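+ // any version other than the current one must trigger a conflict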
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(2).execute().actionGet();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ }
+
+ // From Lucene index:
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ response = client().prepareGet("test", "type1", "1").setVersion(1).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(2).setRealtime(false).execute().actionGet();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ }
+
+ logger.info("--> index doc 1 again, so increasing the version");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").execute().actionGet();
+
+ // From translog:
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(1).execute().actionGet();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ }
+
+ response = client().prepareGet("test", "type1", "1").setVersion(2).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+
+ // From Lucene index:
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(1).setRealtime(false).execute().actionGet();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ }
+
+ response = client().prepareGet("test", "type1", "1").setVersion(2).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+ }
+
+ @Test
+ public void testMultiGetWithVersion() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore, the index may not exist yet
+ }
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ MultiGetResponse response = client().prepareMultiGet().add("test", "type1", "1").execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
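+ // every doc is now at version 1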
+
+ // Version from translog
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(2))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version 0 means ignore version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+ //Version from Lucene index
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(2))
+ .setRealtime(false)
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version 0 means ignore version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
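+ // re-indexing the same ids bumps each doc to version 2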
+
+ // Version from translog
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(2))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version 0 means ignore version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+
+
+ //Version from Lucene index
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(2))
+ .setRealtime(false)
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version 0 means ignore version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ }
+
+ @Test
+ public void testGetFields_metaData() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .get();
+
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
+ .get();
+
+ GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setFields("field1", "_routing")
+ .get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value"));
+ assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true));
+ assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1"));
+
+ client().admin().indices().prepareFlush("my-index").get();
+
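+ // after the flush the same get should be served from the Lucene index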
+ client().prepareGet("my-index", "my-type1", "1")
+ .setFields("field1", "_routing")
+ .setRouting("1")
+ .get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value"));
+ assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true));
+ assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1"));
+ }
+
+ @Test
+ public void testGetFields_nonLeafField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type1", jsonBuilder().startObject().startObject("my-type1").startObject("properties")
+ .startObject("field1").startObject("properties")
+ .startObject("field2").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .get();
+
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
+ .get();
+
+ try {
+ client().prepareGet("my-index", "my-type1", "1").setFields("field1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ client().admin().indices().prepareFlush("my-index").get();
+
+ try {
+ client().prepareGet("my-index", "my-type1", "1").setFields("field1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+ }
+
+ @Test
+ public void testGetFields_complexField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties")
+ .startObject("field1").field("type", "object")
+ .startObject("field2").field("type", "object")
+ .startObject("field3").field("type", "object")
+ .startObject("field4").field("type", "string").field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+
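+ // a doc where field1.field2.field3.field4 occurs twice, with values "value1" and "value2"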
+ BytesReference source = jsonBuilder().startObject()
+ .startArray("field1")
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().bytes();
+
+ client().prepareIndex("my-index", "my-type1", "1").setSource(source).get();
+ client().prepareIndex("my-index", "my-type2", "1").setSource(source).get();
+
+
+ String field = "field1.field2.field3.field4";
+ GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+ getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+ client().admin().indices().prepareFlush("my-index").get();
+
+ getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+ getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java b/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java
new file mode 100644
index 0000000..1b6e4aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+public class IndexRequestBuilderTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testSetSource() throws InterruptedException, ExecutionException {
+ createIndex("test");
+ ensureYellow();
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("test_field", "foobar");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[] {
+ client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar"),
+ client().prepareIndex("test", "test").setSource("{\"test_field\" : \"foobar\"}"),
+ client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}")),
+ client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), randomBoolean()),
+ client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}").toBytes()),
+ client().prepareIndex("test", "test").setSource(map)
+ };
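+ // all of these overloads should index the same document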
+ indexRandom(true, builders);
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")).get();
+ ElasticsearchAssertions.assertHitCount(searchResponse, builders.length);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testOddNumberOfSourceObjects() {
+ client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar", new Object());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/VersionTypeTests.java b/src/test/java/org/elasticsearch/index/VersionTypeTests.java
new file mode 100644
index 0000000..01aabd2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/VersionTypeTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class VersionTypeTests extends ElasticsearchTestCase {
+ @Test
+ public void testInternalVersionConflict() throws Exception {
+
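+ // MATCH_ANY never conflicts, regardless of the version in the index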
+ assertFalse(VersionType.INTERNAL.isVersionConflict(10, Versions.MATCH_ANY));
+ // if we don't have a version in the index we accept everything
+ assertFalse(VersionType.INTERNAL.isVersionConflict(Versions.NOT_SET, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflict(Versions.NOT_SET, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), anything but MATCH_ANY is a conflict
+ assertTrue(VersionType.INTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertTrue(VersionType.INTERNAL.isVersionConflict(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+ // and the usual case
+ assertFalse(VersionType.INTERNAL.isVersionConflict(10, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflict(9, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflict(10, 9));
+
+// Old indexing code, dictating behavior
+// if (expectedVersion != Versions.MATCH_ANY && currentVersion != Versions.NOT_SET) {
+// // an explicit version is provided, see if there is a conflict
+// // if we did not find anything, and a version is provided, so we do expect to find a doc under that version
+// // this is important, since we don't allow to preset a version in order to handle deletes
+// if (currentVersion == Versions.NOT_FOUND) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), Versions.NOT_FOUND, expectedVersion);
+// } else if (expectedVersion != currentVersion) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
+// }
+// }
+// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+ }
+
+ @Test
+ public void testExternalVersionConflict() throws Exception {
+
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_SET, 10));
+ // MATCH_ANY must conflict in the external version case, as the version must be set: it is used as the new value
+ assertTrue(VersionType.EXTERNAL.isVersionConflict(10, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), we always accept
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+ // and the standard behavior
+ assertTrue(VersionType.EXTERNAL.isVersionConflict(10, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(9, 10));
+ assertTrue(VersionType.EXTERNAL.isVersionConflict(10, 9));
+
+
+// Old indexing code, dictating behavior
+// // an external version is provided, just check, if a local version exists, that it's higher than it
+// // the actual version checking is done in an external system, and we just want to not index older versions
+// if (currentVersion >= 0) { // we can check, it's there
+// if (currentVersion >= index.version()) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, index.version());
+// }
+// }
+// updatedVersion = index.version();
+ }
+
+
+ @Test
+ public void testUpdateVersion() {
+
+ assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(1L));
+ assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(1L));
+ assertThat(VersionType.INTERNAL.updateVersion(1, 1), equalTo(2L));
+ assertThat(VersionType.INTERNAL.updateVersion(2, Versions.MATCH_ANY), equalTo(3L));
+
+
+ assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(10L));
+ assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(10L));
+ assertThat(VersionType.EXTERNAL.updateVersion(1, 10), equalTo(10L));
+
+// Old indexing code
+// if (index.versionType() == VersionType.INTERNAL) { // internal version type
+// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+// } else { // external version type
+// updatedVersion = expectedVersion;
+// }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
new file mode 100644
index 0000000..65ecafb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.aliases;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.InvalidAliasNameException;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class IndexAliasesServiceTests extends ElasticsearchTestCase {
+ public static IndexAliasesService newIndexAliasesService() {
+ return new IndexAliasesService(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS, newIndexQueryParserService());
+ }
+
+ public static IndexQueryParserService newIndexQueryParserService() {
+ Injector injector = new ModulesBuilder().add(
+ new IndicesQueriesModule(),
+ new CacheRecyclerModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new CodecModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexSettingsModule(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexNameModule(new Index("test")),
+ new IndexQueryParserModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new AnalysisModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new SimilarityModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new ScriptModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new SettingsModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexEngineModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexCacheModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+ return injector.getInstance(IndexQueryParserService.class);
+ }
+
+ public static CompressedString filter(FilterBuilder filterBuilder) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.close();
+ return new CompressedString(builder.string());
+ }
+
+ @Test
+ public void testFilteringAliases() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "dog")));
+ indexAliasesService.add("all", null);
+
+ assertThat(indexAliasesService.hasAlias("cats"), equalTo(true));
+ assertThat(indexAliasesService.hasAlias("dogs"), equalTo(true));
+ assertThat(indexAliasesService.hasAlias("turtles"), equalTo(false));
+
+ assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("cache(animal:cat)"));
+ assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("BooleanFilter(cache(animal:cat) cache(animal:dog))"));
+
+ // Non-filtering alias should turn off all filters because filters are ORed
+ assertThat(indexAliasesService.aliasFilter("all"), nullValue());
+ assertThat(indexAliasesService.aliasFilter("cats", "all"), nullValue());
+ assertThat(indexAliasesService.aliasFilter("all", "cats"), nullValue());
+
+ indexAliasesService.add("cats", filter(termFilter("animal", "feline")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "canine")));
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("BooleanFilter(cache(animal:canine) cache(animal:feline))"));
+ }
+
+ @Test
+ public void testAliasFilters() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "dog")));
+
+ assertThat(indexAliasesService.aliasFilter(), nullValue());
+ assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("cache(animal:dog)"));
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("BooleanFilter(cache(animal:dog) cache(animal:cat))"));
+
+ indexAliasesService.add("cats", filter(termFilter("animal", "feline")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "canine")));
+
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("BooleanFilter(cache(animal:canine) cache(animal:feline))"));
+ }
+
+ @Test(expected = InvalidAliasNameException.class)
+ public void testRemovedAliasFilter() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.remove("cats");
+ indexAliasesService.aliasFilter("cats");
+ }
+
+
+ @Test
+ public void testUnknownAliasFilter() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "dog")));
+
+ try {
+ indexAliasesService.aliasFilter("unknown");
+ fail();
+ } catch (InvalidAliasNameException e) {
+ // all is well
+ }
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
new file mode 100644
index 0000000..ef94926
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
+import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.*;
+import java.lang.reflect.Field;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class AnalysisModuleTests extends ElasticsearchTestCase {
+
+ private Injector injector;
+
+ public AnalysisService getAnalysisService(Settings settings) {
+ Index index = new Index("test");
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+
+ @Test
+ public void testSimpleConfigurationJson() {
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build();
+ testSimpleConfiguration(settings);
+ }
+
+ @Test
+ public void testSimpleConfigurationYaml() {
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build();
+ testSimpleConfiguration(settings);
+ }
+
+ @Test
+ public void testDefaultFactoryTokenFilters() {
+ assertTokenFilter("keyword_repeat", KeywordRepeatFilter.class);
+ assertTokenFilter("persian_normalization", PersianNormalizationFilter.class);
+ assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class);
+ }
+
+ @Test
+ public void testVersionedAnalyzers() throws Exception {
+ Settings settings2 = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml")
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build();
+ AnalysisService analysisService2 = getAnalysisService(settings2);
+
+ // IndicesAnalysisService always has the current version
+ IndicesAnalysisService indicesAnalysisService2 = injector.getInstance(IndicesAnalysisService.class);
+ assertThat(indicesAnalysisService2.analyzer("default"), is(instanceOf(NamedAnalyzer.class)));
+ NamedAnalyzer defaultNamedAnalyzer = (NamedAnalyzer) indicesAnalysisService2.analyzer("default");
+ assertThat(defaultNamedAnalyzer.analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertLuceneAnalyzerVersion(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer());
+
+ // analysis service has the expected version
+ assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertLuceneAnalyzerVersion(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("standard").analyzer());
+ assertLuceneAnalyzerVersion(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("thai").analyzer());
+ }
+
+ // ugly reflection-based hack to extract the Lucene version from an analyzer
+ private void assertLuceneAnalyzerVersion(org.apache.lucene.util.Version luceneVersion, Analyzer analyzer) throws Exception {
+ Field field = analyzer.getClass().getSuperclass().getDeclaredField("matchVersion");
+ boolean currentAccessible = field.isAccessible();
+ field.setAccessible(true);
+ Object obj = field.get(analyzer);
+ field.setAccessible(currentAccessible);
+
+ assertThat(obj, instanceOf(org.apache.lucene.util.Version.class));
+ org.apache.lucene.util.Version analyzerVersion = (org.apache.lucene.util.Version) obj;
+ assertThat(analyzerVersion, is(luceneVersion));
+ }
+
+ private void assertTokenFilter(String name, Class<?> clazz) {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(ImmutableSettings.settingsBuilder().build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter(name);
+ Tokenizer tokenizer = new WhitespaceTokenizer(Version.CURRENT.luceneVersion, new StringReader("foo bar"));
+ TokenStream stream = tokenFilter.create(tokenizer);
+ assertThat(stream, instanceOf(clazz));
+ }
+
+ private void testSimpleConfiguration(Settings settings) {
+ AnalysisService analysisService = getAnalysisService(settings);
+ Analyzer analyzer = analysisService.analyzer("custom1").analyzer();
+
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom1 = (CustomAnalyzer) analyzer;
+ assertThat(custom1.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+ assertThat(custom1.tokenFilters().length, equalTo(2));
+
+ StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
+ assertThat(stop1.stopWords().size(), equalTo(1));
+ //assertThat((Iterable<char[]>) stop1.stopWords(), hasItem("test-stop".toCharArray()));
+
+ analyzer = analysisService.analyzer("custom2").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom2 = (CustomAnalyzer) analyzer;
+
+// HtmlStripCharFilterFactory html = (HtmlStripCharFilterFactory) custom2.charFilters()[0];
+// assertThat(html.readAheadLimit(), equalTo(HTMLStripCharFilter.DEFAULT_READ_AHEAD));
+//
+// html = (HtmlStripCharFilterFactory) custom2.charFilters()[1];
+// assertThat(html.readAheadLimit(), equalTo(1024));
+
+ // verify characters mapping
+ analyzer = analysisService.analyzer("custom5").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
+ assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
+
+ // verify aliases
+ analyzer = analysisService.analyzer("alias1").analyzer();
+ assertThat(analyzer, instanceOf(StandardAnalyzer.class));
+
+ // check custom pattern replace filter
+ analyzer = analysisService.analyzer("custom3").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
+ PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
+ assertThat(patternReplaceCharFilterFactory.getPattern().pattern(), equalTo("sample(.*)"));
+ assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
+
+ // check the analyzer backed by the custom "my" filter class (MyFilterTokenFilterFactory)
+ analyzer = analysisService.analyzer("custom4").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom4 = (CustomAnalyzer) analyzer;
+ assertThat(custom4.tokenFilters()[0], instanceOf(MyFilterTokenFilterFactory.class));
+
+// // verify Czech stemmer
+// analyzer = analysisService.analyzer("czechAnalyzerWithStemmer").analyzer();
+// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+// CustomAnalyzer czechstemmeranalyzer = (CustomAnalyzer) analyzer;
+// assertThat(czechstemmeranalyzer.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+// assertThat(czechstemmeranalyzer.tokenFilters().length, equalTo(4));
+// assertThat(czechstemmeranalyzer.tokenFilters()[3], instanceOf(CzechStemTokenFilterFactory.class));
+//
+// // check dictionary decompounder
+// analyzer = analysisService.analyzer("decompoundingAnalyzer").analyzer();
+// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+// CustomAnalyzer dictionaryDecompounderAnalyze = (CustomAnalyzer) analyzer;
+// assertThat(dictionaryDecompounderAnalyze.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+// assertThat(dictionaryDecompounderAnalyze.tokenFilters().length, equalTo(1));
+// assertThat(dictionaryDecompounderAnalyze.tokenFilters()[0], instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
+
+ Set<?> wordList = Analysis.getWordSet(null, settings, "index.analysis.filter.dict_dec.word_list", Lucene.VERSION);
+ MatcherAssert.assertThat(wordList.size(), equalTo(6));
+// MatcherAssert.assertThat(wordList, hasItems("donau", "dampf", "schiff", "spargel", "creme", "suppe"));
+ }
+
+ @Test
+ public void testWordListPath() throws Exception {
+ Environment env = new Environment(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ String[] words = new String[]{"donau", "dampf", "schiff", "spargel", "creme", "suppe"};
+
+ File wordListFile = generateWordList(words);
+ Settings settings = settingsBuilder().loadFromSource("index: \n word_list_path: " + wordListFile.getAbsolutePath()).build();
+
+ Set<?> wordList = Analysis.getWordSet(env, settings, "index.word_list", Lucene.VERSION);
+ MatcherAssert.assertThat(wordList.size(), equalTo(6));
+// MatcherAssert.assertThat(wordList, hasItems(words));
+ }
+
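+ // writes the words to a temporary UTF-8 file, one per line, deleted on JVM exit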
+ private File generateWordList(String[] words) throws Exception {
+ File wordListFile = File.createTempFile("wordlist", ".txt");
+ wordListFile.deleteOnExit();
+
+ BufferedWriter writer = null;
+ try {
+ writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(wordListFile), Charsets.UTF_8));
+ for (String word : words) {
+ writer.write(word);
+ writer.write('\n');
+ }
+ } finally {
+ if (writer != null) {
+ writer.close();
+ }
+ }
+ return wordListFile;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
new file mode 100644
index 0000000..ddf7b9d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+
+public class AnalysisTestsHelper {
+
+ public static AnalysisService createAnalysisServiceFromClassPath(String resource) {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .loadFromClasspath(resource).build();
+
+ return createAnalysisServiceFromSettings(settings);
+ }
+
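+ // wires a minimal Guice module graph (settings, environment, indices analysis) and resolves the per-index AnalysisService for a throwaway "test" index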
+ public static AnalysisService createAnalysisServiceFromSettings(
+ Settings settings) {
+ Index index = new Index("test");
+
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings),
+ new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+
+ AnalysisModule analysisModule = new AnalysisModule(settings,
+ parentInjector.getInstance(IndicesAnalysisService.class));
+
+ Injector injector = new ModulesBuilder().add(new IndexSettingsModule(index, settings),
+ new IndexNameModule(index), analysisModule).createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java
new file mode 100644
index 0000000..3a23086
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Ignore;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+
+/**
+ */
+public class AnalyzerBackwardsCompatTests extends ElasticsearchTokenStreamTestCase {
+
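+ // analyzers of the given type must filter English stopwords for indices created before noStopwordVersion, and must leave them in place from that version on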
+ @Ignore
+ private void testNoStopwordsAfter(org.elasticsearch.Version noStopwordVersion, String type) throws IOException {
+ final int iters = atLeast(10);
+ org.elasticsearch.Version version = org.elasticsearch.Version.CURRENT;
+ for (int i = 0; i < iters; i++) {
+ ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
+ if (version.onOrAfter(noStopwordVersion)) {
+ if (random().nextBoolean()) {
+ builder.put(SETTING_VERSION_CREATED, version);
+ }
+ } else {
+ builder.put(SETTING_VERSION_CREATED, version);
+ }
+ builder.put("index.analysis.analyzer.foo.type", type);
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
+ NamedAnalyzer analyzer = analysisService.analyzer("foo");
+ if (version.onOrAfter(noStopwordVersion)) {
+ assertAnalyzesTo(analyzer, "this is bogus", new String[]{"this", "is", "bogus"});
+ } else {
+ assertAnalyzesTo(analyzer, "this is bogus", new String[]{"bogus"});
+ }
+ version = randomVersion();
+ }
+ }
+
+ public void testPatternAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "pattern");
+ }
+
+ public void testStandardHTMLStripAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "standard_html_strip");
+ }
+
+ public void testStandardAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_Beta1, "standard");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
new file mode 100644
index 0000000..e657731
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/cjk_analysis.json";
+
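+ // "多くの学生が試験に落ちた。" ("many students failed the exam") exercises the various CJK bigram flag combinations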
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_bigram");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testNoFlags() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_no_flags");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testHanOnly() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_only");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多", "く", "の", "学生", "が", "試験", "に", "落", "ち", "た" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testHanUnigramOnly() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_unigram_only");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多", "く", "の", "学", "学生", "生", "が", "試", "試験", "験", "に", "落", "ち", "た" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
new file mode 100644
index 0000000..a168586
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ */
+public class CharFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testMappingCharFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder()
+ .put("index.analysis.char_filter.my_mapping.type", "mapping")
+ .putArray("index.analysis.char_filter.my_mapping.mappings", "ph=>f", "qu=>q")
+ .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
+ .build();
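+ // the mapping char filter rewrites characters before tokenization: "quit" -> "qit" and "phish" -> "fish"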
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
+
+ // Repeat one more time to make sure that char filter is reinitialized correctly
+ assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
+ }
+
+ @Test
+ public void testHtmlStripCharFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder()
+ .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "html_strip")
+ .build();
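+ // html_strip removes the markup before tokenization; the standard tokenizer then drops the trailing "!"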
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
+
+ // Repeat one more time to make sure that char filter is reinitialized correctly
+ assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
new file mode 100644
index 0000000..61fa566
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class CompoundAnalysisTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultsCompoundAnalysis() throws Exception {
+ Index index = new Index("test");
+ Settings settings = getJsonSettings();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ TokenFilterFactory filterFactory = analysisService.tokenFilter("dict_dec");
+ MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
+ }
+
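+ // the decompounder must emit each original compound plus its dictionary parts, for JSON- and YAML-based settings alike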
+ @Test
+ public void testDictionaryDecompounder() throws Exception {
+ Settings[] settingsArr = new Settings[]{getJsonSettings(), getYamlSettings()};
+ for (Settings settings : settingsArr) {
+ List<String> terms = analyze(settings, "decompoundingAnalyzer", "donaudampfschiff spargelcremesuppe");
+ MatcherAssert.assertThat(terms.size(), equalTo(8));
+ MatcherAssert.assertThat(terms, hasItems("donau", "dampf", "schiff", "donaudampfschiff", "spargel", "creme", "suppe", "spargelcremesuppe"));
+ }
+ }
+
+ private List<String> analyze(Settings settings, String analyzerName, String text) throws IOException {
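+ // runs the named analyzer over the text through an _all-style token stream and collects the emitted terms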
+ Index index = new Index("test");
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", text, 1.0f);
+ allEntries.reset();
+
+ TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+ stream.reset();
+ CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
+
+ List<String> terms = new ArrayList<String>();
+ while (stream.incrementToken()) {
+ String tokText = termAtt.toString();
+ terms.add(tokText);
+ }
+ return terms;
+ }
+
+ private Settings getJsonSettings() {
+ return settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build();
+ }
+
+ private Settings getYamlSettings() {
+ return settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
new file mode 100644
index 0000000..b49d719
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.ProvisionException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+public class HunspellTokenFilterFactoryTests extends ElasticsearchTestCase {
+
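+ // these tests use the hunspell dictionaries under /indices/analyze/conf_dir; dedup defaults to true, recursion_level to 2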
+ @Test
+ public void testDedup() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.dedup(), is(true));
+
+ settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.dedup", false)
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.dedup(), is(false));
+ }
+
+ @Test
+ public void testDefaultRecursionLevel() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.recursionLevel(), is(2));
+ }
+
+ @Test
+ public void testCustomRecursionLevel() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.recursion_level", 0)
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.recursionLevel(), is(0));
+ }
+
+ @Test(expected = ProvisionException.class)
+ public void negativeRecursionLevelShouldFail() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.recursion_level", -1)
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
new file mode 100644
index 0000000..cc2df1b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.FailedToResolveConfigException;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/keep_analysis.json";
+
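+ // requesting a filter that the resource does not configure must yield null rather than a default instance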
+ @Test
+ public void testLoadWithoutSettings() {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep");
+ Assert.assertNull(tokenFilter);
+ }
+
+ @Test
+ public void testLoadOverConfiguredSettings() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.filter.broken_keep_filter.type", "keep")
+ .put("index.analysis.filter.broken_keep_filter.keep_words_path", "does/not/exists.txt")
+ .put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]")
+ .build();
+ try {
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Assert.fail("path and array are configured");
+ } catch (Exception e) {
+ assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+ }
+ }
+
+ @Test
+ public void testKeepWordsPathSettings() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.filter.non_broken_keep_filter.type", "keep")
+ .put("index.analysis.filter.non_broken_keep_filter.keep_words_path", "does/not/exists.txt")
+ .build();
+ try {
+ // the configured keep_words_path does not exist, so creating the analysis service must fail
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ fail("expected an exception due to the non-existent keep_words_path");
+ } catch (Throwable e) {
+ assertThat(e.getCause(), instanceOf(FailedToResolveConfigException.class));
+ }
+
+ settings = ImmutableSettings.settingsBuilder().put(settings)
+ .put("index.analysis.filter.non_broken_keep_filter.keep_words", new String[]{"test"})
+ .build();
+ try {
+ // keep_words and keep_words_path are mutually exclusive, so creating the analysis service must fail
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ fail("expected an exception indicating that [keep_words_path] cannot be used together with [keep_words]");
+ } catch (Throwable e) {
+ assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+ }
+ }
+
+ @Test
+ public void testCaseInsensitiveMapping() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_keep_filter");
+ assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
+ String source = "hello small world";
+ String[] expected = new String[]{"hello", "world"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{1, 2});
+ }
+
+ @Test
+ public void testCaseSensitiveMapping() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_case_sensitive_keep_filter");
+ assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
+ String source = "Hello small world";
+ String[] expected = new String[]{"Hello"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{1});
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
new file mode 100644
index 0000000..9d43d7e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
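+ // without explicit settings the limit filter keeps only the first token (max_token_count defaults to 1)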
+ @Test
+ public void testDefault() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_default.type", "limit").build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_default");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ @Test
+ public void testSettings() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit")
+ .put("index.analysis.filter.limit_1.max_token_count", 3).put("index.analysis.filter.limit_1.consume_all_tokens", true)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the", "quick", "brown" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit")
+ .put("index.analysis.filter.limit_1.max_token_count", 3).put("index.analysis.filter.limit_1.consume_all_tokens", false)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the", "quick", "brown" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit")
+ .put("index.analysis.filter.limit_1.max_token_count", 17).put("index.analysis.filter.limit_1.consume_all_tokens", true)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the", "quick", "brown", "fox" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
new file mode 100644
index 0000000..c17e3e3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ngram.*;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase {
+
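+ // "token_chars" classes are matched case-insensitively (letter, digit, punctuation, ...); unknown class names must be rejected with an ElasticsearchIllegalArgumentException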
+ @Test
+ public void testParseTokenChars() {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = ImmutableSettings.EMPTY;
+ for (String tokenChars : Arrays.asList("letters", "number", "DIRECTIONALITY_UNDEFINED")) {
+ final Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
+ try {
+ new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(""));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException expected) {
+ // OK
+ }
+ }
+ for (String tokenChars : Arrays.asList("letter", " digit ", "punctuation", "DIGIT", "CoNtRoL", "dash_punctuation")) {
+ final Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
+ new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(""));
+ // no exception
+ }
+ }
+
+ @Test
+ public void testPreTokenization() throws IOException {
+ // Make sure that pre-tokenization works correctly, even when the token chars include supplementary characters
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = ImmutableSettings.EMPTY;
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
+ assertTokenStreamContents(new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader("Åbc déf g\uD801\uDC00f ")),
+ new String[] {"Åb", "Åbc", "bc", "dé", "déf", "éf", "g\uD801\uDC00", "g\uD801\uDC00f", "\uD801\uDC00f"});
+ settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
+ assertTokenStreamContents(new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(" a!$ 9")),
+ new String[] {" a", " a!", "a!", "a!$", "!$", "!$ ", "$ ", "$ 9", " 9"});
+ }
+
+ @Test
+ public void testPreTokenizationEdge() throws IOException {
+ // Make sure that pre-tokenization works correctly, even when the token chars include supplementary characters
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = ImmutableSettings.EMPTY;
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
+ assertTokenStreamContents(new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader("Åbc déf g\uD801\uDC00f ")),
+ new String[] {"Åb", "Åbc", "dé", "déf", "g\uD801\uDC00", "g\uD801\uDC00f"});
+ settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
+ assertTokenStreamContents(new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(" a!$ 9")),
+ new String[] {" a", " a!"});
+ }
+
+ @Test
+ public void testBackwardsCompatibilityEdgeNgramTokenizer() throws IllegalArgumentException, IllegalAccessException {
+ int iters = atLeast(20);
+ final Index index = new Index("test");
+ final String name = "ngr";
+ for (int i = 0; i < iters; i++) {
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ if (compatVersion) {
+ assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
+ } else {
+ assertThat(edgeNGramTokenizer, instanceOf(EdgeNGramTokenizer.class));
+ }
+
+ } else {
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
+ }
+ }
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ try {
+ new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader("foo bar"));
+ fail("should fail side:back is not supported anymore");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ }
+
+ }
+
+ @Test
+ public void testBackwardsCompatibilityNgramTokenizer() throws IllegalArgumentException, IllegalAccessException {
+ int iters = atLeast(20);
+ for (int i = 0; i < iters; i++) {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer nGramTokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ if (compatVersion) {
+ assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
+ } else {
+ assertThat(nGramTokenizer, instanceOf(NGramTokenizer.class));
+ }
+
+ } else {
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer nGramTokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
+ }
+ }
+ }
+
+ @Test
+ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws IllegalArgumentException, IllegalAccessException {
+ int iters = atLeast(20);
+ for (int i = 0; i < iters; i++) {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3);
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ }
+ boolean reverse = random().nextBoolean();
+ if (reverse) {
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(index, indexSettings, name, settings).create(new MockTokenizer(new StringReader(
+ "foo bar")));
+ if (compatVersion) {
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ } else if (reverse) {
+ assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class));
+ } else {
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ }
+
+ } else {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3);
+ boolean reverse = random().nextBoolean();
+ if (reverse) {
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(index, indexSettings, name, settings).create(new MockTokenizer(new StringReader(
+ "foo bar")));
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ }
+ }
+ }
+
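+ // reflectively picks a random released V_* constant from the Version class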
+ private Version randomVersion(Random random) throws IllegalArgumentException, IllegalAccessException {
+ Field[] declaredFields = Version.class.getDeclaredFields();
+ List<Field> versionFields = new ArrayList<Field>();
+ for (Field field : declaredFields) {
+ if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getName().startsWith("V_") && field.getType() == Version.class) {
+ versionFields.add(field);
+ }
+ }
+ return (Version) versionFields.get(random.nextInt(versionFields.size())).get(Version.class);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java
new file mode 100644
index 0000000..7f7b363
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class NumericAnalyzerTests extends ElasticsearchTestCase {
+
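+ // a NumericDoubleAnalyzer stream must emit the same raw values, shifts and position increments as a manually configured NumericTokenStream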
+ @Test
+ public void testAttributeEqual() throws IOException {
+ final int precisionStep = 8;
+ final double value = randomDouble();
+ NumericDoubleAnalyzer analyzer = new NumericDoubleAnalyzer(precisionStep);
+
+ final TokenStream ts1 = analyzer.tokenStream("dummy", String.valueOf(value));
+ final NumericTokenStream ts2 = new NumericTokenStream(precisionStep);
+ ts2.setDoubleValue(value);
+ final NumericTermAttribute numTerm1 = ts1.addAttribute(NumericTermAttribute.class);
+ final NumericTermAttribute numTerm2 = ts2.addAttribute(NumericTermAttribute.class);
+ final PositionIncrementAttribute posInc1 = ts1.addAttribute(PositionIncrementAttribute.class);
+ final PositionIncrementAttribute posInc2 = ts2.addAttribute(PositionIncrementAttribute.class);
+ ts1.reset();
+ ts2.reset();
+ while (ts1.incrementToken()) {
+ assertThat(ts2.incrementToken(), is(true));
+ assertThat(posInc1, equalTo(posInc2));
+ // can't use equalTo directly on the numeric attribute because it doesn't implement equals (LUCENE-5070)
+ assertThat(numTerm1.getRawValue(), equalTo(numTerm2.getRawValue()));
+ assertThat(numTerm1.getShift(), equalTo(numTerm2.getShift()));
+ }
+ assertThat(ts2.incrementToken(), is(false));
+ ts1.end();
+ ts2.end();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
new file mode 100644
index 0000000..20f5474
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class PatternCaptureTokenFilterTests extends ElasticsearchTokenStreamTestCase {
+
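+ // "single" and "multi" emit the original token followed by its capture groups; "preserve" emits only the groups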
+ @Test
+ public void testPatternCaptureTokenFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/pattern_capture.json").build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("single");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[]{"foobarbaz","foobar","foo"});
+
+ NamedAnalyzer analyzer2 = analysisService.analyzer("multi");
+
+ assertTokenStreamContents(analyzer2.tokenStream("test", "abc123def"), new String[]{"abc123def","abc","123","def"});
+
+ NamedAnalyzer analyzer3 = analysisService.analyzer("preserve");
+
+ assertTokenStreamContents(analyzer3.tokenStream("test", "foobarbaz"), new String[]{"foobar","foo"});
+ }
+
+
+ @Test(expected=ElasticsearchIllegalArgumentException.class)
+ public void testNoPatterns() {
+ new PatternCaptureGroupTokenFilterFactory(new Index("test"), settingsBuilder().build(), "pattern_capture", settingsBuilder().put("pattern", "foobar").build());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java
new file mode 100644
index 0000000..f9dc905
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+
+/**
+ *
+ */
+public class PreBuiltAnalyzerProviderFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVersioningInFactoryProvider() throws Exception {
+ PreBuiltAnalyzerProviderFactory factory = new PreBuiltAnalyzerProviderFactory("default", AnalyzerScope.INDEX, PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT));
+
+ AnalyzerProvider currentAnalyzerProvider = factory.create("default", ImmutableSettings.Builder.EMPTY_SETTINGS);
+ AnalyzerProvider former090AnalyzerProvider = factory.create("default", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ AnalyzerProvider currentAnalyzerProviderReference = factory.create("default", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ // would love to access the version inside of the lucene analyzer, but that is not possible...
+ assertThat(currentAnalyzerProvider, is(currentAnalyzerProviderReference));
+ assertThat(currentAnalyzerProvider, is(not(former090AnalyzerProvider)));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
new file mode 100644
index 0000000..7719df6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class PreBuiltAnalyzerTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() {
+ Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT);
+ Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.CURRENT);
+
+ // special case, these two are the same instance
+ assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
+ }
+
+ @Test
+ public void testThatDefaultAndStandardAnalyzerChangedIn10Beta1() throws IOException {
+ Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_1_0_0_Beta1);
+ Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
+
+ // special case, these two are the same instance
+ assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
+ final int n = atLeast(10);
+ Version version = Version.CURRENT;
+ for(int i = 0; i < n; i++) {
+ if (version.equals(Version.V_1_0_0_Beta1)) {
+ assertThat(currentDefaultAnalyzer, is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version)));
+ } else {
+ assertThat(currentDefaultAnalyzer, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
+ }
+ Analyzer analyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(version);
+ TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
+ ts.reset();
+ CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
+ List<String> list = new ArrayList<String>();
+ while(ts.incrementToken()) {
+ list.add(charTermAttribute.toString());
+ }
+ if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
+ assertThat(list.size(), is(4));
+ assertThat(list, contains("this", "is", "it", "dude"));
+
+ } else {
+ assertThat(list.size(), is(1));
+ assertThat(list, contains("dude"));
+ }
+ ts.end(); // TokenStream contract: end() before close()
+ ts.close();
+ version = randomVersion();
+ }
+ }
+
+ @Test
+ public void testAnalyzerChangedIn10RC1() throws IOException {
+ Analyzer pattern = PreBuiltAnalyzers.PATTERN.getAnalyzer(Version.V_1_0_0_RC1);
+ Analyzer standardHtml = PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(Version.V_1_0_0_RC1);
+ final int n = atLeast(10);
+ Version version = Version.CURRENT;
+ for(int i = 0; i < n; i++) {
+ if (version.equals(Version.V_1_0_0_RC1)) {
+ assertThat(pattern, is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version)));
+ assertThat(standardHtml, is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version)));
+ } else {
+ assertThat(pattern, not(is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version))));
+ assertThat(standardHtml, not(is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version))));
+ }
+ Analyzer analyzer = randomBoolean() ? PreBuiltAnalyzers.PATTERN.getAnalyzer(version) : PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version);
+ TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
+ ts.reset();
+ CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
+ List<String> list = new ArrayList<String>();
+ while(ts.incrementToken()) {
+ list.add(charTermAttribute.toString());
+ }
+ if (version.onOrAfter(Version.V_1_0_0_RC1)) {
+ assertThat(list.toString(), list.size(), is(4));
+ assertThat(list, contains("this", "is", "it", "dude"));
+
+ } else {
+ assertThat(list.size(), is(1));
+ assertThat(list, contains("dude"));
+ }
+ ts.end(); // TokenStream contract: end() before close()
+ ts.close();
+ version = randomVersion();
+ }
+ }
+
+ @Test
+ public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
+ assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT),
+ is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_0_18_0)));
+ }
+
+ @Test
+ public void testThatInstancesAreCachedAndReused() {
+ assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT),
+ is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)));
+ assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0),
+ is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0)));
+ }
+
+ @Test
+ public void testThatInstancesWithSameLuceneVersionAreReused() {
+ // both are lucene 4.4 and should return the same instance
+ assertThat(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_4),
+ is(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_5)));
+ }
+
+ @Test
+ public void testThatAnalyzersAreUsedInMapping() throws IOException {
+ int randomInt = randomInt(PreBuiltAnalyzers.values().length-1);
+ PreBuiltAnalyzers randomPreBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
+ String analyzerName = randomPreBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
+
+ Version randomVersion = randomVersion();
+ Settings indexSettings = ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
+
+ NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", analyzerName).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
+
+ FieldMapper fieldMapper = docMapper.mappers().name("field").mapper();
+ assertThat(fieldMapper.searchAnalyzer(), instanceOf(NamedAnalyzer.class));
+ NamedAnalyzer fieldMapperNamedAnalyzer = (NamedAnalyzer) fieldMapper.searchAnalyzer();
+
+ assertThat(fieldMapperNamedAnalyzer.analyzer(), is(namedAnalyzer.analyzer()));
+ }
+}
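
The token-collection loop these tests repeat is the standard Lucene consume pattern: reset(), incrementToken() until exhausted, end(), close(). A self-contained sketch of the same idea against a pre-built analyzer:

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.Version;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;

public class ConsumeSketch {
    public static List<String> terms(String text) throws Exception {
        Analyzer analyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT);
        TokenStream ts = analyzer.tokenStream("field", text);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        List<String> tokens = new ArrayList<String>();
        ts.reset();
        while (ts.incrementToken()) {
            tokens.add(term.toString());
        }
        ts.end();   // flushes end-of-stream state such as the final offset
        ts.close(); // releases the stream; required before it can be reused
        return tokens; // "This is it Dude" -> [this, is, it, dude] on 1.0.x
    }
}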
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java
new file mode 100644
index 0000000..d7d5f71
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ *
+ */
+public class PreBuiltCharFilterFactoryFactoryTests {
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltCharFilterFactoryFactory factory = new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT));
+
+ CharFilterFactory emptySettingsTokenizerFactory = factory.create("html_strip", ImmutableSettings.EMPTY);
+ CharFilterFactory former090TokenizerFactory = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ CharFilterFactory former090TokenizerFactoryCopy = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ CharFilterFactory currentTokenizerFactory = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenizerFactory, is(currentTokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(former090TokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java
new file mode 100644
index 0000000..a3c45d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ *
+ */
+public class PreBuiltTokenFilterFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatCachingWorksForCachingStrategyOne() {
+ PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.WORD_DELIMITER.getTokenFilterFactory(Version.CURRENT));
+
+ TokenFilterFactory emptySettingsTokenizerFactory = factory.create("word_delimiter", ImmutableSettings.EMPTY);
+ TokenFilterFactory former090TokenizerFactory = factory.create("word_delimiter", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenFilterFactory former090TokenizerFactoryCopy = factory.create("word_delimiter", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenFilterFactory currentTokenizerFactory = factory.create("word_delimiter", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenizerFactory, is(currentTokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(former090TokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.STOP.getTokenFilterFactory(Version.CURRENT));
+
+ TokenFilterFactory emptySettingsTokenizerFactory = factory.create("stop", ImmutableSettings.EMPTY);
+ TokenFilterFactory former090TokenizerFactory = factory.create("stop", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenFilterFactory former090TokenizerFactoryCopy = factory.create("stop", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenFilterFactory currentTokenizerFactory = factory.create("stop", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenizerFactory, is(currentTokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(not(former090TokenizerFactory)));
+ assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java
new file mode 100644
index 0000000..279bf7b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ *
+ */
+public class PreBuiltTokenizerFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltTokenizerFactoryFactory factory = new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.STANDARD.getTokenizerFactory(Version.CURRENT));
+
+ TokenizerFactory emptySettingsTokenizerFactory = factory.create("standard", ImmutableSettings.EMPTY);
+ // different es versions, same lucene version, thus cached
+ TokenizerFactory former090TokenizerFactory = factory.create("standard", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenizerFactory former090TokenizerFactoryCopy = factory.create("standard", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenizerFactory currentTokenizerFactory = factory.create("standard", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenizerFactory, is(currentTokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(not(former090TokenizerFactory)));
+ assertThat(emptySettingsTokenizerFactory, is(not(former090TokenizerFactoryCopy)));
+ assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+}
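
The three PreBuilt*FactoryFactory tests above all pin down the same behavior: instances are cached per Lucene version, not per Elasticsearch version, so 0.90.1 and 0.90.2 (which share a Lucene release) get one instance while 0.90.x and CURRENT do not. A purely illustrative sketch of that caching idea (class and method names here are hypothetical, not ES API):

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.util.Version;

// Hypothetical names; only the caching idea mirrors the factories under test.
public class PerLuceneVersionCache<T> {

    public interface Builder<V> {
        V build(Version luceneVersion);
    }

    private final Map<Version, T> cache = new HashMap<Version, T>();

    public synchronized T get(Version luceneVersion, Builder<T> builder) {
        T instance = cache.get(luceneVersion);
        if (instance == null) {
            instance = builder.build(luceneVersion);
            cache.put(luceneVersion, instance);
        }
        return instance;
    }
}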
diff --git a/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
new file mode 100644
index 0000000..6cc06be
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+@ThreadLeakScope(Scope.NONE)
+public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/shingle_analysis.json";
+
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle");
+ String source = "the quick brown fox";
+ String[] expected = new String[]{"the", "the quick", "quick", "quick brown", "brown", "brown fox", "fox"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testInverseMapping() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
+ String source = "the quick brown fox";
+ String[] expected = new String[]{"the_quick_brown", "quick_brown_fox"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testInverseMappingNoShingles() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
+ String source = "the quick";
+ String[] expected = new String[]{"the", "quick"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+}
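
The shingle_inverse filter used here (configured in shingle_analysis.json later in this patch) maps onto Lucene's ShingleFilter. A minimal sketch of equivalent hand wiring, assuming Lucene 4.6:

import java.io.StringReader;

import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class ShingleSketch {
    public static void main(String[] args) throws Exception {
        ShingleFilter shingles = new ShingleFilter(
                new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("the quick brown fox")),
                3, 3); // min_shingle_size = max_shingle_size = 3
        shingles.setOutputUnigrams(false);
        shingles.setOutputUnigramsIfNoShingles(true);
        shingles.setTokenSeparator("_");
        CharTermAttribute term = shingles.addAttribute(CharTermAttribute.class);
        shingles.reset();
        while (shingles.incrementToken()) {
            System.out.println(term); // the_quick_brown, quick_brown_fox
        }
        shingles.end();
        shingles.close();
    }
}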
diff --git a/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
new file mode 100644
index 0000000..daf0de9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class StopAnalyzerTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefaultsCompoundAnalysis() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/stop.json").build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("analyzer1");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "to be or not to be"), new String[0]);
+
+ NamedAnalyzer analyzer2 = analysisService.analyzer("analyzer2");
+
+ assertTokenStreamContents(analyzer2.tokenStream("test", "to be or not to be"), new String[0]);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
new file mode 100644
index 0000000..d2a1727
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.inject.ProvisionException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test(expected = ProvisionException.class)
+ public void testPositionIncrementSetting() throws IOException {
+ Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.enable_position_increments", false);
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.version", "4.4");
+ }
+ Settings settings = builder.build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ analysisService.tokenFilter("my_stop");
+ }
+
+ @Test
+ public void testCorrectPositionIncrementSetting() throws IOException {
+ Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.enable_position_increments", true);
+ }
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.version", Version.values()[random().nextInt(Version.values().length)]);
+ }
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ TokenStream create = tokenFilter.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("foo bar")));
+ assertThat(create, instanceOf(StopFilter.class));
+ assertThat(((StopFilter)create).getEnablePositionIncrements(), equalTo(true));
+ }
+
+ @Test
+ public void testDeprecatedPositionIncrementSettingWithVersions() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.enable_position_increments", false).put("index.analysis.filter.my_stop.version", "4.3")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ TokenStream create = tokenFilter.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("foo bar")));
+ assertThat(create, instanceOf(StopFilter.class));
+ assertThat(((StopFilter)create).getEnablePositionIncrements(), equalTo(false));
+ }
+
+ @Test
+ public void testThatSuggestStopFilterWorks() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.remove_trailing", false)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ TokenStream create = tokenFilter.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("foo an")));
+ assertThat(create, instanceOf(SuggestStopFilter.class));
+ }
+}
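
The last test depends on remove_trailing: false swapping in Lucene's SuggestStopFilter which, unlike StopFilter, keeps a stopword that ends the input on the theory that the user may still be typing a longer word. A standalone sketch, assuming Lucene 4.6 with the lucene-suggest module available:

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
import org.apache.lucene.util.Version;

public class SuggestStopSketch {
    public static void main(String[] args) throws Exception {
        TokenStream ts = new SuggestStopFilter(
                new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("foo an")),
                StopAnalyzer.ENGLISH_STOP_WORDS_SET);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            // prints "foo" and then "an": the trailing stopword is kept because
            // it may be the prefix of a word the user has not finished typing
            System.out.println(term);
        }
        ts.end();
        ts.close();
    }
}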
diff --git a/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
new file mode 100644
index 0000000..2320c22
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testCatenateWords() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "500", "42", "wifi", "wifi", "4000", "j", "2", "se", "ONeil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testCatenateNumbers() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "50042", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testCatenateAll() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testSplitOnCaseChange() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot";
+ String[] expected = new String[]{"PowerShot"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testPreserveOriginal() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.preserve_original", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "Power", "Shot", "500-42", "500", "42", "wi-fi", "wi", "fi", "wi-fi-4000", "wi", "fi", "4000", "j2se", "j", "2", "se", "O'Neil's", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testStemEnglishPossessive() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+}
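
Each boolean setting in these tests toggles a flag on Lucene's WordDelimiterFilter. A sketch of roughly the default flag combination (word parts, number parts, case-change and numeric splits, possessive stemming), assuming Lucene 4.6:

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class WordDelimiterSketch {
    public static void main(String[] args) throws Exception {
        int flags = WordDelimiterFilter.GENERATE_WORD_PARTS
                  | WordDelimiterFilter.GENERATE_NUMBER_PARTS
                  | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
                  | WordDelimiterFilter.SPLIT_ON_NUMERICS
                  | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE;
        TokenStream ts = new WordDelimiterFilter(
                new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("PowerShot 500-42 O'Neil's")),
                flags,
                null); // no protected words
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term); // Power, Shot, 500, 42, O, Neil
        }
        ts.end();
        ts.close();
    }
}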
diff --git a/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json b/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json
new file mode 100644
index 0000000..89a1281
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json
@@ -0,0 +1,37 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "cjk_all_flags":{
+ "type":"cjk_bigram",
+ "output_unigrams":true,
+ "ignored_scripts":[
+ "han",
+ "hiragana",
+ "katakana",
+ "hangul",
+ "foobar"
+ ]
+ },
+ "cjk_han_only":{
+ "type":"cjk_bigram",
+ "output_unigrams":false,
+ "ignored_scripts":[
+ "hiragana"
+ ]
+ },
+ "cjk_han_unigram_only":{
+ "type":"cjk_bigram",
+ "output_unigrams":true,
+ "ignored_scripts":[
+ "hiragana"
+ ]
+ },
+ "cjk_no_flags":{
+ "type":"cjk_bigram",
+ "output_unigrams":false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
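
The cjk_bigram filters configured above correspond to Lucene's CJKBigramFilter: output_unigrams maps to its boolean argument, and ignored_scripts removes script flags from the set that gets bigrammed. An illustrative sketch (Han-only bigrams plus unigrams), assuming Lucene 4.6:

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class CjkBigramSketch {
    public static void main(String[] args) throws Exception {
        // Bigram only Han characters and also emit the unigrams.
        TokenStream ts = new CJKBigramFilter(
                new StandardTokenizer(Version.LUCENE_46, new StringReader("多くの学生")),
                CJKBigramFilter.HAN,
                true); // output unigrams as well
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term);
        }
        ts.end();
        ts.close();
    }
}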
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
new file mode 100644
index 0000000..dcbc026
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.commongrams;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.AnalysisTestsHelper;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefault() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams").build();
+
+ try {
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Assert.fail("expected an exception: neither [common_words] nor [common_words_path] is set");
+ } catch (Exception e) {
+ assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+ }
+ }
+
+ @Test
+ public void testWithoutCommonWordsMatch() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .put("index.analysis.filter.common_grams_default.query_mode", false)
+ .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+ }
+
+ @Test
+ public void testSettings() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams")
+ .put("index.analysis.filter.common_grams_1.ignore_case", true)
+ .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ String source = "the quick brown is a fox or noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "fox_or", "or", "or_noT", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams")
+ .put("index.analysis.filter.common_grams_2.ignore_case", false)
+ .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "or", "why", "why_noT", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams")
+ .putArray("index.analysis.filter.common_grams_3.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ @Test
+ public void testCommonGramsAnalysis() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams.json").build();
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ }
+
+ @Test
+ public void testQueryModeSettings() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams")
+ .put("index.analysis.filter.common_grams_1.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
+ .put("index.analysis.filter.common_grams_1.ignore_case", true)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ String source = "the quick brown is a fox or noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox_or", "or_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams")
+ .put("index.analysis.filter.common_grams_2.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .put("index.analysis.filter.common_grams_2.ignore_case", false)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams")
+ .put("index.analysis.filter.common_grams_3.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_3.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_4.type", "common_grams")
+ .put("index.analysis.filter.common_grams_4.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_4.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_4");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ @Test
+ public void testQueryModeCommonGramsAnalysis() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json").build();
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ }
+
+}
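
At the Lucene level the common_grams type is CommonGramsFilter, and query_mode: true additionally wraps it in CommonGramsQueryFilter, which is why the query-mode expectations above drop plain terms already covered by a bigram. A sketch of the index-side behavior, assuming Lucene 4.6:

import java.io.StringReader;
import java.util.Arrays;

import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;

public class CommonGramsSketch {
    public static void main(String[] args) throws Exception {
        CharArraySet common = new CharArraySet(Version.LUCENE_46, Arrays.asList("brown", "fox"), false);
        CommonGramsFilter indexSide = new CommonGramsFilter(Version.LUCENE_46,
                new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("the quick brown fox")),
                common);
        // query mode would wrap it: new CommonGramsQueryFilter(indexSide)
        CharTermAttribute term = indexSide.addAttribute(CharTermAttribute.class);
        indexSide.reset();
        while (indexSide.incrementToken()) {
            System.out.println(term); // the, quick, quick_brown, brown, brown_fox, fox
        }
        indexSide.end();
        indexSide.close();
    }
}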
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt b/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt
new file mode 100644
index 0000000..f97b799
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt
@@ -0,0 +1,2 @@
+brown
+fox
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json
new file mode 100644
index 0000000..6db49fc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json
@@ -0,0 +1,29 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "commongramsAnalyzer":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams" ]
+ },
+ "commongramsAnalyzer_file":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams_file" ]
+ }
+ },
+ "filter":{
+ "common_grams":{
+ "type":"common_grams",
+ "common_words":[
+ "brown",
+ "fox"
+ ]
+ },
+ "common_grams_file":{
+ "type":"common_grams",
+ "common_words_path":"org/elasticsearch/index/analysis/commongrams/common_words.txt"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json
new file mode 100644
index 0000000..6f0c015
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json
@@ -0,0 +1,31 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "commongramsAnalyzer":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams" ]
+ },
+ "commongramsAnalyzer_file":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams_file" ]
+ }
+ },
+ "filter":{
+ "common_grams":{
+ "type":"common_grams",
+ "query_mode" : true,
+ "common_words":[
+ "brown",
+ "fox"
+ ]
+ },
+ "common_grams_file":{
+ "type":"common_grams",
+ "query_mode" : true,
+ "common_words_path":"org/elasticsearch/index/analysis/commongrams/common_words.txt"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java b/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
new file mode 100644
index 0000000..a30d520
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis.filter1;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettings;
+
+public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public MyFilterTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, String name) {
+ super(index, indexSettings, name, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new StopFilter(version, tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+} \ No newline at end of file
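
A factory like this is registered by using its fully qualified class name as the filter type, which is how the compound-analysis tests in this suite pick it up. A sketch of settings that would wire it into an analyzer (the myfilter/myanalyzer names are illustrative):

import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class MyFilterSettingsSketch {
    public static Settings settings() {
        return ImmutableSettings.settingsBuilder()
                // the fully qualified factory class doubles as the filter type
                .put("index.analysis.filter.myfilter.type",
                        "org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory")
                .put("index.analysis.analyzer.myanalyzer.tokenizer", "whitespace")
                .putArray("index.analysis.analyzer.myanalyzer.filter", "myfilter")
                .build();
    }
}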
diff --git a/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json b/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json
new file mode 100644
index 0000000..233d6f3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json
@@ -0,0 +1,19 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "my_keep_filter":{
+ "type":"keep",
+ "keep_words" : ["Hello", "worlD"],
+ "keep_words_case" : true
+ },
+ "my_case_sensitive_keep_filter":{
+ "type":"keep",
+ "keep_words" : ["Hello", "worlD"],
+ "enable_position_increments" : false,
+ "version" : "4.2"
+ }
+ }
+ }
+ }
+}
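
The keep type corresponds to Lucene's KeepWordFilter, the inverse of a stop filter: only the listed words survive. A standalone sketch of the case-sensitive variant above, assuming Lucene 4.6:

import java.io.StringReader;
import java.util.Arrays;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;

public class KeepWordSketch {
    public static void main(String[] args) throws Exception {
        CharArraySet keep = new CharArraySet(Version.LUCENE_46, Arrays.asList("Hello", "worlD"), false);
        TokenStream ts = new KeepWordFilter(Version.LUCENE_46,
                new WhitespaceTokenizer(Version.LUCENE_46, new StringReader("Hello there worlD")),
                keep);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term); // Hello, worlD ("there" is dropped)
        }
        ts.end();
        ts.close();
    }
}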
diff --git a/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json b/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json
new file mode 100644
index 0000000..d82fb98
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json
@@ -0,0 +1,46 @@
+{
+ "index": {
+ "number_of_shards": 1,
+ "number_of_replicas": 0,
+ "analysis": {
+ "filter": {
+ "single": {
+ "type": "pattern_capture",
+ "patterns": "((...)...)"
+ },
+ "multi": {
+ "type": "pattern_capture",
+ "patterns": [
+ "(\\d+)",
+ "([a-z]+)"
+ ]
+ },
+ "preserve": {
+ "type": "pattern_capture",
+ "preserve_original": false,
+ "patterns": "((...)...)"
+ }
+ },
+ "analyzer": {
+ "single": {
+ "tokenizer": "keyword",
+ "filter": [
+ "single"
+ ]
+ },
+ "multi": {
+ "tokenizer": "keyword",
+ "filter": [
+ "multi"
+ ]
+ },
+ "preserve": {
+ "tokenizer": "keyword",
+ "filter": [
+ "preserve"
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
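A worked example of what the `single` filter above emits, assuming Lucene's PatternCaptureGroupTokenFilter (the filter behind `pattern_capture`) over keyword-tokenized input:

    // Sketch: on the token "abcdef", the pattern "((...)...)" has two
    // capturing groups: group 1 = "abcdef" and group 2 = "abc". With
    // preserve_original at its default of true, the stream emits the
    // original token plus a token per capturing group, all at the same
    // position (a group equal to the original may be collapsed).
    TokenStream ts = new KeywordTokenizer(new StringReader("abcdef"));
    ts = new PatternCaptureGroupTokenFilter(ts, true, Pattern.compile("((...)...)"));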
diff --git a/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json b/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json
new file mode 100644
index 0000000..c469a4a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json
@@ -0,0 +1,16 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "shingle_inverse":{
+ "type":"shingle",
+ "max_shingle_size" : 3,
+ "min_shingle_size" : 3,
+ "output_unigrams" : false,
+ "output_unigrams_if_no_shingles" : true,
+ "token_separator" : "_"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
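The equivalent Lucene-level configuration for `shingle_inverse`, with a worked input (a sketch; the setter names are those of Lucene 4.x ShingleFilter):

    // Sketch: fixed-size trigrams joined with '_'. For the input
    // "please divide this sentence" the filter emits "please_divide_this"
    // and "divide_this_sentence"; unigrams are suppressed unless the input
    // is too short to form any shingle at all.
    ShingleFilter shingles = new ShingleFilter(
            new WhitespaceTokenizer(Lucene.VERSION, new StringReader("please divide this sentence")), 3, 3);
    shingles.setTokenSeparator("_");
    shingles.setOutputUnigrams(false);
    shingles.setOutputUnigramsIfNoShingles(true);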
diff --git a/src/test/java/org/elasticsearch/index/analysis/stop.json b/src/test/java/org/elasticsearch/index/analysis/stop.json
new file mode 100644
index 0000000..717c9fd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/stop.json
@@ -0,0 +1,18 @@
+{
+ "index":{
+ "number_of_shards":1,
+ "number_of_replicas":0,
+ "analysis":{
+ "analyzer":{
+ "analyzer1":{
+ "type":"stop",
+ "stopwords":["_english_"]
+ },
+ "analyzer2":{
+ "type":"stop",
+ "stopwords":"_english_"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
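Both analyzers above resolve to the same configuration: `stopwords` accepts either a single name or an array, and `_english_` names the bundled English stop set (which, assuming the usual Lucene aliasing, coincides with StopAnalyzer.ENGLISH_STOP_WORDS_SET, the set MyFilterTokenFilterFactory above uses directly). A one-line sketch of the analyzer both entries resolve to:

    // Sketch: the analyzer that "analyzer1" and "analyzer2" both describe.
    Analyzer stop = new StopAnalyzer(Lucene.VERSION, StopAnalyzer.ENGLISH_STOP_WORDS_SET);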
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java b/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java
new file mode 100644
index 0000000..a578e39
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.synonyms;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests synonym filters configured inline and from files, in plain and WordNet formats.
+ */
+public class SynonymsAnalysisTest extends ElasticsearchTestCase {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+ private AnalysisService analysisService;
+
+ @Test
+ public void testSynonymsAnalysis() throws IOException {
+
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/synonyms/synonyms.json").build();
+
+ Index index = new Index("test");
+
+ Injector parentInjector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new EnvironmentModule(new Environment(settings)),
+ new IndicesAnalysisModule())
+ .createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ analysisService = injector.getInstance(AnalysisService.class);
+
+ match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
+ match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
+ match("synonymAnalyzerWordnet", "abstain", "abstain refrain desist");
+ match("synonymAnalyzerWordnet_file", "abstain", "abstain refrain desist");
+
+ }
+
+ private void match(String analyzerName, String source, String target) throws IOException {
+
+ Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field", source, 1.0f);
+ allEntries.reset();
+
+ TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+ stream.reset();
+ CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
+
+ StringBuilder sb = new StringBuilder();
+ while (stream.incrementToken()) {
+ sb.append(termAtt.toString()).append(" ");
+ }
+
+ MatcherAssert.assertThat(sb.toString().trim(), equalTo(target));
+ }
+
+}
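One nit for anyone extending match() above: the full Lucene TokenStream contract also calls end() and close() after iteration; the test omits them, which most filters tolerate but the contract does not guarantee. A sketch of the complete consumption loop, reusing match()'s analyzer and source:

    // Full contract: reset -> incrementToken* -> end -> close.
    TokenStream stream = analyzer.tokenStream("field", new StringReader(source));
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
        // consume term.toString()
    }
    stream.end();
    stream.close();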
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json
new file mode 100644
index 0000000..d23d6ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json
@@ -0,0 +1,52 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "synonymAnalyzer":{
+ "tokenizer":"standard",
+ "filter":[ "synonym" ]
+ },
+ "synonymAnalyzer_file":{
+ "tokenizer":"standard",
+ "filter":[ "synonym_file" ]
+ },
+ "synonymAnalyzerWordnet":{
+ "tokenizer":"standard",
+ "filter":[ "synonymWordnet" ]
+ },
+ "synonymAnalyzerWordnet_file":{
+ "tokenizer":"standard",
+ "filter":[ "synonymWordnet_file" ]
+ }
+ },
+ "filter":{
+ "synonym":{
+ "type":"synonym",
+ "synonyms":[
+ "kimchy => shay",
+ "dude => elasticsearch",
+ "abides => man!"
+ ]
+ },
+ "synonym_file":{
+ "type":"synonym",
+ "synonyms_path":"org/elasticsearch/index/analysis/synonyms/synonyms.txt"
+ },
+ "synonymWordnet":{
+ "type":"synonym",
+ "format":"wordnet",
+ "synonyms":[
+ "s(100000001,1,'abstain',v,1,0).",
+ "s(100000001,2,'refrain',v,1,0).",
+ "s(100000001,3,'desist',v,1,0)."
+ ]
+ },
+ "synonymWordnet_file":{
+ "type":"synonym",
+ "format":"wordnet",
+ "synonyms_path":"org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
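The `s(synset,ordinal,'word',pos,sense,tag).` lines used above are WordNet prolog entries; all three share synset 100000001 and therefore form a single synonym group. A sketch of parsing that format directly, assuming the Lucene 4.x WordnetSynonymParser API:

    // Sketch: build a SynonymMap from the same three entries.
    WordnetSynonymParser parser =
            new WordnetSynonymParser(true, true, new WhitespaceAnalyzer(Lucene.VERSION));
    parser.parse(new StringReader(
            "s(100000001,1,'abstain',v,1,0).\n" +
            "s(100000001,2,'refrain',v,1,0).\n" +
            "s(100000001,3,'desist',v,1,0).\n"));
    SynonymMap map = parser.build();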
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt
new file mode 100644
index 0000000..ef4b225
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt
@@ -0,0 +1,3 @@
+kimchy => shay
+dude => elasticsearch
+abides => man!
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt
new file mode 100644
index 0000000..f7b68e3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt
@@ -0,0 +1,3 @@
+s(100000001,1,'abstain',v,1,0).
+s(100000001,2,'refrain',v,1,0).
+s(100000001,3,'desist',v,1,0).
diff --git a/src/test/java/org/elasticsearch/index/analysis/test1.json b/src/test/java/org/elasticsearch/index/analysis/test1.json
new file mode 100644
index 0000000..3b503d1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/test1.json
@@ -0,0 +1,80 @@
+{
+ "index":{
+ "analysis":{
+ "tokenizer":{
+ "standard":{
+ "type":"standard"
+ }
+ },
+ "char_filter":{
+ "my_html":{
+ "type":"html_strip",
+ "escaped_tags":["xxx", "yyy"],
+ "read_ahead":1024
+ },
+ "my_pattern":{
+ "type":"pattern_replace",
+ "pattern":"sample(.*)",
+ "replacement":"replacedSample $1"
+ },
+ "my_mapping":{
+ "type":"mapping",
+ "mappings":["ph=>f", "qu=>q"]
+ }
+ },
+ "filter":{
+ "stop":{
+ "type":"stop",
+ "stopwords":["test-stop"]
+ },
+ "stop2":{
+ "type":"stop",
+ "stopwords":["stop2-1", "stop2-2"]
+ },
+ "my":{
+ "type":"org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory"
+ },
+ "dict_dec":{
+ "type":"dictionary_decompounder",
+ "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"]
+ }
+ },
+ "analyzer":{
+ "standard":{
+ "alias":"alias1,alias2",
+ "type":"standard",
+ "stopwords":["test1", "test2", "test3"]
+ },
+ "custom1":{
+ "alias":["alias4", "alias5"],
+ "tokenizer":"standard",
+ "filter":["stop", "stop2"]
+ },
+ "custom2":{
+ "tokenizer":"standard",
+ "char_filter":["html_strip", "my_html"]
+ },
+ "custom3":{
+ "tokenizer":"standard",
+ "char_filter":["my_pattern"]
+ },
+ "custom4":{
+ "tokenizer":"standard",
+ "filter":["my"]
+ },
+ "custom5":{
+ "tokenizer":"standard",
+ "char_filter":["my_mapping"]
+ },
+ "czechAnalyzerWithStemmer":{
+ "tokenizer":"standard",
+ "filter":["standard", "lowercase", "stop", "czech_stem"]
+ },
+ "decompoundingAnalyzer":{
+ "tokenizer":"standard",
+ "filter":["dict_dec"]
+ }
+ }
+ }
+ }
+}
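test1.yml below mirrors this JSON file (and additionally defines the `custom6` and `custom7` analyzers); either form loads identically, since `loadFromClasspath` picks the parser from the file extension. A sketch mirroring the loading call used in SynonymsAnalysisTest above:

    // Sketch: JSON and YAML settings files are interchangeable here.
    Settings fromJson = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build();
    Settings fromYaml = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build();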
diff --git a/src/test/java/org/elasticsearch/index/analysis/test1.yml b/src/test/java/org/elasticsearch/index/analysis/test1.yml
new file mode 100644
index 0000000..9c4aac6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/test1.yml
@@ -0,0 +1,61 @@
+index :
+ analysis :
+ tokenizer :
+ standard :
+ type : standard
+ char_filter :
+ my_html :
+ type : html_strip
+ escaped_tags : [xxx, yyy]
+ read_ahead : 1024
+ my_pattern :
+ type: pattern_replace
+ pattern: sample(.*)
+ replacement: replacedSample $1
+ my_mapping :
+ type : mapping
+ mappings : [ph=>f, qu=>q]
+ filter :
+ stop :
+ type : stop
+ stopwords : [test-stop]
+ stop2 :
+ type : stop
+ stopwords : [stop2-1, stop2-2]
+ my :
+ type : org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory
+ dict_dec :
+ type : dictionary_decompounder
+ word_list : [donau, dampf, schiff, spargel, creme, suppe]
+ analyzer :
+ standard :
+ alias: alias1,alias2
+ type : standard
+ stopwords : [test1, test2, test3]
+ custom1 :
+ alias : [alias4, alias5]
+ tokenizer : standard
+ filter : [stop, stop2]
+ custom2 :
+ tokenizer : standard
+ char_filter : [html_strip, my_html]
+ custom3 :
+ tokenizer : standard
+ char_filter : [my_pattern]
+ custom4 :
+ tokenizer : standard
+ filter : [my]
+ custom5 :
+ tokenizer : standard
+ char_filter : [my_mapping]
+ custom6 :
+ type : standard
+ custom7 :
+ type : standard
+ version: 3.6
+ czechAnalyzerWithStemmer :
+ tokenizer : standard
+ filter : [standard, lowercase, stop, czech_stem]
+ decompoundingAnalyzer :
+ tokenizer : standard
+ filter : [dict_dec]
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java b/src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java
new file mode 100644
index 0000000..3133ce2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.none.NoneFilterCache;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests how the filter cache implementations handle document deletions.
+ */
+public class FilterCacheTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNoCache() throws Exception {
+ verifyCache(new NoneFilterCache(new Index("test"), EMPTY_SETTINGS));
+ }
+
+ private void verifyCache(FilterCache filterCache) throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+
+ for (int i = 0; i < 100; i++) {
+ Document document = new Document();
+ document.add(new TextField("id", Integer.toString(i), Field.Store.YES));
+ indexWriter.addDocument(document);
+ }
+
+ reader = refreshReader(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ assertThat(Lucene.count(searcher, new ConstantScoreQuery(filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1L));
+ assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1L));
+
+ indexWriter.deleteDocuments(new Term("id", "1"));
+ reader = refreshReader(reader);
+ searcher = new IndexSearcher(reader);
+ TermFilter filter = new TermFilter(new Term("id", "1"));
+ Filter cachedFilter = filterCache.cache(filter);
+ long constantScoreCount = filter == cachedFilter ? 0 : 1;
+ // Sadly, caching based on the reader cacheKey alone fails to see NRT deletes; that's why the deletion-aware variant exists.
+ assertThat(Lucene.count(searcher, new ConstantScoreQuery(cachedFilter)), equalTo(constantScoreCount));
+ assertThat(Lucene.count(searcher, new XConstantScoreQuery(cachedFilter)), equalTo(0L));
+ assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), cachedFilter)), equalTo(0L));
+
+ indexWriter.close();
+ }
+
+ private DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
+ IndexReader oldReader = reader;
+ reader = DirectoryReader.openIfChanged(reader);
+ if (reader != oldReader) {
+ oldReader.close();
+ }
+ return reader;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java b/src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java
new file mode 100644
index 0000000..68ab67e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.id;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for {@link SimpleIdCache}.
+ */
+public class SimpleIdCacheTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDeletedDocuments() throws Exception {
+ SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
+ IndexWriter writer = createIndexWriter();
+ // Begins with parent, ends with child docs
+ final Document parent = doc("parent", "1");
+ writer.addDocument(parent);
+ writer.addDocument(childDoc("child", "1", "parent", "1"));
+ writer.addDocument(childDoc("child", "2", "parent", "1"));
+ writer.addDocument(childDoc("child", "3", "parent", "1"));
+ writer.commit();
+
+ final String parentUid = parent.get("_uid");
+ assertNotNull(parentUid);
+ writer.deleteDocuments(new Term("_uid", parentUid));
+
+ writer.close();
+ DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
+ List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
+ idCache.refresh(leaves);
+
+ assertThat(leaves.size(), equalTo(1));
+ IdReaderCache readerCache = idCache.reader(leaves.get(0).reader());
+ IdReaderTypeCache typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("1"));
+ }
+
+ @Test
+ public void testRefresh() throws Exception {
+ SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
+ IndexWriter writer = createIndexWriter();
+ // Begins with parent, ends with child docs
+ writer.addDocument(doc("parent", "1"));
+ writer.addDocument(childDoc("child", "1", "parent", "1"));
+ writer.addDocument(childDoc("child", "2", "parent", "1"));
+ writer.addDocument(childDoc("child", "3", "parent", "1"));
+ writer.addDocument(childDoc("child", "4", "parent", "1"));
+ writer.commit();
+
+ // Begins with child, ends with parent docs
+ writer.addDocument(childDoc("child", "5", "parent", "2"));
+ writer.addDocument(doc("parent", "2"));
+ writer.addDocument(doc("parent", "3"));
+ writer.addDocument(doc("parent", "4"));
+ writer.addDocument(doc("parent", "5"));
+ writer.commit();
+
+ // Begins with parent, child docs in the middle and ends with parent doc
+ writer.addDocument(doc("parent", "6"));
+ writer.addDocument(childDoc("child", "6", "parent", "6"));
+ writer.addDocument(childDoc("child", "7", "parent", "6"));
+ writer.addDocument(childDoc("child", "8", "parent", "5"));
+ writer.addDocument(childDoc("child", "9", "parent", "4"));
+ writer.addDocument(doc("parent", "7"));
+ writer.commit();
+
+ // Garbage segment
+ writer.addDocument(doc("zzz", "1"));
+ writer.addDocument(doc("xxx", "2"));
+ writer.addDocument(doc("aaa", "3"));
+ writer.addDocument(doc("ccc", "4"));
+ writer.addDocument(doc("parent", "8"));
+ writer.commit();
+
+ writer.close();
+ DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
+ List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
+ idCache.refresh(leaves);
+
+ // Verify simple id cache for segment 1
+ IdReaderCache readerCache = idCache.reader(leaves.get(0).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ IdReaderTypeCache typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("1"));
+ assertThat(typeCache.idByDoc(1), nullValue());
+ assertThat(typeCache.idByDoc(2), nullValue());
+ assertThat(typeCache.idByDoc(3), nullValue());
+ assertThat(typeCache.idByDoc(4), nullValue());
+
+ assertThat(typeCache.parentIdByDoc(0), nullValue());
+ assertThat(typeCache.parentIdByDoc(1).toUtf8(), equalTo("1"));
+ assertThat(typeCache.parentIdByDoc(2).toUtf8(), equalTo("1"));
+ assertThat(typeCache.parentIdByDoc(3).toUtf8(), equalTo("1"));
+ assertThat(typeCache.parentIdByDoc(4).toUtf8(), equalTo("1"));
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("1"))), equalTo(0));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("2"))), equalTo(-1));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("3"))), equalTo(-1));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("4"))), equalTo(-1));
+
+ // Verify simple id cache for segment 2
+ readerCache = idCache.reader(leaves.get(1).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0), nullValue());
+ assertThat(typeCache.idByDoc(1).toUtf8(), equalTo("2"));
+ assertThat(typeCache.idByDoc(2).toUtf8(), equalTo("3"));
+ assertThat(typeCache.idByDoc(3).toUtf8(), equalTo("4"));
+ assertThat(typeCache.idByDoc(4).toUtf8(), equalTo("5"));
+
+ assertThat(typeCache.parentIdByDoc(0).toUtf8(), equalTo("2"));
+ assertThat(typeCache.parentIdByDoc(1), nullValue());
+ assertThat(typeCache.parentIdByDoc(2), nullValue());
+ assertThat(typeCache.parentIdByDoc(3), nullValue());
+ assertThat(typeCache.parentIdByDoc(4), nullValue());
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("2"))), equalTo(1));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("3"))), equalTo(2));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("4"))), equalTo(3));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("5"))), equalTo(4));
+
+ // Verify simple id cache for segment 3
+ readerCache = idCache.reader(leaves.get(2).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("6"));
+ assertThat(typeCache.idByDoc(1), nullValue());
+ assertThat(typeCache.idByDoc(2), nullValue());
+ assertThat(typeCache.idByDoc(3), nullValue());
+ assertThat(typeCache.idByDoc(4), nullValue());
+ assertThat(typeCache.idByDoc(5).toUtf8(), equalTo("7"));
+
+ assertThat(typeCache.parentIdByDoc(0), nullValue());
+ assertThat(typeCache.parentIdByDoc(1).toUtf8(), equalTo("6"));
+ assertThat(typeCache.parentIdByDoc(2).toUtf8(), equalTo("6"));
+ assertThat(typeCache.parentIdByDoc(3).toUtf8(), equalTo("5"));
+ assertThat(typeCache.parentIdByDoc(4).toUtf8(), equalTo("4"));
+ assertThat(typeCache.parentIdByDoc(5), nullValue());
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("6"))), equalTo(0));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("7"))), equalTo(5));
+
+ // Verify simple id cache for segment 4
+ readerCache = idCache.reader(leaves.get(3).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0), nullValue());
+ assertThat(typeCache.idByDoc(1), nullValue());
+ assertThat(typeCache.idByDoc(2), nullValue());
+ assertThat(typeCache.idByDoc(3), nullValue());
+ assertThat(typeCache.idByDoc(4).toUtf8(), equalTo("8"));
+
+ assertThat(typeCache.parentIdByDoc(0), nullValue());
+ assertThat(typeCache.parentIdByDoc(1), nullValue());
+ assertThat(typeCache.parentIdByDoc(2), nullValue());
+ assertThat(typeCache.parentIdByDoc(3), nullValue());
+ assertThat(typeCache.parentIdByDoc(4), nullValue());
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("8"))), equalTo(4));
+ }
+
+ @Test(expected = AssertionError.class)
+ public void testRefresh_tripAssert() throws Exception {
+ assumeTrue(ASSERTIONS_ENABLED);
+ SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
+ IndexWriter writer = createIndexWriter();
+ // Begins with parent, ends with child docs
+ writer.addDocument(doc("parent", "1"));
+ writer.addDocument(childDoc("child", "1", "parent", "1"));
+ writer.addDocument(childDoc("child", "2", "parent", "1"));
+ writer.addDocument(childDoc("child", "3", "parent", "1"));
+ writer.addDocument(childDoc("child", "4", "parent", "1"));
+ // A doc like this should never end up in the index; we just want to trip an assert here.
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, "parent", Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ writer.close();
+ DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
+ List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
+ idCache.refresh(leaves);
+ }
+
+ private Document doc(String type, String id) {
+ Document doc = new Document();
+ doc.add(new StringField(UidFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", type, id), Field.Store.NO));
+ return doc;
+ }
+
+ private Document childDoc(String type, String id, String parentType, String parentId) {
+ Document child = new Document();
+ child.add(new StringField(UidFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", type, id), Field.Store.NO));
+ child.add(new StringField(ParentFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", parentType, parentId), Field.Store.NO));
+ return child;
+ }
+
+ private SimpleIdCache createSimpleIdCache(Tuple<String, String>... documentTypes) throws IOException {
+ Settings settings = ImmutableSettings.EMPTY;
+ Index index = new Index("test");
+ SimpleIdCache idCache = new SimpleIdCache(index, settings);
+ MapperService mapperService = MapperTestUtils.newMapperService();
+ idCache.setIndexService(new StubIndexService(mapperService));
+
+ for (Tuple<String, String> documentType : documentTypes) {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(documentType.v1())
+ .startObject("_parent").field("type", documentType.v2()).endObject()
+ .endObject().endObject().string();
+ mapperService.merge(documentType.v1(), new CompressedString(defaultMapping), true);
+ }
+
+ return idCache;
+ }
+
+ private IndexWriter createIndexWriter() throws IOException {
+ return new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));
+ }
+
+ public static class StubIndexService implements IndexService {
+
+ private final MapperService mapperService;
+
+ public StubIndexService(MapperService mapperService) {
+ this.mapperService = mapperService;
+ }
+
+ @Override
+ public Injector injector() {
+ return null;
+ }
+
+ @Override
+ public IndexGateway gateway() {
+ return null;
+ }
+
+ @Override
+ public IndexCache cache() {
+ return null;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return null;
+ }
+
+ @Override
+ public IndexSettingsService settingsService() {
+ return null;
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return null;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return mapperService;
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return null;
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return null;
+ }
+
+ @Override
+ public IndexAliasesService aliasesService() {
+ return null;
+ }
+
+ @Override
+ public IndexEngine engine() {
+ return null;
+ }
+
+ @Override
+ public IndexStore store() {
+ return null;
+ }
+
+ @Override
+ public IndexShard createShard(int shardId) throws ElasticsearchException {
+ return null;
+ }
+
+ @Override
+ public void removeShard(int shardId, String reason) throws ElasticsearchException {
+ }
+
+ @Override
+ public int numberOfShards() {
+ return 0;
+ }
+
+ @Override
+ public ImmutableSet<Integer> shardIds() {
+ return null;
+ }
+
+ @Override
+ public boolean hasShard(int shardId) {
+ return false;
+ }
+
+ @Override
+ public IndexShard shard(int shardId) {
+ return null;
+ }
+
+ @Override
+ public IndexShard shardSafe(int shardId) throws IndexShardMissingException {
+ return null;
+ }
+
+ @Override
+ public Injector shardInjector(int shardId) {
+ return null;
+ }
+
+ @Override
+ public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException {
+ return null;
+ }
+
+ @Override
+ public String indexUUID() {
+ return IndexMetaData.INDEX_UUID_NA_VALUE;
+ }
+
+ @Override
+ public Index index() {
+ return null;
+ }
+
+ @Override
+ public Iterator<IndexShard> iterator() {
+ return null;
+ }
+ }
+
+}
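A note on the `_uid` values built above: the uid is encoded as `type#id`, and the id cache recovers the two halves by splitting on the `#` delimiter. A tiny sketch mirroring the doc()/childDoc() helpers:

    // Sketch: the "type#id" encoding used throughout these tests.
    String uid = String.format(Locale.ROOT, "%s#%s", "parent", "42");
    String type = uid.substring(0, uid.indexOf('#'));   // "parent"
    String id = uid.substring(uid.indexOf('#') + 1);    // "42"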
diff --git a/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/src/test/java/org/elasticsearch/index/codec/CodecTests.java
new file mode 100644
index 0000000..e1ba608
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/codec/CodecTests.java
@@ -0,0 +1,430 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat;
+import org.apache.lucene.codecs.diskdv.DiskDocValuesFormat;
+import org.apache.lucene.codecs.lucene40.Lucene40Codec;
+import org.apache.lucene.codecs.lucene41.Lucene41Codec;
+import org.apache.lucene.codecs.lucene42.Lucene42Codec;
+import org.apache.lucene.codecs.lucene45.Lucene45Codec;
+import org.apache.lucene.codecs.lucene45.Lucene45DocValuesFormat;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
+import org.apache.lucene.codecs.memory.DirectPostingsFormat;
+import org.apache.lucene.codecs.memory.MemoryDocValuesFormat;
+import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
+import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
+import org.apache.lucene.codecs.pulsing.Pulsing41PostingsFormat;
+import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.codec.docvaluesformat.*;
+import org.elasticsearch.index.codec.postingsformat.*;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class CodecTests extends ElasticsearchLuceneTestCase {
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ forceDefaultCodec(); // we test against the default codec, so never pick a random one here
+ }
+
+ @Test
+ public void testResolveDefaultCodecs() throws Exception {
+ CodecService codecService = createCodecService();
+ assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
+ assertThat(codecService.codec("default"), instanceOf(Lucene46Codec.class));
+ assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class));
+ assertThat(codecService.codec("Lucene45"), instanceOf(Lucene45Codec.class));
+ assertThat(codecService.codec("Lucene40"), instanceOf(Lucene40Codec.class));
+ assertThat(codecService.codec("Lucene41"), instanceOf(Lucene41Codec.class));
+ assertThat(codecService.codec("Lucene42"), instanceOf(Lucene42Codec.class));
+ assertThat(codecService.codec("SimpleText"), instanceOf(SimpleTextCodec.class));
+ }
+
+ @Test
+ public void testResolveDefaultPostingFormats() throws Exception {
+ PostingsFormatService postingsFormatService = createCodecService().postingsFormatService();
+ assertThat(postingsFormatService.get("default"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("default").get(), instanceOf(Elasticsearch090PostingsFormat.class));
+
+ // Expected to fail when a Lucene upgrade changes the default postings format
+ assertThat(((Elasticsearch090PostingsFormat)postingsFormatService.get("default").get()).getDefaultWrapped(), instanceOf(((PerFieldPostingsFormat) Codec.getDefault().postingsFormat()).getPostingsFormatForField("").getClass()));
+ assertThat(postingsFormatService.get("Lucene41"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ // Expected to fail when a Lucene upgrade changes the default postings format
+ assertThat(postingsFormatService.get("Lucene41").get(), instanceOf(((PerFieldPostingsFormat) Codec.getDefault().postingsFormat()).getPostingsFormatForField(null).getClass()));
+
+ assertThat(postingsFormatService.get("bloom_default"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(postingsFormatService.get("bloom_default").get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(postingsFormatService.get("bloom_default").get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+ assertThat(postingsFormatService.get("BloomFilter"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("BloomFilter").get(), instanceOf(BloomFilteringPostingsFormat.class));
+
+ assertThat(postingsFormatService.get("XBloomFilter"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("XBloomFilter").get(), instanceOf(BloomFilterPostingsFormat.class));
+
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(postingsFormatService.get("bloom_pulsing").get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(postingsFormatService.get("bloom_pulsing").get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+
+ assertThat(postingsFormatService.get("pulsing"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("pulsing").get(), instanceOf(Pulsing41PostingsFormat.class));
+ assertThat(postingsFormatService.get("Pulsing41"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("Pulsing41").get(), instanceOf(Pulsing41PostingsFormat.class));
+
+ assertThat(postingsFormatService.get("memory"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("memory").get(), instanceOf(MemoryPostingsFormat.class));
+ assertThat(postingsFormatService.get("Memory"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("Memory").get(), instanceOf(MemoryPostingsFormat.class));
+
+ assertThat(postingsFormatService.get("direct"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("direct").get(), instanceOf(DirectPostingsFormat.class));
+ assertThat(postingsFormatService.get("Direct"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("Direct").get(), instanceOf(DirectPostingsFormat.class));
+ }
+
+ @Test
+ public void testResolveDefaultDocValuesFormats() throws Exception {
+ DocValuesFormatService docValuesFormatService = createCodecService().docValuesFormatService();
+
+ for (String dvf : Arrays.asList("memory", "disk", "Disk", "default")) {
+ assertThat(docValuesFormatService.get(dvf), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ }
+ assertThat(docValuesFormatService.get("memory").get(), instanceOf(MemoryDocValuesFormat.class));
+ assertThat(docValuesFormatService.get("disk").get(), instanceOf(DiskDocValuesFormat.class));
+ assertThat(docValuesFormatService.get("Disk").get(), instanceOf(DiskDocValuesFormat.class));
+ assertThat(docValuesFormatService.get("default").get(), instanceOf(Lucene45DocValuesFormat.class));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_default() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "default").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "default")
+ .put("index.codec.postings_format.my_format1.min_block_size", 16)
+ .put("index.codec.postings_format.my_format1.max_block_size", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(Elasticsearch090PostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(DefaultPostingsFormatProvider.class));
+ DefaultPostingsFormatProvider provider = (DefaultPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.minBlockSize(), equalTo(16));
+ assertThat(provider.maxBlockSize(), equalTo(64));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_memory() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "memory").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "memory")
+ .put("index.codec.postings_format.my_format1.pack_fst", true)
+ .put("index.codec.postings_format.my_format1.acceptable_overhead_ratio", 0.3f)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(MemoryPostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(MemoryPostingsFormatProvider.class));
+ MemoryPostingsFormatProvider provider = (MemoryPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.packFst(), equalTo(true));
+ assertThat(provider.acceptableOverheadRatio(), equalTo(0.3f));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_direct() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "direct").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "direct")
+ .put("index.codec.postings_format.my_format1.min_skip_count", 16)
+ .put("index.codec.postings_format.my_format1.low_freq_cutoff", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(DirectPostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(DirectPostingsFormatProvider.class));
+ DirectPostingsFormatProvider provider = (DirectPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.minSkipCount(), equalTo(16));
+ assertThat(provider.lowFreqCutoff(), equalTo(64));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_pulsing() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "pulsing").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "pulsing")
+ .put("index.codec.postings_format.my_format1.freq_cut_off", 2)
+ .put("index.codec.postings_format.my_format1.min_block_size", 32)
+ .put("index.codec.postings_format.my_format1.max_block_size", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(Pulsing41PostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(PulsingPostingsFormatProvider.class));
+ PulsingPostingsFormatProvider provider = (PulsingPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.freqCutOff(), equalTo(2));
+ assertThat(provider.minBlockSize(), equalTo(32));
+ assertThat(provider.maxBlockSize(), equalTo(64));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMappingLuceneBloom() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "bloom_default").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "bloom_pulsing").endObject()
+ .startObject("field3").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "bloom_filter_lucene")
+ .put("index.codec.postings_format.my_format1.desired_max_saturation", 0.2f)
+ .put("index.codec.postings_format.my_format1.saturation_limit", 0.8f)
+ .put("index.codec.postings_format.my_format1.delegate", "delegate1")
+ .put("index.codec.postings_format.delegate1.type", "direct")
+ .put("index.codec.postings_format.delegate1.min_skip_count", 16)
+ .put("index.codec.postings_format.delegate1.low_freq_cutoff", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider().get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider().get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+
+ assertThat(documentMapper.mappers().name("field3").mapper().postingsFormatProvider(), instanceOf(BloomFilterLucenePostingsFormatProvider.class));
+ BloomFilterLucenePostingsFormatProvider provider = (BloomFilterLucenePostingsFormatProvider) documentMapper.mappers().name("field3").mapper().postingsFormatProvider();
+ assertThat(provider.desiredMaxSaturation(), equalTo(0.2f));
+ assertThat(provider.saturationLimit(), equalTo(0.8f));
+ assertThat(provider.delegate(), instanceOf(DirectPostingsFormatProvider.class));
+ DirectPostingsFormatProvider delegate = (DirectPostingsFormatProvider) provider.delegate();
+ assertThat(delegate.minSkipCount(), equalTo(16));
+ assertThat(delegate.lowFreqCutoff(), equalTo(64));
+ }
+
+ @Test
+ public void testChangeUidPostingsFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_uid").field("postings_format", "memory").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).postingsFormatProvider().get(), instanceOf(MemoryPostingsFormat.class));
+ }
+
+ @Test
+ public void testChangeUidDocValuesFormat() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_uid").startObject("fielddata").field("format", "doc_values").endObject().field("doc_values_format", "disk").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).hasDocValues(), equalTo(true));
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+ }
+
+ @Test
+ public void testChangeIdDocValuesFormat() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").startObject("fielddata").field("format", "doc_values").endObject().field("doc_values_format", "disk").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(IdFieldMapper.class).hasDocValues(), equalTo(true));
+ assertThat(documentMapper.rootMapper(IdFieldMapper.class).docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.rootMapper(IdFieldMapper.class).docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+ }
+
+ @Test
+ public void testResolveDocValuesFormatsFromMapping_default() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("doc_values_format", "default").endObject()
+ .startObject("field2").field("type", "double").field("doc_values_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.my_format1.type", "default")
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider().get(), instanceOf(Lucene45DocValuesFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().docValuesFormatProvider(), instanceOf(DefaultDocValuesFormatProvider.class));
+ }
+
+ @Test
+ public void testResolveDocValuesFormatsFromMapping_memory() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("doc_values_format", "memory").endObject()
+ .startObject("field2").field("type", "double").field("doc_values_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.my_format1.type", "memory")
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider().get(), instanceOf(MemoryDocValuesFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().docValuesFormatProvider(), instanceOf(MemoryDocValuesFormatProvider.class));
+ }
+
+ @Test
+ public void testResolveDocValuesFormatsFromMapping_disk() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("doc_values_format", "disk").endObject()
+ .startObject("field2").field("type", "double").field("doc_values_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.my_format1.type", "disk")
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().docValuesFormatProvider(), instanceOf(DiskDocValuesFormatProvider.class));
+ }
+
+ @Test
+ public void testChangeVersionFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_version").field("doc_values_format", "disk").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(VersionFieldMapper.class).docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.rootMapper(VersionFieldMapper.class).docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+ }
+
+ private static CodecService createCodecService() {
+ return createCodecService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ private static CodecService createCodecService(Settings settings) {
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder()
+ .add(new SettingsModule(settings))
+ .add(new IndexNameModule(index))
+ .add(new IndexSettingsModule(index, settings))
+ .add(new SimilarityModule(settings))
+ .add(new CodecModule(settings))
+ .add(new MapperServiceModule())
+ .add(new AnalysisModule(settings))
+ .add(new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ })
+ .createInjector();
+ return injector.getInstance(CodecService.class);
+ }
+
+}
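For reference, custom formats like `my_format1` above are declared entirely through index settings and then referenced by name from field mappings via `postings_format` / `doc_values_format`. A minimal sketch, using only keys these tests exercise:

    // Sketch: declare a named postings format, then reference it from a
    // mapping with "postings_format": "my_format1".
    Settings indexSettings = ImmutableSettings.settingsBuilder()
            .put("index.codec.postings_format.my_format1.type", "pulsing")
            .put("index.codec.postings_format.my_format1.freq_cut_off", 2)
            .build();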
diff --git a/src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java b/src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java
new file mode 100644
index 0000000..a8e3a14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingformat;
+
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat;
+import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Simple smoke test for {@link org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat}
+ */
+public class DefaultPostingsFormatTests extends ElasticsearchTestCase {
+
+ private final class TestCodec extends Lucene46Codec {
+
+ @Override
+ public PostingsFormat getPostingsFormatForField(String field) {
+ return new Elasticsearch090PostingsFormat();
+ }
+ }
+
+ @Test
+ public void testUseDefault() throws IOException {
+
+ Codec codec = new TestCodec();
+ Directory d = new RAMDirectory();
+ IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, new WhitespaceAnalyzer(Lucene.VERSION));
+ config.setCodec(codec);
+ IndexWriter writer = new IndexWriter(d, config);
+ writer.addDocument(Arrays.asList(new TextField("foo", "bar", Store.YES), new TextField(UidFieldMapper.NAME, "1234", Store.YES)));
+ writer.commit();
+ DirectoryReader reader = DirectoryReader.open(writer, false);
+ List<AtomicReaderContext> leaves = reader.leaves();
+ assertThat(leaves.size(), equalTo(1));
+ AtomicReader ar = leaves.get(0).reader();
+ Terms terms = ar.terms("foo");
+ Terms uidTerms = ar.terms(UidFieldMapper.NAME);
+
+ assertThat(terms.size(), equalTo(1L));
+ assertThat(terms, not(instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class)));
+ assertThat(uidTerms, instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class));
+
+ reader.close();
+ writer.close();
+ d.close();
+ }
+
+ @Test
+ public void testNoUIDField() throws IOException {
+
+ Codec codec = new TestCodec();
+ Directory d = new RAMDirectory();
+ IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, new WhitespaceAnalyzer(Lucene.VERSION));
+ config.setCodec(codec);
+ IndexWriter writer = new IndexWriter(d, config);
+ for (int i = 0; i < 100; i++) {
+ writer.addDocument(Arrays.asList(new TextField("foo", "foo bar foo bar", Store.YES), new TextField("some_other_field", "1234", Store.YES)));
+ }
+ Merges.forceMerge(writer, 1);
+ writer.commit();
+
+ DirectoryReader reader = DirectoryReader.open(writer, false);
+ List<AtomicReaderContext> leaves = reader.leaves();
+ assertThat(leaves.size(), equalTo(1));
+ AtomicReader ar = leaves.get(0).reader();
+ Terms terms = ar.terms("foo");
+ Terms someOtherField = ar.terms("some_other_field");
+
+ assertThat(terms.size(), equalTo(2L));
+ assertThat(terms, not(instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class)));
+ assertThat(someOtherField, not(instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class)));
+ TermsEnum iterator = terms.iterator(null);
+ Set<String> expected = new HashSet<String>();
+ expected.add("foo");
+ expected.add("bar");
+ while (iterator.next() != null) {
+ expected.remove(iterator.term().utf8ToString());
+ }
+ assertThat(expected.size(), equalTo(0));
+ reader.close();
+ writer.close();
+ d.close();
+ }
+
+}
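
The smoke test above routes every field through Elasticsearch090PostingsFormat and then checks that only the _uid terms come back bloom-filtered, i.e. the format itself confines the bloom filter to the UID field. The same effect could be reached one level up, at Lucene's per-field codec hook; a hedged sketch of that alternative (not what the format does internally):

    // sketch only: dispatch on field name in the codec instead of inside the format
    class UidBloomCodec extends Lucene46Codec {
        private final PostingsFormat bloomed = new Elasticsearch090PostingsFormat();

        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            // bloom-filter the _uid postings; everything else keeps the Lucene default
            return UidFieldMapper.NAME.equals(field) ? bloomed : super.getPostingsFormatForField(field);
        }
    }
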
diff --git a/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java
new file mode 100644
index 0000000..eed806c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.index.DirectoryReader.listCommits;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * A set of tests for {@link org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy}.
+ */
+public class SnapshotDeletionPolicyTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ private RAMDirectory dir;
+ private SnapshotDeletionPolicy deletionPolicy;
+ private IndexWriter indexWriter;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ dir = new RAMDirectory();
+ deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
+ indexWriter = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, Lucene.STANDARD_ANALYZER)
+ .setIndexDeletionPolicy(deletionPolicy)
+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ indexWriter.close();
+ dir.close();
+ }
+
+ private Document testDocument() {
+ Document document = new Document();
+ document.add(new TextField("test", "1", Field.Store.YES));
+ return document;
+ }
+
+ @Test
+ public void testSimpleSnapshot() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // add another document and commit, resulting again in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and then add a document and commit, now we should have two commit points
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release the commit, add a document and commit, now we should be back to one commit point
+ assertThat(snapshot.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+
+ @Test
+ public void testMultiSnapshot() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // take two snapshots
+ SnapshotIndexCommit snapshot1 = deletionPolicy.snapshot();
+ SnapshotIndexCommit snapshot2 = deletionPolicy.snapshot();
+
+ // we should have two commit points
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release one snapshot, we should still have two commit points
+ assertThat(snapshot1.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release the second snapshot, we should be back to one commit
+ assertThat(snapshot2.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+
+ @Test
+ public void testMultiReleaseException() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit and release it twice; the second release should return false
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ assertThat(snapshot.release(), equalTo(true));
+ assertThat(snapshot.release(), equalTo(false));
+ }
+
+ @Test
+ public void testSimpleSnapshots() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // add another document and commit, resulting again in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and then add a document and commit, now we should have two commit points
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // now, take a snapshot of all the commits
+ SnapshotIndexCommits snapshots = deletionPolicy.snapshots();
+ assertThat(snapshots.size(), equalTo(2));
+
+ // release the snapshot, add a document and commit
+ // we should have 3 commit points since we are holding onto the first two with snapshots
+ // and we are using the keep-only-last deletion policy
+ assertThat(snapshot.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(3));
+
+ // now release the snapshots, we should be back to a single commit point
+ assertThat(snapshots.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+}
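
Taken together, these tests pin down the SnapshotDeletionPolicy contract: snapshot() keeps its commit point from being deleted across later commits, release() returns true exactly once, and snapshots() covers every live commit at once. A hedged usage sketch of the single-snapshot case, with try/finally so the commit cannot leak:

    SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
    try {
        // files listed here are guaranteed to stay on disk while the snapshot is held
        for (String fileName : snapshot.getFiles()) {
            // copy fileName somewhere safe...
        }
    } finally {
        snapshot.release(); // returns false (rather than throwing) if already released
    }
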
diff --git a/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java
new file mode 100644
index 0000000..c3d00f6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+
+/**
+ * A Hamcrest matcher that checks that every file of a {@link SnapshotIndexCommit}
+ * still exists in its directory.
+ */
+public class SnapshotIndexCommitExistsMatcher extends TypeSafeMatcher<SnapshotIndexCommit> {
+
+ @Override
+ public boolean matchesSafely(SnapshotIndexCommit snapshotIndexCommit) {
+ for (String fileName : snapshotIndexCommit.getFiles()) {
+ try {
+ if (!snapshotIndexCommit.getDirectory().fileExists(fileName)) {
+ return false;
+ }
+ } catch (IOException e) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("an index commit existence");
+ }
+
+ public static Matcher<SnapshotIndexCommit> snapshotIndexCommitExists() {
+ return new SnapshotIndexCommitExistsMatcher();
+ }
+}
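
The factory method gives the usual Hamcrest static-import usage; InternalEngineTests below calls it through MatcherAssert:

    import static org.elasticsearch.index.deletionpolicy.SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists;
    import static org.hamcrest.MatcherAssert.assertThat;

    assertThat(snapshotIndexCommit, snapshotIndexCommitExists());
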
diff --git a/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java b/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java
new file mode 100644
index 0000000..bdd72e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+
+/**
+ * A Hamcrest matcher that checks that an {@link Engine.Searcher} returns the
+ * expected total hit count for a given query.
+ */
+public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher<Engine.Searcher> {
+
+ private final Query query;
+
+ private final int totalHits;
+
+ public EngineSearcherTotalHitsMatcher(Query query, int totalHits) {
+ this.query = query;
+ this.totalHits = totalHits;
+ }
+
+ @Override
+ public boolean matchesSafely(Engine.Searcher searcher) {
+ try {
+ long count = Lucene.count(searcher.searcher(), query);
+ return count == totalHits;
+ } catch (IOException e) {
+ return false;
+ }
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("total hits of size ").appendValue(totalHits).appendText(" with query ").appendValue(query);
+ }
+
+ public static Matcher<Engine.Searcher> engineSearcherTotalHits(Query query, int totalHits) {
+ return new EngineSearcherTotalHitsMatcher(query, totalHits);
+ }
+
+ public static Matcher<Engine.Searcher> engineSearcherTotalHits(int totalHits) {
+ return new EngineSearcherTotalHitsMatcher(Queries.newMatchAllQuery(), totalHits);
+ }
+}
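
The two factory methods map to the two assertion shapes the engine tests below lean on: a match-all total and a per-query total. A usage sketch, assuming searcher is an Engine.Searcher acquired from the engine:

    MatcherAssert.assertThat(searcher, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
    MatcherAssert.assertThat(searcher,
            EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
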
diff --git a/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java
new file mode 100644
index 0000000..ee0ccc8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.BloomFilter;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+public class InternalEngineIntegrationTest extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testSettingLoadBloomFilterDefaultTrue() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ ensureGreen();
+ refresh();
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ final long segmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("segments with bloom: {}", segmentsMemoryWithBloom);
+
+ logger.info("updating the setting to unload bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
+ logger.info("waiting for memory to match without blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long segmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments without bloom: {}", segmentsMemoryWithoutBloom);
+ return segmentsMemoryWithoutBloom == (segmentsMemoryWithBloom - BloomFilter.Factory.DEFAULT.createFilter(1).getSizeInBytes());
+ }
+ });
+
+ logger.info("updating the setting to load bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, true)).get();
+ logger.info("waiting for memory to match with blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long newSegmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments with bloom: {}", newSegmentsMemoryWithBloom);
+ return newSegmentsMemoryWithBloom == segmentsMemoryWithBloom;
+ }
+ });
+ }
+
+ @Test
+ @Slow
+ public void testSettingLoadBloomFilterDefaultFalse() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1).put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ ensureGreen();
+ refresh();
+
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ final long segmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("segments without bloom: {}", segmentsMemoryWithoutBloom);
+
+ logger.info("updating the setting to load bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, true)).get();
+ logger.info("waiting for memory to match with blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long segmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments with bloom: {}", segmentsMemoryWithoutBloom);
+ return segmentsMemoryWithoutBloom == (segmentsMemoryWithBloom - BloomFilter.Factory.DEFAULT.createFilter(1).getSizeInBytes());
+ }
+ });
+
+ logger.info("updating the setting to unload bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
+ logger.info("waiting for memory to match without blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long newSegmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments without bloom: {}", newSegmentsMemoryWithoutBloom);
+ return newSegmentsMemoryWithoutBloom == segmentsMemoryWithoutBloom;
+ }
+ });
+ }
+
+ @Test
+ public void testSetIndexCompoundOnFlush() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(1, 1, "test");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, false)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(1, 2, "test");
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, true)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(2, 3, "test");
+ }
+
+ private void assertTotalCompoundSegments(int expectedCompound, int expectedTotal, String index) {
+ IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().prepareSegments(index).get();
+ IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(index);
+ Collection<IndexShardSegments> values = indexSegments.getShards().values();
+ int compounds = 0;
+ int total = 0;
+ for (IndexShardSegments indexShardSegments : values) {
+ for (ShardSegments s : indexShardSegments) {
+ for (Segment segment : s) {
+ if (segment.isSearch() && segment.getNumDocs() > 0) {
+ if (segment.isCompound()) {
+ compounds++;
+ }
+ total++;
+ }
+ }
+ }
+ }
+ assertThat(compounds, Matchers.equalTo(expectedCompound));
+ assertThat(total, Matchers.equalTo(expectedTotal));
+
+ }
+
+ @Test
+ public void test4093() {
+ cluster().ensureAtMostNumNodes(1);
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.store.type", "memory")
+ .put("cache.memory.large_cache_size", new ByteSizeValue(1, ByteSizeUnit.MB)) // no need to cache a lot
+ .put("index.number_of_shards", "1")
+ .put("index.number_of_replicas", "0")
+ .put("gateway.type", "none")
+ .put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
+ .put("index.warmer.enabled", false)
+ .build()).get());
+ NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().setJvm(true).get();
+ NodeInfo[] nodes = nodeInfos.getNodes();
+ for (NodeInfo info : nodes) {
+ ByteSizeValue directMemoryMax = info.getJvm().getMem().getDirectMemoryMax();
+ logger.debug(" --> JVM max direct memory for node [{}] is set to [{}]", info.getNode().getName(), directMemoryMax);
+ }
+ final int numDocs = between(30, 50); // 30 docs are enough to fail without the fix for #4093
+ logger.debug(" --> Indexing [{}] documents", numDocs);
+ for (int i = 0; i < numDocs; i++) {
+ if ((i + 1) % 10 == 0) {
+ logger.debug(" --> Indexed [{}] documents", i + 1);
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ }
+ client().prepareIndex("test", "type1")
+ .setSource("a", "" + i)
+ .setRefresh(true)
+ .execute()
+ .actionGet();
+ }
+ logger.debug(" --> Done indexing [{}] documents", numDocs);
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs);
+ }
+}
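
Both bloom-filter tests poll for the same memory identity: with a single indexed document, the only difference between loaded and unloaded blooms should be the size of one default bloom filter. A sketch of the arithmetic the awaitBusy() loops wait for (the two memory variables are illustrative, read from IndicesStatsResponse as in the tests):

    // the identity polled for in both tests (exactly one document indexed)
    long bloomBytes = BloomFilter.Factory.DEFAULT.createFilter(1).getSizeInBytes();
    assert segmentsMemoryWithoutBloom == segmentsMemoryWithBloom - bloomBytes;
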
diff --git a/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
new file mode 100644
index 0000000..0d99093
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
@@ -0,0 +1,1162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommitExistsMatcher;
+import org.elasticsearch.index.engine.*;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.SerialMergeSchedulerProvider;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.distributor.LeastUsedDistributor;
+import org.elasticsearch.index.store.ram.RamDirectoryService;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogSizeMatcher;
+import org.elasticsearch.index.translog.fs.FsTranslog;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Unit tests for {@link InternalEngine}: segment accounting, realtime gets,
+ * snapshots, and recovery.
+ */
+public class InternalEngineTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected ThreadPool threadPool;
+
+ private Store store;
+ private Store storeReplica;
+
+ protected Engine engine;
+ protected Engine replicaEngine;
+
+ private IndexSettingsService engineSettingsService;
+
+ private IndexSettingsService replicaSettingsService;
+
+ private Settings defaultSettings;
+
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ defaultSettings = ImmutableSettings.builder()
+ .put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, getRandom().nextBoolean())
+ .build(); // TODO randomize more settings
+ threadPool = new ThreadPool();
+ store = createStore();
+ store.deleteContent();
+ storeReplica = createStoreReplica();
+ storeReplica.deleteContent();
+ engineSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
+ engine = createEngine(engineSettingsService, store, createTranslog());
+ engine.start();
+ replicaSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
+ replicaEngine = createEngine(replicaSettingsService, storeReplica, createTranslogReplica());
+ replicaEngine.start();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ replicaEngine.close();
+ storeReplica.close();
+
+ engine.close();
+ store.close();
+
+ if (threadPool != null) {
+ threadPool.shutdownNow();
+ }
+ }
+
+ private Document testDocumentWithTextField() {
+ Document document = testDocument();
+ document.add(new TextField("value", "test", Field.Store.YES));
+ return document;
+ }
+
+ private Document testDocument() {
+ return new Document();
+ }
+
+
+ private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl, Document document, Analyzer analyzer, BytesReference source, boolean mappingsModified) {
+ Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field versionField = new NumericDocValuesField("_version", 0);
+ document.add(uidField);
+ document.add(versionField);
+ return new ParsedDocument(uidField, versionField, id, type, routing, timestamp, ttl, Arrays.asList(document), analyzer, source, mappingsModified);
+ }
+
+ protected Store createStore() throws IOException {
+ DirectoryService directoryService = new RamDirectoryService(shardId, EMPTY_SETTINGS);
+ return new Store(shardId, EMPTY_SETTINGS, null, null, directoryService, new LeastUsedDistributor(directoryService));
+ }
+
+ protected Store createStoreReplica() throws IOException {
+ DirectoryService directoryService = new RamDirectoryService(shardId, EMPTY_SETTINGS);
+ return new Store(shardId, EMPTY_SETTINGS, null, null, directoryService, new LeastUsedDistributor(directoryService));
+ }
+
+ protected Translog createTranslog() {
+ return new FsTranslog(shardId, EMPTY_SETTINGS, new File("work/fs-translog/primary"));
+ }
+
+ protected Translog createTranslogReplica() {
+ return new FsTranslog(shardId, EMPTY_SETTINGS, new File("work/fs-translog/replica"));
+ }
+
+ protected IndexDeletionPolicy createIndexDeletionPolicy() {
+ return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS);
+ }
+
+ protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
+ return new SnapshotDeletionPolicy(createIndexDeletionPolicy());
+ }
+
+ protected MergePolicyProvider<?> createMergePolicy() {
+ return new LogByteSizeMergePolicyProvider(store, new IndexSettingsService(new Index("test"), EMPTY_SETTINGS));
+ }
+
+ protected MergeSchedulerProvider createMergeScheduler() {
+ return new SerialMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool);
+ }
+
+ protected Engine createEngine(IndexSettingsService indexSettingsService, Store store, Translog translog) {
+ return createEngine(indexSettingsService, store, translog, createMergeScheduler());
+ }
+
+ protected Engine createEngine(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) {
+ return new InternalEngine(shardId, defaultSettings, threadPool, indexSettingsService, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), null, store, createSnapshotDeletionPolicy(), translog, createMergePolicy(), mergeSchedulerProvider,
+ new AnalysisService(shardId.index()), new SimilarityService(shardId.index()), new CodecService(shardId.index()));
+ }
+
+ protected static final BytesReference B_1 = new BytesArray(new byte[]{1});
+ protected static final BytesReference B_2 = new BytesArray(new byte[]{2});
+ protected static final BytesReference B_3 = new BytesArray(new byte[]{3});
+
+ @Test
+ public void testSegments() throws Exception {
+ List<Segment> segments = engine.segments();
+ assertThat(segments.isEmpty(), equalTo(true));
+ assertThat(engine.segmentsStats().getCount(), equalTo(0L));
+ final boolean defaultCompound = defaultSettings.getAsBoolean(InternalEngine.INDEX_COMPOUND_ON_FLUSH, true);
+
+ // create two docs and refresh
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(1));
+ assertThat(engine.segmentsStats().getCount(), equalTo(1L));
+ assertThat(segments.get(0).isCommitted(), equalTo(false));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ engine.flush(new Engine.Flush());
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(1));
+ assertThat(engine.segmentsStats().getCount(), equalTo(1L));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ engineSettingsService.refreshSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, false).build());
+
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(2));
+ assertThat(engine.segmentsStats().getCount(), equalTo(2L));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(2));
+ assertThat(engine.segmentsStats().getCount(), equalTo(2L));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ engineSettingsService.refreshSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, true).build());
+ ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("4"), doc4));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(3));
+ assertThat(engine.segmentsStats().getCount(), equalTo(3L));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ assertThat(segments.get(2).isCommitted(), equalTo(false));
+ assertThat(segments.get(2).isSearch(), equalTo(true));
+ assertThat(segments.get(2).getNumDocs(), equalTo(1));
+ assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(2).isCompound(), equalTo(true));
+ }
+
+ @Test
+ public void testSegmentsWithMergeFlag() throws Exception {
+ ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool);
+ final AtomicReference<CountDownLatch> waitTillMerge = new AtomicReference<CountDownLatch>();
+ final AtomicReference<CountDownLatch> waitForMerge = new AtomicReference<CountDownLatch>();
+ mergeSchedulerProvider.addListener(new MergeSchedulerProvider.Listener() {
+ @Override
+ public void beforeMerge(OnGoingMerge merge) {
+ try {
+ if (waitTillMerge.get() != null) {
+ waitTillMerge.get().countDown();
+ }
+ if (waitForMerge.get() != null) {
+ waitForMerge.get().await();
+ }
+ } catch (InterruptedException e) {
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ }
+
+ @Override
+ public void afterMerge(OnGoingMerge merge) {
+ }
+ });
+
+ Engine engine = createEngine(engineSettingsService, store, createTranslog(), mergeSchedulerProvider);
+ engine.start();
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+ assertThat(engine.segments().size(), equalTo(1));
+ index = new Engine.Index(null, newUid("2"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+ assertThat(engine.segments().size(), equalTo(2));
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+ index = new Engine.Index(null, newUid("3"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+ assertThat(engine.segments().size(), equalTo(3));
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+
+ waitTillMerge.set(new CountDownLatch(1));
+ waitForMerge.set(new CountDownLatch(1));
+ engine.optimize(new Engine.Optimize().maxNumSegments(1).waitForMerge(false));
+ waitTillMerge.get().await();
+
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), notNullValue());
+ }
+
+ waitForMerge.get().countDown();
+
+ index = new Engine.Index(null, newUid("4"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+
+ // now, optimize and wait for merges, see that we have no merge flag
+ engine.optimize(new Engine.Optimize().flush(true).maxNumSegments(1).waitForMerge(true));
+
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+
+ engine.close();
+ }
+
+ @Test
+ public void testSimpleOperations() throws Exception {
+ Engine.Searcher searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.release();
+
+ // create a document
+ Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // it's not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.release();
+
+ // but, we can still get it (in realtime)
+ Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_1.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // but it's not there for non-realtime gets
+ getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+ // refresh and it should be there
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ // now it's there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.release();
+
+ // also visible to non-realtime gets
+ getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ document.add(new Field(SourceFieldMapper.NAME, B_2.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // it's not updated yet...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // but, we can still get it (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_2.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // refresh and it should be updated
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.release();
+
+ // now delete
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+
+ // it's not deleted yet
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.release();
+
+ // but, get should not see it (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // refresh and it should be deleted
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // add it back
+ document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // it's not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // refresh and it should be there
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ // now it's there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // now flush
+ engine.flush(new Engine.Flush());
+
+ // and verify get (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // make sure we can still work with the engine
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // it's not updated yet...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // refresh and it should be updated
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.release();
+
+ engine.close();
+ }
+
+ @Test
+ public void testSearchResultRelease() throws Exception {
+ Engine.Searcher searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.release();
+
+ // create a document
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // it's not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.release();
+
+ // refresh and it should be there
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ // now it's there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ // don't release the search result yet...
+
+ // delete, refresh and do a new search, it should not be there
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ engine.refresh(new Engine.Refresh("test").force(false));
+ Engine.Searcher updateSearchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ updateSearchResult.release();
+
+ // the unreleased searcher should not see the delete yet...
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.release();
+ }
+
+ @Test
+ public void testSimpleSnapshot() throws Exception {
+ // create a document
+ ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc1));
+
+ final ExecutorService executorService = Executors.newCachedThreadPool();
+
+ engine.snapshot(new Engine.SnapshotHandler<Void>() {
+ @Override
+ public Void snapshot(final SnapshotIndexCommit snapshotIndexCommit1, final Translog.Snapshot translogSnapshot1) {
+ MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+ assertThat(translogSnapshot1.hasNext(), equalTo(true));
+ Translog.Create create1 = (Translog.Create) translogSnapshot1.next();
+ assertThat(create1.source().toBytesArray(), equalTo(B_1.toBytesArray()));
+ assertThat(translogSnapshot1.hasNext(), equalTo(false));
+
+ Future<Object> future = executorService.submit(new Callable<Object>() {
+ @Override
+ public Object call() throws Exception {
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ return null;
+ }
+ });
+
+ try {
+ future.get();
+ } catch (Exception e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+
+ engine.snapshot(new Engine.SnapshotHandler<Void>() {
+ @Override
+ public Void snapshot(SnapshotIndexCommit snapshotIndexCommit2, Translog.Snapshot translogSnapshot2) throws EngineException {
+ MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+ MatcherAssert.assertThat(snapshotIndexCommit2, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+ assertThat(snapshotIndexCommit2.getSegmentsFileName(), not(equalTo(snapshotIndexCommit1.getSegmentsFileName())));
+ assertThat(translogSnapshot2.hasNext(), equalTo(true));
+ Translog.Create create3 = (Translog.Create) translogSnapshot2.next();
+ assertThat(create3.source().toBytesArray(), equalTo(B_3.toBytesArray()));
+ assertThat(translogSnapshot2.hasNext(), equalTo(false));
+ return null;
+ }
+ });
+ return null;
+ }
+ });
+
+ engine.close();
+ }
+
+ @Test
+ public void testSimpleRecover() throws Exception {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ engine.flush(new Engine.Flush());
+
+ engine.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
+ try {
+ engine.flush(new Engine.Flush());
+ assertThat("flush is not allowed in phase 3", false, equalTo(true));
+ } catch (FlushNotAllowedEngineException e) {
+ // all is well
+ }
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws EngineException {
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ try {
+ engine.flush(new Engine.Flush());
+ assertThat("flush is not allowed in phase 3", false, equalTo(true));
+ } catch (FlushNotAllowedEngineException e) {
+ // all is well
+ }
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws EngineException {
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ try {
+ // even though we run on the recovery thread here, flush must still be rejected in phase 3
+ engine.flush(new Engine.Flush());
+ assertThat("flush is not allowed in phase 3", false, equalTo(true));
+ } catch (FlushNotAllowedEngineException e) {
+ // all is well
+ }
+ }
+ });
+
+ engine.flush(new Engine.Flush());
+ engine.close();
+ }
+
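+ // An operation indexed after the last flush but before recovery starts must appear
+ // in the phase 2 translog snapshot, leaving nothing for phase 3.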
+ @Test
+ public void testRecoverWithOperationsBetweenPhase1AndPhase2() throws Exception {
+ ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc1));
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+
+ engine.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws EngineException {
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.source().toBytesArray(), equalTo(B_2.toBytesArray()));
+ assertThat(snapshot.hasNext(), equalTo(false));
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws EngineException {
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ }
+ });
+
+ engine.flush(new Engine.Flush());
+ engine.close();
+ }
+
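+ // An operation applied while phase 2 is running must be replayed by the phase 3
+ // translog snapshot.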
+ @Test
+ public void testRecoverWithOperationsBetweenPhase1AndPhase2AndPhase3() throws Exception {
+ ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc1));
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+
+ engine.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws EngineException {
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(snapshot.hasNext(), equalTo(false));
+ assertThat(create.source().toBytesArray(), equalTo(B_2.toBytesArray()));
+
+ // add for phase3
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws EngineException {
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(snapshot.hasNext(), equalTo(false));
+ assertThat(create.source().toBytesArray(), equalTo(B_3.toBytesArray()));
+ }
+ });
+
+ engine.flush(new Engine.Flush());
+ engine.close();
+ }
+
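+ // Internal versioning: a fresh create gets version 1 on the primary, and replaying
+ // it with that version on the replica leaves it at 1.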
+ @Test
+ public void testVersioningNewCreate() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ create = new Engine.Create(null, newUid("1"), doc).version(create.version()).origin(REPLICA);
+ replicaEngine.create(create);
+ assertThat(create.version(), equalTo(1l));
+ }
+
+ @Test
+ public void testExternalVersioningNewCreate() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.create(create);
+ assertThat(create.version(), equalTo(12l));
+
+ create = new Engine.Create(null, newUid("1"), doc).version(create.version()).origin(REPLICA);
+ replicaEngine.create(create);
+ assertThat(create.version(), equalTo(12l));
+ }
+
+ @Test
+ public void testVersioningNewIndex() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc).version(index.version()).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1l));
+ }
+
+ @Test
+ public void testExternalVersioningNewIndex() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc).version(index.version()).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(12l));
+ }
+
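+ // Indexing with an explicit version that does not match the current one, whether
+ // stale or in the future, must throw a VersionConflictEngineException.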
+ @Test
+ public void testVersioningIndexConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ index = new Engine.Index(null, newUid("1"), doc).version(1l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // a future version should not work either
+ index = new Engine.Index(null, newUid("1"), doc).version(3l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testExternalVersioningIndexConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(14);
+ engine.index(index);
+ assertThat(index.version(), equalTo(14l));
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(13l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testVersioningIndexConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ engine.flush(new Engine.Flush());
+
+ index = new Engine.Index(null, newUid("1"), doc).version(1l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // a future version should not work either
+ index = new Engine.Index(null, newUid("1"), doc).version(3l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testExternalVersioningIndexConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(14);
+ engine.index(index);
+ assertThat(index.version(), equalTo(14l));
+
+ engine.flush(new Engine.Flush());
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(13);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
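+ // Deletes use the same version checks as indexing: stale and future versions
+ // conflict, a matching version applies and bumps the version, and index/create
+ // against the resulting tombstone conflicts as well.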
+ @Test
+ public void testVersioningDeleteConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1")).version(1l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // a future version should not work either
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // now actually delete
+ delete = new Engine.Delete("test", "1", newUid("1")).version(2l);
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // indexing into the deleted doc with an old version should fail
+ index = new Engine.Index(null, newUid("1"), doc).version(2l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // creating with an old version should fail as well
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc).version(2l);
+ try {
+ engine.create(create);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testVersioningDeleteConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ engine.flush(new Engine.Flush());
+
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1")).version(1l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // a future version should not work either
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ engine.flush(new Engine.Flush());
+
+ // now actually delete
+ delete = new Engine.Delete("test", "1", newUid("1")).version(2l);
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ engine.flush(new Engine.Flush());
+
+ // indexing into the deleted doc with an old version should fail
+ index = new Engine.Index(null, newUid("1"), doc).version(2l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // creating with an old version should fail as well
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc).version(2l);
+ try {
+ engine.create(create);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
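+ // Creating a document under an id that already exists must fail with
+ // DocumentAlreadyExistsException.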
+ @Test
+ public void testVersioningCreateExistsException() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ create = new Engine.Create(null, newUid("1"), doc);
+ try {
+ engine.create(create);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testVersioningCreateExistsExceptionWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ engine.flush(new Engine.Flush());
+
+ create = new Engine.Create(null, newUid("1"), doc);
+ try {
+ engine.create(create);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // all is well
+ }
+ }
+
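+ // Replicas apply operations by version, so a newer version may arrive out of
+ // order; once applied, older versions conflict.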
+ @Test
+ public void testVersioningReplicaConflict1() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // apply the second index to the replica, should work fine
+ index = new Engine.Index(null, newUid("1"), doc).version(2l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // now, the old one should not work
+ index = new Engine.Index(null, newUid("1"), doc).version(1l).origin(REPLICA);
+ try {
+ replicaEngine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // replaying the second version on the replica should fail as well
+ try {
+ index = new Engine.Index(null, newUid("1"), doc).version(2l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testVersioningReplicaConflict2() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // apply the first index to the replica, should work fine
+ index = new Engine.Index(null, newUid("1"), doc).version(1l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // index it again
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // now delete it
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"));
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // apply the delete on the replica (skipping the second index)
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l).origin(REPLICA);
+ replicaEngine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // deleting a second time with the same version should fail
+ try {
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l).origin(REPLICA);
+ replicaEngine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // now replay the second index on the replica; it should fail
+ try {
+ index = new Engine.Index(null, newUid("1"), doc).version(2l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
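+ // The created flag reports whether an index operation added a new document: true
+ // on first index, false on update, and true again after a delete.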
+ @Test
+ public void testBasicCreatedFlag() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertFalse(index.created());
+
+ engine.delete(new Engine.Delete(null, "1", newUid("1")));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+ }
+
+ @Test
+ public void testCreatedFlagAfterFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+
+ engine.delete(new Engine.Delete(null, "1", newUid("1")));
+
+ engine.flush(new Engine.Flush());
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+ }
+
+ protected Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java
new file mode 100644
index 0000000..b7efdbb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java
@@ -0,0 +1,358 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+public abstract class AbstractFieldDataImplTests extends AbstractFieldDataTests {
+
+ protected String one() {
+ return "1";
+ }
+
+ protected String two() {
+ return "2";
+ }
+
+ protected String three() {
+ return "3";
+ }
+
+ protected String four() {
+ return "4";
+ }
+
+ protected String toString(Object value) {
+ if (value instanceof BytesRef) {
+ return ((BytesRef) value).utf8ToString();
+ }
+ return value.toString();
+ }
+
+ protected abstract void fillSingleValueAllSet() throws Exception;
+
+ protected abstract void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception;
+
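+ // Field data is loaded for every document in the segment, including deleted ones,
+ // so each slot must still report at least one value.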
+ @Test
+ public void testDeletedDocs() throws Exception {
+ add2SingleValuedDocumentsAndDeleteOneOfThem();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicReaderContext readerContext = refreshReader();
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ BytesValues values = fieldData.getBytesValues(randomBoolean());
+ for (int i = 0; i < fieldData.getNumDocs(); ++i) {
+ assertThat(values.setDocument(i), greaterThanOrEqualTo(1));
+ }
+ }
+
+ @Test
+ public void testSingleValueAllSet() throws Exception {
+ fillSingleValueAllSet();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicReaderContext readerContext = refreshReader();
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(false));
+
+ assertThat(bytesValues.setDocument(0), equalTo(1));
+ assertThat(bytesValues.nextValue(), equalTo(new BytesRef(two())));
+ assertThat(bytesValues.setDocument(1), equalTo(1));
+ assertThat(bytesValues.nextValue(), equalTo(new BytesRef(one())));
+ assertThat(bytesValues.setDocument(2), equalTo(1));
+ assertThat(bytesValues.nextValue(), equalTo(new BytesRef(three())));
+
+ assertValues(bytesValues, 0, two());
+ assertValues(bytesValues, 1, one());
+ assertValues(bytesValues, 2, three());
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(one())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+ assertHashedValues(hashedBytesValues, 0, two());
+ assertHashedValues(hashedBytesValues, 1, one());
+ assertHashedValues(hashedBytesValues, 2, three());
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[1]).fields[0]), equalTo(two()));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three()));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ private HashedBytesRef convert(BytesValues values, int doc) {
+ if (values.setDocument(doc) > 0) {
+ return new HashedBytesRef(BytesRef.deepCopyOf(values.nextValue()), values.currentValueHash());
+ } else {
+ return new HashedBytesRef(new BytesRef());
+ }
+ }
+
+ protected abstract void fillSingleValueWithMissing() throws Exception;
+
+ public void assertValues(BytesValues values, int docId, BytesRef... actualValues) {
+ assertThat(values.setDocument(docId), equalTo(actualValues.length));
+ for (int i = 0; i < actualValues.length; i++) {
+ assertThat(values.nextValue(), equalTo(actualValues[i]));
+ }
+ }
+
+ public void assertValues(BytesValues values, int docId, String... actualValues) {
+ assertThat(values.setDocument(docId), equalTo(actualValues.length));
+ for (int i = 0; i < actualValues.length; i++) {
+ assertThat(values.nextValue(), equalTo(new BytesRef(actualValues[i])));
+ }
+ }
+
+ public void assertHashedValues(BytesValues values, int docId, BytesRef... actualValues) {
+ assertThat(values.setDocument(docId), equalTo(actualValues.length));
+ for (int i = 0; i < actualValues.length; i++) {
+ assertThat(values.nextValue(), equalTo(new HashedBytesRef(actualValues[i]).bytes));
+ assertThat(values.currentValueHash(), equalTo(new HashedBytesRef(actualValues[i]).hash));
+ }
+ }
+
+ public void assertHashedValues(BytesValues values, int docId, String... actualValues) {
+ assertThat(values.setDocument(docId), equalTo(actualValues.length));
+ for (int i = 0; i < actualValues.length; i++) {
+ assertThat(values.nextValue(), equalTo(new HashedBytesRef(actualValues[i]).bytes));
+ assertThat(values.currentValueHash(), equalTo(new HashedBytesRef(actualValues[i]).hash));
+ }
+ }
+
+ @Test
+ public void testSingleValueWithMissing() throws Exception {
+ fillSingleValueWithMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData
+ .getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(false));
+
+ assertValues(bytesValues, 0, two());
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, three());
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(new BytesRef())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+
+ assertHashedValues(hashedBytesValues, 0, two());
+ assertHashedValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertHashedValues(hashedBytesValues, 2, three());
+ }
+
+ protected abstract void fillMultiValueAllSet() throws Exception;
+
+ @Test
+ public void testMultiValueAllSet() throws Exception {
+ fillMultiValueAllSet();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(true));
+
+ assertValues(bytesValues, 0, two(), four());
+ assertValues(bytesValues, 1, one());
+ assertValues(bytesValues, 2, three());
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(one())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+
+ assertHashedValues(hashedBytesValues, 0, two(), four());
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs.length, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs.length, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ protected abstract void fillMultiValueWithMissing() throws Exception;
+
+ @Test
+ public void testMultiValueWithMissing() throws Exception {
+ fillMultiValueWithMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(true));
+
+ assertValues(bytesValues, 0, two(), four());
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(new BytesRef())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+
+ assertHashedValues(bytesValues, 0, two(), four());
+ assertHashedValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertHashedValues(bytesValues, 2, three());
+
+ assertHashedValues(hashedBytesValues, 0, two(), four());
+ assertHashedValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertHashedValues(hashedBytesValues, 2, three());
+ }
+
+ @Test
+ public void testMissingValueForAll() throws Exception {
+ fillAllMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ // Some impls (FST) report a size of 0, while others (PagedBytes) still take up memory even when no actual data is loaded
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThanOrEqualTo(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(false));
+
+ assertValues(bytesValues, 0, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, Strings.EMPTY_ARRAY);
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertValues(hashedBytesValues, 0, Strings.EMPTY_ARRAY);
+ assertValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(hashedBytesValues, 2, Strings.EMPTY_ARRAY);
+ }
+
+ protected abstract void fillAllMissing() throws Exception;
+
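+ // Sorting on a multi-valued string field: documents without a value sort last by
+ // default (missing defaults to _last); under MIN the comparator surfaces them as
+ // BytesRefFieldComparatorSource.MAX_TERM.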
+ @Test
+ public void testSortMultiValuesFields() throws Exception {
+ fillExtendedMvSet();
+ IndexFieldData indexFieldData = getForField("value");
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("!08"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("02"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("03"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("06"));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("08"));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+ assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(BytesRefFieldComparatorSource.MAX_TERM));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+ assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(BytesRefFieldComparatorSource.MAX_TERM));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("08"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("06"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("03"));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("!10"));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+ assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+ assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+ }
+
+ protected abstract void fillExtendedMvSet() throws Exception;
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java
new file mode 100644
index 0000000..43a0878
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MapperBuilders;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+// we might want to cut this over to LuceneTestCase
+public abstract class AbstractFieldDataTests extends ElasticsearchTestCase {
+
+ protected IndexFieldDataService ifdService;
+ protected IndexWriter writer;
+ protected AtomicReaderContext readerContext;
+
+ protected abstract FieldDataType getFieldDataType();
+
+ protected boolean hasDocValues() {
+ return false;
+ }
+
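+ // Builds a throw-away FieldMapper for the requested field data type so tests can
+ // pull IndexFieldData from the IndexFieldDataService without a real mapping.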
+ public <IFD extends IndexFieldData<?>> IFD getForField(String fieldName) {
+ return getForField(getFieldDataType(), fieldName);
+ }
+
+ public <IFD extends IndexFieldData<?>> IFD getForField(FieldDataType type, String fieldName) {
+ final FieldMapper<?> mapper;
+ final BuilderContext context = new BuilderContext(null, new ContentPath(1));
+ if (type.getType().equals("string")) {
+ mapper = MapperBuilders.stringField(fieldName).tokenized(false).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("float")) {
+ mapper = MapperBuilders.floatField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("double")) {
+ mapper = MapperBuilders.doubleField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("long")) {
+ mapper = MapperBuilders.longField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("int")) {
+ mapper = MapperBuilders.integerField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("short")) {
+ mapper = MapperBuilders.shortField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("byte")) {
+ mapper = MapperBuilders.byteField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("geo_point")) {
+ mapper = MapperBuilders.geoPointField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else {
+ throw new UnsupportedOperationException(type.getType());
+ }
+ return ifdService.getForField(mapper);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ // use LogByteSizeMergePolicy to preserve doc ID order
+ writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)).setMergePolicy(new LogByteSizeMergePolicy()));
+ }
+
+ protected AtomicReaderContext refreshReader() throws Exception {
+ if (readerContext != null) {
+ readerContext.reader().close();
+ }
+ AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
+ readerContext = reader.getContext();
+ return readerContext;
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ if (readerContext != null) {
+ readerContext.reader().close();
+ }
+ writer.close();
+ ifdService.clear();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java
new file mode 100644
index 0000000..817a7c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.search.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public abstract class AbstractNumericFieldDataTests extends AbstractFieldDataImplTests {
+
+ protected abstract FieldDataType getFieldDataType();
+
+ @Test
+ public void testSingleValueAllSetNumber() throws Exception {
+ fillSingleValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(false));
+
+ assertThat(longValues.setDocument(0), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(2l));
+
+ assertThat(longValues.setDocument(1), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(1l));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(false));
+
+ assertThat(doubleValues.setDocument(0), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+
+ assertThat(doubleValues.setDocument(1), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(1d));
+
+ assertThat(doubleValues.setDocument(2), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ @Test
+ public void testSingleValueWithMissingNumber() throws Exception {
+ fillSingleValueWithMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(false));
+
+ assertThat(longValues.setDocument(0), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(2l));
+
+ assertThat(longValues.setDocument(1), equalTo(0));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(false));
+
+ assertThat(doubleValues.setDocument(0), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+
+ assertThat(doubleValues.setDocument(1), equalTo(0));
+
+ assertThat(doubleValues.setDocument(2), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("1", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("1", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ @Test
+ public void testMultiValueAllSetNumber() throws Exception {
+ fillMultiValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(true));
+
+ assertThat(longValues.setDocument(0), equalTo(2));
+ assertThat(longValues.nextValue(), equalTo(2l));
+ assertThat(longValues.nextValue(), equalTo(4l));
+
+ assertThat(longValues.setDocument(1), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(1l));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(true));
+
+ assertThat(doubleValues.setDocument(0), equalTo(2));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+ assertThat(doubleValues.nextValue(), equalTo(4d));
+
+ assertThat(doubleValues.setDocument(1), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(1d));
+
+ assertThat(doubleValues.setDocument(2), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+ }
+
+ @Test
+ public void testMultiValueWithMissingNumber() throws Exception {
+ fillMultiValueWithMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(true));
+
+ assertThat(longValues.setDocument(0), equalTo(2));
+ assertThat(longValues.nextValue(), equalTo(2l));
+ assertThat(longValues.nextValue(), equalTo(4l));
+
+ assertThat(longValues.setDocument(1), equalTo(0));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(true));
+
+ assertThat(doubleValues.setDocument(0), equalTo(2));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+ assertThat(doubleValues.nextValue(), equalTo(4d));
+
+ assertThat(doubleValues.setDocument(1), equalTo(0));
+
+ assertThat(doubleValues.setDocument(2), equalTo(1));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+ }
+
+ @Test
+ public void testMissingValueForAll() throws Exception {
+ fillAllMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ // long values
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(false));
+
+ assertThat(longValues.setDocument(0), equalTo(0));
+ assertThat(longValues.setDocument(1), equalTo(0));
+ assertThat(longValues.setDocument(2), equalTo(0));
+
+ // double values
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(false));
+
+ assertThat(doubleValues.setDocument(0), equalTo(0));
+ assertThat(doubleValues.setDocument(1), equalTo(0));
+ assertThat(doubleValues.setDocument(2), equalTo(0));
+ }
+
+
+ protected void fillAllMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
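+ // Sorting on a multi-valued numeric field under MIN, MAX, SUM and AVG sort modes;
+ // documents without values sort last by default, and an explicit missing value
+ // ("_first" or a literal number) repositions them.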
+ @Test
+ public void testSortMultiValuesFields() throws Exception {
+ fillExtendedMvSet();
+ IndexFieldData indexFieldData = getForField("value");
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-10));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-8));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.SUM)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-27));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(15));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(21));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(27));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.SUM), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(27));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(21));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(15));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-27));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.AVG)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-9));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(5));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.AVG), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-9));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(6));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("-9", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(6));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("9", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java
new file mode 100644
index 0000000..a33b475
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util._TestUtil;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.search.nested.NestedFieldComparatorSource;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Base class for string field data tests: sorting with missing values,
+ * actual missing-value substitution, and nested (block join) sorting
+ * over randomized documents.
+ */
+public abstract class AbstractStringFieldDataTests extends AbstractFieldDataImplTests {
+
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new StringField("value", "4", Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ d.add(new StringField("value", "4", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new StringField("value", "1", Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit(); // TODO: Have tests with more docs for sorting
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ d.add(new StringField("value", "4", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillAllMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "02", Field.Store.NO));
+ d.add(new StringField("value", "04", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "03", Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new StringField("value", "04", Field.Store.NO));
+ d.add(new StringField("value", "05", Field.Store.NO));
+ d.add(new StringField("value", "06", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new StringField("value", "06", Field.Store.NO));
+ d.add(new StringField("value", "07", Field.Store.NO));
+ d.add(new StringField("value", "08", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new StringField("value", "08", Field.Store.NO));
+ d.add(new StringField("value", "09", Field.Store.NO));
+ d.add(new StringField("value", "10", Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new StringField("value", "!08", Field.Store.NO));
+ d.add(new StringField("value", "!09", Field.Store.NO));
+ d.add(new StringField("value", "!10", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Repeat(iterations=10)
+ public void testActualMissingValue() throws IOException {
+ testActualMissingValue(false);
+ }
+
+ @Repeat(iterations=10)
+ public void testActualMissingValueReverse() throws IOException {
+ testActualMissingValue(true);
+ }
+
+ public void testActualMissingValue(boolean reverse) throws IOException {
+ // missing value is set to an actual value
+ Document d = new Document();
+ final StringField s = new StringField("value", "", Field.Store.YES);
+ d.add(s);
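+ // values[0] is intentionally left null so that RandomPicks occasionally yields a document without the field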
+ final String[] values = new String[randomIntBetween(2, 30)];
+ for (int i = 1; i < values.length; ++i) {
+ values[i] = _TestUtil.randomUnicodeString(getRandom());
+ }
+ final int numDocs = atLeast(100);
+ for (int i = 0; i < numDocs; ++i) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value == null) {
+ writer.addDocument(new Document());
+ } else {
+ s.setStringValue(value);
+ writer.addDocument(d);
+ }
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+
+ final IndexFieldData indexFieldData = getForField("value");
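+ // substitute a concrete candidate value for missing docs (values[0] is the null slot)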
+ final String missingValue = values[1];
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ XFieldComparatorSource comparator = indexFieldData.comparatorSource(missingValue, SortMode.MIN);
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
+ assertEquals(numDocs, topDocs.totalHits);
+ BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
+ final BytesRef value = new BytesRef(docValue == null ? missingValue : docValue);
+ if (reverse) {
+ assertTrue(previousValue.compareTo(value) >= 0);
+ } else {
+ assertTrue(previousValue.compareTo(value) <= 0);
+ }
+ previousValue = value;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingFirst() throws IOException {
+ testSortMissing(true, false);
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingFirstReverse() throws IOException {
+ testSortMissing(true, true);
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingLast() throws IOException {
+ testSortMissing(false, false);
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingLastReverse() throws IOException {
+ testSortMissing(false, true);
+ }
+
+ public void testSortMissing(boolean first, boolean reverse) throws IOException {
+ Document d = new Document();
+ final StringField s = new StringField("value", "", Field.Store.YES);
+ d.add(s);
+ final String[] values = new String[randomIntBetween(2, 10)];
+ for (int i = 1; i < values.length; ++i) {
+ values[i] = _TestUtil.randomUnicodeString(getRandom());
+ }
+ final int numDocs = atLeast(100);
+ for (int i = 0; i < numDocs; ++i) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value == null) {
+ writer.addDocument(new Document());
+ } else {
+ s.setStringValue(value);
+ writer.addDocument(d);
+ }
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+ final IndexFieldData indexFieldData = getForField("value");
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ XFieldComparatorSource comparator = indexFieldData.comparatorSource(first ? "_first" : "_last", SortMode.MIN);
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
+ assertEquals(numDocs, topDocs.totalHits);
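+ // seed the ordering check: null while missing docs sort first (no bound yet), otherwise the extreme value for the sort direction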
+ BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
+ if (first && docValue == null) {
+ assertNull(previousValue);
+ } else if (!first && docValue != null) {
+ assertNotNull(previousValue);
+ }
+ final BytesRef value = docValue == null ? null : new BytesRef(docValue);
+ if (previousValue != null && value != null) {
+ if (reverse) {
+ assertTrue(previousValue.compareTo(value) >= 0);
+ } else {
+ assertTrue(previousValue.compareTo(value) <= 0);
+ }
+ }
+ previousValue = value;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ @Repeat(iterations=3)
+ public void testNestedSortingMin() throws IOException {
+ testNestedSorting(SortMode.MIN);
+ }
+
+ @Repeat(iterations=3)
+ public void testNestedSortingMax() throws IOException {
+ testNestedSorting(SortMode.MAX);
+ }
+
+ public void testNestedSorting(SortMode sortMode) throws IOException {
+ final String[] values = new String[randomIntBetween(2, 20)];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = _TestUtil.randomSimpleString(getRandom());
+ }
+ final int numParents = atLeast(100);
+ List<Document> docs = new ArrayList<Document>();
+ final OpenBitSet parents = new OpenBitSet();
+ for (int i = 0; i < numParents; ++i) {
+ docs.clear();
+ final int numChildren = randomInt(4);
+ for (int j = 0; j < numChildren; ++j) {
+ final Document child = new Document();
+ final int numValues = randomInt(3);
+ for (int k = 0; k < numValues; ++k) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ child.add(new StringField("text", value, Store.YES));
+ }
+ docs.add(child);
+ }
+ final Document parent = new Document();
+ parent.add(new StringField("type", "parent", Store.YES));
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value != null) {
+ parent.add(new StringField("text", value, Store.YES));
+ }
+ docs.add(parent);
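+ // the parent doc is the last in its block; record its docID in the parents bitset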
+ parents.set(parents.prevSetBit(parents.length() - 1) + docs.size());
+ writer.addDocuments(docs);
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ IndexFieldData<?> fieldData = getForField("text");
+ final BytesRef missingValue;
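+ // randomly choose the missing-value policy: smallest term, largest term, one of the indexed values, or a random string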
+ switch (randomInt(4)) {
+ case 0:
+ missingValue = new BytesRef();
+ break;
+ case 1:
+ missingValue = BytesRefFieldComparatorSource.MAX_TERM;
+ break;
+ case 2:
+ missingValue = new BytesRef(RandomPicks.randomFrom(getRandom(), values));
+ break;
+ default:
+ missingValue = new BytesRef(_TestUtil.randomSimpleString(getRandom()));
+ break;
+ }
+ BytesRefFieldComparatorSource innerSource = new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode);
+ Filter parentFilter = new TermFilter(new Term("type", "parent"));
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("text", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort);
+ assertTrue(topDocs.scoreDocs.length > 0);
+ BytesRef previous = null;
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final int docID = topDocs.scoreDocs[i].doc;
+ assertTrue("expected " + docID + " to be a parent", parents.get(docID));
+ BytesRef cmpValue = null;
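+ // recompute the expected min/max over this parent's children by hand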
+ for (int child = parents.prevSetBit(docID - 1) + 1; child < docID; ++child) {
+ String[] vals = searcher.doc(child).getValues("text");
+ if (vals.length == 0) {
+ vals = new String[] {missingValue.utf8ToString()};
+ }
+ for (String value : vals) {
+ final BytesRef bytesValue = new BytesRef(value);
+ if (cmpValue == null) {
+ cmpValue = bytesValue;
+ } else if (sortMode == SortMode.MIN && bytesValue.compareTo(cmpValue) < 0) {
+ cmpValue = bytesValue;
+ } else if (sortMode == SortMode.MAX && bytesValue.compareTo(cmpValue) > 0) {
+ cmpValue = bytesValue;
+ }
+ }
+ }
+ if (cmpValue == null) {
+ cmpValue = missingValue;
+ }
+ if (previous != null) {
+ assertNotNull(cmpValue);
+ assertTrue(previous.utf8ToString() + " / " + cmpValue.utf8ToString(), previous.compareTo(cmpValue) <= 0);
+ }
+ previous = cmpValue;
+ }
+ searcher.getIndexReader().close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java b/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java
new file mode 100644
index 0000000..2a205f4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class DisabledFieldDataFormatTests extends ElasticsearchIntegrationTest {
+
+ public void test() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ for (int i = 0; i < 10; ++i) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("s", "value" + i).execute().actionGet();
+ }
+
+ refresh();
+
+ // disable field data
+ updateFormat("disabled");
+
+ SearchResponse resp = null;
+ // try to run something that relies on field data and make sure that it fails
+ try {
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertTrue(resp.toString(), resp.getFailedShards() > 0);
+ } catch (SearchPhaseExecutionException e) {
+ // expected
+ }
+
+ // enable it again
+ updateFormat("paged_bytes");
+
+ // try to run something that relies on field data and make sure that it works
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertNoFailures(resp);
+
+ // disable it again
+ updateFormat("disabled");
+
+ // this time, it should work because segments are already loaded
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertNoFailures(resp);
+
+ // but add more docs and the new segment won't be loaded
+ client().prepareIndex("test", "type", "-1").setSource("s", "value").execute().actionGet();
+ refresh();
+ try {
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertTrue(resp.toString(), resp.getFailedShards() > 0);
+ } catch (SearchPhaseExecutionException e) {
+ // expected
+ }
+ }
+
+ private void updateFormat(String format) throws Exception {
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("s")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field("format", format)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java
new file mode 100644
index 0000000..77a5bb2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+
+/**
+ * Field data tests for the {@code double} field type.
+ */
+public class DoubleFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("double");
+ }
+
+ protected String one() {
+ return "1.0";
+ }
+
+ protected String two() {
+ return "2.0";
+ }
+
+ protected String three() {
+ return "3.0";
+ }
+
+ protected String four() {
+ return "4.0";
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new DoubleField("value", 1.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new DoubleField("value", 1.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2, Field.Store.NO));
+ d.add(new DoubleField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new DoubleField("value", 4, Field.Store.NO));
+ d.add(new DoubleField("value", 5, Field.Store.NO));
+ d.add(new DoubleField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new DoubleField("value", 6, Field.Store.NO));
+ d.add(new DoubleField("value", 7, Field.Store.NO));
+ d.add(new DoubleField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new DoubleField("value", 8, Field.Store.NO));
+ d.add(new DoubleField("value", 9, Field.Store.NO));
+ d.add(new DoubleField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new DoubleField("value", -8, Field.Store.NO));
+ d.add(new DoubleField("value", -9, Field.Store.NO));
+ d.add(new DoubleField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java
new file mode 100644
index 0000000..80e78fe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.CompositeReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+import static org.hamcrest.Matchers.*;
+
+public class DuelFieldDataTests extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return null;
+ }
+
+ public static int atLeast(Random random, int i) {
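+ // picks a count in [i, i + i/2); callers pass i >= 2 so the bound stays positive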
+ int min = i;
+ int max = min + (min / 2);
+ return min + random.nextInt(max - min);
+ }
+
+ @Test
+ public void testDuelAllTypesSingleValue() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("bytes").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", LuceneTestCase.defaultCodecSupportsSortedSet() ? "doc_values" : "fst").endObject().endObject()
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ for (int i = 0; i < atLeast; i++) {
+ String s = Integer.toString(randomByte());
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject();
+ for (String fieldName : Arrays.asList("bytes", "byte", "short", "integer", "long", "float", "double")) {
+ doc = doc.field(fieldName, s);
+ }
+
+ doc = doc.endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, DuelFieldDataTests.Type>();
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "fst")), Type.Bytes);
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes")), Type.Bytes);
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "array")), Type.Long);
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "array")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "array")), Type.Float);
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "doc_values")), Type.Long);
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "doc_values")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "doc_values")), Type.Float);
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "doc_values")), Type.Bytes);
+ }
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ Preprocessor pre = new ToDoublePreprocessor();
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+
+ ifdService.clear();
+ IndexFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+ ifdService.clear();
+ IndexFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+ duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre);
+ duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataBytes(random, atomicReaderContext, leftFieldData, rightFieldData, pre);
+ }
+ }
+ }
+
+
+ @Test
+ public void testDuelIntegers() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ final int maxNumValues = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ byte[] values = new byte[maxNumValues];
+ for (int i = 0; i < atLeast; i++) {
+ final int numValues = randomInt(maxNumValues);
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ values[j] = 1; // test deduplication
+ } else {
+ values[j] = randomByte();
+ }
+ }
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject();
+ for (String fieldName : Arrays.asList("byte", "short", "integer", "long")) {
+ doc = doc.startArray(fieldName);
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray();
+ }
+ doc = doc.endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, Type>();
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "array")), Type.Long);
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "doc_values")), Type.Long);
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexNumericFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+ ifdService.clear();
+ IndexNumericFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataLong(random, context, leftFieldData, rightFieldData);
+ duelFieldDataLong(random, context, rightFieldData, leftFieldData);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataLong(random, atomicReaderContext, leftFieldData, rightFieldData);
+ }
+ }
+
+ }
+
+ @Test
+ public void testDuelDoubles() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ final int maxNumValues = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ float[] values = new float[maxNumValues];
+ for (int i = 0; i < atLeast; i++) {
+ final int numValues = randomInt(maxNumValues);
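+ // a repeated default value (sometimes NaN) exercises deduplication and NaN handling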
+ float def = randomBoolean() ? randomFloat() : Float.NaN;
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ values[j] = def;
+ } else {
+ values[j] = randomFloat();
+ }
+ }
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("float");
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray().startArray("double");
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray().endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, Type>();
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "array")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "array")), Type.Float);
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "doc_values")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "doc_values")), Type.Float);
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexNumericFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexNumericFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ assertOrder(left.getValue().order(), leftFieldData, context);
+ assertOrder(right.getValue().order(), rightFieldData, context);
+ duelFieldDataDouble(random, context, leftFieldData, rightFieldData);
+ duelFieldDataDouble(random, context, rightFieldData, leftFieldData);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataDouble(random, atomicReaderContext, leftFieldData, rightFieldData);
+ }
+ }
+
+ }
+
+
+ @Test
+ public void testDuelStrings() throws Exception {
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ for (int i = 0; i < atLeast; i++) {
+ Document d = new Document();
+ d.add(new StringField("_id", "" + i, Field.Store.NO));
+ if (random.nextInt(15) != 0) {
+ int[] numbers = getNumbers(random, Integer.MAX_VALUE);
+ for (int j : numbers) {
+ final String s = English.longToEnglish(j);
+ d.add(new StringField("bytes", s, Field.Store.NO));
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ d.add(new SortedSetDocValuesField("bytes", new BytesRef(s)));
+ }
+ }
+ if (random.nextInt(10) == 0) {
+ d.add(new StringField("bytes", "", Field.Store.NO));
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ d.add(new SortedSetDocValuesField("bytes", new BytesRef()));
+ }
+ }
+ }
+ writer.addDocument(d);
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, DuelFieldDataTests.Type>();
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "fst")), Type.Bytes);
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes")), Type.Bytes);
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "doc_values")), Type.Bytes);
+ }
+ // TODO add filters
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ Preprocessor pre = new Preprocessor();
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre);
+ duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ assertOrder(AtomicFieldData.Order.BYTES, leftFieldData, atomicReaderContext);
+ assertOrder(AtomicFieldData.Order.BYTES, rightFieldData, atomicReaderContext);
+ duelFieldDataBytes(random, atomicReaderContext, leftFieldData, rightFieldData, pre);
+ }
+ perSegment.close();
+ }
+
+ }
+
+ public void testDuelGeoPoints() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("geopoint").field("type", "geo_point").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ int maxValuesPerDoc = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ // to test deduplication
+ double defaultLat = randomDouble() * 180 - 90;
+ double defaultLon = randomDouble() * 360 - 180;
+ for (int i = 0; i < atLeast; i++) {
+ final int numValues = randomInt(maxValuesPerDoc);
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("geopoint");
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ doc.startObject().field("lat", defaultLat).field("lon", defaultLon).endObject();
+ } else {
+ doc.startObject().field("lat", randomDouble() * 180 - 90).field("lon", randomDouble() * 360 - 180).endObject();
+ }
+ }
+ doc = doc.endArray().endObject();
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, DuelFieldDataTests.Type>();
+ final Distance precision = new Distance(1, randomFrom(DistanceUnit.values()));
+ typeMap.put(new FieldDataType("geo_point", ImmutableSettings.builder().put("format", "array")), Type.GeoPoint);
+ typeMap.put(new FieldDataType("geo_point", ImmutableSettings.builder().put("format", "compressed").put("precision", precision)), Type.GeoPoint);
+ typeMap.put(new FieldDataType("geo_point", ImmutableSettings.builder().put("format", "doc_values")), Type.GeoPoint);
+
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexGeoPointFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexGeoPointFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataGeoPoint(random, context, leftFieldData, rightFieldData, precision);
+ duelFieldDataGeoPoint(random, context, rightFieldData, leftFieldData, precision);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataGeoPoint(random, atomicReaderContext, leftFieldData, rightFieldData, precision);
+ }
+ perSegment.close();
+ }
+ }
+
+ private void assertOrder(AtomicFieldData.Order order, IndexFieldData<?> data, AtomicReaderContext context) throws Exception {
+ AtomicFieldData<?> leftData = randomBoolean() ? data.load(context) : data.loadDirect(context);
+ assertThat(leftData.getBytesValues(randomBoolean()).getOrder(), is(order));
+ }
+
+ private int[] getNumbers(Random random, int margin) {
+ if (random.nextInt(20) == 0) {
+ int[] num = new int[1 + random.nextInt(10)];
+ for (int i = 0; i < num.length; i++) {
+ int v = (random.nextBoolean() ? -1 * random.nextInt(margin) : random.nextInt(margin));
+ num[i] = v;
+ }
+ return num;
+ }
+ return new int[]{(random.nextBoolean() ? -1 * random.nextInt(margin) : random.nextInt(margin))};
+ }
+
+
+ private static void duelFieldDataBytes(Random random, AtomicReaderContext context, IndexFieldData<?> left, IndexFieldData<?> right, Preprocessor pre) throws Exception {
+ AtomicFieldData<?> leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicFieldData<?> rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ BytesValues leftBytesValues = leftData.getBytesValues(random.nextBoolean());
+ BytesValues rightBytesValues = rightData.getBytesValues(random.nextBoolean());
+ BytesRef leftSpare = new BytesRef();
+ BytesRef rightSpare = new BytesRef();
+
+ for (int i = 0; i < numDocs; i++) {
+ int numValues = 0;
+ assertThat((numValues = leftBytesValues.setDocument(i)), equalTo(rightBytesValues.setDocument(i)));
+ BytesRef previous = null;
+ for (int j = 0; j < numValues; j++) {
+
+ rightSpare.copyBytes(rightBytesValues.nextValue());
+ leftSpare.copyBytes(leftBytesValues.nextValue());
+ assertThat(rightSpare.hashCode(), equalTo(rightBytesValues.currentValueHash()));
+ assertThat(leftSpare.hashCode(), equalTo(leftBytesValues.currentValueHash()));
+ if (previous != null && leftBytesValues.getOrder() == rightBytesValues.getOrder()) { // we can only compare the
+ assertThat(pre.compare(previous, rightSpare), lessThan(0));
+ }
+ previous = BytesRef.deepCopyOf(rightSpare);
+ pre.toString(rightSpare);
+ pre.toString(leftSpare);
+ assertThat(pre.toString(leftSpare), equalTo(pre.toString(rightSpare)));
+ if (leftSpare.equals(rightSpare)) {
+ assertThat(leftBytesValues.currentValueHash(), equalTo(rightBytesValues.currentValueHash()));
+ }
+ }
+ }
+ }
+
+
+ private static void duelFieldDataDouble(Random random, AtomicReaderContext context, IndexNumericFieldData<?> left, IndexNumericFieldData<?> right) throws Exception {
+ AtomicNumericFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicNumericFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ DoubleValues leftDoubleValues = leftData.getDoubleValues();
+ DoubleValues rightDoubleValues = rightData.getDoubleValues();
+ for (int i = 0; i < numDocs; i++) {
+ int numValues = 0;
+ assertThat((numValues = leftDoubleValues.setDocument(i)), equalTo(rightDoubleValues.setDocument(i)));
+ double previous = 0;
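+ // NaN must match NaN exactly; other values must agree within a small epsilon and be strictly increasing per doc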
+ for (int j = 0; j < numValues; j++) {
+ double current = rightDoubleValues.nextValue();
+ if (Double.isNaN(current)) {
+ assertTrue(Double.isNaN(leftDoubleValues.nextValue()));
+ } else {
+ assertThat(leftDoubleValues.nextValue(), closeTo(current, 0.0001));
+ }
+ if (j > 0) {
+ assertThat(Double.compare(previous, current), lessThan(0));
+ }
+ previous = current;
+ }
+ }
+ }
+
+ private static void duelFieldDataLong(Random random, AtomicReaderContext context, IndexNumericFieldData<?> left, IndexNumericFieldData right) throws Exception {
+ AtomicNumericFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicNumericFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ LongValues leftLongValues = leftData.getLongValues();
+ LongValues rightLongValues = rightData.getLongValues();
+ for (int i = 0; i < numDocs; i++) {
+ int numValues = 0;
+ long previous = 0;
+ assertThat((numValues = leftLongValues.setDocument(i)), equalTo(rightLongValues.setDocument(i)));
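+ // values for one document must come back deduplicated and in strictly increasing order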
+ for (int j = 0; j < numValues; j++) {
+ long current;
+ assertThat(leftLongValues.nextValue(), equalTo(current = rightLongValues.nextValue()));
+ if (j > 0) {
+ assertThat(previous, lessThan(current));
+ }
+ previous = current;
+ }
+ }
+ }
+
+ private static void duelFieldDataGeoPoint(Random random, AtomicReaderContext context, IndexGeoPointFieldData<?> left, IndexGeoPointFieldData<?> right, Distance precision) throws Exception {
+ AtomicGeoPointFieldData<?> leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicGeoPointFieldData<?> rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ GeoPointValues leftValues = leftData.getGeoPointValues();
+ GeoPointValues rightValues = rightData.getGeoPointValues();
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = leftValues.setDocument(i);
+ assertEquals(numValues, rightValues.setDocument(i));
+ List<GeoPoint> leftPoints = Lists.newArrayList();
+ List<GeoPoint> rightPoints = Lists.newArrayList();
+ for (int j = 0; j < numValues; ++j) {
+ GeoPoint l = leftValues.nextValue();
+ leftPoints.add(new GeoPoint(l.getLat(), l.getLon()));
+ GeoPoint r = rightValues.nextValue();
+ rightPoints.add(new GeoPoint(r.getLat(), r.getLon()));
+ }
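+ // compare as unordered sets: each point needs a counterpart on the other side within the configured precision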
+ for (GeoPoint l : leftPoints) {
+ assertTrue("Couldn't find " + l + " among " + rightPoints, contains(l, rightPoints, precision));
+ }
+ for (GeoPoint r : rightPoints) {
+ assertTrue("Couldn't find " + r + " among " + leftPoints, contains(r, leftPoints, precision));
+ }
+ }
+ }
+
+ private static boolean contains(GeoPoint point, List<GeoPoint> set, Distance precision) {
+ for (GeoPoint r : set) {
+ final double distance = GeoDistance.PLANE.calculate(point.getLat(), point.getLon(), r.getLat(), r.getLon(), DistanceUnit.METERS);
+ if (new Distance(distance, DistanceUnit.METERS).compareTo(precision) <= 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private static class Preprocessor {
+
+ public String toString(BytesRef ref) {
+ return ref.utf8ToString();
+ }
+
+ public int compare(BytesRef a, BytesRef b) {
+ return a.compareTo(b);
+ }
+ }
+
+ private static class ToDoublePreprocessor extends Preprocessor {
+
+ @Override
+ public String toString(BytesRef ref) {
+ assertTrue(ref.length > 0);
+ return Double.toString(Double.parseDouble(super.toString(ref)));
+ }
+
+ @Override
+ public int compare(BytesRef a, BytesRef b) {
+ return Double.compare(Double.parseDouble(super.toString(a)), Double.parseDouble(super.toString(b)));
+ }
+ }
+
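+ // Field types under test, each tagged with the value order its field data is expected
+ // to expose: numeric order for numbers, byte order for strings, none for geo points.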
+ private enum Type {
+ Float(AtomicFieldData.Order.NUMERIC),
+ Double(AtomicFieldData.Order.NUMERIC),
+ Integer(AtomicFieldData.Order.NUMERIC),
+ Long(AtomicFieldData.Order.NUMERIC),
+ Bytes(AtomicFieldData.Order.BYTES),
+ GeoPoint(AtomicFieldData.Order.NONE);
+
+ private final AtomicFieldData.Order order;
+ Type(AtomicFieldData.Order order) {
+ this.order = order;
+ }
+
+ public AtomicFieldData.Order order() {
+ return order;
+ }
+ }
+
+}
+
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java
new file mode 100644
index 0000000..e9047c2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+
+/** Runs the shared string field data tests against the "fst" format. */
+public class FSTPackedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", ImmutableSettings.builder().put("format", "fst"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java b/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java
new file mode 100644
index 0000000..fdeb988
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
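+// Verifies end-to-end that a regex fielddata filter in the mapping restricts loaded
+// terms: the filtered "name" field only exposes terms matching "^bac.*", while the
+// unfiltered sibling field exposes every term.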
+public class FieldDataFilterIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRegexpFilter() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", between(1,5))
+ .put("index.number_of_replicas", 0));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .startObject("fielddata")
+ .startObject("filter")
+ .startObject("regex")
+ .field("pattern", "^bac.*")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("not_filtered")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type", mapping));
+ ensureGreen();
+ int numDocs = atLeast(5);
+ for (int i = 0; i < numDocs; i++) {
+ client().prepareIndex("test", "type", "" + 0).setSource("name", "bacon bastards", "not_filtered", "bacon bastards").get();
+ }
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("name").field("name"))
+ .addFacet(termsFacet("not_filtered").field("not_filtered")).get();
+ Facets facets = searchResponse.getFacets();
+ TermsFacet nameFacet = facets.facet("name");
+ assertThat(nameFacet.getEntries().size(), Matchers.equalTo(1));
+ assertThat(nameFacet.getEntries().get(0).getTerm().string(), Matchers.equalTo("bacon"));
+
+ TermsFacet notFilteredFacet = facets.facet("not_filtered");
+ assertThat(notFilteredFacet.getEntries().size(), Matchers.equalTo(2));
+ assertThat(notFilteredFacet.getEntries().get(0).getTerm().string(), Matchers.isOneOf("bacon", "bastards"));
+ assertThat(notFilteredFacet.getEntries().get(1).getTerm().string(), Matchers.isOneOf("bacon", "bastards"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java b/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java
new file mode 100644
index 0000000..91dc5ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.fielddata.AtomicFieldData.WithOrdinals;
+import org.elasticsearch.index.fielddata.ScriptDocValues.Strings;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.merge.Merges;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+
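+// Unit-level checks that fielddata term filters (frequency and regex) restrict the
+// loaded ordinals for both the "fst" and "paged_bytes" string formats.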
+public class FilterFieldDataTest extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ // unused here: every test case constructs its own FieldDataType explicitly
+ return null;
+ }
+
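+ // Frequency filtering: the min/max bounds are given either as absolute doc counts or
+ // as equivalent ratios (chosen at random per run), and min_segment_size appears to
+ // disable filtering for segments with too few value-carrying docs (the med_freq
+ // cases). Terms "100", "10" and "5" are indexed into 10, 100 and 200 of the 1000
+ // docs, so the surviving ordinals are known exactly.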
+ @Test
+ public void testFilterByFrequency() throws Exception {
+ Random random = getRandom();
+ for (int i = 0; i < 1000; i++) {
+ Document d = new Document();
+ d.add(new StringField("id", "" + i, Field.Store.NO));
+ if (i % 100 == 0) {
+ d.add(new StringField("high_freq", "100", Field.Store.NO));
+ d.add(new StringField("low_freq", "100", Field.Store.NO));
+ d.add(new StringField("med_freq", "100", Field.Store.NO));
+ }
+ if (i % 10 == 0) {
+ d.add(new StringField("high_freq", "10", Field.Store.NO));
+ d.add(new StringField("med_freq", "10", Field.Store.NO));
+ }
+ if (i % 5 == 0) {
+ d.add(new StringField("high_freq", "5", Field.Store.NO));
+ }
+ writer.addDocument(d);
+ }
+ Merges.forceMerge(writer, 1);
+ AtomicReaderContext context = refreshReader();
+ String[] formats = new String[] { "fst", "paged_bytes"};
+
+ for (String format : formats) {
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 100).put("filter.frequency.min", 0.0d).put("filter.frequency.max", random.nextBoolean() ? 100 : 0.5d));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(2L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("100"));
+ }
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 100).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d).put("filter.frequency.max", 201));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(1L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("5"));
+ }
+
+ {
+ ifdService.clear(); // test # docs with value
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 101).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "med_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(2L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("100"));
+ }
+
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 101).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "med_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(2L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("100"));
+ }
+
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.regex.pattern", "\\d{2,3}") // allows 10 & 100
+ .put("filter.frequency.min_segment_size", 0)
+ .put("filter.frequency.min", random.nextBoolean() ? 1 : 1d/200.0d) // 100, 10, 5
+ .put("filter.frequency.max", random.nextBoolean() ? 99 : 99d/200.0d)); // 100
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(1L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("100"));
+ }
+ }
+ }
+
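+ // Regex filtering alone: pattern "\d" keeps only the single-digit term "5", while
+ // "\d{1,2}" keeps "5" and "10" and drops "100".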
+ @Test
+ public void testFilterByRegExp() throws Exception {
+
+ for (int i = 0; i < 1000; i++) {
+ Document d = new Document();
+ d.add(new StringField("id", "" + i, Field.Store.NO));
+ if (i % 100 == 0) {
+ d.add(new StringField("high_freq", "100", Field.Store.NO));
+ }
+ if (i % 10 == 0) {
+ d.add(new StringField("high_freq", "10", Field.Store.NO));
+ }
+ if (i % 5 == 0) {
+ d.add(new StringField("high_freq", "5", Field.Store.NO));
+ }
+ writer.addDocument(d);
+ }
+ Merges.forceMerge(writer, 1);
+ AtomicReaderContext context = refreshReader();
+ String[] formats = new String[] { "fst", "paged_bytes"};
+ for (String format : formats) {
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.regex.pattern", "\\d"));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(1L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("5"));
+ }
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.regex.pattern", "\\d{1,2}"));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(ordinals.getNumOrds(), equalTo(2L));
+ assertThat(ordinals.getNumDocs(), equalTo(1000));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("5"));
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java
new file mode 100644
index 0000000..a91797d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+
+/** Float variants of the shared numeric field data test fixtures. */
+public class FloatFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ protected String one() {
+ return "1.0";
+ }
+
+ protected String two() {
+ return "2.0";
+ }
+
+ protected String three() {
+ return "3.0";
+ }
+
+ protected String four() {
+ return "4.0";
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 1.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 1.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2, Field.Store.NO));
+ d.add(new FloatField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new FloatField("value", 4, Field.Store.NO));
+ d.add(new FloatField("value", 5, Field.Store.NO));
+ d.add(new FloatField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new FloatField("value", 6, Field.Store.NO));
+ d.add(new FloatField("value", 7, Field.Store.NO));
+ d.add(new FloatField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new FloatField("value", 8, Field.Store.NO));
+ d.add(new FloatField("value", 9, Field.Store.NO));
+ d.add(new FloatField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new FloatField("value", -8, Field.Store.NO));
+ d.add(new FloatField("value", -9, Field.Store.NO));
+ d.add(new FloatField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java
new file mode 100644
index 0000000..2f85936
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.plain.*;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MapperBuilders;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class IndexFieldDataServiceTests extends ElasticsearchTestCase {
+
+ private static final Settings DOC_VALUES_SETTINGS = ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, FieldDataType.DOC_VALUES_FORMAT_VALUE).build();
+
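+ // Default implementations per mapper type: with doc values enabled the DV-backed
+ // classes are used; otherwise strings fall back to paged bytes and numerics to
+ // packed or plain arrays.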
+ @SuppressWarnings("unchecked")
+ public void testGetForFieldDefaults() {
+ final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ for (boolean docValues : Arrays.asList(true, false)) {
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ final StringFieldMapper stringMapper = new StringFieldMapper.Builder("string").tokenized(false).fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
+ ifdService.clear();
+ IndexFieldData<?> fd = ifdService.getForField(stringMapper);
+ if (docValues) {
+ assertTrue(fd instanceof SortedSetDVBytesIndexFieldData);
+ } else {
+ assertTrue(fd instanceof PagedBytesIndexFieldData);
+ }
+
+ for (FieldMapper<?> mapper : Arrays.asList(
+ new ByteFieldMapper.Builder("int").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx),
+ new ShortFieldMapper.Builder("int").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx),
+ new IntegerFieldMapper.Builder("int").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx),
+ new LongFieldMapper.Builder("long").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx)
+ )) {
+ ifdService.clear();
+ fd = ifdService.getForField(mapper);
+ if (docValues) {
+ assertTrue(fd instanceof BinaryDVNumericIndexFieldData);
+ } else {
+ assertTrue(fd instanceof PackedArrayIndexFieldData);
+ }
+ }
+
+ final FloatFieldMapper floatMapper = new FloatFieldMapper.Builder("float").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(floatMapper);
+ if (docValues) {
+ assertTrue(fd instanceof BinaryDVNumericIndexFieldData);
+ } else {
+ assertTrue(fd instanceof FloatArrayIndexFieldData);
+ }
+
+ final DoubleFieldMapper doubleMapper = new DoubleFieldMapper.Builder("double").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(doubleMapper);
+ if (docValues) {
+ assertTrue(fd instanceof BinaryDVNumericIndexFieldData);
+ } else {
+ assertTrue(fd instanceof DoubleArrayIndexFieldData);
+ }
+ }
+ }
+
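+ // An explicit fielddata format overrides previously applied doc_values settings:
+ // the last fieldDataSettings(...) call wins.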
+ @SuppressWarnings("unchecked")
+ public void testByPassDocValues() {
+ final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ final StringFieldMapper stringMapper = MapperBuilders.stringField("string").tokenized(false).fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(ImmutableSettings.builder().put("format", "fst").build()).build(ctx);
+ ifdService.clear();
+ IndexFieldData<?> fd = ifdService.getForField(stringMapper);
+ assertTrue(fd instanceof FSTBytesIndexFieldData);
+
+ final Settings fdSettings = ImmutableSettings.builder().put("format", "array").build();
+ for (FieldMapper<?> mapper : Arrays.asList(
+ new ByteFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new ShortFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new IntegerFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new LongFieldMapper.Builder("long").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx)
+ )) {
+ ifdService.clear();
+ fd = ifdService.getForField(mapper);
+ assertTrue(fd instanceof PackedArrayIndexFieldData);
+ }
+
+ final FloatFieldMapper floatMapper = MapperBuilders.floatField("float").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(floatMapper);
+ assertTrue(fd instanceof FloatArrayIndexFieldData);
+
+ final DoubleFieldMapper doubleMapper = MapperBuilders.doubleField("double").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(doubleMapper);
+ assertTrue(fd instanceof DoubleArrayIndexFieldData);
+ }
+
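+ // Changing the fielddata format only affects newly loaded segments: after
+ // onMappingUpdate(), segments already loaded keep their paged_bytes data while
+ // fresh segments are loaded as FST.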
+ public void testChangeFieldDataFormat() throws Exception {
+ final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ final StringFieldMapper mapper1 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx);
+ final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+ Document doc = new Document();
+ doc.add(new StringField("s", "thisisastring", Store.NO));
+ writer.addDocument(doc);
+ final IndexReader reader1 = DirectoryReader.open(writer, true);
+ IndexFieldData<?> ifd = ifdService.getForField(mapper1);
+ assertThat(ifd, instanceOf(PagedBytesIndexFieldData.class));
+ Set<AtomicReader> oldSegments = Collections.newSetFromMap(new IdentityHashMap<AtomicReader, Boolean>());
+ for (AtomicReaderContext arc : reader1.leaves()) {
+ oldSegments.add(arc.reader());
+ AtomicFieldData<?> afd = ifd.load(arc);
+ assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
+ }
+ // write new segment
+ writer.addDocument(doc);
+ final IndexReader reader2 = DirectoryReader.open(writer, true);
+ final StringFieldMapper mapper2 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, "fst").build()).build(ctx);
+ ifdService.onMappingUpdate();
+ ifd = ifdService.getForField(mapper2);
+ assertThat(ifd, instanceOf(FSTBytesIndexFieldData.class));
+ for (AtomicReaderContext arc : reader2.leaves()) {
+ AtomicFieldData<?> afd = ifd.load(arc);
+ if (oldSegments.contains(arc.reader())) {
+ assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
+ } else {
+ assertThat(afd, instanceOf(FSTBytesAtomicFieldData.class));
+ }
+ }
+ reader1.close();
+ reader2.close();
+ writer.close();
+ writer.getDirectory().close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java
new file mode 100644
index 0000000..288cfea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.fielddata.plain.PackedArrayAtomicFieldData;
+import org.elasticsearch.index.merge.Merges;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for all integer types (byte, short, int, long).
+ */
+public class LongFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ // we don't want to optimize the type so it will always be a long...
+ return new FieldDataType("long", ImmutableSettings.builder());
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
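+ // Values just outside the int range must survive the packed-array representation
+ // unchanged, i.e. the type is not narrowed below long.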
+ @Test
+ public void testOptimizeTypeLong() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", Integer.MAX_VALUE + 1l, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", Integer.MIN_VALUE - 1l, Field.Store.NO));
+ writer.addDocument(d);
+
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData, instanceOf(PackedArrayAtomicFieldData.class));
+ assertThat(getFirst(fieldData.getLongValues(), 0), equalTo((long) Integer.MAX_VALUE + 1L));
+ assertThat(getFirst(fieldData.getLongValues(), 1), equalTo((long) Integer.MIN_VALUE - 1L));
+ }
+
+ private static long getFirst(LongValues values, int docId) {
+ final int numValues = values.setDocument(docId);
+ assertThat(numValues, is(1));
+ return values.nextValue();
+ }
+
+ private static double getFirst(DoubleValues values, int docId) {
+ final int numValues = values.setDocument(docId);
+ assertThat(numValues, is(1));
+ return values.nextValue();
+ }
+
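+ // Long field data doubles as date data in scripts: getDate() interprets the raw
+ // long value as epoch millis in the UTC time zone.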
+ @Test
+ public void testDateScripts() throws Exception {
+ fillSingleValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ ScriptDocValues.Longs scriptValues = (ScriptDocValues.Longs) fieldData.getScriptValues();
+ scriptValues.setNextDocId(0);
+ assertThat(scriptValues.getValue(), equalTo(2L));
+ assertThat(scriptValues.getDate().getMillis(), equalTo(2L));
+ assertThat(scriptValues.getDate().getZone(), equalTo(DateTimeZone.UTC));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 1, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 1, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ d.add(new LongField("value", 5, Field.Store.NO));
+ d.add(new LongField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new LongField("value", 6, Field.Store.NO));
+ d.add(new LongField("value", 7, Field.Store.NO));
+ d.add(new LongField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new LongField("value", 8, Field.Store.NO));
+ d.add(new LongField("value", 9, Field.Store.NO));
+ d.add(new LongField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new LongField("value", -8, Field.Store.NO));
+ d.add(new LongField("value", -9, Field.Store.NO));
+ d.add(new LongField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365;
+
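+ // Random value distributions used to stress the packed representation: dense vs.
+ // sparse documents, single- vs. multi-valued fields, small enum-like ranges, full
+ // random longs, and date-like values clustered between 2010 and 2012.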
+ // TODO: use random() when migrating to JUnit
+ public enum Data {
+ SINGLE_VALUED_DENSE_ENUM {
+ public int numValues(Random r) {
+ return 1;
+ }
+ @Override
+ public long nextValue(Random r) {
+ return 1 + r.nextInt(16);
+ }
+ },
+ SINGLE_VALUED_DENSE_DATE {
+ public int numValues(Random r) {
+ return 1;
+ }
+ @Override
+ public long nextValue(Random r) {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + r.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_DATE {
+ public int numValues(Random r) {
+ return r.nextInt(3);
+ }
+ @Override
+ public long nextValue(Random r) {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + r.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_ENUM {
+ public int numValues(Random r) {
+ return r.nextInt(3);
+ }
+ @Override
+ public long nextValue(Random r) {
+ return 3 + r.nextInt(8);
+ }
+ },
+ SINGLE_VALUED_SPARSE_RANDOM {
+ public int numValues(Random r) {
+ return r.nextFloat() < 0.1f ? 1 : 0;
+ }
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ },
+ MULTI_VALUED_SPARSE_RANDOM {
+ public int numValues(Random r) {
+ return r.nextFloat() < 0.1f ? 1 + r.nextInt(5) : 0;
+ }
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ },
+ MULTI_VALUED_DENSE_RANDOM {
+ public int numValues(Random r) {
+ return 1 + r.nextInt(3);
+ }
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ };
+ public abstract int numValues(Random r);
+ public abstract long nextValue(Random r);
+ }
+
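+ // Indexes one document per value set, force-merges to a single segment, and checks
+ // that the long and double views return exactly the indexed values, the double view
+ // in strictly increasing order per document.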
+ private void test(List<LongOpenHashSet> values) throws Exception {
+ StringField id = new StringField("_id", "", Field.Store.NO);
+
+ for (int i = 0; i < values.size(); ++i) {
+ Document doc = new Document();
+ id.setStringValue("" + i);
+ doc.add(id);
+ final LongOpenHashSet v = values.get(i);
+ final boolean[] states = v.allocated;
+ final long[] keys = v.keys;
+
+ for (int j = 0; j < states.length; j++) {
+ if (states[j]) {
+ LongField value = new LongField("value", keys[j], Field.Store.NO);
+ doc.add(value);
+ }
+ }
+ writer.addDocument(doc);
+ }
+ Merges.forceMerge(writer, 1);
+
+ final IndexNumericFieldData indexFieldData = getForField("value");
+ final AtomicNumericFieldData atomicFieldData = indexFieldData.load(refreshReader());
+ final LongValues data = atomicFieldData.getLongValues();
+ final DoubleValues doubleData = atomicFieldData.getDoubleValues();
+ final LongOpenHashSet set = new LongOpenHashSet();
+ final DoubleOpenHashSet doubleSet = new DoubleOpenHashSet();
+ for (int i = 0; i < values.size(); ++i) {
+ final LongOpenHashSet v = values.get(i);
+
+ assertThat(data.setDocument(i) > 0, equalTo(!v.isEmpty()));
+ assertThat(doubleData.setDocument(i) > 0, equalTo(!v.isEmpty()));
+
+ set.clear();
+ int numValues = data.setDocument(i);
+ for (int j = 0; j < numValues; j++) {
+ set.add(data.nextValue());
+ }
+ assertThat(set, equalTo(v));
+
+ final DoubleOpenHashSet doubleV = new DoubleOpenHashSet();
+ final boolean[] states = v.allocated;
+ final long[] keys = v.keys;
+ for (int j = 0; j < states.length; j++) {
+ if (states[j]) {
+ doubleV.add((double) keys[j]);
+ }
+ }
+ doubleSet.clear();
+ numValues = doubleData.setDocument(i);
+ double prev = 0;
+ for (int j = 0; j < numValues; j++) {
+ double current;
+ doubleSet.add(current = doubleData.nextValue());
+ if (j > 0) {
+ assertThat(prev, lessThan(current));
+ }
+ prev = current;
+ }
+ assertThat(doubleSet, equalTo(doubleV));
+ }
+ }
+
+ private void test(Data data) throws Exception {
+ Random r = getRandom();
+ final int numDocs = 1000 + r.nextInt(19000);
+ final List<LongOpenHashSet> values = new ArrayList<LongOpenHashSet>(numDocs);
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = data.numValues(r);
+ final LongOpenHashSet vals = new LongOpenHashSet(numValues);
+ for (int j = 0; j < numValues; ++j) {
+ vals.add(data.nextValue(r));
+ }
+ values.add(vals);
+ }
+ test(values);
+ }
+
+ public void testSingleValuedDenseEnum() throws Exception {
+ test(Data.SINGLE_VALUED_DENSE_ENUM);
+ }
+
+ public void testSingleValuedDenseDate() throws Exception {
+ test(Data.SINGLE_VALUED_DENSE_DATE);
+ }
+
+ public void testSingleValuedSparseRandom() throws Exception {
+ test(Data.SINGLE_VALUED_SPARSE_RANDOM);
+ }
+
+ public void testMultiValuedDate() throws Exception {
+ test(Data.MULTI_VALUED_DATE);
+ }
+
+ public void testMultiValuedEnum() throws Exception {
+ test(Data.MULTI_VALUED_ENUM);
+ }
+
+ public void testMultiValuedSparseRandom() throws Exception {
+ test(Data.MULTI_VALUED_SPARSE_RANDOM);
+ }
+
+ public void testMultiValuedDenseRandom() throws Exception {
+ test(Data.MULTI_VALUED_DENSE_RANDOM);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java
new file mode 100644
index 0000000..0eca185
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+/** Returns an implementation based on paged bytes which doesn't implement WithOrdinals in order to visit different paths in the code,
+ * eg. BytesRefFieldComparatorSource makes decisions based on whether the field data implements WithOrdinals. */
+public class NoOrdinalsStringFieldDataTests extends PagedBytesStringFieldDataTests {
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public IndexFieldData<AtomicFieldData<ScriptDocValues>> getForField(String fieldName) {
+ final IndexFieldData<?> in = super.getForField(fieldName);
+ return new IndexFieldData<AtomicFieldData<ScriptDocValues>>() {
+
+ @Override
+ public Index index() {
+ return in.index();
+ }
+
+ @Override
+ public Names getFieldNames() {
+ return in.getFieldNames();
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return in.valuesOrdered();
+ }
+
+ @Override
+ public AtomicFieldData<ScriptDocValues> load(AtomicReaderContext context) {
+ return in.load(context);
+ }
+
+ @Override
+ public AtomicFieldData<ScriptDocValues> loadDirect(AtomicReaderContext context) throws Exception {
+ return in.loadDirect(context);
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
+ return new BytesRefFieldComparatorSource(this, missingValue, sortMode);
+ }
+
+ @Override
+ public void clear() {
+ in.clear();
+ }
+
+ @Override
+ public void clear(IndexReader reader) {
+ in.clear(reader);
+ }
+
+ };
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java
new file mode 100644
index 0000000..7496ce1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+
+/** Runs the shared string field data tests against the "paged_bytes" format. */
+public class PagedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java b/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java
new file mode 100644
index 0000000..eb71564
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/** Tests for multi-valued ordinals as built through OrdinalsBuilder. */
+public class MultiOrdinalsTests extends ElasticsearchTestCase {
+
+ protected final Ordinals creationMultiOrdinals(OrdinalsBuilder builder) {
+ return this.creationMultiOrdinals(builder, ImmutableSettings.builder());
+ }
+
+ protected Ordinals creationMultiOrdinals(OrdinalsBuilder builder, ImmutableSettings.Builder settings) {
+ return builder.build(settings.build());
+ }
+
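+ // Builds a random ord-to-doc assignment, feeds it to the builder in ordinal order,
+ // then replays it in document order and verifies getOrd, getOrds and iteration,
+ // including ordinal 0 for the documents in between that carry no value.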
+ @Test
+ public void testRandomValues() throws IOException {
+ Random random = getRandom();
+ int numDocs = 100 + random.nextInt(1000);
+ int numOrdinals = 1 + random.nextInt(200);
+ int numValues = 100 + random.nextInt(100000);
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ Set<OrdAndId> ordsAndIdSet = new HashSet<OrdAndId>();
+ for (int i = 0; i < numValues; i++) {
+ ordsAndIdSet.add(new OrdAndId(1 + random.nextInt(numOrdinals), random.nextInt(numDocs)));
+ }
+ List<OrdAndId> ordsAndIds = new ArrayList<OrdAndId>(ordsAndIdSet);
+ Collections.sort(ordsAndIds, new Comparator<OrdAndId>() {
+
+ @Override
+ public int compare(OrdAndId o1, OrdAndId o2) {
+ if (o1.ord < o2.ord) {
+ return -1;
+ }
+ if (o1.ord == o2.ord) {
+ if (o1.id < o2.id) {
+ return -1;
+ }
+ if (o1.id > o2.id) {
+ return 1;
+ }
+ return 0;
+ }
+ return 1;
+ }
+ });
+ long lastOrd = -1;
+ for (OrdAndId ordAndId : ordsAndIds) {
+ if (lastOrd != ordAndId.ord) {
+ lastOrd = ordAndId.ord;
+ builder.nextOrdinal();
+ }
+ ordAndId.ord = builder.currentOrdinal(); // remap onto the builder's ordinals, since the random ords may contain gaps
+ builder.addDoc(ordAndId.id);
+ }
+
+ Collections.sort(ordsAndIds, new Comparator<OrdAndId>() {
+
+ @Override
+ public int compare(OrdAndId o1, OrdAndId o2) {
+ if (o1.id < o2.id) {
+ return -1;
+ }
+ if (o1.id == o2.id) {
+ if (o1.ord < o2.ord) {
+ return -1;
+ }
+ if (o1.ord > o2.ord) {
+ return 1;
+ }
+ return 0;
+ }
+ return 1;
+ }
+ });
+ Ordinals ords = creationMultiOrdinals(builder);
+ Ordinals.Docs docs = ords.ordinals();
+ int docId = ordsAndIds.get(0).id;
+ List<Long> docOrds = new ArrayList<Long>();
+ for (OrdAndId ordAndId : ordsAndIds) {
+ if (docId == ordAndId.id) {
+ docOrds.add(ordAndId.ord);
+ } else {
+ if (!docOrds.isEmpty()) {
+ assertThat(docs.getOrd(docId), equalTo(docOrds.get(0)));
+ LongsRef ref = docs.getOrds(docId);
+ assertThat(ref.offset, equalTo(0));
+
+ for (int i = ref.offset; i < ref.length; i++) {
+ assertThat("index: " + i + " offset: " + ref.offset + " len: " + ref.length, ref.longs[i], equalTo(docOrds.get(i)));
+ }
+ final long[] array = new long[docOrds.size()];
+ for (int i = 0; i < array.length; i++) {
+ array[i] = docOrds.get(i);
+ }
+ assertIter(docs, docId, array);
+ }
+ for (int i = docId + 1; i < ordAndId.id; i++) {
+ assertThat(docs.getOrd(i), equalTo(0L));
+ }
+ docId = ordAndId.id;
+ docOrds.clear();
+ docOrds.add(ordAndId.ord);
+ }
+ }
+ }
+
+ public static class OrdAndId {
+ long ord;
+ final int id;
+
+ public OrdAndId(long ord, int id) {
+ this.ord = ord;
+ this.id = id;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + id;
+ result = prime * result + (int) ord;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ OrdAndId other = (OrdAndId) obj;
+ if (id != other.id) {
+ return false;
+ }
+ if (ord != other.ord) {
+ return false;
+ }
+ return true;
+ }
+ }
+
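+ // Hand-built assignment over 7 docs and 32 ordinals against a known per-document
+ // plan; ordinal 0 is reserved for "no value", so doc 3 has an empty row.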
+ @Test
+ public void testOrdinals() throws Exception {
+ int maxDoc = 7;
+ long maxOrds = 32;
+ OrdinalsBuilder builder = new OrdinalsBuilder(maxDoc);
+ builder.nextOrdinal(); // 1
+ builder.addDoc(1).addDoc(4).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 2
+ builder.addDoc(0).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 3
+ builder.addDoc(2).addDoc(4).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 4
+ builder.addDoc(0).addDoc(4).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 5
+ builder.addDoc(4).addDoc(5).addDoc(6);
+ long ord = builder.nextOrdinal(); // 6
+ builder.addDoc(4).addDoc(5).addDoc(6);
+ for (long i = ord; i < maxOrds; i++) {
+ builder.nextOrdinal();
+ builder.addDoc(5).addDoc(6);
+ }
+
+ long[][] ordinalPlan = new long[][]{
+ {2, 4},
+ {1},
+ {3},
+ {},
+ {1, 3, 4, 5, 6},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
+ };
+
+ Ordinals ordinals = creationMultiOrdinals(builder);
+ Ordinals.Docs docs = ordinals.ordinals();
+ assertEquals(docs, ordinalPlan);
+ }
+
+ protected static void assertIter(Ordinals.Docs docs, int docId, long... expectedOrdinals) {
+ assertThat(docs.setDocument(docId), equalTo(expectedOrdinals.length));
+ for (long expectedOrdinal : expectedOrdinals) {
+ assertThat(docs.nextOrd(), equalTo(expectedOrdinal));
+ }
+ }
+
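+ // Documents whose ordinal lists overlap in the backing storage arrays: the
+ // MultiOrdinals built with PackedInts.FASTEST must still reproduce the plan below.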
+ @Test
+ public void testMultiValuesDocsWithOverlappingStorageArrays() throws Exception {
+ int maxDoc = 7;
+ long maxOrds = 15;
+ OrdinalsBuilder builder = new OrdinalsBuilder(maxDoc);
+ for (int i = 0; i < maxOrds; i++) {
+ builder.nextOrdinal();
+ if (i < 10) {
+ builder.addDoc(0);
+ }
+ builder.addDoc(1);
+ if (i == 0) {
+ builder.addDoc(2);
+ }
+ if (i < 5) {
+ builder.addDoc(3);
+
+ }
+ if (i < 6) {
+ builder.addDoc(4);
+
+ }
+ if (i == 1) {
+ builder.addDoc(5);
+ }
+ if (i < 10) {
+ builder.addDoc(6);
+ }
+ }
+
+ long[][] ordinalPlan = new long[][]{
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {1},
+ {1, 2, 3, 4, 5},
+ {1, 2, 3, 4, 5, 6},
+ {2},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ };
+
+ Ordinals ordinals = new MultiOrdinals(builder, PackedInts.FASTEST);
+ Ordinals.Docs docs = ordinals.ordinals();
+ assertEquals(docs, ordinalPlan);
+ }
+
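+ // Asserts a Docs view against a per-document ordinal plan: doc and ord counts, max
+ // ord, the multi-valued flag, and the ordinals as returned both via getOrds and via
+ // iteration.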
+ private void assertEquals(Ordinals.Docs docs, long[][] ordinalPlan) {
+ long numOrds = 0;
+ for (int doc = 0; doc < ordinalPlan.length; ++doc) {
+ if (ordinalPlan[doc].length > 0) {
+ numOrds = Math.max(numOrds, ordinalPlan[doc][ordinalPlan[doc].length - 1]);
+ }
+ }
+ assertThat(docs.getNumDocs(), equalTo(ordinalPlan.length));
+ assertThat(docs.getNumOrds(), equalTo(numOrds));
+ assertThat(docs.getMaxOrd(), equalTo(numOrds + 1)); // maxOrd additionally counts the reserved null ordinal
+ assertThat(docs.isMultiValued(), equalTo(true));
+ for (int doc = 0; doc < ordinalPlan.length; ++doc) {
+ LongsRef ref = docs.getOrds(doc);
+ assertThat(ref.offset, equalTo(0));
+ long[] ords = ordinalPlan[doc];
+ assertThat(ref, equalTo(new LongsRef(ords, 0, ords.length)));
+ assertIter(docs, doc, ords);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java b/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java
new file mode 100644
index 0000000..2dea5cc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests ordinal storage for single-valued fields built through {@link OrdinalsBuilder}.
+ */
+public class SingleOrdinalsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSvValues() throws IOException {
+ int numDocs = 1000000;
+ int numOrdinals = numDocs / 4;
+ Map<Integer, Long> controlDocToOrdinal = new HashMap<Integer, Long>();
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ long ordinal = builder.nextOrdinal();
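+        // start a new ordinal every numOrdinals docs so that each doc receives exactly one ordinal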
+ for (int doc = 0; doc < numDocs; doc++) {
+ if (doc % numOrdinals == 0) {
+ ordinal = builder.nextOrdinal();
+ }
+ controlDocToOrdinal.put(doc, ordinal);
+ builder.addDoc(doc);
+ }
+
+ Ordinals ords = builder.build(ImmutableSettings.EMPTY);
+ assertThat(ords, instanceOf(SinglePackedOrdinals.class));
+ Ordinals.Docs docs = ords.ordinals();
+
+ assertThat(controlDocToOrdinal.size(), equalTo(docs.getNumDocs()));
+ for (Map.Entry<Integer, Long> entry : controlDocToOrdinal.entrySet()) {
+ assertThat(entry.getValue(), equalTo(docs.getOrd(entry.getKey())));
+ }
+    }
+
+ @Test
+ public void testMvOrdinalsTrigger() throws IOException {
+ int numDocs = 1000000;
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ builder.nextOrdinal();
+ for (int doc = 0; doc < numDocs; doc++) {
+ builder.addDoc(doc);
+ }
+
+ Ordinals ords = builder.build(ImmutableSettings.EMPTY);
+ assertThat(ords, instanceOf(SinglePackedOrdinals.class));
+
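+        // adding a second ordinal to doc 0 makes it multi-valued, which must rule out the single-packed implementation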
+ builder.nextOrdinal();
+ builder.addDoc(0);
+ ords = builder.build(ImmutableSettings.EMPTY);
+ assertThat(ords, not(instanceOf(SinglePackedOrdinals.class)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java b/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java
new file mode 100644
index 0000000..f3182fa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests that {@link CommitPoint} instances survive an XContent serialization round trip.
+ */
+public class CommitPointsTests extends ElasticsearchTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(CommitPointsTests.class);
+
+ @Test
+ public void testCommitPointXContent() throws Exception {
+ ArrayList<CommitPoint.FileInfo> indexFiles = Lists.newArrayList();
+ indexFiles.add(new CommitPoint.FileInfo("file1", "file1_p", 100, "ck1"));
+ indexFiles.add(new CommitPoint.FileInfo("file2", "file2_p", 200, "ck2"));
+
+ ArrayList<CommitPoint.FileInfo> translogFiles = Lists.newArrayList();
+ translogFiles.add(new CommitPoint.FileInfo("t_file1", "t_file1_p", 100, null));
+ translogFiles.add(new CommitPoint.FileInfo("t_file2", "t_file2_p", 200, null));
+
+ CommitPoint commitPoint = new CommitPoint(1, "test", CommitPoint.Type.GENERATED, indexFiles, translogFiles);
+
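+        // serialize the commit point to XContent and parse it back, then compare field by field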
+ byte[] serialized = CommitPoints.toXContent(commitPoint);
+ logger.info("serialized commit_point {}", new String(serialized, Charsets.UTF_8));
+
+ CommitPoint desCp = CommitPoints.fromXContent(serialized);
+ assertThat(desCp.version(), equalTo(commitPoint.version()));
+ assertThat(desCp.name(), equalTo(commitPoint.name()));
+
+ assertThat(desCp.indexFiles().size(), equalTo(commitPoint.indexFiles().size()));
+ for (int i = 0; i < desCp.indexFiles().size(); i++) {
+ assertThat(desCp.indexFiles().get(i).name(), equalTo(commitPoint.indexFiles().get(i).name()));
+ assertThat(desCp.indexFiles().get(i).physicalName(), equalTo(commitPoint.indexFiles().get(i).physicalName()));
+ assertThat(desCp.indexFiles().get(i).length(), equalTo(commitPoint.indexFiles().get(i).length()));
+ assertThat(desCp.indexFiles().get(i).checksum(), equalTo(commitPoint.indexFiles().get(i).checksum()));
+ }
+
+ assertThat(desCp.translogFiles().size(), equalTo(commitPoint.translogFiles().size()));
+        for (int i = 0; i < desCp.translogFiles().size(); i++) { // iterate the translog files, not the index files
+ assertThat(desCp.translogFiles().get(i).name(), equalTo(commitPoint.translogFiles().get(i).name()));
+ assertThat(desCp.translogFiles().get(i).physicalName(), equalTo(commitPoint.translogFiles().get(i).physicalName()));
+ assertThat(desCp.translogFiles().get(i).length(), equalTo(commitPoint.translogFiles().get(i).length()));
+ assertThat(desCp.translogFiles().get(i).checksum(), nullValue());
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java b/src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java
new file mode 100644
index 0000000..5fb90e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+
+/**
+ * Static helpers for building {@link DocumentMapperParser}, {@link MapperService} and
+ * {@link AnalysisService} instances for mapper tests.
+ */
+public class MapperTestUtils {
+
+ public static DocumentMapperParser newParser() {
+ return new DocumentMapperParser(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS, newAnalysisService(), new PostingsFormatService(new Index("test")),
+ new DocValuesFormatService(new Index("test")), newSimilarityLookupService());
+ }
+
+ public static DocumentMapperParser newParser(Settings indexSettings) {
+ return new DocumentMapperParser(new Index("test"), indexSettings, newAnalysisService(indexSettings), new PostingsFormatService(new Index("test")),
+ new DocValuesFormatService(new Index("test")), newSimilarityLookupService());
+ }
+
+ public static MapperService newMapperService() {
+ return newMapperService(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static MapperService newMapperService(Index index, Settings indexSettings) {
+ return new MapperService(index, indexSettings, new Environment(), newAnalysisService(), new IndexFieldDataService(index, new DummyCircuitBreakerService()),
+ new PostingsFormatService(index), new DocValuesFormatService(index), newSimilarityLookupService());
+ }
+
+ public static AnalysisService newAnalysisService() {
+ return newAnalysisService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static AnalysisService newAnalysisService(Settings indexSettings) {
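+        // build a node-level injector for the shared analysis components, then a child injector with the index-scoped modules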
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(indexSettings), new EnvironmentModule(new Environment(ImmutableSettings.Builder.EMPTY_SETTINGS)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(new Index("test"), indexSettings),
+ new IndexNameModule(new Index("test")),
+ new AnalysisModule(indexSettings, parentInjector.getInstance(IndicesAnalysisService.class))).createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+
+ public static SimilarityLookupService newSimilarityLookupService() {
+ return new SimilarityLookupService(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/UidTests.java b/src/test/java/org/elasticsearch/index/mapper/UidTests.java
new file mode 100644
index 0000000..0720447
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/UidTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class UidTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCreateAndSplitId() {
+ BytesRef createUid = Uid.createUidAsBytes("foo", "bar");
+ HashedBytesArray[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid);
+ assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8()));
+ assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8()));
+        // also split a uid that sits at a non-zero offset inside a larger byte array
+ BytesRef ref = new BytesRef(createUid.length+10);
+ ref.offset = 9;
+ ref.length = createUid.length;
+ System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length);
+ splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(ref);
+ assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8()));
+ assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
new file mode 100644
index 0000000..1f15caf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.all;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllField;
+import org.elasticsearch.common.lucene.all.AllTermQuery;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests parsing and serialization of the {@code _all} field mapper.
+ */
+public class SimpleAllMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleAllMappers() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ FieldMapper mapper = docMapper.mappers().smartNameFieldMapper("_all");
+ assertThat(field.fieldType().omitNorms(), equalTo(true));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+ }
+
+ @Test
+ public void testAllMappersNoBoost() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ FieldMapper mapper = docMapper.mappers().smartNameFieldMapper("_all");
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(TermQuery.class));
+ }
+
+ @Test
+ public void testAllMappersTermQuery() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ FieldMapper mapper = docMapper.mappers().smartNameFieldMapper("_all");
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(TermQuery.class));
+    }
+
+ @Test
+ public void testSimpleAllMappersWithReparse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();
+
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ assertThat(field.fieldType().omitNorms(), equalTo(true));
+ }
+
+ @Test
+ public void testSimpleAllMappersWithStore() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+
+ String text = field.stringValue();
+ assertThat(text, equalTo(allEntries.buildText()));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ @Test
+ public void testSimpleAllMappersWithReparseWithStore() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();
+
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+
+ String text = field.stringValue();
+ assertThat(text, equalTo(allEntries.buildText()));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ boolean omitNorms = false;
+ boolean stored = false;
+ boolean enabled = true;
+ boolean autoBoost = false;
+ boolean tv_stored = false;
+ boolean tv_payloads = false;
+ boolean tv_offsets = false;
+ boolean tv_positions = false;
+ String similarity = null;
+ boolean fieldData = false;
+ XContentBuilder mappingBuilder = jsonBuilder();
+ mappingBuilder.startObject().startObject("test");
+ List<Tuple<String, Boolean>> booleanOptionList = new ArrayList<Tuple<String, Boolean>>();
+ boolean allDefault = true;
+ if (frequently()) {
+ allDefault = false;
+ mappingBuilder.startObject("_all");
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("omit_norms", omitNorms = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store", stored = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vectors", tv_stored = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("enabled", enabled = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("auto_boost", autoBoost = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vector_offsets", tv_offsets = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vector_positions", tv_positions = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vector_payloads", tv_payloads = randomBoolean()));
+ }
+ Collections.shuffle(booleanOptionList, getRandom());
+ for (Tuple<String, Boolean> option : booleanOptionList) {
+ mappingBuilder.field(option.v1(), option.v2().booleanValue());
+ }
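+            // Lucene stores term vectors implicitly whenever positions, offsets or payloads are enabled, so mirror that here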
+ tv_stored |= tv_positions || tv_payloads || tv_offsets;
+ if (randomBoolean()) {
+ mappingBuilder.field("similarity", similarity = randomBoolean() ? "BM25" : "TF/IDF");
+ }
+ if (randomBoolean()) {
+ fieldData = true;
+ mappingBuilder.startObject("fielddata");
+ mappingBuilder.field("foo", "bar");
+ mappingBuilder.endObject();
+ }
+ mappingBuilder.endObject();
+ }
+
+ String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();
+ logger.info(mapping);
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+
+ byte[] json = jsonBuilder().startObject()
+ .field("foo", "bar")
+ .field("_id", 1)
+ .field("foobar", "foobar")
+ .endObject().bytes().array();
+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ if (enabled) {
+ assertThat(field.fieldType().omitNorms(), equalTo(omitNorms));
+ assertThat(field.fieldType().stored(), equalTo(stored));
+ assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));
+ assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));
+ assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));
+ assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("foobar"), equalTo(true));
+ assertThat(allEntries.fields().contains("foo"), equalTo(true));
+ if (!stored) {
+ assertThat(field.stringValue(), nullValue());
+ }
+ String text = stored ? field.stringValue() : "bar foobar";
+ assertThat(text.trim(), equalTo(allEntries.buildText().trim()));
+ } else {
+ assertThat(field, nullValue());
+ }
+
+ Term term = new Term("foo", "bar");
+ Query query = builtDocMapper.allFieldMapper().queryStringTermQuery(term);
+ if (autoBoost) {
+ assertThat(query, equalTo((Query)new AllTermQuery(term)));
+ } else {
+ assertThat(query, equalTo((Query)new TermQuery(term)));
+ }
+ if (similarity == null || similarity.equals("TF/IDF")) {
+ assertThat(builtDocMapper.allFieldMapper().similarity(), nullValue());
+ } else {
+ assertThat(similarity, equalTo(builtDocMapper.allFieldMapper().similarity().name()));
+ }
+ assertThat(builtMapping.contains("fielddata"), is(fieldData));
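+        // when every option was left at its default, serializing the _all mapper should emit nothing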
+ if (allDefault) {
+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);
+ XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), bytesStreamOutput);
+ XContentBuilder xContentBuilder = builtDocMapper.allFieldMapper().toXContent(b, ToXContent.EMPTY_PARAMS);
+ xContentBuilder.flush();
+ assertThat(bytesStreamOutput.size(), equalTo(0));
+ }
+    }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/mapping.json b/src/test/java/org/elasticsearch/index/mapper/all/mapping.json
new file mode 100644
index 0000000..6c2120f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/mapping.json
@@ -0,0 +1,57 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true,
+ "omit_norms":true
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost":2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json b/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json
new file mode 100644
index 0000000..e232192
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled": true ,
+ "index_options" : "freqs"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json b/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json
new file mode 100644
index 0000000..ecbf315
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json b/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json
new file mode 100644
index 0000000..7fc9283
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true,
+ "store":"yes"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost":2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string"
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/test1.json b/src/test/java/org/elasticsearch/index/mapper/all/test1.json
new file mode 100644
index 0000000..834400a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/test1.json
@@ -0,0 +1,18 @@
+{
+ "_boost":3.7,
+ "_id":"1",
+ "name":{
+ "first":"shay",
+ "last":"banon"
+ },
+ "address":{
+ "first":{
+ "location":"first location"
+ },
+ "last":{
+ "location":"last location"
+ }
+ },
+ "simple1":1,
+ "simple2":2
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java
new file mode 100644
index 0000000..9e63f65
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.analyzer;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.analysis.FieldNameAnalyzer;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests the {@code _analyzer} mapping, which selects the index-time analyzer from a document field.
+ */
+public class AnalyzerMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAnalyzerMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_analyzer").field("path", "field_analyzer").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("field2").field("type", "string").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
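+        // _analyzer.path names a document field whose value picks the index-time analyzer for that document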
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ FieldNameAnalyzer analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+
+ // check that it serializes and de-serializes correctly
+
+ DocumentMapper reparsedMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ doc = reparsedMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+ }
+
+ @Test
+ public void testAnalyzerMappingExplicit() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_analyzer").field("path", "field_analyzer").endObject()
+ .startObject("properties")
+ .startObject("field_analyzer").field("type", "string").endObject()
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("field2").field("type", "string").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ FieldNameAnalyzer analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+
+ // check that it serializes and de-serializes correctly
+
+ DocumentMapper reparsedMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ doc = reparsedMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+ }
+
+ @Test
+ public void testAnalyzerMappingNotIndexedNorStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_analyzer").field("path", "field_analyzer").endObject()
+ .startObject("properties")
+ .startObject("field_analyzer").field("type", "string").field("index", "no").field("store", "no").endObject()
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("field2").field("type", "string").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ FieldNameAnalyzer analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+
+ // check that it serializes and de-serializes correctly
+
+ DocumentMapper reparsedMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ doc = reparsedMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java
new file mode 100644
index 0000000..38e2fa1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.binary;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Tests mapping of {@code binary} fields.
+ */
+public class BinaryMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "binary")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field");
+ assertThat(fieldMapper, instanceOf(BinaryFieldMapper.class));
+ assertThat(fieldMapper.fieldType().stored(), equalTo(false));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java
new file mode 100644
index 0000000..f009344
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.BoostFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests the document-level {@code _boost} field mapping.
+ */
+public class BoostMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("_boost", 2.0f)
+ .field("field", "a")
+ .field("field", "b")
+ .endObject().bytes());
+
+        // only one of the identically named fields carries the document boost; the others keep the default of 1
+ IndexableField[] fields = doc.rootDoc().getFields("field");
+ assertThat(fields[0].boost(), equalTo(2.0f));
+ assertThat(fields[1].boost(), equalTo(1.0f));
+ }
+
+ @Test
+ public void testCustomName() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_boost").field("name", "custom_boost").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "a")
+ .field("_boost", 2.0f)
+ .endObject().bytes());
+ assertThat(doc.rootDoc().getField("field").boost(), equalTo(1.0f));
+
+ doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "a")
+ .field("custom_boost", 2.0f)
+ .endObject().bytes());
+ assertThat(doc.rootDoc().getField("field").boost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.boostFieldMapper().fieldType().stored(), equalTo(BoostFieldMapper.Defaults.FIELD_TYPE.stored()));
+ assertThat(docMapper.boostFieldMapper().fieldType().indexed(), equalTo(BoostFieldMapper.Defaults.FIELD_TYPE.indexed()));
+ }
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_boost")
+ .field("store", "yes").field("index", "not_analyzed")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.boostFieldMapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.boostFieldMapper().fieldType().indexed(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java
new file mode 100644
index 0000000..17f0920
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CustomBoostMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomBoostValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("s_field").field("type", "string").endObject()
+ .startObject("l_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("i_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("sh_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("b_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("d_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("f_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("s_field").field("value", "s_value").field("boost", 2.0f).endObject()
+ .startObject("l_field").field("value", 1l).field("boost", 3.0f).endObject()
+ .startObject("i_field").field("value", 1).field("boost", 4.0f).endObject()
+ .startObject("sh_field").field("value", 1).field("boost", 5.0f).endObject()
+ .startObject("b_field").field("value", 1).field("boost", 6.0f).endObject()
+ .startObject("d_field").field("value", 1).field("boost", 7.0f).endObject()
+ .startObject("f_field").field("value", 1).field("boost", 8.0f).endObject()
+ .startObject("date_field").field("value", "20100101").field("boost", 9.0f).endObject()
+ .endObject().bytes());
+
+ assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(2.0f));
+ assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(3.0f));
+ assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(4.0f));
+ assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(5.0f));
+ assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(6.0f));
+ assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(7.0f));
+ assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f));
+ assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java b/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java
new file mode 100644
index 0000000..c949cbb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+
+/**
+ * Tests per-field boosts supplied through the {@code {"value": ..., "boost": ...}} notation.
+ */
+public class FieldLevelBoostTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testFieldLevelBoost() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("str_field").field("type", "string").endObject()
+ .startObject("int_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("byte_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("double_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("float_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("long_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("short_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = XContentFactory.jsonBuilder().startObject().field("_id", "1")
+ .startObject("str_field").field("boost", 2.0).field("value", "some name").endObject()
+ .startObject("int_field").field("boost", 3.0).field("value", 10).endObject()
+ .startObject("byte_field").field("boost", 4.0).field("value", 20).endObject()
+ .startObject("date_field").field("boost", 5.0).field("value", "2012-01-10").endObject()
+ .startObject("double_field").field("boost", 6.0).field("value", 30.0).endObject()
+ .startObject("float_field").field("boost", 7.0).field("value", 40.0).endObject()
+ .startObject("long_field").field("boost", 8.0).field("value", 50).endObject()
+ .startObject("short_field").field("boost", 9.0).field("value", 60).endObject()
+ .bytes();
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("str_field");
+ assertThat((double) f.boost(), closeTo(2.0, 0.001));
+
+ f = doc.getField("int_field");
+ assertThat((double) f.boost(), closeTo(3.0, 0.001));
+
+ f = doc.getField("byte_field");
+ assertThat((double) f.boost(), closeTo(4.0, 0.001));
+
+ f = doc.getField("date_field");
+ assertThat((double) f.boost(), closeTo(5.0, 0.001));
+
+ f = doc.getField("double_field");
+ assertThat((double) f.boost(), closeTo(6.0, 0.001));
+
+ f = doc.getField("float_field");
+ assertThat((double) f.boost(), closeTo(7.0, 0.001));
+
+ f = doc.getField("long_field");
+ assertThat((double) f.boost(), closeTo(8.0, 0.001));
+
+ f = doc.getField("short_field");
+ assertThat((double) f.boost(), closeTo(9.0, 0.001));
+ }
+
+ @Test
+ public void testInvalidFieldLevelBoost() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("str_field").field("type", "string").endObject()
+ .startObject("int_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("byte_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("double_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("float_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("long_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("short_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
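+        // an object value with an unknown key instead of "value"/"boost" must fail to parse for every field type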
+ String[] fieldNames = {"str_field", "int_field", "byte_field", "date_field",
+ "double_field", "float_field", "long_field", "short_field"};
+ for (String fieldName : fieldNames) {
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject(fieldName).field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java b/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java
new file mode 100644
index 0000000..c97a396
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.camelcase;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class CamelCaseFieldNameTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCamelCaseFieldNameStaysAsIs() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("thisIsCamelCase", "value1")
+ .endObject().bytes());
+
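+ // The dynamically mapped field keeps its camelCase name; no snake_case variant is derived,
+ // even after the mapping is serialized and re-parsed below.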
+ assertThat(documentMapper.mappers().indexName("thisIsCamelCase").isEmpty(), equalTo(false));
+ assertThat(documentMapper.mappers().indexName("this_is_camel_case"), nullValue());
+
+ documentMapper.refreshSource();
+ documentMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ assertThat(documentMapper.mappers().indexName("thisIsCamelCase").isEmpty(), equalTo(false));
+ assertThat(documentMapper.mappers().indexName("this_is_camel_case"), nullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java
new file mode 100644
index 0000000..683b8d4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.completion;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+public class CompletionFieldMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultConfiguration() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ assertThat(completionFieldMapper.isStoringPayloads(), is(false));
+ }
+
+ @Test
+ public void testThatSerializationIncludesAllElements() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .field("index_analyzer", "simple")
+ .field("search_analyzer", "standard")
+ .field("payloads", true)
+ .field("preserve_separators", false)
+ .field("preserve_position_increments", true)
+ .field("max_input_length", 14)
+
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
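+ // Serialize the mapper back to JSON and verify that every configured option survives the round trip.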
+ XContentBuilder builder = jsonBuilder().startObject();
+ completionFieldMapper.toXContent(builder, null).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
+ assertThat(configMap.get("index_analyzer").toString(), is("simple"));
+ assertThat(configMap.get("search_analyzer").toString(), is("standard"));
+ assertThat(Boolean.valueOf(configMap.get("payloads").toString()), is(true));
+ assertThat(Boolean.valueOf(configMap.get("preserve_separators").toString()), is(false));
+ assertThat(Boolean.valueOf(configMap.get("preserve_position_increments").toString()), is(true));
+ assertThat(Integer.valueOf(configMap.get("max_input_length").toString()), is(14));
+ }
+
+ @Test
+ public void testThatSerializationCombinesToOneAnalyzerFieldIfBothAreEqual() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .field("index_analyzer", "simple")
+ .field("search_analyzer", "simple")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
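+ // With identical index and search analyzers, serialization should collapse them into a single "analyzer" entry.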
+ XContentBuilder builder = jsonBuilder().startObject();
+ completionFieldMapper.toXContent(builder, null).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
+ assertThat(configMap.get("analyzer").toString(), is("simple"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java b/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java
new file mode 100644
index 0000000..c1825f3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.compound;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+public class CompoundTypesTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testStringType() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(1.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+
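+ // The object notation lets a document set a per-field boost; the value is indexed as before but carries boost 2.0.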
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("field1").field("value", "value1").field("boost", 2.0f).endObject()
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(2.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(1.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java
new file mode 100644
index 0000000..40fccf0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.copyto;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class CopyToMapperIntegrationTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testDynamicTemplateCopyTo() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("test-idx")
+ .addMapping("doc", createDynamicTemplateMapping())
+ );
+
+ int recordCount = between(1, 200);
+
+ for (int i = 0; i < recordCount * 2; i++) {
+ client().prepareIndex("test-idx", "doc", Integer.toString(i))
+ .setSource("test_field", "test " + i, "even", i % 2 == 0)
+ .get();
+ }
+ client().admin().indices().prepareRefresh("test-idx").execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("test-idx")
+ .setQuery(QueryBuilders.termQuery("even", true))
+ .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2))
+ .addAggregation(AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount * 2))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) recordCount));
+
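+ // The analyzed field buckets are the shared "test" token plus one numeric token per matching doc
+ // (recordCount + 1), while the not_analyzed raw copy yields one bucket per matching doc.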
+ assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1));
+ assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount));
+
+ }
+
+
+ private XContentBuilder createDynamicTemplateMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .startArray("dynamic_templates")
+
+ .startObject().startObject("template_raw")
+ .field("match", "*_raw")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("type", "string").field("index", "not_analyzed").endObject()
+ .endObject().endObject()
+
+ .startObject().startObject("template_all")
+ .field("match", "*")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("type", "string").field("copy_to", "{name}_raw").endObject()
+ .endObject().endObject()
+
+ .endArray();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
new file mode 100644
index 0000000..35fca85
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.copyto;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.Matchers.*;
+
+public class CopyToMapperTests extends ElasticsearchTestCase {
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "another_field", "cyclic_test")
+ .endObject()
+
+ .startObject("another_field")
+ .field("type", "string")
+ .endObject()
+
+ .startObject("cyclic_test")
+ .field("type", "string")
+ .array("copy_to", "copy_test")
+ .endObject()
+
+ .startObject("int_to_str_test")
+ .field("type", "integer")
+ .array("copy_to", "another_field", "new_field")
+ .endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = docMapper.mappers().name("copy_test").mapper();
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+
+ // Check json serialization
+ StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ stringFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> copyTestMap = (Map<String, Object>) serializedMap.get("copy_test");
+ assertThat(copyTestMap.get("type").toString(), is("string"));
+ List<String> copyToList = (List<String>) copyTestMap.get("copy_to");
+ assertThat(copyToList.size(), equalTo(2));
+ assertThat(copyToList.get(0).toString(), equalTo("another_field"));
+ assertThat(copyToList.get(1).toString(), equalTo("cyclic_test"));
+
+ // Check data parsing
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .field("cyclic_test", "bar")
+ .field("int_to_str_test", 42)
+ .endObject().bytes();
+
+ ParseContext.Document doc = docMapper.parse("type1", "1", json).rootDoc();
+ assertThat(doc.getFields("copy_test").length, equalTo(2));
+ assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("copy_test")[1].stringValue(), equalTo("bar"));
+
+ assertThat(doc.getFields("another_field").length, equalTo(2));
+ assertThat(doc.getFields("another_field")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("another_field")[1].stringValue(), equalTo("42"));
+
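+ // copy_to copies only the original source values and is not applied recursively, so the cyclic
+ // copy_test <-> cyclic_test pair ends up with exactly two values each instead of looping.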
+ assertThat(doc.getFields("cyclic_test").length, equalTo(2));
+ assertThat(doc.getFields("cyclic_test")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("cyclic_test")[1].stringValue(), equalTo("bar"));
+
+ assertThat(doc.getFields("int_to_str_test").length, equalTo(1));
+ assertThat(doc.getFields("int_to_str_test")[0].numericValue().intValue(), equalTo(42));
+
+ assertThat(doc.getFields("new_field").length, equalTo(1));
+ assertThat(doc.getFields("new_field")[0].numericValue().intValue(), equalTo(42));
+
+ fieldMapper = docMapper.mappers().name("new_field").mapper();
+ assertThat(fieldMapper, instanceOf(LongFieldMapper.class));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsInnerObjectParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .field("copy_to", "very.inner.field")
+ .endObject()
+
+ .startObject("very")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("inner")
+ .field("type", "object")
+ .endObject()
+ .endObject()
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .startObject("foo").startObject("bar").field("baz", "zoo").endObject().endObject()
+ .endObject().bytes();
+
+ ParseContext.Document doc = docMapper.parse("type1", "1", json).rootDoc();
+ assertThat(doc.getFields("copy_test").length, equalTo(1));
+ assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+
+ assertThat(doc.getFields("very.inner.field").length, equalTo(1));
+ assertThat(doc.getFields("very.inner.field")[0].stringValue(), equalTo("foo"));
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsNonExistingInnerObjectParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .field("copy_to", "very.inner.field")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .endObject().bytes();
+
+ try {
+ docMapper.parse("type1", "1", json).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ assertThat(ex.getMessage(), startsWith("attempt to copy value to non-existing object"));
+ }
+ }
+
+ @Test
+ public void testCopyToFieldMerge() throws Exception {
+
+ String mappingBefore = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "foo", "bar")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ String mappingAfter = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "baz", "bar")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapperBefore = MapperTestUtils.newParser().parse(mappingBefore);
+
+ ImmutableList<String> fields = docMapperBefore.mappers().name("copy_test").mapper().copyTo().copyToFields();
+
+ assertThat(fields.size(), equalTo(2));
+ assertThat(fields.get(0), equalTo("foo"));
+ assertThat(fields.get(1), equalTo("bar"));
+
+
+ DocumentMapper docMapperAfter = MapperTestUtils.newParser().parse(mappingAfter);
+
+ DocumentMapper.MergeResult mergeResult = docMapperBefore.merge(docMapperAfter, mergeFlags().simulate(true));
+
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
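+ // Apply the merge for real; the copy_to list from the new mapping replaces the old one.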
+ docMapperBefore.merge(docMapperAfter, mergeFlags().simulate(false));
+
+ fields = docMapperBefore.mappers().name("copy_test").mapper().copyTo().copyToFields();
+
+ assertThat(fields.size(), equalTo(2));
+ assertThat(fields.get(0), equalTo("baz"));
+ assertThat(fields.get(1), equalTo("bar"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
new file mode 100644
index 0000000..bbfd82d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacetBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
+ @ParametersFactory
+ public static Iterable<Object[]> buildParameters() {
+ List<Object[]> parameters = new ArrayList<Object[]>();
+ for (boolean storeCountedFields : new boolean[] { true, false }) {
+ for (boolean loadCountedFields : new boolean[] { true, false }) {
+ parameters.add(new Object[] { storeCountedFields, loadCountedFields });
+ }
+ }
+ return parameters;
+ }
+
+ private final boolean storeCountedFields;
+ private final boolean loadCountedFields;
+
+ public TokenCountFieldMapperIntegrationTests(@Name("storeCountedFields") boolean storeCountedFields,
+ @Name("loadCountedFields") boolean loadCountedFields) {
+ this.storeCountedFields = storeCountedFields;
+ this.loadCountedFields = loadCountedFields;
+ }
+
+ /**
+ * It is possible to get the token count in a search response.
+ */
+ @Test
+ public void searchReturnsTokenCount() throws ElasticsearchException, IOException {
+ init();
+
+ assertSearchReturns(searchById("single"), "single");
+ assertSearchReturns(searchById("bulk1"), "bulk1");
+ assertSearchReturns(searchById("bulk2"), "bulk2");
+ assertSearchReturns(searchById("multi"), "multi");
+ assertSearchReturns(searchById("multibulk1"), "multibulk1");
+ assertSearchReturns(searchById("multibulk2"), "multibulk2");
+ }
+
+ /**
+ * It is possible to search by token count.
+ */
+ @Test
+ public void searchByTokenCount() throws ElasticsearchException, IOException {
+ init();
+
+ assertSearchReturns(searchByNumericRange(4, 4).get(), "single");
+ assertSearchReturns(searchByNumericRange(10, 10).get(), "multibulk2");
+ assertSearchReturns(searchByNumericRange(7, 10).get(), "multi", "multibulk1", "multibulk2");
+ assertSearchReturns(searchByNumericRange(1, 10).get(), "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
+ assertSearchReturns(searchByNumericRange(12, 12).get());
+ }
+
+ /**
+ * It is possible to facet by token count.
+ */
+ @Test
+ public void facetByTokenCount() throws ElasticsearchException, IOException {
+ init();
+
+ String facetField = randomFrom(ImmutableList.of(
+ "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
+ SearchResponse result = searchByNumericRange(1, 10)
+ .addFacet(new TermsFacetBuilder("facet").field(facetField)).get();
+ assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
+ assertThat(result.getFacets().facets().size(), equalTo(1));
+ TermsFacet facet = (TermsFacet) result.getFacets().facets().get(0);
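+ // Token counts across the six docs are 4, 3, 5, {2, 7}, {1, 8} and {6, 10}: nine distinct values, hence nine entries.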
+ assertThat(facet.getEntries().size(), equalTo(9));
+ }
+
+ private void init() throws ElasticsearchException, IOException {
+ prepareCreate("test").addMapping("test", jsonBuilder().startObject()
+ .startObject("test")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("store", storeCountedFields)
+ .field("analyzer", "simple")
+ .endObject()
+ .startObject("token_count")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .field("store", true)
+ .endObject()
+ .startObject("token_count_unstored")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("token_count_with_doc_values")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .startObject("fielddata")
+ .field("format", LuceneTestCase.defaultCodecSupportsSortedSet() ? "doc_values" : null)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()).get();
+ ensureGreen();
+
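+ // Index six docs (three single-valued, three multi-valued), mixing single index requests and bulk requests.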
+ assertTrue(prepareIndex("single", "I have four terms").get().isCreated());
+ BulkResponse bulk = client().prepareBulk()
+ .add(prepareIndex("bulk1", "bulk three terms"))
+ .add(prepareIndex("bulk2", "this has five bulk terms")).get();
+ assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
+ assertTrue(prepareIndex("multi", "two terms", "wow now I have seven lucky terms").get().isCreated());
+ bulk = client().prepareBulk()
+ .add(prepareIndex("multibulk1", "one", "oh wow now I have eight unlucky terms"))
+ .add(prepareIndex("multibulk2", "six is a bunch of terms", "ten! ten terms is just crazy! too many too count!")).get();
+ assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
+
+ assertThat(refresh().getFailedShards(), equalTo(0));
+ }
+
+ private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException {
+ return client().prepareIndex("test", "test", id).setSource("foo", texts);
+ }
+
+ private SearchResponse searchById(String id) {
+ return prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).get();
+ }
+
+ private SearchRequestBuilder searchByNumericRange(int low, int high) {
+ return prepareSearch().setQuery(QueryBuilders.rangeQuery(randomFrom(
+ ImmutableList.of("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")
+ )).gte(low).lte(high));
+ }
+
+ private SearchRequestBuilder prepareSearch() {
+ SearchRequestBuilder request = client().prepareSearch("test").setTypes("test");
+ request.addField("foo.token_count");
+ if (loadCountedFields) {
+ request.addField("foo");
+ }
+ return request;
+ }
+
+ private void assertSearchReturns(SearchResponse result, String... ids) {
+ assertThat(result.getHits().getTotalHits(), equalTo((long) ids.length));
+ assertThat(result.getHits().hits().length, equalTo(ids.length));
+ List<String> foundIds = new ArrayList<String>();
+ for (SearchHit hit : result.getHits()) {
+ foundIds.add(hit.id());
+ }
+ assertThat(foundIds, containsInAnyOrder(ids));
+ for (SearchHit hit : result.getHits()) {
+ String id = hit.id();
+ if (id.equals("single")) {
+ assertSearchHit(hit, 4);
+ } else if (id.equals("bulk1")) {
+ assertSearchHit(hit, 3);
+ } else if (id.equals("bulk2")) {
+ assertSearchHit(hit, 5);
+ } else if (id.equals("multi")) {
+ assertSearchHit(hit, 2, 7);
+ } else if (id.equals("multibulk1")) {
+ assertSearchHit(hit, 1, 8);
+ } else if (id.equals("multibulk2")) {
+ assertSearchHit(hit, 6, 10);
+ } else {
+ throw new ElasticsearchException("Unexpected response!");
+ }
+ }
+ }
+
+ private void assertSearchHit(SearchHit hit, int... termCounts) {
+ assertThat(hit.field("foo.token_count"), not(nullValue()));
+ assertThat(hit.field("foo.token_count").values().size(), equalTo(termCounts.length));
+ for (int i = 0; i < termCounts.length; i++) {
+ assertThat((Integer) hit.field("foo.token_count").values().get(i), equalTo(termCounts[i]));
+ }
+
+ if (loadCountedFields && storeCountedFields) {
+ assertThat(hit.field("foo").values().size(), equalTo(termCounts.length));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
new file mode 100644
index 0000000..284a33a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test for {@link TokenCountFieldMapper}.
+ */
+public class TokenCountFieldMapperTests extends ElasticsearchTestCase {
+ @Test
+ public void testMerge() throws IOException {
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("person")
+ .startObject("properties")
+ .startObject("tc")
+ .field("type", "token_count")
+ .field("analyzer", "keyword")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper stage1 = MapperTestUtils.newParser().parse(stage1Mapping);
+
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("person")
+ .startObject("properties")
+ .startObject("tc")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper stage2 = MapperTestUtils.newParser().parse(stage2Mapping);
+
+ DocumentMapper.MergeResult mergeResult = stage1.merge(stage2, mergeFlags().simulate(true));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // Just simulated so merge hasn't happened yet
+ assertThat(((TokenCountFieldMapper) stage1.mappers().smartName("tc").mapper()).analyzer(), equalTo("keyword"));
+
+ mergeResult = stage1.merge(stage2, mergeFlags().simulate(false));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // This merge was applied for real, so the analyzer is now updated
+ assertThat(((TokenCountFieldMapper) stage1.mappers().smartName("tc").mapper()).analyzer(), equalTo("standard"));
+ }
+
+ @Test
+ public void testCountPositions() throws IOException {
+ // We're looking to make sure that we:
+ Token t1 = new Token(); // Don't count tokens without an increment
+ t1.setPositionIncrement(0);
+ Token t2 = new Token();
+ t2.setPositionIncrement(1); // Count normal tokens with one increment
+ Token t3 = new Token();
+ t3.setPositionIncrement(2); // Count funny tokens with more than one increment
+ int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
+ Token[] tokens = new Token[] {t1, t2, t3};
+ Collections.shuffle(Arrays.asList(tokens), getRandom());
+ TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
+ assertThat(TokenCountFieldMapper.countPositions(tokenStream), equalTo(7)); // 0 + 1 + 2 + 4
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
new file mode 100644
index 0000000..de72c19
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.date;
+
+import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleDateMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAutomaticDateParser() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field1", "2011/01/22")
+ .field("date_field2", "2011/01/22 00:00:00")
+ .field("wrong_date1", "-4")
+ .field("wrong_date2", "2012/2")
+ .field("wrong_date3", "2012/test")
+ .endObject()
+ .bytes());
+
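+ // Values matching the default dynamic date formats become date fields; the wrong_date values
+ // fail date parsing and fall back to plain strings.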
+ FieldMapper<?> fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field1");
+ assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field2");
+ assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
+
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date2");
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date3");
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+ }
+
+ @Test
+ public void testParseLocale() {
+ assertThat(Locale.GERMAN, equalTo(DateFieldMapper.parseLocale("de")));
+ assertThat(Locale.GERMANY, equalTo(DateFieldMapper.parseLocale("de_DE")));
+ assertThat(new Locale("de","DE","DE"), equalTo(DateFieldMapper.parseLocale("de_DE_DE")));
+
+ try {
+ DateFieldMapper.parseLocale("de_DE_DE_DE");
+ fail();
+ } catch(ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+ assertThat(Locale.ROOT, equalTo(DateFieldMapper.parseLocale("")));
+ assertThat(Locale.ROOT, equalTo(DateFieldMapper.parseLocale("ROOT")));
+ }
+
+ @Test
+ public void testLocale() throws IOException {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("date_field_default")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .endObject()
+ .startObject("date_field_en")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "EN")
+ .endObject()
+ .startObject("date_field_de")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "DE_de")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field_en", "Wed, 06 Dec 2000 02:55:00 -0800")
+ .field("date_field_de", "Mi, 06 Dez 2000 02:55:00 -0800")
+ .field("date_field_default", "Wed, 06 Dec 2000 02:55:00 -0800") // check default - no exception is a successs!
+ .endObject()
+ .bytes());
+ assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_de");
+ assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_default");
+ }
+
+ private DocumentMapper mapper(String mapping) throws IOException {
+ // we serialize and deserialize the mapping to make sure serialization works just fine
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject();
+ defaultMapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ String rebuildMapping = builder.string();
+ return MapperTestUtils.newParser().parse(rebuildMapping);
+ }
+
+ private void assertNumericTokensEqual(ParsedDocument doc, DocumentMapper defaultMapper, String fieldA, String fieldB) throws IOException {
+ assertThat(doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
+ assertThat(doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
+
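+ // Collect the numeric trie tokens emitted for fieldA, then require fieldB to emit the identical sequence.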
+ TokenStream tokenStream = doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.indexAnalyzer());
+ tokenStream.reset();
+ NumericTermAttribute nta = tokenStream.addAttribute(NumericTermAttribute.class);
+ List<Long> values = new ArrayList<Long>();
+ while(tokenStream.incrementToken()) {
+ values.add(nta.getRawValue());
+ }
+
+ tokenStream = doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.indexAnalyzer());
+ tokenStream.reset();
+ nta = tokenStream.addAttribute(NumericTermAttribute.class);
+ int pos = 0;
+ while(tokenStream.incrementToken()) {
+ assertThat(values.get(pos++), equalTo(nta.getRawValue()));
+ }
+ assertThat(pos, equalTo(values.size()));
+ }
+
+ @Test
+ public void testTimestampAsDate() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ long value = System.currentTimeMillis();
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", value)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("date_field").tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testDateDetection() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("date_detection", false)
+ .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", "2010-01-01")
+ .field("date_field_x", "2010-01-01")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("date_field"), nullValue());
+ assertThat(doc.rootDoc().get("date_field_x"), equalTo("2010-01-01"));
+ }
+
+ @Test
+ public void testHourFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("date_detection", false)
+ .startObject("properties").startObject("date_field").field("type", "date").field("format", "HH:mm:ss").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", "10:00:00")
+ .endObject()
+ .bytes());
+ assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis())));
+
+ Filter filter = defaultMapper.mappers().smartNameFieldMapper("date_field").rangeFilter("10:00:00", "11:00:00", true, true, null);
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;
+ assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(11).millis() + 999).getMillis())); // +999 ms so the inclusive upper bound covers every millisecond of the 11:00:00 second
+ assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(10).millis()).getMillis()));
+ }
+
+
+ @Test
+ public void testDayWithoutYearFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("date_detection", false)
+ .startObject("properties").startObject("date_field").field("type", "date").field("format", "MMM dd HH:mm:ss").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", "Jan 02 10:00:00")
+ .endObject()
+ .bytes());
+ assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis())));
+
+ Filter filter = defaultMapper.mappers().smartNameFieldMapper("date_field").rangeFilter("Jan 02 10:00:00", "Jan 02 11:00:00", true, true, null);
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;
+ assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis() + 999).getMillis())); // +999 ms so the inclusive upper bound covers every millisecond of the last second
+ assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(34).millis()).getMillis()));
+ }
+
+ @Test
+ public void testIgnoreMalformedOption() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "date").field("ignore_malformed", true).endObject()
+ .startObject("field2").field("type", "date").field("ignore_malformed", false).endObject()
+ .startObject("field3").field("type", "date").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "a")
+ .field("field2", "2010-01-01")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field1"), nullValue());
+ assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+ }
+
+ // Verify that the default is false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+ }
+
+ // Unless the global ignore_malformed option is set to true
+ Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+ defaultMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+ // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+ }
+ }
+
+ @Test
+ public void testThatMergingWorks() throws Exception {
+ String initialMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field").field("type", "date")
+ .field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "date")
+ .field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(initialMapping);
+ DocumentMapper mergeMapper = mapper(updatedMapping);
+
+ assertThat(defaultMapper.mappers().name("field").mapper(), is(instanceOf(DateFieldMapper.class)));
+ DateFieldMapper initialDateFieldMapper = (DateFieldMapper) defaultMapper.mappers().name("field").mapper();
+ Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper);
+ assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy"));
+
+ DocumentMapper.MergeResult mergeResult = defaultMapper.merge(mergeMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+
+ assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.conflicts()), mergeResult.hasConflicts(), is(false));
+ assertThat(defaultMapper.mappers().name("field").mapper(), is(instanceOf(DateFieldMapper.class)));
+
+ DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().name("field").mapper();
+ Map<String, String> mergedConfig = getConfigurationViaXContent(mergedFieldMapper);
+ assertThat(mergedConfig.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ"));
+ }
+
+ private Map<String, String> getConfigurationViaXContent(DateFieldMapper dateFieldMapper) throws IOException {
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ dateFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ Map<String, Object> dateFieldMapperMap = JsonXContent.jsonXContent.createParser(builder.string()).mapAndClose();
+ assertThat(dateFieldMapperMap, hasKey("field"));
+ assertThat(dateFieldMapperMap.get("field"), is(instanceOf(Map.class)));
+ return (Map<String, String>) dateFieldMapperMap.get("field");
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java
new file mode 100644
index 0000000..17e30f0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.dynamic;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.StrictDynamicMappingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DynamicMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDynamicTrue() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "true")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+ }
+
+ @Test
+ public void testDynamicFalse() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "false")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
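+ // With dynamic set to false, the unmapped field2 is silently ignored rather than added or rejected.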
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("field2"), nullValue());
+ }
+
+
+ @Test
+ public void testDynamicStrict() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+ fail("expected a StrictDynamicMappingException");
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+ }
+
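+ // inner objects inherit the root-level "dynamic" setting when they do not
+ // declare their own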
+ @Test
+ public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "false")
+ .startObject("properties")
+ .startObject("obj1").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("obj1.field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("obj1.field2"), nullValue());
+ }
+
+ @Test
+ public void testDynamicStrictWithInnerObjectButDynamicSetOnRoot() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("obj1").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .bytes());
+ fail("expected a StrictDynamicMappingException");
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java
new file mode 100644
index 0000000..61f4e9f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.genericstore;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class GenericStoreDynamicTemplateTests extends ElasticsearchTestCase {
+
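+ // test-mapping.json declares a single dynamic template matching "*" with
+ // "store":"yes", so every dynamically mapped field should come back stored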
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
+
+ f = doc.getField("age");
+ assertThat(f.name(), equalTo("age"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("age");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json
new file mode 100644
index 0000000..75b8a0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json
@@ -0,0 +1,5 @@
+{
+ "_id":"1",
+ "name":"some name",
+ "age":1
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json
new file mode 100644
index 0000000..d99067c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json
@@ -0,0 +1,14 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "template_1":{
+ "match":"*",
+ "mapping":{
+ "store":"yes"
+ }
+ }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java
new file mode 100644
index 0000000..05546e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.pathmatch;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PathMatchDynamicTemplateTests extends ElasticsearchTestCase {
+
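+ // test-mapping.json matches on the full dotted path: "obj1.obj2.*" maps to
+ // store "no", "obj1.*" to store "yes", and "*.obj4.*" to type "string".
+ // Templates are tried in declaration order, so the more specific
+ // "obj1.obj2.*" rule wins over "obj1.*" for obj1.obj2.name.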
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("top_level"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(false));
+
+ f = doc.getField("obj1.name");
+ assertThat(f.name(), equalTo("obj1.name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("obj1.name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
+
+ f = doc.getField("obj1.obj2.name");
+ assertThat(f.name(), equalTo("obj1.obj2.name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("obj1.obj2.name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(false));
+
+ // verify more complex path_match expressions
+
+ fieldMappers = docMapper.mappers().fullName("obj3.obj4.prop1");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json
new file mode 100644
index 0000000..818bedd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json
@@ -0,0 +1,15 @@
+{
+ "_id":"1",
+ "name":"top_level",
+ "obj1":{
+ "name":"obj1_level",
+ "obj2":{
+ "name":"obj2_level"
+ }
+ },
+ "obj3":{
+ "obj4":{
+ "prop1":"prop1_value"
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json
new file mode 100644
index 0000000..dce33da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json
@@ -0,0 +1,30 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "template_1":{
+ "path_match":"obj1.obj2.*",
+ "mapping":{
+ "store":"no"
+ }
+ }
+ },
+ {
+ "template_2":{
+ "path_match":"obj1.*",
+ "mapping":{
+ "store":"yes"
+ }
+ }
+ },
+ {
+ "template_3":{
+ "path_match":"*.obj4.*",
+ "mapping":{
+ "type":"string"
+ }
+ }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java
new file mode 100644
index 0000000..e11c703
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.simple;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleDynamicTemplatesTests extends ElasticsearchTestCase {
+
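+ // match_mapping_type restricts a template to the JSON type detected for the
+ // value: the string field "s" matches and is mapped with "index":"no", while
+ // the long field "l" falls through to the default (indexed) mapping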
+ @Test
+ public void testMatchTypeOnly() throws Exception {
+ XContentBuilder builder = JsonXContent.contentBuilder();
+ builder.startObject().startObject("person").startArray("dynamic_templates").startObject().startObject("test")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("index", "no").endObject()
+ .endObject().endObject().endArray().endObject().endObject();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(builder.string());
+ builder = JsonXContent.contentBuilder();
+ builder.startObject().field("_id", "1").field("s", "hello").field("l", 1).endObject();
+ docMapper.parse(builder.bytes());
+
+ DocumentFieldMappers mappers = docMapper.mappers();
+
+ assertThat(mappers.smartName("s"), Matchers.notNullValue());
+ assertThat(mappers.smartName("s").mapper().fieldType().indexed(), equalTo(false));
+
+ assertThat(mappers.smartName("l"), Matchers.notNullValue());
+ assertThat(mappers.smartName("l").mapper().fieldType().indexed(), equalTo(true));
+ }
+
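+ // testSimple and testSimpleWithXContentTraverse exercise the same fixture:
+ // fields matching "multi*" become multi_field mappings with an analyzed
+ // top-level part plus a not_analyzed ".org" subfield, while the catch-all
+ // string template maps everything else (such as "name") as not_analyzed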
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1");
+ assertThat(f.name(), equalTo("multi1"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi1");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1.org");
+ assertThat(f.name(), equalTo("multi1.org"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi1.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2");
+ assertThat(f.name(), equalTo("multi2"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi2");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2.org");
+ assertThat(f.name(), equalTo("multi2.org"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi2.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ }
+
+ @Test
+ public void testSimpleWithXContentTraverse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ docMapper.refreshSource();
+ docMapper = MapperTestUtils.newParser().parse(docMapper.mappingSource().string());
+
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1");
+ assertThat(f.name(), equalTo("multi1"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi1");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1.org");
+ assertThat(f.name(), equalTo("multi1.org"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi1.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2");
+ assertThat(f.name(), equalTo("multi2"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi2");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2.org");
+ assertThat(f.name(), equalTo("multi2.org"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi2.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json
new file mode 100644
index 0000000..5682d1d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json
@@ -0,0 +1,7 @@
+{
+ "_id":"1",
+ "name":"some name",
+ "age":1,
+ "multi1":"multi 1",
+ "multi2":"multi 2"
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json
new file mode 100644
index 0000000..fa0293c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json
@@ -0,0 +1,36 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "tempalte_1":{
+ "match":"multi*",
+ "mapping":{
+ "type":"multi_field",
+ "fields":{
+ "{name}":{
+ "type":"{dynamic_type}",
+ "index":"analyzed",
+ "store":"yes"
+ },
+ "org":{
+ "type":"{dynamic_type}",
+ "index":"not_analyzed",
+ "store":"yes"
+ }
+ }
+ }
+ }
+ },
+ {
+ "template_2":{
+ "match":"*",
+ "match_mapping_type":"string",
+ "mapping":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java
new file mode 100644
index 0000000..f52363e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+
+public class GeoEncodingTests extends ElasticsearchTestCase {
+
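+ // round-trip check: for random coordinates and a random precision between
+ // 1 and 10 (mm, m, or km), the chosen encoding must be at least as precise
+ // as requested, and decode(encode(lat, lon)) must land within that distance
+ // of the original point under the PLANE distance model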
+ public void test() {
+ for (int i = 0; i < 10000; ++i) {
+ final double lat = randomDouble() * 180 - 90;
+ final double lon = randomDouble() * 360 - 180;
+ final Distance precision = new Distance(1+(randomDouble() * 9), randomFrom(Arrays.asList(DistanceUnit.MILLIMETERS, DistanceUnit.METERS, DistanceUnit.KILOMETERS)));
+ final GeoPointFieldMapper.Encoding encoding = GeoPointFieldMapper.Encoding.of(precision);
+ assertThat(encoding.precision().convert(DistanceUnit.METERS).value, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
+ final GeoPoint geoPoint = encoding.decode(encoding.encodeCoordinate(lat), encoding.encodeCoordinate(lon), new GeoPoint());
+ final double error = GeoDistance.PLANE.calculate(lat, lon, geoPoint.lat(), geoPoint.lon(), DistanceUnit.METERS);
+ assertThat(error, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java
new file mode 100644
index 0000000..44c9575
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.Map;
+
+public class GeoMappingTests extends ElasticsearchIntegrationTest {
+
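+ // the fielddata precision of a geo_point is updatable: create the index with
+ // 2mm, put a new mapping with 11m, then read the mapping source back to
+ // verify the change took effect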
+ public void testUpdatePrecision() throws Exception {
+ prepareCreate("test").addMapping("type1", XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .startObject("fielddata")
+ .field("format", "compressed")
+ .field("precision", "2mm")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureYellow();
+ assertPrecision(new Distance(2, DistanceUnit.MILLIMETERS));
+
+ client().admin().indices().preparePutMapping("test").setType("type1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .startObject("fielddata")
+ .field("format", "compressed")
+ .field("precision", "11m")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ assertPrecision(new Distance(11, DistanceUnit.METERS));
+ }
+
+ private void assertPrecision(Distance expected) throws Exception {
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = client().admin().indices().getMappings(new GetMappingsRequest().indices("test").types("type1")).actionGet().getMappings();
+ assertNotNull(mappings);
+ Map<String, ?> properties = (Map<String, ?>) mappings.get("test").get("type1").getSourceAsMap().get("properties");
+ Map<String, ?> pinProperties = (Map<String, ?>) properties.get("pin");
+ Map<String, ?> pinFieldData = (Map<String, ?>) pinProperties.get("fielddata");
+ Distance precision = Distance.parseDistance(pinFieldData.get("precision").toString());
+ assertEquals(expected, precision);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
new file mode 100644
index 0000000..e2a2be5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.geo;
+
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class GeoShapeFieldMapperTests extends ElasticsearchTestCase {
+
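+ // with no options set, geo_shape falls back to GeoShapeFieldMapper.Defaults:
+ // a geohash prefix tree with the default distance_error_pct and level count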
+ @Test
+ public void testDefaultConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(GeoShapeFieldMapper.Defaults.DISTANCE_ERROR_PCT));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoShapeFieldMapper.Defaults.GEOHASH_LEVELS));
+ }
+
+ @Test
+ public void testGeohashConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", "4")
+ .field("distance_error_pct", "0.1")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.1));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(4));
+ }
+
+ @Test
+ public void testQuadtreeConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "6")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(6));
+ }
+
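+ // when both tree_levels and precision are configured, the more precise of
+ // the two wins: "70m" beats the coarser explicit level counts below, while
+ // an explicit count above the 70m-equivalent depth beats the precision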
+ @Test
+ public void testLevelPrecisionConfiguration() throws IOException {
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "6")
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ /* 70m is more precise so it wins */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(70d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", "6")
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ /* 70m is more precise so it wins */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(70d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", GeoUtils.geoHashLevelsForPrecision(70d)+1)
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(70d)+1));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", GeoUtils.quadTreeLevelsForPrecision(70d)+1)
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(70d)+1));
+ }
+ }
+
+ @Test
+ public void testLevelDefaults() throws IOException {
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ /* 50m is default */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(50d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ /* 50m is default */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(50d)));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
new file mode 100644
index 0000000..ffa8900
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeohashMappingGeoPointTests extends ElasticsearchTestCase {
+
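+ // with "lat_lon" disabled there are no point.lat/point.lon numeric
+ // subfields; only the combined "point" value (plus "point.geohash" when
+ // geohash indexing is enabled) is indexed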
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ @Test
+ public void testGeoHashPrecisionAsInteger() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash_precision", 10).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().smartName("point").mapper();
+ assertThat(mapper, instanceOf(GeoPointFieldMapper.class));
+ GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) mapper;
+ assertThat(geoPointFieldMapper.geoHashPrecision(), is(10));
+ }
+
+ @Test
+ public void testGeoHashPrecisionAsLength() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash_precision", "5m").endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().smartName("point").mapper();
+ assertThat(mapper, instanceOf(GeoPointFieldMapper.class));
+ GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) mapper;
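+ // a "geohash_precision" given as a distance is converted to the geohash
+ // length that resolves it; for 5m that is length 10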
+ assertThat(geoPointFieldMapper.geoHashPrecision(), is(10));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java
new file mode 100644
index 0000000..64d7758
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class LatLonAndGeohashMappingGeoPointTests extends ElasticsearchTestCase {
+
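+ // with both lat_lon and geohash enabled, every input form (lat/lon object,
+ // "lat,lon" string, geohash string) yields the numeric subfields plus the
+ // point.geohash subfield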
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java
new file mode 100644
index 0000000..f6b731a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java
@@ -0,0 +1,411 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class LatLonMappingGeoPointTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNormalizeLatLonValuesDefault() throws Exception {
+ // default to normalize
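+ // out-of-range coordinates wrap instead of failing: crossing a pole
+ // reflects the latitude and shifts the longitude by 180 degrees, so
+ // (91, 181) normalizes to (89, 1) and (181, 361) to (-1, -179)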
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("89.0,1.0"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("-89.0,-1.0"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 181).field("lon", 361).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("-1.0,-179.0"));
+ }
+
+ @Test
+ public void testValidateLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 90).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
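+ // with validate=true and normalize=false, any |lat| > 90 or |lon| > 180
+ // must be rejected at parse time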
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException for latitude below -90");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException for latitude above 90");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException for longitude below -180");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException for longitude above 180");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testNoValidateLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 90).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+ }
+
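+ // lat_lon=true adds numeric point.lat / point.lon subfields (unstored
+ // unless "store" is "yes"); the combined "point" field keeps the
+ // "lat,lon" string form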
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").fieldType().stored(), is(false));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(false));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonValuesStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testArrayLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .startObject().field("lat", 1.2).field("lon", 1.3).endObject()
+ .startObject().field("lat", 1.4).field("lon", 1.5).endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValueStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValueArray() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .value("1.2,1.3")
+ .value("1.4,1.5")
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
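+ // a geohash-encoded string is accepted as the point value and decoded back into lat/lon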
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ @Test
+ public void testLonLatArray() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
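+ // note: the array form is [lon, lat] (GeoJSON order), so 1.3 here is the longitude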
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayArrayStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
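+ // each inner array is again a point in [lon, lat] order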
+ .startArray("point")
+ .startArray().value(1.3).value(1.2).endArray()
+ .startArray().value(1.5).value(1.4).endArray()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testTryParsingLatLonFromString() throws Exception {
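+ // lat and lon supplied as JSON strings should still parse into numeric coordinates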
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", "52").field("lon", "4").endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+ GeoPoint geoPoint = GeoPoint.parse(parser);
+ assertThat(geoPoint.lat(), is(52.0));
+ assertThat(geoPoint.lon(), is(4.0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java
new file mode 100644
index 0000000..adb97e6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.id;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class IdMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleIdTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), nullValue());
+
+ try {
+ docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException when no id is provided");
+ } catch (MapperParsingException e) {
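+ // expected: without an external id and without an _id in the source, parsing must fail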
+ }
+
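+ // an _id embedded in the source is enough to parse the document without an external id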
+ doc = docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_id", 1)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), nullValue());
+ }
+
+ @Test
+ public void testIdIndexed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").field("index", "not_analyzed").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), notNullValue());
+
+ doc = docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_id", 1)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), notNullValue());
+ }
+
+ @Test
+ public void testIdPath() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").field("path", "my_path").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ // serialize the id mapping
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ builder = docMapper.idFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ String serialized_id_mapping = builder.string();
+
+ String expected_id_mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("_id").field("path", "my_path").endObject()
+ .endObject().string();
+
+ assertThat(serialized_id_mapping, equalTo(expected_id_mapping));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java
new file mode 100644
index 0000000..ce08da5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.index;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class IndexTypeMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleIndexMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(true));
+ assertThat(indexMapper.fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().indexName("_index").mapper(), instanceOf(IndexFieldMapper.class));
+
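+ // MapperTestUtils is assumed to create the parser for an index named "test"; that is the value _index stores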
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), equalTo("test"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void explicitDisabledIndexMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(false));
+ assertThat(indexMapper.fieldType().stored(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), nullValue());
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void defaultDisabledIndexMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(false));
+ assertThat(indexMapper.fieldType().stored(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), nullValue());
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void testThatMergingFieldMappingAllowsDisabling() throws Exception {
+ String mappingWithIndexEnabled = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapperEnabled = MapperTestUtils.newParser().parse(mappingWithIndexEnabled);
+
+ String mappingWithIndexDisabled = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapperDisabled = MapperTestUtils.newParser().parse(mappingWithIndexDisabled);
+
+ mapperEnabled.merge(mapperDisabled, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+ assertThat(mapperEnabled.rootMapper(IndexFieldMapper.class).enabled(), is(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
new file mode 100644
index 0000000..88cd8d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ip;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Ignore;
+
+/**
+ *
+ */
+@Ignore("No tests?")
+public class SimpleIpMappingTests extends ElasticsearchTestCase {
+
+ // No longer enabled...
+// @Test public void testAutoIpDetection() throws Exception {
+// String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+// .startObject("properties").endObject()
+// .endObject().endObject().string();
+//
+// XContentDocumentMapper defaultMapper = MapperTests.newParser().parse(mapping);
+//
+// ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+// .startObject()
+// .field("ip1", "127.0.0.1")
+// .field("ip2", "0.1")
+// .field("ip3", "127.0.0.1.2")
+// .endObject()
+// .copiedBytes());
+//
+// assertThat(doc.doc().getFieldable("ip1"), notNullValue());
+// assertThat(doc.doc().get("ip1"), nullValue()); // its numeric
+// assertThat(doc.doc().get("ip2"), equalTo("0.1"));
+// assertThat(doc.doc().get("ip3"), equalTo("127.0.0.1.2"));
+// }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java b/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java
new file mode 100644
index 0000000..b2f90aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.lucene;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class DoubleIndexingDocTest {
+
+ @Test
+ public void testDoubleIndexingSameDoc() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", 1)
+ .field("field3", 1.1)
+ .field("field4", "2010-01-01")
+ .startArray("field5").value(1).value(2).value(3).endArray()
+ .endObject()
+ .bytes());
+
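+ // index the same parsed document twice; every term query below should then match both copies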
+ writer.addDocument(doc.rootDoc(), doc.analyzer());
+ writer.addDocument(doc.rootDoc(), doc.analyzer());
+
+ IndexReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs topDocs = searcher.search(mapper.mappers().smartName("field1").mapper().termQuery("value1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field2").mapper().termQuery("1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field3").mapper().termQuery("1.1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field4").mapper().termQuery("2010-01-01", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field5").mapper().termQuery("1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field5").mapper().termQuery("2", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field5").mapper().termQuery("3", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java b/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java
new file mode 100644
index 0000000..4283e23
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.lucene;
+
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class StoredNumericValuesTest {
+
+ @Test
+ public void testBytesAndNumericRepresentation() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("store", "yes").endObject()
+ .startObject("field2").field("type", "float").field("store", "yes").endObject()
+ .startObject("field3").field("type", "long").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", 1)
+ .field("field2", 1.1)
+ .startArray("field3").value(1).value(2).value(3).endArray()
+ .endObject()
+ .bytes());
+
+ writer.addDocument(doc.rootDoc(), doc.analyzer());
+
+ // Indexing a doc in the old way
+ FieldType fieldType = new FieldType();
+ fieldType.setStored(true);
+ fieldType.setNumericType(FieldType.NumericType.INT);
+ Document doc2 = new Document();
+ doc2.add(new StoredField("field1", new BytesRef(Numbers.intToBytes(1))));
+ doc2.add(new StoredField("field2", new BytesRef(Numbers.floatToBytes(1.1f))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(1l))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(2l))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(3l))));
+ writer.addDocument(doc2);
+
+ DirectoryReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
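+ // load the stored fields through a visitor; postProcess(mapper) turns the raw stored bytes back into typed numbers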
+ Set<String> fields = new HashSet<String>(Arrays.asList("field1", "field2", "field3"));
+ CustomFieldsVisitor fieldsVisitor = new CustomFieldsVisitor(fields, false);
+ searcher.doc(0, fieldsVisitor);
+ fieldsVisitor.postProcess(mapper);
+ assertThat(fieldsVisitor.fields().size(), equalTo(3));
+ assertThat(fieldsVisitor.fields().get("field1").size(), equalTo(1));
+ assertThat((Integer) fieldsVisitor.fields().get("field1").get(0), equalTo(1));
+ assertThat(fieldsVisitor.fields().get("field2").size(), equalTo(1));
+ assertThat((Float) fieldsVisitor.fields().get("field2").get(0), equalTo(1.1f));
+ assertThat(fieldsVisitor.fields().get("field3").size(), equalTo(3));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(0), equalTo(1L));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(1), equalTo(2L));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(2), equalTo(3L));
+
+ // Make sure the doc gets loaded as if it was stored in the new way
+ fieldsVisitor.reset();
+ searcher.doc(1, fieldsVisitor);
+ fieldsVisitor.postProcess(mapper);
+ assertThat(fieldsVisitor.fields().size(), equalTo(3));
+ assertThat(fieldsVisitor.fields().get("field1").size(), equalTo(1));
+ assertThat((Integer) fieldsVisitor.fields().get("field1").get(0), equalTo(1));
+ assertThat(fieldsVisitor.fields().get("field2").size(), equalTo(1));
+ assertThat((Float) fieldsVisitor.fields().get("field2").get(0), equalTo(1.1f));
+ assertThat(fieldsVisitor.fields().get("field3").size(), equalTo(3));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(0), equalTo(1L));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(1), equalTo(2L));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(2), equalTo(3L));
+
+ reader.close();
+ writer.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java
new file mode 100644
index 0000000..df695b5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.merge;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class TestMergeMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void test1Merge() throws Exception {
+
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper stage1 = MapperTestUtils.newParser().parse(stage1Mapping);
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("age").field("type", "integer").endObject()
+ .startObject("obj1").startObject("properties").startObject("prop1").field("type", "integer").endObject().endObject().endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper stage2 = MapperTestUtils.newParser().parse(stage2Mapping);
+
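+ // a simulated merge only reports what would happen; it must not change the mappings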
+ DocumentMapper.MergeResult mergeResult = stage1.merge(stage2, mergeFlags().simulate(true));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // since we are simulating, we should not have the age mapping
+ assertThat(stage1.mappers().smartName("age"), nullValue());
+ assertThat(stage1.mappers().smartName("obj1.prop1"), nullValue());
+ // now merge, don't simulate
+ mergeResult = stage1.merge(stage2, mergeFlags().simulate(false));
+ // the real merge should also report no conflicts
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // and now the age mapping is present
+ assertThat(stage1.mappers().smartName("age"), notNullValue());
+ assertThat(stage1.mappers().smartName("obj1.prop1"), notNullValue());
+ }
+
+ @Test
+ public void testMergeObjectDynamic() throws Exception {
+ String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").endObject().endObject().string();
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(objectMapping);
+ assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.TRUE));
+
+ String withDynamicMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").field("dynamic", "false").endObject().endObject().string();
+ DocumentMapper withDynamicMapper = MapperTestUtils.newParser().parse(withDynamicMapping);
+ assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
+
+ DocumentMapper.MergeResult mergeResult = mapper.merge(withDynamicMapper, mergeFlags().simulate(false));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
+ }
+
+ @Test
+ public void testMergeObjectAndNested() throws Exception {
+ String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("obj").field("type", "object").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper objectMapper = MapperTestUtils.newParser().parse(objectMapping);
+ String nestedMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("obj").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper nestedMapper = MapperTestUtils.newParser().parse(nestedMapping);
+
+ DocumentMapper.MergeResult mergeResult = objectMapper.merge(nestedMapper, mergeFlags().simulate(true));
+ assertThat(mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.conflicts().length, equalTo(1));
+ assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested"));
+
+ mergeResult = nestedMapper.merge(objectMapper, mergeFlags().simulate(true));
+ assertThat(mergeResult.conflicts().length, equalTo(1));
+ assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested"));
+ }
+
+ @Test
+ public void testMergeSearchAnalyzer() throws Exception {
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "whitespace").endObject().endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "keyword").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper existing = MapperTestUtils.newParser().parse(mapping1);
+ DocumentMapper changed = MapperTestUtils.newParser().parse(mapping2);
+
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+ DocumentMapper.MergeResult mergeResult = existing.merge(changed, mergeFlags().simulate(false));
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("keyword"));
+ }
+
+ @Test
+ public void testNotChangeSearchAnalyzer() throws Exception {
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "whitespace").endObject().endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("postings_format", "direct").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper existing = MapperTestUtils.newParser().parse(mapping1);
+ DocumentMapper changed = MapperTestUtils.newParser().parse(mapping2);
+
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+ DocumentMapper.MergeResult mergeResult = existing.merge(changed, mergeFlags().simulate(false));
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+ assertThat((existing.mappers().name("field").mapper().postingsFormatProvider()).name(), equalTo("direct"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java
new file mode 100644
index 0000000..0be1024
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java
@@ -0,0 +1,408 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class MultiFieldTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMultiField_multiFieldType() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json");
+ testMultiField(mapping);
+ }
+
+ @Test
+ public void testMultiField_multiFields() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-fields.json");
+ testMultiField(mapping);
+ }
+
+ private void testMultiField(String mapping) throws Exception {
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(false));
+
+ f = doc.getField("object1.multi1");
+ assertThat(f.name(), equalTo("object1.multi1"));
+
+ f = doc.getField("object1.multi1.string");
+ assertThat(f.name(), equalTo("object1.multi1.string"));
+ assertThat(f.stringValue(), equalTo("2010-01-01"));
+
+ assertThat(docMapper.mappers().fullName("name").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.test1").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.test1").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldType().tokenized(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldDataType().getLoading(), equalTo(FieldMapper.Loading.EAGER));
+
+ assertThat(docMapper.mappers().fullName("name.test2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.test2").mapper(), instanceOf(TokenCountFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.test2").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test2").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test2").mapper().fieldType().tokenized(), equalTo(false));
+ assertThat(((TokenCountFieldMapper) docMapper.mappers().fullName("name.test2").mapper()).analyzer(), equalTo("simple"));
+
+ assertThat(docMapper.mappers().fullName("object1.multi1").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("object1.multi1").mapper(), instanceOf(DateFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper().fieldType().tokenized(), equalTo(false));
+ }
+
+ @Test
+ public void testBuildThenParse() throws Exception {
+ DocumentMapperParser mapperParser = MapperTestUtils.newParser();
+
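+ // build the same multi-field mapping programmatically, serialize it, and reparse the generated JSON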
+ DocumentMapper builderDocMapper = doc("test", rootObject("person").add(
+ stringField("name").store(true)
+ .addMultiField(stringField("indexed").index(true).tokenized(true))
+ .addMultiField(stringField("not_indexed").index(false).store(true))
+ )).build(mapperParser);
+ builderDocMapper.refreshSource();
+
+ String builtMapping = builderDocMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper docMapper = mapperParser.parse(builtMapping);
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(false));
+ }
+
+ @Test
+ public void testConvertMultiFieldNoDefaultField() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+
+ assertNull(doc.getField("name"));
+ IndexableField f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("name").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertNull(doc.getField("age"));
+ f = doc.getField("age.not_stored");
+ assertThat(f.name(), equalTo("age.not_stored"));
+ assertThat(f.numericValue(), equalTo((Number) 28L));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("age.stored");
+ assertThat(f.name(), equalTo("age.stored"));
+ assertThat(f.numericValue(), equalTo((Number) 28L));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("age").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("age").mapper(), instanceOf(LongFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("age").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("age").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("age").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper(), instanceOf(LongFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("age.stored").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("age.stored").mapper(), instanceOf(LongFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("age.stored").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("age.stored").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("age.stored").mapper().fieldType().tokenized(), equalTo(false));
+ }
+
+ @Test
+ public void testConvertMultiFieldGeoPoint() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), instanceOf(GeoPointFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().tokenized(), equalTo(false));
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("a", "-1,-1")
+ .endObject().bytes();
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a"));
+ assertThat(f.stringValue(), equalTo("-1,-1"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("a.b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a.b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b").mapper(), instanceOf(GeoPointFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().tokenized(), equalTo(false));
+
+ json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("b", "-1,-1")
+ .endObject().bytes();
+ doc = docMapper.parse(json).rootDoc();
+
+ f = doc.getField("b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ assertThat(f.stringValue(), equalTo("-1,-1"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .startArray("b").startArray().value(-1).value(-1).endArray().startArray().value(-2).value(-2).endArray().endArray()
+ .endObject().bytes();
+ doc = docMapper.parse(json).rootDoc();
+
+ f = doc.getFields("b")[0];
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getFields("b")[1];
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-2.0,-2.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ // NOTE: "]" B/c the lat,long aren't specified as a string, we miss the actual values when parsing the multi
+ // fields. We already skipped over the coordinates values and can't get to the coordinates.
+ // This happens if coordinates are specified as array and object.
+ assertThat(f.stringValue(), equalTo("]"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ }
+
+ @Test
+ public void testConvertMultiFieldCompletion() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), instanceOf(CompletionFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().tokenized(), equalTo(true));
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("a", "complete me")
+ .endObject().bytes();
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("a.b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a.b"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b").mapper(), instanceOf(CompletionFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().tokenized(), equalTo(false));
+
+ json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("b", "complete me")
+ .endObject().bytes();
+ doc = docMapper.parse(json).rootDoc();
+
+ f = doc.getField("b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java
new file mode 100644
index 0000000..1808074
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.index.query.FilterBuilders.geoDistanceFilter;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiFieldsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testMultiFields() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createTypeSource())
+ );
+
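+ // createTypeSource() randomly emits either the new "fields" syntax or the legacy multi_field type (see below).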
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource));
+ assertThat(titleFields.size(), equalTo(1));
+ assertThat(titleFields.get("not_analyzed"), notNullValue());
+ assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1")
+ .setSource("title", "Multi fields")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title", "multi"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+ searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title.not_analyzed", "Multi fields"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+
+ assertAcked(
+ client().admin().indices().preparePutMapping("my-index").setType("my-type")
+ .setSource(createPutMappingSource())
+ .setIgnoreConflicts(true) // If the update uses the legacy multi_field type, conflicts need to be ignored.
+ );
+
+ getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ mappingSource = mappingMetaData.sourceAsMap();
+ assertThat(((Map) XContentMapValues.extractValue("properties.title", mappingSource)).size(), equalTo(2));
+ titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource));
+ assertThat(titleFields.size(), equalTo(2));
+ assertThat(titleFields.get("not_analyzed"), notNullValue());
+ assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
+ assertThat(titleFields.get("uncased"), notNullValue());
+ assertThat(((Map)titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace"));
+
+ client().prepareIndex("my-index", "my-type", "1")
+ .setSource("title", "Multi fields")
+ .setRefresh(true)
+ .get();
+
+ searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title.uncased", "Multi"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+ }
+
+ @Test
+ public void testGeoPointMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("geo_point"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(2));
+ assertThat(aField.get("type").toString(), equalTo("geo_point"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
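+ // "51,19" is parsed as a lat,lon geo_point for "a", while the "a.b" multi field keeps the raw string.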
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "51,19").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index")
+ .setQuery(constantScoreQuery(geoDistanceFilter("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS)))
+ .get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "51,19")).get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testTokenCountMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", "token_count")
+ .field("analyzer", "simple")
+ .startObject("fields")
+ .startObject("b")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject())
+ );
+
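+ // token_count fields require an analyzer; "simple" is used here so the token count can be computed.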
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(3));
+ assertThat(aField.get("type").toString(), equalTo("token_count"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "my tokens").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "my tokens")).get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testCompletionMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("completion"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(7));
+ assertThat(aField.get("type").toString(), equalTo("completion"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "complete me").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "complete me")).get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testIpMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("ip"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(2));
+ assertThat(aField.get("type").toString(), equalTo("ip"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "127.0.0.1").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "127.0.0.1")).get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ }
+
+ private XContentBuilder createMappingSource(String fieldType) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", fieldType)
+ .startObject("fields")
+ .startObject("b")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ private XContentBuilder createTypeSource() throws IOException {
+ if (randomBoolean()) {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("not_analyzed")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ } else {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("title")
+ .field("type", "string")
+ .endObject()
+ .startObject("not_analyzed")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+ }
+
+ private XContentBuilder createPutMappingSource() throws IOException {
+ if (randomBoolean()) {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("uncased")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ } else {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("uncased")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
new file mode 100644
index 0000000..d866363
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield.merge;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class JavaMultiFieldMergeTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMergeMultiField() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
+ DocumentMapperParser parser = MapperTestUtils.newParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed"), nullValue());
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ IndexableField f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, nullValue());
+
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json");
+ DocumentMapper docMapper2 = parser.parse(mapping);
+
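+ // A merge with simulate(true) only checks for conflicts; simulate(false) actually applies the changes.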
+ DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper2, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+ json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ doc = docMapper.parse(json).rootDoc();
+ f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, notNullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json");
+ DocumentMapper docMapper3 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper3, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper3, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json");
+ DocumentMapper docMapper4 = parser.parse(mapping);
+
+
+ mergeResult = docMapper.merge(docMapper4, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper4, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3").mapper(), notNullValue());
+ }
+
+ @Test
+ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
+ DocumentMapperParser parser = MapperTestUtils.newParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed"), nullValue());
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ IndexableField f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, nullValue());
+
+
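+ // upgrade1.json defines the same fields via the legacy multi_field type; merging it should upgrade cleanly.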
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json");
+ DocumentMapper docMapper2 = parser.parse(mapping);
+
+ DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper2, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+ json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ doc = docMapper.parse(json).rootDoc();
+ f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, notNullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json");
+ DocumentMapper docMapper3 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper3, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper3, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+
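+ // upgrade3.json changes the index and store settings of the default "name" field, which must produce conflicts.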
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json");
+ DocumentMapper docMapper4 = parser.parse(mapping);
+ mergeResult = docMapper.merge(docMapper4, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values"));
+ assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values"));
+
+ mergeResult = docMapper.merge(docMapper4, mergeFlags().simulate(false));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values"));
+ assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values"));
+
+ // There are conflicts, but `name.not_indexed3` has still been added, because that field itself has no conflicts.
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3").mapper(), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json
new file mode 100644
index 0000000..c539fcc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json
@@ -0,0 +1,4 @@
+{
+ _id:1,
+ name:"some name"
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json
new file mode 100644
index 0000000..61f08af
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json
@@ -0,0 +1,11 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json
new file mode 100644
index 0000000..02ce895
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json
@@ -0,0 +1,27 @@
+{
+ "person" :{
+ "properties" :{
+ "name":{
+ "type" :"string",
+ "index" :"analyzed",
+ "store" :"yes",
+ "fields":{
+ "name":{
+ "type" :"string",
+ "index" :"analyzed",
+ "store" :"yes"
+ },
+ "indexed":{
+ "type" :"string",
+ "index" :"analyzed"
+ },
+ "not_indexed":{
+ "type" :"string",
+ "index" :"no",
+ "store" :"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json
new file mode 100644
index 0000000..ea07675
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json
@@ -0,0 +1,32 @@
+{
+ "person" : {
+ "properties" :{
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "store" : "yes",
+ "fields": {
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "store" : "yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ },
+ "not_indexed2":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json
new file mode 100644
index 0000000..384c263
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json
@@ -0,0 +1,18 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes",
+ "fields":{
+ "not_indexed3":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json
new file mode 100644
index 0000000..6206592
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json
@@ -0,0 +1,25 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json
new file mode 100644
index 0000000..4a8fbf6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json
@@ -0,0 +1,30 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ },
+ "not_indexed2":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json
new file mode 100644
index 0000000..9b30978
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json
@@ -0,0 +1,16 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "not_indexed3":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json
new file mode 100644
index 0000000..35f9418
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json
@@ -0,0 +1,8 @@
+{
+ "_id":1,
+ "age":28,
+ "name":"some name",
+ "object1":{
+ "multi1":"2010-01-01"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json
new file mode 100644
index 0000000..d36e9d2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json
@@ -0,0 +1,30 @@
+{
+ "type":{
+ "properties":{
+ "a":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"completion"
+ }
+ }
+ },
+ "b":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"completion"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json
new file mode 100644
index 0000000..c7d11be
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json
@@ -0,0 +1,30 @@
+{
+ "type":{
+ "properties":{
+ "a":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"geo_point"
+ }
+ }
+ },
+ "b":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"geo_point"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json
new file mode 100644
index 0000000..99b74c0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json
@@ -0,0 +1,32 @@
+{
+ "person": {
+ "properties": {
+ "name": {
+ "type": "multi_field",
+ "fields": {
+ "indexed": {
+ "type": "string",
+ "index": "analyzed"
+ },
+ "not_indexed": {
+ "type": "string",
+ "index": "no",
+ "store": "yes"
+ }
+ }
+ },
+ "age": {
+ "type": "multi_field",
+ "fields": {
+ "not_stored": {
+ "type": "long"
+ },
+ "stored": {
+ "type": "long",
+ "store": "yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json
new file mode 100644
index 0000000..b099b9a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "properties":{
+ "name":{
+ "type":"multi_field",
+ "fields":{
+ "name":{
+ "type":"string",
+ "index":"analyzed",
+ "store":"yes"
+ },
+ "indexed":{
+ "type":"string",
+ "index":"analyzed"
+ },
+ "not_indexed":{
+ "type":"string",
+ "index":"no",
+ "store":"yes"
+ },
+ "test1" : {
+ "type":"string",
+ "index":"analyzed",
+ "store" : "yes",
+ "fielddata" : {
+ "loading" : "eager"
+ }
+ },
+ "test2" : {
+ "type" : "token_count",
+ "store" : "yes",
+ "index" : "not_analyzed",
+ "analyzer" : "simple"
+ }
+ }
+ },
+ "object1":{
+ "properties":{
+ "multi1":{
+ "type":"multi_field",
+ "fields":{
+ "multi1":{
+ "type":"date"
+ },
+ "string":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json
new file mode 100644
index 0000000..b116665
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json
@@ -0,0 +1,50 @@
+{
+ "person": {
+ "properties": {
+ "name": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "yes",
+ "fields": {
+ "indexed": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "no"
+ },
+ "not_indexed": {
+ "type": "string",
+ "index": "no",
+ "store": "yes"
+ },
+ "test1": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "yes",
+ "fielddata": {
+ "loading": "eager"
+ }
+ },
+ "test2": {
+ "type": "token_count",
+ "index": "not_analyzed",
+ "store": "yes",
+ "analyzer": "simple"
+ }
+ }
+ },
+ "object1": {
+ "properties": {
+ "multi1": {
+ "type": "date",
+ "fields": {
+ "string": {
+ "type": "string",
+ "index": "not_analyzed"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java
new file mode 100644
index 0000000..6596463
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.nested;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class NestedMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void emptyNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .nullField("nested1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(1));
+
+ doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested").endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(1));
+ }
+
+ @Test
+ public void singleNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startObject("nested1").field("field1", "1").field("field2", "2").endObject()
+ .endObject()
+ .bytes());
+
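+ // The nested object is indexed as its own Lucene document, stored before the root document.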
+ assertThat(doc.docs().size(), equalTo(2));
+ assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
+
+ assertThat(doc.docs().get(1).get("field"), equalTo("value"));
+
+
+ doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").field("field2", "2").endObject()
+ .startObject().field("field1", "3").field("field2", "4").endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(3));
+ assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3"));
+ assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4"));
+ assertThat(doc.docs().get(1).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2"));
+
+ assertThat(doc.docs().get(2).get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void multiNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested")
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
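+ // Nested docs are flattened children-first; the root document comes last (index 6).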
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).get("nested1.nested2.field2"), nullValue());
+ }
+
+ @Test
+ public void multiObjectAndNested1() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_parent", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).get("nested1.nested2.field2"), nullValue());
+ }
+
+ @Test
+ public void multiObjectAndNested2() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").field("include_in_parent", true).startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_parent", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).getFields("nested1.field1").length, equalTo(2));
+ assertThat(doc.docs().get(6).getFields("nested1.nested2.field2").length, equalTo(4));
+ }
+
+ @Test
+ public void multiRootAndNested1() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_root", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).getFields("nested1.nested2.field2").length, equalTo(4));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java b/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java
new file mode 100644
index 0000000..541a05d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.numeric;
+
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.string.SimpleStringMappingTests;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SimpleNumericTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNumericDetectionEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("numeric_detection", true)
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("s_long", "100")
+ .field("s_double", "100.0")
+ .endObject()
+ .bytes());
+
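+ // With numeric_detection enabled, string values that look like numbers get numeric mappers.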
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
+ assertThat(mapper, instanceOf(LongFieldMapper.class));
+
+ mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
+ assertThat(mapper, instanceOf(DoubleFieldMapper.class));
+ }
+
+ @Test
+ public void testNumericDetectionDefault() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("s_long", "100")
+ .field("s_double", "100.0")
+ .endObject()
+ .bytes());
+
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
+ assertThat(mapper, instanceOf(StringFieldMapper.class));
+
+ mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
+ assertThat(mapper, instanceOf(StringFieldMapper.class));
+ }
+
+ @Test
+ public void testIgnoreMalformedOption() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("ignore_malformed", true).endObject()
+ .startObject("field2").field("type", "integer").field("ignore_malformed", false).endObject()
+ .startObject("field3").field("type", "integer").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "a")
+ .field("field2", "1")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field1"), nullValue());
+ assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail("Expected a MapperParsingException, since field2 sets ignore_malformed=false");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+
+ // Verify that the default is false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ fail("Expected a MapperParsingException, since ignore_malformed defaults to false");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+
+ // Unless the global ignore_malformed option is set to true
+ Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+ defaultMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+ // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException, since field2 overrides the index setting with ignore_malformed=false");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+ }
+
+ @Test
+ public void testCoerceOption() throws Exception {
+ String[] nonFractionNumericFieldTypes = {"integer", "long", "short"};
+ // Test coercion policies on all non-fraction numeric types
+ for (String nonFractionNumericFieldType : nonFractionNumericFieldTypes) {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("noErrorNoCoerceField").field("type", nonFractionNumericFieldType).field("ignore_malformed", true)
+ .field("coerce", false).endObject()
+ .startObject("noErrorCoerceField").field("type", nonFractionNumericFieldType).field("ignore_malformed", true)
+ .field("coerce", true).endObject()
+ .startObject("errorDefaultCoerce").field("type", nonFractionNumericFieldType).field("ignore_malformed", false).endObject()
+ .startObject("errorNoCoerce").field("type", nonFractionNumericFieldType).field("ignore_malformed", false)
+ .field("coerce", false).endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ // Test numbers passed as JSON strings - parseable only when coercion applies
+ String invalidJsonNumberAsString = "1";
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", invalidJsonNumberAsString)
+ .field("noErrorCoerceField", invalidJsonNumberAsString)
+ .field("errorDefaultCoerce", invalidJsonNumberAsString)
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("noErrorNoCoerceField"), nullValue());
+ assertThat(doc.rootDoc().getField("noErrorCoerceField"), notNullValue());
+ //Default is ignore_malformed=true and coerce=true
+ assertThat(doc.rootDoc().getField("errorDefaultCoerce"), notNullValue());
+
+ // Test the valid case of numbers passed as numbers
+ int validNumber = 1;
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", validNumber)
+ .field("noErrorCoerceField", validNumber)
+ .field("errorDefaultCoerce", validNumber)
+ .endObject()
+ .bytes());
+ assertEquals(validNumber, doc.rootDoc().getField("noErrorNoCoerceField").numericValue().intValue());
+ assertEquals(validNumber, doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ assertEquals(validNumber, doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+ // Test the valid case of negative numbers passed as numbers
+ int validNegativeNumber = -1;
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", validNegativeNumber)
+ .field("noErrorCoerceField", validNegativeNumber)
+ .field("errorDefaultCoerce", validNegativeNumber)
+ .endObject()
+ .bytes());
+ assertEquals(validNegativeNumber, doc.rootDoc().getField("noErrorNoCoerceField").numericValue().intValue());
+ assertEquals(validNegativeNumber, doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ assertEquals(validNegativeNumber, doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("errorNoCoerce", invalidJsonNumberAsString)
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException, since errorNoCoerce disables coercion");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+
+
+ // Test the questionable case of floats passed to integer fields
+ float invalidJsonForInteger = 1.9f;
+ int coercedFloatValue = 1; // the JSON parser truncates rather than rounds the float
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", invalidJsonForInteger)
+ .field("noErrorCoerceField", invalidJsonForInteger)
+ .field("errorDefaultCoerce", invalidJsonForInteger)
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("noErrorNoCoerceField"), nullValue());
+ assertEquals(coercedFloatValue, doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ // coerce defaults to true, so the float is truncated and indexed
+ assertEquals(coercedFloatValue, doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("errorNoCoerce", invalidJsonForInteger)
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException, since errorNoCoerce disables coercion");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+ }
+ }
+
+
+ public void testDocValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("int", "1234")
+ .field("double", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
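+ // the doc_values fielddata format encodes numeric fields as BINARY doc values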
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "int"));
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "double"));
+ }
+
+ public void testDocValuesOnNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("nested")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("nested")
+ .startObject()
+ .field("int", "1234")
+ .field("double", "1234")
+ .endObject()
+ .startObject()
+ .field("int", "-1")
+ .field("double", "-2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .bytes());
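+ // nested objects are indexed as separate Lucene documents; check each one except the root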
+ for (Document doc : parsedDoc.docs()) {
+ if (doc == parsedDoc.rootDoc()) {
+ continue;
+ }
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "nested.int"));
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "nested.double"));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java
new file mode 100644
index 0000000..4c732b7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class NullValueObjectMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNullValueObject() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("obj1").field("type", "object").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("obj1").endObject()
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+
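+ // an explicit null for the object field is simply skipped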
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .nullField("obj1")
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("obj1").field("field", "value").endObject()
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("obj1.field"), equalTo("value"));
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java
new file mode 100644
index 0000000..b94a30c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+/**
+ */
+public class SimpleObjectMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDifferentInnerObjectTokenFailure() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
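+ // "object" is an object in the first array element but a plain string in the second, which must fail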
+ try {
+ defaultMapper.parse("type", "1", new BytesArray(" {\n" +
+ " \"object\": {\n" +
+ " \"array\":[\n" +
+ " {\n" +
+ " \"object\": { \"value\": \"value\" }\n" +
+ " },\n" +
+ " {\n" +
+ " \"object\":\"value\"\n" +
+ " }\n" +
+ " ]\n" +
+ " },\n" +
+ " \"value\":\"value\"\n" +
+ " }"));
+ fail();
+ } catch (MapperParsingException e) {
+ // all is well
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java
new file mode 100644
index 0000000..04497a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.parent;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class ParentMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void parentNotMapped() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1"));
+
+ // no _parent mapping, used as a simple field
+ assertThat(doc.parent(), nullValue());
+ assertThat(doc.rootDoc().get("_parent"), nullValue());
+ }
+
+ @Test
+ public void parentSetInDocNotExternally() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1"));
+
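+ // the _parent field is indexed as a uid of the form p_type#1122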
+ assertThat(doc.parent(), equalTo("1122"));
+ assertThat(doc.rootDoc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
+ }
+
+ @Test
+ public void parentNotSetInDocSetExternally() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1").parent("1122"));
+
+ assertThat(doc.rootDoc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
+ }
+
+ @Test
+ public void parentSetInDocSetExternally() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1").parent("1122"));
+
+ assertThat(doc.rootDoc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java
new file mode 100644
index 0000000..e05f7cd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.path;
+
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PathMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPathMapping() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/path/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
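+ // name1 uses path "just_name", so its fields index under their plain (or index_name) names;
+ // name2 uses path "full", so its fields index under the full object path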
+ assertThat(docMapper.mappers().indexName("first1"), notNullValue());
+ assertThat(docMapper.mappers().indexName("name1.first1"), nullValue());
+ assertThat(docMapper.mappers().indexName("last1"), nullValue());
+ assertThat(docMapper.mappers().indexName("i_last_1"), notNullValue());
+ assertThat(docMapper.mappers().indexName("name1.last1"), nullValue());
+ assertThat(docMapper.mappers().indexName("name1.i_last_1"), nullValue());
+
+ assertThat(docMapper.mappers().indexName("first2"), nullValue());
+ assertThat(docMapper.mappers().indexName("name2.first2"), notNullValue());
+ assertThat(docMapper.mappers().indexName("last2"), nullValue());
+ assertThat(docMapper.mappers().indexName("i_last_2"), nullValue());
+ assertThat(docMapper.mappers().indexName("name2.i_last_2"), notNullValue());
+ assertThat(docMapper.mappers().indexName("name2.last2"), nullValue());
+
+ // test full name
+ assertThat(docMapper.mappers().fullName("first1"), nullValue());
+ assertThat(docMapper.mappers().fullName("name1.first1"), notNullValue());
+ assertThat(docMapper.mappers().fullName("last1"), nullValue());
+ assertThat(docMapper.mappers().fullName("i_last_1"), nullValue());
+ assertThat(docMapper.mappers().fullName("name1.last1"), notNullValue());
+ assertThat(docMapper.mappers().fullName("name1.i_last_1"), nullValue());
+
+ assertThat(docMapper.mappers().fullName("first2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name2.first2"), notNullValue());
+ assertThat(docMapper.mappers().fullName("last2"), nullValue());
+ assertThat(docMapper.mappers().fullName("i_last_2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name2.i_last_2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name2.last2"), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json
new file mode 100644
index 0000000..200a451
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json
@@ -0,0 +1,32 @@
+{
+ "person":{
+ "properties":{
+ "name1":{
+ "type":"object",
+ "path":"just_name",
+ "properties":{
+ "first1":{
+ "type":"string"
+ },
+ "last1":{
+ "type":"string",
+ "index_name":"i_last_1"
+ }
+ }
+ },
+ "name2":{
+ "type":"object",
+ "path":"full",
+ "properties":{
+ "first2":{
+ "type":"string"
+ },
+ "last2":{
+ "type":"string",
+ "index_name":"i_last_2"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java
new file mode 100644
index 0000000..1793256
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.routing;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class RoutingTypeMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleRoutingMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes()).type("type").id("1").routing("routing_value"));
+
+ assertThat(doc.rootDoc().get("_routing"), equalTo("routing_value"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing")
+ .field("store", "no")
+ .field("index", "no")
+ .field("path", "route")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.routingFieldMapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.routingFieldMapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.routingFieldMapper().path(), equalTo("route"));
+ }
+
+ @Test
+ public void testThatSerializationWorksCorrectlyForIndexField() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing").field("store", "no").field("index", "no").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ enabledMapper.routingFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ assertThat(serializedMap, hasKey("_routing"));
+ assertThat(serializedMap.get("_routing"), instanceOf(Map.class));
+ Map<String, Object> routingConfiguration = (Map<String, Object>) serializedMap.get("_routing");
+ assertThat(routingConfiguration, hasKey("store"));
+ assertThat(routingConfiguration.get("store").toString(), is("false"));
+ assertThat(routingConfiguration, hasKey("index"));
+ assertThat(routingConfiguration.get("index").toString(), is("no"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java
new file mode 100644
index 0000000..79951e4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.simple;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleMapper() throws Exception {
+ DocumentMapperParser mapperParser = MapperTestUtils.newParser();
+ DocumentMapper docMapper = doc("test",
+ rootObject("person")
+ .add(object("name").add(stringField("first").store(true).index(false)))
+ ).build(mapperParser);
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+ assertThat(docMapper.mappers().name("first").mapper().names().fullName(), equalTo("name.first"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ doc = docMapper.parse(json).rootDoc();
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testParseToJsonAndParse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+// System.out.println(builtMapping);
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = builtDocMapper.parse(json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testSimpleParser() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testSimpleParserNoTypeNoId() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testAttributes() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ String builtMapping = docMapper.mappingSource().string();
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ assertThat((String) builtDocMapper.meta().get("param1"), equalTo("value1"));
+ }
+
+ @Test
+ public void testNoDocumentSent() throws Exception {
+ DocumentMapperParser mapperParser = MapperTestUtils.newParser();
+ DocumentMapper docMapper = doc("test",
+ rootObject("person")
+ .add(object("name").add(stringField("first").store(true).index(false)))
+ ).build(mapperParser);
+
+ BytesReference json = new BytesArray("".getBytes(Charsets.UTF_8));
+ try {
+ docMapper.parse("person", "1", json).rootDoc();
+ fail("this point is never reached");
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse, document is empty"));
+ }
+ }
+
+ @Test
+ public void testTypeWrapperWithSetting() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.mapping.allow_type_wrapper", true).build();
+ DocumentMapper docMapper = MapperTestUtils.newParser(settings).parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-withtype.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json
new file mode 100644
index 0000000..d45d557
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json
@@ -0,0 +1,98 @@
+{
+ person:{
+ "_meta":{
+ "param1":"value1"
+ },
+ date_formats:["yyyy-MM-dd", "dd-MM-yyyy"],
+ dynamic:false,
+ enabled:true,
+ _id:{
+ name:"_id",
+ index_name:"_id"
+ },
+ _source:{
+ name:"_source"
+ },
+ _type:{
+ name:"_type"
+ },
+ _boost:{
+ name:"_boost",
+ null_value:2.0
+ },
+ properties:{
+ name:{
+ type:"object",
+ dynamic:false,
+ properties:{
+ first:{
+ type:"string",
+ store:"yes"
+ },
+ last:{
+ type:"string",
+ index:"not_analyzed"
+ }
+ }
+ },
+ address:{
+ type:"object",
+ properties:{
+ first:{
+ properties:{
+ location:{
+ type:"string",
+ store:"yes",
+ index_name:"firstLocation"
+ }
+ }
+ },
+ last:{
+ properties:{
+ location:{
+ type:"string"
+ }
+ }
+ }
+ }
+ },
+ age:{
+ type:"integer",
+ null_value:0
+ },
+ birthdate:{
+ type:"date",
+ format:"yyyy-MM-dd"
+ },
+ nerd:{
+ type:"boolean"
+ },
+ dogs:{
+ type:"string",
+ index_name:"dog"
+ },
+ complex:{
+ type:"object",
+ properties:{
+ value1:{
+ type:"string"
+ },
+ value2:{
+ type:"string"
+ }
+ }
+ },
+ complex2:{
+ type:"object",
+ properties:{
+ value1:{
+ type:"string"
+ },
+ value2:{
+ type:"string"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json
new file mode 100644
index 0000000..745617a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json
@@ -0,0 +1,40 @@
+{
+ _boost:3.7,
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json
new file mode 100644
index 0000000..7f38d0d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json
@@ -0,0 +1,41 @@
+{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json
new file mode 100644
index 0000000..096554a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json
@@ -0,0 +1,43 @@
+{
+ person:{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1.json
new file mode 100644
index 0000000..93507da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1.json
@@ -0,0 +1,41 @@
+{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
new file mode 100644
index 0000000..bb39993
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.size;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+public class SizeMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSizeEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
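+ // _size is indexed with the source length but not stored unless store is set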
+ assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(false));
+ assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testSizeEnabledAndStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testSizeDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size"), nullValue());
+ }
+
+ @Test
+ public void testSizeNotSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size"), nullValue());
+ }
+
+ @Test
+ public void testThatDisablingWorksWhenMerging() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = MapperTestUtils.newParser().parse(disabledMapping);
+
+ enabledMapper.merge(disabledMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+ assertThat(enabledMapper.SizeFieldMapper().enabled(), is(false));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
new file mode 100644
index 0000000..e108122
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.source;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class CompressSourceMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCompressDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(false));
+ }
+
+ @Test
+ public void testCompressEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(true));
+ }
+
+ @Test
+ public void testCompressThreshold() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress_threshold", "200b").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .endObject().bytes());
+
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(false));
+
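+ // a larger source crosses the 200b threshold and is stored compressed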
+ doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .endObject().bytes());
+
+ bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
new file mode 100644
index 0000000..2a21f48
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.source;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class DefaultSourceMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNoFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+
+ documentMapper = MapperTestUtils.newParser().parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE));
+ }
+
+ @Test
+ public void testJsonFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("format", "json").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+
+ documentMapper = MapperTestUtils.newParser().parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+ }
+
+ @Test
+ public void testJsonFormatCompressed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("format", "json").field("compress", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
+ byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
+ assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
+
+ documentMapper = MapperTestUtils.newParser().parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
+ uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
+ assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
+ }
+
+ @Test
+ public void testIncludeExclude() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("includes", new String[]{"path1*"}).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").field("field1", "value1").endObject()
+ .startObject("path2").field("field2", "value2").endObject()
+ .endObject().bytes());
+
+ IndexableField sourceField = doc.rootDoc().getField("_source");
+ Map<String, Object> sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose();
+ assertThat(sourceAsMap.containsKey("path1"), equalTo(true));
+ assertThat(sourceAsMap.containsKey("path2"), equalTo(false));
+ }
+
+ @Test
+ public void testDefaultMappingAndNoMapping() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse("my_type", null, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ try {
+ mapper = MapperTestUtils.newParser().parse(null, null, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ fail();
+ } catch (MapperParsingException e) {
+ // all is well
+ }
+ try {
+ mapper = MapperTestUtils.newParser().parse(null, "{}", defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("malformed mapping no root object found"));
+ // all is well
+ }
+ }
+
+ @Test
+ public void testDefaultMappingAndWithMappingOverride() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse("my_type", mapping, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testDefaultMappingAndNoMappingWithMapperService() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ MapperService mapperService = MapperTestUtils.newMapperService();
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);
+
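+ // with only the _default_ mapping merged, auto-created types inherit _source enabled=false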
+ DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type");
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ }
+
+ @Test
+ public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ MapperService mapperService = MapperTestUtils.newMapperService();
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ mapperService.merge("my_type", new CompressedString(mapping), true);
+
+ DocumentMapper mapper = mapperService.documentMapper("my_type");
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
new file mode 100644
index 0000000..b10ef46
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.string;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SimpleStringMappingTests extends ElasticsearchTestCase {
+
+ private static Settings DOC_VALUES_SETTINGS = ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, FieldDataType.DOC_VALUES_FORMAT_VALUE).build();
+
+ @Test
+ public void testLimit() throws Exception {
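+ // "ignore_above": 5 keeps values of up to five characters and silently drops longer ones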
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("ignore_above", 5).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), notNullValue());
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "12345")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), notNullValue());
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "123456")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), nullValue());
+ }
+
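+ // analyzed string fields default to norms enabled, docs/freqs/positions indexing, and no term vectors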
+ private void assertDefaultAnalyzedFieldType(IndexableFieldType fieldType) {
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ }
+
+ private void assertEquals(IndexableFieldType ft1, IndexableFieldType ft2) {
+ assertEquals(ft1.indexed(), ft2.indexed());
+ assertEquals(ft1.tokenized(), ft2.tokenized());
+ assertEquals(ft1.omitNorms(), ft2.omitNorms());
+ assertEquals(ft1.indexOptions(), ft2.indexOptions());
+ assertEquals(ft1.storeTermVectors(), ft2.storeTermVectors());
+ assertEquals(ft1.docValueType(), ft2.docValueType());
+ }
+
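+ // round-trip check: serialize the mapping to JSON, re-parse it, and verify the rebuilt mapper indexes the field identically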
+ private void assertParseIdemPotent(IndexableFieldType expected, DocumentMapper mapper) throws Exception {
+ String mapping = mapper.toXContent(XContentFactory.jsonBuilder().startObject(), new ToXContent.MapParams(ImmutableMap.<String, String>of())).endObject().string();
+ mapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "2345")
+ .endObject()
+ .bytes());
+ assertEquals(expected, doc.rootDoc().getField("field").fieldType());
+ }
+
+ @Test
+ public void testDefaultsForAnalyzed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertDefaultAnalyzedFieldType(fieldType);
+ assertParseIdemPotent(fieldType, defaultMapper);
+ }
+
+ @Test
+ public void testDefaultsForNotAnalyzed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(true));
+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_ONLY));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+
+ // now test with norms and index_options explicitly set
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").startObject("norms").field("enabled", true).endObject().field("index_options", "freqs").endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+
+ // also test the deprecated omit_norms
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+ }
+
+ @Test
+ public void testTermVectors() throws Exception {
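+ // each term_vector option stores progressively more data, from plain vectors up to positions, offsets, and payloads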
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "string")
+ .field("term_vector", "no")
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("term_vector", "yes")
+ .endObject()
+ .startObject("field3")
+ .field("type", "string")
+ .field("term_vector", "with_offsets")
+ .endObject()
+ .startObject("field4")
+ .field("type", "string")
+ .field("term_vector", "with_positions")
+ .endObject()
+ .startObject("field5")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .startObject("field6")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "1234")
+ .field("field2", "1234")
+ .field("field3", "1234")
+ .field("field4", "1234")
+ .field("field5", "1234")
+ .field("field6", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectors(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true));
+ }
+
+ @Test
+ public void testDocValues() throws Exception {
+ // doc values only work on non-analyzed content
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ try {
+ new StringFieldMapper.Builder("anything").fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx);
+ fail();
+ } catch (Exception e) { /* OK */ }
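+ // disabling tokenization or indexing makes doc values legal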
+ new StringFieldMapper.Builder("anything").tokenized(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx);
+ new StringFieldMapper.Builder("anything").index(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx);
+
+ assertFalse(new StringFieldMapper.Builder("anything").index(false).build(ctx).hasDocValues());
+ assertTrue(new StringFieldMapper.Builder("anything").index(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx).hasDocValues());
+ assertTrue(new StringFieldMapper.Builder("anything").index(false).docValues(true).build(ctx).hasDocValues());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("str1")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .endObject()
+ .endObject()
+ .startObject("str2")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("str1", "1234")
+ .field("str2", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
+ assertEquals(null, docValuesType(doc, "str1"));
+ assertEquals(DocValuesType.SORTED_SET, docValuesType(doc, "str2"));
+ }
+
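+ // returns the doc values type of the first field that has one, or null if the field has no doc values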
+ public static DocValuesType docValuesType(Document document, String fieldName) {
+ for (IndexableField field : document.getFields(fieldName)) {
+ if (field.fieldType().docValueType() != null) {
+ return field.fieldType().docValueType();
+ }
+ }
+ return null;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
new file mode 100644
index 0000000..4cac4ce
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.timestamp;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Locale;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class TimestampMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleDisabled() throws Exception {
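+ // _timestamp is disabled by default, so no _timestamp field is generated even when a timestamp is supplied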
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").timestamp(1));
+
+ assertThat(doc.rootDoc().getField("_timestamp"), equalTo(null));
+ }
+
+ @Test
+ public void testEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", "yes").field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").timestamp(1));
+
+ assertThat(doc.rootDoc().getField("_timestamp").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_timestamp").fieldType().indexed(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_timestamp").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(TimestampFieldMapper.Defaults.ENABLED.enabled));
+ assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(TimestampFieldMapper.Defaults.FIELD_TYPE.stored()));
+ assertThat(docMapper.timestampFieldMapper().fieldType().indexed(), equalTo(TimestampFieldMapper.Defaults.FIELD_TYPE.indexed()));
+ assertThat(docMapper.timestampFieldMapper().path(), equalTo(null));
+ assertThat(docMapper.timestampFieldMapper().dateTimeFormatter().format(), equalTo(TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT));
+ }
+
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes").field("store", "yes").field("index", "no")
+ .field("path", "timestamp").field("format", "year")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(true));
+ assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.timestampFieldMapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.timestampFieldMapper().path(), equalTo("timestamp"));
+ assertThat(docMapper.timestampFieldMapper().dateTimeFormatter().format(), equalTo("year"));
+ }
+
+ @Test
+ public void testThatDisablingDuringMergeIsWorking() throws Exception {
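+ // merging a mapping with _timestamp disabled onto an enabled mapper (simulate(false) applies the change) must disable the field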
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = MapperTestUtils.newParser().parse(disabledMapping);
+
+ enabledMapper.merge(disabledMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+
+ assertThat(enabledMapper.timestampFieldMapper().enabled(), is(false));
+ }
+
+ @Test
+ public void testThatDisablingFieldMapperDoesNotReturnAnyUselessInfo() throws Exception {
+ boolean inversedStoreSetting = !TimestampFieldMapper.Defaults.FIELD_TYPE.stored();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false).field("store", inversedStoreSetting).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ mapper.timestampFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+
+ assertThat(builder.string(), is(String.format(Locale.ROOT, "{\"%s\":{}}", TimestampFieldMapper.NAME)));
+ }
+
+ @Test // issue 3174
+ public void testThatSerializationWorksCorrectlyForIndexField() throws Exception {
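+ // the non-default "index" setting must be emitted during serialization so it survives a mapping round-trip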
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").field("index", "no").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ enabledMapper.timestampFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ assertThat(serializedMap, hasKey("_timestamp"));
+ assertThat(serializedMap.get("_timestamp"), instanceOf(Map.class));
+ Map<String, Object> timestampConfiguration = (Map<String, Object>) serializedMap.get("_timestamp");
+ assertThat(timestampConfiguration, hasKey("store"));
+ assertThat(timestampConfiguration.get("store").toString(), is("true"));
+ assertThat(timestampConfiguration, hasKey("index"));
+ assertThat(timestampConfiguration.get("index").toString(), is("no"));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
new file mode 100644
index 0000000..73193ba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ttl;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class TTLMappingTests extends ElasticsearchTestCase {
+ @Test
+ public void testSimpleDisabled() throws Exception {
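+ // _ttl is disabled by default, so no _ttl field is generated even when a ttl value is supplied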
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").ttl(Long.MAX_VALUE));
+
+ assertThat(doc.rootDoc().getField("_ttl"), equalTo(null));
+ }
+
+ @Test
+ public void testEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl").field("enabled", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").ttl(Long.MAX_VALUE));
+
+ assertThat(doc.rootDoc().getField("_ttl").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_ttl").fieldType().indexed(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_ttl").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.TTLFieldMapper().enabled(), equalTo(TTLFieldMapper.Defaults.ENABLED_STATE.enabled));
+ assertThat(docMapper.TTLFieldMapper().fieldType().stored(), equalTo(TTLFieldMapper.Defaults.TTL_FIELD_TYPE.stored()));
+ assertThat(docMapper.TTLFieldMapper().fieldType().indexed(), equalTo(TTLFieldMapper.Defaults.TTL_FIELD_TYPE.indexed()));
+ }
+
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes").field("store", "no").field("index", "no")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.TTLFieldMapper().enabled(), equalTo(true));
+ assertThat(docMapper.TTLFieldMapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.TTLFieldMapper().fieldType().indexed(), equalTo(false));
+ }
+
+ @Test
+ public void testThatEnablingTTLFieldOnMergeWorks() throws Exception {
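+ // merging in a mapping that enables _ttl must succeed without conflicts and leave the field enabled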
+ String mappingWithoutTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ String mappingWithTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes").field("store", "no").field("index", "no")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapperWithoutTtl = MapperTestUtils.newParser().parse(mappingWithoutTtl);
+ DocumentMapper mapperWithTtl = MapperTestUtils.newParser().parse(mappingWithTtl);
+
+ DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false);
+ DocumentMapper.MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl, mergeFlags);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testThatChangingTTLKeepsMapperEnabled() throws Exception {
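+ // updating only the _ttl default value must not implicitly disable the field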
+ String mappingWithTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("default", "1w")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper initialMapper = MapperTestUtils.newParser().parse(mappingWithTtl);
+ DocumentMapper updatedMapper = MapperTestUtils.newParser().parse(updatedMapping);
+
+ DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false);
+ DocumentMapper.MergeResult mergeResult = initialMapper.merge(updatedMapper, mergeFlags);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java
new file mode 100644
index 0000000..56d1a96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.typelevels;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ParseDocumentTypeLevelsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNoLevel() throws Exception {
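+ // source without a type wrapper: fields are indexed under their plain names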
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevel() throws Exception {
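+ // source wrapped in the type name: indexed field names get the type prefix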
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsValue() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("type", "value_type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsValue() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("type", "value_type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsObject() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ // here the type object is parsed as a regular sub-object; the sibling root-level fields are still indexed
+ assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsObject() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsValueNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .field("type", "value_type")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsValueNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("type", "value_type")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsObjectNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("test1", "value1")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ // when the type object is not the first field, it is not mistaken for a type-level wrapper
+ assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsObjectNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
new file mode 100644
index 0000000..e2c7a93
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.typelevels;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ParseMappingTypeLevelTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testTypeLevel() throws Exception {
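+ // the mapping parses the same whether the type is passed explicitly or taken from the JSON root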
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse("type", mapping);
+ assertThat(mapper.type(), equalTo("type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+
+ mapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(mapper.type(), equalTo("type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java
new file mode 100644
index 0000000..de3456b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.merge.policy;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.distributor.LeastUsedDistributor;
+import org.elasticsearch.index.store.ram.RamDirectoryService;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class MergePolicySettingsTest {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ @Test
+ public void testCompoundFileSettings() throws IOException {
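+ // index.compound_format accepts booleans, boolean-like strings, and ratios; true maps to a noCFSRatio of 1.0, false to 0.0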
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+
+ assertThat(new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(true)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0.5)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new TieredMergePolicyProvider(createStore(build(1.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("true")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("True")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("False")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("false")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(false)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(true)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0.5)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(1.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("true")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("True")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("False")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("false")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(false)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ assertThat(new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(true)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0.5)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(1.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("true")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("True")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("False")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("false")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(false)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ }
+
+ @Test
+ public void testInvalidValue() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ try {
+ new LogDocMergePolicyProvider(createStore(build(-0.1)), service).newMergePolicy().getNoCFSRatio();
+ assertThat("exception expected", false);
+ } catch (ElasticsearchIllegalArgumentException ex) {
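+ // expected: a ratio below 0.0 is rejected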
+
+ }
+ try {
+ new LogDocMergePolicyProvider(createStore(build(1.1)), service).newMergePolicy().getNoCFSRatio();
+ assertThat("exception expected", false);
+ } catch (ElasticsearchIllegalArgumentException ex) {
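+ // expected: a ratio above 1.0 is rejected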
+
+ }
+ try {
+ new LogDocMergePolicyProvider(createStore(build("Falsch")), service).newMergePolicy().getNoCFSRatio();
+ assertThat("exception expected", false);
+ } catch (ElasticsearchIllegalArgumentException ex) {
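+ // expected: an unparseable value is rejected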
+
+ }
+
+ }
+
+ @Test
+ public void testUpdateSettings() throws IOException {
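+ // refreshSettings pushes updated index settings to registered listeners, so merge policies built afterwards pick up the new ratio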
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ TieredMergePolicyProvider mp = new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogByteSizeMergePolicyProvider mp = new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogDocMergePolicyProvider mp = new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+ }
+
+ public Settings build(String value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ public Settings build(double value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ public Settings build(int value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ public Settings build(boolean value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ protected Store createStore(Settings settings) throws IOException {
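+ // RAM-backed directory service so the test never touches disk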
+ DirectoryService directoryService = new RamDirectoryService(shardId, EMPTY_SETTINGS);
+ return new Store(shardId, settings, null, null, directoryService, new LeastUsedDistributor(directoryService));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
new file mode 100644
index 0000000..b7acdf8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+public class GeoShapeQueryBuilderTests extends ElasticsearchTestCase {
+
+ @Test // see #3878
+ public void testThatXContentSerializationInsideOfArrayWorks() throws Exception {
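+ // serializing the query as an array element must produce valid JSON without throwing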
+ EnvelopeBuilder envelopeBuilder = ShapeBuilder.newEnvelope().topLeft(0, 0).bottomRight(10, 10);
+ GeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery("searchGeometry", envelopeBuilder);
+ JsonXContent.contentBuilder().startArray().value(geoQuery).endArray();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java b/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java
new file mode 100644
index 0000000..1dc979f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.CachedFilter;
+import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.search.child.TestSearchContext;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class IndexQueryParserFilterCachingTests extends ElasticsearchTestCase {
+
+ private static Injector injector;
+
+ private static IndexQueryParserService queryParser;
+
+ @BeforeClass
+ public static void setupQueryParser() throws IOException {
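+ // assemble a minimal injector with just the modules the query parser needs; the weighted filter cache enables the caching behavior under test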
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.cache.filter.type", "weighted")
+ .build();
+ Index index = new Index("test");
+ injector = new ModulesBuilder().add(
+ new CacheRecyclerModule(settings),
+ new CodecModule(settings),
+ new SettingsModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new MapperServiceModule(),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ new IndexQueryParserModule(settings),
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
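+ // The modules above wire up just enough of an index (settings, analysis,
+ // mappings, similarity, caches) to obtain an IndexQueryParserService without
+ // starting a full node; ClusterService is deliberately stubbed out as null.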
+
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+ injector.getInstance(MapperService.class).merge("person", new CompressedString(mapping), true);
+ String childMapping = copyToStringFromClasspath("/org/elasticsearch/index/query/child-mapping.json");
+ injector.getInstance(MapperService.class).merge("child", new CompressedString(childMapping), true);
+ injector.getInstance(MapperService.class).documentMapper("person").parse(new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+ queryParser = injector.getInstance(IndexQueryParserService.class);
+ }
+
+ @AfterClass
+ public static void close() {
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ queryParser = null;
+ injector = null;
+ }
+
+ private IndexQueryParserService queryParser() throws IOException {
+ return queryParser;
+ }
+
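+ // Encodes a long in Lucene's prefix-coded term format, the way numeric
+ // fields are indexed, so expected terms can be compared byte-for-byte.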
+ private BytesRef longToPrefixCoded(long val, int shift) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(val, shift, bytesRef);
+ return bytesRef;
+ }
+
+ @Test
+ public void testNoFilterParsing() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
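+ // Date range filters that reference "now" change over time, so the parser
+ // marks them NoCacheFilter even when caching is requested; once "now" is
+ // rounded (the *_with_rounding fixtures) the result is deterministic and
+ // the whole filter may be cached (CachedFilter).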
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery)parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().size(), is(2));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery)parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().size(), is(2));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));
+ assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));
+ assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().size(), is(2));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
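+ // has_child filters need a current SearchContext and must never be cached,
+ // even inside an explicitly cached and-filter, so install a stub context.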
+ try {
+ SearchContext.setCurrent(new TestSearchContext());
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/has-child.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(NoCacheFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-cache.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/has-child-in-and-filter-cached.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(AndFilter.class));
+ } finally {
+ SearchContext.removeCurrent();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
new file mode 100644
index 0000000..45dc8cf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
@@ -0,0 +1,2298 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.*;
+import org.apache.lucene.sandbox.queries.FuzzyLikeThisQuery;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.spans.*;
+import org.apache.lucene.spatial.prefix.IntersectsPrefixTreeFilter;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.lucene.search.*;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.search.geo.GeoDistanceFilter;
+import org.elasticsearch.index.search.geo.GeoPolygonFilter;
+import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxFilter;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.hamcrest.Matchers;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.RegexpFlag.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Unit tests for {@link IndexQueryParserService}: each test parses a query,
+ * either from a builder or from a JSON fixture on the classpath, and asserts
+ * the structure of the resulting Lucene {@link Query}.
+ */
+public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
+
+ private static Injector injector;
+
+ private static IndexQueryParserService queryParser;
+
+ @BeforeClass
+ public static void setupQueryParser() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.cache.filter.type", "none")
+ .build();
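+ // filter caching is disabled ("none") so the tests below can assert the
+ // raw, unwrapped filter implementations produced by the parser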
+ Index index = new Index("test");
+ injector = new ModulesBuilder().add(
+ new CacheRecyclerModule(settings),
+ new CodecModule(settings),
+ new SettingsModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new MapperServiceModule(),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ new IndexQueryParserModule(settings),
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+ injector.getInstance(MapperService.class).merge("person", new CompressedString(mapping), true);
+ injector.getInstance(MapperService.class).documentMapper("person").parse(new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+ queryParser = injector.getInstance(IndexQueryParserService.class);
+ }
+
+ @AfterClass
+ public static void close() {
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ queryParser = null;
+ injector = null;
+ }
+
+ private IndexQueryParserService queryParser() throws IOException {
+ return queryParser;
+ }
+
+ private BytesRef longToPrefixCoded(long val, int shift) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(val, shift, bytesRef);
+ return bytesRef;
+ }
+
+ @Test
+ public void testQueryStringBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").defaultField("content").phraseSlop(1)).query();
+
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ TermQuery termQuery = (TermQuery) parsedQuery;
+ assertThat(termQuery.getTerm(), equalTo(new Term("content", "test")));
+ }
+
+ @Test
+ public void testQueryString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ TermQuery termQuery = (TermQuery) parsedQuery;
+ assertThat(termQuery.getTerm(), equalTo(new Term("content", "test")));
+ }
+
+ @Test
+ public void testQueryStringBoostsBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ QueryStringQueryBuilder builder = queryString("field:boosted^2");
+ Query parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) parsedQuery).getTerm(), equalTo(new Term("field", "boosted")));
+ assertThat(parsedQuery.getBoost(), equalTo(2.0f));
+ builder.boost(2.0f);
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery.getBoost(), equalTo(4.0f));
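+ // a boost set on the builder multiplies any boost already present in the
+ // query string (2.0 from "^2" times 2.0 from the builder = 4.0)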
+
+ builder = queryString("((field:boosted^2) AND (field:foo^1.5))^3");
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("field", "boosted")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getBoost(), equalTo(2.0f));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("field", "foo")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getBoost(), equalTo(1.5f));
+ assertThat(parsedQuery.getBoost(), equalTo(3.0f));
+ builder.boost(2.0f);
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery.getBoost(), equalTo(6.0f));
+ }
+
+ @Test
+ public void testQueryStringFields1Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").field("content").field("name").useDisMax(false)).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields1() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFieldsMatch() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields-match.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name.last", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields2Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").field("content").field("name").useDisMax(true)).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields3Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").field("content", 2.2f).field("name").useDisMax(true)).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat((double) disjuncts.get(0).getBoost(), closeTo(2.2, 0.01));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ assertThat((double) disjuncts.get(1).getBoost(), closeTo(1, 0.01));
+ }
+
+ @Test
+ public void testQueryStringFields3() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat((double) disjuncts.get(0).getBoost(), closeTo(2.2, 0.01));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ assertThat((double) disjuncts.get(1).getBoost(), closeTo(1, 0.01));
+ }
+
+ @Test
+ public void testMatchAllBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(matchAllQuery().boost(1.2f)).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery;
+ assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01));
+ }
+
+ @Test
+ public void testMatchAll() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/matchAll.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery;
+ assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01));
+ }
+
+ @Test
+ public void testMatchAllEmpty1() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match_all_empty1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, equalTo(Queries.newMatchAllQuery()));
+ assertThat(parsedQuery, not(sameInstance(Queries.newMatchAllQuery())));
+ }
+
+ @Test
+ public void testMatchAllEmpty2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match_all_empty2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, equalTo(Queries.newMatchAllQuery()));
+ assertThat(parsedQuery, not(sameInstance(Queries.newMatchAllQuery())));
+ }
+
+ @Test
+ public void testStarColonStar() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/starColonStar.json");
+ Query parsedQuery = queryParser.parse(query).query();
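+ // "*:*" is special-cased to a constant-score match-all filter rather than
+ // being parsed as a wildcard query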
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ Filter internalFilter = constantScoreQuery.getFilter();
+ assertThat(internalFilter, instanceOf(MatchAllDocsFilter.class));
+ }
+
+ @Test
+ public void testDisMaxBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(disMaxQuery().boost(1.2f).tieBreaker(0.7f).add(termQuery("name.first", "first")).add(termQuery("name.last", "last"))).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01));
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(2));
+
+ Query firstQ = disjuncts.get(0);
+ assertThat(firstQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("name.first", "first")));
+
+ Query secondQ = disjuncts.get(1);
+ assertThat(secondQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) secondQ).getTerm(), equalTo(new Term("name.last", "last")));
+ }
+
+ @Test
+ public void testDisMax() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/disMax.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01));
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(2));
+
+ Query firstQ = disjuncts.get(0);
+ assertThat(firstQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("name.first", "first")));
+
+ Query secondQ = disjuncts.get(1);
+ assertThat(secondQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) secondQ).getTerm(), equalTo(new Term("name.last", "last")));
+ }
+
+ @Test
+ public void testDisMax2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/disMax2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(1));
+
+ PrefixQuery firstQ = (PrefixQuery) disjuncts.get(0);
+ // name.first is a string field, so the prefix stays a plain term (no numeric encoding)
+ assertThat(firstQ.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) firstQ.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testTermQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termQuery("age", 34).buildAsBytes()).query();
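+ // age is dynamically mapped as a numeric field (from data.json), so a term
+ // query on it parses to a NumericRangeQuery with min == max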
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ }
+
+ @Test
+ public void testTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ }
+
+ @Test
+ public void testFuzzyQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").buildAsBytes()).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testFuzzyQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFieldsBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").fuzziness(Fuzziness.fromSimilarity(0.1f)).prefixLength(1).boost(2.0f).buildAsBytes()).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length())));
+ assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
+ assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFields() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy-with-fields.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length())));
+ assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
+ assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFields2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy-with-fields2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fuzzyQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fuzzyQuery.getMin().longValue(), equalTo(7L));
+ assertThat(fuzzyQuery.getMax().longValue(), equalTo(17L));
+ }
+
+ @Test
+ public void testTermWithBoostQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termQuery("age", 34).boost(2.0f)).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ assertThat((double) fieldQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testTermWithBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-with-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ assertThat((double) fieldQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh")).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ // name.first is a string field, so the prefix stays a plain term (no numeric encoding)
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ // name.first is a string field, so the prefix stays a plain term (no numeric encoding)
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ // name.first is a string field, so the prefix stays a plain term (no numeric encoding)
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testPrefixFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), prefixFilter("name.first", "sh"))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter();
+ assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter();
+ assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter();
+ assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixQueryBoostQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh").boost(2.0f)).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-with-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryWithUnknownField() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("unknown", "sh")).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("unknown", "sh")));
+ assertThat(prefixQuery.getRewriteMethod(), notNullValue());
+ }
+
+ @Test
+ public void testRegexpQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(regexpQuery("name.first", "s.*y")).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) filter;
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ }
+
+ @Test
+ public void testNamedRegexpFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
+ assertThat(filter, instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) filter;
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ }
+
+ @Test
+ public void testRegexpWithFlagsFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-flags.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
+ assertThat(filter, instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) filter;
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ assertThat(regexpFilter.flags(), equalTo(INTERSECTION.value() | COMPLEMENT.value() | EMPTY.value()));
+ }
+
+ @Test
+ public void testNamedAndCachedRegexpWithFlagsFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
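+ // the cache key supplied in the filter definition travels on a
+ // CacheKeyFilter.Wrapper around the actual RegexpFilter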
+ assertThat(filter, instanceOf(CacheKeyFilter.Wrapper.class));
+ CacheKeyFilter.Wrapper wrapper = (CacheKeyFilter.Wrapper) filter;
+ assertThat(new BytesRef(wrapper.cacheKey().bytes()).utf8ToString(), equalTo("key"));
+ assertThat(wrapper.wrappedFilter(), instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) wrapper.wrappedFilter();
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ assertThat(regexpFilter.flags(), equalTo(INTERSECTION.value() | COMPLEMENT.value() | EMPTY.value()));
+ }
+
+ @Test
+ public void testRegexpBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ assertThat(regexpQuery.getBoost(), equalTo(1.2f));
+ }
+
+ @Test
+ public void testWildcardQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(wildcardQuery("name.first", "sh*")).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ }
+
+ @Test
+ public void testWildcardQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/wildcard.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ }
+
+ @Test
+ public void testWildcardBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/wildcard-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ assertThat((double) wildcardQuery.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testRangeQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(rangeQuery("age").from(23).to(54).includeLower(true).includeUpper(false)).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRange2Query() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ assertThat(rangeFilter.includesMin(), equalTo(true));
+ assertThat(rangeFilter.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ assertThat(rangeFilter.includesMin(), equalTo(true));
+ assertThat(rangeFilter.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ assertThat(rangeFilter.includesMin(), equalTo(true));
+ assertThat(rangeFilter.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testNumericRangeFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), numericRangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFieldDataFilter.class));
+ NumericRangeFieldDataFilter<Number> rangeFilter = (NumericRangeFieldDataFilter<Number>) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getLowerVal().intValue(), equalTo(23));
+ assertThat(rangeFilter.getUpperVal().intValue(), equalTo(54));
+ assertThat(rangeFilter.isIncludeLower(), equalTo(true));
+ assertThat(rangeFilter.isIncludeUpper(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQueryBuilder_executionFieldData() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false).setExecution("fielddata"))).query();
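+ // execution=fielddata switches the range filter from the index-based
+ // NumericRangeFilter to the field-data-based NumericRangeFieldDataFilter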
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFieldDataFilter.class));
+ NumericRangeFieldDataFilter<Number> rangeFilter = (NumericRangeFieldDataFilter<Number>) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getLowerVal().intValue(), equalTo(23));
+ assertThat(rangeFilter.getUpperVal().intValue(), equalTo(54));
+ assertThat(rangeFilter.isIncludeLower(), equalTo(true));
+ assertThat(rangeFilter.isIncludeUpper(), equalTo(false));
+ }
+
+ @Test
+ public void testNumericRangeFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/numeric_range-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFieldDataFilter.class));
+ NumericRangeFieldDataFilter<Number> rangeFilter = (NumericRangeFieldDataFilter<Number>) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getLowerVal().intValue(), equalTo(23));
+ assertThat(rangeFilter.getUpperVal().intValue(), equalTo(54));
+ assertThat(rangeFilter.isIncludeLower(), equalTo(true));
+ assertThat(rangeFilter.isIncludeUpper(), equalTo(false));
+ }
+
+ @Test
+ public void testBoolFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), boolFilter().must(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")).mustNot(termFilter("name.first", "shay2")).should(termFilter("name.first", "shay3")))).query();
+
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ XBooleanFilter booleanFilter = (XBooleanFilter) filteredQuery.getFilter();
+
+ Iterator<FilterClause> iterator = booleanFilter.iterator();
+ assertThat(iterator.hasNext(), equalTo(true));
+ FilterClause clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay4")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay2")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay3")));
+
+ assertThat(iterator.hasNext(), equalTo(false));
+ }
+
+ @Test
+ public void testBoolFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ XBooleanFilter booleanFilter = (XBooleanFilter) filteredQuery.getFilter();
+
+ Iterator<FilterClause> iterator = booleanFilter.iterator();
+ assertThat(iterator.hasNext(), equalTo(true));
+ FilterClause clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay4")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay2")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay3")));
+
+ assertThat(iterator.hasNext(), equalTo(false));
+ }
+
+ @Test
+ public void testAndFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), andFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+
+ AndFilter andFilter = (AndFilter) constantScoreQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testAndFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ AndFilter andFilter = (AndFilter) filteredQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testAndNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+
+ AndFilter andFilter = (AndFilter) filteredQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testAndFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ AndFilter andFilter = (AndFilter) filteredQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testOrFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), orFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+
+ OrFilter orFilter = (OrFilter) constantScoreQuery.getFilter();
+ assertThat(orFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) orFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) orFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testOrFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ OrFilter orFilter = (OrFilter) filteredQuery.getFilter();
+ assertThat(orFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) orFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) orFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testOrFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ OrFilter orFilter = (OrFilter) filteredQuery.getFilter();
+ assertThat(orFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) orFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) orFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testNotFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), notFilter(termFilter("name.first", "shay1")))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+
+ NotFilter notFilter = (NotFilter) constantScoreQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testNotFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter.json");
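+ // Hedged sketch of the resource, reconstructed from the assertions below:
+ // { "filtered" : { "query" : { "term" : { "name.first" : "shay" } },
+ //                  "filter" : { "not" : { "term" : { "name.first" : "shay1" } } } } }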
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ NotFilter notFilter = (NotFilter) filteredQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testNotFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ NotFilter notFilter = (NotFilter) filteredQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testNotFilteredQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ NotFilter notFilter = (NotFilter) filteredQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testBoostingQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(boostingQuery().positive(termQuery("field1", "value1")).negative(termQuery("field1", "value2")).negativeBoost(0.2f)).query();
+ assertThat(parsedQuery, instanceOf(BoostingQuery.class));
+ }
+
+ @Test
+ public void testBoostingQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/boosting-query.json");
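+ // Only the query type is asserted here; the resource presumably mirrors the builder
+ // variant above, along the lines of:
+ // { "boosting" : { "positive" : { "term" : { "field1" : "value1" } },
+ //                  "negative" : { "term" : { "field1" : "value2" } },
+ //                  "negative_boost" : 0.2 } }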
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BoostingQuery.class));
+ }
+
+ @Test
+ public void testQueryStringFuzzyNumeric() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fuzzyQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fuzzyQuery.getMin().longValue(), equalTo(12L));
+ assertThat(fuzzyQuery.getMax().longValue(), equalTo(12L));
+ }
+
+ @Test
+ public void testBoolQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(boolQuery().must(termQuery("content", "test1")).must(termQuery("content", "test4")).mustNot(termQuery("content", "test2")).should(termQuery("content", "test3"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(4));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test4")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test2")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+
+ assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test3")));
+ assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testBoolQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool.json");
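+ // Hedged sketch of the resource, reconstructed from the clause assertions below:
+ // { "bool" : { "must" : [ { "term" : { "content" : "test1" } },
+ //                         { "term" : { "content" : "test4" } } ],
+ //              "must_not" : { "term" : { "content" : "test2" } },
+ //              "should" : { "term" : { "content" : "test3" } } } }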
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(4));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test4")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test2")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+
+ assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test3")));
+ assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termsQuery("name.first", Lists.newArrayList("shay", "test"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(2));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-query.json");
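+ // Presumably simply: { "terms" : { "name.first" : [ "shay", "test" ] } }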
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(2));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testInQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termsQuery("name.first", Lists.newArrayList("test1", "test2", "test3"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(3));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test2")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("name.first", "test3")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termFilter("name.last", "banon"))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query.json");
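+ // Hedged sketch, reconstructed from the assertions below:
+ // { "filtered" : { "query" : { "term" : { "name.first" : "shay" } },
+ //                  "filter" : { "term" : { "name.last" : "banon" } } } }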
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFilteredQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ Filter filter = filteredQuery.getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ }
+
+ @Test
+ public void testFilteredQuery4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ WildcardQuery wildcardQuery = (WildcardQuery) filteredQuery.getQuery();
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ assertThat((double) wildcardQuery.getBoost(), closeTo(1.1, 0.001));
+
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/limit-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(LimitFilter.class));
+ assertThat(((LimitFilter) filteredQuery.getFilter()).getLimit(), equalTo(2));
+
+ assertThat(filteredQuery.getQuery(), instanceOf(TermQuery.class));
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ }
+
+ @Test
+ public void testTermFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter.json");
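+ // Presumably a filtered query whose filter part is simply:
+ // { "term" : { "name.last" : "banon" } }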
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(TermFilter.class));
+ TermFilter termFilter = (TermFilter) filteredQuery.getFilter();
+ assertThat(termFilter.getTerm().field(), equalTo("name.last"));
+ assertThat(termFilter.getTerm().text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermNamedFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ assertThat(filteredQuery.getFilter(), instanceOf(TermFilter.class));
+ TermFilter termFilter = (TermFilter) filteredQuery.getFilter();
+ assertThat(termFilter.getTerm().field(), equalTo("name.last"));
+ assertThat(termFilter.getTerm().text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermsFilterQueryBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termsFilter("name.last", "banon", "kimchy"))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+ //assertThat(termsFilter.getTerms().length, equalTo(2));
+ //assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermsFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+ //assertThat(termsFilter.getTerms().length, equalTo(2));
+ //assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermsWithNameFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+ //assertThat(termsFilter.getTerms().length, equalTo(2));
+ //assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testConstantScoreQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(constantScoreQuery(termFilter("name.last", "banon"))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ assertThat(((TermFilter) constantScoreQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testConstantScoreQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/constantScore-query.json");
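+ // Presumably: { "constant_score" : { "filter" : { "term" : { "name.last" : "banon" } } } }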
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ assertThat(((TermFilter) constantScoreQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ // Disabled since we need a current context to execute it...
+// @Test public void testCustomScoreQuery1() throws IOException {
+// IndexQueryParser queryParser = queryParser();
+// String query = copyToStringFromClasspath("/org/elasticsearch/index/query/custom_score1.json");
+// Query parsedQuery = queryParser.parse(query).query();
+// assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+// FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+// assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+// assertThat(functionScoreQuery.getFunction(), instanceOf(CustomScoreQueryParser.ScriptScoreFunction.class));
+// }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(customBoostFactorQuery(termQuery("name.last", "banon")).boostFactor(1.3f)).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder_withFunctionScore() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(functionScoreQuery(termQuery("name.last", "banon"), factorFunction(1.3f))).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder_withFunctionScoreWithoutQueryGiven() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(functionScoreQuery(factorFunction(1.3f))).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(functionScoreQuery.getSubQuery(), instanceOf(XConstantScoreQuery.class));
+ assertThat(((XConstantScoreQuery) functionScoreQuery.getSubQuery()).getFilter(), instanceOf(MatchAllDocsFilter.class));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testCustomBoostFactorQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/custom-boost-factor-query.json");
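+ // Hedged sketch, mirroring the builder variant above:
+ // { "custom_boost_factor" : { "query" : { "term" : { "name.last" : "banon" } },
+ //                             "boost_factor" : 1.3 } }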
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testSpanTermQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanTermQuery("age", 34)).query();
+ assertThat(parsedQuery, instanceOf(SpanTermQuery.class));
+ SpanTermQuery termQuery = (SpanTermQuery) parsedQuery;
+ // age is dynamically mapped as a numeric field, so the term is encoded in Lucene's prefix-coded numeric form
+ assertThat(termQuery.getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ }
+
+ @Test
+ public void testSpanTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanTerm.json");
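+ // Presumably: { "span_term" : { "age" : 34 } }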
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanTermQuery.class));
+ SpanTermQuery termQuery = (SpanTermQuery) parsedQuery;
+ // age is dynamically mapped as a numeric field, so the term is encoded in Lucene's prefix-coded numeric form
+ assertThat(termQuery.getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ }
+
+ @Test
+ public void testSpanNotQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanNotQuery().include(spanTermQuery("age", 34)).exclude(spanTermQuery("age", 35))).query();
+ assertThat(parsedQuery, instanceOf(SpanNotQuery.class));
+ SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery;
+ // age is dynamically mapped as a numeric field, so the term is encoded in Lucene's prefix-coded numeric form
+ assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ }
+
+ @Test
+ public void testSpanNotQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanNot.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNotQuery.class));
+ SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery;
+ // age is dynamically mapped as a numeric field, so the term is encoded in Lucene's prefix-coded numeric form
+ assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ }
+
+ @Test
+ public void testSpanFirstQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanFirstQuery(spanTermQuery("age", 34), 12)).query();
+ assertThat(parsedQuery, instanceOf(SpanFirstQuery.class));
+ SpanFirstQuery spanFirstQuery = (SpanFirstQuery) parsedQuery;
+ // age is dynamically mapped as a numeric field, so the term is encoded in Lucene's prefix-coded numeric form
+ assertThat(((SpanTermQuery) spanFirstQuery.getMatch()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(spanFirstQuery.getEnd(), equalTo(12));
+ }
+
+ @Test
+ public void testSpanFirstQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanFirst.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanFirstQuery.class));
+ SpanFirstQuery spanFirstQuery = (SpanFirstQuery) parsedQuery;
+ // age is dynamically mapped as a numeric field, so the term is encoded in Lucene's prefix-coded numeric form
+ assertThat(((SpanTermQuery) spanFirstQuery.getMatch()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(spanFirstQuery.getEnd(), equalTo(12));
+ }
+
+ @Test
+ public void testSpanNearQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanNearQuery().clause(spanTermQuery("age", 34)).clause(spanTermQuery("age", 35)).clause(spanTermQuery("age", 36)).slop(12).inOrder(false).collectPayloads(false)).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ public void testSpanNearQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanNear.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ public void testFieldMaskingSpanQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanFieldMaskingTerm.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) ((FieldMaskingSpanQuery) spanNearQuery.getClauses()[2]).getMaskedQuery()).getTerm(), equalTo(new Term("age_1", "36")));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ public void testSpanOrQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanOrQuery().clause(spanTermQuery("age", 34)).clause(spanTermQuery("age", 35)).clause(spanTermQuery("age", 36))).query();
+ assertThat(parsedQuery, instanceOf(SpanOrQuery.class));
+ SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery;
+ assertThat(spanOrQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ }
+
+ @Test
+ public void testSpanOrQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanOr.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanOrQuery.class));
+ SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery;
+ assertThat(spanOrQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ }
+
+ @Test
+ public void testSpanOrQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanOr2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanOrQuery.class));
+ SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery;
+ assertThat(spanOrQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ }
+
+ @Test
+ public void testSpanMultiTermWildcardQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-wildcard.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ WildcardQuery expectedWrapped = new WildcardQuery(new Term("user", "ki*y"));
+ expectedWrapped.setBoost(1.08f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermPrefixQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-prefix.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ PrefixQuery expectedWrapped = new PrefixQuery(new Term("user", "ki"));
+ expectedWrapped.setBoost(1.08f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermFuzzyTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper.getField(), equalTo("user"));
+ }
+
+ @Test
+ public void testSpanMultiTermFuzzyRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", 7L, 17L, true, true);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermNumericRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-range-numeric.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", 10L, 20L, true, false);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermTermRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ // The resource name and the expected term bounds below are assumptions: the original
+ // body was a verbatim copy of the numeric-range test above, so this sketches its
+ // term-range twin instead (assumes org.apache.lucene.search.TermRangeQuery is imported).
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-range-term.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ TermRangeQuery expectedWrapped = TermRangeQuery.newStringRange("user", "alice", "bob", true, false);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testQueryFilterBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), queryFilter(termQuery("name.last", "banon")))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter();
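+ // The wrapped query is read reflectively here, presumably because QueryWrapperFilter
+ // does not expose it publicly in the Lucene version this targets.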
+ Field field = QueryWrapperFilter.class.getDeclaredField("query");
+ field.setAccessible(true);
+ Query wrappedQuery = (Query) field.get(queryWrapperFilter);
+ assertThat(wrappedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testQueryFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter();
+ Field field = QueryWrapperFilter.class.getDeclaredField("query");
+ field.setAccessible(true);
+ Query wrappedQuery = (Query) field.get(queryWrapperFilter);
+ assertThat(wrappedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFQueryFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fquery-filter.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter();
+ Field field = QueryWrapperFilter.class.getDeclaredField("query");
+ field.setAccessible(true);
+ Query wrappedQuery = (Query) field.get(queryWrapperFilter);
+ assertThat(wrappedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testMoreLikeThisBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(moreLikeThisQuery("name.first", "name.last").likeText("something").minTermFreq(1).maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testMoreLikeThis() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/mlt.json");
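+ // Hedged sketch, reconstructed from the assertions below:
+ // { "more_like_this" : { "fields" : [ "name.first", "name.last" ],
+ //     "like_text" : "something", "min_term_freq" : 1, "max_query_terms" : 12 } }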
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getMoreLikeFields()[1], equalTo("name.last"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testFuzzyLikeThisBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+ parsedQuery = queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("4"))).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+
+ Query parsedQuery1 = queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("4.0"))).query();
+ assertThat(parsedQuery1, instanceOf(FuzzyLikeThisQuery.class));
+ assertThat(parsedQuery, equalTo(parsedQuery1));
+
+ try {
+ queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("4.1"))).query();
+ fail("exception expected - fractional edit distance");
+ } catch (ElasticsearchException ex) {
+ // expected: fractional edit distances are rejected
+ }
+
+ try {
+ queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("-" + between(1, 100)))).query();
+ fail("exception expected - negative edit distance");
+ } catch (ElasticsearchException ex) {
+ // expected: negative edit distances are rejected
+ }
+ String[] queries = new String[] {
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": \"4\"}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": \"4.00000000\"}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": \"4.\"}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": 4}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": 4.0}}"
+ };
+ int iters = atLeast(5);
+ for (int i = 0; i < iters; i++) {
+ parsedQuery = queryParser.parse(new BytesArray(randomFrom(queries))).query();
+ parsedQuery1 = queryParser.parse(new BytesArray(randomFrom(queries))).query();
+ assertThat(parsedQuery1, instanceOf(FuzzyLikeThisQuery.class));
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+ assertThat(parsedQuery, equalTo(parsedQuery1));
+ }
+ }
+
+ @Test
+ public void testFuzzyLikeThis() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzyLikeThis.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+// FuzzyLikeThisQuery fuzzyLikeThisQuery = (FuzzyLikeThisQuery) parsedQuery;
+ }
+
+ @Test
+ public void testFuzzyLikeThisFieldBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyLikeThisFieldQuery("name.first").likeText("something").maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+// FuzzyLikeThisQuery fuzzyLikeThisQuery = (FuzzyLikeThisQuery) parsedQuery;
+ }
+
+ @Test
+ public void testFuzzyLikeThisField() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzyLikeThisField.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+// FuzzyLikeThisQuery fuzzyLikeThisQuery = (FuzzyLikeThisQuery) parsedQuery;
+ }
+
+ @Test
+ public void testMoreLikeThisFieldBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(moreLikeThisFieldQuery("name.first").likeText("something").minTermFreq(1).maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testMoreLikeThisField() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/mltField.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testGeoDistanceFilterNamed() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance-named.json");
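+ // Hedged sketch (the exact distance syntax, e.g. "12mi", is an assumption):
+ // { "constant_score" : { "filter" : { "geo_distance" : {
+ //     "distance" : "12mi", "location" : { "lat" : 40, "lon" : -70 },
+ //     "_name" : "test" } } } }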
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery.query();
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter5() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance5.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter6() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance6.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter7() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance7.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(0.012, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter8() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance8.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.KILOMETERS.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter9() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance9.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter10() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance10.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter11() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance11.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter12() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance12.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilterNamed() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox-named.json");
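+ // Hedged sketch, reconstructed from the corner assertions below:
+ // { "constant_score" : { "filter" : { "geo_bounding_box" : { "location" : {
+ //     "top_left" : { "lat" : 40, "lon" : -70 },
+ //     "bottom_right" : { "lat" : 30, "lon" : -80 } }, "_name" : "test" } } } }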
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.query(), instanceOf(XConstantScoreQuery.class));
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery.query();
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter5() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox5.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter6() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox6.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonNamedFilter() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery.query();
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoShapeFilter() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geoShape-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ assertThat(constantScoreQuery.getFilter(), instanceOf(IntersectsPrefixTreeFilter.class));
+ }
+
+ @Test
+ public void testGeoShapeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geoShape-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery csq = (ConstantScoreQuery) parsedQuery;
+ assertThat(csq.getFilter(), instanceOf(IntersectsPrefixTreeFilter.class));
+ }
+
+ @Test
+ public void testCommonTermsQuery1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue());
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2"));
+ }
+
+ @Test
+ public void testCommonTermsQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo("50%"));
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("5<20%"));
+ }
+
+ @Test
+ public void testCommonTermsQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue());
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2"));
+ }
+
+    @Test(expected = QueryParsingException.class)
+    public void assureMalformedThrowsException() throws IOException {
+        IndexQueryParserService queryParser = queryParser();
+        // the fixture is intentionally malformed ("functions" holds a bare object
+        // instead of an array, and its braces are unbalanced), so parsing must throw
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/faulty-function-score-query.json");
+        queryParser.parse(query).query();
+    }
+
+    @Test
+    public void testFilterParsing() throws IOException {
+        IndexQueryParserService queryParser = queryParser();
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/function-filter-score-query.json");
+        Query parsedQuery = queryParser.parse(query).query();
+        assertThat((double) parsedQuery.getBoost(), Matchers.closeTo(3.0, 1.e-7));
+    }
+
+ @Test
+ public void testBadTypeMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match-query-bad-type.json");
+ QueryParsingException expectedException = null;
+ try {
+ queryParser.parse(query).query();
+ } catch (QueryParsingException qpe) {
+ expectedException = qpe;
+ }
+ assertThat(expectedException, notNullValue());
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-simple.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ }
+
+ @Test
+ public void testBadTypeMultiMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-bad-type.json");
+ QueryParsingException expectedException = null;
+ try {
+ queryParser.parse(query).query();
+ } catch (QueryParsingException qpe) {
+ expectedException = qpe;
+ }
+ assertThat(expectedException, notNullValue());
+ }
+
+ @Test
+ public void testMultiMatchQueryWithFieldsAsString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ }
+
+ @Test
+ public void testSimpleQueryString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/simple-query-string.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ }
+}
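
All of the parser tests above share one load-parse-assert idiom. A minimal sketch of it, reusing the class's own queryParser() and copyToStringFromClasspath() helpers (the fixture name below is illustrative, not one from this diff):

    @Test
    public void testSomeFixture() throws IOException {
        IndexQueryParserService queryParser = queryParser();
        // fixtures live next to the test class on the classpath
        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/some-fixture.json");
        // parse and assert on the concrete Lucene query that comes out
        Query parsedQuery = queryParser.parse(query).query();
        assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
    }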
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter-cache.json b/src/test/java/org/elasticsearch/index/query/and-filter-cache.json
new file mode 100644
index 0000000..41cc482
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter-cache.json
@@ -0,0 +1,21 @@
+{
+ "filtered":{
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
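
This fixture exercises the "_cache" flag on the "and" filter. A rough Java-client equivalent, as a sketch assuming the 1.x FilterBuilders API (a match_all stands in for the query clause the fixture omits):

    import static org.elasticsearch.index.query.FilterBuilders.andFilter;
    import static org.elasticsearch.index.query.FilterBuilders.termFilter;
    import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
    import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

    // same shape as and-filter-cache.json: two term filters ANDed, caching requested
    filteredQuery(matchAllQuery(),
            andFilter(
                    termFilter("name.first", "shay1"),
                    termFilter("name.first", "shay4")
            ).cache(true));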
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter-named.json b/src/test/java/org/elasticsearch/index/query/and-filter-named.json
new file mode 100644
index 0000000..605a193
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter-named.json
@@ -0,0 +1,26 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter.json b/src/test/java/org/elasticsearch/index/query/and-filter.json
new file mode 100644
index 0000000..752add1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter.json
@@ -0,0 +1,25 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter2.json b/src/test/java/org/elasticsearch/index/query/and-filter2.json
new file mode 100644
index 0000000..580b8e9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter2.json
@@ -0,0 +1,23 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/bool-filter.json b/src/test/java/org/elasticsearch/index/query/bool-filter.json
new file mode 100644
index 0000000..484e517
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/bool-filter.json
@@ -0,0 +1,35 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ bool:{
+ must:[
+ {
+ term:{
+ "name.first":"shay1"
+ }
+ },
+ {
+ term:{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ must_not:{
+ term:{
+ "name.first":"shay2"
+ }
+ },
+ should:{
+ term:{
+ "name.first":"shay3"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
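
Note that several fixtures from here on (this one included) deliberately use unquoted field names: the query parser sits on a lenient JSON reader, so these files double as a smoke test of that leniency and should not be normalized into strict JSON. A builder-side sketch of the same bool filter, assuming the 1.x FilterBuilders API:

    import static org.elasticsearch.index.query.FilterBuilders.boolFilter;
    import static org.elasticsearch.index.query.FilterBuilders.termFilter;

    // mirrors bool-filter.json: two required terms, one forbidden, one optional
    boolFilter()
            .must(termFilter("name.first", "shay1"))
            .must(termFilter("name.first", "shay4"))
            .mustNot(termFilter("name.first", "shay2"))
            .should(termFilter("name.first", "shay3"));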
diff --git a/src/test/java/org/elasticsearch/index/query/bool.json b/src/test/java/org/elasticsearch/index/query/bool.json
new file mode 100644
index 0000000..1619fcf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/bool.json
@@ -0,0 +1,30 @@
+{
+ bool:{
+ must:[
+ {
+ query_string:{
+ default_field:"content",
+ query:"test1"
+ }
+ },
+ {
+ query_string:{
+ default_field:"content",
+ query:"test4"
+ }
+ }
+ ],
+ must_not:{
+ query_string:{
+ default_field:"content",
+ query:"test2"
+ }
+ },
+ should:{
+ query_string:{
+ default_field:"content",
+ query:"test3"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/boosting-query.json b/src/test/java/org/elasticsearch/index/query/boosting-query.json
new file mode 100644
index 0000000..87b6e6d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/boosting-query.json
@@ -0,0 +1,15 @@
+{
+ "boosting":{
+ "positive":{
+ "term":{
+ "field1":"value1"
+ }
+ },
+ "negative":{
+ "term":{
+ "field2":"value2"
+ }
+ },
+ "negative_boost":0.2
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/child-mapping.json b/src/test/java/org/elasticsearch/index/query/child-mapping.json
new file mode 100644
index 0000000..6f3b6e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/child-mapping.json
@@ -0,0 +1,12 @@
+{
+ "child":{
+ "properties":{
+ "field":{
+ "type":"string"
+ }
+ },
+ "_parent" : {
+ "type" : "person"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json b/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json
new file mode 100644
index 0000000..b2728da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json
@@ -0,0 +1,11 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "cutoff_frequency" : 1,
+ "minimum_should_match" : {
+ "low_freq" : 2
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json b/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json
new file mode 100644
index 0000000..aeb281b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json
@@ -0,0 +1,11 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "minimum_should_match" : {
+ "high_freq" : "50%",
+ "low_freq" : "5<20%"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json b/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json
new file mode 100644
index 0000000..f276209
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json
@@ -0,0 +1,9 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "cutoff_frequency" : 1,
+ "minimum_should_match" : 2
+ }
+ }
+}
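
These three fixtures line up with testCommonTermsQuery1..3 above: "minimum_should_match" may be a single value (applied to the low-frequency group, hence the null high-frequency spec in tests 1 and 3) or an object with separate "low_freq"/"high_freq" specs, which is exactly what the get*MinimumNumberShouldMatchSpec() assertions verify. A builder-side sketch, assuming the 1.x QueryBuilders method names:

    import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery;

    // mirrors commonTerms-query2.json: separate specs for the two frequency groups
    commonTermsQuery("dogs", "buck mia tom")
            .highFreqMinimumShouldMatch("50%")
            .lowFreqMinimumShouldMatch("5<20%");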
diff --git a/src/test/java/org/elasticsearch/index/query/constantScore-query.json b/src/test/java/org/elasticsearch/index/query/constantScore-query.json
new file mode 100644
index 0000000..bf59bc5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/constantScore-query.json
@@ -0,0 +1,9 @@
+{
+ constant_score:{
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json b/src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json
new file mode 100644
index 0000000..6f82921
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json
@@ -0,0 +1,10 @@
+{
+ "custom_boost_factor":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "boost_factor":1.3
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/custom_score1.json b/src/test/java/org/elasticsearch/index/query/custom_score1.json
new file mode 100644
index 0000000..6d7dcac
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/custom_score1.json
@@ -0,0 +1,10 @@
+{
+ "custom_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "script":"score * doc['name.first']"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/data.json b/src/test/java/org/elasticsearch/index/query/data.json
new file mode 100644
index 0000000..b3c6db8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/data.json
@@ -0,0 +1,45 @@
+{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null,
+ "location":{
+ "lat":1.1,
+ "lon":1.2
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json
new file mode 100644
index 0000000..08fe069
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json
@@ -0,0 +1,25 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json
new file mode 100644
index 0000000..a6c0bdc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "2013-01-01"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json
new file mode 100644
index 0000000..cc779db
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now+1m+1s"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json
new file mode 100644
index 0000000..5e7da0a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now+1m+1s/m"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json
new file mode 100644
index 0000000..0040bdc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json
new file mode 100644
index 0000000..9dd1d1a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now/d"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
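
The "born" bounds in this family of fixtures exercise date math: "now" is resolved at parse time, "now/d" rounds down to the day, and "now+1m+1s/m" adds one minute and one second and then rounds to the minute; the "_cache" variants probe how caching interacts with such non-deterministic bounds. A rough builder-side equivalent, as a sketch assuming the 1.x FilterBuilders API:

    import static org.elasticsearch.index.query.FilterBuilders.rangeFilter;

    // mirrors the cached now-with-rounding fixture: date range with a rounded upper bound
    rangeFilter("born")
            .from("2012-01-01")
            .to("now/d")
            .includeLower(true)
            .includeUpper(true)
            .cache(true);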
diff --git a/src/test/java/org/elasticsearch/index/query/disMax.json b/src/test/java/org/elasticsearch/index/query/disMax.json
new file mode 100644
index 0000000..99da2df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/disMax.json
@@ -0,0 +1,18 @@
+{
+ dis_max:{
+ tie_breaker:0.7,
+ boost:1.2,
+ queries:[
+ {
+ term:{
+ "name.first":"first"
+ }
+ },
+ {
+ term:{
+ "name.last":"last"
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/disMax2.json b/src/test/java/org/elasticsearch/index/query/disMax2.json
new file mode 100644
index 0000000..ea92d64
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/disMax2.json
@@ -0,0 +1,14 @@
+{
+ "dis_max":{
+ "queries":[
+ {
+ "prefix":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json b/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json
new file mode 100644
index 0000000..07f906c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json
@@ -0,0 +1,15 @@
+{
+ "function_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "functions": {
+ {
+ "boost_factor" : 3
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/field3.json b/src/test/java/org/elasticsearch/index/query/field3.json
new file mode 100644
index 0000000..61e349f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/field3.json
@@ -0,0 +1,9 @@
+{
+ field:{
+ age:{
+ query:34,
+ boost:2.0,
+ enable_position_increments:false
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query.json b/src/test/java/org/elasticsearch/index/query/filtered-query.json
new file mode 100644
index 0000000..8eea99a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query.json
@@ -0,0 +1,14 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query2.json b/src/test/java/org/elasticsearch/index/query/filtered-query2.json
new file mode 100644
index 0000000..b23faf4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query2.json
@@ -0,0 +1,14 @@
+{
+ filtered:{
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ },
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query3.json b/src/test/java/org/elasticsearch/index/query/filtered-query3.json
new file mode 100644
index 0000000..4a9db49
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query3.json
@@ -0,0 +1,19 @@
+{
+ filtered:{
+ filter:{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+ },
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query4.json b/src/test/java/org/elasticsearch/index/query/filtered-query4.json
new file mode 100644
index 0000000..8c10013
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query4.json
@@ -0,0 +1,17 @@
+{
+ filtered:{
+ query:{
+ wildcard:{
+ "name.first":{
+ wildcard:"sh*",
+ boost:1.1
+ }
+ }
+ },
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fquery-filter.json b/src/test/java/org/elasticsearch/index/query/fquery-filter.json
new file mode 100644
index 0000000..6015334
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fquery-filter.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "fquery":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json b/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json
new file mode 100644
index 0000000..e78c549
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json
@@ -0,0 +1,30 @@
+
+
+{
+ "function_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "functions": [
+ {
+ "boost_factor": 3,
+ "filter": {
+ term:{
+ "name.last":"banon"
+ }
+ }
+ },
+ {
+ "boost_factor": 3
+ },
+ {
+ "boost_factor": 3
+ }
+ ],
+ "boost" : 3,
+ "score_mode" : "avg",
+ "max_boost" : 10
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json
new file mode 100644
index 0000000..3e3d30f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json
@@ -0,0 +1,10 @@
+{
+ "fuzzy":{
+ "name.first":{
+ "value":"sh",
+ "fuzziness":0.1,
+ "prefix_length":1,
+ "boost":2.0
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json
new file mode 100644
index 0000000..095ecc6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json
@@ -0,0 +1,9 @@
+{
+ "fuzzy":{
+ "age":{
+ "value":12,
+ "fuzziness":5,
+ "boost":2.0
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzy.json b/src/test/java/org/elasticsearch/index/query/fuzzy.json
new file mode 100644
index 0000000..27d8dee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzy.json
@@ -0,0 +1,5 @@
+{
+ "fuzzy":{
+ "name.first":"sh"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json
new file mode 100644
index 0000000..ccd30a9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json
@@ -0,0 +1,7 @@
+{
+ fuzzy_like_this:{
+ fields:["name.first", "name.last"],
+ like_text:"something",
+ max_query_terms:12
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json
new file mode 100644
index 0000000..114ebe5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json
@@ -0,0 +1,8 @@
+{
+ fuzzy_like_this_field:{
+ "name.first":{
+ like_text:"something",
+ max_query_terms:12
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/geoShape-filter.json b/src/test/java/org/elasticsearch/index/query/geoShape-filter.json
new file mode 100644
index 0000000..a4392ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geoShape-filter.json
@@ -0,0 +1,21 @@
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_shape" : {
+ "country" : {
+ "shape" : {
+ "type" : "Envelope",
+ "coordinates" : [
+ [-45, 45],
+ [45, -45]
+ ]
+ },
+ "relation" : "intersects"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/geoShape-query.json b/src/test/java/org/elasticsearch/index/query/geoShape-query.json
new file mode 100644
index 0000000..e0af827
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geoShape-query.json
@@ -0,0 +1,14 @@
+{
+ "geo_shape" : {
+ "country" : {
+ "shape" : {
+ "type" : "Envelope",
+ "coordinates" : [
+ [-45, 45],
+ [45, -45]
+ ]
+ },
+ "relation" : "intersects"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json
new file mode 100644
index 0000000..6db6d5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":[-70, 40],
+ "bottom_right":[-80, 30]
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json
new file mode 100644
index 0000000..8d04915
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":[-70, 40],
+ "bottom_right":[-80, 30]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json
new file mode 100644
index 0000000..6321654
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json
@@ -0,0 +1,21 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":{
+ "lat":40,
+ "lon":-70
+ },
+ "bottom_right":{
+ "lat":30,
+ "lon":-80
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json
new file mode 100644
index 0000000..0899960
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":"40, -70",
+ "bottom_right":"30, -80"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json
new file mode 100644
index 0000000..170a02d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":"drn5x1g8cu2y",
+ "bottom_right":"30, -80"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json
new file mode 100644
index 0000000..347a463
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_right":"40, -80",
+ "bottom_left":"30, -70"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json
new file mode 100644
index 0000000..96ccbd0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "right": -80,
+ "top": 40,
+ "left": -70,
+ "bottom": 30
+ }
+ }
+ }
+ }
+}
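
geo_boundingbox1 through geo_boundingbox6 encode the same box six ways: [lon, lat] arrays, {lat, lon} objects, "lat, lon" strings, a geohash corner, swapped top_right/bottom_left corners, and bare top/bottom/left/right fields. The identical assertions in the six tests above confirm they all parse to the same corners. A builder-side sketch, assuming the 1.x FilterBuilders API:

    import static org.elasticsearch.index.query.FilterBuilders.geoBoundingBoxFilter;

    // same box as the fixtures: top-left (40, -70), bottom-right (30, -80)
    geoBoundingBoxFilter("location")
            .topLeft(40, -70)
            .bottomRight(30, -80);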
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance-named.json b/src/test/java/org/elasticsearch/index/query/geo_distance-named.json
new file mode 100644
index 0000000..a3e0be9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance-named.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance1.json b/src/test/java/org/elasticsearch/index/query/geo_distance1.json
new file mode 100644
index 0000000..cf3b0ab
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance1.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance10.json b/src/test/java/org/elasticsearch/index/query/geo_distance10.json
new file mode 100644
index 0000000..067b39e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance10.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":19.312128,
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance11.json b/src/test/java/org/elasticsearch/index/query/geo_distance11.json
new file mode 100644
index 0000000..008d5b5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance11.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance12.json b/src/test/java/org/elasticsearch/index/query/geo_distance12.json
new file mode 100644
index 0000000..8769223
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance12.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance2.json b/src/test/java/org/elasticsearch/index/query/geo_distance2.json
new file mode 100644
index 0000000..3283867
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance2.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":[-70, 40]
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance3.json b/src/test/java/org/elasticsearch/index/query/geo_distance3.json
new file mode 100644
index 0000000..193f234
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance3.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":"40, -70"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance4.json b/src/test/java/org/elasticsearch/index/query/geo_distance4.json
new file mode 100644
index 0000000..56a7409
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance4.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":"drn5x1g8cu2y"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance5.json b/src/test/java/org/elasticsearch/index/query/geo_distance5.json
new file mode 100644
index 0000000..bea9a3d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance5.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":12,
+ "unit":"mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance6.json b/src/test/java/org/elasticsearch/index/query/geo_distance6.json
new file mode 100644
index 0000000..4afa128
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance6.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12",
+ "unit":"mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance7.json b/src/test/java/org/elasticsearch/index/query/geo_distance7.json
new file mode 100644
index 0000000..7fcf8bd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance7.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance8.json b/src/test/java/org/elasticsearch/index/query/geo_distance8.json
new file mode 100644
index 0000000..3bafd16
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance8.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":19.312128,
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance9.json b/src/test/java/org/elasticsearch/index/query/geo_distance9.json
new file mode 100644
index 0000000..e6c8f12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance9.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128",
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
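
The twelve geo_distance fixtures walk through every accepted spelling of the distance ("12mi", a bare number plus a "unit" field, a numeric string plus "unit", even a "12mi" string combined with a separate "unit" field) and of the point (object, [lon, lat] array, "lat, lon" string, geohash). A builder-side sketch, assuming the 1.x FilterBuilders API:

    import static org.elasticsearch.index.query.FilterBuilders.geoDistanceFilter;

    // mirrors geo_distance1.json: everything within 12 miles of (40, -70)
    geoDistanceFilter("location")
            .point(40, -70)
            .distance("12mi");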
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json b/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json
new file mode 100644
index 0000000..91256c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon1.json b/src/test/java/org/elasticsearch/index/query/geo_polygon1.json
new file mode 100644
index 0000000..99ac329
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon1.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon2.json b/src/test/java/org/elasticsearch/index/query/geo_polygon2.json
new file mode 100644
index 0000000..588b22f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon2.json
@@ -0,0 +1,27 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ {
+ "lat":40,
+ "lon":-70
+ },
+ {
+ "lat":30,
+ "lon":-80
+ },
+ {
+ "lat":20,
+ "lon":-90
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon3.json b/src/test/java/org/elasticsearch/index/query/geo_polygon3.json
new file mode 100644
index 0000000..d6d905b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon3.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ "40, -70",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon4.json b/src/test/java/org/elasticsearch/index/query/geo_polygon4.json
new file mode 100644
index 0000000..ae9608d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon4.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ "drn5x1g8cu2y",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
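
As with the bounding-box fixtures, the polygon files vary only in point encoding: [lon, lat] arrays, {lat, lon} objects, "lat, lon" strings, and a geohash. Note that the tests above assert four points for three inputs, i.e. the parser apparently closes the ring by repeating the first point. A builder-side sketch, assuming the 1.x FilterBuilders API:

    import static org.elasticsearch.index.query.FilterBuilders.geoPolygonFilter;

    // mirrors geo_polygon1.json: a triangle over (40,-70), (30,-80), (20,-90)
    geoPolygonFilter("location")
            .addPoint(40, -70)
            .addPoint(30, -80)
            .addPoint(20, -90);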
diff --git a/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java b/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java
new file mode 100644
index 0000000..9b206a0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class IndexQueryParserModuleTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() {
+ Settings settings = settingsBuilder()
+ .put("index.queryparser.query.my.type", MyJsonQueryParser.class)
+ .put("index.queryparser.query.my.param1", "value1")
+ .put("index.queryparser.filter.my.type", MyJsonFilterParser.class)
+ .put("index.queryparser.filter.my.param2", "value2")
+ .put("index.cache.filter.type", "none")
+ .build();
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new CacheRecyclerModule(settings),
+ new CodecModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ new IndexQueryParserModule(settings),
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ MyJsonQueryParser myJsonQueryParser = (MyJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+ assertThat(myJsonQueryParser.settings().get("param1"), equalTo("value1"));
+
+ MyJsonFilterParser myJsonFilterParser = (MyJsonFilterParser) indexQueryParserService.filterParser("my");
+ assertThat(myJsonFilterParser.names()[0], equalTo("my"));
+ assertThat(myJsonFilterParser.settings().get("param2"), equalTo("value2"));
+
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ }
+}
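
IndexQueryParserModuleTests shows that custom parsers are wired purely through index settings: "index.queryparser.query.<name>.type" binds a QueryParser class under <name>, "index.queryparser.filter.<name>.type" does the same for filter parsers, and any further keys under that prefix reach the parser as its assisted Settings. Condensed from the test above:

    Settings settings = settingsBuilder()
            // register MyJsonQueryParser under the query name "my"
            .put("index.queryparser.query.my.type", MyJsonQueryParser.class)
            // parser-scoped parameter, visible via settings().get("param1")
            .put("index.queryparser.query.my.param1", "value1")
            // same mechanism for filter parsers
            .put("index.queryparser.filter.my.type", MyJsonFilterParser.class)
            .build();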
diff --git a/src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java b/src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java
new file mode 100644
index 0000000..09b53d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.FilterParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MyJsonFilterParser extends AbstractIndexComponent implements FilterParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public MyJsonFilterParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java b/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java
new file mode 100644
index 0000000..582ef13
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MyJsonQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public MyJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json b/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json
new file mode 100644
index 0000000..4b055cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "filter":{
+ "and" : {
+ "filters" : [
+ {
+ "has_child" : {
+ "type" : "child",
+ "query" : {
+ "match_all" : {}
+ }
+ }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/has-child.json b/src/test/java/org/elasticsearch/index/query/has-child.json
new file mode 100644
index 0000000..c87ac17
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/has-child.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "filter":{
+ "has_child" : {
+ "type" : "child",
+ "query" : {
+ "match_all" : {}
+ },
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/limit-filter.json b/src/test/java/org/elasticsearch/index/query/limit-filter.json
new file mode 100644
index 0000000..549f331
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/limit-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "filter":{
+ "limit":{
+ "value":2
+ }
+ },
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/mapping.json b/src/test/java/org/elasticsearch/index/query/mapping.json
new file mode 100644
index 0000000..3939249
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/mapping.json
@@ -0,0 +1,15 @@
+{
+ "person":{
+ "properties":{
+ "location":{
+ "type":"geo_point"
+ },
+ "country" : {
+ "type" : "geo_shape"
+ },
+ "born":{
+ "type":"date"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json b/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json
new file mode 100644
index 0000000..47d1227
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json
@@ -0,0 +1,8 @@
+{
+ "match" : {
+ "message" : {
+ "query" : "this is a test",
+ "type" : "doesNotExist"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/matchAll.json b/src/test/java/org/elasticsearch/index/query/matchAll.json
new file mode 100644
index 0000000..3325646
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/matchAll.json
@@ -0,0 +1,5 @@
+{
+ match_all:{
+ boost:1.2
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/match_all_empty1.json b/src/test/java/org/elasticsearch/index/query/match_all_empty1.json
new file mode 100644
index 0000000..6dd141f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/match_all_empty1.json
@@ -0,0 +1,3 @@
+{
+ "match_all": {}
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/match_all_empty2.json b/src/test/java/org/elasticsearch/index/query/match_all_empty2.json
new file mode 100644
index 0000000..a0549df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/match_all_empty2.json
@@ -0,0 +1,3 @@
+{
+ "match_all": []
+} \ No newline at end of file
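
match_all_empty1 and match_all_empty2 confirm that the lenient parser accepts both {} and [] as an empty match_all body. For comparison, the builder equivalent of matchAll.json above:

    import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

    // match every document, boosted as in matchAll.json
    matchAllQuery().boost(1.2f);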
diff --git a/src/test/java/org/elasticsearch/index/query/mlt.json b/src/test/java/org/elasticsearch/index/query/mlt.json
new file mode 100644
index 0000000..3f45bb4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/mlt.json
@@ -0,0 +1,8 @@
+{
+ more_like_this:{
+ fields:["name.first", "name.last"],
+ like_text:"something",
+ min_term_freq:1,
+ max_query_terms:12
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/mltField.json b/src/test/java/org/elasticsearch/index/query/mltField.json
new file mode 100644
index 0000000..9f9eb59
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/mltField.json
@@ -0,0 +1,9 @@
+{
+ more_like_this_field:{
+ "name.first":{
+ like_text:"something",
+ min_term_freq:1,
+ max_query_terms:12
+ }
+ }
+} \ No newline at end of file
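mlt.json and mltField.json above cover the more_like_this query over several fields and its per-field variant; both deliberately use unquoted keys, apparently to exercise the parser's lenient JSON handling. A programmatic counterpart, again under the assumption that the standard 1.x Java query DSL is in play, might look like this sketch:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class MoreLikeThisExamples {
        // Counterpart of mlt.json: several fields, shared like_text.
        public static QueryBuilder mlt() {
            return QueryBuilders.moreLikeThisQuery("name.first", "name.last")
                    .likeText("something")
                    .minTermFreq(1)
                    .maxQueryTerms(12);
        }

        // Counterpart of mltField.json: the single-field variant.
        public static QueryBuilder mltField() {
            return QueryBuilders.moreLikeThisFieldQuery("name.first")
                    .likeText("something")
                    .minTermFreq(1)
                    .maxQueryTerms(12);
        }
    }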
diff --git a/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json b/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json
new file mode 100644
index 0000000..9c3b751
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json
@@ -0,0 +1,7 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": [ "myField", "otherField" ],
+ "type":"doesNotExist"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json b/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json
new file mode 100644
index 0000000..d29211d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json
@@ -0,0 +1,6 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": "myField"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json b/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json
new file mode 100644
index 0000000..904ba0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json
@@ -0,0 +1,6 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": [ "myField", "otherField" ]
+ }
+} \ No newline at end of file
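The three multi_match fixtures check rejection of an unknown type, a single field given as a plain string, and the common array form. Under the same 1.x Java DSL assumption, both field spellings normalize to one builder call; the lone-field case is simply the one-argument variant:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class MultiMatchExample {
        // Equivalent to multiMatch-query-simple.json.
        public static QueryBuilder fooBar() {
            return QueryBuilders.multiMatchQuery("foo bar", "myField", "otherField");
        }
    }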
diff --git a/src/test/java/org/elasticsearch/index/query/not-filter.json b/src/test/java/org/elasticsearch/index/query/not-filter.json
new file mode 100644
index 0000000..42c48d8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/not-filter.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "not":{
+ "filter":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/not-filter2.json b/src/test/java/org/elasticsearch/index/query/not-filter2.json
new file mode 100644
index 0000000..6defaff
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/not-filter2.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "not":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/not-filter3.json b/src/test/java/org/elasticsearch/index/query/not-filter3.json
new file mode 100644
index 0000000..ab61335
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/not-filter3.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "filter":{
+ "not":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ },
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/numeric_range-filter.json b/src/test/java/org/elasticsearch/index/query/numeric_range-filter.json
new file mode 100644
index 0000000..fbae8ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/numeric_range-filter.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "numeric_range":{
+ "age":{
+ "from":"23",
+ "to":"54",
+ "include_lower":true,
+ "include_upper":false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/or-filter.json b/src/test/java/org/elasticsearch/index/query/or-filter.json
new file mode 100644
index 0000000..b1e73fa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/or-filter.json
@@ -0,0 +1,25 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "or":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/or-filter2.json b/src/test/java/org/elasticsearch/index/query/or-filter2.json
new file mode 100644
index 0000000..2c15e9a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/or-filter2.json
@@ -0,0 +1,23 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "or":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java
new file mode 100644
index 0000000..e04c346
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that parsers registered via {@link IndexQueryParserModule#addQueryParser}
+ * and {@link IndexQueryParserModule#addFilterParser} are resolvable from the
+ * {@link IndexQueryParserService} under their registered name.
+ */
+public class IndexQueryParserPlugin2Tests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() {
+ Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings);
+ queryParserModule.addQueryParser("my", PluginJsonQueryParser.class);
+ queryParserModule.addFilterParser("my", PluginJsonFilterParser.class);
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new CodecModule(settings),
+ new CacheRecyclerModule(settings),
+ new SettingsModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ queryParserModule,
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ PluginJsonQueryParser myJsonQueryParser = (PluginJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+
+ PluginJsonFilterParser myJsonFilterParser = (PluginJsonFilterParser) indexQueryParserService.filterParser("my");
+ assertThat(myJsonFilterParser.names()[0], equalTo("my"));
+
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ }
+} \ No newline at end of file
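The test above wires a Guice injector by hand to prove that addQueryParser/addFilterParser registrations surface through IndexQueryParserService under the registered name. In a deployed node the same registration would normally come from a plugin; the class below is a hypothetical example of that convention. Only the add* calls appear in this diff; MyQueryParserPlugin and the reflective onModule hook follow the usual 1.x plugin pattern and are assumptions here.

    import org.elasticsearch.index.query.IndexQueryParserModule;
    import org.elasticsearch.plugins.AbstractPlugin;

    public class MyQueryParserPlugin extends AbstractPlugin {
        @Override
        public String name() {
            return "my-query-parser";
        }

        @Override
        public String description() {
            return "Registers the 'my' query and filter parsers.";
        }

        // Invoked reflectively when the IndexQueryParserModule is processed.
        public void onModule(IndexQueryParserModule module) {
            module.addQueryParser("my", PluginJsonQueryParser.class);
            module.addFilterParser("my", PluginJsonFilterParser.class);
        }
    }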
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java
new file mode 100644
index 0000000..53b1cb5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that parsers registered through an
+ * {@link IndexQueryParserModule.QueryParsersProcessor} are resolvable from the
+ * {@link IndexQueryParserService} under their registered name.
+ */
+public class IndexQueryParserPluginTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() {
+ Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings);
+ queryParserModule.addProcessor(new IndexQueryParserModule.QueryParsersProcessor() {
+ @Override
+ public void processXContentQueryParsers(XContentQueryParsersBindings bindings) {
+ bindings.processXContentQueryParser("my", PluginJsonQueryParser.class);
+ }
+
+ @Override
+ public void processXContentFilterParsers(XContentFilterParsersBindings bindings) {
+ bindings.processXContentQueryFilter("my", PluginJsonFilterParser.class);
+ }
+ });
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new CacheRecyclerModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ queryParserModule,
+ new IndexNameModule(index),
+ new CodecModule(settings),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ PluginJsonQueryParser myJsonQueryParser = (PluginJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+
+ PluginJsonFilterParser myJsonFilterParser = (PluginJsonFilterParser) indexQueryParserService.filterParser("my");
+ assertThat(myJsonFilterParser.names()[0], equalTo("my"));
+
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java
new file mode 100644
index 0000000..194060e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.FilterParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ * Minimal test-only {@link FilterParser} used to verify custom filter parser
+ * registration; {@link #parse(QueryParseContext)} deliberately returns null.
+ */
+public class PluginJsonFilterParser extends AbstractIndexComponent implements FilterParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public PluginJsonFilterParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java
new file mode 100644
index 0000000..d475cdf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ * Minimal test-only {@link QueryParser} used to verify custom query parser
+ * registration; {@link #parse(QueryParseContext)} deliberately returns null.
+ */
+public class PluginJsonQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public PluginJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-boost.json b/src/test/java/org/elasticsearch/index/query/prefix-boost.json
new file mode 100644
index 0000000..4da623a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-boost.json
@@ -0,0 +1,8 @@
+{
+ "prefix":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json b/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json
new file mode 100644
index 0000000..de01701
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "prefix":{
+ "name.first":"sh",
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-filter.json b/src/test/java/org/elasticsearch/index/query/prefix-filter.json
new file mode 100644
index 0000000..1f2e42e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "prefix":{
+ "name.first":"sh"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json b/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json
new file mode 100644
index 0000000..83e56cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json
@@ -0,0 +1,8 @@
+{
+ prefix:{
+ "name.first":{
+ prefix:"sh",
+ boost:2.0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/prefix.json b/src/test/java/org/elasticsearch/index/query/prefix.json
new file mode 100644
index 0000000..49f5261
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix.json
@@ -0,0 +1,5 @@
+{
+ prefix:{
+ "name.first":"sh"
+ }
+} \ No newline at end of file
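The prefix fixtures cover the object form with value/boost, the legacy prefix key, the named and unnamed filter forms, and the bare string shorthand. Assuming the 1.x Java DSL, the boosted variant maps to a single builder chain:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class PrefixExample {
        // Counterpart of prefix-boost.json ("value":"sh", "boost":1.2).
        public static QueryBuilder boostedPrefix() {
            return QueryBuilders.prefixQuery("name.first", "sh").boost(1.2f);
        }
    }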
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields-match.json b/src/test/java/org/elasticsearch/index/query/query-fields-match.json
new file mode 100644
index 0000000..c15cdf3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields-match.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["name.*"],
+ use_dis_max:false,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields1.json b/src/test/java/org/elasticsearch/index/query/query-fields1.json
new file mode 100644
index 0000000..84abcaa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields1.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content", "name"],
+ use_dis_max:false,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields2.json b/src/test/java/org/elasticsearch/index/query/query-fields2.json
new file mode 100644
index 0000000..ab39c87
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields2.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content", "name"],
+ use_dis_max:true,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields3.json b/src/test/java/org/elasticsearch/index/query/query-fields3.json
new file mode 100644
index 0000000..8114c1b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields3.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content^2.2", "name"],
+ use_dis_max:true,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-filter.json b/src/test/java/org/elasticsearch/index/query/query-filter.json
new file mode 100644
index 0000000..dee136d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-filter.json
@@ -0,0 +1,16 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ query:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/query.json b/src/test/java/org/elasticsearch/index/query/query.json
new file mode 100644
index 0000000..f07a0d8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ default_field:"content",
+ phrase_slop:1,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query2.json b/src/test/java/org/elasticsearch/index/query/query2.json
new file mode 100644
index 0000000..410e05c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query2.json
@@ -0,0 +1,6 @@
+{
+ query_string:{
+ default_field:"age",
+ query:"12~0.2"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range-filter-named.json b/src/test/java/org/elasticsearch/index/query/range-filter-named.json
new file mode 100644
index 0000000..1b50177
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range-filter-named.json
@@ -0,0 +1,20 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "range":{
+ "age":{
+ "from":"23",
+ "to":"54",
+ "include_lower":true,
+ "include_upper":false
+ },
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range-filter.json b/src/test/java/org/elasticsearch/index/query/range-filter.json
new file mode 100644
index 0000000..3842e0b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range-filter.json
@@ -0,0 +1,19 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range.json b/src/test/java/org/elasticsearch/index/query/range.json
new file mode 100644
index 0000000..cc2363f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range.json
@@ -0,0 +1,10 @@
+{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range2.json b/src/test/java/org/elasticsearch/index/query/range2.json
new file mode 100644
index 0000000..c116b3c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range2.json
@@ -0,0 +1,8 @@
+{
+ range:{
+ age:{
+ gte:"23",
+ lt:"54"
+ }
+ }
+} \ No newline at end of file
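range.json and range2.json express the same half-open interval two ways: from/to plus include_lower/include_upper, and the gte/lt shorthand. Both should parse to identical bounds, which the sketch below mirrors, again assuming the 1.x Java DSL:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class RangeExamples {
        // Counterpart of range.json: explicit bounds plus inclusiveness flags.
        public static QueryBuilder explicitForm() {
            return QueryBuilders.rangeQuery("age")
                    .from("23").to("54")
                    .includeLower(true)
                    .includeUpper(false);
        }

        // Counterpart of range2.json: gte/lt shorthand for the same interval.
        public static QueryBuilder shorthandForm() {
            return QueryBuilders.rangeQuery("age").gte("23").lt("54");
        }
    }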
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-boost.json b/src/test/java/org/elasticsearch/index/query/regexp-boost.json
new file mode 100644
index 0000000..ed8699b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-boost.json
@@ -0,0 +1,8 @@
+{
+ "regexp":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json
new file mode 100644
index 0000000..112f8fb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json
@@ -0,0 +1,20 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test",
+ "_cache" : true,
+ "_cache_key" : "key"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json
new file mode 100644
index 0000000..a5d7307
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json
@@ -0,0 +1,18 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json b/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json
new file mode 100644
index 0000000..ac96b3e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y",
+ "_name" : "test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter.json b/src/test/java/org/elasticsearch/index/query/regexp-filter.json
new file mode 100644
index 0000000..d7c7bfd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp.json b/src/test/java/org/elasticsearch/index/query/regexp.json
new file mode 100644
index 0000000..6c3d694
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp.json
@@ -0,0 +1,5 @@
+{
+ "regexp":{
+ "name.first": "s.*y"
+ }
+} \ No newline at end of file
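The regexp fixtures exercise the query form, boost, and the filter variants with _name, _cache, _cache_key, and a pipe-separated flags string. Those flags correspond to the RegexpFlag enum; a programmatic equivalent of the flagged case, under the same 1.x Java DSL assumption (the flags(...) varargs signature in particular is assumed here):

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.index.query.RegexpFlag;

    public class RegexpExample {
        // "INTERSECTION|COMPLEMENT|EMPTY" from the filter fixtures above.
        public static QueryBuilder flaggedRegexp() {
            return QueryBuilders.regexpQuery("name.first", "s.*y")
                    .flags(RegexpFlag.INTERSECTION, RegexpFlag.COMPLEMENT, RegexpFlag.EMPTY);
        }
    }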
diff --git a/src/test/java/org/elasticsearch/index/query/simple-query-string.json b/src/test/java/org/elasticsearch/index/query/simple-query-string.json
new file mode 100644
index 0000000..9208e88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/simple-query-string.json
@@ -0,0 +1,8 @@
+{
+ "simple_query_string": {
+ "query": "foo bar",
+ "analyzer": "keyword",
+ "fields": ["body^5","_all"],
+ "default_operator": "and"
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json
new file mode 100644
index 0000000..d9ca05b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json
@@ -0,0 +1,13 @@
+{
+ "span_multi":{
+ "match":{
+ "fuzzy":{
+ "age":{
+ "value":12,
+ "fuzziness":5,
+ "boost":2.0
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json
new file mode 100644
index 0000000..edb58e3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json
@@ -0,0 +1,12 @@
+{
+ "span_multi":{
+ "match":{
+ "fuzzy" : {
+ "user" : {
+ "value" : "ki",
+ "boost" : 1.08
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json
new file mode 100644
index 0000000..62918d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json
@@ -0,0 +1,7 @@
+{
+ "span_multi":{
+ "match":{
+ "prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json
new file mode 100644
index 0000000..d9db8a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json
@@ -0,0 +1,16 @@
+{
+ "span_multi":{
+ "match":{
+ "range" : {
+ "age" : {
+ "from" : 10,
+ "to" : 20,
+ "include_lower" : true,
+ "include_upper": false,
+ "boost" : 2.0
+ }
+ }
+ }
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json
new file mode 100644
index 0000000..d9db8a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json
@@ -0,0 +1,16 @@
+{
+ "span_multi":{
+ "match":{
+ "range" : {
+ "age" : {
+ "from" : 10,
+ "to" : 20,
+ "include_lower" : true,
+ "include_upper": false,
+ "boost" : 2.0
+ }
+ }
+ }
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json
new file mode 100644
index 0000000..a2eaeb7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json
@@ -0,0 +1,7 @@
+{
+ "span_multi":{
+ "match":{
+ "wildcard" : { "user" : {"value": "ki*y" , "boost" : 1.08}}
+ }
+ }
+}
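span_multi wraps a multi-term query (fuzzy, prefix, range, or wildcard in the fixtures above) so it can participate in span clauses such as span_near. Programmatically, the wildcard case from the last fixture would look roughly like the sketch below; spanMultiTermQueryBuilder is the assumed 1.x DSL entry point and not something introduced by this diff:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class SpanMultiExample {
        // Wraps wildcard "ki*y" with boost 1.08, as in span-multi-term-wildcard.json.
        public static QueryBuilder wildcardAsSpan() {
            return QueryBuilders.spanMultiTermQueryBuilder(
                    QueryBuilders.wildcardQuery("user", "ki*y").boost(1.08f));
        }
    }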
diff --git a/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json b/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json
new file mode 100644
index 0000000..9849c10
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json
@@ -0,0 +1,29 @@
+{
+ span_near:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ field_masking_span:{
+ query:{
+ span_term:{
+ age_1 : 36
+ }
+ },
+ field:"age"
+ }
+ }
+ ],
+ slop:12,
+ in_order:false,
+ collect_payloads:false
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanFirst.json b/src/test/java/org/elasticsearch/index/query/spanFirst.json
new file mode 100644
index 0000000..9972c76
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanFirst.json
@@ -0,0 +1,10 @@
+{
+ span_first:{
+ match:{
+ span_term:{
+ age:34
+ }
+ },
+ end:12
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanNear.json b/src/test/java/org/elasticsearch/index/query/spanNear.json
new file mode 100644
index 0000000..ce17063
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanNear.json
@@ -0,0 +1,24 @@
+{
+ span_near:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ span_term:{
+ age:36
+ }
+ }
+ ],
+ slop:12,
+ in_order:false,
+ collect_payloads:false
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanNot.json b/src/test/java/org/elasticsearch/index/query/spanNot.json
new file mode 100644
index 0000000..c90de33
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanNot.json
@@ -0,0 +1,14 @@
+{
+ span_not:{
+ include:{
+ span_term:{
+ age:34
+ }
+ },
+ exclude:{
+ span_term:{
+ age:35
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanOr.json b/src/test/java/org/elasticsearch/index/query/spanOr.json
new file mode 100644
index 0000000..06c5262
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanOr.json
@@ -0,0 +1,21 @@
+{
+ span_or:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ span_term:{
+ age:36
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanOr2.json b/src/test/java/org/elasticsearch/index/query/spanOr2.json
new file mode 100644
index 0000000..b64ce1c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanOr2.json
@@ -0,0 +1,30 @@
+{
+ "span_or":{
+ "clauses":[
+ {
+ "span_term":{
+ "age":{
+ "value":34,
+ "boost":1.0
+ }
+ }
+ },
+ {
+ "span_term":{
+ "age":{
+ "value":35,
+ "boost":1.0
+ }
+ }
+ },
+ {
+ "span_term":{
+ "age":{
+ "value":36,
+ "boost":1.0
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanTerm.json b/src/test/java/org/elasticsearch/index/query/spanTerm.json
new file mode 100644
index 0000000..0186593
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanTerm.json
@@ -0,0 +1,5 @@
+{
+ span_term:{
+ age:34
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/starColonStar.json b/src/test/java/org/elasticsearch/index/query/starColonStar.json
new file mode 100644
index 0000000..c769ca0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/starColonStar.json
@@ -0,0 +1,5 @@
+{
+ "query_string": {
+ "query": "*:*"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term-filter-named.json b/src/test/java/org/elasticsearch/index/query/term-filter-named.json
new file mode 100644
index 0000000..c23b7b3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "term":{
+ "name.last":"banon",
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term-filter.json b/src/test/java/org/elasticsearch/index/query/term-filter.json
new file mode 100644
index 0000000..11d2bfd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "term":{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term-with-boost.json b/src/test/java/org/elasticsearch/index/query/term-with-boost.json
new file mode 100644
index 0000000..5f33cd5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term-with-boost.json
@@ -0,0 +1,8 @@
+{
+ term:{
+ age:{
+ value:34,
+ boost:2.0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term.json b/src/test/java/org/elasticsearch/index/query/term.json
new file mode 100644
index 0000000..378cf42
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term.json
@@ -0,0 +1,5 @@
+{
+ term:{
+ age:34
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/terms-filter-named.json b/src/test/java/org/elasticsearch/index/query/terms-filter-named.json
new file mode 100644
index 0000000..2cb8c7a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/terms-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "terms":{
+ "name.last":["banon", "kimchy"],
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/terms-filter.json b/src/test/java/org/elasticsearch/index/query/terms-filter.json
new file mode 100644
index 0000000..04a8d26
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/terms-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "terms":{
+ "name.last":["banon", "kimchy"]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/terms-query.json b/src/test/java/org/elasticsearch/index/query/terms-query.json
new file mode 100644
index 0000000..a3e0d08
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/terms-query.json
@@ -0,0 +1,5 @@
+{
+ "terms":{
+ "name.first":["shay", "test"]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/wildcard-boost.json b/src/test/java/org/elasticsearch/index/query/wildcard-boost.json
new file mode 100644
index 0000000..53c8d82
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/wildcard-boost.json
@@ -0,0 +1,8 @@
+{
+ "wildcard":{
+ "name.first":{
+ "value":"sh*",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/wildcard.json b/src/test/java/org/elasticsearch/index/query/wildcard.json
new file mode 100644
index 0000000..c8ed852
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/wildcard.json
@@ -0,0 +1,5 @@
+{
+ wildcard:{
+ "name.first":"sh*"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java b/src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java
new file mode 100644
index 0000000..1bb9631
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search;
+
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Tests {@link FieldDataTermsFilter} over string, long and double fields,
+ * including the empty-terms and mismatched-field-type cases.
+ */
+public class FieldDataTermsFilterTests extends ElasticsearchTestCase {
+
+ protected IndexFieldDataService ifdService;
+ protected IndexWriter writer;
+ protected AtomicReader reader;
+ protected StringFieldMapper strMapper;
+ protected LongFieldMapper lngMapper;
+ protected DoubleFieldMapper dblMapper;
+
+ @Before
+ public void setup() throws Exception {
+ super.setUp();
+
+ // setup field mappers
+ strMapper = new StringFieldMapper.Builder("str_value")
+ .build(new Mapper.BuilderContext(null, new ContentPath(1)));
+
+ lngMapper = new LongFieldMapper.Builder("lng_value")
+ .build(new Mapper.BuilderContext(null, new ContentPath(1)));
+
+ dblMapper = new DoubleFieldMapper.Builder("dbl_value")
+ .build(new Mapper.BuilderContext(null, new ContentPath(1)));
+
+ // create index and fielddata service
+ ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ writer = new IndexWriter(new RAMDirectory(),
+ new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));
+
+ int numDocs = 10;
+ for (int i = 0; i < numDocs; i++) {
+ Document d = new Document();
+ d.add(new StringField(strMapper.names().indexName(), "str" + i, Field.Store.NO));
+ d.add(new LongField(lngMapper.names().indexName(), i, Field.Store.NO));
+ d.add(new DoubleField(dblMapper.names().indexName(), Double.valueOf(i), Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ reader.close();
+ writer.close();
+ ifdService.clear();
+ SearchContext.removeCurrent();
+ }
+
+ protected <IFD extends IndexFieldData> IFD getFieldData(FieldMapper fieldMapper) {
+ return ifdService.getForField(fieldMapper);
+ }
+
+ protected <IFD extends IndexNumericFieldData> IFD getFieldData(NumberFieldMapper fieldMapper) {
+ return ifdService.getForField(fieldMapper);
+ }
+
+ @Test
+ public void testBytes() throws Exception {
+ List<Integer> docs = Arrays.asList(1, 5, 7);
+
+ ObjectOpenHashSet<BytesRef> hTerms = new ObjectOpenHashSet<BytesRef>();
+ List<BytesRef> cTerms = new ArrayList<BytesRef>(docs.size());
+ for (int i = 0; i < docs.size(); i++) {
+ BytesRef term = new BytesRef("str" + docs.get(i));
+ hTerms.add(term);
+ cTerms.add(term);
+ }
+
+ FieldDataTermsFilter hFilter = FieldDataTermsFilter.newBytes(getFieldData(strMapper), hTerms);
+
+ int size = reader.maxDoc();
+ FixedBitSet result = new FixedBitSet(size);
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ // filter from mapper
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(strMapper.termsFilter(ifdService, cTerms, null)
+ .getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+
+ // filter on a numeric field using BytesRef terms
+ // should not match any docs
+ hFilter = FieldDataTermsFilter.newBytes(getFieldData(lngMapper), hTerms);
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(0));
+
+ // filter on a numeric field using BytesRef terms
+ // should not match any docs
+ hFilter = FieldDataTermsFilter.newBytes(getFieldData(dblMapper), hTerms);
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(0));
+ }
+
+ @Test
+ public void testLongs() throws Exception {
+ List<Integer> docs = Arrays.asList(1, 5, 7);
+
+ LongOpenHashSet hTerms = new LongOpenHashSet();
+ List<Long> cTerms = new ArrayList<Long>(docs.size());
+ for (int i = 0; i < docs.size(); i++) {
+ long term = docs.get(i).longValue();
+ hTerms.add(term);
+ cTerms.add(term);
+ }
+
+ FieldDataTermsFilter hFilter = FieldDataTermsFilter.newLongs(getFieldData(lngMapper), hTerms);
+
+ int size = reader.maxDoc();
+ FixedBitSet result = new FixedBitSet(size);
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ // filter from mapper
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(lngMapper.termsFilter(ifdService, cTerms, null)
+ .getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ hFilter = FieldDataTermsFilter.newLongs(getFieldData(dblMapper), hTerms);
+ assertNull(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ }
+
+ @Test
+ public void testDoubles() throws Exception {
+ List<Integer> docs = Arrays.asList(1, 5, 7);
+
+ DoubleOpenHashSet hTerms = new DoubleOpenHashSet();
+ List<Double> cTerms = new ArrayList<Double>(docs.size());
+ for (int i = 0; i < docs.size(); i++) {
+ double term = Double.valueOf(docs.get(i));
+ hTerms.add(term);
+ cTerms.add(term);
+ }
+
+ FieldDataTermsFilter hFilter = FieldDataTermsFilter.newDoubles(getFieldData(dblMapper), hTerms);
+
+ int size = reader.maxDoc();
+ FixedBitSet result = new FixedBitSet(size);
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ // filter from mapper
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(dblMapper.termsFilter(ifdService, cTerms, null)
+ .getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ hFilter = FieldDataTermsFilter.newDoubles(getFieldData(lngMapper), hTerms);
+ assertNull(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ }
+
+ @Test
+ public void testNoTerms() throws Exception {
+ FieldDataTermsFilter hFilterBytes = FieldDataTermsFilter.newBytes(getFieldData(strMapper), new ObjectOpenHashSet<BytesRef>());
+ FieldDataTermsFilter hFilterLongs = FieldDataTermsFilter.newLongs(getFieldData(lngMapper), new LongOpenHashSet());
+ FieldDataTermsFilter hFilterDoubles = FieldDataTermsFilter.newDoubles(getFieldData(dblMapper), new DoubleOpenHashSet());
+ assertNull(hFilterBytes.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ assertNull(hFilterLongs.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ assertNull(hFilterDoubles.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java b/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java
new file mode 100644
index 0000000..ee4bea4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NoopCollector;
+
+import java.io.IOException;
+
+class BitSetCollector extends NoopCollector {
+
+ final FixedBitSet result;
+ int docBase;
+
+ BitSetCollector(int topLevelMaxDoc) {
+ this.result = new FixedBitSet(topLevelMaxDoc);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ result.set(docBase + doc);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ docBase = context.docBase;
+ }
+
+ FixedBitSet getResult() {
+ return result;
+ }
+
+}
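BitSetCollector gathers every hit into a top-level FixedBitSet, rebasing per-segment doc ids with the docBase captured in setNextReader. The child-query tests added next use it to compare actual result sets against expected ones; the helper below shows the call pattern (only BitSetCollector itself comes from this diff, the wrapper is illustrative):

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.FixedBitSet;

    final class BitSetCollectorUsage {
        // Runs the query and returns the matching top-level doc ids as a bit set.
        static FixedBitSet collect(IndexSearcher searcher, Query query) throws IOException {
            BitSetCollector collector = new BitSetCollector(searcher.getIndexReader().maxDoc());
            searcher.search(query, collector);
            return collector.getResult();
        }
    }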
diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
new file mode 100644
index 0000000..04152f8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.cache.id.SimpleIdCacheTests;
+import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.Description;
+import org.hamcrest.StringDescription;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+
+ for (int parent = 1; parent <= 5; parent++) {
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", Integer.toString(parent)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ indexWriter.addDocument(document);
+
+ for (int child = 1; child <= 3; child++) {
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(parent * 3 + child)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", Integer.toString(parent)), Field.Store.NO));
+ document.add(new StringField("field1", "value" + child, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+ }
+
+ IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+
+ TermQuery childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3))));
+ TermFilter parentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
+ int shortCircuitParentDocSet = random().nextInt(5);
+ ChildrenConstantScoreQuery query = new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, null);
+
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
+ assertThat(actualResult.cardinality(), equalTo(5));
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
+ String[] childValues = new String[numUniqueChildValues];
+ for (int i = 0; i < numUniqueChildValues; i++) {
+ childValues[i] = Integer.toString(i);
+ }
+
+ IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
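+        // Oracle: for each child value, the parent ids expected to match once deleted and
+        // filtered docs are taken into account.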
+ ObjectObjectOpenHashMap<String, NavigableSet<String>> childValueToParentIds = new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ if (markParentAsDeleted) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ String childValue = childValues[random().nextInt(childValues.length)];
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField("field1", childValue, Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markChildAsDeleted) {
+ NavigableSet<String> parentIds;
+ if (childValueToParentIds.containsKey(childValue)) {
+ parentIds = childValueToParentIds.lget();
+ } else {
+ childValueToParentIds.put(childValue, parentIds = new TreeSet<String>());
+ }
+ if (!markParentAsDeleted && !filterMe) {
+ parentIds.add(parent);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+
+ indexWriter.commit();
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
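+        // The queries under test resolve their searcher through the thread-local
+        // SearchContext, so wire the fresh reader into it.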
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueChildValues / 4;
+ for (int i = 0; i < max; i++) {
+            // Randomly pick a cached version: the queries under test contain specific logic that
+            // deals with deletes being applied at the top level when filters are cached.
+ Filter parentFilter;
+ if (random().nextBoolean()) {
+ parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
+ } else {
+ parentFilter = rawParentFilter;
+ }
+
+            // Wrapping the query in an XFilteredQuery below exercises Scorer#advance(..) and keeps Weight#scorer from receiving live docs as acceptedDocs
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a parent update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int parentId;
+ do {
+ parentId = random().nextInt(numParentDocs);
+ } while (filteredOrDeletedDocs.contains(parentId));
+
+ String parentUid = Uid.createUid("parent", Integer.toString(parentId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+ ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String childValue = childValues[random().nextInt(numUniqueChildValues)];
+ TermQuery childQuery = new TermQuery(new Term("field1", childValue));
+ int shortCircuitParentDocSet = random().nextInt(numParentDocs);
+ Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
+ Query query;
+ if (random().nextBoolean()) {
+ // Usage in HasChildQueryParser
+ query = new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
+ } else {
+ // Usage in HasChildFilterParser
+ query = new XConstantScoreQuery(
+ new CustomQueryWrappingFilter(
+ new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter)
+ )
+ );
+ }
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
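+            // Build the expected parent bit set by seeking each expected parent uid in a
+            // composite (slow) reader, honouring live docs.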
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ if (childValueToParentIds.containsKey(childValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableSet<String> parentIds = childValueToParentIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (String id : parentIds) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ static void assertBitSet(FixedBitSet actual, FixedBitSet expected, IndexSearcher searcher) throws IOException {
+ if (!actual.equals(expected)) {
+ Description description = new StringDescription();
+ description.appendText(reason(actual, expected, searcher));
+ description.appendText("\nExpected: ");
+ description.appendValue(expected);
+ description.appendText("\n got: ");
+ description.appendValue(actual);
+ description.appendText("\n");
+            throw new AssertionError(description.toString());
+ }
+ }
+
+ static String reason(FixedBitSet actual, FixedBitSet expected, IndexSearcher indexSearcher) throws IOException {
+ StringBuilder builder = new StringBuilder();
+ builder.append("expected cardinality:").append(expected.cardinality()).append('\n');
+ DocIdSetIterator iterator = expected.iterator();
+ for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+ builder.append("Expected doc[").append(doc).append("] with id value ").append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
+ }
+ builder.append("actual cardinality: ").append(actual.cardinality()).append('\n');
+ iterator = actual.iterator();
+ for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+ builder.append("Actual doc[").append(doc).append("] with id value ").append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
+ }
+ return builder.toString();
+ }
+
+ static SearchContext createSearchContext(String indexName, String parentType, String childType) throws IOException {
+ final Index index = new Index(indexName);
+ final IdCache idCache = new SimpleIdCache(index, ImmutableSettings.EMPTY);
+ final CacheRecycler cacheRecycler = new CacheRecycler(ImmutableSettings.EMPTY);
+ final PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(ImmutableSettings.EMPTY, new ThreadPool());
+ Settings settings = ImmutableSettings.EMPTY;
+ MapperService mapperService = MapperTestUtils.newMapperService(index, settings);
+ final IndexService indexService = new SimpleIdCacheTests.StubIndexService(mapperService);
+ idCache.setIndexService(indexService);
+        // The id cache is now registered as a document type listener, so mappings can be added.
+ mapperService.merge(
+ childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
+ );
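+        // The simplified definition above expands to roughly:
+        //   {"child": {"_parent": {"type": "parent"}}}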
+
+ ThreadPool threadPool = new ThreadPool();
+ NodeSettingsService nodeSettingsService = new NodeSettingsService(settings);
+ IndicesFilterCache indicesFilterCache = new IndicesFilterCache(settings, threadPool, cacheRecycler, nodeSettingsService);
+ WeightedFilterCache filterCache = new WeightedFilterCache(index, settings, indicesFilterCache);
+ return new TestSearchContext(cacheRecycler, pageCacheRecycler, idCache, indexService, filterCache);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
new file mode 100644
index 0000000..06a5db6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.assertBitSet;
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.createSearchContext;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ChildrenQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
+ String[] childValues = new String[numUniqueChildValues];
+ for (int i = 0; i < numUniqueChildValues; i++) {
+ childValues[i] = Integer.toString(i);
+ }
+
+ IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();
+
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
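+        // Oracle: for each child value, the parent ids expected to match together with the
+        // scores of their surviving children.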
+ ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>> childValueToParentIds = new ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>>();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ if (markParentAsDeleted) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ String childValue = childValues[random().nextInt(childValues.length)];
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField("field1", childValue, Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markChildAsDeleted) {
+ NavigableMap<String, FloatArrayList> parentIdToChildScores;
+ if (childValueToParentIds.containsKey(childValue)) {
+ parentIdToChildScores = childValueToParentIds.lget();
+ } else {
+ childValueToParentIds.put(childValue, parentIdToChildScores = new TreeMap<String, FloatArrayList>());
+ }
+ if (!markParentAsDeleted && !filterMe) {
+ FloatArrayList childScores = parentIdToChildScores.get(parent);
+ if (childScores == null) {
+ parentIdToChildScores.put(parent, childScores = new FloatArrayList());
+ }
+ childScores.add(1f);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ChildrenQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueChildValues / 4;
+ for (int i = 0; i < max; i++) {
+            // Randomly pick a cached version: the queries under test contain specific logic that
+            // deals with deletes being applied at the top level when filters are cached.
+ Filter parentFilter;
+ if (random().nextBoolean()) {
+ parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
+ } else {
+ parentFilter = rawParentFilter;
+ }
+
+            // Wrapping the query in an XFilteredQuery below exercises Scorer#advance(..) and keeps Weight#scorer from receiving live docs as acceptedDocs
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a parent update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int parentId;
+ do {
+ parentId = random().nextInt(numParentDocs);
+ } while (filteredOrDeletedDocs.contains(parentId));
+
+ String parentUid = Uid.createUid("parent", Integer.toString(parentId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+                        ChildrenQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String childValue = childValues[random().nextInt(numUniqueChildValues)];
+ Query childQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", childValue)));
+ int shortCircuitParentDocSet = random().nextInt(numParentDocs);
+ ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
+ Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
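+            // ChildrenQuery scores parents from their children's scores, so a random
+            // ScoreType (MAX, SUM or AVG) is exercised on each iteration.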
+ Query query = new ChildrenQuery("parent", "child", parentFilter, childQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ int numHits = 1 + random().nextInt(25);
+ TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
+ FixedBitSet actualResult = collector.getResult();
+
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
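+            // Replay the recorded per-parent child scores through MockScorer so the expected
+            // collector sees the same aggregated score that the real query should produce.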
+ MockScorer mockScorer = new MockScorer(scoreType);
+ TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ expectedTopDocsCollector.setScorer(mockScorer);
+ if (childValueToParentIds.containsKey(childValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableMap<String, FloatArrayList> parentIdToChildScores = childValueToParentIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (Map.Entry<String, FloatArrayList> entry : parentIdToChildScores.entrySet()) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", entry.getKey()));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ mockScorer.scores = entry.getValue();
+ expectedTopDocsCollector.collect(docsEnum.docID());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ static void assertTopDocs(TopDocs actual, TopDocs expected) {
+ assertThat("actual.totalHits != expected.totalHits", actual.totalHits, equalTo(expected.totalHits));
+ assertThat("actual.getMaxScore() != expected.getMaxScore()", actual.getMaxScore(), equalTo(expected.getMaxScore()));
+        assertThat("actual.scoreDocs.length != expected.scoreDocs.length", actual.scoreDocs.length, equalTo(expected.scoreDocs.length));
+ for (int i = 0; i < actual.scoreDocs.length; i++) {
+ ScoreDoc actualHit = actual.scoreDocs[i];
+ ScoreDoc expectedHit = expected.scoreDocs[i];
+ assertThat("actualHit.doc != expectedHit.doc", actualHit.doc, equalTo(expectedHit.doc));
+ assertThat("actualHit.score != expectedHit.score", actualHit.score, equalTo(expectedHit.score));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/MockScorer.java b/src/test/java/org/elasticsearch/index/search/child/MockScorer.java
new file mode 100644
index 0000000..7fe3fd5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/MockScorer.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+
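+/**
+ * A stub {@link Scorer} that replays a prepared list of child scores, aggregated
+ * according to the {@link ScoreType} under test (MAX, SUM or AVG).
+ */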
+class MockScorer extends Scorer {
+
+ final ScoreType scoreType;
+ FloatArrayList scores;
+
+ MockScorer(ScoreType scoreType) {
+ super(null);
+ this.scoreType = scoreType;
+ }
+
+ @Override
+ public float score() throws IOException {
+ float aggregateScore = 0;
+ for (int i = 0; i < scores.elementsCount; i++) {
+ float score = scores.buffer[i];
+ switch (scoreType) {
+ case MAX:
+ if (aggregateScore < score) {
+ aggregateScore = score;
+ }
+ break;
+ case SUM:
+ case AVG:
+ aggregateScore += score;
+ break;
+ }
+ }
+
+ if (scoreType == ScoreType.AVG) {
+ aggregateScore /= scores.elementsCount;
+ }
+
+ return aggregateScore;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int docID() {
+ return 0;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return 0;
+ }
+
+ @Override
+ public long cost() {
+ return 0;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
new file mode 100644
index 0000000..5907113
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.assertBitSet;
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.createSearchContext;
+
+/**
+ * Tests for {@link ParentConstantScoreQuery}.
+ */
+public class ParentConstantScoreQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueParentValues = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
+ String[] parentValues = new String[numUniqueParentValues];
+ for (int i = 0; i < numUniqueParentValues; i++) {
+ parentValues[i] = Integer.toString(i);
+ }
+
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
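+        // Oracles: for each parent value, the child ids expected to match, plus each
+        // child's owning parent doc (needed to reindex children during simulated updates).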
+ ObjectObjectOpenHashMap<String, NavigableSet<String>> parentValueToChildDocIds = new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
+ IntIntOpenHashMap childIdToParentId = new IntIntOpenHashMap();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ String parentValue = parentValues[random().nextInt(parentValues.length)];
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ document.add(new StringField("field1", parentValue, Field.Store.NO));
+ if (markParentAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
+ // ensure there is at least one child in the index
+ numChildDocs = Math.max(1, numChildDocs);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String child = Integer.toString(childDocId++);
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markParentAsDeleted) {
+ NavigableSet<String> childIds;
+ if (parentValueToChildDocIds.containsKey(parentValue)) {
+ childIds = parentValueToChildDocIds.lget();
+ } else {
+ parentValueToChildDocIds.put(parentValue, childIds = new TreeSet<String>());
+ }
+ if (!markChildAsDeleted && !filterMe) {
+                        childIdToParentId.put(Integer.parseInt(child), parentDocId);
+ childIds.add(child);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+                ParentConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueParentValues / 4;
+ for (int i = 0; i < max; i++) {
+            // Randomly pick a cached version: the queries under test contain specific logic that
+            // deals with deletes being applied at the top level when filters are cached.
+ Filter childrenFilter;
+ if (random().nextBoolean()) {
+ childrenFilter = SearchContext.current().filterCache().cache(rawChildrenFilter);
+ } else {
+ childrenFilter = rawChildrenFilter;
+ }
+
+            // Wrapping the query in an XFilteredQuery below exercises Scorer#advance(..) and keeps Weight#scorer from receiving live docs as acceptedDocs
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a child update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ int[] childIds = childIdToParentId.keys().toArray();
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int childId = childIds[random().nextInt(childIds.length)];
+ String childUid = Uid.createUid("child", Integer.toString(childId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
+ document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+ ParentConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
+ TermQuery parentQuery = new TermQuery(new Term("field1", parentValue));
+ Query query;
+ if (random().nextBoolean()) {
+ // Usage in HasParentQueryParser
+ query = new ParentConstantScoreQuery(parentQuery, "parent", childrenFilter);
+ } else {
+ // Usage in HasParentFilterParser
+ query = new XConstantScoreQuery(
+ new CustomQueryWrappingFilter(
+ new ParentConstantScoreQuery(parentQuery, "parent", childrenFilter)
+ )
+ );
+ }
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ if (parentValueToChildDocIds.containsKey(parentValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableSet<String> childIds = parentValueToChildDocIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (String id : childIds) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
new file mode 100644
index 0000000..4fb1b8e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.assertBitSet;
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.createSearchContext;
+import static org.elasticsearch.index.search.child.ChildrenQueryTests.assertTopDocs;
+
+public class ParentQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueParentValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
+ String[] parentValues = new String[numUniqueParentValues];
+ for (int i = 0; i < numUniqueParentValues; i++) {
+ parentValues[i] = Integer.toString(i);
+ }
+
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
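+        // Oracles: for each parent value, the matching child ids with their expected scores,
+        // plus each child's owning parent doc (needed to reindex children during updates).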
+ ObjectObjectOpenHashMap<String, NavigableMap<String, Float>> parentValueToChildIds = new ObjectObjectOpenHashMap<String, NavigableMap<String, Float>>();
+ IntIntOpenHashMap childIdToParentId = new IntIntOpenHashMap();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ String parentValue = parentValues[random().nextInt(parentValues.length)];
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ document.add(new StringField("field1", parentValue, Field.Store.NO));
+ if (markParentAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
+ // ensure there is at least one child in the index
+ numChildDocs = Math.max(1, numChildDocs);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ String child = Integer.toString(childDocId++);
+ boolean markChildAsDeleted = rarely();
+ boolean filterMe = rarely();
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markParentAsDeleted) {
+ NavigableMap<String, Float> childIdToScore;
+ if (parentValueToChildIds.containsKey(parentValue)) {
+ childIdToScore = parentValueToChildIds.lget();
+ } else {
+ parentValueToChildIds.put(parentValue, childIdToScore = new TreeMap<String, Float>());
+ }
+ if (!markChildAsDeleted && !filterMe) {
+                        assertFalse("child [" + child + "] already has a score", childIdToScore.containsKey(child));
+ childIdToScore.put(child, 1f);
+                        childIdToParentId.put(Integer.parseInt(child), parentDocId);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ParentQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueParentValues / 4;
+ for (int i = 0; i < max; i++) {
+            // Randomly pick a cached version: the queries under test contain specific logic that
+            // deals with deletes being applied at the top level when filters are cached.
+ Filter childrenFilter;
+ if (random().nextBoolean()) {
+ childrenFilter = SearchContext.current().filterCache().cache(rawChildrenFilter);
+ } else {
+ childrenFilter = rawChildrenFilter;
+ }
+
+            // Wrapping the query in an XFilteredQuery below exercises Scorer#advance(..) and keeps Weight#scorer from receiving live docs as acceptedDocs
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a child update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ int[] childIds = childIdToParentId.keys().toArray();
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int childId = childIds[random().nextInt(childIds.length)];
+ String childUid = Uid.createUid("child", Integer.toString(childId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
+ document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+                        ParentQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
+ Query parentQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", parentValue)));
+            Query query = new ParentQuery(parentQuery, "parent", childrenFilter);
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ int numHits = 1 + random().nextInt(25);
+ TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
+ FixedBitSet actualResult = collector.getResult();
+
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+            MockScorer mockScorer = new MockScorer(ScoreType.MAX); // the list only ever holds a single score at a time, so MAX simply returns it
+ mockScorer.scores = new FloatArrayList();
+ TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ expectedTopDocsCollector.setScorer(mockScorer);
+ if (parentValueToChildIds.containsKey(parentValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableMap<String, Float> childIdsAndScore = parentValueToChildIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (Map.Entry<String, Float> entry : childIdsAndScore.entrySet()) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", entry.getKey()));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
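+                        // Each matching child contributes exactly one score; replay it and
+                        // reset the list before the next child is collected.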
+ mockScorer.scores.add(entry.getValue());
+ expectedTopDocsCollector.collect(docsEnum.docID());
+ mockScorer.scores.clear();
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java b/src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java
new file mode 100644
index 0000000..ccd3ac7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java
@@ -0,0 +1,583 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.facet.SearchContextFacets;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
+import org.elasticsearch.search.fetch.partial.PartialFieldsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.scan.ScanContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+import java.util.List;
+
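+/**
+ * A bare-bones {@link SearchContext} for unit tests: almost every method is a no-op
+ * or returns a default value; only the searcher, caches, id cache and index service
+ * needed by the parent/child query tests are actually backed by real objects.
+ */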
+public class TestSearchContext extends SearchContext {
+
+ final CacheRecycler cacheRecycler;
+ final PageCacheRecycler pageCacheRecycler;
+ final IdCache idCache;
+ final IndexService indexService;
+ final FilterCache filterCache;
+
+ ContextIndexSearcher searcher;
+ int size;
+
+ public TestSearchContext(CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, IdCache idCache, IndexService indexService, FilterCache filterCache) {
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.idCache = idCache;
+ this.indexService = indexService;
+ this.filterCache = filterCache;
+ }
+
+ public TestSearchContext() {
+ this.cacheRecycler = null;
+ this.pageCacheRecycler = null;
+ this.idCache = null;
+ this.indexService = null;
+ this.filterCache = null;
+ }
+
+ @Override
+ public boolean clearAndRelease() {
+ return false;
+ }
+
+ @Override
+ public void preProcess() {
+ }
+
+ @Override
+ public Filter searchFilter(String[] types) {
+ return null;
+ }
+
+ @Override
+ public long id() {
+ return 0;
+ }
+
+ @Override
+ public String source() {
+ return null;
+ }
+
+ @Override
+ public ShardSearchRequest request() {
+ return null;
+ }
+
+ @Override
+ public SearchType searchType() {
+ return null;
+ }
+
+ @Override
+ public SearchContext searchType(SearchType searchType) {
+ return null;
+ }
+
+ @Override
+ public SearchShardTarget shardTarget() {
+ return null;
+ }
+
+ @Override
+ public int numberOfShards() {
+ return 0;
+ }
+
+ @Override
+ public boolean hasTypes() {
+ return false;
+ }
+
+ @Override
+ public String[] types() {
+ return new String[0];
+ }
+
+ @Override
+ public float queryBoost() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext queryBoost(float queryBoost) {
+ return null;
+ }
+
+ @Override
+ public long nowInMillis() {
+ return 0;
+ }
+
+ @Override
+ public Scroll scroll() {
+ return null;
+ }
+
+ @Override
+ public SearchContext scroll(Scroll scroll) {
+ return null;
+ }
+
+ @Override
+ public SearchContextFacets facets() {
+ return null;
+ }
+
+ @Override
+ public SearchContext facets(SearchContextFacets facets) {
+ return null;
+ }
+
+ @Override
+ public SearchContextAggregations aggregations() {
+ return null;
+ }
+
+ @Override
+ public SearchContext aggregations(SearchContextAggregations aggregations) {
+ return null;
+ }
+
+ @Override
+ public SearchContextHighlight highlight() {
+ return null;
+ }
+
+ @Override
+ public void highlight(SearchContextHighlight highlight) {
+ }
+
+ @Override
+ public SuggestionSearchContext suggest() {
+ return null;
+ }
+
+ @Override
+ public void suggest(SuggestionSearchContext suggest) {
+ }
+
+ @Override
+ public RescoreSearchContext rescore() {
+ return null;
+ }
+
+ @Override
+ public void rescore(RescoreSearchContext rescore) {
+ }
+
+ @Override
+ public boolean hasFieldDataFields() {
+ return false;
+ }
+
+ @Override
+ public FieldDataFieldsContext fieldDataFields() {
+ return null;
+ }
+
+ @Override
+ public boolean hasScriptFields() {
+ return false;
+ }
+
+ @Override
+ public ScriptFieldsContext scriptFields() {
+ return null;
+ }
+
+ @Override
+ public boolean hasPartialFields() {
+ return false;
+ }
+
+ @Override
+ public PartialFieldsContext partialFields() {
+ return null;
+ }
+
+ @Override
+ public boolean sourceRequested() {
+ return false;
+ }
+
+ @Override
+ public boolean hasFetchSourceContext() {
+ return false;
+ }
+
+ @Override
+ public FetchSourceContext fetchSourceContext() {
+ return null;
+ }
+
+ @Override
+ public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ return null;
+ }
+
+ @Override
+ public ContextIndexSearcher searcher() {
+ return searcher;
+ }
+
+ void setSearcher(ContextIndexSearcher searcher) {
+ this.searcher = searcher;
+ }
+
+ @Override
+ public IndexShard indexShard() {
+ return null;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return indexService.mapperService();
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return indexService.analysisService();
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return null;
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return null;
+ }
+
+ @Override
+ public ScriptService scriptService() {
+ return null;
+ }
+
+ @Override
+ public CacheRecycler cacheRecycler() {
+ return cacheRecycler;
+ }
+
+ @Override
+ public PageCacheRecycler pageCacheRecycler() {
+ return pageCacheRecycler;
+ }
+
+ @Override
+ public FilterCache filterCache() {
+ return filterCache;
+ }
+
+ @Override
+ public DocSetCache docSetCache() {
+ return null;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return null;
+ }
+
+ @Override
+ public IdCache idCache() {
+ return idCache;
+ }
+
+ @Override
+ public long timeoutInMillis() {
+ return 0;
+ }
+
+ @Override
+ public void timeoutInMillis(long timeoutInMillis) {
+ }
+
+ @Override
+ public SearchContext minimumScore(float minimumScore) {
+ return null;
+ }
+
+ @Override
+ public Float minimumScore() {
+ return null;
+ }
+
+ @Override
+ public SearchContext sort(Sort sort) {
+ return null;
+ }
+
+ @Override
+ public Sort sort() {
+ return null;
+ }
+
+ @Override
+ public SearchContext trackScores(boolean trackScores) {
+ return null;
+ }
+
+ @Override
+ public boolean trackScores() {
+ return false;
+ }
+
+ @Override
+ public SearchContext parsedPostFilter(ParsedFilter postFilter) {
+ return null;
+ }
+
+ @Override
+ public ParsedFilter parsedPostFilter() {
+ return null;
+ }
+
+ @Override
+ public Filter aliasFilter() {
+ return null;
+ }
+
+ @Override
+ public SearchContext parsedQuery(ParsedQuery query) {
+ return null;
+ }
+
+ @Override
+ public ParsedQuery parsedQuery() {
+ return null;
+ }
+
+ @Override
+ public Query query() {
+ return null;
+ }
+
+ @Override
+ public boolean queryRewritten() {
+ return false;
+ }
+
+ @Override
+ public SearchContext updateRewriteQuery(Query rewriteQuery) {
+ return null;
+ }
+
+ @Override
+ public int from() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext from(int from) {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ this.size = size;
+ }
+
+ @Override
+ public SearchContext size(int size) {
+ return null;
+ }
+
+ @Override
+ public boolean hasFieldNames() {
+ return false;
+ }
+
+ @Override
+ public List<String> fieldNames() {
+ return null;
+ }
+
+ @Override
+ public void emptyFieldNames() {
+ }
+
+ @Override
+ public boolean explain() {
+ return false;
+ }
+
+ @Override
+ public void explain(boolean explain) {
+ }
+
+ @Override
+ public List<String> groupStats() {
+ return null;
+ }
+
+ @Override
+ public void groupStats(List<String> groupStats) {
+ }
+
+ @Override
+ public boolean version() {
+ return false;
+ }
+
+ @Override
+ public void version(boolean version) {
+ }
+
+ @Override
+ public int[] docIdsToLoad() {
+ return new int[0];
+ }
+
+ @Override
+ public int docIdsToLoadFrom() {
+ return 0;
+ }
+
+ @Override
+ public int docIdsToLoadSize() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
+ return null;
+ }
+
+ @Override
+ public void accessed(long accessTime) {
+ }
+
+ @Override
+ public long lastAccessTime() {
+ return 0;
+ }
+
+ @Override
+ public long keepAlive() {
+ return 0;
+ }
+
+ @Override
+ public void keepAlive(long keepAlive) {
+ }
+
+ @Override
+ public SearchLookup lookup() {
+ return null;
+ }
+
+ @Override
+ public DfsSearchResult dfsResult() {
+ return null;
+ }
+
+ @Override
+ public QuerySearchResult queryResult() {
+ return null;
+ }
+
+ @Override
+ public FetchSearchResult fetchResult() {
+ return null;
+ }
+
+ @Override
+ public void addReleasable(Releasable releasable) {
+ }
+
+ @Override
+ public void clearReleasables() {
+ }
+
+ @Override
+ public ScanContext scanContext() {
+ return null;
+ }
+
+ @Override
+ public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
+ return null;
+ }
+
+ @Override
+ public FieldMappers smartNameFieldMappers(String name) {
+ return null;
+ }
+
+ @Override
+ public FieldMapper smartNameFieldMapper(String name) {
+ return null;
+ }
+
+ @Override
+ public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
+ return null;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ return false;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java b/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java
new file mode 100644
index 0000000..a20ee88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class GeoDistanceTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDistanceCheck() {
+ // Note: isWithin is an approximation, so even though 0.52 is outside the 50mi range we still get "true"
+ GeoDistance.DistanceBoundingCheck check = GeoDistance.distanceBoundingCheck(0, 0, 50, DistanceUnit.MILES);
+ assertThat(check.isWithin(0.5, 0.5), equalTo(true));
+ assertThat(check.isWithin(0.52, 0.52), equalTo(true));
+ assertThat(check.isWithin(1, 1), equalTo(false));
+
+ check = GeoDistance.distanceBoundingCheck(0, 179, 200, DistanceUnit.MILES);
+ assertThat(check.isWithin(0, -179), equalTo(true));
+ assertThat(check.isWithin(0, -178), equalTo(false));
+ }
+
+ @Test
+ public void testArcDistanceVsPlaneInEllipsis() {
+ GeoPoint centre = new GeoPoint(48.8534100, 2.3488000);
+ GeoPoint northernPoint = new GeoPoint(48.8801108681, 2.35152032666);
+ GeoPoint westernPoint = new GeoPoint(48.85265, 2.308896);
+
+ // With GeoDistance.ARC, both the northern and western points are within the 4km range
+ assertThat(GeoDistance.ARC.calculate(centre.lat(), centre.lon(), northernPoint.lat(),
+ northernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+ assertThat(GeoDistance.ARC.calculate(centre.lat(), centre.lon(), westernPoint.lat(),
+ westernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+
+ // With GeoDistance.PLANE, only the northern point is within the 4km range;
+ // the western point falls outside because the plane model treats a degree of
+ // longitude like a degree of latitude, so iso-distance contours appear elliptical
+ assertThat(GeoDistance.PLANE.calculate(centre.lat(), centre.lon(), northernPoint.lat(),
+ northernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+ assertThat(GeoDistance.PLANE.calculate(centre.lat(), centre.lon(), westernPoint.lat(),
+ westernPoint.lon(), DistanceUnit.KILOMETERS), greaterThan(4D));
+ }
+}
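
Why the western point falls outside the PLANE range while the northern one stays inside: the plane model treats a degree of longitude like a degree of latitude, but at Paris's latitude (about 48.85N) a degree of longitude spans only cos(48.85 deg), roughly 0.66, times as much ground. A self-contained sketch of the two models, assuming a mean earth radius of 6371 km (GeoDistance's own constants may differ slightly):

    import static java.lang.Math.*;

    public final class DistanceModelsSketch {
        static final double EARTH_RADIUS_KM = 6371.0; // mean radius, assumption

        // Great-circle (haversine) distance, the model behind GeoDistance.ARC.
        static double arcKm(double lat1, double lon1, double lat2, double lon2) {
            double dLat = toRadians(lat2 - lat1), dLon = toRadians(lon2 - lon1);
            double a = sin(dLat / 2) * sin(dLat / 2)
                    + cos(toRadians(lat1)) * cos(toRadians(lat2)) * sin(dLon / 2) * sin(dLon / 2);
            return EARTH_RADIUS_KM * 2 * atan2(sqrt(a), sqrt(1 - a));
        }

        // Flat model in the spirit of GeoDistance.PLANE: degrees of longitude and
        // latitude are weighted equally, so east-west distances are overestimated
        // away from the equator -- which is what makes the results elliptical.
        static double planeKm(double lat1, double lon1, double lat2, double lon2) {
            double kmPerDegree = 2 * PI * EARTH_RADIUS_KM / 360.0;
            double dx = (lon2 - lon1) * kmPerDegree;
            double dy = (lat2 - lat1) * kmPerDegree;
            return sqrt(dx * dx + dy * dy);
        }

        public static void main(String[] args) {
            // Paris centre vs the western point from the test above.
            System.out.println(arcKm(48.85341, 2.3488, 48.85265, 2.308896));   // ~2.9 (< 4)
            System.out.println(planeKm(48.85341, 2.3488, 48.85265, 2.308896)); // ~4.4 (> 4)
        }
    }
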
diff --git a/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java b/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java
new file mode 100644
index 0000000..7e872e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ *
+ */
+public class GeoHashUtilsTests extends ElasticsearchTestCase {
+
+ /**
+ * Pass condition: lat=42.6, lng=-5.6 should be encoded as "ezs42e44yx96",
+ * lat=57.64911, lng=10.40744 should be encoded as "u4pruydqqvj8"
+ */
+ @Test
+ public void testEncode() {
+ String hash = GeoHashUtils.encode(42.6, -5.6);
+ assertEquals("ezs42e44yx96", hash);
+
+ hash = GeoHashUtils.encode(57.64911, 10.40744);
+ assertEquals("u4pruydqqvj8", hash);
+ }
+
+ /**
+ * Pass condition: lat=52.3738007, lng=4.8909347 should be encoded and then
+ * decoded within 0.00001 of the original value
+ */
+ @Test
+ public void testDecodePreciseLongitudeLatitude() {
+ String hash = GeoHashUtils.encode(52.3738007, 4.8909347);
+
+ GeoPoint point = GeoHashUtils.decode(hash);
+
+ assertEquals(52.3738007, point.lat(), 0.00001D);
+ assertEquals(4.8909347, point.lon(), 0.00001D);
+ }
+
+ /**
+ * Pass condition: lat=84.6, lng=10.5 should be encoded and then decoded
+ * within 0.00001 of the original value
+ */
+ @Test
+ public void testDecodeImpreciseLongitudeLatitude() {
+ String hash = GeoHashUtils.encode(84.6, 10.5);
+
+ GeoPoint point = GeoHashUtils.decode(hash);
+
+ assertEquals(84.6, point.lat(), 0.00001D);
+ assertEquals(10.5, point.lon(), 0.00001D);
+ }
+
+ /*
+ * see https://issues.apache.org/jira/browse/LUCENE-1815 for details
+ */
+
+ @Test
+ public void testDecodeEncode() {
+ String geoHash = "u173zq37x014";
+ assertEquals(geoHash, GeoHashUtils.encode(52.3738007, 4.8909347));
+ GeoPoint decode = GeoHashUtils.decode(geoHash);
+ assertEquals(52.37380061d, decode.lat(), 0.000001d);
+ assertEquals(4.8909343d, decode.lon(), 0.000001d);
+
+ assertEquals(geoHash, GeoHashUtils.encode(decode.lat(), decode.lon()));
+ }
+}
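
The expected strings in testEncode follow from the standard geohash construction: interleave one bit of longitude with one bit of latitude (longitude first), each bit halving the remaining interval, and emit one base-32 character per five bits. A minimal sketch of that encoding, independent of GeoHashUtils but expected to reproduce the hashes asserted above:

    public final class GeohashSketch {
        private static final String BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz";

        static String encode(double lat, double lon, int precision) {
            double latMin = -90, latMax = 90, lonMin = -180, lonMax = 180;
            StringBuilder hash = new StringBuilder(precision);
            boolean evenBit = true; // bits alternate, starting with longitude
            int bit = 0, ch = 0;
            while (hash.length() < precision) {
                if (evenBit) { // longitude bit: halve the remaining lon interval
                    double mid = (lonMin + lonMax) / 2;
                    if (lon >= mid) { ch = ch << 1 | 1; lonMin = mid; } else { ch <<= 1; lonMax = mid; }
                } else {       // latitude bit: halve the remaining lat interval
                    double mid = (latMin + latMax) / 2;
                    if (lat >= mid) { ch = ch << 1 | 1; latMin = mid; } else { ch <<= 1; latMax = mid; }
                }
                evenBit = !evenBit;
                if (++bit == 5) { // five bits -> one base-32 character
                    hash.append(BASE32.charAt(ch));
                    bit = 0;
                    ch = 0;
                }
            }
            return hash.toString();
        }

        public static void main(String[] args) {
            System.out.println(encode(42.6, -5.6, 12));         // ezs42e44yx96
            System.out.println(encode(57.64911, 10.40744, 12)); // u4pruydqqvj8
        }
    }
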
diff --git a/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
new file mode 100644
index 0000000..6ac7198
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import org.apache.lucene.spatial.prefix.tree.Cell;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeoUtilsTests extends ElasticsearchTestCase {
+
+ /**
+ * Test special values like inf, NaN and -0.0.
+ */
+ @Test
+ public void testSpecials() {
+ assertThat(GeoUtils.normalizeLon(Double.POSITIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.POSITIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLon(Double.NEGATIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.NEGATIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLon(Double.NaN), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.NaN), equalTo(Double.NaN));
+ assertThat(0.0, not(equalTo(-0.0)));
+ assertThat(GeoUtils.normalizeLon(-0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(-0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(0.0), equalTo(0.0));
+ }
+
+ /**
+ * Test bounding values.
+ */
+ @Test
+ public void testBounds() {
+ assertThat(GeoUtils.normalizeLon(-360.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(-180.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(360.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(180.0), equalTo(0.0));
+ // and halves
+ assertThat(GeoUtils.normalizeLon(-180.0), equalTo(180.0));
+ assertThat(GeoUtils.normalizeLat(-90.0), equalTo(-90.0));
+ assertThat(GeoUtils.normalizeLon(180.0), equalTo(180.0));
+ assertThat(GeoUtils.normalizeLat(90.0), equalTo(90.0));
+ }
+
+ /**
+ * Test normal values.
+ */
+ @Test
+ public void testNormal() {
+ // Near bounds
+ assertThat(GeoUtils.normalizeLon(-360.5), equalTo(-0.5));
+ assertThat(GeoUtils.normalizeLat(-180.5), equalTo(0.5));
+ assertThat(GeoUtils.normalizeLon(360.5), equalTo(0.5));
+ assertThat(GeoUtils.normalizeLat(180.5), equalTo(-0.5));
+ // and near halves
+ assertThat(GeoUtils.normalizeLon(-180.5), equalTo(179.5));
+ assertThat(GeoUtils.normalizeLat(-90.5), equalTo(-89.5));
+ assertThat(GeoUtils.normalizeLon(180.5), equalTo(-179.5));
+ assertThat(GeoUtils.normalizeLat(90.5), equalTo(89.5));
+ // Now with points, to check for longitude shifting with latitude normalization.
+ // We've gone past the north pole and down the other side, so the longitude is
+ // shifted by 180
+ assertNormalizedPoint(new GeoPoint(90.5, 10), new GeoPoint(89.5, -170));
+
+ // Every 10 degrees, across multiple full turns
+ for (int shift = -20; shift <= 20; ++shift) {
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 10.0), equalTo(10.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 20.0), equalTo(20.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 30.0), equalTo(30.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 40.0), equalTo(40.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 50.0), equalTo(50.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 60.0), equalTo(60.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 70.0), equalTo(70.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 80.0), equalTo(80.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 90.0), equalTo(90.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 100.0), equalTo(100.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 110.0), equalTo(110.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 120.0), equalTo(120.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 130.0), equalTo(130.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 140.0), equalTo(140.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 150.0), equalTo(150.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 160.0), equalTo(160.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 170.0), equalTo(170.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 180.0), equalTo(180.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 190.0), equalTo(-170.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 200.0), equalTo(-160.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 210.0), equalTo(-150.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 220.0), equalTo(-140.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 230.0), equalTo(-130.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 240.0), equalTo(-120.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 250.0), equalTo(-110.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 260.0), equalTo(-100.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 270.0), equalTo(-90.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 280.0), equalTo(-80.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 290.0), equalTo(-70.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 300.0), equalTo(-60.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 310.0), equalTo(-50.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 320.0), equalTo(-40.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 330.0), equalTo(-30.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 340.0), equalTo(-20.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 350.0), equalTo(-10.0));
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + 360.0), equalTo(0.0));
+ }
+ for (int shift = -20; shift <= 20; ++shift) {
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 10.0), equalTo(10.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 20.0), equalTo(20.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 30.0), equalTo(30.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 40.0), equalTo(40.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 50.0), equalTo(50.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 60.0), equalTo(60.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 70.0), equalTo(70.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 80.0), equalTo(80.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 90.0), equalTo(90.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 100.0), equalTo(80.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 110.0), equalTo(70.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 120.0), equalTo(60.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 130.0), equalTo(50.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 140.0), equalTo(40.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 150.0), equalTo(30.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 160.0), equalTo(20.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 170.0), equalTo(10.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 180.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 190.0), equalTo(-10.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 200.0), equalTo(-20.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 210.0), equalTo(-30.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 220.0), equalTo(-40.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 230.0), equalTo(-50.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 240.0), equalTo(-60.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 250.0), equalTo(-70.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 260.0), equalTo(-80.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 270.0), equalTo(-90.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 280.0), equalTo(-80.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 290.0), equalTo(-70.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 300.0), equalTo(-60.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 310.0), equalTo(-50.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 320.0), equalTo(-40.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 330.0), equalTo(-30.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 340.0), equalTo(-20.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 350.0), equalTo(-10.0));
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + 360.0), equalTo(0.0));
+ }
+ }
+
+ /**
+ * Test huge values.
+ */
+ @Test
+ public void testHuge() {
+ assertThat(GeoUtils.normalizeLon(-36000000000181.0), equalTo(GeoUtils.normalizeLon(-181.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000180.0), equalTo(GeoUtils.normalizeLon(-180.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000179.0), equalTo(GeoUtils.normalizeLon(-179.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000178.0), equalTo(GeoUtils.normalizeLon(-178.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000001.0), equalTo(GeoUtils.normalizeLon(-001.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000000.0), equalTo(GeoUtils.normalizeLon(+000.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000001.0), equalTo(GeoUtils.normalizeLon(+001.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000002.0), equalTo(GeoUtils.normalizeLon(+002.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000178.0), equalTo(GeoUtils.normalizeLon(+178.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000179.0), equalTo(GeoUtils.normalizeLon(+179.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000180.0), equalTo(GeoUtils.normalizeLon(+180.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000181.0), equalTo(GeoUtils.normalizeLon(+181.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000091.0), equalTo(GeoUtils.normalizeLat(-091.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000090.0), equalTo(GeoUtils.normalizeLat(-090.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000089.0), equalTo(GeoUtils.normalizeLat(-089.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000088.0), equalTo(GeoUtils.normalizeLat(-088.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000001.0), equalTo(GeoUtils.normalizeLat(-001.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000000.0), equalTo(GeoUtils.normalizeLat(+000.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000001.0), equalTo(GeoUtils.normalizeLat(+001.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000002.0), equalTo(GeoUtils.normalizeLat(+002.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000088.0), equalTo(GeoUtils.normalizeLat(+088.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000089.0), equalTo(GeoUtils.normalizeLat(+089.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000090.0), equalTo(GeoUtils.normalizeLat(+090.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000091.0), equalTo(GeoUtils.normalizeLat(+091.0)));
+ }
+
+ @Test
+ public void testPrefixTreeCellSizes() {
+ assertThat(GeoUtils.EARTH_SEMI_MAJOR_AXIS, equalTo(DistanceUtils.EARTH_EQUATORIAL_RADIUS_KM * 1000));
+ assertThat(GeoUtils.quadTreeCellWidth(0), lessThanOrEqualTo(GeoUtils.EARTH_EQUATOR));
+
+ SpatialContext spatialContext = new SpatialContext(true);
+
+ GeohashPrefixTree geohashPrefixTree = new GeohashPrefixTree(spatialContext, GeohashPrefixTree.getMaxLevelsPossible()/2);
+ Cell gNode = geohashPrefixTree.getWorldCell();
+
+ for(int i = 0; i<geohashPrefixTree.getMaxLevels(); i++) {
+ double width = GeoUtils.geoHashCellWidth(i);
+ double height = GeoUtils.geoHashCellHeight(i);
+ double size = GeoUtils.geoHashCellSize(i);
+ double degrees = 360.0 * width / GeoUtils.EARTH_EQUATOR;
+ int level = GeoUtils.quadTreeLevelsForPrecision(size);
+
+ assertThat(GeoUtils.quadTreeCellWidth(level), lessThanOrEqualTo(width));
+ assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
+ assertThat(GeoUtils.geoHashLevelsForPrecision(size), equalTo(geohashPrefixTree.getLevelForDistance(degrees)));
+
+ assertThat("width at level "+i, gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("height at level "+i, gNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height / GeoUtils.EARTH_POLAR_DISTANCE));
+
+ gNode = gNode.getSubCells(null).iterator().next();
+ }
+
+ QuadPrefixTree quadPrefixTree = new QuadPrefixTree(spatialContext);
+ Cell qNode = quadPrefixTree.getWorldCell();
+ for (int i = 0; i < QuadPrefixTree.DEFAULT_MAX_LEVELS; i++) {
+
+ double degrees = 360.0/(1L<<i);
+ double width = GeoUtils.quadTreeCellWidth(i);
+ double height = GeoUtils.quadTreeCellHeight(i);
+ double size = GeoUtils.quadTreeCellSize(i);
+ int level = GeoUtils.quadTreeLevelsForPrecision(size);
+
+ assertThat(GeoUtils.quadTreeCellWidth(level), lessThanOrEqualTo(width));
+ assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
+ assertThat(GeoUtils.quadTreeLevelsForPrecision(size), equalTo(quadPrefixTree.getLevelForDistance(degrees)));
+
+ assertThat("width at level "+i, qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("height at level "+i, qNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height / GeoUtils.EARTH_POLAR_DISTANCE));
+
+ qNode = qNode.getSubCells(null).iterator().next();
+ }
+ }
+
+ private static void assertNormalizedPoint(GeoPoint input, GeoPoint expected) {
+ GeoUtils.normalizePoint(input);
+ assertThat(input, equalTo(expected));
+ }
+}
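
Taken together, the assertions above pin down the normalization contract: longitude folds into (-180, 180], latitude folds into [-90, 90] by reflecting across a crossed pole (for a full GeoPoint the longitude additionally shifts by 180), -0.0 is canonicalized to 0.0, and infinities come out as NaN. A reconstruction consistent with those expectations (a sketch, not GeoUtils itself):

    public final class NormalizeSketch {

        // Folds lon into (-180, 180], matching normalizeLon(-180) == 180 above.
        static double normalizeLon(double lon) {
            return centeredModulus(lon, 360);
        }

        // Folds lat into [-90, 90]; going past a pole reflects the latitude,
        // matching normalizeLat(100) == 80 and normalizeLat(260) == -80 above.
        static double normalizeLat(double lat) {
            lat = centeredModulus(lat, 360);
            if (lat < -90) {
                lat = -180 - lat;      // crossed the south pole
            } else if (lat > 90) {
                lat = 180 - lat;       // crossed the north pole
            }
            return lat;
        }

        static double centeredModulus(double d, double range) {
            double m = d % range;                  // NaN for +/-infinity, as tested
            if (m <= -range / 2) {
                m += range;
            } else if (m > range / 2) {
                m -= range;
            }
            return m + 0.0;                        // canonicalizes -0.0 to 0.0
        }

        public static void main(String[] args) {
            System.out.println(normalizeLon(190.0)); // -170.0
            System.out.println(normalizeLat(90.5));  // 89.5; a GeoPoint's longitude
                                                     // would also shift by 180,
                                                     // cf. assertNormalizedPoint
        }
    }
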
diff --git a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java
new file mode 100644
index 0000000..65bb9fe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.AbstractFieldDataTests;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public abstract class AbstractNumberNestedSortingTests extends AbstractFieldDataTests {
+
+ @Test
+ public void testNestedSorting() throws Exception {
+ List<Document> docs = new ArrayList<Document>();
+ Document document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 1, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 2, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 2, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 1, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 3, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 4, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 4, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 5, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 5, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 6, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 6, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // This doc will not be included because it doesn't have any nested docs
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 7, Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 7, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 8, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // Some garbage docs, just to check that the NestedFieldComparator can deal with them.
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+
+ SortMode sortMode = SortMode.SUM;
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ IndexFieldData.XFieldComparatorSource innerFieldComparator = createInnerFieldComparator("field2", sortMode, null);
+ Filter parentFilter = new TermFilter(new Term("__type", "parent"));
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(11));
+
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(13));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(11));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
+
+ childFilter = new TermFilter(new Term("filter_1", "T"));
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ query = new ToParentBlockJoinQuery(
+ new XFilteredQuery(new MatchAllDocsQuery(), childFilter),
+ new FixedBitSetCachingWrapperFilter(parentFilter),
+ ScoreMode.None
+ );
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+
+ sort = new Sort(new SortField("field2", nestedComparatorSource));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
+
+ innerFieldComparator = createInnerFieldComparator("field2", sortMode, 127);
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(127));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(24));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(127));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(8));
+
+ innerFieldComparator = createInnerFieldComparator("field2", sortMode, -127);
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ sort = new Sort(new SortField("field2", nestedComparatorSource));
+ topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-127));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(24));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(-127));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(7));
+
+ // Moved to a method because floating-point based XFieldComparatorSource implementations produce a different
+ // outcome for SortMode.AVG than integral-number based implementations (see the overrides in the subclasses)
+ assertAvgScoreMode(parentFilter, searcher, innerFieldComparator);
+ searcher.getIndexReader().close();
+ }
+
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ SortMode sortMode = SortMode.AVG;
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ Query query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+ protected abstract IndexableField createField(String name, int value, Field.Store store);
+
+ protected abstract IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue);
+
+}
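
One structural detail worth spelling out: each addDocuments() call above writes a single block, children first and parent last, so the parent always takes the highest doc id of its block. With three children per block the parents land at doc ids 3, 7, 11, 15, 19 and 23, then 24 for the childless parent and 28 for the final block, which is exactly the set of ids the assertions reference. A condensed sketch of that wiring, reusing the APIs imported in the test above (a fragment that assumes the same writer and field helpers):

    // One block: children first, the parent last (highest doc id in the block).
    List<Document> block = new ArrayList<Document>();
    Document child = new Document();
    child.add(createField("field2", 3, Field.Store.NO));
    block.add(child);
    Document parent = new Document();
    parent.add(new StringField("__type", "parent", Field.Store.NO));
    block.add(parent);
    writer.addDocuments(block);

    // Parents are identified by a filter; children are simply "not parents".
    Filter parentFilter = new TermFilter(new Term("__type", "parent"));
    Filter childFilter = new NotFilter(parentFilter);

    // Joins each matching child up to its enclosing parent document.
    Query joined = new ToParentBlockJoinQuery(
            new XFilteredQuery(new MatchAllDocsQuery(), childFilter),
            new FixedBitSetCachingWrapperFilter(parentFilter),
            ScoreMode.None);
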
diff --git a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
new file mode 100644
index 0000000..d34dd24
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.DoubleArrayIndexFieldData;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("double");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue) {
+ DoubleArrayIndexFieldData fieldData = getForField(fieldName);
+ return new DoubleValuesComparatorSource(fieldData, missingValue, sortMode);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new DoubleField(name, value, store);
+ }
+
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ SortMode sortMode = SortMode.AVG;
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ Query query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
new file mode 100644
index 0000000..ec15feb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.FloatArrayIndexFieldData;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class FloatNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue) {
+ FloatArrayIndexFieldData fieldData = getForField(fieldName);
+ return new FloatValuesComparatorSource(fieldData, missingValue, sortMode);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new FloatField(name, value, store);
+ }
+
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ SortMode sortMode = SortMode.AVG;
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ Query query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+}
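
This also explains why DoubleNestedSortingTests and FloatNestedSortingTests override assertAvgScoreMode while LongNestedSortingTests inherits the base expectations: with SortMode.AVG the integral comparators truncate, so the blocks behind parent docs 7 (children 3, 3, 2) and 11 (children 3, 3, 1) both average to 2 and tie on doc id, whereas floating-point field data keeps 2.67 versus 2.33 and ranks doc 11 first. In miniature (my reading of the expected orders above; the comparators' exact rounding is an assumption):

    public final class AvgModeSketch {
        public static void main(String[] args) {
            // parent 7's children: {3, 3, 2} -> sum 8; parent 11's: {3, 3, 1} -> sum 7
            System.out.println(8 / 3);    // 2      integral AVG: both truncate to 2,
            System.out.println(7 / 3);    // 2      tie broken by doc id, 7 before 11
            System.out.println(8 / 3.0);  // 2.66.. floating AVG keeps the fraction,
            System.out.println(7 / 3.0);  // 2.33.. so doc 11 (smaller avg) sorts first
        }
    }
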
diff --git a/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
new file mode 100644
index 0000000..33ab1d0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.PackedArrayIndexFieldData;
+
+/**
+ */
+public class LongNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue) {
+ PackedArrayIndexFieldData fieldData = getForField(fieldName);
+ return new LongValuesComparatorSource(fieldData, missingValue, sortMode);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new LongField(name, value, store);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
new file mode 100644
index 0000000..c120ca9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.fielddata.AbstractFieldDataTests;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class NestedSortingTests extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes"));
+ }
+
+ @Test
+ public void testNestedSorting() throws Exception {
+ List<Document> docs = new ArrayList<Document>();
+ Document document = new Document();
+ document.add(new StringField("field2", "a", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "b", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "c", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "a", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "c", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "d", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "e", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "b", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "e", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "f", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "g", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "c", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "g", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "h", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "i", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "d", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "i", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "j", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "k", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "f", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "k", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "l", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "m", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "g", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ // This doc will not be included, because it doesn't have nested docs
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "h", Field.Store.NO));
+ writer.addDocument(document);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "m", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "n", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "o", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "i", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // Some garbage docs, just to check that the NestedFieldComparator can deal with them.
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+
+ SortMode sortMode = SortMode.MIN;
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ PagedBytesIndexFieldData indexFieldData = getForField("field2");
+ BytesRefFieldComparatorSource innerSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode);
+ Filter parentFilter = new TermFilter(new Term("__type", "parent"));
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
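+ // Children are added before their parent within each addDocuments() block, so
+ // each parent takes the last (highest) doc id of its 4-doc block: 3, 7, 11, ...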
+
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("c"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("e"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i"));
+
+ sortMode = SortMode.MAX;
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("k"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("i"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("g"));
+
+ childFilter = new AndFilter(Arrays.asList(new NotFilter(parentFilter), new TermFilter(new Term("filter_1", "T"))));
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ query = new ToParentBlockJoinQuery(
+ new XFilteredQuery(new MatchAllDocsQuery(), childFilter),
+ new FixedBitSetCachingWrapperFilter(parentFilter),
+ ScoreMode.None
+ );
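+ // Restricting the child filter to filter_1=T removes the "F" children from
+ // sorting, and parents whose children are all "F" drop out of the results
+ // entirely, which is why the total hit count falls from 7 to 6 below.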
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("e"));
+
+ searcher.getIndexReader().close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java
new file mode 100644
index 0000000..8bf8586
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.*;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+public class SimilarityTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testResolveDefaultSimilarities() {
+ SimilarityLookupService similarityLookupService = similarityService().similarityLookupService();
+ assertThat(similarityLookupService.similarity("default"), instanceOf(PreBuiltSimilarityProvider.class));
+ assertThat(similarityLookupService.similarity("default").get(), instanceOf(DefaultSimilarity.class));
+ assertThat(similarityLookupService.similarity("BM25"), instanceOf(PreBuiltSimilarityProvider.class));
+ assertThat(similarityLookupService.similarity("BM25").get(), instanceOf(BM25Similarity.class));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_default() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "default")
+ .put("index.similarity.my_similarity.discount_overlaps", false)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(DefaultSimilarityProvider.class));
+
+ DefaultSimilarity similarity = (DefaultSimilarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getDiscountOverlaps(), equalTo(false));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_bm25() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "BM25")
+ .put("index.similarity.my_similarity.k1", 2.0f)
+ .put("index.similarity.my_similarity.b", 1.5f)
+ .put("index.similarity.my_similarity.discount_overlaps", false)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(BM25SimilarityProvider.class));
+
+ BM25Similarity similarity = (BM25Similarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getK1(), equalTo(2.0f));
+ assertThat(similarity.getB(), equalTo(1.5f));
+ assertThat(similarity.getDiscountOverlaps(), equalTo(false));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_DFR() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "DFR")
+ .put("index.similarity.my_similarity.basic_model", "g")
+ .put("index.similarity.my_similarity.after_effect", "l")
+ .put("index.similarity.my_similarity.normalization", "h2")
+ .put("index.similarity.my_similarity.normalization.h2.c", 3f)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(DFRSimilarityProvider.class));
+
+ DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getBasicModel(), instanceOf(BasicModelG.class));
+ assertThat(similarity.getAfterEffect(), instanceOf(AfterEffectL.class));
+ assertThat(similarity.getNormalization(), instanceOf(NormalizationH2.class));
+ assertThat(((NormalizationH2) similarity.getNormalization()).getC(), equalTo(3f));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_IB() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "IB")
+ .put("index.similarity.my_similarity.distribution", "spl")
+ .put("index.similarity.my_similarity.lambda", "ttf")
+ .put("index.similarity.my_similarity.normalization", "h2")
+ .put("index.similarity.my_similarity.normalization.h2.c", 3f)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(IBSimilarityProvider.class));
+
+ IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getDistribution(), instanceOf(DistributionSPL.class));
+ assertThat(similarity.getLambda(), instanceOf(LambdaTTF.class));
+ assertThat(similarity.getNormalization(), instanceOf(NormalizationH2.class));
+ assertThat(((NormalizationH2) similarity.getNormalization()).getC(), equalTo(3f));
+ }
+
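+ // Wires a minimal injector with just the modules required to parse mappings,
+ // so similarity settings can be resolved without starting a full node.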
+ private static SimilarityService similarityService() {
+ return similarityService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ private static SimilarityService similarityService(Settings settings) {
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder()
+ .add(new SettingsModule(settings))
+ .add(new IndexNameModule(index))
+ .add(new IndexSettingsModule(index, settings))
+ .add(new CodecModule(settings))
+ .add(new MapperServiceModule())
+ .add(new AnalysisModule(settings))
+ .add(new SimilarityModule(settings))
+ .add(new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ })
+ .createInjector();
+ return injector.getInstance(SimilarityService.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java b/src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java
new file mode 100644
index 0000000..528f052
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.distributor;
+
+import org.apache.lucene.store.*;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests the least-used and random-weighted store directory distributors.
+ */
+public class DistributorTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLeastUsedDistributor() throws Exception {
+ FakeFsDirectory[] directories = new FakeFsDirectory[]{
+ new FakeFsDirectory("dir0", 10L),
+ new FakeFsDirectory("dir1", 20L),
+ new FakeFsDirectory("dir2", 30L)
+ };
+ FakeDirectoryService directoryService = new FakeDirectoryService(directories);
+
+ LeastUsedDistributor distributor = new LeastUsedDistributor(directoryService);
+ for (int i = 0; i < 5; i++) {
+ assertThat(distributor.any(), equalTo((Directory) directories[2]));
+ }
+
+ directories[2].setUsableSpace(5L);
+ for (int i = 0; i < 5; i++) {
+ assertThat(distributor.any(), equalTo((Directory) directories[1]));
+ }
+
+ directories[1].setUsableSpace(0L);
+ for (int i = 0; i < 5; i++) {
+ assertThat(distributor.any(), equalTo((Directory) directories[0]));
+ }
+
+ directories[0].setUsableSpace(10L);
+ directories[1].setUsableSpace(20L);
+ directories[2].setUsableSpace(20L);
+ for (FakeFsDirectory directory : directories) {
+ directory.resetAllocationCount();
+ }
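+ // dir1 and dir2 now report equal free space, so allocations should split
+ // roughly evenly between them, while dir0 (least free space) gets none.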
+ for (int i = 0; i < 10000; i++) {
+ ((FakeFsDirectory) distributor.any()).incrementAllocationCount();
+ }
+ assertThat(directories[0].getAllocationCount(), equalTo(0));
+ assertThat((double) directories[1].getAllocationCount() / directories[2].getAllocationCount(), closeTo(1.0, 0.5));
+
+ // Test failover scenario
+ for (FakeFsDirectory directory : directories) {
+ directory.resetAllocationCount();
+ }
+ directories[0].setUsableSpace(0L);
+ directories[1].setUsableSpace(0L);
+ directories[2].setUsableSpace(0L);
+ for (int i = 0; i < 10000; i++) {
+ ((FakeFsDirectory) distributor.any()).incrementAllocationCount();
+ }
+ for (FakeFsDirectory directory : directories) {
+ assertThat(directory.getAllocationCount(), greaterThan(0));
+ }
+ assertThat((double) directories[0].getAllocationCount() / directories[2].getAllocationCount(), closeTo(1.0, 0.5));
+ assertThat((double) directories[1].getAllocationCount() / directories[2].getAllocationCount(), closeTo(1.0, 0.5));
+
+ }
+
+ @Test
+ public void testRandomWeightedDistributor() throws Exception {
+ FakeFsDirectory[] directories = new FakeFsDirectory[]{
+ new FakeFsDirectory("dir0", 10L),
+ new FakeFsDirectory("dir1", 20L),
+ new FakeFsDirectory("dir2", 30L)
+ };
+ FakeDirectoryService directoryService = new FakeDirectoryService(directories);
+
+ RandomWeightedDistributor randomWeightedDistributor = new RandomWeightedDistributor(directoryService);
+ for (int i = 0; i < 10000; i++) {
+ ((FakeFsDirectory) randomWeightedDistributor.any()).incrementAllocationCount();
+ }
+ for (FakeFsDirectory directory : directories) {
+ assertThat(directory.getAllocationCount(), greaterThan(0));
+ }
+ assertThat((double) directories[1].getAllocationCount() / directories[0].getAllocationCount(), closeTo(2.0, 0.5));
+ assertThat((double) directories[2].getAllocationCount() / directories[0].getAllocationCount(), closeTo(3.0, 0.5));
+
+ for (FakeFsDirectory directory : directories) {
+ directory.resetAllocationCount();
+ }
+
+ directories[1].setUsableSpace(0L);
+
+ for (int i = 0; i < 1000; i++) {
+ ((FakeFsDirectory) randomWeightedDistributor.any()).incrementAllocationCount();
+ }
+
+ assertThat(directories[0].getAllocationCount(), greaterThan(0));
+ assertThat(directories[1].getAllocationCount(), equalTo(0));
+ assertThat(directories[2].getAllocationCount(), greaterThan(0));
+
+ }
+
+ public static class FakeDirectoryService implements DirectoryService {
+
+ private final Directory[] directories;
+
+ public FakeDirectoryService(Directory[] directories) {
+ this.directories = directories;
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ return directories;
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+
+ @Override
+ public void renameFile(Directory dir, String from, String to) throws IOException {
+ }
+
+ @Override
+ public void fullDelete(Directory dir) throws IOException {
+ }
+ }
+
+ public static class FakeFsDirectory extends FSDirectory {
+
+ public int allocationCount;
+
+ public FakeFile fakeFile;
+
+ public FakeFsDirectory(String path, long usableSpace) throws IOException {
+ super(new File(path), NoLockFactory.getNoLockFactory());
+ fakeFile = new FakeFile(path, usableSpace);
+ allocationCount = 0;
+ }
+
+ @Override
+ public IndexInput openInput(String name, IOContext context) throws IOException {
+ throw new UnsupportedOperationException("Shouldn't be called in the test");
+ }
+
+ public void setUsableSpace(long usableSpace) {
+ fakeFile.setUsableSpace(usableSpace);
+ }
+
+ public void incrementAllocationCount() {
+ allocationCount++;
+ }
+
+ public int getAllocationCount() {
+ return allocationCount;
+ }
+
+ public void resetAllocationCount() {
+ allocationCount = 0;
+ }
+
+ @Override
+ public File getDirectory() {
+ return fakeFile;
+ }
+ }
+
+ public static class FakeFile extends File {
+ private long usableSpace;
+
+ public FakeFile(String s, long usableSpace) {
+ super(s);
+ this.usableSpace = usableSpace;
+ }
+
+ @Override
+ public long getUsableSpace() {
+ return usableSpace;
+ }
+
+ public void setUsableSpace(long usableSpace) {
+ this.usableSpace = usableSpace;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java b/src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java
new file mode 100644
index 0000000..a252b89
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.memory;
+
+import org.apache.lucene.store.*;
+import org.apache.lucene.store.bytebuffer.ByteBufferDirectory;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Round-trips data through a ByteBufferDirectory at various buffer sizes.
+ */
+public class SimpleByteBufferStoreTests extends ElasticsearchTestCase {
+
+ @Test
+ public void test1BufferNoCache() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(1, 0, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 1);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test1Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(1, 10, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 1);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test3Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(3, 10, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 3);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test10Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(10, 20, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 10);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test15Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(15, 30, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 15);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test40Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(40, 80, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 40);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void testSimpleLocking() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(40, 80, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+
+ Lock lock = dir.makeLock("testlock");
+
+ assertThat(lock.isLocked(), equalTo(false));
+ assertThat(lock.obtain(200), equalTo(true));
+ assertThat(lock.isLocked(), equalTo(true));
+ try {
+ assertThat(lock.obtain(200), equalTo(false));
+ assertThat("lock should be thrown", false, equalTo(true));
+ } catch (LockObtainFailedException e) {
+ // all is well
+ }
+ lock.release();
+ assertThat(lock.isLocked(), equalTo(false));
+ dir.close();
+ cache.close();
+ }
+
+ private void insertData(ByteBufferDirectory dir, int bufferSizeInBytes) throws IOException {
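+ // Writes 5 + 4 + 8 + 4 + 4 + 8 + 5 = 38 bytes, then seeks back to overwrite
+ // a few of them in place, so the resulting file length stays 38.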
+ byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8};
+ IndexOutput indexOutput = dir.createOutput("value1", IOContext.DEFAULT);
+ indexOutput.writeBytes(new byte[]{2, 4, 6, 7, 8}, 5);
+ indexOutput.writeInt(-1);
+ indexOutput.writeLong(10);
+ indexOutput.writeInt(0);
+ indexOutput.writeInt(0);
+ indexOutput.writeBytes(test, 8);
+ indexOutput.writeBytes(test, 5);
+
+ indexOutput.seek(0);
+ indexOutput.writeByte((byte) 8);
+ if (bufferSizeInBytes > 4) {
+ indexOutput.seek(2);
+ indexOutput.writeBytes(new byte[]{1, 2}, 2);
+ }
+
+ indexOutput.close();
+ }
+
+ private void verifyData(ByteBufferDirectory dir) throws IOException {
+ byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8};
+ assertThat(dir.fileExists("value1"), equalTo(true));
+ assertThat(dir.fileLength("value1"), equalTo(38l));
+
+ IndexInput indexInput = dir.openInput("value1", IOContext.DEFAULT);
+ indexInput.readBytes(test, 0, 5);
+ assertThat(test[0], equalTo((byte) 8));
+ assertThat(indexInput.readInt(), equalTo(-1));
+ assertThat(indexInput.readLong(), equalTo((long) 10));
+ assertThat(indexInput.readInt(), equalTo(0));
+ assertThat(indexInput.readInt(), equalTo(0));
+ indexInput.readBytes(test, 0, 8);
+ assertThat(test[0], equalTo((byte) 1));
+ assertThat(test[7], equalTo((byte) 8));
+ indexInput.readBytes(test, 0, 5);
+ assertThat(test[0], equalTo((byte) 1));
+ assertThat(test[4], equalTo((byte) 5));
+
+ indexInput.seek(28);
+ assertThat(indexInput.readByte(), equalTo((byte) 4));
+ indexInput.seek(30);
+ assertThat(indexInput.readByte(), equalTo((byte) 6));
+
+ indexInput.seek(0);
+ indexInput.readBytes(test, 0, 5);
+ assertThat(test[0], equalTo((byte) 8));
+
+ indexInput.close();
+
+ indexInput = dir.openInput("value1", IOContext.DEFAULT);
+ // iterate over all the data
+ for (int i = 0; i < 38; i++) {
+ indexInput.readByte();
+ }
+ indexInput.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
new file mode 100644
index 0000000..b6c9190
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.apache.lucene.index.Term;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.hamcrest.MatcherAssert;
+import org.hamcrest.Matchers;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Base class for translog tests; subclasses supply the concrete
+ * {@link Translog} implementation via {@link #create()}.
+ */
+public abstract class AbstractSimpleTranslogTests {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected Translog translog;
+
+ @Before
+ public void setUp() {
+ translog = create();
+ translog.newTranslog(1);
+ }
+
+ @After
+ public void tearDown() {
+ translog.closeWithDelete();
+ }
+
+ protected abstract Translog create();
+
+ @Test
+ public void testRead() throws IOException {
+ Translog.Location loc1 = translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Location loc2 = translog.add(new Translog.Create("test", "2", new byte[]{2}));
+ assertThat(TranslogStreams.readSource(translog.read(loc1)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{1})));
+ assertThat(TranslogStreams.readSource(translog.read(loc2)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{2})));
+ translog.sync();
+ assertThat(TranslogStreams.readSource(translog.read(loc1)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{1})));
+ assertThat(TranslogStreams.readSource(translog.read(loc2)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{2})));
+ Translog.Location loc3 = translog.add(new Translog.Create("test", "2", new byte[]{3}));
+ assertThat(TranslogStreams.readSource(translog.read(loc3)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ translog.sync();
+ assertThat(TranslogStreams.readSource(translog.read(loc3)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ }
+
+ @Test
+ public void testTransientTranslog() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ translog.newTransientTranslog(2);
+
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+
+ translog.makeTransientCurrent();
+
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); // now it's one, since it only includes "2"
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+ }
+
+ @Test
+ public void testSimpleOperations() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+
+ translog.add(new Translog.Delete(newUid("3")));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(3));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(3));
+ snapshot.release();
+
+ translog.add(new Translog.DeleteByQuery(new BytesArray(new byte[]{4}), null));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(4));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(4));
+ snapshot.release();
+
+ snapshot = translog.snapshot();
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.source().toBytes(), equalTo(new byte[]{1}));
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index.source().toBytes(), equalTo(new byte[]{2}));
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Delete delete = (Translog.Delete) snapshot.next();
+ assertThat(delete.uid(), equalTo(newUid("3")));
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) snapshot.next();
+ assertThat(deleteByQuery.source().toBytes(), equalTo(new byte[]{4}));
+
+ assertThat(snapshot.hasNext(), equalTo(false));
+
+ snapshot.release();
+
+ long firstId = translog.currentId();
+ translog.newTranslog(2);
+ assertThat(translog.currentId(), Matchers.not(equalTo(firstId)));
+
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(0));
+ snapshot.release();
+ }
+
+ @Test
+ public void testSnapshot() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ snapshot = translog.snapshot();
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.source().toBytes(), equalTo(new byte[]{1}));
+ snapshot.release();
+
+ Translog.Snapshot snapshot1 = translog.snapshot();
+ // we use the translogSize to also navigate to the last position on this snapshot
+ // so snapshot(Snapshot) will work properly
+ MatcherAssert.assertThat(snapshot1, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot1.estimatedTotalOperations(), equalTo(1));
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.snapshot(snapshot1);
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+
+ snapshot = translog.snapshot(snapshot1);
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index.source().toBytes(), equalTo(new byte[]{2}));
+ assertThat(snapshot.hasNext(), equalTo(false));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+ snapshot1.release();
+ }
+
+ @Test
+ public void testSnapshotWithNewTranslog() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Snapshot actualSnapshot = translog.snapshot();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+
+ translog.newTranslog(2);
+
+ translog.add(new Translog.Index("test", "3", new byte[]{3}));
+
+ snapshot = translog.snapshot(actualSnapshot);
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ snapshot.release();
+
+ snapshot = translog.snapshot(actualSnapshot);
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index.source().toBytes(), equalTo(new byte[]{3}));
+ assertThat(snapshot.hasNext(), equalTo(false));
+
+ actualSnapshot.release();
+ snapshot.release();
+ }
+
+ @Test
+ public void testSnapshotWithSeekForward() {
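+ // Record the snapshot position after the first operation, then seek past it
+ // so only operations written after that position are visible.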
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ long lastPosition = snapshot.position();
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "2", new byte[]{1}));
+ snapshot = translog.snapshot();
+ snapshot.seekForward(lastPosition);
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ snapshot.release();
+
+ snapshot = translog.snapshot();
+ snapshot.seekForward(lastPosition);
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.id(), equalTo("2"));
+ snapshot.release();
+ }
+
+ private Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java b/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java
new file mode 100644
index 0000000..03cf701
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Matches a translog snapshot by counting its operations. Note that matching
+ * consumes the snapshot, advancing it to its last position.
+ */
+public class TranslogSizeMatcher extends TypeSafeMatcher<Translog.Snapshot> {
+
+ private final int size;
+
+ public TranslogSizeMatcher(int size) {
+ this.size = size;
+ }
+
+ @Override
+ public boolean matchesSafely(Translog.Snapshot snapshot) {
+ int count = 0;
+ while (snapshot.hasNext()) {
+ snapshot.next();
+ count++;
+ }
+ return size == count;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("a translog with size ").appendValue(size);
+ }
+
+ public static Matcher<Translog.Snapshot> translogSize(int size) {
+ return new TranslogSizeMatcher(size);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java
new file mode 100644
index 0000000..7802373
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.AbstractSimpleTranslogTests;
+import org.junit.AfterClass;
+
+import java.io.File;
+
+/**
+ * Runs the translog test suite against the BUFFERED file translog type.
+ */
+public class FsBufferedTranslogTests extends AbstractSimpleTranslogTests {
+
+ @Override
+ protected Translog create() {
+ return new FsTranslog(shardId,
+ ImmutableSettings.settingsBuilder().put("index.translog.fs.type", FsTranslogFile.Type.BUFFERED.name()).build(),
+ new File("data/fs-translog"));
+ }
+
+ @AfterClass
+ public static void cleanup() {
+ FileSystemUtils.deleteRecursively(new File("data/fs-translog"), true);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java
new file mode 100644
index 0000000..c826247
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.AbstractSimpleTranslogTests;
+import org.junit.AfterClass;
+
+import java.io.File;
+
+/**
+ * Runs the translog test suite against the SIMPLE file translog type.
+ */
+public class FsSimpleTranslogTests extends AbstractSimpleTranslogTests {
+
+ @Override
+ protected Translog create() {
+ return new FsTranslog(shardId,
+ ImmutableSettings.settingsBuilder().put("index.translog.fs.type", FsTranslogFile.Type.SIMPLE.name()).build(),
+ new File("data/fs-translog"));
+ }
+
+ @AfterClass
+ public static void cleanup() {
+ FileSystemUtils.deleteRecursively(new File("data/fs-translog"), true);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indexing/IndexActionTests.java b/src/test/java/org/elasticsearch/indexing/IndexActionTests.java
new file mode 100644
index 0000000..5dbb954
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indexing/IndexActionTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indexing;
+
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicIntegerArray;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * Tests the "created" flag reported by index and bulk responses.
+ */
+public class IndexActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testCreatedFlag() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+ assertFalse(indexResponse.isCreated());
+
+ client().prepareDelete("test", "type", "1").execute().actionGet();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+
+ }
+
+ @Test
+ public void testCreatedFlagWithFlush() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+
+ client().prepareDelete("test", "type", "1").execute().actionGet();
+
+ flush();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+ }
+
+ @Test
+ public void testCreatedFlagParallelExecution() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ int threadCount = 20;
+ final int docCount = 300;
+ int taskCount = docCount * threadCount;
+
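+ // Index the same pool of doc ids from many threads concurrently; at most one
+ // write per id may observe created == true, all later writes are updates.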
+ final AtomicIntegerArray createdCounts = new AtomicIntegerArray(docCount);
+ ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);
+ List<Callable<Void>> tasks = new ArrayList<Callable<Void>>(taskCount);
+ final Random random = getRandom();
+ for (int i=0;i< taskCount; i++ ) {
+ tasks.add(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ int docId = random.nextInt(docCount);
+ IndexResponse indexResponse = index("test", "type", Integer.toString(docId), "field1", "value");
+ if (indexResponse.isCreated()) createdCounts.incrementAndGet(docId);
+ return null;
+ }
+ });
+ }
+
+ threadPool.invokeAll(tasks);
+
+ for (int i=0;i<docCount;i++) {
+ assertThat(createdCounts.get(i), lessThanOrEqualTo(1));
+ }
+ }
+
+ @Test
+ public void testCreatedFlagWithExternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(123)
+ .setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+ }
+
+ @Test
+ public void testCreateFlagWithBulk() {
+ createIndex("test");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")).execute().actionGet();
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(1));
+ IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
+ assertTrue(indexResponse.isCreated());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java b/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java
new file mode 100644
index 0000000..3a8ebdf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indexlifecycle;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ * Integration tests for shard allocation as nodes join and leave the cluster.
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
+
+ @Slow
+ @Test
+ public void testIndexLifecycleActions() throws Exception {
+ if (randomBoolean()) { // pick one variant here; both run in full with @Nightly
+ testIndexLifecycleActionsWith11Shards0Backup();
+ } else {
+ testIndexLifecycleActionsWith11Shards1Backup();
+ }
+ }
+
+ @Slow
+ @Nightly
+ @Test
+ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 11)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("cluster.routing.schedule", "20ms") // reroute every 20ms so we identify new nodes fast
+ .build();
+
+ // start one server
+ logger.info("Starting sever1");
+ final String server_1 = cluster().startNode(settings);
+ final String node1 = getLocalNodeId(server_1);
+
+ logger.info("Creating index [test]");
+ CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test")).actionGet();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ RoutingNode routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+
+ logger.info("Starting server2");
+ // start another server
+ String server_2 = cluster().startNode(settings);
+
+ // first wait for 2 nodes in the cluster
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ setMinimumMasterNodes(2);
+ final String node2 = getLocalNodeId(server_2);
+
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForRelocatingShards(0)).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(22));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2);
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+ RoutingNode routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
+
+ logger.info("Starting server3");
+ // start another server
+ String server_3 = cluster().startNode(settings);
+
+ // first wait for 3 nodes in the cluster
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ final String node3 = getLocalNodeId(server_3);
+
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForRelocatingShards(0)).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(22));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2, node3);
+
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ RoutingNode routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(7));
+
+ logger.info("Closing server1");
+ // kill the first server
+ cluster().stopRandomNode(TestCluster.nameFilter(server_1));
+ // verify health
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ client().admin().cluster().prepareReroute().get();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(22));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+ logger.info("Deleting index [test]");
+ // last, let's delete the index
+ DeleteIndexResponse deleteIndexResponse = client().admin().indices().prepareDelete("test").execute().actionGet();
+ assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.isEmpty(), equalTo(true));
+
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+ assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
+ }
+
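+ // resolves a test-cluster node name to the id of its local discovery node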
+ private String getLocalNodeId(String name) {
+ Discovery discovery = cluster().getInstance(Discovery.class, name);
+ String nodeId = discovery.localNode().getId();
+ assertThat(nodeId, not(nullValue()));
+ return nodeId;
+ }
+
+ @Slow
+ @Nightly
+ @Test
+ public void testIndexLifecycleActionsWith11Shards0Backup() throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 11)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("cluster.routing.schedule", "20ms") // reroute every 20ms so we identify new nodes fast
+ .build();
+
+ // start one server
+ logger.info("Starting server1");
+ final String server_1 = cluster().startNode(settings);
+
+ final String node1 = getLocalNodeId(server_1);
+
+ logger.info("Creating index [test]");
+ CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test")).actionGet();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1);
+ RoutingNode routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+
+ // start another server
+ logger.info("Starting server2");
+ final String server_2 = cluster().startNode(settings);
+
+ // first wait for 2 nodes in the cluster
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ setMinimumMasterNodes(2);
+ final String node2 = getLocalNodeId(server_2);
+
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2);
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(6), equalTo(5)));
+ RoutingNode routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6)));
+
+ // start another server
+ logger.info("Starting server3");
+ final String server_3 = cluster().startNode();
+
+ // first wait for 3 nodes in the cluster
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ final String node3 = getLocalNodeId(server_3);
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForRelocatingShards(0)).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2, node3);
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ RoutingNode routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(4), equalTo(3)));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(4), equalTo(3)));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(3));
+
+ logger.info("Closing server1");
+ // kill the first server
+ cluster().stopRandomNode(TestCluster.nameFilter(server_1));
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ client().admin().cluster().prepareReroute().get();
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForRelocatingShards(0)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6)));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6)));
+
+ logger.info("Deleting index [test]");
+ // last, let's delete the index
+ DeleteIndexResponse deleteIndexResponse = client().admin().indices().delete(deleteIndexRequest("test")).actionGet();
+ assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.isEmpty(), equalTo(true));
+
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+ assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
+ }
+
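+ // asserts that the routing nodes contain exactly the given node ids, in any order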
+ private void assertNodesPresent(RoutingNodes routingNodes, String... nodes) {
+ final Set<String> keySet = Sets.newHashSet(Iterables.transform(routingNodes, new Function<RoutingNode, String>() {
+ @Override
+ public String apply(RoutingNode input) {
+ return input.nodeId();
+ }
+ }));
+ assertThat(keySet, containsInAnyOrder(nodes));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java b/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java
new file mode 100644
index 0000000..77762e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION;
+import static org.elasticsearch.common.settings.ImmutableSettings.builder;
+import static org.elasticsearch.index.shard.IndexShardState.*;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
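+ // numNodes = 0: each test starts its own nodes explicitly, so listeners can be registered before shards are allocated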
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class IndicesLifecycleListenerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testIndexStateShardChanged() throws Throwable {
+
+ //start with a single node
+ String node1 = cluster().startNode();
+ IndexShardStateChangeListener stateChangeListenerNode1 = new IndexShardStateChangeListener();
+ //add a listener that keeps track of the shard state changes
+ cluster().getInstance(IndicesLifecycle.class, node1).addListener(stateChangeListenerNode1);
+
+ //create an index
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 6, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ //new shards got started
+ assertShardStatesMatch(stateChangeListenerNode1, 6, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //add a node: 3 out of the 6 shards will be relocated to it
+ //disable allocation before starting a new node, as we need to register the listener first
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)));
+ String node2 = cluster().startNode();
+ IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener();
+ //add a listener that keeps track of the shard state changes
+ cluster().getInstance(IndicesLifecycle.class, node2).addListener(stateChangeListenerNode2);
+ //re-enable allocation
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, false)));
+ ensureGreen();
+
+ //the 3 relocated shards get closed on the first node
+ assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED);
+ //the 3 relocated shards get created on the second node
+ assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //increase replicas from 0 to 1
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder().put(SETTING_NUMBER_OF_REPLICAS, 1)));
+ ensureGreen();
+
+ //3 replicas are allocated to the first node
+ assertShardStatesMatch(stateChangeListenerNode1, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //3 replicas are allocated to the second node
+ assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //close the index
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED);
+ assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED);
+ }
+
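+ // busy-waits up to one minute until each of the numShards shards has gone through exactly the given ordered states, then clears the recorded states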
+ private static void assertShardStatesMatch(final IndexShardStateChangeListener stateChangeListener, final int numShards, final IndexShardState... shardStates)
+ throws InterruptedException {
+
+ Predicate<Object> waitPredicate = new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ if (stateChangeListener.shardStates.size() != numShards) {
+ return false;
+ }
+ for (List<IndexShardState> indexShardStates : stateChangeListener.shardStates.values()) {
+ if (indexShardStates == null || indexShardStates.size() != shardStates.length) {
+ return false;
+ }
+ for (int i = 0; i < shardStates.length; i++) {
+ if (indexShardStates.get(i) != shardStates[i]) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ };
+ if (!awaitBusy(waitPredicate, 1, TimeUnit.MINUTES)) {
+ fail("failed to observe expect shard states\n" +
+ "expected: [" + numShards + "] shards with states: " + Strings.arrayToCommaDelimitedString(shardStates) + "\n" +
+ "observed:\n" + stateChangeListener);
+ }
+
+ stateChangeListener.shardStates.clear();
+ }
+
+ private static class IndexShardStateChangeListener extends IndicesLifecycle.Listener {
+ //we keep track of all the states (ordered) a shard goes through
+ final ConcurrentMap<ShardId, List<IndexShardState>> shardStates = Maps.newConcurrentMap();
+
+ @Override
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState newState, @Nullable String reason) {
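+ // putIfAbsent returns the existing list when the shard has already recorded a state; append to it in that case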
+ List<IndexShardState> shardStates = this.shardStates.putIfAbsent(indexShard.shardId(),
+ new CopyOnWriteArrayList<IndexShardState>(new IndexShardState[]{newState}));
+ if (shardStates != null) {
+ shardStates.add(newState);
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<ShardId, List<IndexShardState>> entry : shardStates.entrySet()) {
+ sb.append(entry.getKey()).append(" --> ").append(Strings.collectionToCommaDelimitedString(entry.getValue())).append("\n");
+ }
+ return sb.toString();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java
new file mode 100644
index 0000000..357dbed
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java
@@ -0,0 +1,919 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequestBuilder;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.MultiSearchRequestBuilder;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSpecifiedIndexUnavailable() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ ensureYellow();
+
+ // Verify defaults
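+ // second argument of verify(...): whether the request is expected to fail ("test2" does not exist yet)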
+ verify(search("test1", "test2"), true);
+ verify(msearch(null, "test1", "test2"), true);
+ verify(count("test1", "test2"), true);
+ verify(clearCache("test1", "test2"), true);
+ verify(_flush("test1", "test2"),true);
+ verify(gatewatSnapshot("test1", "test2"), true);
+ verify(segments("test1", "test2"), true);
+ verify(stats("test1", "test2"), true);
+ verify(status("test1", "test2"), true);
+ verify(optimize("test1", "test2"), true);
+ verify(refresh("test1", "test2"), true);
+ verify(validateQuery("test1", "test2"), true);
+ verify(aliasExists("test1", "test2"), true);
+ verify(typesExists("test1", "test2"), true);
+ verify(deleteByQuery("test1", "test2"), true);
+ verify(percolate("test1", "test2"), true);
+ verify(mpercolate(null, "test1", "test2"), false);
+ verify(suggest("test1", "test2"), true);
+ verify(getAliases("test1", "test2"), true);
+ verify(getFieldMapping("test1", "test2"), true);
+ verify(getMapping("test1", "test2"), true);
+ verify(getWarmer("test1", "test2"), true);
+ verify(getSettings("test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.strict();
+ verify(search("test1", "test2").setIndicesOptions(options), true);
+ verify(msearch(options, "test1", "test2"), true);
+ verify(count("test1", "test2").setIndicesOptions(options), true);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), true);
+ verify(_flush("test1", "test2").setIndicesOptions(options),true);
+ verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), true);
+ verify(segments("test1", "test2").setIndicesOptions(options), true);
+ verify(stats("test1", "test2").setIndicesOptions(options), true);
+ verify(status("test1", "test2").setIndicesOptions(options), true);
+ verify(optimize("test1", "test2").setIndicesOptions(options), true);
+ verify(refresh("test1", "test2").setIndicesOptions(options), true);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), true);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), true);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), true);
+ verify(deleteByQuery("test1", "test2").setIndicesOptions(options), true);
+ verify(percolate("test1", "test2").setIndicesOptions(options), true);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), true);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), true);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), true);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), true);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), true);
+
+ options = IndicesOptions.lenient();
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), false);
+ verify(_flush("test1", "test2").setIndicesOptions(options), false);
+ verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), false);
+ verify(segments("test1", "test2").setIndicesOptions(options), false);
+ verify(stats("test1", "test2").setIndicesOptions(options), false);
+ verify(status("test1", "test2").setIndicesOptions(options), false);
+ verify(optimize("test1", "test2").setIndicesOptions(options), false);
+ verify(refresh("test1", "test2").setIndicesOptions(options), false);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), false);
+ verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(percolate("test1", "test2").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), false);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), false);
+
+ options = IndicesOptions.strict();
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), false);
+ verify(_flush("test1", "test2").setIndicesOptions(options),false);
+ verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), false);
+ verify(segments("test1", "test2").setIndicesOptions(options), false);
+ verify(stats("test1", "test2").setIndicesOptions(options), false);
+ verify(status("test1", "test2").setIndicesOptions(options), false);
+ verify(optimize("test1", "test2").setIndicesOptions(options), false);
+ verify(refresh("test1", "test2").setIndicesOptions(options), false);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), false);
+ verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(percolate("test1", "test2").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), false);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testSpecifiedIndexUnavailable_snapshotRestore() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ ensureYellow();
+
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
+
+ verify(snapshot("snap2", "test1", "test2"), true);
+ verify(restore("snap1", "test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.strict();
+ verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), true);
+ verify(restore("snap1", "test1", "test2").setIndicesOptions(options), true);
+
+ options = IndicesOptions.lenient();
+ verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), false);
+ verify(restore("snap2", "test1", "test2").setIndicesOptions(options), false);
+
+ options = IndicesOptions.strict();
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(snapshot("snap3", "test1", "test2").setIndicesOptions(options), false);
+ verify(restore("snap3", "test1", "test2").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testWildcardBehaviour() throws Exception {
+ // Verify defaults for wildcards, when specifying no indices (*, _all, /)
+ String[] indices = Strings.EMPTY_ARRAY;
+ verify(search(indices), false);
+ verify(msearch(null, indices), false);
+ verify(count(indices), false);
+ verify(clearCache(indices), false);
+ verify(_flush(indices), false);
+ verify(gatewatSnapshot(indices), false);
+ verify(segments(indices), true);
+ verify(stats(indices), false);
+ verify(status(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), true);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(deleteByQuery(indices), true);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Now force allow_no_indices=true
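+ // fromOptions(ignoreUnavailable=false, allowNoIndices=true, expandWildcardsOpen=true, expandWildcardsClosed=false)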
+ IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false);
+ verify(search(indices).setIndicesOptions(options), false);
+ verify(msearch(options, indices).setIndicesOptions(options), false);
+ verify(count(indices).setIndicesOptions(options), false);
+ verify(clearCache(indices).setIndicesOptions(options), false);
+ verify(_flush(indices).setIndicesOptions(options), false);
+ verify(gatewatSnapshot(indices).setIndicesOptions(options), false);
+ verify(segments(indices).setIndicesOptions(options), false);
+ verify(stats(indices).setIndicesOptions(options), false);
+ verify(status(indices).setIndicesOptions(options), false);
+ verify(optimize(indices).setIndicesOptions(options), false);
+ verify(refresh(indices).setIndicesOptions(options), false);
+ verify(validateQuery(indices).setIndicesOptions(options), false);
+ verify(aliasExists(indices).setIndicesOptions(options), false);
+ verify(typesExists(indices).setIndicesOptions(options), false);
+ verify(deleteByQuery(indices).setIndicesOptions(options), false);
+ verify(percolate(indices).setIndicesOptions(options), false);
+ verify(mpercolate(options, indices), false);
+ verify(suggest(indices).setIndicesOptions(options), false);
+ verify(getAliases(indices).setIndicesOptions(options), false);
+ verify(getFieldMapping(indices).setIndicesOptions(options), false);
+ verify(getMapping(indices).setIndicesOptions(options), false);
+ verify(getWarmer(indices).setIndicesOptions(options), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("foobar"));
+ client().prepareIndex("foobar", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
+
+ // Verify defaults for wildcards, with one wildcard expression and one existing index
+ indices = new String[]{"foo*"};
+ verify(search(indices), false, 1);
+ verify(msearch(null, indices), false, 1);
+ verify(count(indices), false, 1);
+ verify(clearCache(indices), false);
+ verify(_flush(indices), false);
+ verify(gatewatSnapshot(indices), false);
+ verify(segments(indices), false);
+ verify(stats(indices), false);
+ verify(status(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), false);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(deleteByQuery(indices), false);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Verify defaults for wildcards, with two wildcard expressions and one existing index
+ indices = new String[]{"foo*", "bar*"};
+ verify(search(indices), false, 1);
+ verify(msearch(null, indices), false, 1);
+ verify(count(indices), false, 1);
+ verify(clearCache(indices), false);
+ verify(_flush(indices), false);
+ verify(gatewatSnapshot(indices), false);
+ verify(segments(indices), true);
+ verify(stats(indices), false);
+ verify(status(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), true);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(deleteByQuery(indices), true);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Now force allow_no_indices=true
+ options = IndicesOptions.fromOptions(false, true, true, false);
+ verify(search(indices).setIndicesOptions(options), false, 1);
+ verify(msearch(options, indices).setIndicesOptions(options), false, 1);
+ verify(count(indices).setIndicesOptions(options), false, 1);
+ verify(clearCache(indices).setIndicesOptions(options), false);
+ verify(_flush(indices).setIndicesOptions(options), false);
+ verify(gatewatSnapshot(indices).setIndicesOptions(options), false);
+ verify(segments(indices).setIndicesOptions(options), false);
+ verify(stats(indices).setIndicesOptions(options), false);
+ verify(status(indices).setIndicesOptions(options), false);
+ verify(optimize(indices).setIndicesOptions(options), false);
+ verify(refresh(indices).setIndicesOptions(options), false);
+ verify(validateQuery(indices).setIndicesOptions(options), false);
+ verify(aliasExists(indices).setIndicesOptions(options), false);
+ verify(typesExists(indices).setIndicesOptions(options), false);
+ verify(deleteByQuery(indices).setIndicesOptions(options), false);
+ verify(percolate(indices).setIndicesOptions(options), false);
+ verify(mpercolate(options, indices), false);
+ verify(suggest(indices).setIndicesOptions(options), false);
+ verify(getAliases(indices).setIndicesOptions(options), false);
+ verify(getFieldMapping(indices).setIndicesOptions(options), false);
+ verify(getMapping(indices).setIndicesOptions(options), false);
+ verify(getWarmer(indices).setIndicesOptions(options), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testWildcardBehaviour_snapshotRestore() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
+
+ IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), true);
+ verify(restore("snap1", "foo*", "bar*").setIndicesOptions(options), true);
+
+ options = IndicesOptions.strict();
+ verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), false);
+ verify(restore("snap2", "foo*", "bar*").setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+ options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap3", "foo*", "bar*").setIndicesOptions(options), false);
+ verify(restore("snap3", "foo*", "bar*").setIndicesOptions(options), false);
+
+ options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap4", "foo*", "baz*").setIndicesOptions(options), true);
+ verify(restore("snap3", "foo*", "baz*").setIndicesOptions(options), true);
+ }
+
+ @Test
+ public void testAllMissing_lenient() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test1"));
+ client().prepareIndex("test1", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
+ SearchResponse response = client().prepareSearch("test2")
+ .setIndicesOptions(IndicesOptions.lenient())
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ assertHitCount(response, 0L);
+
+ response = client().prepareSearch("test2","test3").setQuery(matchAllQuery())
+ .setIndicesOptions(IndicesOptions.lenient())
+ .execute().actionGet();
+ assertHitCount(response, 0L);
+
+ //you should still be able to run empty searches without things blowing up
+ response = client().prepareSearch()
+ .setIndicesOptions(IndicesOptions.lenient())
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ assertHitCount(response, 1L);
+ }
+
+ @Test
+ public void testAllMissing_strict() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test1"));
+ ensureYellow();
+ try {
+ client().prepareSearch("test2")
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
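+ // expected: "test2" does not exist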
+ }
+
+ try {
+ client().prepareSearch("test2","test3")
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
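+ // expected: neither index exists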
+ }
+
+ //you should still be able to run empty searches without things blowing up
+ client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ }
+
+ @Test
+ // For now, closed indices are not handled by the indices options
+ public void testCloseApi_specifiedIndices() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(search("test1", "test2"), false);
+ verify(count("test1", "test2"), false);
+ assertAcked(client().admin().indices().prepareClose("test2").get());
+
+ try {
+ search("test1", "test2").get();
+ fail("Exception should have been thrown");
+ } catch (ClusterBlockException e) {
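+ // expected: "test2" is closed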
+ }
+ try {
+ count("test1", "test2").get();
+ fail("Exception should have been thrown");
+ } catch (ClusterBlockException e) {
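+ // expected: "test2" is closed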
+ }
+
+ verify(search(), false);
+ verify(count(), false);
+
+ verify(search("t*"), false);
+ verify(count("t*"), false);
+ }
+
+ @Test
+ public void testCloseApi_wildcards() throws Exception {
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareClose("bar*"), false);
+ verify(client().admin().indices().prepareClose("bar*"), true);
+
+ verify(client().admin().indices().prepareClose("foo*"), false);
+ verify(client().admin().indices().prepareClose("foo*"), true);
+ verify(client().admin().indices().prepareClose("_all"), true);
+
+ verify(client().admin().indices().prepareOpen("bar*"), false);
+ verify(client().admin().indices().prepareOpen("_all"), false);
+ verify(client().admin().indices().prepareOpen("_all"), true);
+ }
+
+ @Test
+ public void testDeleteIndex() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDelete("foo"), true);
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true));
+ verify(client().admin().indices().prepareDelete("foobar"), false);
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteIndex_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDelete("_all"), false);
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDelete("foo*"), false);
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(true));
+
+ verify(client().admin().indices().prepareDelete("foo*"), false);
+
+ verify(client().admin().indices().prepareDelete("_all"), false);
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteMapping() throws Exception {
+ assertAcked(prepareCreate("foobar").addMapping("type1", "field", "type=string"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteMapping("foo").setType("type1"), true);
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type1").get().isExists(), equalTo(true));
+ verify(client().admin().indices().prepareDeleteMapping("foobar").setType("type1"), false);
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type1").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteMapping_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type1"), true);
+
+ assertAcked(prepareCreate("foo").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("foobar").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("bar").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("barbaz").addMapping("type1", "field", "type=string"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type1"), false);
+ assertThat(client().admin().indices().prepareTypesExists("foo").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type1").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type1").get().isExists(), equalTo(true));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type1"), true);
+
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type1"), false);
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type1").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testPutWarmer() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+ verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foobar").setQuery(QueryBuilders.matchAllQuery())), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+
+ }
+
+ @Test
+ public void testPutWarmer_wildcard() throws Exception {
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foo*").setQuery(QueryBuilders.matchAllQuery())), false);
+
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
+
+ verify(client().admin().indices().preparePutWarmer("warmer2").setSearchRequest(client().prepareSearch().setIndices().setQuery(QueryBuilders.matchAllQuery())), false);
+
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+
+ }
+
+ @Test
+ public void testPutAlias() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+ verify(client().admin().indices().prepareAliases().addAlias("foobar", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+
+ }
+
+ @Test
+ public void testPutAlias_wildcard() throws Exception {
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareAliases().addAlias("foo*", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(false));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(false));
+
+ verify(client().admin().indices().prepareAliases().addAlias("*", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(true));
+ }
+
+ @Test
+ public void testDeleteMapping_typeWildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type1"), true);
+
+ assertAcked(prepareCreate("foo").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("foobar").addMapping("type2", "field", "type=string"));
+ assertAcked(prepareCreate("bar").addMapping("type3", "field", "type=string"));
+ assertAcked(prepareCreate("barbaz").addMapping("type4", "field", "type=string"));
+
+ ensureYellow();
+
+ assertThat(client().admin().indices().prepareTypesExists("foo").setTypes("type1").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type2").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type3").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type4").get().isExists(), equalTo(true));
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type*"), false);
+ assertThat(client().admin().indices().prepareTypesExists("foo").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type2").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type3").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type4").get().isExists(), equalTo(true));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type1"), true);
+
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type3","type4"), false);
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type3").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type4").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteWarmer() throws Exception {
+ IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry(
+ "test1", new String[]{"typ1"}, new BytesArray("{\"query\" : { \"match_all\" : {}}}")
+ );
+ assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo").setNames("test1"), true);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foobar").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ public void testDeleteWarmer_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), true);
+
+ IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry(
+ "test1", new String[]{"type1"}, new BytesArray("{\"query\" : { \"match_all\" : {}}}")
+ );
+ assertAcked(prepareCreate("foo").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("bar").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("barbaz").addCustom(new IndexWarmersMetaData(entry)));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), true);
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ // The indices exists API never throws IndexMissingException; the indices options control its behaviour (returning true or false)
+ public void testIndicesExists() throws Exception {
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenient()).get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false));
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testPutMapping() throws Exception {
+ verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true);
+ verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=string"), true);
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type1"), notNullValue());
+ verify(client().admin().indices().preparePutMapping("b*").setType("type1").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type1"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type1"), notNullValue());
+ verify(client().admin().indices().preparePutMapping("_all").setType("type2").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type2"), notNullValue());
+ verify(client().admin().indices().preparePutMapping().setType("type3").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type3"), notNullValue());
+
+
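+ // a wildcard pattern that matches no existing index must make put-mapping fail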
+ verify(client().admin().indices().preparePutMapping("c*").setType("type1").setSource("field", "type=string"), true);
+
+ assertAcked(client().admin().indices().prepareClose("barbaz").get());
+ verify(client().admin().indices().preparePutMapping("barbaz").setType("type4").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type4"), notNullValue());
+ }
+
+ @Test
+ public void testUpdateSettings() throws Exception {
+ verify(client().admin().indices().prepareUpdateSettings("foo").setSettings(ImmutableSettings.builder().put("a", "b")), true);
+ verify(client().admin().indices().prepareUpdateSettings("_all").setSettings(ImmutableSettings.builder().put("a", "b")), true);
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
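+ // "a" is not a dynamic setting, so all indices are closed before updating it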
+ assertAcked(client().admin().indices().prepareClose("_all").get());
+
+ verify(client().admin().indices().prepareUpdateSettings("foo").setSettings(ImmutableSettings.builder().put("a", "b")), false);
+ verify(client().admin().indices().prepareUpdateSettings("bar*").setSettings(ImmutableSettings.builder().put("a", "b")), false);
+ verify(client().admin().indices().prepareUpdateSettings("_all").setSettings(ImmutableSettings.builder().put("c", "d")), false);
+
+ GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("foo").get();
+ assertThat(settingsResponse.getSetting("foo", "index.a"), equalTo("b"));
+ settingsResponse = client().admin().indices().prepareGetSettings("bar*").get();
+ assertThat(settingsResponse.getSetting("bar", "index.a"), equalTo("b"));
+ assertThat(settingsResponse.getSetting("barbaz", "index.a"), equalTo("b"));
+ settingsResponse = client().admin().indices().prepareGetSettings("_all").get();
+ assertThat(settingsResponse.getSetting("foo", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("foobar", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("bar", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("barbaz", "index.c"), equalTo("d"));
+
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+ try {
+ verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(ImmutableSettings.builder().put("e", "f")), false);
+ fail("Expected ElasticsearchIllegalArgumentException: non dynamic settings cannot be updated on open indices");
+ } catch (ElasticsearchIllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices[[barbaz]]"));
+ }
+ verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(ImmutableSettings.builder().put("a", "b")), true);
+ }
+
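+ // Request builder factories for the APIs exercised above; each targets only the given indices.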
+ private static SearchRequestBuilder search(String... indices) {
+ return client().prepareSearch(indices).setQuery(matchAllQuery());
+ }
+
+ private static MultiSearchRequestBuilder msearch(IndicesOptions options, String... indices) {
+ MultiSearchRequestBuilder multiSearchRequestBuilder = client().prepareMultiSearch();
+ if (options != null) {
+ multiSearchRequestBuilder.setIndicesOptions(options);
+ }
+ return multiSearchRequestBuilder.add(client().prepareSearch(indices).setQuery(matchAllQuery()));
+ }
+
+ private static CountRequestBuilder count(String... indices) {
+ return client().prepareCount(indices).setQuery(matchAllQuery());
+ }
+
+ private static ClearIndicesCacheRequestBuilder clearCache(String... indices) {
+ return client().admin().indices().prepareClearCache(indices);
+ }
+
+ private static FlushRequestBuilder _flush(String... indices) {
+ return client().admin().indices().prepareFlush(indices);
+ }
+
+ private static GatewaySnapshotRequestBuilder gatewaySnapshot(String... indices) {
+ return client().admin().indices().prepareGatewaySnapshot(indices);
+ }
+
+ private static IndicesSegmentsRequestBuilder segments(String... indices) {
+ return client().admin().indices().prepareSegments(indices);
+ }
+
+ private static IndicesStatsRequestBuilder stats(String... indices) {
+ return client().admin().indices().prepareStats(indices);
+ }
+
+ private static IndicesStatusRequestBuilder status(String... indices) {
+ return client().admin().indices().prepareStatus(indices);
+ }
+
+ private static OptimizeRequestBuilder optimize(String... indices) {
+ return client().admin().indices().prepareOptimize(indices);
+ }
+
+ private static RefreshRequestBuilder refresh(String... indices) {
+ return client().admin().indices().prepareRefresh(indices);
+ }
+
+ private static ValidateQueryRequestBuilder validateQuery(String... indices) {
+ return client().admin().indices().prepareValidateQuery(indices);
+ }
+
+ private static AliasesExistRequestBuilder aliasExists(String... indices) {
+ return client().admin().indices().prepareAliasesExist("dummy").addIndices(indices);
+ }
+
+ private static TypesExistsRequestBuilder typesExists(String... indices) {
+ return client().admin().indices().prepareTypesExists(indices).setTypes("dummy");
+ }
+
+ private static DeleteByQueryRequestBuilder deleteByQuery(String... indices) {
+ return client().prepareDeleteByQuery(indices).setQuery(boolQuery().mustNot(matchAllQuery()));
+ }
+
+ private static PercolateRequestBuilder percolate(String... indices) {
+ return client().preparePercolate().setIndices(indices)
+ .setSource(new PercolateSourceBuilder().setDoc(docBuilder().setDoc("k", "v")))
+ .setDocumentType("type");
+ }
+
+ private static MultiPercolateRequestBuilder mpercolate(IndicesOptions options, String... indices) {
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ if (options != null) {
+ builder.setIndicesOptions(options);
+ }
+ return builder.add(percolate(indices));
+ }
+
+ private static SuggestRequestBuilder suggest(String... indices) {
+ return client().prepareSuggest(indices).addSuggestion(SuggestBuilder.termSuggestion("name").field("a"));
+ }
+
+ private static GetAliasesRequestBuilder getAliases(String... indices) {
+ return client().admin().indices().prepareGetAliases("dummy").addIndices(indices);
+ }
+
+ private static GetFieldMappingsRequestBuilder getFieldMapping(String... indices) {
+ return client().admin().indices().prepareGetFieldMappings(indices);
+ }
+
+ private static GetMappingsRequestBuilder getMapping(String... indices) {
+ return client().admin().indices().prepareGetMappings(indices);
+ }
+
+ private static GetWarmersRequestBuilder getWarmer(String... indices) {
+ return client().admin().indices().prepareGetWarmers(indices);
+ }
+
+ private static GetSettingsRequestBuilder getSettings(String... indices) {
+ return client().admin().indices().prepareGetSettings(indices);
+ }
+
+ private static CreateSnapshotRequestBuilder snapshot(String name, String... indices) {
+ return client().admin().cluster().prepareCreateSnapshot("dummy-repo", name).setWaitForCompletion(true).setIndices(indices);
+ }
+
+ private static RestoreSnapshotRequestBuilder restore(String name, String... indices) {
+ return client().admin().cluster().prepareRestoreSnapshot("dummy-repo", name)
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy-" + name)
+ .setWaitForCompletion(true)
+ .setIndices(indices);
+ }
+
+ private static void verify(ActionRequestBuilder requestBuilder, boolean fail) {
+ verify(requestBuilder, fail, 0);
+ }
+
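+ // Asserts the outcome of a request: when fail is true an IndexMissingException is expected
+ // (for multi-search, a null item response instead of a thrown exception); when false the
+ // request must succeed and search/count responses must contain exactly expectedCount hits.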
+ private static void verify(ActionRequestBuilder requestBuilder, boolean fail, long expectedCount) {
+ if (fail) {
+ if (requestBuilder instanceof MultiSearchRequestBuilder) {
+ MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
+ assertThat(multiSearchResponse.getResponses().length, equalTo(1));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue());
+ } else {
+ try {
+ requestBuilder.get();
+ fail("IndexMissingException was expected");
+ } catch (IndexMissingException e) {}
+ }
+ } else {
+ if (requestBuilder instanceof SearchRequestBuilder) {
+ SearchRequestBuilder searchRequestBuilder = (SearchRequestBuilder) requestBuilder;
+ assertHitCount(searchRequestBuilder.get(), expectedCount);
+ } else if (requestBuilder instanceof CountRequestBuilder) {
+ CountRequestBuilder countRequestBuilder = (CountRequestBuilder) requestBuilder;
+ assertHitCount(countRequestBuilder.get(), expectedCount);
+ } else if (requestBuilder instanceof MultiSearchRequestBuilder) {
+ MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
+ assertThat(multiSearchResponse.getResponses().length, equalTo(1));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), notNullValue());
+ } else {
+ requestBuilder.get();
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java
new file mode 100644
index 0000000..fdb5ab0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.AnalysisModule;
+
+/**
+ * Binds the dummy analyzer, tokenizer, token filter and char filter under well-known names.
+ */
+public class DummyAnalysisBinderProcessor extends AnalysisModule.AnalysisBinderProcessor {
+
+ @Override
+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {
+ analyzersBindings.processAnalyzer("dummy", DummyAnalyzerProvider.class);
+ }
+
+ @Override
+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
+ tokenFiltersBindings.processTokenFilter("dummy_token_filter", DummyTokenFilterFactory.class);
+ }
+
+ @Override
+ public void processTokenizers(TokenizersBindings tokenizersBindings) {
+ tokenizersBindings.processTokenizer("dummy_tokenizer", DummyTokenizerFactory.class);
+ }
+
+ @Override
+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {
+ charFiltersBindings.processCharFilter("dummy_char_filter", DummyCharFilterFactory.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java
new file mode 100644
index 0000000..55d22eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.plugins.AbstractPlugin;
+
+import java.util.Collection;
+
+public class DummyAnalysisPlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "analysis-dummy";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "Analysis Dummy Plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> modules() {
+ return ImmutableList.<Class<? extends Module>>of(DummyIndicesAnalysisModule.class);
+ }
+
+ public void onModule(AnalysisModule module) {
+ module.addProcessor(new DummyAnalysisBinderProcessor());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
new file mode 100644
index 0000000..d413096
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.util.Version;
+
+import java.io.Reader;
+
+public class DummyAnalyzer extends StopwordAnalyzerBase {
+
+ protected DummyAnalyzer(Version version) {
+ super(version);
+ }
+
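+ // Returning null is acceptable for these tests: only registration and lifecycle are
+ // exercised, the analyzer is never asked to tokenize anything.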
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java
new file mode 100644
index 0000000..0c4b48b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.analysis.AnalyzerProvider;
+import org.elasticsearch.index.analysis.AnalyzerScope;
+
+public class DummyAnalyzerProvider implements AnalyzerProvider<DummyAnalyzer> {
+ @Override
+ public String name() {
+ return "dummy";
+ }
+
+ @Override
+ public AnalyzerScope scope() {
+ return AnalyzerScope.INDICES;
+ }
+
+ @Override
+ public DummyAnalyzer get() {
+ return new DummyAnalyzer(Lucene.ANALYZER_VERSION);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java b/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java
new file mode 100644
index 0000000..8c5896e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.CharFilterFactory;
+
+import java.io.Reader;
+
+public class DummyCharFilterFactory implements CharFilterFactory {
+ @Override
+ public String name() {
+ return "dummy_char_filter";
+ }
+
+ @Override
+ public Reader create(Reader reader) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java
new file mode 100644
index 0000000..c48edb1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.*;
+
+public class DummyIndicesAnalysis extends AbstractComponent {
+
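+ // Registers pre-built versions of the dummy components at the indices level, mirroring
+ // how Elasticsearch exposes its own pre-built analysis components.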
+ @Inject
+ public DummyIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {
+ super(settings);
+ indicesAnalysisService.analyzerProviderFactories().put("dummy",
+ new PreBuiltAnalyzerProviderFactory("dummy", AnalyzerScope.INDICES,
+ new DummyAnalyzer(Lucene.ANALYZER_VERSION)));
+ indicesAnalysisService.tokenFilterFactories().put("dummy_token_filter",
+ new PreBuiltTokenFilterFactoryFactory(new DummyTokenFilterFactory()));
+ indicesAnalysisService.charFilterFactories().put("dummy_char_filter",
+ new PreBuiltCharFilterFactoryFactory(new DummyCharFilterFactory()));
+ indicesAnalysisService.tokenizerFactories().put("dummy_tokenizer",
+ new PreBuiltTokenizerFactoryFactory(new DummyTokenizerFactory()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java
new file mode 100644
index 0000000..9d14f67
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class DummyIndicesAnalysisModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(DummyIndicesAnalysis.class).asEagerSingleton();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java
new file mode 100644
index 0000000..489e4dc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+
+public class DummyTokenFilterFactory implements TokenFilterFactory {
+ @Override
+ public String name() {
+ return "dummy_token_filter";
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java
new file mode 100644
index 0000000..95c6a5e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.elasticsearch.index.analysis.TokenizerFactory;
+
+import java.io.Reader;
+
+public class DummyTokenizerFactory implements TokenizerFactory {
+ @Override
+ public String name() {
+ return "dummy_tokenizer";
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java b/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java
new file mode 100644
index 0000000..af5df68
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.lang.reflect.Field;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Integration tests verifying that pre-built analyzers are cached per version and survive index close and plugin upgrades.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder()
+ .put("plugin.types", DummyAnalysisPlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
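+ // Pre-built analyzers are cached per Elasticsearch version and shared across indices;
+ // closing one index must never close an analyzer that other indices may still use.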
+ @Test
+ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception {
+ Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers = Maps.newHashMap();
+
+ List<String> indexNames = Lists.newArrayList();
+ for (int i = 0; i < 10; i++) {
+ String indexName = randomAsciiOfLength(10).toLowerCase(Locale.ROOT);
+ indexNames.add(indexName);
+
+ int randomInt = randomInt(PreBuiltAnalyzers.values().length-1);
+ PreBuiltAnalyzers preBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
+ String name = preBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
+
+ Version randomVersion = randomVersion();
+ if (!loadedAnalyzers.containsKey(preBuiltAnalyzer)) {
+ loadedAnalyzers.put(preBuiltAnalyzer, Lists.<Version>newArrayList());
+ }
+ loadedAnalyzers.get(preBuiltAnalyzer).add(randomVersion);
+
+ final XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", name)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ Settings versionSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
+ client().admin().indices().prepareCreate(indexName).addMapping("type", mapping).setSettings(versionSettings).get();
+ }
+
+ ensureGreen();
+
+ // index some amount of data
+ for (int i = 0; i < 100; i++) {
+ String randomIndex = indexNames.get(randomInt(indexNames.size()-1));
+ String randomId = randomInt() + "";
+
+ Map<String, Object> data = Maps.newHashMap();
+ data.put("foo", randomAsciiOfLength(50));
+
+ index(randomIndex, "type", randomId, data);
+ }
+
+ refresh();
+
+ // close some of the indices
+ int amountOfIndicesToClose = randomInt(10-1);
+ for (int i = 0; i < amountOfIndicesToClose; i++) {
+ String indexName = indexNames.get(i);
+ client().admin().indices().prepareClose(indexName).execute().actionGet();
+ }
+
+ ensureGreen();
+
+ // check that all above configured analyzers have been loaded
+ assertThatAnalyzersHaveBeenLoaded(loadedAnalyzers);
+
+ // check that all of the prebuiltanalyzers are still open
+ assertLuceneAnalyzersAreNotClosed(loadedAnalyzers);
+ }
+
+ /**
+ * Test case for #5030: Upgrading analysis plugins fails
+ * See https://github.com/elasticsearch/elasticsearch/issues/5030
+ */
+ @Test
+ public void testThatPluginAnalyzersCanBeUpdated() throws Exception {
+ final XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "dummy")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "my_dummy")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
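+ // Wire a custom analyzer from the plugin-provided dummy tokenizer, token filter and char
+ // filter; index creation succeeds only if the plugin registered those components.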
+ Settings versionSettings = ImmutableSettings.builder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion())
+ .put("index.analysis.analyzer.my_dummy.type", "custom")
+ .put("index.analysis.analyzer.my_dummy.filter", "my_dummy_token_filter")
+ .put("index.analysis.analyzer.my_dummy.char_filter", "my_dummy_char_filter")
+ .put("index.analysis.analyzer.my_dummy.tokenizer", "my_dummy_tokenizer")
+ .put("index.analysis.tokenizer.my_dummy_tokenizer.type", "dummy_tokenizer")
+ .put("index.analysis.filter.my_dummy_token_filter.type", "dummy_token_filter")
+ .put("index.analysis.char_filter.my_dummy_char_filter.type", "dummy_char_filter")
+ .build();
+
+ client().admin().indices().prepareCreate("test-analysis-dummy").addMapping("type", mapping).setSettings(versionSettings).get();
+
+ ensureGreen();
+ }
+
+ private void assertThatAnalyzersHaveBeenLoaded(Map<PreBuiltAnalyzers, List<Version>> expectedLoadedAnalyzers) {
+ for (Map.Entry<PreBuiltAnalyzers, List<Version>> entry : expectedLoadedAnalyzers.entrySet()) {
+ for (Version version : entry.getValue()) {
+ // if it is not null in the cache, it has been loaded
+ assertThat(entry.getKey().getCache().get(version), is(notNullValue()));
+ }
+ }
+ }
+
+ // Lucene's Analyzer.close() sets its storedValue field to null, so a non-null storedValue
+ // means the analyzer is still open; we check this via reflection - ugly, but it works.
+ private void assertLuceneAnalyzersAreNotClosed(Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers) throws IllegalAccessException, NoSuchFieldException {
+ for (Map.Entry<PreBuiltAnalyzers, List<Version>> preBuiltAnalyzerEntry : loadedAnalyzers.entrySet()) {
+ PreBuiltAnalyzers preBuiltAnalyzer = preBuiltAnalyzerEntry.getKey();
+ for (Version version : preBuiltAnalyzerEntry.getValue()) {
+ Analyzer analyzer = preBuiltAnalyzerEntry.getKey().getCache().get(version);
+
+ Field field = getFieldFromClass("storedValue", analyzer);
+ boolean currentAccessible = field.isAccessible();
+ field.setAccessible(true);
+ Object storedValue = field.get(analyzer);
+ field.setAccessible(currentAccessible);
+
+ assertThat(String.format(Locale.ROOT, "Analyzer %s in version %s seems to be closed", preBuiltAnalyzer.name(), version), storedValue, is(notNullValue()));
+ }
+ }
+ }
+
+ /**
+ * Looks up a declared field by name, walking up the class hierarchy until it is found.
+ */
+ private Field getFieldFromClass(String fieldName, Object obj) {
+ Field field = null;
+ boolean fieldFound = false;
+ Class<?> clazz = obj.getClass();
+ while (!fieldFound) {
+ try {
+ field = clazz.getDeclaredField(fieldName);
+ fieldFound = true;
+ } catch (NoSuchFieldException e) {
+ clazz = clazz.getSuperclass();
+ }
+
+ if (Object.class.equals(clazz)) throw new RuntimeException("Could not find field " + fieldName + " in class " + clazz);
+ }
+
+ return field;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java
new file mode 100644
index 0000000..f33ce30
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analyze;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests for the _analyze API.
+ */
+public class AnalyzeActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleAnalyzerTests() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("test", "this is a test").execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+ AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0);
+ assertThat(token.getTerm(), equalTo("this"));
+ assertThat(token.getStartOffset(), equalTo(0));
+ assertThat(token.getEndOffset(), equalTo(4));
+ token = analyzeResponse.getTokens().get(1);
+ assertThat(token.getTerm(), equalTo("is"));
+ assertThat(token.getStartOffset(), equalTo(5));
+ assertThat(token.getEndOffset(), equalTo(7));
+ token = analyzeResponse.getTokens().get(2);
+ assertThat(token.getTerm(), equalTo("a"));
+ assertThat(token.getStartOffset(), equalTo(8));
+ assertThat(token.getEndOffset(), equalTo(9));
+ token = analyzeResponse.getTokens().get(3);
+ assertThat(token.getTerm(), equalTo("test"));
+ assertThat(token.getStartOffset(), equalTo(10));
+ assertThat(token.getEndOffset(), equalTo(14));
+ }
+ }
+
+ @Test
+ public void analyzeNumericField() throws ElasticsearchException, IOException {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ createIndex("test");
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "test", "1")
+ .setSource(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("long", 1l)
+ .field("double", 1.0d)
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+
+ try {
+ client().admin().indices().prepareAnalyze("test", "123").setField("long").execute().actionGet();
+ fail("Expected ElasticsearchIllegalArgumentException: numeric fields cannot be analyzed");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+ try {
+ client().admin().indices().prepareAnalyze("test", "123.0").setField("double").execute().actionGet();
+ fail("Expected ElasticsearchIllegalArgumentException: numeric fields cannot be analyzed");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+ }
+
+ @Test
+ public void analyzeWithNoIndex() throws Exception {
+
+ AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setAnalyzer("simple").execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+
+ analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("keyword").setTokenFilters("lowercase").execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+ assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
+ }
+
+ @Test
+ public void analyzerWithFieldOrTypeTests() throws Exception {
+
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().preparePutMapping("test")
+ .setType("document").setSource(
+ "{\n" +
+ " \"document\":{\n" +
+ " \"properties\":{\n" +
+ " \"simple\":{\n" +
+ " \"type\":\"string\",\n" +
+ " \"analyzer\": \"simple\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}"
+ ).execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ final AnalyzeRequestBuilder requestBuilder = client().admin().indices().prepareAnalyze("test", "THIS IS A TEST");
+ requestBuilder.setField("document.simple");
+ AnalyzeResponse analyzeResponse = requestBuilder.execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+ AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(3);
+ assertThat(token.getTerm(), equalTo("test"));
+ assertThat(token.getStartOffset(), equalTo(10));
+ assertThat(token.getEndOffset(), equalTo(14));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java b/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java
new file mode 100644
index 0000000..e3a1587
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analyze;
+
+import org.apache.lucene.analysis.hunspell.HunspellDictionary;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.HunspellService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for node-level Hunspell dictionary configuration.
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class HunspellServiceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testLocaleDirectoryWithNodeLevelConfig() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("indices.analysis.hunspell.dictionary.lazy", true)
+ .put("indices.analysis.hunspell.dictionary.ignore_case", true)
+ .build();
+
+ cluster().startNode(settings);
+ HunspellDictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
+ assertThat(dictionary, notNullValue());
+ Version expectedVersion = Lucene.parseVersion(settings.get("indices.analysis.hunspell.version"), Lucene.ANALYZER_VERSION, logger);
+ assertThat(dictionary.getVersion(), equalTo(expectedVersion));
+ assertThat(dictionary.isIgnoreCase(), equalTo(true));
+ }
+
+ @Test
+ public void testLocaleDirectoryWithLocaleSpecificConfig() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("indices.analysis.hunspell.dictionary.lazy", true)
+ .put("indices.analysis.hunspell.dictionary.ignore_case", true)
+ .put("indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing", false)
+ .put("indices.analysis.hunspell.dictionary.en_US.ignore_case", false)
+ .build();
+
+ cluster().startNode(settings);
+ HunspellDictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
+ assertThat(dictionary, notNullValue());
+ Version expectedVersion = Lucene.parseVersion(settings.get("indices.analysis.hunspell.version"), Lucene.ANALYZER_VERSION, logger);
+ assertThat(dictionary.getVersion(), equalTo(expectedVersion));
+ assertThat(dictionary.isIgnoreCase(), equalTo(false));
+
+
+ // testing that dictionary specific settings override node level settings
+ dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US_custom");
+ assertThat(dictionary, notNullValue());
+ assertThat(dictionary.getVersion(), equalTo(expectedVersion));
+ assertThat(dictionary.isIgnoreCase(), equalTo(true));
+ }
+
+ @Test
+ public void testCustomizeLocaleDirectory() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("indices.analysis.hunspell.dictionary.location", getResource("/indices/analyze/conf_dir/hunspell"))
+ .build();
+
+ cluster().startNode(settings);
+ HunspellDictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
+ assertThat(dictionary, notNullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/cache/CacheTests.java b/src/test/java/org/elasticsearch/indices/cache/CacheTests.java
new file mode 100644
index 0000000..7441a32
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/cache/CacheTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cache;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for filter cache and field data cache statistics and clearing.
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=1)
+public class CacheTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ // The filter cache is cleaned periodically (default interval 60s); shorten the interval so the tests do not have to sleep for a minute.
+ return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("indices.cache.filter.clean_interval", "1ms").build();
+ }
+
+ @Test
+ public void testClearCacheFilterKeys() {
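+ // Cache one filter under an explicit cache key, then evict exactly that key and verify
+ // the filter cache memory usage drops back to zero.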
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), FilterBuilders.termFilter("field", "value").cacheKey("test_key"))).execute().actionGet();
+ assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache().setFilterKeys("test_key").execute().actionGet();
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+ @Test
+ public void testFieldDataStats() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "field2", "value1").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "field2", "value2").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ // sort to load it to field data...
+ client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
+ client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
+
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+
+ // sort to load it to field data...
+ client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
+ client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
+
+ // now check the per field stats
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*")).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getFields().get("field"), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getFields().get("field"), lessThan(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes()));
+
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes()));
+
+ client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ }
+
+ @Test
+ public void testClearAllCaches() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value1").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ // sort to load it to field data and filter to load filter cache
+ client().prepareSearch()
+ .setPostFilter(FilterBuilders.termFilter("field", "value1"))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ client().prepareSearch()
+ .setPostFilter(FilterBuilders.termFilter("field", "value2"))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache().execute().actionGet();
+ Thread.sleep(100); // Make sure the filter cache entries have been removed...
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java
new file mode 100644
index 0000000..778723b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.exists.types;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class TypesExistsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple() throws Exception {
+ Client client = client();
+ client.admin().indices().prepareCreate("test1")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().prepareCreate("test2")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().prepareAliases().addAlias("test1", "alias1").execute().actionGet();
+ ClusterHealthResponse healthResponse = client.admin().cluster()
+ .prepareHealth("test1", "test2").setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ TypesExistsResponse response = client.admin().indices().prepareTypesExists("test1").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1").setTypes("type2").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1").setTypes("type3").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
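+ // querying a non-existent index must throw IndexMissingException rather than report false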
+ try {
+ client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (IndexMissingException e) {}
+ try {
+ client.admin().indices().prepareTypesExists("notExist").setTypes("type0").execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (IndexMissingException e) {}
+ response = client.admin().indices().prepareTypesExists("alias1").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("*").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type2").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java b/src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java
new file mode 100644
index 0000000..2be096a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+
+/**
+ * Integration tests for InternalCircuitBreakerService
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest {
+
+ private String randomRidiculouslySmallLimit() {
+ // two different ways to say 100 bytes (a percentage variant is disabled below)
+ return randomFrom(Arrays.asList("100b", "100"));
+ //, (10000. / JvmInfo.jvmInfo().getMem().getHeapMax().bytes()) + "%")); // prone to rounding errors; fails if the JVM heap size changes!
+ }
+
+ @Test
+ @TestLogging("org.elasticsearch.indices.fielddata.breaker:TRACE,org.elasticsearch.index.fielddata:TRACE,org.elasticsearch.common.breaker:TRACE")
+ public void testMemoryBreaker() {
+ assertAcked(prepareCreate("cb-test", 1));
+ final Client client = client();
+
+ try {
+
+ // index some different terms so we have some field data for loading
+ int docCount = atLeast(300);
+ for (long id = 0; id < docCount; id++) {
+ client.prepareIndex("cb-test", "type", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ }
+
+ // refresh
+ refresh();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ client.prepareSearch("cb-test").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+
+ // clear field data cache (thus setting the loaded field data back to 0)
+ client.admin().indices().prepareClearCache("cb-test").setFieldDataCache(true).execute().actionGet();
+
+ // Update circuit breaker settings
+ Settings settings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, randomRidiculouslySmallLimit())
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ // again, this time it should trip the breaker
+ try {
+ SearchResponse resp = client.prepareSearch("cb-test").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+ assertFailures(resp);
+ } catch (SearchPhaseExecutionException e) {
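+ // expected: the query tripped the circuit breaker while loading field data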
+ }
+
+ } finally {
+ // Reset settings
+ Settings resetSettings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, "-1")
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, InternalCircuitBreakerService.DEFAULT_OVERHEAD_CONSTANT)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet();
+ }
+ }
+
+ @Test
+ @TestLogging("org.elasticsearch.indices.fielddata.breaker:TRACE,org.elasticsearch.index.fielddata:TRACE,org.elasticsearch.common.breaker:TRACE")
+ public void testRamAccountingTermsEnum() {
+ final Client client = client();
+
+ try {
+
+ // Create an index where the mappings have a field data filter
+ client.admin().indices().prepareCreate("ramtest").setSource("{\"mappings\": {\"type\": {\"properties\": {\"test\": " +
+ "{\"type\": \"string\",\"fielddata\": {\"filter\": {\"regex\": {\"pattern\": \"^value.*\"}}}}}}}}").execute().actionGet();
+
+ // Wait 10 seconds for green
+ client.admin().cluster().prepareHealth("ramtest").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ // index some different terms so we have some field data for loading
+ int docCount = atLeast(300);
+ for (long id = 0; id < docCount; id++) {
+ client.prepareIndex("ramtest", "type", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ }
+
+ // refresh
+ refresh();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ client.prepareSearch("ramtest").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+
+ // clear field data cache (thus setting the loaded field data back to 0)
+ client.admin().indices().prepareClearCache("ramtest").setFieldDataCache(true).execute().actionGet();
+
+ // Update circuit breaker settings
+ Settings settings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, randomRidiculouslySmallLimit())
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ // again, this time it should trip the breaker
+ try {
+ SearchResponse resp = client.prepareSearch("ramtest").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+ assertFailures(resp);
+ } catch (SearchPhaseExecutionException e) {
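+ // expected: the query tripped the circuit breaker while loading field data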
+ }
+
+ } finally {
+ // Reset settings
+ Settings resetSettings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, "-1")
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, InternalCircuitBreakerService.DEFAULT_OVERHEAD_CONSTANT)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java b/src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java
new file mode 100644
index 0000000..1b3271c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+/**
+ * Class that returns a breaker that never breaks
+ */
+public class DummyCircuitBreakerService implements CircuitBreakerService {
+
+ private final ESLogger logger = Loggers.getLogger(DummyCircuitBreakerService.class);
+
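+ // with a Long.MAX_VALUE limit and zero overhead this breaker can never trip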
+ private final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue(Long.MAX_VALUE), 0.0, logger);
+
+ public DummyCircuitBreakerService() {}
+
+ @Override
+ public MemoryCircuitBreaker getBreaker() {
+ return breaker;
+ }
+
+ @Override
+ public FieldDataBreakerStats stats() {
+ return new FieldDataBreakerStats(-1, -1, 0);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java b/src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java
new file mode 100644
index 0000000..7be3274
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.engine.MockInternalEngine;
+import org.elasticsearch.test.engine.ThrowingAtomicReaderWrapper;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the circuit breaker while random exceptions are happening
+ */
+public class RandomExceptionCircuitBreakerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
+ final int numShards = between(1, 5);
+ final int numReplicas = randomIntBetween(0, 1);
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("test-str")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", randomBytesFieldDataFormat())
+ .endObject() // fielddata
+ .endObject() // test-str
+ .startObject("test-num")
+ // randomNumericType() is not used here because we don't want "byte" but do want "float" and "double"
+ .field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer")))
+ .startObject("fielddata")
+ .field("format", randomNumericFieldDataFormat())
+ .endObject() // fielddata
+ .endObject() // test-num
+ .endObject() // properties
+ .endObject() // type
+ .endObject() // {}
+ .string();
+ final double topLevelRate;
+ final double lowLevelRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 0.0d;
+ } else {
+ topLevelRate = 1.0/between(2, 10);
+ lowLevelRate = 0.0d;
+ }
+ } else {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 1.0/between(2, 10);
+ }
+ } else {
+ // rarely no exception
+ topLevelRate = 0d;
+ lowLevelRate = 0d;
+ }
+
+ ImmutableSettings.Builder settings = settingsBuilder()
+ .put("index.number_of_shards", numShards)
+ .put("index.number_of_replicas", numReplicas)
+ .put(MockInternalEngine.READER_WRAPPER_TYPE, RandomExceptionDirectoryReaderWrapper.class.getName())
+ .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
+ .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
+ .put(MockInternalEngine.WRAP_READER_RATIO, 1.0d);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here
+ final int numDocs;
+ if (clusterHealthResponse.isTimedOut()) {
+ /* Some seeds won't let us create the index at all, and we enter a ping-pong mode,
+ * trying one node after another. That is OK, but we must not wait forever when
+ * indexing documents, so we set numDocs = 1 and expect all shards to fail
+ * when we search below. */
+ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
+ numDocs = 1;
+ } else {
+ numDocs = between(10, 100);
+ }
+ for (int i = 0; i < numDocs ; i++) {
+ try {
+ client().prepareIndex("test", "type", "" + i)
+ .setTimeout(TimeValue.timeValueSeconds(1)).setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i).get();
+ } catch (ElasticsearchException ex) {
+ }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
+ refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
+ refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+ final int numSearches = atLeast(50);
+ NodesStatsResponse resp = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : resp.getNodes()) {
+ assertThat("Breaker is set to 0", stats.getBreaker().getEstimated(), equalTo(0L));
+ }
+
+ for (int i = 0; i < numSearches; i++) {
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
+ switch(randomIntBetween(0, 5)) {
+ case 5:
+ case 4:
+ case 3:
+ searchRequestBuilder.addSort("test-str", SortOrder.ASC);
+ // fall through - sometimes get both fields
+ case 2:
+ case 1:
+ default:
+ searchRequestBuilder.addSort("test-num", SortOrder.ASC);
+
+ }
+ boolean success = false;
+ try {
+ // Sort by the string and numeric fields, to load them into field data
+ searchRequestBuilder.get();
+ success = true;
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+
+ if (frequently()) {
+ // Now, clear the cache and check that the circuit breaker has been
+ // successfully set back to zero. If there is a bug in the circuit
+ // breaker adjustment code, it should show up here by the breaker
+ // estimate being either positive or negative.
+ client().admin().indices().prepareClearCache("test").setFieldDataCache(true).execute().actionGet();
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Breaker reset to 0 last search success: " + success + " mapping: " + mapping, stats.getBreaker().getEstimated(), equalTo(0L));
+ }
+ }
+ }
+ }
+
+ public static final String EXCEPTION_TOP_LEVEL_RATIO_KEY = "index.engine.exception.ratio.top";
+ public static final String EXCEPTION_LOW_LEVEL_RATIO_KEY = "index.engine.exception.ratio.low";
+
+ // TODO: Generalize this class and add it as a utility
+ public static class RandomExceptionDirectoryReaderWrapper extends MockInternalEngine.DirectoryReaderWrapper {
+ private final Settings settings;
+ static class ThrowingSubReaderWrapper extends SubReaderWrapper implements ThrowingAtomicReaderWrapper.Thrower {
+ private final Random random;
+ private final double topLevelRatio;
+ private final double lowLevelRatio;
+
+ ThrowingSubReaderWrapper(Settings settings) {
+ final long seed = settings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0l);
+ this.topLevelRatio = settings.getAsDouble(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d);
+ this.lowLevelRatio = settings.getAsDouble(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d);
+ this.random = new Random(seed);
+ }
+
+ @Override
+ public AtomicReader wrap(AtomicReader reader) {
+ return new ThrowingAtomicReaderWrapper(reader, this);
+ }
+
+ @Override
+ public void maybeThrow(ThrowingAtomicReaderWrapper.Flags flag) throws IOException {
+ switch (flag) {
+ case Fields:
+ break;
+ case TermVectors:
+ break;
+ case Terms:
+ case TermsEnum:
+ if (random.nextDouble() < topLevelRatio) {
+ throw new IOException("Forced top level Exception on [" + flag.name() + "]");
+ }
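+ // intentional fall-through to the break below when no exception is thrown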
+ case Intersect:
+ break;
+ case Norms:
+ break;
+ case NumericDocValues:
+ break;
+ case BinaryDocValues:
+ break;
+ case SortedDocValues:
+ break;
+ case SortedSetDocValues:
+ break;
+ case DocsEnum:
+ case DocsAndPositionsEnum:
+ if (random.nextDouble() < lowLevelRatio) {
+ throw new IOException("Forced low level Exception on [" + flag.name() + "]");
+ }
+ break;
+ }
+ }
+
+ public boolean wrapTerms(String field) {
+ return field.startsWith("test");
+ }
+ }
+
+
+ public RandomExceptionDirectoryReaderWrapper(DirectoryReader in, Settings settings) {
+ super(in, new ThrowingSubReaderWrapper(settings));
+ this.settings = settings;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
+ return new RandomExceptionDirectoryReaderWrapper(in, settings);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java
new file mode 100644
index 0000000..e7a0628
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.leaks;
+
+import org.apache.lucene.util.LuceneTestCase.BadApple;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=1)
+public class IndicesLeaksTests extends ElasticsearchIntegrationTest {
+
+
+ @SuppressWarnings({"ConstantConditions", "unchecked"})
+ @Test
+ @BadApple
+ public void testIndexShardLifecycleLeak() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+
+ IndicesService indicesService = cluster().getInstance(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe("test");
+ Injector indexInjector = indexService.injector();
+ IndexShard shard = indexService.shardSafe(0);
+ Injector shardInjector = indexService.shardInjector(0);
+
+ performCommonOperations();
+
+ List<WeakReference> indexReferences = new ArrayList<WeakReference>();
+ List<WeakReference> shardReferences = new ArrayList<WeakReference>();
+
+ // TODO: if we could iterate over the classes already created on the injector, we could just add them all to the list;
+ // for now, we simply add some classes that make sense
+
+ // add index references
+ indexReferences.add(new WeakReference(indexService));
+ indexReferences.add(new WeakReference(indexInjector));
+ indexReferences.add(new WeakReference(indexService.mapperService()));
+ for (DocumentMapper documentMapper : indexService.mapperService()) {
+ indexReferences.add(new WeakReference(documentMapper));
+ }
+ indexReferences.add(new WeakReference(indexService.aliasesService()));
+ indexReferences.add(new WeakReference(indexService.analysisService()));
+ indexReferences.add(new WeakReference(indexService.fieldData()));
+ indexReferences.add(new WeakReference(indexService.queryParserService()));
+
+
+ // add shard references
+ shardReferences.add(new WeakReference(shard));
+ shardReferences.add(new WeakReference(shardInjector));
+
+ indexService = null;
+ indexInjector = null;
+ shard = null;
+ shardInjector = null;
+
+ client().admin().indices().prepareDelete().execute().actionGet();
+
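+ // after the index is deleted, repeated GC passes should let the weak references clear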
+ for (int i = 0; i < 100; i++) {
+ System.gc();
+ int indexNotCleared = 0;
+ for (WeakReference indexReference : indexReferences) {
+ if (indexReference.get() != null) {
+ indexNotCleared++;
+ }
+ }
+ int shardNotCleared = 0;
+ for (WeakReference shardReference : shardReferences) {
+ if (shardReference.get() != null) {
+ shardNotCleared++;
+ }
+ }
+ logger.info("round {}, indices {}/{}, shards {}/{}", i, indexNotCleared, indexReferences.size(), shardNotCleared, shardReferences.size());
+ if (indexNotCleared == 0 && shardNotCleared == 0) {
+ break;
+ }
+ }
+
+
+ for (WeakReference indexReference : indexReferences) {
+ assertThat("dangling index reference: " + indexReference.get(), indexReference.get(), nullValue());
+ }
+
+ for (WeakReference shardReference : shardReferences) {
+ assertThat("dangling shard reference: " + shardReference.get(), shardReference.get(), nullValue());
+ }
+ }
+
+ private void performCommonOperations() {
+ client().prepareIndex("test", "type", "1").setSource("field1", "value", "field2", 2, "field3", 3.0f).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ client().prepareSearch("test").setQuery(QueryBuilders.queryString("field1:value")).execute().actionGet();
+ client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value")).execute().actionGet();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java
new file mode 100644
index 0000000..fccccbc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.emptyIterable;
+
+public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest {
+
+ private final String mappingType = "test-mapping";
+
+ @Test // see #3544
+ public void testConcurrentDynamicMapping() throws Exception {
+ final String fieldName = "field";
+ final String mapping = "{ \"" + mappingType + "\": {" +
+ "\"dynamic_templates\": ["
+ + "{ \"" + fieldName + "\": {" + "\"path_match\": \"*\"," + "\"mapping\": {" + "\"type\": \"string\"," + "\"store\": \"yes\","
+ + "\"index\": \"analyzed\", \"analyzer\": \"whitespace\" } } } ] } }";
+ // the dynamic template maps every new field to a stored, analyzed string using the whitespace analyzer
+
+ int iters = atLeast(5);
+ for (int i = 0; i < iters; i++) {
+ cluster().wipeIndices("test");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("number_of_shards", between(1, 5))
+ .put("number_of_replicas", between(0, 1)).build())
+ .addMapping(mappingType, mapping).execute().actionGet();
+ ensureYellow();
+ int numDocs = atLeast(10);
+ final CountDownLatch latch = new CountDownLatch(numDocs);
+ final List<Throwable> throwable = new CopyOnWriteArrayList<Throwable>();
+ int currentID = 0;
+ for (int j = 0; j < numDocs; j++) {
+ Map<String, Object> source = new HashMap<String, Object>();
+ source.put(fieldName, "test-user");
+ client().prepareIndex("test", mappingType, Integer.toString(currentID++)).setSource(source).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ throwable.add(e);
+ latch.countDown();
+ }
+ });
+ }
+ latch.await();
+ assertThat(throwable, emptyIterable());
+ refresh();
+ assertHitCount(client().prepareSearch("test").setQuery(QueryBuilders.matchQuery(fieldName, "test-user")).get(), numDocs);
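+ // "test-user" is a single whitespace token, so the analyzed query "test user" must not match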
+ assertHitCount(client().prepareSearch("test").setQuery(QueryBuilders.matchQuery(fieldName, "test user")).get(), 0);
+
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java
new file mode 100644
index 0000000..bdac39d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ */
+@LuceneTestCase.Slow
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class DedicatedMasterGetFieldMappingTests extends SimpleGetFieldMappingsTests {
+
+ @Before
+ public void before1() {
+ Settings settings = settingsBuilder()
+ .put("node.data", false)
+ .build();
+ cluster().startNode(settings);
+ cluster().startNode(ImmutableSettings.EMPTY);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java
new file mode 100644
index 0000000..3499b88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleDeleteMappingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleDeleteMapping() throws Exception {
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "test" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ ensureGreen();
+ refresh();
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(10l));
+ }
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(clusterState.metaData().index("test").mappings().containsKey("type1"), equalTo(true));
+
+ GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("test").setTypes("type1").execute().actionGet();
+ assertThat(mappingsResponse.getMappings().get("test").get("type1"), notNullValue());
+
+ ElasticsearchAssertions.assertAcked(client().admin().indices().prepareDeleteMapping().setIndices("test").setType("type1"));
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(0l));
+ }
+
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(clusterState.metaData().index("test").mappings().containsKey("type1"), equalTo(false));
+ mappingsResponse = client().admin().indices().prepareGetMappings("test").setTypes("type1").execute().actionGet();
+ assertThat(mappingsResponse.getMappings().get("test"), nullValue());
+ }
+
+
+ @Test
+ public void deleteMappingAllowNoBlankIndexAndNoEmptyStrings() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("index1").addMapping("1", "field1", "type=string").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").addMapping("1", "field1", "type=string").get());
+
+ // Should succeed, since no wildcards
+ client().admin().indices().prepareDeleteMapping("1index").setType("1").get();
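+ // "_all", an empty type, and an empty index name must each be rejected with a validation error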
+ try {
+ client().admin().indices().prepareDeleteMapping("_all").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ try {
+ client().admin().indices().prepareDeleteMapping("_all").setType("").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ try {
+ client().admin().indices().prepareDeleteMapping().setType("1").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ try {
+ client().admin().indices().prepareDeleteMapping("").setType("1").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java
new file mode 100644
index 0000000..1f65aa7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void getMappingsWhereThereAreNone() {
+ createIndex("index");
+ ensureYellow();
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().get();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("index").size(), equalTo(0));
+
+ assertThat(response.fieldMappings("index", "type", "field"), Matchers.nullValue());
+ }
+
+ private XContentBuilder getMappingForType(String type) throws IOException {
+ return jsonBuilder().startObject().startObject(type).startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("obj").startObject("properties").startObject("subfield").field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject()
+ .endObject().endObject().endObject();
+ }
+
+ @Test
+ public void simpleGetFieldMappings() throws Exception {
+
+ Settings.Builder settings = ImmutableSettings.settingsBuilder()
+ .put("number_of_shards", randomIntBetween(1, 3), "number_of_replicas", randomIntBetween(0, 1));
+
+ assertTrue(client().admin().indices().prepareCreate("indexa")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .setSettings(settings)
+ .get().isAcknowledged());
+ assertTrue(client().admin().indices().prepareCreate("indexb")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .setSettings(settings)
+ .get().isAcknowledged());
+
+ ensureYellow();
+
+ // Get mappings by full name
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.mappings().get("indexa"), not(hasKey("typeB")));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.mappings(), not(hasKey("indexb")));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // Get mappings by name
+ response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // get mappings by name across multiple indices
+ response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield"), nullValue());
+
+ // get mappings by name across multiple types
+ response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // get mappings by name across multiple types & indices
+ response = client().admin().indices().prepareGetFieldMappings().setFields("subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void simpleGetFieldMappingsWithDefaults() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type", getMappingForType("type")).get();
+
+ client().prepareIndex("test", "type", "1").setSource("num", 1).get();
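+ // indexing a numeric value dynamically adds a "num" mapping whose defaults are inspected below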
+ ensureYellow();
+
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().setFields("num", "field1", "subfield").includeDefaults(true).get();
+
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("index", (Object) "not_analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("type", (Object) "long"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("index", (Object) "analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("type", (Object) "string"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "subfield").sourceAsMap().get("subfield"), hasEntry("index", (Object) "not_analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "string"));
+
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java
new file mode 100644
index 0000000..2599833
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleGetMappingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void getMappingsWhereThereAreNone() {
+ createIndex("index");
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().containsKey("index"), equalTo(true));
+ assertThat(response.mappings().get("index").size(), equalTo(0));
+ }
+
+
+ private XContentBuilder getMappingForType(String type) throws IOException {
+ return jsonBuilder().startObject().startObject(type).startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject().endObject();
+ }
+
+
+ @Test
+ public void simpleGetMappings() throws Exception {
+ client().admin().indices().prepareCreate("indexa")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .addMapping("Atype", getMappingForType("Atype"))
+ .addMapping("Btype", getMappingForType("Btype"))
+ .execute().actionGet();
+ client().admin().indices().prepareCreate("indexb")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .addMapping("Atype", getMappingForType("Atype"))
+ .addMapping("Btype", getMappingForType("Btype"))
+ .execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // Get all mappings
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(4));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all mappings, via wildcard support
+ response = client().admin().indices().prepareGetMappings("*").setTypes("*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(4));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all typeA mappings in all indices
+ response = client().admin().indices().prepareGetMappings("*").setTypes("typeA").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(1));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+
+ // Get all mappings in indexa
+ response = client().admin().indices().prepareGetMappings("indexa").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+
+ // Get all mappings beginning with A* in indexa
+ response = client().admin().indices().prepareGetMappings("indexa").setTypes("A*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+
+ // Get all mappings beginning with B* in all indices
+ response = client().admin().indices().prepareGetMappings().setTypes("B*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(1));
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all mappings beginning with B* and A* in all indices
+ response = client().admin().indices().prepareGetMappings().setTypes("B*", "A*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(2));
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java
new file mode 100644
index 0000000..f2de136
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+public class UpdateMappingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void dynamicUpdates() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
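+        // Every document gets a field name unique to its record and type, so each index request
+        // forces a dynamic mapping update that must propagate through the cluster state.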
+ int recCount = randomIntBetween(200, 600);
+ int numberOfTypes = randomIntBetween(1, 5);
+ List<IndexRequestBuilder> indexRequests = Lists.newArrayList();
+ for (int rec = 0; rec < recCount; rec++) {
+ String type = "type" + (rec % numberOfTypes);
+ String fieldName = "field_" + type + "_" + rec;
+ indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value"));
+ }
+ indexRandom(true, indexRequests);
+
+ logger.info("checking all the documents are there");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+ CountResponse response = client().prepareCount("test").execute().actionGet();
+ assertThat(response.getCount(), equalTo((long) recCount));
+
+ logger.info("checking all the fields are in the mappings");
+
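+        // Labeled loop: if the cluster state version changes while the mappings are being verified,
+        // restart the whole verification against a fresh state instead of failing spuriously.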
+ reRunTest:
+ while (true) {
+ Map<String, String> typeToSource = Maps.newHashMap();
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : state.getMetaData().getIndices().get("test").getMappings()) {
+ typeToSource.put(cursor.key, cursor.value.source().string());
+ }
+ for (int rec = 0; rec < recCount; rec++) {
+ String type = "type" + (rec % numberOfTypes);
+ String fieldName = "field_" + type + "_" + rec;
+ fieldName = "\"" + fieldName + "\""; // quote it, so we make sure we catch the exact one
+ if (!typeToSource.containsKey(type) || !typeToSource.get(type).contains(fieldName)) {
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
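+                    // wait for the pending cluster task queue to drain so any in-flight mapping updates are applied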
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ PendingClusterTasksResponse pendingTasks = client().admin().cluster().preparePendingClusterTasks().get();
+ return pendingTasks.pendingTasks().isEmpty();
+ }
+ });
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+                    // it's going to fail; before it does, make sure the cluster state hasn't changed under us...
+ ClusterState state2 = client().admin().cluster().prepareState().get().getState();
+ if (state.version() != state2.version()) {
+ logger.info("not the same version, used for test {}, new one {}, re-running test, first wait for mapping to wait", state.version(), state2.version());
+ continue reRunTest;
+ }
+ logger.info("failing, type {}, field {}, mapping {}", type, fieldName, typeToSource.get(type));
+ assertThat(typeToSource.get(type), containsString(fieldName));
+ }
+ }
+ break;
+ }
+ }
+
+ @Test
+ public void updateMappingWithoutType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("doc", "{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+ .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
+ assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
+ equalTo("{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"},\"date\":{\"type\":\"integer\"}}}}"));
+ }
+
+ @Test
+ public void updateMappingWithoutTypeMultiObjects() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+ .setSource("{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
+ assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
+ equalTo("{\"doc\":{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}}"));
+ }
+
+ @Test(expected = MergeMappingException.class)
+ public void updateMappingWithConflicts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
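+        // changing "body" from string to integer conflicts with the existing mapping and should trigger a MergeMappingException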
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+ @Test(expected = MergeMappingException.class)
+ public void updateMappingWithNormsConflicts() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": false }}}}}")
+ .execute().actionGet();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": true }}}}}")
+ .execute().actionGet();
+ }
+
+ /*
+ First regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
+ */
+ @Test
+ public void updateMappingWithIgnoredConflicts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
+ .setIgnoreConflicts(true)
+ .execute().actionGet();
+
+        // no mapping change was applied: the only update conflicted and was ignored, yet the request is still acknowledged
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+ /*
+ Second regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
+ */
+ @Test
+ public void updateMappingNoChanges() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+
+        // the submitted mapping is identical, so nothing changes, yet the request is still acknowledged
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void updateIncludeExclude() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("normal").field("type", "long").endObject()
+ .startObject("exclude").field("type", "long").endObject()
+ .startObject("include").field("type", "long").endObject()
+ .endObject().endObject().endObject()));
+        ensureGreen(); // make sure the replicas are initialized so the refresh command will work on them too
+
+ logger.info("Index doc");
+ index("test", "type", "1", JsonXContent.contentBuilder().startObject()
+ .field("normal", 1).field("exclude", 1).field("include", 1)
+ .endObject()
+ );
+        refresh(); // make the document visible to the non-realtime get used below
+
+
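+        // _source includes/excludes control which parts of the document source are stored; they apply
+        // only to documents indexed after the mapping change, not to documents already indexed.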
+ logger.info("Adding exclude settings");
+ PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ JsonXContent.contentBuilder().startObject().startObject("type")
+ .startObject("_source")
+ .startArray("excludes").value("exclude").endArray()
+ .endObject().endObject()
+ ).get();
+
+ assertTrue(putResponse.isAcknowledged());
+
+ // changed mapping doesn't affect indexed documents (checking backward compatibility)
+ GetResponse getResponse = client().prepareGet("test", "type", "1").setRealtime(false).get();
+ assertThat(getResponse.getSource(), hasKey("normal"));
+ assertThat(getResponse.getSource(), hasKey("exclude"));
+ assertThat(getResponse.getSource(), hasKey("include"));
+
+
+ logger.info("Index doc again");
+ index("test", "type", "1", JsonXContent.contentBuilder().startObject()
+ .field("normal", 2).field("exclude", 1).field("include", 2)
+ .endObject()
+ );
+
+ // but do affect newly indexed docs
+ getResponse = get("test", "type", "1");
+ assertThat(getResponse.getSource(), hasKey("normal"));
+ assertThat(getResponse.getSource(), not(hasKey("exclude")));
+ assertThat(getResponse.getSource(), hasKey("include"));
+
+
+ logger.info("Changing mapping to includes");
+ putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ JsonXContent.contentBuilder().startObject().startObject("type")
+ .startObject("_source")
+ .startArray("excludes").endArray()
+ .startArray("includes").value("include").endArray()
+ .endObject().endObject()
+ ).get();
+ assertTrue(putResponse.isAcknowledged());
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ MappingMetaData typeMapping = getMappingsResponse.getMappings().get("test").get("type");
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes"));
+ ArrayList<String> includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes");
+ assertThat(includes, contains("include"));
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes"));
+ assertThat((ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"), emptyIterable());
+
+
+ logger.info("Indexing doc yet again");
+ index("test", "type", "1", JsonXContent.contentBuilder().startObject()
+ .field("normal", 3).field("exclude", 3).field("include", 3)
+ .endObject()
+ );
+
+ getResponse = get("test", "type", "1");
+ assertThat(getResponse.getSource(), not(hasKey("normal")));
+ assertThat(getResponse.getSource(), not(hasKey("exclude")));
+ assertThat(getResponse.getSource(), hasKey("include"));
+
+
+ logger.info("Adding excludes, but keep includes");
+ putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ JsonXContent.contentBuilder().startObject().startObject("type")
+ .startObject("_source")
+ .startArray("excludes").value("*.excludes").endArray()
+ .endObject().endObject()
+ ).get();
+ assertTrue(putResponse.isAcknowledged());
+
+ getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ typeMapping = getMappingsResponse.getMappings().get("test").get("type");
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes"));
+ includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes");
+ assertThat(includes, contains("include"));
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes"));
+ ArrayList<String> excludes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes");
+ assertThat(excludes, contains("*.excludes"));
+
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void updateDefaultMappingSettings() throws Exception {
+
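+        // The _default_ mapping acts as a template for types created later; unlike a concrete type it can
+        // be redefined freely, though its content is still validated on update.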
+ logger.info("Creating index with _default_ mappings");
+ client().admin().indices().prepareCreate("test").addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .field("date_detection", false)
+ .endObject().endObject()
+ ).get();
+
+ GetMappingsResponse getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ Map<String, Object> defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ assertThat(defaultMapping, hasKey("date_detection"));
+
+
+ logger.info("Emptying _default_ mappings");
+ // now remove it
+ PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+ logger.info("Done Emptying _default_ mappings");
+
+ getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ assertThat(defaultMapping, not(hasKey("date_detection")));
+
+        // now test that you can change settings that are normally unchangeable
+ logger.info("Creating _default_ mappings with an analyzed field");
+ putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "string").field("index", "analyzed").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+
+
+ logger.info("Changing _default_ mappings field from analyzed to non-analyzed");
+ putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "string").field("index", "not_analyzed").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+ logger.info("Done changing _default_ mappings field from analyzed to non-analyzed");
+
+ getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ Map<String, Object> fieldSettings = (Map<String, Object>) ((Map) defaultMapping.get("properties")).get("f");
+ assertThat(fieldSettings, hasEntry("index", (Object) "not_analyzed"));
+
+ // but we still validate the _default_ type
+ logger.info("Confirming _default_ mappings validation");
+ assertThrows(client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "DOESNT_EXIST").endObject().endObject()
+ .endObject().endObject()
+ ), MapperParsingException.class);
+
+ }
+
+ @Test
+ public void updateMappingConcurrently() throws Throwable {
+ // Test that we can concurrently update different indexes and types.
+ int shardNo = Math.max(5, cluster().size());
+
+ prepareCreate("test1").setSettings("index.number_of_shards", shardNo).execute().actionGet();
+ prepareCreate("test2").setSettings("index.number_of_shards", shardNo).execute().actionGet();
+
+        // This is important: the test assumes all nodes are aware of all indices. Due to initializing-shard
+        // throttling, not all shards are allocated by the initial create-index call, so wait for them here.
+ ensureYellow();
+
+ final Throwable[] threadException = new Throwable[1];
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] threads = new Thread[3];
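+        // the barrier releases all worker threads at once to maximize contention on concurrent mapping updates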
+ final CyclicBarrier barrier = new CyclicBarrier(threads.length);
+ final ArrayList<Client> clientArray = new ArrayList<Client>();
+ for (Client c : clients()) {
+ clientArray.add(c);
+ }
+
+ for (int j = 0; j < threads.length; j++) {
+ threads[j] = new Thread(new Runnable() {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+
+ for (int i = 0; i < 100; i++) {
+ if (stop.get()) {
+ return;
+ }
+
+ Client client1 = clientArray.get(i % clientArray.size());
+ Client client2 = clientArray.get((i + 1) % clientArray.size());
+ String indexName = i % 2 == 0 ? "test2" : "test1";
+ String typeName = "type" + (i % 10);
+ String fieldName = Thread.currentThread().getName() + "_" + i;
+
+ PutMappingResponse response = client1.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(typeName)
+ .startObject("properties").startObject(fieldName).field("type", "string").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+
+ assertThat(response.isAcknowledged(), equalTo(true));
+ GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get();
+ ImmutableOpenMap<String, MappingMetaData> mappings = getMappingResponse.getMappings().get(indexName);
+ assertThat(mappings.containsKey(typeName), equalTo(true));
+ assertThat(((Map<String, Object>) mappings.get(typeName).getSourceAsMap().get("properties")).keySet(), Matchers.hasItem(fieldName));
+ }
+ } catch (Throwable t) {
+ threadException[0] = t;
+ stop.set(true);
+ }
+ }
+ });
+
+ threads[j].setName("t_" + j);
+ threads[j].start();
+ }
+
+ for (Thread t : threads) t.join();
+
+ if (threadException[0] != null) {
+ throw threadException[0];
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java
new file mode 100644
index 0000000..ff85b38
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for dynamically updating the number of replicas, including the auto_expand_replicas setting.
+ */
+public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void simpleUpdateNumberOfReplicasTests() throws Exception {
+ logger.info("Creating index test");
+ prepareCreate("test", 2).execute().actionGet();
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "test" + i)
+ .endObject()).get();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).get();
+            assertHitCount(countResponse, 10L);
+ }
+
+ logger.info("Increasing the number of replicas from 1 to 2");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet();
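+        // give the master a moment to start allocating the new replicas before checking cluster health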
+ Thread.sleep(200);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(10).execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(10));
+
+ logger.info("starting another node to new replicas will be allocated to it");
+ allowNodes("test", 3);
+ Thread.sleep(100);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=3").execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(15));
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).get();
+            assertHitCount(countResponse, 10L);
+ }
+
+ logger.info("Decreasing number of replicas from 2 to 0");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).get();
+ Thread.sleep(200);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=3").execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(5));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 10);
+ }
+ }
+
+ @Test
+ public void testAutoExpandNumberOfReplicas0ToData() {
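+        // with auto_expand_replicas set to 0-all the replica count tracks the cluster size:
+        // the number of replicas equals the number of data nodes minus one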
+ cluster().ensureAtMostNumNodes(2);
+ logger.info("--> creating index test with auto expand replicas");
+ prepareCreate("test", 2, settingsBuilder().put("index.number_of_shards", 2).put("auto_expand_replicas", "0-all")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> add another node, should increase the number of replicas");
+ allowNodes("test", 3);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(6).setWaitForNodes(">=3").execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(6));
+
+ logger.info("--> closing one node");
+ cluster().ensureAtMostNumNodes(2);
+ allowNodes("test", 2);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(4).setWaitForNodes(">=2").execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> closing another node");
+ cluster().ensureAtMostNumNodes(1);
+ allowNodes("test", 1);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(2));
+ }
+
+ @Test
+ public void testAutoExpandNumberReplicas1ToData() {
+ logger.info("--> creating index test with auto expand replicas");
+ cluster().ensureAtMostNumNodes(2);
+ prepareCreate("test", 2, settingsBuilder().put("index.number_of_shards", 2).put("auto_expand_replicas", "1-all")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> add another node, should increase the number of replicas");
+ allowNodes("test", 3);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(6).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(6));
+
+ logger.info("--> closing one node");
+ cluster().ensureAtMostNumNodes(2);
+ allowNodes("test", 2);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> closing another node");
+ cluster().ensureAtMostNumNodes(1);
+ allowNodes("test", 1);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(2));
+ }
+
+ @Test
+ public void testAutoExpandNumberReplicas2() {
+ logger.info("--> creating index test with auto expand replicas set to 0-2");
+ prepareCreate("test", 3, settingsBuilder().put("index.number_of_shards", 2).put("auto_expand_replicas", "0-2")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(6).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(6));
+
+ logger.info("--> add two more nodes");
+ allowNodes("test", 4);
+ allowNodes("test", 5);
+
+ logger.info("--> update the auto expand replicas to 0-3");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("auto_expand_replicas", "0-3")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(8).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(3));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(8));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java
new file mode 100644
index 0000000..177933b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class UpdateSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testOpenCloseUpdateSettings() throws Exception {
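+        // dynamic settings can be changed while an index is open; non-dynamic settings only while it is closed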
+ createIndex("test");
+ try {
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.refresh_interval", -1) // this one can change
+ .put("index.cache.filter.type", "none") // this one can't
+ )
+ .execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // all is well
+ }
+
+ IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), nullValue());
+ assertThat(indexMetaData.settings().get("index.cache.filter.type"), nullValue());
+
+ // Now verify via dedicated get settings api:
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), nullValue());
+ assertThat(getSettingsResponse.getSetting("test", "index.cache.filter.type"), nullValue());
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.refresh_interval", -1) // this one can change
+ )
+ .execute().actionGet();
+
+ indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("-1"));
+ // Now verify via dedicated get settings api:
+ getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("-1"));
+
+ // now close the index, change the non dynamic setting, and see that it applies
+
+ // Wait for the index to turn green before attempting to close it
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.refresh_interval", "1s") // this one can change
+ .put("index.cache.filter.type", "none") // this one can't
+ )
+ .execute().actionGet();
+
+ indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("1s"));
+ assertThat(indexMetaData.settings().get("index.cache.filter.type"), equalTo("none"));
+
+ // Now verify via dedicated get settings api:
+ getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("1s"));
+ assertThat(getSettingsResponse.getSetting("test", "index.cache.filter.type"), equalTo("none"));
+ }
+
+ @Test
+ public void testEngineGCDeletesSetting() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "type", "1").setSource("f", 1).get(); // set version to 1
+ client().prepareDelete("test", "type", "1").get(); // sets version to 2
+        client().prepareIndex("test", "type", "1").setSource("f", 2).setVersion(2).get(); // the delete is still in the cache, so this versioned index works and sets the version to 3
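+        // index.gc_deletes controls how long delete tombstones are kept around for version checks;
+        // setting it to 0 allows them to be purged almost immediately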
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.gc_deletes", 0)
+ ).get();
+
+ client().prepareDelete("test", "type", "1").get(); // sets version to 4
+        Thread.sleep(300); // wait for the gc_deletes window to pass. TODO: this needs a better solution than sleeping. To be discussed.
+        assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3).setVersion(4), VersionConflictEngineException.class); // the delete should no longer be in the cache
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java
new file mode 100644
index 0000000..8ce4ac3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ClusterScope(scope=Scope.TEST, numNodes=2)
+public class CloseIndexDisableCloseAllTests extends ElasticsearchIntegrationTest {
+
+ @Test
+    // Multiple tests are combined into one because the cluster scope is TEST,
+    // and the scope is TEST because we can't clear cluster settings between tests.
+ public void testCloseAllRequiresName() {
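+        // with DestructiveOperations.REQUIRES_NAME enabled, close requests must name indices explicitly;
+        // _all and wildcard expressions are rejected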
+ Settings clusterSettings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings));
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ // Close all explicitly
+ try {
+ client().admin().indices().prepareClose("_all").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("*").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("test*").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("*", "-test1").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test2", "test3");
+ }
+
+ private void assertIndexIsClosed(String... indices) {
+ checkIndexState(IndexMetaData.State.CLOSE, indices);
+ }
+
+ private void checkIndexState(IndexMetaData.State state, String... indices) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index);
+ assertThat(indexMetaData, notNullValue());
+ assertThat(indexMetaData.getState(), equalTo(state));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java
new file mode 100644
index 0000000..01f144a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class OpenCloseIndexTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleCloseOpen() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testSimpleCloseMissingIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose("test1").execute().actionGet();
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testSimpleOpenMissingIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen("test1").execute().actionGet();
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testCloseOneMissingIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ client.admin().indices().prepareClose("test1", "test2").execute().actionGet();
+ }
+
+ @Test
+ public void testCloseOneMissingIndexIgnoreMissing() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1", "test2")
+ .setIndicesOptions(IndicesOptions.lenient()).execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testOpenOneMissingIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ client.admin().indices().prepareOpen("test1", "test2").execute().actionGet();
+ }
+
+ @Test
+ public void testOpenOneMissingIndexIgnoreMissing() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1", "test2")
+ .setIndicesOptions(IndicesOptions.lenient()).execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseOpenMultipleIndices() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse1.isAcknowledged(), equalTo(true));
+ CloseIndexResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").execute().actionGet();
+ assertThat(closeIndexResponse2.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+ assertIndexIsOpened("test3");
+
+ OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse1.isAcknowledged(), equalTo(true));
+ OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").execute().actionGet();
+ assertThat(openIndexResponse2.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test
+ public void testCloseOpenWildcard() {
+ Client client = client();
+ createIndex("test1", "test2", "a");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test*").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+ assertIndexIsOpened("a");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "a");
+ }
+
+ @Test
+ public void testCloseOpenAll() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("_all").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2", "test3");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test
+ public void testCloseOpenAllWildcard() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("*").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2", "test3");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testCloseNoIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose().execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testCloseNullIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose(null).execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testOpenNoIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen().execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testOpenNullIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen(null).execute().actionGet();
+ }
+
+ @Test
+ public void testOpenAlreadyOpenedIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+        // no problem if we try to open an index that's already open
+ OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse1.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseAlreadyClosedIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ // closing the index
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ // no problem if we try to close an index that's already in closed state
+ closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+ }
+
+ @Test
+ public void testSimpleCloseOpenAlias() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ IndicesAliasesResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").execute().actionGet();
+ assertThat(aliasesResponse.isAcknowledged(), equalTo(true));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseOpenAliasMultipleIndices() {
+ Client client = client();
+ createIndex("test1", "test2");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ IndicesAliasesResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").execute().actionGet();
+ assertThat(aliasesResponse1.isAcknowledged(), equalTo(true));
+ IndicesAliasesResponse aliasesResponse2 = client.admin().indices().prepareAliases().addAlias("test2", "test-alias").execute().actionGet();
+ assertThat(aliasesResponse2.isAcknowledged(), equalTo(true));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2");
+ }
+
+ private void assertIndexIsOpened(String... indices) {
+ checkIndexState(IndexMetaData.State.OPEN, indices);
+ }
+
+ private void assertIndexIsClosed(String... indices) {
+ checkIndexState(IndexMetaData.State.CLOSE, indices);
+ }
+
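+ // Shared helper: fetch the cluster state once and verify that each named index
+ // exists in the metadata with the expected OPEN/CLOSE state.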
+ private void checkIndexState(IndexMetaData.State expectedState, String... indices) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index);
+ assertThat(indexMetaData, notNullValue());
+ assertThat(indexMetaData.getState(), equalTo(expectedState));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java
new file mode 100644
index 0000000..2eb4b27
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SimpleIndexStateTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(SimpleIndexStateTests.class);
+
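+ // End-to-end open/close round trip: the routing table and indexing work while open,
+ // vanish while the index is closed, and return after reopening.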
+ @Test
+ public void testSimpleOpenClose() {
+ logger.info("--> creating test index");
+ createIndex("test");
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(5));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+
+ logger.info("--> closing test index...");
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").get();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> testing indices status api...");
+ IndicesStatusResponse indicesStatusResponse = client().admin().indices().prepareStatus().get();
+ assertThat(indicesStatusResponse.getIndices().size(), equalTo(0));
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+
+ logger.info("--> opening index...");
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").get();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(5));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ }
+
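+ // An allocation filter that matches no node leaves the primaries unassigned (RED),
+ // and closing an index whose primaries are not allocated must be rejected.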
+ @Test
+ public void testFastCloseAfterCreateDoesNotClose() {
+ logger.info("--> creating test index that cannot be allocated");
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.routing.allocation.include.tag", "no_such_node")
+ .put("index.number_of_replicas", 1).build()).get();
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth("test").setWaitForNodes(">=2").get();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));
+
+ try {
+ client().admin().indices().prepareClose("test").get();
+ fail("Exception should have been thrown");
+ } catch(IndexPrimaryShardNotAllocatedException e) {
+ // expected
+ }
+
+ logger.info("--> updating test index settings to allow allocation");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.routing.allocation.include.tag", "").build()).get();
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(5));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ }
+
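+ // A create that fails settings validation must not leave partial state behind
+ // that would block a later create of the same index with valid settings.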
+ @Test
+ public void testConsistencyAfterIndexCreationFailure() {
+
+ logger.info("--> deleting test index....");
+ try {
+ client().admin().indices().prepareDelete("test").get();
+ } catch (IndexMissingException ex) {
+ // Ignore
+ }
+
+ logger.info("--> creating test index with invalid settings ");
+ try {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("number_of_shards", "bad")).get();
+ fail();
+ } catch (SettingsException ex) {
+ // Expected
+ }
+
+ logger.info("--> creating test index with valid settings ");
+ CreateIndexResponse response = client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("number_of_shards", 1)).get();
+ assertThat(response.isAcknowledged(), equalTo(true));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java b/src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java
new file mode 100644
index 0000000..e09928c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.stats;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 2)
+public class SimpleIndexStatsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleStats() throws Exception {
+ // rely on 1 replica for this test
+ createIndex("test1");
+ createIndex("test2");
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
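+ // With one replica per shard, 'total' covers primary and replica copies, so its
+ // doc and indexing counts are exactly double the 'primaries' values.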
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getPrimaries().getDocs().getCount(), equalTo(3l));
+ assertThat(stats.getTotal().getDocs().getCount(), equalTo(6l));
+ assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexCount(), equalTo(3l));
+ assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(6l));
+ assertThat(stats.getTotal().getStore(), notNullValue());
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getFlush(), notNullValue());
+ assertThat(stats.getTotal().getRefresh(), notNullValue());
+
+ assertThat(stats.getIndex("test1").getPrimaries().getDocs().getCount(), equalTo(2l));
+ assertThat(stats.getIndex("test1").getTotal().getDocs().getCount(), equalTo(4l));
+ assertThat(stats.getIndex("test1").getPrimaries().getStore(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getMerge(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getFlush(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getRefresh(), notNullValue());
+
+ assertThat(stats.getIndex("test2").getPrimaries().getDocs().getCount(), equalTo(1l));
+ assertThat(stats.getIndex("test2").getTotal().getDocs().getCount(), equalTo(2l));
+
+ // make sure that number of requests in progress is 0
+ assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getIndexCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getDeleteCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getFetchCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0l));
+
+ // check flags
+ stats = client().admin().indices().prepareStats().clear()
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getFlush(), notNullValue());
+ assertThat(stats.getTotal().getRefresh(), notNullValue());
+
+ // check types
+ stats = client().admin().indices().prepareStats().setTypes("type1", "type").execute().actionGet();
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCount(), equalTo(1l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type").getIndexCount(), equalTo(1l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2"), nullValue());
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCurrent(), equalTo(0l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getDeleteCurrent(), equalTo(0l));
+
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(0l));
+ // check get
+ GetResponse getResponse = client().prepareGet("test1", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0l));
+
+ // missing get
+ getResponse = client().prepareGet("test1", "type1", "2").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(2l));
+ assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(1l));
+
+ // clear all
+ stats = client().admin().indices().prepareStats()
+ .setDocs(false)
+ .setStore(false)
+ .setIndexing(false)
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .clear() // reset defaults
+ .execute().actionGet();
+
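+ // clear() is the last call in the chain, so it overrides the earlier setters and
+ // every stats section comes back null.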
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getGet(), nullValue());
+ assertThat(stats.getTotal().getSearch(), nullValue());
+ }
+
+ @Test
+ public void testMergeStats() {
+ // rely on 1 replica for this test
+ createIndex("test1");
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ // clear all
+ IndicesStatsResponse stats = client().admin().indices().prepareStats()
+ .setDocs(false)
+ .setStore(false)
+ .setIndexing(false)
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .clear() // reset defaults
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getGet(), nullValue());
+ assertThat(stats.getTotal().getSearch(), nullValue());
+
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test1", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ client().admin().indices().prepareOptimize().setWaitForMerge(true).setMaxNumSegments(1).execute().actionGet();
+ stats = client().admin().indices().prepareStats()
+ .setMerge(true)
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0l));
+ }
+
+ @Test
+ public void testSegmentsStats() {
+ prepareCreate("test1", 2).setSettings("index.number_of_shards", 5, "index.number_of_replicas", 1).get();
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ for (int i = 0; i < 20; i++) {
+ index("test1", "type1", Integer.toString(i), "field", "value");
+ index("test1", "type2", Integer.toString(i), "field", "value");
+ client().admin().indices().prepareFlush().get();
+ }
+ client().admin().indices().prepareOptimize().setWaitForMerge(true).setMaxNumSegments(1).execute().actionGet();
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+
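+ // 5 shards with 1 replica = 10 shard copies; force-merged to one segment each,
+ // that makes 10 segments in total.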
+ assertThat(stats.getTotal().getSegments(), notNullValue());
+ assertThat(stats.getTotal().getSegments().getCount(), equalTo(10l));
+ assumeTrue(org.elasticsearch.Version.CURRENT.luceneVersion != Version.LUCENE_46);
+ assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0l));
+ }
+
+ @Test
+ public void testAllFlags() throws Exception {
+ // rely on 1 replica for this test
+ createIndex("test1");
+ createIndex("test2");
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ Flag[] values = CommonStatsFlags.Flag.values();
+ for (Flag flag : values) {
+ set(flag, builder, false);
+ }
+
+ IndicesStatsResponse stats = builder.execute().actionGet();
+ for (Flag flag : values) {
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(false));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(false));
+ }
+
+ for (Flag flag : values) {
+ set(flag, builder, true);
+ }
+ stats = builder.execute().actionGet();
+ for (Flag flag : values) {
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(true));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(true));
+ }
+ Random random = getRandom();
+ EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
+ for (Flag flag : values) {
+ if (random.nextBoolean()) {
+ flags.add(flag);
+ }
+ }
+
+
+ for (Flag flag : values) {
+ set(flag, builder, false); // clear all
+ }
+
+ for (Flag flag : flags) { // set the flags
+ set(flag, builder, true);
+ }
+ stats = builder.execute().actionGet();
+ for (Flag flag : flags) { // check the flags
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(true));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(true));
+ }
+
+ for (Flag flag : EnumSet.complementOf(flags)) { // check the complement
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(false));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(false));
+ }
+
+ }
+
+ @Test
+ public void testEncodeDecodeCommonStats() throws IOException {
+ CommonStatsFlags flags = new CommonStatsFlags();
+ Flag[] values = CommonStatsFlags.Flag.values();
+ assertThat(flags.anySet(), equalTo(true));
+
+ for (Flag flag : values) {
+ flags.set(flag, false);
+ }
+ assertThat(flags.anySet(), equalTo(false));
+ for (Flag flag : values) {
+ flags.set(flag, true);
+ }
+ assertThat(flags.anySet(), equalTo(true));
+ Random random = getRandom();
+ flags.set(values[random.nextInt(values.length)], false);
+ assertThat(flags.anySet(), equalTo(true));
+
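+ // Round-trip the flags through the stream wire format and verify that every
+ // flag survives serialization unchanged.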
+ {
+ BytesStreamOutput out = new BytesStreamOutput();
+ flags.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+ CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(new BytesStreamInput(bytes));
+ for (Flag flag : values) {
+ assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
+ }
+ }
+
+ {
+ for (Flag flag : values) {
+ flags.set(flag, random.nextBoolean());
+ }
+ BytesStreamOutput out = new BytesStreamOutput();
+ flags.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+ CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(new BytesStreamInput(bytes));
+ for (Flag flag : values) {
+ assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
+ }
+ }
+ }
+
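+ // Flag is serialized by ordinal, so this test pins the current order; new
+ // constants may only ever be appended.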
+ @Test
+ public void testFlagOrdinalOrder() {
+ Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh,
+ Flag.FilterCache, Flag.IdCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments, Flag.Translog};
+
+ assertThat(flags.length, equalTo(Flag.values().length));
+ for (int i = 0; i < flags.length; i++) {
+ assertThat("ordinal has changed - this breaks the wire protocol. Only append to new values", i, equalTo(flags[i].ordinal()));
+ }
+ }
+
+ private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean set) {
+ switch (flag) {
+ case Docs:
+ builder.setDocs(set);
+ break;
+ case FieldData:
+ builder.setFieldData(set);
+ break;
+ case FilterCache:
+ builder.setFilterCache(set);
+ break;
+ case Flush:
+ builder.setFlush(set);
+ break;
+ case Get:
+ builder.setGet(set);
+ break;
+ case IdCache:
+ builder.setIdCache(set);
+ break;
+ case Indexing:
+ builder.setIndexing(set);
+ break;
+ case Merge:
+ builder.setMerge(set);
+ break;
+ case Refresh:
+ builder.setRefresh(set);
+ break;
+ case Search:
+ builder.setSearch(set);
+ break;
+ case Store:
+ builder.setStore(set);
+ break;
+ case Warmer:
+ builder.setWarmer(set);
+ break;
+ case Percolate:
+ builder.setPercolate(set);
+ break;
+ case Completion:
+ builder.setCompletion(set);
+ break;
+ case Segments:
+ builder.setSegments(set);
+ break;
+ case Translog:
+ builder.setTranslog(set);
+ break;
+ default:
+ fail("new flag? " + flag);
+ break;
+ }
+ }
+
+ private static boolean isSet(Flag flag, CommonStats response) {
+ switch (flag) {
+ case Docs:
+ return response.getDocs() != null;
+ case FieldData:
+ return response.getFieldData() != null;
+ case FilterCache:
+ return response.getFilterCache() != null;
+ case Flush:
+ return response.getFlush() != null;
+ case Get:
+ return response.getGet() != null;
+ case IdCache:
+ return response.getIdCache() != null;
+ case Indexing:
+ return response.getIndexing() != null;
+ case Merge:
+ return response.getMerge() != null;
+ case Refresh:
+ return response.getRefresh() != null;
+ case Search:
+ return response.getSearch() != null;
+ case Store:
+ return response.getStore() != null;
+ case Warmer:
+ return response.getWarmer() != null;
+ case Percolate:
+ return response.getPercolate() != null;
+ case Completion:
+ return response.getCompletion() != null;
+ case Segments:
+ return response.getSegments() != null;
+ case Translog:
+ return response.getTranslog() != null;
+ default:
+ fail("new flag? " + flag);
+ return false;
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
new file mode 100644
index 0000000..e7e409e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class IndicesStoreTests extends ElasticsearchIntegrationTest {
+ private static final Settings SETTINGS = settingsBuilder().put("gateway.type", "local").build();
+
+ @Test
+ public void shardsCleanup() throws Exception {
+ final String node_1 = cluster().startNode(SETTINGS);
+ final String node_2 = cluster().startNode(SETTINGS);
+ logger.info("--> creating index [test] with one shard and on replica");
+ client().admin().indices().create(createIndexRequest("test")
+ .settings(settingsBuilder().put("index.numberOfReplicas", 1).put("index.numberOfShards", 1))).actionGet();
+
+ logger.info("--> running cluster_health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+
+ logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
+ assertThat(shardDirectory(node_1, "test", 0).exists(), equalTo(true));
+ assertThat(shardDirectory(node_2, "test", 0).exists(), equalTo(true));
+
+ logger.info("--> starting node server3");
+ String node_3 = cluster().startNode(SETTINGS);
+
+ logger.info("--> making sure that shard is not allocated on server3");
+ assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
+
+ File server2Shard = shardDirectory(node_2, "test", 0);
+ logger.info("--> stopping node node_2");
+ cluster().stopRandomNode(TestCluster.nameFilter(node_2));
+ assertThat(server2Shard.exists(), equalTo(true));
+
+ logger.info("--> running cluster_health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ logger.info("--> making sure that shard and its replica exist on server1, server2 and server3");
+ assertThat(shardDirectory(node_1, "test", 0).exists(), equalTo(true));
+ assertThat(server2Shard.exists(), equalTo(true));
+ assertThat(shardDirectory(node_3, "test", 0).exists(), equalTo(true));
+
+ logger.info("--> starting node node_4");
+ final String node_4 = cluster().startNode(SETTINGS);
+
+ logger.info("--> running cluster_health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ logger.info("--> making sure that shard and its replica are allocated on server1 and server3 but not on server2");
+ assertThat(shardDirectory(node_1, "test", 0).exists(), equalTo(true));
+ assertThat(shardDirectory(node_3, "test", 0).exists(), equalTo(true));
+ assertThat(waitForShardDeletion(node_4, "test", 0), equalTo(false));
+ }
+
+ private File shardDirectory(String server, String index, int shard) {
+ NodeEnvironment env = cluster().getInstance(NodeEnvironment.class, server);
+ return env.shardLocations(new ShardId(index, shard))[0];
+ }
+
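+ // Polls via awaitBusy until the shard directory disappears, then reports whether
+ // it still exists; a 'false' return therefore means the shard was deleted.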
+ private boolean waitForShardDeletion(final String server, final String index, final int shard) throws InterruptedException {
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ return !shardDirectory(server, index, shard).exists();
+ }
+ });
+ return shardDirectory(server, index, shard).exists();
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java b/src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java
new file mode 100644
index 0000000..e4ec98d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
+
+ public static final String[] STORE_TYPES = {"fs", "simplefs", "niofs", "mmapfs"};
+
+ @Test
+ public void testAvailableSpaceDetection() {
+ for (String store : STORE_TYPES) {
+ createIndexWithStoreType("test", store, StrictDistributor.class.getCanonicalName());
+ }
+ }
+
+ @Test
+ public void testDirectoryToString() throws IOException {
+ createIndexWithStoreType("test", "niofs", "least_used");
+ String storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ File[] dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[rate_limited(niofs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(niofs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "niofs", "random");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(random[rate_limited(niofs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(niofs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "mmapfs", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[rate_limited(mmapfs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(mmapfs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "simplefs", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[rate_limited(simplefs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(simplefs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "memory", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString, equalTo("store(least_used[byte_buffer])"));
+
+ createIndexWithoutRateLimitingStoreType("test", "niofs", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[niofs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), niofs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(")])"));
+ }
+
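+ // Helper: wipe and recreate a single-shard, zero-replica index with the given
+ // store type and distributor, then wait for green.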
+ private void createIndexWithStoreType(String index, String storeType, String distributor) {
+ cluster().wipeIndices(index);
+ client().admin().indices().prepareCreate(index)
+ .setSettings(settingsBuilder()
+ .put("index.store.distributor", distributor)
+ .put("index.store.type", storeType)
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1)
+ )
+ .execute().actionGet();
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+ }
+
+ private void createIndexWithoutRateLimitingStoreType(String index, String storeType, String distributor) {
+ cluster().wipeIndices(index);
+ client().admin().indices().prepareCreate(index)
+ .setSettings(settingsBuilder()
+ .put("index.store.distributor", distributor)
+ .put("index.store.type", storeType)
+ .put("index.store.throttle.type", "none")
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1)
+ )
+ .execute().actionGet();
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+ }
+
+
+ private File[] dataPaths() {
+ Set<String> nodes = cluster().nodesInclude("test");
+ assertThat(nodes.isEmpty(), equalTo(false));
+ NodeEnvironment env = cluster().getInstance(NodeEnvironment.class, nodes.iterator().next());
+ return env.nodeDataLocations();
+ }
+
+ private Directory getStoreDirectory(String index, int shardId) {
+ Set<String> nodes = cluster().nodesInclude("test");
+ assertThat(nodes.isEmpty(), equalTo(false));
+ IndicesService indicesService = cluster().getInstance(IndicesService.class, nodes.iterator().next());
+ InternalIndexShard indexShard = (InternalIndexShard) (indicesService.indexService(index).shard(shardId));
+ return indexShard.store().directory();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/store/StrictDistributor.java b/src/test/java/org/elasticsearch/indices/store/StrictDistributor.java
new file mode 100644
index 0000000..726e0cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/store/StrictDistributor.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.distributor.AbstractDistributor;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ *
+ */
+public class StrictDistributor extends AbstractDistributor {
+
+ @Inject
+ public StrictDistributor(DirectoryService directoryService) throws IOException {
+ super(directoryService);
+ }
+
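+ // Assert that every delegate directory reports usable space before always
+ // delegating to the primary; testAvailableSpaceDetection relies on this check.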
+ @Override
+ public Directory doAny() {
+ for (Directory delegate : delegates) {
+ assertThat(getUsableSpace(delegate), greaterThan(0L));
+ }
+ return primary();
+ }
+
+ @Override
+ public String name() {
+ return "strict";
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java b/src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java
new file mode 100644
index 0000000..ddab5ea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.template;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.google.common.base.Charsets;
+import com.google.common.io.Files;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=1)
+public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ ImmutableSettings.Builder settingsBuilder = ImmutableSettings.settingsBuilder();
+ settingsBuilder.put(super.nodeSettings(nodeOrdinal));
+
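+ // Copy one of the bundled template JSON files into {path.conf}/templates;
+ // the node loads any template found there at startup.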
+ try {
+ File directory = newTempDir(LifecycleScope.SUITE);
+ settingsBuilder.put("path.conf", directory.getPath());
+
+ File templatesDir = new File(directory + File.separator + "templates");
+ templatesDir.mkdir();
+
+ File dst = new File(templatesDir, "template.json");
+ String templatePath = "/org/elasticsearch/indices/template/template" + randomInt(5) + ".json";
+ logger.info("Picking template path [{}]", templatePath);
+ // random template: one variant uses 'settings.index.number_of_shards', the other 'settings.number_of_shards'
+ String template = Streams.copyToStringFromClasspath(templatePath);
+ Files.write(template, dst, Charsets.UTF_8);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ return settingsBuilder.build();
+ }
+
+ @Test
+ public void testThatLoadingTemplateFromFileWorks() throws Exception {
+ final int iters = atLeast(5);
+ Set<String> indices = new HashSet<String>();
+ for (int i = 0; i < iters; i++) {
+ String indexName = "foo" + randomRealisticUnicodeOfLengthBetween(0, 5);
+ if (indices.contains(indexName)) {
+ continue;
+ }
+ indices.add(indexName);
+ createIndex(indexName);
+ ensureYellow(); // ensuring yellow so the test fails faster if the template cannot be loaded
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().setIndices(indexName).get();
+ assertThat(stateResponse.getState().getMetaData().indices().get(indexName).getNumberOfShards(), is(10));
+ assertThat(stateResponse.getState().getMetaData().indices().get(indexName).getNumberOfReplicas(), is(0));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java
new file mode 100644
index 0000000..3d41e4b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.template;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleIndexTemplateTests() throws Exception {
+ // clean all templates set up by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check that get-all-templates returns nothing when no templates exist.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+
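+ // template_1 (order 0) matches 'te*'; template_2 (order 1) matches 'test*' and,
+ // having the higher order, overrides field2's store setting where both apply.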
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ // test create param
+ assertThrows(client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setCreate(true)
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ , IndexTemplateAlreadyExistsException.class
+ );
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(2));
+
+
+ // index something into test_index, will match on both templates
+ client().prepareIndex("test_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test_index")
+ .setQuery(termQuery("field1", "value1"))
+ .addField("field1").addField("field2")
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2")); // this will still be loaded because of the source feature
+
+ client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // now only match on one template (template_1)
+ searchResponse = client().prepareSearch("text_index")
+ .setQuery(termQuery("field1", "value1"))
+ .addField("field1").addField("field2")
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2"));
+ }
+
+ @Test
+ public void testDeleteIndexTemplate() throws Exception {
+ final int existingTemplates = admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size();
+ logger.info("--> put template_1 and template_2");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> explicitly delete template_1");
+ admin().indices().prepareDeleteTemplate("template_1").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(1 + existingTemplates));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_2"), equalTo(true));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_1"), equalTo(false));
+
+
+ logger.info("--> put template_1 back");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> delete template*");
+ admin().indices().prepareDeleteTemplate("template*").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(existingTemplates));
+
+ logger.info("--> delete * with no templates, make sure we don't get a failure");
+ admin().indices().prepareDeleteTemplate("*").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(0));
+ }
+
+ @Test
+ public void testThatGetIndexTemplatesWorks() throws Exception {
+ logger.info("--> put template_1");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> get template template_1");
+ GetIndexTemplatesResponse getTemplate1Response = client().admin().indices().prepareGetTemplates("template_1").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(1));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue()));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getTemplate(), is("te*"));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getOrder(), is(0));
+
+ logger.info("--> get non-existing-template");
+ GetIndexTemplatesResponse getTemplate2Response = client().admin().indices().prepareGetTemplates("non-existing-template").execute().actionGet();
+ assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0));
+ }
+
+ @Test
+ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception {
+ logger.info("--> put template_1");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> put template_2");
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> put template3");
+ client().admin().indices().preparePutTemplate("template3")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> get template template_*");
+ GetIndexTemplatesResponse getTemplate1Response = client().admin().indices().prepareGetTemplates("template_*").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));
+
+ List<String> templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));
+
+ logger.info("--> get all templates");
+ getTemplate1Response = client().admin().indices().prepareGetTemplates("template*").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(3));
+
+ templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(2).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2", "template3"));
+
+ logger.info("--> get templates template_1 and template_2");
+ getTemplate1Response = client().admin().indices().prepareGetTemplates("template_1", "template_2").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));
+
+ templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));
+ }
+
+ @Test
+ public void testThatInvalidGetIndexTemplatesFails() throws Exception {
+ logger.info("--> get template null");
+ testExpectActionRequestValidationException(null);
+
+ logger.info("--> get template empty");
+ testExpectActionRequestValidationException("");
+
+ logger.info("--> get template 'a', '', 'c'");
+ testExpectActionRequestValidationException("a", "", "c");
+
+ logger.info("--> get template 'a', null, 'c'");
+ testExpectActionRequestValidationException("a", null, "c");
+ }
+
+ private void testExpectActionRequestValidationException(String... names) {
+ assertThrows(client().admin().indices().prepareGetTemplates(names),
+ ActionRequestValidationException.class,
+ "get template with " + Arrays.toString(names));
+ }
+
+ @Test
+ public void testBrokenMapping() throws Exception {
+ // clean up all templates set up by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check that getting all templates returns nothing when none are registered.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addMapping("type1", "abcde")
+ .get();
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ assertThat(response.getIndexTemplates().get(0).getMappings().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getMappings().get("type1").string(), equalTo("abcde"));
+
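+ // the broken mapping source is stored as-is; it is only parsed once an index actually matches the template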
+ try {
+ createIndex("test");
+ fail("create index should have failed due to broken index templates mapping");
+ } catch(ElasticsearchParseException e) {
+ // expected: the stored mapping cannot be parsed when the template is applied
+ }
+ }
+
+ @Test
+ public void testInvalidSettings() throws Exception {
+ // clean up all templates set up by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check that getting all templates returns nothing when none are registered.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
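+ // unknown settings are not validated when the template is stored, so the put below succeeds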
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setSettings(ImmutableSettings.builder().put("does_not_exist", "test"))
+ .get();
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ assertThat(response.getIndexTemplates().get(0).getSettings().getAsMap().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getSettings().get("index.does_not_exist"), equalTo("test"));
+
+ createIndex("test");
+
+ // the unknown setting has no effect but is stored among the index settings
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getIndexToSettings().get("test").getAsMap().get("index.does_not_exist"), equalTo("test"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/template/template0.json b/src/test/java/org/elasticsearch/indices/template/template0.json
new file mode 100644
index 0000000..3b2ace1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template0.json
@@ -0,0 +1,7 @@
+{
+ "template" : "foo*",
+ "settings" : {
+ "index.number_of_shards": 10,
+ "index.number_of_replicas": 0
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template1.json b/src/test/java/org/elasticsearch/indices/template/template1.json
new file mode 100644
index 0000000..f918668
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template1.json
@@ -0,0 +1,7 @@
+{
+ "template" : "foo*",
+ "settings" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template2.json b/src/test/java/org/elasticsearch/indices/template/template2.json
new file mode 100644
index 0000000..c48169f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template2.json
@@ -0,0 +1,9 @@
+{
+ "template" : "foo*",
+ "settings" : {
+ "index" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template3.json b/src/test/java/org/elasticsearch/indices/template/template3.json
new file mode 100644
index 0000000..3114cd6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template3.json
@@ -0,0 +1,9 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "settings" : {
+ "index.number_of_shards": 10,
+ "index.number_of_replicas": 0
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template4.json b/src/test/java/org/elasticsearch/indices/template/template4.json
new file mode 100644
index 0000000..674f631
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template4.json
@@ -0,0 +1,9 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "settings" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template5.json b/src/test/java/org/elasticsearch/indices/template/template5.json
new file mode 100644
index 0000000..c8192c2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template5.json
@@ -0,0 +1,11 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "settings" : {
+ "index" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java
new file mode 100644
index 0000000..5a056c5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(numNodes=0, scope=Scope.TEST)
+public class LocalGatewayIndicesWarmerTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(LocalGatewayIndicesWarmerTests.class);
+
+ @Test
+ public void testStatePersistence() throws Exception {
+
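+ // with the local gateway, index metadata (including warmers) is persisted to disk and must survive a full restart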
+ logger.info("--> starting 1 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+
+ logger.info("--> putting two templates");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value1")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+ putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
+ .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value2")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> put template with warmer");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"xxx\",\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+
+ logger.info("--> verify warmers are registered in cluster state");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(2));
+
+ IndexWarmersMetaData templateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
+ assertThat(templateWarmers, Matchers.notNullValue());
+ assertThat(templateWarmers.entries().size(), equalTo(1));
+
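+ // full restart of the only node; onNodeStopped supplies the settings the node comes back up with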
+ logger.info("--> restarting the node");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.type", "local").build();
+ }
+ });
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify warmers are recovered");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
+ for (int i = 0; i < warmersMetaData.entries().size(); i++) {
+ assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
+ assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
+ }
+
+ logger.info("--> verify warmers in template are recovered");
+ IndexWarmersMetaData recoveredTemplateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredTemplateWarmers.entries().size(), equalTo(templateWarmers.entries().size()));
+ for (int i = 0; i < templateWarmers.entries().size(); i++) {
+ assertThat(recoveredTemplateWarmers.entries().get(i).name(), equalTo(templateWarmers.entries().get(i).name()));
+ assertThat(recoveredTemplateWarmers.entries().get(i).source(), equalTo(templateWarmers.entries().get(i).source()));
+ }
+
+
+ logger.info("--> delete warmer warmer_1");
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("warmer_1").execute().actionGet();
+ assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> verify warmers (delete) are registered in cluster state");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ logger.info("--> restarting the node");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.type", "local").build();
+ }
+ });
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify warmers are recovered");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
+ for (int i = 0; i < warmersMetaData.entries().size(); i++) {
+ assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
+ assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java
new file mode 100644
index 0000000..fc1d7a1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.mapper.FieldMapper.Loading;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.warmer.IndexWarmerMissingException;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void simpleWarmerTests() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.termQuery("field", "value1")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+ putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a2").setQuery(QueryBuilders.termQuery("field", "value2")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
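+ // getting warmers accepts index wildcards: "tes*" resolves to the "test" index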
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("tes*")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_*")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_1")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a*").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
+
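+ // warmer_2 was registered for type "a2", so filtering on type "a1" matches nothing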
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a1").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ public void templateWarmer() {
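+ // warmers defined in an index template are applied to every index the template matches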
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"*\",\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+ ensureGreen();
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ }
+
+ @Test
+ public void createIndexWarmer() {
+ client().admin().indices().prepareCreate("test")
+ .setSource("{\n" +
+ " \"settings\" : {\n" +
+ " \"index.number_of_shards\" : 1\n" +
+ " },\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ }
+
+ @Test
+ public void deleteNonExistentIndexWarmerTest() {
+ createIndex("test");
+
+ try {
+ client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("foo").execute().actionGet(1000);
+ fail("warmer foo should not exist");
+ } catch (IndexWarmerMissingException ex) {
+ assertThat(ex.names()[0], equalTo("foo"));
+ }
+ }
+
+ @Test
+ public void deleteIndexWarmerTest() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(1));
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
+
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").get();
+ assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(0));
+ }
+
+ @Test // issue 3246
+ public void ensureThatIndexWarmersCanBeChangedOnRuntime() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1, "index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ client().prepareIndex("test", "test", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
+
+ logger.info("--> Disabling warmers execution");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put("index.warmer.enabled", false)).execute().actionGet();
+
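+ // the indexing/refresh above already triggered the warmer, so at least one run is recorded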
+ long warmerRunsAfterDisabling = getWarmerRuns();
+ assertThat(warmerRunsAfterDisabling, greaterThanOrEqualTo(1L));
+
+ client().prepareIndex("test", "test", "2").setSource("foo2", "bar2").setRefresh(true).execute().actionGet();
+
+ assertThat(getWarmerRuns(), equalTo(warmerRunsAfterDisabling));
+ }
+
+ @Test
+ public void gettingAllWarmersUsingAllAndWildcardsShouldWork() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1, "index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ PutWarmerResponse anotherPutWarmerResponse = client().admin().indices().preparePutWarmer("second_custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(anotherPutWarmerResponse.isAcknowledged(), equalTo(true));
+
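+ // warmers() is keyed by index, so size() counts indices (one), not the two registered warmers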
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("*").addWarmers("*").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("_all").addWarmers("_all").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("t*").addWarmers("c*").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("custom_warmer", "second_custom_warmer").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+ }
+
+ private long getWarmerRuns() {
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setWarmer(true).execute().actionGet();
+ return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total();
+ }
+
+ private long getSegmentsMemoryUsage(String idx) {
+ IndicesSegmentResponse response = client().admin().indices().segments(Requests.indicesSegmentsRequest(idx)).actionGet(); // use the idx parameter, not a hardcoded name
+ IndexSegments indicesSegments = response.getIndices().get(idx);
+ long total = 0;
+ for (IndexShardSegments indexShardSegments : indicesSegments) {
+ for (ShardSegments shardSegments : indexShardSegments) {
+ for (Segment segment : shardSegments) {
+ System.out.println("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getIndex());
+ total += segment.memoryInBytes;
+ }
+ }
+ }
+ return total;
+ }
+
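+ // strategies for creating an index with lazy or eager norms loading, configured globally or per field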
+ private enum LoadingMethod {
+ LAZY {
+ @Override
+ void createIndex(String indexName, String type, String fieldName) {
+ client().admin().indices().prepareCreate(indexName).setSettings(ImmutableSettings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).execute().actionGet();
+ }
+ },
+ EAGER {
+ @Override
+ void createIndex(String indexName, String type, String fieldName) {
+ client().admin().indices().prepareCreate(indexName).setSettings(ImmutableSettings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.EAGER_VALUE)).execute().actionGet();
+ }
+ @Override
+ boolean isLazy() {
+ return false;
+ }
+ },
+ EAGER_PER_FIELD {
+ @Override
+ void createIndex(String indexName, String type, String fieldName) throws Exception {
+ client().admin().indices().prepareCreate(indexName).setSettings(ImmutableSettings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).addMapping(type, JsonXContent.contentBuilder()
+ .startObject(type)
+ .startObject("properties")
+ .startObject(fieldName)
+ .field("type", "string")
+ .startObject("norms")
+ .field("loading", Loading.EAGER_VALUE)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ }
+ @Override
+ boolean isLazy() {
+ return false;
+ }
+ };
+ private static Settings SINGLE_SHARD_NO_REPLICA = ImmutableSettings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
+ abstract void createIndex(String indexName, String type, String fieldName) throws Exception;
+ boolean isLazy() {
+ return true;
+ }
+ }
+
+ static {
+ assertTrue("remove me when LUCENE-5373 is fixed", Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_46);
+ }
+
+ @Ignore("enable me when LUCENE-5373 is fixed, see assertion above")
+ public void testEagerLoading() throws Exception {
+ for (LoadingMethod method : LoadingMethod.values()) {
+ System.out.println("METHOD " + method);
+ method.createIndex("idx", "t", "foo");
+ client().prepareIndex("idx", "t", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
+ long memoryUsage0 = getSegmentsMemoryUsage("idx");
+ // queries load norms if they were not loaded before
+ client().prepareSearch("idx").setQuery(QueryBuilders.matchQuery("foo", "bar")).execute().actionGet();
+ long memoryUsage1 = getSegmentsMemoryUsage("idx");
+ if (method.isLazy()) {
+ assertThat(memoryUsage1, greaterThan(memoryUsage0));
+ } else {
+ assertThat(memoryUsage1, equalTo(memoryUsage0));
+ }
+ cluster().wipeIndices("idx");
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java b/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java
new file mode 100644
index 0000000..4d671bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.mget;
+
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleMgetTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "test", "1").setSource(jsonBuilder().startObject().field("foo", "bar").endObject()).setRefresh(true).execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1"))
+ .execute().actionGet();
+ assertThat(mgetResponse.getResponses().length, is(2));
+
+ assertThat(mgetResponse.getResponses()[0].getIndex(), is("test"));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+
+ assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex"));
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("[nonExistingIndex] missing"));
+
+
+ mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1"))
+ .execute().actionGet();
+ assertThat(mgetResponse.getResponses().length, is(1));
+ assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex"));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("[nonExistingIndex] missing"));
+
+ }
+
+ @Test
+ public void testThatParentPerDocumentIsSupported() throws Exception {
+ createIndex("test");
+ ensureYellow();
+ client().admin().indices().preparePutMapping("test").setType("test").setSource(jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .startObject("_parent")
+ .field("type", "foo")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).get();
+
+ client().prepareIndex("test", "test", "1").setParent("4").setRefresh(true)
+ .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
+ .execute().actionGet();
+
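+ // the second item omits the parent, so routing cannot be derived and the get must fail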
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1").parent("4"))
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .execute().actionGet();
+
+ assertThat(mgetResponse.getResponses().length, is(2));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true));
+
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[1].getResponse(), nullValue());
+ assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testThatSourceFilteringIsSupported() throws Exception {
+ createIndex("test");
+ ensureYellow();
+ BytesReference sourceBytesRef = jsonBuilder().startObject()
+ .field("field", "1", "2")
+ .startObject("included").field("field", "should be seen").field("hidden_field", "should not be seen").endObject()
+ .field("excluded", "should not be seen")
+ .endObject().bytes();
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource(sourceBytesRef).get();
+ }
+
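+ // even ids: fetch _source filtered to "included" minus any "hidden_field"; odd ids: skip _source entirely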
+ MultiGetRequestBuilder request = client().prepareMultiGet();
+ for (int i = 0; i < 100; i++) {
+ if (i % 2 == 0) {
+ request.add(new MultiGetRequest.Item("test", "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext("included", "*.hidden_field")));
+ } else {
+ request.add(new MultiGetRequest.Item("test", "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext(false)));
+ }
+ }
+
+ MultiGetResponse response = request.get();
+
+ assertThat(response.getResponses().length, equalTo(100));
+ for (int i = 0; i < 100; i++) {
+ MultiGetItemResponse responseItem = response.getResponses()[i];
+ if (i % 2 == 0) {
+ Map<String, Object> source = responseItem.getResponse().getSourceAsMap();
+ assertThat(source.size(), equalTo(1));
+ assertThat(source, hasKey("included"));
+ assertThat(((Map<String, Object>) source.get("included")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) source.get("included")), hasKey("field"));
+ } else {
+ assertThat(responseItem.getResponse().getSourceAsBytes(), nullValue());
+ }
+ }
+
+
+ }
+
+ @Test
+ public void testThatRoutingPerDocumentIsSupported() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "test", "1").setRefresh(true).setRouting("bar")
+ .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
+ .execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1").routing("bar"))
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .execute().actionGet();
+
+ assertThat(mgetResponse.getResponses().length, is(2));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true));
+
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[1].getResponse().isExists(), is(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java b/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java
new file mode 100644
index 0000000..40ad9ee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.mlt;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisFieldQuery;
+import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleMoreLikeThis() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis");
+ SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 1l);
+ }
+
+
+ @Test
+ public void testSimpleMoreLikeOnLongField() throws Exception {
+ logger.info("Creating index test");
+ createIndex("test");
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("some_long", 1367484649580l).endObject())).actionGet();
+ client().index(indexRequest("test").type("type2").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("some_long", -666).endObject())).actionGet();
+
+
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
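+ // long fields are unsupported by more-like-this, so the request is expected to find nothing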
+ logger.info("Running moreLikeThis");
+ SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 0l);
+ }
+
+
+ @Test
+ public void testMoreLikeThisWithAliases() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+ logger.info("Creating aliases alias release");
+ client().admin().indices().aliases(indexAliasesRequest().addAlias("release", termFilter("text", "release"), "test")).actionGet();
+ client().admin().indices().aliases(indexAliasesRequest().addAlias("beta", termFilter("text", "beta"), "test")).actionGet();
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene beta").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("text", "elasticsearch beta").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("4").source(jsonBuilder().startObject().field("text", "elasticsearch release").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis on index");
+ SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 2l);
+
+ logger.info("Running moreLikeThis on beta shard");
+ mltResponse = client().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 1l);
+ assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ logger.info("Running moreLikeThis on release shard");
+ mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).searchIndices("release")).actionGet();
+ assertHitCount(mltResponse, 1l);
+ assertThat(mltResponse.getHits().getAt(0).id(), equalTo("2"));
+
+ logger.info("Running moreLikeThis on alias with node client");
+ mltResponse = cluster().clientNodeClient().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 1l);
+ assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ }
+
+ @Test
+ public void testMoreLikeThisIssue2197() throws Exception {
+ Client client = client();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("foo").addMapping("bar", mapping).execute().actionGet();
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ searchResponse = client.prepareMoreLikeThis("foo", "bar", "1").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/2489
+ public void testMoreLikeWithCustomRouting() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("foo").addMapping("bar", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .setRouting("2")
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").setRouting("2").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ }
+
+ @Test
+ // See issue: https://github.com/elasticsearch/elasticsearch/issues/3039
+ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ prepareCreate("foo", 2, ImmutableSettings.builder().put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 2))
+ .addMapping("bar", mapping)
+ .execute().actionGet();
+ ensureGreen();
+
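+ // with two shards, the custom routing must travel with the internal get request or the source document is missed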
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .setRouting("4000")
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+ SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").setRouting("4000").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ }
+
+ @Test
+ // See issue https://github.com/elasticsearch/elasticsearch/issues/3252
+ public void testNumericField() throws Exception {
+ prepareCreate("test").addMapping("type", jsonBuilder()
+ .startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int_value").field("type", randomNumericType(getRandom())).endObject()
+ .startObject("string_value").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", "type", "2")
+ .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ // Implicit list of fields -> ignore numeric fields
+ SearchResponse searchResponse = client().prepareMoreLikeThis("test", "type", "1").setMinDocFreq(1).setMinTermFreq(1).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ // Explicit list of fields including numeric fields -> fail
+ assertThrows(client().prepareMoreLikeThis("test", "type", "1").setField("string_value", "int_value"), SearchPhaseExecutionException.class);
+
+ // mlt query with no field -> OK
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery().likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt query with string fields
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("string_value").likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt query with at least one numeric field -> fails by default
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index")), SearchPhaseExecutionException.class);
+
+ // mlt query with at least one numeric field -> fails when failOnUnsupportedField is set explicitly
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+
+ // mlt query with at least one numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(false)).get();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt field query on a numeric field -> fails by default
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisFieldQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1)), SearchPhaseExecutionException.class);
+
+ // mlt field query on a numeric field -> fails when failOnUnsupportedField is set explicitly
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisFieldQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(true)),
+ SearchPhaseExecutionException.class);
+
+ // mlt field query on a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisFieldQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(false)).execute().actionGet();
+ assertHitCount(searchResponse, 0l);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java
new file mode 100644
index 0000000..c286e92
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java
@@ -0,0 +1,1229 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nested;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.filter.FilterFacet;
+import org.elasticsearch.search.facet.statistical.StatisticalFacet;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleNestedTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleNested() throws Exception {
+ XContentBuilder builder = jsonBuilder().
+ startObject().
+ field("type1").
+ startObject().
+ field("properties").
+ startObject().
+ field("nested1").
+ startObject().
+ field("type").
+ value("nested").
+ endObject().
+ endObject().
+ endObject().
+ endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", builder));
+ ensureGreen();
+
+ // check that searching works when no data has been indexed
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(termQuery("_all", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // flush, so we fetch it from the index (and see that we filter out nested docs)
+ flush();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsBytes(), notNullValue());
+
+ // check the numDocs
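+ // a root doc with 2 nested objects is stored as 3 Lucene docs: 1 parent doc plus 2 hidden nested docs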
+ IndicesStatusResponse statusResponse = admin().indices().prepareStatus().get();
+ assertNoFailures(statusResponse);
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
+
+ // check that _all is working on nested docs
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("_all", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
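+ // 0 hits: nested fields live in separate hidden Lucene docs, so a plain term query on them matches nothing without a nested query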
+
+ // search for something that matches the nested doc, and see that we don't find the nested doc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ // now, do a nested query
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // add another doc, one that would match if it was not nested...
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ statusResponse = client().admin().indices().prepareStatus().get();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(6l));
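+ // 6 Lucene docs: 2 root docs, each carrying 2 hidden nested docs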
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // filter
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), nestedFilter("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // check with type prefix
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("type1.nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // check delete, so all is gone...
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "2").execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
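+ // back to 3 docs: deleting root doc "2" also removed its 2 hidden nested docs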
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void simpleNestedDeletedByQuery1() throws Exception {
+ simpleNestedDeleteByQuery(3, 0);
+ }
+
+ @Test
+ public void simpleNestedDeletedByQuery2() throws Exception {
+ simpleNestedDeleteByQuery(3, 1);
+ }
+
+ @Test
+ public void simpleNestedDeletedByQuery3() throws Exception {
+ simpleNestedDeleteByQuery(3, 2);
+ }
+
+ private void simpleNestedDeleteByQuery(int total, int docToDelete) throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.refresh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+
+ for (int i = 0; i < total; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ }
+
+
+ flush();
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(total * 3l));
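+ // each root doc is stored as 3 Lucene docs (1 parent + 2 nested)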
+
+ client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet();
+ flush();
+ refresh();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo((total * 3l) - 3));
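+ // the delete-by-query removed the targeted root doc together with its 2 nested docs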
+
+ for (int i = 0; i < total; i++) {
+ assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete));
+ }
+ }
+
+ @Test
+ public void noChildrenNestedDeletedByQuery1() throws Exception {
+ noChildrenNestedDeleteByQuery(3, 0);
+ }
+
+ @Test
+ public void noChildrenNestedDeletedByQuery2() throws Exception {
+ noChildrenNestedDeleteByQuery(3, 1);
+ }
+
+ @Test
+ public void noChildrenNestedDeletedByQuery3() throws Exception {
+ noChildrenNestedDeleteByQuery(3, 2);
+ }
+
+ private void noChildrenNestedDeleteByQuery(long total, int docToDelete) throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.refresh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+
+ for (int i = 0; i < total; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .endObject()).execute().actionGet();
+ }
+
+
+ flush();
+ refresh();
+
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(total));
+
+ client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet();
+ flush();
+ refresh();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo((total) - 1));
+
+ for (int i = 0; i < total; i++) {
+ assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete));
+ }
+ }
+
+ @Test
+ public void multiNested() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // check the numDocs
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(7l));
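+ // 7 Lucene docs: 1 root + 2 nested1 docs + 4 nested1.nested2 docs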
+
+ // do some multi nested queries
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ termQuery("nested1.field1", "1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1.nested2",
+ termQuery("nested1.nested2.field2", "2"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "4")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "4")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testFacetsSingleShard() throws Exception {
+ testFacets(1);
+ }
+
+ @Test
+ public void testFacetsMultiShards() throws Exception {
+ testFacets(3);
+ }
+
+ private void testFacets(int numberOfShards) throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numberOfShards))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested")
+ .startObject("properties")
+ .startObject("field2_1").field("type", "string").endObject()
+ .startObject("field2_2").field("type", "long").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1_1", "1").startArray("nested2").startObject().field("field2_1", "blue").field("field2_2", 5).endObject().startObject().field("field2_1", "yellow").field("field2_2", 3).endObject().endArray().endObject()
+ .startObject().field("field1_1", "4").startArray("nested2").startObject().field("field2_1", "green").field("field2_2", 6).endObject().startObject().field("field2_1", "blue").field("field2_2", 1).endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1_1", "2").startArray("nested2").startObject().field("field2_1", "yellow").field("field2_2", 10).endObject().startObject().field("field2_1", "green").field("field2_2", 8).endObject().endArray().endObject()
+ .startObject().field("field1_1", "1").startArray("nested2").startObject().field("field2_1", "blue").field("field2_2", 2).endObject().startObject().field("field2_1", "red").field("field2_2", 12).endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addFacet(FacetBuilders.termsStatsFacet("facet1").keyField("nested1.nested2.field2_1").valueField("nested1.nested2.field2_2").nested("nested1.nested2"))
+ .addFacet(FacetBuilders.statisticalFacet("facet2").field("field2_2").nested("nested1.nested2"))
+ .addFacet(FacetBuilders.statisticalFacet("facet2_blue").field("field2_2").nested("nested1.nested2")
+ .facetFilter(boolFilter().must(termFilter("field2_1", "blue"))))
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
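+ // expected over the 8 nested2 docs: blue {5,1,2} count 3 total 8, yellow {3,10} count 2 total 13, green {6,8} count 2 total 14, red {12} count 1 total 12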
+ TermsStatsFacet termsStatsFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(termsStatsFacet.getEntries().size(), equalTo(4));
+ assertThat(termsStatsFacet.getEntries().get(0).getTerm().string(), equalTo("blue"));
+ assertThat(termsStatsFacet.getEntries().get(0).getCount(), equalTo(3l));
+ assertThat(termsStatsFacet.getEntries().get(0).getTotal(), equalTo(8d));
+ assertThat(termsStatsFacet.getEntries().get(1).getTerm().string(), equalTo("yellow"));
+ assertThat(termsStatsFacet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(termsStatsFacet.getEntries().get(1).getTotal(), equalTo(13d));
+ assertThat(termsStatsFacet.getEntries().get(2).getTerm().string(), equalTo("green"));
+ assertThat(termsStatsFacet.getEntries().get(2).getCount(), equalTo(2l));
+ assertThat(termsStatsFacet.getEntries().get(2).getTotal(), equalTo(14d));
+ assertThat(termsStatsFacet.getEntries().get(3).getTerm().string(), equalTo("red"));
+ assertThat(termsStatsFacet.getEntries().get(3).getCount(), equalTo(1l));
+ assertThat(termsStatsFacet.getEntries().get(3).getTotal(), equalTo(12d));
+
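+ // stats over all 8 nested2 field2_2 values {5,3,6,1,10,8,2,12}: min 1, max 12, total 47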
+ StatisticalFacet statsFacet = searchResponse.getFacets().facet("facet2");
+ assertThat(statsFacet.getCount(), equalTo(8l));
+ assertThat(statsFacet.getMin(), equalTo(1d));
+ assertThat(statsFacet.getMax(), equalTo(12d));
+ assertThat(statsFacet.getTotal(), equalTo(47d));
+
+ StatisticalFacet blueFacet = searchResponse.getFacets().facet("facet2_blue");
+ assertThat(blueFacet.getCount(), equalTo(3l));
+ assertThat(blueFacet.getMin(), equalTo(1d));
+ assertThat(blueFacet.getMax(), equalTo(5d));
+ assertThat(blueFacet.getTotal(), equalTo(8d));
+
+ // test scoped facets (collector based)
+ searchResponse = client().prepareSearch("test")
+ .setQuery(
+ nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2_1", "blue"))
+ )
+ .addFacet(
+ FacetBuilders.termsStatsFacet("facet1")
+ .keyField("nested1.nested2.field2_1")
+ .valueField("nested1.nested2.field2_2")
+ .nested("nested1.nested2")
+ // Maybe remove the `join` option?
+ // The following also works:
+ // .facetFilter(termFilter("nested1.nested2.field2_1", "blue"))
+ .facetFilter(nestedFilter("nested1.nested2", termFilter("nested1.nested2.field2_1", "blue")).join(false))
+ )
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
+ termsStatsFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(termsStatsFacet.getEntries().size(), equalTo(1));
+ assertThat(termsStatsFacet.getEntries().get(0).getTerm().string(), equalTo("blue"));
+ assertThat(termsStatsFacet.getEntries().get(0).getCount(), equalTo(3l));
+ assertThat(termsStatsFacet.getEntries().get(0).getTotal(), equalTo(8d));
+
+ // test scoped facets (post based)
+ searchResponse = client().prepareSearch("test")
+ .setQuery(
+ nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2_1", "blue"))
+ )
+ .addFacet(
+ FacetBuilders.filterFacet("facet1")
+ .global(true)
+ .filter(rangeFilter("nested1.nested2.field2_2").gte(0).lte(2))
+ .nested("nested1.nested2")
+ .facetFilter(nestedFilter("nested1.nested2", termFilter("nested1.nested2.field2_1", "blue")).join(false))
+ )
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
+ FilterFacet filterFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(filterFacet.getCount(), equalTo(2l));
+ assertThat(filterFacet.getName(), equalTo("facet1"));
+ }
+
+ @Test
+ // When IncludeNestedDocsQuery is wrapped in a FilteredQuery, an infinite loop occurs b/c of a bug in IncludeNestedDocsQuery#advance()
+ // This IncludeNestedDocsQuery also needs to be aware of the filter from the alias
+ public void testDeleteNestedDocsWithAlias() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.refresh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "alias1", FilterBuilders.termFilter("field1", "value1")).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", "value2")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(6l));
+
+ client().prepareDeleteByQuery("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ flush();
+ refresh();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+
+ // This must be 3, otherwise child docs aren't deleted.
+ // If this is 5 then only the parent has been removed
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testExplain() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1")
+ .endObject()
+ .endArray()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1")).scoreMode("total"))
+ .setExplain(true)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ Explanation explanation = searchResponse.getHits().hits()[0].explanation();
+ assertThat(explanation.getValue(), equalTo(2f));
+ assertThat(explanation.getDescription(), equalTo("Score based on child doc range from 0 to 1"));
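+ // the explanation value 2.0 is the sum of the two matching nested docs' scores, per the "total" score mode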
+ // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2)
+// assertThat(explanation.getDetails().length, equalTo(2));
+// assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+// assertThat(explanation.getDetails()[0].getDescription(), equalTo("Child[0]"));
+// assertThat(explanation.getDetails()[1].getValue(), equalTo(1f));
+// assertThat(explanation.getDetails()[1].getDescription(), equalTo("Child[1]"));
+ }
+
+ @Test
+ public void testSimpleNestedSorting() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.refresh_interval", -1)
+ .build()
+ )
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "long")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 5)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", 2)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 1)
+ .endObject()
+ .startObject()
+ .field("field1", 2)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("field1", 3)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 3)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value + 1", "number").setNestedPath("nested1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("6.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("3.0"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value + 1", "number").setNestedPath("nested1").sortMode("sum").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ // With sort mode "sum" the script's +1 is added for each of the two nested values, so the totals come out +2
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("11.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("9.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("5.0"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "number")
+ .setNestedFilter(rangeFilter("nested1.field1").from(1).to(3))
+ .setNestedPath("nested1").sortMode("avg").order(SortOrder.DESC))
+ .execute().actionGet();
+
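+ // doc "1" has no nested docs inside the 1..3 range filter, so its sort value falls back to Double.MAX_VALUE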
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo(Double.toString(Double.MAX_VALUE)));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("1.5"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ try {
+ client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").sortMode("sum").order(SortOrder.ASC))
+ .execute().actionGet();
+ Assert.fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.getMessage(), containsString("type [string] doesn't support mode [SUM]"));
+ }
+ }
+
+ @Test
+ public void testSimpleNestedSorting_withNestedFilterMissing() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.referesh_interval", -1)
+ .build()
+ )
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 5)
+ .field("field2", true)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .field("field2", true)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", 2)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 1)
+ .field("field2", true)
+ .endObject()
+ .startObject()
+ .field("field1", 2)
+ .field("field2", true)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ // Doc whose nested docs all get filtered out when the nested filter is applied
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("field1", 3)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 3)
+ .field("field2", false)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .field("field2", false)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").setNestedFilter(termFilter("nested1.field2", true)).missing(10).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("10"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").setNestedFilter(termFilter("nested1.field2", true)).missing(10).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+ }
+
+ @Test
+ public void testSortNestedWithNestedFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("grand_parent_values").field("type", "long").endObject()
+ .startObject("parent").field("type", "nested")
+ .startObject("properties")
+ .startObject("parent_values").field("type", "long").endObject()
+ .startObject("child").field("type", "nested")
+ .startObject("properties")
+ .startObject("child_values").field("type", "long").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // child_values sum: 1 + 6 + (-1) + 5 = 11
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 1l)
+ .startObject("parent")
+ .field("filter", false)
+ .field("parent_values", 1l)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 1l)
+ .startObject("child_obj")
+ .field("value", 1l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 6l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("filter", true)
+ .field("parent_values", 2l)
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", -1l)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 5l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ // child_values sum: 2 + 4 + (-2) + 3 = 7
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 2l)
+ .startObject("parent")
+ .field("filter", false)
+ .field("parent_values", 2l)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 2l)
+ .startObject("child_obj")
+ .field("value", 2l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 4l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("parent_values", 3l)
+ .field("filter", true)
+ .startObject("child")
+ .field("child_values", -2l)
+ .field("filter", false)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 3l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ // child_values sum: 3 + 1 + (-3) + 1 = 2
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 3l)
+ .startObject("parent")
+ .field("parent_values", 3l)
+ .field("filter", false)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 3l)
+ .startObject("child_obj")
+ .field("value", 3l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 1l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("parent_values", 4l)
+ .field("filter", true)
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", -3l)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 1l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Without nested filter
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("-3"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("-2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("-1"));
+
+ // With nested filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // The nested path should be detected automatically; expect the same results as the search request above
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.parent_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.filter", false))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.filter", false))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ // TODO: If we expose ToChildBlockJoinQuery we can filter sort values based on higher-level nested objects
+// assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+// assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("-3"));
+// assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+// assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("-2"));
+// assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+// assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("-1"));
+
+ // Check if closest nested type is resolved
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_obj.value")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort mode: sum
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("sum")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("7"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("11"));
+
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("sum")
+ .order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("11"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("7"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ // Sort mode: sum with filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .sortMode("sum")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort mode: avg
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
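+ // the "avg" sort values are long-truncated averages of the 4 child values: 2/4 -> 0, 7/4 -> 1, 11/4 -> 2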
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("0"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("avg")
+ .order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("0"));
+
+ // Sort mode: avg with filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
new file mode 100644
index 0000000..8db6fd4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.internal;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class InternalSettingsPreparerTests extends ElasticsearchTestCase {
+ @Before
+ public void setupSystemProperties() {
+ System.setProperty("es.node.zone", "foo");
+ }
+
+ @After
+ public void cleanupSystemProperties() {
+ System.clearProperty("es.node.zone");
+ }
+
+ @Test
+ public void testIgnoreSystemProperties() {
+ Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder().put("node.zone", "bar").build(), true);
+ // Should use setting from the system property
+ assertThat(tuple.v1().get("node.zone"), equalTo("foo"));
+
+ tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder().put("config.ignore_system_properties", true).put("node.zone", "bar").build(), true);
+ // Should use the provided setting, since system properties are ignored
+ assertThat(tuple.v1().get("node.zone"), equalTo("bar"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java b/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java
new file mode 100644
index 0000000..6104a7c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo;
+
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.nodesinfo.plugin.dummy1.TestPlugin;
+import org.elasticsearch.nodesinfo.plugin.dummy2.TestNoVersionPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Collections;
+import java.util.List;
+
+import static com.google.common.base.Predicates.and;
+import static com.google.common.base.Predicates.isNull;
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.client.Requests.nodesInfoRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class SimpleNodesInfoTests extends ElasticsearchIntegrationTest {
+
+ static final class Fields {
+ static final String SITE_PLUGIN = "dummy";
+ static final String SITE_PLUGIN_DESCRIPTION = "This is a description for a dummy test site plugin.";
+ static final String SITE_PLUGIN_VERSION = "0.0.7-BOND-SITE";
+ }
+
+
+ @Test
+ public void testNodesInfos() {
+ final String node_1 = cluster().startNode();
+ final String node_2 = cluster().startNode();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ String server1NodeId = cluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
+ String server2NodeId = cluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
+ logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
+ assertThat(response.getNodes().length, is(2));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest()).actionGet();
+ assertThat(response.getNodes().length, is(2));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server1NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server1NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server2NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server2NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+ }
+
+    /**
+     * Use case is to start 4 nodes:
+     * <ul>
+     * <li>1 : no plugin</li>
+     * <li>2 : one site plugin (with an es-plugin.properties file)</li>
+     * <li>3 : one java plugin</li>
+     * <li>4 : one site plugin and 2 java plugins (including the previous one)</li>
+     * </ul>
+     * We test here that the NodesInfo API with the plugin option gives us the right results.
+     * @throws URISyntaxException
+     */
+ @Test
+ public void testNodeInfoPlugin() throws URISyntaxException {
+        // We start four nodes
+        // The first has no plugin
+        String server1NodeId = startNodeWithPlugins(1);
+        // The second has one site plugin with an es-plugin.properties file (description and version)
+        String server2NodeId = startNodeWithPlugins(2);
+        // The third has one java plugin
+        String server3NodeId = startNodeWithPlugins(3, TestPlugin.class.getName());
+        // The fourth has one java plugin and one site plugin
+        String server4NodeId = startNodeWithPlugins(4, TestNoVersionPlugin.class.getName());
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugin(true).execute().actionGet();
+ logger.info("--> full json answer, status " + response.toString());
+
+ assertNodeContainsPlugins(response, server1NodeId,
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
+
+ assertNodeContainsPlugins(response, server2NodeId,
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
+ Lists.newArrayList(Fields.SITE_PLUGIN), // Site Plugin
+ Lists.newArrayList(Fields.SITE_PLUGIN_DESCRIPTION),
+ Lists.newArrayList(Fields.SITE_PLUGIN_VERSION));
+
+ assertNodeContainsPlugins(response, server3NodeId,
+ Lists.newArrayList(TestPlugin.Fields.NAME), // JVM Plugin
+ Lists.newArrayList(TestPlugin.Fields.DESCRIPTION),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No site Plugin
+
+ assertNodeContainsPlugins(response, server4NodeId,
+ Lists.newArrayList(TestNoVersionPlugin.Fields.NAME), // JVM Plugin
+ Lists.newArrayList(TestNoVersionPlugin.Fields.DESCRIPTION),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
+ Lists.newArrayList(Fields.SITE_PLUGIN, TestNoVersionPlugin.Fields.NAME),// Site Plugin
+ Lists.newArrayList(PluginInfo.DESCRIPTION_NOT_AVAILABLE),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE));
+ }
+
+ private void assertNodeContainsPlugins(NodesInfoResponse response, String nodeId,
+ List<String> expectedJvmPluginNames,
+ List<String> expectedJvmPluginDescriptions,
+ List<String> expectedJvmVersions,
+ List<String> expectedSitePluginNames,
+ List<String> expectedSitePluginDescriptions,
+ List<String> expectedSiteVersions) {
+
+ assertThat(response.getNodesMap().get(nodeId), notNullValue());
+
+ PluginsInfo plugins = response.getNodesMap().get(nodeId).getPlugins();
+ assertThat(plugins, notNullValue());
+
+ List<String> pluginNames = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(nameFunction).toList();
+ for (String expectedJvmPluginName : expectedJvmPluginNames) {
+ assertThat(pluginNames, hasItem(expectedJvmPluginName));
+ }
+
+ List<String> pluginDescriptions = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(descriptionFunction).toList();
+ for (String expectedJvmPluginDescription : expectedJvmPluginDescriptions) {
+ assertThat(pluginDescriptions, hasItem(expectedJvmPluginDescription));
+ }
+
+ List<String> jvmPluginVersions = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(versionFunction).toList();
+ for (String pluginVersion : expectedJvmVersions) {
+ assertThat(jvmPluginVersions, hasItem(pluginVersion));
+ }
+
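+        // JVM-only plugins are expected to expose no URL.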
+        FluentIterable<String> jvmUrls = FluentIterable.from(plugins.getInfos())
+                .filter(and(jvmPluginPredicate, Predicates.not(sitePluginPredicate)))
+                .transform(urlFunction)
+                .filter(Predicates.notNull());
+        assertThat(Iterables.size(jvmUrls), is(0));
+
+ List<String> sitePluginNames = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(nameFunction).toList();
+ for (String expectedSitePluginName : expectedSitePluginNames) {
+ assertThat(sitePluginNames, hasItem(expectedSitePluginName));
+ }
+
+ List<String> sitePluginDescriptions = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(descriptionFunction).toList();
+ for (String sitePluginDescription : expectedSitePluginDescriptions) {
+ assertThat(sitePluginDescriptions, hasItem(sitePluginDescription));
+ }
+
+ List<String> sitePluginUrls = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(urlFunction).toList();
+ assertThat(sitePluginUrls, not(contains(nullValue())));
+
+
+ List<String> sitePluginVersions = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(versionFunction).toList();
+ for (String pluginVersion : expectedSiteVersions) {
+ assertThat(sitePluginVersions, hasItem(pluginVersion));
+ }
+ }
+
+ private String startNodeWithPlugins(int nodeId, String ... pluginClassNames) throws URISyntaxException {
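+        // Each node loads site plugins from a per-node test-resources directory (path.plugins) and JVM plugins via the plugin.types setting.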
+ URL resource = SimpleNodesInfoTests.class.getResource("/org/elasticsearch/nodesinfo/node" + Integer.toString(nodeId) + "/");
+ ImmutableSettings.Builder settings = settingsBuilder();
+ if (resource != null) {
+ settings.put("path.plugins", new File(resource.toURI()).getAbsolutePath());
+ }
+
+ if (pluginClassNames.length > 0) {
+ settings.putArray("plugin.types", pluginClassNames);
+ }
+
+ String nodeName = cluster().startNode(settings);
+
+ // We wait for a Green status
+ client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+
+ String serverNodeId = cluster().getInstance(ClusterService.class, nodeName).state().nodes().localNodeId();
+ logger.debug("--> server {} started" + serverNodeId);
+ return serverNodeId;
+ }
+
+
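+    // Guava predicates and functions below filter PluginInfo entries by kind (jvm vs site) and project out single fields.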
+ private Predicate<PluginInfo> jvmPluginPredicate = new Predicate<PluginInfo>() {
+ public boolean apply(PluginInfo pluginInfo) {
+ return pluginInfo.isJvm();
+ }
+ };
+
+ private Predicate<PluginInfo> sitePluginPredicate = new Predicate<PluginInfo>() {
+ public boolean apply(PluginInfo pluginInfo) {
+ return pluginInfo.isSite();
+ }
+ };
+
+ private Function<PluginInfo, String> nameFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getName();
+ }
+ };
+
+ private Function<PluginInfo, String> descriptionFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getDescription();
+ }
+ };
+
+ private Function<PluginInfo, String> urlFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getUrl();
+ }
+ };
+
+ private Function<PluginInfo, String> versionFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getVersion();
+ }
+ };
+}
diff --git a/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java
new file mode 100644
index 0000000..274e5e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo.plugin.dummy1;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class TestPlugin extends AbstractPlugin {
+
+ static final public class Fields {
+ static public final String NAME = "test-plugin";
+ static public final String DESCRIPTION = NAME + " description";
+ }
+
+ @Override
+ public String name() {
+ return Fields.NAME;
+ }
+
+ @Override
+ public String description() {
+ return Fields.DESCRIPTION;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java
new file mode 100644
index 0000000..58b5ee0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo.plugin.dummy2;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class TestNoVersionPlugin extends AbstractPlugin {
+
+ static final public class Fields {
+ static public final String NAME = "test-no-version-plugin";
+ static public final String DESCRIPTION = NAME + " description";
+ }
+
+ @Override
+ public String name() {
+ return Fields.NAME;
+ }
+
+ @Override
+ public String description() {
+ return Fields.DESCRIPTION;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java
new file mode 100644
index 0000000..d752bde
--- /dev/null
+++ b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.operateAllIndices;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+    // Everything runs in one test method for performance, since the cluster scope is TEST.
+    // The cluster scope is TEST because we can't clear cluster settings between tests.
+ public void testDestructiveOperations() throws Exception {
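+        // With DestructiveOperations.REQUIRES_NAME enabled, destructive operations must reject wildcard and _all expressions; with it disabled they are allowed.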
+ Settings settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+
+ // Should succeed, since no wildcards
+ assertAcked(client().admin().indices().prepareDelete("1index").get());
+
+ try {
+ // should fail since index1 is the only index.
+ client().admin().indices().prepareDelete("i*").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ try {
+ client().admin().indices().prepareDelete("_all").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareDelete("_all").get());
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false));
+
+ // end delete index:
+ // close index:
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+ ensureYellow();// wait for primaries to be allocated
+ // Should succeed, since no wildcards
+ assertAcked(client().admin().indices().prepareClose("1index").get());
+
+ try {
+ client().admin().indices().prepareClose("_all").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+ try {
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+ try {
+ client().admin().indices().prepareClose("*").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+ try {
+ assertAcked(client().admin().indices().prepareOpen("*").get());
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().indices().prepareClose("_all").get());
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+
+ // end close index:
+ client().admin().indices().prepareDelete("_all").get();
+ // delete_by_query:
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+
+ // Should succeed, since no wildcards
+ client().prepareDeleteByQuery("1index").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ try {
+ client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ try {
+ client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get();
+ client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get();
+ // end delete_by_query:
+ client().admin().indices().prepareDelete("_all").get();
+ // delete mapping:
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+
+ assertAcked(client().admin().indices().prepareCreate("index1").addMapping("1", "field1", "type=string").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").addMapping("1", "field1", "type=string").get());
+
+ // Should succeed, since no wildcards
+ client().admin().indices().prepareDeleteMapping("1index").setType("1").get();
+ try {
+ client().admin().indices().prepareDeleteMapping("_all").setType("1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+ try {
+ client().admin().indices().prepareDeleteMapping().setIndices("*").setType("1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ settings = ImmutableSettings.builder().put(DestructiveOperations.REQUIRES_NAME, false).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().preparePutMapping("1index").setType("1").setSource("field1", "type=string"));
+ assertAcked(client().admin().indices().prepareDeleteMapping().setIndices("*").setType("1"));
+ assertAcked(client().admin().indices().preparePutMapping("1index").setType("1").setSource("field1", "type=string"));
+ assertAcked(client().admin().indices().prepareDeleteMapping("_all").setType("1"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java
new file mode 100644
index 0000000..c769277
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java
@@ -0,0 +1,399 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ *
+ */
+public class ConcurrentPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleConcurrentPercolator() throws Exception {
+ client().admin().indices().prepareCreate("index").setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build()
+ ).execute().actionGet();
+ ensureGreen();
+
+ final BytesReference onlyField1 = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .endObject().endObject().bytes();
+ final BytesReference onlyField2 = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field2", "value")
+ .endObject().endObject().bytes();
+ final BytesReference bothFields = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject().bytes();
+
+
+        // We need to index a document / define a mapping first, otherwise field1 doesn't get recognized as a number field.
+        // If we don't do this, then the 'test2' percolate query gets parsed as a TermQuery and not a RangeQuery.
+        // The percolate api doesn't parse the doc if no queries have been registered, so it can't lazily create a mapping.
+ client().prepareIndex("index", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("index", PercolatorService.TYPE_NAME, "test1")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("index", PercolatorService.TYPE_NAME, "test2")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject())
+ .execute().actionGet();
+
+ final CountDownLatch start = new CountDownLatch(1);
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final AtomicInteger counts = new AtomicInteger(0);
+ final AtomicReference<Throwable> exceptionHolder = new AtomicReference<Throwable>();
+ Thread[] threads = new Thread[5];
+
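+        // Five threads percolate concurrently, each picking one of the three sample docs per iteration, until roughly 10000 total operations complete.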
+ for (int i = 0; i < threads.length; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ start.await();
+ while (!stop.get()) {
+ int count = counts.incrementAndGet();
+                        if (count > 10000) {
+ stop.set(true);
+ }
+ PercolateResponse percolate;
+ if (count % 3 == 0) {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(bothFields)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContainingInAnyOrder("test1", "test2"));
+ } else if (count % 3 == 1) {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField2)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test1"));
+ } else {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField1)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test2"));
+ }
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ } catch (Throwable e) {
+ exceptionHolder.set(e);
+ Thread.currentThread().interrupt();
+ }
+ }
+ };
+ threads[i] = new Thread(r);
+ threads[i].start();
+ }
+
+ start.countDown();
+ for (Thread thread : threads) {
+ thread.join();
+ }
+
+ Throwable assertionError = exceptionHolder.get();
+ if (assertionError != null) {
+ assertionError.printStackTrace();
+ }
+ assertThat(assertionError + " should be null", assertionError, nullValue());
+ }
+
+ @Test
+ public void testConcurrentAddingAndPercolating() throws Exception {
+ client().admin().indices().prepareCreate("index").setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build()
+ ).execute().actionGet();
+ ensureGreen();
+ final int numIndexThreads = 3;
+ final int numPercolateThreads = 6;
+ final int numPercolatorOperationsPerThread = 1000;
+
+ final Set<Throwable> exceptionsHolder = ConcurrentCollections.newConcurrentSet();
+ final CountDownLatch start = new CountDownLatch(1);
+ final AtomicInteger runningPercolateThreads = new AtomicInteger(numPercolateThreads);
+ final AtomicInteger type1 = new AtomicInteger();
+ final AtomicInteger type2 = new AtomicInteger();
+ final AtomicInteger type3 = new AtomicInteger();
+
+ final AtomicInteger idGen = new AtomicInteger();
+
+ Thread[] indexThreads = new Thread[numIndexThreads];
+ for (int i = 0; i < numIndexThreads; i++) {
+ final Random rand = new Random(getRandom().nextLong());
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder onlyField1 = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value")).endObject();
+ XContentBuilder onlyField2 = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field2", "value")).endObject();
+ XContentBuilder field1And2 = XContentFactory.jsonBuilder().startObject()
+ .field("query", boolQuery().must(termQuery("field1", "value")).must(termQuery("field2", "value"))).endObject();
+
+ start.await();
+ while (runningPercolateThreads.get() > 0) {
+ Thread.sleep(100);
+ int x = rand.nextInt(3);
+ String id = Integer.toString(idGen.incrementAndGet());
+ IndexResponse response;
+ switch (x) {
+ case 0:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(onlyField1)
+ .execute().actionGet();
+ type1.incrementAndGet();
+ break;
+ case 1:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(onlyField2)
+ .execute().actionGet();
+ type2.incrementAndGet();
+ break;
+ case 2:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(field1And2)
+ .execute().actionGet();
+ type3.incrementAndGet();
+ break;
+ default:
+ throw new IllegalStateException("Illegal x=" + x);
+ }
+ assertThat(response.getId(), equalTo(id));
+ assertThat(response.getVersion(), equalTo(1l));
+ }
+ } catch (Throwable t) {
+ exceptionsHolder.add(t);
+ logger.error("Error in indexing thread...", t);
+ }
+ }
+ };
+ indexThreads[i] = new Thread(r);
+ indexThreads[i].start();
+ }
+
+ Thread[] percolateThreads = new Thread[numPercolateThreads];
+ for (int i = 0; i < numPercolateThreads; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder onlyField1Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .endObject().endObject();
+ XContentBuilder onlyField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field2", "value")
+ .endObject().endObject();
+ XContentBuilder field1AndField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .field("field2", "value")
+ .endObject().endObject();
+ Random random = getRandom();
+ start.await();
+ for (int counter = 0; counter < numPercolatorOperationsPerThread; counter++) {
+ int x = random.nextInt(3);
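+                            // Read the counter before percolating: registrations only grow, so at least this many queries must match.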
+ int atLeastExpected;
+ PercolateResponse response;
+ switch (x) {
+ case 0:
+ atLeastExpected = type1.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField1Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ case 1:
+ atLeastExpected = type2.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField2Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ case 2:
+ atLeastExpected = type3.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(field1AndField2Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ }
+ }
+ } catch (Throwable t) {
+ exceptionsHolder.add(t);
+ logger.error("Error in percolate thread...", t);
+ } finally {
+ runningPercolateThreads.decrementAndGet();
+ }
+ }
+ };
+ percolateThreads[i] = new Thread(r);
+ percolateThreads[i].start();
+ }
+
+ start.countDown();
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ for (Thread thread : percolateThreads) {
+ thread.join();
+ }
+
+ for (Throwable t : exceptionsHolder) {
+ logger.error("Unexpected exception {}", t.getMessage(), t);
+ }
+ assertThat(exceptionsHolder.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConcurrentAddingAndRemovingWhilePercolating() throws Exception {
+ client().admin().indices().prepareCreate("index").setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build()
+ ).execute().actionGet();
+ ensureGreen();
+ final int numIndexThreads = 3;
+ final int numberPercolateOperation = 100;
+
+ final AtomicReference<Throwable> exceptionHolder = new AtomicReference<Throwable>(null);
+ final AtomicInteger idGen = new AtomicInteger(0);
+ final Set<String> liveIds = ConcurrentCollections.newConcurrentSet();
+ final AtomicBoolean run = new AtomicBoolean(true);
+ Thread[] indexThreads = new Thread[numIndexThreads];
+ final Semaphore semaphore = new Semaphore(numIndexThreads, true);
+ for (int i = 0; i < indexThreads.length; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value")).endObject();
+ outer:
+ while (run.get()) {
+ semaphore.acquire();
+ try {
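+                            // Roughly one in five iterations deletes a random live query (when any exist); the rest register a new one.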
+ if (!liveIds.isEmpty() && getRandom().nextInt(100) < 19) {
+ String id;
+ do {
+ if (liveIds.isEmpty()) {
+ continue outer;
+ }
+ id = Integer.toString(randomInt(idGen.get()));
+ } while (!liveIds.remove(id));
+
+ DeleteResponse response = client().prepareDelete("index", PercolatorService.TYPE_NAME, id)
+ .execute().actionGet();
+ assertThat(response.getId(), equalTo(id));
+ assertThat("doc[" + id + "] should have been deleted, but isn't", response.isFound(), equalTo(true));
+ } else {
+ String id = Integer.toString(idGen.getAndIncrement());
+ IndexResponse response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(doc)
+ .execute().actionGet();
+ liveIds.add(id);
+ assertThat(response.isCreated(), equalTo(true)); // We only add new docs
+ assertThat(response.getId(), equalTo(id));
+ }
+ } finally {
+ semaphore.release();
+ }
+ }
+ } catch (InterruptedException iex) {
+ logger.error("indexing thread was interrupted...", iex);
+ run.set(false);
+ } catch (Throwable t) {
+ run.set(false);
+ exceptionHolder.set(t);
+ logger.error("Error in indexing thread...", t);
+ }
+ }
+ };
+ indexThreads[i] = new Thread(r);
+ indexThreads[i].start();
+ }
+
+ XContentBuilder percolateDoc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .endObject().endObject();
+ for (int counter = 0; counter < numberPercolateOperation; counter++) {
+ Thread.sleep(5);
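+            // Acquiring all permits blocks the indexing threads, so the set of live queries is stable and the match count is exact.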
+ semaphore.acquire(numIndexThreads);
+ try {
+ if (!run.get()) {
+ break;
+ }
+ int atLeastExpected = liveIds.size();
+ PercolateResponse response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(percolateDoc).execute().actionGet();
+ assertThat(response.getShardFailures(), emptyArray());
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, equalTo(atLeastExpected));
+ } finally {
+ semaphore.release(numIndexThreads);
+ }
+ }
+ run.set(false);
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ assertThat("exceptionHolder should have been empty, but holds: " + exceptionHolder.toString(), exceptionHolder.get(), nullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java
new file mode 100644
index 0000000..56d2ffa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBasics() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
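+        // Five percolate requests in a single multi-percolate call; the last one references a missing doc and should surface an item-level error.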
+ MultiPercolateResponse response = client().prepareMultiPercolate()
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())))
+                .add(client().preparePercolate() // non-existing doc, so this item yields an error
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5")))
+ .execute().actionGet();
+
+ MultiPercolateResponse.Item item = response.getItems()[0];
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(item.errorMessage(), nullValue());
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ item = response.getItems()[1];
+ assertThat(item.errorMessage(), nullValue());
+
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ item = response.getItems()[2];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 4l);
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ item = response.getItems()[3];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 1l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4"));
+
+ item = response.getItems()[4];
+ assertThat(item.getResponse(), nullValue());
+ assertThat(item.errorMessage(), notNullValue());
+ assertThat(item.errorMessage(), containsString("document missing"));
+ }
+
+ @Test
+ public void testExistingDocsOnly() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build())
+ .execute().actionGet();
+ ensureGreen();
+
+ int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ client().prepareIndex("test", "type", "1")
+                .setSource(jsonBuilder().startObject().field("field", "a").endObject())
+ .execute().actionGet();
+
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type"));
+ }
+
+ MultiPercolateResponse response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertMatchCount(item.response(), numQueries);
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+        // Non-existing doc
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .setIndices("test").setDocumentType("type"));
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(true));
+ assertThat(item.errorMessage(), containsString("document missing"));
+ assertThat(item.getResponse(), nullValue());
+ }
+
+ // One existing doc
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .setIndices("test").setDocumentType("type"));
+ }
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type"));
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest + 1));
+ assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false));
+ assertMatchCount(response.items()[numPercolateRequest].response(), numQueries);
+ assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+ @Test
+ public void testWithDocsOnly() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build())
+ .execute().actionGet();
+ ensureGreen();
+
+ int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
+ }
+
+ MultiPercolateResponse response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertMatchCount(item.response(), numQueries);
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+        // All requests carry illegal json; each item succeeds overall but every shard fails to parse the source
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource("illegal json"));
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertThat(item.getResponse().getSuccessfulShards(), equalTo(0));
+ assertThat(item.getResponse().getShardFailures().length, equalTo(2));
+ for (ShardOperationFailedException shardFailure : item.getResponse().getShardFailures()) {
+ assertThat(shardFailure.reason(), containsString("Failed to derive xcontent from"));
+ assertThat(shardFailure.status().getStatus(), equalTo(500));
+ }
+ }
+
+ // one valid request
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource("illegal json"));
+ }
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest + 1));
+ assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false));
+ assertMatchCount(response.items()[numPercolateRequest].response(), numQueries);
+ assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java
new file mode 100644
index 0000000..83e25ca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.percolate.PercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // Just test the integration with facets and aggregations, not the facet and aggregation functionality!
+ public void testFacetsAndAggregations() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ int numQueries = atLeast(250);
+ int numUniqueQueries = between(1, numQueries / 2);
+ String[] values = new String[numUniqueQueries];
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "value" + i;
+ }
+ int[] expectedCount = new int[numUniqueQueries];
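+        // expectedCount[i] tracks how many registered queries will match values[i].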
+
+ logger.info("--> registering {} queries", numQueries);
+ for (int i = 0; i < numQueries; i++) {
+ String value = values[i % numUniqueQueries];
+ expectedCount[i % numUniqueQueries]++;
+ QueryBuilder queryBuilder = matchQuery("field1", value);
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject())
+ .execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ for (int i = 0; i < numQueries; i++) {
+ String value = values[i % numUniqueQueries];
+ PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
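+            // Randomly exercise either the aggregation or the facet integration.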
+ boolean useAggs = randomBoolean();
+ if (useAggs) {
+ percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2"));
+ } else {
+ percolateRequestBuilder.addFacet(FacetBuilders.termsFacet("a").field("field2"));
+
+ }
+
+ if (randomBoolean()) {
+ percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+ }
+ if (randomBoolean()) {
+ percolateRequestBuilder.setScore(true);
+ } else {
+ percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+ }
+
+ boolean countOnly = randomBoolean();
+ if (countOnly) {
+ percolateRequestBuilder.setOnlyCount(countOnly);
+ }
+
+ PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+ assertMatchCount(response, expectedCount[i % numUniqueQueries]);
+ if (!countOnly) {
+ assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries]));
+ }
+
+ if (useAggs) {
+ List<Aggregation> aggregations = response.getAggregations().asList();
+ assertThat(aggregations.size(), equalTo(1));
+ assertThat(aggregations.get(0).getName(), equalTo("a"));
+ List<Terms.Bucket> buckets = new ArrayList<Terms.Bucket>(((Terms) aggregations.get(0)).getBuckets());
+ assertThat(buckets.size(), equalTo(1));
+ assertThat(buckets.get(0).getKeyAsText().string(), equalTo("b"));
+ assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length]));
+ } else {
+ assertThat(response.getFacets().facets().size(), equalTo(1));
+ assertThat(response.getFacets().facets().get(0).getName(), equalTo("a"));
+ assertThat(((TermsFacet) response.getFacets().facets().get(0)).getEntries().size(), equalTo(1));
+ assertThat(((TermsFacet) response.getFacets().facets().get(0)).getEntries().get(0).getCount(), equalTo(expectedCount[i % values.length]));
+ assertThat(((TermsFacet) response.getFacets().facets().get(0)).getEntries().get(0).getTerm().string(), equalTo("b"));
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorTests.java
new file mode 100644
index 0000000..26efb4f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/PercolatorTests.java
@@ -0,0 +1,1766 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class PercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple1() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy doc");
+ client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate doc with field1=c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate doc with field1=b c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate doc with field1=d");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+
+ logger.info("--> Search dummy doc, percolate queries must not be included");
+ SearchResponse searchResponse = client().prepareSearch("test").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ logger.info("--> Percolate non-existing doc");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5"))
+ .execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (DocumentMissingException e) {
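+ // expected: no document with id "5" has been indexed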
+ }
+ }
+
+ @Test
+ public void testSimple2() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=long"));
+
+ // introduce the doc
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject();
+
+ PercolateResponse response = client().preparePercolate().setSource(doc)
+ .setIndices("test").setDocumentType("type1")
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ assertThat(response.getMatches(), emptyArray());
+
+ // add first query...
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "test1")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject())
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+
+ // add second query...
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "test2")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject())
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc)
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("test1", "test2"));
+
+
+ client().prepareDelete("test", PercolatorService.TYPE_NAME, "test2").execute().actionGet();
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+
+ // add a range query (cached via fielddata execution); note it is indexed into a separate
+ // "test1" index, so percolating against "test" below still matches only the "test1" query
+ client().prepareIndex("test1", PercolatorService.TYPE_NAME)
+ .setSource(
+ XContentFactory.jsonBuilder().startObject().field("query",
+ constantScoreQuery(FilterBuilders.rangeFilter("field2").from(1).to(5).includeLower(true).setExecution("fielddata"))
+ ).endObject()
+ )
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+ }
+
+ @Test
+ public void testRangeFilterThatUsesFD() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=long")
+ .get();
+
+
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(
+ XContentFactory.jsonBuilder().startObject().field("query",
+ constantScoreQuery(FilterBuilders.rangeFilter("field1").from(1).to(5).setExecution("fielddata"))
+ ).endObject()
+ ).get();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setPercolateDoc(PercolateSourceBuilder.docBuilder().setDoc("field1", 3)).get();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+ }
+
+ @Test
+ public void testPercolateQueriesWithRouting() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register queries");
+ for (int i = 1; i <= 100; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .setRouting(Integer.toString(i % 2))
+ .execute().actionGet();
+ }
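+ // queries are routed by i % 2, so each routing value ("0" and "1") owns half of the 100 queries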
+
+ logger.info("--> Percolate doc with no routing");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 100l);
+ assertThat(response.getMatches(), arrayWithSize(100));
+
+ logger.info("--> Percolate doc with routing=0");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .setRouting("0")
+ .execute().actionGet();
+ assertMatchCount(response, 50l);
+ assertThat(response.getMatches(), arrayWithSize(50));
+
+ logger.info("--> Percolate doc with routing=1");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .setRouting("1")
+ .execute().actionGet();
+ assertMatchCount(response, 50l);
+ assertThat(response.getMatches(), arrayWithSize(50));
+ }
+
+ @Test
+ public void percolateOnRecreatedIndex() throws Exception {
+ prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> register a query");
+ client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ cluster().wipeIndices("test");
+ prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> register a query");
+ client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+ }
+
+ @Test
+ // see #2814
+ public void percolateCustomAnalyzer() throws Exception {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("index.analysis.analyzer.lwhitespacecomma.tokenizer", "whitespacecomma");
+ builder.putArray("index.analysis.analyzer.lwhitespacecomma.filter", "lowercase");
+ builder.put("index.analysis.tokenizer.whitespacecomma.type", "pattern");
+ builder.put("index.analysis.tokenizer.whitespacecomma.pattern", "(,|\\s+)");
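+ // the custom "whitespacecomma" tokenizer splits tokens on commas as well as whitespace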
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .startObject("properties")
+ .startObject("filingcategory").field("type", "string").field("analyzer", "lwhitespacecomma").endObject()
+ .endObject()
+ .endObject().endObject();
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("doc", mapping)
+ .setSettings(builder.put("index.number_of_shards", 1))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("source", "productizer")
+ .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryString("filingcategory:s")))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc").field("filingcategory", "s").endObject()
+ .field("query", termQuery("source", "productizer"))
+ .endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ }
+
+ @Test
+ public void createIndexAndThenRegisterPercolator() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .execute().actionGet();
+
+ refresh();
+ CountResponse countResponse = client().prepareCount()
+ .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME)
+ .execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(1l));
+
+
+ for (int i = 0; i < 10; i++) {
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ for (int i = 0; i < 10; i++) {
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setPreference("_local")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+
+ logger.info("--> delete the index");
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ logger.info("--> make sure percolated queries for it have been deleted as well");
+ countResponse = client().prepareCount()
+ .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME)
+ .execute().actionGet();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void multiplePercolators() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query 1");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ logger.info("--> register a query 2");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "green")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu"));
+
+ }
+
+ @Test
+ public void dynamicAddingRemovingQueries() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query 1");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+
+ logger.info("--> register a query 2");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "green")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu"));
+
+ logger.info("--> register a query 3");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "susu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "red")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateSourceBuilder sourceBuilder = new PercolateSourceBuilder()
+ .setDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value2").endObject()))
+ .setQueryBuilder(termQuery("color", "red"));
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(sourceBuilder)
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("susu"));
+
+ logger.info("--> deleting query 1");
+ client().prepareDelete("test", PercolatorService.TYPE_NAME, "kuku").setRefresh(true).execute().actionGet();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").startObject("type1")
+ .field("field1", "value1")
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 0l);
+ assertThat(percolate.getMatches(), emptyArray());
+ }
+
+ @Test
+ public void percolateWithSizeField() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_size").field("enabled", true).field("stored", "yes").endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ logger.info("--> percolate a document");
+ PercolateResponse percolate = client().preparePercolate().setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+ }
+
+ @Test
+ public void testPercolateStatistics() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> First percolate request");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+
+ IndicesStatsResponse indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet();
+ assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo(5l)); // We have 5 partitions
+ assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
+ assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo(2l)); // one query on the primary and one on the replica
+ assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
+
+ NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ long percolateCount = 0;
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ }
+ assertThat(percolateCount, equalTo(5l)); // We have 5 partitions
+
+ logger.info("--> Second percolate request");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+
+ indicesResponse = client().admin().indices().prepareStats().setPercolate(true).execute().actionGet();
+ assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo(10l));
+ assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
+ assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo(2l));
+ assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
+
+ percolateCount = 0;
+ nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ }
+ assertThat(percolateCount, equalTo(10l));
+
+ // We might be faster than 1ms, so run up to 1000 times until we have spent 1ms or more on percolating
+ boolean moreThanOneMs = false;
+ int counter = 3; // We have already run twice.
+ do {
+ indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet();
+ if (indicesResponse.getTotal().getPercolate().getTimeInMillis() > 0) {
+ moreThanOneMs = true;
+ break;
+ }
+
+ logger.info("--> percolate request #{}", counter);
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+ } while (++counter <= 1000);
+ assertTrue("Something is off, we should have spent at least 1ms on percolating...", moreThanOneMs);
+
+ long percolateSumTime = 0;
+ nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ percolateSumTime += nodeStats.getIndices().getPercolate().getTimeInMillis();
+ }
+ assertThat(percolateSumTime, greaterThan(0l));
+ }
+
+ @Test
+ public void testPercolatingExistingDocs() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+
+ logger.info("--> Search normal docs, percolate queries must not be included");
+ SearchResponse searchResponse = client().prepareSearch("test").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4L));
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(1).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(2).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(3).type(), equalTo("type"));
+ }
+
+ @Test
+ public void testPercolatingExistingDocs_routing() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").setRouting("4").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").setRouting("3").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").setRouting("2").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").setRouting("1").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("1").routing("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").routing("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("3").routing("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("4").routing("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+ }
+
+ @Test
+ public void testPercolatingExistingDocs_versionCheck() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> registering queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 2 and version 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(1l))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 2 and version 2");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(2l))
+ .execute().actionGet();
+ fail("Error should have been thrown");
+ } catch (VersionConflictEngineException e) {
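+ // expected: doc with id "2" is still at version 1, so percolating version 2 must fail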
+ }
+
+ logger.info("--> Index doc with id 2 for the second time");
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 2 and version 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(2l))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+ }
+
+ @Test
+ public void testPercolateMultipleIndicesAndAliases() throws Exception {
+ client().admin().indices().prepareCreate("test1")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ client().admin().indices().prepareCreate("test2")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> registering queries");
+ for (int i = 1; i <= 10; i++) {
+ String index = i % 2 == 0 ? "test1" : "test2";
+ client().prepareIndex(index, PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
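+ // queries with even ids go to test1 and odd ids to test2, so each index holds 5 queries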
+
+ logger.info("--> Percolate doc to index test1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test1").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Percolate doc to index test2");
+ response = client().preparePercolate()
+ .setIndices("test2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Percolate doc to index test1 and test2");
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 10l);
+ assertThat(response.getMatches(), arrayWithSize(10));
+
+ logger.info("--> Percolate doc to index test1 and test3, with ignore missing");
+ response = client().preparePercolate()
+ .setIndices("test1", "test3").setDocumentType("type")
+ .setIndicesOptions(IndicesOptions.lenient())
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Adding aliases");
+ IndicesAliasesResponse aliasesResponse = client().admin().indices().prepareAliases()
+ .addAlias("test1", "my-alias1")
+ .addAlias("test2", "my-alias1")
+ .addAlias("test2", "my-alias2")
+ .setTimeout(TimeValue.timeValueHours(10))
+ .execute().actionGet();
+ assertTrue(aliasesResponse.isAcknowledged());
+
+ logger.info("--> Percolate doc to my-alias1");
+ response = client().preparePercolate()
+ .setIndices("my-alias1").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 10l);
+ assertThat(response.getMatches(), arrayWithSize(10));
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getIndex().string(), anyOf(equalTo("test1"), equalTo("test2")));
+ }
+
+ logger.info("--> Percolate doc to my-alias2");
+ response = client().preparePercolate()
+ .setIndices("my-alias2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getIndex().string(), equalTo("test2"));
+ }
+
+ }
+
+ @Test
+ public void testCountPercolation() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy doc");
+ client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Count percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=b c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=d");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate non-existing doc");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("5"))
+ .execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (DocumentMissingException e) {
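+ // expected: no document with id "5" has been indexed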
+ }
+ }
+
+ @Test
+ public void testCountPercolatingExistingDocs() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Count percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), nullValue());
+ }
+
+ @Test
+ public void testPercolateSizingWithQueryAndFilter() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ int numLevels = randomIntBetween(1, 25);
+ long numQueriesPerLevel = randomIntBetween(10, 250);
+ long totalQueries = numLevels * numQueriesPerLevel;
+ logger.info("--> register " + totalQueries + " queries");
+ for (int level = 1; level <= numLevels; level++) {
+ for (int query = 1; query <= numQueriesPerLevel; query++) {
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query)
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", level).endObject())
+ .execute().actionGet();
+ }
+ }
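+ // every registered query is a match_all query, so an unfiltered percolate request matches all of them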
+
+ boolean onlyCount = randomBoolean();
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .execute().actionGet();
+ assertMatchCount(response, totalQueries);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) totalQueries));
+ }
+
+ int size = randomIntBetween(0, (int) totalQueries - 1);
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setSize(size)
+ .execute().actionGet();
+ assertMatchCount(response, totalQueries);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo(size));
+ }
+
+ // The query / filter capabilities are NOT realtime, so refresh first
+ client().admin().indices().prepareRefresh("my-index").execute().actionGet();
+
+ int runs = randomIntBetween(3, 16);
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));
+ }
+ }
+
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateFilter(termFilter("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));
+ }
+ }
+
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ size = randomIntBetween(0, (int) numQueriesPerLevel - 1);
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateFilter(termFilter("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo(size));
+ }
+ }
+ }
+
+ @Test
+ public void testPercolateScoreAndSorting() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 1)
+ .build())
+ .execute().actionGet();
+ ensureGreen();
+
+ // Add a dummy doc that should never interfere with percolate operations.
+ client().prepareIndex("my-index", "my-type", "1").setSource("field", "value").execute().actionGet();
+
+ Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<Integer, NavigableSet<Integer>>();
+ long numQueries = randomIntBetween(100, 250);
+ logger.info("--> register " + numQueries + " queries");
+ for (int i = 0; i < numQueries; i++) {
+ int value = randomInt(10);
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", i).field("field1", value).endObject())
+ .execute().actionGet();
+ if (!controlMap.containsKey(value)) {
+ controlMap.put(value, new TreeSet<Integer>());
+ }
+ controlMap.get(value).add(i);
+ }
+ List<Integer> usedValues = new ArrayList<Integer>(controlMap.keySet());
+ refresh();
+
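+ // scriptFunction("doc['level'].value") scores each matching query by its own "level" field;
+ // since "level" was set to the query's id above, a match's score equals its id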
+ // Only retrieve the score
+ int runs = randomInt(27);
+ for (int i = 0; i < runs; i++) {
+ int size = randomIntBetween(1, 50);
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, numQueries);
+ assertThat(response.getMatches().length, equalTo(size));
+ for (int j = 0; j < response.getMatches().length; j++) {
+ String id = response.getMatches()[j].getId().string();
+ assertThat(Integer.valueOf(id), equalTo((int) response.getMatches()[j].getScore()));
+ }
+ }
+
+ // Sort the queries by the score
+ for (int i = 0; i < runs; i++) {
+ int size = randomIntBetween(1, 10);
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, numQueries);
+ assertThat(response.getMatches().length, equalTo(size));
+
+ int expectedId = (int) (numQueries - 1);
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getId().string(), equalTo(Integer.toString(expectedId)));
+ assertThat(match.getScore(), equalTo((float) expectedId));
+ assertThat(match.getIndex().string(), equalTo("my-index"));
+ expectedId--;
+ }
+ }
+
+
+ for (int i = 0; i < runs; i++) {
+ int value = usedValues.get(randomInt(usedValues.size() - 1));
+ NavigableSet<Integer> levels = controlMap.get(value);
+ int size = randomIntBetween(1, levels.size());
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchQuery("field1", value), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+
+ assertMatchCount(response, levels.size());
+ assertThat(response.getMatches().length, equalTo(Math.min(levels.size(), size)));
+ Iterator<Integer> levelIterator = levels.descendingIterator();
+ for (PercolateResponse.Match match : response) {
+ int controlLevel = levelIterator.next();
+ assertThat(match.getId().string(), equalTo(Integer.toString(controlLevel)));
+ assertThat(match.getScore(), equalTo((float) controlLevel));
+ assertThat(match.getIndex().string(), equalTo("my-index"));
+ }
+ }
+ }
+
+ @Test
+ public void testPercolateSortingWithNoSize() throws Exception {
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject())
+ .execute().actionGet();
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches()[0].getId().string(), equalTo("2"));
+ assertThat(response.getMatches()[0].getScore(), equalTo(2f));
+ assertThat(response.getMatches()[1].getId().string(), equalTo("1"));
+ assertThat(response.getMatches()[1].getScore(), equalTo(1f));
+
+ response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
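+ // Presumably only the shards that actually hold the two registered queries attempt to
+ // sort and fail; shards without queries succeed trivially, hence 3 successes and 2 failures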
+ assertThat(response.getCount(), equalTo(0l));
+ assertThat(response.getSuccessfulShards(), equalTo(3));
+ assertThat(response.getShardFailures().length, equalTo(2));
+ assertThat(response.getShardFailures()[0].status().getStatus(), equalTo(400));
+ assertThat(response.getShardFailures()[0].reason(), containsString("Can't sort if size isn't specified"));
+ assertThat(response.getShardFailures()[1].status().getStatus(), equalTo(400));
+ assertThat(response.getShardFailures()[1].reason(), containsString("Can't sort if size isn't specified"));
+ }
+
+ @Test
+ public void testPercolateSorting_unsupportedField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", "field", "type=string")
+ .addMapping(PercolatorService.TYPE_NAME, "level", "type=integer", "query", "type=object,enabled=false")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .get();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject())
+ .get();
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .addSort(SortBuilders.fieldSort("level"))
+ .get();
+
+ assertThat(response.getShardFailures().length, equalTo(5));
+ assertThat(response.getShardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(response.getShardFailures()[0].reason(), containsString("Only _score desc is supported"));
+ }
+
+ @Test
+ public void testPercolateOnEmptyIndex() throws Exception {
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ @Test
+ public void testPercolateNotEmptyIndexButNoRefresh() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .execute().actionGet();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ @Test
+ public void testPercolatorWithHighlighting() throws Exception {
+ Client client = client();
+ client.admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ if (randomBoolean()) {
+ // FVH HL
+ client.admin().indices().preparePutMapping("test").setType("type")
+ .setSource(
+ jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", randomBoolean())
+ .field("term_vector", "with_positions_offsets").endObject()
+ .endObject()
+ .endObject().endObject()
+ ).get();
+ } else if (randomBoolean()) {
+ // plain hl with stored fields
+ client.admin().indices().preparePutMapping("test").setType("type")
+ .setSource(
+ jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", true).endObject()
+ .endObject()
+ .endObject().endObject()
+ ).get();
+ } else if (randomBoolean()) {
+ // positions hl
+ client.admin().indices().preparePutMapping("test").setType("type")
+ .setSource(
+ jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string")
+ .field("index_options", "offsets")
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ ).get();
+ }
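+ // if none of the branches ran, field1 stays dynamically mapped (not stored, no term
+ // vectors), and highlighting presumably falls back to the plain highlighter over the
+ // in-memory percolated document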
+
+ logger.info("--> register queries");
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown fox")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "5")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject())
+ .execute().actionGet();
+
+ logger.info("--> Percolate doc with field1=The quick brown fox jumps over the lazy dog");
+ PercolateResponse response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ PercolateResponse.Match[] matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ // Anything with a percolate query isn't realtime, so refresh first
+ client.admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Query percolate doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(matchAllQuery())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Query percolate with score for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setScore(true)
+ .execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1").highlightQuery(QueryBuilders.matchQuery("field1", "jumps")))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+
+ // Highlighting an existing doc
+ client.prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())
+ .get();
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testDeletePercolatorType() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test1"));
+ assertAcked(client().admin().indices().prepareCreate("test2"));
+
+ client().prepareIndex("test1", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test2", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ for (Client client : cluster()) {
+ GetMappingsResponse getMappingsResponse = client.admin().indices().prepareGetMappings("test1", "test2").get();
+ boolean hasPercolatorType = getMappingsResponse.getMappings().get("test1").containsKey(PercolatorService.TYPE_NAME);
+ if (!hasPercolatorType) {
+ return false;
+ }
+
+ if (!getMappingsResponse.getMappings().get("test2").containsKey(PercolatorService.TYPE_NAME)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ });
+
+ assertAcked(client().admin().indices().prepareDeleteMapping("test1").setType(PercolatorService.TYPE_NAME));
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+
+ assertAcked(client().admin().indices().prepareDeleteMapping("test2").setType(PercolatorService.TYPE_NAME));
+ // The percolate API should return 0 matches, because all docs in the _percolate type have been removed.
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ public static String[] convertFromTextArray(PercolateResponse.Match[] matches, String index) {
+ if (matches.length == 0) {
+ return Strings.EMPTY_ARRAY;
+ }
+ String[] strings = new String[matches.length];
+ for (int i = 0; i < matches.length; i++) {
+ assertEquals(index, matches[i].getIndex().string());
+ strings[i] = matches[i].getId().string();
+ }
+ return strings;
+ }
+
+ @Test
+ public void percolateNonMatchingConstantScoreQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test"));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("query", QueryBuilders.constantScoreQuery(FilterBuilders.andFilter(
+ FilterBuilders.queryFilter(QueryBuilders.queryString("root")),
+ FilterBuilders.termFilter("message", "tree"))))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc").field("message", "A new bonsai tree ").endObject()
+ .endObject())
+ .execute().actionGet();
+ assertThat(percolate.getFailedShards(), equalTo(0));
+ assertMatchCount(percolate, 0l);
+ }
+
+ @Test
+ public void testPercolationWithDynamicTemplates() throws Exception {
+ assertAcked(prepareCreate("idx").addMapping("type", jsonBuilder().startObject().startObject("type")
+ .field("dynamic", false)
+ .startObject("properties")
+ .startObject("custom")
+ .field("dynamic", true)
+ .field("type", "object")
+ .field("incude_in_all", false)
+ .endObject()
+ .endObject()
+ .startArray("dynamic_template")
+ .startObject()
+ .startObject("custom_fields")
+ .field("path_match", "custom.*")
+ .startObject("mapping")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().endObject()));
+
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryString("color:red")).endObject())
+ .get();
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryString("color:blue")).endObject())
+ .get();
+
+ PercolateResponse percolateResponse = client().preparePercolate().setDocumentType("type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject()))
+ .get();
+
+ assertMatchCount(percolateResponse, 0l);
+ assertThat(percolateResponse.getMatches(), arrayWithSize(0));
+
+ // wait until the mapping change has propagated from the percolate request
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
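+ // An empty pending-tasks queue means the dynamic mapping update triggered by the percolate
+ // request has been applied cluster-wide.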
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ PendingClusterTasksResponse pendingTasks = client().admin().cluster().preparePendingClusterTasks().get();
+ return pendingTasks.pendingTasks().isEmpty();
+ }
+ });
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+
+ // The previous percolate request introduced the custom.color field, so now we register the query again;
+ // the field name `color` now resolves to the `custom.color` field in the mapping via smart field-name resolution.
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryString("color:blue")).endObject())
+ .get();
+
+ // The second request will yield a match, since the query now gets parsed against the proper field.
+ percolateResponse = client().preparePercolate().setDocumentType("type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject()))
+ .get();
+
+ assertMatchCount(percolateResponse, 1l);
+ assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("2"));
+ }
+
+ @Test
+ public void testUpdateMappingDynamicallyWhilePercolating() throws Exception {
+ createIndex("test");
+ ensureSearchable();
+
+ // percolation source
+ XContentBuilder percolateDocumentSource = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(percolateDocumentSource).execute().actionGet();
+ assertAllSuccessful(response);
+ assertMatchCount(response, 0l);
+ assertThat(response.getMatches(), arrayWithSize(0));
+
+ // wait until the mapping change has propagated
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ PendingClusterTasksResponse pendingTasks = client().admin().cluster().preparePendingClusterTasks().get();
+ return pendingTasks.pendingTasks().isEmpty();
+ }
+ });
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+
+ GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ assertThat(mappingsResponse.getMappings().get("test"), notNullValue());
+ assertThat(mappingsResponse.getMappings().get("test").get("type1"), notNullValue());
+ assertThat(mappingsResponse.getMappings().get("test").get("type1").getSourceAsMap().isEmpty(), is(false));
+ Map<String, Object> properties = (Map<String, Object>) mappingsResponse.getMappings().get("test").get("type1").getSourceAsMap().get("properties");
+ assertThat(((Map<String, String>) properties.get("field1")).get("type"), equalTo("long"));
+ assertThat(((Map<String, String>) properties.get("field2")).get("type"), equalTo("string"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java
new file mode 100644
index 0000000..de63c3c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java
@@ -0,0 +1,443 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testRestartNodePercolator1() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(super.indexSettings())
+ .put("gateway.type", "local")
+ .build();
+ cluster().startNode(settings);
+ client().admin().indices().prepareCreate("test").setSettings(
+ settingsBuilder().put("index.number_of_shards", 1).put()
+ ).execute().actionGet();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ cluster().rollingRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ @Test
+ @Slow
+ public void testRestartNodePercolator2() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(super.indexSettings())
+ .put("gateway.type", "local")
+ .build();
+ cluster().startNode(settings);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ cluster().rollingRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+
+ DeleteIndexResponse actionGet = client().admin().indices().prepareDelete("test").execute().actionGet();
+ assertThat(actionGet.isAcknowledged(), equalTo(true));
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 0l);
+ assertThat(percolate.getMatches(), emptyArray());
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ @Test
+ @Slow
+ @TestLogging("index.percolator:TRACE,percolator:TRACE")
+ public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(super.indexSettings())
+ .put("gateway.type", "local")
+ .build();
+ logger.info("--> Starting 2 nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ client().admin().indices().prepareDelete("_all").execute().actionGet();
+ ensureGreen();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy docs");
+ client().prepareIndex("test", "type1", "1").setSource("field1", 0).execute().actionGet();
+ client().prepareIndex("test", "type2", "1").setSource("field1", "0").execute().actionGet();
+
+ logger.info("--> register a queries");
+ for (int i = 1; i <= 100; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", rangeQuery("field1").from(0).to(i))
+ // The type must be set now, because two fields with the same name exist in different types.
+ // Setting the type to `type1`, makes sure that the range query gets parsed to a Lucene NumericRangeQuery.
+ .field("type", "type1")
+ .endObject())
+ .execute().actionGet();
+ }
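+ // Each query i matches docs with 0 <= field1 <= i, so a doc with field1=95 is matched by
+ // queries 95..100, i.e. the 6 matches asserted below.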
+
+ logger.info("--> Percolate doc with field1=95");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 95).endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 6l);
+ assertThat(response.getMatches(), arrayWithSize(6));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("95", "96", "97", "98", "99", "100"));
+
+ logger.info("--> Close and open index to trigger percolate queries loading...");
+ assertAcked(client().admin().indices().prepareClose("test"));
+ assertAcked(client().admin().indices().prepareOpen("test"));
+ ensureGreen();
+
+ logger.info("--> Percolate doc with field1=100");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 100).endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("100"));
+ }
+
+ @Test
+ @Slow
+ public void testSinglePercolator_recovery() throws Exception {
+ percolatorRecovery(false);
+ }
+
+ @Test
+ @Slow
+ public void testMultiPercolator_recovery() throws Exception {
+ percolatorRecovery(true);
+ }
+
+ // 3 nodes, 2 primaries and 2 replicas per primary, so each node should have a copy of the data.
+ // We only start and stop nodes 2 and 3, so all requests should succeed and never be partial.
+ private void percolatorRecovery(final boolean multiPercolate) throws Exception {
+ logger.info("--> ensuring exactly 2 nodes");
+ cluster().ensureAtLeastNumNodes(2);
+ cluster().ensureAtMostNumNodes(2);
+ logger.info("--> Adding 3th node");
+ cluster().startNode(settingsBuilder().put("node.stay", true));
+
+ client().admin().indices().prepareDelete("_all").execute().actionGet();
+ ensureGreen();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 2)
+ )
+ .execute().actionGet();
+ ensureGreen();
+
+ final Client client = cluster().client(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return input.getAsBoolean("node.stay", false);
+ }
+ });
+ final int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ client.prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field", "a"))
+ .execute().actionGet();
+
+ final AtomicBoolean run = new AtomicBoolean(true);
+ final CountDownLatch done = new CountDownLatch(1);
+ final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder doc = jsonBuilder().startObject().field("field", "a").endObject();
+ while (run.get()) {
+ NodesInfoResponse nodesInfoResponse = client.admin().cluster().prepareNodesInfo()
+ .execute().actionGet();
+ String node2Id = null;
+ String node3Id = null;
+ for (NodeInfo nodeInfo : nodesInfoResponse) {
+ if ("node2".equals(nodeInfo.getNode().getName())) {
+ node2Id = nodeInfo.getNode().id();
+ } else if ("node3".equals(nodeInfo.getNode().getName())) {
+ node3Id = nodeInfo.getNode().id();
+ }
+ }
+
+ String preference;
+ if (node2Id == null && node3Id == null) {
+ preference = "_local";
+ } else if (node2Id == null || node3Id == null) {
+ if (node2Id != null) {
+ preference = "_prefer_node:" + node2Id;
+ } else {
+ preference = "_prefer_node:" + node3Id;
+ }
+ } else {
+ preference = "_prefer_node:" + (randomBoolean() ? node2Id : node3Id);
+ }
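+ // The preference steers requests at node2/node3 while they are up and falls back to _local
+ // otherwise, so percolate requests keep flowing while nodes leave and rejoin the cluster.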
+
+ if (multiPercolate) {
+ MultiPercolateRequestBuilder builder = client
+ .prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+
+ for (int i = 0; i < numPercolateRequest; i++) {
+ if (randomBoolean()) {
+ builder.add(
+ client.preparePercolate()
+ .setPreference(preference)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type")
+ );
+ } else {
+ builder.add(
+ client.preparePercolate()
+ .setPreference(preference)
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(doc)));
+ }
+ }
+
+ MultiPercolateResponse response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertNoFailures(item.getResponse());
+ assertThat(item.getResponse().getSuccessfulShards(), equalTo(item.getResponse().getTotalShards()));
+ assertThat(item.getResponse().getCount(), equalTo((long) numQueries));
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+ } else {
+ PercolateResponse response;
+ if (randomBoolean()) {
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(doc))
+ .setPreference(preference)
+ .execute().actionGet();
+ } else {
+ response = client.preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type")
+ .setPreference(preference)
+ .execute().actionGet();
+ }
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getCount(), equalTo((long) numQueries));
+ assertThat(response.getMatches().length, equalTo(numQueries));
+ }
+ }
+ } catch (Throwable t) {
+ logger.info("Error in percolate thread...", t);
+ run.set(false);
+ error.set(t);
+ } finally {
+ done.countDown();
+ }
+ }
+ };
+ new Thread(r).start();
+
+ Predicate<Settings> nodePredicate = new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return !input.getAsBoolean("node.stay", false);
+ }
+ };
+ try {
+ // 1 index, 2 primaries, 2 replicas per primary
+ for (int i = 0; i < 4; i++) {
+ cluster().stopRandomNode(nodePredicate);
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ cluster().stopRandomNode(nodePredicate);
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(2) // 1 node, so 2 shards (2 primaries, 0 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ cluster().startNode();
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ cluster().startNode();
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForGreenStatus() // All replicas can be allocated again, so wait for green instead of yellow
+ .setWaitForActiveShards(6) // 3 nodes, so 6 shards (2 primaries, 4 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ }
+ } finally {
+ run.set(false);
+ }
+ done.await();
+ assertThat(error.get(), nullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java
new file mode 100644
index 0000000..caa9846
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.AlreadyExpiredException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST)
+public class TTLPercolatorTests extends ElasticsearchIntegrationTest {
+
+ private static final long PURGE_INTERVAL = 200;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("indices.ttl.interval", PURGE_INTERVAL)
+ .build();
+ }
+
+ @Test
+ public void testPercolatingWithTimeToLive() throws Exception {
+ final Client client = client();
+ ensureGreen();
+
+ String percolatorMapping = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME)
+ .startObject("_ttl").field("enabled", true).endObject()
+ .startObject("_timestamp").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .startObject("_timestamp").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ client.admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping(PercolatorService.TYPE_NAME, percolatorMapping)
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+ ensureGreen();
+
+ long ttl = 1500;
+ long now = System.currentTimeMillis();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "kuku").setSource(jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("term")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).setRefresh(true).setTTL(ttl).execute().actionGet();
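+ // The registered query expires ttl (1500ms) after indexing; the ttl purger then removes it
+ // on one of its next runs, which happen every PURGE_INTERVAL (200ms).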
+
+ IndicesStatsResponse response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ assertThat(response.getIndices().get("test").getTotal().getIndexing().getTotal().getIndexCount(), equalTo(2l));
+
+ PercolateResponse percolateResponse = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ assertNoFailures(percolateResponse);
+ if (percolateResponse.getMatches().length == 0) {
+ // OK, ttl + purgeInterval has passed (slow machine, or many other tests were running at the same time)
+ GetResponse getResponse = client.prepareGet("test", PercolatorService.TYPE_NAME, "kuku").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ long currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ assertThat(currentDeleteCount, equalTo(2l));
+ return;
+ }
+
+ assertThat(convertFromTextArray(percolateResponse.getMatches(), "test"), arrayContaining("kuku"));
+ long timeSpent = System.currentTimeMillis() - now;
+ long waitTime = ttl + PURGE_INTERVAL - timeSpent;
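+ // e.g. if indexing and percolating took 300ms so far, wait the remaining 1500 + 200 - 300 = 1400ms,
+ // so that both the ttl and at least one purge interval have elapsed.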
+ if (waitTime >= 0) {
+ Thread.sleep(waitTime); // Doesn't make sense to check the deleteCount before ttl has expired
+ }
+
+ // See comment in SimpleTTLTests
+ logger.info("Checking if the ttl purger has run");
+ long currentDeleteCount;
+ do {
+ response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ // This returns the number of delete operations stats (not Lucene delete count)
+ currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ }
+ while (currentDeleteCount < 2); // TTL deletes one doc, but it is indexed in the primary shard and replica shard.
+ assertThat(currentDeleteCount, equalTo(2l));
+
+ percolateResponse = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ assertMatchCount(percolateResponse, 0l);
+ assertThat(percolateResponse.getMatches(), emptyArray());
+ }
+
+
+ @Test
+ public void testEnsureTTLDoesNotCreateIndex() throws IOException, InterruptedException {
+ ensureGreen();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put("indices.ttl.interval", 60) // 60 sec
+ .build()).get();
+
+ String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+ ensureGreen();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put("indices.ttl.interval", 1) // 60 sec
+ .build()).get();
+
+ for (int i = 0; i < 100; i++) {
+ logger.debug("index doc {} ", i);
+ try {
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("term")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).setTTL(randomIntBetween(1, 500)).execute().actionGet();
+ } catch (MapperParsingException e) {
+ logger.info("failed indexing {}", i, e);
+ // if we are unlucky the TTL is so small that we see the expiry date is already in the past when
+ // we parse the doc ignore those...
+ assertThat(e.getCause(), Matchers.instanceOf(AlreadyExpiredException.class));
+ }
+
+ }
+ refresh();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
+ logger.debug("delete count [{}]", indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount());
+ // TTL deletes one doc, but it is indexed in the primary shard and replica shards
+ return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() != 0;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ cluster().wipeIndices("test");
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/plugin/PluginManagerTests.java b/src/test/java/org/elasticsearch/plugin/PluginManagerTests.java
new file mode 100644
index 0000000..1fbb0c6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/PluginManagerTests.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.PluginManager;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.helper.HttpClient;
+import org.elasticsearch.rest.helper.HttpClientResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.junit.annotations.Network;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ClusterScope(scope = Scope.TEST, numNodes = 0, transportClientRatio = 0.0)
+public class PluginManagerTests extends ElasticsearchIntegrationTest {
+ private static final Settings SETTINGS = ImmutableSettings.settingsBuilder()
+ .put("discovery.zen.ping.multicast.enabled", false)
+ .put("force.http.enabled", true)
+ .build();
+ private static final String PLUGIN_DIR = "plugins";
+
+ @After
+ public void afterTest() {
+ deletePluginsFolder();
+ }
+
+ @Before
+ public void beforeTest() {
+ deletePluginsFolder();
+ }
+
+ @Test
+ public void testLocalPluginInstallSingleFolder() throws Exception {
+ //When the zip contains only a single top-level folder (and no files), that folder is stripped while extracting
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_single_folder.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginInstallSiteFolder() throws Exception {
+ //When the only top-level entry is a folder called _site, the plugin must keep working:
+ //we can either remove the folder while extracting and re-add it manually, or just leave it as it is
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_folder_site.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginWithoutFolders() throws Exception {
+ //When we don't have folders at all in the top-level, but only files, we don't modify anything
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_without_folders.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginFolderAndFile() throws Exception {
+ //When we have a single top-level folder but also files in the top-level, we don't modify anything
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_folder_file.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testSitePluginWithSourceThrows() throws Exception {
+ String pluginName = "plugin-with-source";
+ URL url = PluginManagerTests.class.getResource("plugin_with_sourcefiles.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+ }
+
+ /**
+ * We build a plugin manager instance which wait only for 30 seconds before
+ * raising an ElasticsearchTimeoutException
+ */
+ private static PluginManager pluginManager(String pluginUrl) {
+ Tuple<Settings, Environment> initialSettings = InternalSettingsPreparer.prepareSettings(
+ ImmutableSettings.settingsBuilder().build(), false);
+ if (!initialSettings.v2().pluginsFile().exists()) {
+ FileSystemUtils.mkdirs(initialSettings.v2().pluginsFile());
+ }
+ return new PluginManager(initialSettings.v2(), pluginUrl, PluginManager.OutputMode.SILENT, TimeValue.timeValueSeconds(30));
+ }
+
+ private static void downloadAndExtract(String pluginName, String pluginUrl) throws IOException {
+ pluginManager(pluginUrl).downloadAndExtract(pluginName);
+ }
+
+ private void assertPluginLoaded(String pluginName) {
+ NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().clear().setPlugin(true).get();
+ assertThat(nodesInfoResponse.getNodes().length, equalTo(1));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos(), notNullValue());
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().size(), equalTo(1));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().get(0).getName(), equalTo(pluginName));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().get(0).isSite(), equalTo(true));
+ }
+
+ private void assertPluginAvailable(String pluginName) throws InterruptedException {
+ HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
+ final HttpClient httpClient = new HttpClient(httpServerTransport.boundAddress().publishAddress());
+ logger.info("--> tested http address [{}]", httpServerTransport.info().getAddress());
+
+ //checking that the http connector is working properly
+ // Retry for a few seconds, as the REST interface may not be fully started yet
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ HttpClientResponse response = httpClient.request("");
+ if (response.errorCode() != RestStatus.OK.getStatus()) {
+ // We want to trace what's going on here before failing the test
+ logger.info("--> error caught [{}], headers [{}]", response.errorCode(), response.getHeaders());
+ logger.info("--> cluster state [{}]", cluster().clusterService().state());
+ return false;
+ }
+ return true;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+
+
+ //checking now that the plugin is available
+ HttpClientResponse response = httpClient.request("_plugin/" + pluginName + "/");
+ assertThat(response, notNullValue());
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ }
+
+ @Test
+ public void testListInstalledEmpty() throws IOException {
+ File[] plugins = pluginManager(null).getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(0));
+ }
+
+ @Test(expected = IOException.class)
+ public void testInstallPluginNull() throws IOException {
+ pluginManager(null).downloadAndExtract("");
+ }
+
+
+ @Test
+ public void testInstallPlugin() throws IOException {
+ PluginManager pluginManager = pluginManager("file://".concat(PluginManagerTests.class.getResource("plugin_with_classfile.zip").getFile()));
+
+ pluginManager.downloadAndExtract("plugin");
+ File[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+ }
+
+ @Test
+ public void testInstallSitePlugin() throws IOException {
+ PluginManager pluginManager = pluginManager("file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+
+ pluginManager.downloadAndExtract("plugin-site");
+ File[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+
+ // We want to check that Plugin Manager moves content to _site
+ String pluginDir = PLUGIN_DIR.concat("/plugin-site/_site");
+ assertThat(FileSystemUtils.exists(new File(pluginDir)), is(true));
+ }
+
+
+ private void singlePluginInstallAndRemove(String pluginShortName, String pluginCoordinates) throws IOException {
+ logger.info("--> trying to download and install [{}]", pluginShortName);
+ PluginManager pluginManager = pluginManager(pluginCoordinates);
+ try {
+ pluginManager.downloadAndExtract(pluginShortName);
+ File[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+
+ // We remove it
+ pluginManager.removePlugin(pluginShortName);
+ plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(0));
+ } catch (IOException e) {
+ logger.warn("--> IOException raised while downloading plugin [{}]. Skipping test.", e, pluginShortName);
+ } catch (ElasticsearchTimeoutException e) {
+ logger.warn("--> timeout exception raised while downloading plugin [{}]. Skipping test.", pluginShortName);
+ }
+ }
+
+ /**
+ * These tests are ignored by default as they require internet access.
+ * To activate them, use -Dtests.network=true
+ * We test the regular form: username/reponame/version
+ * It should be found on the download.elasticsearch.org service
+ */
+ @Test
+ @Network
+ public void testInstallPluginWithElasticsearchDownloadService() throws IOException {
+ assumeTrue(isDownloadServiceWorking("http://download.elasticsearch.org/", "elasticsearch/ci-test.txt"));
+ singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/1.5.0", null);
+ }
+
+ /**
+ * These tests are ignored by default as they require internet access.
+ * To activate them, use -Dtests.network=true
+ * We test the regular form: groupId/artifactId/version
+ * It should be found on the Maven Central service
+ */
+ @Test
+ @Network
+ public void testInstallPluginWithMavenCentral() throws IOException {
+ assumeTrue(isDownloadServiceWorking("http://search.maven.org/", "/"));
+ singlePluginInstallAndRemove("org.elasticsearch/elasticsearch-transport-thrift/1.5.0", null);
+ }
+
+ /**
+ * These tests are ignored by default as they require internet access.
+ * To activate them, use -Dtests.network=true
+ * We test site plugins from github: userName/repoName
+ * It should be found on GitHub
+ */
+ @Test
+ @Network
+ public void testInstallPluginWithGithub() throws IOException {
+ assumeTrue(isDownloadServiceWorking("https://github.com/", "/"));
+ singlePluginInstallAndRemove("elasticsearch/kibana", null);
+ }
+
+ private boolean isDownloadServiceWorking(String url, String resource) {
+ HttpClient client = new HttpClient(url);
+ try {
+ if (client.request(resource).errorCode() != 200) {
+ logger.warn("[{}{}] download service is not working. Disabling current test.", url, resource);
+ return false;
+ }
+ return true;
+ } catch (Throwable t) {
+ logger.warn("[{}{}] download service is not working. Disabling current test.", url, resource);
+ }
+ return false;
+ }
+
+ private void deletePluginsFolder() {
+ FileSystemUtils.deleteRecursively(new File(PLUGIN_DIR));
+ }
+
+ @Test
+ public void testRemovePlugin() throws Exception {
+ // We remove the plugin using its short name
+ singlePluginInstallAndRemove("plugintest", "file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+
+ // We remove the plugin using the groupid/artifactid/version form
+ singlePluginInstallAndRemove("groupid/plugintest/1.0.0", "file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+
+ // We remove the plugin using the groupid/artifactid form
+ singlePluginInstallAndRemove("groupid/plugintest", "file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+ }
+
+ @Test(expected = ElasticsearchIllegalArgumentException.class)
+ public void testRemovePluginWithURLForm() throws Exception {
+ PluginManager pluginManager = pluginManager(null);
+ pluginManager.removePlugin("file://whatever");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java b/src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java
new file mode 100644
index 0000000..a2d20da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.plugin.responseheader.TestResponseHeaderPlugin;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.helper.HttpClient;
+import org.elasticsearch.rest.helper.HttpClientResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test a rest action that sets special response headers
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class ResponseHeaderPluginTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder()
+ .put("plugin.types", TestResponseHeaderPlugin.class.getName())
+ .put("force.http.enabled", true)
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Test
+ public void testThatSettingHeadersWorks() throws Exception {
+ ensureGreen();
+ HttpClientResponse response = httpClient().request("/_protected");
+ assertThat(response.errorCode(), equalTo(RestStatus.UNAUTHORIZED.getStatus()));
+ assertThat(response.getHeader("Secret"), equalTo("required"));
+
+ Map<String, String> headers = Maps.newHashMap();
+ headers.put("Secret", "password");
+ HttpClientResponse authResponse = httpClient().request("GET", "_protected", headers);
+ assertThat(authResponse.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(authResponse.getHeader("Secret"), equalTo("granted"));
+ }
+
+ private HttpClient httpClient() {
+ HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
+ return new HttpClient(httpServerTransport.boundAddress().publishAddress());
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/plugin/SitePluginTests.java b/src/test/java/org/elasticsearch/plugin/SitePluginTests.java
new file mode 100644
index 0000000..465a355
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/SitePluginTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.helper.HttpClient;
+import org.elasticsearch.rest.helper.HttpClientResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.URISyntaxException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that site plugins are served correctly over HTTP
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class SitePluginTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ try {
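+ // point path.plugins at the test resources so the dummy site plugins are picked up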
+ File pluginDir = new File(SitePluginTests.class.getResource("/org/elasticsearch/plugin").toURI());
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("path.plugins", pluginDir.getAbsolutePath())
+ .put("force.http.enabled", true)
+ .build();
+ } catch (URISyntaxException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ public HttpClient httpClient(String id) {
+ HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
+ return new HttpClient(httpServerTransport.boundAddress().publishAddress());
+ }
+
+ @Test
+ public void testRedirectSitePlugin() throws Exception {
+ // We use an HTTP Client to test redirection
+ HttpClientResponse response = httpClient("test").request("/_plugin/dummy");
+ assertThat(response.errorCode(), equalTo(RestStatus.MOVED_PERMANENTLY.getStatus()));
+ assertThat(response.response(), containsString("/_plugin/dummy/"));
+
+ // We test the real URL
+ response = httpClient("test").request("/_plugin/dummy/");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test direct access to an existing file (index.html)
+ */
+ @Test
+ public void testAnyPage() throws Exception {
+ HttpClientResponse response = httpClient("test").request("/_plugin/dummy/index.html");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test case for #4845: https://github.com/elasticsearch/elasticsearch/issues/4845
+ * Serving _site plugins do not pick up on index.html for sub directories
+ */
+ @Test
+ public void testWelcomePageInSubDirs() throws Exception {
+ HttpClientResponse response = httpClient("test").request("/_plugin/subdir/dir/");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin (subdir)</title>"));
+
+ response = httpClient("test").request("/_plugin/subdir/dir_without_index/");
+ assertThat(response.errorCode(), equalTo(RestStatus.FORBIDDEN.getStatus()));
+
+ response = httpClient("test").request("/_plugin/subdir/dir_without_index/page.html");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin (page)</title>"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java
new file mode 100644
index 0000000..86304e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugin.responseheader;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.rest.RestModule;
+
+public class TestResponseHeaderPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-header";
+ }
+
+ @Override
+ public String description() {
+ return "test-plugin-custom-header-desc";
+ }
+
+ public void onModule(RestModule restModule) {
+ restModule.addRestAction(TestResponseHeaderRestAction.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java
new file mode 100644
index 0000000..4d1c9eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.responseheader;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+public class TestResponseHeaderRestAction extends BaseRestHandler {
+
+ @Inject
+ public TestResponseHeaderRestAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_protected", this);
+ }
+
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel) {
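+ // grant access only when the request carries the shared "Secret" header with the expected password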
+ if ("password".equals(request.header("Secret"))) {
+ RestResponse response = new StringRestResponse(RestStatus.OK, "Access granted");
+ response.addHeader("Secret", "granted");
+ channel.sendResponse(response);
+ } else {
+ RestResponse response = new StringRestResponse(RestStatus.UNAUTHORIZED, "Access denied");
+ response.addHeader("Secret", "required");
+ channel.sendResponse(response);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java b/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java
new file mode 100644
index 0000000..61b235d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0, transportClientRatio = 0.0)
+public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
+
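+ /**
+ * Fails the test, logging the full cluster health response, when the health request timed out.
+ */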
+ protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) {
+ ClusterHealthResponse clusterHealth = requestBuilder.get();
+ if (clusterHealth.isTimedOut()) {
+ logger.info("cluster health request timed out:\n{}", clusterHealth);
+ fail("cluster health request timed out");
+ }
+ }
+
+ @Test
+ @Slow
+ @TestLogging("indices.cluster:TRACE,cluster.service:TRACE")
+ public void testFullRollingRestart() throws Exception {
+ cluster().startNode();
+ createIndex("test");
+
+ for (int i = 0; i < 1000; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+ }
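+ // flush so the first 1000 docs are committed to the index and the next 1000 live only in the translog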
+ client().admin().indices().prepareFlush().execute().actionGet();
+ for (int i = 1000; i < 2000; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+ }
+
+ // now start adding nodes
+ cluster().startNode();
+ cluster().startNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+
+ // now start adding nodes
+ cluster().startNode();
+ cluster().startNode();
+
+ // We now have 5 nodes
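+ // raise minimum_master_nodes to the quorum of 5 nodes to guard against split brain during the restarts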
+ setMinimumMasterNodes(3);
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("5"));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
+ }
+
+ // now start shutting nodes down
+ cluster().stopRandomNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("4"));
+
+ // going down to 3 nodes. note that the lowered min_master_nodes may not yet be in effect when we shut down
+ // the 4th node, but that's OK as it was set to 3 beforehand.
+ setMinimumMasterNodes(2);
+ cluster().stopRandomNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
+ }
+
+ // closing the 3rd node
+ cluster().stopRandomNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("2"));
+
+ // closing the 2nd node
+ setMinimumMasterNodes(1);
+ cluster().stopRandomNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForNodes("1"));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java b/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java
new file mode 100644
index 0000000..68e519b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java
@@ -0,0 +1,460 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(RecoveryWhileUnderLoadTests.class);
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ @Slow
+ public void recoverWhileUnderLoadAllocateBackupsTest() throws Exception {
+ logger.info("--> creating test index ...");
+ assertAcked(prepareCreate("test", 1));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[5];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ client.admin().indices().prepareFlush().execute().actionGet();
+ }
+ client.prepareIndex("test", "type1", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ waitForDocs(2000);
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> flushing the index ....");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+
+ logger.info("--> waiting for 4000 docs to be indexed ...");
+ waitForDocs(4000);
+ logger.info("--> 4000 docs indexed");
+
+ logger.info("--> allow 2 nodes for index [test] ...");
+ // now start another node, while we index
+ allowNodes("test", 2);
+
+ logger.info("--> waiting for GREEN health status ...");
+ // make sure the cluster state is green, and all has been recovered
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> waiting for 15000 docs to be indexed ...");
+ waitForDocs(15000);
+ logger.info("--> 15000 docs indexed");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(5, indexCounter.get(), 10);
+ }
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ @Slow
+ public void recoverWhileUnderLoadAllocateBackupsRelocatePrimariesTest() throws Exception {
+ logger.info("--> creating test index ...");
+ assertAcked(prepareCreate("test", 1));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[5];
+ logger.info("--> starting {} indexing threads", writers.length);
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ long id = idGenerator.incrementAndGet();
+ client.prepareIndex("test", "type1", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ waitForDocs(2000);
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> flushing the index ....");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+
+ logger.info("--> waiting for 4000 docs to be indexed ...");
+ waitForDocs(4000);
+ logger.info("--> 4000 docs indexed");
+ logger.info("--> allow 4 nodes for index [test] ...");
+ allowNodes("test", 4);
+
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=4").execute().actionGet().isTimedOut(), equalTo(false));
+
+
+ logger.info("--> waiting for 15000 docs to be indexed ...");
+ waitForDocs(15000);
+ logger.info("--> 15000 docs indexed");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(5, indexCounter.get(), 10);
+ }
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ @Slow
+ public void recoverWhileUnderLoadWithNodeShutdown() throws Exception {
+ logger.info("--> creating test index ...");
+ assertAcked(prepareCreate("test", 2));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[5];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ long id = idGenerator.incrementAndGet();
+ client.prepareIndex("test", "type1", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ waitForDocs(2000);
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> flushing the index ....");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+
+ logger.info("--> waiting for 4000 docs to be indexed ...");
+ waitForDocs(4000);
+ logger.info("--> 4000 docs indexed");
+
+ // now start more nodes, while we index
+ logger.info("--> allow 4 nodes for index [test] ...");
+ allowNodes("test", 4);
+
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=4").execute().actionGet().isTimedOut(), equalTo(false));
+
+
+ logger.info("--> waiting for 10000 docs to be indexed ...");
+ waitForDocs(15000);
+ logger.info("--> 10000 docs indexed");
+
+ // now, shutdown nodes
+ logger.info("--> allow 3 nodes for index [test] ...");
+ allowNodes("test", 3);
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=3").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> allow 2 nodes for index [test] ...");
+ allowNodes("test", 2);
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> allow 1 nodes for index [test] ...");
+ allowNodes("test", 1);
+ logger.info("--> waiting for YELLOW health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForNodes(">=1").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForNodes(">=1").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(5, indexCounter.get(), 10);
+
+ }
+
+ @Test
+ @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE,action.index:TRACE,action.support.replication:TRACE,cluster.service:DEBUG")
+ @Slow
+ public void recoverWhileRelocating() throws Exception {
+ final int numShards = between(2, 10);
+ final int numReplicas = 0;
+ cluster().ensureAtLeastNumNodes(3);
+ logger.info("--> creating test index ...");
+ int allowNodes = 2;
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_shards", numShards).put("number_of_replicas", numReplicas).build()));
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[atLeast(3)];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+ logger.info("--> starting {} indexing threads", writers.length);
+ final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ long id = -1;
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ id = idGenerator.incrementAndGet();
+ client.prepareIndex("test", "type1", Long.toString(id) + "-" + indexerId)
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ failures.add(e);
+ logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ final int numDocs = between(10000, 50000);
+ for (int i = 0; i < numDocs; i += between(100, 1000)) {
+ assertThat(failures, emptyIterable());
+ logger.info("--> waiting for {} docs to be indexed ...", i);
+ waitForDocs(i);
+ logger.info("--> {} docs indexed", i);
+ allowNodes = 2 / allowNodes;
+ allowNodes("test", allowNodes);
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+ }
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ assertThat(failures, emptyIterable());
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+ logger.info("--> bump up number of replicas to 1 and allow all nodes to hold the index");
+ allowNodes("test", 3);
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.settingsBuilder().put("number_of_replicas", 1)).get());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(numShards, indexCounter.get(), 10);
+ }
+
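+ /**
+ * Runs the same count query for the given number of iterations; if any iteration misses
+ * documents, dumps per-shard doc counts, waits up to five minutes for the counts to
+ * converge, and finally asserts the hit count on every recorded response.
+ */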
+ private void iterateAssertCount(final int numberOfShards, final long numberOfDocs, final int iterations) throws Exception {
+ SearchResponse[] iterationResults = new SearchResponse[iterations];
+ boolean error = false;
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT).setQuery(matchAllQuery()).get();
+ logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse);
+ iterationResults[i] = searchResponse;
+ if (searchResponse.getHits().totalHits() != numberOfDocs) {
+ error = true;
+ }
+ }
+
+ if (error) {
+ // print out each shard and its doc count
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().get();
+ for (ShardStats shardStats : indicesStatsResponse.getShards()) {
+ DocsStats docsStats = shardStats.getStats().docs;
+ logger.info("shard [{}] - count {}, primary {}", shardStats.getShardId(), docsStats.getCount(), shardStats.getShardRouting().primary());
+ }
+
+ // if there was an error, wait and see whether the counts converge after a while
+ logger.info("--> trying to wait");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ boolean error = false;
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT).setQuery(matchAllQuery()).get();
+ if (searchResponse.getHits().totalHits() != numberOfDocs) {
+ error = true;
+ }
+ }
+ return !error;
+ }
+ }, 5, TimeUnit.MINUTES), equalTo(true));
+ }
+
+ // now fail the test on the originally recorded responses if any of them was short
+ for (int i = 0; i < iterations; i++) {
+ assertHitCount(iterationResults[i], numberOfDocs);
+ }
+ }
+
+ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iteration, SearchResponse searchResponse) {
+ logger.info("iteration [{}] - successful shards: {} (expected {})", iteration, searchResponse.getSuccessfulShards(), numberOfShards);
+ logger.info("iteration [{}] - failed shards: {} (expected 0)", iteration, searchResponse.getFailedShards());
+ if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
+ logger.info("iteration [{}] - shard failures: {}", iteration, Arrays.toString(searchResponse.getShardFailures()));
+ }
+ logger.info("iteration [{}] - returned documents: {} (expected {})", iteration, searchResponse.getHits().totalHits(), numberOfDocs);
+ }
+
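+ /**
+ * Refreshes the index, retrying for up to five minutes until the refresh succeeds on all shards.
+ */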
+ private void refreshAndAssert() throws InterruptedException {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ try {
+ RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet.getTotalShards() == actionGet.getSuccessfulShards();
+ } catch (Throwable e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }, 5, TimeUnit.MINUTES), equalTo(true));
+ }
+
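+ /**
+ * Blocks until more than numDocs documents are visible to search, failing only when a full
+ * five-minute wait passes without the visible doc count making any progress.
+ */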
+ private void waitForDocs(final long numDocs) throws InterruptedException {
+ final long[] lastKnownCount = {-1};
+ long lastStartCount = -1;
+ Predicate<Object> testDocs = new Predicate<Object>() {
+ public boolean apply(Object o) {
+ lastKnownCount[0] = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount[0], numDocs);
+ return lastKnownCount[0] > numDocs;
+ }
+ };
+ // 5 minutes seems like a long time but while relocating, indexing threads can wait for up to ~1m before retrying when
+ // they first try to index into a shard which is not STARTED.
+ while (!awaitBusy(testDocs, 5, TimeUnit.MINUTES)) {
+ if (lastStartCount == lastKnownCount[0]) {
+ // we didn't make any progress
+ fail("failed to reach " + numDocs + "docs");
+ }
+ lastStartCount = lastKnownCount[0];
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/RelocationTests.java b/src/test/java/org/elasticsearch/recovery/RelocationTests.java
new file mode 100644
index 0000000..c9c24e0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/RelocationTests.java
@@ -0,0 +1,419 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
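+ /**
+ * Downloads and installs a single plugin, asserts it is the only one installed, then removes
+ * it and asserts the plugin list is empty again. Download failures and timeouts are logged
+ * and the test is effectively skipped.
+ */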
+ */
+
+package org.elasticsearch.recovery;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.procedures.IntProcedure;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class RelocationTests extends ElasticsearchIntegrationTest {
+ private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES);
+
+
+ @Test
+ public void testSimpleRelocationNoIndexing() {
+ logger.info("--> starting [node1] ...");
+ final String node_1 = cluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .execute().actionGet();
+
+ logger.info("--> index 10 docs");
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ logger.info("--> flush so we have an actual index");
+ client().admin().indices().prepareFlush().execute().actionGet();
+ logger.info("--> index more docs so we have something in the translog");
+ for (int i = 10; i < 20; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+
+ logger.info("--> verifying count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20l));
+
+ logger.info("--> start another node");
+ final String node_2 = cluster().startNode();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> relocate the shard from node1 to node2");
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2))
+ .execute().actionGet();
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verifying count again...");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20l));
+ }
+
+ @Test
+ @Slow
+ public void testPrimaryRelocationWhileIndexingRandom() throws Exception {
+ int numRelocations = atLeast(rarely() ? 3 : 1);
+ int numWriters = atLeast(rarely() ? 3 : 1);
+ boolean batch = getRandom().nextBoolean();
+ logger.info("testPrimaryRelocationWhileIndexingRandom(numRelocations={}, numWriters={}, batch={}",
+ numRelocations, numWriters, batch);
+ testPrimaryRelocationWhileIndexing(numRelocations, numWriters, batch);
+ }
+
+
+
+ private void testPrimaryRelocationWhileIndexing(final int numberOfRelocations, final int numberOfWriters, final boolean batch) throws Exception {
+ String[] nodes = new String[2];
+ logger.info("--> starting [node1] ...");
+ nodes[0] = cluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+
+ logger.info("--> starting [node2] ...");
+ nodes[1] = cluster().startNode();
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[numberOfWriters];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final Client perThreadClient = client();
+ final int indexerId = i;
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ if (batch) {
+ BulkRequestBuilder bulkRequest = perThreadClient.prepareBulk();
+ for (int i = 0; i < 100; i++) {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ bulkRequest.add(perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ if (!bulkItemResponse.isFailed()) {
+ indexCounter.incrementAndGet();
+ } else {
+ logger.warn("**** failed bulk indexing thread {}, {}/{}", indexerId, bulkItemResponse.getFailure().getId(), bulkItemResponse.getFailure().getMessage());
+ }
+ }
+ } else {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Exception e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ while (client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() < 2000) {
+ Thread.sleep(100);
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ }
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> starting relocations...");
+ for (int i = 0; i < numberOfRelocations; i++) {
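+ // move the single shard back and forth between node1 and node2 on every iteration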
+ int fromNode = (i % 2);
+ int toNode = fromNode == 0 ? 1 : 0;
+ logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), nodes[fromNode], nodes[toNode]))
+ .execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
+ }
+ logger.info("--> done relocations");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ logger.info("--> searching the index");
+ boolean ranOnce = false;
+ for (int i = 0; i < 10; i++) {
+ try {
+ logger.info("--> START search test round {}", i + 1);
+ SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexCounter.get()).setNoFields().execute().actionGet().getHits();
+ ranOnce = true;
+ if (hits.totalHits() != indexCounter.get()) {
+ int[] hitIds = new int[(int) indexCounter.get()];
+ for (int hit = 0; hit < indexCounter.get(); hit++) {
+ hitIds[hit] = hit + 1;
+ }
+ IntOpenHashSet set = IntOpenHashSet.from(hitIds);
+ for (SearchHit hit : hits.hits()) {
+ int id = Integer.parseInt(hit.id());
+ if (!set.remove(id)) {
+ logger.error("Extra id [{}]", id);
+ }
+ }
+ set.forEach(new IntProcedure() {
+
+ @Override
+ public void apply(int value) {
+ logger.error("Missing id [{}]", value);
+ }
+
+ });
+ }
+ assertThat(hits.totalHits(), equalTo(indexCounter.get()));
+ logger.info("--> DONE search test round {}", i + 1);
+ } catch (SearchPhaseExecutionException ex) {
+ // TODO: the first run fails with this failure, waiting for relocating nodes set to 0 is not enough?
+ logger.warn("Got exception while searching.", ex);
+ }
+ }
+ if (!ranOnce) {
+ fail("no search round completed successfully");
+ }
+ }
+
+ @Test
+ @Slow
+ public void testReplicaRelocationWhileIndexingRandom() throws Exception {
+ int numRelocations = atLeast(rarely() ? 3 : 1);
+ int numWriters = atLeast(rarely() ? 3 : 1);
+ boolean batch = getRandom().nextBoolean();
+ logger.info("testReplicaRelocationWhileIndexing(numRelocations={}, numWriters={}, batch={}", numRelocations, numWriters, batch);
+ testReplicaRelocationWhileIndexing(numRelocations, numWriters, batch);
+ }
+
+ private void testReplicaRelocationWhileIndexing(final int numberOfRelocations, final int numberOfWriters, final boolean batch) throws Exception {
+ logger.info("--> starting [node1] ...");
+ String[] nodes = new String[3];
+ nodes[0] = cluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ ).execute().actionGet();
+
+ logger.info("--> starting [node2] ...");
+ nodes[1] = cluster().startNode();
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> starting [node3] ...");
+ nodes[2] = cluster().startNode();
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[numberOfWriters];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final Client perThreadClient = client();
+ final int indexerId = i;
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ if (batch) {
+ BulkRequestBuilder bulkRequest = perThreadClient.prepareBulk();
+ for (int i = 0; i < 100; i++) {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ bulkRequest.add(perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ if (!bulkItemResponse.isFailed()) {
+ indexCounter.incrementAndGet();
+ } else {
+ logger.warn("**** failed bulk indexing thread {}, {}/{}", indexerId, bulkItemResponse.getFailure().getId(), bulkItemResponse.getFailure().getMessage());
+ }
+ }
+ } else {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Exception e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ while (client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() < 2000) {
+ Thread.sleep(100);
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ }
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> starting relocations...");
+ for (int i = 0; i < numberOfRelocations; i++) {
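+ // move the shard copy back and forth between node2 and node3 on every iteration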
+ int fromNode = (1 + (i % 2));
+ int toNode = fromNode == 1 ? 2 : 1;
+ logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), nodes[fromNode], nodes[toNode]))
+ .execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
+ }
+ logger.info("--> done relocations");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ logger.info("--> searching the index");
+ boolean ranOnce = false;
+ for (int i = 0; i < 10; i++) {
+ try {
+ logger.info("--> START search test round {}", i + 1);
+ SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexCounter.get()).setNoFields().execute().actionGet().getHits();
+ ranOnce = true;
+ if (hits.totalHits() != indexCounter.get()) {
+ int[] hitIds = new int[(int) indexCounter.get()];
+ for (int hit = 0; hit < indexCounter.get(); hit++) {
+ hitIds[hit] = hit + 1;
+ }
+ IntOpenHashSet set = IntOpenHashSet.from(hitIds);
+ for (SearchHit hit : hits.hits()) {
+ int id = Integer.parseInt(hit.id());
+ if (!set.remove(id)) {
+ logger.error("Extra id [{}]", id);
+ }
+ }
+ set.forEach(new IntProcedure() {
+
+ @Override
+ public void apply(int value) {
+ logger.error("Missing id [{}]", value);
+ }
+ });
+ }
+ assertThat(hits.totalHits(), equalTo(indexCounter.get()));
+ logger.info("--> DONE search test round {}", i + 1);
+ } catch (SearchPhaseExecutionException ex) {
+ // TODO: the first run fails with this failure, waiting for relocating nodes set to 0 is not enough?
+ logger.warn("Got exception while searching.", ex);
+ }
+ }
+ if (!ranOnce) {
+ fail("no search round completed successfully");
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java
new file mode 100644
index 0000000..fee8e96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleRecoveryTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return recoverySettings();
+ }
+
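+ // subclasses override this to rerun the same recovery scenario with non-default recovery settings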
+ protected Settings recoverySettings() {
+ return ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+ @Test
+ public void testSimpleRecovery() throws Exception {
+ prepareCreate("test", 1).execute().actionGet(5000);
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ FlushResponse flushResponse = client().admin().indices().flush(flushRequest("test")).actionGet();
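+ // the assertions below imply the default 5-shard/1-replica layout: 10 shard copies in total,
+ // with only the 5 primaries active while a single node is running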
+ assertThat(flushResponse.getTotalShards(), equalTo(10));
+ assertThat(flushResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(flushResponse.getFailedShards(), equalTo(0));
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+ RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(10));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+
+ allowNodes("test", 2);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().local(true).waitForNodes(">=2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ GetResponse getResult;
+
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(false)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(false)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ }
+
+ // now start another one so we move some primaries
+ allowNodes("test", 3);
+ Thread.sleep(200);
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes(">=3")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ }
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java
new file mode 100644
index 0000000..a69abf4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class SmallFileChunkSizeRecoveryTests extends SimpleRecoveryTests {
+
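+ // a 3-byte file chunk size forces recovery to split every segment file into many tiny chunk requests, stressing the file-chunk transfer path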
+ @Override
+ protected Settings recoverySettings() {
+ return ImmutableSettings.settingsBuilder().put("index.shard.recovery.file_chunk_size", "3b").build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java
new file mode 100644
index 0000000..7ddabae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class SmallTranslogOpsRecoveryTests extends SimpleRecoveryTests {
+
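+ // sending one translog operation per batch exercises translog replay with the smallest possible batches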
+ @Override
+ protected Settings recoverySettings() {
+ return ImmutableSettings.settingsBuilder().put("index.shard.recovery.translog_ops", 1).build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java
new file mode 100644
index 0000000..d74940c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class SmallTranslogSizeRecoveryTests extends SimpleRecoveryTests {
+
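+ // a 3-byte translog batch size similarly forces translog replay to be split into minimal batches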
+ @Override
+ protected Settings recoverySettings() {
+ return ImmutableSettings.settingsBuilder().put("index.shard.recovery.translog_size", "3b").build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/rest/helper/HttpClient.java b/src/test/java/org/elasticsearch/rest/helper/HttpClient.java
new file mode 100644
index 0000000..4ec352d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/rest/helper/HttpClient.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.helper;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.List;
+import java.util.Map;
+
+public class HttpClient {
+
+ private final URL baseUrl;
+
+ public HttpClient(TransportAddress transportAddress) {
+ InetSocketAddress address = ((InetSocketTransportAddress) transportAddress).address();
+ try {
+ baseUrl = new URL("http", address.getAddress().getHostAddress(), address.getPort(), "/");
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException("", e);
+ }
+ }
+
+ public HttpClient(String url) {
+ try {
+ baseUrl = new URL(url);
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException("", e);
+ }
+ }
+
+ public HttpClient(URL url) {
+ baseUrl = url;
+ }
+
+ public HttpClientResponse request(String path) {
+ return request("GET", path);
+ }
+
+ public HttpClientResponse request(String method, String path) {
+ return request(method, path, null);
+ }
+
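+ // Opens a plain HttpURLConnection, applies any headers, and reads the response body.
+ // If reading the response fails with an IOException, the error stream is read instead
+ // and the exception is attached to the returned HttpClientResponse.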
+ public HttpClientResponse request(String method, String path, Map<String, String> headers) {
+ URL url;
+ try {
+ url = new URL(baseUrl, path);
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException("Cannot parse " + path, e);
+ }
+
+ HttpURLConnection urlConnection;
+ try {
+ urlConnection = (HttpURLConnection) url.openConnection();
+ urlConnection.setRequestMethod(method);
+ if (headers != null) {
+ for (Map.Entry<String, String> headerEntry : headers.entrySet()) {
+ urlConnection.setRequestProperty(headerEntry.getKey(), headerEntry.getValue());
+ }
+ }
+ urlConnection.connect();
+ } catch (IOException e) {
+ throw new ElasticsearchException("", e);
+ }
+
+ int errorCode = -1;
+ Map<String, List<String>> respHeaders = null;
+ try {
+ errorCode = urlConnection.getResponseCode();
+ respHeaders = urlConnection.getHeaderFields();
+ InputStream inputStream = urlConnection.getInputStream();
+ String body = null;
+ try {
+ body = Streams.copyToString(new InputStreamReader(inputStream, Charsets.UTF_8));
+ } catch (IOException e1) {
+ throw new ElasticsearchException("problem reading error stream", e1);
+ }
+ return new HttpClientResponse(body, errorCode, respHeaders, null);
+ } catch (IOException e) {
+ InputStream errStream = urlConnection.getErrorStream();
+ String body = null;
+ if (errStream != null) {
+ try {
+ body = Streams.copyToString(new InputStreamReader(errStream, Charsets.UTF_8));
+ } catch (IOException e1) {
+ throw new ElasticsearchException("problem reading error stream", e1);
+ }
+ }
+ return new HttpClientResponse(body, errorCode, respHeaders, e);
+ } finally {
+ urlConnection.disconnect();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java b/src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java
new file mode 100644
index 0000000..2abc6b6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.helper;
+
+import java.util.List;
+import java.util.Map;
+
+public class HttpClientResponse {
+ private final String response;
+ private final int errorCode;
+ private Map<String, List<String>> headers;
+ private final Throwable e;
+
+ public HttpClientResponse(String response, int errorCode, Map<String, List<String>> headers, Throwable e) {
+ this.response = response;
+ this.errorCode = errorCode;
+ this.headers = headers;
+ this.e = e;
+ }
+
+ public String response() {
+ return response;
+ }
+
+ public int errorCode() {
+ return errorCode;
+ }
+
+ public Throwable cause() {
+ return e;
+ }
+
+ public Map<String, List<String>> getHeaders() {
+ return headers;
+ }
+
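+ // convenience accessor returning the first value of a header, or null when the header is absent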
+ public String getHeader(String name) {
+ if (headers == null) {
+ return null;
+ }
+ List<String> vals = headers.get(name);
+ if (vals == null || vals.size() == 0) {
+ return null;
+ }
+ return vals.iterator().next();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java b/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java
new file mode 100644
index 0000000..b676ff3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.util;
+
+import org.elasticsearch.rest.support.RestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class RestUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDecodeQueryString() {
+ Map<String, String> params = newHashMap();
+
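+ // decodeQueryString parses parameters starting at the given offset; an offset at the end of
+ // the string or a negative offset yields no parameters, as the later cases show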
+ String uri = "something?test=value";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("test"), equalTo("value"));
+
+ params.clear();
+ uri = "something?test=value&test1=value1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("test"), equalTo("value"));
+ assertThat(params.get("test1"), equalTo("value1"));
+
+ params.clear();
+ uri = "something";
+ RestUtils.decodeQueryString(uri, uri.length(), params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something";
+ RestUtils.decodeQueryString(uri, -1, params);
+ assertThat(params.size(), equalTo(0));
+ }
+
+ @Test
+ public void testDecodeQueryStringEdgeCases() {
+ Map<String, String> params = newHashMap();
+
+ String uri = "something?";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?&";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?p=v&&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+
+ params.clear();
+ uri = "something?=";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?&=";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?a";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("a"), equalTo(""));
+
+ params.clear();
+ uri = "something?p=v&a";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+
+ params.clear();
+ uri = "something?p=v&a&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(3));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+
+ params.clear();
+ uri = "something?p=v&a&b&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(4));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("b"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/river/RiverTests.java b/src/test/java/org/elasticsearch/river/RiverTests.java
new file mode 100644
index 0000000..807d2dd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/river/RiverTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.river.dummy.DummyRiverModule;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class RiverTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRiverStart() throws Exception {
+ startAndCheckRiverIsStarted("dummy-river-test");
+ }
+
+ @Test
+ public void testMultipleRiversStart() throws Exception {
+ int nbRivers = between(2, 10);
+ logger.info("--> testing with {} rivers...", nbRivers);
+ Thread[] riverCreators = new Thread[nbRivers];
+ final CountDownLatch latch = new CountDownLatch(nbRivers);
+ final MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet();
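+ // each thread registers one river; the multi-get accumulates the corresponding _status
+ // documents so we can later verify that every river actually started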
+ for (int i = 0; i < nbRivers; i++) {
+ final String riverName = "dummy-river-test-" + i;
+ riverCreators[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ startRiver(riverName);
+ } catch (Throwable t) {
+ logger.warn("failed to register river {}", t, riverName);
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+ riverCreators[i].start();
+ multiGetRequestBuilder.add(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_status");
+ }
+
+ latch.await();
+
+ logger.info("--> checking that all rivers were created");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ MultiGetResponse multiGetItemResponse = multiGetRequestBuilder.get();
+ for (MultiGetItemResponse getItemResponse : multiGetItemResponse) {
+ if (getItemResponse.isFailed() || !getItemResponse.getResponse().isExists()) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ /**
+ * Test case for https://github.com/elasticsearch/elasticsearch/issues/4577
+ * River does not start when using config/templates files
+ */
+ @Test
+ public void startDummyRiverWithDefaultTemplate() throws Exception {
+ logger.info("--> create empty template");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject())
+ .get();
+
+ startAndCheckRiverIsStarted("dummy-river-default-template-test");
+ }
+
+ /**
+ * Test case for https://github.com/elasticsearch/elasticsearch/issues/4577
+ * River does not start when using config/templates files
+ */
+ @Test
+ public void startDummyRiverWithSomeTemplates() throws Exception {
+ logger.info("--> create some templates");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject())
+ .get();
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping("atype",
+ JsonXContent.contentBuilder().startObject().startObject("atype")
+ .endObject().endObject())
+ .get();
+
+ startAndCheckRiverIsStarted("dummy-river-template-test");
+ }
+
+ /**
+ * Create a dummy river, then check that it has started; the check gives up and fails after 5 seconds.
+ * @param riverName name of the dummy river to start
+ */
+ private void startAndCheckRiverIsStarted(final String riverName) throws InterruptedException {
+ startRiver(riverName);
+ checkRiverIsStarted(riverName);
+ }
+
+ private void startRiver(final String riverName) {
+ logger.info("--> starting river [{}]", riverName);
+ IndexResponse indexResponse = client().prepareIndex(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_meta")
+ .setSource("type", DummyRiverModule.class.getCanonicalName()).get();
+ assertTrue(indexResponse.isCreated());
+ }
+
+ private void checkRiverIsStarted(final String riverName) throws InterruptedException {
+ logger.info("--> checking that river [{}] was created", riverName);
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_status").get();
+ return response.isExists();
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
new file mode 100644
index 0000000..861fdf7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class AliasResolveRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testResolveIndexRouting() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias110").searchRouting("1,0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias12").routing("2")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+
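+ // resolveIndexRouting returns the routing an index operation should use: null when neither the
+ // request nor the alias defines one, the alias routing otherwise, and it throws when the
+ // request routing conflicts with the alias routing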
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue());
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias"), nullValue());
+
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue());
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias10"), equalTo("0"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias20"), equalTo("0"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias21"), equalTo("1"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting("3", "test1"), equalTo("3"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting("0", "alias10"), equalTo("0"));
+ try {
+ clusterService().state().metaData().resolveIndexRouting("1", "alias10");
+ fail("should fail");
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // all is well, we can't have two routing values, one provided and one in the alias
+ }
+
+ try {
+ clusterService().state().metaData().resolveIndexRouting(null, "alias0");
+ fail("should fail");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // Expected
+ }
+ }
+
+
+ @Test
+ public void testResolveSearchRouting() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+
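+ // resolveSearchRouting maps each concrete index to the routing values to search; an explicit
+ // request routing is intersected with each alias' search routing, as the assertions below show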
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias"), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", "alias"), equalTo(newMap("test1", newSet("0", "1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", "alias10"), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias20", "alias21"}),
+ equalTo(newMap("test2", newSet("0", "1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"test1", "alias10"}), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "test1"}), nullValue());
+
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias20"}), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"test1", "alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0", "1", "2"), "test2", newSet("1"))));
+ }
+
+ private <T> Set<T> newSet(T... elements) {
+ return newHashSet(elements);
+ }
+
+
+ private <K, V> Map<K, V> newMap(K key, V value) {
+ Map<K, V> r = newHashMap();
+ r.put(key, value);
+ return r;
+ }
+
+ private <K, V> Map<K, V> newMap(K key1, V value1, K key2, V value2) {
+ Map<K, V> r = newHashMap();
+ r.put(key1, value1);
+ r.put(key2, value2);
+ return r;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
new file mode 100644
index 0000000..adb815c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
@@ -0,0 +1,434 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ *
+ */
+public class AliasRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testAliasCrudRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
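+ // all CRUD operations below go through alias0, whose routing ("0") is applied automatically,
+ // so direct access to the concrete index without routing misses the document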
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> verifying get with routing alias, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> updating with id [1] and routing through alias");
+ client().prepareUpdate("alias0", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field = 'value2'")
+ .execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ }
+
+
+ logger.info("--> deleting with no routing, should not delete anything");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with routing alias, should delete");
+ client().prepareDelete("alias0", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with 1 as routing, should not delete anything");
+ client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with alias0, should delete");
+ client().prepareDeleteByQuery("alias0").setQuery(matchAllQuery()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testAliasSearchRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias"))
+ .addAliasAction(newAddAliasAction("test", "alias0").routing("0"))
+ .addAliasAction(newAddAliasAction("test", "alias1").routing("1"))
+ .addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> search with no routing, should fine one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ }
+
+ logger.info("--> search with wrong routing, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with correct routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> indexing with id [2], and routing [1] using alias");
+ client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> search with no routing, should fine two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 1 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 0,1 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ assertThat(client().prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with two routing aliases , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with alias0, alias1 and alias01, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with test, alias0 and alias1, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ }
+
+ @Test
+ public void testAliasSearchRoutingWithTwoIndices() throws Exception {
+ createIndex("test-a");
+ createIndex("test-b");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test-a", "alias-a0").routing("0"))
+ .addAliasAction(newAddAliasAction("test-a", "alias-a1").routing("1"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-b0").routing("0"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-b1").routing("1"))
+ .addAliasAction(newAddAliasAction("test-a", "alias-ab").searchRouting("0"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
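+ // alias-ab points at both indices with a different search routing per index, so a search on it
+ // reaches the "0" shard of test-a and the "1" shard of test-b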
+ ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
+ logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
+ client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias-a0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> indexing with id [0], and routing [1] using alias to test-b");
+ client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias-b1", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+
+ logger.info("--> search with alias-a1,alias-b0, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with alias-ab, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with alias-a0,alias-b1 should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+ /*
+ See https://github.com/elasticsearch/elasticsearch/issues/2682
+ When searching on more than one index, if one of them was an alias with configured routing, the shards
+ belonging to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShards.
+ That reduced the number of shards the search executed on, so some documents were missing from the search results.
+ */
+ @Test
+ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() throws Exception {
+ createIndex("index", "index_2");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("index", "index_1").routing("1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
+ client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> indexing on index_2 which is a concrete index");
+ client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+
+ logger.info("--> search all on index_* should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ }
+ }
+
+ /*
+ See https://github.com/elasticsearch/elasticsearch/pull/3268
+ When searching on more than one index, if one of them was an alias with configured routing, the shards
+ belonging to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShardsCount.
+ The count could then come back as 1, which forced the QUERY_AND_FETCH search type.
+ As a result, (size * number of hit shards) results were returned and no reduce phase took place.
+ */
+ @Test
+ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() throws Exception {
+ createIndex("index", "index_2");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("index", "index_1").routing("1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
+ client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> indexing on index_2 which is a concrete index");
+ client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+
+ logger.info("--> search all on index_* should find two");
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2L));
+ // Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request.
+ // Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced.
+ assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+ }
+
+ @Test
+ public void testRequiredRoutingMappingWithAlias() throws Exception {
+ prepareCreate("test").addMapping(
+ "type1",
+ XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true)
+ .endObject().endObject().endObject()).get();
+ ensureGreen();
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> indexing with id [1], with no routing, should fail");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
+ }
+
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> bulk deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareBulk().add(Requests.deleteRequest("test").type("type1").id("1")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+                // the get itself must throw a RoutingMissingException
+                client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+                fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testIndexingAliasesOverTime() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ logger.info("--> creating alias with routing [3]");
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").routing("3")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
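+        // the alias now applies routing [3] to both indexing and search requests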
+
+ logger.info("--> indexing with id [0], and routing [3]");
+ client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> verifying get and search with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> creating alias with routing [4]");
+ res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").routing("4")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> verifying search with wrong routing should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> creating alias with search routing [3,4] and index routing 4");
+ client().admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").searchRouting("3,4").indexRouting("4"))
+ .execute().actionGet();
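+        // search routing [3,4] keeps docs indexed under routing [3] visible, while index routing [4] pins new writes to [4]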
+
+ logger.info("--> indexing with id [1], and routing [4]");
+ client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> verifying get and search with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
new file mode 100644
index 0000000..338e7e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
@@ -0,0 +1,440 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.termvector.MultiTermVectorsResponse;
+import org.elasticsearch.action.termvector.TermVectorRequest;
+import org.elasticsearch.action.termvector.TermVectorResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests routing-aware behavior of index, get, delete, delete-by-query and search operations.
+ */
+public class SimpleRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleCrudRouting() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
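+        // the routing value selects the target shard, so a get or delete without the same routing can miss the doc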
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should not delete anything");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with routing, should delete");
+ client().prepareDelete("test", "type1", "1").setRouting("0").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with 1 as routing, should not delete anything");
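+        // the doc was indexed with routing [0], so a delete-by-query routed to [1] must not touch it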
+ client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+        logger.info("--> deleting_by_query with routing [0], should delete");
+ client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("0").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testSimpleSearchRouting() {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+        logger.info("--> search with no routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ }
+
+ logger.info("--> search with wrong routing, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with correct routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> indexing with id [2], and routing [1]");
+ client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+
+        logger.info("--> search with no routing, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 1 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+        logger.info("--> search with 0,1 routings, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+        logger.info("--> search with 0,1,0 routings, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingMapping() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
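+        // with "_routing" mapped as required, any document API call that omits the routing value is rejected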
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should fail");
+
+ logger.info("--> indexing with id [1], with no routing, should fail");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
+ }
+
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> bulk deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareBulk().add(Requests.deleteRequest("test").type("type1").id("1")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingWithPathMapping() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
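+        // "_routing.path" makes ES extract the routing value from the "routing_field" of the document source at index time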
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1", "routing_field", "0").setRefresh(true).execute().actionGet();
+
+ logger.info("--> check failure with different routing");
+ try {
+ client().prepareIndex("test", "type1", "1").setRouting("1").setSource("field", "value1", "routing_field", "0").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(MapperParsingException.class));
+ }
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingWithPathMappingBulk() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareBulk().add(
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1", "routing_field", "0")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingWithPathNumericType() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1", "routing_field", 0).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingMapping_variousAPIs() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").get();
+ logger.info("--> indexing with id [2], and routing [0]");
+ client().prepareIndex("test", "type1", "2").setRouting("0").setSource("field", "value2").setRefresh(true).get();
+
+ logger.info("--> verifying get with id [1] with routing [0], should succeed");
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> verifying get with id [1], with no routing, should fail");
+ try {
+ client().prepareGet("test", "type1", "1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ logger.info("--> verifying explain with id [2], with routing [0], should succeed");
+ ExplainResponse explainResponse = client().prepareExplain("test", "type1", "2")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setRouting("0").get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.isMatch(), equalTo(true));
+
+ logger.info("--> verifying explain with id [2], with no routing, should fail");
+ try {
+ client().prepareExplain("test", "type1", "2")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[2]"));
+ }
+
+ logger.info("--> verifying term vector with id [1], with routing [0], should succeed");
+ TermVectorResponse termVectorResponse = client().prepareTermVector("test", "type1", "1").setRouting("0").get();
+ assertThat(termVectorResponse.isExists(), equalTo(true));
+ assertThat(termVectorResponse.getId(), equalTo("1"));
+
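+        // the same term vector request without routing must fail, as must the update below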
+ try {
+ client().prepareTermVector("test", "type1", "1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1").setRouting("0")
+ .setDoc("field1", "value1").get();
+ assertThat(updateResponse.getId(), equalTo("1"));
+ assertThat(updateResponse.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareUpdate("test", "type1", "1").setDoc("field1", "value1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ logger.info("--> verifying mget with ids [1,2], with routing [0], should succeed");
+ MultiGetResponse multiGetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").routing("0"))
+ .add(new MultiGetRequest.Item("test", "type1", "2").routing("0")).get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(2));
+ assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(false));
+ assertThat(multiGetResponse.getResponses()[0].getResponse().getId(), equalTo("1"));
+ assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(false));
+ assertThat(multiGetResponse.getResponses()[1].getResponse().getId(), equalTo("2"));
+
+ logger.info("--> verifying mget with ids [1,2], with no routing, should fail");
+ multiGetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1"))
+ .add(new MultiGetRequest.Item("test", "type1", "2")).get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(2));
+ assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(true));
+ assertThat(multiGetResponse.getResponses()[0].getFailure().getId(), equalTo("1"));
+ assertThat(multiGetResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(true));
+ assertThat(multiGetResponse.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(multiGetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+
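+        // multi term vectors, like mget above, reports the missing routing per item instead of failing the whole request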
+ MultiTermVectorsResponse multiTermVectorsResponse = client().prepareMultiTermVectors()
+ .add(new TermVectorRequest("test", "type1", "1").routing("0"))
+ .add(new TermVectorRequest("test", "type1", "2").routing("0")).get();
+ assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(false));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse().getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].isFailed(), equalTo(false));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().isExists(), equalTo(true));
+
+ multiTermVectorsResponse = client().prepareMultiTermVectors()
+ .add(new TermVectorRequest("test", "type1", "1"))
+ .add(new TermVectorRequest("test", "type1", "2")).get();
+ assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse(), nullValue());
+ assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].isFailed(), equalTo(true));
+        assertThat(multiTermVectorsResponse.getResponses()[1].getResponse(), nullValue());
+ assertThat(multiTermVectorsResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/script/IndexLookupTests.java b/src/test/java/org/elasticsearch/script/IndexLookupTests.java
new file mode 100644
index 0000000..5d1aa6b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/script/IndexLookupTests.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndexLookupTests extends ElasticsearchIntegrationTest {
+
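+    // flag strings passed to _index[field].get(term, flags); without _CACHE, positions can only be iterated once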
+ String includeAllFlag = "_FREQUENCIES | _OFFSETS | _PAYLOADS | _POSITIONS | _CACHE";
+ String includeAllWithoutRecordFlag = "_FREQUENCIES | _OFFSETS | _PAYLOADS | _POSITIONS ";
+ private HashMap<String, List<Object>> expectedEndOffsetsArray;
+ private HashMap<String, List<Object>> expectedPayloadsArray;
+ private HashMap<String, List<Object>> expectedPositionsArray;
+ private HashMap<String, List<Object>> emptyArray;
+ private HashMap<String, List<Object>> expectedStartOffsetsArray;
+
+ void initTestData() throws InterruptedException, ExecutionException, IOException {
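+        // expected per-doc values; (-1, -1) marks positions/offsets/payloads that were not requested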
+ emptyArray = new HashMap<String, List<Object>>();
+ List<Object> empty1 = new ArrayList<Object>();
+ empty1.add(-1);
+ empty1.add(-1);
+ emptyArray.put("1", empty1);
+ List<Object> empty2 = new ArrayList<Object>();
+ empty2.add(-1);
+ empty2.add(-1);
+ emptyArray.put("2", empty2);
+ List<Object> empty3 = new ArrayList<Object>();
+ empty3.add(-1);
+ empty3.add(-1);
+ emptyArray.put("3", empty3);
+
+ expectedPositionsArray = new HashMap<String, List<Object>>();
+
+ List<Object> pos1 = new ArrayList<Object>();
+ pos1.add(1);
+ pos1.add(2);
+ expectedPositionsArray.put("1", pos1);
+ List<Object> pos2 = new ArrayList<Object>();
+ pos2.add(0);
+ pos2.add(1);
+ expectedPositionsArray.put("2", pos2);
+ List<Object> pos3 = new ArrayList<Object>();
+ pos3.add(0);
+ pos3.add(4);
+ expectedPositionsArray.put("3", pos3);
+
+ expectedPayloadsArray = new HashMap<String, List<Object>>();
+ List<Object> pay1 = new ArrayList<Object>();
+ pay1.add(2);
+ pay1.add(3);
+ expectedPayloadsArray.put("1", pay1);
+ List<Object> pay2 = new ArrayList<Object>();
+ pay2.add(1);
+ pay2.add(2);
+ expectedPayloadsArray.put("2", pay2);
+ List<Object> pay3 = new ArrayList<Object>();
+ pay3.add(1);
+ pay3.add(-1);
+ expectedPayloadsArray.put("3", pay3);
+ /*
+ * "a|1 b|2 b|3 c|4 d " "b|1 b|2 c|3 d|4 a " "b|1 c|2 d|3 a|4 b "
+ */
+ expectedStartOffsetsArray = new HashMap<String, List<Object>>();
+ List<Object> starts1 = new ArrayList<Object>();
+ starts1.add(4);
+ starts1.add(8);
+ expectedStartOffsetsArray.put("1", starts1);
+ List<Object> starts2 = new ArrayList<Object>();
+ starts2.add(0);
+ starts2.add(4);
+ expectedStartOffsetsArray.put("2", starts2);
+ List<Object> starts3 = new ArrayList<Object>();
+ starts3.add(0);
+ starts3.add(16);
+ expectedStartOffsetsArray.put("3", starts3);
+
+ expectedEndOffsetsArray = new HashMap<String, List<Object>>();
+ List<Object> ends1 = new ArrayList<Object>();
+ ends1.add(7);
+ ends1.add(11);
+ expectedEndOffsetsArray.put("1", ends1);
+ List<Object> ends2 = new ArrayList<Object>();
+ ends2.add(3);
+ ends2.add(7);
+ expectedEndOffsetsArray.put("2", ends2);
+ List<Object> ends3 = new ArrayList<Object>();
+ ends3.add(3);
+ ends3.add(17);
+ expectedEndOffsetsArray.put("3", ends3);
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
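+        // whitespace tokenizer plus delimited_payload_filter: "a|1" indexes the token "a" with integer payload 1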
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter")
+ .put("index.number_of_replicas", 0).put("index.number_of_shards", randomIntBetween(1, 6))));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("int_payload_field", "a|1 b|2 b|3 c|4 d "), client()
+ .prepareIndex("test", "type1", "2").setSource("int_payload_field", "b|1 b|2 c|3 d|4 a "),
+ client().prepareIndex("test", "type1", "3").setSource("int_payload_field", "b|1 c|2 d|3 a|4 b "));
+ ensureGreen();
+
+ }
+
+ @Test
+ public void testTwoScripts() throws Exception {
+
+ initTestData();
+
+        // the script field returns the constant 1; the score script checks term frequencies for 'b'
+        String scriptFieldScript = "1";
+        String scoreScript = "term = _index['int_payload_field']['b']; term.tf()";
+ Map<String, Object> expectedResultsField = new HashMap<String, Object>();
+ expectedResultsField.put("1", 1);
+ expectedResultsField.put("2", 1);
+ expectedResultsField.put("3", 1);
+ Map<String, Object> expectedResultsScore = new HashMap<String, Object>();
+ expectedResultsScore.put("1", 2f);
+ expectedResultsScore.put("2", 2f);
+ expectedResultsScore.put("3", 2f);
+ checkOnlyFunctionScore(scoreScript, expectedResultsScore, 3);
+ checkValueInEachDocWithFunctionScore(scriptFieldScript, expectedResultsField, scoreScript, expectedResultsScore, 3);
+
+ }
+
+ @Test
+ public void testCallWithDifferentFlagsFails() throws Exception {
+
+ initTestData();
+
+        // should throw an exception: we cannot call get with different flags twice
+        // if the flags of the second call were not included in the first call.
+ String script = "term = _index['int_payload_field']['b']; return _index['int_payload_field'].get('b', _POSITIONS).tf();";
+ try {
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(
+ e.getDetailedMessage()
+ .indexOf(
+ "You must call get with all required flags! Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS) call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]; "),
+ Matchers.greaterThan(-1));
+ }
+
+ // Should not throw an exception this way round
+ script = "term = _index['int_payload_field'].get('b', _POSITIONS | _FREQUENCIES);return _index['int_payload_field']['b'].tf();";
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ }
+
+ private void checkOnlyFunctionScore(String scoreScript, Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript))).execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ @Test
+ public void testDocumentationExample() throws Exception {
+
+ initTestData();
+
+ String script = "term = _index['float_payload_field'].get('b'," + includeAllFlag
+ + "); payloadSum=0; for (pos : term) {payloadSum = pos.payloadAsInt(0);} return payloadSum;";
+
+        // non-existent field: sum should be 0
+ HashMap<String, Object> zeroArray = new HashMap<String, Object>();
+ zeroArray.put("1", 0);
+ zeroArray.put("2", 0);
+ zeroArray.put("3", 0);
+ checkValueInEachDoc(script, zeroArray, 3);
+
+ script = "term = _index['int_payload_field'].get('b'," + includeAllFlag
+ + "); payloadSum=0; for (pos : term) {payloadSum = payloadSum + pos.payloadAsInt(0);} return payloadSum;";
+
+ // existing field: sums should be as here:
+ zeroArray.put("1", 5);
+ zeroArray.put("2", 3);
+ zeroArray.put("3", 1);
+ checkValueInEachDoc(script, zeroArray, 3);
+ }
+
+ @Test
+ public void testIteratorAndRecording() throws Exception {
+
+ initTestData();
+
+ // call twice with record: should work as expected
+ String script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // no record and get iterator twice: should fail
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "position");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptions(script);
+
+ // no record and get termObject twice and iterate: should fail
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "position");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptions(script);
+
+ }
+
+ private String createPositionsArrayScriptGetInfoObjectTwice(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags
+ + "); array=[]; for (pos : term) {array.add(pos." + what + ")} ;_index['int_payload_field'].get('" + term + "',"
+ + flags + "); array=[]; for (pos : term) {array.add(pos." + what + ")}";
+ return script;
+ }
+
+ private String createPositionsArrayScriptIterateTwice(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags
+ + "); array=[]; for (pos : term) {array.add(pos." + what + ")} array=[]; for (pos : term) {array.add(pos." + what
+ + ")} return array;";
+ return script;
+ }
+
+ private String createPositionsArrayScript(String field, String term, String flags, String what) {
+ String script = "term = _index['" + field + "'].get('" + term + "'," + flags
+ + "); array=[]; for (pos : term) {array.add(pos." + what + ")} return array;";
+ return script;
+ }
+
+ private String createPositionsArrayScriptDefaultGet(String field, String term, String what) {
+ String script = "term = _index['" + field + "']['" + term + "']; array=[]; for (pos : term) {array.add(pos." + what
+ + ")} return array;";
+ return script;
+ }
+
+ @Test
+ public void testFlags() throws Exception {
+
+ initTestData();
+
+ // check default flag
+ String script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "position");
+ // there should be no positions
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "payloadAsInt(-1)");
+ // there should be no payload
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_FREQUENCIES flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "position");
+ // there should be no positions
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "payloadAsInt(-1)");
+ // there should be no payloads
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_POSITIONS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "position");
+ // there should be positions
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "payloadAsInt(-1)");
+ // there should be no payloads
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_OFFSETS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "position");
+        // there should be positions and so forth ...
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check FLAG_PAYLOADS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check all flags
+ String allFlags = "_POSITIONS | _OFFSETS | _PAYLOADS";
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check all flags without record
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ }
+
+ private void checkArrayValsInEachDoc(String script, HashMap<String, List<Object>> expectedArray, int expectedHitSize) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, expectedHitSize);
+        // count hits whose expected value is non-null; every expected entry must be matched by a hit
+        int nonNullCounter = 0;
+        for (SearchHit hit : sr.getHits().getHits()) {
+            Object result = hit.getFields().get("tvtest").getValues().get(0);
+            Object expectedResult = expectedArray.get(hit.getId());
+            assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+            if (expectedResult != null) {
+                nonNullCounter++;
+            }
+        }
+        assertThat(nonNullCounter, equalTo(expectedArray.size()));
+ }
+
+ @Test
+ public void testAllExceptPosAndOffset() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("float_payload_field").field("type", "string").field("index_options", "offsets").field("term_vector", "no")
+ .field("analyzer", "payload_float").endObject().startObject("string_payload_field").field("type", "string")
+ .field("index_options", "offsets").field("term_vector", "no").field("analyzer", "payload_string").endObject()
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_string.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_string.filter", "delimited_string")
+ .put("index.analysis.filter.delimited_string.delimiter", "|")
+ .put("index.analysis.filter.delimited_string.encoding", "identity")
+ .put("index.analysis.filter.delimited_string.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter").put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1)));
+ ensureYellow();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("float_payload_field", "a|1 b|2 a|3 b "), client()
+ .prepareIndex("test", "type1", "2").setSource("string_payload_field", "a|a b|b a|a b "),
+ client().prepareIndex("test", "type1", "3").setSource("float_payload_field", "a|4 b|5 a|6 b "),
+ client().prepareIndex("test", "type1", "4").setSource("string_payload_field", "a|b b|a a|b b "),
+ client().prepareIndex("test", "type1", "5").setSource("float_payload_field", "c "),
+ client().prepareIndex("test", "type1", "6").setSource("int_payload_field", "c|1"));
+
+ // get the number of all docs
+ String script = "_index.numDocs()";
+ checkValueInEachDoc(6, script, 6);
+
+ // get the number of docs with field float_payload_field
+ script = "_index['float_payload_field'].docCount()";
+ checkValueInEachDoc(3, script, 6);
+
+ // corner case: what if the field does not exist?
+ script = "_index['non_existent_field'].docCount()";
+ checkValueInEachDoc(0, script, 6);
+
+ // get the number of all tokens in all docs
+ script = "_index['float_payload_field'].sumttf()";
+ checkValueInEachDoc(9, script, 6);
+
+ // corner case get the number of all tokens in all docs for non existent
+ // field
+ script = "_index['non_existent_field'].sumttf()";
+ checkValueInEachDoc(0, script, 6);
+
+ // get the sum of doc freqs in all docs
+ script = "_index['float_payload_field'].sumdf()";
+ checkValueInEachDoc(5, script, 6);
+
+ // get the sum of doc freqs in all docs for non existent field
+ script = "_index['non_existent_field'].sumdf()";
+ checkValueInEachDoc(0, script, 6);
+
+ // check term frequencies for 'a'
+ script = "term = _index['float_payload_field']['a']; if (term != null) {term.tf()}";
+ Map<String, Object> expectedResults = new HashMap<String, Object>();
+ expectedResults.put("1", 2);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 2);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for 'c'
+ script = "term = _index['float_payload_field']['c']; if (term != null) {term.df()}";
+ expectedResults.put("1", 1l);
+ expectedResults.put("2", 1l);
+ expectedResults.put("3", 1l);
+ expectedResults.put("4", 1l);
+ expectedResults.put("5", 1l);
+ expectedResults.put("6", 1l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for term that does not exist
+ script = "term = _index['float_payload_field']['non_existent_term']; if (term != null) {term.df()}";
+ expectedResults.put("1", 0l);
+ expectedResults.put("2", 0l);
+ expectedResults.put("3", 0l);
+ expectedResults.put("4", 0l);
+ expectedResults.put("5", 0l);
+ expectedResults.put("6", 0l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for term that does not exist
+ script = "term = _index['non_existent_field']['non_existent_term']; if (term != null) {term.tf()}";
+ expectedResults.put("1", 0);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 0);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check total term frequencies for 'a'
+ script = "term = _index['float_payload_field']['a']; if (term != null) {term.ttf()}";
+ expectedResults.put("1", 4l);
+ expectedResults.put("2", 4l);
+ expectedResults.put("3", 4l);
+ expectedResults.put("4", 4l);
+ expectedResults.put("5", 4l);
+ expectedResults.put("6", 4l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check float payload for 'b'
+ HashMap<String, List<Object>> expectedPayloadsArray = new HashMap<String, List<Object>>();
+ script = createPositionsArrayScript("float_payload_field", "b", includeAllFlag, "payloadAsFloat(-1)");
+ float missingValue = -1;
+ List<Object> payloadsFor1 = new ArrayList<Object>();
+ payloadsFor1.add(2f);
+ payloadsFor1.add(missingValue);
+ expectedPayloadsArray.put("1", payloadsFor1);
+ List<Object> payloadsFor2 = new ArrayList<Object>();
+ payloadsFor2.add(5f);
+ payloadsFor2.add(missingValue);
+ expectedPayloadsArray.put("3", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<Object>());
+ expectedPayloadsArray.put("5", new ArrayList<Object>());
+ expectedPayloadsArray.put("4", new ArrayList<Object>());
+ expectedPayloadsArray.put("2", new ArrayList<Object>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ // check string payload for 'b'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScript("string_payload_field", "b", includeAllFlag, "payloadAsString()");
+ payloadsFor1.add("b");
+ payloadsFor1.add(null);
+ expectedPayloadsArray.put("2", payloadsFor1);
+ payloadsFor2.add("a");
+ payloadsFor2.add(null);
+ expectedPayloadsArray.put("4", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<Object>());
+ expectedPayloadsArray.put("5", new ArrayList<Object>());
+ expectedPayloadsArray.put("3", new ArrayList<Object>());
+ expectedPayloadsArray.put("1", new ArrayList<Object>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ // check int payload for 'c'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScript("int_payload_field", "c", includeAllFlag, "payloadAsInt(-1)");
+ payloadsFor1 = new ArrayList<Object>();
+ payloadsFor1.add(1);
+ expectedPayloadsArray.put("6", payloadsFor1);
+ expectedPayloadsArray.put("5", new ArrayList<Object>());
+ expectedPayloadsArray.put("4", new ArrayList<Object>());
+ expectedPayloadsArray.put("3", new ArrayList<Object>());
+ expectedPayloadsArray.put("2", new ArrayList<Object>());
+ expectedPayloadsArray.put("1", new ArrayList<Object>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ }
+
+ private void checkExceptions(String script) {
+ try {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertThat(sr.getHits().hits().length, equalTo(0));
+ ShardSearchFailure[] shardFails = sr.getShardFailures();
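+            // the expected text below matches the server's error message verbatim, including its misspellings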
+ for (ShardSearchFailure fail : shardFails) {
+ assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely."),
+ Matchers.greaterThan(-1));
+ }
+ } catch (SearchPhaseExecutionException ex) {
+
+ assertThat(
+ ex.getDetailedMessage().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely."),
+ Matchers.greaterThan(-1));
+ }
+ }
+
+ private void checkValueInEachDocWithFunctionScore(String fieldScript, Map<String, Object> expectedFieldVals, String scoreScript,
+ Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript)))
+ .addScriptField("tvtest", fieldScript).execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedFieldVals.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ private void checkValueInEachDoc(String script, Map<String, Object> expectedResults, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedResults.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ }
+ }
+
+ private void checkValueInEachDoc(int value, String script, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ if (result instanceof Integer) {
+ assertThat(((Integer) result).intValue(), equalTo(value));
+ } else if (result instanceof Long) {
+ assertThat(((Long) result).intValue(), equalTo(value));
+ } else {
+ fail();
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java
new file mode 100644
index 0000000..7c66752
--- /dev/null
+++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class NativeScriptTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNativeScript() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("script.native.my.type", MyNativeScriptFactory.class.getName())
+ .build();
+ Injector injector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new ScriptModule(settings)).createInjector();
+
+ ScriptService scriptService = injector.getInstance(ScriptService.class);
+
+ ExecutableScript executable = scriptService.executable("native", "my", null);
+ assertThat(executable.run().toString(), equalTo("test"));
+ }
+
+ static class MyNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new MyScript();
+ }
+ }
+
+ static class MyScript extends AbstractExecutableScript {
+ @Override
+ public Object run() {
+ return "test";
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/script/ScriptFieldTests.java b/src/test/java/org/elasticsearch/script/ScriptFieldTests.java
new file mode 100644
index 0000000..9615f96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/script/ScriptFieldTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.SUITE, numNodes = 3)
+public class ScriptFieldTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder().put("plugin.types", CustomScriptPlugin.class.getName()).put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ static int[] intArray = { Integer.MAX_VALUE, Integer.MIN_VALUE, 3 };
+ static long[] longArray = { Long.MAX_VALUE, Long.MIN_VALUE, 9223372036854775807L };
+ static float[] floatArray = { Float.MAX_VALUE, Float.MIN_VALUE, 3.3f };
+ static double[] doubleArray = { Double.MAX_VALUE, Double.MIN_VALUE, 3.3d };
+
+ public void testNativeScript() throws InterruptedException, ExecutionException {
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("text", "doc1"), client()
+ .prepareIndex("test", "type1", "2").setSource("text", "doc2"),
+ client().prepareIndex("test", "type1", "3").setSource("text", "doc3"), client().prepareIndex("test", "type1", "4")
+ .setSource("text", "doc4"), client().prepareIndex("test", "type1", "5").setSource("text", "doc5"), client()
+ .prepareIndex("test", "type1", "6").setSource("text", "doc6"));
+
+ client().admin().indices().prepareFlush("test").execute().actionGet();
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .addScriptField("int", "native", "int", null).addScriptField("float", "native", "float", null)
+ .addScriptField("double", "native", "double", null).addScriptField("long", "native", "long", null).execute().actionGet();
+ assertThat(sr.getHits().hits().length, equalTo(6));
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("int").getValues().get(0);
+ assertThat(result, equalTo((Object) intArray));
+ result = hit.getFields().get("long").getValues().get(0);
+ assertThat(result, equalTo((Object) longArray));
+ result = hit.getFields().get("float").getValues().get(0);
+ assertThat(result, equalTo((Object) floatArray));
+ result = hit.getFields().get("double").getValues().get(0);
+ assertThat(result, equalTo((Object) doubleArray));
+ }
+ }
+
+ static class IntArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new IntScript();
+ }
+ }
+
+ static class IntScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return intArray;
+ }
+ }
+
+ static class LongArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new LongScript();
+ }
+ }
+
+ static class LongScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return longArray;
+ }
+ }
+
+ static class FloatArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new FloatScript();
+ }
+ }
+
+ static class FloatScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return floatArray;
+ }
+ }
+
+ static class DoubleArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new DoubleScript();
+ }
+ }
+
+ static class DoubleScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return doubleArray;
+ }
+ }
+
+ public static class CustomScriptPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "custom_script";
+ }
+
+ @Override
+ public String description() {
+ return "script ";
+ }
+
+ public void onModule(ScriptModule scriptModule) {
+ scriptModule.registerScript("int", IntArrayScriptFactory.class);
+ scriptModule.registerScript("long", LongArrayScriptFactory.class);
+ scriptModule.registerScript("float", FloatArrayScriptFactory.class);
+ scriptModule.registerScript("double", DoubleArrayScriptFactory.class);
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java b/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java
new file mode 100644
index 0000000..aee1577
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search;
+
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class StressSearchServiceReaperTest extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ // very frequent checks
+ return ImmutableSettings.builder().put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(1)).build();
+ }
+
+ @Slow
+ @Test // see issue #5165 - this test fails each time without the fix in pull #5170
+ public void testStressReaper() throws ExecutionException, InterruptedException {
+ int num = atLeast(100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[num];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type", "" + i).setSource("f", English.intToEnglish(i));
+ }
+ prepareCreate("test").setSettings("number_of_shards", randomIntBetween(1,5), "number_of_replicas", randomIntBetween(0,1)).setSettings();
+ indexRandom(true, builders);
+ ensureYellow();
+ final int iterations = atLeast(500);
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).setSize(num).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, num);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java b/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java
new file mode 100644
index 0000000..9633b85
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntIntMap;
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class CombiTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ /**
+ * Makes sure that multiple aggregations working on the same field, yet requiring different
+ * value source types, can all still work. This used to fail because the ValueSource was cached by
+ * field name alone: if the cached value source was of type "bytes" and another aggregation needed
+ * to see the field as "numeric", it didn't work. Value sources are now cached by a composite key
+ * (field name + ValueSource type), so there is no conflict (see the illustrative sketch after this test).
+ */
+ @Test
+ public void multipleAggs_OnSameField_WithDifferentRequiredValueSourceType() throws Exception {
+
+ createIndex("idx");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
+ IntIntMap values = new IntIntOpenHashMap();
+ long missingValues = 0;
+ for (int i = 0; i < builders.length; i++) {
+ String name = "name_" + randomIntBetween(1, 10);
+ if (rarely()) {
+ missingValues++;
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .endObject());
+ } else {
+ int value = randomIntBetween(1, 10);
+ values.put(value, values.get(value) + 1); // hppc maps return 0 for absent keys
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .field("value", value)
+ .endObject());
+ }
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+
+
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_values").field("value"))
+ .addAggregation(terms("values").field("value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Aggregations aggs = response.getAggregations();
+
+ Missing missing = aggs.get("missing_values");
+ assertNotNull(missing);
+ assertThat(missing.getDocCount(), equalTo(missingValues));
+
+ Terms terms = aggs.get("values");
+ assertNotNull(terms);
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(values.size()));
+ for (Terms.Bucket bucket : buckets) {
+ values.remove(bucket.getKeyAsNumber().intValue());
+ }
+ assertTrue(values.isEmpty());
+ }
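+
+ /**
+ * Illustrative sketch only (hypothetical names, not the real aggregation framework types):
+ * the fix described above amounts to caching value sources under a composite key instead of
+ * the bare field name.
+ */
+ static final class ValueSourceCacheKey {
+ final String fieldName;
+ final Class<?> valueSourceType; // e.g. a "bytes" vs. a "numeric" value source
+
+ ValueSourceCacheKey(String fieldName, Class<?> valueSourceType) {
+ this.fieldName = fieldName;
+ this.valueSourceType = valueSourceType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof ValueSourceCacheKey)) {
+ return false;
+ }
+ ValueSourceCacheKey other = (ValueSourceCacheKey) o;
+ return fieldName.equals(other.fieldName) && valueSourceType.equals(other.valueSourceType);
+ }
+
+ @Override
+ public int hashCode() {
+ // both parts participate, so the same field seen as "bytes" and as "numeric" gets two entries
+ return 31 * fieldName.hashCode() + valueSourceType.hashCode();
+ }
+ }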
+
+
+ /**
+ * Some top-level aggs (e.g. date_histogram / histogram) that are executed on unmapped fields will
+ * generate an estimated bucket count of zero. When the sub-aggregator is then created, it takes this
+ * estimate into account, which used to cause an ArrayIndexOutOfBoundsException (see the sketch after
+ * this test).
+ */
+ @Test
+ public void subAggregationForTopAggregationOnUnmappedField() throws Exception {
+
+ prepareCreate("idx").addMapping("type", jsonBuilder()
+ .startObject()
+ .startObject("type").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("value").field("type", "integer").endObject()
+ .endObject().endObject()
+ .endObject()).execute().actionGet();
+
+ ensureSearchable("idx");
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .addAggregation(histogram("values").field("value1").interval(1)
+ .subAggregation(terms("names").field("name")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(0l));
+ Histogram values = searchResponse.getAggregations().get("values");
+ assertThat(values, notNullValue());
+ assertThat(values.getBuckets().isEmpty(), is(true));
+ }
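+
+ /**
+ * Illustrative sketch only (hypothetical helper): the defensive fix for the scenario above is
+ * to never size sub-aggregator storage directly from the parent's raw estimate, which is zero
+ * for unmapped fields.
+ */
+ static long safeInitialBucketCount(long parentEstimatedBucketCount) {
+ // a zero estimate would otherwise yield zero-length arrays and an
+ // ArrayIndexOutOfBoundsException on the first bucket ordinal
+ return Math.max(1L, parentEstimatedBucketCount);
+ }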
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java b/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java
new file mode 100644
index 0000000..9ae230a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+public class ParsingTests extends ElasticsearchIntegrationTest {
+
+ @Test(expected=SearchPhaseExecutionException.class)
+ public void testTwoTypes() throws Exception {
+ createIndex("idx");
+ client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+ .startObject()
+ .startObject("in_stock")
+ .startObject("filter")
+ .startObject("range")
+ .startObject("stock")
+ .field("gt", 0)
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("terms")
+ .field("field", "stock")
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/RandomTests.java b/src/test/java/org/elasticsearch/search/aggregations/RandomTests.java
new file mode 100644
index 0000000..811a88a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/RandomTests.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.RangeFilterBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.RangeBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things
+ * like the growth of dynamic arrays are tested.
+ */
+public class RandomTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+
+
+ // Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported
+ // Duel with filters
+ public void testRandomRanges() throws Exception {
+ final int numDocs = atLeast(1000);
+ final double[][] docs = new double[numDocs][];
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = randomInt(5);
+ docs[i] = new double[numValues];
+ for (int j = 0; j < numValues; ++j) {
+ docs[i][j] = randomDouble() * 100;
+ }
+ }
+
+ createIndex("idx");
+ for (int i = 0; i < docs.length; ++i) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .startArray("values");
+ for (int j = 0; j < docs[i].length; ++j) {
+ source = source.value(docs[i][j]);
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenient()).execute().get());
+
+ final int numRanges = randomIntBetween(1, 20);
+ final double[][] ranges = new double[numRanges][];
+ for (int i = 0; i < ranges.length; ++i) {
+ switch (randomInt(2)) {
+ case 0:
+ ranges[i] = new double[] { Double.NEGATIVE_INFINITY, randomInt(100) };
+ break;
+ case 1:
+ ranges[i] = new double[] { randomInt(100), Double.POSITIVE_INFINITY };
+ break;
+ case 2:
+ ranges[i] = new double[] { randomInt(100), randomInt(100) };
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+
+ RangeBuilder query = range("range").field("values");
+ for (int i = 0; i < ranges.length; ++i) {
+ String key = Integer.toString(i);
+ if (ranges[i][0] == Double.NEGATIVE_INFINITY) {
+ query.addUnboundedTo(key, ranges[i][1]);
+ } else if (ranges[i][1] == Double.POSITIVE_INFINITY) {
+ query.addUnboundedFrom(key, ranges[i][0]);
+ } else {
+ query.addRange(key, ranges[i][0], ranges[i][1]);
+ }
+ }
+
+ SearchRequestBuilder reqBuilder = client().prepareSearch("idx").addAggregation(query);
+ for (int i = 0; i < ranges.length; ++i) {
+ RangeFilterBuilder filter = FilterBuilders.rangeFilter("values");
+ if (ranges[i][0] != Double.NEGATIVE_INFINITY) {
+ filter = filter.from(ranges[i][0]);
+ }
+ if (ranges[i][1] != Double.POSITIVE_INFINITY){
+ filter = filter.to(ranges[i][1]);
+ }
+ reqBuilder = reqBuilder.addAggregation(filter("filter" + i).filter(filter));
+ }
+
+ SearchResponse resp = reqBuilder.execute().actionGet();
+ Range range = resp.getAggregations().get("range");
+
+ for (int i = 0; i < ranges.length; ++i) {
+
+ long count = 0;
+ for (double[] values : docs) {
+ for (double value : values) {
+ if (value >= ranges[i][0] && value < ranges[i][1]) {
+ ++count;
+ break;
+ }
+ }
+ }
+
+ final Range.Bucket bucket = range.getBucketByKey(Integer.toString(i));
+ assertEquals(bucket.getKey(), count, bucket.getDocCount());
+
+ final Filter filter = resp.getAggregations().get("filter" + i);
+ assertThat(filter.getDocCount(), equalTo(count));
+ }
+ }
+
+ // test long/double/string terms aggs with a high number of buckets that require array growth
+ public void testDuelTerms() throws Exception {
+ // These high numbers of docs and terms are important to trigger page recycling
+ final int numDocs = atLeast(10000);
+ final int maxNumTerms = randomIntBetween(10, 100000);
+
+ final IntOpenHashSet valuesSet = new IntOpenHashSet();
+ cluster().wipeIndices("idx");
+ prepareCreate("idx").addMapping("type", jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("string_values")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .startObject("long_values")
+ .field("type", "long")
+ .endObject()
+ .startObject("double_values")
+ .field("type", "double")
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < numDocs; ++i) {
+ final int[] values = new int[randomInt(4)];
+ for (int j = 0; j < values.length; ++j) {
+ values[j] = randomInt(maxNumTerms - 1) - 1000;
+ valuesSet.add(values[j]);
+ }
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("num", randomDouble())
+ .startArray("long_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value(values[j]);
+ }
+ source = source.endArray().startArray("double_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value((double) values[j]);
+ }
+ source = source.endArray().startArray("string_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value(Integer.toString(values[j]));
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenient()).execute().get());
+
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(terms("long").field("long_values").size(maxNumTerms).subAggregation(min("min").field("num")))
+ .addAggregation(terms("double").field("double_values").size(maxNumTerms).subAggregation(max("max").field("num")))
+ .addAggregation(terms("string_map").field("string_values").executionHint(TermsAggregatorFactory.EXECUTION_HINT_VALUE_MAP).size(maxNumTerms).subAggregation(stats("stats").field("num")))
+ .addAggregation(terms("string_ordinals").field("string_values").executionHint(TermsAggregatorFactory.EXECUTION_HINT_VALUE_ORDINALS).size(maxNumTerms).subAggregation(extendedStats("stats").field("num"))).execute().actionGet();
+ assertEquals(0, resp.getFailedShards());
+
+ final Terms longTerms = resp.getAggregations().get("long");
+ final Terms doubleTerms = resp.getAggregations().get("double");
+ final Terms stringMapTerms = resp.getAggregations().get("string_map");
+ final Terms stringOrdinalsTerms = resp.getAggregations().get("string_ordinals");
+
+ assertEquals(valuesSet.size(), longTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), doubleTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringOrdinalsTerms.getBuckets().size());
+ for (Terms.Bucket bucket : longTerms.getBuckets()) {
+ final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsText().string())));
+ final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsText().string());
+ final Terms.Bucket stringOrdinalsBucket = stringOrdinalsTerms.getBucketByKey(bucket.getKeyAsText().string());
+ assertNotNull(doubleBucket);
+ assertNotNull(stringMapBucket);
+ assertNotNull(stringOrdinalsBucket);
+ assertEquals(bucket.getDocCount(), doubleBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringOrdinalsBucket.getDocCount());
+ }
+ }
+
+ // Duel between histograms and scripted terms
+ public void testDuelTermsHistogram() throws Exception {
+ createIndex("idx");
+
+ final int numDocs = atLeast(1000);
+ final int maxNumTerms = randomIntBetween(10, 2000);
+ final int interval = randomIntBetween(1, 100);
+
+ final Integer[] values = new Integer[maxNumTerms];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = randomInt(maxNumTerms * 3) - maxNumTerms;
+ }
+
+ for (int i = 0; i < numDocs; ++i) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("num", randomDouble())
+ .startArray("values");
+ final int numValues = randomInt(4);
+ for (int j = 0; j < numValues; ++j) {
+ source = source.value(randomFrom(values));
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenient()).execute().get());
+
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field("values").script("floor(_value / interval)").param("interval", interval).size(maxNumTerms))
+ .addAggregation(histogram("histo").field("values").interval(interval))
+ .execute().actionGet();
+
+ assertThat(resp.getFailedShards(), equalTo(0));
+
+ Terms terms = resp.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ Histogram histo = resp.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size()));
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ final long key = bucket.getKeyAsNumber().longValue() * interval;
+ final Histogram.Bucket histoBucket = histo.getBucketByKey(key);
+ assertEquals(bucket.getDocCount(), histoBucket.getDocCount());
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java
new file mode 100644
index 0000000..bd354d7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.ObjectLongMap;
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectLongCursor;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util._TestUtil;
+import org.elasticsearch.common.util.BigArraysTests;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+public class BytesRefHashTests extends ElasticsearchTestCase {
+
+ BytesRefHash hash;
+
+ private void newHash() {
+ if (hash != null) {
+ hash.release();
+ }
+ // Test high load factors to make sure that collision resolution works fine
+ final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
+ hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randomCacheRecycler());
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ newHash();
+ }
+
+ public void testDuell() {
+ final int len = randomIntBetween(1, 100000);
+ final BytesRef[] values = new BytesRef[len];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new BytesRef(randomAsciiOfLength(5));
+ }
+ final ObjectLongMap<BytesRef> valueToId = new ObjectLongOpenHashMap<BytesRef>();
+ final BytesRef[] idToValue = new BytesRef[values.length];
+ final int iters = randomInt(1000000);
+ for (int i = 0; i < iters; ++i) {
+ final BytesRef value = randomFrom(values);
+ if (valueToId.containsKey(value)) {
+ // duplicate insertions return the existing id encoded as (-1 - id)
+ assertEquals(-1 - valueToId.get(value), hash.add(value, value.hashCode()));
+ } else {
+ // first-time insertions return the next sequential id
+ assertEquals(valueToId.size(), hash.add(value, value.hashCode()));
+ idToValue[valueToId.size()] = value;
+ valueToId.put(value, valueToId.size());
+ }
+ }
+
+ assertEquals(valueToId.size(), hash.size());
+ for (Iterator<ObjectLongCursor<BytesRef>> iterator = valueToId.iterator(); iterator.hasNext(); ) {
+ final ObjectLongCursor<BytesRef> next = iterator.next();
+ assertEquals(next.value, hash.find(next.key, next.key.hashCode()));
+ }
+
+ for (long i = 0; i < hash.capacity(); ++i) {
+ final long id = hash.id(i);
+ BytesRef spare = new BytesRef();
+ if (id >= 0) {
+ hash.get(id, spare);
+ assertEquals(idToValue[(int) id], spare);
+ }
+ }
+ hash.release();
+ }
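+
+ /**
+ * Illustrative sketch of the key encoding exercised above (hypothetical helper, not part of
+ * BytesRefHash): add() returns the new id for a first-time value and (-1 - existingId) for a
+ * duplicate, so the id of an already-hashed value can be recovered from a negative key.
+ */
+ private static long idFromDuplicateKey(long key) {
+ assert key < 0 : "only duplicate insertions return negative keys";
+ return -1 - key;
+ }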
+
+ // START - tests borrowed from LUCENE
+
+ /**
+ * Test method for {@link org.apache.lucene.util.BytesRefHash#size()}.
+ */
+ @Test
+ public void testSize() {
+ BytesRef ref = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ final int mod = 1 + randomInt(40);
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref);
+ if (key < 0) {
+ assertEquals(hash.size(), count);
+ } else {
+ assertEquals(hash.size(), count + 1);
+ }
+ if (i % mod == 0) {
+ newHash();
+ }
+ }
+ }
+ hash.release();
+ }
+
+ /**
+ * Test method for
+ * {@link org.apache.lucene.util.BytesRefHash#get(int, BytesRef)}
+ * .
+ */
+ @Test
+ public void testGet() {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ Map<String, Long> strings = new HashMap<String, Long>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref);
+ if (key >= 0) {
+ assertNull(strings.put(str, Long.valueOf(key)));
+ assertEquals(uniqueCount, key);
+ uniqueCount++;
+ assertEquals(hash.size(), count + 1);
+ } else {
+ assertTrue((-key) - 1 < count);
+ assertEquals(hash.size(), count);
+ }
+ }
+ for (Entry<String, Long> entry : strings.entrySet()) {
+ ref.copyChars(entry.getKey());
+ assertEquals(ref, hash.get(entry.getValue().longValue(), scratch));
+ }
+ newHash();
+ }
+ hash.release();
+ }
+
+ /**
+ * Test method for
+ * {@link org.apache.lucene.util.BytesRefHash#add(org.apache.lucene.util.BytesRef)}
+ * .
+ */
+ @Test
+ public void testAdd() {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ Set<String> strings = new HashSet<String>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref);
+
+ if (key >=0) {
+ assertTrue(strings.add(str));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ } else {
+ assertFalse(strings.add(str));
+ assertTrue((-key)-1 < count);
+ assertEquals(str, hash.get((-key)-1, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ }
+ }
+
+ assertAllIn(strings, hash);
+ newHash();
+ }
+ hash.release();
+ }
+
+ @Test
+ public void testFind() throws Exception {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ Set<String> strings = new HashSet<String>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.find(ref);
+ if (key >= 0) { // string found in hash
+ assertFalse(strings.add(str));
+ assertTrue(key < count);
+ assertEquals(str, hash.get(key, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ } else {
+ key = hash.add(ref);
+ assertTrue(strings.add(str));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ }
+ }
+
+ assertAllIn(strings, hash);
+ newHash();
+ }
+ hash.release();
+ }
+
+ private void assertAllIn(Set<String> strings, BytesRefHash hash) {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ long count = hash.size();
+ for (String string : strings) {
+ ref.copyChars(string);
+ long key = hash.add(ref); // add again to check duplicates
+ assertEquals(string, hash.get((-key) - 1, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ assertTrue("key: " + key + " count: " + count + " string: " + string,
+ key < count);
+ }
+ }
+
+ // END - tests borrowed from LUCENE
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java
new file mode 100644
index 0000000..60b0275
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java
@@ -0,0 +1,1077 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class DateHistogramTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private DateTime date(int month, int day) {
+ return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+ }
+
+ private DateTime date(String date) {
+ return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date);
+ }
+
+ private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("date", date(month, day))
+ .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray()
+ .endObject());
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ // TODO: would be nice to have more random data here
+ indexRandom(true,
+ indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3
+ indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3
+ indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16
+ indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3
+ indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16
+ indexDoc(3, 23, 6)); // date: Mar 23, dates: Mar 23, Apr 24
+ ensureSearchable();
+ }
+
+ private static DateHistogram.Bucket getBucket(DateHistogram histogram, DateTime key) {
+ return getBucket(histogram, key, DateFieldMapper.Defaults.DATE_TIME_FORMATTER.format());
+ }
+
+ private static DateHistogram.Bucket getBucket(DateHistogram histogram, DateTime key, String format) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return histogram.getBucketByKey(key);
+ }
+ return histogram.getBucketByKey(key.getMillis());
+ }
+ if (randomBoolean()) {
+ return histogram.getBucketByKey("" + key.getMillis());
+ }
+ return histogram.getBucketByKey(Joda.forPattern(format).printer().print(key));
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ DateHistogram.Bucket bucket = getBucket(histo, key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key.getMillis()));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = getBucket(histo, key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key.getMillis()));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = getBucket(histo, key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key.getMillis()));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void singleValuedField_WithPostTimeZone() throws Exception {
+ SearchResponse response;
+ if (randomBoolean()) {
+ response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).postZone("-01:00"))
+ .execute().actionGet();
+ } else {
+
+ // checking post_zone setting as an int
+
+ response = client().prepareSearch("idx")
+ .addAggregation(new AbstractAggregationBuilder("histo", "date_histogram") {
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject(name)
+ .startObject(type)
+ .field("field", "date")
+ .field("interval", "1d")
+ .field("post_zone", -1)
+ .endObject()
+ .endObject();
+ }
+ })
+ .execute().actionGet();
+ }
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(6));
+
+ long key = new DateTime(2012, 1, 2, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 2, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 15, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 2, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 15, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 23, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.KEY_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.COUNT_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH)
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(1.0));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(5.0));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(15.0));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("sum", true))
+ .subAggregation(max("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("sum", false))
+ .subAggregation(max("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationAsc_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("stats", "sum", true))
+ .subAggregation(stats("stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("stats", "sum", false))
+ .subAggregation(stats("stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .script("new DateTime(_value).plusMonths(1).getMillis()")
+ .interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ [ Jan 2, Feb 3]
+ [ Feb 2, Mar 3]
+ [ Feb 15, Mar 16]
+ [ Mar 2, Apr 3]
+ [ Mar 15, Apr 16]
+ [ Mar 23, Apr 24]
+ */
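+ // With the date arrays above each doc counts towards every month it
+ // mentions, so the expected doc counts are: Jan=1, Feb=3, Mar=5, Apr=3.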
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("dates").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void multiValuedField_OrderedByCountDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ List<DateHistogram.Bucket> buckets = new ArrayList<DateHistogram.Bucket>(histo.getBuckets());
+
+ DateHistogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+ /**
+ * The script will change the document date values to the following:
+ *
+ * doc 1: [ Feb 2, Mar 3]
+ * doc 2: [ Mar 2, Apr 3]
+ * doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3]
+ * doc 5: [ Apr 15, May 16]
+ * doc 6: [ Apr 23, May 24]
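+ *
+ * Expected doc counts per month bucket: Feb=1, Mar=3, Apr=5, May=3.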
+ */
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .script("new DateTime(_value, DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /**
+ * The script will change the document date values to the following:
+ *
+ * doc 1: [ Feb 2, Mar 3]
+ * doc 2: [ Mar 2, Apr 3]
+ * doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3]
+ * doc 5: [ Apr 15, May 16]
+ * doc 6: [ Apr 23, May 24]
+ *
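+ * The inherited max sub-aggregation runs on the shifted values, e.g. the
+ * Apr bucket holds docs 2-6 and its max is May 24.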
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .script("new DateTime(_value, DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ /**
+ * Jan 2
+ * Feb 2
+ * Feb 15
+ * Mar 2
+ * Mar 15
+ * Mar 23
+ */
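+ // The script just reads the single-valued date field, so the buckets match
+ // the raw data: Jan=1, Feb=2, Mar=3.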
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script("doc['date'].value").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .script("doc['date'].value")
+ .interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script("doc['dates'].values").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ [ Jan 2, Feb 3]
+ [ Feb 2, Mar 3]
+ [ Feb 15, Mar 16]
+ [ Mar 2, Apr 3]
+ [ Mar 15, Apr 16]
+ [ Mar 23, Apr 24]
+ */
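+ // The max sub-aggregation runs over all date values of the docs in each
+ // bucket, e.g. the Mar bucket holds docs 2-6 and its max is Apr 24.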
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .script("doc['dates'].values")
+ .interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 2, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
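+ // Only the values 0 and 2 are indexed, so with minDocCount(0) the histogram
+ // materializes an empty bucket at key 1; its nested date_histo sees no docs
+ // and therefore has no buckets.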
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(dateHistogram("date_histo").interval(1)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ DateHistogram dateHisto = bucket.getAggregations().get("date_histo");
+ assertThat(dateHisto, Matchers.notNullValue());
+ assertThat(dateHisto.getName(), equalTo("date_histo"));
+ assertThat(dateHisto.getBuckets().isEmpty(), is(true));
+
+ }
+
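+ // Docs are indexed at 2014-03-11 00:00..04:00 UTC; with preZone -02:00 the
+ // first two hours shift back to Mar 10, giving buckets of 2 and 3 docs.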
+ @Test
+ public void singleValue_WithPreZone() throws Exception {
+ prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet();
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[5];
+ DateTime date = date("2014-03-11T00:00:00+00:00");
+ for (int i = 0; i < reqs.length; i++) {
+ reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject());
+ date = date.plusHours(1);
+ }
+ indexRandom(true, reqs);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .preZone("-2:00")
+ .interval(DateHistogram.Interval.DAY)
+ .format("yyyy-MM-dd"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ DateHistogram histo = response.getAggregations().get("date_histo");
+ Collection<? extends DateHistogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ DateHistogram.Bucket bucket = histo.getBucketByKey("2014-03-10");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = histo.getBucketByKey("2014-03-11");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void singleValue_WithPreZone_WithAdjustLargeInterval() throws Exception {
+ prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet();
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[5];
+ DateTime date = date("2014-03-11T00:00:00+00:00");
+ for (int i = 0; i < reqs.length; i++) {
+ reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject());
+ date = date.plusHours(1);
+ }
+ indexRandom(true, reqs);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .preZone("-2:00")
+ .interval(DateHistogram.Interval.DAY)
+ .preZoneAdjustLargeInterval(true)
+ .format("yyyy-MM-dd'T'HH:mm:ss"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ DateHistogram histo = response.getAggregations().get("date_histo");
+ Collection<? extends DateHistogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ DateHistogram.Bucket bucket = histo.getBucketByKey("2014-03-10T02:00:00");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = histo.getBucketByKey("2014-03-11T02:00:00");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java
new file mode 100644
index 0000000..01e81cd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java
@@ -0,0 +1,1058 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRange;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+public class DateRangeTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("date", date(month, day))
+ .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray()
+ .endObject());
+ }
+
+ private static DateTime date(int month, int day) {
+ return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+ }
+
+ int numDocs;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(7, 20);
+
+ List<IndexRequestBuilder> docs = new ArrayList<IndexRequestBuilder>();
+ docs.addAll(Arrays.asList(
+ indexDoc(1, 2, 1), // Jan 2
+ indexDoc(2, 2, 2), // Feb 2
+ indexDoc(2, 15, 3), // Feb 15
+ indexDoc(3, 2, 4), // Mar 2
+ indexDoc(3, 15, 5), // Mar 15
+ indexDoc(3, 23, 6))); // Mar 23
+
+ // dummy docs
+ for (int i = docs.size(); i < numDocs; ++i) {
+ docs.add(indexDoc(randomIntBetween(6, 10), randomIntBetween(1, 20), randomInt(100)));
+ }
+
+ indexRandom(true, docs);
+ ensureSearchable();
+ }
+
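+ // All docs are dated in 2012, so at test time they fall inside the
+ // now-50y..now-1y range and only the "recently" bucket is populated.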
+ @Test
+ public void dateMath() throws Exception {
+ DateRangeBuilder rangeBuilder = dateRange("range");
+ if (randomBoolean()) {
+ rangeBuilder.field("date");
+ } else {
+ rangeBuilder.script("doc['date'].value");
+ }
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(rangeBuilder
+ .addUnboundedTo("a long time ago", "now-50y")
+ .addRange("recently", "now-50y", "now-1y")
+ .addUnboundedFrom("last year", "now-1y"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ List<DateRange.Bucket> buckets = new ArrayList<DateRange.Bucket>(range.getBuckets());
+
+ DateRange.Bucket bucket = buckets.get(0);
+ assertThat(bucket.getKey(), equalTo("a long time ago"));
+ assertThat(bucket.getDocCount(), equalTo(0L));
+
+ bucket = buckets.get(1);
+ assertThat(bucket.getKey(), equalTo("recently"));
+ assertThat(bucket.getDocCount(), equalTo((long) numDocs));
+
+ bucket = buckets.get(2);
+ assertThat(bucket.getKey(), equalTo("last year"));
+ assertThat(bucket.getDocCount(), equalTo(0L));
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void singleValueField_WithStringDates() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void singleValueField_WithStringDates_WithCustomFormat() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .format("yyyy-MM-dd")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15-2012-03-15");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15-2012-03-15"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void singleValueField_WithDateMath() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-02-15||+1M")
+ .addUnboundedFrom("2012-02-15||+1M"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("r1", date(2, 15))
+ .addRange("r2", date(2, 15), date(3, 15))
+ .addUnboundedFrom("r3", date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ Jan 2, 1
+ Feb 2, 2
+ Feb 15, 3
+ Mar 2, 4
+ Mar 15, 5
+ Mar 23, 6
+ */
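+ // r1 (*-Feb 15) holds Jan 2 and Feb 2, so sum = 1 + 2; r2 (Feb 15-Mar 15)
+ // holds Feb 15 and Mar 2, so sum = 3 + 4; r3 also picks up the dummy docs,
+ // whose random values are why only the presence of its sum is asserted.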
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("r1", date(2, 15))
+ .addRange("r2", date(2, 15), date(3, 15))
+ .addUnboundedFrom("r3", date(3, 15))
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 1 + 2));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 3 + 4));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("r1", date(2, 15))
+ .addRange("r2", date(2, 15), date(3, 15))
+ .addUnboundedFrom("r3", date(3, 15))
+ .subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(3, 15).getMillis()));
+ }
+
+ /*
+ Jan 2, Feb 3, 1
+ Feb 2, Mar 3, 2
+ Feb 15, Mar 16, 3
+ Mar 2, Apr 3, 4
+ Mar 15, Apr 16, 5
+ Mar 23, Apr 24, 6
+ */
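+ // With multi-valued dates a doc can land in several ranges: expected doc
+ // counts are r1=2, r2=3, r3=numDocs-2 (only the first two docs miss r3).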
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("dates")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ /*
+ Feb 2, Mar 3, 1
+ Mar 2, Apr 3, 2
+ Mar 15, Apr 16, 3
+ Apr 2, May 3, 4
+ Apr 15, May 16, 5
+ Apr 23, May 24, 6
+ */
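+ // After the one-month shift only doc 1 still has a value before Feb 15,
+ // hence r1=1, r2=2, r3=numDocs-1.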
+
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("dates")
+ .script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+ }
+
+ /*
+ Feb 2, Mar 3, 1
+ Mar 2, Apr 3, 2
+ Mar 15, Apr 16, 3
+ Apr 2, May 3, 4
+ Apr 15, May 16, 5
+ Apr 23, May 24, 6
+ */
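+ // Same shifted data; the max sub-aggregation also sees the shifted values,
+ // e.g. r1 holds only doc 1 and its max is Mar 3.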
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("dates")
+ .script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 3).getMillis()));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(4, 3).getMillis()));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['date'].value")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['date'].value")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+ /*
+ Jan 2, Feb 3, 1
+ Feb 2, Mar 3, 2
+ Feb 15, Mar 16, 3
+ Mar 2, Apr 3, 4
+ Mar 15, Apr 16, 5
+ Mar 23, Apr 24, 6
+ */
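+ // Same expectations as the multiValuedField test above: r1=2, r2=3,
+ // r3=numDocs-2.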
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['dates'].values")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['dates'].values")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
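+ // make sure the otherwise empty index is allocated before searching it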
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void unmapped_WithStringDates() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
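+ // the two docs hold values 0 and 2, so minDocCount(0) keeps the histogram bucket for key 1 even though it is empty;
+ // the embedded date_range below it must still show up with a zero doc count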
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(dateRange("date_range").addRange("0-1", 0, 1)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ DateRange dateRange = bucket.getAggregations().get("date_range");
+ assertThat(dateRange, Matchers.notNullValue());
+ assertThat(dateRange.getName(), equalTo("date_range"));
+ List<DateRange.Bucket> buckets = new ArrayList<DateRange.Bucket>(dateRange.getBuckets());
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("0-1"));
+ assertThat(buckets.get(0).getFrom().doubleValue(), equalTo(0.0));
+ assertThat(buckets.get(0).getTo().doubleValue(), equalTo(1.0));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true));
+
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java
new file mode 100644
index 0000000..a386558
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java
@@ -0,0 +1,880 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class DoubleTermsTests extends ElasticsearchIntegrationTest {
+
+ private static final int NUM_DOCS = 5; // TODO: randomize the size?
+ private static final String SINGLE_VALUED_FIELD_NAME = "d_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "d_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+
+ IndexRequestBuilder[] lowcardBuilders = new IndexRequestBuilder[NUM_DOCS];
+ for (int i = 0; i < lowcardBuilders.length; i++) {
+ lowcardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value((double)i).value(i + 1d).endArray()
+ .endObject());
+
+ }
+ indexRandom(randomBoolean(), lowcardBuilders);
+ IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO: randomize the size?
+ for (int i = 0; i < highCardBuilders.length; i++) {
+ highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value((double)i).value(i + 1d).endArray()
+ .endObject());
+ }
+ indexRandom(true, highCardBuilders);
+
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
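+ // randomly exercise both key accessors; they must agree on the key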
+ private String key(Terms.Bucket bucket) {
+ return randomBoolean() ? bucket.getKey() : bucket.getKeyAsText().string();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(20)
+ .order(Terms.Order.term(true))) // we need to sort by term because we check the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat((long) sum.getValue(), equalTo(i+i+1l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("_value + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("(long) _value / 1000 + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("1.0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+ values of the multi-valued field after the "_value + 1" script:
+
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+
+ term - doc count - inherited sum (both shifted values of every matching doc):
+
+ 1 - count: 1 - sum: 3 (1 + 2)
+ 2 - count: 2 - sum: 8 (1 + 2 + 2 + 3)
+ 3 - count: 2 - sum: 12 (2 + 3 + 3 + 4)
+ 4 - count: 2 - sum: 16 (3 + 4 + 4 + 5)
+ 5 - count: 2 - sum: 20 (4 + 5 + 5 + 6)
+ 6 - count: 1 - sum: 11 (5 + 6)
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().doubleValue(), equalTo(i+1d));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
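+ // doc j holds values [j, j+1], shifted by the value script to [j+1, j+2], so it lands in the
+ // buckets keyed j+1 and j+2 and the inherited sum adds both of its shifted values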
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j+1) {
+ s += j + 1;
+ s += j+1 + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
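+ // keying each doc by doc['d_values'].value picks a single (the lowest) value per doc, so keys 0..4 get one doc each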
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].value")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {
+
+ // since no type is explicitly defined, ES will assume all values returned by the script to be strings (bytes),
+ // so the aggregation should fail, since the "sum" aggregation can only operate on numeric values.
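+ // (the next test shows the same request succeeding once the value type is made explicit via valueType(DOUBLE))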
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_WithExplicitType() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .valueType(Terms.ValueType.DOUBLE)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i + ".0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i + ".0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
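+ // doc j holds values [j, j+1], which the script returns unchanged, so it lands in the
+ // buckets keyed j and j+1 and the inherited sum adds both of its raw values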
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j+1) {
+ s += j;
+ s += j+1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(randomInt(5)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped", "idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a sub-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("filter", true))
+ .subAggregation(filter("filter").filter(FilterBuilders.termFilter("foo", "bar")))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a sub-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUnknownMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a multi-valued sub-aggregation " +
+ "with an unknown metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a multi-valued sub-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.variance", asc))
+ .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void script_Score() {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(functionScoreQuery(matchAllQuery()).add(ScoreFunctionBuilders.scriptFunction("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")))
+ .addAggregation(terms("terms")
+ .script("ceil(_doc.score/3)")
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
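+ // the scores equal the single-valued field (0..4), so ceil(score/3) yields 0 once, 1 three times and 2 once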
+ for (int i = 0; i < 3; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java
new file mode 100644
index 0000000..3332dd6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.matchAllFilter;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class FilterTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs, numTag1Docs;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx2");
+ numDocs = randomIntBetween(5, 20);
+ numTag1Docs = randomIntBetween(1, numDocs - 1);
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < numTag1Docs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag1")
+ .endObject()));
+ }
+ for (int i = numTag1Docs; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .field("tag", "tag2")
+ .field("name", "name" + i)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+ ensureGreen();
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("tag1").filter(termFilter("tag", "tag1")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("tag1"));
+ assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("tag1")
+ .filter(termFilter("tag", "tag1"))
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("tag1"));
+ assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+
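+ // tag1 docs were indexed with the values 1..numTag1Docs; their sum yields the expected average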
+ long sum = 0;
+ for (int i = 0; i < numTag1Docs; ++i) {
+ sum += i + 1;
+ }
+ assertThat(filter.getAggregations().asList().isEmpty(), is(false));
+ Avg avgValue = filter.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs));
+ }
+
+ @Test
+ public void withContextBasedSubAggregation() throws Exception {
+
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(filter("tag1")
+ .filter(termFilter("tag", "tag1"))
+ .subAggregation(avg("avg_value")))
+ .execute().actionGet();
+
+ fail("expected execution to fail - an attempt to have a context-based numeric sub-aggregation, but there is no value source " +
+ "context which the sub-aggregation can inherit");
+
+ } catch (ElasticsearchException ese) {
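+ // expected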
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(filter("filter").filter(matchAllFilter())))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Filter filter = bucket.getAggregations().get("filter");
+ assertThat(filter, Matchers.notNullValue());
+ assertThat(filter.getName(), equalTo("filter"));
+ assertThat(filter.getDocCount(), is(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java
new file mode 100644
index 0000000..3d0c3ad
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java
@@ -0,0 +1,439 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistance;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class GeoDistanceTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception {
+ XContentBuilder source = jsonBuilder().startObject().field("city", name);
+ source.startArray("location");
+ for (int i = 0; i < latLons.length; i++) {
+ source.value(latLons[i]);
+ }
+ source.endArray();
+ source = source.endObject();
+ return client().prepareIndex(idx, "type").setSource(source);
+ }
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ prepareCreate("idx-multi")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> cities = new ArrayList<IndexRequestBuilder>();
+ cities.addAll(Arrays.asList(
+ // below 500km
+ indexCity("idx", "utrecht", "52.0945, 5.116"),
+ indexCity("idx", "haarlem", "52.3890, 4.637"),
+ // above 500km, below 1000km
+ indexCity("idx", "berlin", "52.540, 13.409"),
+ indexCity("idx", "prague", "50.097679, 14.441314"),
+ // above 1000km
+ indexCity("idx", "tel-aviv", "32.0741, 34.777")));
+
+ // random cities with no location
+ for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) {
+ if (randomBoolean()) {
+ cities.add(indexCity("idx", cityName));
+ }
+ }
+ indexRandom(true, cities);
+
+ cities.clear();
+ cities.addAll(Arrays.asList(
+ indexCity("idx-multi", "city1", "52.3890, 4.637", "50.097679,14.441314"), // first point is ~17.5km away, the second ~710km
+ indexCity("idx-multi", "city2", "52.540, 13.409", "52.0945, 5.116"), // first point is ~576km away, the second ~35km
+ indexCity("idx-multi", "city3", "32.0741, 34.777"))); // above 1000km
+
+ // random cities with no location
+ for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) {
+ if (randomBoolean()) {
+ cities.add(indexCity("idx-multi", cityName));
+ }
+ }
+ indexRandom(true, cities);
+
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void simple_WithCustomKeys() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo("ring1", 500)
+ .addRange("ring2", 500, 1000)
+ .addUnboundedFrom("ring3", 1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("ring1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("ring1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("ring2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("ring2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("ring3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("ring3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000)
+ .subAggregation(terms("cities").field("city")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Terms cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ Set<String> names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKey());
+ }
+ assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKey());
+ }
+ assertThat(names.contains("berlin") && names.contains("prague"), is(true));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKey());
+ }
+ assertThat(names.contains("tel-aviv"), is(true));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "location", "type=geo_point").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i * 2)
+ .field("location", "52.0945, 5.116")
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(geoDistance("geo_dist").field("location").point("52.3760, 4.894").addRange("0-100", 0.0, 100.0)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ GeoDistance geoDistance = bucket.getAggregations().get("geo_dist");
+ List<GeoDistance.Bucket> buckets = new ArrayList<GeoDistance.Bucket>(geoDistance.getBuckets());
+ assertThat(geoDistance, Matchers.notNullValue());
+ assertThat(geoDistance.getName(), equalTo("geo_dist"));
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("0-100"));
+ assertThat(buckets.get(0).getFrom().doubleValue(), equalTo(0.0));
+ assertThat(buckets.get(0).getTo().doubleValue(), equalTo(100.0));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void multiValues() throws Exception {
+ SearchResponse response = client().prepareSearch("idx-multi")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+
+}
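[Editorial note] The ring assertions in withSubAggregation above (utrecht and haarlem inside 500 km of the Amsterdam origin, berlin and prague between 500 and 1000 km, tel-aviv beyond 1000 km) can be sanity-checked offline with the haversine formula. A minimal, self-contained Java sketch; the helper name distanceKm is illustrative and not part of this patch:

    // Great-circle (haversine) distance in kilometers, good enough to decide
    // which geo_distance ring a fixture city should land in.
    static double distanceKm(double lat1, double lon1, double lat2, double lon2) {
        final double r = 6371.0; // mean Earth radius in km
        double dLat = Math.toRadians(lat2 - lat1);
        double dLon = Math.toRadians(lon2 - lon1);
        double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
                + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
                * Math.sin(dLon / 2) * Math.sin(dLon / 2);
        return 2 * r * Math.asin(Math.sqrt(a));
    }
    // e.g. distanceKm(52.3760, 4.894, 52.0945, 5.116) is roughly 35 km -- well
    // under 500 -- so a document located at "52.0945, 5.116" (the point used in
    // emptyAggregation above) belongs in the "*-500.0" ring.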
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java
new file mode 100644
index 0000000..5897045
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.ObjectIntMap;
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectIntCursor;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.GeoBoundingBoxFilterBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class GeoHashGridTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private IndexRequestBuilder indexCity(String name, String latLon) throws Exception {
+ XContentBuilder source = jsonBuilder().startObject().field("city", name);
+ if (latLon != null) {
+ source = source.field("location", latLon);
+ }
+ source = source.endObject();
+ return client().prepareIndex("idx", "type").setSource(source);
+ }
+
+
+ ObjectIntMap<String> expectedDocCountsForGeoHash = null;
+ int highestPrecisionGeohash = 12;
+ int numRandomPoints = 100;
+
+ String smallestGeoHash = null;
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> cities = new ArrayList<IndexRequestBuilder>();
+ Random random = getRandom();
+ expectedDocCountsForGeoHash = new ObjectIntOpenHashMap<String>(numRandomPoints * 2);
+ for (int i = 0; i < numRandomPoints; i++) {
+ //generate random point
+ double lat = (180d * random.nextDouble()) - 90d;
+ double lng = (360d * random.nextDouble()) - 180d;
+ String randomGeoHash = GeoHashUtils.encode(lat, lng, highestPrecisionGeohash);
+ //Index at the highest resolution
+ cities.add(indexCity(randomGeoHash, lat + ", " + lng));
+ expectedDocCountsForGeoHash.put(randomGeoHash, expectedDocCountsForGeoHash.getOrDefault(randomGeoHash, 0) + 1);
+ //Update expected doc counts for all resolutions..
+ for (int precision = highestPrecisionGeohash - 1; precision > 0; precision--) {
+ String hash = GeoHashUtils.encode(lat, lng, precision);
+ if ((smallestGeoHash == null) || (hash.length() < smallestGeoHash.length())) {
+ smallestGeoHash = hash;
+ }
+ expectedDocCountsForGeoHash.put(hash, expectedDocCountsForGeoHash.getOrDefault(hash, 0) + 1);
+ }
+ }
+ indexRandom(true, cities);
+ ensureSearchable();
+ }
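+    // Editorial note: smallestGeoHash ends up holding a precision-1 hash (the
+    // shortest one seen in the loop above); filtered() below uses that single
+    // cell as both corners of its bounding box.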
+
+
+ @Test
+ public void simple() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertTrue("Geohash " + geohash + " should have a non-empty bucket", bucketCount != 0);
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+ }
+ }
+ }
+
+ @Test
+ public void filtered() throws Exception {
+ GeoBoundingBoxFilterBuilder bbox = new GeoBoundingBoxFilterBuilder("location");
+ bbox.topLeft(smallestGeoHash).bottomRight(smallestGeoHash).filterName("bbox");
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(
+ AggregationBuilders.filter("filtered").filter(bbox)
+ .subAggregation(
+ geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+
+ Filter filter = response.getAggregations().get("filtered");
+
+ GeoHashGrid geoGrid = filter.getAggregations().get("geohashgrid");
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertTrue("Geohash " + geohash + " should have a non-empty bucket", bucketCount != 0);
+                assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash));
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+
+ }
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ assertThat(geoGrid.getBuckets().size(), equalTo(0));
+ }
+
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertTrue("Geohash " + geohash + " should have a non-empty bucket", bucketCount != 0);
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+ }
+ }
+ }
+
+ @Test
+ public void testTopMatch() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .size(1)
+ .shardSize(100)
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ //Check we only have one bucket with the best match for that resolution
+ assertThat(geoGrid.getBuckets().size(), equalTo(1));
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = 0;
+ for (ObjectIntCursor<String> cursor : expectedDocCountsForGeoHash) {
+ if (cursor.key.length() == precision) {
+ expectedBucketCount = Math.max(expectedBucketCount, cursor.value);
+ }
+ }
+            assertTrue("Geohash " + geohash + " should have a non-empty bucket", bucketCount != 0);
+            assertEquals("Geohash " + geohash + " has wrong doc count",
+                    expectedBucketCount, bucketCount);
+ }
+ }
+ }
+
+}
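[Editorial note] The per-precision bookkeeping in init() relies on geohashes nesting: a point's hash at precision p is a prefix of its hash at any higher precision. A minimal standalone sketch using the same GeoHashUtils as the tests, not part of this patch:

    // Coarser hashes are prefixes of finer ones, so a point indexed once can be
    // counted toward the expected total of every enclosing geohash_grid cell.
    double lat = 52.3760, lng = 4.894;
    String finest = GeoHashUtils.encode(lat, lng, 12);
    for (int precision = 1; precision < 12; precision++) {
        assert finest.startsWith(GeoHashUtils.encode(lat, lng, precision));
    }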
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java
new file mode 100644
index 0000000..05b0ea5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class GlobalTests extends ElasticsearchIntegrationTest {
+
+ int numDocs;
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx2");
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ numDocs = randomIntBetween(3, 20);
+ for (int i = 0; i < numDocs / 2; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i+1).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag1")
+ .endObject()));
+ }
+ for (int i = numDocs / 2; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i+1).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag2")
+ .field("name", "name" + i+1)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void withStatsSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.termQuery("tag", "tag1"))
+ .addAggregation(global("global")
+ .subAggregation(stats("value_stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Global global = response.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo((long) numDocs));
+ assertThat(global.getAggregations().asList().isEmpty(), is(false));
+
+ Stats stats = global.getAggregations().get("value_stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("value_stats"));
+ long sum = 0;
+ for (int i = 0; i < numDocs; ++i) {
+ sum += i + 1;
+ }
+ assertThat(stats.getAvg(), equalTo((double) sum / numDocs));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo((double) numDocs));
+ assertThat(stats.getCount(), equalTo((long) numDocs));
+ assertThat(stats.getSum(), equalTo((double) sum));
+ }
+
+ @Test
+ public void nonTopLevel() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx")
+ .setQuery(QueryBuilders.termQuery("tag", "tag1"))
+ .addAggregation(global("global")
+ .subAggregation(global("inner_global")))
+ .execute().actionGet();
+
+ fail("expected to fail executing non-top-level global aggregator. global aggregations are only allowed as top level" +
+ "aggregations");
+
+ } catch (ElasticsearchException ese) {
+ }
+ }
+}
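[Editorial note] withStatsSubAggregator derives the expected sum with a loop; the closed form makes the expected stats explicit. A minimal arithmetic sketch (the helper name expectedSum is illustrative, not part of this patch):

    // Stats over the values 1..n: sum = n(n + 1) / 2, min = 1, max = n,
    // avg = sum / n = (n + 1) / 2 -- exactly what the assertions above verify.
    static long expectedSum(int numDocs) {
        return (long) numDocs * (numDocs + 1) / 2;
    }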
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java
new file mode 100644
index 0000000..b9f38d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java
@@ -0,0 +1,790 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongOpenHashSet;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class HistogramTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs;
+ int interval;
+ int numValueBuckets, numValuesBuckets;
+ long[] valueCounts, valuesCounts;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(6, 20);
+ interval = randomIntBetween(2, 5);
+
+ numValueBuckets = numDocs / interval + 1;
+ valueCounts = new long[numValueBuckets];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket = (i + 1) / interval;
+ ++valueCounts[bucket];
+ }
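+        // Editorial note: doc i is indexed with value i + 1, which the histogram
+        // places in bucket (i + 1) / interval (integer division); the largest
+        // value, numDocs, lands in bucket numDocs / interval, hence the
+        // numDocs / interval + 1 buckets allocated above.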
+
+ numValuesBuckets = (numDocs + 1) / interval + 1;
+ valuesCounts = new long[numValuesBuckets];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket1 = (i + 1) / interval;
+ final int bucket2 = (i + 2) / interval;
+ ++valuesCounts[bucket1];
+ if (bucket1 != bucket2) {
+ ++valuesCounts[bucket2];
+ }
+ }
+
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i + 1)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i + 1).value(i + 2).endArray()
+ .field("tag", "tag" + i)
+ .endObject());
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet buckets = new LongOpenHashSet();
+ List<Histogram.Bucket> histoBuckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ long previousCount = Long.MIN_VALUE;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histoBuckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertEquals(0, key % interval);
+ assertTrue(buckets.add(key));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
+ assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount));
+ previousCount = bucket.getDocCount();
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet buckets = new LongOpenHashSet();
+ List<Histogram.Bucket> histoBuckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ long previousCount = Long.MAX_VALUE;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histoBuckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertEquals(0, key % interval);
+ assertTrue(buckets.add(key));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
+ assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount));
+ previousCount = bucket.getDocCount();
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("sum", true))
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.NEGATIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ assertThat(sum.getValue(), greaterThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("sum", false))
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.POSITIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ assertThat(sum.getValue(), lessThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationAsc_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("stats.sum", true))
+ .subAggregation(stats("stats")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.NEGATIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(stats.getSum(), equalTo((double) s));
+ assertThat(stats.getSum(), greaterThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("stats.sum", false))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.POSITIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(stats.getSum(), equalTo((double) s));
+ assertThat(stats.getSum(), lessThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).script("_value + 1").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 1) / interval + 1];
+ for (int i = 0; i < numDocs ; ++i) {
+ ++counts[(i + 2) / interval];
+ }
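+        // Editorial note: the value script "_value + 1" shifts the stored values
+        // 1..numDocs to 2..numDocs + 1, so bucket indices now run from
+        // 2 / interval (0 unless interval == 2) up to (numDocs + 1) / interval.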
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numBuckets));
+
+ for (int i = 2 / interval; i <= (numDocs + 1) / interval; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(counts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValuesBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs ; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numBuckets));
+
+ for (int i = 2 / interval; i <= (numDocs + 2) / interval; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(counts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").interval(interval)
+ .subAggregation(terms(MULTI_VALUED_FIELD_NAME).order(Terms.Order.term(true))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs ; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numBuckets));
+
+ for (int i = 2 / interval; i < (numDocs + 2) / interval; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(counts[i]));
+ Terms terms = bucket.getAggregations().get(MULTI_VALUED_FIELD_NAME);
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo(MULTI_VALUED_FIELD_NAME));
+ int minTerm = Math.max(2, i * interval - 1);
+ int maxTerm = Math.min(numDocs + 2, (i + 1) * interval);
+ assertThat(terms.getBuckets().size(), equalTo(maxTerm - minTerm + 1));
+ Iterator<Terms.Bucket> iter = terms.getBuckets().iterator();
+ for (int j = minTerm; j <= maxTerm; ++j) {
+ assertThat(iter.next().getKeyAsNumber().longValue(), equalTo((long) j));
+ }
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values").interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i || (j + 2) / interval == i) {
+ s += j + 1;
+ s += j + 2;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i * 2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(histogram("sub_histo").interval(1l)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ histo = bucket.getAggregations().get("sub_histo");
+ assertThat(histo, Matchers.notNullValue());
+ assertThat(histo.getName(), equalTo("sub_histo"));
+ assertThat(histo.getBuckets().isEmpty(), is(true));
+ }
+
+}
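[Editorial note] Every key assertion in HistogramTests leans on the same bucketing rule: with a positive interval, a non-negative value is keyed by rounding it down to a multiple of the interval. A minimal standalone sketch (the helper name bucketKey is illustrative, not part of this patch):

    // Histogram bucket key for non-negative values: integer division floors,
    // so multiplying back yields the largest multiple of interval <= value.
    static long bucketKey(long value, long interval) {
        return (value / interval) * interval;
    }
    // e.g. interval = 3: values 1, 2 -> key 0; 3, 4, 5 -> key 3; 6 -> key 6.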
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java
new file mode 100644
index 0000000..436b476
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java
@@ -0,0 +1,865 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+public class IPv4RangeTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "ip", "type=ip", "ips", "type=ip")
+ .execute().actionGet();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[255]; // TODO randomize the size?
+ // TODO randomize the values in the docs?
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("ip", "10.0.0." + (i))
+ .startArray("ips").value("10.0.0." + i).value("10.0.0." + (i + 1)).endArray()
+ .field("value", (i < 100 ? 1 : i < 200 ? 2 : 3)) // 100 1's, 100 2's, and 55 3's
+ .endObject());
+ }
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void singleValueField_WithMaskRange() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addMaskRange("10.0.0.0/25")
+ .addMaskRange("10.0.0.128/25"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(2));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("10.0.0.0/25");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.0/25"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.0")));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.0"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.128")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.128"));
+ assertThat(bucket.getDocCount(), equalTo(128l));
+
+ bucket = range.getBucketByKey("10.0.0.128/25");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.128/25"));
+ assertThat((long) bucket.getFrom().doubleValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.128")));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.128"));
+ assertThat((long) bucket.getTo().doubleValue(), equalTo(IpFieldMapper.ipToLong("10.0.1.0"))); // range is exclusive on the to side
+ assertThat(bucket.getToAsString(), equalTo("10.0.1.0"));
+ assertThat(bucket.getDocCount(), equalTo(127l)); // include 10.0.0.128
+ }
+
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("r1", "10.0.0.100")
+ .addRange("r2", "10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("r3", "10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
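+ // each doc's "value" is constant within a range (1, 2 or 3), so the expected sums are simply docCount * value: 100, 200 and 55 * 3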
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 100));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 200));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 55*3));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
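+ // "max" declares no field, so it inherits the enclosing value source ("ip"); each bucket's max is the numeric form of its highest address (.99, .199, .254)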
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .script("_value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
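+ // "_value" is the identity value script, so the buckets should be identical to the plain singleValueField test above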
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ /*
+ ips per doc i (i = 0..254), shown by the last octet: [i, i + 1]
+ [0, 1]
+ [1, 2]
+ [2, 3]
+ ...
+ [99, 100]
+ [100, 101]
+ [101, 102]
+ ...
+ [199, 200]
+ [200, 201]
+ [201, 202]
+ ...
+ [253, 254]
+ [254, 255]
+ */
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ips")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
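+ // the ips arrays straddle the range boundaries: doc 99 also carries 10.0.0.100 and doc 199 carries 10.0.0.200, so the buckets overlap (100 + 101 + 56 > 255 docs)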
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ips")
+ .script("_value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ips")
+ .script("_value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
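+ // the inherited max runs on the same script values as the ranges; because the ips straddle the boundaries, the maxima land exactly on .100, .200 and .255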
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ip'].value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
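+ // a value-source script instead of field(): doc['ip'].value resolves to the ip's long form, so the buckets mirror singleValueField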
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ip'].value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ips'].values")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
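+ // doc['ips'].values emits every value of the multi-valued field, so the counts match the multiValuedField test above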
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ips'].values")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
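+ // "ip" has no mapping in idx_unmapped; the agg should still return all three requested buckets, each with a doc count of 0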
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "ip", "type=ip").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i * 2)
+ .field("ip", "10.0.0.5")
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(ipRange("ip_range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10")))
+ .execute().actionGet();
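+ // minDocCount(0) forces an empty histogram bucket at key 1; the ip_range sub-aggregation must still materialize "r1" with a doc count of 0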
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ IPv4Range range = bucket.getAggregations().get("ip_range");
+ List<IPv4Range.Bucket> buckets = new ArrayList<IPv4Range.Bucket>(range.getBuckets());
+ assertThat(range, Matchers.notNullValue());
+ assertThat(range.getName(), equalTo("ip_range"));
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("r1"));
+ assertThat(buckets.get(0).getFromAsString(), equalTo("10.0.0.1"));
+ assertThat(buckets.get(0).getToAsString(), equalTo("10.0.0.10"));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java
new file mode 100644
index 0000000..c9c7d6c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongLongMap;
+import com.carrotsearch.hppc.LongLongOpenHashMap;
+import com.carrotsearch.hppc.cursors.LongLongCursor;
+import org.elasticsearch.common.util.BigArraysTests;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Iterator;
+
+public class LongHashTests extends ElasticsearchTestCase {
+
+ public void testDuell() {
+ final Long[] values = new Long[randomIntBetween(1, 100000)];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = randomLong();
+ }
+ final LongLongMap valueToId = new LongLongOpenHashMap();
+ final long[] idToValue = new long[values.length];
+ // Test high load factors to make sure that collision resolution works fine
+ final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
+ final LongHash longHash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randomCacheRecycler());
+ final int iters = randomInt(1000000);
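+ // duel pattern: mirror every insertion in the hppc map; add() is expected to return the fresh id for a new key, or -1 - id when the key already exists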
+ for (int i = 0; i < iters; ++i) {
+ final Long value = randomFrom(values);
+ if (valueToId.containsKey(value)) {
+ assertEquals(-1 - valueToId.get(value), longHash.add(value));
+ } else {
+ assertEquals(valueToId.size(), longHash.add(value));
+ idToValue[valueToId.size()] = value;
+ valueToId.put(value, valueToId.size());
+ }
+ }
+
+ assertEquals(valueToId.size(), longHash.size());
+ for (Iterator<LongLongCursor> iterator = valueToId.iterator(); iterator.hasNext(); ) {
+ final LongLongCursor next = iterator.next();
+ assertEquals(next.value, longHash.find(next.key));
+ }
+
+ for (long i = 0; i < longHash.capacity(); ++i) {
+ final long id = longHash.id(i);
+ if (id >= 0) {
+ assertEquals(idToValue[(int) id], longHash.key(i));
+ }
+ }
+ longHash.release();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java
new file mode 100644
index 0000000..bb1baca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java
@@ -0,0 +1,848 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class LongTermsTests extends ElasticsearchIntegrationTest {
+
+ private static final int NUM_DOCS = 5; // TODO randomize the size?
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS];
+ for (int i = 0; i < lowCardBuilders.length; i++) {
+ lowCardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray()
+ .endObject());
+ }
+ indexRandom(randomBoolean(), lowCardBuilders);
+ IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size?
+ for (int i = 0; i < highCardBuilders.length; i++) {
+ highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray()
+ .endObject());
+
+ }
+ indexRandom(true, highCardBuilders);
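+ // "type" holds NUM_DOCS low-cardinality docs (terms 0..4), while "high_card_type" holds 100 docs for the size/ordering tests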
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ private String key(Terms.Bucket bucket) {
+ return randomBoolean() ? bucket.getKey() : bucket.getKeyAsText().string();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(20)
+ .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat((long) sum.getValue(), equalTo(i+i+1l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("_value + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value - 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i - 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i-1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i-1));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("floor(_value / 1000 + 1)"))
+ .execute().actionGet();
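+ // floor(_value / 1000 + 1) collapses every value (0..5) to 1.0, so all 5 docs are expected to land in a single bucket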
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("1.0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+ multi-valued field per doc after the "_value + 1" script:
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+
+ per term: doc count and sum of that term's occurrences:
+ 1 - count: 1 - sum: 1
+ 2 - count: 2 - sum: 4
+ 3 - count: 2 - sum: 6
+ 4 - count: 2 - sum: 8
+ 5 - count: 2 - sum: 10
+ 6 - count: 1 - sum: 6
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j+1) {
+ s += j + 1;
+ s += j+1 + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {
+
+ // since no type is explicitly defined, ES will assume all values returned by the script are strings (bytes),
+ // so the aggregation should fail, since the "sum" aggregation can only operate on numeric values.
+
+ try {
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_WithExplicitType() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .valueType(Terms.ValueType.LONG)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
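+ // valueType(LONG) tells the terms agg to treat the script output as longs, providing the numeric context the "sum" sub-aggregation needs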
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j+1) {
+ s += j;
+ s += j+1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(randomInt(5)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped", "idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
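+        // minDocCount(0) makes the histogram emit empty buckets (here the one for value 1);
+        // their sub-aggregations must still be reported, just with no buckets of their own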
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("filter", true))
+ .subAggregation(filter("filter").filter(FilterBuilders.termFilter("foo", "bar")))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+    public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUnknownMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "with an unknown specified metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.variance", asc))
+ .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java
new file mode 100644
index 0000000..9acc300
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormat;
+import org.junit.Before;
+
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+
+public class MinDocCountTests extends ElasticsearchIntegrationTest {
+
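+    // only docs indexed with "match": true participate in the searches below, so the
+    // effective per-term doc counts are a random subset of what gets indexed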
+ private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true);
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private int cardinality;
+
+ @Before
+ public void indexData() throws Exception {
+ createIndex("idx");
+
+ cardinality = randomIntBetween(8, 30);
+ final List<IndexRequestBuilder> indexRequests = new ArrayList<IndexRequestBuilder>();
+ final Set<String> stringTerms = new HashSet<String>();
+ final LongSet longTerms = new LongOpenHashSet();
+ final Set<String> dateTerms = new HashSet<String>();
+ for (int i = 0; i < cardinality; ++i) {
+ String stringTerm;
+ do {
+ stringTerm = RandomStrings.randomAsciiOfLength(getRandom(), 8);
+ } while (!stringTerms.add(stringTerm));
+ long longTerm;
+ do {
+ longTerm = randomInt(cardinality * 2);
+ } while (!longTerms.add(longTerm));
+ double doubleTerm = longTerm * Math.PI;
+ String dateTerm = DateTimeFormat.forPattern("yyyy-MM-dd").print(new DateTime(2014, 1, ((int) longTerm % 20) + 1, 0, 0));
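+            // index each term with a random frequency so the minDocCount thresholds
+            // exercised below actually filter some buckets out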
+ final int frequency = randomBoolean() ? 1 : randomIntBetween(2, 20);
+ for (int j = 0; j < frequency; ++j) {
+ indexRequests.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("s", stringTerm)
+ .field("l", longTerm)
+ .field("d", doubleTerm)
+ .field("date", dateTerm)
+ .field("match", randomBoolean())
+ .endObject()));
+ }
+ }
+ cardinality = stringTerms.size();
+
+ indexRandom(true, indexRequests);
+ ensureSearchable();
+ }
+
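+    // whether the terms aggregation reads the field directly or through an equivalent
+    // "doc[...].values" script; both paths must honor minDocCount the same way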
+ private enum Script {
+ NO {
+ @Override
+ TermsBuilder apply(TermsBuilder builder, String field) {
+ return builder.field(field);
+ }
+ },
+ YES {
+ @Override
+ TermsBuilder apply(TermsBuilder builder, String field) {
+ return builder.script("doc['" + field + "'].values");
+ }
+ };
+ abstract TermsBuilder apply(TermsBuilder builder, String field);
+ }
+
+ // check that terms2 is a subset of terms1
+ private void assertSubset(Terms terms1, Terms terms2, long minDocCount, int size, String include) {
+        final Matcher matcher = include == null ? null : Pattern.compile(include).matcher("");
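+        // walk the unfiltered buckets: every bucket that survives the minDocCount and
+        // include filters must reappear in terms2 with the same key and count, in order,
+        // until the requested size is exhausted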
+ final Iterator<Terms.Bucket> it1 = terms1.getBuckets().iterator();
+ final Iterator<Terms.Bucket> it2 = terms2.getBuckets().iterator();
+ int size2 = 0;
+ while (it1.hasNext()) {
+ final Terms.Bucket bucket1 = it1.next();
+ if (bucket1.getDocCount() >= minDocCount && (matcher == null || matcher.reset(bucket1.getKey()).matches())) {
+ if (size2++ == size) {
+ break;
+ }
+ assertTrue(it2.hasNext());
+ final Terms.Bucket bucket2 = it2.next();
+ assertEquals(bucket1.getKeyAsText(), bucket2.getKeyAsText());
+ assertEquals(bucket1.getDocCount(), bucket2.getDocCount());
+ }
+ }
+ assertFalse(it2.hasNext());
+ }
+
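+    // histogram flavor of the subset check: no size/include filtering, only minDocCount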
+ private void assertSubset(Histogram histo1, Histogram histo2, long minDocCount) {
+ final Iterator<? extends Histogram.Bucket> it2 = histo2.getBuckets().iterator();
+ for (Histogram.Bucket b1 : histo1.getBuckets()) {
+ if (b1.getDocCount() >= minDocCount) {
+ final Histogram.Bucket b2 = it2.next();
+ assertEquals(b1.getKeyAsNumber(), b2.getKeyAsNumber());
+ assertEquals(b1.getDocCount(), b2.getDocCount());
+ }
+ }
+ }
+
+ private void assertSubset(DateHistogram histo1, DateHistogram histo2, long minDocCount) {
+ final Iterator<? extends DateHistogram.Bucket> it2 = histo2.getBuckets().iterator();
+ for (DateHistogram.Bucket b1 : histo1.getBuckets()) {
+ if (b1.getDocCount() >= minDocCount) {
+ final DateHistogram.Bucket b2 = it2.next();
+ assertEquals(b1.getKeyAsNumber(), b2.getKeyAsNumber());
+ assertEquals(b1.getDocCount(), b2.getDocCount());
+ }
+ }
+ }
+
+ public void testStringTermAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testStringScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testStringTermDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testStringScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testStringCountAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testStringScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testStringCountDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testStringScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false));
+ }
+
+ public void testStringCountAscWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*");
+ }
+
+ public void testStringScriptCountAscWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*");
+ }
+
+ public void testStringCountDescWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*");
+ }
+
+ public void testStringScriptCountDescWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*");
+ }
+
+ public void testLongTermAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testLongScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testLongTermDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testLongScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testLongCountAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testLongScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testLongCountDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testLongScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(false));
+ }
+
+ public void testDoubleTermAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testDoubleScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testDoubleTermDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testDoubleScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testDoubleCountAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testDoubleScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testDoubleCountDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testDoubleScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(false));
+ }
+
+ private void testMinDocCountOnTerms(String field, Script script, Terms.Order order) throws Exception {
+ testMinDocCountOnTerms(field, script, order, null);
+ }
+
+ private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include) throws Exception {
+ // all terms
+ final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(script.apply(terms("terms"), field)
+ .executionHint(StringTermsTests.randomExecutionHint())
+ .order(order)
+ .size(cardinality + randomInt(10))
+ .minDocCount(0))
+ .execute().actionGet();
+ final Terms allTerms = allTermsResponse.getAggregations().get("terms");
+ assertEquals(cardinality, allTerms.getBuckets().size());
+
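+        // for each minDocCount threshold, the filtered response must be a prefix (up to
+        // `size`) of the unfiltered bucket list computed above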
+ for (long minDocCount = 0; minDocCount < 20; ++minDocCount) {
+ final int size = randomIntBetween(1, cardinality + 2);
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(script.apply(terms("terms"), field)
+ .executionHint(StringTermsTests.randomExecutionHint())
+ .order(order)
+ .size(size)
+ .include(include)
+ .shardSize(cardinality + randomInt(10))
+ .minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
+ }
+
+ }
+
+ public void testHistogramCountAsc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.COUNT_ASC);
+ }
+
+ public void testHistogramCountDesc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.COUNT_DESC);
+ }
+
+ public void testHistogramKeyAsc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.KEY_ASC);
+ }
+
+ public void testHistogramKeyDesc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.KEY_DESC);
+ }
+
+ public void testDateHistogramCountAsc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.COUNT_ASC);
+ }
+
+ public void testDateHistogramCountDesc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.COUNT_DESC);
+ }
+
+ public void testDateHistogramKeyAsc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.KEY_ASC);
+ }
+
+ public void testDateHistogramKeyDesc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.KEY_DESC);
+ }
+
+ private void testMinDocCountOnHistogram(Histogram.Order order) throws Exception {
+ final int interval = randomIntBetween(1, 3);
+ final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0))
+ .execute().actionGet();
+
+ final Histogram allHisto = allResponse.getAggregations().get("histo");
+
+ for (long minDocCount = 0; minDocCount < 50; ++minDocCount) {
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount);
+ }
+
+ }
+
+ private void testMinDocCountOnDateHistogram(Histogram.Order order) throws Exception {
+ final int interval = randomIntBetween(1, 3);
+ final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).order(order).minDocCount(0))
+ .execute().actionGet();
+
+ final DateHistogram allHisto = allResponse.getAggregations().get("histo");
+
+ for (long minDocCount = 0; minDocCount < 50; ++minDocCount) {
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).order(order).minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allHisto, (DateHistogram) response.getAggregations().get("histo"), minDocCount);
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java
new file mode 100644
index 0000000..632f4e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class MissingTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs, numDocsMissing, numDocsUnmapped;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ numDocs = randomIntBetween(5, 20);
+ numDocsMissing = randomIntBetween(1, numDocs - 1);
+ for (int i = 0; i < numDocsMissing; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .endObject()));
+ }
+ for (int i = numDocsMissing; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("tag", "tag1")
+ .endObject()));
+ }
+
+ createIndex("unmapped_idx");
+ numDocsUnmapped = randomIntBetween(2, 5);
+ for (int i = 0; i < numDocsUnmapped; i++) {
+ builders.add(client().prepareIndex("unmapped_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .endObject()));
+ }
+
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+ ensureGreen(); // wait until we are ready to serve requests
+ ensureSearchable();
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsUnmapped));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag")
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat(missing.getAggregations().asList().isEmpty(), is(false));
+
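+        // the docs missing a "tag" are exactly ids 0..numDocsMissing-1 (idx) and
+        // 0..numDocsUnmapped-1 (unmapped_idx), each with value == id, hence the expected average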
+ long sum = 0;
+ for (int i = 0; i < numDocsMissing; ++i) {
+ sum += i;
+ }
+ for (int i = 0; i < numDocsUnmapped; ++i) {
+ sum += i;
+ }
+ Avg avgValue = missing.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / (numDocsMissing + numDocsUnmapped)));
+ }
+
+ @Test
+ public void withInheritedSubMissing() throws Exception {
+
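+        // sub_missing declares no field, so it inherits "tag" from its parent and
+        // must therefore report the same doc count as top_missing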
+ SearchResponse response = client().prepareSearch()
+ .addAggregation(missing("top_missing").field("tag")
+ .subAggregation(missing("sub_missing")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing topMissing = response.getAggregations().get("top_missing");
+ assertThat(topMissing, notNullValue());
+ assertThat(topMissing.getName(), equalTo("top_missing"));
+ assertThat(topMissing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat(topMissing.getAggregations().asList().isEmpty(), is(false));
+
+ Missing subMissing = topMissing.getAggregations().get("sub_missing");
+ assertThat(subMissing, notNullValue());
+ assertThat(subMissing.getName(), equalTo("sub_missing"));
+ assertThat(subMissing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(missing("missing")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Missing missing = bucket.getAggregations().get("missing");
+ assertThat(missing, Matchers.notNullValue());
+ assertThat(missing.getName(), equalTo("missing"));
+ assertThat(missing.getDocCount(), is(0l));
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java
new file mode 100644
index 0000000..789b814
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.Comparators;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+public class NaNSortingTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private enum SubAggregation {
+ AVG("avg") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return avg(name).field("numeric_field");
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((Avg) aggregation).getValue();
+ }
+ },
+ VARIANCE("variance") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return extendedStats(name).field("numeric_field");
+ }
+ @Override
+ public String sortKey() {
+ return name + ".variance";
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((ExtendedStats) aggregation).getVariance();
+ }
+ },
+ STD_DEVIATION("std_deviation"){
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return extendedStats(name).field("numeric_field");
+ }
+ @Override
+ public String sortKey() {
+ return name + ".std_deviation";
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((ExtendedStats) aggregation).getStdDeviation();
+ }
+ };
+
+ SubAggregation(String name) {
+ this.name = name;
+ }
+
+        public final String name;
+
+ public abstract MetricsAggregationBuilder<?> builder();
+
+ public String sortKey() {
+ return name;
+ }
+
+ public abstract double getValue(Aggregation aggregation);
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ final int numDocs = randomIntBetween(2, 10);
+ for (int i = 0; i < numDocs; ++i) {
+ final long value = randomInt(5);
+ XContentBuilder source = jsonBuilder().startObject().field("long_value", value).field("double_value", value + 0.05).field("string_value", "str_" + value);
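+            // leave the numeric field out of roughly half the docs so the sorted
+            // sub-aggregation values include NaNs, which is what this test exercises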
+ if (randomBoolean()) {
+ source.field("numeric_value", randomDouble());
+ }
+ client().prepareIndex("idx", "type").setSource(source.endObject()).execute().actionGet();
+ }
+ refresh();
+ ensureSearchable();
+ }
+
+ private void assertCorrectlySorted(Terms terms, boolean asc, SubAggregation agg) {
+ assertThat(terms, notNullValue());
+ double previousValue = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
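+        // previousValue starts at the extreme so the first bucket always passes; the NaN
+        // values produced by docs without the numeric field are handled by compareDiscardNaN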
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ Aggregation sub = bucket.getAggregations().get(agg.name);
+ double value = agg.getValue(sub);
+ assertTrue(Comparators.compareDiscardNaN(previousValue, value, asc) <= 0);
+ previousValue = value;
+ }
+ }
+
+ private void assertCorrectlySorted(Histogram histo, boolean asc, SubAggregation agg) {
+ assertThat(histo, notNullValue());
+ double previousValue = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ Aggregation sub = bucket.getAggregations().get(agg.name);
+ double value = agg.getValue(sub);
+ assertTrue(Comparators.compareDiscardNaN(previousValue, value, asc) <= 0);
+ previousValue = value;
+ }
+ }
+
+    private void testTerms(String fieldName) {
+ final boolean asc = randomBoolean();
+ SubAggregation agg = randomFrom(SubAggregation.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field(fieldName).subAggregation(agg.builder()).order(Terms.Order.aggregation(agg.sortKey(), asc)))
+ .execute().actionGet();
+
+ final Terms terms = response.getAggregations().get("terms");
+ assertCorrectlySorted(terms, asc, agg);
+ }
+
+ @Test
+ public void stringTerms() {
+ testTerms("string_value");
+ }
+
+ @Test
+ public void longTerms() {
+ testTerms("long_value");
+ }
+
+ @Test
+ public void doubleTerms() {
+ testTerms("double_value");
+ }
+
+ @Test
+ public void longHistogram() {
+ final boolean asc = randomBoolean();
+ SubAggregation agg = randomFrom(SubAggregation.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo")
+ .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(Histogram.Order.aggregation(agg.sortKey(), asc)))
+ .execute().actionGet();
+
+ final Histogram histo = response.getAggregations().get("histo");
+ assertCorrectlySorted(histo, asc, agg);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java
new file mode 100644
index 0000000..7b88776
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java
@@ -0,0 +1,345 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class NestedTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numParents;
+ int[] numChildren;
+
+ @Before
+ public void init() throws Exception {
+
+ prepareCreate("idx")
+ .addMapping("type", "nested", "type=nested")
+ .setSettings(indexSettings())
+ .execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+
+ numParents = randomIntBetween(3, 10);
+ numChildren = new int[numParents];
+ int totalChildren = 0;
+ for (int i = 0; i < numParents; ++i) {
+ if (i == numParents - 1 && totalChildren == 0) {
+ // we need at least one child overall
+ numChildren[i] = randomIntBetween(1, 5);
+ } else {
+ numChildren[i] = randomInt(5);
+ }
+ totalChildren += numChildren[i];
+ }
+ assertTrue(totalChildren > 0);
+
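+        // parent i gets value i+1 and nested children with values i+1 .. i+numChildren[i];
+        // the assertions below recompute stats and maxima from this layout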
+ for (int i = 0; i < numParents; i++) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .startArray("nested");
+ for (int j = 0; j < numChildren[i]; ++j) {
+ source = source.startObject().field("value", i + 1 + j).endObject();
+ }
+ source = source.endArray().endObject();
+ builders.add(client().prepareIndex("idx", "type", ""+i+1).setSource(source));
+ }
+
+ prepareCreate("idx_nested_nested_aggs")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("nested2")
+ .field("type", "nested")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()).get();
+
+ builders.add(
+ client().prepareIndex("idx_nested_nested_aggs", "type", "1")
+ .setSource(jsonBuilder().startObject()
+ .startArray("nested1")
+ .startObject()
+ .field("a", "a")
+ .startArray("nested2")
+ .startObject()
+ .field("b", 2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("a", "b")
+ .startArray("nested2")
+ .startObject()
+ .field("b", 2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject())
+ );
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(stats("nested_value_stats").field("nested.value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ long sum = 0;
+ long count = 0;
+ for (int i = 0; i < numParents; ++i) {
+ for (int j = 0; j < numChildren[i]; ++j) {
+ final long value = i + 1 + j;
+ min = Math.min(min, value);
+ max = Math.max(max, value);
+ sum += value;
+ ++count;
+ }
+ }
+
+ Nested nested = response.getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), equalTo(count));
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ Stats stats = nested.getAggregations().get("nested_value_stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMin(), equalTo(min));
+ assertThat(stats.getMax(), equalTo(max));
+ assertThat(stats.getCount(), equalTo(count));
+ assertThat(stats.getSum(), equalTo((double) sum));
+ assertThat(stats.getAvg(), equalTo((double) sum / count));
+ }
+
+ @Test
+ public void onNonNestedField() throws Exception {
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("value")
+ .subAggregation(stats("nested_value_stats").field("nested.value")))
+ .execute().actionGet();
+
+ fail("expected execution to fail - an attempt to nested facet on non-nested field/path");
+
+ } catch (ElasticsearchException ese) {
+ }
+ }
+
+ @Test
+ public void nestedWithSubTermsAgg() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(terms("values").field("nested.value").size(100)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ long docCount = 0;
+ long[] counts = new long[numParents + 6];
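+        // child values span 1 .. numParents + 4 (i + 1 + j with j at most 4), so the
+        // array leaves room to spare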
+ for (int i = 0; i < numParents; ++i) {
+ for (int j = 0; j < numChildren[i]; ++j) {
+ final int value = i + 1 + j;
+ ++counts[value];
+ ++docCount;
+ }
+ }
+ int uniqueValues = 0;
+ for (long count : counts) {
+ if (count > 0) {
+ ++uniqueValues;
+ }
+ }
+
+ Nested nested = response.getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), equalTo(docCount));
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ LongTerms values = nested.getAggregations().get("values");
+ assertThat(values, notNullValue());
+ assertThat(values.getName(), equalTo("values"));
+ assertThat(values.getBuckets(), notNullValue());
+ assertThat(values.getBuckets().size(), equalTo(uniqueValues));
+ for (int i = 0; i < counts.length; ++i) {
+ final String key = Long.toString(i);
+ if (counts[i] == 0) {
+ assertNull(values.getBucketByKey(key));
+ } else {
+ Bucket bucket = values.getBucketByKey(key);
+ assertNotNull(bucket);
+ assertEquals(counts[i], bucket.getDocCount());
+ }
+ }
+ }
+
+ @Test
+ public void nestedAsSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("top_values").field("value").size(100)
+ .subAggregation(nested("nested").path("nested")
+ .subAggregation(max("max_value").field("nested.value"))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ LongTerms values = response.getAggregations().get("top_values");
+ assertThat(values, notNullValue());
+ assertThat(values.getName(), equalTo("top_values"));
+ assertThat(values.getBuckets(), notNullValue());
+ assertThat(values.getBuckets().size(), equalTo(numParents));
+
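+        // parent i's children hold values i+1 .. i+numChildren[i]; a parent with no
+        // children yields an empty nested max, which comes back as -Infinity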
+ for (int i = 0; i < numParents; i++) {
+ String topValue = "" + (i + 1);
+ assertThat(values.getBucketByKey(topValue), notNullValue());
+ Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ Max max = nested.getAggregations().get("max_value");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(numChildren[i] == 0 ? Double.NEGATIVE_INFINITY : (double) i + numChildren[i]));
+ }
+ }
+
+ @Test
+ public void nestNestedAggs() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_nested_nested_aggs")
+ .addAggregation(nested("level1").path("nested1")
+ .subAggregation(terms("a").field("nested1.a")
+ .subAggregation(nested("level2").path("nested1.nested2")
+ .subAggregation(sum("sum").field("nested1.nested2.b")))))
+ .get();
+ assertSearchResponse(response);
+
+
+ Nested level1 = response.getAggregations().get("level1");
+ assertThat(level1, notNullValue());
+ assertThat(level1.getName(), equalTo("level1"));
+ assertThat(level1.getDocCount(), equalTo(2l));
+
+ StringTerms a = level1.getAggregations().get("a");
+ Terms.Bucket bBucket = a.getBucketByKey("a");
+ assertThat(bBucket.getDocCount(), equalTo(1l));
+
+ Nested level2 = bBucket.getAggregations().get("level2");
+ assertThat(level2.getDocCount(), equalTo(1l));
+ Sum sum = level2.getAggregations().get("sum");
+ assertThat(sum.getValue(), equalTo(2d));
+
+ a = level1.getAggregations().get("a");
+ bBucket = a.getBucketByKey("b");
+ assertThat(bBucket.getDocCount(), equalTo(1l));
+
+ level2 = bBucket.getAggregations().get("level2");
+ assertThat(level2.getDocCount(), equalTo(1l));
+ sum = level2.getAggregations().get("sum");
+ assertThat(sum.getValue(), equalTo(2d));
+ }
+
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "nested", "type=nested").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .startArray("nested")
+ .startObject().field("value", i + 1).endObject()
+ .startObject().field("value", i + 2).endObject()
+ .startObject().field("value", i + 3).endObject()
+ .startObject().field("value", i + 4).endObject()
+ .startObject().field("value", i + 5).endObject()
+ .endArray()
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(nested("nested").path("nested")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Nested nested = bucket.getAggregations().get("nested");
+ assertThat(nested, Matchers.notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), is(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java
new file mode 100644
index 0000000..3b62521
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java
@@ -0,0 +1,947 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class RangeTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ numDocs = randomIntBetween(10, 20);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i+1)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i+1).value(i+2).endArray()
+ .endObject());
+ }
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ @Test
+ public void rangeAsSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME).size(100).subAggregation(
+ range("range").field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getBuckets().size(), equalTo(numDocs + 1));
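+        // term t on the multi-valued field comes from docs t-1 and t-2 (each doc holds
+        // values i+1 and i+2), whose single values are t and t-1: hence the counts below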
+ for (int i = 1; i < numDocs + 2; ++i) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2;
+ assertThat(bucket.getDocCount(), equalTo(docCount));
+ Range range = bucket.getAggregations().get("range");
+ Range.Bucket rangeBucket = range.getBucketByKey("*-3.0");
+ assertThat(rangeBucket, notNullValue());
+ if (i == 1 || i == 3) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i == 2) {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ }
+ rangeBucket = range.getBucketByKey("3.0-6.0");
+ assertThat(rangeBucket, notNullValue());
+ if (i == 3 || i == 6) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i == 4 || i == 5) {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ }
+ rangeBucket = range.getBucketByKey("6.0-*");
+ assertThat(rangeBucket, notNullValue());
+ if (i == 6 || i == numDocs + 1) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i < 6) {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ }
+ }
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo("r1", 3)
+ .addRange("r2", 3, 6)
+ .addUnboundedFrom("r3", 6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(3.0)); // 1 + 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(12.0)); // 3 + 4 + 5
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
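+ // sum of the single values in 6..numDocs, e.g. 6 + 7 + 8 + 9 + 10 = 40 when numDocs == 10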
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // e.g. (6 + 7 + 8 + 9 + 10) / 5 when numDocs == 10
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
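+ // the value script shifts the single values to 2..numDocs + 1, so "*-3.0" only
+ // holds the value 2, "3.0-6.0" holds 3..5, and "6.0-*" holds the remaining
+ // numDocs - 4 docs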
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(1l)); // 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l)); // 3, 4, 5
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
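+
+ bucket membership is per document:
+ *-3.0   -> [1, 2], [2, 3]                 -> 2 docs
+ 3.0-6.0 -> [2, 3], [3, 4], [4, 5], [5, 6] -> 4 docs
+ 6.0-*   -> [5, 6] .. [10, 11]             -> numDocs - 4 docs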
+ */
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ [11, 12]
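+
+ bucket membership is per document:
+ *-3.0   -> [2, 3]                         -> 1 doc
+ 3.0-6.0 -> [2, 3], [3, 4], [4, 5], [5, 6] -> 4 docs
+ 6.0-*   -> [5, 6] .. [11, 12]             -> numDocs - 3 docs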
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3l));
+ }
+
+ /*
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ [11, 12]
+
+ values falling in each range (doc counts and the inherited sum are per document,
+ so a matching doc contributes all of its values to the sum):
+ r1: 2
+ r2: 3, 3, 4, 4, 5, 5
+ r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(2d+3d));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ long total = 0;
+ for (int i = 3; i < numDocs; ++i) {
+ total += ((i + 1) + 1) + ((i + 1) + 2);
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // e.g. (6 + 7 + 8 + 9 + 10) / 5 when numDocs == 10
+ }
+
+ @Test
+ public void emptyRange() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(-1)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(2));
+
+ Range.Bucket bucket = range.getBucketByKey("*--1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*--1.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(-1.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000d));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+
+ values falling in each range (doc counts and the inherited sum are per document,
+ so a matching doc contributes all of its values to the sum):
+ r1: 1, 2, 2
+ r2: 3, 3, 4, 4, 5, 5
+ r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+ */
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .addUnboundedTo("r1", 3)
+ .addRange("r2", 3, 6)
+ .addUnboundedFrom("r3", 6)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ long total = 0;
+ for (int i = 4; i < numDocs; ++i) {
+ total += (i + 1) + (i + 2);
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ }
+
+ @Test
+ public void overlappingRanges() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(5)
+ .addRange(3, 6)
+ .addRange(4, 5)
+ .addUnboundedFrom(4))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(4));
+
+ Range.Bucket bucket = range.getBucketByKey("*-5.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-5.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(5.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("4.0-5.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("4.0-5.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(4.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(5.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("4.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("4.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(4.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i * 2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(range("range").addRange("0-2", 0.0, 2.0)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Range range = bucket.getAggregations().get("range");
+ List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(range.getBuckets());
+ assertThat(range, Matchers.notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("0-2"));
+ assertThat(buckets.get(0).getFrom().doubleValue(), equalTo(0.0));
+ assertThat(buckets.get(0).getTo().doubleValue(), equalTo(2.0));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java
new file mode 100644
index 0000000..dca4052
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRange;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard.
+ * These tests are based on the date histogram in combination with min_doc_count=0. In order for the date histogram to
+ * compute empty buckets, its {@code reduce()} method must be called. So by adding the date histogram under other buckets,
+ * we can make sure that the reduce is properly propagated by checking that empty buckets were created.
+ */
+public class ShardReduceTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", randomBoolean() ? 1 : randomIntBetween(2, 10))
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .build();
+ }
+
+ private IndexRequestBuilder indexDoc(String date, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("ip", "10.0.0." + value)
+ .field("location", GeoHashUtils.encode(52, 5, 12))
+ .field("date", date)
+ .field("term-l", 1)
+ .field("term-d", 1.5)
+ .field("term-s", "term")
+ .startObject("nested")
+ .field("date", date)
+ .endObject()
+ .endObject());
+ }
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "nested", "type=nested", "ip", "type=ip", "location", "type=geo_point")
+ .setSettings(indexSettings())
+ .execute().actionGet();
+
+ indexRandom(true,
+ indexDoc("2014-01-01", 1),
+ indexDoc("2014-01-02", 2),
+ indexDoc("2014-01-04", 3));
+ ensureSearchable();
+ }
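+
+ // With docs on 2014-01-01, 2014-01-02 and 2014-01-04, a DAY-interval histogram
+ // with minDocCount(0) spans 4 buckets; the empty 2014-01-03 bucket only shows up
+ // if reduce() actually ran, which is what each test below asserts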
+
+ @Test
+ public void testGlobal() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(global("global")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Global global = response.getAggregations().get("global");
+ DateHistogram histo = global.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testFilter() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(filter("filter").filter(FilterBuilders.matchAllFilter())
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filter filter = response.getAggregations().get("filter");
+ DateHistogram histo = filter.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(missing("missing").field("foobar")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Missing missing = response.getAggregations().get("missing");
+ DateHistogram histo = missing.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testGlobalWithFilterWithMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(global("global")
+ .subAggregation(filter("filter").filter(FilterBuilders.matchAllFilter())
+ .subAggregation(missing("missing").field("foobar")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Global global = response.getAggregations().get("global");
+ Filter filter = global.getAggregations().get("filter");
+ Missing missing = filter.getAggregations().get("missing");
+ DateHistogram histo = missing.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testNested() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(dateHistogram("histo").field("nested.date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Nested nested = response.getAggregations().get("nested");
+ DateHistogram histo = nested.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testStringTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-s")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ DateHistogram histo = terms.getBucketByKey("term").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testLongTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-l")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ DateHistogram histo = terms.getBucketByKey("1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDoubleTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-d")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ DateHistogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(range("range").field("value").addRange("r1", 0, 10)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ DateHistogram histo = range.getBucketByKey("r1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDateRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(dateRange("range").field("date").addRange("r1", "2014-01-01", "2014-01-10")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateRange range = response.getAggregations().get("range");
+ DateHistogram histo = range.getBucketByKey("r1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testIpRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(ipRange("range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ IPv4Range range = response.getAggregations().get("range");
+ DateHistogram histo = range.getBucketByKey("r1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testHistogram() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(histogram("topHisto").field("value").interval(5)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram topHisto = response.getAggregations().get("topHisto");
+ DateHistogram histo = topHisto.getBucketByKey(0).getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDateHistogram() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(dateHistogram("topHisto").field("date").interval(DateHistogram.Interval.MONTH)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateHistogram topHisto = response.getAggregations().get("topHisto");
+ DateHistogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testGeoHashGrid() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(geohashGrid("grid").field("location")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoHashGrid grid = response.getAggregations().get("grid");
+ DateHistogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java
new file mode 100644
index 0000000..7f97c47
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java
@@ -0,0 +1,361 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST)
+public class ShardSizeTermsTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * To properly test the effect/functionality of shard_size, we need to force having 2 shards and also
+ * control the routing such that certain documents end up on each shard. Using the "djb" routing hash and ignoring the
+ * doc type when hashing ensures that docs with routing value "1" end up on a different shard than docs with
+ * routing value "2".
+ */
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .put("cluster.routing.operation.hash.type", "djb")
+ .put("cluster.routing.operation.use_type", "false")
+ .build();
+ }
+
+ @Test
+ public void noShardSize_string() throws Exception {
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsText().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsText().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
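+ // with routing pinned to shard "1" there is nothing to reduce across shards, so
+ // the counts below are the exact per-shard counts from indexData()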
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 5l)
+ .put("2", 4l)
+ .put("3", 3l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket: buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ private void indexData() throws Exception {
+
+ /*
+
+
+ || || size = 3, shard_size = 5 || shard_size = size = 3 ||
+ ||==========||==================================================||===============================================||
+ || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "3" - 3 | "2" - 4 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || reduced: || "1" - 8 | "2" - 5 | "3" - 8 | "4" - 4 | "5" - 2 || ||
+ || || || "1" - 8, "3" - 8, "2" - 4 <= WRONG ||
+ || || "1" - 8 | "3" - 8 | "2" - 5 <= CORRECT || ||
+
+
+ */
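+
+ // Worked example from the table above: without shard_size, shard 2's top 3 is
+ // {"3": 5, "1": 3, "4": 2}, so "2" is missing there and reduces to only the
+ // 4 docs seen on shard 1; with shard_size = 5 both shards report "2" and the
+ // reduce sums 4 + 1 = 5, the true total.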
+
+ List<IndexRequestBuilder> indexOps = new ArrayList<IndexRequestBuilder>();
+
+ indexDoc("1", "1", 5, indexOps);
+ indexDoc("1", "2", 4, indexOps);
+ indexDoc("1", "3", 3, indexOps);
+ indexDoc("1", "4", 2, indexOps);
+ indexDoc("1", "5", 1, indexOps);
+
+ // total docs in shard "1" = 15
+
+ indexDoc("2", "1", 3, indexOps);
+ indexDoc("2", "2", 1, indexOps);
+ indexDoc("2", "3", 5, indexOps);
+ indexDoc("2", "4", 2, indexOps);
+ indexDoc("2", "5", 1, indexOps);
+
+ // total docs in shard "2" = 12
+
+ indexRandom(true, indexOps);
+
+ long totalOnOne = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ long totalOnTwo = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ ensureSearchable();
+ }
+
+ private void indexDoc(String shard, String key, int times, List<IndexRequestBuilder> indexOps) throws Exception {
+ for (int i = 0; i < times; i++) {
+ indexOps.add(client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .endObject()));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java
new file mode 100644
index 0000000..3bcc3bf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java
@@ -0,0 +1,1024 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.base.Strings;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class StringTermsTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "s_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "s_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ public static String randomExecutionHint() {
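+        // a null hint leaves the choice between the "map" and "ordinals" execution modes to the aggregator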
+ return randomFrom(Arrays.asList(null, TermsAggregatorFactory.EXECUTION_HINT_VALUE_MAP, TermsAggregatorFactory.EXECUTION_HINT_VALUE_ORDINALS));
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[5]; // TODO randomize the size?
+ for (int i = 0; i < lowCardBuilders.length; i++) {
+ lowCardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, "val" + i)
+ .field("i", i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value("val" + i).value("val" + (i + 1)).endArray()
+ .endObject());
+ }
+ indexRandom(true, lowCardBuilders);
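+        // high-cardinality fixture: 100 docs in type "high_card_type" with zero-padded
+        // terms val000..val099 (the multi-valued field also reaches val100)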
+ IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size?
+
+ for (int i = 0; i < highCardBuilders.length; i++) {
+ highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i+"", 3, '0'))
+ .startArray(MULTI_VALUED_FIELD_NAME).value("val" + Strings.padStart(i+"", 3, '0')).value("val" + Strings.padStart((i+1)+"", 3, '0')).endArray()
+ .endObject());
+ }
+ indexRandom(true, highCardBuilders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
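+    // Read the bucket key through a randomly chosen accessor; both views must yield the same term.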
+ private String key(Terms.Bucket bucket) {
+ return randomBoolean() ? bucket.getKey() : bucket.getKeyAsText().string();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithRegexFiltering() throws Exception {
+
+ // include without exclude
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("val00.+"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // include and exclude
+ // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("val00.+").exclude("(val000|val001)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(8));
+
+ for (int i = 2; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // exclude without include
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).exclude("val0[1-9]+.+"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithRegexFiltering_WithFlags() throws Exception {
+
+ // include without exclude
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+ // with case insensitive flag on the include regex
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("VAL00.+", Pattern.CASE_INSENSITIVE))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // include and exclude
+ // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
+ // with multi-flag masking on the exclude regex
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("val00.+").exclude("( val000 | VAL001 )#this is a comment", Pattern.CASE_INSENSITIVE | Pattern.COMMENTS))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(8));
+
+ for (int i = 2; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // exclude without include
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+ // with a "no flag" flag
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).exclude("val0[1-9]+.+", 0))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(20)
+                        .order(Terms.Order.term(true))) // we need to sort by terms because we're checking the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + Strings.padStart(i + "", 3, '0'));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + Strings.padStart(i+"", 3, '0')));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(count("count").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("'foo_' + _value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value.substring(0,3)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("val");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val"));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("'foo_' + _value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+    /*
+     Multi-values per document after the "'foo_' + _value" value script, and the
+     resulting doc_count / val_count per term:
+
+ [foo_val0, foo_val1]
+ [foo_val1, foo_val2]
+ [foo_val2, foo_val3]
+ [foo_val3, foo_val4]
+ [foo_val4, foo_val5]
+
+
+ foo_val0 - doc_count: 1 - val_count: 2
+ foo_val1 - doc_count: 2 - val_count: 4
+ foo_val2 - doc_count: 2 - val_count: 4
+ foo_val3 - doc_count: 2 - val_count: 4
+ foo_val4 - doc_count: 2 - val_count: 4
+ foo_val5 - doc_count: 1 - val_count: 2
+
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("'foo_' + _value")
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+            if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat("term[" + key(bucket) + "]", valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_ExplicitSingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+            if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .size(randomInt(5))
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void stringTermsNestedIntoPerBucketAggregator() throws Exception {
+ // no execution hint so that the logic that decides whether or not to use ordinals is executed
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(filter("filter").filter(termFilter(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ Filter filter = response.getAggregations().get("filter");
+
+ Terms terms = filter.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (int i = 2; i <= 4; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field("i"))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("filter", true))
+ .subAggregation(filter("filter").filter(FilterBuilders.termFilter("foo", "bar")))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+    public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUnknownMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "with an unknown specified metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field("i"))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ i--;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i++;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i--;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.sum_of_squares", asc))
+ .subAggregation(extendedStats("stats").field("i"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i++;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java
new file mode 100644
index 0000000..d6f1ac1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Aggregations module
+ */
+@TestLogging("org.elasticsearch.action.search.type:TRACE")
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.test.junit.annotations.TestLogging; \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java
new file mode 100644
index 0000000..df69c9b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+/**
+ *
+ */
+public abstract class AbstractNumericTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+
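+        // docs 0..9: "value" = i+1 (1..10) and "values" = [i+2, i+3] (2..11 and 3..12);
+        // the metric tests below assert against these exact sums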
+ for (int i = 0; i < 10; i++) { // TODO randomize the size and the params in here?
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject()));
+ }
+ indexRandom(true, builders);
+
+        // Create an index to test the empty-buckets functionality: index two docs, {value: 0} and
+        // {value: 2}, then build a histogram aggregation with interval 1 and empty buckets computed.
+        // The empty bucket is the one associated with key "1"; each test then checks that this
+        // bucket exists with the appropriate sub-aggregations.
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ public abstract void testEmptyAggregation() throws Exception;
+
+ public abstract void testUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField() throws Exception;
+
+ public abstract void testSingleValuedField_PartiallyUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField_WithValueScript() throws Exception;
+
+ public abstract void testSingleValuedField_WithValueScript_WithParams() throws Exception;
+
+ public abstract void testMultiValuedField() throws Exception;
+
+ public abstract void testMultiValuedField_WithValueScript() throws Exception;
+
+ public abstract void testMultiValuedField_WithValueScript_WithParams() throws Exception;
+
+ public abstract void testScript_SingleValued() throws Exception;
+
+ public abstract void testScript_SingleValued_WithParams() throws Exception;
+
+ public abstract void testScript_ExplicitSingleValued_WithParams() throws Exception;
+
+ public abstract void testScript_MultiValued() throws Exception;
+
+ public abstract void testScript_ExplicitMultiValued() throws Exception;
+
+ public abstract void testScript_MultiValued_WithParams() throws Exception;
+
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
new file mode 100644
index 0000000..41385b2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class AvgTests extends AbstractNumericTests {
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(Double.isNaN(avg.getValue()), is(true));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo(Double.NaN));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+    @Test
+    @Override
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
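+    // The script emits two values per doc (value and value + 1), so the average runs over 20 numbers.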
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+ @Test
+ @TestLogging("search:TRACE")
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("new double[] { doc['value'].value, doc['value'].value + inc }").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
new file mode 100644
index 0000000..437261b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class ExtendedStatsTests extends AbstractNumericTests {
+
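+    // Reference implementations of the population variance and standard deviation that the
+    // extended_stats aggregation is expected to report.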
+ private static double stdDev(int... vals) {
+ return Math.sqrt(variance(vals));
+ }
+
+ private static double variance(int... vals) {
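+        // population variance: E[x^2] - E[x]^2, computed as (sum of squares - sum^2 / n) / n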
+ double sum = 0;
+ double sumOfSqrs = 0;
+ for (int val : vals) {
+ sum += val;
+ sumOfSqrs += val * val;
+ }
+ return (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length;
+ }
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(extendedStats("stats")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getSumOfSquares(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(Double.isNaN(stats.getStdDeviation()), is(true));
+ assertThat(Double.isNaN(stats.getAvg()), is(true));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo(Double.NaN));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSumOfSquares(), equalTo(0.0));
+ assertThat(stats.getVariance(), equalTo(Double.NaN));
+ assertThat(stats.getStdDeviation(), equalTo(Double.NaN));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10)));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10)));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
+ assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
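+ // The script emits two values per doc (value and value - dec), so the
+ // aggregation sees 20 values: 1..10 and 0..9.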
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("new double[] { doc['value'].value, doc['value'].value - dec }").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20));
+ assertThat(stats.getMin(), equalTo(0.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+0+1+4+9+16+25+36+49+64+81));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
+ }
+
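+ // Logs each shard failure and fails the test if their number differs from
+ // the expected count, then verifies the request reached every shard.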
+ private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
+ ShardSearchFailure[] failures = response.getShardFailures();
+ if (failures.length != expectedFailures) {
+ for (ShardSearchFailure failure : failures) {
+ logger.error("Shard Failure: {}", failure.failure(), failure.toString());
+ }
+ fail("Unexpected shard failures!");
+ }
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java
new file mode 100644
index 0000000..3cb4ff3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for the {@code max} metrics aggregation.
+ */
+public class MaxTests extends AbstractNumericTests {
+
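+ // Assumed fixture (set up in AbstractNumericTests): ten docs whose "value"
+ // field holds 1..10 and whose multi-valued "values" field holds i+1 and i+2
+ // per doc, so the expected max is 10 for "value" and 12 for "values".
+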
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
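+ // An empty bucket (kept by minDocCount(0)) still exposes its sub-aggregation;
+ // with nothing collected, max reports its identity value, Double.NEGATIVE_INFINITY.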
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(13.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(13.0));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("new double[] { doc['value'].value, doc['value'].value + inc }").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java
new file mode 100644
index 0000000..8a73d91
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for the {@code min} metrics aggregation.
+ */
+public class MinTests extends AbstractNumericTests {
+
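+ // Assumed fixture (set up in AbstractNumericTests): ten docs whose "value"
+ // field holds 1..10 and whose multi-valued "values" field holds i+1 and i+2
+ // per doc, so the expected min is 1 for "value" and 2 for "values".
+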
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
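+ // With nothing collected in the empty bucket, min reports its identity
+ // value, Double.POSITIVE_INFINITY.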
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(Double.POSITIVE_INFINITY));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(Double.POSITIVE_INFINITY));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_Reverse() throws Exception {
+ // test what happens when values arrive in reverse order since the min aggregator is optimized to work on sorted values
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value * -1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(-12d));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("List values = doc['values'].values; double[] res = new double[values.length]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java
new file mode 100644
index 0000000..7437678
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java
@@ -0,0 +1,370 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for the {@code stats} metrics aggregation.
+ */
+public class StatsTests extends AbstractNumericTests {
+
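+ // Assumed fixture (set up in AbstractNumericTests): ten docs whose "value"
+ // field holds 1..10 and whose multi-valued "values" field holds i+1 and i+2
+ // per doc; the expected count/min/max/sum/avg below follow from that.
+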
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(stats("stats")))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(Double.isNaN(stats.getAvg()), is(true));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo(Double.NaN));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ @TestLogging("search:TRACE")
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("new double[] { doc['value'].value, doc['value'].value - dec }").param("dec", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20));
+ assertThat(stats.getMin(), equalTo(0.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
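+ // Logs each shard failure and fails the test if their number differs from
+ // the expected count, then verifies the request reached every shard.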
+ private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
+ ShardSearchFailure[] failures = response.getShardFailures();
+ if (failures.length != expectedFailures) {
+ for (ShardSearchFailure failure : failures) {
+ logger.error("Shard Failure: {}", failure.failure(), failure.toString());
+ }
+ fail("Unexpected shard failures!");
+ }
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java
new file mode 100644
index 0000000..e7f59c4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java
@@ -0,0 +1,270 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for the {@code sum} metrics aggregation.
+ */
+public class SumTests extends AbstractNumericTests {
+
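+ // Assumed fixture (set up in AbstractNumericTests): ten docs whose "value"
+ // field holds 1..10 (sum 55) and whose multi-valued "values" field holds
+ // i+1 and i+2 per doc (sum 65 + 75 = 140).
+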
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
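+ // With nothing collected in the empty bucket, sum reports its identity
+ // value, 0.0.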
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ @Override
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value").script("_value + increment").param("increment", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
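+ // The script emits doc['value'].value and that value + 1 for each of the
+ // ten docs, so the expected sum is (1+..+10) + (2+..+11) = 120.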
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("new double[] { doc['value'].value, doc['value'].value + inc }").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+
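+        // each doc carries the two values (i + 2) and (i + 3) for i in [0, 10); the expected total below sums exactly those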
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values").script("_value + increment").param("increment", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java
new file mode 100644
index 0000000..97bdce9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.count;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Integration tests for the {@code value_count} metric aggregation.
+ */
+public class ValueCountTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
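+        // index 10 docs, each with a single "value" and a two-element "values" array,
+        // so value_count should see 10 and 20 values respectively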
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject())
+ .execute().actionGet();
+ }
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ ensureSearchable();
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(0l));
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void singleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(20l));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java
new file mode 100644
index 0000000..c1c7e21
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Tests for the metrics aggregations module
+ */
+@TestLogging("org.elasticsearch.action.search.type:TRACE")
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.test.junit.annotations.TestLogging; \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/package-info.java b/src/test/java/org/elasticsearch/search/aggregations/package-info.java
new file mode 100644
index 0000000..c2a0b23
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Tests for the aggregations module
+ */
+@TestLogging("org.elasticsearch.action.search.type:TRACE")
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.test.junit.annotations.TestLogging; \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java b/src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java
new file mode 100644
index 0000000..b9c1954
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+public class FieldDataSourceTests extends ElasticsearchTestCase {
+
+ private static BytesValues randomBytesValues() {
+ final boolean multiValued = randomBoolean();
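+        // fake values source: setDocument reports how many values the current doc has,
+        // and nextValue must then be called exactly that many times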
+ return new BytesValues(multiValued) {
+ @Override
+ public int setDocument(int docId) {
+ return randomInt(multiValued ? 10 : 1);
+ }
+ @Override
+ public BytesRef nextValue() {
+ scratch.copyChars(randomAsciiOfLength(10));
+ return scratch;
+ }
+
+ };
+ }
+
+ private static SearchScript randomScript() {
+ return new SearchScript() {
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ }
+
+ @Override
+ public Object run() {
+ return randomAsciiOfLength(5);
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ }
+
+ @Override
+ public void setNextDocId(int doc) {
+ }
+
+ @Override
+ public void setNextSource(Map<String, Object> source) {
+ }
+
+ @Override
+ public void setNextScore(float score) {
+ }
+
+ @Override
+ public float runAsFloat() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long runAsLong() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public double runAsDouble() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ }
+
+ private static void assertConsistent(BytesValues values) {
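+        // walk ten documents and verify that every returned term agrees with its reported hash and with its shared copy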
+ for (int i = 0; i < 10; ++i) {
+ final int valueCount = values.setDocument(i);
+ for (int j = 0; j < valueCount; ++j) {
+ final BytesRef term = values.nextValue();
+ assertEquals(term.hashCode(), values.currentValueHash());
+ assertTrue(term.bytesEquals(values.copyShared()));
+ }
+ }
+ }
+
+ @Test
+ public void bytesValuesWithScript() {
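+        // wrap a plain bytes source with a script and check the wrapped values still honor the BytesValues iteration contract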
+ final BytesValues values = randomBytesValues();
+ FieldDataSource source = new FieldDataSource.Bytes() {
+
+ @Override
+ public BytesValues bytesValues() {
+ return values;
+ }
+
+ @Override
+ public MetaData metaData() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ SearchScript script = randomScript();
+ assertConsistent(new FieldDataSource.WithScript.BytesValues(source, script));
+ }
+
+ @Test
+ public void sortedUniqueBytesValues() {
+ assertConsistent(new FieldDataSource.Bytes.SortedAndUnique.SortedUniqueBytesValues(randomBytesValues()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java b/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java
new file mode 100644
index 0000000..b14dd6f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.support.bytes.ScriptBytesValues;
+import org.elasticsearch.search.aggregations.support.numeric.ScriptDoubleValues;
+import org.elasticsearch.search.aggregations.support.numeric.ScriptLongValues;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Map;
+
+public class ScriptValuesTests extends ElasticsearchTestCase {
+
+ private static class FakeSearchScript implements SearchScript {
+
+ private final Object[][] values;
+ int index;
+
+ FakeSearchScript(Object[][] values) {
+ this.values = values;
+ index = -1;
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ }
+
+ @Override
+ public Object run() {
+ // Script values are supposed to support null, single values, arrays and collections
+ final Object[] values = this.values[index];
+ if (values.length <= 1 && randomBoolean()) {
+ return values.length == 0 ? null : values[0];
+ }
+ return randomBoolean() ? values : Arrays.asList(values);
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ }
+
+ @Override
+ public void setNextDocId(int doc) {
+ index = doc;
+ }
+
+ @Override
+ public void setNextSource(Map<String, Object> source) {
+ }
+
+ @Override
+ public void setNextScore(float score) {
+ }
+
+ @Override
+ public float runAsFloat() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long runAsLong() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public double runAsDouble() {
+ throw new UnsupportedOperationException();
+ }
+
+ }
+
+ @Test
+ public void longs() {
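+        // declared as Object[][] but backed by Long[][] (array covariance), so FakeSearchScript can hand the rows back as plain objects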
+ final Object[][] values = new Long[randomInt(10)][];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new Long[randomInt(8)];
+ for (int j = 0; j < values[i].length; ++j) {
+ values[i][j] = randomLong();
+ }
+ }
+ FakeSearchScript script = new FakeSearchScript(values);
+ ScriptLongValues scriptValues = new ScriptLongValues(script);
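+        // replay every document and make sure the script-backed source reports the same counts and values as the fixture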
+ for (int i = 0; i < values.length; ++i) {
+ assertEquals(values[i].length, scriptValues.setDocument(i));
+ for (int j = 0; j < values[i].length; ++j) {
+ assertEquals(values[i][j], scriptValues.nextValue());
+ }
+ }
+ }
+
+ @Test
+ public void doubles() {
+ final Object[][] values = new Double[randomInt(10)][];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new Double[randomInt(8)];
+ for (int j = 0; j < values[i].length; ++j) {
+ values[i][j] = randomDouble();
+ }
+ }
+ FakeSearchScript script = new FakeSearchScript(values);
+ ScriptDoubleValues scriptValues = new ScriptDoubleValues(script);
+ for (int i = 0; i < values.length; ++i) {
+ assertEquals(values[i].length, scriptValues.setDocument(i));
+ for (int j = 0; j < values[i].length; ++j) {
+ assertEquals(values[i][j], scriptValues.nextValue());
+ }
+ }
+ }
+
+ @Test
+ public void bytes() {
+ final String[][] values = new String[randomInt(10)][];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new String[randomInt(8)];
+ for (int j = 0; j < values[i].length; ++j) {
+ values[i][j] = RandomStrings.randomAsciiOfLength(getRandom(), 5);
+ }
+ }
+ FakeSearchScript script = new FakeSearchScript(values);
+ ScriptBytesValues scriptValues = new ScriptBytesValues(script);
+ for (int i = 0; i < values.length; ++i) {
+ assertEquals(values[i].length, scriptValues.setDocument(i));
+ for (int j = 0; j < values[i].length; ++j) {
+ assertEquals(new BytesRef(values[i][j]), scriptValues.nextValue());
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java
new file mode 100644
index 0000000..680a35a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+
+/**
+ * This test verifies that a search still works when only a single shard is active (because we indexed
+ * into it) while the other shards may not be active at all (because they have not been allocated yet).
+ */
+public class SearchWhileCreatingIndexTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testIndexCausesIndexCreation() throws Exception {
+        searchWhileCreatingIndex(-1, 1); // rely on index auto-creation; the default settings include 1 replica
+ }
+
+ @Test
+ @Slow
+ public void testNoReplicas() throws Exception {
+ searchWhileCreatingIndex(10, 0);
+ }
+
+ @Test
+ @Slow
+ public void testOneReplica() throws Exception {
+ searchWhileCreatingIndex(10, 1);
+ }
+
+ @Test
+ @Slow
+ public void testTwoReplicas() throws Exception {
+ searchWhileCreatingIndex(10, 2);
+ }
+
+ private void searchWhileCreatingIndex(int numberOfShards, int numberOfReplicas) throws Exception {
+
+        // make sure we have enough nodes to guarantee the default QUORUM write consistency.
+ // TODO: add a smarter choice based on actual consistency (when that is randomized)
+ int shardsNo = numberOfReplicas + 1;
+ int neededNodes = shardsNo <= 2 ? 1 : shardsNo / 2 + 1;
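+        // e.g. 2 replicas means 3 copies, so a majority needs 3/2 + 1 == 2 nodes; with at most 2 copies a single node is enough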
+ cluster().ensureAtLeastNumNodes(randomIntBetween(neededNodes, shardsNo));
+ for (int i = 0; i < 20; i++) {
+ logger.info("running iteration {}", i);
+ if (numberOfShards > 0) {
+ CreateIndexResponse createIndexResponse = prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", numberOfReplicas)).get();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+ }
+ client().prepareIndex("test", "type1", randomAsciiOfLength(5)).setSource("field", "test").execute().actionGet();
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
+ assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); // at least one shard should be successful when refreshing
+
+            // we want to make sure that while recovery happens, and a replica gets recovered, it's properly refreshed
+ ClusterHealthStatus status = ClusterHealthStatus.RED;
+ while (status != ClusterHealthStatus.GREEN) {
+ // first, verify that search on the primary search works
+ SearchResponse searchResponse = client().prepareSearch("test").setPreference("_primary").setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ assertHitCount(searchResponse, 1);
+ // now, let it go to primary or replica, though in a randomized re-creatable manner
+ String preference = randomAsciiOfLength(5);
+ Client client = client();
+ searchResponse = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ if (searchResponse.getHits().getTotalHits() != 1) {
+ refresh();
+ SearchResponse searchResponseAfterRefresh = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ logger.info("hits count mismatch on any shard search failed, post explicit refresh hits are {}", searchResponseAfterRefresh.getHits().getTotalHits());
+ ensureGreen();
+ SearchResponse searchResponseAfterGreen = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ logger.info("hits count mismatch on any shard search failed, post explicit wait for green hits are {}", searchResponseAfterGreen.getHits().getTotalHits());
+ assertHitCount(searchResponse, 1);
+ }
+ assertHitCount(searchResponse, 1);
+ status = client().admin().cluster().prepareHealth("test").get().getStatus();
+ cluster().ensureAtLeastNumNodes(numberOfReplicas + 1);
+ }
+ cluster().wipeIndices("test");
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java
new file mode 100644
index 0000000..4a700e9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SearchWhileRelocatingTests extends ElasticsearchIntegrationTest {
+
+    @LuceneTestCase.AwaitsFix(bugUrl = "problem with a search on a single shard (no replica): between reading the " +
+            "cluster state for the search and executing it, the shard may have fully relocated " +
+            "(moved from started on one node to fully started on another node)")
+ @Test
+ public void testSearchAndRelocateConcurrently0Replicas() throws Exception {
+ testSearchAndRelocateConcurrently(0);
+ }
+
+ @TestLogging("org.elasticsearch.action.search.type:TRACE")
+ @Test
+ public void testSearchAndRelocateConcurrently1Replicas() throws Exception {
+ testSearchAndRelocateConcurrently(1);
+ }
+
+ private void testSearchAndRelocateConcurrently(int numberOfReplicas) throws Exception {
+ final int numShards = between(10, 20);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numShards).put("index.number_of_replicas", numberOfReplicas))
+ .addMapping("type1", "loc", "type=geo_point", "test", "type=string").execute().actionGet();
+ ensureGreen();
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ final int numDocs = between(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ indexBuilders.add(client().prepareIndex("test", "type", Integer.toString(i))
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21)
+ .endObject().endObject()));
+ }
+ indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]));
+ assertHitCount(client().prepareSearch().get(), (long) (numDocs));
+ final int numIters = atLeast(10);
+ for (int i = 0; i < numIters; i++) {
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final List<Throwable> thrownExceptions = new CopyOnWriteArrayList<Throwable>();
+ Thread[] threads = new Thread[atLeast(1)];
+ for (int j = 0; j < threads.length; j++) {
+ threads[j] = new Thread() {
+ public void run() {
+ try {
+ while (!stop.get()) {
+ SearchResponse sr = client().prepareSearch().setSize(numDocs).get();
+ assertHitCount(sr, (long) (numDocs));
+ final SearchHits sh = sr.getHits();
+ assertThat("Expected hits to be the same size the actual hits array", sh.getTotalHits(),
+ equalTo((long) (sh.getHits().length)));
+ }
+ } catch (Throwable t) {
+ thrownExceptions.add(t);
+ }
+ }
+ };
+ }
+ for (int j = 0; j < threads.length; j++) {
+ threads[j].start();
+ }
+ allowNodes("test", between(1, 3));
+ client().admin().cluster().prepareReroute().get();
+ ClusterHealthResponse resp = client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).execute().actionGet();
+ stop.set(true);
+ for (int j = 0; j < threads.length; j++) {
+ threads[j].join();
+ }
+ assertThat(resp.isTimedOut(), equalTo(false));
+
+ if (!thrownExceptions.isEmpty()) {
+ Client client = client();
+ String verified = "POST SEARCH OK";
+ for (int j = 0; j < 10; j++) {
+ if (client.prepareSearch().get().getHits().getTotalHits() != numDocs) {
+ verified = "POST SEARCH FAIL";
+ break;
+ }
+ }
+
+ assertThat("failed in iteration " + i + ", verification: " + verified, thrownExceptions, Matchers.emptyIterable());
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java
new file mode 100644
index 0000000..b2a6bb4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.util.English;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.engine.MockInternalEngine;
+import org.elasticsearch.test.engine.ThrowingAtomicReaderWrapper;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
+ final int numShards = between(1, 5);
+        String mapping = XContentFactory.jsonBuilder()
+                .startObject()
+                .startObject("type")
+                .startObject("properties")
+                .startObject("test")
+                .field("type", "string")
+                .field("index", "not_analyzed")
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject().string();
+ final double exceptionRate;
+ final double exceptionOnOpenRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ exceptionOnOpenRate = 1.0/between(5, 100);
+ exceptionRate = 0.0d;
+ } else {
+ exceptionRate = 1.0/between(5, 100);
+ exceptionOnOpenRate = 0.0d;
+ }
+ } else {
+ exceptionOnOpenRate = 1.0/between(5, 100);
+ exceptionRate = 1.0/between(5, 100);
+ }
+ } else {
+ // rarely no exception
+ exceptionRate = 0d;
+ exceptionOnOpenRate = 0d;
+ }
+
+ Builder settings = settingsBuilder()
+ .put("index.number_of_shards", numShards)
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate)
+ .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)
+ .put(MockDirectoryHelper.CHECK_INDEX_ON_CLOSE, true);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here
+ final int numDocs;
+ final boolean expectAllShardsFailed;
+ if (clusterHealthResponse.isTimedOut()) {
+            /* Some seeds just won't let you create the index at all, and we enter a ping-pong mode,
+             * trying one node after another. That is OK, but we need to make sure we don't wait
+             * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
+             * when we search below. */
+ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
+ numDocs = 1;
+ expectAllShardsFailed = true;
+ } else {
+ numDocs = between(10, 100);
+ expectAllShardsFailed = false;
+ }
+ long numCreated = 0;
+ boolean[] added = new boolean[numDocs];
+ for (int i = 0; i < numDocs ; i++) {
+ try {
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
+ if (indexResponse.isCreated()) {
+ numCreated++;
+ added[i] = true;
+ }
+ } catch (ElasticsearchException ex) {
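+                // expected: with random IO exceptions some index requests fail; 'added[i]' simply stays false for this doc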
+ }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+ final int numSearches = atLeast(10);
+        // we don't really check anything here; we're just making sure we don't leave any open files or a broken index behind.
+ for (int i = 0; i < numSearches; i++) {
+ try {
+ int docToQuery = between(0, numDocs-1);
+ long expectedResults = added[docToQuery] ? 1 : 0;
+ logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).get();
+ logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards);
+ // check match all
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get();
+ } catch (SearchPhaseExecutionException ex) {
+ if (!expectAllShardsFailed) {
+ throw ex;
+ } else {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+ }
+
+ }
+ }
+
+ @Test
+ public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
+ final int numShards = between(1, 5);
+        String mapping = XContentFactory.jsonBuilder()
+                .startObject()
+                .startObject("type")
+                .startObject("properties")
+                .startObject("test")
+                .field("type", "string")
+                .field("index", "not_analyzed")
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject().string();
+ final double lowLevelRate;
+ final double topLevelRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 0.0d;
+ } else {
+ topLevelRate = 1.0/between(2, 10);
+ lowLevelRate = 0.0d;
+ }
+ } else {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 1.0/between(2, 10);
+ }
+ } else {
+ // rarely no exception
+ topLevelRate = 0d;
+ lowLevelRate = 0d;
+ }
+
+ Builder settings = settingsBuilder()
+ .put("index.number_of_shards", numShards)
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .put(MockInternalEngine.READER_WRAPPER_TYPE, RandomExceptionDirectoryReaderWrapper.class.getName())
+ .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
+ .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
+ .put(MockInternalEngine.WRAP_READER_RATIO, 1.0d);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ensureSearchable();
+ final int numDocs = between(10, 100);
+ long numCreated = 0;
+ boolean[] added = new boolean[numDocs];
+ for (int i = 0; i < numDocs ; i++) {
+ try {
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
+ if (indexResponse.isCreated()) {
+ numCreated++;
+ added[i] = true;
+ }
+ } catch (ElasticsearchException ex) {
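+                // expected: random reader exceptions can fail indexing; 'added[i]' stays false for this doc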
+ }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+
+ final int numSearches = atLeast(100);
+        // we don't really check anything here; we're just making sure we don't leave any open files or a broken index behind.
+ for (int i = 0; i < numSearches; i++) {
+ try {
+ int docToQuery = between(0, numDocs-1);
+ long expectedResults = added[docToQuery] ? 1 : 0;
+ logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).get();
+ logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards);
+ if (searchResponse.getSuccessfulShards() == numShards && !refreshFailed) {
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(expectedResults));
+ }
+ // check match all
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get();
+ logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards);
+ if (searchResponse.getSuccessfulShards() == numShards && !refreshFailed) {
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(numCreated));
+ }
+
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+ }
+ }
+
+
+ public static final String EXCEPTION_TOP_LEVEL_RATIO_KEY = "index.engine.exception.ratio.top";
+ public static final String EXCEPTION_LOW_LEVEL_RATIO_KEY = "index.engine.exception.ratio.low";
+
+
+ public static class RandomExceptionDirectoryReaderWrapper extends MockInternalEngine.DirectoryReaderWrapper {
+ private final Settings settings;
+ static class ThrowingSubReaderWrapper extends SubReaderWrapper implements ThrowingAtomicReaderWrapper.Thrower {
+ private final Random random;
+ private final double topLevelRatio;
+ private final double lowLevelRatio;
+
+ ThrowingSubReaderWrapper(Settings settings) {
+ final long seed = settings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0l);
+ this.topLevelRatio = settings.getAsDouble(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d);
+ this.lowLevelRatio = settings.getAsDouble(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d);
+ this.random = new Random(seed);
+ }
+
+ @Override
+ public AtomicReader wrap(AtomicReader reader) {
+ return new ThrowingAtomicReaderWrapper(reader, this);
+ }
+
+ @Override
+ public void maybeThrow(ThrowingAtomicReaderWrapper.Flags flag) throws IOException {
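+                // term- and doc-values-level structures count as "top level" here; the postings enumerations are the "low level" ones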
+ switch (flag) {
+ case Fields:
+ case TermVectors:
+ case Terms:
+ case TermsEnum:
+ case Intersect:
+ case Norms:
+ case NumericDocValues:
+ case BinaryDocValues:
+ case SortedDocValues:
+ case SortedSetDocValues:
+ if (random.nextDouble() < topLevelRatio) {
+ throw new IOException("Forced top level Exception on [" + flag.name() + "]");
+ }
+ break;
+ case DocsEnum:
+ case DocsAndPositionsEnum:
+ if (random.nextDouble() < lowLevelRatio) {
+ throw new IOException("Forced low level Exception on [" + flag.name() + "]");
+ }
+ break;
+ }
+ }
+
+ public boolean wrapTerms(String field) {
+ return true;
+ }
+ }
+
+ public RandomExceptionDirectoryReaderWrapper(DirectoryReader in, Settings settings) {
+ super(in, new ThrowingSubReaderWrapper(settings));
+ this.settings = settings;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
+ return new RandomExceptionDirectoryReaderWrapper(in, settings);
+ }
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java b/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java
new file mode 100644
index 0000000..84803d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Integration tests for searches that fail on some or all shards.
+ */
+public class TransportSearchFailuresTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testFailedSearchWithWrongQuery() throws Exception {
+ logger.info("Start Testing failed search with wrong query");
+ prepareCreate("test", 1, settingsBuilder().put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 2)
+ .put("routing.hash.type", "simple")).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ index(client(), Integer.toString(i), "test", i);
+ }
+ RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(9));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(3));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+ for (int i = 0; i < 5; i++) {
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(3));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(3));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ }
+
+ allowNodes("test", 2);
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest("test")
+ .waitForYellowStatus().waitForRelocatingShards(0).waitForActiveShards(6)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getActiveShards(), equalTo(6));
+
+ refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(9));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(6));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+
+ for (int i = 0; i < 5; i++) {
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(3));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(3));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ }
+
+ logger.info("Done Testing failed search");
+ }
+
+ private void index(Client client, String id, String nameValue, int age) throws IOException {
+ client.index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age)).consistencyLevel(WriteConsistencyLevel.ONE)).actionGet();
+ }
+
+ private XContentBuilder source(String id, String nameValue, int age) throws IOException {
+ StringBuilder multi = new StringBuilder().append(nameValue);
+ for (int i = 0; i < age; i++) {
+ multi.append(" ").append(nameValue);
+ }
+ return jsonBuilder().startObject()
+ .field("id", id)
+ .field("name", nameValue + id)
+ .field("age", age)
+ .field("multi", multi.toString())
+ .field("_boost", age * 10)
+ .endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java b/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java
new file mode 100644
index 0000000..16b8b2e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java
@@ -0,0 +1,439 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.query.QueryFacet;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.action.search.SearchType.*;
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the various search types executed against a multi-shard index.
+ */
+public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
+
+ private Set<String> prepareData() throws Exception {
+ Set<String> fullExpectedIds = Sets.newHashSet();
+ client().admin().indices().create(createIndexRequest("test")
+ .settings(settingsBuilder().put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 0)
+ .put("routing.hash.type", "simple")))
+ .actionGet();
+
+ ensureGreen();
+ for (int i = 0; i < 100; i++) {
+ index(client(), Integer.toString(i), "test", i);
+ fullExpectedIds.add(Integer.toString(i));
+ }
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ return fullExpectedIds;
+ }
+
+ private void index(Client client, String id, String nameValue, int age) throws IOException {
+        client.index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet();
+ }
+
+ private XContentBuilder source(String id, String nameValue, int age) throws IOException {
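+        // "multi" repeats nameValue (age + 1) times, so a term query on "multi" matches every doc and scores grow with age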
+ StringBuilder multi = new StringBuilder().append(nameValue);
+ for (int i = 0; i < age; i++) {
+ multi.append(" ").append(nameValue);
+ }
+ return jsonBuilder().startObject()
+ .field("id", id)
+ .field("nid", Integer.parseInt(id))
+ .field("name", nameValue + id)
+ .field("age", age)
+ .field("multi", multi.toString())
+ .field("_boost", age * 10)
+ .endObject();
+ }
+
+ @Test
+ public void testDfsQueryThenFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(60).explain(true);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+// System.out.println("max_score: " + searchResponse.hits().maxScore());
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1)));
+ }
+
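+        // the scroll request fetches the next page: the remaining 40 of the 100 matching docs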
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
+ }
+ }
+
+ @Test
+ public void testDfsQueryThenFetchWithSort() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(60).explain(true).sort("age", SortOrder.ASC);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60)));
+ }
+ }
+
+ @Test
+ public void testQueryThenFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .sort("nid", SortOrder.DESC) // we have to sort here to have some ordering with dist scoring
+ .from(0).size(60).explain(true);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
+ }
+ }
+
+ @Test
+ public void testQueryThenFetchWithFrom() throws Exception {
+ Set<String> fullExpectedIds = prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(matchAllQuery())
+ .explain(true);
+
+ Set<String> collectedIds = Sets.newHashSet();
+
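+        // page through all 100 docs with from/size and check that the two pages together cover every expected id exactly once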
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source.from(0).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ collectedIds.add(hit.id());
+ }
+ searchResponse = client().search(searchRequest("test").source(source.from(60).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ collectedIds.add(hit.id());
+ }
+ assertThat(collectedIds, equalTo(fullExpectedIds));
+ }
+
+ @Test
+ public void testQueryThenFetchWithSort() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(60).explain(true).sort("age", SortOrder.ASC);
+
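+        // same flow as the DFS variant above, but using plain QUERY_THEN_FETCH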
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60)));
+ }
+ }
+
+ @Test
+ public void testQueryAndFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true);
+
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < 100; i++) {
+ expectedIds.add(Integer.toString(i));
+ }
+
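+        // QUERY_AND_FETCH runs the fetch phase on every shard in a single pass, so the first page holds size * number_of_shards hits (20 * 3 = 60)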
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+            assertThat(hit.explanation(), notNullValue());
+            // we can't check the exact ids here, since it's query-and-fetch and we don't control the distribution
+            assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+            // we don't do perfect sorting when it comes to scrolling with query-and-fetch, so just check for duplicates
+            assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+ assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
+ }
+
+ @Test
+ public void testDfsQueryAndFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true);
+
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < 100; i++) {
+ expectedIds.add(Integer.toString(i));
+ }
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+            assertThat(hit.explanation(), notNullValue());
+            // we can't check the exact ids here, since it's query-and-fetch and we don't control the distribution
+            assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+            // we don't do perfect sorting when it comes to scrolling with query-and-fetch, so just check for duplicates
+            assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+ assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
+ }
+
+ @Test
+ public void testSimpleFacets() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder sourceBuilder = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true)
+ .facet(FacetBuilders.queryFacet("all", termQuery("multi", "test")).global(true))
+ .facet(FacetBuilders.queryFacet("test1", termQuery("name", "test1")));
+
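+        // "all" is a global facet, so it is computed over the whole index rather than just the main query's hits; "test1" counts only docs matching its own facet query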
+ SearchResponse searchResponse = client().search(searchRequest("test").source(sourceBuilder)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+
+ assertThat(searchResponse.getFacets().facet(QueryFacet.class, "test1").getCount(), equalTo(1l));
+ assertThat(searchResponse.getFacets().facet(QueryFacet.class, "all").getCount(), equalTo(100l));
+ }
+
+ @Test
+ public void testFailedSearchWithWrongQuery() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed search with wrong query");
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(3));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(3));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ logger.info("Done Testing failed search");
+ }
+
+ @Test
+ public void testFailedSearchWithWrongFrom() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed search with wrong from");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(1000).size(20).explain(true);
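+        // from=1000 is far past the 100 matching docs, so every search type should succeed with zero hits rather than fail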
+ SearchResponse response = client().search(searchRequest("test").searchType(DFS_QUERY_AND_FETCH).source(source)).actionGet();
+ assertThat(response.getHits().hits().length, equalTo(0));
+ assertThat(response.getTotalShards(), equalTo(3));
+ assertThat(response.getSuccessfulShards(), equalTo(3));
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(QUERY_THEN_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(DFS_QUERY_AND_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(DFS_QUERY_THEN_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ logger.info("Done Testing failed search");
+ }
+
+ @Test
+ public void testFailedMultiSearchWithWrongQuery() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed multi search with a wrong query");
+
+ MultiSearchResponse response = client().prepareMultiSearch()
+ // Add custom score query with missing script
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.customScoreQuery(QueryBuilders.termQuery("nid", 1))))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
+
+ assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().getHits().hits().length, equalTo(1));
+
+ assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().getHits().hits().length, equalTo(10));
+
+        logger.info("Done Testing failed multi search");
+ }
+
+ @Test
+ public void testFailedMultiSearchWithWrongQuery_withFunctionScore() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed multi search with a wrong query");
+
+ MultiSearchResponse response = client().prepareMultiSearch()
+                // Add a function_score query with a script score function that is missing its script
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1)).add(new ScriptScoreFunctionBuilder())))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
+
+ assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().getHits().hits().length, equalTo(1));
+
+ assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().getHits().hits().length, equalTo(10));
+
+        logger.info("Done Testing failed multi search");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java
new file mode 100644
index 0000000..a96dba1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java
@@ -0,0 +1,2246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.child;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.search.child.ScoreType;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for parent/child support: has_child / has_parent queries and filters,
+ * top_children, child score aggregation modes, and the parent id cache.
+ */
+public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void multiLevelChild() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("grandchild", "_parent", "type=child")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "c_value1").setParent("p1").get();
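+        // the grandchild must live on the same shard as its grandparent; with a single shard any routing value works, the explicit routing just makes that requirement visible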
+ client().prepareIndex("test", "grandchild", "gc1").setSource("gc_field", "gc_value1")
+ .setParent("c1").setRouting("gc1").get();
+ refresh();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter(
+ "child",
+ filteredQuery(termQuery("c_field", "c_value1"),
+ hasChildFilter("grandchild", termQuery("gc_field", "gc_value1")))))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("parent", termFilter("p_field", "p_value1")))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("child", termFilter("c_field", "c_value1")))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("gc1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("parent", termQuery("p_field", "p_value1"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("child", termQuery("c_field", "c_value1"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("gc1"));
+ }
+
+ @Test
+ // see #2744
+ public void test2744() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("foo")
+ .addMapping("test", "_parent", "type=foo")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "foo", "1").setSource("foo", 1).get();
+ client().prepareIndex("test", "test").setSource("foo", 1).setParent("1").get();
+ client().admin().indices().prepareRefresh().get();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("test", matchQuery("foo", 1))).execute()
+ .actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ }
+
+ @Test
+ public void simpleChildQuery() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ refresh();
+
+ // TEST FETCHING _parent from child
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(idsQuery("child").ids("c1")).addFields("_parent").execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+
+ // TEST matching on parent
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("child._parent", "p1")).addFields("_parent").execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("_parent", "p1")).addFields("_parent").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(queryString("_parent:p1")).addFields("_parent").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ // TOP CHILDREN QUERY
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "yellow"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "blue")).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "red")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS PARENT
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c3"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c4"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c2"));
+ }
+
+ @Test
+ public void testClearIdCacheBug() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p_value0").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+
+ refresh();
+ // No _parent field yet, there shouldn't be anything in the parent id cache
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
+
+ // Now add mapping + children
+ client().admin().indices().preparePutMapping("test").setType("child")
+ .setSource("_parent", "type=parent")
+ .get();
+
+ // index simple data
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ client().admin().indices().prepareRefresh().get();
+
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ // automatic warm-up has populated the cache since it found a parent field mapper
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
+
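+        // clearing the id cache should bring its reported memory usage back down to zero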
+ client().admin().indices().prepareClearCache("test").setIdCache(true).get();
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/3290
+ public void testCachingBug_withFqueryFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ // index simple data
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "parent", Integer.toString(i)).setSource("p_field", i));
+ }
+ indexRandom(randomBoolean(), builders);
+ builders.clear();
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "child", Integer.toString(i)).setSource("c_field", i).setParent("" + 0));
+ }
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "child", Integer.toString(i + 10)).setSource("c_field", i + 10).setParent(Integer.toString(i)));
+ }
+
+ if (randomBoolean()) {
+                break; // randomly break out and don't have deletes / updates
+ }
+ }
+ indexRandom(true, builders);
+
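+        // run the same cached filters several times in a row to exercise the caching bug tracked in issue #3290 (linked above)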
+ for (int i = 1; i <= 10; i++) {
+ logger.info("Round {}", i);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())).cache(true))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ }
+ }
+
+ @Test
+ public void testHasParentFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ Map<String, Set<String>> parentToChildren = newHashMap();
+ // Childless parent
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p0").get();
+ parentToChildren.put("p0", new HashSet<String>());
+
+ String previousParentId = null;
+ int numChildDocs = 32;
+ int numChildDocsPerParent = 0;
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
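+        // a new parent is started whenever i is a multiple of the (growing) numChildDocsPerParent, so each successive parent owns more children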
+ for (int i = 1; i <= numChildDocs; i++) {
+
+ if (previousParentId == null || i % numChildDocsPerParent == 0) {
+ previousParentId = "p" + i;
+ builders.add(client().prepareIndex("test", "parent", previousParentId).setSource("p_field", previousParentId));
+ numChildDocsPerParent++;
+ }
+
+ String childId = "c" + i;
+ builders.add(client().prepareIndex("test", "child", childId).setSource("c_field", childId).setParent(previousParentId));
+
+ if (!parentToChildren.containsKey(previousParentId)) {
+ parentToChildren.put(previousParentId, new HashSet<String>());
+ }
+ assertThat(parentToChildren.get(previousParentId).add(childId), is(true));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[0]));
+
+ assertThat(parentToChildren.isEmpty(), equalTo(false));
+ for (Map.Entry<String, Set<String>> parentToChildrenEntry : parentToChildren.entrySet()) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasParentFilter("parent", termQuery("p_field", parentToChildrenEntry.getKey()))))
+ .setSize(numChildDocsPerParent).get();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ Set<String> childIds = parentToChildrenEntry.getValue();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) childIds.size()));
+ for (int i = 0; i < searchResponse.getHits().totalHits(); i++) {
+ assertThat(childIds.remove(searchResponse.getHits().getAt(i).id()), is(true));
+ assertThat(searchResponse.getHits().getAt(i).score(), is(1.0f));
+ }
+ assertThat(childIds.size(), is(0));
+ }
+ }
+
+ @Test
+ public void simpleChildQueryWithFlush() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data with flushes, so we have many segments
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ refresh();
+
+ // TOP CHILDREN QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD QUERY
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD FILTER
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ }
+
+ @Test
+ public void simpleChildQueryWithFlushAnd3Shards() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data with flushes, so we have many segments
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+
+ client().admin().indices().prepareRefresh().get();
+
+ // TOP CHILDREN QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD QUERY
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD FILTER
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ }
+
+ @Test
+ public void testScopedFacet() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(topChildrenQuery("child", boolQuery().should(termQuery("c_field", "red")).should(termQuery("c_field", "yellow"))))
+ .addFacet(
+ termsFacet("facet1")
+ .facetFilter(boolFilter().should(termFilter("c_field", "red")).should(termFilter("c_field", "yellow")))
+ .field("c_field").global(true)).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ assertThat(searchResponse.getFacets().facets().size(), equalTo(1));
+ TermsFacet termsFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(termsFacet.getEntries().size(), equalTo(2));
+ assertThat(termsFacet.getEntries().get(0).getTerm().string(), equalTo("red"));
+ assertThat(termsFacet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(termsFacet.getEntries().get(1).getTerm().string(), equalTo("yellow"));
+ assertThat(termsFacet.getEntries().get(1).getCount(), equalTo(1));
+ }
+
+ @Test
+ public void testDeletedParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ // TOP CHILDREN QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow")))
+ .get();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+        // update p1 and check that we get the updated values...
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1_updated").get();
+ client().admin().indices().prepareRefresh().get();
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
+ }
+
+ @Test
+ public void testDfsSearchType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
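+        // these searches only need to complete without shard failures; wrapping parent/child queries in must_not is the interesting part under DFS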
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(hasChildQuery("child", boolQuery().should(queryString("c_field:*"))))).get();
+ assertNoFailures(searchResponse);
+
+ searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(hasParentQuery("parent", boolQuery().should(queryString("p_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+
+ searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ }
+
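+    // regression test: a top_children query wrapped in a must_not clause used to cause an ArrayIndexOutOfBoundsException (the "AOBE" in the name)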
+ @Test
+ public void testFixAOBEIfTopChildrenIsWrappedInMusNotClause() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testTopChildrenReSearchBug() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ int numberOfParents = 4;
+ int numberOfChildrenPerParent = 123;
+ for (int i = 1; i <= numberOfParents; i++) {
+ String parentId = String.format(Locale.ROOT, "p%d", i);
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", String.format(Locale.ROOT, "p_value%d", i)).execute()
+ .actionGet();
+ for (int j = 1; j <= numberOfChildrenPerParent; j++) {
+ client().prepareIndex("test", "child", String.format(Locale.ROOT, "%s_c%d", parentId, j))
+ .setSource("c_field1", parentId, "c_field2", i % 2 == 0 ? "even" : "not_even").setParent(parentId).execute()
+ .actionGet();
+ }
+ }
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field1", "p3")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field2", "even"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+ }
+
+ @Test
+ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrChildDocs() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
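+        // the flushes below create segments that contain only parent/child docs or only unrelated type1 docs; has_child and has_parent must cope with both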
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("c_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
+ client().prepareIndex("test", "type1", "1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", matchAllQuery()))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("parent", matchAllQuery()))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testCountApiUsage() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
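+        // the count API should accept the same parent/child queries and filters as search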
+ CountResponse countResponse = client().prepareCount("test").setQuery(topChildrenQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "1"))))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(hasParentFilter("parent", termQuery("p_field", "1"))))
+ .get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testExplainUsage() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
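+        // parent/child queries don't build real explanations at this point; they report a fixed "not implemented yet..." description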
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ ExplainResponse explainResponse = client().prepareExplain("test", "parent", parentId)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.getExplanation().getDescription(), equalTo("not implemented yet..."));
+ }
+
+ @Test
+ public void testScoreForParentChildQueries() throws Exception {
+
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "child",
+ jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject())
+ .addMapping(
+ "child1",
+ jsonBuilder().startObject().startObject("child1").startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ indexRandom(false, createDocBuilders().toArray(new IndexRequestBuilder[0]));
+ refresh();
+
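+ // Only children with c_field2 == 0 match, and the script replaces the score with c_field1. Per parent, the matching c_field1 values are:
+ // parent 1 -> {1, 1, 2, 2} (sum 6, max 2, avg 1.5), parent 2 -> {3}, parent 3 -> {4}.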
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery("child",
+ QueryBuilders.customScoreQuery(matchQuery("c_field2", 0)).script("doc['c_field1'].value")).scoreType("sum"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(3f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery("child",
+ QueryBuilders.customScoreQuery(matchQuery("c_field2", 0)).script("doc['c_field1'].value")).scoreType("max"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(2f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery("child",
+ QueryBuilders.customScoreQuery(matchQuery("c_field2", 0)).script("doc['c_field1'].value")).scoreType("avg"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1.5f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasParentQuery("parent",
+ QueryBuilders.customScoreQuery(matchQuery("p_field1", "p_value3")).script("doc['p_field2'].value"))
+ .scoreType("score")).addSort(SortBuilders.fieldSort("c_field3")).addSort(SortBuilders.scoreSort())
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(7l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("13"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("14"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("15"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[3].id(), equalTo("16"));
+ assertThat(response.getHits().hits()[3].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[4].id(), equalTo("17"));
+ assertThat(response.getHits().hits()[4].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[5].id(), equalTo("18"));
+ assertThat(response.getHits().hits()[5].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[6].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[6].score(), equalTo(5f));
+ }
+
+ List<IndexRequestBuilder> createDocBuilders() {
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ // Parent 1 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("1").setIndex("test").setSource("p_field", "p_value1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("1").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("2").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("3").setIndex("test")
+ .setSource("c_field1", 2, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("4").setIndex("test")
+ .setSource("c_field1", 2, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("5").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("6").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2).setParent("1"));
+
+ // Parent 2 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("2").setIndex("test").setSource("p_field", "p_value2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("7").setIndex("test")
+ .setSource("c_field1", 3, "c_field2", 0).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("8").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("9").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("p")); // why
+ // "p"????
+ indexBuilders.add(client().prepareIndex().setType("child").setId("10").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("11").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("12").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2).setParent("2"));
+
+ // Parent 3 and its children
+
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("3").setIndex("test")
+ .setSource("p_field1", "p_value3", "p_field2", 5));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("13").setIndex("test")
+ .setSource("c_field1", 4, "c_field2", 0, "c_field3", 0).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("14").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1, "c_field3", 1).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("15").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 2).setParent("3")); // why
+ // "p"????
+ indexBuilders.add(client().prepareIndex().setType("child").setId("16").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 3).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("17").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 4).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("18").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 5).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child1").setId("1").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 6).setParent("3"));
+
+ return indexBuilders;
+ }
+
+ @Test
+ public void testScoreForParentChildQueries_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("child1", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ indexRandom(false, createDocBuilders().toArray(new IndexRequestBuilder[0]));
+ refresh();
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0), scriptFunction("doc['c_field1'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("sum")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(3f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0), scriptFunction("doc['c_field1'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("max")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(2f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0), scriptFunction("doc['c_field1'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("avg")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1.5f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasParentQuery(
+ "parent",
+ QueryBuilders.functionScoreQuery(matchQuery("p_field1", "p_value3"), scriptFunction("doc['p_field2'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("score"))
+ .addSort(SortBuilders.fieldSort("c_field3")).addSort(SortBuilders.scoreSort()).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(7l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("13"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("14"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("15"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[3].id(), equalTo("16"));
+ assertThat(response.getHits().hits()[3].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[4].id(), equalTo("17"));
+ assertThat(response.getHits().hits()[4].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[5].id(), equalTo("18"));
+ assertThat(response.getHits().hits()[5].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[6].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[6].score(), equalTo(5f));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/2536
+ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"))).get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()).setRefresh(true)
+ .get();
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"))).get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value")).scoreType("max"))
+ .get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value"))).get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value")).scoreType("score"))
+ .get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testHasChildAndHasParentFilter_withFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
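+ // Index a document of an unrelated type; it must not match either the has_child or the has_parent filter.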
+ client().prepareIndex("test", "type1", "3").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", termFilter("c_field", 1)))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("parent", termFilter("p_field", 1)))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ }
+
+ @Test
+ public void testSimpleQueryRewrite() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ int childId = 0;
+ for (int i = 0; i < 10; i++) {
+ String parentId = String.format(Locale.ROOT, "p%03d", i);
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", parentId).get();
+ for (int j = 0; j < 50; j++) {
+ String childUid = String.format(Locale.ROOT, "c%03d", childId++);
+ client().prepareIndex("test", "child", childUid).setSource("c_field", childUid).setParent(parentId).get();
+ }
+ }
+ refresh();
+
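+ // The prefix queries below are multi-term queries that must be rewritten before execution; verify this works inside has_child, has_parent and top_children for both search types.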
+ SearchType[] searchTypes = new SearchType[]{SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH};
+ for (SearchType searchType : searchTypes) {
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(hasChildQuery("child", prefixQuery("c_field", "c")).scoreType("max")).addSort("p_field", SortOrder.ASC)
+ .setSize(5).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(10L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("p000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("p001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("p002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("p003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("p004"));
+
+ searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(hasParentQuery("parent", prefixQuery("p_field", "p")).scoreType("score")).addSort("c_field", SortOrder.ASC)
+ .setSize(5).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(500L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("c000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("c001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("c002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("c003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("c004"));
+
+ searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(topChildrenQuery("child", prefixQuery("c_field", "c"))).addSort("p_field", SortOrder.ASC).setSize(5)
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(10L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("p000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("p001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("p002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("p003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("p004"));
+ }
+ }
+
+ @Test
+ // See also issue:
+ // https://github.com/elasticsearch/elasticsearch/issues/3144
+ public void testReIndexingParentAndChildDocuments() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "x").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "yellow")).scoreType("sum")).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ boolQuery().must(matchQuery("c_field", "x")).must(
+ hasParentQuery("parent", termQuery("p_field", "p_value2")).scoreType("score"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c3"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c4"));
+
+ // re-index the same parent and child ids repeatedly; queries must keep resolving against the latest versions
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "d" + i).setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().admin().indices().prepareRefresh("test").get();
+ }
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow")).scoreType("sum"))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ boolQuery().must(matchQuery("c_field", "x")).must(
+ hasParentQuery("parent", termQuery("p_field", "p_value2")).scoreType("score"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), Matchers.anyOf(equalTo("c3"), equalTo("c4")));
+ assertThat(searchResponse.getHits().getAt(1).id(), Matchers.anyOf(equalTo("c3"), equalTo("c4")));
+ }
+
+ @Test
+ // See also issue:
+ // https://github.com/elasticsearch/elasticsearch/issues/3203
+ public void testHasChildQueryWithMinimumScore() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "x").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "x").setParent("p2").get();
+ refresh();
+
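+ // With score type "sum", each matching child contributes 1.0, so p1 scores 1 and p2 scores 3; min_score 3 keeps only p2.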
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", matchAllQuery()).scoreType("sum"))
+ .setMinScore(3) // Score needs to be 3 or above!
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ }
+
+ @Test
+ public void testParentFieldFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", -1))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("child2", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
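+ // The _parent field should match both the bare parent id ("p1") and the type-qualified form ("parent#p1").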
+ // test term filter
+ SearchResponse response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1")))
+ .get();
+ assertHitCount(response, 0l);
+
+ client().prepareIndex("test", "some_type", "1").setSource("field", "value").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "value").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "value").setParent("p1").get();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 0l);
+ refresh();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ client().prepareIndex("test", "parent2", "p1").setSource("p_field", "value").setRefresh(true).get();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ // test terms filter
+ client().prepareIndex("test", "child2", "c1").setSource("c_field", "value").setParent("p1").get();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ refresh();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 2l);
+
+ refresh();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "p1", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 2l);
+
+ response = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "parent#p1", "parent2#p1"))).get();
+ assertHitCount(response, 2l);
+ }
+
+ @Test
+ public void testHasChildNotBeingCached() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
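+ // Even with cache(true), the has_child filter must reflect newly indexed children instead of serving stale cached results.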
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "parent", "p5").setSource("p_field", "p_value5").get();
+ client().prepareIndex("test", "parent", "p6").setSource("p_field", "p_value6").get();
+ client().prepareIndex("test", "parent", "p7").setSource("p_field", "p_value7").get();
+ client().prepareIndex("test", "parent", "p8").setSource("p_field", "p_value8").get();
+ client().prepareIndex("test", "parent", "p9").setSource("p_field", "p_value9").get();
+ client().prepareIndex("test", "parent", "p10").setSource("p_field", "p_value10").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ client().prepareIndex("test", "child", "c2").setParent("p2").setSource("c_field", "blue").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+ @Test
+ public void testDeleteByQuery_has_child() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", "-1")
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get();
+ client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get();
+ client().admin().indices().prepareRefresh().get();
+ // p4 is indexed after the refresh, so it is not yet visible to the search api, but it will still be deleted via the delete_by_query api!
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get();
+ client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get();
+
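+ // Only the refreshed docs are searchable, so exactly two parents (p2 and p3) have a visible blue child.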
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testDeleteByQuery_has_child_SingleRefresh() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", "-1")
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get();
+ client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get();
+ client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get();
+ client().admin().indices().prepareRefresh().get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 3l);
+
+ client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
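+ // Randomly expresses the has_child constraint as a constant-score filter, a filtered query, or a scoring query, so all execution paths get exercised.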
+ private QueryBuilder randomHasChild(String type, String field, String value) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return constantScoreQuery(hasChildFilter(type, termQuery(field, value)));
+ } else {
+ return filteredQuery(matchAllQuery(), hasChildFilter(type, termQuery(field, value)));
+ }
+ } else {
+ return hasChildQuery(type, termQuery(field, value));
+ }
+ }
+
+ @Test
+ public void testDeleteByQuery_has_parent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", "-1")
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareRefresh().get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2"))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ client().prepareDeleteByQuery("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2"))
+ .get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
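+ // Randomly expresses the has_parent constraint as a constant-score filter, a filtered query, or a scoring query.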
+ private QueryBuilder randomHasParent(String type, String field, String value) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return constantScoreQuery(hasParentFilter(type, termQuery(field, value)));
+ } else {
+ return filteredQuery(matchAllQuery(), hasParentFilter(type, termQuery(field, value)));
+ }
+ } else {
+ return hasParentQuery(type, termQuery(field, value));
+ }
+ }
+
+ @Test
+ // Relates to bug: https://github.com/elasticsearch/elasticsearch/issues/3818
+ public void testHasChildQueryOnlyReturnsSingleChildType() {
+ client().admin().indices().prepareCreate("grandissue")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("grandparent", "name", "type=string")
+ .addMapping("parent", "_parent", "type=grandparent")
+ .addMapping("child_type_one", "_parent", "type=parent")
+ .addMapping("child_type_two", "_parent", "type=parent")
+ .get();
+
+ client().prepareIndex("grandissue", "grandparent", "1").setSource("name", "Grandpa").get();
+ client().prepareIndex("grandissue", "parent", "2").setParent("1").setSource("name", "Dana").get();
+ client().prepareIndex("grandissue", "child_type_one", "3").setParent("2").setRouting("1")
+ .setSource("name", "William")
+ .get();
+ client().prepareIndex("grandissue", "child_type_two", "4").setParent("2").setRouting("1")
+ .setSource("name", "Kate")
+ .get();
+ client().admin().indices().prepareRefresh("grandissue").get();
+
+ SearchResponse searchResponse = client().prepareSearch("grandissue").setQuery(
+ boolQuery().must(
+ hasChildQuery(
+ "parent",
+ boolQuery().must(
+ hasChildQuery(
+ "child_type_one",
+ boolQuery().must(
+ queryString("name:William*").analyzeWildcard(true)
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("grandissue").setQuery(
+ boolQuery().must(
+ hasChildQuery(
+ "parent",
+ boolQuery().must(
+ hasChildQuery(
+ "child_type_two",
+ boolQuery().must(
+ queryString("name:William*").analyzeWildcard(true)
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void indexChildDocWithNoParentMapping() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent")
+ .addMapping("child1")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1", "_parent", "bla").get();
+ try {
+ client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
+ }
+ try {
+ client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
+ }
+
+ refresh();
+ }
+
+ @Test
+ public void testAddingParentToExistingMapping() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).get();
+ ensureGreen();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child").setSource("number", "type=integer")
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ Map<String, Object> mapping = getMappingsResponse.getMappings().get("test").get("child").getSourceAsMap();
+ assertThat(mapping.size(), equalTo(1));
+ assertThat(mapping.get("properties"), notNullValue());
+
+ try {
+ // Adding _parent metadata field to existing mapping is prohibited:
+ client().admin().indices().preparePutMapping("test").setType("child").setSource(jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).get();
+ fail();
+ } catch (MergeMappingException e) {
+ assertThat(e.getMessage(), equalTo("Merge failed with failures {[The _parent field can't be added or updated]}"));
+ }
+ }
+
+ @Test
+ // The SimpleIdReaderTypeCache#docById method used lget(), which is not safe when the map is shared between threads.
+ public void testTopChildrenBug_concurrencyIssue() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ int numThreads = 10;
+ final CountDownLatch latch = new CountDownLatch(numThreads);
+ final AtomicReference<AssertionError> holder = new AtomicReference<AssertionError>();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (int i = 0; i < 100; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "blue")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "red")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+ } catch (AssertionError error) {
+ holder.set(error);
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+
+ for (int i = 0; i < 10; i++) {
+ new Thread(r).start();
+ }
+ latch.await();
+ if (holder.get() != null) {
+ throw holder.get();
+ }
+ }
+
+ @Test
+ public void testHasChildQueryWithNestedInnerObjects() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent", "objects", "type=nested")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
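+ // The nested "objects" field creates hidden Lucene documents; the has_child query must not confuse them with parent documents.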
+ client().prepareIndex("test", "parent", "p1")
+ .setSource(jsonBuilder().startObject().field("p_field", "1").startArray("objects")
+ .startObject().field("i_field", "1").endObject()
+ .startObject().field("i_field", "2").endObject()
+ .startObject().field("i_field", "3").endObject()
+ .startObject().field("i_field", "4").endObject()
+ .startObject().field("i_field", "5").endObject()
+ .startObject().field("i_field", "6").endObject()
+ .endArray().endObject())
+ .get();
+ client().prepareIndex("test", "parent", "p2")
+ .setSource(jsonBuilder().startObject().field("p_field", "2").startArray("objects")
+ .startObject().field("i_field", "1").endObject()
+ .startObject().field("i_field", "2").endObject()
+ .endArray().endObject())
+ .get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ refresh();
+
+ String scoreMode = ScoreType.values()[getRandom().nextInt(ScoreType.values().length)].name().toLowerCase(Locale.ROOT);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(QueryBuilders.hasChildQuery("child", termQuery("c_field", "blue")).scoreType(scoreMode), notFilter(termFilter("p_field", "3"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(QueryBuilders.hasChildQuery("child", termQuery("c_field", "red")).scoreType(scoreMode), notFilter(termFilter("p_field", "3"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+ @Test
+ public void testNamedFilters() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "1")).queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max").queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score").queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "1")).filterName("test")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasParentFilter("parent", termQuery("p_field", "1")).filterName("test")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+ }
+
+ @Test
+ public void testParentChildQueriesNoParentType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.refresh_interval", -1)
+ .put("index.number_of_replicas", 0))
+ .get();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().admin().indices().prepareRefresh().get();
+
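+ // has_child queries, filters and top_children must fail with 400 BAD_REQUEST when the child type has no _parent mapping.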
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setPostFilter(hasChildFilter("child", termQuery("c_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "1")).score("max"))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ // These can't fail, because there is no such check: a parent type can be referred to by many child types.
+ client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ client().prepareSearch("test")
+ .setPostFilter(hasParentFilter("parent", termQuery("p_field", "1")))
+ .get();
+ }
+
+ @Test
+ public void testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.refresh_interval", -1)
+ .put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().admin().indices().prepareRefresh().get();
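+ // Add the _parent mapping only after the parent doc has been indexed; children indexed afterwards must still join correctly.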
+ assertAcked(client().admin()
+ .indices()
+ .preparePutMapping("test")
+ .setType("child")
+ .setSource("_parent", "type=parent"));
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ client().admin().indices().prepareRefresh().get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setPostFilter(hasChildFilter("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "1")).score("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setPostFilter(hasParentFilter("parent", termQuery("p_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "c1");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "c1");
+ }
+
+ @Test
+ public void testParentChildCaching() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.refresh_interval", -1)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ client().admin().indices().prepareOptimize("test").setFlush(true).setWaitForMerge(true).get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c4").setParent("p3").setSource("c_field", "green").get();
+ client().prepareIndex("test", "child", "c5").setParent("p3").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c6").setParent("p4").setSource("c_field", "blue").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().admin().indices().prepareRefresh("test").get();
+
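+ // Run the cached bool filter twice; the second execution should be served from the filter cache and must return the same hits.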
+ for (int i = 0; i < 2; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), boolFilter()
+ .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red")))
+ .must(matchAllFilter())
+ .cache(true)))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
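+ // Update c3 from "red" to "blue"; after the refresh the cached filter must see the change and return only the one remaining parent with a red child.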
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "blue").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), boolFilter()
+ .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red")))
+ .must(matchAllFilter())
+ .cache(true)))
+ .get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testParentChildQueriesViaScrollApi() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "parent", "p" + i).setSource("{}").get();
+ client().prepareIndex("test", "child", "c" + i).setSource("{}").setParent("p" + i).get();
+ }
+
+ client().admin().indices().prepareRefresh().get();
+
+ QueryBuilder[] queries = new QueryBuilder[]{
+ hasChildQuery("child", matchAllQuery()),
+ filteredQuery(matchAllQuery(), hasChildFilter("child", matchAllQuery())),
+ hasParentQuery("parent", matchAllQuery()),
+ filteredQuery(matchAllQuery(), hasParentFilter("parent", matchAllQuery())),
+ topChildrenQuery("child", matchAllQuery()).factor(10)
+ };
+
+ for (QueryBuilder query : queries) {
+ SearchResponse scrollResponse = client().prepareSearch("test")
+ .setScroll(TimeValue.timeValueSeconds(30))
+ .setSize(1)
+ .addField("_id")
+ .setQuery(query)
+ .setSearchType("scan")
+ .execute()
+ .actionGet();
+
+ assertThat(scrollResponse.getFailedShards(), equalTo(0));
+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));
+
+ int scannedDocs = 0;
+ do {
+ scrollResponse = client()
+ .prepareSearchScroll(scrollResponse.getScrollId())
+ .setScroll(TimeValue.timeValueSeconds(30)).get();
+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));
+ scannedDocs += scrollResponse.getHits().getHits().length;
+ } while (scrollResponse.getHits().getHits().length > 0);
+ assertThat(scannedDocs, equalTo(10));
+ }
+ }
+
+ @Test
+ public void testValidateThatHasChildAndHasParentFilterAreNeverCached() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("field", "value")
+ .get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("field", "value")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", matchAllQuery()))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", matchAllQuery()))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // Internally, has_child and has_parent use a filter on the type field, which does end up in the filter cache.
+ // Executing the has_child and has_parent *queries* first therefore establishes a baseline for the
+ // filter cache size in this test.
+ IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ long initialCacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // The filter cache should be unchanged, because has_child and has_parent filters can't be cached.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // Again, the filter cache should be unchanged, because has_child and has_parent filters can't be cached.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.termFilter("field", "value").cache(true))
+ .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.termFilter("field", "value").cache(true))
+ .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // The cached term filter does land in the filter cache (hence the size grows below), but the has_child
+        // and has_parent filters themselves are still never cached.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(initialCacheSize));
+ }
+
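+    // The helpers below wrap the has_child builders with a randomized short-circuit cutoff so that, across
+    // randomized runs, both the short-circuited and the regular parent/child execution paths get exercised.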
+ private static HasChildFilterBuilder hasChildFilter(String type, QueryBuilder queryBuilder) {
+ HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, queryBuilder);
+ hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildFilterBuilder;
+ }
+
+ private static HasChildFilterBuilder hasChildFilter(String type, FilterBuilder filterBuilder) {
+ HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, filterBuilder);
+ hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildFilterBuilder;
+ }
+
+ private static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder queryBuilder) {
+ HasChildQueryBuilder hasChildQueryBuilder = QueryBuilders.hasChildQuery(type, queryBuilder);
+ hasChildQueryBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildQueryBuilder;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java b/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
new file mode 100644
index 0000000..a16acc1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.compress;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.compress.lzf.LZFCompressor;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests verifying that the stored _source, with compression enabled, disabled or left at the
+ * default, round-trips byte-identically through both get and search.
+ */
+public class SearchSourceCompressTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSourceCompressionLZF() throws IOException {
+ CompressorFactory.setDefaultCompressor(new LZFCompressor());
+ verifySource(true);
+ verifySource(false);
+ verifySource(null);
+ }
+
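+    // compress is deliberately a Boolean: TRUE forces compression, FALSE disables it, and null exercises the
+    // unspecified/default path; in every case the returned _source must match what was indexed byte for byte.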
+ private void verifySource(Boolean compress) throws IOException {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("compress", compress).endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ for (int i = 1; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(buildSource(i)).execute().actionGet();
+ }
+ client().prepareIndex("test", "type1", Integer.toString(10000)).setSource(buildSource(10000)).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 1; i < 100; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(i).bytes().toBytes()));
+ }
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(10000)).execute().actionGet();
+ assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(10000).bytes().toBytes()));
+
+ for (int i = 1; i < 100; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(i))).execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).source(), equalTo(buildSource(i).bytes().toBytes()));
+ }
+ }
+
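+    // Builds a document whose single "field" value holds `count` whitespace-separated tokens, so higher ids
+    // produce proportionally larger (and highly compressible) sources.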
+ private XContentBuilder buildSource(int count) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ StringBuilder sb = new StringBuilder();
+ for (int j = 0; j < count; j++) {
+ sb.append("value").append(j).append(' ');
+ }
+ builder.field("field", sb.toString());
+ return builder.endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java b/src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java
new file mode 100644
index 0000000..e1ea9e0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java
@@ -0,0 +1,1048 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.customscore;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests for the custom-score / custom-filters-score queries and their function-score
+ * counterparts, checking scoring, score modes and explanations side by side.
+ */
+public class CustomScoreSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testScoreExplainBug_2283() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().execute()
+ .actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), "2")
+ .add(termFilter("field", "value2"), "3").scoreMode("first")).setExplain(true).execute().actionGet();
+
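+        // scoreMode "first" applies the boost of the first matching filter: doc "2" matches the value2 filter,
+        // so it scores 3 and ranks first.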
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ Explanation explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(3f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+
+ // Same query but with boost
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), "2")
+ .add(termFilter("field", "value2"), "3").boost(2).scoreMode("first")).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(6f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(6f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[2].getDescription(), equalTo("queryBoost"));
+ assertThat(explanation.getDetails()[2].getValue(), equalTo(2f));
+ }
+
+ @Test
+ public void testScoreExplainBug_2283_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().execute()
+ .actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("first").add(termFilter("field", "value4"), scriptFunction("2"))
+ .add(termFilter("field", "value2"), scriptFunction("3"))).setExplain(true).execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ Explanation explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(3f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+
+ // Same query but with boost
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("first").add(termFilter("field", "value4"), scriptFunction("2"))
+ .add(termFilter("field", "value2"), scriptFunction("3")).boost(2)).setExplain(true).execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(6f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(6f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[2].getDescription(), equalTo("queryBoost"));
+ assertThat(explanation.getDetails()[2].getValue(), equalTo(2f));
+ }
+
+ @Test
+ public void testMultiValueCustomScriptBoost() throws ElasticsearchException, IOException {
+
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+                .addMapping(
+                        "type1",
+                        jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("snum")
+                                .field("type", "string").endObject().startObject("dnum").field("type", "double").endObject()
+                                .startObject("lnum").field("type", "long").endObject().startObject("gp").field("type", "geo_point")
+                                .endObject().endObject().endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
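+        // Index two docs with 100-valued fields: doc "1" holds 1..100 and doc "2" holds 2..101 (offset by one),
+        // so every min()-style script below yields 1 for doc "1" and 2 for doc "2", ranking "2" first.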
+ String[] values = new String[100];
+ String[] gp = new String[100];
+
+ long[] lValues = new long[100];
+ double[] dValues = new double[100];
+ int offset = 1;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ offset++;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running min(doc['num1'].value)");
+ SearchResponse response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(customScoreQuery(termQuery("test", "value"))
+ .script("c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script(
+ "c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script(
+ "c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script(
+ "c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+ }
+
+ @Test
+ public void testMultiValueCustomScriptBoost_withFunctionScore() throws ElasticsearchException, IOException {
+
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+                .addMapping(
+                        "type1",
+                        jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("snum")
+                                .field("type", "string").endObject().startObject("dnum").field("type", "double").endObject()
+                                .startObject("lnum").field("type", "long").endObject().startObject("gp").field("type", "geo_point")
+                                .endObject().endObject().endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
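+        // Same setup as testMultiValueCustomScriptBoost: doc "1" holds 1..100 and doc "2" holds 2..101, so the
+        // min()-style script functions below must rank doc "2" first.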
+ String[] values = new String[100];
+ String[] gp = new String[100];
+
+ long[] lValues = new long[100];
+ double[] dValues = new double[100];
+ int offset = 1;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ offset++;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running min(doc['num1'].value)");
+ SearchResponse response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+ }
+
+ @Test
+ public void testCustomScriptBoost() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("num1", 2.0f).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(customScoreQuery(termQuery("test", "value")).script("doc['num1'].value"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running -doc['num1'].value");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(customScoreQuery(termQuery("test", "value")).script("-doc['num1'].value"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+
+ logger.info("running pow(doc['num1'].value, 2)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true)
+ .query(customScoreQuery(termQuery("test", "value")).script("pow(doc['num1'].value, 2)")))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running max(doc['num1'].value, 1)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script("max(doc['num1'].value, 1d)")))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running doc['num1'].value * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script("doc['num1'].value * _score")))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running param1 * param2 * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script("param1 * param2 * _score").param("param1", 2)
+ .param("param2", 2)))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+
+ logger.info("running param1 * param2 * _score with filter instead of query");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termFilter("test", "value")).script("param1 * param2 * _score").param("param1", 2)
+ .param("param2", 2)))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+        assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score is always 1
+        assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score is always 1
+ }
+
+ @Test
+ public void testCustomScriptBoost_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("num1", 2.0f).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("doc['num1'].value"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running -doc['num1'].value");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("-doc['num1'].value"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+
+ logger.info("running pow(doc['num1'].value, 2)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("pow(doc['num1'].value, 2)"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running max(doc['num1'].value, 1)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("max(doc['num1'].value, 1d)"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running doc['num1'].value * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("doc['num1'].value * _score"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running param1 * param2 * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("param1 * param2 * _score")
+ .param("param1", 2).param("param2", 2))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+
+ logger.info("running param1 * param2 * _score with filter instead of query");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termFilter("test", "value"),
+ scriptFunction("param1 * param2 * _score").param("param1", 2).param("param2", 2))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+        assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score is always 1
+        assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score is always 1
+ }
+
+ @Test
+ public void testTriggerBooleanScorer() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(customFiltersScoreQuery(fuzzyQuery("field", "value")).add(FilterBuilders.idsFilter("type").addIds("1"), 3))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ }
+
+ @Test
+ public void testTriggerBooleanScorer_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(fuzzyQuery("field", "value")).add(FilterBuilders.idsFilter("type").addIds("1"),
+ factorFunction(3))).execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ }
+
+ @Test
+ public void testCustomFiltersScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), "2").add(termFilter("field", "value2"),
+ "3")).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value2"), 3)).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("total").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
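+        // scoreMode "total": doc "1" matches both the value1 (3) and red (5) filters, so it scores 3 + 5 = 8.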
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(8.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("max").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("1"), equalTo("3"))); // could
+ // be
+ // both
+ // depending
+ // on
+ // the
+ // order
+ // of
+ // the
+ // docs
+ // internally
+ // (lucene
+ // order)
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("avg").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
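+        // scoreMode "avg": doc "1" averages (3 + 5) / 2 = 4, so doc "3", which only matches red (5), ranks first.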
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(4.0f));
+ logger.info("--> Hit[1] {} Explanation {}", searchResponse.getHits().getAt(1).id(), searchResponse.getHits().getAt(1).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("min").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
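+        // scoreMode "min": doc "1" takes min(3, 5) = 3, leaving doc "3" (5) on top, then "4" (2) and "2" (1).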
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(3.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("multiply").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
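+        // scoreMode "multiply": doc "1" scores 3 * 5 = 15, ahead of "3" (5), "4" (2) and "2" (1).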
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(15.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(5.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("first")
+ .add(termFilter("field", "value4"), 2).add(termFilter("field", "value3"), 3)
+ .add(termFilter("field", "value2"), 4)).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("multiply")
+ .add(termFilter("field", "value4"), 2).add(termFilter("field", "value1"), 3)
+ .add(termFilter("color", "red"), 5)).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+ }
+
+ @Test
+ public void testCustomFiltersScore_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery())
+ .add(termFilter("field", "value4"), scriptFunction("2")).add(
+ termFilter("field", "value2"), scriptFunction("3"))).setExplain(true)
+ .execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), factorFunction(2)).add(
+ termFilter("field", "value2"), factorFunction(3))).setExplain(true).execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("sum")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
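+        // factor functions combined with scoreMode "sum": doc "1" scores 3 + 5 = 8, mirroring the
+        // customFiltersScoreQuery "total" case above.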
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(8.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("max")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("1"), equalTo("3"))); // could
+ // be
+ // both
+ // depending
+ // on
+ // the
+ // order
+ // of
+ // the
+ // docs
+ // internally
+ // (lucene
+ // order)
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("avg")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
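+        // scoreMode "avg": doc "1" averages (3 + 5) / 2 = 4 while doc "3" keeps 5 and ranks first.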
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(4.0f));
+ logger.info("--> Hit[1] {} Explanation {}", searchResponse.getHits().getAt(1).id(), searchResponse.getHits().getAt(1).explanation());
+
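+ // scoreMode "min": the lowest factor among the matching functions wins (doc "1": min(3, 5) = 3)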
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("min")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(3.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
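+ // scoreMode "multiply": the matching factors are multiplied (doc "1": 3 * 5 = 15)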
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("multiply")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(15.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(5.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
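+ // scoreMode "first": only the first matching function in the list is applied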
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("first")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value3"), factorFunction(3))
+ .add(termFilter("field", "value2"), factorFunction(4))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+
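+ // with a non-trivial base query the absolute scores vary, so only check that each score matches its explanation value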
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("multiply")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java b/src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java
new file mode 100644
index 0000000..0ad73d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+
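+/**
+ * Test utility that runs a {@link DuelExecutor} concurrently from a fixed
+ * thread pool and feeds every result to a {@link DuelJudge} together with a
+ * single-threaded baseline run, so that concurrent and sequential execution
+ * can be compared for consistency.
+ */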
+public class ConcurrentDuel<T> {
+
+
+ private final ExecutorService pool;
+ private final int numExecutorThreads;
+
+ public ConcurrentDuel(int numThreads) {
+ pool = Executors.newFixedThreadPool(numThreads);
+ this.numExecutorThreads = numThreads;
+ }
+
+ public void close() {
+ pool.shutdown();
+ }
+
+ public List<T> runDuel(final DuelExecutor<T> executor, int iterations, int numTasks) throws InterruptedException, ExecutionException {
+ List<T> results = new ArrayList<T>();
+ T firstRun = executor.run();
+ results.add(firstRun);
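+ // all workers block on the latch so they start together, then share the
+ // remaining iteration budget through the atomic counter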
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicLong count = new AtomicLong(iterations);
+ List<Future<List<T>>> futures = new ArrayList<Future<List<T>>>();
+ for (int i = 0; i < numTasks; i++) {
+ futures.add(pool.submit(new Callable<List<T>>() {
+
+ @Override
+ public List<T> call() throws Exception {
+ List<T> results = new ArrayList<T>();
+ latch.await();
+ while(count.decrementAndGet() >= 0) {
+ results.add(executor.run());
+ }
+ return results;
+ }
+ }));
+ }
+ latch.countDown();
+ for (Future<List<T>> future : futures) {
+ results.addAll(future.get());
+ }
+ return results;
+ }
+ public void duel(DuelJudge<T> judge, final DuelExecutor<T> executor, int iterations) throws InterruptedException, ExecutionException {
+ duel(judge, executor, iterations, numExecutorThreads);
+ }
+
+ public void duel(DuelJudge<T> judge, final DuelExecutor<T> executor, int iterations, int threadCount) throws InterruptedException, ExecutionException {
+ T firstRun = executor.run();
+ List<T> runDuel = runDuel(executor, iterations, threadCount);
+ for (T t : runDuel) {
+ judge.judge(firstRun, t);
+ }
+ }
+
+ /** Produces a single result for one run of the duel. */
+ public interface DuelExecutor<T> {
+ T run();
+ }
+
+ /** Checks a result against the single-threaded baseline run. */
+ public interface DuelJudge<T> {
+ void judge(T firstRun, T result);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java
new file mode 100644
index 0000000..6e9ceb7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacetBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Randomized terms facet tests: documents with random field values are indexed
+ * while control maps track the expected per-term counts, and the facet
+ * responses are then checked against those expectations for random
+ * combinations of comparator, size, fields, excludes, regex and all_terms.
+ */
+public class ExtendedFacetsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", numberOfShards())
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ protected int numDocs() {
+ return 2500;
+ }
+
+
+ @Test
+ @Slow
+ public void testTermFacet_stringFields() throws Throwable {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("field1_paged")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "paged_bytes")
+ .field("loading", randomBoolean() ? "eager" : "lazy")
+ .endObject()
+ .endObject()
+ .startObject("field1_fst")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .field("loading", randomBoolean() ? "eager" : "lazy")
+ .endObject()
+ .endObject()
+ .startObject("field1_dv")
+ .field("type", "string")
+ .field("index", randomBoolean() ? "no" : "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .field("loading", randomBoolean() ? "eager" : "lazy")
+ .endObject()
+ .endObject()
+ .startObject("q_field")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ )
+ .execute().actionGet();
+
+
+ Random random = getRandom();
+ int numOfQueryValues = 50;
+ String[] queryValues = new String[numOfQueryValues];
+ for (int i = 0; i < numOfQueryValues; i++) {
+ queryValues[i] = randomAsciiOfLength(5);
+ }
+
+ Set<String> uniqueValuesSet = new HashSet<String>();
+ int numOfVals = 400;
+ for (int i = 0; i < numOfVals; i++) {
+ uniqueValuesSet.add(randomAsciiOfLength(10));
+ }
+ String[] allUniqueFieldValues = uniqueValuesSet.toArray(new String[uniqueValuesSet.size()]);
+
+ Set<String> allField1Values = new HashSet<String>();
+ Set<String> allField1AndField2Values = new HashSet<String>();
+ Map<String, Map<String, Integer>> queryValToField1FacetEntries = new HashMap<String, Map<String, Integer>>();
+ Map<String, Map<String, Integer>> queryValToField1and2FacetEntries = new HashMap<String, Map<String, Integer>>();
+ for (int i = 1; i <= numDocs(); i++) {
+ int numField1Values = random.nextInt(17);
+ Set<String> field1Values = new HashSet<String>(numField1Values);
+ for (int j = 0; j <= numField1Values; j++) {
+ boolean added = false;
+ while (!added) {
+ added = field1Values.add(allUniqueFieldValues[random.nextInt(numOfVals)]);
+ }
+ }
+ allField1Values.addAll(field1Values);
+ allField1AndField2Values.addAll(field1Values);
+ String field2Val = allUniqueFieldValues[random.nextInt(numOfVals)];
+ allField1AndField2Values.add(field2Val);
+ String queryVal = queryValues[random.nextInt(numOfQueryValues)];
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("field1_paged", field1Values)
+ .field("field1_fst", field1Values)
+ .field("field1_dv", field1Values)
+ .field("field2", field2Val)
+ .field("q_field", queryVal)
+ .endObject())
+ .execute().actionGet();
+
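+ // occasionally flush while indexing so the data ends up in multiple segments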
+ if (random.nextInt(2000) == 854) {
+ client().admin().indices().prepareFlush("test").execute().actionGet();
+ }
+ addControlValues(queryValToField1FacetEntries, field1Values, queryVal);
+ addControlValues(queryValToField1and2FacetEntries, field1Values, queryVal);
+ addControlValues(queryValToField1and2FacetEntries, field2Val, queryVal);
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ String[] facetFields = new String[]{"field1_paged", "field1_fst", "field1_dv"};
+ TermsFacet.ComparatorType[] compTypes = TermsFacet.ComparatorType.values();
+ for (String facetField : facetFields) {
+ for (String queryVal : queryValToField1FacetEntries.keySet()) {
+ Set<String> allFieldValues;
+ Map<String, Integer> queryControlFacets;
+ TermsFacet.ComparatorType compType = compTypes[random.nextInt(compTypes.length)];
+ TermsFacetBuilder termsFacetBuilder = FacetBuilders.termsFacet("facet1").order(compType);
+
+ boolean useFields;
+ if (random.nextInt(4) == 3) {
+ useFields = true;
+ queryControlFacets = queryValToField1and2FacetEntries.get(queryVal);
+ allFieldValues = allField1AndField2Values;
+ termsFacetBuilder.fields(facetField, "field2");
+ } else {
+ queryControlFacets = queryValToField1FacetEntries.get(queryVal);
+ allFieldValues = allField1Values;
+ useFields = false;
+ termsFacetBuilder.field(facetField);
+ }
+ int size;
+ if (numberOfShards() == 1 || compType == TermsFacet.ComparatorType.TERM || compType == TermsFacet.ComparatorType.REVERSE_TERM) {
+ size = random.nextInt(queryControlFacets.size());
+ } else {
+ size = allFieldValues.size();
+ }
+ termsFacetBuilder.size(size);
+
+ if (random.nextBoolean()) {
+ termsFacetBuilder.executionHint("map");
+ }
+ List<String> excludes = new ArrayList<String>();
+ if (random.nextBoolean()) {
+ int numExcludes = random.nextInt(5) + 1;
+ List<String> facetValues = new ArrayList<String>(queryControlFacets.keySet());
+ for (int i = 0; i < numExcludes; i++) {
+ excludes.add(facetValues.get(random.nextInt(facetValues.size())));
+ }
+ termsFacetBuilder.exclude(excludes.toArray());
+ }
+ String regex = null;
+ if (random.nextBoolean()) {
+ List<String> facetValues = new ArrayList<String>(queryControlFacets.keySet());
+ regex = facetValues.get(random.nextInt(facetValues.size()));
+ regex = "^" + regex.substring(0, regex.length() / 2) + ".*";
+ termsFacetBuilder.regex(regex);
+ }
+
+ boolean allTerms = random.nextInt(10) == 3;
+ termsFacetBuilder.allTerms(allTerms);
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.termQuery("q_field", queryVal))
+ .addFacet(termsFacetBuilder)
+ .execute().actionGet();
+ TermsFacet actualFacetEntries = response.getFacets().facet("facet1");
+
+ List<Tuple<Text, Integer>> expectedFacetEntries = getExpectedFacetEntries(allFieldValues, queryControlFacets, size, compType, excludes, regex, allTerms);
+ String reason = String.format(Locale.ROOT, "query: [%s] field: [%s] size: [%d] order: [%s] all_terms: [%s] fields: [%s] regex: [%s] excludes: [%s]", queryVal, facetField, size, compType, allTerms, useFields, regex, excludes);
+ assertThat(reason, actualFacetEntries.getEntries().size(), equalTo(expectedFacetEntries.size()));
+ for (int i = 0; i < expectedFacetEntries.size(); i++) {
+ assertThat(reason, actualFacetEntries.getEntries().get(i).getTerm(), equalTo(expectedFacetEntries.get(i).v1()));
+ assertThat(reason, actualFacetEntries.getEntries().get(i).getCount(), equalTo(expectedFacetEntries.get(i).v2()));
+ }
+ }
+ }
+ }
+
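+ /** Increments the expected facet count of fieldVal for the given query value. */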
+ private void addControlValues(Map<String, Map<String, Integer>> queryValToFacetFieldEntries, String fieldVal, String queryVal) {
+ Map<String, Integer> controlFieldFacets = queryValToFacetFieldEntries.get(queryVal);
+ if (controlFieldFacets == null) {
+ controlFieldFacets = new HashMap<String, Integer>();
+ queryValToFacetFieldEntries.put(queryVal, controlFieldFacets);
+ }
+ Integer controlCount = controlFieldFacets.get(fieldVal);
+ if (controlCount == null) {
+ controlCount = 0;
+ }
+ controlFieldFacets.put(fieldVal, ++controlCount);
+ }
+
+ private void addControlValues(Map<String, Map<String, Integer>> queryValToFacetFieldEntries, Set<String> fieldValues, String queryVal) {
+ for (String fieldValue : fieldValues) {
+ addControlValues(queryValToFacetFieldEntries, fieldValue, queryVal);
+ }
+ }
+
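+ /** Builds the expected facet entries from the control map, applying excludes, regex, all_terms and the requested sort, then truncates to size. */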
+ private List<Tuple<Text, Integer>> getExpectedFacetEntries(Set<String> fieldValues,
+ Map<String, Integer> controlFacetsField,
+ int size,
+ TermsFacet.ComparatorType sort,
+ List<String> excludes,
+ String regex,
+ boolean allTerms) {
+ Pattern pattern = null;
+ if (regex != null) {
+ pattern = Regex.compile(regex, null);
+ }
+
+ List<Tuple<Text, Integer>> entries = new ArrayList<Tuple<Text, Integer>>();
+ for (Map.Entry<String, Integer> e : controlFacetsField.entrySet()) {
+ if (excludes.contains(e.getKey())) {
+ continue;
+ }
+ if (pattern != null && !pattern.matcher(e.getKey()).matches()) {
+ continue;
+ }
+
+ entries.add(new Tuple<Text, Integer>(new StringText(e.getKey()), e.getValue()));
+ }
+
+ if (allTerms) {
+ for (String fieldValue : fieldValues) {
+ if (!controlFacetsField.containsKey(fieldValue)) {
+ if (excludes.contains(fieldValue)) {
+ continue;
+ }
+ if (pattern != null && !pattern.matcher(fieldValue).matches()) {
+ continue;
+ }
+
+ entries.add(new Tuple<Text, Integer>(new StringText(fieldValue), 0));
+ }
+ }
+ }
+
+ switch (sort) {
+ case COUNT:
+ Collections.sort(entries, count);
+ break;
+ case REVERSE_COUNT:
+ Collections.sort(entries, count_reverse);
+ break;
+ case TERM:
+ Collections.sort(entries, term);
+ break;
+ case REVERSE_TERM:
+ Collections.sort(entries, term_reverse);
+ break;
+ }
+ return size >= entries.size() ? entries : entries.subList(0, size);
+ }
+
+ private final static Count count = new Count();
+ private final static CountReverse count_reverse = new CountReverse();
+ private final static Term term = new Term();
+ private final static TermReverse term_reverse = new TermReverse();
+
+ private static class Count implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ int cmp = o2.v2() - o1.v2();
+ if (cmp != 0) {
+ return cmp;
+ }
+ cmp = o2.v1().compareTo(o1.v1());
+ if (cmp != 0) {
+ return cmp;
+ }
+ return System.identityHashCode(o2) - System.identityHashCode(o1);
+ }
+
+ }
+
+ private static class CountReverse implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ return -count.compare(o1, o2);
+ }
+
+ }
+
+ private static class Term implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ return o1.v1().compareTo(o2.v1());
+ }
+
+ }
+
+ private static class TermReverse implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ return -term.compare(o1, o2);
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java
new file mode 100644
index 0000000..88faf06
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+/**
+ * Runs the {@link ExtendedFacetsTests} with more shards (8) and documents (10000).
+ */
+public class ExtendedFacetsTestsMultiShardMultiNodeTests extends ExtendedFacetsTests {
+
+ @Override
+ protected int numberOfShards() {
+ return 8;
+ }
+
+ @Override
+ protected int numDocs() {
+ return 10000;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java b/src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java
new file mode 100644
index 0000000..6130db0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java
@@ -0,0 +1,2378 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.facet.datehistogram.DateHistogramFacet;
+import org.elasticsearch.search.facet.filter.FilterFacet;
+import org.elasticsearch.search.facet.histogram.HistogramFacet;
+import org.elasticsearch.search.facet.query.QueryFacet;
+import org.elasticsearch.search.facet.range.RangeFacet;
+import org.elasticsearch.search.facet.statistical.StatisticalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet.Entry;
+import org.elasticsearch.search.facet.terms.doubles.InternalDoubleTermsFacet;
+import org.elasticsearch.search.facet.terms.longs.InternalLongTermsFacet;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.search.facet.FacetBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Facet tests for the facet types exercised below: terms, filter, query,
+ * histogram, date_histogram, range, statistical and terms_stats facets.
+ */
+public class SimpleFacetsTests extends ElasticsearchIntegrationTest {
+
+ private int numRuns = -1;
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ protected int numberOfRuns() {
+ if (numRuns == -1) {
+ numRuns = atLeast(3);
+ }
+ return numRuns;
+ }
+
+ @Test
+ public void testSimpleFacetEmptyFacetFilter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setFacets(new BytesArray(
+ "{\"facet1\":{\"filter\":{ }}}").array())
+ .get();
+
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ FilterFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testSimpleFacetEmptyFilterFacet() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setFacets(new BytesArray(
+ "{\"facet1\":{\"terms\":{\"field\":\"tag\"},\"facet_filter\":{ }}}").array())
+ .get();
+
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("green"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ }
+
+ @Test
+ public void testBinaryFacet() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "blue")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setFacets(XContentFactory.jsonBuilder().startObject()
+ .startObject("facet1")
+ .startObject("terms")
+ .field("field", "tag")
+ .endObject()
+ .endObject()
+ .endObject().bytes())
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testFacetNumeric() throws ElasticsearchException, IOException {
+ prepareCreate("test").addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("name", "" + i)
+ .field("multiValued", "" + i, "" + (90 + i % 10))
+ .field("byte", i)
+ .field("short", i + Byte.MAX_VALUE)
+ .field("integer", i + Short.MAX_VALUE)
+ .field("long", i + Integer.MAX_VALUE)
+ .field("float", (float) i)
+ .field("double", (double) i)
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", "" + (i + 100)).setSource(jsonBuilder().startObject()
+ .field("foo", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
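+ // run the facet requests once with the "map" execution hint and once with the default (null) execution mode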
+ String[] execHint = new String[]{"map", null};
+ for (String hint : execHint) {
+
+ flushAndRefresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("double").executionHint(hint).field("double").size(10))
+ .addFacet(termsFacet("float").executionHint(hint).field("float").size(10))
+ .addFacet(termsFacet("integer").executionHint(hint).field("integer").size(10))
+ .addFacet(termsFacet("long").executionHint(hint).field("long").size(10))
+ .addFacet(termsFacet("short").executionHint(hint).field("short").size(10))
+ .addFacet(termsFacet("byte").executionHint(hint).field("byte").size(10))
+ .addFacet(termsFacet("termFacet").executionHint(hint).field("name").size(10))
+ .addFacet(termsFacet("termFacetRegex").executionHint(hint).field("multiValued").regex("9\\d").size(20))
+ .addFacet(termsFacet("termFacetScript").executionHint(hint).field("multiValued").script("Integer.toHexString(Integer.parseInt(term))").size(10))
+ .addFacet(termsFacet("termFacetScriptRegex").executionHint(hint).field("multiValued").script("Integer.toHexString(Integer.parseInt(term))").regex("9\\d").size(20))
+
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(110l));
+ TermsFacet facet = searchResponse.getFacets().facet("termFacet");
+ assertThat(facet.getName(), equalTo("termFacet"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("termFacetRegex");
+ assertThat(facet.getName(), equalTo("termFacetRegex"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(190l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ int count = 99;
+ for (Entry entry : facet) {
+ assertThat(Integer.parseInt(entry.getTerm().string()), equalTo(count--));
+ assertThat(entry.getCount(), equalTo(10));
+ }
+
+ facet = searchResponse.getFacets().facet("termFacetScriptRegex");
+ assertThat(facet.getName(), equalTo("termFacetScriptRegex"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(190l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ count = 99;
+ for (Entry entry : facet) {
+ assertThat(entry.getTerm().string(), equalTo(Integer.toHexString(count--)));
+ assertThat(entry.getCount(), equalTo(10));
+ }
+
+ facet = searchResponse.getFacets().facet("termFacetScript");
+ assertThat(facet.getName(), equalTo("termFacetScript"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(190l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ count = 99;
+ for (Entry entry : facet) {
+ assertThat(entry.getTerm().string(), equalTo(Integer.toHexString(count--)));
+ assertThat(entry.getCount(), equalTo(10));
+ }
+
+ facet = searchResponse.getFacets().facet("double");
+ assertThat(facet.getName(), equalTo("double"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("float");
+ assertThat(facet.getName(), equalTo("float"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("long");
+ assertThat(facet.getName(), equalTo("long"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("integer");
+ assertThat(facet.getName(), equalTo("integer"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("short");
+ assertThat(facet.getName(), equalTo("short"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+ }
+
+ }
+
+
+ @Test
+ @Slow
+ public void testConcurrentFacets() throws ElasticsearchException, IOException, InterruptedException, ExecutionException {
+ prepareCreate("test")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("name", "" + i)
+ .field("byte", i)
+ .field("short", i + Byte.MAX_VALUE)
+ .field("integer", i + Short.MAX_VALUE)
+ .field("long", i + Integer.MAX_VALUE)
+ .field("float", (float) i)
+ .field("double", (double) i)
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", "" + (i + 100)).setSource(jsonBuilder().startObject()
+ .field("foo", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
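+ // duel: fire the same facet request from 5 threads and check every response against a single-threaded baseline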
+ ConcurrentDuel<Facets> duel = new ConcurrentDuel<Facets>(5);
+ {
+ final Client cl = client();
+
+ duel.duel(new ConcurrentDuel.DuelJudge<Facets>() {
+
+ @Override
+ public void judge(Facets firstRun, Facets result) {
+ for (Facet f : result) {
+ TermsFacet facet = (TermsFacet) f;
+ assertThat(facet.getName(), isIn(new String[]{"short", "double", "byte", "float", "integer", "long", "termFacet"}));
+ TermsFacet firstRunFacet = (TermsFacet) firstRun.getFacets().get(facet.getName());
+ assertThat(facet.getEntries().size(), equalTo(firstRunFacet.getEntries().size()));
+
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ List<? extends Entry> right = facet.getEntries();
+ List<? extends Entry> left = firstRunFacet.getEntries();
+
+ for (int i = 0; i < facet.getEntries().size(); i++) {
+ assertThat(left.get(i).getTerm(), equalTo(right.get(i).getTerm()));
+ assertThat(left.get(i).getCount(), equalTo(right.get(i).getCount()));
+ }
+ }
+ }
+ }, new ConcurrentDuel.DuelExecutor<Facets>() {
+ AtomicInteger count = new AtomicInteger();
+
+ @Override
+ public Facets run() {
+ final SearchRequestBuilder facetRequest;
+ if (count.incrementAndGet() % 2 == 0) { // every second request is mapped
+ facetRequest = cl.prepareSearch().setQuery(matchAllQuery())
+ .addFacet(termsFacet("double").field("double").size(10))
+ .addFacet(termsFacet("float").field("float").size(10))
+ .addFacet(termsFacet("integer").field("integer").size(10))
+ .addFacet(termsFacet("long").field("long").size(10))
+ .addFacet(termsFacet("short").field("short").size(10))
+ .addFacet(termsFacet("byte").field("byte").size(10))
+ .addFacet(termsFacet("termFacet").field("name").size(10));
+ } else {
+ facetRequest = cl.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("double").executionHint("map").field("double").size(10))
+ .addFacet(termsFacet("float").executionHint("map").field("float").size(10))
+ .addFacet(termsFacet("integer").executionHint("map").field("integer").size(10))
+ .addFacet(termsFacet("long").executionHint("map").field("long").size(10))
+ .addFacet(termsFacet("short").executionHint("map").field("short").size(10))
+ .addFacet(termsFacet("byte").executionHint("map").field("byte").size(10))
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").size(10));
+ }
+
+ SearchResponse actionGet = facetRequest.execute().actionGet();
+ return actionGet.getFacets();
+ }
+ }, 5000
+ );
+ }
+ {
+ duel.duel(new ConcurrentDuel.DuelJudge<Facets>() {
+
+ @Override
+ public void judge(Facets firstRun, Facets result) {
+ for (Facet f : result) {
+ TermsFacet facet = (TermsFacet) f;
+ assertThat(facet.getName(), equalTo("termFacet"));
+ TermsFacet firstRunFacet = (TermsFacet) firstRun.getFacets().get(facet.getName());
+ assertThat(facet.getEntries().size(), equalTo(firstRunFacet.getEntries().size()));
+
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ List<? extends Entry> right = facet.getEntries();
+ List<? extends Entry> left = firstRunFacet.getEntries();
+
+ for (int i = 0; i < facet.getEntries().size(); i++) {
+ assertThat(left.get(i).getTerm(), equalTo(right.get(i).getTerm()));
+ assertThat(left.get(i).getCount(), equalTo(right.get(i).getCount()));
+ }
+ }
+ }
+ }, new ConcurrentDuel.DuelExecutor<Facets>() {
+ AtomicInteger count = new AtomicInteger();
+
+ @Override
+ public Facets run() {
+ final SearchRequestBuilder facetRequest;
+ switch (count.incrementAndGet() % 6) {
+ case 4:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").script("\"\" + (Integer.parseInt(term) % 100)").size(10));
+ break;
+ case 3:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field("name").regex("\\d+").size(10));
+ break;
+ case 2:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").regex("\\d+").script("term").size(10));
+ break;
+ case 1:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field("name").regex("\\d+").script("term").size(10));
+ break;
+ case 0:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field("name").size(10));
+ break;
+ default:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").size(10));
+ break;
+ }
+ SearchResponse actionGet = facetRequest.execute().actionGet();
+ return actionGet.getFacets();
+ }
+ }, 5000
+ );
+ }
+
+ duel.close();
+ }
+
+ @Test
+ @Slow
+ public void testDuelByteFieldDataImpl() throws ElasticsearchException, IOException, InterruptedException, ExecutionException {
+ prepareCreate("test")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("name_paged")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "paged_bytes").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_fst")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata").field("format", "doc_values").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_paged_mv")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "paged_bytes").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_fst_mv")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_dv_mv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata").field("format", "doc_values").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("filtered")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").startObject("filter")
+ .startObject("regex").field("pattern", "\\d{1,2}").endObject().endObject()
+ .endObject()
+ // only 1 or 2 digits
+ .endObject()
+ .startObject("filtered_mv")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").startObject("filter")
+ .startObject("regex").field("pattern", "\\d{1,2}").endObject().endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("name_paged", "" + i)
+ .field("name_fst", "" + i)
+ .field("filtered", "" + i)
+ .field("name_paged_mv", "" + i, "" + Math.min(99, i + 1))
+ .field("name_fst_mv", "" + i, "" + Math.min(99, i + 1))
+ .field("filtered_mv", "" + i, "" + Math.min(99, i + 1), "" + (100 + i))
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", "" + (i + 100)).setSource(jsonBuilder().startObject()
+ .field("foo", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
+ ConcurrentDuel<Facets> duel = new ConcurrentDuel<Facets>(5);
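+ // run the duel once for the single-valued fields and once for the multi-valued ("_mv") variants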
+ String[] fieldPostFix = new String[]{"", "_mv"};
+ for (final String postfix : fieldPostFix) {
+ duel.duel(new ConcurrentDuel.DuelJudge<Facets>() {
+
+ @Override
+ public void judge(Facets firstRun, Facets result) {
+ for (Facet f : result) {
+ TermsFacet facet = (TermsFacet) f;
+ assertThat(facet.getName(), equalTo("termFacet"));
+ TermsFacet firstRunFacet = (TermsFacet) firstRun.getFacets().get(facet.getName());
+ assertThat(facet.getEntries().size(), equalTo(firstRunFacet.getEntries().size()));
+
+ if ("_mv".equals(postfix)) {
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(199l));
+ assertThat(facet.getOtherCount(), equalTo(179l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+ } else {
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+ }
+ List<? extends Entry> right = facet.getEntries();
+ List<? extends Entry> left = firstRunFacet.getEntries();
+
+ for (int i = 0; i < facet.getEntries().size(); i++) {
+ assertThat(left.get(i).getTerm(), equalTo(right.get(i).getTerm()));
+ assertThat(left.get(i).getCount(), equalTo(right.get(i).getCount()));
+ }
+ }
+ }
+ }, new ConcurrentDuel.DuelExecutor<Facets>() {
+ AtomicInteger count = new AtomicInteger();
+
+ @Override
+ public Facets run() {
+ final SearchRequestBuilder facetRequest;
+ int incrementAndGet = count.incrementAndGet();
+ final String field;
+ switch (incrementAndGet % 3) { // 1 -> filtered, 0 -> name_paged, otherwise -> name_fst
+ case 1:
+ field = "filtered" + postfix;
+ break;
+ case 0:
+ field = "name_paged" + postfix;
+ break;
+ default:
+ field = "name_fst" + postfix;
+ }
+ switch (incrementAndGet % 5) {
+ case 4:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field(field).script("\"\" + (Integer.parseInt(term) % 100)").size(10));
+ break;
+ case 3:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field(field).regex("\\d+").size(10));
+ break;
+ case 2:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field(field).regex("\\d+").script("term").size(10));
+ break;
+ case 1:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field(field).regex("\\d+").script("term").size(10));
+ break;
+ case 0:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field(field).size(10));
+ break;
+ default:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field(field).size(10));
+ break;
+ }
+ SearchResponse actionGet = facetRequest.execute().actionGet();
+ return actionGet.getFacets();
+ }
+ }, 5000
+ );
+ }
+
+ duel.close();
+ }
+
+ @Test
+ public void testSearchFilter() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "blue")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(termFilter("tag", "blue"))
+ .addFacet(termsFacet("facet1").field("tag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testFacetsWithSize0() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("lstag", 111)
+ .startArray("tag").value("xxx").value("yyy").endArray()
+ .startArray("ltag").value(1000l).value(2000l).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("lstag", 111)
+ .startArray("tag").value("zzz").value("yyy").endArray()
+ .startArray("ltag").value(3000l).value(2000l).endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(0)
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("stag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setSize(0)
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("stag").size(10))
+ .addFacet(termsFacet("facet2").field("tag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testTermsIndexFacet() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test1", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test1", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test2", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(0)
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("_index").size(10))
+ .execute().actionGet();
+
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("test1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("test2"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ }
+
+ try {
+ client().admin().indices().prepareDelete("test1").execute().actionGet();
+ client().admin().indices().prepareDelete("test2").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ @Test
+ public void testFilterFacets() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .startArray("tag").value("xxx").value("yyy").endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .startArray("tag").value("zzz").value("yyy").endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(filterFacet("facet1").filter(termFilter("stag", "111")))
+ .addFacet(filterFacet("facet2").filter(termFilter("tag", "xxx")))
+ .addFacet(filterFacet("facet3").filter(termFilter("tag", "yyy")))
+ .addFacet(filterFacet("facet4").filter(termFilter("tag", "zzz")))
+ .execute().actionGet();
+
+ FilterFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getCount(), equalTo(2l));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getCount(), equalTo(1l));
+
+ facet = searchResponse.getFacets().facet("facet3");
+ assertThat(facet.getName(), equalTo("facet3"));
+ assertThat(facet.getCount(), equalTo(2l));
+
+ facet = searchResponse.getFacets().facet("facet4");
+ assertThat(facet.getName(), equalTo("facet4"));
+ assertThat(facet.getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testTermsFacetsMissing() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("bstag").field("type", "byte").endObject()
+ .startObject("shstag").field("type", "short").endObject()
+ .startObject("istag").field("type", "integer").endObject()
+ .startObject("lstag").field("type", "long").endObject()
+ .startObject("fstag").field("type", "float").endObject()
+ .startObject("dstag").field("type", "double").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("bstag", 111)
+ .field("shstag", 111)
+ .field("istag", 111)
+ .field("lstag", 111)
+ .field("fstag", 111.1f)
+ .field("dstag", 111.1)
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("kuku", "kuku")
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
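+ // the second doc has no "stag" field, so the terms facet should report exactly one missing document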
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("stag").size(10))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getMissingCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testTermsFacetsNoHint() throws Exception {
+ testTermsFacets(null);
+ }
+
+ @Test
+ public void testTermsFacetsMapHint() throws Exception {
+ testTermsFacets("map");
+ }
+
+ private void testTermsFacets(String executionHint) throws Exception {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("bstag").field("type", "byte").endObject()
+ .startObject("shstag").field("type", "short").endObject()
+ .startObject("istag").field("type", "integer").endObject()
+ .startObject("lstag").field("type", "long").endObject()
+ .startObject("fstag").field("type", "float").endObject()
+ .startObject("dstag").field("type", "double").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("bstag", 111)
+ .field("shstag", 111)
+ .field("istag", 111)
+ .field("lstag", 111)
+ .field("fstag", 111.1f)
+ .field("dstag", 111.1)
+ .startArray("tag").value("xxx").value("yyy").endArray()
+ .startArray("ltag").value(1000l).value(2000l).endArray()
+ .startArray("dtag").value(1000.1).value(2000.1).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("bstag", 111)
+ .field("shstag", 111)
+ .field("istag", 111)
+ .field("lstag", 111)
+ .field("fstag", 111.1f)
+ .field("dstag", 111.1)
+ .startArray("tag").value("zzz").value("yyy").endArray()
+ .startArray("ltag").value(3000l).value(2000l).endArray()
+ .startArray("dtag").value(3000.1).value(2000.1).endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("stag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").field("tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getTotalCount(), equalTo(2l));
+ assertThat(facet.getOtherCount(), equalTo(0l));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ // Numeric
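+ // long and double fields should come back as typed facet implementations; facet3 additionally excludes the term 3000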
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("lstag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").field("ltag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet3").field("ltag").size(10).exclude(3000).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet, instanceOf(InternalLongTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet, instanceOf(InternalLongTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("2000"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("1000"), equalTo("3000")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("1000"), equalTo("3000")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ facet = searchResponse.getFacets().facet("facet3");
+ assertThat(facet, instanceOf(InternalLongTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet3"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("2000"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("1000"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("dstag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").field("dtag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet, instanceOf(InternalDoubleTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111.1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet, instanceOf(InternalDoubleTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("2000.1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("1000.1"), equalTo("3000.1")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("1000.1"), equalTo("3000.1")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("bstag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("istag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("shstag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ // Test Facet Filter
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("stag").size(10).facetFilter(termFilter("tag", "xxx")).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ // now with global
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("stag").size(10).facetFilter(termFilter("tag", "xxx")).global(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ // Test Facet Filter (with a type)
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("type1.stag").size(10).facetFilter(termFilter("tag", "xxx")).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ // Bounded Size
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(2).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ // Test Exclude
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).exclude("yyy").executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ // Test Order
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).order(TermsFacet.ComparatorType.TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(2).getTerm().string(), equalTo("zzz"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).order(TermsFacet.ComparatorType.REVERSE_TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(2).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("zzz"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ // Script
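+ // the first script appends param1 to each term ("xxx" -> "xxxa"); the second drops terms for which it evaluates to false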
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).script("term + param1").param("param1", "a").order(TermsFacet.ComparatorType.TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxxa"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyya"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(2).getTerm().string(), equalTo("zzza"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).script("term == 'xxx' ? false : true").order(TermsFacet.ComparatorType.TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("zzz"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ // Fields Facets
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").fields("stag", "tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(4));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("111"), equalTo("yyy")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("111"), equalTo("yyy")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("zzz"), equalTo("xxx")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(3).getTerm().string(), anyOf(equalTo("zzz"), equalTo("xxx")));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(1));
+
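+ // allTerms(true) with a query matching no documents should still return every indexed term, each with a count of 0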
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").field("tag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").fields("tag", "stag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(4));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(3).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(0));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").field("ltag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTermAsNumber().intValue(), anyOf(equalTo(1000), equalTo(2000), equalTo(3000)));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTermAsNumber().intValue(), anyOf(equalTo(1000), equalTo(2000), equalTo(3000)));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTermAsNumber().intValue(), anyOf(equalTo(1000), equalTo(2000), equalTo(3000)));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").field("dtag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTermAsNumber().doubleValue(), anyOf(equalTo(1000.1), equalTo(2000.1), equalTo(3000.1)));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTermAsNumber().doubleValue(), anyOf(equalTo(1000.1), equalTo(2000.1), equalTo(3000.1)));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTermAsNumber().doubleValue(), anyOf(equalTo(1000.1), equalTo(2000.1), equalTo(3000.1)));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+
+ // Script Field
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").scriptField("_source.stag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").scriptField("_source.tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getTotalCount(), equalTo(2l));
+ assertThat(facet.getOtherCount(), equalTo(0l));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet.getTotalCount(), equalTo(4l));
+ assertThat(facet.getOtherCount(), equalTo(0l));
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testTermFacetWithEqualTermDistribution() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // after indexing, there should be 10 occurrences of each of "foo", "bar" and "baz"
+ for (int i = 0; i < 5; i++) {
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("text", "foo bar")
+ .endObject()).execute().actionGet();
+ }
+ for (int i = 0; i < 5; i++) {
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("text", "bar baz")
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 5; i++) {
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("text", "baz foo")
+ .endObject()).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
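+ // all three terms tie at a count of 10, so entry order is unspecified; assert with anyOf() rather than fixed positions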
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("text").size(10))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ for (int j = 0; j < 3; j++) {
+ assertThat(facet.getEntries().get(j).getTerm().string(), anyOf(equalTo("foo"), equalTo("bar"), equalTo("baz")));
+ assertThat(facet.getEntries().get(j).getCount(), equalTo(10));
+ }
+ }
+ }
+
+ @Test
+ public void testStatsFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ // We have to specify mapping explicitly because by the time search is performed dynamic mapping might not
+ // be propagated to all nodes yet and some facets fail when the facet field is not defined
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1)
+ .startArray("multi_num").value(1.0).value(2.0f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 2)
+ .startArray("multi_num").value(3.0).value(4.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
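+ // expected: num -> {1, 2} (total 3, sum of squares 1 + 4 = 5); multi_num -> {1, 2, 3, 4} (total 10);
+ // stats3 doubles num -> {2, 4} (total 6, sum of squares 4 + 16 = 20)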
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(statisticalFacet("stats1").field("num"))
+ .addFacet(statisticalFacet("stats2").field("multi_num"))
+ .addFacet(statisticalScriptFacet("stats3").script("doc['num'].value * 2"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ StatisticalFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getCount(), equalTo(2l));
+ assertThat(facet.getTotal(), equalTo(3d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(2d));
+ assertThat(facet.getMean(), equalTo(1.5d));
+ assertThat(facet.getSumOfSquares(), equalTo(5d));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getCount(), equalTo(4l));
+ assertThat(facet.getTotal(), equalTo(10d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(2.5d));
+
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getName(), equalTo("stats3"));
+ assertThat(facet.getCount(), equalTo(2l));
+ assertThat(facet.getTotal(), equalTo(6d));
+ assertThat(facet.getMin(), equalTo(2d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(3d));
+ assertThat(facet.getSumOfSquares(), equalTo(20d));
+
+ // test multi field facet
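+ // combined values are {1, 2} and {1, 2, 3, 4}: count 6, total 13, sum of squares 1 + 4 + 1 + 4 + 9 + 16 = 35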
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(statisticalFacet("stats").fields("num", "multi_num"))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("stats");
+ assertThat(facet.getName(), equalTo("stats"));
+ assertThat(facet.getCount(), equalTo(6l));
+ assertThat(facet.getTotal(), equalTo(13d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(13d / 6d));
+ assertThat(facet.getSumOfSquares(), equalTo(35d));
+
+ // test cross field facet using the same facet name...
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(statisticalFacet("stats").field("num"))
+ .addFacet(statisticalFacet("stats").field("multi_num"))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("stats");
+ assertThat(facet.getName(), equalTo("stats"));
+ assertThat(facet.getCount(), equalTo(6l));
+ assertThat(facet.getTotal(), equalTo(13d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(13d / 6d));
+ assertThat(facet.getSumOfSquares(), equalTo(35d));
+ }
+ }
+
+ @Test
+ public void testHistoFacetEdge() throws Exception {
+ // TODO: Make sure facet doesn't fail in case of dynamic mapping
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 100)
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 200)
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 300)
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
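+ // 100, 200 and 300 fall exactly on bucket boundaries for interval(100), so each value should get its own bucket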
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("facet1").field("num").valueField("num").interval(100))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ HistogramFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(100l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(200l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getKey(), equalTo(300l));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testHistoFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1055)
+ .field("date", "1970-01-01T00:00:00")
+ .startArray("multi_num").value(13.0f).value(23.f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1065)
+ .field("date", "1970-01-01T00:00:25")
+ .startArray("multi_num").value(15.0f).value(31.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1175)
+ .field("date", "1970-01-01T00:02:00")
+ .startArray("multi_num").value(17.0f).value(25.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("stats1").field("num").valueField("num").interval(100))
+ .addFacet(histogramFacet("stats2").field("multi_num").valueField("multi_num").interval(10))
+ .addFacet(histogramFacet("stats3").keyField("num").valueField("multi_num").interval(100))
+ .addFacet(histogramScriptFacet("stats4").keyScript("doc['date'].date.minuteOfHour").valueScript("doc['num'].value"))
+ .addFacet(histogramFacet("stats5").field("date").interval(1, TimeUnit.MINUTES))
+ .addFacet(histogramScriptFacet("stats6").keyField("num").valueScript("doc['num'].value").interval(100))
+ .addFacet(histogramFacet("stats7").field("num").interval(100))
+ .addFacet(histogramScriptFacet("stats8").keyField("num").valueScript("doc.score").interval(100))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ HistogramFacet facet;
+
+ facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(1055d, 0.000001));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(1065d, 0.000001));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2120d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1060d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(1175d, 0.000001));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(1175d, 0.000001));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1175d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1175d));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(10l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(45d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(15d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(20l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(48d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(24d));
+ assertThat(facet.getEntries().get(2).getKey(), equalTo(30l));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), equalTo(31d));
+ assertThat(facet.getEntries().get(2).getMean(), equalTo(31d));
+
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getName(), equalTo("stats3"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(82d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(20.5d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(42d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(21d));
+
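+ // stats4 keys on minuteOfHour: the docs at 00:00:00 and 00:00:25 share key 0, the doc at 00:02:00 gets key 2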
+ facet = searchResponse.getFacets().facet("stats4");
+ assertThat(facet.getName(), equalTo("stats4"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(0l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2120d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1060d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1175d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1175d));
+
+ facet = searchResponse.getFacets().facet("stats5");
+ assertThat(facet.getName(), equalTo("stats5"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(0l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(TimeValue.timeValueMinutes(2).millis()));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
+ facet = searchResponse.getFacets().facet("stats6");
+ assertThat(facet.getName(), equalTo("stats6"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2120d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1060d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1175d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1175d));
+
+ facet = searchResponse.getFacets().facet("stats7");
+ assertThat(facet.getName(), equalTo("stats7"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
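+ // stats8 uses doc.score as the value; match_all scores every doc at 1.0, so totals should equal the bucket counts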
+ facet = searchResponse.getFacets().facet("stats8");
+ assertThat(facet.getName(), equalTo("stats8"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1d));
+
+ }
+ }
+
+ @Test
+ public void testRangeFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .startObject("value").field("type", "integer").endObject()
+ .startObject("multi_value").field("type", "float").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1055)
+ .field("value", 1)
+ .field("date", "1970-01-01T00:00:00")
+ .startArray("multi_num").value(13.0f).value(23.f).endArray()
+ .startArray("multi_value").value(10).value(11).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1065)
+ .field("value", 2)
+ .field("date", "1970-01-01T00:00:25")
+ .startArray("multi_num").value(15.0f).value(31.0f).endArray()
+ .startArray("multi_value").value(20).value(21).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1175)
+ .field("value", 3)
+ .field("date", "1970-01-01T00:00:52")
+ .startArray("multi_num").value(17.0f).value(25.0f).endArray()
+ .startArray("multi_value").value(30).value(31).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
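+ // num values are 1055, 1065 and 1175: the unbounded-below bucket (to 1056) should catch only 1055,
+ // the 1000-1170 bucket both 1055 and 1065 (ranges may overlap), and the unbounded-above bucket (from 1170) only 1175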
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(rangeFacet("range1").field("num").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range2").keyField("num").valueField("value").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range3").keyField("num").valueField("multi_value").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range4").keyField("multi_num").valueField("value").addUnboundedFrom(16).addRange(10, 26).addUnboundedTo(20))
+ .addFacet(rangeScriptFacet("range5").keyScript("doc['num'].value").valueScript("doc['value'].value").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range6").field("date").addUnboundedFrom("1970-01-01T00:00:26").addRange("1970-01-01T00:00:15", "1970-01-01T00:00:53").addUnboundedTo("1970-01-01T00:00:26"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ RangeFacet facet = searchResponse.getFacets().facet("range1");
+ assertThat(facet.getName(), equalTo("range1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(Double.parseDouble(facet.getEntries().get(0).getToAsString()), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(Double.parseDouble(facet.getEntries().get(1).getFromAsString()), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(Double.parseDouble(facet.getEntries().get(1).getToAsString()), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(1055 + 1065, 0.000001));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(1065, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(1175, 0.000001));
+ assertThat(facet.getEntries().get(2).getMin(), closeTo(1175, 0.000001));
+ assertThat(facet.getEntries().get(2).getMax(), closeTo(1175, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range2");
+ assertThat(facet.getName(), equalTo("range2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(3, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(3, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range3");
+ assertThat(facet.getName(), equalTo("range3"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(10 + 11, 0.000001));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(11, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(62, 0.000001));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(21, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(61, 0.000001));
+ assertThat(facet.getEntries().get(2).getMin(), closeTo(30, 0.000001));
+ assertThat(facet.getEntries().get(2).getMax(), closeTo(31, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range4");
+ assertThat(facet.getName(), equalTo("range4"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(16, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(3, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(26, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(1 + 2 + 3, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(20, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(1 + 2 + 3, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range5");
+ assertThat(facet.getName(), equalTo("range5"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(3, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(3, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range6");
+ assertThat(facet.getName(), equalTo("range6"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getToAsString(), equalTo("1970-01-01T00:00:26"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getFromAsString(), equalTo("1970-01-01T00:00:15"));
+ assertThat(facet.getEntries().get(1).getToAsString(), equalTo("1970-01-01T00:00:53"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getFromAsString(), equalTo("1970-01-01T00:00:26"));
+ }
+ }
+
+ @Test
+ public void testDateHistoFacetsCollectorMode() throws Exception {
+ testDateHistoFacets(FacetBuilder.Mode.COLLECTOR);
+ }
+
+ @Test
+ public void testDateHistoFacetsPostMode() throws Exception {
+ testDateHistoFacets(FacetBuilder.Mode.POST);
+ }
+
+ private void testDateHistoFacets(FacetBuilder.Mode mode) throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .startObject("date_in_seconds").field("type", "long").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ DateTimeFormatter parser = Joda.forPattern("dateOptionalTime").parser();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T01:01:01")
+ .field("num", 1)
+ .field("date_in_seconds", TimeUnit.SECONDS.convert(parser.parseMillis("2009-03-05T01:01:01"), TimeUnit.MILLISECONDS))
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T04:01:01")
+ .field("num", 2)
+ .field("date_in_seconds", TimeUnit.SECONDS.convert(parser.parseMillis("2009-03-05T04:01:01"), TimeUnit.MILLISECONDS))
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-06T01:01:01")
+ .field("num", 3)
+ .field("date_in_seconds", TimeUnit.SECONDS.convert(parser.parseMillis("2009-03-06T01:01:01"), TimeUnit.MILLISECONDS))
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
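+ // preZone shifts timestamps before they are bucketed, postZone shifts the returned bucket keys;
+ // factor(1000f) scales the seconds-resolution field in stats8 up to milliseconds before bucketing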
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("stats1").field("date").interval("day").mode(mode))
+ .addFacet(dateHistogramFacet("stats2").field("date").interval("day").preZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats3").field("date").valueField("num").interval("day").preZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats4").field("date").valueScript("doc['num'].value * 2").interval("day").preZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats5").field("date").interval("24h").mode(mode))
+ .addFacet(dateHistogramFacet("stats6").field("date").valueField("num").interval("day").preZone("-02:00").postZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats7").field("date").interval("quarter").mode(mode))
+ .addFacet(dateHistogramFacet("stats8").field("date_in_seconds").interval("day").factor(1000f).mode(mode))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ DateHistogramFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
+ // time zone causes the dates to shift by 2 hours
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+
+ // time zone causes the dates to shift by 2 hours
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getName(), equalTo("stats3"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(5d));
+
+ // time zone causes the dates to shift by 2 hours
+ facet = searchResponse.getFacets().facet("stats4");
+ assertThat(facet.getName(), equalTo("stats4"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(10d));
+
+ facet = searchResponse.getFacets().facet("stats5");
+ assertThat(facet.getName(), equalTo("stats5"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
+ facet = searchResponse.getFacets().facet("stats6");
+ assertThat(facet.getName(), equalTo("stats6"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04") - TimeValue.timeValueHours(2).millis()));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05") - TimeValue.timeValueHours(2).millis()));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(5d));
+
+ facet = searchResponse.getFacets().facet("stats7");
+ assertThat(facet.getName(), equalTo("stats7"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-01-01")));
+
+ // check date_histogram on a long field containing the date in seconds - we use a factor to scale the values to milliseconds.
+ facet = searchResponse.getFacets().facet("stats8");
+ assertThat(facet.getName(), equalTo("stats8"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/2141
+ public void testDateHistoFacets_preZoneBug() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T23:31:01")
+ .field("num", 1)
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T18:01:01")
+ .field("num", 2)
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T22:01:01")
+ .field("num", 3)
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("stats1").field("date").interval("day").preZone("+02:00"))
+ .addFacet(dateHistogramFacet("stats2").field("date").valueField("num").interval("day").preZone("+01:30"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ // the +02:00 pre-zone pushes the 23:31 and 22:01 docs into the next day
+ DateHistogramFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+
+ // the +01:30 pre-zone pushes only the 23:31 doc into the next day
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(5d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1d));
+ }
+ }
+
+ @Test
+ public void testTermsStatsFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field").field("type", "string").endObject()
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "xxx")
+ .field("num", 100.0)
+ .startArray("multi_num").value(1.0).value(2.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "xxx")
+ .field("num", 200.0)
+ .startArray("multi_num").value(2.0).value(3.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "yyy")
+ .field("num", 500.0)
+ .startArray("multi_num").value(5.0).value(6.0f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("stats1").keyField("field").valueField("num"))
+ .addFacet(termsStatsFacet("stats2").keyField("field").valueField("multi_num"))
+ .addFacet(termsStatsFacet("stats3").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.COUNT))
+ .addFacet(termsStatsFacet("stats4").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.COUNT))
+ .addFacet(termsStatsFacet("stats5").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.TOTAL))
+ .addFacet(termsStatsFacet("stats6").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.TOTAL))
+
+ .addFacet(termsStatsFacet("stats7").keyField("field").valueField("num").allTerms())
+ .addFacet(termsStatsFacet("stats8").keyField("field").valueField("multi_num").allTerms())
+ .addFacet(termsStatsFacet("stats9").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.COUNT).allTerms())
+ .addFacet(termsStatsFacet("stats10").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.COUNT).allTerms())
+ .addFacet(termsStatsFacet("stats11").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.TOTAL).allTerms())
+ .addFacet(termsStatsFacet("stats12").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.TOTAL).allTerms())
+
+ .addFacet(termsStatsFacet("stats13").keyField("field").valueScript("doc['num'].value * 2"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ TermsStatsFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(100d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(200d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(1d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(3d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(5d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(6d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats4");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats5");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(300d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats6");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(11d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(8d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats7");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats8");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats9");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats10");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats11");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(300d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats12");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(11d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(8d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats13");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(600d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(1000d, 0.00001d));
+ }
+ }
+
+ @Test
+ public void testNumericTermsStatsFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("lField").field("type", "long").endObject()
+ .startObject("dField").field("type", "double").endObject()
+ .startObject("num").field("type", "float").endObject()
+ .startObject("multi_num").field("type", "integer").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("lField", 100l)
+ .field("dField", 100.1d)
+ .field("num", 100.0)
+ .startArray("multi_num").value(1.0).value(2.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("lField", 100l)
+ .field("dField", 100.1d)
+ .field("num", 200.0)
+ .startArray("multi_num").value(2.0).value(3.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("lField", 200l)
+ .field("dField", 200.2d)
+ .field("num", 500.0)
+ .startArray("multi_num").value(5.0).value(6.0f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("stats1").keyField("lField").valueField("num"))
+ .addFacet(termsStatsFacet("stats2").keyField("dField").valueField("num"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ TermsStatsFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("100"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(100d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(200d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("200"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("100.1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(100d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(200d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("200.2"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+ }
+ }
+
+ @Test
+ public void testTermsStatsFacets2() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "float").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("num", i % 10).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("stats1").keyField("num").valueScript("doc.score").order(TermsStatsFacet.ComparatorType.COUNT))
+ .addFacet(termsStatsFacet("stats2").keyField("num").valueScript("doc.score").order(TermsStatsFacet.ComparatorType.TOTAL))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ TermsStatsFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getEntries().size(), equalTo(10));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getEntries().size(), equalTo(10));
+ }
+ }
+
+ @Test
+ public void testQueryFacet() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("num", i % 10).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(queryFacet("query").query(termQuery("num", 1)))
+ .execute().actionGet();
+
+ QueryFacet facet = searchResponse.getFacets().facet("query");
+ assertThat(facet.getCount(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(queryFacet("query").query(termQuery("num", 1)).global(true))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("query");
+ assertThat(facet.getCount(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(queryFacet("query").query(termsQuery("num", new long[]{1, 2})).facetFilter(termFilter("num", 1)).global(true))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("query");
+ assertThat(facet.getCount(), equalTo(2l));
+ }
+ }
+
+ @Test // #3479: Null pointer exception for POST mode facets if facet_filter accepts no documents
+ public void testFilterFacetWithFacetFilterPostMode() throws IOException {
+ createIndex("test");
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "xxx")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(
+ termsFacet("facet1").field("field").mode(FacetBuilder.Mode.POST).facetFilter(termFilter("tag", "doesnotexist"))
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(0));
+ }
+ }
+
+ private long utcTimeInMillis(String time) {
+ return timeInMillis(time, DateTimeZone.UTC);
+ }
+
+ private long timeInMillis(String time, DateTimeZone zone) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time);
+ }
+}
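A note on the pre-zone arithmetic asserted in the tests above: preZone shifts each document's UTC timestamp before it is rounded down to the histogram interval, which is why a doc at 2009-03-05T23:31:01 lands in the 2009-03-06 day bucket under preZone "+02:00". The following minimal sketch reproduces that rounding with plain Joda-Time (the library the tests already use via ISODateTimeFormat); dayBucketUtc is an illustrative helper, not an Elasticsearch API:

    import java.util.concurrent.TimeUnit;

    import org.joda.time.format.ISODateTimeFormat;

    public class PreZoneSketch {

        private static final long DAY = TimeUnit.DAYS.toMillis(1);

        // Parse a UTC timestamp, apply the pre-zone offset, then floor to the day.
        // This mirrors what the date_histogram assertions with interval "day" expect.
        static long dayBucketUtc(String utcTime, long preZoneOffsetMillis) {
            long utc = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC().parseMillis(utcTime);
            long shifted = utc + preZoneOffsetMillis;
            return (shifted / DAY) * DAY; // integer floor is fine for post-1970 timestamps
        }

        public static void main(String[] args) {
            long plus2h = TimeUnit.HOURS.toMillis(2);
            long bucket = dayBucketUtc("2009-03-05T23:31:01", plus2h);
            long expected = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC().parseMillis("2009-03-06");
            System.out.println(bucket == expected); // true: 23:31 + 02:00 crosses into March 6
        }
    }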
diff --git a/src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java b/src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java
new file mode 100644
index 0000000..b06e640
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java
@@ -0,0 +1,423 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests the shard_size option of the terms facet.
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class ShardSizeTermsFacetTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * To properly test the effect of shard_size we need to force exactly 2 shards and to control the
+ * routing such that specific documents end up on each shard. Using the "djb" routing hash and ignoring the
+ * doc type when hashing ensures that docs with routing value "1" end up in a different shard than docs with
+ * routing value "2".
+ */
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .put("cluster.routing.operation.hash.type", "djb")
+ .put("cluster.routing.operation.use_type", "false")
+ .build();
+ }
+
+ @Test
+ public void noShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 8)
+ .put("3", 8)
+ .put("2", 4)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 8)
+ .put("3", 8)
+ .put("2", 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 5)
+ .put("2", 4)
+ .put("3", 3) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_withExecutionHintMap() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).executionHint("map").order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 8)
+ .put("3", 8)
+ .put("2", 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_withExecutionHintMap_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).executionHint("map").order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 5)
+ .put("2", 4)
+ .put("3", 3) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 4)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 5)
+ .put(2, 4)
+ .put(3, 3)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 4)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 5)
+ .put(2, 4)
+ .put(3, 3)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ private void indexData() throws Exception {
+
+ /*
+ || || size = 3, shard_size = 5 || shard_size = size = 3 ||
+ ||==========||==================================================||===============================================||
+ || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "3" - 3 | "2" - 4 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || reduced: || "1" - 8 | "3" - 8 | "2" - 5 <= CORRECT || "1" - 8 | "3" - 8 | "2" - 4 <= WRONG ||
+ */
+
+ indexDoc("1", "1", 5);
+ indexDoc("1", "2", 4);
+ indexDoc("1", "3", 3);
+ indexDoc("1", "4", 2);
+ indexDoc("1", "5", 1);
+
+ // total docs in shard "1" = 15
+
+ indexDoc("2", "1", 3);
+ indexDoc("2", "2", 1);
+ indexDoc("2", "3", 5);
+ indexDoc("2", "4", 2);
+ indexDoc("2", "5", 1);
+
+ // total docs in shard "2" = 12
+
+ client().admin().indices().prepareFlush("idx").execute().actionGet();
+ client().admin().indices().prepareRefresh("idx").execute().actionGet();
+
+ long totalOnOne = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ long totalOnTwo = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ }
+
+ private void indexDoc(String shard, String key, int times) throws Exception {
+ for (int i = 0; i < times; i++) {
+ client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .endObject()).execute().actionGet();
+ }
+ }
+
+}
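The table in indexData() above encodes why shard_size matters, and the reduce arithmetic can be checked in isolation. A minimal sketch in plain Java (independent of Elasticsearch; class and method names are illustrative): each shard reports only its shard_size most frequent terms, so with shard_size = 3 shard 2 never reports "2" -> 1 and the reduced count for "2" comes out as 4 instead of the exact 5 that shard_size = 5 recovers:

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class ShardSizeReduceSketch {

        // Per-shard step: keep only the k most frequent terms, as each shard does before the reduce.
        static Map<String, Integer> topK(Map<String, Integer> counts, int k) {
            return counts.entrySet().stream()
                    .sorted(Map.Entry.<String, Integer>comparingByValue().reversed())
                    .limit(k)
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                            (a, b) -> a, LinkedHashMap::new));
        }

        public static void main(String[] args) {
            Map<String, Integer> shard1 = new HashMap<>();
            shard1.put("1", 5); shard1.put("2", 4); shard1.put("3", 3); shard1.put("4", 2); shard1.put("5", 1);
            Map<String, Integer> shard2 = new HashMap<>();
            shard2.put("1", 3); shard2.put("2", 1); shard2.put("3", 5); shard2.put("4", 2); shard2.put("5", 1);

            for (int shardSize : new int[]{3, 5}) {
                Map<String, Integer> reduced = new HashMap<>();
                topK(shard1, shardSize).forEach((t, c) -> reduced.merge(t, c, Integer::sum));
                topK(shard2, shardSize).forEach((t, c) -> reduced.merge(t, c, Integer::sum));
                // With shard_size = 3, shard 2 never reports "2" -> 1, so "2" reduces to 4 instead of 5.
                System.out.println("shard_size=" + shardSize + ": term \"2\" counts " + reduced.get("2"));
            }
        }
    }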
diff --git a/src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java b/src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java
new file mode 100644
index 0000000..911be14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests terms facets on unmapped and partially mapped fields.
+ */
+public class UnmappedFieldsTermsFacetsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", numberOfShards())
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ protected int numberOfShards() {
+ return 5;
+ }
+
+ /**
+ * Tests the terms facet when faceting on unmapped field
+ */
+ @Test
+ public void testUnmappedField() throws Exception {
+ createIndex("idx");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("mapped").field("mapped").size(10))
+ .addFacet(termsFacet("unmapped_bool").field("unmapped_bool").size(10))
+ .addFacet(termsFacet("unmapped_str").field("unmapped_str").size(10))
+ .addFacet(termsFacet("unmapped_byte").field("unmapped_byte").size(10))
+ .addFacet(termsFacet("unmapped_short").field("unmapped_short").size(10))
+ .addFacet(termsFacet("unmapped_int").field("unmapped_int").size(10))
+ .addFacet(termsFacet("unmapped_long").field("unmapped_long").size(10))
+ .addFacet(termsFacet("unmapped_float").field("unmapped_float").size(10))
+ .addFacet(termsFacet("unmapped_double").field("unmapped_double").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ // all values should be returned for the mapped field
+ TermsFacet facet = searchResponse.getFacets().facet("mapped");
+ assertThat(facet.getName(), equalTo("mapped"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ // no values should be returned for the unmapped field (all docs are missing)
+
+ facet = searchResponse.getFacets().facet("unmapped_str");
+ assertThat(facet.getName(), equalTo("unmapped_str"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_bool");
+ assertThat(facet.getName(), equalTo("unmapped_bool"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_byte");
+ assertThat(facet.getName(), equalTo("unmapped_byte"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_short");
+ assertThat(facet.getName(), equalTo("unmapped_short"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_int");
+ assertThat(facet.getName(), equalTo("unmapped_int"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_long");
+ assertThat(facet.getName(), equalTo("unmapped_long"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_float");
+ assertThat(facet.getName(), equalTo("unmapped_float"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_double");
+ assertThat(facet.getName(), equalTo("unmapped_double"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ }
+
+
+ /**
+ * Tests the terms facet when faceting on partially unmapped field. An example for this scenario is when searching
+ * across indices, where the field is mapped in some indices and unmapped in others.
+ */
+ @Test
+ public void testPartiallyUnmappedField() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("mapped_idx")
+ .setSettings(indexSettings())
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("partially_mapped_byte").field("type", "byte").endObject()
+ .startObject("partially_mapped_short").field("type", "short").endObject()
+ .startObject("partially_mapped_int").field("type", "integer").endObject()
+ .startObject("partially_mapped_long").field("type", "long").endObject()
+ .startObject("partially_mapped_float").field("type", "float").endObject()
+ .startObject("partially_mapped_double").field("type", "double").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ createIndex("unmapped_idx");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("mapped_idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped", "" + i)
+ .field("partially_mapped_str", "" + i)
+ .field("partially_mapped_bool", i % 2 == 0)
+ .field("partially_mapped_byte", i)
+ .field("partially_mapped_short", i)
+ .field("partially_mapped_int", i)
+ .field("partially_mapped_long", i)
+ .field("partially_mapped_float", i)
+ .field("partially_mapped_double", i)
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 10; i < 20; i++) {
+ client().prepareIndex("unmapped_idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+
+ flushAndRefresh();
+
+ SearchResponse searchResponse = client().prepareSearch("mapped_idx", "unmapped_idx")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("mapped").field("mapped").size(10))
+ .addFacet(termsFacet("partially_mapped_str").field("partially_mapped_str").size(10))
+ .addFacet(termsFacet("partially_mapped_bool").field("partially_mapped_bool").size(10))
+ .addFacet(termsFacet("partially_mapped_byte").field("partially_mapped_byte").size(10))
+ .addFacet(termsFacet("partially_mapped_short").field("partially_mapped_short").size(10))
+ .addFacet(termsFacet("partially_mapped_int").field("partially_mapped_int").size(10))
+ .addFacet(termsFacet("partially_mapped_long").field("partially_mapped_long").size(10))
+ .addFacet(termsFacet("partially_mapped_float").field("partially_mapped_float").size(10))
+ .addFacet(termsFacet("partially_mapped_double").field("partially_mapped_double").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+
+ // all values should be returned for the mapped field
+ TermsFacet facet = searchResponse.getFacets().facet("mapped");
+ assertThat(facet.getName(), equalTo("mapped"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(20l));
+ assertThat(facet.getOtherCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ // only the values of the mapped index should be returned for the partially mapped field (all docs of
+ // the unmapped index should be missing)
+
+ facet = searchResponse.getFacets().facet("partially_mapped_str");
+ assertThat(facet.getName(), equalTo("partially_mapped_str"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_bool");
+ assertThat(facet.getName(), equalTo("partially_mapped_bool"));
+ ArrayList<String> terms = new ArrayList<String>();
+ for (TermsFacet.Entry entry : facet.getEntries()) {
+ terms.add(entry.getTerm().toString());
+ }
+ assertThat("unexpected number of bool terms:" + terms, facet.getEntries().size(), is(2));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_byte");
+ assertThat(facet.getName(), equalTo("partially_mapped_byte"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_short");
+ assertThat(facet.getName(), equalTo("partially_mapped_short"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_int");
+ assertThat(facet.getName(), equalTo("partially_mapped_int"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_long");
+ assertThat(facet.getName(), equalTo("partially_mapped_long"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_float");
+ assertThat(facet.getName(), equalTo("partially_mapped_float"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_double");
+ assertThat(facet.getName(), equalTo("partially_mapped_double"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+ }
+
+ @Test
+ public void testMappedYetMissingField() throws IOException {
+ client().admin().indices().prepareCreate("idx")
+ .setSettings(indexSettings())
+ .addMapping("type", jsonBuilder().startObject()
+ .field("type").startObject()
+ .field("properties").startObject()
+ .field("string").startObject().field("type", "string").endObject()
+ .field("long").startObject().field("type", "long").endObject()
+ .field("double").startObject().field("type", "double").endObject()
+ .endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("foo", "bar")
+ .endObject()).execute().actionGet();
+ }
+ flushAndRefresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("string").field("string").size(10))
+ .addFacet(termsFacet("long").field("long").size(10))
+ .addFacet(termsFacet("double").field("double").size(10))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("string");
+ assertThat(facet.getName(), equalTo("string"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("long");
+ assertThat(facet.getName(), equalTo("long"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("double");
+ assertThat(facet.getName(), equalTo("double"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+ }
+
+ /**
+ * Tests the terms facet when faceting on multiple fields
+ * case 1: some but not all the fields are mapped
+ * case 2: all the fields are unmapped
+ */
+ @Test
+ public void testMultiFields() throws Exception {
+ createIndex("idx");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped_str", "" + i)
+ .field("mapped_long", i)
+ .field("mapped_double", i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("string").fields("mapped_str", "unmapped").size(10))
+ .addFacet(termsFacet("long").fields("mapped_long", "unmapped").size(10))
+ .addFacet(termsFacet("double").fields("mapped_double", "unmapped").size(10))
+ .addFacet(termsFacet("all_unmapped").fields("unmapped", "unmapped_1").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ TermsFacet facet = searchResponse.getFacets().facet("string");
+ assertThat(facet.getName(), equalTo("string"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ facet = searchResponse.getFacets().facet("long");
+ assertThat(facet.getName(), equalTo("long"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ facet = searchResponse.getFacets().facet("double");
+ assertThat(facet.getName(), equalTo("double"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ facet = searchResponse.getFacets().facet("all_unmapped");
+ assertThat(facet.getName(), equalTo("all_unmapped"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+ }
+
+}
\ No newline at end of file
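These assertions all revolve around the same accounting: for a single-valued field, every document is counted under exactly one of a returned term, "other", or "missing", so total = returned + other, and a fully unmapped field degenerates to total = 0, other = 0, missing = doc count. A toy model of that bookkeeping in plain Java (not Elasticsearch's implementation):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class FacetCountersSketch {

        public static void main(String[] args) {
            // One (possibly null) value per document; null models an unmapped/missing field.
            List<String> values = Arrays.asList("a", "a", "b", null, "c", null);
            int size = 1; // keep only the single most frequent term, like size(1)

            Map<String, Integer> counts = new HashMap<>();
            long missing = 0;
            for (String v : values) {
                if (v == null) missing++; else counts.merge(v, 1, Integer::sum);
            }

            long total = values.size() - missing; // documents that had a value at all
            long returned = counts.values().stream().mapToInt(Integer::intValue)
                    .max().orElse(0);             // count of the single returned term ("a" -> 2)
            long other = total - returned;        // values outside the returned top `size` terms

            System.out.println("total=" + total + " returned=" + returned
                    + " other=" + other + " missing=" + missing);
            // For a fully unmapped field every value is null: total=0, other=0,
            // missing=doc count, which is exactly what the unmapped_* assertions check.
        }
    }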
diff --git a/src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java b/src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java
new file mode 100644
index 0000000..3b2a34e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java
@@ -0,0 +1,548 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.termsstats;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsStatsFacet;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class ShardSizeTermsStatsFacetTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * To properly test the effect/functionality of shard_size, we need to force having 2 shards and also
+ * control the routing such that certain documents end up on each shard. Using the "djb" routing hash and ignoring the
+ * doc type when hashing ensures that docs with routing value "1" end up in a different shard than docs with
+ * routing value "2".
+ */
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .put("cluster.routing.operation.hash.type", "djb")
+ .put("cluster.routing.operation.use_type", "false")
+ .build();
+ }
+
+ @Test
+ public void noShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
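+ // without shard_size each shard only ships its top 3 terms, so the single occurrence of "2" on
+ // shard 2 is lost and its reduced count is 4 instead of the true 5 (see the table in indexData())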
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 4l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void noShardSize_string_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
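+ // size(0) requests all terms, so each shard returns its complete term list and the reduced counts are exact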
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .put("4", 4l)
+ .put("5", 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
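+ // size(0) returns all terms regardless, so the shard_size of 3 should have no effect on the (exact) counts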
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).shardSize(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .put("4", 4l)
+ .put("5", 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
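+ // shard_size(5) lets each shard ship all 5 of its terms, so the reduced top-3 counts are exact ("2" - 5)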
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
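+ // routing "1" confines the search to a single shard, so the facet reflects shard 1's local counts only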
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 5l)
+ .put("2", 4l)
+ .put("3", 3l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
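+ // the long and double tests below mirror the string variants above, differing only in the key field type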
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).shardSize(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).shardSize(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ private void indexData() throws Exception {
+
+ /*
+  ||          ||           size = 3, shard_size = 5             ||         shard_size = size = 3           ||
+  ||==========||=================================================||=========================================||
+  || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "2" - 4 | "3" - 3             ||
+  ||----------||-------------------------------------------------||-----------------------------------------||
+  || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2             ||
+  ||----------||-------------------------------------------------||-----------------------------------------||
+  || reduced: || "1" - 8 | "2" - 5 | "3" - 8 | "4" - 4 | "5" - 2 || "1" - 8 | "2" - 4 | "3" - 8 | "4" - 2   ||
+  ||----------||-------------------------------------------------||-----------------------------------------||
+  || top 3:   || "1" - 8 | "3" - 8 | "2" - 5  <= CORRECT         || "1" - 8 | "3" - 8 | "2" - 4  <= WRONG   ||
+ */
+
+
+ indexDoc("1", "1", 5);
+ indexDoc("1", "2", 4);
+ indexDoc("1", "3", 3);
+ indexDoc("1", "4", 2);
+ indexDoc("1", "5", 1);
+
+ // total docs in shard "1" = 15
+
+ indexDoc("2", "1", 3);
+ indexDoc("2", "2", 1);
+ indexDoc("2", "3", 5);
+ indexDoc("2", "4", 2);
+ indexDoc("2", "5", 1);
+
+ // total docs in shard "2" = 12
+
+ client().admin().indices().prepareFlush("idx").execute().actionGet();
+ client().admin().indices().prepareRefresh("idx").execute().actionGet();
+
+ long totalOnOne = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ long totalOnTwo = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ }
+
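+ // indexes "times" identical docs routed to the given shard; "value" is always 1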
+ private void indexDoc(String shard, String key, int times) throws Exception {
+ for (int i = 0; i < times; i++) {
+ client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .field("value", 1)
+ .endObject()).execute().actionGet();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java b/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java
new file mode 100644
index 0000000..c397b8a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java
@@ -0,0 +1,486 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fields;
+
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.refreshRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SearchFieldsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 1) // why just one?
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void testStoredFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .startObject("field3").field("type", "string").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .field("field3", "value3")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field1").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+
+ // field2 is not stored, check that it gets extracted from source
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field2").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field2").value().toString(), equalTo("value2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field3").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+
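+ // "*" expands to stored fields only: the unstored field2 is absent and _source is not loaded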
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).source(), nullValue());
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").addField("_source").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).source(), notNullValue());
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+ }
+
+ @Test
+ public void testScriptDocAndFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num1").field("type", "double").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).field("date", "1970-01-01T00:00:00").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).field("date", "1970-01-01T00:00:25").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).field("date", "1970-01-01T00:02:00").endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
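+ // doc['...'] reads from field data while _fields['...'] reads the stored field; both should yield the same value here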
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .addScriptField("sNum1_field", "_fields['num1'].value")
+ .addScriptField("date1", "doc['date'].date.millis")
+ .execute().actionGet();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).isSourceEmpty(), equalTo(true));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1_field").values().get(0), equalTo(1.0));
+ assertThat((Long) response.getHits().getAt(0).fields().get("date1").values().get(0), equalTo(0l));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1_field").values().get(0), equalTo(2.0));
+ assertThat((Long) response.getHits().getAt(1).fields().get("date1").values().get(0), equalTo(25000l));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1_field").values().get(0), equalTo(3.0));
+ assertThat((Long) response.getHits().getAt(2).fields().get("date1").values().get(0), equalTo(120000l));
+
+ logger.info("running doc['num1'].value * factor");
+ Map<String, Object> params = MapBuilder.<String, Object>newMapBuilder().put("factor", 2.0).map();
+ response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value * factor", params)
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(4.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(6.0));
+ }
+
+ @Test
+ public void testScriptFieldUsingSource() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject()
+ .startObject("obj1").field("test", "something").endObject()
+ .startObject("obj2").startArray("arr2").value("arr_value1").value("arr_value2").endArray().endObject()
+ .startArray("arr3").startObject().field("arr3_field1", "arr3_value1").endObject().endArray()
+ .endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
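+ // _source scripts navigate the original JSON, so objects come back as Maps and arrays as Lists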
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("s_obj1", "_source.obj1")
+ .addScriptField("s_obj1_test", "_source.obj1.test")
+ .addScriptField("s_obj2", "_source.obj2")
+ .addScriptField("s_obj2_arr2", "_source.obj2.arr2")
+ .addScriptField("s_arr3", "_source.arr3")
+ .execute().actionGet();
+
+ assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));
+
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj1 = response.getHits().getAt(0).field("s_obj1").value();
+ assertThat(sObj1.get("test").toString(), equalTo("something"));
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj2 = response.getHits().getAt(0).field("s_obj2").value();
+ List sObj2Arr2 = (List) sObj2.get("arr2");
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ sObj2Arr2 = (List) response.getHits().getAt(0).field("s_obj2_arr2").value();
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ List sObj2Arr3 = (List) response.getHits().getAt(0).field("s_arr3").value();
+ assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1"));
+ }
+
+ @Test
+ public void testPartialFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startObject("obj1")
+ .startArray("arr1")
+ .startObject().startObject("obj2").field("field2", "value21").endObject().endObject()
+ .startObject().startObject("obj2").field("field2", "value22").endObject().endObject()
+ .endArray()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
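+ // partial1 includes only paths matching "obj1.arr1.*"; partial2 excludes "obj1" and keeps everything else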
+ SearchResponse response = client().prepareSearch("test")
+ .addPartialField("partial1", "obj1.arr1.*", null)
+ .addPartialField("partial2", null, "obj1")
+ .execute().actionGet();
+ assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));
+
+ Map<String, Object> partial1 = response.getHits().getAt(0).field("partial1").value();
+ assertThat(partial1, notNullValue());
+ assertThat(partial1.containsKey("field1"), equalTo(false));
+ assertThat(partial1.containsKey("obj1"), equalTo(true));
+ assertThat(((Map) partial1.get("obj1")).get("arr1"), instanceOf(List.class));
+
+ Map<String, Object> partial2 = response.getHits().getAt(0).field("partial2").value();
+ assertThat(partial2, notNullValue());
+ assertThat(partial2.containsKey("obj1"), equalTo(false));
+ assertThat(partial2.containsKey("field1"), equalTo(true));
+ }
+
+ @Test
+ public void testStoredFieldsWithoutSource() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("byte_field").field("type", "byte").field("store", "yes").endObject()
+ .startObject("short_field").field("type", "short").field("store", "yes").endObject()
+ .startObject("integer_field").field("type", "integer").field("store", "yes").endObject()
+ .startObject("long_field").field("type", "long").field("store", "yes").endObject()
+ .startObject("float_field").field("type", "float").field("store", "yes").endObject()
+ .startObject("double_field").field("type", "double").field("store", "yes").endObject()
+ .startObject("date_field").field("type", "date").field("store", "yes").endObject()
+ .startObject("boolean_field").field("type", "boolean").field("store", "yes").endObject()
+ .startObject("binary_field").field("type", "binary").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("byte_field", (byte) 1)
+ .field("short_field", (short) 2)
+ .field("integer_field", 3)
+ .field("long_field", 4l)
+ .field("float_field", 5.0f)
+ .field("double_field", 6.0d)
+ .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
+ .field("boolean_field", true)
+ .field("binary_field", Base64.encodeBytes("testing text".getBytes("UTF8")))
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
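+ // _source is disabled in the mapping, so every value must come back from its stored field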
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addField("byte_field")
+ .addField("short_field")
+ .addField("integer_field")
+ .addField("long_field")
+ .addField("float_field")
+ .addField("double_field")
+ .addField("date_field")
+ .addField("boolean_field")
+ .addField("binary_field")
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(9));
+
+
+ assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("integer_field").value(), equalTo((Object) 3));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("long_field").value(), equalTo((Object) 4l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0f));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
+ String dateTime = Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) dateTime));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) Boolean.TRUE));
+ assertThat(((BytesReference) searchResponse.getHits().getAt(0).fields().get("binary_field").value()).toBytesArray(), equalTo((BytesReference) new BytesArray("testing text".getBytes("UTF8"))));
+
+ }
+
+ @Test
+ public void testSearchFields_metaData() throws Exception {
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index")
+ .setTypes("my-type1")
+ .addField("field1").addField("_routing")
+ .get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field("field1").isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value"));
+ assertThat(searchResponse.getHits().getAt(0).field("_routing").isMetadataField(), equalTo(true));
+ assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1"));
+ }
+
+ @Test
+ public void testSearchFields_nonLeafField() throws Exception {
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addField("field1").get();
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("field [field1] isn't a leaf field"));
+ }
+
+ @Test
+ public void testGetFields_complexField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties")
+ .startObject("field1").field("type", "object")
+ .startObject("field2").field("type", "object")
+ .startObject("field3").field("type", "object")
+ .startObject("field4").field("type", "string").field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ BytesReference source = jsonBuilder().startObject()
+ .startArray("field1")
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().bytes();
+
+ client().prepareIndex("my-index", "my-type1", "1").setSource(source).get();
+ client().prepareIndex("my-index", "my-type2", "1").setRefresh(true).setSource(source).get();
+
+
+ String field = "field1.field2.field3.field4";
+ SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addField(field).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2"));
+
+ searchResponse = client().prepareSearch("my-index").setTypes("my-type2").addField(field).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2"));
+ }
+
+ @Test
+ public void testFieldsPulledFromFieldData() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("string_field").field("type", "string").endObject()
+ .startObject("byte_field").field("type", "byte").endObject()
+ .startObject("short_field").field("type", "short").endObject()
+ .startObject("integer_field").field("type", "integer").endObject()
+ .startObject("long_field").field("type", "long").endObject()
+ .startObject("float_field").field("type", "float").endObject()
+ .startObject("double_field").field("type", "double").endObject()
+ .startObject("date_field").field("type", "date").endObject()
+ .startObject("boolean_field").field("type", "boolean").endObject()
+ .startObject("binary_field").field("type", "binary").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("string_field", "foo")
+ .field("byte_field", (byte) 1)
+ .field("short_field", (short) 2)
+ .field("integer_field", 3)
+ .field("long_field", 4l)
+ .field("float_field", 5.0f)
+ .field("double_field", 6.0d)
+ .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
+ .field("boolean_field", true)
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery())
+ .addFieldDataField("string_field")
+ .addFieldDataField("byte_field")
+ .addFieldDataField("short_field")
+ .addFieldDataField("integer_field")
+ .addFieldDataField("long_field")
+ .addFieldDataField("float_field")
+ .addFieldDataField("double_field")
+ .addFieldDataField("date_field")
+ .addFieldDataField("boolean_field");
+ SearchResponse searchResponse = builder.execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(9));
+
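+ // field data normalizes values: integers widen to long, floats to double, dates become epoch millis and true renders as "T"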
+ assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("integer_field").value(), equalTo((Object) 3l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("long_field").value(), equalTo((Object) 4l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) 1332374400000L));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value().toString(), equalTo("T"));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
new file mode 100644
index 0000000..a607f30
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
@@ -0,0 +1,715 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.index.query.MatchAllFilterBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDistanceScoreGeoLinGaussExp() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 10).field("lon", 20).endObject()
+ .endObject()));
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 22).endObject()
+ .endObject()));
+
+ int numDummyDocs = 20;
+ for (int i = 1; i <= numDummyDocs; i++) {
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId(Integer.toString(i + 3))
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11 + i).field("lon", 22 + i)
+ .endObject().endObject()));
+ }
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
+ // Test Gauss
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(new Float(20));
+ lonlat.add(new Float(11));
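+ // coordinate lists are [lon, lat]: the origin is lon 20 / lat 11, which is closest to doc "1"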
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(termQuery("test", "value"))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ // Test Lin
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(termQuery("test", "value"))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), linearDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ // Test Exp
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(termQuery("test", "value"))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ }
+
+ @Test
+ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ // add two docs within the offset
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()));
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("2").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()));
+
+ // add docs outside offset
+ int numDummyDocs = 20;
+ for (int i = 0; i < numDummyDocs; i++) {
+ indexBuilders.add(client().prepareIndex().setType("type1").setId(Integer.toString(i + 3)).setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 3.0 + i).endObject()));
+ }
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
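+ // docs "1" (num 0.5) and "2" (num 1.7) both lie within offset 1.0 of the origin 1.0, so they should tie with the maximal score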
+ // Test Gauss
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ for (int i = 0; i < numDummyDocs; i++) {
+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));
+ }
+
+ // Test Exp
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"),
+ exponentialDecayFunction("num", 1.0, 5.0).setOffset(1.0)).boostMode(
+ CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ for (int i = 0; i < numDummyDocs; i++) {
+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));
+ }
+ // Test Lin
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ }
+
+ @Test
+ public void testBoostModeSettingWorks() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21).endObject()
+ .endObject()));
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value value").startObject("loc").field("lat", 11).field("lon", 20)
+ .endObject().endObject()));
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
+ // Test MULT boost mode (gauss decay)
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(new Float(20));
+ lonlat.add(new Float(11));
+
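+ // under MULT the text score still matters and doc "1" ranks first; under REPLACE only the decay
+ // counts and doc "2", sitting exactly at the origin, wins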
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+
+ // Test REPLACE boost mode (gauss decay)
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode(
+ CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+ assertThat(sh.getAt(0).getId(), equalTo("2"));
+ assertThat(sh.getAt(1).getId(), equalTo("1"));
+
+ }
+
+ @Test
+ public void testParseGeoPoint() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 20).field("lon", 11).endObject()
+ .endObject()));
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+ GeoPoint point = new GeoPoint(20, 11);
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", point, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ float[] coords = { 11, 20 };
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", coords, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ }
+
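+    // With origin 0, scale 1 and decay 0.5 the gauss function evaluates to exactly
+    // 0.5 for the single document (num == 1.0), so every boost_mode result below
+    // can be checked against simple arithmetic on the query score and 0.5.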
+ @Test
+ public void testCombineModes() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()));
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
+        // the decay function evaluates to 0.5 for this document (num == scale and decay == 0.5)
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MULT))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.REPLACE))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.SUM))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282 + 0.5), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.AVG))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo((0.30685282 + 0.5), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MIN))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MAX))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ }
+
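+    // A non-positive scale ("-1d") must be rejected; actionGet() is expected to
+    // throw, so the assertion after it should never be reached.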
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testExceptionThrownIfScaleLE0() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-28").endObject())).actionGet();
+ refresh();
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")))));
+
+ SearchResponse sr = response.actionGet();
+ assertOrderedSearchHits(sr, "2", "1");
+ }
+
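+    // Date math expressions such as "now" and "now-1d" must be accepted as the
+    // origin of a date decay function.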
+ @Test
+ public void testParseDateMath() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis()).endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis() - (1000 * 60 * 60 * 24)).endObject())).actionGet();
+ refresh();
+
+ SearchResponse sr = client().search(
+ searchRequest().source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))))).get();
+
+ assertNoFailures(sr);
+ assertOrderedSearchHits(sr, "1", "2");
+
+ sr = client().search(
+ searchRequest().source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))))).get();
+
+ assertNoFailures(sr);
+ assertOrderedSearchHits(sr, "2", "1");
+
+ }
+
+
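+    // The decay parameter must lie strictly between 0 and 1; setDecay(100) is
+    // expected to throw while the builder is set up, before any search runs.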
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception {
+ DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder("num1", "2013-05-28", "1d").setDecay(100);
+
+ }
+
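+    // Documents that lack one of the decay fields must still be returned; the
+    // assertions check that a document whose value is present (and thus decayed)
+    // scores lower than its counterpart that is missing the field.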
+ @Test
+ public void testValueMissingLin() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().startObject("num2").field("type", "double")
+ .endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").field("num2", "1.0")
+ .endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num2", "1.0").endObject())).actionGet();
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("3")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").field("num2", "1.0")
+ .endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("4")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").endObject())).actionGet();
+
+ refresh();
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(
+ functionScoreQuery(termQuery("test", "value")).add(linearDecayFunction("num1", "2013-05-28", "+3d"))
+ .add(linearDecayFunction("num2", "0.0", "1")).scoreMode("multiply"))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(4));
+ double[] scores = new double[4];
+ for (int i = 0; i < sh.hits().length; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore();
+ }
+ assertThat(scores[0], lessThan(scores[1]));
+ assertThat(scores[2], lessThan(scores[3]));
+
+ }
+
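+    // When no origin is given for a date field, "now" is used as the origin, so
+    // the most recent document should score highest.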
+ @Test
+ public void testDateWithoutOrigin() throws Exception {
+ DateTime dt = new DateTime();
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ DateTime docDate = dt.minusDays(1);
+ String docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+ docDate = dt.minusDays(2);
+ docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+ docDate = dt.minusDays(3);
+ docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("3")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+
+ refresh();
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(
+ functionScoreQuery(QueryBuilders.matchAllQuery()).add(linearDecayFunction("num1", "1000w"))
+ .add(gaussDecayFunction("num1", "1d")).add(exponentialDecayFunction("num1", "1000w"))
+ .scoreMode("multiply"))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(3));
+        double[] scores = new double[3];
+ for (int i = 0; i < sh.hits().length; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore();
+ }
+ assertThat(scores[1], lessThan(scores[0]));
+ assertThat(scores[2], lessThan(scores[1]));
+
+ }
+
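+    // Combines date, geo and numeric linear decay over 200 documents whose
+    // function scores all grow with the document id, and checks that the
+    // resulting scores are strictly increasing as well.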
+ @Test
+ public void testManyDocsLin() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("date").field("type", "date").endObject().startObject("num").field("type", "double")
+ .endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ int numDocs = 200;
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+
+ for (int i = 0; i < numDocs; i++) {
+ double lat = 100 + (int) (10.0 * (float) (i) / (float) (numDocs));
+ double lon = 100;
+ int day = (int) (29.0 * (float) (i) / (float) (numDocs)) + 1;
+ String dayString = day < 10 ? "0" + Integer.toString(day) : Integer.toString(day);
+ String date = "2013-05-" + dayString;
+
+ indexBuilders.add(client().prepareIndex()
+ .setType("type")
+ .setId(Integer.toString(i))
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").field("date", date).field("num", i).startObject("geo")
+ .field("lat", lat).field("lon", lon).endObject().endObject()));
+ }
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+ indexRandom(true, builders);
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(new Float(100));
+ lonlat.add(new Float(110));
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().size(numDocs).query(
+ functionScoreQuery(termQuery("test", "value"))
+ .add(new MatchAllFilterBuilder(), linearDecayFunction("date", "2013-05-30", "+15d"))
+ .add(new MatchAllFilterBuilder(), linearDecayFunction("geo", lonlat, "1000km"))
+ .add(new MatchAllFilterBuilder(), linearDecayFunction("num", numDocs, numDocs / 2.0))
+ .scoreMode("multiply").boostMode(CombineFunction.REPLACE.getName()))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(numDocs));
+ double[] scores = new double[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore();
+ }
+ for (int i = 0; i < numDocs - 1; i++) {
+ assertThat(scores[i], lessThan(scores[i + 1]));
+
+ }
+
+ }
+
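+    // Referencing a field that does not exist in the mapping ("type1.geo") must
+    // fail the search phase rather than silently return unscored hits.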
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testParsingExceptionIfFieldDoesNotExist() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ int numDocs = 2;
+ client().index(
+ indexRequest("test").type("type1").source(
+ jsonBuilder().startObject().field("test", "value").startObject("geo").field("lat", 1).field("lon", 2).endObject()
+ .endObject())).actionGet();
+ refresh();
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(new Float(100));
+ lonlat.add(new Float(110));
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDocs)
+ .query(functionScoreQuery(termQuery("test", "value")).add(new MatchAllFilterBuilder(),
+ linearDecayFunction("type1.geo", lonlat, "1000km")).scoreMode("multiply"))));
+ SearchResponse sr = response.actionGet();
+
+ }
+
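+    // A decay function on a field whose mapped type it cannot handle (here a
+    // string field used with a numeric decay) must fail the search phase.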
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "string").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(
+ jsonBuilder().startObject().field("test", "value").field("num", Integer.toString(1)).endObject())).actionGet();
+ refresh();
+        // the "num" field is mapped as string, but the decay function needs a numeric field
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value")).add(new MatchAllFilterBuilder(),
+ linearDecayFunction("num", 1.0, 0.5)).scoreMode("multiply"))));
+ response.actionGet();
+ }
+
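+    // function_score without an inner query must be valid and default to
+    // match_all semantics.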
+ @Test
+ public void testNoQueryGiven() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()))
+ .actionGet();
+ refresh();
+        // no inner query given: function_score falls back to match_all, so the function alone scores
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery().add(new MatchAllFilterBuilder(), linearDecayFunction("num", 1, 0.5)).scoreMode(
+ "multiply"))));
+ response.actionGet();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
new file mode 100644
index 0000000..024cc04
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.functionscore.DecayFunction;
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class FunctionScorePluginTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put("plugin.types", CustomDistanceScorePlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Test
+ public void testPlugin() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test")
+ .field("type", "string").endObject().startObject("num1").field("type", "date").endObject().endObject()
+ .endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-26").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject())).actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ DecayFunctionBuilder gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d");
+
+ ActionFuture<SearchResponse> response = client().search(searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value")).add(gfb))));
+
+ SearchResponse sr = response.actionGet();
+ ElasticsearchAssertions.assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+
+ assertThat(sh.hits().length, equalTo(2));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+
+ }
+
+ public static class CustomDistanceScorePlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-distance-score";
+ }
+
+ @Override
+ public String description() {
+ return "Distance score plugin to test pluggable implementation";
+ }
+
+ public void onModule(FunctionScoreModule scoreModule) {
+ scoreModule.registerParser(FunctionScorePluginTests.CustomDistanceScoreParser.class);
+ }
+
+ }
+
+ public static class CustomDistanceScoreParser extends DecayFunctionParser {
+
+ public static final String[] NAMES = { "linear_mult", "linearMult" };
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ static final DecayFunction decayFunction = new LinearMultScoreFunction();
+
+ @Override
+ public DecayFunction getDecayFunction() {
+ return decayFunction;
+ }
+
+ static class LinearMultScoreFunction implements DecayFunction {
+ LinearMultScoreFunction() {
+ }
+
+ @Override
+ public double evaluate(double value, double scale) {
+
+ return value;
+ }
+
+ @Override
+ public Explanation explainFunction(String distanceString, double distanceVal, double scale) {
+ ComplexExplanation ce = new ComplexExplanation();
+ ce.setDescription("" + distanceVal);
+ return ce;
+ }
+
+ @Override
+ public double processScale(double userGivenScale, double userGivenValue) {
+ return userGivenScale;
+ }
+ }
+ }
+
+ public class CustomDistanceScoreBuilder extends DecayFunctionBuilder {
+
+ public CustomDistanceScoreBuilder(String fieldName, Object origin, Object scale) {
+ super(fieldName, origin, scale);
+ }
+
+ @Override
+ public String getName() {
+ return CustomDistanceScoreParser.NAMES[0];
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java b/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java
new file mode 100644
index 0000000..7a19796
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.CoreMatchers;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class RandomScoreFunctionTests extends ElasticsearchIntegrationTest {
+
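+    // Repeating a random_score search with the same seed and the same preference
+    // string must return identical hits, scores and ordering every time.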
+ @Test
+ public void consistentHitsWithSameSeed() throws Exception {
+ final int replicas = between(0, 2); // needed for green status!
+ cluster().ensureAtLeastNumNodes(replicas + 1);
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.builder().put("index.number_of_shards", between(2, 5))
+ .put("index.number_of_replicas", replicas)
+ .build()));
+        ensureGreen(); // wait for relocations to finish, otherwise preference-based routing might hit different shard copies between runs
+ int docCount = atLeast(100);
+ for (int i = 0; i < docCount; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+ flush();
+ refresh();
+ int outerIters = atLeast(10);
+ for (int o = 0; o < outerIters; o++) {
+ final long seed = randomLong();
+            final String preference = randomRealisticUnicodeOfLengthBetween(1, 10); // preference must be at least one character
+ int innerIters = atLeast(2);
+ SearchHits hits = null;
+ for (int i = 0; i < innerIters; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setPreference(preference)
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(seed)))
+ .execute().actionGet();
+ assertThat("Failures " + Arrays.toString(searchResponse.getShardFailures()), searchResponse.getShardFailures().length, CoreMatchers.equalTo(0));
+ int hitCount = searchResponse.getHits().getHits().length;
+ if (i == 0) {
+ assertThat(hits, nullValue());
+ hits = searchResponse.getHits();
+ } else {
+ assertThat(hits.getHits().length, equalTo(searchResponse.getHits().getHits().length));
+ for (int j = 0; j < hitCount; j++) {
+ assertThat(searchResponse.getHits().getAt(j).score(), equalTo(hits.getAt(j).score()));
+ assertThat(searchResponse.getHits().getAt(j).id(), equalTo(hits.getAt(j).id()));
+ }
+ }
+ }
+ }
+ }
+
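+    // Ignored diagnostic: prints distribution statistics (fill rate, repeats,
+    // percentiles, mean) of which document random_score ranks first.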
+ @Test
+ @Ignore
+ public void distribution() throws Exception {
+ int count = 10000;
+
+ prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ for (int i = 0; i < count; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+
+ flush();
+ refresh();
+
+ int[] matrix = new int[count];
+
+ for (int i = 0; i < count; i++) {
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(System.nanoTime())))
+ .execute().actionGet();
+
+ matrix[Integer.valueOf(searchResponse.getHits().getAt(0).id())]++;
+ }
+
+ int filled = 0;
+ int maxRepeat = 0;
+ int sumRepeat = 0;
+ for (int i = 0; i < matrix.length; i++) {
+ int value = matrix[i];
+ sumRepeat += value;
+ maxRepeat = Math.max(maxRepeat, value);
+ if (value > 0) {
+ filled++;
+ }
+ }
+
+ System.out.println();
+ System.out.println("max repeat: " + maxRepeat);
+ System.out.println("avg repeat: " + sumRepeat / (double) filled);
+ System.out.println("distribution: " + filled / (double) count);
+
+ int percentile50 = filled / 2;
+ int percentile25 = (filled / 4);
+ int percentile75 = percentile50 + percentile25;
+
+ int sum = 0;
+
+ for (int i = 0; i < matrix.length; i++) {
+ if (matrix[i] == 0) {
+ continue;
+ }
+ sum += i * matrix[i];
+ if (percentile50 == 0) {
+ System.out.println("median: " + i);
+ } else if (percentile25 == 0) {
+ System.out.println("percentile_25: " + i);
+ } else if (percentile75 == 0) {
+ System.out.println("percentile_75: " + i);
+ }
+ percentile50--;
+ percentile25--;
+ percentile75--;
+ }
+
+ System.out.println("mean: " + sum / (double) count);
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java b/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java
new file mode 100644
index 0000000..ca6ddbc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.geoBoundingBoxFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class GeoBoundingBoxTests extends ElasticsearchIntegrationTest {
+
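+    // Each bounding box query is run twice: once with the default execution and
+    // once with type("indexed"), which uses the indexed lat/lon fields; both
+    // must return the same hits.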
+ @Test
+ public void simpleBoundingBoxTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(40.73, -74.1).bottomRight(40.717, -73.99)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(40.73, -74.1).bottomRight(40.717, -73.99).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
+ }
+ }
+
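+    // Exercises boxes at the +/-180 degree date line: a box whose left longitude
+    // is greater than its right longitude wraps around and must still match only
+    // the intended point.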
+ @Test
+ public void limitsBoundingBoxTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(settingsBuilder().put("index.number_of_shards", "1")).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", -20).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", -10).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", 10).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", 20).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 10).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 0).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", -10).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "8").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 10).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "9").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 0).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "10").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", -10).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -11).bottomRight(40, 9)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -11).bottomRight(40, 9).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -9).bottomRight(40, 11)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -9).bottomRight(40, 11).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(11, 171).bottomRight(1, -169)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("5"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(11, 171).bottomRight(1, -169).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("5"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(9, 169).bottomRight(-1, -171)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("9"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(9, 169).bottomRight(-1, -171).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("9"));
+ }
+
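+    // A single very wide box that crosses the date line must match points on
+    // either side of it (Stockholm and Montreal).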
+ @Test
+ public void limit2BoundingBoxTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(settingsBuilder().put("index.number_of_shards", "1")).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("userid", 880)
+ .field("title", "Place in Stockholm")
+ .startObject("location").field("lat", 59.328355000000002).field("lon", 18.036842).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("userid", 534)
+ .field("title", "Place in Montreal")
+ .startObject("location").field("lat", 45.509526999999999).field("lon", -73.570986000000005).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 880),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 880),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 534),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 534),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java b/src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java
new file mode 100644
index 0000000..18237fb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.facet.geodistance.GeoDistanceFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.geoDistanceFacet;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeoDistanceFacetTests extends ElasticsearchIntegrationTest {
+
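+    // The ranges below overlap on purpose, so a document can fall into more
+    // than one entry; totals are checked three ways: count only, via valueField
+    // and via an equivalent valueScript.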
+ @Test
+ public void simpleGeoFacetTests() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // to NY: 0
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .field("num", 1)
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .field("num", 2)
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .field("num", 3)
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .field("num", 4)
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .field("num", 5)
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .field("num", 6)
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .field("num", 7)
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS)
+ .addUnboundedFrom(2)
+ .addRange(0, 1)
+ .addRange(0.5, 2.5)
+ .addUnboundedTo(1)
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(7l));
+ GeoDistanceFacet facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(4));
+
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), not(closeTo(0, 0.00001)));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), not(closeTo(0, 0.00001)));
+
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(0.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getTo(), closeTo(2.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), not(closeTo(0, 0.00001)));
+
+ assertThat(facet.getEntries().get(3).getFrom(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(5l));
+ assertThat(facet.getEntries().get(3).getTotal(), not(closeTo(0, 0.00001)));
+
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS).valueField("num")
+ .addUnboundedFrom(2)
+ .addRange(0, 1)
+ .addRange(0.5, 2.5)
+ .addUnboundedTo(1)
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(7l));
+ facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(4));
+
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(13, 0.00001));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(4, 0.00001));
+
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(0.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getTo(), closeTo(2.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(15, 0.00001));
+
+ assertThat(facet.getEntries().get(3).getFrom(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(5l));
+ assertThat(facet.getEntries().get(3).getTotal(), closeTo(24, 0.00001));
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS).valueScript("doc['num'].value")
+ .addUnboundedFrom(2)
+ .addRange(0, 1)
+ .addRange(0.5, 2.5)
+ .addUnboundedTo(1)
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(7l));
+ facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(4));
+
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(13, 0.00001));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(4, 0.00001));
+
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(0.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getTo(), closeTo(2.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(15, 0.00001));
+
+ assertThat(facet.getEntries().get(3).getFrom(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(5l));
+ assertThat(facet.getEntries().get(3).getTotal(), closeTo(24, 0.00001));
+ }
+
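+    // A document may carry several points; it is counted in every range that at
+    // least one of its points falls into (doc 1 appears in both ranges below).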
+ @Test
+ public void multiLocationGeoDistanceTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("num", 1)
+ .startArray("location")
+ // to NY: 0
+ .startObject().field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("num", 3)
+ .startArray("location")
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ // to NY: 1.055 km
+ .startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS)
+ .addRange(0, 2)
+ .addRange(2, 10)
+ )
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ GeoDistanceFacet facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(2));
+
+ assertThat(facet.getEntries().get(0).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java b/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java
new file mode 100644
index 0000000..f82f8c4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java
@@ -0,0 +1,659 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class GeoDistanceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleDistanceTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()),
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()),
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()),
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()),
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()),
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()),
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()));
+
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("3km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
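+ // Same filter with optimize_bbox=indexed: the indexed lat/lon values are used for a
+ // bounding-box pre-check before the exact distance computation, so the hits must not change.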
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("3km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+
+ // now with a PLANE type
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("3km").geoDistance(GeoDistance.PLANE).point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+
+ // the FACTOR distance computation is too imprecise for distances this small, so it is not tested here
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("2km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("2km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("1.242mi").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("1.242mi").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").from("1.0km").to("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").from("1.0km").to("2.0km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").to("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").from("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ // SORTING
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 7);
+ assertOrderedSearchHits(searchResponse, "1", "3", "4", "5", "6", "2", "7");
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 7);
+ assertOrderedSearchHits(searchResponse, "7", "2", "6", "5", "4", "3", "1");
+ }
+
+ @Test
+ public void testDistanceSortingMVFields() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("names", "New York")
+ .startObject("locations").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("names", "Times Square", "Tribeca")
+ .startArray("locations")
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("names", "Wall Street", "Soho")
+ .startArray("locations")
+ // to NY: 1.055 km
+ .startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ // to NY: 1.258 km
+ .startObject().field("lat", 40.7247222).field("lon", -74).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("names", "Greenwich Village", "Brooklyn")
+ .startArray("locations")
+ // to NY: 2.029 km
+ .startObject().field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ // to NY: 8.572 km
+ .startObject().field("lat", 40.65).field("lon", -73.95).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
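+ // No explicit sort_mode: the assertions below indicate that ascending order sorts by each
+ // document's minimum distance and descending order by its maximum; 'max', 'min' and 'avg'
+ // are then exercised explicitly.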
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+
+ // Order: Asc, Mode: max
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ // Order: Desc, Mode: min
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
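+ // 'sum' is not a meaningful mode for distance sorting and must be rejected with a per-shard BAD_REQUEST.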
+ try {
+ client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("sum"))
+ .execute().actionGet();
+ fail("Expected error");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ @Test
+ // Regression bug: https://github.com/elasticsearch/elasticsearch/issues/2851
+ public void testDistanceSortingWithMissingGeoPoint() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("names", "Times Square", "Tribeca")
+ .startArray("locations")
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("names", "Wall Street", "Soho")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
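+ // Document 2 has no geo point at all; the assertions below show that missing values sort
+ // as Double.MAX_VALUE, i.e. last in ascending and first in descending order.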
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 2);
+ assertOrderedSearchHits(searchResponse, "1", "2");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ // The doc with the missing geo point sorts first, which is consistent with 0.20.x behavior
+ assertHitCount(searchResponse, 2);
+ assertOrderedSearchHits(searchResponse, "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286d, 10d));
+ }
+
+ @Test
+ public void distanceScriptTests() throws Exception {
+ double source_lat = 32.798;
+ double source_long = -117.151;
+ double target_lat = 32.81;
+ double target_long = -117.21;
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "TestPosition")
+ .startObject("location").field("lat", source_lat).field("lon", source_long).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
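+ // The arcDistance*/distance* script calls compute arc (great-circle) and plane distances in
+ // the default unit, kilometers or miles; the +360 variants check that out-of-range
+ // coordinates are normalized back to the same point.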
+ SearchResponse searchResponse1 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistance(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance1 = searchResponse1.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance1, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse2 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].distance(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance2 = searchResponse2.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance2, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse3 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInKm(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultArcDistance3 = searchResponse3.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance3, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse4 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].distanceInKm(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance4 = searchResponse4.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance4, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse5 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInKm(" + (target_lat) + "," + (target_long + 360) + ")").execute().actionGet();
+ Double resultArcDistance5 = searchResponse5.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance5, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse6 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInKm(" + (target_lat + 360) + "," + (target_long) + ")").execute().actionGet();
+ Double resultArcDistance6 = searchResponse6.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance6, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse7 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInMiles(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance7 = searchResponse7.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance7, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+
+ SearchResponse searchResponse8 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].distanceInMiles(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance8 = searchResponse8.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance8, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+
+ }
+
+ @Test
+ public void testDistanceSortingNestedFields() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("company")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("branches")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("companies")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("company", mapping)
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("companies").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ indexRandom(true, client().prepareIndex("companies", "company", "1").setSource(jsonBuilder().startObject()
+ .field("name", "company 1")
+ .startArray("branches")
+ .startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "2").setSource(jsonBuilder().startObject()
+ .field("name", "company 2")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject() // to NY: 5.286 km
+ .endObject()
+ .startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject() // to NY: 0.4621 km
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "3").setSource(jsonBuilder().startObject()
+ .field("name", "company 3")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject() // to NY: 1.055 km
+ .endObject()
+ .startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject() // to NY: 1.258 km
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "4").setSource(jsonBuilder().startObject()
+ .field("name", "company 4")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject() // to NY: 2.029 km
+ .endObject()
+ .startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject() // to NY: 8.572 km
+ .endObject()
+ .endArray()
+ .endObject()));
+
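+ // Distance sorting on a geo_point inside a nested object: the nested path can be given
+ // explicitly, and a nested filter limits which branch objects contribute distances
+ // (documents without a matching branch sort at Double.MAX_VALUE, see the 'brooklyn' filter below).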
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+
+ // Order: Asc, Mode: max
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ // Order: Desc, Mode: min
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.geoDistanceSort("branches.location").setNestedPath("branches")
+ .point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.geoDistanceSort("branches.location").setNestedFilter(termFilter("branches.name", "brooklyn"))
+ .point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("4"));
+ assertSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+
+ try {
+ client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("sum"))
+ .execute().actionGet();
+ fail("Expected error");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ /**
+ * Regression test for issue 3073
+ */
+ @Test
+ public void testGeoDistanceFilter() throws IOException {
+ double lat = 40.720611;
+ double lon = -73.998776;
+
+ XContentBuilder mapping = JsonXContent.contentBuilder()
+ .startObject()
+ .startObject("location")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .field("geohash", true)
+ .field("geohash_precision", 24)
+ .field("lat_lon", true)
+ .startObject("fielddata")
+ .field("format", randomNumericFieldDataFormat())
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ XContentBuilder source = JsonXContent.contentBuilder()
+ .startObject()
+ .field("pin", GeoHashUtils.encode(lat, lon))
+ .endObject();
+
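+ // The pin is supplied as a geohash string; with geohash_precision 24 the decoded point must
+ // stay within 1m of the original coordinates, so the arc-distance filter below matches.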
+ ensureYellow();
+
+ client().admin().indices().prepareCreate("locations").addMapping("location", mapping).execute().actionGet();
+ client().prepareIndex("locations", "location", "1").setCreate(true).setSource(source).execute().actionGet();
+ client().admin().indices().prepareRefresh("locations").execute().actionGet();
+ client().prepareGet("locations", "location", "1").execute().actionGet();
+
+ SearchResponse result = client().prepareSearch("locations")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(FilterBuilders.geoDistanceFilter("pin")
+ .geoDistance(GeoDistance.ARC)
+ .lat(lat).lon(lon)
+ .distance("1m"))
+ .execute().actionGet();
+
+ assertHitCount(result, 1);
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
new file mode 100644
index 0000000..efb2e13
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
@@ -0,0 +1,603 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.shape.Shape;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Random;
+import java.util.zip.GZIPInputStream;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeoFilterTests extends ElasticsearchIntegrationTest {
+
+ private static boolean intersectSupport;
+ private static boolean disjointSupport;
+ private static boolean withinSupport;
+
+ @BeforeClass
+ public static void createNodes() throws Exception {
+ intersectSupport = testRelationSupport(SpatialOperation.Intersects);
+ disjointSupport = testRelationSupport(SpatialOperation.IsDisjointTo);
+ withinSupport = testRelationSupport(SpatialOperation.IsWithin);
+ }
+
+ private static byte[] unZipData(String path) throws IOException {
+ InputStream is = Streams.class.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ GZIPInputStream in = new GZIPInputStream(is);
+ Streams.copy(in, out);
+
+ is.close();
+ out.close();
+
+ return out.toByteArray();
+ }
+
+ @Test
+ public void testShapeBuilders() {
+
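+ // Each invalid polygon below must be rejected with an InvalidShapeException at build() time;
+ // the valid shapes in between must build cleanly.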
+ try {
+ // self-intersecting polygon
+ ShapeBuilder.newPolygon()
+ .point(-10, -10)
+ .point(10, 10)
+ .point(-10, 10)
+ .point(10, -10)
+ .close().build();
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
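+ // expected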
+ }
+
+ // polygon with hole
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close().close().build();
+
+ try {
+ // polygon with overlapping hole
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 11).point(5, 11).point(5, -5)
+ .close().close().build();
+
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
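+ // expected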
+ }
+
+ try {
+ // polygon with intersecting holes
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .hole()
+ .point(-5, -6).point(5, -6).point(5, -4).point(-5, -4)
+ .close()
+ .close().build();
+ fail("Intersection of holes not detected");
+ } catch (InvalidShapeException e) {
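+ // expected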
+ }
+
+ try {
+ // Common line in polygon
+ ShapeBuilder.newPolygon()
+ .point(-10, -10)
+ .point(-10, 10)
+ .point(-5, 10)
+ .point(-5, -5)
+ .point(-5, 20)
+ .point(10, 20)
+ .point(10, -10)
+ .close().build();
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
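+ // expected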
+ }
+
+// Not specified
+// try {
+// // two overlapping polygons within a multipolygon
+// ShapeBuilder.newMultiPolygon()
+// .polygon()
+// .point(-10, -10)
+// .point(-10, 10)
+// .point(10, 10)
+// .point(10, -10)
+// .close()
+// .polygon()
+// .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+// .close().build();
+// fail("Polygon intersection not detected";
+// } catch (InvalidShapeException e) {}
+
+ // Multipolygon: polygon with a hole, plus a second polygon inside that hole
+ ShapeBuilder.newMultiPolygon()
+ .polygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .close()
+ .polygon()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close()
+ .build();
+
+// Not supported
+// try {
+// // Multipolygon: polygon with hole and polygon within the hole but overlapping
+// ShapeBuilder.newMultiPolygon()
+// .polygon()
+// .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+// .hole()
+// .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+// .close()
+// .close()
+// .polygon()
+// .point(-4, -4).point(-4, 6).point(4, 6).point(4, -4)
+// .close()
+// .build();
+// fail("Polygon intersection not detected";
+// } catch (InvalidShapeException e) {}
+
+ }
+
+ @Test
+ public void testShapeRelations() throws Exception {
+
+ assertTrue( "Intersect relation is not supported", intersectSupport);
+ assertTrue("Disjoint relation is not supported", disjointSupport);
+ assertTrue("within relation is not supported", withinSupport);
+
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("polygon")
+ .startObject("properties")
+ .startObject("area")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("store", true)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().string();
+
+ CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("shapes").addMapping("polygon", mapping);
+ mappingRequest.execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // Create a multipolygon with two polygons. The first is a rectangle of size 10x10
+ // with a hole of size 5x5 equidistant from all sides. This hole in turn contains
+ // the second polygon, of size 4x4 and equidistant from all sides
+ MultiPolygonBuilder polygon = ShapeBuilder.newMultiPolygon()
+ .polygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .close()
+ .polygon()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close();
+
+ BytesReference data = jsonBuilder().startObject().field("area", polygon).endObject().bytes();
+
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Point in polygon
+ SearchResponse result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(3, 3)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ // Point in polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ // by definition the border of a polygon belongs to its interior,
+ // so the border of a polygon's hole also belongs to the interior
+ // of the polygon, NOT to the hole
+
+ // Point on polygon border
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(10.0, 5.0)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ // Point on hole border
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(5.0, 2.0)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ if (disjointSupport) {
+ // Point not in polygon
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoDisjointFilter("area", ShapeBuilder.newPoint(3, 3)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ // Point in polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoDisjointFilter("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+ }
+
+ // Create a polygon that fills the empty area of the polygon defined above
+ PolygonBuilder inverse = ShapeBuilder.newPolygon()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .hole()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close()
+ .close();
+
+ data = jsonBuilder().startObject().field("area", inverse).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "2").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // re-check point on polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("2"));
+
+ // Create Polygon with hole and common edge
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(10, 5).point(10, -5)
+ .close()
+ .close();
+
+ if (withinSupport) {
+ // Polygon WithIn Polygon
+ builder = ShapeBuilder.newPolygon()
+ .point(-30, -30).point(-30, 30).point(30, 30).point(30, -30).close();
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoWithinFilter("area", builder))
+ .execute().actionGet();
+ assertHitCount(result, 2);
+ }
+
+ // Create a polygon crossing longitude 180.
+ builder = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .close();
+
+ data = jsonBuilder().startObject().field("area", builder).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Create a polygon crossing longitude 180 with hole.
+ builder = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .hole().point(175, -5).point(185, -5).point(185, 5).point(175, 5).close()
+ .close();
+
+ data = jsonBuilder().startObject().field("area", builder).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
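+ // Longitudes beyond +/-180 are normalized, so the 170..190 shell covers both lon 174 and
+ // lon -174 (== 186); points at lon 180 are excluded only where the hole (lat -5..5, lon 175..185) applies.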
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(174, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(-174, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(180, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(180, -6)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ }
+
+ @Test
+ @Slow
+ public void bulktest() throws Exception {
+ byte[] bulkAction = unZipData("/org/elasticsearch/search/geo/gzippedmap.json");
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("country")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .field("lat_lon", true)
+ .field("store", true)
+ .endObject()
+ .startObject("location")
+ .field("type", "geo_shape")
+ .field("lat_lon", true)
+ .field("store", true)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate("countries").addMapping("country", mapping).execute().actionGet();
+ BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, false, null, null).execute().actionGet();
+
+ for (BulkItemResponse item : bulk.getItems()) {
+ assertFalse("unable to index data", item.isFailed());
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ String key = "DE";
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("_id", key))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 1);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getId(), equalTo(key));
+ }
+
+ SearchResponse world = client().prepareSearch().addField("pin").setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ geoBoundingBoxFilter("pin")
+ .topLeft(90, -179.99999)
+ .bottomRight(-90, 179.99999))
+ ).execute().actionGet();
+
+ assertHitCount(world, 53);
+
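+ // A 425km radius around (51.11, 9.851), roughly the center of Germany, should match five
+ // country pins (DE, CZ, BE, NL, LU); each hit is re-checked with the independent
+ // great-circle helper defined at the bottom of this class.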
+ SearchResponse distance = client().prepareSearch().addField("pin").setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ geoDistanceFilter("pin").distance("425km").point(51.11, 9.851)
+ )).execute().actionGet();
+
+ assertHitCount(distance, 5);
+ GeoPoint point = new GeoPoint();
+ for (SearchHit hit : distance.getHits()) {
+ String name = hit.getId();
+ point.resetFromString(hit.fields().get("pin").getValue().toString());
+ double dist = distance(point.getLat(), point.getLon(), 51.11, 9.851);
+
+ assertThat("distance to '" + name + "'", dist, lessThanOrEqualTo(425000d));
+ assertThat(name, anyOf(equalTo("CZ"), equalTo("DE"), equalTo("BE"), equalTo("NL"), equalTo("LU")));
+ if (key.equals(name)) {
+ assertThat(dist, equalTo(0d));
+ }
+ }
+ }
+
+ @Test
+ public void testGeohashCellFilter() throws IOException {
+ String geohash = randomhash(10);
+ logger.info("Testing geohash_cell filter for [{}]", geohash);
+
+ List<String> neighbors = GeoHashUtils.neighbors(geohash);
+ List<String> parentNeighbors = GeoHashUtils.neighbors(geohash.substring(0, geohash.length() - 1));
+
+ logger.info("Neighbors {}", neighbors);
+ logger.info("Parent Neighbors {}", parentNeighbors);
+
+ ensureYellow();
+
+ client().admin().indices().prepareCreate("locations").addMapping("location", "pin", "type=geo_point,geohash_prefix=true,latlon=false").execute().actionGet();
+
+ // Index a pin
+ client().prepareIndex("locations", "location", "1").setCreate(true).setSource("pin", geohash).execute().actionGet();
+
+ // Index the geohash's neighbors
+ for (int i = 0; i < neighbors.size(); i++) {
+ client().prepareIndex("locations", "location", "N" + i).setCreate(true).setSource("pin", neighbors.get(i)).execute().actionGet();
+ }
+
+ // Index parent cell
+ client().prepareIndex("locations", "location", "p").setCreate(true).setSource("pin", geohash.substring(0, geohash.length() - 1)).execute().actionGet();
+
+ // Index the parent cell's neighbors
+ for (int i = 0; i < parentNeighbors.size(); i++) {
+ client().prepareIndex("locations", "location", "p" + i).setCreate(true).setSource("pin", parentNeighbors.get(i)).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh("locations").execute().actionGet();
+
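+ // geohash_cell matches documents whose point falls into the given cell (and, with
+ // neighbors=true, into its adjacent cells); both the raw JSON form and the filter builder
+ // are exercised below.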
+ // Result of this geohash search should contain the geohash only
+ SearchResponse results1 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter("{\"geohash_cell\": {\"pin\": \"" + geohash + "\", \"neighbors\": false}}").execute().actionGet();
+ assertHitCount(results1, 1);
+
+ // test the same, just with the builder
+ results1 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(geoHashCellFilter("pin", geohash, false)).execute().actionGet();
+ assertHitCount(results1, 1);
+
+ // Result of the parent query should contain the parent itself, its neighbors, the child cell and all of the child's neighbors
+ SearchResponse results2 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter("{\"geohash_cell\": {\"pin\": \"" + geohash.substring(0, geohash.length() - 1) + "\", \"neighbors\": true}}").execute().actionGet();
+ assertHitCount(results2, 2 + neighbors.size() + parentNeighbors.size());
+
+ // Testing point formats and precision
+ GeoPoint point = GeoHashUtils.decode(geohash);
+ int precision = geohash.length();
+
+ logger.info("Testing lat/lon format");
+ String pointTest1 = "{\"geohash_cell\": {\"pin\": {\"lat\": " + point.lat() + ",\"lon\": " + point.lon() + "},\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results3 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest1).execute().actionGet();
+ assertHitCount(results3, neighbors.size() + 1);
+
+ logger.info("Testing String format");
+ String pointTest2 = "{\"geohash_cell\": {\"pin\": \"" + point.lat() + "," + point.lon() + "\",\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results4 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest2).execute().actionGet();
+ assertHitCount(results4, neighbors.size() + 1);
+
+ logger.info("Testing Array format");
+ String pointTest3 = "{\"geohash_cell\": {\"pin\": [" + point.lon() + "," + point.lat() + "],\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results5 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest3).execute().actionGet();
+ assertHitCount(results5, neighbors.size() + 1);
+ }
+
+ @Test
+ public void testNeighbors() {
+ // Simple root case
+ assertThat(GeoHashUtils.neighbors("7"), containsInAnyOrder("4", "5", "6", "d", "e", "h", "k", "s"));
+
+ // Root cases (Outer cells)
+ assertThat(GeoHashUtils.neighbors("0"), containsInAnyOrder("1", "2", "3", "p", "r"));
+ assertThat(GeoHashUtils.neighbors("b"), containsInAnyOrder("8", "9", "c", "x", "z"));
+ assertThat(GeoHashUtils.neighbors("p"), containsInAnyOrder("n", "q", "r", "0", "2"));
+ assertThat(GeoHashUtils.neighbors("z"), containsInAnyOrder("8", "b", "w", "x", "y"));
+
+ // Root crossing dateline
+ assertThat(GeoHashUtils.neighbors("2"), containsInAnyOrder("0", "1", "3", "8", "9", "p", "r", "x"));
+ assertThat(GeoHashUtils.neighbors("r"), containsInAnyOrder("0", "2", "8", "n", "p", "q", "w", "x"));
+
+ // level1: simple case
+ assertThat(GeoHashUtils.neighbors("dk"), containsInAnyOrder("d5", "d7", "de", "dh", "dj", "dm", "ds", "dt"));
+
+ // Level1: crossing cells
+ assertThat(GeoHashUtils.neighbors("d5"), containsInAnyOrder("d4", "d6", "d7", "dh", "dk", "9f", "9g", "9u"));
+ assertThat(GeoHashUtils.neighbors("d0"), containsInAnyOrder("d1", "d2", "d3", "9b", "9c", "6p", "6r", "3z"));
+ }
+
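+ /**
+ * Great-circle (haversine) distance in meters between two lat/lon points,
+ * using the Earth's semi-major axis as the radius.
+ */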
+ public static double distance(double lat1, double lon1, double lat2, double lon2) {
+ return GeoUtils.EARTH_SEMI_MAJOR_AXIS * DistanceUtils.distHaversineRAD(
+ DistanceUtils.toRadians(lat1),
+ DistanceUtils.toRadians(lon1),
+ DistanceUtils.toRadians(lat2),
+ DistanceUtils.toRadians(lon2)
+ );
+ }
+
+ protected static boolean testRelationSupport(SpatialOperation relation) {
+ try {
+ GeohashPrefixTree tree = new GeohashPrefixTree(SpatialContext.GEO, 3);
+ RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(tree, "area");
+ Shape shape = SpatialContext.GEO.makePoint(0, 0);
+ SpatialArgs args = new SpatialArgs(relation, shape);
+ strategy.makeFilter(args);
+ return true;
+ } catch (UnsupportedSpatialOperation e) {
+ return false;
+ }
+ }
+
+ protected static String randomhash(int length) {
+ return randomhash(getRandom(), length);
+ }
+
+ protected static String randomhash(Random random) {
+ return randomhash(random, 2 + random.nextInt(10));
+ }
+
+ protected static String randomhash() {
+ return randomhash(getRandom());
+ }
+
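+    // Builds a random geohash of the requested length from the geohash base-32
+    // alphabet, which uses digits and lowercase letters but omits a, i, l and o.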
+ protected static String randomhash(Random random, int length) {
+ final char[] BASE_32 = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'b', 'c', 'd', 'e', 'f', 'g',
+ 'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r',
+ 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
+
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < length; i++) {
+ sb.append(BASE_32[random.nextInt(BASE_32.length)]);
+ }
+
+ return sb.toString();
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
new file mode 100644
index 0000000..31c8788
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.geoIntersectionFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class GeoShapeIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testNullShape() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
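+        // Indexing an explicit null for a geo_shape field must succeed, leaving the
+        // field simply absent when the document is fetched back.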
+ client().prepareIndex("test", "type1", "aNullshape").setSource("{\"location\": null}").execute().actionGet();
+ GetResponse result = client().prepareGet("test", "type1", "aNullshape").execute().actionGet();
+ assertThat(result.getField("location"), nullValue());
+ }
+
+ @Test
+ public void testIndexPointsFilterRectangle() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-30).value(-30).endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Document 2")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-45).value(-50).endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+        refresh();
+
+ ShapeBuilder shape = ShapeBuilder.newEnvelope().topLeft(-45, 45).bottomRight(45, -45);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionFilter("location", shape)))
+ .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(geoShapeQuery("location", shape))
+ .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ @Test
+ public void testEdgeCases() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "blakely").setSource(jsonBuilder().startObject()
+ .field("name", "Blakely Island")
+ .startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(-122.83).value(48.57).endArray()
+ .startArray().value(-122.77).value(48.56).endArray()
+ .startArray().value(-122.79).value(48.53).endArray()
+ .startArray().value(-122.83).value(48.57).endArray() // close the polygon
+ .endArray().endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ ShapeBuilder query = ShapeBuilder.newEnvelope().topLeft(-122.88, 48.62).bottomRight(-122.82, 48.54);
+
+ // This search would fail if both geoshape indexing and geoshape filtering
+ // used the bottom-level optimization in SpatialPrefixTree#recursiveGetNodes.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionFilter("location", query)))
+ .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("blakely"));
+ }
+
+ @Test
+ public void testIndexedShapeReference() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-30).value(-30).endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ ShapeBuilder shape = ShapeBuilder.newEnvelope().topLeft(-45, 45).bottomRight(45, -45);
+        XContentBuilder shapeContent = jsonBuilder().startObject()
+                .field("shape", shape)
+                .endObject();
+ createIndex("shapes");
+ ensureGreen();
+ client().prepareIndex("shapes", "shape_type", "Big_Rectangle").setSource(shapeContent).execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionFilter("location", "Big_Rectangle", "shape_type")))
+ .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(geoShapeQuery("location", "Big_Rectangle", "shape_type"))
+ .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ @Test
+ public void testReusableBuilder() throws IOException {
+ ShapeBuilder polygon = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .hole().point(175, -5).point(185, -5).point(185, 5).point(175, 5).close()
+ .close();
+ assertUnmodified(polygon);
+
+ ShapeBuilder linestring = ShapeBuilder.newLineString()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10);
+ assertUnmodified(linestring);
+ }
+
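+    // Serializing the builder, building the shape, and serializing again must
+    // yield identical JSON: build() may not mutate the builder's internal state.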
+ private void assertUnmodified(ShapeBuilder builder) throws IOException {
+ String before = jsonBuilder().startObject().field("area", builder).endObject().string();
+ builder.build();
+ String after = jsonBuilder().startObject().field("area", builder).endObject().string();
+ assertThat(before, equalTo(after));
+ }
+
+ @Test
+ public void testParsingMultipleShapes() throws IOException {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("location1")
+ .field("type", "geo_shape")
+ .endObject()
+ .startObject("location2")
+ .field("type", "geo_shape")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureYellow();
+
+ String p1 = "\"location1\" : {\"type\":\"polygon\", \"coordinates\":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}";
+ String p2 = "\"location2\" : {\"type\":\"polygon\", \"coordinates\":[[[-20,-20],[20,-20],[20,20],[-20,20],[-20,-20]]]}";
+ String o1 = "{" + p1 + ", " + p2 + "}";
+
+ client().prepareIndex("test", "type1", "1").setSource(o1).execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
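+        // Look up the polygon stored under the document's own "location2" field via
+        // an indexed_shape reference, then use it to filter the same index.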
+ String filter = "{\"geo_shape\": {\"location2\": {\"indexed_shape\": {"
+ + "\"id\": \"1\","
+ + "\"type\": \"type1\","
+ + "\"index\": \"test\","
+ + "\"path\": \"location2\""
+ + "}}}}";
+
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(filter).execute().actionGet();
+ assertHitCount(result, 1);
+ }
+
+ @Test
+ public void testShapeFetching_path() throws IOException {
+ prepareCreate("shapes").execute().actionGet();
+ prepareCreate("test").addMapping("type", "location", "type=geo_shape").execute().actionGet();
+ String location = "\"location\" : {\"type\":\"polygon\", \"coordinates\":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}";
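+        // Index the same polygon at four nesting depths so the filter below can be
+        // resolved through the paths "location", "1.location", "1.2.location" and
+        // "1.2.3.location".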
+ client().prepareIndex("shapes", "type", "1")
+ .setSource(
+ String.format(
+ Locale.ROOT, "{ %s, \"1\" : { %s, \"2\" : { %s, \"3\" : { %s } }} }", location, location, location, location
+ )
+ ).get();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(-20).value(-20).endArray()
+ .startArray().value(20).value(-20).endArray()
+ .startArray().value(20).value(20).endArray()
+ .startArray().value(-20).value(20).endArray()
+ .startArray().value(-20).value(-20).endArray()
+ .endArray().endArray()
+ .endObject().endObject()).get();
+ client().admin().indices().prepareRefresh("test", "shapes").execute().actionGet();
+
+ GeoShapeFilterBuilder filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("location");
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+ filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+ filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+ filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.3.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+
+ // now test the query variant
+ GeoShapeQueryBuilder query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.3.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ }
+
+ @Test // Issue 2944
+ public void testThatShapeIsReturnedEvenWhenExclusionsAreSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .startObject("_source")
+ .startArray("excludes").value("nonExistingField").endArray()
+ .endObject()
+ .endObject().endObject()
+ .string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "envelope")
+ .startArray("coordinates").startArray().value(-45.0).value(45).endArray().startArray().value(45).value(-45).endArray().endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+
+ Map<String, Object> indexedMap = searchResponse.getHits().getAt(0).sourceAsMap();
+ assertThat(indexedMap.get("location"), instanceOf(Map.class));
+ Map<String, Object> locationMap = (Map<String, Object>) indexedMap.get("location");
+ assertThat(locationMap.get("coordinates"), instanceOf(List.class));
+ List<List<Number>> coordinates = (List<List<Number>>) locationMap.get("coordinates");
+ assertThat(coordinates.size(), equalTo(2));
+ assertThat(coordinates.get(0).size(), equalTo(2));
+ assertThat(coordinates.get(0).get(0).doubleValue(), equalTo(-45.0));
+ assertThat(coordinates.get(0).get(1).doubleValue(), equalTo(45.0));
+ assertThat(coordinates.get(1).size(), equalTo(2));
+ assertThat(coordinates.get(1).get(0).doubleValue(), equalTo(45.0));
+ assertThat(coordinates.get(1).get(1).doubleValue(), equalTo(-45.0));
+ assertThat(locationMap.size(), equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/geo/gzippedmap.json b/src/test/java/org/elasticsearch/search/geo/gzippedmap.json
new file mode 100644
index 0000000..f77bdb8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/gzippedmap.json
Binary files differ
diff --git a/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
new file mode 100644
index 0000000..f1a0b7c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Deliberately trivial highlighter used to test the pluggable highlighting functionality.
+ */
+public class CustomHighlighter implements Highlighter {
+
+ @Override
+ public String[] names() {
+ return new String[] { "test-custom" };
+ }
+
+ @Override
+ public HighlightField highlight(HighlighterContext highlighterContext) {
+ SearchContextHighlight.Field field = highlighterContext.field;
+
+ List<Text> responses = Lists.newArrayList();
+ responses.add(new StringText("standard response"));
+
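+        // Echo every per-field option back as an extra fragment so tests can verify
+        // that highlighter options are propagated correctly.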
+ if (field.fieldOptions().options() != null) {
+ for (Map.Entry<String, Object> entry : field.fieldOptions().options().entrySet()) {
+ responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
+ }
+ }
+
+ return new HighlightField(highlighterContext.fieldName, responses.toArray(new Text[]{}));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java
new file mode 100644
index 0000000..a3e327b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class CustomHighlighterPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-highlighter";
+ }
+
+ @Override
+ public String description() {
+ return "Custom highlighter to test pluggable implementation";
+ }
+
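+    // Called back by the plugin infrastructure; registering the class here makes
+    // the "test-custom" highlighter resolvable by name.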
+ public void onModule(HighlightModule highlightModule) {
+ highlightModule.registerHighlighter(CustomHighlighter.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java
new file mode 100644
index 0000000..a123003
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHighlight;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class CustomHighlighterSearchTests extends ElasticsearchIntegrationTest {
+
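+    // Loads the highlighter plugin into each suite node via the plugin.types
+    // setting instead of installing the plugin on disk.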
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put("plugin.types", CustomHighlighterPlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Before
+    public void setup() throws Exception {
+ client().prepareIndex("test", "test", "1").setSource(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("name", "arbitrary content")
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ }
+
+ @Test
+ public void testThatCustomHighlightersAreSupported() throws IOException {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addHighlightedField("name").setHighlighterType("test-custom")
+ .execute().actionGet();
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response"));
+ }
+
+ @Test
+ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception {
+ HighlightBuilder.Field highlightConfig = new HighlightBuilder.Field("name");
+ highlightConfig.highlighterType("test-custom");
+ Map<String, Object> options = Maps.newHashMap();
+ options.put("myFieldOption", "someValue");
+ highlightConfig.options(options);
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addHighlightedField(highlightConfig)
+ .execute().actionGet();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response"));
+ assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myFieldOption:someValue"));
+ }
+
+ @Test
+ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception {
+ Map<String, Object> options = Maps.newHashMap();
+ options.put("myGlobalOption", "someValue");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setHighlighterOptions(options)
+ .setHighlighterType("test-custom")
+ .addHighlightedField("name")
+ .execute().actionGet();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response"));
+ assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myGlobalOption:someValue"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
new file mode 100644
index 0000000..c4398d9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
@@ -0,0 +1,2703 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.MatchQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilder.Field;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+
+public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // see #3486
+ public void testHighTermFrequencyDoc() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test", "name", "type=string,term_vector=with_positions_offsets,store=" + (randomBoolean() ? "yes" : "no"))
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 5))));
+ ensureYellow();
+ StringBuilder builder = new StringBuilder();
+ for (int i = 0; i < 6000; i++) {
+ builder.append("abc").append(" ");
+ }
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", builder.toString())
+ .get();
+ refresh();
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, startsWith("<em>abc</em> <em>abc</em> <em>abc</em> <em>abc</em>"));
+ }
+
+ @Test
+ public void testNgramHighlightingWithBrokenPositions() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .startObject("properties")
+ .startObject("name")
+ .startObject("fields")
+ .startObject("autocomplete")
+ .field("type", "string")
+ .field("index_analyzer", "autocomplete")
+ .field("search_analyzer", "search_autocomplete")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .field("type", "multi_field")
+ .endObject()
+ .endObject()
+ .endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("analysis.tokenizer.autocomplete.max_gram", 20)
+ .put("analysis.tokenizer.autocomplete.min_gram", 1)
+ .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit")
+ .put("analysis.tokenizer.autocomplete.type", "nGram")
+ .put("analysis.filter.wordDelimiter.type", "word_delimiter")
+ .putArray("analysis.filter.wordDelimiter.type_table",
+ "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM",
+ "? => ALPHANUM", ". => ALPHANUM", "- => ALPHANUM", "# => ALPHANUM", "% => ALPHANUM",
+ "+ => ALPHANUM", ", => ALPHANUM", "~ => ALPHANUM", ": => ALPHANUM", "/ => ALPHANUM",
+ "^ => ALPHANUM", "$ => ALPHANUM", "@ => ALPHANUM", ") => ALPHANUM", "( => ALPHANUM",
+ "] => ALPHANUM", "[ => ALPHANUM", "} => ALPHANUM", "{ => ALPHANUM")
+
+ .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
+ .put("analysis.filter.wordDelimiter.generate_word_parts", true)
+ .put("analysis.filter.wordDelimiter.generate_number_parts", false)
+ .put("analysis.filter.wordDelimiter.catenate_words", true)
+ .put("analysis.filter.wordDelimiter.catenate_numbers", true)
+ .put("analysis.filter.wordDelimiter.catenate_all", false)
+
+ .put("analysis.analyzer.autocomplete.tokenizer", "autocomplete")
+ .putArray("analysis.analyzer.autocomplete.filter", "lowercase", "wordDelimiter")
+ .put("analysis.analyzer.search_autocomplete.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.search_autocomplete.filter", "lowercase", "wordDelimiter")));
+ ensureYellow();
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "ARCOTEL Hotels Deutschland").get();
+ refresh();
+ SearchResponse search = client().prepareSearch("test").setTypes("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)).addHighlightedField("name.autocomplete").execute().actionGet();
+ assertHighlight(search, 0, "name.autocomplete", 0, equalTo("ARCO<em>TEL</em> Ho<em>tel</em>s <em>Deut</em>schland"));
+ }
+
+ @Test
+ public void testMultiPhraseCutoff() throws ElasticsearchException, IOException {
+        /*
+         * MultiPhraseQuery can overwhelm an entire node if the query contains too many terms.
+         * Highlighting therefore cuts the query off and extracts its terms once there are more than 16 of them.
+         */
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "body", "type=string,index_analyzer=custom_analyzer,search_analyzer=custom_analyzer,term_vector=with_positions_offsets")
+ .setSettings(
+ ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("analysis.filter.wordDelimiter.type", "word_delimiter")
+ .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
+ .put("analysis.filter.wordDelimiter.generate_word_parts", true)
+ .put("analysis.filter.wordDelimiter.generate_number_parts", true)
+ .put("analysis.filter.wordDelimiter.catenate_words", true)
+ .put("analysis.filter.wordDelimiter.catenate_numbers", true)
+ .put("analysis.filter.wordDelimiter.catenate_all", false)
+ .put("analysis.analyzer.custom_analyzer.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.custom_analyzer.filter", "lowercase", "wordDelimiter"))
+ );
+
+ ensureGreen();
+ client().prepareIndex("test", "test", "1")
+ .setSource("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature")
+ .get();
+ refresh();
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com ").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
+ assertHighlight(search, 0, "body", 0, startsWith("<em>Test: http://www.facebook.com</em>"));
+ search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
+ assertHighlight(search, 0, "body", 0, equalTo("<em>Test</em>: <em>http</em>://<em>www</em>.<em>facebook</em>.<em>com</em> <em>http</em>://<em>elasticsearch</em>.<em>org</em> <em>http</em>://<em>xing</em>.<em>com</em> <em>http</em>://<em>cnn</em>.<em>com</em> <em>http</em>://<em>quora</em>.com"));
+ }
+
+ @Test
+ public void testNgramHighlightingPreLucene42() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test",
+ "name", "type=string,index_analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets",
+ "name2", "type=string,index_analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("analysis.filter.my_ngram.max_gram", 20)
+ .put("analysis.filter.my_ngram.version", "4.1")
+ .put("analysis.filter.my_ngram.min_gram", 1)
+ .put("analysis.filter.my_ngram.type", "ngram")
+ .put("analysis.tokenizer.my_ngramt.max_gram", 20)
+ .put("analysis.tokenizer.my_ngramt.version", "4.1")
+ .put("analysis.tokenizer.my_ngramt.min_gram", 1)
+ .put("analysis.tokenizer.my_ngramt.type", "ngram")
+ .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
+ .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.name2_index_analyzer.filter", "lowercase", "my_ngram")
+ .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")
+ .put("analysis.analyzer.name_search_analyzer.filter", "lowercase")));
+ ensureYellow();
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "logicacmg ehemals avinci - the know how company",
+ "name2", "logicacmg ehemals avinci - the know how company").get();
+ client().prepareIndex("test", "test", "2")
+ .setSource("name", "avinci, unilog avinci, logicacmg, logica",
+ "name2", "avinci, unilog avinci, logicacmg, logica").get();
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica m"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+ assertHighlight(search, 1, "name", 0, equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica ma"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+ assertHighlight(search, 1, "name", 0, equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica m"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+ assertHighlight(search, 1, "name2", 0, equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica ma"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+ assertHighlight(search, 1, "name2", 0, equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+ assertHighlight(search, 1, "name2", 0, equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>"));
+ }
+
+ @Test
+ public void testNgramHighlighting() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test",
+ "name", "type=string,index_analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
+ "name2", "type=string,index_analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("analysis.filter.my_ngram.max_gram", 20)
+ .put("analysis.filter.my_ngram.min_gram", 1)
+ .put("analysis.filter.my_ngram.type", "ngram")
+ .put("analysis.tokenizer.my_ngramt.max_gram", 20)
+ .put("analysis.tokenizer.my_ngramt.min_gram", 1)
+ .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit")
+ .put("analysis.tokenizer.my_ngramt.type", "ngram")
+ .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
+ .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
+ .put("analysis.analyzer.name2_index_analyzer.filter", "my_ngram")
+ .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")));
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "logicacmg ehemals avinci - the know how company",
+ "name2", "logicacmg ehemals avinci - the know how company").get();
+ refresh();
+ ensureGreen();
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("name", "logica m")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name", "logica ma")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name", "logica")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica m")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how <em>company</em>"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica ma")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> ehemals avinci - the know how company"));
+ }
+
+ @Test
+ public void testEnsureNoNegativeOffsets() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1",
+ "no_long_term", "type=string,term_vector=with_positions_offsets",
+ "long_term", "type=string,term_vector=with_positions_offsets"));
+ ensureYellow();
+
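+        // The misspelling "highlighed" is shared between the fixture text and the
+        // queries below, so the assertions depend on that exact token.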
+ client().prepareIndex("test", "type1", "1")
+ .setSource("no_long_term", "This is a test where foo is highlighed and should be highlighted",
+ "long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted")
+ .get();
+ refresh();
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed"))
+ .addHighlightedField("long_term", 18, 1)
+ .get();
+ assertHighlight(search, 0, "long_term", 0, 1, equalTo("<em>thisisaverylongwordandmakessurethisfails</em>"));
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
+ .addHighlightedField("no_long_term", 18, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
+ .get();
+ assertNotHighlighted(search, 0, "no_long_term");
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
+ .addHighlightedField("no_long_term", 30, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
+ .get();
+
+ assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a <b>test</b> where <b>foo</b> is <b>highlighed</b> and"));
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                // we don't store title and don't use term vectors; now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "no").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .field("title", "This is a test on the highlighting bug present in elasticsearch")
+ .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
+ assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                // we don't store title; now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .field("title", "This is a test on the highlighting bug present in elasticsearch")
+ .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 2)
+ .execute().get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
+ assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                // we don't store title; now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("index_options", "offsets").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .array("title", "This is a test on the highlighting bug present in elasticsearch. Hopefully it works.",
+ "This is the second bug to perform highlighting on.")
+ .startArray("attachments").startObject().field("body", "attachment for this test").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+                // asking for the whole field to be highlighted
+ .addHighlightedField("title", -1, 0).get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch. Hopefully it works."));
+ assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+                // sentences will be generated out of each value
+ .addHighlightedField("title").get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch."));
+ assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 2)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> for this test"));
+ assertHighlight(search, i, "attachments.body", 1, 2, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testHighlightIssue1994() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=no", "titleTV", "type=string,store=no,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ indexRandom(false, client().prepareIndex("test", "type1", "1")
+ .setSource("title", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"},
+ "titleTV", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"}));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "2")
+ .setSource("titleTV", new String[]{"some text to highlight", "highlight other text"}));
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 2)
+ .addHighlightedField("titleTV", -1, 2)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ assertHighlight(search, 0, "title", 1, 2, equalTo("The <em>bug</em> is bugging us"));
+ assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The <em>bug</em> is bugging us"));
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("titleTV", "highlight"))
+ .addHighlightedField("titleTV", -1, 2)
+ .get();
+
+ assertHighlight(search, 0, "titleTV", 0, equalTo("some text to <em>highlight</em>"));
+ assertHighlight(search, 0, "titleTV", 1, 2, equalTo("<em>highlight</em> other text"));
+ }
+
+ @Test
+ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "this is another test").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1 and field2 produces different tags");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().order("score").preTags("<global>").postTags("</global>")
+ .field(new HighlightBuilder.Field("field1"))
+ .field(new HighlightBuilder.Field("field2").preTags("<field2>").postTags("</field2>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <global>test</global>"));
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("this is another <field2>test</field2>"));
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/5175
+ public void testHighlightingOnWildcardFields() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ "field-postings", "type=string,index_options=offsets",
+ "field-fvh", "type=string,term_vector=with_positions_offsets",
+ "field-plain", "type=string"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field-postings", "This is the first test sentence. Here is the second one.",
+ "field-fvh", "This is the test with term_vectors",
+ "field-plain", "This is the test for the plain highlighter").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field*");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field-plain", "test"))
+ .highlight(highlight().field("field*").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field-postings", 0, 1, equalTo("This is the first <xxx>test</xxx> sentence."));
+ assertHighlight(searchResponse, 0, "field-fvh", 0, 1, equalTo("This is the <xxx>test</xxx> with term_vectors"));
+ assertHighlight(searchResponse, 0, "field-plain", 0, 1, equalTo("This is the <xxx>test</xxx> for the plain highlighter"));
+ }
+
+ @Test
+ public void testForceSourceWithSourceDisabled() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+                        // just to make sure that we hit the stored fields rather than the _source
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets").endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content").get();
+ refresh();
+
+        // works using the stored field
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>"))
+ .get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("plain").forceSource(true))
+ .get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("fvh").forceSource(true))
+ .get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("postings").forceSource(true))
+ .get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
+ .highlight(highlight().forceSource(true).field("field1"));
+ searchResponse = client().search(Requests.searchRequest("test").source(searchSource)).get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
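+        // With a wildcard field pattern the failure message lists both matching
+        // fields, but their order is unspecified, hence the regex assertion.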
+ searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
+ .highlight(highlight().forceSource(true).field("field*"));
+ searchResponse = client().search(Requests.searchRequest("test").source(searchSource)).get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source"));
+ }
+
+ @Test
+ public void testPlainHighlighter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(prefixQuery("_all", "qui"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all with constant score, highlighting on field2");
+ source = searchSource()
+ .query(constantScoreQuery(prefixQuery("_all", "qui")))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all with constant score, highlighting on field2");
+ source = searchSource()
+ .query(boolQuery().should(constantScoreQuery(prefixQuery("_all", "qui"))))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testFastVectorHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added at the end
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added at the end
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(prefixQuery("_all", "qui"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added at the end
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+ }
+
+ /**
+ * The FVH can spend a long time highlighting degenerate documents if phraseLimit is not set.
+ */
+ @Test(timeout=120000)
+ public void testFVHManyMatches() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ // Index one megabyte of "t " over and over and over again
+ client().prepareIndex("test", "type1")
+ .setSource("field1", Joiner.on("").join(Iterables.limit(Iterables.cycle("t "), 1024*256))).get();
+ refresh();
+
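+ // Every one of the ~256k terms is a match; without the phrase_limit safeguard
+ // mentioned in the Javadoc above, the FVH could easily outlast the test timeout.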
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "t"))
+ .highlight(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("<xxx>").postTags("</xxx>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, containsString("<xxx>t</xxx>"));
+ logger.info("--> done");
+ }
+
+
+ @Test
+ public void testMatchedFieldsFvhRequireFieldMatch() throws Exception {
+ checkMatchedFieldsCase(true);
+ }
+
+ @Test
+ public void testMatchedFieldsFvhNoRequireFieldMatch() throws Exception {
+ checkMatchedFieldsCase(false);
+ }
+
+ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("store", "yes")
+ .field("analyzer", "english")
+ .endObject()
+ .startObject("plain")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("bar")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("bar")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("store", "yes")
+ .field("analyzer", "english")
+ .endObject()
+ .startObject("plain")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+
+ index("test", "type1", "1",
+ "foo", "running with scissors");
+ index("test", "type1", "2",
+ "foo", "cat cat junk junk junk junk junk junk junk cats junk junk",
+ "bar", "cat cat junk junk junk junk junk junk junk cats junk junk");
+ index("test", "type1", "3",
+ "foo", "weird",
+ "bar", "result");
+ refresh();
+
+ Field fooField = new Field("foo").numOfFragments(1).order("score").fragmentSize(25)
+ .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
+ Field barField = new Field("bar").numOfFragments(1).order("score").fragmentSize(25)
+ .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
+ SearchRequestBuilder req = client().prepareSearch("test").addHighlightedField(fooField);
+
+ // First check highlighting without any matched fields set
+ SearchResponse resp = req.setQuery(queryString("running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // And that matching a subfield doesn't automatically highlight it
+ resp = req.setQuery(queryString("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("running with <em>scissors</em>"));
+
+ // Add the subfield to the list of matched fields but don't match it. Everything should still work
+ // like before we added it.
+ fooField.matchedFields("foo", "foo.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now make half the matches come from the stored field and half from just a matched field.
+ resp = req.setQuery(queryString("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now remove the stored field from the matched field list. That should work too.
+ fooField.matchedFields("foo.plain");
+ resp = req.setQuery(queryString("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with scissors"));
+
+ // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field.
+ fooField.matchedFields("foo", "foo.plain");
+ resp = req.setQuery(queryString("foo.plain:running^5 scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now all of the matches come from the matched field. This still returns highlighting.
+ resp = req.setQuery(queryString("foo.plain:running foo.plain:scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // And query only the subfield via the queryString's field parameter, just in case
+ resp = req.setQuery(queryString("running scissors").field("foo.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Finding the same string two ways is ok too
+ resp = req.setQuery(queryString("run foo.plain:running^5 scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // But we use the best found score when sorting fragments
+ resp = req.setQuery(queryString("cats foo.plain:cats^5").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // which can also be written by searching on the subfield
+ resp = req.setQuery(queryString("cats").field("foo").field("foo.plain^5")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // Speaking of two fields, you can have two fields, only one of which has matchedFields enabled
+ QueryBuilder twoFieldsQuery = queryString("cats").field("foo").field("foo.plain^5")
+ .field("bar").field("bar.plain^5");
+ resp = req.setQuery(twoFieldsQuery).addHighlightedField(barField).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
+
+ // And you can enable matchedField highlighting on both
+ barField.matchedFields("bar", "bar.plain");
+ resp = req.get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // Setting a matchedField that isn't searched/doesn't exist is simply ignored.
+ barField.matchedFields("bar", "candy");
+ resp = req.get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
+
+ // If the stored field doesn't have a value it doesn't matter what you match, you get nothing.
+ barField.matchedFields("bar", "foo.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo.plain").field("bar")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
+
+ // If the stored field is found but the matched field isn't then you don't get a result either.
+ fooField.matchedFields("bar.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo")));
+
+ // But if you add the stored field to the list of matched fields then you'll get a result again
+ fooField.matchedFields("foo", "bar.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
+
+ // You _can_ highlight fields that aren't subfields of one another.
+ resp = req.setQuery(queryString("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>weird</em>"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>resul</em>t"));
+
+ // But be careful: it'll blow up if there is a match past the end of the stored field.
+ resp = req.setQuery(queryString("result").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertThat("Expected ShardFailures", resp.getShardFailures().length, greaterThan(0));
+ }
+
+ @Test
+ @Slow
+ public void testFastVectorHighlighterManyDocs() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ int COUNT = between(20, 100);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
+ for (int i = 0; i < COUNT; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "test " + i);
+ }
+ logger.info("--> indexing docs");
+ indexRandom(true, indexRequestBuilders);
+
+ logger.info("--> searching explicitly on field1 and highlighting on it");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added at the end
+ assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("<em>test</em> " + hit.id()));
+ }
+
+ logger.info("--> searching explicitly on field1 and highlighting on it, with DFS");
+ searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("<em>test</em> " + hit.id()));
+ }
+
+ logger.info("--> searching explicitly _all and highlighting on _all");
+ searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("_all", "test"))
+ .addHighlightedField("_all", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ assertHighlight(searchResponse, i, "_all", 0, 1, equalTo("<em>test</em> " + hit.id() + " "));
+ }
+ }
+
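+ /**
+ * Mapping used by the FVH tests: stores _all and enables term vectors with
+ * positions and offsets on field1 and field2, which the fast vector highlighter requires.
+ */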
+ public XContentBuilder type1TermVectorMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("termVector", "with_positions_offsets").endObject()
+ .startObject("field2").field("type", "string").field("termVector", "with_positions_offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ @Test
+ public void testSameContent() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testFastVectorHighlighterOffsetParameter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets").get());
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
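+ // The fourth highlight argument sets the fragment offset (reading the overload as
+ // fragmentSize, numberOfFragments, fragmentOffset), which is presumably why the
+ // 30-char fragment below starts mid-sentence instead of at the start of the title.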
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", 30, 1, 10)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added at the end
+ assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>bug</em> present in elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testEscapeHtml() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
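+ // The "html" encoder escapes special characters from the original text (the raw
+ // & becomes &amp;) while the inserted <em> tags are left intact.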
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testEscapeHtml_vector() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 30, 1, 10)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>test</em> for *&amp;? elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testMultiMapperVectorWithStore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+ // simple search on title with a simple match query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperVectorFromSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on title with a simple match query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperNoVectorWithStore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on title with a simple match query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperNoVectorFromSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on title with a simple match query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=no"));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the enabling fast vector highlighter");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+ assertNoFailures(search);
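+ // Without term vectors the default highlighter falls back to plain, so the request
+ // above succeeds; explicitly forcing the FVH below fails on both shards.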
+
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("title", 50, 1, 10)
+ .setHighlighterType("fast-vector-highlighter")
+ .execute().actionGet();
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("tit*", 50, 1, 10)
+ .setHighlighterType("fast-vector-highlighter")
+ .execute().actionGet();
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter"));
+ }
+ }
+
+ @Test
+ public void testDisableFastVectorHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets,analyzer=classic"));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ // Because of SOLR-3724 nothing is highlighted when FVH is used
+ assertNotHighlighted(search, i, "title");
+ }
+
+ // Using plain highlighter instead of FVH
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField("title", 50, 1, 10)
+ .setHighlighterType("highlighter")
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
+ }
+
+ // Using plain highlighter instead of FVH on the field level
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField(new HighlightBuilder.Field("title").highlighterType("highlighter"))
+ .setHighlighterType("highlighter")
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
+ }
+ }
+
+ @Test
+ public void testFVHHighlightAllMvFragments() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", "tags", "type=string,term_vector=with_positions_offsets"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource("tags", new String[]{
+ "this is a really long tag i would like to highlight",
+ "here is another one that is very long and has the tag token near the end"}).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "tag"))
+ .addHighlightedField("tags", -1, 0).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really long <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long and has the <em>tag</em> token near the end"));
+ }
+
+ @Test
+ public void testBoostingQuery() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testBoostingQueryTermVector() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(
+ searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testCommonTermsQuery() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(commonTerms("field2", "quick brown").cutoffFrequency(100))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testCommonTermsTermVector() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(commonTerms("field2", "quick brown").cutoffFrequency(100)).from(0).size(60)
+ .explain(true).highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(
+ searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testPhrasePrefix() throws ElasticsearchException, IOException {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "quick => fast");
+
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(builder.build()).addMapping("type1", type1TermVectorMapping())
+ .addMapping("type2", "_all", "store=yes,termVector=with_positions_offsets",
+ "field4", "type=string,term_vector=with_positions_offsets,analyzer=synonym",
+ "field3", "type=string,analyzer=synonym"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "0")
+ .setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog").get();
+ client().prepareIndex("test", "type1", "1")
+ .setSource("field1", "The quick browse button is a fancy thing, right bro?").get();
+ refresh();
+ logger.info("--> highlighting and searching on field0");
+ SearchSourceBuilder source = searchSource()
+ .query(matchPhrasePrefixQuery("field0", "quick bro"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field0").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field1");
+ source = searchSource()
+ .query(matchPhrasePrefixQuery("field1", "quick bro"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1").order("score").preTags("<x>").postTags("</x>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"));
+ assertHighlight(searchResponse, 1, "field1", 0, 1, equalTo("The <x>quick brown</x> fox jumps over the lazy dog"));
+
+ // with synonyms
+ client().prepareIndex("test", "type2", "0")
+ .setSource("field4", "The quick brown fox jumps over the lazy dog", "field3", "The quick brown fox jumps over the lazy dog").get();
+ client().prepareIndex("test", "type2", "1")
+ .setSource("field4", "The quick browse button is a fancy thing, right bro?").get();
+ client().prepareIndex("test", "type2", "2")
+ .setSource("field4", "a quick fast blue car").get();
+ refresh();
+
+ source = searchSource().postFilter(typeFilter("type2")).query(matchPhrasePrefixQuery("field3", "fast bro")).from(0).size(60).explain(true)
+ .highlight(highlight().field("field3").order("score").preTags("<x>").postTags("</x>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field4");
+ source = searchSource().postFilter(typeFilter("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro")).from(0).size(60).explain(true)
+ .highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field4", 0, 1, equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"));
+ assertHighlight(searchResponse, 1, "field4", 0, 1, equalTo("<x>The quick brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field4");
+ source = searchSource().postFilter(typeFilter("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")).from(0).size(60).explain(true)
+ .highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field4", 0, 1, equalTo("<x>a quick fast blue car</x>"));
+ }
+
+ @Test
+ public void testPlainHighlightDifferentFragmenter() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", "tags", "type=string"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("tags",
+ "this is a really long tag i would like to highlight",
+ "here is another one that is very long tag and has the tag token near the end").endObject()).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("simple")).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
+
+ response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("span")).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
+
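+ // An unknown fragmenter name is rejected per shard; the failure surfaces as a
+ // SearchPhaseExecutionException carrying a BAD_REQUEST status.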
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("invalid")).get();
+ fail("Shouldn't get here");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ @Test
+ public void testPlainHighlighterMultipleFields() {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
+ }
+
+ @Test
+ public void testFastVectorHighlighterMultipleFields() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,term_vectors=with_positions_offsets", "field2", "type=string,term_vectors=with_positions_offsets"));
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
+ }
+
+ @Test
+ public void testMissingStoredField() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", "highlight_field", "type=string,store=yes"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject()
+ .field("field", "highlight")
+ .endObject()).get();
+ refresh();
+
+ // This query used to fail when the field to highlight was absent
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field", "highlight").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField(new HighlightBuilder.Field("highlight_field")
+ .fragmentSize(-1).numOfFragments(1).fragmenter("simple")).get();
+ assertThat(response.getHits().hits()[0].highlightFields().isEmpty(), equalTo(true));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/3211
+ public void testNumericHighlighting() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "text", "type=string,index=analyzed",
+ "byte", "type=byte", "short", "type=short", "int", "type=integer", "long", "type=long",
+ "float", "type=float", "double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("text", "elasticsearch test",
+ "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField("text")
+ .addHighlightedField("byte")
+ .addHighlightedField("short")
+ .addHighlightedField("int")
+ .addHighlightedField("long")
+ .addHighlightedField("float")
+ .addHighlightedField("double")
+ .get();
+ // Highlighting of numeric fields is not supported, but it should not raise errors
+ // (this behavior is consistent with version 0.20)
+ assertHitCount(response, 1L);
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/3200
+ public void testResetTwice() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(ImmutableSettings.builder()
+ .put("analysis.analyzer.my_analyzer.type", "pattern")
+ .put("analysis.analyzer.my_analyzer.pattern", "\\s+")
+ .build())
+ .addMapping("type", "text", "type=string,analyzer=my_analyzer"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource("text", "elasticsearch test").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField("text").execute().actionGet();
+ // PatternAnalyzer will throw an exception if it is reset twice
+ assertHitCount(response, 1L);
+ }
+
+ @Test
+ public void testHighlightUsesHighlightQuery() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "Testing the highlight query feature");
+ refresh();
+
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text");
+
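+ // highlight_query lets highlighting run a different query from the one used for
+ // matching; the doc is highlighted first via the search query, then via an
+ // explicit highlight query set on the field, then one set on the whole request.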
+ SearchRequestBuilder search = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing"))
+ .addHighlightedField(field);
+ Matcher<String> searchQueryMatcher = equalTo("<em>Testing</em> the highlight query feature");
+
+ field.highlighterType("plain");
+ SearchResponse response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+ field.highlighterType("fvh");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+
+
+ Matcher<String> hlQueryMatcher = equalTo("Testing the highlight <em>query</em> feature");
+ field.highlightQuery(matchQuery("text", "query"));
+
+ field.highlighterType("fvh");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("plain");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field
+ search.setHighlighterQuery(matchQuery("text", "query"));
+ field.highlighterType("fvh").highlightQuery(null);
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("plain");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+ }
+
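+ /**
+ * Randomly appends "store=yes," so the highlighters are exercised both against
+ * stored fields and against values loaded from _source.
+ */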
+ private static String randomStoreField() {
+ if (randomBoolean()) {
+ return "store=yes,";
+ }
+ return "";
+ }
+
+ @Test
+ public void testHighlightNoMatchSize() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text = "I am pretty long so some of me should get cut off. Second sentence";
+ index("test", "type1", "1", "text", text);
+ refresh();
+
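+ // no_match_size asks the highlighter to return the leading text of the field
+ // even when the query matched nothing in it.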
+ // When you don't set noMatchSize you don't get any results if there isn't anything to highlight.
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(21)
+ .numOfFragments(1)
+ .highlighterType("plain");
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // When noMatchSize is set to 0 you also shouldn't get any highlights
+ field.highlighterType("plain").noMatchSize(0);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // When noMatchSize is between 0 and the size of the string
+ field.highlighterType("plain").noMatchSize(21);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ // The FVH also works but the fragment is longer than the plain highlighter because of boundary_max_scan
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We can also ask for a fragment longer than the input string and get the whole string
+ field.highlighterType("plain").noMatchSize(text.length() * 2);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ // no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We can also ask for a fragment exactly the size of the input field and get the whole field
+ field.highlighterType("plain").noMatchSize(text.length());
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ // no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // You can set noMatchSize globally in the highlighter as well
+ field.highlighterType("plain").noMatchSize(null);
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We don't break if noMatchSize is less than zero though
+ field.highlighterType("plain").noMatchSize(randomIntBetween(Integer.MIN_VALUE, -1));
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+ }
+
+ @Test
+ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text1 = "I am pretty long so some of me should get cut off. We'll see how that goes.";
+ String text2 = "I am short";
+ index("test", "type1", "1", "text", new String[] {text1, text2});
+ refresh();
+
+ // The no match fragment should come from the first value of a multi-valued field
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(21)
+ .numOfFragments(1)
+ .highlighterType("plain")
+ .noMatchSize(21);
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // And noMatchSize returns nothing when the first entry is an empty string
+ index("test", "type1", "2", "text", new String[] {"", text2});
+ refresh();
+
+ IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // But if the field is actually empty then you should get no highlighted field at all
+ index("test", "type1", "3", "text", new String[] {});
+ refresh();
+ idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // The same applies if the field doesn't even exist on the document
+ index("test", "type1", "4");
+ refresh();
+
+ idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // Again, the same applies if the field isn't mapped
+ field = new HighlightBuilder.Field("unmapped")
+ .highlighterType("plain")
+ .noMatchSize(21);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+ }
+
+ @Test
+ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text1 = "This is the first sentence. This is the second sentence.";
+ String text2 = "This is the third sentence. This is the fourth sentence.";
+ String text3 = "This is the fifth sentence";
+ index("test", "type1", "1", "text", new String[] {text1, text2, text3});
+ refresh();
+
+ // The no match fragment should come from the first value of a multi-valued field
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(1)
+ .numOfFragments(0)
+ .highlighterType("plain")
+ .noMatchSize(20);
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence."));
+
+ //if there's a match, only the values containing matches are returned (each as a whole value, since number_of_fragments == 0)
+ MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+ }
+
+ @Test
+ public void testPostingsHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy <xxx>quick</xxx> dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(matchPhraseQuery("_all", "quick brown"))
+ .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ //the phrase query results in all matching terms being highlighted, regardless of their positions
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy <xxx>quick</xxx> dog"));
+
+ //let's fall back to the plain highlighter then, which is what people would do to highlight query matches ("highlighter" is an alias for the plain highlighter)
+ logger.info("--> searching on _all, highlighting on field2, falling back to the plain highlighter");
+ source = searchSource()
+ .query(matchPhraseQuery("_all", "quick brown"))
+ .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>").highlighterType("highlighter"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy quick dog"));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultipleFields() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()).get());
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox. Second sentence.", "field2", "The <b>slow<b> brown fox. Second sentence.");
+ refresh();
+
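+ // field1 requires a field match while field2 opts out: the query only targets field1,
+ // yet field2 still gets "fox" highlighted thanks to requireFieldMatch(false)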
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>."));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>."));
+ }
+
+ @Test
+ public void testPostingsHighlighterNumberOfFragments() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.",
+ "field2", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").numOfFragments(5).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").numOfFragments(2).preTags("<field2>").postTags("</field2>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ assertHighlight(searchResponse, 0, "field2", 0, equalTo("The quick brown <field2>fox</field2> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field2", 1, 2, equalTo("The lazy red <field2>fox</field2> jumps over the quick dog."));
+
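+ // index a second document with three values to check that number_of_fragments = 0
+ // returns one fragment per field value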
+ client().prepareIndex("test", "type1", "2")
+ .setSource("field1", new String[]{"The quick brown fox jumps over the lazy dog. Second sentence not finished", "The lazy red fox jumps over the quick dog.", "The quick brown dog jumps over the lazy fox."}).get();
+ refresh();
+
+ source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").numOfFragments(0).preTags("<field1>").postTags("</field1>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 2L);
+
+ for (SearchHit searchHit : searchResponse.getHits()) {
+ if ("1".equals(searchHit.id())) {
+ assertHighlight(searchHit, "field1", 0, 1, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. The lazy red <field1>fox</field1> jumps over the quick dog. The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ } else if ("2".equals(searchHit.id())) {
+ assertHighlight(searchHit, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. Second sentence not finished"));
+ assertHighlight(searchHit, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ } else {
+ fail("Only hits with id 1 and 2 are returned");
+ }
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterRequireFieldMatch() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.",
+ "field2", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").requireFieldMatch(true).preTags("<field2>").postTags("</field2>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ //field2 is not returned highlighted because the require_field_match option is set to true
+ assertNotHighlighted(searchResponse, 0, "field2");
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ logger.info("--> highlighting and searching on field1 and field2 - require field match set to false");
+ source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").requireFieldMatch(false).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").requireFieldMatch(false).preTags("<field2>").postTags("</field2>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ //field2 is now returned highlighted thanks to require_field_match set to false
+ assertHighlight(searchResponse, 0, "field2", 0, equalTo("The quick brown <field2>fox</field2> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field2", 1, equalTo("The lazy red <field2>fox</field2> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field2", 2, 3, equalTo("The quick brown dog jumps over the lazy <field2>fox</field2>."));
+
+ logger.info("--> highlighting and searching on field1 and field2 via multi_match query");
+ source = searchSource()
+ .query(multiMatchQuery("fox", "field1", "field2"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").requireFieldMatch(true).preTags("<field2>").postTags("</field2>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 1L);
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ //field2 is now returned highlighted thanks to the multi_match query on both fields
+ assertHighlight(searchResponse, 0, "field2", 0, equalTo("The quick brown <field2>fox</field2> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field2", 1, equalTo("The lazy red <field2>fox</field2> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field2", 2, 3, equalTo("The quick brown dog jumps over the lazy <field2>fox</field2>."));
+ }
+
+ @Test
+ public void testPostingsHighlighterOrderByScore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", new String[]{"This sentence contains one match, not that short. This sentence contains two sentence matches. This one contains no matches.",
+ "This is the second value's first sentence. This one contains no matches. This sentence contains three sentence occurrences (sentence).",
+ "One sentence match here and scored lower since the text is quite long, not that appealing. This one contains no matches."}).get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "sentence"))
+ .highlight(highlight().field("field1").order("score"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
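+ // with order("score") the fragments come back sorted by relevance rather than by
+ // their position in the field values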
+ Map<String,HighlightField> highlightFieldMap = searchResponse.getHits().getAt(0).highlightFields();
+ assertThat(highlightFieldMap.size(), equalTo(1));
+ HighlightField field1 = highlightFieldMap.get("field1");
+ assertThat(field1.fragments().length, equalTo(5));
+ assertThat(field1.fragments()[0].string(), equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>)."));
+ assertThat(field1.fragments()[1].string(), equalTo("This <em>sentence</em> contains two <em>sentence</em> matches."));
+ assertThat(field1.fragments()[2].string(), equalTo("This is the second value's first <em>sentence</em>."));
+ assertThat(field1.fragments()[3].string(), equalTo("This <em>sentence</em> contains one match, not that short."));
+ assertThat(field1.fragments()[4].string(), equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing."));
+
+ //let's now use number_of_fragments = 0, so that we highlight per value without breaking values into snippets, while still sorting the values by score
+ source = searchSource()
+ .query(termQuery("field1", "sentence"))
+ .highlight(highlight().field("field1", -1, 0).order("score"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("This is the second value's first <em>sentence</em>. This one contains no matches. This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>)."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("This <em>sentence</em> contains one match, not that short. This <em>sentence</em> contains two <em>sentence</em> matches. This one contains no matches."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing. This one contains no matches."));
+ }
+
+ @Test
+ public void testPostingsHighlighterEscapeHtml() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string," + randomStoreField() + "index_options=offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
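+ // the html encoder must escape the raw "&" in the indexed text while leaving the
+ // highlighter's own <em> tags intact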
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title").get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(searchResponse, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&amp;?"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiMapperWithStore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ //just to make sure that we hit the stored fields rather than the _source
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test . Second sentence.").get();
+ refresh();
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse searchResponse = client().prepareSearch()
+ //let's make sure the query gets analyzed and the resulting terms are highlighted
+ .setQuery(matchQuery("title", "This is a Test"))
+ .addHighlightedField("title").get();
+
+ assertHitCount(searchResponse, 1L);
+ SearchHit hit = searchResponse.getHits().getAt(0);
+ assertThat(hit.source(), nullValue());
+ //stopwords are not highlighted since the classic analyzer doesn't index them
+ assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> ."));
+
+ // search on title.key and highlight on title
+ searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .addHighlightedField("title.key").get();
+ assertHitCount(searchResponse, 1L);
+
+ //stopwords are now highlighted since only the whitespace analyzer is used here
+ assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em> ."));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiMapperFromSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .get();
+
+ assertHighlight(searchResponse, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .addHighlightedField("title.key").get();
+
+ assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "string").field("store", "yes").field("index_options", "docs").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the postings highlighter");
+ }
+ indexRandom(true, indexRequestBuilders);
+
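+ // without an explicit highlighter type the plain highlighter gets picked, so the
+ // search succeeds even though the field is indexed with index_options=docs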
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .get();
+ assertNoFailures(search);
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .setHighlighterType("postings-highlighter")
+ .get();
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .setHighlighterType("postings")
+ .get();
+
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("tit*")
+ .setHighlighterType("postings")
+ .get();
+
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterBoostingQuery() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.")
+ .get();
+ refresh();
+
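+ // the positive clause of the boosting query drives the highlighting; only the
+ // sentence containing the match is returned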
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterCommonTermsQuery() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
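+ // with such a high cutoff frequency both terms count as low frequency, so both
+ // "quick" and "brown" get highlighted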
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(commonTerms("field2", "quick brown").cutoffFrequency(100))
+ .highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 1L);
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog!"));
+ }
+
+ public XContentBuilder type1PostingsOffsetsMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("index_options", "offsets").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("index_options", "offsets").endObject()
+ .startObject("field2").field("type", "string").field("index_options", "offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
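+ // multi-term queries get rewritten before highlighting; the tests below run against
+ // every rewrite method to make sure the extracted terms survive the rewrite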
+ private static final String[] REWRITE_METHODS = new String[]{"constant_score_auto", "scoring_boolean", "constant_score_boolean",
+ "constant_score_filter", "top_terms_boost_50", "top_terms_50"};
+
+ @Test
+ public void testPostingsHighlighterPrefixQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterFuzzyQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterRegexpQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterWildcardQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+
+ source = searchSource().query(wildcardQuery("field2", "qu*k").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHitCount(searchResponse, 1L);
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterTermRangeQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "aaab").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("<em>aaab</em>"));
+ }
+
+ @Test
+ public void testPostingsHighlighterQueryString() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(queryString("qui*").defaultField("field2").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws Exception {
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+").rewrite(rewriteMethod)))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Exception {
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(boolQuery()
+ .should(constantScoreQuery(FilterBuilders.missingFilter("field1")))
+ .should(matchQuery("field1", "test"))
+ .should(filteredQuery(queryString("field1:photo*").rewrite(rewriteMethod), null)))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Exception {
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(boolQuery().must(prefixQuery("field1", "photo").rewrite(rewriteMethod)).should(matchQuery("field1", "test").minimumShouldMatch("0")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Exception {
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(filteredQuery(queryString("field1:photo*").rewrite(rewriteMethod), missingFilter("field_null")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ @Slow
+ public void testPostingsHighlighterManyDocs() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ ensureGreen();
+
+ int COUNT = between(20, 100);
+ Map<String, String> prefixes = new HashMap<String, String>(COUNT);
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
+ for (int i = 0; i < COUNT; i++) {
+ //generating text with the word to highlight at a different position in each doc
+ //(https://github.com/elasticsearch/elasticsearch/issues/4103)
+ String prefix = randomAsciiOfLengthBetween(5, 30);
+ prefixes.put(String.valueOf(i), prefix);
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "Sentence " + prefix
+ + " test. Sentence two.");
+ }
+ logger.info("--> indexing docs");
+ indexRandom(true, indexRequestBuilders);
+
+ logger.info("--> searching explicitly on field1 and highlighting on it");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1")
+ .get();
+ assertHitCount(searchResponse, (long)COUNT);
+ assertThat(searchResponse.getHits().hits().length, equalTo(COUNT));
+ for (SearchHit hit : searchResponse.getHits()) {
+ String prefix = prefixes.get(hit.id());
+ assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " <em>test</em>."));
+ }
+
+ logger.info("--> searching explicitly on field1 and highlighting on it, with DFS");
+ searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1")
+ .get();
+ assertHitCount(searchResponse, (long)COUNT);
+ assertThat(searchResponse.getHits().hits().length, equalTo(COUNT));
+ for (SearchHit hit : searchResponse.getHits()) {
+ String prefix = prefixes.get(hit.id());
+ assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " <em>test</em>."));
+ }
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/4116
+ public void testPostingsHighlighterCustomIndexName() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=offsets,index_name=my_field"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "First sentence. Second sentence.").get();
+ refresh();
+
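+ // the field can be queried and highlighted through either its mapped name or its
+ // index_name alias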
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("field1", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("my_field").get();
+ assertHighlight(searchResponse, 0, "my_field", 0, 1, equalTo("<em>First</em> sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first"))
+ .addHighlightedField("field1").setHighlighterRequireFieldMatch(true).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence."));
+ }
+
+ @Test
+ public void testFastVectorHighlighterCustomIndexName() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets,index_name=my_field"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "First sentence. Second sentence.").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("field1", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("my_field").get();
+ assertHighlight(searchResponse, 0, "my_field", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first"))
+ .addHighlightedField("field1").setHighlighterRequireFieldMatch(true).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+ }
+
+ @Test
+ public void testPlainHighlighterCustomIndexName() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_name=my_field"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "First sentence. Second sentence.").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("field1", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("my_field").get();
+ assertHighlight(searchResponse, 0, "my_field", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first"))
+ .addHighlightedField("field1").setHighlighterRequireFieldMatch(true).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+ }
+
+ @Test
+ public void testFastVectorHighlighterPhraseBoost() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ phraseBoostTestCase("fvh");
+ }
+
+ @Test
+ public void testPostingsHighlighterPhraseBoost() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsOffsetsMapping()));
+ phraseBoostTestCase("postings");
+ }
+
+ /**
+ * Test phrase boosting over normal term matches. Note that this will never pass with the plain highlighter
+ * because it doesn't support the concept of terms having a different weight based on position.
+ * @param highlighterType highlighter to test
+ */
+ private void phraseBoostTestCase(String highlighterType) {
+ ensureGreen();
+ StringBuilder text = new StringBuilder();
+ text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n");
+ for (int i = 0; i<10; i++) {
+ text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
+ }
+ text.append("highlight words together\n");
+ for (int i = 0; i<10; i++) {
+ text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
+ }
+ index("test", "type1", "1", "field1", text.toString());
+ refresh();
+
+ // Match queries
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ matchQuery("field1", "highlight words together"),
+ matchPhraseQuery("field1", "highlight words together"));
+
+ // Query string with a single field
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryString("highlight words together").field("field1"),
+ queryString("\"highlight words together\"").field("field1").autoGeneratePhraseQueries(true));
+
+ // Query string with a single field without dismax
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryString("highlight words together").field("field1").useDisMax(false),
+ queryString("\"highlight words together\"").field("field1").useDisMax(false).autoGeneratePhraseQueries(true));
+
+ // Query string with more than one field
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryString("highlight words together").field("field1").field("field2"),
+ queryString("\"highlight words together\"").field("field1").field("field2").autoGeneratePhraseQueries(true));
+
+ // Query string boosting the field
+ phraseBoostTestCaseForClauses(highlighterType, 1f,
+ queryString("highlight words together").field("field1"),
+ queryString("\"highlight words together\"").field("field1^100").autoGeneratePhraseQueries(true));
+ }
+
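+ // runs the same phrase-vs-terms check three ways: the phrase boosted inside a bool
+ // query, via a boosting query, and via a negative boost on the terms clause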
+ private <P extends QueryBuilder & BoostableQueryBuilder> void
+ phraseBoostTestCaseForClauses(String highlighterType, float boost, QueryBuilder terms, P phrase) {
+ Matcher<String> highlightedMatcher = Matchers.<String>either(containsString("<em>highlight words together</em>")).or(
+ containsString("<em>highlight</em> <em>words</em> <em>together</em>"));
+ SearchRequestBuilder search = client().prepareSearch("test").setHighlighterRequireFieldMatch(true)
+ .setHighlighterOrder("score").setHighlighterType(highlighterType)
+ .addHighlightedField("field1", 100, 1);
+
+ // Try with a bool query
+ phrase.boost(boost);
+ SearchResponse response = search.setQuery(boolQuery().must(terms).should(phrase)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ phrase.boost(1);
+ // Try with a boosting query
+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(boost).negativeBoost(1)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ // Try with a boosting query using a negative boost
+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(1).negativeBoost(1/boost)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java b/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java
new file mode 100644
index 0000000..50d4428
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.indicesboost;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleIndicesBoostSearchTests extends ElasticsearchIntegrationTest {
+
+ private static final Settings DEFAULT_SETTINGS = ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ @Test
+ public void testIndicesBoost() throws Exception {
+ ElasticsearchAssertions.assertHitCount(client().prepareSearch().setQuery(termQuery("test", "value")).get(), 0);
+
+ try {
+ client().prepareSearch("test").setQuery(termQuery("test", "value")).execute().actionGet();
+ fail("should fail");
+ } catch (Exception e) {
+ // ignore, no indices
+ }
+
+ client().admin().indices().create(createIndexRequest("test1").settings(DEFAULT_SETTINGS)).actionGet();
+ client().admin().indices().create(createIndexRequest("test2").settings(DEFAULT_SETTINGS)).actionGet();
+ client().index(indexRequest("test1").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").endObject())).actionGet();
+ client().index(indexRequest("test2").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ float indexBoost = 1.1f;
+
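+ // the boosted index must always rank first; the explanations are logged to help
+ // debug scoring when this fails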
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("Query with test1 boosted");
+ SearchResponse response = client().search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2L));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test2"));
+
+ logger.info("Query with test2 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2L));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test1"));
+
+ logger.info("--- DFS_QUERY_THEN_FETCH");
+
+ logger.info("Query with test1 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2L));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test2"));
+
+ logger.info("Query with test2 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2L));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test1"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java b/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java
new file mode 100644
index 0000000..ba510ab
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.matchedqueries;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItemInArray;
+
+/**
+ * Tests that search hits report the names of the queries and filters they matched.
+ */
+public class MatchedQueriesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMatchedQueryFromFilteredQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test1", "number", 1).get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test2", "number", 2).get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test3", "number", 3).get();
+ refresh();
+
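+ // each hit should report the name of the range filter it matched ("test1" for docs 1 and 2, "test2" for doc 3)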
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), orFilter(rangeFilter("number").lte(2).filterName("test1"), rangeFilter("number").gt(2).filterName("test2")))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test1"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test2"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test1"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test2"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void simpleMatchedQueryFromTopLevelFilter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test", "title", "title1").get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test").get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(orFilter(
+ termFilter("name", "test").filterName("name"),
+ termFilter("title", "title1").filterName("title"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else if (hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(queryFilter(boolQuery()
+ .should(termQuery("name", "test").queryName("name"))
+ .should(termQuery("title", "title1").queryName("title")))).get();
+
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else if (hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void simpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test", "title", "title1").get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test", "title", "title2").get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test", "title", "title3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("title", "title1", "title2", "title3").filterName("title")))
+ .setPostFilter(termFilter("name", "test").filterName("name")).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+ .setPostFilter(queryFilter(matchQuery("name", "test").queryName("name"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesFilterSupportsName() {
+ createIndex("test1", "test2");
+ ensureGreen();
+
+ client().prepareIndex("test1", "type1", "1").setSource("title", "title1").get();
+ client().prepareIndex("test2", "type1", "2").setSource("title", "title2").get();
+ client().prepareIndex("test2", "type1", "3").setSource("title", "title3").get();
+ refresh();
+
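+ // the indices filter applies the named "title1" filter on index test1 and the "title2" no-match filter everywhere else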
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ orFilter(
+ indicesFilter(termFilter("title", "title1").filterName("title1"), "test1")
+ .noMatchFilter(termFilter("title", "title2").filterName("title2")).filterName("indices_filter"),
+ termFilter("title", "title3").filterName("title3")).filterName("or"))).get();
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(3));
+ assertThat(hit.matchedQueries(), hasItemInArray("indices_filter"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title1"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else if (hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(3));
+ assertThat(hit.matchedQueries(), hasItemInArray("indices_filter"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title2"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("title3"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ /**
+ * Test case for issue #4361: https://github.com/elasticsearch/elasticsearch/issues/4361
+ */
+ @Test
+ public void testMatchedWithShould() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("content", "Lorem ipsum dolor sit amet").get();
+ client().prepareIndex("test", "type1", "2").setSource("content", "consectetur adipisicing elit").get();
+ refresh();
+
+ // Execute the search at least twice so that it gets loaded into the cache
+ int iter = atLeast(2);
+ for (int i = 0; i < iter; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ boolQuery()
+ .minimumNumberShouldMatch(1)
+ .should(queryString("dolor").queryName("dolor"))
+ .should(queryString("elit").queryName("elit"))
+ )
+ .setPreference("_primary")
+ .get();
+
+ assertHitCount(searchResponse, 2l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("dolor"));
+ } else if (hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("elit"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java b/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java
new file mode 100644
index 0000000..dc3b1df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.msearch;
+
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Basic tests for the multi-search API.
+ */
+public class SimpleMultiSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMultiSearch() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("field", "xxx").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "yyy").execute().actionGet();
+ refresh();
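+ // a single multi-search request carrying three searches: one term query per document plus a match_all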
+ MultiSearchResponse response = client().prepareMultiSearch()
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+
+ for (MultiSearchResponse.Item item : response) {
+ assertNoFailures(item.getResponse());
+ }
+ assertThat(response.getResponses().length, equalTo(3));
+ assertHitCount(response.getResponses()[0].getResponse(), 1l);
+ assertHitCount(response.getResponses()[1].getResponse(), 1l);
+ assertHitCount(response.getResponses()[2].getResponse(), 2l);
+ assertFirstHit(response.getResponses()[0].getResponse(), hasId("1"));
+ assertFirstHit(response.getResponses()[1].getResponse(), hasId("2"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
new file mode 100644
index 0000000..8a6c494
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.preference;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
+
+ @Test // see #2896
+ public void testStopOneNodePreferenceWithRedState() throws InterruptedException {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", cluster().size()+2).put("index.number_of_replicas", 0)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", ""+i).setSource("field1", "value1").execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ cluster().stopRandomNode();
+ client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).execute().actionGet();
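+ // searches with these preferences should still answer (possibly with partial results) while the cluster is red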
+ String[] preferences = new String[] {"_primary", "_local", "_primary_first", "_only_local", "_prefer_node:somenode", "_prefer_node:server2"};
+ for (String pref : preferences) {
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT).setPreference(pref).execute().actionGet();
+ assertThat(RestStatus.OK, equalTo(searchResponse.status()));
+ assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ searchResponse = client().prepareSearch().setPreference(pref).execute().actionGet();
+ assertThat(RestStatus.OK, equalTo(searchResponse.status()));
+ assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ }
+ }
+
+
+ @Test
+ public void noPreferenceRandom() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
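+ // with no preference set, consecutive searches should be routed to different copies of the shard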
+ final Client client = cluster().smartClient();
+ SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
+ String firstNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
+ searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
+ String secondNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
+
+ assertThat(firstNodeId, not(equalTo(secondNodeId)));
+ }
+
+ @Test
+ public void simplePreferenceTests() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_local").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_local").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test(expected = ElasticsearchIllegalArgumentException.class)
+ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareSearch().setQuery(matchAllQuery()).setPreference("_only_node:DOES-NOT-EXIST").execute().actionGet();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java b/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java
new file mode 100644
index 0000000..3127d72
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java
@@ -0,0 +1,375 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.query;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class MultiMatchQueryTests extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void init() throws Exception {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.perfect_match.type", "custom")
+ .put("index.analysis.analyzer.perfect_match.tokenizer", "keyword")
+ .put("index.analysis.analyzer.perfect_match.filter", "lowercase")
+ .put("index.analysis.analyzer.category.type", "custom")
+ .put("index.analysis.analyzer.category.tokenizer", "whitespace")
+ .put("index.analysis.analyzer.category.filter", "lowercase")
+ );
+ assertAcked(builder.addMapping("test", createMapping()));
+ ensureGreen();
+ int numDocs = atLeast(50);
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ builders.add(client().prepareIndex("test", "test", "theone").setSource(
+ "full_name", "Captain America",
+ "first_name", "Captain",
+ "last_name", "America",
+ "category", "marvel hero"));
+ builders.add(client().prepareIndex("test", "test", "theother").setSource(
+ "full_name", "marvel hero",
+ "first_name", "marvel",
+ "last_name", "hero",
+ "category", "bogus"));
+
+ builders.add(client().prepareIndex("test", "test", "ultimate1").setSource(
+ "full_name", "Alpha the Ultimate Mutant",
+ "first_name", "Alpha the",
+ "last_name", "Ultimate Mutant",
+ "category", "marvel hero"));
+ builders.add(client().prepareIndex("test", "test", "ultimate2").setSource(
+ "full_name", "Man the Ultimate Ninja",
+ "first_name", "Man the Ultimate",
+ "last_name", "Ninja",
+ "category", "marvel hero"));
+
+ builders.add(client().prepareIndex("test", "test", "anotherhero").setSource(
+ "full_name", "ultimate",
+ "first_name", "wolferine",
+ "last_name", "",
+ "category", "marvel hero"));
+ List<String> firstNames = new ArrayList<String>();
+ fill(firstNames, "Captain", between(15, 25));
+ fill(firstNames, "Ultimate", between(5, 10));
+ fillRandom(firstNames, between(3, 7));
+ List<String> lastNames = new ArrayList<String>();
+ fill(lastNames, "Captain", between(3, 7));
+ fillRandom(lastNames, between(30, 40));
+ for (int i = 0; i < numDocs; i++) {
+ String first = RandomPicks.randomFrom(getRandom(), firstNames);
+ String last = randomPickExcept(lastNames, first);
+ builders.add(client().prepareIndex("test", "test", "" + i).setSource(
+ "full_name", first + " " + last,
+ "first_name", first,
+ "last_name", last,
+ "category", randomBoolean() ? "marvel hero" : "bogus"));
+ }
+ indexRandom(true, builders);
+ }
+
+ @Test
+ public void testDefaults() throws ExecutionException, InterruptedException {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR)).get();
+ Set<String> topNIds = Sets.newHashSet("theone", "theother");
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ topNIds.remove(searchResponse.getHits().getAt(i).getId());
+ // random docs are very likely to share the top score, so the order among them is arbitrary:
+ // the doc id is the tie-breaker
+ }
+ assertThat(topNIds, empty());
+ assertThat(searchResponse.getHits().hits()[0].getScore(), equalTo(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).type(type)).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).type(type)).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+ }
+
+ private XContentBuilder createMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("test")
+ .startObject("properties")
+ .startObject("full_name")
+ .field("type", "string")
+ .field("copy_to", "full_name_phrase")
+ .field("analyzer", "perfect_match")
+ .endObject()
+ .startObject("category")
+ .field("type", "string")
+ .field("analyzer", "category")
+ .field("index_option", "docs")
+ .endObject()
+ .startObject("first_name")
+ .field("type", "string")
+ .field("omit_norms", "true")
+ .field("copy_to", "first_name_phrase")
+ .field("index_option", "docs")
+ .endObject()
+ .startObject("last_name")
+ .field("type", "string")
+ .field("omit_norms", "true")
+ .field("copy_to", "last_name_phrase")
+ .field("index_option", "docs")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ @Test
+ public void testPhraseType() {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE)).get();
+ assertFirstHit(searchResponse, hasId("ultimate2"));
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE)).get();
+ assertThat(searchResponse.getHits().getTotalHits(), greaterThan(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("the Ul", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE_PREFIX)).get();
+ assertFirstHit(searchResponse, hasId("ultimate2"));
+ assertSecondHit(searchResponse, hasId("ultimate1"));
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testCutoffFreq() throws ExecutionException, InterruptedException {
+ final long numDocs = client().prepareCount("test")
+ .setQuery(matchAllQuery()).get().getCount();
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
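+ // pick a cutoff frequency either relative to the number of docs or as a fixed small fraction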
+ Float cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).cutoffFrequency(cutoffFrequency)).get();
+ Set<String> topNIds = Sets.newHashSet("theone", "theother");
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ topNIds.remove(searchResponse.getHits().getAt(i).getId());
+ // random docs are very likely to share the top score, so the order among them is arbitrary:
+ // the doc id is the tie-breaker
+ }
+ assertThat(topNIds, empty());
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThanOrEqualTo(searchResponse.getHits().hits()[1].getScore()));
+
+ cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+ long size = searchResponse.getHits().getTotalHits();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).type(type)).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat("common terms expected to be a way smaller result set", size, lessThan(searchResponse.getHits().getTotalHits()));
+
+ cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+ }
+
+
+ @Test
+ public void testEquivalence() {
+ final int numDocs = (int) client().prepareCount("test")
+ .setQuery(matchAllQuery()).get().getCount();
+ int numIters = atLeast(5);
+ for (int i = 0; i < numIters; i++) {
+ {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).type(type)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(disMaxQuery()
+ .add(matchQuery("full_name", "marvel hero captain america"))
+ .add(matchQuery("first_name", "marvel hero captain america"))
+ .add(matchQuery("last_name", "marvel hero captain america"))
+ .add(matchQuery("category", "marvel hero captain america"))
+ ).get();
+ assertEquivalent("marvel hero captain america", left, right);
+ }
+
+ {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(op).useDisMax(false).minimumShouldMatch(minShouldMatch).type(type)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(randomBoolean() ? termQuery("full_name", "captain america") : matchQuery("full_name", "captain america").operator(op))
+ .should(matchQuery("first_name", "captain america").operator(op))
+ .should(matchQuery("last_name", "captain america").operator(op))
+ .should(matchQuery("category", "captain america").operator(op))
+ ).get();
+ assertEquivalent("captain america", left, right);
+ }
+
+ {
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("capta", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE_PREFIX).useDisMax(false).minimumShouldMatch(minShouldMatch)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(matchPhrasePrefixQuery("full_name", "capta"))
+ .should(matchPhrasePrefixQuery("first_name", "capta").operator(op))
+ .should(matchPhrasePrefixQuery("last_name", "capta").operator(op))
+ .should(matchPhrasePrefixQuery("category", "capta").operator(op))
+ ).get();
+ assertEquivalent("capta", left, right);
+ }
+ {
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE).useDisMax(false).minimumShouldMatch(minShouldMatch)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(matchPhraseQuery("full_name", "captain america"))
+ .should(matchPhraseQuery("first_name", "captain america").operator(op))
+ .should(matchPhraseQuery("last_name", "captain america").operator(op))
+ .should(matchPhraseQuery("category", "captain america").operator(op))
+ ).get();
+ assertEquivalent("captain america", left, right);
+ }
+ }
+ }
+
+
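+ // verifies that both responses contain the same hits with (nearly) identical scores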
+ private static final void assertEquivalent(String query, SearchResponse left, SearchResponse right) {
+ assertNoFailures(left);
+ assertNoFailures(right);
+ SearchHits leftHits = left.getHits();
+ SearchHits rightHits = right.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] rHits = rightHits.getHits();
+ for (int i = 0; i < hits.length; i++) {
+ assertThat("query: " + query + " hit: " + i, (double)hits[i].getScore(), closeTo(rHits[i].getScore(), 0.00001d));
+ }
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length - 1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat("query: " + query, hits[i].getId(), equalTo(rHits[i].getId()));
+ }
+ }
+
+
+ public static List<String> fill(List<String> list, String value, int times) {
+ for (int i = 0; i < times; i++) {
+ list.add(value);
+ }
+ return list;
+ }
+
+ public List<String> fillRandom(List<String> list, int times) {
+ for (int i = 0; i < times; i++) {
+ list.add(randomRealisticUnicodeOfCodepointLengthBetween(1, 5));
+ }
+ return list;
+ }
+
+ public <T> T randomPickExcept(List<T> fromList, T butNot) {
+ while (true) {
+ T t = RandomPicks.randomFrom(getRandom(), fromList);
+ if (t.equals(butNot)) {
+ continue;
+ }
+ return t;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java b/src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java
new file mode 100644
index 0000000..073a63d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java
@@ -0,0 +1,2179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.apache.lucene.util.English;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+
+public class SimpleQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testOmitNormsOnAll() throws ExecutionException, InterruptedException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("omit_norms", true).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+
+ assertHitCount(client().prepareSearch().setQuery(matchQuery("_all", "quick")).get(), 3l);
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("_all", "quick")).get();
+ SearchHit[] hits = searchResponse.getHits().hits();
+ assertThat(hits.length, equalTo(3));
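+ // norms are omitted on _all, so all three docs get the same score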
+ assertThat(hits[0].score(), allOf(equalTo(hits[1].getScore()), equalTo(hits[2].getScore())));
+ cluster().wipeIndices("test");
+
+ assertAcked(client().admin().indices().prepareCreate("test"));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+
+ assertHitCount(client().prepareSearch().setQuery(matchQuery("_all", "quick")).get(), 3l);
+ searchResponse = client().prepareSearch().setQuery(matchQuery("_all", "quick")).get();
+ hits = searchResponse.getHits().hits();
+ assertThat(hits.length, equalTo(3));
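+ // with norms enabled, shorter docs score higher, so the top score now differs from the rest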
+ assertThat(hits[0].score(), allOf(greaterThan(hits[1].getScore()), greaterThan(hits[2].getScore())));
+ }
+
+ @Test // see #3952
+ public void testEmptyQueryString() throws ExecutionException, InterruptedException, IOException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+ assertHitCount(client().prepareSearch().setQuery(queryString("quick")).get(), 3l);
+ assertHitCount(client().prepareSearch().setQuery(queryString("")).get(), 0l); // return no docs
+ }
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3177
+ public void testIssue3177() {
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, 1)));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3").get();
+ ensureGreen();
+ waitForRelocation();
+ optimize();
+ refresh();
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(
+ andFilter(
+ queryFilter(matchAllQuery()),
+ notFilter(andFilter(queryFilter(termQuery("field1", "value1")),
+ queryFilter(termQuery("field1", "value2")))))).get(),
+ 3l);
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(
+ filteredQuery(
+ boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2"))
+ .should(termQuery("field1", "value3")),
+ notFilter(andFilter(queryFilter(termQuery("field1", "value1")),
+ queryFilter(termQuery("field1", "value2")))))).get(),
+ 3l);
+ assertHitCount(
+ client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(notFilter(termFilter("field1", "value3"))).get(),
+ 2l);
+ }
+
+ @Test
+ public void passQueryAsStringTest() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery("{ \"term\" : { \"field1\" : \"value1_1\" }}").get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testIndexOptions() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=docs")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).get();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ assertHitCount(searchResponse, 1l);
+ try {
+ client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue("wrong exception message " + e.getMessage(), e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ }
+
+ @Test // see #3521
+ public void testConstantScoreQuery() throws Exception {
+ Random random = getRandom();
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+ ensureYellow();
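+ // a constant_score query gives every matching document the same score (1.0 by default)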
+ SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get();
+ assertHitCount(searchResponse, 2l);
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + getRandom().nextFloat()))).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+
+ client().prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + getRandom().nextFloat())).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ constantScoreQuery(boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat()))))).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+
+ int num = atLeast(100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[num];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type", "" + i).setSource("f", English.intToEnglish(i));
+ }
+ createIndex("test_1");
+ indexRandom(true, builders);
+ ensureYellow();
+ int queryRounds = atLeast(10);
+ for (int i = 0; i < queryRounds; i++) {
+ MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num)));
+ searchResponse = client().prepareSearch("test_1").setQuery(matchQuery).setSize(num).get();
+ long totalHits = searchResponse.getHits().totalHits();
+ SearchHits hits = searchResponse.getHits();
+ for (SearchHit searchHit : hits) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+ if (random.nextBoolean()) {
+ searchResponse = client().prepareSearch("test_1").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat())))).setSize(num).get();
+ hits = searchResponse.getHits();
+ } else {
+ FilterBuilder filter = queryFilter(matchQuery);
+ searchResponse = client().prepareSearch("test_1").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(filter).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat())))).setSize(num).get();
+ hits = searchResponse.getHits();
+ }
+ assertThat(hits.totalHits(), equalTo(totalHits));
+ if (totalHits > 1) {
+ float expected = hits.getAt(0).score();
+ for (SearchHit searchHit : hits) {
+ assertSearchHit(searchHit, hasScore(expected));
+ }
+ }
+ }
+ }
+
+ @Test // see #3521
+ public void testAllDocsQueryString() throws InterruptedException, ExecutionException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_REPLICAS, 0));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("foo", "bar"),
+ client().prepareIndex("test", "type1", "2").setSource("foo", "bar")
+ );
+ int iters = atLeast(100);
+ for (int i = 0; i < iters; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryString("*:*^10.0").boost(10.0f)).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery()))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(Math.sqrt(2), 0.1));
+ assertThat((double)searchResponse.getHits().getAt(1).score(), closeTo(Math.sqrt(2), 0.1));
+ }
+ }
+
+ @Test
+ public void testCommonTermsQueryOnAllField() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "message", "type=string", "comment", "type=string,boost=5.0")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("message", "test message", "comment", "whatever"),
+ client().prepareIndex("test", "type1", "2").setSource("message", "hello world", "comment", "test comment"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTerms("_all", "test")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore()));
+ }
+
+ @Test
+ public void testCommonTermsQuery() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"),
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") );
+
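+ // the common terms query splits terms into low- and high-frequency groups; matching is driven by the low-frequency terms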
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.AND)).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("4")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery("{ \"common\" : { \"field1\" : { \"query\" : \"the lazy fox brown\", \"cutoff_frequency\" : 1, \"minimum_should_match\" : { \"high_freq\" : 4 } } } }").get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the lazy fox brown").cutoffFrequency(1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+ // the "stop" analyzer drops "the" since it's a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with match query
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+ // the "stop" analyzer drops "the" since it's a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with multi match query
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("3")); // better score due to different query stats
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThirdHit(searchResponse, hasId("2"));
+ }
+
+ @Test
+ public void testOmitTermFreqsAndPositions() throws Exception {
+ Version version = Version.CURRENT;
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ try {
+ // backwards compat test!
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,omit_term_freq_and_positions=true")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_VERSION_CREATED, version.id));
+ assertThat(version.onOrAfter(Version.V_1_0_0_RC2), equalTo(false));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ assertHitCount(searchResponse, 1l);
+ try {
+ client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue(e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ cluster().wipeIndices("test");
+ } catch (MapperParsingException ex) {
+ assertThat(version.toString(), version.onOrAfter(Version.V_1_0_0_RC2), equalTo(true));
+ assertThat(ex.getCause().getMessage(), equalTo("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : 'DOCS_ONLY'] instead"));
+ }
+ version = randomVersion();
+ }
+ }
+
+ @Test
+ public void queryStringAnalyzedWildcard() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("value*").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
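+ // analyze_wildcard runs the wildcard text through the field's analyzer instead of
+ // using it verbatim, so the expressions below still line up with the indexed tokens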
+
+ searchResponse = client().prepareSearch().setQuery(queryString("*ue*").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("*ue_1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("val*e_1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("v?l*e?1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testLowercaseExpandedTerms() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(true)).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
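+ // wildcard, fuzzy and range terms bypass the analyzer, so they are lowercased by
+ // default to match the lowercased index terms; disabling that makes the uppercase
+ // variants miss, as asserted above and below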
+ searchResponse = client().prepareSearch().setQuery(queryString("ValUE_*").lowercaseExpandedTerms(true)).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("vAl*E_1")).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("[VALUE_1 TO VALUE_3]")).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testDateRangeInQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("past:[now-2M/d TO now/d]")).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 1l);
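+ // date math: now-2M/d means "two months ago, rounded down to the start of the day";
+ // units are case-sensitive (M = months, m = minutes), hence the failure for /D below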
+
+ try {
+ client().prepareSearch().setQuery(queryString("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ fail("D is an unsupported unit in date math");
+ } catch (Exception e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void typeFilterTypeIndexedTests() throws Exception {
+ typeFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void typeFilterTypeNotIndexedTests() throws Exception {
+ typeFilterTests("no");
+ }
+
+ private void typeFilterTests(String indexOption) throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "3").setSource("field1", "value1"));
+
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeFilter("type1"))).get(), 2l);
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeFilter("type2"))).get(), 3l);
+
+ assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2l);
+ assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 3l);
+
+ assertHitCount(client().prepareSearch().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 5l);
+ }
+
+ @Test
+ public void idsFilterTestsIdIndexed() throws Exception {
+ idsFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void idsFilterTestsIdNotIndexed() throws Exception {
+ idsFilterTests("no");
+ }
+
+ private void idsFilterTests(String indexOption) throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_id").field("index", index).endObject()
+ .endObject().endObject()));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1").ids("1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // no type
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter().ids("1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("1", "3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // no type
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1", "3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("7", "10")).get();
+ assertHitCount(searchResponse, 0l);
+
+ // repeat the same lookup, this time as a terms filter on _id
+ searchResponse = client().prepareSearch().setTypes("type1").setQuery(constantScoreQuery(termsFilter("_id", "1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1_2"),
+ client().prepareIndex("test", "type1", "3").setSource("field2", "value2_3"),
+ client().prepareIndex("test", "type1", "4").setSource("field3", "value3_4"));
+
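+ // note: the limit filter caps the number of documents accepted per shard, so the
+ // count is only deterministic here because the index was created with a single shard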
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), limitFilter(2))).get(), 2l);
+ }
+
+ @Test
+ public void filterExistsMissingTests() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()) );
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(existsFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(queryString("_exists_:field1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field3"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+
+ // wildcard check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("x*"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ // object check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("obj1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
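+ // the same request repeated; hits must be identical (presumably also exercising the
+ // filter cache on the second run)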
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ searchResponse = client().prepareSearch().setQuery(queryString("_missing_:field1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ // wildcard check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("x*"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ // object check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("obj1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+ }
+
+ @Test
+ public void passQueryOrFilterAsJSONStringTest() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
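+ // WrapperQueryBuilder embeds a query given as a raw JSON string, so externally
+ // supplied queries can be combined with regular builders, as the bool query below does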
+ WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1l);
+
+ BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"));
+ assertHitCount(client().prepareSearch().setQuery(bool).get(), 1l);
+
+ WrapperFilterBuilder wrapperFilter = new WrapperFilterBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareSearch().setPostFilter(wrapperFilter).get(), 1l);
+ }
+
+ @Test
+ public void testFiltersWithCustomCacheKey() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(searchResponse, 1l);
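+ // the explicit cacheKey should make the two identical filters above share a single
+ // cache entry; the two requests below fall back to the automatically derived key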
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testMatchQueryNumeric() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("long", 1l, "double", 1.0d),
+ client().prepareIndex("test", "type1", "2").setSource("long", 2l, "double", 2.0d),
+ client().prepareIndex("test", "type1", "3").setSource("long", 3l, "double", 3.0d));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("long", "1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("double", "2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+ try {
+ client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get();
+ fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException ex) {
+ // expected: the text cannot be parsed as a single number (NumberFormatException)
+ }
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value4", "field3", "value3"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value5", "field3", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3", "field2", "value6", "field3", "value1") );
+
+ MultiMatchQueryBuilder builder = multiMatchQuery("value1 value2 value4", "field1", "field2");
+ SearchResponse searchResponse = client().prepareSearch().setQuery(builder)
+ .addFacet(FacetBuilders.termsFacet("field1").field("field1")).get();
+
+ assertHitCount(searchResponse, 2l);
+ // multi_match uses dis_max by default, so both docs get equal scores and their order is arbitrary
+ assertSearchHits(searchResponse, "1", "2");
+
+ builder.useDisMax(false);
+ searchResponse = client().prepareSearch()
+ .setQuery(builder)
+ .get();
+
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ client().admin().indices().prepareRefresh("test").get();
+ builder = multiMatchQuery("value1", "field1", "field2")
+ .operator(MatchQueryBuilder.Operator.AND); // the operator applies only to terms within a single field; fields are always OR-ed together
+ searchResponse = client().prepareSearch()
+ .setQuery(builder)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ refresh();
+ builder = multiMatchQuery("value1", "field1", "field3^1.5")
+ .operator(MatchQueryBuilder.Operator.AND); // the operator applies only to terms within a single field; fields are always OR-ed together
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "1");
+
+ client().admin().indices().prepareRefresh("test").get();
+ builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f)
+ .operator(MatchQueryBuilder.Operator.AND); // the operator applies only to terms within a single field; fields are always OR-ed together
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "1");
+
+ // Test lenient
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value7", "field2", "value8", "field4", 5).get();
+ refresh();
+
+ builder = multiMatchQuery("value1", "field1", "field2", "field4");
+ try {
+ client().prepareSearch().setQuery(builder).get();
+ fail("Exception expected");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
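+ // lenient(true) ignores per-field parse failures (here "value1" against the numeric
+ // field4) instead of failing the whole shard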
+ builder.lenient(true);
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testMatchQueryZeroTermsQuery() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 0l);
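+ // the classic analyzer strips the stopword "a", leaving zero tokens; zero_terms_query
+ // NONE (the default) turns such a clause into match-nothing, ALL into match_all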
+
+ boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 1l);
+
+ boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testMultiMatchQueryZeroTermsQuery() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value3", "field2", "value4").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(multiMatchQuery("value1", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); // Fields are ORed together
+ SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(multiMatchQuery("value4", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 1l);
+
+ boolQuery = boolQuery().must(multiMatchQuery("a", "field1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testMultiMatchQueryMinShouldMatch() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("field1", new String[]{"value1", "value2", "value3"}).get();
+ client().prepareIndex("test", "type1", "2").setSource("field2", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2");
+
+ multiMatchQuery.useDisMax(true);
+ multiMatchQuery.minimumShouldMatch("70%");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(multiMatchQuery)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ multiMatchQuery.useDisMax(false);
+ multiMatchQuery.minimumShouldMatch("70%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1");
+ multiMatchQuery.minimumShouldMatch("100%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ multiMatchQuery.minimumShouldMatch("70%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testFuzzyQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("str:kimcy~1")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:11~1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("date:2012-02-02~1d")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testQuotedQueryStringWithBoost() throws InterruptedException, ExecutionException {
+ float boost = 10.0f;
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("important", "phrase match", "less_important", "nothing important"),
+ client().prepareIndex("test", "type1", "2").setSource("important", "nothing important", "less_important", "phrase match")
+ );
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryString("\"phrase match\"").field("important", boost).field("less_important")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(boost * searchResponse.getHits().getAt(1).score(), .1));
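+ // the field boost multiplies the phrase score, so doc 1 (match in the boosted field)
+ // should score roughly boost times doc 2, both with dis_max (default) and, below,
+ // with useDisMax(false), since each doc matches the phrase in only one field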
+
+ searchResponse = client().prepareSearch()
+ .setQuery(queryString("\"phrase match\"").field("important", boost).field("less_important").useDisMax(false)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(boost * searchResponse.getHits().getAt(1).score(), .1));
+ }
+
+ @Test
+ public void testSpecialRangeSyntaxInQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("num:>19")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:>20")).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:>=20")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:>11")).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:<20")).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:<=20")).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("+num:>11 +num:<20")).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testEmptyTermsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4") );
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("term", new String[0]))).get();
+ assertHitCount(searchResponse, 0l);
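+ // an empty terms list (and, below, an empty ids filter) matches no documents instead of erroring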
+
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), idsFilter())).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testFieldDataTermsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "str", "type=string", "lng", "type=long", "dbl", "type=double"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("str", "1", "lng", 1l, "dbl", 1.0d).get();
+ client().prepareIndex("test", "type", "2").setSource("str", "2", "lng", 2l, "dbl", 2.0d).get();
+ client().prepareIndex("test", "type", "3").setSource("str", "3", "lng", 3l, "dbl", 3.0d).get();
+ client().prepareIndex("test", "type", "4").setSource("str", "4", "lng", 4l, "dbl", 4.0d).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("str", "1", "4").execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new long[] {2, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new double[]{2, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new int[] {1, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new float[] {2, 4}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ // test partial matching
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("str", "2", "5").execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new double[] {2, 5}).execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new long[] {2, 5}).execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // test valid type, but no matching terms
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("str", "5", "6").execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new double[] {5, 6}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new long[] {5, 6}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testTermsLookupFilter() throws Exception {
+ assertAcked(prepareCreate("lookup").addMapping("type", "terms","type=string", "other", "type=string"));
+ assertAcked(prepareCreate("lookup2").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("arr").startObject("properties").startObject("term").field("type", "string")
+ .endObject().endObject().endObject().endObject().endObject().endObject()));
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+
+ ensureGreen();
+
+ indexRandom(true,
+ client().prepareIndex("lookup", "type", "1").setSource("terms", new String[]{"1", "3"}),
+ client().prepareIndex("lookup", "type", "2").setSource("terms", new String[]{"2"}),
+ client().prepareIndex("lookup", "type", "3").setSource("terms", new String[]{"2", "4"}),
+ client().prepareIndex("lookup", "type", "4").setSource("other", "value"),
+ client().prepareIndex("lookup2", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "1").endObject()
+ .startObject().field("term", "3").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "2").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "3").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .startObject().field("term", "4").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4") );
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // same as above, just on the _id...
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("_id").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // another search with same parameters...
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("2").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("3").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("4").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("1").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("2").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("not_exists").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testBasicFilterById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsFilter("type1").ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1", "type2").ids("1", "2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsFilter().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsFilter().ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter().ids("1", "2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1").ids("1", "2"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter().ids("1"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter(null).ids("1"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1", "type2", "type3").ids("1", "2", "3", "4"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ }
+
+ @Test
+ public void testBasicQueryById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2").ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("1", "2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery(null).ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "type3").ids("1", "2", "3", "4")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ }
+
+ @Test
+ public void testNumericTermsAndRanges() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("num_byte", 1, "num_short", 1, "num_integer", 1,
+ "num_long", 1, "num_float", 1, "num_double", 1).get();
+
+ client().prepareIndex("test", "type1", "2").setSource("num_byte", 2, "num_short", 2, "num_integer", 2,
+ "num_long", 2, "num_float", 2, "num_double", 2).get();
+
+ client().prepareIndex("test", "type1", "17").setSource("num_byte", 17, "num_short", 17, "num_integer", 17,
+ "num_long", 17, "num_float", 17, "num_double", 17).get();
+ refresh();
+
+ SearchResponse searchResponse;
+ logger.info("--> term query on 1");
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_byte", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_short", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_integer", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_long", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_float", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_double", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> terms query on 1");
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_byte", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_short", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_integer", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_long", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_float", new double[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_double", new double[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> term filter on 1");
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_byte", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_short", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_integer", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_long", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_float", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_double", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> terms filter on 1");
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_byte", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_short", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_integer", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_long", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_float", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_double", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testNumericRangeFilter_2826() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "test1", "num_long", 1).get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "test1", "num_long", 2).get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "test2", "num_long", 3).get();
+ client().prepareIndex("test", "type1", "4").setSource("field1", "test2", "num_long", 4).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setPostFilter(
+ boolFilter()
+ .should(rangeFilter("num_long", 1, 2))
+ .should(rangeFilter("num_long", 3, 4))
+ ).get();
+ assertHitCount(searchResponse, 4l);
+
+ // This made #2826 fail! (only with bit-based filters)
+ searchResponse = client().prepareSearch("test").setPostFilter(
+ boolFilter()
+ .should(rangeFilter("num_long", 1, 2))
+ .should(rangeFilter("num_long", 3, 4))
+ ).get();
+ assertHitCount(searchResponse, 4l);
+
+ // This made #2979 fail!
+ searchResponse = client().prepareSearch("test").setPostFilter(
+ boolFilter()
+ .must(termFilter("field1", "test1"))
+ .should(rangeFilter("num_long", 1, 2))
+ .should(rangeFilter("num_long", 3, 4))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testEmptyTopLevelFilter() {
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).get();
+ SearchResponse searchResponse = client().prepareSearch().setPostFilter("{}").get();
+ assertHitCount(searchResponse, 1l);
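+ // an empty JSON object as a top-level post filter parses as a no-op (match-all) filter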
+ }
+
+ @Test // see #2926
+ public void testMustNot() throws ElasticsearchException, IOException, ExecutionException, InterruptedException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 2, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"),
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"),
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other"),
+ client().prepareIndex("test", "test", "4").setSource("description", "foo"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertHitCount(searchResponse, 4l);
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery()
+ .mustNot(matchQuery("description", "anything").type(Type.BOOLEAN))
+ ).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertHitCount(searchResponse, 2l);
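+ // a bool query containing only must_not clauses matches every document except those
+ // matching the clause; docs 3 and 4 do not contain "anything"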
+ }
+
+ @Test // see #2994
+ public void testSimpleSpan() throws ElasticsearchException, IOException, ExecutionException, InterruptedException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"),
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"),
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other"),
+ client().prepareIndex("test", "test", "4").setSource("description", "foo"));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanTermQuery("description", "bar"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanTermQuery("test.description", "bar"))).get();
+ assertHitCount(searchResponse, 1l);
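+ // the field can be addressed with or without the type prefix ("description" vs
+ // "test.description"); both resolve to the same mapping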
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ spanNearQuery()
+ .clause(spanTermQuery("description", "foo"))
+ .clause(spanTermQuery("test.description", "other"))
+ .slop(3)).get();
+ assertHitCount(searchResponse, 3l);
+ }
+
+ @Test
+ public void testSpanMultiTermQuery() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar", "count", 1).get();
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything", "count", 2).get();
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other", "count", 3).get();
+ client().prepareIndex("test", "test", "4").setSource("description", "fop", "count", 4).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))).get();
+ assertHitCount(response, 4);
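+ // span_multi wraps a multi-term query (fuzzy, prefix, wildcard, range, regexp) so that
+ // its term expansions can participate in span queries; fuzzy "fop" also matches "foo"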
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))).get();
+ assertHitCount(response, 4);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))).get();
+ assertHitCount(response, 3);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(rangeQuery("description").from("ffa").to("foo"))))
+ .execute().actionGet();
+ assertHitCount(response, 3);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))).get();
+ assertHitCount(response, 3);
+ }
+
+ @Test
+ public void testSimpleDFSQuery() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 5, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("s", jsonBuilder()
+ .startObject()
+ .startObject("s")
+ .startObject("_routing")
+ .field("required", true)
+ .field("path", "bs")
+ .endObject()
+ .startObject("properties")
+ .startObject("online")
+ .field("type", "boolean")
+ .endObject()
+ .startObject("ts")
+ .field("type", "date")
+ .field("ignore_malformed", false)
+ .field("format", "dateOptionalTime")
+ .endObject()
+ .startObject("bs")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .addMapping("bs", "online", "type=boolean", "ts", "type=date,ignore_malformed=false,format=dateOptionalTime"));
+ ensureGreen();
+
+ client().prepareIndex("test", "s", "1").setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100).get();
+ client().prepareIndex("test", "s", "2").setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000).get();
+ client().prepareIndex("test", "bs", "3").setSource("online", false, "ts", System.currentTimeMillis() - 100).get();
+ client().prepareIndex("test", "bs", "4").setSource("online", true, "ts", System.currentTimeMillis() - 123123).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(
+ boolQuery()
+ .must(termQuery("online", true))
+ .must(boolQuery()
+ .should(boolQuery()
+ .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000)))
+ .must(termQuery("_type", "bs"))
+ )
+ .should(boolQuery()
+ .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000)))
+ .must(termQuery("_type", "s"))
+ )
+ )
+ )
+ .setVersion(true)
+ .setFrom(0).setSize(100).setExplain(true).get();
+ assertNoFailures(response);
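+ // DFS_QUERY_THEN_FETCH first gathers global term statistics across all five shards so
+ // scoring is consistent; the test only verifies the request executes without failures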
+ }
+
+ @Test
+ public void testMultiFieldQueryString() {
+ client().prepareIndex("test", "s", "1").setSource("field1", "value1", "field2", "value2").setRefresh(true).get();
+ logger.info("regular");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("value1").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:value1")).get(), 1);
+ logger.info("prefix");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("value*").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:value*")).get(), 1);
+ logger.info("wildcard");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("v?lue*").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:v?lue*")).get(), 1);
+ logger.info("fuzzy");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("value~").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:value~")).get(), 1);
+ logger.info("regexp");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("/value[01]/").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:/value[01]/")).get(), 1);
+ }
+
+ // see #3881 for an extensive description of the issue
+ @Test
+ public void testMatchQueryWithSynonyms() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
+ assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+ ensureGreen();
+ client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick brown").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fast").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
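+ // the synonym filter is applied at search time only, so "fast" expands to "quick" in
+ // the query and matches the document that was indexed with "quick"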
+
+ client().prepareIndex("test", "test", "2").setSource("text", "fast brown fox").get();
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick brown").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test
+ public void testMatchQueryWithStackedStems() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "keyword_repeat", "porterStem", "unique_stem")
+ .put("index.analysis.filter.unique_stem.type", "unique")
+ .put("index.analysis.filter.unique_stem.only_on_same_position", true));
+ assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+ ensureGreen();
+ client().prepareIndex("test", "test", "1").setSource("text", "the fox runs across the street").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
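+ // keyword_repeat emits the original token alongside its stem at the same position and
+ // unique(only_on_same_position) drops duplicates, so "runs" matches both "runs" and "run"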
+
+ client().prepareIndex("test", "test", "2").setSource("text", "run fox run").get();
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test
+ public void testQueryStringWithSynonyms() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
+ assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryString("quick").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(queryString("quick brown").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch().setQuery(queryString("fast").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "fast brown fox").get();
+ refresh();
+
+ searchResponse = client().prepareSearch("test").setQuery(queryString("quick").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ searchResponse = client().prepareSearch("test").setQuery(queryString("quick brown").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
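+ // "foo.baz" is split into "foo" and "baz" by the word_delimiter filter; both terms occur in
+ // field1, so the AND query across field1 and field2 must still match (issue 3898)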
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3898
+ public void testCustomWordDelimiterQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings("analysis.analyzer.my_analyzer.type", "custom",
+ "analysis.analyzer.my_analyzer.tokenizer", "whitespace",
+ "analysis.analyzer.my_analyzer.filter", "custom_word_delimiter",
+ "analysis.filter.custom_word_delimiter.type", "word_delimiter",
+ "analysis.filter.custom_word_delimiter.generate_word_parts", "true",
+ "analysis.filter.custom_word_delimiter.generate_number_parts", "false",
+ "analysis.filter.custom_word_delimiter.catenate_numbers", "true",
+ "analysis.filter.custom_word_delimiter.catenate_words", "false",
+ "analysis.filter.custom_word_delimiter.split_on_case_change", "false",
+ "analysis.filter.custom_word_delimiter.split_on_numerics", "false",
+ "analysis.filter.custom_word_delimiter.stem_english_possessive", "false")
+ .addMapping("type1", "field1", "type=string,analyzer=my_analyzer", "field2", "type=string,analyzer=my_analyzer"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "foo bar baz", "field2", "not needed").get();
+ refresh();
+
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ queryString("foo.baz").useDisMax(false).defaultOperator(QueryStringQueryBuilder.Operator.AND)
+ .field("field1").field("field2")).get();
+ assertHitCount(response, 1l);
+ }
+
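+ // lenient(true) ignores the format error from matching the text "value2" against the numeric field1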
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3797
+ public void testMultiMatchLenientIssue3797() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", 123, "field2", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field1^2", "field2").lenient(true).useDisMax(false)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field1^2", "field2").lenient(true).useDisMax(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field2^2").lenient(true)).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
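+ // the indices query applies the wrapped query to the listed indices and no_match_query to all others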
+ @Test
+ public void testIndicesQuery() throws Exception {
+ createIndex("index1", "index2", "index3");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
+ client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery(matchQuery("text", "value2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ //default no match query is match_all
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery("all")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery("none")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
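+ // post_filter counterpart of testIndicesQuery above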
+ @Test
+ public void testIndicesFilter() throws Exception {
+ createIndex("index1", "index2", "index3");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
+ client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")
+ .noMatchFilter(termFilter("text", "value2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ //default no match filter is "all"
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")
+ .noMatchFilter("all")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")
+ .noMatchFilter("none")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416
+ public void testIndicesQuerySkipParsing() throws Exception {
+ createIndex("simple");
+ client().admin().indices().prepareCreate("related")
+ .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
+ .endObject().endObject().endObject()).get();
+ ensureGreen();
+
+ client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("related", "parent").setId("2").setSource("text", "parent").get();
+ client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2").get();
+ refresh();
+
+ //has_child fails if executed on "simple" index
+ try {
+ client().prepareSearch("simple")
+ .setQuery(hasChildQuery("child", matchQuery("text", "value"))).get();
+ fail("Should have failed as has_child query can only be executed against parent-child types");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
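+ // the doubled "for" matches the upstream exception message verbatim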
+ assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
+ }
+ }
+
+ //has_child doesn't get parsed for "simple" index
+ SearchResponse searchResponse = client().prepareSearch("related", "simple")
+ .setQuery(indicesQuery(hasChildQuery("child", matchQuery("text", "value2")), "related")
+ .noMatchQuery(matchQuery("text", "value1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416
+ public void testIndicesFilterSkipParsing() throws Exception {
+ createIndex("simple");
+ client().admin().indices().prepareCreate("related")
+ .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
+ .endObject().endObject().endObject()).get();
+ ensureGreen();
+
+ client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("related", "parent").setId("2").setSource("text", "parent").get();
+ client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2").get();
+ refresh();
+
+ //has_child fails if executed on "simple" index
+ try {
+ client().prepareSearch("simple")
+ .setPostFilter(hasChildFilter("child", termFilter("text", "value1"))).get();
+ fail("Should have failed as has_child query can only be executed against parent-child types");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
+ }
+ }
+
+ SearchResponse searchResponse = client().prepareSearch("related", "simple")
+ .setPostFilter(indicesFilter(hasChildFilter("child", termFilter("text", "value2")), "related")
+ .noMatchFilter(termFilter("text", "value1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+ }
+
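+ // indices named in the query that do not exist ("test1", ...) simply never match;
+ // no_match_query still applies to every index outside the list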
+ @Test
+ public void testIndicesQueryMissingIndices() throws IOException {
+ createIndex("index1");
+ createIndex("index2");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1", "1").setSource("field", "match").get();
+ client().prepareIndex("index1", "type1", "2").setSource("field", "no_match").get();
+ client().prepareIndex("index2", "type1", "10").setSource("field", "match").get();
+ client().prepareIndex("index2", "type1", "20").setSource("field", "no_match").get();
+ client().prepareIndex("index3", "type1", "100").setSource("field", "match").get();
+ client().prepareIndex("index3", "type1", "200").setSource("field", "no_match").get();
+ refresh();
+
+ //all indices are missing
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "test1", "test2", "test3")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //only one index specified, which is missing
+ searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "test1")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //more than one index specified, one of them is missing
+ searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "index1", "test1")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index2 or index3");
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesFilterMissingIndices() throws IOException {
+ createIndex("index1");
+ createIndex("index2");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1", "1").setSource("field", "match").get();
+ client().prepareIndex("index1", "type1", "2").setSource("field", "no_match").get();
+ client().prepareIndex("index2", "type1", "10").setSource("field", "match").get();
+ client().prepareIndex("index2", "type1", "20").setSource("field", "no_match").get();
+ client().prepareIndex("index3", "type1", "100").setSource("field", "match").get();
+ client().prepareIndex("index3", "type1", "200").setSource("field", "no_match").get();
+ refresh();
+
+ //all indices are missing
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesFilter(termFilter("field", "missing"), "test1", "test2", "test3")
+ .noMatchFilter(termFilter("field", "match")))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //only one index specified, which is missing
+ searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesFilter(termFilter("field", "missing"), "test1")
+ .noMatchFilter(termFilter("field", "match")))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //more than one index specified, one of them is missing
+ searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesFilter(termFilter("field", "missing"), "index1", "test1")
+ .noMatchFilter(termFilter("field", "match")))).get();
+
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index2 or index3");
+ }
+ }
+ }
+
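+ // the script makes each hit's score equal to its "score" field, so min_score 1.5 keeps only docs 1 and 3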
+ @Test
+ public void testMinScore() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("score", 1.5).get();
+ client().prepareIndex("test", "test", "2").setSource("score", 1).get();
+ client().prepareIndex("test", "test", "3").setSource("score", 2).get();
+ client().prepareIndex("test", "test", "4").setSource("score", 0.5).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(
+ functionScoreQuery(scriptFunction("_doc['score'].value"))).setMinScore(1.5f).get();
+ assertHitCount(searchResponse, 2);
+ assertFirstHit(searchResponse, hasId("3"));
+ assertSecondHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testQueryStringWithSlopAndFields() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "customer", "1").setSource("desc", "one two three").get();
+ client().prepareIndex("test", "product", "2").setSource("desc", "one two three").get();
+ refresh();
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one two\"").defaultField("desc")).get();
+ assertHitCount(searchResponse, 2);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one two\"").field("product.desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one three\"~5").field("product.desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one two\"").defaultField("customer.desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ }
+
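+ // randomly covers the three range filter variants: index execution, fielddata execution, and numeric_range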
+ private static FilterBuilder rangeFilter(String field, Object from, Object to) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return FilterBuilders.rangeFilter(field).from(from).to(to);
+ } else {
+ return FilterBuilders.rangeFilter(field).from(from).to(to).setExecution("fielddata");
+ }
+ } else {
+ return FilterBuilders.numericRangeFilter(field).from(from).to(to);
+ }
+ }
+
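+ // simple_query_string syntax: "|" is OR, "+" is AND, quotes form a phrase, a trailing "*" a prefix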
+ @Test
+ public void testSimpleQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo").get();
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar").get();
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar").get();
+ client().prepareIndex("test", "type1", "4").setSource("body", "quux baz eggplant").get();
+ client().prepareIndex("test", "type1", "5").setSource("body", "quux baz spaghetti").get();
+ client().prepareIndex("test", "type1", "6").setSource("otherbody", "spaghetti").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryString("foo bar")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar").defaultOperator(SimpleQueryStringBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(simpleQueryString("\"quux baz\" +(eggplant | spaghetti)")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "4", "5");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("eggplants").analyzer("snowball")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("spaghetti").field("body", 10.0f).field("otherbody", 2.0f)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("5"));
+ assertSearchHits(searchResponse, "5", "6");
+ }
+
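+ // the "sub" multi-field must be addressable both with and without the type prefix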
+ @Test
+ public void testNestedFieldSimpleQueryString() throws IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("body").field("type", "string")
+ .startObject("fields")
+ .startObject("sub").field("type", "string")
+ .endObject() // sub
+ .endObject() // fields
+ .endObject() // body
+ .endObject() // properties
+ .endObject() // type1
+ .endObject()));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo bar baz").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("body")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("type1.body")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("body.sub")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("type1.body.sub")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ }
+
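+ // flags restrict which syntax features are parsed; operators of disabled features are treated as plain text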
+ @Test
+ public void testSimpleQueryStringFlags() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo").get();
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar").get();
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar").get();
+ client().prepareIndex("test", "type1", "4").setSource("body", "quux baz eggplant").get();
+ client().prepareIndex("test", "type1", "5").setSource("body", "quux baz spaghetti").get();
+ client().prepareIndex("test", "type1", "6").setSource("otherbody", "spaghetti").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar").flags(SimpleQueryStringFlag.ALL)).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo | bar")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo | bar")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.NONE)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("baz | egg*")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.NONE)).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setSource("{\n" +
+ " \"query\": {\n" +
+ " \"simple_query_string\": {\n" +
+ " \"query\": \"foo|bar\",\n" +
+ " \"default_operator\": \"AND\"," +
+ " \"flags\": \"NONE\"\n" +
+ " }\n" +
+ " }\n" +
+ "}").get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("baz | egg*")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.WHITESPACE, SimpleQueryStringFlag.PREFIX)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+ }
+
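+ // filters on `now` must not be cached implicitly since their result changes over time;
+ // only a date-rounded `now/d` or an explicit cache(true) ends up in the filter cache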
+ @Test
+ public void testRangeFilterNoCacheWithNow() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("type1", "date", "type=date,format=YYYY-mm-dd"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("date", "2014-01-01", "field", "value")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.rangeFilter("date").from("2013-01-01").to("now")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // filter cache should not contain anything, b/c `now` is used in `to`.
+ IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now"))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // filter cache should not contain anything, b/c `now` is used in `to`.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now/d").cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ // `now` with date rounding (`now/d`) is used, so the filter is cacheable and the filter cache must contain something
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ long filtercacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
+ assertThat(filtercacheSize, greaterThan(0l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.termFilter("field", "value").cache(true))
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now"))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // and because we use a cached term filter, it is also added to the filter cache, which should now contain more than before
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(filtercacheSize));
+ filtercacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now").cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ // The range filter is now explicitly cached, so it is now in the filter cache.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(filtercacheSize));
+ }
+
+ @Test
+ public void testSearchEmptyDoc() {
+ prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}").get();
+ client().prepareIndex("test", "type1", "1").setSource("{}").get();
+ refresh();
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java b/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java
new file mode 100644
index 0000000..0cf491f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java
@@ -0,0 +1,464 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import org.apache.lucene.util.English;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Integration tests for query rescoring: phrase rescoring, score modes, and explain output.
+ */
+public class QueryRescorerTests extends ElasticsearchIntegrationTest {
+
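+ // a phrase rescorer re-sorts the top rescore_window hits so closer phrase matches rank higher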
+ @Test
+ public void testRescorePhrase() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "the quick brown").slop(3)))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer((QueryBuilders.matchPhraseQuery("field1", "the quick brown"))))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+ }
+
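+ // query weight 0.6 vs. rescore weight 2.0 lets the phrase proximity score dominate the final ranking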
+ @Test
+ public void testMoreDocs() throws Exception {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("index_analyzer", "whitespace").field("search_analyzer", "synonym")
+ .endObject().endObject().endObject().endObject();
+
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "massachusetts avenue boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "lexington avenue boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "boston avenue lexington massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "4").setSource("field1", "boston road lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "5").setSource("field1", "lexington street lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "6").setSource("field1", "massachusetts avenue lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "7").setSource("field1", "bosten street san franciso california").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "8").setSource("field1", "hollywood boulevard los angeles california").execute().actionGet();
+ client().prepareIndex("test", "type1", "9").setSource("field1", "1st street boston massachussetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "10").setSource("field1", "1st street boston massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "11").setSource("field1", "2st street boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "12").setSource("field1", "3st street boston massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("3"));
+ }
+
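+ // plain and rescored responses must score identically; id comparison stops at the trailing
+ // score tie, where ordering is not deterministic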
+ private static final void assertEquivalent(String query, SearchResponse plain, SearchResponse rescored) {
+ assertNoFailures(plain);
+ assertNoFailures(rescored);
+ SearchHits leftHits = plain.getHits();
+ SearchHits rightHits = rescored.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] rHits = rightHits.getHits();
+ for (int i = 0; i < hits.length; i++) {
+ assertThat("query: " + query, hits[i].getScore(), equalTo(rHits[i].getScore()));
+ }
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length-1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat("query: " + query,hits[i].getId(), equalTo(rHits[i].getId()));
+ }
+ }
+
+ private static final void assertEquivalentOrSubstringMatch(String query, SearchResponse plain, SearchResponse rescored) {
+ SearchHits leftHits = plain.getHits();
+ SearchHits rightHits = rescored.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] otherHits = rightHits.getHits();
+ if (!hits[0].getId().equals(otherHits[0].getId())) {
+ assertThat(((String) otherHits[0].sourceAsMap().get("field1")).contains(query), equalTo(true));
+ } else {
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length-1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat(query, hits[i].getId(), equalTo(rightHits.getHits()[i].getId()));
+ }
+ }
+ }
+
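+ // rescoring with zero rescore weight, or with a rescore query that matches nothing,
+ // must leave the plain ranking untouched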
+ @Test
+ // forces QUERY_THEN_FETCH because of https://github.com/elasticsearch/elasticsearch/issues/4829
+ public void testEquivalence() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 5)).put("index.number_of_replicas", between(0, 1))).execute().actionGet();
+ ensureGreen();
+
+ int numDocs = atLeast(100);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, docs);
+ ensureGreen();
+ final int iters = atLeast(50);
+ for (int i = 0; i < iters; i++) {
+ int resultSize = between(5, 30);
+ int rescoreWindow = between(1, 3) * resultSize;
+ String intToEnglish = English.intToEnglish(between(0, numDocs-1));
+ String query = intToEnglish.split(" ")[0];
+ SearchResponse rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders
+ .constantScoreQuery(QueryBuilders.matchPhraseQuery("field1", intToEnglish).slop(3)))
+ .setQueryWeight(1.0f)
+ .setRescoreQueryWeight(0.0f)) // no weight - so we basically use the same score as the actual query
+ .setRescoreWindow(rescoreWindow).execute().actionGet();
+
+ SearchResponse plain = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR)).setFrom(0).setSize(resultSize)
+ .execute().actionGet();
+
+ // check equivalence
+ assertEquivalent(query, plain, rescored);
+
+ rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders
+ .constantScoreQuery(QueryBuilders.matchPhraseQuery("field1", "not in the index").slop(3)))
+ .setQueryWeight(1.0f)
+ .setRescoreQueryWeight(1.0f))
+ .setRescoreWindow(rescoreWindow).execute().actionGet();
+ // check equivalence
+ assertEquivalent(query, plain, rescored);
+
+ rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders.matchPhraseQuery("field1", intToEnglish).slop(0))
+ .setQueryWeight(1.0f).setRescoreQueryWeight(1.0f)).setRescoreWindow(2 * rescoreWindow).execute().actionGet();
+ // check equivalence or if the first match differs we check if the phrase is a substring of the top doc
+ assertEquivalentOrSubstringMatch(intToEnglish, plain, rescored);
+ }
+ }
+
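+ // each rescored hit carries a two-branch explanation whose description reflects the score mode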
+ @Test
+ public void testExplain() throws Exception {
+ prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ refresh();
+
+ {
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f))
+ .setQueryWeight(0.5f).setRescoreQueryWeight(0.4f)).setRescoreWindow(5).setExplain(true).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(searchResponse.getHits().getAt(i).explanation(), notNullValue());
+ assertThat(searchResponse.getHits().getAt(i).explanation().isMatch(), equalTo(true));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails().length, equalTo(2));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[0].isMatch(), equalTo(true));
+ if (i == 2) {
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[1].getValue(), equalTo(0.5f));
+ } else {
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDescription(), equalTo("sum of:"));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[0].getDetails()[1].getValue(), equalTo(0.5f));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[1].getDetails()[1].getValue(), equalTo(0.4f));
+ }
+ }
+ }
+
+ String[] scoreModes = new String[]{ "max", "min", "avg", "total", "multiply", "" };
+ String[] descriptionModes = new String[]{ "max of:", "min of:", "avg of:", "sum of:", "product of:", "sum of:" };
+ for (int i = 0; i < scoreModes.length; i++) {
+ QueryRescorer rescoreQuery = RescoreBuilder.queryRescorer(QueryBuilders.matchQuery("field1", "the quick brown").boost(4.0f))
+ .setQueryWeight(0.5f).setRescoreQueryWeight(0.4f);
+
+ if (!"".equals(scoreModes[i])) {
+ rescoreQuery.setScoreMode(scoreModes[i]);
+ }
+
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(rescoreQuery).setRescoreWindow(5).setExplain(true).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int j = 0; j < 3; j++) {
+ assertThat(searchResponse.getHits().getAt(j).explanation().getDescription(), equalTo(descriptionModes[i]));
+ }
+ }
+ }
+
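+ // function_score with boostMode REPLACE pins exact scores, so each score_mode's combination
+ // of query weight and rescore weight can be verified arithmetically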
+ @Test
+ public void testScoring() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("index", "not_analyzed").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1,5)).put("index.number_of_replicas", between(0,1))).get();
+ int numDocs = atLeast(100);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, docs);
+ ensureGreen();
+
+ String[] scoreModes = new String[]{ "max", "min", "avg", "total", "multiply", "" };
+ float primaryWeight = 1.1f;
+ float secondaryWeight = 1.6f;
+
+ for (String scoreMode: scoreModes) {
+ for (int i = 0; i < numDocs - 4; i++) {
+ String[] intToEnglish = new String[] { English.intToEnglish(i), English.intToEnglish(i + 1), English.intToEnglish(i + 2), English.intToEnglish(i + 3) };
+
+ QueryRescorer rescoreQuery = RescoreBuilder
+ .queryRescorer(
+ QueryBuilders.boolQuery()
+ .disableCoord(true)
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[0])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("5.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[1])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("7.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[3])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("0.0f"))))
+ .setQueryWeight(primaryWeight)
+ .setRescoreQueryWeight(secondaryWeight);
+
+ if (!"".equals(scoreMode)) {
+ rescoreQuery.setScoreMode(scoreMode);
+ }
+
+ SearchResponse rescored = client()
+ .prepareSearch()
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.boolQuery()
+ .disableCoord(true)
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[0])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("2.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[1])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("3.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[2])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("5.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[3])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("0.2f"))))
+ .setFrom(0)
+ .setSize(10)
+ .setRescorer(rescoreQuery)
+ .setRescoreWindow(50).execute().actionGet();
+
+ assertHitCount(rescored, 4);
+
+ if ("total".equals(scoreMode) || "".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
+ } else if ("max".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
+ } else if ("min".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 2)));
+ assertSecondHit(rescored, hasId(String.valueOf(i + 1)));
+ assertThirdHit(rescored, hasId(String.valueOf(i)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
+ } else if ("avg".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThirdHit(rescored, hasId(String.valueOf(i)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
+ } else if ("multiply".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java b/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java
new file mode 100644
index 0000000..9d7fb0a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class SearchScanScrollingTests extends ElasticsearchIntegrationTest {
+
+ public void testRandomized() throws Exception {
+ testScroll(between(1, 4), atLeast(100), between(1, 300), getRandom().nextBoolean(), getRandom().nextBoolean());
+ }
+
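+ // indexes numberOfDocs (optionally skewed across shards via routing), scans with the given
+ // page size, and checks that every id comes back exactly once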
+ private void testScroll(int numberOfShards, long numberOfDocs, int size, boolean unbalanced, boolean trackScores) throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", numberOfShards)).get();
+ ensureGreen();
+
+ Set<String> ids = Sets.newHashSet();
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < numberOfDocs; i++) {
+ String id = Integer.toString(i);
+ expectedIds.add(id);
+ String routing = null;
+ if (unbalanced) {
+ if (i < (numberOfDocs * 0.6)) {
+ routing = "0";
+ } else if (i < (numberOfDocs * 0.9)) {
+ routing = "1";
+ } else {
+ routing = "2";
+ }
+ }
+ client().prepareIndex("test", "type1", id).setRouting(routing).setSource("field", i).execute().actionGet();
+ // make some segments
+ if (i % 10 == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setTrackScores(trackScores)
+ .execute().actionGet();
+ try {
+ assertHitCount(searchResponse, numberOfDocs);
+
+ // keep scrolling until we get no more results
+ while (true) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ assertHitCount(searchResponse, numberOfDocs);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id() + " should not exist in the result set", ids.contains(hit.id()), equalTo(false));
+ ids.add(hit.id());
+ if (trackScores) {
+ assertThat(hit.getScore(), greaterThan(0.0f));
+ } else {
+ assertThat(hit.getScore(), equalTo(0.0f));
+ }
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+
+ assertThat(expectedIds, equalTo(ids));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java b/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java
new file mode 100644
index 0000000..f3bb576
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SearchScanTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testNarrowingQuery() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1,5))).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ Set<String> ids = Sets.newHashSet();
+ Set<String> expectedIds = Sets.newHashSet();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[atLeast(50)];
+ for (int i = 0; i < builders.length/2; i++) {
+ expectedIds.add(Integer.toString(i));
+ builders[i] = client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy1").field("postDate", System.currentTimeMillis()).field("message", "test").endObject());
+ }
+
+ for (int i = builders.length/2; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy2").field("postDate", System.currentTimeMillis()).field("message", "test").endObject());
+ }
+ indexRandom(true, builders);
+
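+ // only the kimchy1 half of the docs should match, so the scan must return exactly builders.length / 2 ids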
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(termQuery("user", "kimchy1"))
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)builders.length/2));
+
+ // keep scrolling until a page comes back with no results
+ while (true) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)builders.length/2));
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id() + " should not exist in the result set", ids.contains(hit.id()), equalTo(false));
+ ids.add(hit.id());
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+
+ assertThat(ids, equalTo(expectedIds));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java
new file mode 100644
index 0000000..4167eaa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scriptfilter;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.client.Requests.refreshRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.scriptFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest {
+ private final static Settings DEFAULT_SETTINGS = ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ @Test
+ public void testCustomScriptBoost() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(DEFAULT_SETTINGS).execute().actionGet();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running doc['num1'].value > 1");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("doc['num1'].value > 1")))
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ logger.info("running doc['num1'].value > param1");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("doc['num1'].value > param1").addParam("param1", 2)))
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ logger.info("running doc['num1'].value > param1");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("doc['num1'].value > param1").addParam("param1", -1)))
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ }
+
+ private static AtomicInteger scriptCounter = new AtomicInteger(0);
+
+ public static int incrementScriptCounter() {
+ return scriptCounter.incrementAndGet();
+ }
+
+ @Test
+ public void testCustomScriptCache() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(DEFAULT_SETTINGS).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("test", "1").field("num", 1.0f).endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("test", "2").field("num", 2.0f).endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("test", "3").field("num", 3.0f).endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ String script = "org.elasticsearch.search.scriptfilter.ScriptFilterSearchTests.incrementScriptCounter() > 0";
+
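+ // the script bumps a static counter on every invocation, so the counter reveals whether the
+ // cached filter was actually re-executed or served from the filter cache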
+ scriptCounter.set(0);
+ logger.info("running script filter the first time");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).cache(true)))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(scriptCounter.get(), equalTo(3));
+
+ scriptCounter.set(0);
+ logger.info("running script filter the second time");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(termQuery("test", "2"), scriptFilter(script).cache(true)))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(scriptCounter.get(), equalTo(0));
+
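+ // a different parameter set presumably keys a new cache entry, so the script runs for all three docs again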
+ scriptCounter.set(0);
+ logger.info("running script filter with new parameters");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).addParam("param1", "1").cache(true)))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(scriptCounter.get(), equalTo(3));
+
+ scriptCounter.set(0);
+ logger.info("running script filter with same parameters");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter(script).addParam("param1", "1").cache(true)))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(scriptCounter.get(), equalTo(0));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java
new file mode 100644
index 0000000..33a3f91
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java
@@ -0,0 +1,453 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scroll;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.hamcrest.Matchers.*;
+
+public class SearchScrollTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleScrollQueryThenFetch() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
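+ // 100 docs with page size 35: expect pages of 35, 35 and 30 hits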
+ try {
+ long counter = 0;
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(30));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ String routing = "0";
+ if (i > 90) {
+ routing = "1";
+ } else if (i > 60) {
+ routing = "2";
+ }
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", i).setRouting(routing).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
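+ // 100 docs with page size 3: the initial page plus 32 scroll pages of 3 hits each (99 so far),
+ // then one page with a single hit, then an empty page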
+ try {
+ long counter = 0;
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ for (int i = 0; i < 32; i++) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+ }
+
+ // and now the final page, with a single hit
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ // and after that, an empty page
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testScrollAndUpdateIndex() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 5)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy").field("postDate", System.currentTimeMillis()).field("message", "test").endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(0l));
+
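+ // update every doc while scrolling; the open scroll context keeps serving the original hits,
+ // so the loop still visits all 500 docs exactly once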
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryString("user:kimchy"))
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("postDate", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ do {
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ Map<String, Object> map = searchHit.sourceAsMap();
+ map.put("message", "update");
+ client().prepareIndex("test", "tweet", searchHit.id()).setSource(map).execute().actionGet();
+ }
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ } while (searchResponse.getHits().hits().length > 0);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(500l));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ SearchResponse searchResponse2 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ long counter1 = 0;
+ long counter2 = 0;
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ ClearScrollResponse clearResponse = client().prepareClearScroll()
+ .addScrollId(searchResponse1.getScrollId())
+ .addScrollId(searchResponse2.getScrollId())
+ .execute().actionGet();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+
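+ // once cleared, the scroll ids no longer resolve to a search context, so further scroll requests come back empty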
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(0));
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(0));
+ }
+
+ @Test
+ public void testClearNonExistentScrollId() throws Exception {
+ createIndex("idx");
+ ClearScrollResponse response = client().prepareClearScroll()
+ .addScrollId("cXVlcnlUaGVuRmV0Y2g7MzsyOlpBRC1qOUhrUjhhZ0NtQWUxU2FuWlE7MjpRcjRaNEJ2R1JZV1VEMW02ZGF1LW5ROzI6S0xUal9lZDRTd3lWNUhUU2VSb01CQTswOw==")
+ .get();
+ // Whether a scroll was actually cleared can't be known, since that information isn't serialized in the
+ // free-search-context response returned by each node that is asked to clear a particular scroll.
+ assertThat(response.isSucceeded(), is(true));
+ }
+
+ @Test
+ public void testClearIllegalScrollId() throws Exception {
+ createIndex("idx");
+ try {
+ client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // expected
+ }
+ try {
+ // Fails during base64 decoding (Base64-encoded string must have at least four characters)
+ client().prepareClearScroll().addScrollId("a").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // expected
+ }
+ try {
+ client().prepareClearScroll().addScrollId("abcabc").get();
+ fail();
+ // if running without -ea this will also throw ElasticsearchIllegalArgumentException
+ } catch (UncategorizedExecutionException e) {
+ assertThat(e.getRootCause(), instanceOf(AssertionError.class));
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetch_clearAllScrollIds() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ SearchResponse searchResponse2 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ long counter1 = 0;
+ long counter2 = 0;
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ ClearScrollResponse clearResponse = client().prepareClearScroll().addScrollId("_all")
+ .execute().actionGet();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(0));
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(0));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/4156
+ public void testDeepPaginationWithOneDocIndexAndDoNotBlowUp() throws Exception {
+ client().prepareIndex("index", "type", "1")
+ .setSource("field", "value")
+ .setRefresh(true)
+ .execute().get();
+
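+ // regression test: a size of Integer.MAX_VALUE must not blow up deep pagination for any search type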
+ for (SearchType searchType : SearchType.values()) {
+ SearchRequestBuilder builder = client().prepareSearch("index")
+ .setSearchType(searchType)
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setSize(Integer.MAX_VALUE);
+
+ if (searchType == SearchType.SCAN || (searchType != SearchType.COUNT && randomBoolean())) {
+ builder.setScroll("1m");
+ }
+
+ SearchResponse response = builder.execute().actionGet();
+ try {
+ ElasticsearchAssertions.assertHitCount(response, 1l);
+ } finally {
+ String scrollId = response.getScrollId();
+ if (scrollId != null) {
+ clearScroll(scrollId);
+ }
+ }
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java
new file mode 100644
index 0000000..450692b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.simple;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class SimpleSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSearchNullIndex() {
+ try {
+ client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // expected: a null index name is rejected
+ }
+
+ try {
+ client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // expected: a null index array is rejected
+ }
+ }
+
+ @Test
+ public void testSearchRandomPreference() throws InterruptedException, ExecutionException {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 3))).get();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ // random (possibly empty) unicode preference strings should be accepted and still return all docs
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomUnicodeOfLengthBetween(0, 4)).get();
+ assertHitCount(searchResponse, 6l);
+
+ }
+ }
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
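+ // 192.168.0.7 falls between from (192.168.0.5) and to (192.168.0.10), so the single doc matches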
+ SearchResponse search = client().prepareSearch()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7")))
+ .execute().actionGet();
+
+ assertHitCount(search, 1l);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+ // _id is not indexed, but the term query should automatically be rewritten to match it
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryString("_id:XXX1")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ // _id is not indexed, but prefix queries on it are automatically supported as well
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryString("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void simpleDateRangeWithUpperInclusiveEnabledTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // with upper-inclusive rounding, lte("2010-01-06") covers the full day, so the 02:00 doc matches
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lte("2010-01-06")).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lt("2010-01-06")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void simpleDateRangeWithUpperInclusiveDisabledTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.mapping.date.round_ceil", false)).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
+ // full-day rounding of the upper bound is disabled here, so lte behaves like lt for the 02:00 doc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lte("2010-01-06")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lt("2010-01-06")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ public void simpleDateMathTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
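+ // date math: 2010-01-03||+2d resolves to 2010-01-05 and 2010-01-04||+2d to 2010-01-06, so both docs are in range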
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("field:[2010-01-03||+2d TO 2010-01-04||+2d]")).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void localeDependentDateTests() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("date_field")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "de")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", "" + i).setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800").execute().actionGet();
+ client().prepareIndex("test", "type1", "" + (10 + i)).setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800").execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 10l);
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 20l);
+
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java
new file mode 100644
index 0000000..265179d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java
@@ -0,0 +1,1533 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util._TestUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleSortTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void testTrackScores() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ index("test", "type1", jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .field("ivalue", 100)
+ .field("dvalue", 0.1)
+ .endObject());
+ index("test", "type1", jsonBuilder().startObject()
+ .field("id", "2")
+ .field("svalue", "bbb")
+ .field("ivalue", 200)
+ .field("dvalue", 0.2)
+ .endObject());
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getMaxScore(), equalTo(Float.NaN));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getScore(), equalTo(Float.NaN));
+ }
+
+ // now check with score tracking
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("svalue", SortOrder.ASC)
+ .setTrackScores(true)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getMaxScore(), not(equalTo(Float.NaN)));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getScore(), not(equalTo(Float.NaN)));
+ }
+ }
+
+ public void testRandomSorting() throws ElasticsearchException, IOException, InterruptedException, ExecutionException {
+ int numberOfShards = between(1, 10);
+ Random random = getRandom();
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
+ .addMapping("type",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("sparse_bytes")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .startObject("dense_bytes")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+
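+ // the TreeMaps keep the expected ids in BytesRef order; every doc gets dense_bytes, only a rare few get sparse_bytes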
+ TreeMap<BytesRef, String> sparseBytes = new TreeMap<BytesRef, String>();
+ TreeMap<BytesRef, String> denseBytes = new TreeMap<BytesRef, String>();
+ int numDocs = atLeast(200);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ String docId = Integer.toString(i);
+ BytesRef ref = null;
+ do {
+ ref = new BytesRef(_TestUtil.randomRealisticUnicodeString(random));
+ } while (denseBytes.containsKey(ref));
+ denseBytes.put(ref, docId);
+ XContentBuilder src = jsonBuilder().startObject().field("dense_bytes", ref.utf8ToString());
+ if (rarely()) {
+ src.field("sparse_bytes", ref.utf8ToString());
+ sparseBytes.put(ref, docId);
+ }
+ src.endObject();
+ builders[i] = client().prepareIndex("test", "type", docId).setSource(src);
+ }
+ indexRandom(true, builders);
+ {
+ int size = between(1, denseBytes.size());
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).setSize(size)
+ .addSort("dense_bytes", SortOrder.ASC).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ Set<Entry<BytesRef, String>> entrySet = denseBytes.entrySet();
+ Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator();
+ for (int i = 0; i < size; i++) {
+ assertThat(iterator.hasNext(), equalTo(true));
+ Entry<BytesRef, String> next = iterator.next();
+ assertThat("pos: " + i, searchResponse.getHits().getAt(i).id(), equalTo(next.getValue()));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(next.getKey().utf8ToString()));
+ }
+ }
+ if (!sparseBytes.isEmpty()) {
+ int size = between(1, sparseBytes.size());
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.existsFilter("sparse_bytes")).setSize(size).addSort("sparse_bytes", SortOrder.ASC).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo((long) sparseBytes.size()));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet();
+ Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator();
+ for (int i = 0; i < size; i++) {
+ assertThat(iterator.hasNext(), equalTo(true));
+ Entry<BytesRef, String> next = iterator.next();
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(next.getValue()));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(next.getKey().utf8ToString()));
+ }
+ }
+ }
+
+
+ @Test
+ public void test3078() {
+ createIndex("test");
+ ensureGreen();
+
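+ // the field is a string, so sorting is lexicographic: "1" < "10" < "100"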
+ for (int i = 1; i < 101; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", Integer.toString(i)).execute().actionGet();
+ }
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // reindex and refresh
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+ refresh();
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // reindex - no refresh
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // optimize
+ optimize();
+ refresh();
+
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+ }
+
+ @Test
+ public void testScoreSortDirection() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", 1).execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", 0).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(customScoreQuery(matchAllQuery()).script("_source.field")).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(customScoreQuery(matchAllQuery()).script("_source.field")).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(customScoreQuery(matchAllQuery()).script("_source.field")).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ }
+
+
+ @Test
+ public void testScoreSortDirection_withFunctionScore() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", 1).execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", 0).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction("_source.field"))).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction("_source.field"))).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction("_source.field"))).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ }
+
+ @Test
+ public void testIssue2986() {
+ prepareCreate("test").setSettings(indexSettings()).execute().actionGet();
+
+ client().prepareIndex("test", "post", "1").setSource("{\"field1\":\"value1\"}").execute().actionGet();
+ client().prepareIndex("test", "post", "2").setSource("{\"field1\":\"value2\"}").execute().actionGet();
+ client().prepareIndex("test", "post", "3").setSource("{\"field1\":\"value3\"}").execute().actionGet();
+ refresh();
+ SearchResponse result = client().prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC).execute().actionGet();
+
+ for (SearchHit hit : result.getHits()) {
+ assertFalse(Float.isNaN(hit.getScore()));
+ }
+ }
+
+ @Test
+ public void testIssue2991() {
+ for (int i = 1; i < 4; i++) {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", i).put("index.number_of_replicas", 0)).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("tag", "alpha").execute().actionGet();
+ refresh();
+
+ client().prepareIndex("test", "type", "3").setSource("tag", "gamma").execute().actionGet();
+ refresh();
+
+ client().prepareIndex("test", "type", "4").setSource("tag", "delta").execute().actionGet();
+
+ refresh();
+ client().prepareIndex("test", "type", "2").setSource("tag", "beta").execute().actionGet();
+
+ refresh();
+ SearchResponse resp = client().prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)).execute().actionGet();
+ assertHitCount(resp, 4);
+ assertThat(resp.getHits().hits().length, equalTo(2));
+ assertFirstHit(resp, hasId("1"));
+ assertSecondHit(resp, hasId("2"));
+
+ resp = client().prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)).execute().actionGet();
+ assertHitCount(resp, 4);
+ assertThat(resp.getHits().hits().length, equalTo(2));
+ assertFirstHit(resp, hasId("3"));
+ assertSecondHit(resp, hasId("4"));
+ }
+ }
+
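+    // Indexes ten single-valued documents and verifies ascending and descending
+    // field sorts (plus a script sort) for string and each numeric type.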
+ @Test
+ public void testSimpleSorts() throws Exception {
+ final int numberOfShards = between(1, 10);
+ Random random = getRandom();
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("boolean_value").field("type", "boolean").endObject()
+ .startObject("byte_value").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short_value").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer_value").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("long_value").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float_value").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double_value").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder builder = client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("str_value", new String(new char[]{(char) (97 + i), (char) (97 + i)}))
+ .field("boolean_value", true)
+ .field("byte_value", i)
+ .field("short_value", i)
+ .field("integer_value", i)
+ .field("long_value", i)
+ .field("float_value", 0.1 * i)
+ .field("double_value", 0.1 * i)
+ .endObject());
+ builders.add(builder);
+ }
+ Collections.shuffle(builders, random);
+ for (IndexRequestBuilder builder : builders) {
+ builder.execute().actionGet();
+ if (random.nextBoolean()) {
+ if (random.nextInt(5) != 0) {
+ refresh();
+ } else {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ }
+ refresh();
+
+ // STRING
+ int size = 1 + random.nextInt(10);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.ASC)
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + i), (char) (97 + i)})));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + (9 - i)), (char) (97 + (9 - i))})));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // STRING script
+ size = 1 + random.nextInt(10);
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort(new ScriptSortBuilder("doc['str_value'].value", "string"))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + i), (char) (97 + i)})));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + (9 - i)), (char) (97 + (9 - i))})));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // BYTE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("byte_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("byte_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // SHORT
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("short_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("short_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // INTEGER
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("integer_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo(i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("integer_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo((9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // LONG
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("long_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("long_value", SortOrder.DESC)
+ .execute().actionGet();
+        assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // FLOAT
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("float_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("float_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // DOUBLE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("double_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("double_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertNoFailures(searchResponse);
+ }
+
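+    // Regression test for issue #2920: sorting by a constant (empty-string) script
+    // must not fail even though every document yields the same sort key.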
+ @Test
+ public void test2920() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("test",
+ jsonBuilder().startObject().startObject("test").startObject("properties")
+ .startObject("value").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "test", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "" + i).endObject()).execute().actionGet();
+ }
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.scriptSort("\u0027\u0027", "string")).setSize(10)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ }
+
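+    // Script fields that reduce multi-valued long, double, string and geo_point
+    // fields to their minimum should work, including for documents without values.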
+ @Test
+ public void testSortMinValueScript() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("lvalue").field("type", "long").endObject()
+ .startObject("dvalue").field("type", "double").endObject()
+ .startObject("svalue").field("type", "string").endObject()
+ .startObject("gvalue").field("type", "geo_point").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").setSettings(indexSettings()).addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder req = client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject()
+ .field("ord", i)
+ .field("svalue", new String[]{"" + i, "" + (i + 1), "" + (i + 2)})
+ .field("lvalue", new long[]{i, i + 1, i + 2})
+ .field("dvalue", new double[]{i, i + 1, i + 2})
+ .startObject("gvalue")
+ .startObject("location")
+ .field("lat", (double) i + 1)
+ .field("lon", (double) i)
+ .endObject()
+ .endObject()
+ .endObject());
+ req.execute().actionGet();
+ }
+
+ for (int i = 10; i < 20; i++) { // add some docs that don't have values in those fields
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject()
+ .field("ord", i)
+ .endObject()).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // test the long values
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Long.MAX_VALUE; for (v : doc['lvalue'].values){ retval = Math.min(v, retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Long) searchResponse.getHits().getAt(i).field("min").value(), equalTo((long) i));
+ }
+ // test the double values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Double.MAX_VALUE; for (v : doc['dvalue'].values){ retval = Math.min(v, retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+
+ // test the string values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Integer.MAX_VALUE; for (v : doc['svalue'].values){ retval = Math.min(Integer.parseInt(v), retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Integer) searchResponse.getHits().getAt(i).field("min").value(), equalTo(i));
+ }
+
+ // test the geopoint values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Double.MAX_VALUE; for (v : doc['gvalue'].values){ retval = Math.min(v.lon, retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+ }
+
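+    // Documents whose sort field is explicitly null should sort last in both
+    // directions and must not cause shard failures.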
+ @Test
+ public void testDocumentsWithNullValue() throws Exception {
+        // TODO: sorting shouldn't fail when the sort field is mapped dynamically.
+        // We have to specify the mapping explicitly because, by the time the search runs,
+        // the dynamic mapping might not have propagated to all nodes yet, and the sort
+        // would then fail because the sort field is not defined.
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").setSettings(indexSettings()).addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .nullField("svalue")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "3")
+ .field("svalue", "bbb")
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].values[0]")
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // a query with docs just with null values
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("id", "2"))
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("2"));
+ }
+
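+    // Numeric sorts with missing values: the default behaves like missing("_last"),
+    // while missing("_first") moves documents without a value to the front.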
+ @Test
+ public void testSortMissingNumbers() throws Exception {
+ prepareCreate("test").addMapping("type1",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("i_value")
+ .field("type", "integer")
+ .startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject()
+ .endObject()
+ .startObject("d_value")
+ .field("type", "float")
+ .startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject()
+ .endObject()
+ .endObject()
+ .startObject("d_value")
+ .field("type", "float")
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", -1)
+ .field("d_value", -1.1)
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", 2)
+ .field("d_value", 2.2)
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ logger.info("--> sort with no missing (same as missing _last)");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _last");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _first");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ }
+
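+    // String sorts with missing values: defaults to _last, supports _first, and
+    // accepts a concrete substitute value ("b") that sorts between "a" and "c".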
+ @Test
+ public void testSortMissingStrings() throws ElasticsearchException, IOException {
+ prepareCreate("test").addMapping("type1",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("value")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("value", "a")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("value", "c")
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+ }
+
+ logger.info("--> sort with no missing (same as missing _last)");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _last");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _first");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+
+ logger.info("--> sort with missing b");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ }
+
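+    // Sorting on an unmapped field fails with a parse error unless
+    // ignoreUnmapped(true) is set on the sort builder.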
+ @Test
+ public void testIgnoreUnmapped() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", -1)
+ .field("d_value", -1.1)
+ .endObject()).execute().actionGet();
+
+ logger.info("--> sort with an unmapped field, verify it fails");
+ try {
+ SearchResponse result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("kkk"))
+ .execute().actionGet();
+ assertThat("Expected exception but returned with", result, nullValue());
+ } catch (SearchPhaseExecutionException e) {
+ //we check that it's a parse failure rather than a different shard failure
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("Parse Failure [No mapping found for [kkk] in order to sort on]"));
+ }
+ }
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("kkk").ignoreUnmapped(true))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ }
+
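+    // Multi-valued fields sort by their minimum value ascending and their maximum
+    // descending by default; sortMode("sum") sorts by the per-document sum instead.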
+ @Test
+ public void testSortMVField() throws Exception {
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("long_values").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("int_values").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short_values").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("byte_values").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float_values").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double_values").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+        ensureGreen();
+
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .array("long_values", 1l, 5l, 10l, 8l)
+ .array("int_values", 1, 5, 10, 8)
+ .array("short_values", 1, 5, 10, 8)
+ .array("byte_values", 1, 5, 10, 8)
+ .array("float_values", 1f, 5f, 10f, 8f)
+ .array("double_values", 1d, 5d, 10d, 8d)
+ .array("string_values", "01", "05", "10", "08")
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .array("long_values", 11l, 15l, 20l, 7l)
+ .array("int_values", 11, 15, 20, 7)
+ .array("short_values", 11, 15, 20, 7)
+ .array("byte_values", 11, 15, 20, 7)
+ .array("float_values", 11f, 15f, 20f, 7f)
+ .array("double_values", 11d, 15d, 20d, 7d)
+ .array("string_values", "11", "15", "20", "07")
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .array("long_values", 2l, 1l, 3l, -4l)
+ .array("int_values", 2, 1, 3, -4)
+ .array("short_values", 2, 1, 3, -4)
+ .array("byte_values", 2, 1, 3, -4)
+ .array("float_values", 2f, 1f, 3f, -4f)
+ .array("double_values", 2d, 1d, 3d, -4d)
+ .array("string_values", "02", "01", "03", "!4")
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("long_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(-4l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(1l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(7l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("long_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(20l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(10l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(3l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode("sum"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(53l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(24l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("int_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("int_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("short_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("short_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("byte_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("byte_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("float_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).floatValue(), equalTo(-4f));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).floatValue(), equalTo(1f));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).floatValue(), equalTo(7f));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("float_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).floatValue(), equalTo(20f));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).floatValue(), equalTo(10f));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).floatValue(), equalTo(3f));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("double_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(-4d));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(1d));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(7d));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("double_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(20d));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(10d));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(3d));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("string_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("!4"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("01"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("07"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+ }
+
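+    // Sorting on a field that only a few documents contain should keep returning
+    // those documents in order as unrelated documents are added and refreshed.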
+ @Test
+ public void testSortOnRareField() throws ElasticsearchException, IOException {
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .array("string_values", "01", "05", "10", "08")
+ .endObject()).execute().actionGet();
+
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("10"));
+
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .array("string_values", "11", "15", "20", "07")
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < 15; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ }
+ refresh();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(2)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .array("string_values", "02", "01", "03", "!4")
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < 15; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ }
+ refresh();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+
+ for (int i = 0; i < 15; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+ }
+
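+    // The _uid and _timestamp meta fields should be sortable; fielddata formats
+    // are randomized between the default and doc_values.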
+    @Test
+    public void testSortMetaField() throws Exception {
+ final boolean idDocValues = maybeDocValues();
+ final boolean timestampDocValues = maybeDocValues();
+ prepareCreate("test")
+ .addMapping("typ", XContentFactory.jsonBuilder().startObject().startObject("typ")
+ .startObject("_uid").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("_id").field("index", !idDocValues || randomBoolean() ? "not_analyzed" : "no").startObject("fielddata").field("format", idDocValues ? "doc_values" : null).endObject().endObject()
+ .startObject("_timestamp").field("enabled", true).field("store", true).field("index", !timestampDocValues || randomBoolean() ? "not_analyzed" : "no").startObject("fielddata").field("format", timestampDocValues ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+ final int numDocs = atLeast(10);
+ IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ indexReqs[i] = client().prepareIndex("test", "typ", Integer.toString(i)).setTimestamp(Integer.toString(randomInt(1000))).setSource();
+ }
+ indexRandom(true, indexReqs);
+
+ SortOrder order = randomFrom(SortOrder.values());
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_uid", order)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ SearchHit[] hits = searchResponse.getHits().hits();
+ BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
+ for (int i = 0; i < hits.length; ++i) {
+ final BytesRef uid = new BytesRef(Uid.createUid(hits[i].type(), hits[i].id()));
+ assertThat(previous, order == SortOrder.ASC ? lessThan(uid) : greaterThan(uid));
+ previous = uid;
+ }
+
+ /*searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_id", order)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
+ previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
+ for (int i = 0; i < hits.length; ++i) {
+ final BytesRef id = new BytesRef(Uid.createUid(hits[i].type(), hits[i].id()));
+ assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id));
+ previous = id;
+ }*/
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_timestamp", order)
+ .addField("_timestamp")
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
+ Long previousTs = order == SortOrder.ASC ? 0 : Long.MAX_VALUE;
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHitField timestampField = hits[i].getFields().get("_timestamp");
+ Long timestamp = timestampField.<Long>getValue();
+ assertThat(previousTs, order == SortOrder.ASC ? lessThanOrEqualTo(timestamp) : greaterThanOrEqualTo(timestamp));
+ previousTs = timestamp;
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java b/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java
new file mode 100644
index 0000000..de30d6c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.source;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+public class SourceFetchingTests extends ElasticsearchIntegrationTest {
+
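+    // By default hits include _source; requesting specific or partial fields
+    // suppresses it unless "_source" itself is requested as a field.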
+ @Test
+ public void testSourceDefaultBehavior() {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "field", "value");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").addField("bla").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ response = client().prepareSearch("test").addField("_source").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").addPartialField("test", "field", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ }
+
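+    // setFetchSource can disable _source entirely or filter it with include and
+    // exclude patterns.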
+ @Test
+ public void testSourceFiltering() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value", "field2", "value2").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").setFetchSource(false).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ response = client().prepareSearch("test").setFetchSource(true).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").setFetchSource("field1", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field1"), equalTo("value"));
+
+ response = client().prepareSearch("test").setFetchSource("hello", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(0));
+
+ response = client().prepareSearch("test").setFetchSource(new String[]{"*"}, new String[]{"field2"}).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field1"), equalTo("value"));
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java
new file mode 100644
index 0000000..c4778d5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.stats;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.search.stats.SearchStats.Stats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SearchStatsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void testSimpleStats() throws Exception {
+ // clear all stats first
+ client().admin().indices().prepareStats().clear().execute().actionGet();
+ createIndex("test1");
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ if (i == 10) {
+ refresh();
+ }
+ }
+ createIndex("test2");
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test2", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ if (i == 10) {
+ refresh();
+ }
+ }
+ cluster().ensureAtMostNumNodes(numAssignedShards("test1", "test2"));
+ for (int i = 0; i < 200; i++) {
+ client().prepareSearch().setQuery(QueryBuilders.termQuery("field", "value")).setStats("group1", "group2").execute().actionGet();
+ }
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchTimeInMillis(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats(), nullValue());
+
+ indicesStats = client().admin().indices().prepareStats().setGroups("group1").execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats(), notNullValue());
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getQueryCount(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getQueryTimeInMillis(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getFetchCount(), greaterThan(0L));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getFetchTimeInMillis(), greaterThan(0L));
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ NodeStats[] nodes = nodeStats.getNodes();
+ Set<String> nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2");
+ int num = 0;
+ for (NodeStats stat : nodes) {
+ Stats total = stat.getIndices().getSearch().getTotal();
+ if (nodeIdsWithIndex.contains(stat.getNode().getId())) {
+ assertThat(total.getQueryCount(), greaterThan(0L));
+ assertThat(total.getQueryTimeInMillis(), greaterThan(0L));
+ num++;
+ } else {
+ assertThat(total.getQueryCount(), equalTo(0L));
+ assertThat(total.getQueryTimeInMillis(), equalTo(0L));
+ }
+ }
+
+ assertThat(num, greaterThan(0));
+
+ }
+
+ private Set<String> nodeIdsWithIndex(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ Set<String> nodes = new HashSet<String>();
+ for (ShardIterator shardIterator : allAssignedShardsGrouped) {
+ for (ShardRouting routing : shardIterator.asUnordered()) {
+ if (routing.active()) {
+ nodes.add(routing.currentNodeId());
+ }
+
+ }
+ }
+ return nodes;
+ }
+
+ @Test
+ public void testOpenContexts() {
+ createIndex("test1");
+ for (int i = 0; i < 50; i++) {
+ client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L));
+
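+ // a scan search with a scroll keeps one search context open per assigned shard, as asserted below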
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(5)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo((long)numAssignedShards("test1")));
+
+ // scroll again without renewing the scroll timeout, so the remaining search contexts are released
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).execute().actionGet();
+
+ indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L));
+ }
+
+ protected int numAssignedShards(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ return allAssignedShardsGrouped.size();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java
new file mode 100644
index 0000000..5e77faf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java
@@ -0,0 +1,1111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.MapperException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionFuzzyBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
+
+ private final String INDEX = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final String TYPE = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final String FIELD = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+
+ @Test
+ public void testSimple() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+ String[][] input = {{"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"},
+ {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"},
+ {"The Prodigy"}, {"The Prodigy"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"},
+ {"Turbonegro"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}}; // work with frequencies
+ for (int i = 0; i < input.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .endObject()
+ .endObject()
+ )
+ .execute().actionGet();
+ }
+
+ refresh();
+
+ assertSuggestionsNotInOrder("f", "Foo Fighters", "Firestarter", "Foo Fighters Generator", "Foo Fighters Learn to Fly");
+ assertSuggestionsNotInOrder("t", "The Prodigy", "Turbonegro", "Turbonegro Get it on", "The Prodigy Firestarter");
+ }
+
+ @Test
+ public void testSuggestFieldWithPercolateApi() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+ String[][] input = {{"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"},
+ {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"},
+ {"The Prodigy"}, {"The Prodigy"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"},
+ {"Turbonegro"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}}; // work with frequencies
+ for (int i = 0; i < input.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .endObject()
+ .endObject()
+ )
+ .execute().actionGet();
+ }
+
+ client().prepareIndex(INDEX, PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ refresh();
+
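+ // the already-indexed doc 1 should match the registered match_all percolator query exactly once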
+ PercolateResponse response = client().preparePercolate().setIndices(INDEX).setDocumentType(TYPE)
+ .setGetRequest(Requests.getRequest(INDEX).type(TYPE).id("1"))
+ .execute().actionGet();
+ assertThat(response.getCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testBasicPrefixSuggestion() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+ for (int i = 0; i < 2; i++) {
+ createData(i == 0);
+ assertSuggestions("f", "Firestarter - The Prodigy", "Foo Fighters", "Generator - Foo Fighters", "Learn to Fly - Foo Fighters");
+ assertSuggestions("ge", "Generator - Foo Fighters", "Get it on - Turbonegro");
+ assertSuggestions("ge", "Generator - Foo Fighters", "Get it on - Turbonegro");
+ assertSuggestions("t", "The Prodigy", "Firestarter - The Prodigy", "Get it on - Turbonegro", "Turbonegro");
+ }
+ }
+
+ @Test
+ public void testThatWeightsAreWorking() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ List<String> similarNames = Lists.newArrayList("the", "The Prodigy", "The Verve", "The the");
+ // the weight is 1000 divided by the string length, so the results are easy to check
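+ // e.g. "the" -> 333, "The the" -> 142, "The Verve" -> 111, "The Prodigy" -> 90, which fixes the expected order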
+ for (String similarName : similarNames) {
+ client().prepareIndex(INDEX, TYPE, similarName).setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(similarName).endArray()
+ .field("weight", 1000 / similarName.length())
+ .endObject().endObject()
+ ).get();
+ }
+
+ refresh();
+
+ assertSuggestions("the", "the", "The the", "The Verve", "The Prodigy");
+ }
+
+ @Test
+ public void testThatWeightMustBeAnInteger() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ try {
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("sth").endArray()
+ .field("weight", 2.5)
+ .endObject().endObject()
+ ).get();
+ fail("Indexing with a float weight was successful, but should not be");
+ } catch (MapperParsingException e) {
+ assertThat(ExceptionsHelper.detailedMessage(e), containsString("2.5"));
+ }
+ }
+
+ @Test
+ public void testThatInputCanBeAStringInsteadOfAnArray() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .field("input", "Foo Fighters")
+ .field("output", "Boo Fighters")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("f", "Boo Fighters");
+ }
+
+ @Test
+ public void testThatPayloadsAreArbitraryJsonObjects() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .startObject("payload").field("foo", "bar").startArray("test").value("spam").value("eggs").endArray().endObject()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ // parse JSON
+ Map<String, Object> jsonMap = prefixOption.getPayloadAsMap();
+ assertThat(jsonMap.size(), is(2));
+ assertThat(jsonMap.get("foo").toString(), is("bar"));
+ assertThat(jsonMap.get("test"), is(instanceOf(List.class)));
+ List<String> listValues = (List<String>) jsonMap.get("test");
+ assertThat(listValues, hasItems("spam", "eggs"));
+ }
+
+ @Test
+ public void testPayloadAsNumeric() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .field("payload", 1)
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ assertThat(prefixOption.getPayloadAsLong(), equalTo(1L));
+ }
+
+ @Test
+ public void testPayloadAsString() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .field("payload", "test")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ assertThat(prefixOption.getPayloadAsString(), equalTo("test"));
+ }
+
+ @Test(expected = MapperException.class)
+ public void testThatExceptionIsThrownWhenPayloadsAreDisabledButInIndexRequest() throws Exception {
+ completionMappingBuilder.payloads(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .startArray("payload").value("spam").value("eggs").endArray()
+ .endObject().endObject()
+ ).get();
+ }
+
+ @Test
+ public void testDisabledPreserveSeparators() throws Exception {
+ completionMappingBuilder.preserveSeparators(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("weight", 10)
+ .endObject().endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foof").endArray()
+ .field("weight", 20)
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foof", "Foof", "Foo Fighters");
+ }
+
+ @Test
+ public void testEnabledPreserveSeparators() throws Exception {
+ completionMappingBuilder.preserveSeparators(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .endObject().endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foof").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foof", "Foof");
+ }
+
+ @Test
+ public void testThatMultipleInputsAreSupported() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").value("Fu Fighters").endArray()
+ .field("output", "The incredible Foo Fighters")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foo", "The incredible Foo Fighters");
+ assertSuggestions("fu", "The incredible Foo Fighters");
+ }
+
+ @Test
+ public void testThatShortSyntaxIsWorking() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startArray(FIELD)
+ .value("The Prodigy Firestarter").value("Firestarter")
+ .endArray().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("t", "The Prodigy Firestarter");
+ assertSuggestions("f", "Firestarter");
+ }
+
+ @Test
+ public void testThatDisablingPositionIncrementsWorkForStopwords() throws Exception {
+ // we need an analyzer that removes stopwords ("classic" here), so the simple analyzer will not do
+ completionMappingBuilder.searchAnalyzer("classic").indexAnalyzer("classic").preservePositionIncrements(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("The Beatles").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("b", "The Beatles");
+ }
+
+ @Test
+ public void testThatSynonymsWork() throws Exception {
+ Settings.Builder settingsBuilder = settingsBuilder()
+ .put("analysis.analyzer.suggest_analyzer_synonyms.type", "custom")
+ .put("analysis.analyzer.suggest_analyzer_synonyms.tokenizer", "standard")
+ .putArray("analysis.analyzer.suggest_analyzer_synonyms.filter", "standard", "lowercase", "my_synonyms")
+ .put("analysis.filter.my_synonyms.type", "synonym")
+ .putArray("analysis.filter.my_synonyms.synonyms", "foo,renamed");
+ completionMappingBuilder.searchAnalyzer("suggest_analyzer_synonyms").indexAnalyzer("suggest_analyzer_synonyms");
+ createIndexAndMappingAndSettings(settingsBuilder, completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // get suggestions for renamed
+ assertSuggestions("r", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatUpgradeToMultiFieldTypeWorks() throws Exception {
+ Settings.Builder settingsBuilder = createDefaultSettings();
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .field("path", "just_name") // The path can't be changed / upgraded
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).setSettings(settingsBuilder).get();
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatUpgradeToMultiFieldsWorks() throws Exception {
+ Settings.Builder settingsBuilder = createDefaultSettings();
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .field("path", "just_name") // The path can't be changed / upgraded
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).setSettings(settingsBuilder).get();
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .field("path", "just_name") // Need to specify path again, to make sure that the `path` is known when this mapping is parsed and turned into DocumentMapper that we merge with.
+ .startObject("fields")
+ .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "path_type issue")
+ // If the path_type is set to `just_name` and the multi field is updated (for example, another multi field is added),
+ // then, if the path isn't specified again, the path_type isn't taken into account and full path names are generated.
+ public void testThatUpgradeToMultiFieldWorks_bug() throws Exception {
+ Settings.Builder settingsBuilder = createDefaultSettings();
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).setSettings(settingsBuilder).get();
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterWorks() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirv").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirw").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsEditDistances() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // edit distance 1
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Norw").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ // edit distance 2
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Norw").size(10).setFuzziness(Fuzziness.TWO)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsTranspositions() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
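+ // "Nriv" is the prefix "Nirv" with two adjacent characters swapped, so with an edit distance of 1 it can only match when transpositions count as a single edit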
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nriv").size(10).setFuzzyTranspositions(false).setFuzziness(Fuzziness.ONE)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nriv").size(10).setFuzzyTranspositions(true).setFuzziness(Fuzziness.ONE)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
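+ // fuzzy_min_length 6: the 5-char prefix "Nriva" gets no fuzziness and finds nothing, while the 6-char "Nrivan" may be fuzzy-matched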
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nriva").size(10).setFuzzyMinLength(6)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nrivan").size(10).setFuzzyMinLength(6)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
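+ // fuzzy prefix_length 4: the first 4 characters must match exactly; "Nirw" differs within that prefix, "Nirvo" only after it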
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirw").size(10).setFuzzyPrefixLength(4)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirvo").size(10).setFuzzyPrefixLength(4)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterIsUnicodeAware() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("ööööö").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // suggest with a character that requires unicode awareness to be matched
+ CompletionSuggestionFuzzyBuilder completionSuggestionBuilder =
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("öööи").size(10).setUnicodeAware(true);
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "ööööö");
+
+ // removing unicode awareness leads to no result
+ completionSuggestionBuilder.setUnicodeAware(false);
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ // increasing edit distance instead of unicode awareness works again, as this is only a single character
+ completionSuggestionBuilder.setFuzziness(Fuzziness.TWO);
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "ööööö");
+ }
+
+ @Test
+ public void testThatStatsAreWorking() throws Exception {
+ String otherField = "testOtherField";
+
+ client().admin().indices().prepareDelete("_all").get();
+ client().admin().indices().prepareCreate(INDEX)
+ .setSettings(createDefaultSettings())
+ .get();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion").field("analyzer", "simple")
+ .endObject()
+ .startObject(otherField)
+ .field("type", "completion").field("analyzer", "simple")
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ // Index two entities
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject()).get();
+ client().prepareIndex(INDEX, TYPE, "2").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject()).get();
+
+ // Get all stats
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).get();
+ CompletionStats completionStats = indicesStatsResponse.getIndex(INDEX).getPrimaries().completion;
+ assertThat(completionStats, notNullValue());
+ long totalSizeInBytes = completionStats.getSizeInBytes();
+ assertThat(totalSizeInBytes, is(greaterThan(0L)));
+
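+ // the per-field completion sizes gathered below should add up to this total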
+ IndicesStatsResponse singleFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields(FIELD).get();
+ long singleFieldSizeInBytes = singleFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get(FIELD);
+ IndicesStatsResponse otherFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields(otherField).get();
+ long otherFieldSizeInBytes = otherFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get(otherField);
+ assertThat(singleFieldSizeInBytes + otherFieldSizeInBytes, is(totalSizeInBytes));
+
+ // regexes
+ IndicesStatsResponse regexFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields("*").get();
+ long regexSizeInBytes = regexFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get("*");
+ assertThat(regexSizeInBytes, is(totalSizeInBytes));
+ }
+
+ @Test
+ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+ try {
+ client().prepareSearch(INDEX).setTypes(TYPE).addSort(new FieldSortBuilder(FIELD)).execute().actionGet();
+ fail("Expected an exception due to trying to sort on completion field, but did not happen");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status().getStatus(), is(400));
+ assertThat(e.getMessage(), containsString("Sorting not supported for field[" + FIELD + "]"));
+ }
+ }
+
+ @Test
+ public void testThatSuggestStopFilterWorks() throws Exception {
+ ImmutableSettings.Builder settingsBuilder = settingsBuilder()
+ .put("index.analysis.analyzer.stoptest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.stoptest.filter", "standard", "suggest_stop_filter")
+ .put("index.analysis.filter.suggest_stop_filter.type", "stop")
+ .put("index.analysis.filter.suggest_stop_filter.remove_trailing", false);
+
+ CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+ completionMappingBuilder.preserveSeparators(true).preservePositionIncrements(true);
+ completionMappingBuilder.searchAnalyzer("stoptest");
+ completionMappingBuilder.indexAnalyzer("simple");
+ createIndexAndMappingAndSettings(settingsBuilder, completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().field(FIELD, "Feed trolls").endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().field(FIELD, "Feed the trolls").endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("f", "Feed the trolls", "Feed trolls");
+ assertSuggestions("fe", "Feed the trolls", "Feed trolls");
+ assertSuggestions("fee", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed t", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed the", "Feed the trolls");
+ // the stop word is complete and is removed at query time, reducing the query to "feed"
+ assertSuggestions("feed the ", "Feed the trolls", "Feed trolls");
+ // the stopword is removed, but the position increment remains, which doesn't work for the prefix suggester
+ assertSuggestions("feed the t");
+ }
+
+ @Test(expected = MapperParsingException.class)
+ public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() throws Exception {
+ CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("FRIGGININVALID").value("Nirvana").endArray()
+ .endObject().endObject()).get();
+ }
+
+ public void assertSuggestions(String suggestion, String... suggestions) {
+ String suggestionName = RandomStrings.randomAsciiOfLength(getRandom(), 10);
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder(suggestionName).field(FIELD).text(suggestion).size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, suggestionName, suggestions);
+ }
+
+ public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) {
+ String suggestionName = RandomStrings.randomAsciiOfLength(getRandom(), 10);
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder(suggestionName).field(FIELD).text(suggestString).size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, false, suggestionName, suggestions);
+ }
+
+ private void assertSuggestions(SuggestResponse suggestResponse, String name, String... suggestions) {
+ assertSuggestions(suggestResponse, true, name, suggestions);
+ }
+
+ private void assertSuggestions(SuggestResponse suggestResponse, boolean suggestionOrderStrict, String name, String... suggestions) {
+ assertNoFailures(suggestResponse);
+
+ List<String> suggestionNames = Lists.newArrayList();
+ for (Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestion : Lists.newArrayList(suggestResponse.getSuggest().iterator())) {
+ suggestionNames.add(suggestion.getName());
+ }
+ String expectFieldInResponseMsg = String.format(Locale.ROOT, "Expected suggestion named %s in response, got %s", name, suggestionNames);
+ assertThat(expectFieldInResponseMsg, suggestResponse.getSuggest().getSuggestion(name), is(notNullValue()));
+
+ Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> suggestion = suggestResponse.getSuggest().getSuggestion(name);
+
+ List<String> suggestionList = getNames(suggestion.getEntries().get(0));
+ List<Suggest.Suggestion.Entry.Option> options = suggestion.getEntries().get(0).getOptions();
+
+ String assertMsg = String.format(Locale.ROOT, "Expected options %s length to be %s, but was %s", suggestionList, suggestions.length, options.size());
+ assertThat(assertMsg, options.size(), is(suggestions.length));
+ if (suggestionOrderStrict) {
+ for (int i = 0; i < suggestions.length; i++) {
+ String errMsg = String.format(Locale.ROOT, "Expected elem %s in list %s to be [%s] score: %s", i, suggestionList, suggestions[i], options.get(i).getScore());
+ assertThat(errMsg, options.get(i).getText().toString(), is(suggestions[i]));
+ }
+ } else {
+ for (String expectedSuggestion : suggestions) {
+ String errMsg = String.format(Locale.ROOT, "Expected elem %s to be in list %s", expectedSuggestion, suggestionList);
+ assertThat(errMsg, suggestionList, hasItem(expectedSuggestion));
+ }
+ }
+ }
+
+ private List<String> getNames(Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> suggestEntry) {
+ List<String> names = Lists.newArrayList();
+ for (Suggest.Suggestion.Entry.Option entry : suggestEntry.getOptions()) {
+ names.add(entry.getText().string());
+ }
+ return names;
+ }
+
+ private void createIndexAndMappingAndSettings(Settings.Builder settingsBuilder, CompletionMappingBuilder completionMappingBuilder) throws IOException {
+ client().admin().indices().prepareCreate(INDEX)
+ .setSettings(settingsBuilder)
+ .get();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .field("index_analyzer", completionMappingBuilder.indexAnalyzer)
+ .field("search_analyzer", completionMappingBuilder.searchAnalyzer)
+ .field("payloads", completionMappingBuilder.payloads)
+ .field("preserve_separators", completionMappingBuilder.preserveSeparators)
+ .field("preserve_position_increments", completionMappingBuilder.preservePositionIncrements)
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+ ensureYellow();
+ }
+
+ private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException {
+ createIndexAndMappingAndSettings(createDefaultSettings(), completionMappingBuilder);
+ }
+
+ private ImmutableSettings.Builder createDefaultSettings() {
+ int randomShardNumber = between(1, 5);
+ int randomReplicaNumber = between(0, cluster().size() - 1);
+ return settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, randomShardNumber).put(SETTING_NUMBER_OF_REPLICAS, randomReplicaNumber);
+ }
+
+ private void createData(boolean optimize) throws IOException, InterruptedException, ExecutionException {
+ String[][] input = {{"Foo Fighters"}, {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}};
+ String[] surface = {"Foo Fighters", "Generator - Foo Fighters", "Learn to Fly - Foo Fighters", "The Prodigy", "Firestarter - The Prodigy", "Turbonegro", "Get it on - Turbonegro"};
+ int[] weight = {10, 9, 8, 12, 11, 6, 7};
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[input.length];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .field("output", surface[i])
+ .startObject("payload").field("id", i).endObject()
+ .field("weight", 1) // WE FORCEFULLY INDEX A BOGUS WEIGHT
+ .endObject()
+ .endObject()
+ );
+ }
+ indexRandom(false, builders);
+
+ for (int i = 0; i < builders.length; i++) { // add them again to make sure we deduplicate on the surface form
+ builders[i] = client().prepareIndex(INDEX, TYPE, "n" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .field("output", surface[i])
+ .startObject("payload").field("id", i).endObject()
+ .field("weight", weight[i])
+ .endObject()
+ .endObject()
+ );
+ }
+ indexRandom(false, builders);
+
+ client().admin().indices().prepareRefresh(INDEX).execute().actionGet();
+ if (optimize) {
+ // make sure merging works just fine
+ client().admin().indices().prepareFlush(INDEX).execute().actionGet();
+ client().admin().indices().prepareOptimize(INDEX).execute().actionGet();
+ }
+ }
+
+ @Test // see #3555
+ public void testPrunedSegments() throws IOException {
+ createIndexAndMappingAndSettings(settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0), completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("The Beatles").endArray()
+ .endObject().endObject()
+ ).get();
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject()
+ .field("somefield", "somevalue")
+ .endObject()
+ ).get(); // we have 2 docs in a segment...
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ refresh();
+ // update the first one and then merge.. the target segment will have no value in FIELD
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject()
+ .field("somefield", "somevalue")
+ .endObject()
+ ).get();
+ actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ refresh();
+
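+ // doc 1 no longer carries a completion value, so the merged segment should yield no suggestions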
+ assertSuggestions("b");
+ assertThat(client().prepareCount(INDEX).get().getCount(), equalTo(2L));
+ for (IndexShardSegments seg : client().admin().indices().prepareSegments().get().getIndices().get(INDEX)) {
+ ShardSegments[] shards = seg.getShards();
+ for (ShardSegments shardSegments : shards) {
+ assertThat(shardSegments.getSegments().size(), equalTo(1));
+ }
+ }
+ }
+
+ @Test
+ public void testMaxFieldLength() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ int maxInputLen = between(3, 50);
+ String str = replaceReservedChars(randomRealisticUnicodeOfCodepointLengthBetween(maxInputLen + 1, atLeast(maxInputLen + 2)), (char) 0x01);
+ ElasticsearchAssertions.assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .field("max_input_length", maxInputLen)
+ // upgrade mapping each time
+ .field("analyzer", "keyword")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ ensureYellow();
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(str).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ // need to flush and refresh, because we keep changing the same document
+ // we have to make sure that segments without any live documents are deleted
+ flushAndRefresh();
+ int prefixLen = CompletionFieldMapper.correctSubStringLen(str, between(1, maxInputLen - 1));
+ assertSuggestions(str.substring(0, prefixLen), "foobar");
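+ // inputs longer than max_input_length are truncated at index time, so a prefix reaching past the (surrogate-adjusted) limit should find nothing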
+ if (maxInputLen + 1 < str.length()) {
+ int offset = Character.isHighSurrogate(str.charAt(maxInputLen - 1)) ? 2 : 1;
+ int correctSubStringLen = CompletionFieldMapper.correctSubStringLen(str, maxInputLen + offset);
+ String shortenedSuggestion = str.substring(0, correctSubStringLen);
+ assertSuggestions(shortenedSuggestion);
+ }
+ }
+ }
+
+ @Test
+ // see #3596
+ public void testVeryLongInput() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ ElasticsearchAssertions.assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ ensureYellow();
+ // can cause stack overflow without the default max_input_length
+ String longString = replaceReservedChars(randomRealisticUnicodeOfLength(atLeast(5000)), (char) 0x01);
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(longString).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+
+ }
+
+ // see #3648
+ @Test(expected = MapperParsingException.class)
+ public void testReservedChars() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ ElasticsearchAssertions.assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ ensureYellow();
+ // input containing the reserved character 0x00 must be rejected with a MapperParsingException
+ String string = "foo" + (char) 0x00 + "bar";
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(string).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ }
+
+ private static String replaceReservedChars(String input, char replacement) {
+ char[] charArray = input.toCharArray();
+ for (int i = 0; i < charArray.length; i++) {
+ if (CompletionFieldMapper.isReservedChar(charArray[i])) {
+ charArray[i] = replacement;
+ }
+ }
+ return new String(charArray);
+ }
+
+ private static class CompletionMappingBuilder {
+ private String searchAnalyzer = "simple";
+ private String indexAnalyzer = "simple";
+ private Boolean payloads = getRandom().nextBoolean();
+ private Boolean preserveSeparators = getRandom().nextBoolean();
+ private Boolean preservePositionIncrements = getRandom().nextBoolean();
+
+ public CompletionMappingBuilder searchAnalyzer(String searchAnalyzer) {
+ this.searchAnalyzer = searchAnalyzer;
+ return this;
+ }
+ public CompletionMappingBuilder indexAnalyzer(String indexAnalyzer) {
+ this.indexAnalyzer = indexAnalyzer;
+ return this;
+ }
+ public CompletionMappingBuilder payloads(Boolean payloads) {
+ this.payloads = payloads;
+ return this;
+ }
+ public CompletionMappingBuilder preserveSeparators(Boolean preserveSeparators) {
+ this.preserveSeparators = preserveSeparators;
+ return this;
+ }
+ public CompletionMappingBuilder preservePositionIncrements(Boolean preservePositionIncrements) {
+ this.preservePositionIncrements = preservePositionIncrements;
+ return this;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java b/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
new file mode 100644
index 0000000..241e46c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.SimpleAnalyzer;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.analysis.synonym.SynonymMap;
+import org.apache.lucene.analysis.synonym.SynonymMap.Builder;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream.ByteTermAttribute;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CompletionTokenStreamTest extends ElasticsearchTokenStreamTestCase {
+
+ final XAnalyzingSuggester suggester = new XAnalyzingSuggester(new SimpleAnalyzer(TEST_VERSION_CURRENT));
+
+ @Test
+ public void testSuggestTokenFilter() throws Exception {
+ TokenStream tokenStream = new MockTokenizer(new StringReader("mykeyword"), MockTokenizer.WHITESPACE, true);
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ assertTokenStreamContents(suggestTokenStream, new String[] {"mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10"}, new int[] { 1 }, null, null);
+ }
+
+ @Test
+ public void testSuggestTokenFilterWithSynonym() throws Exception {
+ Builder builder = new SynonymMap.Builder(true);
+ builder.add(new CharsRef("mykeyword"), new CharsRef("mysynonym"), true);
+
+ MockTokenizer tokenizer = new MockTokenizer(new StringReader("mykeyword"), MockTokenizer.WHITESPACE, true);
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(filter, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ assertTokenStreamContents(suggestTokenStream, new String[] {"mysynonym", "mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10", "Surface keyword|friggin payload|10"}, new int[] { 2, 0 }, null, null);
+ }
+
+ @Test
+ public void testValidNumberOfExpansions() throws IOException {
+ Builder builder = new SynonymMap.Builder(true);
+ for (int i = 0; i < 256; i++) {
+ builder.add(new CharsRef("" + (i+1)), new CharsRef("" + (1000 + (i+1))), true);
+ }
+ StringBuilder valueBuilder = new StringBuilder();
+ for (int i = 0 ; i < 8 ; i++) {
+ valueBuilder.append(i+1);
+ valueBuilder.append(" ");
+ }
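+        // 8 whitespace-separated tokens, each with one synonym, expand to 2^8 = 256 finite strings, which is still within the expansion limit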
+ MockTokenizer tokenizer = new MockTokenizer(new StringReader(valueBuilder.toString()), MockTokenizer.WHITESPACE, true);
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ return finiteStrings;
+ }
+ });
+
+ suggestTokenStream.reset();
+ ByteTermAttribute attr = suggestTokenStream.addAttribute(ByteTermAttribute.class);
+ PositionIncrementAttribute posAttr = suggestTokenStream.addAttribute(PositionIncrementAttribute.class);
+ int maxPos = 0;
+ int count = 0;
+ while(suggestTokenStream.incrementToken()) {
+ count++;
+ assertNotNull(attr.getBytesRef());
+ assertTrue(attr.getBytesRef().length > 0);
+ maxPos += posAttr.getPositionIncrement();
+ }
+ suggestTokenStream.close();
+        assertEquals(256, count);
+        assertEquals(256, maxPos);
+
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+    public void testInvalidNumberOfExpansions() throws IOException {
+ Builder builder = new SynonymMap.Builder(true);
+ for (int i = 0; i < 256; i++) {
+ builder.add(new CharsRef("" + (i+1)), new CharsRef("" + (1000 + (i+1))), true);
+ }
+ StringBuilder valueBuilder = new StringBuilder();
+        for (int i = 0 ; i < 9 ; i++) { // 9 tokens expand to 2^9 = 512 finite strings, which exceeds the expansion limit
+ valueBuilder.append(i+1);
+ valueBuilder.append(" ");
+ }
+ MockTokenizer tokenizer = new MockTokenizer(new StringReader(valueBuilder.toString()), MockTokenizer.WHITESPACE, true);
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ return finiteStrings;
+ }
+ });
+
+ suggestTokenStream.reset();
+ suggestTokenStream.incrementToken();
+ suggestTokenStream.close();
+
+ }
+
+ @Test
+ public void testSuggestTokenFilterProperlyDelegateInputStream() throws Exception {
+ TokenStream tokenStream = new MockTokenizer(new StringReader("mykeyword"), MockTokenizer.WHITESPACE, true);
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class);
+ BytesRef ref = termAtt.getBytesRef();
+ assertNotNull(ref);
+ suggestTokenStream.reset();
+
+ while (suggestTokenStream.incrementToken()) {
+ termAtt.fillBytesRef();
+ assertThat(ref.utf8ToString(), equalTo("mykeyword"));
+ }
+ suggestTokenStream.end();
+ suggestTokenStream.close();
+ }
+
+
+ public final static class ByteTermAttrToCharTermAttrFilter extends TokenFilter {
+ private ByteTermAttribute byteAttr = addAttribute(ByteTermAttribute.class);
+ private PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+ private TypeAttribute type = addAttribute(TypeAttribute.class);
+ private CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class);
+ protected ByteTermAttrToCharTermAttrFilter(TokenStream input) {
+ super(input);
+ }
+
+ @Override
+ public boolean incrementToken() throws IOException {
+ if (input.incrementToken()) {
+ BytesRef bytesRef = byteAttr.getBytesRef();
+                // copy the payload into the type attribute so assertTokenStreamContents can verify it via the types array
+ type.setType(payload.getPayload().utf8ToString());
+ return true;
+ }
+ return false;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
new file mode 100644
index 0000000..476fe3c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.CharsRef;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ *
+ */
+public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestionsContext> {
+
+
+ // This is a pretty dumb implementation which returns the original text + fieldName + custom config option + 12 or 123
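+    // e.g. for text "foo", field "title" and option suffix "bar" it returns "foo-title-bar-12" and "foo-title-bar-123"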
+ @Override
+ public Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute(String name, CustomSuggestionsContext suggestion, IndexReader indexReader, CharsRef spare) throws IOException {
+ // Get the suggestion context
+ String text = suggestion.getText().utf8ToString();
+
+ // create two suggestions with 12 and 123 appended
+ Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> response = new Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>>(name, suggestion.getSize());
+
+ String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12");
+ Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>(new StringText(firstSuggestion), 0, text.length() + 2);
+ response.addTerm(resultEntry12);
+
+ String secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123");
+ Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>(new StringText(secondSuggestion), 0, text.length() + 3);
+ response.addTerm(resultEntry123);
+
+ return response;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] {"custom"};
+ }
+
+ @Override
+ public SuggestContextParser getContextParser() {
+ return new SuggestContextParser() {
+ @Override
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService) throws IOException {
+ Map<String, Object> options = parser.map();
+ CustomSuggestionsContext suggestionContext = new CustomSuggestionsContext(CustomSuggester.this, options);
+ suggestionContext.setField((String) options.get("field"));
+ return suggestionContext;
+ }
+ };
+ }
+
+ public static class CustomSuggestionsContext extends SuggestionSearchContext.SuggestionContext {
+
+ public Map<String, Object> options;
+
+ public CustomSuggestionsContext(Suggester suggester, Map<String, Object> options) {
+ super(suggester);
+ this.options = options;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java
new file mode 100644
index 0000000..a54421c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+/**
+ *
+ */
+public class CustomSuggesterPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-suggester";
+ }
+
+ @Override
+ public String description() {
+ return "Custom suggester to test pluggable implementation";
+ }
+
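+    // Elasticsearch passes each module to the plugin's matching onModule(...) method; registering here makes the "custom" suggester available to search requests.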
+ public void onModule(SuggestModule suggestModule) {
+ suggestModule.registerSuggester(CustomSuggester.class);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java
new file mode 100644
index 0000000..ee78c1a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=1)
+public class CustomSuggesterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder().put("plugin.types", CustomSuggesterPlugin.class.getName()).put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test
+ public void testThatCustomSuggestersCanBeRegisteredAndWork() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("name", "arbitrary content")
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+ ensureYellow();
+
+ String randomText = randomAsciiOfLength(10);
+ String randomField = randomAsciiOfLength(10);
+ String randomSuffix = randomAsciiOfLength(10);
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setTypes("test").setFrom(0).setSize(1);
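+        // The builder below produces a request body of the shape:
+        // { "suggest": { "someName": { "text": "<randomText>", "custom": { "field": "<randomField>", "suffix": "<randomSuffix>" } } } }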
+ XContentBuilder query = jsonBuilder().startObject()
+ .startObject("suggest")
+ .startObject("someName")
+ .field("text", randomText)
+ .startObject("custom")
+ .field("field", randomField)
+ .field("suffix", randomSuffix)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ searchRequestBuilder.setExtraSource(query.bytes());
+
+ SearchResponse searchResponse = searchRequestBuilder.execute().actionGet();
+
+ List<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestions = Lists.newArrayList(searchResponse.getSuggest().getSuggestion("someName").iterator());
+ assertThat(suggestions, hasSize(2));
+ assertThat(suggestions.get(0).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-12", randomText, randomField, randomSuffix)));
+ assertThat(suggestions.get(1).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-123", randomText, randomField, randomSuffix)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java
new file mode 100644
index 0000000..6a1fa25
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java
@@ -0,0 +1,977 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Resources;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.DirectCandidateGenerator;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.search.suggest.SuggestBuilder.phraseSuggestion;
+import static org.elasticsearch.search.suggest.SuggestBuilder.termSuggestion;
+import static org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.candidateGenerator;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for term and phrase suggestions. Many of these tests make many requests that vary only slightly from one another. Where
+ * possible these tests should declare the configuration for the first request, make the request, modify the configuration for the next request, make that
+ * request, modify again, request again, etc. This makes it very obvious what changes between requests.
+ */
+public class SuggestSearchTests extends ElasticsearchIntegrationTest {
+
+
+ @Test // see #3196
+ public void testSuggestAcrossMultipleIndices() throws IOException {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, between(1, 5),
+ SETTING_NUMBER_OF_REPLICAS, between(0, 1)).get();
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "abcd");
+ index("test", "type1", "2", "text", "aacd");
+ index("test", "type1", "3", "text", "abbd");
+ index("test", "type1", "4", "text", "abcc");
+ refresh();
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ logger.info("--> run suggestions with one index");
+ searchSuggest(client(), termSuggest);
+ prepareCreate("test_1").setSettings(
+ SETTING_NUMBER_OF_SHARDS, between(1, 5),
+ SETTING_NUMBER_OF_REPLICAS, between(0, 1)).get();
+ ensureGreen();
+
+ index("test_1", "type1", "1", "text", "ab cd");
+ index("test_1", "type1", "2", "text", "aa cd");
+ index("test_1", "type1", "3", "text", "ab bd");
+ index("test_1", "type1", "4", "text", "ab cc");
+ refresh();
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ab cd")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with two indices");
+ searchSuggest(client(), termSuggest);
+
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("text").field("type", "string").field("analyzer", "keyword").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test_2").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))
+ ).addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test_2", "type1", "1", "text", "ab cd");
+ index("test_2", "type1", "2", "text", "aa cd");
+ index("test_2", "type1", "3", "text", "ab bd");
+ index("test_2", "type1", "4", "text", "ab cc");
+ index("test_2", "type1", "1", "text", "abcd");
+ index("test_2", "type1", "2", "text", "aacd");
+ index("test_2", "type1", "3", "text", "abbd");
+ index("test_2", "type1", "4", "text", "abcc");
+ refresh();
+
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ab cd")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with three indices");
+ try {
+ searchSuggest(client(), termSuggest);
+ fail(" can not suggest across multiple indices with different analysis chains");
+ } catch (ReduceSearchPhaseException ex) {
+ assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class));
+ assertThat(ex.getCause().getMessage(),
+ anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"),
+ endsWith("Suggest entries have different sizes actual [2] expected [1]")));
+ } catch (ElasticsearchIllegalStateException ex) {
+ assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"),
+ endsWith("Suggest entries have different sizes actual [2] expected [1]")));
+ }
+
+
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ABCD")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with four indices");
+ try {
+ searchSuggest(client(), termSuggest);
+ fail(" can not suggest across multiple indices with different analysis chains");
+ } catch (ReduceSearchPhaseException ex) {
+ assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class));
+ assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"),
+ endsWith("Suggest entries have different text actual [abcd] expected [ABCD]")));
+ } catch (ElasticsearchIllegalStateException ex) {
+ assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"),
+ endsWith("Suggest entries have different text actual [abcd] expected [ABCD]")));
+ }
+
+
+ }
+
+ @Test // see #3037
+ public void testSuggestModes() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.biword.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 3));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .startObject("name_shingled")
+ .field("type", "string")
+ .field("index_analyzer", "biword")
+ .field("search_analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+
+ index("test", "type1", "1", "name", "I like iced tea");
+ index("test", "type1", "2", "name", "I like tea.");
+ index("test", "type1", "3", "name", "I like ice cream.");
+ refresh();
+
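+        // Candidates are generated from the plain "name" field while the phrase suggester scores them against the shingled "name_shingled" field.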
+ DirectCandidateGenerator generator = candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2);
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name_shingled")
+ .addCandidateGenerator(generator)
+ .gramSize(3);
+ Suggest searchSuggest = searchSuggest(client(), "ice tea", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, "did_you_mean", "iced tea");
+
+ generator.suggestMode(null);
+ searchSuggest = searchSuggest(client(), "ice tea", phraseSuggestion);
+ assertSuggestionSize(searchSuggest, 0, 0, "did_you_mean");
+ }
+
+ @Test // see #2729
+ public void testSizeOneShard() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ for (int i = 0; i < 15; i++) {
+ index("test", "type1", Integer.toString(i), "text", "abc" + i);
+ }
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get();
+ assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue());
+
+ TermSuggestionBuilder termSuggestion = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text")
+ .size(10);
+ Suggest suggest = searchSuggest(client(), termSuggestion);
+ assertSuggestion(suggest, 0, "test", 10, "abc0");
+
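+        // With a single shard, shardSize effectively caps the number of suggestions returned.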
+ termSuggestion.text("abcd").shardSize(5);
+ suggest = searchSuggest(client(), termSuggestion);
+ assertSuggestion(suggest, 0, "test", 5, "abc0");
+ }
+
+ @Test
+ public void testUnmappedField() throws IOException, InterruptedException, ExecutionException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1,5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
+ .put("index.analysis.analyzer.biword.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 3));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .startObject("name_shingled")
+ .field("type", "string")
+ .field("index_analyzer", "biword")
+ .field("search_analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1").setSource("name", "I like iced tea"),
+ client().prepareIndex("test", "type1").setSource("name", "I like tea."),
+ client().prepareIndex("test", "type1").setSource("name", "I like ice cream."));
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name_shingled")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2))
+ .gramSize(3);
+ Suggest searchSuggest = searchSuggest(client(), "ice tea", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, 0, "did_you_mean", "iced tea");
+
+ phraseSuggestion.field("nosuchField");
+ {
+ SearchRequestBuilder suggestBuilder = client().prepareSearch().setSearchType(SearchType.COUNT);
+ suggestBuilder.setSuggestText("tetsting sugestion");
+ suggestBuilder.addSuggestion(phraseSuggestion);
+ assertThrows(suggestBuilder, SearchPhaseExecutionException.class);
+ }
+ {
+ SearchRequestBuilder suggestBuilder = client().prepareSearch().setSearchType(SearchType.COUNT);
+ suggestBuilder.setSuggestText("tetsting sugestion");
+ suggestBuilder.addSuggestion(phraseSuggestion);
+ assertThrows(suggestBuilder, SearchPhaseExecutionException.class);
+ }
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "abcd");
+ index("test", "type1", "2", "text", "aacd");
+ index("test", "type1", "3", "text", "abbd");
+ index("test", "type1", "4", "text", "abcc");
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get();
+ assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue());
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ Suggest suggest = searchSuggest(client(), termSuggest);
+ assertSuggestion(suggest, 0, "test", "aacd", "abbd", "abcc");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+
+ suggest = searchSuggest(client(), termSuggest);
+ assertSuggestion(suggest, 0, "test", "aacd","abbd", "abcc");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+ }
+
+ @Test
+ public void testEmpty() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 5,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "type1", "1", "foo", "bar");
+ refresh();
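+        // No document has a "text" field, so the suggester returns an empty entry for the given text.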
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ Suggest suggest = searchSuggest(client(), termSuggest);
+ assertSuggestionSize(suggest, 0, 0, "test");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+
+ suggest = searchSuggest(client(), termSuggest);
+ assertSuggestionSize(suggest, 0, 0, "test");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+ }
+
+ @Test
+ public void testWithMultipleCommands() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 5,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "typ1", "1", "field1", "prefix_abcd", "field2", "prefix_efgh");
+ index("test", "typ1", "2", "field1", "prefix_aacd", "field2", "prefix_eeeh");
+ index("test", "typ1", "3", "field1", "prefix_abbd", "field2", "prefix_efff");
+ index("test", "typ1", "4", "field1", "prefix_abcc", "field2", "prefix_eggg");
+ refresh();
+
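+        // Three named suggestions in one request: "size1" caps results at one, "field2" corrects two input terms, and "accuracy" at 1.0 filters out every candidate.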
+ Suggest suggest = searchSuggest(client(),
+ termSuggestion("size1")
+ .size(1).text("prefix_abcd").maxTermFreq(10).prefixLength(1).minDocFreq(0)
+ .field("field1").suggestMode("always"),
+ termSuggestion("field2")
+ .field("field2").text("prefix_eeeh prefix_efgh")
+ .maxTermFreq(10).minDocFreq(0).suggestMode("always"),
+ termSuggestion("accuracy")
+ .field("field2").text("prefix_efgh").setAccuracy(1f)
+ .maxTermFreq(10).minDocFreq(0).suggestMode("always"));
+ assertSuggestion(suggest, 0, "size1", "prefix_aacd");
+ assertThat(suggest.getSuggestion("field2").getEntries().get(0).getText().string(), equalTo("prefix_eeeh"));
+ assertSuggestion(suggest, 0, "field2", "prefix_efgh");
+ assertThat(suggest.getSuggestion("field2").getEntries().get(1).getText().string(), equalTo("prefix_efgh"));
+ assertSuggestion(suggest, 1, "field2", "prefix_eeeh", "prefix_efff", "prefix_eggg");
+ assertSuggestionSize(suggest, 0, 0, "accuracy");
+ }
+
+ @Test
+ public void testSizeAndSort() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 5,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ Map<String, Integer> termsAndDocCount = new HashMap<String, Integer>();
+ termsAndDocCount.put("prefix_aaad", 20);
+ termsAndDocCount.put("prefix_abbb", 18);
+ termsAndDocCount.put("prefix_aaca", 16);
+ termsAndDocCount.put("prefix_abba", 14);
+ termsAndDocCount.put("prefix_accc", 12);
+ termsAndDocCount.put("prefix_addd", 10);
+ termsAndDocCount.put("prefix_abaa", 8);
+ termsAndDocCount.put("prefix_dbca", 6);
+ termsAndDocCount.put("prefix_cbad", 4);
+ termsAndDocCount.put("prefix_aacd", 1);
+ termsAndDocCount.put("prefix_abcc", 1);
+ termsAndDocCount.put("prefix_accd", 1);
+
+ for (Map.Entry<String, Integer> entry : termsAndDocCount.entrySet()) {
+ for (int i = 0; i < entry.getValue(); i++) {
+ index("test", "type1", entry.getKey() + i, "field1", entry.getKey());
+ }
+ }
+ refresh();
+
+ Suggest suggest = searchSuggest(client(), "prefix_abcd",
+ termSuggestion("size3SortScoreFirst")
+ .size(3).minDocFreq(0).field("field1").suggestMode("always"),
+ termSuggestion("size10SortScoreFirst")
+ .size(10).minDocFreq(0).field("field1").suggestMode("always").shardSize(50),
+ termSuggestion("size3SortScoreFirstMaxEdits1")
+ .maxEdits(1)
+ .size(10).minDocFreq(0).field("field1").suggestMode("always"),
+ termSuggestion("size10SortFrequencyFirst")
+ .size(10).sort("frequency").shardSize(1000)
+ .minDocFreq(0).field("field1").suggestMode("always"));
+
+        // The commented out assertions fail sometimes because suggestions are based on shard frequencies instead of index frequencies.
+ assertSuggestion(suggest, 0, "size3SortScoreFirst", "prefix_aacd", "prefix_abcc", "prefix_accd");
+ assertSuggestion(suggest, 0, "size10SortScoreFirst", 10, "prefix_aacd", "prefix_abcc", "prefix_accd" /*, "prefix_aaad" */);
+ assertSuggestion(suggest, 0, "size3SortScoreFirstMaxEdits1", "prefix_aacd", "prefix_abcc", "prefix_accd");
+ assertSuggestion(suggest, 0, "size10SortFrequencyFirst", "prefix_aaad", "prefix_abbb", "prefix_aaca", "prefix_abba",
+ "prefix_accc", "prefix_addd", "prefix_abaa", "prefix_dbca", "prefix_cbad", "prefix_aacd");
+
+ // assertThat(suggest.get(3).getSuggestedWords().get("prefix_abcd").get(4).getTerm(), equalTo("prefix_abcc"));
+ // assertThat(suggest.get(3).getSuggestedWords().get("prefix_abcd").get(4).getTerm(), equalTo("prefix_accd"));
+ }
+
+ @Test // see #2817
+ public void testStopwordsOnlyPhraseSuggest() throws ElasticsearchException, IOException {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "typ1", "1", "body", "this is a test");
+ refresh();
+
+ Suggest searchSuggest = searchSuggest(client(), "a an the",
+ phraseSuggestion("simple_phrase").field("body").gramSize(1)
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .size(1));
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+ }
+
+ @Test
+    public void testPrefixLength() throws ElasticsearchException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("body").field("type", "string").field("analyzer", "body").endObject()
+ .startObject("body_reverse").field("type", "string").field("analyzer", "reverse").endObject()
+ .startObject("bigram").field("type", "string").field("analyzer", "bigram").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test", "type1", "1", "body", "hello world");
+ index("test", "type1", "2", "body", "hello world");
+ index("test", "type1", "3", "body", "hello words");
+ refresh();
+
+ Suggest searchSuggest = searchSuggest(client(), "hello word",
+ phraseSuggestion("simple_phrase").field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(4).minWordLength(1).suggestMode("always"))
+ .size(1).confidence(1.0f));
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "hello words");
+
+ searchSuggest = searchSuggest(client(), "hello word",
+ phraseSuggestion("simple_phrase").field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(2).minWordLength(1).suggestMode("always"))
+ .size(1).confidence(1.0f));
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "hello world");
+ }
+
+
+ @Test
+ @Slow
+ public void testMarvelHerosPhraseSuggest() throws ElasticsearchException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body").
+ field("type", "string").
+ field("analyzer", "body")
+ .endObject()
+ .startObject("body_reverse").
+ field("type", "string").
+ field("analyzer", "reverse")
+ .endObject()
+ .startObject("bigram").
+ field("type", "string").
+ field("analyzer", "bigram")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ for (String line: Resources.readLines(SuggestSearchTests.class.getResource("/config/names.txt"), Charsets.UTF_8)) {
+ index("test", "type1", line, "body", line, "body_reverse", line, "bigram", line);
+ }
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("simple_phrase")
+ .field("bigram").gramSize(2).analyzer("body")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .size(1);
+ Suggest searchSuggest = searchSuggest(client(), "american ame", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace");
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("american ame"));
+
+ phraseSuggest.realWordErrorLikelihood(0.95f);
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ // Check the "text" field this one time.
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel"));
+
+ // Ask for highlighting
+ phraseSuggest.highlight("<em>", "</em>");
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getOptions().get(0).getHighlighted().string(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
+
+ // pass in a correct phrase
+ phraseSuggest.highlight(null, null).confidence(0f).size(1).maxErrors(0.5f);
+ searchSuggest = searchSuggest(client(), "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // pass in a correct phrase - set confidence to 2
+ phraseSuggest.confidence(2f);
+ searchSuggest = searchSuggest(client(), "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // pass in a correct phrase - set confidence to 0.99
+ phraseSuggest.confidence(0.99f);
+ searchSuggest = searchSuggest(client(), "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ //test reverse suggestions with pre & post filter
+ phraseSuggest
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .addCandidateGenerator(candidateGenerator("body_reverse").minWordLength(1).suggestMode("always").preFilter("reverse").postFilter("reverse"));
+ searchSuggest = searchSuggest(client(), "xor the yod-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
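+        // LinearInterpolation(trigramLambda, bigramLambda, unigramLambda) blends the n-gram models; the weights used in each call below sum to 1.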
+ // set all mass to trigrams (not indexed)
+ phraseSuggest.clearCandidateGenerators()
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(1,0,0));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // set all mass to bigrams
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0,1,0));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // distribute mass
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ searchSuggest = searchSuggest(client(), "american ame", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace");
+
+ // try all smoothing methods
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.Laplace(0.2));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // check tokenLimit
+ phraseSuggest.smoothingModel(null).tokenLimit(4);
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ phraseSuggest.tokenLimit(15).smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel xorr the god jewel xorr the god jewel");
+ // Check the name this time because we're repeating it which is funky
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel"));
+ }
+
+ @Test
+    public void testSizeParam() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+                .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body")
+ .field("type", "string")
+ .field("analyzer", "body")
+ .endObject()
+ .startObject("body_reverse")
+ .field("type", "string")
+ .field("analyzer", "reverse")
+ .endObject()
+ .startObject("bigram")
+ .field("type", "string")
+ .field("analyzer", "bigram")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ String line = "xorr the god jewel";
+ index("test", "type1", "1", "body", line, "body_reverse", line, "bigram", line);
+ line = "I got it this time";
+ index("test", "type1", "2", "body", line, "body_reverse", line, "bigram", line);
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase")
+ .realWordErrorLikelihood(0.95f)
+ .field("bigram")
+ .gramSize(2)
+ .analyzer("body")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(1).accuracy(0.1f))
+ .smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1))
+ .maxErrors(1.0f)
+ .size(5);
+ Suggest searchSuggest = searchSuggest(client(), "Xorr the Gut-Jewel", phraseSuggestion);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+        // we now allow a candidate size of 2 at the shard generator level, so "god" is found since it is within edit distance 2 of "gut"
+ phraseSuggestion.clearCandidateGenerators()
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(2).accuracy(0.1f));
+ searchSuggest = searchSuggest(client(), "Xorr the Gut-Jewel", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ }
+
+ @Test
+ public void testPhraseBoundaryCases() throws ElasticsearchException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.analyzer.ngram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.ngram.filter", "my_shingle2", "lowercase")
+ .put("index.analysis.analyzer.myDefAnalyzer.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.myDefAnalyzer.filter", "shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle2.type", "shingle")
+ .put("index.analysis.filter.my_shingle2.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle2.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle2.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("body").field("type", "string").field("analyzer", "body").endObject()
+ .startObject("bigram").field("type", "string").field("analyzer", "bigram").endObject()
+ .startObject("ngram").field("type", "string").field("analyzer", "ngram").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ for (String line: Resources.readLines(SuggestSearchTests.class.getResource("/config/names.txt"), Charsets.UTF_8)) {
+ index("test", "type1", line, "body", line, "bigram", line, "ngram", line);
+ }
+ refresh();
+
+        // Let's make sure some things throw exceptions
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase")
+ .field("bigram")
+ .analyzer("body")
+ .addCandidateGenerator(candidateGenerator("does_not_exist").minWordLength(1).suggestMode("always"))
+ .realWordErrorLikelihood(0.95f)
+ .maxErrors(0.5f)
+ .size(1);
+ try {
+ searchSuggest(client(), "Xor the Got-Jewel", 5, phraseSuggestion);
+ fail("field does not exists");
+ } catch (SearchPhaseExecutionException e) {}
+
+ phraseSuggestion.clearCandidateGenerators().analyzer(null);
+ try {
+ searchSuggest(client(), "Xor the Got-Jewel", 5, phraseSuggestion);
+ fail("analyzer does only produce ngrams");
+ } catch (SearchPhaseExecutionException e) {
+ }
+
+ phraseSuggestion.analyzer("bigram");
+ try {
+ searchSuggest(client(), "Xor the Got-Jewel", 5, phraseSuggestion);
+ fail("analyzer does only produce ngrams");
+ } catch (SearchPhaseExecutionException e) {
+ }
+
+ // Now we'll make sure some things don't
+ phraseSuggestion.forceUnigrams(false);
+ searchSuggest(client(), "Xor the Got-Jewel", phraseSuggestion);
+
+ // Field doesn't produce unigrams but the analyzer does
+ phraseSuggestion.forceUnigrams(true).field("bigram").analyzer("ngram");
+ searchSuggest(client(), "Xor the Got-Jewel",
+ phraseSuggestion);
+
+ phraseSuggestion.field("ngram").analyzer("myDefAnalyzer")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"));
+ Suggest suggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggestion);
+ assertSuggestion(suggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggestion.analyzer(null);
+ suggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggestion);
+ assertSuggestion(suggest, 0, "simple_phrase", "xorr the god jewel");
+ }
+
+ @Test
+ public void testDifferentShardSize() throws Exception {
+ prepareCreate("text").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("text", "type1", "1").setSource("field1", "foobar1").setRouting("1"),
+ client().prepareIndex("text", "type1", "2").setSource("field1", "foobar2").setRouting("2"),
+ client().prepareIndex("text", "type1", "3").setSource("field1", "foobar3").setRouting("3"));
+
+ Suggest suggest = searchSuggest(client(), "foobar",
+ termSuggestion("simple")
+ .size(10).minDocFreq(0).field("field1").suggestMode("always"));
+ ElasticsearchAssertions.assertSuggestionSize(suggest, 0, 3, "simple");
+ }
+
+ @Test // see #3469
+ public void testShardFailures() throws IOException, InterruptedException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
+ .put("index.analysis.analyzer.suggest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 5)
+ .put("index.analysis.filter.shingler.output_unigrams", true));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .field("analyzer", "suggest")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test", "type2", "1", "foo", "bar");
+ index("test", "type2", "2", "foo", "bar");
+ index("test", "type2", "3", "foo", "bar");
+ index("test", "type2", "4", "foo", "bar");
+ index("test", "type2", "5", "foo", "bar");
+ index("test", "type2", "1", "name", "Just testing the suggestions api");
+ index("test", "type2", "2", "name", "An other title about equal length");
+        // Note that the last document has to have about the same length as the others or cutoff rechecking will remove the useful suggestion.
+ refresh();
+
+        // When suggesting on a field that does not exist in the mapping, we should fail
+ SearchRequestBuilder request = client().prepareSearch().setSearchType(SearchType.COUNT)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("fielddoesnotexist").maxErrors(5.0f));
+ assertThrows(request, SearchPhaseExecutionException.class);
+
+        // When searching on a shard which does not yet hold any document of an existing type, we should not fail
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))
+ .get();
+ ElasticsearchAssertions.assertNoFailures(searchResponse);
+ ElasticsearchAssertions.assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions");
+ }
+
+ @Test // see #3469
+ public void testEmptyShards() throws IOException, InterruptedException {
+ XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type1").
+ startObject("properties").
+ startObject("name").
+ field("type", "multi_field").
+ field("path", "just_name").
+ startObject("fields").
+ startObject("name").
+ field("type", "string").
+ field("analyzer", "suggest").
+ endObject().
+ endObject().
+ endObject().
+ endObject().
+ endObject().
+ endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.suggest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 5)
+ .put("index.analysis.filter.shingler.output_unigrams", true)).addMapping("type1", mappingBuilder));
+ ensureGreen();
+
+ index("test", "type2", "1", "foo", "bar");
+ index("test", "type2", "2", "foo", "bar");
+ index("test", "type1", "1", "name", "Just testing the suggestions api");
+ index("test", "type1", "2", "name", "An other title about equal length");
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions");
+ }
+
+ /**
+     * Searching for a rare phrase shouldn't provide any suggestions if confidence > 1. This was possible before we rechecked the cutoff
+     * score during the reduce phase. Failures didn't occur every time - maybe two out of five tries - but we don't repeat the test to save time.
+ */
+ @Test
+ public void testSearchForRarePhrase() throws ElasticsearchException, IOException {
+        // If there isn't enough chaff per shard then shards can become unbalanced, making the cutoff recheck this test exercises do more harm than good.
+        int chaffPerShard = 100;
+ int numberOfShards = between(2, 5);
+
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase", "my_shingle")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body")
+ .field("type", "string")
+ .field("analyzer", "body")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ List<String> phrases = new ArrayList<String>();
+ Collections.addAll(phrases, "nobel prize", "noble gases", "somethingelse prize", "pride and joy", "notes are fun");
+ for (int i = 0; i < 8; i++) {
+ phrases.add("noble somethingelse" + i);
+ }
+ for (int i = 0; i < numberOfShards * chaffPerShard; i++) {
+ phrases.add("chaff" + i);
+ }
+ for (String phrase: phrases) {
+ index("test", "type1", phrase, "body", phrase);
+ }
+ refresh();
+
+ Suggest searchSuggest = searchSuggest(client(), "nobel prize", phraseSuggestion("simple_phrase")
+ .field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f))
+ .confidence(2f)
+ .maxErrors(5f)
+ .size(1));
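+ // The exact phrase "nobel prize" is indexed, so with confidence(2f) the input's own score
+ // outweighs every candidate after the cutoff recheck and no suggestion should be returned
+ // (the regression described in the javadoc above).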
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ searchSuggest = searchSuggest(client(), "noble prize", phraseSuggestion("simple_phrase")
+ .field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f))
+ .confidence(2f)
+ .maxErrors(5f)
+ .size(1));
+ assertSuggestion(searchSuggest, 0, 0, "simple_phrase", "nobel prize");
+ }
+
+ protected Suggest searchSuggest(Client client, SuggestionBuilder<?>... suggestion) {
+ return searchSuggest(client, null, suggestion);
+ }
+
+ protected Suggest searchSuggest(Client client, String suggestText, SuggestionBuilder<?>... suggestions) {
+ return searchSuggest(client, suggestText, 0, suggestions);
+ }
+
+ protected Suggest searchSuggest(Client client, String suggestText, int expectShardsFailed, SuggestionBuilder<?>... suggestions) {
+ SearchRequestBuilder builder = client.prepareSearch().setSearchType(SearchType.COUNT);
+ if (suggestText != null) {
+ builder.setSuggestText(suggestText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder.addSuggestion(suggestion);
+ }
+ SearchResponse actionGet = builder.execute().actionGet();
+ assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed));
+ return actionGet.getSuggest();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java b/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
new file mode 100644
index 0000000..4a47b3d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.fst.ByteSequenceOutputs;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.PairOutputs;
+import org.apache.lucene.util.fst.PairOutputs.Pair;
+import org.apache.lucene.util.fst.PositiveIntOutputs;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.CompletionLookupProvider;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+import org.elasticsearch.search.suggest.completion.AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * This is an older implementation of the AnalyzingCompletionLookupProvider class.
+ * We use it to test for backwards compatibility in our tests, namely
+ * CompletionPostingsFormatTest. This ensures upgrades between versions work smoothly.
+ */
+public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvider {
+
+ // for serialization
+ public static final int SERIALIZE_PRESERVE_SEPERATORS = 1;
+ public static final int SERIALIZE_HAS_PAYLOADS = 2;
+ public static final int SERIALIZE_PRESERVE_POSITION_INCREMENTS = 4;
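+
+ // Illustration only (derived from the constants above, not part of the original file):
+ // a provider with preserveSep=true and hasPayloads=true but preservePositionIncrements=false
+ // would serialize options == SERIALIZE_PRESERVE_SEPERATORS | SERIALIZE_HAS_PAYLOADS == 3;
+ // load() below decodes the flags with the same masks.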
+
+ private static final int MAX_SURFACE_FORMS_PER_ANALYZED_FORM = 256;
+ private static final int MAX_GRAPH_EXPANSIONS = -1;
+
+ public static final String CODEC_NAME = "analyzing";
+ public static final int CODEC_VERSION = 1;
+
+ private boolean preserveSep;
+ private boolean preservePositionIncrements;
+ private int maxSurfaceFormsPerAnalyzedForm;
+ private int maxGraphExpansions;
+ private boolean hasPayloads;
+ private final XAnalyzingSuggester prototype;
+
+ // important: these are the settings from the old XAnalyzingSuggester
+ public static final int SEP_LABEL = 0xFF;
+ public static final int END_BYTE = 0x0;
+ public static final int PAYLOAD_SEP = '\u001f';
+
+ public AnalyzingCompletionLookupProviderV1(boolean preserveSep, boolean exactFirst, boolean preservePositionIncrements, boolean hasPayloads) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.hasPayloads = hasPayloads;
+ this.maxSurfaceFormsPerAnalyzedForm = MAX_SURFACE_FORMS_PER_ANALYZED_FORM;
+ this.maxGraphExpansions = MAX_GRAPH_EXPANSIONS;
+ int options = preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+ // needs to be fixed in the suggester first before it can be supported
+ //options |= exactFirst ? XAnalyzingSuggester.EXACT_FIRST : 0;
+ prototype = new XAnalyzingSuggester(null, null, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, preservePositionIncrements,
+ null, false, 1, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+
+ @Override
+ public String getName() {
+ return "analyzing";
+ }
+
+ @Override
+ public FieldsConsumer consumer(final IndexOutput output) throws IOException {
+ CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION);
+ return new FieldsConsumer() {
+ private Map<FieldInfo, Long> fieldOffsets = new HashMap<FieldInfo, Long>();
+
+ @Override
+ public void close() throws IOException {
+ try {
+ /*
+ * write the offsets per field such that we know where
+ * we need to load the FSTs from
+ */
+ long pointer = output.getFilePointer();
+ output.writeVInt(fieldOffsets.size());
+ for (Map.Entry<FieldInfo, Long> entry : fieldOffsets.entrySet()) {
+ output.writeString(entry.getKey().name);
+ output.writeVLong(entry.getValue());
+ }
+ output.writeLong(pointer);
+ output.flush();
+ } finally {
+ IOUtils.close(output);
+ }
+ }
+
+ @Override
+ public TermsConsumer addField(final FieldInfo field) throws IOException {
+
+ return new TermsConsumer() {
+ final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(maxSurfaceFormsPerAnalyzedForm, hasPayloads, PAYLOAD_SEP);
+ final CompletionPostingsConsumer postingsConsumer = new CompletionPostingsConsumer(AnalyzingCompletionLookupProviderV1.this, builder);
+
+ @Override
+ public PostingsConsumer startTerm(BytesRef text) throws IOException {
+ builder.startTerm(text);
+ return postingsConsumer;
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() throws IOException {
+ return BytesRef.getUTF8SortedAsUnicodeComparator();
+ }
+
+ @Override
+ public void finishTerm(BytesRef text, TermStats stats) throws IOException {
+ builder.finishTerm(stats.docFreq); // use doc freq as a fallback
+ }
+
+ @Override
+ public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount) throws IOException {
+ /*
+ * Here we are done processing the field and we can
+ * build the FST and write it to disk.
+ */
+ FST<Pair<Long, BytesRef>> build = builder.build();
+ assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: [" + docCount + "]";
+ /*
+ * it's possible that the FST is null if two segments get merged
+ * and all docs that have a value in this field are deleted. A consumer
+ * is then created but never consumes any values, causing the FST builder
+ * to return null.
+ */
+ if (build != null) {
+ fieldOffsets.put(field, output.getFilePointer());
+ build.save(output);
+ /* write some more meta-info */
+ output.writeVInt(postingsConsumer.getMaxAnalyzedPathsForOneInput());
+ output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
+ output.writeInt(maxGraphExpansions); // can be negative
+ int options = 0;
+ options |= preserveSep ? SERIALIZE_PRESERVE_SEPERATORS : 0;
+ options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
+ options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
+ output.writeVInt(options);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ private static final class CompletionPostingsConsumer extends PostingsConsumer {
+ private final SuggestPayload spare = new SuggestPayload();
+ private AnalyzingCompletionLookupProviderV1 analyzingSuggestLookupProvider;
+ private XAnalyzingSuggester.XBuilder builder;
+ private int maxAnalyzedPathsForOneInput = 0;
+
+ public CompletionPostingsConsumer(AnalyzingCompletionLookupProviderV1 analyzingSuggestLookupProvider, XAnalyzingSuggester.XBuilder builder) {
+ this.analyzingSuggestLookupProvider = analyzingSuggestLookupProvider;
+ this.builder = builder;
+ }
+
+ @Override
+ public void startDoc(int docID, int freq) throws IOException {
+ }
+
+ @Override
+ public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException {
+ analyzingSuggestLookupProvider.parsePayload(payload, spare);
+ builder.addSurface(spare.surfaceForm, spare.payload, spare.weight);
+ // multi fields share the same surface form, so we track the maximum analyzed position here
+ maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
+ }
+
+ @Override
+ public void finishDoc() throws IOException {
+ }
+
+ public int getMaxAnalyzedPathsForOneInput() {
+ return maxAnalyzedPathsForOneInput;
+ }
+ }
+
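+ /*
+ * A sketch of the on-disk layout as written by consumer() above and read back by load()
+ * below (derived from this file's code, not a normative spec):
+ *
+ * header: CODEC_NAME, CODEC_VERSION
+ * per field: FST bytes, then maxAnalyzedPathsForOneInput (vint),
+ * maxSurfaceFormsPerAnalyzedForm (vint), maxGraphExpansions (int), options bitmask (vint)
+ * offset table: field count (vint), then (name, offset) pairs
+ * trailer: absolute pointer to the offset table (long, the file's last 8 bytes)
+ *
+ * This is why load() first seeks to input.length() - 8.
+ */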
+ @Override
+ public LookupFactory load(IndexInput input) throws IOException {
+ CodecUtil.checkHeader(input, CODEC_NAME, CODEC_VERSION, CODEC_VERSION);
+ final Map<String, AnalyzingSuggestHolder> lookupMap = new HashMap<String, AnalyzingSuggestHolder>();
+ input.seek(input.length() - 8);
+ long metaPointer = input.readLong();
+ input.seek(metaPointer);
+ int numFields = input.readVInt();
+
+ Map<Long, String> meta = new TreeMap<Long, String>();
+ for (int i = 0; i < numFields; i++) {
+ String name = input.readString();
+ long offset = input.readVLong();
+ meta.put(offset, name);
+ }
+ long sizeInBytes = 0;
+ for (Map.Entry<Long, String> entry : meta.entrySet()) {
+ input.seek(entry.getKey());
+ FST<Pair<Long, BytesRef>> fst = new FST<Pair<Long, BytesRef>>(input, new PairOutputs<Long, BytesRef>(
+ PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
+ int maxAnalyzedPathsForOneInput = input.readVInt();
+ int maxSurfaceFormsPerAnalyzedForm = input.readVInt();
+ int maxGraphExpansions = input.readInt();
+ int options = input.readVInt();
+ boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPERATORS) != 0;
+ boolean hasPayloads = (options & SERIALIZE_HAS_PAYLOADS) != 0;
+ boolean preservePositionIncrements = (options & SERIALIZE_PRESERVE_POSITION_INCREMENTS) != 0;
+ sizeInBytes += fst.sizeInBytes();
+ lookupMap.put(entry.getValue(), new AnalyzingSuggestHolder(preserveSep, preservePositionIncrements, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions,
+ hasPayloads, maxAnalyzedPathsForOneInput, fst));
+ }
+ final long ramBytesUsed = sizeInBytes;
+ return new LookupFactory() {
+ @Override
+ public Lookup getLookup(FieldMapper<?> mapper, CompletionSuggestionContext suggestionContext) {
+ AnalyzingSuggestHolder analyzingSuggestHolder = lookupMap.get(mapper.names().indexName());
+ if (analyzingSuggestHolder == null) {
+ return null;
+ }
+ int flags = analyzingSuggestHolder.preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+
+ XAnalyzingSuggester suggester;
+ if (suggestionContext.isFuzzy()) {
+ suggester = new XFuzzySuggester(mapper.indexAnalyzer(), mapper.searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ suggestionContext.getFuzzyEditDistance(), suggestionContext.isFuzzyTranspositions(),
+ suggestionContext.getFuzzyPrefixLength(), suggestionContext.getFuzzyMinLength(), false,
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+
+ } else {
+ suggester = new XAnalyzingSuggester(mapper.indexAnalyzer(), mapper.searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ analyzingSuggestHolder.preservePositionIncrements,
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+ return suggester;
+ }
+
+ @Override
+ public CompletionStats stats(String... fields) {
+ long sizeInBytes = 0;
+ ObjectLongOpenHashMap<String> completionFields = null;
+ if (fields != null && fields.length > 0) {
+ completionFields = new ObjectLongOpenHashMap<String>(fields.length);
+ }
+
+ for (Map.Entry<String, AnalyzingSuggestHolder> entry : lookupMap.entrySet()) {
+ sizeInBytes += entry.getValue().fst.sizeInBytes();
+ if (fields == null || fields.length == 0) {
+ continue;
+ }
+ for (String field : fields) {
+ // support for getting fields by regex as in fielddata
+ if (Regex.simpleMatch(field, entry.getKey())) {
+ long fstSize = entry.getValue().fst.sizeInBytes();
+ completionFields.addTo(field, fstSize);
+ }
+ }
+ }
+
+ return new CompletionStats(sizeInBytes, completionFields);
+ }
+
+ @Override
+ AnalyzingSuggestHolder getAnalyzingSuggestHolder(FieldMapper<?> mapper) {
+ return lookupMap.get(mapper.names().indexName());
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return ramBytesUsed;
+ }
+ };
+ }
+
+ /*
+ // might be readded when we change the current impl, right now not needed
+ static class AnalyzingSuggestHolder {
+ final boolean preserveSep;
+ final boolean preservePositionIncrements;
+ final int maxSurfaceFormsPerAnalyzedForm;
+ final int maxGraphExpansions;
+ final boolean hasPayloads;
+ final int maxAnalyzedPathsForOneInput;
+ final FST<Pair<Long, BytesRef>> fst;
+
+ public AnalyzingSuggestHolder(boolean preserveSep, boolean preservePositionIncrements, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+ boolean hasPayloads, int maxAnalyzedPathsForOneInput, FST<Pair<Long, BytesRef>> fst) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
+ this.maxGraphExpansions = maxGraphExpansions;
+ this.hasPayloads = hasPayloads;
+ this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
+ this.fst = fst;
+ }
+
+ }
+ */
+
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream);
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java b/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
new file mode 100644
index 0000000..650ac2a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.suggest.InputIterator;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.Lookup.LookupResult;
+import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LineFileDocs;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PreBuiltPostingsFormatProvider;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testCompletionPostingsFormat() throws IOException {
+ AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+ List<Completion090PostingsFormat.CompletionLookupProvider> providers = Lists.newArrayList(providerV1, currentProvider);
+
+ Completion090PostingsFormat.CompletionLookupProvider randomProvider = providers.get(getRandom().nextInt(providers.size()));
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, randomProvider);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ Lookup lookup = load.getLookup(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null), new CompletionSuggestionContext(null));
+ List<LookupResult> result = lookup.lookup("ge", false, 10);
+ assertThat(result.get(0).key.toString(), equalTo("Generator - Foo Fighters"));
+ assertThat(result.get(0).payload.utf8ToString(), equalTo("id:10"));
+ dir.close();
+ }
+
+ @Test
+ public void testProviderBackwardCompatibilityForVersion1() throws IOException {
+ AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, providerV1);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load.getAnalyzingSuggestHolder(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null));
+ assertThat(analyzingSuggestHolder.sepLabel, is(AnalyzingCompletionLookupProviderV1.SEP_LABEL));
+ assertThat(analyzingSuggestHolder.payloadSep, is(AnalyzingCompletionLookupProviderV1.PAYLOAD_SEP));
+ assertThat(analyzingSuggestHolder.endByte, is(AnalyzingCompletionLookupProviderV1.END_BYTE));
+ dir.close();
+ }
+
+ @Test
+ public void testProviderVersion2() throws IOException {
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, currentProvider);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load.getAnalyzingSuggestHolder(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null));
+ assertThat(analyzingSuggestHolder.sepLabel, is(XAnalyzingSuggester.SEP_LABEL));
+ assertThat(analyzingSuggestHolder.payloadSep, is(XAnalyzingSuggester.PAYLOAD_SEP));
+ assertThat(analyzingSuggestHolder.endByte, is(XAnalyzingSuggester.END_BYTE));
+ dir.close();
+ }
+
+ @Test
+ public void testDuellCompletions() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException,
+ IllegalAccessException {
+ final boolean preserveSeparators = getRandom().nextBoolean();
+ final boolean preservePositionIncrements = getRandom().nextBoolean();
+ final boolean usePayloads = getRandom().nextBoolean();
+ final int options = preserveSeparators ? AnalyzingSuggester.PRESERVE_SEP : 0;
+
+ XAnalyzingSuggester reference = new XAnalyzingSuggester(new StandardAnalyzer(TEST_VERSION_CURRENT), new StandardAnalyzer(
+ TEST_VERSION_CURRENT), options, 256, -1, preservePositionIncrements, null, false, 1, XAnalyzingSuggester.SEP_LABEL, XAnalyzingSuggester.PAYLOAD_SEP, XAnalyzingSuggester.END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ LineFileDocs docs = new LineFileDocs(getRandom());
+ int num = atLeast(150);
+ final String[] titles = new String[num];
+ final long[] weights = new long[num];
+ for (int i = 0; i < titles.length; i++) {
+ Document nextDoc = docs.nextDoc();
+ IndexableField field = nextDoc.getField("title");
+ titles[i] = field.stringValue();
+ weights[i] = between(0, 100);
+
+ }
+ docs.close();
+ final InputIterator primaryIter = new InputIterator() {
+ int index = 0;
+ long currentWeight = -1;
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return null;
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ if (index < titles.length) {
+ currentWeight = weights[index];
+ return new BytesRef(titles[index++]);
+ }
+ return null;
+ }
+
+ @Override
+ public long weight() {
+ return currentWeight;
+ }
+
+ @Override
+ public BytesRef payload() {
+ return null;
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return false;
+ }
+
+ };
+ InputIterator iter;
+ if (usePayloads) {
+ iter = new InputIterator() {
+ @Override
+ public long weight() {
+ return primaryIter.weight();
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return primaryIter.getComparator();
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ return primaryIter.next();
+ }
+
+ @Override
+ public BytesRef payload() {
+ return new BytesRef(Long.toString(weight()));
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return true;
+ }
+ };
+ } else {
+ iter = primaryIter;
+ }
+ reference.build(iter);
+ PostingsFormatProvider provider = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+
+ NamedAnalyzer namedAnalyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ final CompletionFieldMapper mapper = new CompletionFieldMapper(new Names("foo"), namedAnalyzer, namedAnalyzer, provider, null, usePayloads,
+ preserveSeparators, preservePositionIncrements, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null);
+ Lookup buildAnalyzingLookup = buildAnalyzingLookup(mapper, titles, titles, weights);
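+ // maxAnalyzedPathsForOneInput is private on both sides, so reflection is used to check that
+ // the reference suggester and the lookup built through the postings format agree on it.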
+ Field field = buildAnalyzingLookup.getClass().getDeclaredField("maxAnalyzedPathsForOneInput");
+ field.setAccessible(true);
+ Field refField = reference.getClass().getDeclaredField("maxAnalyzedPathsForOneInput");
+ refField.setAccessible(true);
+ assertThat(refField.get(reference), equalTo(field.get(buildAnalyzingLookup)));
+
+ for (int i = 0; i < titles.length; i++) {
+ int res = between(1, 10);
+ final StringBuilder builder = new StringBuilder();
+ SuggestUtils.analyze(namedAnalyzer.tokenStream("foo", titles[i]), new SuggestUtils.TokenConsumer() {
+ @Override
+ public void nextToken() throws IOException {
+ if (builder.length() == 0) {
+ builder.append(this.charTermAttr.toString());
+ }
+ }
+ });
+ String firstTerm = builder.toString();
+ String prefix = firstTerm.isEmpty() ? "" : firstTerm.substring(0, between(1, firstTerm.length()));
+ List<LookupResult> refLookup = reference.lookup(prefix, false, res);
+ List<LookupResult> lookup = buildAnalyzingLookup.lookup(prefix, false, res);
+ assertThat(refLookup.toString(), lookup.size(), equalTo(refLookup.size()));
+ for (int j = 0; j < refLookup.size(); j++) {
+ assertThat(lookup.get(j).key, equalTo(refLookup.get(j).key));
+ assertThat("prefix: " + prefix + " " + j + " -- missmatch cost: " + lookup.get(j).key + " - " + lookup.get(j).value + " | " + refLookup.get(j).key + " - " + refLookup.get(j).value ,
+ lookup.get(j).value, equalTo(refLookup.get(j).value));
+ assertThat(lookup.get(j).payload, equalTo(refLookup.get(j).payload));
+ if (usePayloads) {
+ assertThat(lookup.get(j).payload.utf8ToString(), equalTo(Long.toString(lookup.get(j).value)));
+ }
+ }
+ }
+ }
+
+ public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces, long[] weights)
+ throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ FilterCodec filterCodec = new FilterCodec("filtered", Codec.getDefault()) {
+ public PostingsFormat postingsFormat() {
+ return mapper.postingsFormatProvider().get();
+ }
+ };
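+ // The FilterCodec above routes all fields through the mapper's completion postings format,
+ // which is what allows the lookup to be read back via CompletionTerms further down.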
+ IndexWriterConfig indexWriterConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, mapper.indexAnalyzer());
+
+ indexWriterConfig.setCodec(filterCodec);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+ for (int i = 0; i < weights.length; i++) {
+ Document doc = new Document();
+ BytesRef payload = mapper.buildPayload(new BytesRef(surfaces[i]), weights[i], new BytesRef(Long.toString(weights[i])));
+ doc.add(mapper.getCompletionField(terms[i], payload));
+ if (randomBoolean()) {
+ writer.commit();
+ }
+ writer.addDocument(doc);
+ }
+ writer.commit();
+ Merges.forceMerge(writer, 1);
+ writer.commit();
+ DirectoryReader reader = DirectoryReader.open(writer, true);
+ assertThat(reader.leaves().size(), equalTo(1));
+ assertThat(reader.leaves().get(0).reader().numDocs(), equalTo(weights.length));
+ AtomicReaderContext atomicReaderContext = reader.leaves().get(0);
+ Terms luceneTerms = atomicReaderContext.reader().terms(mapper.name());
+ Lookup lookup = ((Completion090PostingsFormat.CompletionTerms) luceneTerms).getLookup(mapper, new CompletionSuggestionContext(null));
+ reader.close();
+ writer.close();
+ dir.close();
+ return lookup;
+ }
+
+ @Test
+ public void testNoDocs() throws IOException {
+ AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
+ FieldsConsumer consumer = provider.consumer(output);
+ FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
+ DocValuesType.SORTED, DocValuesType.BINARY, new HashMap<String, String>());
+ TermsConsumer addField = consumer.addField(fieldInfo);
+ addField.finish(0, 0, 0);
+ consumer.close();
+ output.close();
+
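+ // With zero consumed documents the FST builder yields a null FST (see the docCount == 0
+ // case in the provider), so the loaded factory is expected to return a null lookup.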
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = provider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ assertNull(load.getLookup(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null), new CompletionSuggestionContext(null)));
+ dir.close();
+ }
+
+ // TODO: add more unit tests
+ private void writeData(Directory dir, Completion090PostingsFormat.CompletionLookupProvider provider) throws IOException {
+ IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
+ FieldsConsumer consumer = provider.consumer(output);
+ FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
+ DocValuesType.SORTED, DocValuesType.BINARY, new HashMap<String, String>());
+ TermsConsumer addField = consumer.addField(fieldInfo);
+
+ PostingsConsumer postingsConsumer = addField.startTerm(new BytesRef("foofightersgenerator"));
+ postingsConsumer.startDoc(0, 1);
+ postingsConsumer.addPosition(256 - 2, provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0,
+ 1);
+ postingsConsumer.finishDoc();
+ addField.finishTerm(new BytesRef("foofightersgenerator"), new TermStats(1, 1));
+ addField.startTerm(new BytesRef("generator"));
+ postingsConsumer.startDoc(0, 1);
+ postingsConsumer.addPosition(256 - 1, provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0,
+ 1);
+ postingsConsumer.finishDoc();
+ addField.finishTerm(new BytesRef("generator"), new TermStats(1, 1));
+ addField.finish(1, 1, 1);
+ consumer.close();
+ output.close();
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
new file mode 100644
index 0000000..83fb721
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.synonym.SolrSynonymParser;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.search.spell.SuggestMode;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class NoisyChannelSpellCheckerTests extends ElasticsearchTestCase {
+ private final BytesRef space = new BytesRef(" ");
+ private final BytesRef preTag = new BytesRef("<em>");
+ private final BytesRef postTag = new BytesRef("</em>");
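+
+ // For readability: an interpretation of the positional arguments of getCorrections as used
+ // below, inferred from the call sites rather than an authoritative signature: analyzer, text,
+ // generator(s), maxErrors, numCorrections, reader, field, wordScorer, confidence, gramSize.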
+
+ @Test
+ public void testMarvelHeros() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, t));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_41), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41, wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
+
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
+ Result result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2);
+ Correction[] corrections = result.corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ace"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american <em>ace</em>"));
+ assertThat(result.cutoffScore, greaterThan(0d));
+
+ result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 0, 1);
+ corrections = result.corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ame"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american ame"));
+ assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE));
+
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
+ assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor the <em>god</em> jewel"));
+ assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn</em> the <em>god</em> jewel"));
+ assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the got jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
+
+ // Test some of the highlighting corner cases
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xor teh god jewel"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr the god</em> jewel"));
+ assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor <em>the god</em> jewel"));
+ assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn the god</em> jewel"));
+ assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("xor teh <em>god</em> jewel"));
+
+ // test synonyms
+
+ Analyzer analyzer = new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ TokenFilter filter = new LowerCaseFilter(Version.LUCENE_41, t);
+ try {
+ SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer(Version.LUCENE_41));
+ parser.parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+ filter = new SynonymFilter(filter, parser.build(), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return new TokenStreamComponents(t, filter);
+ }
+ };
+
+ spellchecker.setAccuracy(0.0f);
+ spellchecker.setMinPrefix(1);
+ spellchecker.setMinQueryLength(1);
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
+
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
+
+ // Make sure that user-supplied text is not marked as highlighted in the presence of a synonym filter
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain <em>america</em>"));
+ }
+
+ @Test
+ public void testMarvelHerosMultiGenerator() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, t));
+ }
+
+ });
+ mapping.put("body_reverse", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new ReverseStringFilter(Version.LUCENE_41, new LowerCaseFilter(Version.LUCENE_41, t)));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_41), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41, wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_reverse", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10);
+ DirectCandidateGenerator reverse = new DirectCandidateGenerator(spellchecker, "body_reverse", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10, wrapper, wrapper, MultiFields.getTerms(ir, "body_reverse"));
+ CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse);
+
+ Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ generator = new MultiCandidateGeneratorWrapper(5, forward, reverse);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), forward, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(0)); // only use forward with constant prefix
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("america cae"), generator, 2, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("zorr the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("gorr the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("varr the god jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+ }
+
+ @Test
+ public void testMarvelHerosTrigram() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, t));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_41), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41, wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ WordScorer wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
+
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
+ Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 1).corrections;
+ assertThat(corrections.length, equalTo(0));
+// assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape"));
+
+ wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 3).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
+
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 100, 3).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+
+ // test synonyms
+
+ Analyzer analyzer = new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ TokenFilter filter = new LowerCaseFilter(Version.LUCENE_41, t);
+ try {
+ SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer(Version.LUCENE_41));
+ parser.parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+ filter = new SynonymFilter(filter, parser.build(), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return new TokenStreamComponents(t, filter);
+ }
+ };
+
+ spellchecker.setAccuracy(0.0f);
+ spellchecker.setMinPrefix(1);
+ spellchecker.setMinQueryLength(1);
+ suggester = new NoisyChannelSpellChecker(0.95);
+ wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5, 0.4, 0.1);
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+
+
+ wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.4);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2, ir, "body", wordScorer, 0, 3).corrections;
+ assertThat(corrections.length, equalTo(2));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java
new file mode 100644
index 0000000..5305d99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.timeout;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.FilterBuilders.scriptFilter;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SearchTimeoutTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void simpleTimeoutTest() throws Exception {
+ createIndex("test");
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
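+ // The script filter sleeps 100ms per document while the search timeout is only 10ms,
+ // so the request is expected to return early with isTimedOut() == true.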
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTimeout("10ms")
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("Thread.sleep(100); return true;")))
+ .execute().actionGet();
+ assertThat(searchResponse.isTimedOut(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/similarity/SimilarityTests.java b/src/test/java/org/elasticsearch/similarity/SimilarityTests.java
new file mode 100644
index 0000000..6f72604
--- /dev/null
+++ b/src/test/java/org/elasticsearch/similarity/SimilarityTests.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.similarity;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+public class SimilarityTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testCustomBM25Similarity() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
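+ // field1 is mapped to the "custom" similarity configured below (BM25 with k1=2.0, b=1.5),
+ // while field2 keeps the default similarity; identical content should therefore score differently.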
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("field1")
+ .field("similarity", "custom")
+ .field("type", "string")
+ .endObject()
+ .startObject("field2")
+ .field("similarity", "default")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("similarity.custom.type", "BM25")
+ .put("similarity.custom.k1", 2.0f)
+ .put("similarity.custom.b", 1.5f)
+ ).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumped over the lazy dog",
+ "field2", "the quick brown fox jumped over the lazy dog")
+ .setRefresh(true).execute().actionGet();
+
+ SearchResponse bm25SearchResponse = client().prepareSearch().setQuery(matchQuery("field1", "quick brown fox")).execute().actionGet();
+ assertThat(bm25SearchResponse.getHits().totalHits(), equalTo(1L));
+ float bm25Score = bm25SearchResponse.getHits().hits()[0].score();
+
+ SearchResponse defaultSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet();
+ assertThat(defaultSearchResponse.getHits().totalHits(), equalTo(1L));
+ float defaultScore = defaultSearchResponse.getHits().hits()[0].score();
+
+ assertThat(bm25Score, not(equalTo(defaultScore)));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java b/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java
new file mode 100644
index 0000000..27620c9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.mockstore.MockRepository;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Ignore;
+
+import java.io.File;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@Ignore
+public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest {
+
+ public static long getFailureCount(String repository) {
+ long failureCount = 0;
+ for (RepositoriesService repositoriesService : cluster().getInstances(RepositoriesService.class)) {
+ MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+ failureCount += mockRepository.getFailureCount();
+ }
+ return failureCount;
+ }
+
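+ // Recursively counts regular files under the given directory.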
+ public static int numberOfFiles(File dir) {
+ int count = 0;
+ File[] files = dir.listFiles();
+ if (files != null) {
+ for (File file : files) {
+ if (file.isDirectory()) {
+ count += numberOfFiles(file);
+ } else {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
+
+ public static void stopNode(final String node) {
+ cluster().stopRandomNode(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings settings) {
+ return node.equals(settings.get("name"));
+ }
+ });
+ }
+
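+ // Polls the node's MockRepository every 100ms until it reports being blocked,
+ // failing the test if the block does not kick in within the timeout.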
+ public void waitForBlock(String node, String repository, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ RepositoriesService repositoriesService = cluster().getInstance(RepositoriesService.class, node);
+ MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ if (mockRepository.blocked()) {
+ return;
+ }
+ Thread.sleep(100);
+ }
+ fail("Timeout!!!");
+ }
+
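+ // Polls until the snapshot reports a completed state and its entry has been removed
+ // from the cluster state (i.e. cleanup finished), then returns the snapshot info.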
+ public SnapshotInfo waitForCompletion(String repository, String snapshot, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ SnapshotId snapshotId = new SnapshotId(repository, snapshot);
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
+ assertThat(snapshotInfos.size(), equalTo(1));
+ if (snapshotInfos.get(0).state().completed()) {
+ // Make sure that snapshot clean up operations are finished
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ SnapshotMetaData snapshotMetaData = stateResponse.getState().getMetaData().custom(SnapshotMetaData.TYPE);
+ if (snapshotMetaData == null || snapshotMetaData.snapshot(snapshotId) == null) {
+ return snapshotInfos.get(0);
+ }
+ }
+ Thread.sleep(100);
+ }
+ fail("Timeout!!!");
+ return null;
+ }
+
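+ // Blocks the mock repository on the first node that holds a shard of the given index
+ // and returns that node's name.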
+ public static String blockNodeWithIndex(String index) {
+ for (String node : cluster().nodesInclude(index)) {
+ ((MockRepository)cluster().getInstance(RepositoriesService.class, node).repository("test-repo")).blockOnDataFiles(true);
+ return node;
+ }
+ fail("No nodes for the index " + index + " found");
+ return null;
+ }
+
+ public static void unblockNode(String node) {
+ ((MockRepository)cluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java
new file mode 100644
index 0000000..0913f8f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
+
+ @Test
+ public void restorePersistentSettingsTest() throws Exception {
+ logger.info("--> start node");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+ Client client = client();
+
+ // Add dummy persistent setting
+ logger.info("--> set test persistent setting");
+ String settingValue = "test-" + randomInt();
+ client.admin().cluster().prepareUpdateSettings().setPersistentSettings(ImmutableSettings.settingsBuilder().put(ThreadPool.THREADPOOL_GROUP + "dummy.value", settingValue)).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().get(ThreadPool.THREADPOOL_GROUP + "dummy.value"), equalTo(settingValue));
+
+ logger.info("--> create repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> start snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> clean the test persistent setting");
+ client.admin().cluster().prepareUpdateSettings().setPersistentSettings(ImmutableSettings.settingsBuilder().put(ThreadPool.THREADPOOL_GROUP + "dummy.value", "")).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().get(ThreadPool.THREADPOOL_GROUP + "dummy.value"), equalTo(""));
+
+ logger.info("--> restore snapshot");
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().get(ThreadPool.THREADPOOL_GROUP + "dummy.value"), equalTo(settingValue));
+ }
+
+ @Test
+ public void snapshotDuringNodeShutdownTest() throws Exception {
+ logger.info("--> start 2 nodes");
+ ArrayList<String> nodes = newArrayList();
+ nodes.add(cluster().startNode());
+ nodes.add(cluster().startNode());
+ Client client = client();
+
+ assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0).put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> create repository");
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.TEST))
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], shutting it down", blockedNode);
+ unblockNode(blockedNode);
+
+ logger.info("--> stopping node", blockedNode);
+ stopNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(60));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+ }
+
+ @Test
+ @TestLogging("snapshots:TRACE")
+ public void restoreIndexWithMissingShards() throws Exception {
+ logger.info("--> start 2 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+ cluster().wipeIndices("_all");
+
+ assertAcked(prepareCreate("test-idx-1", 2, settingsBuilder().put("number_of_shards", 6)
+ .put("number_of_replicas", 0)
+ .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
+ ensureGreen();
+
+ logger.info("--> indexing some data into test-idx-1");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client().prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+
+ logger.info("--> shutdown one of the nodes");
+ cluster().stopRandomNode();
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ assertAcked(prepareCreate("test-idx-2", 1, settingsBuilder().put("number_of_shards", 6)
+ .put("number_of_replicas", 0)
+ .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
+ ensureGreen("test-idx-2");
+
+ logger.info("--> indexing some data into test-idx-2");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client().prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+
+ logger.info("--> create repository");
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> start snapshot with default settings - should fail");
+ CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).execute().actionGet();
+
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+
+ createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setPartial(true).execute().actionGet();
+ logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(12));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(12));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(6));
+ assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ assertAcked(client().admin().indices().prepareClose("test-idx-1", "test-idx-2").execute().actionGet());
+
+ logger.info("--> restore incomplete snapshot - should fail");
+ assertThrows(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setWaitForCompletion(true).execute(), SnapshotRestoreException.class);
+
+ logger.info("--> restore snapshot for the index that was snapshotted completely");
+ RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-2").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+ ensureGreen("test-idx-2");
+
+ assertThat(client().prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java
new file mode 100644
index 0000000..7471f14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.repositories.RepositoryException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ */
+public class RepositoriesTests extends AbstractSnapshotTests {
+
+ @Test
+ public void testRepositoryCreation() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> check that repository is really there");
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetaData(true).get();
+ MetaData metaData = clusterStateResponse.getState().getMetaData();
+ RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
+ assertThat(repositoriesMetaData, notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
+
+ logger.info("--> creating anoter repository");
+ putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> check that both repositories are in cluster state");
+ clusterStateResponse = client.admin().cluster().prepareState().clear().setMetaData(true).get();
+ metaData = clusterStateResponse.getState().getMetaData();
+ repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
+ assertThat(repositoriesMetaData, notNullValue());
+ assertThat(repositoriesMetaData.repositories().size(), equalTo(2));
+ assertThat(repositoriesMetaData.repository("test-repo-1"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
+ assertThat(repositoriesMetaData.repository("test-repo-2"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-2").type(), equalTo("fs"));
+
+ logger.info("--> check that both repositories can be retrieved by getRepositories query");
+ GetRepositoriesResponse repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(2));
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
+
+ logger.info("--> delete repository test-repo-1");
+ client.admin().cluster().prepareDeleteRepository("test-repo-1").get();
+ repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(1));
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
+
+ logger.info("--> delete repository test-repo-2");
+ client.admin().cluster().prepareDeleteRepository("test-repo-2").get();
+ repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(0));
+ }
+
+ private RepositoryMetaData findRepository(ImmutableList<RepositoryMetaData> repositories, String name) {
+ for (RepositoryMetaData repository : repositories) {
+ if (repository.name().equals(name)) {
+ return repository;
+ }
+ }
+ return null;
+ }
+
+ @Test
+ public void testMisconfiguredRepository() throws Exception {
+ Client client = client();
+
+ logger.info("--> trying creating repository with incorrect settings");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo").setType("fs").get();
+ fail("Shouldn't be here");
+ } catch (RepositoryException ex) {
+ // Expected
+ }
+ }
+
+ @Test
+ public void repositoryAckTimeoutTest() throws Exception {
+
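+ // With a 0s timeout the master does not wait for other nodes to acknowledge the
+ // cluster state update, so the response reports isAcknowledged() == false even
+ // though the repository change itself still goes through.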
+ logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-1")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(5, 100))
+ )
+ .setTimeout("0s").get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false));
+
+ logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
+ putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(5, 100))
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack");
+ DeleteRepositoryResponse deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-2")
+ .setTimeout("0s").get();
+ assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false));
+
+ logger.info("--> deleting repository test-repo-1 with standard timeout - should ack");
+ deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-1").get();
+ assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
new file mode 100644
index 0000000..754fa0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
@@ -0,0 +1,983 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
+
+ @Override
+ public Settings indexSettings() {
+ // During restore we frequently restore an index to exactly the same state it was in before the snapshot,
+ // which can cause the same checksum file to be written twice during the restore operation.
+ return ImmutableSettings.builder().put(super.indexSettings())
+ .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false)
+ .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false) //TODO: Ask Simon if this is hiding an issue
+ .build();
+ }
+
+ @Test
+ public void basicWorkFlowTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
+ index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));
+
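+ // The "-test-idx-3" pattern excludes that index from the snapshot, so after the
+ // restore below it keeps only its 50 remaining documents.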
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete some data");
+ for (int i = 0; i < 50; i++) {
+ client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
+ }
+ for (int i = 50; i < 100; i++) {
+ client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
+ }
+ for (int i = 0; i < 100; i += 2) {
+ client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
+
+ logger.info("--> close indices");
+ client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
+
+ // Test restore after index deletion
+ logger.info("--> delete indices");
+ cluster().wipeIndices("test-idx-1", "test-idx-2");
+ logger.info("--> restore one index after deletion");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
+ assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
+ }
+
+ @Test
+ public void restoreWithDifferentMappingsAndSettingsTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ logger.info("--> create index with foo type");
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("refresh_interval", 10)));
+
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("foo").setSource("baz", "type=string"));
+ ensureGreen();
+
+ logger.info("--> snapshot it");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete the index and recreate it with bar type");
+ cluster().wipeIndices("test-idx");
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("refresh_interval", 5)));
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=string"));
+ ensureGreen();
+
+ logger.info("--> close index");
+ client.admin().indices().prepareClose("test-idx").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+
+ logger.info("--> assert that old mapping is restored");
+ ImmutableOpenMap<String, MappingMetaData> mappings = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test-idx").getMappings();
+ assertThat(mappings.get("foo"), notNullValue());
+ assertThat(mappings.get("bar"), nullValue());
+
+ logger.info("--> assert that old settings are restored");
+ GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
+ assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10"));
+ }
+
+ @Test
+ public void emptySnapshotTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ }
+
+ @Test
+ public void restoreTemplatesTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())));
+
+ logger.info("--> creating test template");
+ assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template");
+ assertThat(client.admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true));
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices(Strings.EMPTY_ARRAY).get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> restore cluster state");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ // We don't restore any indices here
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template is restored");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices(Strings.EMPTY_ARRAY).get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
+ }
+
+ @Test
+ public void includeGlobalStateTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ File location = newTempDir();
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", location)));
+
+ logger.info("--> creating test template");
+ assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot without global state");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> snapshot with global state");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template");
+ cluster().wipeTemplates("test-template");
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> try restoring cluster state from snapshot without global state");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template wasn't restored");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> restore cluster state");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template is restored");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot without global state but with indices");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template and index ");
+ cluster().wipeIndices("test-idx");
+ cluster().wipeTemplates("test-template");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> try restoring index and cluster state from snapshot without global state");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+ ensureGreen();
+ logger.info("--> check that template wasn't restored but index was");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void snapshotFileFailureDuringSnapshotTest() throws Exception {
+ Client client = client();
+
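+ // random_control_io_exception_rate makes the mock repository fail roughly 20% of
+ // control (metadata) file operations, so the snapshot may either succeed cleanly
+ // or report shard failures; both outcomes are verified below.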
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.TEST))
+ .put("random", randomAsciiOfLength(10))
+ .put("random_control_io_exception_rate", 0.2)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ try {
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
+ // If we are here, that means we didn't have any failures, let's check it
+ assertThat(getFailureCount("test-repo"), equalTo(0L));
+ } else {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+ assertThat(shardFailure.reason(), containsString("Random IOException"));
+ assertThat(shardFailure.nodeId(), notNullValue());
+ assertThat(shardFailure.index(), equalTo("test-idx"));
+ }
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ if (snapshotInfo.state() == SnapshotState.SUCCESS) {
+ assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
+ assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards()));
+ }
+ }
+ } catch (Exception ex) {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(ExceptionsHelper.detailedMessage(ex), containsString("IOException"));
+ }
+ }
+
+ @Test
+ public void dataFileFailureDuringSnapshotTest() throws Exception {
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.TEST))
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 0.1)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
+ // If we are here, that means we didn't have any failures, let's check it
+ assertThat(getFailureCount("test-repo"), equalTo(0L));
+ } else {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+ assertThat(shardFailure.nodeId(), notNullValue());
+ assertThat(shardFailure.index(), equalTo("test-idx"));
+ }
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
+ assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards()));
+ }
+ }
+
+ @Test
+ public void dataFileFailureDuringRestoreTest() throws Exception {
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
+
+ logger.info("--> update repository with mock version");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 0.3)));
+
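+ // With roughly 30% of data file operations failing, individual shard recoveries may
+ // hit IOExceptions, but failed shards are expected to be retried, so the restore
+ // should still complete with all 100 documents.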
+ // Test restore after index deletion
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> restore index after deletion");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+ CountResponse countResponse = client.prepareCount("test-idx").get();
+ assertThat(countResponse.getCount(), equalTo(100L));
+ }
+
+ @Test
+ @TestLogging("snapshots:TRACE")
+ public void deletionOfFailingToRecoverIndexShouldStopRestore() throws Exception {
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
+
+ logger.info("--> update repository with mock version");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 1.0) // Fail completely
+ ));
+
+ // Test restore after index deletion
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> restore index after deletion");
+ ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
+
+ logger.info("--> wait for the index to appear");
+ // that would mean that recovery process started and failing
+ assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> get restore results");
+ // Now read restore results and make sure it failed
+ RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().failedShards()));
+
+ logger.info("--> restoring working repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> trying to restore index again");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+ ensureGreen();
+ CountResponse countResponse = client.prepareCount("test-idx").get();
+ assertThat(countResponse.getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void unallocatedShardsTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))));
+
+ logger.info("--> creating index that cannot be allocated");
+ prepareCreate("test-idx", 2, ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)).get();
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().reason(), startsWith("Indices don't have primary shards"));
+ }
+
+ @Test
+ public void deleteSnapshotTest() throws Exception {
+ final int numberOfSnapshots = between(5, 15);
+ Client client = client();
+
+ File repo = newTempDir(LifecycleScope.SUITE);
+ logger.info("--> creating repository at " + repo.getAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ int[] numberOfFiles = new int[numberOfSnapshots];
+ logger.info("--> creating {} snapshots ", numberOfSnapshots);
+ for (int i = 0; i < numberOfSnapshots; i++) {
+ for (int j = 0; j < 10; j++) {
+ index("test-idx", "doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j);
+ }
+ refresh();
+ logger.info("--> snapshot {}", i);
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i).setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ // Store number of files after each snapshot
+ numberOfFiles[i] = numberOfFiles(repo);
+ }
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
+ int numberOfFilesBeforeDeletion = numberOfFiles(repo);
+
+ logger.info("--> delete all snapshots except the first one and last one");
+ for (int i = 1; i < numberOfSnapshots - 1; i++) {
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-" + i).get();
+ }
+
+ int numberOfFilesAfterDeletion = numberOfFiles(repo);
+
+ assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index");
+ String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", lastSnapshot).setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
+
+ logger.info("--> delete the last snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot).get();
+ logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
+ assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
+ }
+
+ @Test
+ @TestLogging("snapshots:TRACE")
+ public void snapshotClosedIndexTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))));
+
+ createIndex("test-idx", "test-idx-closed");
+ ensureGreen();
+ logger.info("--> closing index test-idx-closed");
+ assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
+ ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(1));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(0));
+
+ logger.info("--> deleting snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+ }
+
+ @Test
+ public void renameOnRestoreTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))));
+
+ createIndex("test-idx-1", "test-idx-2");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> restore indices with different names");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
+
+ logger.info("--> close just restored indices");
+ client.admin().indices().prepareClose("test-idx-1-copy", "test-idx-2-copy").get();
+
+ logger.info("--> and try to restore these indices again");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
+
+ logger.info("--> close indices");
+ assertAcked(client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy"));
+
+ logger.info("--> restore indices with different names");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+-2)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ logger.info("--> try renaming indices using the same name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("same-name").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices using the same name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("test-idx-2").setRenameReplacement("test-idx-1").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices using invalid index name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("__WRONG__").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (InvalidIndexNameException ex) {
+ // Expected
+ }
+ }
+
+ @Test
+ @TestLogging("cluster.routing.allocation.decider:TRACE")
+ public void moveShardWhileSnapshottingTest() throws Exception {
+ Client client = client();
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], moving shards away from this node", blockedNode);
+ ImmutableSettings.Builder excludeSettings = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", blockedNode);
+ client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings).get();
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
+
+ assertThat(snapshotInfos.size(), equalTo(1));
+ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> replace mock repository with real one at the same location");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
+ @Test
+ @TestLogging("cluster.routing.allocation.decider:TRACE")
+ public void deleteRepositoryWhileSnapshottingTest() throws Exception {
+ Client client = client();
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode);
+
+ try {
+ client.admin().cluster().prepareDeleteRepository("test-repo").execute().get();
+ fail("shouldn't be able to delete in-use repository");
+ } catch (Exception ex) {
+ logger.info("--> in-use repository deletion failed");
+ }
+
+ logger.info("--> trying to move repository to another location");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", new File(repositoryLocation, "test"))
+ ).get();
+ fail("shouldn't be able to replace in-use repository");
+ } catch (Exception ex) {
+ logger.info("--> in-use repository replacement failed");
+ }
+
+ logger.info("--> trying to create a repository with different name");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", new File(repositoryLocation, "test"))));
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
+
+ assertThat(snapshotInfos.size(), equalTo(1));
+ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> replace mock repository with real one at the same location");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void urlRepositoryTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ File repositoryLocation = newTempDir(LifecycleScope.SUITE);
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> create read-only URL repository");
+ assertAcked(client.admin().cluster().preparePutRepository("url-repo")
+ .setType("url").setSettings(ImmutableSettings.settingsBuilder()
+ .put("url", repositoryLocation.toURI().toURL())
+ .put("list_directories", randomBoolean())));
+ logger.info("--> restore index after deletion");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("url-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> list available shapshots");
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+ assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+
+ logger.info("--> delete snapshot");
+ DeleteSnapshotResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+ assertAcked(deleteSnapshotResponse);
+
+ logger.info("--> list available shapshot again, no snapshots should be returned");
+ getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+ assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
+ }
+
+ @Test
+ public void throttlingTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ File repositoryLocation = newTempDir(LifecycleScope.SUITE);
+ boolean throttleSnapshot = randomBoolean();
+ boolean throttleRestore = randomBoolean();
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))
+ .put("max_restore_bytes_per_sec", throttleRestore ? "2.5k" : "0")
+ .put("max_snapshot_bytes_per_sec", throttleSnapshot ? "2.5k" : "0")));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
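+ // Sum the throttle pause times reported by the repository instance on every node.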
+ long snapshotPause = 0L;
+ long restorePause = 0L;
+ for (RepositoriesService repositoriesService : cluster().getInstances(RepositoriesService.class)) {
+ snapshotPause += repositoriesService.repository("test-repo").snapshotThrottleTimeInNanos();
+ restorePause += repositoriesService.repository("test-repo").restoreThrottleTimeInNanos();
+ }
+
+ if (throttleSnapshot) {
+ assertThat(snapshotPause, greaterThan(0L));
+ } else {
+ assertThat(snapshotPause, equalTo(0L));
+ }
+
+ if (throttleRestore) {
+ assertThat(restorePause, greaterThan(0L));
+ } else {
+ assertThat(restorePause, equalTo(0L));
+ }
+ }
+
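+ // Polls until the index exists or the timeout elapses.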
+ private boolean waitForIndex(String index, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ if (client().admin().indices().prepareExists(index).execute().actionGet().isExists()) {
+ return true;
+ }
+ Thread.sleep(100);
+ }
+ return false;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java b/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
new file mode 100644
index 0000000..ee6cefb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+
+/**
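+ * Tests for index name filtering in {@link SnapshotUtils#filterIndices}.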
+ */
+public class SnapshotUtilsTests {
+ @Test
+ public void testIndexNameFiltering() {
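+ // Filter semantics: "*" and "ba*" are wildcards, "+name" explicitly includes,
+ // "-name" excludes, and lenient IndicesOptions ignore unmatched or empty entries.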
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"*"}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo"}, new String[]{"foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"ba*", "-bar", "-baz"}, new String[]{});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-bar"}, new String[]{"foo", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-ba*"}, new String[]{"foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+ba*"}, new String[]{"bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+bar", "+foo"}, new String[]{"bar", "foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"zzz", "bar"}, IndicesOptions.lenient(), new String[]{"bar"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{""}, IndicesOptions.lenient(), new String[]{});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "", "ba*"}, IndicesOptions.lenient(), new String[]{"foo", "bar", "baz"});
+ }
+
+ private void assertIndexNameFiltering(String[] indices, String[] filter, String[] expected) {
+ assertIndexNameFiltering(indices, filter, IndicesOptions.lenient(), expected);
+ }
+
+ private void assertIndexNameFiltering(String[] indices, String[] filter, IndicesOptions indicesOptions, String[] expected) {
+ ImmutableList<String> indicesList = ImmutableList.copyOf(indices);
+ ImmutableList<String> actual = SnapshotUtils.filterIndices(indicesList, filter, indicesOptions);
+ assertThat(actual, containsInAnyOrder(expected));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
new file mode 100644
index 0000000..3e80880
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+
+/**
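+ * Delegating {@link BlobStore} base class that test mocks extend to intercept calls.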
+ *
+ */
+public class BlobStoreWrapper implements BlobStore {
+
+ private BlobStore delegate;
+
+ public BlobStoreWrapper(BlobStore delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+ return delegate.immutableBlobContainer(path);
+ }
+
+ @Override
+ public void delete(BlobPath path) {
+ delegate.delete(path);
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ protected BlobStore delegate() {
+ return delegate;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java b/src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java
new file mode 100644
index 0000000..d72409e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots.mockstore;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
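+ * Delegating {@link ImmutableBlobContainer} base class for intercepting blob reads and writes in tests.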
+ *
+ */
+public class ImmutableBlobContainerWrapper implements ImmutableBlobContainer {
+ private ImmutableBlobContainer delegate;
+
+ public ImmutableBlobContainerWrapper(ImmutableBlobContainer delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener) {
+ delegate.writeBlob(blobName, is, sizeInBytes, listener);
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
+ delegate.writeBlob(blobName, is, sizeInBytes);
+ }
+
+ @Override
+ public BlobPath path() {
+ return delegate.path();
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return delegate.blobExists(blobName);
+ }
+
+ @Override
+ public void readBlob(String blobName, ReadBlobListener listener) {
+ delegate.readBlob(blobName, listener);
+ }
+
+ @Override
+ public byte[] readBlobFully(String blobName) throws IOException {
+ return delegate.readBlobFully(blobName);
+ }
+
+ @Override
+ public boolean deleteBlob(String blobName) throws IOException {
+ return delegate.deleteBlob(blobName);
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
+ delegate.deleteBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public void deleteBlobsByFilter(BlobNameFilter filter) throws IOException {
+ delegate.deleteBlobsByFilter(filter);
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
+ return delegate.listBlobs();
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ return delegate.listBlobsByPrefix(blobNamePrefix);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
new file mode 100644
index 0000000..90c4e39
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.repositories.RepositoryName;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.repositories.fs.FsRepository;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
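+ * {@link FsRepository} variant for tests: injects random I/O failures into control
+ * and data files and can block blob-store operations until explicitly unblocked.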
+ */
+public class MockRepository extends FsRepository {
+
+ private final AtomicLong failureCounter = new AtomicLong();
+
+ public void resetFailureCount() {
+ failureCounter.set(0);
+ }
+
+ public long getFailureCount() {
+ return failureCounter.get();
+ }
+
+ private final double randomControlIOExceptionRate;
+
+ private final double randomDataFileIOExceptionRate;
+
+ private final long waitAfterUnblock;
+
+ private final MockBlobStore mockBlobStore;
+
+ private final String randomPrefix;
+
+ private volatile boolean blockOnControlFiles;
+
+ private volatile boolean blockOnDataFiles;
+
+ private volatile boolean blocked = false;
+
+ @Inject
+ public MockRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
+ super(name, repositorySettings, indexShardRepository);
+ randomControlIOExceptionRate = repositorySettings.settings().getAsDouble("random_control_io_exception_rate", 0.0);
+ randomDataFileIOExceptionRate = repositorySettings.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
+ blockOnControlFiles = repositorySettings.settings().getAsBoolean("block_on_control", false);
+ blockOnDataFiles = repositorySettings.settings().getAsBoolean("block_on_data", false);
+ randomPrefix = repositorySettings.settings().get("random");
+ waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L);
+ logger.info("starting mock repository with random prefix " + randomPrefix);
+ mockBlobStore = new MockBlobStore(super.blobStore());
+ }
+
+ private void addFailure() {
+ failureCounter.incrementAndGet();
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ unblock();
+ super.doStop();
+ }
+
+ @Override
+ protected BlobStore blobStore() {
+ return mockBlobStore;
+ }
+
+ public boolean blocked() {
+ return mockBlobStore.blocked();
+ }
+
+ public void unblock() {
+ mockBlobStore.unblockExecution();
+ }
+
+ public void blockOnDataFiles(boolean blocked) {
+ blockOnDataFiles = blocked;
+ }
+
+ public void blockOnControlFiles(boolean blocked) {
+ blockOnControlFiles = blocked;
+ }
+
+ public class MockBlobStore extends BlobStoreWrapper {
+ ConcurrentMap<String, AtomicLong> accessCounts = new ConcurrentHashMap<String, AtomicLong>();
+
+ private long incrementAndGet(String path) {
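+ // Count accesses per path so repeated accesses to the same blob hash to different values.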
+ AtomicLong value = accessCounts.get(path);
+ if (value == null) {
+ value = accessCounts.putIfAbsent(path, new AtomicLong(1));
+ }
+ if (value != null) {
+ return value.incrementAndGet();
+ }
+ return 1;
+ }
+
+ public MockBlobStore(BlobStore delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+ return new MockImmutableBlobContainer(super.immutableBlobContainer(path));
+ }
+
+ public synchronized void unblockExecution() {
+ if (blocked) {
+ blocked = false;
+ // Clear blocking flags so we don't try to block again
+ blockOnDataFiles = false;
+ blockOnControlFiles = false;
+ this.notifyAll();
+ }
+ }
+
+ public boolean blocked() {
+ return blocked;
+ }
+
+ private synchronized boolean blockExecution() {
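+ // Block until unblockExecution() is called; returns true if this call actually blocked.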
+ boolean wasBlocked = false;
+ try {
+ while (blockOnDataFiles || blockOnControlFiles) {
+ blocked = true;
+ this.wait();
+ wasBlocked = true;
+ }
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ return wasBlocked;
+ }
+
+ private class MockImmutableBlobContainer extends ImmutableBlobContainerWrapper {
+
+ private boolean shouldFail(String blobName, double probability) {
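+ // Decide deterministically whether this access should fail: hash the blob path,
+ // the per-repository random prefix and the access count, and fail when the hash
+ // falls below the configured probability threshold.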
+ if (probability > 0.0) {
+ String path = path().add(blobName).buildAsString("/") + "/" + randomPrefix;
+ path += "/" + incrementAndGet(path);
+ logger.info("checking [{}] [{}]", path, Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability);
+ return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability;
+ } else {
+ return false;
+ }
+ }
+
+ private int hashCode(String path) {
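+ // Derive a stable 32-bit value from the MD5 of the path so failures are reproducible for a given random prefix.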
+ try {
+ MessageDigest digest = MessageDigest.getInstance("MD5");
+ byte[] bytes = digest.digest(path.getBytes("UTF-8"));
+ int i = 0;
+ return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
+ | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
+ } catch (NoSuchAlgorithmException ex) {
+ throw new ElasticsearchException("cannot calculate hashcode", ex);
+ } catch (UnsupportedEncodingException ex) {
+ throw new ElasticsearchException("cannot calculate hashcode", ex);
+ }
+ }
+
+ private void maybeIOExceptionOrBlock(String blobName) throws IOException {
+ if (blobName.startsWith("__")) {
+ if (shouldFail(blobName, randomDataFileIOExceptionRate)) {
+ logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path());
+ addFailure();
+ throw new IOException("Random IOException");
+ } else if (blockOnDataFiles) {
+ logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
+ if (blockExecution() && waitAfterUnblock > 0) {
+ try {
+ // Delay the operation after unblocking so node shutdown can start while it is still running.
+ Thread.sleep(waitAfterUnblock);
+ } catch (InterruptedException ex) {
+ // ignore the interruption and continue
+ }
+ }
+ }
+ } else {
+ if (shouldFail(blobName, randomControlIOExceptionRate)) {
+ logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path());
+ addFailure();
+ throw new IOException("Random IOException");
+ } else if (blockOnControlFiles) {
+ logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
+ if (blockExecution() && waitAfterUnblock > 0) {
+ try {
+ // Delay the operation after unblocking so node shutdown can start while it is still running.
+ Thread.sleep(waitAfterUnblock);
+ } catch (InterruptedException ex) {
+ // ignore the interruption and continue
+ }
+ }
+ }
+ }
+ }
+
+ private void maybeIOExceptionOrBlock(String blobName, ImmutableBlobContainer.WriterListener listener) {
+ try {
+ maybeIOExceptionOrBlock(blobName);
+ } catch (IOException ex) {
+ listener.onFailure(ex);
+ }
+ }
+
+ private void maybeIOExceptionOrBlock(String blobName, ImmutableBlobContainer.ReadBlobListener listener) {
+ try {
+ maybeIOExceptionOrBlock(blobName);
+ } catch (IOException ex) {
+ listener.onFailure(ex);
+ }
+ }
+
+ public MockImmutableBlobContainer(ImmutableBlobContainer delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener) {
+ maybeIOExceptionOrBlock(blobName, listener);
+ super.writeBlob(blobName, is, sizeInBytes, listener);
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ super.writeBlob(blobName, is, sizeInBytes);
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return super.blobExists(blobName);
+ }
+
+ @Override
+ public void readBlob(String blobName, ReadBlobListener listener) {
+ maybeIOExceptionOrBlock(blobName, listener);
+ super.readBlob(blobName, listener);
+ }
+
+ @Override
+ public byte[] readBlobFully(String blobName) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ return super.readBlobFully(blobName);
+ }
+
+ @Override
+ public boolean deleteBlob(String blobName) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ return super.deleteBlob(blobName);
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
+ maybeIOExceptionOrBlock(blobNamePrefix);
+ super.deleteBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public void deleteBlobsByFilter(BlobNameFilter filter) throws IOException {
+ maybeIOExceptionOrBlock("");
+ super.deleteBlobsByFilter(filter);
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
+ maybeIOExceptionOrBlock("");
+ return super.listBlobs();
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ maybeIOExceptionOrBlock(blobNamePrefix);
+ return super.listBlobsByPrefix(blobNamePrefix);
+ }
+
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java
new file mode 100644
index 0000000..0da50f1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.repositories.Repository;
+
+/**
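+ * Guice module that binds {@link MockRepository} so tests can register it as a repository type via its class name.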
+ */
+public class MockRepositoryModule extends AbstractModule {
+
+ public MockRepositoryModule() {
+ super();
+ }
+
+ @Override
+ protected void configure() {
+ bind(Repository.class).to(MockRepository.class).asEagerSingleton();
+ bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton();
+ }
+
+}
+
diff --git a/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java b/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java
new file mode 100644
index 0000000..86ae039
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.client;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
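+ * Stress test for transport client failover: rolling-restarts three nodes while a background thread keeps indexing through the client.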
+ */
+public class ClientFailover {
+
+ public static void main(String[] args) throws Exception {
+ Node[] nodes = new Node[3];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().node();
+ }
+
+ final TransportClient client = new TransportClient()
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9301))
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9302));
+
+ final AtomicBoolean done = new AtomicBoolean();
+ final AtomicLong indexed = new AtomicLong();
+ final CountDownLatch latch = new CountDownLatch(1);
+ Thread indexer = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ try {
+ client.prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
+ indexed.incrementAndGet();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ latch.countDown();
+ }
+ });
+ indexer.start();
+
+ for (int i = 0; i < 100; i++) {
+ int index = i % nodes.length;
+ nodes[index].close();
+
+ ClusterHealthResponse health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ System.err.println("timed out on health");
+ }
+
+ nodes[index] = NodeBuilder.nodeBuilder().node();
+
+ health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ System.err.println("timed out on health");
+ }
+ }
+
+ latch.await();
+
+ // TODO add verification to the number of indexed docs
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java b/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
new file mode 100644
index 0000000..8885d09
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.fullrestart;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+
+import java.io.File;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
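+ * Stress test that repeatedly performs full cluster restarts: bulk-indexes documents, shuts every node down, restarts them all and verifies doc counts and searches.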
+ *
+ */
+public class FullRestartStressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private int numberOfNodes = 4;
+
+ private boolean clearNodeWork = false;
+
+ private int numberOfIndices = 5;
+ private int textTokens = 150;
+ private int numberOfFields = 10;
+ private int bulkSize = 1000;
+ private int numberOfDocsPerRound = 50000;
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private AtomicLong indexCounter = new AtomicLong();
+
+ public FullRestartStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfIndices(int numberOfIndices) {
+ this.numberOfIndices = numberOfIndices;
+ return this;
+ }
+
+ public FullRestartStressTest textTokens(int textTokens) {
+ this.textTokens = textTokens;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfFields(int numberOfFields) {
+ this.numberOfFields = numberOfFields;
+ return this;
+ }
+
+ public FullRestartStressTest bulkSize(int bulkSize) {
+ this.bulkSize = bulkSize;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfDocsPerRound(int numberOfDocsPerRound) {
+ this.numberOfDocsPerRound = numberOfDocsPerRound;
+ return this;
+ }
+
+ public FullRestartStressTest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public FullRestartStressTest period(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ public FullRestartStressTest clearNodeWork(boolean clearNodeWork) {
+ this.clearNodeWork = clearNodeWork;
+ return this;
+ }
+
+ public void run() throws Exception {
+ long numberOfRounds = 0;
+ Random random = new Random(0);
+ long testStart = System.currentTimeMillis();
+ while (true) {
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ Node client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ // verify that the indices are there
+ for (int i = 0; i < numberOfIndices; i++) {
+ try {
+ client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
+ } catch (Exception e) {
+ // might already exist, that's fine
+ }
+ }
+
+ logger.info("*** Waiting for GREEN status");
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.info("*** index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ // verify count
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.debug("index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ if (count.getCount() != indexCounter.get()) {
+ logger.warn("!!! count does not match, index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ throw new Exception("failed test, count does not match...");
+ }
+ }
+
+ // verify search
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ // search with a norms field so we don't rely on the match_all filter cache
+ SearchResponse search = client.client().prepareSearch().setQuery(matchAllQuery().normsField("field")).execute().actionGet();
+ logger.debug("index_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
+ if (search.getHits().totalHits() != indexCounter.get()) {
+ logger.warn("!!! search does not match, index_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
+ throw new Exception("failed test, count does not match...");
+ }
+ }
+
+ logger.info("*** ROUND {}", ++numberOfRounds);
+ // bulk index data
+ int numberOfBulks = numberOfDocsPerRound / bulkSize;
+ for (int b = 0; b < numberOfBulks; b++) {
+ BulkRequestBuilder bulk = client.client().prepareBulk();
+ for (int k = 0; k < bulkSize; k++) {
+ StringBuilder sb = new StringBuilder();
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value" + ThreadLocalRandom.current().nextInt());
+
+ int fields = ThreadLocalRandom.current().nextInt(numberOfFields);
+ for (int i = 0; i < fields; i++) {
+ json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
+ int tokens = ThreadLocalRandom.current().nextInt(textTokens);
+ sb.setLength(0);
+ for (int j = 0; j < tokens; j++) {
+ sb.append(Strings.randomBase64UUID(random)).append(' ');
+ }
+ json.field("text_" + i, sb.toString());
+ }
+
+ json.endObject();
+
+ bulk.add(Requests.indexRequest("test" + ThreadLocalRandom.current().nextInt(numberOfIndices)).type("type1").source(json));
+ indexCounter.incrementAndGet();
+ }
+ bulk.execute().actionGet();
+ }
+
+ client.client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
+
+ client.close();
+ for (Node node : nodes) {
+ File[] nodeDatas = ((InternalNode) node).injector().getInstance(NodeEnvironment.class).nodeDataLocations();
+ node.close();
+ if (clearNodeWork && !settings.get("gateway.type").equals("local")) {
+ FileSystemUtils.deleteRecursively(nodeDatas);
+ }
+ }
+
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ logger.info("test finished, full_restart_rounds [{}]", numberOfRounds);
+ break;
+ }
+
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfNodes = 2;
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.shard.check_on_startup", true)
+ .put("gateway.type", "local")
+ .put("gateway.recover_after_nodes", numberOfNodes)
+ .put("index.number_of_shards", 1)
+ .put("path.data", "data/data1,data/data2")
+ .build();
+
+ FullRestartStressTest test = new FullRestartStressTest()
+ .settings(settings)
+ .period(TimeValue.timeValueMinutes(20))
+ .clearNodeWork(false) // only applies to shared gateway
+ .numberOfNodes(numberOfNodes)
+ .numberOfIndices(1)
+ .textTokens(150)
+ .numberOfFields(10)
+ .bulkSize(1000)
+ .numberOfDocsPerRound(10000);
+
+ test.run();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java b/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java
new file mode 100644
index 0000000..67a092b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.gcbehavior;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.index.query.FilterBuilders.rangeFilter;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+public class FilterCacheGcStress {
+
+ public static void main(String[] args) {
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("gateway.type", "none")
+ .build();
+
+ Node node = NodeBuilder.nodeBuilder().settings(settings).node();
+ final Client client = node.client();
+
+ client.admin().indices().prepareCreate("test").execute().actionGet();
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+
+ final AtomicBoolean stop = new AtomicBoolean();
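+ // shared stop flag for both threads; note that nothing ever sets it, so this test runs until the JVM is killed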
+
+ Thread indexingThread = new Thread() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ client.prepareIndex("test", "type1").setSource("field", System.currentTimeMillis()).execute().actionGet();
+ }
+ }
+ };
+ indexingThread.start();
+
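+ // each search binds the range filter to the current time, so every request caches a brand-new filter entry, keeping pressure on the filter cache and the GC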
+ Thread searchThread = new Thread() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ client.prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), rangeFilter("field").from(System.currentTimeMillis() - 1000000)))
+ .execute().actionGet();
+ }
+ }
+ };
+
+ searchThread.start();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java b/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java
new file mode 100644
index 0000000..b1c24c2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.get;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class GetStressTest {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build();
+
+ final int NUMBER_OF_NODES = 2;
+ final int NUMBER_OF_THREADS = 50;
+ final TimeValue TEST_TIME = TimeValue.parseTimeValue("10m", null);
+
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ final Node client = NodeBuilder.nodeBuilder()
+ .settings(settings)
+ .client(true)
+ .node();
+
+ client.client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ final AtomicBoolean done = new AtomicBoolean();
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong counter = new AtomicLong();
+
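+ // each worker indexes a doc under a fresh id and immediately gets it back, checking realtime get visibility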
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ThreadLocalRandom random = ThreadLocalRandom.current();
+ while (!done.get()) {
+ String id = String.valueOf(idGenerator.incrementAndGet());
+ client.client().prepareIndex("test", "type1", id)
+ .setSource("field", random.nextInt(100))
+ .execute().actionGet();
+
+ GetResponse getResponse = client.client().prepareGet("test", "type1", id)
+ //.setFields(Strings.EMPTY_ARRAY)
+ .execute().actionGet();
+ if (!getResponse.isExists()) {
+ System.err.println("Failed to find " + id);
+ }
+
+ long count = counter.incrementAndGet();
+ if ((count % 10000) == 0) {
+ System.out.println("Executed " + count);
+ }
+ }
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ Thread.sleep(TEST_TIME.millis());
+
+ System.out.println("test done.");
+ done.set(true);
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java b/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java
new file mode 100644
index 0000000..11092ca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.get;
+
+import com.google.common.collect.Sets;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ */
+public class MGetStress1 {
+
+ public static void main(String[] args) throws Exception {
+ final int NUMBER_OF_NODES = 2;
+ final int NUMBER_OF_DOCS = 50000;
+ final int MGET_BATCH = 1000;
+
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().node();
+ }
+
+ System.out.println("---> START Indexing initial data [" + NUMBER_OF_DOCS + "]");
+ final Client client = nodes[0].client();
+ for (int i = 0; i < NUMBER_OF_DOCS; i++) {
+ client.prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ System.out.println("---> DONE Indexing initial data [" + NUMBER_OF_DOCS + "]");
+
+ final AtomicBoolean done = new AtomicBoolean();
+ // start indexer
+ Thread indexer = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ client.prepareIndex("test", "type", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
+ .setSource("field", "value").execute().actionGet();
+ }
+ }
+ });
+ indexer.start();
+ System.out.println("---> Starting indexer");
+
+ // start the mget one
+ Thread mget = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ Set<String> ids = Sets.newHashSet();
+ for (int i = 0; i < MGET_BATCH; i++) {
+ ids.add(Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)));
+ }
+ //System.out.println("---> mget for [" + ids.size() + "]");
+ MultiGetResponse response = client.prepareMultiGet().add("test", "type", ids).execute().actionGet();
+ int expected = ids.size();
+ int count = 0;
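+ // every requested id should come back exactly once; a failed removal from the set means the same id was returned twice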
+ for (MultiGetItemResponse item : response) {
+ count++;
+ if (item.isFailed()) {
+ System.err.println("item failed... " + item.getFailure());
+ } else {
+ boolean removed = ids.remove(item.getId());
+ if (!removed) {
+ System.err.println("got id twice " + item.getId());
+ }
+ }
+ }
+ if (expected != count) {
+ System.err.println("Expected [" + expected + "], got back [" + count + "]");
+ }
+ }
+ }
+ });
+ mget.start();
+ System.out.println("---> Starting mget");
+
+ Thread.sleep(TimeValue.timeValueMinutes(10).millis());
+
+ done.set(true);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java b/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java
new file mode 100644
index 0000000..2b84fb0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.indexing;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+/**
+ */
+public class BulkIndexingStressTest {
+
+ public static void main(String[] args) {
+ final int NUMBER_OF_NODES = 4;
+ final int NUMBER_OF_INDICES = 600;
+ final int BATCH = 300;
+
+ final Settings nodeSettings = ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).build();
+
+// ESLogger logger = Loggers.getLogger("org.elasticsearch");
+// logger.setLevel("DEBUG");
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(nodeSettings).node();
+ }
+
+ Client client = nodes.length == 1 ? nodes[0].client() : nodes[1].client();
+
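+ // endless loop: every bulk request spreads documents over hundreds of indices, presumably to stress index creation and mapping updates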
+ while (true) {
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ for (int i = 0; i < BATCH; i++) {
+ bulkRequest.add(Requests.indexRequest("test" + ThreadLocalRandom.current().nextInt(NUMBER_OF_INDICES)).type("type").source("field", "value"));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ if (bulkResponse.hasFailures()) {
+ for (BulkItemResponse item : bulkResponse) {
+ if (item.isFailed()) {
+ System.out.println("failed response:" + item.getFailureMessage());
+ }
+ }
+
+ throw new RuntimeException("Failed responses");
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java b/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java
new file mode 100644
index 0000000..15d324c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.indexing;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Checks that the index operation does not create duplicate documents.
+ */
+public class ConcurrentIndexingVersioningStressTest {
+
+ public static void main(String[] args) throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "none")
+ .build();
+
+ Node node1 = nodeBuilder().settings(settings).node();
+ Node node2 = nodeBuilder().settings(settings).node();
+ final Node client = nodeBuilder().settings(settings).client(true).node();
+
+ final int NUMBER_OF_DOCS = 10000;
+ final int NUMBER_OF_THREADS = 10;
+ final long NUMBER_OF_ITERATIONS = SizeValue.parseSizeValue("10k").singles();
+ final long DELETE_EVERY = 10;
+
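+ // concurrent writers index and delete random ids from a bounded id space; the verification below expects each id to resolve to at most one doc with a stable version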
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ if ((i % DELETE_EVERY) == 0) {
+ client.client().prepareDelete("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).execute().actionGet();
+ } else {
+ client.client().prepareIndex("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).setSource("field1", "value1").execute().actionGet();
+ }
+ }
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+ }
+
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ latch.await();
+ System.out.println("done indexing, verifying docs");
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < NUMBER_OF_DOCS; i++) {
+ String id = Integer.toString(i);
+ for (int j = 0; j < 5; j++) {
+ SearchResponse response = client.client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).execute().actionGet();
+ if (response.getHits().totalHits() > 1) {
+ System.err.println("[" + i + "] FAIL, HITS [" + response.getHits().totalHits() + "]");
+ }
+ }
+ GetResponse getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (getResponse.isExists()) {
+ long version = getResponse.getVersion();
+ for (int j = 0; j < 5; j++) {
+ getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (!getResponse.isExists()) {
+ System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
+ break;
+ }
+ if (version != getResponse.getVersion()) {
+ System.err.println("[" + i + "] FAIL, DIFFERENT VERSIONS: [" + version + "], [" + getResponse.getVersion() + "]");
+ break;
+ }
+ }
+ } else {
+ for (int j = 0; j < 5; j++) {
+ getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (getResponse.isExists()) {
+ System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
+ break;
+ }
+ }
+ }
+ }
+ System.out.println("done.");
+
+ client.close();
+ node1.close();
+ node2.close();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java b/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java
new file mode 100644
index 0000000..6bf78d5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.leaks;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.monitor.jvm.JvmService;
+import org.elasticsearch.monitor.network.NetworkService;
+import org.elasticsearch.monitor.os.OsService;
+import org.elasticsearch.monitor.process.ProcessService;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+
+public class GenericStatsLeak {
+
+ public static void main(String[] args) {
+ InternalNode node = (InternalNode) NodeBuilder.nodeBuilder().settings(ImmutableSettings.settingsBuilder()
+ .put("monitor.os.refresh_interval", 0)
+ .put("monitor.process.refresh_interval", 0)
+ .put("monitor.network.refresh_interval", 0)
+ ).node();
+
+ JvmService jvmService = node.injector().getInstance(JvmService.class);
+ OsService osService = node.injector().getInstance(OsService.class);
+ ProcessService processService = node.injector().getInstance(ProcessService.class);
+ NetworkService networkService = node.injector().getInstance(NetworkService.class);
+
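+ // with refresh intervals of 0 every stats() call should bypass the cached sample and do real work; loop forever and watch memory for growth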
+ while (true) {
+ jvmService.stats();
+ osService.stats();
+ processService.stats();
+ networkService.stats();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java b/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java
new file mode 100644
index 0000000..e558b47
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.leaks;
+
+import org.elasticsearch.monitor.jvm.JvmStats;
+
+/**
+ * This test mainly checks for the native memory leak with getLastGCInfo (which is now
+ * disabled by default).
+ */
+public class JvmStatsLeak {
+
+ public static void main(String[] args) {
+ while (true) {
+ JvmStats.jvmStats();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
new file mode 100644
index 0000000..8a20dca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+
+/**
+ *
+ */
+public class ManyIndicesRemoteStressTest {
+
+ private static final ESLogger logger = Loggers.getLogger(ManyIndicesRemoteStressTest.class);
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfShards = 1;
+ int numberOfReplicas = 1;
+ int numberOfIndices = 1000;
+ int numberOfDocs = 1;
+
+ Client client;
+ Node node = null;
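+ // hardcoded toggle: true uses a remote transport client against localhost:9300, false starts an in-JVM client node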
+ if (true) {
+ client = new TransportClient().addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
+ } else {
+ node = NodeBuilder.nodeBuilder().client(true).node();
+ client = node.client();
+ }
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ logger.info("START index [{}] ...", i);
+ client.admin().indices().prepareCreate("index_" + i)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", numberOfReplicas))
+ .execute().actionGet();
+
+ for (int j = 0; j < numberOfDocs; j++) {
+ client.prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
+ }
+ logger.info("DONE index [{}]", i);
+ }
+
+ client.admin().indices().prepareGatewaySnapshot().execute().actionGet();
+
+ logger.info("closing node...");
+ if (node != null) {
+ node.close();
+ }
+ logger.info("node closed");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java
new file mode 100644
index 0000000..4d437bc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class ManyIndicesStressTest {
+
+ private static final ESLogger logger = Loggers.getLogger(ManyIndicesStressTest.class);
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfIndices = 100;
+ int numberOfDocs = 100;
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.shard.check_on_startup", false)
+ .put("gateway.type", "local")
+ .put("index.number_of_shards", 1)
+ .build();
+ Node node = NodeBuilder.nodeBuilder().settings(settings).node();
+
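+ // create many single-shard indices, restart the node, and verify every index comes back unblocked with the expected doc count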
+ for (int i = 0; i < numberOfIndices; i++) {
+ logger.info("START index [{}] ...", i);
+ node.client().admin().indices().prepareCreate("index_" + i).execute().actionGet();
+
+ for (int j = 0; j < numberOfDocs; j++) {
+ node.client().prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
+ }
+ logger.info("DONE index [{}] ...", i);
+ }
+
+ node.client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
+
+ logger.info("closing node...");
+ node.close();
+ logger.info("node closed");
+
+ logger.info("starting node...");
+ node = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ ClusterHealthResponse health = node.client().admin().cluster().prepareHealth().setTimeout("5m").setWaitForYellowStatus().execute().actionGet();
+ logger.info("health: " + health.getStatus());
+ logger.info("active shards: " + health.getActiveShards());
+ logger.info("active primary shards: " + health.getActivePrimaryShards());
+ if (health.isTimedOut()) {
+ logger.error("Timed out on health...");
+ }
+
+ ClusterState clusterState = node.client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (int i = 0; i < numberOfIndices; i++) {
+ if (clusterState.blocks().indices().containsKey("index_" + i)) {
+ logger.error("index [{}] has blocks: {}", i, clusterState.blocks().indices().get("index_" + i));
+ }
+ }
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ long count = node.client().prepareCount("index_" + i).setQuery(matchAllQuery()).execute().actionGet().getCount();
+ if (count == numberOfDocs) {
+ logger.info("VERIFIED [{}], count [{}]", i, count);
+ } else {
+ logger.error("FAILED [{}], expected [{}], got [{}]", i, numberOfDocs, count);
+ }
+ }
+
+ logger.info("closing node...");
+ node.close();
+ logger.info("node closed");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java
new file mode 100644
index 0000000..45cbd02
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.List;
+
+public class ManyNodesManyIndicesRecoveryStressTest {
+
+ public static void main(String[] args) throws Exception {
+ final int NUM_NODES = 40;
+ final int NUM_INDICES = 100;
+ final int NUM_DOCS = 2;
+ final int FLUSH_AFTER = 1;
+
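+ // throttle netty to a single high-priority channel per node pair, presumably to keep socket counts manageable with 40 nodes in one JVM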
+ final Settings nodeSettings = ImmutableSettings.settingsBuilder()
+ .put("transport.netty.connections_per_node.low", 0)
+ .put("transport.netty.connections_per_node.med", 0)
+ .put("transport.netty.connections_per_node.high", 1)
+ .build();
+
+ final Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .build();
+
+ List<Node> nodes = Lists.newArrayList();
+ for (int i = 0; i < NUM_NODES; i++) {
+ nodes.add(NodeBuilder.nodeBuilder().settings(ImmutableSettings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
+ }
+ Client client = nodes.get(0).client();
+
+ for (int index = 0; index < NUM_INDICES; index++) {
+ String indexName = "index_" + index;
+ System.out.println("--> Processing index [" + indexName + "]...");
+ client.admin().indices().prepareCreate(indexName).setSettings(indexSettings).execute().actionGet();
+
+ boolean flushed = false;
+ for (int doc = 0; doc < NUM_DOCS; doc++) {
+ if (!flushed && doc > FLUSH_AFTER) {
+ flushed = true;
+ client.admin().indices().prepareFlush(indexName).execute().actionGet();
+ }
+ client.prepareIndex(indexName, "type1", Integer.toString(doc)).setSource("field", "value" + doc).execute().actionGet();
+ }
+ System.out.println("--> DONE index [" + indexName + "]");
+ }
+
+ System.out.println("--> Initiating shutdown");
+ client.admin().cluster().prepareNodesShutdown().setExit(false).execute().actionGet();
+
+ System.out.println("--> Waiting for all nodes to be closed...");
+ while (true) {
+ boolean allAreClosed = true;
+ for (Node node : nodes) {
+ if (!node.isClosed()) {
+ allAreClosed = false;
+ break;
+ }
+ }
+ if (allAreClosed) {
+ break;
+ }
+ Thread.sleep(100);
+ }
+ System.out.println("Waiting a bit for node lock to really be released?");
+ Thread.sleep(5000);
+ System.out.println("--> All nodes are closed, starting back...");
+
+ nodes = Lists.newArrayList();
+ for (int i = 0; i < NUM_NODES; i++) {
+ nodes.add(NodeBuilder.nodeBuilder().settings(ImmutableSettings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
+ }
+ client = nodes.get(0).client();
+
+ System.out.println("--> Waiting for green status");
+ while (true) {
+ ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ System.err.println("--> cluster health timed out..., active shards [" + clusterHealth.getActiveShards() + "]");
+ } else {
+ break;
+ }
+ }
+
+ System.out.println("Verifying counts...");
+ for (int index = 0; index < NUM_INDICES; index++) {
+ String indexName = "index_" + index;
+ CountResponse count = client.prepareCount(indexName).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ if (count.getCount() != NUM_DOCS) {
+ System.err.println("Wrong count value, expected [" + NUM_DOCS + "], got [" + count.getCount() + "] for index [" + indexName + "]");
+ }
+ }
+
+ System.out.println("Test end");
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java b/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java
new file mode 100644
index 0000000..3934b38
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.refresh;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.UUID;
+
+/**
+ */
+public class RefreshStressTest1 {
+
+ public static void main(String[] args) throws InterruptedException {
+ int numberOfShards = 5;
+ Node node = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
+ ImmutableSettings.settingsBuilder()
+ .put("node.name", "node1")
+ .put("gateway.type", "none")
+ .put("index.number_of_shards", numberOfShards)
+ //.put("path.data", new File("target/data").getAbsolutePath())
+ .build()).node();
+ Node node2 = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
+ ImmutableSettings.settingsBuilder()
+ .put("node.name", "node2")
+ .put("gateway.type", "none")
+ .put("index.number_of_shards", numberOfShards)
+ //.put("path.data", new File("target/data").getAbsolutePath())
+ .build()).node();
+ Client client = node.client();
+
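+ // each iteration creates a fresh index and mapping, indexes one doc, refreshes, and expects that doc to be searchable immediately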
+ for (int loop = 1; loop < 1000; loop++) {
+ String indexName = "testindex" + loop;
+ String typeName = "testType" + loop;
+ String id = UUID.randomUUID().toString();
+ String mapping = "{ \"" + typeName + "\" : {\"dynamic_templates\" : [{\"no_analyze_strings\" : {\"match_mapping_type\" : \"string\",\"match\" : \"*\",\"mapping\" : {\"type\" : \"string\",\"index\" : \"not_analyzed\"}}}]}}";
+ client.admin().indices().prepareCreate(indexName).execute().actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(mapping).execute().actionGet();
+// sleep after put mapping
+// Thread.sleep(100);
+
+ System.out.println("indexing " + loop);
+ String name = "name" + id;
+ client.prepareIndex(indexName, typeName, id).setSource("{ \"id\": \"" + id + "\", \"name\": \"" + name + "\" }").execute().actionGet();
+
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+// sleep after refresh
+// Thread.sleep(100);
+
+ System.out.println("searching " + loop);
+ SearchResponse result = client.prepareSearch(indexName).setPostFilter(FilterBuilders.termFilter("name", name)).execute().actionGet();
+ if (result.getHits().hits().length != 1) {
+ for (int i = 1; i <= 100; i++) {
+ System.out.println("retry " + loop + ", " + i + ", previous total hits: " + result.getHits().getTotalHits());
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ Thread.sleep(100);
+ result = client.prepareSearch(indexName).setPostFilter(FilterBuilders.termFilter("name", name)).execute().actionGet();
+ if (result.getHits().hits().length == 1) {
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ result = client.prepareSearch(indexName).setPostFilter(FilterBuilders.termFilter("name", name)).execute().actionGet();
+ throw new RuntimeException("Record found after " + (i * 100) + " ms, second go: " + result.getHits().hits().length);
+ } else if (i == 100) {
+ if (client.prepareGet(indexName, typeName, id).execute().actionGet().isExists())
+ throw new RuntimeException("Record wasn't found after 10s but can be get by id");
+ else throw new RuntimeException("Record wasn't found after 10s and can't be get by id");
+ }
+ }
+ }
+
+ //client.admin().indices().prepareDelete(indexName).execute().actionGet();
+ }
+ client.close();
+ node2.close();
+ node.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java b/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java
new file mode 100644
index 0000000..34e75bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.rollingrestart;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+import java.util.Random;
+
+/**
+ */
+public class QuickRollingRestartStressTest {
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ Random random = new Random();
+
+ Settings settings = ImmutableSettings.settingsBuilder().build();
+
+ Node[] nodes = new Node[5];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ Node client = NodeBuilder.nodeBuilder().client(true).node();
+
+ long COUNT;
+ if (client.client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ throw new ElasticsearchException("failed to wait for green state on startup...");
+ }
+ COUNT = client.client().prepareCount().execute().actionGet().getCount();
+ System.out.println("--> existing index, count [" + COUNT + "]");
+ } else {
+ COUNT = SizeValue.parseSizeValue("100k").singles();
+ System.out.println("--> indexing data...");
+ for (long i = 0; i < COUNT; i++) {
+ client.client().prepareIndex("test", "type", Long.toString(i))
+ .setSource("date", new Date(), "data", RandomStrings.randomAsciiOfLength(random, 10000))
+ .execute().actionGet();
+ }
+ System.out.println("--> done indexing data [" + COUNT + "]");
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ long count = client.client().prepareCount().execute().actionGet().getCount();
+ if (COUNT != count) {
+ System.err.println("--> the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
+ }
+ }
+ }
+
+ final int ROLLING_RESTARTS = 100;
+ System.out.println("--> starting rolling restarts [" + ROLLING_RESTARTS + "]");
+ for (int rollingRestart = 0; rollingRestart < ROLLING_RESTARTS; rollingRestart++) {
+ System.out.println("--> doing rolling restart [" + rollingRestart + "]...");
+ int nodeId = ThreadLocalRandom.current().nextInt();
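+ // restart every node once per round, starting at a random offset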
+ for (int i = 0; i < nodes.length; i++) {
+ int nodeIdx = Math.abs(nodeId++) % nodes.length;
+ nodes[nodeIdx].close();
+ nodes[nodeIdx] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ System.out.println("--> done rolling restart [" + rollingRestart + "]");
+
+ System.out.println("--> waiting for green state now...");
+ ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForRelocatingShards(0).setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> timed out waiting for green state...");
+ ClusterState state = client.client().admin().cluster().prepareState().execute().actionGet().getState();
+ System.out.println(state.nodes().prettyPrint());
+ System.out.println(state.routingTable().prettyPrint());
+ System.out.println(state.routingNodes().prettyPrint());
+ throw new ElasticsearchException("timed out waiting for green state");
+ } else {
+ System.out.println("--> got green status");
+ }
+
+ System.out.println("--> checking data [" + rollingRestart + "]....");
+ boolean failed = false;
+ for (int i = 0; i < 10; i++) {
+ long count = client.client().prepareCount().execute().actionGet().getCount();
+ if (COUNT != count) {
+ failed = true;
+ System.err.println("--> ERROR the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
+ }
+ }
+ if (!failed) {
+ System.out.println("--> count verified");
+ }
+ }
+
+ System.out.println("--> shutting down...");
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java b/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java
new file mode 100644
index 0000000..12aaa5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.rollingrestart;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+import org.elasticsearch.search.SearchHit;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class RollingRestartStressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private int numberOfShards = 5;
+ private int numberOfReplicas = 1;
+ private int numberOfNodes = 4;
+
+ private int textTokens = 150;
+ private int numberOfFields = 10;
+ private long initialNumberOfDocs = 100000;
+
+ private int indexers = 0;
+
+ private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private boolean clearNodeData = true;
+
+ private Node client;
+
+ private AtomicLong indexCounter = new AtomicLong();
+ private AtomicLong idCounter = new AtomicLong();
+
+
+ public RollingRestartStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfShards(int numberOfShards) {
+ this.numberOfShards = numberOfShards;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfReplicas(int numberOfReplicas) {
+ this.numberOfReplicas = numberOfReplicas;
+ return this;
+ }
+
+ public RollingRestartStressTest initialNumberOfDocs(long initialNumberOfDocs) {
+ this.initialNumberOfDocs = initialNumberOfDocs;
+ return this;
+ }
+
+ public RollingRestartStressTest textTokens(int textTokens) {
+ this.textTokens = textTokens;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfFields(int numberOfFields) {
+ this.numberOfFields = numberOfFields;
+ return this;
+ }
+
+ public RollingRestartStressTest indexers(int indexers) {
+ this.indexers = indexers;
+ return this;
+ }
+
+ public RollingRestartStressTest indexerThrottle(TimeValue indexerThrottle) {
+ this.indexerThrottle = indexerThrottle;
+ return this;
+ }
+
+ public RollingRestartStressTest period(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ public RollingRestartStressTest cleanNodeData(boolean clearNodeData) {
+ this.clearNodeData = clearNodeData;
+ return this;
+ }
+
+ public RollingRestartStressTest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public void run() throws Exception {
+ Random random = new Random(0);
+
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ client.client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", numberOfReplicas)
+ ).execute().actionGet();
+
+ logger.info("********** [START] INDEXING INITIAL DOCS");
+ for (long i = 0; i < initialNumberOfDocs; i++) {
+ indexDoc(random);
+ }
+ logger.info("********** [DONE ] INDEXING INITIAL DOCS");
+
+ Indexer[] indexerThreads = new Indexer[indexers];
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i] = new Indexer();
+ }
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i].start();
+ }
+
+ long testStart = System.currentTimeMillis();
+
+ // start doing the rolling restart
+ int nodeIndex = 0;
+ while (true) {
+ File[] nodeData = ((InternalNode) nodes[nodeIndex]).injector().getInstance(NodeEnvironment.class).nodeDataLocations();
+ nodes[nodeIndex].close();
+ if (clearNodeData) {
+ FileSystemUtils.deleteRecursively(nodeData);
+ }
+
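+ // with one data node down the cluster should still report numberOfNodes members (numberOfNodes - 1 data nodes plus the client node)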
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes(Integer.toString(numberOfNodes + 0 /* client node*/))
+ .setWaitForRelocatingShards(0)
+ .setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ Thread.sleep(1000);
+
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
+ .setWaitForRelocatingShards(0)
+ .setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ if (++nodeIndex == nodes.length) {
+ nodeIndex = 0;
+ }
+
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ logger.info("test finished");
+ break;
+ }
+ }
+
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i].close = true;
+ }
+
+ Thread.sleep(indexerThrottle.millis() + 10000);
+
+ for (int i = 0; i < indexerThreads.length; i++) {
+ if (!indexerThreads[i].closed) {
+ logger.warn("thread not closed!");
+ }
+ }
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // check the status
+ IndicesStatusResponse status = client.client().admin().indices().prepareStatus("test").execute().actionGet();
+ for (IndexShardStatus shardStatus : status.getIndex("test")) {
+ ShardStatus shard = shardStatus.getShards()[0];
+ logger.info("shard [{}], docs [{}]", shard.getShardId(), shard.getDocs().getNumDocs());
+ for (ShardStatus replicaStatus : shardStatus) {
+ if (shard.getDocs().getNumDocs() != replicaStatus.getDocs().getNumDocs()) {
+ logger.warn("shard doc counts do not match, got {} and {}", shard.getDocs().getNumDocs(), replicaStatus.getDocs().getNumDocs());
+ }
+ }
+ }
+
+ // check the count
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.info("indexed [{}], count [{}], [{}]", count.getCount(), indexCounter.get(), count.getCount() == indexCounter.get() ? "OK" : "FAIL");
+ if (count.getCount() != indexCounter.get()) {
+ logger.warn("count does not match!");
+ }
+ }
+
+ // scan all the docs, verify all have the same version based on the number of replicas
+ SearchResponse searchResponse = client.client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(50)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+ logger.info("Verifying versions for {} hits...", searchResponse.getHits().totalHits());
+
+ while (true) {
+ searchResponse = client.client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ for (SearchHit hit : searchResponse.getHits()) {
+ long version = -1;
+ for (int i = 0; i < (numberOfReplicas + 1); i++) {
+ GetResponse getResponse = client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
+ if (version == -1) {
+ version = getResponse.getVersion();
+ } else {
+ if (version != getResponse.getVersion()) {
+ logger.warn("Doc {} has different version numbers {} and {}", hit.id(), version, getResponse.getVersion());
+ }
+ }
+ }
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+ logger.info("Done verifying versions");
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private class Indexer extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ Random random = new Random(0);
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ indexDoc(random);
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to index / sleep", e);
+ }
+ }
+ }
+ }
+
+ private void indexDoc(Random random) throws Exception {
+ StringBuilder sb = new StringBuilder();
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value" + ThreadLocalRandom.current().nextInt());
+
+ int fields = Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfFields;
+ for (int i = 0; i < fields; i++) {
+ json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
+ int tokens = Math.abs(ThreadLocalRandom.current().nextInt()) % textTokens; // abs: nextInt() may be negative
+ sb.setLength(0);
+ for (int j = 0; j < tokens; j++) {
+ sb.append(Strings.randomBase64UUID(random)).append(' ');
+ }
+ json.field("text_" + i, sb.toString());
+ }
+
+ json.endObject();
+
+ String id = Long.toString(idCounter.incrementAndGet());
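+ // create=true makes an unexpected duplicate id fail instead of silently overwriting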
+ client.client().prepareIndex("test", "type1", id)
+ .setCreate(true)
+ .setSource(json)
+ .execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ Settings settings = settingsBuilder()
+ .put("index.shard.check_on_startup", true)
+ .put("gateway.type", "none")
+ .put("path.data", "data/data1,data/data2")
+ .build();
+
+ RollingRestartStressTest test = new RollingRestartStressTest()
+ .settings(settings)
+ .numberOfNodes(4)
+ .numberOfShards(5)
+ .numberOfReplicas(1)
+ .initialNumberOfDocs(1000)
+ .textTokens(150)
+ .numberOfFields(10)
+ .cleanNodeData(false)
+ .indexers(5)
+ .indexerThrottle(TimeValue.timeValueMillis(50))
+ .period(TimeValue.timeValueMinutes(3));
+
+ test.run();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java b/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java
new file mode 100644
index 0000000..772db8f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.junit.Ignore;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Tests that data does not get corrupted while being read over the streams.
+ * <p/>
+ * See: https://github.com/elasticsearch/elasticsearch/issues/1686.
+ */
+@Ignore("Stress Test")
+public class ConcurrentSearchSerializationTests {
+
+ public static void main(String[] args) throws Exception {
+
+ Settings settings = ImmutableSettings.settingsBuilder().put("gateway.type", "none").build();
+
+ Node node1 = NodeBuilder.nodeBuilder().settings(settings).node();
+ Node node2 = NodeBuilder.nodeBuilder().settings(settings).node();
+ Node node3 = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ final Client client = node1.client();
+
+ System.out.println("Indexing...");
+ final String data = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), 100);
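+ // every doc carries the same payload, so any hit whose source differs indicates corruption on the transport layer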
+ final CountDownLatch latch1 = new CountDownLatch(100);
+ for (int i = 0; i < 100; i++) {
+ client.prepareIndex("test", "type", Integer.toString(i))
+ .setSource("field", data)
+ .execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse indexResponse) {
+ latch1.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ latch1.countDown();
+ }
+ });
+ }
+ latch1.await();
+ System.out.println("Indexed");
+
+ System.out.println("searching...");
+ Thread[] threads = new Thread[10];
+ final CountDownLatch latch = new CountDownLatch(threads.length);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < 1000; i++) {
+ SearchResponse searchResponse = client.prepareSearch("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setSize(i % 100)
+ .execute().actionGet();
+ for (SearchHit hit : searchResponse.getHits()) {
+ try {
+ if (!hit.sourceAsMap().get("field").equals(data)) {
+ System.err.println("Field not equal!");
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ latch.countDown();
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ latch.await();
+
+ System.out.println("done searching");
+ client.close();
+ node1.close();
+ node2.close();
+ node3.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java b/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java
new file mode 100644
index 0000000..f9a9cd5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.transport.RemoteTransportException;
+
+import java.io.IOException;
+import java.util.*;
+
+
+public class ParentChildStressTest {
+
+ private Node elasticNode;
+ private Client client;
+
+ private static final String PARENT_TYPE_NAME = "content";
+ private static final String CHILD_TYPE_NAME = "contentFiles";
+ private static final String INDEX_NAME = "acme";
+
+ /**
+ * Constructor. Initializes Elasticsearch and creates the index/mapping.
+ */
+ public ParentChildStressTest() {
+ NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder();
+ Settings settings = nodeBuilder.settings()
+ .build();
+ this.elasticNode = nodeBuilder.settings(settings).client(true).node();
+ this.client = this.elasticNode.client();
+
+ String mapping =
+ "{\"contentFiles\": {" +
+ "\"_parent\": {" +
+ "\"type\" : \"content\"" +
+ "}}}";
+
+ try {
+ client.admin().indices().create(new CreateIndexRequest(INDEX_NAME).mapping(CHILD_TYPE_NAME, mapping)).actionGet();
+ } catch (RemoteTransportException e) {
+ // usually means the index is already created.
+ }
+ }
+
+ public void shutdown() {
+ client.close();
+ elasticNode.close();
+ }
+
+ /**
+ * Deletes the item from both the parent and child type locations.
+ */
+ public void deleteById(String id) {
+ client.prepareDelete(INDEX_NAME, PARENT_TYPE_NAME, id).execute().actionGet();
+ client.prepareDelete(INDEX_NAME, CHILD_TYPE_NAME, id).execute().actionGet();
+ }
+
+ /**
+ * Index a parent doc
+ */
+ public void indexParent(String id, Map<String, Object> objectMap) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+
+ // index content
+ client.prepareIndex(INDEX_NAME, PARENT_TYPE_NAME, id).setSource(builder.map(objectMap)).execute().actionGet();
+ }
+
+ /**
+ * Index the file as a child doc
+ */
+ public void indexChild(String id, Map<String, Object> objectMap) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+
+ IndexRequestBuilder indexRequestbuilder = client.prepareIndex(INDEX_NAME, CHILD_TYPE_NAME, id);
+ indexRequestbuilder = indexRequestbuilder.setParent(id);
+ indexRequestbuilder = indexRequestbuilder.setSource(builder.map(objectMap));
+ indexRequestbuilder.execute().actionGet();
+ }
+
+ /**
+ * Execute a search based on a JSON String in QueryDSL format.
+ * <p/>
+ * Throws a RuntimeException if there are any shard failures to
+ * elevate the visibility of the problem.
+ */
+ public List<String> executeSearch(String source) {
+ SearchRequest request = Requests.searchRequest(INDEX_NAME).source(source);
+
+ List<ShardSearchFailure> failures;
+ SearchResponse response;
+
+ response = client.search(request).actionGet();
+ failures = Arrays.asList(response.getShardFailures());
+
+ // throw an exception so that we see the shard failures
+ if (failures.size() != 0) {
+ String failuresStr = failures.toString();
+ if (!failuresStr.contains("reason [No active shards]")) {
+ throw new RuntimeException(failures.toString());
+ }
+ }
+
+ ArrayList<String> results = new ArrayList<String>();
+ if (response != null) {
+ for (SearchHit hit : response.getHits()) {
+ String sourceStr = hit.sourceAsString();
+ results.add(sourceStr);
+ }
+ }
+ return results;
+ }
+
+ /**
+ * Create a document as a parent and index it.
+ * Load a file and index it as a child.
+ */
+ public String indexDoc() throws IOException {
+ String id = UUID.randomUUID().toString();
+
+ Map<String, Object> objectMap = new HashMap<String, Object>();
+ objectMap.put("title", "this is a document");
+
+ Map<String, Object> objectMap2 = new HashMap<String, Object>();
+ objectMap2.put("description", "child test");
+
+ this.indexParent(id, objectMap);
+ this.indexChild(id, objectMap2);
+ return id;
+ }
+
+ /**
+ * Perform the has_child query for the doc.
+ * <p/>
+ * Since it might take time to get indexed, it
+ * loops until it finds the doc.
+ */
+ public void searchDocByChild() throws InterruptedException {
+ String dslString =
+ "{\"query\":{" +
+ "\"has_child\":{" +
+ "\"query\":{" +
+ "\"field\":{" +
+ "\"description\":\"child test\"}}," +
+ "\"type\":\"contentFiles\"}}}";
+
+ int numTries = 0;
+ List<String> items = new ArrayList<String>();
+
+ while (items.size() != 1 && numTries < 20) {
+ items = executeSearch(dslString);
+
+ numTries++;
+ if (items.size() != 1) {
+ Thread.sleep(250);
+ }
+ }
+ if (items.size() != 1) {
+ System.out.println("Exceeded number of retries");
+ System.exit(1);
+ }
+ }
+
+ /**
+ * Program to loop on:
+ * create parent/child doc
+ * search for the doc
+ * delete the doc
+ * repeat the above until shard failure.
+ * <p/>
+ * Eventually fails with:
+ * <p/>
+ * [shard [[74wz0lrXRSmSOsJOqgPvlw][acme][1]], reason [RemoteTransportException
+ * [[Kismet][inet[/10.10.30.52:9300]][search/phase/query]]; nested:
+ * QueryPhaseExecutionException[[acme][1]:
+ * query[ConstantScore(child_filter[contentFiles
+ * /content](filtered(file:mission
+ * file:statement)->FilterCacheFilterWrapper(
+ * _type:contentFiles)))],from[0],size[10]: Query Failed [Failed to execute
+ * child query [filtered(file:mission
+ * file:statement)->FilterCacheFilterWrapper(_type:contentFiles)]]]; nested:
+ * ]]
+ *
+ * @param args
+ */
+ public static void main(String[] args) {
+ ParentChildStressTest elasticTest = new ParentChildStressTest();
+ try {
+ // loop a bunch of times - usually fails before the count is done.
+ int NUM_LOOPS = 1000;
+ System.out.println();
+ System.out.println("Looping [" + NUM_LOOPS + "] times:");
+ System.out.println();
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ String id = elasticTest.indexDoc();
+
+ elasticTest.searchDocByChild();
+
+ elasticTest.deleteById(id);
+
+ System.out.println(" Success: " + i);
+ }
+ elasticTest.shutdown();
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ elasticTest.shutdown();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java
new file mode 100644
index 0000000..a1751ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.junit.Ignore;
+
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+/**
+ *
+ */
+@Ignore("Stress Test")
+public class Search1StressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+
+ private int numberOfNodes = 4;
+
+ private int indexers = 0;
+ private SizeValue preIndexDocs = new SizeValue(0);
+ private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
+ private int searchers = 0;
+ private TimeValue searcherThrottle = TimeValue.timeValueMillis(20);
+ private int numberOfIndices = 10;
+ private int numberOfTypes = 4;
+ private int numberOfValues = 20;
+ private int numberOfHits = 300;
+ private TimeValue flusherThrottle = TimeValue.timeValueMillis(1000);
+ private TimeValue deleteByQueryThrottle = TimeValue.timeValueMillis(5000);
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private AtomicLong indexCounter = new AtomicLong();
+ private AtomicLong searchCounter = new AtomicLong();
+
+
+ private Node client;
+
+ public Search1StressTest setNumberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public Search1StressTest setPreIndexDocs(SizeValue preIndexDocs) {
+ this.preIndexDocs = preIndexDocs;
+ return this;
+ }
+
+ public Search1StressTest setIndexers(int indexers) {
+ this.indexers = indexers;
+ return this;
+ }
+
+ public Search1StressTest setIndexerThrottle(TimeValue indexerThrottle) {
+ this.indexerThrottle = indexerThrottle;
+ return this;
+ }
+
+ public Search1StressTest setSearchers(int searchers) {
+ this.searchers = searchers;
+ return this;
+ }
+
+ public Search1StressTest setSearcherThrottle(TimeValue searcherThrottle) {
+ this.searcherThrottle = searcherThrottle;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfIndices(int numberOfIndices) {
+ this.numberOfIndices = numberOfIndices;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfTypes(int numberOfTypes) {
+ this.numberOfTypes = numberOfTypes;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfValues(int numberOfValues) {
+ this.numberOfValues = numberOfValues;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfHits(int numberOfHits) {
+ this.numberOfHits = numberOfHits;
+ return this;
+ }
+
+ public Search1StressTest setFlusherThrottle(TimeValue flusherThrottle) {
+ this.flusherThrottle = flusherThrottle;
+ return this;
+ }
+
+ public Search1StressTest setDeleteByQueryThrottle(TimeValue deleteByQueryThrottle) {
+ this.deleteByQueryThrottle = deleteByQueryThrottle;
+ return this;
+ }
+
+ public Search1StressTest setSettings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public Search1StressTest setPeriod(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ private String nextIndex() {
+ // nextInt(bound) avoids the Math.abs(Integer.MIN_VALUE) edge case, which stays negative
+ return "test" + ThreadLocalRandom.current().nextInt(numberOfIndices);
+ }
+
+ private String nextType() {
+ return "type" + ThreadLocalRandom.current().nextInt(numberOfTypes);
+ }
+
+ private int nextNumValue() {
+ return ThreadLocalRandom.current().nextInt(numberOfValues);
+ }
+
+ private String nextFieldValue() {
+ return "value" + ThreadLocalRandom.current().nextInt(numberOfValues);
+ }
+
+ private class Searcher extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ String indexName = nextIndex();
+ SearchRequestBuilder builder = client.client().prepareSearch(indexName);
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ builder.addSort("num", SortOrder.DESC);
+ } else if (ThreadLocalRandom.current().nextBoolean()) {
+ // add a _score based sort; it doesn't change the ordering, it just exercises the code path
+ builder.addSort("_score", SortOrder.DESC);
+ }
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ builder.setSearchType(SearchType.DFS_QUERY_THEN_FETCH);
+ }
+ int size = ThreadLocalRandom.current().nextInt(numberOfHits); // nextInt(bound) avoids a negative size from Math.abs(Integer.MIN_VALUE)
+ builder.setSize(size);
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ // update from
+ builder.setFrom(size / 2);
+ }
+ String value = nextFieldValue();
+ builder.setQuery(termQuery("field", value));
+ searchCounter.incrementAndGet();
+ SearchResponse searchResponse = builder.execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ // verify that all come from the requested index
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!hit.shard().index().equals(indexName)) {
+ logger.warn("got wrong index, asked for [{}], got [{}]", indexName, hit.shard().index());
+ }
+ }
+ // verify that all hits have the relevant value
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!value.equals(hit.sourceAsMap().get("field"))) {
+ logger.warn("got wrong field, asked for [{}], got [{}]", value, hit.sourceAsMap().get("field"));
+ }
+ }
+ Thread.sleep(searcherThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to search", e);
+ }
+ }
+ }
+ }
+
+ private class Indexer extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ indexDoc();
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to index / sleep", e);
+ }
+ }
+ }
+ }
+
+ private class Flusher extends Thread {
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ client.client().admin().indices().prepareFlush().execute().actionGet();
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to flush / sleep", e);
+ }
+ }
+ }
+ }
+
+ private class DeleteByQuery extends Thread {
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ client.client().prepareDeleteByQuery().setQuery(termQuery("num", nextNumValue())).execute().actionGet();
+ Thread.sleep(deleteByQueryThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to delete_by_query", e);
+ }
+ }
+ }
+ }
+
+ private void indexDoc() throws Exception {
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("num", nextNumValue())
+ .field("field", nextFieldValue());
+
+ json.endObject();
+
+ client.client().prepareIndex(nextIndex(), nextType())
+ .setSource(json)
+ .execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+
+ public void run() throws Exception {
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
+ }
+
+ logger.info("Pre indexing docs [{}]...", preIndexDocs);
+ for (long i = 0; i < preIndexDocs.singles(); i++) {
+ indexDoc();
+ }
+ logger.info("Done pre indexing docs [{}]", preIndexDocs);
+
+ Indexer[] indexerThreads = new Indexer[indexers];
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i] = new Indexer();
+ }
+ for (Indexer indexerThread : indexerThreads) {
+ indexerThread.start();
+ }
+
+ Thread.sleep(10000);
+
+ Searcher[] searcherThreads = new Searcher[searchers];
+ for (int i = 0; i < searcherThreads.length; i++) {
+ searcherThreads[i] = new Searcher();
+ }
+ for (Searcher searcherThread : searcherThreads) {
+ searcherThread.start();
+ }
+
+ Flusher flusher = null;
+ if (flusherThrottle.millis() > 0) {
+ flusher = new Flusher();
+ flusher.start();
+ }
+
+ DeleteByQuery deleteByQuery = null;
+ if (deleteByQueryThrottle.millis() > 0) {
+ deleteByQuery = new DeleteByQuery();
+ deleteByQuery.start();
+ }
+
+
+ long testStart = System.currentTimeMillis();
+
+ while (true) {
+ Thread.sleep(5000);
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ break;
+ }
+ }
+
+ System.out.println("DONE, closing .....");
+
+ if (flusher != null) {
+ flusher.close = true;
+ }
+
+ if (deleteByQuery != null) {
+ deleteByQuery.close = true;
+ }
+
+ for (Searcher searcherThread : searcherThreads) {
+ searcherThread.close = true;
+ }
+
+ for (Indexer indexerThread : indexerThreads) {
+ indexerThread.close = true;
+ }
+
+ Thread.sleep(indexerThrottle.millis() + 10000);
+
+ if (flusher != null && !flusher.closed) {
+ logger.warn("flusher not closed!");
+ }
+ if (deleteByQuery != null && !deleteByQuery.closed) {
+ logger.warn("deleteByQuery not closed!");
+ }
+ for (Searcher searcherThread : searcherThreads) {
+ if (!searcherThread.closed) {
+ logger.warn("search thread not closed!");
+ }
+ }
+ for (Indexer indexerThread : indexerThreads) {
+ if (!indexerThread.closed) {
+ logger.warn("index thread not closed!");
+ }
+ }
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("********** DONE, indexed [" + indexCounter.get() + "], searched [" + searchCounter.get() + "]");
+ }
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("gateway.type", "none")
+ .build();
+
+ Search1StressTest test = new Search1StressTest()
+ .setPeriod(TimeValue.timeValueMinutes(10))
+ .setSettings(settings)
+ .setNumberOfNodes(2)
+ .setPreIndexDocs(SizeValue.parseSizeValue("100"))
+ .setIndexers(2)
+ .setIndexerThrottle(TimeValue.timeValueMillis(100))
+ .setSearchers(10)
+ .setSearcherThrottle(TimeValue.timeValueMillis(10))
+ .setDeleteByQueryThrottle(TimeValue.timeValueMillis(-1))
+ .setFlusherThrottle(TimeValue.timeValueMillis(1000))
+ .setNumberOfIndices(10)
+ .setNumberOfTypes(5)
+ .setNumberOfValues(50)
+ .setNumberOfHits(300);
+
+ test.run();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java
new file mode 100644
index 0000000..6e2428b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecidersModule;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.lang.reflect.Constructor;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+
+/**
+ */
+public class ElasticsearchAllocationTestCase extends ElasticsearchTestCase {
+
+ public static AllocationService createAllocationService() {
+ return createAllocationService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static AllocationService createAllocationService(Settings settings) {
+ return createAllocationService(settings, getRandom());
+ }
+
+ public static AllocationService createAllocationService(Settings settings, Random random) {
+ return new AllocationService(settings,
+ randomAllocationDeciders(settings, new NodeSettingsService(ImmutableSettings.Builder.EMPTY_SETTINGS), random),
+ new ShardsAllocators(settings), ClusterInfoService.EMPTY);
+ }
+
+ public static AllocationDeciders randomAllocationDeciders(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+ final ImmutableSet<Class<? extends AllocationDecider>> defaultAllocationDeciders = AllocationDecidersModule.DEFAULT_ALLOCATION_DECIDERS;
+ final List<AllocationDecider> list = new ArrayList<AllocationDecider>();
+ for (Class<? extends AllocationDecider> deciderClass : defaultAllocationDeciders) {
+ try {
+ try {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, NodeSettingsService.class);
+ list.add(constructor.newInstance(settings, nodeSettingsService));
+ } catch (NoSuchMethodException e) {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class);
+ list.add(constructor.newInstance(settings));
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ assertThat(list.size(), equalTo(defaultAllocationDeciders.size()));
+ for (AllocationDecider d : list) {
+ assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
+ }
+ Collections.shuffle(list, random);
+ return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+
+ }
+
+ public static DiscoveryNode newNode(String nodeId) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, TransportAddress address) {
+ return new DiscoveryNode(nodeId, address, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Version version) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, version);
+ }
+
+ public static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {
+ List<MutableShardRouting> initializingShards = clusterState.routingNodes().shardsWithState(INITIALIZING);
+ if (initializingShards.isEmpty()) {
+ return clusterState;
+ }
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, newArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))).routingTable();
+ return ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
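+
+ // Typical usage sketch (hypothetical test body): keep starting random initializing
+ // shards until the routing table has none left.
+ //
+ //   AllocationService strategy = createAllocationService();
+ //   ClusterState state = ...; // built by the individual test
+ //   while (!state.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ //       state = startRandomInitializingShard(state, strategy);
+ //   }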
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
new file mode 100644
index 0000000..4be71bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
@@ -0,0 +1,878 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.google.common.base.Joiner;
+import org.apache.lucene.util.AbstractRandomizedTest;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.client.RandomizingClient;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.TestCluster.clusterName;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * {@link ElasticsearchIntegrationTest} is an abstract base class to run integration
+ * tests against a JVM private Elasticsearch Cluster. The test class supports 3 different
+ * cluster scopes.
+ * <ul>
+ * <li>{@link Scope#GLOBAL} - uses a cluster shared across test suites. This cluster doesn't allow any modifications to
+ * the cluster settings and will fail if any persistent cluster settings are applied during tear down.</li>
+ * <li>{@link Scope#TEST} - uses a new cluster for each individual test method.</li>
+ * <li>{@link Scope#SUITE} - uses a cluster shared across all test methods in the same suite</li>
+ * </ul>
+ * <p/>
+ * The most common test scope is {@link Scope#GLOBAL}, which shares a cluster per JVM. This cluster is only set up once
+ * and can be used as long as the tests work on a per-index basis without changing any cluster-wide settings or requiring
+ * any specific node configuration. This is the best performing option since it sets up the cluster only once.
+ * <p/>
+ * If the tests need specific node settings or change persistent and/or transient cluster settings either {@link Scope#TEST}
+ * or {@link Scope#SUITE} should be used. To configure a scope for the test cluster the {@link ClusterScope} annotation
+ * should be used, here is an example:
+ * <pre>
+ *
+ * @ClusterScope(scope=Scope.TEST) public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
+ * @Test
+ * public void testMethod() {}
+ * }
+ * </pre>
+ * <p/>
+ * If no {@link ClusterScope} annotation is present on an integration test the default scope is {@link Scope#GLOBAL}.
+ * <p/>
+ * A test cluster creates a set of nodes in the background before the test starts. The number of nodes in the cluster is
+ * determined at random and can change across tests. The minimum number of nodes in the shared global cluster is <code>2</code>.
+ * For other scopes the {@link ClusterScope} allows configuring the initial number of nodes that are created before
+ * the tests start.
+ * <p/>
+ * <pre>
+ * @ClusterScope(scope=Scope.SUITE, numNodes=3)
+ * public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
+ * @Test
+ * public void testMethod() {}
+ * }
+ * </pre>
+ * <p/>
+ * Note, the {@link ElasticsearchIntegrationTest} uses randomized settings on a cluster and index level. For instance
+ * each test might use a different directory implementation, or each call to {@link #client()} might return a random
+ * client to one of the nodes in the cluster. Test failures might only be reproducible if the correct
+ * system properties are passed to the test execution environment.
+ * <p/>
+ * <p>
+ * This class supports the following system properties (passed with -Dkey=value to the application)
+ * <ul>
+ * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ratio between node and transport clients used</li>
+ * <li>-D{@value TestCluster#TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
+ * useful to test the system without the asserting mock modules, making sure they don't hide any bugs that would show up in production.</li>
+ * <li>-D{@value org.elasticsearch.test.TestCluster#SETTING_INDEX_SEED} - a random seed used to initialize the index random context.</li>
+ * </ul>
+ * </p>
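+ * <p>
+ * For example (a sketch; the exact invocation depends on the build environment, Maven is assumed here):
+ * <pre>
+ * mvn test -Dtests.client.ratio=0.5
+ * </pre>
+ * would request a roughly even mix of node and transport clients.
+ * </p>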
+ */
+@Ignore
+@AbstractRandomizedTest.IntegrationTests
+public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase {
+ private static TestCluster GLOBAL_CLUSTER;
+
+ /**
+ * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
+ */
+ public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
+
+ /**
+ * The current cluster depending on the configured {@link Scope}.
+ * By default if no {@link ClusterScope} is configured this will hold a reference to the global cluster carried
+ * on across test suites.
+ */
+ private static TestCluster currentCluster;
+
+ private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
+
+ private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<Class<?>, TestCluster>();
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ // Initialize lazily. No need for volatiles/ CASs since each JVM runs at most one test
+ // suite at any given moment.
+ if (GLOBAL_CLUSTER == null) {
+ long masterSeed = SeedUtils.parseSeed(RandomizedContext.current().getRunnerSeedAsString());
+ GLOBAL_CLUSTER = new TestCluster(masterSeed, clusterName("shared", ElasticsearchTestCase.CHILD_VM_ID, masterSeed));
+ }
+ }
+
+ @Before
+ public final void before() throws IOException {
+ assert Thread.getDefaultUncaughtExceptionHandler() instanceof ElasticsearchUncaughtExceptionHandler;
+ try {
+ final Scope currentClusterScope = getCurrentClusterScope();
+ switch (currentClusterScope) {
+ case GLOBAL:
+ clearClusters();
+ currentCluster = GLOBAL_CLUSTER;
+ break;
+ case SUITE:
+ currentCluster = buildAndPutCluster(currentClusterScope, false);
+ break;
+ case TEST:
+ currentCluster = buildAndPutCluster(currentClusterScope, true);
+ break;
+ default:
+ fail("Unknown Scope: [" + currentClusterScope + "]");
+ }
+ currentCluster.beforeTest(getRandom(), getPerTestTransportClientRatio());
+ cluster().wipe();
+ cluster().randomIndexTemplate();
+ logger.info("[{}#{}]: before test", getTestClass().getSimpleName(), getTestName());
+ } catch (OutOfMemoryError e) {
+ if (e.getMessage().contains("unable to create new native thread")) {
+ ElasticsearchTestCase.printStackDump(logger);
+ }
+ throw e;
+ }
+ }
+
+ public TestCluster buildAndPutCluster(Scope currentClusterScope, boolean createIfExists) throws IOException {
+ TestCluster testCluster = clusters.get(this.getClass());
+ if (createIfExists || testCluster == null) {
+ testCluster = buildTestCluster(currentClusterScope);
+ } else {
+ clusters.remove(this.getClass());
+ }
+ clearClusters();
+ clusters.put(this.getClass(), testCluster);
+ return testCluster;
+ }
+
+ private void clearClusters() throws IOException {
+ if (!clusters.isEmpty()) {
+ for (TestCluster cluster : clusters.values()) {
+ cluster.close();
+ }
+ clusters.clear();
+ }
+ }
+
+ @After
+ public final void after() throws IOException {
+ try {
+ logger.info("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName());
+ Scope currentClusterScope = getCurrentClusterScope();
+ if (currentClusterScope == Scope.TEST) {
+ clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST
+ } else {
+ MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
+ assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData
+ .persistentSettings().getAsMap().size(), equalTo(0));
+ assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData
+ .transientSettings().getAsMap().size(), equalTo(0));
+
+ }
+ cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete
+ cluster().assertAfterTest();
+ logger.info("[{}#{}]: cleaned up after test", getTestClass().getSimpleName(), getTestName());
+ } catch (OutOfMemoryError e) {
+ if (e.getMessage().contains("unable to create new native thread")) {
+ ElasticsearchTestCase.printStackDump(logger);
+ }
+ throw e;
+ } finally {
+ currentCluster.afterTest();
+ currentCluster = null;
+ }
+ }
+
+ public static TestCluster cluster() {
+ return currentCluster;
+ }
+
+ public ClusterService clusterService() {
+ return cluster().clusterService();
+ }
+
+ public static Client client() {
+ Client client = cluster().client();
+ if (frequently()) {
+ client = new RandomizingClient((InternalClient) client, getRandom());
+ }
+ return client;
+ }
+
+ public static Iterable<Client> clients() {
+ return cluster();
+ }
+
+ /**
+ * Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends.
+ * This method can be overridden by subclasses to set defaults for the indices that are created by the test.
+ * By default it returns an empty settings object.
+ */
+ public Settings indexSettings() {
+ return ImmutableSettings.EMPTY;
+ }
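+
+ // A minimal sketch of a subclass override (hypothetical values), forcing every index
+ // created through createIndex/prepareCreate to a fixed shard/replica layout:
+ //
+ //   @Override
+ //   public Settings indexSettings() {
+ //       return ImmutableSettings.builder()
+ //               .put("index.number_of_shards", 3)
+ //               .put("index.number_of_replicas", 1)
+ //               .build();
+ //   }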
+
+ /**
+ * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices
+ * already exists this method will fail and wipe all the indices created so far.
+ */
+ public final void createIndex(String... names) {
+
+ List<String> created = new ArrayList<String>();
+ for (String name : names) {
+ boolean success = false;
+ try {
+ assertAcked(prepareCreate(name));
+ created.add(name);
+ success = true;
+ } finally {
+ if (!success && !created.isEmpty()) {
+ cluster().wipeIndices(created.toArray(new String[created.size()]));
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index) {
+ return client().admin().indices().prepareCreate(index).setSettings(indexSettings());
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index, int numNodes) {
+ return prepareCreate(index, numNodes, ImmutableSettings.builder());
+ }
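+
+ // Usage sketch (hypothetical test body): create an index that may only allocate
+ // on 2 of the cluster's nodes, then wait for it to become green:
+ //
+ //   assertAcked(prepareCreate("test", 2));
+ //   ensureGreen("test");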
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, ImmutableSettings.Builder builder) {
+ cluster().ensureAtLeastNumNodes(numNodes);
+ Settings settings = indexSettings();
+ builder.put(settings);
+ if (numNodes > 0) {
+ getExcludeSettings(index, numNodes, builder);
+ }
+ return client().admin().indices().prepareCreate(index).setSettings(builder.build());
+ }
+
+ private ImmutableSettings.Builder getExcludeSettings(String index, int num, ImmutableSettings.Builder builder) {
+ String exclude = Joiner.on(',').join(cluster().allButN(num));
+ builder.put("index.routing.allocation.exclude._name", exclude);
+ return builder;
+ }
+
+ /**
+ * Restricts the given index to be allocated on <code>n</code> nodes using the allocation deciders.
+ * However, if the shards can't be allocated on any other node, shards for this index will remain allocated on
+ * more than <code>n</code> nodes.
+ */
+ public void allowNodes(String index, int n) {
+ assert index != null;
+ cluster().ensureAtLeastNumNodes(n);
+ ImmutableSettings.Builder builder = ImmutableSettings.builder();
+ if (n > 0) {
+ getExcludeSettings(index, n, builder);
+ }
+ Settings build = builder.build();
+ if (!build.getAsMap().isEmpty()) {
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+ }
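+
+ // Usage sketch (hypothetical): shrinking the set of allowed nodes triggers relocations;
+ // waitForRelocation(...) can then be used to block until they have finished:
+ //
+ //   allowNodes("test", 1);
+ //   waitForRelocation(ClusterHealthStatus.GREEN);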
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all actions on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ */
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Waits for all relocating shards to become active using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation() {
+ return waitForRelocation(null);
+ }
+
+ /**
+ * Waits for all relocating shards to become active and the cluster has reached the given health status
+ * using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
+ ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
+ if (status != null) {
+ request.waitForStatus(status);
+ }
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(request).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
+ }
+ if (status != null) {
+ assertThat(actionGet.getStatus(), equalTo(status));
+ }
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Sets the cluster's minimum master nodes setting and makes sure the response is acknowledged.
+ * Note: this doesn't guarantee that the new setting is in effect, just that it has been received by all nodes.
+ */
+ public void setMinimumMasterNodes(int n) {
+ assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n))
+ .get().isAcknowledged());
+ }
+
+ /**
+ * Ensures the cluster has a yellow state via the cluster health API.
+ */
+ public ClusterHealthStatus ensureYellow(String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false));
+ }
+ return actionGet.getStatus();
+ }
+
+
+ /**
+ * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each
+ * shard is available on the cluster.
+ */
+ protected ClusterHealthStatus ensureSearchable(String... indices) {
+ // this is just a temporary thing but it's easier to change if it is encapsulated.
+ return ensureGreen(indices);
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, XContentBuilder source) {
+ return client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Map<String, Object> source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareGet(index, type, id).execute().actionGet();
+ * </pre>
+ */
+ protected final GetResponse get(String index, String type, String id) {
+ return client().prepareGet(index, type, id).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, XContentBuilder source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Object... source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Waits for relocations and refreshes all indices in the cluster.
+ *
+ * @see #waitForRelocation()
+ */
+ protected final RefreshResponse refresh() {
+ waitForRelocation();
+ // TODO RANDOMIZE with flush?
+ RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Flushes and refreshes all indices in the cluster
+ */
+ protected final void flushAndRefresh() {
+ flush(true);
+ refresh();
+ }
+
+ /**
+ * Flushes all indices in the cluster
+ */
+ protected final FlushResponse flush() {
+ return flush(true);
+ }
+
+ private FlushResponse flush(boolean ignoreNotAllowed) {
+ waitForRelocation();
+ FlushResponse actionGet = client().admin().indices().prepareFlush().execute().actionGet();
+ if (ignoreNotAllowed) {
+ for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
+ assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+ } else {
+ assertNoFailures(actionGet);
+ }
+ return actionGet;
+ }
+
+ /**
+ * Waits for all relocations and optimizes all indices in the cluster down to 1 segment.
+ */
+ protected OptimizeResponse optimize() {
+ waitForRelocation();
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Returns <code>true</code> iff the given index exists, otherwise <code>false</code>.
+ */
+ protected boolean indexExists(String index) {
+ IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
+ return actionGet.isExists();
+ }
+
+ /**
+ * Returns a random admin client. This client can either be a node or a transport client pointing to any of
+ * the nodes in the cluster.
+ */
+ protected AdminClient admin() {
+ return client().admin();
+ }
+
+ /**
+ * Convenience method that forwards to {@link #indexRandom(boolean, List)}.
+ */
+ public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, Arrays.asList(builders));
+ }
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creation. Some features might have bugs when a given document is the first or the last in a
+ * segment, or if only one document is in a segment, etc. This method prevents issues like this by randomizing the index
+ * layout.
+ */
+ public void indexRandom(boolean forceRefresh, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+ if (builders.size() == 0) {
+ return;
+ }
+
+ Random random = getRandom();
+ Set<String> indicesSet = new HashSet<String>();
+ for (IndexRequestBuilder builder : builders) {
+ indicesSet.add(builder.request().index());
+ }
+ final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
+ Collections.shuffle(builders, random);
+ final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>>();
+ List<CountDownLatch> latches = new ArrayList<CountDownLatch>();
+ if (frequently()) {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
+ final CountDownLatch latch = new CountDownLatch(builders.size());
+ latches.add(latch);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, latch, errors));
+ if (rarely()) {
+ if (rarely()) {
+ client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<RefreshResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<FlushResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize(indices).setIndicesOptions(IndicesOptions.lenient()).setMaxNumSegments(between(1, 10)).setFlush(random.nextBoolean()).execute(new LatchedActionListener<OptimizeResponse>(newLatch(latches)));
+ }
+ }
+ }
+
+ } else if (randomBoolean()) {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute().actionGet();
+ if (rarely()) {
+ if (rarely()) {
+ client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<RefreshResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<FlushResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize(indices).setIndicesOptions(IndicesOptions.lenient()).setMaxNumSegments(between(1, 10)).setFlush(random.nextBoolean()).execute(new LatchedActionListener<OptimizeResponse>(newLatch(latches)));
+ }
+ }
+ }
+ } else {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, true);
+ BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ bulkBuilder.add(indexRequestBuilder);
+ }
+ BulkResponse actionGet = bulkBuilder.execute().actionGet();
+ assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
+ }
+ for (CountDownLatch countDownLatch : latches) {
+ countDownLatch.await();
+ }
+ final List<Throwable> actualErrors = new ArrayList<Throwable>();
+ for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
+ if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
+ tuple.v1().execute().actionGet(); // re-index if rejected
+ } else {
+ actualErrors.add(tuple.v2());
+ }
+ }
+ assertThat(actualErrors, emptyIterable());
+ if (forceRefresh) {
+ assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenient()).execute().get());
+ }
+ }
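+
+ // Usage sketch (hypothetical test body): build a handful of index requests and let
+ // indexRandom decide whether to send them async, sync, or as one bulk request:
+ //
+ //   List<IndexRequestBuilder> docs = new ArrayList<IndexRequestBuilder>();
+ //   for (int i = 0; i < 10; i++) {
+ //       docs.add(client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i));
+ //   }
+ //   indexRandom(true, docs); // true == force a refresh so the docs are searchable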
+
+ private static CountDownLatch newLatch(List<CountDownLatch> latches) {
+ CountDownLatch l = new CountDownLatch(1);
+ latches.add(l);
+ return l;
+ }
+
+ private class LatchedActionListener<Response> implements ActionListener<Response> {
+ private final CountDownLatch latch;
+
+ public LatchedActionListener(CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public final void onResponse(Response response) {
+ latch.countDown();
+ }
+
+ @Override
+ public final void onFailure(Throwable t) {
+ try {
+ logger.info("Action Failed", t);
+ addError(t);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ protected void addError(Throwable t) {
+ }
+
+ }
+
+ private class PayloadLatchedActionListener<Response, T> extends LatchedActionListener<Response> {
+ private final CopyOnWriteArrayList<Tuple<T, Throwable>> errors;
+ private final T builder;
+
+ public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList<Tuple<T, Throwable>> errors) {
+ super(latch);
+ this.errors = errors;
+ this.builder = builder;
+ }
+
+ protected void addError(Throwable t) {
+ errors.add(new Tuple<T, Throwable>(builder, t));
+ }
+
+ }
+
+ /**
+ * Clears the given scroll Ids
+ */
+ public void clearScroll(String... scrollIds) {
+ ClearScrollResponse clearResponse = client().prepareClearScroll()
+ .setScrollIds(Arrays.asList(scrollIds)).get();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+ }
+
+
+ /**
+ * The scope of a test cluster used together with
+ * {@link ClusterScope} annotations on {@link ElasticsearchIntegrationTest} subclasses.
+ */
+ public static enum Scope {
+ /**
+ * A globally shared cluster. This cluster doesn't allow modification of transient or persistent
+ * cluster settings.
+ */
+ GLOBAL,
+ /**
+ * A cluster shared across all methods in a single test suite
+ */
+ SUITE,
+ /**
+ * A cluster exclusive to a single test
+ */
+ TEST
+ }
+
+ private ClusterScope getAnnotation(Class<?> clazz) {
+ if (clazz == Object.class || clazz == ElasticsearchIntegrationTest.class) {
+ return null;
+ }
+ ClusterScope annotation = clazz.getAnnotation(ClusterScope.class);
+ if (annotation != null) {
+ return annotation;
+ }
+ return getAnnotation(clazz.getSuperclass());
+ }
+
+ private Scope getCurrentClusterScope() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ // if we are not annotated assume global!
+ return annotation == null ? Scope.GLOBAL : annotation.scope();
+ }
+
+ private int getNumNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? -1 : annotation.numNodes();
+ }
+
+ private int getMinNumNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? TestCluster.DEFAULT_MIN_NUM_NODES : annotation.minNumNodes();
+ }
+
+ private int getMaxNumNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? TestCluster.DEFAULT_MAX_NUM_NODES : annotation.maxNumNodes();
+ }
+
+ /**
+ * This method is used to obtain settings for the <tt>Nth</tt> node in the cluster.
+ * Nodes in this cluster are associated with an ordinal number such that nodes can
+ * be started with specific configurations. This method might be called multiple
+ * times with the same ordinal and is expected to return the same value for each invocation.
+ * In other words, subclasses must ensure this method is idempotent.
+ */
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.EMPTY;
+ }
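+
+ // A minimal sketch (added for illustration) of overriding nodeSettings(int) in a subclass.
+ // The key "node.custom.attr" is invented; the returned settings depend only on the ordinal,
+ // which keeps the override idempotent as required above.
+ //
+ // @Override
+ // protected Settings nodeSettings(int nodeOrdinal) {
+ //     return ImmutableSettings.settingsBuilder().put("node.custom.attr", nodeOrdinal).build();
+ // }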
+
+ private TestCluster buildTestCluster(Scope scope) {
+ long currentClusterSeed = randomLong();
+ int numNodes = getNumNodes();
+ NodeSettingsSource nodeSettingsSource;
+ if (numNodes > 0) {
+ NodeSettingsSource.Immutable.Builder nodesSettings = NodeSettingsSource.Immutable.builder();
+ for (int i = 0; i < numNodes; i++) {
+ nodesSettings.set(i, nodeSettings(i));
+ }
+ nodeSettingsSource = nodesSettings.build();
+ } else {
+ nodeSettingsSource = new NodeSettingsSource() {
+ @Override
+ public Settings settings(int nodeOrdinal) {
+ return nodeSettings(nodeOrdinal);
+ }
+ };
+ }
+
+ int minNumNodes, maxNumNodes;
+ if (numNodes >= 0) {
+ minNumNodes = maxNumNodes = numNodes;
+ } else {
+ minNumNodes = getMinNumNodes();
+ maxNumNodes = getMaxNumNodes();
+ }
+
+ return new TestCluster(currentClusterSeed, minNumNodes, maxNumNodes, clusterName(scope.name(), ElasticsearchTestCase.CHILD_VM_ID, currentClusterSeed), nodeSettingsSource);
+ }
+
+ /**
+ * Defines a cluster scope for an {@link ElasticsearchIntegrationTest} subclass.
+ * By default, if no {@link ClusterScope} annotation is present, {@link Scope#GLOBAL} is used
+ * together with randomly chosen settings like number of nodes etc.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target({ElementType.TYPE})
+ public @interface ClusterScope {
+ /**
+ * Returns the scope. {@link Scope#GLOBAL} is default.
+ */
+ Scope scope() default Scope.GLOBAL;
+
+ /**
+ * Returns the number of nodes in the cluster. Default is <tt>-1</tt> which means
+ * a random number of nodes is used, where the minimum and maximum number of nodes
+ * are either the specified ones or the default ones if not specified.
+ */
+ int numNodes() default -1;
+
+ /**
+ * Returns the minimum number of nodes in the cluster. Default is {@link TestCluster#DEFAULT_MIN_NUM_NODES}.
+ * Ignored when {@link ClusterScope#numNodes()} is set.
+ */
+ int minNumNodes() default TestCluster.DEFAULT_MIN_NUM_NODES;
+
+ /**
+ * Returns the maximum number of nodes in the cluster. Default is {@link TestCluster#DEFAULT_MAX_NUM_NODES}.
+ * Ignored when {@link ClusterScope#numNodes()} is set.
+ */
+ int maxNumNodes() default TestCluster.DEFAULT_MAX_NUM_NODES;
+
+ /**
+ * Returns the transport client ratio. By default this returns <code>-1</code> which means a random
+ * ratio in the interval <code>[0..1]</code> is used.
+ */
+ double transportClientRatio() default -1;
+ }
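+
+ // Hypothetical usage sketch: a suite pinning its cluster topology via @ClusterScope.
+ // The class name is invented; the attributes are the ones defined above.
+ //
+ // @ClusterScope(scope = Scope.SUITE, numNodes = 3, transportClientRatio = 0)
+ // public class MyShardAllocationTests extends ElasticsearchIntegrationTest {
+ //     // all test methods in this suite share one fixed three-node cluster
+ // }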
+
+ /**
+ * Returns the transport client ratio configured via the <tt>TESTS_CLIENT_RATIO</tt> system property,
+ * or {@link Double#NaN} if the property is not set.
+ */
+ private static double transportClientRatio() {
+ String property = System.getProperty(TESTS_CLIENT_RATIO);
+ if (property == null || property.isEmpty()) {
+ return Double.NaN;
+ }
+ return Double.parseDouble(property);
+ }
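+
+ // Note (illustrative, assuming TESTS_CLIENT_RATIO resolves to a system property name such as
+ // "tests.client.ratio"): the ratio can be forced from the command line, e.g.
+ //     mvn test -Dtests.client.ratio=0.5
+ // Double.NaN is returned when the property is absent, signalling "pick a random ratio".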
+
+ /**
+ * Returns the transport client ratio from the class-level annotation or via
+ * {@link System#getProperty(String)} if available. If neither is available, this will
+ * return a random ratio in the interval <tt>[0..1]</tt>.
+ */
+ protected double getPerTestTransportClientRatio() {
+ final ClusterScope annotation = getAnnotation(this.getClass());
+ double perTestRatio = -1;
+ if (annotation != null) {
+ perTestRatio = annotation.transportClientRatio();
+ }
+ if (perTestRatio == -1) {
+ return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? randomDouble() : TRANSPORT_CLIENT_RATIO;
+ }
+ assert perTestRatio >= 0.0 && perTestRatio <= 1.0;
+ return perTestRatio;
+ }
+
+ /**
+ * Returns a random numeric field data format from the choices of "array",
+ * "compressed", or "doc_values".
+ */
+ public static String randomNumericFieldDataFormat() {
+ return randomFrom(Arrays.asList("array", "compressed", "doc_values"));
+ }
+
+ /**
+ * Returns a random bytes field data format from the choices of
+ * "paged_bytes", "fst", or "doc_values".
+ */
+ public static String randomBytesFieldDataFormat() {
+ return randomFrom(Arrays.asList("paged_bytes", "fst", "doc_values"));
+ }
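+
+ // Illustrative sketch: these helpers randomize the fielddata "format" of a mapped field.
+ // A hypothetical use in a test (index, type and field names invented):
+ //
+ // client().admin().indices().preparePutMapping("test").setType("type1")
+ //         .setSource("{\"type1\":{\"properties\":{\"body\":{\"type\":\"string\","
+ //                 + "\"fielddata\":{\"format\":\"" + randomBytesFieldDataFormat() + "\"}}}}}")
+ //         .get();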
+
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java
new file mode 100644
index 0000000..e9eebbe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.*;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+
+/**
+ * Base test case for Lucene-based testing. This class should be used when low-level Lucene features are tested.
+ */
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
+@ThreadLeakScope(Scope.NONE)
+@TimeoutSuite(millis = TimeUnits.HOUR)
+@SuppressCodecs("Lucene3x")
+public abstract class ElasticsearchLuceneTestCase extends LuceneTestCase {
+
+ private static final Codec DEFAULT_CODEC = Codec.getDefault();
+
+ /**
+ * Forcefully reset the default codec
+ */
+ public static void forceDefaultCodec() {
+ Codec.setDefault(DEFAULT_CODEC);
+ }
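+
+ // Illustrative note: a subclass that swaps the codec (e.g. Codec.setDefault(...) in a
+ // @BeforeClass hook) can restore the JVM-wide default captured above from a matching hook:
+ //
+ // @AfterClass
+ // public static void restoreCodec() {
+ //     forceDefaultCodec();
+ // }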
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java
new file mode 100644
index 0000000..eee6a2c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.*;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.AbstractRandomizedTest;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.net.URI;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllFilesClosed;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSearchersClosed;
+
+/**
+ * Base testcase for randomized unit testing with Elasticsearch
+ */
+@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
+@ThreadLeakScope(Scope.NONE)
+@TimeoutSuite(millis = 20 * TimeUnits.MINUTE) // timeout the suite after 20min and fail the test.
+@Listeners(LoggingListener.class)
+public abstract class ElasticsearchTestCase extends AbstractRandomizedTest {
+
+ private static Thread.UncaughtExceptionHandler defaultHandler;
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ public static final String CHILD_VM_ID = System.getProperty("junit4.childvm.id", "" + System.currentTimeMillis());
+
+ public static final String TESTS_SECURITY_MANAGER = System.getProperty("tests.security.manager");
+
+ public static final String JAVA_SECURITY_POLICY = System.getProperty("java.security.policy");
+
+ public static final boolean ASSERTIONS_ENABLED;
+ static {
+ boolean enabled = false;
+ assert enabled = true;
+ ASSERTIONS_ENABLED = enabled;
+ if (Boolean.parseBoolean(Strings.hasLength(TESTS_SECURITY_MANAGER) ? TESTS_SECURITY_MANAGER : "true") && JAVA_SECURITY_POLICY != null) {
+ System.setSecurityManager(new SecurityManager());
+ }
+
+ }
+
+ public static boolean awaitBusy(Predicate<?> breakPredicate) throws InterruptedException {
+ return awaitBusy(breakPredicate, 10, TimeUnit.SECONDS);
+ }
+
+ public static boolean awaitBusy(Predicate<?> breakPredicate, long maxWaitTime, TimeUnit unit) throws InterruptedException {
+ long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
+ long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1);
+ long timeInMillis = 1;
+ long sum = 0;
+ for (int i = 0; i < iterations; i++) {
+ if (breakPredicate.apply(null)) {
+ return true;
+ }
+ sum += timeInMillis;
+ Thread.sleep(timeInMillis);
+ timeInMillis *= 2;
+ }
+ timeInMillis = maxTimeInMillis - sum;
+ Thread.sleep(Math.max(timeInMillis, 0));
+ return breakPredicate.apply(null);
+ }
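+
+ // Usage sketch (hypothetical): waiting up to 30 seconds for a condition, with the
+ // exponentially growing sleep intervals implemented above. conditionUnderTest() stands
+ // in for whatever check the calling test needs.
+ //
+ // boolean satisfied = awaitBusy(new Predicate<Object>() {
+ //     @Override
+ //     public boolean apply(Object input) {
+ //         return conditionUnderTest();
+ //     }
+ // }, 30, TimeUnit.SECONDS);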
+
+ private static final String[] numericTypes = new String[]{"byte", "short", "integer", "long"};
+
+ public static String randomNumericType(Random random) {
+ return numericTypes[random.nextInt(numericTypes.length)];
+ }
+
+ /**
+ * Returns a {@link File} pointing to the classpath-relative resource given
+ * as the argument. In contrast to
+ * <code>getClass().getResource(...).getFile()</code> this method will not
+ * return URL encoded paths if the parent path contains spaces or other
+ * non-standard characters.
+ */
+ public File getResource(String relativePath) {
+ URI uri = URI.create(getClass().getResource(relativePath).toString());
+ return new File(uri);
+ }
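+
+ // Example (hypothetical resource path): resolving a test fixture without the URL-encoding
+ // pitfalls described above.
+ //
+ // File fixture = getResource("/org/elasticsearch/test/some-fixture.json");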
+
+ @After
+ public void ensureAllPagesReleased() {
+ MockPageCacheRecycler.ensureAllPagesAreReleased();
+ }
+
+ public static boolean hasUnclosedWrapper() {
+ for (MockDirectoryWrapper w : MockDirectoryHelper.wrappers) {
+ if (w.isOpen()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @BeforeClass
+ public static void registerMockDirectoryHooks() throws Exception {
+ closeAfterSuite(new Closeable() {
+ @Override
+ public void close() throws IOException {
+ assertAllFilesClosed();
+ }
+ });
+
+ closeAfterSuite(new Closeable() {
+ @Override
+ public void close() throws IOException {
+ assertAllSearchersClosed();
+ }
+ });
+ defaultHandler = Thread.getDefaultUncaughtExceptionHandler();
+ Thread.setDefaultUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(defaultHandler));
+ }
+
+ @AfterClass
+ public static void resetUncaughtExceptionHandler() {
+ Thread.setDefaultUncaughtExceptionHandler(defaultHandler);
+ }
+
+ public static boolean maybeDocValues() {
+ return LuceneTestCase.defaultCodecSupportsSortedSet() && randomBoolean();
+ }
+
+ private static final List<Version> SORTED_VERSIONS;
+
+ static {
+ Field[] declaredFields = Version.class.getDeclaredFields();
+ Set<Integer> ids = new HashSet<Integer>();
+ for (Field field : declaredFields) {
+ final int mod = field.getModifiers();
+ if (Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
+ if (field.getType() == Version.class) {
+ try {
+ Version object = (Version) field.get(null);
+ ids.add(object.id);
+ } catch (Throwable e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+ List<Integer> idList = new ArrayList<Integer>(ids);
+ Collections.sort(idList);
+ Collections.reverse(idList);
+ ImmutableList.Builder<Version> version = ImmutableList.builder();
+ for (Integer integer : idList) {
+ version.add(Version.fromId(integer));
+ }
+ SORTED_VERSIONS = version.build();
+ }
+
+ public static Version getPreviousVersion() {
+ Version version = SORTED_VERSIONS.get(1);
+ assert version.before(Version.CURRENT);
+ return version;
+ }
+
+ public static Version randomVersion() {
+ return randomVersion(getRandom());
+ }
+
+ public static Version randomVersion(Random random) {
+ return SORTED_VERSIONS.get(random.nextInt(SORTED_VERSIONS.size()));
+ }
+
+ static final class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
+
+ private final Thread.UncaughtExceptionHandler parent;
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private ElasticsearchUncaughtExceptionHandler(Thread.UncaughtExceptionHandler parent) {
+ this.parent = parent;
+ }
+
+
+ @Override
+ public void uncaughtException(Thread t, Throwable e) {
+ if (e instanceof EsRejectedExecutionException) {
+ if (e.getMessage().contains(EsAbortPolicy.SHUTTING_DOWN_KEY)) {
+ return; // ignore the EsRejectedExecutionException when a node shuts down
+ }
+ } else if (e instanceof OutOfMemoryError) {
+ if (e.getMessage().contains("unable to create new native thread")) {
+ printStackDump(logger);
+ }
+ }
+ parent.uncaughtException(t, e);
+ }
+
+ }
+
+ protected static final void printStackDump(ESLogger logger) {
+ // print stack traces if we can't create any native thread anymore
+ Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces();
+ logger.error(formatThreadStacks(allStackTraces));
+ }
+
+ /**
+ * Dumps the given threads and their current stack traces.
+ */
+ private static String formatThreadStacks(Map<Thread, StackTraceElement[]> threads) {
+ StringBuilder message = new StringBuilder();
+ int cnt = 1;
+ final Formatter f = new Formatter(message, Locale.ENGLISH);
+ for (Map.Entry<Thread, StackTraceElement[]> e : threads.entrySet()) {
+ if (e.getKey().isAlive()) {
+ f.format(Locale.ENGLISH, "\n %2d) %s", cnt++, threadName(e.getKey())).flush();
+ }
+ if (e.getValue().length == 0) {
+ message.append("\n at (empty stack)");
+ } else {
+ for (StackTraceElement ste : e.getValue()) {
+ message.append("\n at ").append(ste);
+ }
+ }
+ }
+ return message.toString();
+ }
+
+ private static String threadName(Thread t) {
+ return "Thread[" +
+ "id=" + t.getId() +
+ ", name=" + t.getName() +
+ ", state=" + t.getState() +
+ ", group=" + groupName(t.getThreadGroup()) +
+ "]";
+ }
+
+ private static String groupName(ThreadGroup threadGroup) {
+ if (threadGroup == null) {
+ return "{null group}";
+ } else {
+ return threadGroup.getName();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java b/src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java
new file mode 100644
index 0000000..bd30e42
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.ThreadFilter;
+
+/**
+ * Simple thread filter for randomized runner
+ */
+public final class ElasticsearchThreadFilter implements ThreadFilter {
+ @Override
+ public boolean reject(Thread t) {
+ return true;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java
new file mode 100644
index 0000000..d42093c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+/**
+ * Basic test case for token streams. The assertion methods in this class will
+ * run basic checks to enforce correct behavior of the token streams.
+ */
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
+@ThreadLeakScope(Scope.NONE)
+@TimeoutSuite(millis = TimeUnits.HOUR)
+public abstract class ElasticsearchTokenStreamTestCase extends BaseTokenStreamTestCase {
+
+ public static Version randomVersion() {
+ return ElasticsearchTestCase.randomVersion(random());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/NodeSettingsSource.java b/src/test/java/org/elasticsearch/test/NodeSettingsSource.java
new file mode 100644
index 0000000..b128afc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/NodeSettingsSource.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+abstract class NodeSettingsSource {
+
+ public static final NodeSettingsSource EMPTY = new NodeSettingsSource() {
+ @Override
+ public Settings settings(int nodeOrdinal) {
+ return null;
+ }
+ };
+
+ /**
+ * @return the settings for the node represented by the given ordinal, or {@code null} if no settings are defined (in which
+ * case random settings will be generated for the node)
+ */
+ public abstract Settings settings(int nodeOrdinal);
+
+ public static class Immutable extends NodeSettingsSource {
+
+ private final Map<Integer, Settings> settingsPerNode;
+
+ private Immutable(Map<Integer, Settings> settingsPerNode) {
+ this.settingsPerNode = settingsPerNode;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ @Override
+ public Settings settings(int nodeOrdinal) {
+ return settingsPerNode.get(nodeOrdinal);
+ }
+
+ public static class Builder {
+
+ private final ImmutableMap.Builder<Integer, Settings> settingsPerNode = ImmutableMap.builder();
+
+ private Builder() {
+ }
+
+ public Builder set(int ordinal, Settings settings) {
+ settingsPerNode.put(ordinal, settings);
+ return this;
+ }
+
+ public Immutable build() {
+ return new Immutable(settingsPerNode.build());
+ }
+
+ }
+ }
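+
+ // Illustrative sketch: building a fixed per-ordinal source with the Immutable builder above.
+ // The settings keys and values are invented.
+ //
+ // NodeSettingsSource source = NodeSettingsSource.Immutable.builder()
+ //         .set(0, ImmutableSettings.settingsBuilder().put("node.master", true).build())
+ //         .set(1, ImmutableSettings.settingsBuilder().put("node.data", true).build())
+ //         .build();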
+}
diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java
new file mode 100644
index 0000000..2ca2364
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/TestCluster.java
@@ -0,0 +1,1300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecyclerModule;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.merge.policy.*;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.SerialMergeSchedulerProvider;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexTemplateMissingException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.internal.InternalNode;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.test.cache.recycler.MockPageCacheRecyclerModule;
+import org.elasticsearch.test.engine.MockEngineModule;
+import org.elasticsearch.test.store.MockFSIndexStoreModule;
+import org.elasticsearch.test.transport.AssertingLocalTransportModule;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Assert;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+import static com.google.common.collect.Maps.newTreeMap;
+import static org.apache.lucene.util.LuceneTestCase.rarely;
+import static org.apache.lucene.util.LuceneTestCase.usually;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+/**
+ * TestCluster manages a set of JVM-private nodes and allows convenient access to them.
+ * The cluster supports randomized configuration such that nodes started in the cluster will
+ * automatically load asserting services tracking resources like file handles or open searchers.
+ * <p>
+ * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and
+ * {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates
+ * to the async nature of Elasticsearch in combination with randomized testing. Once threads and asynchronous calls
+ * are involved, reproducibility is very limited. This class should only be used through {@link ElasticsearchIntegrationTest}.
+ * </p>
+ */
+public final class TestCluster implements Iterable<Client> {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ /**
+ * A boolean value to enable or disable mock modules. This is useful to test the
+ * system without the asserting mock modules, to make sure they don't hide any bugs
+ * that would show up in production.
+ *
+ * @see ElasticsearchIntegrationTest
+ */
+ public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
+
+ /**
+ * A node level setting that holds a per node random seed that is consistent across node restarts
+ */
+ public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed";
+
+ /**
+ * Key used to retrieve the index random seed from the index settings on a running node.
+ * The value of this seed can be used to initialize a random context for a specific index.
+ * It's set once per test via a generic index template.
+ */
+ public static final String SETTING_INDEX_SEED = "index.tests.seed";
+
+ private static final String CLUSTER_NAME_KEY = "cluster.name";
+
+ private static final boolean ENABLE_MOCK_MODULES = systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
+
+ static final int DEFAULT_MIN_NUM_NODES = 2;
+ static final int DEFAULT_MAX_NUM_NODES = 6;
+
+ /* sorted map to make traversal order reproducible */
+ private final TreeMap<String, NodeAndClient> nodes = newTreeMap();
+
+ private final Set<File> dataDirToClean = new HashSet<File>();
+
+ private final String clusterName;
+
+ private final AtomicBoolean open = new AtomicBoolean(true);
+
+ private final Settings defaultSettings;
+
+ private Random random;
+
+ private AtomicInteger nextNodeId = new AtomicInteger(0);
+
+ /* Each shared node has a node seed that is used to start up the node and get default settings.
+ * This is important if a node is randomly shut down in a test, since the next test relies on a
+ * fully shared cluster to be more reproducible */
+ private final long[] sharedNodesSeeds;
+
+ private double transportClientRatio = 0.0;
+
+ private final NodeSettingsSource nodeSettingsSource;
+
+ public TestCluster(long clusterSeed, String clusterName) {
+ this(clusterSeed, DEFAULT_MIN_NUM_NODES, DEFAULT_MAX_NUM_NODES, clusterName, NodeSettingsSource.EMPTY);
+ }
+
+ public TestCluster(long clusterSeed, int minNumNodes, int maxNumNodes, String clusterName) {
+ this(clusterSeed, minNumNodes, maxNumNodes, clusterName, NodeSettingsSource.EMPTY);
+ }
+
+ public TestCluster(long clusterSeed, int minNumNodes, int maxNumNodes, String clusterName, NodeSettingsSource nodeSettingsSource) {
+ this.clusterName = clusterName;
+
+ if (minNumNodes < 0 || maxNumNodes < 0) {
+ throw new IllegalArgumentException("minimum and maximum number of nodes must be >= 0");
+ }
+
+ if (maxNumNodes < minNumNodes) {
+ throw new IllegalArgumentException("maximum number of nodes must be >= minimum number of nodes");
+ }
+
+ Random random = new Random(clusterSeed);
+
+ int numSharedNodes;
+ if (minNumNodes == maxNumNodes) {
+ numSharedNodes = minNumNodes;
+ } else {
+ numSharedNodes = minNumNodes + random.nextInt(maxNumNodes - minNumNodes);
+ }
+
+ assert numSharedNodes >= 0;
+ /*
+ * TODO
+ * - we might want to start some master-only nodes?
+ * - we could add a flag that returns a client to the master all the time?
+ * - we could add a flag that never returns a client to the master
+ * - along those lines use a dedicated node that is master eligible and let all other nodes be only data nodes
+ */
+ sharedNodesSeeds = new long[numSharedNodes];
+ for (int i = 0; i < sharedNodesSeeds.length; i++) {
+ sharedNodesSeeds[i] = random.nextLong();
+ }
+ logger.info("Setup TestCluster [{}] with seed [{}] using [{}] nodes", clusterName, SeedUtils.formatSeed(clusterSeed), numSharedNodes);
+ this.nodeSettingsSource = nodeSettingsSource;
+ Builder builder = ImmutableSettings.settingsBuilder();
+ // randomize (multi/single) data path, special case for 0, don't set it at all...
+ int numOfDataPaths = random.nextInt(5);
+ if (numOfDataPaths > 0) {
+ StringBuilder dataPath = new StringBuilder();
+ for (int i = 0; i < numOfDataPaths; i++) {
+ dataPath.append("data/d").append(i).append(',');
+ }
+ builder.put("path.data", dataPath.toString());
+ }
+ defaultSettings = builder.build();
+
+ }
+
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ private static boolean isLocalTransportConfigured() {
+ if ("local".equals(System.getProperty("es.node.mode", "network"))) {
+ return true;
+ }
+ return Boolean.parseBoolean(System.getProperty("es.node.local", "false"));
+ }
+
+ private Settings getSettings(int nodeOrdinal, long nodeSeed, Settings others) {
+ Builder builder = ImmutableSettings.settingsBuilder().put(defaultSettings)
+ .put(getRandomNodeSettings(nodeSeed));
+ Settings settings = nodeSettingsSource.settings(nodeOrdinal);
+ if (settings != null) {
+ if (settings.get(CLUSTER_NAME_KEY) != null) {
+ throw new ElasticsearchIllegalStateException("Tests must not set a '" + CLUSTER_NAME_KEY + "' as a node setting; got '" + CLUSTER_NAME_KEY + "': [" + settings.get(CLUSTER_NAME_KEY) + "]");
+ }
+ builder.put(settings);
+ }
+ if (others != null) {
+ builder.put(others);
+ }
+ builder.put(CLUSTER_NAME_KEY, clusterName);
+ return builder.build();
+ }
+
+ private static Settings getRandomNodeSettings(long seed) {
+ Random random = new Random(seed);
+ Builder builder = ImmutableSettings.settingsBuilder()
+ /* use RAM directories in 10% of the runs */
+ //.put("index.store.type", random.nextInt(10) == 0 ? MockRamIndexStoreModule.class.getName() : MockFSIndexStoreModule.class.getName())
+ // decrease the routing schedule so new nodes will be added quickly - some random value between 30 and 80 ms
+ .put("cluster.routing.schedule", (30 + random.nextInt(50)) + "ms")
+ // default to non gateway
+ .put("gateway.type", "none")
+ .put(SETTING_CLUSTER_NODE_SEED, seed);
+ if (ENABLE_MOCK_MODULES && usually(random)) {
+ builder.put("index.store.type", MockFSIndexStoreModule.class.getName()); // no RAM dir for now!
+ builder.put(IndexEngineModule.EngineSettings.ENGINE_TYPE, MockEngineModule.class.getName());
+ builder.put(PageCacheRecyclerModule.CACHE_IMPL, MockPageCacheRecyclerModule.class.getName());
+ }
+ if (isLocalTransportConfigured()) {
+ builder.put(TransportModule.TRANSPORT_TYPE_KEY, AssertingLocalTransportModule.class.getName());
+ } else {
+ builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random));
+ }
+ builder.put("type", RandomPicks.randomFrom(random, CacheRecycler.Type.values()));
+ if (random.nextBoolean()) {
+ builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, CacheRecycler.Type.values()));
+ }
+ if (random.nextInt(10) == 0) { // 10% of the nodes have a very frequent check interval
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(10 + random.nextInt(2000)));
+ } else if (random.nextInt(10) != 0) { // 90% of the time - 10% of the time we don't set anything
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
+ }
+ if (random.nextBoolean()) { // sometimes set a default keepalive
+ builder.put(SearchService.DEFAUTL_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5*60)));
+ }
+ if (random.nextBoolean()) {
+ // change threadpool types to make sure we don't have components that rely on the type of thread pools
+ for (String name : Arrays.asList(ThreadPool.Names.BULK, ThreadPool.Names.FLUSH, ThreadPool.Names.GET,
+ ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.MERGE, ThreadPool.Names.OPTIMIZE,
+ ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT,
+ ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
+ if (random.nextBoolean()) {
+ final String type = RandomPicks.randomFrom(random, Arrays.asList("fixed", "cached", "scaling"));
+ builder.put(ThreadPool.THREADPOOL_GROUP + name + ".type", type);
+ }
+ }
+ }
+ return builder.build();
+ }
+
+ public static String clusterName(String prefix, String childVMId, long clusterSeed) {
+ StringBuilder builder = new StringBuilder(prefix);
+ builder.append('-').append(NetworkUtils.getLocalAddress().getHostName());
+ builder.append("-CHILD_VM=[").append(childVMId).append(']');
+ builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']');
+ // if multiple maven tasks run on a single host we'd better have an identifier that doesn't rely on input params
+ builder.append("-HASH=[").append(SeedUtils.formatSeed(System.nanoTime())).append(']');
+ return builder.toString();
+ }
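+
+ // For illustration, a generated name has the shape:
+ //     <prefix>-<hostname>-CHILD_VM=[<id>]-CLUSTER_SEED=[<hex seed>]-HASH=[<hex of nanoTime>]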
+
+ private void ensureOpen() {
+ if (!open.get()) {
+ throw new RuntimeException("Cluster is already closed");
+ }
+ }
+
+ private synchronized NodeAndClient getOrBuildRandomNode() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient;
+ }
+ NodeAndClient buildNode = buildNode();
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode;
+ }
+
+ private synchronized NodeAndClient getRandomNodeAndClient() {
+ Predicate<NodeAndClient> all = Predicates.alwaysTrue();
+ return getRandomNodeAndClient(all);
+ }
+
+
+ private synchronized NodeAndClient getRandomNodeAndClient(Predicate<NodeAndClient> predicate) {
+ ensureOpen();
+ Collection<NodeAndClient> values = Collections2.filter(nodes.values(), predicate);
+ if (!values.isEmpty()) {
+ int whichOne = random.nextInt(values.size());
+ for (NodeAndClient nodeAndClient : values) {
+ if (whichOne-- == 0) {
+ return nodeAndClient;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Ensures that at least <code>n</code> nodes are present in the cluster.
+ * If more nodes than <code>n</code> are present this method will not
+ * stop any of the running nodes.
+ */
+ public synchronized void ensureAtLeastNumNodes(int n) {
+ int size = nodes.size();
+ for (int i = size; i < n; i++) {
+ logger.info("increasing cluster size from {} to {}", size, n);
+ NodeAndClient buildNode = buildNode();
+ buildNode.node().start();
+ publishNode(buildNode);
+ }
+ }
+
+ /**
+ * Ensures that at most <code>n</code> nodes are up and running.
+ * If fewer than <code>n</code> nodes are running this method
+ * will not start any additional nodes.
+ */
+ public synchronized void ensureAtMostNumNodes(int n) {
+ if (nodes.size() <= n) {
+ return;
+ }
+ // prevent killing the master if possible
+ final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator() : Iterators.filter(nodes.values().iterator(), Predicates.not(new MasterNodePredicate(getMasterName())));
+ final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - n);
+ logger.info("reducing cluster size from {} to {}", nodes.size() - n, n);
+ Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
+ while (limit.hasNext()) {
+ NodeAndClient next = limit.next();
+ nodesToRemove.add(next);
+ next.close();
+ }
+ for (NodeAndClient toRemove : nodesToRemove) {
+ nodes.remove(toRemove.name);
+ }
+ }
+
+ private NodeAndClient buildNode(Settings settings) {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), settings);
+ }
+
+ private NodeAndClient buildNode() {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), null);
+ }
+
+ private NodeAndClient buildNode(int nodeId, long seed, Settings settings) {
+ ensureOpen();
+ settings = getSettings(nodeId, seed, settings);
+ String name = buildNodeName(nodeId);
+ assert !nodes.containsKey(name);
+ Settings finalSettings = settingsBuilder()
+ .put(settings)
+ .put("name", name)
+ .put("discovery.id.seed", seed)
+ .build();
+ Node node = nodeBuilder().settings(finalSettings).build();
+ return new NodeAndClient(name, node, new RandomClientFactory());
+ }
+
+ private String buildNodeName(int id) {
+ return "node_" + id;
+ }
+
+ public synchronized Client client() {
+ ensureOpen();
+ /* Randomly return a client to one of the nodes in the cluster */
+ return getOrBuildRandomNode().client(random);
+ }
+
+ /**
+ * Returns a node client to the current master node.
+ * Note: use this with care; tests should not rely on a certain node's client.
+ */
+ public synchronized Client masterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client master is requested
+ }
+ Assert.fail("No master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a node client to a random node, but not the master. This method will fail if no non-master client is available.
+ */
+ public synchronized Client nonMasterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested
+ }
+ Assert.fail("No non-master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a client to a node started with "node.client: true"
+ */
+ public synchronized Client clientNodeClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ startNodeClient(ImmutableSettings.EMPTY);
+ return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
+ }
+
+ /**
+ * Returns a transport client
+ */
+ public synchronized Client transportClient() {
+ ensureOpen();
+ // randomly return a transport client going to one of the nodes in the cluster
+ return getOrBuildRandomNode().transportClient();
+ }
+
+ /**
+ * Returns a node client to a given node.
+ */
+ public synchronized Client client(String nodeName) {
+ ensureOpen();
+ NodeAndClient nodeAndClient = nodes.get(nodeName);
+ if (nodeAndClient != null) {
+ return nodeAndClient.client(random);
+ }
+ Assert.fail("No node found with name: [" + nodeName + "]");
+ return null; // can't happen
+ }
+
+
+ /**
+ * Returns a "smart" node client to a random node in the cluster
+ */
+ public synchronized Client smartClient() {
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient();
+ }
+ Assert.fail("No smart client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a client to a random node that matches the given predicate.
+ * The predicate can filter nodes based on the node's settings.
+ * If all nodes are filtered out this method will return <code>null</code>.
+ */
+ public synchronized Client client(final Predicate<Settings> filterPredicate) {
+ ensureOpen();
+ final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new Predicate<NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return filterPredicate.apply(nodeAndClient.node.settings());
+ }
+ });
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ return null;
+ }
+
+ public void close() {
+ ensureOpen();
+ if (this.open.compareAndSet(true, false)) {
+ IOUtils.closeWhileHandlingException(nodes.values());
+ nodes.clear();
+ }
+ }
+
+ private final class NodeAndClient implements Closeable {
+ private InternalNode node;
+ private Client client;
+ private Client nodeClient;
+ private Client transportClient;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+ private final ClientFactory clientFactory;
+ private final String name;
+
+ NodeAndClient(String name, Node node, ClientFactory factory) {
+ this.node = (InternalNode) node;
+ this.name = name;
+ this.clientFactory = factory;
+ }
+
+ Node node() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return node;
+ }
+
+ Client client(Random random) {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (client != null) {
+ return client;
+ }
+ return client = clientFactory.client(node, clusterName, random);
+ }
+
+ Client nodeClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (nodeClient == null) {
+ Client maybeNodeClient = client(random);
+ if (maybeNodeClient instanceof NodeClient) {
+ nodeClient = maybeNodeClient;
+ } else {
+ nodeClient = node.client();
+ }
+ }
+ return nodeClient;
+ }
+
+ Client transportClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (transportClient == null) {
+ Client maybeTransportClient = client(random);
+ if (maybeTransportClient instanceof TransportClient) {
+ transportClient = maybeTransportClient;
+ } else {
+ transportClient = TransportClientFactory.NO_SNIFF_CLIENT_FACTORY.client(node, clusterName, random);
+ }
+ }
+ return transportClient;
+ }
+
+ void resetClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (client != null) {
+ client.close();
+ client = null;
+ }
+ if (nodeClient != null) {
+ nodeClient.close();
+ nodeClient = null;
+ }
+ if (transportClient != null) {
+ transportClient.close();
+ transportClient = null;
+ }
+ }
+
+ void restart(RestartCallback callback) throws Exception {
+ assert callback != null;
+ if (!node.isClosed()) {
+ node.close();
+ }
+ Settings newSettings = callback.onNodeStopped(name);
+ if (newSettings == null) {
+ newSettings = ImmutableSettings.EMPTY;
+ }
+ if (callback.clearData(name)) {
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
+ if (nodeEnv.hasNodeFile()) {
+ FileSystemUtils.deleteRecursively(nodeEnv.nodeDataLocations());
+ }
+ }
+ node = (InternalNode) nodeBuilder().settings(node.settings()).settings(newSettings).node();
+ resetClient();
+ }
+
+
+ @Override
+ public void close() {
+ closed.set(true);
+ if (client != null) {
+ client.close();
+ client = null;
+ }
+ if (nodeClient != null) {
+ nodeClient.close();
+ nodeClient = null;
+ }
+ node.close();
+
+ }
+ }
+
+ static class ClientFactory {
+
+ public Client client(Node node, String clusterName, Random random) {
+ return node.client();
+ }
+ }
+
+ static class TransportClientFactory extends ClientFactory {
+
+ private final boolean sniff;
+ public static final TransportClientFactory NO_SNIFF_CLIENT_FACTORY = new TransportClientFactory(false);
+ public static final TransportClientFactory SNIFF_CLIENT_FACTORY = new TransportClientFactory(true);
+
+ private TransportClientFactory(boolean sniff) {
+ this.sniff = sniff;
+ }
+
+ @Override
+ public Client client(Node node, String clusterName, Random random) {
+ TransportAddress addr = ((InternalNode) node).injector().getInstance(TransportService.class).boundAddress().publishAddress();
+ TransportClient client = new TransportClient(settingsBuilder().put("client.transport.nodes_sampler_interval", "1s")
+ .put("name", "transport_client_" + node.settings().get("name"))
+ .put(CLUSTER_NAME_KEY, clusterName).put("client.transport.sniff", sniff).build());
+ client.addTransportAddress(addr);
+ return client;
+ }
+ }
+
+ class RandomClientFactory extends ClientFactory {
+
+ @Override
+ public Client client(Node node, String clusterName, Random random) {
+ double nextDouble = random.nextDouble();
+ if (nextDouble < transportClientRatio) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Using transport client for node [{}] sniff: [{}]", node.settings().get("name"), false);
+ }
+ /* no sniff client for now - doesn't work with all tests since it might throw NoNodeAvailableException if nodes are shut down.
+ * we first need support of transportClientRatio as annotations or so
+ */
+ return TransportClientFactory.NO_SNIFF_CLIENT_FACTORY.client(node, clusterName, random);
+ } else {
+ return node.client();
+ }
+ }
+ }
+
+ /**
+ * This method should be executed before each test to reset the cluster to its initial state.
+ */
+ public synchronized void beforeTest(Random random, double transportClientRatio) {
+ reset(random, true, transportClientRatio);
+ }
+
+ private synchronized void reset(Random random, boolean wipeData, double transportClientRatio) {
+ assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
+ logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
+ this.transportClientRatio = transportClientRatio;
+ this.random = new Random(random.nextLong());
+ resetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
+ if (wipeData) {
+ wipeDataDirectories();
+ }
+ if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
+ logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ return;
+ }
+ logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+
+
+ Set<NodeAndClient> sharedNodes = new HashSet<NodeAndClient>();
+ boolean changed = false;
+ for (int i = 0; i < sharedNodesSeeds.length; i++) {
+ String buildNodeName = buildNodeName(i);
+ NodeAndClient nodeAndClient = nodes.get(buildNodeName);
+ if (nodeAndClient == null) {
+ changed = true;
+ nodeAndClient = buildNode(i, sharedNodesSeeds[i], null);
+ nodeAndClient.node.start();
+ logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
+ }
+ sharedNodes.add(nodeAndClient);
+ }
+ if (!changed && sharedNodes.size() == nodes.size()) {
+ logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ return; // we are consistent - return
+ }
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ nodes.remove(nodeAndClient.name);
+ }
+
+ // trash the remaining nodes
+ final Collection<NodeAndClient> toShutDown = nodes.values();
+ for (NodeAndClient nodeAndClient : toShutDown) {
+ logger.debug("Close Node [{}] not shared", nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ nodes.clear();
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ publishNode(nodeAndClient);
+ }
+ nextNodeId.set(sharedNodesSeeds.length);
+ assert size() == sharedNodesSeeds.length;
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ }
+
+ public void wipe() {
+ wipeIndices("_all");
+ wipeTemplates();
+ wipeRepositories();
+ }
+
+ /**
+ * Deletes the given indices from the test cluster. At least one index name must be
+ * passed; to remove all indices, pass <tt>_all</tt>.
+ */
+ public void wipeIndices(String... indices) {
+ assert indices != null && indices.length > 0;
+ if (size() > 0) {
+ try {
+ assertAcked(client().admin().indices().prepareDelete(indices));
+ } catch (IndexMissingException e) {
+ // ignore
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // Happens if `action.destructive_requires_name` is set to true
+ // which is the case in the CloseIndexDisableCloseAllTests
+ if ("_all".equals(indices[0])) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ ObjectArrayList<String> concreteIndices = new ObjectArrayList<String>();
+ for (IndexMetaData indexMetaData : clusterStateResponse.getState().metaData()) {
+ concreteIndices.add(indexMetaData.getIndex());
+ }
+ if (!concreteIndices.isEmpty()) {
+ assertAcked(client().admin().indices().prepareDelete(concreteIndices.toArray(String.class)));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes index templates, supports wildcard notation.
+ * If no template name is passed to this method all templates are removed.
+ */
+ public void wipeTemplates(String... templates) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (templates.length == 0) {
+ templates = new String[]{"*"};
+ }
+ for (String template : templates) {
+ try {
+ client().admin().indices().prepareDeleteTemplate(template).execute().actionGet();
+ } catch (IndexTemplateMissingException e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes repositories, supports wildcard notation.
+ */
+ public void wipeRepositories(String... repositories) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (repositories.length == 0) {
+ repositories = new String[]{"*"};
+ }
+ for (String repository : repositories) {
+ try {
+ client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
+ } catch (RepositoryMissingException ex) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a randomized index template. This template is used to pass in randomized settings on a
+ * per index basis.
+ */
+ public void randomIndexTemplate() {
+ // TODO move settings for random directory etc here into the index based randomized settings.
+ if (size() > 0) {
+ client().admin().indices().preparePutTemplate("random_index_template")
+ .setTemplate("*")
+ .setOrder(0)
+ .setSettings(setRandomNormsLoading(setRandomMerge(random, ImmutableSettings.builder())
+ .put(SETTING_INDEX_SEED, random.nextLong())))
+ .execute().actionGet();
+ }
+ }
+
+
+ private ImmutableSettings.Builder setRandomNormsLoading(ImmutableSettings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(SearchService.NORMS_LOADING_KEY, RandomPicks.randomFrom(random, Arrays.asList(FieldMapper.Loading.EAGER, FieldMapper.Loading.LAZY)));
+ }
+ return builder;
+ }
+
+ private static ImmutableSettings.Builder setRandomMerge(Random random, ImmutableSettings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT,
+ random.nextBoolean() ? random.nextDouble() : random.nextBoolean());
+ }
+ Class<? extends MergePolicyProvider<?>> mergePolicy = TieredMergePolicyProvider.class;
+ switch (random.nextInt(5)) {
+ case 4:
+ mergePolicy = LogByteSizeMergePolicyProvider.class;
+ break;
+ case 3:
+ mergePolicy = LogDocMergePolicyProvider.class;
+ break;
+ case 0:
+ mergePolicy = null;
+ }
+ if (mergePolicy != null) {
+ builder.put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, mergePolicy.getName());
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(MergeSchedulerProvider.FORCE_ASYNC_MERGE, random.nextBoolean());
+ }
+ switch (random.nextInt(5)) {
+ case 4:
+ builder.put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, SerialMergeSchedulerProvider.class.getName());
+ break;
+ case 3:
+ builder.put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, ConcurrentMergeSchedulerProvider.class.getName());
+ break;
+ }
+
+ return builder;
+ }
+
+ /**
+ * This method should be executed during tearDown
+ */
+ public synchronized void afterTest() {
+ wipeDataDirectories();
+ resetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
+ }
+
+ public void assertAfterTest() throws IOException {
+ assertAllSearchersClosed();
+ assertAllFilesClosed();
+ }
+
+ private void resetClients() {
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ nodeAndClient.resetClient();
+ }
+ }
+
+ private void wipeDataDirectories() {
+ if (!dataDirToClean.isEmpty()) {
+ logger.info("Wipe data directory for all nodes locations: {}", this.dataDirToClean);
+ try {
+ FileSystemUtils.deleteRecursively(dataDirToClean.toArray(new File[dataDirToClean.size()]));
+ } finally {
+ this.dataDirToClean.clear();
+ }
+ }
+ }
+
+ /**
+ * Returns a reference to a random node's {@link ClusterService}
+ */
+ public synchronized ClusterService clusterService() {
+ return getInstance(ClusterService.class);
+ }
+
+ /**
+ * Returns an Iterable over all instances of the given class &lt;T&gt; across all nodes in the cluster.
+ */
+ public synchronized <T> Iterable<T> getInstances(Class<T> clazz) {
+ List<T> instances = new ArrayList<T>(nodes.size());
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ instances.add(getInstanceFromNode(clazz, nodeAndClient.node));
+ }
+ return instances;
+ }
+
+ /**
+ * Returns a reference to the given node's instance of the given class &lt;T&gt;
+ */
+ public synchronized <T> T getInstance(Class<T> clazz, final String node) {
+ final Predicate<TestCluster.NodeAndClient> predicate;
+ if (node != null) {
+ predicate = new Predicate<TestCluster.NodeAndClient>() {
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return node.equals(nodeAndClient.name);
+ }
+ };
+ } else {
+ predicate = Predicates.alwaysTrue();
+ }
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(predicate);
+ assert randomNodeAndClient != null;
+ return getInstanceFromNode(clazz, randomNodeAndClient.node);
+ }
+
+ /**
+ * Returns a reference to a random node's instance of the given class &lt;T&gt;.
+ */
+ public synchronized <T> T getInstance(Class<T> clazz) {
+ return getInstance(clazz, null);
+ }
+
+ private synchronized <T> T getInstanceFromNode(Class<T> clazz, InternalNode node) {
+ return node.injector().getInstance(clazz);
+ }
+
+ /**
+ * Returns the number of nodes in the cluster.
+ */
+ public synchronized int size() {
+ return this.nodes.size();
+ }
+
+ /**
+ * Stops a random node in the cluster.
+ */
+ public synchronized void stopRandomNode() {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient();
+ if (nodeAndClient != null) {
+ logger.info("Closing random node [{}] ", nodeAndClient.name);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Stops a random node in the cluster that matches the given filter, or does
+ * nothing if none of the nodes matches the filter.
+ */
+ public synchronized void stopRandomNode(final Predicate<Settings> filter) {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(new Predicate<TestCluster.NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return filter.apply(nodeAndClient.node.settings());
+ }
+ });
+ if (nodeAndClient != null) {
+ logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
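+
+ // Illustrative sketch, combining this with the nameFilter(...) helper defined
+ // further down: stops one of the named nodes if present, otherwise does nothing.
+ //   cluster.stopRandomNode(TestCluster.nameFilter("node_1", "node_2"));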
+
+
+ /**
+ * Stops the current master node forcefully
+ */
+ public synchronized void stopCurrentMasterNode() {
+ ensureOpen();
+ assert size() > 0;
+ String masterNodeName = getMasterName();
+ assert nodes.containsKey(masterNodeName);
+ logger.info("Closing master node [{}] ", masterNodeName);
+ NodeAndClient remove = nodes.remove(masterNodeName);
+ remove.close();
+ }
+
+ /**
+ * Stops any one of the current nodes, but never the master node.
+ */
+ public void stopRandomNonMasterNode() {
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
+ if (nodeAndClient != null) {
+ logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName());
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Restarts a random node in the cluster
+ */
+ public void restartRandomNode() throws Exception {
+ restartRandomNode(EMPTY_CALLBACK);
+ }
+
+
+ /**
+ * Restarts a random node in the cluster and calls the callback during restart.
+ */
+ public void restartRandomNode(RestartCallback callback) throws Exception {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient();
+ if (nodeAndClient != null) {
+ logger.info("Restarting random node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+
+ private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
+ ensureOpen();
+ List<NodeAndClient> toRemove = new ArrayList<TestCluster.NodeAndClient>();
+ try {
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ if (!callback.doRestart(nodeAndClient.name)) {
+ logger.info("Closing node [{}] during restart", nodeAndClient.name);
+ toRemove.add(nodeAndClient);
+ nodeAndClient.close();
+ }
+ }
+ } finally {
+ for (NodeAndClient nodeAndClient : toRemove) {
+ nodes.remove(nodeAndClient.name);
+ }
+ }
+ logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
+ if (rollingRestart) {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Restarting node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ } else {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Stopping node [{}] ", nodeAndClient.name);
+ nodeAndClient.node.close();
+ }
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ logger.info("Starting node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+ }
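+
+ // Illustrative sketch of a callback-driven rolling restart (assumes a TestCluster
+ // named "cluster"):
+ //   cluster.rollingRestart(new RestartCallback() {
+ //       @Override
+ //       public Settings onNodeStopped(String nodeName) {
+ //           return ImmutableSettings.EMPTY; // settings to apply when the node restarts
+ //       }
+ //   });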
+
+
+ private static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
+ public Settings onNodeStopped(String node) {
+ return null;
+ }
+ };
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then starts them again.
+ */
+ public void fullRestart() throws Exception {
+ fullRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling fashion, i.e. it restarts only one node at a time.
+ */
+ public void rollingRestart() throws Exception {
+ rollingRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling fashion, i.e. it restarts only one node at a time.
+ */
+ public void rollingRestart(RestartCallback function) throws Exception {
+ restartAllNodes(true, function);
+ }
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then starts them again.
+ */
+ public void fullRestart(RestartCallback function) throws Exception {
+ restartAllNodes(false, function);
+ }
+
+
+ private String getMasterName() {
+ try {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ return state.nodes().masterNode().name();
+ } catch (Throwable e) {
+ logger.warn("Can't fetch cluster state", e);
+ throw new RuntimeException("Can't get master node " + e.getMessage(), e);
+ }
+ }
+
+ synchronized Set<String> allButN(int numNodes) {
+ return nRandomNodes(size() - numNodes);
+ }
+
+ private synchronized Set<String> nRandomNodes(int numNodes) {
+ assert size() >= numNodes;
+ return Sets.newHashSet(Iterators.limit(this.nodes.keySet().iterator(), numNodes));
+ }
+
+ public synchronized void startNodeClient(Settings settings) {
+ ensureOpen(); // currently unused
+ startNode(settingsBuilder().put(settings).put("node.client", true));
+ }
+
+ /**
+ * Returns a set of nodes that have at least one shard of the given index.
+ */
+ public synchronized Set<String> nodesInclude(String index) {
+ if (clusterService().state().routingTable().hasIndex(index)) {
+ List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
+ DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
+ Set<String> nodes = new HashSet<String>();
+ for (ShardRouting shardRouting : allShards) {
+ if (shardRouting.assignedToNode()) {
+ DiscoveryNode discoveryNode = discoveryNodes.get(shardRouting.currentNodeId());
+ nodes.add(discoveryNode.getName());
+ }
+ }
+ return nodes;
+ }
+ return Collections.emptySet();
+ }
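+
+ // Illustrative sketch (assumes a TestCluster named "cluster"): stop a node that
+ // holds a shard of index "test".
+ //   Set<String> holders = cluster.nodesInclude("test");
+ //   if (!holders.isEmpty()) {
+ //       cluster.stopRandomNode(TestCluster.nameFilter(holders.iterator().next()));
+ //   }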
+
+ /**
+ * Starts a node with default settings and returns its name.
+ */
+ public String startNode() {
+ return startNode(ImmutableSettings.EMPTY);
+ }
+
+ /**
+ * Starts a node with the given settings builder and returns its name.
+ */
+ public String startNode(Settings.Builder settings) {
+ return startNode(settings.build());
+ }
+
+ /**
+ * Starts a node with the given settings and returns its name.
+ */
+ public String startNode(Settings settings) {
+ NodeAndClient buildNode = buildNode(settings);
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode.name;
+ }
+
+ private void publishNode(NodeAndClient nodeAndClient) {
+ assert !nodeAndClient.node().isClosed();
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, nodeAndClient.node);
+ if (nodeEnv.hasNodeFile()) {
+ dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataLocations()));
+ }
+ nodes.put(nodeAndClient.name, nodeAndClient);
+
+ }
+
+ public void closeNonSharedNodes(boolean wipeData) {
+ reset(random, wipeData, transportClientRatio);
+ }
+
+
+ private static final class MasterNodePredicate implements Predicate<NodeAndClient> {
+ private final String masterNodeName;
+
+ public MasterNodePredicate(String masterNodeName) {
+ this.masterNodeName = masterNodeName;
+ }
+
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return masterNodeName.equals(nodeAndClient.name);
+ }
+ }
+
+ private static final class ClientNodePredicate implements Predicate<NodeAndClient> {
+
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return nodeAndClient.node.settings().getAsBoolean("node.client", false);
+ }
+ }
+
+ @Override
+ public synchronized Iterator<Client> iterator() {
+ ensureOpen();
+ final Iterator<NodeAndClient> iterator = nodes.values().iterator();
+ return new Iterator<Client>() {
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Client next() {
+ return iterator.next().client(random);
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("the client iterator does not support removal");
+ }
+
+ };
+ }
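+
+ // Illustrative sketch: since the cluster is an Iterable<Client>, a request can be
+ // issued through every node's client in turn, e.g.
+ //   for (Client c : cluster) {
+ //       c.admin().cluster().prepareHealth().execute().actionGet();
+ //   }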
+
+ /**
+ * Returns a predicate that only accepts settings of nodes with one of the given names.
+ */
+ public static Predicate<Settings> nameFilter(String... nodeName) {
+ return new NodeNamePredicate(new HashSet<String>(Arrays.asList(nodeName)));
+ }
+
+ private static final class NodeNamePredicate implements Predicate<Settings> {
+ private final HashSet<String> nodeNames;
+
+
+ public NodeNamePredicate(HashSet<String> nodeNames) {
+ this.nodeNames = nodeNames;
+ }
+
+ @Override
+ public boolean apply(Settings settings) {
+ return nodeNames.contains(settings.get("name"));
+ }
+ }
+
+
+ /**
+ * An abstract class that is called during {@link #rollingRestart(org.elasticsearch.test.TestCluster.RestartCallback)}
+ * and/or {@link #fullRestart(org.elasticsearch.test.TestCluster.RestartCallback)} to execute actions at certain
+ * stages of the restart.
+ */
+ public static abstract class RestartCallback {
+
+ /**
+ * Executed once the node with the given name has been stopped.
+ */
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return ImmutableSettings.EMPTY;
+ }
+
+ /**
+ * Executed for each node before node <tt>n+1</tt> is restarted. The given client is
+ * an active client to the node that will be restarted next.
+ */
+ public void doAfterNodes(int n, Client client) throws Exception {
+ }
+
+ /**
+ * If this returns <code>true</code> all data for the node with the given node name will be cleared including
+ * gateways and all index data. Returns <code>false</code> by default.
+ */
+ public boolean clearData(String nodeName) {
+ return false;
+ }
+
+
+ /**
+ * If this returns <code>false</code> the node with the given node name will not be restarted. It will be
+ * closed and removed from the cluster. Returns <code>true</code> by default.
+ */
+ public boolean doRestart(String nodeName) {
+ return true;
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java
new file mode 100644
index 0000000..3f5b2a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.cache.recycler;
+
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class MockPageCacheRecyclerModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(PageCacheRecycler.class).to(MockPageCacheRecycler.class).asEagerSingleton();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/client/RandomizingClient.java b/src/test/java/org/elasticsearch/test/client/RandomizingClient.java
new file mode 100644
index 0000000..cbdf36e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/client/RandomizingClient.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteRequestBuilder;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.explain.ExplainRequestBuilder;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder;
+import org.elasticsearch.action.percolate.*;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.termvector.*;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+import java.util.Random;
+
+/** A {@link Client} that randomizes request parameters. */
+public class RandomizingClient implements InternalClient {
+
+ private final SearchType defaultSearchType;
+ private final InternalClient delegate;
+
+ public RandomizingClient(InternalClient client, Random random) {
+ this.delegate = client;
+ // we don't use the QUERY_AND_FETCH types that break quite a lot of tests
+ // given that they return `size*num_shards` hits instead of `size`
+ defaultSearchType = RandomPicks.randomFrom(random, Arrays.asList(
+ SearchType.DFS_QUERY_THEN_FETCH,
+ SearchType.QUERY_THEN_FETCH));
+ }
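+
+ // Note: the randomization is applied in prepareSearch(...) below; every other call
+ // is a plain delegation. Illustrative wrapping sketch ("client" and "random" are
+ // assumed test-scoped variables):
+ //   Client c = new RandomizingClient(client, random);
+ //   c.prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).get();
+ //   // executes with a randomly chosen default SearchType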
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ @Override
+ public AdminClient admin() {
+ return delegate.admin();
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
+ Action<Request, Response, RequestBuilder> action, Request request) {
+ return delegate.execute(action, request);
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
+ Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ delegate.execute(action, request, listener);
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
+ Action<Request, Response, RequestBuilder> action) {
+ return delegate.prepareExecute(action);
+ }
+
+ @Override
+ public ActionFuture<IndexResponse> index(IndexRequest request) {
+ return delegate.index(request);
+ }
+
+ @Override
+ public void index(IndexRequest request, ActionListener<IndexResponse> listener) {
+ delegate.index(request, listener);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex() {
+ return delegate.prepareIndex();
+ }
+
+ @Override
+ public ActionFuture<UpdateResponse> update(UpdateRequest request) {
+ return delegate.update(request);
+ }
+
+ @Override
+ public void update(UpdateRequest request, ActionListener<UpdateResponse> listener) {
+ delegate.update(request, listener);
+ }
+
+ @Override
+ public UpdateRequestBuilder prepareUpdate() {
+ return delegate.prepareUpdate();
+ }
+
+ @Override
+ public UpdateRequestBuilder prepareUpdate(String index, String type, String id) {
+ return delegate.prepareUpdate(index, type, id);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex(String index, String type) {
+ return delegate.prepareIndex(index, type);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex(String index, String type, String id) {
+ return delegate.prepareIndex(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<DeleteResponse> delete(DeleteRequest request) {
+ return delegate.delete(request);
+ }
+
+ @Override
+ public void delete(DeleteRequest request, ActionListener<DeleteResponse> listener) {
+ delegate.delete(request, listener);
+ }
+
+ @Override
+ public DeleteRequestBuilder prepareDelete() {
+ return delegate.prepareDelete();
+ }
+
+ @Override
+ public DeleteRequestBuilder prepareDelete(String index, String type, String id) {
+ return delegate.prepareDelete(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<BulkResponse> bulk(BulkRequest request) {
+ return delegate.bulk(request);
+ }
+
+ @Override
+ public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) {
+ delegate.bulk(request, listener);
+ }
+
+ @Override
+ public BulkRequestBuilder prepareBulk() {
+ return delegate.prepareBulk();
+ }
+
+ @Override
+ public ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) {
+ return delegate.deleteByQuery(request);
+ }
+
+ @Override
+ public void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) {
+ delegate.deleteByQuery(request, listener);
+ }
+
+ @Override
+ public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices) {
+ return delegate.prepareDeleteByQuery(indices);
+ }
+
+ @Override
+ public ActionFuture<GetResponse> get(GetRequest request) {
+ return delegate.get(request);
+ }
+
+ @Override
+ public void get(GetRequest request, ActionListener<GetResponse> listener) {
+ delegate.get(request, listener);
+ }
+
+ @Override
+ public GetRequestBuilder prepareGet() {
+ return delegate.prepareGet();
+ }
+
+ @Override
+ public GetRequestBuilder prepareGet(String index, String type, String id) {
+ return delegate.prepareGet(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<MultiGetResponse> multiGet(MultiGetRequest request) {
+ return delegate.multiGet(request);
+ }
+
+ @Override
+ public void multiGet(MultiGetRequest request, ActionListener<MultiGetResponse> listener) {
+ delegate.multiGet(request, listener);
+ }
+
+ @Override
+ public MultiGetRequestBuilder prepareMultiGet() {
+ return delegate.prepareMultiGet();
+ }
+
+ @Override
+ public ActionFuture<CountResponse> count(CountRequest request) {
+ return delegate.count(request);
+ }
+
+ @Override
+ public void count(CountRequest request, ActionListener<CountResponse> listener) {
+ delegate.count(request, listener);
+ }
+
+ @Override
+ public CountRequestBuilder prepareCount(String... indices) {
+ return delegate.prepareCount(indices);
+ }
+
+ @Override
+ public ActionFuture<SuggestResponse> suggest(SuggestRequest request) {
+ return delegate.suggest(request);
+ }
+
+ @Override
+ public void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener) {
+ delegate.suggest(request, listener);
+ }
+
+ @Override
+ public SuggestRequestBuilder prepareSuggest(String... indices) {
+ return delegate.prepareSuggest(indices);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> search(SearchRequest request) {
+ return delegate.search(request);
+ }
+
+ @Override
+ public void search(SearchRequest request, ActionListener<SearchResponse> listener) {
+ delegate.search(request, listener);
+ }
+
+ @Override
+ public SearchRequestBuilder prepareSearch(String... indices) {
+ return delegate.prepareSearch(indices).setSearchType(defaultSearchType);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> searchScroll(SearchScrollRequest request) {
+ return delegate.searchScroll(request);
+ }
+
+ @Override
+ public void searchScroll(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
+ delegate.searchScroll(request, listener);
+ }
+
+ @Override
+ public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) {
+ return delegate.prepareSearchScroll(scrollId);
+ }
+
+ @Override
+ public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
+ return delegate.multiSearch(request);
+ }
+
+ @Override
+ public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
+ delegate.multiSearch(request, listener);
+ }
+
+ @Override
+ public MultiSearchRequestBuilder prepareMultiSearch() {
+ return delegate.prepareMultiSearch();
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> moreLikeThis(MoreLikeThisRequest request) {
+ return delegate.moreLikeThis(request);
+ }
+
+ @Override
+ public void moreLikeThis(MoreLikeThisRequest request, ActionListener<SearchResponse> listener) {
+ delegate.moreLikeThis(request, listener);
+ }
+
+ @Override
+ public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) {
+ return delegate.prepareMoreLikeThis(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<TermVectorResponse> termVector(TermVectorRequest request) {
+ return delegate.termVector(request);
+ }
+
+ @Override
+ public void termVector(TermVectorRequest request, ActionListener<TermVectorResponse> listener) {
+ delegate.termVector(request, listener);
+ }
+
+ @Override
+ public TermVectorRequestBuilder prepareTermVector(String index, String type, String id) {
+ return delegate.prepareTermVector(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<MultiTermVectorsResponse> multiTermVectors(MultiTermVectorsRequest request) {
+ return delegate.multiTermVectors(request);
+ }
+
+ @Override
+ public void multiTermVectors(MultiTermVectorsRequest request, ActionListener<MultiTermVectorsResponse> listener) {
+ delegate.multiTermVectors(request, listener);
+ }
+
+ @Override
+ public MultiTermVectorsRequestBuilder prepareMultiTermVectors() {
+ return delegate.prepareMultiTermVectors();
+ }
+
+ @Override
+ public ActionFuture<PercolateResponse> percolate(PercolateRequest request) {
+ return delegate.percolate(request);
+ }
+
+ @Override
+ public void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener) {
+ delegate.percolate(request, listener);
+ }
+
+ @Override
+ public PercolateRequestBuilder preparePercolate() {
+ return delegate.preparePercolate();
+ }
+
+ @Override
+ public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
+ return delegate.multiPercolate(request);
+ }
+
+ @Override
+ public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
+ delegate.multiPercolate(request, listener);
+ }
+
+ @Override
+ public MultiPercolateRequestBuilder prepareMultiPercolate() {
+ return delegate.prepareMultiPercolate();
+ }
+
+ @Override
+ public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
+ return delegate.prepareExplain(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
+ return delegate.explain(request);
+ }
+
+ @Override
+ public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
+ delegate.explain(request, listener);
+ }
+
+ @Override
+ public ClearScrollRequestBuilder prepareClearScroll() {
+ return delegate.prepareClearScroll();
+ }
+
+ @Override
+ public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
+ return delegate.clearScroll(request);
+ }
+
+ @Override
+ public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) {
+ delegate.clearScroll(request, listener);
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return delegate.threadPool();
+ }
+
+ @Override
+ public Settings settings() {
+ return delegate.settings();
+ }
+
+ @Override
+ public String toString() {
+ return "randomized(" + super.toString() + ")";
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineModule.java b/src/test/java/org/elasticsearch/test/engine/MockEngineModule.java
new file mode 100644
index 0000000..d8f4c4a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/engine/MockEngineModule.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.engine.Engine;
+
+public class MockEngineModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(Engine.class).to(MockInternalEngine.class).asEagerSingleton();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java
new file mode 100644
index 0000000..f3a84e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.AssertingIndexSearcher;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.XSearcherManager;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.internal.InternalEngine;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.warmer.IndicesWarmer;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.lang.reflect.Constructor;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+public final class MockInternalEngine extends InternalEngine implements Engine {
+ public static final ConcurrentMap<AssertingSearcher, RuntimeException> INFLIGHT_ENGINE_SEARCHERS = new ConcurrentHashMap<AssertingSearcher, RuntimeException>();
+ public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio";
+ public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper";
+
+ private final Random random;
+ private final boolean wrapReader;
+ private final Class<? extends FilterDirectoryReader> wrapper;
+
+ @Inject
+ public MockInternalEngine(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
+ IndexSettingsService indexSettingsService, ShardIndexingService indexingService, @Nullable IndicesWarmer warmer, Store store,
+ SnapshotDeletionPolicy deletionPolicy, Translog translog, MergePolicyProvider mergePolicyProvider,
+ MergeSchedulerProvider mergeScheduler, AnalysisService analysisService, SimilarityService similarityService,
+ CodecService codecService) throws EngineException {
+ super(shardId, indexSettings, threadPool, indexSettingsService, indexingService, warmer, store,
+ deletionPolicy, translog, mergePolicyProvider, mergeScheduler, analysisService, similarityService, codecService);
+ final long seed = indexSettings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0L);
+ random = new Random(seed);
+ final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow
+ wrapper = indexSettings.getAsClass(READER_WRAPPER_TYPE, AssertingDirectoryReader.class);
+ wrapReader = random.nextDouble() < ratio;
+ if (logger.isTraceEnabled()) {
+ logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader);
+ }
+ }
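+
+ // Illustrative sketch: reader wrapping stays disabled unless the index settings opt
+ // in, e.g. (hypothetical test index settings):
+ //   ImmutableSettings.builder()
+ //       .put(MockInternalEngine.WRAP_READER_RATIO, 1.0d) // always wrap
+ //       .put(MockInternalEngine.READER_WRAPPER_TYPE, AssertingDirectoryReader.class.getName())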
+
+
+ public void close() throws ElasticsearchException {
+ try {
+ super.close();
+ } finally {
+ if (logger.isTraceEnabled()) {
+ // log at trace level if there are pending searchers
+ for (Entry<MockInternalEngine.AssertingSearcher, RuntimeException> entry : MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.entrySet()) {
+ logger.trace("Unreleased Searchers instance for shard [{}]", entry.getValue(), entry.getKey().shardId);
+ }
+ }
+ }
+ }
+
+ @Override
+ protected Searcher newSearcher(String source, IndexSearcher searcher, XSearcherManager manager) throws EngineException {
+
+ IndexReader reader = searcher.getIndexReader();
+ IndexReader wrappedReader = reader;
+ if (reader instanceof DirectoryReader && wrapReader) {
+ wrappedReader = wrapReader((DirectoryReader) reader);
+ }
+ // this executes basic query checks and asserts that weights are normalized only once etc.
+ final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(random, wrappedReader);
+ assertingIndexSearcher.setSimilarity(searcher.getSimilarity());
+ // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will
+ // be released later on. If we wrap an index reader here, we must not pass the wrapped version to the manager
+ // on release; otherwise the reader will be closed too early. If we get this wrong, things fail all over the place.
+ return new AssertingSearcher(assertingIndexSearcher, super.newSearcher(source, searcher, manager), shardId);
+ }
+
+ private DirectoryReader wrapReader(DirectoryReader reader) {
+ try {
+ Constructor<?>[] constructors = wrapper.getConstructors();
+ Constructor<?> nonRandom = null;
+ for (Constructor<?> constructor : constructors) {
+ Class<?>[] parameterTypes = constructor.getParameterTypes();
+ if (parameterTypes.length > 0 && parameterTypes[0] == DirectoryReader.class) {
+ if (parameterTypes.length == 1) {
+ nonRandom = constructor;
+ } else if (parameterTypes.length == 2 && parameterTypes[1] == Settings.class) {
+ return (DirectoryReader) constructor.newInstance(reader, indexSettings);
+ }
+ }
+ }
+ if (nonRandom != null) {
+ return (DirectoryReader) nonRandom.newInstance(reader);
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchException("Can not wrap reader", e);
+ }
+ return reader;
+ }
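+
+ // Contract implied by the reflection above: the configured wrapper class must
+ // expose a public (DirectoryReader) or (DirectoryReader, Settings) constructor;
+ // the two-argument form takes precedence. The DirectoryReaderWrapper base class
+ // below is a convenient starting point for such wrappers.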
+
+ public final class AssertingSearcher implements Searcher {
+ private final Searcher wrappedSearcher;
+ private final ShardId shardId;
+ private final IndexSearcher indexSearcher;
+ private RuntimeException firstReleaseStack;
+ private final Object lock = new Object();
+ private final int initialRefCount;
+
+ public AssertingSearcher(IndexSearcher indexSearcher, Searcher wrappedSearcher, ShardId shardId) {
+ // We only use the given index searcher here instead of the wrapped searcher's IndexSearcher;
+ // the given one might be a wrapping searcher over a wrapped reader.
+ this.wrappedSearcher = wrappedSearcher;
+ this.shardId = shardId;
+ initialRefCount = wrappedSearcher.reader().getRefCount();
+ this.indexSearcher = indexSearcher;
+ assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed";
+ INFLIGHT_ENGINE_SEARCHERS.put(this, new RuntimeException("Unreleased Searcher, source [" + wrappedSearcher.source() + "]"));
+ }
+
+ @Override
+ public String source() {
+ return wrappedSearcher.source();
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ RuntimeException remove = INFLIGHT_ENGINE_SEARCHERS.remove(this);
+ synchronized (lock) {
+ // make sure we only get this once and store the stack of the first caller!
+ if (remove == null) {
+ assert firstReleaseStack != null;
+ AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]");
+ error.initCause(firstReleaseStack);
+ throw error;
+ } else {
+ assert firstReleaseStack == null;
+ firstReleaseStack = new RuntimeException("Searcher Released first here, source [" + wrappedSearcher.source() + "]");
+ }
+ }
+ final int refCount = wrappedSearcher.reader().getRefCount();
+ // This assert may seem paranoid, but given LUCENE-5362 we'd better add assertions here to make sure
+ // we catch any potential problems.
+ assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]";
+ try {
+ return wrappedSearcher.release();
+ } catch (RuntimeException ex) {
+ logger.debug("Failed to release searcher", ex);
+ throw ex;
+ }
+ }
+
+ @Override
+ public IndexReader reader() {
+ return indexSearcher.getIndexReader();
+ }
+
+ @Override
+ public IndexSearcher searcher() {
+ return indexSearcher;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+ }
+
+ public static abstract class DirectoryReaderWrapper extends FilterDirectoryReader {
+ protected final SubReaderWrapper subReaderWrapper;
+
+ public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrapper) {
+ super(in, subReaderWrapper);
+ this.subReaderWrapper = subReaderWrapper;
+ }
+
+ @Override
+ public Object getCoreCacheKey() {
+ return in.getCoreCacheKey();
+ }
+
+ @Override
+ public Object getCombinedCoreAndDeletesKey() {
+ return in.getCombinedCoreAndDeletesKey();
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java b/src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java
new file mode 100644
index 0000000..cedb9de
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+
+import java.io.IOException;
+
+/**
+ * A FilterAtomicReader that allows exceptions to be thrown when certain methods
+ * are called on it. This allows testing parts of the system under error
+ * conditions that would otherwise be hard to reproduce.
+ */
+public class ThrowingAtomicReaderWrapper extends FilterAtomicReader {
+
+ private final Thrower thrower;
+
+ /**
+ * Flags passed to {@link Thrower#maybeThrow(org.elasticsearch.test.engine.ThrowingAtomicReaderWrapper.Flags)}
+ * when the corresponding method is called.
+ */
+ public enum Flags {
+ TermVectors,
+ Terms,
+ TermsEnum,
+ Intersect,
+ DocsEnum,
+ DocsAndPositionsEnum,
+ Fields,
+ Norms,
+ NumericDocValues,
+ BinaryDocValues,
+ SortedDocValues,
+ SortedSetDocValues;
+ }
+
+ /**
+ * A callback interface that allows certain exceptions to be thrown for
+ * methods called on the IndexReader wrapped by {@link ThrowingAtomicReaderWrapper}.
+ */
+ public interface Thrower {
+ /**
+ * Maybe throws an exception, depending on the given flag.
+ */
+ public void maybeThrow(Flags flag) throws IOException;
+
+ /**
+ * If this method returns <code>true</code>, the {@link Terms} instance for the given field
+ * is wrapped with Thrower support; otherwise no exception will be thrown for
+ * the current {@link Terms} instance or any other instance obtained from it.
+ */
+ public boolean wrapTerms(String field);
+ }
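+
+ // Illustrative sketch of a Thrower that fails term lookups roughly half the time
+ // ("random" is an assumed test-scoped Random):
+ //   Thrower thrower = new Thrower() {
+ //       @Override
+ //       public void maybeThrow(Flags flag) throws IOException {
+ //           if (flag == Flags.Terms && random.nextBoolean()) {
+ //               throw new IOException("simulated failure");
+ //           }
+ //       }
+ //       @Override
+ //       public boolean wrapTerms(String field) {
+ //           return true; // wrap every field
+ //       }
+ //   };
+ //   AtomicReader reader = new ThrowingAtomicReaderWrapper(in, thrower);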
+
+ public ThrowingAtomicReaderWrapper(AtomicReader in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+
+ @Override
+ public Fields fields() throws IOException {
+ Fields fields = super.fields();
+ thrower.maybeThrow(Flags.Fields);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ @Override
+ public Fields getTermVectors(int docID) throws IOException {
+ Fields fields = super.getTermVectors(docID);
+ thrower.maybeThrow(Flags.TermVectors);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ /**
+ * Wraps a Fields instance, adding Thrower support.
+ */
+ public static class ThrowingFields extends FilterFields {
+ private final Thrower thrower;
+
+ public ThrowingFields(Fields in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ Terms terms = super.terms(field);
+ if (thrower.wrapTerms(field)) {
+ thrower.maybeThrow(Flags.Terms);
+ return terms == null ? null : new ThrowingTerms(terms, thrower);
+ }
+ return terms;
+ }
+ }
+
+ /**
+ * Wraps a Terms instance, adding Thrower support.
+ */
+ public static class ThrowingTerms extends FilterTerms {
+ private final Thrower thrower;
+
+ public ThrowingTerms(Terms in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public TermsEnum intersect(CompiledAutomaton automaton, BytesRef bytes) throws IOException {
+ TermsEnum termsEnum = in.intersect(automaton, bytes);
+ thrower.maybeThrow(Flags.Intersect);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+
+ @Override
+ public TermsEnum iterator(TermsEnum reuse) throws IOException {
+ TermsEnum termsEnum = super.iterator(reuse);
+ thrower.maybeThrow(Flags.TermsEnum);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+ }
+
+ static class ThrowingTermsEnum extends FilterTermsEnum {
+ private final Thrower thrower;
+
+ public ThrowingTermsEnum(TermsEnum in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ thrower.maybeThrow(Flags.DocsEnum);
+ return super.docs(liveDocs, reuse, flags);
+ }
+
+ @Override
+ public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ thrower.maybeThrow(Flags.DocsAndPositionsEnum);
+ return super.docsAndPositions(liveDocs, reuse, flags);
+ }
+ }
+
+
+ @Override
+ public NumericDocValues getNumericDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.NumericDocValues);
+ return super.getNumericDocValues(field);
+ }
+
+ @Override
+ public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.BinaryDocValues);
+ return super.getBinaryDocValues(field);
+ }
+
+ @Override
+ public SortedDocValues getSortedDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedDocValues);
+ return super.getSortedDocValues(field);
+ }
+
+ @Override
+ public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedSetDocValues);
+ return super.getSortedSetDocValues(field);
+ }
+
+ @Override
+ public NumericDocValues getNormValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.Norms);
+ return super.getNormValues(field);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
new file mode 100644
index 0000000..b21e94d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Matcher;
+
+/**
+ * Assertions for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionAssertions {
+
+ public static Matcher<ImmutableOpenMap> hasKey(final String key) {
+ return new CollectionMatchers.ImmutableOpenMapHasKeyMatcher(key);
+ }
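+
+ // Illustrative sketch (assuming metaData().indices() returns an ImmutableOpenMap):
+ //   assertThat(clusterStateResponse.getState().metaData().indices(),
+ //           CollectionAssertions.hasKey("test"));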
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java b/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
new file mode 100644
index 0000000..521ba58
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Matchers for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionMatchers {
+
+ public static class ImmutableOpenMapHasKeyMatcher extends TypeSafeMatcher<ImmutableOpenMap> {
+
+ private final String key;
+
+ public ImmutableOpenMapHasKeyMatcher(String key) {
+ this.key = key;
+ }
+
+ @Override
+ protected boolean matchesSafely(ImmutableOpenMap item) {
+ return item.containsKey(key);
+ }
+
+ @Override
+ public void describeMismatchSafely(final ImmutableOpenMap map, final Description mismatchDescription) {
+ if (map.size() == 0) {
+ mismatchDescription.appendText("was empty");
+ } else {
+ mismatchDescription.appendText(" was ").appendValue(map);
+ }
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("ImmutableOpenMap should contain key " + key);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
new file mode 100644
index 0000000..7849d0f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.test.engine.MockInternalEngine;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.ElasticsearchTestCase.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Assertion helpers for Elasticsearch responses, used throughout the test infrastructure.
+ */
+public class ElasticsearchAssertions {
+
+ public static void assertAcked(AcknowledgedRequestBuilder<?, ?, ?> builder) {
+ assertAcked(builder.get());
+ }
+
+ public static void assertAcked(AcknowledgedResponse response) {
+ assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAcked(DeleteIndexRequestBuilder builder) {
+ assertAcked(builder.get());
+ }
+
+ public static void assertAcked(DeleteIndexResponse response) {
+ assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true));
+ assertVersionSerializable(response);
+ }
+
+ public static String formatShardStatus(BroadcastOperationResponse response) {
+ String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
+ + response.getFailedShards() + " shard failures:";
+ for (ShardOperationFailedException failure : response.getShardFailures()) {
+ msg += "\n " + failure.toString();
+ }
+ return msg;
+ }
+
+ public static String formatShardStatus(SearchResponse response) {
+ String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
+ + response.getFailedShards() + " shard failures:";
+ for (ShardSearchFailure failure : response.getShardFailures()) {
+ msg += "\n " + failure.toString();
+ }
+ return msg;
+ }
+
+ /*
+ * assertions
+ */
+ public static void assertHitCount(SearchResponse searchResponse, long expectedHitCount) {
+ if (searchResponse.getHits().totalHits() != expectedHitCount) {
+ fail("Hit count is " + searchResponse.getHits().totalHits() + " but " + expectedHitCount + " was expected. "
+ + formatShardStatus(searchResponse));
+ }
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertSearchHits(SearchResponse searchResponse, String... ids) {
+ String shardStatus = formatShardStatus(searchResponse);
+ assertThat("Expected different hit count. " + shardStatus, searchResponse.getHits().hits().length, equalTo(ids.length));
+
+ Set<String> idsSet = new HashSet<String>(Arrays.asList(ids));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat("Expected id: " + hit.getId() + " in the result but wasn't." + shardStatus, idsSet.remove(hit.getId()),
+ equalTo(true));
+ }
+ assertThat("Expected ids: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + " in the result - result size differs."
+ + shardStatus, idsSet.size(), equalTo(0));
+ assertVersionSerializable(searchResponse);
+ }
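+
+ // Illustrative sketch (hypothetical response): order-insensitive id check.
+ //   SearchResponse resp = client.prepareSearch("test").get();
+ //   assertSearchHits(resp, "1", "2", "3"); // exactly these ids, in any order
+ // Use assertOrderedSearchHits(...) below when the order matters.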
+
+ public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) {
+ String shardStatus = formatShardStatus(searchResponse);
+ assertThat("Expected different hit count. " + shardStatus, searchResponse.getHits().hits().length, equalTo(ids.length));
+ for (int i = 0; i < ids.length; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i]));
+ }
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertHitCount(CountResponse countResponse, long expectedHitCount) {
+ if (countResponse.getCount() != expectedHitCount) {
+ fail("Count is " + countResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(countResponse));
+ }
+ assertVersionSerializable(countResponse);
+ }
+
+ public static void assertMatchCount(PercolateResponse percolateResponse, long expectedHitCount) {
+ if (percolateResponse.getCount() != expectedHitCount) {
+ fail("Count is " + percolateResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(percolateResponse));
+ }
+ assertVersionSerializable(percolateResponse);
+ }
+
+ public static void assertExists(GetResponse response) {
+ String message = String.format(Locale.ROOT, "Expected %s/%s/%s to exist, but does not", response.getIndex(), response.getType(), response.getId());
+ assertThat(message, response.isExists(), is(true));
+ }
+
+ public static void assertFirstHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 1, matcher);
+ }
+
+ public static void assertSecondHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 2, matcher);
+ }
+
+ public static void assertThirdHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 3, matcher);
+ }
+
+ public static void assertSearchHit(SearchResponse searchResponse, int number, Matcher<SearchHit> matcher) {
+ assertThat("SearchHit number must be greater than 0", number, greaterThan(0));
+ assertThat(searchResponse.getHits().totalHits(), greaterThanOrEqualTo((long) number));
+ assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher);
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertNoFailures(SearchResponse searchResponse) {
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()),
+ searchResponse.getShardFailures().length, equalTo(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertFailures(SearchResponse searchResponse) {
+ assertThat("Expected at least one shard failure, got none",
+ searchResponse.getShardFailures().length, greaterThan(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertNoFailures(BroadcastOperationResponse response) {
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAllSuccessful(BroadcastOperationResponse response) {
+ assertNoFailures(response);
+ assertThat("Expected all shards successful but got successful [" + response.getSuccessfulShards() + "] total [" + response.getTotalShards() + "]",
+ response.getTotalShards(), equalTo(response.getSuccessfulShards()));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertSearchHit(SearchHit searchHit, Matcher<SearchHit> matcher) {
+ assertThat(searchHit, matcher);
+ assertVersionSerializable(searchHit);
+ }
+
+ public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<String> matcher) {
+ assertHighlight(resp, hit, field, fragment, greaterThan(fragment), matcher);
+ }
+
+ public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+ assertHighlight(resp, hit, field, fragment, equalTo(totalFragments), matcher);
+ }
+
+ public static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<String> matcher) {
+ assertHighlight(hit, field, fragment, greaterThan(fragment), matcher);
+ }
+
+ public static void assertHighlight(SearchHit hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+ assertHighlight(hit, field, fragment, equalTo(totalFragments), matcher);
+ }
+
+ private static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+ assertNoFailures(resp);
+ assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+ assertHighlight(resp.getHits().hits()[hit], field, fragment, fragmentsMatcher, matcher);
+ assertVersionSerializable(resp);
+ }
+
+ private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+ assertThat(hit.getHighlightFields(), hasKey(field));
+ assertThat(hit.getHighlightFields().get(field).fragments().length, fragmentsMatcher);
+ assertThat(hit.getHighlightFields().get(field).fragments()[fragment].string(), matcher);
+ }
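+
+ /*
+ * Illustrative usage sketch (editorial addition, not part of the upstream sources);
+ * the field name "title" and the expected fragment text are hypothetical:
+ *
+ * assertHighlight(resp, 0, "title", 0, 1, equalTo("<em>foo</em> bar"));
+ */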
+
+ public static void assertNotHighlighted(SearchResponse resp, int hit, String field) {
+ assertNoFailures(resp);
+ assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+ assertThat(resp.getHits().hits()[hit].getHighlightFields(), not(hasKey(field)));
+ }
+
+ public static void assertSuggestionSize(Suggest searchSuggest, int entry, int size, String key) {
+ assertThat(searchSuggest, notNullValue());
+ assertThat(searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(searchSuggest.getSuggestion(key).getName(), equalTo(key));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size));
+ assertVersionSerializable(searchSuggest);
+ }
+
+ public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, String key, String text) {
+ assertThat(searchSuggest, notNullValue());
+ assertThat(searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(searchSuggest.getSuggestion(key).getName(), equalTo(key));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text));
+ assertVersionSerializable(searchSuggest);
+ }
+
+ /**
+ * Assert suggestion returns exactly the provided text.
+ */
+ public static void assertSuggestion(Suggest searchSuggest, int entry, String key, String... text) {
+ assertSuggestion(searchSuggest, entry, key, text.length, text);
+ }
+
+ /**
+ * Assert that the suggestion holds size options and that the first
+ * options match the provided text.
+ */
+ public static void assertSuggestion(Suggest searchSuggest, int entry, String key, int size, String... text) {
+ assertSuggestionSize(searchSuggest, entry, size, key);
+ for (int i = 0; i < text.length; i++) {
+ assertSuggestion(searchSuggest, entry, i, key, text[i]);
+ }
+ }
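+
+ /*
+ * Illustrative usage sketch (editorial addition); the suggestion name and the
+ * expected corrections are hypothetical:
+ *
+ * assertSuggestionSize(suggest, 0, 2, "my_suggestion");
+ * assertSuggestion(suggest, 0, "my_suggestion", "foo", "bar");
+ */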
+
+ /*
+ * matchers
+ */
+ public static Matcher<SearchHit> hasId(final String id) {
+ return new ElasticsearchMatchers.SearchHitHasIdMatcher(id);
+ }
+
+ public static Matcher<SearchHit> hasType(final String type) {
+ return new ElasticsearchMatchers.SearchHitHasTypeMatcher(type);
+ }
+
+ public static Matcher<SearchHit> hasIndex(final String index) {
+ return new ElasticsearchMatchers.SearchHitHasIndexMatcher(index);
+ }
+
+ public static Matcher<SearchHit> hasScore(final float score) {
+ return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score);
+ }
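+
+ /*
+ * Illustrative usage sketch (editorial addition); the index name, document id and
+ * the client() accessor (assumed to return the test cluster client) are hypothetical:
+ *
+ * SearchResponse resp = client().prepareSearch("test").get();
+ * assertOrderedSearchHits(resp, "1");
+ * assertFirstHit(resp, hasId("1"));
+ * assertSearchHit(resp, 1, hasIndex("test"));
+ */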
+
+ public static <T extends Query> T assertBooleanSubQuery(Query query, Class<T> subqueryType, int i) {
+ assertThat(query, instanceOf(BooleanQuery.class));
+ BooleanQuery q = (BooleanQuery) query;
+ assertThat(q.getClauses().length, greaterThan(i));
+ assertThat(q.getClauses()[i].getQuery(), instanceOf(subqueryType));
+ return (T) q.getClauses()[i].getQuery();
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass) {
+ assertThrows(builder.execute(), exceptionClass);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass, String extraInfo) {
+ assertThrows(builder.execute(), exceptionClass, extraInfo);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass) {
+ assertThrows(future, exceptionClass, null);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, String extraInfo) {
+ boolean fail = false;
+ extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
+ extraInfo += "expected a " + exceptionClass + " exception to be thrown";
+
+ try {
+ future.actionGet();
+ fail = true;
+
+ } catch (ElasticsearchException esException) {
+ assertThat(extraInfo, esException.unwrapCause(), instanceOf(exceptionClass));
+ } catch (Throwable e) {
+ assertThat(extraInfo, e, instanceOf(exceptionClass));
+ }
+ // has to be outside catch clause to get a proper message
+ if (fail) {
+ throw new AssertionError(extraInfo);
+ }
+ }
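+
+ /*
+ * Illustrative usage sketch (editorial addition); assumes "missing" is an index
+ * that does not exist, so the search fails with an IndexMissingException:
+ *
+ * assertThrows(client().prepareSearch("missing"), IndexMissingException.class);
+ */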
+
+ private static BytesReference serialize(Version version, Streamable streamable) throws IOException {
+ BytesStreamOutput output = new BytesStreamOutput();
+ output.setVersion(version);
+ streamable.writeTo(output);
+ output.flush();
+ return output.bytes();
+ }
+
+ public static void assertVersionSerializable(Streamable streamable) {
+ assertTrue(Version.CURRENT.after(getPreviousVersion()));
+ assertVersionSerializable(randomVersion(), streamable);
+ }
+
+ public static void assertVersionSerializable(Version version, Streamable streamable) {
+ try {
+ Streamable newInstance = tryCreateNewInstance(streamable);
+ if (newInstance == null) {
+ return; // can't create a new instance - we never modify a
+ // streamable that comes in.
+ }
+ if (streamable instanceof ActionRequest) {
+ ((ActionRequest<?>)streamable).validate();
+ }
+ BytesReference orig = serialize(version, streamable);
+ StreamInput input = new BytesStreamInput(orig);
+ input.setVersion(version);
+ newInstance.readFrom(input);
+ assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(), equalTo(0));
+ assertThat("Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable + "]", serialize(version, streamable), equalTo(orig));
+ } catch (Throwable ex) {
+ throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex);
+ }
+
+ }
+
+ private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException,
+ IllegalAccessException, InvocationTargetException {
+ try {
+ Class<? extends Streamable> clazz = streamable.getClass();
+ Constructor<? extends Streamable> constructor = clazz.getDeclaredConstructor();
+ assertThat(constructor, Matchers.notNullValue());
+ constructor.setAccessible(true);
+ Streamable newInstance = constructor.newInstance();
+ return newInstance;
+ } catch (Throwable e) {
+ return null;
+ }
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
+ * any of the shards threw an exception and if the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchRequestBuilder request) {
+ return assertSearchResponse(request.get());
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
+ * any of the shards threw an exception and if the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchResponse response) {
+ assertNoFailures(response);
+ assertThat("One or more shards were not successful but didn't trigger a failure", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ return response;
+ }
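+
+ /*
+ * Illustrative usage sketch (editorial addition); the index name is hypothetical.
+ * Wrapping the request execution makes shard failures surface as assertion errors:
+ *
+ * SearchResponse resp = assertSearchResponse(client().prepareSearch("test"));
+ */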
+
+ public static void assertAllSearchersClosed() {
+ /* in some cases we finish a test faster than the freeContext calls make it to the
+ * shards. Let's wait for some time if there are still searchers. If they are really
+ * pending we will fail anyway.*/
+ try {
+ if (awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ return MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.isEmpty();
+ }
+ }, 5, TimeUnit.SECONDS)) {
+ return;
+ }
+ } catch (InterruptedException ex) {
+ if (MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.isEmpty()) {
+ return;
+ }
+ }
+ try {
+ RuntimeException ex = null;
+ StringBuilder builder = new StringBuilder("Unclosed Searchers instance for shards: [");
+ for (Map.Entry<MockInternalEngine.AssertingSearcher, RuntimeException> entry : MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.entrySet()) {
+ ex = entry.getValue();
+ builder.append(entry.getKey().shardId()).append(",");
+ }
+ builder.append("]");
+ throw new RuntimeException(builder.toString(), ex);
+ } finally {
+ MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.clear();
+ }
+ }
+
+ public static void assertAllFilesClosed() throws IOException {
+ try {
+ for (MockDirectoryHelper.ElasticsearchMockDirectoryWrapper w : MockDirectoryHelper.wrappers) {
+ if (w.isOpen()) {
+ w.closeWithRuntimeException();
+ }
+ }
+ } finally {
+ MockDirectoryHelper.wrappers.clear();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java
new file mode 100644
index 0000000..fe6a06b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.spatial4j.core.shape.jts.JtsPoint;
+import com.vividsolutions.jts.geom.*;
+import org.junit.Assert;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class ElasticsearchGeoAssertions {
+
+ private static int top(Coordinate...points) {
+ int top = 0;
+ for (int i = 1; i < points.length; i++) {
+ if(points[i].y < points[top].y) {
+ top = i;
+ } else if(points[i].y == points[top].y) {
+ if(points[i].x <= points[top].x) {
+ top = i;
+ }
+ }
+ }
+ return top;
+ }
+
+ private static int prev(int top, Coordinate...points) {
+ for (int i = 1; i < points.length; i++) {
+ int p = (top + points.length - i) % points.length;
+ if((points[p].x != points[top].x) || (points[p].y != points[top].y)) {
+ return p;
+ }
+ }
+ return -1;
+ }
+
+ private static int next(int top, Coordinate...points) {
+ for (int i = 1; i < points.length; i++) {
+ int n = (top + i) % points.length;
+ if((points[n].x != points[top].x) || (points[n].y != points[top].y)) {
+ return n;
+ }
+ }
+ return -1;
+ }
+
+ private static Coordinate[] fixedOrderedRing(List<Coordinate> coordinates, boolean direction) {
+ return fixedOrderedRing(coordinates.toArray(new Coordinate[coordinates.size()]), direction);
+ }
+
+ private static Coordinate[] fixedOrderedRing(Coordinate[] points, boolean direction) {
+
+ final int top = top(points);
+ final int next = next(top, points);
+ final int prev = prev(top, points);
+ final boolean orientation = points[next].x < points[prev].x;
+
+ if(orientation != direction) {
+ List<Coordinate> asList = Arrays.asList(points);
+ Collections.reverse(asList);
+ return fixedOrderedRing(asList, direction);
+ } else {
+ if(top>0) {
+ Coordinate[] aligned = new Coordinate[points.length];
+ System.arraycopy(points, top, aligned, 0, points.length-top-1);
+ System.arraycopy(points, 0, aligned, points.length-top-1, top);
+ aligned[aligned.length-1] = aligned[0];
+ return aligned;
+ } else {
+ return points;
+ }
+ }
+
+ }
+
+ public static void assertEquals(Coordinate c1, Coordinate c2) {
+ assertTrue("expected coordinate " + c1 + " but found " + c2, c1.x == c2.x && c1.y == c2.y);
+ }
+
+ private static boolean isRing(Coordinate[] c) {
+ return (c[0].x == c[c.length-1].x) && (c[0].y == c[c.length-1].y);
+ }
+
+ public static void assertEquals(Coordinate[] c1, Coordinate[] c2) {
+ Assert.assertEquals(c1.length, c2.length);
+
+ if(isRing(c1) && isRing(c2)) {
+ c1 = fixedOrderedRing(c1, true);
+ c2 = fixedOrderedRing(c2, true);
+ }
+
+ for (int i = 0; i < c2.length; i++) {
+ assertEquals(c1[i], c2[i]);
+ }
+ }
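+
+ /*
+ * Illustrative sketch (editorial addition): ring equality above is insensitive to
+ * the starting point and the winding direction, so these two closed rings over the
+ * same unit square compare equal:
+ *
+ * Coordinate[] a = {new Coordinate(0, 0), new Coordinate(1, 0), new Coordinate(1, 1), new Coordinate(0, 1), new Coordinate(0, 0)};
+ * Coordinate[] b = {new Coordinate(1, 1), new Coordinate(0, 1), new Coordinate(0, 0), new Coordinate(1, 0), new Coordinate(1, 1)};
+ * assertEquals(a, b);
+ */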
+
+ public static void assertEquals(LineString l1, LineString l2) {
+ assertEquals(l1.getCoordinates(), l2.getCoordinates());
+ }
+
+ public static void assertEquals(Polygon p1, Polygon p2) {
+ Assert.assertEquals(p1.getNumInteriorRing(), p2.getNumInteriorRing());
+
+ assertEquals(p1.getExteriorRing(), p2.getExteriorRing());
+
+ // TODO: This test does not check all permutations of the linestrings, so it
+ // fails if the holes of the polygons are not ordered the same way
+ for (int i = 0; i < p1.getNumInteriorRing(); i++) {
+ assertEquals(p1.getInteriorRingN(i), p2.getInteriorRingN(i));
+ }
+ }
+
+ public static void assertEquals(MultiPolygon p1, MultiPolygon p2) {
+ Assert.assertEquals(p1.getNumGeometries(), p2.getNumGeometries());
+
+ // TODO: This test does not check all permutations, so it fails
+ // if the inner polygons are not ordered the same way in both MultiPolygons
+ for (int i = 0; i < p1.getNumGeometries(); i++) {
+ Geometry a = p1.getGeometryN(i);
+ Geometry b = p2.getGeometryN(i);
+ assertEquals(a, b);
+ }
+ }
+
+ public static void assertEquals(Geometry s1, Geometry s2) {
+ if(s1 instanceof LineString && s2 instanceof LineString) {
+ assertEquals((LineString) s1, (LineString) s2);
+
+ } else if (s1 instanceof Polygon && s2 instanceof Polygon) {
+ assertEquals((Polygon) s1, (Polygon) s2);
+
+ } else if (s1 instanceof MultiPoint && s2 instanceof MultiPoint) {
+ Assert.assertEquals(s1, s2);
+
+ } else if (s1 instanceof MultiPolygon && s2 instanceof MultiPolygon) {
+ assertEquals((MultiPolygon) s1, (MultiPolygon) s2);
+
+ } else {
+ throw new RuntimeException("equality of shape types not supported [" + s1.getClass().getName() + " and " + s2.getClass().getName() + "]");
+ }
+ }
+
+ public static void assertEquals(JtsGeometry g1, JtsGeometry g2) {
+ assertEquals(g1.getGeom(), g2.getGeom());
+ }
+
+ public static void assertEquals(Shape s1, Shape s2) {
+ if(s1 instanceof JtsGeometry && s2 instanceof JtsGeometry) {
+ assertEquals((JtsGeometry) s1, (JtsGeometry) s2);
+ } else if(s1 instanceof JtsPoint && s2 instanceof JtsPoint) {
+ JtsPoint p1 = (JtsPoint) s1;
+ JtsPoint p2 = (JtsPoint) s2;
+ Assert.assertEquals(p1, p2);
+ } else {
+ throw new RuntimeException("equality of shape types not supported [" + s1.getClass().getName() + " and " + s2.getClass().getName() + "]");
+ }
+ }
+
+ private static Geometry unwrap(Shape shape) {
+ assertThat(shape, instanceOf(JtsGeometry.class));
+ return ((JtsGeometry)shape).getGeom();
+ }
+
+ public static void assertMultiPolygon(Shape shape) {
+ assert(unwrap(shape) instanceof MultiPolygon): "expected MultiPolygon but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertPolygon(Shape shape) {
+ assert(unwrap(shape) instanceof Polygon): "expected Polygon but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertLineString(Shape shape) {
+ assert(unwrap(shape) instanceof LineString): "expected LineString but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertMultiLineString(Shape shape) {
+ assert(unwrap(shape) instanceof MultiLineString): "expected MultiLineString but found " + unwrap(shape).getClass().getName();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
new file mode 100644
index 0000000..f49cc3b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.search.SearchHit;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+public class ElasticsearchMatchers {
+
+ public static class SearchHitHasIdMatcher extends TypeSafeMatcher<SearchHit> {
+ private String id;
+
+ public SearchHitHasIdMatcher(String id) {
+ this.id = id;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getId().equals(id);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getId());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit id should be ").appendValue(id);
+ }
+ }
+
+ public static class SearchHitHasTypeMatcher extends TypeSafeMatcher<SearchHit> {
+ private String type;
+
+ public SearchHitHasTypeMatcher(String type) {
+ this.type = type;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getType().equals(type);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getType());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit type should be ").appendValue(type);
+ }
+ }
+
+ public static class SearchHitHasIndexMatcher extends TypeSafeMatcher<SearchHit> {
+ private String index;
+
+ public SearchHitHasIndexMatcher(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getIndex().equals(index);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getIndex());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit index should be ").appendValue(index);
+ }
+ }
+
+ public static class SearchHitHasScoreMatcher extends TypeSafeMatcher<SearchHit> {
+ private float score;
+
+ public SearchHitHasScoreMatcher(float score) {
+ this.score = score;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getScore() == score;
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getScore());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit score should be ").appendValue(score);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
new file mode 100644
index 0000000..62c35e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.util.regex.Pattern;
+
+/**
+ * Matcher that supports regular expressions and allows providing optional pattern flags
+ */
+public class RegexMatcher extends TypeSafeMatcher<String> {
+
+ private final String regex;
+ private final Pattern pattern;
+
+ public RegexMatcher(String regex) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex);
+ }
+
+ public RegexMatcher(String regex, int flag) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex, flag);
+ }
+
+ @Override
+ protected boolean matchesSafely(String item) {
+ return pattern.matcher(item).find();
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(regex);
+ }
+
+ public static RegexMatcher matches(String regex) {
+ return new RegexMatcher(regex);
+ }
+
+ public static RegexMatcher matches(String regex, int flag) {
+ return new RegexMatcher(regex, flag);
+ }
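+
+ /*
+ * Illustrative usage sketch (editorial addition); the matched strings are
+ * hypothetical:
+ *
+ * assertThat("took 53ms", RegexMatcher.matches("took \\d+ms"));
+ * assertThat("TOOK 53MS", RegexMatcher.matches("took \\d+ms", Pattern.CASE_INSENSITIVE));
+ */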
+}
diff --git a/src/test/java/org/elasticsearch/test/junit/annotations/Network.java b/src/test/java/org/elasticsearch/test/junit/annotations/Network.java
new file mode 100644
index 0000000..d2615ea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/annotations/Network.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+
+import java.lang.annotation.*;
+
+/**
+ * Annotation used to mark tests that require internet connectivity to run.
+ * By default, tests annotated with @Network won't be executed.
+ * Set -Dtests.network=true when running the tests to enable them.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Inherited
+@TestGroup(enabled = false, sysProperty = "tests.network")
+public @interface Network {
+}
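+
+// Illustrative usage sketch (editorial addition); the test method is hypothetical:
+//
+// @Network
+// public void testResolvesExternalHost() { /* runs only with -Dtests.network=true */ }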
diff --git a/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java
new file mode 100644
index 0000000..e09cc75
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.ElementType.PACKAGE;
+import static java.lang.annotation.ElementType.TYPE;
+
+/**
+ * Annotation used to set a custom log level for a specific test method.
+ *
+ * It accepts multiple comma separated logger:level pairs.
+ * Use the _root keyword to set the root logger level,
+ * e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
+ * or just @TestLogging("_root:DEBUG,cluster.metadata:TRACE") since we start the test with -Des.logger.prefix=
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({PACKAGE, TYPE, METHOD})
+public @interface TestLogging {
+ String value();
+}
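+
+// Illustrative usage sketch (editorial addition); the logger names are hypothetical:
+//
+// @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
+// public void testClusterMetadataVerbosely() { /* runs with the custom log levels */ }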
diff --git a/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
new file mode 100644
index 0000000..77793ee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.runner.Description;
+import org.junit.runner.Result;
+import org.junit.runner.notification.RunListener;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link RunListener} that allows changing the log level for a specific test method.
+ * When a test method is annotated with the {@link org.elasticsearch.test.junit.annotations.TestLogging} annotation, the levels of the specified loggers
+ * are saved before the test method executes and overridden with the requested ones.
+ * Once the test method has finished, the original logger levels are restored.
+ *
+ * Note: This class is not thread-safe. Given the static nature of the logging api, it assumes that tests
+ * are never run concurrently in the same jvm. For the very same reason no synchronization has been implemented
+ * around saving and restoring the original logger levels.
+ */
+public class LoggingListener extends RunListener {
+
+ private Map<String, String> previousLoggingMap;
+ private Map<String, String> previousClassLoggingMap;
+ private Map<String, String> previousPackageLoggingMap;
+
+ @Override
+ public void testRunStarted(Description description) throws Exception {
+ previousPackageLoggingMap = processTestLogging(description.getTestClass().getPackage().getAnnotation(TestLogging.class));
+ previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class));
+ }
+
+ @Override
+ public void testRunFinished(Result result) throws Exception {
+ previousClassLoggingMap = reset(previousClassLoggingMap);
+ previousPackageLoggingMap = reset(previousPackageLoggingMap);
+ }
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ final TestLogging testLogging = description.getAnnotation(TestLogging.class);
+ previousLoggingMap = processTestLogging(testLogging);
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ previousLoggingMap = reset(previousLoggingMap);
+ }
+
+ private static ESLogger resolveLogger(String loggerName) {
+ if (loggerName.equalsIgnoreCase("_root")) {
+ return ESLoggerFactory.getRootLogger();
+ }
+ return Loggers.getLogger(loggerName);
+ }
+
+ private Map<String, String> processTestLogging(TestLogging testLogging) {
+ if (testLogging == null) {
+ return null;
+ }
+ Map<String, String> map = new HashMap<String, String>();
+ final String[] loggersAndLevels = testLogging.value().split(",");
+ for (String loggerAndLevel : loggersAndLevels) {
+ String[] loggerAndLevelArray = loggerAndLevel.split(":");
+ if (loggerAndLevelArray.length >= 2) {
+ String loggerName = loggerAndLevelArray[0];
+ String level = loggerAndLevelArray[1];
+ ESLogger esLogger = resolveLogger(loggerName);
+ map.put(loggerName, esLogger.getLevel());
+ esLogger.setLevel(level);
+ }
+ }
+ return map;
+ }
+
+ private Map<String, String> reset(Map<String, String> map) {
+ if (map != null) {
+ for (Map.Entry<String, String> previousLogger : map.entrySet()) {
+ ESLogger esLogger = resolveLogger(previousLogger.getKey());
+ esLogger.setLevel(previousLogger.getValue());
+ }
+ }
+ return null;
+ }
+}
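+
+// Illustrative usage sketch (editorial addition): with randomizedtesting the listener
+// can be registered on a base test class via the @Listeners annotation (an assumption,
+// mirroring how the other run listeners in this package are typically wired up):
+//
+// @Listeners({LoggingListener.class})
+// public abstract class MyIntegrationTestBase extends RandomizedTest { ... }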
diff --git a/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
new file mode 100644
index 0000000..5a1e47b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import com.carrotsearch.randomizedtesting.TraceFormatting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestCluster;
+import org.junit.internal.AssumptionViolatedException;
+import org.junit.runner.Description;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS;
+
+/**
+ * A {@link RunListener} that logs a string with the command line parameters
+ * allowing a quick test re-run from the Maven command line.
+ */
+public class ReproduceInfoPrinter extends RunListener {
+
+ protected final ESLogger logger = Loggers.getLogger(ElasticsearchTestCase.class);
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ logger.info("Test {} started", description.getDisplayName());
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ logger.info("Test {} finished", description.getDisplayName());
+ }
+
+ @Override
+ public void testFailure(Failure failure) throws Exception {
+ // Ignore assumptions.
+ if (failure.getException() instanceof AssumptionViolatedException) {
+ return;
+ }
+
+ final Description d = failure.getDescription();
+ final StringBuilder b = new StringBuilder();
+ b.append("FAILURE : ").append(d.getDisplayName()).append("\n");
+ b.append("REPRODUCE WITH : mvn test");
+ reproduceErrorMessageBuilder(b).appendAllOpts(failure.getDescription());
+
+ b.append("\n");
+ b.append("Throwable:\n");
+ if (failure.getException() != null) {
+ traces().formatThrowable(b, failure.getException());
+ }
+
+ logger.error(b.toString());
+ }
+
+ protected ReproduceErrorMessageBuilder reproduceErrorMessageBuilder(StringBuilder b) {
+ return new MavenMessageBuilder(b);
+ }
+
+ protected TraceFormatting traces() {
+ TraceFormatting traces = new TraceFormatting();
+ try {
+ traces = RandomizedContext.current().getRunner().getTraceFormatting();
+ } catch (IllegalStateException e) {
+ // Ignore if no context.
+ }
+ return traces;
+ }
+
+ protected static class MavenMessageBuilder extends ReproduceErrorMessageBuilder {
+
+ public MavenMessageBuilder(StringBuilder b) {
+ super(b);
+ }
+
+ @Override
+ public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
+ super.appendAllOpts(description);
+ return appendESProperties();
+ }
+
+ /**
+ * Append a single VM option.
+ */
+ @Override
+ public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) {
+ if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there!
+ return this;
+ }
+ if (Strings.hasLength(value)) {
+ return super.appendOpt(sysPropName, value);
+ }
+ return this;
+ }
+
+ public ReproduceErrorMessageBuilder appendESProperties() {
+ appendProperties("es.logger.level", "es.node.mode", "es.node.local", TestCluster.TESTS_ENABLE_MOCK_MODULES,
+ "tests.assertion.disabled", "tests.security.manager", "tests.nighly", "tests.jvms", "tests.client.ratio");
+ if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {
+ appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\"");
+ }
+ return this;
+ }
+
+ protected ReproduceErrorMessageBuilder appendProperties(String... properties) {
+ for (String sysPropName : properties) {
+ if (Strings.hasLength(System.getProperty(sysPropName))) {
+ appendOpt(sysPropName, System.getProperty(sysPropName));
+ }
+ }
+ return this;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java
new file mode 100644
index 0000000..26841aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.test.rest.junit.RestTestSuiteRunner;
+import org.junit.runner.RunWith;
+
+import static org.apache.lucene.util.LuceneTestCase.Slow;
+
+/**
+ * Runs the clients test suite against an elasticsearch node, which can be an external node or an automatically created cluster.
+ * Communicates with elasticsearch exclusively via the REST layer.
+ *
+ * @see RestTestSuiteRunner for extensive documentation and all the supported options
+ */
+@Slow
+@RunWith(RestTestSuiteRunner.class)
+public class ElasticsearchRestTests {
+
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
new file mode 100644
index 0000000..03b19c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.rest.client.RestClient;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Execution context passed across the REST tests.
+ * Holds the REST client used to communicate with elasticsearch.
+ * Caches the last obtained test response and allows stashing parts of it within variables
+ * that can be used as input values in subsequent requests.
+ */
+public class RestTestExecutionContext implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestTestExecutionContext.class);
+
+ private final RestClient restClient;
+
+ private final String esVersion;
+
+ private final Stash stash = new Stash();
+
+ private RestResponse response;
+
+ public RestTestExecutionContext(InetSocketAddress[] addresses, RestSpec restSpec) throws RestException, IOException {
+ this.restClient = new RestClient(addresses, restSpec);
+ this.esVersion = restClient.getEsVersion();
+ }
+
+ /**
+ * Calls an elasticsearch api with the parameters and request body provided as arguments.
+ * Saves the obtained response in the execution context.
+ * @throws RestException if the returned status code is non ok
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies) throws IOException, RestException {
+ //makes a copy of the parameters before modifying them for this specific request
+ HashMap<String, String> requestParams = Maps.newHashMap(params);
+ for (Map.Entry<String, String> entry : requestParams.entrySet()) {
+ if (stash.isStashedValue(entry.getValue())) {
+ entry.setValue(stash.unstashValue(entry.getValue()).toString());
+ }
+ }
+
+ String body = actualBody(bodies);
+
+ try {
+ response = callApiInternal(apiName, requestParams, body);
+ //we always stash the last response body
+ stash.stashValue("body", response.getBody());
+ return response;
+ } catch(RestException e) {
+ response = e.restResponse();
+ throw e;
+ }
+ }
+
+ private String actualBody(List<Map<String, Object>> bodies) throws IOException {
+ if (bodies.isEmpty()) {
+ return "";
+ }
+
+ if (bodies.size() == 1) {
+ return bodyAsString(stash.unstashMap(bodies.get(0)));
+ }
+
+ StringBuilder bodyBuilder = new StringBuilder();
+ for (Map<String, Object> body : bodies) {
+ bodyBuilder.append(bodyAsString(stash.unstashMap(body))).append("\n");
+ }
+ return bodyBuilder.toString();
+ }
+
+ private String bodyAsString(Map<String, Object> body) throws IOException {
+ return XContentFactory.jsonBuilder().map(body).string();
+ }
+
+ /**
+ * Calls an elasticsearch api internally without saving the obtained response in the context.
+ * Useful for internal calls (e.g. delete index during teardown)
+ * @throws RestException if the returned status code is non ok
+ */
+ public RestResponse callApiInternal(String apiName, String... params) throws IOException, RestException {
+ return restClient.callApi(apiName, params);
+ }
+
+ private RestResponse callApiInternal(String apiName, Map<String, String> params, String body) throws IOException, RestException {
+ return restClient.callApi(apiName, params, body);
+ }
+
+ /**
+ * Extracts a specific value from the last saved response
+ */
+ public Object response(String path) throws IOException {
+ return response.evaluate(path);
+ }
+
+ /**
+ * Clears the last obtained response and the stashed fields
+ */
+ public void clear() {
+ logger.debug("resetting response and stash");
+ response = null;
+ stash.clear();
+ }
+
+ public Stash stash() {
+ return stash;
+ }
+
+ /**
+ * Returns the current es version as a string
+ */
+ public String esVersion() {
+ return esVersion;
+ }
+
+ /**
+ * Closes the execution context and releases the underlying resources
+ */
+ public void close() {
+ this.restClient.close();
+ }
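+
+ /*
+ * Illustrative usage sketch (editorial addition); the api name, the params/bodies
+ * arguments and the stash key are hypothetical:
+ *
+ * RestResponse resp = context.callApi("search", params, bodies);
+ * Object scrollId = context.response("_scroll_id");
+ * context.stash().stashValue("scroll_id", scrollId);
+ */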
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/Stash.java b/src/test/java/org/elasticsearch/test/rest/Stash.java
new file mode 100644
index 0000000..3179ecc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/Stash.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Caches the last obtained test response, or parts of it, within variables
+ * that can be used as input values in subsequent requests and assertions.
+ */
+public class Stash {
+
+ private static final ESLogger logger = Loggers.getLogger(Stash.class);
+
+ private final Map<String, Object> stash = Maps.newHashMap();
+
+ /**
+ * Saves a specific field in the stash as a key-value pair
+ */
+ public void stashValue(String key, Object value) {
+ logger.debug("stashing [{}]=[{}]", key, value);
+ Object old = stash.put(key, value);
+ if (old != null && old != value) {
+ logger.trace("replaced stashed value [{}] with same key [{}]", old, key);
+ }
+ }
+
+ /**
+ * Clears the previously stashed values
+ */
+ public void clear() {
+ stash.clear();
+ }
+
+ /**
+ * Tells whether a particular value needs to be looked up in the stash.
+ * The stash contains fields previously extracted from responses that can be reused
+ * as arguments for subsequent requests (e.g. scroll_id)
+ */
+ public boolean isStashedValue(Object key) {
+ if (key == null) {
+ return false;
+ }
+ String stashKey = key.toString();
+ return Strings.hasLength(stashKey) && stashKey.startsWith("$");
+ }
+
+ /**
+ * Extracts a value from the current stash.
+ * The stash contains fields previously extracted from responses that can be reused
+ * as arguments for subsequent requests (e.g. scroll_id)
+ */
+ public Object unstashValue(String value) {
+ Object stashedValue = stash.get(value.substring(1));
+ if (stashedValue == null) {
+ throw new IllegalArgumentException("stashed value not found for key [" + value + "]");
+ }
+ return stashedValue;
+ }
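+
+ /*
+ * Illustrative usage sketch (editorial addition): values are stashed under a plain
+ * key and referenced later with a leading "$"; the key and value are hypothetical:
+ *
+ * stash.stashValue("scroll_id", "c2Nhbjs1OzE=");
+ * stash.isStashedValue("$scroll_id"); // true
+ * stash.unstashValue("$scroll_id"); // "c2Nhbjs1OzE="
+ */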
+
+ /**
+ * Recursively unstashes map values if needed
+ */
+ public Map<String, Object> unstashMap(Map<String, Object> map) {
+ Map<String, Object> copy = Maps.newHashMap(map);
+ unstashObject(copy);
+ return copy;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void unstashObject(Object obj) {
+ if (obj instanceof List) {
+ List list = (List)obj;
+ for (int i = 0; i < list.size(); i++) {
+ Object o = list.get(i);
+ if (isStashedValue(o)) {
+ list.set(i, unstashValue(o.toString()));
+ } else {
+ unstashObject(o);
+ }
+ }
+ }
+ if (obj instanceof Map) {
+ Map<String, Object> map = (Map) obj;
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (isStashedValue(entry.getValue())) {
+ entry.setValue(unstashValue(entry.getValue().toString()));
+ } else {
+ unstashObject(entry.getValue());
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/RestClient.java b/src/test/java/org/elasticsearch/test/rest/client/RestClient.java
new file mode 100644
index 0000000..d038b14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/RestClient.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * REST client used to test the elasticsearch REST layer
+ * Holds the {@link RestSpec} used to translate api calls into REST calls
+ */
+public class RestClient implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestClient.class);
+
+ private final RestSpec restSpec;
+ private final CloseableHttpClient httpClient;
+
+ private final InetSocketAddress[] addresses;
+
+ private final String esVersion;
+
+ public RestClient(InetSocketAddress[] addresses, RestSpec restSpec) throws IOException, RestException {
+ assert addresses.length > 0;
+ this.restSpec = restSpec;
+ this.httpClient = createHttpClient();
+ this.addresses = addresses;
+ this.esVersion = readAndCheckVersion();
+ logger.info("REST client initialized {}, elasticsearch version: [{}]", addresses, esVersion);
+ }
+
+ private String readAndCheckVersion() throws IOException, RestException {
+ //we make a manual call here without using callApi method, mainly because we are initializing
+ //and the randomized context doesn't exist for the current thread (would be used to choose the method otherwise)
+ RestApi restApi = restApi("info");
+ assert restApi.getPaths().size() == 1;
+ assert restApi.getMethods().size() == 1;
+
+ String version = null;
+ for (InetSocketAddress address : addresses) {
+ RestResponse restResponse = new RestResponse(new HttpRequestBuilder(httpClient)
+ .host(address.getHostName()).port(address.getPort())
+ .path(restApi.getPaths().get(0))
+ .method(restApi.getMethods().get(0)).execute());
+ checkStatusCode(restResponse);
+
+ Object latestVersion = restResponse.evaluate("version.number");
+ if (latestVersion == null) {
+ throw new RuntimeException("elasticsearch version not found in the response");
+ }
+ if (version == null) {
+ version = latestVersion.toString();
+ } else {
+ if (!latestVersion.equals(version)) {
+ throw new IllegalArgumentException("provided nodes addresses run different elasticsearch versions");
+ }
+ }
+ }
+ return version;
+ }
+
+ public String getEsVersion() {
+ return esVersion;
+ }
+
+ /**
+ * Calls an api with the provided parameters
+ * @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored
+ * according to the ignore parameter received as input (which won't get sent to elasticsearch)
+ */
+ public RestResponse callApi(String apiName, String... params) throws IOException, RestException {
+ if (params.length % 2 != 0) {
+ throw new IllegalArgumentException("The number of params passed must be even but was [" + params.length + "]");
+ }
+
+ Map<String, String> paramsMap = Maps.newHashMap();
+ for (int i = 0; i < params.length; i++) {
+ paramsMap.put(params[i++], params[i]);
+ }
+
+ return callApi(apiName, paramsMap, null);
+ }
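+
+ /*
+ * Illustrative usage sketch (editorial addition); the index, type and id values
+ * are hypothetical:
+ *
+ * restClient.callApi("get", "index", "test", "type", "doc", "id", "1");
+ */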
+
+ /**
+ * Calls an api with the provided parameters and body
+ * @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored
+ * according to the ignore parameter received as input (which won't get sent to elasticsearch)
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, String body) throws IOException, RestException {
+
+ List<Integer> ignores = Lists.newArrayList();
+ Map<String, String> requestParams = null;
+ if (params != null) {
+ //makes a copy of the parameters before modifying them for this specific request
+ requestParams = Maps.newHashMap(params);
+ //ignore is a special parameter supported by the clients, shouldn't be sent to es
+ String ignoreString = requestParams.remove("ignore");
+ if (Strings.hasLength(ignoreString)) {
+ try {
+ ignores.add(Integer.valueOf(ignoreString));
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead");
+ }
+ }
+ }
+
+ HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
+ logger.debug("calling api [{}]", apiName);
+ HttpResponse httpResponse = httpRequestBuilder.execute();
+
+ //http HEAD doesn't support a response body
+ //for the few apis that use it (the exists family) we need to accept 404 too
+ if (!httpResponse.supportsBody()) {
+ ignores.add(404);
+ }
+
+ RestResponse restResponse = new RestResponse(httpResponse);
+ checkStatusCode(restResponse, ignores);
+ return restResponse;
+ }
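+
+ /*
+ * Illustrative usage sketch (editorial addition): the special "ignore" parameter
+ * suppresses the RestException for an expected error status; the api and index
+ * names are hypothetical:
+ *
+ * Map<String, String> params = Maps.newHashMap();
+ * params.put("index", "missing_index");
+ * params.put("ignore", "404");
+ * restClient.callApi("indices.delete", params, null);
+ */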
+
+ private void checkStatusCode(RestResponse restResponse, List<Integer> ignores) throws RestException {
+ //ignore is a catch within the client, to prevent the client from throwing an error if it gets non ok codes back
+ if (ignores.contains(restResponse.getStatusCode())) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("ignored non ok status codes {} as requested", ignores);
+ }
+ return;
+ }
+ checkStatusCode(restResponse);
+ }
+
+ private void checkStatusCode(RestResponse restResponse) throws RestException {
+ if (restResponse.isError()) {
+ throw new RestException("non ok status code [" + restResponse.getStatusCode() + "] returned", restResponse);
+ }
+ }
+
+ private HttpRequestBuilder callApiBuilder(String apiName, Map<String, String> params, String body) {
+
+ //create doesn't exist in the spec but is supported in the clients (index with op_type=create)
+ boolean indexCreateApi = "create".equals(apiName);
+ String api = indexCreateApi ? "index" : apiName;
+ RestApi restApi = restApi(api);
+
+ HttpRequestBuilder httpRequestBuilder = httpRequestBuilder();
+
+ if (Strings.hasLength(body)) {
+ if (!restApi.isBodySupported()) {
+ throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api");
+ }
+ httpRequestBuilder.body(body);
+ } else {
+ if (restApi.isBodyRequired()) {
+ throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api");
+ }
+ }
+
+ //divide params between ones that go within query string and ones that go within path
+ Map<String, String> pathParts = Maps.newHashMap();
+ if (params != null) {
+ for (Map.Entry<String, String> entry : params.entrySet()) {
+ if (restApi.getPathParts().contains(entry.getKey())) {
+ pathParts.put(entry.getKey(), entry.getValue());
+ } else {
+ if (!restApi.getParams().contains(entry.getKey())) {
+ throw new IllegalArgumentException("param [" + entry.getKey() + "] not supported in [" + restApi.getName() + "] api");
+ }
+ httpRequestBuilder.addParam(entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ if (indexCreateApi) {
+ httpRequestBuilder.addParam("op_type", "create");
+ }
+
+ //the http method is randomized (out of the available ones with the chosen api)
+ return httpRequestBuilder.method(RandomizedTest.randomFrom(restApi.getSupportedMethods(pathParts.keySet())))
+ .path(RandomizedTest.randomFrom(restApi.getFinalPaths(pathParts)));
+ }
+
+ private RestApi restApi(String apiName) {
+ RestApi restApi = restSpec.getApi(apiName);
+ if (restApi == null) {
+ throw new IllegalArgumentException("rest api [" + apiName + "] doesn't exist in the rest spec");
+ }
+ return restApi;
+ }
+
+ protected HttpRequestBuilder httpRequestBuilder() {
+ //the address used is randomized between the available ones
+ InetSocketAddress address = RandomizedTest.randomFrom(addresses);
+ return new HttpRequestBuilder(httpClient).host(address.getHostName()).port(address.getPort());
+ }
+
+ protected CloseableHttpClient createHttpClient() {
+ return HttpClients.createDefault();
+ }
+
+ /**
+ * Closes the REST client and the underlying http client
+ */
+ public void close() {
+ try {
+ httpClient.close();
+ } catch(IOException e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/RestException.java b/src/test/java/org/elasticsearch/test/rest/client/RestException.java
new file mode 100644
index 0000000..2236134
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/RestException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+/**
+ * Thrown when a status code that indicates an error is received (unless it is meant to be ignored)
+ * Holds the original {@link RestResponse}
+ */
+public class RestException extends Exception {
+
+ private final RestResponse restResponse;
+
+ public RestException(String message, RestResponse restResponse) {
+ super(message);
+ this.restResponse = restResponse;
+ }
+
+ public RestResponse restResponse() {
+ return restResponse;
+ }
+
+ public int statusCode() {
+ return restResponse.getStatusCode();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java b/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java
new file mode 100644
index 0000000..fd6df25
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.json.JsonPath;
+
+import java.io.IOException;
+
+/**
+ * Response obtained from a REST call
+ * Supports parsing the response body as json when needed and returning specific values extracted from it
+ */
+public class RestResponse {
+
+ private final HttpResponse response;
+ private JsonPath parsedResponse;
+
+ RestResponse(HttpResponse response) {
+ this.response = response;
+ }
+
+ public int getStatusCode() {
+ return response.getStatusCode();
+ }
+
+ public String getReasonPhrase() {
+ return response.getReasonPhrase();
+ }
+
+ public String getBody() {
+ return response.getBody();
+ }
+
+ public boolean isError() {
+ return response.isError();
+ }
+
+ /**
+ * Parses the response body as json and extracts a specific value from it (identified by the provided path)
+ */
+ public Object evaluate(String path) throws IOException {
+
+ if (response == null) {
+ return null;
+ }
+
+ JsonPath jsonPath = parsedResponse();
+
+ if (jsonPath == null) {
+ //special case: api that don't support body (e.g. exists) return true if 200, false if 404, even if no body
+ //is_true: '' means the response had no body but the client returned true (caused by 200)
+ //is_false: '' means the response had no body but the client returned false (caused by 404)
+ if ("".equals(path) && !response.supportsBody()) {
+ return !response.isError();
+ }
+ return null;
+ }
+
+ return jsonPath.evaluate(path);
+ }
+
+ private JsonPath parsedResponse() throws IOException {
+ if (parsedResponse != null) {
+ return parsedResponse;
+ }
+ if (response == null || !response.hasBody()) {
+ return null;
+ }
+ return parsedResponse = new JsonPath(response.getBody());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
new file mode 100644
index 0000000..480fc7b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows sending DELETE requests with a body (not supported by HttpClient out of the box)
+ */
+public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "DELETE";
+
+ public HttpDeleteWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
new file mode 100644
index 0000000..aa0129f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows sending GET requests with a body (not supported by HttpClient out of the box)
+ */
+public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "GET";
+
+ public HttpGetWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
new file mode 100644
index 0000000..a6487a6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.http.client.methods.*;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.Charset;
+import java.util.Map;
+
+/**
+ * Executable builder for an http request
+ * Holds an {@link org.apache.http.client.HttpClient} that is used to send the built http request
+ */
+public class HttpRequestBuilder {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpRequestBuilder.class);
+
+ static final Charset DEFAULT_CHARSET = Charset.forName("utf-8");
+
+ private final CloseableHttpClient httpClient;
+
+ private String host;
+
+ private int port;
+
+ private String path = "";
+
+ private final Map<String, String> params = Maps.newHashMap();
+
+ private String method = HttpGetWithEntity.METHOD_NAME;
+
+ private String body;
+
+ public HttpRequestBuilder(CloseableHttpClient httpClient) {
+ this.httpClient = httpClient;
+ }
+
+ public HttpRequestBuilder host(String host) {
+ this.host = host;
+ return this;
+ }
+
+ public HttpRequestBuilder port(int port) {
+ this.port = port;
+ return this;
+ }
+
+ public HttpRequestBuilder path(String path) {
+ this.path = path;
+ return this;
+ }
+
+ public HttpRequestBuilder addParam(String name, String value) {
+ this.params.put(name, value);
+ return this;
+ }
+
+ public HttpRequestBuilder method(String method) {
+ this.method = method;
+ return this;
+ }
+
+ public HttpRequestBuilder body(String body) {
+ if (Strings.hasLength(body)) {
+ this.body = body;
+ }
+ return this;
+ }
+
+ public HttpResponse execute() throws IOException {
+ CloseableHttpResponse closeableHttpResponse = null;
+ try {
+ HttpUriRequest httpUriRequest = buildRequest();
+ if (logger.isTraceEnabled()) {
+ StringBuilder stringBuilder = new StringBuilder(httpUriRequest.getMethod()).append(" ").append(httpUriRequest.getURI());
+ if (Strings.hasLength(body)) {
+ stringBuilder.append("\n").append(body);
+ }
+ logger.trace("sending request \n{}", stringBuilder.toString());
+ }
+ closeableHttpResponse = httpClient.execute(httpUriRequest);
+ HttpResponse httpResponse = new HttpResponse(httpUriRequest, closeableHttpResponse);
+ logger.trace("got response \n{}\n{}", closeableHttpResponse, httpResponse.hasBody() ? httpResponse.getBody() : "");
+ return httpResponse;
+ } finally {
+ try {
+ IOUtils.close(closeableHttpResponse);
+ } catch (IOException e) {
+ logger.error("error closing http response", e);
+ }
+ }
+ }
+
+ private HttpUriRequest buildRequest() {
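+        //GET and DELETE use the custom *WithEntity variants so that a body can be attached when needed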
+
+ if (HttpGetWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpGetWithEntity(buildUri()));
+ }
+
+ if (HttpHead.METHOD_NAME.equalsIgnoreCase(method)) {
+ checkBodyNotSupported();
+ return new HttpHead(buildUri());
+ }
+
+ if (HttpDeleteWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpDeleteWithEntity(buildUri()));
+ }
+
+ if (HttpPut.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPut(buildUri()));
+ }
+
+ if (HttpPost.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPost(buildUri()));
+ }
+
+ throw new UnsupportedOperationException("method [" + method + "] not supported");
+ }
+
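+    //note: the multi-argument URI constructor takes care of quoting illegal characters in path and query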
+ private URI buildUri() {
+ String query;
+ if (params.size() == 0) {
+ query = null;
+ } else {
+ query = Joiner.on('&').withKeyValueSeparator("=").join(params);
+ }
+ try {
+ return new URI("http", null, host, port, path, query, null);
+ } catch (URISyntaxException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+
+ private HttpEntityEnclosingRequestBase addOptionalBody(HttpEntityEnclosingRequestBase requestBase) {
+ if (Strings.hasText(body)) {
+ requestBase.setEntity(new StringEntity(body, DEFAULT_CHARSET));
+ }
+ return requestBase;
+ }
+
+ private void checkBodyNotSupported() {
+ if (Strings.hasText(body)) {
+ throw new IllegalArgumentException("request body not supported with head request");
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder stringBuilder = new StringBuilder(method).append(" '")
+ .append(host).append(":").append(port).append(path).append("'");
+ if (!params.isEmpty()) {
+ stringBuilder.append(", params=").append(params);
+ }
+ if (Strings.hasLength(body)) {
+ stringBuilder.append(", body=\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
new file mode 100644
index 0000000..84ac372
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+
+/**
+ * Response obtained from an http request
+ * Always consumes the whole response body, loading it entirely into a string
+ */
+public class HttpResponse {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpResponse.class);
+
+ private final HttpUriRequest httpRequest;
+ private final int statusCode;
+ private final String reasonPhrase;
+ private final String body;
+
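+    //eagerly reads the whole body so that the underlying http resources can be released straight away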
+ HttpResponse(HttpUriRequest httpRequest, CloseableHttpResponse httpResponse) {
+ this.httpRequest = httpRequest;
+ this.statusCode = httpResponse.getStatusLine().getStatusCode();
+ this.reasonPhrase = httpResponse.getStatusLine().getReasonPhrase();
+ if (httpResponse.getEntity() != null) {
+ try {
+ this.body = EntityUtils.toString(httpResponse.getEntity(), HttpRequestBuilder.DEFAULT_CHARSET);
+ } catch (IOException e) {
+ EntityUtils.consumeQuietly(httpResponse.getEntity());
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ httpResponse.close();
+ } catch (IOException e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+ } else {
+ this.body = null;
+ }
+ }
+
+ public boolean isError() {
+ return statusCode >= 400;
+ }
+
+ public int getStatusCode() {
+ return statusCode;
+ }
+
+ public String getReasonPhrase() {
+ return reasonPhrase;
+ }
+
+ public String getBody() {
+ return body;
+ }
+
+ public boolean hasBody() {
+ return body != null;
+ }
+
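+    //responses to HEAD requests never carry a body, whatever the status code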
+ public boolean supportsBody() {
+ return !HttpHead.METHOD_NAME.equals(httpRequest.getMethod());
+ }
+
+ @Override
+ public String toString() {
+        //append the status code rather than passing it to the constructor, which would interpret it as the initial capacity
+        StringBuilder stringBuilder = new StringBuilder().append(statusCode).append(" ").append(reasonPhrase);
+ if (hasBody()) {
+ stringBuilder.append("\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java b/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java
new file mode 100644
index 0000000..12e9aa3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.json;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Holds a json object and allows extracting specific values from it
+ */
+public class JsonPath {
+
+ final String json;
+ final Map<String, Object> jsonMap;
+
+ public JsonPath(String json) throws IOException {
+ this.json = json;
+ this.jsonMap = convertToMap(json);
+ }
+
+ private static Map<String, Object> convertToMap(String json) throws IOException {
+ return JsonXContent.jsonXContent.createParser(json).mapOrderedAndClose();
+ }
+
+ /**
+ * Returns the object corresponding to the provided path if present, null otherwise
+ */
+ public Object evaluate(String path) {
+ String[] parts = parsePath(path);
+ Object object = jsonMap;
+ for (String part : parts) {
+ object = evaluate(part, object);
+ if (object == null) {
+ return null;
+ }
+ }
+ return object;
+ }
+
+ @SuppressWarnings("unchecked")
+ private Object evaluate(String key, Object object) {
+ if (object instanceof Map) {
+ return ((Map<String, Object>) object).get(key);
+ }
+ if (object instanceof List) {
+ List<Object> list = (List<Object>) object;
+ try {
+ return list.get(Integer.valueOf(key));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("element was a list, but [" + key + "] was not numeric", e);
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("element was a list with " + list.size() + " elements, but [" + key + "] was out of bounds", e);
+ }
+ }
+
+ throw new IllegalArgumentException("no object found for [" + key + "] within object of class [" + object.getClass() + "]");
+ }
+
+ private String[] parsePath(String path) {
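+        //splits the path on dots; a backslash-escaped dot (\.) is treated as part of the current key rather than as a separator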
+ List<String> list = Lists.newArrayList();
+ StringBuilder current = new StringBuilder();
+ boolean escape = false;
+ for (int i = 0; i < path.length(); i++) {
+ char c = path.charAt(i);
+ if (c == '\\') {
+ escape = true;
+ continue;
+ }
+
+ if (c == '.') {
+ if (escape) {
+ escape = false;
+ } else {
+ if (current.length() > 0) {
+ list.add(current.toString());
+ current.setLength(0);
+ }
+ continue;
+ }
+ }
+
+            current.append(c);
+            //a backslash only escapes the character that immediately follows it
+            escape = false;
+ }
+
+ if (current.length() > 0) {
+ list.add(current.toString());
+ }
+
+ return list.toArray(new String[list.size()]);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java b/src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java
new file mode 100644
index 0000000..44284a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.google.common.base.Joiner;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.junit.runner.Description;
+
+import java.util.Map;
+
+/**
+ * Helper that knows how to assign proper junit {@link Description}s to each of the nodes in the test tree
+ */
+public final class DescriptionHelper {
+
+ private DescriptionHelper() {
+
+ }
+
+ /*
+    The following generated ids need to be unique throughout a test run.
+    Ids are also shown by IDEs (with junit 4.11 the unique ids can differ from what gets displayed, which is not yet possible in 4.10).
+    Some tricks are applied to control what gets shown in IDEs, in order to keep the ids unique and readable at the same time.
+ */
+
+ static Description createRootDescription(String name) {
+ return Description.createSuiteDescription(name);
+ }
+
+ static Description createApiDescription(String api) {
+ return Description.createSuiteDescription(api);
+ }
+
+ static Description createTestSuiteDescription(RestTestSuite restTestSuite) {
+ //e.g. "indices.open (10_basic)", which leads to 10_basic being shown by IDEs
+ String name = restTestSuite.getApi() + " (" + restTestSuite.getName() + ")";
+ return Description.createSuiteDescription(name);
+ }
+
+ static Description createTestSectionWithRepetitionsDescription(RestTestSuite restTestSuite, TestSection testSection) {
+ //e.g. "indices.open/10_basic (Basic test for index open/close)", which leads to
+ //"Basic test for index open/close" being shown by IDEs
+ String name = restTestSuite.getDescription() + " (" + testSection.getName() + ")";
+ return Description.createSuiteDescription(name);
+ }
+
+ static Description createTestSectionIterationDescription(RestTestSuite restTestSuite, TestSection testSection, Map<String, Object> args) {
+ //e.g. "Basic test for index open/close {#0} (indices.open/10_basic)" some IDEs might strip out the part between parentheses
+ String name = testSection.getName() + formatMethodArgs(args) + " (" + restTestSuite.getDescription() + ")";
+ return Description.createSuiteDescription(name);
+ }
+
+ private static String formatMethodArgs(Map<String, Object> args) {
+ if (args == null || args.isEmpty()) return "";
+
+ StringBuilder b = new StringBuilder(" {");
+ Joiner.on(" ").withKeyValueSeparator("").appendTo(b, args);
+ b.append("}");
+
+ return b.toString();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java b/src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java
new file mode 100644
index 0000000..735fbf6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import com.carrotsearch.randomizedtesting.StandaloneRandomizedContext;
+import com.carrotsearch.randomizedtesting.TraceFormatting;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+import org.elasticsearch.test.rest.ElasticsearchRestTests;
+import org.elasticsearch.test.rest.junit.RestTestSuiteRunner.RunMode;
+import org.junit.runner.Description;
+
+import java.util.Arrays;
+
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_RANDOM_SEED;
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTCLASS;
+import static org.elasticsearch.test.rest.junit.RestTestSuiteRunner.*;
+
+/**
+ * A {@link org.junit.runner.notification.RunListener} that emits to {@link System#err} a string with the command
+ * line parameters needed to quickly re-run a REST test from the Maven command line.
+ */
+class RestReproduceInfoPrinter extends ReproduceInfoPrinter {
+
+ protected static final ESLogger logger = Loggers.getLogger(RestReproduceInfoPrinter.class);
+
+ private static boolean isTestCluster() {
+ return runMode() == RunMode.TEST_CLUSTER;
+ }
+
+ @Override
+ protected TraceFormatting traces() {
+ return new TraceFormatting(
+ Arrays.asList(
+ "org.junit.",
+ "junit.framework.",
+ "sun.",
+ "java.lang.reflect.",
+ "com.carrotsearch.randomizedtesting.",
+ "org.elasticsearch.test.rest.junit."
+ ));
+ }
+
+ @Override
+ protected ReproduceErrorMessageBuilder reproduceErrorMessageBuilder(StringBuilder b) {
+ return new MavenMessageBuilder(b);
+ }
+
+ private static class MavenMessageBuilder extends ReproduceInfoPrinter.MavenMessageBuilder {
+
+ public MavenMessageBuilder(StringBuilder b) {
+ super(b);
+ }
+
+ @Override
+ public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
+
+ try {
+ appendOpt(SYSPROP_RANDOM_SEED(), StandaloneRandomizedContext.getSeedAsString());
+ } catch (IllegalStateException e) {
+ logger.warn("No context available when dumping reproduce options?");
+ }
+
+ //we know that ElasticsearchRestTests is the only one that runs with RestTestSuiteRunner
+ appendOpt(SYSPROP_TESTCLASS(), ElasticsearchRestTests.class.getName());
+
+ if (description.getClassName() != null) {
+ appendOpt(REST_TESTS_SUITE, description.getClassName());
+ }
+
+ appendRunnerProperties();
+ appendEnvironmentSettings();
+
+ appendProperties("es.logger.level");
+
+ if (isTestCluster()) {
+ appendProperties("es.node.mode", "es.node.local");
+ }
+
+ appendRestTestsProperties();
+
+ return this;
+ }
+
+ public ReproduceErrorMessageBuilder appendRestTestsProperties() {
+ return appendProperties(REST_TESTS_MODE, REST_TESTS_SPEC);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java b/src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java
new file mode 100644
index 0000000..2d7544d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.junit.runner.Description;
+
+/**
+ * Wraps {@link org.elasticsearch.test.rest.section.TestSection}s ready to be run,
+ * properly enriched with the needed execution information.
+ * The test tree structure gets flattened down to its leaves (the test sections)
+ */
+public class RestTestCandidate {
+
+ private final RestTestSuite restTestSuite;
+ private final Description suiteDescription;
+ private final TestSection testSection;
+ private final Description testDescription;
+ private final long seed;
+
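+    //used when a whole suite gets skipped: there is no test section to run, hence no section, no description and no seed (-1)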
+ static RestTestCandidate empty(RestTestSuite restTestSuite, Description suiteDescription) {
+ return new RestTestCandidate(restTestSuite, suiteDescription, null, null, -1);
+ }
+
+ RestTestCandidate(RestTestSuite restTestSuite, Description suiteDescription,
+ TestSection testSection, Description testDescription, long seed) {
+ this.restTestSuite = restTestSuite;
+ this.suiteDescription = suiteDescription;
+ this.testSection = testSection;
+ this.testDescription = testDescription;
+ this.seed = seed;
+ }
+
+ public String getApi() {
+ return restTestSuite.getApi();
+ }
+
+ public String getName() {
+ return restTestSuite.getName();
+ }
+
+ public String getSuiteDescription() {
+ return restTestSuite.getDescription();
+ }
+
+ public Description describeSuite() {
+ return suiteDescription;
+ }
+
+ public Description describeTest() {
+ return testDescription;
+ }
+
+ public SetupSection getSetupSection() {
+ return restTestSuite.getSetupSection();
+ }
+
+ public TestSection getTestSection() {
+ return testSection;
+ }
+
+ public long getSeed() {
+ return seed;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java b/src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java
new file mode 100644
index 0000000..ab0d17b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java
@@ -0,0 +1,598 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.carrotsearch.hppc.hash.MurmurHash3;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.Randomness;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.ExecutableSection;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.elasticsearch.test.rest.spec.RestSpec;
+import org.elasticsearch.test.rest.support.Features;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.runner.Description;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunNotifier;
+import org.junit.runners.ParentRunner;
+import org.junit.runners.model.InitializationError;
+import org.junit.runners.model.Statement;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Pattern;
+
+import static com.carrotsearch.randomizedtesting.SeedUtils.parseSeedChain;
+import static com.carrotsearch.randomizedtesting.StandaloneRandomizedContext.*;
+import static com.carrotsearch.randomizedtesting.SysGlobals.*;
+import static org.elasticsearch.test.TestCluster.clusterName;
+import static org.elasticsearch.test.rest.junit.DescriptionHelper.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+/**
+ * JUnit runner for elasticsearch REST tests
+ *
+ * Supports the following options provided as system properties:
+ * - tests.rest[true|false|host:port]: determines whether the REST tests need to be run and, if so,
+ * whether to rely on an external cluster (providing host and port) or to spin up a test cluster (default)
+ * - tests.rest.suite: comma separated paths of the test suites to be run (by default loaded from /rest-api-spec/test);
+ * it is possible to run only a subset of the tests by providing a directory or a single yaml file
+ * (the default /rest-api-spec/test prefix is optional when files are loaded from the classpath)
+ * - tests.rest.section: regex used to filter the test sections that are going to be run; if provided, only the
+ * section names that match it (case insensitive) will be executed
+ * - tests.rest.spec: REST spec path (default /rest-api-spec/api)
+ * - tests.iters: number of iterations to run for each test section
+ * - tests.seed: seed to base the random behaviours on
+ * - tests.appendseed[true|false]: enables adding the seed to each test section's description (default false)
+ *
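+ * For example (hypothetical values):
+ * mvn test -Dtests.rest=localhost:9200 -Dtests.rest.suite=indices.open -Dtests.iters=3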
+ */
+public class RestTestSuiteRunner extends ParentRunner<RestTestCandidate> {
+
+ private static final ESLogger logger = Loggers.getLogger(RestTestSuiteRunner.class);
+
+ public static final String REST_TESTS_MODE = "tests.rest";
+ public static final String REST_TESTS_SUITE = "tests.rest.suite";
+ public static final String REST_TESTS_SECTION = "tests.rest.section";
+ public static final String REST_TESTS_SPEC = "tests.rest.spec";
+
+ private static final String DEFAULT_TESTS_PATH = "/rest-api-spec/test";
+ private static final String DEFAULT_SPEC_PATH = "/rest-api-spec/api";
+ private static final int DEFAULT_ITERATIONS = 1;
+
+ private static final String PATHS_SEPARATOR = ",";
+
+ private final RestTestExecutionContext restTestExecutionContext;
+ private final List<RestTestCandidate> restTestCandidates;
+ private final Description rootDescription;
+
+ private final RunMode runMode;
+
+ private final TestCluster testCluster;
+
+ private static final AtomicInteger sequencer = new AtomicInteger();
+
+ /** The runner's seed (master). */
+ private final Randomness runnerRandomness;
+
+ /**
+ * If {@link com.carrotsearch.randomizedtesting.SysGlobals#SYSPROP_RANDOM_SEED} property is used with two arguments
+ * (master:test_section) then this field contains test section level override.
+ */
+ private final Randomness testSectionRandomnessOverride;
+
+ enum RunMode {
+ NO, TEST_CLUSTER, EXTERNAL_CLUSTER
+ }
+
+ static RunMode runMode() {
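+        //tests.rest accepts true (spin up a test cluster), false (skip the tests) or a comma separated list of host:port addresses (external cluster)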
+ String mode = System.getProperty(REST_TESTS_MODE);
+ if (!Strings.hasLength(mode)) {
+ //default true: we run the tests starting our own test cluster
+ mode = Boolean.TRUE.toString();
+ }
+
+ if (Boolean.FALSE.toString().equalsIgnoreCase(mode)) {
+ return RunMode.NO;
+ }
+ if (Boolean.TRUE.toString().equalsIgnoreCase(mode)) {
+ return RunMode.TEST_CLUSTER;
+ }
+ return RunMode.EXTERNAL_CLUSTER;
+ }
+
+ public RestTestSuiteRunner(Class<?> testClass) throws InitializationError {
+ super(testClass);
+
+ this.runMode = runMode();
+
+ if (runMode == RunMode.NO) {
+ //the tests won't be run. the run method will be called anyway but we'll just mark the whole suite as ignored
+ //no need to go ahead and parse the test suites then
+ this.runnerRandomness = null;
+ this.testSectionRandomnessOverride = null;
+ this.restTestExecutionContext = null;
+ this.restTestCandidates = null;
+ this.rootDescription = createRootDescription(getRootSuiteTitle());
+ this.rootDescription.addChild(createApiDescription("empty suite"));
+ this.testCluster = null;
+ return;
+ }
+
+ //the REST test suite is supposed to be run only once per jvm against either an external es node or a self started one
+ if (sequencer.getAndIncrement() > 0) {
+ throw new InitializationError("only one instance of RestTestSuiteRunner can be created per jvm");
+ }
+
+ //either read the seed from system properties (first one in the chain) or generate a new one
+ final String globalSeed = System.getProperty(SYSPROP_RANDOM_SEED());
+ final long initialSeed;
+ Randomness randomnessOverride = null;
+ if (Strings.hasLength(globalSeed)) {
+ final long[] seedChain = parseSeedChain(globalSeed);
+ if (seedChain.length == 0 || seedChain.length > 2) {
+ throw new IllegalArgumentException("Invalid system property "
+ + SYSPROP_RANDOM_SEED() + " specification: " + globalSeed);
+ }
+ if (seedChain.length > 1) {
+ //read the test section level seed if present
+ randomnessOverride = new Randomness(seedChain[1]);
+ }
+ initialSeed = seedChain[0];
+ } else {
+ initialSeed = MurmurHash3.hash(System.nanoTime());
+ }
+ this.runnerRandomness = new Randomness(initialSeed);
+ this.testSectionRandomnessOverride = randomnessOverride;
+ logger.info("Master seed: {}", SeedUtils.formatSeed(initialSeed));
+
+ List<InetSocketAddress> addresses = Lists.newArrayList();
+ if (runMode == RunMode.TEST_CLUSTER) {
+ this.testCluster = new TestCluster(initialSeed, 1, 3,
+ clusterName("REST-tests", ElasticsearchTestCase.CHILD_VM_ID, initialSeed));
+ this.testCluster.beforeTest(runnerRandomness.getRandom(), 0.0f);
+ for (HttpServerTransport httpServerTransport : testCluster.getInstances(HttpServerTransport.class)) {
+ addresses.add(((InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress()).address());
+ }
+ } else {
+ this.testCluster = null;
+ String testsMode = System.getProperty(REST_TESTS_MODE);
+ String[] stringAddresses = testsMode.split(",");
+ for (String stringAddress : stringAddresses) {
+ String[] split = stringAddress.split(":");
+ if (split.length < 2) {
+ throw new InitializationError("address [" + testsMode + "] not valid");
+ }
+ try {
+ addresses.add(new InetSocketAddress(split[0], Integer.valueOf(split[1])));
+ } catch(NumberFormatException e) {
+ throw new InitializationError("port is not valid, expected number but was [" + split[1] + "]");
+ }
+ }
+ }
+
+ try {
+ String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH);
+ RestSpec restSpec = RestSpec.parseFrom(DEFAULT_SPEC_PATH, specPaths);
+ this.restTestExecutionContext = new RestTestExecutionContext(addresses.toArray(new InetSocketAddress[addresses.size()]), restSpec);
+ this.rootDescription = createRootDescription(getRootSuiteTitle());
+ this.restTestCandidates = collectTestCandidates(rootDescription);
+ } catch (InitializationError e) {
+ stopTestCluster();
+ throw e;
+ } catch (Throwable e) {
+ stopTestCluster();
+ throw new InitializationError(e);
+ }
+ }
+
+ /**
+     * Parses the test suites and creates the test candidates to be run, together with their junit descriptions.
+     * The descriptions form a tree containing api/yaml file/test section/possibly multiple iterations.
+     * The test candidates instead get flattened out to the leaf level (the iterations), which is what actually gets run.
+ */
+ protected List<RestTestCandidate> collectTestCandidates(Description rootDescription) throws InitializationError, IOException {
+
+ String[] paths = resolvePathsProperty(REST_TESTS_SUITE, DEFAULT_TESTS_PATH);
+ Map<String, Set<File>> yamlSuites = FileUtils.findYamlSuites(DEFAULT_TESTS_PATH, paths);
+
+ String sectionFilter = System.getProperty(REST_TESTS_SECTION);
+ Pattern sectionFilterPattern = null;
+ if (Strings.hasLength(sectionFilter)) {
+ sectionFilterPattern = Pattern.compile(sectionFilter, Pattern.CASE_INSENSITIVE);
+ }
+
+ int iterations = determineTestSectionIterationCount();
+ boolean appendSeedParameter = RandomizedTest.systemPropertyAsBoolean(SYSPROP_APPEND_SEED(), false);
+
+ //we iterate over the files and we shuffle them (grouped by api, and by yaml file)
+ //meanwhile we create the junit descriptions and test candidates (one per iteration)
+
+ //yaml suites are grouped by directory (effectively by api)
+ List<String> apis = Lists.newArrayList(yamlSuites.keySet());
+ Collections.shuffle(apis, runnerRandomness.getRandom());
+
+ final boolean fixedSeed = testSectionRandomnessOverride != null;
+ final boolean hasRepetitions = iterations > 1;
+
+ List<Throwable> parseExceptions = Lists.newArrayList();
+ List<RestTestCandidate> testCandidates = Lists.newArrayList();
+ RestTestSuiteParser restTestSuiteParser = new RestTestSuiteParser();
+ for (String api : apis) {
+
+ Description apiDescription = createApiDescription(api);
+
+ List<File> yamlFiles = Lists.newArrayList(yamlSuites.get(api));
+ Collections.shuffle(yamlFiles, runnerRandomness.getRandom());
+
+ for (File yamlFile : yamlFiles) {
+ RestTestSuite restTestSuite;
+ try {
+ restTestSuite = restTestSuiteParser.parse(restTestExecutionContext.esVersion(), api, yamlFile);
+ } catch (RestTestParseException e) {
+ parseExceptions.add(e);
+ //we continue so that we collect all parse errors and show them all at once
+ continue;
+ }
+
+ Description testSuiteDescription = createTestSuiteDescription(restTestSuite);
+
+ if (restTestSuite.getTestSections().size() == 0) {
+ assert restTestSuite.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion());
+ testCandidates.add(RestTestCandidate.empty(restTestSuite, testSuiteDescription));
+ continue;
+ }
+
+ Collections.shuffle(restTestSuite.getTestSections(), runnerRandomness.getRandom());
+
+ for (TestSection testSection : restTestSuite.getTestSections()) {
+
+ if (sectionFilterPattern != null) {
+ if (!sectionFilterPattern.matcher(testSection.getName()).find()) {
+ continue;
+ }
+ }
+
+                //no need to generate a seed if we are going to skip the test section
+ if (testSection.getSkipSection().skip(restTestExecutionContext.esVersion())) {
+ Description testSectionDescription = createTestSectionIterationDescription(restTestSuite, testSection, null);
+ testSuiteDescription.addChild(testSectionDescription);
+ testCandidates.add(new RestTestCandidate(restTestSuite, testSuiteDescription, testSection, testSectionDescription, -1));
+ continue;
+ }
+
+ Description parentDescription;
+ if (hasRepetitions) {
+ //additional level to group multiple iterations under the same test section's node
+ parentDescription = createTestSectionWithRepetitionsDescription(restTestSuite, testSection);
+ testSuiteDescription.addChild(parentDescription);
+ } else {
+ parentDescription = testSuiteDescription;
+ }
+
+ final long testSectionSeed = determineTestSectionSeed(restTestSuite.getDescription() + "/" + testSection.getName());
+ for (int i = 0; i < iterations; i++) {
+                    //each iteration gets its own seed (unless a fixed override is in place) so that the description arguments stay unique
+ long thisSeed = (fixedSeed ? testSectionSeed : testSectionSeed ^ MurmurHash3.hash((long) i));
+
+ final LinkedHashMap<String, Object> args = new LinkedHashMap<String, Object>();
+ if (hasRepetitions) {
+ args.put("#", i);
+ }
+ if (hasRepetitions || appendSeedParameter) {
+ args.put("seed=", SeedUtils.formatSeedChain(runnerRandomness, new Randomness(thisSeed)));
+ }
+
+ Description testSectionDescription = createTestSectionIterationDescription(restTestSuite, testSection, args);
+ parentDescription.addChild(testSectionDescription);
+ testCandidates.add(new RestTestCandidate(restTestSuite, testSuiteDescription, testSection, testSectionDescription, thisSeed));
+ }
+ }
+
+ //we add the suite only if it has at least a section left
+ if (testSuiteDescription.getChildren().size() > 0) {
+ apiDescription.addChild(testSuiteDescription);
+ }
+ }
+
+ //we add the api only if it has at least a suite left
+ if (apiDescription.getChildren().size() > 0) {
+ rootDescription.addChild(apiDescription);
+ }
+ }
+
+ if (!parseExceptions.isEmpty()) {
+ throw new InitializationError(parseExceptions);
+ }
+
+ if (rootDescription.getChildren().size() == 0) {
+ throw new InitializationError("No tests to run");
+ }
+
+ return testCandidates;
+ }
+
+ protected String getRootSuiteTitle() {
+ if (runMode == RunMode.NO) {
+ return "elasticsearch REST Tests - not run";
+ }
+ if (runMode == RunMode.TEST_CLUSTER) {
+ return String.format(Locale.ROOT, "elasticsearch REST Tests - test cluster");
+ }
+ if (runMode == RunMode.EXTERNAL_CLUSTER) {
+ return String.format(Locale.ROOT, "elasticsearch REST Tests - external cluster %s", System.getProperty(REST_TESTS_MODE));
+ }
+ throw new UnsupportedOperationException("runMode [" + runMode + "] not supported");
+ }
+
+ private int determineTestSectionIterationCount() {
+ int iterations = RandomizedTest.systemPropertyAsInt(SYSPROP_ITERATIONS(), DEFAULT_ITERATIONS);
+ if (iterations < 1) {
+ throw new IllegalArgumentException("System property " + SYSPROP_ITERATIONS() + " must be >= 1 but was [" + iterations + "]");
+ }
+ return iterations;
+ }
+
+ protected static String[] resolvePathsProperty(String propertyName, String defaultValue) {
+ String property = System.getProperty(propertyName);
+ if (!Strings.hasLength(property)) {
+ return new String[]{defaultValue};
+ } else {
+ return property.split(PATHS_SEPARATOR);
+ }
+ }
+
+ /**
+ * Determine a given test section's initial random seed
+ */
+ private long determineTestSectionSeed(String testSectionName) {
+ if (testSectionRandomnessOverride != null) {
+ return getSeed(testSectionRandomnessOverride);
+ }
+
+ // We assign each test section a different starting hash based on the global seed
+ // and a hash of their name (so that the order of sections does not matter, only their names)
+ return getSeed(runnerRandomness) ^ MurmurHash3.hash((long) testSectionName.hashCode());
+ }
+
+ @Override
+ protected List<RestTestCandidate> getChildren() {
+ return restTestCandidates;
+ }
+
+ @Override
+ public Description getDescription() {
+ return rootDescription;
+ }
+
+ @Override
+ protected Description describeChild(RestTestCandidate child) {
+ return child.describeTest();
+ }
+
+ @Override
+ protected Statement classBlock(RunNotifier notifier) {
+ //we remove support for @BeforeClass & @AfterClass and JUnit Rules (as we don't call super)
+ Statement statement = childrenInvoker(notifier);
+ statement = withExecutionContextClose(statement);
+ if (testCluster != null) {
+ return withTestClusterClose(statement);
+ }
+ return statement;
+ }
+
+ protected Statement withExecutionContextClose(Statement statement) {
+ return new RunAfter(statement, new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ restTestExecutionContext.close();
+ }
+ });
+ }
+
+ protected Statement withTestClusterClose(Statement statement) {
+ return new RunAfter(statement, new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ stopTestCluster();
+ }
+ });
+ }
+
+ @Override
+ public void run(final RunNotifier notifier) {
+
+ if (runMode == RunMode.NO) {
+ notifier.fireTestIgnored(rootDescription.getChildren().get(0));
+ return;
+ }
+ final RestReproduceInfoPrinter restReproduceInfoPrinter = new RestReproduceInfoPrinter();
+ notifier.addListener(restReproduceInfoPrinter);
+ try {
+ //the test suite gets run on a separate thread as the randomized context is per thread
+ //once the randomized context is disposed it's not possible to create it again on the same thread
+ final Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ createRandomizedContext(getTestClass().getJavaClass(), runnerRandomness);
+ RestTestSuiteRunner.super.run(notifier);
+ } finally {
+ disposeRandomizedContext();
+ }
+ }
+ };
+
+ thread.start();
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ notifier.fireTestFailure(new Failure(getDescription(),
+ new RuntimeException("Interrupted while waiting for the suite runner? Weird.", e)));
+ }
+ } finally {
+ // remove the listener once the suite is done otherwise it will print
+ // a bogus line if a subsequent test fails that is not a
+ // REST test. The RunNotifier is used across suites!
+ notifier.removeListener(restReproduceInfoPrinter);
+ }
+ }
+
+ @Override
+ protected void runChild(RestTestCandidate testCandidate, RunNotifier notifier) {
+
+        //if the whole suite needs to be skipped, no test sections were loaded, only an empty one that we need to mark as ignored
+ if (testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion())) {
+ if (logger.isInfoEnabled()) {
+ if (testCandidate.getSetupSection().getSkipSection().isVersionCheck()) {
+ logger.info("skipped test suite [{}]\nreason: {}\nskip versions: {} (current version: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getSetupSection().getSkipSection().getReason(),
+ testCandidate.getSetupSection().getSkipSection().getVersion(), restTestExecutionContext.esVersion());
+ } else {
+ logger.info("skipped test suite [{}]\nreason: feature not supported\nrequired features: {} (supported features: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getSetupSection().getSkipSection().getFeatures(), Features.getSupported());
+ }
+ }
+ notifier.fireTestIgnored(testCandidate.describeSuite());
+ return;
+ }
+
+ //from now on no more empty test candidates are expected
+ assert testCandidate.getTestSection() != null;
+
+ if (testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion())) {
+ if (logger.isInfoEnabled()) {
+ if (testCandidate.getTestSection().getSkipSection().isVersionCheck()) {
+ logger.info("skipped test [{}/{}]\nreason: {}\nskip versions: {} (current version: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName(),
+ testCandidate.getTestSection().getSkipSection().getReason(),
+ testCandidate.getTestSection().getSkipSection().getVersion(), restTestExecutionContext.esVersion());
+ } else {
+ logger.info("skipped test [{}/{}]\nreason: feature not supported\nrequired features: {} (supported features: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName(),
+ testCandidate.getTestSection().getSkipSection().getFeatures(), Features.getSupported());
+ }
+ }
+
+ notifier.fireTestIgnored(testCandidate.describeTest());
+ return;
+ }
+
+ runLeaf(methodBlock(testCandidate), testCandidate.describeTest(), notifier);
+ }
+
+ protected Statement methodBlock(final RestTestCandidate testCandidate) {
+ return new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ final String testThreadName = "TEST-" + testCandidate.getSuiteDescription() +
+ "." + testCandidate.getTestSection().getName() + "-seed#" + SeedUtils.formatSeedChain(runnerRandomness);
+ // This has a side effect of setting up a nested context for the test thread.
+ final String restoreName = Thread.currentThread().getName();
+ try {
+ Thread.currentThread().setName(testThreadName);
+ pushRandomness(new Randomness(testCandidate.getSeed()));
+ runTestSection(testCandidate);
+ } finally {
+ Thread.currentThread().setName(restoreName);
+ popAndDestroy();
+ }
+ }
+ };
+ }
+
+ protected void runTestSection(RestTestCandidate testCandidate)
+ throws IOException, RestException {
+
+ //let's check that there is something to run, otherwise there might be a problem with the test section
+ if (testCandidate.getTestSection().getExecutableSections().size() == 0) {
+ throw new IllegalArgumentException("No executable sections loaded for ["
+ + testCandidate.getSuiteDescription() + "/" + testCandidate.getTestSection().getName() + "]");
+ }
+
+ logger.info("cleaning up before test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ tearDown();
+
+ logger.info("start test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+
+ if (!testCandidate.getSetupSection().isEmpty()) {
+ logger.info("start setup test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
+ doSection.execute(restTestExecutionContext);
+ }
+ logger.info("end setup test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ }
+
+ restTestExecutionContext.clear();
+
+ for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) {
+ executableSection.execute(restTestExecutionContext);
+ }
+
+ logger.info("end test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+
+ logger.info("cleaning up after test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ tearDown();
+ }
+
+ private void tearDown() throws IOException, RestException {
+ wipeIndices();
+ wipeTemplates();
+ restTestExecutionContext.clear();
+ }
+
+ private void wipeIndices() throws IOException, RestException {
+ logger.debug("deleting all indices");
+ RestResponse restResponse = restTestExecutionContext.callApiInternal("indices.delete", "index", "_all");
+ assertThat(restResponse.getStatusCode(), equalTo(200));
+ }
+
+ @SuppressWarnings("unchecked")
+ public void wipeTemplates() throws IOException, RestException {
+ logger.debug("deleting all templates");
+ RestResponse restResponse = restTestExecutionContext.callApiInternal("indices.delete_template", "name", "*");
+ assertThat(restResponse.getStatusCode(), equalTo(200));
+ }
+
+ private void stopTestCluster() {
+ if (runMode == RunMode.TEST_CLUSTER) {
+ assert testCluster != null;
+ testCluster.afterTest();
+ testCluster.close();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java b/src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java
new file mode 100644
index 0000000..c786935
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.google.common.collect.Lists;
+import org.junit.runners.model.MultipleFailureException;
+import org.junit.runners.model.Statement;
+
+import java.util.List;
+
+/**
+ * {@link Statement} that runs a specific statement after another one
+ */
+public class RunAfter extends Statement {
+
+ private final Statement next;
+ private final Statement after;
+
+ public RunAfter(Statement next, Statement after) {
+ this.next = next;
+ this.after = after;
+ }
+
+ @Override
+ public void evaluate() throws Throwable {
+ List<Throwable> errors = Lists.newArrayList();
+ try {
+ next.evaluate();
+ } catch (Throwable e) {
+ errors.add(e);
+ } finally {
+ try {
+ after.evaluate();
+ } catch (Throwable e) {
+ errors.add(e);
+ }
+ }
+ MultipleFailureException.assertEmpty(errors);
+ }
+}
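
For illustration, a minimal sketch of how RunAfter composes two JUnit statements; the sketch class itself is hypothetical and not part of this commit:

    import org.elasticsearch.test.rest.junit.RunAfter;
    import org.junit.runners.model.Statement;

    public class RunAfterSketch {
        public static void main(String[] args) throws Throwable {
            Statement test = new Statement() {
                @Override
                public void evaluate() throws Throwable {
                    System.out.println("test body");
                    throw new AssertionError("test failure");
                }
            };
            Statement cleanup = new Statement() {
                @Override
                public void evaluate() throws Throwable {
                    System.out.println("cleanup runs even though the test failed");
                }
            };
            //failures from both statements are collected; a single failure is
            //rethrown as-is, multiple failures as a MultipleFailureException
            new RunAfter(test, cleanup).evaluate();
        }
    }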
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
new file mode 100644
index 0000000..ec5aef5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for do sections
+ */
+public class DoSectionParser implements RestTestFragmentParser<DoSection> {
+
+ @Override
+ public DoSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ DoSection doSection = new DoSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("catch".equals(currentFieldName)) {
+ doSection.setCatch(parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if (currentFieldName != null) {
+ ApiCallSection apiCallSection = new ApiCallSection(currentFieldName);
+ String paramName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ paramName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("body".equals(paramName)) {
+ String body = parser.text();
+ XContentType bodyContentType = XContentFactory.xContentType(body);
+ XContentParser bodyParser = XContentFactory.xContent(bodyContentType).createParser(body);
+ //multiple bodies are supported, e.g. when a bulk request body is provided as a whole string
+ while(bodyParser.nextToken() != null) {
+ apiCallSection.addBody(bodyParser.mapOrdered());
+ }
+ } else {
+ apiCallSection.addParam(paramName, parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("body".equals(paramName)) {
+ apiCallSection.addBody(parser.mapOrdered());
+ }
+ }
+ }
+ doSection.setApiCallSection(apiCallSection);
+ }
+ }
+ }
+
+ parser.nextToken();
+
+ if (doSection.getApiCallSection() == null) {
+ throw new RestTestParseException("client call section is mandatory within a do section");
+ }
+
+ return doSection;
+ }
+}
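
For illustration, a hypothetical sketch (not part of this commit) that feeds a single do section body to DoSectionParser; the yaml string mirrors what a suite file contains under "- do:":

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;
    import org.elasticsearch.test.rest.parser.DoSectionParser;
    import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
    import org.elasticsearch.test.rest.section.DoSection;

    public class DoSectionSketch {
        public static void main(String[] args) throws Exception {
            //the body of a single do section, as it appears under "- do:" in a suite
            String yaml = "catch: missing\nget:\n  index: test_1\n  id: 1\n";
            XContentParser parser = YamlXContent.yamlXContent.createParser(yaml);
            RestTestSuiteParseContext context = new RestTestSuiteParseContext("get", "sketch", parser, "1.0.0");
            DoSection doSection = new DoSectionParser().parse(context);
            System.out.println(doSection.getCatch());                    //missing
            System.out.println(doSection.getApiCallSection().getApi());  //get
            System.out.println(doSection.getApiCallSection().getParams()); //index/id params (order not guaranteed)
        }
    }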
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java b/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
new file mode 100644
index 0000000..a661221
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.GreaterThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for gt assert sections
+ */
+public class GreaterThanParser implements RestTestFragmentParser<GreaterThanAssertion> {
+
+ @Override
+ public GreaterThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (! (stringObjectTuple.v2() instanceof Comparable) ) {
+ throw new RestTestParseException("gt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new GreaterThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java b/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
new file mode 100644
index 0000000..81cade6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsFalseAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_false assert sections
+ */
+public class IsFalseParser implements RestTestFragmentParser<IsFalseAssertion> {
+
+ @Override
+ public IsFalseAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsFalseAssertion(parseContext.parseField());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java b/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
new file mode 100644
index 0000000..922629b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_true assert sections
+ */
+public class IsTrueParser implements RestTestFragmentParser<IsTrueAssertion> {
+
+ @Override
+ public IsTrueAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsTrueAssertion(parseContext.parseField());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java b/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java
new file mode 100644
index 0000000..414be59
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LengthAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for length assert sections
+ */
+public class LengthParser implements RestTestFragmentParser<LengthAssertion> {
+
+ @Override
+ public LengthAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ assert stringObjectTuple.v2() != null;
+ int value;
+ if (stringObjectTuple.v2() instanceof Number) {
+ value = ((Number) stringObjectTuple.v2()).intValue();
+ } else {
+ try {
+ value = Integer.parseInt(stringObjectTuple.v2().toString());
+ } catch(NumberFormatException e) {
+ throw new RestTestParseException("length is not a valid number", e);
+ }
+ }
+ return new LengthAssertion(stringObjectTuple.v1(), value);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java b/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java
new file mode 100644
index 0000000..065dd19
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LessThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for lt assert sections
+ */
+public class LessThanParser implements RestTestFragmentParser<LessThanAssertion> {
+
+ @Override
+ public LessThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (! (stringObjectTuple.v2() instanceof Comparable) ) {
+ throw new RestTestParseException("lt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new LessThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java b/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java
new file mode 100644
index 0000000..30ee18a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for match assert sections
+ */
+public class MatchParser implements RestTestFragmentParser<MatchAssertion> {
+
+ @Override
+ public MatchAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ return new MatchAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
new file mode 100644
index 0000000..8d2bd8b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import java.io.IOException;
+
+/**
+ * Base parser for a REST test suite fragment
+ * @param <T> the test fragment's type that gets parsed and returned
+ */
+public interface RestTestFragmentParser<T> {
+
+ /**
+ * Parses a test fragment given the current {@link RestTestSuiteParseContext}
+ */
+ T parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException;
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
new file mode 100644
index 0000000..3e1af2c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+/**
+ * Exception thrown whenever there is a problem parsing any of the REST test suite fragments
+ */
+public class RestTestParseException extends Exception {
+
+ RestTestParseException(String message) {
+ super(message);
+ }
+
+ RestTestParseException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
new file mode 100644
index 0000000..7ff06f4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for a complete test section
+ *
+ * Depending on the elasticsearch version the tests are going to run against, a test section might need to be skipped.
+ * In that case, parsing of the relevant test section is skipped entirely.
+ */
+public class RestTestSectionParser implements RestTestFragmentParser<TestSection> {
+
+ @Override
+ public TestSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+ parseContext.advanceToFieldName();
+ TestSection testSection = new TestSection(parser.currentName());
+ parser.nextToken();
+ testSection.setSkipSection(parseContext.parseSkipSection());
+
+ boolean skip = testSection.getSkipSection().skip(parseContext.getCurrentVersion());
+
+ while ( parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ if (skip) {
+ //a skip section was just parsed, which means that we are sure
+ // the current token is at the beginning of a new object
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ //we need to be at the beginning of an object to be able to skip children
+ parser.skipChildren();
+ //after skipChildren we are at the end of the skipped object, need to move on
+ parser.nextToken();
+ } else {
+ parseContext.advanceToFieldName();
+ testSection.addExecutableSection(parseContext.parseExecutableSection());
+ }
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ return testSection;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
new file mode 100644
index 0000000..5573264
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Context shared across the whole test suite parse phase.
+ * Provides shared parse methods and holds the information needed to parse the test sections (e.g. the es version)
+ */
+public class RestTestSuiteParseContext {
+
+ private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser();
+ private static final RestTestSectionParser TEST_SECTION_PARSER = new RestTestSectionParser();
+ private static final SkipSectionParser SKIP_SECTION_PARSER = new SkipSectionParser();
+ private static final DoSectionParser DO_SECTION_PARSER = new DoSectionParser();
+ private static final Map<String, RestTestFragmentParser<? extends ExecutableSection>> EXECUTABLE_SECTIONS_PARSERS = Maps.newHashMap();
+ static {
+ EXECUTABLE_SECTIONS_PARSERS.put("do", DO_SECTION_PARSER);
+ EXECUTABLE_SECTIONS_PARSERS.put("set", new SetSectionParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("match", new MatchParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_true", new IsTrueParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_false", new IsFalseParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("gt", new GreaterThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("lt", new LessThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("length", new LengthParser());
+ }
+
+ private final String api;
+ private final String suiteName;
+ private final XContentParser parser;
+ private final String currentVersion;
+
+ public RestTestSuiteParseContext(String api, String suiteName, XContentParser parser, String currentVersion) {
+ this.api = api;
+ this.suiteName = suiteName;
+ this.parser = parser;
+ this.currentVersion = currentVersion;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getSuiteName() {
+ return suiteName;
+ }
+
+ public XContentParser parser() {
+ return parser;
+ }
+
+ public String getCurrentVersion() {
+ return currentVersion;
+ }
+
+ public SetupSection parseSetupSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("setup".equals(parser.currentName())) {
+ parser.nextToken();
+ SetupSection setupSection = SETUP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return setupSection;
+ }
+
+ return SetupSection.EMPTY;
+ }
+
+ public TestSection parseTestSection() throws IOException, RestTestParseException {
+ return TEST_SECTION_PARSER.parse(this);
+ }
+
+ public SkipSection parseSkipSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("skip".equals(parser.currentName())) {
+ SkipSection skipSection = SKIP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return skipSection;
+ }
+
+ return SkipSection.EMPTY;
+ }
+
+ public ExecutableSection parseExecutableSection() throws IOException, RestTestParseException {
+ advanceToFieldName();
+ String section = parser.currentName();
+ RestTestFragmentParser<? extends ExecutableSection> execSectionParser = EXECUTABLE_SECTIONS_PARSERS.get(section);
+ if (execSectionParser == null) {
+ throw new RestTestParseException("no parser found for executable section [" + section + "]");
+ }
+ ExecutableSection executableSection = execSectionParser.parse(this);
+ parser.nextToken();
+ return executableSection;
+ }
+
+ public DoSection parseDoSection() throws IOException, RestTestParseException {
+ return DO_SECTION_PARSER.parse(this);
+ }
+
+ public void advanceToFieldName() throws IOException, RestTestParseException {
+ XContentParser.Token token = parser.currentToken();
+ //we are at the beginning, nextToken hasn't been called yet
+ if (token == null) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_ARRAY) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ }
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new RestTestParseException("malformed test section: field name expected but found " + token);
+ }
+ }
+
+ public String parseField() throws IOException, RestTestParseException {
+ parser.nextToken();
+ assert parser.currentToken().isValue();
+ String field = parser.text();
+ parser.nextToken();
+ return field;
+ }
+
+ public Tuple<String, Object> parseTuple() throws IOException, RestTestParseException {
+ parser.nextToken();
+ advanceToFieldName();
+ Map<String,Object> map = parser.map();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ if (map.size() != 1) {
+ throw new RestTestParseException("expected key value pair but found " + map.size() + " ");
+ }
+
+ Map.Entry<String, Object> entry = map.entrySet().iterator().next();
+ return Tuple.tuple(entry.getKey(), entry.getValue());
+ }
+}
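
For illustration, a hypothetical sketch (not part of this commit) of parseTuple(), the shared helper behind the match/gt/lt/length parsers:

    import org.elasticsearch.common.collect.Tuple;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;
    import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;

    public class ParseTupleSketch {
        public static void main(String[] args) throws Exception {
            //the single-entry mapping that follows the match/gt/lt/length keys
            XContentParser parser = YamlXContent.yamlXContent.createParser("{ hits.total: 3 }");
            RestTestSuiteParseContext context = new RestTestSuiteParseContext("search", "sketch", parser, "1.0.0");
            Tuple<String, Object> tuple = context.parseTuple();
            System.out.println(tuple.v1()); //hits.total
            System.out.println(tuple.v2()); //3
        }
    }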
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
new file mode 100644
index 0000000..692e9aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+/**
+ * Parser for a complete test suite (yaml file)
+ *
+ * Depending on the elasticsearch version the tests are going to run against, a whole test suite might need to be skipped.
+ * In that case, parsing of the relevant test sections is skipped entirely.
+ */
+public class RestTestSuiteParser implements RestTestFragmentParser<RestTestSuite> {
+
+ public RestTestSuite parse(String currentVersion, String api, File file) throws IOException, RestTestParseException {
+
+ if (!file.isFile()) {
+ throw new IllegalArgumentException(file.getAbsolutePath() + " is not a file");
+ }
+
+ String filename = file.getName();
+ //remove the file extension
+ int i = filename.lastIndexOf('.');
+ if (i > 0) {
+ filename = filename.substring(0, i);
+ }
+
+ //our yaml parser seems to be too tolerant. Each yaml suite must end with \n, otherwise client tests might break.
+ RandomAccessFile randomAccessFile = null;
+ try {
+ randomAccessFile = new RandomAccessFile(file, "r");
+ randomAccessFile.skipBytes((int)randomAccessFile.length() - 1);
+ if (randomAccessFile.read() != 10) {
+ throw new RestTestParseException("test suite [" + api + "/" + filename + "] doesn't end with line feed (\\n)");
+ }
+ } finally {
+ IOUtils.close(randomAccessFile);
+ }
+
+ XContentParser parser = YamlXContent.yamlXContent.createParser(new FileInputStream(file));
+ try {
+ RestTestSuiteParseContext testParseContext = new RestTestSuiteParseContext(api, filename, parser, currentVersion);
+ return parse(testParseContext);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public RestTestSuite parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+
+ RestTestSuite restTestSuite = new RestTestSuite(parseContext.getApi(), parseContext.getSuiteName());
+
+ restTestSuite.setSetupSection(parseContext.parseSetupSection());
+
+ boolean skip = restTestSuite.getSetupSection().getSkipSection().skip(parseContext.getCurrentVersion());
+
+ while(true) {
+ //the "---" section separator is not understood by the yaml parser. null is returned, same as when the parser is closed
+ //we need to somehow distinguish between a null in the middle of a test ("---")
+ // and a null at the end of the file (at least two consecutive null tokens)
+ if(parser.currentToken() == null) {
+ if (parser.nextToken() == null) {
+ break;
+ }
+ }
+
+ if (skip) {
+ //if there was a skip section, there was a setup section as well, which means that we are sure
+ // the current token is at the beginning of a new object
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ //we need to be at the beginning of an object to be able to skip children
+ parser.skipChildren();
+ //after skipChildren we are at the end of the skipped object, need to move on
+ parser.nextToken();
+ } else {
+ TestSection testSection = parseContext.parseTestSection();
+ if (!restTestSuite.addTestSection(testSection)) {
+ throw new RestTestParseException("duplicate test section [" + testSection.getName() + "] found in [" + restTestSuite.getDescription() + "]");
+ }
+ }
+ }
+
+ return restTestSuite;
+ }
+}
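
For illustration, a hypothetical sketch (not part of this commit) of the file-based entry point; the path and version below are placeholders:

    import java.io.File;
    import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
    import org.elasticsearch.test.rest.section.RestTestSuite;

    public class SuiteParseSketch {
        public static void main(String[] args) throws Exception {
            //the runner normally resolves both the suite file and the es version
            RestTestSuite suite = new RestTestSuiteParser().parse("1.0.0", "get",
                    new File("rest-api-spec/test/get/10_basic.yaml"));
            System.out.println(suite.getDescription());
        }
    }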
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
new file mode 100644
index 0000000..8afafc0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for set sections
+ */
+public class SetSectionParser implements RestTestFragmentParser<SetSection> {
+
+ @Override
+ public SetSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ SetSection setSection = new SetSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ setSection.addSet(currentFieldName, parser.text());
+ }
+ }
+
+ parser.nextToken();
+
+ if (setSection.getStash().isEmpty()) {
+ throw new RestTestParseException("set section must set at least a value");
+ }
+
+ return setSection;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
new file mode 100644
index 0000000..e036755
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for setup sections
+ */
+public class SetupSectionParser implements RestTestFragmentParser<SetupSection> {
+
+ @Override
+ public SetupSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ SetupSection setupSection = new SetupSection();
+ setupSection.setSkipSection(parseContext.parseSkipSection());
+
+ boolean skip = setupSection.getSkipSection().skip(parseContext.getCurrentVersion());
+
+ while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ if (skip) {
+ //a skip section was just parsed, which means that we are sure
+ // the current token is at the beginning of a new object
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ //we need to be at the beginning of an object to be able to skip children
+ parser.skipChildren();
+ //after skipChildren we are at the end of the skipped object, need to move on
+ parser.nextToken();
+ } else {
+ parseContext.advanceToFieldName();
+ if (!"do".equals(parser.currentName())) {
+ throw new RestTestParseException("section [" + parser.currentName() + "] not supported within setup section");
+ }
+
+ parser.nextToken();
+ setupSection.addDoSection(parseContext.parseDoSection());
+ parser.nextToken();
+ }
+ }
+
+ parser.nextToken();
+
+ return setupSection;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
new file mode 100644
index 0000000..0a81583
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Parser for skip sections
+ */
+public class SkipSectionParser implements RestTestFragmentParser<SkipSection> {
+
+ @Override
+ public SkipSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ String version = null;
+ String reason = null;
+ List<String> features = Lists.newArrayList();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.text();
+ } else if ("reason".equals(currentFieldName)) {
+ reason = parser.text();
+ } else if ("features".equals(currentFieldName)) {
+ features.add(parser.text());
+ } else {
+ throw new RestTestParseException("field " + currentFieldName + " not supported within skip section");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("features".equals(currentFieldName)) {
+ while(parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ features.add(parser.text());
+ }
+ }
+ }
+ }
+
+ parser.nextToken();
+
+ if (!Strings.hasLength(version) && features.isEmpty()) {
+ throw new RestTestParseException("version or features is mandatory within skip section");
+ }
+ if (Strings.hasLength(version) && !features.isEmpty()) {
+ throw new RestTestParseException("version or features are mutually exclusive");
+ }
+ if (Strings.hasLength(version) && !Strings.hasLength(reason)) {
+ throw new RestTestParseException("reason is mandatory within skip version section");
+ }
+
+ return new SkipSection(version, features, reason);
+ }
+}
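
For illustration, a hypothetical sketch (not part of this commit) parsing a version-based skip section; the version range format follows what the yaml suites use, and the expected result assumes the usual range semantics:

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;
    import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
    import org.elasticsearch.test.rest.parser.SkipSectionParser;
    import org.elasticsearch.test.rest.section.SkipSection;

    public class SkipSectionSketch {
        public static void main(String[] args) throws Exception {
            String yaml = "version: \"0 - 0.90.2\"\nreason: regex syntax changed in 0.90.3\n";
            XContentParser parser = YamlXContent.yamlXContent.createParser(yaml);
            RestTestSuiteParseContext context = new RestTestSuiteParseContext("api", "sketch", parser, "1.0.0");
            SkipSection skipSection = new SkipSectionParser().parse(context);
            //expected false: 1.0.0 falls outside the skip range (assumed semantics)
            System.out.println(skipSection.skip("1.0.0"));
        }
    }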
diff --git a/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
new file mode 100644
index 0000000..2a49cd4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Represents a test fragment that contains the information needed to call an api
+ */
+public class ApiCallSection {
+
+ private final String api;
+ private final Map<String, String> params = Maps.newHashMap();
+ private final List<Map<String, Object>> bodies = Lists.newArrayList();
+
+ public ApiCallSection(String api) {
+ this.api = api;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public Map<String, String> getParams() {
+ //make sure we never modify the parameters once returned
+ return ImmutableMap.copyOf(params);
+ }
+
+ public void addParam(String key, String value) {
+ String existingValue = params.get(key);
+ if (existingValue != null) {
+ value = Joiner.on(",").join(existingValue, value);
+ }
+ this.params.put(key, value);
+ }
+
+ public List<Map<String, Object>> getBodies() {
+ return ImmutableList.copyOf(bodies);
+ }
+
+ public void addBody(Map<String, Object> body) {
+ this.bodies.add(body);
+ }
+
+ public boolean hasBody() {
+ return bodies.size() > 0;
+ }
+}
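
For illustration, a hypothetical sketch (not part of this commit) of the parameter-merging behaviour implemented above:

    import org.elasticsearch.test.rest.section.ApiCallSection;

    public class ApiCallSketch {
        public static void main(String[] args) {
            ApiCallSection call = new ApiCallSection("search");
            call.addParam("index", "test_1");
            call.addParam("index", "test_2"); //repeated keys are comma-joined
            System.out.println(call.getParams().get("index")); //test_1,test_2
            System.out.println(call.hasBody());                //false
        }
    }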
diff --git a/src/test/java/org/elasticsearch/test/rest/section/Assertion.java b/src/test/java/org/elasticsearch/test/rest/section/Assertion.java
new file mode 100644
index 0000000..4639732
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/Assertion.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Base class for executable sections that hold assertions
+ */
+public abstract class Assertion implements ExecutableSection {
+
+ private final String field;
+ private final Object expectedValue;
+
+ protected Assertion(String field, Object expectedValue) {
+ this.field = field;
+ this.expectedValue = expectedValue;
+ }
+
+ public final String getField() {
+ return field;
+ }
+
+ public final Object getExpectedValue() {
+ return expectedValue;
+ }
+
+ protected final Object resolveExpectedValue(RestTestExecutionContext executionContext) throws IOException {
+ if (expectedValue instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> map = (Map<String, Object>) expectedValue;
+ return executionContext.stash().unstashMap(map);
+ }
+
+ if (executionContext.stash().isStashedValue(expectedValue)) {
+ return executionContext.stash().unstashValue(expectedValue.toString());
+ }
+ return expectedValue;
+ }
+
+ protected final Object getActualValue(RestTestExecutionContext executionContext) throws IOException {
+ if (executionContext.stash().isStashedValue(field)) {
+ return executionContext.stash().unstashValue(field);
+ }
+ return executionContext.response(field);
+ }
+
+ @Override
+ public final void execute(RestTestExecutionContext executionContext) throws IOException {
+ doAssert(getActualValue(executionContext), resolveExpectedValue(executionContext));
+ }
+
+ /**
+ * Executes the assertion comparing the actual value (parsed from the response) with the expected one
+ */
+ protected abstract void doAssert(Object actualValue, Object expectedValue);
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/DoSection.java b/src/test/java/org/elasticsearch/test/rest/section/DoSection.java
new file mode 100644
index 0000000..3ea1ca0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/DoSection.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.collect.Tuple.tuple;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a do section:
+ *
+ * - do:
+ * catch: missing
+ * update:
+ * index: test_1
+ * type: test
+ * id: 1
+ * body: { doc: { foo: bar } }
+ *
+ */
+public class DoSection implements ExecutableSection {
+
+ private static final ESLogger logger = Loggers.getLogger(DoSection.class);
+
+ private String catchParam;
+ private ApiCallSection apiCallSection;
+
+ public String getCatch() {
+ return catchParam;
+ }
+
+ public void setCatch(String catchParam) {
+ this.catchParam = catchParam;
+ }
+
+ public ApiCallSection getApiCallSection() {
+ return apiCallSection;
+ }
+
+ public void setApiCallSection(ApiCallSection apiCallSection) {
+ this.apiCallSection = apiCallSection;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+
+ if ("param".equals(catchParam)) {
+ //client should throw validation error before sending request
+ //let's just return without doing anything, as we don't have any client to test here
+ logger.info("found [catch: param], no request sent");
+ return;
+ }
+
+ try {
+ RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies());
+ if (Strings.hasLength(catchParam)) {
+ String catchStatusCode;
+ if (catches.containsKey(catchParam)) {
+ catchStatusCode = catches.get(catchParam).v1();
+ } else if (catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ catchStatusCode = "4xx|5xx";
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ fail(formatStatusCodeMessage(restResponse, catchStatusCode));
+ }
+ } catch(RestException e) {
+ if (!Strings.hasLength(catchParam)) {
+ fail(formatStatusCodeMessage(e.restResponse(), "2xx"));
+ } else if (catches.containsKey(catchParam)) {
+ assertStatusCode(e.restResponse());
+ } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ //the text of the error message must match the provided regular expression
+ assertThat(formatStatusCodeMessage(e.restResponse(), "4xx|5xx"), e.statusCode(), greaterThanOrEqualTo(400));
+ Object error = executionContext.response("error");
+ assertThat("error was expected in the response", error, notNullValue());
+ //remove delimiters from regex
+ String regex = catchParam.substring(1, catchParam.length() - 1);
+ assertThat("the error message was expected to match the provided regex but didn't",
+ error.toString(), matches(regex));
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ }
+ }
+
+ private void assertStatusCode(RestResponse restResponse) {
+ Tuple<String, org.hamcrest.Matcher<Integer>> stringMatcherTuple = catches.get(catchParam);
+ assertThat(formatStatusCodeMessage(restResponse, stringMatcherTuple.v1()),
+ restResponse.getStatusCode(), stringMatcherTuple.v2());
+ }
+
+ private String formatStatusCodeMessage(RestResponse restResponse, String expected) {
+ return "expected [" + expected + "] status code but api [" + apiCallSection.getApi() + "] returned ["
+ + restResponse.getStatusCode() + " " + restResponse.getReasonPhrase() + "] [" + restResponse.getBody() + "]";
+ }
+
+ private static Map<String, Tuple<String, org.hamcrest.Matcher<Integer>>> catches = Maps.newHashMap();
+
+ static {
+ catches.put("missing", tuple("404", equalTo(404)));
+ catches.put("conflict", tuple("409", equalTo(409)));
+ catches.put("forbidden", tuple("403", equalTo(403)));
+ catches.put("request", tuple("4xx|5xx", greaterThanOrEqualTo(400)));
+ }
+}
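
For illustration, a hypothetical sketch (not part of this commit) building the javadoc example programmatically; the request body is omitted for brevity:

    import org.elasticsearch.test.rest.section.ApiCallSection;
    import org.elasticsearch.test.rest.section.DoSection;

    public class DoSectionBuildSketch {
        public static void main(String[] args) {
            ApiCallSection update = new ApiCallSection("update");
            update.addParam("index", "test_1");
            update.addParam("type", "test");
            update.addParam("id", "1");
            DoSection doSection = new DoSection();
            doSection.setCatch("missing");
            doSection.setApiCallSection(update);
            //doSection.execute(executionContext) would issue the call and
            //expect a 404 response, mapped from catch: missing
        }
    }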
diff --git a/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java b/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java
new file mode 100644
index 0000000..669d82c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+
+/**
+ * Represents a test fragment that can be executed (e.g. api call, assertion)
+ */
+public interface ExecutableSection {
+
+ /**
+ * Executes the section passing in the execution context
+ */
+ void execute(RestTestExecutionContext executionContext) throws IOException;
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
new file mode 100644
index 0000000..f79dd18
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a gt assert section:
+ *
+ * - gt: { fields._ttl: 0}
+ *
+ */
+public class GreaterThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(GreaterThanAssertion.class);
+
+ public GreaterThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is greater than [{}]", actualValue, expectedValue);
+ assertThat(actualValue, instanceOf(Comparable.class));
+ assertThat(expectedValue, instanceOf(Comparable.class));
+ assertThat(errorMessage(), (Comparable)actualValue, greaterThan((Comparable) expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not greater than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
new file mode 100644
index 0000000..056db99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_false assert section:
+ *
+ * - is_false: get.fields.bar
+ *
+ */
+public class IsFalseAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class);
+
+ public IsFalseAssertion(String field) {
+ super(field, false);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] doesn't have a true value", actualValue);
+
+ if (actualValue == null) {
+ return;
+ }
+
+ String actualString = actualValue.toString();
+ assertThat(errorMessage(), actualString, anyOf(
+ equalTo(""),
+ equalToIgnoringCase(Boolean.FALSE.toString()),
+ equalTo("0")
+ ));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] has a true value but it shouldn't";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
new file mode 100644
index 0000000..6518276
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_true assert section:
+ *
+ * - is_true: get.fields.bar
+ *
+ */
+public class IsTrueAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
+
+ public IsTrueAssertion(String field) {
+ super(field, true);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has a true value", actualValue);
+ String errorMessage = errorMessage();
+ assertThat(errorMessage, actualValue, notNullValue());
+ String actualString = actualValue.toString();
+ assertThat(errorMessage, actualString, not(equalTo("")));
+ assertThat(errorMessage, actualString, not(equalToIgnoringCase(Boolean.FALSE.toString())));
+ assertThat(errorMessage, actualString, not(equalTo("0")));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have a true value";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java
new file mode 100644
index 0000000..b13cca1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a length assert section:
+ *
+ * - length: { hits.hits: 1 }
+ *
+ */
+public class LengthAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class);
+
+ public LengthAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has length [{}]", actualValue, expectedValue);
+ assertThat(expectedValue, instanceOf(Number.class));
+ int length = ((Number) expectedValue).intValue();
+ if (actualValue instanceof String) {
+ assertThat(errorMessage(), ((String) actualValue).length(), equalTo(length));
+ } else if (actualValue instanceof List) {
+ assertThat(errorMessage(), ((List) actualValue).size(), equalTo(length));
+ } else if (actualValue instanceof Map) {
+ assertThat(errorMessage(), ((Map) actualValue).keySet().size(), equalTo(length));
+ } else {
+ throw new UnsupportedOperationException("value is of unsupported type [" + actualValue.getClass().getSimpleName() + "]");
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have length [" + getExpectedValue() + "]";
+ }
+}
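+
+// Illustrative sketch, not part of the upstream sources: for
+//
+//   - length: { hits.hits: 1 }
+//
+// the expected value must be a Number; the assertion then compares it against
+// String.length(), List.size() or Map.keySet().size() depending on the type
+// of the actual value, so a hits.hits list holding a single hit passes, and
+// any other actual type throws UnsupportedOperationException.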
diff --git a/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
new file mode 100644
index 0000000..52c882d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a lt assert section:
+ *
+ * - lt: { fields._ttl: 20000}
+ *
+ */
+public class LessThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LessThanAssertion.class);
+
+ public LessThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is less than [{}]", actualValue, expectedValue);
+ assertThat(actualValue, instanceOf(Comparable.class));
+ assertThat(expectedValue, instanceOf(Comparable.class));
+ assertThat(errorMessage(), (Comparable)actualValue, lessThan((Comparable)expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not less than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java
new file mode 100644
index 0000000..8b85104
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a match assert section:
+ *
+ * - match: { get.fields._routing: "5" }
+ *
+ */
+public class MatchAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class);
+
+ public MatchAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+
+        //assert non-null up front: a null actual value would otherwise NPE on toString() in the regex branch
+        assertThat(errorMessage(), actualValue, notNullValue());
+
+        //if the value is wrapped in / it is a regexp (e.g. /s+d+/)
+        if (expectedValue instanceof String) {
+            String expValue = ((String) expectedValue).trim();
+            if (expValue.length() > 2 && expValue.startsWith("/") && expValue.endsWith("/")) {
+                String regex = expValue.substring(1, expValue.length() - 1);
+                logger.trace("assert that [{}] matches [{}]", actualValue, regex);
+                assertThat("field [" + getField() + "] was expected to match the provided regex but didn't",
+                        actualValue.toString(), matches(regex, Pattern.COMMENTS));
+                return;
+            }
+        }
+
+ logger.trace("assert that [{}] matches [{}]", actualValue, expectedValue);
+ if (!actualValue.getClass().equals(expectedValue.getClass())) {
+ if (actualValue instanceof Number && expectedValue instanceof Number) {
+ //Double 1.0 is equal to Integer 1
+ assertThat(errorMessage(), ((Number) actualValue).doubleValue(), equalTo(((Number) expectedValue).doubleValue()));
+ return;
+ }
+ }
+
+ assertThat(errorMessage(), actualValue, equalTo(expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't match the expected value";
+ }
+}
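+
+// Illustrative sketch, not part of the upstream sources: a string expected
+// value wrapped in slashes, e.g.
+//
+//   - match: { index.template: /template_\d+/ }
+//
+// is compiled with Pattern.COMMENTS (so literal whitespace in the pattern is
+// ignored, allowing readable multi-line regexes) and matched against
+// actualValue.toString(). All other values are compared with equalTo, except
+// that numbers of different types are coerced via doubleValue(), so an
+// Integer 1 in the yaml matches a Double 1.0 in the response.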
diff --git a/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java
new file mode 100644
index 0000000..5737e1e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Holds a REST test suite loaded from a specific yaml file.
+ * Supports a setup section and multiple test sections.
+ */
+public class RestTestSuite {
+
+ private final String api;
+ private final String name;
+
+ private SetupSection setupSection;
+
+ private Set<TestSection> testSections = Sets.newHashSet();
+
+ public RestTestSuite(String api, String name) {
+ this.api = api;
+ this.name = name;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+    //describes the rest test suite (e.g. index/10_with_id)
+    //also useful to reproduce failures (RestReproduceInfoPrinter)
+ public String getDescription() {
+ return api + "/" + name;
+ }
+
+ public SetupSection getSetupSection() {
+ return setupSection;
+ }
+
+ public void setSetupSection(SetupSection setupSection) {
+ this.setupSection = setupSection;
+ }
+
+ /**
+ * Adds a {@link org.elasticsearch.test.rest.section.TestSection} to the REST suite
+ * @return true if the test section was not already present, false otherwise
+ */
+ public boolean addTestSection(TestSection testSection) {
+ return this.testSections.add(testSection);
+ }
+
+ public List<TestSection> getTestSections() {
+ return Lists.newArrayList(testSections);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/SetSection.java b/src/test/java/org/elasticsearch/test/rest/section/SetSection.java
new file mode 100644
index 0000000..0a52a77
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/SetSection.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Represents a set section:
+ *
+ * - set: {_scroll_id: scroll_id}
+ *
+ */
+public class SetSection implements ExecutableSection {
+
+ private Map<String, String> stash = Maps.newHashMap();
+
+ public void addSet(String responseField, String stashedField) {
+ stash.put(responseField, stashedField);
+ }
+
+ public Map<String, String> getStash() {
+ return stash;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+ for (Map.Entry<String, String> entry : stash.entrySet()) {
+ Object actualValue = executionContext.response(entry.getKey());
+ executionContext.stash().stashValue(entry.getValue(), actualValue);
+ }
+ }
+}
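+
+// Illustrative sketch, not part of the upstream sources: for
+//
+//   - set: { _scroll_id: scroll_id }
+//
+// execute() reads the _scroll_id field from the last response and stashes it
+// under the key scroll_id, so later executable sections can look the value up
+// again through the execution context's stash.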
diff --git a/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java b/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java
new file mode 100644
index 0000000..72f653e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Represents a setup section. Holds a skip section and multiple do sections.
+ */
+public class SetupSection {
+
+ public static final SetupSection EMPTY;
+
+ static {
+ EMPTY = new SetupSection();
+ EMPTY.setSkipSection(SkipSection.EMPTY);
+ }
+
+ private SkipSection skipSection;
+
+ private List<DoSection> doSections = Lists.newArrayList();
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<DoSection> getDoSections() {
+ return doSections;
+ }
+
+ public void addDoSection(DoSection doSection) {
+ this.doSections.add(doSection);
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java
new file mode 100644
index 0000000..9e2f5d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.rest.support.Features;
+import org.elasticsearch.test.rest.support.VersionUtils;
+
+import java.util.List;
+
+/**
+ * Represents a skip section that tells whether a specific test section or suite needs to be skipped
+ * based on:
+ * - the elasticsearch version the tests are running against
+ * - a required test feature that the runner might not have implemented yet
+ */
+public class SkipSection {
+
+ public static final SkipSection EMPTY = new SkipSection("", Lists.<String>newArrayList(), "");
+
+ private final String version;
+ private final List<String> features;
+ private final String reason;
+
+ public SkipSection(String version, List<String> features, String reason) {
+ this.version = version;
+ this.features = features;
+ this.reason = reason;
+ }
+
+ public String getVersion() {
+ return version;
+ }
+
+ public List<String> getFeatures() {
+ return features;
+ }
+
+ public String getReason() {
+ return reason;
+ }
+
+ public boolean skip(String currentVersion) {
+ if (isEmpty()) {
+ return false;
+ }
+
+ if (version != null) {
+ return VersionUtils.skipCurrentVersion(version, currentVersion);
+ }
+
+ if (features != null && !this.features.isEmpty()) {
+ return !Features.areAllSupported(this.features);
+ }
+
+        throw new IllegalArgumentException("version or features should not be null in a non-empty skip section");
+ }
+
+ public boolean isVersionCheck() {
+ return Strings.hasLength(version);
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+}
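+
+// Illustrative sketch, not part of the upstream sources: a section built as
+//
+//   new SkipSection("0.90.2 - 0.90.4", Lists.<String>newArrayList(), "bug")
+//
+// makes skip("0.90.3") return true because the current version falls inside
+// the range (see VersionUtils), while a features-only section with a null
+// version skips exactly when Features.areAllSupported(...) returns false.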
diff --git a/src/test/java/org/elasticsearch/test/rest/section/TestSection.java b/src/test/java/org/elasticsearch/test/rest/section/TestSection.java
new file mode 100644
index 0000000..3386b9e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/TestSection.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Represents a test section, which is composed of a skip section and multiple executable sections.
+ */
+public class TestSection {
+ private final String name;
+ private SkipSection skipSection;
+ private final List<ExecutableSection> executableSections;
+
+ public TestSection(String name) {
+ this.name = name;
+ this.executableSections = Lists.newArrayList();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<ExecutableSection> getExecutableSections() {
+ return executableSections;
+ }
+
+ public void addExecutableSection(ExecutableSection executableSection) {
+ this.executableSections.add(executableSection);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ TestSection that = (TestSection) o;
+
+ if (name != null ? !name.equals(that.name) : that.name != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return name != null ? name.hashCode() : 0;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java b/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java
new file mode 100644
index 0000000..0996df4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Represents an elasticsearch REST endpoint (api)
+ */
+public class RestApi {
+
+ private final String name;
+ private List<String> methods = Lists.newArrayList();
+ private List<String> paths = Lists.newArrayList();
+ private List<String> pathParts = Lists.newArrayList();
+ private List<String> params = Lists.newArrayList();
+ private BODY body = BODY.NOT_SUPPORTED;
+
+    public enum BODY {
+ NOT_SUPPORTED, OPTIONAL, REQUIRED
+ }
+
+ RestApi(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public List<String> getMethods() {
+ return methods;
+ }
+
+ /**
+ * Returns the supported http methods given the rest parameters provided
+ */
+ public List<String> getSupportedMethods(Set<String> restParams) {
+ //we try to avoid hardcoded mappings but the index api is the exception
+ if ("index".equals(name) || "create".equals(name)) {
+ List<String> indexMethods = Lists.newArrayList();
+ for (String method : methods) {
+ if (restParams.contains("id")) {
+ //PUT when the id is provided
+ if (HttpPut.METHOD_NAME.equals(method)) {
+ indexMethods.add(method);
+ }
+ } else {
+ //POST without id
+ if (HttpPost.METHOD_NAME.equals(method)) {
+ indexMethods.add(method);
+ }
+ }
+ }
+ return indexMethods;
+ }
+
+ return methods;
+ }
+
+ void addMethod(String method) {
+ this.methods.add(method);
+ }
+
+ public List<String> getPaths() {
+ return paths;
+ }
+
+ void addPath(String path) {
+ this.paths.add(path);
+ }
+
+ public List<String> getPathParts() {
+ return pathParts;
+ }
+
+ void addPathPart(String pathPart) {
+ this.pathParts.add(pathPart);
+ }
+
+ public List<String> getParams() {
+ return params;
+ }
+
+ void addParam(String param) {
+ this.params.add(param);
+ }
+
+ void setBodyOptional() {
+ this.body = BODY.OPTIONAL;
+ }
+
+ void setBodyRequired() {
+ this.body = BODY.REQUIRED;
+ }
+
+ public boolean isBodySupported() {
+ return body != BODY.NOT_SUPPORTED;
+ }
+
+ public boolean isBodyRequired() {
+ return body == BODY.REQUIRED;
+ }
+
+ /**
+ * Finds the best matching rest path given the current parameters and replaces
+ * placeholders with their corresponding values received as arguments
+ */
+ public String[] getFinalPaths(Map<String, String> pathParams) {
+
+ List<RestPath> matchingRestPaths = findMatchingRestPaths(pathParams.keySet());
+ if (matchingRestPaths == null || matchingRestPaths.isEmpty()) {
+ throw new IllegalArgumentException("unable to find matching rest path for api [" + name + "] and path params " + pathParams);
+ }
+
+ String[] paths = new String[matchingRestPaths.size()];
+ for (int i = 0; i < matchingRestPaths.size(); i++) {
+ RestPath restPath = matchingRestPaths.get(i);
+ String path = restPath.path;
+ for (Map.Entry<String, String> paramEntry : restPath.parts.entrySet()) {
+ // replace path placeholders with actual values
+ String value = pathParams.get(paramEntry.getValue());
+ if (value == null) {
+ throw new IllegalArgumentException("parameter [" + paramEntry.getValue() + "] missing");
+ }
+ path = path.replace(paramEntry.getKey(), value);
+ }
+ paths[i] = path;
+ }
+ return paths;
+ }
+
+ /**
+     * Finds the matching rest paths among the ones available for the current api (based on the REST spec).
+ *
+ * The best path is the one that has exactly the same number of placeholders to replace
+ * (e.g. /{index}/{type}/{id} when the path params are exactly index, type and id).
+ */
+ private List<RestPath> findMatchingRestPaths(Set<String> restParams) {
+
+ List<RestPath> matchingRestPaths = Lists.newArrayList();
+ RestPath[] restPaths = buildRestPaths();
+
+ for (RestPath restPath : restPaths) {
+ if (restPath.parts.size() == restParams.size()) {
+ if (restPath.parts.values().containsAll(restParams)) {
+ matchingRestPaths.add(restPath);
+ }
+ }
+ }
+
+ return matchingRestPaths;
+ }
+
+ private RestPath[] buildRestPaths() {
+ RestPath[] restPaths = new RestPath[paths.size()];
+ for (int i = 0; i < restPaths.length; i++) {
+ restPaths[i] = new RestPath(paths.get(i));
+ }
+ return restPaths;
+ }
+
+ private static class RestPath {
+ private static final Pattern PLACEHOLDERS_PATTERN = Pattern.compile("(\\{(.*?)})");
+
+ final String path;
+ //contains param to replace (e.g. {index}) and param key to use for lookup in the current values map (e.g. index)
+ final Map<String, String> parts;
+
+ RestPath(String path) {
+ this.path = path;
+ this.parts = extractParts(path);
+ }
+
+ private static Map<String,String> extractParts(String input) {
+ Map<String, String> parts = Maps.newHashMap();
+ Matcher matcher = PLACEHOLDERS_PATTERN.matcher(input);
+ while (matcher.find()) {
+ //key is e.g. {index}
+ String key = input.substring(matcher.start(), matcher.end());
+ if (matcher.groupCount() != 2) {
+ throw new IllegalArgumentException("no lookup key found for param [" + key + "]");
+ }
+ //to be replaced with current value found with key e.g. index
+ String value = matcher.group(2);
+ parts.put(key, value);
+ }
+ return parts;
+ }
+ }
+}
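+
+// Illustrative sketch, not part of the upstream sources: given a single spec
+// path "/{index}/{type}/{id}", calling (e.g. with guava's ImmutableMap)
+//
+//   getFinalPaths(ImmutableMap.of("index", "test", "type", "doc", "id", "1"))
+//
+// matches that path because it carries exactly the three provided params and
+// returns ["/test/doc/1"], each {placeholder} replaced with its value; a
+// missing value raises IllegalArgumentException("parameter [...] missing").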
diff --git a/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java b/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java
new file mode 100644
index 0000000..2c3dc45
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for a REST api spec (single json file)
+ */
+public class RestApiParser {
+
+ public RestApi parse(XContentParser parser) throws IOException {
+
+ try {
+            while (parser.nextToken() != XContentParser.Token.FIELD_NAME) {
+ //move to first field name
+ }
+
+ RestApi restApi = new RestApi(parser.currentName());
+
+ int level = -1;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT || level >= 0) {
+
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ if ("methods".equals(parser.currentName())) {
+ parser.nextToken();
+ while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+ restApi.addMethod(parser.text());
+ }
+ }
+
+ if ("url".equals(parser.currentName())) {
+ String currentFieldName = "url";
+ int innerLevel = -1;
+ while(parser.nextToken() != XContentParser.Token.END_OBJECT || innerLevel >= 0) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_ARRAY && "paths".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+ restApi.addPath(parser.text());
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT && "parts".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ restApi.addPathPart(parser.currentName());
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IOException("Expected parts field in rest api definition to contain an object");
+ }
+ parser.skipChildren();
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT && "params".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ restApi.addParam(parser.currentName());
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IOException("Expected params field in rest api definition to contain an object");
+ }
+ parser.skipChildren();
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ innerLevel++;
+ }
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+ innerLevel--;
+ }
+ }
+ }
+
+ if ("body".equals(parser.currentName())) {
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ boolean requiredFound = false;
+ while(parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ if ("required".equals(parser.currentName())) {
+ requiredFound = true;
+ parser.nextToken();
+ if (parser.booleanValue()) {
+ restApi.setBodyRequired();
+ } else {
+ restApi.setBodyOptional();
+ }
+ }
+ }
+ }
+ if (!requiredFound) {
+ restApi.setBodyOptional();
+ }
+ }
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ level++;
+ }
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+ level--;
+ }
+
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ return restApi;
+
+ } finally {
+ parser.close();
+ }
+ }
+
+}
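+
+// Illustrative sketch, not part of the upstream sources: an abridged example
+// of the spec shape this parser consumes,
+//
+//   {
+//     "get": {
+//       "methods": ["GET"],
+//       "url": {
+//         "paths": ["/{index}/{type}/{id}"],
+//         "parts": { "id": {} },
+//         "params": { "fields": {} }
+//       },
+//       "body": null
+//     }
+//   }
+//
+// yields a RestApi named "get" with one method, one path, one path part and
+// one param, and with body left as NOT_SUPPORTED since "body" is null.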
diff --git a/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java b/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java
new file mode 100644
index 0000000..6da8591
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.support.FileUtils;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Holds the elasticsearch REST spec
+ */
+public class RestSpec {
+ Map<String, RestApi> restApiMap = Maps.newHashMap();
+
+ private RestSpec() {
+ }
+
+ void addApi(RestApi restApi) {
+ restApiMap.put(restApi.getName(), restApi);
+ }
+
+ public RestApi getApi(String api) {
+ return restApiMap.get(api);
+ }
+
+ /**
+     * Parses the complete set of REST specs available under the provided directories
+ */
+ public static RestSpec parseFrom(String optionalPathPrefix, String... paths) throws IOException {
+ RestSpec restSpec = new RestSpec();
+ for (String path : paths) {
+ for (File jsonFile : FileUtils.findJsonSpec(optionalPathPrefix, path)) {
+ try {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(new FileInputStream(jsonFile));
+ RestApi restApi = new RestApiParser().parse(parser);
+ restSpec.addApi(restApi);
+ } catch (IOException ex) {
+ throw new IOException("Can't parse rest spec file: [" + jsonFile + "]", ex);
+ }
+ }
+ }
+ return restSpec;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/support/Features.java b/src/test/java/org/elasticsearch/test/rest/support/Features.java
new file mode 100644
index 0000000..05d6cfb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/support/Features.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.support;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Allows registering additional features supported by the test runner.
+ * This way any runner can add extra features and use proper skip sections to avoid
+ * breaking other runners until they have implemented the new feature as well.
+ *
+ * Once all runners have implemented the feature, it can be removed from the list
+ * and the related skip sections can be removed from the tests as well.
+ */
+public final class Features {
+
+ private static final List<String> SUPPORTED = Lists.newArrayList();
+
+ private Features() {
+
+ }
+
+ /**
+ * Tells whether all the features provided as argument are supported
+ */
+ public static boolean areAllSupported(List<String> features) {
+ for (String feature : features) {
+ if (!SUPPORTED.contains(feature)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns all the supported features
+ */
+ public static List<String> getSupported() {
+ return SUPPORTED;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java b/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java
new file mode 100644
index 0000000..6c81703
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.support;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.Strings;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileNotFoundException;
+import java.net.URL;
+import java.util.Map;
+import java.util.Set;
+
+public final class FileUtils {
+
+ private static final String YAML_SUFFIX = ".yaml";
+ private static final String JSON_SUFFIX = ".json";
+
+ private FileUtils() {
+
+ }
+
+ /**
+ * Returns the json files found within the directory provided as argument.
+ * Files are looked up in the classpath first, then outside of it if not found.
+ */
+ public static Set<File> findJsonSpec(String optionalPathPrefix, String path) throws FileNotFoundException {
+ File dir = resolveFile(optionalPathPrefix, path, null);
+
+ if (!dir.isDirectory()) {
+ throw new FileNotFoundException("file [" + path + "] is not a directory");
+ }
+
+ File[] jsonFiles = dir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(File pathname) {
+ return pathname.getName().endsWith(JSON_SUFFIX);
+ }
+ });
+
+ if (jsonFiles == null || jsonFiles.length == 0) {
+ throw new FileNotFoundException("no json files found within [" + path + "]");
+ }
+
+ return Sets.newHashSet(jsonFiles);
+ }
+
+ /**
+ * Returns the yaml files found within the paths provided.
+ * Each input path can either be a single file (the .yaml suffix is optional) or a directory.
+     * Each path is looked up in the classpath first, then outside of it if not found.
+ */
+ public static Map<String, Set<File>> findYamlSuites(final String optionalPathPrefix, final String... paths) throws FileNotFoundException {
+ Map<String, Set<File>> yamlSuites = Maps.newHashMap();
+ for (String path : paths) {
+ collectFiles(resolveFile(optionalPathPrefix, path, YAML_SUFFIX), YAML_SUFFIX, yamlSuites);
+ }
+ return yamlSuites;
+ }
+
+ private static File resolveFile(String optionalPathPrefix, String path, String optionalFileSuffix) throws FileNotFoundException {
+ //try within classpath with and without file suffix (as it could be a single test suite)
+ URL resource = findResource(path, optionalFileSuffix);
+ if (resource == null) {
+ //try within classpath with optional prefix: /rest-api-spec/test (or /rest-api-spec/api) is optional
+ String newPath = optionalPathPrefix + "/" + path;
+ resource = findResource(newPath, optionalFileSuffix);
+ if (resource == null) {
+                //if it wasn't on the classpath we look outside of the classpath
+ File file = findFile(path, optionalFileSuffix);
+ if (!file.exists()) {
+ throw new FileNotFoundException("file [" + path + "] doesn't exist");
+ }
+ return file;
+ }
+ }
+ return new File(resource.getFile());
+ }
+
+ private static URL findResource(String path, String optionalFileSuffix) {
+ URL resource = FileUtils.class.getResource(path);
+ if (resource == null) {
+ //if not found we append the file suffix to the path (as it is optional)
+ if (Strings.hasLength(optionalFileSuffix) && !path.endsWith(optionalFileSuffix)) {
+ resource = FileUtils.class.getResource(path + optionalFileSuffix);
+ }
+ }
+ return resource;
+ }
+
+ private static File findFile(String path, String optionalFileSuffix) {
+ File file = new File(path);
+ if (!file.exists()) {
+ file = new File(path + optionalFileSuffix);
+ }
+ return file;
+ }
+
+ private static void collectFiles(final File file, final String fileSuffix, final Map<String, Set<File>> files) {
+ if (file.isFile()) {
+ String groupName = file.getParentFile().getName();
+ Set<File> filesSet = files.get(groupName);
+ if (filesSet == null) {
+ filesSet = Sets.newHashSet();
+ files.put(groupName, filesSet);
+ }
+ filesSet.add(file);
+ } else if (file.isDirectory()) {
+ walkDir(file, fileSuffix, files);
+ }
+ }
+
+ private static void walkDir(final File dir, final String fileSuffix, final Map<String, Set<File>> files) {
+ File[] children = dir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(File pathname) {
+ return pathname.isDirectory() || pathname.getName().endsWith(fileSuffix);
+ }
+ });
+
+ for (File file : children) {
+ collectFiles(file, fileSuffix, files);
+ }
+ }
+}
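+
+// Illustrative sketch, not part of the upstream sources: findYamlSuites groups
+// the collected files by the name of their parent directory, so a layout like
+//
+//   rest-api-spec/test/get/10_basic.yaml
+//   rest-api-spec/test/get/15_default_values.yaml
+//
+// ends up as a single map entry with key "get" holding both files.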
diff --git a/src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java b/src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java
new file mode 100644
index 0000000..9c19210
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.support;
+
+public final class VersionUtils {
+
+ private VersionUtils() {
+
+ }
+
+ /**
+ * Parses an elasticsearch version string into an int array with an element per part
+ * e.g. 0.90.7 => [0,90,7]
+ */
+ public static int[] parseVersionNumber(String version) {
+ String[] split = version.split("\\.");
+        //we only take the first 3 parts if there are more; fewer is ok too (e.g. 999)
+ int length = Math.min(3, split.length);
+ int[] versionNumber = new int[length];
+ for (int i = 0; i < length; i++) {
+ try {
+ versionNumber[i] = Integer.valueOf(split[i]);
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("version is not a number", e);
+ }
+
+ }
+ return versionNumber;
+ }
+
+ /**
+ * Compares the skip version read from a test fragment with the elasticsearch version
+ * the tests are running against and determines whether the test fragment needs to be skipped
+ */
+ public static boolean skipCurrentVersion(String skipVersion, String currentVersion) {
+ int[] currentVersionNumber = parseVersionNumber(currentVersion);
+
+ String[] skipVersions = skipVersion.split("-");
+ if (skipVersions.length > 2) {
+ throw new IllegalArgumentException("too many skip versions found");
+ }
+
+ String skipVersionLowerBound = skipVersions[0].trim();
+ String skipVersionUpperBound = skipVersions[1].trim();
+
+ int[] skipVersionLowerBoundNumber = parseVersionNumber(skipVersionLowerBound);
+ int[] skipVersionUpperBoundNumber = parseVersionNumber(skipVersionUpperBound);
+
+ int length = Math.min(skipVersionLowerBoundNumber.length, currentVersionNumber.length);
+ for (int i = 0; i < length; i++) {
+ if (currentVersionNumber[i] < skipVersionLowerBoundNumber[i]) {
+ return false;
+ }
+ if (currentVersionNumber[i] > skipVersionLowerBoundNumber[i]) {
+ break;
+ }
+ }
+
+ length = Math.min(skipVersionUpperBoundNumber.length, currentVersionNumber.length);
+ for (int i = 0; i < length; i++) {
+ if (currentVersionNumber[i] > skipVersionUpperBoundNumber[i]) {
+ return false;
+ }
+ if (currentVersionNumber[i] < skipVersionUpperBoundNumber[i]) {
+ break;
+ }
+ }
+
+ return true;
+ }
+}
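+
+// Illustrative sketch, not part of the upstream sources: the skip range is
+// inclusive on both ends, so
+//
+//   skipCurrentVersion("0.90.2 - 0.90.4", "0.90.3")  // true, within range
+//   skipCurrentVersion("0.90.2 - 0.90.4", "0.90.4")  // true, upper bound hit
+//   skipCurrentVersion("0.90.2 - 0.90.4", "0.90.5")  // false, above range
+//   skipCurrentVersion("0.90.7 - 999",    "1.0.0")   // true, open-ended upper
+//
+// and note that a single version with no '-' separator fails with
+// ArrayIndexOutOfBoundsException when the upper bound is read.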
diff --git a/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java
new file mode 100644
index 0000000..a2ee377
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Ignore;
+
+import static org.hamcrest.Matchers.nullValue;
+
+@Ignore
+public abstract class AbstractParserTests extends ElasticsearchTestCase {
+
+ protected XContentParser parser;
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+        //make sure that we consumed the whole yaml
+ assertThat(parser.currentToken(), nullValue());
+ parser.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java b/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
new file mode 100644
index 0000000..9d9ba78
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.*;
+import org.elasticsearch.test.rest.section.*;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class AssertionParsersTests extends AbstractParserTests {
+
+ @Test
+ public void testParseIsTrue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "get.fields._timestamp"
+ );
+
+ IsTrueParser isTrueParser = new IsTrueParser();
+ IsTrueAssertion trueAssertion = isTrueParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(trueAssertion, notNullValue());
+ assertThat(trueAssertion.getField(), equalTo("get.fields._timestamp"));
+ }
+
+ @Test
+ public void testParseIsFalse() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "docs.1._source"
+ );
+
+ IsFalseParser isFalseParser = new IsFalseParser();
+ IsFalseAssertion falseAssertion = isFalseParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(falseAssertion, notNullValue());
+ assertThat(falseAssertion.getField(), equalTo("docs.1._source"));
+ }
+
+ @Test
+ public void testParseGreaterThan() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 3}"
+ );
+
+ GreaterThanParser greaterThanParser = new GreaterThanParser();
+ GreaterThanAssertion greaterThanAssertion = greaterThanParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ assertThat(greaterThanAssertion, notNullValue());
+ assertThat(greaterThanAssertion.getField(), equalTo("field"));
+ assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(3));
+ }
+
+ @Test
+ public void testParseLessThan() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 3}"
+ );
+
+ LessThanParser lessThanParser = new LessThanParser();
+ LessThanAssertion lessThanAssertion = lessThanParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ assertThat(lessThanAssertion, notNullValue());
+ assertThat(lessThanAssertion.getField(), equalTo("field"));
+ assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(3));
+ }
+
+ @Test
+ public void testParseLength() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: 22}"
+ );
+
+ LengthParser lengthParser = new LengthParser();
+ LengthAssertion lengthAssertion = lengthParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ assertThat(lengthAssertion, notNullValue());
+ assertThat(lengthAssertion.getField(), equalTo("_id"));
+ assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(22));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSimpleIntegerValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 10 }"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("field"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) matchAssertion.getExpectedValue(), equalTo(10));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSimpleStringValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ foo: bar }"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("foo"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(String.class));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("bar"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchArray() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{'matches': ['test_percolator_1', 'test_percolator_2']}"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("matches"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(List.class));
+ List strings = (List) matchAssertion.getExpectedValue();
+ assertThat(strings.size(), equalTo(2));
+ assertThat(strings.get(0).toString(), equalTo("test_percolator_1"));
+ assertThat(strings.get(1).toString(), equalTo("test_percolator_2"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSourceValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _source: { responses.0.hits.total: 3, foo: bar }}"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ Map<String, Object> expectedValue = (Map<String, Object>) matchAssertion.getExpectedValue();
+ assertThat(expectedValue.size(), equalTo(2));
+ Object o = expectedValue.get("responses.0.hits.total");
+ assertThat(o, instanceOf(Integer.class));
+ assertThat((Integer)o, equalTo(3));
+ o = expectedValue.get("foo");
+ assertThat(o, instanceOf(String.class));
+ assertThat(o.toString(), equalTo("bar"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
new file mode 100644
index 0000000..e580f0f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.DoSectionParser;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DoSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseDoSectionNoBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "get:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ " id: 1"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("get"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_index"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test_type"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseDoSectionNoParamsNoBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "cluster.node_info: {}"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("cluster.node_info"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonBody() throws Exception {
+ String body = "{ \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }";
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: " + body
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_1"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonMultipleBodiesAsLongString() throws Exception {
+ String[] bodies = new String[]{
+ "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }\n",
+ "{ \"f1\":\"v1\", \"f2\":42 }\n",
+ "{ \"index\": { \"_index\":\"test_index2\", \"_type\":\"test_type2\", \"_id\":\"test_id2\" } }\n",
+ "{ \"f1\":\"v2\", \"f2\":47 }\n"
+ };
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body: |\n" +
+ " " + bodies[0] +
+ " " + bodies[1] +
+ " " + bodies[2] +
+ " " + bodies[3]
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(4));
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonMultipleBodiesRepeatedProperty() throws Exception {
+ String[] bodies = new String[] {
+ "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }",
+ "{ \"f1\":\"v1\", \"f2\":42 }",
+ };
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body: \n" +
+ " " + bodies[0] + "\n" +
+ " body: \n" +
+ " " + bodies[1]
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "search:\n" +
+ " body:\n" +
+ " _source: [ include.field1, include.field2 ]\n" +
+ " query: { match_all: {} }"
+ );
+ String body = "{ \"_source\": [ \"include.field1\", \"include.field2\" ], \"query\": { \"match_all\": {} }}";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("search"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlMultipleBodies() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body:\n" +
+ " - index:\n" +
+ " _index: test_index\n" +
+ " _type: test_type\n" +
+ " _id: test_id\n" +
+ " - f1: v1\n" +
+ " f2: 42\n" +
+ " - index:\n" +
+ " _index: test_index2\n" +
+ " _type: test_type2\n" +
+ " _id: test_id2\n" +
+ " - f1: v2\n" +
+ " f2: 47"
+ );
+ String[] bodies = new String[4];
+ bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}";
+ bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }";
+ bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_type\": \"test_type2\", \"_id\": \"test_id2\"}}";
+ bodies[3] = "{ \"f1\":\"v2\", \"f2\": 47 }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlMultipleBodiesRepeatedProperty() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body:\n" +
+ " index:\n" +
+ " _index: test_index\n" +
+ " _type: test_type\n" +
+ " _id: test_id\n" +
+ " body:\n" +
+ " f1: v1\n" +
+ " f2: 42\n"
+ );
+ String[] bodies = new String[2];
+ bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}";
+ bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlBodyMultiGet() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "mget:\n" +
+ " body:\n" +
+ " docs:\n" +
+ " - { _index: test_2, _type: test, _id: 1}\n" +
+ " - { _index: test_1, _type: none, _id: 1}"
+ );
+ String body = "{ \"docs\": [ " +
+ "{\"_index\": \"test_2\", \"_type\":\"test\", \"_id\":1}, " +
+ "{\"_index\": \"test_1\", \"_type\":\"none\", \"_id\":1} " +
+ "]}";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("mget"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithBodyStringified() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: \"{ _source: true, query: { match_all: {} } }\""
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_1"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ //the stringified body is taken as-is
+ assertJsonEquals(apiCallSection.getBodies().get(0), "{ _source: true, query: { match_all: {} } }");
+ }
+
+ @Test
+ public void testParseDoSectionWithBodiesStringifiedAndNot() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " body:\n" +
+ " - \"{ _source: true, query: { match_all: {} } }\"\n" +
+ " - { size: 100, query: { match_all: {} } }"
+ );
+
+ String body = "{ \"size\": 100, \"query\": { \"match_all\": {} } }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(2));
+ //the stringified body is taken as-is
+ assertJsonEquals(apiCallSection.getBodies().get(0), "{ _source: true, query: { match_all: {} } }");
+ assertJsonEquals(apiCallSection.getBodies().get(1), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithCatch() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "catch: missing\n" +
+ "indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseDoSectionWithoutClientCallSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "catch: missing\n"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+
+ @Test
+ public void testParseDoSectionMultivaluedField() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "indices.get_field_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ " field: [ text , text1 ]"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type"));
+ assertThat(doSection.getApiCallSection().getParams().get("field"), equalTo("text,text1"));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0));
+ }
+
+ private static void assertJsonEquals(Map<String, Object> actual, String expected) throws IOException {
+ Map<String,Object> expectedMap = JsonXContent.jsonXContent.createParser(expected).mapOrderedAndClose();
+ MatcherAssert.assertThat(actual, equalTo(expectedMap));
+ }
+}
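
The do-section parser above maps a YAML "do" block onto an ApiCallSection. A minimal sketch, reusing only the calls shown in these tests (note that parameter values are carried as strings):

    XContentParser parser = YamlXContent.yamlXContent.createParser(
            "get:\n" +
            "  index: test_index\n" +
            "  id: 1");
    DoSection doSection = new DoSectionParser()
            .parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
    ApiCallSection call = doSection.getApiCallSection();
    // call.getApi()    -> "get"
    // call.getParams() -> {index=test_index, id=1}   (values kept as strings)
    // call.hasBody()   -> false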
diff --git a/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java b/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
new file mode 100644
index 0000000..66c7f04
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class FileUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLoadSingleYamlSuite() throws Exception {
+ Map<String,Set<File>> yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "/rest-api-spec/test/get/10_basic");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+
+ //the path prefix is optional
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic.yaml");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+
+ //extension .yaml is optional
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+ }
+
+ @Test
+ public void testLoadMultipleYamlSuites() throws Exception {
+ //single directory
+ Map<String,Set<File>> yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), greaterThan(1));
+
+ //multiple directories
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get", "index");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), greaterThan(1));
+ assertThat(yamlSuites.containsKey("index"), equalTo(true));
+ assertThat(yamlSuites.get("index").size(), greaterThan(1));
+
+ //multiple paths, which can be either directories or yaml test suites (with optional file extension)
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "indices.optimize/10_basic", "index");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("indices.optimize"), equalTo(true));
+ assertThat(yamlSuites.get("indices.optimize").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("indices.optimize"), "indices.optimize", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey("index"), equalTo(true));
+ assertThat(yamlSuites.get("index").size(), greaterThan(1));
+
+ //files can be loaded from the classpath as well as from the file system
+ File dir = newTempDir();
+ File file = new File(dir, "test_loading.yaml");
+ assertThat(file.createNewFile(), equalTo(true));
+
+ //load from directory outside of the classpath
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic", dir.getAbsolutePath());
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("get"), "get", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey(dir.getName()), equalTo(true));
+ assertSingleFile(yamlSuites.get(dir.getName()), dir.getName(), file.getName());
+
+ //load from external file (optional extension)
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic", dir.getAbsolutePath() + File.separator + "test_loading");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("get"), "get", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey(dir.getName()), equalTo(true));
+ assertSingleFile(yamlSuites.get(dir.getName()), dir.getName(), file.getName());
+ }
+
+ private static void assertSingleFile(Map<String, Set<File>> yamlSuites, String dirName, String fileName) {
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey(dirName), equalTo(true));
+ assertSingleFile(yamlSuites.get(dirName), dirName, fileName);
+ }
+
+ private static void assertSingleFile(Set<File> files, String dirName, String fileName) {
+ assertThat(files.size(), equalTo(1));
+ File file = files.iterator().next();
+ assertThat(file.getName(), equalTo(fileName));
+ assertThat(file.getParentFile().getName(), equalTo(dirName));
+ }
+}
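
The tests above pin down the lookup rules of FileUtils.findYamlSuites. A minimal sketch of a combined call, under the same assumptions:

    // Each path may name a directory or a single suite; the ".yaml" extension
    // and the "/rest-api-spec/test" prefix are both optional, and file-system
    // paths outside the classpath are accepted as well.
    Map<String, Set<File>> suites = FileUtils.findYamlSuites(
            "/rest-api-spec/test", "get/10_basic", "index");
    // keys are parent directory names: "get" -> {10_basic.yaml}, "index" -> its suites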
diff --git a/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java b/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
new file mode 100644
index 0000000..bf56f44
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.json.JsonPath;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.*;
+
+public class JsonPathTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testEvaluateObjectPathEscape() throws Exception {
+ String json = "{ \"field1\": { \"field2.field3\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2\\.field3");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateObjectPathWithDoubleDot() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1..field2");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateObjectPathEndsWithDot() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2.");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateString() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateInteger() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : 333 } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(Integer.class));
+ assertThat((Integer)object, equalTo(333));
+ }
+
+ @Test
+ public void testEvaluateDouble() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : 3.55 } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(Double.class));
+ assertThat((Double)object, equalTo(3.55));
+ }
+
+ @Test
+ public void testEvaluateArray() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ \"value1\", \"value2\" ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1");
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ assertThat(list.size(), equalTo(2));
+ assertThat(list.get(0), instanceOf(String.class));
+ assertThat((String)list.get(0), equalTo("value1"));
+ assertThat(list.get(1), instanceOf(String.class));
+ assertThat((String)list.get(1), equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElement() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ \"value1\", \"value2\" ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1.1");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElementObject() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1.1.element");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElementObjectWrongPath() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array2.1.element");
+ assertThat(object, nullValue());
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEvaluateObjectKeys() throws Exception {
+ String json = "{ \"metadata\": { \"templates\" : {\"template_1\": { \"field\" : \"value\"}, \"template_2\": { \"field\" : \"value\"} } } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("metadata.templates");
+ assertThat(object, instanceOf(Map.class));
+ Map<String, Object> map = (Map<String, Object>)object;
+ assertThat(map.size(), equalTo(2));
+ Set<String> strings = map.keySet();
+ assertThat(strings, contains("template_1", "template_2"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEvaluateEmptyPath() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("");
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Map.class));
+ assertThat(((Map<String, Object>)object).containsKey("field1"), equalTo(true));
+ }
+}
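
Taken together, the tests above define the JsonPath evaluation rules. A condensed sketch of the observable behaviour:

    JsonPath path = new JsonPath("{ \"hits\": { \"list\": [ \"a\", \"b\" ], \"total\": 2 } }");
    path.evaluate("hits.total");    // Integer 2
    path.evaluate("hits.list.1");   // "b" -- numeric segments index into arrays
    path.evaluate("hits..total");   // doubled and trailing dots are tolerated
    path.evaluate("missing.path");  // null rather than an exception
    path.evaluate("");              // the empty path yields the whole document as a Map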
diff --git a/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
new file mode 100644
index 0000000..c86660f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.spec.RestApiParser;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ * Tests that broken REST API specs fail parsing with a useful error message.
+ */
+public class RestApiParserFailingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void brokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParams() throws Exception {
+ parseAndExpectFailure(BROKEN_SPEC_PARAMS, "Expected params field in rest api definition to contain an object");
+ }
+
+ @Test
+ public void brokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParts() throws Exception {
+ parseAndExpectFailure(BROKEN_SPEC_PARTS, "Expected parts field in rest api definition to contain an object");
+ }
+
+ private void parseAndExpectFailure(String brokenJson, String expectedErrorMessage) throws Exception {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(brokenJson);
+ try {
+ new RestApiParser().parse(parser);
+ fail("Expected to fail parsing but did not happen");
+ } catch (IOException e) {
+ assertThat(e.getMessage(), containsString(expectedErrorMessage));
+ }
+
+ }
+
+ // the params section is broken: the enclosing named parameter object is missing
+ private static final String BROKEN_SPEC_PARAMS = "{\n" +
+ " \"ping\": {" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/\"," +
+ " \"methods\": [\"HEAD\"]," +
+ " \"url\": {" +
+ " \"path\": \"/\"," +
+ " \"paths\": [\"/\"]," +
+ " \"parts\": {" +
+ " }," +
+ " \"params\": {" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " }" +
+ " }," +
+ " \"body\": null" +
+ " }" +
+ "}";
+
+ // the parts section is broken: the enclosing named part object is missing
+ private static final String BROKEN_SPEC_PARTS = "{\n" +
+ " \"ping\": {" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/\"," +
+ " \"methods\": [\"HEAD\"]," +
+ " \"url\": {" +
+ " \"path\": \"/\"," +
+ " \"paths\": [\"/\"]," +
+ " \"parts\": {" +
+ " \"type\" : \"boolean\",\n" +
+ " }," +
+ " \"params\": {\n" +
+ " \"ignore_unavailable\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " } \n" +
+ " }," +
+ " \"body\": null" +
+ " }" +
+ "}";
+
+}
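
A note on the pattern above: @Test(expected = ...) can only assert the exception type, so parseAndExpectFailure uses try/fail/catch in order to also check the error message. The same pattern, condensed:

    try {
        new RestApiParser().parse(parser);
        fail("Expected parsing to fail, but it did not");
    } catch (IOException e) {
        assertThat(e.getMessage(), containsString(expectedErrorMessage));
    }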
diff --git a/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
new file mode 100644
index 0000000..5558041
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestApiParser;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class RestApiParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseRestSpecIndexApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_INDEX_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("index"));
+ assertThat(restApi.getMethods().size(), equalTo(2));
+ assertThat(restApi.getMethods().get(0), equalTo("POST"));
+ assertThat(restApi.getMethods().get(1), equalTo("PUT"));
+ assertThat(restApi.getPaths().size(), equalTo(2));
+ assertThat(restApi.getPaths().get(0), equalTo("/{index}/{type}"));
+ assertThat(restApi.getPaths().get(1), equalTo("/{index}/{type}/{id}"));
+ assertThat(restApi.getPathParts().size(), equalTo(3));
+ assertThat(restApi.getPathParts().get(0), equalTo("id"));
+ assertThat(restApi.getPathParts().get(1), equalTo("index"));
+ assertThat(restApi.getPathParts().get(2), equalTo("type"));
+ assertThat(restApi.getParams().size(), equalTo(4));
+ assertThat(restApi.getParams(), contains("consistency", "op_type", "parent", "refresh"));
+ assertThat(restApi.isBodySupported(), equalTo(true));
+ assertThat(restApi.isBodyRequired(), equalTo(true));
+ }
+
+ @Test
+ public void testParseRestSpecGetTemplateApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_GET_TEMPLATE_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("indices.get_template"));
+ assertThat(restApi.getMethods().size(), equalTo(1));
+ assertThat(restApi.getMethods().get(0), equalTo("GET"));
+ assertThat(restApi.getPaths().size(), equalTo(2));
+ assertThat(restApi.getPaths().get(0), equalTo("/_template"));
+ assertThat(restApi.getPaths().get(1), equalTo("/_template/{name}"));
+ assertThat(restApi.getPathParts().size(), equalTo(1));
+ assertThat(restApi.getPathParts().get(0), equalTo("name"));
+ assertThat(restApi.getParams().size(), equalTo(0));
+ assertThat(restApi.isBodySupported(), equalTo(false));
+ assertThat(restApi.isBodyRequired(), equalTo(false));
+ }
+
+ @Test
+ public void testParseRestSpecCountApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_COUNT_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("count"));
+ assertThat(restApi.getMethods().size(), equalTo(2));
+ assertThat(restApi.getMethods().get(0), equalTo("POST"));
+ assertThat(restApi.getMethods().get(1), equalTo("GET"));
+ assertThat(restApi.getPaths().size(), equalTo(3));
+ assertThat(restApi.getPaths().get(0), equalTo("/_count"));
+ assertThat(restApi.getPaths().get(1), equalTo("/{index}/_count"));
+ assertThat(restApi.getPaths().get(2), equalTo("/{index}/{type}/_count"));
+ assertThat(restApi.getPathParts().size(), equalTo(2));
+ assertThat(restApi.getPathParts().get(0), equalTo("index"));
+ assertThat(restApi.getPathParts().get(1), equalTo("type"));
+ assertThat(restApi.getParams().size(), equalTo(1));
+ assertThat(restApi.getParams().get(0), equalTo("ignore_unavailable"));
+ assertThat(restApi.isBodySupported(), equalTo(true));
+ assertThat(restApi.isBodyRequired(), equalTo(false));
+ }
+
+ private static final String REST_SPEC_COUNT_API = "{\n" +
+ " \"count\": {\n" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-count.html\",\n" +
+ " \"methods\": [\"POST\", \"GET\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/_count\",\n" +
+ " \"paths\": [\"/_count\", \"/{index}/_count\", \"/{index}/{type}/_count\"],\n" +
+ " \"parts\": {\n" +
+ " \"index\": {\n" +
+ " \"type\" : \"list\",\n" +
+ " \"description\" : \"A comma-separated list of indices to restrict the results\"\n" +
+ " },\n" +
+ " \"type\": {\n" +
+ " \"type\" : \"list\",\n" +
+ " \"description\" : \"A comma-separated list of types to restrict the results\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"params\": {\n" +
+ " \"ignore_unavailable\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " } \n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": {\n" +
+ " \"description\" : \"A query to restrict the results specified with the Query DSL (optional)\"\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+
+ private static final String REST_SPEC_GET_TEMPLATE_API = "{\n" +
+ " \"indices.get_template\": {\n" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/reference/api/admin-indices-templates/\",\n" +
+ " \"methods\": [\"GET\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/_template/{name}\",\n" +
+ " \"paths\": [\"/_template\", \"/_template/{name}\"],\n" +
+ " \"parts\": {\n" +
+ " \"name\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : false,\n" +
+ " \"description\" : \"The name of the template\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"params\": {\n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": null\n" +
+ " }\n" +
+ "}";
+
+ private static final String REST_SPEC_INDEX_API = "{\n" +
+ " \"index\": {\n" +
+ " \"documentation\": \"http://elasticsearch.org/guide/reference/api/index_/\",\n" +
+ " \"methods\": [\"POST\", \"PUT\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/{index}/{type}\",\n" +
+ " \"paths\": [\"/{index}/{type}\", \"/{index}/{type}/{id}\"],\n" +
+ " \"parts\": {\n" +
+ " \"id\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"description\" : \"Document ID\"\n" +
+ " },\n" +
+ " \"index\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : true,\n" +
+ " \"description\" : \"The name of the index\"\n" +
+ " },\n" +
+ " \"type\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : true,\n" +
+ " \"description\" : \"The type of the document\"\n" +
+ " }\n" +
+ " } ,\n" +
+ " \"params\": {\n" +
+ " \"consistency\": {\n" +
+ " \"type\" : \"enum\",\n" +
+ " \"options\" : [\"one\", \"quorum\", \"all\"],\n" +
+ " \"description\" : \"Explicit write consistency setting for the operation\"\n" +
+ " },\n" +
+ " \"op_type\": {\n" +
+ " \"type\" : \"enum\",\n" +
+ " \"options\" : [\"index\", \"create\"],\n" +
+ " \"default\" : \"index\",\n" +
+ " \"description\" : \"Explicit operation type\"\n" +
+ " },\n" +
+ " \"parent\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"description\" : \"ID of the parent document\"\n" +
+ " },\n" +
+ " \"refresh\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Refresh the index after performing the operation\"\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": {\n" +
+ " \"description\" : \"The document\",\n" +
+ " \"required\" : true\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+}
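
A minimal sketch of what the parser extracts from REST_SPEC_COUNT_API above, reusing only calls exercised in these tests:

    XContentParser parser = JsonXContent.jsonXContent.createParser(REST_SPEC_COUNT_API);
    RestApi restApi = new RestApiParser().parse(parser);
    // restApi.getName()         -> "count"
    // restApi.getMethods()      -> [POST, GET]
    // restApi.getPaths()        -> [/_count, /{index}/_count, /{index}/{type}/_count]
    // restApi.getPathParts()    -> [index, type]   (the {placeholders} from the paths)
    // restApi.getParams()       -> [ignore_unavailable]
    // restApi.isBodySupported() -> true; restApi.isBodyRequired() -> false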
diff --git a/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
new file mode 100644
index 0000000..9a06328
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
@@ -0,0 +1,536 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.junit.After;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class RestTestParserTests extends ElasticsearchTestCase {
+
+ private XContentParser parser;
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ //make sure the whole stream was consumed; XContentParser doesn't expose an isClosed method
+ //the next token can be null even mid-document (e.g. at "---"), but not several times in a row
+ assertThat(parser.currentToken(), nullValue());
+ assertThat(parser.nextToken(), nullValue());
+ assertThat(parser.nextToken(), nullValue());
+ parser.close();
+ }
+
+ @Test
+ public void testParseTestSetupAndSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create"));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), equalTo("test_index"));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getVersion(), equalTo("0.90.9 - 999"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+ }
+
+ @Test
+ public void testParseTestSetupAndSectionsSkipLastSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "1.0.0"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create"));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), equalTo("test_index"));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getVersion(), equalTo("0.90.9 - 999"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSetupAndSectionsSkipEntireFile() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - skip:\n" +
+ " version: \"0.90.3 - 0.90.6\"\n" +
+ " reason: \"test skip entire file\"\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getVersion(), equalTo("0.90.3 - 0.90.6"));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getReason(), equalTo("test skip entire file"));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(0));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSetupAndSectionsSkipEntireFileNoDo() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - skip:\n" +
+ " version: \"0.90.3 - 0.90.6\"\n" +
+ " reason: \"test skip entire file\"\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getVersion(), equalTo("0.90.3 - 0.90.6"));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getReason(), equalTo("test skip entire file"));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(0));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSingleTestSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Index with ID\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test-weird-index-中文\n" +
+ " type: weird.type\n" +
+ " id: 1\n" +
+ " body: { foo: bar }\n" +
+ "\n" +
+ " - is_true: ok\n" +
+ " - match: { _index: test-weird-index-中文 }\n" +
+ " - match: { _type: weird.type }\n" +
+ " - match: { _id: \"1\"}\n" +
+ " - match: { _version: 1}\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test-weird-index-中文\n" +
+ " type: weird.type\n" +
+ " id: 1\n" +
+ "\n" +
+ " - match: { _index: test-weird-index-中文 }\n" +
+ " - match: { _type: weird.type }\n" +
+ " - match: { _id: \"1\"}\n" +
+ " - match: { _version: 1}\n" +
+ " - match: { _source: { foo: bar }}"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+
+ assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(1));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(12));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(IsTrueAssertion.class));
+ IsTrueAssertion trueAssertion = (IsTrueAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(trueAssertion.getField(), equalTo("ok"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(3), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(3);
+ assertThat(matchAssertion.getField(), equalTo("_type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4);
+ assertThat(matchAssertion.getField(), equalTo("_id"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(5);
+ assertThat(matchAssertion.getField(), equalTo("_version"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(6);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8);
+ assertThat(matchAssertion.getField(), equalTo("_type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9);
+ assertThat(matchAssertion.getField(), equalTo("_id"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(10), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(10);
+ assertThat(matchAssertion.getField(), equalTo("_version"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(11), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(11);
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ assertThat(((Map) matchAssertion.getExpectedValue()).get("foo").toString(), equalTo("bar"));
+ }
+
+ @Test
+ public void testParseTestMultipleTestSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Missing document (partial doc)\":\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ "\n" +
+ " - do:\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ " ignore: 404\n" +
+ "\n" +
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n" +
+ "\n" +
+ " - do:\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " ignore: 404\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+
+ assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Missing document (partial doc)"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Missing document (script)"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(2));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseTestDuplicateTestSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ "\n" +
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n" +
+ "\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+ }
+}
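The suites above lean on YAML's multi-document syntax: each `---` starts a new test section, and each document is a single-key map from the section name to its executable sections; duplicate names across documents are exactly what the last test rejects. A rough sketch of that split (not the Elasticsearch parser; SnakeYAML on the classpath is an assumption):

import org.yaml.snakeyaml.Yaml;

import java.util.Map;

public class YamlSuiteSketch {
    public static void main(String[] args) {
        String suite =
                "---\n" +
                "\"Missing document (partial doc)\":\n" +
                "  - do:\n" +
                "      update: { index: test_1, type: test, id: 1 }\n" +
                "---\n" +
                "\"Missing document (script)\":\n" +
                "  - do:\n" +
                "      update: { index: test_1, type: test, id: 1 }\n";

        // Each "---" begins a new YAML document; each document maps one
        // section name to its list of executable sections.
        for (Object doc : new Yaml().loadAll(suite)) {
            Map<?, ?> section = (Map<?, ?>) doc;
            System.out.println("section: " + section.keySet().iterator().next());
        }
    }
}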
diff --git a/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
new file mode 100644
index 0000000..fb9d7b9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SetSectionParser;
+import org.elasticsearch.test.rest.section.SetSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class SetSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSetSectionSingleValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: id }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ SetSection setSection = setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(setSection, notNullValue());
+ assertThat(setSection.getStash(), notNullValue());
+ assertThat(setSection.getStash().size(), equalTo(1));
+ assertThat(setSection.getStash().get("_id"), equalTo("id"));
+ }
+
+ @Test
+ public void testParseSetSectionMultipleValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: id, _type: type, _index: index }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ SetSection setSection = setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(setSection, notNullValue());
+ assertThat(setSection.getStash(), notNullValue());
+ assertThat(setSection.getStash().size(), equalTo(3));
+ assertThat(setSection.getStash().get("_id"), equalTo("id"));
+ assertThat(setSection.getStash().get("_type"), equalTo("type"));
+ assertThat(setSection.getStash().get("_index"), equalTo("index"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSetSectionNoValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+}
\ No newline at end of file
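The `set` sections parsed above feed a stash: a value read out of the last response is remembered under a name that later sections can substitute back in. A hypothetical sketch of the idea (the class, method names, and `$`-placeholder convention are illustrative, not the framework's API):

import java.util.HashMap;
import java.util.Map;

public class StashSketch {
    private final Map<String, Object> stash = new HashMap<>();

    // `set: { _id: id }` reads field "_id" from the response and
    // remembers it under the key "id".
    void set(Map<String, Object> response, String field, String key) {
        stash.put(key, response.get(field));
    }

    // Later sections can reference a stashed value as "$key".
    Object resolve(String value) {
        return value.startsWith("$") ? stash.get(value.substring(1)) : value;
    }

    public static void main(String[] args) {
        StashSketch s = new StashSketch();
        Map<String, Object> response = new HashMap<>();
        response.put("_id", "some-generated-id");
        s.set(response, "_id", "id");
        System.out.println(s.resolve("$id")); // prints some-generated-id
    }
}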
diff --git a/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
new file mode 100644
index 0000000..191bc3d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SetupSectionParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class SetupSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSetupSection() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(true));
+ assertThat(setupSection.getDoSections().size(), equalTo(2));
+ assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
+ assertThat(setupSection.getDoSections().get(1).getApiCallSection().getApi(), equalTo("index2"));
+ }
+
+ @Test
+ public void testParseSetupAndSkipSectionSkip() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
+ assertThat(setupSection.getSkipSection(), notNullValue());
+ assertThat(setupSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(setupSection.getDoSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseSetupAndSkipSectionNoSkip() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.8"));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
+ assertThat(setupSection.getSkipSection(), notNullValue());
+ assertThat(setupSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(setupSection.getDoSections().size(), equalTo(2));
+ assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
+ assertThat(setupSection.getDoSections().get(1).getApiCallSection().getApi(), equalTo("index2"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
new file mode 100644
index 0000000..4209e90
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SkipSectionParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+public class SkipSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSkipSectionVersionNoFeature() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \"0 - 0.90.2\"\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getVersion(), equalTo("0 - 0.90.2"));
+ assertThat(skipSection.getFeatures().size(), equalTo(0));
+ assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param"));
+ }
+
+ @Test
+ public void testParseSkipSectionFeatureNoVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "features: regex"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getVersion(), nullValue());
+ assertThat(skipSection.getFeatures().size(), equalTo(1));
+ assertThat(skipSection.getFeatures().get(0), equalTo("regex"));
+ assertThat(skipSection.getReason(), nullValue());
+ }
+
+ @Test
+ public void testParseSkipSectionFeaturesNoVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "features: [regex1,regex2,regex3]"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getVersion(), nullValue());
+ assertThat(skipSection.getFeatures().size(), equalTo(3));
+ assertThat(skipSection.getFeatures().get(0), equalTo("regex1"));
+ assertThat(skipSection.getFeatures().get(1), equalTo("regex2"));
+ assertThat(skipSection.getFeatures().get(2), equalTo("regex3"));
+ assertThat(skipSection.getReason(), nullValue());
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionBothFeatureAndVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \"0 - 0.90.2\"\n" +
+ "features: regex\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionNoReason() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \"0 - 0.90.2\"\n"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionNoVersionNorFeature() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "reason: Delete ignores the parent param\n"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+}
\ No newline at end of file
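Taken together, the failure cases above pin down the skip section's contract: `version` and `features` are mutually exclusive, a `version` requires a `reason`, and at least one of the two must be present. A compact sketch of that validation (hypothetical code and error messages, not the SkipSectionParser source):

import java.util.Arrays;
import java.util.List;

public class SkipValidationSketch {
    // Throws if the parsed skip fields violate the contract the tests above describe.
    static void validate(String version, List<String> features, String reason) {
        boolean hasVersion = version != null;
        boolean hasFeatures = features != null && !features.isEmpty();
        if (hasVersion && hasFeatures) {
            throw new IllegalArgumentException("version and features are mutually exclusive");
        }
        if (hasVersion && reason == null) {
            throw new IllegalArgumentException("a skip version requires a reason");
        }
        if (!hasVersion && !hasFeatures) {
            throw new IllegalArgumentException("either version or features is required");
        }
    }

    public static void main(String[] args) {
        validate("0 - 0.90.2", null, "Delete ignores the parent param"); // ok
        validate(null, Arrays.asList("regex"), null);                    // ok: features need no reason
        try {
            validate("0 - 0.90.2", null, null);                          // rejected: no reason
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}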
diff --git a/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
new file mode 100644
index 0000000..f98f67f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestSectionParser;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.section.*;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class TestSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseTestSectionWithDoSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"First test section\": \n" +
+ " - do :\n" +
+ " catch: missing\n" +
+ " indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer"
+ );
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("First test section"));
+ assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
+ assertThat(testSection.getExecutableSections().size(), equalTo(1));
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTestSectionWithDoSetAndSkipSectionsSkip() throws Exception {
+ String yaml =
+ "\"First test section\": \n" +
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do :\n" +
+ " catch: missing\n" +
+ " indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer\n" +
+ " - set: {_scroll_id: scroll_id}";
+
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ parser = YamlXContent.yamlXContent.createParser(yaml);
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("First test section"));
+ assertThat(testSection.getSkipSection(), notNullValue());
+ assertThat(testSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ // the skip section applies to this version, so the executable sections are not parsed
+ assertThat(testSection.getExecutableSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exception {
+ String yaml =
+ "\"First test section\": \n" +
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do :\n" +
+ " catch: missing\n" +
+ " indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer\n" +
+ " - set: {_scroll_id: scroll_id}";
+
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ parser = YamlXContent.yamlXContent.createParser(yaml);
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.8"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("First test section"));
+ assertThat(testSection.getSkipSection(), notNullValue());
+ assertThat(testSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(testSection.getExecutableSections().size(), equalTo(2));
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ SetSection setSection = (SetSection) testSection.getExecutableSections().get(1);
+ assertThat(setSection.getStash().size(), equalTo(1));
+ assertThat(setSection.getStash().get("_scroll_id"), equalTo("scroll_id"));
+ }
+
+ @Test
+ public void testParseTestSectionWithMultipleDoSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"Basic\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文\n" +
+ " body: { \"foo\": \"Hello: 中文\" }\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文"
+ );
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("Basic"));
+ assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
+ assertThat(testSection.getExecutableSections().size(), equalTo(2));
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ doSection = (DoSection)testSection.getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTestSectionWithDoSectionsAndAssertions() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"Basic\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文\n" +
+ " body: { \"foo\": \"Hello: 中文\" }\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文\n" +
+ "\n" +
+ " - match: { _index: test_1 }\n" +
+ " - is_true: _source\n" +
+ " - match: { _source: { foo: \"Hello: 中文\" } }\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test_1\n" +
+ " id: 中文\n" +
+ "\n" +
+ " - length: { _index: 6 }\n" +
+ " - is_false: whatever\n" +
+ " - gt: { size: 5 }\n" +
+ " - lt: { size: 10 }"
+ );
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("Basic"));
+ assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
+ assertThat(testSection.getExecutableSections().size(), equalTo(10));
+
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+
+ doSection = (DoSection)testSection.getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+
+ MatchAssertion matchAssertion = (MatchAssertion)testSection.getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test_1"));
+
+ IsTrueAssertion trueAssertion = (IsTrueAssertion)testSection.getExecutableSections().get(3);
+ assertThat(trueAssertion.getField(), equalTo("_source"));
+
+ matchAssertion = (MatchAssertion)testSection.getExecutableSections().get(4);
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ Map map = (Map) matchAssertion.getExpectedValue();
+ assertThat(map.size(), equalTo(1));
+ assertThat(map.get("foo").toString(), equalTo("Hello: 中文"));
+
+ doSection = (DoSection)testSection.getExecutableSections().get(5);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+
+ LengthAssertion lengthAssertion = (LengthAssertion) testSection.getExecutableSections().get(6);
+ assertThat(lengthAssertion.getField(), equalTo("_index"));
+ assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(6));
+
+ IsFalseAssertion falseAssertion = (IsFalseAssertion)testSection.getExecutableSections().get(7);
+ assertThat(falseAssertion.getField(), equalTo("whatever"));
+
+ GreaterThanAssertion greaterThanAssertion = (GreaterThanAssertion) testSection.getExecutableSections().get(8);
+ assertThat(greaterThanAssertion.getField(), equalTo("size"));
+ assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(5));
+
+ LessThanAssertion lessThanAssertion = (LessThanAssertion) testSection.getExecutableSections().get(9);
+ assertThat(lessThanAssertion.getField(), equalTo("size"));
+ assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(10));
+ }
+
+ @Test
+ public void testSmallSection() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"node_info test\":\n" +
+ " - do:\n" +
+ " cluster.node_info: {}\n" +
+ " \n" +
+ " - is_true: nodes\n" +
+ " - is_true: cluster_name\n");
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("node_info test"));
+ assertThat(testSection.getExecutableSections().size(), equalTo(3));
+ }
+}
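The sections above exercise the whole assertion family: `match`, `is_true`/`is_false`, `length`, `gt`, and `lt`. Reduced to plain checks on already-resolved values (a simplifying assumption; the real Assertion classes first resolve the field from the response), they amount to:

import java.util.Collection;
import java.util.Map;

public class AssertionSketch {
    // is_true: the field exists and is not false/empty.
    // is_false is simply its negation.
    static boolean isTrue(Object v) {
        return v != null && !Boolean.FALSE.equals(v) && !"".equals(v) && !"false".equals(v);
    }

    // length: string length, collection size, or map size must match.
    static boolean length(Object v, int expected) {
        if (v instanceof String) return ((String) v).length() == expected;
        if (v instanceof Collection) return ((Collection<?>) v).size() == expected;
        if (v instanceof Map) return ((Map<?, ?>) v).size() == expected;
        return false;
    }

    // gt / lt: numeric comparisons.
    static boolean gt(int actual, int expected) { return actual > expected; }
    static boolean lt(int actual, int expected) { return actual < expected; }

    public static void main(String[] args) {
        System.out.println(length("test_1", 6));   // mirrors `- length: { _index: 6 }`
        System.out.println(gt(7, 5) && lt(7, 10)); // mirrors the gt/lt sections above
        System.out.println(isTrue("nodes"));       // mirrors `- is_true: nodes`
    }
}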
diff --git a/src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java b/src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java
new file mode 100644
index 0000000..3960012
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.test.rest.support.VersionUtils.parseVersionNumber;
+import static org.elasticsearch.test.rest.support.VersionUtils.skipCurrentVersion;
+import static org.hamcrest.Matchers.*;
+
+public class VersionUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseVersionNumber() {
+
+ int[] versionNumber = parseVersionNumber("0.90.6");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(0));
+ assertThat(versionNumber[1], equalTo(90));
+ assertThat(versionNumber[2], equalTo(6));
+
+ versionNumber = parseVersionNumber("0.90.999");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(0));
+ assertThat(versionNumber[1], equalTo(90));
+ assertThat(versionNumber[2], equalTo(999));
+
+ versionNumber = parseVersionNumber("0.20.11");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(0));
+ assertThat(versionNumber[1], equalTo(20));
+ assertThat(versionNumber[2], equalTo(11));
+
+ versionNumber = parseVersionNumber("1.0.0.Beta1");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+ assertThat(versionNumber[2], equalTo(0));
+
+ versionNumber = parseVersionNumber("1.0.0.RC1");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+ assertThat(versionNumber[2], equalTo(0));
+
+ versionNumber = parseVersionNumber("1.0.0");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+ assertThat(versionNumber[2], equalTo(0));
+
+ versionNumber = parseVersionNumber("1.0");
+ assertThat(versionNumber.length, equalTo(2));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+
+ versionNumber = parseVersionNumber("999");
+ assertThat(versionNumber.length, equalTo(1));
+ assertThat(versionNumber[0], equalTo(999));
+
+ versionNumber = parseVersionNumber("0");
+ assertThat(versionNumber.length, equalTo(1));
+ assertThat(versionNumber[0], equalTo(0));
+
+ try {
+ parseVersionNumber("1.0.Beta1");
+ fail("parseVersionNumber should have thrown an error");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("version is not a number"));
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+ }
+
+ @Test
+ public void testSkipCurrentVersion() {
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.2"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.3"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.6"), equalTo(true));
+
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.20.10"), equalTo(false));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.1"), equalTo(false));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.7"), equalTo(false));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "1.0.0"), equalTo(false));
+
+ assertThat(skipCurrentVersion(" 0.90.2 - 0.90.999 ", "0.90.15"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.999", "1.0.0"), equalTo(false));
+
+ assertThat(skipCurrentVersion("0 - 999", "0.90.15"), equalTo(true));
+ assertThat(skipCurrentVersion("0 - 999", "0.20.1"), equalTo(true));
+ assertThat(skipCurrentVersion("0 - 999", "1.0.0"), equalTo(true));
+
+ assertThat(skipCurrentVersion("0.90.9 - 999", "1.0.0"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.9 - 999", "0.90.8"), equalTo(false));
+
+ try {
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.999 - 1.0.0", "1.0.0"), equalTo(false));
+ fail("skipCurrentVersion should have thrown an error");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("too many skip versions found"));
+ }
+
+ }
+}
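The behaviour pinned down above is deliberately lenient: qualifiers such as `.Beta1` or `.RC1` are dropped as long as the first up-to-three dot-separated parts are numeric, and a skip range `a - b` is inclusive at both ends, with missing parts treated as zero. A sketch that mirrors the tested behaviour (an approximation, not the VersionUtils source):

public class VersionSketch {
    // At most the first three dot-separated parts are parsed as integers;
    // anything after them (".Beta1" in "1.0.0.Beta1") is ignored, but a
    // non-numeric part within the first three ("1.0.Beta1") is an error.
    static int[] parse(String version) {
        String[] parts = version.trim().split("\\.");
        int n = Math.min(parts.length, 3);
        int[] out = new int[n];
        for (int i = 0; i < n; i++) {
            try {
                out[i] = Integer.parseInt(parts[i]);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("version is not a number", e);
            }
        }
        return out;
    }

    // "a - b" skips every version v with a <= v <= b, comparing part by
    // part and padding missing parts with zero.
    static boolean inRange(String range, String current) {
        String[] bounds = range.split("-");
        if (bounds.length != 2) throw new IllegalArgumentException("too many skip versions found");
        int[] cur = parse(current);
        return compare(parse(bounds[0]), cur) <= 0 && compare(cur, parse(bounds[1])) <= 0;
    }

    private static int compare(int[] a, int[] b) {
        for (int i = 0; i < Math.max(a.length, b.length); i++) {
            int x = i < a.length ? a[i] : 0;
            int y = i < b.length ? b[i] : 0;
            if (x != y) return Integer.compare(x, y);
        }
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(inRange("0.90.2 - 0.90.6", "0.90.6")); // true: bounds are inclusive
        System.out.println(inRange("0.90.9 - 999", "1.0.0"));     // true: open-ended upper bound
        System.out.println(inRange("0.90.2 - 0.90.6", "1.0.0"));  // false
    }
}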
diff --git a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java
new file mode 100644
index 0000000..e9df739
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MMapDirectory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.fs.FsDirectoryService;
+import org.elasticsearch.index.store.fs.MmapFsDirectoryService;
+import org.elasticsearch.index.store.fs.NioFsDirectoryService;
+import org.elasticsearch.index.store.fs.SimpleFsDirectoryService;
+import org.elasticsearch.index.store.memory.ByteBufferDirectoryService;
+import org.elasticsearch.index.store.ram.RamDirectoryService;
+import org.elasticsearch.test.TestCluster;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.Set;
+
+public class MockDirectoryHelper {
+ public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate";
+ public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open";
+ public static final String RANDOM_THROTTLE = "index.store.mock.random.throttle";
+ public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close";
+ public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write";
+ public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file";
+ public static final String RANDOM_FAIL_ON_CLOSE = "index.store.mock.random.fail_on_close";
+
+ public static final Set<ElasticsearchMockDirectoryWrapper> wrappers = ConcurrentCollections.newConcurrentSet();
+
+ private final Random random;
+ private final double randomIOExceptionRate;
+ private final double randomIOExceptionRateOnOpen;
+ private final Throttling throttle;
+ private final boolean checkIndexOnClose;
+ private final Settings indexSettings;
+ private final ShardId shardId;
+ private final boolean preventDoubleWrite;
+ private final boolean noDeleteOpenFile;
+ private final ESLogger logger;
+ private final boolean failOnClose;
+
+ public MockDirectoryHelper(ShardId shardId, Settings indexSettings, ESLogger logger) {
+ final long seed = indexSettings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0L);
+ random = new Random(seed);
+ randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d);
+ randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d);
+ preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW
+ noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW
+ random.nextInt(shardId.getId() + 1); // some randomness per shard
+ throttle = Throttling.valueOf(indexSettings.get(RANDOM_THROTTLE, random.nextDouble() < 0.1 ? "SOMETIMES" : "NEVER"));
+ checkIndexOnClose = indexSettings.getAsBoolean(CHECK_INDEX_ON_CLOSE, false); // we can't do this by default since it might close the index input that we still read from in a pending fetch phase.
+ failOnClose = indexSettings.getAsBoolean(RANDOM_FAIL_ON_CLOSE, false);
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] checkIndexOnClose: [{}]", SeedUtils.formatSeed(seed),
+ throttle, checkIndexOnClose);
+ }
+ this.indexSettings = indexSettings;
+ this.shardId = shardId;
+ this.logger = logger;
+ }
+
+ public Directory wrap(Directory dir) {
+ final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, logger, failOnClose);
+ w.setRandomIOExceptionRate(randomIOExceptionRate);
+ w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen);
+ w.setThrottling(throttle);
+ w.setCheckIndexOnClose(checkIndexOnClose);
+ w.setPreventDoubleWrite(preventDoubleWrite);
+ w.setNoDeleteOpenFile(noDeleteOpenFile);
+ wrappers.add(w);
+ return w;
+ }
+
+ public Directory[] wrapAllInplace(Directory[] dirs) {
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = wrap(dirs[i]);
+ }
+ return dirs;
+ }
+
+ public FsDirectoryService randomDirectorService(IndexStore indexStore) {
+ if ((Constants.WINDOWS || Constants.SUN_OS) && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
+ return new MmapFsDirectoryService(shardId, indexSettings, indexStore);
+ } else if (Constants.WINDOWS) {
+ return new SimpleFsDirectoryService(shardId, indexSettings, indexStore);
+ }
+ switch (random.nextInt(3)) {
+ case 1:
+ return new MmapFsDirectoryService(shardId, indexSettings, indexStore);
+ case 0:
+ return new SimpleFsDirectoryService(shardId, indexSettings, indexStore);
+ default:
+ return new NioFsDirectoryService(shardId, indexSettings, indexStore);
+ }
+ }
+
+ public DirectoryService randomRamDirecoryService(ByteBufferCache byteBufferCache) {
+ switch (random.nextInt(2)) {
+ case 0:
+ return new RamDirectoryService(shardId, indexSettings);
+ default:
+ return new ByteBufferDirectoryService(shardId, indexSettings, byteBufferCache);
+ }
+
+ }
+
+ public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper {
+
+ private final ESLogger logger;
+ private final boolean failOnClose;
+
+ public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, ESLogger logger, boolean failOnClose) {
+ super(random, delegate);
+ this.logger = logger;
+ this.failOnClose = failOnClose;
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ super.close();
+ } catch (RuntimeException ex) {
+ if (failOnClose) {
+ throw ex;
+ }
+ // we catch the exception on close to properly close shards even if there are open files
+ // the test framework will call closeWithRuntimeException after the test exits to fail
+ // on unclosed files.
+ logger.debug("MockDirectoryWrapper#close() threw exception", ex);
+ }
+ }
+
+ public void closeWithRuntimeException() throws IOException {
+ super.close(); // force a failure if files are still open; called in the tear down of ElasticsearchIntegrationTest
+ }
+ }
+}
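Because the keys above are plain index settings, a test can dial in failure behaviour per index. A hedged sketch of such a configuration (assuming the 1.x `ImmutableSettings` builder; the keys match the constants defined in MockDirectoryHelper above):

import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class MockStoreSettingsExample {
    public static void main(String[] args) {
        Settings indexSettings = ImmutableSettings.settingsBuilder()
                .put("index.store.mock.random.io_exception_rate", 0.1d) // ~10% of IO calls fail
                .put("index.store.mock.random.throttle", "SOMETIMES")   // occasionally throttle IO
                .put("index.store.mock.check_index_on_close", false)
                .build();
        // MockDirectoryHelper reads these back with the same defaults used above.
        System.out.println(indexSettings.getAsDouble(
                "index.store.mock.random.io_exception_rate", 0.0d));    // -> 0.1
    }
}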
diff --git a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java
new file mode 100644
index 0000000..69a6dc0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.fs.FsDirectoryService;
+
+import java.io.File;
+import java.io.IOException;
+
+public class MockFSDirectoryService extends FsDirectoryService {
+
+ private final MockDirectoryHelper helper;
+ private FsDirectoryService delegateService;
+
+ @Inject
+ public MockFSDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
+ super(shardId, indexSettings, indexStore);
+ helper = new MockDirectoryHelper(shardId, indexSettings, logger);
+ delegateService = helper.randomDirectorService(indexStore);
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ return helper.wrapAllInplace(delegateService.build());
+ }
+
+ @Override
+ protected synchronized FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java
new file mode 100644
index 0000000..8005a62
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.fs.FsIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+
+public class MockFSIndexStore extends FsIndexStore {
+
+ @Inject
+ public MockFSIndexStore(Index index, Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) {
+ super(index, indexSettings, indexService, indicesStore, nodeEnv);
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return MockFSDirectoryService.class;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java b/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java
new file mode 100644
index 0000000..c4f9d20
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+public class MockFSIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(MockFSIndexStore.class).asEagerSingleton();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java
new file mode 100644
index 0000000..a2a6aa4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+
+import java.io.IOException;
+
+public class MockRamDirectoryService extends AbstractIndexShardComponent implements DirectoryService {
+
+ private final MockDirectoryHelper helper;
+ private final DirectoryService delegateService;
+
+ @Inject
+ public MockRamDirectoryService(ShardId shardId, Settings indexSettings, ByteBufferCache byteBufferCache) {
+ super(shardId, indexSettings);
+ helper = new MockDirectoryHelper(shardId, indexSettings, logger);
+ delegateService = helper.randomRamDirecoryService(byteBufferCache);
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ return helper.wrapAllInplace(delegateService.build());
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return delegateService.throttleTimeInNanos();
+ }
+
+ @Override
+ public void renameFile(Directory dir, String from, String to) throws IOException {
+ delegateService.renameFile(dir, from, to);
+ }
+
+ @Override
+ public void fullDelete(Directory dir) throws IOException {
+ delegateService.fullDelete(dir);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java b/src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java
new file mode 100644
index 0000000..51aacad
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.Inject;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.support.AbstractIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.monitor.jvm.JvmStats;
+
+public class MockRamIndexStore extends AbstractIndexStore {
+
+ @Inject
+ public MockRamIndexStore(Index index, Settings indexSettings, IndexService indexService, IndicesStore indicesStore) {
+ super(index, indexSettings, indexService, indicesStore);
+ }
+
+ @Override
+ public boolean persistent() {
+ return false;
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return MockRamDirectoryService.class;
+ }
+
+ @Override
+ public ByteSizeValue backingStoreTotalSpace() {
+ return JvmInfo.jvmInfo().getMem().heapMax();
+ }
+
+ @Override
+ public ByteSizeValue backingStoreFreeSpace() {
+ return JvmStats.jvmStats().getMem().heapUsed();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java b/src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java
new file mode 100644
index 0000000..b3bdccb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+public class MockRamIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
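+ // route all IndexStore injections to the RAM-backed mock implementation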
+ bind(IndexStore.class).to(MockRamIndexStore.class).asEagerSingleton();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
new file mode 100644
index 0000000..159b427
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.local.LocalTransport;
+
+import java.io.IOException;
+import java.util.Random;
+
+/**
+ * A {@link LocalTransport} that asserts, for every request sent and every response
+ * received, that the message is serializable across a randomly picked version.
+ */
+public class AssertingLocalTransport extends LocalTransport {
+ private final Random random;
+
+ @Inject
+ public AssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version) {
+ super(settings, threadPool, version);
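+ // derive the randomness for the version assertions from the test seed so failures are reproducible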
+ final long seed = settings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0L);
+ random = new Random(seed);
+ }
+
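+ // note: overrides LocalTransport#handleParsedRespone, whose misspelled name comes from the upstream superclass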
+ @Override
+ protected void handleParsedRespone(final TransportResponse response, final TransportResponseHandler handler) {
+ ElasticsearchAssertions.assertVersionSerializable(ElasticsearchTestCase.randomVersion(random), response);
+ super.handleParsedRespone(response, handler);
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ ElasticsearchAssertions.assertVersionSerializable(ElasticsearchTestCase.randomVersion(random), request);
+ super.sendRequest(node, requestId, action, request, options);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java
new file mode 100644
index 0000000..47c8ee1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.Transport;
+
+/**
+ * Binds {@link AssertingLocalTransport} as the {@link Transport} implementation.
+ */
+public class AssertingLocalTransportModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public AssertingLocalTransportModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(AssertingLocalTransport.class).asEagerSingleton();
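+ // the Transport key is linked to the same eager singleton, so both injections share one instance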
+ bind(Transport.class).to(AssertingLocalTransport.class).asEagerSingleton();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java b/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java
new file mode 100644
index 0000000..bd2553d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.threadpool.ThreadPool.Names;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.*;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for updating thread pool settings on a live cluster.
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=2)
+public class SimpleThreadPoolTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder().put("threadpool.search.type", "cached").put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test(timeout = 20000)
+ public void testUpdatingThreadPoolSettings() throws Exception {
+ ThreadPool threadPool = cluster().getInstance(ThreadPool.class);
+ // Check that settings are changed
+ assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(5L));
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build()).execute().actionGet();
+ assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Make sure that threads continue executing when executor is replaced
+ final CyclicBarrier barrier = new CyclicBarrier(2);
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ } catch (BrokenBarrierException ex) {
+ //
+ }
+ }
+ });
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
+ assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
+ barrier.await();
+
+ // Make sure that new thread executor is functional
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ } catch (BrokenBarrierException ex) {
+ //
+ }
+ }
+ });
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
+ barrier.await();
+ Thread.sleep(200);
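+ // allow a moment for the barrier task to finish and the update to settle before checking node info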
+
+ // Check that node info is correct
+ NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().all().execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ NodeInfo nodeInfo = nodesInfoResponse.getNodes()[i];
+ boolean found = false;
+ for (ThreadPool.Info info : nodeInfo.getThreadPool()) {
+ if (info.getName().equals(Names.SEARCH)) {
+ assertThat(info.getType(), equalTo("fixed"));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+
+ Map<String, Object> poolMap = getPoolSettingsThroughJson(nodeInfo.getThreadPool(), Names.SEARCH);
+ assertThat(poolMap, notNullValue());
+ }
+ }
+
+ private Map<String, Object> getPoolSettingsThroughJson(ThreadPoolInfo info, String poolName) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ info.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.close();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(builder.string());
+ Map<String, Object> poolsMap = parser.mapAndClose();
+ return (Map<String, Object>) ((Map<String, Object>) poolsMap.get("thread_pool")).get(poolName);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
new file mode 100644
index 0000000..04d99ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import com.google.common.util.concurrent.ListeningExecutorService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool.Names;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Unit tests for {@link ThreadPool#updateSettings} across cached, fixed, and scaling executor types.
+ */
+public class UpdateThreadPoolSettingsTests extends ElasticsearchTestCase {
+
+ private ThreadPool.Info info(ThreadPool threadPool, String name) {
+ for (ThreadPool.Info info : threadPool.info()) {
+ if (info.getName().equals(name)) {
+ return info;
+ }
+ }
+ return null;
+ }
+
+ @Test
+ public void testCachedExecutorType() {
+ ThreadPool threadPool = new ThreadPool(ImmutableSettings.settingsBuilder().put("threadpool.search.type", "cached").build(), null);
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Replace with different type
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "same").build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("same"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(ListeningExecutorService.class));
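+ // the "same" type executes work on the calling thread, backed by a guava ListeningExecutorService, hence the instanceOf check above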
+
+ // Replace with different type again
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(1));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Put old type back
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "cached").build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ // Make sure keep alive value reused
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Change keep alive
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ // Set the same keep alive
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
+ // Make sure keep alive value didn't change
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ threadPool.shutdown();
+ }
+
+ @Test
+ public void testFixedExecutorType() {
+ ThreadPool threadPool = new ThreadPool(settingsBuilder().put("threadpool.search.type", "fixed").build(), null);
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Replace with different type
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .put("threadpool.search.min", "2")
+ .put("threadpool.search.size", "15")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Put old type back
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "fixed")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
+ // Make sure keep alive value is not used
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive(), nullValue());
+ // Make sure the pool size values were reused
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(15));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+
+ // Change size
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.size", "10").build());
+ // Make sure size values changed
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(10));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(10));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(10));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ // Change queue capacity
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.queue", "500")
+ .build());
+
+ threadPool.shutdown();
+ }
+
+
+ @Test
+ public void testScalingExecutorType() {
+ ThreadPool threadPool = new ThreadPool(
+ settingsBuilder().put("threadpool.search.type", "scaling").put("threadpool.search.size", 10).build(), null);
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(1));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Change settings that don't require pool replacement
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .put("threadpool.search.min", "2")
+ .put("threadpool.search.size", "15")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ threadPool.shutdown();
+ }
+
+ @Test(timeout = 10000)
+ public void testShutdownNowDoesntBlock() throws Exception {
+ ThreadPool threadPool = new ThreadPool(ImmutableSettings.settingsBuilder().put("threadpool.search.type", "cached").build(), null);
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(20000);
+ } catch (InterruptedException ex) {
+ latch.countDown();
+ Thread.currentThread().interrupt();
+ }
+ }
+ });
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "fixed").build());
+ assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
+ threadPool.shutdownNow();
+ latch.await();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java b/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java
new file mode 100644
index 0000000..03cce04
--- /dev/null
+++ b/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.timestamp;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the _timestamp field, covering automatic and custom values.
+ */
+public class SimpleTimestampTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleTimestamp() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_timestamp").field("enabled", true).field("store", "yes").endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> check with automatic timestamp");
+ long now1 = System.currentTimeMillis();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+ long now2 = System.currentTimeMillis();
+
+ // we check both realtime get and non realtime get
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, greaterThanOrEqualTo(now1));
+ assertThat(timestamp, lessThanOrEqualTo(now2));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ // non realtime get (stored)
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, greaterThanOrEqualTo(now1));
+ assertThat(timestamp, lessThanOrEqualTo(now2));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ logger.info("--> check with custom timestamp (numeric)");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("10").setRefresh(true).execute().actionGet();
+
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(10L));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ logger.info("--> check with custom timestamp (string)");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("1970-01-01T00:00:00.020").setRefresh(true).execute().actionGet();
+
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(20L));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java
new file mode 100644
index 0000000..ee4dc16
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java
@@ -0,0 +1,875 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Base class for transport implementation tests: exercises request/response round trips,
+ * compression, error propagation, timeouts, and cross-version serialization between two
+ * in-process transport services.
+ */
+public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase {
+
+ protected ThreadPool threadPool;
+
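+ // two artificial wire versions: id 99 acts as "version 0" and id 199 as "version 1"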
+ protected static final Version version0 = Version.fromId(/*0*/99);
+ protected DiscoveryNode nodeA;
+ protected TransportService serviceA;
+
+ protected static final Version version1 = Version.fromId(199);
+ protected DiscoveryNode nodeB;
+ protected TransportService serviceB;
+
+ protected abstract TransportService build(Settings settings, Version version);
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ threadPool = new ThreadPool();
+ serviceA = build(ImmutableSettings.builder().put("name", "TS_A").build(), version0);
+ nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version0);
+ serviceB = build(ImmutableSettings.builder().put("name", "TS_B").build(), version1);
+ nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version1);
+
+ // wait until all nodes are fully connected and the connection events have fired (one latch
+ // count per connectToNode call below), so tests in this class never see these callbacks
+ final CountDownLatch latch = new CountDownLatch(4);
+ TransportConnectionListener waitForConnection = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ fail("disconnect should not be called " + node);
+ }
+ };
+ serviceA.addConnectionListener(waitForConnection);
+ serviceB.addConnectionListener(waitForConnection);
+
+ serviceA.connectToNode(nodeB);
+ serviceA.connectToNode(nodeA);
+ serviceB.connectToNode(nodeA);
+ serviceB.connectToNode(nodeB);
+
+ assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ serviceA.removeConnectionListener(waitForConnection);
+ serviceB.removeConnectionListener(waitForConnection);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ serviceA.close();
+ serviceB.close();
+ threadPool.shutdown();
+ }
+
+ @Test
+ public void testHelloWorld() {
+ serviceA.registerHandler("sayHello", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testVoidMessageCompressed() {
+ serviceA.registerHandler("sayHello", new BaseTransportRequestHandler<TransportRequest.Empty>() {
+ @Override
+ public TransportRequest.Empty newInstance() {
+ return TransportRequest.Empty.INSTANCE;
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(TransportRequest.Empty request, TransportChannel channel) {
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.options().withCompress(true));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<TransportResponse.Empty> res = serviceB.submitRequest(nodeA, "sayHello",
+ TransportRequest.Empty.INSTANCE, TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<TransportResponse.Empty>() {
+ @Override
+ public TransportResponse.Empty newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ TransportResponse.Empty message = res.get();
+ assertThat(message, notNullValue());
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testHelloWorldCompressed() {
+ serviceA.registerHandler("sayHello", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message), TransportResponseOptions.options().withCompress(true));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testErrorMessage() {
+ serviceA.registerHandler("sayHelloException", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ assertThat("moshe", equalTo(request.message));
+ throw new RuntimeException("bad message !!!");
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloException",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("got response instead of exception", false, equalTo(true));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat("bad message !!!", equalTo(exp.getCause().getMessage()));
+ }
+ });
+
+ try {
+ res.txGet();
+ assertThat("exception should be thrown", false, equalTo(true));
+ } catch (Exception e) {
+ assertThat("bad message !!!", equalTo(e.getCause().getMessage()));
+ }
+
+ serviceA.removeHandler("sayHelloException");
+ }
+
+ @Test
+ public void testDisconnectListener() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+ TransportConnectionListener disconnectListener = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ fail("node connected should not be called, all connection have been done previously, node: " + node);
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+ };
+ serviceA.addConnectionListener(disconnectListener);
+ serviceB.close();
+ assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception {
+ serviceA.registerHandler("sayHelloTimeoutNoResponse", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ // don't send back a response
+// try {
+// channel.sendResponse(new StringMessage("hello " + request.message));
+// } catch (IOException e) {
+// e.printStackTrace();
+// assertThat(e.getMessage(), false, equalTo(true));
+// }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutNoResponse",
+ new StringMessageRequest("moshe"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("got response instead of exception", false, equalTo(true));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.txGet();
+ assertThat("exception should be thrown", false, equalTo(true));
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+
+ serviceA.removeHandler("sayHelloTimeoutNoResponse");
+ }
+
+ @Test
+ @TestLogging("_root:TRACE")
+ public void testTimeoutSendExceptionWithDelayedResponse() throws Exception {
+ serviceA.registerHandler("sayHelloTimeoutDelayedResponse", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
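+ // the request message encodes how long the handler sleeps before responding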
+ TimeValue sleep = TimeValue.parseTimeValue(request.message, null);
+ try {
+ Thread.sleep(sleep.millis());
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+ new StringMessageRequest("300ms"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("got response instead of exception", false, equalTo(true));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.txGet();
+ assertThat("exception should be thrown", false, equalTo(true));
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+
+ // sleep for 400 millis to make sure we get back the response
+ Thread.sleep(400);
+
+ for (int i = 0; i < 10; i++) {
+ final int counter = i;
+ // now try to send another request, this time with a short timeout
+ res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+ new StringMessageRequest(counter + "ms"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello " + counter + "ms", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response for " + counter + ": " + exp.getDetailedMessage(), false, equalTo(true));
+ }
+ });
+
+ StringMessageResponse message = res.txGet();
+ assertThat(message.message, equalTo("hello " + counter + "ms"));
+ }
+
+ serviceA.removeHandler("sayHelloTimeoutDelayedResponse");
+ }
+
+ static class StringMessageRequest extends TransportRequest {
+
+ private String message;
+
+ StringMessageRequest(String message) {
+ this.message = message;
+ }
+
+ StringMessageRequest() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(message);
+ }
+ }
+
+ static class StringMessageResponse extends TransportResponse {
+
+ private String message;
+
+ StringMessageResponse(String message) {
+ this.message = message;
+ }
+
+ StringMessageResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(message);
+ }
+ }
+
+
+ static class Version0Request extends TransportRequest {
+
+ int value1;
+
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ value1 = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(value1);
+ }
+ }
+
+ static class Version1Request extends Version0Request {
+
+ int value2;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.getVersion().onOrAfter(version1)) {
+ value2 = in.readInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (out.getVersion().onOrAfter(version1)) {
+ out.writeInt(value2);
+ }
+ }
+ }
+
+ static class Version0Response extends TransportResponse {
+
+ int value1;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ value1 = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(value1);
+ }
+ }
+
+ static class Version1Response extends Version0Response {
+
+ int value2;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.getVersion().onOrAfter(version1)) {
+ value2 = in.readInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (out.getVersion().onOrAfter(version1)) {
+ out.writeInt(value2);
+ }
+ }
+ }
+
+ @Test
+ public void testVersion_from0to1() throws Exception {
+ serviceB.registerHandler("/version", new BaseTransportRequestHandler<Version1Request>() {
+ @Override
+ public Version1Request newInstance() {
+ return new Version1Request();
+ }
+
+ @Override
+ public void messageReceived(Version1Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ assertThat(request.value2, equalTo(0)); // not set, coming from service A
+ Version1Response response = new Version1Response();
+ response.value1 = 1;
+ response.value2 = 2;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version0Request version0Request = new Version0Request();
+ version0Request.value1 = 1;
+ Version0Response version0Response = serviceA.submitRequest(nodeB, "/version", version0Request, new BaseTransportResponseHandler<Version0Response>() {
+ @Override
+ public Version0Response newInstance() {
+ return new Version0Response();
+ }
+
+ @Override
+ public void handleResponse(Version0Response response) {
+ assertThat(response.value1, equalTo(1));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version0Response.value1, equalTo(1));
+ }
+
+ @Test
+ public void testVersion_from1to0() throws Exception {
+ serviceA.registerHandler("/version", new BaseTransportRequestHandler<Version0Request>() {
+ @Override
+ public Version0Request newInstance() {
+ return new Version0Request();
+ }
+
+ @Override
+ public void messageReceived(Version0Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ Version0Response response = new Version0Response();
+ response.value1 = 1;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version1Request version1Request = new Version1Request();
+ version1Request.value1 = 1;
+ version1Request.value2 = 2;
+ Version1Response version1Response = serviceB.submitRequest(nodeA, "/version", version1Request, new BaseTransportResponseHandler<Version1Response>() {
+ @Override
+ public Version1Response newInstance() {
+ return new Version1Response();
+ }
+
+ @Override
+ public void handleResponse(Version1Response response) {
+ assertThat(response.value1, equalTo(1));
+ assertThat(response.value2, equalTo(0)); // initial value, because it was serialized by a version 0 node
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version1Response.value1, equalTo(1));
+ assertThat(version1Response.value2, equalTo(0));
+ }
+
+ @Test
+ public void testVersion_from1to1() throws Exception {
+ serviceB.registerHandler("/version", new BaseTransportRequestHandler<Version1Request>() {
+ @Override
+ public Version1Request newInstance() {
+ return new Version1Request();
+ }
+
+ @Override
+ public void messageReceived(Version1Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ assertThat(request.value2, equalTo(2));
+ Version1Response response = new Version1Response();
+ response.value1 = 1;
+ response.value2 = 2;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version1Request version1Request = new Version1Request();
+ version1Request.value1 = 1;
+ version1Request.value2 = 2;
+ Version1Response version1Response = serviceB.submitRequest(nodeB, "/version", version1Request, new BaseTransportResponseHandler<Version1Response>() {
+ @Override
+ public Version1Response newInstance() {
+ return new Version1Response();
+ }
+
+ @Override
+ public void handleResponse(Version1Response response) {
+ assertThat(response.value1, equalTo(1));
+ assertThat(response.value2, equalTo(2));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version1Response.value1, equalTo(1));
+ assertThat(version1Response.value2, equalTo(2));
+ }
+
+ @Test
+ public void testVersion_from0to0() throws Exception {
+ serviceA.registerHandler("/version", new BaseTransportRequestHandler<Version0Request>() {
+ @Override
+ public Version0Request newInstance() {
+ return new Version0Request();
+ }
+
+ @Override
+ public void messageReceived(Version0Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ Version0Response response = new Version0Response();
+ response.value1 = 1;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version0Request version0Request = new Version0Request();
+ version0Request.value1 = 1;
+ Version0Response version0Response = serviceA.submitRequest(nodeA, "/version", version0Request, new BaseTransportResponseHandler<Version0Response>() {
+ @Override
+ public Version0Response newInstance() {
+ return new Version0Response();
+ }
+
+ @Override
+ public void handleResponse(Version0Response response) {
+ assertThat(response.value1, equalTo(1));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version0Response.value1, equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java b/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java
new file mode 100644
index 0000000..2b6b42e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.local;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.AbstractSimpleTransportTests;
+import org.elasticsearch.transport.TransportService;
+
+public class SimpleLocalTransportTests extends AbstractSimpleTransportTests {
+
+ @Override
+ protected TransportService build(Settings settings, Version version) {
+ return new TransportService(new LocalTransport(settings, threadPool, version), threadPool).start();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
new file mode 100644
index 0000000..28c0fda
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.util.concurrent.KeyedLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+public class KeyedLockTests extends ElasticsearchTestCase {
+
+ @Test
+ public void checkIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedException {
+ ConcurrentHashMap<String, Integer> counter = new ConcurrentHashMap<String, Integer>();
+ ConcurrentHashMap<String, AtomicInteger> safeCounter = new ConcurrentHashMap<String, AtomicInteger>();
+ KeyedLock<String> connectionLock = new KeyedLock<String>();
+ String[] names = new String[randomIntBetween(1, 40)];
+ for (int i = 0; i < names.length; i++) {
+ names[i] = randomRealisticUnicodeOfLengthBetween(10, 20);
+ }
+ CountDownLatch startLatch = new CountDownLatch(1);
+ int numThreads = randomIntBetween(3, 10);
+ Thread[] threads = new Thread[numThreads];
+ for (int i = 0; i < numThreads; i++) {
+ threads[i] = new AcquireAndReleaseThread(startLatch, connectionLock, names, counter, safeCounter);
+ }
+ for (int i = 0; i < numThreads; i++) {
+ threads[i].start();
+ }
+ startLatch.countDown();
+ for (int i = 0; i < numThreads; i++) {
+ threads[i].join();
+ }
+ assertThat(connectionLock.hasLockedKeys(), equalTo(false));
+
+ Set<Entry<String, Integer>> entrySet = counter.entrySet();
+ assertThat(counter.size(), equalTo(safeCounter.size()));
+ for (Entry<String, Integer> entry : entrySet) {
+ AtomicInteger atomicInteger = safeCounter.get(entry.getKey());
+ assertThat(atomicInteger, not(Matchers.nullValue()));
+ assertThat(atomicInteger.get(), equalTo(entry.getValue()));
+ }
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void checkCannotAcquireTwoLocks() throws InterruptedException {
+ KeyedLock<String> connectionLock = new KeyedLock<String>();
+ String name = randomRealisticUnicodeOfLength(atLeast(10));
+ connectionLock.acquire(name);
+ connectionLock.acquire(name);
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void checkCannotReleaseUnacquiredLock() throws InterruptedException {
+ KeyedLock<String> connectionLock = new KeyedLock<String>();
+ String name = randomRealisticUnicodeOfLength(atLeast(10));
+ connectionLock.release(name);
+ }
+
+ public static class AcquireAndReleaseThread extends Thread {
+ private CountDownLatch startLatch;
+ KeyedLock<String> connectionLock;
+ String[] names;
+ ConcurrentHashMap<String, Integer> counter;
+ ConcurrentHashMap<String, AtomicInteger> safeCounter;
+
+ public AcquireAndReleaseThread(CountDownLatch startLatch, KeyedLock<String> connectionLock, String[] names,
+ ConcurrentHashMap<String, Integer> counter, ConcurrentHashMap<String, AtomicInteger> safeCounter) {
+ this.startLatch = startLatch;
+ this.connectionLock = connectionLock;
+ this.names = names;
+ this.counter = counter;
+ this.safeCounter = safeCounter;
+ }
+
+ @Override
+ public void run() {
+ try {
+ startLatch.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ int numRuns = atLeast(500);
+ for (int i = 0; i < numRuns; i++) {
+ String curName = names[randomInt(names.length - 1)];
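+ // The get/put increment on 'counter' is not atomic, so it is done under the per-key lock;
+ // the AtomicInteger-based 'safeCounter' is updated lock-free and serves as the reference
+ // tally the locked updates are compared against.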
+ connectionLock.acquire(curName);
+ try {
+ Integer integer = counter.get(curName);
+ if (integer == null) {
+ counter.put(curName, 1);
+ } else {
+ counter.put(curName, integer.intValue() + 1);
+ }
+ } finally {
+ connectionLock.release(curName);
+ }
+ AtomicInteger atomicInteger = new AtomicInteger(0);
+ AtomicInteger value = safeCounter.putIfAbsent(curName, atomicInteger);
+ if (value == null) {
+ atomicInteger.incrementAndGet();
+ } else {
+ value.incrementAndGet();
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java b/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java
new file mode 100644
index 0000000..dbd3bd8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.transport.AbstractSimpleTransportTests;
+import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Test;
+
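+/**
+ * Runs the shared transport test suite over the Netty TCP transport, binding each service to a
+ * randomized port range so concurrently running test JVMs do not collide.
+ */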
+public class SimpleNettyTransportTests extends AbstractSimpleTransportTests {
+
+ @Override
+ protected TransportService build(Settings settings, Version version) {
+ int startPort = 11000 + randomIntBetween(0, 255);
+ int endPort = startPort + 10;
+ settings = ImmutableSettings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+ return new TransportService(settings, new NettyTransport(settings, threadPool, new NetworkService(settings), version), threadPool).start();
+ }
+
+ @Test
+ public void testConnectException() {
+ try {
+ serviceA.connectToNode(new DiscoveryNode("C", new InetSocketTransportAddress("localhost", 9876), Version.CURRENT));
+ fail();
+ } catch (ConnectTransportException e) {
+ // all is well
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/tribe/TribeTests.java b/src/test/java/org/elasticsearch/tribe/TribeTests.java
new file mode 100644
index 0000000..a1c3c44
--- /dev/null
+++ b/src/test/java/org/elasticsearch/tribe/TribeTests.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Note: when talking to the tribe client there is no need to set the local flag on master read
+ * operations; the tribe node does that by default.
+ */
+public class TribeTests extends ElasticsearchIntegrationTest {
+
+ private TestCluster cluster2;
+ private Node tribeNode;
+ private Client tribeClient;
+
+ @Before
+ public void setupSecondCluster() {
+ // create another cluster
+ cluster2 = new TestCluster(randomLong(), 2, 2, cluster().getClusterName() + "-2");
+ cluster2.beforeTest(getRandom(), getPerTestTransportClientRatio());
+ cluster2.ensureAtLeastNumNodes(2);
+
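+ // t1 points at the suite's default cluster and t2 at the second cluster; write and read
+ // blocks are disabled so the tribe node can index into and search both of them.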
+ Settings settings = ImmutableSettings.builder()
+ .put("tribe.t1.cluster.name", cluster().getClusterName())
+ .put("tribe.t2.cluster.name", cluster2.getClusterName())
+ .put("tribe.blocks.write", false)
+ .put("tribe.blocks.read", false)
+ .build();
+
+ tribeNode = NodeBuilder.nodeBuilder()
+ .settings(settings)
+ .node();
+ tribeClient = tribeNode.client();
+ }
+
+ @After
+ public void tearDownSecondCluster() {
+ tribeNode.close();
+ cluster2.afterTest();
+ cluster2.close();
+ }
+
+ @Test
+ public void testTribeOnOneCluster() throws Exception {
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ cluster().client().admin().indices().prepareCreate("test1").get();
+ cluster2.client().admin().indices().prepareCreate("test2").get();
+
+
+ // wait till the tribe node has connected to both clusters, by checking that the indices exist in its cluster state
+ logger.info("wait till test1 and test2 exist in the tribe node state");
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().hasIndex("test1") && tribeState.getMetaData().hasIndex("test2") &&
+ tribeState.getRoutingTable().hasIndex("test1") && tribeState.getRoutingTable().hasIndex("test2");
+ }
+ });
+
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+
+ assertThat(tribeClient.admin().cluster().prepareHealth().setWaitForGreenStatus().get().getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("create 2 docs through the tribe node");
+ tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
+ tribeClient.prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
+ tribeClient.admin().indices().prepareRefresh().get();
+
+ logger.info("verify they are there");
+ assertHitCount(tribeClient.prepareCount().get(), 2L);
+ assertHitCount(tribeClient.prepareSearch().get(), 2L);
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().index("test1").mapping("type1") != null &&
+ tribeState.getMetaData().index("test2").mapping("type2") != null;
+ }
+ });
+
+
+ logger.info("write to another type");
+ tribeClient.prepareIndex("test1", "type2", "1").setSource("field1", "value1").get();
+ tribeClient.prepareIndex("test2", "type2", "1").setSource("field1", "value1").get();
+ tribeClient.admin().indices().prepareRefresh().get();
+
+
+ logger.info("verify they are there");
+ assertHitCount(tribeClient.prepareCount().get(), 4L);
+ assertHitCount(tribeClient.prepareSearch().get(), 4L);
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().index("test1").mapping("type1") != null && tribeState.getMetaData().index("test1").mapping("type2") != null &&
+ tribeState.getMetaData().index("test2").mapping("type1") != null && tribeState.getMetaData().index("test2").mapping("type2") != null;
+ }
+ });
+
+ logger.info("make sure master level write operations fail... (we don't really have a master)");
+ try {
+ tribeClient.admin().indices().prepareCreate("tribe_index").setMasterNodeTimeout("10ms").get();
+ fail();
+ } catch (MasterNotDiscoveredException e) {
+ // all is well!
+ }
+
+ logger.info("delete an index, and make sure its reflected");
+ cluster2.client().admin().indices().prepareDelete("test2").get();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().hasIndex("test1") && !tribeState.getMetaData().hasIndex("test2") &&
+ tribeState.getRoutingTable().hasIndex("test1") && !tribeState.getRoutingTable().hasIndex("test2");
+ }
+ });
+
+ logger.info("stop a node, make sure its reflected");
+ cluster2.stopRandomNode();
+ awaitSameNodeCounts();
+ }
+
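+ // The tribe node mirrors the data nodes of both clusters into its own cluster state, tagging
+ // each node with a tribe.name attribute; wait until the per-tribe counts match the sources.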
+ private void awaitSameNodeCounts() throws Exception {
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ DiscoveryNodes tribeNodes = tribeNode.client().admin().cluster().prepareState().get().getState().getNodes();
+ return countDataNodesForTribe("t1", tribeNodes) == cluster().client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()
+ && countDataNodesForTribe("t2", tribeNodes) == cluster2.client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size();
+ }
+ });
+ }
+
+ private int countDataNodesForTribe(String tribeName, DiscoveryNodes nodes) {
+ int count = 0;
+ for (DiscoveryNode node : nodes) {
+ if (!node.dataNode()) {
+ continue;
+ }
+ if (tribeName.equals(node.getAttributes().get(TribeService.TRIBE_NAME))) {
+ count++;
+ }
+ }
+ return count;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java b/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java
new file mode 100644
index 0000000..40ac68b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ttl;
+
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope=Scope.TEST)
+public class SimpleTTLTests extends ElasticsearchIntegrationTest {
+
+ private static final long PURGE_INTERVAL = 200;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("indices.ttl.interval", PURGE_INTERVAL)
+ .put("index.number_of_shards", 2) // 2 shards to test TTL purge with routing properly
+ .put("cluster.routing.operation.use_type", false) // make sure we control the shard computation
+ .put("cluster.routing.operation.hash.type", "djb")
+ .build();
+ }
+
+ @Test
+ public void testSimpleTTL() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("store", "yes").endObject()
+ .endObject()
+ .endObject())
+ .addMapping("type2", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type2")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("store", "yes").field("default", "1d").endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ long providedTTLValue = 3000;
+ logger.info("--> checking ttl");
+ // Index one doc without routing, one doc with routing, one doc with no TTL and no default, and one doc with default TTL
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTTL(providedTTLValue).setRefresh(true).execute().actionGet();
+ long now = System.currentTimeMillis();
+ client().prepareIndex("test", "type1", "with_routing").setSource("field1", "value1").setTTL(providedTTLValue).setRouting("routing").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "no_ttl").setSource("field1", "value1").execute().actionGet();
+ client().prepareIndex("test", "type2", "default_ttl").setSource("field1", "value1").execute().actionGet();
+
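+ // Each get below checks that the remaining _ttl is decreasing: it must stay above
+ // -PURGE_INTERVAL (a purge can lag by at most one interval) and below the provided TTL
+ // minus the time elapsed since indexing.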
+ // realtime get check
+ long currentTime = System.currentTimeMillis();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ long ttl0;
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+ // verify the ttl is still decreasing when going to the replica
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+ // non-realtime get (stored)
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+ // non-realtime get, going to the replica
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+
+ // no TTL provided so no TTL fetched
+ getResponse = client().prepareGet("test", "type1", "no_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.getField("_ttl"), nullValue());
+ // no TTL provided make sure it has default TTL
+ getResponse = client().prepareGet("test", "type2", "default_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(0L));
+
+ // make sure the purger has done its job for all indexed docs that are expired
+ long shouldBeExpiredDate = now + providedTTLValue + PURGE_INTERVAL + 2000;
+ currentTime = System.currentTimeMillis();
+ if (shouldBeExpiredDate - currentTime > 0) {
+ Thread.sleep(shouldBeExpiredDate - currentTime);
+ }
+
+ // We can't assume that the documents have actually been deleted after waiting for ttl + purgeInterval (waitTime).
+ // The ttl purging happens in the background, in a different thread, and might not have completed after waitTime.
+ // But we can use the index statistics' delete count to be sure that the deletes have been executed; it must be
+ // incremented before the ttl purging has finished.
+ logger.info("--> checking purger");
+ long currentDeleteCount;
+ do {
+ if (rarely()) {
+ client().admin().indices().prepareFlush("test").setFull(true).execute().actionGet();
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).execute().actionGet();
+ }
+ IndicesStatsResponse response = client().admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ } while (currentDeleteCount < 4); // TTL deletes two docs, but each one is indexed on both the primary and the replica shard.
+ assertThat(currentDeleteCount, equalTo(4L));
+
+ // realtime get check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ // replica realtime get check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ // Need to run a refresh for the non-realtime gets below to work.
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // non-realtime get (stored) check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ // non-realtime get check, going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java b/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java
new file mode 100644
index 0000000..47827d7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.update;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.AbstractExecutableScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+
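+/**
+ * Verifies that the update API can execute a native (Java) script registered through node
+ * settings instead of a scripting language.
+ */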
+@ClusterScope(scope=Scope.SUITE, numNodes=1)
+public class UpdateByNativeScriptTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder()
+ .put("script.native.custom.type", CustomNativeScriptFactory.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Test
+ public void testThatUpdateUsingNativeScriptWorks() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()).get();
+ ensureGreen();
+
+ index("test", "type", "1", "text", "value");
+
+ Map<String, Object> params = Maps.newHashMap();
+ params.put("foo", "SETVALUE");
+ client().prepareUpdate("test", "type", "1").setScript("custom").setScriptLang("native").setScriptParams(params).get();
+
+ Map<String, Object> data = client().prepareGet("test", "type", "1").get().getSource();
+ assertThat(data, hasKey("foo"));
+ assertThat(data.get("foo").toString(), is("SETVALUE"));
+ }
+
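+ /**
+ * Registered under the name "custom" via the script.native.custom.type node setting above;
+ * the update API resolves scripts with lang "native" through this factory.
+ */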
+ static class CustomNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new CustomScript(params);
+ }
+ }
+
+ static class CustomScript extends AbstractExecutableScript {
+ private Map<String, Object> params;
+ private Map<String, Object> vars = Maps.newHashMapWithExpectedSize(2);
+
+ public CustomScript(Map<String, Object> params) {
+ this.params = params;
+ }
+
+ @Override
+ public Object run() {
+ if (vars.containsKey("ctx") && vars.get("ctx") instanceof Map) {
+ Map ctx = (Map) vars.get("ctx");
+ if (ctx.containsKey("_source") && ctx.get("_source") instanceof Map) {
+ Map source = (Map) ctx.get("_source");
+ source.putAll(params);
+ }
+ }
+ // the return value does not matter here; UpdateHelper reads the modified ctx map that was set via setNextVar
+ return null;
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ vars.put(name, value);
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/update/UpdateTests.java b/src/test/java/org/elasticsearch/update/UpdateTests.java
new file mode 100644
index 0000000..2eb35f8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/update/UpdateTests.java
@@ -0,0 +1,503 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.update;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+public class UpdateTests extends ElasticsearchIntegrationTest {
+
+
+ protected void createIndex() throws Exception {
+ logger.info("--> creating index test");
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("store", "yes").endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ }
+
+ @Test
+ public void testUpdateRequest() throws Exception {
+ UpdateRequest request = new UpdateRequest("test", "type", "1");
+ // simple script
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+
+ // script with params
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .field("script", "script1")
+ .startObject("params").field("param1", "value1").endObject()
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+
+ // script with params and upsert
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ Map<String, Object> upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ // script with doc
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("doc").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .endObject());
+ Map<String, Object> doc = request.doc().sourceAsMap();
+ assertThat(doc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
+ }
+
+ @Test
+ public void testUpsert() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1")
+ .execute().actionGet();
+ assertTrue(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+ }
+
+ updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1")
+ .execute().actionGet();
+ assertFalse(updateResponse.isCreated());
+
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+ }
+ }
+
+ @Test
+ public void testUpsertDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setDocAsUpsert(true)
+ .setFields("_source")
+ .execute().actionGet();
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/3265
+ public void testNotUpsertDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ assertThrows(client().prepareUpdate("test", "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setDocAsUpsert(false)
+ .setFields("_source")
+ .execute(), DocumentMissingException.class);
+ }
+
+ @Test
+ public void testUpsertFields() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript("ctx._source.extra = \"foo\"")
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+
+ updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript("ctx._source.extra = \"foo\"")
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra").toString(), equalTo("foo"));
+ }
+
+ @Test
+ public void testVersionedUpdate() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type", "1", "text", "value"); // version is now 1
+
+ assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(2).execute(),
+ VersionConflictEngineException.class);
+
+ client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(1).get();
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2L));
+
+ // and again with a higher version..
+ client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v3'").setVersion(2).get();
+
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3L));
+
+ // after delete
+ client().prepareDelete("test", "type", "1").get();
+ assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(3).execute(),
+ DocumentMissingException.class);
+
+ // external versioning
+ client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get();
+ assertThrows(client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(11).setVersionType(VersionType.EXTERNAL).get();
+
+ assertThat(client().prepareGet("test", "type", "2").get().getVersion(), equalTo(11L));
+
+ // upserts - the combination with versions is a bit weird. Tests are here to ensure we do not change our behavior unintentionally
+
+ // With internal versioning it means: if the object is there with version X, update it or explode; if it is not there, index it.
+ client().prepareUpdate("test", "type", "3").setScript("ctx._source.text = 'v2'").setVersion(10).setUpsert("{ \"text\": \"v0\" }").get();
+ GetResponse get = get("test", "type", "3");
+ assertThat(get.getVersion(), equalTo(1L));
+ assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+ // With external versioning it means: if the object is there with a version lower than X, update it or explode; if it is not there, insert it with the new version.
+ client().prepareUpdate("test", "type", "4").setScript("ctx._source.text = 'v2'").
+ setVersion(10).setVersionType(VersionType.EXTERNAL).setUpsert("{ \"text\": \"v0\" }").get();
+ get = get("test", "type", "4");
+ assertThat(get.getVersion(), equalTo(10L));
+ assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+
+ // retry on conflict combined with an explicit version is rejected:
+
+ assertThrows(client().prepareUpdate("test", "type", "1").setVersion(10).setRetryOnConflict(5), ActionRequestValidationException.class);
+
+ }
+
+ @Test
+ public void testIndexAutoCreation() throws Exception {
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript("ctx._source.extra = \"foo\"")
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+ }
+
+ @Test
+ public void testUpdate() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ try {
+ client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field++").execute().actionGet();
+ fail();
+ } catch (DocumentMissingException e) {
+ // all is well
+ }
+
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field += 1").execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(2L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+ }
+
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field += count").addScriptParam("count", 3).execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(3L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+ }
+
+ // check noop
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx.op = 'none'").execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(3L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+ }
+
+ // check delete
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx.op = 'delete'").execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(4L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+
+ // check TTL is kept after an update without TTL
+ client().prepareIndex("test", "type1", "2").setSource("field", 1).setTTL(86400000L).setRefresh(true).execute().actionGet();
+ GetResponse getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+ client().prepareUpdate("test", "type1", "2").setScript("ctx._source.field += 1").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+
+ // check TTL update
+ client().prepareUpdate("test", "type1", "2").setScript("ctx._ttl = 3600000").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+ assertThat(ttl, lessThanOrEqualTo(3600000L));
+
+ // check timestamp update
+ client().prepareIndex("test", "type1", "3").setSource("field", 1).setRefresh(true).execute().actionGet();
+ client().prepareUpdate("test", "type1", "3").setScript("ctx._timestamp = \"2009-11-15T14:12:12\"").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(1258294332000L));
+
+ // check fields parameter
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field += 1").setFields("_source", "field").execute().actionGet();
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceRef(), notNullValue());
+ assertThat(updateResponse.getGetResult().field("field").getValue(), notNullValue());
+
+ // check updates without script
+ // add new field
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+ updateResponse = client().prepareUpdate("test", "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+ assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+ }
+
+ // change existing field
+ updateResponse = client().prepareUpdate("test", "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3"));
+ assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+ }
+
+ // recursive map
+ Map<String, Object> testMap = new HashMap<String, Object>();
+ Map<String, Object> testMap2 = new HashMap<String, Object>();
+ Map<String, Object> testMap3 = new HashMap<String, Object>();
+ testMap3.put("commonkey", testMap);
+ testMap3.put("map3", 5);
+ testMap2.put("map2", 6);
+ testMap.put("commonkey", testMap2);
+ testMap.put("map1", 8);
+
+ client().prepareIndex("test", "type1", "1").setSource("map", testMap).execute().actionGet();
+ updateResponse = client().prepareUpdate("test", "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ Map map1 = (Map) getResponse.getSourceAsMap().get("map");
+ assertThat(map1.size(), equalTo(3));
+ assertThat(map1.containsKey("map1"), equalTo(true));
+ assertThat(map1.containsKey("map3"), equalTo(true));
+ assertThat(map1.containsKey("commonkey"), equalTo(true));
+ Map map2 = (Map) map1.get("commonkey");
+ assertThat(map2.size(), equalTo(3));
+ assertThat(map2.containsKey("map1"), equalTo(true));
+ assertThat(map2.containsKey("map2"), equalTo(true));
+ assertThat(map2.containsKey("commonkey"), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testUpdateRequestWithBothScriptAndDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ try {
+ client().prepareUpdate("test", "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1")
+ .execute().actionGet();
+ fail("Should have thrown ActionRequestValidationException");
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors().size(), equalTo(1));
+ assertThat(e.validationErrors().get(0), containsString("can't provide both script and doc"));
+ assertThat(e.getMessage(), containsString("can't provide both script and doc"));
+ }
+ }
+
+ @Test
+ public void testUpdateRequestWithScriptAndShouldUpsertDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+ try {
+ client().prepareUpdate("test", "type1", "1")
+ .setScript("ctx._source.field += 1")
+ .setDocAsUpsert(true)
+ .execute().actionGet();
+ fail("Should have thrown ActionRequestValidationException");
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors().size(), equalTo(1));
+ assertThat(e.validationErrors().get(0), containsString("doc must be specified if doc_as_upsert is enabled"));
+ assertThat(e.getMessage(), containsString("doc must be specified if doc_as_upsert is enabled"));
+ }
+ }
+
+ @Test
+ @Slow
+ public void testConcurrentUpdateWithRetryOnConflict() throws Exception {
+ final boolean useBulkApi = randomBoolean();
+ createIndex();
+ ensureGreen();
+
+ int numberOfThreads = between(2, 5);
+ final CountDownLatch latch = new CountDownLatch(numberOfThreads);
+ final int numberOfUpdatesPerThread = between(1000, 10000);
+ final List<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
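+ // Every thread touches documents 0..numberOfUpdatesPerThread-1 exactly once, upserting on the
+ // first hit and incrementing afterwards; with retryOnConflict effectively unbounded, each
+ // document must end up with version and field value equal to numberOfThreads.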
+ for (int i = 0; i < numberOfThreads; i++) {
+ Runnable r = new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+ if (useBulkApi) {
+ UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate("test", "type1", Integer.toString(i))
+ .setScript("ctx._source.field += 1")
+ .setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject());
+ client().prepareBulk().add(updateRequestBuilder).execute().actionGet();
+ } else {
+ client().prepareUpdate("test", "type1", Integer.toString(i)).setScript("ctx._source.field += 1")
+ .setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
+ .execute().actionGet();
+ }
+ }
+ } catch (Throwable e) {
+ failures.add(e);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ };
+ new Thread(r).start();
+ }
+ latch.await();
+ for (Throwable throwable : failures) {
+ logger.info("Captured failure on concurrent update:", throwable);
+ }
+ assertThat(failures.size(), equalTo(0));
+ for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+ GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ assertThat(response.getId(), equalTo(Integer.toString(i)));
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getVersion(), equalTo((long) numberOfThreads));
+ assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java b/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java
new file mode 100644
index 0000000..c6214fa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.validate;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matcher;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.QueryBuilders.queryString;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
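+/**
+ * Exercises the validate query API, which parses a query (and, with explain enabled, reports
+ * the rewritten Lucene query) without executing it.
+ */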
+public class SimpleValidateQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleValidateQuery() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setSource("foo".getBytes(Charsets.UTF_8)).execute().actionGet().isValid(), equalTo(false));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("_id:1")).execute().actionGet().isValid(), equalTo(true));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("_i:d:1")).execute().actionGet().isValid(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("foo:1")).execute().actionGet().isValid(), equalTo(true));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("bar:hey")).execute().actionGet().isValid(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("nonexistent:hello")).execute().actionGet().isValid(), equalTo(true));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("foo:1 AND")).execute().actionGet().isValid(), equalTo(false));
+ }
+
+ @Test
+ public void explainValidateQuery() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .startObject("baz").field("type", "string").field("analyzer", "snowball").endObject()
+ .startObject("pin").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("child-type")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("child-type")
+ .startObject("_parent").field("type", "type1").endObject()
+ .startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+
+ ValidateQueryResponse response;
+ response = client().admin().indices().prepareValidateQuery("test")
+ .setSource("foo".getBytes(Charsets.UTF_8))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(false));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to parse"));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), nullValue());
+
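+ // assertExplanation, a helper in this test class, validates the given query with explain
+ // enabled and asserts on the rewritten Lucene query string.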
+ assertExplanation(QueryBuilders.queryString("_id:1"), equalTo("ConstantScore(_uid:type1#1)"));
+
+ assertExplanation(QueryBuilders.idsQuery("type1").addIds("1").addIds("2"),
+ equalTo("ConstantScore(_uid:type1#1 _uid:type1#2)"));
+
+ assertExplanation(QueryBuilders.queryString("foo"), equalTo("_all:foo"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.orFilter(
+ FilterBuilders.termFilter("bar", "2"),
+ FilterBuilders.termFilter("baz", "3")
+ )
+ ), equalTo("filtered(foo:1)->cache(bar:[2 TO 2]) cache(baz:3)"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.orFilter(
+ FilterBuilders.termFilter("bar", "2")
+ )
+ ), equalTo("filtered(foo:1)->cache(bar:[2 TO 2])"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.matchAllQuery(),
+ FilterBuilders.geoPolygonFilter("pin.location")
+ .addPoint(40, -70)
+ .addPoint(30, -80)
+ .addPoint(20, -90)
+ .addPoint(40, -70) // closing polygon
+ ), equalTo("ConstantScore(GeoPolygonFilter(pin.location, [[40.0, -70.0], [30.0, -80.0], [20.0, -90.0], [40.0, -70.0]]))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoBoundingBoxFilter("pin.location")
+ .topLeft(40, -80)
+ .bottomRight(20, -70)
+ ), equalTo("ConstantScore(GeoBoundingBoxFilter(pin.location, [40.0, -80.0], [20.0, -70.0]))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceFilter("pin.location")
+ .lat(10).lon(20).distance(15, DistanceUnit.DEFAULT).geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceFilter(pin.location, PLANE, 15.0, 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceFilter("pin.location")
+ .lat(10).lon(20).distance(15, DistanceUnit.DEFAULT).geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceFilter(pin.location, PLANE, 15.0, 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceRangeFilter("pin.location")
+ .lat(10).lon(20).from("15m").to("25m").geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceRangeFilter(pin.location, PLANE, [15.0 - 25.0], 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceRangeFilter("pin.location")
+ .lat(10).lon(20).from("15miles").to("25miles").geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceRangeFilter(pin.location, PLANE, [" + DistanceUnit.DEFAULT.convert(15.0, DistanceUnit.MILES) + " - " + DistanceUnit.DEFAULT.convert(25.0, DistanceUnit.MILES) + "], 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.andFilter(
+ FilterBuilders.termFilter("bar", "2"),
+ FilterBuilders.termFilter("baz", "3")
+ )
+ ), equalTo("filtered(foo:1)->+cache(bar:[2 TO 2]) +cache(baz:3)"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.termsFilter("foo", "1", "2", "3")),
+ equalTo("ConstantScore(cache(foo:1 foo:2 foo:3))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.notFilter(FilterBuilders.termFilter("foo", "bar"))),
+ equalTo("ConstantScore(NotFilter(cache(foo:bar)))"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.hasChildFilter(
+ "child-type",
+ QueryBuilders.matchQuery("foo", "1")
+ )
+ ), equalTo("filtered(foo:1)->CustomQueryWrappingFilter(child_filter[child-type/type1](filtered(foo:1)->cache(_type:child-type)))"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.scriptFilter("true")
+ ), equalTo("filtered(foo:1)->ScriptFilter(true)"));
+
+ }
+
+ @Test
+ public void explainValidateQueryTwoNodes() throws IOException {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .startObject("baz").field("type", "string").field("analyzer", "snowball").endObject()
+ .startObject("pin").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+
+
+ for (Client client : cluster()) {
+ ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
+ .setSource("foo".getBytes(Charsets.UTF_8))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(false));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to parse"));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), nullValue());
+
+ }
+
+ for (Client client : cluster()) {
+ ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.queryString("foo"))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(true));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), equalTo("_all:foo"));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ }
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/3629
+ public void explainDateRangeInQueryString() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).get();
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+
+ refresh();
+
+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery()
+ .setQuery(queryString("past:[now-2M/d TO now/d]")).setExplain(true).get();
+
+ assertNoFailures(response);
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ DateTime twoMonthsAgo = new DateTime(DateTimeZone.UTC).minusMonths(2).withTimeAtStartOfDay();
+ DateTime now = new DateTime(DateTimeZone.UTC).plusDays(1).withTimeAtStartOfDay();
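+ // the lower bound "now-2M/d" rounds down to the start of that day, while the inclusive upper
+ // bound "now/d" rounds up to the start of the next day (hence the plusDays(1) above)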
+ assertThat(response.getQueryExplanation().get(0).getExplanation(),
+ equalTo("past:[" + twoMonthsAgo.getMillis() + " TO " + now.getMillis() + "]"));
+ assertThat(response.isValid(), equalTo(true));
+ }
+
+ private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matcher) {
+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery("test")
+ .setTypes("type1")
+ .setQuery(queryBuilder)
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher);
+ assertThat(response.isValid(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java b/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java
new file mode 100644
index 0000000..75a4384
--- /dev/null
+++ b/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.versioning;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class ConcurrentDocumentOperationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void concurrentOperationOnSameDocTest() throws Exception {
+
+ logger.info("--> create an index with 1 shard and max replicas based on nodes");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", cluster().size()-1))
+ .execute().actionGet();
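+ // with number_of_replicas = nodes - 1, every node in the cluster holds a copy of the single shard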
+
+ logger.info("execute concurrent updates on the same doc");
+ int numberOfUpdates = 100;
+ final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
+ final CountDownLatch latch = new CountDownLatch(numberOfUpdates);
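+ // fire all index requests asynchronously; the latch is counted down on success and failure alike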
+ for (int i = 0; i < numberOfUpdates; i++) {
+ client().prepareIndex("test", "type1", "1").setSource("field1", i).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ e.printStackTrace();
+ failure.set(e);
+ latch.countDown();
+ }
+ });
+ }
+
+ latch.await();
+
+ assertThat(failure.get(), nullValue());
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("done indexing, check all have the same field value");
+ Map<String, Object> masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap();
+ for (int i = 0; i < (cluster().size() * 5); i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java b/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java
new file mode 100644
index 0000000..95cce96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.versioning;
+
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleVersioningTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testExternalVersioningInitialDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ // Note - external versioning doesn't throw version conflicts on deletes of non-existent records. This is different from internal versioning.
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+
+ // this should conflict with the delete transaction, which told us that the object was deleted at version 17.
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class
+ );
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(18).
+ setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(18L));
+ }
+
+ @Test
+ public void testExternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(12).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(12L));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(14).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(14L));
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(14L));
+ }
+
+ // deleting with a lower version fails.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ // Delete with a higher version deletes all versions up to the given one.
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(17L));
+
+ // Deleting with a lower version keeps on failing after a delete.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+
+ // But delete with a higher version is OK.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(18L));
+
+
+ // TODO: This behavior breaks the REST API, which returns HTTP status 201; the good news is that this is only the case until the deletes GC kicks in.
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(19).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(19L));
+
+
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(20L));
+
+ // Make sure that the next delete will be GC'ed immediately. Note we set it in the index settings so it is cleaned up along with the index.
+ HashMap<String, Object> newSettings = new HashMap<String, Object>();
+ newSettings.put("index.gc_deletes", -1);
+ client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet();
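+ // (for reference, the REST equivalent is PUT /test/_settings with body {"index.gc_deletes": -1})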
+
+ Thread.sleep(300); // GC works based on estimated, sampled time. Give it a chance...
+
+ // And now the previous version resolves to -1 (not found), so indexing at the same external version succeeds
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(20L));
+ }
+
+ @Test
+ public void testInternalVersioningInitialDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(17).execute(),
+ VersionConflictEngineException.class);
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")
+ .setCreate(true).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1L));
+ }
+
+
+ @Test
+ public void testInternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1L));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(2L));
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(),
+ DocumentAlreadyExistsException.class);
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(),
+ DocumentAlreadyExistsException.class);
+
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2L));
+ }
+
+ // search with versioning
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(2L));
+ }
+
+ // search without versioning
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(Versions.NOT_FOUND));
+ }
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(2).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(3L));
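+ // note that a successful delete increments the version, just like an index operation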
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(2).execute(), VersionConflictEngineException.class);
+
+
+ // This is intricate - the object was already deleted, but the delete transaction carried the right version. We add another one
+ // and thus the version is increased again.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(3).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(4L));
+ }
+
+ @Test
+ public void testSimpleVersioningWithFlush() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1L));
+
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(2L));
+
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+ assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2L));
+ }
+
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(2L));
+ }
+ }
+
+ @Test
+ public void testVersioningWithBulk() {
+ createIndex("test");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")).execute().actionGet();
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(1));
+ IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
+ assertThat(indexResponse.getVersion(), equalTo(1L));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java b/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java
new file mode 100644
index 0000000..ce40171
--- /dev/null
+++ b/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.io.Files.*;
+import static org.elasticsearch.common.io.FileSystemUtils.deleteRecursively;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class FileWatcherTest extends ElasticsearchTestCase {
+
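+ // Records every FileChangesListener callback as "<callback>: <path relative to rootDir>",
+ // so the tests below can assert on the exact sequence of notifications.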
+ private class RecordingChangeListener extends FileChangesListener {
+
+ private File rootDir;
+
+ private RecordingChangeListener(File rootDir) {
+ this.rootDir = rootDir;
+ }
+
+ private String getRelativeFileName(File file) {
+ return rootDir.toURI().relativize(file.toURI()).getPath();
+ }
+
+ private List<String> notifications = newArrayList();
+
+ @Override
+ public void onFileInit(File file) {
+ notifications.add("onFileInit: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryInit(File file) {
+ notifications.add("onDirectoryInit: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileCreated(File file) {
+ notifications.add("onFileCreated: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileDeleted(File file) {
+ notifications.add("onFileDeleted: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileChanged(File file) {
+ notifications.add("onFileChanged: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryCreated(File file) {
+ notifications.add("onDirectoryCreated: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryDeleted(File file) {
+ notifications.add("onDirectoryDeleted: " + getRelativeFileName(file));
+ }
+
+ public List<String> notifications() {
+ return notifications;
+ }
+ }
+
+ @Test
+ public void testSimpleFileOperations() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testFile = new File(tempDir, "test.txt");
+ touch(testFile);
+ FileWatcher fileWatcher = new FileWatcher(testFile);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(equalTo("onFileInit: test.txt")));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ append("Test", testFile, Charset.defaultCharset());
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(equalTo("onFileChanged: test.txt")));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ testFile.delete();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(equalTo("onFileDeleted: test.txt")));
+
+ }
+
+ @Test
+ public void testSimpleDirectoryOperations() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ touch(new File(testDir, "test.txt"));
+ touch(new File(testDir, "test0.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onFileInit: test-dir/test.txt"),
+ equalTo("onFileInit: test-dir/test0.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ for (int i = 0; i < 4; i++) {
+ touch(new File(testDir, "test" + i + ".txt"));
+ }
+ // Make sure that the first file is modified
+ append("Test", new File(testDir, "test0.txt"), Charset.defaultCharset());
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileChanged: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test1.txt"),
+ equalTo("onFileCreated: test-dir/test2.txt"),
+ equalTo("onFileCreated: test-dir/test3.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ new File(testDir, "test1.txt").delete();
+ new File(testDir, "test2.txt").delete();
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test1.txt"),
+ equalTo("onFileDeleted: test-dir/test2.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ new File(testDir, "test0.txt").delete();
+ touch(new File(testDir, "test2.txt"));
+ touch(new File(testDir, "test4.txt"));
+ fileWatcher.checkAndNotify();
+
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test2.txt"),
+ equalTo("onFileCreated: test-dir/test4.txt")
+ ));
+
+
+ changes.notifications().clear();
+
+ new File(testDir, "test3.txt").delete();
+ new File(testDir, "test4.txt").delete();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test3.txt"),
+ equalTo("onFileDeleted: test-dir/test4.txt")
+ ));
+
+
+ changes.notifications().clear();
+ deleteRecursively(testDir);
+ fileWatcher.checkAndNotify();
+
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test.txt"),
+ equalTo("onFileDeleted: test-dir/test2.txt"),
+ equalTo("onDirectoryDeleted: test-dir")
+ ));
+
+ }
+
+ @Test
+ public void testNestedDirectoryOperations() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ touch(new File(testDir, "test.txt"));
+ new File(testDir, "sub-dir").mkdir();
+ touch(new File(testDir, "sub-dir/test0.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onDirectoryInit: test-dir/sub-dir/"),
+ equalTo("onFileInit: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileInit: test-dir/test.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Create a new file in the subdirectory
+ touch(new File(testDir, "sub-dir/test1.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileCreated: test-dir/sub-dir/test1.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Create a new subdirectory with a file and a nested subdirectory inside it
+ new File(testDir, "first-level").mkdir();
+ touch(new File(testDir, "first-level/file1.txt"));
+ new File(testDir, "first-level/second-level").mkdir();
+ touch(new File(testDir, "first-level/second-level/file2.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryCreated: test-dir/first-level/"),
+ equalTo("onFileCreated: test-dir/first-level/file1.txt"),
+ equalTo("onDirectoryCreated: test-dir/first-level/second-level/"),
+ equalTo("onFileCreated: test-dir/first-level/second-level/file2.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Delete a directory, check notifications for its recursively deleted contents
+ deleteRecursively(new File(testDir, "first-level"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/first-level/file1.txt"),
+ equalTo("onFileDeleted: test-dir/first-level/second-level/file2.txt"),
+ equalTo("onDirectoryDeleted: test-dir/first-level/second-level"),
+ equalTo("onDirectoryDeleted: test-dir/first-level")
+ ));
+ }
+
+ @Test
+ public void testFileReplacingDirectory() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ File subDir = new File(testDir, "sub-dir");
+ subDir.mkdir();
+ touch(new File(subDir, "test0.txt"));
+ touch(new File(subDir, "test1.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onDirectoryInit: test-dir/sub-dir/"),
+ equalTo("onFileInit: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileInit: test-dir/sub-dir/test1.txt")
+ ));
+
+ changes.notifications().clear();
+
+ deleteRecursively(subDir);
+ touch(subDir);
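+ // "sub-dir" is now a regular file with the same name as the directory it replaced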
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileDeleted: test-dir/sub-dir/test1.txt"),
+ equalTo("onDirectoryDeleted: test-dir/sub-dir"),
+ equalTo("onFileCreated: test-dir/sub-dir")
+ ));
+
+ changes.notifications().clear();
+
+ subDir.delete();
+ subDir.mkdir();
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/sub-dir/"),
+ equalTo("onDirectoryCreated: test-dir/sub-dir/")
+ ));
+ }
+
+ @Test
+ public void testEmptyDirectory() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ touch(new File(testDir, "test0.txt"));
+ touch(new File(testDir, "test1.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ changes.notifications().clear();
+
+ new File(testDir, "test0.txt").delete();
+ new File(testDir, "test1.txt").delete();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test0.txt"),
+ equalTo("onFileDeleted: test-dir/test1.txt")
+ ));
+ }
+
+ @Test
+ public void testNoDirectoryOnInit() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), hasSize(0));
+ changes.notifications().clear();
+
+ testDir.mkdir();
+ touch(new File(testDir, "test0.txt"));
+ touch(new File(testDir, "test1.txt"));
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryCreated: test-dir/"),
+ equalTo("onFileCreated: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test1.txt")
+ ));
+ }
+
+ @Test
+ public void testNoFileOnInit() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testFile = new File(tempDir, "testfile.txt");
+
+ FileWatcher fileWatcher = new FileWatcher(testFile);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), hasSize(0));
+ changes.notifications().clear();
+
+ touch(testFile);
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileCreated: testfile.txt")
+ ));
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff
new file mode 100755
index 0000000..2ddd985
--- /dev/null
+++ b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff
@@ -0,0 +1,201 @@
+SET ISO8859-1
+TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
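+# TRY lists the characters substituted when generating suggestions, roughly in frequency order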
+NOSUGGEST !
+
+# ordinal numbers
+COMPOUNDMIN 1
+# only in compounds: 1th, 2th, 3th
+ONLYINCOMPOUND c
+# compound rules:
+# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
+# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
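+# e.g. 56714th decomposes as 5+6+7+1+4th under rule n*1t, and 21st as 2+1st under rule n*mp
+# (digits carry flag n, "1" also carries flag 1, and ordinal suffixes carry t or p; see en_US.dic)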
+WORDCHARS 0123456789
+
+PFX A Y 1
+PFX A 0 re .
+
+PFX I Y 1
+PFX I 0 in .
+
+PFX U Y 1
+PFX U 0 un .
+
+PFX C Y 1
+PFX C 0 de .
+
+PFX E Y 1
+PFX E 0 dis .
+
+PFX F Y 1
+PFX F 0 con .
+
+PFX K Y 1
+PFX K 0 pro .
+
+SFX V N 2
+SFX V e ive e
+SFX V 0 ive [^e]
+
+SFX N Y 3
+SFX N e ion e
+SFX N y ication y
+SFX N 0 en [^ey]
+
+SFX X Y 3
+SFX X e ions e
+SFX X y ications y
+SFX X 0 ens [^ey]
+
+SFX H N 2
+SFX H y ieth y
+SFX H 0 th [^y]
+
+SFX Y Y 1
+SFX Y 0 ly .
+
+SFX G Y 2
+SFX G e ing e
+SFX G 0 ing [^e]
+
+SFX J Y 2
+SFX J e ings e
+SFX J 0 ings [^e]
+
+SFX D Y 4
+SFX D 0 d e
+SFX D y ied [^aeiou]y
+SFX D 0 ed [^ey]
+SFX D 0 ed [aeiou]y
+
+SFX T N 4
+SFX T 0 st e
+SFX T y iest [^aeiou]y
+SFX T 0 est [aeiou]y
+SFX T 0 est [^ey]
+
+SFX R Y 4
+SFX R 0 r e
+SFX R y ier [^aeiou]y
+SFX R 0 er [aeiou]y
+SFX R 0 er [^ey]
+
+SFX Z Y 4
+SFX Z 0 rs e
+SFX Z y iers [^aeiou]y
+SFX Z 0 ers [aeiou]y
+SFX Z 0 ers [^ey]
+
+SFX S Y 4
+SFX S y ies [^aeiou]y
+SFX S 0 s [aeiou]y
+SFX S 0 es [sxzh]
+SFX S 0 s [^sxzhy]
+
+SFX P Y 3
+SFX P y iness [^aeiou]y
+SFX P 0 ness [aeiou]y
+SFX P 0 ness [^y]
+
+SFX M Y 1
+SFX M 0 's .
+
+SFX B Y 3
+SFX B 0 able [^aeiou]
+SFX B 0 able ee
+SFX B e able [^aeiou]e
+
+SFX L Y 1
+SFX L 0 ment .
+
+REP 88
+REP a ei
+REP ei a
+REP a ey
+REP ey a
+REP ai ie
+REP ie ai
+REP are air
+REP are ear
+REP are eir
+REP air are
+REP air ere
+REP ere air
+REP ere ear
+REP ere eir
+REP ear are
+REP ear air
+REP ear ere
+REP eir are
+REP eir ere
+REP ch te
+REP te ch
+REP ch ti
+REP ti ch
+REP ch tu
+REP tu ch
+REP ch s
+REP s ch
+REP ch k
+REP k ch
+REP f ph
+REP ph f
+REP gh f
+REP f gh
+REP i igh
+REP igh i
+REP i uy
+REP uy i
+REP i ee
+REP ee i
+REP j di
+REP di j
+REP j gg
+REP gg j
+REP j ge
+REP ge j
+REP s ti
+REP ti s
+REP s ci
+REP ci s
+REP k cc
+REP cc k
+REP k qu
+REP qu k
+REP kw qu
+REP o eau
+REP eau o
+REP o ew
+REP ew o
+REP oo ew
+REP ew oo
+REP ew ui
+REP ui ew
+REP oo ui
+REP ui oo
+REP ew u
+REP u ew
+REP oo u
+REP u oo
+REP u oe
+REP oe u
+REP u ieu
+REP ieu u
+REP ue ew
+REP ew ue
+REP uff ough
+REP oo ieu
+REP ieu oo
+REP ier ear
+REP ear ier
+REP ear air
+REP air ear
+REP w qu
+REP qu w
+REP z ss
+REP ss z
+REP shun tion
+REP shun sion
+REP shun cion
diff --git a/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic
new file mode 100755
index 0000000..4f69807
--- /dev/null
+++ b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic
@@ -0,0 +1,62120 @@
+62118
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
+a
+A
+AA
+AAA
+Aachen/M
+aardvark/SM
+Aaren/M
+Aarhus/M
+Aarika/M
+Aaron/M
+AB
+aback
+abacus/SM
+abaft
+Abagael/M
+Abagail/M
+abalone/SM
+abandoner/M
+abandon/LGDRS
+abandonment/SM
+abase/LGDSR
+abasement/S
+abaser/M
+abashed/UY
+abashment/MS
+abash/SDLG
+abate/DSRLG
+abated/U
+abatement/MS
+abater/M
+abattoir/SM
+Abba/M
+Abbe/M
+abb/S
+abbess/SM
+Abbey/M
+abbey/MS
+Abbie/M
+Abbi/M
+Abbot/M
+abbot/MS
+Abbott/M
+abbr
+abbrev
+abbreviated/UA
+abbreviates/A
+abbreviate/XDSNG
+abbreviating/A
+abbreviation/M
+Abbye/M
+Abby/M
+ABC/M
+Abdel/M
+abdicate/NGDSX
+abdication/M
+abdomen/SM
+abdominal/YS
+abduct/DGS
+abduction/SM
+abductor/SM
+Abdul/M
+ab/DY
+abeam
+Abelard/M
+Abel/M
+Abelson/M
+Abe/M
+Aberdeen/M
+Abernathy/M
+aberrant/YS
+aberrational
+aberration/SM
+abet/S
+abetted
+abetting
+abettor/SM
+Abeu/M
+abeyance/MS
+abeyant
+Abey/M
+abhorred
+abhorrence/MS
+abhorrent/Y
+abhorrer/M
+abhorring
+abhor/S
+abidance/MS
+abide/JGSR
+abider/M
+abiding/Y
+Abidjan/M
+Abie/M
+Abigael/M
+Abigail/M
+Abigale/M
+Abilene/M
+ability/IMES
+abjection/MS
+abjectness/SM
+abject/SGPDY
+abjuration/SM
+abjuratory
+abjurer/M
+abjure/ZGSRD
+ablate/VGNSDX
+ablation/M
+ablative/SY
+ablaze
+abler/E
+ables/E
+ablest
+able/U
+abloom
+ablution/MS
+Ab/M
+ABM/S
+abnegate/NGSDX
+abnegation/M
+Abner/M
+abnormality/SM
+abnormal/SY
+aboard
+abode/GMDS
+abolisher/M
+abolish/LZRSDG
+abolishment/MS
+abolitionism/SM
+abolitionist/SM
+abolition/SM
+abominable
+abominably
+abominate/XSDGN
+abomination/M
+aboriginal/YS
+aborigine/SM
+Aborigine/SM
+aborning
+abortionist/MS
+abortion/MS
+abortiveness/M
+abortive/PY
+abort/SRDVG
+Abo/SM!
+abound/GDS
+about/S
+aboveboard
+aboveground
+above/S
+abracadabra/S
+abrader/M
+abrade/SRDG
+Abraham/M
+Abrahan/M
+Abra/M
+Abramo/M
+Abram/SM
+Abramson/M
+Abran/M
+abrasion/MS
+abrasiveness/S
+abrasive/SYMP
+abreaction/MS
+abreast
+abridge/DSRG
+abridged/U
+abridger/M
+abridgment/SM
+abroad
+abrogate/XDSNG
+abrogation/M
+abrogator/SM
+abruptness/SM
+abrupt/TRYP
+ABS
+abscess/GDSM
+abscissa/SM
+abscission/SM
+absconder/M
+abscond/SDRZG
+abseil/SGDR
+absence/SM
+absenteeism/SM
+absentee/MS
+absentia/M
+absentmindedness/S
+absentminded/PY
+absent/SGDRY
+absinthe/SM
+abs/M
+absoluteness/SM
+absolute/NPRSYTX
+absolution/M
+absolutism/MS
+absolutist/SM
+absolve/GDSR
+absolver/M
+absorb/ASGD
+absorbed/U
+absorbency/MS
+absorbent/MS
+absorber/SM
+absorbing/Y
+absorption/MS
+absorptive
+absorptivity/M
+abstainer/M
+abstain/GSDRZ
+abstemiousness/MS
+abstemious/YP
+abstention/SM
+abstinence/MS
+abstinent/Y
+abstractedness/SM
+abstracted/YP
+abstracter/M
+abstractionism/M
+abstractionist/SM
+abstraction/SM
+abstractness/SM
+abstractor/MS
+abstract/PTVGRDYS
+abstruseness/SM
+abstruse/PRYT
+absurdity/SM
+absurdness/SM
+absurd/PRYST
+Abuja
+abundance/SM
+abundant/Y
+abused/E
+abuse/GVZDSRB
+abuser/M
+abuses/E
+abusing/E
+abusiveness/SM
+abusive/YP
+abut/LS
+abutment/SM
+abutted
+abutter/MS
+abutting
+abuzz
+abysmal/Y
+abyssal
+Abyssinia/M
+Abyssinian
+abyss/SM
+AC
+acacia/SM
+academe/MS
+academia/SM
+academical/Y
+academicianship
+academician/SM
+academic/S
+academy/SM
+Acadia/M
+acanthus/MS
+Acapulco/M
+accede/SDG
+accelerated/U
+accelerate/NGSDXV
+accelerating/Y
+acceleration/M
+accelerator/SM
+accelerometer/SM
+accented/U
+accent/SGMD
+accentual/Y
+accentuate/XNGSD
+accentuation/M
+acceptability/SM
+acceptability's/U
+acceptableness/SM
+acceptable/P
+acceptably/U
+acceptance/SM
+acceptant
+acceptation/SM
+accepted/Y
+accepter/M
+accepting/PY
+acceptor/MS
+accept/RDBSZVG
+accessed/A
+accessibility/IMS
+accessible/IU
+accessibly/I
+accession/SMDG
+accessors
+accessory/SM
+access/SDMG
+accidence/M
+accidentalness/M
+accidental/SPY
+accident/MS
+acclaimer/M
+acclaim/SDRG
+acclamation/MS
+acclimate/XSDGN
+acclimation/M
+acclimatisation
+acclimatise/DG
+acclimatization/AMS
+acclimatized/U
+acclimatize/RSDGZ
+acclimatizes/A
+acclivity/SM
+accolade/GDSM
+accommodated/U
+accommodate/XVNGSD
+accommodating/Y
+accommodation/M
+accommodativeness/M
+accommodative/P
+accompanied/U
+accompanier/M
+accompaniment/MS
+accompanist/SM
+accompany/DRSG
+accomplice/MS
+accomplished/U
+accomplisher/M
+accomplishment/SM
+accomplish/SRDLZG
+accordance/SM
+accordant/Y
+accorder/M
+according/Y
+accordionist/SM
+accordion/MS
+accord/SZGMRD
+accost/SGD
+accountability/MS
+accountability's/U
+accountableness/M
+accountable/U
+accountably/U
+accountancy/SM
+accountant/MS
+account/BMDSGJ
+accounted/U
+accounting/M
+accouter/GSD
+accouterments
+accouterment's
+accoutrement/M
+Accra/M
+accreditation/SM
+accredited/U
+accredit/SGD
+accretion/SM
+accrual/MS
+accrue/SDG
+acct
+acculturate/XSDVNG
+acculturation/M
+accumulate/VNGSDX
+accumulation/M
+accumulativeness/M
+accumulative/YP
+accumulator/MS
+accuracy/IMS
+accurate/IY
+accurateness/SM
+accursedness/SM
+accursed/YP
+accusal/M
+accusation/SM
+accusative/S
+accusatory
+accused/M
+accuser/M
+accuse/SRDZG
+accusing/Y
+accustomedness/M
+accustomed/P
+accustom/SGD
+ac/DRG
+aced/M
+acerbate/DSG
+acerbic
+acerbically
+acerbity/MS
+ace/SM
+acetaminophen/S
+acetate/MS
+acetic
+acetone/SM
+acetonic
+acetylene/MS
+Acevedo/M
+Achaean/M
+Achebe/M
+ached/A
+ache/DSG
+achene/SM
+Achernar/M
+aches/A
+Acheson/M
+achievable/U
+achieved/UA
+achieve/LZGRSDB
+achievement/SM
+achiever/M
+Achilles
+aching/Y
+achoo
+achromatic
+achy/TR
+acidic
+acidification/M
+acidify/NSDG
+acidity/SM
+acidness/M
+acidoses
+acidosis/M
+acid/SMYP
+acidulous
+acing/M
+Ackerman/M
+acknowledgeable
+acknowledgedly
+acknowledged/U
+acknowledge/GZDRS
+acknowledger/M
+acknowledgment/SAM
+ACLU
+Ac/M
+ACM
+acme/SM
+acne/MDS
+acolyte/MS
+Aconcagua/M
+aconite/MS
+acorn/SM
+Acosta/M
+acoustical/Y
+acoustician/M
+acoustic/S
+acoustics/M
+acquaintance/MS
+acquaintanceship/S
+acquainted/U
+acquaint/GASD
+acquiesce/GSD
+acquiescence/SM
+acquiescent/Y
+acquirable
+acquire/ASDG
+acquirement/SM
+acquisition's/A
+acquisition/SM
+acquisitiveness/MS
+acquisitive/PY
+acquit/S
+acquittal/MS
+acquittance/M
+acquitted
+acquitter/M
+acquitting
+acreage/MS
+acre/MS
+acridity/MS
+acridness/SM
+acrid/TPRY
+acrimoniousness/MS
+acrimonious/YP
+acrimony/MS
+acrobatically
+acrobatic/S
+acrobatics/M
+acrobat/SM
+acronym/SM
+acrophobia/SM
+Acropolis/M
+acropolis/SM
+across
+acrostic/SM
+Acrux/M
+acrylate/M
+acrylic/S
+ACT
+Actaeon/M
+Acta/M
+ACTH
+acting/S
+actinic
+actinide/SM
+actinium/MS
+actinometer/MS
+action/DMSGB
+actions/AI
+action's/IA
+activate/AXCDSNGI
+activated/U
+activation/AMCI
+activator/SM
+active/APY
+actively/I
+activeness/MS
+actives
+activism/MS
+activist/MS
+activities/A
+activity/MSI
+Acton/M
+actor/MAS
+actress/SM
+act's
+Acts
+act/SADVG
+actuality/SM
+actualization/MAS
+actualize/GSD
+actualizes/A
+actual/SY
+actuarial/Y
+actuary/MS
+actuate/GNXSD
+actuation/M
+actuator/SM
+acuity/MS
+acumen/SM
+acupressure/S
+acupuncture/SM
+acupuncturist/S
+acuteness/MS
+acute/YTSRP
+acyclic
+acyclically
+acyclovir/S
+AD
+adage/MS
+adagio/S
+Adah/M
+Adair/M
+Adaline/M
+Ada/M
+adamant/SY
+Adamo/M
+Adam/SM
+Adamson/M
+Adana/M
+Adan/M
+adaptability/MS
+adaptable/U
+adaptation/MS
+adaptedness/M
+adapted/P
+adapter/M
+adapting/A
+adaption
+adaptively
+adaptiveness/M
+adaptive/U
+adaptivity
+adapt/SRDBZVG
+Adara/M
+ad/AS
+ADC
+Adda/M
+Addams
+addenda
+addend/SM
+addendum/M
+adder/M
+Addia/M
+addiction/MS
+addictive/P
+addict/SGVD
+Addie/M
+Addi/M
+Addison/M
+additional/Y
+addition/MS
+additive/YMS
+additivity
+addle/GDS
+addressability
+addressable/U
+addressed/A
+addressee/SM
+addresser/M
+addresses/A
+address/MDRSZGB
+Addressograph/M
+adduce/GRSD
+adducer/M
+adduct/DGVS
+adduction/M
+adductor/M
+Addy/M
+add/ZGBSDR
+Adelaida/M
+Adelaide/M
+Adela/M
+Adelbert/M
+Adele/M
+Adelheid/M
+Adelice/M
+Adelina/M
+Adelind/M
+Adeline/M
+Adella/M
+Adelle/M
+Adel/M
+Ade/M
+Adena/M
+Adenauer/M
+adenine/SM
+Aden/M
+adenoidal
+adenoid/S
+adeptness/MS
+adept/RYPTS
+adequacy/IMS
+adequate/IPY
+adequateness's/I
+adequateness/SM
+Adey/M
+Adham/M
+Adhara/M
+adherence/SM
+adherent/YMS
+adherer/M
+adhere/ZGRSD
+adhesion/MS
+adhesiveness/MS
+adhesive/PYMS
+adiabatic
+adiabatically
+Adiana/M
+Adidas/M
+adieu/S
+Adi/M
+Adina/M
+adis
+adipose/S
+Adirondack/SM
+adj
+adjacency/MS
+adjacent/Y
+adjectival/Y
+adjective/MYS
+adjoin/SDG
+adjoint/M
+adjourn/DGLS
+adjournment/SM
+adjudge/DSG
+adjudicate/VNGXSD
+adjudication/M
+adjudicator/SM
+adjudicatory
+adjunct/VSYM
+adjuration/SM
+adjure/GSD
+adjustable/U
+adjustably
+adjust/DRALGSB
+adjusted/U
+adjuster's/A
+adjuster/SM
+adjustive
+adjustment/MAS
+adjustor's
+adjutant/SM
+Adkins/M
+Adlai/M
+Adler/M
+adman/M
+admen
+administer/GDJS
+administrable
+administrate/XSDVNG
+administration/M
+administrative/Y
+administrator/MS
+administratrix/M
+admirableness/M
+admirable/P
+admirably
+admiral/SM
+admiralty/MS
+Admiralty/S
+admiration/MS
+admirer/M
+admire/RSDZBG
+admiring/Y
+admissibility/ISM
+admissible/I
+admissibly
+admission/AMS
+admit/AS
+admittance/MS
+admitted/A
+admittedly
+admitting/A
+admix/SDG
+admixture/SM
+Adm/M
+Ad/MN
+admonisher/M
+admonish/GLSRD
+admonishing/Y
+admonishment/SM
+admonition/MS
+admonitory
+adobe/MS
+adolescence/MS
+adolescent/SYM
+Adolf/M
+Adolfo/M
+Adolphe/M
+Adolph/M
+Adolpho/M
+Adolphus/M
+Ado/M
+ado/MS
+Adonis/SM
+adopted/AU
+adopter/M
+adoption/MS
+adoptive/Y
+adopt/RDSBZVG
+adopts/A
+adorableness/SM
+adorable/P
+adorably
+Adora/M
+adoration/SM
+adore/DSRGZB
+Adoree/M
+Adore/M
+adorer/M
+adoring/Y
+adorned/U
+Adorne/M
+adornment/SM
+adorn/SGLD
+ADP
+Adrea/M
+adrenalin
+adrenaline/MS
+Adrenalin/MS
+adrenal/YS
+Adria/MX
+Adriana/M
+Adriane/M
+Adrian/M
+Adrianna/M
+Adrianne/M
+Adriano/M
+Adriatic
+Adriena/M
+Adrien/M
+Adrienne/M
+adrift
+adroitness/MS
+adroit/RTYP
+ads
+ad's
+adsorbate/M
+adsorbent/S
+adsorb/GSD
+adsorption/MS
+adsorptive/Y
+adulate/GNDSX
+adulation/M
+adulator/SM
+adulatory
+adulterant/SM
+adulterated/U
+adulterate/NGSDX
+adulteration/M
+adulterer/SM
+adulteress/MS
+adulterous/Y
+adultery/SM
+adulthood/MS
+adult/MYPS
+adultness/M
+adumbrate/XSDVGN
+adumbration/M
+adumbrative/Y
+adv
+advance/DSRLZG
+advancement/MS
+advancer/M
+advantage/GMEDS
+advantageous/EY
+advantageousness/M
+Adventist/M
+adventist/S
+adventitiousness/M
+adventitious/PY
+adventive/Y
+Advent/SM
+advent/SVM
+adventurer/M
+adventuresome
+adventure/SRDGMZ
+adventuress/SM
+adventurousness/SM
+adventurous/YP
+adverbial/MYS
+adverb/SM
+adversarial
+adversary/SM
+adverse/DSRPYTG
+adverseness/MS
+adversity/SM
+advert/GSD
+advertised/U
+advertise/JGZSRDL
+advertisement/SM
+advertiser/M
+advertising/M
+advertorial/S
+advice/SM
+Advil/M
+advisability/SIM
+advisable/I
+advisableness/M
+advisably
+advisedly/I
+advised/YU
+advisee/MS
+advisement/MS
+adviser/M
+advise/ZRSDGLB
+advisor/S
+advisor's
+advisory/S
+advocacy/SM
+advocate/NGVDS
+advocation/M
+advt
+adze's
+adz/MDSG
+Aegean
+aegis/SM
+Aelfric/M
+Aeneas
+Aeneid/M
+aeolian
+Aeolus/M
+aeon's
+aerate/XNGSD
+aeration/M
+aerator/MS
+aerialist/MS
+aerial/SMY
+Aeriela/M
+Aeriell/M
+Aeriel/M
+aerie/SRMT
+aeroacoustic
+aerobatic/S
+aerobically
+aerobic/S
+aerodrome/SM
+aerodynamically
+aerodynamic/S
+aerodynamics/M
+aeronautical/Y
+aeronautic/S
+aeronautics/M
+aerosolize/D
+aerosol/MS
+aerospace/SM
+Aeschylus/M
+Aesculapius/M
+Aesop/M
+aesthete/S
+aesthetically
+aestheticism/MS
+aesthetics/M
+aesthetic/U
+aether/M
+aetiology/M
+AF
+AFAIK
+afar/S
+AFB
+AFC
+AFDC
+affability/MS
+affable/TR
+affably
+affair/SM
+affectation/MS
+affectedness/EM
+affected/UEYP
+affect/EGSD
+affecter/M
+affecting/Y
+affectionate/UY
+affectioned
+affection/EMS
+affectioning
+affective/MY
+afferent/YS
+affiance/GDS
+affidavit/SM
+affiliated/U
+affiliate/EXSDNG
+affiliation/EM
+affine
+affinity/SM
+affirm/ASDG
+affirmation/SAM
+affirmative/SY
+affix/SDG
+afflatus/MS
+afflict/GVDS
+affliction/SM
+afflictive/Y
+affluence/SM
+affluent/YS
+afford/DSBG
+afforest/A
+afforestation/SM
+afforested
+afforesting
+afforests
+affray/MDSG
+affricate/VNMS
+affrication/M
+affricative/M
+affright
+affront/GSDM
+Afghani/SM
+Afghanistan/M
+afghan/MS
+Afghan/SM
+aficionado/MS
+afield
+afire
+aflame
+afloat
+aflutter
+afoot
+afore
+aforementioned
+aforesaid
+aforethought/S
+afoul
+Afr
+afraid/U
+afresh
+Africa/M
+African/MS
+Afrikaans/M
+Afrikaner/SM
+afro
+Afrocentric
+Afrocentrism/S
+Afro/MS
+afterbirth/M
+afterbirths
+afterburner/MS
+aftercare/SM
+aftereffect/MS
+afterglow/MS
+afterimage/MS
+afterlife/M
+afterlives
+aftermath/M
+aftermaths
+aftermost
+afternoon/SM
+aftershave/S
+aftershock/SM
+afters/M
+aftertaste/SM
+afterthought/MS
+afterward/S
+afterworld/MS
+Afton/M
+aft/ZR
+Agace/M
+again
+against
+Agamemnon/M
+agapae
+agape/S
+agar/MS
+Agassiz/M
+Agata/M
+agate/SM
+Agatha/M
+Agathe/M
+agave/SM
+agedness/M
+aged/PY
+age/GJDRSMZ
+ageism/S
+ageist/S
+agelessness/MS
+ageless/YP
+agency/SM
+agenda/MS
+agent/AMS
+agented
+agenting
+agentive
+ageratum/M
+Aggie/M
+Aggi/M
+agglomerate/XNGVDS
+agglomeration/M
+agglutinate/VNGXSD
+agglutination/M
+agglutinin/MS
+aggrandize/LDSG
+aggrandizement/SM
+aggravate/SDNGX
+aggravating/Y
+aggravation/M
+aggregated/U
+aggregate/EGNVD
+aggregately
+aggregateness/M
+aggregates
+aggregation/SM
+aggregative/Y
+aggression/SM
+aggressively
+aggressiveness/S
+aggressive/U
+aggressor/MS
+aggrieved/Y
+aggrieve/GDS
+Aggy/SM
+aghast
+agile/YTR
+agility/MS
+agitated/Y
+agitate/XVNGSD
+agitation/M
+agitator/SM
+agitprop/MS
+Aglaia/M
+agleam
+aglitter
+aglow
+Ag/M
+Agna/M
+Agnella/M
+Agnese/M
+Agnes/M
+Agnesse/M
+Agneta/M
+Agnew/M
+Agni/M
+Agnola/M
+agnosticism/MS
+agnostic/SM
+ago
+agog
+agonizedly/S
+agonized/Y
+agonize/ZGRSD
+agonizing/Y
+agony/SM
+agoraphobia/MS
+agoraphobic/S
+Agosto/M
+Agra/M
+agrarianism/MS
+agrarian/S
+agreeable/EP
+agreeableness/SME
+agreeably/E
+agreeing/E
+agree/LEBDS
+agreement/ESM
+agreer/S
+Agretha/M
+agribusiness/SM
+Agricola/M
+agriculturalist/S
+agricultural/Y
+agriculture/MS
+agriculturist/SM
+Agrippa/M
+Agrippina/M
+agrochemicals
+agronomic/S
+agronomist/SM
+agronomy/MS
+aground
+Aguascalientes/M
+ague/MS
+Aguie/M
+Aguilar/M
+Aguinaldo/M
+Aguirre/M
+Aguistin/M
+Aguste/M
+Agustin/M
+ah
+Ahab/M
+Aharon/M
+aha/S
+ahead
+ahem/S
+Ahmadabad
+Ahmad/M
+Ahmed/M
+ahoy/S
+Ahriman/M
+AI
+Aida/M
+Aidan/M
+aided/U
+aide/MS
+aider/M
+AIDS
+aid/ZGDRS
+Aigneis/M
+aigrette/SM
+Aiken/M
+Aila/M
+Ailbert/M
+Ailee/M
+Aileen/M
+Aile/M
+Ailene/M
+aileron/MS
+Ailey/M
+Ailina/M
+Aili/SM
+ail/LSDG
+ailment/SM
+Ailsun/M
+Ailyn/M
+Aimee/M
+Aime/M
+aimer/M
+Aimil/M
+aimlessness/MS
+aimless/YP
+aim/ZSGDR
+Aindrea/M
+Ainslee/M
+Ainsley/M
+Ainslie/M
+ain't
+Ainu/M
+airbag/MS
+airbase/S
+airborne
+airbrush/SDMG
+Airbus/M
+airbus/SM
+aircraft/MS
+aircrew/M
+airdrop/MS
+airdropped
+airdropping
+Airedale/SM
+Aires
+airfare/S
+airfield/MS
+airflow/SM
+airfoil/MS
+airframe/MS
+airfreight/SGD
+airhead/MS
+airily
+airiness/MS
+airing/M
+airlessness/S
+airless/P
+airlift/MDSG
+airliner/M
+airline/SRMZ
+airlock/MS
+airmail/DSG
+airman/M
+airmass
+air/MDRTZGJS
+airmen
+airpark
+airplane/SM
+airplay/S
+airport/MS
+airship/MS
+airsickness/SM
+airsick/P
+airspace/SM
+airspeed/SM
+airstrip/MS
+airtightness/M
+airtight/P
+airtime
+airwaves
+airway/SM
+airworthiness/SM
+airworthy/PTR
+airy/PRT
+Aisha/M
+aisle/DSGM
+aitch/MS
+ajar
+Ajax/M
+Ajay/M
+AK
+aka
+Akbar/M
+Akihito/M
+akimbo
+Akim/M
+akin
+Akita/M
+Akkad/M
+Akron/M
+Aksel/M
+AL
+Alabama/M
+Alabaman/S
+Alabamian/MS
+alabaster/MS
+alack/S
+alacrity/SM
+Aladdin/M
+Alaine/M
+Alain/M
+Alair/M
+Alameda/M
+Alamogordo/M
+Alamo/SM
+ala/MS
+Ala/MS
+Alanah/M
+Alana/M
+Aland/M
+Alane/M
+alanine/M
+Alan/M
+Alanna/M
+Alano/M
+Alanson/M
+Alard/M
+Alaric/M
+Alar/M
+alarming/Y
+alarmist/MS
+alarm/SDG
+Alasdair/M
+Alaska/M
+Alaskan/S
+alas/S
+Alastair/M
+Alasteir/M
+Alaster/M
+Alayne/M
+albacore/SM
+alba/M
+Alba/M
+Albania/M
+Albanian/SM
+Albany/M
+albatross/SM
+albedo/M
+Albee/M
+albeit
+Alberich/M
+Alberik/M
+Alberio/M
+Alberta/M
+Albertan/S
+Albertina/M
+Albertine/M
+Albert/M
+Alberto/M
+Albie/M
+Albigensian
+Albina/M
+albinism/SM
+albino/MS
+Albion/M
+Albireo/M
+alb/MS
+Albrecht/M
+albumen/M
+albumin/MS
+albuminous
+album/MNXS
+Albuquerque/M
+Alcatraz/M
+Alcestis/M
+alchemical
+alchemist/SM
+alchemy/MS
+Alcibiades/M
+Alcmena/M
+Alcoa/M
+alcoholically
+alcoholic/MS
+alcoholism/SM
+alcohol/MS
+Alcott/M
+alcove/MSD
+Alcuin/M
+Alcyone/M
+Aldan/M
+Aldebaran/M
+aldehyde/M
+Alden/M
+Alderamin/M
+alderman/M
+aldermen
+alder/SM
+alderwoman
+alderwomen
+Aldin/M
+Aldis/M
+Aldo/M
+Aldon/M
+Aldous/M
+Aldrich/M
+Aldric/M
+Aldridge/M
+Aldrin/M
+Aldus/M
+Aldwin/M
+aleatory
+Alecia/M
+Aleck/M
+Alec/M
+Aleda/M
+alee
+Aleece/M
+Aleen/M
+alehouse/MS
+Aleichem/M
+Alejandra/M
+Alejandrina/M
+Alejandro/M
+Alejoa/M
+Aleksandr/M
+Alembert/M
+alembic/SM
+ale/MVS
+Alena/M
+Alene/M
+aleph/M
+Aleppo/M
+Aler/M
+alerted/Y
+alertness/MS
+alert/STZGPRDY
+Alessandra/M
+Alessandro/M
+Aleta/M
+Alethea/M
+Aleutian/S
+Aleut/SM
+alewife/M
+alewives
+Alexa/M
+Alexander/SM
+Alexandra/M
+Alexandre/M
+Alexandria/M
+Alexandrian/S
+Alexandrina/M
+Alexandr/M
+Alexandro/MS
+Alexei/M
+Alexia/M
+Alexina/M
+Alexine/M
+Alexio/M
+Alexi/SM
+Alex/M
+alfalfa/MS
+Alfa/M
+Alfie/M
+Alfi/M
+Alf/M
+Alfonse/M
+Alfons/M
+Alfonso/M
+Alfonzo/M
+Alford/M
+Alfreda/M
+Alfred/M
+Alfredo/M
+alfresco
+Alfy/M
+algae
+algaecide
+algal
+alga/M
+algebraic
+algebraical/Y
+algebraist/M
+algebra/MS
+Algenib/M
+Algeria/M
+Algerian/MS
+Alger/M
+Algernon/M
+Algieba/M
+Algiers/M
+alginate/SM
+ALGOL
+Algol/M
+Algonquian/SM
+Algonquin/SM
+algorithmic
+algorithmically
+algorithm/MS
+Alhambra/M
+Alhena/M
+Alia/M
+alias/GSD
+alibi/MDSG
+Alica/M
+Alicea/M
+Alice/M
+Alicia/M
+Alick/M
+Alic/M
+Alida/M
+Alidia/M
+Alie/M
+alienable/IU
+alienate/SDNGX
+alienation/M
+alienist/MS
+alien/RDGMBS
+Alighieri/M
+alight/DSG
+aligned/U
+aligner/SM
+align/LASDG
+alignment/SAM
+Alika/M
+Alikee/M
+alikeness/M
+alike/U
+alimentary
+aliment/SDMG
+alimony/MS
+Ali/MS
+Alina/M
+Aline/M
+alinement's
+Alioth/M
+aliquot/S
+Alisa/M
+Alisander/M
+Alisha/M
+Alison/M
+Alissa/M
+Alistair/M
+Alister/M
+Alisun/M
+aliveness/MS
+alive/P
+Alix/M
+aliyah/M
+aliyahs
+Aliza/M
+Alkaid/M
+alkalies
+alkali/M
+alkaline
+alkalinity/MS
+alkalize/SDG
+alkaloid/MS
+alkyd/S
+alkyl/M
+Allahabad/M
+Allah/M
+Alla/M
+Allan/M
+Allard/M
+allay/GDS
+Allayne/M
+Alleen/M
+allegation/SM
+alleged/Y
+allege/SDG
+Allegheny/MS
+allegiance/SM
+allegiant
+allegoric
+allegoricalness/M
+allegorical/YP
+allegorist/MS
+allegory/SM
+Allegra/M
+allegretto/MS
+allegri
+allegro/MS
+allele/SM
+alleluia/S
+allemande/M
+Allendale/M
+Allende/M
+Allene/M
+Allen/M
+Allentown/M
+allergenic
+allergen/MS
+allergic
+allergically
+allergist/MS
+allergy/MS
+alleviate/SDVGNX
+alleviation/M
+alleviator/MS
+Alley/M
+alley/MS
+Alleyn/M
+alleyway/MS
+Allhallows
+alliance/MS
+Allianora/M
+Allie/M
+allier
+allies/M
+alligator/DMGS
+Alli/MS
+Allina/M
+Allin/M
+Allison/M
+Allissa/M
+Allister/M
+Allistir/M
+alliterate/XVNGSD
+alliteration/M
+alliterative/Y
+Allix/M
+allocable/U
+allocatable
+allocate/ACSDNGX
+allocated/U
+allocation/AMC
+allocative
+allocator/AMS
+allophone/MS
+allophonic
+allotment/MS
+allotments/A
+allotrope/M
+allotropic
+allots/A
+allot/SDL
+allotted/A
+allotter/M
+allotting/A
+allover/S
+allowableness/M
+allowable/P
+allowably
+allowance/GSDM
+allowed/Y
+allowing/E
+allow/SBGD
+allows/E
+alloyed/U
+alloy/SGMD
+all/S
+allspice/MS
+Allstate/M
+Allsun/M
+allude/GSD
+allure/GLSD
+allurement/SM
+alluring/Y
+allusion/MS
+allusiveness/MS
+allusive/PY
+alluvial/S
+alluvions
+alluvium/MS
+Allx/M
+ally/ASDG
+Allyce/M
+Ally/MS
+Allyn/M
+Allys
+Allyson/M
+alma
+Almach/M
+Almaden/M
+almagest
+Alma/M
+almanac/MS
+Almaty/M
+Almeda/M
+Almeria/M
+Almeta/M
+almightiness/M
+Almighty/M
+almighty/P
+Almira/M
+Almire/M
+almond/SM
+almoner/MS
+almost
+Al/MRY
+alms/A
+almshouse/SM
+almsman/M
+alnico
+Alnilam/M
+Alnitak/M
+aloe/MS
+aloft
+aloha/SM
+Aloin/M
+Aloise/M
+Aloisia/M
+aloneness/M
+alone/P
+along
+alongshore
+alongside
+Alon/M
+Alonso/M
+Alonzo/M
+aloofness/MS
+aloof/YP
+aloud
+Aloysia/M
+Aloysius/M
+alpaca/SM
+Alpert/M
+alphabetical/Y
+alphabetic/S
+alphabetization/SM
+alphabetizer/M
+alphabetize/SRDGZ
+alphabet/SGDM
+alpha/MS
+alphanumerical/Y
+alphanumeric/S
+Alphard/M
+Alphecca/M
+Alpheratz/M
+Alphonse/M
+Alphonso/M
+Alpine
+alpine/S
+alp/MS
+Alps
+already
+Alric/M
+alright
+Alsace/M
+Alsatian/MS
+also
+Alsop/M
+Alston/M
+Altaic/M
+Altai/M
+Altair/M
+Alta/M
+altar/MS
+altarpiece/SM
+alterable/UI
+alteration/MS
+altercate/NX
+altercation/M
+altered/U
+alternate/SDVGNYX
+alternation/M
+alternativeness/M
+alternative/YMSP
+alternator/MS
+alter/RDZBG
+Althea/M
+although
+altimeter/SM
+Altiplano/M
+altitude/SM
+altogether/S
+Alton/M
+alto/SM
+Altos/M
+altruism/SM
+altruistic
+altruistically
+altruist/SM
+alt/RZS
+ALU
+Aludra/M
+Aluin/M
+Aluino/M
+alumina/SM
+aluminum/MS
+alumnae
+alumna/M
+alumni
+alumnus/MS
+alum/SM
+alundum
+Alva/M
+Alvan/M
+Alvarado/M
+Alvarez/M
+Alvaro/M
+alveolar/Y
+alveoli
+alveolus/M
+Alvera/M
+Alverta/M
+Alvie/M
+Alvina/M
+Alvinia/M
+Alvin/M
+Alvira/M
+Alvis/M
+Alvy/M
+alway/S
+Alwin/M
+Alwyn/M
+Alyce/M
+Alyda/M
+Alyosha/M
+Alysa/M
+Alyse/M
+Alysia/M
+Alys/M
+Alyson/M
+Alyss
+Alyssa/M
+Alzheimer/M
+AM
+AMA
+Amabelle/M
+Amabel/M
+Amadeus/M
+Amado/M
+amain
+Amalea/M
+Amalee/M
+Amaleta/M
+amalgamate/VNGXSD
+amalgamation/M
+amalgam/MS
+Amalia/M
+Amalie/M
+Amalita/M
+Amalle/M
+Amanda/M
+Amandie/M
+Amandi/M
+Amandy/M
+amanuenses
+amanuensis/M
+Amara/M
+amaranth/M
+amaranths
+amaretto/S
+Amargo/M
+Amarillo/M
+amaryllis/MS
+am/AS
+amasser/M
+amass/GRSD
+Amata/M
+amateurishness/MS
+amateurish/YP
+amateurism/MS
+amateur/SM
+Amati/M
+amatory
+amazed/Y
+amaze/LDSRGZ
+amazement/MS
+amazing/Y
+amazonian
+Amazonian
+amazon/MS
+Amazon/SM
+ambassadorial
+ambassador/MS
+ambassadorship/MS
+ambassadress/SM
+ambergris/SM
+Amberly/M
+amber/MS
+Amber/YM
+ambiance/MS
+ambidexterity/MS
+ambidextrous/Y
+ambience's
+ambient/S
+ambiguity/MS
+ambiguously/U
+ambiguousness/M
+ambiguous/YP
+ambition/GMDS
+ambitiousness/MS
+ambitious/PY
+ambit/M
+ambivalence/SM
+ambivalent/Y
+amble/GZDSR
+Amble/M
+ambler/M
+ambrose
+Ambrose/M
+ambrosial/Y
+ambrosia/SM
+Ambrosi/M
+Ambrosio/M
+Ambrosius/M
+Ambros/M
+ambulance/MS
+ambulant/S
+ambulate/DSNGX
+ambulation/M
+ambulatory/S
+Ambur/M
+ambuscade/MGSRD
+ambuscader/M
+ambusher/M
+ambush/MZRSDG
+Amby/M
+Amdahl/M
+ameba's
+Amelia/M
+Amelie/M
+Amelina/M
+Ameline/M
+ameliorate/XVGNSD
+amelioration/M
+Amelita/M
+amenability/SM
+amenably
+amended/U
+amender/M
+amendment/SM
+amen/DRGTSB
+amend/SBRDGL
+amends/M
+Amenhotep/M
+amenity/MS
+amenorrhea/M
+Amerada/M
+Amerasian/S
+amercement/MS
+amerce/SDLG
+Americana/M
+Americanism/SM
+Americanization/SM
+americanized
+Americanize/SDG
+American/MS
+America/SM
+americium/MS
+Amerigo/M
+Amerindian/MS
+Amerind/MS
+Amer/M
+Amery/M
+Ameslan/M
+Ame/SM
+amethystine
+amethyst/MS
+Amharic/M
+Amherst/M
+amiability/MS
+amiableness/M
+amiable/RPT
+amiably
+amicability/SM
+amicableness/M
+amicable/P
+amicably
+amide/SM
+amid/S
+amidships
+amidst
+Amie/M
+Amiga/M
+amigo/MS
+Amii/M
+Amil/M
+Ami/M
+amines
+aminobenzoic
+amino/M
+amir's
+Amish
+amiss
+Amitie/M
+Amity/M
+amity/SM
+Ammamaria/M
+Amman/M
+Ammerman/M
+ammeter/MS
+ammo/MS
+ammoniac
+ammonia/MS
+ammonium/M
+Am/MR
+ammunition/MS
+amnesiac/MS
+amnesia/SM
+amnesic/S
+amnesty/GMSD
+amniocenteses
+amniocentesis/M
+amnion/SM
+amniotic
+Amoco/M
+amoeba/SM
+amoebic
+amoeboid
+amok/MS
+among
+amongst
+Amontillado/M
+amontillado/MS
+amorality/MS
+amoral/Y
+amorousness/SM
+amorous/PY
+amorphousness/MS
+amorphous/PY
+amortization/SUM
+amortized/U
+amortize/SDG
+Amory/M
+Amos
+amount/SMRDZG
+amour/MS
+Amparo/M
+amperage/SM
+Ampere/M
+ampere/MS
+ampersand/MS
+Ampex/M
+amphetamine/MS
+amphibian/SM
+amphibiousness/M
+amphibious/PY
+amphibology/M
+amphitheater/SM
+amphorae
+amphora/M
+ampleness/M
+ample/PTR
+amplification/M
+amplifier/M
+amplify/DRSXGNZ
+amplitude/MS
+ampoule's
+amp/SGMDY
+ampule/SM
+amputate/DSNGX
+amputation/M
+amputee/SM
+Amritsar/M
+ams
+Amsterdam/M
+amt
+Amtrak/M
+amuck's
+amulet/SM
+Amundsen/M
+Amur/M
+amused/Y
+amuse/LDSRGVZ
+amusement/SM
+amuser/M
+amusingness/M
+amusing/YP
+Amway/M
+Amye/M
+amylase/MS
+amyl/M
+Amy/M
+Anabal/M
+Anabaptist/SM
+Anabella/M
+Anabelle/M
+Anabel/M
+anabolic
+anabolism/MS
+anachronism/SM
+anachronistic
+anachronistically
+Anacin/M
+anaconda/MS
+Anacreon/M
+anaerobe/SM
+anaerobic
+anaerobically
+anaglyph/M
+anagrammatic
+anagrammatically
+anagrammed
+anagramming
+anagram/MS
+Anaheim/M
+Analects/M
+analgesia/MS
+analgesic/S
+Analiese/M
+Analise/M
+Anallese/M
+Anallise/M
+analogical/Y
+analogize/SDG
+analogousness/MS
+analogous/YP
+analog/SM
+analogue/SM
+analogy/MS
+anal/Y
+analysand/MS
+analyses
+analysis/AM
+analyst/SM
+analytical/Y
+analyticity/S
+analytic/S
+analytics/M
+analyzable/U
+analyze/DRSZGA
+analyzed/U
+analyzer/M
+Ana/M
+anamorphic
+Ananias/M
+anapaest's
+anapestic/S
+anapest/SM
+anaphora/M
+anaphoric
+anaphorically
+anaplasmosis/M
+anarchic
+anarchical/Y
+anarchism/MS
+anarchistic
+anarchist/MS
+anarchy/MS
+Anastasia/M
+Anastasie/M
+Anastassia/M
+anastigmatic
+anastomoses
+anastomosis/M
+anastomotic
+anathema/MS
+anathematize/GSD
+Anatola/M
+Anatole/M
+Anatolia/M
+Anatolian
+Anatollo/M
+Anatol/M
+anatomic
+anatomical/YS
+anatomist/MS
+anatomize/GSD
+anatomy/MS
+Anaxagoras/M
+Ancell/M
+ancestor/SMDG
+ancestral/Y
+ancestress/SM
+ancestry/SM
+Anchorage/M
+anchorage/SM
+anchored/U
+anchorite/MS
+anchoritism/M
+anchorman/M
+anchormen
+anchorpeople
+anchorperson/S
+anchor/SGDM
+anchorwoman
+anchorwomen
+anchovy/MS
+ancientness/MS
+ancient/SRYTP
+ancillary/S
+an/CS
+Andalusia/M
+Andalusian
+Andaman
+andante/S
+and/DZGS
+Andean/M
+Andeee/M
+Andee/M
+Anderea/M
+Andersen/M
+Anders/N
+Anderson/M
+Andes
+Andie/M
+Andi/M
+andiron/MS
+Andonis/M
+Andorra/M
+Andover/M
+Andra/SM
+Andrea/MS
+Andreana/M
+Andree/M
+Andrei/M
+Andrej/M
+Andre/SM
+Andrew/MS
+Andrey/M
+Andria/M
+Andriana/M
+Andriette/M
+Andris
+androgenic
+androgen/SM
+androgynous
+androgyny/SM
+android/MS
+Andromache/M
+Andromeda/M
+Andropov/M
+Andros/M
+Andrus/M
+Andy/M
+anecdotal/Y
+anecdote/SM
+anechoic
+anemia/SM
+anemically
+anemic/S
+anemometer/MS
+anemometry/M
+anemone/SM
+anent
+aneroid
+Anestassia/M
+anesthesia/MS
+anesthesiologist/MS
+anesthesiology/SM
+anesthetically
+anesthetic/SM
+anesthetist/MS
+anesthetization/SM
+anesthetizer/M
+anesthetize/ZSRDG
+Anet/M
+Anetta/M
+Anette/M
+Anett/M
+aneurysm/MS
+anew
+Angara/M
+Angela/M
+Angeleno/SM
+Angele/SM
+angelfish/SM
+Angelia/M
+angelic
+angelical/Y
+Angelica/M
+angelica/MS
+Angelico/M
+Angelika/M
+Angeli/M
+Angelina/M
+Angeline/M
+Angelique/M
+Angelita/M
+Angelle/M
+Angel/M
+angel/MDSG
+Angelo/M
+Angelou/M
+Ange/M
+anger/GDMS
+Angevin/M
+Angie/M
+Angil/M
+angina/MS
+angiography
+angioplasty/S
+angiosperm/MS
+Angkor/M
+angle/GMZDSRJ
+angler/M
+Angles
+angleworm/MS
+Anglia/M
+Anglicanism/MS
+Anglican/MS
+Anglicism/SM
+Anglicization/MS
+anglicize/SDG
+Anglicize/SDG
+angling/M
+Anglo/MS
+Anglophile/SM
+Anglophilia/M
+Anglophobe/MS
+Anglophobia/M
+Angola/M
+Angolan/S
+angora/MS
+Angora/MS
+angrily
+angriness/M
+angry/RTP
+angst/MS
+ångström/M
+angstrom/MS
+Anguilla/M
+anguish/DSMG
+angularity/MS
+angular/Y
+Angus/M
+Angy/M
+Anheuser/M
+anhydride/M
+anhydrite/M
+anhydrous/Y
+Aniakchak/M
+Ania/M
+Anibal/M
+Anica/M
+aniline/SM
+animadversion/SM
+animadvert/DSG
+animalcule/MS
+animal/MYPS
+animated/A
+animatedly
+animately/I
+animateness/MI
+animates/A
+animate/YNGXDSP
+animating/A
+animation/AMS
+animator/SM
+animism/SM
+animistic
+animist/S
+animized
+animosity/MS
+animus/SM
+anionic/S
+anion/MS
+aniseed/MS
+aniseikonic
+anise/MS
+anisette/SM
+anisotropic
+anisotropy/MS
+Anissa/M
+Anita/M
+Anitra/M
+Anjanette/M
+Anjela/M
+Ankara/M
+ankh/M
+ankhs
+anklebone/SM
+ankle/GMDS
+anklet/MS
+Annabal/M
+Annabela/M
+Annabella/M
+Annabelle/M
+Annabell/M
+Annabel/M
+Annadiana/M
+Annadiane/M
+Annalee/M
+Annaliese/M
+Annalise/M
+annalist/MS
+annal/MNS
+Anna/M
+Annamaria/M
+Annamarie/M
+Annapolis/M
+Annapurna/M
+anneal/DRSZG
+annealer/M
+Annecorinne/M
+annelid/MS
+Anneliese/M
+Annelise/M
+Anne/M
+Annemarie/M
+Annetta/M
+Annette/M
+annexation/SM
+annexe/M
+annex/GSD
+Annice/M
+Annie/M
+annihilate/XSDVGN
+annihilation/M
+annihilator/MS
+Anni/MS
+Annissa/M
+anniversary/MS
+Ann/M
+Annmaria/M
+Annmarie/M
+Annnora/M
+Annora/M
+annotated/U
+annotate/VNGXSD
+annotation/M
+annotator/MS
+announced/U
+announcement/SM
+announcer/M
+announce/ZGLRSD
+annoyance/MS
+annoyer/M
+annoying/Y
+annoy/ZGSRD
+annualized
+annual/YS
+annuitant/MS
+annuity/MS
+annular/YS
+annuli
+annulled
+annulling
+annulment/MS
+annul/SL
+annulus/M
+annum
+annunciate/XNGSD
+annunciation/M
+Annunciation/S
+annunciator/SM
+Anny/M
+anode/SM
+anodic
+anodize/GDS
+anodyne/SM
+anoint/DRLGS
+anointer/M
+anointment/SM
+anomalousness/M
+anomalous/YP
+anomaly/MS
+anomic
+anomie/M
+anon/S
+anonymity/MS
+anonymousness/M
+anonymous/YP
+anopheles/M
+anorak/SM
+anorectic/S
+anorexia/SM
+anorexic/S
+another/M
+Anouilh/M
+Ansell/M
+Ansel/M
+Anselma/M
+Anselm/M
+Anselmo/M
+Anshan/M
+ANSI/M
+Ansley/M
+ans/M
+Anson/M
+Anstice/M
+answerable/U
+answered/U
+answerer/M
+answer/MZGBSDR
+antacid/MS
+Antaeus/M
+antagonism/MS
+antagonistic
+antagonistically
+antagonist/MS
+antagonized/U
+antagonize/GZRSD
+antagonizing/U
+Antananarivo/M
+antarctic
+Antarctica/M
+Antarctic/M
+Antares
+anteater/MS
+antebellum
+antecedence/MS
+antecedent/SMY
+antechamber/SM
+antedate/GDS
+antediluvian/S
+anteing
+antelope/MS
+ante/MS
+antenatal
+antennae
+antenna/MS
+anterior/SY
+anteroom/SM
+ant/GSMD
+Anthea/M
+Anthe/M
+anthem/MGDS
+anther/MS
+Anthia/M
+Anthiathia/M
+anthill/S
+anthologist/MS
+anthologize/GDS
+anthology/SM
+Anthony/M
+anthraces
+anthracite/MS
+anthrax/M
+anthropic
+anthropocentric
+anthropogenic
+anthropoid/S
+anthropological/Y
+anthropologist/MS
+anthropology/SM
+anthropometric/S
+anthropometry/M
+anthropomorphic
+anthropomorphically
+anthropomorphism/SM
+anthropomorphizing
+anthropomorphous
+antiabortion
+antiabortionist/S
+antiaircraft
+antibacterial/S
+antibiotic/SM
+antibody/MS
+anticancer
+Antichrist/MS
+anticipated/U
+anticipate/XVGNSD
+anticipation/M
+anticipative/Y
+anticipatory
+anticked
+anticking
+anticlerical/S
+anticlimactic
+anticlimactically
+anticlimax/SM
+anticline/SM
+anticlockwise
+antic/MS
+anticoagulant/S
+anticoagulation/M
+anticommunism/SM
+anticommunist/SM
+anticompetitive
+anticyclone/MS
+anticyclonic
+antidemocratic
+antidepressant/SM
+antidisestablishmentarianism/M
+antidote/DSMG
+Antietam/M
+antifascist/SM
+antiformant
+antifreeze/SM
+antifundamentalist/M
+antigenic
+antigenicity/SM
+antigen/MS
+antigone
+Antigone/M
+Antigua/M
+antiheroes
+antihero/M
+antihistamine/MS
+antihistorical
+antiknock/MS
+antilabor
+Antillean
+Antilles
+antilogarithm/SM
+antilogs
+antimacassar/SM
+antimalarial/S
+antimatter/SM
+antimicrobial/S
+antimissile/S
+antimony/SM
+anting/M
+Antin/M
+antinomian
+antinomy/M
+antinuclear
+Antioch/M
+antioxidant/MS
+antiparticle/SM
+Antipas/M
+antipasti
+antipasto/MS
+antipathetic
+antipathy/SM
+antipersonnel
+antiperspirant/MS
+antiphonal/SY
+antiphon/SM
+antipodal/S
+antipodean/S
+antipode/MS
+Antipodes
+antipollution/S
+antipoverty
+antiquarianism/MS
+antiquarian/MS
+antiquary/SM
+antiquate/NGSD
+antiquation/M
+antique/MGDS
+antiquity/SM
+antiredeposition
+antiresonance/M
+antiresonator
+anti/S
+antisemitic
+antisemitism/M
+antisepses
+antisepsis/M
+antiseptically
+antiseptic/S
+antiserum/SM
+antislavery/S
+antisocial/Y
+antispasmodic/S
+antisubmarine
+antisymmetric
+antisymmetry
+antitank
+antitheses
+antithesis/M
+antithetic
+antithetical/Y
+antithyroid
+antitoxin/MS
+antitrust/MR
+antivenin/MS
+antiviral/S
+antivivisectionist/S
+antiwar
+antler/SDM
+Antofagasta/M
+Antoine/M
+Antoinette/M
+Antonella/M
+Antone/M
+Antonetta/M
+Antonia/M
+Antonie/M
+Antonietta/M
+Antoni/M
+Antonina/M
+Antonin/M
+Antonino/M
+Antoninus/M
+Antonio/M
+Antonius/M
+Anton/MS
+Antonovics/M
+Antony/M
+antonymous
+antonym/SM
+antral
+antsy/RT
+Antwan/M
+Antwerp/M
+Anubis/M
+anus/SM
+anvil/MDSG
+anxiety/MS
+anxiousness/SM
+anxious/PY
+any
+Anya/M
+anybody/S
+anyhow
+Any/M
+anymore
+anyone/MS
+anyplace
+anything/S
+anytime
+anyway/S
+anywhere/S
+anywise
+AOL/M
+aorta/MS
+aortic
+AP
+apace
+apache/MS
+Apache/MS
+Apalachicola/M
+apartheid/SM
+apart/LP
+apartment/MS
+apartness/M
+apathetic
+apathetically
+apathy/SM
+apatite/MS
+APB
+aped/A
+apelike
+ape/MDRSG
+Apennines
+aper/A
+aperiodic
+aperiodically
+aperiodicity/M
+aperitif/S
+aperture/MDS
+apex/MS
+aphasia/SM
+aphasic/S
+aphelia
+aphelion/SM
+aphid/MS
+aphonic
+aphorism/MS
+aphoristic
+aphoristically
+aphrodisiac/SM
+Aphrodite/M
+Apia/M
+apiarist/SM
+apiary/SM
+apical/YS
+apices's
+apiece
+apishness/M
+apish/YP
+aplenty
+aplomb/SM
+APO
+Apocalypse/M
+apocalypse/MS
+apocalyptic
+apocryphalness/M
+apocryphal/YP
+apocrypha/M
+Apocrypha/M
+apogee/MS
+apolar
+apolitical/Y
+Apollinaire/M
+Apollonian
+Apollo/SM
+apologetically/U
+apologetic/S
+apologetics/M
+apologia/SM
+apologist/MS
+apologize/GZSRD
+apologizer/M
+apologizes/A
+apologizing/U
+apology/MS
+apoplectic
+apoplexy/SM
+apostasy/SM
+apostate/SM
+apostatize/DSG
+apostleship/SM
+apostle/SM
+apostolic
+apostrophe/SM
+apostrophized
+apothecary/MS
+apothegm/MS
+apotheoses
+apotheosis/M
+apotheosized
+apotheosizes
+apotheosizing
+Appalachia/M
+Appalachian/MS
+appalling/Y
+appall/SDG
+Appaloosa/MS
+appaloosa/S
+appanage/M
+apparatus/SM
+apparel/SGMD
+apparency
+apparently/I
+apparentness/M
+apparent/U
+apparition/SM
+appealer/M
+appealing/UY
+appeal/SGMDRZ
+appear/AEGDS
+appearance/AMES
+appearer/S
+appease/DSRGZL
+appeased/U
+appeasement/MS
+appeaser/M
+appellant/MS
+appellate/VNX
+appellation/M
+appellative/MY
+appendage/MS
+appendectomy/SM
+appendices
+appendicitis/SM
+appendix/SM
+append/SGZDR
+appertain/DSG
+appetite/MVS
+appetizer/SM
+appetizing/YU
+Appia/M
+Appian/M
+applauder/M
+applaud/ZGSDR
+applause/MS
+applecart/M
+applejack/MS
+Apple/M
+apple/MS
+applesauce/SM
+Appleseed/M
+Appleton/M
+applet/S
+appliance/SM
+applicabilities
+applicability/IM
+applicable/I
+applicably
+applicant/MS
+applicate/V
+application/MA
+applicative/Y
+applicator/MS
+applier/SM
+appliquéd
+appliqué/MSG
+apply/AGSDXN
+appointee/SM
+appoint/ELSADG
+appointer/MS
+appointive
+appointment/ASEM
+Appolonia/M
+Appomattox/M
+apportion/GADLS
+apportionment/SAM
+appose/SDG
+appositeness/MS
+apposite/XYNVP
+apposition/M
+appositive/SY
+appraisal/SAM
+appraised/A
+appraisees
+appraiser/M
+appraises/A
+appraise/ZGDRS
+appraising/Y
+appreciable/I
+appreciably/I
+appreciated/U
+appreciate/XDSNGV
+appreciation/M
+appreciativeness/MI
+appreciative/PIY
+appreciator/MS
+appreciatory
+apprehend/DRSG
+apprehender/M
+apprehensible
+apprehension/SM
+apprehensiveness/SM
+apprehensive/YP
+apprentice/DSGM
+apprenticeship/SM
+apprise/DSG
+apprizer/SM
+apprizingly
+apprizings
+approachability/UM
+approachable/UI
+approach/BRSDZG
+approacher/M
+approbate/NX
+approbation/EMS
+appropriable
+appropriated/U
+appropriately/I
+appropriateness/SMI
+appropriate/XDSGNVYTP
+appropriation/M
+appropriator/SM
+approval/ESM
+approve/DSREG
+approved/U
+approver's/E
+approver/SM
+approving/YE
+approx
+approximate/XGNVYDS
+approximation/M
+approximative/Y
+appurtenance/MS
+appurtenant/S
+APR
+apricot/MS
+Aprilette/M
+April/MS
+Apr/M
+apron/SDMG
+apropos
+apse/MS
+apsis/M
+apter
+aptest
+aptitude/SM
+aptness/SMI
+aptness's/U
+apt/UPYI
+Apuleius/M
+aquaculture/MS
+aqualung/SM
+aquamarine/SM
+aquanaut/SM
+aquaplane/GSDM
+aquarium/MS
+Aquarius/MS
+aqua/SM
+aquatically
+aquatic/S
+aquavit/SM
+aqueduct/MS
+aqueous/Y
+aquiculture's
+aquifer/SM
+Aquila/M
+aquiline
+Aquinas/M
+Aquino/M
+Aquitaine/M
+AR
+Arabela/M
+Arabele/M
+Arabella/M
+Arabelle/M
+Arabel/M
+arabesque/SM
+Arabia/M
+Arabian/MS
+Arabic/M
+arability/MS
+Arabist/MS
+arable/S
+Arab/MS
+Araby/M
+Araceli/M
+arachnid/MS
+arachnoid/M
+arachnophobia
+Arafat/M
+Araguaya/M
+Araldo/M
+Aral/M
+Ara/M
+Aramaic/M
+Aramco/M
+Arapahoes
+Arapahoe's
+Arapaho/MS
+Ararat/M
+Araucanian/M
+Arawakan/M
+Arawak/M
+arbiter/MS
+arbitrage/GMZRSD
+arbitrager/M
+arbitrageur/S
+arbitrament/MS
+arbitrarily
+arbitrariness/MS
+arbitrary/P
+arbitrate/SDXVNG
+arbitration/M
+arbitrator/SM
+arbor/DMS
+arboreal/Y
+arbores
+arboretum/MS
+arborvitae/MS
+arbutus/SM
+ARC
+arcade/SDMG
+Arcadia/M
+Arcadian
+arcana/M
+arcane/P
+arc/DSGM
+archaeological/Y
+archaeologist/SM
+archaically
+archaic/P
+Archaimbaud/M
+archaism/SM
+archaist/MS
+archaize/GDRSZ
+archaizer/M
+Archambault/M
+archangel/SM
+archbishopric/SM
+archbishop/SM
+archdeacon/MS
+archdiocesan
+archdiocese/SM
+archduchess/MS
+archduke/MS
+Archean
+archenemy/SM
+archeologist's
+archeology/MS
+archer/M
+Archer/M
+archery/MS
+archetypal
+archetype/SM
+archfiend/SM
+archfool
+Archibald/M
+Archibaldo/M
+Archibold/M
+Archie/M
+archiepiscopal
+Archimedes/M
+arching/M
+archipelago/SM
+architect/MS
+architectonic/S
+architectonics/M
+architectural/Y
+architecture/SM
+architrave/MS
+archival
+archive/DRSGMZ
+archived/U
+archivist/MS
+Arch/MR
+archness/MS
+arch/PGVZTMYDSR
+archway/SM
+Archy/M
+arclike
+ARCO/M
+arcsine
+arctangent
+Arctic/M
+arctic/S
+Arcturus/M
+Ardabil
+Arda/MH
+Ardath/M
+Ardeen/M
+Ardelia/M
+Ardelis/M
+Ardella/M
+Ardelle/M
+ardency/M
+Ardene/M
+Ardenia/M
+Arden/M
+ardent/Y
+Ardine/M
+Ardisj/M
+Ardis/M
+Ardith/M
+ardor/SM
+Ardra/M
+arduousness/SM
+arduous/YP
+Ardyce/M
+Ardys
+Ardyth/M
+areal
+area/SM
+areawide
+are/BS
+Arel/M
+arenaceous
+arena/SM
+aren't
+Arequipa/M
+Ares
+Aretha/M
+Argentina/M
+Argentinean/S
+Argentine/SM
+Argentinian/S
+argent/MS
+arginine/MS
+Argonaut/MS
+argonaut/S
+argon/MS
+Argonne/M
+Argo/SM
+argosy/SM
+argot/SM
+arguable/IU
+arguably/IU
+argue/DSRGZ
+arguer/M
+argumentation/SM
+argumentativeness/MS
+argumentative/YP
+argument/SM
+Argus/M
+argyle/S
+Ariadne/M
+Ariana/M
+Arianism/M
+Arianist/SM
+aria/SM
+Aridatha/M
+aridity/SM
+aridness/M
+arid/TYRP
+Ariela/M
+Ariella/M
+Arielle/M
+Ariel/M
+Arie/SM
+Aries/S
+aright
+Ari/M
+Arin/M
+Ario/M
+Ariosto/M
+arise/GJSR
+arisen
+Aristarchus/M
+Aristides
+aristocracy/SM
+aristocratic
+aristocratically
+aristocrat/MS
+Aristophanes/M
+Aristotelean
+Aristotelian/M
+Aristotle/M
+arithmetical/Y
+arithmetician/SM
+arithmetic/MS
+arithmetize/SD
+Arius/M
+Ariz/M
+Arizona/M
+Arizonan/S
+Arizonian/S
+Arjuna/M
+Arkansan/MS
+Arkansas/M
+Arkhangelsk/M
+Ark/M
+ark/MS
+Arkwright/M
+Arlana/M
+Arlan/M
+Arlee/M
+Arleen/M
+Arlena/M
+Arlene/M
+Arlen/M
+Arleta/M
+Arlette/M
+Arley/M
+Arleyne/M
+Arlie/M
+Arliene/M
+Arlina/M
+Arlinda/M
+Arline/M
+Arlington/M
+Arlin/M
+Arluene/M
+Arly/M
+Arlyne/M
+Arlyn/M
+Armada/M
+armada/SM
+armadillo/MS
+Armageddon/SM
+Armagnac/M
+armament/EAS
+armament's/E
+Armand/M
+Armando/M
+Arman/M
+arm/ASEDG
+Armata/M
+armature/MGSD
+armband/SM
+armchair/MS
+Armco/M
+armed/U
+Armenia/M
+Armenian/MS
+armer/MES
+armful/SM
+armhole/MS
+arming/M
+Arminius/M
+Armin/M
+armistice/MS
+armless
+armlet/SM
+armload/M
+Armonk/M
+armored/U
+armorer/M
+armorial/S
+armory/DSM
+armor/ZRDMGS
+Armour/M
+armpit/MS
+armrest/MS
+arm's
+Armstrong/M
+Ar/MY
+army/SM
+Arnaldo/M
+Arneb/M
+Arne/M
+Arney/M
+Arnhem/M
+Arnie/M
+Arni/M
+Arnold/M
+Arnoldo/M
+Arno/M
+Arnuad/M
+Arnulfo/M
+Arny/M
+aroma/SM
+aromatherapist/S
+aromatherapy/S
+aromatically
+aromaticity/M
+aromaticness/M
+aromatic/SP
+Aron/M
+arose
+around
+arousal/MS
+aroused/U
+arouse/GSD
+ARPA/M
+Arpanet/M
+ARPANET/M
+arpeggio/SM
+arrack/M
+Arragon/M
+arraignment/MS
+arraign/SDGL
+arrangeable/A
+arranged/EA
+arrangement/AMSE
+arranger/M
+arranges/EA
+arrange/ZDSRLG
+arranging/EA
+arrant/Y
+arras/SM
+arrayer
+array/ESGMD
+arrear/SM
+arrest/ADSG
+arrestee/MS
+arrester/MS
+arresting/Y
+arrestor/MS
+Arrhenius/M
+arrhythmia/SM
+arrhythmic
+arrhythmical
+Arri/M
+arrival/MS
+arriver/M
+arrive/SRDG
+arrogance/MS
+arrogant/Y
+arrogate/XNGDS
+arrogation/M
+Arron/M
+arrowhead/SM
+arrowroot/MS
+arrow/SDMG
+arroyo/MS
+arr/TV
+arsenal/MS
+arsenate/M
+arsenic/MS
+arsenide/M
+arsine/MS
+arsonist/MS
+arson/SM
+Artair/M
+Artaxerxes/M
+artefact's
+Arte/M
+Artemas
+Artemis/M
+Artemus/M
+arterial/SY
+arteriolar
+arteriole/SM
+arterioscleroses
+arteriosclerosis/M
+artery/SM
+artesian
+artfulness/SM
+artful/YP
+Arther/M
+arthritic/S
+arthritides
+arthritis/M
+arthrogram/MS
+arthropod/SM
+arthroscope/S
+arthroscopic
+Arthurian
+Arthur/M
+artichoke/SM
+article/GMDS
+articulable/I
+articular
+articulated/EU
+articulately/I
+articulateness/IMS
+articulates/I
+articulate/VGNYXPSD
+articulation/M
+articulator/SM
+articulatory
+Artie/M
+artifact/MS
+artificer/M
+artifice/ZRSM
+artificiality/MS
+artificialness/M
+artificial/PY
+artillerist
+artilleryman/M
+artillerymen
+artillery/SM
+artiness/MS
+artisan/SM
+artiste/SM
+artistically/I
+artistic/I
+artist/MS
+artistry/SM
+artlessness/MS
+artless/YP
+Art/M
+art/SM
+artsy/RT
+Artur/M
+Arturo/M
+Artus/M
+artwork/MS
+Arty/M
+arty/TPR
+Aruba/M
+arum/MS
+Arvie/M
+Arvin/M
+Arv/M
+Arvy/M
+Aryan/MS
+Aryn/M
+as
+As
+A's
+Asa/M
+Asama/M
+asap
+ASAP
+asbestos/MS
+Ascella/M
+ascend/ADGS
+ascendancy/MS
+ascendant/SY
+ascender/SM
+Ascension/M
+ascension/SM
+ascent/SM
+ascertain/DSBLG
+ascertainment/MS
+ascetically
+asceticism/MS
+ascetic/SM
+ASCII
+ascot/MS
+ascribe/GSDB
+ascription/MS
+ascriptive
+Ase/M
+aseptically
+aseptic/S
+asexuality/MS
+asexual/Y
+Asgard/M
+ashame/D
+ashamed/UY
+Ashanti/M
+Ashbey/M
+Ashby/M
+ashcan/SM
+Ashely/M
+Asher/M
+Asheville/M
+Ashia/M
+Ashien/M
+Ashil/M
+Ashkenazim
+Ashkhabad/M
+Ashla/M
+Ashland/M
+Ashlan/M
+ashlar/GSDM
+Ashlee/M
+Ashleigh/M
+Ashlen/M
+Ashley/M
+Ashlie/M
+Ashli/M
+Ashlin/M
+Ashly/M
+ashman/M
+ash/MNDRSG
+Ashmolean/M
+Ash/MRY
+ashore
+ashram/SM
+Ashton/M
+ashtray/MS
+Ashurbanipal/M
+ashy/RT
+Asia/M
+Asian/MS
+Asiatic/SM
+aside/S
+Asilomar/M
+Asimov
+asinine/Y
+asininity/MS
+askance
+ask/DRZGS
+asked/U
+asker/M
+askew/P
+ASL
+aslant
+asleep
+Asmara/M
+asocial/S
+Asoka/M
+asparagus/MS
+aspartame/S
+ASPCA
+aspect/SM
+Aspell/M
+aspen/M
+Aspen/M
+asperity/SM
+asper/M
+aspersion/SM
+asphalt/MDRSG
+asphodel/MS
+asphyxia/MS
+asphyxiate/GNXSD
+asphyxiation/M
+aspic/MS
+Aspidiske/M
+aspidistra/MS
+aspirant/MS
+aspirate/NGDSX
+aspirational
+aspiration/M
+aspirator/SM
+aspire/GSRD
+aspirer/M
+aspirin/SM
+asplenium
+asp/MNRXS
+Asquith/M
+Assad/M
+assailable/U
+assailant/SM
+assail/BGDS
+Assamese/M
+Assam/M
+assassinate/DSGNX
+assassination/M
+assassin/MS
+assaulter/M
+assaultive/YP
+assault/SGVMDR
+assayer/M
+assay/SZGRD
+assemblage/MS
+assemble/ADSREG
+assembled/U
+assembler/EMS
+assemblies/A
+assembly/EAM
+assemblyman/M
+assemblymen
+Assembly/MS
+assemblywoman
+assemblywomen
+assent/SGMRD
+assert/ADGS
+asserter/MS
+assertional
+assertion/AMS
+assertiveness/SM
+assertive/PY
+assess/BLSDG
+assessed/A
+assesses/A
+assessment/SAM
+assessor/MS
+asset/SM
+asseverate/XSDNG
+asseveration/M
+asshole/MS!
+assiduity/SM
+assiduousness/SM
+assiduous/PY
+assign/ALBSGD
+assignation/MS
+assigned/U
+assignee/MS
+assigner/MS
+assignment/MAS
+assignor/MS
+assigns/CU
+assimilate/VNGXSD
+assimilationist/M
+assimilation/M
+Assisi/M
+assistance/SM
+assistantship/SM
+assistant/SM
+assisted/U
+assister/M
+assist/RDGS
+assize/MGSD
+ass/MNS
+assn
+assoc
+associable
+associated/U
+associate/SDEXNG
+associateship
+associational
+association/ME
+associative/Y
+associativity/S
+associator/MS
+assonance/SM
+assonant/S
+assorter/M
+assort/LRDSG
+assortment/SM
+asst
+assuaged/U
+assuage/SDG
+assumability
+assumer/M
+assume/SRDBJG
+assuming/UA
+assumption/SM
+assumptive
+assurance/AMS
+assure/AGSD
+assuredness/M
+assured/PYS
+assurer/SM
+assuring/YA
+Assyria/M
+Assyrian/SM
+Assyriology/M
+Astaire/SM
+Astarte/M
+astatine/MS
+aster/ESM
+asteria
+asterisked/U
+asterisk/SGMD
+astern
+asteroidal
+asteroid/SM
+asthma/MS
+asthmatic/S
+astigmatic/S
+astigmatism/SM
+astir
+astonish/GSDL
+astonishing/Y
+astonishment/SM
+Aston/M
+Astoria/M
+Astor/M
+astounding/Y
+astound/SDG
+astraddle
+Astrakhan/M
+astrakhan/SM
+astral/SY
+Astra/M
+astray
+astride
+Astrid/M
+astringency/SM
+astringent/YS
+Astérix/M
+astrolabe/MS
+astrologer/MS
+astrological/Y
+astrologist/M
+astrology/SM
+astronautical
+astronautic/S
+astronautics/M
+astronaut/SM
+astronomer/MS
+astronomic
+astronomical/Y
+astronomy/SM
+astrophysical
+astrophysicist/SM
+astrophysics/M
+Astroturf/M
+AstroTurf/S
+Asturias/M
+astuteness/MS
+astute/RTYP
+Asunción/M
+asunder
+Aswan/M
+asylum/MS
+asymmetric
+asymmetrical/Y
+asymmetry/MS
+asymptomatic
+asymptomatically
+asymptote/MS
+asymptotically
+asymptotic/Y
+asynchronism/M
+asynchronous/Y
+asynchrony
+at
+Atacama/M
+Atahualpa/M
+Atalanta/M
+Atari/M
+Atatürk/M
+atavism/MS
+atavistic
+atavist/MS
+ataxia/MS
+ataxic/S
+atelier/SM
+atemporal
+ate/S
+Athabasca/M
+Athabascan's
+Athabaskan/MS
+Athabaska's
+atheism/SM
+atheistic
+atheist/SM
+Athena/M
+Athene/M
+Athenian/SM
+Athens/M
+atheroscleroses
+atherosclerosis/M
+athirst
+athlete/MS
+athletically
+athleticism/M
+athletic/S
+athletics/M
+athwart
+atilt
+Atkins/M
+Atkinson/M
+Atlanta/M
+Atlante/MS
+atlantes
+Atlantic/M
+Atlantis/M
+atlas/SM
+Atlas/SM
+At/M
+Atman
+ATM/M
+atmosphere/DSM
+atmospherically
+atmospheric/S
+atoll/MS
+atomically
+atomicity/M
+atomic/S
+atomics/M
+atomistic
+atomization/SM
+atomize/GZDRS
+atomizer/M
+atom/SM
+atonality/MS
+atonal/Y
+atone/LDSG
+atonement/SM
+atop
+ATP
+Atreus/M
+atria
+atrial
+Atria/M
+atrium/M
+atrociousness/SM
+atrocious/YP
+atrocity/SM
+atrophic
+atrophy/DSGM
+atropine/SM
+Atropos/M
+Ats
+attach/BLGZMDRS
+attached/UA
+attacher/M
+attaché/S
+attachment/ASM
+attacker/M
+attack/GBZSDR
+attainabilities
+attainability/UM
+attainableness/M
+attainable/U
+attainably/U
+attain/AGSD
+attainder/MS
+attained/U
+attainer/MS
+attainment/MS
+attar/MS
+attempt/ADSG
+attempter/MS
+attendance/MS
+attendant/SM
+attended/U
+attendee/SM
+attender/M
+attend/SGZDR
+attentional
+attentionality
+attention/IMS
+attentiveness/IMS
+attentive/YIP
+attenuated/U
+attenuate/SDXGN
+attenuation/M
+attenuator/MS
+attestation/SM
+attested/U
+attester/M
+attest/GSDR
+Attic
+Attica/M
+attic/MS
+Attila/M
+attire/SDG
+attitude/MS
+attitudinal/Y
+attitudinize/SDG
+Attlee/M
+attn
+Attn
+attorney/SM
+attractant/SM
+attract/BSDGV
+attraction/MS
+attractivenesses
+attractiveness/UM
+attractive/UYP
+attractor/MS
+attributable/U
+attribute/BVNGRSDX
+attributed/U
+attributer/M
+attributional
+attribution/M
+attributive/SY
+attrition/MS
+Attucks
+attune/SDG
+atty
+ATV/S
+atwitter
+Atwood/M
+atypical/Y
+Aube/M
+Auberge/M
+aubergine/MS
+Auberon/M
+Auberta/M
+Aubert/M
+Aubine/M
+Aubree/M
+Aubrette/M
+Aubrey/M
+Aubrie/M
+Aubry/M
+auburn/SM
+Auckland/M
+auctioneer/SDMG
+auction/MDSG
+audaciousness/SM
+audacious/PY
+audacity/MS
+Auden/M
+audibility/MSI
+audible/I
+audibles
+audibly/I
+Audie/M
+audience/MS
+Audi/M
+audiogram/SM
+audiological
+audiologist/MS
+audiology/SM
+audiometer/MS
+audiometric
+audiometry/M
+audiophile/SM
+audio/SM
+audiotape/S
+audiovisual/S
+audited/U
+audition/MDSG
+auditorium/MS
+auditor/MS
+auditory/S
+audit/SMDVG
+Audra/M
+Audre/M
+Audrey/M
+Audrie/M
+Audrye/M
+Audry/M
+Audubon/M
+Audy/M
+Auerbach/M
+Augean
+auger/SM
+aught/S
+Augie/M
+Aug/M
+augmentation/SM
+augmentative/S
+augment/DRZGS
+augmenter/M
+augur/GDMS
+augury/SM
+Augusta/M
+Augustan/S
+Auguste/M
+Augustina/M
+Augustine/M
+Augustinian/S
+Augustin/M
+augustness/SM
+Augusto/M
+August/SM
+august/STPYR
+Augustus/M
+Augy/M
+auk/MS
+Au/M
+Aundrea/M
+auntie/MS
+aunt/MYS
+aunty's
+aural/Y
+Aura/M
+aura/SM
+Aurea/M
+Aurelea/M
+Aurelia/M
+Aurelie/M
+Aurelio/M
+Aurelius/M
+Aurel/M
+aureole/GMSD
+aureomycin
+Aureomycin/M
+Auria/M
+auric
+auricle/SM
+auricular
+Aurie/M
+Auriga/M
+Aurilia/M
+Aurlie/M
+Auroora/M
+auroral
+Aurora/M
+aurora/SM
+Aurore/M
+Aurthur/M
+Auschwitz/M
+auscultate/XDSNG
+auscultation/M
+auspice/SM
+auspicious/IPY
+auspiciousnesses
+auspiciousness/IM
+Aussie/MS
+Austen/M
+austereness/M
+austere/TYRP
+austerity/SM
+Austina/M
+Austine/M
+Austin/SM
+austral
+Australasia/M
+Australasian/S
+australes
+Australia/M
+Australian/MS
+Australis/M
+australites
+Australoid
+Australopithecus/M
+Austria/M
+Austrian/SM
+Austronesian
+authentically
+authenticated/U
+authenticate/GNDSX
+authentication/M
+authenticator/MS
+authenticity/MS
+authentic/UI
+author/DMGS
+authoress/S
+authorial
+authoritarianism/MS
+authoritarian/S
+authoritativeness/SM
+authoritative/PY
+authority/SM
+authorization/MAS
+authorize/AGDS
+authorized/U
+authorizer/SM
+authorizes/U
+authorship/MS
+autism/MS
+autistic/S
+autobahn/MS
+autobiographer/MS
+autobiographic
+autobiographical/Y
+autobiography/MS
+autoclave/SDGM
+autocollimator/M
+autocorrelate/GNSDX
+autocorrelation/M
+autocracy/SM
+autocratic
+autocratically
+autocrat/SM
+autodial/R
+autodidact/MS
+autofluorescence
+autograph/MDG
+autographs
+autoignition/M
+autoimmune
+autoimmunity/S
+autoloader
+automaker/S
+automata's
+automate/NGDSX
+automatically
+automatic/S
+automation/M
+automatism/SM
+automatize/DSG
+automaton/SM
+automobile/GDSM
+automorphism/SM
+automotive
+autonavigator/SM
+autonomic/S
+autonomous/Y
+autonomy/MS
+autopilot/SM
+autopsy/MDSG
+autoregressive
+autorepeat/GS
+auto/SDMG
+autostart
+autosuggestibility/M
+autotransformer/M
+autoworker/S
+autumnal/Y
+Autumn/M
+autumn/MS
+aux
+auxiliary/S
+auxin/MS
+AV
+availability/USM
+availableness/M
+available/U
+availably
+avail/BSZGRD
+availing/U
+avalanche/MGSD
+Avalon/M
+Ava/M
+avant
+avarice/SM
+avariciousness/M
+avaricious/PY
+avast/S
+avatar/MS
+avaunt/S
+avdp
+Aveline/M
+Ave/MS
+avenged/U
+avenger/M
+avenge/ZGSRD
+Aventine/M
+Aventino/M
+avenue/MS
+average/DSPGYM
+Averell/M
+Averill/M
+Averil/M
+Avernus/M
+averred
+averrer
+averring
+Averroes/M
+averseness/M
+averse/YNXP
+aversion/M
+avers/V
+avert/GSD
+Averyl/M
+Avery/M
+ave/S
+aves/C
+Avesta/M
+avg
+avian/S
+aviary/SM
+aviate/NX
+aviation/M
+aviator/SM
+aviatrices
+aviatrix/SM
+Avicenna/M
+Avictor/M
+avidity/MS
+avid/TPYR
+Avie/M
+Avigdor/M
+Avignon/M
+Avila/M
+avionic/S
+avionics/M
+Avior/M
+Avis
+avitaminoses
+avitaminosis/M
+Avivah/M
+Aviva/M
+Aviv/M
+avocado/MS
+avocational
+avocation/SM
+Avogadro/M
+avoidable/U
+avoidably/U
+avoidance/SM
+avoider/M
+avoid/ZRDBGS
+avoirdupois/MS
+Avon/M
+avouch/GDS
+avowal/EMS
+avowed/Y
+avower/M
+avow/GEDS
+Avram/M
+Avril/M
+Avrit/M
+Avrom/M
+avuncular
+av/ZR
+AWACS
+await/SDG
+awake/GS
+awakened/U
+awakener/M
+awakening/S
+awaken/SADG
+awarder/M
+award/RDSZG
+awareness/MSU
+aware/TRP
+awash
+away/PS
+aweigh
+awe/SM
+awesomeness/SM
+awesome/PY
+awestruck
+awfuller
+awfullest
+awfulness/SM
+awful/YP
+aw/GD
+awhile/S
+awkwardness/MS
+awkward/PRYT
+awl/MS
+awning/DM
+awn/MDJGS
+awoke
+awoken
+AWOL
+awry/RT
+ax/DRSZGM
+axehead/S
+Axel/M
+Axe/M
+axeman
+axial/Y
+axillary
+axiological/Y
+axiology/M
+axiomatically
+axiomatic/S
+axiomatization/MS
+axiomatize/GDS
+axiom/SM
+axion/SM
+axis/SM
+axle/MS
+axletree/MS
+Ax/M
+axolotl/SM
+axon/SM
+ayah/M
+ayahs
+Ayala/M
+ayatollah
+ayatollahs
+aye/MZRS
+Ayers
+Aylmar/M
+Aylmer/M
+Aymara/M
+Aymer/M
+Ayn/M
+AZ
+azalea/SM
+Azania/M
+Azazel/M
+Azerbaijan/M
+azimuthal/Y
+azimuth/M
+azimuths
+Azores
+Azov/M
+AZT
+Aztecan
+Aztec/MS
+azure/MS
+BA
+Baal/SM
+baa/SDG
+Babara/M
+Babar's
+Babbage/M
+Babbette/M
+Babbie/M
+babbitt/GDS
+Babbitt/M
+babbler/M
+babble/RSDGZ
+Babb/M
+Babcock/M
+Babel/MS
+babel/S
+babe/SM
+Babette/M
+Babita/M
+Babka/M
+baboon/MS
+Bab/SM
+babushka/MS
+babyhood/MS
+babyish
+Babylonia/M
+Babylonian/SM
+Babylon/MS
+babysat
+babysit/S
+babysitter/S
+babysitting
+baby/TDSRMG
+Bacall/M
+Bacardi/M
+baccalaureate/MS
+baccarat/SM
+bacchanalia
+Bacchanalia/M
+bacchanalian/S
+bacchanal/SM
+Bacchic
+Bacchus/M
+bachelorhood/SM
+bachelor/SM
+Bach/M
+bacillary
+bacilli
+bacillus/MS
+backache/SM
+backarrow
+backbencher/M
+backbench/ZR
+backbiter/M
+backbite/S
+backbitten
+backbit/ZGJR
+backboard/SM
+backbone/SM
+backbreaking
+backchaining
+backcloth/M
+backdate/GDS
+backdrop/MS
+backdropped
+backdropping
+backed/U
+backer/M
+backfield/SM
+backfill/SDG
+backfire/GDS
+backgammon/MS
+background/SDRMZG
+back/GZDRMSJ
+backhanded/Y
+backhander/M
+backhand/RDMSZG
+backhoe/S
+backing/M
+backlash/GRSDM
+backless
+backlogged
+backlogging
+backlog/MS
+backorder
+backpacker/M
+backpack/ZGSMRD
+backpedal/DGS
+backplane/MS
+backplate/SM
+backrest/MS
+backscatter/SMDG
+backseat/S
+backside/SM
+backslapper/MS
+backslapping/M
+backslash/DSG
+backslider/M
+backslide/S
+backslid/RZG
+backspace/GSD
+backspin/SM
+backstabber/M
+backstabbing
+backstage
+backstair/S
+backstitch/GDSM
+backstop/MS
+backstopped
+backstopping
+backstreet/M
+backstretch/SM
+backstroke/GMDS
+backtalk/S
+backtrack/SDRGZ
+backup/SM
+Backus/M
+backwardness/MS
+backward/YSP
+backwash/SDMG
+backwater/SM
+backwood/S
+backwoodsman/M
+backwoodsmen
+backyard/MS
+baconer/M
+Bacon/M
+bacon/SRM
+bacterial/Y
+bacteria/MS
+bactericidal
+bactericide/SM
+bacteriologic
+bacteriological
+bacteriologist/MS
+bacteriology/SM
+bacterium/M
+Bactria/M
+badder
+baddest
+baddie/MS
+bade
+Baden/M
+badge/DSRGMZ
+badger/DMG
+badinage/DSMG
+badland/S
+Badlands/M
+badman/M
+badmen
+badminton/MS
+badmouth/DG
+badmouths
+badness/SM
+bad/PSNY
+Baedeker/SM
+Baez/M
+Baffin/M
+bafflement/MS
+baffler/M
+baffle/RSDGZL
+baffling/Y
+bagatelle/MS
+bagel/SM
+bagful/MS
+baggageman
+baggagemen
+baggage/SM
+bagged/M
+bagger/SM
+baggily
+bagginess/MS
+bagging/M
+baggy/PRST
+Baghdad/M
+bagpiper/M
+bagpipe/RSMZ
+Bagrodia/MS
+bag/SM
+baguette/SM
+Baguio/M
+bah
+Baha'i
+Bahama/MS
+Bahamanian/S
+Bahamian/MS
+Baha'ullah
+Bahia/M
+Bahrain/M
+bahs
+Baikal/M
+Bailey/SM
+bail/GSMYDRB
+Bailie/M
+bailiff/SM
+bailiwick/MS
+Baillie/M
+Bail/M
+bailout/MS
+bailsman/M
+bailsmen
+Baily/M
+Baird/M
+bairn/SM
+baiter/M
+bait/GSMDR
+baize/GMDS
+Baja/M
+baked/U
+bakehouse/M
+Bakelite/M
+baker/M
+Baker/M
+Bakersfield/M
+bakery/SM
+bakeshop/S
+bake/ZGJDRS
+baking/M
+baklava/M
+baksheesh/SM
+Baku/M
+Bakunin/M
+balaclava/MS
+balalaika/MS
+balanced/A
+balancedness
+balancer/MS
+balance's
+balance/USDG
+Balanchine/M
+Balboa/M
+balboa/SM
+balcony/MSD
+balderdash/MS
+Balder/M
+baldfaced
+Bald/MR
+baldness/MS
+bald/PYDRGST
+baldric/SM
+Balduin/M
+Baldwin/M
+baldy
+Balearic/M
+baleen/MS
+balefuller
+balefullest
+balefulness/MS
+baleful/YP
+Bale/M
+bale/MZGDRS
+baler/M
+Balfour/M
+Bali/M
+Balinese
+balkanization
+balkanize/DG
+Balkan/SM
+balker/M
+balk/GDRS
+Balkhash/M
+balkiness/M
+balky/PRT
+balladeer/MS
+ballade/MS
+balladry/MS
+ballad/SM
+Ballard/SM
+ballast/SGMD
+ballcock/S
+ballerina/MS
+baller/M
+balletic
+ballet/MS
+ballfields
+ballgame/S
+ball/GZMSDR
+ballistic/S
+ballistics/M
+Ball/M
+balloonist/S
+balloon/RDMZGS
+balloter/M
+ballot/MRDGS
+ballpark/SM
+ballplayer/SM
+ballpoint/SM
+ballroom/SM
+ballsy/TR
+ballyhoo/SGMD
+balminess/SM
+balm/MS
+balmy/PRT
+baloney/SM
+balsam/GMDS
+balsamic
+balsa/MS
+Balthazar/M
+Baltic/M
+Baltimore/M
+Baluchistan/M
+baluster/MS
+balustrade/SM
+Balzac/M
+Ba/M
+Bamako/M
+Bamberger/M
+Bambie/M
+Bambi/M
+bamboo/SM
+bamboozle/GSD
+Bamby/M
+Banach/M
+banality/MS
+banal/TYR
+banana/SM
+Bancroft/M
+bandager/M
+bandage/RSDMG
+bandanna/SM
+bandbox/MS
+bandeau/M
+bandeaux
+band/EDGS
+bander/M
+banding/M
+bandit/MS
+banditry/MS
+bandmaster/MS
+bandoleer/SM
+bandpass
+band's
+bandsman/M
+bandsmen
+bandstand/SM
+bandstop
+Bandung/M
+bandwagon/MS
+bandwidth/M
+bandwidths
+bandy/TGRSD
+banefuller
+banefullest
+baneful/Y
+bane/MS
+Bangalore/M
+banger/M
+bang/GDRZMS
+bangkok
+Bangkok/M
+Bangladeshi/S
+Bangladesh/M
+bangle/MS
+Bangor/M
+Bangui/M
+bani
+banisher/M
+banishment/MS
+banish/RSDGL
+banister/MS
+Banjarmasin/M
+banjoist/SM
+banjo/MS
+Banjul/M
+bankbook/SM
+bankcard/S
+banker/M
+bank/GZJDRMBS
+banking/M
+Bank/MS
+banknote/S
+bankroll/DMSG
+bankruptcy/MS
+bankrupt/DMGS
+Banky/M
+Ban/M
+banned/U
+Banneker/M
+banner/SDMG
+banning/U
+Bannister/M
+bannister's
+bannock/SM
+banns
+banqueter/M
+banquet/SZGJMRD
+banquette/MS
+ban/SGMD
+banshee/MS
+bans/U
+bantam/MS
+bantamweight/MS
+banterer/M
+bantering/Y
+banter/RDSG
+Banting/M
+Bantu/SM
+banyan/MS
+banzai/S
+baobab/SM
+Baotou/M
+baptismal/Y
+baptism/SM
+Baptiste/M
+baptistery/MS
+baptist/MS
+Baptist/MS
+baptistry's
+baptized/U
+baptizer/M
+baptize/SRDZG
+baptizes/U
+Barabbas/M
+Barbabas/M
+Barbabra/M
+Barbadian/S
+Barbados/M
+Barbaraanne/M
+Barbara/M
+Barbarella/M
+barbarianism/MS
+barbarian/MS
+barbaric
+barbarically
+barbarism/MS
+barbarity/SM
+barbarize/SDG
+Barbarossa/M
+barbarousness/M
+barbarous/PY
+Barbary/M
+barb/DRMSGZ
+barbecue/DRSMG
+barbed/P
+Barbee/M
+barbell/SM
+barbel/MS
+Barbe/M
+barbeque's
+barber/DMG
+barbered/U
+Barber/M
+barberry/MS
+barbershop/MS
+Barbette/M
+Barbey/M
+Barbie/M
+Barbi/M
+barbital/M
+barbiturate/MS
+Barbour/M
+Barbra/M
+Barb/RM
+Barbuda/M
+barbwire/SM
+Barby/M
+barcarole/SM
+Barcelona/M
+Barclay/M
+Bardeen/M
+Barde/M
+bardic
+Bard/M
+bard/MDSG
+bareback/D
+barefacedness/M
+barefaced/YP
+barefoot/D
+barehanded
+bareheaded
+barelegged
+bareness/MS
+Barents/M
+bare/YSP
+barfly/SM
+barf/YDSG
+bargainer/M
+bargain/ZGSDRM
+barge/DSGM
+bargeman/M
+bargemen
+bargepole/M
+barhopped
+barhopping
+barhop/S
+Bari/M
+baritone/MS
+barium/MS
+barked/C
+barkeeper/M
+barkeep/SRZ
+barker/M
+Barker/M
+bark/GZDRMS
+Barkley/M
+barks/C
+barleycorn/MS
+barley/MS
+Barlow/M
+barmaid/SM
+barman/M
+barmen
+Bar/MH
+Barnabas
+Barnabe/M
+Barnaby/M
+barnacle/MDS
+Barnard/M
+Barnaul/M
+Barnebas/M
+Barnes
+Barnett/M
+Barney/M
+barnful
+barn/GDSM
+Barnhard/M
+Barnie/M
+Barn/M
+barnsful
+barnstorm/DRGZS
+barnstormer/M
+Barnum/M
+barnyard/MS
+Barny/M
+Baroda/M
+barometer/MS
+barometric
+barometrically
+baronage/MS
+baroness/MS
+baronetcy/SM
+baronet/MS
+baronial
+Baron/M
+baron/SM
+barony/SM
+baroque/SPMY
+barque's
+Barquisimeto/M
+barracker/M
+barrack/SDRG
+barracuda/MS
+barrage/MGSD
+Barranquilla/M
+barred/ECU
+barre/GMDSJ
+barrel/SGMD
+barrenness/SM
+barren/SPRT
+Barrera/M
+Barret/M
+barrette/SM
+Barrett/M
+barricade/SDMG
+Barrie/M
+barrier/MS
+barring/R
+barrio/SM
+Barri/SM
+barrister/MS
+Barr/M
+Barron/M
+barroom/SM
+barrow/MS
+Barry/M
+Barrymore/MS
+bars/ECU
+barstool/SM
+Barstow/M
+Bartel/M
+bartender/M
+bartend/ZR
+barterer/M
+barter/SRDZG
+bar/TGMDRS
+Barthel/M
+Barth/M
+Bartholdi/M
+Bartholemy/M
+Bartholomeo/M
+Bartholomeus/M
+Bartholomew/M
+Bartie/M
+Bartlet/M
+Bartlett/M
+Bart/M
+Bartók/M
+Bartolemo/M
+Bartolomeo/M
+Barton/M
+Bartram/M
+Barty/M
+barycenter
+barycentre's
+barycentric
+Bary/M
+baryon/SM
+Baryram/M
+Baryshnikov/M
+basaltic
+basalt/SM
+basal/Y
+Bascom/M
+bas/DRSTG
+baseball/MS
+baseband
+baseboard/MS
+base/CGRSDL
+baseless
+baseline/SM
+Basel/M
+basely
+Base/M
+baseman/M
+basemen
+basement/CSM
+baseness/MS
+baseplate/M
+base's
+basetting
+bashfulness/MS
+bashful/PY
+bash/JGDSR
+Basho/M
+Basia/M
+BASIC
+basically
+basic/S
+Basie/M
+basilar
+Basile/M
+basilica/SM
+Basilio/M
+basilisk/SM
+Basilius/M
+Basil/M
+basil/MS
+basin/DMS
+basinful/S
+basis/M
+basketball/MS
+basketry/MS
+basket/SM
+basketwork/SM
+bask/GSD
+basophilic
+Basque/SM
+Basra/M
+Basseterre/M
+basset/GMDS
+Bassett/M
+bassinet/SM
+bassist/MS
+Bass/M
+basso/MS
+bassoonist/MS
+bassoon/MS
+bass/SM
+basswood/SM
+bastardization/MS
+bastardized/U
+bastardize/SDG
+bastard/MYS
+bastardy/MS
+baste/NXS
+baster/M
+Bastian/M
+Bastien/M
+Bastille/M
+basting/M
+bastion/DM
+bast/SGZMDR
+Basutoland/M
+Bataan/M
+Batavia/M
+batch/MRSDG
+bated/U
+bate/KGSADC
+bater/AC
+Bates
+bathe
+bather/M
+bathetic
+bathhouse/SM
+bath/JMDSRGZ
+bathmat/S
+Batholomew/M
+bathos/SM
+bathrobe/MS
+bathroom/SDM
+baths
+Bathsheba/M
+bathtub/MS
+bathwater
+bathyscaphe's
+bathysphere/MS
+batik/DMSG
+Batista/M
+batiste/SM
+Bat/M
+batman/M
+Batman/M
+batmen
+baton/SM
+Batsheva/M
+batsman/M
+bat/SMDRG
+batsmen
+battalion/MS
+batted
+batten/SDMG
+batter/SRDZG
+battery/MS
+batting/MS
+battledore/MS
+battledress
+battlefield/SM
+battlefront/SM
+battle/GMZRSDL
+battleground/SM
+Battle/M
+battlement/SMD
+battler/M
+battleship/MS
+batty/RT
+Batu/M
+batwings
+bauble/SM
+Baudelaire/M
+baud/M
+Baudoin/M
+Baudouin/M
+Bauer/M
+Bauhaus/M
+baulk/GSDM
+Bausch/M
+bauxite/SM
+Bavaria/M
+Bavarian/S
+bawdily
+bawdiness/MS
+bawd/SM
+bawdy/PRST
+bawler/M
+bawl/SGDR
+Baxie/M
+Bax/M
+Baxter/M
+Baxy/M
+Bayamon
+Bayard/M
+bayberry/MS
+Bayda/M
+Bayer/M
+Bayes
+Bayesian
+bay/GSMDY
+Baylor/M
+Bay/MR
+bayonet/SGMD
+Bayonne/M
+bayou/MS
+Bayreuth/M
+bazaar/MS
+bazillion/S
+bazooka/MS
+BB
+BBB
+BBC
+bbl
+BBQ
+BBS
+BC
+BCD
+bdrm
+beachcomber/SM
+beachhead/SM
+Beach/M
+beach/MSDG
+beachwear/M
+beacon/DMSG
+beading/M
+Beadle/M
+beadle/SM
+bead/SJGMD
+beadsman/M
+beadworker
+beady/TR
+beagle/SDGM
+beaker/M
+beak/ZSDRM
+Beale/M
+Bealle/M
+Bea/M
+beam/MDRSGZ
+beanbag/SM
+bean/DRMGZS
+beanie/SM
+Bean/M
+beanpole/MS
+beanstalk/SM
+bearable/U
+bearably/U
+beard/DSGM
+bearded/P
+beardless
+Beard/M
+Beardmore/M
+Beardsley/M
+bearer/M
+bearing/M
+bearishness/SM
+bearish/PY
+bearlike
+Bear/M
+Bearnaise/M
+Bearnard/M
+bearskin/MS
+bear/ZBRSJG
+Beasley/M
+beasties
+beastings/M
+beastliness/MS
+beastly/PTR
+beast/SJMY
+beatable/U
+beatably/U
+beaten/U
+beater/M
+beatific
+beatifically
+beatification/M
+beatify/GNXDS
+beating/M
+beatitude/MS
+Beatlemania/M
+Beatles/M
+beatnik/SM
+beat/NRGSBZJ
+Beatrice/M
+Beatrisa/M
+Beatrix/M
+Beatriz/M
+Beauchamps
+Beaufort/M
+Beaujolais/M
+Beau/M
+Beaumarchais/M
+Beaumont/M
+beau/MS
+Beauregard/M
+beauteousness/M
+beauteous/YP
+beautician/MS
+beautification/M
+beautifier/M
+beautifully/U
+beautifulness/M
+beautiful/PTYR
+beautify/SRDNGXZ
+beaut/SM
+beauty/SM
+Beauvoir/M
+beaux's
+beaver/DMSG
+Beaverton/M
+Bebe/M
+bebop/MS
+becalm/GDS
+became
+because
+Becca/M
+Bechtel/M
+Becka/M
+Becker/M
+Becket/M
+Beckett/M
+beck/GSDM
+Beckie/M
+Becki/M
+beckon/SDG
+Beck/RM
+Becky/M
+becloud/SGD
+become/GJS
+becoming/UY
+Becquerel/M
+bedaub/GDS
+bedazzle/GLDS
+bedazzlement/SM
+bedbug/SM
+bedchamber/M
+bedclothes
+bedded
+bedder/MS
+bedding/MS
+bedeck/DGS
+Bede/M
+bedevil/DGLS
+bedevilment/SM
+bedfast
+bedfellow/MS
+Bedford/M
+bedimmed
+bedimming
+bedim/S
+bedizen/DGS
+bedlam/MS
+bedlinen
+bedmaker/SM
+bedmate/MS
+bed/MS
+Bedouin/SM
+bedpan/SM
+bedpost/SM
+bedraggle/GSD
+bedridden
+bedrock/SM
+bedroll/SM
+bedroom/DMS
+bedsheets
+bedside/MS
+bedsit
+bedsitter/M
+bedsore/MS
+bedspread/SM
+bedspring/SM
+bedstead/SM
+bedstraw/M
+bedtime/SM
+Beebe/M
+beebread/MS
+Beecher/M
+beech/MRSN
+beechnut/MS
+beechwood
+beefburger/SM
+beefcake/MS
+beef/GZSDRM
+beefiness/MS
+beefsteak/MS
+beefy/TRP
+beehive/MS
+beekeeper/MS
+beekeeping/SM
+beeline/MGSD
+Beelzebub/M
+Bee/M
+bee/MZGJRS
+been/S
+beeper/M
+beep/GZSMDR
+Beerbohm/M
+beer/M
+beermat/S
+beery/TR
+beeswax/DSMG
+Beethoven/M
+beetle/GMRSD
+Beeton/M
+beetroot/M
+beet/SM
+beeves/M
+befall/SGN
+befell
+befit/SM
+befitted
+befitting/Y
+befogged
+befogging
+befog/S
+before
+beforehand
+befoul/GSD
+befriend/DGS
+befuddle/GLDS
+befuddlement/SM
+began
+beget/S
+begetting
+beggar/DYMSG
+beggarliness/M
+beggarly/P
+beggary/MS
+begged
+begging
+Begin/M
+beginner/MS
+beginning/MS
+begin/S
+begone/S
+begonia/SM
+begot
+begotten
+begrime/SDG
+begrudge/GDRS
+begrudging/Y
+beg/S
+beguilement/SM
+beguiler/M
+beguile/RSDLZG
+beguiling/Y
+beguine/SM
+begum/MS
+begun
+behalf/M
+behalves
+Behan/M
+behave/GRSD
+behavioral/Y
+behaviorism/MS
+behavioristic/S
+behaviorist/S
+behavior/SMD
+behead/GSD
+beheld
+behemoth/M
+behemoths
+behest/SM
+behindhand
+behind/S
+beholder/M
+behold/ZGRNS
+behoofs
+behoove/SDJMG
+behooving/YM
+Behring/M
+Beiderbecke/M
+beige/MS
+Beijing
+Beilul/M
+being/M
+Beirut/M
+Beitris/M
+bejewel/SDG
+Bekesy/M
+Bekki/M
+be/KS
+belabor/MDSG
+Bela/M
+Belarus
+belate/D
+belatedness/M
+belated/PY
+Belau/M
+belay/GSD
+belch/GSD
+beleaguer/GDS
+Belem/M
+Belfast/M
+belfry/SM
+Belgian/MS
+Belgium/M
+Belg/M
+Belgrade/M
+Belia/M
+Belicia/M
+belie
+belief/ESUM
+belier/M
+believability's
+believability/U
+believable/U
+believably/U
+believed/U
+believe/EZGDRS
+believer/MUSE
+believing/U
+Belinda/M
+Belita/M
+belittlement/MS
+belittler/M
+belittle/RSDGL
+Belize/M
+belladonna/MS
+Bella/M
+Bellamy/M
+Bellanca/M
+Bellatrix/M
+bellboy/MS
+belled/A
+Belle/M
+belle/MS
+belletristic
+belletrist/SM
+Belleville/M
+bellflower/M
+bell/GSMD
+bellhop/MS
+bellicoseness/M
+bellicose/YP
+bellicosity/MS
+belligerence/SM
+belligerency/MS
+belligerent/SMY
+Bellina/M
+belling/A
+Bellini/M
+Bell/M
+bellman/M
+bellmen
+Bellovin/M
+bellow/DGS
+Bellow/M
+bellows/M
+bells/A
+bellwether/MS
+Bellwood/M
+bellyacher/M
+bellyache/SRDGM
+bellybutton/MS
+bellyfull
+bellyful/MS
+belly/SDGM
+Bel/M
+Belmont/M
+Belmopan/M
+Beloit/M
+belong/DGJS
+belonging/MP
+Belorussian/S
+Belorussia's
+belove/D
+beloved/S
+below/S
+Belshazzar/M
+belted/U
+belt/GSMD
+belting/M
+Belton/M
+Beltran/M
+Beltsville/M
+beltway/SM
+beluga/SM
+Belushi/M
+Belva/M
+belvedere/M
+Belvia/M
+bely/DSRG
+beman
+Be/MH
+bemire/SDG
+bemoan/GDS
+bemused/Y
+bemuse/GSDL
+bemusement/SM
+Benacerraf/M
+Benares's
+bencher/M
+benchmark/GDMS
+bench/MRSDG
+bend/BUSG
+bended
+Bender/M
+bender/MS
+Bendick/M
+Bendicty/M
+Bendite/M
+Bendix/M
+beneath
+Benedetta/M
+Benedetto/M
+Benedick/M
+Benedicta/M
+Benedictine/MS
+benediction/MS
+Benedict/M
+Benedicto/M
+benedictory
+Benedikta/M
+Benedikt/M
+benefaction/MS
+benefactor/MS
+benefactress/S
+benefice/MGSD
+beneficence/SM
+beneficent/Y
+beneficialness/M
+beneficial/PY
+beneficiary/MS
+benefiter/M
+benefit/SRDMZG
+Benelux/M
+Benet/M
+Benetta/M
+Benetton/M
+benevolence/SM
+benevolentness/M
+benevolent/YP
+Bengali/M
+Bengal/SM
+Benghazi/M
+Bengt/M
+Beniamino/M
+benightedness/M
+benighted/YP
+benignant
+benignity/MS
+benign/Y
+Beninese
+Benin/M
+Benita/M
+Benito/M
+Benjamen/M
+Benjamin/M
+Benjie/M
+Benji/M
+Benjy/M
+Ben/M
+Bennett/M
+Bennie/M
+Benni/M
+Bennington/M
+Benn/M
+Benny/M
+Benoite/M
+Benoit/M
+Benson/M
+Bentham/M
+Bentlee/M
+Bentley/MS
+Bent/M
+Benton/M
+bents
+bent/U
+bentwood/SM
+benumb/SGD
+Benyamin/M
+Benzedrine/M
+benzene/MS
+benzine/SM
+Benz/M
+Beograd's
+Beowulf/M
+bequeath/GSD
+bequeaths
+bequest/MS
+berate/GSD
+Berber/MS
+bereave/GLSD
+bereavement/MS
+bereft
+Berenice/M
+Beret/M
+beret/SM
+Bergen/M
+Bergerac/M
+Berger/M
+Berget/M
+Berglund/M
+Bergman/M
+Berg/NRM
+berg/NRSM
+Bergson/M
+Bergsten/M
+Bergstrom/M
+beribbon/D
+beriberi/SM
+Beringer/M
+Bering/RM
+Berkeley/M
+berkelium/SM
+Berke/M
+Berkie/M
+Berkley/M
+Berkly/M
+Berkowitz/M
+Berkshire/SM
+Berky/M
+Berk/YM
+Berle/M
+Berliner/M
+Berlin/SZRM
+Berlioz/M
+Berlitz/M
+Berman/M
+Ber/MG
+berm/SM
+Bermuda/MS
+Bermudan/S
+Bermudian/S
+Bernadene/M
+Bernadette/M
+Bernadina/M
+Bernadine/M
+Berna/M
+Bernardina/M
+Bernardine/M
+Bernardino/M
+Bernard/M
+Bernardo/M
+Bernarr/M
+Bernays/M
+Bernbach/M
+Bernelle/M
+Berne's
+Bernese
+Bernete/M
+Bernetta/M
+Bernette/M
+Bernhard/M
+Bernhardt/M
+Bernice/M
+Berniece/M
+Bernie/M
+Berni/M
+Bernini/M
+Bernita/M
+Bern/M
+Bernoulli/M
+Bernstein/M
+Berny/M
+Berra/M
+Berrie/M
+Berri/M
+berrylike
+Berry/M
+berry/SDMG
+berserker/M
+berserk/SR
+Berta/M
+Berte/M
+Bertha/M
+Berthe/M
+berth/MDGJ
+berths
+Bertie/M
+Bertillon/M
+Berti/M
+Bertina/M
+Bertine/M
+Bert/M
+Berton/M
+Bertram/M
+Bertrand/M
+Bertrando/M
+Berty/M
+Beryle/M
+beryllium/MS
+Beryl/M
+beryl/SM
+Berzelius/M
+bes
+beseecher/M
+beseeching/Y
+beseech/RSJZG
+beseem/GDS
+beset/S
+besetting
+beside/S
+besieger/M
+besiege/SRDZG
+besmear/GSD
+besmirch/GSD
+besom/GMDS
+besot/S
+besotted
+besotting
+besought
+bespangle/GSD
+bespatter/SGD
+bespeak/SG
+bespectacled
+bespoke
+bespoken
+Bess
+Bessel/M
+Bessemer/M
+Bessie/M
+Bessy/M
+best/DRSG
+bestiality/MS
+bestial/Y
+bestiary/MS
+bestirred
+bestirring
+bestir/S
+Best/M
+bestowal/SM
+bestow/SGD
+bestrew/DGS
+bestrewn
+bestridden
+bestride/SG
+bestrode
+bestseller/MS
+bestselling
+bestubble/D
+betaken
+betake/SG
+beta/SM
+betatron/M
+betcha
+Betelgeuse/M
+betel/MS
+Bethanne/M
+Bethany/M
+bethel/M
+Bethe/M
+Bethena/M
+Bethesda/M
+Bethina/M
+bethink/GS
+Bethlehem/M
+beth/M
+Beth/M
+bethought
+Bethune
+betide/GSD
+betimes
+bet/MS
+betoken/GSD
+betook
+betrayal/SM
+betrayer/M
+betray/SRDZG
+betrothal/SM
+betrothed/U
+betroth/GD
+betroths
+Betsey/M
+Betsy/M
+Betta/M
+Betteanne/M
+Betteann/M
+Bette/M
+betterment/MS
+better/SDLG
+Bettie/M
+Betti/M
+Bettina/M
+Bettine/M
+betting
+bettor/SM
+Bettye/M
+Betty/SM
+betweenness/M
+between/SP
+betwixt
+Beulah/M
+Bevan/M
+bevel/SJGMRD
+beverage/MS
+Beverie/M
+Beverlee/M
+Beverley/M
+Beverlie/M
+Beverly/M
+Bevin/M
+Bevon/M
+Bev's
+Bevvy/M
+bevy/SM
+bewail/GDS
+beware/GSD
+bewhisker/D
+bewigged
+bewildered/PY
+bewildering/Y
+bewilder/LDSG
+bewilderment/SM
+bewitching/Y
+bewitch/LGDS
+bewitchment/SM
+bey/MS
+beyond/S
+bezel/MS
+bf
+B/GT
+Bhopal/M
+Bhutanese
+Bhutan/M
+Bhutto/M
+Bialystok/M
+Bianca/M
+Bianco/M
+Bianka/M
+biannual/Y
+bias/DSMPG
+biased/U
+biathlon/MS
+biaxial/Y
+bibbed
+Bibbie/M
+bibbing
+Bibbye/M
+Bibby/M
+Bibi/M
+bible/MS
+Bible/MS
+biblical/Y
+biblicists
+bibliographer/MS
+bibliographical/Y
+bibliographic/S
+bibliography/MS
+bibliophile/MS
+Bib/M
+bib/MS
+bibulous
+bicameral
+bicameralism/MS
+bicarb/MS
+bicarbonate/MS
+bicentenary/S
+bicentennial/S
+bicep/S
+biceps/M
+bichromate/DM
+bickerer/M
+bickering/M
+bicker/SRDZG
+biconcave
+biconnected
+biconvex
+bicuspid/S
+bicycler/M
+bicycle/RSDMZG
+bicyclist/SM
+biddable
+bidden/U
+bidder/MS
+Biddie/M
+bidding/MS
+Biddle/M
+Biddy/M
+biddy/SM
+bider/M
+bide/S
+bidet/SM
+Bidget/M
+bid/GMRS
+bidiagonal
+bidirectional/Y
+bids/A
+biennial/SY
+biennium/SM
+Bienville/M
+Bierce/M
+bier/M
+bifocal/S
+bifurcate/SDXGNY
+bifurcation/M
+bigamist/SM
+bigamous
+bigamy/SM
+Bigelow/M
+Bigfoot
+bigged
+bigger
+biggest
+biggie/SM
+bigging
+biggish
+bighead/MS
+bigheartedness/S
+bighearted/P
+bighorn/MS
+bight/SMDG
+bigmouth/M
+bigmouths
+bigness/SM
+bigoted/Y
+bigot/MDSG
+bigotry/MS
+big/PYS
+bigwig/MS
+biharmonic
+bijection/MS
+bijective/Y
+bijou/M
+bijoux
+bike/MZGDRS
+biker/M
+bikini/SMD
+Biko/M
+bilabial/S
+bilateralness/M
+bilateral/PY
+bilayer/S
+Bilbao/M
+bilberry/MS
+Bilbo/M
+bile/SM
+bilge/GMDS
+biliary
+Bili/M
+bilinear
+bilingualism/SM
+bilingual/SY
+biliousness/SM
+bilious/P
+bilker/M
+bilk/GZSDR
+billboard/MDGS
+biller/M
+billet/MDGS
+billfold/MS
+billiard/SM
+Billie/M
+Billi/M
+billing/M
+billingsgate/SM
+Billings/M
+billionaire/MS
+billion/SHM
+billionths
+bill/JGZSBMDR
+Bill/JM
+billow/DMGS
+billowy/RT
+billposters
+Billye/M
+Billy/M
+billy/SM
+Bil/MY
+bi/M
+Bi/M
+bimbo/MS
+bimetallic/S
+bimetallism/MS
+Bimini/M
+bimodal
+bimolecular/Y
+bimonthly/S
+binary/S
+binaural/Y
+binder/M
+bindery/MS
+binding/MPY
+bindingness/M
+bind/JDRGZS
+bindle/M
+binds/AU
+bindweed/MS
+binge/MS
+bing/GNDM
+Bingham/M
+Binghamton/M
+Bing/M
+bingo/MS
+Bini/M
+Bink/M
+Binky/M
+binnacle/MS
+binned
+Binnie/M
+Binni/M
+binning
+Binny/M
+binocular/SY
+binodal
+binomial/SYM
+bin/SM
+binuclear
+biochemical/SY
+biochemist/MS
+biochemistry/MS
+biodegradability/S
+biodegradable
+biodiversity/S
+bioengineering/M
+bioethics
+biofeedback/SM
+biographer/M
+biographic
+biographical/Y
+biograph/RZ
+biography/MS
+biog/S
+Bioko/M
+biol
+biological/SY
+biologic/S
+biologist/SM
+biology/MS
+biomass/SM
+biomedical
+biomedicine/M
+biometric/S
+biometrics/M
+biometry/M
+biomolecule/S
+biomorph
+bionically
+bionic/S
+bionics/M
+biophysical/Y
+biophysicist/SM
+biophysic/S
+biophysics/M
+biopic/S
+biopsy/SDGM
+biorhythm/S
+BIOS
+bioscience/S
+biosphere/MS
+biostatistic/S
+biosynthesized
+biotechnological
+biotechnologist
+biotechnology/SM
+biotic
+biotin/SM
+bipartisan
+bipartisanship/MS
+bipartite/YN
+bipartition/M
+bipedal
+biped/MS
+biplane/MS
+bipolar
+bipolarity/MS
+biracial
+Birch/M
+birch/MRSDNG
+birdbath/M
+birdbaths
+birdbrain/SDM
+birdcage/SM
+birder/M
+birdhouse/MS
+birdieing
+Birdie/M
+birdie/MSD
+birdlike
+birdlime/MGDS
+Bird/M
+birdseed/MS
+Birdseye/M
+bird/SMDRGZ
+birdsong
+birdtables
+birdwatch/GZR
+birefringence/M
+birefringent
+biretta/SM
+Birgit/M
+Birgitta/M
+Birkenstock/M
+Birk/M
+Birmingham/M
+Biro/M
+Biron/M
+birthday/SM
+birthmark/MS
+birth/MDG
+birthplace/SM
+birthrate/MS
+birthright/MS
+birth's/A
+births/A
+birthstone/SM
+bis
+Biscay/M
+Biscayne/M
+biscuit/MS
+bisect/DSG
+bisection/MS
+bisector/MS
+biserial
+bisexuality/MS
+bisexual/YMS
+Bishkek
+bishop/DGSM
+Bishop/M
+bishopric/SM
+Bismarck/M
+Bismark/M
+bismuth/M
+bismuths
+bison/M
+bisque/SM
+Bissau/M
+bistable
+bistate
+bistro/SM
+bisyllabic
+bitblt/S
+bitchily
+bitchiness/MS
+bitch/MSDG
+bitchy/PTR
+biter/M
+bite/S
+biting/Y
+bitmap/SM
+bit/MRJSZG
+BITNET/M
+bit's/C
+bits/C
+bitser/M
+bitted
+bitten
+bitterness/SM
+bittern/SM
+bitternut/M
+bitter/PSRDYTG
+bitterroot/M
+bittersweet/YMSP
+bitting
+bitty/PRT
+bitumen/MS
+bituminous
+bitwise
+bivalent/S
+bivalve/MSD
+bivariate
+bivouacked
+bivouacking
+bivouac/MS
+biweekly/S
+biyearly
+bizarreness/M
+bizarre/YSP
+Bizet/M
+biz/M
+bizzes
+Bjorn/M
+bk
+b/KGD
+Bk/M
+blabbed
+blabber/GMDS
+blabbermouth/M
+blabbermouths
+blabbing
+blab/S
+blackamoor/SM
+blackball/SDMG
+blackberry/GMS
+blackbirder/M
+blackbird/SGDRM
+blackboard/SM
+blackbody/S
+Blackburn/M
+blackcurrant/M
+blackener/M
+blacken/GDR
+Blackfeet
+Blackfoot/M
+blackguard/MDSG
+blackhead/SM
+blacking/M
+blackish
+blackjack/SGMD
+blackleg/M
+blacklist/DRMSG
+blackmail/DRMGZS
+blackmailer/M
+Blackman/M
+Blackmer/M
+blackness/MS
+blackout/SM
+Blackpool/M
+Black's
+black/SJTXPYRDNG
+blacksmith/MG
+blacksmiths
+blacksnake/MS
+blackspot
+Blackstone/M
+blackthorn/MS
+blacktop/MS
+blacktopped
+blacktopping
+Blackwell/MS
+bladder/MS
+bladdernut/M
+bladderwort/M
+blade/DSGM
+blah/MDG
+blahs
+Blaine/M
+Blaire/M
+Blair/M
+Blakelee/M
+Blakeley/M
+Blake/M
+Blakey/M
+blame/DSRBGMZ
+blamelessness/SM
+blameless/YP
+blamer/M
+blameworthiness/SM
+blameworthy/P
+Blanca/M
+Blancha/M
+Blanchard/M
+blanch/DRSG
+Blanche/M
+blancher/M
+Blanch/M
+blanc/M
+blancmange/SM
+blandishment/MS
+blandish/SDGL
+blandness/MS
+bland/PYRT
+Blane/M
+Blankenship/M
+blanketing/M
+blanket/SDRMZG
+blankness/MS
+blank/SPGTYRD
+Blanton/M
+Blantyre/M
+blare/DSG
+blarney/DMGS
+blasé
+blasphemer/M
+blaspheme/RSDZG
+blasphemousness/M
+blasphemous/PY
+blasphemy/SM
+blaster/M
+blasting/M
+blastoff/SM
+blast/SMRDGZ
+blatancy/SM
+blatant/YP
+blather/DRGS
+blatting
+Blatz/M
+Blavatsky/M
+Blayne/M
+blaze/DSRGMZ
+blazer/M
+blazing/Y
+blazoner/M
+blazon/SGDR
+bl/D
+bldg
+bleach/DRSZG
+bleached/U
+bleacher/M
+bleakness/MS
+bleak/TPYRS
+blear/GDS
+blearily
+bleariness/SM
+bleary/PRT
+bleater/M
+bleat/RDGS
+bleeder/M
+bleed/ZRJSG
+Bleeker/M
+bleep/GMRDZS
+blemish/DSMG
+blemished/U
+blench/DSG
+blender/M
+blend/GZRDS
+Blenheim/M
+blessedness/MS
+blessed/PRYT
+blessing/M
+bless/JGSD
+Blevins/M
+blew
+Bligh/M
+blighter/M
+blight/GSMDR
+blimey/S
+blimp/MS
+blinded/U
+blinder/M
+blindfold/SDG
+blinding/MY
+blind/JGTZPYRDS
+blindness/MS
+blindside/SDG
+blinker/MDG
+blinking/U
+blink/RDGSZ
+blinks/M
+Blinnie/M
+Blinni/M
+Blinny/M
+blintze/M
+blintz/SM
+blip/MS
+blipped
+blipping
+Blisse/M
+blissfulness/MS
+blissful/PY
+Bliss/M
+bliss/SDMG
+blistering/Y
+blister/SMDG
+blistery
+Blithe/M
+blitheness/SM
+blither/G
+blithesome
+blithe/TYPR
+blitz/GSDM
+blitzkrieg/SM
+blizzard/MS
+bloater/M
+bloat/SRDGZ
+blobbed
+blobbing
+blob/MS
+Bloch/M
+blockader/M
+blockade/ZMGRSD
+blockage/MS
+blockbuster/SM
+blockbusting/MS
+blocker/MS
+blockhead/MS
+blockhouse/SM
+block's
+block/USDG
+blocky/R
+bloc/MS
+Bloemfontein/M
+bloke/SM
+Blomberg/M
+Blomquist/M
+Blondelle/M
+Blondell/M
+blonde's
+Blondie/M
+blondish
+blondness/MS
+blond/SPMRT
+Blondy/M
+bloodbath
+bloodbaths
+bloodcurdling
+bloodhound/SM
+bloodied/U
+bloodiness/MS
+bloodlessness/SM
+bloodless/PY
+bloodletting/MS
+bloodline/SM
+bloodmobile/MS
+bloodroot/M
+bloodshed/SM
+bloodshot
+blood/SMDG
+bloodsport/S
+bloodstain/MDS
+bloodstock/SM
+bloodstone/M
+bloodstream/SM
+bloodsucker/SM
+bloodsucking/S
+bloodthirstily
+bloodthirstiness/MS
+bloodthirsty/RTP
+bloodworm/M
+bloodymindedness
+bloody/TPGDRS
+bloomer/M
+Bloomer/M
+Bloomfield/M
+Bloomington/M
+Bloom/MR
+bloom/SMRDGZ
+blooper/M
+bloop/GSZRD
+blossom/DMGS
+blossomy
+blotch/GMDS
+blotchy/RT
+blot/MS
+blotted
+blotter/MS
+blotting
+blotto
+blouse/GMSD
+blower/M
+blowfish/M
+blowfly/MS
+blowgun/SM
+blow/GZRS
+blowing/M
+blown/U
+blowout/MS
+blowpipe/SM
+blowtorch/SM
+blowup/MS
+blowy/RST
+blowzy/RT
+BLT
+blubber/GSDR
+blubbery
+Blucher/M
+bludgeon/GSMD
+blueback
+Bluebeard/M
+bluebell/MS
+blueberry/SM
+bluebill/M
+bluebird/MS
+bluebonnet/SM
+bluebook/M
+bluebottle/MS
+bluebush
+bluefish/SM
+bluegill/SM
+bluegrass/MS
+blueing's
+blueish
+bluejacket/MS
+bluejeans
+blue/JMYTGDRSP
+blueness/MS
+bluenose/MS
+bluepoint/SM
+blueprint/GDMS
+bluer/M
+bluest/M
+bluestocking/SM
+bluesy/TR
+bluet/MS
+bluffer/M
+bluffness/MS
+bluff/SPGTZYRD
+bluing/M
+bluishness/M
+bluish/P
+Blumenthal/M
+Blum/M
+blunderbuss/MS
+blunderer/M
+blunder/GSMDRJZ
+blundering/Y
+bluntness/MS
+blunt/PSGTYRD
+blurb/GSDM
+blur/MS
+blurred/Y
+blurriness/S
+blurring/Y
+blurry/RPT
+blurt/GSRD
+blusher/M
+blushing/UY
+blush/RSDGZ
+blusterer/M
+blustering/Y
+blusterous
+bluster/SDRZG
+blustery
+blvd
+Blvd
+Blythe/M
+BM
+BMW/M
+BO
+boarded
+boarder/SM
+boardgames
+boardinghouse/SM
+boarding/SM
+board/IS
+boardroom/MS
+board's
+boardwalk/SM
+boar/MS
+boa/SM
+boaster/M
+boastfulness/MS
+boastful/YP
+boast/SJRDGZ
+boatclubs
+boater/M
+boathouse/SM
+boating/M
+boatload/SM
+boatman/M
+boat/MDRGZJS
+boatmen
+boatswain/SM
+boatyard/SM
+bobbed
+Bobbee/M
+Bobbe/M
+Bobbette/M
+Bobbie/M
+Bobbi/M
+bobbing/M
+bobbin/MS
+Bobbitt/M
+bobble/SDGM
+Bobbsey/M
+Bobbye/M
+Bobby/M
+bobby/SM
+bobbysoxer's
+bobcat/MS
+Bobette/M
+Bobina/M
+Bobine/M
+Bobinette/M
+Bob/M
+bobolink/SM
+Bobrow/M
+bobsledded
+bobsledder/MS
+bobsledding/M
+bobsled/MS
+bobsleigh/M
+bobsleighs
+bobs/M
+bob/SM
+bobtail/SGDM
+bobwhite/SM
+Boca/M
+Boccaccio/M
+boccie/SM
+bock/GDS
+bockwurst
+bodega/MS
+Bodenheim/M
+bode/S
+Bodhidharma/M
+bodhisattva
+Bodhisattva/M
+bodice/SM
+bodied/M
+bodiless
+bodily
+boding/M
+bodkin/SM
+bod/SGMD
+bodybuilder/SM
+bodybuilding/S
+body/DSMG
+bodyguard/MS
+bodying/M
+bodysuit/S
+bodyweight
+bodywork/SM
+Boeing/M
+Boeotia/M
+Boeotian
+Boer/M
+Bogartian/M
+Bogart/M
+Bogey/M
+bogeyman/M
+bogeymen
+bogey/SGMD
+bogged
+bogging
+boggle/SDG
+boggling/Y
+boggy/RT
+bogie's
+bog/MS
+Bogotá/M
+bogus
+bogyman
+bogymen
+bogy's
+Boheme/M
+bohemianism/S
+bohemian/S
+Bohemian/SM
+Bohemia/SM
+Bohr/M
+Boigie/M
+boiled/AU
+boiler/M
+boilermaker/MS
+boilerplate/SM
+boil/JSGZDR
+boils/A
+Boise/M
+Bois/M
+boisterousness/MS
+boisterous/YP
+bola/SM
+boldface/SDMG
+boldness/MS
+bold/YRPST
+bole/MS
+bolero/MS
+Boleyn/M
+bolivares
+Bolivar/M
+bolivar/MS
+Bolivia/M
+Bolivian/S
+bollard/SM
+bollix/GSD
+boll/MDSG
+Bologna/M
+bologna/MS
+bolometer/MS
+bolo/MS
+boloney's
+Bolshevik/MS
+Bolshevism/MS
+Bolshevistic/M
+Bolshevist/MS
+Bolshoi/M
+bolsterer/M
+bolster/SRDG
+bolted/U
+bolter/M
+bolt/MDRGS
+Bolton/M
+bolts/U
+Boltzmann/M
+bolus/SM
+bombardier/MS
+bombard/LDSG
+bombardment/SM
+bombastic
+bombastically
+bombast/RMS
+Bombay/M
+bomber/M
+bombproof
+bomb/SGZDRJ
+bombshell/SM
+Bo/MRZ
+bona
+bonanza/MS
+Bonaparte/M
+Bonaventure/M
+bonbon/SM
+bondage/SM
+bonder/M
+bondholder/SM
+Bondie/M
+bond/JMDRSGZ
+Bond/M
+bondman/M
+bondmen
+Bondon/M
+bonds/A
+bondsman/M
+bondsmen
+bondwoman/M
+bondwomen
+Bondy/M
+boned/U
+bonehead/SDM
+boneless
+Bone/M
+bone/MZDRSG
+boner/M
+bonfire/MS
+bong/GDMS
+bongo/MS
+Bonham/M
+bonhomie/MS
+Boniface/M
+boniness/MS
+Bonita/M
+bonito/MS
+bonjour
+bonkers
+Bonnee/M
+Bonner/M
+bonneted/U
+bonnet/SGMD
+Bonneville/M
+Bonnibelle/M
+bonnie
+Bonnie/M
+Bonni/M
+Bonn/RM
+Bonny/M
+bonny/RT
+bonsai/SM
+Bontempo/M
+bonus/SM
+bony/RTP
+bonzes
+boob/DMSG
+booby/SM
+boodle/GMSD
+boogeyman's
+boogieing
+boogie/SD
+boo/GSDH
+boohoo/GDS
+bookbinder/M
+bookbindery/SM
+bookbinding/M
+bookbind/JRGZ
+bookcase/MS
+booked/U
+bookend/SGD
+Booker/M
+book/GZDRMJSB
+bookie/SM
+booking/M
+bookishness/M
+bookish/PY
+bookkeeper/M
+bookkeep/GZJR
+bookkeeping/M
+booklet/MS
+bookmaker/MS
+bookmaking/MS
+bookmark/MDGS
+bookmobile/MS
+bookplate/SM
+bookseller/SM
+bookshelf/M
+bookshelves
+bookshop/MS
+bookstall/MS
+bookstore/SM
+bookwork/M
+bookworm/MS
+Boolean
+boolean/S
+Boole/M
+boom/DRGJS
+boomerang/MDSG
+boomer/M
+boomtown/S
+boondocks
+boondoggle/DRSGZ
+boondoggler/M
+Boone/M
+Boonie/M
+boonies
+boon/MS
+Boony/M
+boorishness/SM
+boorish/PY
+boor/MS
+boosterism
+booster/M
+boost/SGZMRD
+boot/AGDS
+bootblack/MS
+bootee/MS
+Boote/M
+Boötes
+Boothe/M
+booth/M
+Booth/M
+booths
+bootie's
+bootlaces
+bootlegged/M
+bootlegger/SM
+bootlegging/M
+bootleg/S
+Bootle/M
+bootless
+Boot/M
+bootprints
+boot's
+bootstrapped
+bootstrapping
+bootstrap/SM
+booty/SM
+booze/DSRGMZ
+boozer/M
+boozy/TR
+bopped
+bopping
+bop/S
+borate/MSD
+borax/MS
+Bordeaux/M
+bordello/MS
+Borden/M
+borderer/M
+border/JRDMGS
+borderland/SM
+borderline/MS
+Bordie/M
+Bord/MN
+Bordon/M
+Bordy/M
+Borealis/M
+Boreas/M
+boredom/MS
+boreholes
+borer/M
+bore/ZGJDRS
+Borges
+Borgia/M
+Borg/M
+boric
+boring/YMP
+Boris
+Bork/M
+born/AIU
+Borneo/M
+borne/U
+Born/M
+Borodin/M
+boron/SM
+borosilicate/M
+borough/M
+boroughs
+Borroughs/M
+borrower/M
+borrowing/M
+borrow/JZRDGBS
+borscht/SM
+borstal/MS
+Boru/M
+borzoi/MS
+Bosch/M
+Bose/M
+bosh/MS
+Bosnia/M
+Bosnian/S
+bosom's
+bosom/SGUD
+bosomy/RT
+boson/SM
+Bosporus/M
+boss/DSRMG
+bossily
+bossiness/MS
+bossism/MS
+bossy/PTSR
+Bostitch/M
+Bostonian/SM
+Boston/MS
+bosun's
+Boswell/MS
+botanical/SY
+botanic/S
+botanist/SM
+botany/SM
+botcher/M
+botch/SRDGZ
+botfly/M
+bother/DG
+bothersome
+bothy/M
+both/ZR
+bot/S
+Botswana/M
+Botticelli/M
+bottle/GMZSRD
+bottleneck/GSDM
+bottler/M
+bottomlessness/M
+bottomless/YP
+bottommost
+bottom/SMRDG
+botulin/M
+botulinus/M
+botulism/SM
+Boucher/M
+boudoir/MS
+bouffant/S
+bougainvillea/SM
+bough/MD
+boughs
+bought/N
+bouillabaisse/MS
+bouillon/MS
+boulder/GMDS
+Boulder/M
+boulevard/MS
+bouncer/M
+bounce/SRDGZ
+bouncily
+bouncing/Y
+bouncy/TRP
+boundary/MS
+bound/AUDI
+boundedness/MU
+bounded/UP
+bounden
+bounder/AM
+bounders
+bounding
+boundlessness/SM
+boundless/YP
+bounds/IA
+bounteousness/MS
+bounteous/PY
+bountifulness/SM
+bountiful/PY
+bounty/SDM
+bouquet/SM
+Bourbaki/M
+bourbon/SM
+Bourbon/SM
+bourgeoisie/SM
+bourgeois/M
+Bourke/M
+Bourne/M
+Bournemouth/M
+boutique/MS
+bout/MS
+boutonnière/MS
+Bouvier
+Bovary/M
+bovine/YS
+Bowditch/M
+bowdlerization/MS
+bowdlerize/GRSD
+bowed/U
+bowel/GMDS
+Bowell/M
+Bowen/M
+bower/DMG
+Bowers
+Bowery/M
+Bowes
+bowie
+Bowie/M
+bowing/M
+bowlder's
+bowlegged
+bowleg/SM
+bowler/M
+bowlful/S
+bowl/GZSMDR
+bowline/MS
+bowling/M
+bowman/M
+Bowman/M
+bowmen
+bowser/M
+bowsprit/SM
+bows/R
+bowstring/GSMD
+bow/SZGNDR
+bowwow/DMGS
+boxcar/SM
+box/DRSJZGM
+boxer/M
+boxful/M
+boxing/M
+boxlike
+boxtops
+boxwood/SM
+boxy/TPR
+Boyce/M
+Boycey/M
+Boycie/M
+boycotter/M
+boycott/RDGS
+Boyd/M
+Boyer/M
+boyfriend/MS
+boyhood/SM
+boyishness/MS
+boyish/PY
+Boyle/M
+Boy/MR
+boy/MRS
+boyscout
+boysenberry/SM
+bozo/SM
+bpi
+bps
+BR
+brace/DSRJGM
+braced/U
+bracelet/MS
+bracer/M
+brachia
+brachium/M
+bracken/SM
+bracketed/U
+bracketing/M
+bracket/SGMD
+brackishness/SM
+brackish/P
+bract/SM
+Bradan/M
+bradawl/M
+Bradbury/M
+Bradburys
+bradded
+bradding
+Braddock/M
+Brade/M
+Braden/M
+Bradford/M
+Bradley/M
+Bradly/M
+Brad/MYN
+Bradney/M
+Bradshaw/M
+brad/SM
+Bradstreet/M
+Brady/M
+brae/SM
+braggadocio/SM
+braggart/SM
+bragged
+bragger/MS
+braggest
+bragging
+Bragg/M
+brag/S
+Brahe/M
+Brahma/MS
+Brahmanism/MS
+Brahman/SM
+Brahmaputra/M
+Brahmin's
+Brahms
+braider/M
+braiding/M
+braid/RDSJG
+braille/DSG
+Braille/GDSM
+Brainard/SM
+braincell/S
+brainchild/M
+brainchildren
+brain/GSDM
+braininess/MS
+brainlessness/M
+brainless/YP
+Brain/M
+brainpower/M
+brainstorm/DRMGJS
+brainstorming/M
+brainteaser/S
+brainteasing
+brainwasher/M
+brainwashing/M
+brainwash/JGRSD
+brainwave/S
+brainy/RPT
+braise/SDG
+brake/DSGM
+brakeman/M
+brakemen/M
+bramble/DSGM
+brambling/M
+brambly/RT
+Bram/M
+Brampton/M
+bra/MS
+Brana/M
+branched/U
+branching/M
+branchlike
+Branch/M
+branch/MDSJG
+Branchville/M
+Brandais/M
+Brandea/M
+branded/U
+Brandeis/M
+Brandel/M
+Brande/M
+Brandenburg/M
+Branden/M
+brander/GDM
+Brander/M
+Brandice/M
+Brandie/M
+Brandi/M
+Brandise/M
+brandish/GSD
+Brand/MRN
+Brando/M
+Brandon/M
+brand/SMRDGZ
+Brandt/M
+Brandtr/M
+brandy/GDSM
+Brandy/M
+Brandyn/M
+brandywine
+Braniff/M
+Bran/M
+branned
+branning
+Brannon/M
+bran/SM
+Brantley/M
+Brant/M
+Braque/M
+brashness/MS
+brash/PYSRT
+Brasilia
+brasserie/SM
+brass/GSDM
+brassiere/MS
+brassily
+brassiness/SM
+brassy/RSPT
+Bratislava/M
+brat/SM
+Brattain/M
+bratty/RT
+bratwurst/MS
+Braun/M
+bravadoes
+bravado/M
+brave/DSRGYTP
+braveness/MS
+bravery/MS
+bravest/M
+bravo/SDG
+bravura/SM
+brawler/M
+brawl/MRDSGZ
+brawniness/SM
+brawn/MS
+brawny/TRP
+brayer/M
+Bray/M
+bray/SDRG
+braze/GZDSR
+brazenness/MS
+brazen/PYDSG
+brazer/M
+brazier/SM
+Brazilian/MS
+Brazil/M
+Brazos/M
+Brazzaville/M
+breacher/M
+breach/MDRSGZ
+breadbasket/SM
+breadboard/SMDG
+breadbox/S
+breadcrumb/S
+breadfruit/MS
+breadline/MS
+bread/SMDHG
+breadth/M
+breadths
+breadwinner/MS
+breakables
+breakable/U
+breakage/MS
+breakaway/MS
+breakdown/MS
+breaker/M
+breakfaster/M
+breakfast/RDMGZS
+breakfront/S
+breaking/M
+breakneck
+breakout/MS
+breakpoint/SMDG
+break/SZRBG
+breakthroughs
+breakthrough/SM
+breakup/SM
+breakwater/SM
+bream/SDG
+Breanne/M
+Brear/M
+breastbone/MS
+breastfed
+breastfeed/G
+breasting/M
+breast/MDSG
+breastplate/SM
+breaststroke/SM
+breastwork/MS
+breathable/U
+breathalyser/S
+Breathalyzer/SM
+breathe
+breather/M
+breathing/M
+breathlessness/SM
+breathless/PY
+breaths
+breathtaking/Y
+breathy/TR
+breath/ZBJMDRSG
+Brecht/M
+Breckenridge/M
+bred/DG
+bredes
+breeching/M
+breech/MDSG
+breeder/I
+breeder's
+breeding/IM
+breeds/I
+breed/SZJRG
+Bree/M
+Breena/M
+breeze/GMSD
+breezeway/SM
+breezily
+breeziness/SM
+breezy/RPT
+Bremen/M
+bremsstrahlung/M
+Brena/M
+Brenda/M
+Brendan/M
+Brenden/M
+Brendin/M
+Brendis/M
+Brendon/M
+Bren/M
+Brenna/M
+Brennan/M
+Brennen/M
+Brenner/M
+Brenn/RNM
+Brent/M
+Brenton/M
+Bresenham/M
+Brest/M
+brethren
+Bret/M
+Breton
+Brett/M
+breve/SM
+brevet/MS
+brevetted
+brevetting
+breviary/SM
+brevity/MS
+brew/DRGZS
+brewer/M
+Brewer/M
+brewery/MS
+brewing/M
+brewpub/S
+Brew/RM
+Brewster/M
+Brezhnev/M
+Bria/M
+Briana/M
+Brian/M
+Brianna/M
+Brianne/M
+Briano/M
+Briant/M
+briar's
+bribe/GZDSR
+briber/M
+bribery/MS
+Brice/M
+brickbat/SM
+brick/GRDSM
+bricklayer/MS
+bricklaying/SM
+brickmason/S
+brickwork/SM
+brickyard/M
+bridal/S
+Bridalveil/M
+bridegroom/MS
+Bride/M
+bride/MS
+bridesmaid/MS
+Bridewell/M
+bridgeable/U
+bridged/U
+bridgehead/MS
+Bridgeport/M
+Bridger/M
+Bridges
+bridge/SDGM
+Bridget/M
+Bridgetown/M
+Bridgette/M
+Bridgett/M
+Bridgewater/M
+bridgework/MS
+bridging/M
+Bridgman/M
+Bridie/M
+bridled/U
+bridle/SDGM
+bridleway/S
+briefcase/SM
+briefed/C
+briefing/M
+briefness/MS
+briefs/C
+brief/YRDJPGTS
+Brien/M
+Brier/M
+brier/MS
+Brie/RSM
+Brietta/M
+brigade/GDSM
+brigadier/MS
+Brigadoon
+brigandage/MS
+brigand/MS
+brigantine/MS
+Brigg/MS
+Brigham/M
+brightener/M
+brighten/RDZG
+bright/GXTPSYNR
+Bright/M
+brightness/SM
+Brighton/M
+Brigida/M
+Brigid/M
+Brigit/M
+Brigitta/M
+Brigitte/M
+Brig/M
+brig/SM
+brilliance/MS
+brilliancy/MS
+brilliantine/MS
+brilliantness/M
+brilliant/PSY
+Brillo
+Brillouin/M
+brimful
+brimless
+brimmed
+brimming
+brim/SM
+brimstone/MS
+Brina/M
+Brindisi/M
+brindle/DSM
+brine/GMDSR
+briner/M
+Briney/M
+bringer/M
+bring/RGZS
+brininess/MS
+Brinkley/M
+brinkmanship/SM
+brink/MS
+Brinna/M
+Brinn/M
+Briny/M
+briny/PTSR
+brioche/SM
+Brion/M
+briquet's
+briquette/MGSD
+Brisbane/M
+brisket/SM
+briskness/MS
+brisk/YRDPGTS
+bristle/DSGM
+bristly/TR
+Bristol/M
+bristol/S
+Britain/M
+Brita/M
+Britannia/M
+Britannic
+Britannica/M
+britches
+Briticism/MS
+Britisher/M
+Britishly/M
+British/RYZ
+Brit/MS
+Britney/M
+Britni/M
+Briton/MS
+Britta/M
+Brittaney/M
+Brittani/M
+Brittan/M
+Brittany/MS
+Britte/M
+Britten/M
+Britteny/M
+brittleness/MS
+brittle/YTPDRSG
+Britt/MN
+Brittne/M
+Brittney/M
+Brittni/M
+Brnaba/M
+Brnaby/M
+Brno/M
+broach/DRSG
+broacher/M
+broadband
+broadcaster/M
+broadcast/RSGZJ
+broadcasts/A
+broadcloth/M
+broadcloths
+broaden/JGRDZ
+broadleaved
+broadloom/SM
+broadminded/P
+broadness/S
+broadsheet/MS
+broadside/SDGM
+broadsword/MS
+broad/TXSYRNP
+Broadway/SM
+Brobdingnagian
+Brobdingnag/M
+brocade/DSGM
+broccoli/MS
+brochette/SM
+brochure/SM
+Brockie/M
+Brock/M
+Brocky/M
+Broddie/M
+Broddy/M
+Broderick/M
+Broderic/M
+Brodie/M
+Brod/M
+Brody/M
+brogan/MS
+Broglie/M
+brogue/MS
+broiler/M
+broil/RDSGZ
+brokenhearted/Y
+brokenness/MS
+broken/YP
+brokerage/MS
+broker/DMG
+broke/RGZ
+Brok/M
+bromide/MS
+bromidic
+bromine/MS
+bronchial
+bronchi/M
+bronchiolar
+bronchiole/MS
+bronchiolitis
+bronchitic/S
+bronchitis/MS
+broncho's
+bronchus/M
+broncobuster/SM
+bronco/SM
+bronc/S
+Bron/M
+Bronnie/M
+Bronny/M
+Bronson/M
+Bronte
+brontosaur/SM
+brontosaurus/SM
+Bronx/M
+bronzed/M
+bronze/SRDGM
+bronzing/M
+brooch/MS
+brooder/M
+broodiness/M
+brooding/Y
+broodmare/SM
+brood/SMRDGZ
+broody/PTR
+Brookdale/M
+Brooke/M
+Brookfield/M
+Brookhaven/M
+brooklet/MS
+Brooklyn/M
+Brookmont/M
+brook/SGDM
+brookside
+Brook/SM
+broom/SMDG
+broomstick/MS
+Bros
+Brose/M
+bro/SH
+bros/S
+brothel/MS
+brother/DYMG
+brotherhood/SM
+brotherliness/MS
+brotherly/P
+broths
+broth/ZMR
+brougham/MS
+brought
+brouhaha/MS
+browbeat/NSG
+brow/MS
+Brownell/M
+Browne/M
+Brownian/M
+Brownie/MS
+brownie/MTRS
+browning/M
+Browning/M
+brownish
+Brown/MG
+brownness/MS
+brownout/MS
+brownstone/MS
+Brownsville/M
+brown/YRDMSJGTP
+browse
+browser/M
+brows/SRDGZ
+brr
+Br/TMN
+Brubeck/M
+brucellosis/M
+Bruce/M
+Brucie/M
+Bruckner/M
+Bruegel/M
+Brueghel's
+bruin/MS
+bruised/U
+bruise/JGSRDZ
+bruiser/M
+Bruis/M
+bruit/DSG
+Brumidi/M
+Brummel/M
+brunch/MDSG
+Brunei/M
+Brunelleschi/M
+brunet/S
+brunette/SM
+Brunhilda/M
+Brunhilde/M
+Bruno/M
+Brunswick/M
+brunt/GSMD
+brusher/M
+brushfire/MS
+brushlike
+brush/MSRDG
+brushoff/S
+brushwood/SM
+brushwork/MS
+brushy/R
+brusqueness/MS
+brusque/PYTR
+Brussels
+brutality/SM
+brutalization/SM
+brutalized/U
+brutalizes/AU
+brutalize/SDG
+brutal/Y
+brute/DSRGM
+brutishness/SM
+brutish/YP
+Brutus/M
+Bruxelles/M
+Bryana/M
+Bryan/M
+Bryant/M
+Bryanty/M
+Bryce/M
+Bryna/M
+Bryn/M
+Brynna/M
+Brynne/M
+Brynner/M
+Brynn/RM
+Bryon/M
+Brzezinski/M
+B's
+BS
+BSA
+BSD
+Btu
+BTU
+BTW
+bu
+bubblegum/S
+bubbler/M
+bubble/RSDGM
+bubbly/TRS
+Buber/M
+bub/MS
+buboes
+bubo/M
+bubonic
+buccaneer/GMDS
+Buchanan/M
+Bucharest/M
+Buchenwald/M
+Buchwald/M
+buckaroo/SM
+buckboard/SM
+bucker/M
+bucketful/MS
+bucket/SGMD
+buckeye/SM
+buck/GSDRM
+buckhorn/M
+Buckie/M
+Buckingham/M
+buckled/U
+buckler/MDG
+buckle/RSDGMZ
+buckles/U
+Buckley/M
+buckling's
+buckling/U
+Buck/M
+Buckner/M
+buckram/GSDM
+bucksaw/SM
+buckshot/MS
+buckskin/SM
+buckteeth
+bucktooth/DM
+buckwheat/SM
+Bucky/M
+bucolically
+bucolic/S
+Budapest/M
+budded
+Buddha/MS
+Buddhism/SM
+Buddhist/SM
+Buddie/M
+budding/S
+Budd/M
+buddy/GSDM
+Buddy/M
+budge/GDS
+budgerigar/MS
+budgetary
+budgeter/M
+budget/GMRDZS
+budgie/MS
+budging/U
+Bud/M
+bud/MS
+Budweiser/MS
+Buehring/M
+Buena/M
+buffaloes
+Buffalo/M
+buffalo/MDG
+buff/ASGD
+buffered/U
+bufferer/M
+buffer/RDMSGZ
+buffet/GMDJS
+bufflehead/M
+buffoonery/MS
+buffoonish
+buffoon/SM
+buff's
+Buffy/M
+Buford/M
+bugaboo/SM
+Bugatti/M
+bugbear/SM
+bug/CS
+bugeyed
+bugged/C
+buggered
+buggering
+bugger/SCM!
+buggery/M
+bugging/C
+buggy/RSMT
+bugle/GMDSRZ
+bugler/M
+bug's
+Buick/M
+builder/SM
+building/SM
+build/SAG
+buildup/MS
+built/AUI
+Buiron/M
+Bujumbura/M
+Bukhara/M
+Bukharin/M
+Bulawayo/M
+Bulba/M
+bulb/DMGS
+bulblet
+bulbous
+Bulfinch/M
+Bulganin/M
+Bulgaria/M
+Bulgarian/S
+bulge/DSGM
+bulgy/RT
+bulimarexia/S
+bulimia/MS
+bulimic/S
+bulk/GDRMS
+bulkhead/SDM
+bulkiness/SM
+bulky/RPT
+bulldogged
+bulldogger
+bulldogging
+bulldog/SM
+bulldoze/GRSDZ
+bulldozer/M
+bullet/GMDS
+bulletin/SGMD
+bulletproof/SGD
+bullfighter/M
+bullfighting/M
+bullfight/SJGZMR
+bullfinch/MS
+bullfrog/SM
+bullhead/DMS
+bullheadedness/SM
+bullheaded/YP
+bullhide
+bullhorn/SM
+bullied/M
+bullion/SM
+bullishness/SM
+bullish/PY
+bull/MDGS
+Bullock/M
+bullock/MS
+bullpen/MS
+bullring/SM
+bullseye
+bullshit/MS!
+bullshitted/!
+bullshitter/S!
+bullshitting/!
+bullwhackers
+Bullwinkle/M
+bullyboy/MS
+bullying/M
+bully/TRSDGM
+bulrush/SM
+Bultmann/M
+bulwark/GMDS
+bumblebee/MS
+bumble/JGZRSD
+bumbler/M
+bumbling/Y
+Bumbry/M
+bummed/M
+bummer/MS
+bummest
+bumming/M
+bumper/DMG
+bump/GZDRS
+bumpiness/MS
+bumpkin/MS
+Bumppo/M
+bumptiousness/SM
+bumptious/PY
+bumpy/PRT
+bum/SM
+Bunche/M
+bunch/MSDG
+bunchy/RT
+buncombe's
+bunco's
+Bundestag/M
+bundled/U
+bundle/GMRSD
+bundler/M
+Bundy/M
+bungalow/MS
+bungee/SM
+bung/GDMS
+bunghole/MS
+bungle/GZRSD
+bungler/M
+bungling/Y
+Bunin/M
+bunion/SM
+bunk/CSGDR
+Bunker/M
+bunker's/C
+bunker/SDMG
+bunkhouse/SM
+bunkmate/MS
+bunko's
+bunk's
+bunkum/SM
+Bunnie/M
+Bunni/M
+Bunny/M
+bunny/SM
+Bunsen/SM
+bun/SM
+bunt/GJZDRS
+bunting/M
+Buñuel/M
+Bunyan/M
+buoyancy/MS
+buoyant/Y
+buoy/SMDG
+Burbank/M
+burbler/M
+burble/RSDG
+burbs
+Burch/M
+burden's
+burdensomeness/M
+burdensome/PY
+burden/UGDS
+burdock/SM
+bureaucracy/MS
+bureaucratically
+bureaucratic/U
+bureaucratization/MS
+bureaucratize/SDG
+bureaucrat/MS
+bureau/MS
+burgeon/GDS
+burger/M
+Burger/M
+Burgess/M
+burgess/MS
+burgher/M
+burgh/MRZ
+burghs
+burglarize/GDS
+burglarproof/DGS
+burglar/SM
+burglary/MS
+burgle/SDG
+burgomaster/SM
+Burgoyne/M
+Burg/RM
+burg/SZRM
+Burgundian/S
+Burgundy/MS
+burgundy/S
+burial/ASM
+buried/U
+burier/M
+Burke/M
+Burk/SM
+burlap/MS
+burler/M
+burlesquer/M
+burlesque/SRDMYG
+burley/M
+Burlie/M
+burliness/SM
+Burlingame/M
+Burlington/M
+Burl/M
+burl/SMDRG
+burly/PRT
+Burma/M
+Burmese
+bur/MYS
+burnable/S
+Burnaby/M
+Burnard/M
+burned/U
+Burne/MS
+burner/M
+Burnett/M
+burn/GZSDRBJ
+burning/Y
+burnisher/M
+burnish/GDRSZ
+burnoose/MS
+burnout/MS
+Burns
+Burnside/MS
+burnt/YP
+burp/SGMD
+burr/GSDRM
+Burris/M
+burrito/S
+Burr/M
+burro/SM
+Burroughs/M
+burrower/M
+burrow/GRDMZS
+bursae
+bursa/M
+Bursa/M
+bursar/MS
+bursary/MS
+bursitis/MS
+burster/M
+burst/SRG
+Burtie/M
+Burt/M
+Burton/M
+Burty/M
+Burundian/S
+Burundi/M
+bury/ASDG
+busboy/MS
+busby/SM
+Busch/M
+buses/A
+busgirl/S
+bus/GMDSJ
+bushel/MDJSG
+Bushido/M
+bushiness/MS
+bushing/M
+bush/JMDSRG
+bushland
+Bush/M
+bushman/M
+bushmaster/SM
+bushmen
+Bushnell/M
+bushwhacker/M
+bushwhacking/M
+bushwhack/RDGSZ
+bushy/PTR
+busily
+businesslike
+businessman/M
+businessmen
+business/MS
+businesspeople
+businessperson/S
+businesswoman/M
+businesswomen
+busker/M
+busk/GRM
+buskin/SM
+bus's/A
+buss/D
+bustard/MS
+buster/M
+bustle/GSD
+bustling/Y
+bust/MSDRGZ
+busty/RT
+busybody/MS
+busy/DSRPTG
+busyness/MS
+busywork/SM
+but/ACS
+butane/MS
+butcherer/M
+butcher/MDRYG
+butchery/MS
+Butch/M
+butch/RSZ
+butene/M
+Butler/M
+butler/SDMG
+butted/A
+butte/MS
+butterball/MS
+buttercup/SM
+buttered/U
+butterfat/MS
+Butterfield/M
+butterfingered
+butterfingers/M
+butterfly/MGSD
+buttermilk/MS
+butternut/MS
+butter/RDMGZ
+butterscotch/SM
+buttery/TRS
+butting/M
+buttock/SGMD
+buttoner/M
+buttonhole/GMRSD
+buttonholer/M
+button's
+button/SUDG
+buttonweed
+buttonwood/SM
+buttress/MSDG
+butt/SGZMDR
+butyl/M
+butyrate/M
+buxomness/M
+buxom/TPYR
+Buxtehude/M
+buyback/S
+buyer/M
+buyout/S
+buy/ZGRS
+buzzard/MS
+buzz/DSRMGZ
+buzzer/M
+buzzword/SM
+buzzy
+bx
+bxs
+byelaw's
+Byelorussia's
+bye/MZS
+Byers/M
+bygone/S
+bylaw/SM
+byliner/M
+byline/RSDGM
+BYOB
+bypass/GSDM
+bypath/M
+bypaths
+byplay/S
+byproduct/SM
+Byram/M
+Byran/M
+Byrann/M
+Byrd/M
+byre/SM
+Byrle/M
+Byrne/M
+byroad/MS
+Byrom/M
+Byronic
+Byronism/M
+Byron/M
+bystander/SM
+byte/SM
+byway/SM
+byword/SM
+byzantine
+Byzantine/S
+Byzantium/M
+by/ZR
+C
+ca
+CA
+cabala/MS
+caballed
+caballero/SM
+caballing
+cabal/SM
+cabana/MS
+cabaret/SM
+cabbage/MGSD
+cabbed
+cabbing
+cabby's
+cabdriver/SM
+caber/M
+Cabernet/M
+cabinetmaker/SM
+cabinetmaking/MS
+cabinet/MS
+cabinetry/SM
+cabinetwork/MS
+cabin/GDMS
+cablecast/SG
+cable/GMDS
+cablegram/SM
+cabochon/MS
+caboodle/SM
+caboose/MS
+Cabot/M
+Cabrera/M
+Cabrini/M
+cabriolet/MS
+cab/SMR
+cabstand/MS
+cacao/SM
+cacciatore
+cache/DSRGM
+cachepot/MS
+cachet/MDGS
+Cacilia/M
+Cacilie/M
+cackler/M
+cackle/RSDGZ
+cackly
+CACM
+cacophonist
+cacophonous
+cacophony/SM
+cacti
+cactus/M
+CAD
+cadaverous/Y
+cadaver/SM
+caddishness/SM
+caddish/PY
+Caddric/M
+caddy/GSDM
+cadence/CSM
+cadenced
+cadencing
+cadent/C
+cadenza/MS
+cadet/SM
+Cadette/S
+cadge/DSRGZ
+cadger/M
+Cadillac/MS
+Cadiz/M
+Cad/M
+cadmium/MS
+cadre/SM
+cad/SM
+caducei
+caduceus/M
+Caedmon/M
+Caesar/MS
+caesura/SM
+café/MS
+cafeteria/SM
+caffeine/SM
+caftan/SM
+caged/U
+Cage/M
+cage/MZGDRS
+cager/M
+cagey/P
+cagier
+cagiest
+cagily
+caginess/MS
+Cagney/M
+Cahokia/M
+cahoot/MS
+Cahra/M
+CAI
+Caiaphas/M
+caiman's
+Caine/M
+Cain/MS
+Cairistiona/M
+cairn/SDM
+Cairo/M
+caisson/SM
+caitiff/MS
+Caitlin/M
+Caitrin/M
+cajole/LGZRSD
+cajolement/MS
+cajoler/M
+cajolery/SM
+Cajun/MS
+cake/MGDS
+cakewalk/SMDG
+calabash/SM
+calaboose/MS
+Calais/M
+calamari/S
+calamine/GSDM
+calamitousness/M
+calamitous/YP
+calamity/MS
+cal/C
+calcareousness/M
+calcareous/PY
+calciferous
+calcification/M
+calcify/XGNSD
+calcimine/GMSD
+calcine/SDG
+calcite/SM
+calcium/SM
+Calcomp/M
+CalComp/M
+CALCOMP/M
+calculability/IM
+calculable/IP
+calculate/AXNGDS
+calculated/PY
+calculatingly
+calculating/U
+calculation/AM
+calculative
+calculator/SM
+calculi
+calculus/M
+Calcutta/M
+caldera/SM
+Calder/M
+Calderon/M
+caldron's
+Caldwell/M
+Caleb/M
+Caledonia/M
+Cale/M
+calendar/MDGS
+calender/MDGS
+calf/M
+calfskin/SM
+Calgary/M
+Calhoun/M
+Caliban/M
+caliber/SM
+calibrated/U
+calibrater's
+calibrate/XNGSD
+calibrating/A
+calibration/M
+calibrator/MS
+calicoes
+calico/M
+Calida/M
+Calif/M
+California/M
+Californian/MS
+californium/SM
+calif's
+Caligula/M
+Cali/M
+caliper/SDMG
+caliphate/SM
+caliph/M
+caliphs
+calisthenic/S
+calisthenics/M
+Callaghan/M
+call/AGRDBS
+Callahan/M
+calla/MS
+Calla/MS
+Callao/M
+callback/S
+Callean/M
+called/U
+callee/M
+caller/MS
+Calley/M
+Callida/M
+Callie/M
+calligrapher/M
+calligraphic
+calligraphist/MS
+calligraph/RZ
+calligraphy/MS
+Calli/M
+calling/SM
+Calliope/M
+calliope/SM
+callisthenics's
+Callisto/M
+callosity/MS
+callousness/SM
+callous/PGSDY
+callowness/MS
+callow/RTSP
+callus/SDMG
+Cally/M
+calming/Y
+calmness/MS
+calm/PGTYDRS
+Cal/MY
+Caloocan/M
+caloric/S
+calorie/SM
+calorific
+calorimeter/MS
+calorimetric
+calorimetry/M
+Caltech/M
+Calumet/M
+calumet/MS
+calumniate/NGSDX
+calumniation/M
+calumniator/SM
+calumnious
+calumny/MS
+calvary/M
+Calvary/M
+calve/GDS
+Calvert/M
+calves/M
+Calvinism/MS
+Calvinistic
+Calvinist/MS
+Calvin/M
+Calv/M
+calyces's
+Calypso/M
+calypso/SM
+calyx/MS
+Ca/M
+CAM
+Camacho/M
+Camala/M
+camaraderie/SM
+camber/DMSG
+cambial
+cambium/SM
+Cambodia/M
+Cambodian/S
+Cambrian/S
+cambric/MS
+Cambridge/M
+camcorder/S
+Camden/M
+camelhair's
+Camella/M
+Camellia/M
+camellia/MS
+Camel/M
+Camelopardalis/M
+Camelot/M
+camel/SM
+Camembert/MS
+cameo/GSDM
+camerae
+cameraman/M
+cameramen
+camera/MS
+camerawoman
+camerawomen
+Cameron/M
+Cameroonian/S
+Cameroon/SM
+came/N
+Camey/M
+Camila/M
+Camile/M
+Camilla/M
+Camille/M
+Cami/M
+Camino/M
+camion/M
+camisole/MS
+Cam/M
+cammed
+Cammie/M
+Cammi/M
+cam/MS
+Cammy/M
+Camoens/M
+camomile's
+camouflage/DRSGZM
+camouflager/M
+campaigner/M
+campaign/ZMRDSG
+campanile/SM
+campanological
+campanologist/SM
+campanology/MS
+Campbell/M
+Campbellsport/M
+camper/SM
+campesinos
+campest
+campfire/SM
+campground/MS
+camphor/MS
+Campinas/M
+camping/S
+Campos
+camp's
+camp/SCGD
+campsite/MS
+campus/GSDM
+campy/RT
+Camry/M
+camshaft/SM
+Camus/M
+Canaanite/SM
+Canaan/M
+Canada/M
+Canadianism/SM
+Canadian/S
+Canad/M
+Canaletto/M
+canalization/MS
+canalize/GSD
+canal/SGMD
+canapé/S
+canard/MS
+Canaries
+canary/SM
+canasta/SM
+Canaveral/M
+Canberra/M
+cancan/SM
+cancelate/D
+canceled/U
+canceler/M
+cancellation/MS
+cancel/RDZGS
+cancer/MS
+Cancer/MS
+cancerous/Y
+Cancun/M
+Candace/M
+candelabra/S
+candelabrum/M
+Candice/M
+candidacy/MS
+Candida/M
+candidate/SM
+candidature/S
+Candide/M
+candidly/U
+candidness/SM
+candid/TRYPS
+Candie/M
+Candi/SM
+candle/GMZRSD
+candlelight/SMR
+candlelit
+candlepower/SM
+candler/M
+candlestick/SM
+Candlewick/M
+candlewick/MS
+candor/MS
+Candra/M
+candy/GSDM
+Candy/M
+canebrake/SM
+caner/M
+cane/SM
+canine/S
+caning/M
+Canis/M
+canister/SGMD
+cankerous
+canker/SDMG
+Can/M
+can/MDRSZGJ
+cannabis/MS
+canned
+cannelloni
+canner/SM
+cannery/MS
+Cannes
+cannibalism/MS
+cannibalistic
+cannibalization/SM
+cannibalize/GSD
+cannibal/SM
+cannily/U
+canninesses
+canniness/UM
+canning/M
+cannister/SM
+cannonade/SDGM
+cannonball/SGDM
+Cannon/M
+cannon/SDMG
+cannot
+canny/RPUT
+canoe/DSGM
+canoeist/SM
+Canoga/M
+canonic
+canonicalization
+canonicalize/GSD
+canonical/SY
+canonist/M
+canonization/MS
+canonized/U
+canonize/SDG
+canon/SM
+Canopus/M
+canopy/GSDM
+canst
+can't
+cantabile/S
+Cantabrigian
+cantaloupe/MS
+cantankerousness/SM
+cantankerous/PY
+cantata/SM
+cant/CZGSRD
+canted/IA
+canteen/MS
+Canterbury/M
+canter/CM
+cantered
+cantering
+canticle/SM
+cantilever/SDMG
+canto/MS
+cantonal
+Cantonese/M
+Canton/M
+cantonment/SM
+canton/MGSLD
+Cantor/M
+cantor/MS
+Cantrell/M
+cant's
+cants/A
+Cantu/M
+Canute/M
+canvasback/MS
+canvas/RSDMG
+canvasser/M
+canvass/RSDZG
+canyon/MS
+CAP
+capability/ISM
+capableness/IM
+capable/PI
+capabler
+capablest
+capably/I
+capaciousness/MS
+capacious/PY
+capacitance/SM
+capacitate/V
+capacitive/Y
+capacitor/MS
+capacity/IMS
+caparison/SDMG
+Capek/M
+Capella/M
+caper/GDM
+capeskin/SM
+cape/SM
+Capet/M
+Capetown/M
+Caph/M
+capillarity/MS
+capillary/S
+Capistrano/M
+capitalism/SM
+capitalistic
+capitalistically
+capitalist/SM
+capitalization/SMA
+capitalized/AU
+capitalizer/M
+capitalize/RSDGZ
+capitalizes/A
+capital/SMY
+capita/M
+Capitan/M
+capitation/CSM
+Capitoline/M
+Capitol/MS
+capitol/SM
+capitulate/AXNGSD
+capitulation/MA
+caplet/S
+cap/MDRSZB
+Capone/M
+capon/SM
+capo/SM
+Capote/M
+capped/UA
+capping/M
+cappuccino/MS
+Cappy/M
+Capra/M
+Caprice/M
+caprice/MS
+capriciousness/MS
+capricious/PY
+Capricorn/MS
+Capri/M
+caps/AU
+capsicum/MS
+capsize/SDG
+capstan/MS
+capstone/MS
+capsular
+capsule/MGSD
+capsulize/GSD
+captaincy/MS
+captain/SGDM
+caption/GSDRM
+captiousness/SM
+captious/PY
+captivate/XGNSD
+captivation/M
+captivator/SM
+captive/MS
+captivity/SM
+Capt/M
+captor/SM
+capture/AGSD
+capturer/MS
+capt/V
+Capulet/M
+Caputo/M
+Caracalla/M
+Caracas/M
+caracul's
+carafe/SM
+Caralie/M
+Cara/M
+caramelize/SDG
+caramel/MS
+carapace/SM
+carapaxes
+carat/SM
+Caravaggio/M
+caravan/DRMGS
+caravaner/M
+caravansary/MS
+caravanserai's
+caravel/MS
+caraway/MS
+carbide/MS
+carbine/MS
+carbohydrate/MS
+carbolic
+Carboloy/M
+carbonaceous
+carbonate/SDXMNG
+carbonation/M
+Carbondale/M
+Carbone/MS
+carbonic
+carboniferous
+Carboniferous
+carbonization/SAM
+carbonizer/AS
+carbonizer's
+carbonizes/A
+carbonize/ZGRSD
+carbon/MS
+carbonyl/M
+carborundum
+Carborundum/MS
+carboy/MS
+carbuncle/SDM
+carbuncular
+carburetor/MS
+carburetter/S
+carburettor/SM
+carcase/MS
+carcass/SM
+Carce/M
+carcinogenic
+carcinogenicity/MS
+carcinogen/SM
+carcinoma/SM
+cardamom/MS
+cardboard/MS
+card/EDRSG
+Cardenas/M
+carder/MS
+carder's/E
+cardholders
+cardiac/S
+Cardiff/M
+cardigan/SM
+cardinality/SM
+cardinal/SYM
+carding/M
+Cardin/M
+Cardiod/M
+cardiogram/MS
+cardiograph/M
+cardiographs
+cardioid/M
+cardiologist/SM
+cardiology/MS
+cardiomegaly/M
+cardiopulmonary
+cardiovascular
+card's
+cardsharp/ZSMR
+CARE
+cared/U
+careen/DSG
+careerism/M
+careerist/MS
+career/SGRDM
+carefree
+carefuller
+carefullest
+carefulness/MS
+careful/PY
+caregiver/S
+carelessness/MS
+careless/YP
+Care/M
+Carena/M
+Caren/M
+carer/M
+care/S
+Caresa/M
+Caressa/M
+Caresse/M
+caresser/M
+caressing/Y
+caressive/Y
+caress/SRDMVG
+caretaker/SM
+caret/SM
+careworn
+Carey/M
+carfare/MS
+cargoes
+cargo/M
+carhopped
+carhopping
+carhop/SM
+Caria/M
+Caribbean/S
+Carib/M
+caribou/MS
+caricature/GMSD
+caricaturisation
+caricaturist/MS
+caricaturization
+Carie/M
+caries/M
+carillonned
+carillonning
+carillon/SM
+Caril/M
+Carilyn/M
+Cari/M
+Carina/M
+Carine/M
+caring/U
+Carin/M
+Cariotta/M
+carious
+Carissa/M
+Carita/M
+Caritta/M
+carjack/GSJDRZ
+Carla/M
+Carlee/M
+Carleen/M
+Carlene/M
+Carlen/M
+Carletonian/M
+Carleton/M
+Carley/M
+Carlie/M
+Carlina/M
+Carline/M
+Carling/M
+Carlin/M
+Carlita/M
+Carl/MNG
+carload/MSG
+Carlo/SM
+Carlota/M
+Carlotta/M
+Carlsbad/M
+Carlson/M
+Carlton/M
+Carlye/M
+Carlyle/M
+Carly/M
+Carlyn/M
+Carlynne/M
+Carlynn/M
+Carma/M
+Carmela/M
+Carmelia/M
+Carmelina/M
+Carmelita/M
+Carmella/M
+Carmelle/M
+Carmel/M
+Carmelo/M
+Carmencita/M
+Carmen/M
+Carmichael/M
+Carmina/M
+Carmine/M
+carmine/MS
+Carmita/M
+Car/MNY
+Carmon/M
+carnage/MS
+carnality/SM
+carnal/Y
+Carnap/M
+carnation/IMS
+Carnegie/M
+carnelian/SM
+Carney/M
+carney's
+carnival/MS
+carnivore/SM
+carnivorousness/MS
+carnivorous/YP
+Carnot/M
+Carny/M
+carny/SDG
+carob/SM
+Carola/M
+Carolan/M
+Carolann/M
+Carolee/M
+Carole/M
+caroler/M
+Carolina/MS
+Caroline/M
+Carolingian
+Carolinian/S
+Carolin/M
+Caroljean/M
+Carol/M
+carol/SGZMRD
+Carolus/M
+Carolyne/M
+Carolyn/M
+Carolynn/M
+Caro/M
+carom/GSMD
+Caron/M
+carotene/MS
+carotid/MS
+carousal/MS
+carousel/MS
+carouser/M
+carouse/SRDZG
+carpal/SM
+Carpathian/MS
+carpel/SM
+carpenter/DSMG
+carpentering/M
+Carpenter/M
+carpentry/MS
+carper/M
+carpetbagged
+carpetbagger/MS
+carpetbagging
+carpetbag/MS
+carpeting/M
+carpet/MDJGS
+carpi/M
+carping/Y
+carp/MDRSGZ
+carpool/DGS
+carport/MS
+carpus/M
+carrageen/M
+Carree/M
+carrel/SM
+carriage/SM
+carriageway/SM
+Carrie/M
+carrier/M
+Carrier/M
+Carrillo/M
+Carri/M
+carrion/SM
+Carrissa/M
+Carr/M
+Carroll/M
+Carrol/M
+carrot/MS
+carroty/RT
+carrousel's
+carryall/MS
+Carry/MR
+carryout/S
+carryover/S
+carry/RSDZG
+carsickness/SM
+carsick/P
+Carson/M
+cartage/MS
+cartel/SM
+carte/M
+carter/M
+Carter/M
+Cartesian
+Carthage/M
+Carthaginian/S
+carthorse/MS
+Cartier/M
+cartilage/MS
+cartilaginous
+cartload/MS
+cart/MDRGSZ
+Cart/MR
+cartographer/MS
+cartographic
+cartography/MS
+carton/GSDM
+cartoon/GSDM
+cartoonist/MS
+cartridge/SM
+cartwheel/MRDGS
+Cartwright/M
+Carty/RM
+Caruso/M
+carve/DSRJGZ
+carven
+carver/M
+Carver/M
+carving/M
+caryatid/MS
+Caryl/M
+Cary/M
+Caryn/M
+car/ZGSMDR
+casaba/SM
+Casablanca/M
+Casals/M
+Casandra/M
+Casanova/SM
+Casar/M
+casbah/M
+cascade/MSDG
+Cascades/M
+cascara/MS
+casebook/SM
+case/DSJMGL
+cased/U
+caseharden/SGD
+casein/SM
+caseload/MS
+Case/M
+casement/SM
+caseworker/M
+casework/ZMRS
+Casey/M
+cashbook/SM
+cashew/MS
+cash/GZMDSR
+cashier/SDMG
+cashless
+Cash/M
+cashmere/MS
+Casie/M
+Casi/M
+casing/M
+casino/MS
+casket/SGMD
+cask/GSDM
+Caspar/M
+Casper/M
+Caspian
+Cass
+Cassandra/SM
+Cassandre/M
+Cassandry/M
+Cassatt/M
+Cassaundra/M
+cassava/MS
+casserole/MGSD
+cassette/SM
+Cassey/M
+cassia/MS
+Cassie/M
+Cassi/M
+cassino's
+Cassiopeia/M
+Cassite/M
+Cassius/M
+cassock/SDM
+Cassondra/M
+cassowary/SM
+Cassy/M
+Castaneda/M
+castanet/SM
+castaway/SM
+castellated
+caste/MHS
+caster/M
+cast/GZSJMDR
+castigate/XGNSD
+castigation/M
+castigator/SM
+Castile's
+Castillo/M
+casting/M
+castle/GMSD
+castoff/S
+Castor/M
+castor's
+castrate/DSNGX
+castration/M
+Castries/M
+Castro/M
+casts/A
+casualness/SM
+casual/SYP
+casualty/SM
+casuistic
+casuist/MS
+casuistry/SM
+cataclysmal
+cataclysmic
+cataclysm/MS
+catacomb/MS
+catafalque/SM
+Catalan/MS
+catalepsy/MS
+cataleptic/S
+Catalina/M
+cataloger/M
+catalog/SDRMZG
+Catalonia/M
+catalpa/SM
+catalysis/M
+catalyst/SM
+catalytic
+catalytically
+catalyze/DSG
+catamaran/MS
+catapult/MGSD
+cataract/MS
+Catarina/M
+catarrh/M
+catarrhs
+catastrophe/SM
+catastrophic
+catastrophically
+catatonia/MS
+catatonic/S
+Catawba/M
+catbird/MS
+catboat/SM
+catcall/SMDG
+catchable/U
+catchall/MS
+catch/BRSJLGZ
+catcher/M
+catchment/SM
+catchpenny/S
+catchphrase/S
+catchup/MS
+catchword/MS
+catchy/TR
+catechism/MS
+catechist/SM
+catechize/SDG
+catecholamine/MS
+categoric
+categorical/Y
+categorization/MS
+categorized/AU
+categorize/RSDGZ
+category/MS
+Cate/M
+catenate/NF
+catenation/MF
+catercorner
+caterer/M
+cater/GRDZ
+Caterina/M
+catering/M
+Caterpillar
+caterpillar/SM
+caterwaul/DSG
+catfish/MS
+catgut/SM
+Catha/M
+Catharina/M
+Catharine/M
+catharses
+catharsis/M
+cathartic/S
+Cathay/M
+cathedral/SM
+Cathee/M
+Catherina/M
+Catherine/M
+Catherin/M
+Cather/M
+Cathe/RM
+catheterize/GSD
+catheter/SM
+Cathie/M
+Cathi/M
+Cathleen/M
+Cathlene/M
+cathode/MS
+cathodic
+catholicism
+Catholicism/SM
+catholicity/MS
+catholic/MS
+Catholic/S
+Cathrine/M
+Cathrin/M
+Cathryn/M
+Cathyleen/M
+Cathy/M
+Catie/M
+Catiline/M
+Cati/M
+Catina/M
+cationic
+cation/MS
+catkin/SM
+Catlaina/M
+Catlee/M
+catlike
+Catlin/M
+catnapped
+catnapping
+catnap/SM
+catnip/MS
+Cato/M
+Catrina/M
+Catriona/M
+Catskill/SM
+cat/SMRZ
+catsup's
+cattail/SM
+catted
+cattery/M
+cattily
+cattiness/SM
+catting
+cattle/M
+cattleman/M
+cattlemen
+Catt/M
+catty/PRST
+Catullus/M
+CATV
+catwalk/MS
+Caty/M
+Caucasian/S
+Caucasoid/S
+Caucasus/M
+Cauchy/M
+caucus/SDMG
+caudal/Y
+caught/U
+cauldron/MS
+cauliflower/MS
+caulker/M
+caulk/JSGZRD
+causality/SM
+causal/YS
+causate/XVN
+causation/M
+causative/SY
+cause/DSRGMZ
+caused/U
+causeless
+causerie/MS
+causer/M
+causeway/SGDM
+caustically
+causticity/MS
+caustic/YS
+cauterization/SM
+cauterized/U
+cauterize/GSD
+cautionary
+cautioner/M
+caution/GJDRMSZ
+cautiousness's/I
+cautiousness/SM
+cautious/PIY
+cavalcade/MS
+cavalierness/M
+cavalier/SGYDP
+cavalryman/M
+cavalrymen
+cavalry/MS
+caveat/SM
+caveatted
+caveatting
+cave/GFRSD
+caveman/M
+cavemen
+Cavendish/M
+caver/M
+cavern/GSDM
+cavernous/Y
+cave's
+caviar/MS
+caviler/M
+cavil/SJRDGZ
+caving/MS
+cavity/MFS
+cavort/SDG
+Cavour/M
+caw/SMDG
+Caxton/M
+Caye/M
+Cayenne/M
+cayenne/SM
+Cayla/M
+Cayman/M
+cayman/SM
+cay's
+cay/SC
+Cayuga/M
+cayuse/SM
+Caz/M
+Cazzie/M
+c/B
+CB
+CBC
+Cb/M
+CBS
+cc
+Cchaddie/M
+CCTV
+CCU
+CD
+CDC/M
+Cd/M
+CDT
+Ce
+cease/DSCG
+ceasefire/S
+ceaselessness/SM
+ceaseless/YP
+ceasing/U
+Ceausescu/M
+Cebuano/M
+Cebu/M
+ceca
+cecal
+Cecelia/M
+Cece/M
+Cecile/M
+Ceciley/M
+Cecilia/M
+Cecilio/M
+Cecilius/M
+Cecilla/M
+Cecil/M
+Cecily/M
+cecum/M
+cedar/SM
+ceded/A
+cede/FRSDG
+ceder's/F
+ceder/SM
+cedes/A
+cedilla/SM
+ceding/A
+Ced/M
+Cedric/M
+ceilidh/M
+ceiling/MDS
+Ceil/M
+celandine/MS
+Celanese/M
+Celebes's
+celebrant/MS
+celebratedness/M
+celebrated/P
+celebrate/XSDGN
+celebration/M
+celebrator/MS
+celebratory
+celebrity/MS
+Cele/M
+Celene/M
+celerity/SM
+celery/SM
+Celesta/M
+celesta/SM
+Celeste/M
+celestial/YS
+Celestia/M
+Celestina/M
+Celestine/M
+Celestyna/M
+Celestyn/M
+Celia/M
+celibacy/MS
+celibate/SM
+Celie/M
+Celina/M
+Celinda/M
+Celine/M
+Celinka/M
+Celisse/M
+Celka/M
+cellarer/M
+cellar/RDMGS
+Celle/M
+cell/GMDS
+Cellini/M
+cellist/SM
+Cello/M
+cello/MS
+cellophane/SM
+cellphone/S
+cellular/SY
+cellulite/S
+celluloid/SM
+cellulose/SM
+Celsius/S
+Celtic/SM
+Celt/MS
+cementa
+cementer/M
+cementum/SM
+cement/ZGMRDS
+cemetery/MS
+cenobite/MS
+cenobitic
+cenotaph/M
+cenotaphs
+Cenozoic
+censer/MS
+censored/U
+censor/GDMS
+censorial
+censoriousness/MS
+censorious/YP
+censorship/MS
+censure/BRSDZMG
+censurer/M
+census/SDMG
+centaur/SM
+Centaurus/M
+centavo/SM
+centenarian/MS
+centenary/S
+centennial/YS
+center/AC
+centerboard/SM
+centered
+centerer/S
+centerfold/S
+centering/SM
+centerline/SM
+centerpiece/SM
+center's
+Centigrade
+centigrade/S
+centigram/SM
+centiliter/MS
+centime/SM
+centimeter/SM
+centipede/MS
+Centralia/M
+centralism/M
+centralist/M
+centrality/MS
+centralization/CAMS
+centralize/CGSD
+centralizer/SM
+centralizes/A
+central/STRY
+centrefold's
+Centrex
+CENTREX/M
+centric/F
+centrifugal/SY
+centrifugate/NM
+centrifugation/M
+centrifuge/GMSD
+centripetal/Y
+centrist/MS
+centroid/MS
+cent/SZMR
+centurion/MS
+century/MS
+CEO
+cephalic/S
+Cepheid
+Cepheus/M
+ceramicist/S
+ceramic/MS
+ceramist/MS
+cerate/MD
+Cerberus/M
+cereal/MS
+cerebellar
+cerebellum/MS
+cerebra
+cerebral/SY
+cerebrate/XSDGN
+cerebration/M
+cerebrum/MS
+cerement/SM
+ceremonial/YSP
+ceremoniousness/MS
+ceremoniousness's/U
+ceremonious/YUP
+ceremony/MS
+Cerenkov/M
+Ceres/M
+Cerf/M
+cerise/SM
+cerium/MS
+cermet/SM
+CERN/M
+certainer
+certainest
+certainty/UMS
+certain/UY
+cert/FS
+certifiable
+certifiably
+certificate/SDGM
+certification/AMC
+certified/U
+certifier/M
+certify/DRSZGNX
+certiorari/M
+certitude/ISM
+cerulean/MS
+Cervantes/M
+cervical
+cervices/M
+cervix/M
+Cesarean
+cesarean/S
+Cesare/M
+Cesar/M
+Cesaro/M
+cesium/MS
+cessation/SM
+cession/FAMSK
+Cessna/M
+cesspit/M
+cesspool/SM
+Cesya/M
+cetacean/S
+cetera/S
+Cetus/M
+Ceylonese
+Ceylon/M
+Cezanne/S
+cf
+CF
+CFC
+Cf/M
+CFO
+cg
+Chablis/SM
+Chaddie/M
+Chadd/M
+Chaddy/M
+Chadian/S
+Chad/M
+Chadwick/M
+chafe/GDSR
+chafer/M
+chaffer/DRG
+chafferer/M
+Chaffey/M
+chaff/GRDMS
+chaffinch/SM
+Chagall/M
+chagrin/DGMS
+Chaim/M
+chainlike
+chain's
+chainsaw/SGD
+chain/SGUD
+chairlady/M
+chairlift/MS
+chairman/MDGS
+chairmanship/MS
+chairmen
+chairperson/MS
+chair/SGDM
+chairwoman/M
+chairwomen
+chaise/SM
+chalcedony/MS
+Chaldea/M
+Chaldean/M
+chalet/SM
+chalice/DSM
+chalkboard/SM
+chalk/DSMG
+chalkiness/S
+chalkline
+chalky/RPT
+challenged/U
+challenger/M
+challenge/ZGSRD
+challenging/Y
+challis/SM
+Chalmers
+chamberer/M
+Chamberlain/M
+chamberlain/MS
+chambermaid/MS
+chamberpot/S
+Chambers/M
+chamber/SZGDRM
+chambray/MS
+chameleon/SM
+chamfer/DMGS
+chammy's
+chamois/DSMG
+chamomile/MS
+champagne/MS
+champaign/M
+champ/DGSZ
+champion/MDGS
+championship/MS
+Champlain/M
+chanced/M
+chance/GMRSD
+chancellery/SM
+chancellorship/SM
+chancellor/SM
+Chancellorsville/M
+chancel/SM
+Chance/M
+chancery/SM
+Chancey/M
+chanciness/S
+chancing/M
+chancre/SM
+chancy/RPT
+Chandal/M
+Chanda/M
+chandelier/SM
+Chandigarh/M
+Chandler/M
+chandler/MS
+Chandragupta/M
+Chandra/M
+Chandrasekhar/M
+Chandy/M
+Chanel/M
+Chane/M
+Chaney/M
+Changchun/M
+changeabilities
+changeability/UM
+changeableness/SM
+changeable/U
+changeably/U
+changed/U
+change/GZRSD
+changeless
+changeling/M
+changeover/SM
+changer/M
+changing/U
+Chang/M
+Changsha/M
+Chan/M
+Channa/M
+channeler/M
+channeling/M
+channelization/SM
+channelize/GDS
+channellings
+channel/MDRZSG
+Channing/M
+chanson/SM
+Chantalle/M
+Chantal/M
+chanter/M
+chanteuse/MS
+chantey/SM
+chanticleer/SM
+Chantilly/M
+chantry/MS
+chant/SJGZMRD
+chanty's
+Chanukah's
+Chao/M
+chaos/SM
+chaotic
+chaotically
+chaparral/MS
+chapbook/SM
+chapeau/MS
+chapel/MS
+chaperonage/MS
+chaperoned/U
+chaperone's
+chaperon/GMDS
+chaplaincy/MS
+chaplain/MS
+chaplet/SM
+Chaplin/M
+Chapman/M
+chap/MS
+Chappaquiddick/M
+chapped
+chapping
+chapter/SGDM
+Chara
+charabanc/MS
+characterful
+characteristically/U
+characteristic/SM
+characterizable/MS
+characterization/MS
+characterize/DRSBZG
+characterized/U
+characterizer/M
+characterless
+character/MDSG
+charade/SM
+charbroil/SDG
+charcoal/MGSD
+Chardonnay
+chardonnay/S
+chard/SM
+chargeableness/M
+chargeable/P
+charged/U
+charge/EGRSDA
+charger/AME
+chargers
+char/GS
+Charil/M
+charily
+chariness/MS
+Charin/M
+charioteer/GSDM
+Chariot/M
+chariot/SMDG
+Charis
+charisma/M
+charismata
+charismatically
+charismatic/S
+Charissa/M
+Charisse/M
+charitablenesses
+charitableness/UM
+charitable/UP
+charitably/U
+Charita/M
+Charity/M
+charity/MS
+charlady/M
+Charla/M
+charlatanism/MS
+charlatanry/SM
+charlatan/SM
+Charlean/M
+Charleen/M
+Charlemagne/M
+Charlena/M
+Charlene/M
+Charles/M
+Charleston/SM
+Charley/M
+Charlie/M
+Charline/M
+Charlot/M
+Charlotta/M
+Charlotte/M
+Charlottesville/M
+Charlottetown/M
+Charlton/M
+Charmaine/M
+Charmain/M
+Charmane/M
+charmer/M
+Charmian/M
+Charmine/M
+charming/RYT
+Charmin/M
+Charmion/M
+charmless
+charm/SGMZRD
+Charolais
+Charo/M
+Charon/M
+charred
+charring
+charted/U
+charter/AGDS
+chartered/U
+charterer/SM
+charter's
+chartist/SM
+Chartres/M
+chartreuse/MS
+chartroom/S
+chart/SJMRDGBZ
+charwoman/M
+charwomen
+Charybdis/M
+Charyl/M
+chary/PTR
+Chas
+chase/DSRGZ
+Chase/M
+chaser/M
+chasing/M
+Chasity/M
+chasm/SM
+chassis/M
+chastely
+chasteness/SM
+chasten/GSD
+chaste/UTR
+chastisement/SM
+chastiser/M
+chastise/ZGLDRS
+Chastity/M
+chastity/SM
+chastity's/U
+chasuble/SM
+Chateaubriand
+château/M
+chateaus
+châteaux
+châtelaine/SM
+chat/MS
+Chattahoochee/M
+Chattanooga/M
+chatted
+chattel/MS
+chatterbox/MS
+chatterer/M
+Chatterley/M
+chatter/SZGDRY
+Chatterton/M
+chattily
+chattiness/SM
+chatting
+chatty/RTP
+Chaucer/M
+chauffeur/GSMD
+Chaunce/M
+Chauncey/M
+Chautauqua/M
+chauvinism/MS
+chauvinistic
+chauvinistically
+chauvinist/MS
+Chavez/M
+chaw
+Chayefsky/M
+cheapen/DG
+cheapish
+cheapness/MS
+cheapskate/MS
+cheap/YRNTXSP
+cheater/M
+cheat/RDSGZ
+Chechen/M
+Chechnya/M
+checkable/U
+checkbook/MS
+checked/UA
+checkerboard/MS
+checker/DMG
+check/GZBSRDM
+checklist/S
+checkmate/MSDG
+checkoff/SM
+checkout/S
+checkpoint/MS
+checkroom/MS
+check's/A
+checks/A
+checksummed
+checksumming
+checksum/SM
+checkup/MS
+Cheddar/MS
+cheddar/S
+cheekbone/SM
+cheek/DMGS
+cheekily
+cheekiness/SM
+cheeky/PRT
+cheep/GMDS
+cheerer/M
+cheerfuller
+cheerfullest
+cheerfulness/MS
+cheerful/YP
+cheerily
+cheeriness/SM
+cheerio/S
+Cheerios/M
+cheerleader/SM
+cheerlessness/SM
+cheerless/PY
+cheers/S
+cheery/PTR
+cheer/YRDGZS
+cheeseburger/SM
+cheesecake/SM
+cheesecloth/M
+cheesecloths
+cheeseparing/S
+cheese/SDGM
+cheesiness/SM
+cheesy/PRT
+cheetah/M
+cheetahs
+Cheeto/M
+Cheever/M
+cheffed
+cheffing
+chef/SM
+Chekhov/M
+chelate/XDMNG
+chelation/M
+Chelsae/M
+Chelsea/M
+Chelsey/M
+Chelsie/M
+Chelsy/M
+Chelyabinsk/M
+chem
+Che/M
+chemic
+chemical/SYM
+chemiluminescence/M
+chemiluminescent
+chemise/SM
+chemistry/SM
+chemist/SM
+chemotherapeutic/S
+chemotherapy/SM
+chemurgy/SM
+Chengdu
+Cheng/M
+chenille/SM
+Chen/M
+Cheops/M
+Chere/M
+Cherey/M
+Cherianne/M
+Cherice/M
+Cherida/M
+Cherie/M
+Cherilyn/M
+Cherilynn/M
+Cheri/M
+Cherin/M
+Cherise/M
+cherisher/M
+cherish/GDRS
+Cherish/M
+Cheriton/M
+Cherlyn/M
+Cher/M
+Chernenko/M
+Chernobyl/M
+Cherokee/MS
+cheroot/MS
+Cherri/M
+Cherrita/M
+Cherry/M
+cherry/SM
+chert/MS
+cherubic
+cherubim/S
+cherub/SM
+chervil/MS
+Cherye/M
+Cheryl/M
+Chery/M
+Chesapeake/M
+Cheshire/M
+Cheslie/M
+chessboard/SM
+chessman/M
+chessmen
+chess/SM
+Chesterfield/M
+chesterfield/MS
+Chester/M
+Chesterton/M
+chestful/S
+chest/MRDS
+chestnut/SM
+Cheston/M
+chesty/TR
+Chet/M
+Chevalier/M
+chevalier/SM
+Cheviot/M
+cheviot/S
+Chev/M
+Chevrolet/M
+chevron/DMS
+Chevy/M
+chewer/M
+chew/GZSDR
+chewiness/S
+chewy/RTP
+Cheyenne/SM
+chg
+chge
+Chiang/M
+chianti/M
+Chianti/S
+chiaroscuro/SM
+Chiarra/M
+Chiba/M
+Chicagoan/SM
+Chicago/M
+Chicana/MS
+chicane/MGDS
+chicanery/MS
+Chicano/MS
+chichi/RTS
+chickadee/SM
+Chickasaw/SM
+chickenfeed
+chicken/GDM
+chickenhearted
+chickenpox/MS
+Chickie/M
+Chick/M
+chickpea/MS
+chickweed/MS
+chick/XSNM
+Chicky/M
+chicle/MS
+Chic/M
+chicness/S
+Chico/M
+chicory/MS
+chic/SYRPT
+chide/GDS
+chiding/Y
+chiefdom/MS
+chieftain/SM
+chief/YRMST
+chiffonier/MS
+chiffon/MS
+chigger/MS
+chignon/MS
+Chihuahua/MS
+chihuahua/S
+chilblain/MS
+childbearing/MS
+childbirth/M
+childbirths
+childcare/S
+childes
+child/GMYD
+childhood/MS
+childishness/SM
+childish/YP
+childlessness/SM
+childless/P
+childlikeness/M
+childlike/P
+childminders
+childproof/GSD
+childrearing
+children/M
+Chilean/S
+Chile/MS
+chile's
+chilies
+chili/M
+chiller/M
+chilliness/MS
+chilling/Y
+chilli's
+chill/MRDJGTZPS
+chillness/MS
+chilly/TPRS
+Chilton/M
+Chi/M
+chimaera's
+chimaerical
+Chimborazo/M
+chime/DSRGMZ
+Chimera/S
+chimera/SM
+chimeric
+chimerical
+chimer/M
+Chimiques
+chimney/SMD
+chimpanzee/SM
+chimp/MS
+chi/MS
+Chimu/M
+Ch'in
+China/M
+Chinaman/M
+Chinamen
+china/MS
+Chinatown/SM
+chinchilla/SM
+chine/MS
+Chinese/M
+Ching/M
+chink/DMSG
+chinless
+Chin/M
+chinned
+chinner/S
+chinning
+chino/MS
+Chinook/MS
+chin/SGDM
+chinstrap/S
+chintz/SM
+chintzy/TR
+chipboard/M
+Chipewyan/M
+Chip/M
+chipmunk/SM
+chipped
+Chippendale/M
+chipper/DGS
+Chippewa/MS
+chipping/MS
+chip/SM
+Chiquia/M
+Chiquita/M
+chiral
+Chirico/M
+chirography/SM
+chiropodist/SM
+chiropody/MS
+chiropractic/MS
+chiropractor/SM
+chirp/GDS
+chirpy/RT
+chirrup/DGS
+chiseler/M
+chisel/ZGSJMDR
+Chisholm/M
+Chisinau/M
+chitchat/SM
+chitchatted
+chitchatting
+chitinous
+chitin/SM
+chit/SM
+Chittagong/M
+chitterlings
+chivalric
+chivalrously/U
+chivalrousness/MS
+chivalrous/YP
+chivalry/SM
+chive/GMDS
+chivvy/D
+chivying
+chlamydiae
+chlamydia/S
+Chloe/M
+Chloette/M
+Chlo/M
+chloral/MS
+chlorate/M
+chlordane/MS
+chloride/MS
+chlorinated/C
+chlorinates/C
+chlorinate/XDSGN
+chlorination/M
+chlorine/MS
+Chloris
+chlorofluorocarbon/S
+chloroform/DMSG
+chlorophyll/SM
+chloroplast/MS
+chloroquine/M
+chm
+Ch/MGNRS
+chockablock
+chock/SGRDM
+chocoholic/S
+chocolate/MS
+chocolaty
+Choctaw/MS
+choiceness/M
+choice/RSMTYP
+choirboy/MS
+choirmaster/SM
+choir/SDMG
+chokeberry/M
+chokecherry/SM
+choke/DSRGZ
+choker/M
+chokes/M
+choking/Y
+cholera/SM
+choleric
+choler/SM
+cholesterol/SM
+choline/M
+cholinesterase/M
+chomp/DSG
+Chomsky/M
+Chongqing
+choose/GZRS
+chooser/M
+choosiness/S
+choosy/RPT
+chophouse/SM
+Chopin/M
+chopped
+chopper/SDMG
+choppily
+choppiness/MS
+chopping
+choppy/RPT
+chop/S
+chopstick/SM
+chorale/MS
+choral/SY
+chordal
+chordata
+chordate/MS
+chording/M
+chord/SGMD
+chorea/MS
+chore/DSGNM
+choreographer/M
+choreographic
+choreographically
+choreographs
+choreography/MS
+choreograph/ZGDR
+chorines
+chorion/M
+chorister/SM
+choroid/S
+chortler/M
+chortle/ZGDRS
+chorus/GDSM
+chosen/U
+chose/S
+Chou/M
+chowder/SGDM
+chow/DGMS
+Chretien/M
+Chris/M
+chrism/SM
+chrissake
+Chrisse/M
+Chrissie/M
+Chrissy/M
+Christabella/M
+Christabel/M
+Christalle/M
+Christal/M
+Christa/M
+Christan/M
+Christchurch/M
+Christean/M
+Christel/M
+Christendom/MS
+christened/U
+christening/SM
+Christen/M
+christen/SAGD
+Christensen/M
+Christenson/M
+Christiana/M
+Christiane/M
+Christianity/SM
+Christianize/GSD
+Christian/MS
+Christiano/M
+Christiansen/M
+Christians/N
+Christie/SM
+Christi/M
+Christina/M
+Christine/M
+Christin/M
+Christlike
+Christmas/SM
+Christmastide/SM
+Christmastime/S
+Christoffel/M
+Christoffer/M
+Christoforo/M
+Christoper/M
+Christophe/M
+Christopher/M
+Christoph/MR
+Christophorus/M
+Christos/M
+Christ/SMN
+Christye/M
+Christyna/M
+Christy's
+Chrisy/M
+chroma/M
+chromate/M
+chromatically
+chromaticism/M
+chromaticness/M
+chromatic/PS
+chromatics/M
+chromatin/MS
+chromatogram/MS
+chromatograph
+chromatographic
+chromatography/M
+chrome/GMSD
+chromic
+chromite/M
+chromium/SM
+chromosomal
+chromosome/MS
+chromosphere/M
+chronically
+chronicled/U
+chronicler/M
+chronicle/SRDMZG
+chronic/S
+chronograph/M
+chronographs
+chronography
+chronological/Y
+chronologist/MS
+chronology/MS
+chronometer/MS
+chronometric
+Chrotoem/M
+chrysalids
+chrysalis/SM
+Chrysa/M
+chrysanthemum/MS
+Chrysler/M
+Chrysostom/M
+Chrystal/M
+Chrystel/M
+Chryste/M
+chubbiness/SM
+chubby/RTP
+chub/MS
+Chucho/M
+chuck/GSDM
+chuckhole/SM
+chuckle/DSG
+chuckling/Y
+Chuck/M
+chuff/DM
+chugged
+chugging
+chug/MS
+Chukchi/M
+chukka/S
+Chumash/M
+chummed
+chummily
+chumminess/MS
+chumming
+chum/MS
+chummy/SRTP
+chumping/M
+chump/MDGS
+Chungking's
+Chung/M
+chunkiness/MS
+chunk/SGDM
+chunky/RPT
+chuntering
+churchgoer/SM
+churchgoing/SM
+Churchillian
+Churchill/M
+churchliness/M
+churchly/P
+churchman/M
+church/MDSYG
+churchmen
+Church/MS
+churchwarden/SM
+churchwoman/M
+churchwomen
+churchyard/SM
+churlishness/SM
+churlish/YP
+churl/SM
+churner/M
+churning/M
+churn/SGZRDM
+chute/DSGM
+chutney/MS
+chutzpah/M
+chutzpahs
+chutzpa/SM
+Chuvash/M
+ch/VT
+chyme/SM
+Ci
+CIA
+ciao/S
+cicada/MS
+cicatrice/S
+cicatrix's
+Cicely/M
+Cicero/M
+cicerone/MS
+ciceroni
+Ciceronian
+Cicily/M
+CID
+cider's/C
+cider/SM
+Cid/M
+Ciel/M
+cigarette/MS
+cigarillo/MS
+cigar/SM
+cilantro/S
+cilia/M
+ciliate/FDS
+ciliately
+cilium/M
+Cilka/M
+cinch/MSDG
+cinchona/SM
+Cincinnati/M
+cincture/MGSD
+Cinda/M
+Cindee/M
+Cindelyn/M
+cinder/DMGS
+Cinderella/MS
+Cindie/M
+Cindi/M
+Cindra/M
+Cindy/M
+cine/M
+cinema/SM
+cinematic
+cinematographer/MS
+cinematographic
+cinematography/MS
+Cinerama/M
+cinnabar/MS
+Cinnamon/M
+cinnamon/MS
+ciphered/C
+cipher/MSGD
+ciphers/C
+cir
+circa
+circadian
+Circe/M
+circler/M
+circle/RSDGM
+circlet/MS
+circuital
+circuit/GSMD
+circuitousness/MS
+circuitous/YP
+circuitry/SM
+circuity/MS
+circulant
+circularity/SM
+circularize/GSD
+circularness/M
+circular/PSMY
+circulate/ASDNG
+circulation/MA
+circulations
+circulative
+circulatory
+circumcise/DRSXNG
+circumcised/U
+circumciser/M
+circumcision/M
+circumference/SM
+circumferential/Y
+circumflex/MSDG
+circumlocution/MS
+circumlocutory
+circumnavigate/DSNGX
+circumnavigational
+circumnavigation/M
+circumpolar
+circumscribe/GSD
+circumscription/SM
+circumspection/SM
+circumspect/Y
+circumsphere
+circumstance/SDMG
+circumstantial/YS
+circumvention/MS
+circumvent/SBGD
+circus/SM
+Cirillo/M
+Cirilo/M
+Ciro/M
+cirque/SM
+cirrhoses
+cirrhosis/M
+cirrhotic/S
+cirri/M
+cirrus/M
+Cissiee/M
+Cissy/M
+cistern/SM
+citadel/SM
+citations/I
+citation/SMA
+cit/DSG
+cite/ISDAG
+Citibank/M
+citified
+citizenry/SM
+citizenship/MS
+citizen/SYM
+citrate/DM
+citric
+Citroen/M
+citronella/MS
+citron/MS
+citrus/SM
+city/DSM
+cityscape/MS
+citywide
+civet/SM
+civic/S
+civics/M
+civilian/SM
+civility/IMS
+civilizational/MS
+civilization/AMS
+civilizedness/M
+civilized/PU
+civilize/DRSZG
+civilizer/M
+civilizes/AU
+civil/UY
+civvies
+ck/C
+clack/SDG
+cladding/SM
+clads
+clad/U
+Claiborne/M
+Claiborn/M
+claimable
+claimant/MS
+claim/CDRSKAEGZ
+claimed/U
+claimer/KMACE
+Claire/M
+Clair/M
+Clairol/M
+clairvoyance/MS
+clairvoyant/YS
+clambake/MS
+clamberer/M
+clamber/SDRZG
+clammed
+clammily
+clamminess/MS
+clamming
+clam/MS
+clammy/TPR
+clamorer/M
+clamor/GDRMSZ
+clamorousness/UM
+clamorous/PUY
+clampdown/SM
+clamper/M
+clamp/MRDGS
+clamshell/MS
+Clancy/M
+clandestineness/M
+clandestine/YP
+clanger/M
+clangor/MDSG
+clangorous/Y
+clang/SGZRD
+clanking/Y
+clank/SGDM
+clan/MS
+clannishness/SM
+clannish/PY
+clansman/M
+clansmen
+clapboard/SDGM
+Clapeyron/M
+clapped
+clapper/GMDS
+clapping
+clap/S
+Clapton/M
+claptrap/SM
+claque/MS
+Clarabelle/M
+Clara/M
+Clarance/M
+Clare/M
+Claremont/M
+Clarence/M
+Clarendon/M
+Claresta/M
+Clareta/M
+claret/MDGS
+Claretta/M
+Clarette/M
+Clarey/M
+Claribel/M
+Clarice/M
+Clarie/M
+clarification/M
+clarifier/M
+clarify/NGXDRS
+Clari/M
+Clarinda/M
+Clarine/M
+clarinetist/SM
+clarinet/SM
+clarinettist's
+clarion/GSMD
+Clarissa/M
+Clarisse/M
+Clarita/M
+clarities
+clarity/UM
+Clarke/M
+Clark/M
+Clarridge/M
+Clary/M
+clasher/M
+clash/RSDG
+clasped/M
+clasper/M
+clasp's
+clasp/UGSD
+classer/M
+class/GRSDM
+classical/Y
+classicism/SM
+classicist/SM
+classic/S
+classics/M
+classifiable/U
+classification/AMC
+classificatory
+classified/S
+classifier/SM
+classify/CNXASDG
+classiness/SM
+classless/P
+classmate/MS
+classroom/MS
+classwork/M
+classy/PRT
+clatterer/M
+clattering/Y
+clatter/SGDR
+clattery
+Claudelle/M
+Claudell/M
+Claude/M
+Claudetta/M
+Claudette/M
+Claudia/M
+Claudian/M
+Claudianus/M
+Claudie/M
+Claudina/M
+Claudine/M
+Claudio/M
+Claudius/M
+clausal
+clause/MS
+Clausen/M
+Clausewitz/M
+Clausius/M
+Claus/NM
+claustrophobia/SM
+claustrophobic
+clave/RM
+clave's/F
+clavichord/SM
+clavicle/MS
+clavier/MS
+clawer/M
+claw/GDRMS
+Clayborne/M
+Clayborn/M
+Claybourne/M
+clayey
+clayier
+clayiest
+Clay/M
+clay/MDGS
+claymore/MS
+Clayson/M
+Clayton/M
+Clea/M
+cleanable
+cleaner/MS
+cleaning/SM
+cleanliness/UMS
+cleanly/PRTU
+cleanness/MSU
+cleanse
+cleanser/M
+cleans/GDRSZ
+cleanup/MS
+clean/UYRDPT
+clearance/MS
+clearcut
+clearer/M
+clearheadedness/M
+clearheaded/PY
+clearinghouse/S
+clearing/MS
+clearly
+clearness/MS
+clears
+clear/UTRD
+Clearwater/M
+clearway/M
+cleat/MDSG
+cleavage/MS
+cleaver/M
+cleave/RSDGZ
+Cleavland/M
+clef/SM
+cleft/MDGS
+clematis/MS
+clemence
+Clemenceau/M
+Clemence/M
+clemency/ISM
+Clemente/M
+Clementia/M
+Clementina/M
+Clementine/M
+Clementius/M
+clement/IY
+Clement/MS
+clements
+Clemmie/M
+Clemmy/M
+Clemons
+Clemson/M
+Clem/XM
+clenches
+clenching
+clench/UD
+Cleo/M
+Cleon/M
+Cleopatra/M
+Clerc/M
+clerestory/MS
+clergyman/M
+clergymen
+clergy/MS
+clergywoman
+clergywomen
+clericalism/SM
+clerical/YS
+cleric/SM
+Clerissa/M
+clerk/SGYDM
+clerkship/MS
+Cletis
+Cletus/M
+Cleveland/M
+Cleve/M
+cleverness/SM
+clever/RYPT
+Clevey/M
+Clevie/M
+clevis/SM
+clew/DMGS
+cl/GJ
+Cliburn/M
+clichéd
+cliché/SM
+clicker/M
+click/GZSRDM
+clientèle/SM
+client/SM
+cliffhanger/MS
+cliffhanging
+Cliff/M
+Clifford/M
+cliff/SM
+Clifton/M
+climacteric/SM
+climactic
+climate/MS
+climatic
+climatically
+climatological/Y
+climatologist/SM
+climatology/MS
+climax/MDSG
+climbable/U
+climb/BGZSJRD
+climbdown
+climbed/U
+climber/M
+clime/SM
+Clim/M
+clinch/DRSZG
+clincher/M
+clinching/Y
+Cline/M
+clinger/MS
+clinging
+cling/U
+clingy/TR
+clinical/Y
+clinician/MS
+clinic/MS
+clinker/GMD
+clink/RDGSZ
+clinometer/MIS
+Clint/M
+Clinton/M
+Clio/M
+cliometrician/S
+cliometric/S
+clipboard/SM
+clipped/U
+clipper/MS
+clipping/SM
+clip/SM
+clique/SDGM
+cliquey
+cliquier
+cliquiest
+cliquishness/SM
+cliquish/YP
+clitoral
+clitorides
+clitoris/MS
+Clive/M
+cloacae
+cloaca/M
+cloakroom/MS
+cloak's
+cloak/USDG
+clobber/DGS
+cloche/MS
+clocker/M
+clockmaker/M
+clock/SGZRDMJ
+clockwatcher
+clockwise
+clockwork/MS
+clodded
+clodding
+cloddishness/M
+cloddish/P
+clodhopper/SM
+clod/MS
+Cloe/M
+clogged/U
+clogging/U
+clog's
+clog/US
+cloisonné
+cloisonnés
+cloister/MDGS
+cloistral
+Clo/M
+clomp/MDSG
+clonal
+clone/DSRGMZ
+clonk/SGD
+clopped
+clopping
+clop/S
+Cloris/M
+closed/U
+close/EDSRG
+closefisted
+closely
+closemouthed
+closeness/MS
+closeout/MS
+closer/EM
+closers
+closest
+closet/MDSG
+closeup/S
+closing/S
+closured
+closure/EMS
+closure's/I
+closuring
+clothbound
+clothesbrush
+clotheshorse/MS
+clothesline/SDGM
+clothesman
+clothesmen
+clothespin/MS
+clothe/UDSG
+cloth/GJMSD
+clothier/MS
+clothing/M
+Clotho/M
+cloths
+Clotilda/M
+clot/MS
+clotted
+clotting
+cloture/MDSG
+cloudburst/MS
+clouded/U
+cloudiness/SM
+cloudlessness/M
+cloudless/YP
+cloudscape/SM
+cloud/SGMD
+cloudy/TPR
+clout/GSMD
+cloven
+cloverleaf/MS
+clover/M
+clove/SRMZ
+Clovis/M
+clown/DMSG
+clownishness/SM
+clownish/PY
+cloy/DSG
+cloying/Y
+clubbed/M
+clubbing/M
+clubfeet
+clubfoot/DM
+clubhouse/SM
+club/MS
+clubroom/SM
+cluck/GSDM
+clueless
+clue/MGDS
+Cluj/M
+clump/MDGS
+clumpy/RT
+clumsily
+clumsiness/MS
+clumsy/PRT
+clung
+clunk/SGZRDM
+clunky/PRYT
+clustered/AU
+clusters/A
+cluster/SGJMD
+clutch/DSG
+cluttered/U
+clutter/GSD
+Cl/VM
+Clyde/M
+Clydesdale/M
+Cly/M
+Clytemnestra/M
+Clyve/M
+Clywd/M
+cm
+Cm/M
+CMOS
+cnidarian/MS
+CNN
+CNS
+CO
+coacher/M
+coachman/M
+coachmen
+coach/MSRDG
+coachwork/M
+coadjutor/MS
+coagulable
+coagulant/SM
+coagulate/GNXSD
+coagulation/M
+coagulator/S
+coaler/M
+coalesce/GDS
+coalescence/SM
+coalescent
+coalface/SM
+coalfield/MS
+coalitionist/SM
+coalition/MS
+coal/MDRGS
+coalminers
+coarseness/SM
+coarsen/SGD
+coarse/TYRP
+coastal
+coaster/M
+coastguard/MS
+coastline/SM
+coast/SMRDGZ
+coated/U
+Coates/M
+coating/M
+coat/MDRGZJS
+coattail/S
+coattest
+coauthor/MDGS
+coaxer/M
+coax/GZDSR
+coaxial/Y
+coaxing/Y
+Cobain/M
+cobalt/MS
+cobbed
+Cobbie/M
+cobbing
+cobbler/M
+cobble/SRDGMZ
+cobblestone/MSD
+Cobb/M
+Cobby/M
+coble/M
+Cob/M
+COBOL
+Cobol/M
+cobra/MS
+cob/SM
+cobwebbed
+cobwebbing
+cobwebby/RT
+cobweb/SM
+cocaine/MS
+coca/MS
+cocci/MS
+coccus/M
+coccyges
+coccyx/M
+Cochabamba/M
+cochineal/SM
+Cochin/M
+Cochise/M
+cochleae
+cochlear
+cochlea/SM
+Cochran/M
+cockade/SM
+cockamamie
+cockatoo/SM
+cockatrice/MS
+cockcrow/MS
+cockerel/MS
+cocker/M
+cockeye/DM
+cockeyed/PY
+cockfighting/M
+cockfight/MJSG
+cock/GDRMS
+cockily
+cockiness/MS
+cocklebur/M
+cockle/SDGM
+cockleshell/SM
+Cockney
+cockney/MS
+cockpit/MS
+cockroach/SM
+cockscomb/SM
+cockshies
+cocksucker/S!
+cocksure
+cocktail/GDMS
+cocky/RPT
+cocoa/SM
+coco/MS
+coconut/SM
+cocoon/GDMS
+Cocteau/M
+COD
+coda/SM
+codded
+codding
+coddle/GSRD
+coddler/M
+codebook/S
+codebreak/R
+coded/UA
+Codee/M
+codeine/MS
+codename/D
+codependency/S
+codependent/S
+coder/CM
+code's
+co/DES
+codes/A
+code/SCZGJRD
+codetermine/S
+codeword/SM
+codex/M
+codfish/SM
+codger/MS
+codices/M
+codicil/SM
+Codie/M
+codification/M
+codifier/M
+codify/NZXGRSD
+Codi/M
+coding/M
+codling/M
+Cod/M
+cod/MDRSZGJ
+codpiece/MS
+Cody/M
+coedited
+coediting
+coeditor/MS
+coedits
+coed/SM
+coeducational
+coeducation/SM
+coefficient/SYM
+coelenterate/MS
+coequal/SY
+coercer/M
+coerce/SRDXVGNZ
+coercible/I
+coercion/M
+coerciveness/M
+coercive/PY
+coeval/YS
+coexistence/MS
+coexistent
+coexist/GDS
+coextensive/Y
+cofactor/MS
+coffeecake/SM
+coffeecup
+coffeehouse/SM
+coffeemaker/S
+coffeepot/MS
+coffee/SM
+cofferdam/SM
+coffer/DMSG
+Coffey/M
+coffin/DMGS
+Coffman/M
+cogency/MS
+cogent/Y
+cogged
+cogging
+cogitate/DSXNGV
+cogitation/M
+cogitator/MS
+cog/MS
+Cognac/M
+cognac/SM
+cognate/SXYN
+cognation/M
+cognitional
+cognition/SAM
+cognitive/SY
+cognizable
+cognizance/MAI
+cognizances/A
+cognizant/I
+cognomen/SM
+cognoscente
+cognoscenti
+cogwheel/SM
+cohabitant/MS
+cohabitational
+cohabitation/SM
+cohabit/SDG
+Cohan/M
+coheir/MS
+Cohen/M
+cohere/GSRD
+coherence/SIM
+coherencies
+coherency/I
+coherent/IY
+coherer/M
+cohesion/MS
+cohesiveness/SM
+cohesive/PY
+Cohn/M
+cohoes
+coho/MS
+cohort/SM
+coiffed
+coiffing
+coiffure/MGSD
+coif/SM
+coil/UGSAD
+Coimbatore/M
+coinage's/A
+coinage/SM
+coincide/GSD
+coincidence/MS
+coincidental/Y
+coincident/Y
+coined/U
+coiner/M
+coin/GZSDRM
+coinsurance/SM
+Cointon/M
+cointreau
+coital/Y
+coitus/SM
+coke/MGDS
+Coke/MS
+COL
+COLA
+colander/SM
+Colan/M
+Colas
+cola/SM
+colatitude/MS
+Colbert/M
+Colby/M
+coldblooded
+coldish
+coldness/MS
+cold/YRPST
+Coleen/M
+Cole/M
+Coleman/M
+Colene/M
+Coleridge/M
+coleslaw/SM
+Colet/M
+Coletta/M
+Colette/M
+coleus/SM
+Colfax/M
+Colgate/M
+colicky
+colic/SM
+coliform
+Colin/M
+coliseum/SM
+colitis/MS
+collaborate/VGNXSD
+collaboration/M
+collaborative/SY
+collaborator/SM
+collage/MGSD
+collagen/M
+collapse/SDG
+collapsibility/M
+collapsible
+collarbone/MS
+collar/DMGS
+collard/SM
+collarless
+collated/U
+collateral/SYM
+collate/SDVNGX
+collation/M
+collator/MS
+colleague/SDGM
+collectedness/M
+collected/PY
+collectible/S
+collection/AMS
+collective/SY
+collectivism/SM
+collectivist/MS
+collectivity/MS
+collectivization/MS
+collectivize/DSG
+collector/MS
+collect/SAGD
+Colleen/M
+colleen/SM
+college/SM
+collegiality/S
+collegian/SM
+collegiate/Y
+Collen/M
+Collete/M
+Collette/M
+coll/G
+collide/SDG
+Collie/M
+collie/MZSRD
+collier/M
+Collier/M
+colliery/MS
+collimate/C
+collimated/U
+collimates
+collimating
+collimation/M
+collimator/M
+collinear
+collinearity/M
+Colline/M
+Collin/MS
+collisional
+collision/SM
+collocate/XSDGN
+collocation/M
+colloidal/Y
+colloid/MS
+colloq
+colloquialism/MS
+colloquial/SY
+colloquies
+colloquium/SM
+colloquy/M
+collude/SDG
+collusion/SM
+collusive
+collying
+Colly/RM
+Colman/M
+Col/MY
+Cologne/M
+cologne/MSD
+Colo/M
+Colombia/M
+Colombian/S
+Colombo/M
+colonelcy/MS
+colonel/MS
+colonialism/MS
+colonialist/MS
+colonial/SPY
+colonist/SM
+colonization/ACSM
+colonize/ACSDG
+colonized/U
+colonizer/MS
+colonizes/U
+Colon/M
+colonnade/MSD
+colon/SM
+colony/SM
+colophon/SM
+Coloradan/S
+Coloradoan/S
+Colorado/M
+colorant/SM
+coloration/EMS
+coloratura/SM
+colorblindness/S
+colorblind/P
+colored/USE
+colorer/M
+colorfastness/SM
+colorfast/P
+colorfulness/MS
+colorful/PY
+colorimeter/SM
+colorimetry
+coloring/M
+colorization/S
+colorize/GSD
+colorizing/C
+colorlessness/SM
+colorless/PY
+colors/EA
+color/SRDMGZJ
+colossal/Y
+Colosseum/M
+colossi
+colossus/M
+colostomy/SM
+colostrum/SM
+col/SD
+colter/M
+coltishness/M
+coltish/PY
+Colt/M
+colt/MRS
+Coltrane/M
+Columbia/M
+Columbian
+Columbine/M
+columbine/SM
+Columbus/M
+columnar
+columnist/MS
+columnize/GSD
+column/SDM
+Colver/M
+Co/M
+comae
+comaker/SM
+Comanche/MS
+coma/SM
+comatose
+combatant/SM
+combativeness/MS
+combative/PY
+combat/SVGMD
+combed/U
+comber/M
+combinational/A
+combination/ASM
+combinatorial/Y
+combinatoric/S
+combinator/SM
+combined/AU
+combiner/M
+combines/A
+combine/ZGBRSD
+combining/A
+combo/MS
+comb/SGZDRMJ
+Combs/M
+combusted
+combustibility/SM
+combustible/SI
+combustion/MS
+combustive
+Comdex/M
+Comdr/M
+comeback/SM
+comedian/SM
+comedic
+comedienne/SM
+comedown/MS
+comedy/SM
+come/IZSRGJ
+comeliness/SM
+comely/TPR
+comer/IM
+comes/M
+comestible/MS
+cometary
+cometh
+comet/SM
+comeuppance/SM
+comfit's
+comfit/SE
+comfortability/S
+comfortableness/MS
+comfortable/U
+comfortably/U
+comforted/U
+comforter/MS
+comfort/ESMDG
+comforting/YE
+comfy/RT
+comicality/MS
+comical/Y
+comic/MS
+Cominform/M
+comity/SM
+com/LJRTZG
+comm
+Com/M
+comma/MS
+commandant/MS
+commandeer/SDG
+commander/M
+commanding/Y
+commandment/SM
+commando/SM
+command/SZRDMGL
+commemorate/SDVNGX
+commemoration/M
+commemorative/YS
+commemorator/S
+commence/ALDSG
+commencement/AMS
+commencer/M
+commendably
+commendation/ASM
+commendatory/A
+commender/AM
+commend/GSADRB
+commensurable/I
+commensurate/IY
+commensurates
+commensuration/SM
+commentary/MS
+commentate/GSD
+commentator/SM
+commenter/M
+comment's
+comment/SUGD
+commerce/MGSD
+commercialism/MS
+commercialization/SM
+commercialize/GSD
+commercial/PYS
+Commie
+commie/SM
+commingle/GSD
+commiserate/VGNXSD
+commiseration/M
+commissariat/MS
+commissar/MS
+commissary/MS
+commission/ASCGD
+commissioner/SM
+commission's/A
+commitment/SM
+commit/SA
+committable
+committal/MA
+committals
+committed/UA
+committeeman/M
+committeemen
+committee/MS
+committeewoman/M
+committeewomen
+committing/A
+commode/MS
+commodes/IE
+commodiousness/MI
+commodious/YIP
+commodity/MS
+commodore/SM
+commonality/MS
+commonalty/MS
+commoner/MS
+commonness/MSU
+commonplaceness/M
+commonplace/SP
+common/RYUPT
+commonsense
+commons/M
+Commons/M
+commonweal/SHM
+commonwealth/M
+Commonwealth/M
+commonwealths
+Commonwealths
+commotion/MS
+communality/M
+communal/Y
+commune/XSDNG
+communicability/MS
+communicable/IU
+communicably
+communicant/MS
+communicate/VNGXSD
+communicational
+communication/M
+communicativeness/M
+communicative/PY
+communicator/SM
+communion/M
+Communion/SM
+communique/S
+communism/MS
+Communism/S
+communistic
+communist/MS
+Communist/S
+communitarian/M
+community/MS
+communize/SDG
+commutable/I
+commutate/XVGNSD
+commutation/M
+commutative/Y
+commutativity
+commutator/MS
+commute/BZGRSD
+commuter/M
+Comoros
+compaction/M
+compactness/MS
+compactor/MS
+compact/TZGSPRDY
+companionableness/M
+companionable/P
+companionably
+companion/GBSMD
+companionship/MS
+companionway/MS
+company/MSDG
+Compaq/M
+comparabilities
+comparability/IM
+comparableness/M
+comparable/P
+comparably/I
+comparativeness/M
+comparative/PYS
+comparator/SM
+compare/GRSDB
+comparer/M
+comparison/MS
+compartmental
+compartmentalization/SM
+compartmentalize/DSG
+compartment/SDMG
+compassionateness/M
+compassionate/PSDGY
+compassion/MS
+compass/MSDG
+compatibility/IMS
+compatibleness/M
+compatible/SI
+compatibly/I
+compatriot/SM
+compeer/DSGM
+compellable
+compelled
+compelling/YM
+compel/S
+compendious
+compendium/MS
+compensable
+compensated/U
+compensate/XVNGSD
+compensation/M
+compensator/M
+compensatory
+compete/GSD
+competence/ISM
+competency/IS
+competency's
+competent/IY
+competition/SM
+competitiveness/SM
+competitive/YP
+competitor/MS
+comp/GSYD
+compilable/U
+compilation/SAM
+compile/ASDCG
+compiler/CS
+compiler's
+complacence/S
+complacency/SM
+complacent/Y
+complainant/MS
+complainer/M
+complain/GZRDS
+complaining/YU
+complaint/MS
+complaisance/SM
+complaisant/Y
+complected
+complementariness/M
+complementarity
+complementary/SP
+complementation/M
+complementer/M
+complement/ZSMRDG
+complete/BTYVNGPRSDX
+completed/U
+completely/I
+completeness/ISM
+completer/M
+completion/MI
+complexional
+complexion/DMS
+complexity/MS
+complexness/M
+complex/TGPRSDY
+compliance/SM
+compliant/Y
+complicatedness/M
+complicated/YP
+complicate/SDG
+complication/M
+complicator/SM
+complicit
+complicity/MS
+complier/M
+complimentary/U
+complimenter/M
+compliment/ZSMRDG
+comply/ZXRSDNG
+component/SM
+comport/GLSD
+comportment/SM
+compose/CGASDE
+composedness/M
+composed/PY
+composer/CM
+composers
+composite/YSDXNG
+compositional/Y
+composition/CMA
+compositions/C
+compositor/MS
+compost/DMGS
+composure/ESM
+compote/MS
+compounded/U
+compounder/M
+compound/RDMBGS
+comprehend/DGS
+comprehending/U
+comprehensibility/SIM
+comprehensibleness/IM
+comprehensible/PI
+comprehensibly/I
+comprehension/IMS
+comprehensiveness/SM
+comprehensive/YPS
+compressed/Y
+compressibility/IM
+compressible/I
+compressional
+compression/CSM
+compressive/Y
+compressor/MS
+compress/SDUGC
+comprise/GSD
+compromiser/M
+compromise/SRDGMZ
+compromising/UY
+Compton/M
+comptroller/SM
+compulsion/SM
+compulsiveness/MS
+compulsive/PYS
+compulsivity
+compulsorily
+compulsory/S
+compunction/MS
+Compuserve/M
+CompuServe/M
+computability/M
+computable/UI
+computably
+computational/Y
+computation/SM
+computed/A
+computerese
+computerization/MS
+computerize/SDG
+computer/M
+compute/RSDZBG
+computes/A
+computing/A
+comradely/P
+comradeship/MS
+comrade/YMS
+Comte/M
+Conakry/M
+Conan/M
+Conant/M
+concatenate/XSDG
+concaveness/MS
+concave/YP
+conceal/BSZGRDL
+concealed/U
+concealer/M
+concealing/Y
+concealment/MS
+conceded/Y
+conceitedness/SM
+conceited/YP
+conceit/SGDM
+conceivable/IU
+conceivably/I
+conceive/BGRSD
+conceiver/M
+concentrate/VNGSDX
+concentration/M
+concentrator/MS
+concentrically
+Concepción/M
+conceptional
+conception/MS
+concept/SVM
+conceptuality/M
+conceptualization/A
+conceptualizations
+conceptualization's
+conceptualize/DRSG
+conceptualizing/A
+conceptual/Y
+concerned/YU
+concern/USGD
+concerted/PY
+concert/EDSG
+concertina/MDGS
+concertize/GDS
+concertmaster/MS
+concerto/SM
+concert's
+concessionaire/SM
+concessional
+concessionary
+concession/R
+Concetta/M
+Concettina/M
+Conchita/M
+conch/MDG
+conchs
+concierge/SM
+conciliar
+conciliate/GNVX
+conciliation/ASM
+conciliator/MS
+conciliatory/A
+conciseness/SM
+concise/TYRNPX
+concision/M
+conclave/S
+concluder/M
+conclude/RSDG
+conclusion/SM
+conclusive/IPY
+conclusiveness/ISM
+concocter/M
+concoction/SM
+concoct/RDVGS
+concomitant/YS
+concordance/MS
+concordant/Y
+concordat/SM
+Concorde/M
+Concordia/M
+Concord/MS
+concourse
+concreteness/MS
+concrete/NGXRSDPYM
+concretion/M
+concubinage/SM
+concubine/SM
+concupiscence/SM
+concupiscent
+concurrence/MS
+concur/S
+concussion/MS
+concuss/VD
+condemnate/XN
+condemnation/M
+condemnatory
+condemner/M
+condemn/ZSGRDB
+condensate/NMXS
+condensation/M
+condenser/M
+condense/ZGSD
+condensible
+condescend
+condescending/Y
+condescension/MS
+condign
+condiment/SM
+condition/AGSJD
+conditionals
+conditional/UY
+conditioned/U
+conditioner/MS
+conditioning/M
+condition's
+condole
+condolence/MS
+condominium/MS
+condom/SM
+condone/GRSD
+condoner/M
+Condorcet/M
+condor/MS
+condo/SM
+conduce/VGSD
+conduciveness/M
+conducive/P
+conductance/SM
+conductibility/SM
+conductible
+conduction/MS
+conductive/Y
+conductivity/MS
+conductor/MS
+conductress/MS
+conduct/V
+conduit/MS
+coneflower/M
+Conestoga
+coney's
+confabbed
+confabbing
+confab/MS
+confabulate/XSDGN
+confabulation/M
+confectioner/M
+confectionery/SM
+confectionist
+confection/RDMGZS
+confect/S
+Confederacy/M
+confederacy/MS
+confederate/M
+Confederate/S
+conferee/MS
+conference/DSGM
+conferrable
+conferral/SM
+conferred
+conferrer/SM
+conferring
+confer/SB
+confessed/Y
+confessional/SY
+confession/MS
+confessor/SM
+confetti/M
+confidante/SM
+confidant/SM
+confidence/SM
+confidentiality/MS
+confidentialness/M
+confidential/PY
+confident/Y
+confider/M
+confide/ZGRSD
+confiding/PY
+configuration/ASM
+configure/AGSDB
+confined/U
+confine/L
+confinement/MS
+confiner/M
+confirm/AGDS
+confirmation/ASM
+confirmatory
+confirmedness/M
+confirmed/YP
+confiscate/DSGNX
+confiscation/M
+confiscator/MS
+confiscatory
+conflagration/MS
+conflate/NGSDX
+conflation/M
+conflicting/Y
+conflict/SVGDM
+confluence/MS
+conformable/U
+conformal
+conformance/SM
+conformational/Y
+conform/B
+conformer/M
+conformism/SM
+conformist/SM
+conformities
+conformity/MUI
+confounded/Y
+confound/R
+confrère/MS
+confrontational
+confrontation/SM
+confronter/M
+confront/Z
+Confucianism/SM
+Confucian/S
+Confucius/M
+confusedness/M
+confused/PY
+confuse/RBZ
+confusing/Y
+confutation/MS
+confute/GRSD
+confuter/M
+conga/MDG
+congeal/GSDL
+congealment/MS
+congeniality/UM
+congenial/U
+congeries/M
+conger/SM
+congestion/MS
+congest/VGSD
+conglomerate/XDSNGVM
+conglomeration/M
+Cong/M
+Congolese
+Congo/M
+congrats
+congratulate/NGXSD
+congratulation/M
+congratulatory
+congregate/DSXGN
+congregational
+Congregational
+congregationalism/MS
+congregationalist/MS
+Congregationalist/S
+congregation/M
+congressional/Y
+congressman/M
+congressmen
+Congress/MS
+congress/MSDG
+congresspeople
+congressperson/S
+congresswoman/M
+congresswomen
+Congreve/M
+congruence/IM
+congruences
+congruency/M
+congruential
+congruent/YI
+congruity/MSI
+congruousness/IM
+congruous/YIP
+conicalness/M
+conical/PSY
+conic/S
+conics/M
+conifer/MS
+coniferous
+conjectural/Y
+conjecture/GMDRS
+conjecturer/M
+conjoint
+conjugacy
+conjugal/Y
+conjugate/XVNGYSDP
+conjugation/M
+conjunct/DSV
+conjunctiva/MS
+conjunctive/YS
+conjunctivitis/SM
+conjuration/MS
+conjurer/M
+conjure/RSDZG
+conjuring/M
+conker/M
+conk/ZDR
+Conley/M
+Con/M
+conman
+connect/ADGES
+connectedly/E
+connectedness/ME
+connected/U
+connectible
+Connecticut/M
+connection/AME
+connectionless
+connections/E
+connective/SYM
+connectivity/MS
+connector/MS
+Connelly/M
+Conner/M
+Connery/M
+connexion/MS
+Conney/M
+conn/GVDR
+Connie/M
+Conni/M
+conniption/MS
+connivance/MS
+conniver/M
+connive/ZGRSD
+connoisseur/MS
+Connor/SM
+connotative/Y
+Conn/RM
+connubial/Y
+Conny/M
+conquerable/U
+conquered/AU
+conqueror/MS
+conquer/RDSBZG
+conquers/A
+conquest/ASM
+conquistador/MS
+Conrade/M
+Conrad/M
+Conrado/M
+Conrail/M
+Conroy/M
+Consalve/M
+consanguineous/Y
+consanguinity/SM
+conscienceless
+conscientiousness/MS
+conscientious/YP
+conscionable/U
+consciousness/MUS
+conscious/UYSP
+conscription/SM
+consecrated/AU
+consecrates/A
+consecrate/XDSNGV
+consecrating/A
+consecration/AMS
+consecutiveness/M
+consecutive/YP
+consensus/SM
+consenter/M
+consenting/Y
+consent/SZGRD
+consequence
+consequentiality/S
+consequential/IY
+consequentialness/M
+consequently/I
+consequent/PSY
+conservancy/SM
+conservationism
+conservationist/SM
+conservation/SM
+conservatism/SM
+conservativeness/M
+Conservative/S
+conservative/SYP
+conservator/MS
+conservatory/MS
+con/SGM
+considerable/I
+considerables
+considerably/I
+considerateness/MSI
+considerate/XIPNY
+consideration/ASMI
+considered/U
+considerer/M
+consider/GASD
+considering/S
+consign/ASGD
+consignee/SM
+consignment/SM
+consist/DSG
+consistence/S
+consistency/IMS
+consistent/IY
+consistory/MS
+consolable/I
+Consolata/M
+consolation/MS
+consolation's/E
+consolatory
+consoled/U
+consoler/M
+console/ZBG
+consolidated/AU
+consolidate/NGDSX
+consolidates/A
+consolidation/M
+consolidator/SM
+consoling/Y
+consommé/S
+consonance/IM
+consonances
+consonantal
+consonant/MYS
+consortia
+consortium/M
+conspectus/MS
+conspicuousness/IMS
+conspicuous/YIP
+conspiracy/MS
+conspiratorial/Y
+conspirator/SM
+constable
+Constable/M
+constabulary/MS
+constance
+Constance/M
+Constancia/M
+constancy/IMS
+Constancy/M
+Constanta/M
+Constantia/M
+Constantina/M
+Constantine/M
+Constantin/M
+Constantino/M
+Constantinople/M
+constant/IY
+constants
+constellation/SM
+consternate/XNGSD
+consternation/M
+constipate/XDSNG
+constipation/M
+constituency/MS
+constituent/SYM
+constituted/A
+constitute/NGVXDS
+constitutes/A
+constituting/A
+Constitution
+constitutionality's
+constitutionality/US
+constitutionally/U
+constitutional/SY
+constitution/AMS
+constitutive/Y
+constrain
+constrainedly
+constrained/U
+constraint/MS
+constriction/MS
+constrictor/MS
+constrict/SDGV
+construable
+construct/ASDGV
+constructibility
+constructible/A
+constructional/Y
+constructionist/MS
+construction/MAS
+constructions/C
+constructiveness/SM
+constructive/YP
+constructor/MS
+construe/GSD
+Consuela/M
+Consuelo/M
+consular/S
+consulate/MS
+consul/KMS
+consulship/MS
+consultancy/S
+consultant/MS
+consultation/SM
+consultative
+consulted/A
+consulter/M
+consult/RDVGS
+consumable/S
+consumed/Y
+consume/JZGSDB
+consumerism/MS
+consumerist/S
+consumer/M
+consuming/Y
+consummate/DSGVY
+consummated/U
+consumption/SM
+consumptive/YS
+cont
+contact/BGD
+contacted/A
+contact's/A
+contacts/A
+contagion/SM
+contagiousness/MS
+contagious/YP
+containerization/SM
+containerize/GSD
+container/M
+containment/SM
+contain/SLZGBRD
+contaminant/SM
+contaminated/AU
+contaminates/A
+contaminate/SDCXNG
+contaminating/A
+contamination/CM
+contaminative
+contaminator/MS
+contd
+cont'd
+contemn/SGD
+contemplate/DVNGX
+contemplation/M
+contemplativeness/M
+contemplative/PSY
+contemporaneity/MS
+contemporaneousness/M
+contemporaneous/PY
+contemptibleness/M
+contemptible/P
+contemptibly
+contempt/M
+contemptuousness/SM
+contemptuous/PY
+contentedly/E
+contentedness/SM
+contented/YP
+content/EMDLSG
+contention/MS
+contentiousness/SM
+contentious/PY
+contently
+contentment/ES
+contentment's
+conterminous/Y
+contestable/I
+contestant/SM
+contested/U
+contextualize/GDS
+contiguity/MS
+contiguousness/M
+contiguous/YP
+continence/ISM
+Continental/S
+continental/SY
+continent/IY
+Continent/M
+continents
+continent's
+contingency/SM
+contingent/SMY
+continua
+continuable
+continual/Y
+continuance/ESM
+continuant/M
+continuation/ESM
+continue/ESDG
+continuer/M
+continuity/SEM
+continuousness/M
+continuous/YE
+continuum/M
+contortionist/SM
+contortion/MS
+contort/VGD
+contour
+contraband/SM
+contrabass/M
+contraception/SM
+contraceptive/S
+contract/DG
+contractible
+contractile
+contractual/Y
+contradict/GDS
+contradiction/MS
+contradictorily
+contradictoriness/M
+contradictory/PS
+contradistinction/MS
+contraflow/S
+contrail/M
+contraindicate/SDVNGX
+contraindication/M
+contralto/SM
+contrapositive/S
+contraption/MS
+contrapuntal/Y
+contrariety/MS
+contrarily
+contrariness/MS
+contrariwise
+contrary/PS
+contra/S
+contrasting/Y
+contrastive/Y
+contrast/SRDVGZ
+contravene/GSRD
+contravener/M
+contravention/MS
+Contreras/M
+contretemps/M
+contribute/XVNZRD
+contribution/M
+contributive/Y
+contributorily
+contributor/SM
+contributory/S
+contriteness/M
+contrite/NXP
+contrition/M
+contrivance/SM
+contriver/M
+contrive/ZGRSD
+control/CS
+controllability/M
+controllable/IU
+controllably/U
+controlled/CU
+controller/SM
+controlling/C
+control's
+controversialists
+controversial/UY
+controversy/MS
+controvert/DGS
+controvertible/I
+contumacious/Y
+contumacy/MS
+contumelious
+contumely/MS
+contuse/NGXSD
+contusion/M
+conundrum/SM
+conurbation/MS
+convalesce/GDS
+convalescence/SM
+convalescent/S
+convect/DSVG
+convectional
+convection/MS
+convector
+convene/ASDG
+convener/MS
+convenience/ISM
+convenient/IY
+conventicle/SM
+conventionalism/M
+conventionalist/M
+conventionality/SUM
+conventionalize/GDS
+conventional/UY
+convention/MA
+conventions
+convergence/MS
+convergent
+conversant/Y
+conversationalist/SM
+conversational/Y
+conversation/SM
+conversazione/M
+converse/Y
+conversion/AM
+conversioning
+converted/U
+converter/MS
+convert/GADS
+convertibility's/I
+convertibility/SM
+convertibleness/M
+convertible/PS
+convexity/MS
+convex/Y
+conveyance/DRSGMZ
+conveyancer/M
+conveyancing/M
+convey/BDGS
+conveyor/MS
+conviction/MS
+convict/SVGD
+convinced/U
+convincer/M
+convince/RSDZG
+convincingness/M
+convincing/PUY
+conviviality/MS
+convivial/Y
+convoke/GSD
+convolute/XDNY
+convolution/M
+convolve/C
+convolved
+convolves
+convolving
+convoy/GMDS
+convulse/SDXVNG
+convulsion/M
+convulsiveness/M
+convulsive/YP
+Conway/M
+cony/SM
+coo/GSD
+cookbook/SM
+cooked/AU
+Cooke/M
+cooker/M
+cookery/MS
+cook/GZDRMJS
+Cookie/M
+cookie/SM
+cooking/M
+Cook/M
+cookout/SM
+cooks/A
+cookware/SM
+cooky's
+coolant/SM
+cooled/U
+cooler/M
+Cooley/M
+coolheaded
+Coolidge/M
+coolie/MS
+coolness/MS
+cool/YDRPJGZTS
+coon/MS!
+coonskin/MS
+cooperage/MS
+cooperate/VNGXSD
+cooperation/M
+cooperativeness/SM
+cooperative/PSY
+cooperator/MS
+cooper/GDM
+Cooper/M
+coop/MDRGZS
+Coop/MR
+coordinated/U
+coordinateness/M
+coordinate/XNGVYPDS
+coordination/M
+coordinator/MS
+Coors/M
+cootie/SM
+coot/MS
+copay/S
+Copeland/M
+Copenhagen/M
+coper/M
+Copernican
+Copernicus/M
+cope/S
+copied/A
+copier/M
+copies/A
+copilot/SM
+coping/M
+copiousness/SM
+copious/YP
+coplanar
+Copland/M
+Copley/M
+copolymer/MS
+copora
+copped
+Copperfield/M
+copperhead/MS
+copper/MSGD
+copperplate/MS
+coppersmith/M
+coppersmiths
+coppery
+coppice's
+copping
+Coppola/M
+copra/MS
+coprolite/M
+coprophagous
+copse/M
+cops/GDS
+cop/SJMDRG
+copter/SM
+Coptic/M
+copula/MS
+copulate/XDSNGV
+copulation/M
+copulative/S
+copybook/MS
+copycat/SM
+copycatted
+copycatting
+copyist/SM
+copy/MZBDSRG
+copyrighter/M
+copyright/MSRDGZ
+copywriter/MS
+coquetry/MS
+coquette/DSMG
+coquettish/Y
+Corabella/M
+Corabelle/M
+Corabel/M
+coracle/SM
+Coralie/M
+Coraline/M
+coralline
+Coral/M
+coral/SM
+Coralyn/M
+Cora/M
+corbel/GMDJS
+Corbet/M
+Corbett/M
+Corbie/M
+Corbin/M
+Corby/M
+cordage/MS
+corded/AE
+Cordelia/M
+Cordelie/M
+Cordell/M
+corder/AM
+Cordey/M
+cord/FSAEM
+cordiality/MS
+cordialness/M
+cordial/PYS
+Cordie/M
+cordillera/MS
+Cordilleras
+Cordi/M
+cording/MA
+cordite/MS
+cordless
+Cord/M
+Cordoba
+cordon/DMSG
+cordovan/SM
+Cordula/M
+corduroy/GDMS
+Cordy/M
+cored/A
+Coreen/M
+Corella/M
+core/MZGDRS
+Corenda/M
+Corene/M
+corer/M
+corespondent/MS
+Coretta/M
+Corette/M
+Corey/M
+Corfu/M
+corgi/MS
+coriander/SM
+Corie/M
+Corilla/M
+Cori/M
+Corina/M
+Corine/M
+coring/M
+Corinna/M
+Corinne/M
+Corinthian/S
+Corinthians/M
+Corinth/M
+Coriolanus/M
+Coriolis/M
+Corissa/M
+Coriss/M
+corked/U
+corker/M
+cork/GZDRMS
+Cork/M
+corkscrew/DMGS
+corks/U
+Corliss/M
+Corly/M
+Cormack/M
+corm/MS
+cormorant/MS
+Cornall/M
+cornball/SM
+cornbread/S
+corncob/SM
+corncrake/M
+corneal
+cornea/SM
+Corneille/M
+Cornela/M
+Cornelia/M
+Cornelius/M
+Cornelle/M
+Cornell/M
+corner/GDM
+cornerstone/MS
+cornet/SM
+Corney/M
+cornfield/SM
+cornflake/S
+cornflour/M
+cornflower/SM
+corn/GZDRMS
+cornice/GSDM
+Cornie/M
+cornily
+corniness/S
+Cornish/S
+cornmeal/S
+cornrow/GDS
+cornstalk/MS
+cornstarch/SM
+cornucopia/MS
+Cornwallis/M
+Cornwall/M
+Corny/M
+corny/RPT
+corolla/MS
+corollary/SM
+Coronado/M
+coronal/MS
+coronary/S
+corona/SM
+coronate/NX
+coronation/M
+coroner/MS
+coronet/DMS
+Corot/M
+coroutine/SM
+Corp
+corporal/SYM
+corpora/MS
+corporate/INVXS
+corporately
+corporation/MI
+corporatism/M
+corporatist
+corporeality/MS
+corporeal/IY
+corporealness/M
+corp/S
+corpse/M
+corpsman/M
+corpsmen
+corps/SM
+corpulence/MS
+corpulentness/S
+corpulent/YP
+corpuscle/SM
+corpuscular
+corpus/M
+corr
+corralled
+corralling
+corral/MS
+correctable/U
+correct/BPSDRYTGV
+corrected/U
+correctional
+correction/MS
+corrective/YPS
+correctly/I
+correctness/MSI
+corrector/MS
+Correggio/M
+correlated/U
+correlate/SDXVNG
+correlation/M
+correlative/YS
+Correna/M
+correspond/DSG
+correspondence/MS
+correspondent/SM
+corresponding/Y
+Correy/M
+Corrianne/M
+corridor/SM
+Corrie/M
+corrigenda
+corrigendum/M
+corrigible/I
+Corri/M
+Corrina/M
+Corrine/M
+Corrinne/M
+corroborated/U
+corroborate/GNVXDS
+corroboration/M
+corroborative/Y
+corroborator/MS
+corroboratory
+corrode/SDG
+corrodible
+corrosion/SM
+corrosiveness/M
+corrosive/YPS
+corrugate/NGXSD
+corrugation/M
+corrupt/DRYPTSGV
+corrupted/U
+corrupter/M
+corruptibility/SMI
+corruptible/I
+corruption/IM
+corruptions
+corruptive/Y
+corruptness/MS
+Corry/M
+corsage/MS
+corsair/SM
+corset/GMDS
+Corsica/M
+Corsican/S
+cortège/MS
+Cortes/S
+cortex/M
+Cortez's
+cortical/Y
+cortices
+corticosteroid/SM
+Cortie/M
+cortisone/SM
+Cortland/M
+Cort/M
+Cortney/M
+Corty/M
+corundum/MS
+coruscate/XSDGN
+coruscation/M
+Corvallis/M
+corvette/MS
+Corvus/M
+Cory/M
+Cos
+Cosby/M
+Cosetta/M
+Cosette/M
+cos/GDS
+cosignatory/MS
+cosign/SRDZG
+cosily
+Cosimo/M
+cosine/MS
+cosiness/MS
+Cosme/M
+cosmetically
+cosmetician/MS
+cosmetic/SM
+cosmetologist/MS
+cosmetology/MS
+cosmic
+cosmical/Y
+cosmogonist/MS
+cosmogony/SM
+cosmological/Y
+cosmologist/MS
+cosmology/SM
+Cosmo/M
+cosmonaut/MS
+cosmopolitanism/MS
+cosmopolitan/SM
+cosmos/SM
+cosponsor/DSG
+cossack/S
+Cossack/SM
+cosset/GDS
+Costa/M
+Costanza/M
+costarred
+costarring
+costar/S
+Costello/M
+costiveness/M
+costive/PY
+costless
+costliness/SM
+costly/RTP
+cost/MYGVJS
+Costner/M
+costumer/M
+costume/ZMGSRD
+cotangent/SM
+Cote/M
+cote/MS
+coterie/MS
+coterminous/Y
+cotillion/SM
+Cotonou/M
+Cotopaxi/M
+cot/SGMD
+cottager/M
+cottage/ZMGSRD
+cottar's
+cotted
+cotter/SDM
+cotton/GSDM
+Cotton/M
+cottonmouth/M
+cottonmouths
+cottonseed/MS
+cottontail/SM
+cottonwood/SM
+cottony
+cotyledon/MS
+couching/M
+couch/MSDG
+cougar/MS
+cougher/M
+cough/RDG
+coughs
+couldn't
+could/T
+could've
+coulée/MS
+Coulomb/M
+coulomb/SM
+councilman/M
+councilmen
+councilor/MS
+councilperson/S
+council/SM
+councilwoman/M
+councilwomen
+counsel/GSDM
+counsellings
+counselor/MS
+countability/E
+countable/U
+countably/U
+countdown/SM
+counted/U
+count/EGARDS
+countenance/EGDS
+countenancer/M
+countenance's
+counteract/DSVG
+counteraction/SM
+counterargument/SM
+counterattack/DRMGS
+counterbalance/MSDG
+counterclaim/GSDM
+counterclockwise
+counterculture/MS
+countercyclical
+counterespionage/MS
+counterexample/S
+counterfeiter/M
+counterfeit/ZSGRD
+counterflow
+counterfoil/MS
+counterforce/M
+counter/GSMD
+counterinsurgency/MS
+counterintelligence/MS
+counterintuitive
+countermand/DSG
+counterman/M
+countermeasure/SM
+countermen
+counteroffensive/SM
+counteroffer/SM
+counterpane/SM
+counterpart/SM
+counterpoint/GSDM
+counterpoise/GMSD
+counterproductive
+counterproposal/M
+counterrevolutionary/MS
+counterrevolution/MS
+counter's/E
+counters/E
+countersignature/MS
+countersign/SDG
+countersink/SG
+counterspy/MS
+counterstrike
+countersunk
+countertenor/SM
+countervail/DSG
+counterweight/GMDS
+countess/MS
+countless/Y
+countrify/D
+countryman/M
+countrymen
+country/MS
+countryside/MS
+countrywide
+countrywoman/M
+countrywomen
+county/SM
+coup/ASDG
+coupe/MS
+Couperin/M
+couple/ACU
+coupled/CU
+coupler/C
+couplers
+coupler's
+couple's
+couples/CU
+couplet/SM
+coupling's/C
+coupling/SM
+coupon/SM
+coup's
+courage/MS
+courageously
+courageousness/MS
+courageous/U
+courages/E
+Courbet/M
+courgette/MS
+courier/GMDS
+course/EGSRDM
+courser's/E
+courser/SM
+course's/AF
+courses/FA
+coursework
+coursing/M
+Courtenay/M
+courteousness/EM
+courteousnesses
+courteous/PEY
+courtesan/MS
+courtesied
+courtesy/ESM
+courtesying
+court/GZMYRDS
+courthouse/MS
+courtier/SM
+courtliness/MS
+courtly/RTP
+Court/M
+Courtnay/M
+Courtney/M
+courtroom/MS
+courtship/SM
+courtyard/SM
+couscous/MS
+cousinly/U
+cousin/YMS
+Cousteau/M
+couture/SM
+couturier/SM
+covalent/Y
+covariance/SM
+covariant/S
+covariate/SN
+covary
+cove/DRSMZG
+covenanted/U
+covenanter/M
+covenant/SGRDM
+coven/SM
+Covent/M
+Coventry/MS
+coverable/E
+cover/AEGUDS
+coverage/MS
+coverall/DMS
+coverer/AME
+covering/MS
+coverlet/MS
+coversheet
+covers/M
+covertness/SM
+covert/YPS
+coveter/M
+coveting/Y
+covetousness/SM
+covetous/PY
+covet/SGRD
+covey/SM
+covington
+cowardice/MS
+cowardliness/MS
+cowardly/P
+Coward/M
+coward/MYS
+cowbell/MS
+cowbird/MS
+cowboy/MS
+cowcatcher/SM
+cowed/Y
+cowering/Y
+cower/RDGZ
+cowgirl/MS
+cowhand/S
+cowherd/SM
+cowhide/MGSD
+Cowley/M
+cowlick/MS
+cowling/M
+cowl/SGMD
+cowman/M
+cow/MDRSZG
+cowmen
+coworker/MS
+Cowper/M
+cowpoke/MS
+cowpony
+cowpox/MS
+cowpuncher/M
+cowpunch/RZ
+cowrie/SM
+cowshed/SM
+cowslip/MS
+coxcomb/MS
+Cox/M
+cox/MDSG
+coxswain/GSMD
+coy/CDSG
+coyer
+coyest
+coyly
+Coy/M
+coyness/MS
+coyote/SM
+coypu/SM
+cozenage/MS
+cozen/SGD
+cozily
+coziness/MS
+Cozmo/M
+Cozumel/M
+cozy/DSRTPG
+CPA
+cpd
+CPI
+cpl
+Cpl
+CPO
+CPR
+cps
+CPU/SM
+crabapple
+crabbedness/M
+crabbed/YP
+Crabbe/M
+crabber/MS
+crabbily
+crabbiness/S
+crabbing/M
+crabby/PRT
+crabgrass/S
+crablike
+crab/MS
+crackable/U
+crackdown/MS
+crackerjack/S
+cracker/M
+crackle/GJDS
+crackling/M
+crackly/RT
+crackpot/SM
+crackup/S
+crack/ZSBYRDG
+cradler/M
+cradle/SRDGM
+cradling/M
+craftily
+craftiness/SM
+Craft/M
+craft/MRDSG
+craftsman/M
+craftsmanship/SM
+craftsmen
+craftspeople
+craftspersons
+craftswoman
+craftswomen
+crafty/TRP
+Craggie/M
+cragginess/SM
+Craggy/M
+craggy/RTP
+crag/SM
+Craig/M
+Cramer/M
+crammed
+crammer/M
+cramming
+cramper/M
+cramp/MRDGS
+crampon/SM
+cram/S
+Cranach/M
+cranberry/SM
+Crandall/M
+crane/DSGM
+cranelike
+Crane/M
+Cranford/M
+cranial
+cranium/MS
+crankcase/MS
+crankily
+crankiness/MS
+crank/SGTRDM
+crankshaft/MS
+cranky/TRP
+Cranmer/M
+cranny/DSGM
+Cranston/M
+crape/SM
+crapped
+crappie/M
+crapping
+crappy/RST
+crapshooter/SM
+crap/SMDG!
+crasher/M
+crashing/Y
+crash/SRDGZ
+crassness/MS
+crass/TYRP
+crate/DSRGMZ
+crater/DMG
+Crater/M
+cravat/SM
+cravatted
+cravatting
+crave/DSRGJ
+cravenness/SM
+craven/SPYDG
+craver/M
+craving/M
+crawdad/S
+crawfish's
+Crawford/M
+crawler/M
+crawl/RDSGZ
+crawlspace/S
+crawlway
+crawly/TRS
+craw/SYM
+crayfish/GSDM
+Crayola/M
+crayon/GSDM
+Cray/SM
+craze/GMDS
+crazily
+craziness/MS
+crazy/SRTP
+creakily
+creakiness/SM
+creak/SDG
+creaky/PTR
+creamer/M
+creamery/MS
+creamily
+creaminess/SM
+cream/SMRDGZ
+creamy/TRP
+creased/CU
+crease/IDRSG
+crease's
+creases/C
+creasing/C
+created/U
+create/XKVNGADS
+creationism/MS
+creationist/MS
+Creation/M
+creation/MAK
+creativeness/SM
+creative/YP
+creativities
+creativity/K
+creativity's
+Creator/M
+creator/MS
+creatureliness/M
+creaturely/P
+creature/YMS
+crèche/SM
+credence/MS
+credent
+credential/SGMD
+credenza/SM
+credibility/IMS
+credible/I
+credibly/I
+creditability/M
+creditableness/M
+creditable/P
+creditably/E
+credited/U
+credit/EGBSD
+creditor/MS
+credit's
+creditworthiness
+credo/SM
+credulity/ISM
+credulous/IY
+credulousness/SM
+creedal
+creed/C
+creeds
+creed's
+creekside
+creek/SM
+Creek/SM
+creel/SMDG
+Cree/MDS
+creeper/M
+creepily
+creepiness/SM
+creep/SGZR
+creepy/PRST
+Creigh/M
+Creight/M
+Creighton/M
+cremate/XDSNG
+cremation/M
+crematoria
+crematorium/MS
+crematory/S
+creme/S
+crenelate/XGNSD
+crenelation/M
+Creole/MS
+creole/SM
+Creon/M
+creosote/MGDS
+crepe/DSGM
+crept
+crescendoed
+crescendoing
+crescendo/SCM
+crescent/MS
+cress/S
+crestfallenness/M
+crestfallen/PY
+cresting/M
+crestless
+crest/SGMD
+Crestview/M
+cretaceous
+Cretaceously/M
+Cretaceous/Y
+Cretan/S
+Crete/M
+cretinism/MS
+cretin/MS
+cretinous
+cretonne/SM
+crevasse/DSMG
+crevice/SM
+crew/DMGS
+crewel/SM
+crewelwork/SM
+crewman/M
+crewmen
+cribbage/SM
+cribbed
+cribber/SM
+cribbing/M
+crib/SM
+Crichton/M
+cricketer/M
+cricket/SMZRDG
+crick/GDSM
+Crick/M
+cried/C
+crier/CM
+cries/C
+Crimea/M
+Crimean
+crime/GMDS
+criminality/MS
+criminalization/C
+criminalize/GC
+criminal/SYM
+criminologist/SM
+criminology/MS
+crimper/M
+crimp/RDGS
+crimson/DMSG
+cringer/M
+cringe/SRDG
+crinkle/DSG
+crinkly/TRS
+Crin/M
+crinoline/SM
+cripple/GMZDRS
+crippler/M
+crippling/Y
+Crisco/M
+crises
+crisis/M
+Cris/M
+crisper/M
+crispiness/SM
+crispness/MS
+crisp/PGTYRDS
+crispy/RPT
+criss
+crisscross/GDS
+Crissie/M
+Crissy/M
+Cristabel/M
+Cristal/M
+Crista/M
+Cristen/M
+Cristian/M
+Cristiano/M
+Cristie/M
+Cristi/M
+Cristina/M
+Cristine/M
+Cristin/M
+Cristionna/M
+Cristobal/M
+Cristy/M
+criteria
+criterion/M
+criticality
+critically/U
+criticalness/M
+critical/YP
+criticism/MS
+criticized/U
+criticize/GSRDZ
+criticizer/M
+criticizes/A
+criticizingly/S
+criticizing/UY
+critic/MS
+critique/MGSD
+critter/SM
+Cr/M
+croaker/M
+croak/SRDGZ
+croaky/RT
+Croatia/M
+Croatian/S
+Croat/SM
+Croce/M
+crocheter/M
+crochet/RDSZJG
+crockery/SM
+Crockett/M
+Crockpot/M
+crock/SGRDM
+crocodile/MS
+crocus/SM
+Croesus/SM
+crofter/M
+croft/MRGZS
+croissant/MS
+Croix/M
+Cromwellian
+Cromwell/M
+crone/SM
+Cronin/M
+Cronkite/M
+Cronus/M
+crony/SM
+crookedness/SM
+crooked/TPRY
+Crookes/M
+crookneck/MS
+crook/SGDM
+crooner/M
+croon/SRDGZ
+cropland/MS
+crop/MS
+cropped
+cropper/SM
+cropping
+croquet/MDSG
+croquette/SM
+Crosby/M
+crosier/SM
+crossarm
+crossbarred
+crossbarring
+crossbar/SM
+crossbeam/MS
+crossbones
+crossbowman/M
+crossbowmen
+crossbow/SM
+crossbred/S
+crossbreed/SG
+crosscheck/SGD
+crosscurrent/SM
+crosscut/SM
+crosscutting
+crossed/UA
+crosses/UA
+crossfire/SM
+crosshatch/GDS
+crossing/M
+Cross/M
+crossness/MS
+crossover/MS
+crosspatch/MS
+crosspiece/SM
+crosspoint
+crossproduct/S
+crossroad/GSM
+crossroads/M
+crosstalk/M
+crosstown
+crosswalk/MS
+crossway/M
+crosswind/SM
+crosswise
+crossword/MS
+cross/ZTYSRDMPBJG
+crotchetiness/M
+crotchet/MS
+crotchety/P
+crotchless
+crotch/MDS
+crouch/DSG
+croupier/M
+croup/SMDG
+croupy/TZR
+croton/MS
+crowbait
+crowbarred
+crowbarring
+crowbar/SM
+crowdedness/M
+crowded/P
+crowd/MRDSG
+crowfeet
+crowfoot/M
+crow/GDMS
+Crowley/M
+crowned/U
+crowner/M
+crown/RDMSJG
+crozier's
+CRT/S
+crucial/Y
+crucible/MS
+crucifiable
+crucifixion/MS
+Crucifixion/MS
+crucifix/SM
+cruciform/S
+crucify/NGDS
+crudded
+crudding
+cruddy/TR
+crudeness/MS
+crude/YSP
+crudités
+crudity/MS
+crud/STMR
+cruelness/MS
+cruelty/SM
+cruel/YRTSP
+cruet/MS
+cruft
+crufty
+Cruikshank/M
+cruise/GZSRD
+cruiser/M
+cruller/SM
+crumb/GSYDM
+crumble/DSJG
+crumbliness/MS
+crumbly/PTRS
+crumby/RT
+crumminess/S
+crummy/SRTP
+crump
+crumpet/SM
+crumple/DSG
+crunch/DSRGZ
+crunchiness/MS
+crunchy/TRP
+crupper/MS
+crusade/GDSRMZ
+crusader/M
+cruse/MS
+crushable/U
+crusher/M
+crushing/Y
+crushproof
+crush/SRDBGZ
+Crusoe/M
+crustacean/MS
+crustal
+crust/GMDS
+crustily
+crustiness/SM
+crusty/SRTP
+crutch/MDSG
+Crux/M
+crux/MS
+Cruz/M
+crybaby/MS
+cry/JGDRSZ
+cryogenic/S
+cryogenics/M
+cryostat/M
+cryosurgery/SM
+cryptanalysis/M
+cryptanalyst/M
+cryptanalytic
+crypt/CS
+cryptic
+cryptically
+cryptogram/MS
+cryptographer/MS
+cryptographic
+cryptographically
+cryptography/MS
+cryptologic
+cryptological
+cryptologist/M
+cryptology/M
+Cryptozoic/M
+crypt's
+crystalline/S
+crystallite/SM
+crystallization/AMS
+crystallized/UA
+crystallizes/A
+crystallize/SRDZG
+crystallizing/A
+crystallographer/MS
+crystallographic
+crystallography/M
+Crystal/M
+crystal/SM
+Crysta/M
+Crystie/M
+Cs
+C's
+cs/EA
+cs's
+CST
+ct
+CT
+Cthrine/M
+Ct/M
+ctn
+ctr
+Cuba/M
+Cuban/S
+cubbed
+cubbing
+cubbyhole/MS
+cuber/M
+cube/SM
+cubical/Y
+cubicle/SM
+cubic/YS
+cubism/SM
+cubist/MS
+cubit/MS
+cub/MDRSZG
+cuboid
+Cuchulain/M
+cuckold/GSDM
+cuckoldry/MS
+cuckoo/SGDM
+cucumber/MS
+cuddle/GSD
+cuddly/TRP
+cu/DG
+cudgel/GSJMD
+cud/MS
+cue/MS
+cuff/GSDM
+Cuisinart/M
+cuisine/MS
+Culbertson/M
+culinary
+Cullan/M
+cull/DRGS
+cullender's
+Cullen/M
+culler/M
+Culley/M
+Cullie/M
+Cullin/M
+Cull/MN
+Cully/M
+culminate/XSDGN
+culmination/M
+culotte/S
+culpability/MS
+culpable/I
+culpableness/M
+culpably
+culpa/SM
+culprit/SM
+cultism/SM
+cultist/SM
+cultivable
+cultivated/U
+cultivate/XBSDGN
+cultivation/M
+cultivator/SM
+cult/MS
+cultural/Y
+cultured/U
+culture/SDGM
+Culver/MS
+culvert/SM
+Cu/M
+cumber/DSG
+Cumberland/M
+cumbersomeness/MS
+cumbersome/YP
+cumbrous
+cumin/MS
+cummerbund/MS
+Cummings
+cumquat's
+cum/S
+cumulate/XVNGSD
+cumulation/M
+cumulative/Y
+cumuli
+cumulonimbi
+cumulonimbus/M
+cumulus/M
+Cunard/M
+cuneiform/S
+cunnilingus/SM
+Cunningham/M
+cunningness/M
+cunning/RYSPT
+cunt/SM!
+cupboard/SM
+cupcake/SM
+Cupertino/M
+cupful/SM
+cupidinously
+cupidity/MS
+Cupid/M
+cupid/S
+cup/MS
+cupola/MDGS
+cupped
+cupping/M
+cupric
+cuprous
+curability/MS
+curable/IP
+curableness/MI
+curably/I
+Curacao/M
+curacy/SM
+curare/MS
+curate/VGMSD
+curative/YS
+curatorial
+curator/KMS
+curbing/M
+curbside
+curb/SJDMG
+curbstone/MS
+Curcio/M
+curdle/SDG
+curd/SMDG
+cured/U
+cure/KBDRSGZ
+curer/MK
+curettage/SM
+curfew/SM
+curfs
+curiae
+curia/M
+cur/IBS
+Curie/M
+curie/SM
+curiosity/SM
+curio/SM
+curiousness/SM
+curious/TPRY
+Curitiba/M
+curium/MS
+curler/SM
+curlew/MS
+curlicue/MGDS
+curliness/SM
+curling/M
+curl/UDSG
+curlycue's
+curly/PRT
+curmudgeon/MYS
+Curran/M
+currant/SM
+curred/AFI
+currency's
+currency/SF
+current/FSY
+currently/A
+currentness/M
+Currey/M
+curricle/M
+curricula
+curricular
+curriculum/M
+Currie/M
+currier/M
+Currier/M
+curring/FAI
+Curr/M
+currycomb/DMGS
+Curry/MR
+curry/RSDMG
+cur's
+curs/ASDVG
+curse/A
+cursedness/M
+cursed/YRPT
+curse's
+cursive/EPYA
+cursiveness/EM
+cursives
+cursor/DMSG
+cursorily
+cursoriness/SM
+cursory/P
+curtailer/M
+curtail/LSGDR
+curtailment/SM
+curtain/GSMD
+Curtice/M
+Curtis/M
+Curt/M
+curtness/MS
+curtsey's
+curtsy/SDMG
+curt/TYRP
+curvaceousness/S
+curvaceous/YP
+curvature/MS
+curved/A
+curved's
+curve/DSGM
+curvilinearity/M
+curvilinear/Y
+curving/M
+curvy/RT
+cushion/SMDG
+Cushman/M
+cushy/TR
+cuspid/MS
+cuspidor/MS
+cusp/MS
+cussedness/M
+cussed/YP
+cuss/EGDSR
+cusses/F
+cussing/F
+cuss's
+custard/MS
+Custer/M
+custodial
+custodianship/MS
+custodian/SM
+custody/MS
+customarily
+customariness/M
+customary/PS
+customer/M
+customhouse/S
+customization/SM
+customize/ZGBSRD
+custom/SMRZ
+cutaneous/Y
+cutaway/SM
+cutback/SM
+cuteness/MS
+cute/SPY
+cutesy/RT
+cuticle/SM
+cutlass/MS
+cutler/SM
+cutlery/MS
+cutlet/SM
+cut/MRST
+cutoff/MS
+cutout/SM
+cutter/SM
+cutthroat/SM
+cutting/MYS
+cuttlebone/SM
+cuttlefish/MS
+cuttle/M
+cutup/MS
+cutworm/MS
+Cuvier/M
+Cuzco/M
+CV
+cw
+cwt
+Cyanamid/M
+cyanate/M
+cyanic
+cyanide/GMSD
+cyan/MS
+cyanogen/M
+Cybele/M
+cybernetic/S
+cybernetics/M
+cyberpunk/S
+cyberspace/S
+Cybill/M
+Cybil/M
+Cyb/M
+cyborg/S
+Cyclades
+cyclamen/MS
+cycle/ASDG
+cycler
+cycle's
+cycleway/S
+cyclic
+cyclical/SY
+cycling/M
+cyclist/MS
+cyclohexanol
+cycloidal
+cycloid/SM
+cyclometer/MS
+cyclone/SM
+cyclonic
+cyclopean
+cyclopedia/MS
+cyclopes
+Cyclopes
+cyclops
+Cyclops/M
+cyclotron/MS
+cyder/SM
+cygnet/MS
+Cygnus/M
+cylinder/GMDS
+cylindric
+cylindrical/Y
+Cy/M
+cymbalist/MS
+cymbal/SM
+Cymbre/M
+Cynde/M
+Cyndia/M
+Cyndie/M
+Cyndi/M
+Cyndy/M
+cynical/UY
+cynicism/MS
+cynic/MS
+cynosure/SM
+Cynthea/M
+Cynthia/M
+Cynthie/M
+Cynthy/M
+cypher/MGSD
+cypreses
+cypress/SM
+Cyprian
+Cypriot/SM
+Cyprus/M
+Cyrano/M
+Cyrille/M
+Cyrillic
+Cyrill/M
+Cyrillus/M
+Cyril/M
+Cyrus/M
+cystic
+cyst/MS
+cytochemistry/M
+cytochrome/M
+cytologist/MS
+cytology/MS
+cytolysis/M
+cytoplasmic
+cytoplasm/SM
+cytosine/MS
+cytotoxic
+CZ
+czarevitch/M
+czarina/SM
+czarism/M
+czarist/S
+czarship
+czar/SM
+Czech
+Czechoslovakia/M
+Czechoslovakian/S
+Czechoslovak/S
+Czechs
+Czerniak/M
+Czerny/M
+D
+DA
+dabbed
+dabber/MS
+dabbing
+dabbler/M
+dabble/RSDZG
+dab/S
+Dacca's
+dace/MS
+Dacey/M
+dacha/SM
+Dachau/M
+dachshund/SM
+Dacia/M
+Dacie/M
+Dacron/MS
+dactylic/S
+dactyl/MS
+Dacy/M
+Dadaism/M
+dadaism/S
+Dadaist/M
+dadaist/S
+Dada/M
+daddy/SM
+Dade/M
+dado/DMG
+dadoes
+dad/SM
+Daedalus/M
+Dael/M
+daemonic
+daemon/SM
+Daffie/M
+Daffi/M
+daffiness/S
+daffodil/MS
+Daffy/M
+daffy/PTR
+daftness/MS
+daft/TYRP
+DAG
+dagger/DMSG
+Dag/M
+Dagmar/M
+Dagny/M
+Daguerre/M
+daguerreotype/MGDS
+Dagwood/M
+Dahlia/M
+dahlia/MS
+Dahl/M
+Dahomey/M
+Daile/M
+dailiness/MS
+daily/PS
+Daimler/M
+daintily
+daintiness/MS
+dainty/TPRS
+daiquiri/SM
+dairying/M
+dairyland
+dairymaid/SM
+dairyman/M
+dairymen
+dairy/MJGS
+dairywoman/M
+dairywomen
+Daisey/M
+Daisie/M
+Daisi/M
+dais/SM
+Daisy/M
+daisy/SM
+Dakar/M
+Dakotan
+Dakota/SM
+Dale/M
+Dalenna/M
+dale/SMH
+daleth/M
+Daley/M
+Dalhousie/M
+Dalia/M
+Dalian/M
+Dalila/M
+Dali/SM
+Dallas/M
+dalliance/SM
+dallier/M
+Dalli/MS
+Dall/M
+Dallon/M
+dally/ZRSDG
+Dal/M
+Dalmatia/M
+dalmatian/S
+Dalmatian/SM
+Daloris/M
+Dalston/M
+Dalt/M
+Dalton/M
+Daly/M
+damageable
+damaged/U
+damage/MZGRSD
+damager/M
+damaging/Y
+Damara/M
+Damaris/M
+Damascus/M
+damask/DMGS
+dame/SM
+Dame/SMN
+Damian/M
+Damiano/M
+Damien/M
+Damion/M
+Damita/M
+dam/MDS
+dammed
+damming
+dammit/S
+damnably
+damnation/MS
+damnedest/MS
+damned/TR
+damn/GSBRD
+damning/Y
+Damocles/M
+Damon/M
+damped/U
+dampener/M
+dampen/RDZG
+damper/M
+dampness/MS
+damp/SGZTXYRDNP
+damselfly/MS
+damsel/MS
+damson/MS
+Dana
+Dana/M
+Danbury/M
+dancelike
+dancer/M
+dance/SRDJGZ
+dandelion/MS
+dander/DMGS
+dandify/SDG
+dandily
+dandle/GSD
+dandruff/MS
+dandy/TRSM
+Danelaw/M
+Danella/M
+Danell/M
+Dane/SM
+Danette/M
+danger/DMG
+Dangerfield/M
+dangerousness/M
+dangerous/YP
+dangler/M
+dangle/ZGRSD
+dangling/Y
+dang/SGZRD
+Danial/M
+Dania/M
+Danica/M
+Danice/M
+Daniela/M
+Daniele/M
+Daniella/M
+Danielle/M
+Daniel/SM
+Danielson/M
+Danie/M
+Danika/M
+Danila/M
+Dani/M
+Danish
+danish/S
+Danita/M
+Danit/M
+dankness/MS
+dank/TPYR
+Danna/M
+Dannel/M
+Dannie/M
+Danni/M
+Dannye/M
+Danny/M
+danseuse/SM
+Dan/SM
+Dante/M
+Danton/M
+Danube/M
+Danubian
+Danville/M
+Danya/M
+Danyelle/M
+Danyette/M
+Danzig/M
+Daphene/M
+Daphna/M
+Daphne/M
+dapperness/M
+dapper/PSTRY
+dapple/SDG
+Dara/M
+Darbee/M
+Darbie/M
+Darb/M
+Darby/M
+Darcee/M
+Darcey/M
+Darcie/M
+Darci/M
+D'Arcy
+Darcy/M
+Darda/M
+Dardanelles
+daredevil/MS
+daredevilry/S
+Dareen/M
+Darelle/M
+Darell/M
+Dare/M
+Daren/M
+darer/M
+daresay
+dare/ZGDRSJ
+d'Arezzo
+Daria/M
+Darice/M
+Darill/M
+Dari/M
+daringness/M
+daring/PY
+Darin/M
+Dario/M
+Darius/M
+Darjeeling/M
+darkener/M
+darken/RDZG
+dark/GTXYRDNSP
+darkish
+darkly/TR
+darkness/MS
+darkroom/SM
+Darla/M
+Darleen/M
+Darlene/M
+Darline/M
+Darling/M
+darlingness/M
+Darlington/M
+darling/YMSP
+Darlleen/M
+Dar/MNH
+Darnall/M
+darned/TR
+Darnell/M
+darner/M
+darn/GRDZS
+darning/M
+Darn/M
+Daron/M
+DARPA/M
+Darrelle/M
+Darrell/M
+Darrel/M
+Darren/M
+Darrick/M
+Darrin/M
+Darrow/M
+Darryl/M
+Darsey/M
+Darsie/M
+d'art
+dartboard/SM
+darter/M
+Darth/M
+Dartmouth/M
+dart/MRDGZS
+Darvon/M
+Darwinian/S
+Darwinism/MS
+Darwinist/MS
+Darwin/M
+Darya/M
+Daryle/M
+Daryl/M
+Daryn/M
+Dasha/M
+dashboard/SM
+dasher/M
+dash/GZSRD
+dashiki/SM
+dashing/Y
+Dasie/M
+Dasi/M
+dastardliness/SM
+dastardly/P
+dastard/MYS
+Dasya/M
+DAT
+database/DSMG
+datafile
+datagram/MS
+data/M
+Datamation/M
+Datamedia/M
+dataset/S
+datedly
+datedness
+date/DRSMZGV
+dated/U
+dateless
+dateline/DSMG
+dater/M
+Datha/M
+dative/S
+Datsun/M
+datum/MS
+dauber/M
+daub/RDSGZ
+Daugherty/M
+daughter/MYS
+Daumier/M
+Daune/M
+daunt/DSG
+daunted/U
+daunting/Y
+dauntlessness/SM
+dauntless/PY
+dauphin/SM
+Davao/M
+Daveen/M
+Dave/M
+Daven/M
+Davenport/M
+davenport/MS
+Daveta/M
+Davey/M
+Davida/M
+Davidde/M
+Davide/M
+David/SM
+Davidson/M
+Davie/M
+Davina/M
+Davine/M
+Davinich/M
+Davin/M
+Davis/M
+Davita/M
+davit/SM
+Dav/MN
+Davon/M
+Davy/SM
+dawdler/M
+dawdle/ZGRSD
+Dawes/M
+Dawna/M
+dawn/GSDM
+Dawn/M
+Dawson/M
+daybed/S
+daybreak/SM
+daycare/S
+daydreamer/M
+daydream/RDMSZG
+Dayle/M
+daylight/GSDM
+Day/M
+Dayna/M
+daysack
+day/SM
+daytime/SM
+Dayton/M
+dazed/PY
+daze/DSG
+dazzler/M
+dazzle/ZGJRSD
+dazzling/Y
+db
+DB
+dbl
+dB/M
+DBMS
+DC
+DD
+Ddene/M
+DDS
+DDT
+DE
+deacon/DSMG
+deaconess/MS
+deadbeat/SM
+deadbolt/S
+deadener/M
+deadening/MY
+deaden/RDG
+deadhead/MS
+deadline/MGDS
+deadliness/SM
+deadlock/MGDS
+deadly/RPT
+deadness/M
+deadpanned
+deadpanner
+deadpanning
+deadpan/S
+dead/PTXYRN
+deadwood/SM
+deafening/MY
+deafen/JGD
+deafness/MS
+deaf/TXPYRN
+dealer/M
+dealership/MS
+dealing/M
+deallocator
+deal/RSGZJ
+dealt
+Deana/M
+dean/DMG
+Deandre/M
+Deane/M
+deanery/MS
+Dean/M
+Deanna/M
+Deanne/M
+Deann/M
+deanship/SM
+Dearborn/M
+dearness/MS
+dearth/M
+dearths
+dear/TYRHPS
+deary/MS
+deassign
+deathbed/MS
+deathblow/SM
+deathless/Y
+deathlike
+deathly/TR
+death/MY
+deaths
+deathtrap/SM
+deathward
+deathwatch/MS
+debacle/SM
+debarkation/SM
+debark/G
+debar/L
+debarment/SM
+debarring
+debaser/M
+debatable/U
+debate/BMZ
+debater/M
+debauchedness/M
+debauched/PY
+debauchee/SM
+debaucher/M
+debauchery/SM
+debauch/GDRS
+Debbie/M
+Debbi/M
+Debby/M
+Debee/M
+debenture/MS
+Debera/M
+debilitate/NGXSD
+debilitation/M
+debility/MS
+Debi/M
+debit/DG
+deb/MS
+Deb/MS
+debonairness/SM
+debonair/PY
+Deborah/M
+Debora/M
+Debor/M
+debouch/DSG
+Debra/M
+debrief/GJ
+debris/M
+debtor/SM
+debt/SM
+Debussy/M
+débutante/SM
+debut/MDG
+decade/MS
+decadency/S
+decadent/YS
+decaffeinate/DSG
+decaf/S
+decagon/MS
+Decalogue/M
+decal/SM
+decamp/L
+decampment/MS
+decapitate/GSD
+decapitator/SM
+decathlon/SM
+Decatur/M
+decay/GRD
+Decca/M
+Deccan/M
+decease/M
+decedent/MS
+deceitfulness/SM
+deceitful/PY
+deceit/SM
+deceived/U
+deceiver/M
+deceives/U
+deceive/ZGRSD
+deceivingly
+deceiving/U
+decelerate/XNGSD
+deceleration/M
+decelerator/SM
+December/SM
+decency/ISM
+decennial/SY
+decent/TIYR
+deception/SM
+deceptiveness/SM
+deceptive/YP
+decertify/N
+dechlorinate/N
+decibel/MS
+decidability/U
+decidable/U
+decidedness/M
+decided/PY
+decide/GRSDB
+deciduousness/M
+deciduous/YP
+decile/SM
+deciliter/SM
+decimal/SYM
+decimate/XNGDS
+decimation/M
+decimeter/MS
+decipherable/IU
+decipher/BRZG
+decipherer/M
+decisional
+decisioned
+decisioning
+decision/ISM
+decisive/IPY
+decisiveness/MSI
+deckchair
+decker/M
+Decker/M
+deck/GRDMSJ
+deckhand/S
+decking/M
+Deck/RM
+declamation/SM
+declamatory
+declarable
+declaration/MS
+declaration's/A
+declarative/SY
+declarator/MS
+declaratory
+declare/AGSD
+declared/U
+declarer/MS
+declension/SM
+declination/MS
+decliner/M
+decline/ZGRSD
+declivity/SM
+Dec/M
+DEC/M
+DECNET
+DECnet/M
+deco
+décolletage/S
+décolleté
+decolletes
+decolorising
+decomposability/M
+decomposable/IU
+decompose/B
+decompress/R
+decongestant/S
+deconstruction
+deconvolution
+decorated/AU
+decorate/NGVDSX
+decorates/A
+decorating/A
+decoration/ASM
+decorativeness/M
+decorative/YP
+decorator/SM
+decorousness/MS
+decorousness's/I
+decorous/PIY
+decor/S
+decorticate/GNDS
+decortication/M
+decorum/MS
+decoupage/MGSD
+decouple/G
+decoy/M
+decrease
+decreasing/Y
+decreeing
+decree/RSM
+decremental
+decrement/DMGS
+decrepit
+decrepitude/SM
+decriminalization/S
+decriminalize/DS
+decry/G
+decrypt/GD
+decryption
+DECstation/M
+DECsystem/M
+DECtape/M
+decustomised
+Dedekind/M
+Dede/M
+dedicate/AGDS
+dedicated/Y
+dedication/MS
+dedicative
+dedicator/MS
+dedicatory
+Dedie/M
+Dedra/M
+deduce/RSDG
+deducible
+deductibility/M
+deductible/S
+deduction/SM
+deductive/Y
+deduct/VG
+Deeanne/M
+Deeann/M
+deeded
+Deedee/M
+deeding
+deed/IS
+deed's
+deejay/MDSG
+Dee/M
+deem/ADGS
+deemphasis
+Deena/M
+deepen/DG
+deepish
+deepness/MS
+deep/PTXSYRN
+Deerdre/M
+Deere/M
+deerskin/MS
+deer/SM
+deerstalker/SM
+deerstalking/M
+Deeyn/M
+deface/LZ
+defacement/SM
+defaecate
+defalcate/NGXSD
+defalcation/M
+defamation/SM
+defamatory
+defamer/M
+defame/ZR
+defaulter/M
+default/ZR
+defeated/U
+defeater/M
+defeatism/SM
+defeatist/SM
+defeat/ZGD
+defecate/DSNGX
+defecation/M
+defection/SM
+defectiveness/MS
+defective/PYS
+defect/MDSVG
+defector/MS
+defendant/SM
+defended/U
+defenestrate/GSD
+defenselessness/MS
+defenseless/PY
+defenses/U
+defense/VGSDM
+defensibility/M
+defensible/I
+defensibly/I
+defensiveness/MS
+defensive/PSY
+deference/MS
+deferential/Y
+deferent/S
+deferrable
+deferral/SM
+deferred
+deferrer/MS
+deferring
+deffer
+defiance/MS
+defiant/Y
+defibrillator/M
+deficiency/MS
+deficient/SY
+deficit/MS
+defier/M
+defile/L
+defilement/MS
+definable/UI
+definably/I
+define/AGDRS
+defined/U
+definer/SM
+definite/IPY
+definiteness/IMS
+definitional
+definition/ASM
+definitiveness/M
+definitive/SYP
+defis
+deflate/XNGRSDB
+deflationary
+deflation/M
+deflect/DSGV
+deflected/U
+deflection/MS
+deflector/MS
+defocus
+defocussing
+Defoe/M
+defog
+defogger/S
+defoliant/SM
+defoliator/SM
+deformational
+deform/B
+deformed/U
+deformity/SM
+defrauder/M
+defraud/ZGDR
+defrayal/SM
+defroster/M
+defrost/RZ
+deftness/MS
+deft/TYRP
+defunct/S
+defying/Y
+defy/RDG
+def/Z
+deg
+Degas/M
+degassing
+degauss/GD
+degeneracy/MS
+degenerateness/M
+degenerate/PY
+degrade/B
+degradedness/M
+degraded/YP
+degrading/Y
+degrease
+degree/SM
+degum
+Dehlia/M
+dehumanize
+dehydrator/MS
+deicer/M
+deice/ZR
+deictic
+Deidre/M
+deification/M
+deify/SDXGN
+deign/DGS
+Deimos/M
+Deina/M
+Deirdre/MS
+deistic
+deist/SM
+Deity/M
+deity/SM
+deja
+deject/DSG
+dejectedness/M
+dejected/PY
+dejection/SM
+Dejesus/M
+DeKalb/M
+DeKastere/M
+Delacroix/M
+Delacruz/M
+Delainey/M
+Dela/M
+Delaney/M
+Delano/M
+Delawarean/SM
+Delaware/MS
+delay/D
+delayer/G
+Delbert/M
+Delcina/M
+Delcine/M
+delectableness/M
+delectable/SP
+delectably
+delectation/MS
+delegable
+Deleon/M
+deleted/U
+deleteriousness/M
+deleterious/PY
+delete/XBRSDNG
+deletion/M
+delfs
+Delft/M
+delft/MS
+delftware/S
+Delgado/M
+Delhi/M
+Delia/M
+deliberateness/SM
+deliberate/PVY
+deliberativeness/M
+deliberative/PY
+Delibes/M
+delicacy/IMS
+delicate/IYP
+delicatenesses
+delicateness/IM
+delicates
+delicatessen/MS
+deliciousness/MS
+delicious/YSP
+delicti
+delightedness/M
+delighted/YP
+delightfulness/M
+delightful/YP
+Delilah/M
+Delilahs
+Delila/M
+Delinda/M
+delineate/SDXVNG
+delineation/M
+delinquency/MS
+delinquent/SYM
+deliquesce/GSD
+deliquescent
+deliriousness/MS
+delirious/PY
+delirium/SM
+deli/SM
+Delius/M
+deliverables
+deliverable/U
+deliver/AGSD
+deliverance/SM
+delivered/U
+deliverer/SM
+delivery/AM
+deliverymen/M
+Della/M
+Dell/M
+dell/SM
+Dellwood/M
+Delly/M
+Delmar/M
+Delmarva/M
+Delmer/M
+Delmonico
+Delmore/M
+Delmor/M
+Del/MY
+Delora/M
+Delores/M
+Deloria/M
+Deloris/M
+Delphic
+Delphi/M
+Delphine/M
+Delphinia/M
+delphinium/SM
+Delphinus/M
+Delta/M
+delta/MS
+deltoid/SM
+deluder/M
+delude/RSDG
+deluding/Y
+deluge/SDG
+delusional
+delusion/SM
+delusiveness/M
+delusive/PY
+deluxe
+delve/GZSRD
+delver/M
+demagnify/N
+demagogic
+demagogue/GSDM
+demagoguery/SM
+demagogy/MS
+demander/M
+demand/GSRD
+demandingly
+demanding/U
+demarcate/SDNGX
+demarcation/M
+Demavend/M
+demean/GDS
+demeanor/SM
+dementedness/M
+demented/YP
+dementia/MS
+Demerol/M
+demesne/SM
+Demeter/M
+Demetra/M
+Demetre/M
+Demetria/M
+Demetri/MS
+Demetrius/M
+demigod/MS
+demijohn/MS
+demimondaine/SM
+demimonde/SM
+demineralization/SM
+Deming/M
+demise/DMG
+demit
+demitasse/MS
+demitted
+demitting
+Dem/MG
+democracy/MS
+Democratic
+democratically/U
+democratic/U
+democratization/MS
+democratize/DRSG
+democratizes/U
+Democrat/MS
+democrat/SM
+Democritus/M
+démodé
+demo/DMPG
+demographer/MS
+demographical/Y
+demographic/S
+demography/MS
+demolisher/M
+demolish/GSRD
+demolition/MS
+demonetization/S
+demoniacal/Y
+demoniac/S
+demonic
+demonology/M
+demon/SM
+demonstrable/I
+demonstrableness/M
+demonstrably/I
+demonstrate/XDSNGV
+demonstration/M
+demonstrativenesses
+demonstrativeness/UM
+demonstratives
+demonstrative/YUP
+demonstrator/MS
+demoralization/M
+demoralizer/M
+demoralizing/Y
+DeMorgan/M
+Demosthenes/M
+demote/DGX
+demotic/S
+Demott/M
+demount/B
+Dempsey/M
+demulcent/S
+demultiplex
+demureness/SM
+demure/YP
+demurral/MS
+demurred
+demurrer/MS
+demurring
+demur/RTS
+demythologization/M
+demythologize/R
+den
+Dena/M
+dendrite/MS
+Deneb/M
+Denebola/M
+Deneen/M
+Dene/M
+Deng/M
+dengue/MS
+deniable/U
+denial/SM
+Denice/M
+denier/M
+denigrate/VNGXSD
+denigration/M
+denim/SM
+Denise/M
+Deni/SM
+denizen/SMDG
+Den/M
+De/NM
+Denmark/M
+Denna/M
+denned
+Dennet/M
+Denney/M
+Dennie/M
+Denni/MS
+denning
+Dennison/M
+Denny/M
+denominate/V
+denominational/Y
+denote/B
+denouement/MS
+denounce/LZRSDG
+denouncement/SM
+denouncer/M
+dense/FR
+densely
+denseness/SM
+densitometer/MS
+densitometric
+densitometry/M
+density/MS
+dens/RT
+dental/YS
+dentifrice/SM
+dentine's
+dentin/SM
+dent/ISGD
+dentistry/MS
+dentist/SM
+dentition/MS
+dent's
+denture/IMS
+denuclearize/GSD
+denudation/SM
+denude/DG
+denuder/M
+denunciate/VNGSDX
+denunciation/M
+Denver/M
+denying/Y
+Deny/M
+Denys
+Denyse/M
+deny/SRDZG
+deodorant/SM
+deodorization/SM
+deodorize/GZSRD
+deodorizer/M
+Deon/M
+Deonne/M
+deoxyribonucleic
+depart/L
+departmentalization/SM
+departmentalize/DSG
+departmental/Y
+department/MS
+departure/MS
+dependability/MS
+dependableness/M
+dependable/P
+dependably
+Dependant/MS
+depend/B
+dependence/ISM
+dependency/MS
+dependent/IYS
+dependent's
+depicted/U
+depicter/M
+depiction/SM
+depict/RDSG
+depilatory/S
+deplete/VGNSDX
+depletion/M
+deplorableness/M
+deplorable/P
+deplorably
+deplorer/M
+deplore/SRDBG
+deploring/Y
+deployable
+deploy/AGDLS
+deployment/SAM
+depolarize
+deponent/S
+deportation/MS
+deportee/SM
+deport/LG
+deportment/MS
+depose
+deposit/ADGS
+depositary/M
+deposition/A
+depositor/SAM
+depository/MS
+depravedness/M
+depraved/PY
+deprave/GSRD
+depraver/M
+depravity/SM
+deprecate/XSDNG
+deprecating/Y
+deprecation/M
+deprecatory
+depreciable
+depreciate/XDSNGV
+depreciating/Y
+depreciation/M
+depreciative/Y
+depressant/S
+depressible
+depression/MS
+depressive/YS
+depressor/MS
+depress/V
+deprive/GSD
+depth/M
+depths
+Dept/M
+deputation/SM
+depute/SDG
+deputize/DSG
+deputy/MS
+dequeue
+derail/L
+dérailleur/MS
+derailment/MS
+derange/L
+derangement/MS
+Derbyshire/M
+derby/SM
+Derby/SM
+dereference/Z
+Derek/M
+dereliction/SM
+derelict/S
+Derick/M
+deride/D
+deriding/Y
+derision/SM
+derisiveness/MS
+derisive/PY
+derisory
+derivable/U
+derivate/XNV
+derivation/M
+derivativeness/M
+derivative/SPYM
+derive/B
+derived/U
+Derk/M
+Der/M
+dermal
+dermatitides
+dermatitis/MS
+dermatological
+dermatologist/MS
+dermatology/MS
+dermis/SM
+Dermot/M
+derogate/XDSNGV
+derogation/M
+derogatorily
+derogatory
+Derrek/M
+Derrick/M
+derrick/SMDG
+Derrida/M
+derrière/S
+Derrik/M
+Derril/M
+derringer/SM
+Derron/M
+Derry/M
+dervish/SM
+Derward/M
+Derwin/M
+Des
+desalinate/NGSDX
+desalination/M
+desalinization/MS
+desalinize/GSD
+desalt/G
+descant/M
+Descartes/M
+descendant/SM
+descended/FU
+descendent's
+descender/M
+descending/F
+descends/F
+descend/ZGSDR
+descent
+describable/I
+describe/ZB
+description/MS
+descriptiveness/MS
+descriptive/SYP
+descriptor/SM
+descry/SDG
+Desdemona/M
+desecrater/M
+desecrate/SRDGNX
+desecration/M
+deserter/M
+desertification
+desertion/MS
+desert/ZGMRDS
+deservedness/M
+deserved/YU
+deserve/J
+deserving/Y
+déshabillé's
+desiccant/S
+desiccate/XNGSD
+desiccation/M
+desiccator/SM
+desiderata
+desideratum/M
+designable
+design/ADGS
+designate/VNGSDX
+designational
+designation/M
+designator/SM
+designed/Y
+designer/M
+designing/U
+Desi/M
+desirabilia
+desirability's
+desirability/US
+desirableness/SM
+desirableness's/U
+desirable/UPS
+desirably/U
+Desirae/M
+desire/BR
+desired/U
+Desiree/M
+desirer/M
+Desiri/M
+desirousness/M
+desirous/PY
+desist/DSG
+desk/SM
+desktop/S
+Desmond/M
+Desmund/M
+desolateness/SM
+desolate/PXDRSYNG
+desolater/M
+desolating/Y
+desolation/M
+desorption/M
+despairer/M
+despairing/Y
+despair/SGDR
+desperadoes
+desperado/M
+desperateness/SM
+desperate/YNXP
+desperation/M
+despicable
+despicably
+despiser/M
+despise/SRDG
+despoil/L
+despoilment/MS
+despond
+despondence/S
+despondency/MS
+despondent/Y
+despotic
+despotically
+despotism/SM
+dessert/SM
+dessicate/DN
+d'Estaing
+destinate/NX
+destination/M
+destine/GSD
+destiny/MS
+destituteness/M
+destitute/NXP
+destitution/M
+destroy/BZGDRS
+destroyer/M
+destructibility/SMI
+destructible/I
+destruction/SM
+destructiveness/MS
+destructive/YP
+destructor/M
+destruct/VGSD
+desuetude/MS
+desultorily
+desultoriness/M
+desultory/P
+detachedness/M
+detached/YP
+detacher/M
+detach/LSRDBG
+detachment/SM
+detailedness/M
+detailed/YP
+detainee/S
+detainer/M
+detain/LGRDS
+detainment/MS
+d'etat
+detectability/U
+detectable/U
+detectably/U
+detect/DBSVG
+detected/U
+detection/SM
+detective/MS
+detector/MS
+détente
+detentes
+detention/SM
+detergency/M
+detergent/SM
+deteriorate/XDSNGV
+deterioration/M
+determent/SM
+determinability/M
+determinable/IP
+determinableness/IM
+determinacy/I
+determinant/MS
+determinateness/IM
+determinate/PYIN
+determination/IM
+determinativeness/M
+determinative/P
+determinedly
+determinedness/M
+determined/U
+determine/GASD
+determiner/SM
+determinism/MS
+determinism's/I
+deterministically
+deterministic/I
+deterred/U
+deterrence/SM
+deterrent/SMY
+deterring
+detersive/S
+deter/SL
+deters/V
+detestableness/M
+detestable/P
+detestably
+detestation/SM
+dethrone/L
+dethronement/SM
+detonable
+detonated/U
+detonate/XDSNGV
+detonation/M
+detonator/MS
+detour/G
+detoxification/M
+detoxify/NXGSD
+detox/SDG
+detract/GVD
+detractive/Y
+d'etre
+detribalize/GSD
+detrimental/SY
+detriment/SM
+detritus/M
+Detroit/M
+deuced/Y
+deuce/SDGM
+deus
+deuterium/MS
+deuteron/M
+Deuteronomy/M
+Deutsch/M
+Deva/M
+Devanagari/M
+Devan/M
+devastate/XVNGSD
+devastating/Y
+devastation/M
+devastator/SM
+develop/ALZSGDR
+developed/U
+developer/MA
+developmental/Y
+development/ASM
+deviance/MS
+deviancy/S
+deviant/YMS
+deviated/U
+deviate/XSDGN
+deviating/U
+deviation/M
+devilishness/MS
+devilish/PY
+devilment/SM
+devilry/MS
+devil/SLMDG
+deviltry/MS
+Devi/M
+Devina/M
+Devin/M
+Devinne/M
+deviousness/SM
+devious/YP
+devise/JR
+deviser/M
+Devland/M
+Devlen/M
+Devlin/M
+Dev/M
+devoice
+devolution/MS
+devolve/GSD
+Devondra/M
+Devonian
+Devon/M
+Devonna/M
+Devonne/M
+Devonshire/M
+Devora/M
+devoted/Y
+devotee/MS
+devote/XN
+devotional/YS
+devotion/M
+devourer/M
+devour/SRDZG
+devoutness/MS
+devout/PRYT
+Devy/M
+Dewain/M
+dewar
+Dewar/M
+Dewayne/M
+dewberry/MS
+dewclaw/SM
+dewdrop/MS
+Dewey/M
+Dewie/M
+dewiness/MS
+Dewitt/M
+dewlap/MS
+Dew/M
+dew/MDGS
+dewy/TPR
+Dexedrine/M
+dexes/I
+Dex/M
+dexter
+dexterity/MS
+Dexter/M
+dexterousness/MS
+dexterous/PY
+dextrose/SM
+DH
+Dhaka
+Dhaulagiri/M
+dhoti/SM
+dhow/MS
+DI
+diabase/M
+diabetes/M
+diabetic/S
+diabolic
+diabolicalness/M
+diabolical/YP
+diabolism/M
+diachronic/P
+diacritical/YS
+diacritic/MS
+diadem/GMDS
+diaereses
+diaeresis/M
+Diaghilev/M
+diagnometer/SM
+diagnosable/U
+diagnose/BGDS
+diagnosed/U
+diagnosis/M
+diagnostically
+diagnostician/SM
+diagnostic/MS
+diagnostics/M
+diagonalize/GDSB
+diagonal/YS
+diagrammable
+diagrammatic
+diagrammaticality
+diagrammatically
+diagrammed
+diagrammer/SM
+diagramming
+diagram/MS
+Diahann/M
+dialectal/Y
+dialectical/Y
+dialectic/MS
+dialect/MS
+dialed/A
+dialer/M
+dialing/M
+dial/MRDSGZJ
+dialogged
+dialogging
+dialog/MS
+dials/A
+dialysis/M
+dialyzed/U
+dialyzes
+diam
+diamagnetic
+diameter/MS
+diametric
+diametrical/Y
+diamondback/SM
+diamond/GSMD
+Diana/M
+Diandra/M
+Diane/M
+Dianemarie/M
+Dian/M
+Dianna/M
+Dianne/M
+Diann/M
+Diannne/M
+diapason/MS
+diaper/SGDM
+diaphanousness/M
+diaphanous/YP
+diaphragmatic
+diaphragm/SM
+diarist/SM
+Diarmid/M
+diarrheal
+diarrhea/MS
+diary/MS
+diaspora
+Diaspora/SM
+diastase/SM
+diastole/MS
+diastolic
+diathermy/SM
+diathesis/M
+diatomic
+diatom/SM
+diatonic
+diatribe/MS
+Diaz's
+dibble/SDMG
+dibs
+DiCaprio/M
+dice/GDRS
+dicer/M
+dicey
+dichloride/M
+dichotomization/M
+dichotomize/DSG
+dichotomous/PY
+dichotomy/SM
+dicier
+diciest
+dicing/M
+Dickensian/S
+dickens/M
+Dickens/M
+dicker/DG
+Dickerson/M
+dickey/SM
+dick/GZXRDMS!
+Dickie/M
+dickier
+dickiest
+Dickinson/M
+Dickson/M
+Dick/XM
+Dicky/M
+dicky's
+dicotyledonous
+dicotyledon/SM
+dicta/M
+Dictaphone/SM
+dictate/SDNGX
+dictation/M
+dictatorialness/M
+dictatorial/YP
+dictator/MS
+dictatorship/SM
+dictionary/SM
+diction/MS
+dictum/M
+didactically
+didactic/S
+didactics/M
+did/AU
+diddler/M
+diddle/ZGRSD
+Diderot/M
+Didi/M
+didn't
+didoes
+dido/M
+Dido/M
+didst
+die/DS
+Diefenbaker/M
+Diego/M
+dieing
+dielectric/MS
+diem
+Diem/M
+Diena/M
+Dierdre/M
+diereses
+dieresis/M
+diesel/GMDS
+Diesel's
+dies's
+dies/U
+dietary/S
+dieter/M
+Dieter/M
+dietetic/S
+dietetics/M
+diethylaminoethyl
+diethylstilbestrol/M
+dietitian/MS
+diet/RDGZSM
+Dietrich/M
+Dietz/M
+difference/DSGM
+difference's/I
+differences/I
+differentiability
+differentiable
+differential/SMY
+differentiated/U
+differentiate/XSDNG
+differentiation/M
+differentiator/SM
+differentness
+different/YI
+differ/SZGRD
+difficile
+difficult/Y
+difficulty/SM
+diffidence/MS
+diffident/Y
+diffract/GSD
+diffraction/SM
+diffractometer/SM
+diffuseness/MS
+diffuse/PRSDZYVXNG
+diffuser/M
+diffusible
+diffusional
+diffusion/M
+diffusiveness/M
+diffusive/YP
+diffusivity/M
+digerati
+digested/IU
+digester/M
+digestibility/MS
+digestible/I
+digestifs
+digestion/ISM
+digestive/YSP
+digest/RDVGS
+digger/MS
+digging/S
+digitalis/M
+digitalization/MS
+digitalized
+digitalizes
+digitalizing
+digital/SY
+digitization/M
+digitizer/M
+digitize/ZGDRS
+digit/SM
+dignified/U
+dignify/DSG
+dignitary/SM
+dignity/ISM
+digram
+digraph/M
+digraphs
+digress/GVDS
+digression/SM
+digressiveness/M
+digressive/PY
+dig/TS
+dihedral
+Dijkstra/M
+Dijon/M
+dike/DRSMG
+diker/M
+diktat/SM
+Dilan/M
+dilapidate/XGNSD
+dilapidation/M
+dilatation/SM
+dilated/YP
+dilate/XVNGSD
+dilation/M
+dilatoriness/M
+dilator/SM
+dilatory/P
+Dilbert/M
+dilemma/MS
+dilettante/MS
+dilettantish
+dilettantism/MS
+diligence/SM
+diligentness/M
+diligent/YP
+dilithium
+Dillard/M
+Dillie/M
+Dillinger/M
+dilling/R
+dillis
+Dill/M
+Dillon/M
+dill/SGMD
+dillydally/GSD
+Dilly/M
+dilly/SM
+dilogarithm
+diluent
+diluted/U
+diluteness/M
+dilute/RSDPXYVNG
+dilution/M
+Di/M
+DiMaggio/M
+dimensionality/M
+dimensional/Y
+dimensionless
+dimension/MDGS
+dimer/M
+dime/SM
+dimethylglyoxime
+dimethyl/M
+diminished/U
+diminish/SDGBJ
+diminuendo/SM
+diminution/SM
+diminutiveness/M
+diminutive/SYP
+Dimitri/M
+Dimitry/M
+dimity/MS
+dimmed/U
+dimmer/MS
+dimmest
+dimming
+dimness/SM
+dimorphism/M
+dimple/MGSD
+dimply/RT
+dim/RYPZS
+dimwit/MS
+dimwitted
+Dinah/M
+Dina/M
+dinar/SM
+diner/M
+dine/S
+dinette/MS
+dingbat/MS
+ding/GD
+dinghy/SM
+dingily
+dinginess/SM
+dingle/MS
+dingoes
+dingo/MS
+dingus/SM
+dingy/PRST
+dinky/RST
+din/MDRZGS
+dinned
+dinner/SM
+dinnertime/S
+dinnerware/MS
+Dinnie/M
+dinning
+Dinny/M
+Dino/M
+dinosaur/MS
+dint/SGMD
+diocesan/S
+diocese/SM
+Diocletian/M
+diode/SM
+Diogenes/M
+Dione/M
+Dionisio/M
+Dionis/M
+Dion/M
+Dionne/M
+Dionysian
+Dionysus/M
+Diophantine/M
+diopter/MS
+diorama/SM
+Dior/M
+dioxalate
+dioxide/MS
+dioxin/S
+diphtheria/SM
+diphthong/SM
+diplexers
+diploid/S
+diplomacy/SM
+diploma/SMDG
+diplomata
+diplomatically
+diplomatic/S
+diplomatics/M
+diplomatist/SM
+diplomat/MS
+dipodic
+dipody/M
+dipole/MS
+dipped
+Dipper/M
+dipper/SM
+dipping/S
+dippy/TR
+dip/S
+dipsomaniac/MS
+dipsomania/SM
+dipstick/MS
+dipterous
+diptych/M
+diptychs
+Dir
+Dirac/M
+directed/IUA
+directionality
+directional/SY
+direction/MIS
+directions/A
+directive/SM
+directivity/M
+directly/I
+directness/ISM
+director/AMS
+directorate/SM
+directorial
+directorship/SM
+directory/SM
+direct/RDYPTSVG
+directrix/MS
+directs/IA
+direful/Y
+direness/M
+dire/YTRP
+dirge/GSDM
+Dirichlet/M
+dirigible/S
+dirk/GDMS
+Dirk/M
+dirndl/MS
+dirtily
+dirtiness/SM
+dirt/MS
+dirty/GPRSDT
+Dis
+disable/LZGD
+disablement/MS
+disabler/M
+disabuse
+disadvantaged/P
+disagreeable/S
+disallow/D
+disambiguate/DSGNX
+disappointed/Y
+disappointing/Y
+disarming/Y
+disarrange/L
+disastrous/Y
+disband/L
+disbandment/SM
+disbar/L
+disbarment/MS
+disbarring
+disbelieving/Y
+disbursal/S
+disburse/GDRSL
+disbursement/MS
+disburser/M
+discerner/M
+discernibility
+discernible/I
+discernibly
+discerning/Y
+discernment/MS
+discern/SDRGL
+disc/GDM
+discharged/U
+disciple/DSMG
+discipleship/SM
+disciplinarian/SM
+disciplinary
+disciplined/U
+discipline/IDM
+discipliner/M
+disciplines
+disciplining
+disclosed/U
+discography/MS
+discolored/MP
+discoloreds/U
+discolor/G
+discombobulate/SDGNX
+discomfit/DG
+discomfiture/MS
+disco/MG
+discommode/DG
+disconcerting/Y
+disconnectedness/S
+disconnected/P
+disconnecter/M
+disconnect/R
+disconsolate/YN
+discordance/SM
+discordant/Y
+discord/G
+discorporate/D
+discotheque/MS
+discount/B
+discourage/LGDR
+discouragement/MS
+discouraging/Y
+discoverable/I
+discover/ADGS
+discovered/U
+discoverer/S
+discovery/SAM
+discreetly/I
+discreetness's/I
+discreetness/SM
+discreet/TRYP
+discrepancy/SM
+discrepant/Y
+discreteness/SM
+discrete/YPNX
+discretionary
+discretion/IMS
+discretization
+discretized
+discriminable
+discriminant/MS
+discriminated/U
+discriminate/SDVNGX
+discriminating/YI
+discrimination/MI
+discriminator/MS
+discriminatory
+discursiveness/S
+discussant/MS
+discussed/UA
+discusser/M
+discussion/SM
+discus/SM
+disdainfulness/M
+disdainful/YP
+disdain/MGSD
+disease/G
+disembowelment/SM
+disembowel/SLGD
+disengage/L
+disfigure/L
+disfigurement/MS
+disfranchise/L
+disfranchisement/MS
+disgorge
+disgrace/R
+disgracer/M
+disgruntle/DSLG
+disgruntlement/MS
+disguised/UY
+disguise/R
+disguiser/M
+disgust
+disgusted/Y
+disgustful/Y
+disgusting/Y
+dishabille/SM
+disharmonious
+dishcloth/M
+dishcloths
+dishevel/LDGS
+dishevelment/MS
+dish/GD
+dishonest
+dishonored/U
+dishpan/MS
+dishrag/SM
+dishtowel/SM
+dishwasher/MS
+dishwater/SM
+disillusion/LGD
+disillusionment/SM
+disinfectant/MS
+disinherit
+disinterestedness/SM
+disinterested/P
+disinvest/L
+disjoin
+disjointedness/S
+disjunctive/YS
+disjunct/VS
+disk/D
+diskette/S
+dislike/G
+dislodge/LG
+dislodgement/M
+dismalness/M
+dismal/PSTRY
+dismantle/L
+dismantlement/SM
+dismay/D
+dismayed/U
+dismaying/Y
+dis/MB
+dismember/LG
+dismemberment/MS
+dismissive/Y
+dismiss/RZ
+Disneyland/M
+Disney/M
+disoblige/G
+disorderedness/M
+disordered/YP
+disorderliness/M
+disorderly/P
+disorder/Y
+disorganize
+disorganized/U
+disparagement/MS
+disparager/M
+disparage/RSDLG
+disparaging/Y
+disparateness/M
+disparate/PSY
+dispatch/Z
+dispelled
+dispelling
+dispel/S
+dispensable/I
+dispensary/MS
+dispensate/NX
+dispensation/M
+dispenser/M
+dispense/ZGDRSB
+dispersal/MS
+dispersant/M
+dispersed/Y
+disperser/M
+disperse/XDRSZLNGV
+dispersible
+dispersion/M
+dispersiveness/M
+dispersive/PY
+dispirit/DSG
+displace/L
+display/AGDS
+displayed/U
+displeased/Y
+displease/G
+displeasure
+disport
+disposable/S
+disposal/SM
+dispose/IGSD
+dispositional
+disposition/ISM
+disproportional
+disproportionate/N
+disproportionation/M
+disprove/B
+disputable/I
+disputably/I
+disputant/SM
+disputation/SM
+disputatious/Y
+disputed/U
+disputer/M
+dispute/ZBGSRD
+disquieting/Y
+disquiet/M
+disquisition/SM
+Disraeli/M
+disregardful
+disrepair/M
+disreputableness/M
+disreputable/P
+disrepute/M
+disrespect
+disrupted/U
+disrupter/M
+disrupt/GVDRS
+disruption/MS
+disruptive/YP
+disruptor/M
+dissatisfy
+dissect/DG
+dissed
+dissembler/M
+dissemble/ZGRSD
+disseminate/XGNSD
+dissemination/M
+dissension/SM
+dissenter/M
+dissent/ZGSDR
+dissertation/SM
+disservice
+disses
+dissever
+dissidence/SM
+dissident/MS
+dissimilar/S
+dissing
+dissipatedly
+dissipatedness/M
+dissipated/U
+dissipater/M
+dissipate/XRSDVNG
+dissipation/M
+dissociable/I
+dissociate/DSXNGV
+dissociated/U
+dissociation/M
+dissociative/Y
+dissoluble/I
+dissoluteness/SM
+dissolute/PY
+dissolve/ASDG
+dissolved/U
+dissonance/SM
+dissonant/Y
+dissuade/GDRS
+dissuader/M
+dissuasive
+dist
+distaff/SM
+distal/Y
+distance/DSMG
+distantness/M
+distant/YP
+distaste
+distemper
+distend
+distension
+distention/SM
+distillate/XNMS
+distillation/M
+distillery/MS
+distincter
+distinctest
+distinction/MS
+distinctiveness/MS
+distinctive/YP
+distinct/IYVP
+distinctness/MSI
+distinguishable/I
+distinguishably/I
+distinguish/BDRSG
+distinguished/U
+distinguisher/M
+distort/BGDR
+distorted/U
+distorter/M
+distortion/MS
+distract/DG
+distractedness/M
+distracted/YP
+distracting/Y
+distrait
+distraught/Y
+distress
+distressful
+distressing/Y
+distribute/ADXSVNGB
+distributed/U
+distributer
+distributional
+distribution/AM
+distributiveness/M
+distributive/SPY
+distributivity
+distributorship/M
+distributor/SM
+district/GSAD
+district's
+distrust/G
+disturbance/SM
+disturbed/U
+disturber/M
+disturbing/Y
+disturb/ZGDRS
+disulfide/M
+disuse/M
+disyllable/M
+Dita/M
+ditcher/M
+ditch/MRSDG
+dither/RDZSG
+ditsy/TR
+ditto/DMGS
+ditty/SDGM
+Ditzel/M
+ditz/S
+diuresis/M
+diuretic/S
+diurnal/SY
+divalent/S
+diva/MS
+divan/SM
+dived/M
+divergence/SM
+divergent/Y
+diverge/SDG
+diver/M
+diverseness/MS
+diverse/XYNP
+diversification/M
+diversifier/M
+diversify/GSRDNX
+diversionary
+diversion/M
+diversity/SM
+divert/GSD
+diverticulitis/SM
+divertimento/M
+dive/S
+divestiture/MS
+divest/LDGS
+divestment/S
+dividable
+divide/AGDS
+divided/U
+dividend/MS
+divider/MS
+divination/SM
+diviner/M
+divine/RSDTZYG
+divinity/MS
+divisibility/IMS
+divisible/I
+divisional
+division/SM
+divisiveness/MS
+divisive/PY
+divisor/SM
+divorcée/MS
+divorce/GSDLM
+divorcement/MS
+divot/MS
+div/TZGJDRS
+divulge/GSD
+divvy/GSDM
+Dixiecrat/MS
+dixieland
+Dixieland/MS
+Dixie/M
+Dix/M
+Dixon/M
+dizzily
+dizziness/SM
+dizzying/Y
+dizzy/PGRSDT
+DJ
+Djakarta's
+djellabah's
+djellaba/S
+d/JGVX
+Djibouti/M
+DMD
+Dmitri/M
+DMZ
+DNA
+Dnepropetrovsk/M
+Dnepr's
+Dnieper's
+Dniester/M
+Dniren/M
+DOA
+doable
+DOB
+Dobbin/M
+dobbin/MS
+Doberman
+Dobro/M
+docent/SM
+docile/Y
+docility/MS
+docker/M
+docket/GSMD
+dock/GZSRDM
+dockland/MS
+dockside/M
+dockworker/S
+dockyard/SM
+doc/MS
+Doctor
+doctoral
+doctorate/SM
+doctor/GSDM
+Doctorow/M
+doctrinaire/S
+doctrinal/Y
+doctrine/SM
+docudrama/S
+documentary/MS
+documentation/MS
+documented/U
+document/RDMZGS
+DOD
+dodder/DGS
+dodecahedra
+dodecahedral
+dodecahedron/M
+Dode/M
+dodge/GZSRD
+Dodge/M
+dodgem/S
+dodger/M
+Dodgson/M
+Dodie/M
+Dodi/M
+Dodington/M
+Dodoma/M
+dodo/SM
+Dodson/M
+Dody/M
+DOE
+Doe/M
+doe/MS
+doer/MU
+does/AU
+doeskin/MS
+doesn't
+d'oeuvre
+doff/SGD
+dogcart/SM
+dogcatcher/MS
+dogeared
+Doge/M
+doge/SM
+dogfight/GMS
+dogfish/SM
+dogfought
+doggedness/SM
+dogged/PY
+doggerel/SM
+dogging
+doggone/RSDTG
+doggy/SRMT
+doghouse/SM
+dogie/SM
+doglegged
+doglegging
+dogleg/SM
+dogma/MS
+dogmatically/U
+dogmatic/S
+dogmatics/M
+dogmatism/SM
+dogmatist/SM
+dogsbody/M
+dog/SM
+dogtooth/M
+Dogtown/M
+dogtrot/MS
+dogtrotted
+dogtrotting
+dogwood/SM
+dogy's
+Doha/M
+doh's
+doily/SM
+doing/MU
+Dolby/SM
+doldrum/S
+doldrums/M
+doled/F
+dolefuller
+dolefullest
+dolefulness/MS
+doleful/PY
+Dole/M
+dole/MGDS
+doles/F
+Dolf/M
+doling/F
+dollar/SM
+Dolley/M
+Dollie/M
+Dolli/M
+Doll/M
+doll/MDGS
+dollop/GSMD
+Dolly/M
+dolly/SDMG
+dolmen/MS
+dolomite/SM
+dolomitic
+Dolores/M
+Dolorita/SM
+dolorous/Y
+dolor/SM
+dolphin/SM
+Dolph/M
+doltishness/SM
+doltish/YP
+dolt/MS
+domain/MS
+dome/DSMG
+Domenic/M
+Domenico/M
+Domeniga/M
+Domesday/M
+domestically
+domesticate/DSXGN
+domesticated/U
+domestication/M
+domesticity/MS
+domestic/S
+domicile/SDMG
+domiciliary
+dominance/MS
+dominant/YS
+dominate/VNGXSD
+domination/M
+dominator/M
+dominatrices
+dominatrix
+domineer/DSG
+domineeringness/M
+domineering/YP
+Dominga/M
+Domingo/M
+Dominguez/M
+Dominica/M
+Dominican/MS
+Dominick/M
+Dominic/M
+Dominik/M
+Domini/M
+dominion/MS
+Dominique/M
+dominoes
+domino/M
+Domitian/M
+Dom/M
+Donahue/M
+Donald/M
+Donaldson/M
+Donall/M
+Donal/M
+Donalt/M
+Dona/M
+dona/MS
+Donatello/M
+donate/XVGNSD
+donation/M
+donative/M
+Donaugh/M
+Donavon/M
+done/AUF
+Donella/M
+Donelle/M
+Donetsk/M
+Donetta/M
+dong/GDMS
+dongle/S
+Donia/M
+Donica/M
+Donielle/M
+Donizetti/M
+donkey/MS
+Donna/M
+Donnamarie/M
+donned
+Donnell/M
+Donnelly/M
+Donne/M
+Donner/M
+Donnie/M
+Donni/M
+donning
+donnishness/M
+donnish/YP
+Donn/RM
+donnybrook/MS
+Donny/M
+donor/MS
+Donovan/M
+don/S
+Don/SM
+don't
+donut/MS
+donutted
+donutting
+doodad/MS
+doodlebug/MS
+doodler/M
+doodle/SRDZG
+doohickey/MS
+Dooley/M
+Doolittle/M
+doom/MDGS
+doomsday/SM
+Doonesbury/M
+doorbell/SM
+door/GDMS
+doorhandles
+doorkeeper/M
+doorkeep/RZ
+doorknob/SM
+doorman/M
+doormat/SM
+doormen
+doornail/M
+doorplate/SM
+doors/I
+doorstep/MS
+doorstepped
+doorstepping
+doorstop/MS
+doorway/MS
+dooryard/SM
+dopamine
+dopant/M
+dopa/SM
+dope/DRSMZG
+doper/M
+dopey
+dopier
+dopiest
+dopiness/S
+Doppler/M
+Dorado/M
+Doralia/M
+Doralin/M
+Doralyn/M
+Doralynne/M
+Doralynn/M
+Dora/M
+Dorcas
+Dorchester/M
+Doreen/M
+Dorelia/M
+Dorella/M
+Dorelle/M
+Dor/M
+Dorena/M
+Dorene/M
+Doretta/M
+Dorette/M
+Dorey/M
+Doria/M
+Dorian/M
+Doric
+Dorice/M
+Dorie/M
+Dori/MS
+Dorine/M
+Dorisa/M
+Dorise/M
+Dorita/M
+dork/S
+dorky/RT
+dormancy/MS
+dormant/S
+dormer/M
+dormice
+dormitory/SM
+dorm/MRZS
+dormouse/M
+Dorolice/M
+Dorolisa/M
+Doro/M
+Dorotea/M
+Doroteya/M
+Dorothea/M
+Dorothee/M
+Dorothy/M
+Dorree/M
+Dorrie/M
+Dorri/SM
+Dorry/M
+dorsal/YS
+Dorsey/M
+Dorthea/M
+Dorthy/M
+Dortmund/M
+Dory/M
+dory/SM
+DOS
+dosage/SM
+dose/M
+dos/GDS
+Dosi/M
+dosimeter/MS
+dosimetry/M
+dossier/MS
+dost
+Dostoevsky/M
+DOT
+dotage/SM
+dotard/MS
+doter/M
+dote/S
+Doti/M
+doting/Y
+Dot/M
+dot/MDRSJZG
+Dotson/M
+dotted
+Dottie/M
+Dotti/M
+dottiness/M
+dotting
+Dotty/M
+dotty/PRT
+do/TZRHGJ
+Douala/M
+Douay/M
+Doubleday/M
+doubled/UA
+double/GPSRDZ
+doubleheader/MS
+doubleness/M
+doubler/M
+doubles/M
+doublespeak/S
+doublethink/M
+doublet/MS
+doubleton/M
+doubling/A
+doubloon/MS
+doubly
+doubt/AGSDMB
+doubted/U
+doubter/SM
+doubtfulness/SM
+doubtful/YP
+doubting/Y
+doubtlessness/M
+doubtless/YP
+douche/GSDM
+Dougherty/M
+dough/M
+doughs
+doughty/RT
+doughy/RT
+Dougie/M
+Douglas/M
+Douglass
+Doug/M
+Dougy/M
+dourness/MS
+Douro/M
+dour/TYRP
+douser/M
+douse/SRDG
+dovecote/MS
+Dover/M
+dove/RSM
+dovetail/GSDM
+dovish
+Dov/MR
+dowager/SM
+dowdily
+dowdiness/MS
+dowdy/TPSR
+dowel/GMDS
+dower/GDMS
+Dow/M
+downbeat/SM
+downcast/S
+downdraft/M
+downer/M
+Downey/M
+downfall/NMS
+downgrade/GSD
+down/GZSRD
+downheartedness/MS
+downhearted/PY
+downhill/RS
+downland
+download/DGS
+downpipes
+downplay/GDS
+downpour/MS
+downrange
+downrightness/M
+downright/YP
+downriver
+Downs
+downscale/GSD
+downside/S
+downsize/DSG
+downslope
+downspout/SM
+downstage/S
+downstairs
+downstate/SR
+downstream
+downswing/MS
+downtime/SM
+downtowner/M
+downtown/MRS
+downtrend/M
+downtrodden
+downturn/MS
+downwardness/M
+downward/YPS
+downwind
+downy/RT
+dowry/SM
+dowse/GZSRD
+dowser/M
+doxology/MS
+doyenne/SM
+doyen/SM
+Doyle/M
+Doy/M
+doze
+dozen/GHD
+dozenths
+dozer/M
+doz/XGNDRS
+dozy
+DP
+DPs
+dpt
+DPT
+drabbed
+drabber
+drabbest
+drabbing
+drabness/MS
+drab/YSP
+drachma/MS
+Draco/M
+draconian
+Draconian
+Dracula/M
+draft/AMDGS
+draftee/SM
+drafter/MS
+draftily
+draftiness/SM
+drafting/S
+draftsman/M
+draftsmanship/SM
+draftsmen
+draftsperson
+draftswoman
+draftswomen
+drafty/PTR
+dragged
+dragger/M
+dragging/Y
+draggy/RT
+drag/MS
+dragnet/MS
+dragonfly/SM
+dragonhead/M
+dragon/SM
+dragoon/DMGS
+drainage/MS
+drainboard/SM
+drained/U
+drainer/M
+drainpipe/MS
+drain/SZGRDM
+Drake/M
+drake/SM
+Dramamine/MS
+drama/SM
+dramatically/U
+dramatical/Y
+dramatic/S
+dramatics/M
+dramatist/MS
+dramatization/MS
+dramatized/U
+dramatizer/M
+dramatize/SRDZG
+dramaturgy/M
+Drambuie/M
+drammed
+dramming
+dram/MS
+drank
+Drano/M
+draper/M
+drapery/MS
+drape/SRDGZ
+drastic
+drastically
+drat/S
+dratted
+dratting
+Dravidian/M
+drawable
+draw/ASG
+drawback/MS
+drawbridge/SM
+drawer/SM
+drawing/SM
+drawler/M
+drawling/Y
+drawl/RDSG
+drawly
+drawn/AI
+drawnly
+drawnness
+drawstring/MS
+dray/SMDG
+dreadfulness/SM
+dreadful/YPS
+dreadlocks
+dreadnought/SM
+dread/SRDG
+dreamboat/SM
+dreamed/U
+dreamer/M
+dreamily
+dreaminess/SM
+dreaming/Y
+dreamland/SM
+dreamlessness/M
+dreamless/PY
+dreamlike
+dream/SMRDZG
+dreamworld/S
+dreamy/PTR
+drearily
+dreariness/SM
+drear/S
+dreary/TRSP
+Dreddy/M
+dredge/MZGSRD
+dredger/M
+Dredi/M
+dreg/MS
+Dreiser/M
+Dre/M
+drencher/M
+drench/GDRS
+Dresden/M
+dress/ADRSG
+dressage/MS
+dressed/U
+dresser/MS
+dresser's/A
+dresses/U
+dressiness/SM
+dressing/MS
+dressmaker/MS
+dressmaking/SM
+dressy/PTR
+drew/A
+Drew/M
+Drexel/M
+Dreyfus/M
+Dreyfuss
+dribble/DRSGZ
+dribbler/M
+driblet/SM
+drib/SM
+dried/U
+drier/M
+drifter/M
+drifting/Y
+drift/RDZSG
+driftwood/SM
+driller/M
+drilling/M
+drillmaster/SM
+drill/MRDZGS
+drinkable/S
+drink/BRSZG
+drinker/M
+dripped
+dripping/MS
+drippy/RT
+drip/SM
+driveler/M
+drivel/GZDRS
+driven/P
+driver/M
+drive/SRBGZJ
+driveway/MS
+drizzle/DSGM
+drizzling/Y
+drizzly/TR
+Dr/M
+drogue/MS
+drollery/SM
+drollness/MS
+droll/RDSPTG
+drolly
+dromedary/MS
+Drona/M
+drone/SRDGM
+droning/Y
+drool/GSRD
+droopiness/MS
+drooping/Y
+droop/SGD
+droopy/PRT
+drophead
+dropkick/S
+droplet/SM
+dropout/MS
+dropped
+dropper/SM
+dropping/MS
+dropsical
+drop/SM
+dropsy/MS
+drosophila/M
+dross/SM
+drought/SM
+drover/M
+drove/SRDGZ
+drowner/M
+drown/RDSJG
+drowse/SDG
+drowsily
+drowsiness/SM
+drowsy/PTR
+drubbed
+drubber/MS
+drubbing/SM
+drub/S
+Drucie/M
+Drucill/M
+Druci/M
+Drucy/M
+drudge/MGSRD
+drudger/M
+drudgery/SM
+drudging/Y
+Drud/M
+drugged
+druggie/SRT
+drugging
+druggist/SM
+Drugi/M
+drugless
+drug/SM
+drugstore/SM
+druidism/MS
+druid/MS
+Druid's
+Dru/M
+drumbeat/SGM
+drumhead/M
+drumlin/MS
+drummed
+drummer/SM
+drumming
+Drummond/M
+drum/SM
+drumstick/SM
+drunkard/SM
+drunkenness/SM
+drunken/YP
+drunk/SRNYMT
+drupe/SM
+Drury/M
+Drusie/M
+Drusilla/M
+Drusi/M
+Drusy/M
+druthers
+dryad/MS
+Dryden/M
+dryer/MS
+dry/GYDRSTZ
+dryish
+dryness/SM
+drys
+drystone
+drywall/GSD
+D's
+d's/A
+Dshubba/M
+DST
+DTP
+dualism/MS
+dualistic
+dualist/M
+duality/MS
+dual/YS
+Duane/M
+Dubai/M
+dubbed
+dubber/S
+dubbing/M
+dubbin/MS
+Dubcek/M
+Dubhe/M
+dubiety/MS
+dubiousness/SM
+dubious/YP
+Dublin/M
+Dubrovnik/M
+dub/S
+Dubuque/M
+ducal
+ducat/SM
+duce/CAIKF
+duce's
+Duchamp/M
+duchess/MS
+duchy/SM
+duckbill/SM
+ducker/M
+duck/GSRDM
+duckling/SM
+duckpins
+duckpond
+duckweed/MS
+ducky/RSMT
+ducted/CFI
+ductile/I
+ductility/SM
+ducting/F
+duct/KMSF
+ductless
+duct's/A
+ducts/CI
+ductwork/M
+dudder
+dude/MS
+dudgeon/SM
+dud/GMDS
+Dudley/M
+Dud/M
+duelist/MS
+duel/MRDGZSJ
+dueness/M
+duenna/MS
+due/PMS
+duet/MS
+duetted
+duetting
+duffel/M
+duffer/M
+duff/GZSRDM
+Duffie/M
+Duff/M
+Duffy/M
+Dugald/M
+dugout/SM
+dug/S
+duh
+DUI
+Duisburg/M
+dukedom/SM
+duke/DSMG
+Duke/M
+Dukey/M
+Dukie/M
+Duky/M
+Dulcea/M
+Dulce/M
+dulcet/SY
+Dulcia/M
+Dulciana/M
+Dulcie/M
+dulcify
+Dulci/M
+dulcimer/MS
+Dulcinea/M
+Dulcine/M
+Dulcy/M
+dullard/MS
+Dulles/M
+dullness/MS
+dull/SRDPGT
+dully
+dulness's
+Dulsea/M
+Duluth/M
+duly/U
+Du/M
+Dumas
+dumbbell/MS
+dumbfound/GSDR
+dumbness/MS
+Dumbo/M
+dumb/PSGTYRD
+dumbstruck
+dumbwaiter/SM
+dumdum/MS
+dummy/SDMG
+Dumont/M
+dumper/UM
+dumpiness/MS
+dumpling/MS
+dump/SGZRD
+dumpster/S
+Dumpster/S
+Dumpty/M
+dumpy/PRST
+Dunant/M
+Dunbar/M
+Duncan/M
+dunce/MS
+Dunc/M
+Dundee/M
+dunderhead/MS
+Dunedin/M
+dune/SM
+dungaree/SM
+dungeon/GSMD
+dunghill/MS
+dung/SGDM
+Dunham/M
+dunker/M
+dunk/GSRD
+Dunkirk/M
+Dunlap/M
+Dun/M
+dunned
+Dunne/M
+dunner
+dunnest
+dunning
+Dunn/M
+dunno/M
+dun/S
+Dunstan/M
+duodecimal/S
+duodena
+duodenal
+duodenum/M
+duologue/M
+duo/MS
+duopolist
+duopoly/M
+dupe/NGDRSMZ
+duper/M
+dupion/M
+duple
+duplexer/M
+duplex/MSRDG
+duplicability/M
+duplicable
+duplicate/ADSGNX
+duplication/AM
+duplicative
+duplicator/MS
+duplicitous
+duplicity/SM
+Dupont/MS
+DuPont/MS
+durability/MS
+durableness/M
+durable/PS
+durably
+Duracell/M
+durance/SM
+Durand/M
+Duran/M
+Durante/M
+Durant/M
+durational
+duration/MS
+Durban/M
+Dürer/M
+duress/SM
+Durex/M
+Durham/MS
+during
+Durkee/M
+Durkheim/M
+Dur/M
+Durocher/M
+durst
+durum/MS
+Durward/M
+Duse/M
+Dusenberg/M
+Dusenbury/M
+Dushanbe/M
+dusk/GDMS
+duskiness/MS
+dusky/RPT
+Düsseldorf
+dustbin/MS
+dustcart/M
+dustcover
+duster/M
+dustily
+dustiness/MS
+dusting/M
+Dustin/M
+dustless
+dustman/M
+dustmen
+dust/MRDGZS
+dustpan/SM
+Dusty/M
+dusty/RPT
+Dutch/M
+Dutchman/M
+Dutchmen
+dutch/MS
+Dutchwoman
+Dutchwomen
+duteous/Y
+dutiable
+dutifulness/S
+dutiful/UPY
+duty/SM
+Duvalier/M
+duvet/SM
+duxes
+Dvina/M
+Dvorák/M
+Dwain/M
+dwarfish
+dwarfism/MS
+dwarf/MTGSPRD
+Dwayne/M
+dweeb/S
+dweller/SM
+dwell/IGS
+dwelling/MS
+dwelt/I
+DWI
+Dwight/M
+dwindle/GSD
+dyadic
+dyad/MS
+Dyana/M
+Dyane/M
+Dyan/M
+Dyanna/M
+Dyanne/M
+Dyann/M
+dybbukim
+dybbuk/SM
+dyed/A
+dyeing/M
+dye/JDRSMZG
+dyer/M
+Dyer/M
+dyes/A
+dyestuff/SM
+dying/UA
+Dyke/M
+dyke's
+Dylan/M
+Dy/M
+Dynah/M
+Dyna/M
+dynamical/Y
+dynamic/S
+dynamics/M
+dynamism/SM
+dynamiter/M
+dynamite/RSDZMG
+dynamized
+dynamo/MS
+dynastic
+dynasty/MS
+dyne/M
+dysentery/SM
+dysfunctional
+dysfunction/MS
+dyslectic/S
+dyslexia/MS
+dyslexically
+dyslexic/S
+dyspepsia/MS
+dyspeptic/S
+dysprosium/MS
+dystopia/M
+dystrophy/M
+dz
+Dzerzhinsky/M
+E
+ea
+each
+Eachelle/M
+Eada/M
+Eadie/M
+Eadith/M
+Eadmund/M
+eagerness/MS
+eager/TSPRYM
+eagle/SDGM
+eaglet/SM
+Eakins/M
+Ealasaid/M
+Eal/M
+Eamon/M
+earache/SM
+eardrum/SM
+earful/MS
+ear/GSMDYH
+Earhart/M
+earing/M
+earldom/MS
+Earle/M
+Earlene/M
+Earlie/M
+Earline/M
+earliness/SM
+Earl/M
+earl/MS
+earlobe/S
+Early/M
+early/PRST
+earmark/DGSJ
+earmuff/SM
+earned/U
+earner/M
+Earnestine/M
+Earnest/M
+earnestness/MS
+earnest/PYS
+earn/GRDZTSJ
+earning/M
+earphone/MS
+earpieces
+earplug/MS
+Earp/M
+earring/MS
+earshot/MS
+earsplitting
+Eartha/M
+earthbound
+earthed/U
+earthenware/MS
+earthiness/SM
+earthliness/M
+earthling/MS
+earthly/TPR
+earth/MDNYG
+earthmen
+earthmover/M
+earthmoving
+earthquake/SDGM
+earthshaking
+earths/U
+earthward/S
+earthwork/MS
+earthworm/MS
+earthy/PTR
+Earvin/M
+earwax/MS
+earwigged
+earwigging
+earwig/MS
+eased/E
+ease/LDRSMG
+easel/MS
+easement/MS
+easer/M
+ease's/EU
+eases/UE
+easies
+easily/U
+easiness/MSU
+easing/M
+eastbound
+easterly/S
+Easter/M
+easterner/M
+Easterner/M
+easternmost
+Eastern/RZ
+eastern/ZR
+easter/Y
+east/GSMR
+Easthampton/M
+easting/M
+Eastland/M
+Eastman/M
+eastward/S
+Eastwick/M
+Eastwood/M
+East/ZSMR
+easygoingness/M
+easygoing/P
+easy/PUTR
+eatables
+eatable/U
+eaten/U
+eater/M
+eatery/MS
+eating/M
+Eaton/M
+eat/SJZGNRB
+eavesdropped
+eavesdropper/MS
+eavesdropping
+eavesdrop/S
+eave/SM
+Eba/M
+Ebba/M
+ebb/DSG
+EBCDIC
+Ebeneezer/M
+Ebeneser/M
+Ebenezer/M
+Eben/M
+Eberhard/M
+Eberto/M
+Eb/MN
+Ebola
+Ebonee/M
+Ebonics
+Ebony/M
+ebony/SM
+Ebro/M
+ebullience/SM
+ebullient/Y
+ebullition/SM
+EC
+eccentrically
+eccentricity/SM
+eccentric/MS
+eccl
+Eccles
+Ecclesiastes/M
+ecclesiastical/Y
+ecclesiastic/MS
+ECG
+echelon/SGDM
+echinoderm/SM
+echo/DMG
+echoed/A
+echoes/A
+echoic
+echolocation/SM
+éclair/MS
+éclat/MS
+eclectically
+eclecticism/MS
+eclectic/S
+eclipse/MGSD
+ecliptic/MS
+eclogue/MS
+ecocide/SM
+ecol
+Ecole/M
+ecologic
+ecological/Y
+ecologist/MS
+ecology/MS
+Eco/M
+econ
+Econometrica/M
+econometricians
+econometric/S
+econometrics/M
+economical/YU
+economic/S
+economics/M
+economist/MS
+economization
+economize/GZSRD
+economizer/M
+economizing/U
+economy/MS
+ecosystem/MS
+ecru/SM
+ecstasy/MS
+Ecstasy/S
+ecstatically
+ecstatic/S
+ectoplasm/M
+Ecuadoran/S
+Ecuadorean/S
+Ecuadorian/S
+Ecuador/M
+ecumenical/Y
+ecumenicism/SM
+ecumenicist/MS
+ecumenic/MS
+ecumenics/M
+ecumenism/SM
+ecumenist/MS
+eczema/MS
+Eda/M
+Edam/SM
+Edan/M
+ed/ASC
+Edda/M
+Eddie/M
+Eddi/M
+Edd/M
+Eddy/M
+eddy/SDMG
+Edee/M
+Edeline/M
+edelweiss/MS
+Ede/M
+edema/SM
+edematous
+eden
+Eden/M
+Edgard/M
+Edgardo/M
+Edgar/M
+edge/DRSMZGJ
+edgeless
+edger/M
+Edgerton/M
+Edgewater/M
+edgewise
+Edgewood/M
+edgily
+edginess/MS
+edging/M
+edgy/TRP
+edibility/MS
+edibleness/SM
+edible/SP
+edict/SM
+Edie/M
+edification/M
+edifice/SM
+edifier/M
+edifying/U
+edify/ZNXGRSD
+Edik/M
+Edi/MH
+Edinburgh/M
+Edin/M
+Edison/M
+editable
+Edita/M
+edited/IU
+Editha/M
+Edithe/M
+Edith/M
+edition/SM
+editorialist/M
+editorialize/DRSG
+editorializer/M
+editorial/YS
+editor/MS
+editorship/MS
+edit/SADG
+Ediva/M
+Edlin/M
+Edmond/M
+Edmon/M
+Edmonton/M
+Edmund/M
+Edna/M
+Edouard/M
+EDP
+eds
+Edsel/M
+Edsger/M
+EDT
+Eduard/M
+Eduardo/M
+educability/SM
+educable/S
+educated/YP
+educate/XASDGN
+educationalists
+educational/Y
+education/AM
+educationists
+educative
+educator/MS
+educ/DBG
+educe/S
+eduction/M
+Eduino/M
+edutainment/S
+Edvard/M
+Edwardian
+Edwardo/M
+Edward/SM
+Edwina/M
+Edwin/M
+Ed/XMN
+Edy/M
+Edythe/M
+Edyth/M
+EEC
+EEG
+eek/S
+eelgrass/M
+eel/MS
+e'en
+EEO
+EEOC
+e'er
+eerie/RT
+eerily
+eeriness/MS
+Eeyore/M
+effaceable/I
+effacement/MS
+effacer/M
+efface/SRDLG
+effectiveness/ISM
+effectives
+effective/YIP
+effector/MS
+effect/SMDGV
+effectual/IYP
+effectualness/MI
+effectuate/SDGN
+effectuation/M
+effeminacy/MS
+effeminate/SY
+effendi/MS
+efferent/SY
+effervesce/GSD
+effervescence/SM
+effervescent/Y
+effeteness/SM
+effete/YP
+efficacious/IPY
+efficaciousness/MI
+efficacy/IMS
+efficiency/MIS
+efficient/ISY
+Effie/M
+effigy/SM
+effloresce
+efflorescence/SM
+efflorescent
+effluence/SM
+effluent/MS
+effluvia
+effluvium/M
+effluxion
+efflux/M
+effortlessness/SM
+effortless/PY
+effort/MS
+effrontery/MS
+effulgence/SM
+effulgent
+effuse/XSDVGN
+effusion/M
+effusiveness/MS
+effusive/YP
+EFL
+e/FMDS
+Efrain/M
+Efrem/M
+Efren/M
+EFT
+egad
+egalitarian/I
+egalitarianism/MS
+egalitarians
+EGA/M
+Egan/M
+Egbert/M
+Egerton/M
+eggbeater/SM
+eggcup/MS
+egger/M
+egg/GMDRS
+eggheaded/P
+egghead/SDM
+eggnog/SM
+eggplant/MS
+eggshell/SM
+egis's
+eglantine/MS
+egocentrically
+egocentricity/SM
+egocentric/S
+egoism/SM
+egoistic
+egoistical/Y
+egoist/SM
+egomaniac/MS
+egomania/MS
+Egon/M
+Egor/M
+ego/SM
+egotism/SM
+egotistic
+egotistical/Y
+egotist/MS
+egregiousness/MS
+egregious/PY
+egress/SDMG
+egret/SM
+Egyptian/S
+Egypt/M
+Egyptology/M
+eh
+Ehrlich/M
+Eichmann/M
+eiderdown/SM
+eider/SM
+eidetic
+Eiffel/M
+eigenfunction/MS
+eigenstate/S
+eigenvalue/SM
+eigenvector/MS
+eighteen/MHS
+eighteenths
+eightfold
+eighth/MS
+eighths
+eightieths
+eightpence
+eight/SM
+eighty/SHM
+Eileen/M
+Eilis/M
+Eimile/M
+Einsteinian
+einsteinium/MS
+Einstein/SM
+Eire/M
+Eirena/M
+Eisenhower/M
+Eisenstein/M
+Eisner/M
+eisteddfod/M
+either
+ejaculate/SDXNG
+ejaculation/M
+ejaculatory
+ejecta
+ejection/SM
+ejector/SM
+eject/VGSD
+Ekaterina/M
+Ekberg/M
+eked/A
+eke/DSG
+EKG
+Ekstrom/M
+Ektachrome/M
+elaborateness/SM
+elaborate/SDYPVNGX
+elaboration/M
+elaborators
+Elaina/M
+Elaine/M
+Elana/M
+eland/SM
+Elane/M
+élan/M
+Elanor/M
+elans
+elapse/SDG
+el/AS
+elastically/I
+elasticated
+elasticity/SM
+elasticize/GDS
+elastic/S
+elastodynamics
+elastomer/M
+elatedness/M
+elated/PY
+elater/M
+elate/SRDXGN
+elation/M
+Elayne/M
+Elba/MS
+Elbe/M
+Elberta/M
+Elbertina/M
+Elbertine/M
+Elbert/M
+elbow/GDMS
+elbowroom/SM
+Elbrus/M
+Elden/M
+elderberry/MS
+elderflower
+elderliness/M
+elderly/PS
+elder/SY
+eldest
+Eldin/M
+Eldon/M
+Eldorado's
+Eldredge/M
+Eldridge/M
+Eleanora/M
+Eleanore/M
+Eleanor/M
+Eleazar/M
+electable/U
+elect/ASGD
+elected/U
+electioneer/GSD
+election/SAM
+electiveness/M
+elective/SPY
+electoral/Y
+electorate/SM
+elector/SM
+Electra/M
+electress/M
+electricalness/M
+electrical/PY
+electrician/SM
+electricity/SM
+electric/S
+electrification/M
+electrifier/M
+electrify/ZXGNDRS
+electrocardiogram/MS
+electrocardiograph/M
+electrocardiographs
+electrocardiography/MS
+electrochemical/Y
+electrocute/GNXSD
+electrocution/M
+electrode/SM
+electrodynamics/M
+electrodynamic/YS
+electroencephalogram/SM
+electroencephalographic
+electroencephalograph/M
+electroencephalographs
+electroencephalography/MS
+electrologist/MS
+electroluminescent
+electrolysis/M
+electrolyte/SM
+electrolytic
+electrolytically
+electrolyze/SDG
+electro/M
+electromagnetic
+electromagnetically
+electromagnetism/SM
+electromagnet/SM
+electromechanical
+electromechanics
+electromotive
+electromyograph
+electromyographic
+electromyographically
+electromyography/M
+electronegative
+electronically
+electronic/S
+electronics/M
+electron/MS
+electrophoresis/M
+electrophorus/M
+electroplate/DSG
+electroscope/MS
+electroscopic
+electroshock/GDMS
+electrostatic/S
+electrostatics/M
+electrotherapist/M
+electrotype/GSDZM
+electroweak
+eleemosynary
+Eleen/M
+elegance/ISM
+elegant/YI
+elegiacal
+elegiac/S
+elegy/SM
+elem
+elemental/YS
+elementarily
+elementariness/M
+elementary/P
+element/MS
+Elena/M
+Elene/M
+Eleni/M
+Elenore/M
+Eleonora/M
+Eleonore/M
+elephantiases
+elephantiasis/M
+elephantine
+elephant/SM
+elevated/S
+elevate/XDSNG
+elevation/M
+elevator/SM
+eleven/HM
+elevens/S
+elevenths
+elev/NX
+Elfie/M
+elfin/S
+elfish
+elf/M
+Elfreda/M
+Elfrida/M
+Elfrieda/M
+Elga/M
+Elgar/M
+Elianora/M
+Elianore/M
+Elia/SM
+Elicia/M
+elicitation/MS
+elicit/GSD
+elide/GSD
+Elie/M
+eligibility/ISM
+eligible/SI
+Elihu/M
+Elijah/M
+Eli/M
+eliminate/XSDYVGN
+elimination/M
+eliminator/SM
+Elinore/M
+Elinor/M
+Eliot/M
+Elisabeth/M
+Elisabet/M
+Elisabetta/M
+Elisa/M
+Elise/M
+Eliseo/M
+Elisha/M
+elision/SM
+Elissa/M
+Elita/M
+elite/MPS
+elitism/SM
+elitist/SM
+elixir/MS
+Elizabethan/S
+Elizabeth/M
+Elizabet/M
+Eliza/M
+Elka/M
+Elke/M
+Elkhart/M
+elk/MS
+Elladine/M
+Ella/M
+Ellary/M
+Elle/M
+Ellene/M
+Ellen/M
+Ellerey/M
+Ellery/M
+Ellesmere/M
+Ellette/M
+Ellie/M
+Ellington/M
+Elliot/M
+Elliott/M
+ellipse/MS
+ellipsis/M
+ellipsoidal
+ellipsoid/MS
+ellipsometer/MS
+ellipsometry
+elliptic
+elliptical/YS
+ellipticity/M
+Elli/SM
+Ellison/M
+Ellissa/M
+ell/MS
+Ellswerth/M
+Ellsworth/M
+Ellwood/M
+Elly/M
+Ellyn/M
+Ellynn/M
+Elma/M
+Elmer/M
+Elmhurst/M
+Elmira/M
+elm/MRS
+Elmo/M
+Elmore/M
+Elmsford/M
+El/MY
+Elna/MH
+Elnar/M
+Elnath/M
+Elnora/M
+Elnore/M
+elocutionary
+elocutionist/MS
+elocution/SM
+elodea/S
+Elohim/M
+Eloisa/M
+Eloise/M
+elongate/NGXSD
+elongation/M
+Elonore/M
+elopement/MS
+eloper/M
+elope/SRDLG
+eloquence/SM
+eloquent/IY
+Elora/M
+Eloy/M
+Elroy/M
+els
+Elsa/M
+Elsbeth/M
+else/M
+Else/M
+Elset/M
+elsewhere
+Elsey/M
+Elsie/M
+Elsi/M
+Elsinore/M
+Elspeth/M
+Elston/M
+Elsworth/M
+Elsy/M
+Eltanin/M
+Elton/M
+eluate/SM
+elucidate/SDVNGX
+elucidation/M
+elude/GSD
+elusiveness/SM
+elusive/YP
+elute/DGN
+elution/M
+Elva/M
+elven
+Elvera/M
+elver/SM
+elves/M
+Elvia/M
+Elvina/M
+Elvin/M
+Elvira/M
+elvish
+Elvis/M
+Elvyn/M
+Elwin/M
+Elwira/M
+Elwood/M
+Elwyn/M
+Ely/M
+Elyn/M
+Elyse/M
+Elysees
+Elysée/M
+Elysha/M
+Elysia/M
+elysian
+Elysian
+Elysium/SM
+Elyssa/M
+EM
+emaciate/NGXDS
+emaciation/M
+emacs/M
+Emacs/M
+email/SMDG
+Emalee/M
+Emalia/M
+Ema/M
+emanate/XSDVNG
+emanation/M
+emancipate/DSXGN
+emancipation/M
+emancipator/MS
+Emanuele/M
+Emanuel/M
+emasculate/GNDSX
+emasculation/M
+embalmer/M
+embalm/ZGRDS
+embank/GLDS
+embankment/MS
+embarcadero
+embargoes
+embargo/GMD
+embark/ADESG
+embarkation/EMS
+embarrassedly
+embarrassed/U
+embarrassing/Y
+embarrassment/MS
+embarrass/SDLG
+embassy/MS
+embattle/DSG
+embeddable
+embedded
+embedder
+embedding/MS
+embed/S
+embellished/U
+embellisher/M
+embellish/LGRSD
+embellishment/MS
+ember/MS
+embezzle/LZGDRS
+embezzlement/MS
+embezzler/M
+embitter/LGDS
+embitterment/SM
+emblazon/DLGS
+emblazonment/SM
+emblematic
+emblem/GSMD
+embodier/M
+embodiment/ESM
+embody/ESDGA
+embolden/DSG
+embolism/SM
+embosom
+embosser/M
+emboss/ZGRSD
+embouchure/SM
+embower/GSD
+embraceable
+embracer/M
+embrace/RSDVG
+embracing/Y
+embrasure/MS
+embrittle
+embrocation/SM
+embroiderer/M
+embroider/SGZDR
+embroidery/MS
+embroilment/MS
+embroil/SLDG
+embryologist/SM
+embryology/MS
+embryonic
+embryo/SM
+emceeing
+emcee/SDM
+Emelda/M
+Emelen/M
+Emelia/M
+Emelina/M
+Emeline/M
+Emelita/M
+Emelyne/M
+emendation/MS
+emend/SRDGB
+emerald/SM
+Emera/M
+emerge/ADSG
+emergence/MAS
+emergency/SM
+emergent/S
+emerita
+emeritae
+emeriti
+emeritus
+Emerson/M
+Emery/M
+emery/MGSD
+emetic/S
+emf/S
+emigrant/MS
+emigrate/SDXNG
+emigration/M
+émigré/S
+Emilee/M
+Emile/M
+Emilia/M
+Emilie/M
+Emili/M
+Emiline/M
+Emilio/M
+Emil/M
+Emily/M
+eminence/MS
+Eminence/MS
+eminent/Y
+emirate/SM
+emir/SM
+emissary/SM
+emission/AMS
+emissivity/MS
+emit/S
+emittance/M
+emitted
+emitter/SM
+emitting
+Emlen/M
+Emlyn/M
+Emlynne/M
+Emlynn/M
+em/M
+Em/M
+Emmalee/M
+Emmaline/M
+Emmalyn/M
+Emmalynne/M
+Emmalynn/M
+Emma/M
+Emmanuel/M
+Emmeline/M
+Emmerich/M
+Emmery/M
+Emmet/M
+Emmett/M
+Emmey/M
+Emmie/M
+Emmi/M
+Emmit/M
+Emmott/M
+Emmye/M
+Emmy/SM
+Emogene/M
+emollient/S
+emolument/SM
+Emory/M
+emote/SDVGNX
+emotionalism/MS
+emotionality/M
+emotionalize/GDS
+emotional/UY
+emotionless
+emotion/M
+emotive/Y
+empaneled
+empaneling
+empath
+empathetic
+empathetical/Y
+empathic
+empathize/SDG
+empathy/MS
+emperor/MS
+emphases
+emphasis/M
+emphasize/ZGCRSDA
+emphatically/U
+emphatic/U
+emphysema/SM
+emphysematous
+empire/MS
+empirical/Y
+empiricism/SM
+empiricist/SM
+empiric/SM
+emplace/L
+emplacement/MS
+employability/UM
+employable/US
+employed/U
+employee/SM
+employer/SM
+employ/LAGDS
+employment/UMAS
+emporium/MS
+empower/GLSD
+empowerment/MS
+empress/MS
+emptier/M
+emptily
+emptiness/SM
+empty/GRSDPT
+empyrean/SM
+ems/C
+EMT
+emulate/SDVGNX
+emulation/M
+emulative/Y
+emulator/MS
+emulsification/M
+emulsifier/M
+emulsify/NZSRDXG
+emulsion/SM
+emu/SM
+Emylee/M
+Emyle/M
+enabler/M
+enable/SRDZG
+enactment/ASM
+enact/SGALD
+enameler/M
+enamelware/SM
+enamel/ZGJMDRS
+enamor/DSG
+en/BM
+enc
+encamp/LSDG
+encampment/MS
+encapsulate/SDGNX
+encapsulation/M
+encase/GSDL
+encasement/SM
+encephalitic
+encephalitides
+encephalitis/M
+encephalographic
+encephalopathy/M
+enchain/SGD
+enchanter/MS
+enchant/ESLDG
+enchanting/Y
+enchantment/MSE
+enchantress/MS
+enchilada/SM
+encipherer/M
+encipher/SRDG
+encircle/GLDS
+encirclement/SM
+encl
+enclave/MGDS
+enclosed/U
+enclose/GDS
+enclosure/SM
+encoder/M
+encode/ZJGSRD
+encomium/SM
+encompass/GDS
+encore/GSD
+encounter/GSD
+encouragement/SM
+encourager/M
+encourage/SRDGL
+encouraging/Y
+encroacher/M
+encroach/LGRSD
+encroachment/MS
+encrustation/MS
+encrust/DSG
+encrypt/DGS
+encrypted/U
+encryption/SM
+encumbered/U
+encumber/SEDG
+encumbrancer/M
+encumbrance/SRM
+ency
+encyclical/SM
+encyclopaedia's
+encyclopedia/SM
+encyclopedic
+encyst/GSLD
+encystment/MS
+endanger/DGSL
+endangerment/SM
+endear/GSLD
+endearing/Y
+endearment/MS
+endeavored/U
+endeavorer/M
+endeavor/GZSMRD
+endemically
+endemicity
+endemic/S
+ender/M
+endgame/M
+Endicott/M
+ending/M
+endive/SM
+endlessness/MS
+endless/PY
+endmost
+endnote/MS
+endocrine/S
+endocrinologist/SM
+endocrinology/SM
+endogamous
+endogamy/M
+endogenous/Y
+endomorphism/SM
+endorse/DRSZGL
+endorsement/MS
+endorser/M
+endoscope/MS
+endoscopic
+endoscopy/SM
+endosperm/M
+endothelial
+endothermic
+endow/GSDL
+endowment/SM
+endpoint/MS
+endue/SDG
+endungeoned
+endurable/U
+endurably/U
+endurance/SM
+endure/BSDG
+enduringness/M
+enduring/YP
+endways
+Endymion/M
+end/ZGVMDRSJ
+ENE
+enema/SM
+enemy/SM
+energetically
+energetic/S
+energetics/M
+energized/U
+energizer/M
+energize/ZGDRS
+energy/MS
+enervate/XNGVDS
+enervation/M
+enfeeble/GLDS
+enfeeblement/SM
+enfilade/MGDS
+enfold/SGD
+enforceability/M
+enforceable/U
+enforced/Y
+enforce/LDRSZG
+enforcement/SM
+enforcer/M
+enforcible/U
+enfranchise/ELDRSG
+enfranchisement/EMS
+enfranchiser/M
+engage/ADSGE
+engagement/SEM
+engaging/Y
+Engelbert/M
+Engel/MS
+engender/DGS
+engineer/GSMDJ
+engineering/MY
+engine/MGSD
+England/M
+england/ZR
+Englebert/M
+Englewood/M
+English/GDRSM
+Englishman/M
+Englishmen
+Englishwoman/M
+Englishwomen
+Eng/M
+engorge/LGDS
+engorgement/MS
+Engracia/M
+engram/MS
+engraver/M
+engrave/ZGDRSJ
+engraving/M
+engrossed/Y
+engrosser/M
+engross/GLDRS
+engrossing/Y
+engrossment/SM
+engulf/GDSL
+engulfment/SM
+enhanceable
+enhance/LZGDRS
+enhancement/MS
+enhancer/M
+enharmonic
+Enid/M
+Enif/M
+enigma/MS
+enigmatic
+enigmatically
+Eniwetok/M
+enjambement's
+enjambment/MS
+enjoinder
+enjoin/GSD
+enjoyability
+enjoyableness/M
+enjoyable/P
+enjoyably
+enjoy/GBDSL
+enjoyment/SM
+Enkidu/M
+enlargeable
+enlarge/LDRSZG
+enlargement/MS
+enlarger/M
+enlightened/U
+enlighten/GDSL
+enlightening/U
+enlightenment/SM
+enlistee/MS
+enlister/M
+enlistment/SAM
+enlist/SAGDL
+enliven/LDGS
+enlivenment/SM
+enmesh/DSLG
+enmeshment/SM
+enmity/MS
+Ennis/M
+ennoble/LDRSG
+ennoblement/SM
+ennobler/M
+ennui/SM
+Enoch/M
+enormity/SM
+enormousness/MS
+enormous/YP
+Enos
+enough
+enoughs
+enplane/DSG
+enqueue/DS
+enquirer/S
+enquiringly
+enrage/SDG
+enrapture/GSD
+Enrica/M
+enricher/M
+Enrichetta/M
+enrich/LDSRG
+enrichment/SM
+Enrico/M
+Enrika/M
+Enrique/M
+Enriqueta/M
+enrobed
+enrollee/SM
+enroll/LGSD
+enrollment/SM
+ens
+ensconce/DSG
+ensemble/MS
+enshrine/DSLG
+enshrinement/SM
+enshroud/DGS
+ensign/SM
+ensilage/DSMG
+enslavement/MS
+enslaver/M
+enslave/ZGLDSR
+ensnare/GLDS
+ensnarement/SM
+Ensolite/M
+ensue/SDG
+ensurer/M
+ensure/SRDZG
+entailer/M
+entailment/MS
+entail/SDRLG
+entangle/EGDRSL
+entanglement/ESM
+entangler/EM
+entente/MS
+enter/ASDG
+entered/U
+enterer/M
+enteritides
+enteritis/SM
+enterprise/GMSR
+Enterprise/M
+enterpriser/M
+enterprising/Y
+entertainer/M
+entertaining/Y
+entertainment/SM
+entertain/SGZRDL
+enthalpy/SM
+enthrall/GDSL
+enthrallment/SM
+enthrone/GDSL
+enthronement/MS
+enthuse/DSG
+enthusiasm/SM
+enthusiastically/U
+enthusiastic/U
+enthusiast/MS
+enticement/SM
+entice/SRDJLZG
+enticing/Y
+entire/SY
+entirety/SM
+entitle/GLDS
+entitlement/MS
+entity/SM
+entomb/GDSL
+entombment/MS
+entomological
+entomologist/S
+entomology/MS
+entourage/SM
+entr'acte/S
+entrails
+entrainer/M
+entrain/GSLDR
+entrancement/MS
+entrance/MGDSL
+entranceway/M
+entrancing/Y
+entrant/MS
+entrapment/SM
+entrapped
+entrapping
+entrap/SL
+entreating/Y
+entreat/SGD
+entreaty/SM
+entrée/S
+entrench/LSDG
+entrenchment/MS
+entrepreneurial
+entrepreneur/MS
+entrepreneurship/M
+entropic
+entropy/MS
+entrust/DSG
+entry/ASM
+entryway/SM
+entwine/DSG
+enumerable
+enumerate/AN
+enumerated/U
+enumerates
+enumerating
+enumeration's/A
+enumeration/SM
+enumerative
+enumerator/SM
+enunciable
+enunciated/U
+enunciate/XGNSD
+enunciation/M
+enureses
+enuresis/M
+envelope/MS
+enveloper/M
+envelopment/MS
+envelop/ZGLSDR
+envenom/SDG
+enviableness/M
+enviable/U
+enviably
+envied/U
+envier/M
+enviousness/SM
+envious/PY
+environ/LGSD
+environmentalism/SM
+environmentalist/SM
+environmental/Y
+environment/MS
+envisage/DSG
+envision/GSD
+envoy/SM
+envying/Y
+envy/SRDMG
+enzymatic
+enzymatically
+enzyme/SM
+enzymology/M
+Eocene
+EOE
+eohippus/M
+Eolanda/M
+Eolande/M
+eolian
+eon/SM
+EPA
+epaulet/SM
+épée/S
+ephedrine/MS
+ephemeral/SY
+ephemera/MS
+ephemerids
+ephemeris/M
+Ephesian/S
+Ephesians/M
+Ephesus/M
+Ephraim/M
+Ephrayim/M
+Ephrem/M
+epically
+epicenter/SM
+epic/SM
+Epictetus/M
+Epicurean
+epicurean/S
+epicure/SM
+Epicurus/M
+epicycle/MS
+epicyclic
+epicyclical/Y
+epicycloid/M
+epidemically
+epidemic/MS
+epidemiological/Y
+epidemiologist/MS
+epidemiology/MS
+epidermal
+epidermic
+epidermis/MS
+epidural
+epigenetic
+epiglottis/SM
+epigrammatic
+epigram/MS
+epigrapher/M
+epigraph/RM
+epigraphs
+epigraphy/MS
+epilepsy/SM
+epileptic/S
+epilogue/SDMG
+Epimethius/M
+epinephrine/SM
+epiphany/SM
+Epiphany/SM
+epiphenomena
+episcopacy/MS
+episcopalian
+Episcopalian/S
+Episcopal/S
+episcopal/Y
+episcopate/MS
+episode/SM
+episodic
+episodically
+epistemic
+epistemological/Y
+epistemology/M
+epistle/MRS
+Epistle/SM
+epistolary/S
+epistolatory
+epitaph/GMD
+epitaphs
+epitaxial/Y
+epitaxy/M
+epithelial
+epithelium/MS
+epithet/MS
+epitome/MS
+epitomized/U
+epitomizer/M
+epitomize/SRDZG
+epochal/Y
+epoch/M
+epochs
+eponymous
+epoxy/GSD
+epsilon/SM
+Epsom/M
+Epstein/M
+equability/MS
+equableness/M
+equable/P
+equably
+equaling
+equality/ISM
+equalization/MS
+equalize/DRSGJZ
+equalized/U
+equalizer/M
+equalizes/U
+equal/USDY
+equanimity/MS
+equate/NGXBSD
+equation/M
+equatorial/S
+equator/SM
+equerry/MS
+equestrianism/SM
+equestrian/S
+equestrienne/SM
+equiangular
+equidistant/Y
+equilateral/S
+equilibrate/GNSD
+equilibration/M
+equilibrium/MSE
+equine/S
+equinoctial/S
+equinox/MS
+equipage/SM
+equipartition/M
+equip/AS
+equipment/SM
+equipoise/GMSD
+equipotent
+equipped/AU
+equipping/A
+equiproportional
+equiproportionality
+equiproportionate
+equitable/I
+equitableness/M
+equitably/I
+equitation/SM
+equity/IMS
+equiv
+equivalence/DSMG
+equivalent/SY
+equivocalness/MS
+equivocal/UY
+equivocate/NGSDX
+equivocation/M
+equivocator/SM
+Equuleus/M
+ER
+ERA
+eradicable/I
+eradicate/SDXVGN
+eradication/M
+eradicator/SM
+era/MS
+Eran/M
+erase/N
+eraser/M
+erasion/M
+Erasmus/M
+eras/SRDBGZ
+Erastus/M
+erasure/MS
+Erato/M
+Eratosthenes/M
+erbium/SM
+Erda/M
+ere
+Erebus/M
+erect/GPSRDY
+erectile
+erection/SM
+erectness/MS
+erector/SM
+Erek/M
+erelong
+eremite/MS
+Erena/M
+ergo
+ergodic
+ergodicity/M
+ergonomically
+ergonomics/M
+ergonomic/U
+ergophobia
+ergosterol/SM
+ergot/SM
+erg/SM
+Erhard/M
+Erhart/M
+Erica/M
+Ericha/M
+Erich/M
+Ericka/M
+Erick/M
+Erickson/M
+Eric/M
+Ericson's
+Ericsson's
+Eridanus/M
+Erie/SM
+Erika/M
+Erik/M
+Erikson/M
+Erina/M
+Erin/M
+Erinna/M
+Erinn/M
+eris
+Eris
+Eritrea/M
+Erlang/M
+Erlenmeyer/M
+Erl/M
+Er/M
+Erma/M
+Ermanno/M
+Ermengarde/M
+Ermentrude/M
+Ermina/M
+ermine/MSD
+Erminia/M
+Erminie/M
+Ermin/M
+Ernaline/M
+Erna/M
+Ernesta/M
+Ernestine/M
+Ernest/M
+Ernesto/M
+Ernestus/M
+Ernie/M
+Ernst/M
+Erny/M
+erode/SDG
+erodible
+erogenous
+erosible
+erosional
+erosion/SM
+erosiveness/M
+erosive/P
+Eros/SM
+erotically
+erotica/M
+eroticism/MS
+erotic/S
+errancy/MS
+errand/MS
+errantry/M
+errant/YS
+errata/SM
+erratically
+erratic/S
+erratum/MS
+err/DGS
+Errick/M
+erring/UY
+Erroll/M
+Errol/M
+erroneousness/M
+erroneous/YP
+error/SM
+ersatz/S
+Erse/M
+Erskine/M
+erst
+erstwhile
+Ertha/M
+eructation/MS
+eruct/DGS
+erudite/NYX
+erudition/M
+erupt/DSVG
+eruption/SM
+eruptive/SY
+Ervin/M
+ErvIn/M
+Erv/M
+Erwin/M
+Eryn/M
+erysipelas/SM
+erythrocyte/SM
+es
+e's
+Es
+E's
+Esau/M
+escadrille/M
+escalate/CDSXGN
+escalation/MC
+escalator/SM
+escallop/SGDM
+escapable/I
+escapade/SM
+escapee/MS
+escape/LGSRDB
+escapement/MS
+escaper/M
+escapism/SM
+escapist/S
+escapology
+escarole/MS
+escarpment/MS
+eschatology/M
+Escherichia/M
+Escher/M
+eschew/SGD
+Escondido/M
+escort/SGMD
+escritoire/SM
+escrow/DMGS
+escudo/MS
+escutcheon/SM
+Esdras/M
+ESE
+Eskimo/SM
+ESL
+Esma/M
+Esmaria/M
+Esmark/M
+Esme/M
+Esmeralda/M
+esophageal
+esophagi
+esophagus/M
+esoteric
+esoterica
+esoterically
+esp
+ESP
+espadrille/MS
+Espagnol/M
+espalier/SMDG
+especial/Y
+Esperanto/M
+Esperanza/M
+Espinoza/M
+espionage/SM
+esplanade/SM
+Esp/M
+Esposito/M
+espousal/MS
+espouser/M
+espouse/SRDG
+espresso/SM
+esprit/SM
+espy/GSD
+Esq/M
+esquire/GMSD
+Esquire/S
+Esra/M
+Essa/M
+essayer/M
+essayist/SM
+essay/SZMGRD
+essence/MS
+Essene/SM
+Essen/M
+essentialist/M
+essentially
+essentialness/M
+essential/USI
+Essequibo/M
+Essex/M
+Essie/M
+Essy/M
+EST
+established/U
+establisher/M
+establish/LAEGSD
+establishment/EMAS
+Establishment/MS
+Esta/M
+estate/GSDM
+Esteban/M
+esteem/EGDS
+Estela/M
+Estele/M
+Estella/M
+Estelle/M
+Estell/M
+Estel/M
+Esterházy/M
+ester/M
+Ester/M
+Estes
+Estevan/M
+Esther/M
+esthete's
+esthetically
+esthetic's
+esthetics's
+estimable/I
+estimableness/M
+estimate/XDSNGV
+estimating/A
+estimation/M
+estimator/SM
+Estonia/M
+Estonian/S
+estoppal
+Estrada/M
+estrange/DRSLG
+estrangement/SM
+estranger/M
+Estrella/M
+Estrellita/M
+estrogen/SM
+estrous
+estrus/SM
+est/RZ
+estuarine
+estuary/SM
+et
+ET
+ETA
+Etan/M
+eta/SM
+etc
+etcetera/SM
+etcher/M
+etch/GZJSRD
+etching/M
+ETD
+eternalness/SM
+eternal/PSY
+eternity/SM
+ethane/SM
+Ethan/M
+ethanol/MS
+Ethelbert/M
+Ethelda/M
+Ethelind/M
+Etheline/M
+Ethelin/M
+Ethel/M
+Ethelred/M
+Ethelyn/M
+Ethe/M
+etherealness/M
+ethereal/PY
+etherized
+Ethernet/MS
+ether/SM
+ethically/U
+ethicalness/M
+ethical/PYS
+ethicist/S
+ethic/MS
+Ethiopia/M
+Ethiopian/S
+ethnically
+ethnicity/MS
+ethnic/S
+ethnocentric
+ethnocentrism/MS
+ethnographers
+ethnographic
+ethnography/M
+ethnological
+ethnologist/SM
+ethnology/SM
+ethnomethodology
+ethological
+ethologist/MS
+ethology/SM
+ethos/SM
+ethylene/MS
+Ethyl/M
+ethyl/SM
+Etienne/M
+etiologic
+etiological
+etiology/SM
+etiquette/SM
+Etna/M
+Etruria/M
+Etruscan/MS
+Etta/M
+Ettie/M
+Etti/M
+Ettore/M
+Etty/M
+étude/MS
+etymological/Y
+etymologist/SM
+etymology/MS
+EU
+eucalypti
+eucalyptus/SM
+Eucharistic
+Eucharist/SM
+euchre/MGSD
+euclidean
+Euclid/M
+Eudora/M
+Euell/M
+Eugene/M
+Eugenia/M
+eugenically
+eugenicist/SM
+eugenic/S
+eugenics/M
+Eugenie/M
+Eugenio/M
+Eugenius/M
+Eugen/M
+Eugine/M
+Eulalie/M
+Eula/M
+Eulerian/M
+Euler/M
+eulogistic
+eulogist/MS
+eulogized/U
+eulogize/GRSDZ
+eulogizer/M
+eulogy/MS
+Eu/M
+Eumenides
+Eunice/M
+eunuch/M
+eunuchs
+Euphemia/M
+euphemism/MS
+euphemistic
+euphemistically
+euphemist/M
+euphonious/Y
+euphonium/M
+euphony/SM
+euphoria/SM
+euphoric
+euphorically
+Euphrates/M
+Eurasia/M
+Eurasian/S
+eureka/S
+Euripides/M
+Eur/M
+Eurodollar/SM
+Europa/M
+Europeanization/SM
+Europeanized
+European/MS
+Europe/M
+europium/MS
+Eurydice/M
+Eustace/M
+Eustachian/M
+Eustacia/M
+eutectic
+Euterpe/M
+euthanasia/SM
+euthenics/M
+evacuate/DSXNGV
+evacuation/M
+evacuee/MS
+evader/M
+evade/SRDBGZ
+Evaleen/M
+evaluable
+evaluate/ADSGNX
+evaluated/U
+evaluational
+evaluation/MA
+evaluative
+evaluator/MS
+Eva/M
+evanescence/MS
+evanescent
+Evangelia/M
+evangelic
+evangelicalism/SM
+Evangelical/S
+evangelical/YS
+Evangelina/M
+Evangeline/M
+Evangelin/M
+evangelism/SM
+evangelistic
+evangelist/MS
+Evangelist/MS
+evangelize/GDS
+Evania/M
+Evan/MS
+Evanne/M
+Evanston/M
+Evansville/M
+evaporate/VNGSDX
+evaporation/M
+evaporative/Y
+evaporator/MS
+evasion/SM
+evasiveness/SM
+evasive/PY
+Eveleen/M
+Evelina/M
+Eveline/M
+Evelin/M
+Evelyn/M
+Eve/M
+evened
+evener/M
+evenhanded/YP
+evening/SM
+Evenki/M
+Even/M
+evenness/MSU
+even/PUYRT
+evens
+evensong/MS
+eventfulness/SM
+eventful/YU
+eventide/SM
+event/SGM
+eventuality/MS
+eventual/Y
+eventuate/GSD
+Everard/M
+Eveready/M
+Evered/M
+Everest/M
+Everette/M
+Everett/M
+everglade/MS
+Everglades
+evergreen/S
+Everhart/M
+everlastingness/M
+everlasting/PYS
+everliving
+evermore
+EverReady/M
+eve/RSM
+ever/T
+every
+everybody/M
+everydayness/M
+everyday/P
+everyman
+everyone/MS
+everyplace
+everything
+everywhere
+eve's/A
+eves/A
+Evey/M
+evict/DGS
+eviction/SM
+evidence/MGSD
+evidential/Y
+evident/YS
+Evie/M
+evildoer/SM
+evildoing/MS
+evilness/MS
+evil/YRPTS
+evince/SDG
+Evin/M
+eviscerate/GNXDS
+evisceration/M
+Evita/M
+Ev/MN
+evocable
+evocate/NVX
+evocation/M
+evocativeness/M
+evocative/YP
+evoke/SDG
+evolute/NMXS
+evolutionarily
+evolutionary
+evolutionist/MS
+evolution/M
+evolve/SDG
+Evonne/M
+Evvie/M
+Evvy/M
+Evy/M
+Evyn/M
+Ewan/M
+Eward/M
+Ewart/M
+Ewell/M
+ewe/MZRS
+Ewen/M
+ewer/M
+Ewing/M
+exacerbate/NGXDS
+exacerbation/M
+exacter/M
+exactingness/M
+exacting/YP
+exaction/SM
+exactitude/ISM
+exactly/I
+exactness/MSI
+exact/TGSPRDY
+exaggerate/DSXNGV
+exaggerated/YP
+exaggeration/M
+exaggerative/Y
+exaggerator/MS
+exaltation/SM
+exalted/Y
+exalter/M
+exalt/ZRDGS
+examen/M
+examination/AS
+examination's
+examine/BGZDRS
+examined/AU
+examinees
+examiner/M
+examines/A
+examining/A
+exam/MNS
+example/DSGM
+exampled/U
+exasperate/DSXGN
+exasperated/Y
+exasperating/Y
+exasperation/M
+Excalibur/M
+excavate/NGDSX
+excavation/M
+excavator/SM
+Excedrin/M
+exceeder/M
+exceeding/Y
+exceed/SGDR
+excelled
+excellence/SM
+excellency/MS
+Excellency/MS
+excellent/Y
+excelling
+excel/S
+excelsior/S
+except/DSGV
+exceptionable/U
+exceptionalness/M
+exceptional/YU
+exception/BMS
+excerpter/M
+excerpt/GMDRS
+excess/GVDSM
+excessiveness/M
+excessive/PY
+exchangeable
+exchange/GDRSZ
+exchanger/M
+exchequer/SM
+Exchequer/SM
+excise/XMSDNGB
+excision/M
+excitability/MS
+excitableness/M
+excitable/P
+excitably
+excitation/SM
+excitatory
+excited/Y
+excitement/MS
+exciter/M
+excite/RSDLBZG
+excitingly
+exciting/U
+exciton/M
+exclaimer/M
+exclaim/SZDRG
+exclamation/MS
+exclamatory
+exclude/DRSG
+excluder/M
+exclusionary
+exclusioner/M
+exclusion/SZMR
+exclusiveness/SM
+exclusive/SPY
+exclusivity/MS
+excommunicate/XVNGSD
+excommunication/M
+excoriate/GNXSD
+excoriation/M
+excremental
+excrement/SM
+excrescence/MS
+excrescent
+excreta
+excrete/NGDRSX
+excreter/M
+excretion/M
+excretory/S
+excruciate/NGDS
+excruciating/Y
+excruciation/M
+exculpate/XSDGN
+exculpation/M
+exculpatory
+excursionist/SM
+excursion/MS
+excursiveness/SM
+excursive/PY
+excursus/MS
+excusable/IP
+excusableness/IM
+excusably/I
+excuse/BGRSD
+excused/U
+excuser/M
+exec/MS
+execrableness/M
+execrable/P
+execrably
+execrate/DSXNGV
+execration/M
+executable/MS
+execute/NGVZBXDRS
+executer/M
+executional
+executioner/M
+execution/ZMR
+executive/SM
+executor/SM
+executrices
+executrix/M
+exegeses
+exegesis/M
+exegete/M
+exegetical
+exegetic/S
+exemplariness/M
+exemplar/MS
+exemplary/P
+exemplification/M
+exemplifier/M
+exemplify/ZXNSRDG
+exemption/MS
+exempt/SDG
+exerciser/M
+exercise/ZDRSGB
+exertion/MS
+exert/SGD
+Exeter/M
+exeunt
+exhalation/SM
+exhale/GSD
+exhausted/Y
+exhauster/M
+exhaustible/I
+exhausting/Y
+exhaustion/SM
+exhaustiveness/MS
+exhaustive/YP
+exhaust/VGRDS
+exhibitioner/M
+exhibitionism/MS
+exhibitionist/MS
+exhibition/ZMRS
+exhibitor/SM
+exhibit/VGSD
+exhilarate/XSDVNG
+exhilarating/Y
+exhilaration/M
+exhortation/SM
+exhort/DRSG
+exhorter/M
+exhumation/SM
+exhume/GRSD
+exhumer/M
+exigence/S
+exigency/SM
+exigent/SY
+exiguity/SM
+exiguous
+exile/SDGM
+existence/MS
+existent/I
+existentialism/MS
+existentialistic
+existentialist/MS
+existential/Y
+existents
+exist/SDG
+exit/MDSG
+exobiology/MS
+exocrine
+Exodus/M
+exodus/SM
+exogamous
+exogamy/M
+exogenous/Y
+exonerate/SDVGNX
+exoneration/M
+exorbitance/MS
+exorbitant/Y
+exorcise/SDG
+exorcism/SM
+exorcist/SM
+exorcizer/M
+exoskeleton/MS
+exosphere/SM
+exothermic
+exothermically
+exotica
+exotically
+exoticism/SM
+exoticness/M
+exotic/PS
+exp
+expandability/M
+expand/DRSGZB
+expanded/U
+expander/M
+expanse/DSXGNVM
+expansible
+expansionary
+expansionism/MS
+expansionist/MS
+expansion/M
+expansiveness/S
+expansive/YP
+expatiate/XSDNG
+expatiation/M
+expatriate/SDNGX
+expatriation/M
+expectancy/MS
+expectant/YS
+expectational
+expectation/MS
+expected/UPY
+expecting/Y
+expectorant/S
+expectorate/NGXDS
+expectoration/M
+expect/SBGD
+expedience/IS
+expediency/IMS
+expedients
+expedient/YI
+expediter/M
+expedite/ZDRSNGX
+expeditionary
+expedition/M
+expeditiousness/MS
+expeditious/YP
+expeditor's
+expellable
+expelled
+expelling
+expel/S
+expendable/S
+expended/U
+expender/M
+expenditure/SM
+expend/SDRGB
+expense/DSGVM
+expensive/IYP
+expensiveness/SMI
+experienced/U
+experience/ISDM
+experiencing
+experiential/Y
+experimentalism/M
+experimentalist/SM
+experimental/Y
+experimentation/SM
+experimenter/M
+experiment/GSMDRZ
+experted
+experting
+expertise/SM
+expertize/GD
+expertnesses
+expertness/IM
+expert/PISY
+expert's
+expiable/I
+expiate/XGNDS
+expiation/M
+expiatory
+expiration/MS
+expired/U
+expire/SDG
+expiry/MS
+explainable/UI
+explain/ADSG
+explained/U
+explainer/SM
+explanation/MS
+explanatory
+expletive/SM
+explicable/I
+explicate/VGNSDX
+explication/M
+explicative/Y
+explicitness/SM
+explicit/PSY
+explode/DSRGZ
+exploded/U
+exploder/M
+exploitation/MS
+exploitative
+exploited/U
+exploiter/M
+exploit/ZGVSMDRB
+exploration/MS
+exploratory
+explore/DSRBGZ
+explored/U
+explorer/M
+explosion/MS
+explosiveness/SM
+explosive/YPS
+expo/MS
+exponential/SY
+exponentiate/XSDNG
+exponentiation/M
+exponent/MS
+exportability
+exportable
+export/AGSD
+exportation/SM
+exporter/MS
+export's
+expose
+exposed/U
+exposer/M
+exposit/D
+exposition/SM
+expositor/MS
+expository
+expos/RSDZG
+expostulate/DSXNG
+expostulation/M
+exposure/SM
+expounder/M
+expound/ZGSDR
+expressed/U
+expresser/M
+express/GVDRSY
+expressibility/I
+expressible/I
+expressibly/I
+expressionism/SM
+expressionistic
+expressionist/S
+expressionless/YP
+expression/MS
+expressive/IYP
+expressiveness/MS
+expressiveness's/I
+expressway/SM
+expropriate/XDSGN
+expropriation/M
+expropriator/SM
+expulsion/MS
+expunge/GDSR
+expunger/M
+expurgated/U
+expurgate/SDGNX
+expurgation/M
+exquisiteness/SM
+exquisite/YPS
+ex/S
+ext
+extant
+extemporaneousness/MS
+extemporaneous/YP
+extempore/S
+extemporization/SM
+extemporizer/M
+extemporize/ZGSRD
+extendability/M
+extendedly
+extendedness/M
+extended/U
+extender/M
+extendibility/M
+extendibles
+extend/SGZDR
+extensibility/M
+extensible/I
+extensional/Y
+extension/SM
+extensiveness/SM
+extensive/PY
+extensor/MS
+extent/SM
+extenuate/XSDGN
+extenuation/M
+exterior/MYS
+exterminate/XNGDS
+extermination/M
+exterminator/SM
+externalities
+externalization/SM
+externalize/GDS
+external/YS
+extern/M
+extinct/DGVS
+extinction/MS
+extinguishable/I
+extinguish/BZGDRS
+extinguisher/M
+extirpate/XSDVNG
+extirpation/M
+extolled
+extoller/M
+extolling
+extol/S
+extort/DRSGV
+extorter/M
+extortionate/Y
+extortioner/M
+extortionist/SM
+extortion/ZSRM
+extracellular/Y
+extract/GVSBD
+extraction/SM
+extractive/Y
+extractor/SM
+extracurricular/S
+extradite/XNGSDB
+extradition/M
+extragalactic
+extralegal/Y
+extramarital
+extramural
+extraneousness/M
+extraneous/YP
+extraordinarily
+extraordinariness/M
+extraordinary/PS
+extrapolate/XVGNSD
+extrapolation/M
+extra/S
+extrasensory
+extraterrestrial/S
+extraterritorial
+extraterritoriality/MS
+extravagance/MS
+extravagant/Y
+extravaganza/SM
+extravehicular
+extravert's
+extrema
+extremal
+extreme/DSRYTP
+extremeness/MS
+extremism/SM
+extremist/MS
+extremity/SM
+extricable/I
+extricate/XSDNG
+extrication/M
+extrinsic
+extrinsically
+extroversion/SM
+extrovert/GMDS
+extrude/GDSR
+extruder/M
+extrusion/MS
+extrusive
+exuberance/MS
+exuberant/Y
+exudate/XNM
+exudation/M
+exude/GSD
+exultant/Y
+exultation/SM
+exult/DGS
+exulting/Y
+exurban
+exurbanite/SM
+exurbia/MS
+exurb/MS
+Exxon/M
+Eyck/M
+Eyde/M
+Eydie/M
+eyeball/GSMD
+eyebrow/MS
+eyed/P
+eyedropper/MS
+eyeful/MS
+eye/GDRSMZ
+eyeglass/MS
+eyelash/MS
+eyeless
+eyelet/GSMD
+eyelid/SM
+eyeliner/MS
+eyeopener/MS
+eyeopening
+eyepiece/SM
+eyer/M
+eyeshadow
+eyesight/MS
+eyesore/SM
+eyestrain/MS
+eyeteeth
+eyetooth/M
+eyewash/MS
+eyewitness/SM
+Eyre/M
+eyrie's
+Eysenck/M
+Ezechiel/M
+Ezekiel/M
+Ezequiel/M
+Eziechiele/M
+Ezmeralda/M
+Ezra/M
+Ezri/M
+F
+FAA
+Fabe/MR
+Fabergé/M
+Faber/M
+Fabiano/M
+Fabian/S
+Fabien/M
+Fabio/M
+fable/GMSRD
+fabler/M
+fabricate/SDXNG
+fabrication/M
+fabricator/MS
+fabric/MS
+fabulists
+fabulousness/M
+fabulous/YP
+facade/GMSD
+face/AGCSD
+facecloth
+facecloths
+faceless/P
+faceplate/M
+facer/CM
+face's
+facetiousness/MS
+facetious/YP
+facet/SGMD
+facial/YS
+facileness/M
+facile/YP
+facilitate/VNGXSD
+facilitation/M
+facilitator/SM
+facilitatory
+facility/MS
+facing/MS
+facsimileing
+facsimile/MSD
+factional
+factionalism/SM
+faction/SM
+factiousness/M
+factious/PY
+factitious
+fact/MS
+facto
+factoid/S
+factorial/MS
+factoring/A
+factoring's
+factorisable
+factorization/SM
+factorize/GSD
+factor/SDMJG
+factory/MS
+factotum/MS
+factuality/M
+factualness/M
+factual/PY
+faculty/MS
+faddish
+faddist/SM
+fadedly
+faded/U
+fadeout
+fader/M
+fade/S
+fading's
+fading/U
+fad/ZGSMDR
+Fae/M
+faerie/MS
+Faeroe/M
+faery's
+Fafnir/M
+fagged
+fagging
+faggoting's
+Fagin/M
+fag/MS
+fagoting/M
+fagot/MDSJG
+Fahd/M
+Fahrenheit/S
+faïence/S
+failing's
+failing/UY
+fail/JSGD
+faille/MS
+failsafe
+failure/SM
+Faina/M
+fain/GTSRD
+fainter/M
+fainthearted
+faintness/MS
+faint/YRDSGPT
+Fairbanks
+Fairchild/M
+faired
+Fairfax/M
+Fairfield/M
+fairgoer/S
+fairground/MS
+fairing/MS
+fairish
+Fairleigh/M
+fairless
+Fairlie/M
+Fair/M
+Fairmont/M
+fairness's
+fairness/US
+Fairport/M
+fairs
+fair/TURYP
+Fairview/M
+fairway/MS
+fairyland/MS
+fairy/MS
+fairytale
+Faisalabad
+Faisal/M
+faithed
+faithfulness/MSU
+faithfuls
+faithful/UYP
+faithing
+faithlessness/SM
+faithless/YP
+Faith/M
+faiths
+faith's
+faith/U
+fajitas
+faker/M
+fake/ZGDRS
+fakir/SM
+falafel
+falconer/M
+falconry/MS
+falcon/ZSRM
+Falito/M
+Falkland/MS
+Falk/M
+Falkner/M
+fallaciousness/M
+fallacious/PY
+fallacy/MS
+faller/M
+fallibility/MSI
+fallible/I
+fallibleness/MS
+fallibly/I
+falloff/S
+Fallon/M
+fallopian
+Fallopian/M
+fallout/MS
+fallowness/M
+fallow/PSGD
+fall/SGZMRN
+falsehood/SM
+falseness/SM
+false/PTYR
+falsetto/SM
+falsie/MS
+falsifiability/M
+falsifiable/U
+falsification/M
+falsifier/M
+falsify/ZRSDNXG
+falsity/MS
+Falstaff/M
+falterer/M
+faltering/UY
+falter/RDSGJ
+Falwell/M
+fa/M
+famed/C
+fame/DSMG
+fames/C
+familial
+familiarity/MUS
+familiarization/MS
+familiarized/U
+familiarizer/M
+familiarize/ZGRSD
+familiarizing/Y
+familiarly/U
+familiarness/M
+familiar/YPS
+family/MS
+famine/SM
+faming/C
+famish/GSD
+famously/I
+famousness/M
+famous/PY
+fanaticalness/M
+fanatical/YP
+fanaticism/MS
+fanatic/SM
+Fanchette/M
+Fanchon/M
+fancied
+Fancie/M
+fancier/SM
+fanciest
+fancifulness/MS
+fanciful/YP
+fancily
+fanciness/SM
+fancying
+fancy/IS
+Fancy/M
+fancywork/SM
+fandango/SM
+Fanechka/M
+fanfare/SM
+fanfold/M
+fang/DMS
+fangled
+Fania/M
+fanlight/SM
+Fan/M
+fanned
+Fannie/M
+Fanni/M
+fanning
+fanny/SM
+Fanny/SM
+fanout
+fan/SM
+fantail/SM
+fantasia/SM
+fantasist/M
+fantasize/SRDG
+fantastical/Y
+fantastic/S
+fantasy/GMSD
+Fanya/M
+fanzine/S
+FAQ/SM
+Faraday/M
+farad/SM
+Farah/M
+Fara/M
+Farand/M
+faraway
+Farber/M
+farce/SDGM
+farcical/Y
+fare/MS
+farer/M
+farewell/DGMS
+farfetchedness/M
+far/GDR
+Fargo/M
+Farica/M
+farinaceous
+farina/MS
+Farkas/M
+Farlay/M
+Farlee/M
+Farleigh/M
+Farley/M
+Farlie/M
+Farly/M
+farmer/M
+Farmer/M
+farmhand/S
+farmhouse/SM
+farming/M
+Farmington/M
+farmland/SM
+farm/MRDGZSJ
+farmstead/SM
+farmworker/S
+Far/MY
+farmyard/MS
+faro/MS
+farragoes
+farrago/M
+Farragut/M
+Farrah/M
+Farrakhan/M
+Farra/M
+Farrand/M
+Farrell/M
+Farrel/M
+farrier/SM
+Farris/M
+Farr/M
+farrow/DMGS
+farseeing
+farsightedness/SM
+farsighted/YP
+farther
+farthermost
+farthest
+farthing/SM
+fart/MDGS!
+fas
+fascia/SM
+fascicle/DSM
+fasciculate/DNX
+fasciculation/M
+fascinate/SDNGX
+fascinating/Y
+fascination/M
+fascism/MS
+Fascism's
+fascistic
+Fascist's
+fascist/SM
+fashionableness/M
+fashionable/PS
+fashionably/U
+fashion/ADSG
+fashioner/SM
+fashion's
+Fassbinder/M
+fastback/MS
+fastball/S
+fasten/AGUDS
+fastener/MS
+fastening/SM
+fast/GTXSPRND
+fastidiousness/MS
+fastidious/PY
+fastness/MS
+fatalism/MS
+fatalistic
+fatalistically
+fatalist/MS
+fatality/MS
+fatal/SY
+fatback/SM
+fatefulness/MS
+fateful/YP
+fate/MS
+Fates
+fatheaded/P
+fathead/SMD
+father/DYMGS
+fathered/U
+fatherhood/MS
+fatherland/SM
+fatherless
+fatherliness/M
+fatherly/P
+Father/SM
+fathomable/U
+fathomless
+fathom/MDSBG
+fatigued/U
+fatigue/MGSD
+fatiguing/Y
+Fatima/M
+fatness/SM
+fat/PSGMDY
+fatso/M
+fatted
+fattener/M
+fatten/JZGSRD
+fatter
+fattest/M
+fattiness/SM
+fatting
+fatty/RSPT
+fatuity/MS
+fatuousness/SM
+fatuous/YP
+fatwa/SM
+faucet/SM
+Faulknerian
+Faulkner/M
+fault/CGSMD
+faultfinder/MS
+faultfinding/MS
+faultily
+faultiness/MS
+faultlessness/SM
+faultless/PY
+faulty/RTP
+fauna/MS
+Faunie/M
+Faun/M
+faun/MS
+Fauntleroy/M
+Faustian
+Faustina/M
+Faustine/M
+Faustino/M
+Faust/M
+Faustus/M
+fauvism/S
+favorableness/MU
+favorable/UMPS
+favorably/U
+favoredness/M
+favored's/U
+favored/YPSM
+favorer/EM
+favor/ESMRDGZ
+favoring/MYS
+favorings/U
+favorite/SMU
+favoritism/MS
+favors/A
+Fawkes/M
+Fawne/M
+fawner/M
+fawn/GZRDMS
+Fawnia/M
+fawning/Y
+Fawn/M
+fax/GMDS
+Fax/M
+Faydra/M
+Faye/M
+Fayette/M
+Fayetteville/M
+Fayina/M
+Fay/M
+fay/MDRGS
+Fayre/M
+Faythe/M
+Fayth/M
+faze/DSG
+FBI
+FCC
+FD
+FDA
+FDIC
+FDR/M
+fealty/MS
+fearfuller
+fearfullest
+fearfulness/MS
+fearful/YP
+fearlessness/MS
+fearless/PY
+fear/RDMSG
+fearsomeness/M
+fearsome/PY
+feasibility/SM
+feasibleness/M
+feasible/UI
+feasibly/U
+feaster/M
+feast/GSMRD
+feater/C
+featherbed
+featherbedding/SM
+featherbrain/MD
+feathered/U
+feathering/M
+featherless
+featherlight
+Featherman/M
+feathertop
+featherweight/SM
+feathery/TR
+feather/ZMDRGS
+feat/MYRGTS
+feats/C
+featureless
+feature/MGSD
+Feb/M
+febrile
+February/MS
+fecal
+feces
+fecklessness/M
+feckless/PY
+fecundability
+fecundate/XSDGN
+fecundation/M
+fecund/I
+fecundity/SM
+federalism/SM
+Federalist
+federalist/MS
+federalization/MS
+federalize/GSD
+Federal/S
+federal/YS
+federated/U
+federate/FSDXVNG
+federation/FM
+federative/Y
+Federica/M
+Federico/M
+FedEx/M
+Fedora/M
+fedora/SM
+feds
+Fed/SM
+fed/U
+feebleness/SM
+feeble/TPR
+feebly
+feedback/SM
+feedbag/MS
+feeder/M
+feed/GRZJS
+feeding/M
+feedlot/SM
+feedstock
+feedstuffs
+feeing
+feeler/M
+feel/GZJRS
+feelingly/U
+feeling/MYP
+feelingness/M
+Fee/M
+fee/MDS
+feet/M
+feigned/U
+feigner/M
+feign/RDGS
+feint/MDSG
+feisty/RT
+Felder/M
+Feldman/M
+feldspar/MS
+Felecia/M
+Felicdad/M
+Felice/M
+Felicia/M
+Felicio/M
+felicitate/XGNSD
+felicitation/M
+felicitous/IY
+felicitousness/M
+felicity/IMS
+Felicity/M
+Felicle/M
+Felic/M
+Felike/M
+Feliks/M
+feline/SY
+Felipa/M
+Felipe/M
+Felisha/M
+Felita/M
+Felix/M
+Feliza/M
+Felizio/M
+fella/S
+fellatio/SM
+felled/A
+feller/M
+felling/A
+Fellini/M
+fellness/M
+fellowman
+fellowmen
+fellow/SGDYM
+fellowshipped
+fellowshipping
+fellowship/SM
+fell/PSGZTRD
+feloniousness/M
+felonious/PY
+felon/MS
+felony/MS
+felt/GSD
+felting/M
+Fe/M
+female/MPS
+femaleness/SM
+feminineness/M
+feminine/PYS
+femininity/MS
+feminism/MS
+feminist/MS
+femme/MS
+femoral
+fem/S
+femur/MS
+fenced/U
+fencepost/M
+fencer/M
+fence/SRDJGMZ
+fencing/M
+fender/CM
+fend/RDSCZG
+Fenelia/M
+fenestration/CSM
+Fenian/M
+fenland/M
+fen/MS
+fennel/SM
+Fenwick/M
+Feodora/M
+Feodor/M
+feral
+Ferber/M
+Ferdie/M
+Ferdinanda/M
+Ferdinande/M
+Ferdinand/M
+Ferdinando/M
+Ferd/M
+Ferdy/M
+fer/FLC
+Fergus/M
+Ferguson/M
+Ferlinghetti/M
+Fermat/M
+fermentation/MS
+fermented
+fermenter
+ferment/FSCM
+fermenting
+Fermi/M
+fermion/MS
+fermium/MS
+Fernanda/M
+Fernande/M
+Fernandez/M
+Fernandina/M
+Fernando/M
+Ferne/M
+fernery/M
+Fern/M
+fern/MS
+ferny/TR
+ferociousness/MS
+ferocious/YP
+ferocity/MS
+Ferrari/M
+Ferraro/M
+Ferreira/M
+Ferrell/M
+Ferrel/M
+Ferrer/M
+ferreter/M
+ferret/SMRDG
+ferric
+ferris
+Ferris
+ferrite/M
+ferro
+ferroelectric
+ferromagnetic
+ferromagnet/M
+ferrous
+ferrule/MGSD
+ferryboat/MS
+ferryman/M
+ferrymen
+ferry/SDMG
+fertileness/M
+fertile/YP
+fertility/IMS
+fertilization/ASM
+fertilized/U
+fertilizer/M
+fertilizes/A
+fertilize/SRDZG
+ferule/SDGM
+fervency/MS
+fervent/Y
+fervidness/M
+fervid/YP
+fervor/MS
+fess/KGFSD
+Fess/M
+fess's
+festal/S
+fester/GD
+festival/SM
+festiveness/SM
+festive/PY
+festivity/SM
+festoon/SMDG
+fest/RVZ
+fetal
+feta/MS
+fetcher/M
+fetching/Y
+fetch/RSDGZ
+feted
+fête/MS
+fetich's
+fetidness/SM
+fetid/YP
+feting
+fetishism/SM
+fetishistic
+fetishist/SM
+fetish/MS
+fetlock/MS
+fetter's
+fetter/UGSD
+fettle/GSD
+fettling/M
+fettuccine/S
+fetus/SM
+feudalism/MS
+feudalistic
+feudal/Y
+feudatory/M
+feud/MDSG
+feverishness/SM
+feverish/PY
+fever/SDMG
+fewness/MS
+few/PTRS
+Fey/M
+Feynman/M
+fey/RT
+fez/M
+Fez/M
+fezzes
+ff
+FHA
+fiancée/S
+fiancé/MS
+Fianna/M
+Fiann/M
+fiascoes
+fiasco/M
+Fiat/M
+fiat/MS
+fibbed
+fibber/MS
+fibbing
+fiberboard/MS
+fiber/DM
+fiberfill/S
+Fiberglas/M
+fiberglass/DSMG
+Fibonacci/M
+fibrillate/XGNDS
+fibrillation/M
+fibril/MS
+fibrin/MS
+fibroblast/MS
+fibroid/S
+fibroses
+fibrosis/M
+fibrousness/M
+fibrous/YP
+fib/SZMR
+fibulae
+fibula/M
+fibular
+FICA
+fices
+fiche/SM
+Fichte/M
+fichu/SM
+fickleness/MS
+fickle/RTP
+ficos
+fictionalization/MS
+fictionalize/DSG
+fictional/Y
+fiction/SM
+fictitiousness/M
+fictitious/PY
+fictive/Y
+ficus
+fiddle/GMZJRSD
+fiddler/M
+fiddlestick/SM
+fiddly
+fide/F
+Fidela/M
+Fidelia/M
+Fidelio/M
+fidelity/IMS
+Fidelity/M
+Fidel/M
+fidget/DSG
+fidgety
+Fidole/M
+Fido/M
+fiducial/Y
+fiduciary/MS
+fiefdom/S
+fief/MS
+fielded
+fielder/IM
+fielding
+Fielding/M
+Field/MGS
+fieldstone/M
+fieldworker/M
+fieldwork/ZMRS
+field/ZISMR
+fiendishness/M
+fiendish/YP
+fiend/MS
+fierceness/SM
+fierce/RPTY
+fierily
+fieriness/MS
+fiery/PTR
+fie/S
+fies/C
+fiesta/MS
+fife/DRSMZG
+fifer/M
+Fifi/M
+Fifine/M
+FIFO
+fifteen/HRMS
+fifteenths
+fifths
+fifth/Y
+fiftieths
+fifty/HSM
+Figaro/M
+figged
+figging
+fightback
+fighter/MIS
+fighting/IS
+fight/ZSJRG
+figment/MS
+fig/MLS
+Figueroa/M
+figural
+figuration/FSM
+figurativeness/M
+figurative/YP
+figure/GFESD
+figurehead/SM
+figurer/SM
+figure's
+figurine/SM
+figuring/S
+Fijian/SM
+Fiji/M
+filamentary
+filament/MS
+filamentous
+Filberte/M
+Filbert/M
+filbert/MS
+Filberto/M
+filch/SDG
+filed/AC
+file/KDRSGMZ
+filename/SM
+filer/KMCS
+files/AC
+filet's
+filial/UY
+Filia/M
+filibusterer/M
+filibuster/MDRSZG
+Filide/M
+filigreeing
+filigree/MSD
+filing/AC
+filings
+Filipino/SM
+Filip/M
+Filippa/M
+Filippo/M
+fill/BAJGSD
+filled/U
+filler/MS
+filleting/M
+fillet/MDSG
+filling/M
+fillip/MDGS
+Fillmore/M
+filly/SM
+filmdom/M
+Filmer/M
+filminess/SM
+filming/M
+filmmaker/S
+Filmore/M
+film/SGMD
+filmstrip/SM
+filmy/RTP
+Filofax/S
+filtered/U
+filterer/M
+filter/RDMSZGB
+filthily
+filthiness/SM
+filth/M
+filths
+filthy/TRSDGP
+filtrated/I
+filtrate/SDXMNG
+filtrates/I
+filtrating/I
+filtration/IMS
+finagler/M
+finagle/RSDZG
+finale/MS
+finalist/MS
+finality/MS
+finalization/SM
+finalize/GSD
+final/SY
+Fina/M
+financed/A
+finance/MGSDJ
+finances/A
+financial/Y
+financier/DMGS
+financing/A
+Finch/M
+finch/MS
+findable/U
+find/BRJSGZ
+finder/M
+finding/M
+Findlay/M
+Findley/M
+fine/FGSCRDA
+finely
+fineness/MS
+finery/MAS
+fine's
+finespun
+finesse/SDMG
+fingerboard/SM
+fingerer/M
+fingering/M
+fingerless
+fingerling/M
+fingernail/MS
+fingerprint/SGDM
+finger/SGRDMJ
+fingertip/MS
+finial/SM
+finical
+finickiness/S
+finicky/RPT
+fining/M
+finished/UA
+finisher/M
+finishes/A
+finish/JZGRSD
+finis/SM
+finite/ISPY
+finitely/C
+finiteness/MIC
+fink/GDMS
+Finland/M
+Finlay/M
+Finley/M
+Fin/M
+Finnbogadottir/M
+finned
+Finnegan/M
+finner
+finning
+Finnish
+Finn/MS
+finny/RT
+fin/TGMDRS
+Fiona/M
+Fionna/M
+Fionnula/M
+fiord's
+Fiorello/M
+Fiorenze/M
+Fiori/M
+f/IRAC
+firearm/SM
+fireball/SM
+fireboat/M
+firebomb/MDSG
+firebox/MS
+firebrand/MS
+firebreak/SM
+firebrick/SM
+firebug/SM
+firecracker/SM
+firedamp/SM
+fired/U
+firefight/JRGZS
+firefly/MS
+Firefox/M
+fireguard/M
+firehouse/MS
+firelight/GZSM
+fireman/M
+firemen
+fire/MS
+fireplace/MS
+fireplug/MS
+firepower/SM
+fireproof/SGD
+firer/M
+firesafe
+fireside/SM
+Firestone/M
+firestorm/SM
+firetrap/SM
+firetruck/S
+firewall/S
+firewater/SM
+firewood/MS
+firework/MS
+firing/M
+firkin/M
+firmament/MS
+firmer
+firmest
+firm/ISFDG
+firmly/I
+firmness/MS
+firm's
+firmware/MS
+firring
+firstborn/S
+firsthand
+first/SY
+firth/M
+firths
+fir/ZGJMDRHS
+fiscal/YS
+Fischbein/M
+Fischer/M
+fishbowl/MS
+fishcake/S
+fisher/M
+Fisher/M
+fisherman/M
+fishermen/M
+fishery/MS
+fishhook/MS
+fishily
+fishiness/MS
+fishing/M
+fish/JGZMSRD
+Fishkill/M
+fishmeal
+fishmonger/MS
+fishnet/SM
+fishpond/SM
+fishtail/DMGS
+fishtanks
+fishwife/M
+fishwives
+fishy/TPR
+Fiske/M
+Fisk/M
+fissile
+fissionable/S
+fission/BSDMG
+fissure/MGSD
+fistfight/SM
+fistful/MS
+fisticuff/SM
+fist/MDGS
+fistula/SM
+fistulous
+Fitchburg/M
+Fitch/M
+fitfulness/SM
+fitful/PY
+fitments
+fitness/USM
+fits/AK
+fit's/K
+fitted/UA
+fitter/SM
+fittest
+fitting/AU
+fittingly
+fittingness/M
+fittings
+fit/UYPS
+Fitzgerald/M
+Fitz/M
+Fitzpatrick/M
+Fitzroy/M
+fivefold
+five/MRS
+fiver/M
+fixable
+fixate/VNGXSD
+fixatifs
+fixation/M
+fixative/S
+fixedness/M
+fixed/YP
+fixer/SM
+fixes/I
+fixing/SM
+fixity/MS
+fixture/SM
+fix/USDG
+Fizeau/M
+fizzer/M
+fizzle/GSD
+fizz/SRDG
+fizzy/RT
+fjord/SM
+FL
+flabbergast/GSD
+flabbergasting/Y
+flabbily
+flabbiness/SM
+flabby/TPR
+flab/MS
+flaccidity/MS
+flaccid/Y
+flack/SGDM
+flagella/M
+flagellate/DSNGX
+flagellation/M
+flagellum/M
+flagged
+flaggingly/U
+flagging/SMY
+flagman/M
+flagmen
+flag/MS
+flagon/SM
+flagpole/SM
+flagrance/MS
+flagrancy/SM
+flagrant/Y
+flagship/MS
+flagstaff/MS
+flagstone/SM
+flail/SGMD
+flair/SM
+flaker/M
+flake/SM
+flakiness/MS
+flak/RDMGS
+flaky/PRT
+Fla/M
+flambé/D
+flambeing
+flambes
+flamboyance/MS
+flamboyancy/MS
+flamboyant/YS
+flamenco/SM
+flamen/M
+flameproof/DGS
+flamer/IM
+flame's
+flame/SIGDR
+flamethrower/SM
+flamingo/SM
+flaming/Y
+flammability/ISM
+flammable/SI
+flam/MRNDJGZ
+Flanagan/M
+Flanders/M
+flange/GMSD
+flanker/M
+flank/SGZRDM
+flan/MS
+flannel/DMGS
+flannelet/MS
+flannelette's
+flapjack/SM
+flap/MS
+flapped
+flapper/SM
+flapping
+flaps/M
+flare/SDG
+flareup/S
+flaring/Y
+flashback/SM
+flashbulb/SM
+flashcard/S
+flashcube/MS
+flasher/M
+flashgun/S
+flashily
+flashiness/SM
+flashing/M
+flash/JMRSDGZ
+flashlight/MS
+flashy/TPR
+flask/SM
+flatbed/S
+flatboat/MS
+flatcar/MS
+flatfeet
+flatfish/SM
+flatfoot/SGDM
+flathead/M
+flatiron/SM
+flatland/RS
+flatmate/M
+flat/MYPS
+flatness/MS
+flatted
+flattener/M
+flatten/SDRG
+flatter/DRSZG
+flatterer/M
+flattering/YU
+flattery/SM
+flattest/M
+flatting
+flattish
+Flatt/M
+flattop/MS
+flatulence/SM
+flatulent/Y
+flatus/SM
+flatware/MS
+flatworm/SM
+Flaubert/M
+flaunting/Y
+flaunt/SDG
+flautist/SM
+flavored/U
+flavorer/M
+flavorful
+flavoring/M
+flavorless
+flavor/SJDRMZG
+flavorsome
+flaw/GDMS
+flawlessness/MS
+flawless/PY
+flax/MSN
+flaxseed/M
+flayer/M
+flay/RDGZS
+fleabag/MS
+fleabites
+flea/SM
+fleawort/M
+fleck/GRDMS
+Fledermaus/M
+fledged/U
+fledge/GSD
+fledgling/SM
+fleecer/M
+fleece/RSDGMZ
+fleeciness/SM
+fleecy/RTP
+fleeing
+flee/RS
+fleetingly/M
+fleetingness/SM
+fleeting/YP
+fleet/MYRDGTPS
+fleetness/MS
+Fleischer/M
+Fleischman/M
+Fleisher/M
+Fleming/M
+Flemished/M
+Flemish/GDSM
+Flemishing/M
+Flem/JGM
+Flemming/M
+flesher/M
+fleshiness/M
+flesh/JMYRSDG
+fleshless
+fleshly/TR
+fleshpot/SM
+fleshy/TPR
+fletch/DRSGJ
+fletcher/M
+Fletcher/M
+fletching/M
+Fletch/MR
+Fleurette/M
+Fleur/M
+flew/S
+flews/M
+flexed/I
+flexibility/MSI
+flexible/I
+flexibly/I
+flexitime's
+flex/MSDAG
+flextime/S
+flexural
+flexure/M
+fl/GJD
+flibbertigibbet/MS
+flicker/GD
+flickering/Y
+flickery
+flick/GZSRD
+flier/M
+flight/GMDS
+flightiness/SM
+flightless
+flightpath
+flighty/RTP
+flimflammed
+flimflamming
+flimflam/MS
+flimsily
+flimsiness/MS
+flimsy/PTRS
+flincher/M
+flinch/GDRS
+flinching/U
+flinger/M
+fling/RMG
+Flin/M
+Flinn/M
+flintiness/M
+flintless
+flintlock/MS
+Flint/M
+flint/MDSG
+Flintstones
+flinty/TRP
+flipflop
+flippable
+flippancy/MS
+flippant/Y
+flipped
+flipper/SM
+flippest
+flipping
+flip/S
+flirtation/SM
+flirtatiousness/MS
+flirtatious/PY
+flirt/GRDS
+flit/S
+flitted
+flitting
+floater/M
+float/SRDGJZ
+floaty
+flocculate/GNDS
+flocculation/M
+flock/SJDMG
+floe/MS
+flogged
+flogger/SM
+flogging/SM
+flog/S
+Flo/M
+floodgate/MS
+floodlight/DGMS
+floodlit
+floodplain/S
+flood/SMRDG
+floodwater/SM
+floorboard/MS
+floorer/M
+flooring/M
+floor/SJRDMG
+floorspace
+floorwalker/SM
+floozy/SM
+flophouse/SM
+flop/MS
+flopped
+flopper/M
+floppily
+floppiness/SM
+flopping
+floppy/TMRSP
+floral/SY
+Flora/M
+Florance/M
+flora/SM
+Florella/M
+Florence/M
+Florencia/M
+Florentia/M
+Florentine/S
+Florenza/M
+florescence/MIS
+florescent/I
+Flore/SM
+floret/MS
+Florette/M
+Floria/M
+Florian/M
+Florida/M
+Floridan/S
+Floridian/S
+floridness/SM
+florid/YP
+Florie/M
+Florina/M
+Florinda/M
+Florine/M
+florin/MS
+Flori/SM
+florist/MS
+Flor/M
+Florrie/M
+Florri/M
+Florry/M
+Flory/M
+floss/GSDM
+Flossie/M
+Flossi/M
+Flossy/M
+flossy/RST
+flotation/SM
+flotilla/SM
+flotsam/SM
+flounce/GDS
+flouncing/M
+flouncy/RT
+flounder/SDG
+flourisher/M
+flourish/GSRD
+flourishing/Y
+flour/SGDM
+floury/TR
+flouter/M
+flout/GZSRD
+flowchart/SG
+flowed
+flowerbed/SM
+flower/CSGD
+flowerer/M
+floweriness/SM
+flowerless
+flowerpot/MS
+flower's
+Flowers
+flowery/TRP
+flowing/Y
+flow/ISG
+flown
+flowstone
+Floyd/M
+Flss/M
+flt
+flubbed
+flubbing
+flub/S
+fluctuate/XSDNG
+fluctuation/M
+fluency/MS
+fluently
+fluent/SF
+flue/SM
+fluffiness/SM
+fluff/SGDM
+fluffy/PRT
+fluidity/SM
+fluidized
+fluid/MYSP
+fluidness/M
+fluke/SDGM
+fluky/RT
+flume/SDGM
+flummox/DSG
+flu/MS
+flung
+flunkey's
+flunk/SRDG
+flunky/MS
+fluoresce/GSRD
+fluorescence/MS
+fluorescent/S
+fluoridate/XDSGN
+fluoridation/M
+fluoride/SM
+fluorimetric
+fluorinated
+fluorine/SM
+fluorite/MS
+fluorocarbon/MS
+fluoroscope/MGDS
+fluoroscopic
+flurry/GMDS
+flushness/M
+flush/TRSDPBG
+fluster/DSG
+fluter/M
+flute/SRDGMJ
+fluting/M
+flutist/MS
+flutter/DRSG
+flutterer/M
+fluttery
+fluxed/A
+fluxes/A
+flux/IMS
+fluxing
+flyaway
+flyblown
+flyby/M
+flybys
+flycatcher/MS
+flyer's
+fly/JGBDRSTZ
+flyleaf/M
+flyleaves
+Flynn/M
+flyover/MS
+flypaper/MS
+flysheet/S
+flyspeck/MDGS
+flyswatter/S
+flyway/MS
+flyweight/MS
+flywheel/MS
+FM
+Fm/M
+FNMA/M
+foal/MDSG
+foaminess/MS
+foam/MRDSG
+foamy/RPT
+fobbed
+fobbing
+fob/SM
+focal/F
+focally
+Foch/M
+foci's
+focused/AU
+focuser/M
+focuses/A
+focus/SRDMBG
+fodder/GDMS
+foe/SM
+foetid
+FOFL
+fogbound
+fogged/C
+foggily
+fogginess/MS
+fogging/C
+foggy/RPT
+foghorn/SM
+fogs/C
+fog/SM
+fogyish
+fogy/SM
+foible/MS
+foil/GSD
+foist/GDS
+Fokker/M
+foldaway/S
+folded/AU
+folder/M
+foldout/MS
+fold/RDJSGZ
+folds/UA
+Foley/M
+foliage/MSD
+foliate/CSDXGN
+foliation/CM
+folio/SDMG
+folklike
+folklore/MS
+folkloric
+folklorist/SM
+folk/MS
+folksiness/MS
+folksinger/S
+folksinging/S
+folksong/S
+folksy/TPR
+folktale/S
+folkway/S
+foll
+follicle/SM
+follicular
+follower/M
+follow/JSZBGRD
+followup's
+folly/SM
+Folsom
+fol/Y
+Fomalhaut/M
+fomentation/SM
+fomenter/M
+foment/RDSG
+Fonda/M
+fondant/SM
+fondle/GSRD
+fondler/M
+fondness/MS
+fond/PMYRDGTS
+fondue/MS
+Fons
+Fonsie/M
+Fontainebleau/M
+Fontaine/M
+Fontana/M
+fontanelle's
+fontanel/MS
+font/MS
+Fonzie/M
+Fonz/M
+foodie/S
+food/MS
+foodstuff/MS
+foolery/MS
+foolhardily
+foolhardiness/SM
+foolhardy/PTR
+foolishness/SM
+foolish/PRYT
+fool/MDGS
+foolproof
+foolscap/MS
+footage/SM
+football/SRDMGZ
+footbridge/SM
+Foote/M
+footer/M
+footfall/SM
+foothill/SM
+foothold/MS
+footing/M
+footless
+footlights
+footling
+footlocker/SM
+footloose
+footman/M
+footmarks
+footmen
+footnote/MSDG
+footpad/SM
+footpath/M
+footpaths
+footplate/M
+footprint/MS
+footrace/S
+footrest/MS
+footsie/SM
+foot/SMRDGZJ
+footsore
+footstep/SM
+footstool/SM
+footwear/M
+footwork/SM
+fop/MS
+fopped
+foppery/MS
+fopping
+foppishness/SM
+foppish/YP
+forage/GSRDMZ
+forager/M
+forayer/M
+foray/SGMRD
+forbade
+forbearance/SM
+forbearer/M
+forbear/MRSG
+Forbes/M
+forbidden
+forbiddingness/M
+forbidding/YPS
+forbid/S
+forbore
+forborne
+forced/Y
+forcefield/MS
+forcefulness/MS
+forceful/PY
+forceps/M
+forcer/M
+force/SRDGM
+forcibleness/M
+forcible/P
+forcibly
+fordable/U
+Fordham/M
+Ford/M
+ford/SMDBG
+forearm/GSDM
+forebear/MS
+forebode/GJDS
+forebodingness/M
+foreboding/PYM
+forecaster/M
+forecastle/MS
+forecast/SZGR
+foreclose/GSD
+foreclosure/MS
+forecourt/SM
+foredoom/SDG
+forefather/SM
+forefeet
+forefinger/MS
+forefoot/M
+forefront/SM
+foregoer/M
+foregoing/S
+foregone
+foregos
+foreground/MGDS
+forehand/S
+forehead/MS
+foreigner/M
+foreignness/SM
+foreign/PRYZS
+foreknew
+foreknow/GS
+foreknowledge/MS
+foreknown
+foreleg/MS
+forelimb/MS
+forelock/MDSG
+foreman/M
+Foreman/M
+foremast/SM
+foremen
+foremost
+forename/DSM
+forenoon/SM
+forensically
+forensic/S
+forensics/M
+foreordain/DSG
+forepart/MS
+forepaws
+forepeople
+foreperson/S
+foreplay/MS
+forequarter/SM
+forerunner/MS
+fore/S
+foresail/SM
+foresaw
+foreseeable/U
+foreseeing
+foreseen/U
+foreseer/M
+foresee/ZSRB
+foreshadow/SGD
+foreshore/M
+foreshorten/DSG
+foresightedness/SM
+foresighted/PY
+foresight/SMD
+foreskin/SM
+forestaller/M
+forestall/LGSRD
+forestallment/M
+forestation/MCS
+forestations/A
+forest/CSAGD
+Forester/M
+forester/SM
+forestland/S
+Forest/MR
+forestry/MS
+forest's
+foretaste/MGSD
+foreteller/M
+foretell/RGS
+forethought/MS
+foretold
+forevermore
+forever/PS
+forewarner/M
+forewarn/GSJRD
+forewent
+forewoman/M
+forewomen
+foreword/SM
+forfeiter/M
+forfeiture/MS
+forfeit/ZGDRMS
+forfend/GSD
+forgather/GSD
+forgave
+forged/A
+forge/JVGMZSRD
+forger/M
+forgery/MS
+forges/A
+forgetfulness/SM
+forgetful/PY
+forget/SV
+forgettable/U
+forgettably/U
+forgetting
+forging/M
+forgivable/U
+forgivably/U
+forgiven
+forgiveness/SM
+forgiver/M
+forgive/SRPBZG
+forgivingly
+forgivingness/M
+forgiving/UP
+forgoer/M
+forgoes
+forgone
+forgo/RSGZ
+forgot
+forgotten/U
+for/HT
+forkful/S
+fork/GSRDM
+forklift/DMSG
+forlornness/M
+forlorn/PTRY
+formability/AM
+formaldehyde/SM
+formalin/M
+formalism/SM
+formalistic
+formalist/SM
+formality/SMI
+formal/IY
+formalization/SM
+formalized/U
+formalizer/M
+formalizes/I
+formalize/ZGSRD
+formalness/M
+formals
+formant/MIS
+format/AVS
+formate/MXGNSD
+formation/AFSCIM
+formatively/I
+formativeness/IM
+formative/SYP
+format's
+formatted/UA
+formatter/A
+formatters
+formatter's
+formatting/A
+form/CGSAFDI
+formed/U
+former/FSAI
+formerly
+formfitting
+formic
+Formica/MS
+formidableness/M
+formidable/P
+formidably
+formlessness/MS
+formless/PY
+Formosa/M
+Formosan
+form's
+formulaic
+formula/SM
+formulate/AGNSDX
+formulated/U
+formulation/AM
+formulator/SM
+fornicate/GNXSD
+fornication/M
+fornicator/SM
+Forrester/M
+Forrest/RM
+forsaken
+forsake/SG
+forsook
+forsooth
+Forster/M
+forswear/SG
+forswore
+forsworn
+forsythia/MS
+Fortaleza/M
+forte/MS
+forthcome/JG
+forthcoming/U
+FORTH/M
+forthrightness/SM
+forthright/PYS
+forthwith
+fortieths
+fortification/MS
+fortified/U
+fortifier/SM
+fortify/ADSG
+fortiori
+fortissimo/S
+fortitude/SM
+fortnightly/S
+fortnight/MYS
+FORTRAN
+Fortran/M
+fortress/GMSD
+fort/SM
+fortuitousness/SM
+fortuitous/YP
+fortuity/MS
+fortunateness/M
+fortunate/YUS
+fortune/MGSD
+fortuneteller/SM
+fortunetelling/SM
+forty/SRMH
+forum/MS
+forwarder/M
+forwarding/M
+forwardness/MS
+forward/PTZSGDRY
+forwent
+fossiliferous
+fossilization/MS
+fossilized/U
+fossilize/GSD
+fossil/MS
+Foss/M
+fosterer/M
+Foster/M
+foster/SRDG
+Foucault/M
+fought
+foulard/SM
+foulmouth/D
+foulness/MS
+fouls/M
+foul/SYRDGTP
+foundational
+foundation/SM
+founded/UF
+founder/MDG
+founder's/F
+founding/F
+foundling/MS
+found/RDGZS
+foundry/MS
+founds/KF
+fountainhead/SM
+fountain/SMDG
+fount/MS
+fourfold
+Fourier/M
+fourpence/M
+fourpenny
+fourposter/SM
+fourscore/S
+four/SHM
+foursome/SM
+foursquare
+fourteener/M
+fourteen/SMRH
+fourteenths
+Fourth
+fourths
+Fourths
+fourth/Y
+fovea/M
+fowler/M
+Fowler/M
+fowling/M
+fowl/SGMRD
+foxfire/SM
+foxglove/SM
+Foxhall/M
+foxhole/SM
+foxhound/SM
+foxily
+foxiness/MS
+foxing/M
+fox/MDSG
+Fox/MS
+foxtail/M
+foxtrot/MS
+foxtrotted
+foxtrotting
+foxy/TRP
+foyer/SM
+FPO
+fps
+fr
+fracas/SM
+fractal/SM
+fractional/Y
+fractionate/DNG
+fractionation/M
+fractioned
+fractioning
+fraction/ISMA
+fractiousness/SM
+fractious/PY
+fracture/MGDS
+fragile/Y
+fragility/MS
+fragmentarily
+fragmentariness/M
+fragmentary/P
+fragmentation/MS
+fragment/SDMG
+Fragonard/M
+fragrance/SM
+fragrant/Y
+frailness/MS
+frail/STPYR
+frailty/MS
+framed/U
+framer/M
+frame/SRDJGMZ
+framework/SM
+framing/M
+Francaise/M
+France/MS
+Francene/M
+Francesca/M
+Francesco/M
+franchisee/S
+franchise/ESDG
+franchiser/SM
+franchise's
+Franchot/M
+Francie/M
+Francine/M
+Francis
+Francisca/M
+Franciscan/MS
+Francisco/M
+Franciska/M
+Franciskus/M
+francium/MS
+Francklin/M
+Francklyn/M
+Franck/M
+Francoise/M
+Francois/M
+Franco/M
+francophone/M
+franc/SM
+Francyne/M
+frangibility/SM
+frangible
+Frankel/M
+Frankenstein/MS
+franker/M
+Frankford/M
+Frankfort/M
+Frankfurter/M
+frankfurter/MS
+Frankfurt/RM
+Frankie/M
+frankincense/MS
+Frankish/M
+franklin/M
+Franklin/M
+Franklyn/M
+frankness/MS
+frank/SGTYRDP
+Frank/SM
+Franky/M
+Fran/MS
+Frannie/M
+Franni/M
+Franny/M
+Fransisco/M
+frantically
+franticness/M
+frantic/PY
+Frants/M
+Franzen/M
+Franz/NM
+frappé
+frappeed
+frappeing
+frappes
+Frasco/M
+Fraser/M
+Frasier/M
+Frasquito/M
+fraternal/Y
+fraternity/MSF
+fraternization/SM
+fraternize/GZRSD
+fraternizer/M
+fraternizing/U
+frat/MS
+fratricidal
+fratricide/MS
+fraud/CS
+fraud's
+fraudsters
+fraudulence/S
+fraudulent/YP
+fraught/SGD
+Fraulein/S
+Frau/MN
+fray/CSDG
+Frayda/M
+Frayne/M
+fray's
+Fraze/MR
+Frazer/M
+Frazier/M
+frazzle/GDS
+freakishness/SM
+freakish/YP
+freak/SGDM
+freaky/RT
+freckle/GMDS
+freckly/RT
+Freda/M
+Freddie/M
+Freddi/M
+Freddy/M
+Fredek/M
+Fredelia/M
+Frederica/M
+Frederich/M
+Fredericka/M
+Frederick/MS
+Frederic/M
+Frederico/M
+Fredericton/M
+Frederigo/M
+Frederik/M
+Frederique/M
+Fredholm/M
+Fredia/M
+Fredi/M
+Fred/M
+Fredra/M
+Fredrick/M
+Fredrickson/M
+Fredric/M
+Fredrika/M
+freebase/GDS
+freebie/MS
+freebooter/M
+freeboot/ZR
+freeborn
+freedman/M
+Freedman/M
+freedmen
+freedom/MS
+freehand/D
+freehanded/Y
+freeholder/M
+freehold/ZSRM
+freeing/S
+freelance/SRDGZM
+Freeland/M
+freeloader/M
+freeload/SRDGZ
+Free/M
+freeman/M
+Freeman/M
+freemasonry/M
+Freemasonry/MS
+Freemason/SM
+freemen
+Freemon/M
+freeness/M
+Freeport/M
+freestanding
+freestone/SM
+freestyle/SM
+freethinker/MS
+freethinking/S
+Freetown/M
+freeway/MS
+freewheeler/M
+freewheeling/P
+freewheel/SRDMGZ
+freewill
+free/YTDRSP
+freezable
+freezer/SM
+freeze/UGSA
+freezing/S
+Freida/M
+freighter/M
+freight/ZGMDRS
+Fremont/M
+Frenchman/M
+French/MDSG
+Frenchmen
+Frenchwoman/M
+Frenchwomen
+frenetically
+frenetic/S
+frenzied/Y
+frenzy/MDSG
+freon/S
+Freon/SM
+freq
+frequency/ISM
+frequented/U
+frequenter/MS
+frequentest
+frequenting
+frequent/IY
+frequentness/M
+frequents
+fresco/DMG
+frescoes
+fresh/AZSRNDG
+freshener/M
+freshen/SZGDR
+fresher/MA
+freshest
+freshet/SM
+freshly
+freshman/M
+freshmen
+freshness/MS
+freshwater/SM
+Fresnel/M
+Fresno/M
+fretboard
+fretfulness/MS
+fretful/PY
+fret/S
+fretsaw/S
+fretted
+fretting
+fretwork/MS
+Freudian/S
+Freud/M
+Freya/M
+Frey/M
+friableness/M
+friable/P
+friary/MS
+friar/YMS
+fricasseeing
+fricassee/MSD
+frication/M
+fricative/MS
+Frick/M
+frictional/Y
+frictionless/Y
+friction/MS
+Friday/SM
+fridge/SM
+fried/A
+Frieda/M
+Friedan/M
+friedcake/SM
+Friederike/M
+Friedman/M
+Friedrich/M
+Friedrick/M
+friendlessness/M
+friendless/P
+friendlies
+friendlily
+friendliness/USM
+friendly/PUTR
+friend/SGMYD
+friendship/MS
+frier's
+fries/M
+frieze/SDGM
+frigate/SM
+Frigga/M
+frigged
+frigging/S
+frighten/DG
+frightening/Y
+frightfulness/MS
+frightful/PY
+fright/GXMDNS
+Frigidaire/M
+frigidity/MS
+frigidness/SM
+frigid/YP
+frig/S
+frill/MDGS
+frilly/RST
+Fri/M
+fringe/IGSD
+fringe's
+frippery/SM
+Frisbee/MS
+Frisco/M
+Frisian/SM
+frisker/M
+friskily
+friskiness/SM
+frisk/RDGS
+frisky/RTP
+frisson/M
+Frito/M
+fritterer/M
+fritter/RDSG
+Fritz/M
+fritz/SM
+frivolity/MS
+frivolousness/SM
+frivolous/PY
+frizz/GYSD
+frizzle/DSG
+frizzly/RT
+frizzy/RT
+Fr/MD
+Frobisher/M
+frocking/M
+frock's
+frock/SUDGC
+frogged
+frogging
+frogman/M
+frogmarched
+frogmen
+frog/MS
+fro/HS
+Froissart/M
+frolicked
+frolicker/SM
+frolicking
+frolic/SM
+frolicsome
+from
+Fromm/M
+frond/SM
+frontage/MS
+frontal/SY
+Frontenac/M
+front/GSFRD
+frontier/SM
+frontiersman/M
+frontiersmen
+frontispiece/SM
+frontrunner's
+front's
+frontward/S
+frosh/M
+Frostbelt/M
+frostbite/MS
+frostbit/G
+frostbiting/M
+frostbitten
+frost/CDSG
+frosteds
+frosted/U
+frostily
+frostiness/SM
+frosting/MS
+Frost/M
+frost's
+frosty/PTR
+froth/GMD
+frothiness/SM
+froths
+frothy/TRP
+froufrou/MS
+frowardness/MS
+froward/P
+frowner/M
+frowning/Y
+frown/RDSG
+frowzily
+frowziness/SM
+frowzy/RPT
+frozenness/M
+frozen/YP
+froze/UA
+fructify/GSD
+fructose/MS
+Fruehauf/M
+frugality/SM
+frugal/Y
+fruitcake/SM
+fruiterer/M
+fruiter/RM
+fruitfuller
+fruitfullest
+fruitfulness/MS
+fruitful/UYP
+fruit/GMRDS
+fruitiness/MS
+fruition/SM
+fruitlessness/MS
+fruitless/YP
+fruity/RPT
+frumpish
+frump/MS
+frumpy/TR
+Frunze/M
+frustrater/M
+frustrate/RSDXNG
+frustrating/Y
+frustration/M
+frustum/SM
+Frye/M
+fryer/MS
+Fry/M
+fry/NGDS
+F's
+f's/KA
+FSLIC
+ft/C
+FTC
+FTP
+fuchsia/MS
+Fuchs/M
+fucker/M!
+fuck/GZJRDMS!
+FUD
+fuddle/GSD
+fudge/GMSD
+fuel/ASDG
+fueler/SM
+fuel's
+Fuentes/M
+fugal
+Fugger/M
+fugitiveness/M
+fugitive/SYMP
+fugue/GMSD
+fuhrer/S
+Fuji/M
+Fujitsu/M
+Fujiyama
+Fukuoka/M
+Fulani/M
+Fulbright/M
+fulcrum/SM
+fulfilled/U
+fulfiller/M
+fulfill/GLSRD
+fulfillment/MS
+fullback/SMG
+fuller/DMG
+Fuller/M
+Fullerton/M
+fullish
+fullness/MS
+full/RDPSGZT
+fullstops
+fullword/SM
+fully
+fulminate/XSDGN
+fulmination/M
+fulness's
+fulsomeness/SM
+fulsome/PY
+Fulton/M
+Fulvia/M
+fumble/GZRSD
+fumbler/M
+fumbling/Y
+fume/DSG
+fumigant/MS
+fumigate/NGSDX
+fumigation/M
+fumigator/SM
+fuming/Y
+fumy/TR
+Funafuti
+functionalism/M
+functionalist/SM
+functionality/S
+functional/YS
+functionary/MS
+function/GSMD
+functor/SM
+fundamentalism/SM
+fundamentalist/SM
+fundamental/SY
+fund/ASMRDZG
+funded/U
+fundholders
+fundholding
+funding/S
+Fundy/M
+funeral/MS
+funerary
+funereal/Y
+funfair/M
+fungal/S
+fungible/M
+fungicidal
+fungicide/SM
+fungi/M
+fungoid/S
+fungous
+fungus/M
+funicular/SM
+funk/GSDM
+funkiness/S
+funky/RTP
+fun/MS
+funned
+funnel/SGMD
+funner
+funnest
+funnily/U
+funniness/SM
+funning
+funny/RSPT
+furbelow/MDSG
+furbisher/M
+furbish/GDRSA
+furiousness/M
+furious/RYP
+furlong/MS
+furlough/DGM
+furloughs
+furl/UDGS
+furn
+furnace/GMSD
+furnished/U
+furnisher/MS
+furnish/GASD
+furnishing/SM
+furniture/SM
+furore/MS
+furor/MS
+fur/PMS
+furred
+furrier/M
+furriness/SM
+furring/SM
+furrow/DMGS
+furry/RTZP
+furtherance/MS
+furtherer/M
+furthermore
+furthermost
+further/TGDRS
+furthest
+furtiveness/SM
+furtive/PY
+fury/SM
+furze/SM
+fusebox/S
+fusee/SM
+fuse/FSDAGCI
+fuselage/SM
+fuse's/A
+Fushun/M
+fusibility/SM
+fusible/I
+fusiform
+fusilier/MS
+fusillade/SDMG
+fusion/KMFSI
+fussbudget/MS
+fusser/M
+fussily
+fussiness/MS
+fusspot/SM
+fuss/SRDMG
+fussy/PTR
+fustian/MS
+fustiness/MS
+fusty/RPT
+fut
+futileness/M
+futile/PY
+futility/MS
+futon/S
+future/SM
+futurism/SM
+futuristic/S
+futurist/S
+futurity/MS
+futurologist/S
+futurology/MS
+futz/GSD
+fuze's
+Fuzhou/M
+Fuzzbuster/M
+fuzzily
+fuzziness/SM
+fuzz/SDMG
+fuzzy/PRT
+fwd
+FWD
+fwy
+FY
+FYI
+GA
+gabardine/SM
+gabbed
+Gabbey/M
+Gabbie/M
+Gabbi/M
+gabbiness/S
+gabbing
+gabble/SDG
+Gabby/M
+gabby/TRP
+Gabe/M
+gaberdine's
+Gabey/M
+gabfest/MS
+Gabie/M
+Gabi/M
+gable/GMSRD
+Gable/M
+Gabonese
+Gabon/M
+Gaborone/M
+Gabriela/M
+Gabriele/M
+Gabriella/M
+Gabrielle/M
+Gabriellia/M
+Gabriell/M
+Gabriello/M
+Gabriel/M
+Gabrila/M
+gab/S
+Gaby/M
+Gacrux/M
+gadabout/MS
+gadded
+gadder/MS
+gadding
+gadfly/MS
+gadgetry/MS
+gadget/SM
+gadolinium/MS
+gad/S
+Gadsden/M
+Gaea/M
+Gaelan/M
+Gaelic/M
+Gael/SM
+Gae/M
+gaffe/MS
+gaffer/M
+gaff/SGZRDM
+gaga
+Gagarin/M
+gag/DRSG
+Gage/M
+gager/M
+gage/SM
+gagged
+gagging
+gaggle/SDG
+gagwriter/S
+gaiety/MS
+Gaile/M
+Gail/M
+gaily
+gain/ADGS
+gainer/SM
+Gaines/M
+Gainesville/M
+gainfulness/M
+gainful/YP
+gaining/S
+gainly/U
+gainsaid
+gainsayer/M
+gainsay/RSZG
+Gainsborough/M
+gaiter/M
+gait/GSZMRD
+Gaithersburg/M
+galactic
+Galahad/MS
+Galapagos/M
+gal/AS
+gala/SM
+Galatea/M
+Galatia/M
+Galatians/M
+Galaxy/M
+galaxy/MS
+Galbraith/M
+Galbreath/M
+gale/AS
+Gale/M
+galen
+galena/MS
+galenite/M
+Galen/M
+gale's
+Galibi/M
+Galilean/MS
+Galilee/M
+Galileo/M
+Galina/M
+Gallagher/M
+gallanted
+gallanting
+gallantry/MS
+gallants
+gallant/UY
+Gallard/M
+gallbladder/MS
+Gallegos/M
+galleon/SM
+galleria/S
+gallery/MSDG
+galley/MS
+Gallic
+Gallicism/SM
+gallimaufry/MS
+galling/Y
+gallium/SM
+gallivant/GDS
+Gall/M
+gallonage/M
+gallon/SM
+galloper/M
+gallop/GSRDZ
+Galloway/M
+gallows/M
+gall/SGMD
+gallstone/MS
+Gallup/M
+Gal/MN
+Galois/M
+galoot/MS
+galore/S
+galosh/GMSD
+gal's
+Galsworthy/M
+galumph/GD
+galumphs
+galvanic
+Galvani/M
+galvanism/MS
+galvanization/SM
+galvanize/SDG
+Galvan/M
+galvanometer/SM
+galvanometric
+Galven/M
+Galveston/M
+Galvin/M
+Ga/M
+Gamaliel/M
+Gama/M
+Gambia/M
+Gambian/S
+gambit/MS
+gamble/GZRSD
+Gamble/M
+gambler/M
+gambol/SGD
+gamecock/SM
+gamekeeper/MS
+gameness/MS
+game/PJDRSMYTZG
+gamesmanship/SM
+gamesmen
+gamester/M
+gamest/RZ
+gamete/MS
+gametic
+gamine/SM
+gaminess/MS
+gaming/M
+gamin/MS
+gamma/MS
+gammon/DMSG
+Gamow/M
+gamut/MS
+gamy/TRP
+gander/DMGS
+Gandhian
+Gandhi/M
+gangbusters
+ganger/M
+Ganges/M
+gang/GRDMS
+gangland/SM
+ganglia/M
+gangling
+ganglionic
+ganglion/M
+gangplank/SM
+gangrene/SDMG
+gangrenous
+gangster/SM
+Gangtok/M
+gangway/MS
+Gan/M
+gannet/SM
+Gannie/M
+Gannon/M
+Ganny/M
+gantlet/GMDS
+Gantry/M
+gantry/MS
+Ganymede/M
+GAO
+gaoler/M
+gaol/MRDGZS
+gaper/M
+gape/S
+gaping/Y
+gapped
+gapping
+gap/SJMDRG
+garage/GMSD
+Garald/M
+garbageman/M
+garbage/SDMG
+garbanzo/MS
+garb/DMGS
+garbler/M
+garble/RSDG
+Garbo/M
+Garcia/M
+garçon/SM
+gardener/M
+Gardener/M
+gardenia/SM
+gardening/M
+garden/ZGRDMS
+Gardie/M
+Gardiner/M
+Gard/M
+Gardner/M
+Gardy/M
+Garek/M
+Gare/MH
+Gareth/M
+Garey/M
+Garfield/M
+garfish/MS
+Garfunkel/M
+Gargantua/M
+gargantuan
+gargle/SDG
+gargoyle/DSM
+Garibaldi/M
+Garik/M
+garishness/MS
+garish/YP
+Garland/M
+garland/SMDG
+garlicked
+garlicking
+garlicky
+garlic/SM
+garment/MDGS
+Gar/MH
+Garner/M
+garner/SGD
+Garnet/M
+garnet/SM
+Garnette/M
+Garnett/M
+garnish/DSLG
+garnisheeing
+garnishee/SDM
+garnishment/MS
+Garold/M
+garote's
+garotte's
+Garrard/M
+garred
+Garrek/M
+Garreth/M
+Garret/M
+garret/SM
+Garrett/M
+Garrick/M
+Garrik/M
+garring
+Garrison/M
+garrison/SGMD
+garroter/M
+garrote/SRDMZG
+Garrot/M
+garrotte's
+Garrott/M
+garrulity/SM
+garrulousness/MS
+garrulous/PY
+Garry/M
+gar/SLM
+garter/SGDM
+Garth/M
+Garvey/M
+Garvin/M
+Garv/M
+Garvy/M
+Garwin/M
+Garwood/M
+Gary/M
+Garza/M
+gasbag/MS
+Gascony/M
+gaseousness/M
+gaseous/YP
+gases/C
+gas/FC
+gash/GTMSRD
+gasification/M
+gasifier/M
+gasify/SRDGXZN
+gasket/SM
+gaslight/DMS
+gasohol/S
+gasoline/MS
+gasometer/M
+Gaspard/M
+Gaspar/M
+Gasparo/M
+gasper/M
+Gasper/M
+gasp/GZSRD
+gasping/Y
+gas's
+gassed/C
+Gasser/M
+gasser/MS
+Gasset/M
+gassiness/M
+gassing/SM
+gassy/PTR
+Gaston/M
+gastric
+gastritides
+gastritis/MS
+gastroenteritides
+gastroenteritis/M
+gastrointestinal
+gastronome/SM
+gastronomic
+gastronomical/Y
+gastronomy/MS
+gastropod/SM
+gasworks/M
+gateau/MS
+gateaux
+gatecrash/GZSRD
+gatehouse/MS
+gatekeeper/SM
+gate/MGDS
+gatepost/SM
+Gates
+gateway/MS
+gathered/IA
+gatherer/M
+gathering/M
+gather/JRDZGS
+gathers/A
+Gatlinburg/M
+Gatling/M
+Gatorade/M
+gator/MS
+Gatsby/M
+Gatun/M
+gaucheness/SM
+gaucherie/SM
+gauche/TYPR
+gaucho/SM
+gaudily
+gaudiness/MS
+gaudy/PRST
+gaugeable
+gauger/M
+Gauguin/M
+Gaulish/M
+Gaulle/M
+Gaul/MS
+Gaultiero/M
+gauntlet/GSDM
+Gauntley/M
+gauntness/MS
+gaunt/PYRDSGT
+gauss/C
+gausses
+Gaussian
+Gauss/M
+gauss's
+Gautama/M
+Gauthier/M
+Gautier/M
+gauze/SDGM
+gauziness/MS
+gauzy/TRP
+Gavan/M
+gave
+gavel/GMDS
+Gaven/M
+Gavin/M
+Gav/MN
+gavotte/MSDG
+Gavra/M
+Gavrielle/M
+Gawain/M
+Gawen/M
+gawkily
+gawkiness/MS
+gawk/SGRDM
+gawky/RSPT
+Gayel/M
+Gayelord/M
+Gaye/M
+gayety's
+Gayla/M
+Gayleen/M
+Gaylene/M
+Gayler/M
+Gayle/RM
+Gaylord/M
+Gaylor/M
+Gay/M
+gayness/SM
+Gaynor/M
+gay/RTPS
+Gaza/M
+gazebo/SM
+gaze/DRSZG
+gazelle/MS
+gazer/M
+gazetteer/SGDM
+gazette/MGSD
+Gaziantep/M
+gazillion/S
+gazpacho/MS
+GB
+G/B
+Gdansk/M
+Gd/M
+GDP
+Gearalt/M
+Gearard/M
+gearbox/SM
+gear/DMJSG
+gearing/M
+gearshift/MS
+gearstick
+gearwheel/SM
+Geary/M
+gecko/MS
+GED
+geegaw's
+geeing
+geek/SM
+geeky/RT
+geese/M
+geest/M
+gee/TDS
+geezer/MS
+Gehenna/M
+Gehrig/M
+Geiger/M
+Geigy/M
+geisha/M
+gelatinousness/M
+gelatinous/PY
+gelatin/SM
+gelcap
+gelding/M
+geld/JSGD
+gelid
+gelignite/MS
+gelled
+gelling
+gel/MBS
+Gelya/M
+Ge/M
+GE/M
+Gemini/SM
+gemlike
+Gemma/M
+gemmed
+gemming
+gem/MS
+gemological
+gemologist/MS
+gemology/MS
+gemstone/SM
+gen
+Gena/M
+Genaro/M
+gendarme/MS
+gender/DMGS
+genderless
+genealogical/Y
+genealogist/SM
+genealogy/MS
+Gene/M
+gene/MS
+generalissimo/SM
+generalist/MS
+generality/MS
+generalizable/SM
+generalization/MS
+generalized/U
+generalize/GZBSRD
+generalizer/M
+general/MSPY
+generalness/M
+generalship/SM
+genera/M
+generate/CXAVNGSD
+generational
+generation/MCA
+generative/AY
+generators/A
+generator/SM
+generically
+generic/PS
+generosity/MS
+generously/U
+generousness/SM
+generous/PY
+Genesco/M
+genesis/M
+Genesis/M
+genes/S
+genetically
+geneticist/MS
+genetic/S
+genetics/M
+Genet/M
+Geneva/M
+Genevieve/M
+Genevra/M
+Genghis/M
+geniality/FMS
+genially/F
+genialness/M
+genial/PY
+Genia/M
+genies/K
+genie/SM
+genii/M
+genitalia
+genitals
+genital/YF
+genitive/SM
+genitourinary
+genius/SM
+Gen/M
+Genna/M
+Gennie/M
+Gennifer/M
+Genni/M
+Genny/M
+Genoa/SM
+genocidal
+genocide/SM
+Geno/M
+genome/SM
+genotype/MS
+Genovera/M
+genre/MS
+gent/AMS
+genteelness/MS
+genteel/PRYT
+gentian/SM
+gentile/S
+Gentile's
+gentility/MS
+gentlefolk/S
+gentlemanliness/M
+gentlemanly/U
+gentleman/YM
+gentlemen
+gentleness/SM
+gentle/PRSDGT
+gentlewoman/M
+gentlewomen/M
+gently
+gentrification/M
+gentrify/NSDGX
+Gentry/M
+gentry/MS
+genuflect/GDS
+genuflection/MS
+genuineness/SM
+genuine/PY
+genus
+Genvieve/M
+geocentric
+geocentrically
+geocentricism
+geochemical/Y
+geochemistry/MS
+geochronology/M
+geodesic/S
+geode/SM
+geodesy/MS
+geodetic/S
+Geoff/M
+Geoffrey/M
+Geoffry/M
+geog
+geographer/MS
+geographic
+geographical/Y
+geography/MS
+geologic
+geological/Y
+geologist/MS
+geology/MS
+geom
+Geo/M
+geomagnetic
+geomagnetically
+geomagnetism/SM
+geometer/MS
+geometrical/Y
+geometrician/M
+geometric/S
+geometry/MS
+geomorphological
+geomorphology/M
+geophysical/Y
+geophysicist/MS
+geophysics/M
+geopolitical/Y
+geopolitic/S
+geopolitics/M
+Georas/M
+Geordie/M
+Georgeanna/M
+Georgeanne/M
+Georgena/M
+George/SM
+Georgeta/M
+Georgetown/M
+Georgetta/M
+Georgette/M
+Georgia/M
+Georgiana/M
+Georgianna/M
+Georgianne/M
+Georgian/S
+Georgie/M
+Georgi/M
+Georgina/M
+Georgine/M
+Georg/M
+Georgy/M
+geostationary
+geosynchronous
+geosyncline/SM
+geothermal
+geothermic
+Geralda/M
+Geraldine/M
+Gerald/M
+geranium/SM
+Gerard/M
+Gerardo/M
+Gerber/M
+gerbil/MS
+Gerda/M
+Gerek/M
+Gerhardine/M
+Gerhard/M
+Gerhardt/M
+Gerianna/M
+Gerianne/M
+geriatric/S
+geriatrics/M
+Gerick/M
+Gerik/M
+Geri/M
+Geritol/M
+Gerladina/M
+Ger/M
+Germaine/M
+Germain/M
+Germana/M
+germane
+Germania/M
+Germanic/M
+germanium/SM
+germanized
+German/SM
+Germantown/M
+Germany/M
+Germayne/M
+germen/M
+germicidal
+germicide/MS
+germinal/Y
+germinated/U
+germinate/XVGNSD
+germination/M
+germinative/Y
+germ/MNS
+Gerome/M
+Geronimo/M
+gerontocracy/M
+gerontological
+gerontologist/SM
+gerontology/SM
+Gerrard/M
+Gerrie/M
+Gerrilee/M
+Gerri/M
+Gerry/M
+gerrymander/SGD
+Gershwin/MS
+Gerta/M
+Gertie/M
+Gerti/M
+Gert/M
+Gertruda/M
+Gertrude/M
+Gertrudis/M
+Gertrud/M
+Gerty/M
+gerundive/M
+gerund/SVM
+Gery/M
+gestalt/M
+gestapo/S
+Gestapo/SM
+gestate/SDGNX
+gestational
+gestation/M
+gesticulate/XSDVGN
+gesticulation/M
+gesticulative/Y
+gestural
+gesture/SDMG
+gesundheit
+getaway/SM
+Gethsemane/M
+get/S
+getter/SDM
+getting
+Getty/M
+Gettysburg/M
+getup/MS
+gewgaw/MS
+Gewürztraminer
+geyser/GDMS
+Ghanaian/MS
+Ghana/M
+Ghanian's
+ghastliness/MS
+ghastly/TPR
+ghat/MS
+Ghats/M
+Ghent/M
+Gherardo/M
+gherkin/SM
+ghetto/DGMS
+ghettoize/SDG
+Ghibelline/M
+ghostlike
+ghostliness/MS
+ghostly/TRP
+ghost/SMYDG
+ghostwrite/RSGZ
+ghostwritten
+ghostwrote
+ghoulishness/SM
+ghoulish/PY
+ghoul/SM
+GHQ
+GI
+Giacinta/M
+Giacobo/M
+Giacometti/M
+Giacomo/M
+Giacopo/M
+Giana/M
+Gianina/M
+Gian/M
+Gianna/M
+Gianni/M
+Giannini/M
+giantess/MS
+giantkiller
+giant/SM
+Giauque/M
+Giavani/M
+gibber/DGS
+gibberish/MS
+gibbet/MDSG
+Gibbie/M
+Gibb/MS
+Gibbon/M
+gibbon/MS
+gibbousness/M
+gibbous/YP
+Gibby/M
+gibe/GDRS
+giber/M
+giblet/MS
+Gib/M
+Gibraltar/MS
+Gibson/M
+giddap
+giddily
+giddiness/SM
+Giddings/M
+giddy/GPRSDT
+Gide/M
+Gideon/MS
+Gielgud/M
+Gienah/M
+Giffard/M
+Giffer/M
+Giffie/M
+Gifford/M
+Giff/RM
+Giffy/M
+giftedness/M
+gifted/PY
+gift/SGMD
+gigabyte/S
+gigacycle/MS
+gigahertz/M
+gigantically
+giganticness/M
+gigantic/P
+gigavolt
+gigawatt/M
+gigged
+gigging
+giggler/M
+giggle/RSDGZ
+giggling/Y
+giggly/TR
+Gigi/M
+gig/MS
+GIGO
+gigolo/MS
+gila
+Gila/M
+Gilberta/M
+Gilberte/M
+Gilbertina/M
+Gilbertine/M
+gilbert/M
+Gilbert/M
+Gilberto/M
+Gilbertson/M
+Gilburt/M
+Gilchrist/M
+Gilda/M
+gilder/M
+gilding/M
+gild/JSGZRD
+Gilead/M
+Gilemette/M
+Giles
+Gilgamesh/M
+Gilkson/M
+Gillan/M
+Gilles
+Gillespie/M
+Gillette/M
+Gilliam/M
+Gillian/M
+Gillie/M
+Gilligan/M
+Gilli/M
+Gill/M
+gill/SGMRD
+Gilly/M
+Gilmore/M
+Gil/MY
+gilt/S
+gimbaled
+gimbals
+Gimbel/M
+gimcrackery/SM
+gimcrack/S
+gimlet/MDSG
+gimme/S
+gimmick/GDMS
+gimmickry/MS
+gimmicky
+gimp/GSMD
+gimpy/RT
+Gina/M
+Ginelle/M
+Ginevra/M
+gingerbread/SM
+gingerliness/M
+gingerly/P
+Ginger/M
+ginger/SGDYM
+gingersnap/SM
+gingery
+gingham/SM
+gingivitis/SM
+Gingrich/M
+ginkgoes
+ginkgo/M
+ginmill
+gin/MS
+ginned
+Ginnie/M
+Ginnifer/M
+Ginni/M
+ginning
+Ginny/M
+Gino/M
+Ginsberg/M
+Ginsburg/M
+ginseng/SM
+Gioconda/M
+Giordano/M
+Giorgia/M
+Giorgi/M
+Giorgio/M
+Giorgione/M
+Giotto/M
+Giovanna/M
+Giovanni/M
+Gipsy's
+giraffe/MS
+Giralda/M
+Giraldo/M
+Giraud/M
+Giraudoux/M
+girded/U
+girder/M
+girdle/GMRSD
+girdler/M
+gird/RDSGZ
+girlfriend/MS
+girlhood/SM
+girlie/M
+girlishness/SM
+girlish/YP
+girl/MS
+giro/M
+girt/GDS
+girth/MDG
+girths
+Gisela/M
+Giselbert/M
+Gisele/M
+Gisella/M
+Giselle/M
+Gish/M
+gist/MS
+git/M
+Giuditta/M
+Giulia/M
+Giuliano/M
+Giulietta/M
+Giulio/M
+Giuseppe/M
+Giustina/M
+Giustino/M
+Giusto/M
+giveaway/SM
+giveback/S
+give/HZGRS
+given/SP
+giver/M
+giving/Y
+Giza/M
+Gizela/M
+gizmo's
+gizzard/SM
+Gk/M
+glacé/DGS
+glacial/Y
+glaciate/XNGDS
+glaciation/M
+glacier/SM
+glaciological
+glaciologist/M
+glaciology/M
+gladded
+gladden/GDS
+gladder
+gladdest
+gladding
+gladdy
+glade/SM
+gladiatorial
+gladiator/SM
+Gladi/M
+gladiola/MS
+gladioli
+gladiolus/M
+gladly/RT
+Glad/M
+gladness/MS
+gladsome/RT
+Gladstone/MS
+Gladys
+glad/YSP
+glamor/DMGS
+glamorization/MS
+glamorizer/M
+glamorize/SRDZG
+glamorousness/M
+glamorous/PY
+glance/GJSD
+glancing/Y
+glanders/M
+glandes
+glandular/Y
+gland/ZSM
+glans/M
+glare/SDG
+glaringness/M
+glaring/YP
+Glaser/M
+Glasgow/M
+glasnost/S
+glassblower/S
+glassblowing/MS
+glassful/MS
+glass/GSDM
+glasshouse/SM
+glassily
+glassiness/SM
+glassless
+Glass/M
+glassware/SM
+glasswort/M
+glassy/PRST
+Glastonbury/M
+Glaswegian/S
+glaucoma/SM
+glaucous
+glazed/U
+glazer/M
+glaze/SRDGZJ
+glazier/SM
+glazing/M
+gleam/MDGS
+gleaner/M
+gleaning/M
+glean/RDGZJS
+Gleason/M
+Gleda/M
+gleed/M
+glee/DSM
+gleefulness/MS
+gleeful/YP
+gleeing
+Glendale/M
+Glenda/M
+Glenden/M
+Glendon/M
+Glenine/M
+Glen/M
+Glenna/M
+Glennie/M
+Glennis/M
+Glenn/M
+glen/SM
+glibber
+glibbest
+glibness/MS
+glib/YP
+glide/JGZSRD
+glider/M
+glim/M
+glimmer/DSJG
+glimmering/M
+glimpse/DRSZMG
+glimpser/M
+glint/DSG
+glissandi
+glissando/M
+glisten/DSG
+glister/DGS
+glitch/MS
+glitter/GDSJ
+glittering/Y
+glittery
+glitz/GSD
+glitzy/TR
+gloaming/MS
+gloater/M
+gloating/Y
+gloat/SRDG
+globalism/S
+globalist/S
+global/SY
+globe/SM
+globetrotter/MS
+glob/GDMS
+globularity/M
+globularness/M
+globular/PY
+globule/MS
+globulin/MS
+glockenspiel/SM
+glommed
+gloom/GSMD
+gloomily
+gloominess/MS
+gloomy/RTP
+glop/MS
+glopped
+glopping
+gloppy/TR
+Gloria/M
+Gloriana/M
+Gloriane/M
+glorification/M
+glorifier/M
+glorify/XZRSDNG
+Glori/M
+glorious/IYP
+gloriousness/IM
+Glory/M
+glory/SDMG
+glossary/MS
+gloss/GSDM
+glossily
+glossiness/SM
+glossolalia/SM
+glossy/RSPT
+glottal
+glottalization/M
+glottis/MS
+Gloucester/M
+gloveless
+glover/M
+Glover/M
+glove/SRDGMZ
+glower/GD
+glow/GZRDMS
+glowing/Y
+glowworm/SM
+glucose/SM
+glue/DRSMZG
+glued/U
+gluer/M
+gluey
+gluier
+gluiest
+glummer
+glummest
+glumness/MS
+glum/SYP
+gluon/M
+glutamate/M
+gluten/M
+glutenous
+glutinousness/M
+glutinous/PY
+glut/SMNX
+glutted
+glutting
+glutton/MS
+gluttonous/Y
+gluttony/SM
+glyceride/M
+glycerinate/MD
+glycerine's
+glycerin/SM
+glycerolized/C
+glycerol/SM
+glycine/M
+glycogen/SM
+glycol/MS
+Glynda/M
+Glynis/M
+Glyn/M
+Glynnis/M
+Glynn/M
+glyph/M
+glyphs
+gm
+GM
+GMT
+gnarl/SMDG
+gnash/SDG
+gnat/MS
+gnawer/M
+gnaw/GRDSJ
+gnawing/M
+gneiss/SM
+Gnni/M
+gnomelike
+GNOME/M
+gnome/SM
+gnomic
+gnomish
+gnomonic
+gnosticism
+Gnosticism/M
+gnostic/K
+Gnostic/M
+GNP
+gnu/MS
+goad/MDSG
+goalie/SM
+goalkeeper/MS
+goalkeeping/M
+goalless
+goal/MDSG
+goalmouth/M
+goalpost/S
+goalscorer
+goalscoring
+goaltender/SM
+Goa/M
+goatee/SM
+goatherd/MS
+goat/MS
+goatskin/SM
+gobbed
+gobbet/MS
+gobbing
+gobbledegook's
+gobbledygook/S
+gobbler/M
+gobble/SRDGZ
+Gobi/M
+goblet/MS
+goblin/SM
+gob/SM
+Godard/M
+Godart/M
+godchild/M
+godchildren
+goddammit
+goddamn/GS
+Goddard/M
+Goddart/M
+goddaughter/SM
+godded
+goddess/MS
+godding
+Gödel/M
+godfather/GSDM
+godforsaken
+Godfree/M
+Godfrey/M
+Godfry/M
+godhead/S
+godhood/SM
+Godiva/M
+godlessness/MS
+godless/P
+godlikeness/M
+godlike/P
+godliness/UMS
+godly/UTPR
+God/M
+godmother/MS
+Godot/M
+godparent/SM
+godsend/MS
+god/SMY
+godson/MS
+Godspeed/S
+Godthaab/M
+Godunov/M
+Godwin/M
+Godzilla/M
+Goebbels/M
+Goering/M
+goer/MG
+goes
+Goethals/M
+Goethe/M
+gofer/SM
+Goff/M
+goggler/M
+goggle/SRDGZ
+Gogh/M
+Gog/M
+Gogol/M
+Goiania/M
+going/M
+goiter/SM
+Golan/M
+Golconda/M
+Golda/M
+Goldarina/M
+Goldberg/M
+goldbricker/M
+goldbrick/GZRDMS
+Golden/M
+goldenness/M
+goldenrod/SM
+goldenseal/M
+golden/TRYP
+goldfinch/MS
+goldfish/SM
+Goldia/M
+Goldie/M
+Goldilocks/M
+Goldi/M
+Goldina/M
+Golding/M
+Goldman/M
+goldmine/S
+gold/MRNGTS
+goldsmith/M
+Goldsmith/M
+goldsmiths
+Goldstein/M
+Goldwater/M
+Goldwyn/M
+Goldy/M
+Goleta/M
+golfer/M
+golf/RDMGZS
+Golgotha/M
+Goliath/M
+Goliaths
+golly/S
+Gomez/M
+Gomorrah/M
+Gompers/M
+go/MRHZGJ
+gonadal
+gonad/SM
+gondola/SM
+gondolier/MS
+Gondwanaland/M
+goner/M
+gone/RZN
+gong/SGDM
+gonion/M
+gonna
+gonorrheal
+gonorrhea/MS
+Gonzales/M
+Gonzalez/M
+Gonzalo/M
+Goober/M
+goober/MS
+goodbye/MS
+goodhearted
+goodie's
+goodish
+goodly/TR
+Good/M
+Goodman/M
+goodness/MS
+goodnight
+Goodrich/M
+good/SYP
+goodwill/MS
+Goodwin/M
+Goodyear/M
+goody/SM
+gooey
+goofiness/MS
+goof/SDMG
+goofy/RPT
+Google/M
+gooier
+gooiest
+gook/SM
+goo/MS
+goon/SM
+goop/SM
+gooseberry/MS
+goosebumps
+goose/M
+goos/SDG
+GOP
+Gopher
+gopher/SM
+Goran/M
+Goraud/M
+Gorbachev
+Gordan/M
+Gorden/M
+Gordian/M
+Gordie/M
+Gordimer/M
+Gordon/M
+Gordy/M
+gore/DSMG
+Gore/M
+Goren/M
+Gorey/M
+Gorgas
+gorged/E
+gorge/GMSRD
+gorgeousness/SM
+gorgeous/YP
+gorger/EM
+gorges/E
+gorging/E
+Gorgon/M
+gorgon/S
+Gorgonzola/M
+Gorham/M
+gorilla/MS
+gorily
+goriness/MS
+goring/M
+Gorky/M
+gormandizer/M
+gormandize/SRDGZ
+gormless
+gorp/S
+gorse/SM
+gory/PRT
+gos
+goshawk/MS
+gosh/S
+gosling/M
+gospeler/M
+gospel/MRSZ
+Gospel/SM
+gossamer/SM
+gossipy
+gossip/ZGMRDS
+gotcha/SM
+Göteborg/M
+Gotham/M
+Gothart/M
+Gothicism/M
+Gothic/S
+Goth/M
+Goths
+got/IU
+goto
+GOTO/MS
+gotta
+gotten/U
+Gottfried/M
+Goucher/M
+Gouda/SM
+gouge/GZSRD
+gouger/M
+goulash/SM
+Gould/M
+Gounod/M
+gourde/SM
+gourd/MS
+gourmand/MS
+gourmet/MS
+gout/SM
+gouty/RT
+governable/U
+governance/SM
+governed/U
+governess/SM
+govern/LBGSD
+governmental/Y
+government/MS
+Governor
+governor/MS
+governorship/SM
+gov/S
+govt
+gown/GSDM
+Goya/M
+GP
+GPA
+GPO
+GPSS
+gr
+grabbed
+grabber/SM
+grabbing/S
+grab/S
+Gracchus/M
+grace/ESDMG
+graceful/EYPU
+gracefuller
+gracefullest
+gracefulness/ESM
+Graceland/M
+gracelessness/MS
+graceless/PY
+Grace/M
+Gracia/M
+Graciela/M
+Gracie/M
+graciousness/SM
+gracious/UY
+grackle/SM
+gradate/DSNGX
+gradation/MCS
+grade/ACSDG
+graded/U
+Gradeigh/M
+gradely
+grader/MC
+grade's
+Gradey/M
+gradient/RMS
+grad/MRDGZJS
+gradualism/MS
+gradualist/MS
+gradualness/MS
+gradual/SYP
+graduand/SM
+graduate/MNGDSX
+graduation/M
+Grady/M
+Graehme/M
+Graeme/M
+Graffias/M
+graffiti
+graffito/M
+Graff/M
+grafter/M
+grafting/M
+graft/MRDSGZ
+Grafton/M
+Grahame/M
+Graham/M
+graham/SM
+Graig/M
+grail/S
+Grail/SM
+grainer/M
+grain/IGSD
+graininess/MS
+graining/M
+grain's
+grainy/RTP
+gram/KSM
+Gram/M
+grammarian/SM
+grammar/MS
+grammaticality/M
+grammaticalness/M
+grammatical/UY
+grammatic/K
+gramme/SM
+Grammy/S
+gramophone/SM
+Grampians
+grampus/SM
+Granada/M
+granary/MS
+grandam/SM
+grandaunt/MS
+grandchild/M
+grandchildren
+granddaddy/MS
+granddad/SM
+granddaughter/MS
+grandee/SM
+grandeur/MS
+grandfather/MYDSG
+grandiloquence/SM
+grandiloquent/Y
+grandiose/YP
+grandiosity/MS
+grandkid/SM
+grandma/MS
+grandmaster/MS
+grandmother/MYS
+grandnephew/MS
+grandness/MS
+grandniece/SM
+grandpa/MS
+grandparent/MS
+grandson/MS
+grandstander/M
+grandstand/SRDMG
+grand/TPSYR
+granduncle/MS
+Grange/MR
+grange/MSR
+Granger/M
+granite/MS
+granitic
+Gran/M
+Grannie/M
+Granny/M
+granny/MS
+granola/S
+grantee/MS
+granter/M
+Grantham/M
+Granthem/M
+Grantley/M
+Grant/M
+grantor's
+grant/SGZMRD
+grantsmanship/S
+granularity/SM
+granular/Y
+granulate/SDXVGN
+granulation/M
+granule/SM
+granulocytic
+Granville/M
+grapefruit/SM
+grape/SDGM
+grapeshot/M
+grapevine/MS
+grapheme/M
+graph/GMD
+graphical/Y
+graphicness/M
+graphic/PS
+graphics/M
+graphite/SM
+graphologist/SM
+graphology/MS
+graphs
+grapnel/SM
+grapple/DRSG
+grappler/M
+grappling/M
+grasper/M
+graspingness/M
+grasping/PY
+grasp/SRDBG
+grass/GZSDM
+grasshopper/SM
+grassland/MS
+Grass/M
+grassroots
+grassy/RT
+Grata/M
+gratefuller
+gratefullest
+gratefulness/USM
+grateful/YPU
+grater/M
+grates/I
+grate/SRDJGZ
+Gratia/M
+Gratiana/M
+graticule/M
+gratification/M
+gratified/U
+gratifying/Y
+gratify/NDSXG
+grating/YM
+gratis
+gratitude/IMS
+gratuitousness/MS
+gratuitous/PY
+gratuity/SM
+gravamen/SM
+gravedigger/SM
+gravel/SGMYD
+graven
+graveness/MS
+graver/M
+graveside/S
+Graves/M
+grave/SRDPGMZTY
+gravestone/SM
+graveyard/MS
+gravidness/M
+gravid/PY
+gravimeter/SM
+gravimetric
+gravitas
+gravitate/XVGNSD
+gravitational/Y
+gravitation/M
+graviton/SM
+gravity/MS
+gravy/SM
+graybeard/MS
+Grayce/M
+grayish
+Gray/M
+grayness/S
+gray/PYRDGTS
+Grayson/M
+graze/GZSRD
+grazer/M
+Grazia/M
+grazing/M
+grease/GMZSRD
+greasepaint/MS
+greaseproof
+greaser/M
+greasily
+greasiness/SM
+greasy/PRT
+greatcoat/DMS
+greaten/DG
+greathearted
+greatness/MS
+great/SPTYRN
+grebe/MS
+Grecian/S
+Greece/M
+greed/C
+greedily
+greediness/SM
+greeds
+greed's
+greedy/RTP
+Greek/SM
+Greeley/M
+greenback/MS
+greenbelt/S
+Greenberg/M
+Greenblatt/M
+Greenbriar/M
+Greene/M
+greenery/MS
+Greenfeld/M
+greenfield
+Greenfield/M
+greenfly/M
+greengage/SM
+greengrocer/SM
+greengrocery/M
+greenhorn/SM
+greenhouse/SM
+greening/M
+greenish/P
+Greenland/M
+Green/M
+greenmail/GDS
+greenness/MS
+Greenpeace/M
+greenroom/SM
+Greensboro/M
+Greensleeves/M
+Greensville/M
+greensward/SM
+green/SYRDMPGT
+Greentree/M
+Greenville/M
+Greenwich/M
+greenwood/MS
+Greer/M
+greeter/M
+greeting/M
+greets/A
+greet/SRDJGZ
+gregariousness/MS
+gregarious/PY
+Gregg/M
+Greggory/M
+Greg/M
+Gregoire/M
+Gregoor/M
+Gregorian
+Gregorio/M
+Gregorius/M
+Gregor/M
+Gregory/M
+gremlin/SM
+Grenada/M
+grenade/MS
+Grenadian/S
+grenadier/SM
+Grenadines
+grenadine/SM
+Grendel/M
+Grenier/M
+Grenoble/M
+Grenville/M
+Gresham/M
+Gretal/M
+Greta/M
+Gretchen/M
+Gretel/M
+Grete/M
+Grethel/M
+Gretna/M
+Gretta/M
+Gretzky/M
+grew/A
+greybeard/M
+greyhound/MS
+Grey/M
+greyness/M
+gridded
+griddlecake/SM
+griddle/DSGM
+gridiron/GSMD
+gridlock/DSG
+grids/A
+grid/SGM
+grief/MS
+Grieg/M
+Grier/M
+grievance/SM
+griever/M
+grieve/SRDGZ
+grieving/Y
+grievousness/SM
+grievous/PY
+Griffie/M
+Griffin/M
+griffin/SM
+Griffith/M
+Griff/M
+griffon's
+Griffy/M
+griller/M
+grille/SM
+grill/RDGS
+grillwork/M
+grimace/DRSGM
+grimacer/M
+Grimaldi/M
+grime/MS
+Grimes
+griminess/MS
+grimmer
+grimmest
+Grimm/M
+grimness/MS
+grim/PGYD
+grimy/TPR
+Grinch/M
+grind/ASG
+grinder/MS
+grinding/SY
+grindstone/SM
+gringo/SM
+grinned
+grinner/M
+grinning/Y
+grin/S
+griper/M
+gripe/S
+grippe/GMZSRD
+gripper/M
+gripping/Y
+grip/SGZMRD
+Griselda/M
+grisliness/SM
+grisly/RPT
+Gris/M
+Grissel/M
+gristle/SM
+gristliness/M
+gristly/TRP
+gristmill/MS
+grist/MYS
+Griswold/M
+grit/MS
+gritted
+gritter/MS
+grittiness/SM
+gritting
+gritty/PRT
+Griz/M
+grizzle/DSG
+grizzling/M
+grizzly/TRS
+Gr/M
+groaner/M
+groan/GZSRDM
+groat/SM
+grocer/MS
+grocery/MS
+groggily
+grogginess/SM
+groggy/RPT
+grog/MS
+groin/MGSD
+grokked
+grokking
+grok/S
+grommet/GMDS
+Gromyko/M
+groofs
+groomer/M
+groom/GZSMRD
+groomsman/M
+groomsmen
+Groot/M
+groover/M
+groove/SRDGM
+groovy/TR
+groper/M
+grope/SRDJGZ
+Gropius/M
+grosbeak/SM
+grosgrain/MS
+Gross
+Grosset/M
+gross/GTYSRDP
+Grossman/M
+grossness/MS
+Grosvenor/M
+Grosz/M
+grotesqueness/MS
+grotesque/PSY
+Grotius/M
+Groton/M
+grottoes
+grotto/M
+grouch/GDS
+grouchily
+grouchiness/MS
+grouchy/RPT
+groundbreaking/S
+grounded/U
+grounder/M
+groundhog/SM
+ground/JGZMDRS
+groundlessness/M
+groundless/YP
+groundnut/MS
+groundsheet/M
+groundskeepers
+groundsman/M
+groundswell/S
+groundwater/S
+groundwork/SM
+grouped/A
+grouper/M
+groupie/MS
+grouping/M
+groups/A
+group/ZJSMRDG
+grouse/GMZSRD
+grouser/M
+grouter/M
+grout/GSMRD
+groveler/M
+grovelike
+groveling/Y
+grovel/SDRGZ
+Grover/M
+Grove/RM
+grove/SRMZ
+grower/M
+grow/GZYRHS
+growing/I
+growingly
+growler/M
+growling/Y
+growl/RDGZS
+growly/RP
+grown/IA
+grownup/MS
+grows/A
+growth/IMA
+growths/IA
+grubbed
+grubber/SM
+grubbily
+grubbiness/SM
+grubbing
+grubby/RTP
+grub/MS
+grubstake/MSDG
+grudge/GMSRDJ
+grudger/M
+grudging/Y
+grueling/Y
+gruel/MDGJS
+gruesomeness/SM
+gruesome/RYTP
+gruffness/MS
+gruff/PSGTYRD
+grumble/GZJDSR
+grumbler/M
+grumbling/Y
+Grumman/M
+grumpily
+grumpiness/MS
+grump/MDGS
+grumpy/TPR
+Grundy/M
+Grünewald/M
+grunge/S
+grungy/RT
+grunion/SM
+grunter/M
+grunt/SGRD
+Grusky/M
+Grus/M
+Gruyère
+Gruyeres
+gryphon's
+g's
+G's
+gs/A
+GSA
+gt
+GU
+guacamole/MS
+Guadalajara/M
+Guadalcanal/M
+Guadalquivir/M
+Guadalupe/M
+Guadeloupe/M
+Guallatiri/M
+Gualterio/M
+Guamanian/SM
+Guam/M
+Guangzhou
+guanine/MS
+guano/MS
+Guantanamo/M
+Guarani/M
+guarani/SM
+guaranteeing
+guarantee/RSDZM
+guarantor/SM
+guaranty/MSDG
+guardedness/UM
+guarded/UYP
+guarder/M
+guardhouse/SM
+Guardia/M
+guardianship/MS
+guardian/SM
+guardrail/SM
+guard/RDSGZ
+guardroom/SM
+guardsman/M
+guardsmen
+Guarnieri/M
+Guatemala/M
+Guatemalan/S
+guava/SM
+Guayaquil/M
+gubernatorial
+Gucci/M
+gudgeon/M
+Guelph/M
+Guendolen/M
+Guenevere/M
+Guenna/M
+Guenther/M
+guernsey/S
+Guernsey/SM
+Guerra/M
+Guerrero/M
+guerrilla/MS
+guessable/U
+guess/BGZRSD
+guessed/U
+guesser/M
+guesstimate/DSMG
+guesswork/MS
+guest/SGMD
+Guevara/M
+guffaw/GSDM
+guff/SM
+Guggenheim/M
+Guglielma/M
+Guglielmo/M
+Guhleman/M
+GUI
+Guiana/M
+guidance/MS
+guidebook/SM
+guided/U
+guide/GZSRD
+guideline/SM
+guidepost/MS
+guider/M
+Guido/M
+Guilbert/M
+guilder/M
+guildhall/SM
+guild/SZMR
+guileful
+guilelessness/MS
+guileless/YP
+guile/SDGM
+Guillaume/M
+Guillema/M
+Guillemette/M
+guillemot/MS
+Guillermo/M
+guillotine/SDGM
+guiltily
+guiltiness/MS
+guiltlessness/M
+guiltless/YP
+guilt/SM
+guilty/PTR
+Gui/M
+Guinea/M
+Guinean/S
+guinea/SM
+Guinevere/M
+Guinna/M
+Guinness/M
+guise's
+guise/SDEG
+guitarist/SM
+guitar/SM
+Guiyang
+Guizot/M
+Gujarati/M
+Gujarat/M
+Gujranwala/M
+gulag/S
+gulch/MS
+gulden/MS
+gulf/DMGS
+Gullah/M
+gullet/MS
+gulley's
+gullibility/MS
+gullible
+Gulliver/M
+gull/MDSG
+gully/SDMG
+gulp/RDGZS
+gumboil/MS
+gumbo/MS
+gumboots
+gumdrop/SM
+gummed
+gumminess/M
+gumming/C
+gum/MS
+gummy/RTP
+gumption/SM
+gumshoeing
+gumshoe/SDM
+gumtree/MS
+Gunar/M
+gunboat/MS
+Gunderson/M
+gunfighter/M
+gunfight/SRMGZ
+gunfire/SM
+gunflint/M
+gunfought
+Gunilla/M
+gunk/SM
+gunky/RT
+Gun/M
+gunman/M
+gunmen
+gunmetal/MS
+gun/MS
+Gunnar/M
+gunned
+gunnel's
+Gunner/M
+gunner/SM
+gunnery/MS
+gunning/M
+gunnysack/SM
+gunny/SM
+gunpoint/MS
+gunpowder/SM
+gunrunner/MS
+gunrunning/MS
+gunship/S
+gunshot/SM
+gunslinger/M
+gunsling/GZR
+gunsmith/M
+gunsmiths
+Guntar/M
+Gunter/M
+Gunther/M
+gunwale/MS
+Guofeng/M
+guppy/SM
+Gupta/M
+gurgle/SDG
+Gurkha/M
+gurney/S
+guru/MS
+Gusella/M
+gusher/M
+gush/SRDGZ
+gushy/TR
+Gus/M
+Guss
+gusset/MDSG
+Gussie/M
+Gussi/M
+gussy/GSD
+Gussy/M
+Gustaf/M
+Gustafson/M
+Gusta/M
+gustatory
+Gustave/M
+Gustav/M
+Gustavo/M
+Gustavus/M
+gusted/E
+Gustie/M
+gustily
+Gusti/M
+gustiness/M
+gusting/E
+gust/MDGS
+gustoes
+gusto/M
+gusts/E
+Gusty/M
+gusty/RPT
+Gutenberg/M
+Guthrey/M
+Guthrie/M
+Guthry/M
+Gutierrez/M
+gutlessness/S
+gutless/P
+gutser/M
+gutsiness/M
+gut/SM
+guts/R
+gutsy/PTR
+gutted
+gutter/GSDM
+guttering/M
+guttersnipe/M
+gutting
+gutturalness/M
+guttural/SPY
+gutty/RSMT
+Guyana/M
+Guyanese
+Guy/M
+guy/MDRZGS
+Guzman/M
+guzzle/GZRSD
+guzzler/M
+g/VBX
+Gwalior/M
+Gwendolen/M
+Gwendoline/M
+Gwendolin/M
+Gwendolyn/M
+Gweneth/M
+Gwenette/M
+Gwen/M
+Gwenneth/M
+Gwennie/M
+Gwenni/M
+Gwenny/M
+Gwenora/M
+Gwenore/M
+Gwyneth/M
+Gwyn/M
+Gwynne/M
+gymkhana/SM
+gym/MS
+gymnasia's
+gymnasium/SM
+gymnastically
+gymnastic/S
+gymnastics/M
+gymnast/SM
+gymnosperm/SM
+gynecologic
+gynecological/MS
+gynecologist/SM
+gynecology/MS
+gypped
+gypper/S
+gypping
+gyp/S
+gypsite
+gypster/S
+gypsum/MS
+gypsy/SDMG
+Gypsy/SM
+gyrate/XNGSD
+gyration/M
+gyrator/MS
+gyrfalcon/SM
+gyrocompass/M
+gyro/MS
+gyroscope/SM
+gyroscopic
+gyve/GDS
+H
+Haag/M
+Haas/M
+Habakkuk/M
+habeas
+haberdasher/SM
+haberdashery/SM
+Haber/M
+Haberman/M
+Habib/M
+habiliment/SM
+habitability/MS
+habitableness/M
+habitable/P
+habitant/ISM
+habitation/MI
+habitations
+habitat/MS
+habit/IBDGS
+habit's
+habitualness/SM
+habitual/SYP
+habituate/SDNGX
+habituation/M
+habitué/MS
+hacienda/MS
+hacker/M
+Hackett/M
+hack/GZSDRBJ
+hackler/M
+hackle/RSDMG
+hackney/SMDG
+hacksaw/SDMG
+hackwork/S
+Hadamard/M
+Hadar/M
+Haddad/M
+haddock/MS
+hades
+Hades
+had/GD
+hadji's
+hadj's
+Hadlee/M
+Hadleigh/M
+Hadley/M
+Had/M
+hadn't
+Hadria/M
+Hadrian/M
+hadron/MS
+hadst
+haemoglobin's
+haemophilia's
+haemorrhage's
+Hafiz/M
+hafnium/MS
+haft/GSMD
+Hagan/M
+Hagar/M
+Hagen/M
+Hager/M
+Haggai/M
+haggardness/MS
+haggard/SYP
+hagged
+hagging
+haggish
+haggis/SM
+haggler/M
+haggle/RSDZG
+Hagiographa/M
+hagiographer/SM
+hagiography/MS
+hag/SMN
+Hagstrom/M
+Hague/M
+ha/H
+hahnium/S
+Hahn/M
+Haifa/M
+haiku/M
+Hailee/M
+hailer/M
+Hailey/M
+hail/SGMDR
+hailstone/SM
+hailstorm/SM
+Haily/M
+Haiphong/M
+hairball/SM
+hairbreadth/M
+hairbreadths
+hairbrush/SM
+haircare
+haircloth/M
+haircloths
+haircut/MS
+haircutting
+hairdo/SM
+hairdresser/SM
+hairdressing/SM
+hairdryer/S
+hairiness/MS
+hairlessness/M
+hairless/P
+hairlike
+hairline/SM
+hairnet/MS
+hairpiece/MS
+hairpin/MS
+hairsbreadth
+hairsbreadths
+hair/SDM
+hairsplitter/SM
+hairsplitting/MS
+hairspray
+hairspring/SM
+hairstyle/SMG
+hairstylist/S
+hairy/PTR
+Haitian/S
+Haiti/M
+hajjes
+hajji/MS
+hajj/M
+Hakeem/M
+hake/MS
+Hakim/M
+Hakka/M
+Hakluyt/M
+halalled
+halalling
+halal/S
+halberd/SM
+halcyon/S
+Haldane/M
+Haleakala/M
+Haleigh/M
+hale/ISRDG
+Hale/M
+haler/IM
+halest
+Halette/M
+Haley/M
+halfback/SM
+halfbreed
+halfheartedness/MS
+halfhearted/PY
+halfpence/S
+halfpenny/MS
+halfpennyworth
+half/PM
+halftime/S
+halftone/MS
+halfway
+halfword/MS
+halibut/SM
+halide/SM
+Halie/M
+Halifax/M
+Hali/M
+Halimeda/M
+halite/MS
+halitoses
+halitosis/M
+hallelujah
+hallelujahs
+Halley/M
+halliard's
+Hallie/M
+Halli/M
+Hallinan/M
+Hall/M
+Hallmark/M
+hallmark/SGMD
+hallo/GDS
+halloo's
+Halloween/MS
+hallowing
+hallows
+hallow/UD
+hall/SMR
+Hallsy/M
+hallucinate/VNGSDX
+hallucination/M
+hallucinatory
+hallucinogenic/S
+hallucinogen/SM
+hallway/SM
+Hally/M
+halocarbon
+halogenated
+halogen/SM
+halon
+halo/SDMG
+Halpern/M
+Halsey/M
+Hal/SMY
+Halsy/M
+halter/GDM
+halt/GZJSMDR
+halting/Y
+halve/GZDS
+halves/M
+halyard/MS
+Ha/M
+Hamal/M
+Haman/M
+hamburger/M
+Hamburg/MS
+hamburg/SZRM
+Hamel/M
+Hamey/M
+Hamhung/M
+Hamid/M
+Hamilcar/M
+Hamil/M
+Hamiltonian/MS
+Hamilton/M
+Hamish/M
+Hamitic/M
+Hamlen/M
+Hamlet/M
+hamlet/MS
+Hamlin/M
+Ham/M
+Hammad/M
+Hammarskjold/M
+hammed
+hammerer/M
+hammerhead/SM
+hammering/M
+hammerless
+hammerlock/MS
+Hammerstein/M
+hammertoe/SM
+hammer/ZGSRDM
+Hammett/M
+hamming
+hammock/MS
+Hammond/M
+Hammurabi/M
+hammy/RT
+Hamnet/M
+hampered/U
+hamper/GSD
+Hampshire/M
+Hampton/M
+ham/SM
+hamster/MS
+hamstring/MGS
+hamstrung
+Hamsun/M
+Hana/M
+Hanan/M
+Hancock/M
+handbagged
+handbagging
+handbag/MS
+handball/SM
+handbarrow/MS
+handbasin
+handbill/MS
+handbook/SM
+handbrake/M
+handcar/SM
+handcart/MS
+handclasp/MS
+handcraft/GMDS
+handcuff/GSD
+handcuffs/M
+handedness/M
+handed/PY
+Handel/M
+hander/S
+handful/SM
+handgun/SM
+handhold/M
+handicapped
+handicapper/SM
+handicapping
+handicap/SM
+handicraftsman/M
+handicraftsmen
+handicraft/SMR
+handily/U
+handiness/SM
+handiwork/MS
+handkerchief/MS
+handleable
+handlebar/SM
+handle/MZGRSD
+handler/M
+handless
+handling/M
+handmade
+handmaiden/M
+handmaid/NMSX
+handout/SM
+handover
+handpick/GDS
+handrail/SM
+hand's
+handsaw/SM
+handset/SM
+handshake/GMSR
+handshaker/M
+handshaking/M
+handsomely/U
+handsomeness/MS
+handsome/RPTY
+handspike/SM
+handspring/SM
+handstand/MS
+hand/UDSG
+handwork/SM
+handwoven
+handwrite/GSJ
+handwriting/M
+handwritten
+Handy/M
+handyman/M
+handymen
+handy/URT
+Haney/M
+hangar/SGDM
+hangdog/S
+hanged/A
+hanger/M
+hang/GDRZBSJ
+hanging/M
+hangman/M
+hangmen
+hangnail/MS
+hangout/MS
+hangover/SM
+hangs/A
+Hangul/M
+hangup/S
+Hangzhou
+Hankel/M
+hankerer/M
+hanker/GRDJ
+hankering/M
+hank/GZDRMS
+hankie/SM
+Hank/M
+hanky's
+Hannah/M
+Hanna/M
+Hannibal/M
+Hannie/M
+Hanni/MS
+Hanny/M
+Hanoi/M
+Hanoverian
+Hanover/M
+Hansel/M
+Hansen/M
+Hansiain/M
+Han/SM
+Hans/N
+hansom/MS
+Hanson/M
+Hanuka/S
+Hanukkah/M
+Hanukkahs
+Hapgood/M
+haphazardness/SM
+haphazard/SPY
+haplessness/MS
+hapless/YP
+haploid/S
+happed
+happening/M
+happen/JDGS
+happenstance/SM
+happily/U
+happiness/UMS
+happing
+Happy/M
+happy/UTPR
+Hapsburg/M
+hap/SMY
+Harald/M
+harangue/GDRS
+haranguer/M
+Harare
+harasser/M
+harass/LSRDZG
+harassment/SM
+Harbert/M
+harbinger/DMSG
+Harbin/M
+harborer/M
+harbor/ZGRDMS
+Harcourt/M
+hardback/SM
+hardball/SM
+hardboard/SM
+hardboiled
+hardbound
+hardcore/MS
+hardcover/SM
+hardened/U
+hardener/M
+hardening/M
+harden/ZGRD
+hardhat/S
+hardheadedness/SM
+hardheaded/YP
+hardheartedness/SM
+hardhearted/YP
+hardihood/MS
+hardily
+hardiness/SM
+Harding/M
+Hardin/M
+hardliner/S
+hardness/MS
+hardscrabble
+hardshell
+hardship/MS
+hardstand/S
+hardtack/MS
+hardtop/MS
+hardware/SM
+hardwire/DSG
+hardwood/MS
+hardworking
+Hardy/M
+hard/YNRPJGXTS
+hardy/PTRS
+harebell/MS
+harebrained
+harelip/MS
+harelipped
+hare/MGDS
+harem/SM
+Hargreaves/M
+hark/GDS
+Harland/M
+Harlan/M
+Harlem/M
+Harlene/M
+Harlen/M
+Harlequin
+harlequin/MS
+Harley/M
+Harlie/M
+Harli/M
+Harlin/M
+harlotry/MS
+harlot/SM
+Harlow/M
+Harman/M
+harmed/U
+harmer/M
+harmfulness/MS
+harmful/PY
+harmlessness/SM
+harmless/YP
+harm/MDRGS
+Harmonia/M
+harmonically
+harmonica/MS
+harmonic/S
+harmonics/M
+Harmonie/M
+harmonious/IPY
+harmoniousness/MS
+harmoniousness's/I
+harmonium/MS
+harmonization/A
+harmonizations
+harmonization's
+harmonized/U
+harmonizer/M
+harmonizes/UA
+harmonize/ZGSRD
+Harmon/M
+harmony/EMS
+Harmony/M
+harness/DRSMG
+harnessed/U
+harnesser/M
+harnesses/U
+Harold/M
+Haroun/M
+harper/M
+Harper/M
+harping/M
+harpist/SM
+harp/MDRJGZS
+Harp/MR
+harpooner/M
+harpoon/SZGDRM
+harpsichordist/MS
+harpsichord/SM
+harpy/SM
+Harpy/SM
+Harrell/M
+harridan/SM
+Harrie/M
+harrier/M
+Harriet/M
+Harrietta/M
+Harriette/M
+Harriett/M
+Harrington/M
+Harriot/M
+Harriott/M
+Harrisburg/M
+Harri/SM
+Harrisonburg/M
+Harrison/M
+harrower/M
+harrow/RDMGS
+harrumph/SDG
+Harry/M
+harry/RSDGZ
+harshen/GD
+harshness/SM
+harsh/TRNYP
+Harte/M
+Hartford/M
+Hartley/M
+Hartline/M
+Hart/M
+Hartman/M
+hart/MS
+Hartwell/M
+Harvard/M
+harvested/U
+harvester/M
+harvestman/M
+harvest/MDRZGS
+Harvey/MS
+Harv/M
+Harwell/M
+Harwilll/M
+has
+Hasbro/M
+hash/AGSD
+Hasheem/M
+hasher/M
+Hashim/M
+hashing/M
+hashish/MS
+hash's
+Hasidim
+Haskell/M
+Haskel/M
+Haskins/M
+Haslett/M
+hasn't
+hasp/GMDS
+hassle/MGRSD
+hassock/MS
+haste/MS
+hastener/M
+hasten/GRD
+hast/GXJDN
+Hastie/M
+hastily
+hastiness/MS
+Hastings/M
+Hasty/M
+hasty/RPT
+hatchback/SM
+hatcheck/S
+hatched/U
+hatcher/M
+hatchery/MS
+hatchet/MDSG
+hatching/M
+hatch/RSDJG
+Hatchure/M
+hatchway/MS
+hatefulness/MS
+hateful/YP
+hater/M
+hate/S
+Hatfield/M
+Hathaway/M
+hatless
+hat/MDRSZG
+hatred/SM
+hatstands
+hatted
+Hatteras/M
+hatter/SM
+Hattie/M
+Hatti/M
+hatting
+Hatty/M
+hauberk/SM
+Haugen/M
+haughtily
+haughtiness/SM
+haughty/TPR
+haulage/MS
+hauler/M
+haul/SDRGZ
+haunch/GMSD
+haunter/M
+haunting/Y
+haunt/JRDSZG
+Hauptmann/M
+Hausa/M
+Hausdorff/M
+Hauser/M
+hauteur/MS
+Havana/SM
+Havarti
+Havel/M
+haven/DMGS
+Haven/M
+haven't
+haver/G
+haversack/SM
+have/ZGSR
+havocked
+havocking
+havoc/SM
+Haw
+Hawaiian/S
+Hawaii/M
+hawker/M
+hawk/GZSDRM
+Hawking
+hawking/M
+Hawkins/M
+hawkishness/S
+hawkish/P
+Hawley/M
+haw/MDSG
+hawser/M
+haws/RZ
+Hawthorne/M
+hawthorn/MS
+haycock/SM
+Hayden/M
+Haydn/M
+Haydon/M
+Hayes
+hayfield/MS
+hay/GSMDR
+Hayley/M
+hayloft/MS
+haymow/MS
+Haynes
+hayrick/MS
+hayride/MS
+hayseed/MS
+Hay/SM
+haystack/SM
+haywain
+Hayward/M
+haywire/MS
+Haywood/M
+Hayyim/M
+hazard/MDGS
+hazardousness/M
+hazardous/PY
+haze/DSRJMZG
+Hazel/M
+hazel/MS
+hazelnut/SM
+Haze/M
+hazer/M
+hazily
+haziness/MS
+hazing/M
+Hazlett/M
+Hazlitt/M
+hazy/PTR
+HBO/M
+hdqrs
+HDTV
+headache/MS
+headband/SM
+headboard/MS
+headcount
+headdress/MS
+header/M
+headfirst
+headgear/SM
+headhunter/M
+headhunting/M
+headhunt/ZGSRDMJ
+headily
+headiness/S
+heading/M
+headlamp/S
+headland/MS
+headlessness/M
+headless/P
+headlight/MS
+headline/DRSZMG
+headliner/M
+headlock/MS
+headlong
+Head/M
+headman/M
+headmaster/MS
+headmastership/M
+headmen
+headmistress/MS
+headphone/SM
+headpiece/SM
+headpin/MS
+headquarter/GDS
+headrest/MS
+headroom/SM
+headscarf/M
+headset/SM
+headship/SM
+headshrinker/MS
+head/SJGZMDR
+headsman/M
+headsmen
+headstall/SM
+headstand/MS
+headstock/M
+headstone/MS
+headstrong
+headwaiter/SM
+headwall/S
+headwater/S
+headway/MS
+headwind/SM
+headword/MS
+heady/PTR
+heal/DRHSGZ
+healed/U
+healer/M
+Heall/M
+healthfully
+healthfulness/SM
+healthful/U
+healthily/U
+healthiness/MSU
+health/M
+healths
+healthy/URPT
+heap/SMDG
+heard/UA
+hearer/M
+hearing/AM
+hearken/SGD
+hearsay/SM
+hearse/M
+hears/SDAG
+Hearst/M
+heartache/SM
+heartbeat/MS
+heartbreak/GMS
+heartbreaking/Y
+heartbroke
+heartbroken
+heartburning/M
+heartburn/SGM
+hearted/Y
+hearten/EGDS
+heartening/EY
+heartfelt
+hearth/M
+hearthrug
+hearths
+hearthstone/MS
+heartily
+heartiness/SM
+heartland/SM
+heartlessness/SM
+heartless/YP
+heartrending/Y
+heartsickness/MS
+heartsick/P
+heart/SMDNXG
+heartstrings
+heartthrob/MS
+heartwarming
+Heartwood/M
+heartwood/SM
+hearty/TRSP
+hear/ZTSRHJG
+heatedly
+heated/UA
+heater/M
+heathendom/SM
+heathenish/Y
+heathenism/MS
+heathen/M
+heather/M
+Heather/M
+heathery
+Heathkit/M
+heathland
+Heathman/M
+Heath/MR
+heath/MRNZX
+heaths
+heatproof
+heats/A
+heat/SMDRGZBJ
+heatstroke/MS
+heatwave
+heave/DSRGZ
+heavenliness/M
+heavenly/PTR
+heaven/SYM
+heavenward/S
+heaver/M
+heaves/M
+heavily
+heaviness/MS
+Heaviside/M
+heavyhearted
+heavyset
+heavy/TPRS
+heavyweight/SM
+Hebe/M
+hebephrenic
+Hebert/M
+Heb/M
+Hebraic
+Hebraism/MS
+Hebrew/SM
+Hebrides/M
+Hecate/M
+hecatomb/M
+heckler/M
+heckle/RSDZG
+heck/S
+hectare/MS
+hectically
+hectic/S
+hectogram/MS
+hectometer/SM
+Hector/M
+hector/SGD
+Hecuba/M
+he'd
+Heda/M
+Hedda/M
+Heddie/M
+Heddi/M
+hedge/DSRGMZ
+hedgehog/MS
+hedgehopped
+hedgehopping
+hedgehop/S
+hedger/M
+hedgerow/SM
+hedging/Y
+Hedi/M
+hedonism/SM
+hedonistic
+hedonist/MS
+Hedvige/M
+Hedvig/M
+Hedwiga/M
+Hedwig/M
+Hedy/M
+heeded/U
+heedfulness/M
+heedful/PY
+heeding/U
+heedlessness/SM
+heedless/YP
+heed/SMGD
+heehaw/DGS
+heeler/M
+heeling/M
+heelless
+heel/SGZMDR
+Heep/M
+Hefner/M
+heft/GSD
+heftily
+heftiness/SM
+hefty/TRP
+Hegelian
+Hegel/M
+hegemonic
+hegemony/MS
+Hegira/M
+hegira/S
+Heida/M
+Heidegger/M
+Heidelberg/M
+Heidie/M
+Heidi/M
+heifer/MS
+Heifetz/M
+heighten/GD
+height/SMNX
+Heimlich/M
+Heindrick/M
+Heineken/M
+Heine/M
+Heinlein/M
+heinousness/SM
+heinous/PY
+Heinrich/M
+Heinrick/M
+Heinrik/M
+Heinze/M
+Heinz/M
+heiress/MS
+heirloom/MS
+heir/SDMG
+Heisenberg/M
+Heiser/M
+heister/M
+heist/GSMRD
+Hejira's
+Helaina/M
+Helaine/M
+held
+Helena/M
+Helene/M
+Helenka/M
+Helen/M
+Helga/M
+Helge/M
+helical/Y
+helices/M
+helicon/M
+Helicon/M
+helicopter/GSMD
+heliocentric
+heliography/M
+Heliopolis/M
+Helios/M
+heliosphere
+heliotrope/SM
+heliport/MS
+helium/MS
+helix/M
+he'll
+hellbender/M
+hellbent
+hellcat/SM
+hellebore/SM
+Hellene/SM
+Hellenic
+Hellenism/MS
+Hellenistic
+Hellenist/MS
+Hellenization/M
+Hellenize
+heller/M
+Heller/M
+Hellespont/M
+hellfire/M
+hell/GSMDR
+hellhole/SM
+Helli/M
+hellion/SM
+hellishness/SM
+hellish/PY
+Hellman/M
+hello/GMS
+Hell's
+helluva
+helmed
+helmet/GSMD
+Helmholtz/M
+helming
+helms
+helm's
+helmsman/M
+helmsmen
+helm/U
+Helmut/M
+Héloise/M
+helot/S
+helper/M
+helpfulness/MS
+helpful/UY
+help/GZSJDR
+helping/M
+helplessness/SM
+helpless/YP
+helpline/S
+helpmate/SM
+helpmeet's
+Helsa/M
+Helsinki/M
+helve/GMDS
+Helvetian/S
+Helvetius/M
+Helyn/M
+He/M
+hematite/MS
+hematologic
+hematological
+hematologist/SM
+hematology/MS
+heme/MS
+Hemingway/M
+hemisphere/MSD
+hemispheric
+hemispherical
+hemline/SM
+hemlock/MS
+hemmed
+hemmer/SM
+hemming
+hem/MS
+hemoglobin/MS
+hemolytic
+hemophiliac/SM
+hemophilia/SM
+hemorrhage/GMDS
+hemorrhagic
+hemorrhoid/MS
+hemostat/SM
+hemp/MNS
+h/EMS
+hemstitch/DSMG
+henceforth
+henceforward
+hence/S
+Hench/M
+henchman/M
+henchmen
+Henderson/M
+Hendrick/SM
+Hendrickson/M
+Hendrika/M
+Hendrik/M
+Hendrix/M
+henge/M
+Henka/M
+Henley/M
+hen/MS
+henna/MDSG
+Hennessey/M
+henning
+henpeck/GSD
+Henrie/M
+Henrieta/M
+Henrietta/M
+Henriette/M
+Henrik/M
+Henri/M
+Henryetta/M
+henry/M
+Henry/M
+Hensley/M
+Henson/M
+heparin/MS
+hepatic/S
+hepatitides
+hepatitis/M
+Hepburn/M
+Hephaestus/M
+Hephzibah/M
+hepper
+heppest
+Hepplewhite
+hep/S
+heptagonal
+heptagon/SM
+heptane/M
+heptathlon/S
+her
+Heracles/M
+Heraclitus/M
+heralded/U
+heraldic
+herald/MDSG
+heraldry/MS
+Hera/M
+herbaceous
+herbage/MS
+herbalism
+herbalist/MS
+herbal/S
+Herbart/M
+Herbert/M
+herbicidal
+herbicide/MS
+Herbie/M
+herbivore/SM
+herbivorous/Y
+Herb/M
+herb/MS
+Herby/M
+Herc/M
+Herculaneum/M
+herculean
+Herculean
+Hercule/MS
+Herculie/M
+herder/M
+Herder/M
+herd/MDRGZS
+herdsman/M
+herdsmen
+hereabout/S
+hereafter/S
+hereby
+hereditary
+heredity/MS
+Hereford/SM
+herein
+hereinafter
+here/IS
+hereof
+hereon
+here's
+heres/M
+heresy/SM
+heretical
+heretic/SM
+hereto
+heretofore
+hereunder
+hereunto
+hereupon
+herewith
+Heriberto/M
+heritable
+heritage/MS
+heritor/IM
+Herkimer/M
+Herman/M
+Hermann/M
+hermaphrodite/SM
+hermaphroditic
+Hermaphroditus/M
+hermeneutic/S
+hermeneutics/M
+Hermes
+hermetical/Y
+hermetic/S
+Hermia/M
+Hermie/M
+Hermina/M
+Hermine/M
+Herminia/M
+Hermione/M
+hermitage/SM
+Hermite/M
+hermitian
+hermit/MS
+Hermon/M
+Hermosa/M
+Hermosillo/M
+Hermy/M
+Hernandez/M
+Hernando/M
+hernial
+hernia/MS
+herniate/NGXDS
+Herod/M
+Herodotus/M
+heroes
+heroically
+heroics
+heroic/U
+heroine/SM
+heroin/MS
+heroism/SM
+Herold/M
+hero/M
+heron/SM
+herpes/M
+herpetologist/SM
+herpetology/MS
+Herrera/M
+Herrick/M
+herringbone/SDGM
+Herring/M
+herring/SM
+Herrington/M
+Herr/MG
+Herschel/M
+Hersch/M
+herself
+Hersey/M
+Hershel/M
+Hershey/M
+Hersh/M
+Herta/M
+Hertha/M
+hertz/M
+Hertz/M
+Hertzog/M
+Hertzsprung/M
+Herve/M
+Hervey/M
+Herzegovina/M
+Herzl/M
+hes
+Hesiod/M
+hesitance/S
+hesitancy/SM
+hesitantly
+hesitant/U
+hesitater/M
+hesitate/XDRSNG
+hesitating/UY
+hesitation/M
+Hesperus/M
+Hesse/M
+Hessian/MS
+Hess/M
+Hester/M
+Hesther/M
+Hestia/M
+Heston/M
+heterodox
+heterodoxy/MS
+heterodyne
+heterogamous
+heterogamy/M
+heterogeneity/SM
+heterogeneousness/M
+heterogeneous/PY
+heterosexuality/SM
+heterosexual/YMS
+heterostructure
+heterozygous
+Hettie/M
+Hetti/M
+Hetty/M
+Heublein/M
+heuristically
+heuristic/SM
+Heusen/M
+Heuser/M
+he/VMZ
+hew/DRZGS
+Hewe/M
+hewer/M
+Hewet/M
+Hewett/M
+Hewie/M
+Hewitt/M
+Hewlett/M
+Hew/M
+hexachloride/M
+hexadecimal/YS
+hexafluoride/M
+hexagonal/Y
+hexagon/SM
+hexagram/SM
+hexameter/SM
+hex/DSRG
+hexer/M
+hey
+heyday/MS
+Heyerdahl/M
+Heywood/M
+Hezekiah/M
+hf
+HF
+Hf/M
+Hg/M
+hgt
+hgwy
+HHS
+HI
+Hialeah/M
+hiatus/SM
+Hiawatha/M
+hibachi/MS
+hibernate/XGNSD
+hibernation/M
+hibernator/SM
+Hibernia/M
+Hibernian/S
+hibiscus/MS
+hiccup/MDGS
+hickey/SM
+Hickey/SM
+Hickman/M
+Hickok/M
+hickory/MS
+hick/SM
+Hicks/M
+hi/D
+hidden/U
+hideaway/SM
+hidebound
+hideousness/SM
+hideous/YP
+hideout/MS
+hider/M
+hide/S
+hiding/M
+hid/ZDRGJ
+hieing
+hierarchal
+hierarchic
+hierarchical/Y
+hierarchy/SM
+hieratic
+hieroglyph
+hieroglyphic/S
+hieroglyphics/M
+hieroglyphs
+Hieronymus/M
+hie/S
+hifalutin
+Higashiosaka
+Higgins/M
+highball/GSDM
+highborn
+highboy/MS
+highbrow/SM
+highchair/SM
+highfalutin
+Highfield/M
+highhandedness/SM
+highhanded/PY
+highish
+Highlander/SM
+Highlands
+highland/ZSRM
+highlight/GZRDMS
+Highness/M
+highness/MS
+highpoint
+high/PYRT
+highroad/MS
+highs
+hight
+hightail/DGS
+highwayman/M
+highwaymen
+highway/MS
+hijacker/M
+hijack/JZRDGS
+hiker/M
+hike/ZGDSR
+Hilario/M
+hilariousness/MS
+hilarious/YP
+hilarity/MS
+Hilarius/M
+Hilary/M
+Hilbert/M
+Hildagarde/M
+Hildagard/M
+Hilda/M
+Hildebrand/M
+Hildegaard/M
+Hildegarde/M
+Hilde/M
+Hildy/M
+Hillard/M
+Hillary/M
+hillbilly/MS
+Hillcrest/M
+Hillel/M
+hiller/M
+Hillery/M
+hill/GSMDR
+Hilliard/M
+Hilliary/M
+Hillie/M
+Hillier/M
+hilliness/SM
+Hill/M
+hillman
+hillmen
+hillock/SM
+Hillsboro/M
+Hillsdale/M
+hillside/SM
+hilltop/MS
+hillwalking
+Hillyer/M
+Hilly/RM
+hilly/TRP
+hilt/MDGS
+Hilton/M
+Hi/M
+Himalaya/MS
+Himalayan/S
+Himmler/M
+him/S
+himself
+Hinayana/M
+Hinda/M
+Hindemith/M
+Hindenburg/M
+hindered/U
+hinderer/M
+hinder/GRD
+Hindi/M
+hindmost
+hindquarter/SM
+hindrance/SM
+hind/RSZ
+hindsight/SM
+Hinduism/SM
+Hindu/MS
+Hindustani/MS
+Hindustan/M
+Hines/M
+hinger
+hinge's
+hinge/UDSG
+Hinkle/M
+Hinsdale/M
+hinterland/MS
+hinter/M
+hint/GZMDRS
+Hinton/M
+Hinze/M
+hipbone/SM
+hipness/S
+Hipparchus/M
+hipped
+hipper
+hippest
+hippie/MTRS
+hipping/M
+Hippocrates/M
+Hippocratic
+hippodrome/MS
+hippo/MS
+hippopotamus/SM
+hip/PSM
+hippy's
+hipster/MS
+hiragana
+Hiram/M
+hire/AGSD
+hireling/SM
+hirer/SM
+Hirey/M
+hiring/S
+Hirohito/M
+Hiroshi/M
+Hiroshima/M
+Hirsch/M
+hirsuteness/MS
+hirsute/P
+his
+Hispanic/SM
+Hispaniola/M
+hiss/DSRMJG
+hisser/M
+hissing/M
+Hiss/M
+histamine/SM
+histidine/SM
+histochemic
+histochemical
+histochemistry/M
+histogram/MS
+histological
+histologist/MS
+histology/SM
+historian/MS
+historic
+historicalness/M
+historical/PY
+historicism/M
+historicist/M
+historicity/MS
+historiographer/SM
+historiography/MS
+history/MS
+histrionically
+histrionic/S
+histrionics/M
+hist/SDG
+Hitachi/M
+Hitchcock/M
+hitcher/MS
+hitchhike/RSDGZ
+hitch/UGSD
+hither
+hitherto
+Hitler/SM
+hitless
+hit/MS
+hittable
+hitter/SM
+hitting
+Hittite/SM
+HIV
+hive/MGDS
+h'm
+HM
+HMO
+Hmong
+HMS
+hoarder/M
+hoarding/M
+hoard/RDJZSGM
+hoarfrost/SM
+hoariness/MS
+hoar/M
+hoarseness/SM
+hoarse/RTYP
+hoary/TPR
+hoaxer/M
+hoax/GZMDSR
+Hobard/M
+Hobart/M
+hobbed
+Hobbes/M
+hobbing
+hobbit
+hobbler/M
+hobble/ZSRDG
+Hobbs/M
+hobbyhorse/SM
+hobbyist/SM
+hobby/SM
+Hobday/M
+Hobey/M
+hobgoblin/MS
+Hobie/M
+hobnail/GDMS
+hobnobbed
+hobnobbing
+hobnob/S
+Hoboken/M
+hobo/SDMG
+hob/SM
+hoc
+hocker/M
+hockey/SM
+hock/GDRMS
+Hockney/M
+hockshop/SM
+hodge/MS
+Hodge/MS
+hodgepodge/SM
+Hodgkin/M
+ho/DRYZ
+hod/SM
+Hoebart/M
+hoecake/SM
+hoedown/MS
+hoeing
+hoer/M
+hoe/SM
+Hoffa/M
+Hoff/M
+Hoffman/M
+Hofstadter/M
+Hogan/M
+hogan/SM
+Hogarth/M
+hogback/MS
+hogged
+hogger
+hogging
+hoggish/Y
+hogshead/SM
+hog/SM
+hogtie/SD
+hogtying
+hogwash/SM
+Hohenlohe/M
+Hohenstaufen/M
+Hohenzollern/M
+Hohhot/M
+hoister/M
+hoist/GRDS
+hoke/DSG
+hokey/PRT
+hokier
+hokiest
+Hokkaido/M
+hokum/MS
+Hokusai/M
+Holbein/M
+Holbrook/M
+Holcomb/M
+holdall/MS
+Holden/M
+holder/M
+Holder/M
+holding/IS
+holding's
+hold/NRBSJGZ
+holdout/SM
+holdover/SM
+holdup/MS
+hole/MGDS
+holey
+holiday/GRDMS
+Holiday/M
+holidaymaker/S
+holier/U
+Holiness/MS
+holiness/MSU
+holistic
+holistically
+hollandaise
+Hollandaise/M
+Hollander/M
+Holland/RMSZ
+holler/GDS
+Hollerith/M
+Holley/M
+Hollie/M
+Holli/SM
+Hollister/M
+Holloway/M
+hollowness/MS
+hollow/RDYTGSP
+hollowware/M
+Hollyanne/M
+hollyhock/MS
+Holly/M
+holly/SM
+Hollywood/M
+Holman/M
+Holmes
+holmium/MS
+Holm/M
+Holocaust
+holocaust/MS
+Holocene
+hologram/SM
+holograph/GMD
+holographic
+holographs
+holography/MS
+Holstein/MS
+holster/MDSG
+Holst/M
+Holt/M
+Holyoke/M
+holy/SRTP
+holystone/MS
+Holzman/M
+Ho/M
+homage/MGSRD
+homager/M
+hombre/SM
+homburg/SM
+homebody/MS
+homebound
+homeboy/S
+homebuilder/S
+homebuilding
+homebuilt
+homecoming/MS
+home/DSRMYZG
+homegrown
+homeland/SM
+homelessness/SM
+homeless/P
+homelike
+homeliness/SM
+homely/RPT
+homemade
+homemake/JRZG
+homemaker/M
+homemaking/M
+homeomorphic
+homeomorphism/MS
+homeomorph/M
+homeopath
+homeopathic
+homeopaths
+homeopathy/MS
+homeostases
+homeostasis/M
+homeostatic
+homeowner/S
+homeownership
+homepage
+Homere/M
+homer/GDM
+Homeric
+homerists
+Homer/M
+homeroom/MS
+Homerus/M
+homeschooling/S
+homesickness/MS
+homesick/P
+homespun/S
+homesteader/M
+homestead/GZSRDM
+homestretch/SM
+hometown/SM
+homeward
+homeworker/M
+homework/ZSMR
+homeyness/MS
+homey/PS
+homicidal/Y
+homicide/SM
+homier
+homiest
+homiletic/S
+homily/SM
+hominess's
+homing/M
+hominid/MS
+hominy/SM
+Hom/MR
+homogamy/M
+homogenate/MS
+homogeneity/ISM
+homogeneous/PY
+homogenization/MS
+homogenize/DRSGZ
+homogenizer/M
+homograph/M
+homographs
+homological
+homologous
+homologue/M
+homology/MS
+homomorphic
+homomorphism/SM
+homonym/SM
+homophobia/S
+homophobic
+homophone/MS
+homopolymers
+homosexuality/SM
+homosexual/YMS
+homo/SM
+homotopy
+homozygous/Y
+honcho/DSG
+Honda/M
+Hondo/M
+Honduran/S
+Honduras/M
+Honecker/M
+hone/SM
+honestly/E
+honest/RYT
+honesty/ESM
+honeybee/SM
+honeycomb/SDMG
+honeydew/SM
+honey/GSMD
+honeylocust
+Honey/M
+honeymooner/M
+honeymoon/RDMGZS
+honeysuckle/MS
+Honeywell/M
+hong/M
+Honiara/M
+honker/M
+honk/GZSDRM
+honky/SM
+Hon/M
+hon/MDRSZTG
+Honolulu/M
+honorableness/SM
+honorable/PSM
+honorables/U
+honorablies/U
+honorably/UE
+honorarily
+honorarium/SM
+honorary/S
+honored/U
+honoree/S
+honor/ERDBZGS
+honorer/EM
+Honoria/M
+honorific/S
+Honor/M
+honor's
+honors/A
+Honshu/M
+hooch/MS
+hoodedness/M
+hooded/P
+hoodlum/SM
+Hood/M
+hood/MDSG
+hoodoo/DMGS
+hoodwinker/M
+hoodwink/SRDG
+hooey/SM
+hoof/DRMSG
+hoofer/M
+hoofmark/S
+hookah/M
+hookahs
+hookedness/M
+hooked/P
+Hooke/MR
+hooker/M
+Hooker/M
+hookey's
+hook/GZDRMS
+hooks/U
+hookup/SM
+hookworm/MS
+hooky/SRMT
+hooliganism/SM
+hooligan/SM
+hooper/M
+Hooper/M
+hoopla/SM
+hoop/MDRSG
+hooray/SMDG
+hoosegow/MS
+Hoosier/SM
+hootch's
+hootenanny/SM
+hooter/M
+hoot/MDRSGZ
+Hoover/MS
+hooves/M
+hoped/U
+hopefulness/MS
+hopeful/SPY
+hopelessness/SM
+hopeless/YP
+Hope/M
+hoper/M
+hope/SM
+Hopewell/M
+Hopi/SM
+Hopkinsian/M
+Hopkins/M
+hopped
+Hopper/M
+hopper/MS
+hopping/M
+hoppled
+hopples
+hopscotch/MDSG
+hop/SMDRG
+Horace/M
+Horacio/M
+Horatia/M
+Horatio/M
+Horatius/M
+horde/DSGM
+horehound/MS
+horizon/MS
+horizontal/YS
+Hormel/M
+hormonal/Y
+hormone/MS
+Hormuz/M
+hornbeam/M
+hornblende/MS
+Hornblower/M
+hornedness/M
+horned/P
+Horne/M
+hornet/MS
+horn/GDRMS
+horniness/M
+hornless
+hornlike
+Horn/M
+hornpipe/MS
+horny/TRP
+horologic
+horological
+horologist/MS
+horology/MS
+horoscope/MS
+Horowitz/M
+horrendous/Y
+horribleness/SM
+horrible/SP
+horribly
+horridness/M
+horrid/PY
+horrific
+horrifically
+horrify/DSG
+horrifying/Y
+horror/MS
+hors/DSGX
+horseback/MS
+horsedom
+horseflesh/M
+horsefly/MS
+horsehair/SM
+horsehide/SM
+horselaugh/M
+horselaughs
+horseless
+horselike
+horsely
+horseman/M
+horsemanship/MS
+horsemen
+horseplayer/M
+horseplay/SMR
+horsepower/SM
+horseradish/SM
+horse's
+horseshoeing
+horseshoe/MRSD
+horseshoer/M
+horsetail/SM
+horse/UGDS
+horsewhipped
+horsewhipping
+horsewhip/SM
+horsewoman/M
+horsewomen
+horsey
+horsier
+horsiest
+horsing/M
+Horst/M
+hortatory
+Horten/M
+Hortense/M
+Hortensia/M
+horticultural
+horticulture/SM
+horticulturist/SM
+Hort/MN
+Horton/M
+Horus/M
+hosanna/SDG
+Hosea/M
+hose/M
+hosepipe
+hos/GDS
+hosier/MS
+hosiery/SM
+hosp
+hospice/MS
+hospitable/I
+hospitably/I
+hospitality/MS
+hospitality's/I
+hospitalization/MS
+hospitalize/GSD
+hospital/MS
+hostage/MS
+hosteler/M
+hostelry/MS
+hostel/SZGMRD
+hostess/MDSG
+hostile/YS
+hostility/SM
+hostler/MS
+Host/MS
+host/MYDGS
+hotbed/MS
+hotblooded
+hotbox/MS
+hotcake/S
+hotchpotch/M
+hotelier/MS
+hotelman/M
+hotel/MS
+hotfoot/DGS
+hothead/DMS
+hotheadedness/SM
+hotheaded/PY
+hothouse/MGDS
+hotness/MS
+hotplate/SM
+hotpot/M
+hot/PSY
+hotrod
+hotshot/S
+hotted
+Hottentot/SM
+hotter
+hottest
+hotting
+Houdaille/M
+Houdini/M
+hough/M
+hounder/M
+hounding/M
+hound/MRDSG
+hourglass/MS
+houri/MS
+hourly/S
+hour/YMS
+house/ASDG
+houseboat/SM
+housebound
+houseboy/SM
+housebreaker/M
+housebreaking/M
+housebreak/JSRZG
+housebroke
+housebroken
+housebuilding
+housecleaning/M
+houseclean/JDSG
+housecoat/MS
+housefly/MS
+houseful/SM
+householder/M
+household/ZRMS
+househusband/S
+housekeeper/M
+housekeeping/M
+housekeep/JRGZ
+houselights
+House/M
+housemaid/MS
+houseman/M
+housemen
+housemother/MS
+housemoving
+houseparent/SM
+houseplant/S
+houser
+house's
+housetop/MS
+housewares
+housewarming/MS
+housewifeliness/M
+housewifely/P
+housewife/YM
+housewives
+houseworker/M
+housework/ZSMR
+housing/MS
+Housman/M
+Houston/M
+Houyhnhnm/M
+HOV
+hovel/GSMD
+hovercraft/M
+hoverer/M
+hover/GRD
+hove/ZR
+Howard/M
+howbeit
+howdah/M
+howdahs
+howdy/GSD
+Howell/MS
+Howe/M
+however
+Howey/M
+Howie/M
+howitzer/MS
+howler/M
+howl/GZSMDR
+Howrah/M
+how/SM
+howsoever
+hoyden/DMGS
+hoydenish
+Hoyle/SM
+hoy/M
+Hoyt/M
+hp
+HP
+HQ
+hr
+HR
+HRH
+Hrothgar/M
+hrs
+h's
+H's
+HS
+HST
+ht
+HTML
+Hts/M
+HTTP
+Huang/M
+huarache/SM
+hubba
+Hubbard/M
+Hubble/M
+hubbub/SM
+hubby/SM
+hubcap/SM
+Huber/M
+Hube/RM
+Hubert/M
+Huberto/M
+Hubey/M
+Hubie/M
+hub/MS
+hubris/SM
+huckleberry/SM
+Huck/M
+huckster/SGMD
+HUD
+Huddersfield/M
+huddler/M
+huddle/RSDMG
+Hudson/M
+hue/MDS
+Huerta/M
+Huey/M
+huffily
+huffiness/SM
+Huff/M
+Huffman/M
+huff/SGDM
+huffy/TRP
+hugeness/MS
+huge/YP
+hugged
+hugger
+hugging/S
+Huggins
+Hughie/M
+Hugh/MS
+Hugibert/M
+Hugo/M
+hug/RTS
+Huguenot/SM
+Hugues/M
+huh
+huhs
+Hui/M
+Huitzilopitchli/M
+hula/MDSG
+Hulda/M
+hulk/GDMS
+hullabaloo/SM
+huller/M
+hulling/M
+Hull/M
+hull/MDRGZS
+hullo/GSDM
+humane/IY
+humaneness/SM
+humaner
+humanest
+human/IPY
+humanism/SM
+humanistic
+humanist/SM
+humanitarianism/SM
+humanitarian/S
+humanity/ISM
+humanization/CSM
+humanized/C
+humanizer/M
+humanize/RSDZG
+humanizes/IAC
+humanizing/C
+humankind/M
+humannesses
+humanness/IM
+humanoid/S
+humans
+Humbert/M
+Humberto/M
+humbleness/SM
+humble/TZGPRSDJ
+humbly
+Humboldt/M
+humbugged
+humbugging
+humbug/MS
+humdinger/MS
+humdrum/S
+Hume/M
+humeral/S
+humeri
+humerus/M
+Humfrey/M
+Humfrid/M
+Humfried/M
+humidification/MC
+humidifier/CM
+humidify/RSDCXGNZ
+humidistat/M
+humidity/MS
+humidor/MS
+humid/Y
+humiliate/SDXNG
+humiliating/Y
+humiliation/M
+humility/MS
+hummed
+Hummel/M
+hummer/SM
+humming
+hummingbird/SM
+hummock/MDSG
+hummocky
+hummus/S
+humongous
+humored/U
+humorist/MS
+humorlessness/MS
+humorless/PY
+humorousness/MS
+humorous/YP
+humor/RDMZGS
+humpback/SMD
+hump/GSMD
+humph/DG
+Humphrey/SM
+humphs
+Humpty/M
+hum/S
+humus/SM
+Humvee
+hunchback/DSM
+hunch/GMSD
+hundredfold/S
+hundred/SHRM
+hundredths
+hundredweight/SM
+Hunfredo/M
+hung/A
+Hungarian/MS
+Hungary/M
+hunger/SDMG
+Hung/M
+hungover
+hungrily
+hungriness/SM
+hungry/RTP
+hunker/DG
+hunky/RST
+hunk/ZRMS
+Hun/MS
+hunter/M
+Hunter/M
+hunt/GZJDRS
+hunting/M
+Huntington/M
+Huntlee/M
+Huntley/M
+Hunt/MR
+huntress/MS
+huntsman/M
+huntsmen
+Huntsville/M
+hurdle/JMZGRSD
+hurdler/M
+hurl/DRGZJS
+Hurlee/M
+Hurleigh/M
+hurler/M
+Hurley/M
+hurling/M
+Huron/SM
+hurray/SDG
+hurricane/MS
+hurriedness/M
+hurried/UY
+hurry/RSDG
+Hurst/M
+hurter/M
+hurtfulness/MS
+hurtful/PY
+hurting/Y
+hurtle/SDG
+hurts
+hurt/U
+Hurwitz/M
+Hus
+Husain's
+husbander/M
+husband/GSDRYM
+husbandman/M
+husbandmen
+husbandry/SM
+Husein/M
+hush/DSG
+husker/M
+huskily
+huskiness/MS
+husking/M
+husk/SGZDRM
+husky/RSPT
+hussar/MS
+Hussein/M
+Husserl/M
+hussy/SM
+hustings/M
+hustler/M
+hustle/RSDZG
+Huston/M
+Hutchins/M
+Hutchinson/M
+Hutchison/M
+hutch/MSDG
+hut/MS
+hutted
+hutting
+Hutton/M
+Hutu/M
+Huxley/M
+Huygens/M
+huzzah/GD
+huzzahs
+hwy
+Hyacintha/M
+Hyacinthe/M
+Hyacinthia/M
+Hyacinthie/M
+hyacinth/M
+Hyacinth/M
+hyacinths
+Hyades
+hyaena's
+Hyannis/M
+Hyatt/M
+hybridism/SM
+hybridization/S
+hybridize/GSD
+hybrid/MS
+Hyde/M
+Hyderabad/M
+Hydra/M
+hydra/MS
+hydrangea/SM
+hydrant/SM
+hydrate/CSDNGX
+hydrate's
+hydration/MC
+hydraulically
+hydraulicked
+hydraulicking
+hydraulic/S
+hydraulics/M
+hydrazine/M
+hydride/MS
+hydrocarbon/SM
+hydrocephali
+hydrocephalus/MS
+hydrochemistry
+hydrochloric
+hydrochloride/M
+hydrodynamical
+hydrodynamic/S
+hydrodynamics/M
+hydroelectric
+hydroelectrically
+hydroelectricity/SM
+hydrofluoric
+hydrofoil/MS
+hydrogenate/CDSGN
+hydrogenate's
+hydrogenation/MC
+hydrogenations
+hydrogen/MS
+hydrogenous
+hydrological/Y
+hydrologist/MS
+hydrology/SM
+hydrolysis/M
+hydrolyzed/U
+hydrolyze/GSD
+hydromagnetic
+hydromechanics/M
+hydrometer/SM
+hydrometry/MS
+hydrophilic
+hydrophobia/SM
+hydrophobic
+hydrophone/SM
+hydroplane/DSGM
+hydroponic/S
+hydroponics/M
+hydro/SM
+hydrosphere/MS
+hydrostatic/S
+hydrostatics/M
+hydrotherapy/SM
+hydrothermal/Y
+hydrous
+hydroxide/MS
+hydroxy
+hydroxylate/N
+hydroxyl/SM
+hydroxyzine/M
+hyena/MS
+hygiene/MS
+hygienically
+hygienic/S
+hygienics/M
+hygienist/MS
+hygrometer/SM
+hygroscopic
+hying
+Hy/M
+Hyman/M
+hymeneal/S
+Hymen/M
+hymen/MS
+Hymie/M
+hymnal/SM
+hymnbook/S
+hymn/GSDM
+Hynda/M
+hype/MZGDSR
+hyperactive/S
+hyperactivity/SM
+hyperbola/MS
+hyperbole/MS
+hyperbolic
+hyperbolically
+hyperboloidal
+hyperboloid/SM
+hypercellularity
+hypercritical/Y
+hypercube/MS
+hyperemia/M
+hyperemic
+hyperfine
+hypergamous/Y
+hypergamy/M
+hyperglycemia/MS
+hyperinflation
+Hyperion/M
+hypermarket/SM
+hypermedia/S
+hyperplane/SM
+hyperplasia/M
+hypersensitiveness/MS
+hypersensitive/P
+hypersensitivity/MS
+hypersonic
+hyperspace/M
+hypersphere/M
+hypertension/MS
+hypertensive/S
+hypertext/SM
+hyperthyroid
+hyperthyroidism/MS
+hypertrophy/MSDG
+hypervelocity
+hyperventilate/XSDGN
+hyperventilation/M
+hyphenated/U
+hyphenate/NGXSD
+hyphenation/M
+hyphen/DMGS
+hypnoses
+hypnosis/M
+hypnotherapy/SM
+hypnotically
+hypnotic/S
+hypnotism/MS
+hypnotist/SM
+hypnotize/SDG
+hypoactive
+hypoallergenic
+hypocellularity
+hypochondriac/SM
+hypochondria/MS
+hypocrisy/SM
+hypocrite/MS
+hypocritical/Y
+hypodermic/S
+hypo/DMSG
+hypoglycemia/SM
+hypoglycemic/S
+hypophyseal
+hypophysectomized
+hypotenuse/MS
+hypothalami
+hypothalamic
+hypothalamically
+hypothalamus/M
+hypothermia/SM
+hypotheses
+hypothesis/M
+hypothesizer/M
+hypothesize/ZGRSD
+hypothetic
+hypothetical/Y
+hypothyroid
+hypothyroidism/SM
+hypoxia/M
+hyssop/MS
+hysterectomy/MS
+hysteresis/M
+hysteria/SM
+hysterical/YU
+hysteric/SM
+Hyundai/M
+Hz
+i
+I
+IA
+Iaccoca/M
+Iago/M
+Iain/M
+Ia/M
+iambi
+iambic/S
+iamb/MS
+iambus/SM
+Ian/M
+Ianthe/M
+Ibadan/M
+Ibbie/M
+Ibby/M
+Iberia/M
+Iberian/MS
+Ibero/M
+ibex/MS
+ibid
+ibidem
+ibis/SM
+IBM/M
+Ibo/M
+Ibrahim/M
+Ibsen/M
+ibuprofen/S
+Icarus/M
+ICBM/S
+ICC
+iceberg/SM
+iceboat/MS
+icebound
+icebox/MS
+icebreaker/SM
+icecap/SM
+ice/GDSC
+Icelander/M
+Icelandic
+Iceland/MRZ
+Ice/M
+iceman/M
+icemen
+icepack
+icepick/S
+ice's
+Ichabod/M
+ichneumon/M
+ichthyologist/MS
+ichthyology/MS
+icicle/SM
+icily
+iciness/SM
+icing/MS
+icky/RT
+iconic
+icon/MS
+iconoclasm/MS
+iconoclastic
+iconoclast/MS
+iconography/MS
+icosahedra
+icosahedral
+icosahedron/M
+ictus/SM
+ICU
+icy/RPT
+I'd
+ID
+Idahoan/S
+Idahoes
+Idaho/MS
+Idalia/M
+Idalina/M
+Idaline/M
+Ida/M
+idealism/MS
+idealistic
+idealistically
+idealist/MS
+idealization/MS
+idealized/U
+idealize/GDRSZ
+idealizer/M
+ideal/MYS
+idealogical
+idea/SM
+ideate/SN
+ideation/M
+Idelle/M
+Idell/M
+idem
+idempotent/S
+identicalness/M
+identical/YP
+identifiability
+identifiable/U
+identifiably
+identification/M
+identified/U
+identifier/M
+identify/XZNSRDG
+identity/SM
+ideogram/MS
+ideographic
+ideograph/M
+ideographs
+ideological/Y
+ideologist/SM
+ideologue/S
+ideology/SM
+ides
+Idette/M
+idiocy/MS
+idiolect/M
+idiomatically
+idiomatic/P
+idiom/MS
+idiopathic
+idiosyncrasy/SM
+idiosyncratic
+idiosyncratically
+idiotic
+idiotically
+idiot/MS
+idleness/MS
+idle/PZTGDSR
+idler/M
+id/MY
+idolater/MS
+idolatress/S
+idolatrous
+idolatry/SM
+idolization/SM
+idolized/U
+idolizer/M
+idolize/ZGDRS
+idol/MS
+ids
+IDs
+idyllic
+idyllically
+idyll/MS
+IE
+IEEE
+Ieyasu/M
+if
+iffiness/S
+iffy/TPR
+Ifni/M
+ifs
+Iggie/M
+Iggy/M
+igloo/MS
+Ignace/M
+Ignacio/M
+Ignacius/M
+Ignatius/M
+Ignazio/M
+Ignaz/M
+igneous
+ignitable
+ignite/ASDG
+igniter/M
+ignition/MS
+ignobleness/M
+ignoble/P
+ignobly
+ignominious/Y
+ignominy/MS
+ignoramus/SM
+ignorance/MS
+ignorantness/M
+ignorant/SPY
+ignorer/M
+ignore/SRDGB
+Igor/M
+iguana/MS
+Iguassu/M
+ii
+iii
+Ijsselmeer/M
+Ike/M
+Ikey/M
+Ikhnaton/M
+ikon's
+IL
+Ilaire/M
+Ila/M
+Ilario/M
+ilea
+Ileana/M
+Ileane/M
+ileitides
+ileitis/M
+Ilene/M
+ileum/M
+ilia
+iliac
+Iliad/MS
+Ilise/M
+ilium/M
+Ilka/M
+ilk/MS
+I'll
+Illa/M
+illegality/MS
+illegal/YS
+illegibility/MS
+illegible
+illegibly
+illegitimacy/SM
+illegitimate/SDGY
+illiberality/SM
+illiberal/Y
+illicitness/MS
+illicit/YP
+illimitableness/M
+illimitable/P
+Illinoisan/MS
+Illinois/M
+illiquid
+illiteracy/MS
+illiterateness/M
+illiterate/PSY
+Ill/M
+illness/MS
+illogicality/SM
+illogicalness/M
+illogical/PY
+illogic/M
+ill/PS
+illume/DG
+illuminate/XSDVNG
+Illuminati
+illuminatingly
+illuminating/U
+illumination/M
+illumine/BGSD
+illusionary
+illusion/ES
+illusionist/MS
+illusion's
+illusiveness/M
+illusive/PY
+illusoriness/M
+illusory/P
+illustrated/U
+illustrate/VGNSDX
+illustration/M
+illustrative/Y
+illustrator/SM
+illustriousness/SM
+illustrious/PY
+illus/V
+illy
+Ilona/M
+Ilsa/M
+Ilse/M
+Ilysa/M
+Ilyse/M
+Ilyssa/M
+Ilyushin/M
+I'm
+image/DSGM
+Imagen/M
+imagery/MS
+imaginableness
+imaginable/U
+imaginably/U
+imaginariness/M
+imaginary/PS
+imagination/MS
+imaginativeness/M
+imaginative/UY
+imagined/U
+imaginer/M
+imagine/RSDJBG
+imagoes
+imago/M
+imam/MS
+imbalance/SDM
+imbecile/YMS
+imbecilic
+imbecility/MS
+imbiber/M
+imbibe/ZRSDG
+imbrication/SM
+Imbrium/M
+imbroglio/MS
+imbruing
+imbue/GDS
+Imelda/M
+IMF
+IMHO
+imitable/I
+imitate/SDVNGX
+imitation/M
+imitativeness/MS
+imitative/YP
+imitator/SM
+immaculateness/SM
+immaculate/YP
+immanence/S
+immanency/MS
+immanent/Y
+Immanuel/M
+immateriality/MS
+immaterialness/MS
+immaterial/PY
+immatureness/M
+immature/SPY
+immaturity/MS
+immeasurableness/M
+immeasurable/P
+immeasurably
+immediacy/MS
+immediateness/SM
+immediate/YP
+immemorial/Y
+immenseness/M
+immense/PRTY
+immensity/MS
+immerse/RSDXNG
+immersible
+immersion/M
+immigrant/SM
+immigrate/NGSDX
+immigration/M
+imminence/SM
+imminentness/M
+imminent/YP
+immobile
+immobility/MS
+immobilization/MS
+immobilize/DSRG
+immoderateness/M
+immoderate/NYP
+immoderation/M
+immodest/Y
+immodesty/SM
+immolate/SDNGX
+immolation/M
+immorality/MS
+immoral/Y
+immortality/SM
+immortalized/U
+immortalize/GDS
+immortal/SY
+immovability/SM
+immovableness/M
+immovable/PS
+immovably
+immune/S
+immunity/SM
+immunization/MS
+immunize/GSD
+immunoassay/M
+immunodeficiency/S
+immunodeficient
+immunologic
+immunological/Y
+immunologist/SM
+immunology/MS
+immure/GSD
+immutability/MS
+immutableness/M
+immutable/P
+immutably
+IMNSHO
+IMO
+Imogene/M
+Imogen/M
+Imojean/M
+impaction/SM
+impactor/SM
+impact/VGMRDS
+impaired/U
+impairer/M
+impair/LGRDS
+impairment/SM
+impala/MS
+impale/GLRSD
+impalement/SM
+impaler/M
+impalpable
+impalpably
+impanel/DGS
+impartation/M
+impart/GDS
+impartiality/SM
+impartial/Y
+impassableness/M
+impassable/P
+impassably
+impasse/SXBMVN
+impassibility/SM
+impassible
+impassibly
+impassion/DG
+impassioned/U
+impassiveness/MS
+impassive/YP
+impassivity/MS
+impasto/SM
+impatience/SM
+impatiens/M
+impatient/Y
+impeachable/U
+impeach/DRSZGLB
+impeacher/M
+impeachment/MS
+impeccability/SM
+impeccable/S
+impeccably
+impecuniousness/MS
+impecunious/PY
+impedance/MS
+impeded/U
+impeder/M
+impede/S
+imped/GRD
+impedimenta
+impediment/SM
+impelled
+impeller/MS
+impelling
+impel/S
+impend/DGS
+impenetrability/MS
+impenetrableness/M
+impenetrable/P
+impenetrably
+impenitence/MS
+impenitent/YS
+imperativeness/M
+imperative/PSY
+imperceivable
+imperceptibility/MS
+imperceptible
+imperceptibly
+imperceptive
+imperf
+imperfectability
+imperfection/MS
+imperfectness/SM
+imperfect/YSVP
+imperialism/MS
+imperialistic
+imperialistically
+imperialist/SM
+imperial/YS
+imperil/GSLD
+imperilment/SM
+imperiousness/MS
+imperious/YP
+imperishableness/M
+imperishable/SP
+imperishably
+impermanence/MS
+impermanent/Y
+impermeability/SM
+impermeableness/M
+impermeable/P
+impermeably
+impermissible
+impersonality/M
+impersonalized
+impersonal/Y
+impersonate/XGNDS
+impersonation/M
+impersonator/SM
+impertinence/SM
+impertinent/YS
+imperturbability/SM
+imperturbable
+imperturbably
+imperviousness/M
+impervious/PY
+impetigo/MS
+impetuosity/MS
+impetuousness/MS
+impetuous/YP
+impetus/MS
+impiety/MS
+impinge/LS
+impingement/MS
+imping/GD
+impiousness/SM
+impious/PY
+impishness/MS
+impish/YP
+implacability/SM
+implacableness/M
+implacable/P
+implacably
+implantation/SM
+implant/BGSDR
+implanter/M
+implausibility/MS
+implausible
+implausibly
+implementability
+implementable/U
+implementation/A
+implementations
+implementation's
+implemented/AU
+implementer/M
+implementing/A
+implementor/MS
+implement/SMRDGZB
+implicant/SM
+implicate/VGSD
+implication/M
+implicative/PY
+implicitness/SM
+implicit/YP
+implied/Y
+implode/GSD
+implore/GSD
+imploring/Y
+implosion/SM
+implosive/S
+imply/GNSDX
+impoliteness/MS
+impolite/YP
+impoliticness/M
+impolitic/PY
+imponderableness/M
+imponderable/PS
+importance/SM
+important/Y
+importation/MS
+importer/M
+importing/A
+import/SZGBRD
+importunateness/M
+importunate/PYGDS
+importuner/M
+importune/SRDZYG
+importunity/SM
+imposable
+impose/ASDG
+imposer/SM
+imposingly
+imposing/U
+imposition/SM
+impossibility/SM
+impossibleness/M
+impossible/PS
+impossibly
+imposter's
+impostor/SM
+impost/SGMD
+imposture/SM
+impotence/MS
+impotency/S
+impotent/SY
+impound/GDS
+impoundments
+impoverisher/M
+impoverish/LGDRS
+impoverishment/SM
+impracticableness/M
+impracticable/P
+impracticably
+impracticality/SM
+impracticalness/M
+impractical/PY
+imprecate/NGXSD
+imprecation/M
+impreciseness/MS
+imprecise/PYXN
+imprecision/M
+impregnability/MS
+impregnableness/M
+impregnable/P
+impregnably
+impregnate/DSXNG
+impregnation/M
+impresario/SM
+impress/DRSGVL
+impressed/U
+impresser/M
+impressibility/MS
+impressible
+impressionability/SM
+impressionableness/M
+impressionable/P
+impression/BMS
+impressionism/SM
+impressionistic
+impressionist/MS
+impressiveness/MS
+impressive/YP
+impressment/M
+imprimatur/SM
+imprinter/M
+imprinting/M
+imprint/SZDRGM
+imprison/GLDS
+imprisonment/MS
+improbability/MS
+improbableness/M
+improbable/P
+improbably
+impromptu/S
+improperness/M
+improper/PY
+impropitious
+impropriety/SM
+improved/U
+improvement/MS
+improver/M
+improve/SRDGBL
+improvidence/SM
+improvident/Y
+improvisational
+improvisation/MS
+improvisatory
+improviser/M
+improvise/RSDZG
+imprudence/SM
+imprudent/Y
+imp/SGMDRY
+impudence/MS
+impudent/Y
+impugner/M
+impugn/SRDZGB
+impulse/XMVGNSD
+impulsion/M
+impulsiveness/MS
+impulsive/YP
+impunity/SM
+impureness/M
+impure/RPTY
+impurity/MS
+imputation/SM
+impute/SDBG
+Imus/M
+IN
+inaction
+inactive
+inadequate/S
+inadvertence/MS
+inadvertent/Y
+inalienability/MS
+inalienably
+inalterableness/M
+inalterable/P
+Ina/M
+inamorata/MS
+inane/SRPYT
+inanimateness/S
+inanimate/P
+inanity/MS
+inappeasable
+inappropriate/P
+inarticulate/P
+in/AS
+inasmuch
+inaugural/S
+inaugurate/XSDNG
+inauguration/M
+inauthenticity
+inbound/G
+inbred/S
+inbreed/JG
+incalculableness/M
+incalculably
+incandescence/SM
+incandescent/YS
+incant
+incantation/SM
+incantatory
+incapable/S
+incapacitate/GNSD
+incapacitation/M
+incarcerate/XGNDS
+incarceration/M
+incarnadine/GDS
+incarnate/AGSDNX
+incarnation/AM
+Inca/SM
+incendiary/S
+incense/MGDS
+incentive/ESM
+incentively
+incept/DGVS
+inception/MS
+inceptive/Y
+inceptor/M
+incessant/Y
+incest/SM
+incestuousness/MS
+incestuous/PY
+inch/GMDS
+inchoate/DSG
+Inchon/M
+inchworm/MS
+incidence/MS
+incidental/YS
+incident/SM
+incinerate/XNGSD
+incineration/M
+incinerator/SM
+incipience/SM
+incipiency/M
+incipient/Y
+incise/SDVGNX
+incision/M
+incisiveness/MS
+incisive/YP
+incisor/MS
+incitement/MS
+inciter/M
+incite/RZL
+incl
+inclination/ESM
+incline/EGSD
+incliner/M
+inclining/M
+include/GDS
+inclusion/MS
+inclusiveness/MS
+inclusive/PY
+Inc/M
+incognito/S
+incoherency/M
+income/M
+incommode/DG
+incommunicado
+incomparable
+incompetent/MS
+incomplete/P
+inconceivability/MS
+inconceivableness/M
+inconceivable/P
+incondensable
+incongruousness/S
+inconsiderableness/M
+inconsiderable/P
+inconsistence
+inconsolableness/M
+inconsolable/P
+inconsolably
+incontestability/SM
+incontestably
+incontrovertibly
+inconvenience/DG
+inconvertibility
+inconvertible
+incorporable
+incorporated/UE
+incorporate/GASDXN
+incorrect/P
+incorrigibility/MS
+incorrigibleness/M
+incorrigible/SP
+incorrigibly
+incorruptible/S
+incorruptibly
+increase/JB
+increaser/M
+increasing/Y
+incredibleness/M
+incredible/P
+incremental/Y
+incrementation
+increment/DMGS
+incriminate/XNGSD
+incrimination/M
+incriminatory
+incrustation/SM
+inc/T
+incubate/XNGVDS
+incubation/M
+incubator/MS
+incubus/MS
+inculcate/SDGNX
+inculcation/M
+inculpate/SDG
+incumbency/MS
+incumbent/S
+incunabula
+incunabulum
+incurable/S
+incurious
+incursion/SM
+ind
+indebtedness/SM
+indebted/P
+indefatigableness/M
+indefatigable/P
+indefatigably
+indefeasible
+indefeasibly
+indefinableness/M
+indefinable/PS
+indefinite/S
+indelible
+indelibly
+indemnification/M
+indemnify/NXSDG
+indemnity/SM
+indentation/SM
+indented/U
+indenter/M
+indention/SM
+indent/R
+indenture/DG
+Independence/M
+indescribableness/M
+indescribable/PS
+indescribably
+indestructibleness/M
+indestructible/P
+indestructibly
+indeterminably
+indeterminacy/MS
+indeterminism
+indexation/S
+indexer/M
+index/MRDZGB
+India/M
+Indiana/M
+Indianan/S
+Indianapolis/M
+Indianian/S
+Indian/SM
+indicant/MS
+indicate/DSNGVX
+indication/M
+indicative/SY
+indicator/MS
+indices's
+indicter/M
+indictment/SM
+indict/SGLBDR
+indifference
+indigence/MS
+indigenousness/M
+indigenous/YP
+indigent/SY
+indigestible/S
+indignant/Y
+indignation/MS
+indigo/SM
+Indira/M
+indirect/PG
+indiscreet/P
+indiscriminateness/M
+indiscriminate/PY
+indispensability/MS
+indispensableness/M
+indispensable/SP
+indispensably
+indisputableness/M
+indisputable/P
+indissolubleness/M
+indissoluble/P
+indissolubly
+indistinguishableness/M
+indistinguishable/P
+indite/SDG
+indium/SM
+individualism/MS
+individualistic
+individualistically
+individualist/MS
+individuality/MS
+individualization/SM
+individualize/DRSGZ
+individualized/U
+individualizer/M
+individualizes/U
+individualizing/Y
+individual/YMS
+individuate/DSXGN
+individuation/M
+indivisibleness/M
+indivisible/SP
+indivisibly
+Ind/M
+Indochina/M
+Indochinese
+indoctrinate/GNXSD
+indoctrination/M
+indoctrinator/SM
+indolence/SM
+indolent/Y
+indomitableness/M
+indomitable/P
+indomitably
+Indonesia/M
+Indonesian/S
+indoor
+Indore/M
+Indra/M
+indubitableness/M
+indubitable/P
+indubitably
+inducement/MS
+inducer/M
+induce/ZGLSRD
+inducible
+inductance/MS
+inductee/SM
+induct/GV
+induction/SM
+inductiveness/M
+inductive/PY
+inductor/MS
+indulge/GDRS
+indulgence/SDGM
+indulgent/Y
+indulger/M
+Indus/M
+industrialism/MS
+industrialist/MS
+industrialization/MS
+industrialized/U
+industrialize/SDG
+industrial/SY
+industriousness/SM
+industrious/YP
+industry/SM
+Indy/SM
+inebriate/NGSDX
+inebriation/M
+inedible
+ineducable
+ineffability/MS
+ineffableness/M
+ineffable/P
+ineffably
+inelastic
+ineligibly
+ineluctable
+ineluctably
+ineptitude/SM
+ineptness/MS
+inept/YP
+inequivalent
+inerrant
+inertial/Y
+inertia/SM
+inertness/MS
+inert/SPY
+Ines
+inescapably
+Inesita/M
+Inessa/M
+inestimably
+inevitability/MS
+inevitableness/M
+inevitable/P
+inevitably
+inexact/P
+inexhaustibleness/M
+inexhaustible/P
+inexhaustibly
+inexorability/M
+inexorableness/M
+inexorable/P
+inexorably
+inexpedience/M
+inexplicableness/M
+inexplicable/P
+inexplicably
+inexplicit
+inexpressibility/M
+inexpressibleness/M
+inexpressible/PS
+inextricably
+Inez/M
+infamous
+infamy/SM
+infancy/M
+infanticide/MS
+infantile
+infant/MS
+infantryman/M
+infantrymen
+infantry/SM
+infarction/SM
+infarct/SM
+infatuate/XNGSD
+infatuation/M
+infauna
+infected/U
+infecter
+infect/ESGDA
+infection/EASM
+infectiousness/MS
+infectious/PY
+infective
+infer/B
+inference/GMSR
+inferential/Y
+inferiority/MS
+inferior/SMY
+infernal/Y
+inferno/MS
+inferred
+inferring
+infertile
+infestation/MS
+infester/M
+infest/GSDR
+infidel/SM
+infighting/M
+infill/MG
+infiltrate/V
+infiltrator/MS
+infinitesimal/SY
+infinite/V
+infinitival
+infinitive/YMS
+infinitude/MS
+infinitum
+infinity/SM
+infirmary/SM
+infirmity/SM
+infix/M
+inflammableness/M
+inflammable/P
+inflammation/MS
+inflammatory
+inflatable/MS
+inflate/NGBDRSX
+inflater/M
+inflationary
+inflation/ESM
+inflect/GVDS
+inflectional/Y
+inflection/SM
+inflexibleness/M
+inflexible/P
+inflexion/SM
+inflict/DRSGV
+inflicter/M
+infliction/SM
+inflow/M
+influenced/U
+influencer/M
+influence/SRDGM
+influent
+influential/SY
+influenza/MS
+infomercial/S
+Informatica/M
+informatics
+informational
+information/ES
+informativeness/S
+informative/UY
+informatory
+informed/U
+informer/M
+info/SM
+infotainment/S
+infra
+infrared/SM
+infrasonic
+infrastructural
+infrastructure/MS
+infrequence/S
+infringe/LR
+infringement/SM
+infringer/M
+infuriate/GNYSD
+infuriating/Y
+infuriation/M
+infuser/M
+infuse/RZ
+infusibleness/M
+infusible/P
+inf/ZT
+Ingaberg/M
+Ingaborg/M
+Inga/M
+Ingamar/M
+Ingar/M
+Ingeberg/M
+Ingeborg/M
+Ingelbert/M
+Ingemar/M
+ingeniousness/MS
+ingenious/YP
+ingénue/S
+ingenuity/SM
+ingenuous/EY
+ingenuousness/MS
+Inger/M
+Inge/RM
+Ingersoll/M
+ingest/DGVS
+ingestible
+ingestion/SM
+Inglebert/M
+inglenook/MS
+Inglewood/M
+Inglis/M
+Ingmar/M
+ingoing
+ingot/SMDG
+ingrained/Y
+Ingra/M
+Ingram/M
+ingrate/M
+ingratiate/DSGNX
+ingratiating/Y
+ingratiation/M
+ingredient/SM
+Ingres/M
+ingression/M
+ingress/MS
+Ingrid/M
+Ingrim/M
+ingrown/P
+inguinal
+Ingunna/M
+inhabitable/U
+inhabitance
+inhabited/U
+inhabiter/M
+inhabit/R
+inhalant/S
+inhalation/SM
+inhalator/SM
+inhale/Z
+inhere/DG
+inherent/Y
+inheritableness/M
+inheritable/P
+inheritance/EMS
+inherit/BDSG
+inherited/E
+inheriting/E
+inheritor/S
+inheritress/MS
+inheritrix/MS
+inherits/E
+inhibit/DVGS
+inhibited/U
+inhibiter's
+inhibition/MS
+inhibitor/MS
+inhibitory
+inhomogeneous
+inhospitableness/M
+inhospitable/P
+inhospitality
+Inigo/M
+inimical/Y
+inimitableness/M
+inimitable/P
+inimitably
+inion
+iniquitousness/M
+iniquitous/PY
+iniquity/MS
+initialer/M
+initial/GSPRDY
+initialization/A
+initializations
+initialization's
+initialize/ASDG
+initialized/U
+initializer/S
+initiates
+initiate/UD
+initiating
+initiation/SM
+initiative/SM
+initiator/MS
+initiatory
+injectable/U
+inject/GVSDB
+injection/MS
+injector/SM
+injunctive
+injured/U
+injurer/M
+injure/SRDZG
+injuriousness/M
+injurious/YP
+inkblot/SM
+inker/M
+inkiness/MS
+inkling/SM
+inkstand/SM
+inkwell/SM
+inky/TP
+ink/ZDRJ
+inland
+inlander/M
+inlay/RG
+inletting
+inly/G
+inmost
+Inna/M
+innards
+innateness/SM
+innate/YP
+innermost/S
+innersole/S
+innerspring
+innervate/GNSDX
+innervation/M
+inner/Y
+inning/M
+Innis/M
+innkeeper/MS
+innocence/SM
+Innocent/M
+innocent/SYRT
+innocuousness/MS
+innocuous/PY
+innovate/SDVNGX
+innovation/M
+innovative/P
+innovator/MS
+innovatory
+Innsbruck/M
+innuendo/MDGS
+innumerability/M
+innumerableness/M
+innumerable/P
+innumerably
+innumerate
+inn/ZGDRSJ
+inoculate/ASDG
+inoculation/MS
+inoculative
+inoffensive/P
+Inonu/M
+inopportuneness/M
+inopportune/P
+inordinateness/M
+inordinate/PY
+inorganic
+inpatient
+In/PM
+input/MRDG
+inquirer/M
+inquire/ZR
+inquiring/Y
+inquiry/MS
+inquisitional
+inquisition/MS
+Inquisition/MS
+inquisitiveness/MS
+inquisitive/YP
+inquisitorial/Y
+inquisitor/MS
+INRI
+inrush/M
+ins
+INS
+insalubrious
+insanitary
+insatiability/MS
+insatiableness/M
+insatiable/P
+insatiably
+inscribe/Z
+inscription/SM
+inscrutability/SM
+inscrutableness/SM
+inscrutable/P
+inscrutably
+inseam
+insecticidal
+insecticide/MS
+insectivore/SM
+insectivorous
+insecureness/M
+insecure/P
+inseminate/NGXSD
+insemination/M
+insensateness/M
+insensate/P
+insensible/P
+insentient
+inseparable/S
+insert/ADSG
+inserter/M
+insertion/AMS
+insetting
+inshore
+insider/M
+inside/Z
+insidiousness/MS
+insidious/YP
+insightful/Y
+insigne's
+insignia/SM
+insignificant
+insinuate/VNGXSD
+insinuating/Y
+insinuation/M
+insinuator/SM
+insipidity/MS
+insipid/Y
+insistence/SM
+insistent/Y
+insisting/Y
+insist/SGD
+insociable
+insofar
+insole/M
+insolence/SM
+insolent/YS
+insolubleness/M
+insoluble/P
+insolubly
+insomniac/S
+insomnia/MS
+insomuch
+insouciance/SM
+insouciant/Y
+inspect/AGSD
+inspection/SM
+inspective
+inspectorate/MS
+inspector/SM
+inspirational/Y
+inspiration/MS
+inspired/U
+inspire/R
+inspirer/M
+inspiring/U
+inspirit/DG
+Inst
+installable
+install/ADRSG
+installation/SM
+installer/MS
+installment/MS
+instance/GD
+instantaneousness/M
+instantaneous/PY
+instantiated/U
+instantiate/SDXNG
+instantiation/M
+instant/SRYMP
+instate/AGSD
+inst/B
+instead
+instigate/XSDVGN
+instigation/M
+instigator/SM
+instillation/SM
+instinctive/Y
+instinctual
+instinct/VMS
+instituter/M
+institutes/M
+institute/ZXVGNSRD
+institutionalism/M
+institutionalist/M
+institutionalization/SM
+institutionalize/GDS
+institutional/Y
+institution/AM
+institutor's
+instr
+instruct/DSVG
+instructed/U
+instructional
+instruction/MS
+instructiveness/M
+instructive/PY
+instructor/MS
+instrumentalist/MS
+instrumentality/SM
+instrumental/SY
+instrumentation/SM
+instrument/GMDS
+insubordinate
+insubstantial
+insufferable
+insufferably
+insularity/MS
+insular/YS
+insulate/DSXNG
+insulated/U
+insulation/M
+insulator/MS
+insulin/MS
+insult/DRSG
+insulter/M
+insulting/Y
+insuperable
+insuperably
+insupportableness/M
+insupportable/P
+insurance/MS
+insurance's/A
+insure/BZGS
+insured/S
+insurer/M
+insurgence/SM
+insurgency/MS
+insurgent/MS
+insurmountably
+insurrectionist/SM
+insurrection/SM
+intactness/M
+intact/P
+intaglio/GMDS
+intake/M
+intangible/M
+integer/MS
+integrability/M
+integrable
+integral/SYM
+integrand/MS
+integrate/AGNXEDS
+integration/EMA
+integrative/E
+integrator/MS
+integrity/SM
+integument/SM
+intellective/Y
+intellect/MVS
+intellectualism/MS
+intellectuality/M
+intellectualize/GSD
+intellectualness/M
+intellectual/YPS
+intelligence/MSR
+intelligencer/M
+intelligentsia/MS
+intelligent/UY
+intelligibilities
+intelligibility/UM
+intelligibleness/MU
+intelligible/PU
+intelligibly/U
+Intel/M
+Intelsat/M
+intemperate/P
+intendant/MS
+intendedness/M
+intended/SYP
+intender/M
+intensification/M
+intensifier/M
+intensify/GXNZRSD
+intensional/Y
+intensiveness/MS
+intensive/PSY
+intentionality/M
+intentional/UY
+intention/SDM
+intentness/SM
+intent/YP
+interaction/MS
+interactive/PY
+interactivity
+interact/VGDS
+interaxial
+interbank
+interbred
+interbreed/GS
+intercalate/GNVDS
+intercalation/M
+intercase
+intercaste
+interceder/M
+intercede/SRDG
+intercensal
+intercept/DGS
+interception/MS
+interceptor/MS
+intercession/MS
+intercessor/SM
+intercessory
+interchangeability/M
+interchangeableness/M
+interchangeable/P
+interchangeably
+interchange/DSRGJ
+interchanger/M
+intercity
+interclass
+intercohort
+intercollegiate
+intercommunicate/SDXNG
+intercommunication/M
+intercom/SM
+interconnectedness/M
+interconnected/P
+interconnect/GDS
+interconnection/SM
+interconnectivity
+intercontinental
+interconversion/M
+intercorrelated
+intercourse/SM
+Interdata/M
+interdenominational
+interdepartmental/Y
+interdependence/MS
+interdependency/SM
+interdependent/Y
+interdiction/MS
+interdict/MDVGS
+interdisciplinary
+interested/UYE
+interest/GEMDS
+interestingly/U
+interestingness/M
+interesting/YP
+inter/ESTL
+interface/SRDGM
+interfacing/M
+interfaith
+interference/MS
+interferer/M
+interfere/SRDG
+interfering/Y
+interferometer/SM
+interferometric
+interferometry/M
+interferon/MS
+interfile/GSD
+intergalactic
+intergenerational
+intergeneration/M
+interglacial
+intergovernmental
+intergroup
+interim/S
+interindex
+interindustry
+interior/SMY
+interj
+interject/GDS
+interjectional
+interjection/MS
+interlace/GSD
+interlard/SGD
+interlayer/G
+interleave/SDG
+interleukin/S
+interlibrary
+interlinear/S
+interline/JGSD
+interlingual
+interlingua/M
+interlining/M
+interlink/GDS
+interlisp/M
+interlobular
+interlocker/M
+interlock/RDSG
+interlocutor/MS
+interlocutory
+interlope/GZSRD
+interloper/M
+interlude/MSDG
+intermarriage/MS
+intermarry/GDS
+intermediary/MS
+intermediateness/M
+intermediate/YMNGSDP
+intermediation/M
+interment/SME
+intermeshed
+intermetrics
+intermezzi
+intermezzo/SM
+interminably
+intermingle/DSG
+intermission/MS
+intermittent/Y
+intermix/GSRD
+intermodule
+intermolecular/Y
+internalization/SM
+internalize/GDS
+internal/SY
+Internationale/M
+internationalism/SM
+internationalist/SM
+internationality/M
+internationalization/MS
+internationalize/DSG
+international/YS
+internecine
+internee/SM
+interne's
+Internet/M
+INTERNET/M
+internetwork
+internist/SM
+intern/L
+internment/SM
+internship/MS
+internuclear
+interocular
+interoffice
+interoperability
+interpenetrates
+interpersonal/Y
+interplanetary
+interplay/GSMD
+interpol
+interpolate/XGNVBDS
+interpolation/M
+Interpol/M
+interpose/GSRD
+interposer/M
+interposition/MS
+interpretable/U
+interpret/AGSD
+interpretation/MSA
+interpretative/Y
+interpreted/U
+interpreter/SM
+interpretive/Y
+interpretor/S
+interprocess
+interprocessor
+interquartile
+interracial
+interred/E
+interregional
+interregnum/MS
+interrelatedness/M
+interrelated/PY
+interrelate/GNDSX
+interrelation/M
+interrelationship/SM
+interring/E
+interrogate/DSXGNV
+interrogation/M
+interrogative/SY
+interrogator/SM
+interrogatory/S
+interrupted/U
+interrupter/M
+interruptibility
+interruptible
+interruption/MS
+interrupt/VGZRDS
+interscholastic
+intersect/GDS
+intersection/MS
+intersession/MS
+interspecies
+intersperse/GNDSX
+interspersion/M
+interstage
+interstate/S
+interstellar
+interstice/SM
+interstitial/SY
+intersurvey
+intertask
+intertwine/GSD
+interurban/S
+interval/MS
+intervene/GSRD
+intervener/M
+intervenor/M
+interventionism/MS
+interventionist/S
+intervention/MS
+interview/AMD
+interviewed/U
+interviewee/SM
+interviewer/SM
+interviewing
+interviews
+intervocalic
+interweave/GS
+interwove
+interwoven
+intestacy/SM
+intestinal/Y
+intestine/SM
+inti
+intifada
+intimacy/SM
+intimal
+intimateness/M
+intimater/M
+intimate/XYNGPDRS
+intimation/M
+intimidate/SDXNG
+intimidating/Y
+intimidation/M
+into
+intolerableness/M
+intolerable/P
+intolerant/PS
+intonate/NX
+intonation/M
+intoxicant/MS
+intoxicate/DSGNX
+intoxicated/Y
+intoxication/M
+intra
+intracellular
+intracity
+intraclass
+intracohort
+intractability/M
+intractableness/M
+intractable/P
+intradepartmental
+intrafamily
+intragenerational
+intraindustry
+intraline
+intrametropolitan
+intramural/Y
+intramuscular/Y
+intranasal
+intransigence/MS
+intransigent/YS
+intransitive/S
+intraoffice
+intraprocess
+intrapulmonary
+intraregional
+intrasectoral
+intrastate
+intratissue
+intrauterine
+intravenous/YS
+intrepidity/SM
+intrepidness/M
+intrepid/YP
+intricacy/SM
+intricateness/M
+intricate/PY
+intrigue/DRSZG
+intriguer/M
+intriguing/Y
+intrinsically
+intrinsic/S
+introduce/ADSG
+introducer/M
+introduction/ASM
+introductory
+introit/SM
+introject/SD
+intro/S
+introspection/MS
+introspectiveness/M
+introspective/YP
+introspect/SGVD
+introversion/SM
+introvert/SMDG
+intruder/M
+intrude/ZGDSR
+intrusion/SM
+intrusiveness/MS
+intrusive/SYP
+intubate/NGDS
+intubation/M
+intuit/GVDSB
+intuitionist/M
+intuitiveness/MS
+intuitive/YP
+int/ZR
+Inuit/MS
+inundate/SXNG
+inundation/M
+inure/GDS
+invader/M
+invade/ZSRDG
+invalid/GSDM
+invalidism/MS
+invariable/P
+invariant/M
+invasion/SM
+invasive/P
+invectiveness/M
+invective/PSMY
+inveigh/DRG
+inveigher/M
+inveighs
+inveigle/DRSZG
+inveigler/M
+invent/ADGS
+invented/U
+invention/ASM
+inventiveness/MS
+inventive/YP
+inventor/MS
+inventory/SDMG
+Inverness/M
+inverse/YV
+inverter/M
+invertible
+invert/ZSGDR
+invest/ADSLG
+investigate/XDSNGV
+investigation/MA
+investigator/MS
+investigatory
+investiture/SM
+investment/ESA
+investment's/A
+investor/SM
+inveteracy/MS
+inveterate/Y
+inviability
+invidiousness/MS
+invidious/YP
+invigilate/GD
+invigilator/SM
+invigorate/ANGSD
+invigorating/Y
+invigoration/AM
+invigorations
+invincibility/SM
+invincibleness/M
+invincible/P
+invincibly
+inviolability/MS
+inviolably
+inviolateness/M
+inviolate/YP
+inviscid
+invisibleness/M
+invisible/S
+invitational/S
+invitation/MS
+invited/U
+invitee/S
+inviter/M
+invite/SRDG
+inviting/Y
+invocable
+invocate
+invoked/A
+invoke/GSRDBZ
+invoker/M
+invokes/A
+involuntariness/S
+involuntary/P
+involute/XYN
+involution/M
+involutorial
+involvedly
+involved/U
+involve/GDSRL
+involvement/SM
+involver/M
+invulnerability/M
+invulnerableness/M
+inwardness/M
+inward/PY
+ioctl
+iodate/MGND
+iodation/M
+iodide/MS
+iodinate/DNG
+iodine/MS
+iodize/GSD
+Iolande/M
+Iolanthe/M
+Io/M
+Iona/M
+Ionesco/M
+Ionian/M
+ionic/S
+Ionic/S
+ionization's
+ionization/SU
+ionized/UC
+ionize/GNSRDJXZ
+ionizer's
+ionizer/US
+ionizes/U
+ionizing/U
+ionosphere/SM
+ionospheric
+ion's/I
+ion/SMU
+Iorgo/MS
+Iormina/M
+Iosep/M
+iota/SM
+IOU
+Iowan/S
+Iowa/SM
+IPA
+ipecac/MS
+Iphigenia/M
+ipso
+Ipswich/M
+IQ
+Iqbal/M
+Iquitos/M
+Ira/M
+Iranian/MS
+Iran/M
+Iraqi/SM
+Iraq/M
+IRA/S
+irascibility/SM
+irascible
+irascibly
+irateness/S
+irate/RPYT
+ireful
+Ireland/M
+ire/MGDS
+Irena/M
+Irene/M
+irenic/S
+iridescence/SM
+iridescent/Y
+irides/M
+iridium/MS
+irids
+Irina/M
+Iris
+iris/GDSM
+Irishman/M
+Irishmen
+Irish/R
+Irishwoman/M
+Irishwomen
+Irita/M
+irk/GDS
+irksomeness/SM
+irksome/YP
+Irkutsk/M
+Ir/M
+Irma/M
+ironclad/S
+iron/DRMPSGJ
+ironer/M
+ironic
+ironicalness/M
+ironical/YP
+ironing/M
+ironmonger/M
+ironmongery/M
+ironside/MS
+ironstone/MS
+ironware/SM
+ironwood/SM
+ironworker/M
+ironwork/MRS
+irony/SM
+Iroquoian/MS
+Iroquois/M
+irradiate/XSDVNG
+irradiation/M
+irrationality/MS
+irrationalness/M
+irrational/YSP
+Irrawaddy/M
+irreclaimable
+irreconcilability/MS
+irreconcilableness/M
+irreconcilable/PS
+irreconcilably
+irrecoverableness/M
+irrecoverable/P
+irrecoverably
+irredeemable/S
+irredeemably
+irredentism/M
+irredentist/M
+irreducibility/M
+irreducible
+irreducibly
+irreflexive
+irrefutable
+irrefutably
+irregardless
+irregularity/SM
+irregular/YS
+irrelevance/SM
+irrelevancy/MS
+irrelevant/Y
+irreligious
+irremediableness/M
+irremediable/P
+irremediably
+irremovable
+irreparableness/M
+irreparable/P
+irreparably
+irreplaceable/P
+irrepressible
+irrepressibly
+irreproachableness/M
+irreproachable/P
+irreproachably
+irreproducibility
+irreproducible
+irresistibility/M
+irresistibleness/M
+irresistible/P
+irresistibly
+irresoluteness/SM
+irresolute/PNXY
+irresolution/M
+irresolvable
+irrespective/Y
+irresponsibility/SM
+irresponsibleness/M
+irresponsible/PS
+irresponsibly
+irretrievable
+irretrievably
+irreverence/MS
+irreverent/Y
+irreversible
+irreversibly
+irrevocableness/M
+irrevocable/P
+irrevocably
+irrigable
+irrigate/DSXNG
+irrigation/M
+irritability/MS
+irritableness/M
+irritable/P
+irritably
+irritant/S
+irritate/DSXNGV
+irritated/Y
+irritating/Y
+irritation/M
+irrupt/GVSD
+irruption/SM
+IRS
+Irtish/M
+Irvine/M
+Irving/M
+Irvin/M
+Irv/MG
+Irwin/M
+Irwinn/M
+is
+i's
+Isaac/SM
+Isaak/M
+Isabelita/M
+Isabella/M
+Isabelle/M
+Isabel/M
+Isacco/M
+Isac/M
+Isadora/M
+Isadore/M
+Isador/M
+Isahella/M
+Isaiah/M
+Isak/M
+Isa/M
+ISBN
+Iscariot/M
+Iseabal/M
+Isfahan/M
+Isherwood/M
+Ishim/M
+Ishmael/M
+Ishtar/M
+Isiahi/M
+Isiah/M
+Isidora/M
+Isidore/M
+Isidor/M
+Isidoro/M
+Isidro/M
+isinglass/MS
+Isis/M
+Islamabad/M
+Islamic/S
+Islam/SM
+islander/M
+island/GZMRDS
+Islandia/M
+isle/MS
+islet/SM
+isl/GD
+Ismael/M
+ism/MCS
+isn't
+ISO
+isobaric
+isobar/MS
+Isobel/M
+isochronal/Y
+isochronous/Y
+isocline/M
+isocyanate/M
+isodine
+isolate/SDXNG
+isolationism/SM
+isolationistic
+isolationist/SM
+isolation/M
+isolator/MS
+Isolde/M
+isomeric
+isomerism/SM
+isomer/SM
+isometrically
+isometric/S
+isometrics/M
+isomorphic
+isomorphically
+isomorphism/MS
+isomorph/M
+isoperimetrical
+isopleth/M
+isopleths
+isosceles
+isostatic
+isothermal/Y
+isotherm/MS
+isotonic
+isotope/SM
+isotopic
+isotropic
+isotropically
+isotropy/M
+Ispahan's
+ispell/M
+Ispell/M
+Israeli/MS
+Israelite/SM
+Israel/MS
+Issac/M
+Issiah/M
+Issie/M
+Issi/M
+issuable
+issuance/MS
+issuant
+issued/A
+issue/GMZDSR
+issuer/AMS
+issues/A
+issuing/A
+Issy/M
+Istanbul/M
+isthmian/S
+isthmus/SM
+Istvan/M
+Isuzu/M
+It
+IT
+Itaipu/M
+ital
+Italianate/GSD
+Italian/MS
+italicization/MS
+italicized/U
+italicize/GSD
+italic/S
+Ital/M
+Italy/M
+Itasca/M
+itch/GMDS
+itchiness/MS
+Itch/M
+itchy/RTP
+ITcorp/M
+ITCorp/M
+it'd
+Itel/M
+itemization/SM
+itemized/U
+itemize/GZDRS
+itemizer/M
+itemizes/A
+item/MDSG
+iterate/ASDXVGN
+iteration/M
+iterative/YA
+iterator/MS
+Ithaca/M
+Ithacan
+itinerant/SY
+itinerary/MS
+it'll
+it/MUS
+Ito/M
+its
+itself
+ITT
+IUD/S
+IV
+Iva/M
+Ivanhoe/M
+Ivan/M
+Ivar/M
+I've
+Ive/MRS
+Iver/M
+Ivette/M
+Ivett/M
+Ivie/M
+iv/M
+Ivonne/M
+Ivor/M
+Ivory/M
+ivory/SM
+IVs
+Ivy/M
+ivy/MDS
+ix
+Izaak/M
+Izabel/M
+Izak/M
+Izanagi/M
+Izanami/M
+Izhevsk/M
+Izmir/M
+Izvestia/M
+Izzy/M
+jabbed
+jabberer/M
+jabber/JRDSZG
+jabbing
+Jabez/M
+Jablonsky/M
+jabot/MS
+jab/SM
+jacaranda/MS
+Jacenta/M
+Jacinda/M
+Jacinta/M
+Jacintha/M
+Jacinthe/M
+jackal/SM
+jackass/SM
+jackboot/DMS
+jackdaw/SM
+Jackelyn/M
+jacketed/U
+jacket/GSMD
+jack/GDRMS
+jackhammer/MDGS
+Jackie/M
+Jacki/M
+jackknife/MGSD
+jackknives
+Jacklin/M
+Jacklyn/M
+Jack/M
+Jackman/M
+jackpot/MS
+Jackqueline/M
+Jackquelin/M
+jackrabbit/DGS
+Jacksonian
+Jackson/SM
+Jacksonville/M
+jackstraw/MS
+Jacky/M
+Jaclin/M
+Jaclyn/M
+Jacobean
+Jacobian/M
+Jacobi/M
+Jacobin/M
+Jacobite/M
+Jacobo/M
+Jacobsen/M
+Jacob/SM
+Jacobs/N
+Jacobson/M
+Jacobus
+Jacoby/M
+jacquard/MS
+Jacquard/SM
+Jacqueline/M
+Jacquelin/M
+Jacquelyn/M
+Jacquelynn/M
+Jacquenetta/M
+Jacquenette/M
+Jacques/M
+Jacquetta/M
+Jacquette/M
+Jacquie/M
+Jacqui/M
+jacuzzi
+Jacuzzi/S
+Jacynth/M
+Jada/M
+jadedness/SM
+jaded/PY
+jadeite/SM
+Jade/M
+jade/MGDS
+Jaeger/M
+Jae/M
+jaggedness/SM
+jagged/RYTP
+Jagger/M
+jaggers
+jagging
+jag/S
+jaguar/MS
+jailbird/MS
+jailbreak/SM
+jailer/M
+jail/GZSMDR
+Jaime/M
+Jaimie/M
+Jaine/M
+Jainism/M
+Jain/M
+Jaipur/M
+Jakarta/M
+Jake/MS
+Jakie/M
+Jakob/M
+jalapeño/S
+jalopy/SM
+jalousie/MS
+Jamaal/M
+Jamaica/M
+Jamaican/S
+Jamal/M
+Jamar/M
+jambalaya/MS
+jamb/DMGS
+jamboree/MS
+Jamel/M
+Jame/MS
+Jameson/M
+Jamestown/M
+Jamesy/M
+Jamey/M
+Jamie/M
+Jamill/M
+Jamil/M
+Jami/M
+Jamima/M
+Jamison/M
+Jammal/M
+jammed/U
+Jammie/M
+jamming/U
+jam/SM
+Janacek/M
+Jana/M
+Janaya/M
+Janaye/M
+Jandy/M
+Janean/M
+Janeczka/M
+Janeen/M
+Janeiro/M
+Janek/M
+Janela/M
+Janella/M
+Janelle/M
+Janell/M
+Janel/M
+Jane/M
+Janene/M
+Janenna/M
+Janessa/M
+Janesville/M
+Janeta/M
+Janet/M
+Janetta/M
+Janette/M
+Janeva/M
+Janey/M
+jangler/M
+jangle/RSDGZ
+jangly
+Jania/M
+Janice/M
+Janie/M
+Janifer/M
+Janina/M
+Janine/M
+Janis/M
+janissary/MS
+Janith/M
+janitorial
+janitor/SM
+Janka/M
+Jan/M
+Janna/M
+Jannelle/M
+Jannel/M
+Jannie/M
+Janos/M
+Janot/M
+Jansenist/M
+Jansen/M
+January/MS
+Janus/M
+Jany/M
+Japanese/SM
+Japan/M
+japanned
+japanner
+japanning
+japan/SM
+jape/DSMG
+Japura/M
+Jaquelin/M
+Jaquelyn/M
+Jaquenetta/M
+Jaquenette/M
+Jaquith/M
+Jarad/M
+jardinière/MS
+Jard/M
+Jareb/M
+Jared/M
+jarful/S
+jargon/SGDM
+Jarib/M
+Jarid/M
+Jarlsberg
+jar/MS
+Jarrad/M
+jarred
+Jarred/M
+Jarret/M
+Jarrett/M
+Jarrid/M
+jarring/SY
+Jarrod/M
+Jarvis/M
+Jase/M
+Jasen/M
+Jasmina/M
+Jasmine/M
+jasmine/MS
+Jasmin/M
+Jason/M
+Jasper/M
+jasper/MS
+Jastrow/M
+Jasun/M
+jato/SM
+jaundice/DSMG
+jaundiced/U
+jauntily
+jauntiness/MS
+jaunt/MDGS
+jaunty/SRTP
+Javanese
+Java/SM
+javelin/SDMG
+Javier/M
+jawbone/SDMG
+jawbreaker/SM
+jawline
+jaw/SMDG
+Jaxartes/M
+Jayapura/M
+jaybird/SM
+Jaycee/SM
+Jaye/M
+Jay/M
+Jaymee/M
+Jayme/M
+Jaymie/M
+Jaynell/M
+Jayne/M
+jay/SM
+Jayson/M
+jaywalker/M
+jaywalk/JSRDZG
+Jazmin/M
+jazziness/M
+jazzmen
+jazz/MGDS
+jazzy/PTR
+JCS
+jct
+JD
+Jdavie/M
+jealousness/M
+jealous/PY
+jealousy/MS
+Jeana/M
+Jeanelle/M
+Jeane/M
+Jeanette/M
+Jeanie/M
+Jeanine/M
+Jean/M
+jean/MS
+Jeanna/M
+Jeanne/M
+Jeannette/M
+Jeannie/M
+Jeannine/M
+Jecho/M
+Jedd/M
+Jeddy/M
+Jedediah/M
+Jedidiah/M
+Jedi/M
+Jed/M
+jeep/GZSMD
+Jeep/S
+jeerer/M
+jeering/Y
+jeer/SJDRMG
+Jeeves/M
+jeez
+Jefferey/M
+Jeffersonian/S
+Jefferson/M
+Jeffery/M
+Jeffie/M
+Jeff/M
+Jeffrey/SM
+Jeffry/M
+Jeffy/M
+jehad's
+Jehanna/M
+Jehoshaphat/M
+Jehovah/M
+Jehu/M
+jejuna
+jejuneness/M
+jejune/PY
+jejunum/M
+Jekyll/M
+Jelene/M
+jell/GSD
+Jello/M
+jello's
+jellybean/SM
+jellyfish/MS
+jellying/M
+jellylike
+jellyroll/S
+jelly/SDMG
+Jemie/M
+Jemimah/M
+Jemima/M
+Jemmie/M
+jemmy/M
+Jemmy/M
+Jena/M
+Jenda/M
+Jenelle/M
+Jenica/M
+Jeniece/M
+Jenifer/M
+Jeniffer/M
+Jenilee/M
+Jeni/M
+Jenine/M
+Jenkins/M
+Jen/M
+Jenna/M
+Jennee/M
+Jenner/M
+jennet/SM
+Jennette/M
+Jennica/M
+Jennie/M
+Jennifer/M
+Jennilee/M
+Jenni/M
+Jennine/M
+Jennings/M
+Jenn/RMJ
+Jenny/M
+jenny/SM
+Jeno/M
+Jensen/M
+Jens/N
+jeopard
+jeopardize/GSD
+jeopardy/MS
+Jephthah/M
+Jerad/M
+Jerald/M
+Jeralee/M
+Jeramey/M
+Jeramie/M
+Jere/M
+Jereme/M
+jeremiad/SM
+Jeremiah/M
+Jeremiahs
+Jeremias/M
+Jeremie/M
+Jeremy/M
+Jericho/M
+Jeri/M
+jerker/M
+jerk/GSDRJ
+jerkily
+jerkiness/SM
+jerkin/SM
+jerkwater/S
+jerky/RSTP
+Jermaine/M
+Jermain/M
+Jermayne/M
+Jeroboam/M
+Jerold/M
+Jerome/M
+Jeromy/M
+Jerrie/M
+Jerrilee/M
+Jerrilyn/M
+Jerri/M
+Jerrine/M
+Jerrod/M
+Jerrold/M
+Jerrome/M
+jerrybuilt
+Jerrylee/M
+jerry/M
+Jerry/M
+jersey/MS
+Jersey/MS
+Jerusalem/M
+Jervis/M
+Jes
+Jessalin/M
+Jessalyn/M
+Jessa/M
+Jessamine/M
+jessamine's
+Jessamyn/M
+Jessee/M
+Jesselyn/M
+Jesse/M
+Jessey/M
+Jessica/M
+Jessie/M
+Jessika/M
+Jessi/M
+jess/M
+Jess/M
+Jessy/M
+jest/DRSGZM
+jester/M
+jesting/Y
+Jesuit/SM
+Jesus
+Jeth/M
+Jethro/M
+jetliner/MS
+jet/MS
+jetport/SM
+jetsam/MS
+jetted/M
+jetting/M
+jettison/DSG
+jetty/RSDGMT
+jeweler/M
+jewelery/S
+jewel/GZMRDS
+Jewelled/M
+Jewelle/M
+jewellery's
+Jewell/MD
+Jewel/M
+jewelry/MS
+Jewess/SM
+Jewishness/MS
+Jewish/P
+Jew/MS
+Jewry/MS
+Jezebel/MS
+j/F
+JFK/M
+jg/M
+jibbed
+jibbing
+jibe/S
+jib/MDSG
+Jidda/M
+jiff/S
+jiffy/SM
+jigged
+jigger/SDMG
+jigging/M
+jiggle/SDG
+jiggly/TR
+jig/MS
+jigsaw/GSDM
+jihad/SM
+Jilin
+Jillana/M
+Jillane/M
+Jillayne/M
+Jilleen/M
+Jillene/M
+Jillian/M
+Jillie/M
+Jilli/M
+Jill/M
+Jilly/M
+jilt/DRGS
+jilter/M
+Jimenez/M
+Jim/M
+Jimmie/M
+jimmy/GSDM
+Jimmy/M
+jimsonweed/S
+Jinan
+jingler/M
+jingle/RSDG
+jingly/TR
+jingoism/SM
+jingoistic
+jingoist/SM
+jingo/M
+Jinnah/M
+jinni's
+jinn/MS
+Jinny/M
+jinrikisha/SM
+jinx/GMDS
+jitney/MS
+jitterbugged
+jitterbugger
+jitterbugging
+jitterbug/SM
+jitter/S
+jittery/TR
+jiujitsu's
+Jivaro/M
+jive/MGDS
+Joachim/M
+Joana/M
+Joane/M
+Joanie/M
+Joan/M
+Joanna/M
+Joanne/SM
+Joann/M
+Joaquin/M
+jobbed
+jobber/MS
+jobbery/M
+jobbing/M
+Jobey/M
+jobholder/SM
+Jobie/M
+Jobi/M
+Jobina/M
+joblessness/MS
+jobless/P
+Jobrel/M
+job/SM
+Job/SM
+Jobye/M
+Joby/M
+Jobyna/M
+Jocasta/M
+Joceline/M
+Jocelin/M
+Jocelyne/M
+Jocelyn/M
+jockey/SGMD
+jock/GDMS
+Jock/M
+Jocko/M
+jockstrap/MS
+jocoseness/MS
+jocose/YP
+jocosity/SM
+jocularity/SM
+jocular/Y
+jocundity/SM
+jocund/Y
+Jodee/M
+jodhpurs
+Jodie/M
+Jodi/M
+Jody/M
+Joeann/M
+Joela/M
+Joelie/M
+Joella/M
+Joelle/M
+Joellen/M
+Joell/MN
+Joelly/M
+Joellyn/M
+Joel/MY
+Joelynn/M
+Joe/M
+Joesph/M
+Joete/M
+joey/M
+Joey/M
+jogged
+jogger/SM
+jogging/S
+joggler/M
+joggle/SRDG
+Jogjakarta/M
+jog/S
+Johan/M
+Johannah/M
+Johanna/M
+Johannes
+Johannesburg/M
+Johann/M
+Johansen/M
+Johanson/M
+Johna/MH
+Johnathan/M
+Johnath/M
+Johnathon/M
+Johnette/M
+Johnie/M
+Johnna/M
+Johnnie/M
+johnnycake/SM
+Johnny/M
+johnny/SM
+Johnsen/M
+john/SM
+John/SM
+Johns/N
+Johnson/M
+Johnston/M
+Johnstown/M
+Johny/M
+Joice/M
+join/ADGFS
+joined/U
+joiner/FSM
+joinery/MS
+jointed/EYP
+jointedness/ME
+joint/EGDYPS
+jointer/M
+jointly/F
+joint's
+jointures
+joist/GMDS
+Jojo/M
+joke/MZDSRG
+joker/M
+jokey
+jokier
+jokiest
+jokily
+joking/Y
+Jolee/M
+Joleen/M
+Jolene/M
+Joletta/M
+Jolie/M
+Joliet's
+Joli/M
+Joline/M
+Jolla/M
+jollification/MS
+jollily
+jolliness/SM
+jollity/MS
+jolly/TSRDGP
+Jolson/M
+jolt/DRGZS
+jolter/M
+Joly/M
+Jolyn/M
+Jolynn/M
+Jo/MY
+Jonah/M
+Jonahs
+Jonas
+Jonathan/M
+Jonathon/M
+Jonell/M
+Jone/MS
+Jones/S
+Jonie/M
+Joni/MS
+Jon/M
+jonquil/MS
+Jonson/M
+Joplin/M
+Jordain/M
+Jordana/M
+Jordanian/S
+Jordan/M
+Jordanna/M
+Jordon/M
+Jorey/M
+Jorgan/M
+Jorge/M
+Jorgensen/M
+Jorgenson/M
+Jorie/M
+Jori/M
+Jorrie/M
+Jorry/M
+Jory/M
+Joscelin/M
+Josee/M
+Josefa/M
+Josefina/M
+Josef/M
+Joseito/M
+Jose/M
+Josepha/M
+Josephina/M
+Josephine/M
+Joseph/M
+Josephs
+Josephson/M
+Josephus/M
+Josey/M
+josh/DSRGZ
+josher/M
+Joshia/M
+Josh/M
+Joshuah/M
+Joshua/M
+Josiah/M
+Josias/M
+Josie/M
+Josi/M
+Josselyn/M
+joss/M
+jostle/SDG
+Josue/M
+Josy/M
+jot/S
+jotted
+jotter/SM
+jotting/SM
+Joule/M
+joule/SM
+jounce/SDG
+jouncy/RT
+Jourdain/M
+Jourdan/M
+journalese/MS
+journal/GSDM
+journalism/SM
+journalistic
+journalist/SM
+journalize/DRSGZ
+journalized/U
+journalizer/M
+journey/DRMZSGJ
+journeyer/M
+journeyman/M
+journeymen
+jouster/M
+joust/ZSMRDG
+Jovanovich/M
+Jove/M
+joviality/SM
+jovial/Y
+Jovian
+jowl/SMD
+jowly/TR
+Joya/M
+Joyan/M
+Joyann/M
+Joycean
+Joycelin/M
+Joyce/M
+Joye/M
+joyfuller
+joyfullest
+joyfulness/SM
+joyful/PY
+joylessness/MS
+joyless/PY
+Joy/M
+joy/MDSG
+Joyner/M
+joyousness/MS
+joyous/YP
+joyridden
+joyride/SRZMGJ
+joyrode
+joystick/S
+Jozef/M
+JP
+Jpn
+Jr/M
+j's
+J's
+Jsandye/M
+Juana/M
+Juanita/M
+Juan/M
+Juarez
+Jubal/M
+jubilant/Y
+jubilate/XNGDS
+jubilation/M
+jubilee/SM
+Judah/M
+Judaic
+Judaical
+Judaism/SM
+Judas/S
+juddered
+juddering
+Judd/M
+Judea/M
+Jude/M
+judge/AGDS
+judger/M
+judge's
+judgeship/SM
+judgmental/Y
+judgment/MS
+judicable
+judicatory/S
+judicature/MS
+judicial/Y
+judiciary/S
+judicious/IYP
+judiciousness/SMI
+Judie/M
+Judi/MH
+Juditha/M
+Judith/M
+Jud/M
+judo/MS
+Judon/M
+Judson/M
+Judye/M
+Judy/M
+jugate/F
+jugful/SM
+jugged
+Juggernaut/M
+juggernaut/SM
+jugging
+juggler/M
+juggle/RSDGZ
+jugglery/MS
+jug/MS
+jugular/S
+juice/GMZDSR
+juicer/M
+juicily
+juiciness/MS
+juicy/TRP
+Juieta/M
+jujitsu/MS
+jujube/SM
+juju/M
+jujutsu's
+jukebox/SM
+juke/GS
+Julee/M
+Jule/MS
+julep/SM
+Julia/M
+Juliana/M
+Juliane/M
+Julian/M
+Julianna/M
+Julianne/M
+Juliann/M
+Julie/M
+julienne/GSD
+Julienne/M
+Julieta/M
+Juliet/M
+Julietta/M
+Juliette/M
+Juli/M
+Julina/M
+Juline/M
+Julio/M
+Julissa/M
+Julita/M
+Julius/M
+Jul/M
+Julys
+July/SM
+jumble/GSD
+jumbo/MS
+jumper/M
+jump/GZDRS
+jumpily
+jumpiness/MS
+jumpsuit/S
+jumpy/PTR
+jun
+junco/MS
+junction/IMESF
+juncture/SFM
+Juneau/M
+June/MS
+Junette/M
+Jungfrau/M
+Jungian
+jungle/SDM
+Jung/M
+Junia/M
+Junie/M
+Junina/M
+juniority/M
+junior/MS
+Junior/S
+juniper/SM
+junkerdom
+Junker/SM
+junketeer/SGDM
+junket/SMDG
+junk/GZDRMS
+junkie/RSMT
+junkyard/MS
+Jun/M
+Juno/M
+junta/MS
+Jupiter/M
+Jurassic
+juridic
+juridical/Y
+juried
+jurisdictional/Y
+jurisdiction/SM
+jurisprudence/SM
+jurisprudent
+jurisprudential/Y
+juristic
+jurist/MS
+juror/MS
+Jurua/M
+jury/IMS
+jurying
+juryman/M
+jurymen
+jurywoman/M
+jurywomen
+justed
+Justen/M
+juster/M
+justest
+Justice/M
+justice/MIS
+justiciable
+justifiability/M
+justifiable/U
+justifiably/U
+justification/M
+justified/UA
+justifier/M
+justify/GDRSXZN
+Justina/M
+Justine/M
+justing
+Justinian/M
+Justin/M
+Justinn/M
+Justino/M
+Justis/M
+justness/MS
+justness's/U
+justs
+just/UPY
+Justus/M
+jute/SM
+Jutish
+Jutland/M
+jut/S
+jutted
+jutting
+Juvenal/M
+juvenile/SM
+juxtapose/SDG
+juxtaposition/SM
+JV
+J/X
+Jyoti/M
+Kaaba/M
+kabob/SM
+kaboom
+Kabuki
+kabuki/SM
+Kabul/M
+Kacey/M
+Kacie/M
+Kacy/M
+Kaddish/M
+kaddish/S
+Kaela/M
+kaffeeklatch
+kaffeeklatsch/S
+Kafkaesque
+Kafka/M
+kaftan's
+Kagoshima/M
+Kahaleel/M
+Kahlil/M
+Kahlua/M
+Kahn/M
+Kaia/M
+Kaifeng/M
+Kaila/M
+Kaile/M
+Kailey/M
+Kai/M
+Kaine/M
+Kain/M
+kaiser/MS
+Kaiser/SM
+Kaitlin/M
+Kaitlyn/M
+Kaitlynn/M
+Kaja/M
+Kajar/M
+Kakalina/M
+Kalahari/M
+Kala/M
+Kalamazoo/M
+Kalashnikov/M
+Kalb/M
+Kaleb/M
+Kaleena/M
+kaleidoscope/SM
+kaleidoscopic
+kaleidoscopically
+Kale/M
+kale/MS
+Kalgoorlie/M
+Kalie/M
+Kalila/M
+Kalil/M
+Kali/M
+Kalina/M
+Kalinda/M
+Kalindi/M
+Kalle/M
+Kalli/M
+Kally/M
+Kalmyk
+Kalvin/M
+Kama/M
+Kamchatka/M
+Kamehameha/M
+Kameko/M
+Kamikaze/MS
+kamikaze/SM
+Kamilah/M
+Kamila/M
+Kamillah/M
+Kampala/M
+Kampuchea/M
+Kanchenjunga/M
+Kandace/M
+Kandahar/M
+Kandinsky/M
+Kandy/M
+Kane/M
+kangaroo/SGMD
+Kania/M
+Kankakee/M
+Kan/MS
+Kannada/M
+Kano/M
+Kanpur/M
+Kansan/S
+Kansas
+Kantian
+Kant/M
+Kanya/M
+Kaohsiung/M
+kaolinite/M
+kaolin/MS
+Kaplan/M
+kapok/SM
+Kaposi/M
+kappa/MS
+kaput/M
+Karachi/M
+Karaganda/M
+Karakorum/M
+karakul/MS
+Karalee/M
+Karalynn/M
+Kara/M
+Karamazov/M
+karaoke/S
+karate/MS
+karat/SM
+Karee/M
+Kareem/M
+Karel/M
+Kare/M
+Karena/M
+Karenina/M
+Karen/M
+Karia/M
+Karie/M
+Karil/M
+Karilynn/M
+Kari/M
+Karim/M
+Karina/M
+Karine/M
+Karin/M
+Kariotta/M
+Karisa/M
+Karissa/M
+Karita/M
+Karla/M
+Karlan/M
+Karlee/M
+Karleen/M
+Karlene/M
+Karlen/M
+Karlie/M
+Karlik/M
+Karlis
+Karl/MNX
+Karloff/M
+Karlotta/M
+Karlotte/M
+Karly/M
+Karlyn/M
+karma/SM
+Karmen/M
+karmic
+Karna/M
+Karney/M
+Karola/M
+Karole/M
+Karolina/M
+Karoline/M
+Karol/M
+Karoly/M
+Karon/M
+Karo/YM
+Karp/M
+Karrah/M
+Karrie/M
+Karroo/M
+Karry/M
+kart/MS
+Karylin/M
+Karyl/M
+Kary/M
+Karyn/M
+Kasai/M
+Kasey/M
+Kashmir/SM
+Kaspar/M
+Kasparov/M
+Kasper/M
+Kass
+Kassandra/M
+Kassey/M
+Kassia/M
+Kassie/M
+Kassi/M
+katakana
+Katalin/M
+Kata/M
+Katee/M
+Katelyn/M
+Kate/M
+Katerina/M
+Katerine/M
+Katey/M
+Katha/M
+Katharina/M
+Katharine/M
+Katharyn/M
+Kathe/M
+Katherina/M
+Katherine/M
+Katheryn/M
+Kathiawar/M
+Kathie/M
+Kathi/M
+Kathleen/M
+Kathlin/M
+Kath/M
+Kathmandu
+Kathrine/M
+Kathryne/M
+Kathryn/M
+Kathye/M
+Kathy/M
+Katie/M
+Kati/M
+Katina/M
+Katine/M
+Katinka/M
+Katleen/M
+Katlin/M
+Kat/M
+Katmai/M
+Katmandu's
+Katowice/M
+Katrina/M
+Katrine/M
+Katrinka/M
+Kattie/M
+Katti/M
+Katuscha/M
+Katusha/M
+Katya/M
+katydid/SM
+Katy/M
+Katz/M
+Kauai/M
+Kauffman/M
+Kaufman/M
+Kaunas/M
+Kaunda/M
+Kawabata/M
+Kawasaki/M
+kayak/SGDM
+Kaycee/M
+Kaye/M
+Kayla/M
+Kaylee/M
+Kayle/M
+Kayley/M
+Kaylil/M
+Kaylyn/M
+Kay/M
+Kayne/M
+kayo/DMSG
+Kazakh/M
+Kazakhstan
+Kazan/M
+Kazantzakis/M
+kazoo/SM
+Kb
+KB
+KC
+kcal/M
+kc/M
+KDE/M
+Keane/M
+Kean/M
+Kearney/M
+Keary/M
+Keaton/M
+Keats/M
+kebab/SM
+Keck/M
+Keefe/MR
+Keefer/M
+Keegan/M
+Keelby/M
+Keeley/M
+keel/GSMDR
+keelhaul/SGD
+Keelia/M
+Keely/M
+Keenan/M
+Keene/M
+keener/M
+keen/GTSPYDR
+keening/M
+Keen/M
+keenness/MS
+keeper/M
+keep/GZJSR
+keeping/M
+keepsake/SM
+Keewatin/M
+kegged
+kegging
+keg/MS
+Keillor/M
+Keir/M
+Keisha/M
+Keith/M
+Kelbee/M
+Kelby/M
+Kelcey/M
+Kelcie/M
+Kelci/M
+Kelcy/M
+Kele/M
+Kelila/M
+Kellby/M
+Kellen/M
+Keller/M
+Kelley/M
+Kellia/M
+Kellie/M
+Kelli/M
+Kellina/M
+Kellogg/M
+Kellsie/M
+Kellyann/M
+Kelly/M
+kelp/GZMDS
+Kelsey/M
+Kelsi/M
+Kelsy/M
+Kelt's
+Kelvin/M
+kelvin/MS
+Kelwin/M
+Kemerovo/M
+Kempis/M
+Kemp/M
+Kendall/M
+Kendal/M
+Kendell/M
+Kendra/M
+Kendre/M
+Kendrick/MS
+Kenilworth/M
+Ken/M
+Kenmore/M
+ken/MS
+Kenna/M
+Kennan/M
+Kennecott/M
+kenned
+Kennedy/M
+kennel/GSMD
+Kenneth/M
+Kennett/M
+Kennie/M
+kenning
+Kennith/M
+Kenn/M
+Kenny/M
+keno/M
+Kenon/M
+Kenosha/M
+Kensington/M
+Kent/M
+Kenton/M
+Kentuckian/S
+Kentucky/M
+Kenya/M
+Kenyan/S
+Kenyatta/M
+Kenyon/M
+Keogh/M
+Keokuk/M
+kepi/SM
+Kepler/M
+kept
+keratin/MS
+kerbside
+Kerby/M
+kerchief/MDSG
+Kerensky/M
+Kerianne/M
+Keriann/M
+Keri/M
+Kerk/M
+Ker/M
+Kermie/M
+Kermit/M
+Kermy/M
+kerned
+kernel/GSMD
+kerning
+Kern/M
+kerosene/MS
+Kerouac/M
+Kerrie/M
+Kerrill/M
+Kerri/M
+Kerrin/M
+Kerr/M
+Kerry/M
+Kerstin/M
+Kerwin/M
+Kerwinn/M
+Kesley/M
+Keslie/M
+Kessiah/M
+Kessia/M
+Kessler/M
+kestrel/SM
+ketch/MS
+ketchup/SM
+ketone/M
+ketosis/M
+Kettering/M
+Kettie/M
+Ketti/M
+kettledrum/SM
+kettleful
+kettle/SM
+Ketty/M
+Kevan/M
+Keven/M
+Kevina/M
+Kevin/M
+Kevlar
+Kev/MN
+Kevon/M
+Kevorkian/M
+Kevyn/M
+Kewaskum/M
+Kewaunee/M
+Kewpie/M
+keyboardist/S
+keyboard/RDMZGS
+keyclick/SM
+keyhole/MS
+Key/M
+Keynesian/M
+Keynes/M
+keynoter/M
+keynote/SRDZMG
+keypad/MS
+keypuncher/M
+keypunch/ZGRSD
+keyring
+key/SGMD
+keystone/SM
+keystroke/SDMG
+keyword/SM
+k/FGEIS
+kg
+K/G
+KGB
+Khabarovsk/M
+Khachaturian/M
+khaki/SM
+Khalid/M
+Khalil/M
+Khan/M
+khan/MS
+Kharkov/M
+Khartoum/M
+Khayyam/M
+Khmer/M
+Khoisan/M
+Khomeini/M
+Khorana/M
+Khrushchev/SM
+Khufu/M
+Khulna/M
+Khwarizmi/M
+Khyber/M
+kHz/M
+KIA
+Kiah/M
+Kial/M
+kibble/GMSD
+kibbutzim
+kibbutz/M
+kibitzer/M
+kibitz/GRSDZ
+kibosh/GMSD
+Kickapoo/M
+kickback/SM
+kickball/MS
+kicker/M
+kick/GZDRS
+kickoff/SM
+kickstand/MS
+kicky/RT
+kidded
+kidder/SM
+kiddie/SD
+kidding/YM
+kiddish
+Kidd/M
+kiddo/SM
+kiddying
+kiddy's
+kidless
+kid/MS
+kidnaper's
+kidnaping's
+kidnap/MSJ
+kidnapped
+kidnapper/SM
+kidnapping/S
+kidney/MS
+kidskin/SM
+Kieffer/M
+kielbasa/SM
+kielbasi
+Kiele/M
+Kiel/M
+Kienan/M
+kier/I
+Kierkegaard/M
+Kiersten/M
+Kieth/M
+Kiev/M
+Kigali/M
+Kikelia/M
+Kikuyu/M
+Kilauea/M
+Kile/M
+Kiley/M
+Kilian/M
+Kilimanjaro/M
+kill/BJGZSDR
+killdeer/SM
+Killebrew/M
+killer/M
+Killian/M
+Killie/M
+killing/Y
+killjoy/S
+Killy/M
+kiln/GDSM
+kilobaud/M
+kilobit/S
+kilobuck
+kilobyte/S
+kilocycle/MS
+kilogauss/M
+kilogram/MS
+kilohertz/M
+kilohm/M
+kilojoule/MS
+kiloliter/MS
+kilometer/SM
+kilo/SM
+kiloton/SM
+kilovolt/SM
+kilowatt/SM
+kiloword
+kilter/M
+kilt/MDRGZS
+Ki/M
+Kimball/M
+Kimbell/M
+Kimberlee/M
+Kimberley/M
+Kimberli/M
+Kimberly/M
+Kimberlyn/M
+Kimble/M
+Kimbra/M
+Kim/M
+Kimmie/M
+Kimmi/M
+Kimmy/M
+kimono/MS
+Kincaid/M
+kinda
+kindergarten/MS
+kindergärtner/SM
+kinder/U
+kindheartedness/MS
+kindhearted/YP
+kindle/AGRSD
+kindler/M
+kindliness/SM
+kindliness's/U
+kindling/M
+kindly/TUPR
+kindness's
+kindness/US
+kind/PSYRT
+kindred/S
+kinematic/S
+kinematics/M
+kinesics/M
+kine/SM
+kinesthesis
+kinesthetically
+kinesthetic/S
+kinetically
+kinetic/S
+kinetics/M
+kinfolk/S
+kingbird/M
+kingdom/SM
+kingfisher/MS
+kinglet/M
+kingliness/M
+kingly/TPR
+King/M
+kingpin/MS
+Kingsbury/M
+king/SGYDM
+kingship/SM
+Kingsley/M
+Kingsly/M
+Kingston/M
+Kingstown/M
+Kingwood/M
+kink/GSDM
+kinkily
+kinkiness/SM
+kinky/PRT
+Kin/M
+kin/MS
+Kinna/M
+Kinney/M
+Kinnickinnic/M
+Kinnie/M
+Kinny/M
+Kinsey/M
+kinsfolk/S
+Kinshasa/M
+Kinshasha/M
+kinship/SM
+Kinsley/M
+kinsman/M
+kinsmen/M
+kinswoman/M
+kinswomen
+kiosk/SM
+Kiowa/SM
+Kipling/M
+Kip/M
+kip/MS
+Kippar/M
+kipped
+kipper/DMSG
+Kipper/M
+Kippie/M
+kipping
+Kipp/MR
+Kippy/M
+Kira/M
+Kirbee/M
+Kirbie/M
+Kirby/M
+Kirchhoff/M
+Kirchner/M
+Kirchoff/M
+Kirghistan/M
+Kirghizia/M
+Kirghiz/M
+Kiribati
+Kiri/M
+Kirinyaga/M
+kirk/GDMS
+Kirkland/M
+Kirk/M
+Kirkpatrick/M
+Kirkwood/M
+Kirov/M
+kirsch/S
+Kirsteni/M
+Kirsten/M
+Kirsti/M
+Kirstin/M
+Kirstyn/M
+Kisangani/M
+Kishinev/M
+kismet/SM
+kiss/DSRBJGZ
+Kissee/M
+kisser/M
+Kissiah/M
+Kissie/M
+Kissinger/M
+Kitakyushu/M
+kitbag's
+kitchener/M
+Kitchener/M
+kitchenette/SM
+kitchen/GDRMS
+kitchenware/SM
+kiter/M
+kite/SM
+kith/MDG
+kiths
+Kit/M
+kit/MDRGS
+kitsch/MS
+kitschy
+kitted
+kittenishness/M
+kittenish/YP
+kitten/SGDM
+Kittie/M
+Kitti/M
+kitting
+kittiwakes
+Kitty/M
+kitty/SM
+Kiwanis/M
+kiwifruit/S
+kiwi/SM
+Kizzee/M
+Kizzie/M
+KKK
+kl
+Klan/M
+Klansman/M
+Klara/M
+Klarika/M
+Klarrisa/M
+Klaus/M
+klaxon/M
+Klee/M
+Kleenex/SM
+Klein/M
+Kleinrock/M
+Klemens/M
+Klement/M
+Kleon/M
+kleptomaniac/SM
+kleptomania/MS
+Kliment/M
+Kline/M
+Klingon/M
+Klondike/SDMG
+kludger/M
+kludge/RSDGMZ
+kludgey
+klutziness/S
+klutz/SM
+klutzy/TRP
+Klux/M
+klystron/MS
+km
+kn
+knacker/M
+knack/SGZRDM
+knackwurst/MS
+Knapp/M
+knapsack/MS
+Knauer/M
+knavery/MS
+knave/SM
+knavish/Y
+kneader/M
+knead/GZRDS
+kneecap/MS
+kneecapped
+kneecapping
+knee/DSM
+kneeing
+kneeler/M
+kneel/GRS
+kneepad/SM
+knell/SMDG
+knelt
+Knesset/M
+knew
+Kngwarreye/M
+Knickerbocker/MS
+knickerbocker/S
+knickknack/SM
+knick/ZR
+Knievel/M
+knife/DSGM
+knighthood/MS
+knightliness/MS
+knightly/P
+Knight/M
+knight/MDYSG
+knish/MS
+knit/AU
+knits
+knitted
+knitter/MS
+knitting/SM
+knitwear/M
+knives/M
+knobbly
+knobby/RT
+Knobeloch/M
+knob/MS
+knockabout/M
+knockdown/S
+knocker/M
+knock/GZSJRD
+knockoff/S
+knockout/MS
+knockwurst's
+knoll/MDSG
+Knopf/M
+Knossos/M
+knothole/SM
+knot/MS
+knotted
+knottiness/M
+knotting/M
+knotty/TPR
+knowable/U
+knower/M
+know/GRBSJ
+knowhow
+knowingly/U
+knowing/RYT
+knowings/U
+knowledgeableness/M
+knowledgeable/P
+knowledgeably
+knowledge/SM
+Knowles
+known/SU
+Knox/M
+Knoxville/M
+knuckleball/R
+knuckle/DSMG
+knuckleduster
+knucklehead/MS
+Knudsen/M
+Knudson/M
+knurl/DSG
+Knuth/M
+Knutsen/M
+Knutson/M
+KO
+koala/SM
+Kobayashi/M
+Kobe/M
+Kochab/M
+Koch/M
+Kodachrome/M
+Kodak/SM
+Kodaly/M
+Kodiak/M
+Koenig/M
+Koenigsberg/M
+Koenraad/M
+Koestler/M
+Kohinoor/M
+Kohler/M
+Kohl/MR
+kohlrabies
+kohlrabi/M
+kola/SM
+Kolyma/M
+Kommunizma/M
+Kong/M
+Kongo/M
+Konrad/M
+Konstance/M
+Konstantine/M
+Konstantin/M
+Konstanze/M
+kookaburra/SM
+kook/GDMS
+kookiness/S
+kooky/PRT
+Koo/M
+Koontz/M
+kopeck/MS
+Koppers/M
+Koralle/M
+Koral/M
+Kora/M
+Koranic
+Koran/SM
+Kordula/M
+Korea/M
+Korean/S
+Korella/M
+Kore/M
+Koren/M
+Koressa/M
+Korey/M
+Korie/M
+Kori/M
+Kornberg/M
+Korney/M
+Korrie/M
+Korry/M
+Kort/M
+Kory/M
+Korzybski/M
+Kosciusko/M
+kosher/DGS
+Kossuth/M
+Kosygin/M
+Kovacs/M
+Kowalewski/M
+Kowalski/M
+Kowloon/M
+kowtow/SGD
+KP
+kph
+kraal/SMDG
+Kraemer/M
+kraft/M
+Kraft/M
+Krakatau's
+Krakatoa/M
+Krakow/M
+Kramer/M
+Krasnodar/M
+Krasnoyarsk/M
+Krause/M
+kraut/S!
+Krebs/M
+Kremlin/M
+Kremlinologist/MS
+Kremlinology/MS
+Kresge/M
+Krieger/M
+kriegspiel/M
+krill/MS
+Kringle/M
+Krisha/M
+Krishnah/M
+Krishna/M
+Kris/M
+Krispin/M
+Krissie/M
+Krissy/M
+Kristal/M
+Krista/M
+Kristan/M
+Kristel/M
+Kriste/M
+Kristen/M
+Kristian/M
+Kristie/M
+Kristien/M
+Kristi/MN
+Kristina/M
+Kristine/M
+Kristin/M
+Kristofer/M
+Kristoffer/M
+Kristofor/M
+Kristoforo/M
+Kristo/MS
+Kristopher/M
+Kristy/M
+Kristyn/M
+Kr/M
+Kroc/M
+Kroger/M
+króna/M
+Kronecker/M
+krone/RM
+kronor
+krónur
+Kropotkin/M
+Krueger/M
+Kruger/M
+Krugerrand/S
+Krupp/M
+Kruse/M
+krypton/SM
+Krystalle/M
+Krystal/M
+Krysta/M
+Krystle/M
+Krystyna/M
+ks
+K's
+KS
+k's/IE
+kt
+Kublai/M
+Kubrick/M
+kuchen/MS
+kudos/M
+kudzu/SM
+Kuenning/M
+Kuhn/M
+Kuibyshev/M
+Ku/M
+Kumar/M
+kumquat/SM
+Kunming/M
+Kuomintang/M
+Kurdish/M
+Kurdistan/SM
+Kurd/SM
+Kurosawa/M
+Kurtis/M
+Kurt/M
+kurtosis/M
+Kusch/M
+Kuwaiti/SM
+Kuwait/M
+Kuznetsk/M
+Kuznets/M
+kvetch/DSG
+kw
+kW
+Kwakiutl/M
+Kwangchow's
+Kwangju/M
+Kwanzaa/S
+kWh
+KY
+Kyla/M
+kyle/M
+Kyle/M
+Kylen/M
+Kylie/M
+Kylila/M
+Kylynn/M
+Ky/MH
+Kym/M
+Kynthia/M
+Kyoto/M
+Kyrgyzstan
+Kyrstin/M
+Kyushu/M
+L
+LA
+Laban/M
+labeled/U
+labeler/M
+label/GAZRDS
+labellings/A
+label's
+labial/YS
+labia/M
+labile
+labiodental
+labium/M
+laboratory/MS
+laboredness/M
+labored/PMY
+labored's/U
+laborer/M
+laboring/MY
+laborings/U
+laboriousness/MS
+laborious/PY
+labor/RDMJSZG
+laborsaving
+Labradorean/S
+Labrador/SM
+lab/SM
+Lab/SM
+laburnum/SM
+labyrinthine
+labyrinth/M
+labyrinths
+laced/U
+Lacee/M
+lace/MS
+lacerate/NGVXDS
+laceration/M
+lacer/M
+laces/U
+lacewing/MS
+Lacey/M
+Lachesis/M
+lachrymal/S
+lachrymose
+Lacie/M
+lacing/M
+lackadaisic
+lackadaisical/Y
+Lackawanna/M
+lacker/M
+lackey/SMDG
+lack/GRDMS
+lackluster/S
+Lac/M
+laconic
+laconically
+lacquerer/M
+lacquer/ZGDRMS
+lacrosse/MS
+lac/SGMDR
+lactate/MNGSDX
+lactational/Y
+lactation/M
+lacteal
+lactic
+lactose/MS
+lacunae
+lacuna/M
+Lacy/M
+lacy/RT
+ladder/GDMS
+laddie/MS
+laded/U
+ladened
+ladening
+laden/U
+lade/S
+lading/M
+ladle/SDGM
+Ladoga/M
+Ladonna/M
+lad/XGSJMND
+ladybird/SM
+ladybug/MS
+ladyfinger/SM
+ladylike/U
+ladylove/MS
+Ladyship/MS
+ladyship/SM
+lady/SM
+Lady/SM
+Laetitia/M
+laetrile/S
+Lafayette/M
+Lafitte/M
+lager/DMG
+laggard/MYSP
+laggardness/M
+lagged
+lagging/MS
+lagniappe/SM
+lagoon/MS
+Lagos/M
+Lagrange/M
+Lagrangian/M
+Laguerre/M
+Laguna/M
+lag/ZSR
+Lahore/M
+laid/AI
+Laidlaw/M
+lain
+Laina/M
+Lainey/M
+Laird/M
+laird/MS
+lair/GDMS
+laissez
+laity/SM
+Laius/M
+lake/DSRMG
+Lakehurst/M
+Lakeisha/M
+laker/M
+lakeside
+Lakewood/M
+Lakisha/M
+Lakshmi/M
+lallygagged
+lallygagging
+lallygag/S
+Lalo/M
+La/M
+Lamaism/SM
+Lamarck/M
+Lamar/M
+lamasery/MS
+lama/SM
+Lamaze
+lambada/S
+lambaste/SDG
+lambda/SM
+lambency/MS
+lambent/Y
+Lambert/M
+lambkin/MS
+Lamb/M
+Lamborghini/M
+lambskin/MS
+lamb/SRDMG
+lambswool
+lamebrain/SM
+lamed/M
+lameness/MS
+lamentableness/M
+lamentable/P
+lamentably
+lamentation/SM
+lament/DGSB
+lamented/U
+lame/SPY
+la/MHLG
+laminae
+lamina/M
+laminar
+laminate/XNGSD
+lamination/M
+lam/MDRSTG
+lammed
+lammer
+lamming
+Lammond/M
+Lamond/M
+Lamont/M
+L'Amour
+lampblack/SM
+lamplighter/M
+lamplight/ZRMS
+lampooner/M
+lampoon/RDMGS
+Lamport/M
+lamppost/SM
+lamprey/MS
+lamp/SGMRD
+lampshade/MS
+LAN
+Lanae/M
+Lanai/M
+lanai/SM
+Lana/M
+Lancashire/M
+Lancaster/M
+Lancelot/M
+Lance/M
+lancer/M
+lance/SRDGMZ
+lancet/MS
+landau/MS
+lander/I
+landfall/SM
+landfill/DSG
+landforms
+landholder/M
+landhold/JGZR
+landing/M
+Landis/M
+landlady/MS
+landless
+landlines
+landlocked
+landlord/MS
+landlubber/SM
+Land/M
+landmark/GSMD
+landmass/MS
+Landon/M
+landowner/MS
+landownership/M
+landowning/SM
+Landry/M
+Landsat
+landscape/GMZSRD
+landscaper/M
+lands/I
+landslide/MS
+landslid/G
+landslip
+landsman/M
+landsmen
+land/SMRDJGZ
+Landsteiner/M
+landward/S
+Landwehr/M
+Lane/M
+lane/SM
+Lanette/M
+Laney/M
+Langeland/M
+Lange/M
+Langerhans/M
+Langford/M
+Langland/M
+Langley/M
+Lang/M
+Langmuir/M
+Langsdon/M
+Langston/M
+language/MS
+languidness/MS
+languid/PY
+languisher/M
+languishing/Y
+languish/SRDG
+languorous/Y
+languor/SM
+Lanie/M
+Lani/M
+Lanita/M
+lankiness/SM
+lankness/MS
+lank/PTYR
+lanky/PRT
+Lanna/M
+Lannie/M
+Lanni/M
+Lanny/M
+lanolin/MS
+Lansing/M
+lantern/GSDM
+lanthanide/M
+lanthanum/MS
+lanyard/MS
+Lanzhou
+Laocoon/M
+Lao/SM
+Laotian/MS
+lapboard/MS
+lapdog/S
+lapel/MS
+lapidary/MS
+lapin/MS
+Laplace/M
+Lapland/ZMR
+lapped
+lappet/MS
+lapping
+Lapp/SM
+lapsed/A
+lapse/KSDMG
+lapser/MA
+lapses/A
+lapsing/A
+lap/SM
+laps/SRDG
+laptop/SM
+lapwing/MS
+Laraine/M
+Lara/M
+Laramie/M
+larboard/MS
+larcenist/S
+larcenous
+larceny/MS
+larch/MS
+larder/M
+lard/MRDSGZ
+Lardner/M
+lardy/RT
+Laredo/M
+largehearted
+largemouth
+largeness/SM
+large/SRTYP
+largess/SM
+largish
+largo/S
+lariat/MDGS
+Lari/M
+Larina/M
+Larine/M
+Larisa/M
+Larissa/M
+larker/M
+lark/GRDMS
+Lark/M
+larkspur/MS
+Larousse/M
+Larry/M
+Larsen/M
+Lars/NM
+Larson/M
+larvae
+larval
+larva/M
+laryngeal/YS
+larynges
+laryngitides
+laryngitis/M
+larynx/M
+Laryssa/M
+lasagna/S
+lasagne's
+Lascaux/M
+lasciviousness/MS
+lascivious/YP
+lase
+laser/M
+lashed/U
+lasher/M
+lashing/M
+lash/JGMSRD
+Lassa/M
+Lassen/M
+Lassie/M
+lassie/SM
+lassitude/MS
+lassoer/M
+lasso/GRDMS
+las/SRZG
+lass/SM
+laster/M
+lastingness/M
+lasting/PY
+last/JGSYRD
+Laszlo/M
+Latasha/M
+Latashia/M
+latching/M
+latchkey/SM
+latch's
+latch/UGSD
+latecomer/SM
+lated/A
+late/KA
+lately
+latency/MS
+lateness/MS
+latent/YS
+later/A
+lateral/GDYS
+lateralization
+Lateran/M
+latest/S
+LaTeX/M
+latex/MS
+lathe/M
+latherer/M
+lather/RDMG
+lathery
+lathing/M
+lath/MSRDGZ
+Lathrop/M
+laths
+Latia/M
+latices/M
+Latina/SM
+Latinate
+Latino/S
+Latin/RMS
+latish
+Latisha/M
+latitude/SM
+latitudinal/Y
+latitudinarian/S
+latitudinary
+Lat/M
+Latonya/M
+Latoya/M
+Latrena/M
+Latrina/M
+latrine/MS
+Latrobe/M
+lat/SDRT
+latter/YM
+latte/SR
+lattice/SDMG
+latticework/MS
+latticing/M
+Lattimer/M
+Latvia/M
+Latvian/S
+laudably
+laudanum/MS
+laudatory
+Lauderdale/M
+lauder/M
+Lauder/M
+Laud/MR
+laud/RDSBG
+lauds/M
+Laue/M
+laughableness/M
+laughable/P
+laughably
+laugh/BRDZGJ
+laugher/M
+laughing/MY
+laughingstock/SM
+laughs
+laughter/MS
+Laughton/M
+Launce/M
+launch/AGSD
+launcher/MS
+launching/S
+launchpad/S
+laundered/U
+launderer/M
+launderette/MS
+launder/SDRZJG
+laundress/MS
+laundrette/S
+laundromat/S
+Laundromat/SM
+laundryman/M
+laundrymen
+laundry/MS
+laundrywoman/M
+laundrywomen
+Lauraine/M
+Lauralee/M
+Laural/M
+laura/M
+Laura/M
+Laurasia/M
+laureate/DSNG
+laureateship/SM
+Lauree/M
+Laureen/M
+Laurella/M
+Laurel/M
+laurel/SGMD
+Laure/M
+Laurena/M
+Laurence/M
+Laurene/M
+Lauren/SM
+Laurentian
+Laurent/M
+Lauretta/M
+Laurette/M
+Laurianne/M
+Laurice/M
+Laurie/M
+Lauri/M
+Lauritz/M
+Lauryn/M
+Lausanne/M
+lavage/MS
+lavaliere/MS
+Laval/M
+lava/SM
+lavatory/MS
+lave/GDS
+Lavena/M
+lavender/MDSG
+Laverna/M
+Laverne/M
+Lavern/M
+Lavina/M
+Lavinia/M
+Lavinie/M
+lavishness/MS
+lavish/SRDYPTG
+Lavoisier/M
+Lavonne/M
+Lawanda/M
+lawbreaker/SM
+lawbreaking/MS
+Lawford/M
+lawfulness/SMU
+lawful/PUY
+lawgiver/MS
+lawgiving/M
+lawlessness/MS
+lawless/PY
+Law/M
+lawmaker/MS
+lawmaking/SM
+lawman/M
+lawmen
+lawnmower/S
+lawn/SM
+Lawrence/M
+Lawrenceville/M
+lawrencium/SM
+Lawry/M
+law/SMDG
+Lawson/M
+lawsuit/MS
+Lawton/M
+lawyer/DYMGS
+laxativeness/M
+laxative/PSYM
+laxer/A
+laxes/A
+laxity/SM
+laxness/SM
+lax/PTSRY
+layabout/MS
+Layamon/M
+layaway/S
+lay/CZGSR
+layered/C
+layer/GJDM
+layering/M
+layer's/IC
+layette/SM
+Layla/M
+Lay/M
+layman/M
+laymen
+Layne/M
+Layney/M
+layoff/MS
+layout/SM
+layover/SM
+laypeople
+layperson/S
+lays/AI
+Layton/M
+layup/MS
+laywoman/M
+laywomen
+Lazare/M
+Lazar/M
+Lazaro/M
+Lazarus/M
+laze/DSG
+lazily
+laziness/MS
+lazuli/M
+lazybones/M
+lazy/PTSRDG
+lb
+LBJ/M
+lbs
+LC
+LCD
+LCM
+LDC
+leachate
+Leach/M
+leach/SDG
+Leadbelly/M
+leaded/U
+leadenness/M
+leaden/PGDY
+leaderless
+leader/M
+leadership/MS
+lead/SGZXJRDN
+leadsman/M
+leadsmen
+leafage/MS
+leaf/GSDM
+leafhopper/M
+leafiness/M
+leafless
+leaflet/SDMG
+leafstalk/SM
+leafy/PTR
+leaguer/M
+league/RSDMZG
+Leah/M
+leakage/SM
+leaker/M
+Leakey/M
+leak/GSRDM
+leakiness/MS
+leaky/PRT
+Lea/M
+lea/MS
+Leander/M
+Leandra/M
+leaner/M
+leaning/M
+Lean/M
+Leanna/M
+Leanne/M
+leanness/MS
+Leann/M
+Leanora/M
+Leanor/M
+lean/YRDGTJSP
+leaper/M
+leapfrogged
+leapfrogging
+leapfrog/SM
+leap/RDGZS
+Lear/M
+learnedly
+learnedness/M
+learned/UA
+learner/M
+learning/M
+learns/UA
+learn/SZGJRD
+Leary/M
+lease/ARSDG
+leaseback/MS
+leaseholder/M
+leasehold/SRMZ
+leaser/MA
+lease's
+leash's
+leash/UGSD
+leasing/M
+leas/SRDGZ
+least/S
+leastwise
+leatherette/S
+leather/MDSG
+leathern
+leatherneck/SM
+leathery
+leaven/DMJGS
+leavened/U
+leavening/M
+Leavenworth/M
+leaver/M
+leaves/M
+leave/SRDJGZ
+leaving/M
+Lebanese
+Lebanon/M
+Lebbie/M
+lebensraum
+Lebesgue/M
+Leblanc/M
+lecher/DMGS
+lecherousness/MS
+lecherous/YP
+lechery/MS
+lecithin/SM
+lectern/SM
+lecturer/M
+lecture/RSDZMG
+lectureship/SM
+led
+Leda/M
+Lederberg/M
+ledger/DMG
+ledge/SRMZ
+LED/SM
+Leeanne/M
+Leeann/M
+leech/MSDG
+Leeds/M
+leek/SM
+Leelah/M
+Leela/M
+Leeland/M
+Lee/M
+lee/MZRS
+Leena/M
+leer/DG
+leeriness/MS
+leering/Y
+leery/PTR
+Leesa/M
+Leese/M
+Leeuwenhoek/M
+Leeward/M
+leeward/S
+leeway/MS
+leftism/SM
+leftist/SM
+leftmost
+leftover/MS
+Left/S
+left/TRS
+leftward/S
+Lefty/M
+lefty/SM
+legacy/MS
+legalese/MS
+legalism/SM
+legalistic
+legality/MS
+legalization/MS
+legalize/DSG
+legalized/U
+legal/SY
+legate/AXCNGSD
+legatee/MS
+legate's/C
+legation/AMC
+legato/SM
+legendarily
+legendary/S
+Legendre/M
+legend/SM
+legerdemain/SM
+Leger/SM
+legged
+legginess/MS
+legging/MS
+leggy/PRT
+leghorn/SM
+Leghorn/SM
+legibility/MS
+legible
+legibly
+legionary/S
+legionnaire/SM
+legion/SM
+legislate/SDXVNG
+legislation/M
+legislative/SY
+legislator/SM
+legislature/MS
+legitimacy/MS
+legitimate/SDNGY
+legitimation/M
+legitimatize/SDG
+legitimization/MS
+legitimize/RSDG
+legit/S
+legless
+legman/M
+legmen
+leg/MS
+Lego/M
+Legra/M
+Legree/M
+legroom/MS
+legstraps
+legume/SM
+leguminous
+legwork/SM
+Lehigh/M
+Lehman/M
+Leia/M
+Leibniz/M
+Leicester/SM
+Leiden/M
+Leif/M
+Leigha/M
+Leigh/M
+Leighton/M
+Leilah/M
+Leila/M
+lei/MS
+Leipzig/M
+Leisha/M
+leisureliness/MS
+leisurely/P
+leisure/SDYM
+leisurewear
+leitmotif/SM
+leitmotiv/MS
+Lek/M
+Lelah/M
+Lela/M
+Leland/M
+Lelia/M
+Lemaitre/M
+Lemar/M
+Lemke/M
+Lem/M
+lemma/MS
+lemme/GJ
+Lemmie/M
+lemming/M
+Lemmy/M
+lemonade/SM
+lemon/GSDM
+lemony
+Lemuel/M
+Lemuria/M
+lemur/MS
+Lena/M
+Lenard/M
+Lenci/M
+lender/M
+lend/SRGZ
+Lenee/M
+Lenette/M
+lengthener/M
+lengthen/GRD
+lengthily
+lengthiness/MS
+length/MNYX
+lengths
+lengthwise
+lengthy/TRP
+lenience/S
+leniency/MS
+lenient/SY
+Leningrad/M
+Leninism/M
+Leninist
+Lenin/M
+lenitive/S
+Lenka/M
+Len/M
+Le/NM
+Lenna/M
+Lennard/M
+Lennie/M
+Lennon/M
+Lenny/M
+Lenoir/M
+Leno/M
+Lenora/M
+Lenore/M
+lens/SRDMJGZ
+lent/A
+lenticular
+lentil/SM
+lento/S
+Lent/SMN
+Leodora/M
+Leoine/M
+Leola/M
+Leoline/M
+Leo/MS
+Leona/M
+Leonanie/M
+Leonard/M
+Leonardo/M
+Leoncavallo/M
+Leonelle/M
+Leonel/M
+Leone/M
+Leonerd/M
+Leonhard/M
+Leonidas/M
+Leonid/M
+Leonie/M
+leonine
+Leon/M
+Leonora/M
+Leonore/M
+Leonor/M
+Leontine/M
+Leontyne/M
+leopardess/SM
+leopard/MS
+leopardskin
+Leopold/M
+Leopoldo/M
+Leopoldville/M
+Leora/M
+leotard/MS
+leper/SM
+Lepidus/M
+Lepke/M
+leprechaun/SM
+leprosy/MS
+leprous
+lepta
+lepton/SM
+Lepus/M
+Lerner/M
+Leroi/M
+Leroy/M
+Lesa/M
+lesbianism/MS
+lesbian/MS
+Leshia/M
+lesion/DMSG
+Lesley/M
+Leslie/M
+Lesli/M
+Lesly/M
+Lesotho/M
+lessee/MS
+lessen/GDS
+Lesseps/M
+lesser
+lesses
+Lessie/M
+lessing
+lesson/DMSG
+lessor/MS
+less/U
+Lester/M
+lest/R
+Les/Y
+Lesya/M
+Leta/M
+letdown/SM
+lethality/M
+lethal/YS
+Letha/M
+lethargic
+lethargically
+lethargy/MS
+Lethe/M
+Lethia/M
+Leticia/M
+Letisha/M
+let/ISM
+Letitia/M
+Letizia/M
+Letta/M
+letterbox/S
+lettered/U
+letterer/M
+letterhead/SM
+lettering/M
+letter/JSZGRDM
+letterman/M
+Letterman/M
+lettermen
+letterpress/MS
+Lettie/M
+Letti/M
+letting/S
+lettuce/SM
+Letty/M
+letup/MS
+leukemia/SM
+leukemic/S
+leukocyte/MS
+Leupold/M
+Levant/M
+leveeing
+levee/SDM
+leveled/U
+leveler/M
+levelheadedness/S
+levelheaded/P
+leveling/U
+levelness/SM
+level/STZGRDYP
+leverage/MGDS
+lever/SDMG
+Levesque/M
+Levey/M
+Leviathan
+leviathan/MS
+levier/M
+Levi/MS
+Levine/M
+Levin/M
+levitate/XNGDS
+levitation/M
+Leviticus/M
+Levitt/M
+levity/MS
+Lev/M
+Levon/M
+Levy/M
+levy/SRDZG
+lewdness/MS
+lewd/PYRT
+Lewellyn/M
+Lewes
+Lewie/M
+Lewinsky/M
+lewis/M
+Lewis/M
+Lewiss
+Lew/M
+lex
+lexeme/MS
+lexical/Y
+lexicographer/MS
+lexicographic
+lexicographical/Y
+lexicography/SM
+lexicon/SM
+Lexie/M
+Lexi/MS
+Lexine/M
+Lexington/M
+Lexus/M
+Lexy/M
+Leyden/M
+Leyla/M
+Lezley/M
+Lezlie/M
+lg
+Lhasa/SM
+Lhotse/M
+liability/SAM
+liable/AP
+liaise/GSD
+liaison/SM
+Lia/M
+Liam/M
+Liana/M
+Liane/M
+Lian/M
+Lianna/M
+Lianne/M
+liar/MS
+libation/SM
+libbed
+Libbey/M
+Libbie/M
+Libbi/M
+libbing
+Libby/M
+libeler/M
+libel/GMRDSZ
+libelous/Y
+Liberace/M
+liberalism/MS
+liberality/MS
+liberalization/SM
+liberalized/U
+liberalize/GZSRD
+liberalizer/M
+liberalness/MS
+liberal/YSP
+liberate/NGDSCX
+liberationists
+liberation/MC
+liberator/SCM
+Liberia/M
+Liberian/S
+libertarianism/M
+libertarian/MS
+libertine/MS
+liberty/MS
+libidinal
+libidinousness/M
+libidinous/PY
+libido/MS
+Lib/M
+lib/MS
+librarian/MS
+library/MS
+Libra/SM
+libretoes
+libretos
+librettist/MS
+libretto/MS
+Libreville/M
+Librium/M
+Libya/M
+Libyan/S
+lice/M
+licensed/AU
+licensee/SM
+license/MGBRSD
+licenser/M
+licenses/A
+licensing/A
+licensor/M
+licentiate/MS
+licentiousness/MS
+licentious/PY
+Licha/M
+lichee's
+lichen/DMGS
+Lichtenstein/M
+Lichter/M
+licit/Y
+licked/U
+lickerish
+licker/M
+lick/GRDSJ
+licking/M
+licorice/SM
+Lida/M
+lidded
+lidding
+Lidia/M
+lidless
+lid/MS
+lido/MS
+Lieberman/M
+Liebfraumilch/M
+Liechtenstein/RMZ
+lied/MR
+lie/DRS
+Lief/M
+liefs/A
+lief/TSR
+Liege/M
+liege/SR
+Lie/M
+lien/SM
+lier/IMA
+lies/A
+Liesa/M
+lieu/SM
+lieut
+lieutenancy/MS
+lieutenant/SM
+Lieut/M
+lifeblood/SM
+lifeboat/SM
+lifebuoy/S
+lifeforms
+lifeguard/MDSG
+lifelessness/SM
+lifeless/PY
+lifelikeness/M
+lifelike/P
+lifeline/SM
+lifelong
+life/MZR
+lifer/M
+lifesaver/SM
+lifesaving/S
+lifespan/S
+lifestyle/S
+lifetaking
+lifetime/MS
+lifework/MS
+LIFO
+lifter/M
+lift/GZMRDS
+liftoff/MS
+ligament/MS
+ligand/MS
+ligate/XSDNG
+ligation/M
+ligature/DSGM
+light/ADSCG
+lighted/U
+lightener/M
+lightening/M
+lighten/ZGDRS
+lighter/CM
+lightered
+lightering
+lighters
+lightest
+lightface/SDM
+lightheaded
+lightheartedness/MS
+lighthearted/PY
+lighthouse/MS
+lighting/MS
+lightly
+lightness/MS
+lightning/SMD
+lightproof
+light's
+lightship/SM
+lightweight/S
+ligneous
+lignite/MS
+lignum
+likability/MS
+likableness/MS
+likable/P
+likeability's
+liked/E
+likelihood/MSU
+likely/UPRT
+likeness/MSU
+liken/GSD
+liker/E
+liker's
+likes/E
+likest
+like/USPBY
+likewise
+liking/SM
+lilac/MS
+Lilah/M
+Lila/SM
+Lilia/MS
+Liliana/M
+Liliane/M
+Lilian/M
+Lilith/M
+Liliuokalani/M
+Lilla/M
+Lille/M
+Lillian/M
+Lillie/M
+Lilli/MS
+lilliputian/S
+Lilliputian/SM
+Lilliput/M
+Lilllie/M
+Lilly/M
+Lil/MY
+Lilongwe/M
+lilting/YP
+lilt/MDSG
+Lilyan/M
+Lily/M
+lily/MSD
+Lima/M
+Limbaugh/M
+limbered/U
+limberness/SM
+limber/RDYTGP
+limbers/U
+limbic
+limbless
+Limbo
+limbo/GDMS
+limb/SGZRDM
+Limburger/SM
+limeade/SM
+lime/DSMG
+limekiln/M
+limelight/DMGS
+limerick/SM
+limestone/SM
+limitability
+limitably
+limitation/MCS
+limit/CSZGRD
+limitedly/U
+limitedness/M
+limited/PSY
+limiter/M
+limiting/S
+limitlessness/SM
+limitless/PY
+limit's
+limn/GSD
+Limoges/M
+limo/S
+limousine/SM
+limper/M
+limpet/SM
+limpidity/MS
+limpidness/SM
+limpid/YP
+limpness/MS
+Limpopo/M
+limp/SGTPYRD
+Li/MY
+limy/TR
+linage/MS
+Lina/M
+linchpin/MS
+Linc/M
+Lincoln/SM
+Linda/M
+Lindbergh/M
+Lindberg/M
+linden/MS
+Lindholm/M
+Lindie/M
+Lindi/M
+Lind/M
+Lindon/M
+Lindquist/M
+Lindsay/M
+Lindsey/M
+Lindstrom/M
+Lindsy/M
+Lindy/M
+line/AGDS
+lineage/SM
+lineal/Y
+Linea/M
+lineament/MS
+linearity/MS
+linearize/SDGNB
+linear/Y
+linebacker/SM
+lined/U
+linefeed
+Linell/M
+lineman/M
+linemen
+linen/SM
+liner/SM
+line's
+linesman/M
+linesmen
+Linet/M
+Linette/M
+lineup/S
+lingerer/M
+lingerie/SM
+lingering/Y
+linger/ZGJRD
+lingoes
+lingo/M
+lingual/SY
+lingua/M
+linguine
+linguini's
+linguistically
+linguistic/S
+linguistics/M
+linguist/SM
+ling/ZR
+liniment/MS
+lining/SM
+linkable
+linkage/SM
+linked/A
+linker/S
+linking/S
+Link/M
+link's
+linkup/S
+link/USGD
+Lin/M
+Linnaeus/M
+Linnea/M
+Linnell/M
+Linnet/M
+linnet/SM
+Linnie/M
+Linn/M
+Linoel/M
+linoleum/SM
+lino/M
+Linotype/M
+linseed/SM
+lintel/SM
+linter/M
+Linton/M
+lint/SMR
+linty/RST
+Linus/M
+Linux/M
+Linwood/M
+Linzy/M
+Lionello/M
+Lionel/M
+lioness/SM
+lionhearted
+lionization/SM
+lionizer/M
+lionize/ZRSDG
+Lion/M
+lion/MS
+lipase/M
+lipid/MS
+lip/MS
+liposuction/S
+lipped
+lipper
+Lippi/M
+lipping
+Lippmann/M
+lippy/TR
+lipread/GSRJ
+Lipschitz/M
+Lipscomb/M
+lipstick/MDSG
+Lipton/M
+liq
+liquefaction/SM
+liquefier/M
+liquefy/DRSGZ
+liqueur/DMSG
+liquidate/GNXSD
+liquidation/M
+liquidator/SM
+liquidity/SM
+liquidizer/M
+liquidize/ZGSRD
+liquidness/M
+liquid/SPMY
+liquorice/SM
+liquorish
+liquor/SDMG
+lira/M
+Lira/M
+lire
+Lisabeth/M
+Lisa/M
+Lisbeth/M
+Lisbon/M
+Lise/M
+Lisetta/M
+Lisette/M
+Lisha/M
+Lishe/M
+Lisle/M
+lisle/SM
+lisper/M
+lisp/MRDGZS
+Lissajous/M
+Lissa/M
+Lissie/M
+Lissi/M
+Liss/M
+lissomeness/M
+lissome/P
+lissomness/M
+Lissy/M
+listed/U
+listener/M
+listen/ZGRD
+Listerine/M
+lister/M
+Lister/M
+listing/M
+list/JMRDNGZXS
+listlessness/SM
+listless/PY
+Liston/M
+Liszt/M
+Lita/M
+litany/MS
+litchi/SM
+literacy/MS
+literalism/M
+literalistic
+literalness/MS
+literal/PYS
+literariness/SM
+literary/P
+literate/YNSP
+literati
+literation/M
+literature/SM
+liter/M
+lite/S
+litheness/SM
+lithe/PRTY
+lithesome
+lithium/SM
+lithograph/DRMGZ
+lithographer/M
+lithographic
+lithographically
+lithographs
+lithography/MS
+lithology/M
+lithosphere/MS
+lithospheric
+Lithuania/M
+Lithuanian/S
+litigant/MS
+litigate/NGXDS
+litigation/M
+litigator/SM
+litigiousness/MS
+litigious/PY
+litmus/SM
+litotes/M
+lit/RZS
+littérateur/S
+litterbug/SM
+litter/SZGRDM
+Little/M
+littleneck/M
+littleness/SM
+little/RSPT
+Littleton/M
+Litton/M
+littoral/S
+liturgical/Y
+liturgic/S
+liturgics/M
+liturgist/MS
+liturgy/SM
+Liuka/M
+livability/MS
+livableness/M
+livable/U
+livably
+Liva/M
+lived/A
+livelihood/SM
+liveliness/SM
+livelong/S
+lively/RTP
+liveness/M
+liven/SDG
+liver/CSGD
+liveried
+liverish
+Livermore/M
+Liverpool/M
+Liverpudlian/MS
+liver's
+liverwort/SM
+liverwurst/SM
+livery/CMS
+liveryman/MC
+liverymen/C
+lives/A
+lives's
+livestock/SM
+live/YHZTGJDSRPB
+Livia/M
+lividness/M
+livid/YP
+livingness/M
+Livingstone/M
+Livingston/M
+living/YP
+Liv/M
+Livonia/M
+Livvie/M
+Livvy/M
+Livvyy/M
+Livy/M
+Lizabeth/M
+Liza/M
+lizard/MS
+Lizbeth/M
+Lizette/M
+Liz/M
+Lizzie/M
+Lizzy/M
+l/JGVXT
+Ljubljana/M
+LL
+llama/SM
+llano/SM
+LLB
+ll/C
+LLD
+Llewellyn/M
+Lloyd/M
+Llywellyn/M
+LNG
+lo
+loadable
+loaded/A
+loader/MU
+loading/MS
+load's/A
+loads/A
+loadstar's
+loadstone's
+load/SURDZG
+loafer/M
+Loafer/S
+loaf/SRDMGZ
+loam/SMDG
+loamy/RT
+loaner/M
+loaning/M
+loan/SGZRDMB
+loansharking/S
+loanword/S
+loathe
+loather/M
+loathing/M
+loath/JPSRDYZG
+loathness/M
+loathsomeness/MS
+loathsome/PY
+loaves/M
+Lobachevsky/M
+lobar
+lobbed
+lobber/MS
+lobbing
+lobby/GSDM
+lobbyist/MS
+lobe/SM
+lob/MDSG
+lobotomist
+lobotomize/GDS
+lobotomy/MS
+lobster/MDGS
+lobularity
+lobular/Y
+lobule/SM
+locale/MS
+localisms
+locality/MS
+localization/MS
+localized/U
+localizer/M
+localizes/U
+localize/ZGDRS
+local/SGDY
+locatable
+locate/AXESDGN
+locater/M
+locational/Y
+location/EMA
+locative/S
+locator's
+Lochinvar/M
+loch/M
+lochs
+loci/M
+lockable
+Lockean/M
+locked/A
+Locke/M
+locker/SM
+locket/SM
+Lockhart/M
+Lockheed/M
+Lockian/M
+locking/S
+lockjaw/SM
+Lock/M
+locknut/M
+lockout/MS
+lock's
+locksmithing/M
+locksmith/MG
+locksmiths
+lockstep/S
+lock/UGSD
+lockup/MS
+Lockwood/M
+locomotion/SM
+locomotive/YMS
+locomotor
+locomotory
+loco/SDMG
+locoweed/MS
+locus/M
+locust/SM
+locution/MS
+lode/SM
+lodestar/MS
+lodestone/MS
+lodged/E
+lodge/GMZSRDJ
+Lodge/M
+lodgepole
+lodger/M
+lodges/E
+lodging/M
+lodgment/M
+Lodovico/M
+Lodowick/M
+Lodz
+Loeb/M
+Loella/M
+Loewe/M
+Loewi/M
+lofter/M
+loftily
+loftiness/SM
+loft/SGMRD
+lofty/PTR
+loganberry/SM
+Logan/M
+logarithmic
+logarithmically
+logarithm/MS
+logbook/MS
+loge/SMNX
+logged/U
+loggerhead/SM
+logger/SM
+loggia/SM
+logging/MS
+logicality/MS
+logicalness/M
+logical/SPY
+logician/SM
+logic/SM
+login/S
+logion/M
+logistical/Y
+logistic/MS
+logjam/SM
+LOGO
+logo/SM
+logotype/MS
+logout
+logrolling/SM
+log's/K
+log/SM
+logy/RT
+Lohengrin/M
+loincloth/M
+loincloths
+loin/SM
+Loire/M
+Loise/M
+Lois/M
+loiterer/M
+loiter/RDJSZG
+Loki/M
+Lola/M
+Loleta/M
+Lolita/M
+loller/M
+lollipop/MS
+loll/RDGS
+Lolly/M
+lolly/SM
+Lombardi/M
+Lombard/M
+Lombardy/M
+Lomb/M
+Lome
+Lona/M
+Londonderry/M
+Londoner/M
+London/RMZ
+Lonee/M
+loneliness/SM
+lonely/TRP
+loneness/M
+lone/PYZR
+loner/M
+lonesomeness/MS
+lonesome/PSY
+longboat/MS
+longbow/SM
+longed/K
+longeing
+longer/K
+longevity/MS
+Longfellow/M
+longhair/SM
+longhand/SM
+longhorn/SM
+longing/MY
+longish
+longitude/MS
+longitudinal/Y
+long/JGTYRDPS
+Long/M
+longness/M
+longshoreman/M
+longshoremen
+longsighted
+longs/K
+longstanding
+Longstreet/M
+longsword
+longterm
+longtime
+Longueuil/M
+longueur/SM
+longways
+longword/SM
+Loni/M
+Lon/M
+Lonna/M
+Lonnard/M
+Lonnie/M
+Lonni/M
+Lonny/M
+loofah/M
+loofahs
+lookahead
+lookalike/S
+looker/M
+look/GZRDS
+lookout/MS
+lookup/SM
+looming/M
+Loomis/M
+loom/MDGS
+loon/MS
+loony/SRT
+looper/M
+loophole/MGSD
+loop/MRDGS
+loopy/TR
+loosed/U
+looseleaf
+loosener/M
+looseness/MS
+loosen/UDGS
+loose/SRDPGTY
+looses/U
+loosing/M
+looter/M
+loot/MRDGZS
+loper/M
+lope/S
+Lopez/M
+lopped
+lopper/MS
+lopping
+lop/SDRG
+lopsidedness/SM
+lopsided/YP
+loquaciousness/MS
+loquacious/YP
+loquacity/SM
+Loraine/M
+Lorain/M
+Loralee/M
+Loralie/M
+Loralyn/M
+Lora/M
+Lorant/M
+lording/M
+lordliness/SM
+lordly/PTR
+Lord/MS
+lord/MYDGS
+lordship/SM
+Lordship/SM
+Loree/M
+Loreen/M
+Lorelei/M
+Lorelle/M
+lore/MS
+Lorena/M
+Lorene/M
+Loren/SM
+Lorentzian/M
+Lorentz/M
+Lorenza/M
+Lorenz/M
+Lorenzo/M
+Loretta/M
+Lorette/M
+lorgnette/SM
+Loria/M
+Lorianna/M
+Lorianne/M
+Lorie/M
+Lorilee/M
+Lorilyn/M
+Lori/M
+Lorinda/M
+Lorine/M
+Lorin/M
+loris/SM
+Lorita/M
+lorn
+Lorna/M
+Lorne/M
+Lorraine/M
+Lorrayne/M
+Lorre/M
+Lorrie/M
+Lorri/M
+Lorrin/M
+lorryload/S
+Lorry/M
+lorry/SM
+Lory/M
+Los
+loser/M
+lose/ZGJBSR
+lossage
+lossless
+loss/SM
+lossy/RT
+lost/P
+Lothaire/M
+Lothario/MS
+lotion/MS
+Lot/M
+lot/MS
+Lotta/M
+lotted
+Lotte/M
+lotter
+lottery/MS
+Lottie/M
+Lotti/M
+lotting
+Lott/M
+lotto/MS
+Lotty/M
+lotus/SM
+louden/DG
+loudhailer/S
+loudly/RT
+loudmouth/DM
+loudmouths
+loudness/MS
+loudspeaker/SM
+loudspeaking
+loud/YRNPT
+Louella/M
+Louie/M
+Louisa/M
+Louise/M
+Louisette/M
+Louisiana/M
+Louisianan/S
+Louisianian/S
+Louis/M
+Louisville/M
+Lou/M
+lounger/M
+lounge/SRDZG
+Lourdes/M
+lour/GSD
+louse/CSDG
+louse's
+lousewort/M
+lousily
+lousiness/MS
+lousy/PRT
+loutishness/M
+loutish/YP
+Loutitia/M
+lout/SGMD
+louver/DMS
+L'Ouverture
+Louvre/M
+lovableness/MS
+lovable/U
+lovably
+lovebird/SM
+lovechild
+Lovecraft/M
+love/DSRMYZGJB
+loved/U
+Lovejoy/M
+Lovelace/M
+Loveland/M
+lovelessness/M
+loveless/YP
+lovelies
+lovelinesses
+loveliness/UM
+Lovell/M
+lovelornness/M
+lovelorn/P
+lovely/URPT
+Love/M
+lovemaking/SM
+lover/YMG
+lovesick
+lovestruck
+lovingly
+lovingness/M
+loving/U
+lowborn
+lowboy/SM
+lowbrow/MS
+lowdown/S
+Lowell/M
+Lowe/M
+lowercase/GSD
+lower/DG
+lowermost
+Lowery/M
+lowish
+lowland/RMZS
+Lowlands/M
+lowlife/SM
+lowlight/MS
+lowliness/MS
+lowly/PTR
+lowness/MS
+low/PDRYSZTG
+Lowrance/M
+lox/MDSG
+loyaler
+loyalest
+loyal/EY
+loyalism/SM
+loyalist/SM
+loyalty/EMS
+Loyang/M
+Loydie/M
+Loyd/M
+Loy/M
+Loyola/M
+lozenge/SDM
+LP
+LPG
+LPN/S
+Lr
+ls
+l's
+L's
+LSD
+ltd
+Ltd/M
+Lt/M
+Luanda/M
+Luann/M
+luau/MS
+lubber/YMS
+Lubbock/M
+lube/DSMG
+lubricant/SM
+lubricate/VNGSDX
+lubrication/M
+lubricator/MS
+lubricious/Y
+lubricity/SM
+Lubumbashi/M
+Lucais/M
+Luca/MS
+Luce/M
+lucent/Y
+Lucerne/M
+Lucho/M
+Lucia/MS
+Luciana/M
+Lucian/M
+Luciano/M
+lucidity/MS
+lucidness/MS
+lucid/YP
+Lucie/M
+Lucien/M
+Lucienne/M
+Lucifer/M
+Lucila/M
+Lucile/M
+Lucilia/M
+Lucille/M
+Luci/MN
+Lucina/M
+Lucinda/M
+Lucine/M
+Lucio/M
+Lucita/M
+Lucite/MS
+Lucius/M
+luck/GSDM
+luckier/U
+luckily/U
+luckiness/UMS
+luckless
+Lucknow/M
+Lucky/M
+lucky/RSPT
+lucrativeness/SM
+lucrative/YP
+lucre/MS
+Lucretia/M
+Lucretius/M
+lucubrate/GNSDX
+lucubration/M
+Lucy/M
+Luddite/SM
+Ludhiana/M
+ludicrousness/SM
+ludicrous/PY
+Ludlow/M
+Ludmilla/M
+ludo/M
+Ludovico/M
+Ludovika/M
+Ludvig/M
+Ludwig/M
+Luella/M
+Luelle/M
+luff/GSDM
+Lufthansa/M
+Luftwaffe/M
+luge/MC
+Luger/M
+luggage/SM
+lugged
+lugger/SM
+lugging
+Lugosi/M
+lug/RS
+lugsail/SM
+lugubriousness/MS
+lugubrious/YP
+Luigi/M
+Luisa/M
+Luise/M
+Luis/M
+Lukas/M
+Luke/M
+lukewarmness/SM
+lukewarm/PY
+Lula/M
+Lulita/M
+lullaby/GMSD
+lull/SDG
+lulu/M
+Lulu/M
+Lu/M
+lumbago/SM
+lumbar/S
+lumberer/M
+lumbering/M
+lumberjack/MS
+lumberman/M
+lumbermen
+lumber/RDMGZSJ
+lumberyard/MS
+lumen/M
+Lumière/M
+luminance/M
+luminary/MS
+luminescence/SM
+luminescent
+luminosity/MS
+luminousness/M
+luminous/YP
+lummox/MS
+lumper/M
+lumpiness/MS
+lumpishness/M
+lumpish/YP
+lump/SGMRDN
+lumpy/TPR
+lunacy/MS
+Luna/M
+lunar/S
+lunary
+lunate/YND
+lunatic/S
+lunation/M
+luncheonette/SM
+luncheon/SMDG
+luncher/M
+lunch/GMRSD
+lunchpack
+lunchroom/MS
+lunchtime/MS
+Lundberg/M
+Lund/M
+Lundquist/M
+lune/M
+lunge/MS
+lunger/M
+lungfish/SM
+lungful
+lung/SGRDM
+lunkhead/SM
+Lupe/M
+lupine/SM
+Lupus/M
+lupus/SM
+Lura/M
+lurcher/M
+lurch/RSDG
+lure/DSRG
+lurer/M
+Lurette/M
+lurex
+Luria/M
+luridness/SM
+lurid/YP
+lurker/M
+lurk/GZSRD
+Lurleen/M
+Lurlene/M
+Lurline/M
+Lusaka/M
+Lusa/M
+lusciousness/MS
+luscious/PY
+lushness/MS
+lush/YSRDGTP
+Lusitania/M
+luster/GDM
+lustering/M
+lusterless
+lustfulness/M
+lustful/PY
+lustily
+lustiness/MS
+lust/MRDGZS
+lustrousness/M
+lustrous/PY
+lusty/PRT
+lutanist/MS
+lute/DSMG
+lutenist/MS
+Lutero/M
+lutetium/MS
+Lutheranism/MS
+Lutheran/SM
+Luther/M
+luting/M
+Lutz
+Luxembourgian
+Luxembourg/RMZ
+Luxemburg's
+luxe/MS
+luxuriance/MS
+luxuriant/Y
+luxuriate/GNSDX
+luxuriation/M
+luxuriousness/SM
+luxurious/PY
+luxury/MS
+Luz/M
+Luzon/M
+L'vov
+Lyallpur/M
+lyceum/MS
+lychee's
+lycopodium/M
+Lycra/S
+Lycurgus/M
+Lyda/M
+Lydia/M
+Lydian/S
+Lydie/M
+Lydon/M
+lye/JSMG
+Lyell/M
+lying/Y
+Lyle/M
+Lyly/M
+Lyman/M
+Lyme/M
+lymphatic/S
+lymph/M
+lymphocyte/SM
+lymphoid
+lymphoma/MS
+lymphs
+Ly/MY
+Lynchburg/M
+lyncher/M
+lynching/M
+Lynch/M
+lynch/ZGRSDJ
+Lynda/M
+Lyndell/M
+Lyndel/M
+Lynde/M
+Lyndon/M
+Lyndsay/M
+Lyndsey/M
+Lyndsie/M
+Lyndy/M
+Lynea/M
+Lynelle/M
+Lynette/M
+Lynett/M
+Lyn/M
+Lynna/M
+Lynnea/M
+Lynnelle/M
+Lynnell/M
+Lynne/M
+Lynnet/M
+Lynnette/M
+Lynnett/M
+Lynn/M
+Lynsey/M
+lynx/MS
+Lyon/SM
+Lyra/M
+lyrebird/MS
+lyre/SM
+lyricalness/M
+lyrical/YP
+lyricism/SM
+lyricist/SM
+lyric/S
+Lysenko/M
+lysine/M
+Lysistrata/M
+Lysol/M
+Lyssa/M
+LyX/M
+MA
+Maalox/M
+ma'am
+Mabelle/M
+Mabel/M
+Mable/M
+Mab/M
+macabre/Y
+macadamize/SDG
+macadam/SM
+Macao/M
+macaque/SM
+macaroni/SM
+macaroon/MS
+Macarthur/M
+MacArthur/M
+Macaulay/M
+macaw/SM
+Macbeth/M
+Maccabees/M
+Maccabeus/M
+Macdonald/M
+MacDonald/M
+MacDraw/M
+Macedonia/M
+Macedonian/S
+Macedon/M
+mace/MS
+Mace/MS
+macerate/DSXNG
+maceration/M
+macer/M
+Macgregor/M
+MacGregor/M
+machete/SM
+Machiavellian/S
+Machiavelli/M
+machinate/SDXNG
+machination/M
+machinelike
+machine/MGSDB
+machinery/SM
+machinist/MS
+machismo/SM
+Mach/M
+macho/S
+Machs
+Macias/M
+Macintosh/M
+MacIntosh/M
+macintosh's
+Mackenzie/M
+MacKenzie/M
+mackerel/SM
+Mackinac/M
+Mackinaw
+mackinaw/SM
+mackintosh/SM
+mack/M
+Mack/M
+MacLeish/M
+Macmillan/M
+MacMillan/M
+Macon/SM
+MacPaint/M
+macramé/S
+macrobiotic/S
+macrobiotics/M
+macrocosm/MS
+macrodynamic
+macroeconomic/S
+macroeconomics/M
+macromolecular
+macromolecule/SM
+macron/MS
+macrophage/SM
+macroscopic
+macroscopically
+macrosimulation
+macro/SM
+macrosocioeconomic
+Mac/SGMD
+mac/SGMDR
+Macy/M
+Madagascan/SM
+Madagascar/M
+Madalena/M
+Madalyn/M
+Mada/M
+madame/M
+Madame/MS
+madam/SM
+madcap/S
+Maddalena/M
+madded
+madden/GSD
+maddening/Y
+Madden/M
+madder/MS
+maddest
+Maddie/M
+Maddi/M
+madding
+Maddox/M
+Maddy/M
+made/AU
+Madeira/SM
+Madelaine/M
+Madeleine/M
+Madelena/M
+Madelene/M
+Madelina/M
+Madeline/M
+Madelin/M
+Madella/M
+Madelle/M
+Madel/M
+Madelon/M
+Madelyn/M
+mademoiselle/MS
+Madge/M
+madhouse/SM
+Madhya/M
+Madison/M
+Madlen/M
+Madlin/M
+madman/M
+madmen
+madness/SM
+Madonna/MS
+mad/PSY
+Madras
+madras/SM
+Madrid/M
+madrigal/MSG
+Madsen/M
+Madurai/M
+madwoman/M
+madwomen
+Mady/M
+Maegan/M
+Maelstrom/M
+maelstrom/SM
+Mae/M
+maestro/MS
+Maeterlinck/M
+Mafia/MS
+mafia/S
+mafiosi
+mafioso/M
+Mafioso/S
+MAG
+magazine/DSMG
+Magdaia/M
+Magdalena/M
+Magdalene/M
+Magdalen/M
+Magda/M
+Magellanic
+Magellan/M
+magenta/MS
+magged
+Maggee/M
+Maggie/M
+Maggi/M
+magging
+maggot/MS
+maggoty/RT
+Maggy/M
+magi
+magical/Y
+magician/MS
+magicked
+magicking
+magic/SM
+Magill/M
+Magi/M
+Maginot/M
+magisterial/Y
+magistracy/MS
+magistrate/MS
+Mag/M
+magma/SM
+magnanimity/SM
+magnanimosity
+magnanimous/PY
+magnate/SM
+magnesia/MS
+magnesite/M
+magnesium/SM
+magnetically
+magnetic/S
+magnetics/M
+magnetism/SM
+magnetite/SM
+magnetizable
+magnetization/ASCM
+magnetize/CGDS
+magnetized/U
+magnetodynamics
+magnetohydrodynamical
+magnetohydrodynamics/M
+magnetometer/MS
+magneto/MS
+magnetosphere/M
+magnetron/M
+magnet/SM
+magnification/M
+magnificence/SM
+magnificent/Y
+magnified/U
+magnify/DRSGNXZ
+magniloquence/MS
+magniloquent
+Magnitogorsk/M
+magnitude/SM
+magnolia/SM
+Magnum
+magnum/SM
+Magnuson/M
+Magog/M
+Magoo/M
+magpie/SM
+Magritte/M
+Magruder/M
+mag/S
+Magsaysay/M
+Maguire/SM
+Magus/M
+Magyar/MS
+Mahabharata
+Mahala/M
+Mahalia/M
+maharajah/M
+maharajahs
+maharanee's
+maharani/MS
+Maharashtra/M
+maharishi/SM
+mahatma/SM
+Mahavira/M
+Mahayana/M
+Mahayanist
+Mahdi/M
+Mahfouz/M
+Mahican/SM
+mahjong's
+Mahler/M
+Mahmoud/M
+Mahmud/M
+mahogany/MS
+Mahomet's
+mahout/SM
+Maia/M
+Maible/M
+maidenhair/MS
+maidenhead/SM
+maidenhood/SM
+maidenly/P
+maiden/YM
+maidservant/MS
+maid/SMNX
+maier
+Maier/M
+Maiga/M
+Maighdiln/M
+Maigret/M
+mailbag/MS
+mailbox/MS
+mail/BSJGZMRD
+mailer/M
+Mailer/M
+Maillol/M
+maillot/SM
+mailman/M
+mailmen
+Maiman/M
+maimedness/M
+maimed/P
+maimer/M
+Maimonides/M
+Mai/MR
+maim/SGZRD
+mainbrace/M
+Maine/MZR
+Mainer/M
+mainframe/MS
+mainlander/M
+mainland/SRMZ
+mainliner/M
+mainline/RSDZG
+mainly
+mainmast/SM
+main/SA
+mainsail/SM
+mains/M
+mainspring/SM
+mainstay/MS
+mainstream/DRMSG
+maintainability
+maintainable/U
+maintain/BRDZGS
+maintained/U
+maintainer/M
+maintenance/SM
+maintop/SM
+maiolica's
+Maire/M
+Mair/M
+Maisey/M
+Maisie/M
+maisonette/MS
+Maison/M
+Maitilde/M
+maize/MS
+Maj
+Maje/M
+majestic
+majestically
+majesty/MS
+Majesty/MS
+majolica/SM
+Majorca/M
+major/DMGS
+majordomo/S
+majorette/SM
+majority/SM
+Major/M
+Majuro/M
+makable
+Makarios/M
+makefile/S
+makeover/S
+Maker/M
+maker/SM
+makeshift/S
+make/UGSA
+makeup/MS
+making/SM
+Malabar/M
+Malabo/M
+Malacca/M
+Malachi/M
+malachite/SM
+maladapt/DV
+maladjust/DLV
+maladjustment/MS
+maladministration
+maladroitness/MS
+maladroit/YP
+malady/MS
+Malagasy/M
+malaise/SM
+Mala/M
+Malamud/M
+malamute/SM
+Malanie/M
+malaprop
+malapropism/SM
+Malaprop/M
+malarial
+malaria/MS
+malarious
+malarkey/SM
+malathion/S
+Malawian/S
+Malawi/M
+Malayalam/M
+Malaya/M
+Malayan/MS
+Malaysia/M
+Malaysian/S
+Malay/SM
+Malchy/M
+Malcolm/M
+malcontentedness/M
+malcontented/PY
+malcontent/SMD
+Maldive/SM
+Maldivian/S
+Maldonado/M
+maledict
+malediction/MS
+malefaction/MS
+malefactor/MS
+malefic
+maleficence/MS
+maleficent
+Male/M
+Malena/M
+maleness/MS
+male/PSM
+malevolence/S
+malevolencies
+malevolent/Y
+malfeasance/SM
+malfeasant
+malformation/MS
+malformed
+malfunction/SDG
+Malia/M
+Malian/S
+Malibu/M
+malice/MGSD
+maliciousness/MS
+malicious/YU
+malignancy/SM
+malignant/YS
+malign/GSRDYZ
+malignity/MS
+Mali/M
+Malina/M
+Malinda/M
+Malinde/M
+malingerer/M
+malinger/GZRDS
+Malinowski/M
+Malissa/M
+Malissia/M
+mallard/SM
+Mallarmé/M
+malleability/SM
+malleableness/M
+malleable/P
+mallet/MS
+Mallissa/M
+Mallorie/M
+Mallory/M
+mallow/MS
+mall/SGMD
+Mal/M
+malnourished
+malnutrition/SM
+malocclusion/MS
+malodorous
+Malone/M
+Malorie/M
+Malory/M
+malposed
+malpractice/SM
+Malraux/M
+Malta/M
+malted/S
+Maltese
+Malthusian/S
+Malthus/M
+malting/M
+maltose/SM
+maltreat/GDSL
+maltreatment/S
+malt/SGMD
+malty/RT
+Malva/M
+Malvina/M
+Malvin/M
+Malynda/M
+mama/SM
+mamba/SM
+mambo/GSDM
+Mame/M
+Mamet/M
+ma/MH
+Mamie/M
+mammalian/SM
+mammal/SM
+mammary
+mamma's
+mammogram/S
+mammography/S
+Mammon's
+mammon/SM
+mammoth/M
+mammoths
+mammy/SM
+Mamore/M
+manacle/SDMG
+manageability/S
+manageableness
+manageable/U
+managed/U
+management/SM
+manageress/M
+managerial/Y
+manager/M
+managership/M
+manage/ZLGRSD
+Managua/M
+Manama/M
+mañana/M
+mañanas
+Manasseh/M
+manatee/SM
+Manaus's
+Manchester/M
+Manchu/MS
+Manchuria/M
+Manchurian/S
+Mancini/M
+manciple/M
+Mancunian/MS
+mandala/SM
+Mandalay/M
+Manda/M
+mandamus/GMSD
+Mandarin
+mandarin/MS
+mandate/SDMG
+mandatory/S
+Mandela
+Mandelbrot/M
+Mandel/M
+mandible/MS
+mandibular
+Mandie/M
+Mandi/M
+Mandingo/M
+mandolin/MS
+mandrake/MS
+mandrel/SM
+mandrill/SM
+Mandy/M
+mange/GSD
+mane/MDS
+Manet/M
+maneuverability/MS
+maneuverer/M
+maneuver/MRDSGB
+Manfred/M
+manful/Y
+manganese/MS
+mange/GMSRDZ
+manger/M
+manginess/S
+mangler/M
+mangle/RSDG
+mangoes
+mango/M
+mangrove/MS
+mangy/PRT
+manhandle/GSD
+Manhattan/SM
+manhole/MS
+manhood/MS
+manhunt/SM
+maniacal/Y
+maniac/SM
+mania/SM
+manically
+Manichean/M
+manic/S
+manicure/MGSD
+manicurist/SM
+manifestation/SM
+manifesto/GSDM
+manifest/YDPGS
+manifolder/M
+manifold/GPYRDMS
+manifoldness/M
+manikin/MS
+Manila/MS
+manila/S
+manilla's
+Mani/M
+manioc/SM
+manipulability
+manipulable
+manipulate/SDXBVGN
+manipulative/PM
+manipulator/MS
+manipulatory
+Manitoba/M
+Manitoulin/M
+Manitowoc/M
+mankind/M
+Mankowski/M
+Manley/M
+manlike
+manliness/SM
+manliness's/U
+manly/URPT
+manna/MS
+manned/U
+mannequin/MS
+mannered/U
+mannerism/SM
+mannerist/M
+mannerliness/MU
+mannerly/UP
+manner/SDYM
+Mann/GM
+Mannheim/M
+Mannie/M
+mannikin's
+Manning/M
+manning/U
+mannishness/SM
+mannish/YP
+Manny/M
+Manolo/M
+Mano/M
+manometer/SM
+Manon/M
+manorial
+manor/MS
+manpower/SM
+manqué/M
+man's
+mansard/SM
+manservant/M
+manse/XNM
+Mansfield/M
+mansion/M
+manslaughter/SM
+Man/SM
+Manson/M
+mans/S
+manta/MS
+Mantegna/M
+mantelpiece/MS
+mantel/SM
+mantes
+mantilla/MS
+mantissa/SM
+mantis/SM
+mantle/ESDG
+Mantle/M
+mantle's
+mantling/M
+mantra/MS
+mantrap/SM
+manual/SMY
+Manuela/M
+Manuel/M
+manufacture/JZGDSR
+manufacturer/M
+manumission/MS
+manumit/S
+manumitted
+manumitting
+manure/RSDMZG
+manuscript/MS
+man/USY
+Manville/M
+Manx
+many
+Manya/M
+Maoism/MS
+Maoist/S
+Mao/M
+Maori/SM
+Maplecrest/M
+maple/MS
+mapmaker/S
+mappable
+mapped/UA
+mapper/S
+mapping/MS
+Mapplethorpe/M
+maps/AU
+map/SM
+Maputo/M
+Marabel/M
+marabou/MS
+marabout's
+Maracaibo/M
+maraca/MS
+Mara/M
+maraschino/SM
+Marathi
+marathoner/M
+Marathon/M
+marathon/MRSZ
+Marat/M
+marauder/M
+maraud/ZGRDS
+marbleize/GSD
+marble/JRSDMG
+marbler/M
+marbling/M
+Marceau/M
+Marcela/M
+Marcelia/M
+Marcelino/M
+Marcella/M
+Marcelle/M
+Marcellina/M
+Marcelline/M
+Marcello/M
+Marcellus/M
+Marcel/M
+Marcelo/M
+Marchall/M
+Marchelle/M
+marcher/M
+marchioness/SM
+March/MS
+march/RSDZG
+Marcia/M
+Marciano/M
+Marcie/M
+Marcile/M
+Marcille/M
+Marci/M
+Marc/M
+Marconi/M
+Marco/SM
+Marcotte/M
+Marcus/M
+Marcy/M
+Mardi/SM
+Marduk/M
+Mareah/M
+mare/MS
+Marena/M
+Maren/M
+Maressa/M
+Margalit/M
+Margalo/M
+Marga/M
+Margareta/M
+Margarete/M
+Margaretha/M
+Margarethe/M
+Margaret/M
+Margaretta/M
+Margarette/M
+margarine/MS
+Margarita/M
+margarita/SM
+Margarito/M
+Margaux/M
+Margeaux/M
+Marge/M
+Margery/M
+Marget/M
+Margette/M
+Margie/M
+Margi/M
+marginalia
+marginality
+marginalization
+marginalize/SDG
+marginal/YS
+margin/GSDM
+Margit/M
+Margo/M
+Margot/M
+Margrethe/M
+Margret/M
+Marguerite/M
+Margy/M
+mariachi/SM
+maria/M
+Maria/M
+Mariam/M
+Mariana/SM
+Marian/MS
+Marianna/M
+Marianne/M
+Mariann/M
+Mariano/M
+Maribelle/M
+Maribel/M
+Maribeth/M
+Maricela/M
+Marice/M
+Maridel/M
+Marieann/M
+Mariejeanne/M
+Mariele/M
+Marielle/M
+Mariellen/M
+Mariel/M
+Marie/M
+Marietta/M
+Mariette/M
+Marigold/M
+marigold/MS
+Marijn/M
+Marijo/M
+marijuana/SM
+Marika/M
+Marilee/M
+Marilin/M
+Marillin/M
+Marilyn/M
+marimba/SM
+Mari/MS
+marinade/MGDS
+Marina/M
+marina/MS
+marinara/SM
+marinate/NGXDS
+marination/M
+mariner/M
+Marine/S
+marine/ZRS
+Marin/M
+Marinna/M
+Marino/M
+Mario/M
+marionette/MS
+Marion/M
+Mariquilla/M
+Marisa/M
+Mariska/M
+Marisol/M
+Marissa/M
+Maritain/M
+marital/Y
+Marita/M
+maritime/R
+Maritsa/M
+Maritza/M
+Mariupol/M
+Marius/M
+Mariya/M
+Marja/M
+Marje/M
+Marjie/M
+Marji/M
+Marj/M
+marjoram/SM
+Marjorie/M
+Marjory/M
+Marjy/M
+Markab/M
+markdown/SM
+marked/AU
+markedly
+marker/M
+marketability/SM
+marketable/U
+Marketa/M
+marketeer/S
+marketer/M
+market/GSMRDJBZ
+marketing/M
+marketplace/MS
+mark/GZRDMBSJ
+Markham/M
+marking/M
+Markism/M
+markkaa
+markka/M
+Mark/MS
+Markos
+Markov
+Markovian
+Markovitz/M
+marks/A
+marksman/M
+marksmanship/S
+marksmen
+markup/SM
+Markus/M
+Marla/M
+Marlane/M
+Marlboro/M
+Marlborough/M
+Marleah/M
+Marlee/M
+Marleen/M
+Marlena/M
+Marlene/M
+Marley/M
+Marlie/M
+Marline/M
+marlinespike/SM
+Marlin/M
+marlin/SM
+marl/MDSG
+Marlo/M
+Marlon/M
+Marlowe/M
+Marlow/M
+Marlyn/M
+Marmaduke/M
+marmalade/MS
+Marmara/M
+marmoreal
+marmoset/MS
+marmot/SM
+Marna/M
+Marne/M
+Marney/M
+Marnia/M
+Marnie/M
+Marni/M
+maroon/GRDS
+marquee/MS
+Marquesas/M
+marque/SM
+marquess/MS
+marquetry/SM
+Marquette/M
+Marquez/M
+marquise/M
+marquisette/MS
+Marquis/M
+marquis/SM
+Marquita/M
+Marrakesh/M
+marred/U
+marriageability/SM
+marriageable
+marriage/ASM
+married/US
+Marrilee/M
+marring
+Marriott/M
+Marris/M
+Marrissa/M
+marrowbone/MS
+marrow/GDMS
+marry/SDGA
+mar/S
+Marseillaise/SM
+Marseilles
+Marseille's
+marshal/GMDRSZ
+Marshalled/M
+marshaller
+Marshall/GDM
+Marshalling/M
+marshallings
+Marshal/M
+Marsha/M
+marshiness/M
+marshland/MS
+Marsh/M
+marshmallow/SM
+marsh/MS
+marshy/PRT
+Marsiella/M
+Mar/SMN
+marsupial/MS
+Martainn/M
+Marta/M
+Martelle/M
+Martel/M
+marten/M
+Marten/M
+Martguerita/M
+Martha/M
+Marthe/M
+Marthena/M
+Martial
+martial/Y
+Martian/S
+Martica/M
+Martie/M
+Marti/M
+Martina/M
+martinet/SM
+Martinez/M
+martingale/MS
+martini/MS
+Martinique/M
+Martin/M
+Martino/M
+martin/SM
+Martinson/M
+Martita/M
+mart/MDNGXS
+Mart/MN
+Marty/M
+Martyn/M
+Martynne/M
+martyrdom/SM
+martyr/GDMS
+Marva/M
+marvel/DGS
+Marvell/M
+marvelous/PY
+Marve/M
+Marven/M
+Marvin/M
+Marv/NM
+Marwin/M
+Marxian/S
+Marxism/SM
+Marxist/SM
+Marx/M
+Marya/M
+Maryanna/M
+Maryanne/M
+Maryann/M
+Marybelle/M
+Marybeth/M
+Maryellen/M
+Maryjane/M
+Maryjo/M
+Maryland/MZR
+Marylee/M
+Marylinda/M
+Marylin/M
+Maryl/M
+Marylou/M
+Marylynne/M
+Mary/M
+Maryrose/M
+Marys
+Marysa/M
+marzipan/SM
+Masada/M
+Masai/M
+Masaryk/M
+masc
+Mascagni/M
+mascara/SGMD
+mascot/SM
+masculineness/M
+masculine/PYS
+masculinity/SM
+Masefield/M
+maser/M
+Maseru/M
+MASH
+Masha/M
+Mashhad/M
+mash/JGZMSRD
+m/ASK
+masked/U
+masker/M
+mask/GZSRDMJ
+masks/U
+masochism/MS
+masochistic
+masochistically
+masochist/MS
+masonic
+Masonic
+Masonite/M
+masonry/MS
+mason/SDMG
+Mason/SM
+masquerader/M
+masquerade/RSDGMZ
+masquer/M
+masque/RSMZ
+Massachusetts/M
+massacre/DRSMG
+massager/M
+massage/SRDMG
+Massasoit/M
+Massenet/M
+masseur/MS
+masseuse/SM
+Massey/M
+massif/SM
+Massimiliano/M
+Massimo/M
+massing/R
+massiveness/SM
+massive/YP
+massless
+mas/SRZ
+Mass/S
+mass/VGSD
+mastectomy/MS
+masterclass
+mastered/A
+masterfulness/M
+masterful/YP
+master/JGDYM
+masterliness/M
+masterly/P
+mastermind/GDS
+masterpiece/MS
+mastership/M
+Master/SM
+masterstroke/MS
+masterwork/S
+mastery/MS
+mast/GZSMRD
+masthead/SDMG
+masticate/SDXGN
+mastication/M
+mastic/SM
+mastiff/MS
+mastodon/MS
+mastoid/S
+masturbate/SDNGX
+masturbation/M
+masturbatory
+matador/SM
+Mata/M
+matchable/U
+match/BMRSDZGJ
+matchbook/SM
+matchbox/SM
+matched/UA
+matcher/M
+matches/A
+matchless/Y
+matchlock/MS
+matchmake/GZJR
+matchmaker/M
+matchmaking/M
+matchplay
+match's/A
+matchstick/MS
+matchwood/SM
+mated/U
+mate/IMS
+Matelda/M
+Mateo/M
+materialism/SM
+materialistic
+materialistically
+materialist/SM
+materiality/M
+materialization/SM
+materialize/CDS
+materialized/A
+materializer/SM
+materializes/A
+materializing
+materialness/M
+material/SPYM
+matériel/MS
+mater/M
+maternal/Y
+maternity/MS
+mates/U
+mathematical/Y
+Mathematica/M
+mathematician/SM
+mathematic/S
+mathematics/M
+Mathematik/M
+Mather/M
+Mathe/RM
+Mathew/MS
+Mathewson/M
+Mathian/M
+Mathias
+Mathieu/M
+Mathilda/M
+Mathilde/M
+Mathis
+math/M
+maths
+Matias/M
+Matilda/M
+Matilde/M
+matinée/S
+mating/M
+matins/M
+Matisse/SM
+matriarchal
+matriarch/M
+matriarchs
+matriarchy/MS
+matrices
+matricidal
+matricide/MS
+matriculate/XSDGN
+matriculation/M
+matrimonial/Y
+matrimony/SM
+matrix/M
+matron/YMS
+mat/SJGMDR
+Matsumoto/M
+matte/JGMZSRD
+Mattel/M
+Matteo/M
+matter/GDM
+Matterhorn/M
+Matthaeus/M
+Mattheus/M
+Matthew/MS
+Matthias
+Matthieu/M
+Matthiew/M
+Matthus/M
+Mattias/M
+Mattie/M
+Matti/M
+matting/M
+mattins's
+Matt/M
+mattock/MS
+mattress/MS
+matt's
+Matty/M
+maturate/DSNGVX
+maturational
+maturation/M
+matureness/M
+maturer/M
+mature/RSDTPYG
+maturity/MS
+matzo/SHM
+matzot
+Maude/M
+Maudie/M
+maudlin/Y
+Maud/M
+Maugham/M
+Maui/M
+mauler/M
+maul/RDGZS
+maunder/GDS
+Maupassant/M
+Maura/M
+Maureene/M
+Maureen/M
+Maure/M
+Maurene/M
+Mauriac/M
+Maurice/M
+Mauricio/M
+Maurie/M
+Maurine/M
+Maurise/M
+Maurita/M
+Mauritania/M
+Mauritanian/S
+Mauritian/S
+Mauritius/M
+Maurits/M
+Maurizia/M
+Maurizio/M
+Maurois/M
+Mauro/M
+Maury/M
+Mauser/M
+mausoleum/SM
+mauve/SM
+maven/S
+maverick/SMDG
+mavin's
+Mavis/M
+Mavra/M
+mawkishness/SM
+mawkish/PY
+Mawr/M
+maw/SGMD
+max/GDS
+Maxie/M
+maxillae
+maxilla/M
+maxillary/S
+Maxi/M
+maximality
+maximal/SY
+maxima's
+Maximilian/M
+Maximilianus/M
+Maximilien/M
+maximization/SM
+maximizer/M
+maximize/RSDZG
+Maxim/M
+Maximo/M
+maxim/SM
+maximum/MYS
+Maxine/M
+maxi/S
+Max/M
+Maxtor/M
+Maxwellian
+maxwell/M
+Maxwell/M
+Maxy/M
+Maya/MS
+Mayan/S
+Maybelle/M
+maybe/S
+mayday/S
+may/EGS
+Maye/M
+mayer
+Mayer/M
+mayest
+Mayfair/M
+Mayflower/M
+mayflower/SM
+mayfly/MS
+mayhap
+mayhem/MS
+Maynard/M
+Mayne/M
+Maynord/M
+mayn't
+Mayo/M
+mayonnaise/MS
+mayoral
+mayoralty/MS
+mayoress/MS
+Mayor/M
+mayor/MS
+mayorship/M
+mayo/S
+maypole/MS
+Maypole/SM
+Mayra/M
+May/SMR
+mayst
+Mazama/M
+Mazarin/M
+Mazatlan/M
+Mazda/M
+mazedness/SM
+mazed/YP
+maze/MGDSR
+mazurka/SM
+Mazzini/M
+Mb
+MB
+MBA
+Mbabane/M
+Mbini/M
+MC
+McAdam/MS
+McAllister/M
+McBride/M
+McCabe/M
+McCain/M
+McCall/M
+McCarthyism/M
+McCarthy/M
+McCartney/M
+McCarty/M
+McCauley/M
+McClain/M
+McClellan/M
+McClure/M
+McCluskey/M
+McConnell/M
+McCormick/M
+McCoy/SM
+McCracken/M
+McCray/M
+McCullough/M
+McDaniel/M
+McDermott/M
+McDonald/M
+McDonnell/M
+McDougall/M
+McDowell/M
+McElhaney/M
+McEnroe/M
+McFadden/M
+McFarland/M
+McGee/M
+McGill/M
+McGovern/M
+McGowan/M
+McGrath/M
+McGraw/M
+McGregor/M
+McGuffey/M
+McGuire/M
+MCI/M
+McIntosh/M
+McIntyre/M
+McKay/M
+McKee/M
+McKenzie/M
+McKesson/M
+McKinley/M
+McKinney/M
+McKnight/M
+McLanahan/M
+McLaughlin/M
+McLean/M
+McLeod/M
+McLuhan/M
+McMahon/M
+McMartin/M
+McMillan/M
+McNamara/M
+McNaughton/M
+McNeil/M
+McPherson/M
+MD
+Md/M
+mdse
+MDT
+ME
+Meade/M
+Mead/M
+meadowland
+meadowlark/SM
+meadow/MS
+Meadows
+meadowsweet/M
+mead/SM
+Meagan/M
+meagerness/SM
+meager/PY
+Meaghan/M
+meagres
+mealiness/MS
+meal/MDGS
+mealtime/MS
+mealybug/S
+mealymouthed
+mealy/PRST
+meander/JDSG
+meaneing
+meanie/MS
+meaningfulness/SM
+meaningful/YP
+meaninglessness/SM
+meaningless/PY
+meaning/M
+meanness/S
+means/M
+meantime/SM
+meant/U
+meanwhile/S
+Meany/M
+mean/YRGJTPS
+meany's
+Meara/M
+measle/SD
+measles/M
+measly/TR
+measurable/U
+measurably
+measure/BLMGRSD
+measured/Y
+measureless
+measurement/SM
+measurer/M
+measures/A
+measuring/A
+meas/Y
+meataxe
+meatball/MS
+meatiness/MS
+meatless
+meatloaf
+meatloaves
+meat/MS
+meatpacking/S
+meaty/RPT
+Mecca/MS
+mecca/S
+mechanical/YS
+mechanic/MS
+mechanism/SM
+mechanistic
+mechanistically
+mechanist/M
+mechanization/SM
+mechanized/U
+mechanizer/M
+mechanize/RSDZGB
+mechanizes/U
+mechanochemically
+Mechelle/M
+med
+medalist/MS
+medallion/MS
+medal/SGMD
+Medan/M
+meddle/GRSDZ
+meddlesome
+Medea/M
+Medellin
+Medfield/M
+mediaeval's
+medial/AY
+medials
+median/YMS
+media/SM
+mediateness/M
+mediate/PSDYVNGX
+mediation/ASM
+mediator/SM
+Medicaid/SM
+medical/YS
+medicament/MS
+Medicare/MS
+medicate/DSXNGV
+medication/M
+Medici/MS
+medicinal/SY
+medicine/DSMG
+medico/SM
+medic/SM
+medievalist/MS
+medieval/YMS
+Medina/M
+mediocre
+mediocrity/MS
+meditate/NGVXDS
+meditation/M
+meditativeness/M
+meditative/PY
+Mediterranean/MS
+mediumistic
+medium/SM
+medley/SM
+medulla/SM
+Medusa/M
+meed/MS
+meekness/MS
+meek/TPYR
+meerschaum/MS
+meeter/M
+meetinghouse/S
+meeting/M
+meet/JGSYR
+me/G
+mega
+megabit/MS
+megabuck/S
+megabyte/S
+megacycle/MS
+megadeath/M
+megadeaths
+megahertz/M
+megalithic
+megalith/M
+megaliths
+megalomaniac/SM
+megalomania/SM
+megalopolis/SM
+Megan/M
+megaphone/SDGM
+megaton/MS
+megavolt/M
+megawatt/SM
+megaword/S
+Megen/M
+Meggie/M
+Meggi/M
+Meggy/M
+Meghan/M
+Meghann/M
+Meg/MN
+megohm/MS
+Mehetabel/M
+Meier/M
+Meighen/M
+Meiji/M
+Mei/MR
+meioses
+meiosis/M
+meiotic
+Meir/M
+Meister/M
+Meistersinger/M
+Mejia/M
+Mekong/M
+Mela/M
+Melamie/M
+melamine/SM
+melancholia/SM
+melancholic/S
+melancholy/MS
+Melanesia/M
+Melanesian/S
+melange/S
+Melania/M
+Melanie/M
+melanin/MS
+melanoma/SM
+Melantha/M
+Melany/M
+Melba/M
+Melbourne/M
+Melcher/M
+Melchior/M
+meld/SGD
+mêlée/MS
+Melendez/M
+Melesa/M
+Melessa/M
+Melicent/M
+Melina/M
+Melinda/M
+Melinde/M
+meliorate/XSDVNG
+melioration/M
+Melisa/M
+Melisande/M
+Melisandra/M
+Melisenda/M
+Melisent/M
+Melissa/M
+Melisse/M
+Melita/M
+Melitta/M
+Mella/M
+Mellicent/M
+Mellie/M
+mellifluousness/SM
+mellifluous/YP
+Melli/M
+Mellisa/M
+Mellisent/M
+Melloney/M
+Mellon/M
+mellowness/MS
+mellow/TGRDYPS
+Melly/M
+Mel/MY
+Melodee/M
+melodically
+melodic/S
+Melodie/M
+melodiousness/S
+melodious/YP
+melodrama/SM
+melodramatically
+melodramatic/S
+Melody/M
+melody/MS
+Melonie/M
+melon/MS
+Melony/M
+Melosa/M
+Melpomene/M
+meltdown/S
+melter/M
+melting/Y
+Melton/M
+melt/SAGD
+Melva/M
+Melville/M
+Melvin/M
+Melvyn/M
+Me/M
+member/DMS
+membered/AE
+members/EA
+membership/SM
+membrane/MSD
+membranous
+memento/SM
+Memling/M
+memoir/MS
+memorabilia
+memorability/SM
+memorableness/M
+memorable/P
+memorably
+memorandum/SM
+memorialize/DSG
+memorialized/U
+memorial/SY
+memoriam
+memorization/MS
+memorized/U
+memorizer/M
+memorize/RSDZG
+memorizes/A
+memoryless
+memory/MS
+memo/SM
+Memphis/M
+menace/GSD
+menacing/Y
+menagerie/SM
+menage/S
+Menander/M
+menarche/MS
+Menard/M
+Mencius/M
+Mencken/M
+mendaciousness/M
+mendacious/PY
+mendacity/MS
+Mendeleev/M
+mendelevium/SM
+Mendelian
+Mendel/M
+Mendelssohn/M
+mender/M
+Mendez/M
+mendicancy/MS
+mendicant/S
+Mendie/M
+mending/M
+Mendocino/M
+Mendoza/M
+mend/RDSJGZ
+Mendy/M
+Menelaus/M
+Menes/M
+menfolk/S
+menhaden/M
+menial/YS
+meningeal
+meninges
+meningitides
+meningitis/M
+meninx
+menisci
+meniscus/M
+Menkalinan/M
+Menkar/M
+Menkent/M
+Menlo/M
+men/MS
+Mennonite/SM
+Menominee
+menopausal
+menopause/SM
+menorah/M
+menorahs
+Menotti/M
+Mensa/M
+Mensch/M
+mensch/S
+menservants/M
+mens/SDG
+menstrual
+menstruate/NGDSX
+menstruation/M
+mensurable/P
+mensuration/MS
+menswear/M
+mentalist/MS
+mentality/MS
+mental/Y
+mentholated
+menthol/SM
+mentionable/U
+mentioned/U
+mentioner/M
+mention/ZGBRDS
+mentor/DMSG
+Menuhin/M
+menu/SM
+Menzies/M
+meow/DSG
+Mephistopheles/M
+Merak/M
+Mercado/M
+mercantile
+Mercator/M
+Mercedes
+mercenariness/M
+mercenary/SMP
+mercerize/SDG
+Mercer/M
+mercer/SM
+merchandiser/M
+merchandise/SRDJMZG
+merchantability
+merchantman/M
+merchantmen
+merchant/SBDMG
+Mercie/M
+mercifully/U
+mercifulness/M
+merciful/YP
+mercilessness/SM
+merciless/YP
+Merci/M
+Merck/M
+mercurial/SPY
+mercuric
+Mercurochrome/M
+mercury/MS
+Mercury/MS
+Mercy/M
+mercy/SM
+Meredeth/M
+Meredithe/M
+Meredith/M
+Merell/M
+meretriciousness/SM
+meretricious/YP
+mere/YS
+merganser/MS
+merger/M
+merge/SRDGZ
+Meridel/M
+meridian/MS
+meridional
+Meridith/M
+Meriel/M
+Merilee/M
+Merill/M
+Merilyn/M
+meringue/MS
+merino/MS
+Meris
+Merissa/M
+merited/U
+meritocracy/MS
+meritocratic
+meritocrats
+meritoriousness/MS
+meritorious/PY
+merit/SCGMD
+Meriwether/M
+Merla/M
+Merle/M
+Merlina/M
+Merline/M
+merlin/M
+Merlin/M
+Merl/M
+mermaid/MS
+merman/M
+mermen
+Merna/M
+Merola/M
+meromorphic
+Merralee/M
+Merrel/M
+Merriam/M
+Merrick/M
+Merridie/M
+Merrielle/M
+Merrie/M
+Merrilee/M
+Merrile/M
+Merrili/M
+Merrill/M
+merrily
+Merrily/M
+Merrimack/M
+Merrimac/M
+merriment/MS
+merriness/S
+Merritt/M
+Merry/M
+merrymaker/MS
+merrymaking/SM
+merry/RPT
+Mersey/M
+mer/TGDR
+Merton/M
+Mervin/M
+Merv/M
+Merwin/M
+Merwyn/M
+Meryl/M
+Mesa
+Mesabi/M
+mesa/SM
+mescaline/SM
+mescal/SM
+mesdames/M
+mesdemoiselles/M
+Meshed's
+meshed/U
+mesh/GMSD
+mesmeric
+mesmerism/SM
+mesmerized/U
+mesmerizer/M
+mesmerize/SRDZG
+Mesolithic/M
+mesomorph/M
+mesomorphs
+meson/MS
+Mesopotamia/M
+Mesopotamian/S
+mesosphere/MS
+mesozoic
+Mesozoic
+mesquite/MS
+mes/S
+message/SDMG
+messeigneurs
+messenger/GSMD
+Messerschmidt/M
+mess/GSDM
+Messiaen/M
+messiah
+Messiah/M
+messiahs
+Messiahs
+messianic
+Messianic
+messieurs/M
+messily
+messiness/MS
+messmate/MS
+Messrs/M
+messy/PRT
+mestizo/MS
+meta
+metabolic
+metabolically
+metabolism/MS
+metabolite/SM
+metabolize/GSD
+metacarpal/S
+metacarpi
+metacarpus/M
+metacircular
+metacircularity
+metalanguage/MS
+metalization/SM
+metalized
+metallic/S
+metalliferous
+metallings
+metallography/M
+metalloid/M
+metallurgic
+metallurgical/Y
+metallurgist/S
+metallurgy/MS
+metal/SGMD
+metalsmith/MS
+metalworking/M
+metalwork/RMJGSZ
+Meta/M
+metamathematical
+metamorphic
+metamorphism/SM
+metamorphose/GDS
+metamorphosis/M
+metaphoric
+metaphorical/Y
+metaphor/MS
+metaphosphate/M
+metaphysical/Y
+metaphysic/SM
+metastability/M
+metastable
+metastases
+metastasis/M
+metastasize/DSG
+metastatic
+metatarsal/S
+metatarsi
+metatarsus/M
+metatheses
+metathesis/M
+metathesized
+metathesizes
+metathesizing
+metavariable
+metempsychoses
+metempsychosis/M
+meteoric
+meteorically
+meteorite/SM
+meteoritic/S
+meteoritics/M
+meteoroid/SM
+meteorologic
+meteorological
+meteorologist/S
+meteorology/MS
+meteor/SM
+meter/GDM
+mete/ZDGSR
+methadone/SM
+methane/MS
+methanol/SM
+methinks
+methionine/M
+methodicalness/SM
+methodical/YP
+methodism
+Methodism/SM
+methodist/MS
+Methodist/MS
+method/MS
+methodological/Y
+methodologists
+methodology/MS
+methought
+Methuen/M
+Methuselah/M
+Methuselahs
+methylated
+methylene/M
+methyl/SM
+meticulousness/MS
+meticulous/YP
+métier/S
+metonymy/M
+Metrecal/M
+metrical/Y
+metricate/SDNGX
+metricize/GSD
+metrics/M
+metric/SM
+metronome/MS
+metropolis/SM
+metropolitanization
+metropolitan/S
+metro/SM
+mets
+Metternich/M
+mettle/SDM
+mettlesome
+met/U
+Metzler/M
+Meuse/M
+mewl/GSD
+mew/SGD
+mews/SM
+Mex
+Mexicali/M
+Mexican/S
+Mexico/M
+Meyerbeer/M
+Meyer/SM
+mezzanine/MS
+mezzo/S
+MFA
+mfg
+mfr/S
+mg
+M/GB
+Mg/M
+MGM/M
+mgr
+Mgr
+MHz
+MI
+MIA
+Mia/M
+Miami/SM
+Miaplacidus/M
+miasmal
+miasma/SM
+Micaela/M
+Micah/M
+mica/MS
+micelles
+mice/M
+Michaela/M
+Michaelangelo/M
+Michaelina/M
+Michaeline/M
+Michaella/M
+Michaelmas/MS
+Michael/SM
+Michaelson/M
+Michail/M
+Michale/M
+Michal/M
+Micheal/M
+Micheil/M
+Michelangelo/M
+Michele/M
+Michelina/M
+Micheline/M
+Michelin/M
+Michelle/M
+Michell/M
+Michel/M
+Michelson/M
+Michigander/S
+Michiganite/S
+Michigan/M
+Mich/M
+Mickelson/M
+Mickey/M
+mickey/SM
+Mickie/M
+Micki/M
+Mick/M
+Micky/M
+Mic/M
+Micmac/M
+micra's
+microamp
+microanalysis/M
+microanalytic
+microbe/MS
+microbial
+microbicidal
+microbicide/M
+microbiological
+microbiologist/MS
+microbiology/SM
+microbrewery/S
+microchemistry/M
+microchip/S
+microcircuit/MS
+microcode/GSD
+microcomputer/MS
+microcosmic
+microcosm/MS
+microdensitometer
+microdot/MS
+microeconomic/S
+microeconomics/M
+microelectronic/S
+microelectronics/M
+microfiber/S
+microfiche/M
+microfilm/DRMSG
+microfossils
+micrography/M
+microgroove/MS
+microhydrodynamics
+microinstruction/SM
+microjoule
+microlevel
+microlight/S
+micromanage/GDSL
+micromanagement/S
+micrometeorite/MS
+micrometeoritic
+micrometer/SM
+Micronesia/M
+Micronesian/S
+micron/MS
+microorganism/SM
+microphone/SGM
+Microport/M
+microprocessing
+microprocessor/SM
+microprogrammed
+microprogramming
+microprogram/SM
+micro/S
+microscope/SM
+microscopic
+microscopical/Y
+microscopy/MS
+microsecond/MS
+microsimulation/S
+Microsystems
+micros/M
+Microsoft/M
+microsomal
+microstore
+microsurgery/SM
+MicroVAXes
+MicroVAX/M
+microvolt/SM
+microwaveable
+microwave/BMGSD
+microword/S
+midair/MS
+midas
+Midas/M
+midband/M
+midday/MS
+midden/SM
+middest
+middlebrow/SM
+Middlebury/M
+middle/GJRSD
+middleman/M
+middlemen
+middlemost
+Middlesex/M
+Middleton/M
+Middletown/M
+middleweight/SM
+middling/Y
+middy/SM
+Mideastern
+Mideast/M
+midfield/RM
+Midge/M
+midge/SM
+midget/MS
+midi/S
+midland/MRS
+Midland/MS
+midlife
+midlives
+midmorn/G
+midmost/S
+midnight/SYM
+midpoint/MS
+midrange
+midrib/MS
+midriff/MS
+mid/S
+midscale
+midsection/M
+midshipman/M
+midshipmen
+midship/S
+midspan
+midstream/MS
+midst/SM
+midsummer/MS
+midterm/MS
+midtown/MS
+Midway/M
+midway/S
+midweek/SYM
+Midwesterner/M
+Midwestern/ZR
+Midwest/M
+midwicket
+midwifery/SM
+midwife/SDMG
+midwinter/YMS
+midwives
+midyear/MS
+mien/M
+miff/GDS
+mightily
+mightiness/MS
+mightn't
+might/S
+mighty/TPR
+mignon
+mignonette/SM
+Mignon/M
+Mignonne/M
+migraine/SM
+migrant/MS
+migrate/ASDG
+migration/MS
+migrative
+migratory/S
+MIG/S
+Miguela/M
+Miguelita/M
+Miguel/M
+mikado/MS
+Mikaela/M
+Mikael/M
+mike/DSMG
+Mikel/M
+Mike/M
+Mikey/M
+Mikhail/M
+Mikkel/M
+Mikol/M
+Mikoyan/M
+milady/MS
+Milagros/M
+Milanese
+Milan/M
+milch/M
+mildew/DMGS
+mildness/MS
+Mildred/M
+Mildrid/M
+mild/STYRNP
+mileage/SM
+Milena/M
+milepost/SM
+miler/M
+mile/SM
+Mile/SM
+milestone/MS
+Milford/M
+Milicent/M
+milieu/SM
+Milissent/M
+militancy/MS
+militantness/M
+militant/YPS
+militarily
+militarism/SM
+militaristic
+militarist/MS
+militarization/SCM
+militarize/SDCG
+military
+militate/SDG
+militiaman/M
+militiamen
+militia/SM
+Milka/M
+Milken/M
+milker/M
+milk/GZSRDM
+milkiness/MS
+milkmaid/SM
+milkman/M
+milkmen
+milkshake/S
+milksop/SM
+milkweed/MS
+milky/RPT
+millage/S
+Millard/M
+Millay/M
+millenarian
+millenarianism/M
+millennial
+millennialism
+millennium/MS
+millepede's
+miller/M
+Miller/M
+Millet/M
+millet/MS
+milliamp
+milliampere/S
+milliard/MS
+millibar/MS
+Millicent/M
+millidegree/S
+Millie/M
+milligram/MS
+millijoule/S
+Millikan/M
+milliliter/MS
+Milli/M
+millimeter/SM
+milliner/SM
+millinery/MS
+milling/M
+millionaire/MS
+million/HDMS
+millionth/M
+millionths
+millipede/SM
+millisecond/MS
+Millisent/M
+millivoltmeter/SM
+millivolt/SM
+milliwatt/S
+millpond/MS
+millrace/SM
+mill/SGZMRD
+Mill/SMR
+millstone/SM
+millstream/SM
+millwright/MS
+Milly/M
+mil/MRSZ
+Mil/MY
+Milne/M
+Milo/M
+Milquetoast/S
+milquetoast/SM
+Miltiades/M
+Miltie/M
+Milt/M
+milt/MDSG
+Miltonic
+Milton/M
+Miltown/M
+Milty/M
+Milwaukee/M
+Milzie/M
+MIMD
+mime/DSRMG
+mimeograph/GMDS
+mimeographs
+mimer/M
+mimesis/M
+mimetic
+mimetically
+mimicked
+mimicker/SM
+mimicking
+mimicry/MS
+mimic/S
+Mimi/M
+mi/MNX
+Mimosa/M
+mimosa/SM
+Mina/M
+minaret/MS
+minatory
+mincemeat/MS
+mincer/M
+mince/SRDGZJ
+mincing/Y
+Minda/M
+Mindanao/M
+mind/ARDSZG
+mindbogglingly
+minded/P
+minder/M
+mindfully
+mindfulness/MS
+mindful/U
+mindlessness/SM
+mindless/YP
+Mindoro/M
+min/DRZGJ
+mind's
+mindset/S
+Mindy/M
+minefield/MS
+mineralization/C
+mineralized/U
+mineralogical
+mineralogist/SM
+mineralogy/MS
+mineral/SM
+miner/M
+Miner/M
+Minerva/M
+mineshaft
+mine/SNX
+minestrone/MS
+minesweeper/MS
+Minetta/M
+Minette/M
+mineworkers
+mingle/SDG
+Ming/M
+Mingus/M
+miniature/GMSD
+miniaturist/SM
+miniaturization/MS
+miniaturize/SDG
+minibike/S
+minibus/SM
+minicab/M
+minicam/MS
+minicomputer/SM
+minidress/SM
+minify/GSD
+minimalism/S
+minimalistic
+minimalist/MS
+minimality
+minimal/SY
+minima's
+minimax/M
+minimization/MS
+minimized/U
+minimizer/M
+minimize/RSDZG
+minim/SM
+minimum/MS
+mining/M
+minion/M
+mini/S
+miniseries
+miniskirt/MS
+ministerial/Y
+minister/MDGS
+ministrant/S
+ministration/SM
+ministry/MS
+minivan/S
+miniver/M
+minke
+mink/SM
+Min/MR
+Minna/M
+Minnaminnie/M
+Minneapolis/M
+Minne/M
+minnesinger/MS
+Minnesota/M
+Minnesotan/S
+Minnie/M
+Minni/M
+Minn/M
+Minnnie/M
+minnow/SM
+Minny/M
+Minoan/S
+Minolta/M
+minor/DMSG
+minority/MS
+Minor/M
+Minos
+Minotaur/M
+minotaur/S
+Minot/M
+minoxidil/S
+Minsk/M
+Minsky/M
+minster/SM
+minstrel/SM
+minstrelsy/MS
+mintage/SM
+Mintaka/M
+Minta/M
+minter/M
+mint/GZSMRD
+minty/RT
+minuend/SM
+minuet/SM
+Minuit/M
+minuscule/SM
+minus/S
+minuteman
+Minuteman/M
+minutemen
+minuteness/SM
+minute/RSDPMTYG
+minutiae
+minutia/M
+minx/MS
+Miocene
+MIPS
+Miquela/M
+Mirabeau/M
+Mirabella/M
+Mirabelle/M
+Mirabel/M
+Mirach/M
+miracle/MS
+miraculousness/M
+miraculous/PY
+mirage/GSDM
+Mira/M
+Miranda/M
+Miran/M
+Mireielle/M
+Mireille/M
+Mirella/M
+Mirelle/M
+mire/MGDS
+Mirfak/M
+Miriam/M
+Mirilla/M
+Mir/M
+Mirna/M
+Miro
+mirror/DMGS
+mirthfulness/SM
+mirthful/PY
+mirthlessness/M
+mirthless/YP
+mirth/M
+mirths
+MIRV/DSG
+miry/RT
+Mirzam/M
+misaddress/SDG
+misadventure/SM
+misalign/DSGL
+misalignment/MS
+misalliance/MS
+misanalysed
+misanthrope/MS
+misanthropic
+misanthropically
+misanthropist/S
+misanthropy/SM
+misapplier/M
+misapply/GNXRSD
+misapprehend/GDS
+misapprehension/MS
+misappropriate/GNXSD
+misbegotten
+misbehaver/M
+misbehave/RSDG
+misbehavior/SM
+misbrand/DSG
+misc
+miscalculate/XGNSD
+miscalculation/M
+miscall/SDG
+miscarriage/MS
+miscarry/SDG
+miscast/GS
+miscegenation/SM
+miscellanea
+miscellaneous/PY
+miscellany/MS
+Mischa/M
+mischance/MGSD
+mischief/MDGS
+mischievousness/MS
+mischievous/PY
+miscibility/S
+miscible/C
+misclassification/M
+misclassified
+misclassifying
+miscode/SDG
+miscommunicate/NDS
+miscomprehended
+misconceive/GDS
+misconception/MS
+misconduct/GSMD
+misconfiguration
+misconstruction/MS
+misconstrue/DSG
+miscopying
+miscount/DGS
+miscreant/MS
+miscue/MGSD
+misdeal/SG
+misdealt
+misdeed/MS
+misdemeanant/SM
+misdemeanor/SM
+misdiagnose/GSD
+misdid
+misdirect/GSD
+misdirection/MS
+misdirector/S
+misdoes
+misdo/JG
+misdone
+miserableness/SM
+miserable/SP
+miserably
+miser/KM
+miserliness/MS
+miserly/P
+misery/MS
+mises/KC
+misfeasance/MS
+misfeature/M
+misfield
+misfile/SDG
+misfire/SDG
+misfit/MS
+misfitted
+misfitting
+misfortune/SM
+misgauge/GDS
+misgiving/MYS
+misgovern/LDGS
+misgovernment/S
+misguidance/SM
+misguidedness/M
+misguided/PY
+misguide/DRSG
+misguider/M
+Misha/M
+mishandle/SDG
+mishap/MS
+mishapped
+mishapping
+misheard
+mishear/GS
+mishitting
+mishmash/SM
+misidentification/M
+misidentify/GNSD
+misinformation/SM
+misinform/GDS
+misinterpretation/MS
+misinterpreter/M
+misinterpret/RDSZG
+misjudge/DSG
+misjudging/Y
+misjudgment/MS
+Miskito
+mislabel/DSG
+mislaid
+mislay/GS
+misleader/M
+mislead/GRJS
+misleading/Y
+misled
+mismanage/LGSD
+mismanagement/MS
+mismatch/GSD
+misname/GSD
+misnomer/GSMD
+misogamist/MS
+misogamy/MS
+misogynistic
+misogynist/MS
+misogynous
+misogyny/MS
+misperceive/SD
+misplace/GLDS
+misplacement/MS
+misplay/GSD
+mispositioned
+misprint/SGDM
+misprision/SM
+mispronounce/DSG
+mispronunciation/MS
+misquotation/MS
+misquote/GDS
+misreader/M
+misread/RSGJ
+misrelated
+misremember/DG
+misreport/DGS
+misrepresentation/MS
+misrepresenter/M
+misrepresent/SDRG
+misroute/DS
+misrule/SDG
+missal/ESM
+misshape/DSG
+misshapenness/SM
+misshapen/PY
+Missie/M
+missile/MS
+missilery/SM
+mission/AMS
+missionary/MS
+missioned
+missioner/SM
+missioning
+missis's
+Mississauga/M
+Mississippian/S
+Mississippi/M
+missive/MS
+Missoula/M
+Missourian/S
+Missouri/M
+misspeak/SG
+misspecification
+misspecified
+misspelling/M
+misspell/SGJD
+misspend/GS
+misspent
+misspoke
+misspoken
+mis/SRZ
+miss/SDEGV
+Miss/SM
+misstate/GLDRS
+misstatement/MS
+misstater/M
+misstep/MS
+misstepped
+misstepping
+missus/SM
+Missy/M
+mistakable/U
+mistake/BMGSR
+mistaken/Y
+mistaker/M
+mistaking/Y
+Mistassini/M
+mister/GDM
+Mister/SM
+mistily
+Misti/M
+mistime/GSD
+mistiness/S
+mistletoe/MS
+mist/MRDGZS
+mistook
+mistral/MS
+mistranslated
+mistranslates
+mistranslating
+mistranslation/SM
+mistreat/DGSL
+mistreatment/SM
+Mistress/MS
+mistress/MSY
+mistrial/SM
+mistruster/M
+mistrustful/Y
+mistrust/SRDG
+Misty/M
+mistype/SDGJ
+misty/PRT
+misunderstander/M
+misunderstanding/M
+misunderstand/JSRZG
+misunderstood
+misuser/M
+misuse/RSDMG
+miswritten
+Mitchael/M
+Mitchell/M
+Mitchel/M
+Mitch/M
+miterer/M
+miter/GRDM
+mite/SRMZ
+Mitford/M
+Mithra/M
+Mithridates/M
+mitigated/U
+mitigate/XNGVDS
+mitigation/M
+MIT/M
+mitoses
+mitosis/M
+mitotic
+MITRE/SM
+Mitsubishi/M
+mitten/M
+Mitterrand/M
+mitt/XSMN
+Mitty/M
+Mitzi/M
+mitzvahs
+mixable
+mix/AGSD
+mixed/U
+mixer/SM
+mixture/SM
+Mizar/M
+mizzenmast/SM
+mizzen/MS
+Mk
+mks
+ml
+Mlle/M
+mm
+MM
+MMe
+Mme/SM
+MN
+mnemonically
+mnemonics/M
+mnemonic/SM
+Mnemosyne/M
+Mn/M
+MO
+moan/GSZRDM
+moat/SMDG
+mobbed
+mobber
+mobbing
+mobcap/SM
+Mobile/M
+mobile/S
+mobility/MS
+mobilizable
+mobilization/AMCS
+mobilize/CGDS
+mobilized/U
+mobilizer/MS
+mobilizes/A
+Mobil/M
+mob/MS
+mobster/MS
+Mobutu/M
+moccasin/SM
+mocha/SM
+mockers/M
+mockery/MS
+mock/GZSRD
+mockingbird/MS
+mocking/Y
+mo/CSK
+modality/MS
+modal/Y
+modeled/A
+modeler/M
+modeling/M
+models/A
+model/ZGSJMRD
+mode/MS
+modem/SM
+moderated/U
+moderateness/SM
+moderate/PNGDSXY
+moderation/M
+moderator/MS
+modernism/MS
+modernistic
+modernist/S
+modernity/SM
+modernization/MS
+modernized/U
+modernizer/M
+modernize/SRDGZ
+modernizes/U
+modernness/SM
+modern/PTRYS
+Modesta/M
+Modestia/M
+Modestine/M
+Modesto/M
+modest/TRY
+Modesty/M
+modesty/MS
+modicum/SM
+modifiability/M
+modifiableness/M
+modifiable/U
+modification/M
+modified/U
+modifier/M
+modify/NGZXRSD
+Modigliani/M
+modishness/MS
+modish/YP
+mod/TSR
+Modula/M
+modularity/SM
+modularization
+modularize/SDG
+modular/SY
+modulate/ADSNCG
+modulation/CMS
+modulator/ACSM
+module/SM
+moduli
+modulo
+modulus/M
+modus
+Moe/M
+Moen/M
+Mogadiscio's
+Mogadishu
+mogul/MS
+Mogul/MS
+mohair/SM
+Mohamed/M
+Mohammad/M
+Mohammedanism/MS
+Mohammedan/SM
+Mohammed's
+Mohandas/M
+Mohandis/M
+Mohawk/MS
+Mohegan/S
+Mohican's
+Moho/M
+Mohorovicic/M
+Mohr/M
+moiety/MS
+moil/SGD
+Moina/M
+Moines/M
+Moira/M
+moire/MS
+Moise/MS
+Moiseyev/M
+Moishe/M
+moistener/M
+moisten/ZGRD
+moistness/MS
+moist/TXPRNY
+moisture/MS
+moisturize/GZDRS
+Mojave/M
+molal
+molarity/SM
+molar/MS
+molasses/MS
+Moldavia/M
+Moldavian/S
+moldboard/SM
+molder/DG
+moldiness/SM
+molding/M
+mold/MRDJSGZ
+Moldova
+moldy/PTR
+molecularity/SM
+molecular/Y
+molecule/MS
+molehill/SM
+mole/MTS
+moleskin/MS
+molestation/SM
+molested/U
+molester/M
+molest/RDZGS
+Moliere
+Molina/M
+Moline/M
+Mollee/M
+Mollie/M
+mollification/M
+mollify/XSDGN
+Molli/M
+Moll/M
+moll/MS
+mollusc's
+mollusk/S
+mollycoddler/M
+mollycoddle/SRDG
+Molly/M
+molly/SM
+Molnar/M
+Moloch/M
+Molokai/M
+Molotov/M
+molter/M
+molt/RDNGZS
+Moluccas
+molybdenite/M
+molybdenum/MS
+Mombasa/M
+momenta
+momentarily
+momentariness/SM
+momentary/P
+moment/MYS
+momentousness/MS
+momentous/YP
+momentum/SM
+momma/S
+Mommy/M
+mommy/SM
+Mo/MN
+mom/SM
+Monaco/M
+monadic
+monad/SM
+Monah/M
+Mona/M
+monarchic
+monarchical
+monarchism/MS
+monarchistic
+monarchist/MS
+monarch/M
+monarchs
+monarchy/MS
+Monash/M
+monastery/MS
+monastical/Y
+monasticism/MS
+monastic/S
+monaural/Y
+Mondale/M
+Monday/MS
+Mondrian/M
+Monegasque/SM
+Monera/M
+monetarily
+monetarism/S
+monetarist/MS
+monetary
+monetization/CMA
+monetize/CGADS
+Monet/M
+moneybag/SM
+moneychangers
+moneyer/M
+moneylender/SM
+moneymaker/MS
+moneymaking/MS
+money/SMRD
+Monfort/M
+monger/SGDM
+Mongolia/M
+Mongolian/S
+Mongolic/M
+mongolism/SM
+mongoloid/S
+Mongoloid/S
+Mongol/SM
+mongoose/SM
+mongrel/SM
+Monica/M
+monies/M
+Monika/M
+moniker/MS
+Monique/M
+monism/MS
+monist/SM
+monition/SM
+monitored/U
+monitor/GSMD
+monitory/S
+monkeyshine/S
+monkey/SMDG
+monkish
+Monk/M
+monk/MS
+monkshood/SM
+Monmouth/M
+monochromatic
+monochromator
+monochrome/MS
+monocle/SDM
+monoclinic
+monoclonal/S
+monocotyledonous
+monocotyledon/SM
+monocular/SY
+monodic
+monodist/S
+monody/MS
+monogamist/MS
+monogamous/PY
+monogamy/MS
+monogrammed
+monogramming
+monogram/MS
+monograph/GMDS
+monographs
+monolingualism
+monolingual/S
+monolithic
+monolithically
+monolith/M
+monoliths
+monologist/S
+monologue/GMSD
+monomaniacal
+monomaniac/MS
+monomania/MS
+monomeric
+monomer/SM
+monomial/SM
+mono/MS
+Monongahela/M
+mononuclear
+mononucleoses
+mononucleosis/M
+monophonic
+monoplane/MS
+monopole/S
+monopolistic
+monopolist/MS
+monopolization/MS
+monopolized/U
+monopolize/GZDSR
+monopolizes/U
+monopoly/MS
+monorail/SM
+monostable
+monosyllabic
+monosyllable/MS
+monotheism/SM
+monotheistic
+monotheist/S
+monotone/SDMG
+monotonic
+monotonically
+monotonicity
+monotonousness/MS
+monotonous/YP
+monotony/MS
+monovalent
+monoxide/SM
+Monroe/M
+Monro/M
+Monrovia/M
+Monsanto/M
+monseigneur
+monsieur/M
+Monsignori
+Monsignor/MS
+monsignor/S
+Mon/SM
+monsoonal
+monsoon/MS
+monster/SM
+monstrance/ASM
+monstrosity/SM
+monstrousness/M
+monstrous/YP
+montage/SDMG
+Montague/M
+Montaigne/M
+Montana/M
+Montanan/MS
+Montcalm/M
+Montclair/M
+Monte/M
+Montenegrin
+Montenegro/M
+Monterey/M
+Monterrey/M
+Montesquieu/M
+Montessori/M
+Monteverdi/M
+Montevideo/M
+Montezuma
+Montgomery/M
+monthly/S
+month/MY
+months
+Monticello/M
+Monti/M
+Mont/M
+Montmartre/M
+Montoya/M
+Montpelier/M
+Montrachet/M
+Montreal/M
+Montserrat/M
+Monty/M
+monumentality/M
+monumental/Y
+monument/DMSG
+mooch/ZSRDG
+moodily
+moodiness/MS
+mood/MS
+Moody/M
+moody/PTR
+Moog
+moo/GSD
+moonbeam/SM
+Mooney/M
+moon/GDMS
+moonless
+moonlight/GZDRMS
+moonlighting/M
+moonlit
+Moon/M
+moonscape/MS
+moonshiner/M
+moonshine/SRZM
+moonshot/MS
+moonstone/SM
+moonstruck
+moonwalk/SDG
+Moore/M
+moor/GDMJS
+mooring/M
+Moorish
+moorland/MS
+Moor/MS
+moose/M
+moot/RDGS
+moped/MS
+moper/M
+mope/S
+mopey
+mopier
+mopiest
+mopish
+mopped
+moppet/MS
+mopping
+mop/SZGMDR
+moraine/MS
+morale/MS
+Morales/M
+moralistic
+moralistically
+moralist/MS
+morality/UMS
+moralization/CS
+moralize/CGDRSZ
+moralled
+moraller
+moralling
+moral/SMY
+Mora/M
+Moran/M
+morass/SM
+moratorium/SM
+Moravia/M
+Moravian
+moray/SM
+morbidity/SM
+morbidness/S
+morbid/YP
+mordancy/MS
+mordant/GDYS
+Mordecai/M
+Mord/M
+Mordred/M
+Mordy/M
+more/DSN
+Moreen/M
+Morehouse/M
+Moreland/M
+morel/SM
+More/M
+Morena/M
+Moreno/M
+moreover
+Morey/M
+Morgana/M
+Morganica/M
+Morgan/MS
+Morganne/M
+morgen/M
+Morgen/M
+morgue/SM
+Morgun/M
+Moria/M
+Moriarty/M
+moribundity/M
+moribund/Y
+Morie/M
+Morin/M
+morion/M
+Morison/M
+Morissa/M
+Morita/M
+Moritz/M
+Morlee/M
+Morley/M
+Morly/M
+Mormonism/MS
+Mormon/SM
+Morna/M
+morning/MY
+morn/SGJDM
+Moroccan/S
+Morocco/M
+morocco/SM
+Moro/M
+moronic
+moronically
+Moroni/M
+moron/SM
+moroseness/MS
+morose/YP
+morpheme/DSMG
+morphemic/S
+Morpheus/M
+morph/GDJ
+morphia/S
+morphine/MS
+morphism/MS
+morphologic
+morphological/Y
+morphology/MS
+morphophonemic/S
+morphophonemics/M
+morphs
+Morrie/M
+morris
+Morris/M
+Morrison/M
+Morristown/M
+Morrow/M
+morrow/MS
+Morry/M
+morsel/GMDS
+Morse/M
+mortality/SM
+mortal/SY
+mortarboard/SM
+mortar/GSDM
+Morten/M
+mortgageable
+mortgagee/SM
+mortgage/MGDS
+mortgagor/SM
+mortice's
+mortician/SM
+Mortie/M
+mortification/M
+mortified/Y
+mortifier/M
+mortify/DRSXGN
+Mortimer/M
+mortise/MGSD
+Mort/MN
+Morton/M
+mortuary/MS
+Morty/M
+Mosaic
+mosaicked
+mosaicking
+mosaic/MS
+Moscone/M
+Moscow/M
+Moseley/M
+Moselle/M
+Mose/MSR
+Moser/M
+mosey/SGD
+Moshe/M
+Moslem's
+Mosley/M
+mosque/SM
+mosquitoes
+mosquito/M
+mos/S
+mossback/MS
+Mossberg/M
+Moss/M
+moss/SDMG
+mossy/SRT
+most/SY
+Mosul/M
+mote/ASCNK
+motel/MS
+mote's
+motet/SM
+mothball/DMGS
+motherboard/MS
+motherfucker/MS!
+motherfucking/!
+motherhood/SM
+mothering/M
+motherland/SM
+motherless
+motherliness/MS
+motherly/P
+mother/RDYMZG
+moths
+moth/ZMR
+motif/MS
+motile/S
+motility/MS
+motional/K
+motioner/M
+motion/GRDMS
+motionlessness/S
+motionless/YP
+motion's/ACK
+motions/K
+motivated/U
+motivate/XDSNGV
+motivational/Y
+motivation/M
+motivator/S
+motiveless
+motive/MGSD
+motley/S
+motlier
+motliest
+mot/MSV
+motocross/SM
+motorbike/SDGM
+motorboat/MS
+motorcade/MSDG
+motorcar/MS
+motorcycle/GMDS
+motorcyclist/SM
+motor/DMSG
+motoring/M
+motorist/SM
+motorization/SM
+motorize/DSG
+motorized/U
+motorman/M
+motormen
+motormouth
+motormouths
+Motorola/M
+motorway/SM
+Motown/M
+mottle/GSRD
+mottler/M
+Mott/M
+mottoes
+motto/M
+moue/DSMG
+moulder/DSG
+moult/GSD
+mound/GMDS
+mountable
+mountaineering/M
+mountaineer/JMDSG
+mountainousness/M
+mountainous/PY
+mountainside/MS
+mountain/SM
+mountaintop/SM
+Mountbatten/M
+mountebank/SGMD
+mounted/U
+mount/EGACD
+mounter/SM
+mounties
+Mountie/SM
+mounting/MS
+Mount/M
+mounts/AE
+mourner/M
+mournfuller
+mournfullest
+mournfulness/S
+mournful/YP
+mourning/M
+mourn/ZGSJRD
+mouser/M
+mouse/SRDGMZ
+mousetrapped
+mousetrapping
+mousetrap/SM
+mousiness/MS
+mousing/M
+mousse/MGSD
+Moussorgsky/M
+mousy/PRT
+Mouthe/M
+mouthful/MS
+mouthiness/SM
+mouth/MSRDG
+mouthorgan
+mouthpiece/SM
+mouths
+mouthwash/SM
+mouthwatering
+mouthy/PTR
+Mouton/M
+mouton/SM
+movable/ASP
+movableness/AM
+move/ARSDGZB
+moved/U
+movement/SM
+mover/AM
+moviegoer/S
+movie/SM
+moving/YS
+mower/M
+Mowgli/M
+mowing/M
+mow/SDRZG
+moxie/MS
+Moyer/M
+Moyna/M
+Moyra/M
+Mozambican/S
+Mozambique/M
+Mozart/M
+Mozelle/M
+Mozes/M
+Mozilla/M
+mozzarella/MS
+mp
+MP
+mpg
+mph
+MPH
+MRI
+Mr/M
+Mrs
+ms
+M's
+MS
+MSG
+Msgr/M
+m's/K
+Ms/S
+MST
+MSW
+mt
+MT
+mtg
+mtge
+Mt/M
+MTS
+MTV
+Muawiya/M
+Mubarak/M
+muchness/M
+much/SP
+mucilage/MS
+mucilaginous
+mucker/M
+muck/GRDMS
+muckraker/M
+muckrake/ZMDRSG
+mucky/RT
+mucosa/M
+mucous
+mucus/SM
+mudded
+muddily
+muddiness/SM
+mudding
+muddle/GRSDZ
+muddleheaded/P
+muddlehead/SMD
+muddler/M
+muddy/TPGRSD
+mudflat/S
+mudguard/SM
+mudlarks
+mud/MS
+mudroom/S
+mudslide/S
+mudslinger/M
+mudslinging/M
+mudsling/JRGZ
+Mueller/M
+Muenster
+muenster/MS
+muesli/M
+muezzin/MS
+muff/GDMS
+Muffin/M
+muffin/SM
+muffler/M
+muffle/ZRSDG
+Mufi/M
+Mufinella/M
+mufti/MS
+Mugabe/M
+mugged
+mugger/SM
+mugginess/S
+mugging/S
+muggy/RPT
+mugshot/S
+mug/SM
+mugwump/MS
+Muhammadanism/S
+Muhammadan/SM
+Muhammad/M
+Muire/M
+Muir/M
+Mukden/M
+mukluk/SM
+mulattoes
+mulatto/M
+mulberry/MS
+mulch/GMSD
+mulct/SDG
+Mulder/M
+mule/MGDS
+muleskinner/S
+muleteer/MS
+mulishness/MS
+mulish/YP
+mullah/M
+mullahs
+mullein/MS
+Mullen/M
+muller/M
+Muller/M
+mullet/MS
+Mulligan/M
+mulligan/SM
+mulligatawny/SM
+Mullikan/M
+Mullins
+mullion/MDSG
+mull/RDSG
+Multan/M
+multi
+Multibus/M
+multicellular
+multichannel/M
+multicollinearity/M
+multicolor/SDM
+multicolumn
+multicomponent
+multicomputer/MS
+Multics/M
+MULTICS/M
+multicultural
+multiculturalism/S
+multidimensional
+multidimensionality
+multidisciplinary
+multifaceted
+multifamily
+multifariousness/SM
+multifarious/YP
+multifigure
+multiform
+multifunction/D
+multilateral/Y
+multilayer
+multilevel/D
+multilingual
+multilingualism/S
+multimedia/S
+multimegaton/M
+multimeter/M
+multimillionaire/SM
+multinational/S
+multinomial/M
+multiphase
+multiple/SM
+multiplet/SM
+multiplex/GZMSRD
+multiplexor's
+multipliable
+multiplicand/SM
+multiplication/M
+multiplicative/YS
+multiplicity/MS
+multiplier/M
+multiply/ZNSRDXG
+multiprocess/G
+multiprocessor/MS
+multiprogram
+multiprogrammed
+multiprogramming/MS
+multipurpose
+multiracial
+multistage
+multistory/S
+multisyllabic
+multitasking/S
+multitude/MS
+multitudinousness/M
+multitudinous/YP
+multiuser
+multivalent
+multivalued
+multivariate
+multiversity/M
+multivitamin/S
+mu/M
+mumbler/M
+mumbletypeg/S
+mumble/ZJGRSD
+Mumford/M
+mummed
+mummer/SM
+mummery/MS
+mummification/M
+mummify/XSDGN
+mumming
+mum/MS
+mummy/GSDM
+mumps/M
+muncher/M
+Münchhausen/M
+munchies
+Munch/M
+munch/ZRSDG
+Muncie/M
+mundane/YSP
+Mundt/M
+munge/JGZSRD
+Munich/M
+municipality/SM
+municipal/YS
+munificence/MS
+munificent/Y
+munition/SDG
+Munmro/M
+Munoz/M
+Munroe/M
+Munro/M
+mun/S
+Munsey/M
+Munson/M
+Munster/MS
+Muong/M
+muon/M
+Muppet/M
+muralist/SM
+mural/SM
+Murasaki/M
+Murat/M
+Murchison/M
+Murcia/M
+murderer/M
+murderess/S
+murder/GZRDMS
+murderousness/M
+murderous/YP
+Murdoch/M
+Murdock/M
+Mureil/M
+Murial/M
+muriatic
+Murielle/M
+Muriel/M
+Murillo/M
+murkily
+murkiness/S
+murk/TRMS
+murky/RPT
+Murmansk/M
+murmurer/M
+murmuring/U
+murmurous
+murmur/RDMGZSJ
+Murphy/M
+murrain/SM
+Murray/M
+Murrow/M
+Murrumbidgee/M
+Murry/M
+Murvyn/M
+muscatel/MS
+Muscat/M
+muscat/SM
+musclebound
+muscle/SDMG
+Muscovite/M
+muscovite/MS
+Muscovy/M
+muscularity/SM
+muscular/Y
+musculature/SM
+muse
+Muse/M
+muser/M
+musette/SM
+museum/MS
+mus/GJDSR
+musher/M
+mushiness/MS
+mush/MSRDG
+mushroom/DMSG
+mushy/PTR
+Musial/M
+musicale/SM
+musicality/SM
+musicals
+musical/YU
+musician/MYS
+musicianship/MS
+musicked
+musicking
+musicological
+musicologist/MS
+musicology/MS
+music/SM
+musing/Y
+Muskegon/M
+muskeg/SM
+muskellunge/SM
+musketeer/MS
+musketry/MS
+musket/SM
+musk/GDMS
+muskie/M
+muskiness/MS
+muskmelon/MS
+muskox/N
+muskrat/MS
+musky/RSPT
+Muslim/MS
+muslin/MS
+mussel/MS
+Mussolini/MS
+Mussorgsky/M
+muss/SDG
+mussy/RT
+mustache/DSM
+mustachio/MDS
+mustang/MS
+mustard/MS
+muster/GD
+mustily
+mustiness/MS
+mustn't
+must/RDGZS
+must've
+musty/RPT
+mutability/SM
+mutableness/M
+mutable/P
+mutably
+mutagen/SM
+mutant/MS
+mutate/XVNGSD
+mutational/Y
+mutation/M
+mutator/S
+muted/Y
+muteness/S
+mute/PDSRBYTG
+mutilate/XDSNG
+mutilation/M
+mutilator/MS
+mutineer/SMDG
+mutinous/Y
+mutiny/MGSD
+Mutsuhito/M
+mutterer/M
+mutter/GZRDJ
+muttonchops
+mutton/SM
+mutt/ZSMR
+mutuality/S
+mutual/SY
+muumuu/MS
+muzak
+Muzak/SM
+Muzo/M
+muzzled/U
+muzzle/MGRSD
+muzzler/M
+MVP
+MW
+Myanmar
+Mycah/M
+Myca/M
+Mycenaean
+Mycenae/M
+Mychal/M
+mycologist/MS
+mycology/MS
+myelitides
+myelitis/M
+Myer/MS
+myers
+mylar
+Mylar/S
+Myles/M
+Mylo/M
+My/M
+myna/SM
+Mynheer/M
+myocardial
+myocardium/M
+myopia/MS
+myopically
+myopic/S
+Myrah/M
+Myra/M
+Myranda/M
+Myrdal/M
+myriad/S
+Myriam/M
+Myrilla/M
+Myrle/M
+Myrlene/M
+myrmidon/S
+Myrna/M
+Myron/M
+myrrh/M
+myrrhs
+Myrta/M
+Myrtia/M
+Myrtice/M
+Myrtie/M
+Myrtle/M
+myrtle/SM
+Myrvyn/M
+Myrwyn/M
+mys
+my/S
+myself
+Mysore/M
+mysteriousness/MS
+mysterious/YP
+mystery/MDSG
+mystical/Y
+mysticism/MS
+mystic/SM
+mystification/M
+mystifier/M
+mystify/CSDGNX
+mystifying/Y
+mystique/MS
+Myst/M
+mythic
+mythical/Y
+myth/MS
+mythographer/SM
+mythography/M
+mythological/Y
+mythologist/MS
+mythologize/CSDG
+mythology/SM
+myths
+N
+NAACP
+nabbed
+nabbing
+Nabisco/M
+nabob/SM
+Nabokov/M
+nab/S
+nacelle/SM
+nacho/S
+NaCl/M
+nacre/MS
+nacreous
+Nada/M
+Nadean/M
+Nadeen/M
+Nader/M
+Nadia/M
+Nadine/M
+nadir/SM
+Nadiya/M
+Nadya/M
+Nady/M
+nae/VM
+Nagasaki/M
+nagged
+nagger/S
+nagging/Y
+nag/MS
+Nagoya/M
+Nagpur/M
+Nagy/M
+Nahuatl/SM
+Nahum/M
+naiad/SM
+naifs
+nailbrush/SM
+nailer/M
+nail/SGMRD
+Naipaul/M
+Nair/M
+Nairobi/M
+Naismith/M
+naive/SRTYP
+naiveté/SM
+naivety/MS
+Nakamura/M
+Nakayama/M
+nakedness/MS
+naked/TYRP
+Nakoma/M
+Nalani/M
+Na/M
+Namath/M
+nameable/U
+name/ADSG
+namedrop
+namedropping
+named's
+named/U
+nameless/PY
+namely
+nameplate/MS
+namer/SM
+name's
+namesake/SM
+Namibia/M
+Namibian/S
+naming/M
+Nam/M
+Nanak/M
+Nana/M
+Nananne/M
+Nancee/M
+Nance/M
+Nancey/M
+Nanchang/M
+Nancie/M
+Nanci/M
+Nancy/M
+Nanete/M
+Nanette/M
+Nanice/M
+Nani/M
+Nanine/M
+Nanjing
+Nanking's
+Nan/M
+Nannette/M
+Nannie/M
+Nanni/M
+Nanny/M
+nanny/SDMG
+nanometer/MS
+Nanon/M
+Nanook/M
+nanosecond/SM
+Nansen/M
+Nantes/M
+Nantucket/M
+Naoma/M
+Naomi/M
+napalm/MDGS
+nape/SM
+Naphtali/M
+naphthalene/MS
+naphtha/SM
+Napier/M
+napkin/SM
+Naples/M
+napless
+Nap/M
+Napoleonic
+napoleon/MS
+Napoleon/MS
+napped
+napper/MS
+Nappie/M
+napping
+Nappy/M
+nappy/TRSM
+nap/SM
+Nara/M
+Narbonne/M
+narc/DGS
+narcissism/MS
+narcissistic
+narcissist/MS
+narcissus/M
+Narcissus/M
+narcoleptic
+narcoses
+narcosis/M
+narcotic/SM
+narcotization/S
+narcotize/GSD
+Nariko/M
+Nari/M
+nark's
+Narmada/M
+Narragansett/M
+narrate/VGNSDX
+narration/M
+narrative/MYS
+narratology
+narrator/SM
+narrowing/P
+narrowness/SM
+narrow/RDYTGPS
+narwhal/MS
+nary
+nasality/MS
+nasalization/MS
+nasalize/GDS
+nasal/YS
+NASA/MS
+nascence/ASM
+nascent/A
+NASDAQ
+Nash/M
+Nashua/M
+Nashville/M
+Nassau/M
+Nasser/M
+nastily
+nastiness/MS
+nasturtium/SM
+nasty/TRSP
+natal
+Natala/M
+Natalee/M
+Natale/M
+Natalia/M
+Natalie/M
+Natalina/M
+Nataline/M
+natalist
+natality/M
+Natal/M
+Natalya/M
+Nata/M
+Nataniel/M
+Natasha/M
+Natassia/M
+Natchez
+natch/S
+Nate/XMN
+Nathalia/M
+Nathalie/M
+Nathanael/M
+Nathanial/M
+Nathaniel/M
+Nathanil/M
+Nathan/MS
+nationalism/SM
+nationalistic
+nationalistically
+nationalist/MS
+nationality/MS
+nationalization/MS
+nationalize/CSDG
+nationalized/AU
+nationalizer/SM
+national/YS
+nationhood/SM
+nation/MS
+nationwide
+nativeness/M
+native/PYS
+Natividad/M
+Nativity/M
+nativity/MS
+Natka/M
+natl
+Nat/M
+NATO/SM
+natter/SGD
+nattily
+nattiness/SM
+Natty/M
+natty/TRP
+naturalism/MS
+naturalistic
+naturalist/MS
+naturalization/SM
+naturalized/U
+naturalize/GSD
+naturalness/US
+natural/PUY
+naturals
+nature/ASDCG
+nature's
+naturist
+Naugahyde/S
+naughtily
+naughtiness/SM
+naught/MS
+naughty/TPRS
+Naur/M
+Nauru/M
+nausea/SM
+nauseate/DSG
+nauseating/Y
+nauseousness/SM
+nauseous/P
+nautical/Y
+nautilus/MS
+Navaho's
+Navajoes
+Navajo/S
+naval/Y
+Navarro/M
+navel/MS
+nave/SM
+navigability/SM
+navigableness/M
+navigable/P
+navigate/DSXNG
+navigational
+navigation/M
+navigator/MS
+Navona/M
+Navratilova/M
+navvy/M
+Navy/S
+navy/SM
+nay/MS
+naysayer/S
+Nazarene/MS
+Nazareth/M
+Nazi/SM
+Nazism/S
+NB
+NBA
+NBC
+Nb/M
+NBS
+NC
+NCAA
+NCC
+NCO
+NCR
+ND
+N'Djamena
+Ndjamena/M
+Nd/M
+Ne
+NE
+Neala/M
+Neale/M
+Neall/M
+Neal/M
+Nealon/M
+Nealson/M
+Nealy/M
+Neanderthal/S
+neap/DGS
+Neapolitan/SM
+nearby
+nearly/RT
+nearness/MS
+nearside/M
+nearsightedness/S
+nearsighted/YP
+near/TYRDPSG
+neaten/DG
+neath
+neatness/MS
+neat/YRNTXPS
+Neb/M
+Nebraska/M
+Nebraskan/MS
+Nebr/M
+Nebuchadnezzar/MS
+nebulae
+nebula/M
+nebular
+nebulousness/SM
+nebulous/PY
+necessaries
+necessarily/U
+necessary/U
+necessitate/DSNGX
+necessitation/M
+necessitous
+necessity/SM
+neckband/M
+neckerchief/MS
+neck/GRDMJS
+necking/M
+necklace/DSMG
+neckline/MS
+necktie/MS
+necrology/SM
+necromancer/MS
+necromancy/MS
+necromantic
+necrophiliac/S
+necrophilia/M
+necropolis/SM
+necropsy/M
+necroses
+necrosis/M
+necrotic
+nectarine/SM
+nectarous
+nectar/SM
+nectary/MS
+Neda/M
+Nedda/M
+Neddie/M
+Neddy/M
+Nedi/M
+Ned/M
+née
+needed/U
+needer/M
+needful/YSP
+Needham/M
+neediness/MS
+needlecraft/M
+needle/GMZRSD
+needlepoint/SM
+needlessness/S
+needless/YP
+needlewoman/M
+needlewomen
+needlework/RMS
+needn't
+need/YRDGS
+needy/TPR
+Neel/M
+Neely/M
+ne'er
+nefariousness/MS
+nefarious/YP
+Nefen/M
+Nefertiti/M
+negated/U
+negater/M
+negate/XRSDVNG
+negation/M
+negativeness/SM
+negative/PDSYG
+negativism/MS
+negativity/MS
+negator/MS
+Negev/M
+neglecter/M
+neglectfulness/SM
+neglectful/YP
+neglect/SDRG
+negligee/SM
+negligence/MS
+negligent/Y
+negligibility/M
+negligible
+negligibly
+negotiability/MS
+negotiable/A
+negotiant/M
+negotiate/ASDXGN
+negotiation/MA
+negotiator/MS
+Negress/MS
+negritude/MS
+Negritude/S
+Negroes
+negroid
+Negroid/S
+Negro/M
+neg/S
+Nehemiah/M
+Nehru/M
+neighbored/U
+neighborer/M
+neighborhood/SM
+neighborlinesses
+neighborliness/UM
+neighborly/UP
+neighbor/SMRDYZGJ
+neigh/MDG
+neighs
+Neila/M
+Neile/M
+Neilla/M
+Neille/M
+Neill/M
+Neil/SM
+neither
+Nelda/M
+Nelia/M
+Nelie/M
+Nelle/M
+Nellie/M
+Nelli/M
+Nell/M
+Nelly/M
+Nelsen/M
+Nels/N
+Nelson/M
+nelson/MS
+nematic
+nematode/SM
+Nembutal/M
+nemeses
+nemesis
+Nemesis/M
+neoclassical
+neoclassicism/MS
+neoclassic/M
+neocolonialism/MS
+neocortex/M
+neodymium/MS
+Neogene
+neolithic
+Neolithic/M
+neologism/SM
+neomycin/M
+neonatal/Y
+neonate/MS
+neon/DMS
+neophyte/MS
+neoplasm/SM
+neoplastic
+neoprene/SM
+Nepalese
+Nepali/MS
+Nepal/M
+nepenthe/MS
+nephew/MS
+nephrite/SM
+nephritic
+nephritides
+nephritis/M
+nepotism/MS
+nepotist/S
+Neptune/M
+neptunium/MS
+nerd/S
+nerdy/RT
+Nereid/M
+Nerf/M
+Nerissa/M
+Nerita/M
+Nero/M
+Neron/M
+Nerta/M
+Nerte/M
+Nertie/M
+Nerti/M
+Nert/M
+Nerty/M
+Neruda/M
+nervelessness/SM
+nerveless/YP
+nerve's
+nerve/UGSD
+nerviness/SM
+nerving/M
+nervousness/SM
+nervous/PY
+nervy/TPR
+Nessa/M
+Nessie/M
+Nessi/M
+Nessy/M
+Nesta/M
+nester/M
+Nester/M
+Nestle/M
+nestler/M
+nestle/RSDG
+nestling/M
+Nestorius/M
+Nestor/M
+nest/RDGSBM
+netball/M
+nether
+Netherlander/SM
+Netherlands/M
+nethermost
+netherworld/S
+Netscape/M
+net/SM
+Netta/M
+Nettie/M
+Netti/M
+netting/M
+nett/JGRDS
+Nettle/M
+nettle/MSDG
+nettlesome
+Netty/M
+network/SJMDG
+Netzahualcoyotl/M
+Neumann/M
+neuralgia/MS
+neuralgic
+neural/Y
+neurasthenia/MS
+neurasthenic/S
+neuritic/S
+neuritides
+neuritis/M
+neuroanatomy
+neurobiology/M
+neurological/Y
+neurologist/MS
+neurology/SM
+neuromuscular
+neuronal
+neurone/S
+neuron/MS
+neuropathology/M
+neurophysiology/M
+neuropsychiatric
+neuroses
+neurosis/M
+neurosurgeon/MS
+neurosurgery/SM
+neurotically
+neurotic/S
+neurotransmitter/S
+neuter/JZGRD
+neutralise's
+neutralism/MS
+neutralist/S
+neutrality/MS
+neutralization/MS
+neutralized/U
+neutralize/GZSRD
+neutral/PYS
+neutrino/MS
+neutron/MS
+neut/ZR
+Nevada/M
+Nevadan/S
+Nevadian/S
+Neva/M
+never
+nevermore
+nevertheless
+nevi
+Nevile/M
+Neville/M
+Nevil/M
+Nevin/SM
+Nevis/M
+Nev/M
+Nevsa/M
+Nevsky/M
+nevus/M
+Newark/M
+newbie/S
+newborn/S
+Newbury/M
+Newburyport/M
+Newcastle/M
+newcomer/MS
+newed/A
+Newell/M
+newel/MS
+newer/A
+newfangled
+newfound
+newfoundland
+Newfoundlander/M
+Newfoundland/SRMZ
+newish
+newline/SM
+newlywed/MS
+Newman/M
+newness/MS
+Newport/M
+news/A
+newsagent/MS
+newsboy/SM
+newscaster/M
+newscasting/M
+newscast/SRMGZ
+newsdealer/MS
+newsed
+newses
+newsflash/S
+newsgirl/S
+newsgroup/SM
+newsing
+newsletter/SM
+NeWS/M
+newsman/M
+newsmen
+newspaperman/M
+newspapermen
+newspaper/SMGD
+newspaperwoman/M
+newspaperwomen
+newsprint/MS
+new/SPTGDRY
+newsreader/MS
+newsreel/SM
+newsroom/S
+news's
+newsstand/MS
+Newsweekly/M
+newsweekly/S
+Newsweek/MY
+newswire
+newswoman/M
+newswomen
+newsworthiness/SM
+newsworthy/RPT
+newsy/TRS
+newt/MS
+Newtonian
+Newton/M
+newton/SM
+Nexis/M
+next
+nexus/SM
+Neysa/M
+NF
+NFC
+NFL
+NFS
+Ngaliema/M
+Nguyen/M
+NH
+NHL
+niacin/SM
+Niagara/M
+Niall/M
+Nial/M
+Niamey/M
+nibbed
+nibbing
+nibbler/M
+nibble/RSDGZ
+Nibelung/M
+nib/SM
+Nicaean
+Nicaragua/M
+Nicaraguan/S
+Niccolo/M
+Nice/M
+Nicene
+niceness/MS
+nicety/MS
+nice/YTPR
+niche/SDGM
+Nicholas
+Nichole/M
+Nicholle/M
+Nichol/MS
+Nicholson/M
+nichrome
+nickelodeon/SM
+nickel/SGMD
+nicker/GD
+Nickey/M
+nick/GZRDMS
+Nickie/M
+Nicki/M
+Nicklaus/M
+Nick/M
+nicknack's
+nickname/MGDRS
+nicknamer/M
+Nickolai/M
+Nickola/MS
+Nickolaus/M
+Nicko/M
+Nicky/M
+Nicobar/M
+Nicodemus/M
+Nicolai/MS
+Nicola/MS
+Nicolea/M
+Nicole/M
+Nicolette/M
+Nicoli/MS
+Nicolina/M
+Nicoline/M
+Nicolle/M
+Nicol/M
+Nico/M
+Nicosia/M
+nicotine/MS
+Niebuhr/M
+niece/MS
+Niel/MS
+Nielsen/M
+Niels/N
+Nielson/M
+Nietzsche/M
+Nieves/M
+nifty/TRS
+Nigel/M
+Nigeria/M
+Nigerian/S
+Nigerien
+Niger/M
+niggardliness/SM
+niggardly/P
+niggard/SGMDY
+nigger/SGDM!
+niggler/M
+niggle/RSDGZJ
+niggling/Y
+nigh/RDGT
+nighs
+nightcap/SM
+nightclothes
+nightclubbed
+nightclubbing
+nightclub/MS
+nightdress/MS
+nightfall/SM
+nightgown/MS
+nighthawk/MS
+nightie/MS
+Nightingale/M
+nightingale/SM
+nightlife/MS
+nightlong
+nightmare/MS
+nightmarish/Y
+nightshade/SM
+nightshirt/MS
+night/SMYDZ
+nightspot/MS
+nightstand/SM
+nightstick/S
+nighttime/S
+nightwear/M
+nighty's
+NIH
+nihilism/MS
+nihilistic
+nihilist/MS
+Nijinsky/M
+Nikaniki/M
+Nike/M
+Niki/M
+Nikita/M
+Nikkie/M
+Nikki/M
+Nikko/M
+Nikolai/M
+Nikola/MS
+Nikolaos/M
+Nikolaus/M
+Nikolayev's
+Nikoletta/M
+Nikolia/M
+Nikolos/M
+Niko/MS
+Nikon/M
+Nile/SM
+nilled
+nilling
+Nil/MS
+nil/MYS
+nilpotent
+Nilsen/M
+Nils/N
+Nilson/M
+Nilsson/M
+Ni/M
+nimbi
+nimbleness/SM
+nimble/TRP
+nimbly
+nimbus/DM
+NIMBY
+Nimitz/M
+Nimrod/MS
+Nina/M
+nincompoop/MS
+ninefold
+nine/MS
+ninepence/M
+ninepin/S
+ninepins/M
+nineteen/SMH
+nineteenths
+ninetieths
+Ninetta/M
+Ninette/M
+ninety/MHS
+Nineveh/M
+ninja/S
+Ninnetta/M
+Ninnette/M
+ninny/SM
+Ninon/M
+Nintendo/M
+ninth
+ninths
+Niobe/M
+niobium/MS
+nipped
+nipper/DMGS
+nippiness/S
+nipping/Y
+nipple/GMSD
+Nipponese
+Nippon/M
+nippy/TPR
+nip/S
+Nirenberg/M
+nirvana/MS
+Nirvana/S
+nisei
+Nisei/MS
+Nissa/M
+Nissan/M
+Nisse/M
+Nissie/M
+Nissy/M
+Nita/M
+niter/M
+nitpick/DRSJZG
+nitrate/MGNXSD
+nitration/M
+nitric
+nitride/MGS
+nitriding/M
+nitrification/SM
+nitrite/MS
+nitrocellulose/MS
+nitrogenous
+nitrogen/SM
+nitroglycerin/MS
+nitrous
+nitwit/MS
+nit/ZSMR
+Niven/M
+nixer/M
+nix/GDSR
+Nixie/M
+Nixon/M
+NJ
+Nkrumah/M
+NLRB
+nm
+NM
+no/A
+NOAA
+Noach/M
+Noah/M
+Noak/M
+Noami/M
+Noam/M
+Nobelist/SM
+nobelium/MS
+Nobel/M
+Nobe/M
+Nobie/M
+nobility/MS
+Noble/M
+nobleman/M
+noblemen
+nobleness/SM
+noblesse/M
+noble/TPSR
+noblewoman
+noblewomen
+nob/MY
+nobody/MS
+Noby/M
+nocturnal/SY
+nocturne/SM
+nodal/Y
+nodded
+nodding
+noddle/MSDG
+noddy/M
+node/MS
+NoDoz/M
+nod/SM
+nodular
+nodule/SM
+Noelani/M
+Noella/M
+Noelle/M
+Noell/M
+Noellyn/M
+Noel/MS
+noel/S
+Noelyn/M
+Noe/M
+Noemi/M
+noes/S
+noggin/SM
+nohow
+noise/GMSD
+noiselessness/SM
+noiseless/YP
+noisemaker/M
+noisemake/ZGR
+noisily
+noisiness/MS
+noisome
+noisy/TPR
+Nola/M
+Nolana/M
+Noland/M
+Nolan/M
+Nolie/M
+Nollie/M
+Noll/M
+Nolly/M
+No/M
+nomadic
+nomad/SM
+Nome/M
+nomenclature/MS
+Nomi/M
+nominalized
+nominal/K
+nominally
+nominals
+nominate/CDSAXNG
+nomination/MAC
+nominative/SY
+nominator/CSM
+nominee/MS
+non
+nonabrasive
+nonabsorbent/S
+nonacademic/S
+nonacceptance/MS
+nonacid/MS
+nonactive
+nonadaptive
+nonaddictive
+nonadhesive
+nonadjacent
+nonadjustable
+nonadministrative
+nonage/MS
+nonagenarian/MS
+nonaggression/SM
+nonagricultural
+Nonah/M
+nonalcoholic/S
+nonaligned
+nonalignment/SM
+nonallergic
+Nona/M
+nonappearance/MS
+nonassignable
+nonathletic
+nonattendance/SM
+nonautomotive
+nonavailability/SM
+nonbasic
+nonbeliever/SM
+nonbelligerent/S
+nonblocking
+nonbreakable
+nonburnable
+nonbusiness
+noncaloric
+noncancerous
+noncarbohydrate/M
+nonce/MS
+nonchalance/SM
+nonchalant/YP
+nonchargeable
+nonclerical/S
+nonclinical
+noncollectable
+noncombatant/MS
+noncombustible/S
+noncommercial/S
+noncommissioned
+noncommittal/Y
+noncom/MS
+noncommunicable
+noncompeting
+noncompetitive
+noncompliance/MS
+noncomplying/S
+noncomprehending
+nonconducting
+nonconductor/MS
+nonconforming
+nonconformist/SM
+nonconformity/SM
+nonconsecutive
+nonconservative
+nonconstructive
+noncontagious
+noncontiguous
+noncontinuous
+noncontributing
+noncontributory
+noncontroversial
+nonconvertible
+noncooperation/SM
+noncorroding/S
+noncorrosive
+noncredit
+noncriminal/S
+noncritical
+noncrystalline
+noncumulative
+noncustodial
+noncyclic
+nondairy
+nondecreasing
+nondeductible
+nondelivery/MS
+nondemocratic
+nondenominational
+nondepartmental
+nondepreciating
+nondescript/YS
+nondestructive/Y
+nondetachable
+nondeterminacy
+nondeterminate/Y
+nondeterminism
+nondeterministic
+nondeterministically
+nondisciplinary
+nondisclosure/SM
+nondiscrimination/SM
+nondiscriminatory
+nondramatic
+nondrinker/SM
+nondrying
+nondurable
+noneconomic
+noneducational
+noneffective/S
+nonelastic
+nonelectrical
+nonelectric/S
+nonemergency
+nonempty
+nonenforceable
+nonentity/MS
+nonequivalence/M
+nonequivalent/S
+none/S
+nones/M
+nonessential/S
+nonesuch/SM
+nonetheless
+nonevent/MS
+nonexchangeable
+nonexclusive
+nonexempt
+nonexistence/MS
+nonexistent
+nonexplosive/S
+nonextensible
+nonfactual
+nonfading
+nonfat
+nonfatal
+nonfattening
+nonferrous
+nonfictional
+nonfiction/SM
+nonflammable
+nonflowering
+nonfluctuating
+nonflying
+nonfood/M
+nonfreezing
+nonfunctional
+nongovernmental
+nongranular
+nonhazardous
+nonhereditary
+nonhuman
+nonidentical
+Nonie/M
+Noni/M
+noninclusive
+nonindependent
+nonindustrial
+noninfectious
+noninflammatory
+noninflationary
+noninflected
+nonintellectual/S
+noninteracting
+noninterchangeable
+noninterference/MS
+nonintervention/SM
+nonintoxicating
+nonintuitive
+noninvasive
+nonionic
+nonirritating
+nonjudgmental
+nonjudicial
+nonlegal
+nonlethal
+nonlinearity/MS
+nonlinear/Y
+nonlinguistic
+nonliterary
+nonliving
+nonlocal
+nonmagical
+nonmagnetic
+nonmalignant
+nonmember/SM
+nonmetallic
+nonmetal/MS
+nonmigratory
+nonmilitant/S
+nonmilitary
+Nonnah/M
+Nonna/M
+nonnarcotic/S
+nonnative/S
+nonnegative
+nonnegotiable
+nonnuclear
+nonnumerical/S
+nonobjective
+nonobligatory
+nonobservance/MS
+nonobservant
+nonoccupational
+nonoccurence
+nonofficial
+nonogenarian
+nonoperational
+nonoperative
+nonorthogonal
+nonorthogonality
+nonparallel/S
+nonparametric
+nonpareil/SM
+nonparticipant/SM
+nonparticipating
+nonpartisan/S
+nonpaying
+nonpayment/SM
+nonperformance/SM
+nonperforming
+nonperishable/S
+nonperson/S
+nonperturbing
+nonphysical/Y
+nonplus/S
+nonplussed
+nonplussing
+nonpoisonous
+nonpolitical
+nonpolluting
+nonporous
+nonpracticing
+nonprejudicial
+nonprescription
+nonprocedural/Y
+nonproductive
+nonprofessional/S
+nonprofit/SB
+nonprogrammable
+nonprogrammer
+nonproliferation/SM
+nonpublic
+nonpunishable
+nonracial
+nonradioactive
+nonrandom
+nonreactive
+nonreciprocal/S
+nonreciprocating
+nonrecognition/SM
+nonrecoverable
+nonrecurring
+nonredeemable
+nonreducing
+nonrefillable
+nonrefundable
+nonreligious
+nonrenewable
+nonrepresentational
+nonresidential
+nonresident/SM
+nonresidual
+nonresistance/SM
+nonresistant/S
+nonrespondent/S
+nonresponse
+nonrestrictive
+nonreturnable/S
+nonrhythmic
+nonrigid
+nonsalaried
+nonscheduled
+nonscientific
+nonscoring
+nonseasonal
+nonsectarian
+nonsecular
+nonsegregated
+nonsense/MS
+nonsensicalness/M
+nonsensical/PY
+nonsensitive
+nonsexist
+nonsexual
+nonsingular
+nonskid
+nonslip
+nonsmoker/SM
+nonsmoking
+nonsocial
+nonspeaking
+nonspecialist/MS
+nonspecializing
+nonspecific
+nonspiritual/S
+nonstaining
+nonstandard
+nonstarter/SM
+nonstick
+nonstop
+nonstrategic
+nonstriking
+nonstructural
+nonsuccessive
+nonsupervisory
+nonsupport/GS
+nonsurgical
+nonsustaining
+nonsympathizer/M
+nontarnishable
+nontaxable/S
+nontechnical/Y
+nontenured
+nonterminal/MS
+nonterminating
+nontermination/M
+nontheatrical
+nonthinking/S
+nonthreatening
+nontoxic
+nontraditional
+nontransferable
+nontransparent
+nontrivial
+nontropical
+nonuniform
+nonunion/S
+nonuser/SM
+nonvenomous
+nonverbal/Y
+nonveteran/MS
+nonviable
+nonviolence/SM
+nonviolent/Y
+nonvirulent
+nonvocal
+nonvocational
+nonvolatile
+nonvolunteer/S
+nonvoter/MS
+nonvoting
+nonwhite/SM
+nonworking
+nonyielding
+nonzero
+noodle/GMSD
+nook/MS
+noonday/MS
+noon/GDMS
+nooning/M
+noontide/MS
+noontime/MS
+noose/SDGM
+nope/S
+NORAD/M
+noradrenalin
+noradrenaline/M
+Norah/M
+Nora/M
+Norbert/M
+Norberto/M
+Norbie/M
+Norby/M
+Nordhoff/M
+Nordic/S
+Nordstrom/M
+Norean/M
+Noreen/M
+Norene/M
+Norfolk/M
+nor/H
+Norina/M
+Norine/M
+normalcy/MS
+normality/SM
+normalization/A
+normalizations
+normalization's
+normalized/AU
+normalizes/AU
+normalize/SRDZGB
+normal/SY
+Norma/M
+Normand/M
+Normandy/M
+Norman/SM
+normativeness/M
+normative/YP
+Normie/M
+norm/SMGD
+Normy/M
+Norplant
+Norrie/M
+Norri/SM
+Norristown/M
+Norry/M
+Norse
+Norseman/M
+Norsemen
+Northampton/M
+northbound
+northeastern
+northeaster/YM
+Northeast/SM
+northeastward/S
+northeast/ZSMR
+northerly/S
+norther/MY
+Northerner/M
+northernmost
+northern/RYZS
+Northfield/M
+northing/M
+northland
+North/M
+northmen
+north/MRGZ
+Northrop/M
+Northrup/M
+norths
+Norths
+Northumberland/M
+northward/S
+northwestern
+northwester/YM
+northwest/MRZS
+Northwest/MS
+northwestward/S
+Norton/M
+Norwalk/M
+Norway/M
+Norwegian/S
+Norwich/M
+Norw/M
+nosebag/M
+nosebleed/SM
+nosecone/S
+nosedive/DSG
+nosed/V
+nosegay/MS
+nose/M
+Nosferatu/M
+nos/GDS
+nosh/MSDG
+nosily
+nosiness/MS
+nosing/M
+nostalgia/SM
+nostalgically
+nostalgic/S
+Nostradamus/M
+Nostrand/M
+nostril/SM
+nostrum/SM
+nosy/SRPMT
+notability/SM
+notableness/M
+notable/PS
+notably
+notarial
+notarization/S
+notarize/DSG
+notary/MS
+notate/VGNXSD
+notational/CY
+notation/CMSF
+notative/CF
+notch/MSDG
+not/DRGB
+notebook/MS
+note/CSDFG
+notedness/M
+noted/YP
+notepad/S
+notepaper/MS
+note's
+noteworthiness/SM
+noteworthy/P
+nothingness/SM
+nothing/PS
+noticeable/U
+noticeably
+noticeboard/S
+noticed/U
+notice/MSDG
+notifiable
+notification/M
+notifier/M
+notify/NGXSRDZ
+notional/Y
+notion/MS
+notoriety/S
+notoriousness/M
+notorious/YP
+Notre/M
+Nottingham/M
+notwithstanding
+Nouakchott/M
+nougat/MS
+Noumea/M
+noun/SMK
+nourish/DRSGL
+nourished/U
+nourisher/M
+nourishment/SM
+nous/M
+nouveau
+nouvelle
+novae
+Novak/M
+Nova/M
+nova/MS
+novelette/SM
+Novelia/M
+novelist/SM
+novelization/S
+novelize/GDS
+Novell/SM
+novella/SM
+novel/SM
+novelty/MS
+November/SM
+novena/SM
+novene
+Novgorod/M
+novice/MS
+novitiate/MS
+Nov/M
+Novocaine/M
+Novocain/S
+Novokuznetsk/M
+Novosibirsk/M
+NOW
+nowadays
+noway/S
+Nowell/M
+nowhere/S
+nowise
+now/S
+noxiousness/M
+noxious/PY
+Noyce/M
+Noyes/M
+nozzle/MS
+Np
+NP
+NRA
+nroff/M
+N's
+NS
+n's/CI
+NSF
+n/T
+NT
+nth
+nuance/SDM
+nubbin/SM
+nubby/RT
+Nubia/M
+Nubian/M
+nubile
+nub/MS
+nuclear/K
+nuclease/M
+nucleated/A
+nucleate/DSXNG
+nucleation/M
+nucleic
+nuclei/M
+nucleoli
+nucleolus/M
+nucleon/MS
+nucleotide/MS
+nucleus/M
+nuclide/M
+nude/CRS
+nudely
+nudeness/M
+nudest
+nudge/GSRD
+nudger/M
+nudism/MS
+nudist/MS
+nudity/MS
+nugatory
+Nugent/M
+nugget/SM
+nuisance/MS
+nuke/DSMG
+Nukualofa
+null/DSG
+nullification/M
+nullifier/M
+nullify/RSDXGNZ
+nullity/SM
+nu/M
+numbered/UA
+numberer/M
+numberless
+numberplate/M
+number/RDMGJ
+numbers/A
+Numbers/M
+numbing/Y
+numbness/MS
+numb/SGZTYRDP
+numbskull's
+numerable/IC
+numeracy/SI
+numeral/YMS
+numerate/SDNGX
+numerates/I
+numeration/M
+numerator/MS
+numerical/Y
+numeric/S
+numerological
+numerologist/S
+numerology/MS
+numerousness/M
+numerous/YP
+numinous/S
+numismatic/S
+numismatics/M
+numismatist/MS
+numskull/SM
+Nunavut/M
+nuncio/SM
+Nunez/M
+Nunki/M
+nun/MS
+nunnery/MS
+nuptial/S
+Nuremberg/M
+Nureyev/M
+nursemaid/MS
+nurser/M
+nurseryman/M
+nurserymen
+nursery/MS
+nurse/SRDJGMZ
+nursling/M
+nurturer/M
+nurture/SRDGZM
+nus
+nutate/NGSD
+nutation/M
+nutcracker/M
+nutcrack/RZ
+nuthatch/SM
+nutmeat/SM
+nutmegged
+nutmegging
+nutmeg/MS
+nut/MS
+nutpick/MS
+Nutrasweet/M
+nutria/SM
+nutrient/MS
+nutriment/MS
+nutritional/Y
+nutritionist/MS
+nutrition/SM
+nutritiousness/MS
+nutritious/PY
+nutritive/Y
+nutshell/MS
+nutted
+nuttiness/SM
+nutting
+nutty/TRP
+nuzzle/GZRSD
+NV
+NW
+NWT
+NY
+Nyasa/M
+NYC
+Nydia/M
+Nye/M
+Nyerere/M
+nylon/SM
+nymphet/MS
+nymph/M
+nympholepsy/M
+nymphomaniac/S
+nymphomania/MS
+nymphs
+Nyquist/M
+NYSE
+Nyssa/M
+NZ
+o
+O
+oafishness/S
+oafish/PY
+oaf/MS
+Oahu/M
+Oakland/M
+Oakley/M
+Oakmont/M
+oak/SMN
+oakum/MS
+oakwood
+oar/GSMD
+oarlock/MS
+oarsman/M
+oarsmen
+oarswoman
+oarswomen
+OAS
+oases
+oasis/M
+oatcake/MS
+oater/M
+Oates/M
+oath/M
+oaths
+oatmeal/SM
+oat/SMNR
+Oaxaca/M
+ob
+OB
+Obadiah/M
+Obadias/M
+obbligato/S
+obduracy/S
+obdurateness/S
+obdurate/PDSYG
+Obediah/M
+obedience/EMS
+obedient/EY
+Obed/M
+obeisance/MS
+obeisant/Y
+obelisk/SM
+Oberlin/M
+Oberon/M
+obese
+obesity/MS
+obey/EDRGS
+obeyer/EM
+obfuscate/SRDXGN
+obfuscation/M
+obfuscatory
+Obidiah/M
+Obie/M
+obi/MDGS
+obit/SMR
+obituary/SM
+obj
+objectify/GSDXN
+objectionableness/M
+objectionable/U
+objectionably
+objection/SMB
+objectiveness/MS
+objective/PYS
+objectivity/MS
+objector/SM
+object/SGVMD
+objurgate/GNSDX
+objurgation/M
+oblate/NYPSX
+oblation/M
+obligate/NGSDXY
+obligational
+obligation/M
+obligatorily
+obligatory
+obliged/E
+obliger/M
+obliges/E
+oblige/SRDG
+obligingness/M
+obliging/PY
+oblique/DSYGP
+obliqueness/S
+obliquity/MS
+obliterate/VNGSDX
+obliteration/M
+obliterative/Y
+oblivion/MS
+obliviousness/MS
+oblivious/YP
+oblongness/M
+oblong/SYP
+obloquies
+obloquy/M
+Ob/MD
+obnoxiousness/MS
+obnoxious/YP
+oboe/SM
+oboist/S
+obos
+O'Brien/M
+obs
+obscene/RYT
+obscenity/MS
+obscurantism/MS
+obscurantist/MS
+obscuration
+obscureness/M
+obscure/YTPDSRGL
+obscurity/MS
+obsequies
+obsequiousness/S
+obsequious/YP
+obsequy
+observability/M
+observable/SU
+observably
+observance/MS
+observantly
+observants
+observant/U
+observational/Y
+observation/MS
+observatory/MS
+observed/U
+observer/M
+observe/ZGDSRB
+observing/Y
+obsess/GVDS
+obsessional
+obsession/MS
+obsessiveness/S
+obsessive/PYS
+obsidian/SM
+obsolesce/GSD
+obsolescence/S
+obsolescent/Y
+obsolete/GPDSY
+obsoleteness/M
+obstacle/SM
+obstetrical
+obstetrician/SM
+obstetric/S
+obstetrics/M
+obstinacy/SM
+obstinateness/M
+obstinate/PY
+obstreperousness/SM
+obstreperous/PY
+obstructed/U
+obstructer/M
+obstructionism/SM
+obstructionist/MS
+obstruction/SM
+obstructiveness/MS
+obstructive/PSY
+obstruct/RDVGS
+obtainable/U
+obtainably
+obtain/LSGDRB
+obtainment/S
+obtrude/DSRG
+obtruder/M
+obtrusion/S
+obtrusiveness/MSU
+obtrusive/UPY
+obtuseness/S
+obtuse/PRTY
+obverse/YS
+obviate/XGNDS
+obviousness/SM
+obvious/YP
+Oby/M
+ocarina/MS
+O'Casey
+Occam/M
+occasional/Y
+occasion/MDSJG
+Occidental/S
+occidental/SY
+occident/M
+Occident/SM
+occipital/Y
+occlude/GSD
+occlusion/MS
+occlusive/S
+occulter/M
+occultism/SM
+occult/SRDYG
+occupancy/SM
+occupant/MS
+occupational/Y
+occupation/SAM
+occupied/AU
+occupier/M
+occupies/A
+occupy/RSDZG
+occur/AS
+occurred/A
+occurrence/SM
+occurring/A
+oceanfront/MS
+oceangoing
+Oceania/M
+oceanic
+ocean/MS
+oceanographer/SM
+oceanographic
+oceanography/SM
+oceanology/MS
+oceanside
+Oceanside/M
+Oceanus/M
+ocelot/SM
+ocher/DMGS
+Ochoa/M
+o'clock
+O'Clock
+O'Connell/M
+O'Connor/M
+Oconomowoc/M
+OCR
+octagonal/Y
+octagon/SM
+octahedral
+octahedron/M
+octal/S
+octane/MS
+octant/M
+octave/MS
+Octavia/M
+Octavian/M
+Octavio/M
+Octavius/M
+octavo/MS
+octennial
+octet/SM
+octile
+octillion/M
+Oct/M
+October/MS
+octogenarian/MS
+octopus/SM
+octoroon/M
+ocular/S
+oculist/SM
+OD
+odalisque/SM
+oddball/SM
+oddity/MS
+oddment/MS
+oddness/MS
+odd/TRYSPL
+Odele/M
+Odelia/M
+Odelinda/M
+Odella/M
+Odelle/M
+Odell/M
+O'Dell/M
+ode/MDRS
+Ode/MR
+Oderberg/MS
+Oder/M
+Odessa/M
+Odets/M
+Odetta/M
+Odette/M
+Odey/M
+Odie/M
+Odilia/M
+Odille/M
+Odin/M
+odiousness/MS
+odious/PY
+Odis/M
+odium/MS
+Odo/M
+odometer/SM
+Odom/M
+O'Donnell/M
+odor/DMS
+odoriferous
+odorless
+odorous/YP
+ODs
+O'Dwyer/M
+Ody/M
+Odysseus/M
+Odyssey/M
+odyssey/S
+OE
+OED
+oedipal
+Oedipal/Y
+Oedipus/M
+OEM/M
+OEMS
+oenology/MS
+oenophile/S
+o'er
+O'Er
+Oersted/M
+oesophagi
+oeuvre/SM
+Ofelia/M
+Ofella/M
+offal/MS
+offbeat/MS
+offcuts
+Offenbach/M
+offender/M
+offend/SZGDR
+offense/MSV
+offensively/I
+offensiveness/MSI
+offensive/YSP
+offerer/M
+offering/M
+offer/RDJGZ
+offertory/SM
+offhand/D
+offhandedness/S
+offhanded/YP
+officeholder/SM
+officemate/S
+officer/GMD
+officership/S
+office/SRMZ
+officialdom/SM
+officialism/SM
+officially/U
+official/PSYM
+officiant/SM
+officiate/XSDNG
+officiation/M
+officiator/MS
+officio
+officiousness/MS
+officious/YP
+offing/M
+offish
+offload/GDS
+offprint/GSDM
+offramp
+offset/SM
+offsetting
+offshoot/MS
+offshore
+offside/RS
+offspring/M
+offstage/S
+off/SZGDRJ
+offtrack
+Ofilia/M
+of/K
+often/RT
+oftentimes
+oft/NRT
+ofttimes
+Ogbomosho/M
+Ogdan/M
+Ogden/M
+Ogdon/M
+Ogilvy/M
+ogive/M
+Oglethorpe/M
+ogle/ZGDSR
+ogreish
+ogre/MS
+ogress/S
+oh
+OH
+O'Hara
+O'Hare/M
+O'Higgins
+Ohioan/S
+Ohio/M
+ohmic
+ohmmeter/MS
+ohm/SM
+oho/S
+ohs
+OHSA/M
+oilcloth/M
+oilcloths
+oiler/M
+oilfield/MS
+oiliness/SM
+oilman/M
+oil/MDRSZG
+oilmen
+oilseed/SM
+oilskin/MS
+oily/TPR
+oink/GDS
+ointment/SM
+Oise/M
+OJ
+Ojibwa/SM
+Okamoto/M
+okapi/SM
+Okayama/M
+okay/M
+Okeechobee/M
+O'Keeffe
+Okefenokee
+Okhotsk/M
+Okinawa/M
+Okinawan/S
+Oklahoma/M
+Oklahoman/SM
+Okla/M
+OK/MDG
+okra/MS
+OKs
+Oktoberfest
+Olaf/M
+Olag/M
+Ola/M
+Olav/M
+Oldenburg/M
+olden/DG
+Oldfield/M
+oldie/MS
+oldish
+oldness/S
+Oldsmobile/M
+oldster/SM
+Olduvai/M
+old/XTNRPS
+olé
+oleaginous
+oleander/SM
+O'Leary/M
+olefin/M
+Oleg/M
+Ole/MV
+Olenek/M
+Olenka/M
+Olen/M
+Olenolin/M
+oleomargarine/SM
+oleo/S
+olés
+olfactory
+Olga/M
+Olia/M
+oligarchic
+oligarchical
+oligarch/M
+oligarchs
+oligarchy/SM
+Oligocene
+oligopolistic
+oligopoly/MS
+Olimpia/M
+Olin/M
+olive/MSR
+Olive/MZR
+Oliver/M
+Olivero/M
+Olivette/M
+Olivetti/M
+Olivia/M
+Olivier/M
+Olivie/RM
+Oliviero/M
+Oliy/M
+Ollie/M
+Olly/M
+Olmec
+Olmsted/M
+Olsen/M
+Olson/M
+Olva/M
+Olvan/M
+Olwen/M
+Olympe/M
+Olympiad/MS
+Olympian/S
+Olympia/SM
+Olympic/S
+Olympie/M
+Olympus/M
+Omaha/SM
+Oman/M
+Omar/M
+ombudsman/M
+ombudsmen
+Omdurman/M
+omega/MS
+omelet/SM
+omelette's
+omen/DMG
+Omero/M
+omicron/MS
+ominousness/SM
+ominous/YP
+omission/MS
+omit/S
+omitted
+omitting
+omnibus/MS
+omni/M
+omnipotence/SM
+Omnipotent
+omnipotent/SY
+omnipresence/MS
+omnipresent/Y
+omniscience/SM
+omniscient/YS
+omnivore/MS
+omnivorousness/MS
+omnivorous/PY
+oms
+Omsk/M
+om/XN
+ON
+onanism/M
+Onassis/M
+oncer/M
+once/SR
+oncogene/S
+oncologist/S
+oncology/SM
+oncoming/S
+Ondrea/M
+Oneal/M
+Onega/M
+Onegin/M
+Oneida/SM
+O'Neil
+O'Neill
+oneness/MS
+one/NPMSX
+oner/M
+onerousness/SM
+onerous/YP
+oneself
+onetime
+oneupmanship
+Onfre/M
+Onfroi/M
+ongoing/S
+Onida/M
+onion/GDM
+onionskin/MS
+onlooker/MS
+onlooking
+only/TP
+Onofredo/M
+Ono/M
+onomatopoeia/SM
+onomatopoeic
+onomatopoetic
+Onondaga/MS
+onrush/GMS
+on/RY
+ons
+Onsager/M
+onset/SM
+onsetting
+onshore
+onside
+onslaught/MS
+Ontarian/S
+Ontario/M
+Ont/M
+onto
+ontogeny/SM
+ontological/Y
+ontology/SM
+onus/SM
+onward/S
+onyx/MS
+oodles
+ooh/GD
+oohs
+oolitic
+Oona/M
+OOo/M
+oops/S
+Oort/M
+ooze/GDS
+oozy/RT
+opacity/SM
+opalescence/S
+opalescent/Y
+Opalina/M
+Opaline/M
+Opal/M
+opal/SM
+opaque/GTPYRSD
+opaqueness/SM
+opcode/MS
+OPEC
+Opel/M
+opencast
+opened/AU
+opener/M
+openhandedness/SM
+openhanded/P
+openhearted
+opening/M
+openness/S
+OpenOffice.org/M
+opens/A
+openwork/MS
+open/YRDJGZTP
+operable/I
+operandi
+operand/SM
+operant/YS
+opera/SM
+operate/XNGVDS
+operatically
+operatic/S
+operationalization/S
+operationalize/D
+operational/Y
+operation/M
+operative/IP
+operatively
+operativeness/MI
+operatives
+operator/SM
+operetta/MS
+ope/S
+Ophelia/M
+Ophelie/M
+Ophiuchus/M
+ophthalmic/S
+ophthalmologist/SM
+ophthalmology/MS
+opiate/GMSD
+opine/XGNSD
+opinionatedness/M
+opinionated/PY
+opinion/M
+opioid
+opium/MS
+opossum/SM
+opp
+Oppenheimer/M
+opponent/MS
+opportune/IY
+opportunism/SM
+opportunistic
+opportunistically
+opportunist/SM
+opportunity/MS
+oppose/BRSDG
+opposed/U
+opposer/M
+oppositeness/M
+opposite/SXYNP
+oppositional
+opposition/M
+oppress/DSGV
+oppression/MS
+oppressiveness/MS
+oppressive/YP
+oppressor/MS
+opprobrious/Y
+opprobrium/SM
+Oprah/M
+ops
+opt/DSG
+opthalmic
+opthalmologic
+opthalmology
+optical/Y
+optician/SM
+optic/S
+optics/M
+optima
+optimality
+optimal/Y
+optimise's
+optimism/SM
+optimistic
+optimistically
+optimist/SM
+optimization/SM
+optimize/DRSZG
+optimized/U
+optimizer/M
+optimizes/U
+optimum/SM
+optionality/M
+optional/YS
+option/GDMS
+optoelectronic
+optometric
+optometrist/MS
+optometry/SM
+opulence/SM
+opulent/Y
+opus/SM
+op/XGDN
+OR
+oracle/GMSD
+oracular
+Oralee/M
+Oralia/M
+Oralie/M
+Oralla/M
+Oralle/M
+oral/YS
+Ora/M
+orangeade/MS
+Orange/M
+orange/MS
+orangery/SM
+orangutan/MS
+Oranjestad/M
+Oran/M
+orate/SDGNX
+oration/M
+oratorical/Y
+oratorio/MS
+orator/MS
+oratory/MS
+Orazio/M
+Orbadiah/M
+orbicular
+orbiculares
+orbital/MYS
+orbit/MRDGZS
+orb/SMDG
+orchard/SM
+orchestral/Y
+orchestra/MS
+orchestrate/GNSDX
+orchestrater's
+orchestration/M
+orchestrator/M
+orchid/SM
+ordainer/M
+ordainment/MS
+ordain/SGLDR
+ordeal/SM
+order/AESGD
+ordered/U
+orderer
+ordering/S
+orderless
+orderliness/SE
+orderly/PS
+order's/E
+ordinal/S
+ordinance/MS
+ordinarily
+ordinariness/S
+ordinary/RSPT
+ordinated
+ordinate/I
+ordinates
+ordinate's
+ordinating
+ordination/SM
+ordnance/SM
+Ordovician
+ordure/MS
+oregano/SM
+Oreg/M
+Oregonian/S
+Oregon/M
+Orelee/M
+Orelia/M
+Orelie/M
+Orella/M
+Orelle/M
+Orel/M
+Oren/M
+Ore/NM
+ore/NSM
+Oreo
+Orestes
+organdie's
+organdy/MS
+organelle/MS
+organically/I
+organic/S
+organismic
+organism/MS
+organist/MS
+organizable/UMS
+organizational/MYS
+organization/MEAS
+organize/AGZDRS
+organized/UE
+organizer/MA
+organizes/E
+organizing/E
+organ/MS
+organometallic
+organza/SM
+orgasm/GSMD
+orgasmic
+orgiastic
+orgy/SM
+Oriana/M
+oriel/MS
+orientable
+Oriental/S
+oriental/SY
+orientated/A
+orientate/ESDXGN
+orientates/A
+orientation/AMES
+orienteering/M
+orienter
+orient/GADES
+orient's
+Orient/SM
+orifice/MS
+orig
+origami/MS
+originality/SM
+originally
+original/US
+originate/VGNXSD
+origination/M
+originative/Y
+originator/SM
+origin/MS
+Orin/M
+Orinoco/M
+oriole/SM
+Orion/M
+orison/SM
+Oriya/M
+Orizaba/M
+Orkney/M
+Orland/M
+Orlando/M
+Orlan/M
+Orleans
+Orlick/M
+Orlon/SM
+Orly/M
+ormolu/SM
+or/MY
+ornamental/SY
+ornamentation/SM
+ornament/GSDM
+ornateness/SM
+ornate/YP
+orneriness/SM
+ornery/PRT
+ornithological
+ornithologist/SM
+ornithology/MS
+orographic/M
+orography/M
+Orono/M
+orotund
+orotundity/MS
+orphanage/MS
+orphanhood/M
+orphan/SGDM
+Orpheus/M
+Orphic
+Orran/M
+Orren/M
+Orrin/M
+orris/SM
+Orr/MN
+ors
+Orsa/M
+Orsola/M
+Orson/M
+Ortega/M
+Ortensia/M
+orthodontia/S
+orthodontic/S
+orthodontics/M
+orthodontist/MS
+orthodoxies
+orthodoxly/U
+Orthodox/S
+orthodoxy's
+orthodox/YS
+orthodoxy/U
+orthogonality/M
+orthogonalization/M
+orthogonalized
+orthogonal/Y
+orthographic
+orthographically
+orthography/MS
+orthonormal
+orthopedic/S
+orthopedics/M
+orthopedist/SM
+orthophosphate/MS
+orthorhombic
+Ortiz/M
+Orton/M
+Orval/M
+Orville/M
+Orv/M
+Orwellian
+Orwell/M
+o's
+Osage/SM
+Osaka/M
+Osbert/M
+Osborne/M
+Osborn/M
+Osbourne/M
+Osbourn/M
+Oscar/SM
+Osceola/M
+oscillate/SDXNG
+oscillation/M
+oscillator/SM
+oscillatory
+oscilloscope/SM
+osculate/XDSNG
+osculation/M
+Osgood/M
+OSHA
+Oshawa/M
+O'Shea/M
+Oshkosh/M
+osier/MS
+Osiris/M
+Oslo/M
+Os/M
+OS/M
+Osman/M
+osmium/MS
+Osmond/M
+osmoses
+osmosis/M
+osmotic
+Osmund/M
+osprey/SM
+osseous/Y
+Ossie/M
+ossification/M
+ossify/NGSDX
+ostensible
+ostensibly
+ostentation/MS
+ostentatiousness/M
+ostentatious/PY
+osteoarthritides
+osteoarthritis/M
+osteology/M
+osteopathic
+osteopath/M
+osteopaths
+osteopathy/MS
+osteoporoses
+osteoporosis/M
+ostracise's
+ostracism/MS
+ostracize/GSD
+Ostrander/M
+ostrich/MS
+Ostrogoth/M
+Ostwald/M
+O'Sullivan/M
+Osvaldo/M
+Oswald/M
+Oswell/M
+OT
+OTB
+OTC
+Otes
+Otha/M
+Othelia/M
+Othella/M
+Othello/M
+otherness/M
+other/SMP
+otherwise
+otherworldly/P
+otherworld/Y
+Othilia/M
+Othilie/M
+Otho/M
+otiose
+Otis/M
+OTOH
+Ottawa/MS
+otter/DMGS
+Ottilie/M
+Otto/M
+Ottoman
+ottoman/MS
+Ouagadougou/M
+oubliette/SM
+ouch/SDG
+oughtn't
+ought/SGD
+Ouija/MS
+ounce/MS
+our/S
+ourself
+ourselves
+ouster/M
+oust/RDGZS
+outage/MS
+outargue/GDS
+outback/MRS
+outbalance/GDS
+outbidding
+outbid/S
+outboard/S
+outboast/GSD
+outbound/S
+outbreak/SMG
+outbroke
+outbroken
+outbuilding/SM
+outburst/MGS
+outcast/GSM
+outclass/SDG
+outcome/SM
+outcropped
+outcropping/S
+outcrop/SM
+outcry/MSDG
+outdated/P
+outdid
+outdistance/GSD
+outdoes
+outdo/G
+outdone
+outdoor/S
+outdoorsy
+outdraw/GS
+outdrawn
+outdrew
+outermost
+outerwear/M
+outface/SDG
+outfall/MS
+outfielder/M
+outfield/RMSZ
+outfight/SG
+outfit/MS
+outfitted
+outfitter/MS
+outfitting
+outflank/SGD
+outflow/SMDG
+outfought
+outfox/GSD
+outgeneraled
+outgoes
+outgo/GJ
+outgoing/P
+outgrew
+outgrip
+outgrow/GSH
+outgrown
+outgrowth/M
+outgrowths
+outguess/SDG
+outhit/S
+outhitting
+outhouse/SM
+outing/M
+outlaid
+outlander/M
+outlandishness/MS
+outlandish/PY
+outland/ZR
+outlast/GSD
+outlawry/M
+outlaw/SDMG
+outlay/GSM
+outlet/SM
+outliers
+outline/SDGM
+outlive/GSD
+outlook/MDGS
+outlying
+outmaneuver/GSD
+outmatch/SDG
+outmigration
+outmoded
+outness/M
+outnumber/GDS
+outpaced
+outpatient/SM
+outperform/DGS
+out/PJZGSDR
+outplacement/S
+outplay/GDS
+outpoint/GDS
+outpost/SM
+outpouring/M
+outpour/MJG
+outproduce/GSD
+output/SM
+outputted
+outputting
+outrace/GSD
+outrage/GSDM
+outrageousness/M
+outrageous/YP
+outran
+outrank/GSD
+outré
+outreach/SDG
+outrider/MS
+outrigger/SM
+outright/Y
+outrunning
+outrun/S
+outscore/GDS
+outsell/GS
+outset/MS
+outsetting
+outshine/SG
+outshone
+outshout/GDS
+outsider/PM
+outside/ZSR
+outsize/S
+outskirt/SM
+outsmart/SDG
+outsold
+outsource/SDJG
+outspend/SG
+outspent
+outspoke
+outspokenness/SM
+outspoken/YP
+outspread/SG
+outstanding/Y
+outstate/NX
+outstation/M
+outstay/SDG
+outstretch/GSD
+outstripped
+outstripping
+outstrip/S
+outtake/S
+outvote/GSD
+outwardness/M
+outward/SYP
+outwear/SG
+outweigh/GD
+outweighs
+outwit/S
+outwitted
+outwitting
+outwore
+outwork/SMDG
+outworn
+ouzo/SM
+oval/MYPS
+ovalness/M
+ova/M
+ovarian
+ovary/SM
+ovate/SDGNX
+ovation/GMD
+ovenbird/SM
+oven/MS
+overabundance/MS
+overabundant
+overachieve/SRDGZ
+overact/DGVS
+overage/S
+overaggressive
+overallocation
+overall/SM
+overambitious
+overanxious
+overarching
+overarm/GSD
+overate
+overattentive
+overawe/GDS
+overbalance/DSG
+overbear/GS
+overbearingness/M
+overbearing/YP
+overbidding
+overbid/S
+overbite/MS
+overblown
+overboard
+overbold
+overbook/SDG
+overbore
+overborne
+overbought
+overbuild/GS
+overbuilt
+overburdening/Y
+overburden/SDG
+overbuy/GS
+overcame
+overcapacity/M
+overcapitalize/DSG
+overcareful
+overcast/GS
+overcasting/M
+overcautious
+overcerebral
+overcharge/DSG
+overcloud/DSG
+overcoating/M
+overcoat/SMG
+overcomer/M
+overcome/RSG
+overcommitment/S
+overcompensate/XGNDS
+overcompensation/M
+overcomplexity/M
+overcomplicated
+overconfidence/MS
+overconfident/Y
+overconscientious
+overconsumption/M
+overcook/SDG
+overcooled
+overcorrection
+overcritical
+overcrowd/DGS
+overcurious
+overdecorate/SDG
+overdependent
+overdetermined
+overdevelop/SDG
+overdid
+overdoes
+overdo/G
+overdone
+overdose/DSMG
+overdraft/SM
+overdraw/GS
+overdrawn
+overdress/GDS
+overdrew
+overdrive/GSM
+overdriven
+overdrove
+overdubbed
+overdubbing
+overdub/S
+overdue
+overeagerness/M
+overeager/PY
+overeater/M
+overeat/GNRS
+overeducated
+overemotional
+overemphases
+overemphasis/M
+overemphasize/GZDSR
+overenthusiastic
+overestimate/DSXGN
+overestimation/M
+overexcite/DSG
+overexercise/SDG
+overexert/GDS
+overexertion/SM
+overexploitation
+overexploited
+overexpose/GDS
+overexposure/SM
+overextend/DSG
+overextension
+overfall/M
+overfed
+overfeed/GS
+overfill/GDS
+overfishing
+overflew
+overflight/SM
+overflow/DGS
+overflown
+overfly/GS
+overfond
+overfull
+overgeneralize/GDS
+overgenerous
+overgraze/SDG
+overgrew
+overground
+overgrow/GSH
+overgrown
+overgrowth/M
+overgrowths
+overhand/DGS
+overhang/GS
+overhasty
+overhaul/GRDJS
+overhead/S
+overheard
+overhearer/M
+overhear/SRG
+overheat/SGD
+overhung
+overincredulous
+overindulgence/SM
+overindulgent
+overindulge/SDG
+overinflated
+overjoy/SGD
+overkill/SDMG
+overladed
+overladen
+overlaid
+overlain
+overland/S
+overlap/MS
+overlapped
+overlapping
+overlarge
+overlay/GS
+overleaf
+overlie
+overload/SDG
+overlong
+overlook/DSG
+overlord/DMSG
+overloud
+overly/GRS
+overmanning
+overmaster/GSD
+overmatching
+overmodest
+overmuch/S
+overnice
+overnight/SDRGZ
+overoptimism/SM
+overoptimistic
+overpaid
+overparticular
+overpass/GMSD
+overpay/LSG
+overpayment/M
+overplay/SGD
+overpopulate/DSNGX
+overpopulation/M
+overpopulous
+overpower/GSD
+overpowering/Y
+overpraise/DSG
+overprecise
+overpressure
+overprice/SDG
+overprint/DGS
+overproduce/SDG
+overproduction/S
+overprotect/GVDS
+overprotection/M
+overqualified
+overran
+overrate/DSG
+overreach/DSRG
+overreaction/SM
+overreact/SGD
+overred
+overrefined
+overrepresented
+overridden
+overrider/M
+override/RSG
+overripe
+overrode
+overrule/GDS
+overrunning
+overrun/S
+oversample/DG
+oversaturate
+oversaw
+oversea/S
+overseeing
+overseen
+overseer/M
+oversee/ZRS
+oversell/SG
+oversensitiveness/S
+oversensitive/P
+oversensitivity
+oversexed
+overshadow/GSD
+overshoe/SM
+overshoot/SG
+overshot/S
+oversight/SM
+oversimple
+oversimplification/M
+oversimplify/GXNDS
+oversize/GS
+oversleep/GS
+overslept
+oversoftness/M
+oversoft/P
+oversold
+overspecialization/MS
+overspecialize/GSD
+overspend/SG
+overspent
+overspill/DMSG
+overspread/SG
+overstaffed
+overstatement/SM
+overstate/SDLG
+overstay/GSD
+overstepped
+overstepping
+overstep/S
+overstimulate/DSG
+overstock/SGD
+overstraining
+overstressed
+overstretch/D
+overstrict
+overstrike/GS
+overstrung
+overstuffed
+oversubscribe/SDG
+oversubtle
+oversupply/MDSG
+oversuspicious
+overtaken
+overtake/RSZG
+overtax/DSG
+overthrew
+overthrow/GS
+overthrown
+overtightened
+overtime/MGDS
+overtire/DSG
+overtone/MS
+overtook
+overt/PY
+overture/DSMG
+overturn/SDG
+overuse/DSG
+overvalue/GSD
+overview/MS
+overweening
+overweight/GSD
+overwhelm/GDS
+overwhelming/Y
+overwinter/SDG
+overwork/GSD
+overwrap
+overwrite/SG
+overwritten
+overwrote
+overwrought
+over/YGS
+overzealousness/M
+overzealous/P
+Ovid/M
+oviduct/SM
+oviform
+oviparous
+ovoid/S
+ovular
+ovulate/GNXDS
+ovulatory
+ovule/MS
+ovum/MS
+ow/DYG
+Owen/MS
+owe/S
+owlet/SM
+owl/GSMDR
+owlishness/M
+owlish/PY
+owned/U
+own/EGDS
+ownership/MS
+owner/SM
+oxalate/M
+oxalic
+oxaloacetic
+oxblood/S
+oxbow/SM
+oxcart/MS
+oxen/M
+oxford/MS
+Oxford/MS
+oxidant/SM
+oxidate/NVX
+oxidation/M
+oxidative/Y
+oxide/SM
+oxidization/MS
+oxidized/U
+oxidize/JDRSGZ
+oxidizer/M
+oxidizes/A
+ox/MNS
+Oxnard
+Oxonian
+oxtail/M
+Oxus/M
+oxyacetylene/MS
+oxygenate/XSDMGN
+oxygenation/M
+oxygen/MS
+oxyhydroxides
+oxymora
+oxymoron/M
+oyster/GSDM
+oystering/M
+oz
+Ozark/SM
+Oz/M
+ozone/SM
+Ozymandias/M
+Ozzie/M
+Ozzy/M
+P
+PA
+Pablo/M
+Pablum/M
+pablum/S
+Pabst/M
+pabulum/SM
+PAC
+pace/DRSMZG
+Pace/M
+pacemaker/SM
+pacer/M
+pacesetter/MS
+pacesetting
+Pacheco/M
+pachyderm/MS
+pachysandra/MS
+pacific
+pacifically
+pacification/M
+Pacific/M
+pacifier/M
+pacifism/MS
+pacifistic
+pacifist/MS
+pacify/NRSDGXZ
+package/ARSDG
+packaged/U
+packager/S
+package's
+packages/U
+packaging/SM
+Packard/SM
+packed/AU
+packer/MUS
+packet/MSDG
+pack/GZSJDRMB
+packhorse/M
+packinghouse/S
+packing/M
+packsaddle/SM
+Packston/M
+packs/UA
+Packwood/M
+Paco/M
+Pacorro/M
+pact/SM
+Padang/M
+padded/U
+Paddie/M
+padding/SM
+paddle/MZGRSD
+paddler/M
+paddock/SDMG
+Paddy/M
+paddy/SM
+Padget/M
+Padgett/M
+Padilla/M
+padlock/SGDM
+pad/MS
+Padraic/M
+Padraig/M
+padre/MS
+Padrewski/M
+Padriac/M
+paean/MS
+paediatrician/MS
+paediatrics/M
+paedophilia's
+paella/SM
+paeony/M
+Paganini/M
+paganism/MS
+pagan/SM
+pageantry/SM
+pageant/SM
+pageboy/SM
+paged/U
+pageful
+Page/M
+page/MZGDRS
+pager/M
+paginate/DSNGX
+Paglia/M
+pagoda/MS
+Pahlavi/M
+paid/AU
+Paige/M
+pailful/SM
+Pail/M
+pail/SM
+Paine/M
+painfuller
+painfullest
+painfulness/MS
+painful/YP
+pain/GSDM
+painkiller/MS
+painkilling
+painlessness/S
+painless/YP
+painstaking/SY
+paint/ADRZGS
+paintbox/M
+paintbrush/SM
+painted/U
+painterly/P
+painter/YM
+painting/SM
+paint's
+paintwork
+paired/UA
+pair/JSDMG
+pairs/A
+pairwise
+paisley/MS
+pajama/MDS
+Pakistani/S
+Pakistan/M
+palace/MS
+paladin/MS
+palaeolithic
+palaeontologists
+palaeontology/M
+palanquin/MS
+palatability/M
+palatableness/M
+palatable/P
+palatalization/MS
+palatalize/SDG
+palatal/YS
+palate/BMS
+palatial/Y
+palatinate/SM
+Palatine
+palatine/S
+palaver/GSDM
+paleface/SM
+Palembang/M
+paleness/S
+Paleocene
+Paleogene
+paleographer/SM
+paleography/SM
+paleolithic
+Paleolithic
+paleontologist/S
+paleontology/MS
+Paleozoic
+Palermo/M
+pale/SPY
+Palestine/M
+Palestinian/S
+Palestrina/M
+palette/MS
+Paley/M
+palfrey/MS
+palimony/S
+palimpsest/MS
+palindrome/MS
+palindromic
+paling/M
+palisade/MGSD
+Palisades/M
+palish
+Palladio/M
+palladium/SM
+pallbearer/SM
+palletized
+pallet/SMGD
+pall/GSMD
+palliate/SDVNGX
+palliation/M
+palliative/SY
+pallidness/MS
+pallid/PY
+Pall/M
+pallor/MS
+palmate
+palmer/M
+Palmer/M
+Palmerston/M
+palmetto/MS
+palm/GSMDR
+palmist/MS
+palmistry/MS
+Palm/MR
+Palmolive/M
+palmtop/S
+Palmyra/M
+palmy/RT
+Palo/M
+Paloma/M
+Palomar/M
+palomino/MS
+palpable
+palpably
+palpate/SDNGX
+palpation/M
+palpitate/NGXSD
+palpitation/M
+pal/SJMDRYTG
+palsy/GSDM
+paltriness/SM
+paltry/TRP
+paludal
+Pa/M
+Pamela/M
+Pamelina/M
+Pamella/M
+pa/MH
+Pamirs
+Pam/M
+Pammie/M
+Pammi/M
+Pammy/M
+pampas/M
+pamperer/M
+pamper/RDSG
+Pampers
+pamphleteer/DMSG
+pamphlet/SM
+panacea/MS
+panache/MS
+Panama/MS
+Panamanian/S
+panama/S
+pancake/MGSD
+Panchito/M
+Pancho/M
+panchromatic
+pancreas/MS
+pancreatic
+panda/SM
+pandemic/S
+pandemonium/SM
+pander/ZGRDS
+Pandora/M
+panegyric/SM
+pane/KMS
+paneling/M
+panelist/MS
+panelization
+panelized
+panel/JSGDM
+Pangaea/M
+pang/GDMS
+pangolin/M
+panhandle/RSDGMZ
+panicked
+panicking
+panicky/RT
+panic/SM
+panier's
+panjandrum/M
+Pankhurst/M
+Pan/M
+Panmunjom/M
+panned
+pannier/SM
+panning
+panoply/MSD
+panorama/MS
+panoramic
+panpipes
+Pansie/M
+pan/SMD
+Pansy/M
+pansy/SM
+Pantagruel/M
+Pantaloon/M
+pantaloons
+pant/GDS
+pantheism/MS
+pantheistic
+pantheist/S
+pantheon/MS
+panther/SM
+pantie/SM
+pantiled
+pantograph/M
+pantomime/SDGM
+pantomimic
+pantomimist/SM
+pantry/SM
+pantsuit/SM
+pantyhose
+pantyliner
+pantywaist/SM
+Panza/M
+Paola/M
+Paoli/M
+Paolina/M
+Paolo/M
+papacy/SM
+Papagena/M
+Papageno/M
+papal/Y
+papa/MS
+paparazzi
+papaw/SM
+papaya/MS
+paperback/GDMS
+paperboard/MS
+paperboy/SM
+paperer/M
+papergirl/SM
+paper/GJMRDZ
+paperhanger/SM
+paperhanging/SM
+paperiness/M
+paperless
+paperweight/MS
+paperwork/SM
+papery/P
+papillae
+papilla/M
+papillary
+papist/MS
+papoose/SM
+Pappas/M
+papped
+papping
+pappy/RST
+paprika/MS
+pap/SZMNR
+papyri
+papyrus/M
+Paquito/M
+parable/MGSD
+parabola/MS
+parabolic
+paraboloidal/M
+paraboloid/MS
+Paracelsus/M
+paracetamol/M
+parachuter/M
+parachute/RSDMG
+parachutist/MS
+Paraclete/M
+parader/M
+parade/RSDMZG
+paradigmatic
+paradigm/SM
+paradisaic
+paradisaical
+Paradise/M
+paradise/MS
+paradoxic
+paradoxicalness/M
+paradoxical/YP
+paradox/MS
+paraffin/GSMD
+paragon/SGDM
+paragrapher/M
+paragraph/MRDG
+paragraphs
+Paraguayan/S
+Paraguay/M
+parakeet/MS
+paralegal/S
+paralinguistic
+parallax/SM
+parallel/DSG
+paralleled/U
+parallelepiped/MS
+parallelism/SM
+parallelization/MS
+parallelize/ZGDSR
+parallelogram/MS
+paralysis/M
+paralytically
+paralytic/S
+paralyzedly/S
+paralyzed/Y
+paralyzer/M
+paralyze/ZGDRS
+paralyzingly/S
+paralyzing/Y
+paramagnetic
+paramagnet/M
+Paramaribo/M
+paramecia
+paramecium/M
+paramedical/S
+paramedic/MS
+parameterization/SM
+parameterize/BSDG
+parameterized/U
+parameterless
+parameter/SM
+parametric
+parametrically
+parametrization
+parametrize/DS
+paramilitary/S
+paramount/S
+paramour/MS
+para/MS
+Paramus/M
+Paraná
+paranoiac/S
+paranoia/SM
+paranoid/S
+paranormal/SY
+parapet/SMD
+paraphernalia
+paraphrase/GMSRD
+paraphraser/M
+paraplegia/MS
+paraplegic/S
+paraprofessional/SM
+parapsychologist/S
+parapsychology/MS
+paraquat/S
+parasite/SM
+parasitically
+parasitic/S
+parasitism/SM
+parasitologist/M
+parasitology/M
+parasol/SM
+parasympathetic/S
+parathion/SM
+parathyroid/S
+paratrooper/M
+paratroop/RSZ
+paratyphoid/S
+parboil/DSG
+parceled/U
+parceling/M
+parcel/SGMD
+Parcheesi/M
+parch/GSDL
+parchment/SM
+PARC/M
+pardonableness/M
+pardonable/U
+pardonably/U
+pardoner/M
+pardon/ZBGRDS
+paregoric/SM
+parentage/MS
+parental/Y
+parenteral
+parentheses
+parenthesis/M
+parenthesize/GSD
+parenthetic
+parenthetical/Y
+parenthood/MS
+parent/MDGJS
+pare/S
+paresis/M
+pares/S
+Pareto/M
+parfait/SM
+pariah/M
+pariahs
+parietal/S
+parimutuel/S
+paring/M
+parishioner/SM
+parish/MS
+Parisian/SM
+Paris/M
+parity/ESM
+parka/MS
+Parke/M
+Parker/M
+Parkersburg/M
+park/GJZDRMS
+Parkhouse/M
+parking/M
+Parkinson/M
+parkish
+parkland/M
+parklike
+Parkman
+Park/RMS
+parkway/MS
+parlance/SM
+parlay/DGS
+parley/MDSG
+parliamentarian/SM
+parliamentary/U
+parliament/MS
+Parliament/MS
+parlor/SM
+parlous
+Parmesan/S
+parmigiana
+Parnassus/SM
+Parnell/M
+parochialism/SM
+parochiality
+parochial/Y
+parodied/U
+parodist/SM
+parody/SDGM
+parolee/MS
+parole/MSDG
+paroxysmal
+paroxysm/MS
+parquetry/SM
+parquet/SMDG
+parrakeet's
+parred
+parricidal
+parricide/MS
+parring
+Parrish/M
+Parr/M
+Parrnell/M
+parrot/GMDS
+parrotlike
+parry/GSD
+Parry/M
+parse
+parsec/SM
+parsed/U
+Parsee's
+parser/M
+Parsifal/M
+parsimonious/Y
+parsimony/SM
+pars/JDSRGZ
+parsley/MS
+parsnip/MS
+parsonage/MS
+parson/MS
+Parsons/M
+partaken
+partaker/M
+partake/ZGSR
+part/CDGS
+parterre/MS
+parter/S
+parthenogeneses
+parthenogenesis/M
+Parthenon/M
+Parthia/M
+partiality/MS
+partial/SY
+participant/MS
+participate/NGVDSX
+participation/M
+participator/S
+participatory
+participial/Y
+participle/MS
+particleboard/S
+particle/MS
+particolored
+particularistic
+particularity/SM
+particularization/MS
+particularize/GSD
+particular/SY
+particulate/S
+parting/MS
+partisanship/SM
+partisan/SM
+partition/AMRDGS
+partitioned/U
+partitioner/M
+partitive/S
+partizan's
+partly
+partner/DMGS
+partnership/SM
+partook
+partridge/MS
+part's
+parturition/SM
+partway
+party/RSDMG
+parvenu/SM
+par/ZGSJBMDR
+Pasadena/M
+PASCAL
+Pascale/M
+Pascal/M
+pascal/SM
+paschal/S
+pasha/MS
+Paso/M
+Pasquale/M
+pas/S
+passably
+passage/MGSD
+passageway/MS
+Passaic/M
+passband
+passbook/MS
+passel/MS
+pass/M
+passenger/MYS
+passerby
+passer/M
+passersby
+passim
+passing/Y
+passionated
+passionate/EYP
+passionateness/EM
+passionates
+passionating
+passioned
+passionflower/MS
+passioning
+passionless
+passion/SEM
+Passion/SM
+passivated
+passiveness/S
+passive/SYP
+passivity/S
+pass/JGVBZDSR
+passkey/SM
+passmark
+passover
+Passover/MS
+passport/SM
+password/SDM
+pasta/MS
+pasteboard/SM
+pasted/UA
+pastel/MS
+paste/MS
+Pasternak/M
+pastern/SM
+pasteup
+pasteurization/MS
+pasteurized/U
+pasteurizer/M
+pasteurize/RSDGZ
+Pasteur/M
+pastiche/MS
+pastille/SM
+pastime/SM
+pastiness/SM
+pastoralization/M
+pastoral/SPY
+pastorate/MS
+pastor/GSDM
+past/PGMDRS
+pastrami/MS
+pastry/SM
+past's/A
+pasts/A
+pasturage/SM
+pasture/MGSRD
+pasturer/M
+pasty/PTRS
+Patagonia/M
+Patagonian/S
+patch/EGRSD
+patcher/EM
+patchily
+patchiness/S
+patch's
+patchwork/RMSZ
+patchy/PRT
+patellae
+patella/MS
+Patel/M
+Pate/M
+paten/M
+Paten/M
+patentee/SM
+patent/ZGMRDYSB
+paterfamilias/SM
+pater/M
+paternalism/MS
+paternalist
+paternalistic
+paternal/Y
+paternity/SM
+paternoster/SM
+Paterson/M
+pate/SM
+pathetic
+pathetically
+pathfinder/MS
+pathless/P
+path/M
+pathname/SM
+pathogenesis/M
+pathogenic
+pathogen/SM
+pathologic
+pathological/Y
+pathologist/MS
+pathology/SM
+pathos/SM
+paths
+pathway/MS
+Patience/M
+patience/SM
+patient/MRYTS
+patient's/I
+patients/I
+patina/SM
+patine
+Patin/M
+patio/MS
+Pat/MN
+pat/MNDRS
+Patna/M
+patois/M
+Paton/M
+patresfamilias
+patriarchal
+patriarchate/MS
+patriarch/M
+patriarchs
+patriarchy/MS
+Patrica/M
+Patrice/M
+Patricia/M
+patrician/MS
+patricide/MS
+Patricio/M
+Patrick/M
+Patric/M
+patrimonial
+patrimony/SM
+patriotically
+patriotic/U
+patriotism/SM
+patriot/SM
+patristic/S
+Patrizia/M
+Patrizio/M
+Patrizius/M
+patrolled
+patrolling
+patrolman/M
+patrolmen
+patrol/MS
+patrolwoman
+patrolwomen
+patronage/MS
+patroness/S
+patronization
+patronized/U
+patronize/GZRSDJ
+patronizer/M
+patronizes/A
+patronizing's/U
+patronizing/YM
+patronymically
+patronymic/S
+patron/YMS
+patroon/MS
+patsy/SM
+Patsy/SM
+patted
+Patten/M
+patten/MS
+patterer/M
+pattern/GSDM
+patternless
+patter/RDSGJ
+Patterson/M
+Pattie/M
+Patti/M
+patting
+Pattin/M
+Patton/M
+Patty/M
+patty/SM
+paucity/SM
+Paula/M
+Paule/M
+Pauletta/M
+Paulette/M
+Paulie/M
+Pauli/M
+Paulina/M
+Pauline
+Pauling/M
+Paulita/M
+Paul/MG
+Paulo/M
+Paulsen/M
+Paulson/M
+Paulus/M
+Pauly/M
+paunch/GMSD
+paunchiness/M
+paunchy/RTP
+pauperism/SM
+pauperize/SDG
+pauper/SGDM
+pause/DSG
+Pavarotti
+paved/UA
+pave/GDRSJL
+Pavel/M
+pavement/SGDM
+paver/M
+paves/A
+Pavia/M
+pavilion/SMDG
+paving/A
+paving's
+Pavla/M
+Pavlova/MS
+Pavlovian
+Pavlov/M
+pawl/SM
+paw/MDSG
+pawnbroker/SM
+pawnbroking/S
+Pawnee/SM
+pawner/M
+pawn/GSDRM
+pawnshop/MS
+pawpaw's
+Pawtucket/M
+paxes
+Paxon/M
+Paxton/M
+payable/S
+pay/AGSLB
+payback/S
+paycheck/SM
+payday/MS
+payed
+payee/SM
+payer/SM
+payload/SM
+paymaster/SM
+payment/ASM
+Payne/SM
+payoff/MS
+payola/MS
+payout/S
+payroll/MS
+payslip/S
+Payson/M
+Payton/M
+Paz/M
+Pb/M
+PBS
+PBX
+PCB
+PC/M
+PCP
+PCs
+pct
+pd
+PD
+Pd/M
+PDP
+PDQ
+PDT
+PE
+Peabody/M
+peaceableness/M
+peaceable/P
+peaceably
+peacefuller
+peacefullest
+peacefulness/S
+peaceful/PY
+peace/GMDS
+peacekeeping/S
+Peace/M
+peacemaker/MS
+peacemaking/MS
+peacetime/MS
+peach/GSDM
+Peachtree/M
+peachy/RT
+peacock/SGMD
+Peadar/M
+peafowl/SM
+peahen/MS
+peaked/P
+peakiness/M
+peak/SGDM
+peaky/P
+pealed/A
+Peale/M
+peal/MDSG
+peals/A
+pea/MS
+peanut/SM
+Pearce/M
+Pearla/M
+Pearle/M
+pearler/M
+Pearlie/M
+Pearline/M
+Pearl/M
+pearl/SGRDM
+pearly/TRS
+Pearson/M
+pear/SYM
+peartrees
+Peary/M
+peasanthood
+peasantry/SM
+peasant/SM
+peashooter/MS
+peats/A
+peat/SM
+peaty/TR
+pebble/MGSD
+pebbling/M
+pebbly/TR
+Pebrook/M
+pecan/SM
+peccadilloes
+peccadillo/M
+peccary/MS
+Pechora/M
+pecker/M
+peck/GZSDRM
+Peckinpah/M
+Peck/M
+Pecos/M
+pectic
+pectin/SM
+pectoral/S
+peculate/NGDSX
+peculator/S
+peculiarity/MS
+peculiar/SY
+pecuniary
+pedagogical/Y
+pedagogic/S
+pedagogics/M
+pedagogue/SDGM
+pedagogy/MS
+pedal/SGRDM
+pedantic
+pedantically
+pedantry/MS
+pedant/SM
+peddler/M
+peddle/ZGRSD
+pederast/SM
+pederasty/SM
+Peder/M
+pedestal/GDMS
+pedestrianization
+pedestrianize/GSD
+pedestrian/MS
+pediatrician/SM
+pediatric/S
+pedicab/SM
+pedicure/DSMG
+pedicurist/SM
+pedigree/DSM
+pediment/DMS
+pedlar's
+pedometer/MS
+pedophile/S
+pedophilia
+Pedro/M
+peduncle/MS
+peeing
+peekaboo/SM
+peek/GSD
+peeler/M
+peeling/M
+Peel/M
+peel/SJGZDR
+peen/GSDM
+peeper/M
+peephole/SM
+peep/SGZDR
+peepshow/MS
+peepy
+peerage/MS
+peer/DMG
+peeress/MS
+peerlessness/M
+peerless/PY
+peeve/GZMDS
+peevers/M
+peevishness/SM
+peevish/YP
+peewee/S
+pee/ZDRS
+Pegasus/MS
+pegboard/SM
+Pegeen/M
+pegged
+Peggie/M
+Peggi/M
+pegging
+Peggy/M
+Peg/M
+peg/MS
+peignoir/SM
+Pei/M
+Peiping/M
+Peirce/M
+pejoration/SM
+pejorative/SY
+peke/MS
+Pekinese's
+pekingese
+Pekingese/SM
+Peking/SM
+pekoe/SM
+pelagic
+Pelee/M
+Pele/M
+pelf/SM
+Pelham/M
+pelican/SM
+pellagra/SM
+pellet/SGMD
+pellucid
+Peloponnese/M
+pelter/M
+pelt/GSDR
+pelvic/S
+pelvis/SM
+Pembroke/M
+pemmican/SM
+penalization/SM
+penalized/U
+penalize/SDG
+penalty/MS
+penal/Y
+Pena/M
+penance/SDMG
+pence/M
+penchant/MS
+pencil/SGJMD
+pendant/SM
+pend/DCGS
+pendent/CS
+Penderecki/M
+Pendleton/M
+pendulous
+pendulum/MS
+Penelopa/M
+Penelope/M
+penetrability/SM
+penetrable
+penetrate/SDVGNX
+penetrating/Y
+penetration/M
+penetrativeness/M
+penetrative/PY
+penetrator/MS
+penguin/MS
+penicillin/SM
+penile
+peninsular
+peninsula/SM
+penis/MS
+penitence/MS
+penitential/YS
+penitentiary/MS
+penitent/SY
+penknife/M
+penknives
+penlight/MS
+pen/M
+Pen/M
+penman/M
+penmanship/MS
+penmen
+Penna
+pennant/SM
+penned
+Penney/M
+Pennie/M
+penniless
+Penni/M
+penning
+Pennington/M
+pennis
+Penn/M
+pennon/SM
+Pennsylvania/M
+Pennsylvanian/S
+Penny/M
+penny/SM
+pennyweight/SM
+pennyworth/M
+penologist/MS
+penology/MS
+Penrod/M
+Pensacola/M
+pensioner/M
+pension/ZGMRDBS
+pensiveness/S
+pensive/PY
+pens/V
+pentacle/MS
+pentagonal/SY
+Pentagon/M
+pentagon/SM
+pentagram/MS
+pentameter/SM
+pent/AS
+Pentateuch/M
+pentathlete/S
+pentathlon/MS
+pentatonic
+pentecostal
+Pentecostalism/S
+Pentecostal/S
+Pentecost/SM
+penthouse/SDGM
+Pentium/M
+penuche/SM
+penultimate/SY
+penumbrae
+penumbra/MS
+penuriousness/MS
+penurious/YP
+penury/SM
+peonage/MS
+peon/MS
+peony/SM
+people/SDMG
+Peoria/M
+Pepe/M
+Pepillo/M
+Pepi/M
+Pepin/M
+Pepita/M
+Pepito/M
+pepped
+peppercorn/MS
+pepperer/M
+peppergrass/M
+peppermint/MS
+pepperoni/S
+pepper/SGRDM
+peppery
+peppiness/SM
+pepping
+peppy/PRT
+Pepsico/M
+PepsiCo/M
+Pepsi/M
+pepsin/SM
+pep/SM
+peptic/S
+peptidase/SM
+peptide/SM
+peptizing
+Pepys/M
+Pequot/M
+peradventure/S
+perambulate/DSNGX
+perambulation/M
+perambulator/MS
+percale/MS
+perceivably
+perceive/DRSZGB
+perceived/U
+perceiver/M
+percentage/MS
+percentile/SM
+percent/MS
+perceptible
+perceptibly
+perceptional
+perception/MS
+perceptiveness/MS
+perceptive/YP
+perceptual/Y
+percept/VMS
+Perceval/M
+perchance
+perch/GSDM
+perchlorate/M
+perchlorination
+percipience/MS
+percipient/S
+Percival/M
+percolate/NGSDX
+percolation/M
+percolator/MS
+percuss/DSGV
+percussionist/MS
+percussion/SAM
+percussiveness/M
+percussive/PY
+percutaneous/Y
+Percy/M
+perdition/MS
+perdurable
+peregrinate/XSDNG
+peregrination/M
+peregrine/S
+Perelman/M
+peremptorily
+peremptory/P
+perennial/SY
+pères
+perestroika/S
+Perez/M
+perfecta/S
+perfect/DRYSTGVP
+perfecter/M
+perfectibility/MS
+perfectible
+perfectionism/MS
+perfectionist/MS
+perfection/MS
+perfectiveness/M
+perfective/PY
+perfectness/MS
+perfidiousness/M
+perfidious/YP
+perfidy/MS
+perforated/U
+perforate/XSDGN
+perforation/M
+perforce
+performance/MS
+performed/U
+performer/M
+perform/SDRZGB
+perfumer/M
+perfumery/SM
+perfume/ZMGSRD
+perfunctorily
+perfunctoriness/M
+perfunctory/P
+perfused
+perfusion/M
+Pergamon/M
+pergola/SM
+perhaps/S
+Peria/M
+pericardia
+pericardium/M
+Perice/M
+Periclean
+Pericles/M
+perigee/SM
+perihelia
+perihelion/M
+peril/GSDM
+Perilla/M
+perilousness/M
+perilous/PY
+Peri/M
+perimeter/MS
+perinatal
+perinea
+perineum/M
+periodic
+periodical/YMS
+periodicity/MS
+period/MS
+periodontal/Y
+periodontics/M
+periodontist/S
+peripatetic/S
+peripheral/SY
+periphery/SM
+periphrases
+periphrasis/M
+periphrastic
+periscope/SDMG
+perishable/SM
+perish/BZGSRD
+perishing/Y
+peristalses
+peristalsis/M
+peristaltic
+peristyle/MS
+peritoneal
+peritoneum/SM
+peritonitis/MS
+periwigged
+periwigging
+periwig/MS
+periwinkle/SM
+perjurer/M
+perjure/SRDZG
+perjury/MS
+per/K
+perk/GDS
+perkily
+perkiness/S
+Perkin/SM
+perky/TRP
+Perla/M
+Perle/M
+Perl/M
+permafrost/MS
+permalloy/M
+Permalloy/M
+permanence/SM
+permanency/MS
+permanentness/M
+permanent/YSP
+permeability/SM
+permeableness/M
+permeable/P
+permeate/NGVDSX
+Permian
+permissibility/M
+permissibleness/M
+permissible/P
+permissibly
+permission/SM
+permissiveness/MS
+permissive/YP
+permit/SM
+permitted
+permitting
+Perm/M
+perm/MDGS
+permutation/MS
+permute/SDG
+Pernell/M
+perniciousness/MS
+pernicious/PY
+Pernod/M
+Peron/M
+peroration/SM
+Perot/M
+peroxidase/M
+peroxide/MGDS
+perpend/DG
+perpendicularity/SM
+perpendicular/SY
+perpetrate/NGXSD
+perpetration/M
+perpetrator/SM
+perpetual/SY
+perpetuate/NGSDX
+perpetuation/M
+perpetuity/MS
+perplex/DSG
+perplexed/Y
+perplexity/MS
+perquisite/SM
+Perren/M
+Perri/M
+Perrine/M
+Perry/MR
+persecute/XVNGSD
+persecution/M
+persecutor/MS
+persecutory
+Perseid/M
+Persephone/M
+Perseus/M
+perseverance/MS
+persevere/GSD
+persevering/Y
+Pershing/M
+Persia/M
+Persian/S
+persiflage/MS
+persimmon/SM
+Persis/M
+persist/DRSG
+persistence/SM
+persistent/Y
+persnickety
+personableness/M
+personable/P
+personae
+personage/SM
+personality/SM
+personalization/CMS
+personalize/CSDG
+personalized/U
+personalty/MS
+personal/YS
+persona/M
+person/BMS
+personification/M
+personifier/M
+personify/XNGDRS
+personnel/SM
+person's/U
+persons/U
+perspective/YMS
+perspex
+perspicaciousness/M
+perspicacious/PY
+perspicacity/S
+perspicuity/SM
+perspicuousness/M
+perspicuous/YP
+perspiration/MS
+perspire/DSG
+persuaded/U
+persuader/M
+persuade/ZGDRSB
+persuasion/SM
+persuasively
+persuasiveness/MS
+persuasive/U
+pertain/GSD
+Perth/M
+pertinaciousness/M
+pertinacious/YP
+pertinacity/MS
+pertinence/S
+pertinent/YS
+pertness/MS
+perturbation/MS
+perturbed/U
+perturb/GDS
+pertussis/SM
+pert/YRTSP
+peruke/SM
+Peru/M
+perusal/SM
+peruser/M
+peruse/RSDZG
+Peruvian/S
+pervade/SDG
+pervasion/M
+pervasiveness/MS
+pervasive/PY
+perverseness/SM
+perverse/PXYNV
+perversion/M
+perversity/MS
+pervert/DRSG
+perverted/YP
+perverter/M
+perviousness
+peseta/SM
+Peshawar/M
+peskily
+peskiness/S
+pesky/RTP
+peso/MS
+pessimal/Y
+pessimism/SM
+pessimistic
+pessimistically
+pessimist/SM
+pester/DG
+pesticide/MS
+pestiferous
+pestilence/SM
+pestilential/Y
+pestilent/Y
+pestle/SDMG
+pesto/S
+pest/RZSM
+PET
+Pétain/M
+petal/SDM
+Peta/M
+petard/MS
+petcock/SM
+Pete/M
+peter/GD
+Peter/M
+Petersburg/M
+Petersen/M
+Peters/N
+Peterson/M
+Peterus/M
+Petey/M
+pethidine/M
+petiole/SM
+petiteness/M
+petite/XNPS
+petitioner/M
+petition/GZMRD
+petition's/A
+petitions/A
+petits
+Petkiewicz/M
+Pet/MRZ
+Petra/M
+Petrarch/M
+petrel/SM
+petri
+petrifaction/SM
+petrify/NDSG
+Petrina/M
+Petr/M
+petrochemical/SM
+petrodollar/MS
+petroglyph/M
+petrolatum/MS
+petroleum/MS
+petrolled
+petrolling
+petrol/MS
+petrologist/MS
+petrology/MS
+Petronella/M
+Petronia/M
+Petronilla/M
+Petronille/M
+pet/SMRZ
+petted
+petter/MS
+Pettibone/M
+petticoat/SMD
+pettifogged
+pettifogger/SM
+pettifogging
+pettifog/S
+pettily
+pettiness/S
+petting
+pettis
+pettishness/M
+pettish/YP
+Petty/M
+petty/PRST
+petulance/MS
+petulant/Y
+Petunia/M
+petunia/SM
+Peugeot/M
+Pewaukee/M
+pewee/MS
+pewit/MS
+pew/SM
+pewter/SRM
+peyote/SM
+Peyter/M
+Peyton/M
+pf
+Pfc
+PFC
+pfennig/SM
+Pfizer/M
+pg
+PG
+Phaedra/M
+Phaethon/M
+phaeton/MS
+phage/M
+phagocyte/SM
+Phaidra/M
+phalanger/MS
+phalanges
+phalanx/SM
+phalli
+phallic
+phallus/M
+Phanerozoic
+phantasmagoria/SM
+phantasmal
+phantasm/SM
+phantasy's
+phantom/MS
+pharaoh
+Pharaoh/M
+pharaohs
+Pharaohs
+pharisaic
+Pharisaic
+Pharisaical
+pharisee/S
+Pharisee/SM
+pharmaceutical/SY
+pharmaceutic/S
+pharmaceutics/M
+pharmacist/SM
+pharmacological/Y
+pharmacologist/SM
+pharmacology/SM
+pharmacopoeia/SM
+pharmacy/SM
+pharyngeal/S
+pharynges
+pharyngitides
+pharyngitis/M
+pharynx/M
+phase/DSRGZM
+phaseout/S
+PhD
+pheasant/SM
+Phebe/M
+Phedra/M
+Phekda/M
+Phelia/M
+Phelps/M
+phenacetin/MS
+phenobarbital/SM
+phenolic
+phenol/MS
+phenolphthalein/M
+phenomenal/Y
+phenomena/SM
+phenomenological/Y
+phenomenology/MS
+phenomenon/SM
+phenotype/MS
+phenylalanine/M
+phenyl/M
+pheromone/MS
+phew/S
+phialled
+phialling
+phial/MS
+Phidias/M
+Philadelphia/M
+philanderer/M
+philander/SRDGZ
+philanthropic
+philanthropically
+philanthropist/MS
+philanthropy/SM
+philatelic
+philatelist/MS
+philately/SM
+Philbert/M
+Philco/M
+philharmonic/S
+Philipa/M
+Philip/M
+Philippa/M
+Philippe/M
+Philippians/M
+philippic/SM
+Philippine/SM
+Philis/M
+philistine/S
+Philistine/SM
+philistinism/S
+Phillida/M
+Phillie/M
+Phillipa/M
+Phillipe/M
+Phillip/MS
+Phillipp/M
+Phillis/M
+Philly/SM
+Phil/MY
+philodendron/MS
+philological/Y
+philologist/MS
+philology/MS
+Philomena/M
+philosopher/MS
+philosophic
+philosophical/Y
+philosophized/U
+philosophizer/M
+philosophizes/U
+philosophize/ZDRSG
+philosophy/MS
+philter/SGDM
+philtre/DSMG
+Phineas/M
+Phip/M
+Phipps/M
+phi/SM
+phlebitides
+phlebitis/M
+phlegmatic
+phlegmatically
+phlegm/SM
+phloem/MS
+phlox/M
+pH/M
+Ph/M
+phobia/SM
+phobic/S
+Phobos/M
+Phoebe/M
+phoebe/SM
+Phoenicia/M
+Phoenician/SM
+Phoenix/M
+phoenix/MS
+phone/DSGM
+phoneme/SM
+phonemically
+phonemic/S
+phonemics/M
+phonetically
+phonetician/SM
+phonetic/S
+phonetics/M
+phonically
+phonic/S
+phonics/M
+phoniness/MS
+phonographer/M
+phonographic
+phonograph/RM
+phonographs
+phonologic
+phonological/Y
+phonologist/MS
+phonology/MS
+phonon/M
+phony/PTRSDG
+phooey/S
+phosphatase/M
+phosphate/MS
+phosphide/M
+phosphine/MS
+phosphoresce
+phosphorescence/SM
+phosphorescent/Y
+phosphoric
+phosphor/MS
+phosphorous
+phosphorus/SM
+photocell/MS
+photochemical/Y
+photochemistry/M
+photocopier/M
+photocopy/MRSDZG
+photoelectric
+photoelectrically
+photoelectronic
+photoelectrons
+photoengraver/M
+photoengrave/RSDJZG
+photoengraving/M
+photofinishing/MS
+photogenic
+photogenically
+photograph/AGD
+photographer/SM
+photographic
+photographically
+photograph's
+photographs/A
+photography/MS
+photojournalism/SM
+photojournalist/SM
+photoluminescence/M
+photolysis/M
+photolytic
+photometer/SM
+photometric
+photometrically
+photometry/M
+photomicrograph/M
+photomicrography/M
+photomultiplier/M
+photon/MS
+photorealism
+photosensitive
+photo/SGMD
+photosphere/M
+photostatic
+Photostat/MS
+Photostatted
+Photostatting
+photosyntheses
+photosynthesis/M
+photosynthesize/DSG
+photosynthetic
+phototypesetter
+phototypesetting/M
+phrasal
+phrase/AGDS
+phrasebook
+phrasemaking
+phraseology/MS
+phrase's
+phrasing/SM
+phrenological/Y
+phrenologist/MS
+phrenology/MS
+phylactery/MS
+phylae
+phyla/M
+Phylis/M
+Phyllida/M
+Phyllis/M
+Phyllys/M
+phylogeny/MS
+phylum/M
+Phylys/M
+phys
+physicality/M
+physical/PYS
+physician/SM
+physicist/MS
+physicked
+physicking
+physic/SM
+physiochemical
+physiognomy/SM
+physiography/MS
+physiologic
+physiological/Y
+physiologist/SM
+physiology/MS
+physiotherapist/MS
+physiotherapy/SM
+physique/MSD
+phytoplankton/M
+Piaf/M
+Piaget/M
+Pia/M
+pianism/M
+pianissimo/S
+pianistic
+pianist/SM
+pianoforte/MS
+pianola
+Pianola/M
+piano/SM
+piaster/MS
+piazza/SM
+pibroch/M
+pibrochs
+picador/MS
+picaresque/S
+pica/SM
+Picasso/M
+picayune/S
+Piccadilly/M
+piccalilli/MS
+piccolo/MS
+pickaback's
+pickaxe's
+pickax/GMSD
+pickerel/MS
+Pickering/M
+picker/MG
+picketer/M
+picket/MSRDZG
+Pickett/M
+Pickford/M
+pick/GZSJDR
+pickle/SDMG
+Pickman/M
+pickoff/S
+pickpocket/GSM
+pickup/SM
+Pickwick/M
+picky/RT
+picnicked
+picnicker/MS
+picnicking
+picnic/SM
+picofarad/MS
+picojoule
+picoseconds
+picot/DMGS
+Pict/M
+pictograph/M
+pictographs
+pictorialness/M
+pictorial/PYS
+picture/MGSD
+picturesqueness/SM
+picturesque/PY
+piddle/GSD
+piddly
+pidgin/SM
+piebald/S
+piece/GMDSR
+piecemeal
+piecer/M
+piecewise
+pieceworker/M
+piecework/ZSMR
+piedmont
+Piedmont/M
+pieing
+pie/MS
+Pierce/M
+piercer/M
+pierce/RSDZGJ
+piercing/Y
+Pierette/M
+pier/M
+Pier/M
+Pierre/M
+Pierrette/M
+Pierrot/M
+Pierson/M
+Pieter/M
+Pietra/M
+Pietrek/M
+Pietro/M
+piety/SM
+piezoelectric
+piezoelectricity/M
+piffle/MGSD
+pigeon/DMGS
+pigeonhole/SDGM
+pigged
+piggery/M
+pigging
+piggishness/SM
+piggish/YP
+piggyback/MSDG
+Piggy/M
+piggy/RSMT
+pigheadedness/S
+pigheaded/YP
+piglet/MS
+pigmentation/MS
+pigment/MDSG
+pig/MLS
+Pigmy's
+pigpen/SM
+pigroot
+pigskin/MS
+pigsty/SM
+pigswill/M
+pigtail/SMD
+Pike/M
+pike/MZGDRS
+piker/M
+pikestaff/MS
+pilaf/MS
+pilaster/SM
+Pilate/M
+pilau's
+pilchard/SM
+Pilcomayo/M
+pile/JDSMZG
+pileup/MS
+pilferage/SM
+pilferer/M
+pilfer/ZGSRD
+Pilgrim
+pilgrimage/DSGM
+pilgrim/MS
+piling/M
+pillage/RSDZG
+pillar/DMSG
+pillbox/MS
+pill/GSMD
+pillion/DMGS
+pillory/MSDG
+pillowcase/SM
+pillow/GDMS
+pillowslip/S
+Pillsbury/M
+pilot/DMGS
+pilothouse/SM
+piloting/M
+pimento/MS
+pimiento/SM
+pimpernel/SM
+pimp/GSMYD
+pimple/SDM
+pimplike
+pimply/TRM
+PIN
+pinafore/MS
+piñata/S
+Pinatubo/M
+pinball/MS
+Pincas/M
+pincer/GSD
+Pinchas/M
+pincher/M
+pinch/GRSD
+pincushion/SM
+Pincus/M
+Pindar/M
+pineapple/MS
+pined/A
+Pinehurst/M
+pine/MNGXDS
+pines/A
+pinfeather/SM
+ping/GDRM
+pinheaded/P
+pinhead/SMD
+pinhole/SM
+pining/A
+pinion/DMG
+Pinkerton/M
+pinkeye/MS
+pink/GTYDRMPS
+pinkie/SM
+pinkish/P
+pinkness/S
+pinko/MS
+pinky's
+pinnacle/MGSD
+pinnate
+pinned/U
+pinning/S
+Pinocchio/M
+Pinochet/M
+pinochle/SM
+piñon/S
+pinpoint/SDG
+pinprick/MDSG
+pin's
+pinsetter/SM
+Pinsky/M
+pinstripe/SDM
+pintail/SM
+Pinter/M
+pint/MRS
+pinto/S
+pinup/MS
+pin/US
+pinwheel/DMGS
+pinyin
+Pinyin
+piny/RT
+pioneer/SDMG
+pion/M
+Piotr/M
+piousness/MS
+pious/YP
+pipeline/DSMG
+pipe/MS
+piper/M
+Piper/M
+Pipestone/M
+pipet's
+pipette/MGSD
+pipework
+piping/YM
+pipit/MS
+pip/JSZMGDR
+Pip/MR
+Pippa/M
+pipped
+pipping
+pippin/SM
+Pippo/M
+Pippy/M
+pipsqueak/SM
+piquancy/MS
+piquantness/M
+piquant/PY
+pique/GMDS
+piracy/MS
+Piraeus/M
+Pirandello/M
+piranha/SM
+pirate/MGSD
+piratical/Y
+pirogi
+pirogies
+pirouette/MGSD
+pis
+Pisa/M
+piscatorial
+Pisces/M
+Pisistratus/M
+pismire/SM
+Pissaro/M
+piss/DSRG!
+pistachio/MS
+piste/SM
+pistillate
+pistil/MS
+pistoleers
+pistole/M
+pistol/SMGD
+piston/SM
+pitapat/S
+pitapatted
+pitapatting
+pita/SM
+Pitcairn/M
+pitchblende/SM
+pitcher/M
+pitchfork/GDMS
+pitching/M
+pitchman/M
+pitchmen
+pitch/RSDZG
+pitchstone/M
+piteousness/SM
+piteous/YP
+pitfall/SM
+pithily
+pithiness/SM
+pith/MGDS
+piths
+pithy/RTP
+pitiableness/M
+pitiable/P
+pitiably
+pitier/M
+pitifuller
+pitifullest
+pitifulness/M
+pitiful/PY
+pitilessness/SM
+pitiless/PY
+pitman/M
+pit/MS
+Pitney/M
+piton/SM
+pittance/SM
+pitted
+pitting
+Pittman/M
+Pittsburgh/ZM
+Pittsfield/M
+Pitt/SM
+Pittston/M
+pituitary/SM
+pitying/Y
+pity/ZDSRMG
+Pius/M
+pivotal/Y
+pivot/DMSG
+pivoting/M
+pix/DSG
+pixel/SM
+pixie/MS
+pixiness
+pixmap/SM
+Pizarro/M
+pizazz/S
+pi/ZGDRH
+pizza/SM
+pizzeria/SM
+pizzicati
+pizzicato
+pj's
+PJ's
+pk
+pkg
+pkt
+pkwy
+Pkwy
+pl
+placard/DSMG
+placate/NGVXDRS
+placatory
+placeable/A
+placebo/SM
+placed/EAU
+place/DSRJLGZM
+placeholder/S
+placekick/DGS
+placeless/Y
+placement/AMES
+placental/S
+placenta/SM
+placer/EM
+places/EA
+placidity/SM
+placidness/M
+placid/PY
+placing/AE
+placket/SM
+plagiarism/MS
+plagiarist/MS
+plagiarize/GZDSR
+plagiary/SM
+plagued/U
+plague/MGRSD
+plaguer/M
+plaice/M
+plaid/DMSG
+plainclothes
+plainclothesman
+plainclothesmen
+Plainfield/M
+plainness/MS
+plainsman/M
+plainsmen
+plainsong/SM
+plainspoken
+plain/SPTGRDY
+plaintiff/MS
+plaintiveness/M
+plaintive/YP
+plaint/VMS
+Plainview/M
+plaiting/M
+plait/SRDMG
+planar
+planarity
+Planck/M
+plan/DRMSGZ
+planeload
+planer/M
+plane's
+plane/SCGD
+planetarium/MS
+planetary
+planetesimal/M
+planet/MS
+planetoid/SM
+plangency/S
+plangent
+planking/M
+plank/SJMDG
+plankton/MS
+planned/U
+planner/SM
+planning
+Plano
+planoconcave
+planoconvex
+Plantagenet/M
+plantain/MS
+plantar
+plantation/MS
+planter/MS
+planting/S
+plantlike
+plant's
+plant/SADG
+plaque/MS
+plash/GSDM
+plasma/MS
+plasmid/S
+plasm/M
+plasterboard/MS
+plasterer/M
+plastering/M
+plaster/MDRSZG
+plasterwork/M
+plastically
+plasticine
+Plasticine/M
+plasticity/SM
+plasticize/GDS
+plastic/MYS
+plateau/GDMS
+plateful/S
+platelet/SM
+platen/M
+plater/M
+plate/SM
+platform/SGDM
+Plath/M
+plating/M
+platinize/GSD
+platinum/MS
+platitude/SM
+platitudinous/Y
+plat/JDNRSGXZ
+Plato/M
+platonic
+Platonic
+Platonism/M
+Platonist
+platoon/MDSG
+platted
+Platte/M
+platter/MS
+Platteville/M
+platting
+platypus/MS
+platys
+platy/TR
+plaudit/MS
+plausibility/S
+plausible/P
+plausibly
+Plautus/M
+playability/U
+playable/U
+playacting/M
+playact/SJDG
+playback/MS
+playbill/SM
+Playboy/M
+playboy/SM
+play/DRSEBG
+played/A
+player's/E
+player/SM
+playfellow/S
+playfulness/MS
+playful/PY
+playgirl/SM
+playgoer/MS
+playground/MS
+playgroup/S
+playhouse/SM
+playing/S
+playmate/MS
+playoff/S
+playpen/SM
+playroom/SM
+plays/A
+Playtex/M
+plaything/MS
+playtime/SM
+playwright/SM
+playwriting/M
+plaza/SM
+pleader/MA
+pleading/MY
+plead/ZGJRDS
+pleasanter
+pleasantest
+pleasantness/SMU
+pleasantry/MS
+pleasant/UYP
+pleased/EU
+pleaser/M
+pleases/E
+please/Y
+pleasingness/M
+pleasing/YP
+plea/SM
+pleas/RSDJG
+pleasurableness/M
+pleasurable/P
+pleasurably
+pleasureful
+pleasure/MGBDS
+pleasure's/E
+pleasures/E
+pleater/M
+pleat/RDMGS
+plebeian/SY
+plebe/MS
+plebiscite/SM
+plectra
+plectrum/SM
+pledger/M
+pledge/RSDMG
+Pleiads
+Pleistocene
+plenary/S
+plenipotentiary/S
+plenitude/MS
+plenteousness/M
+plenteous/PY
+plentifulness/M
+plentiful/YP
+plenty/SM
+plenum/M
+pleonasm/MS
+plethora/SM
+pleurae
+pleural
+pleura/M
+pleurisy/SM
+Plexiglas/MS
+plexus/SM
+pliability/MS
+pliableness/M
+pliable/P
+pliancy/MS
+pliantness/M
+pliant/YP
+plication/MA
+plier/MA
+plight/GMDRS
+plimsolls
+plinker/M
+plink/GRDS
+plinth/M
+plinths
+Pliny/M
+Pliocene/S
+PLO
+plodded
+plodder/SM
+plodding/SY
+plod/S
+plopped
+plopping
+plop/SM
+plosive
+plot/SM
+plotted/A
+plotter/MDSG
+plotting
+plover/MS
+plowed/U
+plower/M
+plowman/M
+plowmen
+plow/SGZDRM
+plowshare/MS
+ploy's
+ploy/SCDG
+plucker/M
+pluckily
+pluckiness/SM
+pluck/SGRD
+plucky/TPR
+pluggable
+plugged/UA
+plugging/AU
+plughole
+plug's
+plug/US
+plumage/DSM
+plumbago/M
+plumbed/U
+plumber/M
+plumbing/M
+plumb/JSZGMRD
+plume/SM
+plummer
+plummest
+plummet/DSG
+plummy
+plumper/M
+plumpness/S
+plump/RDNYSTGP
+plum/SMDG
+plumy/TR
+plunder/GDRSZ
+plunger/M
+plunge/RSDZG
+plunker/M
+plunk/ZGSRD
+pluperfect/S
+pluralism/MS
+pluralistic
+pluralist/S
+plurality/SM
+pluralization/MS
+pluralize/GZRSD
+pluralizer/M
+plural/SY
+plushness/MS
+plush/RSYMTP
+plushy/RPT
+plus/S
+plussed
+plussing
+Plutarch/M
+plutocracy/MS
+plutocratic
+plutocrat/SM
+Pluto/M
+plutonium/SM
+pluvial/S
+ply/AZNGRSD
+Plymouth/M
+plywood/MS
+pm
+PM
+Pm/M
+PMS
+pneumatically
+pneumatic/S
+pneumatics/M
+pneumonia/MS
+PO
+poacher/M
+poach/ZGSRD
+Pocahontas/M
+pocketbook/SM
+pocketful/SM
+pocketing/M
+pocketknife/M
+pocketknives
+pocket/MSRDG
+pock/GDMS
+pockmark/MDSG
+Pocono/MS
+podded
+podding
+podge/ZR
+Podgorica/M
+podiatrist/MS
+podiatry/MS
+podium/MS
+pod/SM
+Podunk/M
+Poe/M
+poem/MS
+poesy/GSDM
+poetaster/MS
+poetess/MS
+poetically
+poeticalness
+poetical/U
+poetic/S
+poetics/M
+poet/MS
+poetry/SM
+pogo
+Pogo/M
+pogrom/GMDS
+poignancy/MS
+poignant/Y
+Poincaré/M
+poinciana/SM
+Poindexter/M
+poinsettia/SM
+pointblank
+pointedness/M
+pointed/PY
+pointer/M
+pointillism/SM
+pointillist/SM
+pointing/M
+pointlessness/SM
+pointless/YP
+point/RDMZGS
+pointy/TR
+poise/M
+pois/GDS
+poi/SM
+poisoner/M
+poisoning/M
+poisonous/PY
+poison/RDMZGSJ
+Poisson/M
+poke/DRSZG
+Pokemon/M
+pokerface/D
+poker/M
+poky/SRT
+Poland/M
+Polanski/M
+polarimeter/SM
+polarimetry
+polariscope/M
+Polaris/M
+polarity/MS
+polarization/CMS
+polarized/UC
+polarize/RSDZG
+polarizes/C
+polarizing/C
+polarogram/SM
+polarograph
+polarography/M
+Polaroid/SM
+polar/S
+polecat/SM
+polemical/Y
+polemicist/S
+polemic/S
+polemics/M
+pole/MS
+Pole/MS
+poler/M
+polestar/S
+poleward/S
+pol/GMDRS
+policeman/M
+policemen/M
+police/MSDG
+policewoman/M
+policewomen
+policyholder/MS
+policymaker/S
+policymaking
+policy/SM
+poliomyelitides
+poliomyelitis/M
+polio/SM
+Polish
+polished/U
+polisher/M
+polish/RSDZGJ
+polis/M
+Politburo/M
+politburo/S
+politeness/MS
+polite/PRTY
+politesse/SM
+politically
+political/U
+politician/MS
+politicization/S
+politicize/CSDG
+politicked
+politicking/SM
+politico/SM
+politic/S
+politics/M
+polity/MS
+polka/SDMG
+Polk/M
+pollack/SM
+Pollard/M
+polled/U
+pollen/GDM
+pollinate/XSDGN
+pollination/M
+pollinator/MS
+polliwog/SM
+poll/MDNRSGX
+pollock's
+Pollock/SM
+pollster/MS
+pollutant/MS
+polluted/U
+polluter/M
+pollute/RSDXZVNG
+pollution/M
+Pollux/M
+Pollyanna/M
+Polly/M
+pollywog's
+Pol/MY
+Polo/M
+polo/MS
+polonaise/MS
+polonium/MS
+poltergeist/SM
+poltroon/MS
+polyandrous
+polyandry/MS
+polyatomic
+polybutene/MS
+polycarbonate
+polychemicals
+polychrome
+polyclinic/MS
+polycrystalline
+polyelectrolytes
+polyester/SM
+polyether/S
+polyethylene/SM
+polygamist/MS
+polygamous/Y
+polygamy/MS
+polyglot/S
+polygonal/Y
+polygon/MS
+polygraph/MDG
+polygraphs
+polygynous
+polyhedral
+polyhedron/MS
+Polyhymnia/M
+polyisobutylene
+polyisocyanates
+polymath/M
+polymaths
+polymerase/S
+polymeric
+polymerization/SM
+polymerize/SDG
+polymer/MS
+polymorphic
+polymorphism/MS
+polymorph/M
+polymyositis
+Polynesia/M
+Polynesian/S
+polynomial/YMS
+Polyphemus/M
+polyphonic
+polyphony/MS
+polyphosphate/S
+polyp/MS
+polypropylene/MS
+polystyrene/SM
+polysyllabic
+polysyllable/SM
+polytechnic/MS
+polytheism/SM
+polytheistic
+polytheist/SM
+polythene/M
+polytonal/Y
+polytopes
+polyunsaturated
+polyurethane/SM
+polyvinyl/MS
+Po/M
+pomade/MGSD
+pomander/MS
+pomegranate/SM
+Pomerania/M
+Pomeranian
+pommel/GSMD
+Pomona/M
+Pompadour/M
+pompadour/MDS
+pompano/SM
+Pompeian/S
+Pompeii/M
+Pompey/M
+pompom/SM
+pompon's
+pomposity/MS
+pompousness/S
+pompous/YP
+pomp/SM
+ponce/M
+Ponce/M
+Ponchartrain/M
+poncho/MS
+ponderer/M
+ponderousness/MS
+ponderous/PY
+ponder/ZGRD
+pond/SMDRGZ
+pone/SM
+pongee/MS
+poniard/GSDM
+pons/M
+Pontchartrain/M
+Pontiac/M
+Pontianak/M
+pontiff/MS
+pontifical/YS
+pontificate/XGNDS
+pontoon/SMDG
+pony/DSMG
+ponytail/SM
+pooch/GSDM
+poodle/MS
+poof/MS
+pooh/DG
+Pooh/M
+poohs
+Poole/M
+pool/MDSG
+poolroom/MS
+poolside
+Poona/M
+poop/MDSG
+poorboy
+poorhouse/MS
+poorness/MS
+poor/TYRP
+popcorn/MS
+Popek/MS
+pope/SM
+Pope/SM
+Popeye/M
+popgun/SM
+popinjay/MS
+poplar/SM
+poplin/MS
+Popocatepetl/M
+popover/SM
+poppa/MS
+popped
+Popper/M
+popper/SM
+poppet/M
+popping
+Poppins/M
+poppycock/MS
+Poppy/M
+poppy/SDM
+poppyseed
+Popsicle/MS
+pop/SM
+populace/MS
+popularism
+popularity/UMS
+popularization/SM
+popularize/A
+popularized
+popularizer/MS
+popularizes/U
+popularizing
+popular/YS
+populate/CXNGDS
+populated/UA
+populates/A
+populating/A
+population/MC
+populism/S
+populist/SM
+populousness/MS
+populous/YP
+porcelain/SM
+porch/SM
+porcine
+porcupine/MS
+pore/ZGDRS
+Porfirio/M
+porgy/SM
+poring/Y
+porker/M
+porky/TSR
+pork/ZRMS
+pornographer/SM
+pornographic
+pornographically
+pornography/SM
+porno/S
+porn/S
+porosity/SM
+porousness/MS
+porous/PY
+porphyritic
+porphyry/MS
+porpoise/DSGM
+porridge/MS
+Porrima/M
+porringer/MS
+Porsche/M
+portability/S
+portables
+portable/U
+portably
+port/ABSGZMRD
+portage/ASM
+portaged
+portaging
+portal/SM
+portamento/M
+portcullis/MS
+ported/CE
+Porte/M
+portend/SDG
+portentousness/M
+portentous/PY
+portent/SM
+porterage/M
+porter/DMG
+porterhouse/SM
+Porter/M
+porter's/A
+portfolio/MS
+porthole/SM
+Portia/M
+porticoes
+portico/M
+Portie/M
+portire/SM
+porting/E
+portion/KGSMD
+Portland/M
+portliness/SM
+portly/PTR
+portmanteau/SM
+Port/MR
+Pôrto/M
+portraitist/SM
+portrait/MS
+portraiture/MS
+portrayal/SM
+portrayer/M
+portray/GDRS
+ports/CE
+Portsmouth/M
+Portugal/M
+Portuguese/M
+portulaca/MS
+Porty/M
+posed/CA
+Poseidon/M
+poser/KME
+poses/CA
+poseur/MS
+pose/ZGKDRSE
+posh/DSRGT
+posing/CA
+positifs
+positionable
+positional/KY
+position/KGASMD
+position's/EC
+positions/EC
+positiveness/S
+positive/RSPYT
+positivism/M
+positivist/S
+positivity
+positron/SM
+posit/SCGD
+Posner/M
+posse/M
+possess/AGEDS
+possessed/PY
+possession/AEMS
+possessional
+possessiveness/MS
+possessive/PSMY
+possessor/MS
+possibility/SM
+possible/TRS
+possibly
+poss/S
+possum/MS
+postage/MS
+postal/S
+post/ASDRJG
+postbag/M
+postbox/SM
+postcard/SM
+postcode/SM
+postcondition/S
+postconsonantal
+postdate/DSG
+postdoctoral
+posteriori
+posterior/SY
+posterity/SM
+poster/MS
+postfix/GDS
+postgraduate/SM
+posthaste/S
+posthumousness/M
+posthumous/YP
+posthypnotic
+postilion/MS
+postindustrial
+posting/M
+postlude/MS
+Post/M
+postman/M
+postmarital
+postmark/GSMD
+postmaster/SM
+postmen
+postmeridian
+postmistress/MS
+postmodern
+postmodernist
+postmortem/S
+postnasal
+postnatal
+postoperative/Y
+postorder
+postpaid
+postpartum
+postpone/GLDRS
+postponement/S
+postpositions
+postprandial
+post's
+postscript/SM
+postsecondary
+postulate/XGNSD
+postulation/M
+postural
+posture/MGSRD
+posturer/M
+postvocalic
+postwar
+posy/SM
+potability/SM
+potableness/M
+potable/SP
+potage/M
+potash/MS
+potassium/MS
+potatoes
+potato/M
+potbelly/MSD
+potboiler/M
+potboil/ZR
+pot/CMS
+Potemkin/M
+potency/MS
+potentate/SM
+potentiality/MS
+potential/SY
+potentiating
+potentiometer/SM
+potent/YS
+potful/SM
+pothead/MS
+potherb/MS
+pother/GDMS
+potholder/MS
+pothole/SDMG
+potholing/M
+pothook/SM
+potion/SM
+potlatch/SM
+potluck/MS
+Potomac/M
+potpie/SM
+potpourri/SM
+Potsdam/M
+potsherd/MS
+potshot/S
+pottage/SM
+Pottawatomie/M
+potted
+Potter/M
+potter/RDMSG
+pottery/MS
+potting
+Potts/M
+potty/SRT
+pouch/SDMG
+Poughkeepsie/M
+Poul/M
+poulterer/MS
+poultice/DSMG
+poultry/MS
+pounce/SDG
+poundage/MS
+pounder/MS
+pound/KRDGS
+Pound/M
+pour/DSG
+pourer's
+Poussin/MS
+pouter/M
+pout/GZDRS
+poverty/MS
+POW
+powderpuff
+powder/RDGMS
+powdery
+Powell/M
+powerboat/MS
+powerfulness/M
+powerful/YP
+power/GMD
+powerhouse/MS
+powerlessness/SM
+powerless/YP
+Powers
+Powhatan/M
+pow/RZ
+powwow/GDMS
+pox/GMDS
+Poznan/M
+pp
+PP
+ppm
+ppr
+PPS
+pr
+PR
+practicability/S
+practicable/P
+practicably
+practicality/SM
+practicalness/M
+practical/YPS
+practice/BDRSMG
+practiced/U
+practicer/M
+practicum/SM
+practitioner/SM
+Pradesh/M
+Prado/M
+Praetorian
+praetorian/S
+praetor/MS
+pragmatical/Y
+pragmatic/S
+pragmatics/M
+pragmatism/MS
+pragmatist/MS
+Prague/M
+Praia
+prairie/MS
+praise/ESDG
+praiser/S
+praise's
+praiseworthiness/MS
+praiseworthy/P
+praising/Y
+Prakrit/M
+praline/MS
+pram/MS
+prancer/M
+prance/ZGSRD
+prancing/Y
+prank/SMDG
+prankster/SM
+praseodymium/SM
+Pratchett/M
+prate/DSRGZ
+prater/M
+pratfall/MS
+prating/Y
+prattle/DRSGZ
+prattler/M
+prattling/Y
+Pratt/M
+Prattville/M
+Pravda/M
+prawn/MDSG
+praxes
+praxis/M
+Praxiteles/M
+pray/DRGZS
+prayerbook
+prayerfulness/M
+prayerful/YP
+prayer/M
+PRC
+preach/DRSGLZJ
+preacher/M
+preaching/Y
+preachment/MS
+preachy/RT
+preadolescence/S
+Preakness/M
+preallocate/XGNDS
+preallocation/M
+preallocator/S
+preamble/MGDS
+preamp
+preamplifier/M
+prearrange/LSDG
+prearrangement/SM
+preassign/SDG
+preauthorize
+prebendary/M
+Precambrian
+precancel/DGS
+precancerous
+precariousness/MS
+precarious/PY
+precautionary
+precaution/SGDM
+precede/DSG
+precedence/SM
+precedented/U
+precedent/SDM
+preceptive/Y
+preceptor/MS
+precept/SMV
+precess/DSG
+precession/M
+precinct/MS
+preciosity/MS
+preciousness/S
+precious/PYS
+precipice/MS
+precipitable
+precipitant/S
+precipitateness/M
+precipitate/YNGVPDSX
+precipitation/M
+precipitousness/M
+precipitous/YP
+preciseness/SM
+precise/XYTRSPN
+precision/M
+précis/MDG
+preclude/GDS
+preclusion/S
+precociousness/MS
+precocious/YP
+precocity/SM
+precode/D
+precognition/SM
+precognitive
+precollege/M
+precolonial
+precomputed
+preconceive/GSD
+preconception/SM
+precondition/GMDS
+preconscious
+precook/GDS
+precursor/SM
+precursory
+precut
+predate/NGDSX
+predation/CMS
+predator/SM
+predatory
+predecease/SDG
+predecessor/MS
+predeclared
+predecline
+predefine/GSD
+predefinition/SM
+predesignate/GDS
+predestination/SM
+predestine/SDG
+predetermination/MS
+predeterminer/M
+predetermine/ZGSRD
+predicable/S
+predicament/SM
+predicate/VGNXSD
+predication/M
+predicator
+predictability/UMS
+predictable/U
+predictably/U
+predict/BSDGV
+predicted/U
+prediction/MS
+predictive/Y
+predictor/MS
+predigest/GDS
+predilect
+predilection/SM
+predispose/SDG
+predisposition/MS
+predoctoral
+predominance/SM
+predominant/Y
+predominate/YSDGN
+predomination/M
+preemie/MS
+preeminence/SM
+preeminent/Y
+preemployment/M
+preempt/GVSD
+preemption/SM
+preemptive/Y
+preemptor/M
+preener/M
+preen/SRDG
+preexist/DSG
+preexistence/SM
+preexistent
+prefabbed
+prefabbing
+prefab/MS
+prefabricate/XNGDS
+prefabrication/M
+preface/DRSGM
+prefacer/M
+prefatory
+prefect/MS
+prefecture/MS
+preferableness/M
+preferable/P
+preferably
+prefer/BL
+preference/MS
+preferential/Y
+preferment/SM
+preferred
+preferring
+prefiguration/M
+prefigure/SDG
+prefix/MDSG
+preflight/SGDM
+preform/DSG
+pref/RZ
+pregnancy/SM
+pregnant/Y
+preheat/GDS
+prehensile
+prehistoric
+prehistorical/Y
+prehistory/SM
+preindustrial
+preinitialize/SDG
+preinterview/M
+preisolated
+prejudge/DRSG
+prejudger/M
+prejudgment/SM
+prejudiced/U
+prejudice/MSDG
+prejudicial/PY
+prekindergarten/MS
+prelacy/MS
+prelate/SM
+preliminarily
+preliminary/S
+preliterate/S
+preloaded
+prelude/GMDRS
+preluder/M
+premarital/Y
+premarket
+prematureness/M
+premature/SPY
+prematurity/M
+premedical
+premeditated/Y
+premeditate/XDSGNV
+premeditation/M
+premed/S
+premenstrual
+premiere/MS
+premier/GSDM
+premiership/SM
+Preminger/M
+premise/GMDS
+premiss's
+premium/MS
+premix/GDS
+premolar/S
+premonition/SM
+premonitory
+prenatal/Y
+Pren/M
+Prenticed/M
+Prentice/MGD
+Prenticing/M
+Prentiss/M
+Prent/M
+prenuptial
+preoccupation/MS
+preoccupy/DSG
+preoperative
+preordain/DSLG
+prepackage/GSD
+prepaid
+preparation/SM
+preparative/SYM
+preparatory
+preparedly
+preparedness/USM
+prepared/UP
+prepare/ZDRSG
+prepay/GLS
+prepayment/SM
+prepender/S
+prepends
+preplanned
+preponderance/SM
+preponderant/Y
+preponderate/DSYGN
+prepositional/Y
+preposition/SDMG
+prepossess/GSD
+prepossessing/U
+prepossession/MS
+preposterousness/M
+preposterous/PY
+prepped
+prepping
+preppy/RST
+preprepared
+preprint/SGDM
+preprocessed
+preprocessing
+preprocessor/S
+preproduction
+preprogrammed
+prep/SM
+prepubescence/S
+prepubescent/S
+prepublication/M
+prepuce/SM
+prequel/S
+preradiation
+prerecord/DGS
+preregister/DSG
+preregistration/MS
+prerequisite/SM
+prerogative/SDM
+Pres
+presage/GMDRS
+presager/M
+presbyopia/MS
+presbyterian
+Presbyterianism/S
+Presbyterian/S
+presbyter/MS
+presbytery/MS
+preschool/RSZ
+prescience/SM
+prescient/Y
+Prescott/M
+prescribed/U
+prescriber/M
+prescribe/RSDG
+prescription/SM
+prescriptive/Y
+prescript/SVM
+preselect/SGD
+presence/SM
+presentableness/M
+presentable/P
+presentably/A
+presentational/A
+presentation/AMS
+presented/A
+presenter/A
+presentiment/MS
+presentment/SM
+presents/A
+present/SLBDRYZGP
+preservationist/S
+preservation/SM
+preservative/SM
+preserve/DRSBZG
+preserved/U
+preserver/M
+preset/S
+presetting
+preshrank
+preshrink/SG
+preshrunk
+preside/DRSG
+presidency/MS
+presidential/Y
+president/SM
+presider/M
+presidia
+presidium/M
+Presley/M
+presoaks
+presort/GDS
+pres/S
+press/ACDSG
+pressed/U
+presser/MS
+pressingly/C
+pressing/YS
+pressman/M
+pressmen
+pressure/DSMG
+pressurization/MS
+pressurize/DSRGZ
+pressurized/U
+prestidigitate/NX
+prestidigitation/M
+prestidigitatorial
+prestidigitator/M
+prestige/MS
+prestigious/PY
+Preston/M
+presto/S
+presumably
+presume/BGDRS
+presumer/M
+presuming/Y
+presumption/MS
+presumptive/Y
+presumptuousness/SM
+presumptuous/YP
+presuppose/GDS
+presupposition/S
+pretax
+preteen/S
+pretended/Y
+pretender/M
+pretending/U
+pretend/SDRZG
+pretense/MNVSX
+pretension/GDM
+pretentiousness/S
+pretentious/UYP
+preterite's
+preterit/SM
+preternatural/Y
+pretest/SDG
+pretext/SMDG
+Pretoria/M
+pretreated
+pretreatment/S
+pretrial
+prettify/SDG
+prettily
+prettiness/SM
+pretty/TGPDRS
+pretzel/SM
+prevailing/Y
+prevail/SGD
+prevalence/MS
+prevalent/SY
+prevaricate/DSXNG
+prevaricator/MS
+preventable/U
+preventably
+preventative/S
+prevent/BSDRGV
+preventer/M
+prevention/MS
+preventiveness/M
+preventive/SPY
+preview/ZGSDRM
+previous/Y
+prevision/SGMD
+prewar
+prexes
+preyer's
+prey/SMDG
+Priam/M
+priapic
+Pribilof/M
+price/AGSD
+priced/U
+priceless
+Price/M
+pricer/MS
+price's
+pricey
+pricier
+priciest
+pricker/M
+pricking/M
+prickle/GMDS
+prickliness/S
+prickly/RTP
+prick/RDSYZG
+prideful/Y
+pride/GMDS
+prier/M
+priestess/MS
+priesthood/SM
+Priestley/M
+priestliness/SM
+priestly/PTR
+priest/SMYDG
+prigged
+prigging
+priggishness/S
+priggish/PYM
+prig/SM
+primacy/MS
+primal
+primarily
+primary/MS
+primate/MS
+primed/U
+primely/M
+primeness/M
+prime/PYS
+primer/M
+Prime's
+primeval/Y
+priming/M
+primitiveness/SM
+primitive/YPS
+primitivism/M
+primmed
+primmer
+primmest
+primming
+primness/MS
+primogenitor/MS
+primogeniture/MS
+primordial/YS
+primp/DGS
+primrose/MGSD
+prim/SPJGZYDR
+princedom/MS
+princeliness/SM
+princely/PRT
+Prince/M
+prince/SMY
+princess/MS
+Princeton/M
+principality/MS
+principal/SY
+Principe/M
+Principia/M
+principled/U
+principle/SDMG
+printable/U
+printably
+print/AGDRS
+printed/U
+printer/AM
+printers
+printing/SM
+printmaker/M
+printmake/ZGR
+printmaking/M
+printout/S
+Prinz/M
+prioress/MS
+priori
+prioritize/DSRGZJ
+priority/MS
+prior/YS
+priory/SM
+Pris
+Prisca/M
+Priscella/M
+Priscilla/M
+prised
+prise/GMAS
+prismatic
+prism/MS
+prison/DRMSGZ
+prisoner/M
+Prissie/M
+prissily
+prissiness/SM
+prissy/RSPT
+pristine/Y
+prithee/S
+privacy/MS
+privateer/SMDG
+privateness/M
+private/NVYTRSXP
+privation/MCS
+privative/Y
+privatization/S
+privatize/GSD
+privet/SM
+privileged/U
+privilege/SDMG
+privily
+privy/SRMT
+prized/A
+prize/DSRGZM
+prizefighter/M
+prizefighting/M
+prizefight/SRMGJZ
+prizewinner/S
+prizewinning
+Pr/MN
+PRO
+proactive
+probabilist
+probabilistic
+probabilistically
+probability/SM
+probable/S
+probably
+probated/A
+probate/NVMX
+probates/A
+probating/A
+probational
+probationary/S
+probationer/M
+probation/MRZ
+probation's/A
+probative/A
+prober/M
+probity/SM
+problematical/UY
+problematic/S
+problem/SM
+proboscis/MS
+prob/RBJ
+procaine/MS
+procedural/SY
+procedure/MS
+proceeder/M
+proceeding/M
+proceed/JRDSG
+process/BSDMG
+processed/UA
+processes/A
+processional/YS
+procession/GD
+processor/MS
+proclamation/MS
+proclivity/MS
+proconsular
+procrastinate/XNGDS
+procrastination/M
+procrastinator/MS
+procreational
+procreatory
+procrustean
+Procrustean
+Procrustes/M
+proctor/GSDM
+proctorial
+procurable/U
+procure/L
+procurement/MS
+Procyon/M
+prodded
+prodding
+prodigality/S
+prodigal/SY
+prodigiousness/M
+prodigious/PY
+prodigy/MS
+prod/S
+produce/AZGDRS
+producer/AM
+producible/A
+production/ASM
+productively/UA
+productiveness/MS
+productive/PY
+productivities
+productivity/A
+productivity's
+productize/GZRSD
+product/V
+Prof
+profanation/S
+profaneness/MS
+profane/YPDRSG
+profanity/MS
+professed/Y
+professionalism/SM
+professionalize/GSD
+professional/USY
+profession/SM
+professorial/Y
+professorship/SM
+professor/SM
+proffer/GSD
+proficiency/SM
+proficient/YS
+profitability/MS
+profitableness/MU
+profitable/UP
+profitably/U
+profiteer/GSMD
+profiterole/MS
+profit/GZDRB
+profitless
+profligacy/S
+profligate/YS
+proforma/S
+profoundity
+profoundness/SM
+profound/PTYR
+prof/S
+profundity/MS
+profuseness/MS
+profuse/YP
+progenitor/SM
+progeny/M
+progesterone/SM
+prognathous
+prognoses
+prognosis/M
+prognosticate/NGVXDS
+prognostication/M
+prognosticator/S
+prognostic/S
+program/CSA
+programed
+programing
+programmability
+programmable/S
+programmed/CA
+programmer/ASM
+programming/CA
+programmings
+progression/SM
+progressiveness/SM
+progressive/SPY
+progressivism
+progress/MSDVG
+prohibiter/M
+prohibitionist/MS
+prohibition/MS
+Prohibition/MS
+prohibitiveness/M
+prohibitive/PY
+prohibitory
+prohibit/VGSRD
+projected/AU
+projectile/MS
+projectionist/MS
+projection/MS
+projective/Y
+project/MDVGS
+projector/SM
+Prokofieff/M
+Prokofiev/M
+prolegomena
+proletarianization/M
+proletarianized
+proletarian/S
+proletariat/SM
+proliferate/GNVDSX
+proliferation/M
+prolifically
+prolific/P
+prolixity/MS
+prolix/Y
+prologize
+prologue/MGSD
+prologuize
+prolongate/NGSDX
+prolongation/M
+prolonger/M
+prolong/G
+promenade/GZMSRD
+promenader/M
+Promethean
+Prometheus/M
+promethium/SM
+prominence/MS
+prominent/Y
+promiscuity/MS
+promiscuousness/M
+promiscuous/PY
+promise/GD
+promising/UY
+promissory
+promontory/MS
+promote/GVZBDR
+promoter/M
+promotiveness/M
+promotive/P
+prompted/U
+prompter/M
+promptitude/SM
+promptness/MS
+prompt/SGJTZPYDR
+pro/MS
+promulgate/NGSDX
+promulgation/M
+promulgator/MS
+pron
+proneness/MS
+prone/PY
+pronghorn/SM
+prong/SGMD
+pronominalization
+pronominalize
+pronounceable/U
+pronouncedly
+pronounced/U
+pronounce/GLSRD
+pronouncement/SM
+pronouncer/M
+pronto
+pronunciation/SM
+proofed/A
+proofer
+proofing/M
+proofreader/M
+proofread/GZSR
+proof/SEAM
+propaganda/SM
+propagandistic
+propagandist/SM
+propagandize/DSG
+propagated/U
+propagate/SDVNGX
+propagation/M
+propagator/MS
+propellant/MS
+propelled
+propeller/MS
+propelling
+propel/S
+propensity/MS
+properness/M
+proper/PYRT
+propertied/U
+property/SDM
+prophecy/SM
+prophesier/M
+prophesy/GRSDZ
+prophetess/S
+prophetic
+prophetical/Y
+prophet/SM
+prophylactic/S
+prophylaxes
+prophylaxis/M
+propinquity/MS
+propionate/M
+propitiate/GNXSD
+propitiatory
+propitiousness/M
+propitious/YP
+proponent/MS
+proportionality/M
+proportional/SY
+proportionate/YGESD
+proportioner/M
+proportion/ESGDM
+proportionment/M
+proposal/SM
+propped
+propping
+proprietary/S
+proprietorial
+proprietorship/SM
+proprietor/SM
+proprietress/MS
+propriety/MS
+proprioception
+proprioceptive
+prop/SZ
+propulsion/MS
+propulsive
+propylene/M
+prorogation/SM
+prorogue
+prosaic
+prosaically
+proscenium/MS
+prosciutti
+prosciutto/SM
+proscription/SM
+proscriptive
+pros/DSRG
+prosecute/SDBXNG
+prosecution/M
+prosecutor/MS
+proselyte/SDGM
+proselytism/MS
+proselytize/ZGDSR
+prose/M
+proser/M
+Proserpine/M
+prosodic/S
+prosody/MS
+prospect/DMSVG
+prospection/SM
+prospectiveness/M
+prospective/SYP
+prospector/MS
+prospectus/SM
+prosper/GSD
+prosperity/MS
+prosperousness/M
+prosperous/PY
+prostate
+prostheses
+prosthesis/M
+prosthetic/S
+prosthetics/M
+prostitute/DSXNGM
+prostitution/M
+prostrate/SDXNG
+prostration/M
+prosy/RT
+protactinium/MS
+protagonist/SM
+Protagoras/M
+protean/S
+protease/M
+protect/DVGS
+protected/UY
+protectionism/MS
+protectionist/MS
+protection/MS
+protectiveness/S
+protective/YPS
+protectorate/SM
+protector/MS
+protégés
+protégé/SM
+protein/MS
+proteolysis/M
+proteolytic
+Proterozoic/M
+protestantism
+Protestantism/MS
+protestant/S
+Protestant/SM
+protestation/MS
+protest/G
+protesting/Y
+Proteus/M
+protocol/DMGS
+protoplasmic
+protoplasm/MS
+prototype/SDGM
+prototypic
+prototypical/Y
+protozoa
+protozoan/MS
+protozoic
+protozoon's
+protract/DG
+protrude/SDG
+protrusile
+protrusion/MS
+protrusive/PY
+protuberance/S
+protuberant
+Proudhon/M
+proud/TRY
+Proust/M
+provabilities
+provability's
+provability/U
+provableness/M
+provable/P
+provably
+prov/DRGZB
+proved/U
+proven/U
+prove/ESDAG
+provenance/SM
+Provençal
+Provencals
+Provence/M
+provender/SDG
+provenience/SM
+provenly
+proverb/DG
+proverbial/Y
+Proverbs/M
+prover/M
+provide/DRSBGZ
+provided/U
+providence/SM
+Providence/SM
+providential/Y
+provident/Y
+provider/M
+province/SM
+provincialism/SM
+provincial/SY
+provisional/YS
+provisioner/M
+provision/R
+proviso/MS
+provocateur/S
+provocativeness/SM
+provocative/P
+provoked/U
+provoke/GZDRS
+provoking/Y
+provolone/SM
+Provo/M
+provost/MS
+prowess/SM
+prowler/M
+prowl/RDSZG
+prow/TRMS
+proximal/Y
+proximateness/M
+proximate/PY
+proximity/MS
+Proxmire/M
+proxy/SM
+Prozac
+prude/MS
+Prudence/M
+prudence/SM
+Prudential/M
+prudential/SY
+prudent/Y
+prudery/MS
+Prudi/M
+prudishness/SM
+prudish/YP
+Prudy/M
+Prue/M
+Pruitt/M
+Pru/M
+prune/DSRGZM
+pruner/M
+prurience/MS
+prurient/Y
+Prussia/M
+Prussian/S
+prussic
+Prut/M
+Pryce/M
+pry/DRSGTZ
+pryer's
+prying/Y
+P's
+PS
+p's/A
+psalmist/SM
+psalm/SGDM
+Psalms/M
+psalter
+Psalter/SM
+psaltery/MS
+psephologist/M
+pseudonymous
+pseudonym/SM
+pseudopod
+pseudo/S
+pseudoscience/S
+pshaw/SDG
+psi/S
+psittacoses
+psittacosis/M
+psoriases
+psoriasis/M
+psst/S
+PST
+psychedelically
+psychedelic/S
+psyche/M
+Psyche/M
+psychiatric
+psychiatrist/SM
+psychiatry/MS
+psychical/Y
+psychic/MS
+psychoacoustic/S
+psychoacoustics/M
+psychoactive
+psychoanalysis/M
+psychoanalyst/S
+psychoanalytic
+psychoanalytical
+psychoanalyze/SDG
+psychobabble/S
+psychobiology/M
+psychocultural
+psychodrama/MS
+psychogenic
+psychokinesis/M
+psycholinguistic/S
+psycholinguistics/M
+psycholinguists
+psychological/Y
+psychologist/MS
+psychology/MS
+psychometric/S
+psychometrics/M
+psychometry/M
+psychoneuroses
+psychoneurosis/M
+psychopathic/S
+psychopath/M
+psychopathology/M
+psychopaths
+psychopathy/SM
+psychophysical/Y
+psychophysic/S
+psychophysics/M
+psychophysiology/M
+psychosis/M
+psycho/SM
+psychosocial/Y
+psychosomatic/S
+psychosomatics/M
+psychos/S
+psychotherapeutic/S
+psychotherapist/MS
+psychotherapy/SM
+psychotically
+psychotic/S
+psychotropic/S
+psychs
+psych/SDG
+PT
+PTA
+Ptah/M
+ptarmigan/MS
+pt/C
+pterodactyl/SM
+Pt/M
+PTO
+Ptolemaic
+Ptolemaists
+Ptolemy/MS
+ptomaine/MS
+Pu
+pubbed
+pubbing
+pubertal
+puberty/MS
+pubes
+pubescence/S
+pubescent
+pubic
+pubis/M
+publican/AMS
+publication/AMS
+publicist/SM
+publicity/SM
+publicized/U
+publicize/SDG
+publicness/M
+publics/A
+public/YSP
+publishable/U
+published/UA
+publisher/ASM
+publishes/A
+publishing/M
+publish/JDRSBZG
+pub/MS
+Puccini/M
+puce/SM
+pucker/DG
+Puckett/M
+puck/GZSDRM
+puckishness/S
+puckish/YP
+Puck/M
+pudding/MS
+puddle/JMGRSD
+puddler/M
+puddling/M
+puddly
+pudenda
+pudendum/M
+pudginess/SM
+pudgy/PRT
+Puebla/M
+Pueblo/MS
+pueblo/SM
+puerile/Y
+puerility/SM
+puerperal
+puers
+Puerto/M
+puffball/SM
+puffer/M
+puffery/M
+puffiness/S
+puffin/SM
+Puff/M
+puff/SGZDRM
+puffy/PRT
+Puget/M
+pugged
+pugging
+Pugh/M
+pugilism/SM
+pugilistic
+pugilist/S
+pug/MS
+pugnaciousness/MS
+pugnacious/YP
+pugnacity/SM
+puissant/Y
+puke/GDS
+pukka
+Pulaski/SM
+pulchritude/SM
+pulchritudinous/M
+pule/GDS
+Pulitzer/SM
+pullback/S
+pull/DRGZSJ
+pullet/SM
+pulley/SM
+Pullman/MS
+pullout/S
+pullover/SM
+pulmonary
+pulpiness/S
+pulpit/MS
+pulp/MDRGS
+pulpwood/MS
+pulpy/PTR
+pulsar/MS
+pulsate/NGSDX
+pulsation/M
+pulse/ADSG
+pulser
+pulse's
+pulverable
+pulverization/MS
+pulverized/U
+pulverize/GZSRD
+pulverizer/M
+pulverizes/UA
+puma/SM
+pumice/SDMG
+pummel/SDG
+pumpernickel/SM
+pump/GZSMDR
+pumping/M
+pumpkin/MS
+punchbowl/M
+punched/U
+puncheon/MS
+puncher/M
+punch/GRSDJBZ
+punchline/S
+Punch/M
+punchy/RT
+punctilio/SM
+punctiliousness/SM
+punctilious/PY
+punctualities
+punctuality/UM
+punctualness/M
+punctual/PY
+punctuate/SDXNG
+punctuational
+punctuation/M
+puncture/SDMG
+punditry/S
+pundit/SM
+pungency/MS
+pungent/Y
+Punic
+puniness/MS
+punished/U
+punisher/M
+punishment/MS
+punish/RSDGBL
+punitiveness/M
+punitive/YP
+Punjabi/M
+Punjab/M
+punk/TRMS
+punky/PRS
+pun/MS
+punned
+punning
+punster/SM
+punter/M
+punt/GZMDRS
+puny/PTR
+pupae
+pupal
+pupa/M
+pupate/NGSD
+pupillage/M
+pupil/SM
+pup/MS
+pupped
+puppeteer/SM
+puppetry/MS
+puppet/SM
+pupping
+puppy/GSDM
+puppyish
+purblind
+Purcell/M
+purchasable
+purchase/GASD
+purchaser/MS
+purdah/M
+purdahs
+Purdue/M
+purebred/S
+puree/DSM
+pureeing
+pureness/MS
+pure/PYTGDR
+purgation/M
+purgative/MS
+purgatorial
+purgatory/SM
+purge/GZDSR
+purger/M
+purify/GSRDNXZ
+Purim/SM
+Purina/M
+purine/SM
+purism/MS
+puristic
+purist/MS
+puritanic
+puritanical/Y
+Puritanism/MS
+puritanism/S
+puritan/SM
+Puritan/SM
+purity/SM
+purlieu/SM
+purl/MDGS
+purloin/DRGS
+purloiner/M
+purple/MTGRSD
+purplish
+purport/DRSZG
+purported/Y
+purposefulness/S
+purposeful/YP
+purposelessness/M
+purposeless/PY
+purpose/SDVGYM
+purposiveness/M
+purposive/YP
+purr/DSG
+purring/Y
+purse/DSRGZM
+purser/M
+pursuance/MS
+pursuant
+pursuer/M
+pursue/ZGRSD
+pursuit/MS
+purulence/MS
+purulent
+Purus
+purveyance/MS
+purvey/DGS
+purveyor/MS
+purview/SM
+Pusan/M
+Pusey/M
+pushbutton/S
+pushcart/SM
+pushchair/SM
+pushdown
+push/DSRBGZ
+pusher/M
+pushily
+pushiness/MS
+Pushkin/M
+pushover/SM
+Pushtu/M
+pushy/PRT
+pusillanimity/MS
+pusillanimous/Y
+pus/SM
+puss/S
+pussycat/S
+pussyfoot/DSG
+pussy/TRSM
+pustular
+pustule/MS
+putative/Y
+Putin/M
+put/IS
+Putnam/M
+Putnem/M
+putout/S
+putrefaction/SM
+putrefactive
+putrefy/DSG
+putrescence/MS
+putrescent
+putridity/M
+putridness/M
+putrid/YP
+putsch/S
+putted/I
+puttee/MS
+putter/RDMGZ
+putting/I
+putt/SGZMDR
+puttying/M
+putty/SDMG
+puzzle/JRSDZLG
+puzzlement/MS
+puzzler/M
+PVC
+pvt
+Pvt/M
+PW
+PX
+p/XTGJ
+Pygmalion/M
+pygmy/SM
+Pygmy/SM
+Pyhrric/M
+pyknotic
+Pyle/M
+pylon/SM
+pylori
+pyloric
+pylorus/M
+Pym/M
+Pynchon/M
+Pyongyang/M
+pyorrhea/SM
+Pyotr/M
+pyramidal/Y
+pyramid/GMDS
+pyre/MS
+Pyrenees
+Pyrex/SM
+pyridine/M
+pyrimidine/SM
+pyrite/MS
+pyroelectric
+pyroelectricity/SM
+pyrolysis/M
+pyrolyze/RSM
+pyromaniac/SM
+pyromania/MS
+pyrometer/MS
+pyrometry/M
+pyrophosphate/M
+pyrotechnical
+pyrotechnic/S
+pyrotechnics/M
+pyroxene/M
+pyroxenite/M
+Pyrrhic
+Pythagoras/M
+Pythagorean/S
+Pythias
+Python/M
+python/MS
+pyx/MDSG
+q
+Q
+QA
+Qaddafi/M
+Qantas/M
+Qatar/M
+QB
+QC
+QED
+Qingdao
+Qiqihar/M
+QM
+Qom/M
+qr
+q's
+Q's
+qt
+qty
+qua
+Quaalude/M
+quackery/MS
+quackish
+quack/SDG
+quadded
+quadding
+quadrangle/MS
+quadrangular/M
+quadrant/MS
+quadraphonic/S
+quadrapole
+quadratical/Y
+quadratic/SM
+quadrature/MS
+quadrennial/SY
+quadrennium/MS
+quadric
+quadriceps/SM
+quadrilateral/S
+quadrille/XMGNSD
+quadrillion/MH
+quadripartite/NY
+quadriplegia/SM
+quadriplegic/SM
+quadrivia
+quadrivium/M
+quadrupedal
+quadruped/MS
+quadruple/GSD
+quadruplet/SM
+quadruplicate/GDS
+quadruply/NX
+quadrupole
+quad/SM
+quadword/MS
+quaffer/M
+quaff/SRDG
+quagmire/DSMG
+quahog/MS
+quail/GSDM
+quaintness/MS
+quaint/PTYR
+quake/GZDSR
+Quakeress/M
+Quakerism/S
+Quaker/SM
+quaky/RT
+qualification/ME
+qualified/UY
+qualifier/SM
+qualify/EGXSDN
+qualitative/Y
+quality/MS
+qualmish
+qualm/SM
+quandary/MS
+quangos
+quanta/M
+Quantico/M
+quantifiable/U
+quantified/U
+quantifier/M
+quantify/GNSRDZX
+quantile/S
+quantitativeness/M
+quantitative/PY
+quantity/MS
+quantization/MS
+quantizer/M
+quantize/ZGDRS
+quantum/M
+quarantine/DSGM
+quark/SM
+quarreler/M
+quarrellings
+quarrelsomeness/MS
+quarrelsome/PY
+quarrel/SZDRMG
+quarrier/M
+quarryman/M
+quarrymen
+quarry/RSDGM
+quarterback/SGMD
+quarterdeck/MS
+quarterer/M
+quarterfinal/MS
+quartering/M
+quarterly/S
+quartermaster/MS
+quarter/MDRYG
+quarterstaff/M
+quarterstaves
+quartet/SM
+quartic/S
+quartile/SM
+quarto/SM
+quart/RMSZ
+quartzite/M
+quartz/SM
+quasar/SM
+quash/GSD
+quasi
+quasilinear
+Quasimodo/M
+Quaternary
+quaternary/S
+quaternion/SM
+quatrain/SM
+quaver/GDS
+quavering/Y
+quavery
+Quayle/M
+quayside/M
+quay/SM
+queasily
+queasiness/SM
+queasy/TRP
+Quebec/M
+Quechua/M
+Queenie/M
+queenly/RT
+queen/SGMDY
+Queensland/M
+Queen/SM
+queerness/S
+queer/STGRDYP
+queller/M
+quell/SRDG
+Que/M
+quenchable/U
+quenched/U
+quencher/M
+quench/GZRSDB
+quenchless
+Quentin/M
+Quent/M
+Querida/M
+quern/M
+querulousness/S
+querulous/YP
+query/MGRSD
+quested/A
+quester/AS
+quester's
+quest/FSIM
+questing
+questionableness/M
+questionable/P
+questionably/U
+questioned/UA
+questioner/M
+questioning/UY
+questionnaire/MS
+question/SMRDGBZJ
+quests/A
+Quetzalcoatl/M
+queued/C
+queue/GZMDSR
+queuer/M
+queues/C
+queuing/C
+Quezon/M
+quibble/GZRSD
+quibbler/M
+quiche/SM
+quicken/RDG
+quickie/MS
+quicklime/SM
+quickness/MS
+quick/RNYTXPS
+quicksand/MS
+quicksilver/GDMS
+quickstep/SM
+quid/SM
+quiesce/D
+quiescence/MS
+quiescent/YP
+quieted/E
+quieten/SGD
+quieter/E
+quieter's
+quieting/E
+quietly/E
+quietness/MS
+quiets/E
+quietude/IEMS
+quietus/MS
+quiet/UTGPSDRY
+Quillan/M
+quill/GSDM
+Quill/M
+quilter/M
+quilting/M
+quilt/SZJGRDM
+quincentenary/M
+quince/SM
+Quincey/M
+quincy/M
+Quincy/M
+quinine/MS
+Quinlan/M
+Quinn/M
+quinquennial/Y
+quinsy/SM
+Quinta/M
+Quintana/M
+quintessence/SM
+quintessential/Y
+quintet/SM
+quintic
+quintile/SM
+Quintilian/M
+Quintilla/M
+quintillion/MH
+quintillionth/M
+Quintina/M
+Quintin/M
+Quint/M
+quint/MS
+Quinton/M
+quintuple/SDG
+quintuplet/MS
+Quintus/M
+quip/MS
+quipped
+quipper
+quipping
+quipster/SM
+quired/AI
+quire/MDSG
+quires/AI
+Quirinal/M
+quiring/IA
+quirkiness/SM
+quirk/SGMD
+quirky/PTR
+quirt/SDMG
+Quisling/M
+quisling/SM
+quitclaim/GDMS
+quit/DGS
+quite/SADG
+Quito/M
+quittance/SM
+quitter/SM
+quitting
+quiver/GDS
+quivering/Y
+quivery
+Quixote/M
+quixotic
+quixotically
+Quixotism/M
+quiz/M
+quizzed
+quizzer/SM
+quizzes
+quizzical/Y
+quizzing
+quo/H
+quoin/SGMD
+quoit/GSDM
+quondam
+quonset
+Quonset
+quorate/I
+quorum/MS
+quotability/S
+quota/MS
+quotation/SM
+quoter/M
+quote/UGSD
+quot/GDRB
+quotidian/S
+quotient/SM
+qwerty
+qwertys
+Rabat/M
+rabbet/GSMD
+Rabbi/M
+rabbi/MS
+rabbinate/MS
+rabbinic
+rabbinical/Y
+rabbiter/M
+rabbit/MRDSG
+rabble/GMRSD
+rabbler/M
+Rabelaisian
+Rabelais/M
+rabidness/SM
+rabid/YP
+rabies
+Rabi/M
+Rabin/M
+rabis
+Rab/M
+raccoon/SM
+racecourse/MS
+racegoers
+racehorse/SM
+raceme/MS
+race/MZGDRSJ
+racer/M
+racetrack/SMR
+raceway/SM
+Rachael/M
+Rachele/M
+Rachelle/M
+Rachel/M
+Rachmaninoff/M
+racialism/MS
+racialist/MS
+racial/Y
+racily
+Racine/M
+raciness/MS
+racism/S
+racist/MS
+racketeer/MDSJG
+racket/SMDG
+rackety
+rack/GDRMS
+raconteur/SM
+racoon's
+racquetball/S
+racquet's
+racy/RTP
+radarscope/MS
+radar/SM
+Radcliffe/M
+radded
+radder
+raddest
+Raddie/M
+radding
+Raddy/M
+radial/SY
+radiance/SM
+radian/SM
+radiant/YS
+radiate/XSDYVNG
+radiation/M
+radiative/Y
+radiator/MS
+radicalism/MS
+radicalization/S
+radicalize/GSD
+radicalness/M
+radical/SPY
+radices's
+radii/M
+radioactive/Y
+radioactivity/MS
+radioastronomical
+radioastronomy
+radiocarbon/MS
+radiochemical/Y
+radiochemistry/M
+radiogalaxy/S
+radiogram/SM
+radiographer/MS
+radiographic
+radiography/MS
+radioisotope/SM
+radiologic
+radiological/Y
+radiologist/MS
+radiology/MS
+radioman/M
+radiomen
+radiometer/SM
+radiometric
+radiometry/MS
+radionics
+radionuclide/M
+radiopasteurization
+radiophone/MS
+radiophysics
+radioscopy/SM
+radio/SMDG
+radiosonde/SM
+radiosterilization
+radiosterilized
+radiotelegraph
+radiotelegraphs
+radiotelegraphy/MS
+radiotelephone/SM
+radiotherapist/SM
+radiotherapy/SM
+radish/MS
+radium/MS
+radius/M
+radix/SM
+Rad/M
+radon/SM
+rad/S
+Raeann/M
+Rae/M
+RAF
+Rafaela/M
+Rafaelia/M
+Rafaelita/M
+Rafaellle/M
+Rafaello/M
+Rafael/M
+Rafa/M
+Rafe/M
+Raffaello/M
+Raffarty/M
+Rafferty/M
+raffia/SM
+raffishness/SM
+raffish/PY
+raffle/MSDG
+Raff/M
+Rafi/M
+Raf/M
+rafter/DM
+raft/GZSMDR
+raga/MS
+ragamuffin/MS
+ragbag/SM
+rage/MS
+raggedness/SM
+ragged/PRYT
+raggedy/TR
+ragging
+rag/GSMD
+raging/Y
+raglan/MS
+Ragnar/M
+Ragnarök
+ragout/SMDG
+ragtag/MS
+ragtime/MS
+ragweed/MS
+ragwort/M
+Rahal/M
+rah/DG
+Rahel/M
+rahs
+raider/M
+raid/MDRSGZ
+railbird/S
+rail/CDGS
+railer/SM
+railhead/SM
+railing/MS
+raillery/MS
+railroader/M
+railroading/M
+railroad/SZRDMGJ
+rail's
+railwaymen
+railway/MS
+raiment/SM
+Raimondo/M
+Raimund/M
+Raimundo/M
+Raina/M
+rainbow/MS
+raincloud/S
+raincoat/SM
+raindrop/SM
+Raine/MR
+Rainer/M
+rainfall/SM
+rainforest's
+rain/GSDM
+Rainier/M
+rainless
+rainmaker/SM
+rainmaking/MS
+rainproof/GSD
+rainstorm/SM
+rainwater/MS
+rainy/RT
+raise/DSRGZ
+raiser/M
+raising/M
+raisin/MS
+rajah/M
+rajahs
+Rajive/M
+raj/M
+Rakel/M
+rake/MGDRS
+raker/M
+rakishness/MS
+rakish/PY
+Raleigh/M
+Ralf/M
+Ralina/M
+rally/GSD
+Ralph/M
+Ralston/M
+Ra/M
+Ramada/M
+Ramadan/SM
+Ramakrishna/M
+Rama/M
+Raman/M
+Ramayana/M
+ramble/JRSDGZ
+rambler/M
+rambling/Y
+Rambo/M
+rambunctiousness/S
+rambunctious/PY
+ramekin/SM
+ramie/MS
+ramification/M
+ramify/XNGSD
+Ramirez/M
+Ramiro/M
+ramjet/SM
+Ram/M
+rammed
+ramming
+Ramo/MS
+Ramona/M
+Ramonda/M
+Ramon/M
+rampage/SDG
+rampancy/S
+rampant/Y
+rampart/SGMD
+ramp/GMDS
+ramrodded
+ramrodding
+ramrod/MS
+RAM/S
+Ramsay/M
+Ramses/M
+Ramsey/M
+ramshackle
+ram/SM
+rams/S
+ran/A
+Rana/M
+Rancell/M
+Rance/M
+rancher/M
+rancho/SM
+ranch/ZRSDMJG
+rancidity/MS
+rancidness/SM
+rancid/P
+rancorous/Y
+rancor/SM
+Randall/M
+Randal/M
+Randa/M
+Randee/M
+Randell/M
+Randene/M
+Randie/M
+Randi/M
+randiness/S
+Rand/M
+rand/MDGS
+Randolf/M
+Randolph/M
+randomization/SM
+randomize/SRDG
+randomness/SM
+random/PYS
+Randy/M
+randy/PRST
+Ranee/M
+ranee/SM
+ranged/C
+rangeland/S
+ranger/M
+ranges/C
+range/SM
+rang/GZDR
+ranginess/S
+ranging/C
+Rangoon/M
+rangy/RPT
+Rania/M
+Ranice/M
+Ranier/M
+Rani/MR
+Ranique/M
+rani's
+ranked/U
+ranker/M
+rank/GZTYDRMPJS
+Rankine/M
+ranking/M
+Rankin/M
+rankle/SDG
+rankness/MS
+Ranna/M
+ransacker/M
+ransack/GRDS
+Ransell/M
+ransomer/M
+Ransom/M
+ransom/ZGMRDS
+ranter/M
+rant/GZDRJS
+ranting/Y
+Raoul/M
+rapaciousness/MS
+rapacious/YP
+rapacity/MS
+rapeseed/M
+rape/SM
+Raphaela/M
+Raphael/M
+rapidity/MS
+rapidness/S
+rapid/YRPST
+rapier/SM
+rapine/SM
+rapist/MS
+rap/MDRSZG
+rapped
+rappelled
+rappelling
+rappel/S
+rapper/SM
+rapping/M
+rapporteur/SM
+rapport/SM
+rapprochement/SM
+rapscallion/MS
+raptness/S
+rapture/MGSD
+rapturousness/M
+rapturous/YP
+rapt/YP
+Rapunzel/M
+Raquela/M
+Raquel/M
+rarebit/MS
+rarefaction/MS
+rarefy/GSD
+rareness/MS
+rare/YTPGDRS
+rarity/SM
+Rasalgethi/M
+Rasalhague/M
+rascal/SMY
+rasher/M
+rashness/S
+rash/PZTYSR
+Rasia/M
+Rasla/M
+Rasmussen/M
+raspberry/SM
+rasper/M
+rasping/Y
+rasp/SGJMDR
+Rasputin/M
+raspy/RT
+Rastaban/M
+Rastafarian/M
+raster/MS
+Rastus/M
+ratchet/MDSG
+rateable
+rated/U
+rate/KNGSD
+ratepayer/SM
+rater/M
+rate's
+Ratfor/M
+rather
+Rather/M
+rathskeller/SM
+ratifier/M
+ratify/ZSRDGXN
+rating/M
+ratiocinate/VNGSDX
+ratiocination/M
+ratio/MS
+rationale/SM
+rationalism/SM
+rationalistic
+rationalist/S
+rationality/MS
+rationalization/SM
+rationalizer/M
+rationalize/ZGSRD
+rationalness/M
+rational/YPS
+ration/DSMG
+Ratliff/M
+ratlike
+ratline/SM
+rat/MDRSJZGB
+rattail
+rattan/MS
+ratted
+ratter/MS
+ratting
+rattlebrain/DMS
+rattle/RSDJGZ
+rattlesnake/MS
+rattletrap/MS
+rattling/Y
+rattly/TR
+rattrap/SM
+ratty/RT
+raucousness/SM
+raucous/YP
+Raul/M
+raunchily
+raunchiness/S
+raunchy/RTP
+ravage/GZRSD
+ravager/M
+raveling/S
+Ravel/M
+ravel/UGDS
+raven/JGMRDS
+Raven/M
+ravenous/YP
+raver/M
+rave/ZGDRSJ
+Ravid/M
+Ravi/M
+ravine/SDGM
+ravioli/SM
+ravisher/M
+ravishing/Y
+ravish/LSRDZG
+ravishment/SM
+Raviv/M
+Rawalpindi/M
+rawboned
+rawhide/SDMG
+Rawley/M
+Rawlings/M
+Rawlins/M
+Rawlinson/M
+rawness/SM
+raw/PSRYT
+Rawson/M
+Rayburn/M
+Raychel/M
+Raye/M
+ray/GSMD
+Rayleigh/M
+Ray/M
+Raymond/M
+Raymondville/M
+Raymund/M
+Raymundo/M
+Rayna/M
+Raynard/M
+Raynell/M
+Rayner/M
+Raynor/M
+rayon/SM
+Rayshell/M
+Raytheon/M
+raze/DRSG
+razer/M
+razorback/SM
+razorblades
+razor/MDGS
+razz/GDS
+razzmatazz/S
+Rb
+RBI/S
+RC
+RCA
+rcpt
+RCS
+rd
+RD
+RDA
+Rd/M
+reabbreviate
+reachability
+reachable/U
+reachably
+reached/U
+reacher/M
+reach/GRB
+reacquisition
+reactant/SM
+reacted/U
+reaction
+reactionary/SM
+reactivity
+readability/MS
+readable/P
+readably
+readdress/G
+Reade/M
+reader/M
+readership/MS
+Read/GM
+readied
+readies
+readily
+readinesses
+readiness/UM
+reading/M
+Reading/M
+read/JGZBR
+readopt/G
+readout/MS
+reads/A
+readying
+ready/TUPR
+Reagan/M
+Reagen/M
+realisms
+realism's
+realism/U
+realistically/U
+realistic/U
+realist/SM
+reality/USM
+realizability/MS
+realizableness/M
+realizable/SMP
+realizably/S
+realization/MS
+realized/U
+realize/JRSDBZG
+realizer/M
+realizes/U
+realizing/MY
+realm/M
+realness/S
+realpolitik/SM
+real/RSTP
+realtor's
+Realtor/S
+realty/SM
+Rea/M
+reamer/M
+ream/MDRGZ
+Reamonn/M
+reanimate
+reaper/M
+reappraise/G
+reap/SGZ
+rear/DRMSG
+rearguard/MS
+rearmost
+rearrange/L
+rearward/S
+reasonableness/SMU
+reasonable/UP
+reasonably/U
+Reasoner/M
+reasoner/SM
+reasoning/MS
+reasonless
+reasons
+reason/UBDMG
+reassess/GL
+reassuringly/U
+reattach/GSL
+reawakening/M
+Reba/M
+rebate/M
+Rebbecca/M
+Rebeca/M
+Rebecca's
+Rebecka/M
+Rebekah/M
+Rebeka/M
+Rebekkah/M
+rebeller
+rebellion/SM
+rebelliousness/MS
+rebellious/YP
+rebel/MS
+Rebe/M
+rebid
+rebidding
+rebind/G
+rebirth
+reboil/G
+rebook
+reboot/ZR
+rebound/G
+rebroadcast/MG
+rebuke/RSDG
+rebuking/Y
+rebus
+rebuttal/SM
+rebutting
+rec
+recalcitrance/SM
+recalcitrant/S
+recalibrate/N
+recantation/S
+recant/G
+recap
+recappable
+recapping
+recast/G
+recd
+rec'd
+recede
+receipt/SGDM
+receivable/S
+received/U
+receiver/M
+receivership/SM
+receive/ZGRSDB
+recency/M
+recension/M
+recentness/SM
+recent/YPT
+receptacle/SM
+receptionist/MS
+reception/MS
+receptiveness/S
+receptive/YP
+receptivity/S
+receptor/MS
+recessional/S
+recessionary
+recessiveness/M
+recessive/YPS
+recess/SDMVG
+rechargeable
+recheck/G
+recherché
+recherches
+recidivism/MS
+recidivist/MS
+Recife/M
+recipe/MS
+recipiency
+recipient/MS
+reciprocal/SY
+reciprocate/NGXVDS
+reciprocation/M
+reciprocity/MS
+recitalist/S
+recital/MS
+recitative/MS
+reciter/M
+recite/ZR
+recked
+recking
+recklessness/S
+reckless/PY
+reckoner/M
+reckoning/M
+reckon/SGRDJ
+reclaim/B
+reclamation/SM
+recliner/M
+recline/RSDZG
+recluse/MVNS
+reclusion/M
+recode/G
+recognizability
+recognizable/U
+recognizably
+recognize/BZGSRD
+recognizedly/S
+recognized/U
+recognizer/M
+recognizingly/S
+recognizing/UY
+recoilless
+recoinage
+recolor/GD
+recombinant
+recombine
+recommended/U
+recompense/GDS
+recompute/B
+reconciled/U
+reconciler/M
+reconcile/SRDGB
+reconditeness/M
+recondite/YP
+reconfigurability
+reconfigure/R
+reconnaissance/MS
+reconnect/R
+reconnoiter/GSD
+reconquer/G
+reconsecrate
+reconstitute
+reconstructed/U
+Reconstruction/M
+reconsult/G
+recontact/G
+recontaminate/N
+recontribute
+recook/G
+recopy/G
+recorded/AU
+records/A
+record/ZGJ
+recourse
+recoverability
+recoverable/U
+recover/B
+recovery/MS
+recreant/S
+recreational
+recriminate/GNVXDS
+recrimination/M
+recriminatory
+recross/G
+recrudesce/GDS
+recrudescence/MS
+recrudescent
+recruiter/M
+recruitment/MS
+recruit/ZSGDRML
+recrystallize
+rectal/Y
+rectangle/SM
+rectangular/Y
+recta's
+rectifiable
+rectification/M
+rectifier/M
+rectify/DRSGXZN
+rectilinear/Y
+rectitude/MS
+recto/MS
+rector/SM
+rectory/MS
+rectum/SM
+recumbent/Y
+recuperate/VGNSDX
+recuperation/M
+recur
+recurrence/MS
+recurrent
+recurse/NX
+recursion/M
+recusant/M
+recuse
+recyclable/S
+recycle/BZ
+redact/DGS
+redaction/SM
+redactor/MS
+redbird/SM
+redbreast/SM
+redbrick/M
+redbud/M
+redcap/MS
+redcoat/SM
+redcurrant/M
+redden/DGS
+redder
+reddest
+redding
+reddish/P
+Redd/M
+redeclaration
+redecorate
+redeemable/U
+redeem/BRZ
+redeemed/U
+redeemer/M
+Redeemer/M
+redemptioner/M
+redemption/RMS
+redemptive
+redeposit/M
+redetermination
+Redford/M
+Redgrave/M
+redhead/DRMS
+Redhook/M
+redial/G
+redirect/G
+redirection
+redlining/S
+Redmond/M
+redneck/SMD
+redness/MS
+redo/G
+redolence/MS
+redolent
+Redondo/M
+redouble/S
+redoubtably
+redound/GDS
+red/PYS
+redshift/S
+redskin/SM
+Redstone/M
+reduced/U
+reducer/M
+reduce/RSDGZ
+reducibility/M
+reducible
+reducibly
+reductionism/M
+reductionist/S
+reduction/SM
+reduct/V
+redundancy/SM
+redundant/Y
+redwood/SM
+redye
+redyeing
+Reeba/M
+Reebok/M
+Reece/M
+reecho/G
+reed/GMDR
+reediness/SM
+reeding/M
+Reed/M
+Reedville/M
+reedy/PTR
+reefer/M
+reef/GZSDRM
+reeker/M
+reek/GSR
+reeler/M
+reel's
+reel/USDG
+Ree/MDS
+Reena/M
+reenforcement
+reentrant
+Reese/M
+reestimate/M
+Reeta/M
+Reeva/M
+reeve/G
+Reeves
+reexamine
+refection/SM
+refectory/SM
+refer/B
+refereed/U
+refereeing
+referee/MSD
+reference/CGSRD
+referenced/U
+reference's
+referencing/U
+referendum/MS
+referentiality
+referential/YM
+referent/SM
+referral/SM
+referred
+referrer/S
+referring
+reffed
+reffing
+refile
+refinance
+refined/U
+refine/LZ
+refinement/MS
+refinish/G
+refit
+reflectance/M
+reflected/U
+reflectional
+reflection/SM
+reflectiveness/M
+reflective/YP
+reflectivity/M
+reflector/MS
+reflect/SDGV
+reflexion/MS
+reflexiveness/M
+reflexive/PSY
+reflexivity/M
+reflex/YV
+reflooring
+refluent
+reflux/G
+refocus/G
+refold/G
+reforestation
+reforge/G
+reformatory/SM
+reform/B
+reformed/U
+reformer/M
+reformism/M
+reformist/S
+refract/DGVS
+refractiveness/M
+refractive/PY
+refractometer/MS
+refractoriness/M
+refractory/PS
+refrain/DGS
+refreshed/U
+refreshing/Y
+refresh/LB
+refreshment/MS
+refrigerant/MS
+refrigerated/U
+refrigerate/XDSGN
+refrigeration/M
+refrigerator/MS
+refrozen
+refry/GS
+refugee/MS
+refuge/SDGM
+Refugio/M
+refulgence/SM
+refulgent
+refund/B
+refunder/M
+refurbish/L
+refurbishment/S
+refusal/SM
+refuse/R
+refuser/M
+refutation/MS
+refute/GZRSDB
+refuter/M
+ref/ZS
+reg
+regale/L
+regalement/S
+regal/GYRD
+regalia/M
+Regan/M
+regard/EGDS
+regardless/PY
+regather/G
+regatta/MS
+regency/MS
+regeneracy/MS
+regenerately
+regenerateness/M
+regenerate/U
+Regen/M
+reggae/SM
+Reggie/M
+Reggi/MS
+Reggy/M
+regicide/SM
+regime/MS
+regimen/MS
+regimental/S
+regimentation/MS
+regiment/SDMG
+Reginae
+Reginald/M
+Regina/M
+Reginauld/M
+Regine/M
+regionalism/MS
+regional/SY
+region/SM
+Regis/M
+register's
+register/UDSG
+registrable
+registrant/SM
+registrar/SM
+registration/AM
+registrations
+registry/MS
+Reg/MN
+regnant
+Regor/M
+regress/DSGV
+regression/MS
+regressiveness/M
+regressive/PY
+regressors
+regretfulness/M
+regretful/PY
+regret/S
+regrettable
+regrettably
+regretted
+regretting
+reground
+regroup/G
+regrow/G
+regularity/MS
+regularization/MS
+regularize/SDG
+regular/YS
+regulate/CSDXNG
+regulated/U
+regulation/M
+regulative
+regulator/SM
+regulatory
+Regulus/M
+regurgitate/XGNSD
+regurgitation/M
+rehabbed
+rehabbing
+rehabilitate/SDXVGN
+rehabilitation/M
+rehab/S
+rehang/G
+rehear/GJ
+rehearsal/SM
+rehearse
+rehearsed/U
+rehearser/M
+rehears/R
+reheat/G
+reheating/M
+Rehnquist
+rehydrate
+Reichenberg/M
+Reich/M
+Reichstags
+Reichstag's
+Reidar/M
+Reider/M
+Reid/MR
+reign/MDSG
+Reiko/M
+Reilly/M
+reimburse/GSDBL
+reimbursement/MS
+Reinald/M
+Reinaldo/MS
+Reina/M
+reindeer/M
+Reine/M
+reinforced/U
+reinforce/GSRDL
+reinforcement/MS
+reinforcer/M
+rein/GDM
+Reinhard/M
+Reinhardt/M
+Reinhold/M
+Reinold/M
+reinstate/L
+reinstatement/MS
+reinsurance
+Reinwald/M
+reissue
+REIT
+reiterative/SP
+rejecter/M
+rejecting/Y
+rejection/SM
+rejector/MS
+reject/RDVGS
+rejigger
+rejoice/RSDJG
+rejoicing/Y
+rejoinder/SM
+rejuvenate/NGSDX
+rejuvenatory
+relapse
+relatedly
+relatedness/MS
+related/U
+relater/M
+relate/XVNGSZ
+relational/Y
+relation/M
+relationship/MS
+relativeness/M
+relative/SPY
+relativism/M
+relativistic
+relativistically
+relativist/MS
+relativity/MS
+relator's
+relaxant/SM
+relaxation/MS
+relaxedness/M
+relaxed/YP
+relax/GZD
+relaxing/Y
+relay/GDM
+relearn/G
+releasable/U
+release/B
+released/U
+relenting/U
+relentlessness/SM
+relentless/PY
+relent/SDG
+relevance/SM
+relevancy/MS
+relevant/Y
+reliability/UMS
+reliables
+reliable/U
+reliably/U
+reliance/MS
+reliant/Y
+relicense/R
+relic/MS
+relict/C
+relict's
+relief/M
+relievedly
+relieved/U
+reliever/M
+relieve/RSDZG
+religionists
+religion/SM
+religiosity/M
+religiousness/MS
+religious/PY
+relink/G
+relinquish/GSDL
+relinquishment/SM
+reliquary/MS
+relish/GSD
+relive/GB
+reload/GR
+relocate/B
+reluctance/MS
+reluctant/Y
+rel/V
+rely/DG
+rem
+Re/M
+remade/S
+remainder/SGMD
+remain/GD
+remake/M
+remand/DGS
+remap
+remapping
+remarkableness/S
+remarkable/U
+remarkably
+remark/BG
+remarked/U
+Remarque/M
+rematch/G
+Rembrandt/M
+remeasure/D
+remediableness/M
+remediable/P
+remedy/SDMG
+remembered/U
+rememberer/M
+remember/GR
+remembrance/MRS
+remembrancer/M
+Remington/M
+reminisce/GSD
+reminiscence/SM
+reminiscent/Y
+remissness/MS
+remiss/YP
+remit/S
+remittance/MS
+remitted
+remitting/U
+Rem/M
+remnant/MS
+remodel/G
+remolding
+remonstrant/MS
+remonstrate/SDXVNG
+remonstration/M
+remonstrative/Y
+remorsefulness/M
+remorseful/PY
+remorselessness/MS
+remorseless/YP
+remorse/SM
+remoteness/MS
+remote/RPTY
+remoulds
+removal/MS
+REM/S
+remunerated/U
+remunerate/VNGXSD
+remuneration/M
+remunerativeness/M
+remunerative/YP
+Remus/M
+Remy/M
+Renado/M
+Renae/M
+renaissance/S
+Renaissance/SM
+renal
+Renaldo/M
+Rena/M
+Renard/M
+Renascence/SM
+Renata/M
+Renate/M
+Renato/M
+renaturation
+Renaud/M
+Renault/MS
+rend
+renderer/M
+render/GJRD
+rendering/M
+rendezvous/DSMG
+rendition/GSDM
+rend/RGZS
+Renee/M
+renegade/SDMG
+renege/GZRSD
+reneger/M
+Renelle/M
+Renell/M
+Rene/M
+renewal/MS
+renew/BG
+renewer/M
+Renie/M
+rennet/MS
+Rennie/M
+rennin/SM
+Renoir/M
+Reno/M
+renounce/LGRSD
+renouncement/MS
+renouncer/M
+renovate/NGXSD
+renovation/M
+renovator/SM
+renown/SGDM
+Rensselaer/M
+rentaller
+rental/SM
+renter/M
+rent/GZMDRS
+renumber/G
+renumeration
+renunciate/VNX
+renunciation/M
+Renville/M
+reoccupy/G
+reopen/G
+reorganized/U
+repack/G
+repairable/U
+repair/BZGR
+repairer/M
+repairman/M
+repairmen
+repairs/E
+repaper
+reparable
+reparation/SM
+reparteeing
+repartee/MDS
+repartition/Z
+repast/G
+repatriate/SDXNG
+repave
+repealer/M
+repeal/GR
+repeatability/M
+repeatable/U
+repeatably
+repeated/Y
+repeater/M
+repeat/RDJBZG
+repelled
+repellent/SY
+repelling/Y
+repel/S
+repentance/SM
+repentant/SY
+repent/RDG
+repertoire/SM
+repertory/SM
+repetition
+repetitiousness/S
+repetitious/YP
+repetitiveness/MS
+repetitive/PY
+repine/R
+repiner/M
+replace/RL
+replay/GM
+replenish/LRSDG
+replenishment/S
+repleteness/MS
+replete/SDPXGN
+repletion/M
+replica/SM
+replicate/SDVG
+replicator/S
+replug
+reply/X
+Rep/M
+repopulate
+reported/Y
+reportorial/Y
+reposeful
+repose/M
+repository/MS
+reprehend/GDS
+reprehensibility/MS
+reprehensibleness/M
+reprehensible/P
+reprehensibly
+reprehension/MS
+representable/U
+representational/Y
+representativeness/M
+Representative/S
+representative/SYMP
+representativity
+represented/U
+represent/GB
+repression/SM
+repressiveness/M
+repressive/YP
+repress/V
+reprieve/GDS
+reprimand/SGMD
+reprint/M
+reprisal/MS
+reproacher/M
+reproachfulness/M
+reproachful/YP
+reproach/GRSDB
+reproaching/Y
+reprobate/N
+reprocess/G
+reproducibility/MS
+reproducible/S
+reproducibly
+reproductive/S
+reproof/G
+reprove/R
+reproving/Y
+rep/S
+reptile/SM
+reptilian/S
+Republicanism/S
+republicanism/SM
+Republican/S
+republic/M
+republish/G
+repudiate/XGNSD
+repudiation/M
+repudiator/S
+repugnance/MS
+repugnant/Y
+repulse/VNX
+repulsion/M
+repulsiveness/MS
+repulsive/PY
+reputability/SM
+reputably/E
+reputation/SM
+reputed/Y
+repute/ESB
+reputing
+requested/U
+request/G
+Requiem/MS
+requiem/SM
+require/LR
+requirement/MS
+requisiteness/M
+requisite/PNXS
+requisitioner/M
+requisition/GDRM
+requital/MS
+requited/U
+requiter/M
+requite/RZ
+reread/G
+rerecord/G
+rerouteing
+rerunning
+res/C
+rescale
+rescind/SDRG
+rescission/SM
+rescue/GZRSD
+reseal/BG
+research/MB
+reselect/G
+resemblant
+resemble/DSG
+resend/G
+resent/DSLG
+resentfulness/SM
+resentful/PY
+resentment/MS
+reserpine/MS
+reservation/MS
+reservednesses
+reservedness/UM
+reserved/UYP
+reservist/SM
+reservoir/MS
+reset/RDG
+resettle/L
+reshipping
+reshow/G
+reshuffle/M
+reside/G
+residence/MS
+residency/SM
+residential/Y
+resident/SM
+resider/M
+residua
+residual/YS
+residuary
+residue/SM
+residuum/M
+resignation/MS
+resigned/YP
+resilience/MS
+resiliency/S
+resilient/Y
+resin/D
+resinlike
+resinous
+resiny
+resistance/SM
+Resistance/SM
+resistantly
+resistants
+resistant/U
+resisted/U
+resistible
+resistibly
+resisting/U
+resistiveness/M
+resistive/PY
+resistivity/M
+resistless
+resistor/MS
+resist/RDZVGS
+resize/G
+resold
+resole/G
+resoluble
+resoluteness/MS
+resolute/PYTRV
+resolvability/M
+resolvable/U
+resolved/U
+resolvent
+resonance/SM
+resonant/YS
+resonate/DSG
+resonator/MS
+resorption/MS
+resort/R
+resound/G
+resourcefulness/SM
+resourceful/PY
+resp
+respectability/SM
+respectable/SP
+respectably
+respect/BSDRMZGV
+respected/E
+respectful/EY
+respectfulness/SM
+respecting/E
+respectiveness/M
+respective/PY
+respect's/E
+respects/E
+respell/G
+respiration/MS
+respirator/SM
+respiratory/M
+resplendence/MS
+resplendent/Y
+respondent/MS
+respond/SDRZG
+responser/M
+response/RSXMV
+responsibility/MS
+responsibleness/M
+responsible/P
+responsibly
+responsiveness/MSU
+responsive/YPU
+respray/G
+restart/B
+restate/L
+restaurant/SM
+restaurateur/SM
+rest/DRSGVM
+rested/U
+rester/M
+restfuller
+restfullest
+restfulness/MS
+restful/YP
+restitution/SM
+restiveness/SM
+restive/PY
+restlessness/MS
+restless/YP
+restorability
+Restoration/M
+restoration/MS
+restorative/PYS
+restorer/M
+restore/Z
+restrained/UY
+restraint/MS
+restrict/DVGS
+restricted/YU
+restriction/SM
+restrictively
+restrictiveness/MS
+restrictives
+restrictive/U
+restroom/SM
+restructurability
+restructure
+rest's/U
+rests/U
+restudy/M
+restyle
+resubstitute
+resultant/YS
+result/SGMD
+resume/SDBG
+resumption/MS
+resurface
+resurgence/MS
+resurgent
+resurrect/GSD
+resurrection/SM
+resurvey/G
+resuscitate/XSDVNG
+resuscitation/M
+resuscitator/MS
+retail/Z
+retainer/M
+retain/LZGSRD
+retake
+retaliate/VNGXSD
+retaliation/M
+retaliatory
+Reta/M
+retardant/SM
+retardation/SM
+retarder/M
+retard/ZGRDS
+retch/SDG
+retention/SM
+retentiveness/S
+retentive/YP
+retentivity/M
+retest/G
+Retha/M
+rethought
+reticence/S
+reticent/Y
+reticle/SM
+reticular
+reticulate/GNYXSD
+reticulation/M
+reticule/MS
+reticulum/M
+retinal/S
+retina/SM
+retinue/MS
+retiredness/M
+retiree/MS
+retire/L
+retirement/SM
+retiring/YP
+retort/GD
+retract/DG
+retractile
+retrench/L
+retrenchment/MS
+retributed
+retribution/MS
+retributive
+retrieval/SM
+retriever/M
+retrieve/ZGDRSB
+retroactive/Y
+retrofire/GMSD
+retrofit/S
+retrofitted
+retrofitting
+retroflection
+retroflex/D
+retroflexion/M
+retrogradations
+retrograde/GYDS
+retrogression/MS
+retrogressive/Y
+retrogress/SDVG
+retrorocket/MS
+retro/SM
+retrospection/MS
+retrospective/SY
+retrospect/SVGMD
+retrovirus/S
+retrovision
+retry/G
+retsina/SM
+returnable/S
+returned/U
+returnee/SM
+retype
+Reube/M
+Reuben/M
+Reub/NM
+Reunion/M
+reuse/B
+Reuters
+Reuther/M
+reutilization
+Reuven/M
+Reva/M
+revanchist
+revealed/U
+revealingly
+revealing/U
+reveal/JBG
+reveille/MS
+revelation/MS
+Revelation/MS
+revelatory
+revelry/MS
+revel/SJRDGZ
+revenge/MGSRD
+revenger/M
+revenuer/M
+revenue/ZR
+reverberant
+reverberate/XVNGSD
+reverberation/M
+revere/GSD
+Revere/M
+reverencer/M
+reverence/SRDGM
+Reverend
+reverend/SM
+reverential/Y
+reverent/Y
+reverie/SM
+reversal/MS
+reverser/M
+reverse/Y
+reversibility/M
+reversible/S
+reversibly
+reversioner/M
+reversion/R
+revers/M
+reverter/M
+revertible
+revert/RDVGS
+revet/L
+revetment/SM
+review/G
+revile/GZSDL
+revilement/MS
+reviler/M
+revise/BRZ
+revised/U
+revisionary
+revisionism/SM
+revisionist/SM
+revitalize/ZR
+revivalism/MS
+revivalist/MS
+revival/SM
+reviver/M
+revive/RSDG
+revivification/M
+revivify/X
+Revkah/M
+Revlon/M
+Rev/M
+revocable
+revoke/GZRSD
+revolter/M
+revolt/GRD
+revolting/Y
+revolutionariness/M
+revolutionary/MSP
+revolutionist/MS
+revolutionize/GDSRZ
+revolutionizer/M
+revolution/SM
+revolve/BSRDZJG
+revolver/M
+revue/MS
+revulsion/MS
+revved
+revving
+rev/ZM
+rewarded/U
+rewarding/Y
+rewarm/G
+reweave
+rewedding
+reweigh/G
+rewind/BGR
+rewire/G
+rework/G
+rexes
+Rex/M
+Reyes
+Reykjavik/M
+re/YM
+Rey/M
+Reynaldo/M
+Reyna/M
+Reynard/M
+Reynold/SM
+rezone
+Rf
+RF
+RFC
+RFD
+R/G
+rhapsodic
+rhapsodical
+rhapsodize/GSD
+rhapsody/SM
+Rhea/M
+rhea/SM
+Rheba/M
+Rhee/M
+Rheims/M
+Rheinholdt/M
+Rhenish
+rhenium/MS
+rheology/M
+rheostat/MS
+rhesus/S
+Rheta/M
+rhetorical/YP
+rhetorician/MS
+rhetoric/MS
+Rhetta/M
+Rhett/M
+rheumatically
+rheumatic/S
+rheumatics/M
+rheumatism/SM
+rheumatoid
+rheum/MS
+rheumy/RT
+Rhiamon/M
+Rhianna/M
+Rhiannon/M
+Rhianon/M
+Rhinelander/M
+Rhineland/RM
+Rhine/M
+rhinestone/SM
+rhinitides
+rhinitis/M
+rhinoceros/MS
+rhino/MS
+rhinotracheitis
+rhizome/MS
+Rh/M
+Rhoda/M
+Rhodes
+Rhodesia/M
+Rhodesian/S
+Rhodia/M
+Rhodie/M
+rhodium/MS
+rhododendron/SM
+rhodolite/M
+rhodonite/M
+Rhody/M
+rhombic
+rhomboidal
+rhomboid/SM
+rhombus/SM
+rho/MS
+Rhona/M
+Rhonda/M
+Rhone
+rhubarb/MS
+rhyme/DSRGZM
+rhymester/MS
+Rhys/M
+rhythmical/Y
+rhythmic/S
+rhythmics/M
+rhythm/MS
+RI
+rial/MS
+Riane/M
+Riannon/M
+Rianon/M
+ribaldry/MS
+ribald/S
+ribbed
+Ribbentrop/M
+ribber/S
+ribbing/M
+ribbon/DMSG
+ribcage
+rib/MS
+riboflavin/MS
+ribonucleic
+ribosomal
+ribosome/MS
+Rica/M
+Rican/SM
+Ricard/M
+Ricardo/M
+Ricca/M
+Riccardo/M
+rice/DRSMZG
+Rice/M
+ricer/M
+Richard/MS
+Richardo/M
+Richardson/M
+Richart/M
+Richelieu/M
+richen/DG
+Richey/M
+Richfield/M
+Richie/M
+Richland/M
+Rich/M
+Richmond/M
+Richmound/M
+richness/MS
+Richter/M
+Richthofen/M
+Richy/M
+rich/YNSRPT
+Rici/M
+Rickard/M
+Rickenbacker/M
+Rickenbaugh/M
+Rickert/M
+rickets/M
+rickety/RT
+Rickey/M
+rick/GSDM
+Rickie/M
+Ricki/M
+Rick/M
+Rickover/M
+rickrack/MS
+rickshaw/SM
+Ricky/M
+Ric/M
+ricochet/GSD
+Rico/M
+Ricoriki/M
+ricotta/MS
+riddance/SM
+ridden
+ridding
+riddle/GMRSD
+Riddle/M
+ride/CZSGR
+Ride/M
+rider/CM
+riderless
+ridership/S
+ridge/DSGM
+Ridgefield/M
+ridgepole/SM
+Ridgway/M
+ridgy/RT
+ridicule/MGDRS
+ridiculer/M
+ridiculousness/MS
+ridiculous/PY
+riding/M
+rid/ZGRJSB
+Riemann/M
+Riesling/SM
+rife/RT
+riff/GSDM
+riffle/SDG
+riffraff/SM
+rifled/U
+rifle/GZMDSR
+rifleman/M
+riflemen
+rifler/M
+rifling/M
+rift/GSMD
+Riga/M
+rigamarole's
+rigatoni/M
+Rigel/M
+rigged
+rigger/SM
+rigging/MS
+Riggs/M
+righteousnesses/U
+righteousness/MS
+righteous/PYU
+rightfulness/MS
+rightful/PY
+rightism/SM
+rightist/S
+rightmost
+rightness/MS
+Right/S
+right/SGTPYRDN
+rightsize/SDG
+rights/M
+rightward/S
+rigidify/S
+rigidity/S
+rigidness/S
+rigid/YP
+rigmarole/MS
+rig/MS
+Rigoberto/M
+Rigoletto/M
+rigor/MS
+rigorousness/S
+rigorous/YP
+Riki/M
+Rikki/M
+Rik/M
+rile/DSG
+Riley/M
+Rilke/M
+rill/GSMD
+Rimbaud/M
+rime/MS
+rimer/M
+rim/GSMDR
+rimless
+rimmed
+rimming
+Rinaldo/M
+Rina/M
+rind/MDGS
+Rinehart/M
+ringer/M
+ring/GZJDRM
+ringing/Y
+ringleader/MS
+ringlet/SM
+ringlike
+Ringling/M
+Ring/M
+ringmaster/MS
+Ringo/M
+ringside/ZMRS
+ringworm/SM
+rink/GDRMS
+rinse/DSRG
+Riobard/M
+Rio/MS
+Riordan/M
+rioter/M
+riotousness/M
+riotous/PY
+riot/SMDRGZJ
+RIP
+riparian/S
+ripcord/SM
+ripened/U
+ripenesses
+ripeness/UM
+ripen/RDG
+ripe/PSY
+riper/U
+ripest/U
+Ripley/M
+Rip/M
+rip/NDRSXTG
+ripoff/S
+riposte/SDMG
+ripped
+ripper/SM
+ripping
+rippler/M
+ripple/RSDGM
+ripply/TR
+ripsaw/GDMS
+riptide/SM
+Risa/M
+RISC
+risen
+riser/M
+rise/RSJZG
+risibility/SM
+risible/S
+rising/M
+risker/M
+risk/GSDRM
+riskily
+riskiness/MS
+risky/RTP
+risotto/SM
+risqué
+rissole/M
+Ritalin
+Rita/M
+Ritchie/M
+rite/DSM
+Ritter/M
+ritualism/SM
+ritualistic
+ritualistically
+ritualized
+ritual/MSY
+Ritz/M
+ritzy/TR
+rivaled/U
+Rivalee/M
+rivalry/MS
+rival/SGDM
+Riva/MS
+rive/CSGRD
+Rivera/M
+riverbank/SM
+riverbed/S
+riverboat/S
+river/CM
+riverfront
+riverine
+Rivers
+Riverside/M
+riverside/S
+Riverview/M
+riveter/M
+rivet/GZSRDM
+riveting/Y
+Riviera/MS
+Rivi/M
+Rivkah/M
+rivulet/SM
+Rivy/M
+riv/ZGNDR
+Riyadh/M
+riyal/SM
+rm
+RMS
+RN
+RNA
+Rn/M
+roach/GSDM
+Roach/M
+roadbed/MS
+roadblock/SMDG
+roadhouse/SM
+roadie/S
+roadkill/S
+road/MIS
+roadrunner/MS
+roadshow/S
+roadside/S
+roadsigns
+roadster/SM
+roadsweepers
+roadway/SM
+roadwork/SM
+roadworthy
+roam/DRGZS
+Roana/M
+Roanna/M
+Roanne/M
+Roanoke/M
+roan/S
+roar/DRSJGZ
+roarer/M
+roaring/T
+Roarke/M
+roaster/M
+roast/SGJZRD
+robbed
+robber/SM
+Robbert/M
+robbery/SM
+Robbie/M
+Robbi/M
+robbing
+Robbin/MS
+Robb/M
+Robby/M
+Robbyn/M
+robe/ESDG
+Robena/M
+Robenia/M
+Robers/M
+Roberson/M
+Roberta/M
+Robert/MS
+Roberto/M
+Robertson/SM
+robe's
+Robeson/M
+Robespierre/M
+Robina/M
+Robinet/M
+Robinetta/M
+Robinette/M
+Robinett/M
+Robinia/M
+Robin/M
+robin/MS
+Robinson/M
+Robinsonville/M
+Robles/M
+Rob/MZ
+robotic/S
+robotism
+robotize/GDS
+robot/MS
+rob/SDG
+Robson/M
+Robt/M
+robustness/SM
+robust/RYPT
+Roby/M
+Robyn/M
+Rocco/M
+Rocha/M
+Rochambeau/M
+Rochella/M
+Rochelle/M
+Rochell/M
+Roche/M
+Rochester/M
+Rochette/M
+Roch/M
+rockabilly/MS
+rockabye
+Rockaway/MS
+rockbound
+Rockefeller/M
+rocker/M
+rocketry/MS
+rocket/SMDG
+Rockey/M
+rockfall/S
+Rockford/M
+rock/GZDRMS
+Rockie/M
+rockiness/MS
+Rockland/M
+Rock/M
+Rockne/M
+Rockville/M
+Rockwell/M
+Rocky/SM
+rocky/SRTP
+rococo/MS
+Roda/M
+rodded
+Roddenberry/M
+rodder
+Roddie/M
+rodding
+Rodd/M
+Roddy/M
+rodent/MS
+rodeo/SMDG
+Roderich/M
+Roderick/M
+Roderic/M
+Roderigo/M
+rode/S
+Rodger/M
+Rodge/ZMR
+Rodie/M
+Rodi/M
+Rodina/M
+Rodin/M
+Rod/M
+Rodney/M
+Rodolfo/M
+Rodolphe/M
+Rodolph/M
+Rodrick/M
+Rodrigo/M
+Rodriguez/M
+Rodrique/M
+Rodriquez/M
+rod/SGMD
+roebuck/SM
+Roentgen's
+roentgen/SM
+roe/SM
+ROFL
+Rogelio/M
+roger/GSD
+Rogerio/M
+Roger/M
+Roget/M
+Rog/MRZ
+rogued/K
+rogue/GMDS
+roguery/MS
+rogues/K
+roguing/K
+roguishness/SM
+roguish/PY
+roil/SGD
+Roi/SM
+roisterer/M
+roister/SZGRD
+Rojas/M
+Roland/M
+Rolando/M
+Roldan/M
+role/MS
+Roley/M
+Rolfe/M
+Rolf/M
+Rolland/M
+rollback/SM
+rolled/A
+Rollerblade/S
+rollerskating
+roller/SM
+rollick/DGS
+rollicking/Y
+Rollie/M
+rolling/S
+Rollin/SM
+Rollo/M
+rollover/S
+roll/UDSG
+Rolodex
+Rolph/M
+Rolvaag/M
+ROM
+romaine/MS
+Romain/M
+Roma/M
+romancer/M
+romance/RSDZMG
+Romanesque/S
+Romania/M
+Romanian/SM
+Romano/MS
+Romanov/M
+roman/S
+Romansh/M
+Romans/M
+Roman/SM
+romantically/U
+romanticism/MS
+Romanticism/S
+romanticist/S
+romanticize/SDG
+romantic/MS
+Romany/SM
+Romeo/MS
+romeo/S
+Romero/M
+Rome/SM
+Rommel/M
+Romney/M
+Romola/M
+Romona/M
+Romonda/M
+romper/M
+romp/GSZDR
+Rom/SM
+Romulus/M
+Romy/M
+Ronalda/M
+Ronald/M
+Rona/M
+Ronda/M
+rondo/SM
+Ronica/M
+Ron/M
+Ronna/M
+Ronnica/M
+Ronnie/M
+Ronni/M
+Ronny/M
+Ronstadt/M
+Rontgen
+Roobbie/M
+rood/MS
+roof/DRMJGZS
+roofer/M
+roofgarden
+roofing/M
+roofless
+rooftop/S
+rookery/MS
+rook/GDMS
+rookie/SRMT
+roomer/M
+roomette/SM
+roomful/MS
+roominess/MS
+roommate/SM
+room/MDRGZS
+roomy/TPSR
+Rooney/M
+Rooseveltian
+Roosevelt/M
+rooster/M
+roost/SGZRDM
+rooted/P
+rooter/M
+rootlessness/M
+rootless/P
+rootlet/SM
+Root/M
+root/MGDRZS
+rootstock/M
+rope/DRSMZG
+roper/M
+roping/M
+Roquefort/MS
+Roquemore/M
+Rora/M
+Rorie/M
+Rori/M
+Rorke/M
+Rorschach
+Rory/M
+Rosabella/M
+Rosabelle/M
+Rosabel/M
+Rosaleen/M
+Rosales/M
+Rosalia/M
+Rosalie/M
+Rosalinda/M
+Rosalinde/M
+Rosalind/M
+Rosaline/M
+Rosalynd/M
+Rosalyn/M
+Rosa/M
+Rosamond/M
+Rosamund/M
+Rosana/M
+Rosanna/M
+Rosanne/M
+Rosario/M
+rosary/SM
+Roscoe/M
+Rosco/M
+Roseanna/M
+Roseanne/M
+Roseann/M
+roseate/Y
+Roseau
+rosebud/MS
+rosebush/SM
+Rosecrans/M
+Roseland/M
+Roselia/M
+Roseline/M
+Roselin/M
+Rosella/M
+Roselle/M
+Rose/M
+Rosemaria/M
+Rosemarie/M
+Rosemary/M
+rosemary/MS
+rose/MGDS
+Rosemonde/M
+Rosenberg/M
+Rosenblum/M
+Rosendo/M
+Rosene/M
+Rosen/M
+Rosenthal/M
+Rosenzweig/M
+Rosetta/M
+Rosette/M
+rosette/SDMG
+rosewater
+rosewood/SM
+Roshelle/M
+Rosicrucian/M
+Rosie/M
+rosily
+Rosina/M
+rosiness/MS
+rosin/SMDG
+Rosita/M
+Roslyn/M
+Rosmunda/M
+Ros/N
+Ross
+Rossetti/M
+Rossie/M
+Rossi/M
+Rossini/M
+Rossy/M
+Rostand/M
+roster/DMGS
+Rostov/M
+rostra's
+rostrum/SM
+Roswell/M
+Rosy/M
+rosy/RTP
+rota/MS
+Rotarian/SM
+rotary/S
+rotated/U
+rotate/VGNXSD
+rotational/Y
+rotation/M
+rotative/Y
+rotator/SM
+rotatory
+ROTC
+rote/MS
+rotgut/MS
+Roth/M
+Rothschild/M
+rotisserie/MS
+rotogravure/SM
+rotor/MS
+rototill/RZ
+rot/SDG
+rotted
+rottenness/S
+rotten/RYSTP
+Rotterdam/M
+rotter/M
+rotting
+rotunda/SM
+rotundity/S
+rotundness/S
+rotund/SDYPG
+Rouault/M
+roué/MS
+rouge/GMDS
+roughage/SM
+roughen/DG
+rougher/M
+roughhouse/GDSM
+roughish
+roughneck/MDSG
+roughness/MS
+roughs
+roughshod
+rough/XPYRDNGT
+roulette/MGDS
+roundabout/PSM
+roundedness/M
+rounded/P
+roundelay/SM
+roundels
+rounder/M
+roundhead/D
+roundheadedness/M
+roundheaded/P
+roundhouse/SM
+roundish
+roundness/MS
+roundoff
+roundup/MS
+roundworm/MS
+round/YRDSGPZT
+Rourke/M
+rouse/DSRG
+rouser/M
+Rousseau/M
+roustabout/SM
+roust/SGD
+route/ASRDZGJ
+router/M
+route's
+rout/GZJMDRS
+routine/SYM
+routing/M
+routinize/GSD
+Rouvin/M
+rover/M
+Rover/M
+rove/ZGJDRS
+roving/M
+Rowan/M
+rowboat/SM
+rowdily
+rowdiness/MS
+rowdyism/MS
+rowdy/PTSR
+rowel/DMSG
+Rowe/M
+Rowena/M
+rowen/M
+Rowen/M
+rower/M
+Rowland/M
+Rowley/M
+Row/MN
+Rowney/M
+row/SJZMGNDR
+Roxana/M
+Roxane/M
+Roxanna/M
+Roxanne/M
+Roxie/M
+Roxi/M
+Roxine/M
+Roxy/M
+royalist/SM
+Royall/M
+Royal/M
+royal/SY
+royalty/MS
+Royce/M
+Roy/M
+Rozalie/M
+Rozalin/M
+Rozamond/M
+Rozanna/M
+Rozanne/M
+Rozele/M
+Rozella/M
+Rozelle/M
+Roze/M
+Rozina/M
+Roz/M
+RP
+rpm
+RPM
+rps
+RR
+Rriocard/M
+rs
+r's
+R's
+RSFSR
+RSI
+RSV
+RSVP
+RSX
+rt
+rte
+Rte
+RTFM
+r/TGVJ
+Rubaiyat/M
+rubato/MS
+rubbed
+rubberize/GSD
+rubberneck/DRMGSZ
+rubber/SDMG
+rubbery/TR
+rubbing/M
+rubbish/DSMG
+rubbishy
+rubble/GMSD
+rubdown/MS
+rubella/MS
+Rube/M
+Ruben/MS
+rube/SM
+Rubetta/M
+Rubia/M
+Rubicon/SM
+rubicund
+rubidium/SM
+Rubie/M
+Rubik/M
+Rubi/M
+Rubina/M
+Rubin/M
+Rubinstein/M
+ruble/MS
+rubout
+rubric/MS
+rub/S
+Ruby/M
+ruby/MTGDSR
+Ruchbah/M
+ruck/M
+rucksack/SM
+ruckus/SM
+ruction/SM
+rudderless
+rudder/MS
+Ruddie/M
+ruddiness/MS
+Rudd/M
+Ruddy/M
+ruddy/PTGRSD
+rudeness/MS
+rude/PYTR
+Rudie/M
+Rudiger/M
+rudimentariness/M
+rudimentary/P
+rudiment/SM
+Rudolf/M
+Rudolfo/M
+Rudolph/M
+Rudyard/M
+Rudy/M
+ruefulness/S
+rueful/PY
+rue/GDS
+Rufe/M
+ruff/GSYDM
+ruffian/GSMDY
+ruffled/U
+ruffler/M
+ruffle/RSDG
+ruffly/TR
+Rufus/M
+Rugby's
+rugby/SM
+ruggedness/S
+rugged/PYRT
+Ruggiero/M
+rugging
+rug/MS
+Ruhr/M
+ruination/MS
+ruiner/M
+ruin/MGSDR
+ruinousness/M
+ruinous/YP
+Ruiz/M
+rulebook/S
+ruled/U
+rule/MZGJDRS
+ruler/GMD
+ruling/M
+Rumanian's
+Rumania's
+rumba/GDMS
+rumble/JRSDG
+rumbler/M
+rumbustious
+rumen/M
+Rumford/M
+Ru/MH
+ruminant/YMS
+ruminate/VNGXSD
+ruminative/Y
+rummage/GRSD
+rummager/M
+Rummel/M
+rummer
+rummest
+rummy/TRSM
+rumored/U
+rumorer/M
+rumormonger/SGMD
+rumor/ZMRDSG
+Rumpelstiltskin/M
+rump/GMYDS
+rumple/SDG
+rumply/TR
+rumpus/SM
+rum/XSMN
+runabout/SM
+runaround/S
+run/AS
+runaway/S
+rundown/SM
+rune/MS
+Runge/M
+rung/MS
+runic
+runlet/SM
+runnable
+runnel/SM
+runner/MS
+running/S
+Runnymede/M
+runny/RT
+runoff/MS
+runtime
+runtiness/M
+runt/MS
+runty/RPT
+runway/MS
+Runyon/M
+rupee/MS
+Ruperta/M
+Rupert/M
+Ruperto/M
+rupiah/M
+rupiahs
+Ruppert/M
+Ruprecht/M
+rupture/GMSD
+rurality/M
+rural/Y
+Rurik/M
+ruse/MS
+Rushdie/M
+rush/DSRGZ
+rusher/M
+rushes/I
+rushing/M
+Rush/M
+Rushmore/M
+rushy/RT
+Ruskin/M
+rusk/MS
+Russell/M
+Russel/M
+russet/MDS
+russetting
+Russia/M
+Russian/SM
+Russo/M
+Russ/S
+Rustbelt/M
+rustically
+rusticate/GSD
+rustication/M
+rusticity/S
+rustic/S
+Rustie/M
+rustiness/MS
+Rustin/M
+rustler/M
+rustle/RSDGZ
+rust/MSDG
+rustproof/DGS
+Rusty/M
+rusty/XNRTP
+rutabaga/SM
+Rutger/SM
+Ruthanne/M
+Ruthann/M
+Ruthe/M
+ruthenium/MS
+rutherfordium/SM
+Rutherford/M
+Ruthie/M
+Ruthi/M
+ruthlessness/MS
+ruthless/YP
+Ruth/M
+Ruthy/M
+Rutland/M
+Rutledge/M
+rut/MS
+rutted
+Rutter/M
+Ruttger/M
+rutting
+rutty/RT
+Ruy/M
+RV
+RVs
+Rwandan/S
+Rwanda/SM
+Rwy/M
+Rx/M
+Ryan/M
+Ryann/M
+Rycca/M
+Rydberg/M
+Ryder/M
+rye/MS
+Ryley/M
+Ry/M
+Ryon/M
+Ryukyu/M
+Ryun/M
+S
+SA
+Saab/M
+Saar/M
+Saba/M
+sabbath
+Sabbath/M
+Sabbaths
+sabbatical/S
+sabered/U
+saber/GSMD
+Sabik/M
+Sabina/M
+Sabine/M
+Sabin/M
+sable/GMDS
+sabotage/DSMG
+saboteur/SM
+sabot/MS
+Sabra/M
+sabra/MS
+Sabrina/M
+SAC
+Sacajawea/M
+saccharides
+saccharine
+saccharin/MS
+Sacco/M
+sacerdotal
+Sacha/M
+sachem/MS
+sachet/SM
+Sachs/M
+sackcloth/M
+sackcloths
+sacker/M
+sackful/MS
+sack/GJDRMS
+sacking/M
+sacral
+sacra/L
+sacramental/S
+sacrament/DMGS
+Sacramento/M
+sacredness/S
+sacred/PY
+sacrificer/M
+sacrifice/RSDZMG
+sacrificial/Y
+sacrilege/MS
+sacrilegious/Y
+sacristan/SM
+sacristy/MS
+sacroiliac/S
+sacrosanctness/MS
+sacrosanct/P
+sacrum/M
+sac/SM
+Sada/M
+Sadat/M
+Saddam/M
+sadden/DSG
+sadder
+saddest
+saddlebag/SM
+saddler/M
+saddle's
+saddle/UGDS
+Sadducee/M
+Sadella/M
+Sade/M
+sades
+Sadie/M
+sadism/MS
+sadistic
+sadistically
+sadist/MS
+sadness/SM
+sadomasochism/MS
+sadomasochistic
+sadomasochist/S
+sad/PY
+Sadr/M
+Sadye/M
+safari/GMDS
+safeguard/MDSG
+safekeeping/MS
+safeness/MS
+safeness's/U
+safes
+safety/SDMG
+safe/URPTY
+safflower/SM
+saffron/MS
+sagaciousness/M
+sagacious/YP
+sagacity/MS
+saga/MS
+Sagan/M
+sagebrush/SM
+sage/MYPS
+sagged
+sagger
+sagging
+saggy/RT
+Saginaw/M
+Sagittarius/MS
+sago/MS
+sag/TSR
+saguaro/SM
+Sahara/M
+Saharan/M
+Sahel
+sahib/MS
+Saidee/M
+saids
+said/U
+Saigon/M
+sailboard/DGS
+sailboat/SRMZG
+sailcloth/M
+sailcloths
+sailer/M
+sailfish/SM
+sail/GJMDRS
+sailing/M
+sailor/YMS
+sailplane/SDMG
+sainthood/MS
+saintlike
+saintliness/MS
+saintly/RTP
+saint/YDMGS
+Saiph/M
+saith
+saiths
+Sakai/M
+sake/MRS
+saker/M
+Sakhalin/M
+Sakharov/M
+Saki/M
+saki's
+salaam/GMDS
+salable/U
+salaciousness/MS
+salacious/YP
+salacity/MS
+Saladin/M
+Salado/M
+salad/SM
+Salaidh/M
+salamander/MS
+salami/MS
+salary/SDMG
+Salas/M
+Salazar/M
+saleability/M
+sale/ABMS
+Saleem/M
+Salem/M
+Salerno/M
+salesclerk/SM
+salesgirl/SM
+saleslady/S
+salesman/M
+salesmanship/SM
+salesmen
+salespeople/M
+salesperson/MS
+salesroom/M
+saleswoman
+saleswomen
+salience/MS
+saliency
+salient/SY
+Salim/M
+Salina/MS
+saline/S
+salinger
+Salinger/M
+salinity/MS
+Salisbury/M
+Salish/M
+saliva/MS
+salivary
+salivate/XNGSD
+salivation/M
+Salk/M
+Sallee/M
+Salle/M
+Sallie/M
+Salli/M
+sallowness/MS
+sallow/TGRDSP
+Sallust/M
+Sallyanne/M
+Sallyann/M
+sally/GSDM
+Sally/M
+salmonellae
+salmonella/M
+Salmon/M
+salmon/SM
+Sal/MY
+Saloma/M
+Salome/M
+Salomi/M
+Salomo/M
+Salomone/M
+Salomon/M
+Salonika/M
+salon/SM
+saloonkeeper
+saloon/MS
+salsa/MS
+salsify/M
+SALT
+saltcellar/SM
+salted/UC
+salter/M
+salt/GZTPMDRS
+saltine/MS
+saltiness/SM
+saltness/M
+Salton/M
+saltpeter/SM
+salts/C
+saltshaker/S
+saltwater
+salty/RSPT
+salubriousness/M
+salubrious/YP
+salubrity/M
+salutariness/M
+salutary/P
+salutation/SM
+salutatory/S
+saluter/M
+salute/RSDG
+Salvadoran/S
+Salvadorian/S
+Salvador/M
+salvageable
+salvage/MGRSD
+salvager/M
+salvation/MS
+Salvatore/M
+salve/GZMDSR
+salver/M
+Salvidor/M
+salvo/GMDS
+Salween/M
+Salyut/M
+Salz/M
+SAM
+Samantha/M
+Samara/M
+Samaria/M
+Samaritan/MS
+samarium/MS
+Samarkand/M
+samba/GSDM
+sameness/MS
+same/SP
+Sam/M
+Sammie/M
+Sammy/M
+Samoa
+Samoan/S
+Samoset/M
+samovar/SM
+Samoyed/M
+sampan/MS
+sampler/M
+sample/RSDJGMZ
+sampling/M
+Sampson/M
+Samsonite/M
+Samson/M
+Samuele/M
+Samuel/SM
+Samuelson/M
+samurai/M
+San'a
+Sana/M
+sanatorium/MS
+Sanborn/M
+Sanchez/M
+Sancho/M
+sanctification/M
+sanctifier/M
+sanctify/RSDGNX
+sanctimoniousness/MS
+sanctimonious/PY
+sanctimony/MS
+sanctioned/U
+sanction/SMDG
+sanctity/SM
+sanctuary/MS
+sanctum/SM
+sandal/MDGS
+sandalwood/SM
+sandbagged
+sandbagging
+sandbag/MS
+sandbank/SM
+sandbar/S
+sandblaster/M
+sandblast/GZSMRD
+sandbox/MS
+Sandburg/M
+sandcastle/S
+Sande/M
+Sanderling/M
+sander/M
+Sander/M
+Sanderson/M
+sandhill
+sandhog/SM
+Sandia/M
+Sandie/M
+Sandi/M
+sandiness/S
+Sandinista
+sandlot/SM
+sandlotter/S
+sandman/M
+sandmen
+Sand/MRZ
+Sandor/M
+Sandoval/M
+sandpaper/DMGS
+sandpile
+sandpiper/MS
+sandpit/M
+Sandra/M
+Sandro/M
+sand/SMDRGZ
+sandstone/MS
+sandstorm/SM
+Sandusky/M
+sandwich/SDMG
+Sandye/M
+Sandy/M
+sandy/PRT
+saned
+sane/IRYTP
+saneness/MS
+saneness's/I
+sanes
+Sanford/M
+Sanforized
+Sanger/M
+sangfroid/S
+sangria/SM
+Sang/RM
+sang/S
+sanguinary
+sanguined
+sanguine/F
+sanguinely
+sanguineness/M
+sanguineous/F
+sanguines
+sanguining
+Sanhedrin/M
+saning
+sanitarian/S
+sanitarium/SM
+sanitary/S
+sanitate/NX
+sanitation/M
+sanitizer/M
+sanitize/RSDZG
+sanity/SIM
+sank
+Sankara/M
+San/M
+sans
+sanserif
+Sanskritic
+Sanskritize/M
+Sanskrit/M
+Sansone/M
+Sanson/M
+Santa/M
+Santana/M
+Santayana/M
+Santeria
+Santiago/M
+Santo/MS
+sapience/MS
+sapient
+sapless
+sapling/SM
+sap/MS
+sapped
+sapper/SM
+Sapphira/M
+Sapphire/M
+sapphire/MS
+Sappho/M
+sappiness/SM
+sapping
+Sapporo/M
+sappy/RPT
+saprophyte/MS
+saprophytic
+sapsucker/SM
+sapwood/SM
+Saraann/M
+Saracen/MS
+Saragossa/M
+Sarah/M
+Sarajane/M
+Sarajevo/M
+Sara/M
+Saran/M
+saran/SM
+sarape's
+Sarasota/M
+Saratoga/M
+Saratov/M
+Sarawak/M
+sarcasm/MS
+sarcastic
+sarcastically
+sarcoma/MS
+sarcophagi
+sarcophagus/M
+sardine/SDMG
+Sardinia/M
+sardonic
+sardonically
+Saree/M
+Sarena/M
+Sarene/M
+Sarette/M
+Sargasso/M
+Sarge/M
+Sargent/M
+sarge/SM
+Sargon/M
+Sari/M
+sari/MS
+Sarina/M
+Sarine/M
+Sarita/M
+Sarnoff/M
+sarong/MS
+Saroyan/M
+sarsaparilla/MS
+Sarto/M
+sartorial/Y
+sartorius/M
+Sartre/M
+Sascha/M
+SASE
+Sasha/M
+sashay/GDS
+Sashenka/M
+sash/GMDS
+Saskatchewan/M
+Saskatoon/M
+Sask/M
+sassafras/MS
+sass/GDSM
+Sassoon/M
+sassy/TRS
+SAT
+satanic
+satanical/Y
+Satanism/M
+satanism/S
+Satanist/M
+satanist/S
+Satan/M
+satchel/SM
+sat/DG
+sateen/MS
+satellite/GMSD
+sate/S
+satiable/I
+satiate/GNXSD
+satiation/M
+satiety/MS
+satin/MDSG
+satinwood/MS
+satiny
+satire/SM
+satiric
+satirical/Y
+satirist/SM
+satirize/DSG
+satirizes/U
+satisfaction/ESM
+satisfactorily/U
+satisfactoriness/MU
+satisfactory/UP
+satisfiability/U
+satisfiable/U
+satisfied/UE
+satisfier/M
+satisfies/E
+satisfy/GZDRS
+satisfying/EU
+satisfyingly
+Sat/M
+satori/SM
+satrap/SM
+saturated/CUA
+saturater/M
+saturates/A
+saturate/XDRSNG
+saturation/M
+Saturday/MS
+saturnalia
+Saturnalia/M
+saturnine/Y
+Saturn/M
+Satyanarayanan/M
+satyriases
+satyriasis/M
+satyric
+satyr/MS
+sauce/DSRGZM
+saucepan/SM
+saucer/M
+saucily
+sauciness/S
+saucy/TRP
+Saudi/S
+Saud/M
+Saudra/M
+sauerkraut/SM
+Saukville/M
+Saul/M
+Sault/M
+sauna/DMSG
+Sauncho/M
+Saunder/SM
+Saunderson/M
+Saundra/M
+saunter/DRSG
+saurian/S
+sauropod/SM
+sausage/MS
+Saussure/M
+sauté/DGS
+Sauternes/M
+Sauveur/M
+savage/GTZYPRSD
+Savage/M
+savageness/SM
+savagery/MS
+Savannah/M
+savanna/MS
+savant/SM
+saved/U
+saveloy/M
+saver/M
+save/ZGJDRSB
+Savina/M
+Savior/M
+savior/SM
+Saviour/M
+Savonarola/M
+savored/U
+savorer/M
+savorier
+savoriest
+savoriness/S
+savoringly/S
+savoring/Y
+savor/SMRDGZ
+savory/UMPS
+Savoyard/M
+Savoy/M
+savoy/SM
+savvy/GTRSD
+sawbones/M
+sawbuck/SM
+sawdust/MDSG
+sawer/M
+sawfly/SM
+sawhorse/MS
+Saw/M
+sawmill/SM
+saw/SMDRG
+sawtooth
+Sawyere/M
+Sawyer/M
+sawyer/MS
+Saxe/M
+saxifrage/SM
+Sax/M
+sax/MS
+Saxon/SM
+Saxony/M
+saxophone/MS
+saxophonist/SM
+Saxton/M
+Sayer/M
+sayer/SM
+sayest
+saying/MS
+Sayre/MS
+says/M
+say/USG
+Say/ZMR
+SBA
+Sb/M
+SC
+scabbard/SGDM
+scabbed
+scabbiness/SM
+scabbing
+scabby/RTP
+scabies/M
+scabrousness/M
+scabrous/YP
+scab/SM
+scad/SM
+scaffolding/M
+scaffold/JGDMS
+scalability
+Scala/M
+scalar/SM
+scalawag/SM
+scald/GJRDS
+scaled/AU
+scale/JGZMBDSR
+scaleless
+scalene
+scaler/M
+scales/A
+scaliness/MS
+scaling/A
+scallion/MS
+scalloper/M
+scallop/GSMDR
+scalloping/M
+scalpel/SM
+scalper/M
+scalp/GZRDMS
+scalping/M
+scaly/TPR
+scammed
+scamming
+scamper/GD
+scampi/M
+scamp/RDMGZS
+scam/SM
+Scan
+scan/AS
+scandal/GMDS
+scandalized/U
+scandalize/GDS
+scandalmonger/SM
+scandalousness/M
+scandalous/YP
+Scandinavia/M
+Scandinavian/S
+scandium/MS
+scanned/A
+scanner/SM
+scanning/A
+scansion/SM
+scant/CDRSG
+scantest
+scantily
+scantiness/MS
+scantly
+scantness/MS
+scanty/TPRS
+scapegoat/SGDM
+scapegrace/MS
+scape/M
+scapulae
+scapula/M
+scapular/S
+scarab/SM
+Scaramouch/M
+Scarborough/M
+scarceness/SM
+scarce/RTYP
+scarcity/MS
+scar/DRMSG
+scarecrow/MS
+scaremongering/M
+scaremonger/SGM
+scarer/M
+scare/S
+scarface
+Scarface/M
+scarf/SDGM
+scarification/M
+scarify/DRSNGX
+scarily
+scariness/S
+scarlatina/MS
+Scarlatti/M
+Scarlet/M
+scarlet/MDSG
+Scarlett/M
+scarp/SDMG
+scarred
+scarring
+scarves/M
+scary/PTR
+scathe/DG
+scathed/U
+scathing/Y
+scatological
+scatology/SM
+scat/S
+scatted
+scatterbrain/MDS
+scatter/DRJZSG
+scatterer/M
+scattergun
+scattering/YM
+scatting
+scavenge/GDRSZ
+scavenger/M
+SCCS
+scenario/SM
+scenarist/MS
+scene/GMDS
+scenery/SM
+scenically
+scenic/S
+scented/U
+scent/GDMS
+scentless
+scent's/C
+scents/C
+scepter/DMSG
+scepters/U
+sceptically
+sch
+Schaefer/M
+Schaeffer/M
+Schafer/M
+Schaffner/M
+Schantz/M
+Schapiro/M
+Scheat/M
+Schedar/M
+schedule/ADSRG
+scheduled/U
+scheduler/MS
+schedule's
+Scheherazade/M
+Scheherezade/M
+Schelling/M
+schema/M
+schemata
+schematically
+schematic/S
+scheme/JSRDGMZ
+schemer/M
+schemta
+Schenectady/M
+scherzo/MS
+Schick/M
+Schiller/M
+schilling/SM
+schismatic/S
+schism/SM
+schist/SM
+schizoid/S
+schizomycetes
+schizophrenia/SM
+schizophrenically
+schizophrenic/S
+schizo/S
+schlemiel/MS
+schlepped
+schlepping
+schlep/S
+Schlesinger/M
+Schliemann/M
+Schlitz/M
+schlock/SM
+schlocky/TR
+Schloss/M
+schmaltz/MS
+schmaltzy/TR
+Schmidt/M
+Schmitt/M
+schmoes
+schmo/M
+schmooze/GSD
+schmuck/MS
+Schnabel/M
+schnapps/M
+schnauzer/MS
+Schneider/M
+schnitzel/MS
+schnook/SM
+schnoz/S
+schnozzle/MS
+Schoenberg/M
+Schofield/M
+scholarship/MS
+scholar/SYM
+scholastically
+scholastic/S
+schoolbag/SM
+schoolbook/SM
+schoolboy/MS
+schoolchild/M
+schoolchildren
+schooldays
+schooled/U
+schoolfellow/S
+schoolfriend
+schoolgirlish
+schoolgirl/MS
+schoolhouse/MS
+schooling/M
+schoolmarmish
+schoolmarm/MS
+schoolmaster/SGDM
+schoolmate/MS
+schoolmistress/MS
+schoolroom/SM
+schoolteacher/MS
+schoolwork/SM
+schoolyard/SM
+school/ZGMRDJS
+schooner/SM
+Schopenhauer/M
+Schottky/M
+Schrieffer/M
+Schrödinger/M
+Schroeder/M
+Schroedinger/M
+Schubert/M
+Schultz/M
+Schulz/M
+Schumacher/M
+Schuman/M
+Schumann/M
+schussboomer/S
+schuss/SDMG
+Schuster/M
+Schuyler/M
+Schuylkill/M
+Schwab/M
+Schwartzkopf/M
+Schwartz/M
+Schwarzenegger/M
+schwa/SM
+Schweitzer/M
+Schweppes/M
+Schwinger/M
+Schwinn/M
+sci
+sciatica/SM
+sciatic/S
+science/FMS
+scientifically/U
+scientific/U
+scientist/SM
+Scientology/M
+scimitar/SM
+scintilla/MS
+scintillate/GNDSX
+scintillation/M
+scintillator/SM
+scion/SM
+Scipio/M
+scissor/SGD
+scleroses
+sclerosis/M
+sclerotic/S
+Sc/M
+scoffer/M
+scofflaw/MS
+scoff/RDGZS
+scolder/M
+scold/GSJRD
+scolioses
+scoliosis/M
+scollop's
+sconce/SDGM
+scone/SM
+scooper/M
+scoop/SRDMG
+scooter/M
+scoot/SRDGZ
+scope/DSGM
+Scopes/M
+scops
+scorbutic
+scorcher/M
+scorching/Y
+scorch/ZGRSD
+scoreboard/MS
+scorecard/MS
+scored/M
+scorekeeper/SM
+scoreless
+scoreline
+score/ZMDSRJG
+scorner/M
+scornfulness/M
+scornful/PY
+scorn/SGZMRD
+scorpion/SM
+Scorpio/SM
+Scorpius/M
+Scorsese/M
+Scotchgard/M
+Scotchman/M
+Scotchmen
+scotch/MSDG
+scotchs
+Scotch/S
+Scotchwoman
+Scotchwomen
+Scotia/M
+Scotian/M
+Scotland/M
+Scot/MS
+Scotsman/M
+Scotsmen
+Scotswoman
+Scotswomen
+Scottie/SM
+Scotti/M
+Scottish
+Scott/M
+Scottsdale/M
+Scotty's
+scoundrel/YMS
+scourer/M
+scourge/MGRSD
+scourger/M
+scouring/M
+scour/SRDGZ
+scouter/M
+scouting/M
+scoutmaster/SM
+Scout's
+scout/SRDMJG
+scow/DMGS
+scowler/M
+scowl/SRDG
+scrabble/DRSZG
+scrabbler/M
+Scrabble/SM
+scragged
+scragging
+scraggly/TR
+scraggy/TR
+scrag/SM
+scrambler/MS
+scrambler's/U
+scramble/UDSRG
+scrammed
+scramming
+scram/S
+Scranton/M
+scrapbook/SM
+scraper/M
+scrape/S
+scrapheap/SM
+scrapped
+scrapper/SM
+scrapping
+scrappy/RT
+scrap/SGZJRDM
+scrapyard/S
+scratched/U
+scratcher/M
+scratches/M
+scratchily
+scratchiness/S
+scratch/JDRSZG
+scratchy/TRP
+scrawler/M
+scrawl/GRDS
+scrawly/RT
+scrawniness/MS
+scrawny/TRP
+screamer/M
+screaming/Y
+scream/ZGSRD
+screecher/M
+screech/GMDRS
+screechy/TR
+screed/MS
+scree/DSM
+screened/U
+screening/M
+screenplay/MS
+screen/RDMJSG
+screenwriter/MS
+screwball/SM
+screwdriver/SM
+screwer/M
+screw/GUSD
+screwiness/S
+screw's
+screwup
+screwworm/MS
+screwy/RTP
+Scriabin/M
+scribal
+scribble/JZDRSG
+scribbler/M
+scribe/CDRSGIK
+scriber/MKIC
+scribe's
+Scribner/MS
+scrimmager/M
+scrimmage/RSDMG
+scrimp/DGS
+scrimshaw/GSDM
+scrim/SM
+Scripps/M
+scrip/SM
+scripted/U
+script/FGMDS
+scriptural/Y
+scripture/MS
+Scripture/MS
+scriptwriter/SM
+scriptwriting/M
+scrivener/M
+scriven/ZR
+scrod/M
+scrofula/MS
+scrofulous
+scrollbar/SM
+scroll/GMDSB
+Scrooge/MS
+scrooge/SDMG
+scrota
+scrotal
+scrotum/M
+scrounge/ZGDRS
+scroungy/TR
+scrubbed
+scrubber/MS
+scrubbing
+scrubby/TR
+scrub/S
+scruffily
+scruffiness/S
+scruff/SM
+scruffy/PRT
+Scruggs/M
+scrummage/MG
+scrum/MS
+scrumptious/Y
+scrunch/DSG
+scrunchy/S
+scruple/SDMG
+scrupulosity/SM
+scrupulousness's
+scrupulousness/US
+scrupulous/UPY
+scrutable/I
+scrutinized/U
+scrutinizer/M
+scrutinize/RSDGZ
+scrutinizingly/S
+scrutinizing/UY
+scrutiny/MS
+SCSI
+scuba/SDMG
+scudded
+scudding
+Scud/M
+scud/S
+scuff/GSD
+scuffle/SDG
+sculler/M
+scullery/MS
+Sculley/M
+scullion/MS
+scull/SRDMGZ
+sculptor/MS
+sculptress/MS
+sculpt/SDG
+sculptural/Y
+sculpture/SDGM
+scumbag/S
+scummed
+scumming
+scum/MS
+scummy/TR
+scupper/SDMG
+scurf/MS
+scurfy/TR
+scurrility/MS
+scurrilousness/MS
+scurrilous/PY
+scurry/GJSD
+scurvily
+scurviness/M
+scurvy/SRTP
+scutcheon/SM
+scuttlebutt/MS
+scuttle/MGSD
+scuzzy/RT
+Scylla/M
+scythe/SDGM
+Scythia/M
+SD
+SDI
+SE
+seabed/S
+seabird/S
+seaboard/MS
+Seaborg/M
+seaborne
+Seabrook/M
+seacoast/MS
+seafare/JRZG
+seafarer/M
+seafood/MS
+seafront/MS
+Seagate/M
+seagoing
+Seagram/M
+seagull/S
+seahorse/S
+sealant/MS
+sealed/AU
+sealer/M
+seal/MDRSGZ
+sealskin/SM
+seals/UA
+seamail
+seamanship/SM
+seaman/YM
+seamer/M
+seaminess/M
+seamlessness/M
+seamless/PY
+seam/MNDRGS
+seams/I
+seamstress/MS
+Seamus/M
+sea/MYS
+seamy/TRP
+Seana/M
+séance/SM
+Sean/M
+seaplane/SM
+seaport/SM
+seaquake/M
+Seaquarium/M
+searcher/AM
+searching/YS
+searchlight/SM
+search/RSDAGZ
+sear/DRSJGT
+searing/Y
+Sears/M
+seascape/SM
+seashell/MS
+seashore/SM
+seasickness/SM
+seasick/P
+seaside/SM
+seasonableness/M
+seasonable/UP
+seasonably/U
+seasonality
+seasonal/Y
+seasoned/U
+seasoner/M
+seasoning/M
+season/JRDYMBZSG
+seatbelt
+seated/A
+seater/M
+seating/SM
+SEATO
+seat's
+Seattle/M
+seat/UDSG
+seawall/S
+seaward/S
+seawater/S
+seaway/MS
+seaweed/SM
+seaworthinesses
+seaworthiness/MU
+seaworthy/TRP
+sebaceous
+Sebastian/M
+Sebastiano/M
+Sebastien/M
+seborrhea/SM
+SEC
+secant/SM
+secede/GRSD
+secessionist/MS
+secession/MS
+secludedness/M
+secluded/YP
+seclude/GSD
+seclusion/SM
+seclusive
+Seconal
+secondarily
+secondary/PS
+seconder/M
+secondhand
+second/RDYZGSL
+secrecy/MS
+secretarial
+secretariat/MS
+secretaryship/MS
+secretary/SM
+secrete/XNS
+secretion/M
+secretiveness/S
+secretive/PY
+secretory
+secret/TVGRDYS
+sec/S
+sectarianism/MS
+sectarian/S
+sectary/MS
+sectionalism/MS
+sectionalized
+sectional/SY
+section/ASEM
+sectioned
+sectioning
+sect/ISM
+sectoral
+sectored
+sector/EMS
+sectoring
+sects/E
+secularism/MS
+secularist/MS
+secularity/M
+secularization/MS
+secularized/U
+secularize/GSD
+secular/SY
+secured/U
+securely/I
+secure/PGTYRSDJ
+security/MSI
+secy
+sec'y
+sedan/SM
+sedateness/SM
+sedate/PXVNGTYRSD
+sedation/M
+sedative/S
+sedentary
+Seder/SM
+sedge/SM
+Sedgwick/M
+sedgy/RT
+sedimentary
+sedimentation/SM
+sediment/SGDM
+sedition/SM
+seditiousness/M
+seditious/PY
+seducer/M
+seduce/RSDGZ
+seduction/MS
+seductiveness/MS
+seductive/YP
+seductress/SM
+sedulous/Y
+Seebeck/M
+seed/ADSG
+seedbed/MS
+seedcase/SM
+seeded/U
+seeder/MS
+seediness/MS
+seeding/S
+seedless
+seedling/SM
+seedpod/S
+seed's
+seedy/TPR
+seeings
+seeing's
+seeing/U
+seeker/M
+seek/GZSR
+seeking/Y
+Seeley/M
+See/M
+seem/GJSYD
+seeming/Y
+seemliness's
+seemliness/US
+seemly/UTPR
+seen/U
+seepage/MS
+seep/GSD
+seer/SM
+seersucker/MS
+sees
+seesaw/DMSG
+seethe/SDGJ
+see/U
+segmental/Y
+segmentation/SM
+segmented/U
+segment/SGDM
+Segovia/M
+segregant
+segregated/U
+segregate/XCNGSD
+segregation/CM
+segregationist/SM
+segregative
+Segre/M
+segue/DS
+segueing
+Segundo/M
+Se/H
+Seidel/M
+seigneur/MS
+seignior/SM
+Seiko/M
+seine/GZMDSR
+Seine/M
+seiner/M
+Seinfeld/M
+seismic
+seismically
+seismographer/M
+seismographic
+seismographs
+seismography/SM
+seismograph/ZMR
+seismologic
+seismological
+seismologist/MS
+seismology/SM
+seismometer/S
+seize/BJGZDSR
+seizer/M
+seizing/M
+seizin/MS
+seizor/MS
+seizure/MS
+Seka/M
+Sela/M
+Selassie/M
+Selby/M
+seldom
+selected/UAC
+selectional
+selection/MS
+selectiveness/M
+selective/YP
+selectivity/MS
+selectman/M
+selectmen
+selectness/SM
+selector/SM
+select/PDSVGB
+Selectric/M
+selects/A
+Selena/M
+selenate/M
+Selene/M
+selenite/M
+selenium/MS
+selenographer/SM
+selenography/MS
+Selestina/M
+Seleucid/M
+Seleucus/M
+self/GPDMS
+selfishness/SU
+selfish/PUY
+selflessness/MS
+selfless/YP
+selfness/M
+Selfridge/M
+selfsameness/M
+selfsame/P
+Selia/M
+Selie/M
+Selig/M
+Selim/M
+Selina/M
+Selinda/M
+Seline/M
+Seljuk/M
+Selkirk/M
+Sella/M
+sell/AZGSR
+seller/AM
+Sellers/M
+Selle/ZM
+sellout/MS
+Selma/M
+seltzer/S
+selvage/MGSD
+selves/M
+Selznick/M
+semantical/Y
+semanticist/SM
+semantic/S
+semantics/M
+semaphore/GMSD
+Semarang/M
+semblance/ASME
+semen/SM
+semester/SM
+semiannual/Y
+semiarid
+semiautomated
+semiautomatic/S
+semicircle/SM
+semicircular
+semicolon/MS
+semiconductor/SM
+semiconscious
+semidefinite
+semidetached
+semidrying/M
+semifinalist/MS
+semifinal/MS
+semilogarithmic
+semimonthly/S
+seminal/Y
+seminarian/MS
+seminar/SM
+seminary/MS
+Seminole/SM
+semiofficial
+semioticians
+semiotic/S
+semiotics/M
+semipermanent/Y
+semipermeable
+semiprecious
+semiprivate
+semiprofessional/YS
+semipublic
+semiquantitative/Y
+Semiramis/M
+semiretired
+semisecret
+semiskilled
+semi/SM
+semisolid/S
+semistructured
+semisweet
+Semite/SM
+Semitic/MS
+semitic/S
+semitone/SM
+semitrailer/SM
+semitrance
+semitransparent
+semitropical
+semivowel/MS
+semiweekly/S
+semiyearly
+semolina/SM
+sempiternal
+sempstress/SM
+Semtex
+sen
+Sen
+Sena/M
+senate/MS
+Senate/MS
+senatorial
+senator/MS
+Sendai/M
+sender/M
+sends/A
+send/SRGZ
+Seneca/MS
+Senegalese
+Senegal/M
+senescence/SM
+senescent
+senile/SY
+senility/MS
+seniority/SM
+senior/MS
+Senior/S
+Sennacherib/M
+senna/MS
+Sennett/M
+Señora/M
+senora/S
+senorita/S
+senor/MS
+sensately/I
+sensate/YNX
+sensationalism/MS
+sensationalist/S
+sensationalize/GSD
+sensational/Y
+sensation/M
+sens/DSG
+senselessness/SM
+senseless/PY
+sense/M
+sensibility/ISM
+sensibleness/MS
+sensible/PRST
+sensibly/I
+sensitiveness/MS
+sensitiveness's/I
+sensitives
+sensitive/YIP
+sensitivity/ISM
+sensitization/CSM
+sensitized/U
+sensitizers
+sensitize/SDCG
+sensor/MS
+sensory
+sensualist/MS
+sensuality/MS
+sensual/YF
+sensuousness/S
+sensuous/PY
+Sensurround/M
+sentence/SDMG
+sentential/Y
+sententious/Y
+sentience/ISM
+sentient/YS
+sentimentalism/SM
+sentimentalist/SM
+sentimentality/SM
+sentimentalization/SM
+sentimentalize/RSDZG
+sentimentalizes/U
+sentimental/Y
+sentiment/MS
+sentinel/GDMS
+sentry/SM
+sent/UFEA
+Seoul/M
+sepal/SM
+separability/MSI
+separableness/MI
+separable/PI
+separably/I
+separateness/MS
+separates/M
+separate/YNGVDSXP
+separation/M
+separatism/SM
+separatist/SM
+separator/SM
+Sephardi/M
+Sephira/M
+sepia/MS
+Sepoy/M
+sepses
+sepsis/M
+septa/M
+septate/N
+September/MS
+septennial/Y
+septet/MS
+septicemia/SM
+septicemic
+septic/S
+septillion/M
+sept/M
+Sept/M
+septuagenarian/MS
+Septuagint/MS
+septum/M
+sepulcher/MGSD
+sepulchers/UA
+sepulchral/Y
+seq
+sequel/MS
+sequenced/A
+sequence/DRSJZMG
+sequencer/M
+sequence's/F
+sequences/F
+sequent/F
+sequentiality/FM
+sequentialize/DSG
+sequential/YF
+sequester/SDG
+sequestrate/XGNDS
+sequestration/M
+sequin/SDMG
+sequitur
+Sequoia/M
+sequoia/MS
+Sequoya/M
+Serafin/M
+seraglio/SM
+serape/S
+seraphic
+seraphically
+seraphim's
+seraph/M
+seraphs
+sera's
+Serbia/M
+Serbian/S
+Serb/MS
+Serbo/M
+serenade/MGDRS
+serenader/M
+Serena/M
+serendipitous/Y
+serendipity/MS
+serene/GTYRSDP
+Serene/M
+sereneness/SM
+Serengeti/M
+serenity/MS
+sere/TGDRS
+serfdom/MS
+serf/MS
+Sergeant/M
+sergeant/SM
+serge/DSGM
+Sergei/M
+Serge/M
+Sergent/M
+Sergio/M
+serialization/MS
+serialize/GSD
+serial/MYS
+series/M
+serif/SMD
+serigraph/M
+serigraphs
+seriousness/SM
+serious/PY
+sermonize/GSD
+sermon/SGDM
+serological/Y
+serology/MS
+serons
+serous
+Serpens/M
+serpent/GSDM
+serpentine/GYS
+Serra/M
+Serrano/M
+serrate/GNXSD
+serration/M
+serried
+serum/MS
+servant/SDMG
+serve/AGCFDSR
+served/U
+server/MCF
+servers
+serviceability/SM
+serviceableness/M
+serviceable/P
+serviced/U
+serviceman/M
+servicemen
+service/MGSRD
+service's/E
+services/E
+servicewoman
+servicewomen
+serviette/MS
+servilely
+servileness/M
+serviles
+servile/U
+servility/SM
+serving/SM
+servitor/SM
+servitude/MS
+servomechanism/MS
+servomotor/MS
+servo/S
+sesame/MS
+sesquicentennial/S
+sessile
+session/SM
+setback/S
+Seth/M
+Set/M
+Seton/M
+set's
+setscrew/SM
+set/SIA
+settable/A
+sett/BJGZSMR
+settee/MS
+setter/M
+setting/AS
+setting's
+settle/AUDSG
+settlement/ASM
+settler/MS
+settling/S
+setup/MS
+Seumas/M
+Seurat/M
+Seuss/M
+Sevastopol/M
+sevenfold
+sevenpence
+seven/SMH
+seventeen/HMS
+seventeenths
+sevenths
+seventieths
+seventy/MSH
+severalfold
+severalty/M
+several/YS
+severance/SM
+severed/E
+severeness/SM
+severe/PY
+severing/E
+severity/MS
+Severn/M
+severs/E
+sever/SGTRD
+Severus/M
+Seville/M
+sewage/MS
+Seward/M
+sewerage/SM
+sewer/GSMD
+sewing/SM
+sewn
+sew/SAGD
+sexagenarian/MS
+sex/GMDS
+sexily
+sexiness/MS
+sexism/SM
+sexist/SM
+sexless
+sexologist/SM
+sexology/MS
+sexpot/SM
+Sextans/M
+sextant/SM
+sextet/SM
+sextillion/M
+Sexton/M
+sexton/MS
+sextuple/MDG
+sextuplet/MS
+sexuality/MS
+sexualized
+sexual/Y
+sexy/RTP
+Seychelles
+Seyfert
+Seymour/M
+sf
+SF
+Sgt
+shabbily
+shabbiness/SM
+shabby/RTP
+shack/GMDS
+shackler/M
+shackle's
+Shackleton/M
+shackle/UGDS
+shad/DRJGSM
+shaded/U
+shadeless
+shade/SM
+shadily
+shadiness/MS
+shading/M
+shadowbox/SDG
+shadower/M
+shadow/GSDRM
+shadowiness/M
+Shadow/M
+shadowy/TRP
+shady/TRP
+Shae/M
+Shafer/M
+Shaffer/M
+shafting/M
+shaft/SDMG
+shagged
+shagginess/SM
+shagging
+shaggy/TPR
+shag/MS
+shah/M
+shahs
+Shaina/M
+Shaine/M
+shakable/U
+shakably/U
+shakeable
+shakedown/S
+shaken/U
+shakeout/SM
+shaker/M
+Shaker/S
+Shakespearean/S
+Shakespeare/M
+Shakespearian
+shake/SRGZB
+shakeup/S
+shakily
+shakiness/S
+shaking/M
+shaky/TPR
+shale/SM
+shall
+shallot/SM
+shallowness/SM
+shallow/STPGDRY
+Shalna/M
+Shalne/M
+shalom
+Shalom/M
+shalt
+shamanic
+shaman/SM
+shamble/DSG
+shambles/M
+shamefaced/Y
+shamefulness/S
+shameful/YP
+shamelessness/SM
+shameless/PY
+shame/SM
+sham/MDSG
+shammed
+shammer
+shamming
+shammy's
+shampoo/DRSMZG
+shampooer/M
+shamrock/SM
+Shamus/M
+Shana/M
+Shanan/M
+Shanda/M
+Shandee/M
+Shandeigh/M
+Shandie/M
+Shandra/M
+shandy/M
+Shandy/M
+Shane/M
+Shanghai/GM
+Shanghaiing/M
+shanghai/SDG
+Shanie/M
+Shani/M
+shank/SMDG
+Shannah/M
+Shanna/M
+Shannan/M
+Shannen/M
+Shannon/M
+Shanon/M
+shan't
+Shanta/M
+Shantee/M
+shantis
+Shantung/M
+shantung/MS
+shanty/SM
+shantytown/SM
+shape/AGDSR
+shaped/U
+shapelessness/SM
+shapeless/PY
+shapeliness/S
+shapely/RPT
+shaper/S
+shape's
+Shapiro/M
+sharable/U
+Sharai/M
+Shara/M
+shard/SM
+shareable
+sharecropped
+sharecropper/MS
+sharecropping
+sharecrop/S
+share/DSRGZMB
+shared/U
+shareholder/MS
+shareholding/S
+sharer/M
+shareware/S
+Shari'a
+Sharia/M
+sharia/SM
+Shari/M
+Sharity/M
+shark/SGMD
+sharkskin/SM
+Sharla/M
+Sharleen/M
+Sharlene/M
+Sharline/M
+Sharl/M
+Sharona/M
+Sharon/M
+Sharpe/M
+sharpen/ASGD
+sharpened/U
+sharpener/S
+sharper/M
+sharpie/SM
+Sharp/M
+sharpness/MS
+sharp/SGTZXPYRDN
+sharpshooter/M
+sharpshooting/M
+sharpshoot/JRGZ
+sharpy's
+Sharron/M
+Sharyl/M
+Shasta/M
+shat
+shatter/DSG
+shattering/Y
+shatterproof
+Shaughn/M
+Shaula/M
+Shauna/M
+Shaun/M
+shave/DSRJGZ
+shaved/U
+shaver/M
+Shavian
+shaving/M
+Shavuot/M
+Shawano/M
+shawl/SDMG
+shaw/M
+Shaw/M
+Shawna/M
+Shawnee/SM
+Shawn/M
+Shaylah/M
+Shayla/M
+Shaylyn/M
+Shaylynn/M
+Shay/M
+shay/MS
+Shayna/M
+Shayne/M
+Shcharansky/M
+sh/DRS
+sheaf/MDGS
+Shea/M
+shearer/M
+shear/RDGZS
+sheather/M
+sheathe/UGSD
+sheath/GJMDRS
+sheathing/M
+sheaths
+sheave/SDG
+sheaves/M
+Sheba/M
+shebang/MS
+Shebeli/M
+Sheboygan/M
+she'd
+shedding
+Shedir/M
+sheds
+shed's
+shed/U
+Sheelagh/M
+Sheelah/M
+Sheela/M
+Sheena/M
+sheen/MDGS
+sheeny/TRSM
+sheepdog/SM
+sheepfold/MS
+sheepherder/MS
+sheepishness/SM
+sheepish/YP
+sheep/M
+sheepskin/SM
+Sheeree/M
+sheerness/S
+sheer/PGTYRDS
+sheeting/M
+sheetlike
+sheet/RDMJSG
+Sheetrock
+Sheffielder/M
+Sheffield/RMZ
+Sheffie/M
+Sheff/M
+Sheffy/M
+sheikdom/SM
+sheikh's
+sheik/SM
+Sheilah/M
+Sheila/M
+shekel/MS
+Shelagh/M
+Shela/M
+Shelba/M
+Shelbi/M
+Shelby/M
+Shelden/M
+Sheldon/M
+shelf/MDGS
+Shelia/M
+she'll
+shellacked
+shellacking/MS
+shellac/S
+shelled/U
+Shelley/M
+shellfire/SM
+shellfish/SM
+Shellie/M
+Shelli/M
+Shell/M
+shell/RDMGS
+Shelly/M
+Shel/MY
+shelter/DRMGS
+sheltered/U
+shelterer/M
+Shelton/M
+shelve/JRSDG
+shelver/M
+shelves/M
+shelving/M
+she/M
+Shem/M
+Shena/M
+Shenandoah/M
+shenanigan/SM
+Shenyang/M
+Sheol/M
+Shepard/M
+shepherd/DMSG
+shepherdess/S
+Shepherd/M
+Shep/M
+Sheppard/M
+Shepperd/M
+Sheratan/M
+Sheraton/M
+sherbet/MS
+sherd's
+Sheree/M
+Sheridan/M
+Sherie/M
+sheriff/SM
+Sherill/M
+Sherilyn/M
+Sheri/M
+Sherline/M
+Sherlocke/M
+sherlock/M
+Sherlock/M
+Sher/M
+Sherman/M
+Shermie/M
+Sherm/M
+Shermy/M
+Sherpa/SM
+Sherrie/M
+Sherri/M
+Sherry/M
+sherry/MS
+Sherwin/M
+Sherwood/M
+Sherwynd/M
+Sherye/M
+Sheryl/M
+Shetland/S
+Shevardnadze/M
+shew/GSD
+shewn
+shh
+shiatsu/S
+shibboleth/M
+shibboleths
+shielded/U
+shielder/M
+shield/MDRSG
+Shields/M
+shiftily
+shiftiness/SM
+shiftlessness/S
+shiftless/PY
+shift/RDGZS
+shifty/TRP
+Shi'ite
+Shiite/SM
+Shijiazhuang
+Shikoku/M
+shill/DJSG
+shillelagh/M
+shillelaghs
+shilling/M
+Shillong/M
+Shiloh/M
+shimmed
+shimmer/DGS
+shimmery
+shimming
+shimmy/DSMG
+shim/SM
+Shina/M
+shinbone/SM
+shindig/MS
+shiner/M
+shine/S
+shingle/MDRSG
+shingler/M
+shinguard
+shininess/MS
+shining/Y
+shinned
+shinning
+shinny/GDSM
+shin/SGZDRM
+shinsplints
+Shintoism/S
+Shintoist/MS
+Shinto/MS
+shiny/PRT
+shipboard/MS
+shipborne
+shipbuilder/M
+shipbuild/RGZJ
+shipload/SM
+shipman/M
+shipmate/SM
+shipmen
+shipment/AMS
+shipowner/MS
+shippable
+shipped/A
+shipper/SM
+shipping/MS
+ship's
+shipshape
+ship/SLA
+shipwreck/GSMD
+shipwright/MS
+shipyard/MS
+Shiraz/M
+shire/MS
+shirker/M
+shirk/RDGZS
+Shirlee/M
+Shirleen/M
+Shirlene/M
+Shirley/M
+Shirline/M
+Shirl/M
+Shir/M
+shirr/GJDS
+shirtfront/S
+shirting/M
+shirt/JDMSG
+shirtless
+shirtmake/R
+shirtmaker/M
+shirtsleeve/MS
+shirttail/S
+shirtwaist/SM
+shit/S!
+shitting/!
+shitty/RT!
+Shiva/M
+shiverer/M
+shiver/GDR
+shivery
+shiv/SZRM
+shivved
+shivving
+shlemiel's
+Shmuel/M
+shoal/SRDMGT
+shoat/SM
+shocker/M
+shocking/Y
+Shockley/M
+shockproof
+shock/SGZRD
+shoddily
+shoddiness/SM
+shoddy/RSTP
+shod/U
+shoehorn/GSMD
+shoeing
+shoelace/MS
+shoemaker/M
+shoemake/RZ
+shoe/MS
+shoer's
+shoeshine/MS
+shoestring/MS
+shoetree/MS
+shogunate/SM
+shogun/MS
+Shoji/M
+Sholom/M
+shone
+shoo/DSG
+shoofly
+shook/SM
+shooter/M
+shootout/MS
+shoot/SJRGZ
+shopkeeper/M
+shopkeep/RGZ
+shoplifter/M
+shoplifting/M
+shoplift/SRDGZ
+shop/MS
+shopped/M
+shopper/M
+shoppe/RSDGZJ
+shopping/M
+shoptalk/SM
+shopworn
+shorebird/S
+shore/DSRGMJ
+shoreline/SM
+Shorewood/M
+shoring/M
+shortage/MS
+shortbread/MS
+shortcake/SM
+shortchange/DSG
+shortcoming/MS
+shortcrust
+shortcut/MS
+shortcutting
+shortener/M
+shortening/M
+shorten/RDGJ
+shortfall/SM
+shorthand/DMS
+Shorthorn/M
+shorthorn/MS
+shortie's
+shortish
+shortlist/GD
+Short/M
+shortness/MS
+short/SGTXYRDNP
+shortsightedness/S
+shortsighted/YP
+shortstop/MS
+shortwave/SM
+shorty/SM
+Shoshana/M
+Shoshanna/M
+Shoshone/SM
+Shostakovitch/M
+shotgunned
+shotgunner
+shotgunning
+shotgun/SM
+shot/MS
+shotted
+shotting
+shoulder/GMD
+shouldn't
+should/TZR
+shout/SGZRDM
+shove/DSRG
+shoveler/M
+shovelful/MS
+shovel/MDRSZG
+shover/M
+showbiz
+showbizzes
+showboat/SGDM
+showcase/MGSD
+showdown/MS
+shower/GDM
+showery/TR
+show/GDRZJS
+showgirl/SM
+showily
+showiness/MS
+showing/M
+showman/M
+showmanship/SM
+showmen
+shown
+showoff/S
+showpiece/SM
+showplace/SM
+showroom/MS
+showy/RTP
+shpt
+shrank
+shrapnel/SM
+shredded
+shredder/MS
+shredding
+shred/MS
+Shreveport/M
+shrewdness/SM
+shrewd/RYTP
+shrew/GSMD
+shrewishness/M
+shrewish/PY
+shrieker/M
+shriek/SGDRMZ
+shrift/SM
+shrike/SM
+shrill/DRTGPS
+shrillness/MS
+shrilly
+shrimp/MDGS
+shrine/SDGM
+shrinkage/SM
+shrinker/M
+shrinking/U
+shrink/SRBG
+shrivel/GSD
+shriven
+shrive/RSDG
+Shropshire/M
+shroud/GSMD
+shrubbed
+shrubbery/SM
+shrubbing
+shrubby/TR
+shrub/SM
+shrugged
+shrugging
+shrug/S
+shrunk/N
+shtick/S
+shucker/M
+shuck/SGMRD
+shucks/S
+shudder/DSG
+shuddery
+shuffleboard/MS
+shuffled/A
+shuffle/GDSRZ
+shuffles/A
+shuffling/A
+Shulman/M
+Shu/M
+shunned
+shunning
+shun/S
+shunter/M
+shunt/GSRD
+Shurlocke/M
+Shurlock/M
+Shurwood/M
+shush/SDG
+shutdown/MS
+shuteye/SM
+shutoff/M
+shutout/SM
+shut/S
+shutterbug/S
+shutter/DMGS
+shuttering/M
+shutting
+shuttlecock/MDSG
+shuttle/MGDS
+shy/DRSGTZY
+shyer
+shyest
+Shylockian/M
+Shylock/M
+shyness/SM
+shyster/SM
+Siamese/M
+Siam/M
+Siana/M
+Sianna/M
+Sian's
+Sibbie/M
+Sibby/M
+Sibeal/M
+Sibelius/M
+Sibella/M
+Sibelle/M
+Sibel/M
+Siberia/M
+Siberian/S
+sibilance/M
+sibilancy/M
+sibilant/SY
+Sibilla/M
+Sibley/M
+sibling/SM
+Sib/M
+Sibylla/M
+Sibylle/M
+sibylline
+Sibyl/M
+sibyl/SM
+Siciliana/M
+Sicilian/S
+Sicily/M
+sickbay/M
+sickbed/S
+sickener/M
+sickening/Y
+sicken/JRDG
+sicker/Y
+sick/GXTYNDRSP
+sickie/SM
+sickish/PY
+sickle/SDGM
+sickliness/M
+sickly/TRSDPG
+sickness/MS
+sicko/S
+sickout/S
+sickroom/SM
+sic/S
+sidearm/S
+sideband/MS
+sidebar/MS
+sideboard/SM
+sideburns
+sidecar/MS
+sided/A
+sidedness
+side/ISRM
+sidekick/MS
+sidelight/SM
+sideline/MGDRS
+sidelong
+sideman/M
+sidemen
+sidepiece/S
+sidereal
+sider/FA
+sides/A
+sidesaddle/MS
+sideshow/MS
+sidesplitting
+sidestepped
+sidestepping
+sidestep/S
+sidestroke/GMSD
+sideswipe/GSDM
+sidetrack/SDG
+sidewalk/MS
+sidewall/MS
+sidewards
+sideway/SM
+sidewinder/SM
+siding/SM
+sidle/DSG
+Sid/M
+Sidnee/M
+Sidney/M
+Sidoney/M
+Sidonia/M
+Sidonnie/M
+SIDS
+siege/GMDS
+Siegel/M
+Siegfried/M
+Sieglinda/M
+Siegmund/M
+Siemens/M
+Siena/M
+sienna/SM
+Sierpinski/M
+sierra/SM
+siesta/MS
+sieve/GZMDS
+Siffre/M
+sifted/UA
+sifter/M
+sift/GZJSDR
+Sigfrid/M
+Sigfried/M
+SIGGRAPH/M
+sigh/DRG
+sigher/M
+sighs
+sighted/P
+sighter/M
+sighting/S
+sight/ISM
+sightless/Y
+sightliness/UM
+sightly/TURP
+sightread
+sightseeing/S
+sightsee/RZ
+Sigismond/M
+Sigismondo/M
+Sigismund/M
+Sigismundo/M
+Sig/M
+sigma/SM
+sigmoid
+Sigmund/M
+signal/A
+signaled
+signaler/S
+signaling
+signalization/S
+signalize/GSD
+signally
+signalman/M
+signalmen
+signals
+signal's
+signatory/SM
+signature/MS
+signboard/MS
+signed/FU
+signer/SC
+signet/SGMD
+sign/GARDCS
+significance/IMS
+significantly/I
+significant/YS
+signification/M
+signify/DRSGNX
+signing/S
+Signora/M
+signora/SM
+signore/M
+signori
+signories
+signorina/SM
+signorine
+Signor/M
+signor/SFM
+signpost/DMSG
+sign's
+signs/F
+Sigrid/M
+Sigurd/M
+Sigvard/M
+Sihanouk/M
+Sikhism/MS
+Sikh/MS
+Sikhs
+Sikkimese
+Sikkim/M
+Sikorsky/M
+silage/GMSD
+Silas/M
+Sileas/M
+siled
+Sile/M
+silence/MZGRSD
+silencer/M
+silentness/M
+silent/TSPRY
+Silesia/M
+silhouette/GMSD
+silica/SM
+silicate/SM
+siliceous
+silicide/M
+silicone/SM
+silicon/MS
+silicoses
+silicosis/M
+silken/DG
+silk/GXNDMS
+silkily
+silkiness/SM
+silkscreen/SM
+silkworm/MS
+silky/RSPT
+silliness/SM
+sill/MS
+silly/PRST
+silo/GSM
+siltation/M
+silt/MDGS
+siltstone/M
+silty/RT
+Silurian/S
+Silvain/M
+Silva/M
+Silvana/M
+Silvan/M
+Silvano/M
+Silvanus/M
+silverer/M
+silverfish/MS
+Silverman/M
+silver/RDYMGS
+silversmith/M
+silversmiths
+Silverstein/M
+silverware/SM
+silvery/RTP
+Silvester/M
+Silvia/M
+Silvie/M
+Silvio/M
+Si/M
+SIMD
+Simenon/M
+Simeon/M
+simian/S
+similar/EY
+similarity/EMS
+simile/SM
+similitude/SME
+Simla/M
+simmer/GSD
+Simmonds/M
+Simmons/M
+Simmonsville/M
+Sim/MS
+Simms/M
+Simona/M
+Simone/M
+Simonette/M
+simonize/SDG
+Simon/M
+Simonne/M
+simony/MS
+simpatico
+simper/GDS
+simpleminded/YP
+simpleness/S
+simple/RSDGTP
+simpleton/SM
+simplex/S
+simplicity/MS
+simplified/U
+simplify/ZXRSDNG
+simplistic
+simplistically
+simply
+Simpson/M
+simulacrum/M
+Simula/M
+SIMULA/M
+simulate/XENGSD
+simulation/ME
+simulative
+simulator/SEM
+simulcast/GSD
+simultaneity/SM
+simultaneousness/M
+simultaneous/YP
+Sinai/M
+Sinatra/M
+since
+sincere/IY
+sincereness/M
+sincerer
+sincerest
+sincerity/MIS
+Sinclair/M
+Sinclare/M
+Sindbad/M
+Sindee/M
+Sindhi/M
+sinecure/MS
+sinecurist/M
+sine/SM
+sinew/SGMD
+sinewy
+sinfulness/SM
+sinful/YP
+Singaporean/S
+Singapore/M
+sing/BGJZYDR
+Singborg/M
+singeing
+singer/M
+Singer/M
+singe/S
+singing/Y
+singlehanded/Y
+singleness/SM
+single/PSDG
+Singleton/M
+singleton/SM
+singletree/SM
+singlet/SM
+singsong/GSMD
+singularity/SM
+singularization/M
+singular/SY
+Sinhalese/M
+sinisterness/M
+sinister/YP
+sinistral/Y
+sinkable/U
+sinker/M
+sink/GZSDRB
+sinkhole/SM
+Sinkiang/M
+sinking/M
+sinlessness/M
+sinless/YP
+sin/MAGS
+sinned
+sinner/MS
+sinning
+sinter/DM
+sinuosity/MS
+sinuousities
+sinuousness/M
+sinuous/PY
+sinusitis/SM
+sinus/MS
+sinusoidal/Y
+sinusoid/MS
+Siobhan/M
+Siouxie/M
+Sioux/M
+siphon/DMSG
+siphons/U
+sipped
+sipper/SM
+sipping
+sip/S
+sired/C
+sire/MS
+siren/M
+sires/C
+siring/C
+Sirius/M
+sirloin/MS
+Sir/MS
+sirocco/MS
+sirred
+sirring
+sirup's
+sir/XGMNDS
+sisal/MS
+Sisely/M
+Sisile/M
+sis/S
+Sissie/M
+sissified
+Sissy/M
+sissy/TRSM
+sister/GDYMS
+sisterhood/MS
+sisterliness/MS
+sisterly/P
+sister's/A
+Sistine
+Sisyphean
+Sisyphus/M
+sit/AG
+sitarist/SM
+sitar/SM
+sitcom/SM
+site/DSJM
+sits
+sitter/MS
+sitting/SM
+situate/GNSDX
+situational/Y
+situationist
+situation/M
+situ/S
+situs/M
+Siusan/M
+Siva/M
+Siward/M
+sixfold
+sixgun
+six/MRSH
+sixpence/MS
+sixpenny
+sixshooter
+sixteen/HRSM
+sixteenths
+sixths
+sixth/Y
+sixtieths
+sixty/SMH
+sizableness/M
+sizable/P
+sized/UA
+size/GJDRSBMZ
+sizer/M
+sizes/A
+sizing/M
+sizzler/M
+sizzle/RSDG
+SJ
+Sjaelland/M
+SK
+ska/S
+skateboard/SJGZMDR
+skater/M
+skate/SM
+skat/JMDRGZ
+skedaddle/GSD
+skeet/RMS
+skein/MDGS
+skeletal/Y
+skeleton/MS
+Skell/M
+Skelly/M
+skeptical/Y
+skepticism/MS
+skeptic/SM
+sketchbook/SM
+sketcher/M
+sketchily
+sketchiness/MS
+sketch/MRSDZG
+sketchpad
+sketchy/PRT
+skew/DRSPGZ
+skewer/GDM
+skewing/M
+skewness/M
+skidded
+skidding
+skid/S
+skiff/GMDS
+skiing/M
+skilfully
+skill/DMSG
+skilled/U
+skillet/MS
+skillfulnesses
+skillfulness/MU
+skillful/YUP
+skilling/M
+skimmed
+skimmer/MS
+skimming/SM
+ski/MNJSG
+skimp/GDS
+skimpily
+skimpiness/MS
+skimpy/PRT
+skim/SM
+skincare
+skindive/G
+skinflint/MS
+skinhead/SM
+skinless
+skinned
+Skinner/M
+skinner/SM
+skinniness/MS
+skinning
+skinny/TRSP
+skin/SM
+skintight
+Skip/M
+skipped
+Skipper/M
+skipper/SGDM
+Skippie/M
+skipping
+Skipp/RM
+Skippy/M
+skip/S
+Skipton/M
+skirmisher/M
+skirmish/RSDMZG
+skirter/M
+skirting/M
+skirt/RDMGS
+skit/GSMD
+skitter/SDG
+skittishness/SM
+skittish/YP
+skittle/SM
+skivvy/GSDM
+skoal/SDG
+Skopje/M
+skulduggery/MS
+skulker/M
+skulk/SRDGZ
+skullcap/MS
+skullduggery's
+skull/SDM
+skunk/GMDS
+skycap/MS
+skydiver/SM
+skydiving/MS
+Skye/M
+skyhook
+skyjacker/M
+skyjack/ZSGRDJ
+Skylab/M
+skylarker/M
+skylark/SRDMG
+Skylar/M
+Skyler/M
+skylight/MS
+skyline/MS
+Sky/M
+sky/MDRSGZ
+skyrocket/GDMS
+skyscraper/M
+skyscrape/RZ
+skyward/S
+skywave
+skyway/M
+skywriter/MS
+skywriting/MS
+slabbed
+slabbing
+slab/MS
+slacken/DG
+slacker/M
+slackness/MS
+slack/SPGTZXYRDN
+Slade/M
+slagged
+slagging
+slag/MS
+slain
+slake/DSG
+slaked/U
+slalom/SGMD
+slammed
+slammer/S
+slamming
+slam/S
+slander/MDRZSG
+slanderousness/M
+slanderous/PY
+slang/SMGD
+slangy/TR
+slanting/Y
+slant/SDG
+slantwise
+slapdash/S
+slaphappy/TR
+slap/MS
+slapped
+slapper
+slapping
+slapstick/MS
+slash/GZRSD
+slashing/Y
+slater/M
+Slater/M
+slate/SM
+slather/SMDG
+slating/M
+slat/MDRSGZ
+slatted
+slattern/MYS
+slatting
+slaughterer/M
+slaughterhouse/SM
+slaughter/SJMRDGZ
+slave/DSRGZM
+slaveholder/SM
+slaver/GDM
+slavery/SM
+Slavic/M
+slavishness/SM
+slavish/YP
+Slav/MS
+Slavonic/M
+slaw/MS
+slay/RGZS
+sleaze/S
+sleazily
+sleaziness/SM
+sleazy/RTP
+sledded
+sledder/S
+sledding
+sledgehammer/MDGS
+sledge/SDGM
+sled/SM
+sleekness/S
+sleek/PYRDGTS
+sleeper/M
+sleepily
+sleepiness/SM
+sleeping/M
+sleeplessness/SM
+sleepless/YP
+sleepover/S
+sleep/RMGZS
+sleepwalker/M
+sleepwalk/JGRDZS
+sleepwear/M
+sleepyhead/MS
+sleepy/PTR
+sleet/DMSG
+sleety/TR
+sleeveless
+sleeve/SDGM
+sleeving/M
+sleigh/GMD
+sleighs
+sleight/SM
+sleken/DG
+slenderize/DSG
+slenderness/MS
+slender/RYTP
+slept
+Slesinger/M
+sleuth/GMD
+sleuths
+slew/DGS
+slice/DSRGZM
+sliced/U
+slicer/M
+slicker/M
+slickness/MS
+slick/PSYRDGTZ
+slider/M
+slide/S
+slid/GZDR
+slight/DRYPSTG
+slighter/M
+slighting/Y
+slightness/S
+slime/SM
+sliminess/S
+slimline
+slimmed
+slimmer/S
+slimmest
+slimming/S
+slimness/S
+slim/SPGYD
+slimy/PTR
+sling/GMRS
+slingshot/MS
+slings/U
+slink/GS
+slinky/RT
+slipcase/MS
+slipcover/GMDS
+slipknot/SM
+slippage/SM
+slipped
+slipper/GSMD
+slipperiness/S
+slippery/PRT
+slipping
+slipshod
+slip/SM
+slipstream/MDGS
+slipway/SM
+slither/DSG
+slithery
+slit/SM
+slitted
+slitter/S
+slitting
+sliver/GSDM
+slivery
+Sloane/M
+Sloan/M
+slobber/SDG
+slobbery
+slob/MS
+Slocum/M
+sloe/MS
+sloganeer/MG
+slogan/MS
+slogged
+slogging
+slog/S
+sloop/SM
+slop/DRSGZ
+sloped/U
+slope/S
+slopped
+sloppily
+sloppiness/SM
+slopping
+sloppy/RTP
+slosh/GSDM
+slothfulness/MS
+slothful/PY
+sloth/GDM
+sloths
+slot/MS
+slotted
+slotting
+slouch/DRSZG
+sloucher/M
+slouchy/RT
+slough/GMD
+sloughs
+Slovakia/M
+Slovakian/S
+Slovak/S
+Slovene/S
+Slovenia/M
+Slovenian/S
+slovenliness/SM
+slovenly/TRP
+sloven/YMS
+slowcoaches
+slowdown/MS
+slowish
+slowness/MS
+slow/PGTYDRS
+slowpoke/MS
+SLR
+sludge/SDGM
+sludgy/TR
+slue/MGDS
+sluggard/MS
+slugged
+slugger/SM
+slugging
+sluggishness/SM
+sluggish/YP
+slug/MS
+sluice/SDGM
+slumberer/M
+slumber/MDRGS
+slumberous
+slumlord/MS
+slummed
+slummer
+slumming
+slum/MS
+slummy/TR
+slump/DSG
+slung/U
+slunk
+slur/MS
+slurp/GSD
+slurred
+slurried/M
+slurring
+slurrying/M
+slurry/MGDS
+slushiness/SM
+slush/SDMG
+slushy/RTP
+slut/MS
+sluttish
+slutty/TR
+Sly/M
+slyness/MS
+sly/RTY
+smacker/M
+smack/SMRDGZ
+smallholders
+smallholding/MS
+smallish
+Small/M
+smallness/S
+smallpox/SM
+small/SGTRDP
+smalltalk
+smalltime
+Smallwood/M
+smarmy/RT
+smarten/GD
+smartness/S
+smartypants
+smart/YRDNSGTXP
+smasher/M
+smash/GZRSD
+smashing/Y
+smashup/S
+smattering/SM
+smearer/M
+smear/GRDS
+smeary/TR
+smeller/M
+smelliness/MS
+smell/SBRDG
+smelly/TRP
+smelter/M
+smelt/SRDGZ
+Smetana/M
+smidgen/MS
+smilax/MS
+smile/GMDSR
+smiley/M
+smilies
+smiling/UY
+smirch/SDG
+smirk/GSMD
+Smirnoff/M
+smite/GSR
+smiter/M
+smith/DMG
+smithereens
+Smithfield/M
+Smith/M
+smiths
+Smithsonian/M
+Smithson/M
+Smithtown/M
+smithy/SM
+smitten
+Smitty/M
+Sm/M
+smocking/M
+smock/SGMDJ
+smoggy/TR
+smog/SM
+smoke/GZMDSRBJ
+smokehouse/MS
+smokeless
+smoker/M
+smokescreen/S
+smokestack/MS
+Smokey/M
+smokiness/S
+smoking/M
+smoky/RSPT
+smoldering/Y
+smolder/SGD
+Smolensk/M
+Smollett/M
+smooch/SDG
+smoothen/DG
+smoother/M
+smoothie/SM
+smoothness/MS
+smooths
+smooth/TZGPRDNY
+smörgåsbord/SM
+smote
+smother/GSD
+SMSA/MS
+SMTP
+Smucker/M
+smudge/GSD
+smudginess/M
+smudgy/TRP
+smugged
+smugger
+smuggest
+smugging
+smuggle/JZGSRD
+smuggler/M
+smugness/MS
+smug/YSP
+smut/SM
+Smuts/M
+smutted
+smuttiness/SM
+smutting
+smutty/TRP
+Smyrna/M
+snack/SGMD
+snaffle/GDSM
+snafu/DMSG
+snagged
+snagging
+snag/MS
+snail/GSDM
+Snake
+snakebird/M
+snakebite/MS
+snake/DSGM
+snakelike
+snakeroot/M
+snaky/TR
+snapback/M
+snapdragon/MS
+snapped/U
+snapper/SM
+snappily
+snappiness/SM
+snapping/U
+snappishness/SM
+snappish/PY
+snappy/PTR
+snapshot/MS
+snapshotted
+snapshotting
+snap/US
+snare/DSRGM
+snarer/M
+snarf/JSGD
+snarler/M
+snarling/Y
+snarl/UGSD
+snarly/RT
+snatch/DRSZG
+snatcher/M
+snazzily
+snazzy/TR
+Snead/M
+sneaker/MD
+sneakily
+sneakiness/SM
+sneaking/Y
+sneak/RDGZS
+sneaky/PRT
+Sneed/M
+sneerer/M
+sneer/GMRDJS
+sneering/Y
+sneeze/SRDG
+Snell/M
+snicker/GMRD
+snick/MRZ
+snideness/M
+Snider/M
+snide/YTSRP
+sniffer/M
+sniff/GZSRD
+sniffle/GDRS
+sniffler/M
+sniffles/M
+snifter/MDSG
+snigger's
+sniper/M
+snipe/SM
+snipped
+snipper/SM
+snippet/SM
+snipping
+snippy/RT
+snip/SGDRZ
+snitch/GDS
+snit/SM
+sniveler/M
+snivel/JSZGDR
+Sn/M
+snobbery/SM
+snobbishness/S
+snobbish/YP
+snobby/RT
+snob/MS
+Snodgrass/M
+snood/SGDM
+snooker/GMD
+snook/SMRZ
+snooper/M
+snoop/SRDGZ
+Snoopy/M
+snoopy/RT
+snootily
+snootiness/MS
+snoot/SDMG
+snooty/TRP
+snooze/GSD
+snore/DSRGZ
+snorkel/ZGSRDM
+snorter/M
+snort/GSZRD
+snot/MS
+snotted
+snottily
+snottiness/SM
+snotting
+snotty/TRP
+snout/SGDM
+snowball/SDMG
+snowbank/SM
+Snowbelt/SM
+snowbird/SM
+snowblower/S
+snowboard/GZDRJS
+snowbound
+snowcapped
+snowdrift/MS
+snowdrop/MS
+snowfall/MS
+snowfield/MS
+snowflake/MS
+snow/GDMS
+snowily
+snowiness/MS
+Snow/M
+snowman/M
+snowmen
+snowmobile/GMDRS
+snowplough/M
+snowploughs
+snowplow/SMGD
+snowshed
+snowshoeing
+snowshoe/MRS
+snowshoer/M
+snowstorm/MS
+snowsuit/S
+snowy/RTP
+snubbed
+snubber
+snubbing
+snub/SP
+snuffbox/SM
+snuffer/M
+snuff/GZSYRD
+snuffle/GDSR
+snuffler/M
+snuffly/RT
+snugged
+snugger
+snuggest
+snugging
+snuggle/GDS
+snuggly
+snugness/MS
+snug/SYP
+Snyder/M
+so
+SO
+soaker/M
+soak/GDRSJ
+soapbox/DSMG
+soapiness/S
+soap/MDRGS
+soapstone/MS
+soapsud/S
+soapy/RPT
+soar/DRJSG
+soarer/M
+soaring/Y
+sobbed
+sobbing/Y
+soberer/M
+soberness/SM
+sober/PGTYRD
+sobriety/SIM
+sobriquet/MS
+sob/SZR
+Soc
+soccer/MS
+sociabilities
+sociability/IM
+sociable/S
+sociably/IU
+socialism/SM
+socialistic
+socialist/SM
+socialite/SM
+sociality/M
+socialization/SM
+socialized/U
+socializer/M
+socialize/RSDG
+socially/U
+social/SY
+societal/Y
+society/MS
+socio
+sociobiology/M
+sociocultural/Y
+sociodemographic
+socioeconomically
+socioeconomic/S
+sociolinguistics/M
+sociological/MY
+sociologist/SM
+sociology/SM
+sociometric
+sociometry/M
+sociopath/M
+sociopaths
+socket/SMDG
+sock/GDMS
+Socorro/M
+Socrates/M
+Socratic/S
+soc/S
+soda/SM
+sodded
+sodden/DYPSG
+soddenness/M
+sodding
+Soddy/M
+sodium/MS
+sod/MS
+sodomite/MS
+sodomize/GDS
+Sodom/M
+sodomy/SM
+soever
+sofa/SM
+Sofia/M
+Sofie/M
+softball/MS
+softbound
+softener/M
+soften/ZGRD
+softhearted
+softie's
+softness/MS
+soft/SPXTYNR
+software/MS
+softwood/SM
+softy/SM
+soggily
+sogginess/S
+soggy/RPT
+Soho/M
+soigné
+soiled/U
+soil/SGMD
+soirée/SM
+sojourn/RDZGSM
+solace/GMSRD
+solacer/M
+solaria
+solarium/M
+solar/S
+solder/RDMSZG
+soldier/MDYSG
+soldiery/MS
+sold/RU
+solecism/MS
+soled/FA
+solemness
+solemnify/GSD
+solemnity/MS
+solemnization/SM
+solemnize/GSD
+solemnness/SM
+solemn/PTRY
+solenoid/MS
+soler/F
+soles/IFA
+sole/YSP
+sol/GSMDR
+solicitation/S
+solicited/U
+solicitor/MS
+solicitousness/S
+solicitous/YP
+solicit/SDG
+solicitude/MS
+solidarity/MS
+solidi
+solidification/M
+solidify/NXSDG
+solidity/S
+solidness/SM
+solid/STYRP
+solidus/M
+soliloquies
+soliloquize/DSG
+soliloquy/M
+soling/NM
+solipsism/MS
+solipsist/S
+Solis/M
+solitaire/SM
+solitary/SP
+solitude/SM
+Sollie/M
+Solly/M
+Sol/MY
+solo/DMSG
+soloist/SM
+Solomon/SM
+Solon/M
+Soloviev/M
+solstice/SM
+solubility/IMS
+soluble/SI
+solute/ENAXS
+solute's
+solution/AME
+solvable/UI
+solvating
+solve/ABSRDZG
+solved/EU
+solvency/IMS
+solvent/IS
+solvently
+solvent's
+solver/MEA
+solves/E
+solving/E
+Solzhenitsyn/M
+Somalia/M
+Somalian/S
+Somali/MS
+soma/M
+somatic
+somberness/SM
+somber/PY
+sombre
+sombrero/SM
+somebody'll
+somebody/SM
+someday
+somehow
+someone'll
+someone/SM
+someplace/M
+somersault/DSGM
+Somerset/M
+somerset/S
+somersetted
+somersetting
+Somerville/M
+something/S
+sometime/S
+someway/S
+somewhat/S
+somewhere/S
+some/Z
+sommelier/SM
+Somme/M
+somnambulism/SM
+somnambulist/SM
+somnolence/MS
+somnolent/Y
+Somoza/M
+sonar/SM
+sonata/MS
+sonatina/SM
+Sondheim/M
+Sondra/M
+Sonenberg/M
+songbag
+songbird/SM
+songbook/S
+songfest/MS
+songfulness/M
+songful/YP
+Songhai/M
+Songhua/M
+song/MS
+songster/MS
+songstress/SM
+songwriter/SM
+songwriting
+Sonia/M
+sonic/S
+Sonja/M
+Son/M
+sonnet/MDSG
+Sonnie/M
+Sonni/M
+Sonnnie/M
+Sonny/M
+sonny/SM
+Sonoma/M
+Sonora/M
+sonority/S
+sonorousness/SM
+sonorous/PY
+son/SMY
+Sontag/M
+sonuvabitch
+Sonya/M
+Sony/M
+soonish
+soon/TR
+soothe
+soother/M
+sooth/GZTYSRDMJ
+soothingness/M
+soothing/YP
+sooths
+soothsayer/M
+soothsay/JGZR
+soot/MGDS
+sooty/RT
+SOP
+Sophey/M
+Sophia/SM
+Sophie/M
+Sophi/M
+sophism/SM
+sophister/M
+sophistical
+sophisticatedly
+sophisticated/U
+sophisticate/XNGDS
+sophistication/MU
+sophistic/S
+sophist/RMS
+sophistry/SM
+Sophoclean
+Sophocles/M
+sophomore/SM
+sophomoric
+Sophronia/M
+soporifically
+soporific/SM
+sopped
+sopping/S
+soppy/RT
+soprano/SM
+sop/SM
+Sopwith/M
+sorbet/SM
+Sorbonne/M
+sorcerer/MS
+sorceress/S
+sorcery/MS
+Sorcha/M
+sordidness/SM
+sordid/PY
+sorehead/SM
+soreness/S
+Sorensen/M
+Sorenson/M
+sore/PYTGDRS
+sorghum/MS
+sorority/MS
+sorrel/SM
+Sorrentine/M
+sorrily
+sorriness/SM
+sorrower/M
+sorrowfulness/SM
+sorrowful/YP
+sorrow/GRDMS
+sorry/PTSR
+sorta
+sortable
+sorted/U
+sorter/MS
+sort/FSAGD
+sortieing
+sortie/MSD
+sort's
+sos
+SOS
+Sosa/M
+Sosanna/M
+Soto/M
+sot/SM
+sottish
+soubriquet's
+soufflé/MS
+sough/DG
+soughs
+sought/U
+soulfulness/MS
+soulful/YP
+soulless/Y
+soul/MDS
+sound/AUD
+soundboard/MS
+sounders
+sounder's
+sounder/U
+soundest
+sounding/AY
+soundings
+sounding's
+soundless/Y
+soundly/U
+soundness/UMS
+soundproof/GSD
+soundproofing/M
+sound's
+sounds/A
+soundtrack/MS
+soupçon/SM
+soup/GMDS
+Souphanouvong/M
+soupy/RT
+source/ASDMG
+sourceless
+sourdough
+sourdoughs
+sourish
+sourness/MS
+sourpuss/MS
+sour/TYDRPSG
+Sousa/M
+sousaphone/SM
+sous/DSG
+souse
+sou/SMH
+Southampton/M
+southbound
+southeastern
+southeaster/YM
+Southeast/MS
+southeast/RZMS
+southeastward/S
+southerly/S
+souther/MY
+southerner/M
+Southerner/MS
+southernisms
+southernmost
+southern/PZSYR
+Southey/M
+Southfield/M
+southing/M
+southland/M
+South/M
+southpaw/MS
+south/RDMG
+souths
+Souths
+southward/S
+southwestern
+southwester/YM
+Southwest/MS
+southwest/RMSZ
+southwestward/S
+souvenir/SM
+sou'wester
+sovereignty/MS
+sovereign/YMS
+soviet/MS
+Soviet/S
+sow/ADGS
+sowbelly/M
+sowens/M
+sower/DS
+Soweto/M
+sown/A
+sox's
+soybean/MS
+Soyinka/M
+soy/MS
+Soyuz/M
+Spaatz/M
+spacecraft/MS
+space/DSRGZMJ
+spaceflight/S
+spaceman/M
+spacemen
+spaceport/SM
+spacer/M
+spaceship/MS
+spacesuit/MS
+spacewalk/GSMD
+Spacewar/M
+spacewoman
+spacewomen
+spacey
+spacial
+spacier
+spaciest
+spaciness
+spacing/M
+spaciousness/SM
+spacious/PY
+Spackle
+spade/DSRGM
+spadeful/SM
+spader/M
+spadework/SM
+spadices
+spadix/M
+Spafford/M
+spaghetti/SM
+Spahn/M
+Spain/M
+spake
+Spalding/M
+Spam/M
+spa/MS
+Span
+spandex/MS
+spandrels
+spangle/GMDS
+Spanglish/S
+Spaniard/SM
+spanielled
+spanielling
+spaniel/SM
+Spanish/M
+spanker/M
+spanking/M
+spank/SRDJG
+span/MS
+spanned/U
+spanner/SM
+spanning
+SPARC/M
+SPARCstation/M
+spar/DRMGTS
+spareness/MS
+spare/PSY
+spareribs
+sparer/M
+sparing/UY
+sparker/M
+sparkle/DRSGZ
+sparkler/M
+Sparkman/M
+Sparks
+spark/SGMRD
+sparky/RT
+sparling/SM
+sparred
+sparrer
+sparring/U
+sparrow/MS
+sparseness/S
+sparse/YP
+sparsity/S
+spars/TR
+Spartacus/M
+Sparta/M
+spartan
+Spartan/S
+spasm/GSDM
+spasmodic
+spasmodically
+spastic/S
+spate/SM
+spathe/MS
+spatiality/M
+spatial/Y
+spat/MS
+spatted
+spatter/DGS
+spatterdock/M
+spatting
+spatula/SM
+spavin/DMS
+spawner/M
+spawn/MRDSG
+spay/DGS
+SPCA
+speakable/U
+speakeasy/SM
+speaker/M
+Speaker's
+speakership/M
+speaking/U
+speak/RBGZJS
+spearer/M
+spearfish/SDMG
+spearhead/GSDM
+spearmint/MS
+spear/MRDGS
+Spears
+spec'd
+specialism/MS
+specialist/MS
+specialization/SM
+specialized/U
+specialize/GZDSR
+specializing/U
+special/SRYP
+specialty/MS
+specie/MS
+specif
+specifiability
+specifiable
+specifiably
+specifically
+specification/SM
+specificity/S
+specific/SP
+specified/U
+specifier/SM
+specifies
+specify/AD
+specifying
+specimen/SM
+spec'ing
+speciousness/SM
+specious/YP
+speck/GMDS
+speckle/GMDS
+spec/SM
+spectacle/MSD
+spectacular/SY
+spectator/SM
+specter/DMS
+specter's/A
+spectralness/M
+spectral/YP
+spectra/M
+spectrogram/MS
+spectrographically
+spectrograph/M
+spectrography/M
+spectrometer/MS
+spectrometric
+spectrometry/M
+spectrophotometer/SM
+spectrophotometric
+spectrophotometry/M
+spectroscope/SM
+spectroscopic
+spectroscopically
+spectroscopy/SM
+spectrum/M
+specularity
+specular/Y
+speculate/VNGSDX
+speculation/M
+speculative/Y
+speculator/SM
+sped
+speech/GMDS
+speechlessness/SM
+speechless/YP
+speedboat/GSRM
+speedboating/M
+speeder/M
+speedily
+speediness/SM
+speedometer/MS
+speed/RMJGZS
+speedster/SM
+speedup/MS
+speedway/SM
+speedwell/MS
+speedy/PTR
+speer/M
+speleological
+speleologist/S
+speleology/MS
+spellbinder/M
+spellbind/SRGZ
+spellbound
+spelldown/MS
+spelled/A
+speller/M
+spelling/M
+spell/RDSJGZ
+spells/A
+spelunker/MS
+spelunking/S
+Spencerian
+Spencer/M
+Spence/RM
+spender/M
+spend/SBJRGZ
+spendthrift/MS
+Spenglerian
+Spengler/M
+Spense/MR
+Spenserian
+Spenser/M
+spent/U
+spermatophyte/M
+spermatozoa
+spermatozoon/M
+spermicidal
+spermicide/MS
+sperm/SM
+Sperry/M
+spew/DRGZJS
+spewer/M
+SPF
+sphagnum/SM
+sphere/SDGM
+spherical/Y
+spheric/S
+spherics/M
+spheroidal/Y
+spheroid/SM
+spherule/MS
+sphincter/SM
+Sphinx/M
+sphinx/MS
+Spica/M
+spic/DGM
+spicebush/M
+spice/SM
+spicily
+spiciness/SM
+spicule/MS
+spicy/PTR
+spider/SM
+spiderweb/S
+spiderwort/M
+spidery/TR
+Spiegel/M
+Spielberg/M
+spiel/GDMS
+spier/M
+spiffy/TDRSG
+spigot/MS
+spike/GMDSR
+Spike/M
+spiker/M
+spikiness/SM
+spiky/PTR
+spillage/SM
+Spillane/M
+spillover/SM
+spill/RDSG
+spillway/SM
+spinach/MS
+spinal/YS
+spindle/JGMDRS
+spindly/RT
+spinelessness/M
+spineless/YP
+spine/MS
+spinet/SM
+spininess/M
+spinnability/M
+spinnaker/SM
+spinneret/MS
+spinner/SM
+spinning/SM
+Spinoza/M
+spin/S
+spinsterhood/SM
+spinsterish
+spinster/MS
+spiny/PRT
+spiracle/SM
+spiraea's
+spiral/YDSG
+spire/AIDSGF
+spirea/MS
+spire's
+spiritedness/M
+spirited/PY
+spirit/GMDS
+spiritless
+spirits/I
+spiritualism/SM
+spiritualistic
+spiritualist/SM
+spirituality/SM
+spiritual/SYP
+spirituous
+spirochete/SM
+Spiro/M
+spiry/TR
+spitball/SM
+spite/CSDAG
+spitefuller
+spitefullest
+spitefulness/MS
+spiteful/PY
+spite's/A
+spitfire/SM
+spit/SGD
+spitted
+spitting
+spittle/SM
+spittoon/SM
+Spitz/M
+splashdown/MS
+splasher/M
+splash/GZDRS
+splashily
+splashiness/MS
+splashy/RTP
+splat/SM
+splatted
+splatter/DSG
+splatting
+splayfeet
+splayfoot/MD
+splay/SDG
+spleen/SM
+splendidness/M
+splendid/YRPT
+splendorous
+splendor/SM
+splenetic/S
+splicer/M
+splice/RSDGZJ
+spline/MSD
+splinter/GMD
+splintery
+splint/SGZMDR
+splits/M
+split/SM
+splittable
+splitter/MS
+splitting/S
+splodge/SM
+splotch/MSDG
+splotchy/RT
+splurge/GMDS
+splutterer/M
+splutter/RDSG
+Sp/M
+Spock/M
+spoilables
+spoilage/SM
+spoil/CSZGDR
+spoiled/U
+spoiler/MC
+spoilsport/SM
+Spokane/M
+spoke/DSG
+spoken/U
+spokeshave/MS
+spokesman/M
+spokesmen
+spokespeople
+spokesperson/S
+spokeswoman/M
+spokeswomen
+spoliation/MCS
+spongecake
+sponge/GMZRSD
+sponger/M
+sponginess/S
+spongy/TRP
+sponsor/DGMS
+sponsorship/S
+spontaneity/SM
+spontaneousness/M
+spontaneous/PY
+spoof/SMDG
+spookiness/MS
+spook/SMDG
+spooky/PRT
+spool/SRDMGZ
+spoonbill/SM
+spoonerism/SM
+spoonful/MS
+spoon/GSMD
+spoor/GSMD
+sporadically
+sporadic/Y
+spore/DSGM
+sporran/MS
+sportiness/SM
+sporting/Y
+sportiveness/M
+sportive/PY
+sportscast/RSGZM
+sportsmanlike/U
+sportsman/MY
+sportsmanship/MS
+sportsmen
+sportswear/M
+sportswoman/M
+sportswomen
+sportswriter/S
+sport/VGSRDM
+sporty/PRT
+Sposato/M
+spotlessness/MS
+spotless/YP
+spotlight/GDMS
+spotlit
+spot/MSC
+spotted/U
+spotter/MS
+spottily
+spottiness/SM
+spotting/M
+spotty/RTP
+spousal/MS
+spouse/GMSD
+spouter/M
+spout/SGRD
+sprain/SGD
+sprang/S
+sprat/SM
+sprawl/GSD
+sprayed/UA
+sprayer/M
+spray/GZSRDM
+sprays/A
+spreadeagled
+spreader/M
+spread/RSJGZB
+spreadsheet/S
+spreeing
+spree/MDS
+sprigged
+sprigging
+sprightliness/MS
+sprightly/PRT
+sprig/MS
+springboard/MS
+springbok/MS
+springeing
+springer/M
+Springfield/M
+springily
+springiness/SM
+springing/M
+springlike
+spring/SGZR
+Springsteen/M
+springtime/MS
+springy/TRP
+sprinkle/DRSJZG
+sprinkler/DM
+sprinkling/M
+Sprint/M
+sprint/SGZMDR
+sprite/SM
+spritz/GZDSR
+sprocket/DMGS
+sprocketed/U
+Sproul/M
+sprout/GSD
+spruce/GMTYRSDP
+spruceness/SM
+sprue/M
+sprung/U
+spryness/S
+spry/TRY
+SPSS
+spudded
+spudding
+spud/MS
+Spuds/M
+spume/DSGM
+spumone's
+spumoni/S
+spumy/TR
+spun
+spunk/GSMD
+spunky/SRT
+spurge/MS
+spuriousness/SM
+spurious/PY
+spur/MS
+spurn/RDSG
+spurred
+spurring
+spurt/SGD
+sputa
+Sputnik
+sputnik/MS
+sputter/DRGS
+sputum/M
+spy/DRSGM
+spyglass/MS
+sq
+sqq
+sqrt
+squabbed
+squabber
+squabbest
+squabbing
+squabbler/M
+squabble/ZGDRS
+squab/SM
+squadded
+squadding
+squadron/MDGS
+squad/SM
+squalidness/SM
+squalid/PRYT
+squaller/M
+squall/GMRDS
+squally/RT
+squalor/SM
+squamous/Y
+squander/GSRD
+Squanto
+square/GMTYRSDP
+squareness/SM
+squarer/M
+Squaresville/M
+squarish
+squash/GSRD
+squashiness/M
+squashy/RTP
+squatness/MS
+squat/SPY
+squatted
+squatter/SMDG
+squattest
+squatting
+squawker/M
+squawk/GRDMZS
+squaw/SM
+squeaker/M
+squeakily
+squeakiness/S
+squeak/RDMGZS
+squeaky/RPT
+squealer/M
+squeal/MRDSGZ
+squeamishness/SM
+squeamish/YP
+squeegee/DSM
+squeegeeing
+squeeze/GZSRDB
+squeezer/M
+squelcher/M
+squelch/GDRS
+squelchy/RT
+squibbed
+Squibb/GM
+squibbing
+Squibbing/M
+squib/SM
+squidded
+squidding
+squid/SM
+squiggle/MGDS
+squiggly/RT
+squinter/M
+squint/GTSRD
+squinting/Y
+squirehood
+squire/SDGM
+squirm/SGD
+squirmy/TR
+squirrel/SGYDM
+squirter/M
+squirt/GSRD
+squish/GSD
+squishy/RTP
+Sr
+Srinagar/M
+SRO
+S's
+SS
+SSA
+SSE
+ssh
+s's/KI
+SSS
+SST
+SSW
+ST
+stabbed
+stabber/S
+stabbing/S
+stability/ISM
+stabilizability
+stabilization/CS
+stabilization's
+stabilize/CGSD
+stabilizer/MS
+stableman/M
+stablemate
+stablemen
+stableness/UM
+stable/RSDGMTP
+stabler/U
+stable's/F
+stables/F
+stablest/U
+stabling/M
+stably/U
+stab/YS
+staccato/S
+Stacee/M
+Stace/M
+Stacey/M
+Stacia/M
+Stacie/M
+Staci/M
+stackable
+stacker/M
+stack's
+stack/USDG
+Stacy/M
+stadias
+stadia's
+stadium/MS
+Stael/M
+Stafani/M
+staff/ADSG
+Staffard/M
+staffer/MS
+Stafford/M
+Staffordshire/M
+staffroom
+staff's
+Staford/M
+stag/DRMJSGZ
+stagecoach/MS
+stagecraft/MS
+stagehand/MS
+stager/M
+stage/SM
+stagestruck
+stagflation/SM
+stagged
+staggerer/M
+stagger/GSJDR
+staggering/Y
+staggers/M
+stagging
+staginess/M
+staging/M
+stagnancy/SM
+stagnant/Y
+stagnate/NGDSX
+stagnation/M
+stagy/PTR
+Stahl/M
+staidness/MS
+staid/YRTP
+stained/U
+stainer/M
+stainless/YS
+stain/SGRD
+staircase/SM
+stair/MS
+stairway/SM
+stairwell/MS
+stake/DSGM
+stakeholder/S
+stakeout/SM
+stalactite/SM
+stalag/M
+stalagmite/SM
+stalemate/SDMG
+staleness/MS
+stale/PGYTDSR
+Staley/M
+Stalingrad/M
+Stalinist
+Stalin/SM
+stalker/M
+stalk/MRDSGZJ
+stall/DMSJG
+stalled/I
+stallholders
+stallion/SM
+Stallone/M
+stalls/I
+stalwartness/M
+stalwart/PYS
+Sta/M
+stamen/MS
+Stamford/M
+stamina/SM
+staminate
+stammer/DRSZG
+stammerer/M
+stammering/Y
+stampede/MGDRS
+stampeder/M
+stamped/U
+stamper/M
+stamp/RDSGZJ
+stance/MIS
+stancher/M
+stanch/GDRST
+stanchion/SGMD
+standalone
+standardization/AMS
+standardized/U
+standardize/GZDSR
+standardizer/M
+standardizes/A
+standard/YMS
+standby
+standbys
+standee/MS
+Standford/M
+standing/M
+Standish/M
+standoffish
+standoff/SM
+standout/MS
+standpipe/MS
+standpoint/SM
+stand/SJGZR
+standstill/SM
+Stanfield/M
+Stanford/M
+Stanislas/M
+Stanislaus/M
+Stanislavsky/M
+Stanislaw/M
+stank/S
+Stanleigh/M
+Stanley/M
+Stanly/M
+stannic
+stannous
+Stanton/M
+Stanwood/M
+Stan/YMS
+stanza/MS
+staph/M
+staphs
+staphylococcal
+staphylococci
+staphylococcus/M
+stapled/U
+stapler/M
+Stapleton/M
+staple/ZRSDGM
+starboard/SDMG
+starchily
+starchiness/MS
+starch/MDSG
+starchy/TRP
+stardom/MS
+star/DRMGZS
+stardust/MS
+stare/S
+starfish/SM
+Stargate/M
+stargaze/ZGDRS
+staring/U
+Starkey/M
+Stark/M
+starkness/MS
+stark/SPGTYRD
+Starla/M
+Starlene/M
+starless
+starlet/MS
+starlight/MS
+starling/MS
+Starlin/M
+starlit
+Star/M
+starred
+starring
+Starr/M
+starry/TR
+starship
+starstruck
+start/ASGDR
+starter/MS
+startle/GDS
+startling/PY
+startup/SM
+starvation/MS
+starveling/M
+starver/M
+starve/RSDG
+stash/GSD
+stasis/M
+stat/DRSGV
+statecraft/MS
+stated/U
+statehood/MS
+statehouse/S
+Statehouse's
+state/IGASD
+statelessness/MS
+stateless/P
+stateliness/MS
+stately/PRT
+statement/MSA
+Staten/M
+stater/M
+stateroom/SM
+stateside
+state's/K
+states/K
+statesmanlike
+statesman/MY
+statesmanship/SM
+statesmen
+stateswoman
+stateswomen
+statewide
+statical/Y
+static/S
+statics/M
+stationarity
+stationary/S
+stationer/M
+stationery/MS
+stationmaster/M
+station/SZGMDR
+statistical/Y
+statistician/MS
+statistic/MS
+Statler/M
+stator/SM
+statuary/SM
+statue/MSD
+statuesque/YP
+statuette/MS
+stature/MS
+status/SM
+statute/SM
+statutorily
+statutory/P
+Stauffer/M
+staunchness/S
+staunch/PDRSYTG
+stave/DGM
+Stavro/MS
+stay/DRGZS
+stayer/M
+std
+STD
+stdio
+steadfastness/MS
+steadfast/PY
+steadily/U
+steadiness's
+steadiness/US
+steading/M
+stead/SGDM
+steady/DRSUTGP
+steakhouse/SM
+steak/SM
+stealer/M
+stealing/M
+steal/SRHG
+stealthily
+stealthiness/MS
+stealth/M
+stealths
+stealthy/PTR
+steamboat/MS
+steamer/MDG
+steamfitter/S
+steamfitting/S
+steamily
+steaminess/SM
+steamroller/DMG
+steamroll/GZRDS
+steam/SGZRDMJ
+steamship/SM
+steamy/RSTP
+Stearne/M
+Stearn/SM
+steed/SM
+Steele/M
+steeliness/SM
+steelmaker/M
+steel/SDMGZ
+steelworker/M
+steelwork/ZSMR
+steelyard/MS
+steely/TPRS
+Steen/M
+steepen/GD
+steeper/M
+steeplebush/M
+steeplechase/GMSD
+steeplejack/MS
+steeple/MS
+steepness/S
+steep/SYRNDPGTX
+steerage/MS
+steerer/M
+steer/SGBRDJ
+steersman/M
+steersmen
+steeves
+Stefa/M
+Stefania/M
+Stefanie/M
+Stefan/M
+Stefano/M
+Steffane/M
+Steffen/M
+Steffie/M
+Steffi/M
+stegosauri
+stegosaurus/S
+Steinbeck/SM
+Steinberg/M
+Steinem/M
+Steiner/M
+Steinmetz/M
+Stein/RM
+stein/SGZMRD
+Steinway/M
+Stella/M
+stellar
+stellated
+Ste/M
+stemless
+stemmed/U
+stemming
+stem/MS
+stemware/MS
+stench/GMDS
+stenciler/M
+stencil/GDRMSZ
+stencillings
+Stendhal/M
+Stendler/M
+Stengel/M
+stenographer/SM
+stenographic
+stenography/SM
+steno/SM
+stenotype/M
+stentorian
+stepbrother/MS
+stepchild/M
+stepchildren
+stepdaughter/MS
+stepfather/SM
+Stepha/M
+Stephana/M
+Stephanie/M
+Stephani/M
+Stephan/M
+Stephannie/M
+Stephanus/M
+Stephenie/M
+Stephen/MS
+Stephenson/M
+Stephie/M
+Stephi/M
+Stephine/M
+stepladder/SM
+step/MIS
+stepmother/SM
+stepparent/SM
+stepper/M
+steppe/RSDGMZ
+steppingstone/S
+stepsister/SM
+stepson/SM
+stepwise
+stereographic
+stereography/M
+stereo/GSDM
+stereophonic
+stereoscope/MS
+stereoscopic
+stereoscopically
+stereoscopy/M
+stereotype/GMZDRS
+stereotypic
+stereotypical/Y
+sterile
+sterility/SM
+sterilization/SM
+sterilized/U
+sterilize/RSDGZ
+sterilizes/A
+Sterling/M
+sterling/MPYS
+sterlingness/M
+sternal
+Sternberg/M
+Sterne/M
+Stern/M
+sternness/S
+Sterno
+stern/SYRDPGT
+sternum/SM
+steroidal
+steroid/MS
+stertorous
+Stesha/M
+stethoscope/SM
+stet/MS
+stetson/MS
+Stetson/SM
+stetted
+stetting
+Steuben/M
+Stevana/M
+stevedore/GMSD
+Steve/M
+Stevena/M
+Steven/MS
+Stevenson/M
+Stevie/M
+Stevy/M
+steward/DMSG
+stewardess/SM
+Steward/M
+stewardship/MS
+Stewart/M
+stew/GDMS
+st/GBJ
+sticker/M
+stickily
+stickiness/SM
+stickleback/MS
+stickle/GZDR
+stickler/M
+stick/MRDSGZ
+stickpin/SM
+stickup/SM
+sticky/GPTDRS
+Stieglitz/M
+stiffen/JZRDG
+stiff/GTXPSYRND
+stiffness/MS
+stifle/GJRSD
+stifler/M
+stifling/Y
+stigma/MS
+stigmata
+stigmatic/S
+stigmatization/C
+stigmatizations
+stigmatization's
+stigmatize/DSG
+stigmatized/U
+stile/GMDS
+stiletto/MDSG
+stillbirth/M
+stillbirths
+stillborn/S
+stiller/MI
+stillest
+Stillman/M
+Stillmann/M
+stillness/MS
+still/RDIGS
+Stillwell/M
+stilted/PY
+stilt/GDMS
+Stilton/MS
+Stimson/M
+stimulant/MS
+stimulated/U
+stimulate/SDVGNX
+stimulation/M
+stimulative/S
+stimulator/M
+stimulatory
+stimuli/M
+stimulus/MS
+Stine/M
+stinger/M
+sting/GZR
+stingily
+stinginess/MS
+stinging/Y
+stingray/MS
+stingy/RTP
+stinkbug/S
+stinker/M
+stink/GZRJS
+stinking/Y
+stinkpot/M
+Stinky/M
+stinky/RT
+stinter/M
+stinting/U
+stint/JGRDMS
+stipendiary
+stipend/MS
+stipple/JDRSG
+stippler/M
+stipulate/XNGSD
+stipulation/M
+Stirling/M
+stirred/U
+stirrer/SM
+stirring/YS
+stirrup/SM
+stir/S
+stitch/ASDG
+stitcher/M
+stitchery/S
+stitching/MS
+stitch's
+St/M
+stoat/SM
+stochastic
+stochastically
+stochasticity
+stockade/SDMG
+stockbreeder/SM
+stockbroker/MS
+stockbroking/S
+stocker/SM
+Stockhausen/M
+stockholder/SM
+Stockholm/M
+stockily
+stockiness/SM
+stockinet's
+stockinette/S
+stocking/MDS
+stockist/MS
+stockpile/GRSD
+stockpiler/M
+stockpot/MS
+stockroom/MS
+stock's
+stock/SGAD
+stocktaking/MS
+Stockton/M
+stockyard/SM
+stocky/PRT
+Stoddard/M
+stodge/M
+stodgily
+stodginess/S
+stodgy/TRP
+stogy/SM
+stoical/Y
+stoichiometric
+stoichiometry/M
+stoicism/SM
+Stoicism/SM
+stoic/MS
+Stoic/MS
+stoke/DSRGZ
+stoker/M
+stokes/M
+Stokes/M
+STOL
+stole/MDS
+stolen
+stolidity/S
+stolidness/S
+stolid/PTYR
+stolon/SM
+stomachache/MS
+stomacher/M
+stomach/RSDMZG
+stomachs
+stomp/DSG
+stonecutter/SM
+stone/DSRGM
+Stonehenge/M
+stoneless
+Stone/M
+stonemason/MS
+stoner/M
+stonewall/GDS
+stoneware/MS
+stonewashed
+stonework/SM
+stonewort/M
+stonily
+stoniness/MS
+stony/TPR
+stood
+stooge/SDGM
+stool/SDMG
+stoop/SDG
+stopcock/MS
+stopgap/SM
+stoplight/SM
+stopover/MS
+stoppable/U
+stoppage/MS
+Stoppard/M
+stopped/U
+stopper/GMDS
+stopping/M
+stopple/GDSM
+stop's
+stops/M
+stop/US
+stopwatch/SM
+storage/SM
+store/ADSRG
+storefront/SM
+storehouse/MS
+storekeeper/M
+storekeep/ZR
+storeroom/SM
+store's
+stork/SM
+stormbound
+stormer/M
+Stormie/M
+stormily
+Stormi/M
+storminess/S
+Storm/M
+storm/SRDMGZ
+stormtroopers
+Stormy/M
+stormy/PTR
+storyboard/MDSG
+storybook/MS
+story/GSDM
+storyline
+storyteller/SM
+storytelling/MS
+Stouffer/M
+stoup/SM
+stouten/DG
+stouthearted
+Stout/M
+stoutness/MS
+stout/STYRNP
+stove/DSRGM
+stovepipe/SM
+stover/M
+stowage/SM
+stowaway/MS
+Stowe/M
+stow/GDS
+Strabo/M
+straddler/M
+straddle/ZDRSG
+Stradivari/SM
+Stradivarius/M
+strafe/GRSD
+strafer/M
+straggle/GDRSZ
+straggly/RT
+straightaway/S
+straightedge/MS
+straightener/M
+straighten/ZGDR
+straightforwardness/MS
+straightforward/SYP
+straightjacket's
+straightness/MS
+straight/RNDYSTXGP
+straightway/S
+strain/ASGZDR
+strained/UF
+strainer/MA
+straining/F
+strains/F
+straiten/DG
+straitjacket/GDMS
+straitlaced
+straitness/M
+strait/XTPSMGYDNR
+stranded/P
+strand/SDRG
+strangeness/SM
+strange/PYZTR
+stranger/GMD
+stranglehold/MS
+strangle/JDRSZG
+strangles/M
+strangulate/NGSDX
+strangulation/M
+strapless/S
+strapped/U
+strapping/S
+strap's
+strap/US
+Strasbourg/M
+stratagem/SM
+strata/MS
+strategical/Y
+strategic/S
+strategics/M
+strategist/SM
+strategy/SM
+Stratford/M
+strati
+stratification/M
+stratified/U
+stratify/NSDGX
+stratigraphic
+stratigraphical
+stratigraphy/M
+stratosphere/SM
+stratospheric
+stratospherically
+stratum/M
+stratus/M
+Strauss
+Stravinsky/M
+strawberry/SM
+strawflower/SM
+straw/SMDG
+strayer/M
+stray/GSRDM
+streak/DRMSGZ
+streaker/M
+streaky/TR
+streamed/U
+streamer/M
+stream/GZSMDR
+streaming/M
+streamline/SRDGM
+streetcar/MS
+streetlight/SM
+street/SMZ
+streetwalker/MS
+streetwise
+Streisand/M
+strengthen/AGDS
+strengthener/MS
+strength/NMX
+strengths
+strenuousness/SM
+strenuous/PY
+strep/MS
+streptococcal
+streptococci
+streptococcus/M
+streptomycin/SM
+stress/DSMG
+stressed/U
+stressful/YP
+stretchability/M
+stretchable/U
+stretch/BDRSZG
+stretcher/DMG
+stretchy/TRP
+strew/GDHS
+strewn
+striae
+stria/M
+striate/DSXGN
+striated/U
+striation/M
+stricken
+Strickland/M
+strict/AF
+stricter
+strictest
+strictly
+strictness/S
+stricture/SM
+stridden
+stridency/S
+strident/Y
+strider/M
+stride/RSGM
+strife/SM
+strikebreaker/M
+strikebreaking/M
+strikebreak/ZGR
+strikeout/S
+striker/M
+strike/RSGZJ
+striking/Y
+Strindberg/M
+stringed
+stringency/S
+stringent/Y
+stringer/MS
+stringiness/SM
+stringing/M
+string's
+string/SAG
+stringy/RTP
+striper/M
+stripe/SM
+strip/GRDMS
+stripling/M
+stripped/U
+stripper/MS
+stripping
+stripteaser/M
+striptease/SRDGZM
+stripy/RT
+strive/JRSG
+striven
+striver/M
+strobe/SDGM
+stroboscope/SM
+stroboscopic
+strode
+stroke/ZRSDGM
+stroking/M
+stroller/M
+stroll/GZSDR
+Stromberg/M
+Stromboli/M
+Strom/M
+strongbow
+strongbox/MS
+Strongheart/M
+stronghold/SM
+strongish
+Strong/M
+strongman/M
+strongmen
+strongroom/MS
+strong/YRT
+strontium/SM
+strophe/MS
+strophic
+stropped
+stropping
+strop/SM
+strove
+struck
+structuralism/M
+structuralist/SM
+structural/Y
+structured/AU
+structureless
+structures/A
+structure/SRDMG
+structuring/A
+strudel/MS
+struggle/GDRS
+struggler/M
+strummed
+strumming
+strumpet/GSDM
+strum/S
+strung/UA
+strut/S
+strutted
+strutter/M
+strutting
+strychnine/MS
+Stuart/MS
+stubbed/M
+stubbing
+Stubblefield/MS
+stubble/SM
+stubbly/RT
+stubbornness/SM
+stubborn/SGTYRDP
+stubby/SRT
+stub/MS
+stuccoes
+stucco/GDM
+stuck/U
+studbook/SM
+studded
+studding/SM
+Studebaker/M
+studentship/MS
+student/SM
+studiedness/M
+studied/PY
+studier/SM
+studio/MS
+studiousness/SM
+studious/PY
+stud/MS
+study/AGDS
+stuffily
+stuffiness/SM
+stuffing/M
+stuff/JGSRD
+stuffy/TRP
+stultify/NXGSD
+Stu/M
+stumble/GZDSR
+stumbling/Y
+stumpage/M
+stumper/M
+stump/RDMSG
+stumpy/RT
+stung
+stunk
+stunned
+stunner/M
+stunning/Y
+stun/S
+stunted/P
+stunt/GSDM
+stupefaction/SM
+stupefy/DSG
+stupendousness/M
+stupendous/PY
+stupidity/SM
+stupidness/M
+stupid/PTYRS
+stupor/MS
+sturdily
+sturdiness/SM
+sturdy/SRPT
+sturgeon/SM
+Sturm/M
+stutter/DRSZG
+Stuttgart/M
+Stuyvesant/M
+sty/DSGM
+Stygian
+styled/A
+style/GZMDSR
+styles/A
+styli
+styling/A
+stylishness/S
+stylish/PY
+stylistically
+stylistic/S
+stylist/MS
+stylites
+stylization/MS
+stylize/DSG
+stylos
+stylus/SM
+stymieing
+stymie/SD
+stymy's
+styptic/S
+styrene/MS
+Styrofoam/S
+Styx/M
+suable
+Suarez/M
+suasion/EMS
+suaveness/S
+suave/PRYT
+suavity/SM
+subaltern/SM
+subarctic/S
+subareas
+Subaru/M
+subassembly/M
+subatomic/S
+subbasement/SM
+subbed
+subbing
+subbranch/S
+subcaste/M
+subcategorizing
+subcategory/SM
+subchain
+subclassifications
+subclass/MS
+subclauses
+subcommand/S
+subcommittee/SM
+subcompact/S
+subcomponent/MS
+subcomputation/MS
+subconcept
+subconsciousness/SM
+subconscious/PSY
+subconstituent
+subcontinental
+subcontinent/MS
+subcontractor/SM
+subcontract/SMDG
+subcultural
+subculture/GMDS
+subcutaneous/Y
+subdirectory/S
+subdistrict/M
+subdivide/SRDG
+subdivision/SM
+subdued/Y
+subdue/GRSD
+subduer/M
+subexpression/MS
+subfamily/SM
+subfield/MS
+subfile/SM
+subfreezing
+subgoal/SM
+subgraph
+subgraphs
+subgroup/SGM
+subharmonic/S
+subheading/M
+subhead/MGJS
+subhuman/S
+subindex/M
+subinterval/MS
+subj
+subject/GVDMS
+subjection/SM
+subjectiveness/M
+subjective/PSY
+subjectivist/S
+subjectivity/SM
+subjoin/DSG
+subjugate/NGXSD
+subjugation/M
+subjunctive/S
+sublayer
+sublease/DSMG
+sublet/S
+subletting
+sublimate/GNSDX
+sublimation/M
+sublime/GRSDTYP
+sublimeness/M
+sublimer/M
+subliminal/Y
+sublimity/SM
+sublist/SM
+subliterary
+sublunary
+submachine
+submarginal
+submarine/MZGSRD
+submariner/M
+submerge/DSG
+submergence/SM
+submerse/XNGDS
+submersible/S
+submersion/M
+submicroscopic
+submission/SAM
+submissiveness/MS
+submissive/PY
+submit/SA
+submittable
+submittal
+submitted/A
+submitter/S
+submitting/A
+submode/S
+submodule/MS
+sub/MS
+subnational
+subnet/SM
+subnetwork/SM
+subnormal/SY
+suboptimal
+suborbital
+suborder/MS
+subordinately/I
+subordinates/I
+subordinate/YVNGXPSD
+subordination/IMS
+subordinator
+subornation/SM
+suborn/GSD
+subpage
+subparagraph/M
+subpart/MS
+subplot/MS
+subpoena/GSDM
+subpopulation/MS
+subproblem/SM
+subprocess/SM
+subprofessional/S
+subprogram/SM
+subproject
+subproof/SM
+subquestion/MS
+subrange/SM
+subregional/Y
+subregion/MS
+subrogation/M
+subroutine/SM
+subsample/MS
+subschema/MS
+subscribe/ASDG
+subscriber/SM
+subscripted/U
+subscription/MS
+subscript/SGD
+subsection/SM
+subsegment/SM
+subsentence
+subsequence/MS
+subsequent/SYP
+subservience/SM
+subservient/SY
+subset/MS
+subsidence/MS
+subside/SDG
+subsidiarity
+subsidiary/MS
+subsidization/MS
+subsidized/U
+subsidizer/M
+subsidize/ZRSDG
+subsidy/MS
+subsistence/MS
+subsistent
+subsist/SGD
+subsocietal
+subsoil/DRMSG
+subsonic
+subspace/MS
+subspecies/M
+substance/MS
+substandard
+substantially/IU
+substantialness/M
+substantial/PYS
+substantiated/U
+substantiate/VGNSDX
+substantiation/MFS
+substantiveness/M
+substantive/PSYM
+substantivity
+substation/MS
+substerilization
+substitutability
+substituted/U
+substitute/NGVBXDRS
+substitutionary
+substitution/M
+substitutive/Y
+substrata
+substrate/MS
+substratum/M
+substring/S
+substructure/SM
+subsume/SDG
+subsurface/S
+subsystem/MS
+subtable/S
+subtask/SM
+subteen/SM
+subtenancy/MS
+subtenant/SM
+subtend/DS
+subterfuge/SM
+subterranean/SY
+subtest
+subtext/SM
+subtitle/DSMG
+subtleness/M
+subtle/RPT
+subtlety/MS
+subtly/U
+subtopic/SM
+subtotal/GSDM
+subtracter/M
+subtraction/MS
+subtract/SRDZVG
+subtrahend/SM
+subtree/SM
+subtropical
+subtropic/S
+subtype/MS
+subunit/SM
+suburbanite/MS
+suburbanization/MS
+suburbanized
+suburbanizing
+suburban/S
+suburbia/SM
+suburb/MS
+subvention/MS
+subversion/SM
+subversiveness/MS
+subversive/SPY
+subverter/M
+subvert/SGDR
+subway/MDGS
+subzero
+succeeder/M
+succeed/GDRS
+successfulness/M
+successful/UY
+succession/SM
+successiveness/M
+successive/YP
+success/MSV
+successor/MS
+successorship
+succinctness/SM
+succinct/RYPT
+succored/U
+succorer/M
+succor/SGZRDM
+succotash/SM
+succubus/M
+succulence/SM
+succulency/MS
+succulent/S
+succumb/SDG
+such
+suchlike
+sucker/DMG
+suck/GZSDRB
+suckle/SDJG
+suckling/M
+Sucre/M
+sucrose/MS
+suction/SMGD
+Sudanese/M
+Sudanic/M
+Sudan/M
+suddenness/SM
+sudden/YPS
+Sudetenland/M
+sud/S
+suds/DSRG
+sudsy/TR
+sued/DG
+suede/SM
+Suellen/M
+Sue/M
+suer/M
+suet/MS
+Suetonius/M
+suety
+sue/ZGDRS
+Suez/M
+sufferance/SM
+sufferer/M
+suffering/M
+suffer/SJRDGZ
+suffice/GRSD
+sufficiency/SIM
+sufficient/IY
+suffixation/S
+suffixed/U
+suffix/GMRSD
+suffocate/XSDVGN
+suffocating/Y
+Suffolk/M
+suffragan/S
+suffrage/MS
+suffragette/MS
+suffragist/SM
+suffuse/VNGSDX
+suffusion/M
+Sufi/M
+Sufism/M
+sugarcane/S
+sugarcoat/GDS
+sugarless
+sugarplum/MS
+sugar/SJGMD
+sugary/TR
+suggest/DRZGVS
+suggester/M
+suggestibility/SM
+suggestible
+suggestion/MS
+suggestiveness/MS
+suggestive/PY
+sugillate
+Suharto/M
+suicidal/Y
+suicide/GSDM
+Sui/M
+suitability/SU
+suitableness/S
+suitable/P
+suitably/U
+suitcase/MS
+suited/U
+suite/SM
+suiting/M
+suit/MDGZBJS
+suitor/SM
+Sukarno/M
+Sukey/M
+Suki/M
+sukiyaki/SM
+Sukkoth's
+Sukkot/S
+Sula/M
+Sulawesi/M
+Suleiman/M
+sulfaquinoxaline
+sulfa/S
+sulfate/MSDG
+sulfide/S
+sulfite/M
+sulfonamide/SM
+sulfur/DMSG
+sulfuric
+sulfurousness/M
+sulfurous/YP
+sulk/GDS
+sulkily
+sulkiness/S
+sulky/RSPT
+Sulla/M
+sullenness/MS
+sullen/TYRP
+sullied/U
+Sullivan/M
+sully/GSD
+Sully/M
+sulphate/SM
+sulphide/MS
+sulphuric
+sultana/SM
+sultanate/MS
+sultan/SM
+sultrily
+sultriness/SM
+sultry/PRT
+Sulzberger/M
+sumach's
+sumac/SM
+Sumatra/M
+Sumatran/S
+sumer/F
+Sumeria/M
+Sumerian/M
+summability/M
+summable
+summand/MS
+summarily
+summarization/MS
+summarized/U
+summarize/GSRDZ
+summarizer/M
+summary/MS
+summation/FMS
+summed
+Summerdale/M
+summerhouse/MS
+summer/SGDM
+Summer/SM
+summertime/MS
+summery/TR
+summing
+summit/GMDS
+summitry/MS
+summoner/M
+summon/JSRDGZ
+summons/MSDG
+sum/MRS
+Sumner/M
+sumo/SM
+sump/SM
+sumptuousness/SM
+sumptuous/PY
+Sumter/M
+Sun
+sunbaked
+sunbathe
+sunbather/M
+sunbathing/M
+sunbaths
+sunbath/ZRSDG
+sunbeam/MS
+Sunbelt/M
+sunblock/S
+sunbonnet/MS
+sunburn/GSMD
+sunburst/MS
+suncream
+sundae/MS
+Sundanese/M
+Sundas
+Sunday/MS
+sunder/SDG
+sundial/MS
+sundowner/M
+sundown/MRDSZG
+sundry/S
+sunfish/SM
+sunflower/MS
+sunglass/MS
+Sung/M
+sung/U
+sunk/SN
+sunlamp/S
+sunless
+sunlight/MS
+sunlit
+sun/MS
+sunned
+Sunni/MS
+sunniness/SM
+sunning
+Sunnite/SM
+Sunny/M
+sunny/RSTP
+Sunnyvale/M
+sunrise/GMS
+sunroof/S
+sunscreen/S
+sunset/MS
+sunsetting
+sunshade/MS
+Sunshine/M
+sunshine/MS
+sunshiny
+sunspot/SM
+sunstroke/MS
+suntanned
+suntanning
+suntan/SM
+sunup/MS
+superabundance/MS
+superabundant
+superannuate/GNXSD
+superannuation/M
+superbness/M
+superb/YRPT
+supercargoes
+supercargo/M
+supercharger/M
+supercharge/SRDZG
+superciliousness/SM
+supercilious/PY
+supercity/S
+superclass/M
+supercomputer/MS
+supercomputing
+superconcept
+superconducting
+superconductivity/SM
+superconductor/SM
+supercooled
+supercooling
+supercritical
+superdense
+super/DG
+superego/SM
+supererogation/MS
+supererogatory
+superficiality/S
+superficial/SPY
+superfine
+superfix/M
+superfluity/MS
+superfluousness/S
+superfluous/YP
+superheat/D
+superheroes
+superhero/SM
+superhighway/MS
+superhumanness/M
+superhuman/YP
+superimpose/SDG
+superimposition/MS
+superintendence/S
+superintendency/SM
+superintendent/SM
+superintend/GSD
+superiority/MS
+Superior/M
+superior/SMY
+superlativeness/M
+superlative/PYS
+superlunary
+supermachine
+superman/M
+Superman/M
+supermarket/SM
+supermen
+supermodel
+supermom/S
+supernal
+supernatant
+supernaturalism/M
+supernaturalness/M
+supernatural/SPY
+supernormal/Y
+supernovae
+supernova/MS
+supernumerary/S
+superordinate
+superpose/BSDG
+superposition/MS
+superpower/MS
+superpredicate
+supersaturate/XNGDS
+supersaturation/M
+superscribe/GSD
+superscript/DGS
+superscription/SM
+superseder/M
+supersede/SRDG
+supersensitiveness/M
+supersensitive/P
+superset/MS
+supersonically
+supersonic/S
+supersonics/M
+superstar/SM
+superstition/SM
+superstitious/YP
+superstore/S
+superstructural
+superstructure/SM
+supertanker/SM
+supertitle/MSDG
+superuser/MS
+supervene/GSD
+supervention/S
+supervised/U
+supervise/SDGNX
+supervision/M
+supervisor/SM
+supervisory
+superwoman/M
+superwomen
+supineness/M
+supine/PSY
+supper/DMG
+supplanter/M
+supplant/SGRD
+supplemental/S
+supplementary/S
+supplementation/S
+supplementer/M
+supplement/SMDRG
+suppleness/SM
+supple/SPLY
+suppliant/S
+supplicant/MS
+supplicate/NGXSD
+supplication/M
+supplier/AM
+suppl/RDGT
+supply/MAZGSRD
+supportability/M
+supportable/UI
+supported/U
+supporter/M
+supporting/Y
+supportive/Y
+support/ZGVSBDR
+supposed/Y
+suppose/SRDBJG
+supposition/MS
+suppository/MS
+suppressant/S
+suppressed/U
+suppressible/I
+suppression/SM
+suppressive/P
+suppressor/S
+suppress/VGSD
+suppurate/NGXSD
+suppuration/M
+supp/YDRGZ
+supra
+supranational
+supranationalism/M
+suprasegmental
+supremacist/SM
+supremacy/SM
+supremal
+supremeness/M
+supreme/PSRTY
+supremo/M
+sup/RSZ
+supt
+Supt/M
+Surabaya/M
+Surat/M
+surcease/DSMG
+surcharge/MGSD
+surcingle/MGSD
+surd/M
+sured/I
+surefire
+surefooted
+surely
+sureness/MS
+sureness's/U
+sure/PU
+surer/I
+surest
+surety/SM
+surfaced/UA
+surface/GSRDPZM
+surfacer/AMS
+surfaces/A
+surfacing/A
+surfactant/SM
+surfboard/MDSG
+surfeit/SDRMG
+surfer/M
+surfing/M
+surf/SJDRGMZ
+surged/A
+surge/GYMDS
+surgeon/MS
+surgery/MS
+surges/A
+surgical/Y
+Suriname
+Surinamese
+Surinam's
+surliness/SM
+surly/TPR
+surmiser/M
+surmise/SRDG
+surmountable/IU
+surmount/DBSG
+surname/GSDM
+surpassed/U
+surpass/GDS
+surpassing/Y
+surplice/SM
+surplus/MS
+surplussed
+surplussing
+surprised/U
+surprise/MGDRSJ
+surpriser/M
+surprising/YU
+surrealism/MS
+surrealistic
+surrealistically
+surrealist/S
+surreality
+surreal/S
+surrender/DRSG
+surrenderer/M
+surreptitiousness/S
+surreptitious/PY
+surrey/SM
+surrogacy/S
+surrogate/SDMNG
+surrogation/M
+surrounding/M
+surround/JGSD
+surtax/SDGM
+surveillance/SM
+surveillant
+surveyed/A
+surveying/M
+survey/JDSG
+surveyor/MS
+surveys/A
+survivability/M
+survivable/U
+survivalist/S
+survival/MS
+survive/SRDBG
+survivor/MS
+survivorship/M
+Surya/M
+Sus
+Susana/M
+Susanetta/M
+Susan/M
+Susannah/M
+Susanna/M
+Susanne/M
+Susann/M
+susceptibilities
+susceptibility/IM
+susceptible/I
+Susette/M
+sushi/SM
+Susie/M
+Susi/M
+suspected/U
+suspecter/M
+suspect/GSDR
+suspecting/U
+suspend/DRZGS
+suspended/UA
+suspender/M
+suspenseful
+suspense/MXNVS
+suspension/AM
+suspensive/Y
+suspensor/M
+suspicion/GSMD
+suspiciousness/M
+suspicious/YP
+Susquehanna/M
+Sussex/M
+sustainability
+sustainable/U
+sustain/DRGLBS
+sustainer/M
+sustainment/M
+sustenance/MS
+Susy/M
+Sutherland/M
+Sutherlan/M
+sutler/MS
+Sutton/M
+suture/GMSD
+SUV
+Suva/M
+Suwanee/M
+Suzanna/M
+Suzanne/M
+Suzann/M
+suzerain/SM
+suzerainty/MS
+Suzette/M
+Suzhou/M
+Suzie/M
+Suzi/M
+Suzuki/M
+Suzy/M
+Svalbard/M
+svelte/RPTY
+Svend/M
+Svengali
+Sven/M
+Sverdlovsk/M
+Svetlana/M
+SW
+swabbed
+swabbing
+swabby/S
+Swabian/SM
+swab/MS
+swaddle/SDG
+swagged
+swagger/GSDR
+swagging
+swag/GMS
+Swahili/MS
+swain/SM
+SWAK
+swallower/M
+swallow/GDRS
+swallowtail/SM
+swam
+swami/SM
+swamper/M
+swampland/MS
+swamp/SRDMG
+swampy/RPT
+Swanee/M
+swankily
+swankiness/MS
+swank/RDSGT
+swanky/PTRS
+swanlike
+swan/MS
+swanned
+swanning
+Swansea/M
+Swanson/M
+swappable/U
+swapped
+swapper/SM
+swapping
+swap/S
+sward/MSGD
+swarmer/M
+swarm/GSRDM
+swarthiness/M
+Swarthmore/M
+swarthy/RTP
+swart/P
+Swartz/M
+swashbuckler/SM
+swashbuckling/S
+swash/GSRD
+swastika/SM
+SWAT
+swatch/MS
+swathe
+swather/M
+swaths
+swath/SRDMGJ
+swat/S
+swatted
+swatter/MDSG
+swatting
+swayback/SD
+sway/DRGS
+swayer/M
+Swaziland/M
+Swazi/SM
+swearer/M
+swear/SGZR
+swearword/SM
+sweatband/MS
+sweater/M
+sweatily
+sweatiness/M
+sweatpants
+sweat/SGZRM
+sweatshirt/S
+sweatshop/MS
+sweaty/TRP
+Swedenborg/M
+Sweden/M
+swede/SM
+Swede/SM
+Swedish
+Swed/MN
+Sweeney/SM
+sweeper/M
+sweepingness/M
+sweeping/PY
+sweep/SBRJGZ
+sweeps/M
+sweepstakes
+sweepstake's
+sweetbread/SM
+sweetbrier/SM
+sweetcorn
+sweetened/U
+sweetener/M
+sweetening/M
+sweeten/ZDRGJ
+sweetheart/MS
+sweetie/MS
+sweeting/M
+sweetish/Y
+Sweet/M
+sweetmeat/MS
+sweetness/MS
+sweetshop
+sweet/TXSYRNPG
+swellhead/DS
+swelling/M
+swell/SJRDGT
+swelter/DJGS
+sweltering/Y
+Swen/M
+Swenson/M
+swept
+sweptback
+swerve/GSD
+swerving/U
+swifter/M
+swift/GTYRDPS
+Swift/M
+swiftness/MS
+swigged
+swigging
+swig/SM
+swill/SDG
+swimmer/MS
+swimming/MYS
+swim/S
+swimsuit/MS
+Swinburne/M
+swindle/GZRSD
+swindler/M
+swineherd/MS
+swine/SM
+swingeing
+swinger/M
+swinging/Y
+swing/SGRZJB
+swingy/R
+swinishness/M
+swinish/PY
+Swink/M
+swipe/DSG
+swirling/Y
+swirl/SGRD
+swirly/TR
+swish/GSRD
+swishy/R
+swiss
+Swiss/S
+switchback/GDMS
+switchblade/SM
+switchboard/MS
+switcher/M
+switch/GBZMRSDJ
+switchgear
+switchman/M
+switchmen/M
+switchover/M
+Switzerland/M
+Switzer/M
+Switz/MR
+swivel/GMDS
+swizzle/RDGM
+swob's
+swollen
+swoon/GSRD
+swooning/Y
+swoop/RDSG
+swoosh/GSD
+swop's
+sword/DMSG
+swordfish/SM
+swordplayer/M
+swordplay/RMS
+swordsman/M
+swordsmanship/SM
+swordsmen
+swordtail/M
+swore
+sworn
+swot/S
+swum
+swung
+s/XJBG
+sybarite/MS
+sybaritic
+Sybila/M
+Sybilla/M
+Sybille/M
+Sybil/M
+Sybyl/M
+sycamore/SM
+sycophancy/S
+sycophantic
+sycophantically
+sycophant/SYM
+Sydelle/M
+Sydel/M
+Syd/M
+Sydney/M
+Sykes/M
+Sylas/M
+syllabicate/GNDSX
+syllabication/M
+syllabicity
+syllabic/S
+syllabification/M
+syllabify/GSDXN
+syllabi's
+syllable/SDMG
+syllabub/M
+syllabus/MS
+syllogism/MS
+syllogistic
+Sylow/M
+sylphic
+sylphlike
+sylph/M
+sylphs
+Sylvania/M
+Sylvan/M
+sylvan/S
+Sylvester/M
+Sylvia/M
+Sylvie/M
+Syman/M
+symbiont/M
+symbioses
+symbiosis/M
+symbiotic
+symbol/GMDS
+symbolical/Y
+symbolics/M
+symbolic/SM
+symbolism/MS
+symbolist/MS
+symbolization/MAS
+symbolized/U
+symbolize/GZRSD
+symbolizes/A
+Symington/M
+symmetric
+symmetrically/U
+symmetricalness/M
+symmetrical/PY
+symmetrization/M
+symmetrizing
+symmetry/MS
+Symon/M
+sympathetically/U
+sympathetic/S
+sympathized/U
+sympathizer/M
+sympathize/SRDJGZ
+sympathizing/MYUS
+sympathy/MS
+symphonic
+symphonists
+symphony/MS
+symposium/MS
+symptomatic
+symptomatically
+symptomatology/M
+symptom/MS
+syn
+synagogal
+synagogue/SM
+synapse/SDGM
+synaptic
+synchronism/M
+synchronization's
+synchronization/SA
+synchronize/AGCDS
+synchronized/U
+synchronizer/MS
+synchronousness/M
+synchronous/YP
+synchrony
+synchrotron/M
+syncopate/VNGXSD
+syncopation/M
+syncope/MS
+sync/SGD
+syndicalist
+syndicate/XSDGNM
+syndic/SM
+syndrome/SM
+synergism/SM
+synergistic
+synergy/MS
+synfuel/S
+Synge/M
+synod/SM
+synonymic
+synonymous/Y
+synonym/SM
+synonymy/MS
+synopses
+synopsis/M
+synopsized
+synopsizes
+synopsizing
+synoptic/S
+syntactical/Y
+syntactics/M
+syntactic/SY
+syntax/MS
+syntheses
+synthesis/M
+synthesized/U
+synthesize/GZSRD
+synthesizer/M
+synthesizes/A
+synthetically
+synthetic/S
+syphilis/MS
+syphilitic/S
+syphilized
+syphilizing
+Syracuse/M
+Syriac/M
+Syria/M
+Syrian/SM
+syringe/GMSD
+syrup/DMSG
+syrupy
+sys
+systematical/Y
+systematics/M
+systematic/SP
+systematization/SM
+systematized/U
+systematizer/M
+systematize/ZDRSG
+systematizing/U
+systemically
+systemic/S
+systemization/SM
+system/MS
+systole/MS
+systolic
+Szilard/M
+Szymborska/M
+TA
+Tabasco/MS
+Tabatha/M
+Tabbatha/M
+tabbed
+Tabbie/M
+Tabbi/M
+tabbing
+Tabbitha/M
+Tabb/M
+tabbouleh
+tabboulehs
+tabby/GSD
+Tabby/M
+Taber/M
+Tabernacle/S
+tabernacle/SDGM
+Tabina/M
+Tabitha/M
+tabla/MS
+tableau/M
+tableaux
+tablecloth/M
+tablecloths
+table/GMSD
+tableland/SM
+tablespoonful/MS
+tablespoon/SM
+tablet/MDGS
+tabletop/MS
+tableware/SM
+tabling/M
+tabloid/MS
+Tab/MR
+taboo/GSMD
+Tabor/M
+tabor/MDGS
+Tabriz/SM
+tab/SM
+tabula
+tabular/Y
+tabulate/XNGDS
+tabulation/M
+tabulator/MS
+tachometer/SM
+tachometry
+tachycardia/MS
+tachyon/SM
+tacitness/MS
+taciturnity/MS
+taciturn/Y
+Tacitus/M
+tacit/YP
+tacker/M
+tack/GZRDMS
+tackiness/MS
+tackler/M
+tackle/RSDMZG
+tackling/M
+tacky/RSTP
+Tacoma/M
+taco/MS
+tact/FSM
+tactfulness/S
+tactful/YP
+tactical/Y
+tactician/MS
+tactic/SM
+tactile/Y
+tactility/S
+tactlessness/SM
+tactless/PY
+tactual/Y
+Taddeo/M
+Taddeusz/M
+Tadd/M
+Tadeas/M
+Tadeo/M
+Tades
+Tadio/M
+Tad/M
+tadpole/MS
+tad/SM
+Tadzhikistan's
+Tadzhikistan/M
+Taegu/M
+Taejon/M
+taffeta/MS
+taffrail/SM
+Taffy/M
+taffy/SM
+Taft/M
+Tagalog/SM
+tagged/U
+tagger/S
+tagging
+Tagore/M
+tag/SM
+Tagus/M
+Tahitian/S
+Tahiti/M
+Tahoe/M
+Taichung/M
+taiga/MS
+tailback/MS
+tail/CMRDGAS
+tailcoat/S
+tailer/AM
+tailgate/MGRSD
+tailgater/M
+tailing/MS
+taillessness/M
+tailless/P
+taillight/MS
+tailor/DMJSGB
+Tailor/M
+tailpipe/SM
+tailspin/MS
+tailwind/SM
+Tainan/M
+Taine/M
+taint/DGS
+tainted/U
+Taipei/M
+Taite/M
+Tait/M
+Taiwanese
+Taiwan/M
+Taiyuan/M
+Tajikistan
+takeaway/S
+taken/A
+takeoff/SM
+takeout/S
+takeover/SM
+taker/M
+take/RSHZGJ
+takes/IA
+taking/IA
+Taklamakan/M
+Talbert/M
+Talbot/M
+talcked
+talcking
+talc/SM
+talcum/S
+talebearer/SM
+talented/M
+talentless
+talent/SMD
+taler/M
+tale/RSMN
+tali
+Talia/M
+Taliesin/M
+talion/M
+talismanic
+talisman/SM
+talkativeness/MS
+talkative/YP
+talker/M
+talk/GZSRD
+talkie/M
+talky/RST
+Talladega/M
+Tallahassee/M
+Tallahatchie/M
+Tallahoosa/M
+tallboy/MS
+Tallchief/M
+Talley/M
+Talleyrand/M
+Tallia/M
+Tallie/M
+Tallinn/M
+tallish
+tallness/MS
+Tallou/M
+tallow/DMSG
+tallowy
+tall/TPR
+Tallulah/M
+tally/GRSDZ
+tallyho/DMSG
+Tally/M
+Talmudic
+Talmudist/MS
+Talmud/MS
+talon/SMD
+talus/MS
+Talyah/M
+Talya/M
+Ta/M
+tamable/M
+tamale/SM
+tamarack/SM
+Tamarah/M
+Tamara/M
+tamarind/MS
+Tamar/M
+Tamarra/M
+Tamas
+tambourine/MS
+tamed/U
+Tameka/M
+tameness/S
+Tamera/M
+Tamerlane/M
+tame/SYP
+Tamika/M
+Tamiko/M
+Tamil/MS
+Tami/M
+Tam/M
+Tamma/M
+Tammany/M
+Tammara/M
+tam/MDRSTZGB
+Tammie/M
+Tammi/M
+Tammy/M
+Tampa/M
+Tampax/M
+tampered/U
+tamperer/M
+tamper/ZGRD
+tampon/DMSG
+tamp/SGZRD
+Tamqrah/M
+Tamra/M
+tanager/MS
+Tanaka/M
+Tana/M
+Tananarive/M
+tanbark/SM
+Tancred/M
+tandem/SM
+Tandie/M
+Tandi/M
+tandoori/S
+Tandy/M
+Taney/M
+T'ang
+Tanganyika/M
+tangelo/SM
+tangency/M
+tangential/Y
+tangent/SM
+tangerine/MS
+tang/GSYDM
+tangibility/MIS
+tangible/IPS
+tangibleness's/I
+tangibleness/SM
+tangibly/I
+Tangier/M
+tangle's
+tangle/UDSG
+tango/MDSG
+Tangshan/M
+tangy/RST
+Tanhya/M
+Tania/M
+Tani/M
+Tanisha/M
+Tanitansy/M
+tankard/MS
+tanker/M
+tankful/MS
+tank/GZSRDM
+Tan/M
+tan/MS
+tanned/U
+Tannenbaum/M
+Tanner/M
+tanner/SM
+tannery/MS
+tannest
+Tanney/M
+Tannhäuser/M
+Tannie/M
+tanning/SM
+tannin/SM
+Tann/RM
+Tanny/M
+Tansy/M
+tansy/SM
+tantalization/SM
+tantalized/U
+tantalize/GZSRD
+tantalizingly/S
+tantalizingness/S
+tantalizing/YP
+tantalum/MS
+Tantalus/M
+tantamount
+tantra/S
+tantrum/SM
+Tanya/M
+Tanzania/M
+Tanzanian/S
+taoism
+Taoism/MS
+Taoist/MS
+taoist/S
+Tao/M
+tao/S
+Tapdance/M
+taped/U
+tapeline/S
+taperer/M
+taper/GRD
+tape/SM
+tapestry/GMSD
+tapeworm/MS
+tapioca/MS
+tapir/MS
+tap/MSDRJZG
+tapped/U
+tapper/MS
+tappet/MS
+tapping/M
+taproom/MS
+taproot/SM
+taps/M
+Tarah/M
+Tara/M
+tarantella/MS
+tarantula/MS
+Tarawa/M
+Tarazed/M
+Tarbell/M
+tardily
+tardiness/S
+tardy/TPRS
+tare/MS
+target/GSMD
+tar/GSMD
+tariff/DMSG
+Tarim/M
+Tarkington/M
+tarmacked
+tarmacking
+tarmac/S
+tarnished/U
+tarnish/GDS
+tarn/MS
+taro/MS
+tarot/MS
+tarpapered
+tarpaulin/MS
+tarp/MS
+tarpon/MS
+tarragon/SM
+Tarrah/M
+Tarra/M
+Tarrance/M
+tarred/M
+tarring/M
+tarry/TGRSD
+Tarrytown/M
+tarsal/S
+tarsi
+tarsus/M
+tartan/MS
+tartaric
+Tartar's
+tartar/SM
+Tartary/M
+tartness/MS
+tart/PMYRDGTS
+Tartuffe/M
+Taryn/M
+Tarzan/M
+Tasha/M
+Tashkent/M
+Tasia/M
+task/GSDM
+taskmaster/SM
+taskmistress/MS
+Tasmania/M
+Tasmanian/S
+tassellings
+tassel/MDGS
+Tass/M
+tasted/EU
+tastefulness/SME
+tasteful/PEY
+taste/GZMJSRD
+tastelessness/SM
+tasteless/YP
+taster/M
+taste's/E
+tastes/E
+tastily
+tastiness/MS
+tasting/E
+tasty/RTP
+tatami/MS
+Tatar/SM
+Tate/M
+tater/M
+Tatiana/M
+Tatiania/M
+tat/SRZ
+tatted
+tatterdemalion/SM
+tattered/M
+tatter/GDS
+tatting/SM
+tattler/M
+tattle/RSDZG
+tattletale/SM
+tattooer/M
+tattooist/MS
+tattoo/ZRDMGS
+tatty/R
+Tatum/M
+taught/AU
+taunter/M
+taunting/Y
+taunt/ZGRDS
+taupe/SM
+Taurus/SM
+tau/SM
+tauten/GD
+tautness/S
+tautological/Y
+tautologous
+tautology/SM
+taut/PGTXYRDNS
+taverner/M
+tavern/RMS
+tawdrily
+tawdriness/SM
+tawdry/SRTP
+Tawney/M
+Tawnya/M
+tawny/RSMPT
+Tawsha/M
+taxable/S
+taxably
+taxation/MS
+taxed/U
+taxicab/MS
+taxidermist/SM
+taxidermy/MS
+taxi/MDGS
+taximeter/SM
+taxing/Y
+taxiway/MS
+taxonomic
+taxonomically
+taxonomist/SM
+taxonomy/SM
+taxpayer/MS
+taxpaying/M
+tax/ZGJMDRSB
+Taylor/SM
+Tb
+TB
+TBA
+Tbilisi/M
+tbs
+tbsp
+Tchaikovsky/M
+Tc/M
+TCP
+TD
+TDD
+Te
+teabag/S
+teacake/MS
+teacart/M
+teachable/P
+teach/AGS
+teacher/MS
+teaching/SM
+teacloth
+teacupful/MS
+teacup/MS
+Teador/M
+teahouse/SM
+teakettle/SM
+teak/SM
+teakwood/M
+tealeaves
+teal/MS
+tea/MDGS
+teammate/MS
+team/MRDGS
+teamster/MS
+teamwork/SM
+teapot/MS
+tearaway
+teardrop/MS
+tearer/M
+tearfulness/M
+tearful/YP
+teargas/S
+teargassed
+teargassing
+tearjerker/S
+tearoom/MS
+tear/RDMSG
+teary/RT
+Teasdale/M
+tease/KS
+teasel/DGSM
+teaser/M
+teashop/SM
+teasing/Y
+teaspoonful/MS
+teaspoon/MS
+teas/SRDGZ
+teatime/MS
+teat/MDS
+tech/D
+technetium/SM
+technicality/MS
+technicalness/M
+technical/YSP
+technician/MS
+Technicolor/MS
+Technion/M
+technique/SM
+technocracy/MS
+technocratic
+technocrat/S
+technological/Y
+technologist/MS
+technology/MS
+technophobia
+technophobic
+techs
+tectonically
+tectonic/S
+tectonics/M
+Tecumseh/M
+Tedda/M
+Teddie/M
+Teddi/M
+Tedd/M
+Teddy/M
+teddy/SM
+Tedie/M
+Tedi/M
+tediousness/SM
+tedious/YP
+tedium/MS
+Ted/M
+Tedman/M
+Tedmund/M
+Tedra/M
+tee/DRSMH
+teeing
+teem/GSD
+teemingness/M
+teeming/PY
+teenager/M
+teenage/RZ
+Teena/M
+teen/SR
+teenybopper/SM
+teeny/RT
+teepee's
+teeshirt/S
+teeter/GDS
+teethe
+teether/M
+teething/M
+teethmarks
+teeth/RSDJMG
+teetotaler/M
+teetotalism/MS
+teetotal/SRDGZ
+TEFL
+Teflon/MS
+Tegucigalpa/M
+Teheran's
+Tehran
+TEirtza/M
+tektite/SM
+Tektronix/M
+telecast/SRGZ
+telecommunicate/NX
+telecommunication/M
+telecommute/SRDZGJ
+telecoms
+teleconference/GMJSD
+Teledyne/M
+Telefunken/M
+telegenic
+telegrammed
+telegramming
+telegram/MS
+telegraphic
+telegraphically
+telegraphist/MS
+telegraph/MRDGZ
+telegraphs
+telegraphy/MS
+telekineses
+telekinesis/M
+telekinetic
+Telemachus/M
+Telemann/M
+telemarketer/S
+telemarketing/S
+telemeter/DMSG
+telemetric
+telemetry/MS
+teleological/Y
+teleology/M
+telepathic
+telepathically
+telepathy/SM
+telephone/SRDGMZ
+telephonic
+telephonist/SM
+telephony/MS
+telephotography/MS
+telephoto/S
+teleprinter/MS
+teleprocessing/S
+teleprompter
+TelePrompter/M
+TelePrompTer/S
+telescope/GSDM
+telescopic
+telescopically
+teletext/S
+telethon/MS
+teletype/SM
+Teletype/SM
+teletypewriter/SM
+televangelism/S
+televangelist/S
+televise/SDXNG
+television/M
+televisor/MS
+televisual
+telex/GSDM
+Telex/M
+tell/AGS
+Teller/M
+teller/SDMG
+telling/YS
+Tell/MR
+telltale/MS
+tellurium/SM
+telly/SM
+Telnet/M
+TELNET/M
+telnet/S
+telomeric
+tel/SY
+Telugu/M
+temblor/SM
+temerity/MS
+Tempe/M
+temperamental/Y
+temperament/SM
+temperance/IMS
+tempera/SLM
+temperately/I
+temperateness's/I
+temperateness/SM
+temperate/SDGPY
+temperature/MS
+tempered/UE
+temper/GRDM
+tempering/E
+temper's/E
+tempers/E
+tempest/DMSG
+tempestuousness/SM
+tempestuous/PY
+template/FS
+template's
+Temple/M
+Templeman/M
+temple/SDM
+Templeton/M
+Temp/M
+tempoes
+tempo/MS
+temporal/YS
+temporarily
+temporarinesses
+temporariness/FM
+temporary/SFP
+temporize/GJZRSD
+temporizer/M
+temporizings/U
+temporizing/YM
+temp/SGZTMRD
+temptation/MS
+tempted
+tempter/S
+tempt/FS
+tempting/YS
+temptress/MS
+tempura/SM
+tenabilities
+tenability/UM
+tenableness/M
+tenable/P
+tenably
+tenaciousness/S
+tenacious/YP
+tenacity/S
+tenancy/MS
+tenanted/U
+tenant/MDSG
+tenantry/MS
+tench/M
+tended/UE
+tendency/MS
+tendentiousness/SM
+tendentious/PY
+tendered
+tenderer
+tenderest
+tenderfoot/MS
+tender/FS
+tenderheartedness/MS
+tenderhearted/YP
+tendering
+tenderizer/M
+tenderize/SRDGZ
+tenderloin/SM
+tenderly
+tenderness/SM
+tending/E
+tendinitis/S
+tend/ISFRDG
+tendon/MS
+tendril/SM
+tends/E
+tenebrous
+tenement/MS
+tenet/SM
+Tenex/M
+TENEX/M
+tenfold/S
+ten/MHB
+Tenneco/M
+tenner
+Tennessean/S
+Tennessee/M
+Tenney/M
+tennis/SM
+Tenn/M
+Tennyson/M
+Tenochtitlan/M
+tenon/GSMD
+tenor/MS
+tenpin/SM
+tense/IPYTNVR
+tenseness's/I
+tenseness/SM
+tensile
+tensional/I
+tension/GMRDS
+tensionless
+tensions/E
+tension's/I
+tensity/IMS
+tensorial
+tensor/MS
+tenspot
+tens/SRDVGT
+tentacle/MSD
+tentativeness/S
+tentative/SPY
+tented/UF
+tenterhook/MS
+tenter/M
+tent/FSIM
+tenths
+tenth/SY
+tenting/F
+tenuity/S
+tenuousness/SM
+tenuous/YP
+tenure/SDM
+Teodoor/M
+Teodora/M
+Teodorico/M
+Teodor/M
+Teodoro/M
+tepee/MS
+tepidity/S
+tepidness/S
+tepid/YP
+tequila/SM
+Tera/M
+teratogenic
+teratology/MS
+terbium/SM
+tercel/M
+tercentenary/S
+tercentennial/S
+Terence/M
+Terencio/M
+Teresa/M
+Terese/M
+Tereshkova/M
+Teresina/M
+Teresita/M
+Teressa/M
+Teriann/M
+Teri/M
+Terkel/M
+termagant/SM
+termcap
+termer/M
+terminable/CPI
+terminableness/IMC
+terminal/SYM
+terminate/CXNV
+terminated/U
+terminates
+terminating
+termination/MC
+terminative/YC
+terminator/SM
+termini
+terminological/Y
+terminology/MS
+terminus/M
+termite/SM
+term/MYRDGS
+ternary/S
+tern/GIDS
+tern's
+terpsichorean
+Terpsichore/M
+terrace/MGSD
+terracing/M
+terracotta
+terrain/MS
+Terra/M
+terramycin
+Terrance/M
+Terran/M
+terrapin/MS
+terrarium/MS
+terrazzo/SM
+Terrell/M
+Terrel/M
+Terre/M
+Terrence/M
+terrestrial/YMS
+terribleness/SM
+terrible/P
+terribly
+Terrie/M
+terrier/M
+terrifically
+terrific/Y
+terrify/GDS
+terrifying/Y
+Terrijo/M
+Terrill/M
+Terri/M
+terrine/M
+territoriality/M
+Territorial/SM
+territorial/SY
+Territory's
+territory/SM
+terrorism/MS
+terroristic
+terrorist/MS
+terrorized/U
+terrorizer/M
+terrorize/RSDZG
+terror/MS
+terr/S
+terrycloth
+Terrye/M
+Terry/M
+terry/ZMRS
+terseness/SM
+terse/RTYP
+Tersina/M
+tertian
+Tertiary
+tertiary/S
+Terza/M
+TESL
+Tesla/M
+TESOL
+Tessa/M
+tessellate/XDSNG
+tessellation/M
+tesseral
+Tessie/M
+Tessi/M
+Tess/M
+Tessy/M
+testability/M
+testable/U
+testamentary
+testament/SM
+testate/IS
+testator/MS
+testatrices
+testatrix
+testbed/S
+testcard
+tested/AKU
+tester/MFCKS
+testes/M
+testicle/SM
+testicular
+testifier/M
+testify/GZDRS
+testily
+testimonial/SM
+testimony/SM
+testiness/S
+testing/S
+testis/M
+testosterone/SM
+test/RDBFZGSC
+tests/AK
+test's/AKF
+testy/RTP
+tetanus/MS
+tetchy/TR
+tether/DMSG
+tethered/U
+Tethys/M
+Tetons
+tetrachloride/M
+tetracycline/SM
+tetrafluoride
+tetragonal/Y
+tetrahalides
+tetrahedral/Y
+tetrahedron/SM
+tetrameron
+tetrameter/SM
+tetra/MS
+tetrasodium
+tetravalent
+Teutonic
+Teuton/SM
+Texaco/M
+Texan/S
+Texas/MS
+Tex/M
+TeX/M
+textbook/SM
+text/FSM
+textile/SM
+Textron/M
+textual/FY
+textural/Y
+textured/U
+texture/MGSD
+T/G
+Thacher/M
+Thackeray/M
+Thaddeus/M
+Thaddus/M
+Thadeus/M
+Thad/M
+Thailand/M
+Thaine/M
+Thain/M
+Thai/S
+thalami
+thalamus/M
+Thales/M
+Thalia/M
+thalidomide/MS
+thallium/SM
+thallophyte/M
+Thames
+than
+Thane/M
+thane/SM
+Thanh/M
+thanker/M
+thankfuller
+thankfullest
+thankfulness/SM
+thankful/YP
+thanklessness/SM
+thankless/PY
+thanksgiving/MS
+Thanksgiving/S
+thank/SRDG
+Thant/M
+Thar/M
+Thatcher/M
+thatching/M
+thatch/JMDRSZG
+Thatch/MR
+that'd
+that'll
+that/MS
+thaumaturge/M
+thaw/DGS
+Thaxter/M
+Thayer/M
+Thayne/M
+THC
+the
+Theadora/M
+Thea/M
+theatergoer/MS
+theatergoing/MS
+theater/SM
+theatricality/SM
+theatrical/YS
+theatric/S
+theatrics/M
+Thebault/M
+Thebes
+Theda/M
+Thedrick/M
+Thedric/M
+thee/DS
+theeing
+theft/MS
+Theiler/M
+their/MS
+theism/SM
+theistic
+theist/SM
+Thekla/M
+Thelma/M
+themas
+thematically
+thematics
+thematic/U
+theme/MS
+them/GD
+Themistocles/M
+themselves
+thence
+thenceforth
+thenceforward/S
+Theobald/M
+theocracy/SM
+theocratic
+Theocritus/M
+theodolite/MS
+Theodora/M
+Theodore/M
+Theodoric/M
+Theodor/M
+Theodosia/M
+Theodosian
+Theodosius/M
+theologian/SM
+theological/Y
+theologists
+theology/MS
+Theo/M
+theorem/MS
+theoretical/Y
+theoretician/MS
+theoretic/S
+theoretics/M
+theorist/SM
+theorization/SM
+theorize/ZGDRS
+theory/MS
+theosophic
+theosophical
+theosophist/MS
+Theosophy
+theosophy/SM
+therapeutically
+therapeutic/S
+therapeutics/M
+therapist/MS
+therapy/MS
+Theravada/M
+thereabout/S
+thereafter
+thereat
+thereby
+there'd
+therefor
+therefore
+therefrom
+therein
+there'll
+there/MS
+thereof
+thereon
+Theresa/M
+Therese/M
+Theresina/M
+Theresita/M
+Theressa/M
+thereto
+theretofore
+thereunder
+thereunto
+thereupon
+therewith
+Therine/M
+thermal/YS
+thermionic/S
+thermionics/M
+thermistor/MS
+therm/MS
+thermocouple/MS
+thermodynamical/Y
+thermodynamic/S
+thermodynamics/M
+thermoelastic
+thermoelectric
+thermoformed
+thermoforming
+thermogravimetric
+thermoluminescence/M
+thermometer/MS
+thermometric
+thermometry/M
+thermonuclear
+thermopile/M
+thermoplastic/S
+thermopower
+thermo/S
+thermosetting
+thermos/S
+Thermos/SM
+thermostable
+thermostatically
+thermostatic/S
+thermostatics/M
+thermostat/SM
+thermostatted
+thermostatting
+Theron/M
+thesauri
+thesaurus/MS
+these/S
+Theseus/M
+thesis/M
+thespian/S
+Thespian/S
+Thespis/M
+Thessalonian
+Thessaloniki/M
+Thessaly/M
+theta/MS
+thew/SM
+they
+they'd
+they'll
+they're
+they've
+th/GNJX
+Thia/M
+thiamine/MS
+Thibaud/M
+Thibaut/M
+thickener/M
+thickening/M
+thicken/RDJZG
+thicket/SMD
+thickheaded/M
+thickish
+thickness/MS
+thickset/S
+thick/TXPSRNY
+thief/M
+Thiensville/M
+Thieu/M
+thievery/MS
+thieve/SDJG
+thievishness/M
+thievish/P
+thighbone/SM
+thigh/DM
+thighs
+thimble/DSMG
+thimbleful/MS
+Thimbu/M
+Thimphu
+thine
+thingamabob/MS
+thingamajig/SM
+thing/MP
+thinkableness/M
+thinkable/U
+thinkably/U
+think/AGRS
+thinker/MS
+thinkingly/U
+thinking/SMYP
+thinned
+thinner/MS
+thinness/MS
+thinnest
+thinning
+thinnish
+thin/STPYR
+thiocyanate/M
+thiouracil/M
+third/DYGS
+thirster/M
+thirst/GSMDR
+thirstily
+thirstiness/S
+thirsty/TPR
+thirteen/MHS
+thirteenths
+thirtieths
+thirty/HMS
+this
+this'll
+thistledown/MS
+thistle/SM
+thither
+Th/M
+tho
+thole/GMSD
+Thomasa/M
+Thomasina/M
+Thomasine/M
+Thomasin/M
+Thoma/SM
+Thomism/M
+Thomistic
+Thom/M
+Thompson/M
+Thomson/M
+thong/SMD
+thoracic
+thorax/MS
+Thorazine
+Thoreau/M
+thoriate/D
+Thorin/M
+thorium/MS
+Thor/M
+Thornburg/M
+Thorndike/M
+Thornie/M
+thorniness/S
+Thorn/M
+thorn/SMDG
+Thornton/M
+Thorny/M
+thorny/PTR
+thoroughbred/S
+thoroughfare/MS
+thoroughgoing
+thoroughness/SM
+thorough/PTYR
+Thorpe/M
+Thorstein/M
+Thorsten/M
+Thorvald/M
+those
+Thoth/M
+thou/DSG
+though
+thoughtfully
+thoughtfulness/S
+thoughtful/U
+thoughtlessness/MS
+thoughtless/YP
+thought/MS
+thousandfold
+thousand/SHM
+thousandths
+Thrace/M
+Thracian/M
+thralldom/S
+thrall/GSMD
+thrash/DSRZGJ
+thrasher/M
+thrashing/M
+threadbare/P
+threader/M
+threading/A
+threadlike
+thread/MZDRGS
+thready/RT
+threatener/M
+threaten/GJRD
+threatening/Y
+threat/MDNSXG
+threefold
+three/MS
+threepence/M
+threepenny
+threescore/S
+threesome/SM
+threnody/SM
+thresh/DSRZG
+thresher/M
+threshold/MDGS
+threw
+thrice
+thriftily
+thriftiness/S
+thriftless
+thrift/SM
+thrifty/PTR
+thriller/M
+thrilling/Y
+thrill/ZMGDRS
+thriver/M
+thrive/RSDJG
+thriving/Y
+throatily
+throatiness/MS
+throat/MDSG
+throaty/PRT
+throbbed
+throbbing
+throb/S
+throeing
+throe/SDM
+thrombi
+thromboses
+thrombosis/M
+thrombotic
+thrombus/M
+Throneberry/M
+throne/CGSD
+throne's
+throng/GDSM
+throttle/DRSZMG
+throttler/M
+throughout
+throughput/SM
+throughway's
+through/Y
+throwaway/SM
+throwback/MS
+thrower/M
+thrown
+throwout
+throw/SZGR
+thrummed
+thrumming
+thrum/S
+thrush/MS
+thruster/M
+thrust/ZGSR
+Thruway/MS
+thruway/SM
+Thunderbird/M
+Thu
+Thucydides/M
+thudded
+thudding
+thud/MS
+thuggee/M
+thuggery/SM
+thuggish
+thug/MS
+Thule/M
+thulium/SM
+thumbnail/MS
+thumbscrew/SM
+thumb/SMDG
+thumbtack/GMDS
+thump/RDMSG
+thunderbolt/MS
+thunderclap/SM
+thundercloud/SM
+thunderer/M
+thunderhead/SM
+thundering/Y
+thunderous/Y
+thundershower/MS
+thunderstorm/MS
+thunderstruck
+thundery
+thunder/ZGJDRMS
+thunk
+Thurber/M
+Thurman/M
+Thur/MS
+Thursday/SM
+Thurstan/M
+Thurston/M
+thus/Y
+thwack/DRSZG
+thwacker/M
+thwarter/M
+thwart/GSDRY
+thy
+thyme/SM
+thymine/MS
+thymus/SM
+thyratron/M
+thyristor/MS
+thyroglobulin
+thyroidal
+thyroid/S
+thyronine
+thyrotoxic
+thyrotrophic
+thyrotrophin
+thyrotropic
+thyrotropin/M
+thyroxine/M
+thyself
+Tia/M
+Tianjin
+tiara/MS
+Tiberius/M
+Tiber/M
+Tibetan/S
+Tibet/M
+tibiae
+tibial
+tibia/M
+Tibold/M
+Tiburon/M
+ticker/M
+ticket/SGMD
+tick/GZJRDMS
+ticking/M
+tickler/M
+tickle/RSDZG
+ticklishness/MS
+ticklish/PY
+ticktacktoe/S
+ticktock/SMDG
+tic/MS
+Ticonderoga/M
+tidal/Y
+tidbit/MS
+tiddlywinks/M
+tide/GJDS
+tideland/MS
+tidewater/SM
+tideway/SM
+tidily/U
+tidiness/USM
+tidying/M
+tidy/UGDSRPT
+tie/AUDS
+tieback/MS
+Tiebold/M
+Tiebout/M
+tiebreaker/SM
+Tieck/M
+Tiena/M
+Tienanmen/M
+Tientsin's
+tier/DGM
+Tierney/M
+Tiertza/M
+Tiffanie/M
+Tiffani/M
+tiffany/M
+Tiffany/M
+tiff/GDMS
+Tiffie/M
+Tiffi/M
+Tiff/M
+Tiffy/M
+tigerish
+tiger/SM
+tightener/M
+tighten/JZGDR
+tightfisted
+tightness/MS
+tightrope/SM
+tight/STXPRNY
+tightwad/MS
+tigress/SM
+Tigris/M
+Tijuana/M
+tike's
+Tilda/M
+tilde/MS
+Tildie/M
+Tildi/M
+Tildy/M
+tile/DRSJMZG
+tiled/UE
+Tiler/M
+tiles/U
+tiling/M
+tillable
+tillage/SM
+till/EGSZDR
+tiller/GDM
+tiller's/E
+Tillich/M
+Tillie/M
+Tillman/M
+Tilly/M
+tilth/M
+tilt/RDSGZ
+Ti/M
+timber/DMSG
+timbering/M
+timberland/SM
+timberline/S
+timbrel/SM
+timbre/MS
+Timbuktu/M
+ti/MDRZ
+timebase
+time/DRSJMYZG
+timekeeper/MS
+timekeeping/SM
+timelessness/S
+timeless/PY
+timeliness/SMU
+timely/UTRP
+timeout/S
+timepiece/MS
+timer/M
+timescale/S
+timeserver/MS
+timeserving/S
+timeshare/SDG
+timespan
+timestamped
+timestamps
+timetable/GMSD
+timeworn
+Timex/M
+timezone/S
+timidity/SM
+timidness/MS
+timid/RYTP
+Timi/M
+timing/M
+Timmie/M
+Timmi/M
+Tim/MS
+Timmy/M
+Timofei/M
+Timon/M
+timorousness/MS
+timorous/YP
+Timoteo/M
+Timothea/M
+Timothee/M
+Timotheus/M
+Timothy/M
+timothy/MS
+timpani
+timpanist/S
+Timur/M
+Tina/M
+tincture/SDMG
+tinderbox/MS
+tinder/MS
+Tine/M
+tine/SM
+tinfoil/MS
+tingeing
+tinge/S
+ting/GYDM
+tingle/SDG
+tingling/Y
+tingly/TR
+Ting/M
+tinily
+tininess/MS
+tinker/SRDMZG
+Tinkertoy
+tinkle/SDG
+tinkling/M
+tinkly
+tin/MDGS
+tinned
+tinner/M
+tinnily
+tinniness/SM
+tinning/M
+tinnitus/MS
+tinny/RSTP
+tinplate/S
+tinsel/GMDYS
+Tinseltown/M
+tinsmith/M
+tinsmiths
+tinter/M
+tintinnabulation/MS
+Tintoretto/M
+tint/SGMRDB
+tintype/SM
+tinware/MS
+tiny/RPT
+Tioga/M
+Tiphanie/M
+Tiphani/M
+Tiphany/M
+tipi's
+tip/MS
+tipoff
+Tippecanoe/M
+tipped
+Tipperary/M
+tipper/MS
+tippet/MS
+tipping
+tippler/M
+tipple/ZGRSD
+tippy/R
+tipsily
+tipsiness/SM
+tipster/SM
+tipsy/TPR
+tiptoeing
+tiptoe/SD
+tiptop/S
+tirade/SM
+Tirana's
+Tirane
+tired/AYP
+tireder
+tiredest
+tiredness/S
+tirelessness/SM
+tireless/PY
+tire/MGDSJ
+tires/A
+Tiresias/M
+tiresomeness/S
+tiresome/PY
+tiring/AU
+Tirolean/S
+Tirol/M
+tiro's
+Tirrell/M
+tis
+Tisha/M
+Tish/M
+tissue/MGSD
+titanate/M
+Titania/M
+titanic
+titanically
+Titanic/M
+titanium/SM
+titan/SM
+Titan/SM
+titbit's
+titer/M
+tither/M
+tithe/SRDGZM
+tithing/M
+Titian/M
+titian/S
+Titicaca/M
+titillate/XSDVNG
+titillating/Y
+titillation/M
+titivate/NGDSX
+titivation/M
+titled/AU
+title/GMSRD
+titleholder/SM
+titling/A
+titmice
+titmouse/M
+tit/MRZS
+Tito/SM
+titrate/SDGN
+titration/M
+titted
+titter/GDS
+titting
+tittle/SDMG
+titular/SY
+Titus/M
+tizzy/SM
+TKO
+Tlaloc/M
+TLC
+Tlingit/M
+Tl/M
+TM
+Tm/M
+tn
+TN
+tnpk
+TNT
+toad/SM
+toadstool/SM
+toady/GSDM
+toadyism/M
+toaster/M
+toastmaster/MS
+toastmistress/S
+toast/SZGRDM
+toasty/TRS
+tobacconist/SM
+tobacco/SM
+Tobago/M
+Tobe/M
+Tobey/M
+Tobiah/M
+Tobias/M
+Tobie/M
+Tobi/M
+Tobin/M
+Tobit/M
+toboggan/MRDSZG
+Tobye/M
+Toby/M
+Tocantins/M
+toccata/M
+Tocqueville
+tocsin/MS
+to/D
+today'll
+today/SM
+Toddie/M
+toddler/M
+toddle/ZGSRD
+Todd/M
+Toddy/M
+toddy/SM
+Tod/M
+toecap/SM
+toeclip/S
+TOEFL
+toehold/MS
+toeing
+toe/MS
+toenail/DMGS
+toffee/SM
+tofu/S
+toga/SMD
+toge
+togetherness/MS
+together/P
+togged
+togging
+toggle/SDMG
+Togolese/M
+Togo/M
+tog/SMG
+Toiboid/M
+toilet/GMDS
+toiletry/MS
+toilette/SM
+toil/SGZMRD
+toilsomeness/M
+toilsome/PY
+Toinette/M
+Tojo/M
+tokamak
+Tokay/M
+toke/GDS
+tokenism/SM
+tokenized
+token/SMDG
+Tokugawa/M
+Tokyoite/MS
+Tokyo/M
+Toland/M
+told/AU
+Toledo/SM
+tole/MGDS
+tolerability/IM
+tolerable/I
+tolerably/I
+tolerance/SIM
+tolerant/IY
+tolerate/XVNGSD
+toleration/M
+Tolkien
+tollbooth/M
+tollbooths
+toll/DGS
+Tolley/M
+tollgate/MS
+tollhouse/M
+tollway/S
+Tolstoy/M
+toluene/MS
+Tolyatti/M
+tomahawk/SGMD
+Tomasina/M
+Tomasine/M
+Toma/SM
+Tomaso/M
+tomatoes
+tomato/M
+Tombaugh/M
+tomb/GSDM
+Tombigbee/M
+tomblike
+tombola/M
+tomboyish
+tomboy/MS
+tombstone/MS
+tomcat/SM
+tomcatted
+tomcatting
+Tome/M
+tome/SM
+tomfoolery/MS
+tomfool/M
+Tomi/M
+Tomkin/M
+Tomlin/M
+Tom/M
+tommed
+Tommie/M
+Tommi/M
+tomming
+tommy/M
+Tommy/M
+tomographic
+tomography/MS
+tomorrow/MS
+Tompkins/M
+Tomsk/M
+tom/SM
+tomtit/SM
+tonality/MS
+tonal/Y
+tonearm/S
+tone/ISRDZG
+tonelessness/M
+toneless/YP
+toner/IM
+tone's
+Tonga/M
+Tongan/SM
+tong/GRDS
+tongueless
+tongue/SDMG
+tonguing/M
+Tonia/M
+tonic/SM
+Tonie/M
+tonight/MS
+Toni/M
+Tonio/M
+tonk/MS
+tonnage/SM
+tonne/MS
+Tonnie/M
+tonsillectomy/MS
+tonsillitis/SM
+tonsil/SM
+ton/SKM
+tonsorial
+tonsure/SDGM
+Tonto/M
+Tonya/M
+Tonye/M
+Tony/M
+tony/RT
+toodle
+too/H
+took/A
+tool/AGDS
+toolbox/SM
+tooler/SM
+tooling/M
+toolkit/SM
+toolmaker/M
+toolmake/ZRG
+toolmaking/M
+tool's
+toolsmith
+Toomey/M
+tooter/M
+toot/GRDZS
+toothache/SM
+toothbrush/MSG
+tooth/DMG
+toothily
+toothless
+toothmarks
+toothpaste/SM
+toothpick/MS
+tooths
+toothsome
+toothy/TR
+tootle/SRDG
+tootsie
+Tootsie/M
+toots/M
+tootsy/MS
+topaz/MS
+topcoat/MS
+topdressing/S
+Topeka/M
+toper/M
+topflight
+topgallant/M
+topiary/S
+topicality/MS
+topical/Y
+topic/MS
+topknot/MS
+topless
+topmast/MS
+topmost
+topnotch/R
+topocentric
+topographer/SM
+topographic
+topographical/Y
+topography/MS
+topological/Y
+topologist/MS
+topology/MS
+topped
+topper/MS
+topping/MS
+topple/GSD
+topsail/MS
+topside/SRM
+top/SMDRG
+topsoil/GDMS
+topspin/MS
+Topsy/M
+toque/MS
+Torah/M
+Torahs
+torchbearer/SM
+torchlight/S
+torch/SDMG
+toreador/SM
+Tore/M
+tore/S
+Torey/M
+Torie/M
+tori/M
+Tori/M
+Torin/M
+torment/GSD
+tormenting/Y
+tormentor/MS
+torn
+tornadoes
+tornado/M
+toroidal/Y
+toroid/MS
+Toronto/M
+torpedoes
+torpedo/GMD
+torpidity/S
+torpid/SY
+torpor/MS
+Torquemada/M
+torque/MZGSRD
+Torrance/M
+Torre/MS
+torrence
+Torrence/M
+Torrens/M
+torrential
+torrent/MS
+Torrey/M
+Torricelli/M
+torridity/SM
+torridness/SM
+torrid/RYTP
+Torrie/M
+Torrin/M
+Torr/XM
+Torry/M
+torsional/Y
+torsion/IAM
+torsions
+torsi's
+tor/SLM
+torso/SM
+tors/S
+tort/ASFE
+tortellini/MS
+torte/MS
+torten
+tortilla/MS
+tortoiseshell/SM
+tortoise/SM
+Tortola/M
+tortoni/MS
+tort's
+Tortuga/M
+tortuousness/MS
+tortuous/PY
+torture/ZGSRD
+torturous
+torus/MS
+Tory/SM
+Tosca/M
+Toscanini/M
+Toshiba/M
+toss/SRDGZ
+tossup/MS
+totaler/M
+totalistic
+totalitarianism/SM
+totalitarian/S
+totality/MS
+totalizator/S
+totalizing
+total/ZGSRDYM
+totemic
+totem/MS
+toter/M
+tote/S
+toting/M
+tot/MDRSG
+Toto/M
+totted
+totterer/M
+tottering/Y
+totter/ZGRDS
+totting
+toucan/MS
+touchable/U
+touch/ASDG
+touchdown/SM
+touched/U
+toucher/M
+touchily
+touchiness/SM
+touching/SY
+touchline/M
+touchscreen
+touchstone/SM
+touchy/TPR
+toughen/DRZG
+toughener/M
+toughness/SM
+toughs
+tough/TXGRDNYP
+Toulouse/M
+toupee/SM
+toured/CF
+tourer/M
+tour/GZSRDM
+touring/F
+tourism/SM
+touristic
+tourist/SM
+touristy
+tourmaline/SM
+tournament/MS
+tourney/GDMS
+tourniquet/MS
+tour's/CF
+tours/CF
+tousle/GSD
+touter/M
+tout/SGRD
+Tova/M
+Tove/M
+towardliness/M
+towardly/P
+towards
+toward/YU
+towboat/MS
+tow/DRSZG
+towelette/S
+towel/GJDMS
+toweling/M
+tower/GMD
+towering/Y
+towhead/MSD
+towhee/SM
+towline/MS
+towner/M
+Townes
+Towney/M
+townhouse/S
+Townie/M
+townie/S
+Townley/M
+Town/M
+Townsend/M
+townsfolk
+township/MS
+townsman/M
+townsmen
+townspeople/M
+town/SRM
+townswoman/M
+townswomen
+Towny/M
+towpath/M
+towpaths
+towrope/MS
+Towsley/M
+toxemia/MS
+toxicity/MS
+toxicological
+toxicologist/SM
+toxicology/MS
+toxic/S
+toxin/MS
+toyer/M
+toymaker
+toy/MDRSG
+Toynbee/M
+Toyoda/M
+Toyota/M
+toyshop
+tr
+traceability/M
+traceableness/M
+traceable/P
+trace/ASDG
+traceback/MS
+traced/U
+Tracee/M
+traceless/Y
+Trace/M
+tracepoint/SM
+tracer/MS
+tracery/MDS
+trace's
+Tracey/M
+tracheae
+tracheal/M
+trachea/M
+tracheotomy/SM
+Tracie/M
+Traci/M
+tracing/SM
+trackage
+trackball/S
+trackbed
+tracked/U
+tracker/M
+trackless
+tracksuit/SM
+track/SZGMRD
+tractability/SI
+tractable/I
+tractably/I
+tract/ABS
+Tractarians
+traction/KSCEMAF
+tractive/KFE
+tractor/FKMASC
+tract's
+tracts/CEFK
+Tracy/M
+trademark/GSMD
+trader/M
+tradesman/M
+tradesmen
+tradespeople
+tradespersons
+trade/SRDGZM
+tradeswoman/M
+tradeswomen
+traditionalism/MS
+traditionalistic
+traditionalist/MS
+traditionalized
+traditionally
+traditional/U
+tradition/SM
+traduce/DRSGZ
+Trafalgar/M
+trafficked
+trafficker/MS
+trafficking/S
+traffic/SM
+tragedian/SM
+tragedienne/MS
+tragedy/MS
+tragically
+tragicomedy/SM
+tragicomic
+tragic/S
+trailblazer/MS
+trailblazing/S
+trailer/GDM
+trails/F
+trailside
+trail/SZGJRD
+trainable
+train/ASDG
+trained/U
+trainee/MS
+traineeships
+trainer/MS
+training/SM
+trainman/M
+trainmen
+trainspotter/S
+traipse/DSG
+trait/MS
+traitorous/Y
+traitor/SM
+Trajan/M
+trajectory/MS
+trammed
+trammeled/U
+trammel/GSD
+tramming
+tram/MS
+trample/DGRSZ
+trampler/M
+trampoline/GMSD
+tramp/RDSZG
+tramway/M
+trance/MGSD
+tranche/SM
+Tran/M
+tranquility/S
+tranquilized/U
+tranquilize/JGZDSR
+tranquilizer/M
+tranquilizes/A
+tranquilizing/YM
+tranquillize/GRSDZ
+tranquillizer/M
+tranquilness/M
+tranquil/PTRY
+transact/GSD
+transactional
+transaction/MS
+transactor/SM
+transalpine
+transaminase
+transatlantic
+Transcaucasia/M
+transceiver/SM
+transcendence/MS
+transcendentalism/SM
+transcendentalist/SM
+transcendental/YS
+transcendent/Y
+transcend/SDG
+transconductance
+transcontinental
+transcribe/DSRGZ
+transcriber/M
+transcription/SM
+transcript/SM
+transcultural
+transducer/SM
+transduction/M
+transect/DSG
+transept/SM
+transferability/M
+transferal/MS
+transfer/BSMD
+transferee/M
+transference/SM
+transferor/MS
+transferral/SM
+transferred
+transferrer/SM
+transferring
+transfiguration/SM
+transfigure/SDG
+transfinite/Y
+transfix/SDG
+transformational
+transformation/MS
+transform/DRZBSG
+transformed/U
+transformer/M
+transfuse/XSDGNB
+transfusion/M
+transgression/SM
+transgressor/S
+transgress/VGSD
+trans/I
+transience/SM
+transiency/S
+transient/YS
+transistorize/GDS
+transistor/SM
+Transite/M
+transitional/Y
+transition/MDGS
+transitivenesses
+transitiveness/IM
+transitive/PIY
+transitivity/MS
+transitoriness/M
+transitory/P
+transit/SGVMD
+transl
+translatability/M
+translatable/U
+translated/AU
+translate/VGNXSDB
+translational
+translation/M
+translator/SM
+transliterate/XNGSD
+translucence/SM
+translucency/MS
+translucent/Y
+transmigrate/XNGSD
+transmissible
+transmission/MSA
+transmissive
+transmit/AS
+transmittable
+transmittal/SM
+transmittance/MS
+transmitted/A
+transmitter/SM
+transmitting/A
+transmogrification/M
+transmogrify/GXDSN
+transmutation/SM
+transmute/GBSD
+transnational/S
+transoceanic
+transom/SM
+transonic
+transpacific
+transparency/MS
+transparentness/M
+transparent/YP
+transpiration/SM
+transpire/GSD
+transplantation/S
+transplant/GRDBS
+transpolar
+transponder/MS
+transportability
+transportable/U
+transportation/SM
+transport/BGZSDR
+transpose/BGSD
+transposed/U
+transposition/SM
+Transputer/M
+transsexualism/MS
+transsexual/SM
+transship/LS
+transshipment/SM
+transshipped
+transshipping
+transubstantiation/MS
+Transvaal/M
+transversal/YM
+transverse/GYDS
+transvestism/SM
+transvestite/SM
+transvestitism
+Transylvania/M
+trapdoor/S
+trapeze/DSGM
+trapezium/MS
+trapezoidal
+trapezoid/MS
+trap/MS
+trappable/U
+trapped
+trapper/SM
+trapping/S
+Trappist/MS
+trapshooting/SM
+trashcan/SM
+trashiness/SM
+trash/SRDMG
+trashy/TRP
+Trastevere/M
+trauma/MS
+traumatic
+traumatically
+traumatize/SDG
+travail/SMDG
+traveled/U
+traveler/M
+travelog's
+travelogue/S
+travel/SDRGZJ
+Traver/MS
+traversal/SM
+traverse/GBDRS
+traverser/M
+travertine/M
+travesty/SDGM
+Travis/M
+Travus/M
+trawler/M
+trawl/RDMSZG
+tray/SM
+treacherousness/SM
+treacherous/PY
+treachery/SM
+treacle/DSGM
+treacly
+treader/M
+treadle/GDSM
+treadmill/MS
+tread/SAGD
+Treadwell/M
+treas
+treason/BMS
+treasonous
+treasure/DRSZMG
+treasurer/M
+treasurership
+treasury/SM
+Treasury/SM
+treatable
+treated/U
+treater/S
+treatise/MS
+treatment/MS
+treat's
+treat/SAGDR
+treaty/MS
+treble/SDG
+Treblinka/M
+treeing
+treeless
+treelike
+tree/MDS
+treetop/SM
+trefoil/SM
+Trefor/M
+trekked
+trekker/MS
+Trekkie/M
+trekking
+trek/MS
+trellis/GDSM
+Tremaine/M
+Tremain/M
+trematode/SM
+Tremayne/M
+tremble/JDRSG
+trembler/M
+trembles/M
+trembly
+tremendousness/M
+tremendous/YP
+tremolo/MS
+tremor/MS
+tremulousness/SM
+tremulous/YP
+trenchancy/MS
+trenchant/Y
+trencherman/M
+trenchermen
+trencher/SM
+trench/GASD
+trench's
+trendily
+trendiness/S
+trend/SDMG
+trendy/PTRS
+Trenna/M
+Trent/M
+Trenton/M
+trepanned
+trepidation/MS
+Tresa/M
+Trescha/M
+trespasser/M
+trespass/ZRSDG
+Tressa/M
+tressed/E
+tresses/E
+tressing/E
+tress/MSDG
+trestle/MS
+Trevar/M
+Trevelyan/M
+Trever/M
+Trevino/M
+Trevor/M
+Trev/RM
+Trey/M
+trey/MS
+triableness/M
+triable/P
+triadic
+triad/MS
+triage/SDMG
+trial/ASM
+trialization
+trialled
+trialling
+triamcinolone
+triangle/SM
+triangulable
+triangularization/S
+triangular/Y
+triangulate/YGNXSD
+triangulation/M
+Triangulum/M
+Trianon/M
+Triassic
+triathlon/S
+triatomic
+tribalism/MS
+tribal/Y
+tribe/MS
+tribesman/M
+tribesmen
+tribeswoman
+tribeswomen
+tribulate/NX
+tribulation/M
+tribunal/MS
+tribune/SM
+tributary/MS
+tribute/EGSF
+tribute's
+trice/GSDM
+tricentennial/S
+triceps/SM
+triceratops/M
+trichinae
+trichina/M
+trichinoses
+trichinosis/M
+trichloroacetic
+trichloroethane
+trichotomy/M
+trichromatic
+Tricia/M
+trickery/MS
+trick/GMSRD
+trickily
+trickiness/SM
+trickle/DSG
+trickster/MS
+tricky/RPT
+tricolor/SMD
+tricycle/SDMG
+trident/SM
+tridiagonal
+tried/UA
+triennial/SY
+trier/AS
+trier's
+tries/A
+Trieste/M
+triffid/S
+trifle/MZGJSRD
+trifler/M
+trifluoride/M
+trifocals
+trigged
+trigger/GSDM
+triggest
+trigging
+triglyceride/MS
+trigonal/Y
+trigonometric
+trigonometrical
+trigonometry/MS
+trigram/S
+trig/S
+trihedral
+trike/GMSD
+trilateral/S
+trilby/SM
+trilingual
+trillion/SMH
+trillionth/M
+trillionths
+trillium/SM
+trill/RDMGS
+trilobite/MS
+trilogy/MS
+trimaran/MS
+Trimble/M
+trimer/M
+trimester/MS
+trimmed/U
+trimmer/MS
+trimmest
+trimming/MS
+trimness/S
+trimodal
+trimonthly
+trim/PSYR
+Trimurti/M
+Trina/M
+Trinidad/M
+trinitarian/S
+trinitrotoluene/SM
+trinity/MS
+Trinity/MS
+trinketer/M
+trinket/MRDSG
+triode/MS
+trio/SM
+trioxide/M
+tripartite/N
+tripartition/M
+tripe/MS
+triphenylarsine
+triphenylphosphine
+triphenylstibine
+triphosphopyridine
+triple/GSD
+triplet/SM
+triplex/S
+triplicate/SDG
+triplication/M
+triply/GDSN
+Trip/M
+tripodal
+tripod/MS
+tripoli/M
+Tripoli/M
+tripolyphosphate
+tripos/SM
+tripped
+Trippe/M
+tripper/MS
+tripping/Y
+Tripp/M
+trip/SMY
+triptych/M
+triptychs
+tripwire/MS
+trireme/SM
+Tris
+trisect/GSD
+trisection/S
+trisector
+Trisha/M
+Trish/M
+trisodium
+Trista/M
+Tristam/M
+Tristan/M
+tristate
+trisyllable/M
+tritely/F
+triteness/SF
+trite/SRPTY
+tritium/MS
+triton/M
+Triton/M
+triumphal
+triumphalism
+triumphant/Y
+triumph/GMD
+triumphs
+triumvirate/MS
+triumvir/MS
+triune
+trivalent
+trivet/SM
+trivia
+triviality/MS
+trivialization/MS
+trivialize/DSG
+trivial/Y
+trivium/M
+Trixie/M
+Trixi/M
+Trix/M
+Trixy/M
+Trobriand/M
+trochaic/S
+trochee/SM
+trod/AU
+trodden/UA
+trodes
+troff/MR
+troglodyte/MS
+troika/SM
+Trojan/MS
+troll/DMSG
+trolled/F
+trolleybus/S
+trolley/SGMD
+trolling/F
+trollish
+Trollope/M
+trollop/GSMD
+trolly's
+trombone/MS
+trombonist/SM
+tromp/DSG
+Trondheim/M
+trooper/M
+troopship/SM
+troop/SRDMZG
+trope/SM
+Tropez/M
+trophic
+trophy/MGDS
+tropical/SY
+tropic/MS
+tropism/SM
+tropocollagen
+troposphere/MS
+tropospheric
+troth/GDM
+troths
+trot/S
+Trotsky/M
+trotted
+trotter/SM
+trotting
+troubadour/SM
+troubled/U
+trouble/GDRSM
+troublemaker/MS
+troubler/M
+troubleshooter/M
+troubleshoot/SRDZG
+troubleshot
+troublesomeness/M
+troublesome/YP
+trough/M
+troughs
+trounce/GZDRS
+trouncer/M
+troupe/MZGSRD
+trouper/M
+trouser/DMGS
+trousseau/M
+trousseaux
+Troutman/M
+trout/SM
+trove/SM
+troweler/M
+trowel/SMDRGZ
+trow/SGD
+Troyes
+Troy/M
+troy/S
+Trstram/M
+truancy/MS
+truant/SMDG
+truce/SDGM
+Truckee/M
+trucker/M
+trucking/M
+truckle/GDS
+truckload/MS
+truck/SZGMRDJ
+truculence/SM
+truculent/Y
+Truda/M
+Trudeau/M
+Trude/M
+Trudey/M
+trudge/SRDG
+Trudie/M
+Trudi/M
+Trudy/M
+true/DRSPTG
+truelove/MS
+Trueman/M
+trueness/M
+truer/U
+truest/U
+truffle/MS
+truism/SM
+Trujillo/M
+Trula/M
+truly/U
+Trumaine/M
+Truman/M
+Trumann/M
+Trumbull/M
+trump/DMSG
+trumpery/SM
+trumpeter/M
+trumpet/MDRZGS
+Trump/M
+truncate/NGDSX
+truncation/M
+truncheon/MDSG
+trundle/GZDSR
+trundler/M
+trunk/GSMD
+trunnion/SM
+trusser/M
+trussing/M
+truss/SRDG
+trusted/EU
+trusteeing
+trustee/MDS
+trusteeship/SM
+truster/M
+trustful/EY
+trustfulness/SM
+trustiness/M
+trusting/Y
+trust/RDMSG
+trusts/E
+trustworthier
+trustworthiest
+trustworthiness/MS
+trustworthy/UP
+trusty/PTMSR
+Truth
+truthfulness/US
+truthful/UYP
+truths/U
+truth/UM
+TRW
+trying/Y
+try/JGDRSZ
+tryout/MS
+trypsin/M
+tryst/GDMS
+ts
+T's
+tsarevich
+tsarina's
+tsarism/M
+tsarist
+tsetse/S
+Tsimshian/M
+Tsiolkovsky/M
+Tsitsihar/M
+tsp
+tsunami/MS
+Tsunematsu/M
+Tswana/M
+TTL
+tty/M
+ttys
+Tuamotu/M
+Tuareg/M
+tubae
+tubal
+tuba/SM
+tubbed
+tubbing
+tubby/TR
+tubeless
+tubercle/MS
+tubercular/S
+tuberculin/MS
+tuberculoses
+tuberculosis/M
+tuberculous
+tuber/M
+tuberose/SM
+tuberous
+tube/SM
+tubing/M
+tub/JMDRSZG
+Tubman/M
+tubular/Y
+tubule/SM
+tucker/GDM
+Tucker/M
+tuck/GZSRD
+Tuckie/M
+Tuck/RM
+Tucky/M
+Tucson/M
+Tucuman/M
+Tudor/MS
+Tue/S
+Tuesday/SM
+tufter/M
+tuft/GZSMRD
+tufting/M
+tugboat/MS
+tugged
+tugging
+tug/S
+tuition/ISM
+Tulane/M
+tularemia/S
+tulip/SM
+tulle/SM
+Tulley/M
+Tull/M
+Tully/M
+Tulsa/M
+tum
+tumbledown
+tumbler/M
+tumbleweed/MS
+tumble/ZGRSDJ
+tumbrel/SM
+tumescence/S
+tumescent
+tumidity/MS
+tumid/Y
+tummy/SM
+tumor/MDS
+tumorous
+Tums/M
+tumult/SGMD
+tumultuousness/M
+tumultuous/PY
+tumulus/M
+tunableness/M
+tunable/P
+tuna/SM
+tundra/SM
+tun/DRJZGBS
+tune/CSDG
+tunefulness/MS
+tuneful/YP
+tuneless/Y
+tuner/M
+tune's
+tuneup/S
+tung
+tungstate/M
+tungsten/SM
+Tunguska/M
+Tungus/M
+tunic/MS
+tuning/A
+tuning's
+Tunisia/M
+Tunisian/S
+Tunis/M
+tunned
+tunneler/M
+tunnel/MRDSJGZ
+tunning
+tunny/SM
+tupelo/M
+Tupi/M
+tuple/SM
+tuppence/M
+Tupperware
+Tupungato/M
+turban/SDM
+turbid
+turbidity/SM
+turbinate/SD
+turbine/SM
+turbocharged
+turbocharger/SM
+turbofan/MS
+turbojet/MS
+turboprop/MS
+turbo/SM
+turbot/MS
+turbulence/SM
+turbulent/Y
+turd/MS
+tureen/MS
+turf/DGSM
+turfy/RT
+Turgenev/M
+turgidity/SM
+turgidness/M
+turgid/PY
+Turing/M
+Turin/M
+Turkestan/M
+Turkey/M
+turkey/SM
+Turkic/SM
+Turkish
+Turkmenistan/M
+turk/S
+Turk/SM
+turmeric/MS
+turmoil/SDMG
+turnabout/SM
+turnaround/MS
+turn/AZGRDBS
+turnbuckle/SM
+turncoat/SM
+turned/U
+turner/M
+Turner/M
+turning/MS
+turnip/SMDG
+turnkey/MS
+turnoff/MS
+turnout/MS
+turnover/SM
+turnpike/MS
+turnround/MS
+turnstile/SM
+turnstone/M
+turntable/SM
+turpentine/GMSD
+Turpin/M
+turpitude/SM
+turquoise/SM
+turret/SMD
+turtleback/MS
+turtledove/MS
+turtleneck/SDM
+turtle/SDMG
+turves's
+turvy
+Tuscaloosa/M
+Tuscan
+Tuscany/M
+Tuscarora/M
+Tuscon/M
+tush/SDG
+Tuskegee/M
+tusker/M
+tusk/GZRDMS
+tussle/GSD
+tussock/MS
+tussocky
+Tussuad/M
+Tutankhamen/M
+tutelage/MS
+tutelary/S
+Tut/M
+tutored/U
+tutorial/MS
+tutor/MDGS
+tutorship/S
+tut/S
+Tutsi
+tutted
+tutting
+tutti/S
+Tuttle/M
+tutu/SM
+Tuvalu
+tuxedo/SDM
+tux/S
+TVA
+TV/M
+TVs
+twaddle/GZMRSD
+twaddler/M
+Twain/M
+twain/S
+TWA/M
+twang/MDSG
+twangy/TR
+twas
+tweak/SGRD
+tweediness/M
+Tweedledee/M
+Tweedledum/M
+Tweed/M
+twee/DP
+tweed/SM
+tweedy/PTR
+tween
+tweeter/M
+tweet/ZSGRD
+tweezer/M
+tweeze/ZGRD
+twelfth
+twelfths
+twelvemonth/M
+twelvemonths
+twelve/MS
+twentieths
+twenty/MSH
+twerp/MS
+twice/R
+twiddle/GRSD
+twiddler/M
+twiddly/RT
+twigged
+twigging
+twiggy/RT
+twig/SM
+Twila/M
+twilight/MS
+twilit
+twill/SGD
+twiner/M
+twine/SM
+twinge/SDMG
+Twinkie
+twinkler/M
+twinkle/RSDG
+twinkling/M
+twinkly
+twinned
+twinning
+twin/RDMGZS
+twirler/M
+twirling/Y
+twirl/SZGRD
+twirly/TR
+twisted/U
+twister/M
+twists/U
+twist/SZGRD
+twisty
+twitch/GRSD
+twitchy/TR
+twit/S
+twitted
+twitterer/M
+twitter/SGRD
+twittery
+twitting
+twixt
+twofer/MS
+twofold/S
+two/MS
+twopence/SM
+twopenny/S
+twosome/MS
+twp
+Twp
+TWX
+Twyla/M
+TX
+t/XTJBG
+Tybalt/M
+Tybie/M
+Tybi/M
+tycoon/MS
+tyeing
+Tye/M
+tying/UA
+tyke/SM
+Tylenol/M
+Tyler/M
+Ty/M
+Tymon/M
+Tymothy/M
+tympani
+tympanist/SM
+tympanum/SM
+Tynan/M
+Tyndale/M
+Tyndall/M
+Tyne/M
+typeahead
+typecast/SG
+typed/AU
+typedef/S
+typeface/MS
+typeless
+type/MGDRSJ
+types/A
+typescript/SM
+typeset/S
+typesetter/MS
+typesetting/SM
+typewriter/M
+typewrite/SRJZG
+typewriting/M
+typewritten
+typewrote
+typhoid/SM
+Typhon/M
+typhoon/SM
+typhus/SM
+typicality/MS
+typically
+typicalness/M
+typical/U
+typification/M
+typify/SDNXG
+typing/A
+typist/MS
+typographer/SM
+typographic
+typographical/Y
+typography/MS
+typological/Y
+typology/MS
+typo/MS
+tyrannic
+tyrannicalness/M
+tyrannical/PY
+tyrannicide/M
+tyrannizer/M
+tyrannize/ZGJRSD
+tyrannizing/YM
+tyrannosaur/MS
+tyrannosaurus/S
+tyrannous
+tyranny/MS
+tyrant/MS
+Tyree/M
+tyreo
+Tyrolean/S
+Tyrol's
+Tyrone/M
+tyrosine/M
+tyro/SM
+Tyrus/M
+Tyson/M
+tzarina's
+tzar's
+Tzeltal/M
+u
+U
+UAR
+UART
+UAW
+Ubangi/M
+ubiquitous/YP
+ubiquity/S
+Ucayali/M
+Uccello/M
+UCLA/M
+Udale/M
+Udall/M
+udder/SM
+Udell/M
+Ufa/M
+ufologist/S
+ufology/MS
+UFO/S
+Uganda/M
+Ugandan/S
+ugh
+ughs
+uglification
+ugliness/MS
+uglis
+ugly/PTGSRD
+Ugo/M
+uh
+UHF
+Uighur
+Ujungpandang/M
+UK
+ukase/SM
+Ukraine/M
+Ukrainian/S
+ukulele/SM
+UL
+Ula/M
+Ulberto/M
+ulcerate/NGVXDS
+ulceration/M
+ulcer/MDGS
+ulcerous
+Ulick/M
+Ulises/M
+Ulla/M
+Ullman/M
+ulnae
+ulna/M
+ulnar
+Ulrica/M
+Ulrich/M
+Ulrick/M
+Ulric/M
+Ulrika/M
+Ulrikaumeko/M
+Ulrike/M
+Ulster/M
+ulster/MS
+ult
+ulterior/Y
+ultimas
+ultimate/DSYPG
+ultimateness/M
+ultimatum/MS
+ultimo
+ultracentrifugally
+ultracentrifugation
+ultracentrifuge/M
+ultraconservative/S
+ultrafast
+ultrahigh
+ultralight/S
+ultramarine/SM
+ultramodern
+ultramontane
+ultra/S
+ultrashort
+ultrasonically
+ultrasonic/S
+ultrasonics/M
+ultrasound/SM
+ultrastructure/M
+Ultrasuede
+ultraviolet/SM
+Ultrix/M
+ULTRIX/M
+ululate/DSXGN
+ululation/M
+Ulyanovsk/M
+Ulysses/M
+um
+umbel/MS
+umber/GMDS
+Umberto/M
+umbilical/S
+umbilici
+umbilicus/M
+umbrage/MGSD
+umbrageous
+umbra/MS
+umbrella/GDMS
+Umbriel/M
+Umeko/M
+umiak/MS
+umlaut/GMDS
+umpire/MGSD
+ump/MDSG
+umpteen/H
+UN
+unabated/Y
+unabridged/S
+unacceptability
+unacceptable
+unaccepted
+unaccommodating
+unaccountability
+unaccustomed/Y
+unadapted
+unadulterated/Y
+unadventurous
+unalienability
+unalterableness/M
+unalterable/P
+unalterably
+Una/M
+unambiguity
+unambiguous
+unambitious
+unamused
+unanimity/SM
+unanimous/Y
+unanticipated/Y
+unapologetic
+unapologizing/M
+unappeasable
+unappeasably
+unappreciative
+unary
+unassailableness/M
+unassailable/P
+unassertive
+unassumingness/M
+unassuming/PY
+unauthorized/PY
+unavailing/PY
+unaware/SPY
+unbalanced/P
+unbar
+unbarring
+unbecoming/P
+unbeknown
+unbelieving/Y
+unbiased/P
+unbid
+unbind/G
+unblessed
+unblinking/Y
+unbodied
+unbolt/G
+unbreakability
+unbred
+unbroken
+unbuckle
+unbudging/Y
+unburnt
+uncap
+uncapping
+uncatalogued
+uncauterized/MS
+unceasing/Y
+uncelebrated
+uncertain/P
+unchallengeable
+unchangingness/M
+unchanging/PY
+uncharacteristic
+uncharismatic
+unchastity
+unchristian
+uncial/S
+uncivilized/Y
+unclassified
+uncle/MSD
+unclouded/Y
+uncodable
+uncollected
+uncoloredness/M
+uncolored/PY
+uncombable
+uncommunicative
+uncompetitive
+uncomplicated
+uncomprehending/Y
+uncompromisable
+unconcerned/P
+unconcern/M
+unconfirmed
+unconfused
+unconscionableness/M
+unconscionable/P
+unconscionably
+unconstitutional
+unconsumed
+uncontentious
+uncontrollability
+unconvertible
+uncool
+uncooperative
+uncork/G
+uncouple/G
+uncouthness/M
+uncouth/YP
+uncreate/V
+uncritical
+uncross/GB
+uncrowded
+unction/IM
+unctions
+unctuousness/MS
+unctuous/PY
+uncustomary
+uncut
+undated/I
+undaunted/Y
+undeceive
+undecided/S
+undedicated
+undefinability
+undefinedness/M
+undefined/P
+undelete
+undeliverability
+undeniableness/M
+undeniable/P
+undeniably
+undependable
+underachiever/M
+underachieve/SRDGZ
+underact/GDS
+underadjusting
+underage/S
+underarm/DGS
+underbedding
+underbelly/MS
+underbidding
+underbid/S
+underbracing
+underbrush/MSDG
+undercarriage/MS
+undercharge/GSD
+underclassman
+underclassmen
+underclass/S
+underclothes
+underclothing/MS
+undercoating/M
+undercoat/JMDGS
+underconsumption/M
+undercooked
+undercount/S
+undercover
+undercurrent/SM
+undercut/S
+undercutting
+underdeveloped
+underdevelopment/MS
+underdog/MS
+underdone
+undereducated
+underemphasis
+underemployed
+underemployment/SM
+underenumerated
+underenumeration
+underestimate/NGXSD
+underexploited
+underexpose/SDG
+underexposure/SM
+underfed
+underfeed/SG
+underfloor
+underflow/GDMS
+underfoot
+underfund/DG
+underfur/MS
+undergarment/SM
+undergirding
+undergoes
+undergo/G
+undergone
+undergrad/MS
+undergraduate/MS
+underground/RMS
+undergrowth/M
+undergrowths
+underhand/D
+underhandedness/MS
+underhanded/YP
+underheat
+underinvestment
+underlaid
+underlain/S
+underlay/GS
+underlie
+underline/GSDJ
+underling/MS
+underlip/SM
+underloaded
+underly/GS
+undermanned
+undermentioned
+undermine/SDG
+undermost
+underneath
+underneaths
+undernourished
+undernourishment/SM
+underpaid
+underpants
+underpart/MS
+underpass/SM
+underpay/GSL
+underpayment/SM
+underperformed
+underpinned
+underpinning/MS
+underpin/S
+underplay/SGD
+underpopulated
+underpopulation/M
+underpowered
+underpricing
+underprivileged
+underproduction/MS
+underrate/GSD
+underregistration/M
+underreported
+underreporting
+underrepresentation/M
+underrepresented
+underscore/SDG
+undersealed
+undersea/S
+undersecretary/SM
+undersell/SG
+undersexed
+undershirt/SM
+undershoot/SG
+undershorts
+undershot
+underside/SM
+undersigned/M
+undersign/SGD
+undersized
+undersizes
+undersizing
+underskirt/MS
+undersold
+underspecification
+underspecified
+underspend/G
+understaffed
+understandability/M
+understandably
+understanding/YM
+understand/RGSJB
+understate/GSDL
+understatement/MS
+understocked
+understood
+understrength
+understructure/SM
+understudy/GMSD
+undertaken
+undertaker/M
+undertake/SRGZJ
+undertaking/M
+underthings
+undertone/SM
+undertook
+undertow/MS
+underused
+underusing
+underutilization/M
+underutilized
+undervaluation/S
+undervalue/SDG
+underwater/S
+underway
+underwear/M
+underweight/S
+underwent
+underwhelm/DGS
+underwood/M
+Underwood/M
+underworld/MS
+underwrite/GZSR
+underwriter/M
+underwritten
+underwrote
+under/Y
+undeserving
+undesigned
+undeviating/Y
+undialyzed/SM
+undiplomatic
+undiscerning
+undiscriminating
+undo/GJ
+undoubted/Y
+undramatic
+undramatized/SM
+undress/G
+undrinkability
+undrinkable
+undroppable
+undue
+undulant
+undulate/XDSNG
+undulation/M
+unearthliness/S
+unearthly/P
+unearth/YG
+unease
+uneconomic
+uneducated
+unemployed/S
+unencroachable
+unending/Y
+unendurable/P
+unenergized/MS
+unenforced
+unenterprising
+UNESCO
+unethical
+uneulogized/SM
+unexacting
+unexceptionably
+unexcited
+unexpectedness/MS
+unfading/Y
+unfailingness/M
+unfailing/P
+unfamiliar
+unfashionable
+unfathomably
+unfavored
+unfeeling
+unfeigned/Y
+unfelt
+unfeminine
+unfertile
+unfetchable
+unflagging
+unflappability/S
+unflappable
+unflappably
+unflinching/Y
+unfold/LG
+unfoldment/M
+unforced
+unforgeable
+unfossilized/MS
+unfraternizing/SM
+unfrozen
+unfulfillable
+unfunny
+unfussy
+ungainliness/MS
+ungainly/PRT
+Ungava/M
+ungenerous
+ungentle
+unglamorous
+ungrammaticality
+ungrudging
+unguent/MS
+ungulate/MS
+unharmonious
+unharness/G
+unhistorical
+unholy/TP
+unhook/DG
+unhydrolyzed/SM
+unhygienic
+Unibus/M
+unicameral
+UNICEF
+unicellular
+Unicode/M
+unicorn/SM
+unicycle/MGSD
+unicyclist/MS
+unideal
+unidimensional
+unidiomatic
+unidirectionality
+unidirectional/Y
+unidolized/MS
+unifiable
+unification/MA
+unifier/MS
+unifilar
+uniformity/MS
+uniformness/M
+uniform/TGSRDYMP
+unify/AXDSNG
+unilateralism/M
+unilateralist
+unilateral/Y
+unimodal
+unimpeachably
+unimportance
+unimportant
+unimpressive
+unindustrialized/MS
+uninhibited/YP
+uninominal
+uninsured
+unintellectual
+unintended
+uninteresting
+uninterruptedness/M
+uninterrupted/YP
+unintuitive
+uninviting
+union/AEMS
+unionism/SM
+unionist/SM
+Unionist/SM
+unionize
+Union/MS
+UniPlus/M
+unipolar
+uniprocessor/SM
+uniqueness/S
+unique/TYSRP
+Uniroyal/M
+unisex/S
+UniSoft/M
+unison/MS
+Unisys/M
+unitarianism/M
+Unitarianism/SM
+unitarian/MS
+Unitarian/MS
+unitary
+unite/AEDSG
+united/Y
+uniter/M
+unitize/GDS
+unit/VGRD
+unity/SEM
+univ
+Univac/M
+univalent/S
+univalve/MS
+univariate
+universalism/M
+universalistic
+universality/SM
+universalize/DSRZG
+universalizer/M
+universal/YSP
+universe/MS
+university/MS
+Unix/M
+UNIX/M
+unjam
+unkempt
+unkind/TP
+unkink
+unknightly
+unknowable/S
+unknowing
+unlabored
+unlace/G
+unlearn/G
+unlikeable
+unlikeliness/S
+unlimber/G
+unlimited
+unlit
+unliterary
+unloose/G
+unlucky/TP
+unmagnetized/MS
+unmanageably
+unmannered/Y
+unmask/G
+unmeaning
+unmeasured
+unmeetable
+unmelodious
+unmemorable
+unmemorialized/MS
+unmentionable/S
+unmerciful
+unmeritorious
+unmethodical
+unmineralized/MS
+unmissable
+unmistakably
+unmitigated/YP
+unmnemonic
+unmobilized/SM
+unmoral
+unmount/B
+unmovable
+unmoving
+unnaturalness/M
+unnavigable
+unnerving/Y
+unobliging
+unoffensive
+unofficial
+unorganized/YP
+unorthodox
+unpack/G
+unpaintable
+unpalatability
+unpalatable
+unpartizan
+unpatronizing
+unpeople
+unperceptive
+unperson
+unperturbed/Y
+unphysical
+unpick/G
+unpicturesque
+unpinning
+unpleasing
+unploughed
+unpolarized/SM
+unpopular
+unpractical
+unprecedented/Y
+unpredictable/S
+unpreemphasized
+unpremeditated
+unpretentiousness/M
+unprincipled/P
+unproblematic
+unproductive
+unpropitious
+unprovable
+unproven
+unprovocative
+unpunctual
+unquestionable
+unraisable
+unravellings
+unreadability
+unread/B
+unreal
+unrealizable
+unreasoning/Y
+unreceptive
+unrecordable
+unreflective
+unrelenting/Y
+unremitting/Y
+unrepeatability
+unrepeated
+unrepentant
+unreported
+unrepresentative
+unreproducible
+unrest/G
+unrestrained/P
+unrewarding
+unriddle
+unripe/P
+unromantic
+unruliness/SM
+unruly/PTR
+unsaleable
+unsanitary
+unsavored/YP
+unsavoriness/M
+unseal/GB
+unsearchable
+unseasonal
+unseeing/Y
+unseen/S
+unselfconsciousness/M
+unselfconscious/P
+unselfishness/M
+unsellable
+unsentimental
+unset
+unsettledness/M
+unsettled/P
+unsettling/Y
+unshapely
+unshaven
+unshorn
+unsighted
+unsightliness/S
+unskilful
+unsociability
+unsociable/P
+unsocial
+unsound/PT
+unspeakably
+unspecific
+unspectacular
+unspoilt
+unspoke
+unsporting
+unstable/P
+unstigmatized/SM
+unstilted
+unstinting/Y
+unstopping
+unstrapping
+unstudied
+unstuffy
+unsubdued
+unsubstantial
+unsubtle
+unsuitable
+unsuspecting/Y
+unswerving/Y
+unsymmetrical
+unsympathetic
+unsystematic
+unsystematized/Y
+untactful
+untalented
+untaxing
+unteach/B
+untellable
+untenable
+unthinking
+until/G
+untiring/Y
+unto
+untouchable/MS
+untowardness/M
+untoward/P
+untraceable
+untrue
+untruthfulness/M
+untwist/G
+Unukalhai/M
+unusualness/M
+unutterable
+unutterably
+unvocalized/MS
+unvulcanized/SM
+unwaivering
+unwarrantable
+unwarrantably
+unwashed/PS
+unwearable
+unwearied/Y
+unwed
+unwedge
+unwelcome
+unwell/M
+unwieldiness/MS
+unwieldy/TPR
+unwind/B
+unwomanly
+unworkable/S
+unworried
+unwrap
+unwrapping
+unyielding/Y
+unyoke
+unzip
+up
+Upanishads
+uparrow
+upbeat/SM
+upbraid/GDRS
+upbringing/M
+upbring/JG
+UPC
+upchuck/SDG
+upcome/G
+upcountry/S
+updatability
+updater/M
+update/RSDG
+Updike/M
+updraft/SM
+upend/SDG
+upfield
+upfront
+upgradeable
+upgrade/DSJG
+upheaval/MS
+upheld
+uphill/S
+upholder/M
+uphold/RSGZ
+upholster/ADGS
+upholsterer/SM
+upholstery/MS
+UPI
+upkeep/SM
+uplander/M
+upland/MRS
+uplifter/M
+uplift/SJDRG
+upload/GSD
+upmarket
+upon
+upped
+uppercase/GSD
+upperclassman/M
+upperclassmen
+uppercut/S
+uppercutting
+uppermost
+upper/S
+upping
+uppish
+uppity
+upraise/GDS
+uprated
+uprating
+uprear/DSG
+upright/DYGSP
+uprightness/S
+uprise/RGJ
+uprising/M
+upriver/S
+uproariousness/M
+uproarious/PY
+uproar/MS
+uproot/DRGS
+uprooter/M
+ups
+UPS
+upscale/GDS
+upset/S
+upsetting/MS
+upshot/SM
+upside/MS
+upsilon/MS
+upslope
+upstage/DSRG
+upstairs
+upstandingness/M
+upstanding/P
+upstart/MDGS
+upstate/SR
+upstream/DSG
+upstroke/MS
+upsurge/DSG
+upswing/GMS
+upswung
+uptake/SM
+upthrust/GMS
+uptight
+uptime
+Upton/M
+uptown/RS
+uptrend/M
+upturn/GDS
+upwardness/M
+upward/SYP
+upwelling
+upwind/S
+uracil/MS
+Ural/MS
+Urania/M
+uranium/MS
+Uranus/M
+uranyl/M
+Urbain/M
+Urbana/M
+urbane/Y
+urbanism/M
+urbanite/SM
+urbanity/SM
+urbanization/MS
+urbanize/DSG
+Urban/M
+urbanologist/S
+urbanology/S
+Urbano/M
+urban/RT
+Urbanus/M
+urchin/SM
+Urdu/M
+urea/SM
+uremia/MS
+uremic
+ureter/MS
+urethane/MS
+urethrae
+urethral
+urethra/M
+urethritis/M
+Urey/M
+urge/GDRSJ
+urgency/SM
+urgent/Y
+urger/M
+Uriah/M
+uric
+Uriel/M
+urinal/MS
+urinalyses
+urinalysis/M
+urinary/MS
+urinate/XDSNG
+urination/M
+urine/MS
+Uri/SM
+URL
+Ur/M
+urning/M
+urn/MDGS
+urogenital
+urological
+urologist/S
+urology/MS
+Urquhart/M
+Ursala/M
+Ursa/M
+ursine
+Ursola/M
+Urson/M
+Ursula/M
+Ursulina/M
+Ursuline/M
+urticaria/MS
+Uruguayan/S
+Uruguay/M
+Urumqi
+US
+USA
+usability/S
+usable/U
+usably/U
+USAF
+usage/SM
+USART
+USCG
+USC/M
+USDA
+us/DRSBZG
+used/U
+use/ESDAG
+usefulness/SM
+useful/YP
+uselessness/MS
+useless/PY
+Usenet/M
+Usenix/M
+user/M
+USG/M
+usherette/SM
+usher/SGMD
+USIA
+USMC
+USN
+USO
+USP
+USPS
+USS
+USSR
+Ustinov/M
+usu
+usuals
+usual/UPY
+usurer/SM
+usuriousness/M
+usurious/PY
+usurpation/MS
+usurper/M
+usurp/RDZSG
+usury/SM
+UT
+Utahan/SM
+Utah/M
+Uta/M
+Ute/M
+utensil/SM
+uteri
+uterine
+uterus/M
+Utica/M
+utile/I
+utilitarianism/MS
+utilitarian/S
+utility/MS
+utilization/MS
+utilization's/A
+utilize/GZDRS
+utilizer/M
+utilizes/A
+utmost/S
+Utopia/MS
+utopianism/M
+utopian's
+Utopian/S
+utopia/S
+Utrecht/M
+Utrillo/M
+utterance/MS
+uttered/U
+utterer/M
+uttermost/S
+utter/TRDYGS
+uucp/M
+UV
+uvula/MS
+uvular/S
+uxorious
+Uzbekistan
+Uzbek/M
+Uzi/M
+V
+VA
+vacancy/MS
+vacantness/M
+vacant/PY
+vacate/NGXSD
+vacationist/SM
+vacationland
+vacation/MRDZG
+vaccinate/NGSDX
+vaccination/M
+vaccine/SM
+vaccinial
+vaccinia/M
+Vachel/M
+vacillate/XNGSD
+vacillating/Y
+vacillation/M
+vacillator/SM
+Vaclav/M
+vacua's
+vacuity/MS
+vacuo
+vacuolated/U
+vacuolate/SDGN
+vacuole/SM
+vacuolization/SM
+vacuousness/MS
+vacuous/PY
+vacuum/GSMD
+Vader/M
+Vaduz/M
+vagabondage/MS
+vagabond/DMSG
+vagarious
+vagary/MS
+vaginae
+vaginal/Y
+vagina/M
+vagrancy/MS
+vagrant/SMY
+vagueing
+vagueness/MS
+vague/TYSRDP
+Vail/M
+vaingloriousness/M
+vainglorious/YP
+vainglory/MS
+vain/TYRP
+val
+valance/SDMG
+Valaree/M
+Valaria/M
+Valarie/M
+Valdemar/M
+Valdez/M
+Valeda/M
+valediction/MS
+valedictorian/MS
+valedictory/MS
+Vale/M
+valence/SM
+Valencia/MS
+valency/MS
+Valene/M
+Valenka/M
+Valentia/M
+Valentijn/M
+Valentina/M
+Valentine/M
+valentine/SM
+Valentin/M
+Valentino/M
+Valenzuela/M
+Valera/M
+Valeria/M
+Valerian/M
+Valerie/M
+Valerye/M
+Valry/M
+vale/SM
+valet/GDMS
+valetudinarianism/MS
+valetudinarian/MS
+Valhalla/M
+valiance/S
+valiantness/M
+valiant/SPY
+Valida/M
+validated/AU
+validate/INGSDX
+validates/A
+validation/AMI
+validity/IMS
+validnesses
+validness/MI
+valid/PIY
+Valina/M
+valise/MS
+Valium/S
+Valkyrie/SM
+Vallejo
+Valle/M
+Valletta/M
+valley/SM
+Vallie/M
+Valli/M
+Vally/M
+Valma/M
+Val/MY
+Valois/M
+valor/MS
+valorous/Y
+Valparaiso/M
+Valry/M
+valuable/IP
+valuableness/IM
+valuables
+valuably/I
+valuate/NGXSD
+valuation/CSAM
+valuator/SM
+value/CGASD
+valued/U
+valuelessness/M
+valueless/P
+valuer/SM
+value's
+values/E
+valve/GMSD
+valveless
+valvular
+Va/M
+vamoose/GSD
+vamp/ADSG
+vamper
+vampire/MGSD
+vamp's
+vanadium/MS
+Vance/M
+Vancouver/M
+vandalism/MS
+vandalize/GSD
+vandal/MS
+Vandal/MS
+Vanda/M
+Vandenberg/M
+Vanderbilt/M
+Vanderburgh/M
+Vanderpoel/M
+Vandyke/SM
+vane/MS
+Vanessa/M
+Vang/M
+vanguard/MS
+Vania/M
+vanilla/MS
+vanisher/M
+vanish/GRSDJ
+vanishing/Y
+vanity/SM
+Van/M
+Vanna/M
+vanned
+Vannie/M
+Vanni/M
+vanning
+Vanny/M
+vanquisher/M
+vanquish/RSDGZ
+van/SMD
+vantage/MS
+Vanuatu
+Vanya/M
+Vanzetti/M
+vapidity/MS
+vapidness/SM
+vapid/PY
+vaporer/M
+vaporing/MY
+vaporisation
+vaporise/DSG
+vaporization/AMS
+vaporize/DRSZG
+vaporizer/M
+vapor/MRDJGZS
+vaporous
+vapory
+vaquero/SM
+VAR
+Varanasi/M
+Varese/M
+Vargas/M
+variability/IMS
+variableness/IM
+variable/PMS
+variables/I
+variably/I
+variance/I
+variances
+variance's
+Varian/M
+variant/ISY
+variate/MGNSDX
+variational
+variation/M
+varicolored/MS
+varicose/S
+variedly
+varied/U
+variegate/NGXSD
+variegation/M
+varier/M
+varietal/S
+variety/MS
+various/PY
+varistor/M
+Varityping/M
+varlet/MS
+varmint/SM
+varnished/U
+varnisher/M
+varnish/ZGMDRS
+var/S
+varsity/MS
+varying/UY
+vary/SRDJG
+vascular
+vasectomy/SM
+Vaseline/DSMG
+vase/SM
+Vasili/MS
+Vasily/M
+vasomotor
+Vasquez/M
+vassalage/MS
+vassal/GSMD
+Vassar/M
+Vassili/M
+Vassily/M
+vastness/MS
+vast/PTSYR
+v/ASV
+VAT
+Vatican/M
+vat/SM
+vatted
+vatting
+vaudeville/SM
+vaudevillian/SM
+Vaudois
+Vaughan/M
+Vaughn/M
+vaulter/M
+vaulting/M
+vault/ZSRDMGJ
+vaunter/M
+vaunt/GRDS
+VAXes
+Vax/M
+VAX/M
+Vazquez/M
+vb
+VCR
+VD
+VDT
+VDU
+vealed/A
+vealer/MA
+veal/MRDGS
+veals/A
+Veblen/M
+vectorial
+vectorization
+vectorized
+vectorizing
+vector's/F
+vector/SGDM
+Veda/MS
+Vedanta/M
+veejay/S
+veep/S
+veer/DSG
+veering/Y
+vegan/SM
+Vega/SM
+Vegemite/M
+veges
+vegetable/MS
+vegetarianism/MS
+vegetarian/SM
+vegetate/DSNGVX
+vegetation/M
+vegetative/PY
+vegged
+veggie/S
+vegging
+veg/M
+vehemence/MS
+vehemency/S
+vehement/Y
+vehicle/SM
+vehicular
+veiling/MU
+veil's
+veil/UGSD
+vein/GSRDM
+veining/M
+vela/M
+Vela/M
+velarize/SDG
+velar/S
+Velásquez/M
+Velázquez
+Velcro/SM
+veld/SM
+veldt's
+Velez/M
+Vella/M
+vellum/MS
+Velma/M
+velocipede/SM
+velocity/SM
+velor/S
+velour's
+velum/M
+Velveeta/M
+velveteen/MS
+velvet/GSMD
+Velvet/M
+velvety/RT
+venality/MS
+venal/Y
+venation/SM
+vend/DSG
+vender's/K
+vendetta/MS
+vendible/S
+vendor/MS
+veneerer/M
+veneer/GSRDM
+veneering/M
+venerability/S
+venerable/P
+venerate/XNGSD
+veneration/M
+venereal
+venetian
+Venetian/SM
+Venezuela/M
+Venezuelan/S
+vengeance/MS
+vengeful/APY
+vengefulness/AM
+venialness/M
+venial/YP
+Venice/M
+venireman/M
+veniremen
+venison/SM
+Venita/M
+Venn/M
+venomousness/M
+venomous/YP
+venom/SGDM
+venous/Y
+venter/M
+ventilated/U
+ventilate/XSDVGN
+ventilation/M
+ventilator/MS
+vent/ISGFD
+ventral/YS
+ventricle/MS
+ventricular
+ventriloquies
+ventriloquism/MS
+ventriloquist/MS
+ventriloquy
+vent's/F
+Ventura/M
+venture/RSDJZG
+venturesomeness/SM
+venturesome/YP
+venturi/S
+venturousness/MS
+venturous/YP
+venue/MAS
+Venusian/S
+Venus/S
+veraciousness/M
+veracious/YP
+veracities
+veracity/IM
+Veracruz/M
+Veradis
+Vera/M
+verandahed
+veranda/SDM
+verbalization/MS
+verbalized/U
+verbalizer/M
+verbalize/ZGRSD
+verballed
+verballing
+verbal/SY
+verbatim
+verbena/MS
+verbiage/SM
+verb/KSM
+verbose/YP
+verbosity/SM
+verboten
+verdant/Y
+Verde/M
+Verderer/M
+verdict/SM
+verdigris/GSDM
+Verdi/M
+verdure/SDM
+Vere/M
+Verena/M
+Verene/M
+verge/FGSD
+Verge/M
+verger/SM
+verge's
+Vergil's
+veridical/Y
+Veriee/M
+verifiability/M
+verifiableness/M
+verifiable/U
+verification/S
+verified/U
+verifier/MS
+verify/GASD
+Verile/M
+verily
+Verina/M
+Verine/M
+verisimilitude/SM
+veritableness/M
+veritable/P
+veritably
+verity/MS
+Verlag/M
+Verlaine/M
+Verla/M
+Vermeer/M
+vermicelli/MS
+vermiculite/MS
+vermiform
+vermilion/MS
+vermin/M
+verminous
+Vermonter/M
+Vermont/ZRM
+vermouth/M
+vermouths
+vernacular/YS
+vernal/Y
+Verna/M
+Verne/M
+Vernen/M
+Verney/M
+Vernice/M
+vernier/SM
+Vern/NM
+Vernon/M
+Vernor/M
+Verona/M
+Veronese/M
+Veronica/M
+veronica/SM
+Veronika/M
+Veronike/M
+Veronique/M
+verrucae
+verruca/MS
+versa
+Versailles/M
+Versatec/M
+versatileness/M
+versatile/YP
+versatility/SM
+versed/UI
+verse's
+verses/I
+verse/XSRDAGNF
+versicle/M
+versification/M
+versifier/M
+versify/GDRSZXN
+versing/I
+version/MFISA
+verso/SM
+versus
+vertebrae
+vertebral/Y
+vertebra/M
+vertebrate/IMS
+vertebration/M
+vertex/SM
+vertical/YPS
+vertices's
+vertiginous
+vertigoes
+vertigo/M
+verve/SM
+very/RT
+Vesalius/M
+vesicle/SM
+vesicular/Y
+vesiculate/GSD
+Vespasian/M
+vesper/SM
+Vespucci/M
+vessel/MS
+vestal/YS
+Vesta/M
+vest/DIGSL
+vestibular
+vestibule/SDM
+vestige/SM
+vestigial/Y
+vesting/SM
+vestment/ISM
+vestryman/M
+vestrymen
+vestry/MS
+vest's
+vesture/SDMG
+Vesuvius/M
+vetch/SM
+veteran/SM
+veterinarian/MS
+veterinary/S
+veter/M
+veto/DMG
+vetoes
+vet/SMR
+vetted
+vetting/A
+Vevay/M
+vexation/SM
+vexatiousness/M
+vexatious/PY
+vexed/Y
+vex/GFSD
+VF
+VFW
+VG
+VGA
+vhf
+VHF
+VHS
+VI
+via
+viability/SM
+viable/I
+viably
+viaduct/MS
+Viagra/M
+vial/MDGS
+viand/SM
+vibe/S
+vibraharp/MS
+vibrancy/MS
+vibrant/YS
+vibraphone/MS
+vibraphonist/SM
+vibrate/XNGSD
+vibrational/Y
+vibration/M
+vibrato/MS
+vibrator/SM
+vibratory
+vibrio/M
+vibrionic
+viburnum/SM
+vicarage/SM
+vicariousness/MS
+vicarious/YP
+vicar/SM
+vice/CMS
+viced
+vicegerent/MS
+vicennial
+Vicente/M
+viceregal
+viceroy/SM
+Vichy/M
+vichyssoise/MS
+vicing
+vicinity/MS
+viciousness/S
+vicious/YP
+vicissitude/MS
+Vickers/M
+Vickie/M
+Vicki/M
+Vicksburg/M
+Vicky/M
+Vick/ZM
+Vic/M
+victimization/SM
+victimized/U
+victimizer/M
+victimize/SRDZG
+victim/SM
+Victoir/M
+Victoria/M
+Victorianism/S
+Victorian/S
+victoriousness/M
+victorious/YP
+Victor/M
+victor/SM
+victory/MS
+Victrola/SM
+victualer/M
+victual/ZGSDR
+vicuña/S
+Vidal/M
+Vida/M
+videlicet
+videocassette/S
+videoconferencing
+videodisc/S
+videodisk/SM
+video/GSMD
+videophone/SM
+videotape/SDGM
+Vidovic/M
+Vidovik/M
+Vienna/M
+Viennese/M
+Vientiane/M
+vier/M
+vie/S
+Vietcong/M
+Viet/M
+Vietminh/M
+Vietnamese/M
+Vietnam/M
+viewed/A
+viewer/AS
+viewer's
+viewfinder/MS
+viewgraph/SM
+viewing/M
+viewless/Y
+view/MBGZJSRD
+viewpoint/SM
+views/A
+vigesimal
+vigilance/MS
+vigilante/SM
+vigilantism/MS
+vigilantist
+vigilant/Y
+vigil/SM
+vignette/MGDRS
+vignetter/M
+vignetting/M
+vignettist/MS
+vigor/MS
+vigorousness/M
+vigorous/YP
+vii
+viii
+Vijayawada/M
+Viki/M
+Viking/MS
+viking/S
+Vikki/M
+Vikky/M
+Vikram/M
+Vila
+vile/AR
+vilely
+vileness/MS
+vilest
+Vilhelmina/M
+vilification/M
+vilifier/M
+vilify/GNXRSD
+villager/M
+village/RSMZ
+villainousness/M
+villainous/YP
+villain/SM
+villainy/MS
+Villa/M
+villa/MS
+Villarreal/M
+ville
+villeinage/SM
+villein/MS
+villi
+Villon/M
+villus/M
+Vilma/M
+Vilnius/M
+Vilyui/M
+Vi/M
+vi/MDR
+vim/MS
+vinaigrette/MS
+Vina/M
+Vince/M
+Vincent/MS
+Vincenty/M
+Vincenz/M
+vincible/I
+Vinci/M
+Vindemiatrix/M
+vindicate/XSDVGN
+vindication/M
+vindicator/SM
+vindictiveness/MS
+vindictive/PY
+vinegar/DMSG
+vinegary
+vine/MGDS
+vineyard/SM
+Vinita/M
+Vin/M
+Vinnie/M
+Vinni/M
+Vinny/M
+vino/MS
+vinous
+Vinson/M
+vintage/MRSDG
+vintager/M
+vintner/MS
+vinyl/SM
+violable/I
+Viola/M
+Violante/M
+viola/SM
+violate/VNGXSD
+violator/MS
+Viole/M
+violence/SM
+violent/Y
+Violet/M
+violet/SM
+Violetta/M
+Violette/M
+violinist/SM
+violin/MS
+violist/MS
+viol/MSB
+violoncellist/S
+violoncello/MS
+viper/MS
+viperous
+VIP/S
+viragoes
+virago/M
+viral/Y
+vireo/SM
+Virge/M
+Virgie/M
+Virgilio/M
+Virgil/M
+virginal/YS
+Virgina/M
+Virginia/M
+Virginian/S
+Virginie/M
+virginity/SM
+virgin/SM
+Virgo/MS
+virgule/MS
+virile
+virility/MS
+virologist/S
+virology/SM
+virtual/Y
+virtue/SM
+virtuosity/MS
+virtuosoes
+virtuoso/MS
+virtuousness/SM
+virtuous/PY
+virulence/SM
+virulent/Y
+virus/MS
+visage/MSD
+Visakhapatnam's
+Visa/M
+visa/SGMD
+Visayans
+viscera
+visceral/Y
+viscid/Y
+viscoelastic
+viscoelasticity
+viscometer/SM
+viscose/MS
+viscosity/MS
+viscountcy/MS
+viscountess/SM
+viscount/MS
+viscousness/M
+viscous/PY
+viscus/M
+vise/CAXNGSD
+viselike
+vise's
+Vishnu/M
+visibility/ISM
+visible/PI
+visibly/I
+Visigoth/M
+Visigoths
+visionariness/M
+visionary/PS
+vision/KMDGS
+vision's/A
+visitable/U
+visitant/SM
+visitation/SM
+visited/U
+visit/GASD
+visitor/MS
+vis/MDSGV
+visor/SMDG
+VISTA
+vista/GSDM
+Vistula/M
+visualization/AMS
+visualized/U
+visualizer/M
+visualizes/A
+visualize/SRDZG
+visual/SY
+vitae
+vitality/MS
+vitalization/AMS
+vitalize/ASDGC
+vital/SY
+vita/M
+Vita/M
+vitamin/SM
+Vite/M
+Vitia/M
+vitiate/XGNSD
+vitiation/M
+viticulture/SM
+viticulturist/S
+Vitim/M
+Vito/M
+Vitoria/M
+vitreous/YSP
+vitrifaction/S
+vitrification/M
+vitrify/XDSNG
+vitrine/SM
+vitriolic
+vitriol/MDSG
+vitro
+vittles
+Vittoria/M
+Vittorio/M
+vituperate/SDXVGN
+vituperation/M
+vituperative/Y
+Vitus/M
+vivace/S
+vivaciousness/MS
+vivacious/YP
+vivacity/SM
+viva/DGS
+Vivaldi
+Viva/M
+vivaria
+vivarium/MS
+vivaxes
+Vivekananda/M
+vive/Z
+Vivia/M
+Viviana/M
+Vivian/M
+Vivianna/M
+Vivianne/M
+vividness/SM
+vivid/PTYR
+Vivie/M
+Viviene/M
+Vivien/M
+Vivienne/M
+vivifier
+vivify/NGASD
+Vivi/MN
+viviparous
+vivisect/DGS
+vivisectional
+vivisectionist/SM
+vivisection/MS
+Viviyan/M
+Viv/M
+vivo
+Vivyan/M
+Vivyanne/M
+vixenish/Y
+vixen/SM
+viz
+vizier/MS
+vizor's
+VJ
+Vladamir/M
+Vladimir/M
+Vladivostok/M
+Vlad/M
+VLF
+VLSI
+VMS/M
+VOA
+vocable/SM
+vocab/S
+vocabularian
+vocabularianism
+vocabulary/MS
+vocalic/S
+vocalise's
+vocalism/M
+vocalist/MS
+vocalization/SM
+vocalized/U
+vocalizer/M
+vocalize/ZGDRS
+vocal/SY
+vocation/AKMISF
+vocational/Y
+vocative/KYS
+vociferate/NGXSD
+vociferation/M
+vociferousness/MS
+vociferous/YP
+vocoded
+vocoder
+vodka/MS
+voe/S
+Vogel/M
+vogue/GMSRD
+vogueing
+voguish
+voiceband
+voiced/CU
+voice/IMGDS
+voicelessness/SM
+voiceless/YP
+voicer/S
+voices/C
+voicing/C
+voidable
+void/C
+voided
+voider/M
+voiding
+voidness/M
+voids
+voilà
+voile/MS
+volar
+volatileness/M
+volatile/PS
+volatility/MS
+volatilization/MS
+volatilize/SDG
+volcanically
+volcanic/S
+volcanism/M
+volcanoes
+volcano/M
+vole/MS
+Volga/M
+Volgograd/M
+vol/GSD
+volitionality
+volitional/Y
+volition/MS
+Volkswagen/SM
+volleyball/MS
+volleyer/M
+volley/SMRDG
+Vol/M
+Volstead/M
+voltage/SM
+voltaic
+Voltaire/M
+Volta/M
+volt/AMS
+Volterra/M
+voltmeter/MS
+volubility/S
+voluble/P
+volubly
+volume/SDGM
+volumetric
+volumetrically
+voluminousness/MS
+voluminous/PY
+voluntarily/I
+voluntariness/MI
+voluntarism/MS
+voluntary/PS
+volunteer/DMSG
+voluptuary/SM
+voluptuousness/S
+voluptuous/YP
+volute/S
+Volvo/M
+vomit/GRDS
+Vonda/M
+Von/M
+Vonnegut/M
+Vonnie/M
+Vonni/M
+Vonny/M
+voodoo/GDMS
+voodooism/S
+voraciousness/MS
+voracious/YP
+voracity/MS
+Voronezh/M
+Vorster/M
+vortex/SM
+vortices's
+vorticity/M
+votary/MS
+vote/CSDG
+voter/SM
+vote's
+votive/YP
+voucher/GMD
+vouchsafe/SDG
+vouch/SRDGZ
+vowelled
+vowelling
+vowel/MS
+vower/M
+vow/SMDRG
+voyage/GMZJSRD
+voyager/M
+voyageur/SM
+voyeurism/MS
+voyeuristic
+voyeur/MS
+VP
+vs
+V's
+VT
+Vt/M
+VTOL
+vulcanization/SM
+vulcanized/U
+vulcanize/SDG
+Vulcan/M
+vulgarian/MS
+vulgarism/MS
+vulgarity/MS
+vulgarization/S
+vulgarize/GZSRD
+vulgar/TSYR
+Vulgate/SM
+Vulg/M
+vulnerability/SI
+vulnerable/IP
+vulnerably/I
+vulpine
+vulturelike
+vulture/SM
+vulturous
+vulvae
+vulva/M
+vying
+Vyky/M
+WA
+Waals
+Wabash/M
+WAC
+Wacke/M
+wackes
+wackiness/MS
+wacko/MS
+wacky/RTP
+Waco/M
+Wac/S
+wadded
+wadding/SM
+waddle/GRSD
+Wade/M
+wader/M
+wade/S
+wadi/SM
+wad/MDRZGS
+Wadsworth/M
+wafer/GSMD
+waffle/GMZRSD
+Wafs
+wafter/M
+waft/SGRD
+wag/DRZGS
+waged/U
+wager/GZMRD
+wage/SM
+wagged
+waggery/MS
+wagging
+waggishness/SM
+waggish/YP
+waggle/SDG
+waggly
+Wagnerian
+Wagner/M
+wagoner/M
+wagon/SGZMRD
+wagtail/SM
+Wahl/M
+waif/SGDM
+Waikiki/M
+wailer/M
+wail/SGZRD
+wain/GSDM
+Wain/M
+wainscot/SGJD
+Wainwright/M
+wainwright/SM
+waistband/MS
+waistcoat/GDMS
+waister/M
+waist/GSRDM
+waistline/MS
+Waite/M
+waiter/DMG
+Waiter/M
+wait/GSZJRD
+Wait/MR
+waitpeople
+waitperson/S
+waitress/GMSD
+waiver/MB
+waive/SRDGZ
+Wakefield/M
+wakefulness/MS
+wakeful/PY
+Wake/M
+wake/MGDRSJ
+waken/SMRDG
+waker/M
+wakeup
+Waksman/M
+Walbridge/M
+Walcott/M
+Waldemar/M
+Walden/M
+Waldensian
+Waldheim/M
+Wald/MN
+Waldo/M
+Waldon/M
+Waldorf/M
+wale/DRSMG
+Wales
+Walesa/M
+Walford/M
+Walgreen/M
+waling/M
+walkabout/M
+walkaway/SM
+walker/M
+Walker/M
+walk/GZSBJRD
+walkie
+Walkman/S
+walkout/SM
+walkover/SM
+walkway/MS
+wallaby/MS
+Wallace/M
+Wallache/M
+wallah/M
+Wallas/M
+wallboard/MS
+Wallenstein/M
+Waller/M
+wallet/SM
+walleye/MSD
+wallflower/MS
+Wallie/M
+Wallis
+Walliw/M
+Walloon/SM
+walloper/M
+walloping/M
+wallop/RDSJG
+wallower/M
+wallow/RDSG
+wallpaper/DMGS
+wall/SGMRD
+Wall/SMR
+Wally/M
+wally/S
+walnut/SM
+Walpole/M
+Walpurgisnacht
+walrus/SM
+Walsh/M
+Walter/M
+Walther/M
+Walton/M
+waltzer/M
+Walt/ZMR
+waltz/MRSDGZ
+Walworth/M
+Waly/M
+wampum/SM
+Wanamaker/M
+Wanda/M
+wanderer/M
+wander/JZGRD
+wanderlust/SM
+Wandie/M
+Wandis/M
+wand/MRSZ
+wane/S
+Waneta/M
+wangler/M
+wangle/RSDGZ
+Wang/M
+Wanids/M
+Wankel/M
+wanna
+wannabe/S
+wanned
+wanner
+wanness/S
+wannest
+wanning
+wan/PGSDY
+Wansee/M
+Wansley/M
+wanted/U
+wanter/M
+want/GRDSJ
+wantonness/S
+wanton/PGSRDY
+wapiti/MS
+warble/GZRSD
+warbler/M
+warbonnet/S
+ward/AGMRDS
+Warde/M
+warden/DMGS
+Warden/M
+warder/DMGS
+Ward/MN
+wardrobe/MDSG
+wardroom/MS
+wardship/M
+wards/I
+warehouseman/M
+warehouse/MGSRD
+Ware/MG
+ware/MS
+warfare/SM
+Warfield/M
+war/GSMD
+warhead/MS
+Warhol/M
+warhorse/SM
+warily/U
+warinesses/U
+wariness/MS
+Waring/M
+warless
+warlike
+warlock/SM
+warlord/MS
+warmblooded
+warmed/A
+warmer/M
+warmheartedness/SM
+warmhearted/PY
+warmish
+warmness/MS
+warmongering/M
+warmonger/JGSM
+warms/A
+warmth/M
+warmths
+warm/YRDHPGZTS
+warned/U
+warner/M
+Warner/M
+warn/GRDJS
+warning/YM
+Warnock/M
+warpaint
+warpath/M
+warpaths
+warper/M
+warplane/MS
+warp/MRDGS
+warranted/U
+warranter/M
+warrant/GSMDR
+warranty/SDGM
+warred/M
+warrener/M
+Warren/M
+warren/SZRM
+warring/M
+warrior/MS
+Warsaw/M
+wars/C
+warship/MS
+warthog/S
+wartime/SM
+wart/MDS
+warty/RT
+Warwick/M
+wary/URPT
+Wasatch/M
+washable/S
+wash/AGSD
+washbasin/SM
+washboard/SM
+washbowl/SM
+Washburn/M
+washcloth/M
+washcloths
+washday/M
+washed/U
+washer/GDMS
+washerwoman/M
+washerwomen
+washing/SM
+Washingtonian/S
+Washington/M
+Wash/M
+Washoe/M
+washout/SM
+washrag/SM
+washroom/MS
+washstand/SM
+washtub/MS
+washy/RT
+wasn't
+WASP
+waspishness/SM
+waspish/PY
+Wasp's
+wasp/SM
+was/S
+wassail/GMDS
+Wasserman/M
+Wassermann/M
+wastage/SM
+wastebasket/SM
+wastefulness/S
+wasteful/YP
+wasteland/MS
+wastepaper/MS
+waster/DG
+waste/S
+wastewater
+wast/GZSRD
+wasting/Y
+wastrel/MS
+Watanabe/M
+watchable/U
+watchband/SM
+watchdogged
+watchdogging
+watchdog/SM
+watched/U
+watcher/M
+watchfulness/MS
+watchful/PY
+watch/JRSDGZB
+watchmake/JRGZ
+watchmaker/M
+watchman/M
+watchmen
+watchpoints
+watchtower/MS
+watchword/MS
+waterbird/S
+waterborne
+Waterbury/M
+watercolor/DMGS
+watercolorist/SM
+watercourse/SM
+watercraft/M
+watercress/SM
+waterer/M
+waterfall/SM
+waterfowl/M
+waterfront/SM
+Watergate/M
+waterhole/S
+Waterhouse/M
+wateriness/SM
+watering/M
+water/JGSMRD
+waterless
+waterlily/S
+waterline/S
+waterlogged
+waterloo
+Waterloo/SM
+waterman/M
+watermark/GSDM
+watermelon/SM
+watermill/S
+waterproof/PGRDSJ
+watershed/SM
+waterside/MSR
+watersider/M
+Waters/M
+waterspout/MS
+watertightness/M
+watertight/P
+Watertown/M
+waterway/MS
+waterwheel/S
+waterworks/M
+watery/PRT
+Watkins
+WATS
+Watson/M
+wattage/SM
+Watteau/M
+Wattenberg/M
+Watterson/M
+wattle/SDGM
+Watt/MS
+watt/TMRS
+Watusi/M
+Wat/ZM
+Waugh/M
+Waukesha/M
+Waunona/M
+Waupaca/M
+Waupun/M
+Wausau/M
+Wauwatosa/M
+waveband/MS
+waveform/SM
+wavefront/MS
+waveguide/MS
+Waveland/M
+wavelength/M
+wavelengths
+wavelet/SM
+wavelike
+wavenumber
+waver/GZRD
+wavering/YU
+Waverley/M
+Waverly/M
+Wave/S
+wave/ZGDRS
+wavily
+waviness/MS
+wavy/SRTP
+waxer/M
+waxiness/MS
+wax/MNDRSZG
+waxwing/MS
+waxwork/MS
+waxy/PRT
+wayfarer/MS
+wayfaring/S
+waylaid
+Wayland/M
+Waylan/M
+waylayer/M
+waylay/GRSZ
+wayleave/MS
+Waylen/M
+Waylin/M
+Waylon/M
+Way/M
+waymarked
+way/MS
+Wayne/M
+Waynesboro/M
+wayside/MS
+waywardness/S
+wayward/YP
+WC
+we
+weakener/M
+weaken/ZGRD
+weakfish/SM
+weakish
+weakliness/M
+weakling/SM
+weakly/RTP
+weakness/MS
+weak/TXPYRN
+weal/MHS
+wealthiness/MS
+wealth/M
+wealths
+wealthy/PTR
+weaner/M
+weanling/M
+wean/RDGS
+weapon/GDMS
+weaponless
+weaponry/MS
+wearable/S
+wearer/M
+wearied/U
+wearily
+weariness/MS
+wearing/Y
+wearisomeness/M
+wearisome/YP
+wear/RBSJGZ
+wearying/Y
+weary/TGPRSD
+weasel/SGMDY
+weatherbeaten
+weathercock/SDMG
+weatherer/M
+Weatherford/M
+weathering/M
+weatherize/GSD
+weatherman/M
+weather/MDRYJGS
+weathermen
+weatherperson/S
+weatherproof/SGPD
+weatherstripped
+weatherstripping/S
+weatherstrip/S
+weaver/M
+Weaver/M
+weaves/A
+weave/SRDGZ
+weaving/A
+webbed
+Webber/M
+webbing/MS
+Webb/RM
+weber/M
+Weber/M
+Webern/M
+webfeet
+webfoot/M
+Web/MR
+website/S
+web/SMR
+Webster/MS
+Websterville/M
+we'd
+wedded/A
+Weddell/M
+wedder
+wedding/SM
+wedge/SDGM
+wedgie/RST
+Wedgwood/M
+wedlock/SM
+Wed/M
+Wednesday/SM
+wed/SA
+weeder/M
+weediness/M
+weedkiller/M
+weedless
+wee/DRST
+weed/SGMRDZ
+weedy/TRP
+weeing
+weekday/MS
+weekender/M
+weekend/SDRMG
+weekly/S
+weeknight/SM
+Weeks/M
+week/SYM
+weenie/M
+ween/SGD
+weeny/RSMT
+weeper/M
+weep/SGZJRD
+weepy/RST
+weevil/MS
+weft/SGMD
+Wehr/M
+Weibull/M
+Weidar/M
+Weider/M
+Weidman/M
+Weierstrass/M
+weighed/UA
+weigher/M
+weigh/RDJG
+weighs/A
+weighted/U
+weighter/M
+weightily
+weightiness/SM
+weighting/M
+weight/JMSRDG
+weightlessness/SM
+weightless/YP
+weightlifter/S
+weightlifting/MS
+weighty/TPR
+Weill/M
+Wei/M
+Weinberg/M
+Weiner/M
+Weinstein/M
+weirdie/SM
+weirdness/MS
+weirdo/SM
+weird/YRDPGTS
+weir/SDMG
+Weisenheimer/M
+Weiss/M
+Weissman/M
+Weissmuller/M
+Weizmann/M
+Welbie/M
+Welby/M
+Welcher/M
+Welches
+welcomeness/M
+welcome/PRSDYG
+welcoming/U
+welder/M
+Weldon/M
+weld/SBJGZRD
+Weldwood/M
+welfare/SM
+welkin/SM
+we'll
+Welland/M
+wellbeing/M
+Weller/M
+Wellesley/M
+Welles/M
+wellhead/SM
+Wellington/MS
+wellington/S
+Wellman/M
+wellness/MS
+well/SGPD
+Wells/M
+wellspring/SM
+Wellsville/M
+Welmers/M
+Welsh
+welsher/M
+Welshman/M
+Welshmen
+welsh/RSDGZ
+Welshwoman/M
+Welshwomen
+welter/GD
+welterweight/MS
+welt/GZSMRD
+wencher/M
+wench/GRSDM
+Wendall/M
+Wenda/M
+wend/DSG
+Wendeline/M
+Wendell/M
+Wendel/M
+Wendie/M
+Wendi/M
+Wendye/M
+Wendy/M
+wen/M
+Wenonah/M
+Wenona/M
+went
+Wentworth/M
+wept/U
+were
+we're
+weren't
+werewolf/M
+werewolves
+Werner/M
+Wernher/M
+Werther/M
+werwolf's
+Wes
+Wesleyan
+Wesley/M
+Wessex/M
+Wesson/M
+westbound
+Westbrooke/M
+Westbrook/M
+Westchester/M
+wester/DYG
+westerly/S
+westerner/M
+westernization/MS
+westernize/GSD
+westernmost
+Western/ZRS
+western/ZSR
+Westfield/M
+Westhampton/M
+Westinghouse/M
+westing/M
+Westleigh/M
+Westley/M
+Westminster/M
+Westmore/M
+West/MS
+Weston/M
+Westphalia/M
+Westport/M
+west/RDGSM
+westward/S
+Westwood/M
+wetback/MS
+wetland/S
+wetness/MS
+wet/SPY
+wettable
+wetter/S
+wettest
+wetting
+we've
+Weyden/M
+Weyerhauser/M
+Weylin/M
+Wezen/M
+WFF
+whacker/M
+whack/GZRDS
+whaleboat/MS
+whalebone/SM
+whale/GSRDZM
+Whalen/M
+whaler/M
+whaling/M
+whammed
+whamming/M
+wham/MS
+whammy/S
+wharf/SGMD
+Wharton/M
+wharves
+whatchamacallit/MS
+what'd
+whatever
+what/MS
+whatnot/MS
+what're
+whatsoever
+wheal/MS
+wheatgerm
+Wheaties/M
+Wheatland/M
+wheat/NMXS
+Wheaton/M
+Wheatstone/M
+wheedle/ZDRSG
+wheelbarrow/GSDM
+wheelbase/MS
+wheelchair/MS
+wheeler/M
+Wheeler/M
+wheelhouse/SM
+wheelie/MS
+wheeling/M
+Wheeling/M
+Wheelock/M
+wheel/RDMJSGZ
+wheelwright/MS
+whee/S
+wheeze/SDG
+wheezily
+wheeziness/SM
+wheezy/PRT
+Whelan/M
+whelk/MDS
+Wheller/M
+whelm/DGS
+whelp/DMGS
+whence/S
+whenever
+when/S
+whensoever
+whereabout/S
+whereas/S
+whereat
+whereby
+where'd
+wherefore/MS
+wherein
+where/MS
+whereof
+whereon
+where're
+wheresoever
+whereto
+whereupon
+wherever
+wherewith
+wherewithal/SM
+wherry/DSGM
+whether
+whet/S
+whetstone/MS
+whetted
+whetting
+whew/GSD
+whey/MS
+which
+whichever
+whiff/GSMD
+whiffle/DRSG
+whiffler/M
+whiffletree/SM
+whig/S
+Whig/SM
+while/GSD
+whilom
+whilst
+whimmed
+whimming
+whimper/DSG
+whimsey's
+whimsicality/MS
+whimsical/YP
+whim/SM
+whimsy/TMDRS
+whine/GZMSRD
+whining/Y
+whinny/GTDRS
+whiny/RT
+whipcord/SM
+whiplash/SDMG
+Whippany/M
+whipped
+whipper/MS
+whippersnapper/MS
+whippet/MS
+whipping/SM
+Whipple/M
+whippletree/SM
+whippoorwill/SM
+whipsaw/GDMS
+whips/M
+whip/SM
+whirligig/MS
+whirlpool/MS
+whirl/RDGS
+whirlwind/MS
+whirlybird/MS
+whirly/MS
+whirred
+whirring
+whir/SY
+whisker/DM
+whiskery
+whiskey/SM
+whisk/GZRDS
+whisperer/M
+whisper/GRDJZS
+whispering/YM
+whist/GDMS
+whistleable
+whistle/DRSZG
+whistler/M
+Whistler/M
+whistling/M
+Whitaker/M
+Whitby/M
+Whitcomb/M
+whitebait/M
+whitecap/MS
+whiteface/M
+Whitefield/M
+whitefish/SM
+Whitehall/M
+Whitehead/M
+whitehead/S
+Whitehorse/M
+Whiteleaf/M
+Whiteley/M
+White/MS
+whitener/M
+whiteness/MS
+whitening/M
+whiten/JZDRG
+whiteout/S
+white/PYS
+whitespace
+whitetail/S
+whitewall/SM
+whitewash/GRSDM
+whitewater
+Whitewater/M
+whitey/MS
+Whitfield/M
+whither/DGS
+whitier
+whitiest
+whiting/M
+whitish
+Whitley/M
+Whitlock/M
+Whit/M
+Whitman/M
+Whitney/M
+whit/SJGTXMRND
+Whitsunday/MS
+Whittaker/M
+whitter
+Whittier
+whittle/JDRSZG
+whittler/M
+whiz
+whizkid
+whizzbang/S
+whizzed
+whizzes
+whizzing
+WHO
+whoa/S
+who'd
+whodunit/SM
+whoever
+wholegrain
+wholeheartedness/MS
+wholehearted/PY
+wholemeal
+wholeness/S
+wholesale/GZMSRD
+wholesaler/M
+wholesomeness/USM
+wholesome/UYP
+whole/SP
+wholewheat
+who'll
+wholly
+whom
+who/M
+whomever
+whomsoever
+whoopee/S
+whooper/M
+whoop/SRDGZ
+whoosh/DSGM
+whop
+whopper/MS
+whopping/S
+who're
+whorehouse/SM
+whoreish
+whore/SDGM
+whorish
+whorl/SDM
+whose
+whoso
+whosoever
+who've
+why
+whys
+WI
+Wiatt/M
+Wichita/M
+wickedness/MS
+wicked/RYPT
+wicker/M
+wickerwork/MS
+wicketkeeper/SM
+wicket/SM
+wick/GZRDMS
+wicking/M
+widemouthed
+widener/M
+wideness/S
+widen/SGZRD
+wide/RSYTP
+widespread
+widgeon's
+widget/SM
+widower/M
+widowhood/S
+widow/MRDSGZ
+width/M
+widths
+widthwise
+Wieland/M
+wielder/M
+wield/GZRDS
+Wiemar/M
+wiener/SM
+wienie/SM
+Wier/M
+Wiesel/M
+wife/DSMYG
+wifeless
+wifely/RPT
+wigeon/MS
+wigged
+wigging/M
+Wiggins
+wiggler/M
+wiggle/RSDGZ
+wiggly/RT
+wight/SGDM
+wiglet/S
+wigmaker
+wig/MS
+Wigner/M
+wigwagged
+wigwagging
+wigwag/S
+wigwam/MS
+Wilberforce/M
+Wilbert/M
+Wilbur/M
+Wilburn/M
+Wilburt/M
+Wilcox/M
+Wilda/M
+wildcat/SM
+wildcatted
+wildcatter/MS
+wildcatting
+wildebeest/SM
+Wilde/MR
+Wilden/M
+Wilder/M
+wilderness/SM
+wilder/P
+wildfire/MS
+wildflower/S
+wildfowl/M
+wilding/M
+wildlife/M
+wildness/MS
+Wildon/M
+wild/SPGTYRD
+wile/DSMG
+Wileen/M
+Wilek/M
+Wiley/M
+Wilford/M
+Wilfred/M
+Wilfredo/M
+Wilfrid/M
+wilfulness's
+Wilhelmina/M
+Wilhelmine/M
+Wilhelm/M
+Wilie/M
+wilily
+wiliness/MS
+Wilkerson/M
+Wilkes/M
+Wilkins/M
+Wilkinson/M
+Willabella/M
+Willa/M
+Willamette/M
+Willamina/M
+Willard/M
+Willcox/M
+Willdon/M
+willed/U
+Willem/M
+Willemstad/M
+willer/M
+Willetta/M
+Willette/M
+Willey/M
+willfulness/S
+willful/YP
+Williamsburg/M
+William/SM
+Williamson/M
+Willied/M
+Willie/M
+willies
+Willi/MS
+willinger
+willingest
+willingness's
+willingness/US
+willing/UYP
+Willisson/M
+williwaw/MS
+Will/M
+Willoughby/M
+willower/M
+Willow/M
+willow/RDMSG
+willowy/TR
+willpower/MS
+will/SGJRD
+Willy/SDM
+Willyt/M
+Wilma/M
+Wilmar/M
+Wilmer/M
+Wilmette/M
+Wilmington/M
+Wilona/M
+Wilone/M
+Wilow/M
+Wilshire/M
+Wilsonian
+Wilson/M
+wilt/DGS
+Wilt/M
+Wilton/M
+wily/PTR
+Wimbledon/M
+wimp/GSMD
+wimpish
+wimple/SDGM
+wimpy/RT
+wince/SDG
+Winchell/M
+wincher/M
+winchester/M
+Winchester/MS
+winch/GRSDM
+windbag/SM
+windblown
+windbreak/MZSR
+windburn/GSMD
+winded
+winder/UM
+windfall/SM
+windflower/MS
+Windham/M
+Windhoek/M
+windily
+windiness/SM
+winding/MS
+windjammer/SM
+windlass/GMSD
+windless/YP
+windmill/GDMS
+window/DMGS
+windowless
+windowpane/SM
+Windows
+windowsill/SM
+windpipe/SM
+windproof
+windrow/GDMS
+wind's
+winds/A
+windscreen/MS
+windshield/SM
+windsock/MS
+Windsor/MS
+windstorm/MS
+windsurf/GZJSRD
+windswept
+windup/MS
+wind/USRZG
+Windward/M
+windward/SY
+Windy/M
+windy/TPR
+wineglass/SM
+winegrower/SM
+Winehead/M
+winemake
+winemaster
+wine/MS
+winery/MS
+Winesap/M
+wineskin/M
+Winfield/M
+Winfred/M
+Winfrey/M
+wingback/M
+wingding/MS
+wingeing
+winger/M
+wing/GZRDM
+wingless
+winglike
+wingman
+wingmen
+wingspan/SM
+wingspread/MS
+wingtip/S
+Winifield/M
+Winifred/M
+Wini/M
+winker/M
+wink/GZRDS
+winking/U
+Winkle/M
+winkle/SDGM
+winless
+Win/M
+winnable
+Winnah/M
+Winna/M
+Winnebago/M
+Winne/M
+winner/MS
+Winnetka/M
+Winnie/M
+Winnifred/M
+Winni/M
+winning/SY
+Winnipeg/M
+Winn/M
+winnow/SZGRD
+Winny/M
+Winograd/M
+wino/MS
+Winonah/M
+Winona/M
+Winooski/M
+Winsborough/M
+Winsett/M
+Winslow/M
+winsomeness/SM
+winsome/PRTY
+Winston/M
+winterer/M
+wintergreen/SM
+winterize/GSD
+Winters
+winter/SGRDYM
+wintertime/MS
+Winthrop/M
+wintriness/M
+wintry/TPR
+winy/RT
+win/ZGDRS
+wipe/DRSZG
+wiper/M
+wirehair/MS
+wireless/MSDG
+wireman/M
+wiremen
+wirer/M
+wire's
+wires/A
+wiretap/MS
+wiretapped
+wiretapper/SM
+wiretapping
+wire/UDA
+wiriness/S
+wiring/SM
+wiry/RTP
+Wisc
+Wisconsinite/SM
+Wisconsin/M
+wisdoms
+wisdom/UM
+wiseacre/MS
+wisecrack/GMRDS
+wised
+wisely/TR
+Wise/M
+wiseness
+wisenheimer/M
+Wisenheimer/M
+wises
+wise/URTY
+wishbone/MS
+wishfulness/M
+wishful/PY
+wish/GZSRD
+wishy
+wising
+Wis/M
+wisp/MDGS
+wispy/RT
+wist/DGS
+wisteria/SM
+wistfulness/MS
+wistful/PY
+witchcraft/SM
+witchdoctor/S
+witchery/MS
+witch/SDMG
+withal
+withdrawal/MS
+withdrawer/M
+withdrawnness/M
+withdrawn/P
+withdraw/RGS
+withdrew
+withe/M
+wither/GDJ
+withering/Y
+Witherspoon/M
+with/GSRDZ
+withheld
+withholder/M
+withhold/SJGZR
+within/S
+without/S
+withs
+withstand/SG
+withstood
+witlessness/MS
+witless/PY
+Wit/M
+witness/DSMG
+witnessed/U
+wit/PSM
+witted
+witter/G
+Wittgenstein/M
+witticism/MS
+Wittie/M
+wittily
+wittiness/SM
+wittings
+witting/UY
+Witt/M
+Witty/M
+witty/RTP
+Witwatersrand/M
+wive/GDS
+wives/M
+wizard/MYS
+wizardry/MS
+wizen/D
+wiz's
+wk/Y
+Wm/M
+WNW
+woad/MS
+wobble/GSRD
+wobbler/M
+wobbliness/S
+wobbly/PRST
+Wodehouse/M
+woebegone/P
+woefuller
+woefullest
+woefulness/SM
+woeful/PY
+woe/PSM
+woke
+wok/SMN
+Wolcott/M
+wold/MS
+Wolfe/M
+wolfer/M
+Wolff/M
+Wolfgang/M
+wolfhound/MS
+Wolfie/M
+wolfishness/M
+wolfish/YP
+Wolf/M
+wolfram/MS
+wolf/RDMGS
+Wolfy/M
+Wollongong/M
+Wollstonecraft/M
+Wolsey/M
+Wolverhampton/M
+wolverine/SM
+Wolverton/M
+wolves/M
+woman/GSMYD
+womanhood/MS
+womanish
+womanized/U
+womanizer/M
+womanize/RSDZG
+womanizes/U
+womankind/M
+womanlike
+womanliness/SM
+womanly/PRT
+wombat/MS
+womb/SDM
+womenfolk/MS
+women/MS
+wonderer/M
+wonderfulness/SM
+wonderful/PY
+wonder/GLRDMS
+wondering/Y
+wonderland/SM
+wonderment/SM
+wondrousness/M
+wondrous/YP
+Wong/M
+wonk/S
+wonky/RT
+wonned
+wonning
+won/SG
+won't
+wontedness/MU
+wonted/PUY
+wont/SGMD
+Woodard/M
+Woodberry/M
+woodbine/SM
+woodblock/S
+Woodbury/M
+woodcarver/S
+woodcarving/MS
+woodchopper/SM
+woodchuck/MS
+woodcock/MS
+woodcraft/MS
+woodcut/SM
+woodcutter/MS
+woodcutting/MS
+woodenness/SM
+wooden/TPRY
+woodgrain/G
+woodhen
+Woodhull/M
+Woodie/M
+woodiness/MS
+woodland/SRM
+Woodlawn/M
+woodlice
+woodlot/S
+woodlouse/M
+woodman/M
+Woodman/M
+woodmen
+woodpecker/SM
+woodpile/SM
+Woodrow/M
+woodruff/M
+woo/DRZGS
+woodshedded
+woodshedding
+woodshed/SM
+woodside
+Wood/SM
+woodsman/M
+woodsmen
+wood/SMNDG
+woodsmoke
+woods/R
+Woodstock/M
+woodsy/TRP
+Woodward/MS
+woodwind/S
+woodworker/M
+woodworking/M
+woodwork/SMRGZJ
+woodworm/M
+woodyard
+Woody/M
+woody/TPSR
+woofer/M
+woof/SRDMGZ
+Woolf/M
+woolgatherer/M
+woolgathering/M
+woolgather/RGJ
+woolliness/MS
+woolly/RSPT
+Woolongong/M
+wool/SMYNDX
+Woolworth/M
+Woonsocket/M
+Wooster/M
+Wooten/M
+woozily
+wooziness/MS
+woozy/RTP
+wop/MS!
+Worcestershire/M
+Worcester/SM
+wordage/SM
+word/AGSJD
+wordbook/MS
+Worden/M
+wordily
+wordiness/SM
+wording/AM
+wordless/Y
+wordplay/SM
+word's
+Wordsworth/M
+wordy/TPR
+wore
+workability's
+workability/U
+workableness/M
+workable/U
+workably
+workaday
+workaholic/S
+workaround/SM
+workbench/MS
+workbook/SM
+workday/SM
+worked/A
+worker/M
+workfare/S
+workforce/S
+work/GZJSRDMB
+workhorse/MS
+workhouse/SM
+working/M
+workingman/M
+workingmen
+workingwoman/M
+workingwomen
+workload/SM
+workmanlike
+Workman/M
+workman/MY
+workmanship/MS
+workmate/S
+workmen/M
+workout/SM
+workpiece/SM
+workplace/SM
+workroom/MS
+works/A
+worksheet/S
+workshop/MS
+workspace/S
+workstation/MS
+worktable/SM
+worktop/S
+workup/S
+workweek/SM
+worldlier
+worldliest
+worldliness/USM
+worldly/UP
+worldwide
+world/ZSYM
+wormer/M
+wormhole/SM
+worm/SGMRD
+Worms/M
+wormwood/SM
+wormy/RT
+worn/U
+worried/Y
+worrier/M
+worriment/MS
+worrisome/YP
+worrying/Y
+worrywart/SM
+worry/ZGSRD
+worsen/GSD
+worse/SR
+worshiper/M
+worshipfulness/M
+worshipful/YP
+worship/ZDRGS
+worsted/MS
+worst/SGD
+worth/DG
+worthily/U
+worthinesses/U
+worthiness/SM
+Worthington/M
+worthlessness/SM
+worthless/PY
+Worth/M
+worths
+worthwhile/P
+Worthy/M
+worthy/UTSRP
+wort/SM
+wost
+wot
+Wotan/M
+wouldn't
+would/S
+wouldst
+would've
+wound/AU
+wounded/U
+wounder
+wounding
+wounds
+wound's
+wove/A
+woven/AU
+wovens
+wow/SDG
+Wozniak/M
+WP
+wpm
+wrack/SGMD
+wraith/M
+wraiths
+Wrangell/M
+wrangle/GZDRS
+wrangler/M
+wraparound/S
+wrap/MS
+wrapped/U
+wrapper/MS
+wrapping/SM
+wraps/U
+wrasse/SM
+wrathful/YP
+wrath/GDM
+wraths
+wreak/SDG
+wreathe
+wreath/GMDS
+wreaths
+wreckage/MS
+wrecker/M
+wreck/GZRDS
+wrenching/Y
+wrench/MDSG
+wren/MS
+Wren/MS
+Wrennie/M
+wrester/M
+wrestle/JGZDRS
+wrestler/M
+wrestling/M
+wrest/SRDG
+wretchedness/SM
+wretched/TPYR
+wretch/MDS
+wriggle/DRSGZ
+wriggler/M
+wriggly/RT
+Wright/M
+wright/MS
+Wrigley/M
+wringer/M
+wring/GZRS
+wrinkled/U
+wrinkle/GMDS
+wrinkly/RST
+wristband/SM
+wrist/MS
+wristwatch/MS
+writable/U
+write/ASBRJG
+writer/MA
+writeup
+writhe/SDG
+writing/M
+writ/MRSBJGZ
+written/UA
+Wroclaw
+wrongdoer/MS
+wrongdoing/MS
+wronger/M
+wrongfulness/MS
+wrongful/PY
+wrongheadedness/MS
+wrongheaded/PY
+wrongness/MS
+wrong/PSGTYRD
+Wronskian/M
+wrote/A
+wroth
+wrought/I
+wrung
+wry/DSGY
+wryer
+wryest
+wryness/SM
+W's
+WSW
+wt
+W/T
+Wuhan/M
+Wu/M
+Wurlitzer/M
+wurst/SM
+wuss/S
+wussy/TRS
+WV
+WW
+WWI
+WWII
+WWW
+w/XTJGV
+WY
+Wyatan/M
+Wyatt/M
+Wycherley/M
+Wycliffe/M
+Wye/MH
+Wyeth/M
+Wylie/M
+Wylma/M
+Wyman/M
+Wyndham/M
+Wyn/M
+Wynne/M
+Wynnie/M
+Wynn/M
+Wynny/M
+Wyo/M
+Wyomingite/SM
+Wyoming/M
+WYSIWYG
+x
+X
+Xanadu
+Xanthippe/M
+Xanthus/M
+Xaviera/M
+Xavier/M
+Xebec/M
+Xe/M
+XEmacs/M
+Xenakis/M
+Xena/M
+Xenia/M
+Xenix/M
+xenon/SM
+xenophobe/MS
+xenophobia/SM
+xenophobic
+Xenophon/M
+Xenos
+xerographic
+xerography/MS
+xerox/GSD
+Xerox/MGSD
+Xerxes/M
+Xever/M
+Xhosa/M
+Xi'an
+Xian/S
+Xiaoping/M
+xii
+xiii
+xi/M
+Ximenes/M
+Ximenez/M
+Ximian/SM
+Xingu/M
+xis
+xiv
+xix
+XL
+Xmas/SM
+XML
+Xochipilli/M
+XOR
+X's
+XS
+xterm/M
+Xuzhou/M
+xv
+xvi
+xvii
+xviii
+xx
+XXL
+xylem/SM
+xylene/M
+Xylia/M
+Xylina/M
+xylophone/MS
+xylophonist/S
+Xymenes/M
+Y
+ya
+yacc/M
+Yacc/M
+yachting/M
+yachtsman
+yachtsmen
+yachtswoman/M
+yachtswomen
+yacht/ZGJSDM
+yack's
+Yagi/M
+yahoo/MS
+Yahweh/M
+Yakima/M
+yakked
+yakking
+yak/SM
+Yakut/M
+Yakutsk/M
+Yale/M
+Yalies/M
+y'all
+Yalonda/M
+Yalow/M
+Yalta/M
+Yalu/M
+Yamaha/M
+yammer/RDZGS
+Yamoussoukro
+yam/SM
+Yanaton/M
+Yance/M
+Yancey/M
+Yancy/M
+Yang/M
+Yangon
+yang/S
+Yangtze/M
+Yankee/SM
+yank/GDS
+Yank/MS
+Yaounde/M
+yapped
+yapping
+yap/S
+Yaqui/M
+yardage/SM
+yardarm/SM
+Yardley/M
+Yard/M
+yardman/M
+yardmaster/S
+yardmen
+yard/SMDG
+yardstick/SM
+yarmulke/SM
+yarn/SGDM
+Yaroslavl/M
+yarrow/MS
+Yasmeen/M
+Yasmin/M
+Yates
+yaw/DSG
+yawl/SGMD
+yawner/M
+yawn/GZSDR
+yawning/Y
+Yb/M
+yd
+Yeager/M
+yeah
+yeahs
+yearbook/SM
+yearling/M
+yearlong
+yearly/S
+yearner/M
+yearning/MY
+yearn/JSGRD
+year/YMS
+yea/S
+yeastiness/M
+yeast/SGDM
+yeasty/PTR
+Yeats/M
+yecch
+yegg/MS
+Yehudi/M
+Yehudit/M
+Yekaterinburg/M
+Yelena/M
+yell/GSDR
+yellowhammers
+yellowish
+Yellowknife/M
+yellowness/MS
+Yellowstone/M
+yellow/TGPSRDM
+yellowy
+yelper/M
+yelp/GSDR
+Yeltsin
+Yemeni/S
+Yemenite/SM
+Yemen/M
+Yenisei/M
+yenned
+yenning
+yen/SM
+Yentl/M
+yeomanry/MS
+yeoman/YM
+yeomen
+yep/S
+Yerevan/M
+Yerkes/M
+Yesenia/M
+yeshiva/SM
+yes/S
+yessed
+yessing
+yesterday/MS
+yesteryear/SM
+yet
+ye/T
+yeti/SM
+Yetta/M
+Yettie/M
+Yetty/M
+Yevette/M
+Yevtushenko/M
+yew/SM
+y/F
+Yggdrasil/M
+Yiddish/M
+yielded/U
+yielding/U
+yield/JGRDS
+yikes
+yin/S
+yipe/S
+yipped
+yippee/S
+yipping
+yip/S
+YMCA
+YMHA
+Ymir/M
+YMMV
+Ynes/M
+Ynez/M
+yo
+Yoda/M
+yodeler/M
+yodel/SZRDG
+Yoder/M
+yoga/MS
+yoghurt's
+yogi/MS
+yogurt/SM
+yoke/DSMG
+yoked/U
+yokel/SM
+yokes/U
+yoking/U
+Yoknapatawpha/M
+Yokohama/M
+Yoko/M
+Yolanda/M
+Yolande/M
+Yolane/M
+Yolanthe/M
+yolk/DMS
+yon
+yonder
+Yong/M
+Yonkers/M
+yore/MS
+Yorgo/MS
+Yorick/M
+Yorke/M
+Yorker/M
+yorker/SM
+Yorkshire/MS
+Yorktown/M
+York/ZRMS
+Yoruba/M
+Yosemite/M
+Yoshiko/M
+Yoshi/M
+Yost/M
+you'd
+you'll
+youngish
+Young/M
+youngster/MS
+Youngstown/M
+young/TRYP
+you're
+your/MS
+yourself
+yourselves
+you/SH
+youthfulness/SM
+youthful/YP
+youths
+youth/SM
+you've
+Yovonnda/M
+yow
+yowl/GSD
+Ypres/M
+Ypsilanti/M
+yr
+yrs
+Y's
+Ysabel/M
+YT
+ytterbium/MS
+yttrium/SM
+yuan/M
+Yuba/M
+Yucatan
+yucca/MS
+yuck/GSD
+yucky/RT
+Yugo/M
+Yugoslavia/M
+Yugoslavian/S
+Yugoslav/M
+Yuh/M
+Yuki/M
+yukked
+yukking
+Yukon/M
+yuk/S
+yule/MS
+Yule/MS
+yuletide/MS
+Yuletide/S
+Yul/M
+Yulma/M
+yum
+Yuma/M
+yummy/TRS
+Yunnan/M
+yuppie/SM
+yup/S
+Yurik/M
+Yuri/M
+yurt/SM
+Yves/M
+Yvette/M
+Yvon/M
+Yvonne/M
+Yvor/M
+YWCA
+YWHA
+Zabrina/M
+Zaccaria/M
+Zachariah/M
+Zacharia/SM
+Zacharie/M
+Zachary/M
+Zacherie/M
+Zachery/M
+Zach/M
+Zackariah/M
+Zack/M
+zagging
+Zagreb/M
+zag/S
+Zahara/M
+Zaire/M
+Zairian/S
+Zak/M
+Zambezi/M
+Zambia/M
+Zambian/S
+Zamboni
+Zamenhof/M
+Zamora/M
+Zandra/M
+Zane/M
+Zaneta/M
+zaniness/MS
+Zan/M
+Zanuck/M
+zany/PDSRTG
+Zanzibar/M
+Zapata/M
+Zaporozhye/M
+Zappa/M
+zapped
+zapper/S
+zapping
+zap/S
+Zarah/M
+Zara/M
+Zared/M
+Zaria/M
+Zarla/M
+Zealand/M
+zeal/MS
+zealot/MS
+zealotry/MS
+zealousness/SM
+zealous/YP
+Zea/M
+Zebadiah/M
+Zebedee/M
+Zeb/M
+zebra/MS
+Zebulen/M
+Zebulon/M
+zebu/SM
+Zechariah/M
+Zedekiah/M
+Zed/M
+Zedong/M
+zed/SM
+Zeffirelli/M
+Zeiss/M
+zeitgeist/S
+Zeke/M
+Zelda/M
+Zelig/M
+Zellerbach/M
+Zelma/M
+Zena/M
+Zenger/M
+Zenia/M
+zenith/M
+zeniths
+Zen/M
+Zennist/M
+Zeno/M
+Zephaniah/M
+zephyr/MS
+Zephyrus/M
+Zeppelin's
+zeppelin/SM
+Zerk/M
+zeroed/M
+zeroing/M
+zero/SDHMG
+zestfulness/MS
+zestful/YP
+zest/MDSG
+zesty/RT
+zeta/SM
+zeugma/M
+Zeus/M
+Zhdanov/M
+Zhengzhou
+Zhivago/M
+Zhukov/M
+Zia/M
+Zibo/M
+Ziegfeld/MS
+Ziegler/M
+zig
+zigged
+zigging
+Ziggy/M
+zigzagged
+zigzagger
+zigzagging
+zigzag/MS
+zilch/S
+zillion/MS
+Zilvia/M
+Zimbabwean/S
+Zimbabwe/M
+Zimmerman/M
+zincked
+zincking
+zinc/MS
+zing/GZDRM
+zingy/RT
+zinnia/SM
+Zionism/MS
+Zionist/MS
+Zion/SM
+zip/MS
+zipped/U
+zipper/GSDM
+zipping/U
+zippy/RT
+zips/U
+zirconium/MS
+zircon/SM
+Zita/M
+Zitella/M
+zither/SM
+zit/S
+zloty/SM
+Zn/M
+zodiacal
+zodiac/SM
+Zoe/M
+Zola/M
+Zollie/M
+Zolly/M
+Zomba/M
+zombie/SM
+zombi's
+zonal/Y
+Zonda/M
+Zondra/M
+zoned/A
+zone/MYDSRJG
+zones/A
+zoning/A
+zonked
+Zonnya/M
+zookeepers
+zoological/Y
+zoologist/SM
+zoology/MS
+zoom/DGS
+zoophyte/SM
+zoophytic
+zoo/SM
+Zorah/M
+Zora/M
+Zorana/M
+Zorina/M
+Zorine/M
+Zorn/M
+Zoroaster/M
+Zoroastrianism/MS
+Zoroastrian/S
+Zorro/M
+Zosma/M
+zounds/S
+Zr/M
+Zs
+Zsazsa/M
+Zsigmondy/M
+z/TGJ
+Zubenelgenubi/M
+Zubeneschamali/M
+zucchini/SM
+Zukor/M
+Zulema/M
+Zululand/M
+Zulu/MS
+Zuni/S
+Zürich/M
+Zuzana/M
+zwieback/MS
+Zwingli/M
+Zworykin/M
+Z/X
+zydeco/S
+zygote/SM
+zygotic
+zymurgy/S
diff --git a/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff
new file mode 100755
index 0000000..2ddd985
--- /dev/null
+++ b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff
@@ -0,0 +1,201 @@
+SET ISO8859-1
+TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
+NOSUGGEST !
+
+# ordinal numbers
+COMPOUNDMIN 1
+# only in compounds: 1th, 2th, 3th
+ONLYINCOMPOUND c
+# compound rules:
+# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
+# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
+WORDCHARS 0123456789
+
+PFX A Y 1
+PFX A 0 re .
+
+PFX I Y 1
+PFX I 0 in .
+
+PFX U Y 1
+PFX U 0 un .
+
+PFX C Y 1
+PFX C 0 de .
+
+PFX E Y 1
+PFX E 0 dis .
+
+PFX F Y 1
+PFX F 0 con .
+
+PFX K Y 1
+PFX K 0 pro .
+
+SFX V N 2
+SFX V e ive e
+SFX V 0 ive [^e]
+
+SFX N Y 3
+SFX N e ion e
+SFX N y ication y
+SFX N 0 en [^ey]
+
+SFX X Y 3
+SFX X e ions e
+SFX X y ications y
+SFX X 0 ens [^ey]
+
+SFX H N 2
+SFX H y ieth y
+SFX H 0 th [^y]
+
+SFX Y Y 1
+SFX Y 0 ly .
+
+SFX G Y 2
+SFX G e ing e
+SFX G 0 ing [^e]
+
+SFX J Y 2
+SFX J e ings e
+SFX J 0 ings [^e]
+
+SFX D Y 4
+SFX D 0 d e
+SFX D y ied [^aeiou]y
+SFX D 0 ed [^ey]
+SFX D 0 ed [aeiou]y
+
+SFX T N 4
+SFX T 0 st e
+SFX T y iest [^aeiou]y
+SFX T 0 est [aeiou]y
+SFX T 0 est [^ey]
+
+SFX R Y 4
+SFX R 0 r e
+SFX R y ier [^aeiou]y
+SFX R 0 er [aeiou]y
+SFX R 0 er [^ey]
+
+SFX Z Y 4
+SFX Z 0 rs e
+SFX Z y iers [^aeiou]y
+SFX Z 0 ers [aeiou]y
+SFX Z 0 ers [^ey]
+
+SFX S Y 4
+SFX S y ies [^aeiou]y
+SFX S 0 s [aeiou]y
+SFX S 0 es [sxzh]
+SFX S 0 s [^sxzhy]
+
+SFX P Y 3
+SFX P y iness [^aeiou]y
+SFX P 0 ness [aeiou]y
+SFX P 0 ness [^y]
+
+SFX M Y 1
+SFX M 0 's .
+
+SFX B Y 3
+SFX B 0 able [^aeiou]
+SFX B 0 able ee
+SFX B e able [^aeiou]e
+
+SFX L Y 1
+SFX L 0 ment .
+
+REP 88
+REP a ei
+REP ei a
+REP a ey
+REP ey a
+REP ai ie
+REP ie ai
+REP are air
+REP are ear
+REP are eir
+REP air are
+REP air ere
+REP ere air
+REP ere ear
+REP ere eir
+REP ear are
+REP ear air
+REP ear ere
+REP eir are
+REP eir ere
+REP ch te
+REP te ch
+REP ch ti
+REP ti ch
+REP ch tu
+REP tu ch
+REP ch s
+REP s ch
+REP ch k
+REP k ch
+REP f ph
+REP ph f
+REP gh f
+REP f gh
+REP i igh
+REP igh i
+REP i uy
+REP uy i
+REP i ee
+REP ee i
+REP j di
+REP di j
+REP j gg
+REP gg j
+REP j ge
+REP ge j
+REP s ti
+REP ti s
+REP s ci
+REP ci s
+REP k cc
+REP cc k
+REP k qu
+REP qu k
+REP kw qu
+REP o eau
+REP eau o
+REP o ew
+REP ew o
+REP oo ew
+REP ew oo
+REP ew ui
+REP ui ew
+REP oo ui
+REP ui oo
+REP ew u
+REP u ew
+REP oo u
+REP u oo
+REP u oe
+REP oe u
+REP u ieu
+REP ieu u
+REP ue ew
+REP ew ue
+REP uff ough
+REP oo ieu
+REP ieu oo
+REP ier ear
+REP ear ier
+REP ear air
+REP air ear
+REP w qu
+REP qu w
+REP z ss
+REP ss z
+REP shun tion
+REP shun sion
+REP shun cion
diff --git a/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic
new file mode 100755
index 0000000..4f69807
--- /dev/null
+++ b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic
@@ -0,0 +1,62120 @@
+62118
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
+a
+A
+AA
+AAA
+Aachen/M
+aardvark/SM
+Aaren/M
+Aarhus/M
+Aarika/M
+Aaron/M
+AB
+aback
+abacus/SM
+abaft
+Abagael/M
+Abagail/M
+abalone/SM
+abandoner/M
+abandon/LGDRS
+abandonment/SM
+abase/LGDSR
+abasement/S
+abaser/M
+abashed/UY
+abashment/MS
+abash/SDLG
+abate/DSRLG
+abated/U
+abatement/MS
+abater/M
+abattoir/SM
+Abba/M
+Abbe/M
+abbé/S
+abbess/SM
+Abbey/M
+abbey/MS
+Abbie/M
+Abbi/M
+Abbot/M
+abbot/MS
+Abbott/M
+abbr
+abbrev
+abbreviated/UA
+abbreviates/A
+abbreviate/XDSNG
+abbreviating/A
+abbreviation/M
+Abbye/M
+Abby/M
+ABC/M
+Abdel/M
+abdicate/NGDSX
+abdication/M
+abdomen/SM
+abdominal/YS
+abduct/DGS
+abduction/SM
+abductor/SM
+Abdul/M
+ab/DY
+abeam
+Abelard/M
+Abel/M
+Abelson/M
+Abe/M
+Aberdeen/M
+Abernathy/M
+aberrant/YS
+aberrational
+aberration/SM
+abet/S
+abetted
+abetting
+abettor/SM
+Abeu/M
+abeyance/MS
+abeyant
+Abey/M
+abhorred
+abhorrence/MS
+abhorrent/Y
+abhorrer/M
+abhorring
+abhor/S
+abidance/MS
+abide/JGSR
+abider/M
+abiding/Y
+Abidjan/M
+Abie/M
+Abigael/M
+Abigail/M
+Abigale/M
+Abilene/M
+ability/IMES
+abjection/MS
+abjectness/SM
+abject/SGPDY
+abjuration/SM
+abjuratory
+abjurer/M
+abjure/ZGSRD
+ablate/VGNSDX
+ablation/M
+ablative/SY
+ablaze
+abler/E
+ables/E
+ablest
+able/U
+abloom
+ablution/MS
+Ab/M
+ABM/S
+abnegate/NGSDX
+abnegation/M
+Abner/M
+abnormality/SM
+abnormal/SY
+aboard
+abode/GMDS
+abolisher/M
+abolish/LZRSDG
+abolishment/MS
+abolitionism/SM
+abolitionist/SM
+abolition/SM
+abominable
+abominably
+abominate/XSDGN
+abomination/M
+aboriginal/YS
+aborigine/SM
+Aborigine/SM
+aborning
+abortionist/MS
+abortion/MS
+abortiveness/M
+abortive/PY
+abort/SRDVG
+Abo/SM!
+abound/GDS
+about/S
+aboveboard
+aboveground
+above/S
+abracadabra/S
+abrader/M
+abrade/SRDG
+Abraham/M
+Abrahan/M
+Abra/M
+Abramo/M
+Abram/SM
+Abramson/M
+Abran/M
+abrasion/MS
+abrasiveness/S
+abrasive/SYMP
+abreaction/MS
+abreast
+abridge/DSRG
+abridged/U
+abridger/M
+abridgment/SM
+abroad
+abrogate/XDSNG
+abrogation/M
+abrogator/SM
+abruptness/SM
+abrupt/TRYP
+ABS
+abscess/GDSM
+abscissa/SM
+abscission/SM
+absconder/M
+abscond/SDRZG
+abseil/SGDR
+absence/SM
+absenteeism/SM
+absentee/MS
+absentia/M
+absentmindedness/S
+absentminded/PY
+absent/SGDRY
+absinthe/SM
+abs/M
+absoluteness/SM
+absolute/NPRSYTX
+absolution/M
+absolutism/MS
+absolutist/SM
+absolve/GDSR
+absolver/M
+absorb/ASGD
+absorbed/U
+absorbency/MS
+absorbent/MS
+absorber/SM
+absorbing/Y
+absorption/MS
+absorptive
+absorptivity/M
+abstainer/M
+abstain/GSDRZ
+abstemiousness/MS
+abstemious/YP
+abstention/SM
+abstinence/MS
+abstinent/Y
+abstractedness/SM
+abstracted/YP
+abstracter/M
+abstractionism/M
+abstractionist/SM
+abstraction/SM
+abstractness/SM
+abstractor/MS
+abstract/PTVGRDYS
+abstruseness/SM
+abstruse/PRYT
+absurdity/SM
+absurdness/SM
+absurd/PRYST
+Abuja
+abundance/SM
+abundant/Y
+abused/E
+abuse/GVZDSRB
+abuser/M
+abuses/E
+abusing/E
+abusiveness/SM
+abusive/YP
+abut/LS
+abutment/SM
+abutted
+abutter/MS
+abutting
+abuzz
+abysmal/Y
+abyssal
+Abyssinia/M
+Abyssinian
+abyss/SM
+AC
+acacia/SM
+academe/MS
+academia/SM
+academical/Y
+academicianship
+academician/SM
+academic/S
+academy/SM
+Acadia/M
+acanthus/MS
+Acapulco/M
+accede/SDG
+accelerated/U
+accelerate/NGSDXV
+accelerating/Y
+acceleration/M
+accelerator/SM
+accelerometer/SM
+accented/U
+accent/SGMD
+accentual/Y
+accentuate/XNGSD
+accentuation/M
+acceptability/SM
+acceptability's/U
+acceptableness/SM
+acceptable/P
+acceptably/U
+acceptance/SM
+acceptant
+acceptation/SM
+accepted/Y
+accepter/M
+accepting/PY
+acceptor/MS
+accept/RDBSZVG
+accessed/A
+accessibility/IMS
+accessible/IU
+accessibly/I
+accession/SMDG
+accessors
+accessory/SM
+access/SDMG
+accidence/M
+accidentalness/M
+accidental/SPY
+accident/MS
+acclaimer/M
+acclaim/SDRG
+acclamation/MS
+acclimate/XSDGN
+acclimation/M
+acclimatisation
+acclimatise/DG
+acclimatization/AMS
+acclimatized/U
+acclimatize/RSDGZ
+acclimatizes/A
+acclivity/SM
+accolade/GDSM
+accommodated/U
+accommodate/XVNGSD
+accommodating/Y
+accommodation/M
+accommodativeness/M
+accommodative/P
+accompanied/U
+accompanier/M
+accompaniment/MS
+accompanist/SM
+accompany/DRSG
+accomplice/MS
+accomplished/U
+accomplisher/M
+accomplishment/SM
+accomplish/SRDLZG
+accordance/SM
+accordant/Y
+accorder/M
+according/Y
+accordionist/SM
+accordion/MS
+accord/SZGMRD
+accost/SGD
+accountability/MS
+accountability's/U
+accountableness/M
+accountable/U
+accountably/U
+accountancy/SM
+accountant/MS
+account/BMDSGJ
+accounted/U
+accounting/M
+accouter/GSD
+accouterments
+accouterment's
+accoutrement/M
+Accra/M
+accreditation/SM
+accredited/U
+accredit/SGD
+accretion/SM
+accrual/MS
+accrue/SDG
+acct
+acculturate/XSDVNG
+acculturation/M
+accumulate/VNGSDX
+accumulation/M
+accumulativeness/M
+accumulative/YP
+accumulator/MS
+accuracy/IMS
+accurate/IY
+accurateness/SM
+accursedness/SM
+accursed/YP
+accusal/M
+accusation/SM
+accusative/S
+accusatory
+accused/M
+accuser/M
+accuse/SRDZG
+accusing/Y
+accustomedness/M
+accustomed/P
+accustom/SGD
+ac/DRG
+aced/M
+acerbate/DSG
+acerbic
+acerbically
+acerbity/MS
+ace/SM
+acetaminophen/S
+acetate/MS
+acetic
+acetone/SM
+acetonic
+acetylene/MS
+Acevedo/M
+Achaean/M
+Achebe/M
+ached/A
+ache/DSG
+achene/SM
+Achernar/M
+aches/A
+Acheson/M
+achievable/U
+achieved/UA
+achieve/LZGRSDB
+achievement/SM
+achiever/M
+Achilles
+aching/Y
+achoo
+achromatic
+achy/TR
+acidic
+acidification/M
+acidify/NSDG
+acidity/SM
+acidness/M
+acidoses
+acidosis/M
+acid/SMYP
+acidulous
+acing/M
+Ackerman/M
+acknowledgeable
+acknowledgedly
+acknowledged/U
+acknowledge/GZDRS
+acknowledger/M
+acknowledgment/SAM
+ACLU
+Ac/M
+ACM
+acme/SM
+acne/MDS
+acolyte/MS
+Aconcagua/M
+aconite/MS
+acorn/SM
+Acosta/M
+acoustical/Y
+acoustician/M
+acoustic/S
+acoustics/M
+acquaintance/MS
+acquaintanceship/S
+acquainted/U
+acquaint/GASD
+acquiesce/GSD
+acquiescence/SM
+acquiescent/Y
+acquirable
+acquire/ASDG
+acquirement/SM
+acquisition's/A
+acquisition/SM
+acquisitiveness/MS
+acquisitive/PY
+acquit/S
+acquittal/MS
+acquittance/M
+acquitted
+acquitter/M
+acquitting
+acreage/MS
+acre/MS
+acridity/MS
+acridness/SM
+acrid/TPRY
+acrimoniousness/MS
+acrimonious/YP
+acrimony/MS
+acrobatically
+acrobatic/S
+acrobatics/M
+acrobat/SM
+acronym/SM
+acrophobia/SM
+Acropolis/M
+acropolis/SM
+across
+acrostic/SM
+Acrux/M
+acrylate/M
+acrylic/S
+ACT
+Actaeon/M
+Acta/M
+ACTH
+acting/S
+actinic
+actinide/SM
+actinium/MS
+actinometer/MS
+action/DMSGB
+actions/AI
+action's/IA
+activate/AXCDSNGI
+activated/U
+activation/AMCI
+activator/SM
+active/APY
+actively/I
+activeness/MS
+actives
+activism/MS
+activist/MS
+activities/A
+activity/MSI
+Acton/M
+actor/MAS
+actress/SM
+act's
+Acts
+act/SADVG
+actuality/SM
+actualization/MAS
+actualize/GSD
+actualizes/A
+actual/SY
+actuarial/Y
+actuary/MS
+actuate/GNXSD
+actuation/M
+actuator/SM
+acuity/MS
+acumen/SM
+acupressure/S
+acupuncture/SM
+acupuncturist/S
+acuteness/MS
+acute/YTSRP
+acyclic
+acyclically
+acyclovir/S
+AD
+adage/MS
+adagio/S
+Adah/M
+Adair/M
+Adaline/M
+Ada/M
+adamant/SY
+Adamo/M
+Adam/SM
+Adamson/M
+Adana/M
+Adan/M
+adaptability/MS
+adaptable/U
+adaptation/MS
+adaptedness/M
+adapted/P
+adapter/M
+adapting/A
+adaption
+adaptively
+adaptiveness/M
+adaptive/U
+adaptivity
+adapt/SRDBZVG
+Adara/M
+ad/AS
+ADC
+Adda/M
+Addams
+addenda
+addend/SM
+addendum/M
+adder/M
+Addia/M
+addiction/MS
+addictive/P
+addict/SGVD
+Addie/M
+Addi/M
+Addison/M
+additional/Y
+addition/MS
+additive/YMS
+additivity
+addle/GDS
+addressability
+addressable/U
+addressed/A
+addressee/SM
+addresser/M
+addresses/A
+address/MDRSZGB
+Addressograph/M
+adduce/GRSD
+adducer/M
+adduct/DGVS
+adduction/M
+adductor/M
+Addy/M
+add/ZGBSDR
+Adelaida/M
+Adelaide/M
+Adela/M
+Adelbert/M
+Adele/M
+Adelheid/M
+Adelice/M
+Adelina/M
+Adelind/M
+Adeline/M
+Adella/M
+Adelle/M
+Adel/M
+Ade/M
+Adena/M
+Adenauer/M
+adenine/SM
+Aden/M
+adenoidal
+adenoid/S
+adeptness/MS
+adept/RYPTS
+adequacy/IMS
+adequate/IPY
+adequateness's/I
+adequateness/SM
+Adey/M
+Adham/M
+Adhara/M
+adherence/SM
+adherent/YMS
+adherer/M
+adhere/ZGRSD
+adhesion/MS
+adhesiveness/MS
+adhesive/PYMS
+adiabatic
+adiabatically
+Adiana/M
+Adidas/M
+adieu/S
+Adi/M
+Adina/M
+adis
+adipose/S
+Adirondack/SM
+adj
+adjacency/MS
+adjacent/Y
+adjectival/Y
+adjective/MYS
+adjoin/SDG
+adjoint/M
+adjourn/DGLS
+adjournment/SM
+adjudge/DSG
+adjudicate/VNGXSD
+adjudication/M
+adjudicator/SM
+adjudicatory
+adjunct/VSYM
+adjuration/SM
+adjure/GSD
+adjustable/U
+adjustably
+adjust/DRALGSB
+adjusted/U
+adjuster's/A
+adjuster/SM
+adjustive
+adjustment/MAS
+adjustor's
+adjutant/SM
+Adkins/M
+Adlai/M
+Adler/M
+adman/M
+admen
+administer/GDJS
+administrable
+administrate/XSDVNG
+administration/M
+administrative/Y
+administrator/MS
+administratrix/M
+admirableness/M
+admirable/P
+admirably
+admiral/SM
+admiralty/MS
+Admiralty/S
+admiration/MS
+admirer/M
+admire/RSDZBG
+admiring/Y
+admissibility/ISM
+admissible/I
+admissibly
+admission/AMS
+admit/AS
+admittance/MS
+admitted/A
+admittedly
+admitting/A
+admix/SDG
+admixture/SM
+Adm/M
+Ad/MN
+admonisher/M
+admonish/GLSRD
+admonishing/Y
+admonishment/SM
+admonition/MS
+admonitory
+adobe/MS
+adolescence/MS
+adolescent/SYM
+Adolf/M
+Adolfo/M
+Adolphe/M
+Adolph/M
+Adolpho/M
+Adolphus/M
+Ado/M
+ado/MS
+Adonis/SM
+adopted/AU
+adopter/M
+adoption/MS
+adoptive/Y
+adopt/RDSBZVG
+adopts/A
+adorableness/SM
+adorable/P
+adorably
+Adora/M
+adoration/SM
+adore/DSRGZB
+Adoree/M
+Adore/M
+adorer/M
+adoring/Y
+adorned/U
+Adorne/M
+adornment/SM
+adorn/SGLD
+ADP
+Adrea/M
+adrenalin
+adrenaline/MS
+Adrenalin/MS
+adrenal/YS
+Adria/MX
+Adriana/M
+Adriane/M
+Adrian/M
+Adrianna/M
+Adrianne/M
+Adriano/M
+Adriatic
+Adriena/M
+Adrien/M
+Adrienne/M
+adrift
+adroitness/MS
+adroit/RTYP
+ads
+ad's
+adsorbate/M
+adsorbent/S
+adsorb/GSD
+adsorption/MS
+adsorptive/Y
+adulate/GNDSX
+adulation/M
+adulator/SM
+adulatory
+adulterant/SM
+adulterated/U
+adulterate/NGSDX
+adulteration/M
+adulterer/SM
+adulteress/MS
+adulterous/Y
+adultery/SM
+adulthood/MS
+adult/MYPS
+adultness/M
+adumbrate/XSDVGN
+adumbration/M
+adumbrative/Y
+adv
+advance/DSRLZG
+advancement/MS
+advancer/M
+advantage/GMEDS
+advantageous/EY
+advantageousness/M
+Adventist/M
+adventist/S
+adventitiousness/M
+adventitious/PY
+adventive/Y
+Advent/SM
+advent/SVM
+adventurer/M
+adventuresome
+adventure/SRDGMZ
+adventuress/SM
+adventurousness/SM
+adventurous/YP
+adverbial/MYS
+adverb/SM
+adversarial
+adversary/SM
+adverse/DSRPYTG
+adverseness/MS
+adversity/SM
+advert/GSD
+advertised/U
+advertise/JGZSRDL
+advertisement/SM
+advertiser/M
+advertising/M
+advertorial/S
+advice/SM
+Advil/M
+advisability/SIM
+advisable/I
+advisableness/M
+advisably
+advisedly/I
+advised/YU
+advisee/MS
+advisement/MS
+adviser/M
+advise/ZRSDGLB
+advisor/S
+advisor's
+advisory/S
+advocacy/SM
+advocate/NGVDS
+advocation/M
+advt
+adze's
+adz/MDSG
+Aegean
+aegis/SM
+Aelfric/M
+Aeneas
+Aeneid/M
+aeolian
+Aeolus/M
+aeon's
+aerate/XNGSD
+aeration/M
+aerator/MS
+aerialist/MS
+aerial/SMY
+Aeriela/M
+Aeriell/M
+Aeriel/M
+aerie/SRMT
+aeroacoustic
+aerobatic/S
+aerobically
+aerobic/S
+aerodrome/SM
+aerodynamically
+aerodynamic/S
+aerodynamics/M
+aeronautical/Y
+aeronautic/S
+aeronautics/M
+aerosolize/D
+aerosol/MS
+aerospace/SM
+Aeschylus/M
+Aesculapius/M
+Aesop/M
+aesthete/S
+aesthetically
+aestheticism/MS
+aesthetics/M
+aesthetic/U
+aether/M
+aetiology/M
+AF
+AFAIK
+afar/S
+AFB
+AFC
+AFDC
+affability/MS
+affable/TR
+affably
+affair/SM
+affectation/MS
+affectedness/EM
+affected/UEYP
+affect/EGSD
+affecter/M
+affecting/Y
+affectionate/UY
+affectioned
+affection/EMS
+affectioning
+affective/MY
+afferent/YS
+affiance/GDS
+affidavit/SM
+affiliated/U
+affiliate/EXSDNG
+affiliation/EM
+affine
+affinity/SM
+affirm/ASDG
+affirmation/SAM
+affirmative/SY
+affix/SDG
+afflatus/MS
+afflict/GVDS
+affliction/SM
+afflictive/Y
+affluence/SM
+affluent/YS
+afford/DSBG
+afforest/A
+afforestation/SM
+afforested
+afforesting
+afforests
+affray/MDSG
+affricate/VNMS
+affrication/M
+affricative/M
+affright
+affront/GSDM
+Afghani/SM
+Afghanistan/M
+afghan/MS
+Afghan/SM
+aficionado/MS
+afield
+afire
+aflame
+afloat
+aflutter
+afoot
+afore
+aforementioned
+aforesaid
+aforethought/S
+afoul
+Afr
+afraid/U
+afresh
+Africa/M
+African/MS
+Afrikaans/M
+Afrikaner/SM
+afro
+Afrocentric
+Afrocentrism/S
+Afro/MS
+afterbirth/M
+afterbirths
+afterburner/MS
+aftercare/SM
+aftereffect/MS
+afterglow/MS
+afterimage/MS
+afterlife/M
+afterlives
+aftermath/M
+aftermaths
+aftermost
+afternoon/SM
+aftershave/S
+aftershock/SM
+afters/M
+aftertaste/SM
+afterthought/MS
+afterward/S
+afterworld/MS
+Afton/M
+aft/ZR
+Agace/M
+again
+against
+Agamemnon/M
+agapae
+agape/S
+agar/MS
+Agassiz/M
+Agata/M
+agate/SM
+Agatha/M
+Agathe/M
+agave/SM
+agedness/M
+aged/PY
+age/GJDRSMZ
+ageism/S
+ageist/S
+agelessness/MS
+ageless/YP
+agency/SM
+agenda/MS
+agent/AMS
+agented
+agenting
+agentive
+ageratum/M
+Aggie/M
+Aggi/M
+agglomerate/XNGVDS
+agglomeration/M
+agglutinate/VNGXSD
+agglutination/M
+agglutinin/MS
+aggrandize/LDSG
+aggrandizement/SM
+aggravate/SDNGX
+aggravating/Y
+aggravation/M
+aggregated/U
+aggregate/EGNVD
+aggregately
+aggregateness/M
+aggregates
+aggregation/SM
+aggregative/Y
+aggression/SM
+aggressively
+aggressiveness/S
+aggressive/U
+aggressor/MS
+aggrieved/Y
+aggrieve/GDS
+Aggy/SM
+aghast
+agile/YTR
+agility/MS
+agitated/Y
+agitate/XVNGSD
+agitation/M
+agitator/SM
+agitprop/MS
+Aglaia/M
+agleam
+aglitter
+aglow
+Ag/M
+Agna/M
+Agnella/M
+Agnese/M
+Agnes/M
+Agnesse/M
+Agneta/M
+Agnew/M
+Agni/M
+Agnola/M
+agnosticism/MS
+agnostic/SM
+ago
+agog
+agonizedly/S
+agonized/Y
+agonize/ZGRSD
+agonizing/Y
+agony/SM
+agoraphobia/MS
+agoraphobic/S
+Agosto/M
+Agra/M
+agrarianism/MS
+agrarian/S
+agreeable/EP
+agreeableness/SME
+agreeably/E
+agreeing/E
+agree/LEBDS
+agreement/ESM
+agreer/S
+Agretha/M
+agribusiness/SM
+Agricola/M
+agriculturalist/S
+agricultural/Y
+agriculture/MS
+agriculturist/SM
+Agrippa/M
+Agrippina/M
+agrochemicals
+agronomic/S
+agronomist/SM
+agronomy/MS
+aground
+Aguascalientes/M
+ague/MS
+Aguie/M
+Aguilar/M
+Aguinaldo/M
+Aguirre/M
+Aguistin/M
+Aguste/M
+Agustin/M
+ah
+Ahab/M
+Aharon/M
+aha/S
+ahead
+ahem/S
+Ahmadabad
+Ahmad/M
+Ahmed/M
+ahoy/S
+Ahriman/M
+AI
+Aida/M
+Aidan/M
+aided/U
+aide/MS
+aider/M
+AIDS
+aid/ZGDRS
+Aigneis/M
+aigrette/SM
+Aiken/M
+Aila/M
+Ailbert/M
+Ailee/M
+Aileen/M
+Aile/M
+Ailene/M
+aileron/MS
+Ailey/M
+Ailina/M
+Aili/SM
+ail/LSDG
+ailment/SM
+Ailsun/M
+Ailyn/M
+Aimee/M
+Aime/M
+aimer/M
+Aimil/M
+aimlessness/MS
+aimless/YP
+aim/ZSGDR
+Aindrea/M
+Ainslee/M
+Ainsley/M
+Ainslie/M
+ain't
+Ainu/M
+airbag/MS
+airbase/S
+airborne
+airbrush/SDMG
+Airbus/M
+airbus/SM
+aircraft/MS
+aircrew/M
+airdrop/MS
+airdropped
+airdropping
+Airedale/SM
+Aires
+airfare/S
+airfield/MS
+airflow/SM
+airfoil/MS
+airframe/MS
+airfreight/SGD
+airhead/MS
+airily
+airiness/MS
+airing/M
+airlessness/S
+airless/P
+airlift/MDSG
+airliner/M
+airline/SRMZ
+airlock/MS
+airmail/DSG
+airman/M
+airmass
+air/MDRTZGJS
+airmen
+airpark
+airplane/SM
+airplay/S
+airport/MS
+airship/MS
+airsickness/SM
+airsick/P
+airspace/SM
+airspeed/SM
+airstrip/MS
+airtightness/M
+airtight/P
+airtime
+airwaves
+airway/SM
+airworthiness/SM
+airworthy/PTR
+airy/PRT
+Aisha/M
+aisle/DSGM
+aitch/MS
+ajar
+Ajax/M
+Ajay/M
+AK
+aka
+Akbar/M
+Akihito/M
+akimbo
+Akim/M
+akin
+Akita/M
+Akkad/M
+Akron/M
+Aksel/M
+AL
+Alabama/M
+Alabaman/S
+Alabamian/MS
+alabaster/MS
+alack/S
+alacrity/SM
+Aladdin/M
+Alaine/M
+Alain/M
+Alair/M
+Alameda/M
+Alamogordo/M
+Alamo/SM
+ala/MS
+Ala/MS
+Alanah/M
+Alana/M
+Aland/M
+Alane/M
+alanine/M
+Alan/M
+Alanna/M
+Alano/M
+Alanson/M
+Alard/M
+Alaric/M
+Alar/M
+alarming/Y
+alarmist/MS
+alarm/SDG
+Alasdair/M
+Alaska/M
+Alaskan/S
+alas/S
+Alastair/M
+Alasteir/M
+Alaster/M
+Alayne/M
+albacore/SM
+alba/M
+Alba/M
+Albania/M
+Albanian/SM
+Albany/M
+albatross/SM
+albedo/M
+Albee/M
+albeit
+Alberich/M
+Alberik/M
+Alberio/M
+Alberta/M
+Albertan/S
+Albertina/M
+Albertine/M
+Albert/M
+Alberto/M
+Albie/M
+Albigensian
+Albina/M
+albinism/SM
+albino/MS
+Albion/M
+Albireo/M
+alb/MS
+Albrecht/M
+albumen/M
+albumin/MS
+albuminous
+album/MNXS
+Albuquerque/M
+Alcatraz/M
+Alcestis/M
+alchemical
+alchemist/SM
+alchemy/MS
+Alcibiades/M
+Alcmena/M
+Alcoa/M
+alcoholically
+alcoholic/MS
+alcoholism/SM
+alcohol/MS
+Alcott/M
+alcove/MSD
+Alcuin/M
+Alcyone/M
+Aldan/M
+Aldebaran/M
+aldehyde/M
+Alden/M
+Alderamin/M
+alderman/M
+aldermen
+alder/SM
+alderwoman
+alderwomen
+Aldin/M
+Aldis/M
+Aldo/M
+Aldon/M
+Aldous/M
+Aldrich/M
+Aldric/M
+Aldridge/M
+Aldrin/M
+Aldus/M
+Aldwin/M
+aleatory
+Alecia/M
+Aleck/M
+Alec/M
+Aleda/M
+alee
+Aleece/M
+Aleen/M
+alehouse/MS
+Aleichem/M
+Alejandra/M
+Alejandrina/M
+Alejandro/M
+Alejoa/M
+Aleksandr/M
+Alembert/M
+alembic/SM
+ale/MVS
+Alena/M
+Alene/M
+aleph/M
+Aleppo/M
+Aler/M
+alerted/Y
+alertness/MS
+alert/STZGPRDY
+Alessandra/M
+Alessandro/M
+Aleta/M
+Alethea/M
+Aleutian/S
+Aleut/SM
+alewife/M
+alewives
+Alexa/M
+Alexander/SM
+Alexandra/M
+Alexandre/M
+Alexandria/M
+Alexandrian/S
+Alexandrina/M
+Alexandr/M
+Alexandro/MS
+Alexei/M
+Alexia/M
+Alexina/M
+Alexine/M
+Alexio/M
+Alexi/SM
+Alex/M
+alfalfa/MS
+Alfa/M
+Alfie/M
+Alfi/M
+Alf/M
+Alfonse/M
+Alfons/M
+Alfonso/M
+Alfonzo/M
+Alford/M
+Alfreda/M
+Alfred/M
+Alfredo/M
+alfresco
+Alfy/M
+algae
+algaecide
+algal
+alga/M
+algebraic
+algebraical/Y
+algebraist/M
+algebra/MS
+Algenib/M
+Algeria/M
+Algerian/MS
+Alger/M
+Algernon/M
+Algieba/M
+Algiers/M
+alginate/SM
+ALGOL
+Algol/M
+Algonquian/SM
+Algonquin/SM
+algorithmic
+algorithmically
+algorithm/MS
+Alhambra/M
+Alhena/M
+Alia/M
+alias/GSD
+alibi/MDSG
+Alica/M
+Alicea/M
+Alice/M
+Alicia/M
+Alick/M
+Alic/M
+Alida/M
+Alidia/M
+Alie/M
+alienable/IU
+alienate/SDNGX
+alienation/M
+alienist/MS
+alien/RDGMBS
+Alighieri/M
+alight/DSG
+aligned/U
+aligner/SM
+align/LASDG
+alignment/SAM
+Alika/M
+Alikee/M
+alikeness/M
+alike/U
+alimentary
+aliment/SDMG
+alimony/MS
+Ali/MS
+Alina/M
+Aline/M
+alinement's
+Alioth/M
+aliquot/S
+Alisa/M
+Alisander/M
+Alisha/M
+Alison/M
+Alissa/M
+Alistair/M
+Alister/M
+Alisun/M
+aliveness/MS
+alive/P
+Alix/M
+aliyah/M
+aliyahs
+Aliza/M
+Alkaid/M
+alkalies
+alkali/M
+alkaline
+alkalinity/MS
+alkalize/SDG
+alkaloid/MS
+alkyd/S
+alkyl/M
+Allahabad/M
+Allah/M
+Alla/M
+Allan/M
+Allard/M
+allay/GDS
+Allayne/M
+Alleen/M
+allegation/SM
+alleged/Y
+allege/SDG
+Allegheny/MS
+allegiance/SM
+allegiant
+allegoric
+allegoricalness/M
+allegorical/YP
+allegorist/MS
+allegory/SM
+Allegra/M
+allegretto/MS
+allegri
+allegro/MS
+allele/SM
+alleluia/S
+allemande/M
+Allendale/M
+Allende/M
+Allene/M
+Allen/M
+Allentown/M
+allergenic
+allergen/MS
+allergic
+allergically
+allergist/MS
+allergy/MS
+alleviate/SDVGNX
+alleviation/M
+alleviator/MS
+Alley/M
+alley/MS
+Alleyn/M
+alleyway/MS
+Allhallows
+alliance/MS
+Allianora/M
+Allie/M
+allier
+allies/M
+alligator/DMGS
+Alli/MS
+Allina/M
+Allin/M
+Allison/M
+Allissa/M
+Allister/M
+Allistir/M
+alliterate/XVNGSD
+alliteration/M
+alliterative/Y
+Allix/M
+allocable/U
+allocatable
+allocate/ACSDNGX
+allocated/U
+allocation/AMC
+allocative
+allocator/AMS
+allophone/MS
+allophonic
+allotment/MS
+allotments/A
+allotrope/M
+allotropic
+allots/A
+allot/SDL
+allotted/A
+allotter/M
+allotting/A
+allover/S
+allowableness/M
+allowable/P
+allowably
+allowance/GSDM
+allowed/Y
+allowing/E
+allow/SBGD
+allows/E
+alloyed/U
+alloy/SGMD
+all/S
+allspice/MS
+Allstate/M
+Allsun/M
+allude/GSD
+allure/GLSD
+allurement/SM
+alluring/Y
+allusion/MS
+allusiveness/MS
+allusive/PY
+alluvial/S
+alluvions
+alluvium/MS
+Allx/M
+ally/ASDG
+Allyce/M
+Ally/MS
+Allyn/M
+Allys
+Allyson/M
+alma
+Almach/M
+Almaden/M
+almagest
+Alma/M
+almanac/MS
+Almaty/M
+Almeda/M
+Almeria/M
+Almeta/M
+almightiness/M
+Almighty/M
+almighty/P
+Almira/M
+Almire/M
+almond/SM
+almoner/MS
+almost
+Al/MRY
+alms/A
+almshouse/SM
+almsman/M
+alnico
+Alnilam/M
+Alnitak/M
+aloe/MS
+aloft
+aloha/SM
+Aloin/M
+Aloise/M
+Aloisia/M
+aloneness/M
+alone/P
+along
+alongshore
+alongside
+Alon/M
+Alonso/M
+Alonzo/M
+aloofness/MS
+aloof/YP
+aloud
+Aloysia/M
+Aloysius/M
+alpaca/SM
+Alpert/M
+alphabetical/Y
+alphabetic/S
+alphabetization/SM
+alphabetizer/M
+alphabetize/SRDGZ
+alphabet/SGDM
+alpha/MS
+alphanumerical/Y
+alphanumeric/S
+Alphard/M
+Alphecca/M
+Alpheratz/M
+Alphonse/M
+Alphonso/M
+Alpine
+alpine/S
+alp/MS
+Alps
+already
+Alric/M
+alright
+Alsace/M
+Alsatian/MS
+also
+Alsop/M
+Alston/M
+Altaic/M
+Altai/M
+Altair/M
+Alta/M
+altar/MS
+altarpiece/SM
+alterable/UI
+alteration/MS
+altercate/NX
+altercation/M
+altered/U
+alternate/SDVGNYX
+alternation/M
+alternativeness/M
+alternative/YMSP
+alternator/MS
+alter/RDZBG
+Althea/M
+although
+altimeter/SM
+Altiplano/M
+altitude/SM
+altogether/S
+Alton/M
+alto/SM
+Altos/M
+altruism/SM
+altruistic
+altruistically
+altruist/SM
+alt/RZS
+ALU
+Aludra/M
+Aluin/M
+Aluino/M
+alumina/SM
+aluminum/MS
+alumnae
+alumna/M
+alumni
+alumnus/MS
+alum/SM
+alundum
+Alva/M
+Alvan/M
+Alvarado/M
+Alvarez/M
+Alvaro/M
+alveolar/Y
+alveoli
+alveolus/M
+Alvera/M
+Alverta/M
+Alvie/M
+Alvina/M
+Alvinia/M
+Alvin/M
+Alvira/M
+Alvis/M
+Alvy/M
+alway/S
+Alwin/M
+Alwyn/M
+Alyce/M
+Alyda/M
+Alyosha/M
+Alysa/M
+Alyse/M
+Alysia/M
+Alys/M
+Alyson/M
+Alyss
+Alyssa/M
+Alzheimer/M
+AM
+AMA
+Amabelle/M
+Amabel/M
+Amadeus/M
+Amado/M
+amain
+Amalea/M
+Amalee/M
+Amaleta/M
+amalgamate/VNGXSD
+amalgamation/M
+amalgam/MS
+Amalia/M
+Amalie/M
+Amalita/M
+Amalle/M
+Amanda/M
+Amandie/M
+Amandi/M
+Amandy/M
+amanuenses
+amanuensis/M
+Amara/M
+amaranth/M
+amaranths
+amaretto/S
+Amargo/M
+Amarillo/M
+amaryllis/MS
+am/AS
+amasser/M
+amass/GRSD
+Amata/M
+amateurishness/MS
+amateurish/YP
+amateurism/MS
+amateur/SM
+Amati/M
+amatory
+amazed/Y
+amaze/LDSRGZ
+amazement/MS
+amazing/Y
+amazonian
+Amazonian
+amazon/MS
+Amazon/SM
+ambassadorial
+ambassador/MS
+ambassadorship/MS
+ambassadress/SM
+ambergris/SM
+Amberly/M
+amber/MS
+Amber/YM
+ambiance/MS
+ambidexterity/MS
+ambidextrous/Y
+ambience's
+ambient/S
+ambiguity/MS
+ambiguously/U
+ambiguousness/M
+ambiguous/YP
+ambition/GMDS
+ambitiousness/MS
+ambitious/PY
+ambit/M
+ambivalence/SM
+ambivalent/Y
+amble/GZDSR
+Amble/M
+ambler/M
+ambrose
+Ambrose/M
+ambrosial/Y
+ambrosia/SM
+Ambrosi/M
+Ambrosio/M
+Ambrosius/M
+Ambros/M
+ambulance/MS
+ambulant/S
+ambulate/DSNGX
+ambulation/M
+ambulatory/S
+Ambur/M
+ambuscade/MGSRD
+ambuscader/M
+ambusher/M
+ambush/MZRSDG
+Amby/M
+Amdahl/M
+ameba's
+Amelia/M
+Amelie/M
+Amelina/M
+Ameline/M
+ameliorate/XVGNSD
+amelioration/M
+Amelita/M
+amenability/SM
+amenably
+amended/U
+amender/M
+amendment/SM
+amen/DRGTSB
+amend/SBRDGL
+amends/M
+Amenhotep/M
+amenity/MS
+amenorrhea/M
+Amerada/M
+Amerasian/S
+amercement/MS
+amerce/SDLG
+Americana/M
+Americanism/SM
+Americanization/SM
+americanized
+Americanize/SDG
+American/MS
+America/SM
+americium/MS
+Amerigo/M
+Amerindian/MS
+Amerind/MS
+Amer/M
+Amery/M
+Ameslan/M
+Ame/SM
+amethystine
+amethyst/MS
+Amharic/M
+Amherst/M
+amiability/MS
+amiableness/M
+amiable/RPT
+amiably
+amicability/SM
+amicableness/M
+amicable/P
+amicably
+amide/SM
+amid/S
+amidships
+amidst
+Amie/M
+Amiga/M
+amigo/MS
+Amii/M
+Amil/M
+Ami/M
+amines
+aminobenzoic
+amino/M
+amir's
+Amish
+amiss
+Amitie/M
+Amity/M
+amity/SM
+Ammamaria/M
+Amman/M
+Ammerman/M
+ammeter/MS
+ammo/MS
+ammoniac
+ammonia/MS
+ammonium/M
+Am/MR
+ammunition/MS
+amnesiac/MS
+amnesia/SM
+amnesic/S
+amnesty/GMSD
+amniocenteses
+amniocentesis/M
+amnion/SM
+amniotic
+Amoco/M
+amoeba/SM
+amoebic
+amoeboid
+amok/MS
+among
+amongst
+Amontillado/M
+amontillado/MS
+amorality/MS
+amoral/Y
+amorousness/SM
+amorous/PY
+amorphousness/MS
+amorphous/PY
+amortization/SUM
+amortized/U
+amortize/SDG
+Amory/M
+Amos
+amount/SMRDZG
+amour/MS
+Amparo/M
+amperage/SM
+Ampere/M
+ampere/MS
+ampersand/MS
+Ampex/M
+amphetamine/MS
+amphibian/SM
+amphibiousness/M
+amphibious/PY
+amphibology/M
+amphitheater/SM
+amphorae
+amphora/M
+ampleness/M
+ample/PTR
+amplification/M
+amplifier/M
+amplify/DRSXGNZ
+amplitude/MS
+ampoule's
+amp/SGMDY
+ampule/SM
+amputate/DSNGX
+amputation/M
+amputee/SM
+Amritsar/M
+ams
+Amsterdam/M
+amt
+Amtrak/M
+amuck's
+amulet/SM
+Amundsen/M
+Amur/M
+amused/Y
+amuse/LDSRGVZ
+amusement/SM
+amuser/M
+amusingness/M
+amusing/YP
+Amway/M
+Amye/M
+amylase/MS
+amyl/M
+Amy/M
+Anabal/M
+Anabaptist/SM
+Anabella/M
+Anabelle/M
+Anabel/M
+anabolic
+anabolism/MS
+anachronism/SM
+anachronistic
+anachronistically
+Anacin/M
+anaconda/MS
+Anacreon/M
+anaerobe/SM
+anaerobic
+anaerobically
+anaglyph/M
+anagrammatic
+anagrammatically
+anagrammed
+anagramming
+anagram/MS
+Anaheim/M
+Analects/M
+analgesia/MS
+analgesic/S
+Analiese/M
+Analise/M
+Anallese/M
+Anallise/M
+analogical/Y
+analogize/SDG
+analogousness/MS
+analogous/YP
+analog/SM
+analogue/SM
+analogy/MS
+anal/Y
+analysand/MS
+analyses
+analysis/AM
+analyst/SM
+analytical/Y
+analyticity/S
+analytic/S
+analytics/M
+analyzable/U
+analyze/DRSZGA
+analyzed/U
+analyzer/M
+Ana/M
+anamorphic
+Ananias/M
+anapaest's
+anapestic/S
+anapest/SM
+anaphora/M
+anaphoric
+anaphorically
+anaplasmosis/M
+anarchic
+anarchical/Y
+anarchism/MS
+anarchistic
+anarchist/MS
+anarchy/MS
+Anastasia/M
+Anastasie/M
+Anastassia/M
+anastigmatic
+anastomoses
+anastomosis/M
+anastomotic
+anathema/MS
+anathematize/GSD
+Anatola/M
+Anatole/M
+Anatolia/M
+Anatolian
+Anatollo/M
+Anatol/M
+anatomic
+anatomical/YS
+anatomist/MS
+anatomize/GSD
+anatomy/MS
+Anaxagoras/M
+Ancell/M
+ancestor/SMDG
+ancestral/Y
+ancestress/SM
+ancestry/SM
+Anchorage/M
+anchorage/SM
+anchored/U
+anchorite/MS
+anchoritism/M
+anchorman/M
+anchormen
+anchorpeople
+anchorperson/S
+anchor/SGDM
+anchorwoman
+anchorwomen
+anchovy/MS
+ancientness/MS
+ancient/SRYTP
+ancillary/S
+an/CS
+Andalusia/M
+Andalusian
+Andaman
+andante/S
+and/DZGS
+Andean/M
+Andeee/M
+Andee/M
+Anderea/M
+Andersen/M
+Anders/N
+Anderson/M
+Andes
+Andie/M
+Andi/M
+andiron/MS
+Andonis/M
+Andorra/M
+Andover/M
+Andra/SM
+Andrea/MS
+Andreana/M
+Andree/M
+Andrei/M
+Andrej/M
+Andre/SM
+Andrew/MS
+Andrey/M
+Andria/M
+Andriana/M
+Andriette/M
+Andris
+androgenic
+androgen/SM
+androgynous
+androgyny/SM
+android/MS
+Andromache/M
+Andromeda/M
+Andropov/M
+Andros/M
+Andrus/M
+Andy/M
+anecdotal/Y
+anecdote/SM
+anechoic
+anemia/SM
+anemically
+anemic/S
+anemometer/MS
+anemometry/M
+anemone/SM
+anent
+aneroid
+Anestassia/M
+anesthesia/MS
+anesthesiologist/MS
+anesthesiology/SM
+anesthetically
+anesthetic/SM
+anesthetist/MS
+anesthetization/SM
+anesthetizer/M
+anesthetize/ZSRDG
+Anet/M
+Anetta/M
+Anette/M
+Anett/M
+aneurysm/MS
+anew
+Angara/M
+Angela/M
+Angeleno/SM
+Angele/SM
+angelfish/SM
+Angelia/M
+angelic
+angelical/Y
+Angelica/M
+angelica/MS
+Angelico/M
+Angelika/M
+Angeli/M
+Angelina/M
+Angeline/M
+Angelique/M
+Angelita/M
+Angelle/M
+Angel/M
+angel/MDSG
+Angelo/M
+Angelou/M
+Ange/M
+anger/GDMS
+Angevin/M
+Angie/M
+Angil/M
+angina/MS
+angiography
+angioplasty/S
+angiosperm/MS
+Angkor/M
+angle/GMZDSRJ
+angler/M
+Angles
+angleworm/MS
+Anglia/M
+Anglicanism/MS
+Anglican/MS
+Anglicism/SM
+Anglicization/MS
+anglicize/SDG
+Anglicize/SDG
+angling/M
+Anglo/MS
+Anglophile/SM
+Anglophilia/M
+Anglophobe/MS
+Anglophobia/M
+Angola/M
+Angolan/S
+angora/MS
+Angora/MS
+angrily
+angriness/M
+angry/RTP
+angst/MS
+ångström/M
+angstrom/MS
+Anguilla/M
+anguish/DSMG
+angularity/MS
+angular/Y
+Angus/M
+Angy/M
+Anheuser/M
+anhydride/M
+anhydrite/M
+anhydrous/Y
+Aniakchak/M
+Ania/M
+Anibal/M
+Anica/M
+aniline/SM
+animadversion/SM
+animadvert/DSG
+animalcule/MS
+animal/MYPS
+animated/A
+animatedly
+animately/I
+animateness/MI
+animates/A
+animate/YNGXDSP
+animating/A
+animation/AMS
+animator/SM
+animism/SM
+animistic
+animist/S
+animized
+animosity/MS
+animus/SM
+anionic/S
+anion/MS
+aniseed/MS
+aniseikonic
+anise/MS
+anisette/SM
+anisotropic
+anisotropy/MS
+Anissa/M
+Anita/M
+Anitra/M
+Anjanette/M
+Anjela/M
+Ankara/M
+ankh/M
+ankhs
+anklebone/SM
+ankle/GMDS
+anklet/MS
+Annabal/M
+Annabela/M
+Annabella/M
+Annabelle/M
+Annabell/M
+Annabel/M
+Annadiana/M
+Annadiane/M
+Annalee/M
+Annaliese/M
+Annalise/M
+annalist/MS
+annal/MNS
+Anna/M
+Annamaria/M
+Annamarie/M
+Annapolis/M
+Annapurna/M
+anneal/DRSZG
+annealer/M
+Annecorinne/M
+annelid/MS
+Anneliese/M
+Annelise/M
+Anne/M
+Annemarie/M
+Annetta/M
+Annette/M
+annexation/SM
+annexe/M
+annex/GSD
+Annice/M
+Annie/M
+annihilate/XSDVGN
+annihilation/M
+annihilator/MS
+Anni/MS
+Annissa/M
+anniversary/MS
+Ann/M
+Annmaria/M
+Annmarie/M
+Annnora/M
+Annora/M
+annotated/U
+annotate/VNGXSD
+annotation/M
+annotator/MS
+announced/U
+announcement/SM
+announcer/M
+announce/ZGLRSD
+annoyance/MS
+annoyer/M
+annoying/Y
+annoy/ZGSRD
+annualized
+annual/YS
+annuitant/MS
+annuity/MS
+annular/YS
+annuli
+annulled
+annulling
+annulment/MS
+annul/SL
+annulus/M
+annum
+annunciate/XNGSD
+annunciation/M
+Annunciation/S
+annunciator/SM
+Anny/M
+anode/SM
+anodic
+anodize/GDS
+anodyne/SM
+anoint/DRLGS
+anointer/M
+anointment/SM
+anomalousness/M
+anomalous/YP
+anomaly/MS
+anomic
+anomie/M
+anon/S
+anonymity/MS
+anonymousness/M
+anonymous/YP
+anopheles/M
+anorak/SM
+anorectic/S
+anorexia/SM
+anorexic/S
+another/M
+Anouilh/M
+Ansell/M
+Ansel/M
+Anselma/M
+Anselm/M
+Anselmo/M
+Anshan/M
+ANSI/M
+Ansley/M
+ans/M
+Anson/M
+Anstice/M
+answerable/U
+answered/U
+answerer/M
+answer/MZGBSDR
+antacid/MS
+Antaeus/M
+antagonism/MS
+antagonistic
+antagonistically
+antagonist/MS
+antagonized/U
+antagonize/GZRSD
+antagonizing/U
+Antananarivo/M
+antarctic
+Antarctica/M
+Antarctic/M
+Antares
+anteater/MS
+antebellum
+antecedence/MS
+antecedent/SMY
+antechamber/SM
+antedate/GDS
+antediluvian/S
+anteing
+antelope/MS
+ante/MS
+antenatal
+antennae
+antenna/MS
+anterior/SY
+anteroom/SM
+ant/GSMD
+Anthea/M
+Anthe/M
+anthem/MGDS
+anther/MS
+Anthia/M
+Anthiathia/M
+anthill/S
+anthologist/MS
+anthologize/GDS
+anthology/SM
+Anthony/M
+anthraces
+anthracite/MS
+anthrax/M
+anthropic
+anthropocentric
+anthropogenic
+anthropoid/S
+anthropological/Y
+anthropologist/MS
+anthropology/SM
+anthropometric/S
+anthropometry/M
+anthropomorphic
+anthropomorphically
+anthropomorphism/SM
+anthropomorphizing
+anthropomorphous
+antiabortion
+antiabortionist/S
+antiaircraft
+antibacterial/S
+antibiotic/SM
+antibody/MS
+anticancer
+Antichrist/MS
+anticipated/U
+anticipate/XVGNSD
+anticipation/M
+anticipative/Y
+anticipatory
+anticked
+anticking
+anticlerical/S
+anticlimactic
+anticlimactically
+anticlimax/SM
+anticline/SM
+anticlockwise
+antic/MS
+anticoagulant/S
+anticoagulation/M
+anticommunism/SM
+anticommunist/SM
+anticompetitive
+anticyclone/MS
+anticyclonic
+antidemocratic
+antidepressant/SM
+antidisestablishmentarianism/M
+antidote/DSMG
+Antietam/M
+antifascist/SM
+antiformant
+antifreeze/SM
+antifundamentalist/M
+antigenic
+antigenicity/SM
+antigen/MS
+antigone
+Antigone/M
+Antigua/M
+antiheroes
+antihero/M
+antihistamine/MS
+antihistorical
+antiknock/MS
+antilabor
+Antillean
+Antilles
+antilogarithm/SM
+antilogs
+antimacassar/SM
+antimalarial/S
+antimatter/SM
+antimicrobial/S
+antimissile/S
+antimony/SM
+anting/M
+Antin/M
+antinomian
+antinomy/M
+antinuclear
+Antioch/M
+antioxidant/MS
+antiparticle/SM
+Antipas/M
+antipasti
+antipasto/MS
+antipathetic
+antipathy/SM
+antipersonnel
+antiperspirant/MS
+antiphonal/SY
+antiphon/SM
+antipodal/S
+antipodean/S
+antipode/MS
+Antipodes
+antipollution/S
+antipoverty
+antiquarianism/MS
+antiquarian/MS
+antiquary/SM
+antiquate/NGSD
+antiquation/M
+antique/MGDS
+antiquity/SM
+antiredeposition
+antiresonance/M
+antiresonator
+anti/S
+antisemitic
+antisemitism/M
+antisepses
+antisepsis/M
+antiseptically
+antiseptic/S
+antiserum/SM
+antislavery/S
+antisocial/Y
+antispasmodic/S
+antisubmarine
+antisymmetric
+antisymmetry
+antitank
+antitheses
+antithesis/M
+antithetic
+antithetical/Y
+antithyroid
+antitoxin/MS
+antitrust/MR
+antivenin/MS
+antiviral/S
+antivivisectionist/S
+antiwar
+antler/SDM
+Antofagasta/M
+Antoine/M
+Antoinette/M
+Antonella/M
+Antone/M
+Antonetta/M
+Antonia/M
+Antonie/M
+Antonietta/M
+Antoni/M
+Antonina/M
+Antonin/M
+Antonino/M
+Antoninus/M
+Antonio/M
+Antonius/M
+Anton/MS
+Antonovics/M
+Antony/M
+antonymous
+antonym/SM
+antral
+antsy/RT
+Antwan/M
+Antwerp/M
+Anubis/M
+anus/SM
+anvil/MDSG
+anxiety/MS
+anxiousness/SM
+anxious/PY
+any
+Anya/M
+anybody/S
+anyhow
+Any/M
+anymore
+anyone/MS
+anyplace
+anything/S
+anytime
+anyway/S
+anywhere/S
+anywise
+AOL/M
+aorta/MS
+aortic
+AP
+apace
+apache/MS
+Apache/MS
+Apalachicola/M
+apartheid/SM
+apart/LP
+apartment/MS
+apartness/M
+apathetic
+apathetically
+apathy/SM
+apatite/MS
+APB
+aped/A
+apelike
+ape/MDRSG
+Apennines
+aper/A
+aperiodic
+aperiodically
+aperiodicity/M
+aperitif/S
+aperture/MDS
+apex/MS
+aphasia/SM
+aphasic/S
+aphelia
+aphelion/SM
+aphid/MS
+aphonic
+aphorism/MS
+aphoristic
+aphoristically
+aphrodisiac/SM
+Aphrodite/M
+Apia/M
+apiarist/SM
+apiary/SM
+apical/YS
+apices's
+apiece
+apishness/M
+apish/YP
+aplenty
+aplomb/SM
+APO
+Apocalypse/M
+apocalypse/MS
+apocalyptic
+apocryphalness/M
+apocryphal/YP
+apocrypha/M
+Apocrypha/M
+apogee/MS
+apolar
+apolitical/Y
+Apollinaire/M
+Apollonian
+Apollo/SM
+apologetically/U
+apologetic/S
+apologetics/M
+apologia/SM
+apologist/MS
+apologize/GZSRD
+apologizer/M
+apologizes/A
+apologizing/U
+apology/MS
+apoplectic
+apoplexy/SM
+apostasy/SM
+apostate/SM
+apostatize/DSG
+apostleship/SM
+apostle/SM
+apostolic
+apostrophe/SM
+apostrophized
+apothecary/MS
+apothegm/MS
+apotheoses
+apotheosis/M
+apotheosized
+apotheosizes
+apotheosizing
+Appalachia/M
+Appalachian/MS
+appalling/Y
+appall/SDG
+Appaloosa/MS
+appaloosa/S
+appanage/M
+apparatus/SM
+apparel/SGMD
+apparency
+apparently/I
+apparentness/M
+apparent/U
+apparition/SM
+appealer/M
+appealing/UY
+appeal/SGMDRZ
+appear/AEGDS
+appearance/AMES
+appearer/S
+appease/DSRGZL
+appeased/U
+appeasement/MS
+appeaser/M
+appellant/MS
+appellate/VNX
+appellation/M
+appellative/MY
+appendage/MS
+appendectomy/SM
+appendices
+appendicitis/SM
+appendix/SM
+append/SGZDR
+appertain/DSG
+appetite/MVS
+appetizer/SM
+appetizing/YU
+Appia/M
+Appian/M
+applauder/M
+applaud/ZGSDR
+applause/MS
+applecart/M
+applejack/MS
+Apple/M
+apple/MS
+applesauce/SM
+Appleseed/M
+Appleton/M
+applet/S
+appliance/SM
+applicabilities
+applicability/IM
+applicable/I
+applicably
+applicant/MS
+applicate/V
+application/MA
+applicative/Y
+applicator/MS
+applier/SM
+appliquéd
+appliqué/MSG
+apply/AGSDXN
+appointee/SM
+appoint/ELSADG
+appointer/MS
+appointive
+appointment/ASEM
+Appolonia/M
+Appomattox/M
+apportion/GADLS
+apportionment/SAM
+appose/SDG
+appositeness/MS
+apposite/XYNVP
+apposition/M
+appositive/SY
+appraisal/SAM
+appraised/A
+appraisees
+appraiser/M
+appraises/A
+appraise/ZGDRS
+appraising/Y
+appreciable/I
+appreciably/I
+appreciated/U
+appreciate/XDSNGV
+appreciation/M
+appreciativeness/MI
+appreciative/PIY
+appreciator/MS
+appreciatory
+apprehend/DRSG
+apprehender/M
+apprehensible
+apprehension/SM
+apprehensiveness/SM
+apprehensive/YP
+apprentice/DSGM
+apprenticeship/SM
+apprise/DSG
+apprizer/SM
+apprizingly
+apprizings
+approachability/UM
+approachable/UI
+approach/BRSDZG
+approacher/M
+approbate/NX
+approbation/EMS
+appropriable
+appropriated/U
+appropriately/I
+appropriateness/SMI
+appropriate/XDSGNVYTP
+appropriation/M
+appropriator/SM
+approval/ESM
+approve/DSREG
+approved/U
+approver's/E
+approver/SM
+approving/YE
+approx
+approximate/XGNVYDS
+approximation/M
+approximative/Y
+appurtenance/MS
+appurtenant/S
+APR
+apricot/MS
+Aprilette/M
+April/MS
+Apr/M
+apron/SDMG
+apropos
+apse/MS
+apsis/M
+apter
+aptest
+aptitude/SM
+aptness/SMI
+aptness's/U
+apt/UPYI
+Apuleius/M
+aquaculture/MS
+aqualung/SM
+aquamarine/SM
+aquanaut/SM
+aquaplane/GSDM
+aquarium/MS
+Aquarius/MS
+aqua/SM
+aquatically
+aquatic/S
+aquavit/SM
+aqueduct/MS
+aqueous/Y
+aquiculture's
+aquifer/SM
+Aquila/M
+aquiline
+Aquinas/M
+Aquino/M
+Aquitaine/M
+AR
+Arabela/M
+Arabele/M
+Arabella/M
+Arabelle/M
+Arabel/M
+arabesque/SM
+Arabia/M
+Arabian/MS
+Arabic/M
+arability/MS
+Arabist/MS
+arable/S
+Arab/MS
+Araby/M
+Araceli/M
+arachnid/MS
+arachnoid/M
+arachnophobia
+Arafat/M
+Araguaya/M
+Araldo/M
+Aral/M
+Ara/M
+Aramaic/M
+Aramco/M
+Arapahoes
+Arapahoe's
+Arapaho/MS
+Ararat/M
+Araucanian/M
+Arawakan/M
+Arawak/M
+arbiter/MS
+arbitrage/GMZRSD
+arbitrager/M
+arbitrageur/S
+arbitrament/MS
+arbitrarily
+arbitrariness/MS
+arbitrary/P
+arbitrate/SDXVNG
+arbitration/M
+arbitrator/SM
+arbor/DMS
+arboreal/Y
+arbores
+arboretum/MS
+arborvitae/MS
+arbutus/SM
+ARC
+arcade/SDMG
+Arcadia/M
+Arcadian
+arcana/M
+arcane/P
+arc/DSGM
+archaeological/Y
+archaeologist/SM
+archaically
+archaic/P
+Archaimbaud/M
+archaism/SM
+archaist/MS
+archaize/GDRSZ
+archaizer/M
+Archambault/M
+archangel/SM
+archbishopric/SM
+archbishop/SM
+archdeacon/MS
+archdiocesan
+archdiocese/SM
+archduchess/MS
+archduke/MS
+Archean
+archenemy/SM
+archeologist's
+archeology/MS
+archer/M
+Archer/M
+archery/MS
+archetypal
+archetype/SM
+archfiend/SM
+archfool
+Archibald/M
+Archibaldo/M
+Archibold/M
+Archie/M
+archiepiscopal
+Archimedes/M
+arching/M
+archipelago/SM
+architect/MS
+architectonic/S
+architectonics/M
+architectural/Y
+architecture/SM
+architrave/MS
+archival
+archive/DRSGMZ
+archived/U
+archivist/MS
+Arch/MR
+archness/MS
+arch/PGVZTMYDSR
+archway/SM
+Archy/M
+arclike
+ARCO/M
+arcsine
+arctangent
+Arctic/M
+arctic/S
+Arcturus/M
+Ardabil
+Arda/MH
+Ardath/M
+Ardeen/M
+Ardelia/M
+Ardelis/M
+Ardella/M
+Ardelle/M
+ardency/M
+Ardene/M
+Ardenia/M
+Arden/M
+ardent/Y
+Ardine/M
+Ardisj/M
+Ardis/M
+Ardith/M
+ardor/SM
+Ardra/M
+arduousness/SM
+arduous/YP
+Ardyce/M
+Ardys
+Ardyth/M
+areal
+area/SM
+areawide
+are/BS
+Arel/M
+arenaceous
+arena/SM
+aren't
+Arequipa/M
+Ares
+Aretha/M
+Argentina/M
+Argentinean/S
+Argentine/SM
+Argentinian/S
+argent/MS
+arginine/MS
+Argonaut/MS
+argonaut/S
+argon/MS
+Argonne/M
+Argo/SM
+argosy/SM
+argot/SM
+arguable/IU
+arguably/IU
+argue/DSRGZ
+arguer/M
+argumentation/SM
+argumentativeness/MS
+argumentative/YP
+argument/SM
+Argus/M
+argyle/S
+Ariadne/M
+Ariana/M
+Arianism/M
+Arianist/SM
+aria/SM
+Aridatha/M
+aridity/SM
+aridness/M
+arid/TYRP
+Ariela/M
+Ariella/M
+Arielle/M
+Ariel/M
+Arie/SM
+Aries/S
+aright
+Ari/M
+Arin/M
+Ario/M
+Ariosto/M
+arise/GJSR
+arisen
+Aristarchus/M
+Aristides
+aristocracy/SM
+aristocratic
+aristocratically
+aristocrat/MS
+Aristophanes/M
+Aristotelean
+Aristotelian/M
+Aristotle/M
+arithmetical/Y
+arithmetician/SM
+arithmetic/MS
+arithmetize/SD
+Arius/M
+Ariz/M
+Arizona/M
+Arizonan/S
+Arizonian/S
+Arjuna/M
+Arkansan/MS
+Arkansas/M
+Arkhangelsk/M
+Ark/M
+ark/MS
+Arkwright/M
+Arlana/M
+Arlan/M
+Arlee/M
+Arleen/M
+Arlena/M
+Arlene/M
+Arlen/M
+Arleta/M
+Arlette/M
+Arley/M
+Arleyne/M
+Arlie/M
+Arliene/M
+Arlina/M
+Arlinda/M
+Arline/M
+Arlington/M
+Arlin/M
+Arluene/M
+Arly/M
+Arlyne/M
+Arlyn/M
+Armada/M
+armada/SM
+armadillo/MS
+Armageddon/SM
+Armagnac/M
+armament/EAS
+armament's/E
+Armand/M
+Armando/M
+Arman/M
+arm/ASEDG
+Armata/M
+armature/MGSD
+armband/SM
+armchair/MS
+Armco/M
+armed/U
+Armenia/M
+Armenian/MS
+armer/MES
+armful/SM
+armhole/MS
+arming/M
+Arminius/M
+Armin/M
+armistice/MS
+armless
+armlet/SM
+armload/M
+Armonk/M
+armored/U
+armorer/M
+armorial/S
+armory/DSM
+armor/ZRDMGS
+Armour/M
+armpit/MS
+armrest/MS
+arm's
+Armstrong/M
+Ar/MY
+army/SM
+Arnaldo/M
+Arneb/M
+Arne/M
+Arney/M
+Arnhem/M
+Arnie/M
+Arni/M
+Arnold/M
+Arnoldo/M
+Arno/M
+Arnuad/M
+Arnulfo/M
+Arny/M
+aroma/SM
+aromatherapist/S
+aromatherapy/S
+aromatically
+aromaticity/M
+aromaticness/M
+aromatic/SP
+Aron/M
+arose
+around
+arousal/MS
+aroused/U
+arouse/GSD
+ARPA/M
+Arpanet/M
+ARPANET/M
+arpeggio/SM
+arrack/M
+Arragon/M
+arraignment/MS
+arraign/SDGL
+arrangeable/A
+arranged/EA
+arrangement/AMSE
+arranger/M
+arranges/EA
+arrange/ZDSRLG
+arranging/EA
+arrant/Y
+arras/SM
+arrayer
+array/ESGMD
+arrear/SM
+arrest/ADSG
+arrestee/MS
+arrester/MS
+arresting/Y
+arrestor/MS
+Arrhenius/M
+arrhythmia/SM
+arrhythmic
+arrhythmical
+Arri/M
+arrival/MS
+arriver/M
+arrive/SRDG
+arrogance/MS
+arrogant/Y
+arrogate/XNGDS
+arrogation/M
+Arron/M
+arrowhead/SM
+arrowroot/MS
+arrow/SDMG
+arroyo/MS
+arr/TV
+arsenal/MS
+arsenate/M
+arsenic/MS
+arsenide/M
+arsine/MS
+arsonist/MS
+arson/SM
+Artair/M
+Artaxerxes/M
+artefact's
+Arte/M
+Artemas
+Artemis/M
+Artemus/M
+arterial/SY
+arteriolar
+arteriole/SM
+arterioscleroses
+arteriosclerosis/M
+artery/SM
+artesian
+artfulness/SM
+artful/YP
+Arther/M
+arthritic/S
+arthritides
+arthritis/M
+arthrogram/MS
+arthropod/SM
+arthroscope/S
+arthroscopic
+Arthurian
+Arthur/M
+artichoke/SM
+article/GMDS
+articulable/I
+articular
+articulated/EU
+articulately/I
+articulateness/IMS
+articulates/I
+articulate/VGNYXPSD
+articulation/M
+articulator/SM
+articulatory
+Artie/M
+artifact/MS
+artificer/M
+artifice/ZRSM
+artificiality/MS
+artificialness/M
+artificial/PY
+artillerist
+artilleryman/M
+artillerymen
+artillery/SM
+artiness/MS
+artisan/SM
+artiste/SM
+artistically/I
+artistic/I
+artist/MS
+artistry/SM
+artlessness/MS
+artless/YP
+Art/M
+art/SM
+artsy/RT
+Artur/M
+Arturo/M
+Artus/M
+artwork/MS
+Arty/M
+arty/TPR
+Aruba/M
+arum/MS
+Arvie/M
+Arvin/M
+Arv/M
+Arvy/M
+Aryan/MS
+Aryn/M
+as
+As
+A's
+Asa/M
+Asama/M
+asap
+ASAP
+asbestos/MS
+Ascella/M
+ascend/ADGS
+ascendancy/MS
+ascendant/SY
+ascender/SM
+Ascension/M
+ascension/SM
+ascent/SM
+ascertain/DSBLG
+ascertainment/MS
+ascetically
+asceticism/MS
+ascetic/SM
+ASCII
+ascot/MS
+ascribe/GSDB
+ascription/MS
+ascriptive
+Ase/M
+aseptically
+aseptic/S
+asexuality/MS
+asexual/Y
+Asgard/M
+ashame/D
+ashamed/UY
+Ashanti/M
+Ashbey/M
+Ashby/M
+ashcan/SM
+Ashely/M
+Asher/M
+Asheville/M
+Ashia/M
+Ashien/M
+Ashil/M
+Ashkenazim
+Ashkhabad/M
+Ashla/M
+Ashland/M
+Ashlan/M
+ashlar/GSDM
+Ashlee/M
+Ashleigh/M
+Ashlen/M
+Ashley/M
+Ashlie/M
+Ashli/M
+Ashlin/M
+Ashly/M
+ashman/M
+ash/MNDRSG
+Ashmolean/M
+Ash/MRY
+ashore
+ashram/SM
+Ashton/M
+ashtray/MS
+Ashurbanipal/M
+ashy/RT
+Asia/M
+Asian/MS
+Asiatic/SM
+aside/S
+Asilomar/M
+Asimov
+asinine/Y
+asininity/MS
+askance
+ask/DRZGS
+asked/U
+asker/M
+askew/P
+ASL
+aslant
+asleep
+Asmara/M
+asocial/S
+Asoka/M
+asparagus/MS
+aspartame/S
+ASPCA
+aspect/SM
+Aspell/M
+aspen/M
+Aspen/M
+asperity/SM
+asper/M
+aspersion/SM
+asphalt/MDRSG
+asphodel/MS
+asphyxia/MS
+asphyxiate/GNXSD
+asphyxiation/M
+aspic/MS
+Aspidiske/M
+aspidistra/MS
+aspirant/MS
+aspirate/NGDSX
+aspirational
+aspiration/M
+aspirator/SM
+aspire/GSRD
+aspirer/M
+aspirin/SM
+asplenium
+asp/MNRXS
+Asquith/M
+Assad/M
+assailable/U
+assailant/SM
+assail/BGDS
+Assamese/M
+Assam/M
+assassinate/DSGNX
+assassination/M
+assassin/MS
+assaulter/M
+assaultive/YP
+assault/SGVMDR
+assayer/M
+assay/SZGRD
+assemblage/MS
+assemble/ADSREG
+assembled/U
+assembler/EMS
+assemblies/A
+assembly/EAM
+assemblyman/M
+assemblymen
+Assembly/MS
+assemblywoman
+assemblywomen
+assent/SGMRD
+assert/ADGS
+asserter/MS
+assertional
+assertion/AMS
+assertiveness/SM
+assertive/PY
+assess/BLSDG
+assessed/A
+assesses/A
+assessment/SAM
+assessor/MS
+asset/SM
+asseverate/XSDNG
+asseveration/M
+asshole/MS!
+assiduity/SM
+assiduousness/SM
+assiduous/PY
+assign/ALBSGD
+assignation/MS
+assigned/U
+assignee/MS
+assigner/MS
+assignment/MAS
+assignor/MS
+assigns/CU
+assimilate/VNGXSD
+assimilationist/M
+assimilation/M
+Assisi/M
+assistance/SM
+assistantship/SM
+assistant/SM
+assisted/U
+assister/M
+assist/RDGS
+assize/MGSD
+ass/MNS
+assn
+assoc
+associable
+associated/U
+associate/SDEXNG
+associateship
+associational
+association/ME
+associative/Y
+associativity/S
+associator/MS
+assonance/SM
+assonant/S
+assorter/M
+assort/LRDSG
+assortment/SM
+asst
+assuaged/U
+assuage/SDG
+assumability
+assumer/M
+assume/SRDBJG
+assuming/UA
+assumption/SM
+assumptive
+assurance/AMS
+assure/AGSD
+assuredness/M
+assured/PYS
+assurer/SM
+assuring/YA
+Assyria/M
+Assyrian/SM
+Assyriology/M
+Astaire/SM
+Astarte/M
+astatine/MS
+aster/ESM
+asteria
+asterisked/U
+asterisk/SGMD
+astern
+asteroidal
+asteroid/SM
+asthma/MS
+asthmatic/S
+astigmatic/S
+astigmatism/SM
+astir
+astonish/GSDL
+astonishing/Y
+astonishment/SM
+Aston/M
+Astoria/M
+Astor/M
+astounding/Y
+astound/SDG
+astraddle
+Astrakhan/M
+astrakhan/SM
+astral/SY
+Astra/M
+astray
+astride
+Astrid/M
+astringency/SM
+astringent/YS
+Astrix/M
+astrolabe/MS
+astrologer/MS
+astrological/Y
+astrologist/M
+astrology/SM
+astronautical
+astronautic/S
+astronautics/M
+astronaut/SM
+astronomer/MS
+astronomic
+astronomical/Y
+astronomy/SM
+astrophysical
+astrophysicist/SM
+astrophysics/M
+Astroturf/M
+AstroTurf/S
+Asturias/M
+astuteness/MS
+astute/RTYP
+Asunción/M
+asunder
+Aswan/M
+asylum/MS
+asymmetric
+asymmetrical/Y
+asymmetry/MS
+asymptomatic
+asymptomatically
+asymptote/MS
+asymptotically
+asymptotic/Y
+asynchronism/M
+asynchronous/Y
+asynchrony
+at
+Atacama/M
+Atahualpa/M
+Atalanta/M
+Atari/M
+Atatürk/M
+atavism/MS
+atavistic
+atavist/MS
+ataxia/MS
+ataxic/S
+atelier/SM
+atemporal
+ate/S
+Athabasca/M
+Athabascan's
+Athabaskan/MS
+Athabaska's
+atheism/SM
+atheistic
+atheist/SM
+Athena/M
+Athene/M
+Athenian/SM
+Athens/M
+atheroscleroses
+atherosclerosis/M
+athirst
+athlete/MS
+athletically
+athleticism/M
+athletic/S
+athletics/M
+athwart
+atilt
+Atkins/M
+Atkinson/M
+Atlanta/M
+Atlante/MS
+atlantes
+Atlantic/M
+Atlantis/M
+atlas/SM
+Atlas/SM
+At/M
+Atman
+ATM/M
+atmosphere/DSM
+atmospherically
+atmospheric/S
+atoll/MS
+atomically
+atomicity/M
+atomic/S
+atomics/M
+atomistic
+atomization/SM
+atomize/GZDRS
+atomizer/M
+atom/SM
+atonality/MS
+atonal/Y
+atone/LDSG
+atonement/SM
+atop
+ATP
+Atreus/M
+atria
+atrial
+Atria/M
+atrium/M
+atrociousness/SM
+atrocious/YP
+atrocity/SM
+atrophic
+atrophy/DSGM
+atropine/SM
+Atropos/M
+Ats
+attach/BLGZMDRS
+attached/UA
+attacher/M
+attaché/S
+attachment/ASM
+attacker/M
+attack/GBZSDR
+attainabilities
+attainability/UM
+attainableness/M
+attainable/U
+attainably/U
+attain/AGSD
+attainder/MS
+attained/U
+attainer/MS
+attainment/MS
+attar/MS
+attempt/ADSG
+attempter/MS
+attendance/MS
+attendant/SM
+attended/U
+attendee/SM
+attender/M
+attend/SGZDR
+attentional
+attentionality
+attention/IMS
+attentiveness/IMS
+attentive/YIP
+attenuated/U
+attenuate/SDXGN
+attenuation/M
+attenuator/MS
+attestation/SM
+attested/U
+attester/M
+attest/GSDR
+Attic
+Attica/M
+attic/MS
+Attila/M
+attire/SDG
+attitude/MS
+attitudinal/Y
+attitudinize/SDG
+Attlee/M
+attn
+Attn
+attorney/SM
+attractant/SM
+attract/BSDGV
+attraction/MS
+attractivenesses
+attractiveness/UM
+attractive/UYP
+attractor/MS
+attributable/U
+attribute/BVNGRSDX
+attributed/U
+attributer/M
+attributional
+attribution/M
+attributive/SY
+attrition/MS
+Attucks
+attune/SDG
+atty
+ATV/S
+atwitter
+Atwood/M
+atypical/Y
+Aube/M
+Auberge/M
+aubergine/MS
+Auberon/M
+Auberta/M
+Aubert/M
+Aubine/M
+Aubree/M
+Aubrette/M
+Aubrey/M
+Aubrie/M
+Aubry/M
+auburn/SM
+Auckland/M
+auctioneer/SDMG
+auction/MDSG
+audaciousness/SM
+audacious/PY
+audacity/MS
+Auden/M
+audibility/MSI
+audible/I
+audibles
+audibly/I
+Audie/M
+audience/MS
+Audi/M
+audiogram/SM
+audiological
+audiologist/MS
+audiology/SM
+audiometer/MS
+audiometric
+audiometry/M
+audiophile/SM
+audio/SM
+audiotape/S
+audiovisual/S
+audited/U
+audition/MDSG
+auditorium/MS
+auditor/MS
+auditory/S
+audit/SMDVG
+Audra/M
+Audre/M
+Audrey/M
+Audrie/M
+Audrye/M
+Audry/M
+Audubon/M
+Audy/M
+Auerbach/M
+Augean
+auger/SM
+aught/S
+Augie/M
+Aug/M
+augmentation/SM
+augmentative/S
+augment/DRZGS
+augmenter/M
+augur/GDMS
+augury/SM
+Augusta/M
+Augustan/S
+Auguste/M
+Augustina/M
+Augustine/M
+Augustinian/S
+Augustin/M
+augustness/SM
+Augusto/M
+August/SM
+august/STPYR
+Augustus/M
+Augy/M
+auk/MS
+Au/M
+Aundrea/M
+auntie/MS
+aunt/MYS
+aunty's
+aural/Y
+Aura/M
+aura/SM
+Aurea/M
+Aurelea/M
+Aurelia/M
+Aurelie/M
+Aurelio/M
+Aurelius/M
+Aurel/M
+aureole/GMSD
+aureomycin
+Aureomycin/M
+Auria/M
+auric
+auricle/SM
+auricular
+Aurie/M
+Auriga/M
+Aurilia/M
+Aurlie/M
+Auroora/M
+auroral
+Aurora/M
+aurora/SM
+Aurore/M
+Aurthur/M
+Auschwitz/M
+auscultate/XDSNG
+auscultation/M
+auspice/SM
+auspicious/IPY
+auspiciousnesses
+auspiciousness/IM
+Aussie/MS
+Austen/M
+austereness/M
+austere/TYRP
+austerity/SM
+Austina/M
+Austine/M
+Austin/SM
+austral
+Australasia/M
+Australasian/S
+australes
+Australia/M
+Australian/MS
+Australis/M
+australites
+Australoid
+Australopithecus/M
+Austria/M
+Austrian/SM
+Austronesian
+authentically
+authenticated/U
+authenticate/GNDSX
+authentication/M
+authenticator/MS
+authenticity/MS
+authentic/UI
+author/DMGS
+authoress/S
+authorial
+authoritarianism/MS
+authoritarian/S
+authoritativeness/SM
+authoritative/PY
+authority/SM
+authorization/MAS
+authorize/AGDS
+authorized/U
+authorizer/SM
+authorizes/U
+authorship/MS
+autism/MS
+autistic/S
+autobahn/MS
+autobiographer/MS
+autobiographic
+autobiographical/Y
+autobiography/MS
+autoclave/SDGM
+autocollimator/M
+autocorrelate/GNSDX
+autocorrelation/M
+autocracy/SM
+autocratic
+autocratically
+autocrat/SM
+autodial/R
+autodidact/MS
+autofluorescence
+autograph/MDG
+autographs
+autoignition/M
+autoimmune
+autoimmunity/S
+autoloader
+automaker/S
+automata's
+automate/NGDSX
+automatically
+automatic/S
+automation/M
+automatism/SM
+automatize/DSG
+automaton/SM
+automobile/GDSM
+automorphism/SM
+automotive
+autonavigator/SM
+autonomic/S
+autonomous/Y
+autonomy/MS
+autopilot/SM
+autopsy/MDSG
+autoregressive
+autorepeat/GS
+auto/SDMG
+autostart
+autosuggestibility/M
+autotransformer/M
+autoworker/S
+autumnal/Y
+Autumn/M
+autumn/MS
+aux
+auxiliary/S
+auxin/MS
+AV
+availability/USM
+availableness/M
+available/U
+availably
+avail/BSZGRD
+availing/U
+avalanche/MGSD
+Avalon/M
+Ava/M
+avant
+avarice/SM
+avariciousness/M
+avaricious/PY
+avast/S
+avatar/MS
+avaunt/S
+avdp
+Aveline/M
+Ave/MS
+avenged/U
+avenger/M
+avenge/ZGSRD
+Aventine/M
+Aventino/M
+avenue/MS
+average/DSPGYM
+Averell/M
+Averill/M
+Averil/M
+Avernus/M
+averred
+averrer
+averring
+Averroes/M
+averseness/M
+averse/YNXP
+aversion/M
+avers/V
+avert/GSD
+Averyl/M
+Avery/M
+ave/S
+aves/C
+Avesta/M
+avg
+avian/S
+aviary/SM
+aviate/NX
+aviation/M
+aviator/SM
+aviatrices
+aviatrix/SM
+Avicenna/M
+Avictor/M
+avidity/MS
+avid/TPYR
+Avie/M
+Avigdor/M
+Avignon/M
+Avila/M
+avionic/S
+avionics/M
+Avior/M
+Avis
+avitaminoses
+avitaminosis/M
+Avivah/M
+Aviva/M
+Aviv/M
+avocado/MS
+avocational
+avocation/SM
+Avogadro/M
+avoidable/U
+avoidably/U
+avoidance/SM
+avoider/M
+avoid/ZRDBGS
+avoirdupois/MS
+Avon/M
+avouch/GDS
+avowal/EMS
+avowed/Y
+avower/M
+avow/GEDS
+Avram/M
+Avril/M
+Avrit/M
+Avrom/M
+avuncular
+av/ZR
+AWACS
+await/SDG
+awake/GS
+awakened/U
+awakener/M
+awakening/S
+awaken/SADG
+awarder/M
+award/RDSZG
+awareness/MSU
+aware/TRP
+awash
+away/PS
+aweigh
+awe/SM
+awesomeness/SM
+awesome/PY
+awestruck
+awfuller
+awfullest
+awfulness/SM
+awful/YP
+aw/GD
+awhile/S
+awkwardness/MS
+awkward/PRYT
+awl/MS
+awning/DM
+awn/MDJGS
+awoke
+awoken
+AWOL
+awry/RT
+ax/DRSZGM
+axehead/S
+Axel/M
+Axe/M
+axeman
+axial/Y
+axillary
+axiological/Y
+axiology/M
+axiomatically
+axiomatic/S
+axiomatization/MS
+axiomatize/GDS
+axiom/SM
+axion/SM
+axis/SM
+axle/MS
+axletree/MS
+Ax/M
+axolotl/SM
+axon/SM
+ayah/M
+ayahs
+Ayala/M
+ayatollah
+ayatollahs
+aye/MZRS
+Ayers
+Aylmar/M
+Aylmer/M
+Aymara/M
+Aymer/M
+Ayn/M
+AZ
+azalea/SM
+Azania/M
+Azazel/M
+Azerbaijan/M
+azimuthal/Y
+azimuth/M
+azimuths
+Azores
+Azov/M
+AZT
+Aztecan
+Aztec/MS
+azure/MS
+BA
+Baal/SM
+baa/SDG
+Babara/M
+Babar's
+Babbage/M
+Babbette/M
+Babbie/M
+babbitt/GDS
+Babbitt/M
+babbler/M
+babble/RSDGZ
+Babb/M
+Babcock/M
+Babel/MS
+babel/S
+babe/SM
+Babette/M
+Babita/M
+Babka/M
+baboon/MS
+Bab/SM
+babushka/MS
+babyhood/MS
+babyish
+Babylonia/M
+Babylonian/SM
+Babylon/MS
+babysat
+babysit/S
+babysitter/S
+babysitting
+baby/TDSRMG
+Bacall/M
+Bacardi/M
+baccalaureate/MS
+baccarat/SM
+bacchanalia
+Bacchanalia/M
+bacchanalian/S
+bacchanal/SM
+Bacchic
+Bacchus/M
+bachelorhood/SM
+bachelor/SM
+Bach/M
+bacillary
+bacilli
+bacillus/MS
+backache/SM
+backarrow
+backbencher/M
+backbench/ZR
+backbiter/M
+backbite/S
+backbitten
+backbit/ZGJR
+backboard/SM
+backbone/SM
+backbreaking
+backchaining
+backcloth/M
+backdate/GDS
+backdrop/MS
+backdropped
+backdropping
+backed/U
+backer/M
+backfield/SM
+backfill/SDG
+backfire/GDS
+backgammon/MS
+background/SDRMZG
+back/GZDRMSJ
+backhanded/Y
+backhander/M
+backhand/RDMSZG
+backhoe/S
+backing/M
+backlash/GRSDM
+backless
+backlogged
+backlogging
+backlog/MS
+backorder
+backpacker/M
+backpack/ZGSMRD
+backpedal/DGS
+backplane/MS
+backplate/SM
+backrest/MS
+backscatter/SMDG
+backseat/S
+backside/SM
+backslapper/MS
+backslapping/M
+backslash/DSG
+backslider/M
+backslide/S
+backslid/RZG
+backspace/GSD
+backspin/SM
+backstabber/M
+backstabbing
+backstage
+backstair/S
+backstitch/GDSM
+backstop/MS
+backstopped
+backstopping
+backstreet/M
+backstretch/SM
+backstroke/GMDS
+backtalk/S
+backtrack/SDRGZ
+backup/SM
+Backus/M
+backwardness/MS
+backward/YSP
+backwash/SDMG
+backwater/SM
+backwood/S
+backwoodsman/M
+backwoodsmen
+backyard/MS
+baconer/M
+Bacon/M
+bacon/SRM
+bacterial/Y
+bacteria/MS
+bactericidal
+bactericide/SM
+bacteriologic
+bacteriological
+bacteriologist/MS
+bacteriology/SM
+bacterium/M
+Bactria/M
+badder
+baddest
+baddie/MS
+bade
+Baden/M
+badge/DSRGMZ
+badger/DMG
+badinage/DSMG
+badland/S
+Badlands/M
+badman/M
+badmen
+badminton/MS
+badmouth/DG
+badmouths
+badness/SM
+bad/PSNY
+Baedeker/SM
+Baez/M
+Baffin/M
+bafflement/MS
+baffler/M
+baffle/RSDGZL
+baffling/Y
+bagatelle/MS
+bagel/SM
+bagful/MS
+baggageman
+baggagemen
+baggage/SM
+bagged/M
+bagger/SM
+baggily
+bagginess/MS
+bagging/M
+baggy/PRST
+Baghdad/M
+bagpiper/M
+bagpipe/RSMZ
+Bagrodia/MS
+bag/SM
+baguette/SM
+Baguio/M
+bah
+Baha'i
+Bahama/MS
+Bahamanian/S
+Bahamian/MS
+Baha'ullah
+Bahia/M
+Bahrain/M
+bahs
+Baikal/M
+Bailey/SM
+bail/GSMYDRB
+Bailie/M
+bailiff/SM
+bailiwick/MS
+Baillie/M
+Bail/M
+bailout/MS
+bailsman/M
+bailsmen
+Baily/M
+Baird/M
+bairn/SM
+baiter/M
+bait/GSMDR
+baize/GMDS
+Baja/M
+baked/U
+bakehouse/M
+Bakelite/M
+baker/M
+Baker/M
+Bakersfield/M
+bakery/SM
+bakeshop/S
+bake/ZGJDRS
+baking/M
+baklava/M
+baksheesh/SM
+Baku/M
+Bakunin/M
+balaclava/MS
+balalaika/MS
+balanced/A
+balancedness
+balancer/MS
+balance's
+balance/USDG
+Balanchine/M
+Balboa/M
+balboa/SM
+balcony/MSD
+balderdash/MS
+Balder/M
+baldfaced
+Bald/MR
+baldness/MS
+bald/PYDRGST
+baldric/SM
+Balduin/M
+Baldwin/M
+baldy
+Balearic/M
+baleen/MS
+balefuller
+balefullest
+balefulness/MS
+baleful/YP
+Bale/M
+bale/MZGDRS
+baler/M
+Balfour/M
+Bali/M
+Balinese
+balkanization
+balkanize/DG
+Balkan/SM
+balker/M
+balk/GDRS
+Balkhash/M
+balkiness/M
+balky/PRT
+balladeer/MS
+ballade/MS
+balladry/MS
+ballad/SM
+Ballard/SM
+ballast/SGMD
+ballcock/S
+ballerina/MS
+baller/M
+balletic
+ballet/MS
+ballfields
+ballgame/S
+ball/GZMSDR
+ballistic/S
+ballistics/M
+Ball/M
+balloonist/S
+balloon/RDMZGS
+balloter/M
+ballot/MRDGS
+ballpark/SM
+ballplayer/SM
+ballpoint/SM
+ballroom/SM
+ballsy/TR
+ballyhoo/SGMD
+balminess/SM
+balm/MS
+balmy/PRT
+baloney/SM
+balsam/GMDS
+balsamic
+balsa/MS
+Balthazar/M
+Baltic/M
+Baltimore/M
+Baluchistan/M
+baluster/MS
+balustrade/SM
+Balzac/M
+Ba/M
+Bamako/M
+Bamberger/M
+Bambie/M
+Bambi/M
+bamboo/SM
+bamboozle/GSD
+Bamby/M
+Banach/M
+banality/MS
+banal/TYR
+banana/SM
+Bancroft/M
+bandager/M
+bandage/RSDMG
+bandanna/SM
+bandbox/MS
+bandeau/M
+bandeaux
+band/EDGS
+bander/M
+banding/M
+bandit/MS
+banditry/MS
+bandmaster/MS
+bandoleer/SM
+bandpass
+band's
+bandsman/M
+bandsmen
+bandstand/SM
+bandstop
+Bandung/M
+bandwagon/MS
+bandwidth/M
+bandwidths
+bandy/TGRSD
+banefuller
+banefullest
+baneful/Y
+bane/MS
+Bangalore/M
+banger/M
+bang/GDRZMS
+bangkok
+Bangkok/M
+Bangladeshi/S
+Bangladesh/M
+bangle/MS
+Bangor/M
+Bangui/M
+bani
+banisher/M
+banishment/MS
+banish/RSDGL
+banister/MS
+Banjarmasin/M
+banjoist/SM
+banjo/MS
+Banjul/M
+bankbook/SM
+bankcard/S
+banker/M
+bank/GZJDRMBS
+banking/M
+Bank/MS
+banknote/S
+bankroll/DMSG
+bankruptcy/MS
+bankrupt/DMGS
+Banky/M
+Ban/M
+banned/U
+Banneker/M
+banner/SDMG
+banning/U
+Bannister/M
+bannister's
+bannock/SM
+banns
+banqueter/M
+banquet/SZGJMRD
+banquette/MS
+ban/SGMD
+banshee/MS
+bans/U
+bantam/MS
+bantamweight/MS
+banterer/M
+bantering/Y
+banter/RDSG
+Banting/M
+Bantu/SM
+banyan/MS
+banzai/S
+baobab/SM
+Baotou/M
+baptismal/Y
+baptism/SM
+Baptiste/M
+baptistery/MS
+baptist/MS
+Baptist/MS
+baptistry's
+baptized/U
+baptizer/M
+baptize/SRDZG
+baptizes/U
+Barabbas/M
+Barbabas/M
+Barbabra/M
+Barbadian/S
+Barbados/M
+Barbaraanne/M
+Barbara/M
+Barbarella/M
+barbarianism/MS
+barbarian/MS
+barbaric
+barbarically
+barbarism/MS
+barbarity/SM
+barbarize/SDG
+Barbarossa/M
+barbarousness/M
+barbarous/PY
+Barbary/M
+barb/DRMSGZ
+barbecue/DRSMG
+barbed/P
+Barbee/M
+barbell/SM
+barbel/MS
+Barbe/M
+barbeque's
+barber/DMG
+barbered/U
+Barber/M
+barberry/MS
+barbershop/MS
+Barbette/M
+Barbey/M
+Barbie/M
+Barbi/M
+barbital/M
+barbiturate/MS
+Barbour/M
+Barbra/M
+Barb/RM
+Barbuda/M
+barbwire/SM
+Barby/M
+barcarole/SM
+Barcelona/M
+Barclay/M
+Bardeen/M
+Barde/M
+bardic
+Bard/M
+bard/MDSG
+bareback/D
+barefacedness/M
+barefaced/YP
+barefoot/D
+barehanded
+bareheaded
+barelegged
+bareness/MS
+Barents/M
+bare/YSP
+barfly/SM
+barf/YDSG
+bargainer/M
+bargain/ZGSDRM
+barge/DSGM
+bargeman/M
+bargemen
+bargepole/M
+barhopped
+barhopping
+barhop/S
+Bari/M
+baritone/MS
+barium/MS
+barked/C
+barkeeper/M
+barkeep/SRZ
+barker/M
+Barker/M
+bark/GZDRMS
+Barkley/M
+barks/C
+barleycorn/MS
+barley/MS
+Barlow/M
+barmaid/SM
+barman/M
+barmen
+Bar/MH
+Barnabas
+Barnabe/M
+Barnaby/M
+barnacle/MDS
+Barnard/M
+Barnaul/M
+Barnebas/M
+Barnes
+Barnett/M
+Barney/M
+barnful
+barn/GDSM
+Barnhard/M
+Barnie/M
+Barn/M
+barnsful
+barnstorm/DRGZS
+barnstormer/M
+Barnum/M
+barnyard/MS
+Barny/M
+Baroda/M
+barometer/MS
+barometric
+barometrically
+baronage/MS
+baroness/MS
+baronetcy/SM
+baronet/MS
+baronial
+Baron/M
+baron/SM
+barony/SM
+baroque/SPMY
+barque's
+Barquisimeto/M
+barracker/M
+barrack/SDRG
+barracuda/MS
+barrage/MGSD
+Barranquilla/M
+barred/ECU
+barre/GMDSJ
+barrel/SGMD
+barrenness/SM
+barren/SPRT
+Barrera/M
+Barret/M
+barrette/SM
+Barrett/M
+barricade/SDMG
+Barrie/M
+barrier/MS
+barring/R
+barrio/SM
+Barri/SM
+barrister/MS
+Barr/M
+Barron/M
+barroom/SM
+barrow/MS
+Barry/M
+Barrymore/MS
+bars/ECU
+barstool/SM
+Barstow/M
+Bartel/M
+bartender/M
+bartend/ZR
+barterer/M
+barter/SRDZG
+bar/TGMDRS
+Barthel/M
+Barth/M
+Bartholdi/M
+Bartholemy/M
+Bartholomeo/M
+Bartholomeus/M
+Bartholomew/M
+Bartie/M
+Bartlet/M
+Bartlett/M
+Bart/M
+Bartók/M
+Bartolemo/M
+Bartolomeo/M
+Barton/M
+Bartram/M
+Barty/M
+barycenter
+barycentre's
+barycentric
+Bary/M
+baryon/SM
+Baryram/M
+Baryshnikov/M
+basaltic
+basalt/SM
+basal/Y
+Bascom/M
+bas/DRSTG
+baseball/MS
+baseband
+baseboard/MS
+base/CGRSDL
+baseless
+baseline/SM
+Basel/M
+basely
+Base/M
+baseman/M
+basemen
+basement/CSM
+baseness/MS
+baseplate/M
+base's
+basetting
+bashfulness/MS
+bashful/PY
+bash/JGDSR
+Basho/M
+Basia/M
+BASIC
+basically
+basic/S
+Basie/M
+basilar
+Basile/M
+basilica/SM
+Basilio/M
+basilisk/SM
+Basilius/M
+Basil/M
+basil/MS
+basin/DMS
+basinful/S
+basis/M
+basketball/MS
+basketry/MS
+basket/SM
+basketwork/SM
+bask/GSD
+basophilic
+Basque/SM
+Basra/M
+Basseterre/M
+basset/GMDS
+Bassett/M
+bassinet/SM
+bassist/MS
+Bass/M
+basso/MS
+bassoonist/MS
+bassoon/MS
+bass/SM
+basswood/SM
+bastardization/MS
+bastardized/U
+bastardize/SDG
+bastard/MYS
+bastardy/MS
+baste/NXS
+baster/M
+Bastian/M
+Bastien/M
+Bastille/M
+basting/M
+bastion/DM
+bast/SGZMDR
+Basutoland/M
+Bataan/M
+Batavia/M
+batch/MRSDG
+bated/U
+bate/KGSADC
+bater/AC
+Bates
+bathe
+bather/M
+bathetic
+bathhouse/SM
+bath/JMDSRGZ
+bathmat/S
+Batholomew/M
+bathos/SM
+bathrobe/MS
+bathroom/SDM
+baths
+Bathsheba/M
+bathtub/MS
+bathwater
+bathyscaphe's
+bathysphere/MS
+batik/DMSG
+Batista/M
+batiste/SM
+Bat/M
+batman/M
+Batman/M
+batmen
+baton/SM
+Batsheva/M
+batsman/M
+bat/SMDRG
+batsmen
+battalion/MS
+batted
+batten/SDMG
+batter/SRDZG
+battery/MS
+batting/MS
+battledore/MS
+battledress
+battlefield/SM
+battlefront/SM
+battle/GMZRSDL
+battleground/SM
+Battle/M
+battlement/SMD
+battler/M
+battleship/MS
+batty/RT
+Batu/M
+batwings
+bauble/SM
+Baudelaire/M
+baud/M
+Baudoin/M
+Baudouin/M
+Bauer/M
+Bauhaus/M
+baulk/GSDM
+Bausch/M
+bauxite/SM
+Bavaria/M
+Bavarian/S
+bawdily
+bawdiness/MS
+bawd/SM
+bawdy/PRST
+bawler/M
+bawl/SGDR
+Baxie/M
+Bax/M
+Baxter/M
+Baxy/M
+Bayamon
+Bayard/M
+bayberry/MS
+Bayda/M
+Bayer/M
+Bayes
+Bayesian
+bay/GSMDY
+Baylor/M
+Bay/MR
+bayonet/SGMD
+Bayonne/M
+bayou/MS
+Bayreuth/M
+bazaar/MS
+bazillion/S
+bazooka/MS
+BB
+BBB
+BBC
+bbl
+BBQ
+BBS
+BC
+BCD
+bdrm
+beachcomber/SM
+beachhead/SM
+Beach/M
+beach/MSDG
+beachwear/M
+beacon/DMSG
+beading/M
+Beadle/M
+beadle/SM
+bead/SJGMD
+beadsman/M
+beadworker
+beady/TR
+beagle/SDGM
+beaker/M
+beak/ZSDRM
+Beale/M
+Bealle/M
+Bea/M
+beam/MDRSGZ
+beanbag/SM
+bean/DRMGZS
+beanie/SM
+Bean/M
+beanpole/MS
+beanstalk/SM
+bearable/U
+bearably/U
+beard/DSGM
+bearded/P
+beardless
+Beard/M
+Beardmore/M
+Beardsley/M
+bearer/M
+bearing/M
+bearishness/SM
+bearish/PY
+bearlike
+Bear/M
+Bearnaise/M
+Bearnard/M
+bearskin/MS
+bear/ZBRSJG
+Beasley/M
+beasties
+beastings/M
+beastliness/MS
+beastly/PTR
+beast/SJMY
+beatable/U
+beatably/U
+beaten/U
+beater/M
+beatific
+beatifically
+beatification/M
+beatify/GNXDS
+beating/M
+beatitude/MS
+Beatlemania/M
+Beatles/M
+beatnik/SM
+beat/NRGSBZJ
+Beatrice/M
+Beatrisa/M
+Beatrix/M
+Beatriz/M
+Beauchamps
+Beaufort/M
+Beaujolais/M
+Beau/M
+Beaumarchais/M
+Beaumont/M
+beau/MS
+Beauregard/M
+beauteousness/M
+beauteous/YP
+beautician/MS
+beautification/M
+beautifier/M
+beautifully/U
+beautifulness/M
+beautiful/PTYR
+beautify/SRDNGXZ
+beaut/SM
+beauty/SM
+Beauvoir/M
+beaux's
+beaver/DMSG
+Beaverton/M
+Bebe/M
+bebop/MS
+becalm/GDS
+became
+because
+Becca/M
+Bechtel/M
+Becka/M
+Becker/M
+Becket/M
+Beckett/M
+beck/GSDM
+Beckie/M
+Becki/M
+beckon/SDG
+Beck/RM
+Becky/M
+becloud/SGD
+become/GJS
+becoming/UY
+Becquerel/M
+bedaub/GDS
+bedazzle/GLDS
+bedazzlement/SM
+bedbug/SM
+bedchamber/M
+bedclothes
+bedded
+bedder/MS
+bedding/MS
+bedeck/DGS
+Bede/M
+bedevil/DGLS
+bedevilment/SM
+bedfast
+bedfellow/MS
+Bedford/M
+bedimmed
+bedimming
+bedim/S
+bedizen/DGS
+bedlam/MS
+bedlinen
+bedmaker/SM
+bedmate/MS
+bed/MS
+Bedouin/SM
+bedpan/SM
+bedpost/SM
+bedraggle/GSD
+bedridden
+bedrock/SM
+bedroll/SM
+bedroom/DMS
+bedsheets
+bedside/MS
+bedsit
+bedsitter/M
+bedsore/MS
+bedspread/SM
+bedspring/SM
+bedstead/SM
+bedstraw/M
+bedtime/SM
+Beebe/M
+beebread/MS
+Beecher/M
+beech/MRSN
+beechnut/MS
+beechwood
+beefburger/SM
+beefcake/MS
+beef/GZSDRM
+beefiness/MS
+beefsteak/MS
+beefy/TRP
+beehive/MS
+beekeeper/MS
+beekeeping/SM
+beeline/MGSD
+Beelzebub/M
+Bee/M
+bee/MZGJRS
+been/S
+beeper/M
+beep/GZSMDR
+Beerbohm/M
+beer/M
+beermat/S
+beery/TR
+beeswax/DSMG
+Beethoven/M
+beetle/GMRSD
+Beeton/M
+beetroot/M
+beet/SM
+beeves/M
+befall/SGN
+befell
+befit/SM
+befitted
+befitting/Y
+befogged
+befogging
+befog/S
+before
+beforehand
+befoul/GSD
+befriend/DGS
+befuddle/GLDS
+befuddlement/SM
+began
+beget/S
+begetting
+beggar/DYMSG
+beggarliness/M
+beggarly/P
+beggary/MS
+begged
+begging
+Begin/M
+beginner/MS
+beginning/MS
+begin/S
+begone/S
+begonia/SM
+begot
+begotten
+begrime/SDG
+begrudge/GDRS
+begrudging/Y
+beg/S
+beguilement/SM
+beguiler/M
+beguile/RSDLZG
+beguiling/Y
+beguine/SM
+begum/MS
+begun
+behalf/M
+behalves
+Behan/M
+behave/GRSD
+behavioral/Y
+behaviorism/MS
+behavioristic/S
+behaviorist/S
+behavior/SMD
+behead/GSD
+beheld
+behemoth/M
+behemoths
+behest/SM
+behindhand
+behind/S
+beholder/M
+behold/ZGRNS
+behoofs
+behoove/SDJMG
+behooving/YM
+Behring/M
+Beiderbecke/M
+beige/MS
+Beijing
+Beilul/M
+being/M
+Beirut/M
+Beitris/M
+bejewel/SDG
+Bekesy/M
+Bekki/M
+be/KS
+belabor/MDSG
+Bela/M
+Belarus
+belate/D
+belatedness/M
+belated/PY
+Belau/M
+belay/GSD
+belch/GSD
+beleaguer/GDS
+Belem/M
+Belfast/M
+belfry/SM
+Belgian/MS
+Belgium/M
+Belg/M
+Belgrade/M
+Belia/M
+Belicia/M
+belie
+belief/ESUM
+belier/M
+believability's
+believability/U
+believable/U
+believably/U
+believed/U
+believe/EZGDRS
+believer/MUSE
+believing/U
+Belinda/M
+Belita/M
+belittlement/MS
+belittler/M
+belittle/RSDGL
+Belize/M
+belladonna/MS
+Bella/M
+Bellamy/M
+Bellanca/M
+Bellatrix/M
+bellboy/MS
+belled/A
+Belle/M
+belle/MS
+belletristic
+belletrist/SM
+Belleville/M
+bellflower/M
+bell/GSMD
+bellhop/MS
+bellicoseness/M
+bellicose/YP
+bellicosity/MS
+belligerence/SM
+belligerency/MS
+belligerent/SMY
+Bellina/M
+belling/A
+Bellini/M
+Bell/M
+bellman/M
+bellmen
+Bellovin/M
+bellow/DGS
+Bellow/M
+bellows/M
+bells/A
+bellwether/MS
+Bellwood/M
+bellyacher/M
+bellyache/SRDGM
+bellybutton/MS
+bellyfull
+bellyful/MS
+belly/SDGM
+Bel/M
+Belmont/M
+Belmopan/M
+Beloit/M
+belong/DGJS
+belonging/MP
+Belorussian/S
+Belorussia's
+belove/D
+beloved/S
+below/S
+Belshazzar/M
+belted/U
+belt/GSMD
+belting/M
+Belton/M
+Beltran/M
+Beltsville/M
+beltway/SM
+beluga/SM
+Belushi/M
+Belva/M
+belvedere/M
+Belvia/M
+bely/DSRG
+beman
+Be/MH
+bemire/SDG
+bemoan/GDS
+bemused/Y
+bemuse/GSDL
+bemusement/SM
+Benacerraf/M
+Benares's
+bencher/M
+benchmark/GDMS
+bench/MRSDG
+bend/BUSG
+bended
+Bender/M
+bender/MS
+Bendick/M
+Bendicty/M
+Bendite/M
+Bendix/M
+beneath
+Benedetta/M
+Benedetto/M
+Benedick/M
+Benedicta/M
+Benedictine/MS
+benediction/MS
+Benedict/M
+Benedicto/M
+benedictory
+Benedikta/M
+Benedikt/M
+benefaction/MS
+benefactor/MS
+benefactress/S
+benefice/MGSD
+beneficence/SM
+beneficent/Y
+beneficialness/M
+beneficial/PY
+beneficiary/MS
+benefiter/M
+benefit/SRDMZG
+Benelux/M
+Benet/M
+Benetta/M
+Benetton/M
+benevolence/SM
+benevolentness/M
+benevolent/YP
+Bengali/M
+Bengal/SM
+Benghazi/M
+Bengt/M
+Beniamino/M
+benightedness/M
+benighted/YP
+benignant
+benignity/MS
+benign/Y
+Beninese
+Benin/M
+Benita/M
+Benito/M
+Benjamen/M
+Benjamin/M
+Benjie/M
+Benji/M
+Benjy/M
+Ben/M
+Bennett/M
+Bennie/M
+Benni/M
+Bennington/M
+Benn/M
+Benny/M
+Benoite/M
+Benoit/M
+Benson/M
+Bentham/M
+Bentlee/M
+Bentley/MS
+Bent/M
+Benton/M
+bents
+bent/U
+bentwood/SM
+benumb/SGD
+Benyamin/M
+Benzedrine/M
+benzene/MS
+benzine/SM
+Benz/M
+Beograd's
+Beowulf/M
+bequeath/GSD
+bequeaths
+bequest/MS
+berate/GSD
+Berber/MS
+bereave/GLSD
+bereavement/MS
+bereft
+Berenice/M
+Beret/M
+beret/SM
+Bergen/M
+Bergerac/M
+Berger/M
+Berget/M
+Berglund/M
+Bergman/M
+Berg/NRM
+berg/NRSM
+Bergson/M
+Bergsten/M
+Bergstrom/M
+beribbon/D
+beriberi/SM
+Beringer/M
+Bering/RM
+Berkeley/M
+berkelium/SM
+Berke/M
+Berkie/M
+Berkley/M
+Berkly/M
+Berkowitz/M
+Berkshire/SM
+Berky/M
+Berk/YM
+Berle/M
+Berliner/M
+Berlin/SZRM
+Berlioz/M
+Berlitz/M
+Berman/M
+Ber/MG
+berm/SM
+Bermuda/MS
+Bermudan/S
+Bermudian/S
+Bernadene/M
+Bernadette/M
+Bernadina/M
+Bernadine/M
+Berna/M
+Bernardina/M
+Bernardine/M
+Bernardino/M
+Bernard/M
+Bernardo/M
+Bernarr/M
+Bernays/M
+Bernbach/M
+Bernelle/M
+Berne's
+Bernese
+Bernete/M
+Bernetta/M
+Bernette/M
+Bernhard/M
+Bernhardt/M
+Bernice/M
+Berniece/M
+Bernie/M
+Berni/M
+Bernini/M
+Bernita/M
+Bern/M
+Bernoulli/M
+Bernstein/M
+Berny/M
+Berra/M
+Berrie/M
+Berri/M
+berrylike
+Berry/M
+berry/SDMG
+berserker/M
+berserk/SR
+Berta/M
+Berte/M
+Bertha/M
+Berthe/M
+berth/MDGJ
+berths
+Bertie/M
+Bertillon/M
+Berti/M
+Bertina/M
+Bertine/M
+Bert/M
+Berton/M
+Bertram/M
+Bertrand/M
+Bertrando/M
+Berty/M
+Beryle/M
+beryllium/MS
+Beryl/M
+beryl/SM
+Berzelius/M
+bes
+beseecher/M
+beseeching/Y
+beseech/RSJZG
+beseem/GDS
+beset/S
+besetting
+beside/S
+besieger/M
+besiege/SRDZG
+besmear/GSD
+besmirch/GSD
+besom/GMDS
+besot/S
+besotted
+besotting
+besought
+bespangle/GSD
+bespatter/SGD
+bespeak/SG
+bespectacled
+bespoke
+bespoken
+Bess
+Bessel/M
+Bessemer/M
+Bessie/M
+Bessy/M
+best/DRSG
+bestiality/MS
+bestial/Y
+bestiary/MS
+bestirred
+bestirring
+bestir/S
+Best/M
+bestowal/SM
+bestow/SGD
+bestrew/DGS
+bestrewn
+bestridden
+bestride/SG
+bestrode
+bestseller/MS
+bestselling
+bestubble/D
+betaken
+betake/SG
+beta/SM
+betatron/M
+betcha
+Betelgeuse/M
+betel/MS
+Bethanne/M
+Bethany/M
+bethel/M
+Bethe/M
+Bethena/M
+Bethesda/M
+Bethina/M
+bethink/GS
+Bethlehem/M
+beth/M
+Beth/M
+bethought
+Bethune
+betide/GSD
+betimes
+bet/MS
+betoken/GSD
+betook
+betrayal/SM
+betrayer/M
+betray/SRDZG
+betrothal/SM
+betrothed/U
+betroth/GD
+betroths
+Betsey/M
+Betsy/M
+Betta/M
+Betteanne/M
+Betteann/M
+Bette/M
+betterment/MS
+better/SDLG
+Bettie/M
+Betti/M
+Bettina/M
+Bettine/M
+betting
+bettor/SM
+Bettye/M
+Betty/SM
+betweenness/M
+between/SP
+betwixt
+Beulah/M
+Bevan/M
+bevel/SJGMRD
+beverage/MS
+Beverie/M
+Beverlee/M
+Beverley/M
+Beverlie/M
+Beverly/M
+Bevin/M
+Bevon/M
+Bev's
+Bevvy/M
+bevy/SM
+bewail/GDS
+beware/GSD
+bewhisker/D
+bewigged
+bewildered/PY
+bewildering/Y
+bewilder/LDSG
+bewilderment/SM
+bewitching/Y
+bewitch/LGDS
+bewitchment/SM
+bey/MS
+beyond/S
+bezel/MS
+bf
+B/GT
+Bhopal/M
+Bhutanese
+Bhutan/M
+Bhutto/M
+Bialystok/M
+Bianca/M
+Bianco/M
+Bianka/M
+biannual/Y
+bias/DSMPG
+biased/U
+biathlon/MS
+biaxial/Y
+bibbed
+Bibbie/M
+bibbing
+Bibbye/M
+Bibby/M
+Bibi/M
+bible/MS
+Bible/MS
+biblical/Y
+biblicists
+bibliographer/MS
+bibliographical/Y
+bibliographic/S
+bibliography/MS
+bibliophile/MS
+Bib/M
+bib/MS
+bibulous
+bicameral
+bicameralism/MS
+bicarb/MS
+bicarbonate/MS
+bicentenary/S
+bicentennial/S
+bicep/S
+biceps/M
+bichromate/DM
+bickerer/M
+bickering/M
+bicker/SRDZG
+biconcave
+biconnected
+biconvex
+bicuspid/S
+bicycler/M
+bicycle/RSDMZG
+bicyclist/SM
+biddable
+bidden/U
+bidder/MS
+Biddie/M
+bidding/MS
+Biddle/M
+Biddy/M
+biddy/SM
+bider/M
+bide/S
+bidet/SM
+Bidget/M
+bid/GMRS
+bidiagonal
+bidirectional/Y
+bids/A
+biennial/SY
+biennium/SM
+Bienville/M
+Bierce/M
+bier/M
+bifocal/S
+bifurcate/SDXGNY
+bifurcation/M
+bigamist/SM
+bigamous
+bigamy/SM
+Bigelow/M
+Bigfoot
+bigged
+bigger
+biggest
+biggie/SM
+bigging
+biggish
+bighead/MS
+bigheartedness/S
+bighearted/P
+bighorn/MS
+bight/SMDG
+bigmouth/M
+bigmouths
+bigness/SM
+bigoted/Y
+bigot/MDSG
+bigotry/MS
+big/PYS
+bigwig/MS
+biharmonic
+bijection/MS
+bijective/Y
+bijou/M
+bijoux
+bike/MZGDRS
+biker/M
+bikini/SMD
+Biko/M
+bilabial/S
+bilateralness/M
+bilateral/PY
+bilayer/S
+Bilbao/M
+bilberry/MS
+Bilbo/M
+bile/SM
+bilge/GMDS
+biliary
+Bili/M
+bilinear
+bilingualism/SM
+bilingual/SY
+biliousness/SM
+bilious/P
+bilker/M
+bilk/GZSDR
+billboard/MDGS
+biller/M
+billet/MDGS
+billfold/MS
+billiard/SM
+Billie/M
+Billi/M
+billing/M
+billingsgate/SM
+Billings/M
+billionaire/MS
+billion/SHM
+billionths
+bill/JGZSBMDR
+Bill/JM
+billow/DMGS
+billowy/RT
+billposters
+Billye/M
+Billy/M
+billy/SM
+Bil/MY
+bi/M
+Bi/M
+bimbo/MS
+bimetallic/S
+bimetallism/MS
+Bimini/M
+bimodal
+bimolecular/Y
+bimonthly/S
+binary/S
+binaural/Y
+binder/M
+bindery/MS
+binding/MPY
+bindingness/M
+bind/JDRGZS
+bindle/M
+binds/AU
+bindweed/MS
+binge/MS
+bing/GNDM
+Bingham/M
+Binghamton/M
+Bing/M
+bingo/MS
+Bini/M
+Bink/M
+Binky/M
+binnacle/MS
+binned
+Binnie/M
+Binni/M
+binning
+Binny/M
+binocular/SY
+binodal
+binomial/SYM
+bin/SM
+binuclear
+biochemical/SY
+biochemist/MS
+biochemistry/MS
+biodegradability/S
+biodegradable
+biodiversity/S
+bioengineering/M
+bioethics
+biofeedback/SM
+biographer/M
+biographic
+biographical/Y
+biograph/RZ
+biography/MS
+biog/S
+Bioko/M
+biol
+biological/SY
+biologic/S
+biologist/SM
+biology/MS
+biomass/SM
+biomedical
+biomedicine/M
+biometric/S
+biometrics/M
+biometry/M
+biomolecule/S
+biomorph
+bionically
+bionic/S
+bionics/M
+biophysical/Y
+biophysicist/SM
+biophysic/S
+biophysics/M
+biopic/S
+biopsy/SDGM
+biorhythm/S
+BIOS
+bioscience/S
+biosphere/MS
+biostatistic/S
+biosynthesized
+biotechnological
+biotechnologist
+biotechnology/SM
+biotic
+biotin/SM
+bipartisan
+bipartisanship/MS
+bipartite/YN
+bipartition/M
+bipedal
+biped/MS
+biplane/MS
+bipolar
+bipolarity/MS
+biracial
+Birch/M
+birch/MRSDNG
+birdbath/M
+birdbaths
+birdbrain/SDM
+birdcage/SM
+birder/M
+birdhouse/MS
+birdieing
+Birdie/M
+birdie/MSD
+birdlike
+birdlime/MGDS
+Bird/M
+birdseed/MS
+Birdseye/M
+bird/SMDRGZ
+birdsong
+birdtables
+birdwatch/GZR
+birefringence/M
+birefringent
+biretta/SM
+Birgit/M
+Birgitta/M
+Birkenstock/M
+Birk/M
+Birmingham/M
+Biro/M
+Biron/M
+birthday/SM
+birthmark/MS
+birth/MDG
+birthplace/SM
+birthrate/MS
+birthright/MS
+birth's/A
+births/A
+birthstone/SM
+bis
+Biscay/M
+Biscayne/M
+biscuit/MS
+bisect/DSG
+bisection/MS
+bisector/MS
+biserial
+bisexuality/MS
+bisexual/YMS
+Bishkek
+bishop/DGSM
+Bishop/M
+bishopric/SM
+Bismarck/M
+Bismark/M
+bismuth/M
+bismuths
+bison/M
+bisque/SM
+Bissau/M
+bistable
+bistate
+bistro/SM
+bisyllabic
+bitblt/S
+bitchily
+bitchiness/MS
+bitch/MSDG
+bitchy/PTR
+biter/M
+bite/S
+biting/Y
+bitmap/SM
+bit/MRJSZG
+BITNET/M
+bit's/C
+bits/C
+bitser/M
+bitted
+bitten
+bitterness/SM
+bittern/SM
+bitternut/M
+bitter/PSRDYTG
+bitterroot/M
+bittersweet/YMSP
+bitting
+bitty/PRT
+bitumen/MS
+bituminous
+bitwise
+bivalent/S
+bivalve/MSD
+bivariate
+bivouacked
+bivouacking
+bivouac/MS
+biweekly/S
+biyearly
+bizarreness/M
+bizarre/YSP
+Bizet/M
+biz/M
+bizzes
+Bjorn/M
+bk
+b/KGD
+Bk/M
+blabbed
+blabber/GMDS
+blabbermouth/M
+blabbermouths
+blabbing
+blab/S
+blackamoor/SM
+blackball/SDMG
+blackberry/GMS
+blackbirder/M
+blackbird/SGDRM
+blackboard/SM
+blackbody/S
+Blackburn/M
+blackcurrant/M
+blackener/M
+blacken/GDR
+Blackfeet
+Blackfoot/M
+blackguard/MDSG
+blackhead/SM
+blacking/M
+blackish
+blackjack/SGMD
+blackleg/M
+blacklist/DRMSG
+blackmail/DRMGZS
+blackmailer/M
+Blackman/M
+Blackmer/M
+blackness/MS
+blackout/SM
+Blackpool/M
+Black's
+black/SJTXPYRDNG
+blacksmith/MG
+blacksmiths
+blacksnake/MS
+blackspot
+Blackstone/M
+blackthorn/MS
+blacktop/MS
+blacktopped
+blacktopping
+Blackwell/MS
+bladder/MS
+bladdernut/M
+bladderwort/M
+blade/DSGM
+blah/MDG
+blahs
+Blaine/M
+Blaire/M
+Blair/M
+Blakelee/M
+Blakeley/M
+Blake/M
+Blakey/M
+blame/DSRBGMZ
+blamelessness/SM
+blameless/YP
+blamer/M
+blameworthiness/SM
+blameworthy/P
+Blanca/M
+Blancha/M
+Blanchard/M
+blanch/DRSG
+Blanche/M
+blancher/M
+Blanch/M
+blanc/M
+blancmange/SM
+blandishment/MS
+blandish/SDGL
+blandness/MS
+bland/PYRT
+Blane/M
+Blankenship/M
+blanketing/M
+blanket/SDRMZG
+blankness/MS
+blank/SPGTYRD
+Blanton/M
+Blantyre/M
+blare/DSG
+blarney/DMGS
+blasé
+blasphemer/M
+blaspheme/RSDZG
+blasphemousness/M
+blasphemous/PY
+blasphemy/SM
+blaster/M
+blasting/M
+blastoff/SM
+blast/SMRDGZ
+blatancy/SM
+blatant/YP
+blather/DRGS
+blatting
+Blatz/M
+Blavatsky/M
+Blayne/M
+blaze/DSRGMZ
+blazer/M
+blazing/Y
+blazoner/M
+blazon/SGDR
+bl/D
+bldg
+bleach/DRSZG
+bleached/U
+bleacher/M
+bleakness/MS
+bleak/TPYRS
+blear/GDS
+blearily
+bleariness/SM
+bleary/PRT
+bleater/M
+bleat/RDGS
+bleeder/M
+bleed/ZRJSG
+Bleeker/M
+bleep/GMRDZS
+blemish/DSMG
+blemished/U
+blench/DSG
+blender/M
+blend/GZRDS
+Blenheim/M
+blessedness/MS
+blessed/PRYT
+blessing/M
+bless/JGSD
+Blevins/M
+blew
+Bligh/M
+blighter/M
+blight/GSMDR
+blimey/S
+blimp/MS
+blinded/U
+blinder/M
+blindfold/SDG
+blinding/MY
+blind/JGTZPYRDS
+blindness/MS
+blindside/SDG
+blinker/MDG
+blinking/U
+blink/RDGSZ
+blinks/M
+Blinnie/M
+Blinni/M
+Blinny/M
+blintze/M
+blintz/SM
+blip/MS
+blipped
+blipping
+Blisse/M
+blissfulness/MS
+blissful/PY
+Bliss/M
+bliss/SDMG
+blistering/Y
+blister/SMDG
+blistery
+Blithe/M
+blitheness/SM
+blither/G
+blithesome
+blithe/TYPR
+blitz/GSDM
+blitzkrieg/SM
+blizzard/MS
+bloater/M
+bloat/SRDGZ
+blobbed
+blobbing
+blob/MS
+Bloch/M
+blockader/M
+blockade/ZMGRSD
+blockage/MS
+blockbuster/SM
+blockbusting/MS
+blocker/MS
+blockhead/MS
+blockhouse/SM
+block's
+block/USDG
+blocky/R
+bloc/MS
+Bloemfontein/M
+bloke/SM
+Blomberg/M
+Blomquist/M
+Blondelle/M
+Blondell/M
+blonde's
+Blondie/M
+blondish
+blondness/MS
+blond/SPMRT
+Blondy/M
+bloodbath
+bloodbaths
+bloodcurdling
+bloodhound/SM
+bloodied/U
+bloodiness/MS
+bloodlessness/SM
+bloodless/PY
+bloodletting/MS
+bloodline/SM
+bloodmobile/MS
+bloodroot/M
+bloodshed/SM
+bloodshot
+blood/SMDG
+bloodsport/S
+bloodstain/MDS
+bloodstock/SM
+bloodstone/M
+bloodstream/SM
+bloodsucker/SM
+bloodsucking/S
+bloodthirstily
+bloodthirstiness/MS
+bloodthirsty/RTP
+bloodworm/M
+bloodymindedness
+bloody/TPGDRS
+bloomer/M
+Bloomer/M
+Bloomfield/M
+Bloomington/M
+Bloom/MR
+bloom/SMRDGZ
+blooper/M
+bloop/GSZRD
+blossom/DMGS
+blossomy
+blotch/GMDS
+blotchy/RT
+blot/MS
+blotted
+blotter/MS
+blotting
+blotto
+blouse/GMSD
+blower/M
+blowfish/M
+blowfly/MS
+blowgun/SM
+blow/GZRS
+blowing/M
+blown/U
+blowout/MS
+blowpipe/SM
+blowtorch/SM
+blowup/MS
+blowy/RST
+blowzy/RT
+BLT
+blubber/GSDR
+blubbery
+Blucher/M
+bludgeon/GSMD
+blueback
+Bluebeard/M
+bluebell/MS
+blueberry/SM
+bluebill/M
+bluebird/MS
+bluebonnet/SM
+bluebook/M
+bluebottle/MS
+bluebush
+bluefish/SM
+bluegill/SM
+bluegrass/MS
+blueing's
+blueish
+bluejacket/MS
+bluejeans
+blue/JMYTGDRSP
+blueness/MS
+bluenose/MS
+bluepoint/SM
+blueprint/GDMS
+bluer/M
+bluest/M
+bluestocking/SM
+bluesy/TR
+bluet/MS
+bluffer/M
+bluffness/MS
+bluff/SPGTZYRD
+bluing/M
+bluishness/M
+bluish/P
+Blumenthal/M
+Blum/M
+blunderbuss/MS
+blunderer/M
+blunder/GSMDRJZ
+blundering/Y
+bluntness/MS
+blunt/PSGTYRD
+blurb/GSDM
+blur/MS
+blurred/Y
+blurriness/S
+blurring/Y
+blurry/RPT
+blurt/GSRD
+blusher/M
+blushing/UY
+blush/RSDGZ
+blusterer/M
+blustering/Y
+blusterous
+bluster/SDRZG
+blustery
+blvd
+Blvd
+Blythe/M
+BM
+BMW/M
+BO
+boarded
+boarder/SM
+boardgames
+boardinghouse/SM
+boarding/SM
+board/IS
+boardroom/MS
+board's
+boardwalk/SM
+boar/MS
+boa/SM
+boaster/M
+boastfulness/MS
+boastful/YP
+boast/SJRDGZ
+boatclubs
+boater/M
+boathouse/SM
+boating/M
+boatload/SM
+boatman/M
+boat/MDRGZJS
+boatmen
+boatswain/SM
+boatyard/SM
+bobbed
+Bobbee/M
+Bobbe/M
+Bobbette/M
+Bobbie/M
+Bobbi/M
+bobbing/M
+bobbin/MS
+Bobbitt/M
+bobble/SDGM
+Bobbsey/M
+Bobbye/M
+Bobby/M
+bobby/SM
+bobbysoxer's
+bobcat/MS
+Bobette/M
+Bobina/M
+Bobine/M
+Bobinette/M
+Bob/M
+bobolink/SM
+Bobrow/M
+bobsledded
+bobsledder/MS
+bobsledding/M
+bobsled/MS
+bobsleigh/M
+bobsleighs
+bobs/M
+bob/SM
+bobtail/SGDM
+bobwhite/SM
+Boca/M
+Boccaccio/M
+boccie/SM
+bock/GDS
+bockwurst
+bodega/MS
+Bodenheim/M
+bode/S
+Bodhidharma/M
+bodhisattva
+Bodhisattva/M
+bodice/SM
+bodied/M
+bodiless
+bodily
+boding/M
+bodkin/SM
+bod/SGMD
+bodybuilder/SM
+bodybuilding/S
+body/DSMG
+bodyguard/MS
+bodying/M
+bodysuit/S
+bodyweight
+bodywork/SM
+Boeing/M
+Boeotia/M
+Boeotian
+Boer/M
+Bogartian/M
+Bogart/M
+Bogey/M
+bogeyman/M
+bogeymen
+bogey/SGMD
+bogged
+bogging
+boggle/SDG
+boggling/Y
+boggy/RT
+bogie's
+bog/MS
+Bogotá/M
+bogus
+bogyman
+bogymen
+bogy's
+Boheme/M
+bohemianism/S
+bohemian/S
+Bohemian/SM
+Bohemia/SM
+Bohr/M
+Boigie/M
+boiled/AU
+boiler/M
+boilermaker/MS
+boilerplate/SM
+boil/JSGZDR
+boils/A
+Boise/M
+Bois/M
+boisterousness/MS
+boisterous/YP
+bola/SM
+boldface/SDMG
+boldness/MS
+bold/YRPST
+bole/MS
+bolero/MS
+Boleyn/M
+bolivares
+Bolivar/M
+bolivar/MS
+Bolivia/M
+Bolivian/S
+bollard/SM
+bollix/GSD
+boll/MDSG
+Bologna/M
+bologna/MS
+bolometer/MS
+bolo/MS
+boloney's
+Bolshevik/MS
+Bolshevism/MS
+Bolshevistic/M
+Bolshevist/MS
+Bolshoi/M
+bolsterer/M
+bolster/SRDG
+bolted/U
+bolter/M
+bolt/MDRGS
+Bolton/M
+bolts/U
+Boltzmann/M
+bolus/SM
+bombardier/MS
+bombard/LDSG
+bombardment/SM
+bombastic
+bombastically
+bombast/RMS
+Bombay/M
+bomber/M
+bombproof
+bomb/SGZDRJ
+bombshell/SM
+Bo/MRZ
+bona
+bonanza/MS
+Bonaparte/M
+Bonaventure/M
+bonbon/SM
+bondage/SM
+bonder/M
+bondholder/SM
+Bondie/M
+bond/JMDRSGZ
+Bond/M
+bondman/M
+bondmen
+Bondon/M
+bonds/A
+bondsman/M
+bondsmen
+bondwoman/M
+bondwomen
+Bondy/M
+boned/U
+bonehead/SDM
+boneless
+Bone/M
+bone/MZDRSG
+boner/M
+bonfire/MS
+bong/GDMS
+bongo/MS
+Bonham/M
+bonhomie/MS
+Boniface/M
+boniness/MS
+Bonita/M
+bonito/MS
+bonjour
+bonkers
+Bonnee/M
+Bonner/M
+bonneted/U
+bonnet/SGMD
+Bonneville/M
+Bonnibelle/M
+bonnie
+Bonnie/M
+Bonni/M
+Bonn/RM
+Bonny/M
+bonny/RT
+bonsai/SM
+Bontempo/M
+bonus/SM
+bony/RTP
+bonzes
+boob/DMSG
+booby/SM
+boodle/GMSD
+boogeyman's
+boogieing
+boogie/SD
+boo/GSDH
+boohoo/GDS
+bookbinder/M
+bookbindery/SM
+bookbinding/M
+bookbind/JRGZ
+bookcase/MS
+booked/U
+bookend/SGD
+Booker/M
+book/GZDRMJSB
+bookie/SM
+booking/M
+bookishness/M
+bookish/PY
+bookkeeper/M
+bookkeep/GZJR
+bookkeeping/M
+booklet/MS
+bookmaker/MS
+bookmaking/MS
+bookmark/MDGS
+bookmobile/MS
+bookplate/SM
+bookseller/SM
+bookshelf/M
+bookshelves
+bookshop/MS
+bookstall/MS
+bookstore/SM
+bookwork/M
+bookworm/MS
+Boolean
+boolean/S
+Boole/M
+boom/DRGJS
+boomerang/MDSG
+boomer/M
+boomtown/S
+boondocks
+boondoggle/DRSGZ
+boondoggler/M
+Boone/M
+Boonie/M
+boonies
+boon/MS
+Boony/M
+boorishness/SM
+boorish/PY
+boor/MS
+boosterism
+booster/M
+boost/SGZMRD
+boot/AGDS
+bootblack/MS
+bootee/MS
+Boote/M
+Boötes
+Boothe/M
+booth/M
+Booth/M
+booths
+bootie's
+bootlaces
+bootlegged/M
+bootlegger/SM
+bootlegging/M
+bootleg/S
+Bootle/M
+bootless
+Boot/M
+bootprints
+boot's
+bootstrapped
+bootstrapping
+bootstrap/SM
+booty/SM
+booze/DSRGMZ
+boozer/M
+boozy/TR
+bopped
+bopping
+bop/S
+borate/MSD
+borax/MS
+Bordeaux/M
+bordello/MS
+Borden/M
+borderer/M
+border/JRDMGS
+borderland/SM
+borderline/MS
+Bordie/M
+Bord/MN
+Bordon/M
+Bordy/M
+Borealis/M
+Boreas/M
+boredom/MS
+boreholes
+borer/M
+bore/ZGJDRS
+Borges
+Borgia/M
+Borg/M
+boric
+boring/YMP
+Boris
+Bork/M
+born/AIU
+Borneo/M
+borne/U
+Born/M
+Borodin/M
+boron/SM
+borosilicate/M
+borough/M
+boroughs
+Borroughs/M
+borrower/M
+borrowing/M
+borrow/JZRDGBS
+borscht/SM
+borstal/MS
+Boru/M
+borzoi/MS
+Bosch/M
+Bose/M
+bosh/MS
+Bosnia/M
+Bosnian/S
+bosom's
+bosom/SGUD
+bosomy/RT
+boson/SM
+Bosporus/M
+boss/DSRMG
+bossily
+bossiness/MS
+bossism/MS
+bossy/PTSR
+Bostitch/M
+Bostonian/SM
+Boston/MS
+bosun's
+Boswell/MS
+botanical/SY
+botanic/S
+botanist/SM
+botany/SM
+botcher/M
+botch/SRDGZ
+botfly/M
+bother/DG
+bothersome
+bothy/M
+both/ZR
+bot/S
+Botswana/M
+Botticelli/M
+bottle/GMZSRD
+bottleneck/GSDM
+bottler/M
+bottomlessness/M
+bottomless/YP
+bottommost
+bottom/SMRDG
+botulin/M
+botulinus/M
+botulism/SM
+Boucher/M
+boudoir/MS
+bouffant/S
+bougainvillea/SM
+bough/MD
+boughs
+bought/N
+bouillabaisse/MS
+bouillon/MS
+boulder/GMDS
+Boulder/M
+boulevard/MS
+bouncer/M
+bounce/SRDGZ
+bouncily
+bouncing/Y
+bouncy/TRP
+boundary/MS
+bound/AUDI
+boundedness/MU
+bounded/UP
+bounden
+bounder/AM
+bounders
+bounding
+boundlessness/SM
+boundless/YP
+bounds/IA
+bounteousness/MS
+bounteous/PY
+bountifulness/SM
+bountiful/PY
+bounty/SDM
+bouquet/SM
+Bourbaki/M
+bourbon/SM
+Bourbon/SM
+bourgeoisie/SM
+bourgeois/M
+Bourke/M
+Bourne/M
+Bournemouth/M
+boutique/MS
+bout/MS
+boutonnière/MS
+Bouvier
+Bovary/M
+bovine/YS
+Bowditch/M
+bowdlerization/MS
+bowdlerize/GRSD
+bowed/U
+bowel/GMDS
+Bowell/M
+Bowen/M
+bower/DMG
+Bowers
+Bowery/M
+Bowes
+bowie
+Bowie/M
+bowing/M
+bowlder's
+bowlegged
+bowleg/SM
+bowler/M
+bowlful/S
+bowl/GZSMDR
+bowline/MS
+bowling/M
+bowman/M
+Bowman/M
+bowmen
+bowser/M
+bowsprit/SM
+bows/R
+bowstring/GSMD
+bow/SZGNDR
+bowwow/DMGS
+boxcar/SM
+box/DRSJZGM
+boxer/M
+boxful/M
+boxing/M
+boxlike
+boxtops
+boxwood/SM
+boxy/TPR
+Boyce/M
+Boycey/M
+Boycie/M
+boycotter/M
+boycott/RDGS
+Boyd/M
+Boyer/M
+boyfriend/MS
+boyhood/SM
+boyishness/MS
+boyish/PY
+Boyle/M
+Boy/MR
+boy/MRS
+boyscout
+boysenberry/SM
+bozo/SM
+bpi
+bps
+BR
+brace/DSRJGM
+braced/U
+bracelet/MS
+bracer/M
+brachia
+brachium/M
+bracken/SM
+bracketed/U
+bracketing/M
+bracket/SGMD
+brackishness/SM
+brackish/P
+bract/SM
+Bradan/M
+bradawl/M
+Bradbury/M
+Bradburys
+bradded
+bradding
+Braddock/M
+Brade/M
+Braden/M
+Bradford/M
+Bradley/M
+Bradly/M
+Brad/MYN
+Bradney/M
+Bradshaw/M
+brad/SM
+Bradstreet/M
+Brady/M
+brae/SM
+braggadocio/SM
+braggart/SM
+bragged
+bragger/MS
+braggest
+bragging
+Bragg/M
+brag/S
+Brahe/M
+Brahma/MS
+Brahmanism/MS
+Brahman/SM
+Brahmaputra/M
+Brahmin's
+Brahms
+braider/M
+braiding/M
+braid/RDSJG
+braille/DSG
+Braille/GDSM
+Brainard/SM
+braincell/S
+brainchild/M
+brainchildren
+brain/GSDM
+braininess/MS
+brainlessness/M
+brainless/YP
+Brain/M
+brainpower/M
+brainstorm/DRMGJS
+brainstorming/M
+brainteaser/S
+brainteasing
+brainwasher/M
+brainwashing/M
+brainwash/JGRSD
+brainwave/S
+brainy/RPT
+braise/SDG
+brake/DSGM
+brakeman/M
+brakemen/M
+bramble/DSGM
+brambling/M
+brambly/RT
+Bram/M
+Brampton/M
+bra/MS
+Brana/M
+branched/U
+branching/M
+branchlike
+Branch/M
+branch/MDSJG
+Branchville/M
+Brandais/M
+Brandea/M
+branded/U
+Brandeis/M
+Brandel/M
+Brande/M
+Brandenburg/M
+Branden/M
+brander/GDM
+Brander/M
+Brandice/M
+Brandie/M
+Brandi/M
+Brandise/M
+brandish/GSD
+Brand/MRN
+Brando/M
+Brandon/M
+brand/SMRDGZ
+Brandt/M
+Brandtr/M
+brandy/GDSM
+Brandy/M
+Brandyn/M
+brandywine
+Braniff/M
+Bran/M
+branned
+branning
+Brannon/M
+bran/SM
+Brantley/M
+Brant/M
+Braque/M
+brashness/MS
+brash/PYSRT
+Brasilia
+brasserie/SM
+brass/GSDM
+brassiere/MS
+brassily
+brassiness/SM
+brassy/RSPT
+Bratislava/M
+brat/SM
+Brattain/M
+bratty/RT
+bratwurst/MS
+Braun/M
+bravadoes
+bravado/M
+brave/DSRGYTP
+braveness/MS
+bravery/MS
+bravest/M
+bravo/SDG
+bravura/SM
+brawler/M
+brawl/MRDSGZ
+brawniness/SM
+brawn/MS
+brawny/TRP
+brayer/M
+Bray/M
+bray/SDRG
+braze/GZDSR
+brazenness/MS
+brazen/PYDSG
+brazer/M
+brazier/SM
+Brazilian/MS
+Brazil/M
+Brazos/M
+Brazzaville/M
+breacher/M
+breach/MDRSGZ
+breadbasket/SM
+breadboard/SMDG
+breadbox/S
+breadcrumb/S
+breadfruit/MS
+breadline/MS
+bread/SMDHG
+breadth/M
+breadths
+breadwinner/MS
+breakables
+breakable/U
+breakage/MS
+breakaway/MS
+breakdown/MS
+breaker/M
+breakfaster/M
+breakfast/RDMGZS
+breakfront/S
+breaking/M
+breakneck
+breakout/MS
+breakpoint/SMDG
+break/SZRBG
+breakthroughs
+breakthrough/SM
+breakup/SM
+breakwater/SM
+bream/SDG
+Breanne/M
+Brear/M
+breastbone/MS
+breastfed
+breastfeed/G
+breasting/M
+breast/MDSG
+breastplate/SM
+breaststroke/SM
+breastwork/MS
+breathable/U
+breathalyser/S
+Breathalyzer/SM
+breathe
+breather/M
+breathing/M
+breathlessness/SM
+breathless/PY
+breaths
+breathtaking/Y
+breathy/TR
+breath/ZBJMDRSG
+Brecht/M
+Breckenridge/M
+bred/DG
+bredes
+breeching/M
+breech/MDSG
+breeder/I
+breeder's
+breeding/IM
+breeds/I
+breed/SZJRG
+Bree/M
+Breena/M
+breeze/GMSD
+breezeway/SM
+breezily
+breeziness/SM
+breezy/RPT
+Bremen/M
+bremsstrahlung/M
+Brena/M
+Brenda/M
+Brendan/M
+Brenden/M
+Brendin/M
+Brendis/M
+Brendon/M
+Bren/M
+Brenna/M
+Brennan/M
+Brennen/M
+Brenner/M
+Brenn/RNM
+Brent/M
+Brenton/M
+Bresenham/M
+Brest/M
+brethren
+Bret/M
+Breton
+Brett/M
+breve/SM
+brevet/MS
+brevetted
+brevetting
+breviary/SM
+brevity/MS
+brew/DRGZS
+brewer/M
+Brewer/M
+brewery/MS
+brewing/M
+brewpub/S
+Brew/RM
+Brewster/M
+Brezhnev/M
+Bria/M
+Briana/M
+Brian/M
+Brianna/M
+Brianne/M
+Briano/M
+Briant/M
+briar's
+bribe/GZDSR
+briber/M
+bribery/MS
+Brice/M
+brickbat/SM
+brick/GRDSM
+bricklayer/MS
+bricklaying/SM
+brickmason/S
+brickwork/SM
+brickyard/M
+bridal/S
+Bridalveil/M
+bridegroom/MS
+Bride/M
+bride/MS
+bridesmaid/MS
+Bridewell/M
+bridgeable/U
+bridged/U
+bridgehead/MS
+Bridgeport/M
+Bridger/M
+Bridges
+bridge/SDGM
+Bridget/M
+Bridgetown/M
+Bridgette/M
+Bridgett/M
+Bridgewater/M
+bridgework/MS
+bridging/M
+Bridgman/M
+Bridie/M
+bridled/U
+bridle/SDGM
+bridleway/S
+briefcase/SM
+briefed/C
+briefing/M
+briefness/MS
+briefs/C
+brief/YRDJPGTS
+Brien/M
+Brier/M
+brier/MS
+Brie/RSM
+Brietta/M
+brigade/GDSM
+brigadier/MS
+Brigadoon
+brigandage/MS
+brigand/MS
+brigantine/MS
+Brigg/MS
+Brigham/M
+brightener/M
+brighten/RDZG
+bright/GXTPSYNR
+Bright/M
+brightness/SM
+Brighton/M
+Brigida/M
+Brigid/M
+Brigit/M
+Brigitta/M
+Brigitte/M
+Brig/M
+brig/SM
+brilliance/MS
+brilliancy/MS
+brilliantine/MS
+brilliantness/M
+brilliant/PSY
+Brillo
+Brillouin/M
+brimful
+brimless
+brimmed
+brimming
+brim/SM
+brimstone/MS
+Brina/M
+Brindisi/M
+brindle/DSM
+brine/GMDSR
+briner/M
+Briney/M
+bringer/M
+bring/RGZS
+brininess/MS
+Brinkley/M
+brinkmanship/SM
+brink/MS
+Brinna/M
+Brinn/M
+Briny/M
+briny/PTSR
+brioche/SM
+Brion/M
+briquet's
+briquette/MGSD
+Brisbane/M
+brisket/SM
+briskness/MS
+brisk/YRDPGTS
+bristle/DSGM
+bristly/TR
+Bristol/M
+bristol/S
+Britain/M
+Brita/M
+Britannia/M
+Britannic
+Britannica/M
+britches
+Briticism/MS
+Britisher/M
+Britishly/M
+British/RYZ
+Brit/MS
+Britney/M
+Britni/M
+Briton/MS
+Britta/M
+Brittaney/M
+Brittani/M
+Brittan/M
+Brittany/MS
+Britte/M
+Britten/M
+Britteny/M
+brittleness/MS
+brittle/YTPDRSG
+Britt/MN
+Brittne/M
+Brittney/M
+Brittni/M
+Brnaba/M
+Brnaby/M
+Brno/M
+broach/DRSG
+broacher/M
+broadband
+broadcaster/M
+broadcast/RSGZJ
+broadcasts/A
+broadcloth/M
+broadcloths
+broaden/JGRDZ
+broadleaved
+broadloom/SM
+broadminded/P
+broadness/S
+broadsheet/MS
+broadside/SDGM
+broadsword/MS
+broad/TXSYRNP
+Broadway/SM
+Brobdingnagian
+Brobdingnag/M
+brocade/DSGM
+broccoli/MS
+brochette/SM
+brochure/SM
+Brockie/M
+Brock/M
+Brocky/M
+Broddie/M
+Broddy/M
+Broderick/M
+Broderic/M
+Brodie/M
+Brod/M
+Brody/M
+brogan/MS
+Broglie/M
+brogue/MS
+broiler/M
+broil/RDSGZ
+brokenhearted/Y
+brokenness/MS
+broken/YP
+brokerage/MS
+broker/DMG
+broke/RGZ
+Brok/M
+bromide/MS
+bromidic
+bromine/MS
+bronchial
+bronchi/M
+bronchiolar
+bronchiole/MS
+bronchiolitis
+bronchitic/S
+bronchitis/MS
+broncho's
+bronchus/M
+broncobuster/SM
+bronco/SM
+bronc/S
+Bron/M
+Bronnie/M
+Bronny/M
+Bronson/M
+Bronte
+brontosaur/SM
+brontosaurus/SM
+Bronx/M
+bronzed/M
+bronze/SRDGM
+bronzing/M
+brooch/MS
+brooder/M
+broodiness/M
+brooding/Y
+broodmare/SM
+brood/SMRDGZ
+broody/PTR
+Brookdale/M
+Brooke/M
+Brookfield/M
+Brookhaven/M
+brooklet/MS
+Brooklyn/M
+Brookmont/M
+brook/SGDM
+brookside
+Brook/SM
+broom/SMDG
+broomstick/MS
+Bros
+Brose/M
+bro/SH
+bros/S
+brothel/MS
+brother/DYMG
+brotherhood/SM
+brotherliness/MS
+brotherly/P
+broths
+broth/ZMR
+brougham/MS
+brought
+brouhaha/MS
+browbeat/NSG
+brow/MS
+Brownell/M
+Browne/M
+Brownian/M
+Brownie/MS
+brownie/MTRS
+browning/M
+Browning/M
+brownish
+Brown/MG
+brownness/MS
+brownout/MS
+brownstone/MS
+Brownsville/M
+brown/YRDMSJGTP
+browse
+browser/M
+brows/SRDGZ
+brr
+Br/TMN
+Brubeck/M
+brucellosis/M
+Bruce/M
+Brucie/M
+Bruckner/M
+Bruegel/M
+Brueghel's
+bruin/MS
+bruised/U
+bruise/JGSRDZ
+bruiser/M
+Bruis/M
+bruit/DSG
+Brumidi/M
+Brummel/M
+brunch/MDSG
+Brunei/M
+Brunelleschi/M
+brunet/S
+brunette/SM
+Brunhilda/M
+Brunhilde/M
+Bruno/M
+Brunswick/M
+brunt/GSMD
+brusher/M
+brushfire/MS
+brushlike
+brush/MSRDG
+brushoff/S
+brushwood/SM
+brushwork/MS
+brushy/R
+brusqueness/MS
+brusque/PYTR
+Brussels
+brutality/SM
+brutalization/SM
+brutalized/U
+brutalizes/AU
+brutalize/SDG
+brutal/Y
+brute/DSRGM
+brutishness/SM
+brutish/YP
+Brutus/M
+Bruxelles/M
+Bryana/M
+Bryan/M
+Bryant/M
+Bryanty/M
+Bryce/M
+Bryna/M
+Bryn/M
+Brynna/M
+Brynne/M
+Brynner/M
+Brynn/RM
+Bryon/M
+Brzezinski/M
+B's
+BS
+BSA
+BSD
+Btu
+BTU
+BTW
+bu
+bubblegum/S
+bubbler/M
+bubble/RSDGM
+bubbly/TRS
+Buber/M
+bub/MS
+buboes
+bubo/M
+bubonic
+buccaneer/GMDS
+Buchanan/M
+Bucharest/M
+Buchenwald/M
+Buchwald/M
+buckaroo/SM
+buckboard/SM
+bucker/M
+bucketful/MS
+bucket/SGMD
+buckeye/SM
+buck/GSDRM
+buckhorn/M
+Buckie/M
+Buckingham/M
+buckled/U
+buckler/MDG
+buckle/RSDGMZ
+buckles/U
+Buckley/M
+buckling's
+buckling/U
+Buck/M
+Buckner/M
+buckram/GSDM
+bucksaw/SM
+buckshot/MS
+buckskin/SM
+buckteeth
+bucktooth/DM
+buckwheat/SM
+Bucky/M
+bucolically
+bucolic/S
+Budapest/M
+budded
+Buddha/MS
+Buddhism/SM
+Buddhist/SM
+Buddie/M
+budding/S
+Budd/M
+buddy/GSDM
+Buddy/M
+budge/GDS
+budgerigar/MS
+budgetary
+budgeter/M
+budget/GMRDZS
+budgie/MS
+budging/U
+Bud/M
+bud/MS
+Budweiser/MS
+Buehring/M
+Buena/M
+buffaloes
+Buffalo/M
+buffalo/MDG
+buff/ASGD
+buffered/U
+bufferer/M
+buffer/RDMSGZ
+buffet/GMDJS
+bufflehead/M
+buffoonery/MS
+buffoonish
+buffoon/SM
+buff's
+Buffy/M
+Buford/M
+bugaboo/SM
+Bugatti/M
+bugbear/SM
+bug/CS
+bugeyed
+bugged/C
+buggered
+buggering
+bugger/SCM!
+buggery/M
+bugging/C
+buggy/RSMT
+bugle/GMDSRZ
+bugler/M
+bug's
+Buick/M
+builder/SM
+building/SM
+build/SAG
+buildup/MS
+built/AUI
+Buiron/M
+Bujumbura/M
+Bukhara/M
+Bukharin/M
+Bulawayo/M
+Bulba/M
+bulb/DMGS
+bulblet
+bulbous
+Bulfinch/M
+Bulganin/M
+Bulgaria/M
+Bulgarian/S
+bulge/DSGM
+bulgy/RT
+bulimarexia/S
+bulimia/MS
+bulimic/S
+bulk/GDRMS
+bulkhead/SDM
+bulkiness/SM
+bulky/RPT
+bulldogged
+bulldogger
+bulldogging
+bulldog/SM
+bulldoze/GRSDZ
+bulldozer/M
+bullet/GMDS
+bulletin/SGMD
+bulletproof/SGD
+bullfighter/M
+bullfighting/M
+bullfight/SJGZMR
+bullfinch/MS
+bullfrog/SM
+bullhead/DMS
+bullheadedness/SM
+bullheaded/YP
+bullhide
+bullhorn/SM
+bullied/M
+bullion/SM
+bullishness/SM
+bullish/PY
+bull/MDGS
+Bullock/M
+bullock/MS
+bullpen/MS
+bullring/SM
+bullseye
+bullshit/MS!
+bullshitted/!
+bullshitter/S!
+bullshitting/!
+bullwhackers
+Bullwinkle/M
+bullyboy/MS
+bullying/M
+bully/TRSDGM
+bulrush/SM
+Bultmann/M
+bulwark/GMDS
+bumblebee/MS
+bumble/JGZRSD
+bumbler/M
+bumbling/Y
+Bumbry/M
+bummed/M
+bummer/MS
+bummest
+bumming/M
+bumper/DMG
+bump/GZDRS
+bumpiness/MS
+bumpkin/MS
+Bumppo/M
+bumptiousness/SM
+bumptious/PY
+bumpy/PRT
+bum/SM
+Bunche/M
+bunch/MSDG
+bunchy/RT
+buncombe's
+bunco's
+Bundestag/M
+bundled/U
+bundle/GMRSD
+bundler/M
+Bundy/M
+bungalow/MS
+bungee/SM
+bung/GDMS
+bunghole/MS
+bungle/GZRSD
+bungler/M
+bungling/Y
+Bunin/M
+bunion/SM
+bunk/CSGDR
+Bunker/M
+bunker's/C
+bunker/SDMG
+bunkhouse/SM
+bunkmate/MS
+bunko's
+bunk's
+bunkum/SM
+Bunnie/M
+Bunni/M
+Bunny/M
+bunny/SM
+Bunsen/SM
+bun/SM
+bunt/GJZDRS
+bunting/M
+Buñuel/M
+Bunyan/M
+buoyancy/MS
+buoyant/Y
+buoy/SMDG
+Burbank/M
+burbler/M
+burble/RSDG
+burbs
+Burch/M
+burden's
+burdensomeness/M
+burdensome/PY
+burden/UGDS
+burdock/SM
+bureaucracy/MS
+bureaucratically
+bureaucratic/U
+bureaucratization/MS
+bureaucratize/SDG
+bureaucrat/MS
+bureau/MS
+burgeon/GDS
+burger/M
+Burger/M
+Burgess/M
+burgess/MS
+burgher/M
+burgh/MRZ
+burghs
+burglarize/GDS
+burglarproof/DGS
+burglar/SM
+burglary/MS
+burgle/SDG
+burgomaster/SM
+Burgoyne/M
+Burg/RM
+burg/SZRM
+Burgundian/S
+Burgundy/MS
+burgundy/S
+burial/ASM
+buried/U
+burier/M
+Burke/M
+Burk/SM
+burlap/MS
+burler/M
+burlesquer/M
+burlesque/SRDMYG
+burley/M
+Burlie/M
+burliness/SM
+Burlingame/M
+Burlington/M
+Burl/M
+burl/SMDRG
+burly/PRT
+Burma/M
+Burmese
+bur/MYS
+burnable/S
+Burnaby/M
+Burnard/M
+burned/U
+Burne/MS
+burner/M
+Burnett/M
+burn/GZSDRBJ
+burning/Y
+burnisher/M
+burnish/GDRSZ
+burnoose/MS
+burnout/MS
+Burns
+Burnside/MS
+burnt/YP
+burp/SGMD
+burr/GSDRM
+Burris/M
+burrito/S
+Burr/M
+burro/SM
+Burroughs/M
+burrower/M
+burrow/GRDMZS
+bursae
+bursa/M
+Bursa/M
+bursar/MS
+bursary/MS
+bursitis/MS
+burster/M
+burst/SRG
+Burtie/M
+Burt/M
+Burton/M
+Burty/M
+Burundian/S
+Burundi/M
+bury/ASDG
+busboy/MS
+busby/SM
+Busch/M
+buses/A
+busgirl/S
+bus/GMDSJ
+bushel/MDJSG
+Bushido/M
+bushiness/MS
+bushing/M
+bush/JMDSRG
+bushland
+Bush/M
+bushman/M
+bushmaster/SM
+bushmen
+Bushnell/M
+bushwhacker/M
+bushwhacking/M
+bushwhack/RDGSZ
+bushy/PTR
+busily
+businesslike
+businessman/M
+businessmen
+business/MS
+businesspeople
+businessperson/S
+businesswoman/M
+businesswomen
+busker/M
+busk/GRM
+buskin/SM
+bus's/A
+buss/D
+bustard/MS
+buster/M
+bustle/GSD
+bustling/Y
+bust/MSDRGZ
+busty/RT
+busybody/MS
+busy/DSRPTG
+busyness/MS
+busywork/SM
+but/ACS
+butane/MS
+butcherer/M
+butcher/MDRYG
+butchery/MS
+Butch/M
+butch/RSZ
+butene/M
+Butler/M
+butler/SDMG
+butted/A
+butte/MS
+butterball/MS
+buttercup/SM
+buttered/U
+butterfat/MS
+Butterfield/M
+butterfingered
+butterfingers/M
+butterfly/MGSD
+buttermilk/MS
+butternut/MS
+butter/RDMGZ
+butterscotch/SM
+buttery/TRS
+butting/M
+buttock/SGMD
+buttoner/M
+buttonhole/GMRSD
+buttonholer/M
+button's
+button/SUDG
+buttonweed
+buttonwood/SM
+buttress/MSDG
+butt/SGZMDR
+butyl/M
+butyrate/M
+buxomness/M
+buxom/TPYR
+Buxtehude/M
+buyback/S
+buyer/M
+buyout/S
+buy/ZGRS
+buzzard/MS
+buzz/DSRMGZ
+buzzer/M
+buzzword/SM
+buzzy
+bx
+bxs
+byelaw's
+Byelorussia's
+bye/MZS
+Byers/M
+bygone/S
+bylaw/SM
+byliner/M
+byline/RSDGM
+BYOB
+bypass/GSDM
+bypath/M
+bypaths
+byplay/S
+byproduct/SM
+Byram/M
+Byran/M
+Byrann/M
+Byrd/M
+byre/SM
+Byrle/M
+Byrne/M
+byroad/MS
+Byrom/M
+Byronic
+Byronism/M
+Byron/M
+bystander/SM
+byte/SM
+byway/SM
+byword/SM
+byzantine
+Byzantine/S
+Byzantium/M
+by/ZR
+C
+ca
+CA
+cabala/MS
+caballed
+caballero/SM
+caballing
+cabal/SM
+cabana/MS
+cabaret/SM
+cabbage/MGSD
+cabbed
+cabbing
+cabby's
+cabdriver/SM
+caber/M
+Cabernet/M
+cabinetmaker/SM
+cabinetmaking/MS
+cabinet/MS
+cabinetry/SM
+cabinetwork/MS
+cabin/GDMS
+cablecast/SG
+cable/GMDS
+cablegram/SM
+cabochon/MS
+caboodle/SM
+caboose/MS
+Cabot/M
+Cabrera/M
+Cabrini/M
+cabriolet/MS
+cab/SMR
+cabstand/MS
+cacao/SM
+cacciatore
+cache/DSRGM
+cachepot/MS
+cachet/MDGS
+Cacilia/M
+Cacilie/M
+cackler/M
+cackle/RSDGZ
+cackly
+CACM
+cacophonist
+cacophonous
+cacophony/SM
+cacti
+cactus/M
+CAD
+cadaverous/Y
+cadaver/SM
+caddishness/SM
+caddish/PY
+Caddric/M
+caddy/GSDM
+cadence/CSM
+cadenced
+cadencing
+cadent/C
+cadenza/MS
+cadet/SM
+Cadette/S
+cadge/DSRGZ
+cadger/M
+Cadillac/MS
+Cadiz/M
+Cad/M
+cadmium/MS
+cadre/SM
+cad/SM
+caducei
+caduceus/M
+Caedmon/M
+Caesar/MS
+caesura/SM
+café/MS
+cafeteria/SM
+caffeine/SM
+caftan/SM
+caged/U
+Cage/M
+cage/MZGDRS
+cager/M
+cagey/P
+cagier
+cagiest
+cagily
+caginess/MS
+Cagney/M
+Cahokia/M
+cahoot/MS
+Cahra/M
+CAI
+Caiaphas/M
+caiman's
+Caine/M
+Cain/MS
+Cairistiona/M
+cairn/SDM
+Cairo/M
+caisson/SM
+caitiff/MS
+Caitlin/M
+Caitrin/M
+cajole/LGZRSD
+cajolement/MS
+cajoler/M
+cajolery/SM
+Cajun/MS
+cake/MGDS
+cakewalk/SMDG
+calabash/SM
+calaboose/MS
+Calais/M
+calamari/S
+calamine/GSDM
+calamitousness/M
+calamitous/YP
+calamity/MS
+cal/C
+calcareousness/M
+calcareous/PY
+calciferous
+calcification/M
+calcify/XGNSD
+calcimine/GMSD
+calcine/SDG
+calcite/SM
+calcium/SM
+Calcomp/M
+CalComp/M
+CALCOMP/M
+calculability/IM
+calculable/IP
+calculate/AXNGDS
+calculated/PY
+calculatingly
+calculating/U
+calculation/AM
+calculative
+calculator/SM
+calculi
+calculus/M
+Calcutta/M
+caldera/SM
+Calder/M
+Calderon/M
+caldron's
+Caldwell/M
+Caleb/M
+Caledonia/M
+Cale/M
+calendar/MDGS
+calender/MDGS
+calf/M
+calfskin/SM
+Calgary/M
+Calhoun/M
+Caliban/M
+caliber/SM
+calibrated/U
+calibrater's
+calibrate/XNGSD
+calibrating/A
+calibration/M
+calibrator/MS
+calicoes
+calico/M
+Calida/M
+Calif/M
+California/M
+Californian/MS
+californium/SM
+calif's
+Caligula/M
+Cali/M
+caliper/SDMG
+caliphate/SM
+caliph/M
+caliphs
+calisthenic/S
+calisthenics/M
+Callaghan/M
+call/AGRDBS
+Callahan/M
+calla/MS
+Calla/MS
+Callao/M
+callback/S
+Callean/M
+called/U
+callee/M
+caller/MS
+Calley/M
+Callida/M
+Callie/M
+calligrapher/M
+calligraphic
+calligraphist/MS
+calligraph/RZ
+calligraphy/MS
+Calli/M
+calling/SM
+Calliope/M
+calliope/SM
+callisthenics's
+Callisto/M
+callosity/MS
+callousness/SM
+callous/PGSDY
+callowness/MS
+callow/RTSP
+callus/SDMG
+Cally/M
+calming/Y
+calmness/MS
+calm/PGTYDRS
+Cal/MY
+Caloocan/M
+caloric/S
+calorie/SM
+calorific
+calorimeter/MS
+calorimetric
+calorimetry/M
+Caltech/M
+Calumet/M
+calumet/MS
+calumniate/NGSDX
+calumniation/M
+calumniator/SM
+calumnious
+calumny/MS
+calvary/M
+Calvary/M
+calve/GDS
+Calvert/M
+calves/M
+Calvinism/MS
+Calvinistic
+Calvinist/MS
+Calvin/M
+Calv/M
+calyces's
+Calypso/M
+calypso/SM
+calyx/MS
+Ca/M
+CAM
+Camacho/M
+Camala/M
+camaraderie/SM
+camber/DMSG
+cambial
+cambium/SM
+Cambodia/M
+Cambodian/S
+Cambrian/S
+cambric/MS
+Cambridge/M
+camcorder/S
+Camden/M
+camelhair's
+Camella/M
+Camellia/M
+camellia/MS
+Camel/M
+Camelopardalis/M
+Camelot/M
+camel/SM
+Camembert/MS
+cameo/GSDM
+camerae
+cameraman/M
+cameramen
+camera/MS
+camerawoman
+camerawomen
+Cameron/M
+Cameroonian/S
+Cameroon/SM
+came/N
+Camey/M
+Camila/M
+Camile/M
+Camilla/M
+Camille/M
+Cami/M
+Camino/M
+camion/M
+camisole/MS
+Cam/M
+cammed
+Cammie/M
+Cammi/M
+cam/MS
+Cammy/M
+Camoens/M
+camomile's
+camouflage/DRSGZM
+camouflager/M
+campaigner/M
+campaign/ZMRDSG
+campanile/SM
+campanological
+campanologist/SM
+campanology/MS
+Campbell/M
+Campbellsport/M
+camper/SM
+campesinos
+campest
+campfire/SM
+campground/MS
+camphor/MS
+Campinas/M
+camping/S
+Campos
+camp's
+camp/SCGD
+campsite/MS
+campus/GSDM
+campy/RT
+Camry/M
+camshaft/SM
+Camus/M
+Canaanite/SM
+Canaan/M
+Canada/M
+Canadianism/SM
+Canadian/S
+Canad/M
+Canaletto/M
+canalization/MS
+canalize/GSD
+canal/SGMD
+canapé/S
+canard/MS
+Canaries
+canary/SM
+canasta/SM
+Canaveral/M
+Canberra/M
+cancan/SM
+cancelate/D
+canceled/U
+canceler/M
+cancellation/MS
+cancel/RDZGS
+cancer/MS
+Cancer/MS
+cancerous/Y
+Cancun/M
+Candace/M
+candelabra/S
+candelabrum/M
+Candice/M
+candidacy/MS
+Candida/M
+candidate/SM
+candidature/S
+Candide/M
+candidly/U
+candidness/SM
+candid/TRYPS
+Candie/M
+Candi/SM
+candle/GMZRSD
+candlelight/SMR
+candlelit
+candlepower/SM
+candler/M
+candlestick/SM
+Candlewick/M
+candlewick/MS
+candor/MS
+Candra/M
+candy/GSDM
+Candy/M
+canebrake/SM
+caner/M
+cane/SM
+canine/S
+caning/M
+Canis/M
+canister/SGMD
+cankerous
+canker/SDMG
+Can/M
+can/MDRSZGJ
+cannabis/MS
+canned
+cannelloni
+canner/SM
+cannery/MS
+Cannes
+cannibalism/MS
+cannibalistic
+cannibalization/SM
+cannibalize/GSD
+cannibal/SM
+cannily/U
+canninesses
+canniness/UM
+canning/M
+cannister/SM
+cannonade/SDGM
+cannonball/SGDM
+Cannon/M
+cannon/SDMG
+cannot
+canny/RPUT
+canoe/DSGM
+canoeist/SM
+Canoga/M
+canonic
+canonicalization
+canonicalize/GSD
+canonical/SY
+canonist/M
+canonization/MS
+canonized/U
+canonize/SDG
+canon/SM
+Canopus/M
+canopy/GSDM
+canst
+can't
+cantabile/S
+Cantabrigian
+cantaloupe/MS
+cantankerousness/SM
+cantankerous/PY
+cantata/SM
+cant/CZGSRD
+canted/IA
+canteen/MS
+Canterbury/M
+canter/CM
+cantered
+cantering
+canticle/SM
+cantilever/SDMG
+canto/MS
+cantonal
+Cantonese/M
+Canton/M
+cantonment/SM
+canton/MGSLD
+Cantor/M
+cantor/MS
+Cantrell/M
+cant's
+cants/A
+Cantu/M
+Canute/M
+canvasback/MS
+canvas/RSDMG
+canvasser/M
+canvass/RSDZG
+canyon/MS
+CAP
+capability/ISM
+capableness/IM
+capable/PI
+capabler
+capablest
+capably/I
+capaciousness/MS
+capacious/PY
+capacitance/SM
+capacitate/V
+capacitive/Y
+capacitor/MS
+capacity/IMS
+caparison/SDMG
+Capek/M
+Capella/M
+caper/GDM
+capeskin/SM
+cape/SM
+Capet/M
+Capetown/M
+Caph/M
+capillarity/MS
+capillary/S
+Capistrano/M
+capitalism/SM
+capitalistic
+capitalistically
+capitalist/SM
+capitalization/SMA
+capitalized/AU
+capitalizer/M
+capitalize/RSDGZ
+capitalizes/A
+capital/SMY
+capita/M
+Capitan/M
+capitation/CSM
+Capitoline/M
+Capitol/MS
+capitol/SM
+capitulate/AXNGSD
+capitulation/MA
+caplet/S
+cap/MDRSZB
+Capone/M
+capon/SM
+capo/SM
+Capote/M
+capped/UA
+capping/M
+cappuccino/MS
+Cappy/M
+Capra/M
+Caprice/M
+caprice/MS
+capriciousness/MS
+capricious/PY
+Capricorn/MS
+Capri/M
+caps/AU
+capsicum/MS
+capsize/SDG
+capstan/MS
+capstone/MS
+capsular
+capsule/MGSD
+capsulize/GSD
+captaincy/MS
+captain/SGDM
+caption/GSDRM
+captiousness/SM
+captious/PY
+captivate/XGNSD
+captivation/M
+captivator/SM
+captive/MS
+captivity/SM
+Capt/M
+captor/SM
+capture/AGSD
+capturer/MS
+capt/V
+Capulet/M
+Caputo/M
+Caracalla/M
+Caracas/M
+caracul's
+carafe/SM
+Caralie/M
+Cara/M
+caramelize/SDG
+caramel/MS
+carapace/SM
+carapaxes
+carat/SM
+Caravaggio/M
+caravan/DRMGS
+caravaner/M
+caravansary/MS
+caravanserai's
+caravel/MS
+caraway/MS
+carbide/MS
+carbine/MS
+carbohydrate/MS
+carbolic
+Carboloy/M
+carbonaceous
+carbonate/SDXMNG
+carbonation/M
+Carbondale/M
+Carbone/MS
+carbonic
+carboniferous
+Carboniferous
+carbonization/SAM
+carbonizer/AS
+carbonizer's
+carbonizes/A
+carbonize/ZGRSD
+carbon/MS
+carbonyl/M
+carborundum
+Carborundum/MS
+carboy/MS
+carbuncle/SDM
+carbuncular
+carburetor/MS
+carburetter/S
+carburettor/SM
+carcase/MS
+carcass/SM
+Carce/M
+carcinogenic
+carcinogenicity/MS
+carcinogen/SM
+carcinoma/SM
+cardamom/MS
+cardboard/MS
+card/EDRSG
+Cardenas/M
+carder/MS
+carder's/E
+cardholders
+cardiac/S
+Cardiff/M
+cardigan/SM
+cardinality/SM
+cardinal/SYM
+carding/M
+Cardin/M
+Cardiod/M
+cardiogram/MS
+cardiograph/M
+cardiographs
+cardioid/M
+cardiologist/SM
+cardiology/MS
+cardiomegaly/M
+cardiopulmonary
+cardiovascular
+card's
+cardsharp/ZSMR
+CARE
+cared/U
+careen/DSG
+careerism/M
+careerist/MS
+career/SGRDM
+carefree
+carefuller
+carefullest
+carefulness/MS
+careful/PY
+caregiver/S
+carelessness/MS
+careless/YP
+Care/M
+Carena/M
+Caren/M
+carer/M
+care/S
+Caresa/M
+Caressa/M
+Caresse/M
+caresser/M
+caressing/Y
+caressive/Y
+caress/SRDMVG
+caretaker/SM
+caret/SM
+careworn
+Carey/M
+carfare/MS
+cargoes
+cargo/M
+carhopped
+carhopping
+carhop/SM
+Caria/M
+Caribbean/S
+Carib/M
+caribou/MS
+caricature/GMSD
+caricaturisation
+caricaturist/MS
+caricaturization
+Carie/M
+caries/M
+carillonned
+carillonning
+carillon/SM
+Caril/M
+Carilyn/M
+Cari/M
+Carina/M
+Carine/M
+caring/U
+Carin/M
+Cariotta/M
+carious
+Carissa/M
+Carita/M
+Caritta/M
+carjack/GSJDRZ
+Carla/M
+Carlee/M
+Carleen/M
+Carlene/M
+Carlen/M
+Carletonian/M
+Carleton/M
+Carley/M
+Carlie/M
+Carlina/M
+Carline/M
+Carling/M
+Carlin/M
+Carlita/M
+Carl/MNG
+carload/MSG
+Carlo/SM
+Carlota/M
+Carlotta/M
+Carlsbad/M
+Carlson/M
+Carlton/M
+Carlye/M
+Carlyle/M
+Carly/M
+Carlyn/M
+Carlynne/M
+Carlynn/M
+Carma/M
+Carmela/M
+Carmelia/M
+Carmelina/M
+Carmelita/M
+Carmella/M
+Carmelle/M
+Carmel/M
+Carmelo/M
+Carmencita/M
+Carmen/M
+Carmichael/M
+Carmina/M
+Carmine/M
+carmine/MS
+Carmita/M
+Car/MNY
+Carmon/M
+carnage/MS
+carnality/SM
+carnal/Y
+Carnap/M
+carnation/IMS
+Carnegie/M
+carnelian/SM
+Carney/M
+carney's
+carnival/MS
+carnivore/SM
+carnivorousness/MS
+carnivorous/YP
+Carnot/M
+Carny/M
+carny/SDG
+carob/SM
+Carola/M
+Carolan/M
+Carolann/M
+Carolee/M
+Carole/M
+caroler/M
+Carolina/MS
+Caroline/M
+Carolingian
+Carolinian/S
+Carolin/M
+Caroljean/M
+Carol/M
+carol/SGZMRD
+Carolus/M
+Carolyne/M
+Carolyn/M
+Carolynn/M
+Caro/M
+carom/GSMD
+Caron/M
+carotene/MS
+carotid/MS
+carousal/MS
+carousel/MS
+carouser/M
+carouse/SRDZG
+carpal/SM
+Carpathian/MS
+carpel/SM
+carpenter/DSMG
+carpentering/M
+Carpenter/M
+carpentry/MS
+carper/M
+carpetbagged
+carpetbagger/MS
+carpetbagging
+carpetbag/MS
+carpeting/M
+carpet/MDJGS
+carpi/M
+carping/Y
+carp/MDRSGZ
+carpool/DGS
+carport/MS
+carpus/M
+carrageen/M
+Carree/M
+carrel/SM
+carriage/SM
+carriageway/SM
+Carrie/M
+carrier/M
+Carrier/M
+Carrillo/M
+Carri/M
+carrion/SM
+Carrissa/M
+Carr/M
+Carroll/M
+Carrol/M
+carrot/MS
+carroty/RT
+carrousel's
+carryall/MS
+Carry/MR
+carryout/S
+carryover/S
+carry/RSDZG
+carsickness/SM
+carsick/P
+Carson/M
+cartage/MS
+cartel/SM
+carte/M
+carter/M
+Carter/M
+Cartesian
+Carthage/M
+Carthaginian/S
+carthorse/MS
+Cartier/M
+cartilage/MS
+cartilaginous
+cartload/MS
+cart/MDRGSZ
+Cart/MR
+cartographer/MS
+cartographic
+cartography/MS
+carton/GSDM
+cartoon/GSDM
+cartoonist/MS
+cartridge/SM
+cartwheel/MRDGS
+Cartwright/M
+Carty/RM
+Caruso/M
+carve/DSRJGZ
+carven
+carver/M
+Carver/M
+carving/M
+caryatid/MS
+Caryl/M
+Cary/M
+Caryn/M
+car/ZGSMDR
+casaba/SM
+Casablanca/M
+Casals/M
+Casandra/M
+Casanova/SM
+Casar/M
+casbah/M
+cascade/MSDG
+Cascades/M
+cascara/MS
+casebook/SM
+case/DSJMGL
+cased/U
+caseharden/SGD
+casein/SM
+caseload/MS
+Case/M
+casement/SM
+caseworker/M
+casework/ZMRS
+Casey/M
+cashbook/SM
+cashew/MS
+cash/GZMDSR
+cashier/SDMG
+cashless
+Cash/M
+cashmere/MS
+Casie/M
+Casi/M
+casing/M
+casino/MS
+casket/SGMD
+cask/GSDM
+Caspar/M
+Casper/M
+Caspian
+Cass
+Cassandra/SM
+Cassandre/M
+Cassandry/M
+Cassatt/M
+Cassaundra/M
+cassava/MS
+casserole/MGSD
+cassette/SM
+Cassey/M
+cassia/MS
+Cassie/M
+Cassi/M
+cassino's
+Cassiopeia/M
+Cassite/M
+Cassius/M
+cassock/SDM
+Cassondra/M
+cassowary/SM
+Cassy/M
+Castaneda/M
+castanet/SM
+castaway/SM
+castellated
+caste/MHS
+caster/M
+cast/GZSJMDR
+castigate/XGNSD
+castigation/M
+castigator/SM
+Castile's
+Castillo/M
+casting/M
+castle/GMSD
+castoff/S
+Castor/M
+castor's
+castrate/DSNGX
+castration/M
+Castries/M
+Castro/M
+casts/A
+casualness/SM
+casual/SYP
+casualty/SM
+casuistic
+casuist/MS
+casuistry/SM
+cataclysmal
+cataclysmic
+cataclysm/MS
+catacomb/MS
+catafalque/SM
+Catalan/MS
+catalepsy/MS
+cataleptic/S
+Catalina/M
+cataloger/M
+catalog/SDRMZG
+Catalonia/M
+catalpa/SM
+catalysis/M
+catalyst/SM
+catalytic
+catalytically
+catalyze/DSG
+catamaran/MS
+catapult/MGSD
+cataract/MS
+Catarina/M
+catarrh/M
+catarrhs
+catastrophe/SM
+catastrophic
+catastrophically
+catatonia/MS
+catatonic/S
+Catawba/M
+catbird/MS
+catboat/SM
+catcall/SMDG
+catchable/U
+catchall/MS
+catch/BRSJLGZ
+catcher/M
+catchment/SM
+catchpenny/S
+catchphrase/S
+catchup/MS
+catchword/MS
+catchy/TR
+catechism/MS
+catechist/SM
+catechize/SDG
+catecholamine/MS
+categoric
+categorical/Y
+categorization/MS
+categorized/AU
+categorize/RSDGZ
+category/MS
+Cate/M
+catenate/NF
+catenation/MF
+catercorner
+caterer/M
+cater/GRDZ
+Caterina/M
+catering/M
+Caterpillar
+caterpillar/SM
+caterwaul/DSG
+catfish/MS
+catgut/SM
+Catha/M
+Catharina/M
+Catharine/M
+catharses
+catharsis/M
+cathartic/S
+Cathay/M
+cathedral/SM
+Cathee/M
+Catherina/M
+Catherine/M
+Catherin/M
+Cather/M
+Cathe/RM
+catheterize/GSD
+catheter/SM
+Cathie/M
+Cathi/M
+Cathleen/M
+Cathlene/M
+cathode/MS
+cathodic
+catholicism
+Catholicism/SM
+catholicity/MS
+catholic/MS
+Catholic/S
+Cathrine/M
+Cathrin/M
+Cathryn/M
+Cathyleen/M
+Cathy/M
+Catie/M
+Catiline/M
+Cati/M
+Catina/M
+cationic
+cation/MS
+catkin/SM
+Catlaina/M
+Catlee/M
+catlike
+Catlin/M
+catnapped
+catnapping
+catnap/SM
+catnip/MS
+Cato/M
+Catrina/M
+Catriona/M
+Catskill/SM
+cat/SMRZ
+catsup's
+cattail/SM
+catted
+cattery/M
+cattily
+cattiness/SM
+catting
+cattle/M
+cattleman/M
+cattlemen
+Catt/M
+catty/PRST
+Catullus/M
+CATV
+catwalk/MS
+Caty/M
+Caucasian/S
+Caucasoid/S
+Caucasus/M
+Cauchy/M
+caucus/SDMG
+caudal/Y
+caught/U
+cauldron/MS
+cauliflower/MS
+caulker/M
+caulk/JSGZRD
+causality/SM
+causal/YS
+causate/XVN
+causation/M
+causative/SY
+cause/DSRGMZ
+caused/U
+causeless
+causerie/MS
+causer/M
+causeway/SGDM
+caustically
+causticity/MS
+caustic/YS
+cauterization/SM
+cauterized/U
+cauterize/GSD
+cautionary
+cautioner/M
+caution/GJDRMSZ
+cautiousness's/I
+cautiousness/SM
+cautious/PIY
+cavalcade/MS
+cavalierness/M
+cavalier/SGYDP
+cavalryman/M
+cavalrymen
+cavalry/MS
+caveat/SM
+caveatted
+caveatting
+cave/GFRSD
+caveman/M
+cavemen
+Cavendish/M
+caver/M
+cavern/GSDM
+cavernous/Y
+cave's
+caviar/MS
+caviler/M
+cavil/SJRDGZ
+caving/MS
+cavity/MFS
+cavort/SDG
+Cavour/M
+caw/SMDG
+Caxton/M
+Caye/M
+Cayenne/M
+cayenne/SM
+Cayla/M
+Cayman/M
+cayman/SM
+cay's
+cay/SC
+Cayuga/M
+cayuse/SM
+Caz/M
+Cazzie/M
+c/B
+CB
+CBC
+Cb/M
+CBS
+cc
+Cchaddie/M
+CCTV
+CCU
+CD
+CDC/M
+Cd/M
+CDT
+Ce
+cease/DSCG
+ceasefire/S
+ceaselessness/SM
+ceaseless/YP
+ceasing/U
+Ceausescu/M
+Cebuano/M
+Cebu/M
+ceca
+cecal
+Cecelia/M
+Cece/M
+Cecile/M
+Ceciley/M
+Cecilia/M
+Cecilio/M
+Cecilius/M
+Cecilla/M
+Cecil/M
+Cecily/M
+cecum/M
+cedar/SM
+ceded/A
+cede/FRSDG
+ceder's/F
+ceder/SM
+cedes/A
+cedilla/SM
+ceding/A
+Ced/M
+Cedric/M
+ceilidh/M
+ceiling/MDS
+Ceil/M
+celandine/MS
+Celanese/M
+Celebes's
+celebrant/MS
+celebratedness/M
+celebrated/P
+celebrate/XSDGN
+celebration/M
+celebrator/MS
+celebratory
+celebrity/MS
+Cele/M
+Celene/M
+celerity/SM
+celery/SM
+Celesta/M
+celesta/SM
+Celeste/M
+celestial/YS
+Celestia/M
+Celestina/M
+Celestine/M
+Celestyna/M
+Celestyn/M
+Celia/M
+celibacy/MS
+celibate/SM
+Celie/M
+Celina/M
+Celinda/M
+Celine/M
+Celinka/M
+Celisse/M
+Celka/M
+cellarer/M
+cellar/RDMGS
+Celle/M
+cell/GMDS
+Cellini/M
+cellist/SM
+Cello/M
+cello/MS
+cellophane/SM
+cellphone/S
+cellular/SY
+cellulite/S
+celluloid/SM
+cellulose/SM
+Celsius/S
+Celtic/SM
+Celt/MS
+cementa
+cementer/M
+cementum/SM
+cement/ZGMRDS
+cemetery/MS
+cenobite/MS
+cenobitic
+cenotaph/M
+cenotaphs
+Cenozoic
+censer/MS
+censored/U
+censor/GDMS
+censorial
+censoriousness/MS
+censorious/YP
+censorship/MS
+censure/BRSDZMG
+censurer/M
+census/SDMG
+centaur/SM
+Centaurus/M
+centavo/SM
+centenarian/MS
+centenary/S
+centennial/YS
+center/AC
+centerboard/SM
+centered
+centerer/S
+centerfold/S
+centering/SM
+centerline/SM
+centerpiece/SM
+center's
+Centigrade
+centigrade/S
+centigram/SM
+centiliter/MS
+centime/SM
+centimeter/SM
+centipede/MS
+Centralia/M
+centralism/M
+centralist/M
+centrality/MS
+centralization/CAMS
+centralize/CGSD
+centralizer/SM
+centralizes/A
+central/STRY
+centrefold's
+Centrex
+CENTREX/M
+centric/F
+centrifugal/SY
+centrifugate/NM
+centrifugation/M
+centrifuge/GMSD
+centripetal/Y
+centrist/MS
+centroid/MS
+cent/SZMR
+centurion/MS
+century/MS
+CEO
+cephalic/S
+Cepheid
+Cepheus/M
+ceramicist/S
+ceramic/MS
+ceramist/MS
+cerate/MD
+Cerberus/M
+cereal/MS
+cerebellar
+cerebellum/MS
+cerebra
+cerebral/SY
+cerebrate/XSDGN
+cerebration/M
+cerebrum/MS
+cerement/SM
+ceremonial/YSP
+ceremoniousness/MS
+ceremoniousness's/U
+ceremonious/YUP
+ceremony/MS
+Cerenkov/M
+Ceres/M
+Cerf/M
+cerise/SM
+cerium/MS
+cermet/SM
+CERN/M
+certainer
+certainest
+certainty/UMS
+certain/UY
+cert/FS
+certifiable
+certifiably
+certificate/SDGM
+certification/AMC
+certified/U
+certifier/M
+certify/DRSZGNX
+certiorari/M
+certitude/ISM
+cerulean/MS
+Cervantes/M
+cervical
+cervices/M
+cervix/M
+Cesarean
+cesarean/S
+Cesare/M
+Cesar/M
+Cesaro/M
+cesium/MS
+cessation/SM
+cession/FAMSK
+Cessna/M
+cesspit/M
+cesspool/SM
+Cesya/M
+cetacean/S
+cetera/S
+Cetus/M
+Ceylonese
+Ceylon/M
+Cezanne/S
+cf
+CF
+CFC
+Cf/M
+CFO
+cg
+Chablis/SM
+Chaddie/M
+Chadd/M
+Chaddy/M
+Chadian/S
+Chad/M
+Chadwick/M
+chafe/GDSR
+chafer/M
+chaffer/DRG
+chafferer/M
+Chaffey/M
+chaff/GRDMS
+chaffinch/SM
+Chagall/M
+chagrin/DGMS
+Chaim/M
+chainlike
+chain's
+chainsaw/SGD
+chain/SGUD
+chairlady/M
+chairlift/MS
+chairman/MDGS
+chairmanship/MS
+chairmen
+chairperson/MS
+chair/SGDM
+chairwoman/M
+chairwomen
+chaise/SM
+chalcedony/MS
+Chaldea/M
+Chaldean/M
+chalet/SM
+chalice/DSM
+chalkboard/SM
+chalk/DSMG
+chalkiness/S
+chalkline
+chalky/RPT
+challenged/U
+challenger/M
+challenge/ZGSRD
+challenging/Y
+challis/SM
+Chalmers
+chamberer/M
+Chamberlain/M
+chamberlain/MS
+chambermaid/MS
+chamberpot/S
+Chambers/M
+chamber/SZGDRM
+chambray/MS
+chameleon/SM
+chamfer/DMGS
+chammy's
+chamois/DSMG
+chamomile/MS
+champagne/MS
+champaign/M
+champ/DGSZ
+champion/MDGS
+championship/MS
+Champlain/M
+chanced/M
+chance/GMRSD
+chancellery/SM
+chancellorship/SM
+chancellor/SM
+Chancellorsville/M
+chancel/SM
+Chance/M
+chancery/SM
+Chancey/M
+chanciness/S
+chancing/M
+chancre/SM
+chancy/RPT
+Chandal/M
+Chanda/M
+chandelier/SM
+Chandigarh/M
+Chandler/M
+chandler/MS
+Chandragupta/M
+Chandra/M
+Chandrasekhar/M
+Chandy/M
+Chanel/M
+Chane/M
+Chaney/M
+Changchun/M
+changeabilities
+changeability/UM
+changeableness/SM
+changeable/U
+changeably/U
+changed/U
+change/GZRSD
+changeless
+changeling/M
+changeover/SM
+changer/M
+changing/U
+Chang/M
+Changsha/M
+Chan/M
+Channa/M
+channeler/M
+channeling/M
+channelization/SM
+channelize/GDS
+channellings
+channel/MDRZSG
+Channing/M
+chanson/SM
+Chantalle/M
+Chantal/M
+chanter/M
+chanteuse/MS
+chantey/SM
+chanticleer/SM
+Chantilly/M
+chantry/MS
+chant/SJGZMRD
+chanty's
+Chanukah's
+Chao/M
+chaos/SM
+chaotic
+chaotically
+chaparral/MS
+chapbook/SM
+chapeau/MS
+chapel/MS
+chaperonage/MS
+chaperoned/U
+chaperone's
+chaperon/GMDS
+chaplaincy/MS
+chaplain/MS
+chaplet/SM
+Chaplin/M
+Chapman/M
+chap/MS
+Chappaquiddick/M
+chapped
+chapping
+chapter/SGDM
+Chara
+charabanc/MS
+characterful
+characteristically/U
+characteristic/SM
+characterizable/MS
+characterization/MS
+characterize/DRSBZG
+characterized/U
+characterizer/M
+characterless
+character/MDSG
+charade/SM
+charbroil/SDG
+charcoal/MGSD
+Chardonnay
+chardonnay/S
+chard/SM
+chargeableness/M
+chargeable/P
+charged/U
+charge/EGRSDA
+charger/AME
+chargers
+char/GS
+Charil/M
+charily
+chariness/MS
+Charin/M
+charioteer/GSDM
+Chariot/M
+chariot/SMDG
+Charis
+charisma/M
+charismata
+charismatically
+charismatic/S
+Charissa/M
+Charisse/M
+charitablenesses
+charitableness/UM
+charitable/UP
+charitably/U
+Charita/M
+Charity/M
+charity/MS
+charlady/M
+Charla/M
+charlatanism/MS
+charlatanry/SM
+charlatan/SM
+Charlean/M
+Charleen/M
+Charlemagne/M
+Charlena/M
+Charlene/M
+Charles/M
+Charleston/SM
+Charley/M
+Charlie/M
+Charline/M
+Charlot/M
+Charlotta/M
+Charlotte/M
+Charlottesville/M
+Charlottetown/M
+Charlton/M
+Charmaine/M
+Charmain/M
+Charmane/M
+charmer/M
+Charmian/M
+Charmine/M
+charming/RYT
+Charmin/M
+Charmion/M
+charmless
+charm/SGMZRD
+Charolais
+Charo/M
+Charon/M
+charred
+charring
+charted/U
+charter/AGDS
+chartered/U
+charterer/SM
+charter's
+chartist/SM
+Chartres/M
+chartreuse/MS
+chartroom/S
+chart/SJMRDGBZ
+charwoman/M
+charwomen
+Charybdis/M
+Charyl/M
+chary/PTR
+Chas
+chase/DSRGZ
+Chase/M
+chaser/M
+chasing/M
+Chasity/M
+chasm/SM
+chassis/M
+chastely
+chasteness/SM
+chasten/GSD
+chaste/UTR
+chastisement/SM
+chastiser/M
+chastise/ZGLDRS
+Chastity/M
+chastity/SM
+chastity's/U
+chasuble/SM
+Chateaubriand
+château/M
+chateaus
+châteaux
+châtelaine/SM
+chat/MS
+Chattahoochee/M
+Chattanooga/M
+chatted
+chattel/MS
+chatterbox/MS
+chatterer/M
+Chatterley/M
+chatter/SZGDRY
+Chatterton/M
+chattily
+chattiness/SM
+chatting
+chatty/RTP
+Chaucer/M
+chauffeur/GSMD
+Chaunce/M
+Chauncey/M
+Chautauqua/M
+chauvinism/MS
+chauvinistic
+chauvinistically
+chauvinist/MS
+Chavez/M
+chaw
+Chayefsky/M
+cheapen/DG
+cheapish
+cheapness/MS
+cheapskate/MS
+cheap/YRNTXSP
+cheater/M
+cheat/RDSGZ
+Chechen/M
+Chechnya/M
+checkable/U
+checkbook/MS
+checked/UA
+checkerboard/MS
+checker/DMG
+check/GZBSRDM
+checklist/S
+checkmate/MSDG
+checkoff/SM
+checkout/S
+checkpoint/MS
+checkroom/MS
+check's/A
+checks/A
+checksummed
+checksumming
+checksum/SM
+checkup/MS
+Cheddar/MS
+cheddar/S
+cheekbone/SM
+cheek/DMGS
+cheekily
+cheekiness/SM
+cheeky/PRT
+cheep/GMDS
+cheerer/M
+cheerfuller
+cheerfullest
+cheerfulness/MS
+cheerful/YP
+cheerily
+cheeriness/SM
+cheerio/S
+Cheerios/M
+cheerleader/SM
+cheerlessness/SM
+cheerless/PY
+cheers/S
+cheery/PTR
+cheer/YRDGZS
+cheeseburger/SM
+cheesecake/SM
+cheesecloth/M
+cheesecloths
+cheeseparing/S
+cheese/SDGM
+cheesiness/SM
+cheesy/PRT
+cheetah/M
+cheetahs
+Cheeto/M
+Cheever/M
+cheffed
+cheffing
+chef/SM
+Chekhov/M
+chelate/XDMNG
+chelation/M
+Chelsae/M
+Chelsea/M
+Chelsey/M
+Chelsie/M
+Chelsy/M
+Chelyabinsk/M
+chem
+Che/M
+chemic
+chemical/SYM
+chemiluminescence/M
+chemiluminescent
+chemise/SM
+chemistry/SM
+chemist/SM
+chemotherapeutic/S
+chemotherapy/SM
+chemurgy/SM
+Chengdu
+Cheng/M
+chenille/SM
+Chen/M
+Cheops/M
+Chere/M
+Cherey/M
+Cherianne/M
+Cherice/M
+Cherida/M
+Cherie/M
+Cherilyn/M
+Cherilynn/M
+Cheri/M
+Cherin/M
+Cherise/M
+cherisher/M
+cherish/GDRS
+Cherish/M
+Cheriton/M
+Cherlyn/M
+Cher/M
+Chernenko/M
+Chernobyl/M
+Cherokee/MS
+cheroot/MS
+Cherri/M
+Cherrita/M
+Cherry/M
+cherry/SM
+chert/MS
+cherubic
+cherubim/S
+cherub/SM
+chervil/MS
+Cherye/M
+Cheryl/M
+Chery/M
+Chesapeake/M
+Cheshire/M
+Cheslie/M
+chessboard/SM
+chessman/M
+chessmen
+chess/SM
+Chesterfield/M
+chesterfield/MS
+Chester/M
+Chesterton/M
+chestful/S
+chest/MRDS
+chestnut/SM
+Cheston/M
+chesty/TR
+Chet/M
+Chevalier/M
+chevalier/SM
+Cheviot/M
+cheviot/S
+Chev/M
+Chevrolet/M
+chevron/DMS
+Chevy/M
+chewer/M
+chew/GZSDR
+chewiness/S
+chewy/RTP
+Cheyenne/SM
+chg
+chge
+Chiang/M
+chianti/M
+Chianti/S
+chiaroscuro/SM
+Chiarra/M
+Chiba/M
+Chicagoan/SM
+Chicago/M
+Chicana/MS
+chicane/MGDS
+chicanery/MS
+Chicano/MS
+chichi/RTS
+chickadee/SM
+Chickasaw/SM
+chickenfeed
+chicken/GDM
+chickenhearted
+chickenpox/MS
+Chickie/M
+Chick/M
+chickpea/MS
+chickweed/MS
+chick/XSNM
+Chicky/M
+chicle/MS
+Chic/M
+chicness/S
+Chico/M
+chicory/MS
+chic/SYRPT
+chide/GDS
+chiding/Y
+chiefdom/MS
+chieftain/SM
+chief/YRMST
+chiffonier/MS
+chiffon/MS
+chigger/MS
+chignon/MS
+Chihuahua/MS
+chihuahua/S
+chilblain/MS
+childbearing/MS
+childbirth/M
+childbirths
+childcare/S
+childes
+child/GMYD
+childhood/MS
+childishness/SM
+childish/YP
+childlessness/SM
+childless/P
+childlikeness/M
+childlike/P
+childminders
+childproof/GSD
+childrearing
+children/M
+Chilean/S
+Chile/MS
+chile's
+chilies
+chili/M
+chiller/M
+chilliness/MS
+chilling/Y
+chilli's
+chill/MRDJGTZPS
+chillness/MS
+chilly/TPRS
+Chilton/M
+Chi/M
+chimaera's
+chimaerical
+Chimborazo/M
+chime/DSRGMZ
+Chimera/S
+chimera/SM
+chimeric
+chimerical
+chimer/M
+Chimiques
+chimney/SMD
+chimpanzee/SM
+chimp/MS
+chi/MS
+Chimu/M
+Ch'in
+China/M
+Chinaman/M
+Chinamen
+china/MS
+Chinatown/SM
+chinchilla/SM
+chine/MS
+Chinese/M
+Ching/M
+chink/DMSG
+chinless
+Chin/M
+chinned
+chinner/S
+chinning
+chino/MS
+Chinook/MS
+chin/SGDM
+chinstrap/S
+chintz/SM
+chintzy/TR
+chipboard/M
+Chipewyan/M
+Chip/M
+chipmunk/SM
+chipped
+Chippendale/M
+chipper/DGS
+Chippewa/MS
+chipping/MS
+chip/SM
+Chiquia/M
+Chiquita/M
+chiral
+Chirico/M
+chirography/SM
+chiropodist/SM
+chiropody/MS
+chiropractic/MS
+chiropractor/SM
+chirp/GDS
+chirpy/RT
+chirrup/DGS
+chiseler/M
+chisel/ZGSJMDR
+Chisholm/M
+Chisinau/M
+chitchat/SM
+chitchatted
+chitchatting
+chitinous
+chitin/SM
+chit/SM
+Chittagong/M
+chitterlings
+chivalric
+chivalrously/U
+chivalrousness/MS
+chivalrous/YP
+chivalry/SM
+chive/GMDS
+chivvy/D
+chivying
+chlamydiae
+chlamydia/S
+Chloe/M
+Chloette/M
+Chlo/M
+chloral/MS
+chlorate/M
+chlordane/MS
+chloride/MS
+chlorinated/C
+chlorinates/C
+chlorinate/XDSGN
+chlorination/M
+chlorine/MS
+Chloris
+chlorofluorocarbon/S
+chloroform/DMSG
+chlorophyll/SM
+chloroplast/MS
+chloroquine/M
+chm
+Ch/MGNRS
+chockablock
+chock/SGRDM
+chocoholic/S
+chocolate/MS
+chocolaty
+Choctaw/MS
+choiceness/M
+choice/RSMTYP
+choirboy/MS
+choirmaster/SM
+choir/SDMG
+chokeberry/M
+chokecherry/SM
+choke/DSRGZ
+choker/M
+chokes/M
+choking/Y
+cholera/SM
+choleric
+choler/SM
+cholesterol/SM
+choline/M
+cholinesterase/M
+chomp/DSG
+Chomsky/M
+Chongqing
+choose/GZRS
+chooser/M
+choosiness/S
+choosy/RPT
+chophouse/SM
+Chopin/M
+chopped
+chopper/SDMG
+choppily
+choppiness/MS
+chopping
+choppy/RPT
+chop/S
+chopstick/SM
+chorale/MS
+choral/SY
+chordal
+chordata
+chordate/MS
+chording/M
+chord/SGMD
+chorea/MS
+chore/DSGNM
+choreographer/M
+choreographic
+choreographically
+choreographs
+choreography/MS
+choreograph/ZGDR
+chorines
+chorion/M
+chorister/SM
+choroid/S
+chortler/M
+chortle/ZGDRS
+chorus/GDSM
+chosen/U
+chose/S
+Chou/M
+chowder/SGDM
+chow/DGMS
+Chretien/M
+Chris/M
+chrism/SM
+chrissake
+Chrisse/M
+Chrissie/M
+Chrissy/M
+Christabella/M
+Christabel/M
+Christalle/M
+Christal/M
+Christa/M
+Christan/M
+Christchurch/M
+Christean/M
+Christel/M
+Christendom/MS
+christened/U
+christening/SM
+Christen/M
+christen/SAGD
+Christensen/M
+Christenson/M
+Christiana/M
+Christiane/M
+Christianity/SM
+Christianize/GSD
+Christian/MS
+Christiano/M
+Christiansen/M
+Christians/N
+Christie/SM
+Christi/M
+Christina/M
+Christine/M
+Christin/M
+Christlike
+Christmas/SM
+Christmastide/SM
+Christmastime/S
+Christoffel/M
+Christoffer/M
+Christoforo/M
+Christoper/M
+Christophe/M
+Christopher/M
+Christoph/MR
+Christophorus/M
+Christos/M
+Christ/SMN
+Christye/M
+Christyna/M
+Christy's
+Chrisy/M
+chroma/M
+chromate/M
+chromatically
+chromaticism/M
+chromaticness/M
+chromatic/PS
+chromatics/M
+chromatin/MS
+chromatogram/MS
+chromatograph
+chromatographic
+chromatography/M
+chrome/GMSD
+chromic
+chromite/M
+chromium/SM
+chromosomal
+chromosome/MS
+chromosphere/M
+chronically
+chronicled/U
+chronicler/M
+chronicle/SRDMZG
+chronic/S
+chronograph/M
+chronographs
+chronography
+chronological/Y
+chronologist/MS
+chronology/MS
+chronometer/MS
+chronometric
+Chrotoem/M
+chrysalids
+chrysalis/SM
+Chrysa/M
+chrysanthemum/MS
+Chrysler/M
+Chrysostom/M
+Chrystal/M
+Chrystel/M
+Chryste/M
+chubbiness/SM
+chubby/RTP
+chub/MS
+Chucho/M
+chuck/GSDM
+chuckhole/SM
+chuckle/DSG
+chuckling/Y
+Chuck/M
+chuff/DM
+chugged
+chugging
+chug/MS
+Chukchi/M
+chukka/S
+Chumash/M
+chummed
+chummily
+chumminess/MS
+chumming
+chum/MS
+chummy/SRTP
+chumping/M
+chump/MDGS
+Chungking's
+Chung/M
+chunkiness/MS
+chunk/SGDM
+chunky/RPT
+chuntering
+churchgoer/SM
+churchgoing/SM
+Churchillian
+Churchill/M
+churchliness/M
+churchly/P
+churchman/M
+church/MDSYG
+churchmen
+Church/MS
+churchwarden/SM
+churchwoman/M
+churchwomen
+churchyard/SM
+churlishness/SM
+churlish/YP
+churl/SM
+churner/M
+churning/M
+churn/SGZRDM
+chute/DSGM
+chutney/MS
+chutzpah/M
+chutzpahs
+chutzpa/SM
+Chuvash/M
+ch/VT
+chyme/SM
+Ci
+CIA
+ciao/S
+cicada/MS
+cicatrice/S
+cicatrix's
+Cicely/M
+Cicero/M
+cicerone/MS
+ciceroni
+Ciceronian
+Cicily/M
+CID
+cider's/C
+cider/SM
+Cid/M
+Ciel/M
+cigarette/MS
+cigarillo/MS
+cigar/SM
+cilantro/S
+cilia/M
+ciliate/FDS
+ciliately
+cilium/M
+Cilka/M
+cinch/MSDG
+cinchona/SM
+Cincinnati/M
+cincture/MGSD
+Cinda/M
+Cindee/M
+Cindelyn/M
+cinder/DMGS
+Cinderella/MS
+Cindie/M
+Cindi/M
+Cindra/M
+Cindy/M
+cine/M
+cinema/SM
+cinematic
+cinematographer/MS
+cinematographic
+cinematography/MS
+Cinerama/M
+cinnabar/MS
+Cinnamon/M
+cinnamon/MS
+ciphered/C
+cipher/MSGD
+ciphers/C
+cir
+circa
+circadian
+Circe/M
+circler/M
+circle/RSDGM
+circlet/MS
+circuital
+circuit/GSMD
+circuitousness/MS
+circuitous/YP
+circuitry/SM
+circuity/MS
+circulant
+circularity/SM
+circularize/GSD
+circularness/M
+circular/PSMY
+circulate/ASDNG
+circulation/MA
+circulations
+circulative
+circulatory
+circumcise/DRSXNG
+circumcised/U
+circumciser/M
+circumcision/M
+circumference/SM
+circumferential/Y
+circumflex/MSDG
+circumlocution/MS
+circumlocutory
+circumnavigate/DSNGX
+circumnavigational
+circumnavigation/M
+circumpolar
+circumscribe/GSD
+circumscription/SM
+circumspection/SM
+circumspect/Y
+circumsphere
+circumstance/SDMG
+circumstantial/YS
+circumvention/MS
+circumvent/SBGD
+circus/SM
+Cirillo/M
+Cirilo/M
+Ciro/M
+cirque/SM
+cirrhoses
+cirrhosis/M
+cirrhotic/S
+cirri/M
+cirrus/M
+Cissiee/M
+Cissy/M
+cistern/SM
+citadel/SM
+citations/I
+citation/SMA
+cit/DSG
+cite/ISDAG
+Citibank/M
+citified
+citizenry/SM
+citizenship/MS
+citizen/SYM
+citrate/DM
+citric
+Citroen/M
+citronella/MS
+citron/MS
+citrus/SM
+city/DSM
+cityscape/MS
+citywide
+civet/SM
+civic/S
+civics/M
+civilian/SM
+civility/IMS
+civilizational/MS
+civilization/AMS
+civilizedness/M
+civilized/PU
+civilize/DRSZG
+civilizer/M
+civilizes/AU
+civil/UY
+civvies
+ck/C
+clack/SDG
+cladding/SM
+clads
+clad/U
+Claiborne/M
+Claiborn/M
+claimable
+claimant/MS
+claim/CDRSKAEGZ
+claimed/U
+claimer/KMACE
+Claire/M
+Clair/M
+Clairol/M
+clairvoyance/MS
+clairvoyant/YS
+clambake/MS
+clamberer/M
+clamber/SDRZG
+clammed
+clammily
+clamminess/MS
+clamming
+clam/MS
+clammy/TPR
+clamorer/M
+clamor/GDRMSZ
+clamorousness/UM
+clamorous/PUY
+clampdown/SM
+clamper/M
+clamp/MRDGS
+clamshell/MS
+Clancy/M
+clandestineness/M
+clandestine/YP
+clanger/M
+clangor/MDSG
+clangorous/Y
+clang/SGZRD
+clanking/Y
+clank/SGDM
+clan/MS
+clannishness/SM
+clannish/PY
+clansman/M
+clansmen
+clapboard/SDGM
+Clapeyron/M
+clapped
+clapper/GMDS
+clapping
+clap/S
+Clapton/M
+claptrap/SM
+claque/MS
+Clarabelle/M
+Clara/M
+Clarance/M
+Clare/M
+Claremont/M
+Clarence/M
+Clarendon/M
+Claresta/M
+Clareta/M
+claret/MDGS
+Claretta/M
+Clarette/M
+Clarey/M
+Claribel/M
+Clarice/M
+Clarie/M
+clarification/M
+clarifier/M
+clarify/NGXDRS
+Clari/M
+Clarinda/M
+Clarine/M
+clarinetist/SM
+clarinet/SM
+clarinettist's
+clarion/GSMD
+Clarissa/M
+Clarisse/M
+Clarita/M
+clarities
+clarity/UM
+Clarke/M
+Clark/M
+Clarridge/M
+Clary/M
+clasher/M
+clash/RSDG
+clasped/M
+clasper/M
+clasp's
+clasp/UGSD
+classer/M
+class/GRSDM
+classical/Y
+classicism/SM
+classicist/SM
+classic/S
+classics/M
+classifiable/U
+classification/AMC
+classificatory
+classified/S
+classifier/SM
+classify/CNXASDG
+classiness/SM
+classless/P
+classmate/MS
+classroom/MS
+classwork/M
+classy/PRT
+clatterer/M
+clattering/Y
+clatter/SGDR
+clattery
+Claudelle/M
+Claudell/M
+Claude/M
+Claudetta/M
+Claudette/M
+Claudia/M
+Claudian/M
+Claudianus/M
+Claudie/M
+Claudina/M
+Claudine/M
+Claudio/M
+Claudius/M
+clausal
+clause/MS
+Clausen/M
+Clausewitz/M
+Clausius/M
+Claus/NM
+claustrophobia/SM
+claustrophobic
+clave/RM
+clave's/F
+clavichord/SM
+clavicle/MS
+clavier/MS
+clawer/M
+claw/GDRMS
+Clayborne/M
+Clayborn/M
+Claybourne/M
+clayey
+clayier
+clayiest
+Clay/M
+clay/MDGS
+claymore/MS
+Clayson/M
+Clayton/M
+Clea/M
+cleanable
+cleaner/MS
+cleaning/SM
+cleanliness/UMS
+cleanly/PRTU
+cleanness/MSU
+cleanse
+cleanser/M
+cleans/GDRSZ
+cleanup/MS
+clean/UYRDPT
+clearance/MS
+clearcut
+clearer/M
+clearheadedness/M
+clearheaded/PY
+clearinghouse/S
+clearing/MS
+clearly
+clearness/MS
+clears
+clear/UTRD
+Clearwater/M
+clearway/M
+cleat/MDSG
+cleavage/MS
+cleaver/M
+cleave/RSDGZ
+Cleavland/M
+clef/SM
+cleft/MDGS
+clematis/MS
+clemence
+Clemenceau/M
+Clemence/M
+clemency/ISM
+Clemente/M
+Clementia/M
+Clementina/M
+Clementine/M
+Clementius/M
+clement/IY
+Clement/MS
+clements
+Clemmie/M
+Clemmy/M
+Clemons
+Clemson/M
+Clem/XM
+clenches
+clenching
+clench/UD
+Cleo/M
+Cleon/M
+Cleopatra/M
+Clerc/M
+clerestory/MS
+clergyman/M
+clergymen
+clergy/MS
+clergywoman
+clergywomen
+clericalism/SM
+clerical/YS
+cleric/SM
+Clerissa/M
+clerk/SGYDM
+clerkship/MS
+Cletis
+Cletus/M
+Cleveland/M
+Cleve/M
+cleverness/SM
+clever/RYPT
+Clevey/M
+Clevie/M
+clevis/SM
+clew/DMGS
+cl/GJ
+Cliburn/M
+clichéd
+cliché/SM
+clicker/M
+click/GZSRDM
+clientèle/SM
+client/SM
+cliffhanger/MS
+cliffhanging
+Cliff/M
+Clifford/M
+cliff/SM
+Clifton/M
+climacteric/SM
+climactic
+climate/MS
+climatic
+climatically
+climatological/Y
+climatologist/SM
+climatology/MS
+climax/MDSG
+climbable/U
+climb/BGZSJRD
+climbdown
+climbed/U
+climber/M
+clime/SM
+Clim/M
+clinch/DRSZG
+clincher/M
+clinching/Y
+Cline/M
+clinger/MS
+clinging
+cling/U
+clingy/TR
+clinical/Y
+clinician/MS
+clinic/MS
+clinker/GMD
+clink/RDGSZ
+clinometer/MIS
+Clint/M
+Clinton/M
+Clio/M
+cliometrician/S
+cliometric/S
+clipboard/SM
+clipped/U
+clipper/MS
+clipping/SM
+clip/SM
+clique/SDGM
+cliquey
+cliquier
+cliquiest
+cliquishness/SM
+cliquish/YP
+clitoral
+clitorides
+clitoris/MS
+Clive/M
+cloacae
+cloaca/M
+cloakroom/MS
+cloak's
+cloak/USDG
+clobber/DGS
+cloche/MS
+clocker/M
+clockmaker/M
+clock/SGZRDMJ
+clockwatcher
+clockwise
+clockwork/MS
+clodded
+clodding
+cloddishness/M
+cloddish/P
+clodhopper/SM
+clod/MS
+Cloe/M
+clogged/U
+clogging/U
+clog's
+clog/US
+cloisonné
+cloisonnes
+cloister/MDGS
+cloistral
+Clo/M
+clomp/MDSG
+clonal
+clone/DSRGMZ
+clonk/SGD
+clopped
+clopping
+clop/S
+Cloris/M
+closed/U
+close/EDSRG
+closefisted
+closely
+closemouthed
+closeness/MS
+closeout/MS
+closer/EM
+closers
+closest
+closet/MDSG
+closeup/S
+closing/S
+closured
+closure/EMS
+closure's/I
+closuring
+clothbound
+clothesbrush
+clotheshorse/MS
+clothesline/SDGM
+clothesman
+clothesmen
+clothespin/MS
+clothe/UDSG
+cloth/GJMSD
+clothier/MS
+clothing/M
+Clotho/M
+cloths
+Clotilda/M
+clot/MS
+clotted
+clotting
+cloture/MDSG
+cloudburst/MS
+clouded/U
+cloudiness/SM
+cloudlessness/M
+cloudless/YP
+cloudscape/SM
+cloud/SGMD
+cloudy/TPR
+clout/GSMD
+cloven
+cloverleaf/MS
+clover/M
+clove/SRMZ
+Clovis/M
+clown/DMSG
+clownishness/SM
+clownish/PY
+cloy/DSG
+cloying/Y
+clubbed/M
+clubbing/M
+clubfeet
+clubfoot/DM
+clubhouse/SM
+club/MS
+clubroom/SM
+cluck/GSDM
+clueless
+clue/MGDS
+Cluj/M
+clump/MDGS
+clumpy/RT
+clumsily
+clumsiness/MS
+clumsy/PRT
+clung
+clunk/SGZRDM
+clunky/PRYT
+clustered/AU
+clusters/A
+cluster/SGJMD
+clutch/DSG
+cluttered/U
+clutter/GSD
+Cl/VM
+Clyde/M
+Clydesdale/M
+Cly/M
+Clytemnestra/M
+Clyve/M
+Clywd/M
+cm
+Cm/M
+CMOS
+cnidarian/MS
+CNN
+CNS
+CO
+coacher/M
+coachman/M
+coachmen
+coach/MSRDG
+coachwork/M
+coadjutor/MS
+coagulable
+coagulant/SM
+coagulate/GNXSD
+coagulation/M
+coagulator/S
+coaler/M
+coalesce/GDS
+coalescence/SM
+coalescent
+coalface/SM
+coalfield/MS
+coalitionist/SM
+coalition/MS
+coal/MDRGS
+coalminers
+coarseness/SM
+coarsen/SGD
+coarse/TYRP
+coastal
+coaster/M
+coastguard/MS
+coastline/SM
+coast/SMRDGZ
+coated/U
+Coates/M
+coating/M
+coat/MDRGZJS
+coattail/S
+coattest
+coauthor/MDGS
+coaxer/M
+coax/GZDSR
+coaxial/Y
+coaxing/Y
+Cobain/M
+cobalt/MS
+cobbed
+Cobbie/M
+cobbing
+cobbler/M
+cobble/SRDGMZ
+cobblestone/MSD
+Cobb/M
+Cobby/M
+coble/M
+Cob/M
+COBOL
+Cobol/M
+cobra/MS
+cob/SM
+cobwebbed
+cobwebbing
+cobwebby/RT
+cobweb/SM
+cocaine/MS
+coca/MS
+cocci/MS
+coccus/M
+coccyges
+coccyx/M
+Cochabamba/M
+cochineal/SM
+Cochin/M
+Cochise/M
+cochleae
+cochlear
+cochlea/SM
+Cochran/M
+cockade/SM
+cockamamie
+cockatoo/SM
+cockatrice/MS
+cockcrow/MS
+cockerel/MS
+cocker/M
+cockeye/DM
+cockeyed/PY
+cockfighting/M
+cockfight/MJSG
+cock/GDRMS
+cockily
+cockiness/MS
+cocklebur/M
+cockle/SDGM
+cockleshell/SM
+Cockney
+cockney/MS
+cockpit/MS
+cockroach/SM
+cockscomb/SM
+cockshies
+cocksucker/S!
+cocksure
+cocktail/GDMS
+cocky/RPT
+cocoa/SM
+coco/MS
+coconut/SM
+cocoon/GDMS
+Cocteau/M
+COD
+coda/SM
+codded
+codding
+coddle/GSRD
+coddler/M
+codebook/S
+codebreak/R
+coded/UA
+Codee/M
+codeine/MS
+codename/D
+codependency/S
+codependent/S
+coder/CM
+code's
+co/DES
+codes/A
+code/SCZGJRD
+codetermine/S
+codeword/SM
+codex/M
+codfish/SM
+codger/MS
+codices/M
+codicil/SM
+Codie/M
+codification/M
+codifier/M
+codify/NZXGRSD
+Codi/M
+coding/M
+codling/M
+Cod/M
+cod/MDRSZGJ
+codpiece/MS
+Cody/M
+coedited
+coediting
+coeditor/MS
+coedits
+coed/SM
+coeducational
+coeducation/SM
+coefficient/SYM
+coelenterate/MS
+coequal/SY
+coercer/M
+coerce/SRDXVGNZ
+coercible/I
+coercion/M
+coerciveness/M
+coercive/PY
+coeval/YS
+coexistence/MS
+coexistent
+coexist/GDS
+coextensive/Y
+cofactor/MS
+coffeecake/SM
+coffeecup
+coffeehouse/SM
+coffeemaker/S
+coffeepot/MS
+coffee/SM
+cofferdam/SM
+coffer/DMSG
+Coffey/M
+coffin/DMGS
+Coffman/M
+cogency/MS
+cogent/Y
+cogged
+cogging
+cogitate/DSXNGV
+cogitation/M
+cogitator/MS
+cog/MS
+Cognac/M
+cognac/SM
+cognate/SXYN
+cognation/M
+cognitional
+cognition/SAM
+cognitive/SY
+cognizable
+cognizance/MAI
+cognizances/A
+cognizant/I
+cognomen/SM
+cognoscente
+cognoscenti
+cogwheel/SM
+cohabitant/MS
+cohabitational
+cohabitation/SM
+cohabit/SDG
+Cohan/M
+coheir/MS
+Cohen/M
+cohere/GSRD
+coherence/SIM
+coherencies
+coherency/I
+coherent/IY
+coherer/M
+cohesion/MS
+cohesiveness/SM
+cohesive/PY
+Cohn/M
+cohoes
+coho/MS
+cohort/SM
+coiffed
+coiffing
+coiffure/MGSD
+coif/SM
+coil/UGSAD
+Coimbatore/M
+coinage's/A
+coinage/SM
+coincide/GSD
+coincidence/MS
+coincidental/Y
+coincident/Y
+coined/U
+coiner/M
+coin/GZSDRM
+coinsurance/SM
+Cointon/M
+cointreau
+coital/Y
+coitus/SM
+coke/MGDS
+Coke/MS
+COL
+COLA
+colander/SM
+Colan/M
+Colas
+cola/SM
+colatitude/MS
+Colbert/M
+Colby/M
+coldblooded
+coldish
+coldness/MS
+cold/YRPST
+Coleen/M
+Cole/M
+Coleman/M
+Colene/M
+Coleridge/M
+coleslaw/SM
+Colet/M
+Coletta/M
+Colette/M
+coleus/SM
+Colfax/M
+Colgate/M
+colicky
+colic/SM
+coliform
+Colin/M
+coliseum/SM
+colitis/MS
+collaborate/VGNXSD
+collaboration/M
+collaborative/SY
+collaborator/SM
+collage/MGSD
+collagen/M
+collapse/SDG
+collapsibility/M
+collapsible
+collarbone/MS
+collar/DMGS
+collard/SM
+collarless
+collated/U
+collateral/SYM
+collate/SDVNGX
+collation/M
+collator/MS
+colleague/SDGM
+collectedness/M
+collected/PY
+collectible/S
+collection/AMS
+collective/SY
+collectivism/SM
+collectivist/MS
+collectivity/MS
+collectivization/MS
+collectivize/DSG
+collector/MS
+collect/SAGD
+Colleen/M
+colleen/SM
+college/SM
+collegiality/S
+collegian/SM
+collegiate/Y
+Collen/M
+Collete/M
+Collette/M
+coll/G
+collide/SDG
+Collie/M
+collie/MZSRD
+collier/M
+Collier/M
+colliery/MS
+collimate/C
+collimated/U
+collimates
+collimating
+collimation/M
+collimator/M
+collinear
+collinearity/M
+Colline/M
+Collin/MS
+collisional
+collision/SM
+collocate/XSDGN
+collocation/M
+colloidal/Y
+colloid/MS
+colloq
+colloquialism/MS
+colloquial/SY
+colloquies
+colloquium/SM
+colloquy/M
+collude/SDG
+collusion/SM
+collusive
+collying
+Colly/RM
+Colman/M
+Col/MY
+Cologne/M
+cologne/MSD
+Colo/M
+Colombia/M
+Colombian/S
+Colombo/M
+colonelcy/MS
+colonel/MS
+colonialism/MS
+colonialist/MS
+colonial/SPY
+colonist/SM
+colonization/ACSM
+colonize/ACSDG
+colonized/U
+colonizer/MS
+colonizes/U
+Colon/M
+colonnade/MSD
+colon/SM
+colony/SM
+colophon/SM
+Coloradan/S
+Coloradoan/S
+Colorado/M
+colorant/SM
+coloration/EMS
+coloratura/SM
+colorblindness/S
+colorblind/P
+colored/USE
+colorer/M
+colorfastness/SM
+colorfast/P
+colorfulness/MS
+colorful/PY
+colorimeter/SM
+colorimetry
+coloring/M
+colorization/S
+colorize/GSD
+colorizing/C
+colorlessness/SM
+colorless/PY
+colors/EA
+color/SRDMGZJ
+colossal/Y
+Colosseum/M
+colossi
+colossus/M
+colostomy/SM
+colostrum/SM
+col/SD
+colter/M
+coltishness/M
+coltish/PY
+Colt/M
+colt/MRS
+Coltrane/M
+Columbia/M
+Columbian
+Columbine/M
+columbine/SM
+Columbus/M
+columnar
+columnist/MS
+columnize/GSD
+column/SDM
+Colver/M
+Co/M
+comae
+comaker/SM
+Comanche/MS
+coma/SM
+comatose
+combatant/SM
+combativeness/MS
+combative/PY
+combat/SVGMD
+combed/U
+comber/M
+combinational/A
+combination/ASM
+combinatorial/Y
+combinatoric/S
+combinator/SM
+combined/AU
+combiner/M
+combines/A
+combine/ZGBRSD
+combining/A
+combo/MS
+comb/SGZDRMJ
+Combs/M
+combusted
+combustibility/SM
+combustible/SI
+combustion/MS
+combustive
+Comdex/M
+Comdr/M
+comeback/SM
+comedian/SM
+comedic
+comedienne/SM
+comedown/MS
+comedy/SM
+come/IZSRGJ
+comeliness/SM
+comely/TPR
+comer/IM
+comes/M
+comestible/MS
+cometary
+cometh
+comet/SM
+comeuppance/SM
+comfit's
+comfit/SE
+comfortability/S
+comfortableness/MS
+comfortable/U
+comfortably/U
+comforted/U
+comforter/MS
+comfort/ESMDG
+comforting/YE
+comfy/RT
+comicality/MS
+comical/Y
+comic/MS
+Cominform/M
+comity/SM
+com/LJRTZG
+comm
+Com/M
+comma/MS
+commandant/MS
+commandeer/SDG
+commander/M
+commanding/Y
+commandment/SM
+commando/SM
+command/SZRDMGL
+commemorate/SDVNGX
+commemoration/M
+commemorative/YS
+commemorator/S
+commence/ALDSG
+commencement/AMS
+commencer/M
+commendably
+commendation/ASM
+commendatory/A
+commender/AM
+commend/GSADRB
+commensurable/I
+commensurate/IY
+commensurates
+commensuration/SM
+commentary/MS
+commentate/GSD
+commentator/SM
+commenter/M
+comment's
+comment/SUGD
+commerce/MGSD
+commercialism/MS
+commercialization/SM
+commercialize/GSD
+commercial/PYS
+Commie
+commie/SM
+commingle/GSD
+commiserate/VGNXSD
+commiseration/M
+commissariat/MS
+commissar/MS
+commissary/MS
+commission/ASCGD
+commissioner/SM
+commission's/A
+commitment/SM
+commit/SA
+committable
+committal/MA
+committals
+committed/UA
+committeeman/M
+committeemen
+committee/MS
+committeewoman/M
+committeewomen
+committing/A
+commode/MS
+commodes/IE
+commodiousness/MI
+commodious/YIP
+commodity/MS
+commodore/SM
+commonality/MS
+commonalty/MS
+commoner/MS
+commonness/MSU
+commonplaceness/M
+commonplace/SP
+common/RYUPT
+commonsense
+commons/M
+Commons/M
+commonweal/SHM
+commonwealth/M
+Commonwealth/M
+commonwealths
+Commonwealths
+commotion/MS
+communality/M
+communal/Y
+commune/XSDNG
+communicability/MS
+communicable/IU
+communicably
+communicant/MS
+communicate/VNGXSD
+communicational
+communication/M
+communicativeness/M
+communicative/PY
+communicator/SM
+communion/M
+Communion/SM
+communique/S
+communism/MS
+Communism/S
+communistic
+communist/MS
+Communist/S
+communitarian/M
+community/MS
+communize/SDG
+commutable/I
+commutate/XVGNSD
+commutation/M
+commutative/Y
+commutativity
+commutator/MS
+commute/BZGRSD
+commuter/M
+Comoros
+compaction/M
+compactness/MS
+compactor/MS
+compact/TZGSPRDY
+companionableness/M
+companionable/P
+companionably
+companion/GBSMD
+companionship/MS
+companionway/MS
+company/MSDG
+Compaq/M
+comparabilities
+comparability/IM
+comparableness/M
+comparable/P
+comparably/I
+comparativeness/M
+comparative/PYS
+comparator/SM
+compare/GRSDB
+comparer/M
+comparison/MS
+compartmental
+compartmentalization/SM
+compartmentalize/DSG
+compartment/SDMG
+compassionateness/M
+compassionate/PSDGY
+compassion/MS
+compass/MSDG
+compatibility/IMS
+compatibleness/M
+compatible/SI
+compatibly/I
+compatriot/SM
+compeer/DSGM
+compellable
+compelled
+compelling/YM
+compel/S
+compendious
+compendium/MS
+compensable
+compensated/U
+compensate/XVNGSD
+compensation/M
+compensator/M
+compensatory
+compete/GSD
+competence/ISM
+competency/IS
+competency's
+competent/IY
+competition/SM
+competitiveness/SM
+competitive/YP
+competitor/MS
+comp/GSYD
+compilable/U
+compilation/SAM
+compile/ASDCG
+compiler/CS
+compiler's
+complacence/S
+complacency/SM
+complacent/Y
+complainant/MS
+complainer/M
+complain/GZRDS
+complaining/YU
+complaint/MS
+complaisance/SM
+complaisant/Y
+complected
+complementariness/M
+complementarity
+complementary/SP
+complementation/M
+complementer/M
+complement/ZSMRDG
+complete/BTYVNGPRSDX
+completed/U
+completely/I
+completeness/ISM
+completer/M
+completion/MI
+complexional
+complexion/DMS
+complexity/MS
+complexness/M
+complex/TGPRSDY
+compliance/SM
+compliant/Y
+complicatedness/M
+complicated/YP
+complicate/SDG
+complication/M
+complicator/SM
+complicit
+complicity/MS
+complier/M
+complimentary/U
+complimenter/M
+compliment/ZSMRDG
+comply/ZXRSDNG
+component/SM
+comport/GLSD
+comportment/SM
+compose/CGASDE
+composedness/M
+composed/PY
+composer/CM
+composers
+composite/YSDXNG
+compositional/Y
+composition/CMA
+compositions/C
+compositor/MS
+compost/DMGS
+composure/ESM
+compote/MS
+compounded/U
+compounder/M
+compound/RDMBGS
+comprehend/DGS
+comprehending/U
+comprehensibility/SIM
+comprehensibleness/IM
+comprehensible/PI
+comprehensibly/I
+comprehension/IMS
+comprehensiveness/SM
+comprehensive/YPS
+compressed/Y
+compressibility/IM
+compressible/I
+compressional
+compression/CSM
+compressive/Y
+compressor/MS
+compress/SDUGC
+comprise/GSD
+compromiser/M
+compromise/SRDGMZ
+compromising/UY
+Compton/M
+comptroller/SM
+compulsion/SM
+compulsiveness/MS
+compulsive/PYS
+compulsivity
+compulsorily
+compulsory/S
+compunction/MS
+Compuserve/M
+CompuServe/M
+computability/M
+computable/UI
+computably
+computational/Y
+computation/SM
+computed/A
+computerese
+computerization/MS
+computerize/SDG
+computer/M
+compute/RSDZBG
+computes/A
+computing/A
+comradely/P
+comradeship/MS
+comrade/YMS
+Comte/M
+Conakry/M
+Conan/M
+Conant/M
+concatenate/XSDG
+concaveness/MS
+concave/YP
+conceal/BSZGRDL
+concealed/U
+concealer/M
+concealing/Y
+concealment/MS
+conceded/Y
+conceitedness/SM
+conceited/YP
+conceit/SGDM
+conceivable/IU
+conceivably/I
+conceive/BGRSD
+conceiver/M
+concentrate/VNGSDX
+concentration/M
+concentrator/MS
+concentrically
+Concepción/M
+conceptional
+conception/MS
+concept/SVM
+conceptuality/M
+conceptualization/A
+conceptualizations
+conceptualization's
+conceptualize/DRSG
+conceptualizing/A
+conceptual/Y
+concerned/YU
+concern/USGD
+concerted/PY
+concert/EDSG
+concertina/MDGS
+concertize/GDS
+concertmaster/MS
+concerto/SM
+concert's
+concessionaire/SM
+concessional
+concessionary
+concession/R
+Concetta/M
+Concettina/M
+Conchita/M
+conch/MDG
+conchs
+concierge/SM
+conciliar
+conciliate/GNVX
+conciliation/ASM
+conciliator/MS
+conciliatory/A
+conciseness/SM
+concise/TYRNPX
+concision/M
+conclave/S
+concluder/M
+conclude/RSDG
+conclusion/SM
+conclusive/IPY
+conclusiveness/ISM
+concocter/M
+concoction/SM
+concoct/RDVGS
+concomitant/YS
+concordance/MS
+concordant/Y
+concordat/SM
+Concorde/M
+Concordia/M
+Concord/MS
+concourse
+concreteness/MS
+concrete/NGXRSDPYM
+concretion/M
+concubinage/SM
+concubine/SM
+concupiscence/SM
+concupiscent
+concurrence/MS
+concur/S
+concussion/MS
+concuss/VD
+condemnate/XN
+condemnation/M
+condemnatory
+condemner/M
+condemn/ZSGRDB
+condensate/NMXS
+condensation/M
+condenser/M
+condense/ZGSD
+condensible
+condescend
+condescending/Y
+condescension/MS
+condign
+condiment/SM
+condition/AGSJD
+conditionals
+conditional/UY
+conditioned/U
+conditioner/MS
+conditioning/M
+condition's
+condole
+condolence/MS
+condominium/MS
+condom/SM
+condone/GRSD
+condoner/M
+Condorcet/M
+condor/MS
+condo/SM
+conduce/VGSD
+conduciveness/M
+conducive/P
+conductance/SM
+conductibility/SM
+conductible
+conduction/MS
+conductive/Y
+conductivity/MS
+conductor/MS
+conductress/MS
+conduct/V
+conduit/MS
+coneflower/M
+Conestoga
+coney's
+confabbed
+confabbing
+confab/MS
+confabulate/XSDGN
+confabulation/M
+confectioner/M
+confectionery/SM
+confectionist
+confection/RDMGZS
+confect/S
+Confederacy/M
+confederacy/MS
+confederate/M
+Confederate/S
+conferee/MS
+conference/DSGM
+conferrable
+conferral/SM
+conferred
+conferrer/SM
+conferring
+confer/SB
+confessed/Y
+confessional/SY
+confession/MS
+confessor/SM
+confetti/M
+confidante/SM
+confidant/SM
+confidence/SM
+confidentiality/MS
+confidentialness/M
+confidential/PY
+confident/Y
+confider/M
+confide/ZGRSD
+confiding/PY
+configuration/ASM
+configure/AGSDB
+confined/U
+confine/L
+confinement/MS
+confiner/M
+confirm/AGDS
+confirmation/ASM
+confirmatory
+confirmedness/M
+confirmed/YP
+confiscate/DSGNX
+confiscation/M
+confiscator/MS
+confiscatory
+conflagration/MS
+conflate/NGSDX
+conflation/M
+conflicting/Y
+conflict/SVGDM
+confluence/MS
+conformable/U
+conformal
+conformance/SM
+conformational/Y
+conform/B
+conformer/M
+conformism/SM
+conformist/SM
+conformities
+conformity/MUI
+confounded/Y
+confound/R
+confrère/MS
+confrontational
+confrontation/SM
+confronter/M
+confront/Z
+Confucianism/SM
+Confucian/S
+Confucius/M
+confusedness/M
+confused/PY
+confuse/RBZ
+confusing/Y
+confutation/MS
+confute/GRSD
+confuter/M
+conga/MDG
+congeal/GSDL
+congealment/MS
+congeniality/UM
+congenial/U
+congeries/M
+conger/SM
+congestion/MS
+congest/VGSD
+conglomerate/XDSNGVM
+conglomeration/M
+Cong/M
+Congolese
+Congo/M
+congrats
+congratulate/NGXSD
+congratulation/M
+congratulatory
+congregate/DSXGN
+congregational
+Congregational
+congregationalism/MS
+congregationalist/MS
+Congregationalist/S
+congregation/M
+congressional/Y
+congressman/M
+congressmen
+Congress/MS
+congress/MSDG
+congresspeople
+congressperson/S
+congresswoman/M
+congresswomen
+Congreve/M
+congruence/IM
+congruences
+congruency/M
+congruential
+congruent/YI
+congruity/MSI
+congruousness/IM
+congruous/YIP
+conicalness/M
+conical/PSY
+conic/S
+conics/M
+conifer/MS
+coniferous
+conjectural/Y
+conjecture/GMDRS
+conjecturer/M
+conjoint
+conjugacy
+conjugal/Y
+conjugate/XVNGYSDP
+conjugation/M
+conjunct/DSV
+conjunctiva/MS
+conjunctive/YS
+conjunctivitis/SM
+conjuration/MS
+conjurer/M
+conjure/RSDZG
+conjuring/M
+conker/M
+conk/ZDR
+Conley/M
+Con/M
+conman
+connect/ADGES
+connectedly/E
+connectedness/ME
+connected/U
+connectible
+Connecticut/M
+connection/AME
+connectionless
+connections/E
+connective/SYM
+connectivity/MS
+connector/MS
+Connelly/M
+Conner/M
+Connery/M
+connexion/MS
+Conney/M
+conn/GVDR
+Connie/M
+Conni/M
+conniption/MS
+connivance/MS
+conniver/M
+connive/ZGRSD
+connoisseur/MS
+Connor/SM
+connotative/Y
+Conn/RM
+connubial/Y
+Conny/M
+conquerable/U
+conquered/AU
+conqueror/MS
+conquer/RDSBZG
+conquers/A
+conquest/ASM
+conquistador/MS
+Conrade/M
+Conrad/M
+Conrado/M
+Conrail/M
+Conroy/M
+Consalve/M
+consanguineous/Y
+consanguinity/SM
+conscienceless
+conscientiousness/MS
+conscientious/YP
+conscionable/U
+consciousness/MUS
+conscious/UYSP
+conscription/SM
+consecrated/AU
+consecrates/A
+consecrate/XDSNGV
+consecrating/A
+consecration/AMS
+consecutiveness/M
+consecutive/YP
+consensus/SM
+consenter/M
+consenting/Y
+consent/SZGRD
+consequence
+consequentiality/S
+consequential/IY
+consequentialness/M
+consequently/I
+consequent/PSY
+conservancy/SM
+conservationism
+conservationist/SM
+conservation/SM
+conservatism/SM
+conservativeness/M
+Conservative/S
+conservative/SYP
+conservator/MS
+conservatory/MS
+con/SGM
+considerable/I
+considerables
+considerably/I
+considerateness/MSI
+considerate/XIPNY
+consideration/ASMI
+considered/U
+considerer/M
+consider/GASD
+considering/S
+consign/ASGD
+consignee/SM
+consignment/SM
+consist/DSG
+consistence/S
+consistency/IMS
+consistent/IY
+consistory/MS
+consolable/I
+Consolata/M
+consolation/MS
+consolation's/E
+consolatory
+consoled/U
+consoler/M
+console/ZBG
+consolidated/AU
+consolidate/NGDSX
+consolidates/A
+consolidation/M
+consolidator/SM
+consoling/Y
+consommé/S
+consonance/IM
+consonances
+consonantal
+consonant/MYS
+consortia
+consortium/M
+conspectus/MS
+conspicuousness/IMS
+conspicuous/YIP
+conspiracy/MS
+conspiratorial/Y
+conspirator/SM
+constable
+Constable/M
+constabulary/MS
+constance
+Constance/M
+Constancia/M
+constancy/IMS
+Constancy/M
+Constanta/M
+Constantia/M
+Constantina/M
+Constantine/M
+Constantin/M
+Constantino/M
+Constantinople/M
+constant/IY
+constants
+constellation/SM
+consternate/XNGSD
+consternation/M
+constipate/XDSNG
+constipation/M
+constituency/MS
+constituent/SYM
+constituted/A
+constitute/NGVXDS
+constitutes/A
+constituting/A
+Constitution
+constitutionality's
+constitutionality/US
+constitutionally/U
+constitutional/SY
+constitution/AMS
+constitutive/Y
+constrain
+constrainedly
+constrained/U
+constraint/MS
+constriction/MS
+constrictor/MS
+constrict/SDGV
+construable
+construct/ASDGV
+constructibility
+constructible/A
+constructional/Y
+constructionist/MS
+construction/MAS
+constructions/C
+constructiveness/SM
+constructive/YP
+constructor/MS
+construe/GSD
+Consuela/M
+Consuelo/M
+consular/S
+consulate/MS
+consul/KMS
+consulship/MS
+consultancy/S
+consultant/MS
+consultation/SM
+consultative
+consulted/A
+consulter/M
+consult/RDVGS
+consumable/S
+consumed/Y
+consume/JZGSDB
+consumerism/MS
+consumerist/S
+consumer/M
+consuming/Y
+consummate/DSGVY
+consummated/U
+consumption/SM
+consumptive/YS
+cont
+contact/BGD
+contacted/A
+contact's/A
+contacts/A
+contagion/SM
+contagiousness/MS
+contagious/YP
+containerization/SM
+containerize/GSD
+container/M
+containment/SM
+contain/SLZGBRD
+contaminant/SM
+contaminated/AU
+contaminates/A
+contaminate/SDCXNG
+contaminating/A
+contamination/CM
+contaminative
+contaminator/MS
+contd
+cont'd
+contemn/SGD
+contemplate/DVNGX
+contemplation/M
+contemplativeness/M
+contemplative/PSY
+contemporaneity/MS
+contemporaneousness/M
+contemporaneous/PY
+contemptibleness/M
+contemptible/P
+contemptibly
+contempt/M
+contemptuousness/SM
+contemptuous/PY
+contentedly/E
+contentedness/SM
+contented/YP
+content/EMDLSG
+contention/MS
+contentiousness/SM
+contentious/PY
+contently
+contentment/ES
+contentment's
+conterminous/Y
+contestable/I
+contestant/SM
+contested/U
+contextualize/GDS
+contiguity/MS
+contiguousness/M
+contiguous/YP
+continence/ISM
+Continental/S
+continental/SY
+continent/IY
+Continent/M
+continents
+continent's
+contingency/SM
+contingent/SMY
+continua
+continuable
+continual/Y
+continuance/ESM
+continuant/M
+continuation/ESM
+continue/ESDG
+continuer/M
+continuity/SEM
+continuousness/M
+continuous/YE
+continuum/M
+contortionist/SM
+contortion/MS
+contort/VGD
+contour
+contraband/SM
+contrabass/M
+contraception/SM
+contraceptive/S
+contract/DG
+contractible
+contractile
+contractual/Y
+contradict/GDS
+contradiction/MS
+contradictorily
+contradictoriness/M
+contradictory/PS
+contradistinction/MS
+contraflow/S
+contrail/M
+contraindicate/SDVNGX
+contraindication/M
+contralto/SM
+contrapositive/S
+contraption/MS
+contrapuntal/Y
+contrariety/MS
+contrarily
+contrariness/MS
+contrariwise
+contrary/PS
+contra/S
+contrasting/Y
+contrastive/Y
+contrast/SRDVGZ
+contravene/GSRD
+contravener/M
+contravention/MS
+Contreras/M
+contretemps/M
+contribute/XVNZRD
+contribution/M
+contributive/Y
+contributorily
+contributor/SM
+contributory/S
+contriteness/M
+contrite/NXP
+contrition/M
+contrivance/SM
+contriver/M
+contrive/ZGRSD
+control/CS
+controllability/M
+controllable/IU
+controllably/U
+controlled/CU
+controller/SM
+controlling/C
+control's
+controversialists
+controversial/UY
+controversy/MS
+controvert/DGS
+controvertible/I
+contumacious/Y
+contumacy/MS
+contumelious
+contumely/MS
+contuse/NGXSD
+contusion/M
+conundrum/SM
+conurbation/MS
+convalesce/GDS
+convalescence/SM
+convalescent/S
+convect/DSVG
+convectional
+convection/MS
+convector
+convene/ASDG
+convener/MS
+convenience/ISM
+convenient/IY
+conventicle/SM
+conventionalism/M
+conventionalist/M
+conventionality/SUM
+conventionalize/GDS
+conventional/UY
+convention/MA
+conventions
+convergence/MS
+convergent
+conversant/Y
+conversationalist/SM
+conversational/Y
+conversation/SM
+conversazione/M
+converse/Y
+conversion/AM
+conversioning
+converted/U
+converter/MS
+convert/GADS
+convertibility's/I
+convertibility/SM
+convertibleness/M
+convertible/PS
+convexity/MS
+convex/Y
+conveyance/DRSGMZ
+conveyancer/M
+conveyancing/M
+convey/BDGS
+conveyor/MS
+conviction/MS
+convict/SVGD
+convinced/U
+convincer/M
+convince/RSDZG
+convincingness/M
+convincing/PUY
+conviviality/MS
+convivial/Y
+convoke/GSD
+convolute/XDNY
+convolution/M
+convolve/C
+convolved
+convolves
+convolving
+convoy/GMDS
+convulse/SDXVNG
+convulsion/M
+convulsiveness/M
+convulsive/YP
+Conway/M
+cony/SM
+coo/GSD
+cookbook/SM
+cooked/AU
+Cooke/M
+cooker/M
+cookery/MS
+cook/GZDRMJS
+Cookie/M
+cookie/SM
+cooking/M
+Cook/M
+cookout/SM
+cooks/A
+cookware/SM
+cooky's
+coolant/SM
+cooled/U
+cooler/M
+Cooley/M
+coolheaded
+Coolidge/M
+coolie/MS
+coolness/MS
+cool/YDRPJGZTS
+coon/MS!
+coonskin/MS
+cooperage/MS
+cooperate/VNGXSD
+cooperation/M
+cooperativeness/SM
+cooperative/PSY
+cooperator/MS
+cooper/GDM
+Cooper/M
+coop/MDRGZS
+Coop/MR
+coordinated/U
+coordinateness/M
+coordinate/XNGVYPDS
+coordination/M
+coordinator/MS
+Coors/M
+cootie/SM
+coot/MS
+copay/S
+Copeland/M
+Copenhagen/M
+coper/M
+Copernican
+Copernicus/M
+cope/S
+copied/A
+copier/M
+copies/A
+copilot/SM
+coping/M
+copiousness/SM
+copious/YP
+coplanar
+Copland/M
+Copley/M
+copolymer/MS
+copora
+copped
+Copperfield/M
+copperhead/MS
+copper/MSGD
+copperplate/MS
+coppersmith/M
+coppersmiths
+coppery
+coppice's
+copping
+Coppola/M
+copra/MS
+coprolite/M
+coprophagous
+copse/M
+cops/GDS
+cop/SJMDRG
+copter/SM
+Coptic/M
+copula/MS
+copulate/XDSNGV
+copulation/M
+copulative/S
+copybook/MS
+copycat/SM
+copycatted
+copycatting
+copyist/SM
+copy/MZBDSRG
+copyrighter/M
+copyright/MSRDGZ
+copywriter/MS
+coquetry/MS
+coquette/DSMG
+coquettish/Y
+Corabella/M
+Corabelle/M
+Corabel/M
+coracle/SM
+Coralie/M
+Coraline/M
+coralline
+Coral/M
+coral/SM
+Coralyn/M
+Cora/M
+corbel/GMDJS
+Corbet/M
+Corbett/M
+Corbie/M
+Corbin/M
+Corby/M
+cordage/MS
+corded/AE
+Cordelia/M
+Cordelie/M
+Cordell/M
+corder/AM
+Cordey/M
+cord/FSAEM
+cordiality/MS
+cordialness/M
+cordial/PYS
+Cordie/M
+cordillera/MS
+Cordilleras
+Cordi/M
+cording/MA
+cordite/MS
+cordless
+Cord/M
+Cordoba
+cordon/DMSG
+cordovan/SM
+Cordula/M
+corduroy/GDMS
+Cordy/M
+cored/A
+Coreen/M
+Corella/M
+core/MZGDRS
+Corenda/M
+Corene/M
+corer/M
+corespondent/MS
+Coretta/M
+Corette/M
+Corey/M
+Corfu/M
+corgi/MS
+coriander/SM
+Corie/M
+Corilla/M
+Cori/M
+Corina/M
+Corine/M
+coring/M
+Corinna/M
+Corinne/M
+Corinthian/S
+Corinthians/M
+Corinth/M
+Coriolanus/M
+Coriolis/M
+Corissa/M
+Coriss/M
+corked/U
+corker/M
+cork/GZDRMS
+Cork/M
+corkscrew/DMGS
+corks/U
+Corliss/M
+Corly/M
+Cormack/M
+corm/MS
+cormorant/MS
+Cornall/M
+cornball/SM
+cornbread/S
+corncob/SM
+corncrake/M
+corneal
+cornea/SM
+Corneille/M
+Cornela/M
+Cornelia/M
+Cornelius/M
+Cornelle/M
+Cornell/M
+corner/GDM
+cornerstone/MS
+cornet/SM
+Corney/M
+cornfield/SM
+cornflake/S
+cornflour/M
+cornflower/SM
+corn/GZDRMS
+cornice/GSDM
+Cornie/M
+cornily
+corniness/S
+Cornish/S
+cornmeal/S
+cornrow/GDS
+cornstalk/MS
+cornstarch/SM
+cornucopia/MS
+Cornwallis/M
+Cornwall/M
+Corny/M
+corny/RPT
+corolla/MS
+corollary/SM
+Coronado/M
+coronal/MS
+coronary/S
+corona/SM
+coronate/NX
+coronation/M
+coroner/MS
+coronet/DMS
+Corot/M
+coroutine/SM
+Corp
+corporal/SYM
+corpora/MS
+corporate/INVXS
+corporately
+corporation/MI
+corporatism/M
+corporatist
+corporeality/MS
+corporeal/IY
+corporealness/M
+corp/S
+corpse/M
+corpsman/M
+corpsmen
+corps/SM
+corpulence/MS
+corpulentness/S
+corpulent/YP
+corpuscle/SM
+corpuscular
+corpus/M
+corr
+corralled
+corralling
+corral/MS
+correctable/U
+correct/BPSDRYTGV
+corrected/U
+correctional
+correction/MS
+corrective/YPS
+correctly/I
+correctness/MSI
+corrector/MS
+Correggio/M
+correlated/U
+correlate/SDXVNG
+correlation/M
+correlative/YS
+Correna/M
+correspond/DSG
+correspondence/MS
+correspondent/SM
+corresponding/Y
+Correy/M
+Corrianne/M
+corridor/SM
+Corrie/M
+corrigenda
+corrigendum/M
+corrigible/I
+Corri/M
+Corrina/M
+Corrine/M
+Corrinne/M
+corroborated/U
+corroborate/GNVXDS
+corroboration/M
+corroborative/Y
+corroborator/MS
+corroboratory
+corrode/SDG
+corrodible
+corrosion/SM
+corrosiveness/M
+corrosive/YPS
+corrugate/NGXSD
+corrugation/M
+corrupt/DRYPTSGV
+corrupted/U
+corrupter/M
+corruptibility/SMI
+corruptible/I
+corruption/IM
+corruptions
+corruptive/Y
+corruptness/MS
+Corry/M
+corsage/MS
+corsair/SM
+corset/GMDS
+Corsica/M
+Corsican/S
+cortège/MS
+Cortes/S
+cortex/M
+Cortez's
+cortical/Y
+cortices
+corticosteroid/SM
+Cortie/M
+cortisone/SM
+Cortland/M
+Cort/M
+Cortney/M
+Corty/M
+corundum/MS
+coruscate/XSDGN
+coruscation/M
+Corvallis/M
+corvette/MS
+Corvus/M
+Cory/M
+Cos
+Cosby/M
+Cosetta/M
+Cosette/M
+cos/GDS
+cosignatory/MS
+cosign/SRDZG
+cosily
+Cosimo/M
+cosine/MS
+cosiness/MS
+Cosme/M
+cosmetically
+cosmetician/MS
+cosmetic/SM
+cosmetologist/MS
+cosmetology/MS
+cosmic
+cosmical/Y
+cosmogonist/MS
+cosmogony/SM
+cosmological/Y
+cosmologist/MS
+cosmology/SM
+Cosmo/M
+cosmonaut/MS
+cosmopolitanism/MS
+cosmopolitan/SM
+cosmos/SM
+cosponsor/DSG
+cossack/S
+Cossack/SM
+cosset/GDS
+Costa/M
+Costanza/M
+costarred
+costarring
+costar/S
+Costello/M
+costiveness/M
+costive/PY
+costless
+costliness/SM
+costly/RTP
+cost/MYGVJS
+Costner/M
+costumer/M
+costume/ZMGSRD
+cotangent/SM
+Cote/M
+cote/MS
+coterie/MS
+coterminous/Y
+cotillion/SM
+Cotonou/M
+Cotopaxi/M
+cot/SGMD
+cottager/M
+cottage/ZMGSRD
+cottar's
+cotted
+cotter/SDM
+cotton/GSDM
+Cotton/M
+cottonmouth/M
+cottonmouths
+cottonseed/MS
+cottontail/SM
+cottonwood/SM
+cottony
+cotyledon/MS
+couching/M
+couch/MSDG
+cougar/MS
+cougher/M
+cough/RDG
+coughs
+couldn't
+could/T
+could've
+coulée/MS
+Coulomb/M
+coulomb/SM
+councilman/M
+councilmen
+councilor/MS
+councilperson/S
+council/SM
+councilwoman/M
+councilwomen
+counsel/GSDM
+counsellings
+counselor/MS
+countability/E
+countable/U
+countably/U
+countdown/SM
+counted/U
+count/EGARDS
+countenance/EGDS
+countenancer/M
+countenance's
+counteract/DSVG
+counteraction/SM
+counterargument/SM
+counterattack/DRMGS
+counterbalance/MSDG
+counterclaim/GSDM
+counterclockwise
+counterculture/MS
+countercyclical
+counterespionage/MS
+counterexample/S
+counterfeiter/M
+counterfeit/ZSGRD
+counterflow
+counterfoil/MS
+counterforce/M
+counter/GSMD
+counterinsurgency/MS
+counterintelligence/MS
+counterintuitive
+countermand/DSG
+counterman/M
+countermeasure/SM
+countermen
+counteroffensive/SM
+counteroffer/SM
+counterpane/SM
+counterpart/SM
+counterpoint/GSDM
+counterpoise/GMSD
+counterproductive
+counterproposal/M
+counterrevolutionary/MS
+counterrevolution/MS
+counter's/E
+counters/E
+countersignature/MS
+countersign/SDG
+countersink/SG
+counterspy/MS
+counterstrike
+countersunk
+countertenor/SM
+countervail/DSG
+counterweight/GMDS
+countess/MS
+countless/Y
+countrify/D
+countryman/M
+countrymen
+country/MS
+countryside/MS
+countrywide
+countrywoman/M
+countrywomen
+county/SM
+coup/ASDG
+coupe/MS
+Couperin/M
+couple/ACU
+coupled/CU
+coupler/C
+couplers
+coupler's
+couple's
+couples/CU
+couplet/SM
+coupling's/C
+coupling/SM
+coupon/SM
+coup's
+courage/MS
+courageously
+courageousness/MS
+courageous/U
+courages/E
+Courbet/M
+courgette/MS
+courier/GMDS
+course/EGSRDM
+courser's/E
+courser/SM
+course's/AF
+courses/FA
+coursework
+coursing/M
+Courtenay/M
+courteousness/EM
+courteousnesses
+courteous/PEY
+courtesan/MS
+courtesied
+courtesy/ESM
+courtesying
+court/GZMYRDS
+courthouse/MS
+courtier/SM
+courtliness/MS
+courtly/RTP
+Court/M
+Courtnay/M
+Courtney/M
+courtroom/MS
+courtship/SM
+courtyard/SM
+couscous/MS
+cousinly/U
+cousin/YMS
+Cousteau/M
+couture/SM
+couturier/SM
+covalent/Y
+covariance/SM
+covariant/S
+covariate/SN
+covary
+cove/DRSMZG
+covenanted/U
+covenanter/M
+covenant/SGRDM
+coven/SM
+Covent/M
+Coventry/MS
+coverable/E
+cover/AEGUDS
+coverage/MS
+coverall/DMS
+coverer/AME
+covering/MS
+coverlet/MS
+coversheet
+covers/M
+covertness/SM
+covert/YPS
+coveter/M
+coveting/Y
+covetousness/SM
+covetous/PY
+covet/SGRD
+covey/SM
+covington
+cowardice/MS
+cowardliness/MS
+cowardly/P
+Coward/M
+coward/MYS
+cowbell/MS
+cowbird/MS
+cowboy/MS
+cowcatcher/SM
+cowed/Y
+cowering/Y
+cower/RDGZ
+cowgirl/MS
+cowhand/S
+cowherd/SM
+cowhide/MGSD
+Cowley/M
+cowlick/MS
+cowling/M
+cowl/SGMD
+cowman/M
+cow/MDRSZG
+cowmen
+coworker/MS
+Cowper/M
+cowpoke/MS
+cowpony
+cowpox/MS
+cowpuncher/M
+cowpunch/RZ
+cowrie/SM
+cowshed/SM
+cowslip/MS
+coxcomb/MS
+Cox/M
+cox/MDSG
+coxswain/GSMD
+coy/CDSG
+coyer
+coyest
+coyly
+Coy/M
+coyness/MS
+coyote/SM
+coypu/SM
+cozenage/MS
+cozen/SGD
+cozily
+coziness/MS
+Cozmo/M
+Cozumel/M
+cozy/DSRTPG
+CPA
+cpd
+CPI
+cpl
+Cpl
+CPO
+CPR
+cps
+CPU/SM
+crabapple
+crabbedness/M
+crabbed/YP
+Crabbe/M
+crabber/MS
+crabbily
+crabbiness/S
+crabbing/M
+crabby/PRT
+crabgrass/S
+crablike
+crab/MS
+crackable/U
+crackdown/MS
+crackerjack/S
+cracker/M
+crackle/GJDS
+crackling/M
+crackly/RT
+crackpot/SM
+crackup/S
+crack/ZSBYRDG
+cradler/M
+cradle/SRDGM
+cradling/M
+craftily
+craftiness/SM
+Craft/M
+craft/MRDSG
+craftsman/M
+craftsmanship/SM
+craftsmen
+craftspeople
+craftspersons
+craftswoman
+craftswomen
+crafty/TRP
+Craggie/M
+cragginess/SM
+Craggy/M
+craggy/RTP
+crag/SM
+Craig/M
+Cramer/M
+crammed
+crammer/M
+cramming
+cramper/M
+cramp/MRDGS
+crampon/SM
+cram/S
+Cranach/M
+cranberry/SM
+Crandall/M
+crane/DSGM
+cranelike
+Crane/M
+Cranford/M
+cranial
+cranium/MS
+crankcase/MS
+crankily
+crankiness/MS
+crank/SGTRDM
+crankshaft/MS
+cranky/TRP
+Cranmer/M
+cranny/DSGM
+Cranston/M
+crape/SM
+crapped
+crappie/M
+crapping
+crappy/RST
+crapshooter/SM
+crap/SMDG!
+crasher/M
+crashing/Y
+crash/SRDGZ
+crassness/MS
+crass/TYRP
+crate/DSRGMZ
+crater/DMG
+Crater/M
+cravat/SM
+cravatted
+cravatting
+crave/DSRGJ
+cravenness/SM
+craven/SPYDG
+craver/M
+craving/M
+crawdad/S
+crawfish's
+Crawford/M
+crawler/M
+crawl/RDSGZ
+crawlspace/S
+crawlway
+crawly/TRS
+craw/SYM
+crayfish/GSDM
+Crayola/M
+crayon/GSDM
+Cray/SM
+craze/GMDS
+crazily
+craziness/MS
+crazy/SRTP
+creakily
+creakiness/SM
+creak/SDG
+creaky/PTR
+creamer/M
+creamery/MS
+creamily
+creaminess/SM
+cream/SMRDGZ
+creamy/TRP
+creased/CU
+crease/IDRSG
+crease's
+creases/C
+creasing/C
+created/U
+create/XKVNGADS
+creationism/MS
+creationist/MS
+Creation/M
+creation/MAK
+creativeness/SM
+creative/YP
+creativities
+creativity/K
+creativity's
+Creator/M
+creator/MS
+creatureliness/M
+creaturely/P
+creature/YMS
+crèche/SM
+credence/MS
+credent
+credential/SGMD
+credenza/SM
+credibility/IMS
+credible/I
+credibly/I
+creditability/M
+creditableness/M
+creditable/P
+creditably/E
+credited/U
+credit/EGBSD
+creditor/MS
+credit's
+creditworthiness
+credo/SM
+credulity/ISM
+credulous/IY
+credulousness/SM
+creedal
+creed/C
+creeds
+creed's
+creekside
+creek/SM
+Creek/SM
+creel/SMDG
+Cree/MDS
+creeper/M
+creepily
+creepiness/SM
+creep/SGZR
+creepy/PRST
+Creigh/M
+Creight/M
+Creighton/M
+cremate/XDSNG
+cremation/M
+crematoria
+crematorium/MS
+crematory/S
+creme/S
+crenelate/XGNSD
+crenelation/M
+Creole/MS
+creole/SM
+Creon/M
+creosote/MGDS
+crepe/DSGM
+crept
+crescendoed
+crescendoing
+crescendo/SCM
+crescent/MS
+cress/S
+crestfallenness/M
+crestfallen/PY
+cresting/M
+crestless
+crest/SGMD
+Crestview/M
+cretaceous
+Cretaceously/M
+Cretaceous/Y
+Cretan/S
+Crete/M
+cretinism/MS
+cretin/MS
+cretinous
+cretonne/SM
+crevasse/DSMG
+crevice/SM
+crew/DMGS
+crewel/SM
+crewelwork/SM
+crewman/M
+crewmen
+cribbage/SM
+cribbed
+cribber/SM
+cribbing/M
+crib/SM
+Crichton/M
+cricketer/M
+cricket/SMZRDG
+crick/GDSM
+Crick/M
+cried/C
+crier/CM
+cries/C
+Crimea/M
+Crimean
+crime/GMDS
+criminality/MS
+criminalization/C
+criminalize/GC
+criminal/SYM
+criminologist/SM
+criminology/MS
+crimper/M
+crimp/RDGS
+crimson/DMSG
+cringer/M
+cringe/SRDG
+crinkle/DSG
+crinkly/TRS
+Crin/M
+crinoline/SM
+cripple/GMZDRS
+crippler/M
+crippling/Y
+Crisco/M
+crises
+crisis/M
+Cris/M
+crisper/M
+crispiness/SM
+crispness/MS
+crisp/PGTYRDS
+crispy/RPT
+criss
+crisscross/GDS
+Crissie/M
+Crissy/M
+Cristabel/M
+Cristal/M
+Crista/M
+Cristen/M
+Cristian/M
+Cristiano/M
+Cristie/M
+Cristi/M
+Cristina/M
+Cristine/M
+Cristin/M
+Cristionna/M
+Cristobal/M
+Cristy/M
+criteria
+criterion/M
+criticality
+critically/U
+criticalness/M
+critical/YP
+criticism/MS
+criticized/U
+criticize/GSRDZ
+criticizer/M
+criticizes/A
+criticizingly/S
+criticizing/UY
+critic/MS
+critique/MGSD
+critter/SM
+Cr/M
+croaker/M
+croak/SRDGZ
+croaky/RT
+Croatia/M
+Croatian/S
+Croat/SM
+Croce/M
+crocheter/M
+crochet/RDSZJG
+crockery/SM
+Crockett/M
+Crockpot/M
+crock/SGRDM
+crocodile/MS
+crocus/SM
+Croesus/SM
+crofter/M
+croft/MRGZS
+croissant/MS
+Croix/M
+Cromwellian
+Cromwell/M
+crone/SM
+Cronin/M
+Cronkite/M
+Cronus/M
+crony/SM
+crookedness/SM
+crooked/TPRY
+Crookes/M
+crookneck/MS
+crook/SGDM
+crooner/M
+croon/SRDGZ
+cropland/MS
+crop/MS
+cropped
+cropper/SM
+cropping
+croquet/MDSG
+croquette/SM
+Crosby/M
+crosier/SM
+crossarm
+crossbarred
+crossbarring
+crossbar/SM
+crossbeam/MS
+crossbones
+crossbowman/M
+crossbowmen
+crossbow/SM
+crossbred/S
+crossbreed/SG
+crosscheck/SGD
+crosscurrent/SM
+crosscut/SM
+crosscutting
+crossed/UA
+crosses/UA
+crossfire/SM
+crosshatch/GDS
+crossing/M
+Cross/M
+crossness/MS
+crossover/MS
+crosspatch/MS
+crosspiece/SM
+crosspoint
+crossproduct/S
+crossroad/GSM
+crossroads/M
+crosstalk/M
+crosstown
+crosswalk/MS
+crossway/M
+crosswind/SM
+crosswise
+crossword/MS
+cross/ZTYSRDMPBJG
+crotchetiness/M
+crotchet/MS
+crotchety/P
+crotchless
+crotch/MDS
+crouch/DSG
+croupier/M
+croup/SMDG
+croupy/TZR
+croton/MS
+crowbait
+crowbarred
+crowbarring
+crowbar/SM
+crowdedness/M
+crowded/P
+crowd/MRDSG
+crowfeet
+crowfoot/M
+crow/GDMS
+Crowley/M
+crowned/U
+crowner/M
+crown/RDMSJG
+crozier's
+CRT/S
+crucial/Y
+crucible/MS
+crucifiable
+crucifixion/MS
+Crucifixion/MS
+crucifix/SM
+cruciform/S
+crucify/NGDS
+crudded
+crudding
+cruddy/TR
+crudeness/MS
+crude/YSP
+crudités
+crudity/MS
+crud/STMR
+cruelness/MS
+cruelty/SM
+cruel/YRTSP
+cruet/MS
+cruft
+crufty
+Cruikshank/M
+cruise/GZSRD
+cruiser/M
+cruller/SM
+crumb/GSYDM
+crumble/DSJG
+crumbliness/MS
+crumbly/PTRS
+crumby/RT
+crumminess/S
+crummy/SRTP
+crump
+crumpet/SM
+crumple/DSG
+crunch/DSRGZ
+crunchiness/MS
+crunchy/TRP
+crupper/MS
+crusade/GDSRMZ
+crusader/M
+cruse/MS
+crushable/U
+crusher/M
+crushing/Y
+crushproof
+crush/SRDBGZ
+Crusoe/M
+crustacean/MS
+crustal
+crust/GMDS
+crustily
+crustiness/SM
+crusty/SRTP
+crutch/MDSG
+Crux/M
+crux/MS
+Cruz/M
+crybaby/MS
+cry/JGDRSZ
+cryogenic/S
+cryogenics/M
+cryostat/M
+cryosurgery/SM
+cryptanalysis/M
+cryptanalyst/M
+cryptanalytic
+crypt/CS
+cryptic
+cryptically
+cryptogram/MS
+cryptographer/MS
+cryptographic
+cryptographically
+cryptography/MS
+cryptologic
+cryptological
+cryptologist/M
+cryptology/M
+Cryptozoic/M
+crypt's
+crystalline/S
+crystallite/SM
+crystallization/AMS
+crystallized/UA
+crystallizes/A
+crystallize/SRDZG
+crystallizing/A
+crystallographer/MS
+crystallographic
+crystallography/M
+Crystal/M
+crystal/SM
+Crysta/M
+Crystie/M
+Cs
+C's
+cs/EA
+cs's
+CST
+ct
+CT
+Cthrine/M
+Ct/M
+ctn
+ctr
+Cuba/M
+Cuban/S
+cubbed
+cubbing
+cubbyhole/MS
+cuber/M
+cube/SM
+cubical/Y
+cubicle/SM
+cubic/YS
+cubism/SM
+cubist/MS
+cubit/MS
+cub/MDRSZG
+cuboid
+Cuchulain/M
+cuckold/GSDM
+cuckoldry/MS
+cuckoo/SGDM
+cucumber/MS
+cuddle/GSD
+cuddly/TRP
+cu/DG
+cudgel/GSJMD
+cud/MS
+cue/MS
+cuff/GSDM
+Cuisinart/M
+cuisine/MS
+Culbertson/M
+culinary
+Cullan/M
+cull/DRGS
+cullender's
+Cullen/M
+culler/M
+Culley/M
+Cullie/M
+Cullin/M
+Cull/MN
+Cully/M
+culminate/XSDGN
+culmination/M
+culotte/S
+culpability/MS
+culpable/I
+culpableness/M
+culpably
+culpa/SM
+culprit/SM
+cultism/SM
+cultist/SM
+cultivable
+cultivated/U
+cultivate/XBSDGN
+cultivation/M
+cultivator/SM
+cult/MS
+cultural/Y
+cultured/U
+culture/SDGM
+Culver/MS
+culvert/SM
+Cu/M
+cumber/DSG
+Cumberland/M
+cumbersomeness/MS
+cumbersome/YP
+cumbrous
+cumin/MS
+cummerbund/MS
+Cummings
+cumquat's
+cum/S
+cumulate/XVNGSD
+cumulation/M
+cumulative/Y
+cumuli
+cumulonimbi
+cumulonimbus/M
+cumulus/M
+Cunard/M
+cuneiform/S
+cunnilingus/SM
+Cunningham/M
+cunningness/M
+cunning/RYSPT
+cunt/SM!
+cupboard/SM
+cupcake/SM
+Cupertino/M
+cupful/SM
+cupidinously
+cupidity/MS
+Cupid/M
+cupid/S
+cup/MS
+cupola/MDGS
+cupped
+cupping/M
+cupric
+cuprous
+curability/MS
+curable/IP
+curableness/MI
+curably/I
+Curacao/M
+curacy/SM
+curare/MS
+curate/VGMSD
+curative/YS
+curatorial
+curator/KMS
+curbing/M
+curbside
+curb/SJDMG
+curbstone/MS
+Curcio/M
+curdle/SDG
+curd/SMDG
+cured/U
+cure/KBDRSGZ
+curer/MK
+curettage/SM
+curfew/SM
+curfs
+curiae
+curia/M
+cur/IBS
+Curie/M
+curie/SM
+curiosity/SM
+curio/SM
+curiousness/SM
+curious/TPRY
+Curitiba/M
+curium/MS
+curler/SM
+curlew/MS
+curlicue/MGDS
+curliness/SM
+curling/M
+curl/UDSG
+curlycue's
+curly/PRT
+curmudgeon/MYS
+Curran/M
+currant/SM
+curred/AFI
+currency's
+currency/SF
+current/FSY
+currently/A
+currentness/M
+Currey/M
+curricle/M
+curricula
+curricular
+curriculum/M
+Currie/M
+currier/M
+Currier/M
+curring/FAI
+Curr/M
+currycomb/DMGS
+Curry/MR
+curry/RSDMG
+cur's
+curs/ASDVG
+curse/A
+cursedness/M
+cursed/YRPT
+curse's
+cursive/EPYA
+cursiveness/EM
+cursives
+cursor/DMSG
+cursorily
+cursoriness/SM
+cursory/P
+curtailer/M
+curtail/LSGDR
+curtailment/SM
+curtain/GSMD
+Curtice/M
+Curtis/M
+Curt/M
+curtness/MS
+curtsey's
+curtsy/SDMG
+curt/TYRP
+curvaceousness/S
+curvaceous/YP
+curvature/MS
+curved/A
+curved's
+curve/DSGM
+curvilinearity/M
+curvilinear/Y
+curving/M
+curvy/RT
+cushion/SMDG
+Cushman/M
+cushy/TR
+cuspid/MS
+cuspidor/MS
+cusp/MS
+cussedness/M
+cussed/YP
+cuss/EGDSR
+cusses/F
+cussing/F
+cuss's
+custard/MS
+Custer/M
+custodial
+custodianship/MS
+custodian/SM
+custody/MS
+customarily
+customariness/M
+customary/PS
+customer/M
+customhouse/S
+customization/SM
+customize/ZGBSRD
+custom/SMRZ
+cutaneous/Y
+cutaway/SM
+cutback/SM
+cuteness/MS
+cute/SPY
+cutesy/RT
+cuticle/SM
+cutlass/MS
+cutler/SM
+cutlery/MS
+cutlet/SM
+cut/MRST
+cutoff/MS
+cutout/SM
+cutter/SM
+cutthroat/SM
+cutting/MYS
+cuttlebone/SM
+cuttlefish/MS
+cuttle/M
+cutup/MS
+cutworm/MS
+Cuvier/M
+Cuzco/M
+CV
+cw
+cwt
+Cyanamid/M
+cyanate/M
+cyanic
+cyanide/GMSD
+cyan/MS
+cyanogen/M
+Cybele/M
+cybernetic/S
+cybernetics/M
+cyberpunk/S
+cyberspace/S
+Cybill/M
+Cybil/M
+Cyb/M
+cyborg/S
+Cyclades
+cyclamen/MS
+cycle/ASDG
+cycler
+cycle's
+cycleway/S
+cyclic
+cyclical/SY
+cycling/M
+cyclist/MS
+cyclohexanol
+cycloidal
+cycloid/SM
+cyclometer/MS
+cyclone/SM
+cyclonic
+cyclopean
+cyclopedia/MS
+cyclopes
+Cyclopes
+cyclops
+Cyclops/M
+cyclotron/MS
+cyder/SM
+cygnet/MS
+Cygnus/M
+cylinder/GMDS
+cylindric
+cylindrical/Y
+Cy/M
+cymbalist/MS
+cymbal/SM
+Cymbre/M
+Cynde/M
+Cyndia/M
+Cyndie/M
+Cyndi/M
+Cyndy/M
+cynical/UY
+cynicism/MS
+cynic/MS
+cynosure/SM
+Cynthea/M
+Cynthia/M
+Cynthie/M
+Cynthy/M
+cypher/MGSD
+cypreses
+cypress/SM
+Cyprian
+Cypriot/SM
+Cyprus/M
+Cyrano/M
+Cyrille/M
+Cyrillic
+Cyrill/M
+Cyrillus/M
+Cyril/M
+Cyrus/M
+cystic
+cyst/MS
+cytochemistry/M
+cytochrome/M
+cytologist/MS
+cytology/MS
+cytolysis/M
+cytoplasmic
+cytoplasm/SM
+cytosine/MS
+cytotoxic
+CZ
+czarevitch/M
+czarina/SM
+czarism/M
+czarist/S
+czarship
+czar/SM
+Czech
+Czechoslovakia/M
+Czechoslovakian/S
+Czechoslovak/S
+Czechs
+Czerniak/M
+Czerny/M
+D
+DA
+dabbed
+dabber/MS
+dabbing
+dabbler/M
+dabble/RSDZG
+dab/S
+Dacca's
+dace/MS
+Dacey/M
+dacha/SM
+Dachau/M
+dachshund/SM
+Dacia/M
+Dacie/M
+Dacron/MS
+dactylic/S
+dactyl/MS
+Dacy/M
+Dadaism/M
+dadaism/S
+Dadaist/M
+dadaist/S
+Dada/M
+daddy/SM
+Dade/M
+dado/DMG
+dadoes
+dad/SM
+Daedalus/M
+Dael/M
+daemonic
+daemon/SM
+Daffie/M
+Daffi/M
+daffiness/S
+daffodil/MS
+Daffy/M
+daffy/PTR
+daftness/MS
+daft/TYRP
+DAG
+dagger/DMSG
+Dag/M
+Dagmar/M
+Dagny/M
+Daguerre/M
+daguerreotype/MGDS
+Dagwood/M
+Dahlia/M
+dahlia/MS
+Dahl/M
+Dahomey/M
+Daile/M
+dailiness/MS
+daily/PS
+Daimler/M
+daintily
+daintiness/MS
+dainty/TPRS
+daiquiri/SM
+dairying/M
+dairyland
+dairymaid/SM
+dairyman/M
+dairymen
+dairy/MJGS
+dairywoman/M
+dairywomen
+Daisey/M
+Daisie/M
+Daisi/M
+dais/SM
+Daisy/M
+daisy/SM
+Dakar/M
+Dakotan
+Dakota/SM
+Dale/M
+Dalenna/M
+dale/SMH
+daleth/M
+Daley/M
+Dalhousie/M
+Dalia/M
+Dalian/M
+Dalila/M
+Dali/SM
+Dallas/M
+dalliance/SM
+dallier/M
+Dalli/MS
+Dall/M
+Dallon/M
+dally/ZRSDG
+Dal/M
+Dalmatia/M
+dalmatian/S
+Dalmatian/SM
+Daloris/M
+Dalston/M
+Dalt/M
+Dalton/M
+Daly/M
+damageable
+damaged/U
+damage/MZGRSD
+damager/M
+damaging/Y
+Damara/M
+Damaris/M
+Damascus/M
+damask/DMGS
+dame/SM
+Dame/SMN
+Damian/M
+Damiano/M
+Damien/M
+Damion/M
+Damita/M
+dam/MDS
+dammed
+damming
+dammit/S
+damnably
+damnation/MS
+damnedest/MS
+damned/TR
+damn/GSBRD
+damning/Y
+Damocles/M
+Damon/M
+damped/U
+dampener/M
+dampen/RDZG
+damper/M
+dampness/MS
+damp/SGZTXYRDNP
+damselfly/MS
+damsel/MS
+damson/MS
+Dana
+Dana/M
+Danbury/M
+dancelike
+dancer/M
+dance/SRDJGZ
+dandelion/MS
+dander/DMGS
+dandify/SDG
+dandily
+dandle/GSD
+dandruff/MS
+dandy/TRSM
+Danelaw/M
+Danella/M
+Danell/M
+Dane/SM
+Danette/M
+danger/DMG
+Dangerfield/M
+dangerousness/M
+dangerous/YP
+dangler/M
+dangle/ZGRSD
+dangling/Y
+dang/SGZRD
+Danial/M
+Dania/M
+Danica/M
+Danice/M
+Daniela/M
+Daniele/M
+Daniella/M
+Danielle/M
+Daniel/SM
+Danielson/M
+Danie/M
+Danika/M
+Danila/M
+Dani/M
+Danish
+danish/S
+Danita/M
+Danit/M
+dankness/MS
+dank/TPYR
+Danna/M
+Dannel/M
+Dannie/M
+Danni/M
+Dannye/M
+Danny/M
+danseuse/SM
+Dan/SM
+Dante/M
+Danton/M
+Danube/M
+Danubian
+Danville/M
+Danya/M
+Danyelle/M
+Danyette/M
+Danzig/M
+Daphene/M
+Daphna/M
+Daphne/M
+dapperness/M
+dapper/PSTRY
+dapple/SDG
+Dara/M
+Darbee/M
+Darbie/M
+Darb/M
+Darby/M
+Darcee/M
+Darcey/M
+Darcie/M
+Darci/M
+D'Arcy
+Darcy/M
+Darda/M
+Dardanelles
+daredevil/MS
+daredevilry/S
+Dareen/M
+Darelle/M
+Darell/M
+Dare/M
+Daren/M
+darer/M
+daresay
+dare/ZGDRSJ
+d'Arezzo
+Daria/M
+Darice/M
+Darill/M
+Dari/M
+daringness/M
+daring/PY
+Darin/M
+Dario/M
+Darius/M
+Darjeeling/M
+darkener/M
+darken/RDZG
+dark/GTXYRDNSP
+darkish
+darkly/TR
+darkness/MS
+darkroom/SM
+Darla/M
+Darleen/M
+Darlene/M
+Darline/M
+Darling/M
+darlingness/M
+Darlington/M
+darling/YMSP
+Darlleen/M
+Dar/MNH
+Darnall/M
+darned/TR
+Darnell/M
+darner/M
+darn/GRDZS
+darning/M
+Darn/M
+Daron/M
+DARPA/M
+Darrelle/M
+Darrell/M
+Darrel/M
+Darren/M
+Darrick/M
+Darrin/M
+Darrow/M
+Darryl/M
+Darsey/M
+Darsie/M
+d'art
+dartboard/SM
+darter/M
+Darth/M
+Dartmouth/M
+dart/MRDGZS
+Darvon/M
+Darwinian/S
+Darwinism/MS
+Darwinist/MS
+Darwin/M
+Darya/M
+Daryle/M
+Daryl/M
+Daryn/M
+Dasha/M
+dashboard/SM
+dasher/M
+dash/GZSRD
+dashiki/SM
+dashing/Y
+Dasie/M
+Dasi/M
+dastardliness/SM
+dastardly/P
+dastard/MYS
+Dasya/M
+DAT
+database/DSMG
+datafile
+datagram/MS
+data/M
+Datamation/M
+Datamedia/M
+dataset/S
+datedly
+datedness
+date/DRSMZGV
+dated/U
+dateless
+dateline/DSMG
+dater/M
+Datha/M
+dative/S
+Datsun/M
+datum/MS
+dauber/M
+daub/RDSGZ
+Daugherty/M
+daughter/MYS
+Daumier/M
+Daune/M
+daunt/DSG
+daunted/U
+daunting/Y
+dauntlessness/SM
+dauntless/PY
+dauphin/SM
+Davao/M
+Daveen/M
+Dave/M
+Daven/M
+Davenport/M
+davenport/MS
+Daveta/M
+Davey/M
+Davida/M
+Davidde/M
+Davide/M
+David/SM
+Davidson/M
+Davie/M
+Davina/M
+Davine/M
+Davinich/M
+Davin/M
+Davis/M
+Davita/M
+davit/SM
+Dav/MN
+Davon/M
+Davy/SM
+dawdler/M
+dawdle/ZGRSD
+Dawes/M
+Dawna/M
+dawn/GSDM
+Dawn/M
+Dawson/M
+daybed/S
+daybreak/SM
+daycare/S
+daydreamer/M
+daydream/RDMSZG
+Dayle/M
+daylight/GSDM
+Day/M
+Dayna/M
+daysack
+day/SM
+daytime/SM
+Dayton/M
+dazed/PY
+daze/DSG
+dazzler/M
+dazzle/ZGJRSD
+dazzling/Y
+db
+DB
+dbl
+dB/M
+DBMS
+DC
+DD
+Ddene/M
+DDS
+DDT
+DE
+deacon/DSMG
+deaconess/MS
+deadbeat/SM
+deadbolt/S
+deadener/M
+deadening/MY
+deaden/RDG
+deadhead/MS
+deadline/MGDS
+deadliness/SM
+deadlock/MGDS
+deadly/RPT
+deadness/M
+deadpanned
+deadpanner
+deadpanning
+deadpan/S
+dead/PTXYRN
+deadwood/SM
+deafening/MY
+deafen/JGD
+deafness/MS
+deaf/TXPYRN
+dealer/M
+dealership/MS
+dealing/M
+deallocator
+deal/RSGZJ
+dealt
+Deana/M
+dean/DMG
+Deandre/M
+Deane/M
+deanery/MS
+Dean/M
+Deanna/M
+Deanne/M
+Deann/M
+deanship/SM
+Dearborn/M
+dearness/MS
+dearth/M
+dearths
+dear/TYRHPS
+deary/MS
+deassign
+deathbed/MS
+deathblow/SM
+deathless/Y
+deathlike
+deathly/TR
+death/MY
+deaths
+deathtrap/SM
+deathward
+deathwatch/MS
+debacle/SM
+debarkation/SM
+debark/G
+debar/L
+debarment/SM
+debarring
+debaser/M
+debatable/U
+debate/BMZ
+debater/M
+debauchedness/M
+debauched/PY
+debauchee/SM
+debaucher/M
+debauchery/SM
+debauch/GDRS
+Debbie/M
+Debbi/M
+Debby/M
+Debee/M
+debenture/MS
+Debera/M
+debilitate/NGXSD
+debilitation/M
+debility/MS
+Debi/M
+debit/DG
+deb/MS
+Deb/MS
+debonairness/SM
+debonair/PY
+Deborah/M
+Debora/M
+Debor/M
+debouch/DSG
+Debra/M
+debrief/GJ
+debris/M
+debtor/SM
+debt/SM
+Debussy/M
+débutante/SM
+debut/MDG
+decade/MS
+decadency/S
+decadent/YS
+decaffeinate/DSG
+decaf/S
+decagon/MS
+Decalogue/M
+decal/SM
+decamp/L
+decampment/MS
+decapitate/GSD
+decapitator/SM
+decathlon/SM
+Decatur/M
+decay/GRD
+Decca/M
+Deccan/M
+decease/M
+decedent/MS
+deceitfulness/SM
+deceitful/PY
+deceit/SM
+deceived/U
+deceiver/M
+deceives/U
+deceive/ZGRSD
+deceivingly
+deceiving/U
+decelerate/XNGSD
+deceleration/M
+decelerator/SM
+December/SM
+decency/ISM
+decennial/SY
+decent/TIYR
+deception/SM
+deceptiveness/SM
+deceptive/YP
+decertify/N
+dechlorinate/N
+decibel/MS
+decidability/U
+decidable/U
+decidedness/M
+decided/PY
+decide/GRSDB
+deciduousness/M
+deciduous/YP
+decile/SM
+deciliter/SM
+decimal/SYM
+decimate/XNGDS
+decimation/M
+decimeter/MS
+decipherable/IU
+decipher/BRZG
+decipherer/M
+decisional
+decisioned
+decisioning
+decision/ISM
+decisive/IPY
+decisiveness/MSI
+deckchair
+decker/M
+Decker/M
+deck/GRDMSJ
+deckhand/S
+decking/M
+Deck/RM
+declamation/SM
+declamatory
+declarable
+declaration/MS
+declaration's/A
+declarative/SY
+declarator/MS
+declaratory
+declare/AGSD
+declared/U
+declarer/MS
+declension/SM
+declination/MS
+decliner/M
+decline/ZGRSD
+declivity/SM
+Dec/M
+DEC/M
+DECNET
+DECnet/M
+deco
+décolletage/S
+décolleté
+decolletes
+decolorising
+decomposability/M
+decomposable/IU
+decompose/B
+decompress/R
+decongestant/S
+deconstruction
+deconvolution
+decorated/AU
+decorate/NGVDSX
+decorates/A
+decorating/A
+decoration/ASM
+decorativeness/M
+decorative/YP
+decorator/SM
+decorousness/MS
+decorousness's/I
+decorous/PIY
+decor/S
+decorticate/GNDS
+decortication/M
+decorum/MS
+decoupage/MGSD
+decouple/G
+decoy/M
+decrease
+decreasing/Y
+decreeing
+decree/RSM
+decremental
+decrement/DMGS
+decrepit
+decrepitude/SM
+decriminalization/S
+decriminalize/DS
+decry/G
+decrypt/GD
+decryption
+DECstation/M
+DECsystem/M
+DECtape/M
+decustomised
+Dedekind/M
+Dede/M
+dedicate/AGDS
+dedicated/Y
+dedication/MS
+dedicative
+dedicator/MS
+dedicatory
+Dedie/M
+Dedra/M
+deduce/RSDG
+deducible
+deductibility/M
+deductible/S
+deduction/SM
+deductive/Y
+deduct/VG
+Deeanne/M
+Deeann/M
+deeded
+Deedee/M
+deeding
+deed/IS
+deed's
+deejay/MDSG
+Dee/M
+deem/ADGS
+deemphasis
+Deena/M
+deepen/DG
+deepish
+deepness/MS
+deep/PTXSYRN
+Deerdre/M
+Deere/M
+deerskin/MS
+deer/SM
+deerstalker/SM
+deerstalking/M
+Deeyn/M
+deface/LZ
+defacement/SM
+defaecate
+defalcate/NGXSD
+defalcation/M
+defamation/SM
+defamatory
+defamer/M
+defame/ZR
+defaulter/M
+default/ZR
+defeated/U
+defeater/M
+defeatism/SM
+defeatist/SM
+defeat/ZGD
+defecate/DSNGX
+defecation/M
+defection/SM
+defectiveness/MS
+defective/PYS
+defect/MDSVG
+defector/MS
+defendant/SM
+defended/U
+defenestrate/GSD
+defenselessness/MS
+defenseless/PY
+defenses/U
+defense/VGSDM
+defensibility/M
+defensible/I
+defensibly/I
+defensiveness/MS
+defensive/PSY
+deference/MS
+deferential/Y
+deferent/S
+deferrable
+deferral/SM
+deferred
+deferrer/MS
+deferring
+deffer
+defiance/MS
+defiant/Y
+defibrillator/M
+deficiency/MS
+deficient/SY
+deficit/MS
+defier/M
+defile/L
+defilement/MS
+definable/UI
+definably/I
+define/AGDRS
+defined/U
+definer/SM
+definite/IPY
+definiteness/IMS
+definitional
+definition/ASM
+definitiveness/M
+definitive/SYP
+defis
+deflate/XNGRSDB
+deflationary
+deflation/M
+deflect/DSGV
+deflected/U
+deflection/MS
+deflector/MS
+defocus
+defocussing
+Defoe/M
+defog
+defogger/S
+defoliant/SM
+defoliator/SM
+deformational
+deform/B
+deformed/U
+deformity/SM
+defrauder/M
+defraud/ZGDR
+defrayal/SM
+defroster/M
+defrost/RZ
+deftness/MS
+deft/TYRP
+defunct/S
+defying/Y
+defy/RDG
+def/Z
+deg
+Degas/M
+degassing
+degauss/GD
+degeneracy/MS
+degenerateness/M
+degenerate/PY
+degrade/B
+degradedness/M
+degraded/YP
+degrading/Y
+degrease
+degree/SM
+degum
+Dehlia/M
+dehumanize
+dehydrator/MS
+deicer/M
+deice/ZR
+deictic
+Deidre/M
+deification/M
+deify/SDXGN
+deign/DGS
+Deimos/M
+Deina/M
+Deirdre/MS
+deistic
+deist/SM
+Deity/M
+deity/SM
+deja
+deject/DSG
+dejectedness/M
+dejected/PY
+dejection/SM
+Dejesus/M
+DeKalb/M
+DeKastere/M
+Delacroix/M
+Delacruz/M
+Delainey/M
+Dela/M
+Delaney/M
+Delano/M
+Delawarean/SM
+Delaware/MS
+delay/D
+delayer/G
+Delbert/M
+Delcina/M
+Delcine/M
+delectableness/M
+delectable/SP
+delectably
+delectation/MS
+delegable
+Deleon/M
+deleted/U
+deleteriousness/M
+deleterious/PY
+delete/XBRSDNG
+deletion/M
+delfs
+Delft/M
+delft/MS
+delftware/S
+Delgado/M
+Delhi/M
+Delia/M
+deliberateness/SM
+deliberate/PVY
+deliberativeness/M
+deliberative/PY
+Delibes/M
+delicacy/IMS
+delicate/IYP
+delicatenesses
+delicateness/IM
+delicates
+delicatessen/MS
+deliciousness/MS
+delicious/YSP
+delicti
+delightedness/M
+delighted/YP
+delightfulness/M
+delightful/YP
+Delilah/M
+Delilahs
+Delila/M
+Delinda/M
+delineate/SDXVNG
+delineation/M
+delinquency/MS
+delinquent/SYM
+deliquesce/GSD
+deliquescent
+deliriousness/MS
+delirious/PY
+delirium/SM
+deli/SM
+Delius/M
+deliverables
+deliverable/U
+deliver/AGSD
+deliverance/SM
+delivered/U
+deliverer/SM
+delivery/AM
+deliverymen/M
+Della/M
+Dell/M
+dell/SM
+Dellwood/M
+Delly/M
+Delmar/M
+Delmarva/M
+Delmer/M
+Delmonico
+Delmore/M
+Delmor/M
+Del/MY
+Delora/M
+Delores/M
+Deloria/M
+Deloris/M
+Delphic
+Delphi/M
+Delphine/M
+Delphinia/M
+delphinium/SM
+Delphinus/M
+Delta/M
+delta/MS
+deltoid/SM
+deluder/M
+delude/RSDG
+deluding/Y
+deluge/SDG
+delusional
+delusion/SM
+delusiveness/M
+delusive/PY
+deluxe
+delve/GZSRD
+delver/M
+demagnify/N
+demagogic
+demagogue/GSDM
+demagoguery/SM
+demagogy/MS
+demander/M
+demand/GSRD
+demandingly
+demanding/U
+demarcate/SDNGX
+demarcation/M
+Demavend/M
+demean/GDS
+demeanor/SM
+dementedness/M
+demented/YP
+dementia/MS
+Demerol/M
+demesne/SM
+Demeter/M
+Demetra/M
+Demetre/M
+Demetria/M
+Demetri/MS
+Demetrius/M
+demigod/MS
+demijohn/MS
+demimondaine/SM
+demimonde/SM
+demineralization/SM
+Deming/M
+demise/DMG
+demit
+demitasse/MS
+demitted
+demitting
+Dem/MG
+democracy/MS
+Democratic
+democratically/U
+democratic/U
+democratization/MS
+democratize/DRSG
+democratizes/U
+Democrat/MS
+democrat/SM
+Democritus/M
+démodé
+demo/DMPG
+demographer/MS
+demographical/Y
+demographic/S
+demography/MS
+demolisher/M
+demolish/GSRD
+demolition/MS
+demonetization/S
+demoniacal/Y
+demoniac/S
+demonic
+demonology/M
+demon/SM
+demonstrable/I
+demonstrableness/M
+demonstrably/I
+demonstrate/XDSNGV
+demonstration/M
+demonstrativenesses
+demonstrativeness/UM
+demonstratives
+demonstrative/YUP
+demonstrator/MS
+demoralization/M
+demoralizer/M
+demoralizing/Y
+DeMorgan/M
+Demosthenes/M
+demote/DGX
+demotic/S
+Demott/M
+demount/B
+Dempsey/M
+demulcent/S
+demultiplex
+demureness/SM
+demure/YP
+demurral/MS
+demurred
+demurrer/MS
+demurring
+demur/RTS
+demythologization/M
+demythologize/R
+den
+Dena/M
+dendrite/MS
+Deneb/M
+Denebola/M
+Deneen/M
+Dene/M
+Deng/M
+dengue/MS
+deniable/U
+denial/SM
+Denice/M
+denier/M
+denigrate/VNGXSD
+denigration/M
+denim/SM
+Denise/M
+Deni/SM
+denizen/SMDG
+Den/M
+De/NM
+Denmark/M
+Denna/M
+denned
+Dennet/M
+Denney/M
+Dennie/M
+Denni/MS
+denning
+Dennison/M
+Denny/M
+denominate/V
+denominational/Y
+denote/B
+denouement/MS
+denounce/LZRSDG
+denouncement/SM
+denouncer/M
+dense/FR
+densely
+denseness/SM
+densitometer/MS
+densitometric
+densitometry/M
+density/MS
+dens/RT
+dental/YS
+dentifrice/SM
+dentine's
+dentin/SM
+dent/ISGD
+dentistry/MS
+dentist/SM
+dentition/MS
+dent's
+denture/IMS
+denuclearize/GSD
+denudation/SM
+denude/DG
+denuder/M
+denunciate/VNGSDX
+denunciation/M
+Denver/M
+denying/Y
+Deny/M
+Denys
+Denyse/M
+deny/SRDZG
+deodorant/SM
+deodorization/SM
+deodorize/GZSRD
+deodorizer/M
+Deon/M
+Deonne/M
+deoxyribonucleic
+depart/L
+departmentalization/SM
+departmentalize/DSG
+departmental/Y
+department/MS
+departure/MS
+dependability/MS
+dependableness/M
+dependable/P
+dependably
+Dependant/MS
+depend/B
+dependence/ISM
+dependency/MS
+dependent/IYS
+dependent's
+depicted/U
+depicter/M
+depiction/SM
+depict/RDSG
+depilatory/S
+deplete/VGNSDX
+depletion/M
+deplorableness/M
+deplorable/P
+deplorably
+deplorer/M
+deplore/SRDBG
+deploring/Y
+deployable
+deploy/AGDLS
+deployment/SAM
+depolarize
+deponent/S
+deportation/MS
+deportee/SM
+deport/LG
+deportment/MS
+depose
+deposit/ADGS
+depositary/M
+deposition/A
+depositor/SAM
+depository/MS
+depravedness/M
+depraved/PY
+deprave/GSRD
+depraver/M
+depravity/SM
+deprecate/XSDNG
+deprecating/Y
+deprecation/M
+deprecatory
+depreciable
+depreciate/XDSNGV
+depreciating/Y
+depreciation/M
+depreciative/Y
+depressant/S
+depressible
+depression/MS
+depressive/YS
+depressor/MS
+depress/V
+deprive/GSD
+depth/M
+depths
+Dept/M
+deputation/SM
+depute/SDG
+deputize/DSG
+deputy/MS
+dequeue
+derail/L
+dérailleur/MS
+derailment/MS
+derange/L
+derangement/MS
+Derbyshire/M
+derby/SM
+Derby/SM
+dereference/Z
+Derek/M
+dereliction/SM
+derelict/S
+Derick/M
+deride/D
+deriding/Y
+derision/SM
+derisiveness/MS
+derisive/PY
+derisory
+derivable/U
+derivate/XNV
+derivation/M
+derivativeness/M
+derivative/SPYM
+derive/B
+derived/U
+Derk/M
+Der/M
+dermal
+dermatitides
+dermatitis/MS
+dermatological
+dermatologist/MS
+dermatology/MS
+dermis/SM
+Dermot/M
+derogate/XDSNGV
+derogation/M
+derogatorily
+derogatory
+Derrek/M
+Derrick/M
+derrick/SMDG
+Derrida/M
+derrière/S
+Derrik/M
+Derril/M
+derringer/SM
+Derron/M
+Derry/M
+dervish/SM
+Derward/M
+Derwin/M
+Des
+desalinate/NGSDX
+desalination/M
+desalinization/MS
+desalinize/GSD
+desalt/G
+descant/M
+Descartes/M
+descendant/SM
+descended/FU
+descendent's
+descender/M
+descending/F
+descends/F
+descend/ZGSDR
+descent
+describable/I
+describe/ZB
+description/MS
+descriptiveness/MS
+descriptive/SYP
+descriptor/SM
+descry/SDG
+Desdemona/M
+desecrater/M
+desecrate/SRDGNX
+desecration/M
+deserter/M
+desertification
+desertion/MS
+desert/ZGMRDS
+deservedness/M
+deserved/YU
+deserve/J
+deserving/Y
+déshabillé's
+desiccant/S
+desiccate/XNGSD
+desiccation/M
+desiccator/SM
+desiderata
+desideratum/M
+designable
+design/ADGS
+designate/VNGSDX
+designational
+designation/M
+designator/SM
+designed/Y
+designer/M
+designing/U
+Desi/M
+desirabilia
+desirability's
+desirability/US
+desirableness/SM
+desirableness's/U
+desirable/UPS
+desirably/U
+Desirae/M
+desire/BR
+desired/U
+Desiree/M
+desirer/M
+Desiri/M
+desirousness/M
+desirous/PY
+desist/DSG
+desk/SM
+desktop/S
+Desmond/M
+Desmund/M
+desolateness/SM
+desolate/PXDRSYNG
+desolater/M
+desolating/Y
+desolation/M
+desorption/M
+despairer/M
+despairing/Y
+despair/SGDR
+desperadoes
+desperado/M
+desperateness/SM
+desperate/YNXP
+desperation/M
+despicable
+despicably
+despiser/M
+despise/SRDG
+despoil/L
+despoilment/MS
+despond
+despondence/S
+despondency/MS
+despondent/Y
+despotic
+despotically
+despotism/SM
+dessert/SM
+dessicate/DN
+d'Estaing
+destinate/NX
+destination/M
+destine/GSD
+destiny/MS
+destituteness/M
+destitute/NXP
+destitution/M
+destroy/BZGDRS
+destroyer/M
+destructibility/SMI
+destructible/I
+destruction/SM
+destructiveness/MS
+destructive/YP
+destructor/M
+destruct/VGSD
+desuetude/MS
+desultorily
+desultoriness/M
+desultory/P
+detachedness/M
+detached/YP
+detacher/M
+detach/LSRDBG
+detachment/SM
+detailedness/M
+detailed/YP
+detainee/S
+detainer/M
+detain/LGRDS
+detainment/MS
+d'etat
+detectability/U
+detectable/U
+detectably/U
+detect/DBSVG
+detected/U
+detection/SM
+detective/MS
+detector/MS
+détente
+detentes
+detention/SM
+detergency/M
+detergent/SM
+deteriorate/XDSNGV
+deterioration/M
+determent/SM
+determinability/M
+determinable/IP
+determinableness/IM
+determinacy/I
+determinant/MS
+determinateness/IM
+determinate/PYIN
+determination/IM
+determinativeness/M
+determinative/P
+determinedly
+determinedness/M
+determined/U
+determine/GASD
+determiner/SM
+determinism/MS
+determinism's/I
+deterministically
+deterministic/I
+deterred/U
+deterrence/SM
+deterrent/SMY
+deterring
+detersive/S
+deter/SL
+deters/V
+detestableness/M
+detestable/P
+detestably
+detestation/SM
+dethrone/L
+dethronement/SM
+detonable
+detonated/U
+detonate/XDSNGV
+detonation/M
+detonator/MS
+detour/G
+detoxification/M
+detoxify/NXGSD
+detox/SDG
+detract/GVD
+detractive/Y
+d'etre
+detribalize/GSD
+detrimental/SY
+detriment/SM
+detritus/M
+Detroit/M
+deuced/Y
+deuce/SDGM
+deus
+deuterium/MS
+deuteron/M
+Deuteronomy/M
+Deutsch/M
+Deva/M
+Devanagari/M
+Devan/M
+devastate/XVNGSD
+devastating/Y
+devastation/M
+devastator/SM
+develop/ALZSGDR
+developed/U
+developer/MA
+developmental/Y
+development/ASM
+deviance/MS
+deviancy/S
+deviant/YMS
+deviated/U
+deviate/XSDGN
+deviating/U
+deviation/M
+devilishness/MS
+devilish/PY
+devilment/SM
+devilry/MS
+devil/SLMDG
+deviltry/MS
+Devi/M
+Devina/M
+Devin/M
+Devinne/M
+deviousness/SM
+devious/YP
+devise/JR
+deviser/M
+Devland/M
+Devlen/M
+Devlin/M
+Dev/M
+devoice
+devolution/MS
+devolve/GSD
+Devondra/M
+Devonian
+Devon/M
+Devonna/M
+Devonne/M
+Devonshire/M
+Devora/M
+devoted/Y
+devotee/MS
+devote/XN
+devotional/YS
+devotion/M
+devourer/M
+devour/SRDZG
+devoutness/MS
+devout/PRYT
+Devy/M
+Dewain/M
+dewar
+Dewar/M
+Dewayne/M
+dewberry/MS
+dewclaw/SM
+dewdrop/MS
+Dewey/M
+Dewie/M
+dewiness/MS
+Dewitt/M
+dewlap/MS
+Dew/M
+dew/MDGS
+dewy/TPR
+Dexedrine/M
+dexes/I
+Dex/M
+dexter
+dexterity/MS
+Dexter/M
+dexterousness/MS
+dexterous/PY
+dextrose/SM
+DH
+Dhaka
+Dhaulagiri/M
+dhoti/SM
+dhow/MS
+DI
+diabase/M
+diabetes/M
+diabetic/S
+diabolic
+diabolicalness/M
+diabolical/YP
+diabolism/M
+diachronic/P
+diacritical/YS
+diacritic/MS
+diadem/GMDS
+diaereses
+diaeresis/M
+Diaghilev/M
+diagnometer/SM
+diagnosable/U
+diagnose/BGDS
+diagnosed/U
+diagnosis/M
+diagnostically
+diagnostician/SM
+diagnostic/MS
+diagnostics/M
+diagonalize/GDSB
+diagonal/YS
+diagrammable
+diagrammatic
+diagrammaticality
+diagrammatically
+diagrammed
+diagrammer/SM
+diagramming
+diagram/MS
+Diahann/M
+dialectal/Y
+dialectical/Y
+dialectic/MS
+dialect/MS
+dialed/A
+dialer/M
+dialing/M
+dial/MRDSGZJ
+dialogged
+dialogging
+dialog/MS
+dials/A
+dialysis/M
+dialyzed/U
+dialyzes
+diam
+diamagnetic
+diameter/MS
+diametric
+diametrical/Y
+diamondback/SM
+diamond/GSMD
+Diana/M
+Diandra/M
+Diane/M
+Dianemarie/M
+Dian/M
+Dianna/M
+Dianne/M
+Diann/M
+Diannne/M
+diapason/MS
+diaper/SGDM
+diaphanousness/M
+diaphanous/YP
+diaphragmatic
+diaphragm/SM
+diarist/SM
+Diarmid/M
+diarrheal
+diarrhea/MS
+diary/MS
+diaspora
+Diaspora/SM
+diastase/SM
+diastole/MS
+diastolic
+diathermy/SM
+diathesis/M
+diatomic
+diatom/SM
+diatonic
+diatribe/MS
+Diaz's
+dibble/SDMG
+dibs
+DiCaprio/M
+dice/GDRS
+dicer/M
+dicey
+dichloride/M
+dichotomization/M
+dichotomize/DSG
+dichotomous/PY
+dichotomy/SM
+dicier
+diciest
+dicing/M
+Dickensian/S
+dickens/M
+Dickens/M
+dicker/DG
+Dickerson/M
+dickey/SM
+dick/GZXRDMS!
+Dickie/M
+dickier
+dickiest
+Dickinson/M
+Dickson/M
+Dick/XM
+Dicky/M
+dicky's
+dicotyledonous
+dicotyledon/SM
+dicta/M
+Dictaphone/SM
+dictate/SDNGX
+dictation/M
+dictatorialness/M
+dictatorial/YP
+dictator/MS
+dictatorship/SM
+dictionary/SM
+diction/MS
+dictum/M
+didactically
+didactic/S
+didactics/M
+did/AU
+diddler/M
+diddle/ZGRSD
+Diderot/M
+Didi/M
+didn't
+didoes
+dido/M
+Dido/M
+didst
+die/DS
+Diefenbaker/M
+Diego/M
+dieing
+dielectric/MS
+diem
+Diem/M
+Diena/M
+Dierdre/M
+diereses
+dieresis/M
+diesel/GMDS
+Diesel's
+dies's
+dies/U
+dietary/S
+dieter/M
+Dieter/M
+dietetic/S
+dietetics/M
+diethylaminoethyl
+diethylstilbestrol/M
+dietitian/MS
+diet/RDGZSM
+Dietrich/M
+Dietz/M
+difference/DSGM
+difference's/I
+differences/I
+differentiability
+differentiable
+differential/SMY
+differentiated/U
+differentiate/XSDNG
+differentiation/M
+differentiator/SM
+differentness
+different/YI
+differ/SZGRD
+difficile
+difficult/Y
+difficulty/SM
+diffidence/MS
+diffident/Y
+diffract/GSD
+diffraction/SM
+diffractometer/SM
+diffuseness/MS
+diffuse/PRSDZYVXNG
+diffuser/M
+diffusible
+diffusional
+diffusion/M
+diffusiveness/M
+diffusive/YP
+diffusivity/M
+digerati
+digested/IU
+digester/M
+digestibility/MS
+digestible/I
+digestifs
+digestion/ISM
+digestive/YSP
+digest/RDVGS
+digger/MS
+digging/S
+digitalis/M
+digitalization/MS
+digitalized
+digitalizes
+digitalizing
+digital/SY
+digitization/M
+digitizer/M
+digitize/ZGDRS
+digit/SM
+dignified/U
+dignify/DSG
+dignitary/SM
+dignity/ISM
+digram
+digraph/M
+digraphs
+digress/GVDS
+digression/SM
+digressiveness/M
+digressive/PY
+dig/TS
+dihedral
+Dijkstra/M
+Dijon/M
+dike/DRSMG
+diker/M
+diktat/SM
+Dilan/M
+dilapidate/XGNSD
+dilapidation/M
+dilatation/SM
+dilated/YP
+dilate/XVNGSD
+dilation/M
+dilatoriness/M
+dilator/SM
+dilatory/P
+Dilbert/M
+dilemma/MS
+dilettante/MS
+dilettantish
+dilettantism/MS
+diligence/SM
+diligentness/M
+diligent/YP
+dilithium
+Dillard/M
+Dillie/M
+Dillinger/M
+dilling/R
+dillis
+Dill/M
+Dillon/M
+dill/SGMD
+dillydally/GSD
+Dilly/M
+dilly/SM
+dilogarithm
+diluent
+diluted/U
+diluteness/M
+dilute/RSDPXYVNG
+dilution/M
+Di/M
+DiMaggio/M
+dimensionality/M
+dimensional/Y
+dimensionless
+dimension/MDGS
+dimer/M
+dime/SM
+dimethylglyoxime
+dimethyl/M
+diminished/U
+diminish/SDGBJ
+diminuendo/SM
+diminution/SM
+diminutiveness/M
+diminutive/SYP
+Dimitri/M
+Dimitry/M
+dimity/MS
+dimmed/U
+dimmer/MS
+dimmest
+dimming
+dimness/SM
+dimorphism/M
+dimple/MGSD
+dimply/RT
+dim/RYPZS
+dimwit/MS
+dimwitted
+Dinah/M
+Dina/M
+dinar/SM
+diner/M
+dine/S
+dinette/MS
+dingbat/MS
+ding/GD
+dinghy/SM
+dingily
+dinginess/SM
+dingle/MS
+dingoes
+dingo/MS
+dingus/SM
+dingy/PRST
+dinky/RST
+din/MDRZGS
+dinned
+dinner/SM
+dinnertime/S
+dinnerware/MS
+Dinnie/M
+dinning
+Dinny/M
+Dino/M
+dinosaur/MS
+dint/SGMD
+diocesan/S
+diocese/SM
+Diocletian/M
+diode/SM
+Diogenes/M
+Dione/M
+Dionisio/M
+Dionis/M
+Dion/M
+Dionne/M
+Dionysian
+Dionysus/M
+Diophantine/M
+diopter/MS
+diorama/SM
+Dior/M
+dioxalate
+dioxide/MS
+dioxin/S
+diphtheria/SM
+diphthong/SM
+diplexers
+diploid/S
+diplomacy/SM
+diploma/SMDG
+diplomata
+diplomatically
+diplomatic/S
+diplomatics/M
+diplomatist/SM
+diplomat/MS
+dipodic
+dipody/M
+dipole/MS
+dipped
+Dipper/M
+dipper/SM
+dipping/S
+dippy/TR
+dip/S
+dipsomaniac/MS
+dipsomania/SM
+dipstick/MS
+dipterous
+diptych/M
+diptychs
+Dir
+Dirac/M
+directed/IUA
+directionality
+directional/SY
+direction/MIS
+directions/A
+directive/SM
+directivity/M
+directly/I
+directness/ISM
+director/AMS
+directorate/SM
+directorial
+directorship/SM
+directory/SM
+direct/RDYPTSVG
+directrix/MS
+directs/IA
+direful/Y
+direness/M
+dire/YTRP
+dirge/GSDM
+Dirichlet/M
+dirigible/S
+dirk/GDMS
+Dirk/M
+dirndl/MS
+dirtily
+dirtiness/SM
+dirt/MS
+dirty/GPRSDT
+Dis
+disable/LZGD
+disablement/MS
+disabler/M
+disabuse
+disadvantaged/P
+disagreeable/S
+disallow/D
+disambiguate/DSGNX
+disappointed/Y
+disappointing/Y
+disarming/Y
+disarrange/L
+disastrous/Y
+disband/L
+disbandment/SM
+disbar/L
+disbarment/MS
+disbarring
+disbelieving/Y
+disbursal/S
+disburse/GDRSL
+disbursement/MS
+disburser/M
+discerner/M
+discernibility
+discernible/I
+discernibly
+discerning/Y
+discernment/MS
+discern/SDRGL
+disc/GDM
+discharged/U
+disciple/DSMG
+discipleship/SM
+disciplinarian/SM
+disciplinary
+disciplined/U
+discipline/IDM
+discipliner/M
+disciplines
+disciplining
+disclosed/U
+discography/MS
+discolored/MP
+discoloreds/U
+discolor/G
+discombobulate/SDGNX
+discomfit/DG
+discomfiture/MS
+disco/MG
+discommode/DG
+disconcerting/Y
+disconnectedness/S
+disconnected/P
+disconnecter/M
+disconnect/R
+disconsolate/YN
+discordance/SM
+discordant/Y
+discord/G
+discorporate/D
+discotheque/MS
+discount/B
+discourage/LGDR
+discouragement/MS
+discouraging/Y
+discoverable/I
+discover/ADGS
+discovered/U
+discoverer/S
+discovery/SAM
+discreetly/I
+discreetness's/I
+discreetness/SM
+discreet/TRYP
+discrepancy/SM
+discrepant/Y
+discreteness/SM
+discrete/YPNX
+discretionary
+discretion/IMS
+discretization
+discretized
+discriminable
+discriminant/MS
+discriminated/U
+discriminate/SDVNGX
+discriminating/YI
+discrimination/MI
+discriminator/MS
+discriminatory
+discursiveness/S
+discussant/MS
+discussed/UA
+discusser/M
+discussion/SM
+discus/SM
+disdainfulness/M
+disdainful/YP
+disdain/MGSD
+disease/G
+disembowelment/SM
+disembowel/SLGD
+disengage/L
+disfigure/L
+disfigurement/MS
+disfranchise/L
+disfranchisement/MS
+disgorge
+disgrace/R
+disgracer/M
+disgruntle/DSLG
+disgruntlement/MS
+disguised/UY
+disguise/R
+disguiser/M
+disgust
+disgusted/Y
+disgustful/Y
+disgusting/Y
+dishabille/SM
+disharmonious
+dishcloth/M
+dishcloths
+dishevel/LDGS
+dishevelment/MS
+dish/GD
+dishonest
+dishonored/U
+dishpan/MS
+dishrag/SM
+dishtowel/SM
+dishwasher/MS
+dishwater/SM
+disillusion/LGD
+disillusionment/SM
+disinfectant/MS
+disinherit
+disinterestedness/SM
+disinterested/P
+disinvest/L
+disjoin
+disjointedness/S
+disjunctive/YS
+disjunct/VS
+disk/D
+diskette/S
+dislike/G
+dislodge/LG
+dislodgement/M
+dismalness/M
+dismal/PSTRY
+dismantle/L
+dismantlement/SM
+dismay/D
+dismayed/U
+dismaying/Y
+dis/MB
+dismember/LG
+dismemberment/MS
+dismissive/Y
+dismiss/RZ
+Disneyland/M
+Disney/M
+disoblige/G
+disorderedness/M
+disordered/YP
+disorderliness/M
+disorderly/P
+disorder/Y
+disorganize
+disorganized/U
+disparagement/MS
+disparager/M
+disparage/RSDLG
+disparaging/Y
+disparateness/M
+disparate/PSY
+dispatch/Z
+dispelled
+dispelling
+dispel/S
+dispensable/I
+dispensary/MS
+dispensate/NX
+dispensation/M
+dispenser/M
+dispense/ZGDRSB
+dispersal/MS
+dispersant/M
+dispersed/Y
+disperser/M
+disperse/XDRSZLNGV
+dispersible
+dispersion/M
+dispersiveness/M
+dispersive/PY
+dispirit/DSG
+displace/L
+display/AGDS
+displayed/U
+displeased/Y
+displease/G
+displeasure
+disport
+disposable/S
+disposal/SM
+dispose/IGSD
+dispositional
+disposition/ISM
+disproportional
+disproportionate/N
+disproportionation/M
+disprove/B
+disputable/I
+disputably/I
+disputant/SM
+disputation/SM
+disputatious/Y
+disputed/U
+disputer/M
+dispute/ZBGSRD
+disquieting/Y
+disquiet/M
+disquisition/SM
+Disraeli/M
+disregardful
+disrepair/M
+disreputableness/M
+disreputable/P
+disrepute/M
+disrespect
+disrupted/U
+disrupter/M
+disrupt/GVDRS
+disruption/MS
+disruptive/YP
+disruptor/M
+dissatisfy
+dissect/DG
+dissed
+dissembler/M
+dissemble/ZGRSD
+disseminate/XGNSD
+dissemination/M
+dissension/SM
+dissenter/M
+dissent/ZGSDR
+dissertation/SM
+disservice
+disses
+dissever
+dissidence/SM
+dissident/MS
+dissimilar/S
+dissing
+dissipatedly
+dissipatedness/M
+dissipated/U
+dissipater/M
+dissipate/XRSDVNG
+dissipation/M
+dissociable/I
+dissociate/DSXNGV
+dissociated/U
+dissociation/M
+dissociative/Y
+dissoluble/I
+dissoluteness/SM
+dissolute/PY
+dissolve/ASDG
+dissolved/U
+dissonance/SM
+dissonant/Y
+dissuade/GDRS
+dissuader/M
+dissuasive
+dist
+distaff/SM
+distal/Y
+distance/DSMG
+distantness/M
+distant/YP
+distaste
+distemper
+distend
+distension
+distention/SM
+distillate/XNMS
+distillation/M
+distillery/MS
+distincter
+distinctest
+distinction/MS
+distinctiveness/MS
+distinctive/YP
+distinct/IYVP
+distinctness/MSI
+distinguishable/I
+distinguishably/I
+distinguish/BDRSG
+distinguished/U
+distinguisher/M
+distort/BGDR
+distorted/U
+distorter/M
+distortion/MS
+distract/DG
+distractedness/M
+distracted/YP
+distracting/Y
+distrait
+distraught/Y
+distress
+distressful
+distressing/Y
+distribute/ADXSVNGB
+distributed/U
+distributer
+distributional
+distribution/AM
+distributiveness/M
+distributive/SPY
+distributivity
+distributorship/M
+distributor/SM
+district/GSAD
+district's
+distrust/G
+disturbance/SM
+disturbed/U
+disturber/M
+disturbing/Y
+disturb/ZGDRS
+disulfide/M
+disuse/M
+disyllable/M
+Dita/M
+ditcher/M
+ditch/MRSDG
+dither/RDZSG
+ditsy/TR
+ditto/DMGS
+ditty/SDGM
+Ditzel/M
+ditz/S
+diuresis/M
+diuretic/S
+diurnal/SY
+divalent/S
+diva/MS
+divan/SM
+dived/M
+divergence/SM
+divergent/Y
+diverge/SDG
+diver/M
+diverseness/MS
+diverse/XYNP
+diversification/M
+diversifier/M
+diversify/GSRDNX
+diversionary
+diversion/M
+diversity/SM
+divert/GSD
+diverticulitis/SM
+divertimento/M
+dive/S
+divestiture/MS
+divest/LDGS
+divestment/S
+dividable
+divide/AGDS
+divided/U
+dividend/MS
+divider/MS
+divination/SM
+diviner/M
+divine/RSDTZYG
+divinity/MS
+divisibility/IMS
+divisible/I
+divisional
+division/SM
+divisiveness/MS
+divisive/PY
+divisor/SM
+divorce/MS
+divorce/GSDLM
+divorcement/MS
+divot/MS
+div/TZGJDRS
+divulge/GSD
+divvy/GSDM
+Dixiecrat/MS
+dixieland
+Dixieland/MS
+Dixie/M
+Dix/M
+Dixon/M
+dizzily
+dizziness/SM
+dizzying/Y
+dizzy/PGRSDT
+DJ
+Djakarta's
+djellabah's
+djellaba/S
+d/JGVX
+Djibouti/M
+DMD
+Dmitri/M
+DMZ
+DNA
+Dnepropetrovsk/M
+Dnepr's
+Dnieper's
+Dniester/M
+Dniren/M
+DOA
+doable
+DOB
+Dobbin/M
+dobbin/MS
+Doberman
+Dobro/M
+docent/SM
+docile/Y
+docility/MS
+docker/M
+docket/GSMD
+dock/GZSRDM
+dockland/MS
+dockside/M
+dockworker/S
+dockyard/SM
+doc/MS
+Doctor
+doctoral
+doctorate/SM
+doctor/GSDM
+Doctorow/M
+doctrinaire/S
+doctrinal/Y
+doctrine/SM
+docudrama/S
+documentary/MS
+documentation/MS
+documented/U
+document/RDMZGS
+DOD
+dodder/DGS
+dodecahedra
+dodecahedral
+dodecahedron/M
+Dode/M
+dodge/GZSRD
+Dodge/M
+dodgem/S
+dodger/M
+Dodgson/M
+Dodie/M
+Dodi/M
+Dodington/M
+Dodoma/M
+dodo/SM
+Dodson/M
+Dody/M
+DOE
+Doe/M
+doe/MS
+doer/MU
+does/AU
+doeskin/MS
+doesn't
+d'oeuvre
+doff/SGD
+dogcart/SM
+dogcatcher/MS
+dogeared
+Doge/M
+doge/SM
+dogfight/GMS
+dogfish/SM
+dogfought
+doggedness/SM
+dogged/PY
+doggerel/SM
+dogging
+doggone/RSDTG
+doggy/SRMT
+doghouse/SM
+dogie/SM
+doglegged
+doglegging
+dogleg/SM
+dogma/MS
+dogmatically/U
+dogmatic/S
+dogmatics/M
+dogmatism/SM
+dogmatist/SM
+dogsbody/M
+dog/SM
+dogtooth/M
+Dogtown/M
+dogtrot/MS
+dogtrotted
+dogtrotting
+dogwood/SM
+dogy's
+Doha/M
+doh's
+doily/SM
+doing/MU
+Dolby/SM
+doldrum/S
+doldrums/M
+doled/F
+dolefuller
+dolefullest
+dolefulness/MS
+doleful/PY
+Dole/M
+dole/MGDS
+doles/F
+Dolf/M
+doling/F
+dollar/SM
+Dolley/M
+Dollie/M
+Dolli/M
+Doll/M
+doll/MDGS
+dollop/GSMD
+Dolly/M
+dolly/SDMG
+dolmen/MS
+dolomite/SM
+dolomitic
+Dolores/M
+Dolorita/SM
+dolorous/Y
+dolor/SM
+dolphin/SM
+Dolph/M
+doltishness/SM
+doltish/YP
+dolt/MS
+domain/MS
+dome/DSMG
+Domenic/M
+Domenico/M
+Domeniga/M
+Domesday/M
+domestically
+domesticate/DSXGN
+domesticated/U
+domestication/M
+domesticity/MS
+domestic/S
+domicile/SDMG
+domiciliary
+dominance/MS
+dominant/YS
+dominate/VNGXSD
+domination/M
+dominator/M
+dominatrices
+dominatrix
+domineer/DSG
+domineeringness/M
+domineering/YP
+Dominga/M
+Domingo/M
+Dominguez/M
+Dominica/M
+Dominican/MS
+Dominick/M
+Dominic/M
+Dominik/M
+Domini/M
+dominion/MS
+Dominique/M
+dominoes
+domino/M
+Domitian/M
+Dom/M
+Donahue/M
+Donald/M
+Donaldson/M
+Donall/M
+Donal/M
+Donalt/M
+Dona/M
+dona/MS
+Donatello/M
+donate/XVGNSD
+donation/M
+donative/M
+Donaugh/M
+Donavon/M
+done/AUF
+Donella/M
+Donelle/M
+Donetsk/M
+Donetta/M
+dong/GDMS
+dongle/S
+Donia/M
+Donica/M
+Donielle/M
+Donizetti/M
+donkey/MS
+Donna/M
+Donnamarie/M
+donned
+Donnell/M
+Donnelly/M
+Donne/M
+Donner/M
+Donnie/M
+Donni/M
+donning
+donnishness/M
+donnish/YP
+Donn/RM
+donnybrook/MS
+Donny/M
+donor/MS
+Donovan/M
+don/S
+Don/SM
+don't
+donut/MS
+donutted
+donutting
+doodad/MS
+doodlebug/MS
+doodler/M
+doodle/SRDZG
+doohickey/MS
+Dooley/M
+Doolittle/M
+doom/MDGS
+doomsday/SM
+Doonesbury/M
+doorbell/SM
+door/GDMS
+doorhandles
+doorkeeper/M
+doorkeep/RZ
+doorknob/SM
+doorman/M
+doormat/SM
+doormen
+doornail/M
+doorplate/SM
+doors/I
+doorstep/MS
+doorstepped
+doorstepping
+doorstop/MS
+doorway/MS
+dooryard/SM
+dopamine
+dopant/M
+dopa/SM
+dope/DRSMZG
+doper/M
+dopey
+dopier
+dopiest
+dopiness/S
+Doppler/M
+Dorado/M
+Doralia/M
+Doralin/M
+Doralyn/M
+Doralynne/M
+Doralynn/M
+Dora/M
+Dorcas
+Dorchester/M
+Doreen/M
+Dorelia/M
+Dorella/M
+Dorelle/M
+Dor/M
+Dorena/M
+Dorene/M
+Doretta/M
+Dorette/M
+Dorey/M
+Doria/M
+Dorian/M
+Doric
+Dorice/M
+Dorie/M
+Dori/MS
+Dorine/M
+Dorisa/M
+Dorise/M
+Dorita/M
+dork/S
+dorky/RT
+dormancy/MS
+dormant/S
+dormer/M
+dormice
+dormitory/SM
+dorm/MRZS
+dormouse/M
+Dorolice/M
+Dorolisa/M
+Doro/M
+Dorotea/M
+Doroteya/M
+Dorothea/M
+Dorothee/M
+Dorothy/M
+Dorree/M
+Dorrie/M
+Dorri/SM
+Dorry/M
+dorsal/YS
+Dorsey/M
+Dorthea/M
+Dorthy/M
+Dortmund/M
+Dory/M
+dory/SM
+DOS
+dosage/SM
+dose/M
+dos/GDS
+Dosi/M
+dosimeter/MS
+dosimetry/M
+dossier/MS
+dost
+Dostoevsky/M
+DOT
+dotage/SM
+dotard/MS
+doter/M
+dote/S
+Doti/M
+doting/Y
+Dot/M
+dot/MDRSJZG
+Dotson/M
+dotted
+Dottie/M
+Dotti/M
+dottiness/M
+dotting
+Dotty/M
+dotty/PRT
+do/TZRHGJ
+Douala/M
+Douay/M
+Doubleday/M
+doubled/UA
+double/GPSRDZ
+doubleheader/MS
+doubleness/M
+doubler/M
+doubles/M
+doublespeak/S
+doublethink/M
+doublet/MS
+doubleton/M
+doubling/A
+doubloon/MS
+doubly
+doubt/AGSDMB
+doubted/U
+doubter/SM
+doubtfulness/SM
+doubtful/YP
+doubting/Y
+doubtlessness/M
+doubtless/YP
+douche/GSDM
+Dougherty/M
+dough/M
+doughs
+doughty/RT
+doughy/RT
+Dougie/M
+Douglas/M
+Douglass
+Doug/M
+Dougy/M
+dourness/MS
+Douro/M
+dour/TYRP
+douser/M
+douse/SRDG
+dovecote/MS
+Dover/M
+dove/RSM
+dovetail/GSDM
+dovish
+Dov/MR
+dowager/SM
+dowdily
+dowdiness/MS
+dowdy/TPSR
+dowel/GMDS
+dower/GDMS
+Dow/M
+downbeat/SM
+downcast/S
+downdraft/M
+downer/M
+Downey/M
+downfall/NMS
+downgrade/GSD
+down/GZSRD
+downheartedness/MS
+downhearted/PY
+downhill/RS
+downland
+download/DGS
+downpipes
+downplay/GDS
+downpour/MS
+downrange
+downrightness/M
+downright/YP
+downriver
+Downs
+downscale/GSD
+downside/S
+downsize/DSG
+downslope
+downspout/SM
+downstage/S
+downstairs
+downstate/SR
+downstream
+downswing/MS
+downtime/SM
+downtowner/M
+downtown/MRS
+downtrend/M
+downtrodden
+downturn/MS
+downwardness/M
+downward/YPS
+downwind
+downy/RT
+dowry/SM
+dowse/GZSRD
+dowser/M
+doxology/MS
+doyenne/SM
+doyen/SM
+Doyle/M
+Doy/M
+doze
+dozen/GHD
+dozenths
+dozer/M
+doz/XGNDRS
+dozy
+DP
+DPs
+dpt
+DPT
+drabbed
+drabber
+drabbest
+drabbing
+drabness/MS
+drab/YSP
+drachma/MS
+Draco/M
+draconian
+Draconian
+Dracula/M
+draft/AMDGS
+draftee/SM
+drafter/MS
+draftily
+draftiness/SM
+drafting/S
+draftsman/M
+draftsmanship/SM
+draftsmen
+draftsperson
+draftswoman
+draftswomen
+drafty/PTR
+dragged
+dragger/M
+dragging/Y
+draggy/RT
+drag/MS
+dragnet/MS
+dragonfly/SM
+dragonhead/M
+dragon/SM
+dragoon/DMGS
+drainage/MS
+drainboard/SM
+drained/U
+drainer/M
+drainpipe/MS
+drain/SZGRDM
+Drake/M
+drake/SM
+Dramamine/MS
+drama/SM
+dramatically/U
+dramatical/Y
+dramatic/S
+dramatics/M
+dramatist/MS
+dramatization/MS
+dramatized/U
+dramatizer/M
+dramatize/SRDZG
+dramaturgy/M
+Drambuie/M
+drammed
+dramming
+dram/MS
+drank
+Drano/M
+draper/M
+drapery/MS
+drape/SRDGZ
+drastic
+drastically
+drat/S
+dratted
+dratting
+Dravidian/M
+drawable
+draw/ASG
+drawback/MS
+drawbridge/SM
+drawer/SM
+drawing/SM
+drawler/M
+drawling/Y
+drawl/RDSG
+drawly
+drawn/AI
+drawnly
+drawnness
+drawstring/MS
+dray/SMDG
+dreadfulness/SM
+dreadful/YPS
+dreadlocks
+dreadnought/SM
+dread/SRDG
+dreamboat/SM
+dreamed/U
+dreamer/M
+dreamily
+dreaminess/SM
+dreaming/Y
+dreamland/SM
+dreamlessness/M
+dreamless/PY
+dreamlike
+dream/SMRDZG
+dreamworld/S
+dreamy/PTR
+drearily
+dreariness/SM
+drear/S
+dreary/TRSP
+Dreddy/M
+dredge/MZGSRD
+dredger/M
+Dredi/M
+dreg/MS
+Dreiser/M
+Dre/M
+drencher/M
+drench/GDRS
+Dresden/M
+dress/ADRSG
+dressage/MS
+dressed/U
+dresser/MS
+dresser's/A
+dresses/U
+dressiness/SM
+dressing/MS
+dressmaker/MS
+dressmaking/SM
+dressy/PTR
+drew/A
+Drew/M
+Drexel/M
+Dreyfus/M
+Dreyfuss
+dribble/DRSGZ
+dribbler/M
+driblet/SM
+drib/SM
+dried/U
+drier/M
+drifter/M
+drifting/Y
+drift/RDZSG
+driftwood/SM
+driller/M
+drilling/M
+drillmaster/SM
+drill/MRDZGS
+drinkable/S
+drink/BRSZG
+drinker/M
+dripped
+dripping/MS
+drippy/RT
+drip/SM
+driveler/M
+drivel/GZDRS
+driven/P
+driver/M
+drive/SRBGZJ
+driveway/MS
+drizzle/DSGM
+drizzling/Y
+drizzly/TR
+Dr/M
+drogue/MS
+drollery/SM
+drollness/MS
+droll/RDSPTG
+drolly
+dromedary/MS
+Drona/M
+drone/SRDGM
+droning/Y
+drool/GSRD
+droopiness/MS
+drooping/Y
+droop/SGD
+droopy/PRT
+drophead
+dropkick/S
+droplet/SM
+dropout/MS
+dropped
+dropper/SM
+dropping/MS
+dropsical
+drop/SM
+dropsy/MS
+drosophila/M
+dross/SM
+drought/SM
+drover/M
+drove/SRDGZ
+drowner/M
+drown/RDSJG
+drowse/SDG
+drowsily
+drowsiness/SM
+drowsy/PTR
+drubbed
+drubber/MS
+drubbing/SM
+drub/S
+Drucie/M
+Drucill/M
+Druci/M
+Drucy/M
+drudge/MGSRD
+drudger/M
+drudgery/SM
+drudging/Y
+Drud/M
+drugged
+druggie/SRT
+drugging
+druggist/SM
+Drugi/M
+drugless
+drug/SM
+drugstore/SM
+druidism/MS
+druid/MS
+Druid's
+Dru/M
+drumbeat/SGM
+drumhead/M
+drumlin/MS
+drummed
+drummer/SM
+drumming
+Drummond/M
+drum/SM
+drumstick/SM
+drunkard/SM
+drunkenness/SM
+drunken/YP
+drunk/SRNYMT
+drupe/SM
+Drury/M
+Drusie/M
+Drusilla/M
+Drusi/M
+Drusy/M
+druthers
+dryad/MS
+Dryden/M
+dryer/MS
+dry/GYDRSTZ
+dryish
+dryness/SM
+drys
+drystone
+drywall/GSD
+D's
+d's/A
+Dshubba/M
+DST
+DTP
+dualism/MS
+dualistic
+dualist/M
+duality/MS
+dual/YS
+Duane/M
+Dubai/M
+dubbed
+dubber/S
+dubbing/M
+dubbin/MS
+Dubcek/M
+Dubhe/M
+dubiety/MS
+dubiousness/SM
+dubious/YP
+Dublin/M
+Dubrovnik/M
+dub/S
+Dubuque/M
+ducal
+ducat/SM
+duce/CAIKF
+duce's
+Duchamp/M
+duchess/MS
+duchy/SM
+duckbill/SM
+ducker/M
+duck/GSRDM
+duckling/SM
+duckpins
+duckpond
+duckweed/MS
+ducky/RSMT
+ducted/CFI
+ductile/I
+ductility/SM
+ducting/F
+duct/KMSF
+ductless
+duct's/A
+ducts/CI
+ductwork/M
+dudder
+dude/MS
+dudgeon/SM
+dud/GMDS
+Dudley/M
+Dud/M
+duelist/MS
+duel/MRDGZSJ
+dueness/M
+duenna/MS
+due/PMS
+duet/MS
+duetted
+duetting
+duffel/M
+duffer/M
+duff/GZSRDM
+Duffie/M
+Duff/M
+Duffy/M
+Dugald/M
+dugout/SM
+dug/S
+duh
+DUI
+Duisburg/M
+dukedom/SM
+duke/DSMG
+Duke/M
+Dukey/M
+Dukie/M
+Duky/M
+Dulcea/M
+Dulce/M
+dulcet/SY
+Dulcia/M
+Dulciana/M
+Dulcie/M
+dulcify
+Dulci/M
+dulcimer/MS
+Dulcinea/M
+Dulcine/M
+Dulcy/M
+dullard/MS
+Dulles/M
+dullness/MS
+dull/SRDPGT
+dully
+dulness's
+Dulsea/M
+Duluth/M
+duly/U
+Du/M
+Dumas
+dumbbell/MS
+dumbfound/GSDR
+dumbness/MS
+Dumbo/M
+dumb/PSGTYRD
+dumbstruck
+dumbwaiter/SM
+dumdum/MS
+dummy/SDMG
+Dumont/M
+dumper/UM
+dumpiness/MS
+dumpling/MS
+dump/SGZRD
+dumpster/S
+Dumpster/S
+Dumpty/M
+dumpy/PRST
+Dunant/M
+Dunbar/M
+Duncan/M
+dunce/MS
+Dunc/M
+Dundee/M
+dunderhead/MS
+Dunedin/M
+dune/SM
+dungaree/SM
+dungeon/GSMD
+dunghill/MS
+dung/SGDM
+Dunham/M
+dunker/M
+dunk/GSRD
+Dunkirk/M
+Dunlap/M
+Dun/M
+dunned
+Dunne/M
+dunner
+dunnest
+dunning
+Dunn/M
+dunno/M
+dun/S
+Dunstan/M
+duodecimal/S
+duodena
+duodenal
+duodenum/M
+duologue/M
+duo/MS
+duopolist
+duopoly/M
+dupe/NGDRSMZ
+duper/M
+dupion/M
+duple
+duplexer/M
+duplex/MSRDG
+duplicability/M
+duplicable
+duplicate/ADSGNX
+duplication/AM
+duplicative
+duplicator/MS
+duplicitous
+duplicity/SM
+Dupont/MS
+DuPont/MS
+durability/MS
+durableness/M
+durable/PS
+durably
+Duracell/M
+durance/SM
+Durand/M
+Duran/M
+Durante/M
+Durant/M
+durational
+duration/MS
+Durban/M
+Dürer/M
+duress/SM
+Durex/M
+Durham/MS
+during
+Durkee/M
+Durkheim/M
+Dur/M
+Durocher/M
+durst
+durum/MS
+Durward/M
+Duse/M
+Dusenberg/M
+Dusenbury/M
+Dushanbe/M
+dusk/GDMS
+duskiness/MS
+dusky/RPT
+Düsseldorf
+dustbin/MS
+dustcart/M
+dustcover
+duster/M
+dustily
+dustiness/MS
+dusting/M
+Dustin/M
+dustless
+dustman/M
+dustmen
+dust/MRDGZS
+dustpan/SM
+Dusty/M
+dusty/RPT
+Dutch/M
+Dutchman/M
+Dutchmen
+dutch/MS
+Dutchwoman
+Dutchwomen
+duteous/Y
+dutiable
+dutifulness/S
+dutiful/UPY
+duty/SM
+Duvalier/M
+duvet/SM
+duxes
+Dvina/M
+Dvorák/M
+Dwain/M
+dwarfish
+dwarfism/MS
+dwarf/MTGSPRD
+Dwayne/M
+dweeb/S
+dweller/SM
+dwell/IGS
+dwelling/MS
+dwelt/I
+DWI
+Dwight/M
+dwindle/GSD
+dyadic
+dyad/MS
+Dyana/M
+Dyane/M
+Dyan/M
+Dyanna/M
+Dyanne/M
+Dyann/M
+dybbukim
+dybbuk/SM
+dyed/A
+dyeing/M
+dye/JDRSMZG
+dyer/M
+Dyer/M
+dyes/A
+dyestuff/SM
+dying/UA
+Dyke/M
+dyke's
+Dylan/M
+Dy/M
+Dynah/M
+Dyna/M
+dynamical/Y
+dynamic/S
+dynamics/M
+dynamism/SM
+dynamiter/M
+dynamite/RSDZMG
+dynamized
+dynamo/MS
+dynastic
+dynasty/MS
+dyne/M
+dysentery/SM
+dysfunctional
+dysfunction/MS
+dyslectic/S
+dyslexia/MS
+dyslexically
+dyslexic/S
+dyspepsia/MS
+dyspeptic/S
+dysprosium/MS
+dystopia/M
+dystrophy/M
+dz
+Dzerzhinsky/M
+E
+ea
+each
+Eachelle/M
+Eada/M
+Eadie/M
+Eadith/M
+Eadmund/M
+eagerness/MS
+eager/TSPRYM
+eagle/SDGM
+eaglet/SM
+Eakins/M
+Ealasaid/M
+Eal/M
+Eamon/M
+earache/SM
+eardrum/SM
+earful/MS
+ear/GSMDYH
+Earhart/M
+earing/M
+earldom/MS
+Earle/M
+Earlene/M
+Earlie/M
+Earline/M
+earliness/SM
+Earl/M
+earl/MS
+earlobe/S
+Early/M
+early/PRST
+earmark/DGSJ
+earmuff/SM
+earned/U
+earner/M
+Earnestine/M
+Earnest/M
+earnestness/MS
+earnest/PYS
+earn/GRDZTSJ
+earning/M
+earphone/MS
+earpieces
+earplug/MS
+Earp/M
+earring/MS
+earshot/MS
+earsplitting
+Eartha/M
+earthbound
+earthed/U
+earthenware/MS
+earthiness/SM
+earthliness/M
+earthling/MS
+earthly/TPR
+earth/MDNYG
+earthmen
+earthmover/M
+earthmoving
+earthquake/SDGM
+earthshaking
+earths/U
+earthward/S
+earthwork/MS
+earthworm/MS
+earthy/PTR
+Earvin/M
+earwax/MS
+earwigged
+earwigging
+earwig/MS
+eased/E
+ease/LDRSMG
+easel/MS
+easement/MS
+easer/M
+ease's/EU
+eases/UE
+easies
+easily/U
+easiness/MSU
+easing/M
+eastbound
+easterly/S
+Easter/M
+easterner/M
+Easterner/M
+easternmost
+Eastern/RZ
+eastern/ZR
+easter/Y
+east/GSMR
+Easthampton/M
+easting/M
+Eastland/M
+Eastman/M
+eastward/S
+Eastwick/M
+Eastwood/M
+East/ZSMR
+easygoingness/M
+easygoing/P
+easy/PUTR
+eatables
+eatable/U
+eaten/U
+eater/M
+eatery/MS
+eating/M
+Eaton/M
+eat/SJZGNRB
+eavesdropped
+eavesdropper/MS
+eavesdropping
+eavesdrop/S
+eave/SM
+Eba/M
+Ebba/M
+ebb/DSG
+EBCDIC
+Ebeneezer/M
+Ebeneser/M
+Ebenezer/M
+Eben/M
+Eberhard/M
+Eberto/M
+Eb/MN
+Ebola
+Ebonee/M
+Ebonics
+Ebony/M
+ebony/SM
+Ebro/M
+ebullience/SM
+ebullient/Y
+ebullition/SM
+EC
+eccentrically
+eccentricity/SM
+eccentric/MS
+eccl
+Eccles
+Ecclesiastes/M
+ecclesiastical/Y
+ecclesiastic/MS
+ECG
+echelon/SGDM
+echinoderm/SM
+echo/DMG
+echoed/A
+echoes/A
+echoic
+echolocation/SM
+éclair/MS
+éclat/MS
+eclectically
+eclecticism/MS
+eclectic/S
+eclipse/MGSD
+ecliptic/MS
+eclogue/MS
+ecocide/SM
+ecol
+Ecole/M
+ecologic
+ecological/Y
+ecologist/MS
+ecology/MS
+Eco/M
+econ
+Econometrica/M
+econometricians
+econometric/S
+econometrics/M
+economical/YU
+economic/S
+economics/M
+economist/MS
+economization
+economize/GZSRD
+economizer/M
+economizing/U
+economy/MS
+ecosystem/MS
+ecru/SM
+ecstasy/MS
+Ecstasy/S
+ecstatically
+ecstatic/S
+ectoplasm/M
+Ecuadoran/S
+Ecuadorean/S
+Ecuadorian/S
+Ecuador/M
+ecumenical/Y
+ecumenicism/SM
+ecumenicist/MS
+ecumenic/MS
+ecumenics/M
+ecumenism/SM
+ecumenist/MS
+eczema/MS
+Eda/M
+Edam/SM
+Edan/M
+ed/ASC
+Edda/M
+Eddie/M
+Eddi/M
+Edd/M
+Eddy/M
+eddy/SDMG
+Edee/M
+Edeline/M
+edelweiss/MS
+Ede/M
+edema/SM
+edematous
+eden
+Eden/M
+Edgard/M
+Edgardo/M
+Edgar/M
+edge/DRSMZGJ
+edgeless
+edger/M
+Edgerton/M
+Edgewater/M
+edgewise
+Edgewood/M
+edgily
+edginess/MS
+edging/M
+edgy/TRP
+edibility/MS
+edibleness/SM
+edible/SP
+edict/SM
+Edie/M
+edification/M
+edifice/SM
+edifier/M
+edifying/U
+edify/ZNXGRSD
+Edik/M
+Edi/MH
+Edinburgh/M
+Edin/M
+Edison/M
+editable
+Edita/M
+edited/IU
+Editha/M
+Edithe/M
+Edith/M
+edition/SM
+editorialist/M
+editorialize/DRSG
+editorializer/M
+editorial/YS
+editor/MS
+editorship/MS
+edit/SADG
+Ediva/M
+Edlin/M
+Edmond/M
+Edmon/M
+Edmonton/M
+Edmund/M
+Edna/M
+Edouard/M
+EDP
+eds
+Edsel/M
+Edsger/M
+EDT
+Eduard/M
+Eduardo/M
+educability/SM
+educable/S
+educated/YP
+educate/XASDGN
+educationalists
+educational/Y
+education/AM
+educationists
+educative
+educator/MS
+educ/DBG
+educe/S
+eduction/M
+Eduino/M
+edutainment/S
+Edvard/M
+Edwardian
+Edwardo/M
+Edward/SM
+Edwina/M
+Edwin/M
+Ed/XMN
+Edy/M
+Edythe/M
+Edyth/M
+EEC
+EEG
+eek/S
+eelgrass/M
+eel/MS
+e'en
+EEO
+EEOC
+e'er
+eerie/RT
+eerily
+eeriness/MS
+Eeyore/M
+effaceable/I
+effacement/MS
+effacer/M
+efface/SRDLG
+effectiveness/ISM
+effectives
+effective/YIP
+effector/MS
+effect/SMDGV
+effectual/IYP
+effectualness/MI
+effectuate/SDGN
+effectuation/M
+effeminacy/MS
+effeminate/SY
+effendi/MS
+efferent/SY
+effervesce/GSD
+effervescence/SM
+effervescent/Y
+effeteness/SM
+effete/YP
+efficacious/IPY
+efficaciousness/MI
+efficacy/IMS
+efficiency/MIS
+efficient/ISY
+Effie/M
+effigy/SM
+effloresce
+efflorescence/SM
+efflorescent
+effluence/SM
+effluent/MS
+effluvia
+effluvium/M
+effluxion
+efflux/M
+effortlessness/SM
+effortless/PY
+effort/MS
+effrontery/MS
+effulgence/SM
+effulgent
+effuse/XSDVGN
+effusion/M
+effusiveness/MS
+effusive/YP
+EFL
+e/FMDS
+Efrain/M
+Efrem/M
+Efren/M
+EFT
+egad
+egalitarian/I
+egalitarianism/MS
+egalitarians
+EGA/M
+Egan/M
+Egbert/M
+Egerton/M
+eggbeater/SM
+eggcup/MS
+egger/M
+egg/GMDRS
+eggheaded/P
+egghead/SDM
+eggnog/SM
+eggplant/MS
+eggshell/SM
+egis's
+eglantine/MS
+egocentrically
+egocentricity/SM
+egocentric/S
+egoism/SM
+egoistic
+egoistical/Y
+egoist/SM
+egomaniac/MS
+egomania/MS
+Egon/M
+Egor/M
+ego/SM
+egotism/SM
+egotistic
+egotistical/Y
+egotist/MS
+egregiousness/MS
+egregious/PY
+egress/SDMG
+egret/SM
+Egyptian/S
+Egypt/M
+Egyptology/M
+eh
+Ehrlich/M
+Eichmann/M
+eiderdown/SM
+eider/SM
+eidetic
+Eiffel/M
+eigenfunction/MS
+eigenstate/S
+eigenvalue/SM
+eigenvector/MS
+eighteen/MHS
+eighteenths
+eightfold
+eighth/MS
+eighths
+eightieths
+eightpence
+eight/SM
+eighty/SHM
+Eileen/M
+Eilis/M
+Eimile/M
+Einsteinian
+einsteinium/MS
+Einstein/SM
+Eire/M
+Eirena/M
+Eisenhower/M
+Eisenstein/M
+Eisner/M
+eisteddfod/M
+either
+ejaculate/SDXNG
+ejaculation/M
+ejaculatory
+ejecta
+ejection/SM
+ejector/SM
+eject/VGSD
+Ekaterina/M
+Ekberg/M
+eked/A
+eke/DSG
+EKG
+Ekstrom/M
+Ektachrome/M
+elaborateness/SM
+elaborate/SDYPVNGX
+elaboration/M
+elaborators
+Elaina/M
+Elaine/M
+Elana/M
+eland/SM
+Elane/M
+élan/M
+Elanor/M
+elans
+elapse/SDG
+el/AS
+elastically/I
+elasticated
+elasticity/SM
+elasticize/GDS
+elastic/S
+elastodynamics
+elastomer/M
+elatedness/M
+elated/PY
+elater/M
+elate/SRDXGN
+elation/M
+Elayne/M
+Elba/MS
+Elbe/M
+Elberta/M
+Elbertina/M
+Elbertine/M
+Elbert/M
+elbow/GDMS
+elbowroom/SM
+Elbrus/M
+Elden/M
+elderberry/MS
+elderflower
+elderliness/M
+elderly/PS
+elder/SY
+eldest
+Eldin/M
+Eldon/M
+Eldorado's
+Eldredge/M
+Eldridge/M
+Eleanora/M
+Eleanore/M
+Eleanor/M
+Eleazar/M
+electable/U
+elect/ASGD
+elected/U
+electioneer/GSD
+election/SAM
+electiveness/M
+elective/SPY
+electoral/Y
+electorate/SM
+elector/SM
+Electra/M
+electress/M
+electricalness/M
+electrical/PY
+electrician/SM
+electricity/SM
+electric/S
+electrification/M
+electrifier/M
+electrify/ZXGNDRS
+electrocardiogram/MS
+electrocardiograph/M
+electrocardiographs
+electrocardiography/MS
+electrochemical/Y
+electrocute/GNXSD
+electrocution/M
+electrode/SM
+electrodynamics/M
+electrodynamic/YS
+electroencephalogram/SM
+electroencephalographic
+electroencephalograph/M
+electroencephalographs
+electroencephalography/MS
+electrologist/MS
+electroluminescent
+electrolysis/M
+electrolyte/SM
+electrolytic
+electrolytically
+electrolyze/SDG
+electro/M
+electromagnetic
+electromagnetically
+electromagnetism/SM
+electromagnet/SM
+electromechanical
+electromechanics
+electromotive
+electromyograph
+electromyographic
+electromyographically
+electromyography/M
+electronegative
+electronically
+electronic/S
+electronics/M
+electron/MS
+electrophoresis/M
+electrophorus/M
+electroplate/DSG
+electroscope/MS
+electroscopic
+electroshock/GDMS
+electrostatic/S
+electrostatics/M
+electrotherapist/M
+electrotype/GSDZM
+electroweak
+eleemosynary
+Eleen/M
+elegance/ISM
+elegant/YI
+elegiacal
+elegiac/S
+elegy/SM
+elem
+elemental/YS
+elementarily
+elementariness/M
+elementary/P
+element/MS
+Elena/M
+Elene/M
+Eleni/M
+Elenore/M
+Eleonora/M
+Eleonore/M
+elephantiases
+elephantiasis/M
+elephantine
+elephant/SM
+elevated/S
+elevate/XDSNG
+elevation/M
+elevator/SM
+eleven/HM
+elevens/S
+elevenths
+elev/NX
+Elfie/M
+elfin/S
+elfish
+elf/M
+Elfreda/M
+Elfrida/M
+Elfrieda/M
+Elga/M
+Elgar/M
+Elianora/M
+Elianore/M
+Elia/SM
+Elicia/M
+elicitation/MS
+elicit/GSD
+elide/GSD
+Elie/M
+eligibility/ISM
+eligible/SI
+Elihu/M
+Elijah/M
+Eli/M
+eliminate/XSDYVGN
+elimination/M
+eliminator/SM
+Elinore/M
+Elinor/M
+Eliot/M
+Elisabeth/M
+Elisabet/M
+Elisabetta/M
+Elisa/M
+Elise/M
+Eliseo/M
+Elisha/M
+elision/SM
+Elissa/M
+Elita/M
+elite/MPS
+elitism/SM
+elitist/SM
+elixir/MS
+Elizabethan/S
+Elizabeth/M
+Elizabet/M
+Eliza/M
+Elka/M
+Elke/M
+Elkhart/M
+elk/MS
+Elladine/M
+Ella/M
+Ellary/M
+Elle/M
+Ellene/M
+Ellen/M
+Ellerey/M
+Ellery/M
+Ellesmere/M
+Ellette/M
+Ellie/M
+Ellington/M
+Elliot/M
+Elliott/M
+ellipse/MS
+ellipsis/M
+ellipsoidal
+ellipsoid/MS
+ellipsometer/MS
+ellipsometry
+elliptic
+elliptical/YS
+ellipticity/M
+Elli/SM
+Ellison/M
+Ellissa/M
+ell/MS
+Ellswerth/M
+Ellsworth/M
+Ellwood/M
+Elly/M
+Ellyn/M
+Ellynn/M
+Elma/M
+Elmer/M
+Elmhurst/M
+Elmira/M
+elm/MRS
+Elmo/M
+Elmore/M
+Elmsford/M
+El/MY
+Elna/MH
+Elnar/M
+Elnath/M
+Elnora/M
+Elnore/M
+elocutionary
+elocutionist/MS
+elocution/SM
+elodea/S
+Elohim/M
+Eloisa/M
+Eloise/M
+elongate/NGXSD
+elongation/M
+Elonore/M
+elopement/MS
+eloper/M
+elope/SRDLG
+eloquence/SM
+eloquent/IY
+Elora/M
+Eloy/M
+Elroy/M
+els
+Elsa/M
+Elsbeth/M
+else/M
+Else/M
+Elset/M
+elsewhere
+Elsey/M
+Elsie/M
+Elsi/M
+Elsinore/M
+Elspeth/M
+Elston/M
+Elsworth/M
+Elsy/M
+Eltanin/M
+Elton/M
+eluate/SM
+elucidate/SDVNGX
+elucidation/M
+elude/GSD
+elusiveness/SM
+elusive/YP
+elute/DGN
+elution/M
+Elva/M
+elven
+Elvera/M
+elver/SM
+elves/M
+Elvia/M
+Elvina/M
+Elvin/M
+Elvira/M
+elvish
+Elvis/M
+Elvyn/M
+Elwin/M
+Elwira/M
+Elwood/M
+Elwyn/M
+Ely/M
+Elyn/M
+Elyse/M
+Elysees
+Elysée/M
+Elysha/M
+Elysia/M
+elysian
+Elysian
+Elysium/SM
+Elyssa/M
+EM
+emaciate/NGXDS
+emaciation/M
+emacs/M
+Emacs/M
+email/SMDG
+Emalee/M
+Emalia/M
+Ema/M
+emanate/XSDVNG
+emanation/M
+emancipate/DSXGN
+emancipation/M
+emancipator/MS
+Emanuele/M
+Emanuel/M
+emasculate/GNDSX
+emasculation/M
+embalmer/M
+embalm/ZGRDS
+embank/GLDS
+embankment/MS
+embarcadero
+embargoes
+embargo/GMD
+embark/ADESG
+embarkation/EMS
+embarrassedly
+embarrassed/U
+embarrassing/Y
+embarrassment/MS
+embarrass/SDLG
+embassy/MS
+embattle/DSG
+embeddable
+embedded
+embedder
+embedding/MS
+embed/S
+embellished/U
+embellisher/M
+embellish/LGRSD
+embellishment/MS
+ember/MS
+embezzle/LZGDRS
+embezzlement/MS
+embezzler/M
+embitter/LGDS
+embitterment/SM
+emblazon/DLGS
+emblazonment/SM
+emblematic
+emblem/GSMD
+embodier/M
+embodiment/ESM
+embody/ESDGA
+embolden/DSG
+embolism/SM
+embosom
+embosser/M
+emboss/ZGRSD
+embouchure/SM
+embower/GSD
+embraceable
+embracer/M
+embrace/RSDVG
+embracing/Y
+embrasure/MS
+embrittle
+embrocation/SM
+embroiderer/M
+embroider/SGZDR
+embroidery/MS
+embroilment/MS
+embroil/SLDG
+embryologist/SM
+embryology/MS
+embryonic
+embryo/SM
+emceeing
+emcee/SDM
+Emelda/M
+Emelen/M
+Emelia/M
+Emelina/M
+Emeline/M
+Emelita/M
+Emelyne/M
+emendation/MS
+emend/SRDGB
+emerald/SM
+Emera/M
+emerge/ADSG
+emergence/MAS
+emergency/SM
+emergent/S
+emerita
+emeritae
+emeriti
+emeritus
+Emerson/M
+Emery/M
+emery/MGSD
+emetic/S
+emf/S
+emigrant/MS
+emigrate/SDXNG
+emigration/M
+émigré/S
+Emilee/M
+Emile/M
+Emilia/M
+Emilie/M
+Emili/M
+Emiline/M
+Emilio/M
+Emil/M
+Emily/M
+eminence/MS
+Eminence/MS
+eminent/Y
+emirate/SM
+emir/SM
+emissary/SM
+emission/AMS
+emissivity/MS
+emit/S
+emittance/M
+emitted
+emitter/SM
+emitting
+Emlen/M
+Emlyn/M
+Emlynne/M
+Emlynn/M
+em/M
+Em/M
+Emmalee/M
+Emmaline/M
+Emmalyn/M
+Emmalynne/M
+Emmalynn/M
+Emma/M
+Emmanuel/M
+Emmeline/M
+Emmerich/M
+Emmery/M
+Emmet/M
+Emmett/M
+Emmey/M
+Emmie/M
+Emmi/M
+Emmit/M
+Emmott/M
+Emmye/M
+Emmy/SM
+Emogene/M
+emollient/S
+emolument/SM
+Emory/M
+emote/SDVGNX
+emotionalism/MS
+emotionality/M
+emotionalize/GDS
+emotional/UY
+emotionless
+emotion/M
+emotive/Y
+empaneled
+empaneling
+empath
+empathetic
+empathetical/Y
+empathic
+empathize/SDG
+empathy/MS
+emperor/MS
+emphases
+emphasis/M
+emphasize/ZGCRSDA
+emphatically/U
+emphatic/U
+emphysema/SM
+emphysematous
+empire/MS
+empirical/Y
+empiricism/SM
+empiricist/SM
+empiric/SM
+emplace/L
+emplacement/MS
+employability/UM
+employable/US
+employed/U
+employee/SM
+employer/SM
+employ/LAGDS
+employment/UMAS
+emporium/MS
+empower/GLSD
+empowerment/MS
+empress/MS
+emptier/M
+emptily
+emptiness/SM
+empty/GRSDPT
+empyrean/SM
+ems/C
+EMT
+emulate/SDVGNX
+emulation/M
+emulative/Y
+emulator/MS
+emulsification/M
+emulsifier/M
+emulsify/NZSRDXG
+emulsion/SM
+emu/SM
+Emylee/M
+Emyle/M
+enabler/M
+enable/SRDZG
+enactment/ASM
+enact/SGALD
+enameler/M
+enamelware/SM
+enamel/ZGJMDRS
+enamor/DSG
+en/BM
+enc
+encamp/LSDG
+encampment/MS
+encapsulate/SDGNX
+encapsulation/M
+encase/GSDL
+encasement/SM
+encephalitic
+encephalitides
+encephalitis/M
+encephalographic
+encephalopathy/M
+enchain/SGD
+enchanter/MS
+enchant/ESLDG
+enchanting/Y
+enchantment/MSE
+enchantress/MS
+enchilada/SM
+encipherer/M
+encipher/SRDG
+encircle/GLDS
+encirclement/SM
+encl
+enclave/MGDS
+enclosed/U
+enclose/GDS
+enclosure/SM
+encoder/M
+encode/ZJGSRD
+encomium/SM
+encompass/GDS
+encore/GSD
+encounter/GSD
+encouragement/SM
+encourager/M
+encourage/SRDGL
+encouraging/Y
+encroacher/M
+encroach/LGRSD
+encroachment/MS
+encrustation/MS
+encrust/DSG
+encrypt/DGS
+encrypted/U
+encryption/SM
+encumbered/U
+encumber/SEDG
+encumbrancer/M
+encumbrance/SRM
+ency
+encyclical/SM
+encyclopaedia's
+encyclopedia/SM
+encyclopedic
+encyst/GSLD
+encystment/MS
+endanger/DGSL
+endangerment/SM
+endear/GSLD
+endearing/Y
+endearment/MS
+endeavored/U
+endeavorer/M
+endeavor/GZSMRD
+endemically
+endemicity
+endemic/S
+ender/M
+endgame/M
+Endicott/M
+ending/M
+endive/SM
+endlessness/MS
+endless/PY
+endmost
+endnote/MS
+endocrine/S
+endocrinologist/SM
+endocrinology/SM
+endogamous
+endogamy/M
+endogenous/Y
+endomorphism/SM
+endorse/DRSZGL
+endorsement/MS
+endorser/M
+endoscope/MS
+endoscopic
+endoscopy/SM
+endosperm/M
+endothelial
+endothermic
+endow/GSDL
+endowment/SM
+endpoint/MS
+endue/SDG
+endungeoned
+endurable/U
+endurably/U
+endurance/SM
+endure/BSDG
+enduringness/M
+enduring/YP
+endways
+Endymion/M
+end/ZGVMDRSJ
+ENE
+enema/SM
+enemy/SM
+energetically
+energetic/S
+energetics/M
+energized/U
+energizer/M
+energize/ZGDRS
+energy/MS
+enervate/XNGVDS
+enervation/M
+enfeeble/GLDS
+enfeeblement/SM
+enfilade/MGDS
+enfold/SGD
+enforceability/M
+enforceable/U
+enforced/Y
+enforce/LDRSZG
+enforcement/SM
+enforcer/M
+enforcible/U
+enfranchise/ELDRSG
+enfranchisement/EMS
+enfranchiser/M
+engage/ADSGE
+engagement/SEM
+engaging/Y
+Engelbert/M
+Engel/MS
+engender/DGS
+engineer/GSMDJ
+engineering/MY
+engine/MGSD
+England/M
+england/ZR
+Englebert/M
+Englewood/M
+English/GDRSM
+Englishman/M
+Englishmen
+Englishwoman/M
+Englishwomen
+Eng/M
+engorge/LGDS
+engorgement/MS
+Engracia/M
+engram/MS
+engraver/M
+engrave/ZGDRSJ
+engraving/M
+engrossed/Y
+engrosser/M
+engross/GLDRS
+engrossing/Y
+engrossment/SM
+engulf/GDSL
+engulfment/SM
+enhanceable
+enhance/LZGDRS
+enhancement/MS
+enhancer/M
+enharmonic
+Enid/M
+Enif/M
+enigma/MS
+enigmatic
+enigmatically
+Eniwetok/M
+enjambement's
+enjambment/MS
+enjoinder
+enjoin/GSD
+enjoyability
+enjoyableness/M
+enjoyable/P
+enjoyably
+enjoy/GBDSL
+enjoyment/SM
+Enkidu/M
+enlargeable
+enlarge/LDRSZG
+enlargement/MS
+enlarger/M
+enlightened/U
+enlighten/GDSL
+enlightening/U
+enlightenment/SM
+enlistee/MS
+enlister/M
+enlistment/SAM
+enlist/SAGDL
+enliven/LDGS
+enlivenment/SM
+enmesh/DSLG
+enmeshment/SM
+enmity/MS
+Ennis/M
+ennoble/LDRSG
+ennoblement/SM
+ennobler/M
+ennui/SM
+Enoch/M
+enormity/SM
+enormousness/MS
+enormous/YP
+Enos
+enough
+enoughs
+enplane/DSG
+enqueue/DS
+enquirer/S
+enquiringly
+enrage/SDG
+enrapture/GSD
+Enrica/M
+enricher/M
+Enrichetta/M
+enrich/LDSRG
+enrichment/SM
+Enrico/M
+Enrika/M
+Enrique/M
+Enriqueta/M
+enrobed
+enrollee/SM
+enroll/LGSD
+enrollment/SM
+ens
+ensconce/DSG
+ensemble/MS
+enshrine/DSLG
+enshrinement/SM
+enshroud/DGS
+ensign/SM
+ensilage/DSMG
+enslavement/MS
+enslaver/M
+enslave/ZGLDSR
+ensnare/GLDS
+ensnarement/SM
+Ensolite/M
+ensue/SDG
+ensurer/M
+ensure/SRDZG
+entailer/M
+entailment/MS
+entail/SDRLG
+entangle/EGDRSL
+entanglement/ESM
+entangler/EM
+entente/MS
+enter/ASDG
+entered/U
+enterer/M
+enteritides
+enteritis/SM
+enterprise/GMSR
+Enterprise/M
+enterpriser/M
+enterprising/Y
+entertainer/M
+entertaining/Y
+entertainment/SM
+entertain/SGZRDL
+enthalpy/SM
+enthrall/GDSL
+enthrallment/SM
+enthrone/GDSL
+enthronement/MS
+enthuse/DSG
+enthusiasm/SM
+enthusiastically/U
+enthusiastic/U
+enthusiast/MS
+enticement/SM
+entice/SRDJLZG
+enticing/Y
+entire/SY
+entirety/SM
+entitle/GLDS
+entitlement/MS
+entity/SM
+entomb/GDSL
+entombment/MS
+entomological
+entomologist/S
+entomology/MS
+entourage/SM
+entr'acte/S
+entrails
+entrainer/M
+entrain/GSLDR
+entrancement/MS
+entrance/MGDSL
+entranceway/M
+entrancing/Y
+entrant/MS
+entrapment/SM
+entrapped
+entrapping
+entrap/SL
+entreating/Y
+entreat/SGD
+entreaty/SM
+entrée/S
+entrench/LSDG
+entrenchment/MS
+entrepreneurial
+entrepreneur/MS
+entrepreneurship/M
+entropic
+entropy/MS
+entrust/DSG
+entry/ASM
+entryway/SM
+entwine/DSG
+enumerable
+enumerate/AN
+enumerated/U
+enumerates
+enumerating
+enumeration's/A
+enumeration/SM
+enumerative
+enumerator/SM
+enunciable
+enunciated/U
+enunciate/XGNSD
+enunciation/M
+enureses
+enuresis/M
+envelope/MS
+enveloper/M
+envelopment/MS
+envelop/ZGLSDR
+envenom/SDG
+enviableness/M
+enviable/U
+enviably
+envied/U
+envier/M
+enviousness/SM
+envious/PY
+environ/LGSD
+environmentalism/SM
+environmentalist/SM
+environmental/Y
+environment/MS
+envisage/DSG
+envision/GSD
+envoy/SM
+envying/Y
+envy/SRDMG
+enzymatic
+enzymatically
+enzyme/SM
+enzymology/M
+Eocene
+EOE
+eohippus/M
+Eolanda/M
+Eolande/M
+eolian
+eon/SM
+EPA
+epaulet/SM
+épée/S
+ephedrine/MS
+ephemeral/SY
+ephemera/MS
+ephemerids
+ephemeris/M
+Ephesian/S
+Ephesians/M
+Ephesus/M
+Ephraim/M
+Ephrayim/M
+Ephrem/M
+epically
+epicenter/SM
+epic/SM
+Epictetus/M
+Epicurean
+epicurean/S
+epicure/SM
+Epicurus/M
+epicycle/MS
+epicyclic
+epicyclical/Y
+epicycloid/M
+epidemically
+epidemic/MS
+epidemiological/Y
+epidemiologist/MS
+epidemiology/MS
+epidermal
+epidermic
+epidermis/MS
+epidural
+epigenetic
+epiglottis/SM
+epigrammatic
+epigram/MS
+epigrapher/M
+epigraph/RM
+epigraphs
+epigraphy/MS
+epilepsy/SM
+epileptic/S
+epilogue/SDMG
+Epimethius/M
+epinephrine/SM
+epiphany/SM
+Epiphany/SM
+epiphenomena
+episcopacy/MS
+episcopalian
+Episcopalian/S
+Episcopal/S
+episcopal/Y
+episcopate/MS
+episode/SM
+episodic
+episodically
+epistemic
+epistemological/Y
+epistemology/M
+epistle/MRS
+Epistle/SM
+epistolary/S
+epistolatory
+epitaph/GMD
+epitaphs
+epitaxial/Y
+epitaxy/M
+epithelial
+epithelium/MS
+epithet/MS
+epitome/MS
+epitomized/U
+epitomizer/M
+epitomize/SRDZG
+epochal/Y
+epoch/M
+epochs
+eponymous
+epoxy/GSD
+epsilon/SM
+Epsom/M
+Epstein/M
+equability/MS
+equableness/M
+equable/P
+equably
+equaling
+equality/ISM
+equalization/MS
+equalize/DRSGJZ
+equalized/U
+equalizer/M
+equalizes/U
+equal/USDY
+equanimity/MS
+equate/NGXBSD
+equation/M
+equatorial/S
+equator/SM
+equerry/MS
+equestrianism/SM
+equestrian/S
+equestrienne/SM
+equiangular
+equidistant/Y
+equilateral/S
+equilibrate/GNSD
+equilibration/M
+equilibrium/MSE
+equine/S
+equinoctial/S
+equinox/MS
+equipage/SM
+equipartition/M
+equip/AS
+equipment/SM
+equipoise/GMSD
+equipotent
+equipped/AU
+equipping/A
+equiproportional
+equiproportionality
+equiproportionate
+equitable/I
+equitableness/M
+equitably/I
+equitation/SM
+equity/IMS
+equiv
+equivalence/DSMG
+equivalent/SY
+equivocalness/MS
+equivocal/UY
+equivocate/NGSDX
+equivocation/M
+equivocator/SM
+Equuleus/M
+ER
+ERA
+eradicable/I
+eradicate/SDXVGN
+eradication/M
+eradicator/SM
+era/MS
+Eran/M
+erase/N
+eraser/M
+erasion/M
+Erasmus/M
+eras/SRDBGZ
+Erastus/M
+erasure/MS
+Erato/M
+Eratosthenes/M
+erbium/SM
+Erda/M
+ere
+Erebus/M
+erect/GPSRDY
+erectile
+erection/SM
+erectness/MS
+erector/SM
+Erek/M
+erelong
+eremite/MS
+Erena/M
+ergo
+ergodic
+ergodicity/M
+ergonomically
+ergonomics/M
+ergonomic/U
+ergophobia
+ergosterol/SM
+ergot/SM
+erg/SM
+Erhard/M
+Erhart/M
+Erica/M
+Ericha/M
+Erich/M
+Ericka/M
+Erick/M
+Erickson/M
+Eric/M
+Ericson's
+Ericsson's
+Eridanus/M
+Erie/SM
+Erika/M
+Erik/M
+Erikson/M
+Erina/M
+Erin/M
+Erinna/M
+Erinn/M
+eris
+Eris
+Eritrea/M
+Erlang/M
+Erlenmeyer/M
+Erl/M
+Er/M
+Erma/M
+Ermanno/M
+Ermengarde/M
+Ermentrude/M
+Ermina/M
+ermine/MSD
+Erminia/M
+Erminie/M
+Ermin/M
+Ernaline/M
+Erna/M
+Ernesta/M
+Ernestine/M
+Ernest/M
+Ernesto/M
+Ernestus/M
+Ernie/M
+Ernst/M
+Erny/M
+erode/SDG
+erodible
+erogenous
+erosible
+erosional
+erosion/SM
+erosiveness/M
+erosive/P
+Eros/SM
+erotically
+erotica/M
+eroticism/MS
+erotic/S
+errancy/MS
+errand/MS
+errantry/M
+errant/YS
+errata/SM
+erratically
+erratic/S
+erratum/MS
+err/DGS
+Errick/M
+erring/UY
+Erroll/M
+Errol/M
+erroneousness/M
+erroneous/YP
+error/SM
+ersatz/S
+Erse/M
+Erskine/M
+erst
+erstwhile
+Ertha/M
+eructation/MS
+eruct/DGS
+erudite/NYX
+erudition/M
+erupt/DSVG
+eruption/SM
+eruptive/SY
+Ervin/M
+ErvIn/M
+Erv/M
+Erwin/M
+Eryn/M
+erysipelas/SM
+erythrocyte/SM
+es
+e's
+Es
+E's
+Esau/M
+escadrille/M
+escalate/CDSXGN
+escalation/MC
+escalator/SM
+escallop/SGDM
+escapable/I
+escapade/SM
+escapee/MS
+escape/LGSRDB
+escapement/MS
+escaper/M
+escapism/SM
+escapist/S
+escapology
+escarole/MS
+escarpment/MS
+eschatology/M
+Escherichia/M
+Escher/M
+eschew/SGD
+Escondido/M
+escort/SGMD
+escritoire/SM
+escrow/DMGS
+escudo/MS
+escutcheon/SM
+Esdras/M
+ESE
+Eskimo/SM
+ESL
+Esma/M
+Esmaria/M
+Esmark/M
+Esme/M
+Esmeralda/M
+esophageal
+esophagi
+esophagus/M
+esoteric
+esoterica
+esoterically
+esp
+ESP
+espadrille/MS
+Espagnol/M
+espalier/SMDG
+especial/Y
+Esperanto/M
+Esperanza/M
+Espinoza/M
+espionage/SM
+esplanade/SM
+Esp/M
+Esposito/M
+espousal/MS
+espouser/M
+espouse/SRDG
+espresso/SM
+esprit/SM
+espy/GSD
+Esq/M
+esquire/GMSD
+Esquire/S
+Esra/M
+Essa/M
+essayer/M
+essayist/SM
+essay/SZMGRD
+essence/MS
+Essene/SM
+Essen/M
+essentialist/M
+essentially
+essentialness/M
+essential/USI
+Essequibo/M
+Essex/M
+Essie/M
+Essy/M
+EST
+established/U
+establisher/M
+establish/LAEGSD
+establishment/EMAS
+Establishment/MS
+Esta/M
+estate/GSDM
+Esteban/M
+esteem/EGDS
+Estela/M
+Estele/M
+Estella/M
+Estelle/M
+Estell/M
+Estel/M
+Esterházy/M
+ester/M
+Ester/M
+Estes
+Estevan/M
+Esther/M
+esthete's
+esthetically
+esthetic's
+esthetics's
+estimable/I
+estimableness/M
+estimate/XDSNGV
+estimating/A
+estimation/M
+estimator/SM
+Estonia/M
+Estonian/S
+estoppal
+Estrada/M
+estrange/DRSLG
+estrangement/SM
+estranger/M
+Estrella/M
+Estrellita/M
+estrogen/SM
+estrous
+estrus/SM
+est/RZ
+estuarine
+estuary/SM
+et
+ET
+ETA
+Etan/M
+eta/SM
+etc
+etcetera/SM
+etcher/M
+etch/GZJSRD
+etching/M
+ETD
+eternalness/SM
+eternal/PSY
+eternity/SM
+ethane/SM
+Ethan/M
+ethanol/MS
+Ethelbert/M
+Ethelda/M
+Ethelind/M
+Etheline/M
+Ethelin/M
+Ethel/M
+Ethelred/M
+Ethelyn/M
+Ethe/M
+etherealness/M
+ethereal/PY
+etherized
+Ethernet/MS
+ether/SM
+ethically/U
+ethicalness/M
+ethical/PYS
+ethicist/S
+ethic/MS
+Ethiopia/M
+Ethiopian/S
+ethnically
+ethnicity/MS
+ethnic/S
+ethnocentric
+ethnocentrism/MS
+ethnographers
+ethnographic
+ethnography/M
+ethnological
+ethnologist/SM
+ethnology/SM
+ethnomethodology
+ethological
+ethologist/MS
+ethology/SM
+ethos/SM
+ethylene/MS
+Ethyl/M
+ethyl/SM
+Etienne/M
+etiologic
+etiological
+etiology/SM
+etiquette/SM
+Etna/M
+Etruria/M
+Etruscan/MS
+Etta/M
+Ettie/M
+Etti/M
+Ettore/M
+Etty/M
+étude/MS
+etymological/Y
+etymologist/SM
+etymology/MS
+EU
+eucalypti
+eucalyptus/SM
+Eucharistic
+Eucharist/SM
+euchre/MGSD
+euclidean
+Euclid/M
+Eudora/M
+Euell/M
+Eugene/M
+Eugenia/M
+eugenically
+eugenicist/SM
+eugenic/S
+eugenics/M
+Eugenie/M
+Eugenio/M
+Eugenius/M
+Eugen/M
+Eugine/M
+Eulalie/M
+Eula/M
+Eulerian/M
+Euler/M
+eulogistic
+eulogist/MS
+eulogized/U
+eulogize/GRSDZ
+eulogizer/M
+eulogy/MS
+Eu/M
+Eumenides
+Eunice/M
+eunuch/M
+eunuchs
+Euphemia/M
+euphemism/MS
+euphemistic
+euphemistically
+euphemist/M
+euphonious/Y
+euphonium/M
+euphony/SM
+euphoria/SM
+euphoric
+euphorically
+Euphrates/M
+Eurasia/M
+Eurasian/S
+eureka/S
+Euripides/M
+Eur/M
+Eurodollar/SM
+Europa/M
+Europeanization/SM
+Europeanized
+European/MS
+Europe/M
+europium/MS
+Eurydice/M
+Eustace/M
+Eustachian/M
+Eustacia/M
+eutectic
+Euterpe/M
+euthanasia/SM
+euthenics/M
+evacuate/DSXNGV
+evacuation/M
+evacuee/MS
+evader/M
+evade/SRDBGZ
+Evaleen/M
+evaluable
+evaluate/ADSGNX
+evaluated/U
+evaluational
+evaluation/MA
+evaluative
+evaluator/MS
+Eva/M
+evanescence/MS
+evanescent
+Evangelia/M
+evangelic
+evangelicalism/SM
+Evangelical/S
+evangelical/YS
+Evangelina/M
+Evangeline/M
+Evangelin/M
+evangelism/SM
+evangelistic
+evangelist/MS
+Evangelist/MS
+evangelize/GDS
+Evania/M
+Evan/MS
+Evanne/M
+Evanston/M
+Evansville/M
+evaporate/VNGSDX
+evaporation/M
+evaporative/Y
+evaporator/MS
+evasion/SM
+evasiveness/SM
+evasive/PY
+Eveleen/M
+Evelina/M
+Eveline/M
+Evelin/M
+Evelyn/M
+Eve/M
+evened
+evener/M
+evenhanded/YP
+evening/SM
+Evenki/M
+Even/M
+evenness/MSU
+even/PUYRT
+evens
+evensong/MS
+eventfulness/SM
+eventful/YU
+eventide/SM
+event/SGM
+eventuality/MS
+eventual/Y
+eventuate/GSD
+Everard/M
+Eveready/M
+Evered/M
+Everest/M
+Everette/M
+Everett/M
+everglade/MS
+Everglades
+evergreen/S
+Everhart/M
+everlastingness/M
+everlasting/PYS
+everliving
+evermore
+EverReady/M
+eve/RSM
+ever/T
+every
+everybody/M
+everydayness/M
+everyday/P
+everyman
+everyone/MS
+everyplace
+everything
+everywhere
+eve's/A
+eves/A
+Evey/M
+evict/DGS
+eviction/SM
+evidence/MGSD
+evidential/Y
+evident/YS
+Evie/M
+evildoer/SM
+evildoing/MS
+evilness/MS
+evil/YRPTS
+evince/SDG
+Evin/M
+eviscerate/GNXDS
+evisceration/M
+Evita/M
+Ev/MN
+evocable
+evocate/NVX
+evocation/M
+evocativeness/M
+evocative/YP
+evoke/SDG
+evolute/NMXS
+evolutionarily
+evolutionary
+evolutionist/MS
+evolution/M
+evolve/SDG
+Evonne/M
+Evvie/M
+Evvy/M
+Evy/M
+Evyn/M
+Ewan/M
+Eward/M
+Ewart/M
+Ewell/M
+ewe/MZRS
+Ewen/M
+ewer/M
+Ewing/M
+exacerbate/NGXDS
+exacerbation/M
+exacter/M
+exactingness/M
+exacting/YP
+exaction/SM
+exactitude/ISM
+exactly/I
+exactness/MSI
+exact/TGSPRDY
+exaggerate/DSXNGV
+exaggerated/YP
+exaggeration/M
+exaggerative/Y
+exaggerator/MS
+exaltation/SM
+exalted/Y
+exalter/M
+exalt/ZRDGS
+examen/M
+examination/AS
+examination's
+examine/BGZDRS
+examined/AU
+examinees
+examiner/M
+examines/A
+examining/A
+exam/MNS
+example/DSGM
+exampled/U
+exasperate/DSXGN
+exasperated/Y
+exasperating/Y
+exasperation/M
+Excalibur/M
+excavate/NGDSX
+excavation/M
+excavator/SM
+Excedrin/M
+exceeder/M
+exceeding/Y
+exceed/SGDR
+excelled
+excellence/SM
+excellency/MS
+Excellency/MS
+excellent/Y
+excelling
+excel/S
+excelsior/S
+except/DSGV
+exceptionable/U
+exceptionalness/M
+exceptional/YU
+exception/BMS
+excerpter/M
+excerpt/GMDRS
+excess/GVDSM
+excessiveness/M
+excessive/PY
+exchangeable
+exchange/GDRSZ
+exchanger/M
+exchequer/SM
+Exchequer/SM
+excise/XMSDNGB
+excision/M
+excitability/MS
+excitableness/M
+excitable/P
+excitably
+excitation/SM
+excitatory
+excited/Y
+excitement/MS
+exciter/M
+excite/RSDLBZG
+excitingly
+exciting/U
+exciton/M
+exclaimer/M
+exclaim/SZDRG
+exclamation/MS
+exclamatory
+exclude/DRSG
+excluder/M
+exclusionary
+exclusioner/M
+exclusion/SZMR
+exclusiveness/SM
+exclusive/SPY
+exclusivity/MS
+excommunicate/XVNGSD
+excommunication/M
+excoriate/GNXSD
+excoriation/M
+excremental
+excrement/SM
+excrescence/MS
+excrescent
+excreta
+excrete/NGDRSX
+excreter/M
+excretion/M
+excretory/S
+excruciate/NGDS
+excruciating/Y
+excruciation/M
+exculpate/XSDGN
+exculpation/M
+exculpatory
+excursionist/SM
+excursion/MS
+excursiveness/SM
+excursive/PY
+excursus/MS
+excusable/IP
+excusableness/IM
+excusably/I
+excuse/BGRSD
+excused/U
+excuser/M
+exec/MS
+execrableness/M
+execrable/P
+execrably
+execrate/DSXNGV
+execration/M
+executable/MS
+execute/NGVZBXDRS
+executer/M
+executional
+executioner/M
+execution/ZMR
+executive/SM
+executor/SM
+executrices
+executrix/M
+exegeses
+exegesis/M
+exegete/M
+exegetical
+exegetic/S
+exemplariness/M
+exemplar/MS
+exemplary/P
+exemplification/M
+exemplifier/M
+exemplify/ZXNSRDG
+exemption/MS
+exempt/SDG
+exerciser/M
+exercise/ZDRSGB
+exertion/MS
+exert/SGD
+Exeter/M
+exeunt
+exhalation/SM
+exhale/GSD
+exhausted/Y
+exhauster/M
+exhaustible/I
+exhausting/Y
+exhaustion/SM
+exhaustiveness/MS
+exhaustive/YP
+exhaust/VGRDS
+exhibitioner/M
+exhibitionism/MS
+exhibitionist/MS
+exhibition/ZMRS
+exhibitor/SM
+exhibit/VGSD
+exhilarate/XSDVNG
+exhilarating/Y
+exhilaration/M
+exhortation/SM
+exhort/DRSG
+exhorter/M
+exhumation/SM
+exhume/GRSD
+exhumer/M
+exigence/S
+exigency/SM
+exigent/SY
+exiguity/SM
+exiguous
+exile/SDGM
+existence/MS
+existent/I
+existentialism/MS
+existentialistic
+existentialist/MS
+existential/Y
+existents
+exist/SDG
+exit/MDSG
+exobiology/MS
+exocrine
+Exodus/M
+exodus/SM
+exogamous
+exogamy/M
+exogenous/Y
+exonerate/SDVGNX
+exoneration/M
+exorbitance/MS
+exorbitant/Y
+exorcise/SDG
+exorcism/SM
+exorcist/SM
+exorcizer/M
+exoskeleton/MS
+exosphere/SM
+exothermic
+exothermically
+exotica
+exotically
+exoticism/SM
+exoticness/M
+exotic/PS
+exp
+expandability/M
+expand/DRSGZB
+expanded/U
+expander/M
+expanse/DSXGNVM
+expansible
+expansionary
+expansionism/MS
+expansionist/MS
+expansion/M
+expansiveness/S
+expansive/YP
+expatiate/XSDNG
+expatiation/M
+expatriate/SDNGX
+expatriation/M
+expectancy/MS
+expectant/YS
+expectational
+expectation/MS
+expected/UPY
+expecting/Y
+expectorant/S
+expectorate/NGXDS
+expectoration/M
+expect/SBGD
+expedience/IS
+expediency/IMS
+expedients
+expedient/YI
+expediter/M
+expedite/ZDRSNGX
+expeditionary
+expedition/M
+expeditiousness/MS
+expeditious/YP
+expeditor's
+expellable
+expelled
+expelling
+expel/S
+expendable/S
+expended/U
+expender/M
+expenditure/SM
+expend/SDRGB
+expense/DSGVM
+expensive/IYP
+expensiveness/SMI
+experienced/U
+experience/ISDM
+experiencing
+experiential/Y
+experimentalism/M
+experimentalist/SM
+experimental/Y
+experimentation/SM
+experimenter/M
+experiment/GSMDRZ
+experted
+experting
+expertise/SM
+expertize/GD
+expertnesses
+expertness/IM
+expert/PISY
+expert's
+expiable/I
+expiate/XGNDS
+expiation/M
+expiatory
+expiration/MS
+expired/U
+expire/SDG
+expiry/MS
+explainable/UI
+explain/ADSG
+explained/U
+explainer/SM
+explanation/MS
+explanatory
+expletive/SM
+explicable/I
+explicate/VGNSDX
+explication/M
+explicative/Y
+explicitness/SM
+explicit/PSY
+explode/DSRGZ
+exploded/U
+exploder/M
+exploitation/MS
+exploitative
+exploited/U
+exploiter/M
+exploit/ZGVSMDRB
+exploration/MS
+exploratory
+explore/DSRBGZ
+explored/U
+explorer/M
+explosion/MS
+explosiveness/SM
+explosive/YPS
+expo/MS
+exponential/SY
+exponentiate/XSDNG
+exponentiation/M
+exponent/MS
+exportability
+exportable
+export/AGSD
+exportation/SM
+exporter/MS
+export's
+expose
+exposed/U
+exposer/M
+exposit/D
+exposition/SM
+expositor/MS
+expository
+expos/RSDZG
+expostulate/DSXNG
+expostulation/M
+exposure/SM
+expounder/M
+expound/ZGSDR
+expressed/U
+expresser/M
+express/GVDRSY
+expressibility/I
+expressible/I
+expressibly/I
+expressionism/SM
+expressionistic
+expressionist/S
+expressionless/YP
+expression/MS
+expressive/IYP
+expressiveness/MS
+expressiveness's/I
+expressway/SM
+expropriate/XDSGN
+expropriation/M
+expropriator/SM
+expulsion/MS
+expunge/GDSR
+expunger/M
+expurgated/U
+expurgate/SDGNX
+expurgation/M
+exquisiteness/SM
+exquisite/YPS
+ex/S
+ext
+extant
+extemporaneousness/MS
+extemporaneous/YP
+extempore/S
+extemporization/SM
+extemporizer/M
+extemporize/ZGSRD
+extendability/M
+extendedly
+extendedness/M
+extended/U
+extender/M
+extendibility/M
+extendibles
+extend/SGZDR
+extensibility/M
+extensible/I
+extensional/Y
+extension/SM
+extensiveness/SM
+extensive/PY
+extensor/MS
+extent/SM
+extenuate/XSDGN
+extenuation/M
+exterior/MYS
+exterminate/XNGDS
+extermination/M
+exterminator/SM
+externalities
+externalization/SM
+externalize/GDS
+external/YS
+extern/M
+extinct/DGVS
+extinction/MS
+extinguishable/I
+extinguish/BZGDRS
+extinguisher/M
+extirpate/XSDVNG
+extirpation/M
+extolled
+extoller/M
+extolling
+extol/S
+extort/DRSGV
+extorter/M
+extortionate/Y
+extortioner/M
+extortionist/SM
+extortion/ZSRM
+extracellular/Y
+extract/GVSBD
+extraction/SM
+extractive/Y
+extractor/SM
+extracurricular/S
+extradite/XNGSDB
+extradition/M
+extragalactic
+extralegal/Y
+extramarital
+extramural
+extraneousness/M
+extraneous/YP
+extraordinarily
+extraordinariness/M
+extraordinary/PS
+extrapolate/XVGNSD
+extrapolation/M
+extra/S
+extrasensory
+extraterrestrial/S
+extraterritorial
+extraterritoriality/MS
+extravagance/MS
+extravagant/Y
+extravaganza/SM
+extravehicular
+extravert's
+extrema
+extremal
+extreme/DSRYTP
+extremeness/MS
+extremism/SM
+extremist/MS
+extremity/SM
+extricable/I
+extricate/XSDNG
+extrication/M
+extrinsic
+extrinsically
+extroversion/SM
+extrovert/GMDS
+extrude/GDSR
+extruder/M
+extrusion/MS
+extrusive
+exuberance/MS
+exuberant/Y
+exudate/XNM
+exudation/M
+exude/GSD
+exultant/Y
+exultation/SM
+exult/DGS
+exulting/Y
+exurban
+exurbanite/SM
+exurbia/MS
+exurb/MS
+Exxon/M
+Eyck/M
+Eyde/M
+Eydie/M
+eyeball/GSMD
+eyebrow/MS
+eyed/P
+eyedropper/MS
+eyeful/MS
+eye/GDRSMZ
+eyeglass/MS
+eyelash/MS
+eyeless
+eyelet/GSMD
+eyelid/SM
+eyeliner/MS
+eyeopener/MS
+eyeopening
+eyepiece/SM
+eyer/M
+eyeshadow
+eyesight/MS
+eyesore/SM
+eyestrain/MS
+eyeteeth
+eyetooth/M
+eyewash/MS
+eyewitness/SM
+Eyre/M
+eyrie's
+Eysenck/M
+Ezechiel/M
+Ezekiel/M
+Ezequiel/M
+Eziechiele/M
+Ezmeralda/M
+Ezra/M
+Ezri/M
+F
+FAA
+Fabe/MR
+Fabergé/M
+Faber/M
+Fabiano/M
+Fabian/S
+Fabien/M
+Fabio/M
+fable/GMSRD
+fabler/M
+fabricate/SDXNG
+fabrication/M
+fabricator/MS
+fabric/MS
+fabulists
+fabulousness/M
+fabulous/YP
+facade/GMSD
+face/AGCSD
+facecloth
+facecloths
+faceless/P
+faceplate/M
+facer/CM
+face's
+facetiousness/MS
+facetious/YP
+facet/SGMD
+facial/YS
+facileness/M
+facile/YP
+facilitate/VNGXSD
+facilitation/M
+facilitator/SM
+facilitatory
+facility/MS
+facing/MS
+facsimileing
+facsimile/MSD
+factional
+factionalism/SM
+faction/SM
+factiousness/M
+factious/PY
+factitious
+fact/MS
+facto
+factoid/S
+factorial/MS
+factoring/A
+factoring's
+factorisable
+factorization/SM
+factorize/GSD
+factor/SDMJG
+factory/MS
+factotum/MS
+factuality/M
+factualness/M
+factual/PY
+faculty/MS
+faddish
+faddist/SM
+fadedly
+faded/U
+fadeout
+fader/M
+fade/S
+fading's
+fading/U
+fad/ZGSMDR
+Fae/M
+faerie/MS
+Faeroe/M
+faery's
+Fafnir/M
+fagged
+fagging
+faggoting's
+Fagin/M
+fag/MS
+fagoting/M
+fagot/MDSJG
+Fahd/M
+Fahrenheit/S
+faïence/S
+failing's
+failing/UY
+fail/JSGD
+faille/MS
+failsafe
+failure/SM
+Faina/M
+fain/GTSRD
+fainter/M
+fainthearted
+faintness/MS
+faint/YRDSGPT
+Fairbanks
+Fairchild/M
+faired
+Fairfax/M
+Fairfield/M
+fairgoer/S
+fairground/MS
+fairing/MS
+fairish
+Fairleigh/M
+fairless
+Fairlie/M
+Fair/M
+Fairmont/M
+fairness's
+fairness/US
+Fairport/M
+fairs
+fair/TURYP
+Fairview/M
+fairway/MS
+fairyland/MS
+fairy/MS
+fairytale
+Faisalabad
+Faisal/M
+faithed
+faithfulness/MSU
+faithfuls
+faithful/UYP
+faithing
+faithlessness/SM
+faithless/YP
+Faith/M
+faiths
+faith's
+faith/U
+fajitas
+faker/M
+fake/ZGDRS
+fakir/SM
+falafel
+falconer/M
+falconry/MS
+falcon/ZSRM
+Falito/M
+Falkland/MS
+Falk/M
+Falkner/M
+fallaciousness/M
+fallacious/PY
+fallacy/MS
+faller/M
+fallibility/MSI
+fallible/I
+fallibleness/MS
+fallibly/I
+falloff/S
+Fallon/M
+fallopian
+Fallopian/M
+fallout/MS
+fallowness/M
+fallow/PSGD
+fall/SGZMRN
+falsehood/SM
+falseness/SM
+false/PTYR
+falsetto/SM
+falsie/MS
+falsifiability/M
+falsifiable/U
+falsification/M
+falsifier/M
+falsify/ZRSDNXG
+falsity/MS
+Falstaff/M
+falterer/M
+faltering/UY
+falter/RDSGJ
+Falwell/M
+fa/M
+famed/C
+fame/DSMG
+fames/C
+familial
+familiarity/MUS
+familiarization/MS
+familiarized/U
+familiarizer/M
+familiarize/ZGRSD
+familiarizing/Y
+familiarly/U
+familiarness/M
+familiar/YPS
+family/MS
+famine/SM
+faming/C
+famish/GSD
+famously/I
+famousness/M
+famous/PY
+fanaticalness/M
+fanatical/YP
+fanaticism/MS
+fanatic/SM
+Fanchette/M
+Fanchon/M
+fancied
+Fancie/M
+fancier/SM
+fanciest
+fancifulness/MS
+fanciful/YP
+fancily
+fanciness/SM
+fancying
+fancy/IS
+Fancy/M
+fancywork/SM
+fandango/SM
+Fanechka/M
+fanfare/SM
+fanfold/M
+fang/DMS
+fangled
+Fania/M
+fanlight/SM
+Fan/M
+fanned
+Fannie/M
+Fanni/M
+fanning
+fanny/SM
+Fanny/SM
+fanout
+fan/SM
+fantail/SM
+fantasia/SM
+fantasist/M
+fantasize/SRDG
+fantastical/Y
+fantastic/S
+fantasy/GMSD
+Fanya/M
+fanzine/S
+FAQ/SM
+Faraday/M
+farad/SM
+Farah/M
+Fara/M
+Farand/M
+faraway
+Farber/M
+farce/SDGM
+farcical/Y
+fare/MS
+farer/M
+farewell/DGMS
+farfetchedness/M
+far/GDR
+Fargo/M
+Farica/M
+farinaceous
+farina/MS
+Farkas/M
+Farlay/M
+Farlee/M
+Farleigh/M
+Farley/M
+Farlie/M
+Farly/M
+farmer/M
+Farmer/M
+farmhand/S
+farmhouse/SM
+farming/M
+Farmington/M
+farmland/SM
+farm/MRDGZSJ
+farmstead/SM
+farmworker/S
+Far/MY
+farmyard/MS
+faro/MS
+farragoes
+farrago/M
+Farragut/M
+Farrah/M
+Farrakhan/M
+Farra/M
+Farrand/M
+Farrell/M
+Farrel/M
+farrier/SM
+Farris/M
+Farr/M
+farrow/DMGS
+farseeing
+farsightedness/SM
+farsighted/YP
+farther
+farthermost
+farthest
+farthing/SM
+fart/MDGS!
+fas
+fascia/SM
+fascicle/DSM
+fasciculate/DNX
+fasciculation/M
+fascinate/SDNGX
+fascinating/Y
+fascination/M
+fascism/MS
+Fascism's
+fascistic
+Fascist's
+fascist/SM
+fashionableness/M
+fashionable/PS
+fashionably/U
+fashion/ADSG
+fashioner/SM
+fashion's
+Fassbinder/M
+fastback/MS
+fastball/S
+fasten/AGUDS
+fastener/MS
+fastening/SM
+fast/GTXSPRND
+fastidiousness/MS
+fastidious/PY
+fastness/MS
+fatalism/MS
+fatalistic
+fatalistically
+fatalist/MS
+fatality/MS
+fatal/SY
+fatback/SM
+fatefulness/MS
+fateful/YP
+fate/MS
+Fates
+fatheaded/P
+fathead/SMD
+father/DYMGS
+fathered/U
+fatherhood/MS
+fatherland/SM
+fatherless
+fatherliness/M
+fatherly/P
+Father/SM
+fathomable/U
+fathomless
+fathom/MDSBG
+fatigued/U
+fatigue/MGSD
+fatiguing/Y
+Fatima/M
+fatness/SM
+fat/PSGMDY
+fatso/M
+fatted
+fattener/M
+fatten/JZGSRD
+fatter
+fattest/M
+fattiness/SM
+fatting
+fatty/RSPT
+fatuity/MS
+fatuousness/SM
+fatuous/YP
+fatwa/SM
+faucet/SM
+Faulknerian
+Faulkner/M
+fault/CGSMD
+faultfinder/MS
+faultfinding/MS
+faultily
+faultiness/MS
+faultlessness/SM
+faultless/PY
+faulty/RTP
+fauna/MS
+Faunie/M
+Faun/M
+faun/MS
+Fauntleroy/M
+Faustian
+Faustina/M
+Faustine/M
+Faustino/M
+Faust/M
+Faustus/M
+fauvism/S
+favorableness/MU
+favorable/UMPS
+favorably/U
+favoredness/M
+favored's/U
+favored/YPSM
+favorer/EM
+favor/ESMRDGZ
+favoring/MYS
+favorings/U
+favorite/SMU
+favoritism/MS
+favors/A
+Fawkes/M
+Fawne/M
+fawner/M
+fawn/GZRDMS
+Fawnia/M
+fawning/Y
+Fawn/M
+fax/GMDS
+Fax/M
+Faydra/M
+Faye/M
+Fayette/M
+Fayetteville/M
+Fayina/M
+Fay/M
+fay/MDRGS
+Fayre/M
+Faythe/M
+Fayth/M
+faze/DSG
+FBI
+FCC
+FD
+FDA
+FDIC
+FDR/M
+fealty/MS
+fearfuller
+fearfullest
+fearfulness/MS
+fearful/YP
+fearlessness/MS
+fearless/PY
+fear/RDMSG
+fearsomeness/M
+fearsome/PY
+feasibility/SM
+feasibleness/M
+feasible/UI
+feasibly/U
+feaster/M
+feast/GSMRD
+feater/C
+featherbed
+featherbedding/SM
+featherbrain/MD
+feathered/U
+feathering/M
+featherless
+featherlight
+Featherman/M
+feathertop
+featherweight/SM
+feathery/TR
+feather/ZMDRGS
+feat/MYRGTS
+feats/C
+featureless
+feature/MGSD
+Feb/M
+febrile
+February/MS
+fecal
+feces
+fecklessness/M
+feckless/PY
+fecundability
+fecundate/XSDGN
+fecundation/M
+fecund/I
+fecundity/SM
+federalism/SM
+Federalist
+federalist/MS
+federalization/MS
+federalize/GSD
+Federal/S
+federal/YS
+federated/U
+federate/FSDXVNG
+federation/FM
+federative/Y
+Federica/M
+Federico/M
+FedEx/M
+Fedora/M
+fedora/SM
+feds
+Fed/SM
+fed/U
+feebleness/SM
+feeble/TPR
+feebly
+feedback/SM
+feedbag/MS
+feeder/M
+feed/GRZJS
+feeding/M
+feedlot/SM
+feedstock
+feedstuffs
+feeing
+feeler/M
+feel/GZJRS
+feelingly/U
+feeling/MYP
+feelingness/M
+Fee/M
+fee/MDS
+feet/M
+feigned/U
+feigner/M
+feign/RDGS
+feint/MDSG
+feisty/RT
+Felder/M
+Feldman/M
+feldspar/MS
+Felecia/M
+Felicdad/M
+Felice/M
+Felicia/M
+Felicio/M
+felicitate/XGNSD
+felicitation/M
+felicitous/IY
+felicitousness/M
+felicity/IMS
+Felicity/M
+Felicle/M
+Felic/M
+Felike/M
+Feliks/M
+feline/SY
+Felipa/M
+Felipe/M
+Felisha/M
+Felita/M
+Felix/M
+Feliza/M
+Felizio/M
+fella/S
+fellatio/SM
+felled/A
+feller/M
+felling/A
+Fellini/M
+fellness/M
+fellowman
+fellowmen
+fellow/SGDYM
+fellowshipped
+fellowshipping
+fellowship/SM
+fell/PSGZTRD
+feloniousness/M
+felonious/PY
+felon/MS
+felony/MS
+felt/GSD
+felting/M
+Fe/M
+female/MPS
+femaleness/SM
+feminineness/M
+feminine/PYS
+femininity/MS
+feminism/MS
+feminist/MS
+femme/MS
+femoral
+fem/S
+femur/MS
+fenced/U
+fencepost/M
+fencer/M
+fence/SRDJGMZ
+fencing/M
+fender/CM
+fend/RDSCZG
+Fenelia/M
+fenestration/CSM
+Fenian/M
+fenland/M
+fen/MS
+fennel/SM
+Fenwick/M
+Feodora/M
+Feodor/M
+feral
+Ferber/M
+Ferdie/M
+Ferdinanda/M
+Ferdinande/M
+Ferdinand/M
+Ferdinando/M
+Ferd/M
+Ferdy/M
+fer/FLC
+Fergus/M
+Ferguson/M
+Ferlinghetti/M
+Fermat/M
+fermentation/MS
+fermented
+fermenter
+ferment/FSCM
+fermenting
+Fermi/M
+fermion/MS
+fermium/MS
+Fernanda/M
+Fernande/M
+Fernandez/M
+Fernandina/M
+Fernando/M
+Ferne/M
+fernery/M
+Fern/M
+fern/MS
+ferny/TR
+ferociousness/MS
+ferocious/YP
+ferocity/MS
+Ferrari/M
+Ferraro/M
+Ferreira/M
+Ferrell/M
+Ferrel/M
+Ferrer/M
+ferreter/M
+ferret/SMRDG
+ferric
+ferris
+Ferris
+ferrite/M
+ferro
+ferroelectric
+ferromagnetic
+ferromagnet/M
+ferrous
+ferrule/MGSD
+ferryboat/MS
+ferryman/M
+ferrymen
+ferry/SDMG
+fertileness/M
+fertile/YP
+fertility/IMS
+fertilization/ASM
+fertilized/U
+fertilizer/M
+fertilizes/A
+fertilize/SRDZG
+ferule/SDGM
+fervency/MS
+fervent/Y
+fervidness/M
+fervid/YP
+fervor/MS
+fess/KGFSD
+Fess/M
+fess's
+festal/S
+fester/GD
+festival/SM
+festiveness/SM
+festive/PY
+festivity/SM
+festoon/SMDG
+fest/RVZ
+fetal
+feta/MS
+fetcher/M
+fetching/Y
+fetch/RSDGZ
+feted
+fête/MS
+fetich's
+fetidness/SM
+fetid/YP
+feting
+fetishism/SM
+fetishistic
+fetishist/SM
+fetish/MS
+fetlock/MS
+fetter's
+fetter/UGSD
+fettle/GSD
+fettling/M
+fettuccine/S
+fetus/SM
+feudalism/MS
+feudalistic
+feudal/Y
+feudatory/M
+feud/MDSG
+feverishness/SM
+feverish/PY
+fever/SDMG
+fewness/MS
+few/PTRS
+Fey/M
+Feynman/M
+fey/RT
+fez/M
+Fez/M
+fezzes
+ff
+FHA
+fiancée/S
+fiancé/MS
+Fianna/M
+Fiann/M
+fiascoes
+fiasco/M
+Fiat/M
+fiat/MS
+fibbed
+fibber/MS
+fibbing
+fiberboard/MS
+fiber/DM
+fiberfill/S
+Fiberglas/M
+fiberglass/DSMG
+Fibonacci/M
+fibrillate/XGNDS
+fibrillation/M
+fibril/MS
+fibrin/MS
+fibroblast/MS
+fibroid/S
+fibroses
+fibrosis/M
+fibrousness/M
+fibrous/YP
+fib/SZMR
+fibulae
+fibula/M
+fibular
+FICA
+fices
+fiche/SM
+Fichte/M
+fichu/SM
+fickleness/MS
+fickle/RTP
+ficos
+fictionalization/MS
+fictionalize/DSG
+fictional/Y
+fiction/SM
+fictitiousness/M
+fictitious/PY
+fictive/Y
+ficus
+fiddle/GMZJRSD
+fiddler/M
+fiddlestick/SM
+fiddly
+fide/F
+Fidela/M
+Fidelia/M
+Fidelio/M
+fidelity/IMS
+Fidelity/M
+Fidel/M
+fidget/DSG
+fidgety
+Fidole/M
+Fido/M
+fiducial/Y
+fiduciary/MS
+fiefdom/S
+fief/MS
+fielded
+fielder/IM
+fielding
+Fielding/M
+Field/MGS
+fieldstone/M
+fieldworker/M
+fieldwork/ZMRS
+field/ZISMR
+fiendishness/M
+fiendish/YP
+fiend/MS
+fierceness/SM
+fierce/RPTY
+fierily
+fieriness/MS
+fiery/PTR
+fie/S
+fies/C
+fiesta/MS
+fife/DRSMZG
+fifer/M
+Fifi/M
+Fifine/M
+FIFO
+fifteen/HRMS
+fifteenths
+fifths
+fifth/Y
+fiftieths
+fifty/HSM
+Figaro/M
+figged
+figging
+fightback
+fighter/MIS
+fighting/IS
+fight/ZSJRG
+figment/MS
+fig/MLS
+Figueroa/M
+figural
+figuration/FSM
+figurativeness/M
+figurative/YP
+figure/GFESD
+figurehead/SM
+figurer/SM
+figure's
+figurine/SM
+figuring/S
+Fijian/SM
+Fiji/M
+filamentary
+filament/MS
+filamentous
+Filberte/M
+Filbert/M
+filbert/MS
+Filberto/M
+filch/SDG
+filed/AC
+file/KDRSGMZ
+filename/SM
+filer/KMCS
+files/AC
+filet's
+filial/UY
+Filia/M
+filibusterer/M
+filibuster/MDRSZG
+Filide/M
+filigreeing
+filigree/MSD
+filing/AC
+filings
+Filipino/SM
+Filip/M
+Filippa/M
+Filippo/M
+fill/BAJGSD
+filled/U
+filler/MS
+filleting/M
+fillet/MDSG
+filling/M
+fillip/MDGS
+Fillmore/M
+filly/SM
+filmdom/M
+Filmer/M
+filminess/SM
+filming/M
+filmmaker/S
+Filmore/M
+film/SGMD
+filmstrip/SM
+filmy/RTP
+Filofax/S
+filtered/U
+filterer/M
+filter/RDMSZGB
+filthily
+filthiness/SM
+filth/M
+filths
+filthy/TRSDGP
+filtrated/I
+filtrate/SDXMNG
+filtrates/I
+filtrating/I
+filtration/IMS
+finagler/M
+finagle/RSDZG
+finale/MS
+finalist/MS
+finality/MS
+finalization/SM
+finalize/GSD
+final/SY
+Fina/M
+financed/A
+finance/MGSDJ
+finances/A
+financial/Y
+financier/DMGS
+financing/A
+Finch/M
+finch/MS
+findable/U
+find/BRJSGZ
+finder/M
+finding/M
+Findlay/M
+Findley/M
+fine/FGSCRDA
+finely
+fineness/MS
+finery/MAS
+fine's
+finespun
+finesse/SDMG
+fingerboard/SM
+fingerer/M
+fingering/M
+fingerless
+fingerling/M
+fingernail/MS
+fingerprint/SGDM
+finger/SGRDMJ
+fingertip/MS
+finial/SM
+finical
+finickiness/S
+finicky/RPT
+fining/M
+finished/UA
+finisher/M
+finishes/A
+finish/JZGRSD
+finis/SM
+finite/ISPY
+finitely/C
+finiteness/MIC
+fink/GDMS
+Finland/M
+Finlay/M
+Finley/M
+Fin/M
+Finnbogadottir/M
+finned
+Finnegan/M
+finner
+finning
+Finnish
+Finn/MS
+finny/RT
+fin/TGMDRS
+Fiona/M
+Fionna/M
+Fionnula/M
+fiord's
+Fiorello/M
+Fiorenze/M
+Fiori/M
+f/IRAC
+firearm/SM
+fireball/SM
+fireboat/M
+firebomb/MDSG
+firebox/MS
+firebrand/MS
+firebreak/SM
+firebrick/SM
+firebug/SM
+firecracker/SM
+firedamp/SM
+fired/U
+firefight/JRGZS
+firefly/MS
+Firefox/M
+fireguard/M
+firehouse/MS
+firelight/GZSM
+fireman/M
+firemen
+fire/MS
+fireplace/MS
+fireplug/MS
+firepower/SM
+fireproof/SGD
+firer/M
+firesafe
+fireside/SM
+Firestone/M
+firestorm/SM
+firetrap/SM
+firetruck/S
+firewall/S
+firewater/SM
+firewood/MS
+firework/MS
+firing/M
+firkin/M
+firmament/MS
+firmer
+firmest
+firm/ISFDG
+firmly/I
+firmness/MS
+firm's
+firmware/MS
+firring
+firstborn/S
+firsthand
+first/SY
+firth/M
+firths
+fir/ZGJMDRHS
+fiscal/YS
+Fischbein/M
+Fischer/M
+fishbowl/MS
+fishcake/S
+fisher/M
+Fisher/M
+fisherman/M
+fishermen/M
+fishery/MS
+fishhook/MS
+fishily
+fishiness/MS
+fishing/M
+fish/JGZMSRD
+Fishkill/M
+fishmeal
+fishmonger/MS
+fishnet/SM
+fishpond/SM
+fishtail/DMGS
+fishtanks
+fishwife/M
+fishwives
+fishy/TPR
+Fiske/M
+Fisk/M
+fissile
+fissionable/S
+fission/BSDMG
+fissure/MGSD
+fistfight/SM
+fistful/MS
+fisticuff/SM
+fist/MDGS
+fistula/SM
+fistulous
+Fitchburg/M
+Fitch/M
+fitfulness/SM
+fitful/PY
+fitments
+fitness/USM
+fits/AK
+fit's/K
+fitted/UA
+fitter/SM
+fittest
+fitting/AU
+fittingly
+fittingness/M
+fittings
+fit/UYPS
+Fitzgerald/M
+Fitz/M
+Fitzpatrick/M
+Fitzroy/M
+fivefold
+five/MRS
+fiver/M
+fixable
+fixate/VNGXSD
+fixatifs
+fixation/M
+fixative/S
+fixedness/M
+fixed/YP
+fixer/SM
+fixes/I
+fixing/SM
+fixity/MS
+fixture/SM
+fix/USDG
+Fizeau/M
+fizzer/M
+fizzle/GSD
+fizz/SRDG
+fizzy/RT
+fjord/SM
+FL
+flabbergast/GSD
+flabbergasting/Y
+flabbily
+flabbiness/SM
+flabby/TPR
+flab/MS
+flaccidity/MS
+flaccid/Y
+flack/SGDM
+flagella/M
+flagellate/DSNGX
+flagellation/M
+flagellum/M
+flagged
+flaggingly/U
+flagging/SMY
+flagman/M
+flagmen
+flag/MS
+flagon/SM
+flagpole/SM
+flagrance/MS
+flagrancy/SM
+flagrant/Y
+flagship/MS
+flagstaff/MS
+flagstone/SM
+flail/SGMD
+flair/SM
+flaker/M
+flake/SM
+flakiness/MS
+flak/RDMGS
+flaky/PRT
+Fla/M
+flambé/D
+flambeing
+flambes
+flamboyance/MS
+flamboyancy/MS
+flamboyant/YS
+flamenco/SM
+flamen/M
+flameproof/DGS
+flamer/IM
+flame's
+flame/SIGDR
+flamethrower/SM
+flamingo/SM
+flaming/Y
+flammability/ISM
+flammable/SI
+flam/MRNDJGZ
+Flanagan/M
+Flanders/M
+flange/GMSD
+flanker/M
+flank/SGZRDM
+flan/MS
+flannel/DMGS
+flannelet/MS
+flannelette's
+flapjack/SM
+flap/MS
+flapped
+flapper/SM
+flapping
+flaps/M
+flare/SDG
+flareup/S
+flaring/Y
+flashback/SM
+flashbulb/SM
+flashcard/S
+flashcube/MS
+flasher/M
+flashgun/S
+flashily
+flashiness/SM
+flashing/M
+flash/JMRSDGZ
+flashlight/MS
+flashy/TPR
+flask/SM
+flatbed/S
+flatboat/MS
+flatcar/MS
+flatfeet
+flatfish/SM
+flatfoot/SGDM
+flathead/M
+flatiron/SM
+flatland/RS
+flatmate/M
+flat/MYPS
+flatness/MS
+flatted
+flattener/M
+flatten/SDRG
+flatter/DRSZG
+flatterer/M
+flattering/YU
+flattery/SM
+flattest/M
+flatting
+flattish
+Flatt/M
+flattop/MS
+flatulence/SM
+flatulent/Y
+flatus/SM
+flatware/MS
+flatworm/SM
+Flaubert/M
+flaunting/Y
+flaunt/SDG
+flautist/SM
+flavored/U
+flavorer/M
+flavorful
+flavoring/M
+flavorless
+flavor/SJDRMZG
+flavorsome
+flaw/GDMS
+flawlessness/MS
+flawless/PY
+flax/MSN
+flaxseed/M
+flayer/M
+flay/RDGZS
+fleabag/MS
+fleabites
+flea/SM
+fleawort/M
+fleck/GRDMS
+Fledermaus/M
+fledged/U
+fledge/GSD
+fledgling/SM
+fleecer/M
+fleece/RSDGMZ
+fleeciness/SM
+fleecy/RTP
+fleeing
+flee/RS
+fleetingly/M
+fleetingness/SM
+fleeting/YP
+fleet/MYRDGTPS
+fleetness/MS
+Fleischer/M
+Fleischman/M
+Fleisher/M
+Fleming/M
+Flemished/M
+Flemish/GDSM
+Flemishing/M
+Flem/JGM
+Flemming/M
+flesher/M
+fleshiness/M
+flesh/JMYRSDG
+fleshless
+fleshly/TR
+fleshpot/SM
+fleshy/TPR
+fletch/DRSGJ
+fletcher/M
+Fletcher/M
+fletching/M
+Fletch/MR
+Fleurette/M
+Fleur/M
+flew/S
+flews/M
+flexed/I
+flexibility/MSI
+flexible/I
+flexibly/I
+flexitime's
+flex/MSDAG
+flextime/S
+flexural
+flexure/M
+fl/GJD
+flibbertigibbet/MS
+flicker/GD
+flickering/Y
+flickery
+flick/GZSRD
+flier/M
+flight/GMDS
+flightiness/SM
+flightless
+flightpath
+flighty/RTP
+flimflammed
+flimflamming
+flimflam/MS
+flimsily
+flimsiness/MS
+flimsy/PTRS
+flincher/M
+flinch/GDRS
+flinching/U
+flinger/M
+fling/RMG
+Flin/M
+Flinn/M
+flintiness/M
+flintless
+flintlock/MS
+Flint/M
+flint/MDSG
+Flintstones
+flinty/TRP
+flipflop
+flippable
+flippancy/MS
+flippant/Y
+flipped
+flipper/SM
+flippest
+flipping
+flip/S
+flirtation/SM
+flirtatiousness/MS
+flirtatious/PY
+flirt/GRDS
+flit/S
+flitted
+flitting
+floater/M
+float/SRDGJZ
+floaty
+flocculate/GNDS
+flocculation/M
+flock/SJDMG
+floe/MS
+flogged
+flogger/SM
+flogging/SM
+flog/S
+Flo/M
+floodgate/MS
+floodlight/DGMS
+floodlit
+floodplain/S
+flood/SMRDG
+floodwater/SM
+floorboard/MS
+floorer/M
+flooring/M
+floor/SJRDMG
+floorspace
+floorwalker/SM
+floozy/SM
+flophouse/SM
+flop/MS
+flopped
+flopper/M
+floppily
+floppiness/SM
+flopping
+floppy/TMRSP
+floral/SY
+Flora/M
+Florance/M
+flora/SM
+Florella/M
+Florence/M
+Florencia/M
+Florentia/M
+Florentine/S
+Florenza/M
+florescence/MIS
+florescent/I
+Flore/SM
+floret/MS
+Florette/M
+Floria/M
+Florian/M
+Florida/M
+Floridan/S
+Floridian/S
+floridness/SM
+florid/YP
+Florie/M
+Florina/M
+Florinda/M
+Florine/M
+florin/MS
+Flori/SM
+florist/MS
+Flor/M
+Florrie/M
+Florri/M
+Florry/M
+Flory/M
+floss/GSDM
+Flossie/M
+Flossi/M
+Flossy/M
+flossy/RST
+flotation/SM
+flotilla/SM
+flotsam/SM
+flounce/GDS
+flouncing/M
+flouncy/RT
+flounder/SDG
+flourisher/M
+flourish/GSRD
+flourishing/Y
+flour/SGDM
+floury/TR
+flouter/M
+flout/GZSRD
+flowchart/SG
+flowed
+flowerbed/SM
+flower/CSGD
+flowerer/M
+floweriness/SM
+flowerless
+flowerpot/MS
+flower's
+Flowers
+flowery/TRP
+flowing/Y
+flow/ISG
+flown
+flowstone
+Floyd/M
+Flss/M
+flt
+flubbed
+flubbing
+flub/S
+fluctuate/XSDNG
+fluctuation/M
+fluency/MS
+fluently
+fluent/SF
+flue/SM
+fluffiness/SM
+fluff/SGDM
+fluffy/PRT
+fluidity/SM
+fluidized
+fluid/MYSP
+fluidness/M
+fluke/SDGM
+fluky/RT
+flume/SDGM
+flummox/DSG
+flu/MS
+flung
+flunkey's
+flunk/SRDG
+flunky/MS
+fluoresce/GSRD
+fluorescence/MS
+fluorescent/S
+fluoridate/XDSGN
+fluoridation/M
+fluoride/SM
+fluorimetric
+fluorinated
+fluorine/SM
+fluorite/MS
+fluorocarbon/MS
+fluoroscope/MGDS
+fluoroscopic
+flurry/GMDS
+flushness/M
+flush/TRSDPBG
+fluster/DSG
+fluter/M
+flute/SRDGMJ
+fluting/M
+flutist/MS
+flutter/DRSG
+flutterer/M
+fluttery
+fluxed/A
+fluxes/A
+flux/IMS
+fluxing
+flyaway
+flyblown
+flyby/M
+flybys
+flycatcher/MS
+flyer's
+fly/JGBDRSTZ
+flyleaf/M
+flyleaves
+Flynn/M
+flyover/MS
+flypaper/MS
+flysheet/S
+flyspeck/MDGS
+flyswatter/S
+flyway/MS
+flyweight/MS
+flywheel/MS
+FM
+Fm/M
+FNMA/M
+foal/MDSG
+foaminess/MS
+foam/MRDSG
+foamy/RPT
+fobbed
+fobbing
+fob/SM
+focal/F
+focally
+Foch/M
+foci's
+focused/AU
+focuser/M
+focuses/A
+focus/SRDMBG
+fodder/GDMS
+foe/SM
+foetid
+FOFL
+fogbound
+fogged/C
+foggily
+fogginess/MS
+fogging/C
+foggy/RPT
+foghorn/SM
+fogs/C
+fog/SM
+fogyish
+fogy/SM
+foible/MS
+foil/GSD
+foist/GDS
+Fokker/M
+foldaway/S
+folded/AU
+folder/M
+foldout/MS
+fold/RDJSGZ
+folds/UA
+Foley/M
+foliage/MSD
+foliate/CSDXGN
+foliation/CM
+folio/SDMG
+folklike
+folklore/MS
+folkloric
+folklorist/SM
+folk/MS
+folksiness/MS
+folksinger/S
+folksinging/S
+folksong/S
+folksy/TPR
+folktale/S
+folkway/S
+foll
+follicle/SM
+follicular
+follower/M
+follow/JSZBGRD
+followup's
+folly/SM
+Folsom
+fol/Y
+Fomalhaut/M
+fomentation/SM
+fomenter/M
+foment/RDSG
+Fonda/M
+fondant/SM
+fondle/GSRD
+fondler/M
+fondness/MS
+fond/PMYRDGTS
+fondue/MS
+Fons
+Fonsie/M
+Fontainebleau/M
+Fontaine/M
+Fontana/M
+fontanelle's
+fontanel/MS
+font/MS
+Fonzie/M
+Fonz/M
+foodie/S
+food/MS
+foodstuff/MS
+foolery/MS
+foolhardily
+foolhardiness/SM
+foolhardy/PTR
+foolishness/SM
+foolish/PRYT
+fool/MDGS
+foolproof
+foolscap/MS
+footage/SM
+football/SRDMGZ
+footbridge/SM
+Foote/M
+footer/M
+footfall/SM
+foothill/SM
+foothold/MS
+footing/M
+footless
+footlights
+footling
+footlocker/SM
+footloose
+footman/M
+footmarks
+footmen
+footnote/MSDG
+footpad/SM
+footpath/M
+footpaths
+footplate/M
+footprint/MS
+footrace/S
+footrest/MS
+footsie/SM
+foot/SMRDGZJ
+footsore
+footstep/SM
+footstool/SM
+footwear/M
+footwork/SM
+fop/MS
+fopped
+foppery/MS
+fopping
+foppishness/SM
+foppish/YP
+forage/GSRDMZ
+forager/M
+forayer/M
+foray/SGMRD
+forbade
+forbearance/SM
+forbearer/M
+forbear/MRSG
+Forbes/M
+forbidden
+forbiddingness/M
+forbidding/YPS
+forbid/S
+forbore
+forborne
+forced/Y
+forcefield/MS
+forcefulness/MS
+forceful/PY
+forceps/M
+forcer/M
+force/SRDGM
+forcibleness/M
+forcible/P
+forcibly
+fordable/U
+Fordham/M
+Ford/M
+ford/SMDBG
+forearm/GSDM
+forebear/MS
+forebode/GJDS
+forebodingness/M
+foreboding/PYM
+forecaster/M
+forecastle/MS
+forecast/SZGR
+foreclose/GSD
+foreclosure/MS
+forecourt/SM
+foredoom/SDG
+forefather/SM
+forefeet
+forefinger/MS
+forefoot/M
+forefront/SM
+foregoer/M
+foregoing/S
+foregone
+foregos
+foreground/MGDS
+forehand/S
+forehead/MS
+foreigner/M
+foreignness/SM
+foreign/PRYZS
+foreknew
+foreknow/GS
+foreknowledge/MS
+foreknown
+foreleg/MS
+forelimb/MS
+forelock/MDSG
+foreman/M
+Foreman/M
+foremast/SM
+foremen
+foremost
+forename/DSM
+forenoon/SM
+forensically
+forensic/S
+forensics/M
+foreordain/DSG
+forepart/MS
+forepaws
+forepeople
+foreperson/S
+foreplay/MS
+forequarter/SM
+forerunner/MS
+fore/S
+foresail/SM
+foresaw
+foreseeable/U
+foreseeing
+foreseen/U
+foreseer/M
+foresee/ZSRB
+foreshadow/SGD
+foreshore/M
+foreshorten/DSG
+foresightedness/SM
+foresighted/PY
+foresight/SMD
+foreskin/SM
+forestaller/M
+forestall/LGSRD
+forestallment/M
+forestation/MCS
+forestations/A
+forest/CSAGD
+Forester/M
+forester/SM
+forestland/S
+Forest/MR
+forestry/MS
+forest's
+foretaste/MGSD
+foreteller/M
+foretell/RGS
+forethought/MS
+foretold
+forevermore
+forever/PS
+forewarner/M
+forewarn/GSJRD
+forewent
+forewoman/M
+forewomen
+foreword/SM
+forfeiter/M
+forfeiture/MS
+forfeit/ZGDRMS
+forfend/GSD
+forgather/GSD
+forgave
+forged/A
+forge/JVGMZSRD
+forger/M
+forgery/MS
+forges/A
+forgetfulness/SM
+forgetful/PY
+forget/SV
+forgettable/U
+forgettably/U
+forgetting
+forging/M
+forgivable/U
+forgivably/U
+forgiven
+forgiveness/SM
+forgiver/M
+forgive/SRPBZG
+forgivingly
+forgivingness/M
+forgiving/UP
+forgoer/M
+forgoes
+forgone
+forgo/RSGZ
+forgot
+forgotten/U
+for/HT
+forkful/S
+fork/GSRDM
+forklift/DMSG
+forlornness/M
+forlorn/PTRY
+formability/AM
+formaldehyde/SM
+formalin/M
+formalism/SM
+formalistic
+formalist/SM
+formality/SMI
+formal/IY
+formalization/SM
+formalized/U
+formalizer/M
+formalizes/I
+formalize/ZGSRD
+formalness/M
+formals
+formant/MIS
+format/AVS
+formate/MXGNSD
+formation/AFSCIM
+formatively/I
+formativeness/IM
+formative/SYP
+format's
+formatted/UA
+formatter/A
+formatters
+formatter's
+formatting/A
+form/CGSAFDI
+formed/U
+former/FSAI
+formerly
+formfitting
+formic
+Formica/MS
+formidableness/M
+formidable/P
+formidably
+formlessness/MS
+formless/PY
+Formosa/M
+Formosan
+form's
+formulaic
+formula/SM
+formulate/AGNSDX
+formulated/U
+formulation/AM
+formulator/SM
+fornicate/GNXSD
+fornication/M
+fornicator/SM
+Forrester/M
+Forrest/RM
+forsaken
+forsake/SG
+forsook
+forsooth
+Forster/M
+forswear/SG
+forswore
+forsworn
+forsythia/MS
+Fortaleza/M
+forte/MS
+forthcome/JG
+forthcoming/U
+FORTH/M
+forthrightness/SM
+forthright/PYS
+forthwith
+fortieths
+fortification/MS
+fortified/U
+fortifier/SM
+fortify/ADSG
+fortiori
+fortissimo/S
+fortitude/SM
+fortnightly/S
+fortnight/MYS
+FORTRAN
+Fortran/M
+fortress/GMSD
+fort/SM
+fortuitousness/SM
+fortuitous/YP
+fortuity/MS
+fortunateness/M
+fortunate/YUS
+fortune/MGSD
+fortuneteller/SM
+fortunetelling/SM
+forty/SRMH
+forum/MS
+forwarder/M
+forwarding/M
+forwardness/MS
+forward/PTZSGDRY
+forwent
+fossiliferous
+fossilization/MS
+fossilized/U
+fossilize/GSD
+fossil/MS
+Foss/M
+fosterer/M
+Foster/M
+foster/SRDG
+Foucault/M
+fought
+foulard/SM
+foulmouth/D
+foulness/MS
+fouls/M
+foul/SYRDGTP
+foundational
+foundation/SM
+founded/UF
+founder/MDG
+founder's/F
+founding/F
+foundling/MS
+found/RDGZS
+foundry/MS
+founds/KF
+fountainhead/SM
+fountain/SMDG
+fount/MS
+fourfold
+Fourier/M
+fourpence/M
+fourpenny
+fourposter/SM
+fourscore/S
+four/SHM
+foursome/SM
+foursquare
+fourteener/M
+fourteen/SMRH
+fourteenths
+Fourth
+fourths
+Fourths
+fourth/Y
+fovea/M
+fowler/M
+Fowler/M
+fowling/M
+fowl/SGMRD
+foxfire/SM
+foxglove/SM
+Foxhall/M
+foxhole/SM
+foxhound/SM
+foxily
+foxiness/MS
+foxing/M
+fox/MDSG
+Fox/MS
+foxtail/M
+foxtrot/MS
+foxtrotted
+foxtrotting
+foxy/TRP
+foyer/SM
+FPO
+fps
+fr
+fracas/SM
+fractal/SM
+fractional/Y
+fractionate/DNG
+fractionation/M
+fractioned
+fractioning
+fraction/ISMA
+fractiousness/SM
+fractious/PY
+fracture/MGDS
+fragile/Y
+fragility/MS
+fragmentarily
+fragmentariness/M
+fragmentary/P
+fragmentation/MS
+fragment/SDMG
+Fragonard/M
+fragrance/SM
+fragrant/Y
+frailness/MS
+frail/STPYR
+frailty/MS
+framed/U
+framer/M
+frame/SRDJGMZ
+framework/SM
+framing/M
+Francaise/M
+France/MS
+Francene/M
+Francesca/M
+Francesco/M
+franchisee/S
+franchise/ESDG
+franchiser/SM
+franchise's
+Franchot/M
+Francie/M
+Francine/M
+Francis
+Francisca/M
+Franciscan/MS
+Francisco/M
+Franciska/M
+Franciskus/M
+francium/MS
+Francklin/M
+Francklyn/M
+Franck/M
+Francoise/M
+Francois/M
+Franco/M
+francophone/M
+franc/SM
+Francyne/M
+frangibility/SM
+frangible
+Frankel/M
+Frankenstein/MS
+franker/M
+Frankford/M
+Frankfort/M
+Frankfurter/M
+frankfurter/MS
+Frankfurt/RM
+Frankie/M
+frankincense/MS
+Frankish/M
+franklin/M
+Franklin/M
+Franklyn/M
+frankness/MS
+frank/SGTYRDP
+Frank/SM
+Franky/M
+Fran/MS
+Frannie/M
+Franni/M
+Franny/M
+Fransisco/M
+frantically
+franticness/M
+frantic/PY
+Frants/M
+Franzen/M
+Franz/NM
+frappé
+frappeed
+frappeing
+frappes
+Frasco/M
+Fraser/M
+Frasier/M
+Frasquito/M
+fraternal/Y
+fraternity/MSF
+fraternization/SM
+fraternize/GZRSD
+fraternizer/M
+fraternizing/U
+frat/MS
+fratricidal
+fratricide/MS
+fraud/CS
+fraud's
+fraudsters
+fraudulence/S
+fraudulent/YP
+fraught/SGD
+Fraulein/S
+Frau/MN
+fray/CSDG
+Frayda/M
+Frayne/M
+fray's
+Fraze/MR
+Frazer/M
+Frazier/M
+frazzle/GDS
+freakishness/SM
+freakish/YP
+freak/SGDM
+freaky/RT
+freckle/GMDS
+freckly/RT
+Freda/M
+Freddie/M
+Freddi/M
+Freddy/M
+Fredek/M
+Fredelia/M
+Frederica/M
+Frederich/M
+Fredericka/M
+Frederick/MS
+Frederic/M
+Frederico/M
+Fredericton/M
+Frederigo/M
+Frederik/M
+Frederique/M
+Fredholm/M
+Fredia/M
+Fredi/M
+Fred/M
+Fredra/M
+Fredrick/M
+Fredrickson/M
+Fredric/M
+Fredrika/M
+freebase/GDS
+freebie/MS
+freebooter/M
+freeboot/ZR
+freeborn
+freedman/M
+Freedman/M
+freedmen
+freedom/MS
+freehand/D
+freehanded/Y
+freeholder/M
+freehold/ZSRM
+freeing/S
+freelance/SRDGZM
+Freeland/M
+freeloader/M
+freeload/SRDGZ
+Free/M
+freeman/M
+Freeman/M
+freemasonry/M
+Freemasonry/MS
+Freemason/SM
+freemen
+Freemon/M
+freeness/M
+Freeport/M
+freestanding
+freestone/SM
+freestyle/SM
+freethinker/MS
+freethinking/S
+Freetown/M
+freeway/MS
+freewheeler/M
+freewheeling/P
+freewheel/SRDMGZ
+freewill
+free/YTDRSP
+freezable
+freezer/SM
+freeze/UGSA
+freezing/S
+Freida/M
+freighter/M
+freight/ZGMDRS
+Fremont/M
+Frenchman/M
+French/MDSG
+Frenchmen
+Frenchwoman/M
+Frenchwomen
+frenetically
+frenetic/S
+frenzied/Y
+frenzy/MDSG
+freon/S
+Freon/SM
+freq
+frequency/ISM
+frequented/U
+frequenter/MS
+frequentest
+frequenting
+frequent/IY
+frequentness/M
+frequents
+fresco/DMG
+frescoes
+fresh/AZSRNDG
+freshener/M
+freshen/SZGDR
+fresher/MA
+freshest
+freshet/SM
+freshly
+freshman/M
+freshmen
+freshness/MS
+freshwater/SM
+Fresnel/M
+Fresno/M
+fretboard
+fretfulness/MS
+fretful/PY
+fret/S
+fretsaw/S
+fretted
+fretting
+fretwork/MS
+Freudian/S
+Freud/M
+Freya/M
+Frey/M
+friableness/M
+friable/P
+friary/MS
+friar/YMS
+fricasseeing
+fricassee/MSD
+frication/M
+fricative/MS
+Frick/M
+frictional/Y
+frictionless/Y
+friction/MS
+Friday/SM
+fridge/SM
+fried/A
+Frieda/M
+Friedan/M
+friedcake/SM
+Friederike/M
+Friedman/M
+Friedrich/M
+Friedrick/M
+friendlessness/M
+friendless/P
+friendlies
+friendlily
+friendliness/USM
+friendly/PUTR
+friend/SGMYD
+friendship/MS
+frier's
+fries/M
+frieze/SDGM
+frigate/SM
+Frigga/M
+frigged
+frigging/S
+frighten/DG
+frightening/Y
+frightfulness/MS
+frightful/PY
+fright/GXMDNS
+Frigidaire/M
+frigidity/MS
+frigidness/SM
+frigid/YP
+frig/S
+frill/MDGS
+frilly/RST
+Fri/M
+fringe/IGSD
+fringe's
+frippery/SM
+Frisbee/MS
+Frisco/M
+Frisian/SM
+frisker/M
+friskily
+friskiness/SM
+frisk/RDGS
+frisky/RTP
+frisson/M
+Frito/M
+fritterer/M
+fritter/RDSG
+Fritz/M
+fritz/SM
+frivolity/MS
+frivolousness/SM
+frivolous/PY
+frizz/GYSD
+frizzle/DSG
+frizzly/RT
+frizzy/RT
+Fr/MD
+Frobisher/M
+frocking/M
+frock's
+frock/SUDGC
+frogged
+frogging
+frogman/M
+frogmarched
+frogmen
+frog/MS
+fro/HS
+Froissart/M
+frolicked
+frolicker/SM
+frolicking
+frolic/SM
+frolicsome
+from
+Fromm/M
+frond/SM
+frontage/MS
+frontal/SY
+Frontenac/M
+front/GSFRD
+frontier/SM
+frontiersman/M
+frontiersmen
+frontispiece/SM
+frontrunner's
+front's
+frontward/S
+frosh/M
+Frostbelt/M
+frostbite/MS
+frostbit/G
+frostbiting/M
+frostbitten
+frost/CDSG
+frosteds
+frosted/U
+frostily
+frostiness/SM
+frosting/MS
+Frost/M
+frost's
+frosty/PTR
+froth/GMD
+frothiness/SM
+froths
+frothy/TRP
+froufrou/MS
+frowardness/MS
+froward/P
+frowner/M
+frowning/Y
+frown/RDSG
+frowzily
+frowziness/SM
+frowzy/RPT
+frozenness/M
+frozen/YP
+froze/UA
+fructify/GSD
+fructose/MS
+Fruehauf/M
+frugality/SM
+frugal/Y
+fruitcake/SM
+fruiterer/M
+fruiter/RM
+fruitfuller
+fruitfullest
+fruitfulness/MS
+fruitful/UYP
+fruit/GMRDS
+fruitiness/MS
+fruition/SM
+fruitlessness/MS
+fruitless/YP
+fruity/RPT
+frumpish
+frump/MS
+frumpy/TR
+Frunze/M
+frustrater/M
+frustrate/RSDXNG
+frustrating/Y
+frustration/M
+frustum/SM
+Frye/M
+fryer/MS
+Fry/M
+fry/NGDS
+F's
+f's/KA
+FSLIC
+ft/C
+FTC
+FTP
+fuchsia/MS
+Fuchs/M
+fucker/M!
+fuck/GZJRDMS!
+FUD
+fuddle/GSD
+fudge/GMSD
+fuel/ASDG
+fueler/SM
+fuel's
+Fuentes/M
+fugal
+Fugger/M
+fugitiveness/M
+fugitive/SYMP
+fugue/GMSD
+fuhrer/S
+Fuji/M
+Fujitsu/M
+Fujiyama
+Fukuoka/M
+Fulani/M
+Fulbright/M
+fulcrum/SM
+fulfilled/U
+fulfiller/M
+fulfill/GLSRD
+fulfillment/MS
+fullback/SMG
+fuller/DMG
+Fuller/M
+Fullerton/M
+fullish
+fullness/MS
+full/RDPSGZT
+fullstops
+fullword/SM
+fully
+fulminate/XSDGN
+fulmination/M
+fulness's
+fulsomeness/SM
+fulsome/PY
+Fulton/M
+Fulvia/M
+fumble/GZRSD
+fumbler/M
+fumbling/Y
+fume/DSG
+fumigant/MS
+fumigate/NGSDX
+fumigation/M
+fumigator/SM
+fuming/Y
+fumy/TR
+Funafuti
+functionalism/M
+functionalist/SM
+functionality/S
+functional/YS
+functionary/MS
+function/GSMD
+functor/SM
+fundamentalism/SM
+fundamentalist/SM
+fundamental/SY
+fund/ASMRDZG
+funded/U
+fundholders
+fundholding
+funding/S
+Fundy/M
+funeral/MS
+funerary
+funereal/Y
+funfair/M
+fungal/S
+fungible/M
+fungicidal
+fungicide/SM
+fungi/M
+fungoid/S
+fungous
+fungus/M
+funicular/SM
+funk/GSDM
+funkiness/S
+funky/RTP
+fun/MS
+funned
+funnel/SGMD
+funner
+funnest
+funnily/U
+funniness/SM
+funning
+funny/RSPT
+furbelow/MDSG
+furbisher/M
+furbish/GDRSA
+furiousness/M
+furious/RYP
+furlong/MS
+furlough/DGM
+furloughs
+furl/UDGS
+furn
+furnace/GMSD
+furnished/U
+furnisher/MS
+furnish/GASD
+furnishing/SM
+furniture/SM
+furore/MS
+furor/MS
+fur/PMS
+furred
+furrier/M
+furriness/SM
+furring/SM
+furrow/DMGS
+furry/RTZP
+furtherance/MS
+furtherer/M
+furthermore
+furthermost
+further/TGDRS
+furthest
+furtiveness/SM
+furtive/PY
+fury/SM
+furze/SM
+fusebox/S
+fusee/SM
+fuse/FSDAGCI
+fuselage/SM
+fuse's/A
+Fushun/M
+fusibility/SM
+fusible/I
+fusiform
+fusilier/MS
+fusillade/SDMG
+fusion/KMFSI
+fussbudget/MS
+fusser/M
+fussily
+fussiness/MS
+fusspot/SM
+fuss/SRDMG
+fussy/PTR
+fustian/MS
+fustiness/MS
+fusty/RPT
+fut
+futileness/M
+futile/PY
+futility/MS
+futon/S
+future/SM
+futurism/SM
+futuristic/S
+futurist/S
+futurity/MS
+futurologist/S
+futurology/MS
+futz/GSD
+fuze's
+Fuzhou/M
+Fuzzbuster/M
+fuzzily
+fuzziness/SM
+fuzz/SDMG
+fuzzy/PRT
+fwd
+FWD
+fwy
+FY
+FYI
+GA
+gabardine/SM
+gabbed
+Gabbey/M
+Gabbie/M
+Gabbi/M
+gabbiness/S
+gabbing
+gabble/SDG
+Gabby/M
+gabby/TRP
+Gabe/M
+gaberdine's
+Gabey/M
+gabfest/MS
+Gabie/M
+Gabi/M
+gable/GMSRD
+Gable/M
+Gabonese
+Gabon/M
+Gaborone/M
+Gabriela/M
+Gabriele/M
+Gabriella/M
+Gabrielle/M
+Gabriellia/M
+Gabriell/M
+Gabriello/M
+Gabriel/M
+Gabrila/M
+gab/S
+Gaby/M
+Gacrux/M
+gadabout/MS
+gadded
+gadder/MS
+gadding
+gadfly/MS
+gadgetry/MS
+gadget/SM
+gadolinium/MS
+gad/S
+Gadsden/M
+Gaea/M
+Gaelan/M
+Gaelic/M
+Gael/SM
+Gae/M
+gaffe/MS
+gaffer/M
+gaff/SGZRDM
+gaga
+Gagarin/M
+gag/DRSG
+Gage/M
+gager/M
+gage/SM
+gagged
+gagging
+gaggle/SDG
+gagwriter/S
+gaiety/MS
+Gaile/M
+Gail/M
+gaily
+gain/ADGS
+gainer/SM
+Gaines/M
+Gainesville/M
+gainfulness/M
+gainful/YP
+gaining/S
+gainly/U
+gainsaid
+gainsayer/M
+gainsay/RSZG
+Gainsborough/M
+gaiter/M
+gait/GSZMRD
+Gaithersburg/M
+galactic
+Galahad/MS
+Galapagos/M
+gal/AS
+gala/SM
+Galatea/M
+Galatia/M
+Galatians/M
+Galaxy/M
+galaxy/MS
+Galbraith/M
+Galbreath/M
+gale/AS
+Gale/M
+galen
+galena/MS
+galenite/M
+Galen/M
+gale's
+Galibi/M
+Galilean/MS
+Galilee/M
+Galileo/M
+Galina/M
+Gallagher/M
+gallanted
+gallanting
+gallantry/MS
+gallants
+gallant/UY
+Gallard/M
+gallbladder/MS
+Gallegos/M
+galleon/SM
+galleria/S
+gallery/MSDG
+galley/MS
+Gallic
+Gallicism/SM
+gallimaufry/MS
+galling/Y
+gallium/SM
+gallivant/GDS
+Gall/M
+gallonage/M
+gallon/SM
+galloper/M
+gallop/GSRDZ
+Galloway/M
+gallows/M
+gall/SGMD
+gallstone/MS
+Gallup/M
+Gal/MN
+Galois/M
+galoot/MS
+galore/S
+galosh/GMSD
+gal's
+Galsworthy/M
+galumph/GD
+galumphs
+galvanic
+Galvani/M
+galvanism/MS
+galvanization/SM
+galvanize/SDG
+Galvan/M
+galvanometer/SM
+galvanometric
+Galven/M
+Galveston/M
+Galvin/M
+Ga/M
+Gamaliel/M
+Gama/M
+Gambia/M
+Gambian/S
+gambit/MS
+gamble/GZRSD
+Gamble/M
+gambler/M
+gambol/SGD
+gamecock/SM
+gamekeeper/MS
+gameness/MS
+game/PJDRSMYTZG
+gamesmanship/SM
+gamesmen
+gamester/M
+gamest/RZ
+gamete/MS
+gametic
+gamine/SM
+gaminess/MS
+gaming/M
+gamin/MS
+gamma/MS
+gammon/DMSG
+Gamow/M
+gamut/MS
+gamy/TRP
+gander/DMGS
+Gandhian
+Gandhi/M
+gangbusters
+ganger/M
+Ganges/M
+gang/GRDMS
+gangland/SM
+ganglia/M
+gangling
+ganglionic
+ganglion/M
+gangplank/SM
+gangrene/SDMG
+gangrenous
+gangster/SM
+Gangtok/M
+gangway/MS
+Gan/M
+gannet/SM
+Gannie/M
+Gannon/M
+Ganny/M
+gantlet/GMDS
+Gantry/M
+gantry/MS
+Ganymede/M
+GAO
+gaoler/M
+gaol/MRDGZS
+gaper/M
+gape/S
+gaping/Y
+gapped
+gapping
+gap/SJMDRG
+garage/GMSD
+Garald/M
+garbageman/M
+garbage/SDMG
+garbanzo/MS
+garb/DMGS
+garbler/M
+garble/RSDG
+Garbo/M
+Garcia/M
+garçon/SM
+gardener/M
+Gardener/M
+gardenia/SM
+gardening/M
+garden/ZGRDMS
+Gardie/M
+Gardiner/M
+Gard/M
+Gardner/M
+Gardy/M
+Garek/M
+Gare/MH
+Gareth/M
+Garey/M
+Garfield/M
+garfish/MS
+Garfunkel/M
+Gargantua/M
+gargantuan
+gargle/SDG
+gargoyle/DSM
+Garibaldi/M
+Garik/M
+garishness/MS
+garish/YP
+Garland/M
+garland/SMDG
+garlicked
+garlicking
+garlicky
+garlic/SM
+garment/MDGS
+Gar/MH
+Garner/M
+garner/SGD
+Garnet/M
+garnet/SM
+Garnette/M
+Garnett/M
+garnish/DSLG
+garnisheeing
+garnishee/SDM
+garnishment/MS
+Garold/M
+garote's
+garotte's
+Garrard/M
+garred
+Garrek/M
+Garreth/M
+Garret/M
+garret/SM
+Garrett/M
+Garrick/M
+Garrik/M
+garring
+Garrison/M
+garrison/SGMD
+garroter/M
+garrote/SRDMZG
+Garrot/M
+garrotte's
+Garrott/M
+garrulity/SM
+garrulousness/MS
+garrulous/PY
+Garry/M
+gar/SLM
+garter/SGDM
+Garth/M
+Garvey/M
+Garvin/M
+Garv/M
+Garvy/M
+Garwin/M
+Garwood/M
+Gary/M
+Garza/M
+gasbag/MS
+Gascony/M
+gaseousness/M
+gaseous/YP
+gases/C
+gas/FC
+gash/GTMSRD
+gasification/M
+gasifier/M
+gasify/SRDGXZN
+gasket/SM
+gaslight/DMS
+gasohol/S
+gasoline/MS
+gasometer/M
+Gaspard/M
+Gaspar/M
+Gasparo/M
+gasper/M
+Gasper/M
+gasp/GZSRD
+gasping/Y
+gas's
+gassed/C
+Gasser/M
+gasser/MS
+Gasset/M
+gassiness/M
+gassing/SM
+gassy/PTR
+Gaston/M
+gastric
+gastritides
+gastritis/MS
+gastroenteritides
+gastroenteritis/M
+gastrointestinal
+gastronome/SM
+gastronomic
+gastronomical/Y
+gastronomy/MS
+gastropod/SM
+gasworks/M
+gateau/MS
+gateaux
+gatecrash/GZSRD
+gatehouse/MS
+gatekeeper/SM
+gate/MGDS
+gatepost/SM
+Gates
+gateway/MS
+gathered/IA
+gatherer/M
+gathering/M
+gather/JRDZGS
+gathers/A
+Gatlinburg/M
+Gatling/M
+Gatorade/M
+gator/MS
+Gatsby/M
+Gatun/M
+gaucheness/SM
+gaucherie/SM
+gauche/TYPR
+gaucho/SM
+gaudily
+gaudiness/MS
+gaudy/PRST
+gaugeable
+gauger/M
+Gauguin/M
+Gaulish/M
+Gaulle/M
+Gaul/MS
+Gaultiero/M
+gauntlet/GSDM
+Gauntley/M
+gauntness/MS
+gaunt/PYRDSGT
+gauss/C
+gausses
+Gaussian
+Gauss/M
+gauss's
+Gautama/M
+Gauthier/M
+Gautier/M
+gauze/SDGM
+gauziness/MS
+gauzy/TRP
+Gavan/M
+gave
+gavel/GMDS
+Gaven/M
+Gavin/M
+Gav/MN
+gavotte/MSDG
+Gavra/M
+Gavrielle/M
+Gawain/M
+Gawen/M
+gawkily
+gawkiness/MS
+gawk/SGRDM
+gawky/RSPT
+Gayel/M
+Gayelord/M
+Gaye/M
+gayety's
+Gayla/M
+Gayleen/M
+Gaylene/M
+Gayler/M
+Gayle/RM
+Gaylord/M
+Gaylor/M
+Gay/M
+gayness/SM
+Gaynor/M
+gay/RTPS
+Gaza/M
+gazebo/SM
+gaze/DRSZG
+gazelle/MS
+gazer/M
+gazetteer/SGDM
+gazette/MGSD
+Gaziantep/M
+gazillion/S
+gazpacho/MS
+GB
+G/B
+Gdansk/M
+Gd/M
+GDP
+Gearalt/M
+Gearard/M
+gearbox/SM
+gear/DMJSG
+gearing/M
+gearshift/MS
+gearstick
+gearwheel/SM
+Geary/M
+gecko/MS
+GED
+geegaw's
+geeing
+geek/SM
+geeky/RT
+geese/M
+geest/M
+gee/TDS
+geezer/MS
+Gehenna/M
+Gehrig/M
+Geiger/M
+Geigy/M
+geisha/M
+gelatinousness/M
+gelatinous/PY
+gelatin/SM
+gelcap
+gelding/M
+geld/JSGD
+gelid
+gelignite/MS
+gelled
+gelling
+gel/MBS
+Gelya/M
+Ge/M
+GE/M
+Gemini/SM
+gemlike
+Gemma/M
+gemmed
+gemming
+gem/MS
+gemological
+gemologist/MS
+gemology/MS
+gemstone/SM
+gen
+Gena/M
+Genaro/M
+gendarme/MS
+gender/DMGS
+genderless
+genealogical/Y
+genealogist/SM
+genealogy/MS
+Gene/M
+gene/MS
+generalissimo/SM
+generalist/MS
+generality/MS
+generalizable/SM
+generalization/MS
+generalized/U
+generalize/GZBSRD
+generalizer/M
+general/MSPY
+generalness/M
+generalship/SM
+genera/M
+generate/CXAVNGSD
+generational
+generation/MCA
+generative/AY
+generators/A
+generator/SM
+generically
+generic/PS
+generosity/MS
+generously/U
+generousness/SM
+generous/PY
+Genesco/M
+genesis/M
+Genesis/M
+genes/S
+genetically
+geneticist/MS
+genetic/S
+genetics/M
+Genet/M
+Geneva/M
+Genevieve/M
+Genevra/M
+Genghis/M
+geniality/FMS
+genially/F
+genialness/M
+genial/PY
+Genia/M
+genies/K
+genie/SM
+genii/M
+genitalia
+genitals
+genital/YF
+genitive/SM
+genitourinary
+genius/SM
+Gen/M
+Genna/M
+Gennie/M
+Gennifer/M
+Genni/M
+Genny/M
+Genoa/SM
+genocidal
+genocide/SM
+Geno/M
+genome/SM
+genotype/MS
+Genovera/M
+genre/MS
+gent/AMS
+genteelness/MS
+genteel/PRYT
+gentian/SM
+gentile/S
+Gentile's
+gentility/MS
+gentlefolk/S
+gentlemanliness/M
+gentlemanly/U
+gentleman/YM
+gentlemen
+gentleness/SM
+gentle/PRSDGT
+gentlewoman/M
+gentlewomen/M
+gently
+gentrification/M
+gentrify/NSDGX
+Gentry/M
+gentry/MS
+genuflect/GDS
+genuflection/MS
+genuineness/SM
+genuine/PY
+genus
+Genvieve/M
+geocentric
+geocentrically
+geocentricism
+geochemical/Y
+geochemistry/MS
+geochronology/M
+geodesic/S
+geode/SM
+geodesy/MS
+geodetic/S
+Geoff/M
+Geoffrey/M
+Geoffry/M
+geog
+geographer/MS
+geographic
+geographical/Y
+geography/MS
+geologic
+geological/Y
+geologist/MS
+geology/MS
+geom
+Geo/M
+geomagnetic
+geomagnetically
+geomagnetism/SM
+geometer/MS
+geometrical/Y
+geometrician/M
+geometric/S
+geometry/MS
+geomorphological
+geomorphology/M
+geophysical/Y
+geophysicist/MS
+geophysics/M
+geopolitical/Y
+geopolitic/S
+geopolitics/M
+Georas/M
+Geordie/M
+Georgeanna/M
+Georgeanne/M
+Georgena/M
+George/SM
+Georgeta/M
+Georgetown/M
+Georgetta/M
+Georgette/M
+Georgia/M
+Georgiana/M
+Georgianna/M
+Georgianne/M
+Georgian/S
+Georgie/M
+Georgi/M
+Georgina/M
+Georgine/M
+Georg/M
+Georgy/M
+geostationary
+geosynchronous
+geosyncline/SM
+geothermal
+geothermic
+Geralda/M
+Geraldine/M
+Gerald/M
+geranium/SM
+Gerard/M
+Gerardo/M
+Gerber/M
+gerbil/MS
+Gerda/M
+Gerek/M
+Gerhardine/M
+Gerhard/M
+Gerhardt/M
+Gerianna/M
+Gerianne/M
+geriatric/S
+geriatrics/M
+Gerick/M
+Gerik/M
+Geri/M
+Geritol/M
+Gerladina/M
+Ger/M
+Germaine/M
+Germain/M
+Germana/M
+germane
+Germania/M
+Germanic/M
+germanium/SM
+germanized
+German/SM
+Germantown/M
+Germany/M
+Germayne/M
+germen/M
+germicidal
+germicide/MS
+germinal/Y
+germinated/U
+germinate/XVGNSD
+germination/M
+germinative/Y
+germ/MNS
+Gerome/M
+Geronimo/M
+gerontocracy/M
+gerontological
+gerontologist/SM
+gerontology/SM
+Gerrard/M
+Gerrie/M
+Gerrilee/M
+Gerri/M
+Gerry/M
+gerrymander/SGD
+Gershwin/MS
+Gerta/M
+Gertie/M
+Gerti/M
+Gert/M
+Gertruda/M
+Gertrude/M
+Gertrudis/M
+Gertrud/M
+Gerty/M
+gerundive/M
+gerund/SVM
+Gery/M
+gestalt/M
+gestapo/S
+Gestapo/SM
+gestate/SDGNX
+gestational
+gestation/M
+gesticulate/XSDVGN
+gesticulation/M
+gesticulative/Y
+gestural
+gesture/SDMG
+gesundheit
+getaway/SM
+Gethsemane/M
+get/S
+getter/SDM
+getting
+Getty/M
+Gettysburg/M
+getup/MS
+gewgaw/MS
+Gewürztraminer
+geyser/GDMS
+Ghanaian/MS
+Ghana/M
+Ghanian's
+ghastliness/MS
+ghastly/TPR
+ghat/MS
+Ghats/M
+Ghent/M
+Gherardo/M
+gherkin/SM
+ghetto/DGMS
+ghettoize/SDG
+Ghibelline/M
+ghostlike
+ghostliness/MS
+ghostly/TRP
+ghost/SMYDG
+ghostwrite/RSGZ
+ghostwritten
+ghostwrote
+ghoulishness/SM
+ghoulish/PY
+ghoul/SM
+GHQ
+GI
+Giacinta/M
+Giacobo/M
+Giacometti/M
+Giacomo/M
+Giacopo/M
+Giana/M
+Gianina/M
+Gian/M
+Gianna/M
+Gianni/M
+Giannini/M
+giantess/MS
+giantkiller
+giant/SM
+Giauque/M
+Giavani/M
+gibber/DGS
+gibberish/MS
+gibbet/MDSG
+Gibbie/M
+Gibb/MS
+Gibbon/M
+gibbon/MS
+gibbousness/M
+gibbous/YP
+Gibby/M
+gibe/GDRS
+giber/M
+giblet/MS
+Gib/M
+Gibraltar/MS
+Gibson/M
+giddap
+giddily
+giddiness/SM
+Giddings/M
+giddy/GPRSDT
+Gide/M
+Gideon/MS
+Gielgud/M
+Gienah/M
+Giffard/M
+Giffer/M
+Giffie/M
+Gifford/M
+Giff/RM
+Giffy/M
+giftedness/M
+gifted/PY
+gift/SGMD
+gigabyte/S
+gigacycle/MS
+gigahertz/M
+gigantically
+giganticness/M
+gigantic/P
+gigavolt
+gigawatt/M
+gigged
+gigging
+giggler/M
+giggle/RSDGZ
+giggling/Y
+giggly/TR
+Gigi/M
+gig/MS
+GIGO
+gigolo/MS
+gila
+Gila/M
+Gilberta/M
+Gilberte/M
+Gilbertina/M
+Gilbertine/M
+gilbert/M
+Gilbert/M
+Gilberto/M
+Gilbertson/M
+Gilburt/M
+Gilchrist/M
+Gilda/M
+gilder/M
+gilding/M
+gild/JSGZRD
+Gilead/M
+Gilemette/M
+Giles
+Gilgamesh/M
+Gilkson/M
+Gillan/M
+Gilles
+Gillespie/M
+Gillette/M
+Gilliam/M
+Gillian/M
+Gillie/M
+Gilligan/M
+Gilli/M
+Gill/M
+gill/SGMRD
+Gilly/M
+Gilmore/M
+Gil/MY
+gilt/S
+gimbaled
+gimbals
+Gimbel/M
+gimcrackery/SM
+gimcrack/S
+gimlet/MDSG
+gimme/S
+gimmick/GDMS
+gimmickry/MS
+gimmicky
+gimp/GSMD
+gimpy/RT
+Gina/M
+Ginelle/M
+Ginevra/M
+gingerbread/SM
+gingerliness/M
+gingerly/P
+Ginger/M
+ginger/SGDYM
+gingersnap/SM
+gingery
+gingham/SM
+gingivitis/SM
+Gingrich/M
+ginkgoes
+ginkgo/M
+ginmill
+gin/MS
+ginned
+Ginnie/M
+Ginnifer/M
+Ginni/M
+ginning
+Ginny/M
+Gino/M
+Ginsberg/M
+Ginsburg/M
+ginseng/SM
+Gioconda/M
+Giordano/M
+Giorgia/M
+Giorgi/M
+Giorgio/M
+Giorgione/M
+Giotto/M
+Giovanna/M
+Giovanni/M
+Gipsy's
+giraffe/MS
+Giralda/M
+Giraldo/M
+Giraud/M
+Giraudoux/M
+girded/U
+girder/M
+girdle/GMRSD
+girdler/M
+gird/RDSGZ
+girlfriend/MS
+girlhood/SM
+girlie/M
+girlishness/SM
+girlish/YP
+girl/MS
+giro/M
+girt/GDS
+girth/MDG
+girths
+Gisela/M
+Giselbert/M
+Gisele/M
+Gisella/M
+Giselle/M
+Gish/M
+gist/MS
+git/M
+Giuditta/M
+Giulia/M
+Giuliano/M
+Giulietta/M
+Giulio/M
+Giuseppe/M
+Giustina/M
+Giustino/M
+Giusto/M
+giveaway/SM
+giveback/S
+give/HZGRS
+given/SP
+giver/M
+giving/Y
+Giza/M
+Gizela/M
+gizmo's
+gizzard/SM
+Gk/M
+glacé/DGS
+glacial/Y
+glaciate/XNGDS
+glaciation/M
+glacier/SM
+glaciological
+glaciologist/M
+glaciology/M
+gladded
+gladden/GDS
+gladder
+gladdest
+gladding
+gladdy
+glade/SM
+gladiatorial
+gladiator/SM
+Gladi/M
+gladiola/MS
+gladioli
+gladiolus/M
+gladly/RT
+Glad/M
+gladness/MS
+gladsome/RT
+Gladstone/MS
+Gladys
+glad/YSP
+glamor/DMGS
+glamorization/MS
+glamorizer/M
+glamorize/SRDZG
+glamorousness/M
+glamorous/PY
+glance/GJSD
+glancing/Y
+glanders/M
+glandes
+glandular/Y
+gland/ZSM
+glans/M
+glare/SDG
+glaringness/M
+glaring/YP
+Glaser/M
+Glasgow/M
+glasnost/S
+glassblower/S
+glassblowing/MS
+glassful/MS
+glass/GSDM
+glasshouse/SM
+glassily
+glassiness/SM
+glassless
+Glass/M
+glassware/SM
+glasswort/M
+glassy/PRST
+Glastonbury/M
+Glaswegian/S
+glaucoma/SM
+glaucous
+glazed/U
+glazer/M
+glaze/SRDGZJ
+glazier/SM
+glazing/M
+gleam/MDGS
+gleaner/M
+gleaning/M
+glean/RDGZJS
+Gleason/M
+Gleda/M
+gleed/M
+glee/DSM
+gleefulness/MS
+gleeful/YP
+gleeing
+Glendale/M
+Glenda/M
+Glenden/M
+Glendon/M
+Glenine/M
+Glen/M
+Glenna/M
+Glennie/M
+Glennis/M
+Glenn/M
+glen/SM
+glibber
+glibbest
+glibness/MS
+glib/YP
+glide/JGZSRD
+glider/M
+glim/M
+glimmer/DSJG
+glimmering/M
+glimpse/DRSZMG
+glimpser/M
+glint/DSG
+glissandi
+glissando/M
+glisten/DSG
+glister/DGS
+glitch/MS
+glitter/GDSJ
+glittering/Y
+glittery
+glitz/GSD
+glitzy/TR
+gloaming/MS
+gloater/M
+gloating/Y
+gloat/SRDG
+globalism/S
+globalist/S
+global/SY
+globe/SM
+globetrotter/MS
+glob/GDMS
+globularity/M
+globularness/M
+globular/PY
+globule/MS
+globulin/MS
+glockenspiel/SM
+glommed
+gloom/GSMD
+gloomily
+gloominess/MS
+gloomy/RTP
+glop/MS
+glopped
+glopping
+gloppy/TR
+Gloria/M
+Gloriana/M
+Gloriane/M
+glorification/M
+glorifier/M
+glorify/XZRSDNG
+Glori/M
+glorious/IYP
+gloriousness/IM
+Glory/M
+glory/SDMG
+glossary/MS
+gloss/GSDM
+glossily
+glossiness/SM
+glossolalia/SM
+glossy/RSPT
+glottal
+glottalization/M
+glottis/MS
+Gloucester/M
+gloveless
+glover/M
+Glover/M
+glove/SRDGMZ
+glower/GD
+glow/GZRDMS
+glowing/Y
+glowworm/SM
+glucose/SM
+glue/DRSMZG
+glued/U
+gluer/M
+gluey
+gluier
+gluiest
+glummer
+glummest
+glumness/MS
+glum/SYP
+gluon/M
+glutamate/M
+gluten/M
+glutenous
+glutinousness/M
+glutinous/PY
+glut/SMNX
+glutted
+glutting
+glutton/MS
+gluttonous/Y
+gluttony/SM
+glyceride/M
+glycerinate/MD
+glycerine's
+glycerin/SM
+glycerolized/C
+glycerol/SM
+glycine/M
+glycogen/SM
+glycol/MS
+Glynda/M
+Glynis/M
+Glyn/M
+Glynnis/M
+Glynn/M
+glyph/M
+glyphs
+gm
+GM
+GMT
+gnarl/SMDG
+gnash/SDG
+gnat/MS
+gnawer/M
+gnaw/GRDSJ
+gnawing/M
+gneiss/SM
+Gnni/M
+gnomelike
+GNOME/M
+gnome/SM
+gnomic
+gnomish
+gnomonic
+gnosticism
+Gnosticism/M
+gnostic/K
+Gnostic/M
+GNP
+gnu/MS
+goad/MDSG
+goalie/SM
+goalkeeper/MS
+goalkeeping/M
+goalless
+goal/MDSG
+goalmouth/M
+goalpost/S
+goalscorer
+goalscoring
+goaltender/SM
+Goa/M
+goatee/SM
+goatherd/MS
+goat/MS
+goatskin/SM
+gobbed
+gobbet/MS
+gobbing
+gobbledegook's
+gobbledygook/S
+gobbler/M
+gobble/SRDGZ
+Gobi/M
+goblet/MS
+goblin/SM
+gob/SM
+Godard/M
+Godart/M
+godchild/M
+godchildren
+goddammit
+goddamn/GS
+Goddard/M
+Goddart/M
+goddaughter/SM
+godded
+goddess/MS
+godding
+Gödel/M
+godfather/GSDM
+godforsaken
+Godfree/M
+Godfrey/M
+Godfry/M
+godhead/S
+godhood/SM
+Godiva/M
+godlessness/MS
+godless/P
+godlikeness/M
+godlike/P
+godliness/UMS
+godly/UTPR
+God/M
+godmother/MS
+Godot/M
+godparent/SM
+godsend/MS
+god/SMY
+godson/MS
+Godspeed/S
+Godthaab/M
+Godunov/M
+Godwin/M
+Godzilla/M
+Goebbels/M
+Goering/M
+goer/MG
+goes
+Goethals/M
+Goethe/M
+gofer/SM
+Goff/M
+goggler/M
+goggle/SRDGZ
+Gogh/M
+Gog/M
+Gogol/M
+Goiania/M
+going/M
+goiter/SM
+Golan/M
+Golconda/M
+Golda/M
+Goldarina/M
+Goldberg/M
+goldbricker/M
+goldbrick/GZRDMS
+Golden/M
+goldenness/M
+goldenrod/SM
+goldenseal/M
+golden/TRYP
+goldfinch/MS
+goldfish/SM
+Goldia/M
+Goldie/M
+Goldilocks/M
+Goldi/M
+Goldina/M
+Golding/M
+Goldman/M
+goldmine/S
+gold/MRNGTS
+goldsmith/M
+Goldsmith/M
+goldsmiths
+Goldstein/M
+Goldwater/M
+Goldwyn/M
+Goldy/M
+Goleta/M
+golfer/M
+golf/RDMGZS
+Golgotha/M
+Goliath/M
+Goliaths
+golly/S
+Gomez/M
+Gomorrah/M
+Gompers/M
+go/MRHZGJ
+gonadal
+gonad/SM
+gondola/SM
+gondolier/MS
+Gondwanaland/M
+goner/M
+gone/RZN
+gong/SGDM
+gonion/M
+gonna
+gonorrheal
+gonorrhea/MS
+Gonzales/M
+Gonzalez/M
+Gonzalo/M
+Goober/M
+goober/MS
+goodbye/MS
+goodhearted
+goodie's
+goodish
+goodly/TR
+Good/M
+Goodman/M
+goodness/MS
+goodnight
+Goodrich/M
+good/SYP
+goodwill/MS
+Goodwin/M
+Goodyear/M
+goody/SM
+gooey
+goofiness/MS
+goof/SDMG
+goofy/RPT
+Google/M
+gooier
+gooiest
+gook/SM
+goo/MS
+goon/SM
+goop/SM
+gooseberry/MS
+goosebumps
+goose/M
+goos/SDG
+GOP
+Gopher
+gopher/SM
+Goran/M
+Goraud/M
+Gorbachev
+Gordan/M
+Gorden/M
+Gordian/M
+Gordie/M
+Gordimer/M
+Gordon/M
+Gordy/M
+gore/DSMG
+Gore/M
+Goren/M
+Gorey/M
+Gorgas
+gorged/E
+gorge/GMSRD
+gorgeousness/SM
+gorgeous/YP
+gorger/EM
+gorges/E
+gorging/E
+Gorgon/M
+gorgon/S
+Gorgonzola/M
+Gorham/M
+gorilla/MS
+gorily
+goriness/MS
+goring/M
+Gorky/M
+gormandizer/M
+gormandize/SRDGZ
+gormless
+gorp/S
+gorse/SM
+gory/PRT
+gos
+goshawk/MS
+gosh/S
+gosling/M
+gospeler/M
+gospel/MRSZ
+Gospel/SM
+gossamer/SM
+gossipy
+gossip/ZGMRDS
+gotcha/SM
+Göteborg/M
+Gotham/M
+Gothart/M
+Gothicism/M
+Gothic/S
+Goth/M
+Goths
+got/IU
+goto
+GOTO/MS
+gotta
+gotten/U
+Gottfried/M
+Goucher/M
+Gouda/SM
+gouge/GZSRD
+gouger/M
+goulash/SM
+Gould/M
+Gounod/M
+gourde/SM
+gourd/MS
+gourmand/MS
+gourmet/MS
+gout/SM
+gouty/RT
+governable/U
+governance/SM
+governed/U
+governess/SM
+govern/LBGSD
+governmental/Y
+government/MS
+Governor
+governor/MS
+governorship/SM
+gov/S
+govt
+gown/GSDM
+Goya/M
+GP
+GPA
+GPO
+GPSS
+gr
+grabbed
+grabber/SM
+grabbing/S
+grab/S
+Gracchus/M
+grace/ESDMG
+graceful/EYPU
+gracefuller
+gracefullest
+gracefulness/ESM
+Graceland/M
+gracelessness/MS
+graceless/PY
+Grace/M
+Gracia/M
+Graciela/M
+Gracie/M
+graciousness/SM
+gracious/UY
+grackle/SM
+gradate/DSNGX
+gradation/MCS
+grade/ACSDG
+graded/U
+Gradeigh/M
+gradely
+grader/MC
+grade's
+Gradey/M
+gradient/RMS
+grad/MRDGZJS
+gradualism/MS
+gradualist/MS
+gradualness/MS
+gradual/SYP
+graduand/SM
+graduate/MNGDSX
+graduation/M
+Grady/M
+Graehme/M
+Graeme/M
+Graffias/M
+graffiti
+graffito/M
+Graff/M
+grafter/M
+grafting/M
+graft/MRDSGZ
+Grafton/M
+Grahame/M
+Graham/M
+graham/SM
+Graig/M
+grail/S
+Grail/SM
+grainer/M
+grain/IGSD
+graininess/MS
+graining/M
+grain's
+grainy/RTP
+gram/KSM
+Gram/M
+grammarian/SM
+grammar/MS
+grammaticality/M
+grammaticalness/M
+grammatical/UY
+grammatic/K
+gramme/SM
+Grammy/S
+gramophone/SM
+Grampians
+grampus/SM
+Granada/M
+granary/MS
+grandam/SM
+grandaunt/MS
+grandchild/M
+grandchildren
+granddaddy/MS
+granddad/SM
+granddaughter/MS
+grandee/SM
+grandeur/MS
+grandfather/MYDSG
+grandiloquence/SM
+grandiloquent/Y
+grandiose/YP
+grandiosity/MS
+grandkid/SM
+grandma/MS
+grandmaster/MS
+grandmother/MYS
+grandnephew/MS
+grandness/MS
+grandniece/SM
+grandpa/MS
+grandparent/MS
+grandson/MS
+grandstander/M
+grandstand/SRDMG
+grand/TPSYR
+granduncle/MS
+Grange/MR
+grange/MSR
+Granger/M
+granite/MS
+granitic
+Gran/M
+Grannie/M
+Granny/M
+granny/MS
+granola/S
+grantee/MS
+granter/M
+Grantham/M
+Granthem/M
+Grantley/M
+Grant/M
+grantor's
+grant/SGZMRD
+grantsmanship/S
+granularity/SM
+granular/Y
+granulate/SDXVGN
+granulation/M
+granule/SM
+granulocytic
+Granville/M
+grapefruit/SM
+grape/SDGM
+grapeshot/M
+grapevine/MS
+grapheme/M
+graph/GMD
+graphical/Y
+graphicness/M
+graphic/PS
+graphics/M
+graphite/SM
+graphologist/SM
+graphology/MS
+graphs
+grapnel/SM
+grapple/DRSG
+grappler/M
+grappling/M
+grasper/M
+graspingness/M
+grasping/PY
+grasp/SRDBG
+grass/GZSDM
+grasshopper/SM
+grassland/MS
+Grass/M
+grassroots
+grassy/RT
+Grata/M
+gratefuller
+gratefullest
+gratefulness/USM
+grateful/YPU
+grater/M
+grates/I
+grate/SRDJGZ
+Gratia/M
+Gratiana/M
+graticule/M
+gratification/M
+gratified/U
+gratifying/Y
+gratify/NDSXG
+grating/YM
+gratis
+gratitude/IMS
+gratuitousness/MS
+gratuitous/PY
+gratuity/SM
+gravamen/SM
+gravedigger/SM
+gravel/SGMYD
+graven
+graveness/MS
+graver/M
+graveside/S
+Graves/M
+grave/SRDPGMZTY
+gravestone/SM
+graveyard/MS
+gravidness/M
+gravid/PY
+gravimeter/SM
+gravimetric
+gravitas
+gravitate/XVGNSD
+gravitational/Y
+gravitation/M
+graviton/SM
+gravity/MS
+gravy/SM
+graybeard/MS
+Grayce/M
+grayish
+Gray/M
+grayness/S
+gray/PYRDGTS
+Grayson/M
+graze/GZSRD
+grazer/M
+Grazia/M
+grazing/M
+grease/GMZSRD
+greasepaint/MS
+greaseproof
+greaser/M
+greasily
+greasiness/SM
+greasy/PRT
+greatcoat/DMS
+greaten/DG
+greathearted
+greatness/MS
+great/SPTYRN
+grebe/MS
+Grecian/S
+Greece/M
+greed/C
+greedily
+greediness/SM
+greeds
+greed's
+greedy/RTP
+Greek/SM
+Greeley/M
+greenback/MS
+greenbelt/S
+Greenberg/M
+Greenblatt/M
+Greenbriar/M
+Greene/M
+greenery/MS
+Greenfeld/M
+greenfield
+Greenfield/M
+greenfly/M
+greengage/SM
+greengrocer/SM
+greengrocery/M
+greenhorn/SM
+greenhouse/SM
+greening/M
+greenish/P
+Greenland/M
+Green/M
+greenmail/GDS
+greenness/MS
+Greenpeace/M
+greenroom/SM
+Greensboro/M
+Greensleeves/M
+Greensville/M
+greensward/SM
+green/SYRDMPGT
+Greentree/M
+Greenville/M
+Greenwich/M
+greenwood/MS
+Greer/M
+greeter/M
+greeting/M
+greets/A
+greet/SRDJGZ
+gregariousness/MS
+gregarious/PY
+Gregg/M
+Greggory/M
+Greg/M
+Gregoire/M
+Gregoor/M
+Gregorian
+Gregorio/M
+Gregorius/M
+Gregor/M
+Gregory/M
+gremlin/SM
+Grenada/M
+grenade/MS
+Grenadian/S
+grenadier/SM
+Grenadines
+grenadine/SM
+Grendel/M
+Grenier/M
+Grenoble/M
+Grenville/M
+Gresham/M
+Gretal/M
+Greta/M
+Gretchen/M
+Gretel/M
+Grete/M
+Grethel/M
+Gretna/M
+Gretta/M
+Gretzky/M
+grew/A
+greybeard/M
+greyhound/MS
+Grey/M
+greyness/M
+gridded
+griddlecake/SM
+griddle/DSGM
+gridiron/GSMD
+gridlock/DSG
+grids/A
+grid/SGM
+grief/MS
+Grieg/M
+Grier/M
+grievance/SM
+griever/M
+grieve/SRDGZ
+grieving/Y
+grievousness/SM
+grievous/PY
+Griffie/M
+Griffin/M
+griffin/SM
+Griffith/M
+Griff/M
+griffon's
+Griffy/M
+griller/M
+grille/SM
+grill/RDGS
+grillwork/M
+grimace/DRSGM
+grimacer/M
+Grimaldi/M
+grime/MS
+Grimes
+griminess/MS
+grimmer
+grimmest
+Grimm/M
+grimness/MS
+grim/PGYD
+grimy/TPR
+Grinch/M
+grind/ASG
+grinder/MS
+grinding/SY
+grindstone/SM
+gringo/SM
+grinned
+grinner/M
+grinning/Y
+grin/S
+griper/M
+gripe/S
+grippe/GMZSRD
+gripper/M
+gripping/Y
+grip/SGZMRD
+Griselda/M
+grisliness/SM
+grisly/RPT
+Gris/M
+Grissel/M
+gristle/SM
+gristliness/M
+gristly/TRP
+gristmill/MS
+grist/MYS
+Griswold/M
+grit/MS
+gritted
+gritter/MS
+grittiness/SM
+gritting
+gritty/PRT
+Griz/M
+grizzle/DSG
+grizzling/M
+grizzly/TRS
+Gr/M
+groaner/M
+groan/GZSRDM
+groat/SM
+grocer/MS
+grocery/MS
+groggily
+grogginess/SM
+groggy/RPT
+grog/MS
+groin/MGSD
+grokked
+grokking
+grok/S
+grommet/GMDS
+Gromyko/M
+groofs
+groomer/M
+groom/GZSMRD
+groomsman/M
+groomsmen
+Groot/M
+groover/M
+groove/SRDGM
+groovy/TR
+groper/M
+grope/SRDJGZ
+Gropius/M
+grosbeak/SM
+grosgrain/MS
+Gross
+Grosset/M
+gross/GTYSRDP
+Grossman/M
+grossness/MS
+Grosvenor/M
+Grosz/M
+grotesqueness/MS
+grotesque/PSY
+Grotius/M
+Groton/M
+grottoes
+grotto/M
+grouch/GDS
+grouchily
+grouchiness/MS
+grouchy/RPT
+groundbreaking/S
+grounded/U
+grounder/M
+groundhog/SM
+ground/JGZMDRS
+groundlessness/M
+groundless/YP
+groundnut/MS
+groundsheet/M
+groundskeepers
+groundsman/M
+groundswell/S
+groundwater/S
+groundwork/SM
+grouped/A
+grouper/M
+groupie/MS
+grouping/M
+groups/A
+group/ZJSMRDG
+grouse/GMZSRD
+grouser/M
+grouter/M
+grout/GSMRD
+groveler/M
+grovelike
+groveling/Y
+grovel/SDRGZ
+Grover/M
+Grove/RM
+grove/SRMZ
+grower/M
+grow/GZYRHS
+growing/I
+growingly
+growler/M
+growling/Y
+growl/RDGZS
+growly/RP
+grown/IA
+grownup/MS
+grows/A
+growth/IMA
+growths/IA
+grubbed
+grubber/SM
+grubbily
+grubbiness/SM
+grubbing
+grubby/RTP
+grub/MS
+grubstake/MSDG
+grudge/GMSRDJ
+grudger/M
+grudging/Y
+grueling/Y
+gruel/MDGJS
+gruesomeness/SM
+gruesome/RYTP
+gruffness/MS
+gruff/PSGTYRD
+grumble/GZJDSR
+grumbler/M
+grumbling/Y
+Grumman/M
+grumpily
+grumpiness/MS
+grump/MDGS
+grumpy/TPR
+Grundy/M
+Grünewald/M
+grunge/S
+grungy/RT
+grunion/SM
+grunter/M
+grunt/SGRD
+Grusky/M
+Grus/M
+Gruyère
+Gruyeres
+gryphon's
+g's
+G's
+gs/A
+GSA
+gt
+GU
+guacamole/MS
+Guadalajara/M
+Guadalcanal/M
+Guadalquivir/M
+Guadalupe/M
+Guadeloupe/M
+Guallatiri/M
+Gualterio/M
+Guamanian/SM
+Guam/M
+Guangzhou
+guanine/MS
+guano/MS
+Guantanamo/M
+Guarani/M
+guarani/SM
+guaranteeing
+guarantee/RSDZM
+guarantor/SM
+guaranty/MSDG
+guardedness/UM
+guarded/UYP
+guarder/M
+guardhouse/SM
+Guardia/M
+guardianship/MS
+guardian/SM
+guardrail/SM
+guard/RDSGZ
+guardroom/SM
+guardsman/M
+guardsmen
+Guarnieri/M
+Guatemala/M
+Guatemalan/S
+guava/SM
+Guayaquil/M
+gubernatorial
+Gucci/M
+gudgeon/M
+Guelph/M
+Guendolen/M
+Guenevere/M
+Guenna/M
+Guenther/M
+guernsey/S
+Guernsey/SM
+Guerra/M
+Guerrero/M
+guerrilla/MS
+guessable/U
+guess/BGZRSD
+guessed/U
+guesser/M
+guesstimate/DSMG
+guesswork/MS
+guest/SGMD
+Guevara/M
+guffaw/GSDM
+guff/SM
+Guggenheim/M
+Guglielma/M
+Guglielmo/M
+Guhleman/M
+GUI
+Guiana/M
+guidance/MS
+guidebook/SM
+guided/U
+guide/GZSRD
+guideline/SM
+guidepost/MS
+guider/M
+Guido/M
+Guilbert/M
+guilder/M
+guildhall/SM
+guild/SZMR
+guileful
+guilelessness/MS
+guileless/YP
+guile/SDGM
+Guillaume/M
+Guillema/M
+Guillemette/M
+guillemot/MS
+Guillermo/M
+guillotine/SDGM
+guiltily
+guiltiness/MS
+guiltlessness/M
+guiltless/YP
+guilt/SM
+guilty/PTR
+Gui/M
+Guinea/M
+Guinean/S
+guinea/SM
+Guinevere/M
+Guinna/M
+Guinness/M
+guise's
+guise/SDEG
+guitarist/SM
+guitar/SM
+Guiyang
+Guizot/M
+Gujarati/M
+Gujarat/M
+Gujranwala/M
+gulag/S
+gulch/MS
+gulden/MS
+gulf/DMGS
+Gullah/M
+gullet/MS
+gulley's
+gullibility/MS
+gullible
+Gulliver/M
+gull/MDSG
+gully/SDMG
+gulp/RDGZS
+gumboil/MS
+gumbo/MS
+gumboots
+gumdrop/SM
+gummed
+gumminess/M
+gumming/C
+gum/MS
+gummy/RTP
+gumption/SM
+gumshoeing
+gumshoe/SDM
+gumtree/MS
+Gunar/M
+gunboat/MS
+Gunderson/M
+gunfighter/M
+gunfight/SRMGZ
+gunfire/SM
+gunflint/M
+gunfought
+Gunilla/M
+gunk/SM
+gunky/RT
+Gun/M
+gunman/M
+gunmen
+gunmetal/MS
+gun/MS
+Gunnar/M
+gunned
+gunnel's
+Gunner/M
+gunner/SM
+gunnery/MS
+gunning/M
+gunnysack/SM
+gunny/SM
+gunpoint/MS
+gunpowder/SM
+gunrunner/MS
+gunrunning/MS
+gunship/S
+gunshot/SM
+gunslinger/M
+gunsling/GZR
+gunsmith/M
+gunsmiths
+Guntar/M
+Gunter/M
+Gunther/M
+gunwale/MS
+Guofeng/M
+guppy/SM
+Gupta/M
+gurgle/SDG
+Gurkha/M
+gurney/S
+guru/MS
+Gusella/M
+gusher/M
+gush/SRDGZ
+gushy/TR
+Gus/M
+Guss
+gusset/MDSG
+Gussie/M
+Gussi/M
+gussy/GSD
+Gussy/M
+Gustaf/M
+Gustafson/M
+Gusta/M
+gustatory
+Gustave/M
+Gustav/M
+Gustavo/M
+Gustavus/M
+gusted/E
+Gustie/M
+gustily
+Gusti/M
+gustiness/M
+gusting/E
+gust/MDGS
+gustoes
+gusto/M
+gusts/E
+Gusty/M
+gusty/RPT
+Gutenberg/M
+Guthrey/M
+Guthrie/M
+Guthry/M
+Gutierrez/M
+gutlessness/S
+gutless/P
+gutser/M
+gutsiness/M
+gut/SM
+guts/R
+gutsy/PTR
+gutted
+gutter/GSDM
+guttering/M
+guttersnipe/M
+gutting
+gutturalness/M
+guttural/SPY
+gutty/RSMT
+Guyana/M
+Guyanese
+Guy/M
+guy/MDRZGS
+Guzman/M
+guzzle/GZRSD
+guzzler/M
+g/VBX
+Gwalior/M
+Gwendolen/M
+Gwendoline/M
+Gwendolin/M
+Gwendolyn/M
+Gweneth/M
+Gwenette/M
+Gwen/M
+Gwenneth/M
+Gwennie/M
+Gwenni/M
+Gwenny/M
+Gwenora/M
+Gwenore/M
+Gwyneth/M
+Gwyn/M
+Gwynne/M
+gymkhana/SM
+gym/MS
+gymnasia's
+gymnasium/SM
+gymnastically
+gymnastic/S
+gymnastics/M
+gymnast/SM
+gymnosperm/SM
+gynecologic
+gynecological/MS
+gynecologist/SM
+gynecology/MS
+gypped
+gypper/S
+gypping
+gyp/S
+gypsite
+gypster/S
+gypsum/MS
+gypsy/SDMG
+Gypsy/SM
+gyrate/XNGSD
+gyration/M
+gyrator/MS
+gyrfalcon/SM
+gyrocompass/M
+gyro/MS
+gyroscope/SM
+gyroscopic
+gyve/GDS
+H
+Haag/M
+Haas/M
+Habakkuk/M
+habeas
+haberdasher/SM
+haberdashery/SM
+Haber/M
+Haberman/M
+Habib/M
+habiliment/SM
+habitability/MS
+habitableness/M
+habitable/P
+habitant/ISM
+habitation/MI
+habitations
+habitat/MS
+habit/IBDGS
+habit's
+habitualness/SM
+habitual/SYP
+habituate/SDNGX
+habituation/M
+habitué/MS
+hacienda/MS
+hacker/M
+Hackett/M
+hack/GZSDRBJ
+hackler/M
+hackle/RSDMG
+hackney/SMDG
+hacksaw/SDMG
+hackwork/S
+Hadamard/M
+Hadar/M
+Haddad/M
+haddock/MS
+hades
+Hades
+had/GD
+hadji's
+hadj's
+Hadlee/M
+Hadleigh/M
+Hadley/M
+Had/M
+hadn't
+Hadria/M
+Hadrian/M
+hadron/MS
+hadst
+haemoglobin's
+haemophilia's
+haemorrhage's
+Hafiz/M
+hafnium/MS
+haft/GSMD
+Hagan/M
+Hagar/M
+Hagen/M
+Hager/M
+Haggai/M
+haggardness/MS
+haggard/SYP
+hagged
+hagging
+haggish
+haggis/SM
+haggler/M
+haggle/RSDZG
+Hagiographa/M
+hagiographer/SM
+hagiography/MS
+hag/SMN
+Hagstrom/M
+Hague/M
+ha/H
+hahnium/S
+Hahn/M
+Haifa/M
+haiku/M
+Hailee/M
+hailer/M
+Hailey/M
+hail/SGMDR
+hailstone/SM
+hailstorm/SM
+Haily/M
+Haiphong/M
+hairball/SM
+hairbreadth/M
+hairbreadths
+hairbrush/SM
+haircare
+haircloth/M
+haircloths
+haircut/MS
+haircutting
+hairdo/SM
+hairdresser/SM
+hairdressing/SM
+hairdryer/S
+hairiness/MS
+hairlessness/M
+hairless/P
+hairlike
+hairline/SM
+hairnet/MS
+hairpiece/MS
+hairpin/MS
+hairsbreadth
+hairsbreadths
+hair/SDM
+hairsplitter/SM
+hairsplitting/MS
+hairspray
+hairspring/SM
+hairstyle/SMG
+hairstylist/S
+hairy/PTR
+Haitian/S
+Haiti/M
+hajjes
+hajji/MS
+hajj/M
+Hakeem/M
+hake/MS
+Hakim/M
+Hakka/M
+Hakluyt/M
+halalled
+halalling
+halal/S
+halberd/SM
+halcyon/S
+Haldane/M
+Haleakala/M
+Haleigh/M
+hale/ISRDG
+Hale/M
+haler/IM
+halest
+Halette/M
+Haley/M
+halfback/SM
+halfbreed
+halfheartedness/MS
+halfhearted/PY
+halfpence/S
+halfpenny/MS
+halfpennyworth
+half/PM
+halftime/S
+halftone/MS
+halfway
+halfword/MS
+halibut/SM
+halide/SM
+Halie/M
+Halifax/M
+Hali/M
+Halimeda/M
+halite/MS
+halitoses
+halitosis/M
+hallelujah
+hallelujahs
+Halley/M
+halliard's
+Hallie/M
+Halli/M
+Hallinan/M
+Hall/M
+Hallmark/M
+hallmark/SGMD
+hallo/GDS
+halloo's
+Halloween/MS
+hallowing
+hallows
+hallow/UD
+hall/SMR
+Hallsy/M
+hallucinate/VNGSDX
+hallucination/M
+hallucinatory
+hallucinogenic/S
+hallucinogen/SM
+hallway/SM
+Hally/M
+halocarbon
+halogenated
+halogen/SM
+halon
+halo/SDMG
+Halpern/M
+Halsey/M
+Hal/SMY
+Halsy/M
+halter/GDM
+halt/GZJSMDR
+halting/Y
+halve/GZDS
+halves/M
+halyard/MS
+Ha/M
+Hamal/M
+Haman/M
+hamburger/M
+Hamburg/MS
+hamburg/SZRM
+Hamel/M
+Hamey/M
+Hamhung/M
+Hamid/M
+Hamilcar/M
+Hamil/M
+Hamiltonian/MS
+Hamilton/M
+Hamish/M
+Hamitic/M
+Hamlen/M
+Hamlet/M
+hamlet/MS
+Hamlin/M
+Ham/M
+Hammad/M
+Hammarskjold/M
+hammed
+hammerer/M
+hammerhead/SM
+hammering/M
+hammerless
+hammerlock/MS
+Hammerstein/M
+hammertoe/SM
+hammer/ZGSRDM
+Hammett/M
+hamming
+hammock/MS
+Hammond/M
+Hammurabi/M
+hammy/RT
+Hamnet/M
+hampered/U
+hamper/GSD
+Hampshire/M
+Hampton/M
+ham/SM
+hamster/MS
+hamstring/MGS
+hamstrung
+Hamsun/M
+Hana/M
+Hanan/M
+Hancock/M
+handbagged
+handbagging
+handbag/MS
+handball/SM
+handbarrow/MS
+handbasin
+handbill/MS
+handbook/SM
+handbrake/M
+handcar/SM
+handcart/MS
+handclasp/MS
+handcraft/GMDS
+handcuff/GSD
+handcuffs/M
+handedness/M
+handed/PY
+Handel/M
+hander/S
+handful/SM
+handgun/SM
+handhold/M
+handicapped
+handicapper/SM
+handicapping
+handicap/SM
+handicraftsman/M
+handicraftsmen
+handicraft/SMR
+handily/U
+handiness/SM
+handiwork/MS
+handkerchief/MS
+handleable
+handlebar/SM
+handle/MZGRSD
+handler/M
+handless
+handling/M
+handmade
+handmaiden/M
+handmaid/NMSX
+handout/SM
+handover
+handpick/GDS
+handrail/SM
+hand's
+handsaw/SM
+handset/SM
+handshake/GMSR
+handshaker/M
+handshaking/M
+handsomely/U
+handsomeness/MS
+handsome/RPTY
+handspike/SM
+handspring/SM
+handstand/MS
+hand/UDSG
+handwork/SM
+handwoven
+handwrite/GSJ
+handwriting/M
+handwritten
+Handy/M
+handyman/M
+handymen
+handy/URT
+Haney/M
+hangar/SGDM
+hangdog/S
+hanged/A
+hanger/M
+hang/GDRZBSJ
+hanging/M
+hangman/M
+hangmen
+hangnail/MS
+hangout/MS
+hangover/SM
+hangs/A
+Hangul/M
+hangup/S
+Hangzhou
+Hankel/M
+hankerer/M
+hanker/GRDJ
+hankering/M
+hank/GZDRMS
+hankie/SM
+Hank/M
+hanky's
+Hannah/M
+Hanna/M
+Hannibal/M
+Hannie/M
+Hanni/MS
+Hanny/M
+Hanoi/M
+Hanoverian
+Hanover/M
+Hansel/M
+Hansen/M
+Hansiain/M
+Han/SM
+Hans/N
+hansom/MS
+Hanson/M
+Hanuka/S
+Hanukkah/M
+Hanukkahs
+Hapgood/M
+haphazardness/SM
+haphazard/SPY
+haplessness/MS
+hapless/YP
+haploid/S
+happed
+happening/M
+happen/JDGS
+happenstance/SM
+happily/U
+happiness/UMS
+happing
+Happy/M
+happy/UTPR
+Hapsburg/M
+hap/SMY
+Harald/M
+harangue/GDRS
+haranguer/M
+Harare
+harasser/M
+harass/LSRDZG
+harassment/SM
+Harbert/M
+harbinger/DMSG
+Harbin/M
+harborer/M
+harbor/ZGRDMS
+Harcourt/M
+hardback/SM
+hardball/SM
+hardboard/SM
+hardboiled
+hardbound
+hardcore/MS
+hardcover/SM
+hardened/U
+hardener/M
+hardening/M
+harden/ZGRD
+hardhat/S
+hardheadedness/SM
+hardheaded/YP
+hardheartedness/SM
+hardhearted/YP
+hardihood/MS
+hardily
+hardiness/SM
+Harding/M
+Hardin/M
+hardliner/S
+hardness/MS
+hardscrabble
+hardshell
+hardship/MS
+hardstand/S
+hardtack/MS
+hardtop/MS
+hardware/SM
+hardwire/DSG
+hardwood/MS
+hardworking
+Hardy/M
+hard/YNRPJGXTS
+hardy/PTRS
+harebell/MS
+harebrained
+harelip/MS
+harelipped
+hare/MGDS
+harem/SM
+Hargreaves/M
+hark/GDS
+Harland/M
+Harlan/M
+Harlem/M
+Harlene/M
+Harlen/M
+Harlequin
+harlequin/MS
+Harley/M
+Harlie/M
+Harli/M
+Harlin/M
+harlotry/MS
+harlot/SM
+Harlow/M
+Harman/M
+harmed/U
+harmer/M
+harmfulness/MS
+harmful/PY
+harmlessness/SM
+harmless/YP
+harm/MDRGS
+Harmonia/M
+harmonically
+harmonica/MS
+harmonic/S
+harmonics/M
+Harmonie/M
+harmonious/IPY
+harmoniousness/MS
+harmoniousness's/I
+harmonium/MS
+harmonization/A
+harmonizations
+harmonization's
+harmonized/U
+harmonizer/M
+harmonizes/UA
+harmonize/ZGSRD
+Harmon/M
+harmony/EMS
+Harmony/M
+harness/DRSMG
+harnessed/U
+harnesser/M
+harnesses/U
+Harold/M
+Haroun/M
+harper/M
+Harper/M
+harping/M
+harpist/SM
+harp/MDRJGZS
+Harp/MR
+harpooner/M
+harpoon/SZGDRM
+harpsichordist/MS
+harpsichord/SM
+harpy/SM
+Harpy/SM
+Harrell/M
+harridan/SM
+Harrie/M
+harrier/M
+Harriet/M
+Harrietta/M
+Harriette/M
+Harriett/M
+Harrington/M
+Harriot/M
+Harriott/M
+Harrisburg/M
+Harri/SM
+Harrisonburg/M
+Harrison/M
+harrower/M
+harrow/RDMGS
+harrumph/SDG
+Harry/M
+harry/RSDGZ
+harshen/GD
+harshness/SM
+harsh/TRNYP
+Harte/M
+Hartford/M
+Hartley/M
+Hartline/M
+Hart/M
+Hartman/M
+hart/MS
+Hartwell/M
+Harvard/M
+harvested/U
+harvester/M
+harvestman/M
+harvest/MDRZGS
+Harvey/MS
+Harv/M
+Harwell/M
+Harwilll/M
+has
+Hasbro/M
+hash/AGSD
+Hasheem/M
+hasher/M
+Hashim/M
+hashing/M
+hashish/MS
+hash's
+Hasidim
+Haskell/M
+Haskel/M
+Haskins/M
+Haslett/M
+hasn't
+hasp/GMDS
+hassle/MGRSD
+hassock/MS
+haste/MS
+hastener/M
+hasten/GRD
+hast/GXJDN
+Hastie/M
+hastily
+hastiness/MS
+Hastings/M
+Hasty/M
+hasty/RPT
+hatchback/SM
+hatcheck/S
+hatched/U
+hatcher/M
+hatchery/MS
+hatchet/MDSG
+hatching/M
+hatch/RSDJG
+Hatchure/M
+hatchway/MS
+hatefulness/MS
+hateful/YP
+hater/M
+hate/S
+Hatfield/M
+Hathaway/M
+hatless
+hat/MDRSZG
+hatred/SM
+hatstands
+hatted
+Hatteras/M
+hatter/SM
+Hattie/M
+Hatti/M
+hatting
+Hatty/M
+hauberk/SM
+Haugen/M
+haughtily
+haughtiness/SM
+haughty/TPR
+haulage/MS
+hauler/M
+haul/SDRGZ
+haunch/GMSD
+haunter/M
+haunting/Y
+haunt/JRDSZG
+Hauptmann/M
+Hausa/M
+Hausdorff/M
+Hauser/M
+hauteur/MS
+Havana/SM
+Havarti
+Havel/M
+haven/DMGS
+Haven/M
+haven't
+haver/G
+haversack/SM
+have/ZGSR
+havocked
+havocking
+havoc/SM
+Haw
+Hawaiian/S
+Hawaii/M
+hawker/M
+hawk/GZSDRM
+Hawking
+hawking/M
+Hawkins/M
+hawkishness/S
+hawkish/P
+Hawley/M
+haw/MDSG
+hawser/M
+haws/RZ
+Hawthorne/M
+hawthorn/MS
+haycock/SM
+Hayden/M
+Haydn/M
+Haydon/M
+Hayes
+hayfield/MS
+hay/GSMDR
+Hayley/M
+hayloft/MS
+haymow/MS
+Haynes
+hayrick/MS
+hayride/MS
+hayseed/MS
+Hay/SM
+haystack/SM
+haywain
+Hayward/M
+haywire/MS
+Haywood/M
+Hayyim/M
+hazard/MDGS
+hazardousness/M
+hazardous/PY
+haze/DSRJMZG
+Hazel/M
+hazel/MS
+hazelnut/SM
+Haze/M
+hazer/M
+hazily
+haziness/MS
+hazing/M
+Hazlett/M
+Hazlitt/M
+hazy/PTR
+HBO/M
+hdqrs
+HDTV
+headache/MS
+headband/SM
+headboard/MS
+headcount
+headdress/MS
+header/M
+headfirst
+headgear/SM
+headhunter/M
+headhunting/M
+headhunt/ZGSRDMJ
+headily
+headiness/S
+heading/M
+headlamp/S
+headland/MS
+headlessness/M
+headless/P
+headlight/MS
+headline/DRSZMG
+headliner/M
+headlock/MS
+headlong
+Head/M
+headman/M
+headmaster/MS
+headmastership/M
+headmen
+headmistress/MS
+headphone/SM
+headpiece/SM
+headpin/MS
+headquarter/GDS
+headrest/MS
+headroom/SM
+headscarf/M
+headset/SM
+headship/SM
+headshrinker/MS
+head/SJGZMDR
+headsman/M
+headsmen
+headstall/SM
+headstand/MS
+headstock/M
+headstone/MS
+headstrong
+headwaiter/SM
+headwall/S
+headwater/S
+headway/MS
+headwind/SM
+headword/MS
+heady/PTR
+heal/DRHSGZ
+healed/U
+healer/M
+Heall/M
+healthfully
+healthfulness/SM
+healthful/U
+healthily/U
+healthiness/MSU
+health/M
+healths
+healthy/URPT
+heap/SMDG
+heard/UA
+hearer/M
+hearing/AM
+hearken/SGD
+hearsay/SM
+hearse/M
+hears/SDAG
+Hearst/M
+heartache/SM
+heartbeat/MS
+heartbreak/GMS
+heartbreaking/Y
+heartbroke
+heartbroken
+heartburning/M
+heartburn/SGM
+hearted/Y
+hearten/EGDS
+heartening/EY
+heartfelt
+hearth/M
+hearthrug
+hearths
+hearthstone/MS
+heartily
+heartiness/SM
+heartland/SM
+heartlessness/SM
+heartless/YP
+heartrending/Y
+heartsickness/MS
+heartsick/P
+heart/SMDNXG
+heartstrings
+heartthrob/MS
+heartwarming
+Heartwood/M
+heartwood/SM
+hearty/TRSP
+hear/ZTSRHJG
+heatedly
+heated/UA
+heater/M
+heathendom/SM
+heathenish/Y
+heathenism/MS
+heathen/M
+heather/M
+Heather/M
+heathery
+Heathkit/M
+heathland
+Heathman/M
+Heath/MR
+heath/MRNZX
+heaths
+heatproof
+heats/A
+heat/SMDRGZBJ
+heatstroke/MS
+heatwave
+heave/DSRGZ
+heavenliness/M
+heavenly/PTR
+heaven/SYM
+heavenward/S
+heaver/M
+heaves/M
+heavily
+heaviness/MS
+Heaviside/M
+heavyhearted
+heavyset
+heavy/TPRS
+heavyweight/SM
+Hebe/M
+hebephrenic
+Hebert/M
+Heb/M
+Hebraic
+Hebraism/MS
+Hebrew/SM
+Hebrides/M
+Hecate/M
+hecatomb/M
+heckler/M
+heckle/RSDZG
+heck/S
+hectare/MS
+hectically
+hectic/S
+hectogram/MS
+hectometer/SM
+Hector/M
+hector/SGD
+Hecuba/M
+he'd
+Heda/M
+Hedda/M
+Heddie/M
+Heddi/M
+hedge/DSRGMZ
+hedgehog/MS
+hedgehopped
+hedgehopping
+hedgehop/S
+hedger/M
+hedgerow/SM
+hedging/Y
+Hedi/M
+hedonism/SM
+hedonistic
+hedonist/MS
+Hedvige/M
+Hedvig/M
+Hedwiga/M
+Hedwig/M
+Hedy/M
+heeded/U
+heedfulness/M
+heedful/PY
+heeding/U
+heedlessness/SM
+heedless/YP
+heed/SMGD
+heehaw/DGS
+heeler/M
+heeling/M
+heelless
+heel/SGZMDR
+Heep/M
+Hefner/M
+heft/GSD
+heftily
+heftiness/SM
+hefty/TRP
+Hegelian
+Hegel/M
+hegemonic
+hegemony/MS
+Hegira/M
+hegira/S
+Heida/M
+Heidegger/M
+Heidelberg/M
+Heidie/M
+Heidi/M
+heifer/MS
+Heifetz/M
+heighten/GD
+height/SMNX
+Heimlich/M
+Heindrick/M
+Heineken/M
+Heine/M
+Heinlein/M
+heinousness/SM
+heinous/PY
+Heinrich/M
+Heinrick/M
+Heinrik/M
+Heinze/M
+Heinz/M
+heiress/MS
+heirloom/MS
+heir/SDMG
+Heisenberg/M
+Heiser/M
+heister/M
+heist/GSMRD
+Hejira's
+Helaina/M
+Helaine/M
+held
+Helena/M
+Helene/M
+Helenka/M
+Helen/M
+Helga/M
+Helge/M
+helical/Y
+helices/M
+helicon/M
+Helicon/M
+helicopter/GSMD
+heliocentric
+heliography/M
+Heliopolis/M
+Helios/M
+heliosphere
+heliotrope/SM
+heliport/MS
+helium/MS
+helix/M
+he'll
+hellbender/M
+hellbent
+hellcat/SM
+hellebore/SM
+Hellene/SM
+Hellenic
+Hellenism/MS
+Hellenistic
+Hellenist/MS
+Hellenization/M
+Hellenize
+heller/M
+Heller/M
+Hellespont/M
+hellfire/M
+hell/GSMDR
+hellhole/SM
+Helli/M
+hellion/SM
+hellishness/SM
+hellish/PY
+Hellman/M
+hello/GMS
+Hell's
+helluva
+helmed
+helmet/GSMD
+Helmholtz/M
+helming
+helms
+helm's
+helmsman/M
+helmsmen
+helm/U
+Helmut/M
+Héloise/M
+helot/S
+helper/M
+helpfulness/MS
+helpful/UY
+help/GZSJDR
+helping/M
+helplessness/SM
+helpless/YP
+helpline/S
+helpmate/SM
+helpmeet's
+Helsa/M
+Helsinki/M
+helve/GMDS
+Helvetian/S
+Helvetius/M
+Helyn/M
+He/M
+hematite/MS
+hematologic
+hematological
+hematologist/SM
+hematology/MS
+heme/MS
+Hemingway/M
+hemisphere/MSD
+hemispheric
+hemispherical
+hemline/SM
+hemlock/MS
+hemmed
+hemmer/SM
+hemming
+hem/MS
+hemoglobin/MS
+hemolytic
+hemophiliac/SM
+hemophilia/SM
+hemorrhage/GMDS
+hemorrhagic
+hemorrhoid/MS
+hemostat/SM
+hemp/MNS
+h/EMS
+hemstitch/DSMG
+henceforth
+henceforward
+hence/S
+Hench/M
+henchman/M
+henchmen
+Henderson/M
+Hendrick/SM
+Hendrickson/M
+Hendrika/M
+Hendrik/M
+Hendrix/M
+henge/M
+Henka/M
+Henley/M
+hen/MS
+henna/MDSG
+Hennessey/M
+henning
+henpeck/GSD
+Henrie/M
+Henrieta/M
+Henrietta/M
+Henriette/M
+Henrik/M
+Henri/M
+Henryetta/M
+henry/M
+Henry/M
+Hensley/M
+Henson/M
+heparin/MS
+hepatic/S
+hepatitides
+hepatitis/M
+Hepburn/M
+Hephaestus/M
+Hephzibah/M
+hepper
+heppest
+Hepplewhite
+hep/S
+heptagonal
+heptagon/SM
+heptane/M
+heptathlon/S
+her
+Heracles/M
+Heraclitus/M
+heralded/U
+heraldic
+herald/MDSG
+heraldry/MS
+Hera/M
+herbaceous
+herbage/MS
+herbalism
+herbalist/MS
+herbal/S
+Herbart/M
+Herbert/M
+herbicidal
+herbicide/MS
+Herbie/M
+herbivore/SM
+herbivorous/Y
+Herb/M
+herb/MS
+Herby/M
+Herc/M
+Herculaneum/M
+herculean
+Herculean
+Hercule/MS
+Herculie/M
+herder/M
+Herder/M
+herd/MDRGZS
+herdsman/M
+herdsmen
+hereabout/S
+hereafter/S
+hereby
+hereditary
+heredity/MS
+Hereford/SM
+herein
+hereinafter
+here/IS
+hereof
+hereon
+here's
+heres/M
+heresy/SM
+heretical
+heretic/SM
+hereto
+heretofore
+hereunder
+hereunto
+hereupon
+herewith
+Heriberto/M
+heritable
+heritage/MS
+heritor/IM
+Herkimer/M
+Herman/M
+Hermann/M
+hermaphrodite/SM
+hermaphroditic
+Hermaphroditus/M
+hermeneutic/S
+hermeneutics/M
+Hermes
+hermetical/Y
+hermetic/S
+Hermia/M
+Hermie/M
+Hermina/M
+Hermine/M
+Herminia/M
+Hermione/M
+hermitage/SM
+Hermite/M
+hermitian
+hermit/MS
+Hermon/M
+Hermosa/M
+Hermosillo/M
+Hermy/M
+Hernandez/M
+Hernando/M
+hernial
+hernia/MS
+herniate/NGXDS
+Herod/M
+Herodotus/M
+heroes
+heroically
+heroics
+heroic/U
+heroine/SM
+heroin/MS
+heroism/SM
+Herold/M
+hero/M
+heron/SM
+herpes/M
+herpetologist/SM
+herpetology/MS
+Herrera/M
+Herrick/M
+herringbone/SDGM
+Herring/M
+herring/SM
+Herrington/M
+Herr/MG
+Herschel/M
+Hersch/M
+herself
+Hersey/M
+Hershel/M
+Hershey/M
+Hersh/M
+Herta/M
+Hertha/M
+hertz/M
+Hertz/M
+Hertzog/M
+Hertzsprung/M
+Herve/M
+Hervey/M
+Herzegovina/M
+Herzl/M
+hes
+Hesiod/M
+hesitance/S
+hesitancy/SM
+hesitantly
+hesitant/U
+hesitater/M
+hesitate/XDRSNG
+hesitating/UY
+hesitation/M
+Hesperus/M
+Hesse/M
+Hessian/MS
+Hess/M
+Hester/M
+Hesther/M
+Hestia/M
+Heston/M
+heterodox
+heterodoxy/MS
+heterodyne
+heterogamous
+heterogamy/M
+heterogeneity/SM
+heterogeneousness/M
+heterogeneous/PY
+heterosexuality/SM
+heterosexual/YMS
+heterostructure
+heterozygous
+Hettie/M
+Hetti/M
+Hetty/M
+Heublein/M
+heuristically
+heuristic/SM
+Heusen/M
+Heuser/M
+he/VMZ
+hew/DRZGS
+Hewe/M
+hewer/M
+Hewet/M
+Hewett/M
+Hewie/M
+Hewitt/M
+Hewlett/M
+Hew/M
+hexachloride/M
+hexadecimal/YS
+hexafluoride/M
+hexagonal/Y
+hexagon/SM
+hexagram/SM
+hexameter/SM
+hex/DSRG
+hexer/M
+hey
+heyday/MS
+Heyerdahl/M
+Heywood/M
+Hezekiah/M
+hf
+HF
+Hf/M
+Hg/M
+hgt
+hgwy
+HHS
+HI
+Hialeah/M
+hiatus/SM
+Hiawatha/M
+hibachi/MS
+hibernate/XGNSD
+hibernation/M
+hibernator/SM
+Hibernia/M
+Hibernian/S
+hibiscus/MS
+hiccup/MDGS
+hickey/SM
+Hickey/SM
+Hickman/M
+Hickok/M
+hickory/MS
+hick/SM
+Hicks/M
+hi/D
+hidden/U
+hideaway/SM
+hidebound
+hideousness/SM
+hideous/YP
+hideout/MS
+hider/M
+hide/S
+hiding/M
+hid/ZDRGJ
+hieing
+hierarchal
+hierarchic
+hierarchical/Y
+hierarchy/SM
+hieratic
+hieroglyph
+hieroglyphic/S
+hieroglyphics/M
+hieroglyphs
+Hieronymus/M
+hie/S
+hifalutin
+Higashiosaka
+Higgins/M
+highball/GSDM
+highborn
+highboy/MS
+highbrow/SM
+highchair/SM
+highfalutin
+Highfield/M
+highhandedness/SM
+highhanded/PY
+highish
+Highlander/SM
+Highlands
+highland/ZSRM
+highlight/GZRDMS
+Highness/M
+highness/MS
+highpoint
+high/PYRT
+highroad/MS
+highs
+hight
+hightail/DGS
+highwayman/M
+highwaymen
+highway/MS
+hijacker/M
+hijack/JZRDGS
+hiker/M
+hike/ZGDSR
+Hilario/M
+hilariousness/MS
+hilarious/YP
+hilarity/MS
+Hilarius/M
+Hilary/M
+Hilbert/M
+Hildagarde/M
+Hildagard/M
+Hilda/M
+Hildebrand/M
+Hildegaard/M
+Hildegarde/M
+Hilde/M
+Hildy/M
+Hillard/M
+Hillary/M
+hillbilly/MS
+Hillcrest/M
+Hillel/M
+hiller/M
+Hillery/M
+hill/GSMDR
+Hilliard/M
+Hilliary/M
+Hillie/M
+Hillier/M
+hilliness/SM
+Hill/M
+hillman
+hillmen
+hillock/SM
+Hillsboro/M
+Hillsdale/M
+hillside/SM
+hilltop/MS
+hillwalking
+Hillyer/M
+Hilly/RM
+hilly/TRP
+hilt/MDGS
+Hilton/M
+Hi/M
+Himalaya/MS
+Himalayan/S
+Himmler/M
+him/S
+himself
+Hinayana/M
+Hinda/M
+Hindemith/M
+Hindenburg/M
+hindered/U
+hinderer/M
+hinder/GRD
+Hindi/M
+hindmost
+hindquarter/SM
+hindrance/SM
+hind/RSZ
+hindsight/SM
+Hinduism/SM
+Hindu/MS
+Hindustani/MS
+Hindustan/M
+Hines/M
+hinger
+hinge's
+hinge/UDSG
+Hinkle/M
+Hinsdale/M
+hinterland/MS
+hinter/M
+hint/GZMDRS
+Hinton/M
+Hinze/M
+hipbone/SM
+hipness/S
+Hipparchus/M
+hipped
+hipper
+hippest
+hippie/MTRS
+hipping/M
+Hippocrates/M
+Hippocratic
+hippodrome/MS
+hippo/MS
+hippopotamus/SM
+hip/PSM
+hippy's
+hipster/MS
+hiragana
+Hiram/M
+hire/AGSD
+hireling/SM
+hirer/SM
+Hirey/M
+hiring/S
+Hirohito/M
+Hiroshi/M
+Hiroshima/M
+Hirsch/M
+hirsuteness/MS
+hirsute/P
+his
+Hispanic/SM
+Hispaniola/M
+hiss/DSRMJG
+hisser/M
+hissing/M
+Hiss/M
+histamine/SM
+histidine/SM
+histochemic
+histochemical
+histochemistry/M
+histogram/MS
+histological
+histologist/MS
+histology/SM
+historian/MS
+historic
+historicalness/M
+historical/PY
+historicism/M
+historicist/M
+historicity/MS
+historiographer/SM
+historiography/MS
+history/MS
+histrionically
+histrionic/S
+histrionics/M
+hist/SDG
+Hitachi/M
+Hitchcock/M
+hitcher/MS
+hitchhike/RSDGZ
+hitch/UGSD
+hither
+hitherto
+Hitler/SM
+hitless
+hit/MS
+hittable
+hitter/SM
+hitting
+Hittite/SM
+HIV
+hive/MGDS
+h'm
+HM
+HMO
+Hmong
+HMS
+hoarder/M
+hoarding/M
+hoard/RDJZSGM
+hoarfrost/SM
+hoariness/MS
+hoar/M
+hoarseness/SM
+hoarse/RTYP
+hoary/TPR
+hoaxer/M
+hoax/GZMDSR
+Hobard/M
+Hobart/M
+hobbed
+Hobbes/M
+hobbing
+hobbit
+hobbler/M
+hobble/ZSRDG
+Hobbs/M
+hobbyhorse/SM
+hobbyist/SM
+hobby/SM
+Hobday/M
+Hobey/M
+hobgoblin/MS
+Hobie/M
+hobnail/GDMS
+hobnobbed
+hobnobbing
+hobnob/S
+Hoboken/M
+hobo/SDMG
+hob/SM
+hoc
+hocker/M
+hockey/SM
+hock/GDRMS
+Hockney/M
+hockshop/SM
+hodge/MS
+Hodge/MS
+hodgepodge/SM
+Hodgkin/M
+ho/DRYZ
+hod/SM
+Hoebart/M
+hoecake/SM
+hoedown/MS
+hoeing
+hoer/M
+hoe/SM
+Hoffa/M
+Hoff/M
+Hoffman/M
+Hofstadter/M
+Hogan/M
+hogan/SM
+Hogarth/M
+hogback/MS
+hogged
+hogger
+hogging
+hoggish/Y
+hogshead/SM
+hog/SM
+hogtie/SD
+hogtying
+hogwash/SM
+Hohenlohe/M
+Hohenstaufen/M
+Hohenzollern/M
+Hohhot/M
+hoister/M
+hoist/GRDS
+hoke/DSG
+hokey/PRT
+hokier
+hokiest
+Hokkaido/M
+hokum/MS
+Hokusai/M
+Holbein/M
+Holbrook/M
+Holcomb/M
+holdall/MS
+Holden/M
+holder/M
+Holder/M
+holding/IS
+holding's
+hold/NRBSJGZ
+holdout/SM
+holdover/SM
+holdup/MS
+hole/MGDS
+holey
+holiday/GRDMS
+Holiday/M
+holidaymaker/S
+holier/U
+Holiness/MS
+holiness/MSU
+holistic
+holistically
+hollandaise
+Hollandaise/M
+Hollander/M
+Holland/RMSZ
+holler/GDS
+Hollerith/M
+Holley/M
+Hollie/M
+Holli/SM
+Hollister/M
+Holloway/M
+hollowness/MS
+hollow/RDYTGSP
+hollowware/M
+Hollyanne/M
+hollyhock/MS
+Holly/M
+holly/SM
+Hollywood/M
+Holman/M
+Holmes
+holmium/MS
+Holm/M
+Holocaust
+holocaust/MS
+Holocene
+hologram/SM
+holograph/GMD
+holographic
+holographs
+holography/MS
+Holstein/MS
+holster/MDSG
+Holst/M
+Holt/M
+Holyoke/M
+holy/SRTP
+holystone/MS
+Holzman/M
+Ho/M
+homage/MGSRD
+homager/M
+hombre/SM
+homburg/SM
+homebody/MS
+homebound
+homeboy/S
+homebuilder/S
+homebuilding
+homebuilt
+homecoming/MS
+home/DSRMYZG
+homegrown
+homeland/SM
+homelessness/SM
+homeless/P
+homelike
+homeliness/SM
+homely/RPT
+homemade
+homemake/JRZG
+homemaker/M
+homemaking/M
+homeomorphic
+homeomorphism/MS
+homeomorph/M
+homeopath
+homeopathic
+homeopaths
+homeopathy/MS
+homeostases
+homeostasis/M
+homeostatic
+homeowner/S
+homeownership
+homepage
+Homere/M
+homer/GDM
+Homeric
+homerists
+Homer/M
+homeroom/MS
+Homerus/M
+homeschooling/S
+homesickness/MS
+homesick/P
+homespun/S
+homesteader/M
+homestead/GZSRDM
+homestretch/SM
+hometown/SM
+homeward
+homeworker/M
+homework/ZSMR
+homeyness/MS
+homey/PS
+homicidal/Y
+homicide/SM
+homier
+homiest
+homiletic/S
+homily/SM
+hominess's
+homing/M
+hominid/MS
+hominy/SM
+Hom/MR
+homogamy/M
+homogenate/MS
+homogeneity/ISM
+homogeneous/PY
+homogenization/MS
+homogenize/DRSGZ
+homogenizer/M
+homograph/M
+homographs
+homological
+homologous
+homologue/M
+homology/MS
+homomorphic
+homomorphism/SM
+homonym/SM
+homophobia/S
+homophobic
+homophone/MS
+homopolymers
+homosexuality/SM
+homosexual/YMS
+homo/SM
+homotopy
+homozygous/Y
+honcho/DSG
+Honda/M
+Hondo/M
+Honduran/S
+Honduras/M
+Honecker/M
+hone/SM
+honestly/E
+honest/RYT
+honesty/ESM
+honeybee/SM
+honeycomb/SDMG
+honeydew/SM
+honey/GSMD
+honeylocust
+Honey/M
+honeymooner/M
+honeymoon/RDMGZS
+honeysuckle/MS
+Honeywell/M
+hong/M
+Honiara/M
+honker/M
+honk/GZSDRM
+honky/SM
+Hon/M
+hon/MDRSZTG
+Honolulu/M
+honorableness/SM
+honorable/PSM
+honorables/U
+honorablies/U
+honorably/UE
+honorarily
+honorarium/SM
+honorary/S
+honored/U
+honoree/S
+honor/ERDBZGS
+honorer/EM
+Honoria/M
+honorific/S
+Honor/M
+honor's
+honors/A
+Honshu/M
+hooch/MS
+hoodedness/M
+hooded/P
+hoodlum/SM
+Hood/M
+hood/MDSG
+hoodoo/DMGS
+hoodwinker/M
+hoodwink/SRDG
+hooey/SM
+hoof/DRMSG
+hoofer/M
+hoofmark/S
+hookah/M
+hookahs
+hookedness/M
+hooked/P
+Hooke/MR
+hooker/M
+Hooker/M
+hookey's
+hook/GZDRMS
+hooks/U
+hookup/SM
+hookworm/MS
+hooky/SRMT
+hooliganism/SM
+hooligan/SM
+hooper/M
+Hooper/M
+hoopla/SM
+hoop/MDRSG
+hooray/SMDG
+hoosegow/MS
+Hoosier/SM
+hootch's
+hootenanny/SM
+hooter/M
+hoot/MDRSGZ
+Hoover/MS
+hooves/M
+hoped/U
+hopefulness/MS
+hopeful/SPY
+hopelessness/SM
+hopeless/YP
+Hope/M
+hoper/M
+hope/SM
+Hopewell/M
+Hopi/SM
+Hopkinsian/M
+Hopkins/M
+hopped
+Hopper/M
+hopper/MS
+hopping/M
+hoppled
+hopples
+hopscotch/MDSG
+hop/SMDRG
+Horace/M
+Horacio/M
+Horatia/M
+Horatio/M
+Horatius/M
+horde/DSGM
+horehound/MS
+horizon/MS
+horizontal/YS
+Hormel/M
+hormonal/Y
+hormone/MS
+Hormuz/M
+hornbeam/M
+hornblende/MS
+Hornblower/M
+hornedness/M
+horned/P
+Horne/M
+hornet/MS
+horn/GDRMS
+horniness/M
+hornless
+hornlike
+Horn/M
+hornpipe/MS
+horny/TRP
+horologic
+horological
+horologist/MS
+horology/MS
+horoscope/MS
+Horowitz/M
+horrendous/Y
+horribleness/SM
+horrible/SP
+horribly
+horridness/M
+horrid/PY
+horrific
+horrifically
+horrify/DSG
+horrifying/Y
+horror/MS
+hors/DSGX
+horseback/MS
+horsedom
+horseflesh/M
+horsefly/MS
+horsehair/SM
+horsehide/SM
+horselaugh/M
+horselaughs
+horseless
+horselike
+horsely
+horseman/M
+horsemanship/MS
+horsemen
+horseplayer/M
+horseplay/SMR
+horsepower/SM
+horseradish/SM
+horse's
+horseshoeing
+horseshoe/MRSD
+horseshoer/M
+horsetail/SM
+horse/UGDS
+horsewhipped
+horsewhipping
+horsewhip/SM
+horsewoman/M
+horsewomen
+horsey
+horsier
+horsiest
+horsing/M
+Horst/M
+hortatory
+Horten/M
+Hortense/M
+Hortensia/M
+horticultural
+horticulture/SM
+horticulturist/SM
+Hort/MN
+Horton/M
+Horus/M
+hosanna/SDG
+Hosea/M
+hose/M
+hosepipe
+hos/GDS
+hosier/MS
+hosiery/SM
+hosp
+hospice/MS
+hospitable/I
+hospitably/I
+hospitality/MS
+hospitality's/I
+hospitalization/MS
+hospitalize/GSD
+hospital/MS
+hostage/MS
+hosteler/M
+hostelry/MS
+hostel/SZGMRD
+hostess/MDSG
+hostile/YS
+hostility/SM
+hostler/MS
+Host/MS
+host/MYDGS
+hotbed/MS
+hotblooded
+hotbox/MS
+hotcake/S
+hotchpotch/M
+hotelier/MS
+hotelman/M
+hotel/MS
+hotfoot/DGS
+hothead/DMS
+hotheadedness/SM
+hotheaded/PY
+hothouse/MGDS
+hotness/MS
+hotplate/SM
+hotpot/M
+hot/PSY
+hotrod
+hotshot/S
+hotted
+Hottentot/SM
+hotter
+hottest
+hotting
+Houdaille/M
+Houdini/M
+hough/M
+hounder/M
+hounding/M
+hound/MRDSG
+hourglass/MS
+houri/MS
+hourly/S
+hour/YMS
+house/ASDG
+houseboat/SM
+housebound
+houseboy/SM
+housebreaker/M
+housebreaking/M
+housebreak/JSRZG
+housebroke
+housebroken
+housebuilding
+housecleaning/M
+houseclean/JDSG
+housecoat/MS
+housefly/MS
+houseful/SM
+householder/M
+household/ZRMS
+househusband/S
+housekeeper/M
+housekeeping/M
+housekeep/JRGZ
+houselights
+House/M
+housemaid/MS
+houseman/M
+housemen
+housemother/MS
+housemoving
+houseparent/SM
+houseplant/S
+houser
+house's
+housetop/MS
+housewares
+housewarming/MS
+housewifeliness/M
+housewifely/P
+housewife/YM
+housewives
+houseworker/M
+housework/ZSMR
+housing/MS
+Housman/M
+Houston/M
+Houyhnhnm/M
+HOV
+hovel/GSMD
+hovercraft/M
+hoverer/M
+hover/GRD
+hove/ZR
+Howard/M
+howbeit
+howdah/M
+howdahs
+howdy/GSD
+Howell/MS
+Howe/M
+however
+Howey/M
+Howie/M
+howitzer/MS
+howler/M
+howl/GZSMDR
+Howrah/M
+how/SM
+howsoever
+hoyden/DMGS
+hoydenish
+Hoyle/SM
+hoy/M
+Hoyt/M
+hp
+HP
+HQ
+hr
+HR
+HRH
+Hrothgar/M
+hrs
+h's
+H's
+HS
+HST
+ht
+HTML
+Hts/M
+HTTP
+Huang/M
+huarache/SM
+hubba
+Hubbard/M
+Hubble/M
+hubbub/SM
+hubby/SM
+hubcap/SM
+Huber/M
+Hube/RM
+Hubert/M
+Huberto/M
+Hubey/M
+Hubie/M
+hub/MS
+hubris/SM
+huckleberry/SM
+Huck/M
+huckster/SGMD
+HUD
+Huddersfield/M
+huddler/M
+huddle/RSDMG
+Hudson/M
+hue/MDS
+Huerta/M
+Huey/M
+huffily
+huffiness/SM
+Huff/M
+Huffman/M
+huff/SGDM
+huffy/TRP
+hugeness/MS
+huge/YP
+hugged
+hugger
+hugging/S
+Huggins
+Hughie/M
+Hugh/MS
+Hugibert/M
+Hugo/M
+hug/RTS
+Huguenot/SM
+Hugues/M
+huh
+huhs
+Hui/M
+Huitzilopitchli/M
+hula/MDSG
+Hulda/M
+hulk/GDMS
+hullabaloo/SM
+huller/M
+hulling/M
+Hull/M
+hull/MDRGZS
+hullo/GSDM
+humane/IY
+humaneness/SM
+humaner
+humanest
+human/IPY
+humanism/SM
+humanistic
+humanist/SM
+humanitarianism/SM
+humanitarian/S
+humanity/ISM
+humanization/CSM
+humanized/C
+humanizer/M
+humanize/RSDZG
+humanizes/IAC
+humanizing/C
+humankind/M
+humannesses
+humanness/IM
+humanoid/S
+humans
+Humbert/M
+Humberto/M
+humbleness/SM
+humble/TZGPRSDJ
+humbly
+Humboldt/M
+humbugged
+humbugging
+humbug/MS
+humdinger/MS
+humdrum/S
+Hume/M
+humeral/S
+humeri
+humerus/M
+Humfrey/M
+Humfrid/M
+Humfried/M
+humidification/MC
+humidifier/CM
+humidify/RSDCXGNZ
+humidistat/M
+humidity/MS
+humidor/MS
+humid/Y
+humiliate/SDXNG
+humiliating/Y
+humiliation/M
+humility/MS
+hummed
+Hummel/M
+hummer/SM
+humming
+hummingbird/SM
+hummock/MDSG
+hummocky
+hummus/S
+humongous
+humored/U
+humorist/MS
+humorlessness/MS
+humorless/PY
+humorousness/MS
+humorous/YP
+humor/RDMZGS
+humpback/SMD
+hump/GSMD
+humph/DG
+Humphrey/SM
+humphs
+Humpty/M
+hum/S
+humus/SM
+Humvee
+hunchback/DSM
+hunch/GMSD
+hundredfold/S
+hundred/SHRM
+hundredths
+hundredweight/SM
+Hunfredo/M
+hung/A
+Hungarian/MS
+Hungary/M
+hunger/SDMG
+Hung/M
+hungover
+hungrily
+hungriness/SM
+hungry/RTP
+hunker/DG
+hunky/RST
+hunk/ZRMS
+Hun/MS
+hunter/M
+Hunter/M
+hunt/GZJDRS
+hunting/M
+Huntington/M
+Huntlee/M
+Huntley/M
+Hunt/MR
+huntress/MS
+huntsman/M
+huntsmen
+Huntsville/M
+hurdle/JMZGRSD
+hurdler/M
+hurl/DRGZJS
+Hurlee/M
+Hurleigh/M
+hurler/M
+Hurley/M
+hurling/M
+Huron/SM
+hurray/SDG
+hurricane/MS
+hurriedness/M
+hurried/UY
+hurry/RSDG
+Hurst/M
+hurter/M
+hurtfulness/MS
+hurtful/PY
+hurting/Y
+hurtle/SDG
+hurts
+hurt/U
+Hurwitz/M
+Hus
+Husain's
+husbander/M
+husband/GSDRYM
+husbandman/M
+husbandmen
+husbandry/SM
+Husein/M
+hush/DSG
+husker/M
+huskily
+huskiness/MS
+husking/M
+husk/SGZDRM
+husky/RSPT
+hussar/MS
+Hussein/M
+Husserl/M
+hussy/SM
+hustings/M
+hustler/M
+hustle/RSDZG
+Huston/M
+Hutchins/M
+Hutchinson/M
+Hutchison/M
+hutch/MSDG
+hut/MS
+hutted
+hutting
+Hutton/M
+Hutu/M
+Huxley/M
+Huygens/M
+huzzah/GD
+huzzahs
+hwy
+Hyacintha/M
+Hyacinthe/M
+Hyacinthia/M
+Hyacinthie/M
+hyacinth/M
+Hyacinth/M
+hyacinths
+Hyades
+hyaena's
+Hyannis/M
+Hyatt/M
+hybridism/SM
+hybridization/S
+hybridize/GSD
+hybrid/MS
+Hyde/M
+Hyderabad/M
+Hydra/M
+hydra/MS
+hydrangea/SM
+hydrant/SM
+hydrate/CSDNGX
+hydrate's
+hydration/MC
+hydraulically
+hydraulicked
+hydraulicking
+hydraulic/S
+hydraulics/M
+hydrazine/M
+hydride/MS
+hydrocarbon/SM
+hydrocephali
+hydrocephalus/MS
+hydrochemistry
+hydrochloric
+hydrochloride/M
+hydrodynamical
+hydrodynamic/S
+hydrodynamics/M
+hydroelectric
+hydroelectrically
+hydroelectricity/SM
+hydrofluoric
+hydrofoil/MS
+hydrogenate/CDSGN
+hydrogenate's
+hydrogenation/MC
+hydrogenations
+hydrogen/MS
+hydrogenous
+hydrological/Y
+hydrologist/MS
+hydrology/SM
+hydrolysis/M
+hydrolyzed/U
+hydrolyze/GSD
+hydromagnetic
+hydromechanics/M
+hydrometer/SM
+hydrometry/MS
+hydrophilic
+hydrophobia/SM
+hydrophobic
+hydrophone/SM
+hydroplane/DSGM
+hydroponic/S
+hydroponics/M
+hydro/SM
+hydrosphere/MS
+hydrostatic/S
+hydrostatics/M
+hydrotherapy/SM
+hydrothermal/Y
+hydrous
+hydroxide/MS
+hydroxy
+hydroxylate/N
+hydroxyl/SM
+hydroxyzine/M
+hyena/MS
+hygiene/MS
+hygienically
+hygienic/S
+hygienics/M
+hygienist/MS
+hygrometer/SM
+hygroscopic
+hying
+Hy/M
+Hyman/M
+hymeneal/S
+Hymen/M
+hymen/MS
+Hymie/M
+hymnal/SM
+hymnbook/S
+hymn/GSDM
+Hynda/M
+hype/MZGDSR
+hyperactive/S
+hyperactivity/SM
+hyperbola/MS
+hyperbole/MS
+hyperbolic
+hyperbolically
+hyperboloidal
+hyperboloid/SM
+hypercellularity
+hypercritical/Y
+hypercube/MS
+hyperemia/M
+hyperemic
+hyperfine
+hypergamous/Y
+hypergamy/M
+hyperglycemia/MS
+hyperinflation
+Hyperion/M
+hypermarket/SM
+hypermedia/S
+hyperplane/SM
+hyperplasia/M
+hypersensitiveness/MS
+hypersensitive/P
+hypersensitivity/MS
+hypersonic
+hyperspace/M
+hypersphere/M
+hypertension/MS
+hypertensive/S
+hypertext/SM
+hyperthyroid
+hyperthyroidism/MS
+hypertrophy/MSDG
+hypervelocity
+hyperventilate/XSDGN
+hyperventilation/M
+hyphenated/U
+hyphenate/NGXSD
+hyphenation/M
+hyphen/DMGS
+hypnoses
+hypnosis/M
+hypnotherapy/SM
+hypnotically
+hypnotic/S
+hypnotism/MS
+hypnotist/SM
+hypnotize/SDG
+hypoactive
+hypoallergenic
+hypocellularity
+hypochondriac/SM
+hypochondria/MS
+hypocrisy/SM
+hypocrite/MS
+hypocritical/Y
+hypodermic/S
+hypo/DMSG
+hypoglycemia/SM
+hypoglycemic/S
+hypophyseal
+hypophysectomized
+hypotenuse/MS
+hypothalami
+hypothalamic
+hypothalamically
+hypothalamus/M
+hypothermia/SM
+hypotheses
+hypothesis/M
+hypothesizer/M
+hypothesize/ZGRSD
+hypothetic
+hypothetical/Y
+hypothyroid
+hypothyroidism/SM
+hypoxia/M
+hyssop/MS
+hysterectomy/MS
+hysteresis/M
+hysteria/SM
+hysterical/YU
+hysteric/SM
+Hyundai/M
+Hz
+i
+I
+IA
+Iaccoca/M
+Iago/M
+Iain/M
+Ia/M
+iambi
+iambic/S
+iamb/MS
+iambus/SM
+Ian/M
+Ianthe/M
+Ibadan/M
+Ibbie/M
+Ibby/M
+Iberia/M
+Iberian/MS
+Ibero/M
+ibex/MS
+ibid
+ibidem
+ibis/SM
+IBM/M
+Ibo/M
+Ibrahim/M
+Ibsen/M
+ibuprofen/S
+Icarus/M
+ICBM/S
+ICC
+iceberg/SM
+iceboat/MS
+icebound
+icebox/MS
+icebreaker/SM
+icecap/SM
+ice/GDSC
+Icelander/M
+Icelandic
+Iceland/MRZ
+Ice/M
+iceman/M
+icemen
+icepack
+icepick/S
+ice's
+Ichabod/M
+ichneumon/M
+ichthyologist/MS
+ichthyology/MS
+icicle/SM
+icily
+iciness/SM
+icing/MS
+icky/RT
+iconic
+icon/MS
+iconoclasm/MS
+iconoclastic
+iconoclast/MS
+iconography/MS
+icosahedra
+icosahedral
+icosahedron/M
+ictus/SM
+ICU
+icy/RPT
+I'd
+ID
+Idahoan/S
+Idahoes
+Idaho/MS
+Idalia/M
+Idalina/M
+Idaline/M
+Ida/M
+idealism/MS
+idealistic
+idealistically
+idealist/MS
+idealization/MS
+idealized/U
+idealize/GDRSZ
+idealizer/M
+ideal/MYS
+idealogical
+idea/SM
+ideate/SN
+ideation/M
+Idelle/M
+Idell/M
+idem
+idempotent/S
+identicalness/M
+identical/YP
+identifiability
+identifiable/U
+identifiably
+identification/M
+identified/U
+identifier/M
+identify/XZNSRDG
+identity/SM
+ideogram/MS
+ideographic
+ideograph/M
+ideographs
+ideological/Y
+ideologist/SM
+ideologue/S
+ideology/SM
+ides
+Idette/M
+idiocy/MS
+idiolect/M
+idiomatically
+idiomatic/P
+idiom/MS
+idiopathic
+idiosyncrasy/SM
+idiosyncratic
+idiosyncratically
+idiotic
+idiotically
+idiot/MS
+idleness/MS
+idle/PZTGDSR
+idler/M
+id/MY
+idolater/MS
+idolatress/S
+idolatrous
+idolatry/SM
+idolization/SM
+idolized/U
+idolizer/M
+idolize/ZGDRS
+idol/MS
+ids
+IDs
+idyllic
+idyllically
+idyll/MS
+IE
+IEEE
+Ieyasu/M
+if
+iffiness/S
+iffy/TPR
+Ifni/M
+ifs
+Iggie/M
+Iggy/M
+igloo/MS
+Ignace/M
+Ignacio/M
+Ignacius/M
+Ignatius/M
+Ignazio/M
+Ignaz/M
+igneous
+ignitable
+ignite/ASDG
+igniter/M
+ignition/MS
+ignobleness/M
+ignoble/P
+ignobly
+ignominious/Y
+ignominy/MS
+ignoramus/SM
+ignorance/MS
+ignorantness/M
+ignorant/SPY
+ignorer/M
+ignore/SRDGB
+Igor/M
+iguana/MS
+Iguassu/M
+ii
+iii
+Ijsselmeer/M
+Ike/M
+Ikey/M
+Ikhnaton/M
+ikon's
+IL
+Ilaire/M
+Ila/M
+Ilario/M
+ilea
+Ileana/M
+Ileane/M
+ileitides
+ileitis/M
+Ilene/M
+ileum/M
+ilia
+iliac
+Iliad/MS
+Ilise/M
+ilium/M
+Ilka/M
+ilk/MS
+I'll
+Illa/M
+illegality/MS
+illegal/YS
+illegibility/MS
+illegible
+illegibly
+illegitimacy/SM
+illegitimate/SDGY
+illiberality/SM
+illiberal/Y
+illicitness/MS
+illicit/YP
+illimitableness/M
+illimitable/P
+Illinoisan/MS
+Illinois/M
+illiquid
+illiteracy/MS
+illiterateness/M
+illiterate/PSY
+Ill/M
+illness/MS
+illogicality/SM
+illogicalness/M
+illogical/PY
+illogic/M
+ill/PS
+illume/DG
+illuminate/XSDVNG
+Illuminati
+illuminatingly
+illuminating/U
+illumination/M
+illumine/BGSD
+illusionary
+illusion/ES
+illusionist/MS
+illusion's
+illusiveness/M
+illusive/PY
+illusoriness/M
+illusory/P
+illustrated/U
+illustrate/VGNSDX
+illustration/M
+illustrative/Y
+illustrator/SM
+illustriousness/SM
+illustrious/PY
+illus/V
+illy
+Ilona/M
+Ilsa/M
+Ilse/M
+Ilysa/M
+Ilyse/M
+Ilyssa/M
+Ilyushin/M
+I'm
+image/DSGM
+Imagen/M
+imagery/MS
+imaginableness
+imaginable/U
+imaginably/U
+imaginariness/M
+imaginary/PS
+imagination/MS
+imaginativeness/M
+imaginative/UY
+imagined/U
+imaginer/M
+imagine/RSDJBG
+imagoes
+imago/M
+imam/MS
+imbalance/SDM
+imbecile/YMS
+imbecilic
+imbecility/MS
+imbiber/M
+imbibe/ZRSDG
+imbrication/SM
+Imbrium/M
+imbroglio/MS
+imbruing
+imbue/GDS
+Imelda/M
+IMF
+IMHO
+imitable/I
+imitate/SDVNGX
+imitation/M
+imitativeness/MS
+imitative/YP
+imitator/SM
+immaculateness/SM
+immaculate/YP
+immanence/S
+immanency/MS
+immanent/Y
+Immanuel/M
+immateriality/MS
+immaterialness/MS
+immaterial/PY
+immatureness/M
+immature/SPY
+immaturity/MS
+immeasurableness/M
+immeasurable/P
+immeasurably
+immediacy/MS
+immediateness/SM
+immediate/YP
+immemorial/Y
+immenseness/M
+immense/PRTY
+immensity/MS
+immerse/RSDXNG
+immersible
+immersion/M
+immigrant/SM
+immigrate/NGSDX
+immigration/M
+imminence/SM
+imminentness/M
+imminent/YP
+immobile
+immobility/MS
+immobilization/MS
+immobilize/DSRG
+immoderateness/M
+immoderate/NYP
+immoderation/M
+immodest/Y
+immodesty/SM
+immolate/SDNGX
+immolation/M
+immorality/MS
+immoral/Y
+immortality/SM
+immortalized/U
+immortalize/GDS
+immortal/SY
+immovability/SM
+immovableness/M
+immovable/PS
+immovably
+immune/S
+immunity/SM
+immunization/MS
+immunize/GSD
+immunoassay/M
+immunodeficiency/S
+immunodeficient
+immunologic
+immunological/Y
+immunologist/SM
+immunology/MS
+immure/GSD
+immutability/MS
+immutableness/M
+immutable/P
+immutably
+IMNSHO
+IMO
+Imogene/M
+Imogen/M
+Imojean/M
+impaction/SM
+impactor/SM
+impact/VGMRDS
+impaired/U
+impairer/M
+impair/LGRDS
+impairment/SM
+impala/MS
+impale/GLRSD
+impalement/SM
+impaler/M
+impalpable
+impalpably
+impanel/DGS
+impartation/M
+impart/GDS
+impartiality/SM
+impartial/Y
+impassableness/M
+impassable/P
+impassably
+impasse/SXBMVN
+impassibility/SM
+impassible
+impassibly
+impassion/DG
+impassioned/U
+impassiveness/MS
+impassive/YP
+impassivity/MS
+impasto/SM
+impatience/SM
+impatiens/M
+impatient/Y
+impeachable/U
+impeach/DRSZGLB
+impeacher/M
+impeachment/MS
+impeccability/SM
+impeccable/S
+impeccably
+impecuniousness/MS
+impecunious/PY
+impedance/MS
+impeded/U
+impeder/M
+impede/S
+imped/GRD
+impedimenta
+impediment/SM
+impelled
+impeller/MS
+impelling
+impel/S
+impend/DGS
+impenetrability/MS
+impenetrableness/M
+impenetrable/P
+impenetrably
+impenitence/MS
+impenitent/YS
+imperativeness/M
+imperative/PSY
+imperceivable
+imperceptibility/MS
+imperceptible
+imperceptibly
+imperceptive
+imperf
+imperfectability
+imperfection/MS
+imperfectness/SM
+imperfect/YSVP
+imperialism/MS
+imperialistic
+imperialistically
+imperialist/SM
+imperial/YS
+imperil/GSLD
+imperilment/SM
+imperiousness/MS
+imperious/YP
+imperishableness/M
+imperishable/SP
+imperishably
+impermanence/MS
+impermanent/Y
+impermeability/SM
+impermeableness/M
+impermeable/P
+impermeably
+impermissible
+impersonality/M
+impersonalized
+impersonal/Y
+impersonate/XGNDS
+impersonation/M
+impersonator/SM
+impertinence/SM
+impertinent/YS
+imperturbability/SM
+imperturbable
+imperturbably
+imperviousness/M
+impervious/PY
+impetigo/MS
+impetuosity/MS
+impetuousness/MS
+impetuous/YP
+impetus/MS
+impiety/MS
+impinge/LS
+impingement/MS
+imping/GD
+impiousness/SM
+impious/PY
+impishness/MS
+impish/YP
+implacability/SM
+implacableness/M
+implacable/P
+implacably
+implantation/SM
+implant/BGSDR
+implanter/M
+implausibility/MS
+implausible
+implausibly
+implementability
+implementable/U
+implementation/A
+implementations
+implementation's
+implemented/AU
+implementer/M
+implementing/A
+implementor/MS
+implement/SMRDGZB
+implicant/SM
+implicate/VGSD
+implication/M
+implicative/PY
+implicitness/SM
+implicit/YP
+implied/Y
+implode/GSD
+implore/GSD
+imploring/Y
+implosion/SM
+implosive/S
+imply/GNSDX
+impoliteness/MS
+impolite/YP
+impoliticness/M
+impolitic/PY
+imponderableness/M
+imponderable/PS
+importance/SM
+important/Y
+importation/MS
+importer/M
+importing/A
+import/SZGBRD
+importunateness/M
+importunate/PYGDS
+importuner/M
+importune/SRDZYG
+importunity/SM
+imposable
+impose/ASDG
+imposer/SM
+imposingly
+imposing/U
+imposition/SM
+impossibility/SM
+impossibleness/M
+impossible/PS
+impossibly
+imposter's
+impostor/SM
+impost/SGMD
+imposture/SM
+impotence/MS
+impotency/S
+impotent/SY
+impound/GDS
+impoundments
+impoverisher/M
+impoverish/LGDRS
+impoverishment/SM
+impracticableness/M
+impracticable/P
+impracticably
+impracticality/SM
+impracticalness/M
+impractical/PY
+imprecate/NGXSD
+imprecation/M
+impreciseness/MS
+imprecise/PYXN
+imprecision/M
+impregnability/MS
+impregnableness/M
+impregnable/P
+impregnably
+impregnate/DSXNG
+impregnation/M
+impresario/SM
+impress/DRSGVL
+impressed/U
+impresser/M
+impressibility/MS
+impressible
+impressionability/SM
+impressionableness/M
+impressionable/P
+impression/BMS
+impressionism/SM
+impressionistic
+impressionist/MS
+impressiveness/MS
+impressive/YP
+impressment/M
+imprimatur/SM
+imprinter/M
+imprinting/M
+imprint/SZDRGM
+imprison/GLDS
+imprisonment/MS
+improbability/MS
+improbableness/M
+improbable/P
+improbably
+impromptu/S
+improperness/M
+improper/PY
+impropitious
+impropriety/SM
+improved/U
+improvement/MS
+improver/M
+improve/SRDGBL
+improvidence/SM
+improvident/Y
+improvisational
+improvisation/MS
+improvisatory
+improviser/M
+improvise/RSDZG
+imprudence/SM
+imprudent/Y
+imp/SGMDRY
+impudence/MS
+impudent/Y
+impugner/M
+impugn/SRDZGB
+impulse/XMVGNSD
+impulsion/M
+impulsiveness/MS
+impulsive/YP
+impunity/SM
+impureness/M
+impure/RPTY
+impurity/MS
+imputation/SM
+impute/SDBG
+Imus/M
+IN
+inaction
+inactive
+inadequate/S
+inadvertence/MS
+inadvertent/Y
+inalienability/MS
+inalienably
+inalterableness/M
+inalterable/P
+Ina/M
+inamorata/MS
+inane/SRPYT
+inanimateness/S
+inanimate/P
+inanity/MS
+inappeasable
+inappropriate/P
+inarticulate/P
+in/AS
+inasmuch
+inaugural/S
+inaugurate/XSDNG
+inauguration/M
+inauthenticity
+inbound/G
+inbred/S
+inbreed/JG
+incalculableness/M
+incalculably
+incandescence/SM
+incandescent/YS
+incant
+incantation/SM
+incantatory
+incapable/S
+incapacitate/GNSD
+incapacitation/M
+incarcerate/XGNDS
+incarceration/M
+incarnadine/GDS
+incarnate/AGSDNX
+incarnation/AM
+Inca/SM
+incendiary/S
+incense/MGDS
+incentive/ESM
+incentively
+incept/DGVS
+inception/MS
+inceptive/Y
+inceptor/M
+incessant/Y
+incest/SM
+incestuousness/MS
+incestuous/PY
+inch/GMDS
+inchoate/DSG
+Inchon/M
+inchworm/MS
+incidence/MS
+incidental/YS
+incident/SM
+incinerate/XNGSD
+incineration/M
+incinerator/SM
+incipience/SM
+incipiency/M
+incipient/Y
+incise/SDVGNX
+incision/M
+incisiveness/MS
+incisive/YP
+incisor/MS
+incitement/MS
+inciter/M
+incite/RZL
+incl
+inclination/ESM
+incline/EGSD
+incliner/M
+inclining/M
+include/GDS
+inclusion/MS
+inclusiveness/MS
+inclusive/PY
+Inc/M
+incognito/S
+incoherency/M
+income/M
+incommode/DG
+incommunicado
+incomparable
+incompetent/MS
+incomplete/P
+inconceivability/MS
+inconceivableness/M
+inconceivable/P
+incondensable
+incongruousness/S
+inconsiderableness/M
+inconsiderable/P
+inconsistence
+inconsolableness/M
+inconsolable/P
+inconsolably
+incontestability/SM
+incontestably
+incontrovertibly
+inconvenience/DG
+inconvertibility
+inconvertible
+incorporable
+incorporated/UE
+incorporate/GASDXN
+incorrect/P
+incorrigibility/MS
+incorrigibleness/M
+incorrigible/SP
+incorrigibly
+incorruptible/S
+incorruptibly
+increase/JB
+increaser/M
+increasing/Y
+incredibleness/M
+incredible/P
+incremental/Y
+incrementation
+increment/DMGS
+incriminate/XNGSD
+incrimination/M
+incriminatory
+incrustation/SM
+inc/T
+incubate/XNGVDS
+incubation/M
+incubator/MS
+incubus/MS
+inculcate/SDGNX
+inculcation/M
+inculpate/SDG
+incumbency/MS
+incumbent/S
+incunabula
+incunabulum
+incurable/S
+incurious
+incursion/SM
+ind
+indebtedness/SM
+indebted/P
+indefatigableness/M
+indefatigable/P
+indefatigably
+indefeasible
+indefeasibly
+indefinableness/M
+indefinable/PS
+indefinite/S
+indelible
+indelibly
+indemnification/M
+indemnify/NXSDG
+indemnity/SM
+indentation/SM
+indented/U
+indenter/M
+indention/SM
+indent/R
+indenture/DG
+Independence/M
+indescribableness/M
+indescribable/PS
+indescribably
+indestructibleness/M
+indestructible/P
+indestructibly
+indeterminably
+indeterminacy/MS
+indeterminism
+indexation/S
+indexer/M
+index/MRDZGB
+India/M
+Indiana/M
+Indianan/S
+Indianapolis/M
+Indianian/S
+Indian/SM
+indicant/MS
+indicate/DSNGVX
+indication/M
+indicative/SY
+indicator/MS
+indices's
+indicter/M
+indictment/SM
+indict/SGLBDR
+indifference
+indigence/MS
+indigenousness/M
+indigenous/YP
+indigent/SY
+indigestible/S
+indignant/Y
+indignation/MS
+indigo/SM
+Indira/M
+indirect/PG
+indiscreet/P
+indiscriminateness/M
+indiscriminate/PY
+indispensability/MS
+indispensableness/M
+indispensable/SP
+indispensably
+indisputableness/M
+indisputable/P
+indissolubleness/M
+indissoluble/P
+indissolubly
+indistinguishableness/M
+indistinguishable/P
+indite/SDG
+indium/SM
+individualism/MS
+individualistic
+individualistically
+individualist/MS
+individuality/MS
+individualization/SM
+individualize/DRSGZ
+individualized/U
+individualizer/M
+individualizes/U
+individualizing/Y
+individual/YMS
+individuate/DSXGN
+individuation/M
+indivisibleness/M
+indivisible/SP
+indivisibly
+Ind/M
+Indochina/M
+Indochinese
+indoctrinate/GNXSD
+indoctrination/M
+indoctrinator/SM
+indolence/SM
+indolent/Y
+indomitableness/M
+indomitable/P
+indomitably
+Indonesia/M
+Indonesian/S
+indoor
+Indore/M
+Indra/M
+indubitableness/M
+indubitable/P
+indubitably
+inducement/MS
+inducer/M
+induce/ZGLSRD
+inducible
+inductance/MS
+inductee/SM
+induct/GV
+induction/SM
+inductiveness/M
+inductive/PY
+inductor/MS
+indulge/GDRS
+indulgence/SDGM
+indulgent/Y
+indulger/M
+Indus/M
+industrialism/MS
+industrialist/MS
+industrialization/MS
+industrialized/U
+industrialize/SDG
+industrial/SY
+industriousness/SM
+industrious/YP
+industry/SM
+Indy/SM
+inebriate/NGSDX
+inebriation/M
+inedible
+ineducable
+ineffability/MS
+ineffableness/M
+ineffable/P
+ineffably
+inelastic
+ineligibly
+ineluctable
+ineluctably
+ineptitude/SM
+ineptness/MS
+inept/YP
+inequivalent
+inerrant
+inertial/Y
+inertia/SM
+inertness/MS
+inert/SPY
+Ines
+inescapably
+Inesita/M
+Inessa/M
+inestimably
+inevitability/MS
+inevitableness/M
+inevitable/P
+inevitably
+inexact/P
+inexhaustibleness/M
+inexhaustible/P
+inexhaustibly
+inexorability/M
+inexorableness/M
+inexorable/P
+inexorably
+inexpedience/M
+inexplicableness/M
+inexplicable/P
+inexplicably
+inexplicit
+inexpressibility/M
+inexpressibleness/M
+inexpressible/PS
+inextricably
+Inez/M
+infamous
+infamy/SM
+infancy/M
+infanticide/MS
+infantile
+infant/MS
+infantryman/M
+infantrymen
+infantry/SM
+infarction/SM
+infarct/SM
+infatuate/XNGSD
+infatuation/M
+infauna
+infected/U
+infecter
+infect/ESGDA
+infection/EASM
+infectiousness/MS
+infectious/PY
+infective
+infer/B
+inference/GMSR
+inferential/Y
+inferiority/MS
+inferior/SMY
+infernal/Y
+inferno/MS
+inferred
+inferring
+infertile
+infestation/MS
+infester/M
+infest/GSDR
+infidel/SM
+infighting/M
+infill/MG
+infiltrate/V
+infiltrator/MS
+infinitesimal/SY
+infinite/V
+infinitival
+infinitive/YMS
+infinitude/MS
+infinitum
+infinity/SM
+infirmary/SM
+infirmity/SM
+infix/M
+inflammableness/M
+inflammable/P
+inflammation/MS
+inflammatory
+inflatable/MS
+inflate/NGBDRSX
+inflater/M
+inflationary
+inflation/ESM
+inflect/GVDS
+inflectional/Y
+inflection/SM
+inflexibleness/M
+inflexible/P
+inflexion/SM
+inflict/DRSGV
+inflicter/M
+infliction/SM
+inflow/M
+influenced/U
+influencer/M
+influence/SRDGM
+influent
+influential/SY
+influenza/MS
+infomercial/S
+Informatica/M
+informatics
+informational
+information/ES
+informativeness/S
+informative/UY
+informatory
+informed/U
+informer/M
+info/SM
+infotainment/S
+infra
+infrared/SM
+infrasonic
+infrastructural
+infrastructure/MS
+infrequence/S
+infringe/LR
+infringement/SM
+infringer/M
+infuriate/GNYSD
+infuriating/Y
+infuriation/M
+infuser/M
+infuse/RZ
+infusibleness/M
+infusible/P
+inf/ZT
+Ingaberg/M
+Ingaborg/M
+Inga/M
+Ingamar/M
+Ingar/M
+Ingeberg/M
+Ingeborg/M
+Ingelbert/M
+Ingemar/M
+ingeniousness/MS
+ingenious/YP
+ingénue/S
+ingenuity/SM
+ingenuous/EY
+ingenuousness/MS
+Inger/M
+Inge/RM
+Ingersoll/M
+ingest/DGVS
+ingestible
+ingestion/SM
+Inglebert/M
+inglenook/MS
+Inglewood/M
+Inglis/M
+Ingmar/M
+ingoing
+ingot/SMDG
+ingrained/Y
+Ingra/M
+Ingram/M
+ingrate/M
+ingratiate/DSGNX
+ingratiating/Y
+ingratiation/M
+ingredient/SM
+Ingres/M
+ingression/M
+ingress/MS
+Ingrid/M
+Ingrim/M
+ingrown/P
+inguinal
+Ingunna/M
+inhabitable/U
+inhabitance
+inhabited/U
+inhabiter/M
+inhabit/R
+inhalant/S
+inhalation/SM
+inhalator/SM
+inhale/Z
+inhere/DG
+inherent/Y
+inheritableness/M
+inheritable/P
+inheritance/EMS
+inherit/BDSG
+inherited/E
+inheriting/E
+inheritor/S
+inheritress/MS
+inheritrix/MS
+inherits/E
+inhibit/DVGS
+inhibited/U
+inhibiter's
+inhibition/MS
+inhibitor/MS
+inhibitory
+inhomogeneous
+inhospitableness/M
+inhospitable/P
+inhospitality
+Inigo/M
+inimical/Y
+inimitableness/M
+inimitable/P
+inimitably
+inion
+iniquitousness/M
+iniquitous/PY
+iniquity/MS
+initialer/M
+initial/GSPRDY
+initialization/A
+initializations
+initialization's
+initialize/ASDG
+initialized/U
+initializer/S
+initiates
+initiate/UD
+initiating
+initiation/SM
+initiative/SM
+initiator/MS
+initiatory
+injectable/U
+inject/GVSDB
+injection/MS
+injector/SM
+injunctive
+injured/U
+injurer/M
+injure/SRDZG
+injuriousness/M
+injurious/YP
+inkblot/SM
+inker/M
+inkiness/MS
+inkling/SM
+inkstand/SM
+inkwell/SM
+inky/TP
+ink/ZDRJ
+inland
+inlander/M
+inlay/RG
+inletting
+inly/G
+inmost
+Inna/M
+innards
+innateness/SM
+innate/YP
+innermost/S
+innersole/S
+innerspring
+innervate/GNSDX
+innervation/M
+inner/Y
+inning/M
+Innis/M
+innkeeper/MS
+innocence/SM
+Innocent/M
+innocent/SYRT
+innocuousness/MS
+innocuous/PY
+innovate/SDVNGX
+innovation/M
+innovative/P
+innovator/MS
+innovatory
+Innsbruck/M
+innuendo/MDGS
+innumerability/M
+innumerableness/M
+innumerable/P
+innumerably
+innumerate
+inn/ZGDRSJ
+inoculate/ASDG
+inoculation/MS
+inoculative
+inoffensive/P
+Inonu/M
+inopportuneness/M
+inopportune/P
+inordinateness/M
+inordinate/PY
+inorganic
+inpatient
+In/PM
+input/MRDG
+inquirer/M
+inquire/ZR
+inquiring/Y
+inquiry/MS
+inquisitional
+inquisition/MS
+Inquisition/MS
+inquisitiveness/MS
+inquisitive/YP
+inquisitorial/Y
+inquisitor/MS
+INRI
+inrush/M
+ins
+INS
+insalubrious
+insanitary
+insatiability/MS
+insatiableness/M
+insatiable/P
+insatiably
+inscribe/Z
+inscription/SM
+inscrutability/SM
+inscrutableness/SM
+inscrutable/P
+inscrutably
+inseam
+insecticidal
+insecticide/MS
+insectivore/SM
+insectivorous
+insecureness/M
+insecure/P
+inseminate/NGXSD
+insemination/M
+insensateness/M
+insensate/P
+insensible/P
+insentient
+inseparable/S
+insert/ADSG
+inserter/M
+insertion/AMS
+insetting
+inshore
+insider/M
+inside/Z
+insidiousness/MS
+insidious/YP
+insightful/Y
+insigne's
+insignia/SM
+insignificant
+insinuate/VNGXSD
+insinuating/Y
+insinuation/M
+insinuator/SM
+insipidity/MS
+insipid/Y
+insistence/SM
+insistent/Y
+insisting/Y
+insist/SGD
+insociable
+insofar
+insole/M
+insolence/SM
+insolent/YS
+insolubleness/M
+insoluble/P
+insolubly
+insomniac/S
+insomnia/MS
+insomuch
+insouciance/SM
+insouciant/Y
+inspect/AGSD
+inspection/SM
+inspective
+inspectorate/MS
+inspector/SM
+inspirational/Y
+inspiration/MS
+inspired/U
+inspire/R
+inspirer/M
+inspiring/U
+inspirit/DG
+Inst
+installable
+install/ADRSG
+installation/SM
+installer/MS
+installment/MS
+instance/GD
+instantaneousness/M
+instantaneous/PY
+instantiated/U
+instantiate/SDXNG
+instantiation/M
+instant/SRYMP
+instate/AGSD
+inst/B
+instead
+instigate/XSDVGN
+instigation/M
+instigator/SM
+instillation/SM
+instinctive/Y
+instinctual
+instinct/VMS
+instituter/M
+institutes/M
+institute/ZXVGNSRD
+institutionalism/M
+institutionalist/M
+institutionalization/SM
+institutionalize/GDS
+institutional/Y
+institution/AM
+institutor's
+instr
+instruct/DSVG
+instructed/U
+instructional
+instruction/MS
+instructiveness/M
+instructive/PY
+instructor/MS
+instrumentalist/MS
+instrumentality/SM
+instrumental/SY
+instrumentation/SM
+instrument/GMDS
+insubordinate
+insubstantial
+insufferable
+insufferably
+insularity/MS
+insular/YS
+insulate/DSXNG
+insulated/U
+insulation/M
+insulator/MS
+insulin/MS
+insult/DRSG
+insulter/M
+insulting/Y
+insuperable
+insuperably
+insupportableness/M
+insupportable/P
+insurance/MS
+insurance's/A
+insure/BZGS
+insured/S
+insurer/M
+insurgence/SM
+insurgency/MS
+insurgent/MS
+insurmountably
+insurrectionist/SM
+insurrection/SM
+intactness/M
+intact/P
+intaglio/GMDS
+intake/M
+intangible/M
+integer/MS
+integrability/M
+integrable
+integral/SYM
+integrand/MS
+integrate/AGNXEDS
+integration/EMA
+integrative/E
+integrator/MS
+integrity/SM
+integument/SM
+intellective/Y
+intellect/MVS
+intellectualism/MS
+intellectuality/M
+intellectualize/GSD
+intellectualness/M
+intellectual/YPS
+intelligence/MSR
+intelligencer/M
+intelligentsia/MS
+intelligent/UY
+intelligibilities
+intelligibility/UM
+intelligibleness/MU
+intelligible/PU
+intelligibly/U
+Intel/M
+Intelsat/M
+intemperate/P
+intendant/MS
+intendedness/M
+intended/SYP
+intender/M
+intensification/M
+intensifier/M
+intensify/GXNZRSD
+intensional/Y
+intensiveness/MS
+intensive/PSY
+intentionality/M
+intentional/UY
+intention/SDM
+intentness/SM
+intent/YP
+interaction/MS
+interactive/PY
+interactivity
+interact/VGDS
+interaxial
+interbank
+interbred
+interbreed/GS
+intercalate/GNVDS
+intercalation/M
+intercase
+intercaste
+interceder/M
+intercede/SRDG
+intercensal
+intercept/DGS
+interception/MS
+interceptor/MS
+intercession/MS
+intercessor/SM
+intercessory
+interchangeability/M
+interchangeableness/M
+interchangeable/P
+interchangeably
+interchange/DSRGJ
+interchanger/M
+intercity
+interclass
+intercohort
+intercollegiate
+intercommunicate/SDXNG
+intercommunication/M
+intercom/SM
+interconnectedness/M
+interconnected/P
+interconnect/GDS
+interconnection/SM
+interconnectivity
+intercontinental
+interconversion/M
+intercorrelated
+intercourse/SM
+Interdata/M
+interdenominational
+interdepartmental/Y
+interdependence/MS
+interdependency/SM
+interdependent/Y
+interdiction/MS
+interdict/MDVGS
+interdisciplinary
+interested/UYE
+interest/GEMDS
+interestingly/U
+interestingness/M
+interesting/YP
+inter/ESTL
+interface/SRDGM
+interfacing/M
+interfaith
+interference/MS
+interferer/M
+interfere/SRDG
+interfering/Y
+interferometer/SM
+interferometric
+interferometry/M
+interferon/MS
+interfile/GSD
+intergalactic
+intergenerational
+intergeneration/M
+interglacial
+intergovernmental
+intergroup
+interim/S
+interindex
+interindustry
+interior/SMY
+interj
+interject/GDS
+interjectional
+interjection/MS
+interlace/GSD
+interlard/SGD
+interlayer/G
+interleave/SDG
+interleukin/S
+interlibrary
+interlinear/S
+interline/JGSD
+interlingual
+interlingua/M
+interlining/M
+interlink/GDS
+interlisp/M
+interlobular
+interlocker/M
+interlock/RDSG
+interlocutor/MS
+interlocutory
+interlope/GZSRD
+interloper/M
+interlude/MSDG
+intermarriage/MS
+intermarry/GDS
+intermediary/MS
+intermediateness/M
+intermediate/YMNGSDP
+intermediation/M
+interment/SME
+intermeshed
+intermetrics
+intermezzi
+intermezzo/SM
+interminably
+intermingle/DSG
+intermission/MS
+intermittent/Y
+intermix/GSRD
+intermodule
+intermolecular/Y
+internalization/SM
+internalize/GDS
+internal/SY
+Internationale/M
+internationalism/SM
+internationalist/SM
+internationality/M
+internationalization/MS
+internationalize/DSG
+international/YS
+internecine
+internee/SM
+interne's
+Internet/M
+INTERNET/M
+internetwork
+internist/SM
+intern/L
+internment/SM
+internship/MS
+internuclear
+interocular
+interoffice
+interoperability
+interpenetrates
+interpersonal/Y
+interplanetary
+interplay/GSMD
+interpol
+interpolate/XGNVBDS
+interpolation/M
+Interpol/M
+interpose/GSRD
+interposer/M
+interposition/MS
+interpretable/U
+interpret/AGSD
+interpretation/MSA
+interpretative/Y
+interpreted/U
+interpreter/SM
+interpretive/Y
+interpretor/S
+interprocess
+interprocessor
+interquartile
+interracial
+interred/E
+interregional
+interregnum/MS
+interrelatedness/M
+interrelated/PY
+interrelate/GNDSX
+interrelation/M
+interrelationship/SM
+interring/E
+interrogate/DSXGNV
+interrogation/M
+interrogative/SY
+interrogator/SM
+interrogatory/S
+interrupted/U
+interrupter/M
+interruptibility
+interruptible
+interruption/MS
+interrupt/VGZRDS
+interscholastic
+intersect/GDS
+intersection/MS
+intersession/MS
+interspecies
+intersperse/GNDSX
+interspersion/M
+interstage
+interstate/S
+interstellar
+interstice/SM
+interstitial/SY
+intersurvey
+intertask
+intertwine/GSD
+interurban/S
+interval/MS
+intervene/GSRD
+intervener/M
+intervenor/M
+interventionism/MS
+interventionist/S
+intervention/MS
+interview/AMD
+interviewed/U
+interviewee/SM
+interviewer/SM
+interviewing
+interviews
+intervocalic
+interweave/GS
+interwove
+interwoven
+intestacy/SM
+intestinal/Y
+intestine/SM
+inti
+intifada
+intimacy/SM
+intimal
+intimateness/M
+intimater/M
+intimate/XYNGPDRS
+intimation/M
+intimidate/SDXNG
+intimidating/Y
+intimidation/M
+into
+intolerableness/M
+intolerable/P
+intolerant/PS
+intonate/NX
+intonation/M
+intoxicant/MS
+intoxicate/DSGNX
+intoxicated/Y
+intoxication/M
+intra
+intracellular
+intracity
+intraclass
+intracohort
+intractability/M
+intractableness/M
+intractable/P
+intradepartmental
+intrafamily
+intragenerational
+intraindustry
+intraline
+intrametropolitan
+intramural/Y
+intramuscular/Y
+intranasal
+intransigence/MS
+intransigent/YS
+intransitive/S
+intraoffice
+intraprocess
+intrapulmonary
+intraregional
+intrasectoral
+intrastate
+intratissue
+intrauterine
+intravenous/YS
+intrepidity/SM
+intrepidness/M
+intrepid/YP
+intricacy/SM
+intricateness/M
+intricate/PY
+intrigue/DRSZG
+intriguer/M
+intriguing/Y
+intrinsically
+intrinsic/S
+introduce/ADSG
+introducer/M
+introduction/ASM
+introductory
+introit/SM
+introject/SD
+intro/S
+introspection/MS
+introspectiveness/M
+introspective/YP
+introspect/SGVD
+introversion/SM
+introvert/SMDG
+intruder/M
+intrude/ZGDSR
+intrusion/SM
+intrusiveness/MS
+intrusive/SYP
+intubate/NGDS
+intubation/M
+intuit/GVDSB
+intuitionist/M
+intuitiveness/MS
+intuitive/YP
+int/ZR
+Inuit/MS
+inundate/SXNG
+inundation/M
+inure/GDS
+invader/M
+invade/ZSRDG
+invalid/GSDM
+invalidism/MS
+invariable/P
+invariant/M
+invasion/SM
+invasive/P
+invectiveness/M
+invective/PSMY
+inveigh/DRG
+inveigher/M
+inveighs
+inveigle/DRSZG
+inveigler/M
+invent/ADGS
+invented/U
+invention/ASM
+inventiveness/MS
+inventive/YP
+inventor/MS
+inventory/SDMG
+Inverness/M
+inverse/YV
+inverter/M
+invertible
+invert/ZSGDR
+invest/ADSLG
+investigate/XDSNGV
+investigation/MA
+investigator/MS
+investigatory
+investiture/SM
+investment/ESA
+investment's/A
+investor/SM
+inveteracy/MS
+inveterate/Y
+inviability
+invidiousness/MS
+invidious/YP
+invigilate/GD
+invigilator/SM
+invigorate/ANGSD
+invigorating/Y
+invigoration/AM
+invigorations
+invincibility/SM
+invincibleness/M
+invincible/P
+invincibly
+inviolability/MS
+inviolably
+inviolateness/M
+inviolate/YP
+inviscid
+invisibleness/M
+invisible/S
+invitational/S
+invitation/MS
+invited/U
+invitee/S
+inviter/M
+invite/SRDG
+inviting/Y
+invocable
+invocate
+invoked/A
+invoke/GSRDBZ
+invoker/M
+invokes/A
+involuntariness/S
+involuntary/P
+involute/XYN
+involution/M
+involutorial
+involvedly
+involved/U
+involve/GDSRL
+involvement/SM
+involver/M
+invulnerability/M
+invulnerableness/M
+inwardness/M
+inward/PY
+ioctl
+iodate/MGND
+iodation/M
+iodide/MS
+iodinate/DNG
+iodine/MS
+iodize/GSD
+Iolande/M
+Iolanthe/M
+Io/M
+Iona/M
+Ionesco/M
+Ionian/M
+ionic/S
+Ionic/S
+ionization's
+ionization/SU
+ionized/UC
+ionize/GNSRDJXZ
+ionizer's
+ionizer/US
+ionizes/U
+ionizing/U
+ionosphere/SM
+ionospheric
+ion's/I
+ion/SMU
+Iorgo/MS
+Iormina/M
+Iosep/M
+iota/SM
+IOU
+Iowan/S
+Iowa/SM
+IPA
+ipecac/MS
+Iphigenia/M
+ipso
+Ipswich/M
+IQ
+Iqbal/M
+Iquitos/M
+Ira/M
+Iranian/MS
+Iran/M
+Iraqi/SM
+Iraq/M
+IRA/S
+irascibility/SM
+irascible
+irascibly
+irateness/S
+irate/RPYT
+ireful
+Ireland/M
+ire/MGDS
+Irena/M
+Irene/M
+irenic/S
+iridescence/SM
+iridescent/Y
+irides/M
+iridium/MS
+irids
+Irina/M
+Iris
+iris/GDSM
+Irishman/M
+Irishmen
+Irish/R
+Irishwoman/M
+Irishwomen
+Irita/M
+irk/GDS
+irksomeness/SM
+irksome/YP
+Irkutsk/M
+Ir/M
+Irma/M
+ironclad/S
+iron/DRMPSGJ
+ironer/M
+ironic
+ironicalness/M
+ironical/YP
+ironing/M
+ironmonger/M
+ironmongery/M
+ironside/MS
+ironstone/MS
+ironware/SM
+ironwood/SM
+ironworker/M
+ironwork/MRS
+irony/SM
+Iroquoian/MS
+Iroquois/M
+irradiate/XSDVNG
+irradiation/M
+irrationality/MS
+irrationalness/M
+irrational/YSP
+Irrawaddy/M
+irreclaimable
+irreconcilability/MS
+irreconcilableness/M
+irreconcilable/PS
+irreconcilably
+irrecoverableness/M
+irrecoverable/P
+irrecoverably
+irredeemable/S
+irredeemably
+irredentism/M
+irredentist/M
+irreducibility/M
+irreducible
+irreducibly
+irreflexive
+irrefutable
+irrefutably
+irregardless
+irregularity/SM
+irregular/YS
+irrelevance/SM
+irrelevancy/MS
+irrelevant/Y
+irreligious
+irremediableness/M
+irremediable/P
+irremediably
+irremovable
+irreparableness/M
+irreparable/P
+irreparably
+irreplaceable/P
+irrepressible
+irrepressibly
+irreproachableness/M
+irreproachable/P
+irreproachably
+irreproducibility
+irreproducible
+irresistibility/M
+irresistibleness/M
+irresistible/P
+irresistibly
+irresoluteness/SM
+irresolute/PNXY
+irresolution/M
+irresolvable
+irrespective/Y
+irresponsibility/SM
+irresponsibleness/M
+irresponsible/PS
+irresponsibly
+irretrievable
+irretrievably
+irreverence/MS
+irreverent/Y
+irreversible
+irreversibly
+irrevocableness/M
+irrevocable/P
+irrevocably
+irrigable
+irrigate/DSXNG
+irrigation/M
+irritability/MS
+irritableness/M
+irritable/P
+irritably
+irritant/S
+irritate/DSXNGV
+irritated/Y
+irritating/Y
+irritation/M
+irrupt/GVSD
+irruption/SM
+IRS
+Irtish/M
+Irvine/M
+Irving/M
+Irvin/M
+Irv/MG
+Irwin/M
+Irwinn/M
+is
+i's
+Isaac/SM
+Isaak/M
+Isabelita/M
+Isabella/M
+Isabelle/M
+Isabel/M
+Isacco/M
+Isac/M
+Isadora/M
+Isadore/M
+Isador/M
+Isahella/M
+Isaiah/M
+Isak/M
+Isa/M
+ISBN
+Iscariot/M
+Iseabal/M
+Isfahan/M
+Isherwood/M
+Ishim/M
+Ishmael/M
+Ishtar/M
+Isiahi/M
+Isiah/M
+Isidora/M
+Isidore/M
+Isidor/M
+Isidoro/M
+Isidro/M
+isinglass/MS
+Isis/M
+Islamabad/M
+Islamic/S
+Islam/SM
+islander/M
+island/GZMRDS
+Islandia/M
+isle/MS
+islet/SM
+isl/GD
+Ismael/M
+ism/MCS
+isn't
+ISO
+isobaric
+isobar/MS
+Isobel/M
+isochronal/Y
+isochronous/Y
+isocline/M
+isocyanate/M
+isodine
+isolate/SDXNG
+isolationism/SM
+isolationistic
+isolationist/SM
+isolation/M
+isolator/MS
+Isolde/M
+isomeric
+isomerism/SM
+isomer/SM
+isometrically
+isometric/S
+isometrics/M
+isomorphic
+isomorphically
+isomorphism/MS
+isomorph/M
+isoperimetrical
+isopleth/M
+isopleths
+isosceles
+isostatic
+isothermal/Y
+isotherm/MS
+isotonic
+isotope/SM
+isotopic
+isotropic
+isotropically
+isotropy/M
+Ispahan's
+ispell/M
+Ispell/M
+Israeli/MS
+Israelite/SM
+Israel/MS
+Issac/M
+Issiah/M
+Issie/M
+Issi/M
+issuable
+issuance/MS
+issuant
+issued/A
+issue/GMZDSR
+issuer/AMS
+issues/A
+issuing/A
+Issy/M
+Istanbul/M
+isthmian/S
+isthmus/SM
+Istvan/M
+Isuzu/M
+It
+IT
+Itaipu/M
+ital
+Italianate/GSD
+Italian/MS
+italicization/MS
+italicized/U
+italicize/GSD
+italic/S
+Ital/M
+Italy/M
+Itasca/M
+itch/GMDS
+itchiness/MS
+Itch/M
+itchy/RTP
+ITcorp/M
+ITCorp/M
+it'd
+Itel/M
+itemization/SM
+itemized/U
+itemize/GZDRS
+itemizer/M
+itemizes/A
+item/MDSG
+iterate/ASDXVGN
+iteration/M
+iterative/YA
+iterator/MS
+Ithaca/M
+Ithacan
+itinerant/SY
+itinerary/MS
+it'll
+it/MUS
+Ito/M
+its
+itself
+ITT
+IUD/S
+IV
+Iva/M
+Ivanhoe/M
+Ivan/M
+Ivar/M
+I've
+Ive/MRS
+Iver/M
+Ivette/M
+Ivett/M
+Ivie/M
+iv/M
+Ivonne/M
+Ivor/M
+Ivory/M
+ivory/SM
+IVs
+Ivy/M
+ivy/MDS
+ix
+Izaak/M
+Izabel/M
+Izak/M
+Izanagi/M
+Izanami/M
+Izhevsk/M
+Izmir/M
+Izvestia/M
+Izzy/M
+jabbed
+jabberer/M
+jabber/JRDSZG
+jabbing
+Jabez/M
+Jablonsky/M
+jabot/MS
+jab/SM
+jacaranda/MS
+Jacenta/M
+Jacinda/M
+Jacinta/M
+Jacintha/M
+Jacinthe/M
+jackal/SM
+jackass/SM
+jackboot/DMS
+jackdaw/SM
+Jackelyn/M
+jacketed/U
+jacket/GSMD
+jack/GDRMS
+jackhammer/MDGS
+Jackie/M
+Jacki/M
+jackknife/MGSD
+jackknives
+Jacklin/M
+Jacklyn/M
+Jack/M
+Jackman/M
+jackpot/MS
+Jackqueline/M
+Jackquelin/M
+jackrabbit/DGS
+Jacksonian
+Jackson/SM
+Jacksonville/M
+jackstraw/MS
+Jacky/M
+Jaclin/M
+Jaclyn/M
+Jacobean
+Jacobian/M
+Jacobi/M
+Jacobin/M
+Jacobite/M
+Jacobo/M
+Jacobsen/M
+Jacob/SM
+Jacobs/N
+Jacobson/M
+Jacobus
+Jacoby/M
+jacquard/MS
+Jacquard/SM
+Jacqueline/M
+Jacquelin/M
+Jacquelyn/M
+Jacquelynn/M
+Jacquenetta/M
+Jacquenette/M
+Jacques/M
+Jacquetta/M
+Jacquette/M
+Jacquie/M
+Jacqui/M
+jacuzzi
+Jacuzzi/S
+Jacynth/M
+Jada/M
+jadedness/SM
+jaded/PY
+jadeite/SM
+Jade/M
+jade/MGDS
+Jaeger/M
+Jae/M
+jaggedness/SM
+jagged/RYTP
+Jagger/M
+jaggers
+jagging
+jag/S
+jaguar/MS
+jailbird/MS
+jailbreak/SM
+jailer/M
+jail/GZSMDR
+Jaime/M
+Jaimie/M
+Jaine/M
+Jainism/M
+Jain/M
+Jaipur/M
+Jakarta/M
+Jake/MS
+Jakie/M
+Jakob/M
+jalapeño/S
+jalopy/SM
+jalousie/MS
+Jamaal/M
+Jamaica/M
+Jamaican/S
+Jamal/M
+Jamar/M
+jambalaya/MS
+jamb/DMGS
+jamboree/MS
+Jamel/M
+Jame/MS
+Jameson/M
+Jamestown/M
+Jamesy/M
+Jamey/M
+Jamie/M
+Jamill/M
+Jamil/M
+Jami/M
+Jamima/M
+Jamison/M
+Jammal/M
+jammed/U
+Jammie/M
+jamming/U
+jam/SM
+Janacek/M
+Jana/M
+Janaya/M
+Janaye/M
+Jandy/M
+Janean/M
+Janeczka/M
+Janeen/M
+Janeiro/M
+Janek/M
+Janela/M
+Janella/M
+Janelle/M
+Janell/M
+Janel/M
+Jane/M
+Janene/M
+Janenna/M
+Janessa/M
+Janesville/M
+Janeta/M
+Janet/M
+Janetta/M
+Janette/M
+Janeva/M
+Janey/M
+jangler/M
+jangle/RSDGZ
+jangly
+Jania/M
+Janice/M
+Janie/M
+Janifer/M
+Janina/M
+Janine/M
+Janis/M
+janissary/MS
+Janith/M
+janitorial
+janitor/SM
+Janka/M
+Jan/M
+Janna/M
+Jannelle/M
+Jannel/M
+Jannie/M
+Janos/M
+Janot/M
+Jansenist/M
+Jansen/M
+January/MS
+Janus/M
+Jany/M
+Japanese/SM
+Japan/M
+japanned
+japanner
+japanning
+japan/SM
+jape/DSMG
+Japura/M
+Jaquelin/M
+Jaquelyn/M
+Jaquenetta/M
+Jaquenette/M
+Jaquith/M
+Jarad/M
+jardinière/MS
+Jard/M
+Jareb/M
+Jared/M
+jarful/S
+jargon/SGDM
+Jarib/M
+Jarid/M
+Jarlsberg
+jar/MS
+Jarrad/M
+jarred
+Jarred/M
+Jarret/M
+Jarrett/M
+Jarrid/M
+jarring/SY
+Jarrod/M
+Jarvis/M
+Jase/M
+Jasen/M
+Jasmina/M
+Jasmine/M
+jasmine/MS
+Jasmin/M
+Jason/M
+Jasper/M
+jasper/MS
+Jastrow/M
+Jasun/M
+jato/SM
+jaundice/DSMG
+jaundiced/U
+jauntily
+jauntiness/MS
+jaunt/MDGS
+jaunty/SRTP
+Javanese
+Java/SM
+javelin/SDMG
+Javier/M
+jawbone/SDMG
+jawbreaker/SM
+jawline
+jaw/SMDG
+Jaxartes/M
+Jayapura/M
+jaybird/SM
+Jaycee/SM
+Jaye/M
+Jay/M
+Jaymee/M
+Jayme/M
+Jaymie/M
+Jaynell/M
+Jayne/M
+jay/SM
+Jayson/M
+jaywalker/M
+jaywalk/JSRDZG
+Jazmin/M
+jazziness/M
+jazzmen
+jazz/MGDS
+jazzy/PTR
+JCS
+jct
+JD
+Jdavie/M
+jealousness/M
+jealous/PY
+jealousy/MS
+Jeana/M
+Jeanelle/M
+Jeane/M
+Jeanette/M
+Jeanie/M
+Jeanine/M
+Jean/M
+jean/MS
+Jeanna/M
+Jeanne/M
+Jeannette/M
+Jeannie/M
+Jeannine/M
+Jecho/M
+Jedd/M
+Jeddy/M
+Jedediah/M
+Jedidiah/M
+Jedi/M
+Jed/M
+jeep/GZSMD
+Jeep/S
+jeerer/M
+jeering/Y
+jeer/SJDRMG
+Jeeves/M
+jeez
+Jefferey/M
+Jeffersonian/S
+Jefferson/M
+Jeffery/M
+Jeffie/M
+Jeff/M
+Jeffrey/SM
+Jeffry/M
+Jeffy/M
+jehad's
+Jehanna/M
+Jehoshaphat/M
+Jehovah/M
+Jehu/M
+jejuna
+jejuneness/M
+jejune/PY
+jejunum/M
+Jekyll/M
+Jelene/M
+jell/GSD
+Jello/M
+jello's
+jellybean/SM
+jellyfish/MS
+jellying/M
+jellylike
+jellyroll/S
+jelly/SDMG
+Jemie/M
+Jemimah/M
+Jemima/M
+Jemmie/M
+jemmy/M
+Jemmy/M
+Jena/M
+Jenda/M
+Jenelle/M
+Jenica/M
+Jeniece/M
+Jenifer/M
+Jeniffer/M
+Jenilee/M
+Jeni/M
+Jenine/M
+Jenkins/M
+Jen/M
+Jenna/M
+Jennee/M
+Jenner/M
+jennet/SM
+Jennette/M
+Jennica/M
+Jennie/M
+Jennifer/M
+Jennilee/M
+Jenni/M
+Jennine/M
+Jennings/M
+Jenn/RMJ
+Jenny/M
+jenny/SM
+Jeno/M
+Jensen/M
+Jens/N
+jeopard
+jeopardize/GSD
+jeopardy/MS
+Jephthah/M
+Jerad/M
+Jerald/M
+Jeralee/M
+Jeramey/M
+Jeramie/M
+Jere/M
+Jereme/M
+jeremiad/SM
+Jeremiah/M
+Jeremiahs
+Jeremias/M
+Jeremie/M
+Jeremy/M
+Jericho/M
+Jeri/M
+jerker/M
+jerk/GSDRJ
+jerkily
+jerkiness/SM
+jerkin/SM
+jerkwater/S
+jerky/RSTP
+Jermaine/M
+Jermain/M
+Jermayne/M
+Jeroboam/M
+Jerold/M
+Jerome/M
+Jeromy/M
+Jerrie/M
+Jerrilee/M
+Jerrilyn/M
+Jerri/M
+Jerrine/M
+Jerrod/M
+Jerrold/M
+Jerrome/M
+jerrybuilt
+Jerrylee/M
+jerry/M
+Jerry/M
+jersey/MS
+Jersey/MS
+Jerusalem/M
+Jervis/M
+Jes
+Jessalin/M
+Jessalyn/M
+Jessa/M
+Jessamine/M
+jessamine's
+Jessamyn/M
+Jessee/M
+Jesselyn/M
+Jesse/M
+Jessey/M
+Jessica/M
+Jessie/M
+Jessika/M
+Jessi/M
+jess/M
+Jess/M
+Jessy/M
+jest/DRSGZM
+jester/M
+jesting/Y
+Jesuit/SM
+Jesus
+Jeth/M
+Jethro/M
+jetliner/MS
+jet/MS
+jetport/SM
+jetsam/MS
+jetted/M
+jetting/M
+jettison/DSG
+jetty/RSDGMT
+jeweler/M
+jewelery/S
+jewel/GZMRDS
+Jewelled/M
+Jewelle/M
+jewellery's
+Jewell/MD
+Jewel/M
+jewelry/MS
+Jewess/SM
+Jewishness/MS
+Jewish/P
+Jew/MS
+Jewry/MS
+Jezebel/MS
+j/F
+JFK/M
+jg/M
+jibbed
+jibbing
+jibe/S
+jib/MDSG
+Jidda/M
+jiff/S
+jiffy/SM
+jigged
+jigger/SDMG
+jigging/M
+jiggle/SDG
+jiggly/TR
+jig/MS
+jigsaw/GSDM
+jihad/SM
+Jilin
+Jillana/M
+Jillane/M
+Jillayne/M
+Jilleen/M
+Jillene/M
+Jillian/M
+Jillie/M
+Jilli/M
+Jill/M
+Jilly/M
+jilt/DRGS
+jilter/M
+Jimenez/M
+Jim/M
+Jimmie/M
+jimmy/GSDM
+Jimmy/M
+jimsonweed/S
+Jinan
+jingler/M
+jingle/RSDG
+jingly/TR
+jingoism/SM
+jingoistic
+jingoist/SM
+jingo/M
+Jinnah/M
+jinni's
+jinn/MS
+Jinny/M
+jinrikisha/SM
+jinx/GMDS
+jitney/MS
+jitterbugged
+jitterbugger
+jitterbugging
+jitterbug/SM
+jitter/S
+jittery/TR
+jiujitsu's
+Jivaro/M
+jive/MGDS
+Joachim/M
+Joana/M
+Joane/M
+Joanie/M
+Joan/M
+Joanna/M
+Joanne/SM
+Joann/M
+Joaquin/M
+jobbed
+jobber/MS
+jobbery/M
+jobbing/M
+Jobey/M
+jobholder/SM
+Jobie/M
+Jobi/M
+Jobina/M
+joblessness/MS
+jobless/P
+Jobrel/M
+job/SM
+Job/SM
+Jobye/M
+Joby/M
+Jobyna/M
+Jocasta/M
+Joceline/M
+Jocelin/M
+Jocelyne/M
+Jocelyn/M
+jockey/SGMD
+jock/GDMS
+Jock/M
+Jocko/M
+jockstrap/MS
+jocoseness/MS
+jocose/YP
+jocosity/SM
+jocularity/SM
+jocular/Y
+jocundity/SM
+jocund/Y
+Jodee/M
+jodhpurs
+Jodie/M
+Jodi/M
+Jody/M
+Joeann/M
+Joela/M
+Joelie/M
+Joella/M
+Joelle/M
+Joellen/M
+Joell/MN
+Joelly/M
+Joellyn/M
+Joel/MY
+Joelynn/M
+Joe/M
+Joesph/M
+Joete/M
+joey/M
+Joey/M
+jogged
+jogger/SM
+jogging/S
+joggler/M
+joggle/SRDG
+Jogjakarta/M
+jog/S
+Johan/M
+Johannah/M
+Johanna/M
+Johannes
+Johannesburg/M
+Johann/M
+Johansen/M
+Johanson/M
+Johna/MH
+Johnathan/M
+Johnath/M
+Johnathon/M
+Johnette/M
+Johnie/M
+Johnna/M
+Johnnie/M
+johnnycake/SM
+Johnny/M
+johnny/SM
+Johnsen/M
+john/SM
+John/SM
+Johns/N
+Johnson/M
+Johnston/M
+Johnstown/M
+Johny/M
+Joice/M
+join/ADGFS
+joined/U
+joiner/FSM
+joinery/MS
+jointed/EYP
+jointedness/ME
+joint/EGDYPS
+jointer/M
+jointly/F
+joint's
+jointures
+joist/GMDS
+Jojo/M
+joke/MZDSRG
+joker/M
+jokey
+jokier
+jokiest
+jokily
+joking/Y
+Jolee/M
+Joleen/M
+Jolene/M
+Joletta/M
+Jolie/M
+Joliet's
+Joli/M
+Joline/M
+Jolla/M
+jollification/MS
+jollily
+jolliness/SM
+jollity/MS
+jolly/TSRDGP
+Jolson/M
+jolt/DRGZS
+jolter/M
+Joly/M
+Jolyn/M
+Jolynn/M
+Jo/MY
+Jonah/M
+Jonahs
+Jonas
+Jonathan/M
+Jonathon/M
+Jonell/M
+Jone/MS
+Jones/S
+Jonie/M
+Joni/MS
+Jon/M
+jonquil/MS
+Jonson/M
+Joplin/M
+Jordain/M
+Jordana/M
+Jordanian/S
+Jordan/M
+Jordanna/M
+Jordon/M
+Jorey/M
+Jorgan/M
+Jorge/M
+Jorgensen/M
+Jorgenson/M
+Jorie/M
+Jori/M
+Jorrie/M
+Jorry/M
+Jory/M
+Joscelin/M
+Josee/M
+Josefa/M
+Josefina/M
+Josef/M
+Joseito/M
+Jose/M
+Josepha/M
+Josephina/M
+Josephine/M
+Joseph/M
+Josephs
+Josephson/M
+Josephus/M
+Josey/M
+josh/DSRGZ
+josher/M
+Joshia/M
+Josh/M
+Joshuah/M
+Joshua/M
+Josiah/M
+Josias/M
+Josie/M
+Josi/M
+Josselyn/M
+joss/M
+jostle/SDG
+Josue/M
+Josy/M
+jot/S
+jotted
+jotter/SM
+jotting/SM
+Joule/M
+joule/SM
+jounce/SDG
+jouncy/RT
+Jourdain/M
+Jourdan/M
+journalese/MS
+journal/GSDM
+journalism/SM
+journalistic
+journalist/SM
+journalize/DRSGZ
+journalized/U
+journalizer/M
+journey/DRMZSGJ
+journeyer/M
+journeyman/M
+journeymen
+jouster/M
+joust/ZSMRDG
+Jovanovich/M
+Jove/M
+joviality/SM
+jovial/Y
+Jovian
+jowl/SMD
+jowly/TR
+Joya/M
+Joyan/M
+Joyann/M
+Joycean
+Joycelin/M
+Joyce/M
+Joye/M
+joyfuller
+joyfullest
+joyfulness/SM
+joyful/PY
+joylessness/MS
+joyless/PY
+Joy/M
+joy/MDSG
+Joyner/M
+joyousness/MS
+joyous/YP
+joyridden
+joyride/SRZMGJ
+joyrode
+joystick/S
+Jozef/M
+JP
+Jpn
+Jr/M
+j's
+J's
+Jsandye/M
+Juana/M
+Juanita/M
+Juan/M
+Juarez
+Jubal/M
+jubilant/Y
+jubilate/XNGDS
+jubilation/M
+jubilee/SM
+Judah/M
+Judaic
+Judaical
+Judaism/SM
+Judas/S
+juddered
+juddering
+Judd/M
+Judea/M
+Jude/M
+judge/AGDS
+judger/M
+judge's
+judgeship/SM
+judgmental/Y
+judgment/MS
+judicable
+judicatory/S
+judicature/MS
+judicial/Y
+judiciary/S
+judicious/IYP
+judiciousness/SMI
+Judie/M
+Judi/MH
+Juditha/M
+Judith/M
+Jud/M
+judo/MS
+Judon/M
+Judson/M
+Judye/M
+Judy/M
+jugate/F
+jugful/SM
+jugged
+Juggernaut/M
+juggernaut/SM
+jugging
+juggler/M
+juggle/RSDGZ
+jugglery/MS
+jug/MS
+jugular/S
+juice/GMZDSR
+juicer/M
+juicily
+juiciness/MS
+juicy/TRP
+Juieta/M
+jujitsu/MS
+jujube/SM
+juju/M
+jujutsu's
+jukebox/SM
+juke/GS
+Julee/M
+Jule/MS
+julep/SM
+Julia/M
+Juliana/M
+Juliane/M
+Julian/M
+Julianna/M
+Julianne/M
+Juliann/M
+Julie/M
+julienne/GSD
+Julienne/M
+Julieta/M
+Juliet/M
+Julietta/M
+Juliette/M
+Juli/M
+Julina/M
+Juline/M
+Julio/M
+Julissa/M
+Julita/M
+Julius/M
+Jul/M
+Julys
+July/SM
+jumble/GSD
+jumbo/MS
+jumper/M
+jump/GZDRS
+jumpily
+jumpiness/MS
+jumpsuit/S
+jumpy/PTR
+jun
+junco/MS
+junction/IMESF
+juncture/SFM
+Juneau/M
+June/MS
+Junette/M
+Jungfrau/M
+Jungian
+jungle/SDM
+Jung/M
+Junia/M
+Junie/M
+Junina/M
+juniority/M
+junior/MS
+Junior/S
+juniper/SM
+junkerdom
+Junker/SM
+junketeer/SGDM
+junket/SMDG
+junk/GZDRMS
+junkie/RSMT
+junkyard/MS
+Jun/M
+Juno/M
+junta/MS
+Jupiter/M
+Jurassic
+juridic
+juridical/Y
+juried
+jurisdictional/Y
+jurisdiction/SM
+jurisprudence/SM
+jurisprudent
+jurisprudential/Y
+juristic
+jurist/MS
+juror/MS
+Jurua/M
+jury/IMS
+jurying
+juryman/M
+jurymen
+jurywoman/M
+jurywomen
+justed
+Justen/M
+juster/M
+justest
+Justice/M
+justice/MIS
+justiciable
+justifiability/M
+justifiable/U
+justifiably/U
+justification/M
+justified/UA
+justifier/M
+justify/GDRSXZN
+Justina/M
+Justine/M
+justing
+Justinian/M
+Justin/M
+Justinn/M
+Justino/M
+Justis/M
+justness/MS
+justness's/U
+justs
+just/UPY
+Justus/M
+jute/SM
+Jutish
+Jutland/M
+jut/S
+jutted
+jutting
+Juvenal/M
+juvenile/SM
+juxtapose/SDG
+juxtaposition/SM
+JV
+J/X
+Jyoti/M
+Kaaba/M
+kabob/SM
+kaboom
+Kabuki
+kabuki/SM
+Kabul/M
+Kacey/M
+Kacie/M
+Kacy/M
+Kaddish/M
+kaddish/S
+Kaela/M
+kaffeeklatch
+kaffeeklatsch/S
+Kafkaesque
+Kafka/M
+kaftan's
+Kagoshima/M
+Kahaleel/M
+Kahlil/M
+Kahlua/M
+Kahn/M
+Kaia/M
+Kaifeng/M
+Kaila/M
+Kaile/M
+Kailey/M
+Kai/M
+Kaine/M
+Kain/M
+kaiser/MS
+Kaiser/SM
+Kaitlin/M
+Kaitlyn/M
+Kaitlynn/M
+Kaja/M
+Kajar/M
+Kakalina/M
+Kalahari/M
+Kala/M
+Kalamazoo/M
+Kalashnikov/M
+Kalb/M
+Kaleb/M
+Kaleena/M
+kaleidescope
+kaleidoscope/SM
+kaleidoscopic
+kaleidoscopically
+Kale/M
+kale/MS
+Kalgoorlie/M
+Kalie/M
+Kalila/M
+Kalil/M
+Kali/M
+Kalina/M
+Kalinda/M
+Kalindi/M
+Kalle/M
+Kalli/M
+Kally/M
+Kalmyk
+Kalvin/M
+Kama/M
+Kamchatka/M
+Kamehameha/M
+Kameko/M
+Kamikaze/MS
+kamikaze/SM
+Kamilah/M
+Kamila/M
+Kamillah/M
+Kampala/M
+Kampuchea/M
+Kanchenjunga/M
+Kandace/M
+Kandahar/M
+Kandinsky/M
+Kandy/M
+Kane/M
+kangaroo/SGMD
+Kania/M
+Kankakee/M
+Kan/MS
+Kannada/M
+Kano/M
+Kanpur/M
+Kansan/S
+Kansas
+Kantian
+Kant/M
+Kanya/M
+Kaohsiung/M
+kaolinite/M
+kaolin/MS
+Kaplan/M
+kapok/SM
+Kaposi/M
+kappa/MS
+kaput/M
+Karachi/M
+Karaganda/M
+Karakorum/M
+karakul/MS
+Karalee/M
+Karalynn/M
+Kara/M
+Karamazov/M
+karaoke/S
+karate/MS
+karat/SM
+Karee/M
+Kareem/M
+Karel/M
+Kare/M
+Karena/M
+Karenina/M
+Karen/M
+Karia/M
+Karie/M
+Karil/M
+Karilynn/M
+Kari/M
+Karim/M
+Karina/M
+Karine/M
+Karin/M
+Kariotta/M
+Karisa/M
+Karissa/M
+Karita/M
+Karla/M
+Karlan/M
+Karlee/M
+Karleen/M
+Karlene/M
+Karlen/M
+Karlie/M
+Karlik/M
+Karlis
+Karl/MNX
+Karloff/M
+Karlotta/M
+Karlotte/M
+Karly/M
+Karlyn/M
+karma/SM
+Karmen/M
+karmic
+Karna/M
+Karney/M
+Karola/M
+Karole/M
+Karolina/M
+Karoline/M
+Karol/M
+Karoly/M
+Karon/M
+Karo/YM
+Karp/M
+Karrah/M
+Karrie/M
+Karroo/M
+Karry/M
+kart/MS
+Karylin/M
+Karyl/M
+Kary/M
+Karyn/M
+Kasai/M
+Kasey/M
+Kashmir/SM
+Kaspar/M
+Kasparov/M
+Kasper/M
+Kass
+Kassandra/M
+Kassey/M
+Kassia/M
+Kassie/M
+Kassi/M
+katakana
+Katalin/M
+Kata/M
+Katee/M
+Katelyn/M
+Kate/M
+Katerina/M
+Katerine/M
+Katey/M
+Katha/M
+Katharina/M
+Katharine/M
+Katharyn/M
+Kathe/M
+Katherina/M
+Katherine/M
+Katheryn/M
+Kathiawar/M
+Kathie/M
+Kathi/M
+Kathleen/M
+Kathlin/M
+Kath/M
+Kathmandu
+Kathrine/M
+Kathryne/M
+Kathryn/M
+Kathye/M
+Kathy/M
+Katie/M
+Kati/M
+Katina/M
+Katine/M
+Katinka/M
+Katleen/M
+Katlin/M
+Kat/M
+Katmai/M
+Katmandu's
+Katowice/M
+Katrina/M
+Katrine/M
+Katrinka/M
+Kattie/M
+Katti/M
+Katuscha/M
+Katusha/M
+Katya/M
+katydid/SM
+Katy/M
+Katz/M
+Kauai/M
+Kauffman/M
+Kaufman/M
+Kaunas/M
+Kaunda/M
+Kawabata/M
+Kawasaki/M
+kayak/SGDM
+Kaycee/M
+Kaye/M
+Kayla/M
+Kaylee/M
+Kayle/M
+Kayley/M
+Kaylil/M
+Kaylyn/M
+Kay/M
+Kayne/M
+kayo/DMSG
+Kazakh/M
+Kazakhstan
+Kazan/M
+Kazantzakis/M
+kazoo/SM
+Kb
+KB
+KC
+kcal/M
+kc/M
+KDE/M
+Keane/M
+Kean/M
+Kearney/M
+Keary/M
+Keaton/M
+Keats/M
+kebab/SM
+Keck/M
+Keefe/MR
+Keefer/M
+Keegan/M
+Keelby/M
+Keeley/M
+keel/GSMDR
+keelhaul/SGD
+Keelia/M
+Keely/M
+Keenan/M
+Keene/M
+keener/M
+keen/GTSPYDR
+keening/M
+Keen/M
+keenness/MS
+keeper/M
+keep/GZJSR
+keeping/M
+keepsake/SM
+Keewatin/M
+kegged
+kegging
+keg/MS
+Keillor/M
+Keir/M
+Keisha/M
+Keith/M
+Kelbee/M
+Kelby/M
+Kelcey/M
+Kelcie/M
+Kelci/M
+Kelcy/M
+Kele/M
+Kelila/M
+Kellby/M
+Kellen/M
+Keller/M
+Kelley/M
+Kellia/M
+Kellie/M
+Kelli/M
+Kellina/M
+Kellogg/M
+Kellsie/M
+Kellyann/M
+Kelly/M
+kelp/GZMDS
+Kelsey/M
+Kelsi/M
+Kelsy/M
+Kelt's
+Kelvin/M
+kelvin/MS
+Kelwin/M
+Kemerovo/M
+Kempis/M
+Kemp/M
+Kendall/M
+Kendal/M
+Kendell/M
+Kendra/M
+Kendre/M
+Kendrick/MS
+Kenilworth/M
+Ken/M
+Kenmore/M
+ken/MS
+Kenna/M
+Kennan/M
+Kennecott/M
+kenned
+Kennedy/M
+kennel/GSMD
+Kenneth/M
+Kennett/M
+Kennie/M
+kenning
+Kennith/M
+Kenn/M
+Kenny/M
+keno/M
+Kenon/M
+Kenosha/M
+Kensington/M
+Kent/M
+Kenton/M
+Kentuckian/S
+Kentucky/M
+Kenya/M
+Kenyan/S
+Kenyatta/M
+Kenyon/M
+Keogh/M
+Keokuk/M
+kepi/SM
+Kepler/M
+kept
+keratin/MS
+kerbside
+Kerby/M
+kerchief/MDSG
+Kerensky/M
+Kerianne/M
+Keriann/M
+Keri/M
+Kerk/M
+Ker/M
+Kermie/M
+Kermit/M
+Kermy/M
+kerned
+kernel/GSMD
+kerning
+Kern/M
+kerosene/MS
+Kerouac/M
+Kerrie/M
+Kerrill/M
+Kerri/M
+Kerrin/M
+Kerr/M
+Kerry/M
+Kerstin/M
+Kerwin/M
+Kerwinn/M
+Kesley/M
+Keslie/M
+Kessiah/M
+Kessia/M
+Kessler/M
+kestrel/SM
+ketch/MS
+ketchup/SM
+ketone/M
+ketosis/M
+Kettering/M
+Kettie/M
+Ketti/M
+kettledrum/SM
+kettleful
+kettle/SM
+Ketty/M
+Kevan/M
+Keven/M
+Kevina/M
+Kevin/M
+Kevlar
+Kev/MN
+Kevon/M
+Kevorkian/M
+Kevyn/M
+Kewaskum/M
+Kewaunee/M
+Kewpie/M
+keyboardist/S
+keyboard/RDMZGS
+keyclick/SM
+keyhole/MS
+Key/M
+Keynesian/M
+Keynes/M
+keynoter/M
+keynote/SRDZMG
+keypad/MS
+keypuncher/M
+keypunch/ZGRSD
+keyring
+key/SGMD
+keystone/SM
+keystroke/SDMG
+keyword/SM
+k/FGEIS
+kg
+K/G
+KGB
+Khabarovsk/M
+Khachaturian/M
+khaki/SM
+Khalid/M
+Khalil/M
+Khan/M
+khan/MS
+Kharkov/M
+Khartoum/M
+Khayyam/M
+Khmer/M
+Khoisan/M
+Khomeini/M
+Khorana/M
+Khrushchev/SM
+Khufu/M
+Khulna/M
+Khwarizmi/M
+Khyber/M
+kHz/M
+KIA
+Kiah/M
+Kial/M
+kibble/GMSD
+kibbutzim
+kibbutz/M
+kibitzer/M
+kibitz/GRSDZ
+kibosh/GMSD
+Kickapoo/M
+kickback/SM
+kickball/MS
+kicker/M
+kick/GZDRS
+kickoff/SM
+kickstand/MS
+kicky/RT
+kidded
+kidder/SM
+kiddie/SD
+kidding/YM
+kiddish
+Kidd/M
+kiddo/SM
+kiddying
+kiddy's
+kidless
+kid/MS
+kidnaper's
+kidnaping's
+kidnap/MSJ
+kidnapped
+kidnapper/SM
+kidnapping/S
+kidney/MS
+kidskin/SM
+Kieffer/M
+kielbasa/SM
+kielbasi
+Kiele/M
+Kiel/M
+Kienan/M
+kier/I
+Kierkegaard/M
+Kiersten/M
+Kieth/M
+Kiev/M
+Kigali/M
+Kikelia/M
+Kikuyu/M
+Kilauea/M
+Kile/M
+Kiley/M
+Kilian/M
+Kilimanjaro/M
+kill/BJGZSDR
+killdeer/SM
+Killebrew/M
+killer/M
+Killian/M
+Killie/M
+killing/Y
+killjoy/S
+Killy/M
+kiln/GDSM
+kilobaud/M
+kilobit/S
+kilobuck
+kilobyte/S
+kilocycle/MS
+kilogauss/M
+kilogram/MS
+kilohertz/M
+kilohm/M
+kilojoule/MS
+kiloliter/MS
+kilometer/SM
+kilo/SM
+kiloton/SM
+kilovolt/SM
+kilowatt/SM
+kiloword
+kilter/M
+kilt/MDRGZS
+Ki/M
+Kimball/M
+Kimbell/M
+Kimberlee/M
+Kimberley/M
+Kimberli/M
+Kimberly/M
+Kimberlyn/M
+Kimble/M
+Kimbra/M
+Kim/M
+Kimmie/M
+Kimmi/M
+Kimmy/M
+kimono/MS
+Kincaid/M
+kinda
+kindergarten/MS
+kindergärtner/SM
+kinder/U
+kindheartedness/MS
+kindhearted/YP
+kindle/AGRSD
+kindler/M
+kindliness/SM
+kindliness's/U
+kindling/M
+kindly/TUPR
+kindness's
+kindness/US
+kind/PSYRT
+kindred/S
+kinematic/S
+kinematics/M
+kinesics/M
+kine/SM
+kinesthesis
+kinesthetically
+kinesthetic/S
+kinetically
+kinetic/S
+kinetics/M
+kinfolk/S
+kingbird/M
+kingdom/SM
+kingfisher/MS
+kinglet/M
+kingliness/M
+kingly/TPR
+King/M
+kingpin/MS
+Kingsbury/M
+king/SGYDM
+kingship/SM
+Kingsley/M
+Kingsly/M
+Kingston/M
+Kingstown/M
+Kingwood/M
+kink/GSDM
+kinkily
+kinkiness/SM
+kinky/PRT
+Kin/M
+kin/MS
+Kinna/M
+Kinney/M
+Kinnickinnic/M
+Kinnie/M
+Kinny/M
+Kinsey/M
+kinsfolk/S
+Kinshasa/M
+Kinshasha/M
+kinship/SM
+Kinsley/M
+kinsman/M
+kinsmen/M
+kinswoman/M
+kinswomen
+kiosk/SM
+Kiowa/SM
+Kipling/M
+Kip/M
+kip/MS
+Kippar/M
+kipped
+kipper/DMSG
+Kipper/M
+Kippie/M
+kipping
+Kipp/MR
+Kippy/M
+Kira/M
+Kirbee/M
+Kirbie/M
+Kirby/M
+Kirchhoff/M
+Kirchner/M
+Kirchoff/M
+Kirghistan/M
+Kirghizia/M
+Kirghiz/M
+Kiribati
+Kiri/M
+Kirinyaga/M
+kirk/GDMS
+Kirkland/M
+Kirk/M
+Kirkpatrick/M
+Kirkwood/M
+Kirov/M
+kirsch/S
+Kirsteni/M
+Kirsten/M
+Kirsti/M
+Kirstin/M
+Kirstyn/M
+Kisangani/M
+Kishinev/M
+kismet/SM
+kiss/DSRBJGZ
+Kissee/M
+kisser/M
+Kissiah/M
+Kissie/M
+Kissinger/M
+Kitakyushu/M
+kitbag's
+kitchener/M
+Kitchener/M
+kitchenette/SM
+kitchen/GDRMS
+kitchenware/SM
+kiter/M
+kite/SM
+kith/MDG
+kiths
+Kit/M
+kit/MDRGS
+kitsch/MS
+kitschy
+kitted
+kittenishness/M
+kittenish/YP
+kitten/SGDM
+Kittie/M
+Kitti/M
+kitting
+kittiwakes
+Kitty/M
+kitty/SM
+Kiwanis/M
+kiwifruit/S
+kiwi/SM
+Kizzee/M
+Kizzie/M
+KKK
+kl
+Klan/M
+Klansman/M
+Klara/M
+Klarika/M
+Klarrisa/M
+Klaus/M
+klaxon/M
+Klee/M
+Kleenex/SM
+Klein/M
+Kleinrock/M
+Klemens/M
+Klement/M
+Kleon/M
+kleptomaniac/SM
+kleptomania/MS
+Kliment/M
+Kline/M
+Klingon/M
+Klondike/SDMG
+kludger/M
+kludge/RSDGMZ
+kludgey
+klutziness/S
+klutz/SM
+klutzy/TRP
+Klux/M
+klystron/MS
+km
+kn
+knacker/M
+knack/SGZRDM
+knackwurst/MS
+Knapp/M
+knapsack/MS
+Knauer/M
+knavery/MS
+knave/SM
+knavish/Y
+kneader/M
+knead/GZRDS
+kneecap/MS
+kneecapped
+kneecapping
+knee/DSM
+kneeing
+kneeler/M
+kneel/GRS
+kneepad/SM
+knell/SMDG
+knelt
+Knesset/M
+knew
+Kngwarreye/M
+Knickerbocker/MS
+knickerbocker/S
+knickknack/SM
+knick/ZR
+Knievel/M
+knife/DSGM
+knighthood/MS
+knightliness/MS
+knightly/P
+Knight/M
+knight/MDYSG
+knish/MS
+knit/AU
+knits
+knitted
+knitter/MS
+knitting/SM
+knitwear/M
+knives/M
+knobbly
+knobby/RT
+Knobeloch/M
+knob/MS
+knockabout/M
+knockdown/S
+knocker/M
+knock/GZSJRD
+knockoff/S
+knockout/MS
+knockwurst's
+knoll/MDSG
+Knopf/M
+Knossos/M
+knothole/SM
+knot/MS
+knotted
+knottiness/M
+knotting/M
+knotty/TPR
+knowable/U
+knower/M
+know/GRBSJ
+knowhow
+knowingly/U
+knowing/RYT
+knowings/U
+knowledgeableness/M
+knowledgeable/P
+knowledgeably
+knowledge/SM
+Knowles
+known/SU
+Knox/M
+Knoxville/M
+knuckleball/R
+knuckle/DSMG
+knuckleduster
+knucklehead/MS
+Knudsen/M
+Knudson/M
+knurl/DSG
+Knuth/M
+Knutsen/M
+Knutson/M
+KO
+koala/SM
+Kobayashi/M
+Kobe/M
+Kochab/M
+Koch/M
+Kodachrome/M
+Kodak/SM
+Kodaly/M
+Kodiak/M
+Koenig/M
+Koenigsberg/M
+Koenraad/M
+Koestler/M
+Kohinoor/M
+Kohler/M
+Kohl/MR
+kohlrabies
+kohlrabi/M
+kola/SM
+Kolyma/M
+Kommunizma/M
+Kong/M
+Kongo/M
+Konrad/M
+Konstance/M
+Konstantine/M
+Konstantin/M
+Konstanze/M
+kookaburra/SM
+kook/GDMS
+kookiness/S
+kooky/PRT
+Koo/M
+Koontz/M
+kopeck/MS
+Koppers/M
+Koralle/M
+Koral/M
+Kora/M
+Koranic
+Koran/SM
+Kordula/M
+Korea/M
+Korean/S
+Korella/M
+Kore/M
+Koren/M
+Koressa/M
+Korey/M
+Korie/M
+Kori/M
+Kornberg/M
+Korney/M
+Korrie/M
+Korry/M
+Kort/M
+Kory/M
+Korzybski/M
+Kosciusko/M
+kosher/DGS
+Kossuth/M
+Kosygin/M
+Kovacs/M
+Kowalewski/M
+Kowalski/M
+Kowloon/M
+kowtow/SGD
+KP
+kph
+kraal/SMDG
+Kraemer/M
+kraft/M
+Kraft/M
+Krakatau's
+Krakatoa/M
+Krakow/M
+Kramer/M
+Krasnodar/M
+Krasnoyarsk/M
+Krause/M
+kraut/S!
+Krebs/M
+Kremlin/M
+Kremlinologist/MS
+Kremlinology/MS
+Kresge/M
+Krieger/M
+kriegspiel/M
+krill/MS
+Kringle/M
+Krisha/M
+Krishnah/M
+Krishna/M
+Kris/M
+Krispin/M
+Krissie/M
+Krissy/M
+Kristal/M
+Krista/M
+Kristan/M
+Kristel/M
+Kriste/M
+Kristen/M
+Kristian/M
+Kristie/M
+Kristien/M
+Kristi/MN
+Kristina/M
+Kristine/M
+Kristin/M
+Kristofer/M
+Kristoffer/M
+Kristofor/M
+Kristoforo/M
+Kristo/MS
+Kristopher/M
+Kristy/M
+Kristyn/M
+Kr/M
+Kroc/M
+Kroger/M
+króna/M
+Kronecker/M
+krone/RM
+kronor
+krónur
+Kropotkin/M
+Krueger/M
+Kruger/M
+Krugerrand/S
+Krupp/M
+Kruse/M
+krypton/SM
+Krystalle/M
+Krystal/M
+Krysta/M
+Krystle/M
+Krystyna/M
+ks
+K's
+KS
+k's/IE
+kt
+Kublai/M
+Kubrick/M
+kuchen/MS
+kudos/M
+kudzu/SM
+Kuenning/M
+Kuhn/M
+Kuibyshev/M
+Ku/M
+Kumar/M
+kumquat/SM
+Kunming/M
+Kuomintang/M
+Kurdish/M
+Kurdistan/SM
+Kurd/SM
+Kurosawa/M
+Kurtis/M
+Kurt/M
+kurtosis/M
+Kusch/M
+Kuwaiti/SM
+Kuwait/M
+Kuznetsk/M
+Kuznets/M
+kvetch/DSG
+kw
+kW
+Kwakiutl/M
+Kwangchow's
+Kwangju/M
+Kwanzaa/S
+kWh
+KY
+Kyla/M
+kyle/M
+Kyle/M
+Kylen/M
+Kylie/M
+Kylila/M
+Kylynn/M
+Ky/MH
+Kym/M
+Kynthia/M
+Kyoto/M
+Kyrgyzstan
+Kyrstin/M
+Kyushu/M
+L
+LA
+Laban/M
+labeled/U
+labeler/M
+label/GAZRDS
+labellings/A
+label's
+labial/YS
+labia/M
+labile
+labiodental
+labium/M
+laboratory/MS
+laboredness/M
+labored/PMY
+labored's/U
+laborer/M
+laboring/MY
+laborings/U
+laboriousness/MS
+laborious/PY
+labor/RDMJSZG
+laborsaving
+Labradorean/S
+Labrador/SM
+lab/SM
+Lab/SM
+laburnum/SM
+labyrinthine
+labyrinth/M
+labyrinths
+laced/U
+Lacee/M
+lace/MS
+lacerate/NGVXDS
+laceration/M
+lacer/M
+laces/U
+lacewing/MS
+Lacey/M
+Lachesis/M
+lachrymal/S
+lachrymose
+Lacie/M
+lacing/M
+lackadaisic
+lackadaisical/Y
+Lackawanna/M
+lacker/M
+lackey/SMDG
+lack/GRDMS
+lackluster/S
+Lac/M
+laconic
+laconically
+lacquerer/M
+lacquer/ZGDRMS
+lacrosse/MS
+lac/SGMDR
+lactate/MNGSDX
+lactational/Y
+lactation/M
+lacteal
+lactic
+lactose/MS
+lacunae
+lacuna/M
+Lacy/M
+lacy/RT
+ladder/GDMS
+laddie/MS
+laded/U
+ladened
+ladening
+laden/U
+lade/S
+lading/M
+ladle/SDGM
+Ladoga/M
+Ladonna/M
+lad/XGSJMND
+ladybird/SM
+ladybug/MS
+ladyfinger/SM
+ladylike/U
+ladylove/MS
+Ladyship/MS
+ladyship/SM
+lady/SM
+Lady/SM
+Laetitia/M
+laetrile/S
+Lafayette/M
+Lafitte/M
+lager/DMG
+laggard/MYSP
+laggardness/M
+lagged
+lagging/MS
+lagniappe/SM
+lagoon/MS
+Lagos/M
+Lagrange/M
+Lagrangian/M
+Laguerre/M
+Laguna/M
+lag/ZSR
+Lahore/M
+laid/AI
+Laidlaw/M
+lain
+Laina/M
+Lainey/M
+Laird/M
+laird/MS
+lair/GDMS
+laissez
+laity/SM
+Laius/M
+lake/DSRMG
+Lakehurst/M
+Lakeisha/M
+laker/M
+lakeside
+Lakewood/M
+Lakisha/M
+Lakshmi/M
+lallygagged
+lallygagging
+lallygag/S
+Lalo/M
+La/M
+Lamaism/SM
+Lamarck/M
+Lamar/M
+lamasery/MS
+lama/SM
+Lamaze
+lambada/S
+lambaste/SDG
+lambda/SM
+lambency/MS
+lambent/Y
+Lambert/M
+lambkin/MS
+Lamb/M
+Lamborghini/M
+lambskin/MS
+lamb/SRDMG
+lambswool
+lamebrain/SM
+lamed/M
+lameness/MS
+lamentableness/M
+lamentable/P
+lamentably
+lamentation/SM
+lament/DGSB
+lamented/U
+lame/SPY
+la/MHLG
+laminae
+lamina/M
+laminar
+laminate/XNGSD
+lamination/M
+lam/MDRSTG
+lammed
+lammer
+lamming
+Lammond/M
+Lamond/M
+Lamont/M
+L'Amour
+lampblack/SM
+lamplighter/M
+lamplight/ZRMS
+lampooner/M
+lampoon/RDMGS
+Lamport/M
+lamppost/SM
+lamprey/MS
+lamp/SGMRD
+lampshade/MS
+LAN
+Lanae/M
+Lanai/M
+lanai/SM
+Lana/M
+Lancashire/M
+Lancaster/M
+Lancelot/M
+Lance/M
+lancer/M
+lance/SRDGMZ
+lancet/MS
+landau/MS
+lander/I
+landfall/SM
+landfill/DSG
+landforms
+landholder/M
+landhold/JGZR
+landing/M
+Landis/M
+landlady/MS
+landless
+landlines
+landlocked
+landlord/MS
+landlubber/SM
+Land/M
+landmark/GSMD
+landmass/MS
+Landon/M
+landowner/MS
+landownership/M
+landowning/SM
+Landry/M
+Landsat
+landscape/GMZSRD
+landscaper/M
+lands/I
+landslide/MS
+landslid/G
+landslip
+landsman/M
+landsmen
+land/SMRDJGZ
+Landsteiner/M
+landward/S
+Landwehr/M
+Lane/M
+lane/SM
+Lanette/M
+Laney/M
+Langeland/M
+Lange/M
+Langerhans/M
+Langford/M
+Langland/M
+Langley/M
+Lang/M
+Langmuir/M
+Langsdon/M
+Langston/M
+language/MS
+languidness/MS
+languid/PY
+languisher/M
+languishing/Y
+languish/SRDG
+languorous/Y
+languor/SM
+Lanie/M
+Lani/M
+Lanita/M
+lankiness/SM
+lankness/MS
+lank/PTYR
+lanky/PRT
+Lanna/M
+Lannie/M
+Lanni/M
+Lanny/M
+lanolin/MS
+Lansing/M
+lantern/GSDM
+lanthanide/M
+lanthanum/MS
+lanyard/MS
+Lanzhou
+Laocoon/M
+Lao/SM
+Laotian/MS
+lapboard/MS
+lapdog/S
+lapel/MS
+lapidary/MS
+lapin/MS
+Laplace/M
+Lapland/ZMR
+lapped
+lappet/MS
+lapping
+Lapp/SM
+lapsed/A
+lapse/KSDMG
+lapser/MA
+lapses/A
+lapsing/A
+lap/SM
+laps/SRDG
+laptop/SM
+lapwing/MS
+Laraine/M
+Lara/M
+Laramie/M
+larboard/MS
+larcenist/S
+larcenous
+larceny/MS
+larch/MS
+larder/M
+lard/MRDSGZ
+Lardner/M
+lardy/RT
+Laredo/M
+largehearted
+largemouth
+largeness/SM
+large/SRTYP
+largess/SM
+largish
+largo/S
+lariat/MDGS
+Lari/M
+Larina/M
+Larine/M
+Larisa/M
+Larissa/M
+larker/M
+lark/GRDMS
+Lark/M
+larkspur/MS
+Larousse/M
+Larry/M
+Larsen/M
+Lars/NM
+Larson/M
+larvae
+larval
+larva/M
+laryngeal/YS
+larynges
+laryngitides
+laryngitis/M
+larynx/M
+Laryssa/M
+lasagna/S
+lasagne's
+Lascaux/M
+lasciviousness/MS
+lascivious/YP
+lase
+laser/M
+lashed/U
+lasher/M
+lashing/M
+lash/JGMSRD
+Lassa/M
+Lassen/M
+Lassie/M
+lassie/SM
+lassitude/MS
+lassoer/M
+lasso/GRDMS
+las/SRZG
+lass/SM
+laster/M
+lastingness/M
+lasting/PY
+last/JGSYRD
+Laszlo/M
+Latasha/M
+Latashia/M
+latching/M
+latchkey/SM
+latch's
+latch/UGSD
+latecomer/SM
+lated/A
+late/KA
+lately
+latency/MS
+lateness/MS
+latent/YS
+later/A
+lateral/GDYS
+lateralization
+Lateran/M
+latest/S
+LaTeX/M
+latex/MS
+lathe/M
+latherer/M
+lather/RDMG
+lathery
+lathing/M
+lath/MSRDGZ
+Lathrop/M
+laths
+Latia/M
+latices/M
+Latina/SM
+Latinate
+Latino/S
+Latin/RMS
+latish
+Latisha/M
+latitude/SM
+latitudinal/Y
+latitudinarian/S
+latitudinary
+Lat/M
+Latonya/M
+Latoya/M
+Latrena/M
+Latrina/M
+latrine/MS
+Latrobe/M
+lat/SDRT
+latter/YM
+latte/SR
+lattice/SDMG
+latticework/MS
+latticing/M
+Lattimer/M
+Latvia/M
+Latvian/S
+laudably
+laudanum/MS
+laudatory
+Lauderdale/M
+lauder/M
+Lauder/M
+Laud/MR
+laud/RDSBG
+lauds/M
+Laue/M
+laughableness/M
+laughable/P
+laughably
+laugh/BRDZGJ
+laugher/M
+laughing/MY
+laughingstock/SM
+laughs
+laughter/MS
+Laughton/M
+Launce/M
+launch/AGSD
+launcher/MS
+launching/S
+launchpad/S
+laundered/U
+launderer/M
+launderette/MS
+launder/SDRZJG
+laundress/MS
+laundrette/S
+laundromat/S
+Laundromat/SM
+laundryman/M
+laundrymen
+laundry/MS
+laundrywoman/M
+laundrywomen
+Lauraine/M
+Lauralee/M
+Laural/M
+laura/M
+Laura/M
+Laurasia/M
+laureate/DSNG
+laureateship/SM
+Lauree/M
+Laureen/M
+Laurella/M
+Laurel/M
+laurel/SGMD
+Laure/M
+Laurena/M
+Laurence/M
+Laurene/M
+Lauren/SM
+Laurentian
+Laurent/M
+Lauretta/M
+Laurette/M
+Laurianne/M
+Laurice/M
+Laurie/M
+Lauri/M
+Lauritz/M
+Lauryn/M
+Lausanne/M
+lavage/MS
+lavaliere/MS
+Laval/M
+lava/SM
+lavatory/MS
+lave/GDS
+Lavena/M
+lavender/MDSG
+Laverna/M
+Laverne/M
+Lavern/M
+Lavina/M
+Lavinia/M
+Lavinie/M
+lavishness/MS
+lavish/SRDYPTG
+Lavoisier/M
+Lavonne/M
+Lawanda/M
+lawbreaker/SM
+lawbreaking/MS
+Lawford/M
+lawfulness/SMU
+lawful/PUY
+lawgiver/MS
+lawgiving/M
+lawlessness/MS
+lawless/PY
+Law/M
+lawmaker/MS
+lawmaking/SM
+lawman/M
+lawmen
+lawnmower/S
+lawn/SM
+Lawrence/M
+Lawrenceville/M
+lawrencium/SM
+Lawry/M
+law/SMDG
+Lawson/M
+lawsuit/MS
+Lawton/M
+lawyer/DYMGS
+laxativeness/M
+laxative/PSYM
+laxer/A
+laxes/A
+laxity/SM
+laxness/SM
+lax/PTSRY
+layabout/MS
+Layamon/M
+layaway/S
+lay/CZGSR
+layered/C
+layer/GJDM
+layering/M
+layer's/IC
+layette/SM
+Layla/M
+Lay/M
+layman/M
+laymen
+Layne/M
+Layney/M
+layoff/MS
+layout/SM
+layover/SM
+laypeople
+layperson/S
+lays/AI
+Layton/M
+layup/MS
+laywoman/M
+laywomen
+Lazare/M
+Lazar/M
+Lazaro/M
+Lazarus/M
+laze/DSG
+lazily
+laziness/MS
+lazuli/M
+lazybones/M
+lazy/PTSRDG
+lb
+LBJ/M
+lbs
+LC
+LCD
+LCM
+LDC
+leachate
+Leach/M
+leach/SDG
+Leadbelly/M
+leaded/U
+leadenness/M
+leaden/PGDY
+leaderless
+leader/M
+leadership/MS
+lead/SGZXJRDN
+leadsman/M
+leadsmen
+leafage/MS
+leaf/GSDM
+leafhopper/M
+leafiness/M
+leafless
+leaflet/SDMG
+leafstalk/SM
+leafy/PTR
+leaguer/M
+league/RSDMZG
+Leah/M
+leakage/SM
+leaker/M
+Leakey/M
+leak/GSRDM
+leakiness/MS
+leaky/PRT
+Lea/M
+lea/MS
+Leander/M
+Leandra/M
+leaner/M
+leaning/M
+Lean/M
+Leanna/M
+Leanne/M
+leanness/MS
+Leann/M
+Leanora/M
+Leanor/M
+lean/YRDGTJSP
+leaper/M
+leapfrogged
+leapfrogging
+leapfrog/SM
+leap/RDGZS
+Lear/M
+learnedly
+learnedness/M
+learned/UA
+learner/M
+learning/M
+learns/UA
+learn/SZGJRD
+Leary/M
+lease/ARSDG
+leaseback/MS
+leaseholder/M
+leasehold/SRMZ
+leaser/MA
+lease's
+leash's
+leash/UGSD
+leasing/M
+leas/SRDGZ
+least/S
+leastwise
+leatherette/S
+leather/MDSG
+leathern
+leatherneck/SM
+leathery
+leaven/DMJGS
+leavened/U
+leavening/M
+Leavenworth/M
+leaver/M
+leaves/M
+leave/SRDJGZ
+leaving/M
+Lebanese
+Lebanon/M
+Lebbie/M
+lebensraum
+Lebesgue/M
+Leblanc/M
+lecher/DMGS
+lecherousness/MS
+lecherous/YP
+lechery/MS
+lecithin/SM
+lectern/SM
+lecturer/M
+lecture/RSDZMG
+lectureship/SM
+led
+Leda/M
+Lederberg/M
+ledger/DMG
+ledge/SRMZ
+LED/SM
+Leeanne/M
+Leeann/M
+leech/MSDG
+Leeds/M
+leek/SM
+Leelah/M
+Leela/M
+Leeland/M
+Lee/M
+lee/MZRS
+Leena/M
+leer/DG
+leeriness/MS
+leering/Y
+leery/PTR
+Leesa/M
+Leese/M
+Leeuwenhoek/M
+Leeward/M
+leeward/S
+leeway/MS
+leftism/SM
+leftist/SM
+leftmost
+leftover/MS
+Left/S
+left/TRS
+leftward/S
+Lefty/M
+lefty/SM
+legacy/MS
+legalese/MS
+legalism/SM
+legalistic
+legality/MS
+legalization/MS
+legalize/DSG
+legalized/U
+legal/SY
+legate/AXCNGSD
+legatee/MS
+legate's/C
+legation/AMC
+legato/SM
+legendarily
+legendary/S
+Legendre/M
+legend/SM
+legerdemain/SM
+Leger/SM
+legged
+legginess/MS
+legging/MS
+leggy/PRT
+leghorn/SM
+Leghorn/SM
+legibility/MS
+legible
+legibly
+legionary/S
+legionnaire/SM
+legion/SM
+legislate/SDXVNG
+legislation/M
+legislative/SY
+legislator/SM
+legislature/MS
+legitimacy/MS
+legitimate/SDNGY
+legitimation/M
+legitimatize/SDG
+legitimization/MS
+legitimize/RSDG
+legit/S
+legless
+legman/M
+legmen
+leg/MS
+Lego/M
+Legra/M
+Legree/M
+legroom/MS
+legstraps
+legume/SM
+leguminous
+legwork/SM
+Lehigh/M
+Lehman/M
+Leia/M
+Leibniz/M
+Leicester/SM
+Leiden/M
+Leif/M
+Leigha/M
+Leigh/M
+Leighton/M
+Leilah/M
+Leila/M
+lei/MS
+Leipzig/M
+Leisha/M
+leisureliness/MS
+leisurely/P
+leisure/SDYM
+leisurewear
+leitmotif/SM
+leitmotiv/MS
+Lek/M
+Lelah/M
+Lela/M
+Leland/M
+Lelia/M
+Lemaitre/M
+Lemar/M
+Lemke/M
+Lem/M
+lemma/MS
+lemme/GJ
+Lemmie/M
+lemming/M
+Lemmy/M
+lemonade/SM
+lemon/GSDM
+lemony
+Lemuel/M
+Lemuria/M
+lemur/MS
+Lena/M
+Lenard/M
+Lenci/M
+lender/M
+lend/SRGZ
+Lenee/M
+Lenette/M
+lengthener/M
+lengthen/GRD
+lengthily
+lengthiness/MS
+length/MNYX
+lengths
+lengthwise
+lengthy/TRP
+lenience/S
+leniency/MS
+lenient/SY
+Leningrad/M
+Leninism/M
+Leninist
+Lenin/M
+lenitive/S
+Lenka/M
+Len/M
+Le/NM
+Lenna/M
+Lennard/M
+Lennie/M
+Lennon/M
+Lenny/M
+Lenoir/M
+Leno/M
+Lenora/M
+Lenore/M
+lens/SRDMJGZ
+lent/A
+lenticular
+lentil/SM
+lento/S
+Lent/SMN
+Leodora/M
+Leoine/M
+Leola/M
+Leoline/M
+Leo/MS
+Leona/M
+Leonanie/M
+Leonard/M
+Leonardo/M
+Leoncavallo/M
+Leonelle/M
+Leonel/M
+Leone/M
+Leonerd/M
+Leonhard/M
+Leonidas/M
+Leonid/M
+Leonie/M
+leonine
+Leon/M
+Leonora/M
+Leonore/M
+Leonor/M
+Leontine/M
+Leontyne/M
+leopardess/SM
+leopard/MS
+leopardskin
+Leopold/M
+Leopoldo/M
+Leopoldville/M
+Leora/M
+leotard/MS
+leper/SM
+Lepidus/M
+Lepke/M
+leprechaun/SM
+leprosy/MS
+leprous
+lepta
+lepton/SM
+Lepus/M
+Lerner/M
+Leroi/M
+Leroy/M
+Lesa/M
+lesbianism/MS
+lesbian/MS
+Leshia/M
+lesion/DMSG
+Lesley/M
+Leslie/M
+Lesli/M
+Lesly/M
+Lesotho/M
+lessee/MS
+lessen/GDS
+Lesseps/M
+lesser
+lesses
+Lessie/M
+lessing
+lesson/DMSG
+lessor/MS
+less/U
+Lester/M
+lest/R
+Les/Y
+Lesya/M
+Leta/M
+letdown/SM
+lethality/M
+lethal/YS
+Letha/M
+lethargic
+lethargically
+lethargy/MS
+Lethe/M
+Lethia/M
+Leticia/M
+Letisha/M
+let/ISM
+Letitia/M
+Letizia/M
+Letta/M
+letterbox/S
+lettered/U
+letterer/M
+letterhead/SM
+lettering/M
+letter/JSZGRDM
+letterman/M
+Letterman/M
+lettermen
+letterpress/MS
+Lettie/M
+Letti/M
+letting/S
+lettuce/SM
+Letty/M
+letup/MS
+leukemia/SM
+leukemic/S
+leukocyte/MS
+Leupold/M
+Levant/M
+leveeing
+levee/SDM
+leveled/U
+leveler/M
+levelheadedness/S
+levelheaded/P
+leveling/U
+levelness/SM
+level/STZGRDYP
+leverage/MGDS
+lever/SDMG
+Levesque/M
+Levey/M
+Leviathan
+leviathan/MS
+levier/M
+Levi/MS
+Levine/M
+Levin/M
+levitate/XNGDS
+levitation/M
+Leviticus/M
+Levitt/M
+levity/MS
+Lev/M
+Levon/M
+Levy/M
+levy/SRDZG
+lewdness/MS
+lewd/PYRT
+Lewellyn/M
+Lewes
+Lewie/M
+Lewinsky/M
+lewis/M
+Lewis/M
+Lewiss
+Lew/M
+lex
+lexeme/MS
+lexical/Y
+lexicographer/MS
+lexicographic
+lexicographical/Y
+lexicography/SM
+lexicon/SM
+Lexie/M
+Lexi/MS
+Lexine/M
+Lexington/M
+Lexus/M
+Lexy/M
+Leyden/M
+Leyla/M
+Lezley/M
+Lezlie/M
+lg
+Lhasa/SM
+Lhotse/M
+liability/SAM
+liable/AP
+liaise/GSD
+liaison/SM
+Lia/M
+Liam/M
+Liana/M
+Liane/M
+Lian/M
+Lianna/M
+Lianne/M
+liar/MS
+libation/SM
+libbed
+Libbey/M
+Libbie/M
+Libbi/M
+libbing
+Libby/M
+libeler/M
+libel/GMRDSZ
+libelous/Y
+Liberace/M
+liberalism/MS
+liberality/MS
+liberalization/SM
+liberalized/U
+liberalize/GZSRD
+liberalizer/M
+liberalness/MS
+liberal/YSP
+liberate/NGDSCX
+liberationists
+liberation/MC
+liberator/SCM
+Liberia/M
+Liberian/S
+libertarianism/M
+libertarian/MS
+libertine/MS
+liberty/MS
+libidinal
+libidinousness/M
+libidinous/PY
+libido/MS
+Lib/M
+lib/MS
+librarian/MS
+library/MS
+Libra/SM
+libretoes
+libretos
+librettist/MS
+libretto/MS
+Libreville/M
+Librium/M
+Libya/M
+Libyan/S
+lice/M
+licensed/AU
+licensee/SM
+license/MGBRSD
+licenser/M
+licenses/A
+licensing/A
+licensor/M
+licentiate/MS
+licentiousness/MS
+licentious/PY
+Licha/M
+lichee's
+lichen/DMGS
+Lichtenstein/M
+Lichter/M
+licit/Y
+licked/U
+lickerish
+licker/M
+lick/GRDSJ
+licking/M
+licorice/SM
+Lida/M
+lidded
+lidding
+Lidia/M
+lidless
+lid/MS
+lido/MS
+Lieberman/M
+Liebfraumilch/M
+Liechtenstein/RMZ
+lied/MR
+lie/DRS
+Lief/M
+liefs/A
+lief/TSR
+Liege/M
+liege/SR
+Lie/M
+lien/SM
+lier/IMA
+lies/A
+Liesa/M
+lieu/SM
+lieut
+lieutenancy/MS
+lieutenant/SM
+Lieut/M
+lifeblood/SM
+lifeboat/SM
+lifebuoy/S
+lifeforms
+lifeguard/MDSG
+lifelessness/SM
+lifeless/PY
+lifelikeness/M
+lifelike/P
+lifeline/SM
+lifelong
+life/MZR
+lifer/M
+lifesaver/SM
+lifesaving/S
+lifespan/S
+lifestyle/S
+lifetaking
+lifetime/MS
+lifework/MS
+LIFO
+lifter/M
+lift/GZMRDS
+liftoff/MS
+ligament/MS
+ligand/MS
+ligate/XSDNG
+ligation/M
+ligature/DSGM
+light/ADSCG
+lighted/U
+lightener/M
+lightening/M
+lighten/ZGDRS
+lighter/CM
+lightered
+lightering
+lighters
+lightest
+lightface/SDM
+lightheaded
+lightheartedness/MS
+lighthearted/PY
+lighthouse/MS
+lighting/MS
+lightly
+lightness/MS
+lightning/SMD
+lightproof
+light's
+lightship/SM
+lightweight/S
+ligneous
+lignite/MS
+lignum
+likability/MS
+likableness/MS
+likable/P
+likeability's
+liked/E
+likelihood/MSU
+likely/UPRT
+likeness/MSU
+liken/GSD
+liker/E
+liker's
+likes/E
+likest
+like/USPBY
+likewise
+liking/SM
+lilac/MS
+Lilah/M
+Lila/SM
+Lilia/MS
+Liliana/M
+Liliane/M
+Lilian/M
+Lilith/M
+Liliuokalani/M
+Lilla/M
+Lille/M
+Lillian/M
+Lillie/M
+Lilli/MS
+lilliputian/S
+Lilliputian/SM
+Lilliput/M
+Lilllie/M
+Lilly/M
+Lil/MY
+Lilongwe/M
+lilting/YP
+lilt/MDSG
+Lilyan/M
+Lily/M
+lily/MSD
+Lima/M
+Limbaugh/M
+limbered/U
+limberness/SM
+limber/RDYTGP
+limbers/U
+limbic
+limbless
+Limbo
+limbo/GDMS
+limb/SGZRDM
+Limburger/SM
+limeade/SM
+lime/DSMG
+limekiln/M
+limelight/DMGS
+limerick/SM
+limestone/SM
+limitability
+limitably
+limitation/MCS
+limit/CSZGRD
+limitedly/U
+limitedness/M
+limited/PSY
+limiter/M
+limiting/S
+limitlessness/SM
+limitless/PY
+limit's
+limn/GSD
+Limoges/M
+limo/S
+limousine/SM
+limper/M
+limpet/SM
+limpidity/MS
+limpidness/SM
+limpid/YP
+limpness/MS
+Limpopo/M
+limp/SGTPYRD
+Li/MY
+limy/TR
+linage/MS
+Lina/M
+linchpin/MS
+Linc/M
+Lincoln/SM
+Linda/M
+Lindbergh/M
+Lindberg/M
+linden/MS
+Lindholm/M
+Lindie/M
+Lindi/M
+Lind/M
+Lindon/M
+Lindquist/M
+Lindsay/M
+Lindsey/M
+Lindstrom/M
+Lindsy/M
+Lindy/M
+line/AGDS
+lineage/SM
+lineal/Y
+Linea/M
+lineament/MS
+linearity/MS
+linearize/SDGNB
+linear/Y
+linebacker/SM
+lined/U
+linefeed
+Linell/M
+lineman/M
+linemen
+linen/SM
+liner/SM
+line's
+linesman/M
+linesmen
+Linet/M
+Linette/M
+lineup/S
+lingerer/M
+lingerie/SM
+lingering/Y
+linger/ZGJRD
+lingoes
+lingo/M
+lingual/SY
+lingua/M
+linguine
+linguini's
+linguistically
+linguistic/S
+linguistics/M
+linguist/SM
+ling/ZR
+liniment/MS
+lining/SM
+linkable
+linkage/SM
+linked/A
+linker/S
+linking/S
+Link/M
+link's
+linkup/S
+link/USGD
+Lin/M
+Linnaeus/M
+Linnea/M
+Linnell/M
+Linnet/M
+linnet/SM
+Linnie/M
+Linn/M
+Linoel/M
+linoleum/SM
+lino/M
+Linotype/M
+linseed/SM
+lintel/SM
+linter/M
+Linton/M
+lint/SMR
+linty/RST
+Linus/M
+Linux/M
+Linwood/M
+Linzy/M
+Lionello/M
+Lionel/M
+lioness/SM
+lionhearted
+lionization/SM
+lionizer/M
+lionize/ZRSDG
+Lion/M
+lion/MS
+lipase/M
+lipid/MS
+lip/MS
+liposuction/S
+lipped
+lipper
+Lippi/M
+lipping
+Lippmann/M
+lippy/TR
+lipread/GSRJ
+Lipschitz/M
+Lipscomb/M
+lipstick/MDSG
+Lipton/M
+liq
+liquefaction/SM
+liquefier/M
+liquefy/DRSGZ
+liqueur/DMSG
+liquidate/GNXSD
+liquidation/M
+liquidator/SM
+liquidity/SM
+liquidizer/M
+liquidize/ZGSRD
+liquidness/M
+liquid/SPMY
+liquorice/SM
+liquorish
+liquor/SDMG
+lira/M
+Lira/M
+lire
+Lisabeth/M
+Lisa/M
+Lisbeth/M
+Lisbon/M
+Lise/M
+Lisetta/M
+Lisette/M
+Lisha/M
+Lishe/M
+Lisle/M
+lisle/SM
+lisper/M
+lisp/MRDGZS
+Lissajous/M
+Lissa/M
+Lissie/M
+Lissi/M
+Liss/M
+lissomeness/M
+lissome/P
+lissomness/M
+Lissy/M
+listed/U
+listener/M
+listen/ZGRD
+Listerine/M
+lister/M
+Lister/M
+listing/M
+list/JMRDNGZXS
+listlessness/SM
+listless/PY
+Liston/M
+Liszt/M
+Lita/M
+litany/MS
+litchi/SM
+literacy/MS
+literalism/M
+literalistic
+literalness/MS
+literal/PYS
+literariness/SM
+literary/P
+literate/YNSP
+literati
+literation/M
+literature/SM
+liter/M
+lite/S
+litheness/SM
+lithe/PRTY
+lithesome
+lithium/SM
+lithograph/DRMGZ
+lithographer/M
+lithographic
+lithographically
+lithographs
+lithography/MS
+lithology/M
+lithosphere/MS
+lithospheric
+Lithuania/M
+Lithuanian/S
+litigant/MS
+litigate/NGXDS
+litigation/M
+litigator/SM
+litigiousness/MS
+litigious/PY
+litmus/SM
+litotes/M
+lit/RZS
+littérateur/S
+litterbug/SM
+litter/SZGRDM
+Little/M
+littleneck/M
+littleness/SM
+little/RSPT
+Littleton/M
+Litton/M
+littoral/S
+liturgical/Y
+liturgic/S
+liturgics/M
+liturgist/MS
+liturgy/SM
+Liuka/M
+livability/MS
+livableness/M
+livable/U
+livably
+Liva/M
+lived/A
+livelihood/SM
+liveliness/SM
+livelong/S
+lively/RTP
+liveness/M
+liven/SDG
+liver/CSGD
+liveried
+liverish
+Livermore/M
+Liverpool/M
+Liverpudlian/MS
+liver's
+liverwort/SM
+liverwurst/SM
+livery/CMS
+liveryman/MC
+liverymen/C
+lives/A
+lives's
+livestock/SM
+live/YHZTGJDSRPB
+Livia/M
+lividness/M
+livid/YP
+livingness/M
+Livingstone/M
+Livingston/M
+living/YP
+Liv/M
+Livonia/M
+Livvie/M
+Livvy/M
+Livvyy/M
+Livy/M
+Lizabeth/M
+Liza/M
+lizard/MS
+Lizbeth/M
+Lizette/M
+Liz/M
+Lizzie/M
+Lizzy/M
+l/JGVXT
+Ljubljana/M
+LL
+llama/SM
+llano/SM
+LLB
+ll/C
+LLD
+Llewellyn/M
+Lloyd/M
+Llywellyn/M
+LNG
+lo
+loadable
+loaded/A
+loader/MU
+loading/MS
+load's/A
+loads/A
+loadstar's
+loadstone's
+load/SURDZG
+loafer/M
+Loafer/S
+loaf/SRDMGZ
+loam/SMDG
+loamy/RT
+loaner/M
+loaning/M
+loan/SGZRDMB
+loansharking/S
+loanword/S
+loathe
+loather/M
+loathing/M
+loath/JPSRDYZG
+loathness/M
+loathsomeness/MS
+loathsome/PY
+loaves/M
+Lobachevsky/M
+lobar
+lobbed
+lobber/MS
+lobbing
+lobby/GSDM
+lobbyist/MS
+lobe/SM
+lob/MDSG
+lobotomist
+lobotomize/GDS
+lobotomy/MS
+lobster/MDGS
+lobularity
+lobular/Y
+lobule/SM
+locale/MS
+localisms
+locality/MS
+localization/MS
+localized/U
+localizer/M
+localizes/U
+localize/ZGDRS
+local/SGDY
+locatable
+locate/AXESDGN
+locater/M
+locational/Y
+location/EMA
+locative/S
+locator's
+Lochinvar/M
+loch/M
+lochs
+loci/M
+lockable
+Lockean/M
+locked/A
+Locke/M
+locker/SM
+locket/SM
+Lockhart/M
+Lockheed/M
+Lockian/M
+locking/S
+lockjaw/SM
+Lock/M
+locknut/M
+lockout/MS
+lock's
+locksmithing/M
+locksmith/MG
+locksmiths
+lockstep/S
+lock/UGSD
+lockup/MS
+Lockwood/M
+locomotion/SM
+locomotive/YMS
+locomotor
+locomotory
+loco/SDMG
+locoweed/MS
+locus/M
+locust/SM
+locution/MS
+lode/SM
+lodestar/MS
+lodestone/MS
+lodged/E
+lodge/GMZSRDJ
+Lodge/M
+lodgepole
+lodger/M
+lodges/E
+lodging/M
+lodgment/M
+Lodovico/M
+Lodowick/M
+Lodz
+Loeb/M
+Loella/M
+Loewe/M
+Loewi/M
+lofter/M
+loftily
+loftiness/SM
+loft/SGMRD
+lofty/PTR
+loganberry/SM
+Logan/M
+logarithmic
+logarithmically
+logarithm/MS
+logbook/MS
+loge/SMNX
+logged/U
+loggerhead/SM
+logger/SM
+loggia/SM
+logging/MS
+logicality/MS
+logicalness/M
+logical/SPY
+logician/SM
+logic/SM
+login/S
+logion/M
+logistical/Y
+logistic/MS
+logjam/SM
+LOGO
+logo/SM
+logotype/MS
+logout
+logrolling/SM
+log's/K
+log/SM
+logy/RT
+Lohengrin/M
+loincloth/M
+loincloths
+loin/SM
+Loire/M
+Loise/M
+Lois/M
+loiterer/M
+loiter/RDJSZG
+Loki/M
+Lola/M
+Loleta/M
+Lolita/M
+loller/M
+lollipop/MS
+loll/RDGS
+Lolly/M
+lolly/SM
+Lombardi/M
+Lombard/M
+Lombardy/M
+Lomb/M
+Lome
+Lona/M
+Londonderry/M
+Londoner/M
+London/RMZ
+Lonee/M
+loneliness/SM
+lonely/TRP
+loneness/M
+lone/PYZR
+loner/M
+lonesomeness/MS
+lonesome/PSY
+longboat/MS
+longbow/SM
+longed/K
+longeing
+longer/K
+longevity/MS
+Longfellow/M
+longhair/SM
+longhand/SM
+longhorn/SM
+longing/MY
+longish
+longitude/MS
+longitudinal/Y
+long/JGTYRDPS
+Long/M
+longness/M
+longshoreman/M
+longshoremen
+longsighted
+longs/K
+longstanding
+Longstreet/M
+longsword
+longterm
+longtime
+Longueuil/M
+longueur/SM
+longways
+longword/SM
+Loni/M
+Lon/M
+Lonna/M
+Lonnard/M
+Lonnie/M
+Lonni/M
+Lonny/M
+loofah/M
+loofahs
+lookahead
+lookalike/S
+looker/M
+look/GZRDS
+lookout/MS
+lookup/SM
+looming/M
+Loomis/M
+loom/MDGS
+loon/MS
+loony/SRT
+looper/M
+loophole/MGSD
+loop/MRDGS
+loopy/TR
+loosed/U
+looseleaf
+loosener/M
+looseness/MS
+loosen/UDGS
+loose/SRDPGTY
+looses/U
+loosing/M
+looter/M
+loot/MRDGZS
+loper/M
+lope/S
+Lopez/M
+lopped
+lopper/MS
+lopping
+lop/SDRG
+lopsidedness/SM
+lopsided/YP
+loquaciousness/MS
+loquacious/YP
+loquacity/SM
+Loraine/M
+Lorain/M
+Loralee/M
+Loralie/M
+Loralyn/M
+Lora/M
+Lorant/M
+lording/M
+lordliness/SM
+lordly/PTR
+Lord/MS
+lord/MYDGS
+lordship/SM
+Lordship/SM
+Loree/M
+Loreen/M
+Lorelei/M
+Lorelle/M
+lore/MS
+Lorena/M
+Lorene/M
+Loren/SM
+Lorentzian/M
+Lorentz/M
+Lorenza/M
+Lorenz/M
+Lorenzo/M
+Loretta/M
+Lorette/M
+lorgnette/SM
+Loria/M
+Lorianna/M
+Lorianne/M
+Lorie/M
+Lorilee/M
+Lorilyn/M
+Lori/M
+Lorinda/M
+Lorine/M
+Lorin/M
+loris/SM
+Lorita/M
+lorn
+Lorna/M
+Lorne/M
+Lorraine/M
+Lorrayne/M
+Lorre/M
+Lorrie/M
+Lorri/M
+Lorrin/M
+lorryload/S
+Lorry/M
+lorry/SM
+Lory/M
+Los
+loser/M
+lose/ZGJBSR
+lossage
+lossless
+loss/SM
+lossy/RT
+lost/P
+Lothaire/M
+Lothario/MS
+lotion/MS
+Lot/M
+lot/MS
+Lotta/M
+lotted
+Lotte/M
+lotter
+lottery/MS
+Lottie/M
+Lotti/M
+lotting
+Lott/M
+lotto/MS
+Lotty/M
+lotus/SM
+louden/DG
+loudhailer/S
+loudly/RT
+loudmouth/DM
+loudmouths
+loudness/MS
+loudspeaker/SM
+loudspeaking
+loud/YRNPT
+Louella/M
+Louie/M
+Louisa/M
+Louise/M
+Louisette/M
+Louisiana/M
+Louisianan/S
+Louisianian/S
+Louis/M
+Louisville/M
+Lou/M
+lounger/M
+lounge/SRDZG
+Lourdes/M
+lour/GSD
+louse/CSDG
+louse's
+lousewort/M
+lousily
+lousiness/MS
+lousy/PRT
+loutishness/M
+loutish/YP
+Loutitia/M
+lout/SGMD
+louver/DMS
+L'Ouverture
+Louvre/M
+lovableness/MS
+lovable/U
+lovably
+lovebird/SM
+lovechild
+Lovecraft/M
+love/DSRMYZGJB
+loved/U
+Lovejoy/M
+Lovelace/M
+Loveland/M
+lovelessness/M
+loveless/YP
+lovelies
+lovelinesses
+loveliness/UM
+Lovell/M
+lovelornness/M
+lovelorn/P
+lovely/URPT
+Love/M
+lovemaking/SM
+lover/YMG
+lovesick
+lovestruck
+lovingly
+lovingness/M
+loving/U
+lowborn
+lowboy/SM
+lowbrow/MS
+lowdown/S
+Lowell/M
+Lowe/M
+lowercase/GSD
+lower/DG
+lowermost
+Lowery/M
+lowish
+lowland/RMZS
+Lowlands/M
+lowlife/SM
+lowlight/MS
+lowliness/MS
+lowly/PTR
+lowness/MS
+low/PDRYSZTG
+Lowrance/M
+lox/MDSG
+loyaler
+loyalest
+loyal/EY
+loyalism/SM
+loyalist/SM
+loyalty/EMS
+Loyang/M
+Loydie/M
+Loyd/M
+Loy/M
+Loyola/M
+lozenge/SDM
+LP
+LPG
+LPN/S
+Lr
+ls
+l's
+L's
+LSD
+ltd
+Ltd/M
+Lt/M
+Luanda/M
+Luann/M
+luau/MS
+lubber/YMS
+Lubbock/M
+lube/DSMG
+lubricant/SM
+lubricate/VNGSDX
+lubrication/M
+lubricator/MS
+lubricious/Y
+lubricity/SM
+Lubumbashi/M
+Lucais/M
+Luca/MS
+Luce/M
+lucent/Y
+Lucerne/M
+Lucho/M
+Lucia/MS
+Luciana/M
+Lucian/M
+Luciano/M
+lucidity/MS
+lucidness/MS
+lucid/YP
+Lucie/M
+Lucien/M
+Lucienne/M
+Lucifer/M
+Lucila/M
+Lucile/M
+Lucilia/M
+Lucille/M
+Luci/MN
+Lucina/M
+Lucinda/M
+Lucine/M
+Lucio/M
+Lucita/M
+Lucite/MS
+Lucius/M
+luck/GSDM
+luckier/U
+luckily/U
+luckiness/UMS
+luckless
+Lucknow/M
+Lucky/M
+lucky/RSPT
+lucrativeness/SM
+lucrative/YP
+lucre/MS
+Lucretia/M
+Lucretius/M
+lucubrate/GNSDX
+lucubration/M
+Lucy/M
+Luddite/SM
+Ludhiana/M
+ludicrousness/SM
+ludicrous/PY
+Ludlow/M
+Ludmilla/M
+ludo/M
+Ludovico/M
+Ludovika/M
+Ludvig/M
+Ludwig/M
+Luella/M
+Luelle/M
+luff/GSDM
+Lufthansa/M
+Luftwaffe/M
+luge/MC
+Luger/M
+luggage/SM
+lugged
+lugger/SM
+lugging
+Lugosi/M
+lug/RS
+lugsail/SM
+lugubriousness/MS
+lugubrious/YP
+Luigi/M
+Luisa/M
+Luise/M
+Luis/M
+Lukas/M
+Luke/M
+lukewarmness/SM
+lukewarm/PY
+Lula/M
+Lulita/M
+lullaby/GMSD
+lull/SDG
+lulu/M
+Lulu/M
+Lu/M
+lumbago/SM
+lumbar/S
+lumberer/M
+lumbering/M
+lumberjack/MS
+lumberman/M
+lumbermen
+lumber/RDMGZSJ
+lumberyard/MS
+lumen/M
+Lumière/M
+luminance/M
+luminary/MS
+luminescence/SM
+luminescent
+luminosity/MS
+luminousness/M
+luminous/YP
+lummox/MS
+lumper/M
+lumpiness/MS
+lumpishness/M
+lumpish/YP
+lump/SGMRDN
+lumpy/TPR
+lunacy/MS
+Luna/M
+lunar/S
+lunary
+lunate/YND
+lunatic/S
+lunation/M
+luncheonette/SM
+luncheon/SMDG
+luncher/M
+lunch/GMRSD
+lunchpack
+lunchroom/MS
+lunchtime/MS
+Lundberg/M
+Lund/M
+Lundquist/M
+lune/M
+lunge/MS
+lunger/M
+lungfish/SM
+lungful
+lung/SGRDM
+lunkhead/SM
+Lupe/M
+lupine/SM
+Lupus/M
+lupus/SM
+Lura/M
+lurcher/M
+lurch/RSDG
+lure/DSRG
+lurer/M
+Lurette/M
+lurex
+Luria/M
+luridness/SM
+lurid/YP
+lurker/M
+lurk/GZSRD
+Lurleen/M
+Lurlene/M
+Lurline/M
+Lusaka/M
+Lusa/M
+lusciousness/MS
+luscious/PY
+lushness/MS
+lush/YSRDGTP
+Lusitania/M
+luster/GDM
+lustering/M
+lusterless
+lustfulness/M
+lustful/PY
+lustily
+lustiness/MS
+lust/MRDGZS
+lustrousness/M
+lustrous/PY
+lusty/PRT
+lutanist/MS
+lute/DSMG
+lutenist/MS
+Lutero/M
+lutetium/MS
+Lutheranism/MS
+Lutheran/SM
+Luther/M
+luting/M
+Lutz
+Luxembourgian
+Luxembourg/RMZ
+Luxemburg's
+luxe/MS
+luxuriance/MS
+luxuriant/Y
+luxuriate/GNSDX
+luxuriation/M
+luxuriousness/SM
+luxurious/PY
+luxury/MS
+Luz/M
+Luzon/M
+L'vov
+Lyallpur/M
+lyceum/MS
+lychee's
+lycopodium/M
+Lycra/S
+Lycurgus/M
+Lyda/M
+Lydia/M
+Lydian/S
+Lydie/M
+Lydon/M
+lye/JSMG
+Lyell/M
+lying/Y
+Lyle/M
+Lyly/M
+Lyman/M
+Lyme/M
+lymphatic/S
+lymph/M
+lymphocyte/SM
+lymphoid
+lymphoma/MS
+lymphs
+Ly/MY
+Lynchburg/M
+lyncher/M
+lynching/M
+Lynch/M
+lynch/ZGRSDJ
+Lynda/M
+Lyndell/M
+Lyndel/M
+Lynde/M
+Lyndon/M
+Lyndsay/M
+Lyndsey/M
+Lyndsie/M
+Lyndy/M
+Lynea/M
+Lynelle/M
+Lynette/M
+Lynett/M
+Lyn/M
+Lynna/M
+Lynnea/M
+Lynnelle/M
+Lynnell/M
+Lynne/M
+Lynnet/M
+Lynnette/M
+Lynnett/M
+Lynn/M
+Lynsey/M
+lynx/MS
+Lyon/SM
+Lyra/M
+lyrebird/MS
+lyre/SM
+lyricalness/M
+lyrical/YP
+lyricism/SM
+lyricist/SM
+lyric/S
+Lysenko/M
+lysine/M
+Lysistrata/M
+Lysol/M
+Lyssa/M
+LyX/M
+MA
+Maalox/M
+ma'am
+Mabelle/M
+Mabel/M
+Mable/M
+Mab/M
+macabre/Y
+macadamize/SDG
+macadam/SM
+Macao/M
+macaque/SM
+macaroni/SM
+macaroon/MS
+Macarthur/M
+MacArthur/M
+Macaulay/M
+macaw/SM
+Macbeth/M
+Maccabees/M
+Maccabeus/M
+Macdonald/M
+MacDonald/M
+MacDraw/M
+Macedonia/M
+Macedonian/S
+Macedon/M
+mace/MS
+Mace/MS
+macerate/DSXNG
+maceration/M
+macer/M
+Macgregor/M
+MacGregor/M
+machete/SM
+Machiavellian/S
+Machiavelli/M
+machinate/SDXNG
+machination/M
+machinelike
+machine/MGSDB
+machinery/SM
+machinist/MS
+machismo/SM
+Mach/M
+macho/S
+Machs
+Macias/M
+Macintosh/M
+MacIntosh/M
+macintosh's
+Mackenzie/M
+MacKenzie/M
+mackerel/SM
+Mackinac/M
+Mackinaw
+mackinaw/SM
+mackintosh/SM
+mack/M
+Mack/M
+MacLeish/M
+Macmillan/M
+MacMillan/M
+Macon/SM
+MacPaint/M
+macramé/S
+macrobiotic/S
+macrobiotics/M
+macrocosm/MS
+macrodynamic
+macroeconomic/S
+macroeconomics/M
+macromolecular
+macromolecule/SM
+macron/MS
+macrophage/SM
+macroscopic
+macroscopically
+macrosimulation
+macro/SM
+macrosocioeconomic
+Mac/SGMD
+mac/SGMDR
+Macy/M
+Madagascan/SM
+Madagascar/M
+Madalena/M
+Madalyn/M
+Mada/M
+madame/M
+Madame/MS
+madam/SM
+madcap/S
+Maddalena/M
+madded
+madden/GSD
+maddening/Y
+Madden/M
+madder/MS
+maddest
+Maddie/M
+Maddi/M
+madding
+Maddox/M
+Maddy/M
+made/AU
+Madeira/SM
+Madelaine/M
+Madeleine/M
+Madelena/M
+Madelene/M
+Madelina/M
+Madeline/M
+Madelin/M
+Madella/M
+Madelle/M
+Madel/M
+Madelon/M
+Madelyn/M
+mademoiselle/MS
+Madge/M
+madhouse/SM
+Madhya/M
+Madison/M
+Madlen/M
+Madlin/M
+madman/M
+madmen
+madness/SM
+Madonna/MS
+mad/PSY
+Madras
+madras/SM
+Madrid/M
+madrigal/MSG
+Madsen/M
+Madurai/M
+madwoman/M
+madwomen
+Mady/M
+Maegan/M
+Maelstrom/M
+maelstrom/SM
+Mae/M
+maestro/MS
+Maeterlinck/M
+Mafia/MS
+mafia/S
+mafiosi
+mafioso/M
+Mafioso/S
+MAG
+magazine/DSMG
+Magdaia/M
+Magdalena/M
+Magdalene/M
+Magdalen/M
+Magda/M
+Magellanic
+Magellan/M
+magenta/MS
+magged
+Maggee/M
+Maggie/M
+Maggi/M
+magging
+maggot/MS
+maggoty/RT
+Maggy/M
+magi
+magical/Y
+magician/MS
+magicked
+magicking
+magic/SM
+Magill/M
+Magi/M
+Maginot/M
+magisterial/Y
+magistracy/MS
+magistrate/MS
+Mag/M
+magma/SM
+magnanimity/SM
+magnanimosity
+magnanimous/PY
+magnate/SM
+magnesia/MS
+magnesite/M
+magnesium/SM
+magnetically
+magnetic/S
+magnetics/M
+magnetism/SM
+magnetite/SM
+magnetizable
+magnetization/ASCM
+magnetize/CGDS
+magnetized/U
+magnetodynamics
+magnetohydrodynamical
+magnetohydrodynamics/M
+magnetometer/MS
+magneto/MS
+magnetosphere/M
+magnetron/M
+magnet/SM
+magnification/M
+magnificence/SM
+magnificent/Y
+magnified/U
+magnify/DRSGNXZ
+magniloquence/MS
+magniloquent
+Magnitogorsk/M
+magnitude/SM
+magnolia/SM
+Magnum
+magnum/SM
+Magnuson/M
+Magog/M
+Magoo/M
+magpie/SM
+Magritte/M
+Magruder/M
+mag/S
+Magsaysay/M
+Maguire/SM
+Magus/M
+Magyar/MS
+Mahabharata
+Mahala/M
+Mahalia/M
+maharajah/M
+maharajahs
+maharanee's
+maharani/MS
+Maharashtra/M
+maharishi/SM
+mahatma/SM
+Mahavira/M
+Mahayana/M
+Mahayanist
+Mahdi/M
+Mahfouz/M
+Mahican/SM
+mahjong's
+Mahler/M
+Mahmoud/M
+Mahmud/M
+mahogany/MS
+Mahomet's
+mahout/SM
+Maia/M
+Maible/M
+maidenhair/MS
+maidenhead/SM
+maidenhood/SM
+maidenly/P
+maiden/YM
+maidservant/MS
+maid/SMNX
+maier
+Maier/M
+Maiga/M
+Maighdiln/M
+Maigret/M
+mailbag/MS
+mailbox/MS
+mail/BSJGZMRD
+mailer/M
+Mailer/M
+Maillol/M
+maillot/SM
+mailman/M
+mailmen
+Maiman/M
+maimedness/M
+maimed/P
+maimer/M
+Maimonides/M
+Mai/MR
+maim/SGZRD
+mainbrace/M
+Maine/MZR
+Mainer/M
+mainframe/MS
+mainlander/M
+mainland/SRMZ
+mainliner/M
+mainline/RSDZG
+mainly
+mainmast/SM
+main/SA
+mainsail/SM
+mains/M
+mainspring/SM
+mainstay/MS
+mainstream/DRMSG
+maintainability
+maintainable/U
+maintain/BRDZGS
+maintained/U
+maintainer/M
+maintenance/SM
+maintop/SM
+maiolica's
+Maire/M
+Mair/M
+Maisey/M
+Maisie/M
+maisonette/MS
+Maison/M
+Maitilde/M
+maize/MS
+Maj
+Maje/M
+majestic
+majestically
+majesty/MS
+Majesty/MS
+majolica/SM
+Majorca/M
+major/DMGS
+majordomo/S
+majorette/SM
+majority/SM
+Major/M
+Majuro/M
+makable
+Makarios/M
+makefile/S
+makeover/S
+Maker/M
+maker/SM
+makeshift/S
+make/UGSA
+makeup/MS
+making/SM
+Malabar/M
+Malabo/M
+Malacca/M
+Malachi/M
+malachite/SM
+maladapt/DV
+maladjust/DLV
+maladjustment/MS
+maladministration
+maladroitness/MS
+maladroit/YP
+malady/MS
+Malagasy/M
+malaise/SM
+Mala/M
+Malamud/M
+malamute/SM
+Malanie/M
+malaprop
+malapropism/SM
+Malaprop/M
+malarial
+malaria/MS
+malarious
+malarkey/SM
+malathion/S
+Malawian/S
+Malawi/M
+Malayalam/M
+Malaya/M
+Malayan/MS
+Malaysia/M
+Malaysian/S
+Malay/SM
+Malchy/M
+Malcolm/M
+malcontentedness/M
+malcontented/PY
+malcontent/SMD
+Maldive/SM
+Maldivian/S
+Maldonado/M
+maledict
+malediction/MS
+malefaction/MS
+malefactor/MS
+malefic
+maleficence/MS
+maleficent
+Male/M
+Malena/M
+maleness/MS
+male/PSM
+malevolence/S
+malevolencies
+malevolent/Y
+malfeasance/SM
+malfeasant
+malformation/MS
+malformed
+malfunction/SDG
+Malia/M
+Malian/S
+Malibu/M
+malice/MGSD
+maliciousness/MS
+malicious/YU
+malignancy/SM
+malignant/YS
+malign/GSRDYZ
+malignity/MS
+Mali/M
+Malina/M
+Malinda/M
+Malinde/M
+malingerer/M
+malinger/GZRDS
+Malinowski/M
+Malissa/M
+Malissia/M
+mallard/SM
+Mallarmé/M
+malleability/SM
+malleableness/M
+malleable/P
+mallet/MS
+Mallissa/M
+Mallorie/M
+Mallory/M
+mallow/MS
+mall/SGMD
+Mal/M
+malnourished
+malnutrition/SM
+malocclusion/MS
+malodorous
+Malone/M
+Malorie/M
+Malory/M
+malposed
+malpractice/SM
+Malraux/M
+Malta/M
+malted/S
+Maltese
+Malthusian/S
+Malthus/M
+malting/M
+maltose/SM
+maltreat/GDSL
+maltreatment/S
+malt/SGMD
+malty/RT
+Malva/M
+Malvina/M
+Malvin/M
+Malynda/M
+mama/SM
+mamba/SM
+mambo/GSDM
+Mame/M
+Mamet/M
+ma/MH
+Mamie/M
+mammalian/SM
+mammal/SM
+mammary
+mamma's
+mammogram/S
+mammography/S
+Mammon's
+mammon/SM
+mammoth/M
+mammoths
+mammy/SM
+Mamore/M
+manacle/SDMG
+manageability/S
+manageableness
+manageable/U
+managed/U
+management/SM
+manageress/M
+managerial/Y
+manager/M
+managership/M
+manage/ZLGRSD
+Managua/M
+Manama/M
+mañana/M
+mananas
+Manasseh/M
+manatee/SM
+Manaus's
+Manchester/M
+Manchu/MS
+Manchuria/M
+Manchurian/S
+Mancini/M
+manciple/M
+Mancunian/MS
+mandala/SM
+Mandalay/M
+Manda/M
+mandamus/GMSD
+Mandarin
+mandarin/MS
+mandate/SDMG
+mandatory/S
+Mandela
+Mandelbrot/M
+Mandel/M
+mandible/MS
+mandibular
+Mandie/M
+Mandi/M
+Mandingo/M
+mandolin/MS
+mandrake/MS
+mandrel/SM
+mandrill/SM
+Mandy/M
+mange/GSD
+mane/MDS
+Manet/M
+maneuverability/MS
+maneuverer/M
+maneuver/MRDSGB
+Manfred/M
+manful/Y
+manganese/MS
+mange/GMSRDZ
+manger/M
+manginess/S
+mangler/M
+mangle/RSDG
+mangoes
+mango/M
+mangrove/MS
+mangy/PRT
+manhandle/GSD
+Manhattan/SM
+manhole/MS
+manhood/MS
+manhunt/SM
+maniacal/Y
+maniac/SM
+mania/SM
+manically
+Manichean/M
+manic/S
+manicure/MGSD
+manicurist/SM
+manifestation/SM
+manifesto/GSDM
+manifest/YDPGS
+manifolder/M
+manifold/GPYRDMS
+manifoldness/M
+manikin/MS
+Manila/MS
+manila/S
+manilla's
+Mani/M
+manioc/SM
+manipulability
+manipulable
+manipulate/SDXBVGN
+manipulative/PM
+manipulator/MS
+manipulatory
+Manitoba/M
+Manitoulin/M
+Manitowoc/M
+mankind/M
+Mankowski/M
+Manley/M
+manlike
+manliness/SM
+manliness's/U
+manly/URPT
+manna/MS
+manned/U
+mannequin/MS
+mannered/U
+mannerism/SM
+mannerist/M
+mannerliness/MU
+mannerly/UP
+manner/SDYM
+Mann/GM
+Mannheim/M
+Mannie/M
+mannikin's
+Manning/M
+manning/U
+mannishness/SM
+mannish/YP
+Manny/M
+Manolo/M
+Mano/M
+manometer/SM
+Manon/M
+manorial
+manor/MS
+manpower/SM
+manqué/M
+man's
+mansard/SM
+manservant/M
+manse/XNM
+Mansfield/M
+mansion/M
+manslaughter/SM
+Man/SM
+Manson/M
+mans/S
+manta/MS
+Mantegna/M
+mantelpiece/MS
+mantel/SM
+mantes
+mantilla/MS
+mantissa/SM
+mantis/SM
+mantle/ESDG
+Mantle/M
+mantle's
+mantling/M
+mantra/MS
+mantrap/SM
+manual/SMY
+Manuela/M
+Manuel/M
+manufacture/JZGDSR
+manufacturer/M
+manumission/MS
+manumit/S
+manumitted
+manumitting
+manure/RSDMZG
+manuscript/MS
+man/USY
+Manville/M
+Manx
+many
+Manya/M
+Maoism/MS
+Maoist/S
+Mao/M
+Maori/SM
+Maplecrest/M
+maple/MS
+mapmaker/S
+mappable
+mapped/UA
+mapper/S
+mapping/MS
+Mapplethorpe/M
+maps/AU
+map/SM
+Maputo/M
+Marabel/M
+marabou/MS
+marabout's
+Maracaibo/M
+maraca/MS
+Mara/M
+maraschino/SM
+Marathi
+marathoner/M
+Marathon/M
+marathon/MRSZ
+Marat/M
+marauder/M
+maraud/ZGRDS
+marbleize/GSD
+marble/JRSDMG
+marbler/M
+marbling/M
+Marceau/M
+Marcela/M
+Marcelia/M
+Marcelino/M
+Marcella/M
+Marcelle/M
+Marcellina/M
+Marcelline/M
+Marcello/M
+Marcellus/M
+Marcel/M
+Marcelo/M
+Marchall/M
+Marchelle/M
+marcher/M
+marchioness/SM
+March/MS
+march/RSDZG
+Marcia/M
+Marciano/M
+Marcie/M
+Marcile/M
+Marcille/M
+Marci/M
+Marc/M
+Marconi/M
+Marco/SM
+Marcotte/M
+Marcus/M
+Marcy/M
+Mardi/SM
+Marduk/M
+Mareah/M
+mare/MS
+Marena/M
+Maren/M
+Maressa/M
+Margalit/M
+Margalo/M
+Marga/M
+Margareta/M
+Margarete/M
+Margaretha/M
+Margarethe/M
+Margaret/M
+Margaretta/M
+Margarette/M
+margarine/MS
+Margarita/M
+margarita/SM
+Margarito/M
+Margaux/M
+Margeaux/M
+Marge/M
+Margery/M
+Marget/M
+Margette/M
+Margie/M
+Margi/M
+marginalia
+marginality
+marginalization
+marginalize/SDG
+marginal/YS
+margin/GSDM
+Margit/M
+Margo/M
+Margot/M
+Margrethe/M
+Margret/M
+Marguerite/M
+Margy/M
+mariachi/SM
+maria/M
+Maria/M
+Mariam/M
+Mariana/SM
+Marian/MS
+Marianna/M
+Marianne/M
+Mariann/M
+Mariano/M
+Maribelle/M
+Maribel/M
+Maribeth/M
+Maricela/M
+Marice/M
+Maridel/M
+Marieann/M
+Mariejeanne/M
+Mariele/M
+Marielle/M
+Mariellen/M
+Mariel/M
+Marie/M
+Marietta/M
+Mariette/M
+Marigold/M
+marigold/MS
+Marijn/M
+Marijo/M
+marijuana/SM
+Marika/M
+Marilee/M
+Marilin/M
+Marillin/M
+Marilyn/M
+marimba/SM
+Mari/MS
+marinade/MGDS
+Marina/M
+marina/MS
+marinara/SM
+marinate/NGXDS
+marination/M
+mariner/M
+Marine/S
+marine/ZRS
+Marin/M
+Marinna/M
+Marino/M
+Mario/M
+marionette/MS
+Marion/M
+Mariquilla/M
+Marisa/M
+Mariska/M
+Marisol/M
+Marissa/M
+Maritain/M
+marital/Y
+Marita/M
+maritime/R
+Maritsa/M
+Maritza/M
+Mariupol/M
+Marius/M
+Mariya/M
+Marja/M
+Marje/M
+Marjie/M
+Marji/M
+Marj/M
+marjoram/SM
+Marjorie/M
+Marjory/M
+Marjy/M
+Markab/M
+markdown/SM
+marked/AU
+markedly
+marker/M
+marketability/SM
+marketable/U
+Marketa/M
+marketeer/S
+marketer/M
+market/GSMRDJBZ
+marketing/M
+marketplace/MS
+mark/GZRDMBSJ
+Markham/M
+marking/M
+Markism/M
+markkaa
+markka/M
+Mark/MS
+Markos
+Markov
+Markovian
+Markovitz/M
+marks/A
+marksman/M
+marksmanship/S
+marksmen
+markup/SM
+Markus/M
+Marla/M
+Marlane/M
+Marlboro/M
+Marlborough/M
+Marleah/M
+Marlee/M
+Marleen/M
+Marlena/M
+Marlene/M
+Marley/M
+Marlie/M
+Marline/M
+marlinespike/SM
+Marlin/M
+marlin/SM
+marl/MDSG
+Marlo/M
+Marlon/M
+Marlowe/M
+Marlow/M
+Marlyn/M
+Marmaduke/M
+marmalade/MS
+Marmara/M
+marmoreal
+marmoset/MS
+marmot/SM
+Marna/M
+Marne/M
+Marney/M
+Marnia/M
+Marnie/M
+Marni/M
+maroon/GRDS
+marquee/MS
+Marquesas/M
+marque/SM
+marquess/MS
+marquetry/SM
+Marquette/M
+Marquez/M
+marquise/M
+marquisette/MS
+Marquis/M
+marquis/SM
+Marquita/M
+Marrakesh/M
+marred/U
+marriageability/SM
+marriageable
+marriage/ASM
+married/US
+Marrilee/M
+marring
+Marriott/M
+Marris/M
+Marrissa/M
+marrowbone/MS
+marrow/GDMS
+marry/SDGA
+mar/S
+Marseillaise/SM
+Marseilles
+Marseille's
+marshal/GMDRSZ
+Marshalled/M
+marshaller
+Marshall/GDM
+Marshalling/M
+marshallings
+Marshal/M
+Marsha/M
+marshiness/M
+marshland/MS
+Marsh/M
+marshmallow/SM
+marsh/MS
+marshy/PRT
+Marsiella/M
+Mar/SMN
+marsupial/MS
+Martainn/M
+Marta/M
+Martelle/M
+Martel/M
+marten/M
+Marten/M
+Martguerita/M
+Martha/M
+Marthe/M
+Marthena/M
+Martial
+martial/Y
+Martian/S
+Martica/M
+Martie/M
+Marti/M
+Martina/M
+martinet/SM
+Martinez/M
+martingale/MS
+martini/MS
+Martinique/M
+Martin/M
+Martino/M
+martin/SM
+Martinson/M
+Martita/M
+mart/MDNGXS
+Mart/MN
+Marty/M
+Martyn/M
+Martynne/M
+martyrdom/SM
+martyr/GDMS
+Marva/M
+marvel/DGS
+Marvell/M
+marvelous/PY
+Marve/M
+Marven/M
+Marvin/M
+Marv/NM
+Marwin/M
+Marxian/S
+Marxism/SM
+Marxist/SM
+Marx/M
+Marya/M
+Maryanna/M
+Maryanne/M
+Maryann/M
+Marybelle/M
+Marybeth/M
+Maryellen/M
+Maryjane/M
+Maryjo/M
+Maryland/MZR
+Marylee/M
+Marylinda/M
+Marylin/M
+Maryl/M
+Marylou/M
+Marylynne/M
+Mary/M
+Maryrose/M
+Marys
+Marysa/M
+marzipan/SM
+Masada/M
+Masai/M
+Masaryk/M
+masc
+Mascagni/M
+mascara/SGMD
+mascot/SM
+masculineness/M
+masculine/PYS
+masculinity/SM
+Masefield/M
+maser/M
+Maseru/M
+MASH
+Masha/M
+Mashhad/M
+mash/JGZMSRD
+m/ASK
+masked/U
+masker/M
+mask/GZSRDMJ
+masks/U
+masochism/MS
+masochistic
+masochistically
+masochist/MS
+masonic
+Masonic
+Masonite/M
+masonry/MS
+mason/SDMG
+Mason/SM
+masquerader/M
+masquerade/RSDGMZ
+masquer/M
+masque/RSMZ
+Massachusetts/M
+massacre/DRSMG
+massager/M
+massage/SRDMG
+Massasoit/M
+Massenet/M
+masseur/MS
+masseuse/SM
+Massey/M
+massif/SM
+Massimiliano/M
+Massimo/M
+massing/R
+massiveness/SM
+massive/YP
+massless
+mas/SRZ
+Mass/S
+mass/VGSD
+mastectomy/MS
+masterclass
+mastered/A
+masterfulness/M
+masterful/YP
+master/JGDYM
+masterliness/M
+masterly/P
+mastermind/GDS
+masterpiece/MS
+mastership/M
+Master/SM
+masterstroke/MS
+masterwork/S
+mastery/MS
+mast/GZSMRD
+masthead/SDMG
+masticate/SDXGN
+mastication/M
+mastic/SM
+mastiff/MS
+mastodon/MS
+mastoid/S
+masturbate/SDNGX
+masturbation/M
+masturbatory
+matador/SM
+Mata/M
+matchable/U
+match/BMRSDZGJ
+matchbook/SM
+matchbox/SM
+matched/UA
+matcher/M
+matches/A
+matchless/Y
+matchlock/MS
+matchmake/GZJR
+matchmaker/M
+matchmaking/M
+matchplay
+match's/A
+matchstick/MS
+matchwood/SM
+mated/U
+mate/IMS
+Matelda/M
+Mateo/M
+materialism/SM
+materialistic
+materialistically
+materialist/SM
+materiality/M
+materialization/SM
+materialize/CDS
+materialized/A
+materializer/SM
+materializes/A
+materializing
+materialness/M
+material/SPYM
+matériel/MS
+mater/M
+maternal/Y
+maternity/MS
+mates/U
+mathematical/Y
+Mathematica/M
+mathematician/SM
+mathematic/S
+mathematics/M
+Mathematik/M
+Mather/M
+Mathe/RM
+Mathew/MS
+Mathewson/M
+Mathian/M
+Mathias
+Mathieu/M
+Mathilda/M
+Mathilde/M
+Mathis
+math/M
+maths
+Matias/M
+Matilda/M
+Matilde/M
+matinée/S
+mating/M
+matins/M
+Matisse/SM
+matriarchal
+matriarch/M
+matriarchs
+matriarchy/MS
+matrices
+matricidal
+matricide/MS
+matriculate/XSDGN
+matriculation/M
+matrimonial/Y
+matrimony/SM
+matrix/M
+matron/YMS
+mat/SJGMDR
+Matsumoto/M
+matte/JGMZSRD
+Mattel/M
+Matteo/M
+matter/GDM
+Matterhorn/M
+Matthaeus/M
+Mattheus/M
+Matthew/MS
+Matthias
+Matthieu/M
+Matthiew/M
+Matthus/M
+Mattias/M
+Mattie/M
+Matti/M
+matting/M
+mattins's
+Matt/M
+mattock/MS
+mattress/MS
+matt's
+Matty/M
+maturate/DSNGVX
+maturational
+maturation/M
+matureness/M
+maturer/M
+mature/RSDTPYG
+maturity/MS
+matzo/SHM
+matzot
+Maude/M
+Maudie/M
+maudlin/Y
+Maud/M
+Maugham/M
+Maui/M
+mauler/M
+maul/RDGZS
+maunder/GDS
+Maupassant/M
+Maura/M
+Maureene/M
+Maureen/M
+Maure/M
+Maurene/M
+Mauriac/M
+Maurice/M
+Mauricio/M
+Maurie/M
+Maurine/M
+Maurise/M
+Maurita/M
+Mauritania/M
+Mauritanian/S
+Mauritian/S
+Mauritius/M
+Maurits/M
+Maurizia/M
+Maurizio/M
+Maurois/M
+Mauro/M
+Maury/M
+Mauser/M
+mausoleum/SM
+mauve/SM
+maven/S
+maverick/SMDG
+mavin's
+Mavis/M
+Mavra/M
+mawkishness/SM
+mawkish/PY
+Mawr/M
+maw/SGMD
+max/GDS
+Maxie/M
+maxillae
+maxilla/M
+maxillary/S
+Maxi/M
+maximality
+maximal/SY
+maxima's
+Maximilian/M
+Maximilianus/M
+Maximilien/M
+maximization/SM
+maximizer/M
+maximize/RSDZG
+Maxim/M
+Maximo/M
+maxim/SM
+maximum/MYS
+Maxine/M
+maxi/S
+Max/M
+Maxtor/M
+Maxwellian
+maxwell/M
+Maxwell/M
+Maxy/M
+Maya/MS
+Mayan/S
+Maybelle/M
+maybe/S
+mayday/S
+may/EGS
+Maye/M
+mayer
+Mayer/M
+mayest
+Mayfair/M
+Mayflower/M
+mayflower/SM
+mayfly/MS
+mayhap
+mayhem/MS
+Maynard/M
+Mayne/M
+Maynord/M
+mayn't
+Mayo/M
+mayonnaise/MS
+mayoral
+mayoralty/MS
+mayoress/MS
+Mayor/M
+mayor/MS
+mayorship/M
+mayo/S
+maypole/MS
+Maypole/SM
+Mayra/M
+May/SMR
+mayst
+Mazama/M
+Mazarin/M
+Mazatlan/M
+Mazda/M
+mazedness/SM
+mazed/YP
+maze/MGDSR
+mazurka/SM
+Mazzini/M
+Mb
+MB
+MBA
+Mbabane/M
+Mbini/M
+MC
+McAdam/MS
+McAllister/M
+McBride/M
+McCabe/M
+McCain/M
+McCall/M
+McCarthyism/M
+McCarthy/M
+McCartney/M
+McCarty/M
+McCauley/M
+McClain/M
+McClellan/M
+McClure/M
+McCluskey/M
+McConnell/M
+McCormick/M
+McCoy/SM
+McCracken/M
+McCray/M
+McCullough/M
+McDaniel/M
+McDermott/M
+McDonald/M
+McDonnell/M
+McDougall/M
+McDowell/M
+McElhaney/M
+McEnroe/M
+McFadden/M
+McFarland/M
+McGee/M
+McGill/M
+McGovern/M
+McGowan/M
+McGrath/M
+McGraw/M
+McGregor/M
+McGuffey/M
+McGuire/M
+MCI/M
+McIntosh/M
+McIntyre/M
+McKay/M
+McKee/M
+McKenzie/M
+McKesson/M
+McKinley/M
+McKinney/M
+McKnight/M
+McLanahan/M
+McLaughlin/M
+McLean/M
+McLeod/M
+McLuhan/M
+McMahon/M
+McMartin/M
+McMillan/M
+McNamara/M
+McNaughton/M
+McNeil/M
+McPherson/M
+MD
+Md/M
+mdse
+MDT
+ME
+Meade/M
+Mead/M
+meadowland
+meadowlark/SM
+meadow/MS
+Meadows
+meadowsweet/M
+mead/SM
+Meagan/M
+meagerness/SM
+meager/PY
+Meaghan/M
+meagres
+mealiness/MS
+meal/MDGS
+mealtime/MS
+mealybug/S
+mealymouthed
+mealy/PRST
+meander/JDSG
+meaneing
+meanie/MS
+meaningfulness/SM
+meaningful/YP
+meaninglessness/SM
+meaningless/PY
+meaning/M
+meanness/S
+means/M
+meantime/SM
+meant/U
+meanwhile/S
+Meany/M
+mean/YRGJTPS
+meany's
+Meara/M
+measle/SD
+measles/M
+measly/TR
+measurable/U
+measurably
+measure/BLMGRSD
+measured/Y
+measureless
+measurement/SM
+measurer/M
+measures/A
+measuring/A
+meas/Y
+meataxe
+meatball/MS
+meatiness/MS
+meatless
+meatloaf
+meatloaves
+meat/MS
+meatpacking/S
+meaty/RPT
+Mecca/MS
+mecca/S
+mechanical/YS
+mechanic/MS
+mechanism/SM
+mechanistic
+mechanistically
+mechanist/M
+mechanization/SM
+mechanized/U
+mechanizer/M
+mechanize/RSDZGB
+mechanizes/U
+mechanochemically
+Mechelle/M
+med
+medalist/MS
+medallion/MS
+medal/SGMD
+Medan/M
+meddle/GRSDZ
+meddlesome
+Medea/M
+Medellin
+Medfield/M
+mediaeval's
+medial/AY
+medials
+median/YMS
+media/SM
+mediateness/M
+mediate/PSDYVNGX
+mediation/ASM
+mediator/SM
+Medicaid/SM
+medical/YS
+medicament/MS
+Medicare/MS
+medicate/DSXNGV
+medication/M
+Medici/MS
+medicinal/SY
+medicine/DSMG
+medico/SM
+medic/SM
+medievalist/MS
+medieval/YMS
+Medina/M
+mediocre
+mediocrity/MS
+meditate/NGVXDS
+meditation/M
+meditativeness/M
+meditative/PY
+Mediterranean/MS
+mediumistic
+medium/SM
+medley/SM
+medulla/SM
+Medusa/M
+meed/MS
+meekness/MS
+meek/TPYR
+meerschaum/MS
+meeter/M
+meetinghouse/S
+meeting/M
+meet/JGSYR
+me/G
+mega
+megabit/MS
+megabuck/S
+megabyte/S
+megacycle/MS
+megadeath/M
+megadeaths
+megahertz/M
+megalithic
+megalith/M
+megaliths
+megalomaniac/SM
+megalomania/SM
+megalopolis/SM
+Megan/M
+megaphone/SDGM
+megaton/MS
+megavolt/M
+megawatt/SM
+megaword/S
+Megen/M
+Meggie/M
+Meggi/M
+Meggy/M
+Meghan/M
+Meghann/M
+Meg/MN
+megohm/MS
+Mehetabel/M
+Meier/M
+Meighen/M
+Meiji/M
+Mei/MR
+meioses
+meiosis/M
+meiotic
+Meir/M
+Meister/M
+Meistersinger/M
+Mejia/M
+Mekong/M
+Mela/M
+Melamie/M
+melamine/SM
+melancholia/SM
+melancholic/S
+melancholy/MS
+Melanesia/M
+Melanesian/S
+melange/S
+Melania/M
+Melanie/M
+melanin/MS
+melanoma/SM
+Melantha/M
+Melany/M
+Melba/M
+Melbourne/M
+Melcher/M
+Melchior/M
+meld/SGD
+mêlée/MS
+Melendez/M
+Melesa/M
+Melessa/M
+Melicent/M
+Melina/M
+Melinda/M
+Melinde/M
+meliorate/XSDVNG
+melioration/M
+Melisa/M
+Melisande/M
+Melisandra/M
+Melisenda/M
+Melisent/M
+Melissa/M
+Melisse/M
+Melita/M
+Melitta/M
+Mella/M
+Mellicent/M
+Mellie/M
+mellifluousness/SM
+mellifluous/YP
+Melli/M
+Mellisa/M
+Mellisent/M
+Melloney/M
+Mellon/M
+mellowness/MS
+mellow/TGRDYPS
+Melly/M
+Mel/MY
+Melodee/M
+melodically
+melodic/S
+Melodie/M
+melodiousness/S
+melodious/YP
+melodrama/SM
+melodramatically
+melodramatic/S
+Melody/M
+melody/MS
+Melonie/M
+melon/MS
+Melony/M
+Melosa/M
+Melpomene/M
+meltdown/S
+melter/M
+melting/Y
+Melton/M
+melt/SAGD
+Melva/M
+Melville/M
+Melvin/M
+Melvyn/M
+Me/M
+member/DMS
+membered/AE
+members/EA
+membership/SM
+membrane/MSD
+membranous
+memento/SM
+Memling/M
+memoir/MS
+memorabilia
+memorability/SM
+memorableness/M
+memorable/P
+memorably
+memorandum/SM
+memorialize/DSG
+memorialized/U
+memorial/SY
+memoriam
+memorization/MS
+memorized/U
+memorizer/M
+memorize/RSDZG
+memorizes/A
+memoryless
+memory/MS
+memo/SM
+Memphis/M
+menace/GSD
+menacing/Y
+menagerie/SM
+menage/S
+Menander/M
+menarche/MS
+Menard/M
+Mencius/M
+Mencken/M
+mendaciousness/M
+mendacious/PY
+mendacity/MS
+Mendeleev/M
+mendelevium/SM
+Mendelian
+Mendel/M
+Mendelssohn/M
+mender/M
+Mendez/M
+mendicancy/MS
+mendicant/S
+Mendie/M
+mending/M
+Mendocino/M
+Mendoza/M
+mend/RDSJGZ
+Mendy/M
+Menelaus/M
+Menes/M
+menfolk/S
+menhaden/M
+menial/YS
+meningeal
+meninges
+meningitides
+meningitis/M
+meninx
+menisci
+meniscus/M
+Menkalinan/M
+Menkar/M
+Menkent/M
+Menlo/M
+men/MS
+Mennonite/SM
+Menominee
+menopausal
+menopause/SM
+menorah/M
+menorahs
+Menotti/M
+Mensa/M
+Mensch/M
+mensch/S
+menservants/M
+mens/SDG
+menstrual
+menstruate/NGDSX
+menstruation/M
+mensurable/P
+mensuration/MS
+menswear/M
+mentalist/MS
+mentality/MS
+mental/Y
+mentholated
+menthol/SM
+mentionable/U
+mentioned/U
+mentioner/M
+mention/ZGBRDS
+mentor/DMSG
+Menuhin/M
+menu/SM
+Menzies/M
+meow/DSG
+Mephistopheles/M
+Merak/M
+Mercado/M
+mercantile
+Mercator/M
+Mercedes
+mercenariness/M
+mercenary/SMP
+mercerize/SDG
+Mercer/M
+mercer/SM
+merchandiser/M
+merchandise/SRDJMZG
+merchantability
+merchantman/M
+merchantmen
+merchant/SBDMG
+Mercie/M
+mercifully/U
+mercifulness/M
+merciful/YP
+mercilessness/SM
+merciless/YP
+Merci/M
+Merck/M
+mercurial/SPY
+mercuric
+Mercurochrome/M
+mercury/MS
+Mercury/MS
+Mercy/M
+mercy/SM
+Meredeth/M
+Meredithe/M
+Meredith/M
+Merell/M
+meretriciousness/SM
+meretricious/YP
+mere/YS
+merganser/MS
+merger/M
+merge/SRDGZ
+Meridel/M
+meridian/MS
+meridional
+Meridith/M
+Meriel/M
+Merilee/M
+Merill/M
+Merilyn/M
+meringue/MS
+merino/MS
+Meris
+Merissa/M
+merited/U
+meritocracy/MS
+meritocratic
+meritocrats
+meritoriousness/MS
+meritorious/PY
+merit/SCGMD
+Meriwether/M
+Merla/M
+Merle/M
+Merlina/M
+Merline/M
+merlin/M
+Merlin/M
+Merl/M
+mermaid/MS
+merman/M
+mermen
+Merna/M
+Merola/M
+meromorphic
+Merralee/M
+Merrel/M
+Merriam/M
+Merrick/M
+Merridie/M
+Merrielle/M
+Merrie/M
+Merrilee/M
+Merrile/M
+Merrili/M
+Merrill/M
+merrily
+Merrily/M
+Merrimack/M
+Merrimac/M
+merriment/MS
+merriness/S
+Merritt/M
+Merry/M
+merrymaker/MS
+merrymaking/SM
+merry/RPT
+Mersey/M
+mer/TGDR
+Merton/M
+Mervin/M
+Merv/M
+Merwin/M
+Merwyn/M
+Meryl/M
+Mesa
+Mesabi/M
+mesa/SM
+mescaline/SM
+mescal/SM
+mesdames/M
+mesdemoiselles/M
+Meshed's
+meshed/U
+mesh/GMSD
+mesmeric
+mesmerism/SM
+mesmerized/U
+mesmerizer/M
+mesmerize/SRDZG
+Mesolithic/M
+mesomorph/M
+mesomorphs
+meson/MS
+Mesopotamia/M
+Mesopotamian/S
+mesosphere/MS
+mesozoic
+Mesozoic
+mesquite/MS
+mes/S
+message/SDMG
+messeigneurs
+messenger/GSMD
+Messerschmidt/M
+mess/GSDM
+Messiaen/M
+messiah
+Messiah/M
+messiahs
+Messiahs
+messianic
+Messianic
+messieurs/M
+messily
+messiness/MS
+messmate/MS
+Messrs/M
+messy/PRT
+mestizo/MS
+meta
+metabolic
+metabolically
+metabolism/MS
+metabolite/SM
+metabolize/GSD
+metacarpal/S
+metacarpi
+metacarpus/M
+metacircular
+metacircularity
+metalanguage/MS
+metalization/SM
+metalized
+metallic/S
+metalliferous
+metallings
+metallography/M
+metalloid/M
+metallurgic
+metallurgical/Y
+metallurgist/S
+metallurgy/MS
+metal/SGMD
+metalsmith/MS
+metalworking/M
+metalwork/RMJGSZ
+Meta/M
+metamathematical
+metamorphic
+metamorphism/SM
+metamorphose/GDS
+metamorphosis/M
+metaphoric
+metaphorical/Y
+metaphor/MS
+metaphosphate/M
+metaphysical/Y
+metaphysic/SM
+metastability/M
+metastable
+metastases
+metastasis/M
+metastasize/DSG
+metastatic
+metatarsal/S
+metatarsi
+metatarsus/M
+metatheses
+metathesis/M
+metathesized
+metathesizes
+metathesizing
+metavariable
+metempsychoses
+metempsychosis/M
+meteoric
+meteorically
+meteorite/SM
+meteoritic/S
+meteoritics/M
+meteoroid/SM
+meteorologic
+meteorological
+meteorologist/S
+meteorology/MS
+meteor/SM
+meter/GDM
+mete/ZDGSR
+methadone/SM
+methane/MS
+methanol/SM
+methinks
+methionine/M
+methodicalness/SM
+methodical/YP
+methodism
+Methodism/SM
+methodist/MS
+Methodist/MS
+method/MS
+methodological/Y
+methodologists
+methodology/MS
+methought
+Methuen/M
+Methuselah/M
+Methuselahs
+methylated
+methylene/M
+methyl/SM
+meticulousness/MS
+meticulous/YP
+métier/S
+metonymy/M
+Metrecal/M
+metrical/Y
+metricate/SDNGX
+metricize/GSD
+metrics/M
+metric/SM
+metronome/MS
+metropolis/SM
+metropolitanization
+metropolitan/S
+metro/SM
+mets
+Metternich/M
+mettle/SDM
+mettlesome
+met/U
+Metzler/M
+Meuse/M
+mewl/GSD
+mew/SGD
+mews/SM
+Mex
+Mexicali/M
+Mexican/S
+Mexico/M
+Meyerbeer/M
+Meyer/SM
+mezzanine/MS
+mezzo/S
+MFA
+mfg
+mfr/S
+mg
+M/GB
+Mg/M
+MGM/M
+mgr
+Mgr
+MHz
+MI
+MIA
+Mia/M
+Miami/SM
+Miaplacidus/M
+miasmal
+miasma/SM
+Micaela/M
+Micah/M
+mica/MS
+micelles
+mice/M
+Michaela/M
+Michaelangelo/M
+Michaelina/M
+Michaeline/M
+Michaella/M
+Michaelmas/MS
+Michael/SM
+Michaelson/M
+Michail/M
+Michale/M
+Michal/M
+Micheal/M
+Micheil/M
+Michelangelo/M
+Michele/M
+Michelina/M
+Micheline/M
+Michelin/M
+Michelle/M
+Michell/M
+Michel/M
+Michelson/M
+Michigander/S
+Michiganite/S
+Michigan/M
+Mich/M
+Mickelson/M
+Mickey/M
+mickey/SM
+Mickie/M
+Micki/M
+Mick/M
+Micky/M
+Mic/M
+Micmac/M
+micra's
+microamp
+microanalysis/M
+microanalytic
+microbe/MS
+microbial
+microbicidal
+microbicide/M
+microbiological
+microbiologist/MS
+microbiology/SM
+microbrewery/S
+microchemistry/M
+microchip/S
+microcircuit/MS
+microcode/GSD
+microcomputer/MS
+microcosmic
+microcosm/MS
+microdensitometer
+microdot/MS
+microeconomic/S
+microeconomics/M
+microelectronic/S
+microelectronics/M
+microfiber/S
+microfiche/M
+microfilm/DRMSG
+microfossils
+micrography/M
+microgroove/MS
+microhydrodynamics
+microinstruction/SM
+microjoule
+microlevel
+microlight/S
+micromanage/GDSL
+micromanagement/S
+micrometeorite/MS
+micrometeoritic
+micrometer/SM
+Micronesia/M
+Micronesian/S
+micron/MS
+microorganism/SM
+microphone/SGM
+Microport/M
+microprocessing
+microprocessor/SM
+microprogrammed
+microprogramming
+microprogram/SM
+micro/S
+microscope/SM
+microscopic
+microscopical/Y
+microscopy/MS
+microsecond/MS
+microsimulation/S
+Microsystems
+micros/M
+Microsoft/M
+microsomal
+microstore
+microsurgery/SM
+MicroVAXes
+MicroVAX/M
+microvolt/SM
+microwaveable
+microwave/BMGSD
+microword/S
+midair/MS
+midas
+Midas/M
+midband/M
+midday/MS
+midden/SM
+middest
+middlebrow/SM
+Middlebury/M
+middle/GJRSD
+middleman/M
+middlemen
+middlemost
+Middlesex/M
+Middleton/M
+Middletown/M
+middleweight/SM
+middling/Y
+middy/SM
+Mideastern
+Mideast/M
+midfield/RM
+Midge/M
+midge/SM
+midget/MS
+midi/S
+midland/MRS
+Midland/MS
+midlife
+midlives
+midmorn/G
+midmost/S
+midnight/SYM
+midpoint/MS
+midrange
+midrib/MS
+midriff/MS
+mid/S
+midscale
+midsection/M
+midshipman/M
+midshipmen
+midship/S
+midspan
+midstream/MS
+midst/SM
+midsummer/MS
+midterm/MS
+midtown/MS
+Midway/M
+midway/S
+midweek/SYM
+Midwesterner/M
+Midwestern/ZR
+Midwest/M
+midwicket
+midwifery/SM
+midwife/SDMG
+midwinter/YMS
+midwives
+midyear/MS
+mien/M
+miff/GDS
+mightily
+mightiness/MS
+mightn't
+might/S
+mighty/TPR
+mignon
+mignonette/SM
+Mignon/M
+Mignonne/M
+migraine/SM
+migrant/MS
+migrate/ASDG
+migration/MS
+migrative
+migratory/S
+MIG/S
+Miguela/M
+Miguelita/M
+Miguel/M
+mikado/MS
+Mikaela/M
+Mikael/M
+mike/DSMG
+Mikel/M
+Mike/M
+Mikey/M
+Mikhail/M
+Mikkel/M
+Mikol/M
+Mikoyan/M
+milady/MS
+Milagros/M
+Milanese
+Milan/M
+milch/M
+mildew/DMGS
+mildness/MS
+Mildred/M
+Mildrid/M
+mild/STYRNP
+mileage/SM
+Milena/M
+milepost/SM
+miler/M
+mile/SM
+Mile/SM
+milestone/MS
+Milford/M
+Milicent/M
+milieu/SM
+Milissent/M
+militancy/MS
+militantness/M
+militant/YPS
+militarily
+militarism/SM
+militaristic
+militarist/MS
+militarization/SCM
+militarize/SDCG
+military
+militate/SDG
+militiaman/M
+militiamen
+militia/SM
+Milka/M
+Milken/M
+milker/M
+milk/GZSRDM
+milkiness/MS
+milkmaid/SM
+milkman/M
+milkmen
+milkshake/S
+milksop/SM
+milkweed/MS
+milky/RPT
+millage/S
+Millard/M
+Millay/M
+millenarian
+millenarianism/M
+millennial
+millennialism
+millennium/MS
+millepede's
+miller/M
+Miller/M
+Millet/M
+millet/MS
+milliamp
+milliampere/S
+milliard/MS
+millibar/MS
+Millicent/M
+millidegree/S
+Millie/M
+milligram/MS
+millijoule/S
+Millikan/M
+milliliter/MS
+Milli/M
+millimeter/SM
+milliner/SM
+millinery/MS
+milling/M
+millionaire/MS
+million/HDMS
+millionth/M
+millionths
+millipede/SM
+millisecond/MS
+Millisent/M
+millivoltmeter/SM
+millivolt/SM
+milliwatt/S
+millpond/MS
+millrace/SM
+mill/SGZMRD
+Mill/SMR
+millstone/SM
+millstream/SM
+millwright/MS
+Milly/M
+mil/MRSZ
+Mil/MY
+Milne/M
+Milo/M
+Milquetoast/S
+milquetoast/SM
+Miltiades/M
+Miltie/M
+Milt/M
+milt/MDSG
+Miltonic
+Milton/M
+Miltown/M
+Milty/M
+Milwaukee/M
+Milzie/M
+MIMD
+mime/DSRMG
+mimeograph/GMDS
+mimeographs
+mimer/M
+mimesis/M
+mimetic
+mimetically
+mimicked
+mimicker/SM
+mimicking
+mimicry/MS
+mimic/S
+Mimi/M
+mi/MNX
+Mimosa/M
+mimosa/SM
+Mina/M
+minaret/MS
+minatory
+mincemeat/MS
+mincer/M
+mince/SRDGZJ
+mincing/Y
+Minda/M
+Mindanao/M
+mind/ARDSZG
+mindbogglingly
+minded/P
+minder/M
+mindfully
+mindfulness/MS
+mindful/U
+mindlessness/SM
+mindless/YP
+Mindoro/M
+min/DRZGJ
+mind's
+mindset/S
+Mindy/M
+minefield/MS
+mineralization/C
+mineralized/U
+mineralogical
+mineralogist/SM
+mineralogy/MS
+mineral/SM
+miner/M
+Miner/M
+Minerva/M
+mineshaft
+mine/SNX
+minestrone/MS
+minesweeper/MS
+Minetta/M
+Minette/M
+mineworkers
+mingle/SDG
+Ming/M
+Mingus/M
+miniature/GMSD
+miniaturist/SM
+miniaturization/MS
+miniaturize/SDG
+minibike/S
+minibus/SM
+minicab/M
+minicam/MS
+minicomputer/SM
+minidress/SM
+minify/GSD
+minimalism/S
+minimalistic
+minimalist/MS
+minimality
+minimal/SY
+minima's
+minimax/M
+minimization/MS
+minimized/U
+minimizer/M
+minimize/RSDZG
+minim/SM
+minimum/MS
+mining/M
+minion/M
+mini/S
+miniseries
+miniskirt/MS
+ministerial/Y
+minister/MDGS
+ministrant/S
+ministration/SM
+ministry/MS
+minivan/S
+miniver/M
+minke
+mink/SM
+Min/MR
+Minna/M
+Minnaminnie/M
+Minneapolis/M
+Minne/M
+minnesinger/MS
+Minnesota/M
+Minnesotan/S
+Minnie/M
+Minni/M
+Minn/M
+Minnnie/M
+minnow/SM
+Minny/M
+Minoan/S
+Minolta/M
+minor/DMSG
+minority/MS
+Minor/M
+Minos
+Minotaur/M
+minotaur/S
+Minot/M
+minoxidil/S
+Minsk/M
+Minsky/M
+minster/SM
+minstrel/SM
+minstrelsy/MS
+mintage/SM
+Mintaka/M
+Minta/M
+minter/M
+mint/GZSMRD
+minty/RT
+minuend/SM
+minuet/SM
+Minuit/M
+minuscule/SM
+minus/S
+minuteman
+Minuteman/M
+minutemen
+minuteness/SM
+minute/RSDPMTYG
+minutiae
+minutia/M
+minx/MS
+Miocene
+MIPS
+Miquela/M
+Mirabeau/M
+Mirabella/M
+Mirabelle/M
+Mirabel/M
+Mirach/M
+miracle/MS
+miraculousness/M
+miraculous/PY
+mirage/GSDM
+Mira/M
+Miranda/M
+Miran/M
+Mireielle/M
+Mireille/M
+Mirella/M
+Mirelle/M
+mire/MGDS
+Mirfak/M
+Miriam/M
+Mirilla/M
+Mir/M
+Mirna/M
+Miro
+mirror/DMGS
+mirthfulness/SM
+mirthful/PY
+mirthlessness/M
+mirthless/YP
+mirth/M
+mirths
+MIRV/DSG
+miry/RT
+Mirzam/M
+misaddress/SDG
+misadventure/SM
+misalign/DSGL
+misalignment/MS
+misalliance/MS
+misanalysed
+misanthrope/MS
+misanthropic
+misanthropically
+misanthropist/S
+misanthropy/SM
+misapplier/M
+misapply/GNXRSD
+misapprehend/GDS
+misapprehension/MS
+misappropriate/GNXSD
+misbegotten
+misbehaver/M
+misbehave/RSDG
+misbehavior/SM
+misbrand/DSG
+misc
+miscalculate/XGNSD
+miscalculation/M
+miscall/SDG
+miscarriage/MS
+miscarry/SDG
+miscast/GS
+miscegenation/SM
+miscellanea
+miscellaneous/PY
+miscellany/MS
+Mischa/M
+mischance/MGSD
+mischief/MDGS
+mischievousness/MS
+mischievous/PY
+miscibility/S
+miscible/C
+misclassification/M
+misclassified
+misclassifying
+miscode/SDG
+miscommunicate/NDS
+miscomprehended
+misconceive/GDS
+misconception/MS
+misconduct/GSMD
+misconfiguration
+misconstruction/MS
+misconstrue/DSG
+miscopying
+miscount/DGS
+miscreant/MS
+miscue/MGSD
+misdeal/SG
+misdealt
+misdeed/MS
+misdemeanant/SM
+misdemeanor/SM
+misdiagnose/GSD
+misdid
+misdirect/GSD
+misdirection/MS
+misdirector/S
+misdoes
+misdo/JG
+misdone
+miserableness/SM
+miserable/SP
+miserably
+miser/KM
+miserliness/MS
+miserly/P
+misery/MS
+mises/KC
+misfeasance/MS
+misfeature/M
+misfield
+misfile/SDG
+misfire/SDG
+misfit/MS
+misfitted
+misfitting
+misfortune/SM
+misgauge/GDS
+misgiving/MYS
+misgovern/LDGS
+misgovernment/S
+misguidance/SM
+misguidedness/M
+misguided/PY
+misguide/DRSG
+misguider/M
+Misha/M
+mishandle/SDG
+mishap/MS
+mishapped
+mishapping
+misheard
+mishear/GS
+mishitting
+mishmash/SM
+misidentification/M
+misidentify/GNSD
+misinformation/SM
+misinform/GDS
+misinterpretation/MS
+misinterpreter/M
+misinterpret/RDSZG
+misjudge/DSG
+misjudging/Y
+misjudgment/MS
+Miskito
+mislabel/DSG
+mislaid
+mislay/GS
+misleader/M
+mislead/GRJS
+misleading/Y
+misled
+mismanage/LGSD
+mismanagement/MS
+mismatch/GSD
+misname/GSD
+misnomer/GSMD
+misogamist/MS
+misogamy/MS
+misogynistic
+misogynist/MS
+misogynous
+misogyny/MS
+misperceive/SD
+misplace/GLDS
+misplacement/MS
+misplay/GSD
+mispositioned
+misprint/SGDM
+misprision/SM
+mispronounce/DSG
+mispronunciation/MS
+misquotation/MS
+misquote/GDS
+misreader/M
+misread/RSGJ
+misrelated
+misremember/DG
+misreport/DGS
+misrepresentation/MS
+misrepresenter/M
+misrepresent/SDRG
+misroute/DS
+misrule/SDG
+missal/ESM
+misshape/DSG
+misshapenness/SM
+misshapen/PY
+Missie/M
+missile/MS
+missilery/SM
+mission/AMS
+missionary/MS
+missioned
+missioner/SM
+missioning
+missis's
+Mississauga/M
+Mississippian/S
+Mississippi/M
+missive/MS
+Missoula/M
+Missourian/S
+Missouri/M
+misspeak/SG
+misspecification
+misspecified
+misspelling/M
+misspell/SGJD
+misspend/GS
+misspent
+misspoke
+misspoken
+mis/SRZ
+miss/SDEGV
+Miss/SM
+misstate/GLDRS
+misstatement/MS
+misstater/M
+misstep/MS
+misstepped
+misstepping
+missus/SM
+Missy/M
+mistakable/U
+mistake/BMGSR
+mistaken/Y
+mistaker/M
+mistaking/Y
+Mistassini/M
+mister/GDM
+Mister/SM
+mistily
+Misti/M
+mistime/GSD
+mistiness/S
+mistletoe/MS
+mist/MRDGZS
+mistook
+mistral/MS
+mistranslated
+mistranslates
+mistranslating
+mistranslation/SM
+mistreat/DGSL
+mistreatment/SM
+Mistress/MS
+mistress/MSY
+mistrial/SM
+mistruster/M
+mistrustful/Y
+mistrust/SRDG
+Misty/M
+mistype/SDGJ
+misty/PRT
+misunderstander/M
+misunderstanding/M
+misunderstand/JSRZG
+misunderstood
+misuser/M
+misuse/RSDMG
+miswritten
+Mitchael/M
+Mitchell/M
+Mitchel/M
+Mitch/M
+miterer/M
+miter/GRDM
+mite/SRMZ
+Mitford/M
+Mithra/M
+Mithridates/M
+mitigated/U
+mitigate/XNGVDS
+mitigation/M
+MIT/M
+mitoses
+mitosis/M
+mitotic
+MITRE/SM
+Mitsubishi/M
+mitten/M
+Mitterrand/M
+mitt/XSMN
+Mitty/M
+Mitzi/M
+mitzvahs
+mixable
+mix/AGSD
+mixed/U
+mixer/SM
+mixture/SM
+Mizar/M
+mizzenmast/SM
+mizzen/MS
+Mk
+mks
+ml
+Mlle/M
+mm
+MM
+MMe
+Mme/SM
+MN
+mnemonically
+mnemonics/M
+mnemonic/SM
+Mnemosyne/M
+Mn/M
+MO
+moan/GSZRDM
+moat/SMDG
+mobbed
+mobber
+mobbing
+mobcap/SM
+Mobile/M
+mobile/S
+mobility/MS
+mobilizable
+mobilization/AMCS
+mobilize/CGDS
+mobilized/U
+mobilizer/MS
+mobilizes/A
+Mobil/M
+mob/MS
+mobster/MS
+Mobutu/M
+moccasin/SM
+mocha/SM
+mockers/M
+mockery/MS
+mock/GZSRD
+mockingbird/MS
+mocking/Y
+mo/CSK
+modality/MS
+modal/Y
+modeled/A
+modeler/M
+modeling/M
+models/A
+model/ZGSJMRD
+mode/MS
+modem/SM
+moderated/U
+moderateness/SM
+moderate/PNGDSXY
+moderation/M
+moderator/MS
+modernism/MS
+modernistic
+modernist/S
+modernity/SM
+modernization/MS
+modernized/U
+modernizer/M
+modernize/SRDGZ
+modernizes/U
+modernness/SM
+modern/PTRYS
+Modesta/M
+Modestia/M
+Modestine/M
+Modesto/M
+modest/TRY
+Modesty/M
+modesty/MS
+modicum/SM
+modifiability/M
+modifiableness/M
+modifiable/U
+modification/M
+modified/U
+modifier/M
+modify/NGZXRSD
+Modigliani/M
+modishness/MS
+modish/YP
+mod/TSR
+Modula/M
+modularity/SM
+modularization
+modularize/SDG
+modular/SY
+modulate/ADSNCG
+modulation/CMS
+modulator/ACSM
+module/SM
+moduli
+modulo
+modulus/M
+modus
+Moe/M
+Moen/M
+Mogadiscio's
+Mogadishu
+mogul/MS
+Mogul/MS
+mohair/SM
+Mohamed/M
+Mohammad/M
+Mohammedanism/MS
+Mohammedan/SM
+Mohammed's
+Mohandas/M
+Mohandis/M
+Mohawk/MS
+Mohegan/S
+Mohican's
+Moho/M
+Mohorovicic/M
+Mohr/M
+moiety/MS
+moil/SGD
+Moina/M
+Moines/M
+Moira/M
+moire/MS
+Moise/MS
+Moiseyev/M
+Moishe/M
+moistener/M
+moisten/ZGRD
+moistness/MS
+moist/TXPRNY
+moisture/MS
+moisturize/GZDRS
+Mojave/M
+molal
+molarity/SM
+molar/MS
+molasses/MS
+Moldavia/M
+Moldavian/S
+moldboard/SM
+molder/DG
+moldiness/SM
+molding/M
+mold/MRDJSGZ
+Moldova
+moldy/PTR
+molecularity/SM
+molecular/Y
+molecule/MS
+molehill/SM
+mole/MTS
+moleskin/MS
+molestation/SM
+molested/U
+molester/M
+molest/RDZGS
+Moliere
+Molina/M
+Moline/M
+Mollee/M
+Mollie/M
+mollification/M
+mollify/XSDGN
+Molli/M
+Moll/M
+moll/MS
+mollusc's
+mollusk/S
+mollycoddler/M
+mollycoddle/SRDG
+Molly/M
+molly/SM
+Molnar/M
+Moloch/M
+Molokai/M
+Molotov/M
+molter/M
+molt/RDNGZS
+Moluccas
+molybdenite/M
+molybdenum/MS
+Mombasa/M
+momenta
+momentarily
+momentariness/SM
+momentary/P
+moment/MYS
+momentousness/MS
+momentous/YP
+momentum/SM
+momma/S
+Mommy/M
+mommy/SM
+Mo/MN
+mom/SM
+Monaco/M
+monadic
+monad/SM
+Monah/M
+Mona/M
+monarchic
+monarchical
+monarchism/MS
+monarchistic
+monarchist/MS
+monarch/M
+monarchs
+monarchy/MS
+Monash/M
+monastery/MS
+monastical/Y
+monasticism/MS
+monastic/S
+monaural/Y
+Mondale/M
+Monday/MS
+Mondrian/M
+Monegasque/SM
+Monera/M
+monetarily
+monetarism/S
+monetarist/MS
+monetary
+monetization/CMA
+monetize/CGADS
+Monet/M
+moneybag/SM
+moneychangers
+moneyer/M
+moneylender/SM
+moneymaker/MS
+moneymaking/MS
+money/SMRD
+Monfort/M
+monger/SGDM
+Mongolia/M
+Mongolian/S
+Mongolic/M
+mongolism/SM
+mongoloid/S
+Mongoloid/S
+Mongol/SM
+mongoose/SM
+mongrel/SM
+Monica/M
+monies/M
+Monika/M
+moniker/MS
+Monique/M
+monism/MS
+monist/SM
+monition/SM
+monitored/U
+monitor/GSMD
+monitory/S
+monkeyshine/S
+monkey/SMDG
+monkish
+Monk/M
+monk/MS
+monkshood/SM
+Monmouth/M
+monochromatic
+monochromator
+monochrome/MS
+monocle/SDM
+monoclinic
+monoclonal/S
+monocotyledonous
+monocotyledon/SM
+monocular/SY
+monodic
+monodist/S
+monody/MS
+monogamist/MS
+monogamous/PY
+monogamy/MS
+monogrammed
+monogramming
+monogram/MS
+monograph/GMDS
+monographs
+monolingualism
+monolingual/S
+monolithic
+monolithically
+monolith/M
+monoliths
+monologist/S
+monologue/GMSD
+monomaniacal
+monomaniac/MS
+monomania/MS
+monomeric
+monomer/SM
+monomial/SM
+mono/MS
+Monongahela/M
+mononuclear
+mononucleoses
+mononucleosis/M
+monophonic
+monoplane/MS
+monopole/S
+monopolistic
+monopolist/MS
+monopolization/MS
+monopolized/U
+monopolize/GZDSR
+monopolizes/U
+monopoly/MS
+monorail/SM
+monostable
+monosyllabic
+monosyllable/MS
+monotheism/SM
+monotheistic
+monotheist/S
+monotone/SDMG
+monotonic
+monotonically
+monotonicity
+monotonousness/MS
+monotonous/YP
+monotony/MS
+monovalent
+monoxide/SM
+Monroe/M
+Monro/M
+Monrovia/M
+Monsanto/M
+monseigneur
+monsieur/M
+Monsignori
+Monsignor/MS
+monsignor/S
+Mon/SM
+monsoonal
+monsoon/MS
+monster/SM
+monstrance/ASM
+monstrosity/SM
+monstrousness/M
+monstrous/YP
+montage/SDMG
+Montague/M
+Montaigne/M
+Montana/M
+Montanan/MS
+Montcalm/M
+Montclair/M
+Monte/M
+Montenegrin
+Montenegro/M
+Monterey/M
+Monterrey/M
+Montesquieu/M
+Montessori/M
+Monteverdi/M
+Montevideo/M
+Montezuma
+Montgomery/M
+monthly/S
+month/MY
+months
+Monticello/M
+Monti/M
+Mont/M
+Montmartre/M
+Montoya/M
+Montpelier/M
+Montrachet/M
+Montreal/M
+Montserrat/M
+Monty/M
+monumentality/M
+monumental/Y
+monument/DMSG
+mooch/ZSRDG
+moodily
+moodiness/MS
+mood/MS
+Moody/M
+moody/PTR
+Moog
+moo/GSD
+moonbeam/SM
+Mooney/M
+moon/GDMS
+moonless
+moonlight/GZDRMS
+moonlighting/M
+moonlit
+Moon/M
+moonscape/MS
+moonshiner/M
+moonshine/SRZM
+moonshot/MS
+moonstone/SM
+moonstruck
+moonwalk/SDG
+Moore/M
+moor/GDMJS
+mooring/M
+Moorish
+moorland/MS
+Moor/MS
+moose/M
+moot/RDGS
+moped/MS
+moper/M
+mope/S
+mopey
+mopier
+mopiest
+mopish
+mopped
+moppet/MS
+mopping
+mop/SZGMDR
+moraine/MS
+morale/MS
+Morales/M
+moralistic
+moralistically
+moralist/MS
+morality/UMS
+moralization/CS
+moralize/CGDRSZ
+moralled
+moraller
+moralling
+moral/SMY
+Mora/M
+Moran/M
+morass/SM
+moratorium/SM
+Moravia/M
+Moravian
+moray/SM
+morbidity/SM
+morbidness/S
+morbid/YP
+mordancy/MS
+mordant/GDYS
+Mordecai/M
+Mord/M
+Mordred/M
+Mordy/M
+more/DSN
+Moreen/M
+Morehouse/M
+Moreland/M
+morel/SM
+More/M
+Morena/M
+Moreno/M
+moreover
+Morey/M
+Morgana/M
+Morganica/M
+Morgan/MS
+Morganne/M
+morgen/M
+Morgen/M
+morgue/SM
+Morgun/M
+Moria/M
+Moriarty/M
+moribundity/M
+moribund/Y
+Morie/M
+Morin/M
+morion/M
+Morison/M
+Morissa/M
+Morita/M
+Moritz/M
+Morlee/M
+Morley/M
+Morly/M
+Mormonism/MS
+Mormon/SM
+Morna/M
+morning/MY
+morn/SGJDM
+Moroccan/S
+Morocco/M
+morocco/SM
+Moro/M
+moronic
+moronically
+Moroni/M
+moron/SM
+moroseness/MS
+morose/YP
+morpheme/DSMG
+morphemic/S
+Morpheus/M
+morph/GDJ
+morphia/S
+morphine/MS
+morphism/MS
+morphologic
+morphological/Y
+morphology/MS
+morphophonemic/S
+morphophonemics/M
+morphs
+Morrie/M
+morris
+Morris/M
+Morrison/M
+Morristown/M
+Morrow/M
+morrow/MS
+Morry/M
+morsel/GMDS
+Morse/M
+mortality/SM
+mortal/SY
+mortarboard/SM
+mortar/GSDM
+Morten/M
+mortgageable
+mortgagee/SM
+mortgage/MGDS
+mortgagor/SM
+mortice's
+mortician/SM
+Mortie/M
+mortification/M
+mortified/Y
+mortifier/M
+mortify/DRSXGN
+Mortimer/M
+mortise/MGSD
+Mort/MN
+Morton/M
+mortuary/MS
+Morty/M
+Mosaic
+mosaicked
+mosaicking
+mosaic/MS
+Moscone/M
+Moscow/M
+Moseley/M
+Moselle/M
+Mose/MSR
+Moser/M
+mosey/SGD
+Moshe/M
+Moslem's
+Mosley/M
+mosque/SM
+mosquitoes
+mosquito/M
+mos/S
+mossback/MS
+Mossberg/M
+Moss/M
+moss/SDMG
+mossy/SRT
+most/SY
+Mosul/M
+mote/ASCNK
+motel/MS
+mote's
+motet/SM
+mothball/DMGS
+motherboard/MS
+motherfucker/MS!
+motherfucking/!
+motherhood/SM
+mothering/M
+motherland/SM
+motherless
+motherliness/MS
+motherly/P
+mother/RDYMZG
+moths
+moth/ZMR
+motif/MS
+motile/S
+motility/MS
+motional/K
+motioner/M
+motion/GRDMS
+motionlessness/S
+motionless/YP
+motion's/ACK
+motions/K
+motivated/U
+motivate/XDSNGV
+motivational/Y
+motivation/M
+motivator/S
+motiveless
+motive/MGSD
+motley/S
+motlier
+motliest
+mot/MSV
+motocross/SM
+motorbike/SDGM
+motorboat/MS
+motorcade/MSDG
+motorcar/MS
+motorcycle/GMDS
+motorcyclist/SM
+motor/DMSG
+motoring/M
+motorist/SM
+motorization/SM
+motorize/DSG
+motorized/U
+motorman/M
+motormen
+motormouth
+motormouths
+Motorola/M
+motorway/SM
+Motown/M
+mottle/GSRD
+mottler/M
+Mott/M
+mottoes
+motto/M
+moue/DSMG
+moulder/DSG
+moult/GSD
+mound/GMDS
+mountable
+mountaineering/M
+mountaineer/JMDSG
+mountainousness/M
+mountainous/PY
+mountainside/MS
+mountain/SM
+mountaintop/SM
+Mountbatten/M
+mountebank/SGMD
+mounted/U
+mount/EGACD
+mounter/SM
+mounties
+Mountie/SM
+mounting/MS
+Mount/M
+mounts/AE
+mourner/M
+mournfuller
+mournfullest
+mournfulness/S
+mournful/YP
+mourning/M
+mourn/ZGSJRD
+mouser/M
+mouse/SRDGMZ
+mousetrapped
+mousetrapping
+mousetrap/SM
+mousiness/MS
+mousing/M
+mousse/MGSD
+Moussorgsky/M
+mousy/PRT
+Mouthe/M
+mouthful/MS
+mouthiness/SM
+mouth/MSRDG
+mouthorgan
+mouthpiece/SM
+mouths
+mouthwash/SM
+mouthwatering
+mouthy/PTR
+Mouton/M
+mouton/SM
+movable/ASP
+movableness/AM
+move/ARSDGZB
+moved/U
+movement/SM
+mover/AM
+moviegoer/S
+movie/SM
+moving/YS
+mower/M
+Mowgli/M
+mowing/M
+mow/SDRZG
+moxie/MS
+Moyer/M
+Moyna/M
+Moyra/M
+Mozambican/S
+Mozambique/M
+Mozart/M
+Mozelle/M
+Mozes/M
+Mozilla/M
+mozzarella/MS
+mp
+MP
+mpg
+mph
+MPH
+MRI
+Mr/M
+Mrs
+ms
+M's
+MS
+MSG
+Msgr/M
+m's/K
+Ms/S
+MST
+MSW
+mt
+MT
+mtg
+mtge
+Mt/M
+MTS
+MTV
+Muawiya/M
+Mubarak/M
+muchness/M
+much/SP
+mucilage/MS
+mucilaginous
+mucker/M
+muck/GRDMS
+muckraker/M
+muckrake/ZMDRSG
+mucky/RT
+mucosa/M
+mucous
+mucus/SM
+mudded
+muddily
+muddiness/SM
+mudding
+muddle/GRSDZ
+muddleheaded/P
+muddlehead/SMD
+muddler/M
+muddy/TPGRSD
+mudflat/S
+mudguard/SM
+mudlarks
+mud/MS
+mudroom/S
+mudslide/S
+mudslinger/M
+mudslinging/M
+mudsling/JRGZ
+Mueller/M
+Muenster
+muenster/MS
+muesli/M
+muezzin/MS
+muff/GDMS
+Muffin/M
+muffin/SM
+muffler/M
+muffle/ZRSDG
+Mufi/M
+Mufinella/M
+mufti/MS
+Mugabe/M
+mugged
+mugger/SM
+mugginess/S
+mugging/S
+muggy/RPT
+mugshot/S
+mug/SM
+mugwump/MS
+Muhammadanism/S
+Muhammadan/SM
+Muhammad/M
+Muire/M
+Muir/M
+Mukden/M
+mukluk/SM
+mulattoes
+mulatto/M
+mulberry/MS
+mulch/GMSD
+mulct/SDG
+Mulder/M
+mule/MGDS
+muleskinner/S
+muleteer/MS
+mulishness/MS
+mulish/YP
+mullah/M
+mullahs
+mullein/MS
+Mullen/M
+muller/M
+Muller/M
+mullet/MS
+Mulligan/M
+mulligan/SM
+mulligatawny/SM
+Mullikan/M
+Mullins
+mullion/MDSG
+mull/RDSG
+Multan/M
+multi
+Multibus/M
+multicellular
+multichannel/M
+multicollinearity/M
+multicolor/SDM
+multicolumn
+multicomponent
+multicomputer/MS
+Multics/M
+MULTICS/M
+multicultural
+multiculturalism/S
+multidimensional
+multidimensionality
+multidisciplinary
+multifaceted
+multifamily
+multifariousness/SM
+multifarious/YP
+multifigure
+multiform
+multifunction/D
+multilateral/Y
+multilayer
+multilevel/D
+multilingual
+multilingualism/S
+multimedia/S
+multimegaton/M
+multimeter/M
+multimillionaire/SM
+multinational/S
+multinomial/M
+multiphase
+multiple/SM
+multiplet/SM
+multiplex/GZMSRD
+multiplexor's
+multipliable
+multiplicand/SM
+multiplication/M
+multiplicative/YS
+multiplicity/MS
+multiplier/M
+multiply/ZNSRDXG
+multiprocess/G
+multiprocessor/MS
+multiprogram
+multiprogrammed
+multiprogramming/MS
+multipurpose
+multiracial
+multistage
+multistory/S
+multisyllabic
+multitasking/S
+multitude/MS
+multitudinousness/M
+multitudinous/YP
+multiuser
+multivalent
+multivalued
+multivariate
+multiversity/M
+multivitamin/S
+mu/M
+mumbler/M
+mumbletypeg/S
+mumble/ZJGRSD
+Mumford/M
+mummed
+mummer/SM
+mummery/MS
+mummification/M
+mummify/XSDGN
+mumming
+mum/MS
+mummy/GSDM
+mumps/M
+muncher/M
+Münchhausen/M
+munchies
+Munch/M
+munch/ZRSDG
+Muncie/M
+mundane/YSP
+Mundt/M
+munge/JGZSRD
+Munich/M
+municipality/SM
+municipal/YS
+munificence/MS
+munificent/Y
+munition/SDG
+Munmro/M
+Munoz/M
+Munroe/M
+Munro/M
+mun/S
+Munsey/M
+Munson/M
+Munster/MS
+Muong/M
+muon/M
+Muppet/M
+muralist/SM
+mural/SM
+Murasaki/M
+Murat/M
+Murchison/M
+Murcia/M
+murderer/M
+murderess/S
+murder/GZRDMS
+murderousness/M
+murderous/YP
+Murdoch/M
+Murdock/M
+Mureil/M
+Murial/M
+muriatic
+Murielle/M
+Muriel/M
+Murillo/M
+murkily
+murkiness/S
+murk/TRMS
+murky/RPT
+Murmansk/M
+murmurer/M
+murmuring/U
+murmurous
+murmur/RDMGZSJ
+Murphy/M
+murrain/SM
+Murray/M
+Murrow/M
+Murrumbidgee/M
+Murry/M
+Murvyn/M
+muscatel/MS
+Muscat/M
+muscat/SM
+musclebound
+muscle/SDMG
+Muscovite/M
+muscovite/MS
+Muscovy/M
+muscularity/SM
+muscular/Y
+musculature/SM
+muse
+Muse/M
+muser/M
+musette/SM
+museum/MS
+mus/GJDSR
+musher/M
+mushiness/MS
+mush/MSRDG
+mushroom/DMSG
+mushy/PTR
+Musial/M
+musicale/SM
+musicality/SM
+musicals
+musical/YU
+musician/MYS
+musicianship/MS
+musicked
+musicking
+musicological
+musicologist/MS
+musicology/MS
+music/SM
+musing/Y
+Muskegon/M
+muskeg/SM
+muskellunge/SM
+musketeer/MS
+musketry/MS
+musket/SM
+musk/GDMS
+muskie/M
+muskiness/MS
+muskmelon/MS
+muskox/N
+muskrat/MS
+musky/RSPT
+Muslim/MS
+muslin/MS
+mussel/MS
+Mussolini/MS
+Mussorgsky/M
+muss/SDG
+mussy/RT
+mustache/DSM
+mustachio/MDS
+mustang/MS
+mustard/MS
+muster/GD
+mustily
+mustiness/MS
+mustn't
+must/RDGZS
+must've
+musty/RPT
+mutability/SM
+mutableness/M
+mutable/P
+mutably
+mutagen/SM
+mutant/MS
+mutate/XVNGSD
+mutational/Y
+mutation/M
+mutator/S
+muted/Y
+muteness/S
+mute/PDSRBYTG
+mutilate/XDSNG
+mutilation/M
+mutilator/MS
+mutineer/SMDG
+mutinous/Y
+mutiny/MGSD
+Mutsuhito/M
+mutterer/M
+mutter/GZRDJ
+muttonchops
+mutton/SM
+mutt/ZSMR
+mutuality/S
+mutual/SY
+muumuu/MS
+muzak
+Muzak/SM
+Muzo/M
+muzzled/U
+muzzle/MGRSD
+muzzler/M
+MVP
+MW
+Myanmar
+Mycah/M
+Myca/M
+Mycenaean
+Mycenae/M
+Mychal/M
+mycologist/MS
+mycology/MS
+myelitides
+myelitis/M
+Myer/MS
+myers
+mylar
+Mylar/S
+Myles/M
+Mylo/M
+My/M
+myna/SM
+Mynheer/M
+myocardial
+myocardium/M
+myopia/MS
+myopically
+myopic/S
+Myrah/M
+Myra/M
+Myranda/M
+Myrdal/M
+myriad/S
+Myriam/M
+Myrilla/M
+Myrle/M
+Myrlene/M
+myrmidon/S
+Myrna/M
+Myron/M
+myrrh/M
+myrrhs
+Myrta/M
+Myrtia/M
+Myrtice/M
+Myrtie/M
+Myrtle/M
+myrtle/SM
+Myrvyn/M
+Myrwyn/M
+mys
+my/S
+myself
+Mysore/M
+mysteriousness/MS
+mysterious/YP
+mystery/MDSG
+mystical/Y
+mysticism/MS
+mystic/SM
+mystification/M
+mystifier/M
+mystify/CSDGNX
+mystifying/Y
+mystique/MS
+Myst/M
+mythic
+mythical/Y
+myth/MS
+mythographer/SM
+mythography/M
+mythological/Y
+mythologist/MS
+mythologize/CSDG
+mythology/SM
+myths
+N
+NAACP
+nabbed
+nabbing
+Nabisco/M
+nabob/SM
+Nabokov/M
+nab/S
+nacelle/SM
+nacho/S
+NaCl/M
+nacre/MS
+nacreous
+Nada/M
+Nadean/M
+Nadeen/M
+Nader/M
+Nadia/M
+Nadine/M
+nadir/SM
+Nadiya/M
+Nadya/M
+Nady/M
+nae/VM
+Nagasaki/M
+nagged
+nagger/S
+nagging/Y
+nag/MS
+Nagoya/M
+Nagpur/M
+Nagy/M
+Nahuatl/SM
+Nahum/M
+naiad/SM
+naifs
+nailbrush/SM
+nailer/M
+nail/SGMRD
+Naipaul/M
+Nair/M
+Nairobi/M
+Naismith/M
+naive/SRTYP
+naiveté/SM
+naivety/MS
+Nakamura/M
+Nakayama/M
+nakedness/MS
+naked/TYRP
+Nakoma/M
+Nalani/M
+Na/M
+Namath/M
+nameable/U
+name/ADSG
+namedrop
+namedropping
+named's
+named/U
+nameless/PY
+namely
+nameplate/MS
+namer/SM
+name's
+namesake/SM
+Namibia/M
+Namibian/S
+naming/M
+Nam/M
+Nanak/M
+Nana/M
+Nananne/M
+Nancee/M
+Nance/M
+Nancey/M
+Nanchang/M
+Nancie/M
+Nanci/M
+Nancy/M
+Nanete/M
+Nanette/M
+Nanice/M
+Nani/M
+Nanine/M
+Nanjing
+Nanking's
+Nan/M
+Nannette/M
+Nannie/M
+Nanni/M
+Nanny/M
+nanny/SDMG
+nanometer/MS
+Nanon/M
+Nanook/M
+nanosecond/SM
+Nansen/M
+Nantes/M
+Nantucket/M
+Naoma/M
+Naomi/M
+napalm/MDGS
+nape/SM
+Naphtali/M
+naphthalene/MS
+naphtha/SM
+Napier/M
+napkin/SM
+Naples/M
+napless
+Nap/M
+Napoleonic
+napoleon/MS
+Napoleon/MS
+napped
+napper/MS
+Nappie/M
+napping
+Nappy/M
+nappy/TRSM
+nap/SM
+Nara/M
+Narbonne/M
+narc/DGS
+narcissism/MS
+narcissistic
+narcissist/MS
+narcissus/M
+Narcissus/M
+narcoleptic
+narcoses
+narcosis/M
+narcotic/SM
+narcotization/S
+narcotize/GSD
+Nariko/M
+Nari/M
+nark's
+Narmada/M
+Narragansett/M
+narrate/VGNSDX
+narration/M
+narrative/MYS
+narratology
+narrator/SM
+narrowing/P
+narrowness/SM
+narrow/RDYTGPS
+narwhal/MS
+nary
+nasality/MS
+nasalization/MS
+nasalize/GDS
+nasal/YS
+NASA/MS
+nascence/ASM
+nascent/A
+NASDAQ
+Nash/M
+Nashua/M
+Nashville/M
+Nassau/M
+Nasser/M
+nastily
+nastiness/MS
+nasturtium/SM
+nasty/TRSP
+natal
+Natala/M
+Natalee/M
+Natale/M
+Natalia/M
+Natalie/M
+Natalina/M
+Nataline/M
+natalist
+natality/M
+Natal/M
+Natalya/M
+Nata/M
+Nataniel/M
+Natasha/M
+Natassia/M
+Natchez
+natch/S
+Nate/XMN
+Nathalia/M
+Nathalie/M
+Nathanael/M
+Nathanial/M
+Nathaniel/M
+Nathanil/M
+Nathan/MS
+nationalism/SM
+nationalistic
+nationalistically
+nationalist/MS
+nationality/MS
+nationalization/MS
+nationalize/CSDG
+nationalized/AU
+nationalizer/SM
+national/YS
+nationhood/SM
+nation/MS
+nationwide
+nativeness/M
+native/PYS
+Natividad/M
+Nativity/M
+nativity/MS
+Natka/M
+natl
+Nat/M
+NATO/SM
+natter/SGD
+nattily
+nattiness/SM
+Natty/M
+natty/TRP
+naturalism/MS
+naturalistic
+naturalist/MS
+naturalization/SM
+naturalized/U
+naturalize/GSD
+naturalness/US
+natural/PUY
+naturals
+nature/ASDCG
+nature's
+naturist
+Naugahyde/S
+naughtily
+naughtiness/SM
+naught/MS
+naughty/TPRS
+Naur/M
+Nauru/M
+nausea/SM
+nauseate/DSG
+nauseating/Y
+nauseousness/SM
+nauseous/P
+nautical/Y
+nautilus/MS
+Navaho's
+Navajoes
+Navajo/S
+naval/Y
+Navarro/M
+navel/MS
+nave/SM
+navigability/SM
+navigableness/M
+navigable/P
+navigate/DSXNG
+navigational
+navigation/M
+navigator/MS
+Navona/M
+Navratilova/M
+navvy/M
+Navy/S
+navy/SM
+nay/MS
+naysayer/S
+Nazarene/MS
+Nazareth/M
+Nazi/SM
+Nazism/S
+NB
+NBA
+NBC
+Nb/M
+NBS
+NC
+NCAA
+NCC
+NCO
+NCR
+ND
+N'Djamena
+Ndjamena/M
+Nd/M
+Ne
+NE
+Neala/M
+Neale/M
+Neall/M
+Neal/M
+Nealon/M
+Nealson/M
+Nealy/M
+Neanderthal/S
+neap/DGS
+Neapolitan/SM
+nearby
+nearly/RT
+nearness/MS
+nearside/M
+nearsightedness/S
+nearsighted/YP
+near/TYRDPSG
+neaten/DG
+neath
+neatness/MS
+neat/YRNTXPS
+Neb/M
+Nebraska/M
+Nebraskan/MS
+Nebr/M
+Nebuchadnezzar/MS
+nebulae
+nebula/M
+nebular
+nebulousness/SM
+nebulous/PY
+necessaries
+necessarily/U
+necessary/U
+necessitate/DSNGX
+necessitation/M
+necessitous
+necessity/SM
+neckband/M
+neckerchief/MS
+neck/GRDMJS
+necking/M
+necklace/DSMG
+neckline/MS
+necktie/MS
+necrology/SM
+necromancer/MS
+necromancy/MS
+necromantic
+necrophiliac/S
+necrophilia/M
+necropolis/SM
+necropsy/M
+necroses
+necrosis/M
+necrotic
+nectarine/SM
+nectarous
+nectar/SM
+nectary/MS
+Neda/M
+Nedda/M
+Neddie/M
+Neddy/M
+Nedi/M
+Ned/M
+née
+needed/U
+needer/M
+needful/YSP
+Needham/M
+neediness/MS
+needlecraft/M
+needle/GMZRSD
+needlepoint/SM
+needlessness/S
+needless/YP
+needlewoman/M
+needlewomen
+needlework/RMS
+needn't
+need/YRDGS
+needy/TPR
+Neel/M
+Neely/M
+ne'er
+nefariousness/MS
+nefarious/YP
+Nefen/M
+Nefertiti/M
+negated/U
+negater/M
+negate/XRSDVNG
+negation/M
+negativeness/SM
+negative/PDSYG
+negativism/MS
+negativity/MS
+negator/MS
+Negev/M
+neglecter/M
+neglectfulness/SM
+neglectful/YP
+neglect/SDRG
+negligee/SM
+negligence/MS
+negligent/Y
+negligibility/M
+negligible
+negligibly
+negotiability/MS
+negotiable/A
+negotiant/M
+negotiate/ASDXGN
+negotiation/MA
+negotiator/MS
+Negress/MS
+negritude/MS
+Negritude/S
+Negroes
+negroid
+Negroid/S
+Negro/M
+neg/S
+Nehemiah/M
+Nehru/M
+neighbored/U
+neighborer/M
+neighborhood/SM
+neighborlinesses
+neighborliness/UM
+neighborly/UP
+neighbor/SMRDYZGJ
+neigh/MDG
+neighs
+Neila/M
+Neile/M
+Neilla/M
+Neille/M
+Neill/M
+Neil/SM
+neither
+Nelda/M
+Nelia/M
+Nelie/M
+Nelle/M
+Nellie/M
+Nelli/M
+Nell/M
+Nelly/M
+Nelsen/M
+Nels/N
+Nelson/M
+nelson/MS
+nematic
+nematode/SM
+Nembutal/M
+nemeses
+nemesis
+Nemesis/M
+neoclassical
+neoclassicism/MS
+neoclassic/M
+neocolonialism/MS
+neocortex/M
+neodymium/MS
+Neogene
+neolithic
+Neolithic/M
+neologism/SM
+neomycin/M
+neonatal/Y
+neonate/MS
+neon/DMS
+neophyte/MS
+neoplasm/SM
+neoplastic
+neoprene/SM
+Nepalese
+Nepali/MS
+Nepal/M
+nepenthe/MS
+nephew/MS
+nephrite/SM
+nephritic
+nephritides
+nephritis/M
+nepotism/MS
+nepotist/S
+Neptune/M
+neptunium/MS
+nerd/S
+nerdy/RT
+Nereid/M
+Nerf/M
+Nerissa/M
+Nerita/M
+Nero/M
+Neron/M
+Nerta/M
+Nerte/M
+Nertie/M
+Nerti/M
+Nert/M
+Nerty/M
+Neruda/M
+nervelessness/SM
+nerveless/YP
+nerve's
+nerve/UGSD
+nerviness/SM
+nerving/M
+nervousness/SM
+nervous/PY
+nervy/TPR
+Nessa/M
+Nessie/M
+Nessi/M
+Nessy/M
+Nesta/M
+nester/M
+Nester/M
+Nestle/M
+nestler/M
+nestle/RSDG
+nestling/M
+Nestorius/M
+Nestor/M
+nest/RDGSBM
+netball/M
+nether
+Netherlander/SM
+Netherlands/M
+nethermost
+netherworld/S
+Netscape/M
+net/SM
+Netta/M
+Nettie/M
+Netti/M
+netting/M
+nett/JGRDS
+Nettle/M
+nettle/MSDG
+nettlesome
+Netty/M
+network/SJMDG
+Netzahualcoyotl/M
+Neumann/M
+neuralgia/MS
+neuralgic
+neural/Y
+neurasthenia/MS
+neurasthenic/S
+neuritic/S
+neuritides
+neuritis/M
+neuroanatomy
+neurobiology/M
+neurological/Y
+neurologist/MS
+neurology/SM
+neuromuscular
+neuronal
+neurone/S
+neuron/MS
+neuropathology/M
+neurophysiology/M
+neuropsychiatric
+neuroses
+neurosis/M
+neurosurgeon/MS
+neurosurgery/SM
+neurotically
+neurotic/S
+neurotransmitter/S
+neuter/JZGRD
+neutralise's
+neutralism/MS
+neutralist/S
+neutrality/MS
+neutralization/MS
+neutralized/U
+neutralize/GZSRD
+neutral/PYS
+neutrino/MS
+neutron/MS
+neut/ZR
+Nevada/M
+Nevadan/S
+Nevadian/S
+Neva/M
+never
+nevermore
+nevertheless
+nevi
+Nevile/M
+Neville/M
+Nevil/M
+Nevin/SM
+Nevis/M
+Nev/M
+Nevsa/M
+Nevsky/M
+nevus/M
+Newark/M
+newbie/S
+newborn/S
+Newbury/M
+Newburyport/M
+Newcastle/M
+newcomer/MS
+newed/A
+Newell/M
+newel/MS
+newer/A
+newfangled
+newfound
+newfoundland
+Newfoundlander/M
+Newfoundland/SRMZ
+newish
+newline/SM
+newlywed/MS
+Newman/M
+newness/MS
+Newport/M
+news/A
+newsagent/MS
+newsboy/SM
+newscaster/M
+newscasting/M
+newscast/SRMGZ
+newsdealer/MS
+newsed
+newses
+newsflash/S
+newsgirl/S
+newsgroup/SM
+newsing
+newsletter/SM
+NeWS/M
+newsman/M
+newsmen
+newspaperman/M
+newspapermen
+newspaper/SMGD
+newspaperwoman/M
+newspaperwomen
+newsprint/MS
+new/SPTGDRY
+newsreader/MS
+newsreel/SM
+newsroom/S
+news's
+newsstand/MS
+Newsweekly/M
+newsweekly/S
+Newsweek/MY
+newswire
+newswoman/M
+newswomen
+newsworthiness/SM
+newsworthy/RPT
+newsy/TRS
+newt/MS
+Newtonian
+Newton/M
+newton/SM
+Nexis/M
+next
+nexus/SM
+Neysa/M
+NF
+NFC
+NFL
+NFS
+Ngaliema/M
+Nguyen/M
+NH
+NHL
+niacin/SM
+Niagara/M
+Niall/M
+Nial/M
+Niamey/M
+nibbed
+nibbing
+nibbler/M
+nibble/RSDGZ
+Nibelung/M
+nib/SM
+Nicaean
+Nicaragua/M
+Nicaraguan/S
+Niccolo/M
+Nice/M
+Nicene
+niceness/MS
+nicety/MS
+nice/YTPR
+niche/SDGM
+Nicholas
+Nichole/M
+Nicholle/M
+Nichol/MS
+Nicholson/M
+nichrome
+nickelodeon/SM
+nickel/SGMD
+nicker/GD
+Nickey/M
+nick/GZRDMS
+Nickie/M
+Nicki/M
+Nicklaus/M
+Nick/M
+nicknack's
+nickname/MGDRS
+nicknamer/M
+Nickolai/M
+Nickola/MS
+Nickolaus/M
+Nicko/M
+Nicky/M
+Nicobar/M
+Nicodemus/M
+Nicolai/MS
+Nicola/MS
+Nicolea/M
+Nicole/M
+Nicolette/M
+Nicoli/MS
+Nicolina/M
+Nicoline/M
+Nicolle/M
+Nicol/M
+Nico/M
+Nicosia/M
+nicotine/MS
+Niebuhr/M
+niece/MS
+Niel/MS
+Nielsen/M
+Niels/N
+Nielson/M
+Nietzsche/M
+Nieves/M
+nifty/TRS
+Nigel/M
+Nigeria/M
+Nigerian/S
+Nigerien
+Niger/M
+niggardliness/SM
+niggardly/P
+niggard/SGMDY
+nigger/SGDM!
+niggler/M
+niggle/RSDGZJ
+niggling/Y
+nigh/RDGT
+nighs
+nightcap/SM
+nightclothes
+nightclubbed
+nightclubbing
+nightclub/MS
+nightdress/MS
+nightfall/SM
+nightgown/MS
+nighthawk/MS
+nightie/MS
+Nightingale/M
+nightingale/SM
+nightlife/MS
+nightlong
+nightmare/MS
+nightmarish/Y
+nightshade/SM
+nightshirt/MS
+night/SMYDZ
+nightspot/MS
+nightstand/SM
+nightstick/S
+nighttime/S
+nightwear/M
+nighty's
+NIH
+nihilism/MS
+nihilistic
+nihilist/MS
+Nijinsky/M
+Nikaniki/M
+Nike/M
+Niki/M
+Nikita/M
+Nikkie/M
+Nikki/M
+Nikko/M
+Nikolai/M
+Nikola/MS
+Nikolaos/M
+Nikolaus/M
+Nikolayev's
+Nikoletta/M
+Nikolia/M
+Nikolos/M
+Niko/MS
+Nikon/M
+Nile/SM
+nilled
+nilling
+Nil/MS
+nil/MYS
+nilpotent
+Nilsen/M
+Nils/N
+Nilson/M
+Nilsson/M
+Ni/M
+nimbi
+nimbleness/SM
+nimble/TRP
+nimbly
+nimbus/DM
+NIMBY
+Nimitz/M
+Nimrod/MS
+Nina/M
+nincompoop/MS
+ninefold
+nine/MS
+ninepence/M
+ninepin/S
+ninepins/M
+nineteen/SMH
+nineteenths
+ninetieths
+Ninetta/M
+Ninette/M
+ninety/MHS
+Nineveh/M
+ninja/S
+Ninnetta/M
+Ninnette/M
+ninny/SM
+Ninon/M
+Nintendo/M
+ninth
+ninths
+Niobe/M
+niobium/MS
+nipped
+nipper/DMGS
+nippiness/S
+nipping/Y
+nipple/GMSD
+Nipponese
+Nippon/M
+nippy/TPR
+nip/S
+Nirenberg/M
+nirvana/MS
+Nirvana/S
+nisei
+Nisei/MS
+Nissa/M
+Nissan/M
+Nisse/M
+Nissie/M
+Nissy/M
+Nita/M
+niter/M
+nitpick/DRSJZG
+nitrate/MGNXSD
+nitration/M
+nitric
+nitride/MGS
+nitriding/M
+nitrification/SM
+nitrite/MS
+nitrocellulose/MS
+nitrogenous
+nitrogen/SM
+nitroglycerin/MS
+nitrous
+nitwit/MS
+nit/ZSMR
+Niven/M
+nixer/M
+nix/GDSR
+Nixie/M
+Nixon/M
+NJ
+Nkrumah/M
+NLRB
+nm
+NM
+no/A
+NOAA
+Noach/M
+Noah/M
+Noak/M
+Noami/M
+Noam/M
+Nobelist/SM
+nobelium/MS
+Nobel/M
+Nobe/M
+Nobie/M
+nobility/MS
+Noble/M
+nobleman/M
+noblemen
+nobleness/SM
+noblesse/M
+noble/TPSR
+noblewoman
+noblewomen
+nob/MY
+nobody/MS
+Noby/M
+nocturnal/SY
+nocturne/SM
+nodal/Y
+nodded
+nodding
+noddle/MSDG
+noddy/M
+node/MS
+NoDoz/M
+nod/SM
+nodular
+nodule/SM
+Noelani/M
+Noella/M
+Noelle/M
+Noell/M
+Noellyn/M
+Noel/MS
+noel/S
+Noelyn/M
+Noe/M
+Noemi/M
+noes/S
+noggin/SM
+nohow
+noise/GMSD
+noiselessness/SM
+noiseless/YP
+noisemaker/M
+noisemake/ZGR
+noisily
+noisiness/MS
+noisome
+noisy/TPR
+Nola/M
+Nolana/M
+Noland/M
+Nolan/M
+Nolie/M
+Nollie/M
+Noll/M
+Nolly/M
+No/M
+nomadic
+nomad/SM
+Nome/M
+nomenclature/MS
+Nomi/M
+nominalized
+nominal/K
+nominally
+nominals
+nominate/CDSAXNG
+nomination/MAC
+nominative/SY
+nominator/CSM
+nominee/MS
+non
+nonabrasive
+nonabsorbent/S
+nonacademic/S
+nonacceptance/MS
+nonacid/MS
+nonactive
+nonadaptive
+nonaddictive
+nonadhesive
+nonadjacent
+nonadjustable
+nonadministrative
+nonage/MS
+nonagenarian/MS
+nonaggression/SM
+nonagricultural
+Nonah/M
+nonalcoholic/S
+nonaligned
+nonalignment/SM
+nonallergic
+Nona/M
+nonappearance/MS
+nonassignable
+nonathletic
+nonattendance/SM
+nonautomotive
+nonavailability/SM
+nonbasic
+nonbeliever/SM
+nonbelligerent/S
+nonblocking
+nonbreakable
+nonburnable
+nonbusiness
+noncaloric
+noncancerous
+noncarbohydrate/M
+nonce/MS
+nonchalance/SM
+nonchalant/YP
+nonchargeable
+nonclerical/S
+nonclinical
+noncollectable
+noncombatant/MS
+noncombustible/S
+noncommercial/S
+noncommissioned
+noncommittal/Y
+noncom/MS
+noncommunicable
+noncompeting
+noncompetitive
+noncompliance/MS
+noncomplying/S
+noncomprehending
+nonconducting
+nonconductor/MS
+nonconforming
+nonconformist/SM
+nonconformity/SM
+nonconsecutive
+nonconservative
+nonconstructive
+noncontagious
+noncontiguous
+noncontinuous
+noncontributing
+noncontributory
+noncontroversial
+nonconvertible
+noncooperation/SM
+noncorroding/S
+noncorrosive
+noncredit
+noncriminal/S
+noncritical
+noncrystalline
+noncumulative
+noncustodial
+noncyclic
+nondairy
+nondecreasing
+nondeductible
+nondelivery/MS
+nondemocratic
+nondenominational
+nondepartmental
+nondepreciating
+nondescript/YS
+nondestructive/Y
+nondetachable
+nondeterminacy
+nondeterminate/Y
+nondeterminism
+nondeterministic
+nondeterministically
+nondisciplinary
+nondisclosure/SM
+nondiscrimination/SM
+nondiscriminatory
+nondramatic
+nondrinker/SM
+nondrying
+nondurable
+noneconomic
+noneducational
+noneffective/S
+nonelastic
+nonelectrical
+nonelectric/S
+nonemergency
+nonempty
+nonenforceable
+nonentity/MS
+nonequivalence/M
+nonequivalent/S
+none/S
+nones/M
+nonessential/S
+nonesuch/SM
+nonetheless
+nonevent/MS
+nonexchangeable
+nonexclusive
+nonexempt
+nonexistence/MS
+nonexistent
+nonexplosive/S
+nonextensible
+nonfactual
+nonfading
+nonfat
+nonfatal
+nonfattening
+nonferrous
+nonfictional
+nonfiction/SM
+nonflammable
+nonflowering
+nonfluctuating
+nonflying
+nonfood/M
+nonfreezing
+nonfunctional
+nongovernmental
+nongranular
+nonhazardous
+nonhereditary
+nonhuman
+nonidentical
+Nonie/M
+Noni/M
+noninclusive
+nonindependent
+nonindustrial
+noninfectious
+noninflammatory
+noninflationary
+noninflected
+nonintellectual/S
+noninteracting
+noninterchangeable
+noninterference/MS
+nonintervention/SM
+nonintoxicating
+nonintuitive
+noninvasive
+nonionic
+nonirritating
+nonjudgmental
+nonjudicial
+nonlegal
+nonlethal
+nonlinearity/MS
+nonlinear/Y
+nonlinguistic
+nonliterary
+nonliving
+nonlocal
+nonmagical
+nonmagnetic
+nonmalignant
+nonmember/SM
+nonmetallic
+nonmetal/MS
+nonmigratory
+nonmilitant/S
+nonmilitary
+Nonnah/M
+Nonna/M
+nonnarcotic/S
+nonnative/S
+nonnegative
+nonnegotiable
+nonnuclear
+nonnumerical/S
+nonobjective
+nonobligatory
+nonobservance/MS
+nonobservant
+nonoccupational
+nonoccurrence
+nonofficial
+nonogenarian
+nonoperational
+nonoperative
+nonorthogonal
+nonorthogonality
+nonparallel/S
+nonparametric
+nonpareil/SM
+nonparticipant/SM
+nonparticipating
+nonpartisan/S
+nonpaying
+nonpayment/SM
+nonperformance/SM
+nonperforming
+nonperishable/S
+nonperson/S
+nonperturbing
+nonphysical/Y
+nonplus/S
+nonplussed
+nonplussing
+nonpoisonous
+nonpolitical
+nonpolluting
+nonporous
+nonpracticing
+nonprejudicial
+nonprescription
+nonprocedural/Y
+nonproductive
+nonprofessional/S
+nonprofit/SB
+nonprogrammable
+nonprogrammer
+nonproliferation/SM
+nonpublic
+nonpunishable
+nonracial
+nonradioactive
+nonrandom
+nonreactive
+nonreciprocal/S
+nonreciprocating
+nonrecognition/SM
+nonrecoverable
+nonrecurring
+nonredeemable
+nonreducing
+nonrefillable
+nonrefundable
+nonreligious
+nonrenewable
+nonrepresentational
+nonresidential
+nonresident/SM
+nonresidual
+nonresistance/SM
+nonresistant/S
+nonrespondent/S
+nonresponse
+nonrestrictive
+nonreturnable/S
+nonrhythmic
+nonrigid
+nonsalaried
+nonscheduled
+nonscientific
+nonscoring
+nonseasonal
+nonsectarian
+nonsecular
+nonsegregated
+nonsense/MS
+nonsensicalness/M
+nonsensical/PY
+nonsensitive
+nonsexist
+nonsexual
+nonsingular
+nonskid
+nonslip
+nonsmoker/SM
+nonsmoking
+nonsocial
+nonspeaking
+nonspecialist/MS
+nonspecializing
+nonspecific
+nonspiritual/S
+nonstaining
+nonstandard
+nonstarter/SM
+nonstick
+nonstop
+nonstrategic
+nonstriking
+nonstructural
+nonsuccessive
+nonsupervisory
+nonsupport/GS
+nonsurgical
+nonsustaining
+nonsympathizer/M
+nontarnishable
+nontaxable/S
+nontechnical/Y
+nontenured
+nonterminal/MS
+nonterminating
+nontermination/M
+nontheatrical
+nonthinking/S
+nonthreatening
+nontoxic
+nontraditional
+nontransferable
+nontransparent
+nontrivial
+nontropical
+nonuniform
+nonunion/S
+nonuser/SM
+nonvenomous
+nonverbal/Y
+nonveteran/MS
+nonviable
+nonviolence/SM
+nonviolent/Y
+nonvirulent
+nonvocal
+nonvocational
+nonvolatile
+nonvolunteer/S
+nonvoter/MS
+nonvoting
+nonwhite/SM
+nonworking
+nonyielding
+nonzero
+noodle/GMSD
+nook/MS
+noonday/MS
+noon/GDMS
+nooning/M
+noontide/MS
+noontime/MS
+noose/SDGM
+nope/S
+NORAD/M
+noradrenalin
+noradrenaline/M
+Norah/M
+Nora/M
+Norbert/M
+Norberto/M
+Norbie/M
+Norby/M
+Nordhoff/M
+Nordic/S
+Nordstrom/M
+Norean/M
+Noreen/M
+Norene/M
+Norfolk/M
+nor/H
+Norina/M
+Norine/M
+normalcy/MS
+normality/SM
+normalization/A
+normalizations
+normalization's
+normalized/AU
+normalizes/AU
+normalize/SRDZGB
+normal/SY
+Norma/M
+Normand/M
+Normandy/M
+Norman/SM
+normativeness/M
+normative/YP
+Normie/M
+norm/SMGD
+Normy/M
+Norplant
+Norrie/M
+Norri/SM
+Norristown/M
+Norry/M
+Norse
+Norseman/M
+Norsemen
+Northampton/M
+northbound
+northeastern
+northeaster/YM
+Northeast/SM
+northeastward/S
+northeast/ZSMR
+northerly/S
+norther/MY
+Northerner/M
+northernmost
+northern/RYZS
+Northfield/M
+northing/M
+northland
+North/M
+northmen
+north/MRGZ
+Northrop/M
+Northrup/M
+norths
+Norths
+Northumberland/M
+northward/S
+northwestern
+northwester/YM
+northwest/MRZS
+Northwest/MS
+northwestward/S
+Norton/M
+Norwalk/M
+Norway/M
+Norwegian/S
+Norwich/M
+Norw/M
+nosebag/M
+nosebleed/SM
+nosecone/S
+nosedive/DSG
+nosed/V
+nosegay/MS
+nose/M
+Nosferatu/M
+nos/GDS
+nosh/MSDG
+nosily
+nosiness/MS
+nosing/M
+nostalgia/SM
+nostalgically
+nostalgic/S
+Nostradamus/M
+Nostrand/M
+nostril/SM
+nostrum/SM
+nosy/SRPMT
+notability/SM
+notableness/M
+notable/PS
+notably
+notarial
+notarization/S
+notarize/DSG
+notary/MS
+notate/VGNXSD
+notational/CY
+notation/CMSF
+notative/CF
+notch/MSDG
+not/DRGB
+notebook/MS
+note/CSDFG
+notedness/M
+noted/YP
+notepad/S
+notepaper/MS
+note's
+noteworthiness/SM
+noteworthy/P
+nothingness/SM
+nothing/PS
+noticeable/U
+noticeably
+noticeboard/S
+noticed/U
+notice/MSDG
+notifiable
+notification/M
+notifier/M
+notify/NGXSRDZ
+notional/Y
+notion/MS
+notoriety/S
+notoriousness/M
+notorious/YP
+Notre/M
+Nottingham/M
+notwithstanding
+Nouakchott/M
+nougat/MS
+Noumea/M
+noun/SMK
+nourish/DRSGL
+nourished/U
+nourisher/M
+nourishment/SM
+nous/M
+nouveau
+nouvelle
+novae
+Novak/M
+Nova/M
+nova/MS
+novelette/SM
+Novelia/M
+novelist/SM
+novelization/S
+novelize/GDS
+Novell/SM
+novella/SM
+novel/SM
+novelty/MS
+November/SM
+novena/SM
+novene
+Novgorod/M
+novice/MS
+novitiate/MS
+Nov/M
+Novocaine/M
+Novocain/S
+Novokuznetsk/M
+Novosibirsk/M
+NOW
+nowadays
+noway/S
+Nowell/M
+nowhere/S
+nowise
+now/S
+noxiousness/M
+noxious/PY
+Noyce/M
+Noyes/M
+nozzle/MS
+Np
+NP
+NRA
+nroff/M
+N's
+NS
+n's/CI
+NSF
+n/T
+NT
+nth
+nuance/SDM
+nubbin/SM
+nubby/RT
+Nubia/M
+Nubian/M
+nubile
+nub/MS
+nuclear/K
+nuclease/M
+nucleated/A
+nucleate/DSXNG
+nucleation/M
+nucleic
+nuclei/M
+nucleoli
+nucleolus/M
+nucleon/MS
+nucleotide/MS
+nucleus/M
+nuclide/M
+nude/CRS
+nudely
+nudeness/M
+nudest
+nudge/GSRD
+nudger/M
+nudism/MS
+nudist/MS
+nudity/MS
+nugatory
+Nugent/M
+nugget/SM
+nuisance/MS
+nuke/DSMG
+Nukualofa
+null/DSG
+nullification/M
+nullifier/M
+nullify/RSDXGNZ
+nullity/SM
+nu/M
+numbered/UA
+numberer/M
+numberless
+numberplate/M
+number/RDMGJ
+numbers/A
+Numbers/M
+numbing/Y
+numbness/MS
+numb/SGZTYRDP
+numbskull's
+numerable/IC
+numeracy/SI
+numeral/YMS
+numerate/SDNGX
+numerates/I
+numeration/M
+numerator/MS
+numerical/Y
+numeric/S
+numerological
+numerologist/S
+numerology/MS
+numerousness/M
+numerous/YP
+numinous/S
+numismatic/S
+numismatics/M
+numismatist/MS
+numskull/SM
+Nunavut/M
+nuncio/SM
+Nunez/M
+Nunki/M
+nun/MS
+nunnery/MS
+nuptial/S
+Nuremberg/M
+Nureyev/M
+nursemaid/MS
+nurser/M
+nurseryman/M
+nurserymen
+nursery/MS
+nurse/SRDJGMZ
+nursling/M
+nurturer/M
+nurture/SRDGZM
+nus
+nutate/NGSD
+nutation/M
+nutcracker/M
+nutcrack/RZ
+nuthatch/SM
+nutmeat/SM
+nutmegged
+nutmegging
+nutmeg/MS
+nut/MS
+nutpick/MS
+Nutrasweet/M
+nutria/SM
+nutrient/MS
+nutriment/MS
+nutritional/Y
+nutritionist/MS
+nutrition/SM
+nutritiousness/MS
+nutritious/PY
+nutritive/Y
+nutshell/MS
+nutted
+nuttiness/SM
+nutting
+nutty/TRP
+nuzzle/GZRSD
+NV
+NW
+NWT
+NY
+Nyasa/M
+NYC
+Nydia/M
+Nye/M
+Nyerere/M
+nylon/SM
+nymphet/MS
+nymph/M
+nympholepsy/M
+nymphomaniac/S
+nymphomania/MS
+nymphs
+Nyquist/M
+NYSE
+Nyssa/M
+NZ
+o
+O
+oafishness/S
+oafish/PY
+oaf/MS
+Oahu/M
+Oakland/M
+Oakley/M
+Oakmont/M
+oak/SMN
+oakum/MS
+oakwood
+oar/GSMD
+oarlock/MS
+oarsman/M
+oarsmen
+oarswoman
+oarswomen
+OAS
+oases
+oasis/M
+oatcake/MS
+oater/M
+Oates/M
+oath/M
+oaths
+oatmeal/SM
+oat/SMNR
+Oaxaca/M
+ob
+OB
+Obadiah/M
+Obadias/M
+obbligato/S
+obduracy/S
+obdurateness/S
+obdurate/PDSYG
+Obediah/M
+obedience/EMS
+obedient/EY
+Obed/M
+obeisance/MS
+obeisant/Y
+obelisk/SM
+Oberlin/M
+Oberon/M
+obese
+obesity/MS
+obey/EDRGS
+obeyer/EM
+obfuscate/SRDXGN
+obfuscation/M
+obfuscatory
+Obidiah/M
+Obie/M
+obi/MDGS
+obit/SMR
+obituary/SM
+obj
+objectify/GSDXN
+objectionableness/M
+objectionable/U
+objectionably
+objection/SMB
+objectiveness/MS
+objective/PYS
+objectivity/MS
+objector/SM
+object/SGVMD
+objurgate/GNSDX
+objurgation/M
+oblate/NYPSX
+oblation/M
+obligate/NGSDXY
+obligational
+obligation/M
+obligatorily
+obligatory
+obliged/E
+obliger/M
+obliges/E
+oblige/SRDG
+obligingness/M
+obliging/PY
+oblique/DSYGP
+obliqueness/S
+obliquity/MS
+obliterate/VNGSDX
+obliteration/M
+obliterative/Y
+oblivion/MS
+obliviousness/MS
+oblivious/YP
+oblongness/M
+oblong/SYP
+obloquies
+obloquy/M
+Ob/MD
+obnoxiousness/MS
+obnoxious/YP
+oboe/SM
+oboist/S
+obos
+O'Brien/M
+obs
+obscene/RYT
+obscenity/MS
+obscurantism/MS
+obscurantist/MS
+obscuration
+obscureness/M
+obscure/YTPDSRGL
+obscurity/MS
+obsequies
+obsequiousness/S
+obsequious/YP
+obsequy
+observability/M
+observable/SU
+observably
+observance/MS
+observantly
+observants
+observant/U
+observational/Y
+observation/MS
+observatory/MS
+observed/U
+observer/M
+observe/ZGDSRB
+observing/Y
+obsess/GVDS
+obsessional
+obsession/MS
+obsessiveness/S
+obsessive/PYS
+obsidian/SM
+obsolesce/GSD
+obsolescence/S
+obsolescent/Y
+obsolete/GPDSY
+obsoleteness/M
+obstacle/SM
+obstetrical
+obstetrician/SM
+obstetric/S
+obstetrics/M
+obstinacy/SM
+obstinateness/M
+obstinate/PY
+obstreperousness/SM
+obstreperous/PY
+obstructed/U
+obstructer/M
+obstructionism/SM
+obstructionist/MS
+obstruction/SM
+obstructiveness/MS
+obstructive/PSY
+obstruct/RDVGS
+obtainable/U
+obtainably
+obtain/LSGDRB
+obtainment/S
+obtrude/DSRG
+obtruder/M
+obtrusion/S
+obtrusiveness/MSU
+obtrusive/UPY
+obtuseness/S
+obtuse/PRTY
+obverse/YS
+obviate/XGNDS
+obviousness/SM
+obvious/YP
+Oby/M
+ocarina/MS
+O'Casey
+Occam/M
+occasional/Y
+occasion/MDSJG
+Occidental/S
+occidental/SY
+occident/M
+Occident/SM
+occipital/Y
+occlude/GSD
+occlusion/MS
+occlusive/S
+occulter/M
+occultism/SM
+occult/SRDYG
+occupancy/SM
+occupant/MS
+occupational/Y
+occupation/SAM
+occupied/AU
+occupier/M
+occupies/A
+occupy/RSDZG
+occur/AS
+occurred/A
+occurrence/SM
+occurring/A
+oceanfront/MS
+oceangoing
+Oceania/M
+oceanic
+ocean/MS
+oceanographer/SM
+oceanographic
+oceanography/SM
+oceanology/MS
+oceanside
+Oceanside/M
+Oceanus/M
+ocelot/SM
+ocher/DMGS
+Ochoa/M
+o'clock
+O'Clock
+O'Connell/M
+O'Connor/M
+Oconomowoc/M
+OCR
+octagonal/Y
+octagon/SM
+octahedral
+octahedron/M
+octal/S
+octane/MS
+octant/M
+octave/MS
+Octavia/M
+Octavian/M
+Octavio/M
+Octavius/M
+octavo/MS
+octennial
+octet/SM
+octile
+octillion/M
+Oct/M
+October/MS
+octogenarian/MS
+octopus/SM
+octoroon/M
+ocular/S
+oculist/SM
+OD
+odalisque/SM
+oddball/SM
+oddity/MS
+oddment/MS
+oddness/MS
+odd/TRYSPL
+Odele/M
+Odelia/M
+Odelinda/M
+Odella/M
+Odelle/M
+Odell/M
+O'Dell/M
+ode/MDRS
+Ode/MR
+Oderberg/MS
+Oder/M
+Odessa/M
+Odets/M
+Odetta/M
+Odette/M
+Odey/M
+Odie/M
+Odilia/M
+Odille/M
+Odin/M
+odiousness/MS
+odious/PY
+Odis/M
+odium/MS
+Odo/M
+odometer/SM
+Odom/M
+O'Donnell/M
+odor/DMS
+odoriferous
+odorless
+odorous/YP
+ODs
+O'Dwyer/M
+Ody/M
+Odysseus/M
+Odyssey/M
+odyssey/S
+OE
+OED
+oedipal
+Oedipal/Y
+Oedipus/M
+OEM/M
+OEMS
+oenology/MS
+oenophile/S
+o'er
+O'Er
+Oersted/M
+oesophagi
+oeuvre/SM
+Ofelia/M
+Ofella/M
+offal/MS
+offbeat/MS
+offcuts
+Offenbach/M
+offender/M
+offend/SZGDR
+offense/MSV
+offensively/I
+offensiveness/MSI
+offensive/YSP
+offerer/M
+offering/M
+offer/RDJGZ
+offertory/SM
+offhand/D
+offhandedness/S
+offhanded/YP
+officeholder/SM
+officemate/S
+officer/GMD
+officership/S
+office/SRMZ
+officialdom/SM
+officialism/SM
+officially/U
+official/PSYM
+officiant/SM
+officiate/XSDNG
+officiation/M
+officiator/MS
+officio
+officiousness/MS
+officious/YP
+offing/M
+offish
+offload/GDS
+offprint/GSDM
+offramp
+offset/SM
+offsetting
+offshoot/MS
+offshore
+offside/RS
+offspring/M
+offstage/S
+off/SZGDRJ
+offtrack
+Ofilia/M
+of/K
+often/RT
+oftentimes
+oft/NRT
+ofttimes
+Ogbomosho/M
+Ogdan/M
+Ogden/M
+Ogdon/M
+Ogilvy/M
+ogive/M
+Oglethorpe/M
+ogle/ZGDSR
+ogreish
+ogre/MS
+ogress/S
+oh
+OH
+O'Hara
+O'Hare/M
+O'Higgins
+Ohioan/S
+Ohio/M
+ohmic
+ohmmeter/MS
+ohm/SM
+oho/S
+ohs
+OHSA/M
+oilcloth/M
+oilcloths
+oiler/M
+oilfield/MS
+oiliness/SM
+oilman/M
+oil/MDRSZG
+oilmen
+oilseed/SM
+oilskin/MS
+oily/TPR
+oink/GDS
+ointment/SM
+Oise/M
+OJ
+Ojibwa/SM
+Okamoto/M
+okapi/SM
+Okayama/M
+okay/M
+Okeechobee/M
+O'Keeffe
+Okefenokee
+Okhotsk/M
+Okinawa/M
+Okinawan/S
+Oklahoma/M
+Oklahoman/SM
+Okla/M
+OK/MDG
+okra/MS
+OKs
+Oktoberfest
+Olaf/M
+Olag/M
+Ola/M
+Olav/M
+Oldenburg/M
+olden/DG
+Oldfield/M
+oldie/MS
+oldish
+oldness/S
+Oldsmobile/M
+oldster/SM
+Olduvai/M
+old/XTNRPS
+olé
+oleaginous
+oleander/SM
+O'Leary/M
+olefin/M
+Oleg/M
+Ole/MV
+Olenek/M
+Olenka/M
+Olen/M
+Olenolin/M
+oleomargarine/SM
+oleo/S
+olés
+olfactory
+Olga/M
+Olia/M
+oligarchic
+oligarchical
+oligarch/M
+oligarchs
+oligarchy/SM
+Oligocene
+oligopolistic
+oligopoly/MS
+Olimpia/M
+Olin/M
+olive/MSR
+Olive/MZR
+Oliver/M
+Olivero/M
+Olivette/M
+Olivetti/M
+Olivia/M
+Olivier/M
+Olivie/RM
+Oliviero/M
+Oliy/M
+Ollie/M
+Olly/M
+Olmec
+Olmsted/M
+Olsen/M
+Olson/M
+Olva/M
+Olvan/M
+Olwen/M
+Olympe/M
+Olympiad/MS
+Olympian/S
+Olympia/SM
+Olympic/S
+Olympie/M
+Olympus/M
+Omaha/SM
+Oman/M
+Omar/M
+ombudsman/M
+ombudsmen
+Omdurman/M
+omega/MS
+omelet/SM
+omelette's
+omen/DMG
+Omero/M
+omicron/MS
+ominousness/SM
+ominous/YP
+omission/MS
+omit/S
+omitted
+omitting
+omnibus/MS
+omni/M
+omnipotence/SM
+Omnipotent
+omnipotent/SY
+omnipresence/MS
+omnipresent/Y
+omniscience/SM
+omniscient/YS
+omnivore/MS
+omnivorousness/MS
+omnivorous/PY
+oms
+Omsk/M
+om/XN
+ON
+onanism/M
+Onassis/M
+oncer/M
+once/SR
+oncogene/S
+oncologist/S
+oncology/SM
+oncoming/S
+Ondrea/M
+Oneal/M
+Onega/M
+Onegin/M
+Oneida/SM
+O'Neil
+O'Neill
+oneness/MS
+one/NPMSX
+oner/M
+onerousness/SM
+onerous/YP
+oneself
+onetime
+oneupmanship
+Onfre/M
+Onfroi/M
+ongoing/S
+Onida/M
+onion/GDM
+onionskin/MS
+onlooker/MS
+onlooking
+only/TP
+Onofredo/M
+Ono/M
+onomatopoeia/SM
+onomatopoeic
+onomatopoetic
+Onondaga/MS
+onrush/GMS
+on/RY
+ons
+Onsager/M
+onset/SM
+onsetting
+onshore
+onside
+onslaught/MS
+Ontarian/S
+Ontario/M
+Ont/M
+onto
+ontogeny/SM
+ontological/Y
+ontology/SM
+onus/SM
+onward/S
+onyx/MS
+oodles
+ooh/GD
+oohs
+oolitic
+Oona/M
+OOo/M
+oops/S
+Oort/M
+ooze/GDS
+oozy/RT
+opacity/SM
+opalescence/S
+opalescent/Y
+Opalina/M
+Opaline/M
+Opal/M
+opal/SM
+opaque/GTPYRSD
+opaqueness/SM
+opcode/MS
+OPEC
+Opel/M
+opencast
+opened/AU
+opener/M
+openhandedness/SM
+openhanded/P
+openhearted
+opening/M
+openness/S
+OpenOffice.org/M
+opens/A
+openwork/MS
+open/YRDJGZTP
+operable/I
+operandi
+operand/SM
+operant/YS
+opera/SM
+operate/XNGVDS
+operatically
+operatic/S
+operationalization/S
+operationalize/D
+operational/Y
+operation/M
+operative/IP
+operatively
+operativeness/MI
+operatives
+operator/SM
+operetta/MS
+ope/S
+Ophelia/M
+Ophelie/M
+Ophiuchus/M
+ophthalmic/S
+ophthalmologist/SM
+ophthalmology/MS
+opiate/GMSD
+opine/XGNSD
+opinionatedness/M
+opinionated/PY
+opinion/M
+opioid
+opium/MS
+opossum/SM
+opp
+Oppenheimer/M
+opponent/MS
+opportune/IY
+opportunism/SM
+opportunistic
+opportunistically
+opportunist/SM
+opportunity/MS
+oppose/BRSDG
+opposed/U
+opposer/M
+oppositeness/M
+opposite/SXYNP
+oppositional
+opposition/M
+oppress/DSGV
+oppression/MS
+oppressiveness/MS
+oppressive/YP
+oppressor/MS
+opprobrious/Y
+opprobrium/SM
+Oprah/M
+ops
+opt/DSG
+opthalmic
+opthalmologic
+opthalmology
+optical/Y
+optician/SM
+optic/S
+optics/M
+optima
+optimality
+optimal/Y
+optimise's
+optimism/SM
+optimistic
+optimistically
+optimist/SM
+optimization/SM
+optimize/DRSZG
+optimized/U
+optimizer/M
+optimizes/U
+optimum/SM
+optionality/M
+optional/YS
+option/GDMS
+optoelectronic
+optometric
+optometrist/MS
+optometry/SM
+opulence/SM
+opulent/Y
+opus/SM
+op/XGDN
+OR
+oracle/GMSD
+oracular
+Oralee/M
+Oralia/M
+Oralie/M
+Oralla/M
+Oralle/M
+oral/YS
+Ora/M
+orangeade/MS
+Orange/M
+orange/MS
+orangery/SM
+orangutan/MS
+Oranjestad/M
+Oran/M
+orate/SDGNX
+oration/M
+oratorical/Y
+oratorio/MS
+orator/MS
+oratory/MS
+Orazio/M
+Orbadiah/M
+orbicular
+orbiculares
+orbital/MYS
+orbit/MRDGZS
+orb/SMDG
+orchard/SM
+orchestral/Y
+orchestra/MS
+orchestrate/GNSDX
+orchestrater's
+orchestration/M
+orchestrator/M
+orchid/SM
+ordainer/M
+ordainment/MS
+ordain/SGLDR
+ordeal/SM
+order/AESGD
+ordered/U
+orderer
+ordering/S
+orderless
+orderliness/SE
+orderly/PS
+order's/E
+ordinal/S
+ordinance/MS
+ordinarily
+ordinariness/S
+ordinary/RSPT
+ordinated
+ordinate/I
+ordinates
+ordinate's
+ordinating
+ordination/SM
+ordnance/SM
+Ordovician
+ordure/MS
+oregano/SM
+Oreg/M
+Oregonian/S
+Oregon/M
+Orelee/M
+Orelia/M
+Orelie/M
+Orella/M
+Orelle/M
+Orel/M
+Oren/M
+Ore/NM
+ore/NSM
+Oreo
+Orestes
+organdie's
+organdy/MS
+organelle/MS
+organically/I
+organic/S
+organismic
+organism/MS
+organist/MS
+organizable/UMS
+organizational/MYS
+organization/MEAS
+organize/AGZDRS
+organized/UE
+organizer/MA
+organizes/E
+organizing/E
+organ/MS
+organometallic
+organza/SM
+orgasm/GSMD
+orgasmic
+orgiastic
+orgy/SM
+Oriana/M
+oriel/MS
+orientable
+Oriental/S
+oriental/SY
+orientated/A
+orientate/ESDXGN
+orientates/A
+orientation/AMES
+orienteering/M
+orienter
+orient/GADES
+orient's
+Orient/SM
+orifice/MS
+orig
+origami/MS
+originality/SM
+originally
+original/US
+originate/VGNXSD
+origination/M
+originative/Y
+originator/SM
+origin/MS
+Orin/M
+Orinoco/M
+oriole/SM
+Orion/M
+orison/SM
+Oriya/M
+Orizaba/M
+Orkney/M
+Orland/M
+Orlando/M
+Orlan/M
+Orleans
+Orlick/M
+Orlon/SM
+Orly/M
+ormolu/SM
+or/MY
+ornamental/SY
+ornamentation/SM
+ornament/GSDM
+ornateness/SM
+ornate/YP
+orneriness/SM
+ornery/PRT
+ornithological
+ornithologist/SM
+ornithology/MS
+orographic/M
+orography/M
+Orono/M
+orotund
+orotundity/MS
+orphanage/MS
+orphanhood/M
+orphan/SGDM
+Orpheus/M
+Orphic
+Orran/M
+Orren/M
+Orrin/M
+orris/SM
+Orr/MN
+ors
+Orsa/M
+Orsola/M
+Orson/M
+Ortega/M
+Ortensia/M
+orthodontia/S
+orthodontic/S
+orthodontics/M
+orthodontist/MS
+orthodoxies
+orthodoxly/U
+Orthodox/S
+orthodoxy's
+orthodox/YS
+orthodoxy/U
+orthogonality/M
+orthogonalization/M
+orthogonalized
+orthogonal/Y
+orthographic
+orthographically
+orthography/MS
+orthonormal
+orthopedic/S
+orthopedics/M
+orthopedist/SM
+orthophosphate/MS
+orthorhombic
+Ortiz/M
+Orton/M
+Orval/M
+Orville/M
+Orv/M
+Orwellian
+Orwell/M
+o's
+Osage/SM
+Osaka/M
+Osbert/M
+Osborne/M
+Osborn/M
+Osbourne/M
+Osbourn/M
+Oscar/SM
+Osceola/M
+oscillate/SDXNG
+oscillation/M
+oscillator/SM
+oscillatory
+oscilloscope/SM
+osculate/XDSNG
+osculation/M
+Osgood/M
+OSHA
+Oshawa/M
+O'Shea/M
+Oshkosh/M
+osier/MS
+Osiris/M
+Oslo/M
+Os/M
+OS/M
+Osman/M
+osmium/MS
+Osmond/M
+osmoses
+osmosis/M
+osmotic
+Osmund/M
+osprey/SM
+osseous/Y
+Ossie/M
+ossification/M
+ossify/NGSDX
+ostensible
+ostensibly
+ostentation/MS
+ostentatiousness/M
+ostentatious/PY
+osteoarthritides
+osteoarthritis/M
+osteology/M
+osteopathic
+osteopath/M
+osteopaths
+osteopathy/MS
+osteoporoses
+osteoporosis/M
+ostracise's
+ostracism/MS
+ostracize/GSD
+Ostrander/M
+ostrich/MS
+Ostrogoth/M
+Ostwald/M
+O'Sullivan/M
+Osvaldo/M
+Oswald/M
+Oswell/M
+OT
+OTB
+OTC
+Otes
+Otha/M
+Othelia/M
+Othella/M
+Othello/M
+otherness/M
+other/SMP
+otherwise
+otherworldly/P
+otherworld/Y
+Othilia/M
+Othilie/M
+Otho/M
+otiose
+Otis/M
+OTOH
+Ottawa/MS
+otter/DMGS
+Ottilie/M
+Otto/M
+Ottoman
+ottoman/MS
+Ouagadougou/M
+oubliette/SM
+ouch/SDG
+oughtn't
+ought/SGD
+Ouija/MS
+ounce/MS
+our/S
+ourself
+ourselves
+ouster/M
+oust/RDGZS
+outage/MS
+outargue/GDS
+outback/MRS
+outbalance/GDS
+outbidding
+outbid/S
+outboard/S
+outboast/GSD
+outbound/S
+outbreak/SMG
+outbroke
+outbroken
+outbuilding/SM
+outburst/MGS
+outcast/GSM
+outclass/SDG
+outcome/SM
+outcropped
+outcropping/S
+outcrop/SM
+outcry/MSDG
+outdated/P
+outdid
+outdistance/GSD
+outdoes
+outdo/G
+outdone
+outdoor/S
+outdoorsy
+outdraw/GS
+outdrawn
+outdrew
+outermost
+outerwear/M
+outface/SDG
+outfall/MS
+outfielder/M
+outfield/RMSZ
+outfight/SG
+outfit/MS
+outfitted
+outfitter/MS
+outfitting
+outflank/SGD
+outflow/SMDG
+outfought
+outfox/GSD
+outgeneraled
+outgoes
+outgo/GJ
+outgoing/P
+outgrew
+outgrip
+outgrow/GSH
+outgrown
+outgrowth/M
+outgrowths
+outguess/SDG
+outhit/S
+outhitting
+outhouse/SM
+outing/M
+outlaid
+outlander/M
+outlandishness/MS
+outlandish/PY
+outland/ZR
+outlast/GSD
+outlawry/M
+outlaw/SDMG
+outlay/GSM
+outlet/SM
+outliers
+outline/SDGM
+outlive/GSD
+outlook/MDGS
+outlying
+outmaneuver/GSD
+outmatch/SDG
+outmigration
+outmoded
+outness/M
+outnumber/GDS
+outpaced
+outpatient/SM
+outperform/DGS
+out/PJZGSDR
+outplacement/S
+outplay/GDS
+outpoint/GDS
+outpost/SM
+outpouring/M
+outpour/MJG
+outproduce/GSD
+output/SM
+outputted
+outputting
+outrace/GSD
+outrage/GSDM
+outrageousness/M
+outrageous/YP
+outran
+outrank/GSD
+outré
+outreach/SDG
+outrider/MS
+outrigger/SM
+outright/Y
+outrunning
+outrun/S
+outscore/GDS
+outsell/GS
+outset/MS
+outsetting
+outshine/SG
+outshone
+outshout/GDS
+outsider/PM
+outside/ZSR
+outsize/S
+outskirt/SM
+outsmart/SDG
+outsold
+outsource/SDJG
+outspend/SG
+outspent
+outspoke
+outspokenness/SM
+outspoken/YP
+outspread/SG
+outstanding/Y
+outstate/NX
+outstation/M
+outstay/SDG
+outstretch/GSD
+outstripped
+outstripping
+outstrip/S
+outtake/S
+outvote/GSD
+outwardness/M
+outward/SYP
+outwear/SG
+outweigh/GD
+outweighs
+outwit/S
+outwitted
+outwitting
+outwore
+outwork/SMDG
+outworn
+ouzo/SM
+oval/MYPS
+ovalness/M
+ova/M
+ovarian
+ovary/SM
+ovate/SDGNX
+ovation/GMD
+ovenbird/SM
+oven/MS
+overabundance/MS
+overabundant
+overachieve/SRDGZ
+overact/DGVS
+overage/S
+overaggressive
+overallocation
+overall/SM
+overambitious
+overanxious
+overarching
+overarm/GSD
+overate
+overattentive
+overawe/GDS
+overbalance/DSG
+overbear/GS
+overbearingness/M
+overbearing/YP
+overbidding
+overbid/S
+overbite/MS
+overblown
+overboard
+overbold
+overbook/SDG
+overbore
+overborne
+overbought
+overbuild/GS
+overbuilt
+overburdening/Y
+overburden/SDG
+overbuy/GS
+overcame
+overcapacity/M
+overcapitalize/DSG
+overcareful
+overcast/GS
+overcasting/M
+overcautious
+overcerebral
+overcharge/DSG
+overcloud/DSG
+overcoating/M
+overcoat/SMG
+overcomer/M
+overcome/RSG
+overcommitment/S
+overcompensate/XGNDS
+overcompensation/M
+overcomplexity/M
+overcomplicated
+overconfidence/MS
+overconfident/Y
+overconscientious
+overconsumption/M
+overcook/SDG
+overcooled
+overcorrection
+overcritical
+overcrowd/DGS
+overcurious
+overdecorate/SDG
+overdependent
+overdetermined
+overdevelop/SDG
+overdid
+overdoes
+overdo/G
+overdone
+overdose/DSMG
+overdraft/SM
+overdraw/GS
+overdrawn
+overdress/GDS
+overdrew
+overdrive/GSM
+overdriven
+overdrove
+overdubbed
+overdubbing
+overdub/S
+overdue
+overeagerness/M
+overeager/PY
+overeater/M
+overeat/GNRS
+overeducated
+overemotional
+overemphases
+overemphasis/M
+overemphasize/GZDSR
+overenthusiastic
+overestimate/DSXGN
+overestimation/M
+overexcite/DSG
+overexercise/SDG
+overexert/GDS
+overexertion/SM
+overexploitation
+overexploited
+overexpose/GDS
+overexposure/SM
+overextend/DSG
+overextension
+overfall/M
+overfed
+overfeed/GS
+overfill/GDS
+overfishing
+overflew
+overflight/SM
+overflow/DGS
+overflown
+overfly/GS
+overfond
+overfull
+overgeneralize/GDS
+overgenerous
+overgraze/SDG
+overgrew
+overground
+overgrow/GSH
+overgrown
+overgrowth/M
+overgrowths
+overhand/DGS
+overhang/GS
+overhasty
+overhaul/GRDJS
+overhead/S
+overheard
+overhearer/M
+overhear/SRG
+overheat/SGD
+overhung
+overincredulous
+overindulgence/SM
+overindulgent
+overindulge/SDG
+overinflated
+overjoy/SGD
+overkill/SDMG
+overladed
+overladen
+overlaid
+overlain
+overland/S
+overlap/MS
+overlapped
+overlapping
+overlarge
+overlay/GS
+overleaf
+overlie
+overload/SDG
+overlong
+overlook/DSG
+overlord/DMSG
+overloud
+overly/GRS
+overmanning
+overmaster/GSD
+overmatching
+overmodest
+overmuch/S
+overnice
+overnight/SDRGZ
+overoptimism/SM
+overoptimistic
+overpaid
+overparticular
+overpass/GMSD
+overpay/LSG
+overpayment/M
+overplay/SGD
+overpopulate/DSNGX
+overpopulation/M
+overpopulous
+overpower/GSD
+overpowering/Y
+overpraise/DSG
+overprecise
+overpressure
+overprice/SDG
+overprint/DGS
+overproduce/SDG
+overproduction/S
+overprotect/GVDS
+overprotection/M
+overqualified
+overran
+overrate/DSG
+overreach/DSRG
+overreaction/SM
+overreact/SGD
+overred
+overrefined
+overrepresented
+overridden
+overrider/M
+override/RSG
+overripe
+overrode
+overrule/GDS
+overrunning
+overrun/S
+oversample/DG
+oversaturate
+oversaw
+oversea/S
+overseeing
+overseen
+overseer/M
+oversee/ZRS
+oversell/SG
+oversensitiveness/S
+oversensitive/P
+oversensitivity
+oversexed
+overshadow/GSD
+overshoe/SM
+overshoot/SG
+overshot/S
+oversight/SM
+oversimple
+oversimplification/M
+oversimplify/GXNDS
+oversize/GS
+oversleep/GS
+overslept
+oversoftness/M
+oversoft/P
+oversold
+overspecialization/MS
+overspecialize/GSD
+overspend/SG
+overspent
+overspill/DMSG
+overspread/SG
+overstaffed
+overstatement/SM
+overstate/SDLG
+overstay/GSD
+overstepped
+overstepping
+overstep/S
+overstimulate/DSG
+overstock/SGD
+overstraining
+overstressed
+overstretch/D
+overstrict
+overstrike/GS
+overstrung
+overstuffed
+oversubscribe/SDG
+oversubtle
+oversupply/MDSG
+oversuspicious
+overtaken
+overtake/RSZG
+overtax/DSG
+overthrew
+overthrow/GS
+overthrown
+overtightened
+overtime/MGDS
+overtire/DSG
+overtone/MS
+overtook
+overt/PY
+overture/DSMG
+overturn/SDG
+overuse/DSG
+overvalue/GSD
+overview/MS
+overweening
+overweight/GSD
+overwhelm/GDS
+overwhelming/Y
+overwinter/SDG
+overwork/GSD
+overwrap
+overwrite/SG
+overwritten
+overwrote
+overwrought
+over/YGS
+overzealousness/M
+overzealous/P
+Ovid/M
+oviduct/SM
+oviform
+oviparous
+ovoid/S
+ovular
+ovulate/GNXDS
+ovulatory
+ovule/MS
+ovum/MS
+ow/DYG
+Owen/MS
+owe/S
+owlet/SM
+owl/GSMDR
+owlishness/M
+owlish/PY
+owned/U
+own/EGDS
+ownership/MS
+owner/SM
+oxalate/M
+oxalic
+oxaloacetic
+oxblood/S
+oxbow/SM
+oxcart/MS
+oxen/M
+oxford/MS
+Oxford/MS
+oxidant/SM
+oxidate/NVX
+oxidation/M
+oxidative/Y
+oxide/SM
+oxidization/MS
+oxidized/U
+oxidize/JDRSGZ
+oxidizer/M
+oxidizes/A
+ox/MNS
+Oxnard
+Oxonian
+oxtail/M
+Oxus/M
+oxyacetylene/MS
+oxygenate/XSDMGN
+oxygenation/M
+oxygen/MS
+oxyhydroxides
+oxymora
+oxymoron/M
+oyster/GSDM
+oystering/M
+oz
+Ozark/SM
+Oz/M
+ozone/SM
+Ozymandias/M
+Ozzie/M
+Ozzy/M
+P
+PA
+Pablo/M
+Pablum/M
+pablum/S
+Pabst/M
+pabulum/SM
+PAC
+pace/DRSMZG
+Pace/M
+pacemaker/SM
+pacer/M
+pacesetter/MS
+pacesetting
+Pacheco/M
+pachyderm/MS
+pachysandra/MS
+pacific
+pacifically
+pacification/M
+Pacific/M
+pacifier/M
+pacifism/MS
+pacifistic
+pacifist/MS
+pacify/NRSDGXZ
+package/ARSDG
+packaged/U
+packager/S
+package's
+packages/U
+packaging/SM
+Packard/SM
+packed/AU
+packer/MUS
+packet/MSDG
+pack/GZSJDRMB
+packhorse/M
+packinghouse/S
+packing/M
+packsaddle/SM
+Packston/M
+packs/UA
+Packwood/M
+Paco/M
+Pacorro/M
+pact/SM
+Padang/M
+padded/U
+Paddie/M
+padding/SM
+paddle/MZGRSD
+paddler/M
+paddock/SDMG
+Paddy/M
+paddy/SM
+Padget/M
+Padgett/M
+Padilla/M
+padlock/SGDM
+pad/MS
+Padraic/M
+Padraig/M
+padre/MS
+Paderewski/M
+Padriac/M
+paean/MS
+paediatrician/MS
+paediatrics/M
+paedophilia's
+paella/SM
+paeony/M
+Paganini/M
+paganism/MS
+pagan/SM
+pageantry/SM
+pageant/SM
+pageboy/SM
+paged/U
+pageful
+Page/M
+page/MZGDRS
+pager/M
+paginate/DSNGX
+Paglia/M
+pagoda/MS
+Pahlavi/M
+paid/AU
+Paige/M
+pailful/SM
+Pail/M
+pail/SM
+Paine/M
+painfuller
+painfullest
+painfulness/MS
+painful/YP
+pain/GSDM
+painkiller/MS
+painkilling
+painlessness/S
+painless/YP
+painstaking/SY
+paint/ADRZGS
+paintbox/M
+paintbrush/SM
+painted/U
+painterly/P
+painter/YM
+painting/SM
+paint's
+paintwork
+paired/UA
+pair/JSDMG
+pairs/A
+pairwise
+paisley/MS
+pajama/MDS
+Pakistani/S
+Pakistan/M
+palace/MS
+paladin/MS
+palaeolithic
+palaeontologists
+palaeontology/M
+palanquin/MS
+palatability/M
+palatableness/M
+palatable/P
+palatalization/MS
+palatalize/SDG
+palatal/YS
+palate/BMS
+palatial/Y
+palatinate/SM
+Palatine
+palatine/S
+palaver/GSDM
+paleface/SM
+Palembang/M
+paleness/S
+Paleocene
+Paleogene
+paleographer/SM
+paleography/SM
+paleolithic
+Paleolithic
+paleontologist/S
+paleontology/MS
+Paleozoic
+Palermo/M
+pale/SPY
+Palestine/M
+Palestinian/S
+Palestrina/M
+palette/MS
+Paley/M
+palfrey/MS
+palimony/S
+palimpsest/MS
+palindrome/MS
+palindromic
+paling/M
+palisade/MGSD
+Palisades/M
+palish
+Palladio/M
+palladium/SM
+pallbearer/SM
+palletized
+pallet/SMGD
+pall/GSMD
+palliate/SDVNGX
+palliation/M
+palliative/SY
+pallidness/MS
+pallid/PY
+Pall/M
+pallor/MS
+palmate
+palmer/M
+Palmer/M
+Palmerston/M
+palmetto/MS
+palm/GSMDR
+palmist/MS
+palmistry/MS
+Palm/MR
+Palmolive/M
+palmtop/S
+Palmyra/M
+palmy/RT
+Palo/M
+Paloma/M
+Palomar/M
+palomino/MS
+palpable
+palpably
+palpate/SDNGX
+palpation/M
+palpitate/NGXSD
+palpitation/M
+pal/SJMDRYTG
+palsy/GSDM
+paltriness/SM
+paltry/TRP
+paludal
+Pa/M
+Pamela/M
+Pamelina/M
+Pamella/M
+pa/MH
+Pamirs
+Pam/M
+Pammie/M
+Pammi/M
+Pammy/M
+pampas/M
+pamperer/M
+pamper/RDSG
+Pampers
+pamphleteer/DMSG
+pamphlet/SM
+panacea/MS
+panache/MS
+Panama/MS
+Panamanian/S
+panama/S
+pancake/MGSD
+Panchito/M
+Pancho/M
+panchromatic
+pancreas/MS
+pancreatic
+panda/SM
+pandemic/S
+pandemonium/SM
+pander/ZGRDS
+Pandora/M
+panegyric/SM
+pane/KMS
+paneling/M
+panelist/MS
+panelization
+panelized
+panel/JSGDM
+Pangaea/M
+pang/GDMS
+pangolin/M
+panhandle/RSDGMZ
+panicked
+panicking
+panicky/RT
+panic/SM
+panier's
+panjandrum/M
+Pankhurst/M
+Pan/M
+Panmunjom/M
+panned
+pannier/SM
+panning
+panoply/MSD
+panorama/MS
+panoramic
+panpipes
+Pansie/M
+pan/SMD
+Pansy/M
+pansy/SM
+Pantagruel/M
+Pantaloon/M
+pantaloons
+pant/GDS
+pantheism/MS
+pantheistic
+pantheist/S
+pantheon/MS
+panther/SM
+pantie/SM
+pantiled
+pantograph/M
+pantomime/SDGM
+pantomimic
+pantomimist/SM
+pantry/SM
+pantsuit/SM
+pantyhose
+pantyliner
+pantywaist/SM
+Panza/M
+Paola/M
+Paoli/M
+Paolina/M
+Paolo/M
+papacy/SM
+Papagena/M
+Papageno/M
+papal/Y
+papa/MS
+paparazzi
+papaw/SM
+papaya/MS
+paperback/GDMS
+paperboard/MS
+paperboy/SM
+paperer/M
+papergirl/SM
+paper/GJMRDZ
+paperhanger/SM
+paperhanging/SM
+paperiness/M
+paperless
+paperweight/MS
+paperwork/SM
+papery/P
+papillae
+papilla/M
+papillary
+papist/MS
+papoose/SM
+Pappas/M
+papped
+papping
+pappy/RST
+paprika/MS
+pap/SZMNR
+papyri
+papyrus/M
+Paquito/M
+parable/MGSD
+parabola/MS
+parabolic
+paraboloidal/M
+paraboloid/MS
+Paracelsus/M
+paracetamol/M
+parachuter/M
+parachute/RSDMG
+parachutist/MS
+Paraclete/M
+parader/M
+parade/RSDMZG
+paradigmatic
+paradigm/SM
+paradisaic
+paradisaical
+Paradise/M
+paradise/MS
+paradoxic
+paradoxicalness/M
+paradoxical/YP
+paradox/MS
+paraffin/GSMD
+paragon/SGDM
+paragrapher/M
+paragraph/MRDG
+paragraphs
+Paraguayan/S
+Paraguay/M
+parakeet/MS
+paralegal/S
+paralinguistic
+parallax/SM
+parallel/DSG
+paralleled/U
+parallelepiped/MS
+parallelism/SM
+parallelization/MS
+parallelize/ZGDSR
+parallelogram/MS
+paralysis/M
+paralytically
+paralytic/S
+paralyzedly/S
+paralyzed/Y
+paralyzer/M
+paralyze/ZGDRS
+paralyzingly/S
+paralyzing/Y
+paramagnetic
+paramagnet/M
+Paramaribo/M
+paramecia
+paramecium/M
+paramedical/S
+paramedic/MS
+parameterization/SM
+parameterize/BSDG
+parameterized/U
+parameterless
+parameter/SM
+parametric
+parametrically
+parametrization
+parametrize/DS
+paramilitary/S
+paramount/S
+paramour/MS
+para/MS
+Paramus/M
+Paraná
+paranoiac/S
+paranoia/SM
+paranoid/S
+paranormal/SY
+parapet/SMD
+paraphernalia
+paraphrase/GMSRD
+paraphraser/M
+paraplegia/MS
+paraplegic/S
+paraprofessional/SM
+parapsychologist/S
+parapsychology/MS
+paraquat/S
+parasite/SM
+parasitically
+parasitic/S
+parasitism/SM
+parasitologist/M
+parasitology/M
+parasol/SM
+parasympathetic/S
+parathion/SM
+parathyroid/S
+paratrooper/M
+paratroop/RSZ
+paratyphoid/S
+parboil/DSG
+parceled/U
+parceling/M
+parcel/SGMD
+Parcheesi/M
+parch/GSDL
+parchment/SM
+PARC/M
+pardonableness/M
+pardonable/U
+pardonably/U
+pardoner/M
+pardon/ZBGRDS
+paregoric/SM
+parentage/MS
+parental/Y
+parenteral
+parentheses
+parenthesis/M
+parenthesize/GSD
+parenthetic
+parenthetical/Y
+parenthood/MS
+parent/MDGJS
+pare/S
+paresis/M
+pares/S
+Pareto/M
+parfait/SM
+pariah/M
+pariahs
+parietal/S
+parimutuel/S
+paring/M
+parishioner/SM
+parish/MS
+Parisian/SM
+Paris/M
+parity/ESM
+parka/MS
+Parke/M
+Parker/M
+Parkersburg/M
+park/GJZDRMS
+Parkhouse/M
+parking/M
+Parkinson/M
+parkish
+parkland/M
+parklike
+Parkman
+Park/RMS
+parkway/MS
+parlance/SM
+parlay/DGS
+parley/MDSG
+parliamentarian/SM
+parliamentary/U
+parliament/MS
+Parliament/MS
+parlor/SM
+parlous
+Parmesan/S
+parmigiana
+Parnassus/SM
+Parnell/M
+parochialism/SM
+parochiality
+parochial/Y
+parodied/U
+parodist/SM
+parody/SDGM
+parolee/MS
+parole/MSDG
+paroxysmal
+paroxysm/MS
+parquetry/SM
+parquet/SMDG
+parrakeet's
+parred
+parricidal
+parricide/MS
+parring
+Parrish/M
+Parr/M
+Parrnell/M
+parrot/GMDS
+parrotlike
+parry/GSD
+Parry/M
+parse
+parsec/SM
+parsed/U
+Parsee's
+parser/M
+Parsifal/M
+parsimonious/Y
+parsimony/SM
+pars/JDSRGZ
+parsley/MS
+parsnip/MS
+parsonage/MS
+parson/MS
+Parsons/M
+partaken
+partaker/M
+partake/ZGSR
+part/CDGS
+parterre/MS
+parter/S
+parthenogeneses
+parthenogenesis/M
+Parthenon/M
+Parthia/M
+partiality/MS
+partial/SY
+participant/MS
+participate/NGVDSX
+participation/M
+participator/S
+participatory
+participial/Y
+participle/MS
+particleboard/S
+particle/MS
+particolored
+particularistic
+particularity/SM
+particularization/MS
+particularize/GSD
+particular/SY
+particulate/S
+parting/MS
+partisanship/SM
+partisan/SM
+partition/AMRDGS
+partitioned/U
+partitioner/M
+partitive/S
+partizan's
+partly
+partner/DMGS
+partnership/SM
+partook
+partridge/MS
+part's
+parturition/SM
+partway
+party/RSDMG
+parvenu/SM
+par/ZGSJBMDR
+Pasadena/M
+PASCAL
+Pascale/M
+Pascal/M
+pascal/SM
+paschal/S
+pasha/MS
+Paso/M
+Pasquale/M
+pas/S
+passably
+passage/MGSD
+passageway/MS
+Passaic/M
+passband
+passbook/MS
+passel/MS
+pass/M
+passenger/MYS
+passerby
+passer/M
+passersby
+passim
+passing/Y
+passionated
+passionate/EYP
+passionateness/EM
+passionates
+passionating
+passioned
+passionflower/MS
+passioning
+passionless
+passion/SEM
+Passion/SM
+passivated
+passiveness/S
+passive/SYP
+passivity/S
+pass/JGVBZDSR
+passkey/SM
+passmark
+passover
+Passover/MS
+passport/SM
+password/SDM
+pasta/MS
+pasteboard/SM
+pasted/UA
+pastel/MS
+paste/MS
+Pasternak/M
+pastern/SM
+pasteup
+pasteurization/MS
+pasteurized/U
+pasteurizer/M
+pasteurize/RSDGZ
+Pasteur/M
+pastiche/MS
+pastille/SM
+pastime/SM
+pastiness/SM
+pastoralization/M
+pastoral/SPY
+pastorate/MS
+pastor/GSDM
+past/PGMDRS
+pastrami/MS
+pastry/SM
+past's/A
+pasts/A
+pasturage/SM
+pasture/MGSRD
+pasturer/M
+pasty/PTRS
+Patagonia/M
+Patagonian/S
+patch/EGRSD
+patcher/EM
+patchily
+patchiness/S
+patch's
+patchwork/RMSZ
+patchy/PRT
+patellae
+patella/MS
+Patel/M
+Pate/M
+paten/M
+Paten/M
+patentee/SM
+patent/ZGMRDYSB
+paterfamilias/SM
+pater/M
+paternalism/MS
+paternalist
+paternalistic
+paternal/Y
+paternity/SM
+paternoster/SM
+Paterson/M
+pate/SM
+pathetic
+pathetically
+pathfinder/MS
+pathless/P
+path/M
+pathname/SM
+pathogenesis/M
+pathogenic
+pathogen/SM
+pathologic
+pathological/Y
+pathologist/MS
+pathology/SM
+pathos/SM
+paths
+pathway/MS
+Patience/M
+patience/SM
+patient/MRYTS
+patient's/I
+patients/I
+patina/SM
+patine
+Patin/M
+patio/MS
+Pat/MN
+pat/MNDRS
+Patna/M
+patois/M
+Paton/M
+patresfamilias
+patriarchal
+patriarchate/MS
+patriarch/M
+patriarchs
+patriarchy/MS
+Patrica/M
+Patrice/M
+Patricia/M
+patrician/MS
+patricide/MS
+Patricio/M
+Patrick/M
+Patric/M
+patrimonial
+patrimony/SM
+patriotically
+patriotic/U
+patriotism/SM
+patriot/SM
+patristic/S
+Patrizia/M
+Patrizio/M
+Patrizius/M
+patrolled
+patrolling
+patrolman/M
+patrolmen
+patrol/MS
+patrolwoman
+patrolwomen
+patronage/MS
+patroness/S
+patronization
+patronized/U
+patronize/GZRSDJ
+patronizer/M
+patronizes/A
+patronizing's/U
+patronizing/YM
+patronymically
+patronymic/S
+patron/YMS
+patroon/MS
+patsy/SM
+Patsy/SM
+patted
+Patten/M
+patten/MS
+patterer/M
+pattern/GSDM
+patternless
+patter/RDSGJ
+Patterson/M
+Pattie/M
+Patti/M
+patting
+Pattin/M
+Patton/M
+Patty/M
+patty/SM
+paucity/SM
+Paula/M
+Paule/M
+Pauletta/M
+Paulette/M
+Paulie/M
+Pauli/M
+Paulina/M
+Pauline
+Pauling/M
+Paulita/M
+Paul/MG
+Paulo/M
+Paulsen/M
+Paulson/M
+Paulus/M
+Pauly/M
+paunch/GMSD
+paunchiness/M
+paunchy/RTP
+pauperism/SM
+pauperize/SDG
+pauper/SGDM
+pause/DSG
+Pavarotti
+paved/UA
+pave/GDRSJL
+Pavel/M
+pavement/SGDM
+paver/M
+paves/A
+Pavia/M
+pavilion/SMDG
+paving/A
+paving's
+Pavla/M
+Pavlova/MS
+Pavlovian
+Pavlov/M
+pawl/SM
+paw/MDSG
+pawnbroker/SM
+pawnbroking/S
+Pawnee/SM
+pawner/M
+pawn/GSDRM
+pawnshop/MS
+pawpaw's
+Pawtucket/M
+paxes
+Paxon/M
+Paxton/M
+payable/S
+pay/AGSLB
+payback/S
+paycheck/SM
+payday/MS
+payed
+payee/SM
+payer/SM
+payload/SM
+paymaster/SM
+payment/ASM
+Payne/SM
+payoff/MS
+payola/MS
+payout/S
+payroll/MS
+payslip/S
+Payson/M
+Payton/M
+Paz/M
+Pb/M
+PBS
+PBX
+PCB
+PC/M
+PCP
+PCs
+pct
+pd
+PD
+Pd/M
+PDP
+PDQ
+PDT
+PE
+Peabody/M
+peaceableness/M
+peaceable/P
+peaceably
+peacefuller
+peacefullest
+peacefulness/S
+peaceful/PY
+peace/GMDS
+peacekeeping/S
+Peace/M
+peacemaker/MS
+peacemaking/MS
+peacetime/MS
+peach/GSDM
+Peachtree/M
+peachy/RT
+peacock/SGMD
+Peadar/M
+peafowl/SM
+peahen/MS
+peaked/P
+peakiness/M
+peak/SGDM
+peaky/P
+pealed/A
+Peale/M
+peal/MDSG
+peals/A
+pea/MS
+peanut/SM
+Pearce/M
+Pearla/M
+Pearle/M
+pearler/M
+Pearlie/M
+Pearline/M
+Pearl/M
+pearl/SGRDM
+pearly/TRS
+Pearson/M
+pear/SYM
+peartrees
+Peary/M
+peasanthood
+peasantry/SM
+peasant/SM
+peashooter/MS
+peats/A
+peat/SM
+peaty/TR
+pebble/MGSD
+pebbling/M
+pebbly/TR
+Pebrook/M
+pecan/SM
+peccadilloes
+peccadillo/M
+peccary/MS
+Pechora/M
+pecker/M
+peck/GZSDRM
+Peckinpah/M
+Peck/M
+Pecos/M
+pectic
+pectin/SM
+pectoral/S
+peculate/NGDSX
+peculator/S
+peculiarity/MS
+peculiar/SY
+pecuniary
+pedagogical/Y
+pedagogic/S
+pedagogics/M
+pedagogue/SDGM
+pedagogy/MS
+pedal/SGRDM
+pedantic
+pedantically
+pedantry/MS
+pedant/SM
+peddler/M
+peddle/ZGRSD
+pederast/SM
+pederasty/SM
+Peder/M
+pedestal/GDMS
+pedestrianization
+pedestrianize/GSD
+pedestrian/MS
+pediatrician/SM
+pediatric/S
+pedicab/SM
+pedicure/DSMG
+pedicurist/SM
+pedigree/DSM
+pediment/DMS
+pedlar's
+pedometer/MS
+pedophile/S
+pedophilia
+Pedro/M
+peduncle/MS
+peeing
+peekaboo/SM
+peek/GSD
+peeler/M
+peeling/M
+Peel/M
+peel/SJGZDR
+peen/GSDM
+peeper/M
+peephole/SM
+peep/SGZDR
+peepshow/MS
+peepy
+peerage/MS
+peer/DMG
+peeress/MS
+peerlessness/M
+peerless/PY
+peeve/GZMDS
+peevers/M
+peevishness/SM
+peevish/YP
+peewee/S
+pee/ZDRS
+Pegasus/MS
+pegboard/SM
+Pegeen/M
+pegged
+Peggie/M
+Peggi/M
+pegging
+Peggy/M
+Peg/M
+peg/MS
+peignoir/SM
+Pei/M
+Peiping/M
+Peirce/M
+pejoration/SM
+pejorative/SY
+peke/MS
+Pekinese's
+pekingese
+Pekingese/SM
+Peking/SM
+pekoe/SM
+pelagic
+Pelee/M
+Pele/M
+pelf/SM
+Pelham/M
+pelican/SM
+pellagra/SM
+pellet/SGMD
+pellucid
+Peloponnese/M
+pelter/M
+pelt/GSDR
+pelvic/S
+pelvis/SM
+Pembroke/M
+pemmican/SM
+penalization/SM
+penalized/U
+penalize/SDG
+penalty/MS
+penal/Y
+Pena/M
+penance/SDMG
+pence/M
+penchant/MS
+pencil/SGJMD
+pendant/SM
+pend/DCGS
+pendent/CS
+Penderecki/M
+Pendleton/M
+pendulous
+pendulum/MS
+Penelopa/M
+Penelope/M
+penetrability/SM
+penetrable
+penetrate/SDVGNX
+penetrating/Y
+penetration/M
+penetrativeness/M
+penetrative/PY
+penetrator/MS
+penguin/MS
+penicillin/SM
+penile
+peninsular
+peninsula/SM
+penis/MS
+penitence/MS
+penitential/YS
+penitentiary/MS
+penitent/SY
+penknife/M
+penknives
+penlight/MS
+pen/M
+Pen/M
+penman/M
+penmanship/MS
+penmen
+Penna
+pennant/SM
+penned
+Penney/M
+Pennie/M
+penniless
+Penni/M
+penning
+Pennington/M
+pennis
+Penn/M
+pennon/SM
+Pennsylvania/M
+Pennsylvanian/S
+Penny/M
+penny/SM
+pennyweight/SM
+pennyworth/M
+penologist/MS
+penology/MS
+Penrod/M
+Pensacola/M
+pensioner/M
+pension/ZGMRDBS
+pensiveness/S
+pensive/PY
+pens/V
+pentacle/MS
+pentagonal/SY
+Pentagon/M
+pentagon/SM
+pentagram/MS
+pentameter/SM
+pent/AS
+Pentateuch/M
+pentathlete/S
+pentathlon/MS
+pentatonic
+pentecostal
+Pentecostalism/S
+Pentecostal/S
+Pentecost/SM
+penthouse/SDGM
+Pentium/M
+penuche/SM
+penultimate/SY
+penumbrae
+penumbra/MS
+penuriousness/MS
+penurious/YP
+penury/SM
+peonage/MS
+peon/MS
+peony/SM
+people/SDMG
+Peoria/M
+Pepe/M
+Pepillo/M
+Pepi/M
+Pepin/M
+Pepita/M
+Pepito/M
+pepped
+peppercorn/MS
+pepperer/M
+peppergrass/M
+peppermint/MS
+pepperoni/S
+pepper/SGRDM
+peppery
+peppiness/SM
+pepping
+peppy/PRT
+Pepsico/M
+PepsiCo/M
+Pepsi/M
+pepsin/SM
+pep/SM
+peptic/S
+peptidase/SM
+peptide/SM
+peptizing
+Pepys/M
+Pequot/M
+peradventure/S
+perambulate/DSNGX
+perambulation/M
+perambulator/MS
+percale/MS
+perceivably
+perceive/DRSZGB
+perceived/U
+perceiver/M
+percentage/MS
+percentile/SM
+percent/MS
+perceptible
+perceptibly
+perceptional
+perception/MS
+perceptiveness/MS
+perceptive/YP
+perceptual/Y
+percept/VMS
+Perceval/M
+perchance
+perch/GSDM
+perchlorate/M
+perchlorination
+percipience/MS
+percipient/S
+Percival/M
+percolate/NGSDX
+percolation/M
+percolator/MS
+percuss/DSGV
+percussionist/MS
+percussion/SAM
+percussiveness/M
+percussive/PY
+percutaneous/Y
+Percy/M
+perdition/MS
+perdurable
+peregrinate/XSDNG
+peregrination/M
+peregrine/S
+Perelman/M
+peremptorily
+peremptory/P
+perennial/SY
+pères
+perestroika/S
+Perez/M
+perfecta/S
+perfect/DRYSTGVP
+perfecter/M
+perfectibility/MS
+perfectible
+perfectionism/MS
+perfectionist/MS
+perfection/MS
+perfectiveness/M
+perfective/PY
+perfectness/MS
+perfidiousness/M
+perfidious/YP
+perfidy/MS
+perforated/U
+perforate/XSDGN
+perforation/M
+perforce
+performance/MS
+performed/U
+performer/M
+perform/SDRZGB
+perfumer/M
+perfumery/SM
+perfume/ZMGSRD
+perfunctorily
+perfunctoriness/M
+perfunctory/P
+perfused
+perfusion/M
+Pergamon/M
+pergola/SM
+perhaps/S
+Peria/M
+pericardia
+pericardium/M
+Perice/M
+Periclean
+Pericles/M
+perigee/SM
+perihelia
+perihelion/M
+peril/GSDM
+Perilla/M
+perilousness/M
+perilous/PY
+Peri/M
+perimeter/MS
+perinatal
+perinea
+perineum/M
+periodic
+periodical/YMS
+periodicity/MS
+period/MS
+periodontal/Y
+periodontics/M
+periodontist/S
+peripatetic/S
+peripheral/SY
+periphery/SM
+periphrases
+periphrasis/M
+periphrastic
+periscope/SDMG
+perishable/SM
+perish/BZGSRD
+perishing/Y
+peristalses
+peristalsis/M
+peristaltic
+peristyle/MS
+peritoneal
+peritoneum/SM
+peritonitis/MS
+periwigged
+periwigging
+periwig/MS
+periwinkle/SM
+perjurer/M
+perjure/SRDZG
+perjury/MS
+per/K
+perk/GDS
+perkily
+perkiness/S
+Perkin/SM
+perky/TRP
+Perla/M
+Perle/M
+Perl/M
+permafrost/MS
+permalloy/M
+Permalloy/M
+permanence/SM
+permanency/MS
+permanentness/M
+permanent/YSP
+permeability/SM
+permeableness/M
+permeable/P
+permeate/NGVDSX
+Permian
+permissibility/M
+permissibleness/M
+permissible/P
+permissibly
+permission/SM
+permissiveness/MS
+permissive/YP
+permit/SM
+permitted
+permitting
+Perm/M
+perm/MDGS
+permutation/MS
+permute/SDG
+Pernell/M
+perniciousness/MS
+pernicious/PY
+Pernod/M
+Peron/M
+peroration/SM
+Perot/M
+peroxidase/M
+peroxide/MGDS
+perpend/DG
+perpendicularity/SM
+perpendicular/SY
+perpetrate/NGXSD
+perpetration/M
+perpetrator/SM
+perpetual/SY
+perpetuate/NGSDX
+perpetuation/M
+perpetuity/MS
+perplex/DSG
+perplexed/Y
+perplexity/MS
+perquisite/SM
+Perren/M
+Perri/M
+Perrine/M
+Perry/MR
+persecute/XVNGSD
+persecution/M
+persecutor/MS
+persecutory
+Perseid/M
+Persephone/M
+Perseus/M
+perseverance/MS
+persevere/GSD
+persevering/Y
+Pershing/M
+Persia/M
+Persian/S
+persiflage/MS
+persimmon/SM
+Persis/M
+persist/DRSG
+persistence/SM
+persistent/Y
+persnickety
+personableness/M
+personable/P
+personae
+personage/SM
+personality/SM
+personalization/CMS
+personalize/CSDG
+personalized/U
+personalty/MS
+personal/YS
+persona/M
+person/BMS
+personification/M
+personifier/M
+personify/XNGDRS
+personnel/SM
+person's/U
+persons/U
+perspective/YMS
+perspex
+perspicaciousness/M
+perspicacious/PY
+perspicacity/S
+perspicuity/SM
+perspicuousness/M
+perspicuous/YP
+perspiration/MS
+perspire/DSG
+persuaded/U
+persuader/M
+persuade/ZGDRSB
+persuasion/SM
+persuasively
+persuasiveness/MS
+persuasive/U
+pertain/GSD
+Perth/M
+pertinaciousness/M
+pertinacious/YP
+pertinacity/MS
+pertinence/S
+pertinent/YS
+pertness/MS
+perturbation/MS
+perturbed/U
+perturb/GDS
+pertussis/SM
+pert/YRTSP
+peruke/SM
+Peru/M
+perusal/SM
+peruser/M
+peruse/RSDZG
+Peruvian/S
+pervade/SDG
+pervasion/M
+pervasiveness/MS
+pervasive/PY
+perverseness/SM
+perverse/PXYNV
+perversion/M
+perversity/MS
+pervert/DRSG
+perverted/YP
+perverter/M
+perviousness
+peseta/SM
+Peshawar/M
+peskily
+peskiness/S
+pesky/RTP
+peso/MS
+pessimal/Y
+pessimism/SM
+pessimistic
+pessimistically
+pessimist/SM
+pester/DG
+pesticide/MS
+pestiferous
+pestilence/SM
+pestilential/Y
+pestilent/Y
+pestle/SDMG
+pesto/S
+pest/RZSM
+PET
+Pétain/M
+petal/SDM
+Peta/M
+petard/MS
+petcock/SM
+Pete/M
+peter/GD
+Peter/M
+Petersburg/M
+Petersen/M
+Peters/N
+Peterson/M
+Peterus/M
+Petey/M
+pethidine/M
+petiole/SM
+petiteness/M
+petite/XNPS
+petitioner/M
+petition/GZMRD
+petition's/A
+petitions/A
+petits
+Petkiewicz/M
+Pet/MRZ
+Petra/M
+Petrarch/M
+petrel/SM
+petri
+petrifaction/SM
+petrify/NDSG
+Petrina/M
+Petr/M
+petrochemical/SM
+petrodollar/MS
+petroglyph/M
+petrolatum/MS
+petroleum/MS
+petrolled
+petrolling
+petrol/MS
+petrologist/MS
+petrology/MS
+Petronella/M
+Petronia/M
+Petronilla/M
+Petronille/M
+pet/SMRZ
+petted
+petter/MS
+Pettibone/M
+petticoat/SMD
+pettifogged
+pettifogger/SM
+pettifogging
+pettifog/S
+pettily
+pettiness/S
+petting
+pettis
+pettishness/M
+pettish/YP
+Petty/M
+petty/PRST
+petulance/MS
+petulant/Y
+Petunia/M
+petunia/SM
+Peugeot/M
+Pewaukee/M
+pewee/MS
+pewit/MS
+pew/SM
+pewter/SRM
+peyote/SM
+Peyter/M
+Peyton/M
+pf
+Pfc
+PFC
+pfennig/SM
+Pfizer/M
+pg
+PG
+Phaedra/M
+Phaethon/M
+phaeton/MS
+phage/M
+phagocyte/SM
+Phaidra/M
+phalanger/MS
+phalanges
+phalanx/SM
+phalli
+phallic
+phallus/M
+Phanerozoic
+phantasmagoria/SM
+phantasmal
+phantasm/SM
+phantasy's
+phantom/MS
+pharaoh
+Pharaoh/M
+pharaohs
+Pharaohs
+pharisaic
+Pharisaic
+Pharisaical
+pharisee/S
+Pharisee/SM
+pharmaceutical/SY
+pharmaceutic/S
+pharmaceutics/M
+pharmacist/SM
+pharmacological/Y
+pharmacologist/SM
+pharmacology/SM
+pharmacopoeia/SM
+pharmacy/SM
+pharyngeal/S
+pharynges
+pharyngitides
+pharyngitis/M
+pharynx/M
+phase/DSRGZM
+phaseout/S
+PhD
+pheasant/SM
+Phebe/M
+Phedra/M
+Phekda/M
+Phelia/M
+Phelps/M
+phenacetin/MS
+phenobarbital/SM
+phenolic
+phenol/MS
+phenolphthalein/M
+phenomenal/Y
+phenomena/SM
+phenomenological/Y
+phenomenology/MS
+phenomenon/SM
+phenotype/MS
+phenylalanine/M
+phenyl/M
+pheromone/MS
+phew/S
+phialled
+phialling
+phial/MS
+Phidias/M
+Philadelphia/M
+philanderer/M
+philander/SRDGZ
+philanthropic
+philanthropically
+philanthropist/MS
+philanthropy/SM
+philatelic
+philatelist/MS
+philately/SM
+Philbert/M
+Philco/M
+philharmonic/S
+Philipa/M
+Philip/M
+Philippa/M
+Philippe/M
+Philippians/M
+philippic/SM
+Philippine/SM
+Philis/M
+philistine/S
+Philistine/SM
+philistinism/S
+Phillida/M
+Phillie/M
+Phillipa/M
+Phillipe/M
+Phillip/MS
+Phillipp/M
+Phillis/M
+Philly/SM
+Phil/MY
+philodendron/MS
+philological/Y
+philologist/MS
+philology/MS
+Philomena/M
+philosopher/MS
+philosophic
+philosophical/Y
+philosophized/U
+philosophizer/M
+philosophizes/U
+philosophize/ZDRSG
+philosophy/MS
+philter/SGDM
+philtre/DSMG
+Phineas/M
+Phip/M
+Phipps/M
+phi/SM
+phlebitides
+phlebitis/M
+phlegmatic
+phlegmatically
+phlegm/SM
+phloem/MS
+phlox/M
+pH/M
+Ph/M
+phobia/SM
+phobic/S
+Phobos/M
+Phoebe/M
+phoebe/SM
+Phoenicia/M
+Phoenician/SM
+Phoenix/M
+phoenix/MS
+phone/DSGM
+phoneme/SM
+phonemically
+phonemic/S
+phonemics/M
+phonetically
+phonetician/SM
+phonetic/S
+phonetics/M
+phonically
+phonic/S
+phonics/M
+phoniness/MS
+phonographer/M
+phonographic
+phonograph/RM
+phonographs
+phonologic
+phonological/Y
+phonologist/MS
+phonology/MS
+phonon/M
+phony/PTRSDG
+phooey/S
+phosphatase/M
+phosphate/MS
+phosphide/M
+phosphine/MS
+phosphoresce
+phosphorescence/SM
+phosphorescent/Y
+phosphoric
+phosphor/MS
+phosphorous
+phosphorus/SM
+photocell/MS
+photochemical/Y
+photochemistry/M
+photocopier/M
+photocopy/MRSDZG
+photoelectric
+photoelectrically
+photoelectronic
+photoelectrons
+photoengraver/M
+photoengrave/RSDJZG
+photoengraving/M
+photofinishing/MS
+photogenic
+photogenically
+photograph/AGD
+photographer/SM
+photographic
+photographically
+photograph's
+photographs/A
+photography/MS
+photojournalism/SM
+photojournalist/SM
+photoluminescence/M
+photolysis/M
+photolytic
+photometer/SM
+photometric
+photometrically
+photometry/M
+photomicrograph/M
+photomicrography/M
+photomultiplier/M
+photon/MS
+photorealism
+photosensitive
+photo/SGMD
+photosphere/M
+photostatic
+Photostat/MS
+Photostatted
+Photostatting
+photosyntheses
+photosynthesis/M
+photosynthesize/DSG
+photosynthetic
+phototypesetter
+phototypesetting/M
+phrasal
+phrase/AGDS
+phrasebook
+phrasemaking
+phraseology/MS
+phrase's
+phrasing/SM
+phrenological/Y
+phrenologist/MS
+phrenology/MS
+phylactery/MS
+phylae
+phyla/M
+Phylis/M
+Phyllida/M
+Phyllis/M
+Phyllys/M
+phylogeny/MS
+phylum/M
+Phylys/M
+phys
+physicality/M
+physical/PYS
+physician/SM
+physicist/MS
+physicked
+physicking
+physic/SM
+physiochemical
+physiognomy/SM
+physiography/MS
+physiologic
+physiological/Y
+physiologist/SM
+physiology/MS
+physiotherapist/MS
+physiotherapy/SM
+physique/MSD
+phytoplankton/M
+Piaf/M
+Piaget/M
+Pia/M
+pianism/M
+pianissimo/S
+pianistic
+pianist/SM
+pianoforte/MS
+pianola
+Pianola/M
+piano/SM
+piaster/MS
+piazza/SM
+pibroch/M
+pibrochs
+picador/MS
+picaresque/S
+pica/SM
+Picasso/M
+picayune/S
+Piccadilly/M
+piccalilli/MS
+piccolo/MS
+pickaback's
+pickaxe's
+pickax/GMSD
+pickerel/MS
+Pickering/M
+picker/MG
+picketer/M
+picket/MSRDZG
+Pickett/M
+Pickford/M
+pick/GZSJDR
+pickle/SDMG
+Pickman/M
+pickoff/S
+pickpocket/GSM
+pickup/SM
+Pickwick/M
+picky/RT
+picnicked
+picnicker/MS
+picnicking
+picnic/SM
+picofarad/MS
+picojoule
+picoseconds
+picot/DMGS
+Pict/M
+pictograph/M
+pictographs
+pictorialness/M
+pictorial/PYS
+picture/MGSD
+picturesqueness/SM
+picturesque/PY
+piddle/GSD
+piddly
+pidgin/SM
+piebald/S
+piece/GMDSR
+piecemeal
+piecer/M
+piecewise
+pieceworker/M
+piecework/ZSMR
+piedmont
+Piedmont/M
+pieing
+pie/MS
+Pierce/M
+piercer/M
+pierce/RSDZGJ
+piercing/Y
+Pierette/M
+pier/M
+Pier/M
+Pierre/M
+Pierrette/M
+Pierrot/M
+Pierson/M
+Pieter/M
+Pietra/M
+Pietrek/M
+Pietro/M
+piety/SM
+piezoelectric
+piezoelectricity/M
+piffle/MGSD
+pigeon/DMGS
+pigeonhole/SDGM
+pigged
+piggery/M
+pigging
+piggishness/SM
+piggish/YP
+piggyback/MSDG
+Piggy/M
+piggy/RSMT
+pigheadedness/S
+pigheaded/YP
+piglet/MS
+pigmentation/MS
+pigment/MDSG
+pig/MLS
+Pigmy's
+pigpen/SM
+pigroot
+pigskin/MS
+pigsty/SM
+pigswill/M
+pigtail/SMD
+Pike/M
+pike/MZGDRS
+piker/M
+pikestaff/MS
+pilaf/MS
+pilaster/SM
+Pilate/M
+pilau's
+pilchard/SM
+Pilcomayo/M
+pile/JDSMZG
+pileup/MS
+pilferage/SM
+pilferer/M
+pilfer/ZGSRD
+Pilgrim
+pilgrimage/DSGM
+pilgrim/MS
+piling/M
+pillage/RSDZG
+pillar/DMSG
+pillbox/MS
+pill/GSMD
+pillion/DMGS
+pillory/MSDG
+pillowcase/SM
+pillow/GDMS
+pillowslip/S
+Pillsbury/M
+pilot/DMGS
+pilothouse/SM
+piloting/M
+pimento/MS
+pimiento/SM
+pimpernel/SM
+pimp/GSMYD
+pimple/SDM
+pimplike
+pimply/TRM
+PIN
+pinafore/MS
+piñata/S
+Pinatubo/M
+pinball/MS
+Pincas/M
+pincer/GSD
+Pinchas/M
+pincher/M
+pinch/GRSD
+pincushion/SM
+Pincus/M
+Pindar/M
+pineapple/MS
+pined/A
+Pinehurst/M
+pine/MNGXDS
+pines/A
+pinfeather/SM
+ping/GDRM
+pinheaded/P
+pinhead/SMD
+pinhole/SM
+pining/A
+pinion/DMG
+Pinkerton/M
+pinkeye/MS
+pink/GTYDRMPS
+pinkie/SM
+pinkish/P
+pinkness/S
+pinko/MS
+pinky's
+pinnacle/MGSD
+pinnate
+pinned/U
+pinning/S
+Pinocchio/M
+Pinochet/M
+pinochle/SM
+piñon/S
+pinpoint/SDG
+pinprick/MDSG
+pin's
+pinsetter/SM
+Pinsky/M
+pinstripe/SDM
+pintail/SM
+Pinter/M
+pint/MRS
+pinto/S
+pinup/MS
+pin/US
+pinwheel/DMGS
+pinyin
+Pinyin
+piny/RT
+pioneer/SDMG
+pion/M
+Piotr/M
+piousness/MS
+pious/YP
+pipeline/DSMG
+pipe/MS
+piper/M
+Piper/M
+Pipestone/M
+pipet's
+pipette/MGSD
+pipework
+piping/YM
+pipit/MS
+pip/JSZMGDR
+Pip/MR
+Pippa/M
+pipped
+pipping
+pippin/SM
+Pippo/M
+Pippy/M
+pipsqueak/SM
+piquancy/MS
+piquantness/M
+piquant/PY
+pique/GMDS
+piracy/MS
+Piraeus/M
+Pirandello/M
+piranha/SM
+pirate/MGSD
+piratical/Y
+pirogi
+pirogies
+pirouette/MGSD
+pis
+Pisa/M
+piscatorial
+Pisces/M
+Pisistratus/M
+pismire/SM
+Pissaro/M
+piss/DSRG!
+pistachio/MS
+piste/SM
+pistillate
+pistil/MS
+pistoleers
+pistole/M
+pistol/SMGD
+piston/SM
+pitapat/S
+pitapatted
+pitapatting
+pita/SM
+Pitcairn/M
+pitchblende/SM
+pitcher/M
+pitchfork/GDMS
+pitching/M
+pitchman/M
+pitchmen
+pitch/RSDZG
+pitchstone/M
+piteousness/SM
+piteous/YP
+pitfall/SM
+pithily
+pithiness/SM
+pith/MGDS
+piths
+pithy/RTP
+pitiableness/M
+pitiable/P
+pitiably
+pitier/M
+pitifuller
+pitifullest
+pitifulness/M
+pitiful/PY
+pitilessness/SM
+pitiless/PY
+pitman/M
+pit/MS
+Pitney/M
+piton/SM
+pittance/SM
+pitted
+pitting
+Pittman/M
+Pittsburgh/ZM
+Pittsfield/M
+Pitt/SM
+Pittston/M
+pituitary/SM
+pitying/Y
+pity/ZDSRMG
+Pius/M
+pivotal/Y
+pivot/DMSG
+pivoting/M
+pix/DSG
+pixel/SM
+pixie/MS
+pixiness
+pixmap/SM
+Pizarro/M
+pizazz/S
+pi/ZGDRH
+pizza/SM
+pizzeria/SM
+pizzicati
+pizzicato
+pj's
+PJ's
+pk
+pkg
+pkt
+pkwy
+Pkwy
+pl
+placard/DSMG
+placate/NGVXDRS
+placatory
+placeable/A
+placebo/SM
+placed/EAU
+place/DSRJLGZM
+placeholder/S
+placekick/DGS
+placeless/Y
+placement/AMES
+placental/S
+placenta/SM
+placer/EM
+places/EA
+placidity/SM
+placidness/M
+placid/PY
+placing/AE
+placket/SM
+plagiarism/MS
+plagiarist/MS
+plagiarize/GZDSR
+plagiary/SM
+plagued/U
+plague/MGRSD
+plaguer/M
+plaice/M
+plaid/DMSG
+plainclothes
+plainclothesman
+plainclothesmen
+Plainfield/M
+plainness/MS
+plainsman/M
+plainsmen
+plainsong/SM
+plainspoken
+plain/SPTGRDY
+plaintiff/MS
+plaintiveness/M
+plaintive/YP
+plaint/VMS
+Plainview/M
+plaiting/M
+plait/SRDMG
+planar
+planarity
+Planck/M
+plan/DRMSGZ
+planeload
+planer/M
+plane's
+plane/SCGD
+planetarium/MS
+planetary
+planetesimal/M
+planet/MS
+planetoid/SM
+plangency/S
+plangent
+planking/M
+plank/SJMDG
+plankton/MS
+planned/U
+planner/SM
+planning
+Plano
+planoconcave
+planoconvex
+Plantagenet/M
+plantain/MS
+plantar
+plantation/MS
+planter/MS
+planting/S
+plantlike
+plant's
+plant/SADG
+plaque/MS
+plash/GSDM
+plasma/MS
+plasmid/S
+plasm/M
+plasterboard/MS
+plasterer/M
+plastering/M
+plaster/MDRSZG
+plasterwork/M
+plastically
+plasticine
+Plasticine/M
+plasticity/SM
+plasticize/GDS
+plastic/MYS
+plateau/GDMS
+plateful/S
+platelet/SM
+platen/M
+plater/M
+plate/SM
+platform/SGDM
+Plath/M
+plating/M
+platinize/GSD
+platinum/MS
+platitude/SM
+platitudinous/Y
+plat/JDNRSGXZ
+Plato/M
+platonic
+Platonic
+Platonism/M
+Platonist
+platoon/MDSG
+platted
+Platte/M
+platter/MS
+Platteville/M
+platting
+platypus/MS
+platys
+platy/TR
+plaudit/MS
+plausibility/S
+plausible/P
+plausibly
+Plautus/M
+playability/U
+playable/U
+playacting/M
+playact/SJDG
+playback/MS
+playbill/SM
+Playboy/M
+playboy/SM
+play/DRSEBG
+played/A
+player's/E
+player/SM
+playfellow/S
+playfulness/MS
+playful/PY
+playgirl/SM
+playgoer/MS
+playground/MS
+playgroup/S
+playhouse/SM
+playing/S
+playmate/MS
+playoff/S
+playpen/SM
+playroom/SM
+plays/A
+Playtex/M
+plaything/MS
+playtime/SM
+playwright/SM
+playwriting/M
+plaza/SM
+pleader/MA
+pleading/MY
+plead/ZGJRDS
+pleasanter
+pleasantest
+pleasantness/SMU
+pleasantry/MS
+pleasant/UYP
+pleased/EU
+pleaser/M
+pleases/E
+please/Y
+pleasingness/M
+pleasing/YP
+plea/SM
+pleas/RSDJG
+pleasurableness/M
+pleasurable/P
+pleasurably
+pleasureful
+pleasure/MGBDS
+pleasure's/E
+pleasures/E
+pleater/M
+pleat/RDMGS
+plebeian/SY
+plebe/MS
+plebiscite/SM
+plectra
+plectrum/SM
+pledger/M
+pledge/RSDMG
+Pleiads
+Pleistocene
+plenary/S
+plenipotentiary/S
+plenitude/MS
+plenteousness/M
+plenteous/PY
+plentifulness/M
+plentiful/YP
+plenty/SM
+plenum/M
+pleonasm/MS
+plethora/SM
+pleurae
+pleural
+pleura/M
+pleurisy/SM
+Plexiglas/MS
+plexus/SM
+pliability/MS
+pliableness/M
+pliable/P
+pliancy/MS
+pliantness/M
+pliant/YP
+plication/MA
+plier/MA
+plight/GMDRS
+plimsolls
+plinker/M
+plink/GRDS
+plinth/M
+plinths
+Pliny/M
+Pliocene/S
+PLO
+plodded
+plodder/SM
+plodding/SY
+plod/S
+plopped
+plopping
+plop/SM
+plosive
+plot/SM
+plotted/A
+plotter/MDSG
+plotting
+plover/MS
+plowed/U
+plower/M
+plowman/M
+plowmen
+plow/SGZDRM
+plowshare/MS
+ploy's
+ploy/SCDG
+plucker/M
+pluckily
+pluckiness/SM
+pluck/SGRD
+plucky/TPR
+pluggable
+plugged/UA
+plugging/AU
+plughole
+plug's
+plug/US
+plumage/DSM
+plumbago/M
+plumbed/U
+plumber/M
+plumbing/M
+plumb/JSZGMRD
+plume/SM
+plummer
+plummest
+plummet/DSG
+plummy
+plumper/M
+plumpness/S
+plump/RDNYSTGP
+plum/SMDG
+plumy/TR
+plunder/GDRSZ
+plunger/M
+plunge/RSDZG
+plunker/M
+plunk/ZGSRD
+pluperfect/S
+pluralism/MS
+pluralistic
+pluralist/S
+plurality/SM
+pluralization/MS
+pluralize/GZRSD
+pluralizer/M
+plural/SY
+plushness/MS
+plush/RSYMTP
+plushy/RPT
+plus/S
+plussed
+plussing
+Plutarch/M
+plutocracy/MS
+plutocratic
+plutocrat/SM
+Pluto/M
+plutonium/SM
+pluvial/S
+ply/AZNGRSD
+Plymouth/M
+plywood/MS
+pm
+PM
+Pm/M
+PMS
+pneumatically
+pneumatic/S
+pneumatics/M
+pneumonia/MS
+PO
+poacher/M
+poach/ZGSRD
+Pocahontas/M
+pocketbook/SM
+pocketful/SM
+pocketing/M
+pocketknife/M
+pocketknives
+pocket/MSRDG
+pock/GDMS
+pockmark/MDSG
+Pocono/MS
+podded
+podding
+podge/ZR
+Podgorica/M
+podiatrist/MS
+podiatry/MS
+podium/MS
+pod/SM
+Podunk/M
+Poe/M
+poem/MS
+poesy/GSDM
+poetaster/MS
+poetess/MS
+poetically
+poeticalness
+poetical/U
+poetic/S
+poetics/M
+poet/MS
+poetry/SM
+pogo
+Pogo/M
+pogrom/GMDS
+poignancy/MS
+poignant/Y
+Poincaré/M
+poinciana/SM
+Poindexter/M
+poinsettia/SM
+pointblank
+pointedness/M
+pointed/PY
+pointer/M
+pointillism/SM
+pointillist/SM
+pointing/M
+pointlessness/SM
+pointless/YP
+point/RDMZGS
+pointy/TR
+poise/M
+pois/GDS
+poi/SM
+poisoner/M
+poisoning/M
+poisonous/PY
+poison/RDMZGSJ
+Poisson/M
+poke/DRSZG
+Pokemon/M
+pokerface/D
+poker/M
+poky/SRT
+Poland/M
+Polanski/M
+polarimeter/SM
+polarimetry
+polariscope/M
+Polaris/M
+polarity/MS
+polarization/CMS
+polarized/UC
+polarize/RSDZG
+polarizes/C
+polarizing/C
+polarogram/SM
+polarograph
+polarography/M
+Polaroid/SM
+polar/S
+polecat/SM
+polemical/Y
+polemicist/S
+polemic/S
+polemics/M
+pole/MS
+Pole/MS
+poler/M
+polestar/S
+poleward/S
+pol/GMDRS
+policeman/M
+policemen/M
+police/MSDG
+policewoman/M
+policewomen
+policyholder/MS
+policymaker/S
+policymaking
+policy/SM
+poliomyelitides
+poliomyelitis/M
+polio/SM
+Polish
+polished/U
+polisher/M
+polish/RSDZGJ
+polis/M
+Politburo/M
+politburo/S
+politeness/MS
+polite/PRTY
+politesse/SM
+politically
+political/U
+politician/MS
+politicization/S
+politicize/CSDG
+politicked
+politicking/SM
+politico/SM
+politic/S
+politics/M
+polity/MS
+polka/SDMG
+Polk/M
+pollack/SM
+Pollard/M
+polled/U
+pollen/GDM
+pollinate/XSDGN
+pollination/M
+pollinator/MS
+polliwog/SM
+poll/MDNRSGX
+pollock's
+Pollock/SM
+pollster/MS
+pollutant/MS
+polluted/U
+polluter/M
+pollute/RSDXZVNG
+pollution/M
+Pollux/M
+Pollyanna/M
+Polly/M
+pollywog's
+Pol/MY
+Polo/M
+polo/MS
+polonaise/MS
+polonium/MS
+poltergeist/SM
+poltroon/MS
+polyandrous
+polyandry/MS
+polyatomic
+polybutene/MS
+polycarbonate
+polychemicals
+polychrome
+polyclinic/MS
+polycrystalline
+polyelectrolytes
+polyester/SM
+polyether/S
+polyethylene/SM
+polygamist/MS
+polygamous/Y
+polygamy/MS
+polyglot/S
+polygonal/Y
+polygon/MS
+polygraph/MDG
+polygraphs
+polygynous
+polyhedral
+polyhedron/MS
+Polyhymnia/M
+polyisobutylene
+polyisocyanates
+polymath/M
+polymaths
+polymerase/S
+polymeric
+polymerization/SM
+polymerize/SDG
+polymer/MS
+polymorphic
+polymorphism/MS
+polymorph/M
+polymyositis
+Polynesia/M
+Polynesian/S
+polynomial/YMS
+Polyphemus/M
+polyphonic
+polyphony/MS
+polyphosphate/S
+polyp/MS
+polypropylene/MS
+polystyrene/SM
+polysyllabic
+polysyllable/SM
+polytechnic/MS
+polytheism/SM
+polytheistic
+polytheist/SM
+polythene/M
+polytonal/Y
+polytopes
+polyunsaturated
+polyurethane/SM
+polyvinyl/MS
+Po/M
+pomade/MGSD
+pomander/MS
+pomegranate/SM
+Pomerania/M
+Pomeranian
+pommel/GSMD
+Pomona/M
+Pompadour/M
+pompadour/MDS
+pompano/SM
+Pompeian/S
+Pompeii/M
+Pompey/M
+pompom/SM
+pompon's
+pomposity/MS
+pompousness/S
+pompous/YP
+pomp/SM
+ponce/M
+Ponce/M
+Ponchartrain/M
+poncho/MS
+ponderer/M
+ponderousness/MS
+ponderous/PY
+ponder/ZGRD
+pond/SMDRGZ
+pone/SM
+pongee/MS
+poniard/GSDM
+pons/M
+Pontchartrain/M
+Pontiac/M
+Pontianak/M
+pontiff/MS
+pontifical/YS
+pontificate/XGNDS
+pontoon/SMDG
+pony/DSMG
+ponytail/SM
+pooch/GSDM
+poodle/MS
+poof/MS
+pooh/DG
+Pooh/M
+poohs
+Poole/M
+pool/MDSG
+poolroom/MS
+poolside
+Poona/M
+poop/MDSG
+poorboy
+poorhouse/MS
+poorness/MS
+poor/TYRP
+popcorn/MS
+Popek/MS
+pope/SM
+Pope/SM
+Popeye/M
+popgun/SM
+popinjay/MS
+poplar/SM
+poplin/MS
+Popocatepetl/M
+popover/SM
+poppa/MS
+popped
+Popper/M
+popper/SM
+poppet/M
+popping
+Poppins/M
+poppycock/MS
+Poppy/M
+poppy/SDM
+poppyseed
+Popsicle/MS
+pop/SM
+populace/MS
+popularism
+popularity/UMS
+popularization/SM
+popularize/A
+popularized
+popularizer/MS
+popularizes/U
+popularizing
+popular/YS
+populate/CXNGDS
+populated/UA
+populates/A
+populating/A
+population/MC
+populism/S
+populist/SM
+populousness/MS
+populous/YP
+porcelain/SM
+porch/SM
+porcine
+porcupine/MS
+pore/ZGDRS
+Porfirio/M
+porgy/SM
+poring/Y
+porker/M
+porky/TSR
+pork/ZRMS
+pornographer/SM
+pornographic
+pornographically
+pornography/SM
+porno/S
+porn/S
+porosity/SM
+porousness/MS
+porous/PY
+porphyritic
+porphyry/MS
+porpoise/DSGM
+porridge/MS
+Porrima/M
+porringer/MS
+Porsche/M
+portability/S
+portables
+portable/U
+portably
+port/ABSGZMRD
+portage/ASM
+portaged
+portaging
+portal/SM
+portamento/M
+portcullis/MS
+ported/CE
+Porte/M
+portend/SDG
+portentousness/M
+portentous/PY
+portent/SM
+porterage/M
+porter/DMG
+porterhouse/SM
+Porter/M
+porter's/A
+portfolio/MS
+porthole/SM
+Portia/M
+porticoes
+portico/M
+Portie/M
+portière/SM
+porting/E
+portion/KGSMD
+Portland/M
+portliness/SM
+portly/PTR
+portmanteau/SM
+Port/MR
+Pôrto/M
+portraitist/SM
+portrait/MS
+portraiture/MS
+portrayal/SM
+portrayer/M
+portray/GDRS
+ports/CE
+Portsmouth/M
+Portugal/M
+Portuguese/M
+portulaca/MS
+Porty/M
+posed/CA
+Poseidon/M
+poser/KME
+poses/CA
+poseur/MS
+pose/ZGKDRSE
+posh/DSRGT
+posing/CA
+positifs
+positionable
+positional/KY
+position/KGASMD
+position's/EC
+positions/EC
+positiveness/S
+positive/RSPYT
+positivism/M
+positivist/S
+positivity
+positron/SM
+posit/SCGD
+Posner/M
+posse/M
+possess/AGEDS
+possessed/PY
+possession/AEMS
+possessional
+possessiveness/MS
+possessive/PSMY
+possessor/MS
+possibility/SM
+possible/TRS
+possibly
+poss/S
+possum/MS
+postage/MS
+postal/S
+post/ASDRJG
+postbag/M
+postbox/SM
+postcard/SM
+postcode/SM
+postcondition/S
+postconsonantal
+postdate/DSG
+postdoctoral
+posteriori
+posterior/SY
+posterity/SM
+poster/MS
+postfix/GDS
+postgraduate/SM
+posthaste/S
+posthumousness/M
+posthumous/YP
+posthypnotic
+postilion/MS
+postindustrial
+posting/M
+postlude/MS
+Post/M
+postman/M
+postmarital
+postmark/GSMD
+postmaster/SM
+postmen
+postmeridian
+postmistress/MS
+postmodern
+postmodernist
+postmortem/S
+postnasal
+postnatal
+postoperative/Y
+postorder
+postpaid
+postpartum
+postpone/GLDRS
+postponement/S
+postpositions
+postprandial
+post's
+postscript/SM
+postsecondary
+postulate/XGNSD
+postulation/M
+postural
+posture/MGSRD
+posturer/M
+postvocalic
+postwar
+posy/SM
+potability/SM
+potableness/M
+potable/SP
+potage/M
+potash/MS
+potassium/MS
+potatoes
+potato/M
+potbelly/MSD
+potboiler/M
+potboil/ZR
+pot/CMS
+Potemkin/M
+potency/MS
+potentate/SM
+potentiality/MS
+potential/SY
+potentiating
+potentiometer/SM
+potent/YS
+potful/SM
+pothead/MS
+potherb/MS
+pother/GDMS
+potholder/MS
+pothole/SDMG
+potholing/M
+pothook/SM
+potion/SM
+potlatch/SM
+potluck/MS
+Potomac/M
+potpie/SM
+potpourri/SM
+Potsdam/M
+potsherd/MS
+potshot/S
+pottage/SM
+Pottawatomie/M
+potted
+Potter/M
+potter/RDMSG
+pottery/MS
+potting
+Potts/M
+potty/SRT
+pouch/SDMG
+Poughkeepsie/M
+Poul/M
+poulterer/MS
+poultice/DSMG
+poultry/MS
+pounce/SDG
+poundage/MS
+pounder/MS
+pound/KRDGS
+Pound/M
+pour/DSG
+pourer's
+Poussin/MS
+pouter/M
+pout/GZDRS
+poverty/MS
+POW
+powderpuff
+powder/RDGMS
+powdery
+Powell/M
+powerboat/MS
+powerfulness/M
+powerful/YP
+power/GMD
+powerhouse/MS
+powerlessness/SM
+powerless/YP
+Powers
+Powhatan/M
+pow/RZ
+powwow/GDMS
+pox/GMDS
+Poznan/M
+pp
+PP
+ppm
+ppr
+PPS
+pr
+PR
+practicability/S
+practicable/P
+practicably
+practicality/SM
+practicalness/M
+practical/YPS
+practice/BDRSMG
+practiced/U
+practicer/M
+practicum/SM
+practitioner/SM
+Pradesh/M
+Prado/M
+Praetorian
+praetorian/S
+praetor/MS
+pragmatical/Y
+pragmatic/S
+pragmatics/M
+pragmatism/MS
+pragmatist/MS
+Prague/M
+Praia
+prairie/MS
+praise/ESDG
+praiser/S
+praise's
+praiseworthiness/MS
+praiseworthy/P
+praising/Y
+Prakrit/M
+praline/MS
+pram/MS
+prancer/M
+prance/ZGSRD
+prancing/Y
+prank/SMDG
+prankster/SM
+praseodymium/SM
+Pratchett/M
+prate/DSRGZ
+prater/M
+pratfall/MS
+prating/Y
+prattle/DRSGZ
+prattler/M
+prattling/Y
+Pratt/M
+Prattville/M
+Pravda/M
+prawn/MDSG
+praxes
+praxis/M
+Praxiteles/M
+pray/DRGZS
+prayerbook
+prayerfulness/M
+prayerful/YP
+prayer/M
+PRC
+preach/DRSGLZJ
+preacher/M
+preaching/Y
+preachment/MS
+preachy/RT
+preadolescence/S
+Preakness/M
+preallocate/XGNDS
+preallocation/M
+preallocator/S
+preamble/MGDS
+preamp
+preamplifier/M
+prearrange/LSDG
+prearrangement/SM
+preassign/SDG
+preauthorize
+prebendary/M
+Precambrian
+precancel/DGS
+precancerous
+precariousness/MS
+precarious/PY
+precautionary
+precaution/SGDM
+precede/DSG
+precedence/SM
+precedented/U
+precedent/SDM
+preceptive/Y
+preceptor/MS
+precept/SMV
+precess/DSG
+precession/M
+precinct/MS
+preciosity/MS
+preciousness/S
+precious/PYS
+precipice/MS
+precipitable
+precipitant/S
+precipitateness/M
+precipitate/YNGVPDSX
+precipitation/M
+precipitousness/M
+precipitous/YP
+preciseness/SM
+precise/XYTRSPN
+precision/M
+précis/MDG
+preclude/GDS
+preclusion/S
+precociousness/MS
+precocious/YP
+precocity/SM
+precode/D
+precognition/SM
+precognitive
+precollege/M
+precolonial
+precomputed
+preconceive/GSD
+preconception/SM
+precondition/GMDS
+preconscious
+precook/GDS
+precursor/SM
+precursory
+precut
+predate/NGDSX
+predation/CMS
+predator/SM
+predatory
+predecease/SDG
+predecessor/MS
+predeclared
+predecline
+predefine/GSD
+predefinition/SM
+predesignate/GDS
+predestination/SM
+predestine/SDG
+predetermination/MS
+predeterminer/M
+predetermine/ZGSRD
+predicable/S
+predicament/SM
+predicate/VGNXSD
+predication/M
+predicator
+predictability/UMS
+predictable/U
+predictably/U
+predict/BSDGV
+predicted/U
+prediction/MS
+predictive/Y
+predictor/MS
+predigest/GDS
+predilect
+predilection/SM
+predispose/SDG
+predisposition/MS
+predoctoral
+predominance/SM
+predominant/Y
+predominate/YSDGN
+predomination/M
+preemie/MS
+preeminence/SM
+preeminent/Y
+preemployment/M
+preempt/GVSD
+preemption/SM
+preemptive/Y
+preemptor/M
+preener/M
+preen/SRDG
+preexist/DSG
+preexistence/SM
+preexistent
+prefabbed
+prefabbing
+prefab/MS
+prefabricate/XNGDS
+prefabrication/M
+preface/DRSGM
+prefacer/M
+prefatory
+prefect/MS
+prefecture/MS
+preferableness/M
+preferable/P
+preferably
+prefer/BL
+preference/MS
+preferential/Y
+preferment/SM
+preferred
+preferring
+prefiguration/M
+prefigure/SDG
+prefix/MDSG
+preflight/SGDM
+preform/DSG
+pref/RZ
+pregnancy/SM
+pregnant/Y
+preheat/GDS
+prehensile
+prehistoric
+prehistorical/Y
+prehistory/SM
+preindustrial
+preinitialize/SDG
+preinterview/M
+preisolated
+prejudge/DRSG
+prejudger/M
+prejudgment/SM
+prejudiced/U
+prejudice/MSDG
+prejudicial/PY
+prekindergarten/MS
+prelacy/MS
+prelate/SM
+preliminarily
+preliminary/S
+preliterate/S
+preloaded
+prelude/GMDRS
+preluder/M
+premarital/Y
+premarket
+prematureness/M
+premature/SPY
+prematurity/M
+premedical
+premeditated/Y
+premeditate/XDSGNV
+premeditation/M
+premed/S
+premenstrual
+premiere/MS
+premier/GSDM
+premiership/SM
+Preminger/M
+premise/GMDS
+premiss's
+premium/MS
+premix/GDS
+premolar/S
+premonition/SM
+premonitory
+prenatal/Y
+Pren/M
+Prenticed/M
+Prentice/MGD
+Prenticing/M
+Prentiss/M
+Prent/M
+prenuptial
+preoccupation/MS
+preoccupy/DSG
+preoperative
+preordain/DSLG
+prepackage/GSD
+prepaid
+preparation/SM
+preparative/SYM
+preparatory
+preparedly
+preparedness/USM
+prepared/UP
+prepare/ZDRSG
+prepay/GLS
+prepayment/SM
+prepender/S
+prepends
+preplanned
+preponderance/SM
+preponderant/Y
+preponderate/DSYGN
+prepositional/Y
+preposition/SDMG
+prepossess/GSD
+prepossessing/U
+prepossession/MS
+preposterousness/M
+preposterous/PY
+prepped
+prepping
+preppy/RST
+preprepared
+preprint/SGDM
+preprocessed
+preprocessing
+preprocessor/S
+preproduction
+preprogrammed
+prep/SM
+prepubescence/S
+prepubescent/S
+prepublication/M
+prepuce/SM
+prequel/S
+preradiation
+prerecord/DGS
+preregister/DSG
+preregistration/MS
+prerequisite/SM
+prerogative/SDM
+Pres
+presage/GMDRS
+presager/M
+presbyopia/MS
+presbyterian
+Presbyterianism/S
+Presbyterian/S
+presbyter/MS
+presbytery/MS
+preschool/RSZ
+prescience/SM
+prescient/Y
+Prescott/M
+prescribed/U
+prescriber/M
+prescribe/RSDG
+prescription/SM
+prescriptive/Y
+prescript/SVM
+preselect/SGD
+presence/SM
+presentableness/M
+presentable/P
+presentably/A
+presentational/A
+presentation/AMS
+presented/A
+presenter/A
+presentiment/MS
+presentment/SM
+presents/A
+present/SLBDRYZGP
+preservationist/S
+preservation/SM
+preservative/SM
+preserve/DRSBZG
+preserved/U
+preserver/M
+preset/S
+presetting
+preshrank
+preshrink/SG
+preshrunk
+preside/DRSG
+presidency/MS
+presidential/Y
+president/SM
+presider/M
+presidia
+presidium/M
+Presley/M
+presoaks
+presort/GDS
+pres/S
+press/ACDSG
+pressed/U
+presser/MS
+pressingly/C
+pressing/YS
+pressman/M
+pressmen
+pressure/DSMG
+pressurization/MS
+pressurize/DSRGZ
+pressurized/U
+prestidigitate/NX
+prestidigitation/M
+prestidigitatorial
+prestidigitator/M
+prestige/MS
+prestigious/PY
+Preston/M
+presto/S
+presumably
+presume/BGDRS
+presumer/M
+presuming/Y
+presumption/MS
+presumptive/Y
+presumptuousness/SM
+presumptuous/YP
+presuppose/GDS
+presupposition/S
+pretax
+preteen/S
+pretended/Y
+pretender/M
+pretending/U
+pretend/SDRZG
+pretense/MNVSX
+pretension/GDM
+pretentiousness/S
+pretentious/UYP
+preterite's
+preterit/SM
+preternatural/Y
+pretest/SDG
+pretext/SMDG
+Pretoria/M
+pretreated
+pretreatment/S
+pretrial
+prettify/SDG
+prettily
+prettiness/SM
+pretty/TGPDRS
+pretzel/SM
+prevailing/Y
+prevail/SGD
+prevalence/MS
+prevalent/SY
+prevaricate/DSXNG
+prevaricator/MS
+preventable/U
+preventably
+preventative/S
+prevent/BSDRGV
+preventer/M
+prevention/MS
+preventiveness/M
+preventive/SPY
+preview/ZGSDRM
+previous/Y
+prevision/SGMD
+prewar
+prexes
+preyer's
+prey/SMDG
+Priam/M
+priapic
+Pribilof/M
+price/AGSD
+priced/U
+priceless
+Price/M
+pricer/MS
+price's
+pricey
+pricier
+priciest
+pricker/M
+pricking/M
+prickle/GMDS
+prickliness/S
+prickly/RTP
+prick/RDSYZG
+prideful/Y
+pride/GMDS
+prier/M
+priestess/MS
+priesthood/SM
+Priestley/M
+priestliness/SM
+priestly/PTR
+priest/SMYDG
+prigged
+prigging
+priggishness/S
+priggish/PYM
+prig/SM
+primacy/MS
+primal
+primarily
+primary/MS
+primate/MS
+primed/U
+primely/M
+primeness/M
+prime/PYS
+primer/M
+Prime's
+primeval/Y
+priming/M
+primitiveness/SM
+primitive/YPS
+primitivism/M
+primmed
+primmer
+primmest
+primming
+primness/MS
+primogenitor/MS
+primogeniture/MS
+primordial/YS
+primp/DGS
+primrose/MGSD
+prim/SPJGZYDR
+princedom/MS
+princeliness/SM
+princely/PRT
+Prince/M
+prince/SMY
+princess/MS
+Princeton/M
+principality/MS
+principal/SY
+Principe/M
+Principia/M
+principled/U
+principle/SDMG
+printable/U
+printably
+print/AGDRS
+printed/U
+printer/AM
+printers
+printing/SM
+printmaker/M
+printmake/ZGR
+printmaking/M
+printout/S
+Prinz/M
+prioress/MS
+priori
+prioritize/DSRGZJ
+priority/MS
+prior/YS
+priory/SM
+Pris
+Prisca/M
+Priscella/M
+Priscilla/M
+prised
+prise/GMAS
+prismatic
+prism/MS
+prison/DRMSGZ
+prisoner/M
+Prissie/M
+prissily
+prissiness/SM
+prissy/RSPT
+pristine/Y
+prithee/S
+privacy/MS
+privateer/SMDG
+privateness/M
+private/NVYTRSXP
+privation/MCS
+privative/Y
+privatization/S
+privatize/GSD
+privet/SM
+privileged/U
+privilege/SDMG
+privily
+privy/SRMT
+prized/A
+prize/DSRGZM
+prizefighter/M
+prizefighting/M
+prizefight/SRMGJZ
+prizewinner/S
+prizewinning
+Pr/MN
+PRO
+proactive
+probabilist
+probabilistic
+probabilistically
+probability/SM
+probable/S
+probably
+probated/A
+probate/NVMX
+probates/A
+probating/A
+probational
+probationary/S
+probationer/M
+probation/MRZ
+probation's/A
+probative/A
+prober/M
+probity/SM
+problematical/UY
+problematic/S
+problem/SM
+proboscis/MS
+prob/RBJ
+procaine/MS
+procedural/SY
+procedure/MS
+proceeder/M
+proceeding/M
+proceed/JRDSG
+process/BSDMG
+processed/UA
+processes/A
+processional/YS
+procession/GD
+processor/MS
+proclamation/MS
+proclivity/MS
+proconsular
+procrastinate/XNGDS
+procrastination/M
+procrastinator/MS
+procreational
+procreatory
+procrustean
+Procrustean
+Procrustes/M
+proctor/GSDM
+proctorial
+procurable/U
+procure/L
+procurement/MS
+Procyon/M
+prodded
+prodding
+prodigality/S
+prodigal/SY
+prodigiousness/M
+prodigious/PY
+prodigy/MS
+prod/S
+produce/AZGDRS
+producer/AM
+producible/A
+production/ASM
+productively/UA
+productiveness/MS
+productive/PY
+productivities
+productivity/A
+productivity's
+productize/GZRSD
+product/V
+Prof
+profanation/S
+profaneness/MS
+profane/YPDRSG
+profanity/MS
+professed/Y
+professionalism/SM
+professionalize/GSD
+professional/USY
+profession/SM
+professorial/Y
+professorship/SM
+professor/SM
+proffer/GSD
+proficiency/SM
+proficient/YS
+profitability/MS
+profitableness/MU
+profitable/UP
+profitably/U
+profiteer/GSMD
+profiterole/MS
+profit/GZDRB
+profitless
+profligacy/S
+profligate/YS
+proforma/S
+profoundity
+profoundness/SM
+profound/PTYR
+prof/S
+profundity/MS
+profuseness/MS
+profuse/YP
+progenitor/SM
+progeny/M
+progesterone/SM
+prognathous
+prognoses
+prognosis/M
+prognosticate/NGVXDS
+prognostication/M
+prognosticator/S
+prognostic/S
+program/CSA
+programed
+programing
+programmability
+programmable/S
+programmed/CA
+programmer/ASM
+programming/CA
+programmings
+progression/SM
+progressiveness/SM
+progressive/SPY
+progressivism
+progress/MSDVG
+prohibiter/M
+prohibitionist/MS
+prohibition/MS
+Prohibition/MS
+prohibitiveness/M
+prohibitive/PY
+prohibitory
+prohibit/VGSRD
+projected/AU
+projectile/MS
+projectionist/MS
+projection/MS
+projective/Y
+project/MDVGS
+projector/SM
+Prokofieff/M
+Prokofiev/M
+prolegomena
+proletarianization/M
+proletarianized
+proletarian/S
+proletariat/SM
+proliferate/GNVDSX
+proliferation/M
+prolifically
+prolific/P
+prolixity/MS
+prolix/Y
+prologize
+prologue/MGSD
+prologuize
+prolongate/NGSDX
+prolongation/M
+prolonger/M
+prolong/G
+promenade/GZMSRD
+promenader/M
+Promethean
+Prometheus/M
+promethium/SM
+prominence/MS
+prominent/Y
+promiscuity/MS
+promiscuousness/M
+promiscuous/PY
+promise/GD
+promising/UY
+promissory
+promontory/MS
+promote/GVZBDR
+promoter/M
+promotiveness/M
+promotive/P
+prompted/U
+prompter/M
+promptitude/SM
+promptness/MS
+prompt/SGJTZPYDR
+pro/MS
+promulgate/NGSDX
+promulgation/M
+promulgator/MS
+pron
+proneness/MS
+prone/PY
+pronghorn/SM
+prong/SGMD
+pronominalization
+pronominalize
+pronounceable/U
+pronouncedly
+pronounced/U
+pronounce/GLSRD
+pronouncement/SM
+pronouncer/M
+pronto
+pronunciation/SM
+proofed/A
+proofer
+proofing/M
+proofreader/M
+proofread/GZSR
+proof/SEAM
+propaganda/SM
+propagandistic
+propagandist/SM
+propagandize/DSG
+propagated/U
+propagate/SDVNGX
+propagation/M
+propagator/MS
+propellant/MS
+propelled
+propeller/MS
+propelling
+propel/S
+propensity/MS
+properness/M
+proper/PYRT
+propertied/U
+property/SDM
+prophecy/SM
+prophesier/M
+prophesy/GRSDZ
+prophetess/S
+prophetic
+prophetical/Y
+prophet/SM
+prophylactic/S
+prophylaxes
+prophylaxis/M
+propinquity/MS
+propionate/M
+propitiate/GNXSD
+propitiatory
+propitiousness/M
+propitious/YP
+proponent/MS
+proportionality/M
+proportional/SY
+proportionate/YGESD
+proportioner/M
+proportion/ESGDM
+proportionment/M
+proposal/SM
+propped
+propping
+proprietary/S
+proprietorial
+proprietorship/SM
+proprietor/SM
+proprietress/MS
+propriety/MS
+proprioception
+proprioceptive
+prop/SZ
+propulsion/MS
+propulsive
+propylene/M
+prorogation/SM
+prorogue
+prosaic
+prosaically
+proscenium/MS
+prosciutti
+prosciutto/SM
+proscription/SM
+proscriptive
+pros/DSRG
+prosecute/SDBXNG
+prosecution/M
+prosecutor/MS
+proselyte/SDGM
+proselytism/MS
+proselytize/ZGDSR
+prose/M
+proser/M
+Proserpine/M
+prosodic/S
+prosody/MS
+prospect/DMSVG
+prospection/SM
+prospectiveness/M
+prospective/SYP
+prospector/MS
+prospectus/SM
+prosper/GSD
+prosperity/MS
+prosperousness/M
+prosperous/PY
+prostate
+prostheses
+prosthesis/M
+prosthetic/S
+prosthetics/M
+prostitute/DSXNGM
+prostitution/M
+prostrate/SDXNG
+prostration/M
+prosy/RT
+protactinium/MS
+protagonist/SM
+Protagoras/M
+protean/S
+protease/M
+protect/DVGS
+protected/UY
+protectionism/MS
+protectionist/MS
+protection/MS
+protectiveness/S
+protective/YPS
+protectorate/SM
+protector/MS
+protéges
+protégé/SM
+protein/MS
+proteolysis/M
+proteolytic
+Proterozoic/M
+protestantism
+Protestantism/MS
+protestant/S
+Protestant/SM
+protestation/MS
+protest/G
+protesting/Y
+Proteus/M
+protocol/DMGS
+protoplasmic
+protoplasm/MS
+prototype/SDGM
+prototypic
+prototypical/Y
+protozoa
+protozoan/MS
+protozoic
+protozoon's
+protract/DG
+protrude/SDG
+protrusile
+protrusion/MS
+protrusive/PY
+protuberance/S
+protuberant
+Proudhon/M
+proud/TRY
+Proust/M
+provabilities
+provability's
+provability/U
+provableness/M
+provable/P
+provably
+prov/DRGZB
+proved/U
+proven/U
+prove/ESDAG
+provenance/SM
+Provençal
+Provencals
+Provence/M
+provender/SDG
+provenience/SM
+provenly
+proverb/DG
+proverbial/Y
+Proverbs/M
+prover/M
+provide/DRSBGZ
+provided/U
+providence/SM
+Providence/SM
+providential/Y
+provident/Y
+provider/M
+province/SM
+provincialism/SM
+provincial/SY
+provisional/YS
+provisioner/M
+provision/R
+proviso/MS
+provocateur/S
+provocativeness/SM
+provocative/P
+provoked/U
+provoke/GZDRS
+provoking/Y
+provolone/SM
+Provo/M
+provost/MS
+prowess/SM
+prowler/M
+prowl/RDSZG
+prow/TRMS
+proximal/Y
+proximateness/M
+proximate/PY
+proximity/MS
+Proxmire/M
+proxy/SM
+Prozac
+prude/MS
+Prudence/M
+prudence/SM
+Prudential/M
+prudential/SY
+prudent/Y
+prudery/MS
+Prudi/M
+prudishness/SM
+prudish/YP
+Prudy/M
+Prue/M
+Pruitt/M
+Pru/M
+prune/DSRGZM
+pruner/M
+prurience/MS
+prurient/Y
+Prussia/M
+Prussian/S
+prussic
+Prut/M
+Pryce/M
+pry/DRSGTZ
+pryer's
+prying/Y
+P's
+PS
+p's/A
+psalmist/SM
+psalm/SGDM
+Psalms/M
+psalter
+Psalter/SM
+psaltery/MS
+psephologist/M
+pseudonymous
+pseudonym/SM
+pseudopod
+pseudo/S
+pseudoscience/S
+pshaw/SDG
+psi/S
+psittacoses
+psittacosis/M
+psoriases
+psoriasis/M
+psst/S
+PST
+psychedelically
+psychedelic/S
+psyche/M
+Psyche/M
+psychiatric
+psychiatrist/SM
+psychiatry/MS
+psychical/Y
+psychic/MS
+psychoacoustic/S
+psychoacoustics/M
+psychoactive
+psychoanalysis/M
+psychoanalyst/S
+psychoanalytic
+psychoanalytical
+psychoanalyze/SDG
+psychobabble/S
+psychobiology/M
+psychocultural
+psychodrama/MS
+psychogenic
+psychokinesis/M
+psycholinguistic/S
+psycholinguistics/M
+psycholinguists
+psychological/Y
+psychologist/MS
+psychology/MS
+psychometric/S
+psychometrics/M
+psychometry/M
+psychoneuroses
+psychoneurosis/M
+psychopathic/S
+psychopath/M
+psychopathology/M
+psychopaths
+psychopathy/SM
+psychophysical/Y
+psychophysic/S
+psychophysics/M
+psychophysiology/M
+psychosis/M
+psycho/SM
+psychosocial/Y
+psychosomatic/S
+psychosomatics/M
+psychos/S
+psychotherapeutic/S
+psychotherapist/MS
+psychotherapy/SM
+psychotically
+psychotic/S
+psychotropic/S
+psychs
+psych/SDG
+PT
+PTA
+Ptah/M
+ptarmigan/MS
+pt/C
+pterodactyl/SM
+Pt/M
+PTO
+Ptolemaic
+Ptolemaists
+Ptolemy/MS
+ptomaine/MS
+Pu
+pubbed
+pubbing
+pubertal
+puberty/MS
+pubes
+pubescence/S
+pubescent
+pubic
+pubis/M
+publican/AMS
+publication/AMS
+publicist/SM
+publicity/SM
+publicized/U
+publicize/SDG
+publicness/M
+publics/A
+public/YSP
+publishable/U
+published/UA
+publisher/ASM
+publishes/A
+publishing/M
+publish/JDRSBZG
+pub/MS
+Puccini/M
+puce/SM
+pucker/DG
+Puckett/M
+puck/GZSDRM
+puckishness/S
+puckish/YP
+Puck/M
+pudding/MS
+puddle/JMGRSD
+puddler/M
+puddling/M
+puddly
+pudenda
+pudendum/M
+pudginess/SM
+pudgy/PRT
+Puebla/M
+Pueblo/MS
+pueblo/SM
+puerile/Y
+puerility/SM
+puerperal
+puers
+Puerto/M
+puffball/SM
+puffer/M
+puffery/M
+puffiness/S
+puffin/SM
+Puff/M
+puff/SGZDRM
+puffy/PRT
+Puget/M
+pugged
+pugging
+Pugh/M
+pugilism/SM
+pugilistic
+pugilist/S
+pug/MS
+pugnaciousness/MS
+pugnacious/YP
+pugnacity/SM
+puissant/Y
+puke/GDS
+pukka
+Pulaski/SM
+pulchritude/SM
+pulchritudinous/M
+pule/GDS
+Pulitzer/SM
+pullback/S
+pull/DRGZSJ
+pullet/SM
+pulley/SM
+Pullman/MS
+pullout/S
+pullover/SM
+pulmonary
+pulpiness/S
+pulpit/MS
+pulp/MDRGS
+pulpwood/MS
+pulpy/PTR
+pulsar/MS
+pulsate/NGSDX
+pulsation/M
+pulse/ADSG
+pulser
+pulse's
+pulverable
+pulverization/MS
+pulverized/U
+pulverize/GZSRD
+pulverizer/M
+pulverizes/UA
+puma/SM
+pumice/SDMG
+pummel/SDG
+pumpernickel/SM
+pump/GZSMDR
+pumping/M
+pumpkin/MS
+punchbowl/M
+punched/U
+puncheon/MS
+puncher/M
+punch/GRSDJBZ
+punchline/S
+Punch/M
+punchy/RT
+punctilio/SM
+punctiliousness/SM
+punctilious/PY
+punctualities
+punctuality/UM
+punctualness/M
+punctual/PY
+punctuate/SDXNG
+punctuational
+punctuation/M
+puncture/SDMG
+punditry/S
+pundit/SM
+pungency/MS
+pungent/Y
+Punic
+puniness/MS
+punished/U
+punisher/M
+punishment/MS
+punish/RSDGBL
+punitiveness/M
+punitive/YP
+Punjabi/M
+Punjab/M
+punk/TRMS
+punky/PRS
+pun/MS
+punned
+punning
+punster/SM
+punter/M
+punt/GZMDRS
+puny/PTR
+pupae
+pupal
+pupa/M
+pupate/NGSD
+pupillage/M
+pupil/SM
+pup/MS
+pupped
+puppeteer/SM
+puppetry/MS
+puppet/SM
+pupping
+puppy/GSDM
+puppyish
+purblind
+Purcell/M
+purchasable
+purchase/GASD
+purchaser/MS
+purdah/M
+purdahs
+Purdue/M
+purebred/S
+puree/DSM
+pureeing
+pureness/MS
+pure/PYTGDR
+purgation/M
+purgative/MS
+purgatorial
+purgatory/SM
+purge/GZDSR
+purger/M
+purify/GSRDNXZ
+Purim/SM
+Purina/M
+purine/SM
+purism/MS
+puristic
+purist/MS
+puritanic
+puritanical/Y
+Puritanism/MS
+puritanism/S
+puritan/SM
+Puritan/SM
+purity/SM
+purlieu/SM
+purl/MDGS
+purloin/DRGS
+purloiner/M
+purple/MTGRSD
+purplish
+purport/DRSZG
+purported/Y
+purposefulness/S
+purposeful/YP
+purposelessness/M
+purposeless/PY
+purpose/SDVGYM
+purposiveness/M
+purposive/YP
+purr/DSG
+purring/Y
+purse/DSRGZM
+purser/M
+pursuance/MS
+pursuant
+pursuer/M
+pursue/ZGRSD
+pursuit/MS
+purulence/MS
+purulent
+Purus
+purveyance/MS
+purvey/DGS
+purveyor/MS
+purview/SM
+Pusan/M
+Pusey/M
+pushbutton/S
+pushcart/SM
+pushchair/SM
+pushdown
+push/DSRBGZ
+pusher/M
+pushily
+pushiness/MS
+Pushkin/M
+pushover/SM
+Pushtu/M
+pushy/PRT
+pusillanimity/MS
+pusillanimous/Y
+pus/SM
+puss/S
+pussycat/S
+pussyfoot/DSG
+pussy/TRSM
+pustular
+pustule/MS
+putative/Y
+Putin/M
+put/IS
+Putnam/M
+Putnem/M
+putout/S
+putrefaction/SM
+putrefactive
+putrefy/DSG
+putrescence/MS
+putrescent
+putridity/M
+putridness/M
+putrid/YP
+putsch/S
+putted/I
+puttee/MS
+putter/RDMGZ
+putting/I
+putt/SGZMDR
+puttying/M
+putty/SDMG
+puzzle/JRSDZLG
+puzzlement/MS
+puzzler/M
+PVC
+pvt
+Pvt/M
+PW
+PX
+p/XTGJ
+Pygmalion/M
+pygmy/SM
+Pygmy/SM
+Pyhrric/M
+pyknotic
+Pyle/M
+pylon/SM
+pylori
+pyloric
+pylorus/M
+Pym/M
+Pynchon/M
+Pyongyang/M
+pyorrhea/SM
+Pyotr/M
+pyramidal/Y
+pyramid/GMDS
+pyre/MS
+Pyrenees
+Pyrex/SM
+pyridine/M
+pyrimidine/SM
+pyrite/MS
+pyroelectric
+pyroelectricity/SM
+pyrolysis/M
+pyrolyze/RSM
+pyromaniac/SM
+pyromania/MS
+pyrometer/MS
+pyrometry/M
+pyrophosphate/M
+pyrotechnical
+pyrotechnic/S
+pyrotechnics/M
+pyroxene/M
+pyroxenite/M
+Pyrrhic
+Pythagoras/M
+Pythagorean/S
+Pythias
+Python/M
+python/MS
+pyx/MDSG
+q
+Q
+QA
+Qaddafi/M
+Qantas/M
+Qatar/M
+QB
+QC
+QED
+Qingdao
+Qiqihar/M
+QM
+Qom/M
+qr
+q's
+Q's
+qt
+qty
+qua
+Quaalude/M
+quackery/MS
+quackish
+quack/SDG
+quadded
+quadding
+quadrangle/MS
+quadrangular/M
+quadrant/MS
+quadraphonic/S
+quadrapole
+quadratical/Y
+quadratic/SM
+quadrature/MS
+quadrennial/SY
+quadrennium/MS
+quadric
+quadriceps/SM
+quadrilateral/S
+quadrille/XMGNSD
+quadrillion/MH
+quadripartite/NY
+quadriplegia/SM
+quadriplegic/SM
+quadrivia
+quadrivium/M
+quadrupedal
+quadruped/MS
+quadruple/GSD
+quadruplet/SM
+quadruplicate/GDS
+quadruply/NX
+quadrupole
+quad/SM
+quadword/MS
+quaffer/M
+quaff/SRDG
+quagmire/DSMG
+quahog/MS
+quail/GSDM
+quaintness/MS
+quaint/PTYR
+quake/GZDSR
+Quakeress/M
+Quakerism/S
+Quaker/SM
+quaky/RT
+qualification/ME
+qualified/UY
+qualifier/SM
+qualify/EGXSDN
+qualitative/Y
+quality/MS
+qualmish
+qualm/SM
+quandary/MS
+quangos
+quanta/M
+Quantico/M
+quantifiable/U
+quantified/U
+quantifier/M
+quantify/GNSRDZX
+quantile/S
+quantitativeness/M
+quantitative/PY
+quantity/MS
+quantization/MS
+quantizer/M
+quantize/ZGDRS
+quantum/M
+quarantine/DSGM
+quark/SM
+quarreler/M
+quarrellings
+quarrelsomeness/MS
+quarrelsome/PY
+quarrel/SZDRMG
+quarrier/M
+quarryman/M
+quarrymen
+quarry/RSDGM
+quarterback/SGMD
+quarterdeck/MS
+quarterer/M
+quarterfinal/MS
+quartering/M
+quarterly/S
+quartermaster/MS
+quarter/MDRYG
+quarterstaff/M
+quarterstaves
+quartet/SM
+quartic/S
+quartile/SM
+quarto/SM
+quart/RMSZ
+quartzite/M
+quartz/SM
+quasar/SM
+quash/GSD
+quasi
+quasilinear
+Quasimodo/M
+Quaternary
+quaternary/S
+quaternion/SM
+quatrain/SM
+quaver/GDS
+quavering/Y
+quavery
+Quayle/M
+quayside/M
+quay/SM
+queasily
+queasiness/SM
+queasy/TRP
+Quebec/M
+Quechua/M
+Queenie/M
+queenly/RT
+queen/SGMDY
+Queensland/M
+Queen/SM
+queerness/S
+queer/STGRDYP
+queller/M
+quell/SRDG
+Que/M
+quenchable/U
+quenched/U
+quencher/M
+quench/GZRSDB
+quenchless
+Quentin/M
+Quent/M
+Querida/M
+quern/M
+querulousness/S
+querulous/YP
+query/MGRSD
+quested/A
+quester/AS
+quester's
+quest/FSIM
+questing
+questionableness/M
+questionable/P
+questionably/U
+questioned/UA
+questioner/M
+questioning/UY
+questionnaire/MS
+question/SMRDGBZJ
+quests/A
+Quetzalcoatl/M
+queued/C
+queue/GZMDSR
+queuer/M
+queues/C
+queuing/C
+Quezon/M
+quibble/GZRSD
+quibbler/M
+quiche/SM
+quicken/RDG
+quickie/MS
+quicklime/SM
+quickness/MS
+quick/RNYTXPS
+quicksand/MS
+quicksilver/GDMS
+quickstep/SM
+quid/SM
+quiesce/D
+quiescence/MS
+quiescent/YP
+quieted/E
+quieten/SGD
+quieter/E
+quieter's
+quieting/E
+quietly/E
+quietness/MS
+quiets/E
+quietude/IEMS
+quietus/MS
+quiet/UTGPSDRY
+Quillan/M
+quill/GSDM
+Quill/M
+quilter/M
+quilting/M
+quilt/SZJGRDM
+quincentenary/M
+quince/SM
+Quincey/M
+quincy/M
+Quincy/M
+quinine/MS
+Quinlan/M
+Quinn/M
+quinquennial/Y
+quinsy/SM
+Quinta/M
+Quintana/M
+quintessence/SM
+quintessential/Y
+quintet/SM
+quintic
+quintile/SM
+Quintilian/M
+Quintilla/M
+quintillion/MH
+quintillionth/M
+Quintina/M
+Quintin/M
+Quint/M
+quint/MS
+Quinton/M
+quintuple/SDG
+quintuplet/MS
+Quintus/M
+quip/MS
+quipped
+quipper
+quipping
+quipster/SM
+quired/AI
+quire/MDSG
+quires/AI
+Quirinal/M
+quiring/IA
+quirkiness/SM
+quirk/SGMD
+quirky/PTR
+quirt/SDMG
+Quisling/M
+quisling/SM
+quitclaim/GDMS
+quit/DGS
+quite/SADG
+Quito/M
+quittance/SM
+quitter/SM
+quitting
+quiver/GDS
+quivering/Y
+quivery
+Quixote/M
+quixotic
+quixotically
+Quixotism/M
+quiz/M
+quizzed
+quizzer/SM
+quizzes
+quizzical/Y
+quizzing
+quo/H
+quoin/SGMD
+quoit/GSDM
+quondam
+quonset
+Quonset
+quorate/I
+quorum/MS
+quotability/S
+quota/MS
+quotation/SM
+quoter/M
+quote/UGSD
+quot/GDRB
+quotidian/S
+quotient/SM
+qwerty
+qwertys
+Rabat/M
+rabbet/GSMD
+Rabbi/M
+rabbi/MS
+rabbinate/MS
+rabbinic
+rabbinical/Y
+rabbiter/M
+rabbit/MRDSG
+rabble/GMRSD
+rabbler/M
+Rabelaisian
+Rabelais/M
+rabidness/SM
+rabid/YP
+rabies
+Rabi/M
+Rabin/M
+rabis
+Rab/M
+raccoon/SM
+racecourse/MS
+racegoers
+racehorse/SM
+raceme/MS
+race/MZGDRSJ
+racer/M
+racetrack/SMR
+raceway/SM
+Rachael/M
+Rachele/M
+Rachelle/M
+Rachel/M
+Rachmaninoff/M
+racialism/MS
+racialist/MS
+racial/Y
+racily
+Racine/M
+raciness/MS
+racism/S
+racist/MS
+racketeer/MDSJG
+racket/SMDG
+rackety
+rack/GDRMS
+raconteur/SM
+racoon's
+racquetball/S
+racquet's
+racy/RTP
+radarscope/MS
+radar/SM
+Radcliffe/M
+radded
+radder
+raddest
+Raddie/M
+radding
+Raddy/M
+radial/SY
+radiance/SM
+radian/SM
+radiant/YS
+radiate/XSDYVNG
+radiation/M
+radiative/Y
+radiator/MS
+radicalism/MS
+radicalization/S
+radicalize/GSD
+radicalness/M
+radical/SPY
+radices's
+radii/M
+radioactive/Y
+radioactivity/MS
+radioastronomical
+radioastronomy
+radiocarbon/MS
+radiochemical/Y
+radiochemistry/M
+radiogalaxy/S
+radiogram/SM
+radiographer/MS
+radiographic
+radiography/MS
+radioisotope/SM
+radiologic
+radiological/Y
+radiologist/MS
+radiology/MS
+radioman/M
+radiomen
+radiometer/SM
+radiometric
+radiometry/MS
+radionics
+radionuclide/M
+radiopasteurization
+radiophone/MS
+radiophysics
+radioscopy/SM
+radio/SMDG
+radiosonde/SM
+radiosterilization
+radiosterilized
+radiotelegraph
+radiotelegraphs
+radiotelegraphy/MS
+radiotelephone/SM
+radiotherapist/SM
+radiotherapy/SM
+radish/MS
+radium/MS
+radius/M
+radix/SM
+Rad/M
+radon/SM
+rad/S
+Raeann/M
+Rae/M
+RAF
+Rafaela/M
+Rafaelia/M
+Rafaelita/M
+Rafaellle/M
+Rafaello/M
+Rafael/M
+Rafa/M
+Rafe/M
+Raffaello/M
+Raffarty/M
+Rafferty/M
+raffia/SM
+raffishness/SM
+raffish/PY
+raffle/MSDG
+Raff/M
+Rafi/M
+Raf/M
+rafter/DM
+raft/GZSMDR
+raga/MS
+ragamuffin/MS
+ragbag/SM
+rage/MS
+raggedness/SM
+ragged/PRYT
+raggedy/TR
+ragging
+rag/GSMD
+raging/Y
+raglan/MS
+Ragnar/M
+Ragnarök
+ragout/SMDG
+ragtag/MS
+ragtime/MS
+ragweed/MS
+ragwort/M
+Rahal/M
+rah/DG
+Rahel/M
+rahs
+raider/M
+raid/MDRSGZ
+railbird/S
+rail/CDGS
+railer/SM
+railhead/SM
+railing/MS
+raillery/MS
+railroader/M
+railroading/M
+railroad/SZRDMGJ
+rail's
+railwaymen
+railway/MS
+raiment/SM
+Raimondo/M
+Raimund/M
+Raimundo/M
+Raina/M
+rainbow/MS
+raincloud/S
+raincoat/SM
+raindrop/SM
+Raine/MR
+Rainer/M
+rainfall/SM
+rainforest's
+rain/GSDM
+Rainier/M
+rainless
+rainmaker/SM
+rainmaking/MS
+rainproof/GSD
+rainstorm/SM
+rainwater/MS
+rainy/RT
+raise/DSRGZ
+raiser/M
+raising/M
+raisin/MS
+rajah/M
+rajahs
+Rajive/M
+raj/M
+Rakel/M
+rake/MGDRS
+raker/M
+rakishness/MS
+rakish/PY
+Raleigh/M
+Ralf/M
+Ralina/M
+rally/GSD
+Ralph/M
+Ralston/M
+Ra/M
+Ramada/M
+Ramadan/SM
+Ramakrishna/M
+Rama/M
+Raman/M
+Ramayana/M
+ramble/JRSDGZ
+rambler/M
+rambling/Y
+Rambo/M
+rambunctiousness/S
+rambunctious/PY
+ramekin/SM
+ramie/MS
+ramification/M
+ramify/XNGSD
+Ramirez/M
+Ramiro/M
+ramjet/SM
+Ram/M
+rammed
+ramming
+Ramo/MS
+Ramona/M
+Ramonda/M
+Ramon/M
+rampage/SDG
+rampancy/S
+rampant/Y
+rampart/SGMD
+ramp/GMDS
+ramrodded
+ramrodding
+ramrod/MS
+RAM/S
+Ramsay/M
+Ramses/M
+Ramsey/M
+ramshackle
+ram/SM
+rams/S
+ran/A
+Rana/M
+Rancell/M
+Rance/M
+rancher/M
+rancho/SM
+ranch/ZRSDMJG
+rancidity/MS
+rancidness/SM
+rancid/P
+rancorous/Y
+rancor/SM
+Randall/M
+Randal/M
+Randa/M
+Randee/M
+Randell/M
+Randene/M
+Randie/M
+Randi/M
+randiness/S
+Rand/M
+rand/MDGS
+Randolf/M
+Randolph/M
+randomization/SM
+randomize/SRDG
+randomness/SM
+random/PYS
+Randy/M
+randy/PRST
+Ranee/M
+ranee/SM
+ranged/C
+rangeland/S
+ranger/M
+ranges/C
+range/SM
+rang/GZDR
+ranginess/S
+ranging/C
+Rangoon/M
+rangy/RPT
+Rania/M
+Ranice/M
+Ranier/M
+Rani/MR
+Ranique/M
+rani's
+ranked/U
+ranker/M
+rank/GZTYDRMPJS
+Rankine/M
+ranking/M
+Rankin/M
+rankle/SDG
+rankness/MS
+Ranna/M
+ransacker/M
+ransack/GRDS
+Ransell/M
+ransomer/M
+Ransom/M
+ransom/ZGMRDS
+ranter/M
+rant/GZDRJS
+ranting/Y
+Raoul/M
+rapaciousness/MS
+rapacious/YP
+rapacity/MS
+rapeseed/M
+rape/SM
+Raphaela/M
+Raphael/M
+rapidity/MS
+rapidness/S
+rapid/YRPST
+rapier/SM
+rapine/SM
+rapist/MS
+rap/MDRSZG
+rapped
+rappelled
+rappelling
+rappel/S
+rapper/SM
+rapping/M
+rapporteur/SM
+rapport/SM
+rapprochement/SM
+rapscallion/MS
+raptness/S
+rapture/MGSD
+rapturousness/M
+rapturous/YP
+rapt/YP
+Rapunzel/M
+Raquela/M
+Raquel/M
+rarebit/MS
+rarefaction/MS
+rarefy/GSD
+rareness/MS
+rare/YTPGDRS
+rarity/SM
+Rasalgethi/M
+Rasalhague/M
+rascal/SMY
+rasher/M
+rashness/S
+rash/PZTYSR
+Rasia/M
+Rasla/M
+Rasmussen/M
+raspberry/SM
+rasper/M
+rasping/Y
+rasp/SGJMDR
+Rasputin/M
+raspy/RT
+Rastaban/M
+Rastafarian/M
+raster/MS
+Rastus/M
+ratchet/MDSG
+rateable
+rated/U
+rate/KNGSD
+ratepayer/SM
+rater/M
+rate's
+Ratfor/M
+rather
+Rather/M
+rathskeller/SM
+ratifier/M
+ratify/ZSRDGXN
+rating/M
+ratiocinate/VNGSDX
+ratiocination/M
+ratio/MS
+rationale/SM
+rationalism/SM
+rationalistic
+rationalist/S
+rationality/MS
+rationalization/SM
+rationalizer/M
+rationalize/ZGSRD
+rationalness/M
+rational/YPS
+ration/DSMG
+Ratliff/M
+ratlike
+ratline/SM
+rat/MDRSJZGB
+rattail
+rattan/MS
+ratted
+ratter/MS
+ratting
+rattlebrain/DMS
+rattle/RSDJGZ
+rattlesnake/MS
+rattletrap/MS
+rattling/Y
+rattly/TR
+rattrap/SM
+ratty/RT
+raucousness/SM
+raucous/YP
+Raul/M
+raunchily
+raunchiness/S
+raunchy/RTP
+ravage/GZRSD
+ravager/M
+raveling/S
+Ravel/M
+ravel/UGDS
+raven/JGMRDS
+Raven/M
+ravenous/YP
+raver/M
+rave/ZGDRSJ
+Ravid/M
+Ravi/M
+ravine/SDGM
+ravioli/SM
+ravisher/M
+ravishing/Y
+ravish/LSRDZG
+ravishment/SM
+Raviv/M
+Rawalpindi/M
+rawboned
+rawhide/SDMG
+Rawley/M
+Rawlings/M
+Rawlins/M
+Rawlinson/M
+rawness/SM
+raw/PSRYT
+Rawson/M
+Rayburn/M
+Raychel/M
+Raye/M
+ray/GSMD
+Rayleigh/M
+Ray/M
+Raymond/M
+Raymondville/M
+Raymund/M
+Raymundo/M
+Rayna/M
+Raynard/M
+Raynell/M
+Rayner/M
+Raynor/M
+rayon/SM
+Rayshell/M
+Raytheon/M
+raze/DRSG
+razer/M
+razorback/SM
+razorblades
+razor/MDGS
+razz/GDS
+razzmatazz/S
+Rb
+RBI/S
+RC
+RCA
+rcpt
+RCS
+rd
+RD
+RDA
+Rd/M
+reabbreviate
+reachability
+reachable/U
+reachably
+reached/U
+reacher/M
+reach/GRB
+reacquisition
+reactant/SM
+reacted/U
+reaction
+reactionary/SM
+reactivity
+readability/MS
+readable/P
+readably
+readdress/G
+Reade/M
+reader/M
+readership/MS
+Read/GM
+readied
+readies
+readily
+readinesses
+readiness/UM
+reading/M
+Reading/M
+read/JGZBR
+readopt/G
+readout/MS
+reads/A
+readying
+ready/TUPR
+Reagan/M
+Reagen/M
+realisms
+realism's
+realism/U
+realistically/U
+realistic/U
+realist/SM
+reality/USM
+realizability/MS
+realizableness/M
+realizable/SMP
+realizably/S
+realization/MS
+realized/U
+realize/JRSDBZG
+realizer/M
+realizes/U
+realizing/MY
+realm/M
+realness/S
+realpolitik/SM
+real/RSTP
+realtor's
+Realtor/S
+realty/SM
+Rea/M
+reamer/M
+ream/MDRGZ
+Reamonn/M
+reanimate
+reaper/M
+reappraise/G
+reap/SGZ
+rear/DRMSG
+rearguard/MS
+rearmost
+rearrange/L
+rearward/S
+reasonableness/SMU
+reasonable/UP
+reasonably/U
+Reasoner/M
+reasoner/SM
+reasoning/MS
+reasonless
+reasons
+reason/UBDMG
+reassess/GL
+reassuringly/U
+reattach/GSL
+reawakening/M
+Reba/M
+rebate/M
+Rebbecca/M
+Rebeca/M
+Rebecca's
+Rebecka/M
+Rebekah/M
+Rebeka/M
+Rebekkah/M
+rebeller
+rebellion/SM
+rebelliousness/MS
+rebellious/YP
+rebel/MS
+Rebe/M
+rebid
+rebidding
+rebind/G
+rebirth
+reboil/G
+rebook
+reboot/ZR
+rebound/G
+rebroadcast/MG
+rebuke/RSDG
+rebuking/Y
+rebus
+rebuttal/SM
+rebutting
+rec
+recalcitrance/SM
+recalcitrant/S
+recalibrate/N
+recantation/S
+recant/G
+recap
+recappable
+recapping
+recast/G
+recd
+rec'd
+recede
+receipt/SGDM
+receivable/S
+received/U
+receiver/M
+receivership/SM
+receive/ZGRSDB
+recency/M
+recension/M
+recentness/SM
+recent/YPT
+receptacle/SM
+receptionist/MS
+reception/MS
+receptiveness/S
+receptive/YP
+receptivity/S
+receptor/MS
+recessional/S
+recessionary
+recessiveness/M
+recessive/YPS
+recess/SDMVG
+rechargeable
+recheck/G
+recherché
+recherches
+recidivism/MS
+recidivist/MS
+Recife/M
+recipe/MS
+recipiency
+recipient/MS
+reciprocal/SY
+reciprocate/NGXVDS
+reciprocation/M
+reciprocity/MS
+recitalist/S
+recital/MS
+recitative/MS
+reciter/M
+recite/ZR
+recked
+recking
+recklessness/S
+reckless/PY
+reckoner/M
+reckoning/M
+reckon/SGRDJ
+reclaim/B
+reclamation/SM
+recliner/M
+recline/RSDZG
+recluse/MVNS
+reclusion/M
+recode/G
+recognizability
+recognizable/U
+recognizably
+recognize/BZGSRD
+recognizedly/S
+recognized/U
+recognizer/M
+recognizingly/S
+recognizing/UY
+recoilless
+recoinage
+recolor/GD
+recombinant
+recombine
+recommended/U
+recompense/GDS
+recompute/B
+reconciled/U
+reconciler/M
+reconcile/SRDGB
+reconditeness/M
+recondite/YP
+reconfigurability
+reconfigure/R
+reconnaissance/MS
+reconnect/R
+reconnoiter/GSD
+reconquer/G
+reconsecrate
+reconstitute
+reconstructed/U
+Reconstruction/M
+reconsult/G
+recontact/G
+recontaminate/N
+recontribute
+recook/G
+recopy/G
+recorded/AU
+records/A
+record/ZGJ
+recourse
+recoverability
+recoverable/U
+recover/B
+recovery/MS
+recreant/S
+recreational
+recriminate/GNVXDS
+recrimination/M
+recriminatory
+recross/G
+recrudesce/GDS
+recrudescence/MS
+recrudescent
+recruiter/M
+recruitment/MS
+recruit/ZSGDRML
+recrystallize
+rectal/Y
+rectangle/SM
+rectangular/Y
+recta's
+rectifiable
+rectification/M
+rectifier/M
+rectify/DRSGXZN
+rectilinear/Y
+rectitude/MS
+recto/MS
+rector/SM
+rectory/MS
+rectum/SM
+recumbent/Y
+recuperate/VGNSDX
+recuperation/M
+recur
+recurrence/MS
+recurrent
+recurse/NX
+recursion/M
+recusant/M
+recuse
+recyclable/S
+recycle/BZ
+redact/DGS
+redaction/SM
+redactor/MS
+redbird/SM
+redbreast/SM
+redbrick/M
+redbud/M
+redcap/MS
+redcoat/SM
+redcurrant/M
+redden/DGS
+redder
+reddest
+redding
+reddish/P
+Redd/M
+redeclaration
+redecorate
+redeemable/U
+redeem/BRZ
+redeemed/U
+redeemer/M
+Redeemer/M
+redemptioner/M
+redemption/RMS
+redemptive
+redeposit/M
+redetermination
+Redford/M
+Redgrave/M
+redhead/DRMS
+Redhook/M
+redial/G
+redirect/G
+redirection
+redlining/S
+Redmond/M
+redneck/SMD
+redness/MS
+redo/G
+redolence/MS
+redolent
+Redondo/M
+redouble/S
+redoubtably
+redound/GDS
+red/PYS
+redshift/S
+redskin/SM
+Redstone/M
+reduced/U
+reducer/M
+reduce/RSDGZ
+reducibility/M
+reducible
+reducibly
+reductionism/M
+reductionist/S
+reduction/SM
+reduct/V
+redundancy/SM
+redundant/Y
+redwood/SM
+redye
+redyeing
+Reeba/M
+Reebok/M
+Reece/M
+reecho/G
+reed/GMDR
+reediness/SM
+reeding/M
+Reed/M
+Reedville/M
+reedy/PTR
+reefer/M
+reef/GZSDRM
+reeker/M
+reek/GSR
+reeler/M
+reel's
+reel/USDG
+Ree/MDS
+Reena/M
+reenforcement
+reentrant
+Reese/M
+reestimate/M
+Reeta/M
+Reeva/M
+reeve/G
+Reeves
+reexamine
+refection/SM
+refectory/SM
+refer/B
+refereed/U
+refereeing
+referee/MSD
+reference/CGSRD
+referenced/U
+reference's
+referencing/U
+referendum/MS
+referentiality
+referential/YM
+referent/SM
+referral/SM
+referred
+referrer/S
+referring
+reffed
+reffing
+refile
+refinance
+refined/U
+refine/LZ
+refinement/MS
+refinish/G
+refit
+reflectance/M
+reflected/U
+reflectional
+reflection/SM
+reflectiveness/M
+reflective/YP
+reflectivity/M
+reflector/MS
+reflect/SDGV
+reflexion/MS
+reflexiveness/M
+reflexive/PSY
+reflexivity/M
+reflex/YV
+reflooring
+refluent
+reflux/G
+refocus/G
+refold/G
+reforestation
+reforge/G
+reformatory/SM
+reform/B
+reformed/U
+reformer/M
+reformism/M
+reformist/S
+refract/DGVS
+refractiveness/M
+refractive/PY
+refractometer/MS
+refractoriness/M
+refractory/PS
+refrain/DGS
+refreshed/U
+refreshing/Y
+refresh/LB
+refreshment/MS
+refrigerant/MS
+refrigerated/U
+refrigerate/XDSGN
+refrigeration/M
+refrigerator/MS
+refrozen
+refry/GS
+refugee/MS
+refuge/SDGM
+Refugio/M
+refulgence/SM
+refulgent
+refund/B
+refunder/M
+refurbish/L
+refurbishment/S
+refusal/SM
+refuse/R
+refuser/M
+refutation/MS
+refute/GZRSDB
+refuter/M
+ref/ZS
+reg
+regale/L
+regalement/S
+regal/GYRD
+regalia/M
+Regan/M
+regard/EGDS
+regardless/PY
+regather/G
+regatta/MS
+regency/MS
+regeneracy/MS
+regenerately
+regenerateness/M
+regenerate/U
+Regen/M
+reggae/SM
+Reggie/M
+Reggi/MS
+Reggy/M
+regicide/SM
+regime/MS
+regimen/MS
+regimental/S
+regimentation/MS
+regiment/SDMG
+Reginae
+Reginald/M
+Regina/M
+Reginauld/M
+Regine/M
+regionalism/MS
+regional/SY
+region/SM
+Regis/M
+register's
+register/UDSG
+registrable
+registrant/SM
+registrar/SM
+registration/AM
+registrations
+registry/MS
+Reg/MN
+regnant
+Regor/M
+regress/DSGV
+regression/MS
+regressiveness/M
+regressive/PY
+regressors
+regretfulness/M
+regretful/PY
+regret/S
+regrettable
+regrettably
+regretted
+regretting
+reground
+regroup/G
+regrow/G
+regularity/MS
+regularization/MS
+regularize/SDG
+regular/YS
+regulate/CSDXNG
+regulated/U
+regulation/M
+regulative
+regulator/SM
+regulatory
+Regulus/M
+regurgitate/XGNSD
+regurgitation/M
+rehabbed
+rehabbing
+rehabilitate/SDXVGN
+rehabilitation/M
+rehab/S
+rehang/G
+rehear/GJ
+rehearsal/SM
+rehearse
+rehearsed/U
+rehearser/M
+rehears/R
+reheat/G
+reheating/M
+Rehnquist
+rehydrate
+Reichenberg/M
+Reich/M
+Reichstags
+Reichstag's
+Reidar/M
+Reider/M
+Reid/MR
+reign/MDSG
+Reiko/M
+Reilly/M
+reimburse/GSDBL
+reimbursement/MS
+Reinald/M
+Reinaldo/MS
+Reina/M
+reindeer/M
+Reine/M
+reinforced/U
+reinforce/GSRDL
+reinforcement/MS
+reinforcer/M
+rein/GDM
+Reinhard/M
+Reinhardt/M
+Reinhold/M
+Reinold/M
+reinstate/L
+reinstatement/MS
+reinsurance
+Reinwald/M
+reissue
+REIT
+reiterative/SP
+rejecter/M
+rejecting/Y
+rejection/SM
+rejector/MS
+reject/RDVGS
+rejigger
+rejoice/RSDJG
+rejoicing/Y
+rejoinder/SM
+rejuvenate/NGSDX
+rejuvenatory
+relapse
+relatedly
+relatedness/MS
+related/U
+relater/M
+relate/XVNGSZ
+relational/Y
+relation/M
+relationship/MS
+relativeness/M
+relative/SPY
+relativism/M
+relativistic
+relativistically
+relativist/MS
+relativity/MS
+relator's
+relaxant/SM
+relaxation/MS
+relaxedness/M
+relaxed/YP
+relax/GZD
+relaxing/Y
+relay/GDM
+relearn/G
+releasable/U
+release/B
+released/U
+relenting/U
+relentlessness/SM
+relentless/PY
+relent/SDG
+relevance/SM
+relevancy/MS
+relevant/Y
+reliability/UMS
+reliables
+reliable/U
+reliably/U
+reliance/MS
+reliant/Y
+relicense/R
+relic/MS
+relict/C
+relict's
+relief/M
+relievedly
+relieved/U
+reliever/M
+relieve/RSDZG
+religionists
+religion/SM
+religiosity/M
+religiousness/MS
+religious/PY
+relink/G
+relinquish/GSDL
+relinquishment/SM
+reliquary/MS
+relish/GSD
+relive/GB
+reload/GR
+relocate/B
+reluctance/MS
+reluctant/Y
+rel/V
+rely/DG
+rem
+Re/M
+remade/S
+remainder/SGMD
+remain/GD
+remake/M
+remand/DGS
+remap
+remapping
+remarkableness/S
+remarkable/U
+remarkably
+remark/BG
+remarked/U
+Remarque/M
+rematch/G
+Rembrandt/M
+remeasure/D
+remediableness/M
+remediable/P
+remedy/SDMG
+remembered/U
+rememberer/M
+remember/GR
+remembrance/MRS
+remembrancer/M
+Remington/M
+reminisce/GSD
+reminiscence/SM
+reminiscent/Y
+remissness/MS
+remiss/YP
+remit/S
+remittance/MS
+remitted
+remitting/U
+Rem/M
+remnant/MS
+remodel/G
+remolding
+remonstrant/MS
+remonstrate/SDXVNG
+remonstration/M
+remonstrative/Y
+remorsefulness/M
+remorseful/PY
+remorselessness/MS
+remorseless/YP
+remorse/SM
+remoteness/MS
+remote/RPTY
+remoulds
+removal/MS
+REM/S
+remunerated/U
+remunerate/VNGXSD
+remuneration/M
+remunerativeness/M
+remunerative/YP
+Remus/M
+Remy/M
+Renado/M
+Renae/M
+renaissance/S
+Renaissance/SM
+renal
+Renaldo/M
+Rena/M
+Renard/M
+Renascence/SM
+Renata/M
+Renate/M
+Renato/M
+renaturation
+Renaud/M
+Renault/MS
+rend
+renderer/M
+render/GJRD
+rendering/M
+rendezvous/DSMG
+rendition/GSDM
+rend/RGZS
+Renee/M
+renegade/SDMG
+renege/GZRSD
+reneger/M
+Renelle/M
+Renell/M
+Rene/M
+renewal/MS
+renew/BG
+renewer/M
+Renie/M
+rennet/MS
+Rennie/M
+rennin/SM
+Renoir/M
+Reno/M
+renounce/LGRSD
+renouncement/MS
+renouncer/M
+renovate/NGXSD
+renovation/M
+renovator/SM
+renown/SGDM
+Rensselaer/M
+rentaller
+rental/SM
+renter/M
+rent/GZMDRS
+renumber/G
+renumeration
+renunciate/VNX
+renunciation/M
+Renville/M
+reoccupy/G
+reopen/G
+reorganized/U
+repack/G
+repairable/U
+repair/BZGR
+repairer/M
+repairman/M
+repairmen
+repairs/E
+repaper
+reparable
+reparation/SM
+reparteeing
+repartee/MDS
+repartition/Z
+repast/G
+repatriate/SDXNG
+repave
+repealer/M
+repeal/GR
+repeatability/M
+repeatable/U
+repeatably
+repeated/Y
+repeater/M
+repeat/RDJBZG
+repelled
+repellent/SY
+repelling/Y
+repel/S
+repentance/SM
+repentant/SY
+repent/RDG
+repertoire/SM
+repertory/SM
+repetition
+repetitiousness/S
+repetitious/YP
+repetitiveness/MS
+repetitive/PY
+repine/R
+repiner/M
+replace/RL
+replay/GM
+replenish/LRSDG
+replenishment/S
+repleteness/MS
+replete/SDPXGN
+repletion/M
+replica/SM
+replicate/SDVG
+replicator/S
+replug
+reply/X
+Rep/M
+repopulate
+reported/Y
+reportorial/Y
+reposeful
+repose/M
+repository/MS
+reprehend/GDS
+reprehensibility/MS
+reprehensibleness/M
+reprehensible/P
+reprehensibly
+reprehension/MS
+representable/U
+representational/Y
+representativeness/M
+Representative/S
+representative/SYMP
+representativity
+represented/U
+represent/GB
+repression/SM
+repressiveness/M
+repressive/YP
+repress/V
+reprieve/GDS
+reprimand/SGMD
+reprint/M
+reprisal/MS
+reproacher/M
+reproachfulness/M
+reproachful/YP
+reproach/GRSDB
+reproaching/Y
+reprobate/N
+reprocess/G
+reproducibility/MS
+reproducible/S
+reproducibly
+reproductive/S
+reproof/G
+reprove/R
+reproving/Y
+rep/S
+reptile/SM
+reptilian/S
+Republicanism/S
+republicanism/SM
+Republican/S
+republic/M
+republish/G
+repudiate/XGNSD
+repudiation/M
+repudiator/S
+repugnance/MS
+repugnant/Y
+repulse/VNX
+repulsion/M
+repulsiveness/MS
+repulsive/PY
+reputability/SM
+reputably/E
+reputation/SM
+reputed/Y
+repute/ESB
+reputing
+requested/U
+request/G
+Requiem/MS
+requiem/SM
+require/LR
+requirement/MS
+requisiteness/M
+requisite/PNXS
+requisitioner/M
+requisition/GDRM
+requital/MS
+requited/U
+requiter/M
+requite/RZ
+reread/G
+rerecord/G
+rerouteing
+rerunning
+res/C
+rescale
+rescind/SDRG
+rescission/SM
+rescue/GZRSD
+reseal/BG
+research/MB
+reselect/G
+resemblant
+resemble/DSG
+resend/G
+resent/DSLG
+resentfulness/SM
+resentful/PY
+resentment/MS
+reserpine/MS
+reservation/MS
+reservednesses
+reservedness/UM
+reserved/UYP
+reservist/SM
+reservoir/MS
+reset/RDG
+resettle/L
+reshipping
+reshow/G
+reshuffle/M
+reside/G
+residence/MS
+residency/SM
+residential/Y
+resident/SM
+resider/M
+residua
+residual/YS
+residuary
+residue/SM
+residuum/M
+resignation/MS
+resigned/YP
+resilience/MS
+resiliency/S
+resilient/Y
+resin/D
+resinlike
+resinous
+resiny
+resistance/SM
+Resistance/SM
+resistantly
+resistants
+resistant/U
+resisted/U
+resistible
+resistibly
+resisting/U
+resistiveness/M
+resistive/PY
+resistivity/M
+resistless
+resistor/MS
+resist/RDZVGS
+resize/G
+resold
+resole/G
+resoluble
+resoluteness/MS
+resolute/PYTRV
+resolvability/M
+resolvable/U
+resolved/U
+resolvent
+resonance/SM
+resonant/YS
+resonate/DSG
+resonator/MS
+resorption/MS
+resort/R
+resound/G
+resourcefulness/SM
+resourceful/PY
+resp
+respectability/SM
+respectable/SP
+respectably
+respect/BSDRMZGV
+respected/E
+respectful/EY
+respectfulness/SM
+respecting/E
+respectiveness/M
+respective/PY
+respect's/E
+respects/E
+respell/G
+respiration/MS
+respirator/SM
+respiratory/M
+resplendence/MS
+resplendent/Y
+respondent/MS
+respond/SDRZG
+responser/M
+response/RSXMV
+responsibility/MS
+responsibleness/M
+responsible/P
+responsibly
+responsiveness/MSU
+responsive/YPU
+respray/G
+restart/B
+restate/L
+restaurant/SM
+restaurateur/SM
+rest/DRSGVM
+rested/U
+rester/M
+restfuller
+restfullest
+restfulness/MS
+restful/YP
+restitution/SM
+restiveness/SM
+restive/PY
+restlessness/MS
+restless/YP
+restorability
+Restoration/M
+restoration/MS
+restorative/PYS
+restorer/M
+restore/Z
+restrained/UY
+restraint/MS
+restrict/DVGS
+restricted/YU
+restriction/SM
+restrictively
+restrictiveness/MS
+restrictives
+restrictive/U
+restroom/SM
+restructurability
+restructure
+rest's/U
+rests/U
+restudy/M
+restyle
+resubstitute
+resultant/YS
+result/SGMD
+resume/SDBG
+resumption/MS
+resurface
+resurgence/MS
+resurgent
+resurrect/GSD
+resurrection/SM
+resurvey/G
+resuscitate/XSDVNG
+resuscitation/M
+resuscitator/MS
+retail/Z
+retainer/M
+retain/LZGSRD
+retake
+retaliate/VNGXSD
+retaliation/M
+retaliatory
+Reta/M
+retardant/SM
+retardation/SM
+retarder/M
+retard/ZGRDS
+retch/SDG
+retention/SM
+retentiveness/S
+retentive/YP
+retentivity/M
+retest/G
+Retha/M
+rethought
+reticence/S
+reticent/Y
+reticle/SM
+reticular
+reticulate/GNYXSD
+reticulation/M
+reticule/MS
+reticulum/M
+retinal/S
+retina/SM
+retinue/MS
+retiredness/M
+retiree/MS
+retire/L
+retirement/SM
+retiring/YP
+retort/GD
+retract/DG
+retractile
+retrench/L
+retrenchment/MS
+retributed
+retribution/MS
+retributive
+retrieval/SM
+retriever/M
+retrieve/ZGDRSB
+retroactive/Y
+retrofire/GMSD
+retrofit/S
+retrofitted
+retrofitting
+retroflection
+retroflex/D
+retroflexion/M
+retrogradations
+retrograde/GYDS
+retrogression/MS
+retrogressive/Y
+retrogress/SDVG
+retrorocket/MS
+retro/SM
+retrospection/MS
+retrospective/SY
+retrospect/SVGMD
+retrovirus/S
+retrovision
+retry/G
+retsina/SM
+returnable/S
+returned/U
+returnee/SM
+retype
+Reube/M
+Reuben/M
+Reub/NM
+Reunion/M
+reuse/B
+Reuters
+Reuther/M
+reutilization
+Reuven/M
+Reva/M
+revanchist
+revealed/U
+revealingly
+revealing/U
+reveal/JBG
+reveille/MS
+revelation/MS
+Revelation/MS
+revelatory
+revelry/MS
+revel/SJRDGZ
+revenge/MGSRD
+revenger/M
+revenuer/M
+revenue/ZR
+reverberant
+reverberate/XVNGSD
+reverberation/M
+revere/GSD
+Revere/M
+reverencer/M
+reverence/SRDGM
+Reverend
+reverend/SM
+reverential/Y
+reverent/Y
+reverie/SM
+reversal/MS
+reverser/M
+reverse/Y
+reversibility/M
+reversible/S
+reversibly
+reversioner/M
+reversion/R
+revers/M
+reverter/M
+revertible
+revert/RDVGS
+revet/L
+revetment/SM
+review/G
+revile/GZSDL
+revilement/MS
+reviler/M
+revise/BRZ
+revised/U
+revisionary
+revisionism/SM
+revisionist/SM
+revitalize/ZR
+revivalism/MS
+revivalist/MS
+revival/SM
+reviver/M
+revive/RSDG
+revivification/M
+revivify/X
+Revkah/M
+Revlon/M
+Rev/M
+revocable
+revoke/GZRSD
+revolter/M
+revolt/GRD
+revolting/Y
+revolutionariness/M
+revolutionary/MSP
+revolutionist/MS
+revolutionize/GDSRZ
+revolutionizer/M
+revolution/SM
+revolve/BSRDZJG
+revolver/M
+revue/MS
+revulsion/MS
+revved
+revving
+rev/ZM
+rewarded/U
+rewarding/Y
+rewarm/G
+reweave
+rewedding
+reweigh/G
+rewind/BGR
+rewire/G
+rework/G
+rexes
+Rex/M
+Reyes
+Reykjavik/M
+re/YM
+Rey/M
+Reynaldo/M
+Reyna/M
+Reynard/M
+Reynold/SM
+rezone
+Rf
+RF
+RFC
+RFD
+R/G
+rhapsodic
+rhapsodical
+rhapsodize/GSD
+rhapsody/SM
+Rhea/M
+rhea/SM
+Rheba/M
+Rhee/M
+Rheims/M
+Rheinholdt/M
+Rhenish
+rhenium/MS
+rheology/M
+rheostat/MS
+rhesus/S
+Rheta/M
+rhetorical/YP
+rhetorician/MS
+rhetoric/MS
+Rhetta/M
+Rhett/M
+rheumatically
+rheumatic/S
+rheumatics/M
+rheumatism/SM
+rheumatoid
+rheum/MS
+rheumy/RT
+Rhiamon/M
+Rhianna/M
+Rhiannon/M
+Rhianon/M
+Rhinelander/M
+Rhineland/RM
+Rhine/M
+rhinestone/SM
+rhinitides
+rhinitis/M
+rhinoceros/MS
+rhino/MS
+rhinotracheitis
+rhizome/MS
+Rh/M
+Rhoda/M
+Rhodes
+Rhodesia/M
+Rhodesian/S
+Rhodia/M
+Rhodie/M
+rhodium/MS
+rhododendron/SM
+rhodolite/M
+rhodonite/M
+Rhody/M
+rhombic
+rhomboidal
+rhomboid/SM
+rhombus/SM
+rho/MS
+Rhona/M
+Rhonda/M
+Rhone
+rhubarb/MS
+rhyme/DSRGZM
+rhymester/MS
+Rhys/M
+rhythmical/Y
+rhythmic/S
+rhythmics/M
+rhythm/MS
+RI
+rial/MS
+Riane/M
+Riannon/M
+Rianon/M
+ribaldry/MS
+ribald/S
+ribbed
+Ribbentrop/M
+ribber/S
+ribbing/M
+ribbon/DMSG
+ribcage
+rib/MS
+riboflavin/MS
+ribonucleic
+ribosomal
+ribosome/MS
+Rica/M
+Rican/SM
+Ricard/M
+Ricardo/M
+Ricca/M
+Riccardo/M
+rice/DRSMZG
+Rice/M
+ricer/M
+Richard/MS
+Richardo/M
+Richardson/M
+Richart/M
+Richelieu/M
+richen/DG
+Richey/M
+Richfield/M
+Richie/M
+Richland/M
+Rich/M
+Richmond/M
+Richmound/M
+richness/MS
+Richter/M
+Richthofen/M
+Richy/M
+rich/YNSRPT
+Rici/M
+Rickard/M
+Rickenbacker/M
+Rickenbaugh/M
+Rickert/M
+rickets/M
+rickety/RT
+Rickey/M
+rick/GSDM
+Rickie/M
+Ricki/M
+Rick/M
+Rickover/M
+rickrack/MS
+rickshaw/SM
+Ricky/M
+Ric/M
+ricochet/GSD
+Rico/M
+Ricoriki/M
+ricotta/MS
+riddance/SM
+ridden
+ridding
+riddle/GMRSD
+Riddle/M
+ride/CZSGR
+Ride/M
+rider/CM
+riderless
+ridership/S
+ridge/DSGM
+Ridgefield/M
+ridgepole/SM
+Ridgway/M
+ridgy/RT
+ridicule/MGDRS
+ridiculer/M
+ridiculousness/MS
+ridiculous/PY
+riding/M
+rid/ZGRJSB
+Riemann/M
+Riesling/SM
+rife/RT
+riff/GSDM
+riffle/SDG
+riffraff/SM
+rifled/U
+rifle/GZMDSR
+rifleman/M
+riflemen
+rifler/M
+rifling/M
+rift/GSMD
+Riga/M
+rigamarole's
+rigatoni/M
+Rigel/M
+rigged
+rigger/SM
+rigging/MS
+Riggs/M
+righteousnesses/U
+righteousness/MS
+righteous/PYU
+rightfulness/MS
+rightful/PY
+rightism/SM
+rightist/S
+rightmost
+rightness/MS
+Right/S
+right/SGTPYRDN
+rightsize/SDG
+rights/M
+rightward/S
+rigidify/S
+rigidity/S
+rigidness/S
+rigid/YP
+rigmarole/MS
+rig/MS
+Rigoberto/M
+Rigoletto/M
+rigor/MS
+rigorousness/S
+rigorous/YP
+Riki/M
+Rikki/M
+Rik/M
+rile/DSG
+Riley/M
+Rilke/M
+rill/GSMD
+Rimbaud/M
+rime/MS
+rimer/M
+rim/GSMDR
+rimless
+rimmed
+rimming
+Rinaldo/M
+Rina/M
+rind/MDGS
+Rinehart/M
+ringer/M
+ring/GZJDRM
+ringing/Y
+ringleader/MS
+ringlet/SM
+ringlike
+Ringling/M
+Ring/M
+ringmaster/MS
+Ringo/M
+ringside/ZMRS
+ringworm/SM
+rink/GDRMS
+rinse/DSRG
+Riobard/M
+Rio/MS
+Riordan/M
+rioter/M
+riotousness/M
+riotous/PY
+riot/SMDRGZJ
+RIP
+riparian/S
+ripcord/SM
+ripened/U
+ripenesses
+ripeness/UM
+ripen/RDG
+ripe/PSY
+riper/U
+ripest/U
+Ripley/M
+Rip/M
+rip/NDRSXTG
+ripoff/S
+riposte/SDMG
+ripped
+ripper/SM
+ripping
+rippler/M
+ripple/RSDGM
+ripply/TR
+ripsaw/GDMS
+riptide/SM
+Risa/M
+RISC
+risen
+riser/M
+rise/RSJZG
+risibility/SM
+risible/S
+rising/M
+risker/M
+risk/GSDRM
+riskily
+riskiness/MS
+risky/RTP
+risotto/SM
+risqué
+rissole/M
+Ritalin
+Rita/M
+Ritchie/M
+rite/DSM
+Ritter/M
+ritualism/SM
+ritualistic
+ritualistically
+ritualized
+ritual/MSY
+Ritz/M
+ritzy/TR
+rivaled/U
+Rivalee/M
+rivalry/MS
+rival/SGDM
+Riva/MS
+rive/CSGRD
+Rivera/M
+riverbank/SM
+riverbed/S
+riverboat/S
+river/CM
+riverfront
+riverine
+Rivers
+Riverside/M
+riverside/S
+Riverview/M
+riveter/M
+rivet/GZSRDM
+riveting/Y
+Riviera/MS
+Rivi/M
+Rivkah/M
+rivulet/SM
+Rivy/M
+riv/ZGNDR
+Riyadh/M
+riyal/SM
+rm
+RMS
+RN
+RNA
+Rn/M
+roach/GSDM
+Roach/M
+roadbed/MS
+roadblock/SMDG
+roadhouse/SM
+roadie/S
+roadkill/S
+road/MIS
+roadrunner/MS
+roadshow/S
+roadside/S
+roadsigns
+roadster/SM
+roadsweepers
+roadway/SM
+roadwork/SM
+roadworthy
+roam/DRGZS
+Roana/M
+Roanna/M
+Roanne/M
+Roanoke/M
+roan/S
+roar/DRSJGZ
+roarer/M
+roaring/T
+Roarke/M
+roaster/M
+roast/SGJZRD
+robbed
+robber/SM
+Robbert/M
+robbery/SM
+Robbie/M
+Robbi/M
+robbing
+Robbin/MS
+Robb/M
+Robby/M
+Robbyn/M
+robe/ESDG
+Robena/M
+Robenia/M
+Robers/M
+Roberson/M
+Roberta/M
+Robert/MS
+Roberto/M
+Robertson/SM
+robe's
+Robeson/M
+Robespierre/M
+Robina/M
+Robinet/M
+Robinetta/M
+Robinette/M
+Robinett/M
+Robinia/M
+Robin/M
+robin/MS
+Robinson/M
+Robinsonville/M
+Robles/M
+Rob/MZ
+robotic/S
+robotism
+robotize/GDS
+robot/MS
+rob/SDG
+Robson/M
+Robt/M
+robustness/SM
+robust/RYPT
+Roby/M
+Robyn/M
+Rocco/M
+Rocha/M
+Rochambeau/M
+Rochella/M
+Rochelle/M
+Rochell/M
+Roche/M
+Rochester/M
+Rochette/M
+Roch/M
+rockabilly/MS
+rockabye
+Rockaway/MS
+rockbound
+Rockefeller/M
+rocker/M
+rocketry/MS
+rocket/SMDG
+Rockey/M
+rockfall/S
+Rockford/M
+rock/GZDRMS
+Rockie/M
+rockiness/MS
+Rockland/M
+Rock/M
+Rockne/M
+Rockville/M
+Rockwell/M
+Rocky/SM
+rocky/SRTP
+rococo/MS
+Roda/M
+rodded
+Roddenberry/M
+rodder
+Roddie/M
+rodding
+Rodd/M
+Roddy/M
+rodent/MS
+rodeo/SMDG
+Roderich/M
+Roderick/M
+Roderic/M
+Roderigo/M
+rode/S
+Rodger/M
+Rodge/ZMR
+Rodie/M
+Rodi/M
+Rodina/M
+Rodin/M
+Rod/M
+Rodney/M
+Rodolfo/M
+Rodolphe/M
+Rodolph/M
+Rodrick/M
+Rodrigo/M
+Rodriguez/M
+Rodrique/M
+Rodriquez/M
+rod/SGMD
+roebuck/SM
+Roentgen's
+roentgen/SM
+roe/SM
+ROFL
+Rogelio/M
+roger/GSD
+Rogerio/M
+Roger/M
+Roget/M
+Rog/MRZ
+rogued/K
+rogue/GMDS
+roguery/MS
+rogues/K
+roguing/K
+roguishness/SM
+roguish/PY
+roil/SGD
+Roi/SM
+roisterer/M
+roister/SZGRD
+Rojas/M
+Roland/M
+Rolando/M
+Roldan/M
+role/MS
+Roley/M
+Rolfe/M
+Rolf/M
+Rolland/M
+rollback/SM
+rolled/A
+Rollerblade/S
+rollerskating
+roller/SM
+rollick/DGS
+rollicking/Y
+Rollie/M
+rolling/S
+Rollin/SM
+Rollo/M
+rollover/S
+roll/UDSG
+Rolodex
+Rolph/M
+Rolvaag/M
+ROM
+romaine/MS
+Romain/M
+Roma/M
+romancer/M
+romance/RSDZMG
+Romanesque/S
+Romania/M
+Romanian/SM
+Romano/MS
+Romanov/M
+roman/S
+Romansh/M
+Romans/M
+Roman/SM
+romantically/U
+romanticism/MS
+Romanticism/S
+romanticist/S
+romanticize/SDG
+romantic/MS
+Romany/SM
+Romeo/MS
+romeo/S
+Romero/M
+Rome/SM
+Rommel/M
+Romney/M
+Romola/M
+Romona/M
+Romonda/M
+romper/M
+romp/GSZDR
+Rom/SM
+Romulus/M
+Romy/M
+Ronalda/M
+Ronald/M
+Rona/M
+Ronda/M
+rondo/SM
+Ronica/M
+Ron/M
+Ronna/M
+Ronnica/M
+Ronnie/M
+Ronni/M
+Ronny/M
+Ronstadt/M
+Rontgen
+Roobbie/M
+rood/MS
+roof/DRMJGZS
+roofer/M
+roofgarden
+roofing/M
+roofless
+rooftop/S
+rookery/MS
+rook/GDMS
+rookie/SRMT
+roomer/M
+roomette/SM
+roomful/MS
+roominess/MS
+roommate/SM
+room/MDRGZS
+roomy/TPSR
+Rooney/M
+Rooseveltian
+Roosevelt/M
+rooster/M
+roost/SGZRDM
+rooted/P
+rooter/M
+rootlessness/M
+rootless/P
+rootlet/SM
+Root/M
+root/MGDRZS
+rootstock/M
+rope/DRSMZG
+roper/M
+roping/M
+Roquefort/MS
+Roquemore/M
+Rora/M
+Rorie/M
+Rori/M
+Rorke/M
+Rorschach
+Rory/M
+Rosabella/M
+Rosabelle/M
+Rosabel/M
+Rosaleen/M
+Rosales/M
+Rosalia/M
+Rosalie/M
+Rosalinda/M
+Rosalinde/M
+Rosalind/M
+Rosaline/M
+Rosalynd/M
+Rosalyn/M
+Rosa/M
+Rosamond/M
+Rosamund/M
+Rosana/M
+Rosanna/M
+Rosanne/M
+Rosario/M
+rosary/SM
+Roscoe/M
+Rosco/M
+Roseanna/M
+Roseanne/M
+Roseann/M
+roseate/Y
+Roseau
+rosebud/MS
+rosebush/SM
+Rosecrans/M
+Roseland/M
+Roselia/M
+Roseline/M
+Roselin/M
+Rosella/M
+Roselle/M
+Rose/M
+Rosemaria/M
+Rosemarie/M
+Rosemary/M
+rosemary/MS
+rose/MGDS
+Rosemonde/M
+Rosenberg/M
+Rosenblum/M
+Rosendo/M
+Rosene/M
+Rosen/M
+Rosenthal/M
+Rosenzweig/M
+Rosetta/M
+Rosette/M
+rosette/SDMG
+rosewater
+rosewood/SM
+Roshelle/M
+Rosicrucian/M
+Rosie/M
+rosily
+Rosina/M
+rosiness/MS
+rosin/SMDG
+Rosita/M
+Roslyn/M
+Rosmunda/M
+Ros/N
+Ross
+Rossetti/M
+Rossie/M
+Rossi/M
+Rossini/M
+Rossy/M
+Rostand/M
+roster/DMGS
+Rostov/M
+rostra's
+rostrum/SM
+Roswell/M
+Rosy/M
+rosy/RTP
+rota/MS
+Rotarian/SM
+rotary/S
+rotated/U
+rotate/VGNXSD
+rotational/Y
+rotation/M
+rotative/Y
+rotator/SM
+rotatory
+ROTC
+rote/MS
+rotgut/MS
+Roth/M
+Rothschild/M
+rotisserie/MS
+rotogravure/SM
+rotor/MS
+rototill/RZ
+rot/SDG
+rotted
+rottenness/S
+rotten/RYSTP
+Rotterdam/M
+rotter/M
+rotting
+rotunda/SM
+rotundity/S
+rotundness/S
+rotund/SDYPG
+Rouault/M
+roué/MS
+rouge/GMDS
+roughage/SM
+roughen/DG
+rougher/M
+roughhouse/GDSM
+roughish
+roughneck/MDSG
+roughness/MS
+roughs
+roughshod
+rough/XPYRDNGT
+roulette/MGDS
+roundabout/PSM
+roundedness/M
+rounded/P
+roundelay/SM
+roundels
+rounder/M
+roundhead/D
+roundheadedness/M
+roundheaded/P
+roundhouse/SM
+roundish
+roundness/MS
+roundoff
+roundup/MS
+roundworm/MS
+round/YRDSGPZT
+Rourke/M
+rouse/DSRG
+rouser/M
+Rousseau/M
+roustabout/SM
+roust/SGD
+route/ASRDZGJ
+router/M
+route's
+rout/GZJMDRS
+routine/SYM
+routing/M
+routinize/GSD
+Rouvin/M
+rover/M
+Rover/M
+rove/ZGJDRS
+roving/M
+Rowan/M
+rowboat/SM
+rowdily
+rowdiness/MS
+rowdyism/MS
+rowdy/PTSR
+rowel/DMSG
+Rowe/M
+Rowena/M
+rowen/M
+Rowen/M
+rower/M
+Rowland/M
+Rowley/M
+Row/MN
+Rowney/M
+row/SJZMGNDR
+Roxana/M
+Roxane/M
+Roxanna/M
+Roxanne/M
+Roxie/M
+Roxi/M
+Roxine/M
+Roxy/M
+royalist/SM
+Royall/M
+Royal/M
+royal/SY
+royalty/MS
+Royce/M
+Roy/M
+Rozalie/M
+Rozalin/M
+Rozamond/M
+Rozanna/M
+Rozanne/M
+Rozele/M
+Rozella/M
+Rozelle/M
+Roze/M
+Rozina/M
+Roz/M
+RP
+rpm
+RPM
+rps
+RR
+Rriocard/M
+rs
+r's
+R's
+RSFSR
+RSI
+RSV
+RSVP
+RSX
+rt
+rte
+Rte
+RTFM
+r/TGVJ
+Rubaiyat/M
+rubato/MS
+rubbed
+rubberize/GSD
+rubberneck/DRMGSZ
+rubber/SDMG
+rubbery/TR
+rubbing/M
+rubbish/DSMG
+rubbishy
+rubble/GMSD
+rubdown/MS
+rubella/MS
+Rube/M
+Ruben/MS
+rube/SM
+Rubetta/M
+Rubia/M
+Rubicon/SM
+rubicund
+rubidium/SM
+Rubie/M
+Rubik/M
+Rubi/M
+Rubina/M
+Rubin/M
+Rubinstein/M
+ruble/MS
+rubout
+rubric/MS
+rub/S
+Ruby/M
+ruby/MTGDSR
+Ruchbah/M
+ruck/M
+rucksack/SM
+ruckus/SM
+ruction/SM
+rudderless
+rudder/MS
+Ruddie/M
+ruddiness/MS
+Rudd/M
+Ruddy/M
+ruddy/PTGRSD
+rudeness/MS
+rude/PYTR
+Rudie/M
+Rudiger/M
+rudimentariness/M
+rudimentary/P
+rudiment/SM
+Rudolf/M
+Rudolfo/M
+Rudolph/M
+Rudyard/M
+Rudy/M
+ruefulness/S
+rueful/PY
+rue/GDS
+Rufe/M
+ruff/GSYDM
+ruffian/GSMDY
+ruffled/U
+ruffler/M
+ruffle/RSDG
+ruffly/TR
+Rufus/M
+Rugby's
+rugby/SM
+ruggedness/S
+rugged/PYRT
+Ruggiero/M
+rugging
+rug/MS
+Ruhr/M
+ruination/MS
+ruiner/M
+ruin/MGSDR
+ruinousness/M
+ruinous/YP
+Ruiz/M
+rulebook/S
+ruled/U
+rule/MZGJDRS
+ruler/GMD
+ruling/M
+Rumanian's
+Rumania's
+rumba/GDMS
+rumble/JRSDG
+rumbler/M
+rumbustious
+rumen/M
+Rumford/M
+Ru/MH
+ruminant/YMS
+ruminate/VNGXSD
+ruminative/Y
+rummage/GRSD
+rummager/M
+Rummel/M
+rummer
+rummest
+rummy/TRSM
+rumored/U
+rumorer/M
+rumormonger/SGMD
+rumor/ZMRDSG
+Rumpelstiltskin/M
+rump/GMYDS
+rumple/SDG
+rumply/TR
+rumpus/SM
+rum/XSMN
+runabout/SM
+runaround/S
+run/AS
+runaway/S
+rundown/SM
+rune/MS
+Runge/M
+rung/MS
+runic
+runlet/SM
+runnable
+runnel/SM
+runner/MS
+running/S
+Runnymede/M
+runny/RT
+runoff/MS
+runtime
+runtiness/M
+runt/MS
+runty/RPT
+runway/MS
+Runyon/M
+rupee/MS
+Ruperta/M
+Rupert/M
+Ruperto/M
+rupiah/M
+rupiahs
+Ruppert/M
+Ruprecht/M
+rupture/GMSD
+rurality/M
+rural/Y
+Rurik/M
+ruse/MS
+Rushdie/M
+rush/DSRGZ
+rusher/M
+rushes/I
+rushing/M
+Rush/M
+Rushmore/M
+rushy/RT
+Ruskin/M
+rusk/MS
+Russell/M
+Russel/M
+russet/MDS
+russetting
+Russia/M
+Russian/SM
+Russo/M
+Russ/S
+Rustbelt/M
+rustically
+rusticate/GSD
+rustication/M
+rusticity/S
+rustic/S
+Rustie/M
+rustiness/MS
+Rustin/M
+rustler/M
+rustle/RSDGZ
+rust/MSDG
+rustproof/DGS
+Rusty/M
+rusty/XNRTP
+rutabaga/SM
+Rutger/SM
+Ruthanne/M
+Ruthann/M
+Ruthe/M
+ruthenium/MS
+rutherfordium/SM
+Rutherford/M
+Ruthie/M
+Ruthi/M
+ruthlessness/MS
+ruthless/YP
+Ruth/M
+Ruthy/M
+Rutland/M
+Rutledge/M
+rut/MS
+rutted
+Rutter/M
+Ruttger/M
+rutting
+rutty/RT
+Ruy/M
+RV
+RVs
+Rwandan/S
+Rwanda/SM
+Rwy/M
+Rx/M
+Ryan/M
+Ryann/M
+Rycca/M
+Rydberg/M
+Ryder/M
+rye/MS
+Ryley/M
+Ry/M
+Ryon/M
+Ryukyu/M
+Ryun/M
+S
+SA
+Saab/M
+Saar/M
+Saba/M
+sabbath
+Sabbath/M
+Sabbaths
+sabbatical/S
+sabered/U
+saber/GSMD
+Sabik/M
+Sabina/M
+Sabine/M
+Sabin/M
+sable/GMDS
+sabotage/DSMG
+saboteur/SM
+sabot/MS
+Sabra/M
+sabra/MS
+Sabrina/M
+SAC
+Sacajawea/M
+saccharides
+saccharine
+saccharin/MS
+Sacco/M
+sacerdotal
+Sacha/M
+sachem/MS
+sachet/SM
+Sachs/M
+sackcloth/M
+sackcloths
+sacker/M
+sackful/MS
+sack/GJDRMS
+sacking/M
+sacral
+sacra/L
+sacramental/S
+sacrament/DMGS
+Sacramento/M
+sacredness/S
+sacred/PY
+sacrificer/M
+sacrifice/RSDZMG
+sacrificial/Y
+sacrilege/MS
+sacrilegious/Y
+sacristan/SM
+sacristy/MS
+sacroiliac/S
+sacrosanctness/MS
+sacrosanct/P
+sacrum/M
+sac/SM
+Sada/M
+Sadat/M
+Saddam/M
+sadden/DSG
+sadder
+saddest
+saddlebag/SM
+saddler/M
+saddle's
+saddle/UGDS
+Sadducee/M
+Sadella/M
+Sade/M
+sades
+Sadie/M
+sadism/MS
+sadistic
+sadistically
+sadist/MS
+sadness/SM
+sadomasochism/MS
+sadomasochistic
+sadomasochist/S
+sad/PY
+Sadr/M
+Sadye/M
+safari/GMDS
+safeguard/MDSG
+safekeeping/MS
+safeness/MS
+safeness's/U
+safes
+safety/SDMG
+safe/URPTY
+safflower/SM
+saffron/MS
+sagaciousness/M
+sagacious/YP
+sagacity/MS
+saga/MS
+Sagan/M
+sagebrush/SM
+sage/MYPS
+sagged
+sagger
+sagging
+saggy/RT
+Saginaw/M
+Sagittarius/MS
+sago/MS
+sag/TSR
+saguaro/SM
+Sahara/M
+Saharan/M
+Sahel
+sahib/MS
+Saidee/M
+saids
+said/U
+Saigon/M
+sailboard/DGS
+sailboat/SRMZG
+sailcloth/M
+sailcloths
+sailer/M
+sailfish/SM
+sail/GJMDRS
+sailing/M
+sailor/YMS
+sailplane/SDMG
+sainthood/MS
+saintlike
+saintliness/MS
+saintly/RTP
+saint/YDMGS
+Saiph/M
+saith
+saiths
+Sakai/M
+sake/MRS
+saker/M
+Sakhalin/M
+Sakharov/M
+Saki/M
+saki's
+salaam/GMDS
+salable/U
+salaciousness/MS
+salacious/YP
+salacity/MS
+Saladin/M
+Salado/M
+salad/SM
+Salaidh/M
+salamander/MS
+salami/MS
+salary/SDMG
+Salas/M
+Salazar/M
+saleability/M
+sale/ABMS
+Saleem/M
+Salem/M
+Salerno/M
+salesclerk/SM
+salesgirl/SM
+saleslady/S
+salesman/M
+salesmanship/SM
+salesmen
+salespeople/M
+salesperson/MS
+salesroom/M
+saleswoman
+saleswomen
+salience/MS
+saliency
+salient/SY
+Salim/M
+Salina/MS
+saline/S
+salinger
+Salinger/M
+salinity/MS
+Salisbury/M
+Salish/M
+saliva/MS
+salivary
+salivate/XNGSD
+salivation/M
+Salk/M
+Sallee/M
+Salle/M
+Sallie/M
+Salli/M
+sallowness/MS
+sallow/TGRDSP
+Sallust/M
+Sallyanne/M
+Sallyann/M
+sally/GSDM
+Sally/M
+salmonellae
+salmonella/M
+Salmon/M
+salmon/SM
+Sal/MY
+Saloma/M
+Salome/M
+Salomi/M
+Salomo/M
+Salomone/M
+Salomon/M
+Salonika/M
+salon/SM
+saloonkeeper
+saloon/MS
+salsa/MS
+salsify/M
+SALT
+saltcellar/SM
+salted/UC
+salter/M
+salt/GZTPMDRS
+saltine/MS
+saltiness/SM
+saltness/M
+Salton/M
+saltpeter/SM
+salts/C
+saltshaker/S
+saltwater
+salty/RSPT
+salubriousness/M
+salubrious/YP
+salubrity/M
+salutariness/M
+salutary/P
+salutation/SM
+salutatory/S
+saluter/M
+salute/RSDG
+Salvadoran/S
+Salvadorian/S
+Salvador/M
+salvageable
+salvage/MGRSD
+salvager/M
+salvation/MS
+Salvatore/M
+salve/GZMDSR
+salver/M
+Salvidor/M
+salvo/GMDS
+Salween/M
+Salyut/M
+Salz/M
+SAM
+Samantha/M
+Samara/M
+Samaria/M
+Samaritan/MS
+samarium/MS
+Samarkand/M
+samba/GSDM
+sameness/MS
+same/SP
+Sam/M
+Sammie/M
+Sammy/M
+Samoa
+Samoan/S
+Samoset/M
+samovar/SM
+Samoyed/M
+sampan/MS
+sampler/M
+sample/RSDJGMZ
+sampling/M
+Sampson/M
+Samsonite/M
+Samson/M
+Samuele/M
+Samuel/SM
+Samuelson/M
+samurai/M
+San'a
+Sana/M
+sanatorium/MS
+Sanborn/M
+Sanchez/M
+Sancho/M
+sanctification/M
+sanctifier/M
+sanctify/RSDGNX
+sanctimoniousness/MS
+sanctimonious/PY
+sanctimony/MS
+sanctioned/U
+sanction/SMDG
+sanctity/SM
+sanctuary/MS
+sanctum/SM
+sandal/MDGS
+sandalwood/SM
+sandbagged
+sandbagging
+sandbag/MS
+sandbank/SM
+sandbar/S
+sandblaster/M
+sandblast/GZSMRD
+sandbox/MS
+Sandburg/M
+sandcastle/S
+Sande/M
+Sanderling/M
+sander/M
+Sander/M
+Sanderson/M
+sandhill
+sandhog/SM
+Sandia/M
+Sandie/M
+Sandi/M
+sandiness/S
+Sandinista
+sandlot/SM
+sandlotter/S
+sandman/M
+sandmen
+Sand/MRZ
+Sandor/M
+Sandoval/M
+sandpaper/DMGS
+sandpile
+sandpiper/MS
+sandpit/M
+Sandra/M
+Sandro/M
+sand/SMDRGZ
+sandstone/MS
+sandstorm/SM
+Sandusky/M
+sandwich/SDMG
+Sandye/M
+Sandy/M
+sandy/PRT
+saned
+sane/IRYTP
+saneness/MS
+saneness's/I
+sanes
+Sanford/M
+Sanforized
+Sanger/M
+sangfroid/S
+sangria/SM
+Sang/RM
+sang/S
+sanguinary
+sanguined
+sanguine/F
+sanguinely
+sanguineness/M
+sanguineous/F
+sanguines
+sanguining
+Sanhedrin/M
+saning
+sanitarian/S
+sanitarium/SM
+sanitary/S
+sanitate/NX
+sanitation/M
+sanitizer/M
+sanitize/RSDZG
+sanity/SIM
+sank
+Sankara/M
+San/M
+sans
+sanserif
+Sanskritic
+Sanskritize/M
+Sanskrit/M
+Sansone/M
+Sanson/M
+Santa/M
+Santana/M
+Santayana/M
+Santeria
+Santiago/M
+Santo/MS
+sapience/MS
+sapient
+sapless
+sapling/SM
+sap/MS
+sapped
+sapper/SM
+Sapphira/M
+Sapphire/M
+sapphire/MS
+Sappho/M
+sappiness/SM
+sapping
+Sapporo/M
+sappy/RPT
+saprophyte/MS
+saprophytic
+sapsucker/SM
+sapwood/SM
+Saraann/M
+Saracen/MS
+Saragossa/M
+Sarah/M
+Sarajane/M
+Sarajevo/M
+Sara/M
+Saran/M
+saran/SM
+sarape's
+Sarasota/M
+Saratoga/M
+Saratov/M
+Sarawak/M
+sarcasm/MS
+sarcastic
+sarcastically
+sarcoma/MS
+sarcophagi
+sarcophagus/M
+sardine/SDMG
+Sardinia/M
+sardonic
+sardonically
+Saree/M
+Sarena/M
+Sarene/M
+Sarette/M
+Sargasso/M
+Sarge/M
+Sargent/M
+sarge/SM
+Sargon/M
+Sari/M
+sari/MS
+Sarina/M
+Sarine/M
+Sarita/M
+Sarnoff/M
+sarong/MS
+Saroyan/M
+sarsaparilla/MS
+Sarto/M
+sartorial/Y
+sartorius/M
+Sartre/M
+Sascha/M
+SASE
+Sasha/M
+sashay/GDS
+Sashenka/M
+sash/GMDS
+Saskatchewan/M
+Saskatoon/M
+Sask/M
+sassafras/MS
+sass/GDSM
+Sassoon/M
+sassy/TRS
+SAT
+satanic
+satanical/Y
+Satanism/M
+satanism/S
+Satanist/M
+satanist/S
+Satan/M
+satchel/SM
+sat/DG
+sateen/MS
+satellite/GMSD
+sate/S
+satiable/I
+satiate/GNXSD
+satiation/M
+satiety/MS
+satin/MDSG
+satinwood/MS
+satiny
+satire/SM
+satiric
+satirical/Y
+satirist/SM
+satirize/DSG
+satirizes/U
+satisfaction/ESM
+satisfactorily/U
+satisfactoriness/MU
+satisfactory/UP
+satisfiability/U
+satisfiable/U
+satisfied/UE
+satisfier/M
+satisfies/E
+satisfy/GZDRS
+satisfying/EU
+satisfyingly
+Sat/M
+satori/SM
+satrap/SM
+saturated/CUA
+saturater/M
+saturates/A
+saturate/XDRSNG
+saturation/M
+Saturday/MS
+saturnalia
+Saturnalia/M
+saturnine/Y
+Saturn/M
+Satyanarayanan/M
+satyriases
+satyriasis/M
+satyric
+satyr/MS
+sauce/DSRGZM
+saucepan/SM
+saucer/M
+saucily
+sauciness/S
+saucy/TRP
+Saudi/S
+Saud/M
+Saudra/M
+sauerkraut/SM
+Saukville/M
+Saul/M
+Sault/M
+sauna/DMSG
+Sauncho/M
+Saunder/SM
+Saunderson/M
+Saundra/M
+saunter/DRSG
+saurian/S
+sauropod/SM
+sausage/MS
+Saussure/M
+sauté/DGS
+Sauternes/M
+Sauveur/M
+savage/GTZYPRSD
+Savage/M
+savageness/SM
+savagery/MS
+Savannah/M
+savanna/MS
+savant/SM
+saved/U
+saveloy/M
+saver/M
+save/ZGJDRSB
+Savina/M
+Savior/M
+savior/SM
+Saviour/M
+Savonarola/M
+savored/U
+savorer/M
+savorier
+savoriest
+savoriness/S
+savoringly/S
+savoring/Y
+savor/SMRDGZ
+savory/UMPS
+Savoyard/M
+Savoy/M
+savoy/SM
+savvy/GTRSD
+sawbones/M
+sawbuck/SM
+sawdust/MDSG
+sawer/M
+sawfly/SM
+sawhorse/MS
+Saw/M
+sawmill/SM
+saw/SMDRG
+sawtooth
+Sawyere/M
+Sawyer/M
+sawyer/MS
+Saxe/M
+saxifrage/SM
+Sax/M
+sax/MS
+Saxon/SM
+Saxony/M
+saxophone/MS
+saxophonist/SM
+Saxton/M
+Sayer/M
+sayer/SM
+sayest
+saying/MS
+Sayre/MS
+says/M
+say/USG
+Say/ZMR
+SBA
+Sb/M
+SC
+scabbard/SGDM
+scabbed
+scabbiness/SM
+scabbing
+scabby/RTP
+scabies/M
+scabrousness/M
+scabrous/YP
+scab/SM
+scad/SM
+scaffolding/M
+scaffold/JGDMS
+scalability
+Scala/M
+scalar/SM
+scalawag/SM
+scald/GJRDS
+scaled/AU
+scale/JGZMBDSR
+scaleless
+scalene
+scaler/M
+scales/A
+scaliness/MS
+scaling/A
+scallion/MS
+scalloper/M
+scallop/GSMDR
+scalloping/M
+scalpel/SM
+scalper/M
+scalp/GZRDMS
+scalping/M
+scaly/TPR
+scammed
+scamming
+scamper/GD
+scampi/M
+scamp/RDMGZS
+scam/SM
+Scan
+scan/AS
+scandal/GMDS
+scandalized/U
+scandalize/GDS
+scandalmonger/SM
+scandalousness/M
+scandalous/YP
+Scandinavia/M
+Scandinavian/S
+scandium/MS
+scanned/A
+scanner/SM
+scanning/A
+scansion/SM
+scant/CDRSG
+scantest
+scantily
+scantiness/MS
+scantly
+scantness/MS
+scanty/TPRS
+scapegoat/SGDM
+scapegrace/MS
+scape/M
+scapulae
+scapula/M
+scapular/S
+scarab/SM
+Scaramouch/M
+Scarborough/M
+scarceness/SM
+scarce/RTYP
+scarcity/MS
+scar/DRMSG
+scarecrow/MS
+scaremongering/M
+scaremonger/SGM
+scarer/M
+scare/S
+scarface
+Scarface/M
+scarf/SDGM
+scarification/M
+scarify/DRSNGX
+scarily
+scariness/S
+scarlatina/MS
+Scarlatti/M
+Scarlet/M
+scarlet/MDSG
+Scarlett/M
+scarp/SDMG
+scarred
+scarring
+scarves/M
+scary/PTR
+scathe/DG
+scathed/U
+scathing/Y
+scatological
+scatology/SM
+scat/S
+scatted
+scatterbrain/MDS
+scatter/DRJZSG
+scatterer/M
+scattergun
+scattering/YM
+scatting
+scavenge/GDRSZ
+scavenger/M
+SCCS
+scenario/SM
+scenarist/MS
+scene/GMDS
+scenery/SM
+scenically
+scenic/S
+scented/U
+scent/GDMS
+scentless
+scent's/C
+scents/C
+scepter/DMSG
+scepters/U
+sceptically
+sch
+Schaefer/M
+Schaeffer/M
+Schafer/M
+Schaffner/M
+Schantz/M
+Schapiro/M
+Scheat/M
+Schedar/M
+schedule/ADSRG
+scheduled/U
+scheduler/MS
+schedule's
+Scheherazade/M
+Scheherezade/M
+Schelling/M
+schema/M
+schemata
+schematically
+schematic/S
+scheme/JSRDGMZ
+schemer/M
+schemta
+Schenectady/M
+scherzo/MS
+Schick/M
+Schiller/M
+schilling/SM
+schismatic/S
+schism/SM
+schist/SM
+schizoid/S
+schizomycetes
+schizophrenia/SM
+schizophrenically
+schizophrenic/S
+schizo/S
+schlemiel/MS
+schlepped
+schlepping
+schlep/S
+Schlesinger/M
+Schliemann/M
+Schlitz/M
+schlock/SM
+schlocky/TR
+Schloss/M
+schmaltz/MS
+schmaltzy/TR
+Schmidt/M
+Schmitt/M
+schmoes
+schmo/M
+schmooze/GSD
+schmuck/MS
+Schnabel/M
+schnapps/M
+schnauzer/MS
+Schneider/M
+schnitzel/MS
+schnook/SM
+schnoz/S
+schnozzle/MS
+Schoenberg/M
+Schofield/M
+scholarship/MS
+scholar/SYM
+scholastically
+scholastic/S
+schoolbag/SM
+schoolbook/SM
+schoolboy/MS
+schoolchild/M
+schoolchildren
+schooldays
+schooled/U
+schoolfellow/S
+schoolfriend
+schoolgirlish
+schoolgirl/MS
+schoolhouse/MS
+schooling/M
+schoolmarmish
+schoolmarm/MS
+schoolmaster/SGDM
+schoolmate/MS
+schoolmistress/MS
+schoolroom/SM
+schoolteacher/MS
+schoolwork/SM
+schoolyard/SM
+school/ZGMRDJS
+schooner/SM
+Schopenhauer/M
+Schottky/M
+Schrieffer/M
+Schrödinger/M
+Schroeder/M
+Schroedinger/M
+Schubert/M
+Schultz/M
+Schulz/M
+Schumacher/M
+Schuman/M
+Schumann/M
+schussboomer/S
+schuss/SDMG
+Schuster/M
+Schuyler/M
+Schuylkill/M
+Schwab/M
+Schwartzkopf/M
+Schwartz/M
+Schwarzenegger/M
+schwa/SM
+Schweitzer/M
+Schweppes/M
+Schwinger/M
+Schwinn/M
+sci
+sciatica/SM
+sciatic/S
+science/FMS
+scientifically/U
+scientific/U
+scientist/SM
+Scientology/M
+scimitar/SM
+scintilla/MS
+scintillate/GNDSX
+scintillation/M
+scintillator/SM
+scion/SM
+Scipio/M
+scissor/SGD
+scleroses
+sclerosis/M
+sclerotic/S
+Sc/M
+scoffer/M
+scofflaw/MS
+scoff/RDGZS
+scolder/M
+scold/GSJRD
+scolioses
+scoliosis/M
+scollop's
+sconce/SDGM
+scone/SM
+scooper/M
+scoop/SRDMG
+scooter/M
+scoot/SRDGZ
+scope/DSGM
+Scopes/M
+scops
+scorbutic
+scorcher/M
+scorching/Y
+scorch/ZGRSD
+scoreboard/MS
+scorecard/MS
+scored/M
+scorekeeper/SM
+scoreless
+scoreline
+score/ZMDSRJG
+scorner/M
+scornfulness/M
+scornful/PY
+scorn/SGZMRD
+scorpion/SM
+Scorpio/SM
+Scorpius/M
+Scorsese/M
+Scotchgard/M
+Scotchman/M
+Scotchmen
+scotch/MSDG
+scotchs
+Scotch/S
+Scotchwoman
+Scotchwomen
+Scotia/M
+Scotian/M
+Scotland/M
+Scot/MS
+Scotsman/M
+Scotsmen
+Scotswoman
+Scotswomen
+Scottie/SM
+Scotti/M
+Scottish
+Scott/M
+Scottsdale/M
+Scotty's
+scoundrel/YMS
+scourer/M
+scourge/MGRSD
+scourger/M
+scouring/M
+scour/SRDGZ
+scouter/M
+scouting/M
+scoutmaster/SM
+Scout's
+scout/SRDMJG
+scow/DMGS
+scowler/M
+scowl/SRDG
+scrabble/DRSZG
+scrabbler/M
+Scrabble/SM
+scragged
+scragging
+scraggly/TR
+scraggy/TR
+scrag/SM
+scrambler/MS
+scrambler's/U
+scramble/UDSRG
+scrammed
+scramming
+scram/S
+Scranton/M
+scrapbook/SM
+scraper/M
+scrape/S
+scrapheap/SM
+scrapped
+scrapper/SM
+scrapping
+scrappy/RT
+scrap/SGZJRDM
+scrapyard/S
+scratched/U
+scratcher/M
+scratches/M
+scratchily
+scratchiness/S
+scratch/JDRSZG
+scratchy/TRP
+scrawler/M
+scrawl/GRDS
+scrawly/RT
+scrawniness/MS
+scrawny/TRP
+screamer/M
+screaming/Y
+scream/ZGSRD
+screecher/M
+screech/GMDRS
+screechy/TR
+screed/MS
+scree/DSM
+screened/U
+screening/M
+screenplay/MS
+screen/RDMJSG
+screenwriter/MS
+screwball/SM
+screwdriver/SM
+screwer/M
+screw/GUSD
+screwiness/S
+screw's
+screwup
+screwworm/MS
+screwy/RTP
+Scriabin/M
+scribal
+scribble/JZDRSG
+scribbler/M
+scribe/CDRSGIK
+scriber/MKIC
+scribe's
+Scribner/MS
+scrimmager/M
+scrimmage/RSDMG
+scrimp/DGS
+scrimshaw/GSDM
+scrim/SM
+Scripps/M
+scrip/SM
+scripted/U
+script/FGMDS
+scriptural/Y
+scripture/MS
+Scripture/MS
+scriptwriter/SM
+scriptwriting/M
+scrivener/M
+scriven/ZR
+scrod/M
+scrofula/MS
+scrofulous
+scrollbar/SM
+scroll/GMDSB
+Scrooge/MS
+scrooge/SDMG
+scrota
+scrotal
+scrotum/M
+scrounge/ZGDRS
+scroungy/TR
+scrubbed
+scrubber/MS
+scrubbing
+scrubby/TR
+scrub/S
+scruffily
+scruffiness/S
+scruff/SM
+scruffy/PRT
+Scruggs/M
+scrummage/MG
+scrum/MS
+scrumptious/Y
+scrunch/DSG
+scrunchy/S
+scruple/SDMG
+scrupulosity/SM
+scrupulousness's
+scrupulousness/US
+scrupulous/UPY
+scrutable/I
+scrutinized/U
+scrutinizer/M
+scrutinize/RSDGZ
+scrutinizingly/S
+scrutinizing/UY
+scrutiny/MS
+SCSI
+scuba/SDMG
+scudded
+scudding
+Scud/M
+scud/S
+scuff/GSD
+scuffle/SDG
+sculler/M
+scullery/MS
+Sculley/M
+scullion/MS
+scull/SRDMGZ
+sculptor/MS
+sculptress/MS
+sculpt/SDG
+sculptural/Y
+sculpture/SDGM
+scumbag/S
+scummed
+scumming
+scum/MS
+scummy/TR
+scupper/SDMG
+scurf/MS
+scurfy/TR
+scurrility/MS
+scurrilousness/MS
+scurrilous/PY
+scurry/GJSD
+scurvily
+scurviness/M
+scurvy/SRTP
+scutcheon/SM
+scuttlebutt/MS
+scuttle/MGSD
+scuzzy/RT
+Scylla/M
+scythe/SDGM
+Scythia/M
+SD
+SDI
+SE
+seabed/S
+seabird/S
+seaboard/MS
+Seaborg/M
+seaborne
+Seabrook/M
+seacoast/MS
+seafare/JRZG
+seafarer/M
+seafood/MS
+seafront/MS
+Seagate/M
+seagoing
+Seagram/M
+seagull/S
+seahorse/S
+sealant/MS
+sealed/AU
+sealer/M
+seal/MDRSGZ
+sealskin/SM
+seals/UA
+seamail
+seamanship/SM
+seaman/YM
+seamer/M
+seaminess/M
+seamlessness/M
+seamless/PY
+seam/MNDRGS
+seams/I
+seamstress/MS
+Seamus/M
+sea/MYS
+seamy/TRP
+Seana/M
+séance/SM
+Sean/M
+seaplane/SM
+seaport/SM
+seaquake/M
+Seaquarium/M
+searcher/AM
+searching/YS
+searchlight/SM
+search/RSDAGZ
+sear/DRSJGT
+searing/Y
+Sears/M
+seascape/SM
+seashell/MS
+seashore/SM
+seasickness/SM
+seasick/P
+seaside/SM
+seasonableness/M
+seasonable/UP
+seasonably/U
+seasonality
+seasonal/Y
+seasoned/U
+seasoner/M
+seasoning/M
+season/JRDYMBZSG
+seatbelt
+seated/A
+seater/M
+seating/SM
+SEATO
+seat's
+Seattle/M
+seat/UDSG
+seawall/S
+seaward/S
+seawater/S
+seaway/MS
+seaweed/SM
+seaworthinesses
+seaworthiness/MU
+seaworthy/TRP
+sebaceous
+Sebastian/M
+Sebastiano/M
+Sebastien/M
+seborrhea/SM
+SEC
+secant/SM
+secede/GRSD
+secessionist/MS
+secession/MS
+secludedness/M
+secluded/YP
+seclude/GSD
+seclusion/SM
+seclusive
+Seconal
+secondarily
+secondary/PS
+seconder/M
+secondhand
+second/RDYZGSL
+secrecy/MS
+secretarial
+secretariat/MS
+secretaryship/MS
+secretary/SM
+secrete/XNS
+secretion/M
+secretiveness/S
+secretive/PY
+secretory
+secret/TVGRDYS
+sec/S
+sectarianism/MS
+sectarian/S
+sectary/MS
+sectionalism/MS
+sectionalized
+sectional/SY
+section/ASEM
+sectioned
+sectioning
+sect/ISM
+sectoral
+sectored
+sector/EMS
+sectoring
+sects/E
+secularism/MS
+secularist/MS
+secularity/M
+secularization/MS
+secularized/U
+secularize/GSD
+secular/SY
+secured/U
+securely/I
+secure/PGTYRSDJ
+security/MSI
+secy
+sec'y
+sedan/SM
+sedateness/SM
+sedate/PXVNGTYRSD
+sedation/M
+sedative/S
+sedentary
+Seder/SM
+sedge/SM
+Sedgwick/M
+sedgy/RT
+sedimentary
+sedimentation/SM
+sediment/SGDM
+sedition/SM
+seditiousness/M
+seditious/PY
+seducer/M
+seduce/RSDGZ
+seduction/MS
+seductiveness/MS
+seductive/YP
+seductress/SM
+sedulous/Y
+Seebeck/M
+seed/ADSG
+seedbed/MS
+seedcase/SM
+seeded/U
+seeder/MS
+seediness/MS
+seeding/S
+seedless
+seedling/SM
+seedpod/S
+seed's
+seedy/TPR
+seeings
+seeing's
+seeing/U
+seeker/M
+seek/GZSR
+seeking/Y
+Seeley/M
+See/M
+seem/GJSYD
+seeming/Y
+seemliness's
+seemliness/US
+seemly/UTPR
+seen/U
+seepage/MS
+seep/GSD
+seer/SM
+seersucker/MS
+sees
+seesaw/DMSG
+seethe/SDGJ
+see/U
+segmental/Y
+segmentation/SM
+segmented/U
+segment/SGDM
+Segovia/M
+segregant
+segregated/U
+segregate/XCNGSD
+segregation/CM
+segregationist/SM
+segregative
+Segre/M
+segue/DS
+segueing
+Segundo/M
+Se/H
+Seidel/M
+seigneur/MS
+seignior/SM
+Seiko/M
+seine/GZMDSR
+Seine/M
+seiner/M
+Seinfeld/M
+seismic
+seismically
+seismographer/M
+seismographic
+seismographs
+seismography/SM
+seismograph/ZMR
+seismologic
+seismological
+seismologist/MS
+seismology/SM
+seismometer/S
+seize/BJGZDSR
+seizer/M
+seizing/M
+seizin/MS
+seizor/MS
+seizure/MS
+Seka/M
+Sela/M
+Selassie/M
+Selby/M
+seldom
+selected/UAC
+selectional
+selection/MS
+selectiveness/M
+selective/YP
+selectivity/MS
+selectman/M
+selectmen
+selectness/SM
+selector/SM
+select/PDSVGB
+Selectric/M
+selects/A
+Selena/M
+selenate/M
+Selene/M
+selenite/M
+selenium/MS
+selenographer/SM
+selenography/MS
+Selestina/M
+Seleucid/M
+Seleucus/M
+self/GPDMS
+selfishness/SU
+selfish/PUY
+selflessness/MS
+selfless/YP
+selfness/M
+Selfridge/M
+selfsameness/M
+selfsame/P
+Selia/M
+Selie/M
+Selig/M
+Selim/M
+Selina/M
+Selinda/M
+Seline/M
+Seljuk/M
+Selkirk/M
+Sella/M
+sell/AZGSR
+seller/AM
+Sellers/M
+Selle/ZM
+sellout/MS
+Selma/M
+seltzer/S
+selvage/MGSD
+selves/M
+Selznick/M
+semantical/Y
+semanticist/SM
+semantic/S
+semantics/M
+semaphore/GMSD
+Semarang/M
+semblance/ASME
+semen/SM
+semester/SM
+semiannual/Y
+semiarid
+semiautomated
+semiautomatic/S
+semicircle/SM
+semicircular
+semicolon/MS
+semiconductor/SM
+semiconscious
+semidefinite
+semidetached
+semidrying/M
+semifinalist/MS
+semifinal/MS
+semilogarithmic
+semimonthly/S
+seminal/Y
+seminarian/MS
+seminar/SM
+seminary/MS
+Seminole/SM
+semiofficial
+semioticians
+semiotic/S
+semiotics/M
+semipermanent/Y
+semipermeable
+semiprecious
+semiprivate
+semiprofessional/YS
+semipublic
+semiquantitative/Y
+Semiramis/M
+semiretired
+semisecret
+semiskilled
+semi/SM
+semisolid/S
+semistructured
+semisweet
+Semite/SM
+Semitic/MS
+semitic/S
+semitone/SM
+semitrailer/SM
+semitrance
+semitransparent
+semitropical
+semivowel/MS
+semiweekly/S
+semiyearly
+semolina/SM
+sempiternal
+sempstress/SM
+Semtex
+sen
+Sen
+Sena/M
+senate/MS
+Senate/MS
+senatorial
+senator/MS
+Sendai/M
+sender/M
+sends/A
+send/SRGZ
+Seneca/MS
+Senegalese
+Senegal/M
+senescence/SM
+senescent
+senile/SY
+senility/MS
+seniority/SM
+senior/MS
+Senior/S
+Sennacherib/M
+senna/MS
+Sennett/M
+Señora/M
+senora/S
+senorita/S
+senor/MS
+sensately/I
+sensate/YNX
+sensationalism/MS
+sensationalist/S
+sensationalize/GSD
+sensational/Y
+sensation/M
+sens/DSG
+senselessness/SM
+senseless/PY
+sense/M
+sensibility/ISM
+sensibleness/MS
+sensible/PRST
+sensibly/I
+sensitiveness/MS
+sensitiveness's/I
+sensitives
+sensitive/YIP
+sensitivity/ISM
+sensitization/CSM
+sensitized/U
+sensitizers
+sensitize/SDCG
+sensor/MS
+sensory
+sensualist/MS
+sensuality/MS
+sensual/YF
+sensuousness/S
+sensuous/PY
+Sensurround/M
+sentence/SDMG
+sentential/Y
+sententious/Y
+sentience/ISM
+sentient/YS
+sentimentalism/SM
+sentimentalist/SM
+sentimentality/SM
+sentimentalization/SM
+sentimentalize/RSDZG
+sentimentalizes/U
+sentimental/Y
+sentiment/MS
+sentinel/GDMS
+sentry/SM
+sent/UFEA
+Seoul/M
+sepal/SM
+separability/MSI
+separableness/MI
+separable/PI
+separably/I
+separateness/MS
+separates/M
+separate/YNGVDSXP
+separation/M
+separatism/SM
+separatist/SM
+separator/SM
+Sephardi/M
+Sephira/M
+sepia/MS
+Sepoy/M
+sepses
+sepsis/M
+septa/M
+septate/N
+September/MS
+septennial/Y
+septet/MS
+septicemia/SM
+septicemic
+septic/S
+septillion/M
+sept/M
+Sept/M
+septuagenarian/MS
+Septuagint/MS
+septum/M
+sepulcher/MGSD
+sepulchers/UA
+sepulchral/Y
+seq
+sequel/MS
+sequenced/A
+sequence/DRSJZMG
+sequencer/M
+sequence's/F
+sequences/F
+sequent/F
+sequentiality/FM
+sequentialize/DSG
+sequential/YF
+sequester/SDG
+sequestrate/XGNDS
+sequestration/M
+sequin/SDMG
+sequitur
+Sequoia/M
+sequoia/MS
+Sequoya/M
+Serafin/M
+seraglio/SM
+serape/S
+seraphic
+seraphically
+seraphim's
+seraph/M
+seraphs
+sera's
+Serbia/M
+Serbian/S
+Serb/MS
+Serbo/M
+serenade/MGDRS
+serenader/M
+Serena/M
+serendipitous/Y
+serendipity/MS
+serene/GTYRSDP
+Serene/M
+sereneness/SM
+Serengeti/M
+serenity/MS
+sere/TGDRS
+serfdom/MS
+serf/MS
+Sergeant/M
+sergeant/SM
+serge/DSGM
+Sergei/M
+Serge/M
+Sergent/M
+Sergio/M
+serialization/MS
+serialize/GSD
+serial/MYS
+series/M
+serif/SMD
+serigraph/M
+serigraphs
+seriousness/SM
+serious/PY
+sermonize/GSD
+sermon/SGDM
+serological/Y
+serology/MS
+serons
+serous
+Serpens/M
+serpent/GSDM
+serpentine/GYS
+Serra/M
+Serrano/M
+serrate/GNXSD
+serration/M
+serried
+serum/MS
+servant/SDMG
+serve/AGCFDSR
+served/U
+server/MCF
+servers
+serviceability/SM
+serviceableness/M
+serviceable/P
+serviced/U
+serviceman/M
+servicemen
+service/MGSRD
+service's/E
+services/E
+servicewoman
+servicewomen
+serviette/MS
+servilely
+servileness/M
+serviles
+servile/U
+servility/SM
+serving/SM
+servitor/SM
+servitude/MS
+servomechanism/MS
+servomotor/MS
+servo/S
+sesame/MS
+sesquicentennial/S
+sessile
+session/SM
+setback/S
+Seth/M
+Set/M
+Seton/M
+set's
+setscrew/SM
+set/SIA
+settable/A
+sett/BJGZSMR
+settee/MS
+setter/M
+setting/AS
+setting's
+settle/AUDSG
+settlement/ASM
+settler/MS
+settling/S
+setup/MS
+Seumas/M
+Seurat/M
+Seuss/M
+Sevastopol/M
+sevenfold
+sevenpence
+seven/SMH
+seventeen/HMS
+seventeenths
+sevenths
+seventieths
+seventy/MSH
+severalfold
+severalty/M
+several/YS
+severance/SM
+severed/E
+severeness/SM
+severe/PY
+severing/E
+severity/MS
+Severn/M
+severs/E
+sever/SGTRD
+Severus/M
+Seville/M
+sewage/MS
+Seward/M
+sewerage/SM
+sewer/GSMD
+sewing/SM
+sewn
+sew/SAGD
+sexagenarian/MS
+sex/GMDS
+sexily
+sexiness/MS
+sexism/SM
+sexist/SM
+sexless
+sexologist/SM
+sexology/MS
+sexpot/SM
+Sextans/M
+sextant/SM
+sextet/SM
+sextillion/M
+Sexton/M
+sexton/MS
+sextuple/MDG
+sextuplet/MS
+sexuality/MS
+sexualized
+sexual/Y
+sexy/RTP
+Seychelles
+Seyfert
+Seymour/M
+sf
+SF
+Sgt
+shabbily
+shabbiness/SM
+shabby/RTP
+shack/GMDS
+shackler/M
+shackle's
+Shackleton/M
+shackle/UGDS
+shad/DRJGSM
+shaded/U
+shadeless
+shade/SM
+shadily
+shadiness/MS
+shading/M
+shadowbox/SDG
+shadower/M
+shadow/GSDRM
+shadowiness/M
+Shadow/M
+shadowy/TRP
+shady/TRP
+Shae/M
+Shafer/M
+Shaffer/M
+shafting/M
+shaft/SDMG
+shagged
+shagginess/SM
+shagging
+shaggy/TPR
+shag/MS
+shah/M
+shahs
+Shaina/M
+Shaine/M
+shakable/U
+shakably/U
+shakeable
+shakedown/S
+shaken/U
+shakeout/SM
+shaker/M
+Shaker/S
+Shakespearean/S
+Shakespeare/M
+Shakespearian
+shake/SRGZB
+shakeup/S
+shakily
+shakiness/S
+shaking/M
+shaky/TPR
+shale/SM
+shall
+shallot/SM
+shallowness/SM
+shallow/STPGDRY
+Shalna/M
+Shalne/M
+shalom
+Shalom/M
+shalt
+shamanic
+shaman/SM
+shamble/DSG
+shambles/M
+shamefaced/Y
+shamefulness/S
+shameful/YP
+shamelessness/SM
+shameless/PY
+shame/SM
+sham/MDSG
+shammed
+shammer
+shamming
+shammy's
+shampoo/DRSMZG
+shampooer/M
+shamrock/SM
+Shamus/M
+Shana/M
+Shanan/M
+Shanda/M
+Shandee/M
+Shandeigh/M
+Shandie/M
+Shandra/M
+shandy/M
+Shandy/M
+Shane/M
+Shanghai/GM
+Shanghaiing/M
+shanghai/SDG
+Shanie/M
+Shani/M
+shank/SMDG
+Shannah/M
+Shanna/M
+Shannan/M
+Shannen/M
+Shannon/M
+Shanon/M
+shan't
+Shanta/M
+Shantee/M
+shantis
+Shantung/M
+shantung/MS
+shanty/SM
+shantytown/SM
+shape/AGDSR
+shaped/U
+shapelessness/SM
+shapeless/PY
+shapeliness/S
+shapely/RPT
+shaper/S
+shape's
+Shapiro/M
+sharable/U
+Sharai/M
+Shara/M
+shard/SM
+shareable
+sharecropped
+sharecropper/MS
+sharecropping
+sharecrop/S
+share/DSRGZMB
+shared/U
+shareholder/MS
+shareholding/S
+sharer/M
+shareware/S
+Shari'a
+Sharia/M
+sharia/SM
+Shari/M
+Sharity/M
+shark/SGMD
+sharkskin/SM
+Sharla/M
+Sharleen/M
+Sharlene/M
+Sharline/M
+Sharl/M
+Sharona/M
+Sharon/M
+Sharpe/M
+sharpen/ASGD
+sharpened/U
+sharpener/S
+sharper/M
+sharpie/SM
+Sharp/M
+sharpness/MS
+sharp/SGTZXPYRDN
+sharpshooter/M
+sharpshooting/M
+sharpshoot/JRGZ
+sharpy's
+Sharron/M
+Sharyl/M
+Shasta/M
+shat
+shatter/DSG
+shattering/Y
+shatterproof
+Shaughn/M
+Shaula/M
+Shauna/M
+Shaun/M
+shave/DSRJGZ
+shaved/U
+shaver/M
+Shavian
+shaving/M
+Shavuot/M
+Shawano/M
+shawl/SDMG
+shaw/M
+Shaw/M
+Shawna/M
+Shawnee/SM
+Shawn/M
+Shaylah/M
+Shayla/M
+Shaylyn/M
+Shaylynn/M
+Shay/M
+shay/MS
+Shayna/M
+Shayne/M
+Shcharansky/M
+sh/DRS
+sheaf/MDGS
+Shea/M
+shearer/M
+shear/RDGZS
+sheather/M
+sheathe/UGSD
+sheath/GJMDRS
+sheathing/M
+sheaths
+sheave/SDG
+sheaves/M
+Sheba/M
+shebang/MS
+Shebeli/M
+Sheboygan/M
+she'd
+shedding
+Shedir/M
+sheds
+shed's
+shed/U
+Sheelagh/M
+Sheelah/M
+Sheela/M
+Sheena/M
+sheen/MDGS
+sheeny/TRSM
+sheepdog/SM
+sheepfold/MS
+sheepherder/MS
+sheepishness/SM
+sheepish/YP
+sheep/M
+sheepskin/SM
+Sheeree/M
+sheerness/S
+sheer/PGTYRDS
+sheeting/M
+sheetlike
+sheet/RDMJSG
+Sheetrock
+Sheffielder/M
+Sheffield/RMZ
+Sheffie/M
+Sheff/M
+Sheffy/M
+sheikdom/SM
+sheikh's
+sheik/SM
+Sheilah/M
+Sheila/M
+shekel/MS
+Shelagh/M
+Shela/M
+Shelba/M
+Shelbi/M
+Shelby/M
+Shelden/M
+Sheldon/M
+shelf/MDGS
+Shelia/M
+she'll
+shellacked
+shellacking/MS
+shellac/S
+shelled/U
+Shelley/M
+shellfire/SM
+shellfish/SM
+Shellie/M
+Shelli/M
+Shell/M
+shell/RDMGS
+Shelly/M
+Shel/MY
+shelter/DRMGS
+sheltered/U
+shelterer/M
+Shelton/M
+shelve/JRSDG
+shelver/M
+shelves/M
+shelving/M
+she/M
+Shem/M
+Shena/M
+Shenandoah/M
+shenanigan/SM
+Shenyang/M
+Sheol/M
+Shepard/M
+shepherd/DMSG
+shepherdess/S
+Shepherd/M
+Shep/M
+Sheppard/M
+Shepperd/M
+Sheratan/M
+Sheraton/M
+sherbet/MS
+sherd's
+Sheree/M
+Sheridan/M
+Sherie/M
+sheriff/SM
+Sherill/M
+Sherilyn/M
+Sheri/M
+Sherline/M
+Sherlocke/M
+sherlock/M
+Sherlock/M
+Sher/M
+Sherman/M
+Shermie/M
+Sherm/M
+Shermy/M
+Sherpa/SM
+Sherrie/M
+Sherri/M
+Sherry/M
+sherry/MS
+Sherwin/M
+Sherwood/M
+Sherwynd/M
+Sherye/M
+Sheryl/M
+Shetland/S
+Shevardnadze/M
+shew/GSD
+shewn
+shh
+shiatsu/S
+shibboleth/M
+shibboleths
+shielded/U
+shielder/M
+shield/MDRSG
+Shields/M
+shiftily
+shiftiness/SM
+shiftlessness/S
+shiftless/PY
+shift/RDGZS
+shifty/TRP
+Shi'ite
+Shiite/SM
+Shijiazhuang
+Shikoku/M
+shill/DJSG
+shillelagh/M
+shillelaghs
+shilling/M
+Shillong/M
+Shiloh/M
+shimmed
+shimmer/DGS
+shimmery
+shimming
+shimmy/DSMG
+shim/SM
+Shina/M
+shinbone/SM
+shindig/MS
+shiner/M
+shine/S
+shingle/MDRSG
+shingler/M
+shinguard
+shininess/MS
+shining/Y
+shinned
+shinning
+shinny/GDSM
+shin/SGZDRM
+shinsplints
+Shintoism/S
+Shintoist/MS
+Shinto/MS
+shiny/PRT
+shipboard/MS
+shipborne
+shipbuilder/M
+shipbuild/RGZJ
+shipload/SM
+shipman/M
+shipmate/SM
+shipmen
+shipment/AMS
+shipowner/MS
+shippable
+shipped/A
+shipper/SM
+shipping/MS
+ship's
+shipshape
+ship/SLA
+shipwreck/GSMD
+shipwright/MS
+shipyard/MS
+Shiraz/M
+shire/MS
+shirker/M
+shirk/RDGZS
+Shirlee/M
+Shirleen/M
+Shirlene/M
+Shirley/M
+Shirline/M
+Shirl/M
+Shir/M
+shirr/GJDS
+shirtfront/S
+shirting/M
+shirt/JDMSG
+shirtless
+shirtmake/R
+shirtmaker/M
+shirtsleeve/MS
+shirttail/S
+shirtwaist/SM
+shit/S!
+shitting/!
+shitty/RT!
+Shiva/M
+shiverer/M
+shiver/GDR
+shivery
+shiv/SZRM
+shivved
+shivving
+shlemiel's
+Shmuel/M
+shoal/SRDMGT
+shoat/SM
+shocker/M
+shocking/Y
+Shockley/M
+shockproof
+shock/SGZRD
+shoddily
+shoddiness/SM
+shoddy/RSTP
+shod/U
+shoehorn/GSMD
+shoeing
+shoelace/MS
+shoemaker/M
+shoemake/RZ
+shoe/MS
+shoer's
+shoeshine/MS
+shoestring/MS
+shoetree/MS
+shogunate/SM
+shogun/MS
+Shoji/M
+Sholom/M
+shone
+shoo/DSG
+shoofly
+shook/SM
+shooter/M
+shootout/MS
+shoot/SJRGZ
+shopkeeper/M
+shopkeep/RGZ
+shoplifter/M
+shoplifting/M
+shoplift/SRDGZ
+shop/MS
+shopped/M
+shopper/M
+shoppe/RSDGZJ
+shopping/M
+shoptalk/SM
+shopworn
+shorebird/S
+shore/DSRGMJ
+shoreline/SM
+Shorewood/M
+shoring/M
+shortage/MS
+shortbread/MS
+shortcake/SM
+shortchange/DSG
+shortcoming/MS
+shortcrust
+shortcut/MS
+shortcutting
+shortener/M
+shortening/M
+shorten/RDGJ
+shortfall/SM
+shorthand/DMS
+Shorthorn/M
+shorthorn/MS
+shortie's
+shortish
+shortlist/GD
+Short/M
+shortness/MS
+short/SGTXYRDNP
+shortsightedness/S
+shortsighted/YP
+shortstop/MS
+shortwave/SM
+shorty/SM
+Shoshana/M
+Shoshanna/M
+Shoshone/SM
+Shostakovitch/M
+shotgunned
+shotgunner
+shotgunning
+shotgun/SM
+shot/MS
+shotted
+shotting
+shoulder/GMD
+shouldn't
+should/TZR
+shout/SGZRDM
+shove/DSRG
+shoveler/M
+shovelful/MS
+shovel/MDRSZG
+shover/M
+showbiz
+showbizzes
+showboat/SGDM
+showcase/MGSD
+showdown/MS
+shower/GDM
+showery/TR
+show/GDRZJS
+showgirl/SM
+showily
+showiness/MS
+showing/M
+showman/M
+showmanship/SM
+showmen
+shown
+showoff/S
+showpiece/SM
+showplace/SM
+showroom/MS
+showy/RTP
+shpt
+shrank
+shrapnel/SM
+shredded
+shredder/MS
+shredding
+shred/MS
+Shreveport/M
+shrewdness/SM
+shrewd/RYTP
+shrew/GSMD
+shrewishness/M
+shrewish/PY
+shrieker/M
+shriek/SGDRMZ
+shrift/SM
+shrike/SM
+shrill/DRTGPS
+shrillness/MS
+shrilly
+shrimp/MDGS
+shrine/SDGM
+shrinkage/SM
+shrinker/M
+shrinking/U
+shrink/SRBG
+shrivel/GSD
+shriven
+shrive/RSDG
+Shropshire/M
+shroud/GSMD
+shrubbed
+shrubbery/SM
+shrubbing
+shrubby/TR
+shrub/SM
+shrugged
+shrugging
+shrug/S
+shrunk/N
+shtick/S
+shucker/M
+shuck/SGMRD
+shucks/S
+shudder/DSG
+shuddery
+shuffleboard/MS
+shuffled/A
+shuffle/GDSRZ
+shuffles/A
+shuffling/A
+Shulman/M
+Shu/M
+shunned
+shunning
+shun/S
+shunter/M
+shunt/GSRD
+Shurlocke/M
+Shurlock/M
+Shurwood/M
+shush/SDG
+shutdown/MS
+shuteye/SM
+shutoff/M
+shutout/SM
+shut/S
+shutterbug/S
+shutter/DMGS
+shuttering/M
+shutting
+shuttlecock/MDSG
+shuttle/MGDS
+shy/DRSGTZY
+shyer
+shyest
+Shylockian/M
+Shylock/M
+shyness/SM
+shyster/SM
+Siamese/M
+Siam/M
+Siana/M
+Sianna/M
+Sian's
+Sibbie/M
+Sibby/M
+Sibeal/M
+Sibelius/M
+Sibella/M
+Sibelle/M
+Sibel/M
+Siberia/M
+Siberian/S
+sibilance/M
+sibilancy/M
+sibilant/SY
+Sibilla/M
+Sibley/M
+sibling/SM
+Sib/M
+Sibylla/M
+Sibylle/M
+sibylline
+Sibyl/M
+sibyl/SM
+Siciliana/M
+Sicilian/S
+Sicily/M
+sickbay/M
+sickbed/S
+sickener/M
+sickening/Y
+sicken/JRDG
+sicker/Y
+sick/GXTYNDRSP
+sickie/SM
+sickish/PY
+sickle/SDGM
+sickliness/M
+sickly/TRSDPG
+sickness/MS
+sicko/S
+sickout/S
+sickroom/SM
+sic/S
+sidearm/S
+sideband/MS
+sidebar/MS
+sideboard/SM
+sideburns
+sidecar/MS
+sided/A
+sidedness
+side/ISRM
+sidekick/MS
+sidelight/SM
+sideline/MGDRS
+sidelong
+sideman/M
+sidemen
+sidepiece/S
+sidereal
+sider/FA
+sides/A
+sidesaddle/MS
+sideshow/MS
+sidesplitting
+sidestepped
+sidestepping
+sidestep/S
+sidestroke/GMSD
+sideswipe/GSDM
+sidetrack/SDG
+sidewalk/MS
+sidewall/MS
+sidewards
+sideway/SM
+sidewinder/SM
+siding/SM
+sidle/DSG
+Sid/M
+Sidnee/M
+Sidney/M
+Sidoney/M
+Sidonia/M
+Sidonnie/M
+SIDS
+siege/GMDS
+Siegel/M
+Siegfried/M
+Sieglinda/M
+Siegmund/M
+Siemens/M
+Siena/M
+sienna/SM
+Sierpinski/M
+sierra/SM
+siesta/MS
+sieve/GZMDS
+Siffre/M
+sifted/UA
+sifter/M
+sift/GZJSDR
+Sigfrid/M
+Sigfried/M
+SIGGRAPH/M
+sigh/DRG
+sigher/M
+sighs
+sighted/P
+sighter/M
+sighting/S
+sight/ISM
+sightless/Y
+sightliness/UM
+sightly/TURP
+sightread
+sightseeing/S
+sightsee/RZ
+Sigismond/M
+Sigismondo/M
+Sigismund/M
+Sigismundo/M
+Sig/M
+sigma/SM
+sigmoid
+Sigmund/M
+signal/A
+signaled
+signaler/S
+signaling
+signalization/S
+signalize/GSD
+signally
+signalman/M
+signalmen
+signals
+signal's
+signatory/SM
+signature/MS
+signboard/MS
+signed/FU
+signer/SC
+signet/SGMD
+sign/GARDCS
+significance/IMS
+significantly/I
+significant/YS
+signification/M
+signify/DRSGNX
+signing/S
+Signora/M
+signora/SM
+signore/M
+signori
+signories
+signorina/SM
+signorine
+Signor/M
+signor/SFM
+signpost/DMSG
+sign's
+signs/F
+Sigrid/M
+Sigurd/M
+Sigvard/M
+Sihanouk/M
+Sikhism/MS
+Sikh/MS
+Sikhs
+Sikkimese
+Sikkim/M
+Sikorsky/M
+silage/GMSD
+Silas/M
+Sileas/M
+siled
+Sile/M
+silence/MZGRSD
+silencer/M
+silentness/M
+silent/TSPRY
+Silesia/M
+silhouette/GMSD
+silica/SM
+silicate/SM
+siliceous
+silicide/M
+silicone/SM
+silicon/MS
+silicoses
+silicosis/M
+silken/DG
+silk/GXNDMS
+silkily
+silkiness/SM
+silkscreen/SM
+silkworm/MS
+silky/RSPT
+silliness/SM
+sill/MS
+silly/PRST
+silo/GSM
+siltation/M
+silt/MDGS
+siltstone/M
+silty/RT
+Silurian/S
+Silvain/M
+Silva/M
+Silvana/M
+Silvan/M
+Silvano/M
+Silvanus/M
+silverer/M
+silverfish/MS
+Silverman/M
+silver/RDYMGS
+silversmith/M
+silversmiths
+Silverstein/M
+silverware/SM
+silvery/RTP
+Silvester/M
+Silvia/M
+Silvie/M
+Silvio/M
+Si/M
+SIMD
+Simenon/M
+Simeon/M
+simian/S
+similar/EY
+similarity/EMS
+simile/SM
+similitude/SME
+Simla/M
+simmer/GSD
+Simmonds/M
+Simmons/M
+Simmonsville/M
+Sim/MS
+Simms/M
+Simona/M
+Simone/M
+Simonette/M
+simonize/SDG
+Simon/M
+Simonne/M
+simony/MS
+simpatico
+simper/GDS
+simpleminded/YP
+simpleness/S
+simple/RSDGTP
+simpleton/SM
+simplex/S
+simplicity/MS
+simplified/U
+simplify/ZXRSDNG
+simplistic
+simplistically
+simply
+Simpson/M
+simulacrum/M
+Simula/M
+SIMULA/M
+simulate/XENGSD
+simulation/ME
+simulative
+simulator/SEM
+simulcast/GSD
+simultaneity/SM
+simultaneousness/M
+simultaneous/YP
+Sinai/M
+Sinatra/M
+since
+sincere/IY
+sincereness/M
+sincerer
+sincerest
+sincerity/MIS
+Sinclair/M
+Sinclare/M
+Sindbad/M
+Sindee/M
+Sindhi/M
+sinecure/MS
+sinecurist/M
+sine/SM
+sinew/SGMD
+sinewy
+sinfulness/SM
+sinful/YP
+Singaporean/S
+Singapore/M
+sing/BGJZYDR
+Singborg/M
+singeing
+singer/M
+Singer/M
+singe/S
+singing/Y
+singlehanded/Y
+singleness/SM
+single/PSDG
+Singleton/M
+singleton/SM
+singletree/SM
+singlet/SM
+singsong/GSMD
+singularity/SM
+singularization/M
+singular/SY
+Sinhalese/M
+sinisterness/M
+sinister/YP
+sinistral/Y
+sinkable/U
+sinker/M
+sink/GZSDRB
+sinkhole/SM
+Sinkiang/M
+sinking/M
+sinlessness/M
+sinless/YP
+sin/MAGS
+sinned
+sinner/MS
+sinning
+sinter/DM
+sinuosity/MS
+sinuousities
+sinuousness/M
+sinuous/PY
+sinusitis/SM
+sinus/MS
+sinusoidal/Y
+sinusoid/MS
+Siobhan/M
+Siouxie/M
+Sioux/M
+siphon/DMSG
+siphons/U
+sipped
+sipper/SM
+sipping
+sip/S
+sired/C
+sire/MS
+siren/M
+sires/C
+siring/C
+Sirius/M
+sirloin/MS
+Sir/MS
+sirocco/MS
+sirred
+sirring
+sirup's
+sir/XGMNDS
+sisal/MS
+Sisely/M
+Sisile/M
+sis/S
+Sissie/M
+sissified
+Sissy/M
+sissy/TRSM
+sister/GDYMS
+sisterhood/MS
+sisterliness/MS
+sisterly/P
+sister's/A
+Sistine
+Sisyphean
+Sisyphus/M
+sit/AG
+sitarist/SM
+sitar/SM
+sitcom/SM
+site/DSJM
+sits
+sitter/MS
+sitting/SM
+situate/GNSDX
+situational/Y
+situationist
+situation/M
+situ/S
+situs/M
+Siusan/M
+Siva/M
+Siward/M
+sixfold
+sixgun
+six/MRSH
+sixpence/MS
+sixpenny
+sixshooter
+sixteen/HRSM
+sixteenths
+sixths
+sixth/Y
+sixtieths
+sixty/SMH
+sizableness/M
+sizable/P
+sized/UA
+size/GJDRSBMZ
+sizer/M
+sizes/A
+sizing/M
+sizzler/M
+sizzle/RSDG
+SJ
+Sjaelland/M
+SK
+ska/S
+skateboard/SJGZMDR
+skater/M
+skate/SM
+skat/JMDRGZ
+skedaddle/GSD
+skeet/RMS
+skein/MDGS
+skeletal/Y
+skeleton/MS
+Skell/M
+Skelly/M
+skeptical/Y
+skepticism/MS
+skeptic/SM
+sketchbook/SM
+sketcher/M
+sketchily
+sketchiness/MS
+sketch/MRSDZG
+sketchpad
+sketchy/PRT
+skew/DRSPGZ
+skewer/GDM
+skewing/M
+skewness/M
+skidded
+skidding
+skid/S
+skiff/GMDS
+skiing/M
+skilfully
+skill/DMSG
+skilled/U
+skillet/MS
+skillfulnesses
+skillfulness/MU
+skillful/YUP
+skilling/M
+skimmed
+skimmer/MS
+skimming/SM
+ski/MNJSG
+skimp/GDS
+skimpily
+skimpiness/MS
+skimpy/PRT
+skim/SM
+skincare
+skindive/G
+skinflint/MS
+skinhead/SM
+skinless
+skinned
+Skinner/M
+skinner/SM
+skinniness/MS
+skinning
+skinny/TRSP
+skin/SM
+skintight
+Skip/M
+skipped
+Skipper/M
+skipper/SGDM
+Skippie/M
+skipping
+Skipp/RM
+Skippy/M
+skip/S
+Skipton/M
+skirmisher/M
+skirmish/RSDMZG
+skirter/M
+skirting/M
+skirt/RDMGS
+skit/GSMD
+skitter/SDG
+skittishness/SM
+skittish/YP
+skittle/SM
+skivvy/GSDM
+skoal/SDG
+Skopje/M
+skulduggery/MS
+skulker/M
+skulk/SRDGZ
+skullcap/MS
+skullduggery's
+skull/SDM
+skunk/GMDS
+skycap/MS
+skydiver/SM
+skydiving/MS
+Skye/M
+skyhook
+skyjacker/M
+skyjack/ZSGRDJ
+Skylab/M
+skylarker/M
+skylark/SRDMG
+Skylar/M
+Skyler/M
+skylight/MS
+skyline/MS
+Sky/M
+sky/MDRSGZ
+skyrocket/GDMS
+skyscraper/M
+skyscrape/RZ
+skyward/S
+skywave
+skyway/M
+skywriter/MS
+skywriting/MS
+slabbed
+slabbing
+slab/MS
+slacken/DG
+slacker/M
+slackness/MS
+slack/SPGTZXYRDN
+Slade/M
+slagged
+slagging
+slag/MS
+slain
+slake/DSG
+slaked/U
+slalom/SGMD
+slammed
+slammer/S
+slamming
+slam/S
+slander/MDRZSG
+slanderousness/M
+slanderous/PY
+slang/SMGD
+slangy/TR
+slanting/Y
+slant/SDG
+slantwise
+slapdash/S
+slaphappy/TR
+slap/MS
+slapped
+slapper
+slapping
+slapstick/MS
+slash/GZRSD
+slashing/Y
+slater/M
+Slater/M
+slate/SM
+slather/SMDG
+slating/M
+slat/MDRSGZ
+slatted
+slattern/MYS
+slatting
+slaughterer/M
+slaughterhouse/SM
+slaughter/SJMRDGZ
+slave/DSRGZM
+slaveholder/SM
+slaver/GDM
+slavery/SM
+Slavic/M
+slavishness/SM
+slavish/YP
+Slav/MS
+Slavonic/M
+slaw/MS
+slay/RGZS
+sleaze/S
+sleazily
+sleaziness/SM
+sleazy/RTP
+sledded
+sledder/S
+sledding
+sledgehammer/MDGS
+sledge/SDGM
+sled/SM
+sleekness/S
+sleek/PYRDGTS
+sleeper/M
+sleepily
+sleepiness/SM
+sleeping/M
+sleeplessness/SM
+sleepless/YP
+sleepover/S
+sleep/RMGZS
+sleepwalker/M
+sleepwalk/JGRDZS
+sleepwear/M
+sleepyhead/MS
+sleepy/PTR
+sleet/DMSG
+sleety/TR
+sleeveless
+sleeve/SDGM
+sleeving/M
+sleigh/GMD
+sleighs
+sleight/SM
+sleken/DG
+slenderize/DSG
+slenderness/MS
+slender/RYTP
+slept
+Slesinger/M
+sleuth/GMD
+sleuths
+slew/DGS
+slice/DSRGZM
+sliced/U
+slicer/M
+slicker/M
+slickness/MS
+slick/PSYRDGTZ
+slider/M
+slide/S
+slid/GZDR
+slight/DRYPSTG
+slighter/M
+slighting/Y
+slightness/S
+slime/SM
+sliminess/S
+slimline
+slimmed
+slimmer/S
+slimmest
+slimming/S
+slimness/S
+slim/SPGYD
+slimy/PTR
+sling/GMRS
+slingshot/MS
+slings/U
+slink/GS
+slinky/RT
+slipcase/MS
+slipcover/GMDS
+slipknot/SM
+slippage/SM
+slipped
+slipper/GSMD
+slipperiness/S
+slippery/PRT
+slipping
+slipshod
+slip/SM
+slipstream/MDGS
+slipway/SM
+slither/DSG
+slithery
+slit/SM
+slitted
+slitter/S
+slitting
+sliver/GSDM
+slivery
+Sloane/M
+Sloan/M
+slobber/SDG
+slobbery
+slob/MS
+Slocum/M
+sloe/MS
+sloganeer/MG
+slogan/MS
+slogged
+slogging
+slog/S
+sloop/SM
+slop/DRSGZ
+sloped/U
+slope/S
+slopped
+sloppily
+sloppiness/SM
+slopping
+sloppy/RTP
+slosh/GSDM
+slothfulness/MS
+slothful/PY
+sloth/GDM
+sloths
+slot/MS
+slotted
+slotting
+slouch/DRSZG
+sloucher/M
+slouchy/RT
+slough/GMD
+sloughs
+Slovakia/M
+Slovakian/S
+Slovak/S
+Slovene/S
+Slovenia/M
+Slovenian/S
+slovenliness/SM
+slovenly/TRP
+sloven/YMS
+slowcoaches
+slowdown/MS
+slowish
+slowness/MS
+slow/PGTYDRS
+slowpoke/MS
+SLR
+sludge/SDGM
+sludgy/TR
+slue/MGDS
+sluggard/MS
+slugged
+slugger/SM
+slugging
+sluggishness/SM
+sluggish/YP
+slug/MS
+sluice/SDGM
+slumberer/M
+slumber/MDRGS
+slumberous
+slumlord/MS
+slummed
+slummer
+slumming
+slum/MS
+slummy/TR
+slump/DSG
+slung/U
+slunk
+slur/MS
+slurp/GSD
+slurred
+slurried/M
+slurring
+slurrying/M
+slurry/MGDS
+slushiness/SM
+slush/SDMG
+slushy/RTP
+slut/MS
+sluttish
+slutty/TR
+Sly/M
+slyness/MS
+sly/RTY
+smacker/M
+smack/SMRDGZ
+smallholders
+smallholding/MS
+smallish
+Small/M
+smallness/S
+smallpox/SM
+small/SGTRDP
+smalltalk
+smalltime
+Smallwood/M
+smarmy/RT
+smarten/GD
+smartness/S
+smartypants
+smart/YRDNSGTXP
+smasher/M
+smash/GZRSD
+smashing/Y
+smashup/S
+smattering/SM
+smearer/M
+smear/GRDS
+smeary/TR
+smeller/M
+smelliness/MS
+smell/SBRDG
+smelly/TRP
+smelter/M
+smelt/SRDGZ
+Smetana/M
+smidgen/MS
+smilax/MS
+smile/GMDSR
+smiley/M
+smilies
+smiling/UY
+smirch/SDG
+smirk/GSMD
+Smirnoff/M
+smite/GSR
+smiter/M
+smith/DMG
+smithereens
+Smithfield/M
+Smith/M
+smiths
+Smithsonian/M
+Smithson/M
+Smithtown/M
+smithy/SM
+smitten
+Smitty/M
+Sm/M
+smocking/M
+smock/SGMDJ
+smoggy/TR
+smog/SM
+smoke/GZMDSRBJ
+smokehouse/MS
+smokeless
+smoker/M
+smokescreen/S
+smokestack/MS
+Smokey/M
+smokiness/S
+smoking/M
+smoky/RSPT
+smoldering/Y
+smolder/SGD
+Smolensk/M
+Smollett/M
+smooch/SDG
+smoothen/DG
+smoother/M
+smoothie/SM
+smoothness/MS
+smooths
+smooth/TZGPRDNY
+smörgåsbord/SM
+smote
+smother/GSD
+SMSA/MS
+SMTP
+Smucker/M
+smudge/GSD
+smudginess/M
+smudgy/TRP
+smugged
+smugger
+smuggest
+smugging
+smuggle/JZGSRD
+smuggler/M
+smugness/MS
+smug/YSP
+smut/SM
+Smuts/M
+smutted
+smuttiness/SM
+smutting
+smutty/TRP
+Smyrna/M
+snack/SGMD
+snaffle/GDSM
+snafu/DMSG
+snagged
+snagging
+snag/MS
+snail/GSDM
+Snake
+snakebird/M
+snakebite/MS
+snake/DSGM
+snakelike
+snakeroot/M
+snaky/TR
+snapback/M
+snapdragon/MS
+snapped/U
+snapper/SM
+snappily
+snappiness/SM
+snapping/U
+snappishness/SM
+snappish/PY
+snappy/PTR
+snapshot/MS
+snapshotted
+snapshotting
+snap/US
+snare/DSRGM
+snarer/M
+snarf/JSGD
+snarler/M
+snarling/Y
+snarl/UGSD
+snarly/RT
+snatch/DRSZG
+snatcher/M
+snazzily
+snazzy/TR
+Snead/M
+sneaker/MD
+sneakily
+sneakiness/SM
+sneaking/Y
+sneak/RDGZS
+sneaky/PRT
+Sneed/M
+sneerer/M
+sneer/GMRDJS
+sneering/Y
+sneeze/SRDG
+Snell/M
+snicker/GMRD
+snick/MRZ
+snideness/M
+Snider/M
+snide/YTSRP
+sniffer/M
+sniff/GZSRD
+sniffle/GDRS
+sniffler/M
+sniffles/M
+snifter/MDSG
+snigger's
+sniper/M
+snipe/SM
+snipped
+snipper/SM
+snippet/SM
+snipping
+snippy/RT
+snip/SGDRZ
+snitch/GDS
+snit/SM
+sniveler/M
+snivel/JSZGDR
+Sn/M
+snobbery/SM
+snobbishness/S
+snobbish/YP
+snobby/RT
+snob/MS
+Snodgrass/M
+snood/SGDM
+snooker/GMD
+snook/SMRZ
+snooper/M
+snoop/SRDGZ
+Snoopy/M
+snoopy/RT
+snootily
+snootiness/MS
+snoot/SDMG
+snooty/TRP
+snooze/GSD
+snore/DSRGZ
+snorkel/ZGSRDM
+snorter/M
+snort/GSZRD
+snot/MS
+snotted
+snottily
+snottiness/SM
+snotting
+snotty/TRP
+snout/SGDM
+snowball/SDMG
+snowbank/SM
+Snowbelt/SM
+snowbird/SM
+snowblower/S
+snowboard/GZDRJS
+snowbound
+snowcapped
+snowdrift/MS
+snowdrop/MS
+snowfall/MS
+snowfield/MS
+snowflake/MS
+snow/GDMS
+snowily
+snowiness/MS
+Snow/M
+snowman/M
+snowmen
+snowmobile/GMDRS
+snowplough/M
+snowploughs
+snowplow/SMGD
+snowshed
+snowshoeing
+snowshoe/MRS
+snowshoer/M
+snowstorm/MS
+snowsuit/S
+snowy/RTP
+snubbed
+snubber
+snubbing
+snub/SP
+snuffbox/SM
+snuffer/M
+snuff/GZSYRD
+snuffle/GDSR
+snuffler/M
+snuffly/RT
+snugged
+snugger
+snuggest
+snugging
+snuggle/GDS
+snuggly
+snugness/MS
+snug/SYP
+Snyder/M
+so
+SO
+soaker/M
+soak/GDRSJ
+soapbox/DSMG
+soapiness/S
+soap/MDRGS
+soapstone/MS
+soapsud/S
+soapy/RPT
+soar/DRJSG
+soarer/M
+soaring/Y
+sobbed
+sobbing/Y
+soberer/M
+soberness/SM
+sober/PGTYRD
+sobriety/SIM
+sobriquet/MS
+sob/SZR
+Soc
+soccer/MS
+sociabilities
+sociability/IM
+sociable/S
+sociably/IU
+socialism/SM
+socialistic
+socialist/SM
+socialite/SM
+sociality/M
+socialization/SM
+socialized/U
+socializer/M
+socialize/RSDG
+socially/U
+social/SY
+societal/Y
+society/MS
+socio
+sociobiology/M
+sociocultural/Y
+sociodemographic
+socioeconomically
+socioeconomic/S
+sociolinguistics/M
+sociological/MY
+sociologist/SM
+sociology/SM
+sociometric
+sociometry/M
+sociopath/M
+sociopaths
+socket/SMDG
+sock/GDMS
+Socorro/M
+Socrates/M
+Socratic/S
+soc/S
+soda/SM
+sodded
+sodden/DYPSG
+soddenness/M
+sodding
+Soddy/M
+sodium/MS
+sod/MS
+sodomite/MS
+sodomize/GDS
+Sodom/M
+sodomy/SM
+soever
+sofa/SM
+Sofia/M
+Sofie/M
+softball/MS
+softbound
+softener/M
+soften/ZGRD
+softhearted
+softie's
+softness/MS
+soft/SPXTYNR
+software/MS
+softwood/SM
+softy/SM
+soggily
+sogginess/S
+soggy/RPT
+Soho/M
+soigné
+soiled/U
+soil/SGMD
+soirée/SM
+sojourn/RDZGSM
+solace/GMSRD
+solacer/M
+solaria
+solarium/M
+solar/S
+solder/RDMSZG
+soldier/MDYSG
+soldiery/MS
+sold/RU
+solecism/MS
+soled/FA
+solemness
+solemnify/GSD
+solemnity/MS
+solemnization/SM
+solemnize/GSD
+solemnness/SM
+solemn/PTRY
+solenoid/MS
+soler/F
+soles/IFA
+sole/YSP
+sol/GSMDR
+solicitation/S
+solicited/U
+solicitor/MS
+solicitousness/S
+solicitous/YP
+solicit/SDG
+solicitude/MS
+solidarity/MS
+solidi
+solidification/M
+solidify/NXSDG
+solidity/S
+solidness/SM
+solid/STYRP
+solidus/M
+soliloquies
+soliloquize/DSG
+soliloquy/M
+soling/NM
+solipsism/MS
+solipsist/S
+Solis/M
+solitaire/SM
+solitary/SP
+solitude/SM
+Sollie/M
+Solly/M
+Sol/MY
+solo/DMSG
+soloist/SM
+Solomon/SM
+Solon/M
+Soloviev/M
+solstice/SM
+solubility/IMS
+soluble/SI
+solute/ENAXS
+solute's
+solution/AME
+solvable/UI
+solvating
+solve/ABSRDZG
+solved/EU
+solvency/IMS
+solvent/IS
+solvently
+solvent's
+solver/MEA
+solves/E
+solving/E
+Solzhenitsyn/M
+Somalia/M
+Somalian/S
+Somali/MS
+soma/M
+somatic
+somberness/SM
+somber/PY
+sombre
+sombrero/SM
+somebody'll
+somebody/SM
+someday
+somehow
+someone'll
+someone/SM
+someplace/M
+somersault/DSGM
+Somerset/M
+somerset/S
+somersetted
+somersetting
+Somerville/M
+something/S
+sometime/S
+someway/S
+somewhat/S
+somewhere/S
+some/Z
+sommelier/SM
+Somme/M
+somnambulism/SM
+somnambulist/SM
+somnolence/MS
+somnolent/Y
+Somoza/M
+sonar/SM
+sonata/MS
+sonatina/SM
+Sondheim/M
+Sondra/M
+Sonenberg/M
+songbag
+songbird/SM
+songbook/S
+songfest/MS
+songfulness/M
+songful/YP
+Songhai/M
+Songhua/M
+song/MS
+songster/MS
+songstress/SM
+songwriter/SM
+songwriting
+Sonia/M
+sonic/S
+Sonja/M
+Son/M
+sonnet/MDSG
+Sonnie/M
+Sonni/M
+Sonnnie/M
+Sonny/M
+sonny/SM
+Sonoma/M
+Sonora/M
+sonority/S
+sonorousness/SM
+sonorous/PY
+son/SMY
+Sontag/M
+sonuvabitch
+Sonya/M
+Sony/M
+soonish
+soon/TR
+soothe
+soother/M
+sooth/GZTYSRDMJ
+soothingness/M
+soothing/YP
+sooths
+soothsayer/M
+soothsay/JGZR
+soot/MGDS
+sooty/RT
+SOP
+Sophey/M
+Sophia/SM
+Sophie/M
+Sophi/M
+sophism/SM
+sophister/M
+sophistical
+sophisticatedly
+sophisticated/U
+sophisticate/XNGDS
+sophistication/MU
+sophistic/S
+sophist/RMS
+sophistry/SM
+Sophoclean
+Sophocles/M
+sophomore/SM
+sophomoric
+Sophronia/M
+soporifically
+soporific/SM
+sopped
+sopping/S
+soppy/RT
+soprano/SM
+sop/SM
+Sopwith/M
+sorbet/SM
+Sorbonne/M
+sorcerer/MS
+sorceress/S
+sorcery/MS
+Sorcha/M
+sordidness/SM
+sordid/PY
+sorehead/SM
+soreness/S
+Sorensen/M
+Sorenson/M
+sore/PYTGDRS
+sorghum/MS
+sorority/MS
+sorrel/SM
+Sorrentine/M
+sorrily
+sorriness/SM
+sorrower/M
+sorrowfulness/SM
+sorrowful/YP
+sorrow/GRDMS
+sorry/PTSR
+sorta
+sortable
+sorted/U
+sorter/MS
+sort/FSAGD
+sortieing
+sortie/MSD
+sort's
+sos
+SOS
+Sosa/M
+Sosanna/M
+Soto/M
+sot/SM
+sottish
+soubriquet's
+soufflé/MS
+sough/DG
+soughs
+sought/U
+soulfulness/MS
+soulful/YP
+soulless/Y
+soul/MDS
+sound/AUD
+soundboard/MS
+sounders
+sounder's
+sounder/U
+soundest
+sounding/AY
+soundings
+sounding's
+soundless/Y
+soundly/U
+soundness/UMS
+soundproof/GSD
+soundproofing/M
+sound's
+sounds/A
+soundtrack/MS
+soupçon/SM
+soup/GMDS
+Souphanouvong/M
+soupy/RT
+source/ASDMG
+sourceless
+sourdough
+sourdoughs
+sourish
+sourness/MS
+sourpuss/MS
+sour/TYDRPSG
+Sousa/M
+sousaphone/SM
+sous/DSG
+souse
+sou/SMH
+Southampton/M
+southbound
+southeastern
+southeaster/YM
+Southeast/MS
+southeast/RZMS
+southeastward/S
+southerly/S
+souther/MY
+southerner/M
+Southerner/MS
+southernisms
+southernmost
+southern/PZSYR
+Southey/M
+Southfield/M
+southing/M
+southland/M
+South/M
+southpaw/MS
+south/RDMG
+souths
+Souths
+southward/S
+southwestern
+southwester/YM
+Southwest/MS
+southwest/RMSZ
+southwestward/S
+souvenir/SM
+sou'wester
+sovereignty/MS
+sovereign/YMS
+soviet/MS
+Soviet/S
+sow/ADGS
+sowbelly/M
+sowens/M
+sower/DS
+Soweto/M
+sown/A
+sox's
+soybean/MS
+Soyinka/M
+soy/MS
+Soyuz/M
+Spaatz/M
+spacecraft/MS
+space/DSRGZMJ
+spaceflight/S
+spaceman/M
+spacemen
+spaceport/SM
+spacer/M
+spaceship/MS
+spacesuit/MS
+spacewalk/GSMD
+Spacewar/M
+spacewoman
+spacewomen
+spacey
+spacial
+spacier
+spaciest
+spaciness
+spacing/M
+spaciousness/SM
+spacious/PY
+Spackle
+spade/DSRGM
+spadeful/SM
+spader/M
+spadework/SM
+spadices
+spadix/M
+Spafford/M
+spaghetti/SM
+Spahn/M
+Spain/M
+spake
+Spalding/M
+Spam/M
+spa/MS
+Span
+spandex/MS
+spandrels
+spangle/GMDS
+Spanglish/S
+Spaniard/SM
+spanielled
+spanielling
+spaniel/SM
+Spanish/M
+spanker/M
+spanking/M
+spank/SRDJG
+span/MS
+spanned/U
+spanner/SM
+spanning
+SPARC/M
+SPARCstation/M
+spar/DRMGTS
+spareness/MS
+spare/PSY
+spareribs
+sparer/M
+sparing/UY
+sparker/M
+sparkle/DRSGZ
+sparkler/M
+Sparkman/M
+Sparks
+spark/SGMRD
+sparky/RT
+sparling/SM
+sparred
+sparrer
+sparring/U
+sparrow/MS
+sparseness/S
+sparse/YP
+sparsity/S
+spars/TR
+Spartacus/M
+Sparta/M
+spartan
+Spartan/S
+spasm/GSDM
+spasmodic
+spasmodically
+spastic/S
+spate/SM
+spathe/MS
+spatiality/M
+spatial/Y
+spat/MS
+spatted
+spatter/DGS
+spatterdock/M
+spatting
+spatula/SM
+spavin/DMS
+spawner/M
+spawn/MRDSG
+spay/DGS
+SPCA
+speakable/U
+speakeasy/SM
+speaker/M
+Speaker's
+speakership/M
+speaking/U
+speak/RBGZJS
+spearer/M
+spearfish/SDMG
+spearhead/GSDM
+spearmint/MS
+spear/MRDGS
+Spears
+spec'd
+specialism/MS
+specialist/MS
+specialization/SM
+specialized/U
+specialize/GZDSR
+specializing/U
+special/SRYP
+specialty/MS
+specie/MS
+specif
+specifiability
+specifiable
+specifiably
+specifically
+specification/SM
+specificity/S
+specific/SP
+specified/U
+specifier/SM
+specifies
+specify/AD
+specifying
+specimen/SM
+spec'ing
+speciousness/SM
+specious/YP
+speck/GMDS
+speckle/GMDS
+spec/SM
+spectacle/MSD
+spectacular/SY
+spectator/SM
+specter/DMS
+specter's/A
+spectralness/M
+spectral/YP
+spectra/M
+spectrogram/MS
+spectrographically
+spectrograph/M
+spectrography/M
+spectrometer/MS
+spectrometric
+spectrometry/M
+spectrophotometer/SM
+spectrophotometric
+spectrophotometry/M
+spectroscope/SM
+spectroscopic
+spectroscopically
+spectroscopy/SM
+spectrum/M
+specularity
+specular/Y
+speculate/VNGSDX
+speculation/M
+speculative/Y
+speculator/SM
+sped
+speech/GMDS
+speechlessness/SM
+speechless/YP
+speedboat/GSRM
+speedboating/M
+speeder/M
+speedily
+speediness/SM
+speedometer/MS
+speed/RMJGZS
+speedster/SM
+speedup/MS
+speedway/SM
+speedwell/MS
+speedy/PTR
+speer/M
+speleological
+speleologist/S
+speleology/MS
+spellbinder/M
+spellbind/SRGZ
+spellbound
+spelldown/MS
+spelled/A
+speller/M
+spelling/M
+spell/RDSJGZ
+spells/A
+spelunker/MS
+spelunking/S
+Spencerian
+Spencer/M
+Spence/RM
+spender/M
+spend/SBJRGZ
+spendthrift/MS
+Spenglerian
+Spengler/M
+Spense/MR
+Spenserian
+Spenser/M
+spent/U
+spermatophyte/M
+spermatozoa
+spermatozoon/M
+spermicidal
+spermicide/MS
+sperm/SM
+Sperry/M
+spew/DRGZJS
+spewer/M
+SPF
+sphagnum/SM
+sphere/SDGM
+spherical/Y
+spheric/S
+spherics/M
+spheroidal/Y
+spheroid/SM
+spherule/MS
+sphincter/SM
+Sphinx/M
+sphinx/MS
+Spica/M
+spic/DGM
+spicebush/M
+spice/SM
+spicily
+spiciness/SM
+spicule/MS
+spicy/PTR
+spider/SM
+spiderweb/S
+spiderwort/M
+spidery/TR
+Spiegel/M
+Spielberg/M
+spiel/GDMS
+spier/M
+spiffy/TDRSG
+spigot/MS
+spike/GMDSR
+Spike/M
+spiker/M
+spikiness/SM
+spiky/PTR
+spillage/SM
+Spillane/M
+spillover/SM
+spill/RDSG
+spillway/SM
+spinach/MS
+spinal/YS
+spindle/JGMDRS
+spindly/RT
+spinelessness/M
+spineless/YP
+spine/MS
+spinet/SM
+spininess/M
+spinnability/M
+spinnaker/SM
+spinneret/MS
+spinner/SM
+spinning/SM
+Spinoza/M
+spin/S
+spinsterhood/SM
+spinsterish
+spinster/MS
+spiny/PRT
+spiracle/SM
+spiraea's
+spiral/YDSG
+spire/AIDSGF
+spirea/MS
+spire's
+spiritedness/M
+spirited/PY
+spirit/GMDS
+spiritless
+spirits/I
+spiritualism/SM
+spiritualistic
+spiritualist/SM
+spirituality/SM
+spiritual/SYP
+spirituous
+spirochete/SM
+Spiro/M
+spiry/TR
+spitball/SM
+spite/CSDAG
+spitefuller
+spitefullest
+spitefulness/MS
+spiteful/PY
+spite's/A
+spitfire/SM
+spit/SGD
+spitted
+spitting
+spittle/SM
+spittoon/SM
+Spitz/M
+splashdown/MS
+splasher/M
+splash/GZDRS
+splashily
+splashiness/MS
+splashy/RTP
+splat/SM
+splatted
+splatter/DSG
+splatting
+splayfeet
+splayfoot/MD
+splay/SDG
+spleen/SM
+splendidness/M
+splendid/YRPT
+splendorous
+splendor/SM
+splenetic/S
+splicer/M
+splice/RSDGZJ
+spline/MSD
+splinter/GMD
+splintery
+splint/SGZMDR
+splits/M
+split/SM
+splittable
+splitter/MS
+splitting/S
+splodge/SM
+splotch/MSDG
+splotchy/RT
+splurge/GMDS
+splutterer/M
+splutter/RDSG
+Sp/M
+Spock/M
+spoilables
+spoilage/SM
+spoil/CSZGDR
+spoiled/U
+spoiler/MC
+spoilsport/SM
+Spokane/M
+spoke/DSG
+spoken/U
+spokeshave/MS
+spokesman/M
+spokesmen
+spokespeople
+spokesperson/S
+spokeswoman/M
+spokeswomen
+spoliation/MCS
+spongecake
+sponge/GMZRSD
+sponger/M
+sponginess/S
+spongy/TRP
+sponsor/DGMS
+sponsorship/S
+spontaneity/SM
+spontaneousness/M
+spontaneous/PY
+spoof/SMDG
+spookiness/MS
+spook/SMDG
+spooky/PRT
+spool/SRDMGZ
+spoonbill/SM
+spoonerism/SM
+spoonful/MS
+spoon/GSMD
+spoor/GSMD
+sporadically
+sporadic/Y
+spore/DSGM
+sporran/MS
+sportiness/SM
+sporting/Y
+sportiveness/M
+sportive/PY
+sportscast/RSGZM
+sportsmanlike/U
+sportsman/MY
+sportsmanship/MS
+sportsmen
+sportswear/M
+sportswoman/M
+sportswomen
+sportswriter/S
+sport/VGSRDM
+sporty/PRT
+Sposato/M
+spotlessness/MS
+spotless/YP
+spotlight/GDMS
+spotlit
+spot/MSC
+spotted/U
+spotter/MS
+spottily
+spottiness/SM
+spotting/M
+spotty/RTP
+spousal/MS
+spouse/GMSD
+spouter/M
+spout/SGRD
+sprain/SGD
+sprang/S
+sprat/SM
+sprawl/GSD
+sprayed/UA
+sprayer/M
+spray/GZSRDM
+sprays/A
+spreadeagled
+spreader/M
+spread/RSJGZB
+spreadsheet/S
+spreeing
+spree/MDS
+sprigged
+sprigging
+sprightliness/MS
+sprightly/PRT
+sprig/MS
+springboard/MS
+springbok/MS
+springeing
+springer/M
+Springfield/M
+springily
+springiness/SM
+springing/M
+springlike
+spring/SGZR
+Springsteen/M
+springtime/MS
+springy/TRP
+sprinkle/DRSJZG
+sprinkler/DM
+sprinkling/M
+Sprint/M
+sprint/SGZMDR
+sprite/SM
+spritz/GZDSR
+sprocket/DMGS
+sprocketed/U
+Sproul/M
+sprout/GSD
+spruce/GMTYRSDP
+spruceness/SM
+sprue/M
+sprung/U
+spryness/S
+spry/TRY
+SPSS
+spudded
+spudding
+spud/MS
+Spuds/M
+spume/DSGM
+spumone's
+spumoni/S
+spumy/TR
+spun
+spunk/GSMD
+spunky/SRT
+spurge/MS
+spuriousness/SM
+spurious/PY
+spur/MS
+spurn/RDSG
+spurred
+spurring
+spurt/SGD
+sputa
+Sputnik
+sputnik/MS
+sputter/DRGS
+sputum/M
+spy/DRSGM
+spyglass/MS
+sq
+sqq
+sqrt
+squabbed
+squabber
+squabbest
+squabbing
+squabbler/M
+squabble/ZGDRS
+squab/SM
+squadded
+squadding
+squadron/MDGS
+squad/SM
+squalidness/SM
+squalid/PRYT
+squaller/M
+squall/GMRDS
+squally/RT
+squalor/SM
+squamous/Y
+squander/GSRD
+Squanto
+square/GMTYRSDP
+squareness/SM
+squarer/M
+Squaresville/M
+squarish
+squash/GSRD
+squashiness/M
+squashy/RTP
+squatness/MS
+squat/SPY
+squatted
+squatter/SMDG
+squattest
+squatting
+squawker/M
+squawk/GRDMZS
+squaw/SM
+squeaker/M
+squeakily
+squeakiness/S
+squeak/RDMGZS
+squeaky/RPT
+squealer/M
+squeal/MRDSGZ
+squeamishness/SM
+squeamish/YP
+squeegee/DSM
+squeegeeing
+squeeze/GZSRDB
+squeezer/M
+squelcher/M
+squelch/GDRS
+squelchy/RT
+squibbed
+Squibb/GM
+squibbing
+Squibbing/M
+squib/SM
+squidded
+squidding
+squid/SM
+squiggle/MGDS
+squiggly/RT
+squinter/M
+squint/GTSRD
+squinting/Y
+squirehood
+squire/SDGM
+squirm/SGD
+squirmy/TR
+squirrel/SGYDM
+squirter/M
+squirt/GSRD
+squish/GSD
+squishy/RTP
+Sr
+Srinagar/M
+SRO
+S's
+SS
+SSA
+SSE
+ssh
+s's/KI
+SSS
+SST
+SSW
+ST
+stabbed
+stabber/S
+stabbing/S
+stability/ISM
+stabilizability
+stabilization/CS
+stabilization's
+stabilize/CGSD
+stabilizer/MS
+stableman/M
+stablemate
+stablemen
+stableness/UM
+stable/RSDGMTP
+stabler/U
+stable's/F
+stables/F
+stablest/U
+stabling/M
+stably/U
+stab/YS
+staccato/S
+Stacee/M
+Stace/M
+Stacey/M
+Stacia/M
+Stacie/M
+Staci/M
+stackable
+stacker/M
+stack's
+stack/USDG
+Stacy/M
+stadias
+stadia's
+stadium/MS
+Stael/M
+Stafani/M
+staff/ADSG
+Staffard/M
+staffer/MS
+Stafford/M
+Staffordshire/M
+staffroom
+staff's
+Staford/M
+stag/DRMJSGZ
+stagecoach/MS
+stagecraft/MS
+stagehand/MS
+stager/M
+stage/SM
+stagestruck
+stagflation/SM
+stagged
+staggerer/M
+stagger/GSJDR
+staggering/Y
+staggers/M
+stagging
+staginess/M
+staging/M
+stagnancy/SM
+stagnant/Y
+stagnate/NGDSX
+stagnation/M
+stagy/PTR
+Stahl/M
+staidness/MS
+staid/YRTP
+stained/U
+stainer/M
+stainless/YS
+stain/SGRD
+staircase/SM
+stair/MS
+stairway/SM
+stairwell/MS
+stake/DSGM
+stakeholder/S
+stakeout/SM
+stalactite/SM
+stalag/M
+stalagmite/SM
+stalemate/SDMG
+staleness/MS
+stale/PGYTDSR
+Staley/M
+Stalingrad/M
+Stalinist
+Stalin/SM
+stalker/M
+stalk/MRDSGZJ
+stall/DMSJG
+stalled/I
+stallholders
+stallion/SM
+Stallone/M
+stalls/I
+stalwartness/M
+stalwart/PYS
+Sta/M
+stamen/MS
+Stamford/M
+stamina/SM
+staminate
+stammer/DRSZG
+stammerer/M
+stammering/Y
+stampede/MGDRS
+stampeder/M
+stamped/U
+stamper/M
+stamp/RDSGZJ
+stance/MIS
+stancher/M
+stanch/GDRST
+stanchion/SGMD
+standalone
+standardization/AMS
+standardized/U
+standardize/GZDSR
+standardizer/M
+standardizes/A
+standard/YMS
+standby
+standbys
+standee/MS
+Standford/M
+standing/M
+Standish/M
+standoffish
+standoff/SM
+standout/MS
+standpipe/MS
+standpoint/SM
+stand/SJGZR
+standstill/SM
+Stanfield/M
+Stanford/M
+Stanislas/M
+Stanislaus/M
+Stanislavsky/M
+Stanislaw/M
+stank/S
+Stanleigh/M
+Stanley/M
+Stanly/M
+stannic
+stannous
+Stanton/M
+Stanwood/M
+Stan/YMS
+stanza/MS
+staph/M
+staphs
+staphylococcal
+staphylococci
+staphylococcus/M
+stapled/U
+stapler/M
+Stapleton/M
+staple/ZRSDGM
+starboard/SDMG
+starchily
+starchiness/MS
+starch/MDSG
+starchy/TRP
+stardom/MS
+star/DRMGZS
+stardust/MS
+stare/S
+starfish/SM
+Stargate/M
+stargaze/ZGDRS
+staring/U
+Starkey/M
+Stark/M
+starkness/MS
+stark/SPGTYRD
+Starla/M
+Starlene/M
+starless
+starlet/MS
+starlight/MS
+starling/MS
+Starlin/M
+starlit
+Star/M
+starred
+starring
+Starr/M
+starry/TR
+starship
+starstruck
+start/ASGDR
+starter/MS
+startle/GDS
+startling/PY
+startup/SM
+starvation/MS
+starveling/M
+starver/M
+starve/RSDG
+stash/GSD
+stasis/M
+stat/DRSGV
+statecraft/MS
+stated/U
+statehood/MS
+statehouse/S
+Statehouse's
+state/IGASD
+statelessness/MS
+stateless/P
+stateliness/MS
+stately/PRT
+statement/MSA
+Staten/M
+stater/M
+stateroom/SM
+stateside
+state's/K
+states/K
+statesmanlike
+statesman/MY
+statesmanship/SM
+statesmen
+stateswoman
+stateswomen
+statewide
+statical/Y
+static/S
+statics/M
+stationarity
+stationary/S
+stationer/M
+stationery/MS
+stationmaster/M
+station/SZGMDR
+statistical/Y
+statistician/MS
+statistic/MS
+Statler/M
+stator/SM
+statuary/SM
+statue/MSD
+statuesque/YP
+statuette/MS
+stature/MS
+status/SM
+statute/SM
+statutorily
+statutory/P
+Stauffer/M
+staunchness/S
+staunch/PDRSYTG
+stave/DGM
+Stavro/MS
+stay/DRGZS
+stayer/M
+std
+STD
+stdio
+steadfastness/MS
+steadfast/PY
+steadily/U
+steadiness's
+steadiness/US
+steading/M
+stead/SGDM
+steady/DRSUTGP
+steakhouse/SM
+steak/SM
+stealer/M
+stealing/M
+steal/SRHG
+stealthily
+stealthiness/MS
+stealth/M
+stealths
+stealthy/PTR
+steamboat/MS
+steamer/MDG
+steamfitter/S
+steamfitting/S
+steamily
+steaminess/SM
+steamroller/DMG
+steamroll/GZRDS
+steam/SGZRDMJ
+steamship/SM
+steamy/RSTP
+Stearne/M
+Stearn/SM
+steed/SM
+Steele/M
+steeliness/SM
+steelmaker/M
+steel/SDMGZ
+steelworker/M
+steelwork/ZSMR
+steelyard/MS
+steely/TPRS
+Steen/M
+steepen/GD
+steeper/M
+steeplebush/M
+steeplechase/GMSD
+steeplejack/MS
+steeple/MS
+steepness/S
+steep/SYRNDPGTX
+steerage/MS
+steerer/M
+steer/SGBRDJ
+steersman/M
+steersmen
+steeves
+Stefa/M
+Stefania/M
+Stefanie/M
+Stefan/M
+Stefano/M
+Steffane/M
+Steffen/M
+Steffie/M
+Steffi/M
+stegosauri
+stegosaurus/S
+Steinbeck/SM
+Steinberg/M
+Steinem/M
+Steiner/M
+Steinmetz/M
+Stein/RM
+stein/SGZMRD
+Steinway/M
+Stella/M
+stellar
+stellated
+Ste/M
+stemless
+stemmed/U
+stemming
+stem/MS
+stemware/MS
+stench/GMDS
+stenciler/M
+stencil/GDRMSZ
+stencillings
+Stendhal/M
+Stendler/M
+Stengel/M
+stenographer/SM
+stenographic
+stenography/SM
+steno/SM
+stenotype/M
+stentorian
+stepbrother/MS
+stepchild/M
+stepchildren
+stepdaughter/MS
+stepfather/SM
+Stepha/M
+Stephana/M
+Stephanie/M
+Stephani/M
+Stephan/M
+Stephannie/M
+Stephanus/M
+Stephenie/M
+Stephen/MS
+Stephenson/M
+Stephie/M
+Stephi/M
+Stephine/M
+stepladder/SM
+step/MIS
+stepmother/SM
+stepparent/SM
+stepper/M
+steppe/RSDGMZ
+steppingstone/S
+stepsister/SM
+stepson/SM
+stepwise
+stereographic
+stereography/M
+stereo/GSDM
+stereophonic
+stereoscope/MS
+stereoscopic
+stereoscopically
+stereoscopy/M
+stereotype/GMZDRS
+stereotypic
+stereotypical/Y
+sterile
+sterility/SM
+sterilization/SM
+sterilized/U
+sterilize/RSDGZ
+sterilizes/A
+Sterling/M
+sterling/MPYS
+sterlingness/M
+sternal
+Sternberg/M
+Sterne/M
+Stern/M
+sternness/S
+Sterno
+stern/SYRDPGT
+sternum/SM
+steroidal
+steroid/MS
+stertorous
+Stesha/M
+stethoscope/SM
+stet/MS
+stetson/MS
+Stetson/SM
+stetted
+stetting
+Steuben/M
+Stevana/M
+stevedore/GMSD
+Steve/M
+Stevena/M
+Steven/MS
+Stevenson/M
+Stevie/M
+Stevy/M
+steward/DMSG
+stewardess/SM
+Steward/M
+stewardship/MS
+Stewart/M
+stew/GDMS
+st/GBJ
+sticker/M
+stickily
+stickiness/SM
+stickleback/MS
+stickle/GZDR
+stickler/M
+stick/MRDSGZ
+stickpin/SM
+stickup/SM
+sticky/GPTDRS
+Stieglitz/M
+stiffen/JZRDG
+stiff/GTXPSYRND
+stiffness/MS
+stifle/GJRSD
+stifler/M
+stifling/Y
+stigma/MS
+stigmata
+stigmatic/S
+stigmatization/C
+stigmatizations
+stigmatization's
+stigmatize/DSG
+stigmatized/U
+stile/GMDS
+stiletto/MDSG
+stillbirth/M
+stillbirths
+stillborn/S
+stiller/MI
+stillest
+Stillman/M
+Stillmann/M
+stillness/MS
+still/RDIGS
+Stillwell/M
+stilted/PY
+stilt/GDMS
+Stilton/MS
+Stimson/M
+stimulant/MS
+stimulated/U
+stimulate/SDVGNX
+stimulation/M
+stimulative/S
+stimulator/M
+stimulatory
+stimuli/M
+stimulus/MS
+Stine/M
+stinger/M
+sting/GZR
+stingily
+stinginess/MS
+stinging/Y
+stingray/MS
+stingy/RTP
+stinkbug/S
+stinker/M
+stink/GZRJS
+stinking/Y
+stinkpot/M
+Stinky/M
+stinky/RT
+stinter/M
+stinting/U
+stint/JGRDMS
+stipendiary
+stipend/MS
+stipple/JDRSG
+stippler/M
+stipulate/XNGSD
+stipulation/M
+Stirling/M
+stirred/U
+stirrer/SM
+stirring/YS
+stirrup/SM
+stir/S
+stitch/ASDG
+stitcher/M
+stitchery/S
+stitching/MS
+stitch's
+St/M
+stoat/SM
+stochastic
+stochastically
+stochasticity
+stockade/SDMG
+stockbreeder/SM
+stockbroker/MS
+stockbroking/S
+stocker/SM
+Stockhausen/M
+stockholder/SM
+Stockholm/M
+stockily
+stockiness/SM
+stockinet's
+stockinette/S
+stocking/MDS
+stockist/MS
+stockpile/GRSD
+stockpiler/M
+stockpot/MS
+stockroom/MS
+stock's
+stock/SGAD
+stocktaking/MS
+Stockton/M
+stockyard/SM
+stocky/PRT
+Stoddard/M
+stodge/M
+stodgily
+stodginess/S
+stodgy/TRP
+stogy/SM
+stoical/Y
+stoichiometric
+stoichiometry/M
+stoicism/SM
+Stoicism/SM
+stoic/MS
+Stoic/MS
+stoke/DSRGZ
+stoker/M
+stokes/M
+Stokes/M
+STOL
+stole/MDS
+stolen
+stolidity/S
+stolidness/S
+stolid/PTYR
+stolon/SM
+stomachache/MS
+stomacher/M
+stomach/RSDMZG
+stomachs
+stomp/DSG
+stonecutter/SM
+stone/DSRGM
+Stonehenge/M
+stoneless
+Stone/M
+stonemason/MS
+stoner/M
+stonewall/GDS
+stoneware/MS
+stonewashed
+stonework/SM
+stonewort/M
+stonily
+stoniness/MS
+stony/TPR
+stood
+stooge/SDGM
+stool/SDMG
+stoop/SDG
+stopcock/MS
+stopgap/SM
+stoplight/SM
+stopover/MS
+stoppable/U
+stoppage/MS
+Stoppard/M
+stopped/U
+stopper/GMDS
+stopping/M
+stopple/GDSM
+stop's
+stops/M
+stop/US
+stopwatch/SM
+storage/SM
+store/ADSRG
+storefront/SM
+storehouse/MS
+storekeeper/M
+storekeep/ZR
+storeroom/SM
+store's
+stork/SM
+stormbound
+stormer/M
+Stormie/M
+stormily
+Stormi/M
+storminess/S
+Storm/M
+storm/SRDMGZ
+stormtroopers
+Stormy/M
+stormy/PTR
+storyboard/MDSG
+storybook/MS
+story/GSDM
+storyline
+storyteller/SM
+storytelling/MS
+Stouffer/M
+stoup/SM
+stouten/DG
+stouthearted
+Stout/M
+stoutness/MS
+stout/STYRNP
+stove/DSRGM
+stovepipe/SM
+stover/M
+stowage/SM
+stowaway/MS
+Stowe/M
+stow/GDS
+Strabo/M
+straddler/M
+straddle/ZDRSG
+Stradivari/SM
+Stradivarius/M
+strafe/GRSD
+strafer/M
+straggle/GDRSZ
+straggly/RT
+straightaway/S
+straightedge/MS
+straightener/M
+straighten/ZGDR
+straightforwardness/MS
+straightforward/SYP
+straightjacket's
+straightness/MS
+straight/RNDYSTXGP
+straightway/S
+strain/ASGZDR
+strained/UF
+strainer/MA
+straining/F
+strains/F
+straiten/DG
+straitjacket/GDMS
+straitlaced
+straitness/M
+strait/XTPSMGYDNR
+stranded/P
+strand/SDRG
+strangeness/SM
+strange/PYZTR
+stranger/GMD
+stranglehold/MS
+strangle/JDRSZG
+strangles/M
+strangulate/NGSDX
+strangulation/M
+strapless/S
+strapped/U
+strapping/S
+strap's
+strap/US
+Strasbourg/M
+stratagem/SM
+strata/MS
+strategical/Y
+strategic/S
+strategics/M
+strategist/SM
+strategy/SM
+Stratford/M
+strati
+stratification/M
+stratified/U
+stratify/NSDGX
+stratigraphic
+stratigraphical
+stratigraphy/M
+stratosphere/SM
+stratospheric
+stratospherically
+stratum/M
+stratus/M
+Strauss
+Stravinsky/M
+strawberry/SM
+strawflower/SM
+straw/SMDG
+strayer/M
+stray/GSRDM
+streak/DRMSGZ
+streaker/M
+streaky/TR
+streamed/U
+streamer/M
+stream/GZSMDR
+streaming/M
+streamline/SRDGM
+streetcar/MS
+streetlight/SM
+street/SMZ
+streetwalker/MS
+streetwise
+Streisand/M
+strengthen/AGDS
+strengthener/MS
+strength/NMX
+strengths
+strenuousness/SM
+strenuous/PY
+strep/MS
+streptococcal
+streptococci
+streptococcus/M
+streptomycin/SM
+stress/DSMG
+stressed/U
+stressful/YP
+stretchability/M
+stretchable/U
+stretch/BDRSZG
+stretcher/DMG
+stretchy/TRP
+strew/GDHS
+strewn
+striae
+stria/M
+striate/DSXGN
+striated/U
+striation/M
+stricken
+Strickland/M
+strict/AF
+stricter
+strictest
+strictly
+strictness/S
+stricture/SM
+stridden
+stridency/S
+strident/Y
+strider/M
+stride/RSGM
+strife/SM
+strikebreaker/M
+strikebreaking/M
+strikebreak/ZGR
+strikeout/S
+striker/M
+strike/RSGZJ
+striking/Y
+Strindberg/M
+stringed
+stringency/S
+stringent/Y
+stringer/MS
+stringiness/SM
+stringing/M
+string's
+string/SAG
+stringy/RTP
+striper/M
+stripe/SM
+strip/GRDMS
+stripling/M
+stripped/U
+stripper/MS
+stripping
+stripteaser/M
+striptease/SRDGZM
+stripy/RT
+strive/JRSG
+striven
+striver/M
+strobe/SDGM
+stroboscope/SM
+stroboscopic
+strode
+stroke/ZRSDGM
+stroking/M
+stroller/M
+stroll/GZSDR
+Stromberg/M
+Stromboli/M
+Strom/M
+strongbow
+strongbox/MS
+Strongheart/M
+stronghold/SM
+strongish
+Strong/M
+strongman/M
+strongmen
+strongroom/MS
+strong/YRT
+strontium/SM
+strophe/MS
+strophic
+stropped
+stropping
+strop/SM
+strove
+struck
+structuralism/M
+structuralist/SM
+structural/Y
+structured/AU
+structureless
+structures/A
+structure/SRDMG
+structuring/A
+strudel/MS
+struggle/GDRS
+struggler/M
+strummed
+strumming
+strumpet/GSDM
+strum/S
+strung/UA
+strut/S
+strutted
+strutter/M
+strutting
+strychnine/MS
+Stuart/MS
+stubbed/M
+stubbing
+Stubblefield/MS
+stubble/SM
+stubbly/RT
+stubbornness/SM
+stubborn/SGTYRDP
+stubby/SRT
+stub/MS
+stuccoes
+stucco/GDM
+stuck/U
+studbook/SM
+studded
+studding/SM
+Studebaker/M
+studentship/MS
+student/SM
+studiedness/M
+studied/PY
+studier/SM
+studio/MS
+studiousness/SM
+studious/PY
+stud/MS
+study/AGDS
+stuffily
+stuffiness/SM
+stuffing/M
+stuff/JGSRD
+stuffy/TRP
+stultify/NXGSD
+Stu/M
+stumble/GZDSR
+stumbling/Y
+stumpage/M
+stumper/M
+stump/RDMSG
+stumpy/RT
+stung
+stunk
+stunned
+stunner/M
+stunning/Y
+stun/S
+stunted/P
+stunt/GSDM
+stupefaction/SM
+stupefy/DSG
+stupendousness/M
+stupendous/PY
+stupidity/SM
+stupidness/M
+stupid/PTYRS
+stupor/MS
+sturdily
+sturdiness/SM
+sturdy/SRPT
+sturgeon/SM
+Sturm/M
+stutter/DRSZG
+Stuttgart/M
+Stuyvesant/M
+sty/DSGM
+Stygian
+styled/A
+style/GZMDSR
+styles/A
+styli
+styling/A
+stylishness/S
+stylish/PY
+stylistically
+stylistic/S
+stylist/MS
+stylites
+stylization/MS
+stylize/DSG
+stylos
+stylus/SM
+stymieing
+stymie/SD
+stymy's
+styptic/S
+styrene/MS
+Styrofoam/S
+Styx/M
+suable
+Suarez/M
+suasion/EMS
+suaveness/S
+suave/PRYT
+suavity/SM
+subaltern/SM
+subarctic/S
+subareas
+Subaru/M
+subassembly/M
+subatomic/S
+subbasement/SM
+subbed
+subbing
+subbranch/S
+subcaste/M
+subcategorizing
+subcategory/SM
+subchain
+subclassifications
+subclass/MS
+subclauses
+subcommand/S
+subcommittee/SM
+subcompact/S
+subcomponent/MS
+subcomputation/MS
+subconcept
+subconsciousness/SM
+subconscious/PSY
+subconstituent
+subcontinental
+subcontinent/MS
+subcontractor/SM
+subcontract/SMDG
+subcultural
+subculture/GMDS
+subcutaneous/Y
+subdirectory/S
+subdistrict/M
+subdivide/SRDG
+subdivision/SM
+subdued/Y
+subdue/GRSD
+subduer/M
+subexpression/MS
+subfamily/SM
+subfield/MS
+subfile/SM
+subfreezing
+subgoal/SM
+subgraph
+subgraphs
+subgroup/SGM
+subharmonic/S
+subheading/M
+subhead/MGJS
+subhuman/S
+subindex/M
+subinterval/MS
+subj
+subject/GVDMS
+subjection/SM
+subjectiveness/M
+subjective/PSY
+subjectivist/S
+subjectivity/SM
+subjoin/DSG
+subjugate/NGXSD
+subjugation/M
+subjunctive/S
+sublayer
+sublease/DSMG
+sublet/S
+subletting
+sublimate/GNSDX
+sublimation/M
+sublime/GRSDTYP
+sublimeness/M
+sublimer/M
+subliminal/Y
+sublimity/SM
+sublist/SM
+subliterary
+sublunary
+submachine
+submarginal
+submarine/MZGSRD
+submariner/M
+submerge/DSG
+submergence/SM
+submerse/XNGDS
+submersible/S
+submersion/M
+submicroscopic
+submission/SAM
+submissiveness/MS
+submissive/PY
+submit/SA
+submittable
+submittal
+submitted/A
+submitter/S
+submitting/A
+submode/S
+submodule/MS
+sub/MS
+subnational
+subnet/SM
+subnetwork/SM
+subnormal/SY
+suboptimal
+suborbital
+suborder/MS
+subordinately/I
+subordinates/I
+subordinate/YVNGXPSD
+subordination/IMS
+subordinator
+subornation/SM
+suborn/GSD
+subpage
+subparagraph/M
+subpart/MS
+subplot/MS
+subpoena/GSDM
+subpopulation/MS
+subproblem/SM
+subprocess/SM
+subprofessional/S
+subprogram/SM
+subproject
+subproof/SM
+subquestion/MS
+subrange/SM
+subregional/Y
+subregion/MS
+subrogation/M
+subroutine/SM
+subsample/MS
+subschema/MS
+subscribe/ASDG
+subscriber/SM
+subscripted/U
+subscription/MS
+subscript/SGD
+subsection/SM
+subsegment/SM
+subsentence
+subsequence/MS
+subsequent/SYP
+subservience/SM
+subservient/SY
+subset/MS
+subsidence/MS
+subside/SDG
+subsidiarity
+subsidiary/MS
+subsidization/MS
+subsidized/U
+subsidizer/M
+subsidize/ZRSDG
+subsidy/MS
+subsistence/MS
+subsistent
+subsist/SGD
+subsocietal
+subsoil/DRMSG
+subsonic
+subspace/MS
+subspecies/M
+substance/MS
+substandard
+substantially/IU
+substantialness/M
+substantial/PYS
+substantiated/U
+substantiate/VGNSDX
+substantiation/MFS
+substantiveness/M
+substantive/PSYM
+substantivity
+substation/MS
+substerilization
+substitutability
+substituted/U
+substitute/NGVBXDRS
+substitutionary
+substitution/M
+substitutive/Y
+substrata
+substrate/MS
+substratum/M
+substring/S
+substructure/SM
+subsume/SDG
+subsurface/S
+subsystem/MS
+subtable/S
+subtask/SM
+subteen/SM
+subtenancy/MS
+subtenant/SM
+subtend/DS
+subterfuge/SM
+subterranean/SY
+subtest
+subtext/SM
+subtitle/DSMG
+subtleness/M
+subtle/RPT
+subtlety/MS
+subtly/U
+subtopic/SM
+subtotal/GSDM
+subtracter/M
+subtraction/MS
+subtract/SRDZVG
+subtrahend/SM
+subtree/SM
+subtropical
+subtropic/S
+subtype/MS
+subunit/SM
+suburbanite/MS
+suburbanization/MS
+suburbanized
+suburbanizing
+suburban/S
+suburbia/SM
+suburb/MS
+subvention/MS
+subversion/SM
+subversiveness/MS
+subversive/SPY
+subverter/M
+subvert/SGDR
+subway/MDGS
+subzero
+succeeder/M
+succeed/GDRS
+successfulness/M
+successful/UY
+succession/SM
+successiveness/M
+successive/YP
+success/MSV
+successor/MS
+successorship
+succinctness/SM
+succinct/RYPT
+succored/U
+succorer/M
+succor/SGZRDM
+succotash/SM
+succubus/M
+succulence/SM
+succulency/MS
+succulent/S
+succumb/SDG
+such
+suchlike
+sucker/DMG
+suck/GZSDRB
+suckle/SDJG
+suckling/M
+Sucre/M
+sucrose/MS
+suction/SMGD
+Sudanese/M
+Sudanic/M
+Sudan/M
+suddenness/SM
+sudden/YPS
+Sudetenland/M
+sud/S
+suds/DSRG
+sudsy/TR
+sued/DG
+suede/SM
+Suellen/M
+Sue/M
+suer/M
+suet/MS
+Suetonius/M
+suety
+sue/ZGDRS
+Suez/M
+sufferance/SM
+sufferer/M
+suffering/M
+suffer/SJRDGZ
+suffice/GRSD
+sufficiency/SIM
+sufficient/IY
+suffixation/S
+suffixed/U
+suffix/GMRSD
+suffocate/XSDVGN
+suffocating/Y
+Suffolk/M
+suffragan/S
+suffrage/MS
+suffragette/MS
+suffragist/SM
+suffuse/VNGSDX
+suffusion/M
+Sufi/M
+Sufism/M
+sugarcane/S
+sugarcoat/GDS
+sugarless
+sugarplum/MS
+sugar/SJGMD
+sugary/TR
+suggest/DRZGVS
+suggester/M
+suggestibility/SM
+suggestible
+suggestion/MS
+suggestiveness/MS
+suggestive/PY
+sugillate
+Suharto/M
+suicidal/Y
+suicide/GSDM
+Sui/M
+suitability/SU
+suitableness/S
+suitable/P
+suitably/U
+suitcase/MS
+suited/U
+suite/SM
+suiting/M
+suit/MDGZBJS
+suitor/SM
+Sukarno/M
+Sukey/M
+Suki/M
+sukiyaki/SM
+Sukkoth's
+Sukkot/S
+Sula/M
+Sulawesi/M
+Suleiman/M
+sulfaquinoxaline
+sulfa/S
+sulfate/MSDG
+sulfide/S
+sulfite/M
+sulfonamide/SM
+sulfur/DMSG
+sulfuric
+sulfurousness/M
+sulfurous/YP
+sulk/GDS
+sulkily
+sulkiness/S
+sulky/RSPT
+Sulla/M
+sullenness/MS
+sullen/TYRP
+sullied/U
+Sullivan/M
+sully/GSD
+Sully/M
+sulphate/SM
+sulphide/MS
+sulphuric
+sultana/SM
+sultanate/MS
+sultan/SM
+sultrily
+sultriness/SM
+sultry/PRT
+Sulzberger/M
+sumach's
+sumac/SM
+Sumatra/M
+Sumatran/S
+sumer/F
+Sumeria/M
+Sumerian/M
+summability/M
+summable
+summand/MS
+summarily
+summarization/MS
+summarized/U
+summarize/GSRDZ
+summarizer/M
+summary/MS
+summation/FMS
+summed
+Summerdale/M
+summerhouse/MS
+summer/SGDM
+Summer/SM
+summertime/MS
+summery/TR
+summing
+summit/GMDS
+summitry/MS
+summoner/M
+summon/JSRDGZ
+summons/MSDG
+sum/MRS
+Sumner/M
+sumo/SM
+sump/SM
+sumptuousness/SM
+sumptuous/PY
+Sumter/M
+Sun
+sunbaked
+sunbathe
+sunbather/M
+sunbathing/M
+sunbaths
+sunbath/ZRSDG
+sunbeam/MS
+Sunbelt/M
+sunblock/S
+sunbonnet/MS
+sunburn/GSMD
+sunburst/MS
+suncream
+sundae/MS
+Sundanese/M
+Sundas
+Sunday/MS
+sunder/SDG
+sundial/MS
+sundowner/M
+sundown/MRDSZG
+sundries
+sundry/S
+sunfish/SM
+sunflower/MS
+sunglass/MS
+Sung/M
+sung/U
+sunk/SN
+sunlamp/S
+sunless
+sunlight/MS
+sunlit
+sun/MS
+sunned
+Sunni/MS
+sunniness/SM
+sunning
+Sunnite/SM
+Sunny/M
+sunny/RSTP
+Sunnyvale/M
+sunrise/GMS
+sunroof/S
+sunscreen/S
+sunset/MS
+sunsetting
+sunshade/MS
+Sunshine/M
+sunshine/MS
+sunshiny
+sunspot/SM
+sunstroke/MS
+suntanned
+suntanning
+suntan/SM
+sunup/MS
+superabundance/MS
+superabundant
+superannuate/GNXSD
+superannuation/M
+superbness/M
+superb/YRPT
+supercargoes
+supercargo/M
+supercharger/M
+supercharge/SRDZG
+superciliousness/SM
+supercilious/PY
+supercity/S
+superclass/M
+supercomputer/MS
+supercomputing
+superconcept
+superconducting
+superconductivity/SM
+superconductor/SM
+supercooled
+supercooling
+supercritical
+superdense
+super/DG
+superego/SM
+supererogation/MS
+supererogatory
+superficiality/S
+superficial/SPY
+superfine
+superfix/M
+superfluity/MS
+superfluousness/S
+superfluous/YP
+superheat/D
+superheroes
+superhero/SM
+superhighway/MS
+superhumanness/M
+superhuman/YP
+superimpose/SDG
+superimposition/MS
+superintendence/S
+superintendency/SM
+superintendent/SM
+superintend/GSD
+superiority/MS
+Superior/M
+superior/SMY
+superlativeness/M
+superlative/PYS
+superlunary
+supermachine
+superman/M
+Superman/M
+supermarket/SM
+supermen
+supermodel
+supermom/S
+supernal
+supernatant
+supernaturalism/M
+supernaturalness/M
+supernatural/SPY
+supernormal/Y
+supernovae
+supernova/MS
+supernumerary/S
+superordinate
+superpose/BSDG
+superposition/MS
+superpower/MS
+superpredicate
+supersaturate/XNGDS
+supersaturation/M
+superscribe/GSD
+superscript/DGS
+superscription/SM
+superseder/M
+supersede/SRDG
+supersensitiveness/M
+supersensitive/P
+superset/MS
+supersonically
+supersonic/S
+supersonics/M
+superstar/SM
+superstition/SM
+superstitious/YP
+superstore/S
+superstructural
+superstructure/SM
+supertanker/SM
+supertitle/MSDG
+superuser/MS
+supervene/GSD
+supervention/S
+supervised/U
+supervise/SDGNX
+supervision/M
+supervisor/SM
+supervisory
+superwoman/M
+superwomen
+supineness/M
+supine/PSY
+supper/DMG
+supplanter/M
+supplant/SGRD
+supplemental/S
+supplementary/S
+supplementation/S
+supplementer/M
+supplement/SMDRG
+suppleness/SM
+supple/SPLY
+suppliant/S
+supplicant/MS
+supplicate/NGXSD
+supplication/M
+supplier/AM
+suppl/RDGT
+supply/MAZGSRD
+supportability/M
+supportable/UI
+supported/U
+supporter/M
+supporting/Y
+supportive/Y
+support/ZGVSBDR
+supposed/Y
+suppose/SRDBJG
+supposition/MS
+suppository/MS
+suppressant/S
+suppressed/U
+suppressible/I
+suppression/SM
+suppressive/P
+suppressor/S
+suppress/VGSD
+suppurate/NGXSD
+suppuration/M
+supp/YDRGZ
+supra
+supranational
+supranationalism/M
+suprasegmental
+supremacist/SM
+supremacy/SM
+supremal
+supremeness/M
+supreme/PSRTY
+supremo/M
+sup/RSZ
+supt
+Supt/M
+Surabaya/M
+Surat/M
+surcease/DSMG
+surcharge/MGSD
+surcingle/MGSD
+surd/M
+sured/I
+surefire
+surefooted
+surely
+sureness/MS
+sureness's/U
+sure/PU
+surer/I
+surest
+surety/SM
+surfaced/UA
+surface/GSRDPZM
+surfacer/AMS
+surfaces/A
+surfacing/A
+surfactant/SM
+surfboard/MDSG
+surfeit/SDRMG
+surfer/M
+surfing/M
+surf/SJDRGMZ
+surged/A
+surge/GYMDS
+surgeon/MS
+surgery/MS
+surges/A
+surgical/Y
+Suriname
+Surinamese
+Surinam's
+surliness/SM
+surly/TPR
+surmiser/M
+surmise/SRDG
+surmountable/IU
+surmount/DBSG
+surname/GSDM
+surpassed/U
+surpass/GDS
+surpassing/Y
+surplice/SM
+surplus/MS
+surplussed
+surplussing
+surprised/U
+surprise/MGDRSJ
+surpriser/M
+surprising/YU
+surrealism/MS
+surrealistic
+surrealistically
+surrealist/S
+surreality
+surreal/S
+surrender/DRSG
+surrenderer/M
+surreptitiousness/S
+surreptitious/PY
+surrey/SM
+surrogacy/S
+surrogate/SDMNG
+surrogation/M
+surrounding/M
+surround/JGSD
+surtax/SDGM
+surveillance/SM
+surveillant
+surveyed/A
+surveying/M
+survey/JDSG
+surveyor/MS
+surveys/A
+survivability/M
+survivable/U
+survivalist/S
+survival/MS
+survive/SRDBG
+survivor/MS
+survivorship/M
+Surya/M
+Sus
+Susana/M
+Susanetta/M
+Susan/M
+Susannah/M
+Susanna/M
+Susanne/M
+Susann/M
+susceptibilities
+susceptibility/IM
+susceptible/I
+Susette/M
+sushi/SM
+Susie/M
+Susi/M
+suspected/U
+suspecter/M
+suspect/GSDR
+suspecting/U
+suspend/DRZGS
+suspended/UA
+suspender/M
+suspenseful
+suspense/MXNVS
+suspension/AM
+suspensive/Y
+suspensor/M
+suspicion/GSMD
+suspiciousness/M
+suspicious/YP
+Susquehanna/M
+Sussex/M
+sustainability
+sustainable/U
+sustain/DRGLBS
+sustainer/M
+sustainment/M
+sustenance/MS
+Susy/M
+Sutherland/M
+Sutherlan/M
+sutler/MS
+Sutton/M
+suture/GMSD
+SUV
+Suva/M
+Suwanee/M
+Suzanna/M
+Suzanne/M
+Suzann/M
+suzerain/SM
+suzerainty/MS
+Suzette/M
+Suzhou/M
+Suzie/M
+Suzi/M
+Suzuki/M
+Suzy/M
+Svalbard/M
+svelte/RPTY
+Svend/M
+Svengali
+Sven/M
+Sverdlovsk/M
+Svetlana/M
+SW
+swabbed
+swabbing
+swabby/S
+Swabian/SM
+swab/MS
+swaddle/SDG
+swagged
+swagger/GSDR
+swagging
+swag/GMS
+Swahili/MS
+swain/SM
+SWAK
+swallower/M
+swallow/GDRS
+swallowtail/SM
+swam
+swami/SM
+swamper/M
+swampland/MS
+swamp/SRDMG
+swampy/RPT
+Swanee/M
+swankily
+swankiness/MS
+swank/RDSGT
+swanky/PTRS
+swanlike
+swan/MS
+swanned
+swanning
+Swansea/M
+Swanson/M
+swappable/U
+swapped
+swapper/SM
+swapping
+swap/S
+sward/MSGD
+swarmer/M
+swarm/GSRDM
+swarthiness/M
+Swarthmore/M
+swarthy/RTP
+swart/P
+Swartz/M
+swashbuckler/SM
+swashbuckling/S
+swash/GSRD
+swastika/SM
+SWAT
+swatch/MS
+swathe
+swather/M
+swaths
+swath/SRDMGJ
+swat/S
+swatted
+swatter/MDSG
+swatting
+swayback/SD
+sway/DRGS
+swayer/M
+Swaziland/M
+Swazi/SM
+swearer/M
+swear/SGZR
+swearword/SM
+sweatband/MS
+sweater/M
+sweatily
+sweatiness/M
+sweatpants
+sweat/SGZRM
+sweatshirt/S
+sweatshop/MS
+sweaty/TRP
+Swedenborg/M
+Sweden/M
+swede/SM
+Swede/SM
+Swedish
+Swed/MN
+Sweeney/SM
+sweeper/M
+sweepingness/M
+sweeping/PY
+sweep/SBRJGZ
+sweeps/M
+sweepstakes
+sweepstake's
+sweetbread/SM
+sweetbrier/SM
+sweetcorn
+sweetened/U
+sweetener/M
+sweetening/M
+sweeten/ZDRGJ
+sweetheart/MS
+sweetie/MS
+sweeting/M
+sweetish/Y
+Sweet/M
+sweetmeat/MS
+sweetness/MS
+sweetshop
+sweet/TXSYRNPG
+swellhead/DS
+swelling/M
+swell/SJRDGT
+swelter/DJGS
+sweltering/Y
+Swen/M
+Swenson/M
+swept
+sweptback
+swerve/GSD
+swerving/U
+swifter/M
+swift/GTYRDPS
+Swift/M
+swiftness/MS
+swigged
+swigging
+swig/SM
+swill/SDG
+swimmer/MS
+swimming/MYS
+swim/S
+swimsuit/MS
+Swinburne/M
+swindle/GZRSD
+swindler/M
+swineherd/MS
+swine/SM
+swingeing
+swinger/M
+swinging/Y
+swing/SGRZJB
+swingy/R
+swinishness/M
+swinish/PY
+Swink/M
+swipe/DSG
+swirling/Y
+swirl/SGRD
+swirly/TR
+swish/GSRD
+swishy/R
+swiss
+Swiss/S
+switchback/GDMS
+switchblade/SM
+switchboard/MS
+switcher/M
+switch/GBZMRSDJ
+switchgear
+switchman/M
+switchmen/M
+switchover/M
+Switzerland/M
+Switzer/M
+Switz/MR
+swivel/GMDS
+swizzle/RDGM
+swob's
+swollen
+swoon/GSRD
+swooning/Y
+swoop/RDSG
+swoosh/GSD
+swop's
+sword/DMSG
+swordfish/SM
+swordplayer/M
+swordplay/RMS
+swordsman/M
+swordsmanship/SM
+swordsmen
+swordtail/M
+swore
+sworn
+swot/S
+swum
+swung
+s/XJBG
+sybarite/MS
+sybaritic
+Sybila/M
+Sybilla/M
+Sybille/M
+Sybil/M
+Sybyl/M
+sycamore/SM
+sycophancy/S
+sycophantic
+sycophantically
+sycophant/SYM
+Sydelle/M
+Sydel/M
+Syd/M
+Sydney/M
+Sykes/M
+Sylas/M
+syllabicate/GNDSX
+syllabication/M
+syllabicity
+syllabic/S
+syllabification/M
+syllabify/GSDXN
+syllabi's
+syllable/SDMG
+syllabub/M
+syllabus/MS
+syllabusses
+syllogism/MS
+syllogistic
+Sylow/M
+sylphic
+sylphlike
+sylph/M
+sylphs
+Sylvania/M
+Sylvan/M
+sylvan/S
+Sylvester/M
+Sylvia/M
+Sylvie/M
+Syman/M
+symbiont/M
+symbioses
+symbiosis/M
+symbiotic
+symbol/GMDS
+symbolical/Y
+symbolics/M
+symbolic/SM
+symbolism/MS
+symbolist/MS
+symbolization/MAS
+symbolized/U
+symbolize/GZRSD
+symbolizes/A
+Symington/M
+symmetric
+symmetrically/U
+symmetricalness/M
+symmetrical/PY
+symmetrization/M
+symmetrizing
+symmetry/MS
+Symon/M
+sympathetically/U
+sympathetic/S
+sympathized/U
+sympathizer/M
+sympathize/SRDJGZ
+sympathizing/MYUS
+sympathy/MS
+symphonic
+symphonists
+symphony/MS
+symposium/MS
+symptomatic
+symptomatically
+symptomatology/M
+symptom/MS
+syn
+synagogal
+synagogue/SM
+synapse/SDGM
+synaptic
+synchronism/M
+synchronization's
+synchronization/SA
+synchronize/AGCDS
+synchronized/U
+synchronizer/MS
+synchronousness/M
+synchronous/YP
+synchrony
+synchrotron/M
+syncopate/VNGXSD
+syncopation/M
+syncope/MS
+sync/SGD
+syndicalist
+syndicate/XSDGNM
+syndic/SM
+syndrome/SM
+synergism/SM
+synergistic
+synergy/MS
+synfuel/S
+Synge/M
+synod/SM
+synonymic
+synonymous/Y
+synonym/SM
+synonymy/MS
+synopses
+synopsis/M
+synopsized
+synopsizes
+synopsizing
+synoptic/S
+syntactical/Y
+syntactics/M
+syntactic/SY
+syntax/MS
+syntheses
+synthesis/M
+synthesized/U
+synthesize/GZSRD
+synthesizer/M
+synthesizes/A
+synthetically
+synthetic/S
+syphilis/MS
+syphilitic/S
+syphilized
+syphilizing
+Syracuse/M
+Syriac/M
+Syria/M
+Syrian/SM
+syringe/GMSD
+syrup/DMSG
+syrupy
+sys
+systematical/Y
+systematics/M
+systematic/SP
+systematization/SM
+systematized/U
+systematizer/M
+systematize/ZDRSG
+systematizing/U
+systemically
+systemic/S
+systemization/SM
+system/MS
+systole/MS
+systolic
+Szilard/M
+Szymborska/M
+TA
+Tabasco/MS
+Tabatha/M
+Tabbatha/M
+tabbed
+Tabbie/M
+Tabbi/M
+tabbing
+Tabbitha/M
+Tabb/M
+tabbouleh
+tabboulehs
+tabby/GSD
+Tabby/M
+Taber/M
+Tabernacle/S
+tabernacle/SDGM
+Tabina/M
+Tabitha/M
+tabla/MS
+tableau/M
+tableaux
+tablecloth/M
+tablecloths
+table/GMSD
+tableland/SM
+tablespoonful/MS
+tablespoon/SM
+tablet/MDGS
+tabletop/MS
+tableware/SM
+tabling/M
+tabloid/MS
+Tab/MR
+taboo/GSMD
+Tabor/M
+tabor/MDGS
+Tabriz/SM
+tab/SM
+tabula
+tabular/Y
+tabulate/XNGDS
+tabulation/M
+tabulator/MS
+tachometer/SM
+tachometry
+tachycardia/MS
+tachyon/SM
+tacitness/MS
+taciturnity/MS
+taciturn/Y
+Tacitus/M
+tacit/YP
+tacker/M
+tack/GZRDMS
+tackiness/MS
+tackler/M
+tackle/RSDMZG
+tackling/M
+tacky/RSTP
+Tacoma/M
+taco/MS
+tact/FSM
+tactfulness/S
+tactful/YP
+tactical/Y
+tactician/MS
+tactic/SM
+tactile/Y
+tactility/S
+tactlessness/SM
+tactless/PY
+tactual/Y
+Taddeo/M
+Taddeusz/M
+Tadd/M
+Tadeas/M
+Tadeo/M
+Tades
+Tadio/M
+Tad/M
+tadpole/MS
+tad/SM
+Tadzhikistan's
+Tadzhikstan/M
+Taegu/M
+Taejon/M
+taffeta/MS
+taffrail/SM
+Taffy/M
+taffy/SM
+Taft/M
+Tagalog/SM
+tagged/U
+tagger/S
+tagging
+Tagore/M
+tag/SM
+Tagus/M
+Tahitian/S
+Tahiti/M
+Tahoe/M
+Taichung/M
+taiga/MS
+tailback/MS
+tail/CMRDGAS
+tailcoat/S
+tailer/AM
+tailgate/MGRSD
+tailgater/M
+tailing/MS
+taillessness/M
+tailless/P
+taillight/MS
+tailor/DMJSGB
+Tailor/M
+tailpipe/SM
+tailspin/MS
+tailwind/SM
+Tainan/M
+Taine/M
+taint/DGS
+tainted/U
+Taipei/M
+Taite/M
+Tait/M
+Taiwanese
+Taiwan/M
+Taiyuan/M
+Tajikistan
+takeaway/S
+taken/A
+takeoff/SM
+takeout/S
+takeover/SM
+taker/M
+take/RSHZGJ
+takes/IA
+taking/IA
+Taklamakan/M
+Talbert/M
+Talbot/M
+talcked
+talcking
+talc/SM
+talcum/S
+talebearer/SM
+talented/M
+talentless
+talent/SMD
+taler/M
+tale/RSMN
+tali
+Talia/M
+Taliesin/M
+talion/M
+talismanic
+talisman/SM
+talkativeness/MS
+talkative/YP
+talker/M
+talk/GZSRD
+talkie/M
+talky/RST
+Talladega/M
+Tallahassee/M
+Tallahatchie/M
+Tallahoosa/M
+tallboy/MS
+Tallchief/M
+Talley/M
+Talleyrand/M
+Tallia/M
+Tallie/M
+Tallinn/M
+tallish
+tallness/MS
+Tallou/M
+tallow/DMSG
+tallowy
+tall/TPR
+Tallulah/M
+tally/GRSDZ
+tallyho/DMSG
+Tally/M
+Talmudic
+Talmudist/MS
+Talmud/MS
+talon/SMD
+talus/MS
+Talyah/M
+Talya/M
+Ta/M
+tamable/M
+tamale/SM
+tamarack/SM
+Tamarah/M
+Tamara/M
+tamarind/MS
+Tamar/M
+Tamarra/M
+Tamas
+tambourine/MS
+tamed/U
+Tameka/M
+tameness/S
+Tamera/M
+Tamerlane/M
+tame/SYP
+Tamika/M
+Tamiko/M
+Tamil/MS
+Tami/M
+Tam/M
+Tamma/M
+Tammany/M
+Tammara/M
+tam/MDRSTZGB
+Tammie/M
+Tammi/M
+Tammy/M
+Tampa/M
+Tampax/M
+tampered/U
+tamperer/M
+tamper/ZGRD
+tampon/DMSG
+tamp/SGZRD
+Tamqrah/M
+Tamra/M
+tanager/MS
+Tanaka/M
+Tana/M
+Tananarive/M
+tanbark/SM
+Tancred/M
+tandem/SM
+Tandie/M
+Tandi/M
+tandoori/S
+Tandy/M
+Taney/M
+T'ang
+Tanganyika/M
+tangelo/SM
+tangency/M
+tangential/Y
+tangent/SM
+tangerine/MS
+tang/GSYDM
+tangibility/MIS
+tangible/IPS
+tangibleness's/I
+tangibleness/SM
+tangibly/I
+Tangier/M
+tangle's
+tangle/UDSG
+tango/MDSG
+Tangshan/M
+tangy/RST
+Tanhya/M
+Tania/M
+Tani/M
+Tanisha/M
+Tanitansy/M
+tankard/MS
+tanker/M
+tankful/MS
+tank/GZSRDM
+Tan/M
+tan/MS
+tanned/U
+Tannenbaum/M
+Tanner/M
+tanner/SM
+tannery/MS
+tannest
+Tanney/M
+Tannhäuser/M
+Tannie/M
+tanning/SM
+tannin/SM
+Tann/RM
+Tanny/M
+Tansy/M
+tansy/SM
+tantalization/SM
+tantalized/U
+tantalize/GZSRD
+tantalizingly/S
+tantalizingness/S
+tantalizing/YP
+tantalum/MS
+Tantalus/M
+tantamount
+tantra/S
+tantrum/SM
+Tanya/M
+Tanzania/M
+Tanzanian/S
+taoism
+Taoism/MS
+Taoist/MS
+taoist/S
+Tao/M
+tao/S
+Tapdance/M
+taped/U
+tapeline/S
+taperer/M
+taper/GRD
+tape/SM
+tapestry/GMSD
+tapeworm/MS
+tapioca/MS
+tapir/MS
+tap/MSDRJZG
+tapped/U
+tapper/MS
+tappet/MS
+tapping/M
+taproom/MS
+taproot/SM
+taps/M
+Tarah/M
+Tara/M
+tarantella/MS
+tarantula/MS
+Tarawa/M
+Tarazed/M
+Tarbell/M
+tardily
+tardiness/S
+tardy/TPRS
+tare/MS
+target/GSMD
+tar/GSMD
+tariff/DMSG
+Tarim/M
+Tarkington/M
+tarmacked
+tarmacking
+tarmac/S
+tarnished/U
+tarnish/GDS
+tarn/MS
+taro/MS
+tarot/MS
+tarpapered
+tarpaulin/MS
+tarp/MS
+tarpon/MS
+tarragon/SM
+Tarrah/M
+Tarra/M
+Tarrance/M
+tarred/M
+tarring/M
+tarry/TGRSD
+Tarrytown/M
+tarsal/S
+tarsi
+tarsus/M
+tartan/MS
+tartaric
+Tartar's
+tartar/SM
+Tartary/M
+tartness/MS
+tart/PMYRDGTS
+Tartuffe/M
+Taryn/M
+Tarzan/M
+Tasha/M
+Tashkent/M
+Tasia/M
+task/GSDM
+taskmaster/SM
+taskmistress/MS
+Tasmania/M
+Tasmanian/S
+tassellings
+tassel/MDGS
+Tass/M
+tasted/EU
+tastefulness/SME
+tasteful/PEY
+taste/GZMJSRD
+tastelessness/SM
+tasteless/YP
+taster/M
+taste's/E
+tastes/E
+tastily
+tastiness/MS
+tasting/E
+tasty/RTP
+tatami/MS
+Tatar/SM
+Tate/M
+tater/M
+Tatiana/M
+Tatiania/M
+tat/SRZ
+tatted
+tatterdemalion/SM
+tattered/M
+tatter/GDS
+tatting/SM
+tattler/M
+tattle/RSDZG
+tattletale/SM
+tattooer/M
+tattooist/MS
+tattoo/ZRDMGS
+tatty/R
+Tatum/M
+taught/AU
+taunter/M
+taunting/Y
+taunt/ZGRDS
+taupe/SM
+Taurus/SM
+tau/SM
+tauten/GD
+tautness/S
+tautological/Y
+tautologous
+tautology/SM
+taut/PGTXYRDNS
+taverner/M
+tavern/RMS
+tawdrily
+tawdriness/SM
+tawdry/SRTP
+Tawney/M
+Tawnya/M
+tawny/RSMPT
+Tawsha/M
+taxable/S
+taxably
+taxation/MS
+taxed/U
+taxicab/MS
+taxidermist/SM
+taxidermy/MS
+taxi/MDGS
+taximeter/SM
+taxing/Y
+taxiway/MS
+taxonomic
+taxonomically
+taxonomist/SM
+taxonomy/SM
+taxpayer/MS
+taxpaying/M
+tax/ZGJMDRSB
+Taylor/SM
+Tb
+TB
+TBA
+Tbilisi/M
+tbs
+tbsp
+Tchaikovsky/M
+Tc/M
+TCP
+TD
+TDD
+Te
+teabag/S
+teacake/MS
+teacart/M
+teachable/P
+teach/AGS
+teacher/MS
+teaching/SM
+teacloth
+teacupful/MS
+teacup/MS
+Teador/M
+teahouse/SM
+teakettle/SM
+teak/SM
+teakwood/M
+tealeaves
+teal/MS
+tea/MDGS
+teammate/MS
+team/MRDGS
+teamster/MS
+teamwork/SM
+teapot/MS
+tearaway
+teardrop/MS
+tearer/M
+tearfulness/M
+tearful/YP
+teargas/S
+teargassed
+teargassing
+tearjerker/S
+tearoom/MS
+tear/RDMSG
+teary/RT
+Teasdale/M
+tease/KS
+teasel/DGSM
+teaser/M
+teashop/SM
+teasing/Y
+teaspoonful/MS
+teaspoon/MS
+teas/SRDGZ
+teatime/MS
+teat/MDS
+tech/D
+technetium/SM
+technicality/MS
+technicalness/M
+technical/YSP
+technician/MS
+Technicolor/MS
+Technion/M
+technique/SM
+technocracy/MS
+technocratic
+technocrat/S
+technological/Y
+technologist/MS
+technology/MS
+technophobia
+technophobic
+techs
+tectonically
+tectonic/S
+tectonics/M
+Tecumseh/M
+Tedda/M
+Teddie/M
+Teddi/M
+Tedd/M
+Teddy/M
+teddy/SM
+Tedie/M
+Tedi/M
+tediousness/SM
+tedious/YP
+tedium/MS
+Ted/M
+Tedman/M
+Tedmund/M
+Tedra/M
+tee/DRSMH
+teeing
+teem/GSD
+teemingness/M
+teeming/PY
+teenager/M
+teenage/RZ
+Teena/M
+teen/SR
+teenybopper/SM
+teeny/RT
+teepee's
+teeshirt/S
+teeter/GDS
+teethe
+teether/M
+teething/M
+teethmarks
+teeth/RSDJMG
+teetotaler/M
+teetotalism/MS
+teetotal/SRDGZ
+TEFL
+Teflon/MS
+Tegucigalpa/M
+Teheran's
+Tehran
+TEirtza/M
+tektite/SM
+Tektronix/M
+telecast/SRGZ
+telecommunicate/NX
+telecommunication/M
+telecommute/SRDZGJ
+telecoms
+teleconference/GMJSD
+Teledyne/M
+Telefunken/M
+telegenic
+telegrammed
+telegramming
+telegram/MS
+telegraphic
+telegraphically
+telegraphist/MS
+telegraph/MRDGZ
+telegraphs
+telegraphy/MS
+telekineses
+telekinesis/M
+telekinetic
+Telemachus/M
+Telemann/M
+telemarketer/S
+telemarketing/S
+telemeter/DMSG
+telemetric
+telemetry/MS
+teleological/Y
+teleology/M
+telepathic
+telepathically
+telepathy/SM
+telephone/SRDGMZ
+telephonic
+telephonist/SM
+telephony/MS
+telephotography/MS
+telephoto/S
+teleprinter/MS
+teleprocessing/S
+teleprompter
+TelePrompter/M
+TelePrompTer/S
+telescope/GSDM
+telescopic
+telescopically
+teletext/S
+telethon/MS
+teletype/SM
+Teletype/SM
+teletypewriter/SM
+televangelism/S
+televangelist/S
+televise/SDXNG
+television/M
+televisor/MS
+televisual
+telex/GSDM
+Telex/M
+tell/AGS
+Teller/M
+teller/SDMG
+telling/YS
+Tell/MR
+telltale/MS
+tellurium/SM
+telly/SM
+Telnet/M
+TELNET/M
+telnet/S
+telomeric
+tel/SY
+Telugu/M
+temblor/SM
+temerity/MS
+Tempe/M
+temperamental/Y
+temperament/SM
+temperance/IMS
+tempera/SLM
+temperately/I
+temperateness's/I
+temperateness/SM
+temperate/SDGPY
+temperature/MS
+tempered/UE
+temper/GRDM
+tempering/E
+temper's/E
+tempers/E
+tempest/DMSG
+tempestuousness/SM
+tempestuous/PY
+template/FS
+template's
+Temple/M
+Templeman/M
+temple/SDM
+Templeton/M
+Temp/M
+tempoes
+tempo/MS
+temporal/YS
+temporarily
+temporarinesses
+temporariness/FM
+temporary/SFP
+temporize/GJZRSD
+temporizer/M
+temporizings/U
+temporizing/YM
+temp/SGZTMRD
+temptation/MS
+tempted
+tempter/S
+tempt/FS
+tempting/YS
+temptress/MS
+tempura/SM
+tenabilities
+tenability/UM
+tenableness/M
+tenable/P
+tenably
+tenaciousness/S
+tenacious/YP
+tenacity/S
+tenancy/MS
+tenanted/U
+tenant/MDSG
+tenantry/MS
+tench/M
+tended/UE
+tendency/MS
+tendentiousness/SM
+tendentious/PY
+tendered
+tenderer
+tenderest
+tenderfoot/MS
+tender/FS
+tenderheartedness/MS
+tenderhearted/YP
+tendering
+tenderizer/M
+tenderize/SRDGZ
+tenderloin/SM
+tenderly
+tenderness/SM
+tending/E
+tendinitis/S
+tend/ISFRDG
+tendon/MS
+tendril/SM
+tends/E
+tenebrous
+tenement/MS
+tenet/SM
+Tenex/M
+TENEX/M
+tenfold/S
+ten/MHB
+Tenneco/M
+tenner
+Tennessean/S
+Tennessee/M
+Tenney/M
+tennis/SM
+Tenn/M
+Tennyson/M
+Tenochtitlan/M
+tenon/GSMD
+tenor/MS
+tenpin/SM
+tense/IPYTNVR
+tenseness's/I
+tenseness/SM
+tensile
+tensional/I
+tension/GMRDS
+tensionless
+tensions/E
+tension's/I
+tensity/IMS
+tensorial
+tensor/MS
+tenspot
+tens/SRDVGT
+tentacle/MSD
+tentativeness/S
+tentative/SPY
+tented/UF
+tenterhook/MS
+tenter/M
+tent/FSIM
+tenths
+tenth/SY
+tenting/F
+tenuity/S
+tenuousness/SM
+tenuous/YP
+tenure/SDM
+Teodoor/M
+Teodora/M
+Teodorico/M
+Teodor/M
+Teodoro/M
+tepee/MS
+tepidity/S
+tepidness/S
+tepid/YP
+tequila/SM
+Tera/M
+teratogenic
+teratology/MS
+terbium/SM
+tercel/M
+tercentenary/S
+tercentennial/S
+Terence/M
+Terencio/M
+Teresa/M
+Terese/M
+Tereshkova/M
+Teresina/M
+Teresita/M
+Teressa/M
+Teriann/M
+Teri/M
+Terkel/M
+termagant/SM
+termcap
+termer/M
+terminable/CPI
+terminableness/IMC
+terminal/SYM
+terminate/CXNV
+terminated/U
+terminates
+terminating
+termination/MC
+terminative/YC
+terminator/SM
+termini
+terminological/Y
+terminology/MS
+terminus/M
+termite/SM
+term/MYRDGS
+ternary/S
+tern/GIDS
+tern's
+terpsichorean
+Terpsichore/M
+terrace/MGSD
+terracing/M
+terracotta
+terrain/MS
+Terra/M
+terramycin
+Terrance/M
+Terran/M
+terrapin/MS
+terrarium/MS
+terrazzo/SM
+Terrell/M
+Terrel/M
+Terre/M
+Terrence/M
+terrestrial/YMS
+terribleness/SM
+terrible/P
+terribly
+Terrie/M
+terrier/M
+terrifically
+terrific/Y
+terrify/GDS
+terrifying/Y
+Terrijo/M
+Terrill/M
+Terri/M
+terrine/M
+territoriality/M
+Territorial/SM
+territorial/SY
+Territory's
+territory/SM
+terrorism/MS
+terroristic
+terrorist/MS
+terrorized/U
+terrorizer/M
+terrorize/RSDZG
+terror/MS
+terr/S
+terrycloth
+Terrye/M
+Terry/M
+terry/ZMRS
+terseness/SM
+terse/RTYP
+Tersina/M
+tertian
+Tertiary
+tertiary/S
+Terza/M
+TESL
+Tesla/M
+TESOL
+Tessa/M
+tessellate/XDSNG
+tessellation/M
+tesseral
+Tessie/M
+Tessi/M
+Tess/M
+Tessy/M
+testability/M
+testable/U
+testamentary
+testament/SM
+testate/IS
+testator/MS
+testatrices
+testatrix
+testbed/S
+testcard
+tested/AKU
+tester/MFCKS
+testes/M
+testicle/SM
+testicular
+testifier/M
+testify/GZDRS
+testily
+testimonial/SM
+testimony/SM
+testiness/S
+testing/S
+testis/M
+testosterone/SM
+test/RDBFZGSC
+tests/AK
+test's/AKF
+testy/RTP
+tetanus/MS
+tetchy/TR
+tether/DMSG
+tethered/U
+Tethys/M
+Tetons
+tetrachloride/M
+tetracycline/SM
+tetrafluoride
+tetragonal/Y
+tetrahalides
+tetrahedral/Y
+tetrahedron/SM
+tetrameron
+tetrameter/SM
+tetra/MS
+tetrasodium
+tetravalent
+Teutonic
+Teuton/SM
+Texaco/M
+Texan/S
+Texas/MS
+Tex/M
+TeX/M
+textbook/SM
+text/FSM
+textile/SM
+Textron/M
+textual/FY
+textural/Y
+textured/U
+texture/MGSD
+T/G
+Thacher/M
+Thackeray/M
+Thaddeus/M
+Thaddus/M
+Thadeus/M
+Thad/M
+Thailand/M
+Thaine/M
+Thain/M
+Thai/S
+thalami
+thalamus/M
+Thales/M
+Thalia/M
+thalidomide/MS
+thallium/SM
+thallophyte/M
+Thames
+than
+Thane/M
+thane/SM
+Thanh/M
+thanker/M
+thankfuller
+thankfullest
+thankfulness/SM
+thankful/YP
+thanklessness/SM
+thankless/PY
+thanksgiving/MS
+Thanksgiving/S
+thank/SRDG
+Thant/M
+Thar/M
+Thatcher/M
+thatching/M
+thatch/JMDRSZG
+Thatch/MR
+that'd
+that'll
+that/MS
+thaumaturge/M
+thaw/DGS
+Thaxter/M
+Thayer/M
+Thayne/M
+THC
+the
+Theadora/M
+Thea/M
+theatergoer/MS
+theatergoing/MS
+theater/SM
+theatricality/SM
+theatrical/YS
+theatric/S
+theatrics/M
+Thebault/M
+Thebes
+Theda/M
+Thedrick/M
+Thedric/M
+thee/DS
+theeing
+theft/MS
+Theiler/M
+their/MS
+theism/SM
+theistic
+theist/SM
+Thekla/M
+Thelma/M
+themas
+thematically
+thematics
+thematic/U
+theme/MS
+them/GD
+Themistocles/M
+themselves
+thence
+thenceforth
+thenceforward/S
+Theobald/M
+theocracy/SM
+theocratic
+Theocritus/M
+theodolite/MS
+Theodora/M
+Theodore/M
+Theodoric/M
+Theodor/M
+Theodosia/M
+Theodosian
+Theodosius/M
+theologian/SM
+theological/Y
+theologists
+theology/MS
+Theo/M
+theorem/MS
+theoretical/Y
+theoretician/MS
+theoretic/S
+theoretics/M
+theorist/SM
+theorization/SM
+theorize/ZGDRS
+theory/MS
+theosophic
+theosophical
+theosophist/MS
+Theosophy
+theosophy/SM
+therapeutically
+therapeutic/S
+therapeutics/M
+therapist/MS
+therapy/MS
+Theravada/M
+thereabout/S
+thereafter
+thereat
+thereby
+there'd
+therefor
+therefore
+therefrom
+therein
+there'll
+there/MS
+thereof
+thereon
+Theresa/M
+Therese/M
+Theresina/M
+Theresita/M
+Theressa/M
+thereto
+theretofore
+thereunder
+thereunto
+thereupon
+therewith
+Therine/M
+thermal/YS
+thermionic/S
+thermionics/M
+thermistor/MS
+therm/MS
+thermocouple/MS
+thermodynamical/Y
+thermodynamic/S
+thermodynamics/M
+thermoelastic
+thermoelectric
+thermoformed
+thermoforming
+thermogravimetric
+thermoluminescence/M
+thermometer/MS
+thermometric
+thermometry/M
+thermonuclear
+thermopile/M
+thermoplastic/S
+thermopower
+thermo/S
+thermosetting
+thermos/S
+Thermos/SM
+thermostable
+thermostatically
+thermostatic/S
+thermostatics/M
+thermostat/SM
+thermostatted
+thermostatting
+Theron/M
+thesauri
+thesaurus/MS
+these/S
+Theseus/M
+thesis/M
+thespian/S
+Thespian/S
+Thespis/M
+Thessalonian
+Thessaloníki/M
+Thessaly/M
+theta/MS
+thew/SM
+they
+they'd
+they'll
+they're
+they've
+th/GNJX
+Thia/M
+thiamine/MS
+Thibaud/M
+Thibaut/M
+thickener/M
+thickening/M
+thicken/RDJZG
+thicket/SMD
+thickheaded/M
+thickish
+thickness/MS
+thickset/S
+thick/TXPSRNY
+thief/M
+Thiensville/M
+Thieu/M
+thievery/MS
+thieve/SDJG
+thievishness/M
+thievish/P
+thighbone/SM
+thigh/DM
+thighs
+thimble/DSMG
+thimbleful/MS
+Thimbu/M
+Thimphu
+thine
+thingamabob/MS
+thingamajig/SM
+thing/MP
+thinkableness/M
+thinkable/U
+thinkably/U
+think/AGRS
+thinker/MS
+thinkingly/U
+thinking/SMYP
+thinned
+thinner/MS
+thinness/MS
+thinnest
+thinning
+thinnish
+thin/STPYR
+thiocyanate/M
+thiouracil/M
+third/DYGS
+thirster/M
+thirst/GSMDR
+thirstily
+thirstiness/S
+thirsty/TPR
+thirteen/MHS
+thirteenths
+thirtieths
+thirty/HMS
+this
+this'll
+thistledown/MS
+thistle/SM
+thither
+Th/M
+tho
+thole/GMSD
+Thomasa/M
+Thomasina/M
+Thomasine/M
+Thomasin/M
+Thoma/SM
+Thomism/M
+Thomistic
+Thom/M
+Thompson/M
+Thomson/M
+thong/SMD
+thoracic
+thorax/MS
+Thorazine
+Thoreau/M
+thoriate/D
+Thorin/M
+thorium/MS
+Thor/M
+Thornburg/M
+Thorndike/M
+Thornie/M
+thorniness/S
+Thorn/M
+thorn/SMDG
+Thornton/M
+Thorny/M
+thorny/PTR
+thoroughbred/S
+thoroughfare/MS
+thoroughgoing
+thoroughness/SM
+thorough/PTYR
+Thorpe/M
+Thorstein/M
+Thorsten/M
+Thorvald/M
+those
+Thoth/M
+thou/DSG
+though
+thoughtfully
+thoughtfulness/S
+thoughtful/U
+thoughtlessness/MS
+thoughtless/YP
+thought/MS
+thousandfold
+thousand/SHM
+thousandths
+Thrace/M
+Thracian/M
+thralldom/S
+thrall/GSMD
+thrash/DSRZGJ
+thrasher/M
+thrashing/M
+threadbare/P
+threader/M
+threading/A
+threadlike
+thread/MZDRGS
+thready/RT
+threatener/M
+threaten/GJRD
+threatening/Y
+threat/MDNSXG
+threefold
+three/MS
+threepence/M
+threepenny
+threescore/S
+threesome/SM
+threnody/SM
+thresh/DSRZG
+thresher/M
+threshold/MDGS
+threw
+thrice
+thriftily
+thriftiness/S
+thriftless
+thrift/SM
+thrifty/PTR
+thriller/M
+thrilling/Y
+thrill/ZMGDRS
+thriver/M
+thrive/RSDJG
+thriving/Y
+throatily
+throatiness/MS
+throat/MDSG
+throaty/PRT
+throbbed
+throbbing
+throb/S
+throeing
+throe/SDM
+thrombi
+thromboses
+thrombosis/M
+thrombotic
+thrombus/M
+Throneberry/M
+throne/CGSD
+throne's
+throng/GDSM
+throttle/DRSZMG
+throttler/M
+throughout
+throughput/SM
+throughway's
+through/Y
+throwaway/SM
+throwback/MS
+thrower/M
+thrown
+throwout
+throw/SZGR
+thrummed
+thrumming
+thrum/S
+thrush/MS
+thruster/M
+thrust/ZGSR
+Thruway/MS
+thruway/SM
+Thunderbird/M
+Thu
+Thucydides/M
+thudded
+thudding
+thud/MS
+thuggee/M
+thuggery/SM
+thuggish
+thug/MS
+Thule/M
+thulium/SM
+thumbnail/MS
+thumbscrew/SM
+thumb/SMDG
+thumbtack/GMDS
+thump/RDMSG
+thunderbolt/MS
+thunderclap/SM
+thundercloud/SM
+thunderer/M
+thunderhead/SM
+thundering/Y
+thunderous/Y
+thundershower/MS
+thunderstorm/MS
+thunderstruck
+thundery
+thunder/ZGJDRMS
+thunk
+Thurber/M
+Thurman/M
+Thur/MS
+Thursday/SM
+Thurstan/M
+Thurston/M
+thus/Y
+thwack/DRSZG
+thwacker/M
+thwarter/M
+thwart/GSDRY
+thy
+thyme/SM
+thymine/MS
+thymus/SM
+thyratron/M
+thyristor/MS
+thyroglobulin
+thyroidal
+thyroid/S
+thyronine
+thyrotoxic
+thyrotrophic
+thyrotrophin
+thyrotropic
+thyrotropin/M
+thyroxine/M
+thyself
+Tia/M
+Tianjin
+tiara/MS
+Tiberius/M
+Tiber/M
+Tibetan/S
+Tibet/M
+tibiae
+tibial
+tibia/M
+Tibold/M
+Tiburon/M
+ticker/M
+ticket/SGMD
+tick/GZJRDMS
+ticking/M
+tickler/M
+tickle/RSDZG
+ticklishness/MS
+ticklish/PY
+ticktacktoe/S
+ticktock/SMDG
+tic/MS
+Ticonderoga/M
+tidal/Y
+tidbit/MS
+tiddlywinks/M
+tide/GJDS
+tideland/MS
+tidewater/SM
+tideway/SM
+tidily/U
+tidiness/USM
+tidying/M
+tidy/UGDSRPT
+tie/AUDS
+tieback/MS
+Tiebold/M
+Tiebout/M
+tiebreaker/SM
+Tieck/M
+Tiena/M
+Tienanmen/M
+Tientsin's
+tier/DGM
+Tierney/M
+Tiertza/M
+Tiffanie/M
+Tiffani/M
+tiffany/M
+Tiffany/M
+tiff/GDMS
+Tiffie/M
+Tiffi/M
+Tiff/M
+Tiffy/M
+tigerish
+tiger/SM
+tightener/M
+tighten/JZGDR
+tightfisted
+tightness/MS
+tightrope/SM
+tight/STXPRNY
+tightwad/MS
+tigress/SM
+Tigris/M
+Tijuana/M
+tike's
+Tilda/M
+tilde/MS
+Tildie/M
+Tildi/M
+Tildy/M
+tile/DRSJMZG
+tiled/UE
+Tiler/M
+tiles/U
+tiling/M
+tillable
+tillage/SM
+till/EGSZDR
+tiller/GDM
+tiller's/E
+Tillich/M
+Tillie/M
+Tillman/M
+Tilly/M
+tilth/M
+tilt/RDSGZ
+Ti/M
+timber/DMSG
+timbering/M
+timberland/SM
+timberline/S
+timbrel/SM
+timbre/MS
+Timbuktu/M
+ti/MDRZ
+timebase
+time/DRSJMYZG
+timekeeper/MS
+timekeeping/SM
+timelessness/S
+timeless/PY
+timeliness/SMU
+timely/UTRP
+timeout/S
+timepiece/MS
+timer/M
+timescale/S
+timeserver/MS
+timeserving/S
+timeshare/SDG
+timespan
+timestamped
+timestamps
+timetable/GMSD
+timeworn
+Timex/M
+timezone/S
+timidity/SM
+timidness/MS
+timid/RYTP
+Timi/M
+timing/M
+Timmie/M
+Timmi/M
+Tim/MS
+Timmy/M
+Timofei/M
+Timon/M
+timorousness/MS
+timorous/YP
+Timoteo/M
+Timothea/M
+Timothee/M
+Timotheus/M
+Timothy/M
+timothy/MS
+timpani
+timpanist/S
+Timur/M
+Tina/M
+tincture/SDMG
+tinderbox/MS
+tinder/MS
+Tine/M
+tine/SM
+tinfoil/MS
+tingeing
+tinge/S
+ting/GYDM
+tingle/SDG
+tingling/Y
+tingly/TR
+Ting/M
+tinily
+tininess/MS
+tinker/SRDMZG
+Tinkertoy
+tinkle/SDG
+tinkling/M
+tinkly
+tin/MDGS
+tinned
+tinner/M
+tinnily
+tinniness/SM
+tinning/M
+tinnitus/MS
+tinny/RSTP
+tinplate/S
+tinsel/GMDYS
+Tinseltown/M
+tinsmith/M
+tinsmiths
+tinter/M
+tintinnabulation/MS
+Tintoretto/M
+tint/SGMRDB
+tintype/SM
+tinware/MS
+tiny/RPT
+Tioga/M
+Tiphanie/M
+Tiphani/M
+Tiphany/M
+tipi's
+tip/MS
+tipoff
+Tippecanoe/M
+tipped
+Tipperary/M
+tipper/MS
+tippet/MS
+tipping
+tippler/M
+tipple/ZGRSD
+tippy/R
+tipsily
+tipsiness/SM
+tipster/SM
+tipsy/TPR
+tiptoeing
+tiptoe/SD
+tiptop/S
+tirade/SM
+Tirana's
+Tirane
+tired/AYP
+tireder
+tiredest
+tiredness/S
+tirelessness/SM
+tireless/PY
+tire/MGDSJ
+tires/A
+Tiresias/M
+tiresomeness/S
+tiresome/PY
+tiring/AU
+Tirolean/S
+Tirol/M
+tiro's
+Tirrell/M
+tis
+Tisha/M
+Tish/M
+tissue/MGSD
+titanate/M
+Titania/M
+titanic
+titanically
+Titanic/M
+titanium/SM
+titan/SM
+Titan/SM
+titbit's
+titer/M
+tither/M
+tithe/SRDGZM
+tithing/M
+Titian/M
+titian/S
+Titicaca/M
+titillate/XSDVNG
+titillating/Y
+titillation/M
+titivate/NGDSX
+titivation/M
+titled/AU
+title/GMSRD
+titleholder/SM
+titling/A
+titmice
+titmouse/M
+tit/MRZS
+Tito/SM
+titrate/SDGN
+titration/M
+titted
+titter/GDS
+titting
+tittle/SDMG
+titular/SY
+Titus/M
+tizzy/SM
+TKO
+Tlaloc/M
+TLC
+Tlingit/M
+Tl/M
+TM
+Tm/M
+tn
+TN
+tnpk
+TNT
+toad/SM
+toadstool/SM
+toady/GSDM
+toadyism/M
+toaster/M
+toastmaster/MS
+toastmistress/S
+toast/SZGRDM
+toasty/TRS
+tobacconist/SM
+tobacco/SM
+toboggan/SM
+Tobago/M
+Tobe/M
+Tobey/M
+Tobiah/M
+Tobias/M
+Tobie/M
+Tobi/M
+Tobin/M
+Tobit/M
+toboggan/MRDSZG
+Tobye/M
+Toby/M
+Tocantins/M
+toccata/M
+Tocqueville
+tocsin/MS
+to/D
+today'll
+today/SM
+Toddie/M
+toddler/M
+toddle/ZGSRD
+Todd/M
+Toddy/M
+toddy/SM
+Tod/M
+toecap/SM
+toeclip/S
+TOEFL
+toehold/MS
+toeing
+toe/MS
+toenail/DMGS
+toffee/SM
+tofu/S
+toga/SMD
+toge
+togetherness/MS
+together/P
+togged
+togging
+toggle/SDMG
+Togolese/M
+Togo/M
+tog/SMG
+Toiboid/M
+toilet/GMDS
+toiletry/MS
+toilette/SM
+toil/SGZMRD
+toilsomeness/M
+toilsome/PY
+Toinette/M
+Tojo/M
+tokamak
+Tokay/M
+toke/GDS
+tokenism/SM
+tokenized
+token/SMDG
+Tokugawa/M
+Tokyoite/MS
+Tokyo/M
+Toland/M
+told/AU
+Toledo/SM
+tole/MGDS
+tolerability/IM
+tolerable/I
+tolerably/I
+tolerance/SIM
+tolerant/IY
+tolerate/XVNGSD
+toleration/M
+Tolkien
+tollbooth/M
+tollbooths
+toll/DGS
+Tolley/M
+tollgate/MS
+tollhouse/M
+tollway/S
+Tolstoy/M
+toluene/MS
+Tolyatti/M
+tomahawk/SGMD
+Tomasina/M
+Tomasine/M
+Toma/SM
+Tomaso/M
+tomatoes
+tomato/M
+Tombaugh/M
+tomb/GSDM
+Tombigbee/M
+tomblike
+tombola/M
+tomboyish
+tomboy/MS
+tombstone/MS
+tomcat/SM
+tomcatted
+tomcatting
+Tome/M
+tome/SM
+tomfoolery/MS
+tomfool/M
+Tomi/M
+Tomkin/M
+Tomlin/M
+Tom/M
+tommed
+Tommie/M
+Tommi/M
+tomming
+tommy/M
+Tommy/M
+tomographic
+tomography/MS
+tomorrow/MS
+Tompkins/M
+Tomsk/M
+tom/SM
+tomtit/SM
+tonality/MS
+tonal/Y
+tonearm/S
+tone/ISRDZG
+tonelessness/M
+toneless/YP
+toner/IM
+tone's
+Tonga/M
+Tongan/SM
+tong/GRDS
+tongueless
+tongue/SDMG
+tonguing/M
+Tonia/M
+tonic/SM
+Tonie/M
+tonight/MS
+Toni/M
+Tonio/M
+tonk/MS
+tonnage/SM
+tonne/MS
+Tonnie/M
+tonsillectomy/MS
+tonsillitis/SM
+tonsil/SM
+ton/SKM
+tonsorial
+tonsure/SDGM
+Tonto/M
+Tonya/M
+Tonye/M
+Tony/M
+tony/RT
+toodle
+too/H
+took/A
+tool/AGDS
+toolbox/SM
+tooler/SM
+tooling/M
+toolkit/SM
+toolmaker/M
+toolmake/ZRG
+toolmaking/M
+tool's
+toolsmith
+Toomey/M
+tooter/M
+toot/GRDZS
+toothache/SM
+toothbrush/MSG
+tooth/DMG
+toothily
+toothless
+toothmarks
+toothpaste/SM
+toothpick/MS
+tooths
+toothsome
+toothy/TR
+tootle/SRDG
+tootsie
+Tootsie/M
+toots/M
+tootsy/MS
+topaz/MS
+topcoat/MS
+topdressing/S
+Topeka/M
+toper/M
+topflight
+topgallant/M
+topiary/S
+topicality/MS
+topical/Y
+topic/MS
+topknot/MS
+topless
+topmast/MS
+topmost
+topnotch/R
+topocentric
+topographer/SM
+topographic
+topographical/Y
+topography/MS
+topological/Y
+topologist/MS
+topology/MS
+topped
+topper/MS
+topping/MS
+topple/GSD
+topsail/MS
+topside/SRM
+top/SMDRG
+topsoil/GDMS
+topspin/MS
+Topsy/M
+toque/MS
+Torah/M
+Torahs
+torchbearer/SM
+torchlight/S
+torch/SDMG
+toreador/SM
+Tore/M
+tore/S
+Torey/M
+Torie/M
+tori/M
+Tori/M
+Torin/M
+torment/GSD
+tormenting/Y
+tormentor/MS
+torn
+tornadoes
+tornado/M
+toroidal/Y
+toroid/MS
+Toronto/M
+torpedoes
+torpedo/GMD
+torpidity/S
+torpid/SY
+torpor/MS
+Torquemada/M
+torque/MZGSRD
+Torrance/M
+Torre/MS
+torrence
+Torrence/M
+Torrens/M
+torrential
+torrent/MS
+Torrey/M
+Torricelli/M
+torridity/SM
+torridness/SM
+torrid/RYTP
+Torrie/M
+Torrin/M
+Torr/XM
+Torry/M
+torsional/Y
+torsion/IAM
+torsions
+torsi's
+tor/SLM
+torso/SM
+tors/S
+tort/ASFE
+tortellini/MS
+torte/MS
+torten
+tortilla/MS
+tortoiseshell/SM
+tortoise/SM
+Tortola/M
+tortoni/MS
+tort's
+Tortuga/M
+tortuousness/MS
+tortuous/PY
+torture/ZGSRD
+torturous
+torus/MS
+Tory/SM
+Tosca/M
+Toscanini/M
+Toshiba/M
+toss/SRDGZ
+tossup/MS
+totaler/M
+totalistic
+totalitarianism/SM
+totalitarian/S
+totality/MS
+totalizator/S
+totalizing
+total/ZGSRDYM
+totemic
+totem/MS
+toter/M
+tote/S
+toting/M
+tot/MDRSG
+Toto/M
+totted
+totterer/M
+tottering/Y
+totter/ZGRDS
+totting
+toucan/MS
+touchable/U
+touch/ASDG
+touchdown/SM
+touch
+touched/U
+toucher/M
+touchily
+touchiness/SM
+touching/SY
+touchline/M
+touchscreen
+touchstone/SM
+touchy/TPR
+toughen/DRZG
+toughener/M
+toughness/SM
+toughs
+tough/TXGRDNYP
+Toulouse/M
+toupee/SM
+toured/CF
+tourer/M
+tour/GZSRDM
+touring/F
+tourism/SM
+touristic
+tourist/SM
+touristy
+tourmaline/SM
+tournament/MS
+tourney/GDMS
+tourniquet/MS
+tour's/CF
+tours/CF
+tousle/GSD
+touter/M
+tout/SGRD
+Tova/M
+Tove/M
+towardliness/M
+towardly/P
+towards
+toward/YU
+towboat/MS
+tow/DRSZG
+towelette/S
+towel/GJDMS
+toweling/M
+tower/GMD
+towering/Y
+towhead/MSD
+towhee/SM
+towline/MS
+towner/M
+Townes
+Towney/M
+townhouse/S
+Townie/M
+townie/S
+Townley/M
+Town/M
+Townsend/M
+townsfolk
+township/MS
+townsman/M
+townsmen
+townspeople/M
+town/SRM
+townswoman/M
+townswomen
+Towny/M
+towpath/M
+towpaths
+towrope/MS
+Towsley/M
+toxemia/MS
+toxicity/MS
+toxicological
+toxicologist/SM
+toxicology/MS
+toxic/S
+toxin/MS
+toyer/M
+toymaker
+toy/MDRSG
+Toynbee/M
+Toyoda/M
+Toyota/M
+toyshop
+tr
+traceability/M
+traceableness/M
+traceable/P
+trace/ASDG
+traceback/MS
+traced/U
+Tracee/M
+traceless/Y
+Trace/M
+tracepoint/SM
+tracer/MS
+tracery/MDS
+trace's
+Tracey/M
+tracheae
+tracheal/M
+trachea/M
+tracheotomy/SM
+Tracie/M
+Traci/M
+tracing/SM
+trackage
+trackball/S
+trackbed
+tracked/U
+tracker/M
+trackless
+tracksuit/SM
+track/SZGMRD
+tractability/SI
+tractable/I
+tractably/I
+tract/ABS
+Tractarians
+traction/KSCEMAF
+tractive/KFE
+tractor/FKMASC
+tract's
+tracts/CEFK
+Tracy/M
+trademark/GSMD
+trader/M
+tradesman/M
+tradesmen
+tradespeople
+tradespersons
+trade/SRDGZM
+tradeswoman/M
+tradeswomen
+traditionalism/MS
+traditionalistic
+traditionalist/MS
+traditionalized
+traditionally
+traditional/U
+tradition/SM
+traduce/DRSGZ
+Trafalgar/M
+trafficked
+trafficker/MS
+trafficking/S
+traffic/SM
+tragedian/SM
+tragedienne/MS
+tragedy/MS
+tragically
+tragicomedy/SM
+tragicomic
+tragic/S
+trailblazer/MS
+trailblazing/S
+trailer/GDM
+trails/F
+trailside
+trail/SZGJRD
+trainable
+train/ASDG
+trained/U
+trainee/MS
+traineeships
+trainer/MS
+training/SM
+trainman/M
+trainmen
+trainspotter/S
+traipse/DSG
+trait/MS
+traitorous/Y
+traitor/SM
+Trajan/M
+trajectory/MS
+trammed
+trammeled/U
+trammel/GSD
+tramming
+tram/MS
+trample/DGRSZ
+trampler/M
+trampoline/GMSD
+tramp/RDSZG
+tramway/M
+trance/MGSD
+tranche/SM
+Tran/M
+tranquility/S
+tranquilized/U
+tranquilize/JGZDSR
+tranquilizer/M
+tranquilizes/A
+tranquilizing/YM
+tranquillize/GRSDZ
+tranquillizer/M
+tranquilness/M
+tranquil/PTRY
+transact/GSD
+transactional
+transaction/MS
+transactor/SM
+transalpine
+transaminase
+transatlantic
+Transcaucasia/M
+transceiver/SM
+transcendence/MS
+transcendentalism/SM
+transcendentalist/SM
+transcendental/YS
+transcendent/Y
+transcend/SDG
+transconductance
+transcontinental
+transcribe/DSRGZ
+transcriber/M
+transcription/SM
+transcript/SM
+transcultural
+transducer/SM
+transduction/M
+transect/DSG
+transept/SM
+transferability/M
+transferal/MS
+transfer/BSMD
+transferee/M
+transference/SM
+transferor/MS
+transferral/SM
+transferred
+transferrer/SM
+transferring
+transfiguration/SM
+transfigure/SDG
+transfinite/Y
+transfix/SDG
+transformational
+transformation/MS
+transform/DRZBSG
+transformed/U
+transformer/M
+transfuse/XSDGNB
+transfusion/M
+transgression/SM
+transgressor/S
+transgress/VGSD
+trans/I
+transience/SM
+transiency/S
+transient/YS
+transistorize/GDS
+transistor/SM
+Transite/M
+transitional/Y
+transition/MDGS
+transitivenesses
+transitiveness/IM
+transitive/PIY
+transitivity/MS
+transitoriness/M
+transitory/P
+transit/SGVMD
+transl
+translatability/M
+translatable/U
+translated/AU
+translate/VGNXSDB
+translational
+translation/M
+translator/SM
+transliterate/XNGSD
+translucence/SM
+translucency/MS
+translucent/Y
+transmigrate/XNGSD
+transmissible
+transmission/MSA
+transmissive
+transmit/AS
+transmittable
+transmittal/SM
+transmittance/MS
+transmitted/A
+transmitter/SM
+transmitting/A
+transmogrification/M
+transmogrify/GXDSN
+transmutation/SM
+transmute/GBSD
+transnational/S
+transoceanic
+transom/SM
+transonic
+transpacific
+transparency/MS
+transparentness/M
+transparent/YP
+transpiration/SM
+transpire/GSD
+transplantation/S
+transplant/GRDBS
+transpolar
+transponder/MS
+transportability
+transportable/U
+transportation/SM
+transport/BGZSDR
+transpose/BGSD
+transposed/U
+transposition/SM
+Transputer/M
+transsexualism/MS
+transsexual/SM
+transship/LS
+transshipment/SM
+transshipped
+transshipping
+transubstantiation/MS
+Transvaal/M
+transversal/YM
+transverse/GYDS
+transvestism/SM
+transvestite/SM
+transvestitism
+Transylvania/M
+trapdoor/S
+trapeze/DSGM
+trapezium/MS
+trapezoidal
+trapezoid/MS
+trap/MS
+trappable/U
+trapped
+trapper/SM
+trapping/S
+Trappist/MS
+trapshooting/SM
+trashcan/SM
+trashiness/SM
+trash/SRDMG
+trashy/TRP
+Trastevere/M
+trauma/MS
+traumatic
+traumatically
+traumatize/SDG
+travail/SMDG
+traveled/U
+traveler/M
+travelog's
+travelogue/S
+travel/SDRGZJ
+Traver/MS
+traversal/SM
+traverse/GBDRS
+traverser/M
+travertine/M
+travesty/SDGM
+Travis/M
+Travus/M
+trawler/M
+trawl/RDMSZG
+tray/SM
+treacherousness/SM
+treacherous/PY
+treachery/SM
+treacle/DSGM
+treacly
+treader/M
+treadle/GDSM
+treadmill/MS
+tread/SAGD
+Treadwell/M
+treas
+treason/BMS
+treasonous
+treasure/DRSZMG
+treasurer/M
+treasurership
+treasury/SM
+Treasury/SM
+treatable
+treated/U
+treater/S
+treatise/MS
+treatment/MS
+treat's
+treat/SAGDR
+treaty/MS
+treble/SDG
+Treblinka/M
+treeing
+treeless
+treelike
+tree/MDS
+treetop/SM
+trefoil/SM
+Trefor/M
+trekked
+trekker/MS
+Trekkie/M
+trekking
+trek/MS
+trellis/GDSM
+Tremaine/M
+Tremain/M
+trematode/SM
+Tremayne/M
+tremble/JDRSG
+trembler/M
+trembles/M
+trembly
+tremendousness/M
+tremendous/YP
+tremolo/MS
+tremor/MS
+tremulousness/SM
+tremulous/YP
+trenchancy/MS
+trenchant/Y
+trencherman/M
+trenchermen
+trencher/SM
+trench/GASD
+trench's
+trendily
+trendiness/S
+trend/SDMG
+trendy/PTRS
+Trenna/M
+Trent/M
+Trenton/M
+trepanned
+trepidation/MS
+Tresa/M
+Trescha/M
+trespasser/M
+trespass/ZRSDG
+Tressa/M
+tressed/E
+tresses/E
+tressing/E
+tress/MSDG
+trestle/MS
+Trevar/M
+Trevelyan/M
+Trever/M
+Trevino/M
+Trevor/M
+Trev/RM
+Trey/M
+trey/MS
+triableness/M
+triable/P
+triadic
+triad/MS
+triage/SDMG
+trial/ASM
+trialization
+trialled
+trialling
+triamcinolone
+triangle/SM
+triangulable
+triangularization/S
+triangular/Y
+triangulate/YGNXSD
+triangulation/M
+Triangulum/M
+Trianon/M
+Triassic
+triathlon/S
+triatomic
+tribalism/MS
+tribal/Y
+tribe/MS
+tribesman/M
+tribesmen
+tribeswoman
+tribeswomen
+tribulate/NX
+tribulation/M
+tribunal/MS
+tribune/SM
+tributary/MS
+tribute/EGSF
+tribute's
+trice/GSDM
+tricentennial/S
+triceps/SM
+triceratops/M
+trichinae
+trichina/M
+trichinoses
+trichinosis/M
+trichloroacetic
+trichloroethane
+trichotomy/M
+trichromatic
+Tricia/M
+trickery/MS
+trick/GMSRD
+trickily
+trickiness/SM
+trickle/DSG
+trickster/MS
+tricky/RPT
+tricolor/SMD
+tricycle/SDMG
+trident/SM
+tridiagonal
+tried/UA
+triennial/SY
+trier/AS
+trier's
+tries/A
+Trieste/M
+triffid/S
+trifle/MZGJSRD
+trifler/M
+trifluoride/M
+trifocals
+trigged
+trigger/GSDM
+triggest
+trigging
+triglyceride/MS
+trigonal/Y
+trigonometric
+trigonometrical
+trigonometry/MS
+trigram/S
+trig/S
+trihedral
+trike/GMSD
+trilateral/S
+trilby/SM
+trilingual
+trillion/SMH
+trillionth/M
+trillionths
+trillium/SM
+trill/RDMGS
+trilobite/MS
+trilogy/MS
+trimaran/MS
+Trimble/M
+trimer/M
+trimester/MS
+trimmed/U
+trimmer/MS
+trimmest
+trimming/MS
+trimness/S
+trimodal
+trimonthly
+trim/PSYR
+Trimurti/M
+Trina/M
+Trinidad/M
+trinitarian/S
+trinitrotoluene/SM
+trinity/MS
+Trinity/MS
+trinketer/M
+trinket/MRDSG
+triode/MS
+trio/SM
+trioxide/M
+tripartite/N
+tripartition/M
+tripe/MS
+triphenylarsine
+triphenylphosphine
+triphenylstibine
+triphosphopyridine
+triple/GSD
+triplet/SM
+triplex/S
+triplicate/SDG
+triplication/M
+triply/GDSN
+Trip/M
+tripodal
+tripod/MS
+tripoli/M
+Tripoli/M
+tripolyphosphate
+tripos/SM
+tripped
+Trippe/M
+tripper/MS
+tripping/Y
+Tripp/M
+trip/SMY
+triptych/M
+triptychs
+tripwire/MS
+trireme/SM
+Tris
+trisect/GSD
+trisection/S
+trisector
+Trisha/M
+Trish/M
+trisodium
+Trista/M
+Tristam/M
+Tristan/M
+tristate
+trisyllable/M
+tritely/F
+triteness/SF
+trite/SRPTY
+tritium/MS
+triton/M
+Triton/M
+triumphal
+triumphalism
+triumphant/Y
+triumph/GMD
+triumphs
+triumvirate/MS
+triumvir/MS
+triune
+trivalent
+trivet/SM
+trivia
+triviality/MS
+trivialization/MS
+trivialize/DSG
+trivial/Y
+trivium/M
+Trixie/M
+Trixi/M
+Trix/M
+Trixy/M
+Trobriand/M
+trochaic/S
+trochee/SM
+trod/AU
+trodden/UA
+trodes
+troff/MR
+troglodyte/MS
+troika/SM
+Trojan/MS
+troll/DMSG
+trolled/F
+trolleybus/S
+trolley/SGMD
+trolling/F
+trollish
+Trollope/M
+trollop/GSMD
+trolly's
+trombone/MS
+trombonist/SM
+tromp/DSG
+Trondheim/M
+trooper/M
+troopship/SM
+troop/SRDMZG
+trope/SM
+Tropez/M
+trophic
+trophy/MGDS
+tropical/SY
+tropic/MS
+tropism/SM
+tropocollagen
+troposphere/MS
+tropospheric
+troth/GDM
+troths
+trot/S
+Trotsky/M
+trotted
+trotter/SM
+trotting
+troubadour/SM
+troubled/U
+trouble/GDRSM
+troublemaker/MS
+troubler/M
+troubleshooter/M
+troubleshoot/SRDZG
+troubleshot
+troublesomeness/M
+troublesome/YP
+trough/M
+troughs
+trounce/GZDRS
+trouncer/M
+troupe/MZGSRD
+trouper/M
+trouser/DMGS
+trousseau/M
+trousseaux
+Troutman/M
+trout/SM
+trove/SM
+troweler/M
+trowel/SMDRGZ
+trow/SGD
+Troyes
+Troy/M
+troy/S
+Trstram/M
+truancy/MS
+truant/SMDG
+truce/SDGM
+Truckee/M
+trucker/M
+trucking/M
+truckle/GDS
+truckload/MS
+truck/SZGMRDJ
+truculence/SM
+truculent/Y
+Truda/M
+Trudeau/M
+Trude/M
+Trudey/M
+trudge/SRDG
+Trudie/M
+Trudi/M
+Trudy/M
+true/DRSPTG
+truelove/MS
+Trueman/M
+trueness/M
+truer/U
+truest/U
+truffle/MS
+truism/SM
+Trujillo/M
+Trula/M
+truly/U
+Trumaine/M
+Truman/M
+Trumann/M
+Trumbull/M
+trump/DMSG
+trumpery/SM
+trumpeter/M
+trumpet/MDRZGS
+Trump/M
+truncate/NGDSX
+truncation/M
+truncheon/MDSG
+trundle/GZDSR
+trundler/M
+trunk/GSMD
+trunnion/SM
+trusser/M
+trussing/M
+truss/SRDG
+trusted/EU
+trusteeing
+trustee/MDS
+trusteeship/SM
+truster/M
+trustful/EY
+trustfulness/SM
+trustiness/M
+trusting/Y
+trust/RDMSG
+trusts/E
+trustworthier
+trustworthiest
+trustworthiness/MS
+trustworthy/UP
+trusty/PTMSR
+Truth
+truthfulness/US
+truthful/UYP
+truths/U
+truth/UM
+TRW
+trying/Y
+try/JGDRSZ
+tryout/MS
+trypsin/M
+tryst/GDMS
+ts
+T's
+tsarevich
+tsarina's
+tsarism/M
+tsarist
+tsetse/S
+Tsimshian/M
+Tsiolkovsky/M
+Tsitsihar/M
+tsp
+tsunami/MS
+Tsunematsu/M
+Tswana/M
+TTL
+tty/M
+ttys
+Tuamotu/M
+Tuareg/M
+tubae
+tubal
+tuba/SM
+tubbed
+tubbing
+tubby/TR
+tubeless
+tubercle/MS
+tubercular/S
+tuberculin/MS
+tuberculoses
+tuberculosis/M
+tuberculous
+tuber/M
+tuberose/SM
+tuberous
+tube/SM
+tubing/M
+tub/JMDRSZG
+Tubman/M
+tubular/Y
+tubule/SM
+tucker/GDM
+Tucker/M
+tuck/GZSRD
+Tuckie/M
+Tuck/RM
+Tucky/M
+Tucson/M
+Tucuman/M
+Tudor/MS
+Tue/S
+Tuesday/SM
+tufter/M
+tuft/GZSMRD
+tufting/M
+tugboat/MS
+tugged
+tugging
+tug/S
+tuition/ISM
+Tulane/M
+tularemia/S
+tulip/SM
+tulle/SM
+Tulley/M
+Tull/M
+Tully/M
+Tulsa/M
+tum
+tumbledown
+tumbler/M
+tumbleweed/MS
+tumble/ZGRSDJ
+tumbrel/SM
+tumescence/S
+tumescent
+tumidity/MS
+tumid/Y
+tummy/SM
+tumor/MDS
+tumorous
+Tums/M
+tumult/SGMD
+tumultuousness/M
+tumultuous/PY
+tumulus/M
+tunableness/M
+tunable/P
+tuna/SM
+tundra/SM
+tun/DRJZGBS
+tune/CSDG
+tunefulness/MS
+tuneful/YP
+tuneless/Y
+tuner/M
+tune's
+tuneup/S
+tung
+tungstate/M
+tungsten/SM
+Tunguska/M
+Tungus/M
+tunic/MS
+tuning/A
+tuning's
+Tunisia/M
+Tunisian/S
+Tunis/M
+tunned
+tunneler/M
+tunnel/MRDSJGZ
+tunning
+tunny/SM
+tupelo/M
+Tupi/M
+tuple/SM
+tuppence/M
+Tupperware
+Tupungato/M
+turban/SDM
+turbid
+turbidity/SM
+turbinate/SD
+turbine/SM
+turbocharged
+turbocharger/SM
+turbofan/MS
+turbojet/MS
+turboprop/MS
+turbo/SM
+turbot/MS
+turbulence/SM
+turbulent/Y
+turd/MS
+tureen/MS
+turf/DGSM
+turfy/RT
+Turgenev/M
+turgidity/SM
+turgidness/M
+turgid/PY
+Turing/M
+Turin/M
+Turkestan/M
+Turkey/M
+turkey/SM
+Turkic/SM
+Turkish
+Turkmenistan/M
+turk/S
+Turk/SM
+turmeric/MS
+turmoil/SDMG
+turnabout/SM
+turnaround/MS
+turn/AZGRDBS
+turnbuckle/SM
+turncoat/SM
+turned/U
+turner/M
+Turner/M
+turning/MS
+turnip/SMDG
+turnkey/MS
+turnoff/MS
+turnout/MS
+turnover/SM
+turnpike/MS
+turnround/MS
+turnstile/SM
+turnstone/M
+turntable/SM
+turpentine/GMSD
+Turpin/M
+turpitude/SM
+turquoise/SM
+turret/SMD
+turtleback/MS
+turtledove/MS
+turtleneck/SDM
+turtle/SDMG
+turves's
+turvy
+Tuscaloosa/M
+Tuscan
+Tuscany/M
+Tuscarora/M
+Tucson/M
+tush/SDG
+Tuskegee/M
+tusker/M
+tusk/GZRDMS
+tussle/GSD
+tussock/MS
+tussocky
+Tussaud/M
+Tutankhamen/M
+tutelage/MS
+tutelary/S
+Tut/M
+tutored/U
+tutorial/MS
+tutor/MDGS
+tutorship/S
+tut/S
+Tutsi
+tutted
+tutting
+tutti/S
+Tuttle/M
+tutu/SM
+Tuvalu
+tuxedo/SDM
+tux/S
+TVA
+TV/M
+TVs
+twaddle/GZMRSD
+twaddler/M
+Twain/M
+twain/S
+TWA/M
+twang/MDSG
+twangy/TR
+twas
+tweak/SGRD
+tweediness/M
+Tweedledee/M
+Tweedledum/M
+Tweed/M
+twee/DP
+tweed/SM
+tweedy/PTR
+tween
+tweeter/M
+tweet/ZSGRD
+tweezer/M
+tweeze/ZGRD
+twelfth
+twelfths
+twelvemonth/M
+twelvemonths
+twelve/MS
+twentieths
+twenty/MSH
+twerp/MS
+twice/R
+twiddle/GRSD
+twiddler/M
+twiddly/RT
+twigged
+twigging
+twiggy/RT
+twig/SM
+Twila/M
+twilight/MS
+twilit
+twill/SGD
+twiner/M
+twine/SM
+twinge/SDMG
+Twinkie
+twinkler/M
+twinkle/RSDG
+twinkling/M
+twinkly
+twinned
+twinning
+twin/RDMGZS
+twirler/M
+twirling/Y
+twirl/SZGRD
+twirly/TR
+twisted/U
+twister/M
+twists/U
+twist/SZGRD
+twisty
+twitch/GRSD
+twitchy/TR
+twit/S
+twitted
+twitterer/M
+twitter/SGRD
+twittery
+twitting
+twixt
+twofer/MS
+twofold/S
+two/MS
+twopence/SM
+twopenny/S
+twosome/MS
+twp
+Twp
+TWX
+Twyla/M
+TX
+t/XTJBG
+Tybalt/M
+Tybie/M
+Tybi/M
+tycoon/MS
+tyeing
+Tye/M
+tying/UA
+tyke/SM
+Tylenol/M
+Tyler/M
+Ty/M
+Tymon/M
+Tymothy/M
+tympani
+tympanist/SM
+tympanum/SM
+Tynan/M
+Tyndale/M
+Tyndall/M
+Tyne/M
+typeahead
+typecast/SG
+typed/AU
+typedef/S
+typeface/MS
+typeless
+type/MGDRSJ
+types/A
+typescript/SM
+typeset/S
+typesetter/MS
+typesetting/SM
+typewriter/M
+typewrite/SRJZG
+typewriting/M
+typewritten
+typewrote
+typhoid/SM
+Typhon/M
+typhoon/SM
+typhus/SM
+typicality/MS
+typically
+typicalness/M
+typical/U
+typification/M
+typify/SDNXG
+typing/A
+typist/MS
+typographer/SM
+typographic
+typographical/Y
+typography/MS
+typological/Y
+typology/MS
+typo/MS
+tyrannic
+tyrannicalness/M
+tyrannical/PY
+tyrannicide/M
+tyrannizer/M
+tyrannize/ZGJRSD
+tyrannizing/YM
+tyrannosaur/MS
+tyrannosaurus/S
+tyrannous
+tyranny/MS
+tyrant/MS
+Tyree/M
+tyreo
+Tyrolean/S
+Tyrol's
+Tyrone/M
+tyrosine/M
+tyro/SM
+Tyrus/M
+Tyson/M
+tzarina's
+tzar's
+Tzeltal/M
+u
+U
+UAR
+UART
+UAW
+Ubangi/M
+ubiquitous/YP
+ubiquity/S
+Ucayali/M
+Uccello/M
+UCLA/M
+Udale/M
+Udall/M
+udder/SM
+Udell/M
+Ufa/M
+ufologist/S
+ufology/MS
+UFO/S
+Uganda/M
+Ugandan/S
+ugh
+ughs
+uglification
+ugliness/MS
+uglis
+ugly/PTGSRD
+Ugo/M
+uh
+UHF
+Uighur
+Ujungpandang/M
+UK
+ukase/SM
+Ukraine/M
+Ukrainian/S
+ukulele/SM
+UL
+Ula/M
+Ulberto/M
+ulcerate/NGVXDS
+ulceration/M
+ulcer/MDGS
+ulcerous
+Ulick/M
+Ulises/M
+Ulla/M
+Ullman/M
+ulnae
+ulna/M
+ulnar
+Ulrica/M
+Ulrich/M
+Ulrick/M
+Ulric/M
+Ulrika/M
+Ulrikaumeko/M
+Ulrike/M
+Ulster/M
+ulster/MS
+ult
+ulterior/Y
+ultimas
+ultimate/DSYPG
+ultimateness/M
+ultimatum/MS
+ultimo
+ultracentrifugally
+ultracentrifugation
+ultracentrifuge/M
+ultraconservative/S
+ultrafast
+ultrahigh
+ultralight/S
+ultramarine/SM
+ultramodern
+ultramontane
+ultra/S
+ultrashort
+ultrasonically
+ultrasonic/S
+ultrasonics/M
+ultrasound/SM
+ultrastructure/M
+Ultrasuede
+ultraviolet/SM
+Ultrix/M
+ULTRIX/M
+ululate/DSXGN
+ululation/M
+Ulyanovsk/M
+Ulysses/M
+um
+umbel/MS
+umber/GMDS
+Umberto/M
+umbilical/S
+umbilici
+umbilicus/M
+umbrage/MGSD
+umbrageous
+umbra/MS
+umbrella/GDMS
+Umbriel/M
+Umeko/M
+umiak/MS
+umlaut/GMDS
+umpire/MGSD
+ump/MDSG
+umpteen/H
+UN
+unabated/Y
+unabridged/S
+unacceptability
+unacceptable
+unaccepted
+unaccommodating
+unaccountability
+unaccustomed/Y
+unadapted
+unadulterated/Y
+unadventurous
+unalienability
+unalterableness/M
+unalterable/P
+unalterably
+Una/M
+unambiguity
+unambiguous
+unambitious
+unamused
+unanimity/SM
+unanimous/Y
+unanticipated/Y
+unapologetic
+unapologizing/M
+unappeasable
+unappeasably
+unappreciative
+unary
+unassailableness/M
+unassailable/P
+unassertive
+unassumingness/M
+unassuming/PY
+unauthorized/PY
+unavailing/PY
+unaware/SPY
+unbalanced/P
+unbar
+unbarring
+unbecoming/P
+unbeknown
+unbelieving/Y
+unbiased/P
+unbid
+unbind/G
+unblessed
+unblinking/Y
+unbodied
+unbolt/G
+unbreakability
+unbred
+unbroken
+unbuckle
+unbudging/Y
+unburnt
+uncap
+uncapping
+uncatalogued
+uncauterized/MS
+unceasing/Y
+uncelebrated
+uncertain/P
+unchallengeable
+unchangingness/M
+unchanging/PY
+uncharacteristic
+uncharismatic
+unchastity
+unchristian
+uncial/S
+uncivilized/Y
+unclassified
+uncle/MSD
+unclouded/Y
+uncodable
+uncollected
+uncoloredness/M
+uncolored/PY
+uncombable
+uncommunicative
+uncompetitive
+uncomplicated
+uncomprehending/Y
+uncompromisable
+unconcerned/P
+unconcern/M
+unconfirmed
+unconfused
+unconscionableness/M
+unconscionable/P
+unconscionably
+unconstitutional
+unconsumed
+uncontentious
+uncontrollability
+unconvertible
+uncool
+uncooperative
+uncork/G
+uncouple/G
+uncouthness/M
+uncouth/YP
+uncreate/V
+uncritical
+uncross/GB
+uncrowded
+unction/IM
+unctions
+unctuousness/MS
+unctuous/PY
+uncustomary
+uncut
+undated/I
+undaunted/Y
+undeceive
+undecided/S
+undedicated
+undefinability
+undefinedness/M
+undefined/P
+undelete
+undeliverability
+undeniableness/M
+undeniable/P
+undeniably
+undependable
+underachiever/M
+underachieve/SRDGZ
+underact/GDS
+underadjusting
+underage/S
+underarm/DGS
+underbedding
+underbelly/MS
+underbidding
+underbid/S
+underbracing
+underbrush/MSDG
+undercarriage/MS
+undercharge/GSD
+underclassman
+underclassmen
+underclass/S
+underclothes
+underclothing/MS
+undercoating/M
+undercoat/JMDGS
+underconsumption/M
+undercooked
+undercount/S
+undercover
+undercurrent/SM
+undercut/S
+undercutting
+underdeveloped
+underdevelopment/MS
+underdog/MS
+underdone
+undereducated
+underemphasis
+underemployed
+underemployment/SM
+underenumerated
+underenumeration
+underestimate/NGXSD
+underexploited
+underexpose/SDG
+underexposure/SM
+underfed
+underfeed/SG
+underfloor
+underflow/GDMS
+underfoot
+underfund/DG
+underfur/MS
+undergarment/SM
+undergirding
+undergoes
+undergo/G
+undergone
+undergrad/MS
+undergraduate/MS
+underground/RMS
+undergrowth/M
+undergrowths
+underhand/D
+underhandedness/MS
+underhanded/YP
+underheat
+underinvestment
+underlaid
+underlain/S
+underlay/GS
+underlie
+underline/GSDJ
+underling/MS
+underlip/SM
+underloaded
+underly/GS
+undermanned
+undermentioned
+undermine/SDG
+undermost
+underneath
+underneaths
+undernourished
+undernourishment/SM
+underpaid
+underpants
+underpart/MS
+underpass/SM
+underpay/GSL
+underpayment/SM
+underperformed
+underpinned
+underpinning/MS
+underpin/S
+underplay/SGD
+underpopulated
+underpopulation/M
+underpowered
+underpricing
+underprivileged
+underproduction/MS
+underrate/GSD
+underregistration/M
+underreported
+underreporting
+underrepresentation/M
+underrepresented
+underscore/SDG
+undersealed
+undersea/S
+undersecretary/SM
+undersell/SG
+undersexed
+undershirt/SM
+undershoot/SG
+undershorts
+undershot
+underside/SM
+undersigned/M
+undersign/SGD
+undersized
+undersizes
+undersizing
+underskirt/MS
+undersold
+underspecification
+underspecified
+underspend/G
+understaffed
+understandability/M
+understandably
+understanding/YM
+understand/RGSJB
+understate/GSDL
+understatement/MS
+understocked
+understood
+understrength
+understructure/SM
+understudy/GMSD
+undertaken
+undertaker/M
+undertake/SRGZJ
+undertaking/M
+underthings
+undertone/SM
+undertook
+undertow/MS
+underused
+underusing
+underutilization/M
+underutilized
+undervaluation/S
+undervalue/SDG
+underwater/S
+underway
+underwear/M
+underweight/S
+underwent
+underwhelm/DGS
+underwood/M
+Underwood/M
+underworld/MS
+underwrite/GZSR
+underwriter/M
+underwritten
+underwrote
+under/Y
+undeserving
+undesigned
+undeviating/Y
+undialyzed/SM
+undiplomatic
+undiscerning
+undiscriminating
+undo/GJ
+undoubted/Y
+undramatic
+undramatized/SM
+undress/G
+undrinkability
+undrinkable
+undroppable
+undue
+undulant
+undulate/XDSNG
+undulation/M
+unearthliness/S
+unearthly/P
+unearth/YG
+unease
+uneconomic
+uneducated
+unemployed/S
+unencroachable
+unending/Y
+unendurable/P
+unenergized/MS
+unenforced
+unenterprising
+UNESCO
+unethical
+uneulogized/SM
+unexacting
+unexceptionably
+unexcited
+unexpectedness/MS
+unfading/Y
+unfailingness/M
+unfailing/P
+unfamiliar
+unfashionable
+unfathomably
+unfavored
+unfeeling
+unfeigned/Y
+unfelt
+unfeminine
+unfertile
+unfetchable
+unflagging
+unflappability/S
+unflappable
+unflappably
+unflinching/Y
+unfold/LG
+unfoldment/M
+unforced
+unforgeable
+unfossilized/MS
+unfraternizing/SM
+unfrozen
+unfulfillable
+unfunny
+unfussy
+ungainliness/MS
+ungainly/PRT
+Ungava/M
+ungenerous
+ungentle
+unglamorous
+ungrammaticality
+ungrudging
+unguent/MS
+ungulate/MS
+unharmonious
+unharness/G
+unhistorical
+unholy/TP
+unhook/DG
+unhydrolyzed/SM
+unhygienic
+Unibus/M
+unicameral
+UNICEF
+unicellular
+Unicode/M
+unicorn/SM
+unicycle/MGSD
+unicyclist/MS
+unideal
+unidimensional
+unidiomatic
+unidirectionality
+unidirectional/Y
+unidolized/MS
+unifiable
+unification/MA
+unifier/MS
+unifilar
+uniformity/MS
+uniformness/M
+uniform/TGSRDYMP
+unify/AXDSNG
+unilateralism/M
+unilateralist
+unilateral/Y
+unimodal
+unimpeachably
+unimportance
+unimportant
+unimpressive
+unindustrialized/MS
+uninhibited/YP
+uninominal
+uninsured
+unintellectual
+unintended
+uninteresting
+uninterruptedness/M
+uninterrupted/YP
+unintuitive
+uninviting
+union/AEMS
+unionism/SM
+unionist/SM
+Unionist/SM
+unionize
+Union/MS
+UniPlus/M
+unipolar
+uniprocessor/SM
+uniqueness/S
+unique/TYSRP
+Uniroyal/M
+unisex/S
+UniSoft/M
+unison/MS
+Unisys/M
+unitarianism/M
+Unitarianism/SM
+unitarian/MS
+Unitarian/MS
+unitary
+unite/AEDSG
+united/Y
+uniter/M
+unitize/GDS
+unit/VGRD
+unity/SEM
+univ
+Univac/M
+univalent/S
+univalve/MS
+univariate
+universalism/M
+universalistic
+universality/SM
+universalize/DSRZG
+universalizer/M
+universal/YSP
+universe/MS
+university/MS
+Unix/M
+UNIX/M
+unjam
+unkempt
+unkind/TP
+unkink
+unknightly
+unknowable/S
+unknowing
+unlabored
+unlace/G
+unlearn/G
+unlikeable
+unlikeliness/S
+unlimber/G
+unlimited
+unlit
+unliterary
+unloose/G
+unlucky/TP
+unmagnetized/MS
+unmanageably
+unmannered/Y
+unmask/G
+unmeaning
+unmeasured
+unmeetable
+unmelodious
+unmemorable
+unmemorialized/MS
+unmentionable/S
+unmerciful
+unmeritorious
+unmethodical
+unmineralized/MS
+unmissable
+unmistakably
+unmitigated/YP
+unmnemonic
+unmobilized/SM
+unmoral
+unmount/B
+unmovable
+unmoving
+unnaturalness/M
+unnavigable
+unnerving/Y
+unobliging
+unoffensive
+unofficial
+unorganized/YP
+unorthodox
+unpack/G
+unpaintable
+unpalatability
+unpalatable
+unpartizan
+unpatronizing
+unpeople
+unperceptive
+unperson
+unperturbed/Y
+unphysical
+unpick/G
+unpicturesque
+unpinning
+unpleasing
+unploughed
+unpolarized/SM
+unpopular
+unpractical
+unprecedented/Y
+unpredictable/S
+unpreemphasized
+unpremeditated
+unpretentiousness/M
+unprincipled/P
+unproblematic
+unproductive
+unpropitious
+unprovable
+unproven
+unprovocative
+unpunctual
+unquestionable
+unraisable
+unravellings
+unreadability
+unread/B
+unreal
+unrealizable
+unreasoning/Y
+unreceptive
+unrecordable
+unreflective
+unrelenting/Y
+unremitting/Y
+unrepeatability
+unrepeated
+unrepentant
+unreported
+unrepresentative
+unreproducible
+unrest/G
+unrestrained/P
+unrewarding
+unriddle
+unripe/P
+unromantic
+unruliness/SM
+unruly/PTR
+unsaleable
+unsanitary
+unsavored/YP
+unsavoriness/M
+unseal/GB
+unsearchable
+unseasonal
+unseeing/Y
+unseen/S
+unselfconsciousness/M
+unselfconscious/P
+unselfishness/M
+unsellable
+unsentimental
+unset
+unsettledness/M
+unsettled/P
+unsettling/Y
+unshapely
+unshaven
+unshorn
+unsighted
+unsightliness/S
+unskilful
+unsociability
+unsociable/P
+unsocial
+unsound/PT
+unspeakably
+unspecific
+unspectacular
+unspoilt
+unspoke
+unsporting
+unstable/P
+unstigmatized/SM
+unstilted
+unstinting/Y
+unstopping
+unstrapping
+unstudied
+unstuffy
+unsubdued
+unsubstantial
+unsubtle
+unsuitable
+unsuspecting/Y
+unswerving/Y
+unsymmetrical
+unsympathetic
+unsystematic
+unsystematized/Y
+untactful
+untalented
+untaxing
+unteach/B
+untellable
+untenable
+unthinking
+until/G
+untiring/Y
+unto
+untouchable/MS
+untowardness/M
+untoward/P
+untraceable
+untrue
+untruthfulness/M
+untwist/G
+Unukalhai/M
+unusualness/M
+unutterable
+unutterably
+unvocalized/MS
+unvulcanized/SM
+unwavering
+unwarrantable
+unwarrantably
+unwashed/PS
+unwearable
+unwearied/Y
+unwed
+unwedge
+unwelcome
+unwell/M
+unwieldiness/MS
+unwieldy/TPR
+unwind/B
+unwomanly
+unworkable/S
+unworried
+unwrap
+unwrapping
+unyielding/Y
+unyoke
+unzip
+up
+Upanishads
+uparrow
+upbeat/SM
+upbraid/GDRS
+upbringing/M
+upbring/JG
+UPC
+upchuck/SDG
+upcome/G
+upcountry/S
+updatability
+updater/M
+update/RSDG
+Updike/M
+updraft/SM
+upend/SDG
+upfield
+upfront
+upgradeable
+upgrade/DSJG
+upheaval/MS
+upheld
+uphill/S
+upholder/M
+uphold/RSGZ
+upholster/ADGS
+upholsterer/SM
+upholstery/MS
+UPI
+upkeep/SM
+uplander/M
+upland/MRS
+uplifter/M
+uplift/SJDRG
+upload/GSD
+upmarket
+upon
+upped
+uppercase/GSD
+upperclassman/M
+upperclassmen
+uppercut/S
+uppercutting
+uppermost
+upper/S
+upping
+uppish
+uppity
+upraise/GDS
+uprated
+uprating
+uprear/DSG
+upright/DYGSP
+uprightness/S
+uprise/RGJ
+uprising/M
+upriver/S
+uproariousness/M
+uproarious/PY
+uproar/MS
+uproot/DRGS
+uprooter/M
+ups
+UPS
+upscale/GDS
+upset/S
+upsetting/MS
+upshot/SM
+upside/MS
+upsilon/MS
+upslope
+upstage/DSRG
+upstairs
+upstandingness/M
+upstanding/P
+upstart/MDGS
+upstate/SR
+upstream/DSG
+upstroke/MS
+upsurge/DSG
+upswing/GMS
+upswung
+uptake/SM
+upthrust/GMS
+uptight
+uptime
+Upton/M
+uptown/RS
+uptrend/M
+upturn/GDS
+upwardness/M
+upward/SYP
+upwelling
+upwind/S
+uracil/MS
+Ural/MS
+Urania/M
+uranium/MS
+Uranus/M
+uranyl/M
+Urbain/M
+Urbana/M
+urbane/Y
+urbanism/M
+urbanite/SM
+urbanity/SM
+urbanization/MS
+urbanize/DSG
+Urban/M
+urbanologist/S
+urbanology/S
+Urbano/M
+urban/RT
+Urbanus/M
+urchin/SM
+Urdu/M
+urea/SM
+uremia/MS
+uremic
+ureter/MS
+urethane/MS
+urethrae
+urethral
+urethra/M
+urethritis/M
+Urey/M
+urge/GDRSJ
+urgency/SM
+urgent/Y
+urger/M
+Uriah/M
+uric
+Uriel/M
+urinal/MS
+urinalyses
+urinalysis/M
+urinary/MS
+urinate/XDSNG
+urination/M
+urine/MS
+Uri/SM
+URL
+Ur/M
+urning/M
+urn/MDGS
+urogenital
+urological
+urologist/S
+urology/MS
+Urquhart/M
+Ursala/M
+Ursa/M
+ursine
+Ursola/M
+Urson/M
+Ursula/M
+Ursulina/M
+Ursuline/M
+urticaria/MS
+Uruguayan/S
+Uruguay/M
+Urumqi
+US
+USA
+usability/S
+usable/U
+usably/U
+USAF
+usage/SM
+USART
+USCG
+USC/M
+USDA
+us/DRSBZG
+used/U
+use/ESDAG
+usefulness/SM
+useful/YP
+uselessness/MS
+useless/PY
+Usenet/M
+Usenix/M
+user/M
+USG/M
+usherette/SM
+usher/SGMD
+USIA
+USMC
+USN
+USO
+USP
+USPS
+USS
+USSR
+Ustinov/M
+usu
+usuals
+usual/UPY
+usurer/SM
+usuriousness/M
+usurious/PY
+usurpation/MS
+usurper/M
+usurp/RDZSG
+usury/SM
+UT
+Utahan/SM
+Utah/M
+Uta/M
+Ute/M
+utensil/SM
+uteri
+uterine
+uterus/M
+Utica/M
+utile/I
+utilitarianism/MS
+utilitarian/S
+utility/MS
+utilization/MS
+utilization's/A
+utilize/GZDRS
+utilizer/M
+utilizes/A
+utmost/S
+Utopia/MS
+utopianism/M
+utopian's
+Utopian/S
+utopia/S
+Utrecht/M
+Utrillo/M
+utterance/MS
+uttered/U
+utterer/M
+uttermost/S
+utter/TRDYGS
+uucp/M
+UV
+uvula/MS
+uvular/S
+uxorious
+Uzbekistan
+Uzbek/M
+Uzi/M
+V
+VA
+vacancy/MS
+vacantness/M
+vacant/PY
+vacate/NGXSD
+vacationist/SM
+vacationland
+vacation/MRDZG
+vaccinate/NGSDX
+vaccination/M
+vaccine/SM
+vaccinial
+vaccinia/M
+Vachel/M
+vacillate/XNGSD
+vacillating/Y
+vacillation/M
+vacillator/SM
+Vaclav/M
+vacua's
+vacuity/MS
+vacuo
+vacuolated/U
+vacuolate/SDGN
+vacuole/SM
+vacuolization/SM
+vacuousness/MS
+vacuous/PY
+vacuum/GSMD
+Vader/M
+Vaduz/M
+vagabondage/MS
+vagabond/DMSG
+vagarious
+vagary/MS
+vaginae
+vaginal/Y
+vagina/M
+vagrancy/MS
+vagrant/SMY
+vagueing
+vagueness/MS
+vague/TYSRDP
+Vail/M
+vaingloriousness/M
+vainglorious/YP
+vainglory/MS
+vain/TYRP
+val
+valance/SDMG
+Valaree/M
+Valaria/M
+Valarie/M
+Valdemar/M
+Valdez/M
+Valeda/M
+valediction/MS
+valedictorian/MS
+valedictory/MS
+Vale/M
+valence/SM
+Valencia/MS
+valency/MS
+Valene/M
+Valenka/M
+Valentia/M
+Valentijn/M
+Valentina/M
+Valentine/M
+valentine/SM
+Valentin/M
+Valentino/M
+Valenzuela/M
+Valera/M
+Valeria/M
+Valerian/M
+Valerie/M
+Valerye/M
+Valéry/M
+vale/SM
+valet/GDMS
+valetudinarianism/MS
+valetudinarian/MS
+Valhalla/M
+valiance/S
+valiantness/M
+valiant/SPY
+Valida/M
+validated/AU
+validate/INGSDX
+validates/A
+validation/AMI
+validity/IMS
+validnesses
+validness/MI
+valid/PIY
+Valina/M
+valise/MS
+Valium/S
+Valkyrie/SM
+Vallejo
+Valle/M
+Valletta/M
+valley/SM
+Vallie/M
+Valli/M
+Vally/M
+Valma/M
+Val/MY
+Valois/M
+valor/MS
+valorous/Y
+Valparaiso/M
+Valry/M
+valuable/IP
+valuableness/IM
+valuables
+valuably/I
+valuate/NGXSD
+valuation/CSAM
+valuator/SM
+value/CGASD
+valued/U
+valuelessness/M
+valueless/P
+valuer/SM
+value's
+values/E
+valve/GMSD
+valveless
+valvular
+Va/M
+vamoose/GSD
+vamp/ADSG
+vamper
+vampire/MGSD
+vamp's
+vanadium/MS
+Vance/M
+Vancouver/M
+vandalism/MS
+vandalize/GSD
+vandal/MS
+Vandal/MS
+Vanda/M
+Vandenberg/M
+Vanderbilt/M
+Vanderburgh/M
+Vanderpoel/M
+Vandyke/SM
+vane/MS
+Vanessa/M
+Vang/M
+vanguard/MS
+Vania/M
+vanilla/MS
+vanisher/M
+vanish/GRSDJ
+vanishing/Y
+vanity/SM
+Van/M
+Vanna/M
+vanned
+Vannie/M
+Vanni/M
+vanning
+Vanny/M
+vanquisher/M
+vanquish/RSDGZ
+van/SMD
+vantage/MS
+Vanuatu
+Vanya/M
+Vanzetti/M
+vapidity/MS
+vapidness/SM
+vapid/PY
+vaporer/M
+vaporing/MY
+vaporisation
+vaporise/DSG
+vaporization/AMS
+vaporize/DRSZG
+vaporizer/M
+vapor/MRDJGZS
+vaporous
+vapory
+vaquero/SM
+VAR
+Varanasi/M
+Varese/M
+Vargas/M
+variability/IMS
+variableness/IM
+variable/PMS
+variables/I
+variably/I
+variance/I
+variances
+variance's
+Varian/M
+variant/ISY
+variate/MGNSDX
+variational
+variation/M
+varicolored/MS
+varicose/S
+variedly
+varied/U
+variegate/NGXSD
+variegation/M
+varier/M
+varietal/S
+variety/MS
+various/PY
+varistor/M
+Varityping/M
+varlet/MS
+varmint/SM
+varnished/U
+varnisher/M
+varnish/ZGMDRS
+var/S
+varsity/MS
+varying/UY
+vary/SRDJG
+vascular
+vasectomy/SM
+Vaseline/DSMG
+vase/SM
+Vasili/MS
+Vasily/M
+vasomotor
+Vasquez/M
+vassalage/MS
+vassal/GSMD
+Vassar/M
+Vassili/M
+Vassily/M
+vastness/MS
+vast/PTSYR
+v/ASV
+VAT
+Vatican/M
+vat/SM
+vatted
+vatting
+vaudeville/SM
+vaudevillian/SM
+Vaudois
+Vaughan/M
+Vaughn/M
+vaulter/M
+vaulting/M
+vault/ZSRDMGJ
+vaunter/M
+vaunt/GRDS
+VAXes
+Vax/M
+VAX/M
+Vazquez/M
+vb
+VCR
+VD
+VDT
+VDU
+vealed/A
+vealer/MA
+veal/MRDGS
+veals/A
+Veblen/M
+vectorial
+vectorization
+vectorized
+vectorizing
+vector's/F
+vector/SGDM
+Veda/MS
+Vedanta/M
+veejay/S
+veep/S
+veer/DSG
+veering/Y
+vegan/SM
+Vega/SM
+Vegemite/M
+veges
+vegetable/MS
+vegetarianism/MS
+vegetarian/SM
+vegetate/DSNGVX
+vegetation/M
+vegetative/PY
+vegged
+veggie/S
+vegging
+veg/M
+vehemence/MS
+vehemency/S
+vehement/Y
+vehicle/SM
+vehicular
+veiling/MU
+veil's
+veil/UGSD
+vein/GSRDM
+veining/M
+vela/M
+Vela/M
+velarize/SDG
+velar/S
+Velásquez/M
+Velázquez
+Velcro/SM
+veld/SM
+veldt's
+Velez/M
+Vella/M
+vellum/MS
+Velma/M
+velocipede/SM
+velocity/SM
+velor/S
+velour's
+velum/M
+Velveeta/M
+velveteen/MS
+velvet/GSMD
+Velvet/M
+velvety/RT
+venality/MS
+venal/Y
+venation/SM
+vend/DSG
+vender's/K
+vendetta/MS
+vendible/S
+vendor/MS
+veneerer/M
+veneer/GSRDM
+veneering/M
+venerability/S
+venerable/P
+venerate/XNGSD
+veneration/M
+venereal
+venetian
+Venetian/SM
+Venezuela/M
+Venezuelan/S
+vengeance/MS
+vengeful/APY
+vengefulness/AM
+venialness/M
+venial/YP
+Venice/M
+venireman/M
+veniremen
+venison/SM
+Venita/M
+Venn/M
+venomousness/M
+venomous/YP
+venom/SGDM
+venous/Y
+venter/M
+ventilated/U
+ventilate/XSDVGN
+ventilation/M
+ventilator/MS
+vent/ISGFD
+ventral/YS
+ventricle/MS
+ventricular
+ventriloquies
+ventriloquism/MS
+ventriloquist/MS
+ventriloquy
+vent's/F
+Ventura/M
+venture/RSDJZG
+venturesomeness/SM
+venturesome/YP
+venturi/S
+venturousness/MS
+venturous/YP
+venue/MAS
+Venusian/S
+Venus/S
+veraciousness/M
+veracious/YP
+veracities
+veracity/IM
+Veracruz/M
+Veradis
+Vera/M
+verandahed
+veranda/SDM
+verbalization/MS
+verbalized/U
+verbalizer/M
+verbalize/ZGRSD
+verballed
+verballing
+verbal/SY
+verbatim
+verbena/MS
+verbiage/SM
+verb/KSM
+verbose/YP
+verbosity/SM
+verboten
+verdant/Y
+Verde/M
+Verderer/M
+verdict/SM
+verdigris/GSDM
+Verdi/M
+verdure/SDM
+Vere/M
+Verena/M
+Verene/M
+verge/FGSD
+Verge/M
+verger/SM
+verge's
+Vergil's
+veridical/Y
+Veriee/M
+verifiability/M
+verifiableness/M
+verifiable/U
+verification/S
+verified/U
+verifier/MS
+verify/GASD
+Verile/M
+verily
+Verina/M
+Verine/M
+verisimilitude/SM
+veritableness/M
+veritable/P
+veritably
+verity/MS
+Verlag/M
+Verlaine/M
+Verla/M
+Vermeer/M
+vermicelli/MS
+vermiculite/MS
+vermiform
+vermilion/MS
+vermin/M
+verminous
+Vermonter/M
+Vermont/ZRM
+vermouth/M
+vermouths
+vernacular/YS
+vernal/Y
+Verna/M
+Verne/M
+Vernen/M
+Verney/M
+Vernice/M
+vernier/SM
+Vern/NM
+Vernon/M
+Vernor/M
+Verona/M
+Veronese/M
+Veronica/M
+veronica/SM
+Veronika/M
+Veronike/M
+Veronique/M
+verrucae
+verruca/MS
+versa
+Versailles/M
+Versatec/M
+versatileness/M
+versatile/YP
+versatility/SM
+versed/UI
+verse's
+verses/I
+verse/XSRDAGNF
+versicle/M
+versification/M
+versifier/M
+versify/GDRSZXN
+versing/I
+version/MFISA
+verso/SM
+versus
+vertebrae
+vertebral/Y
+vertebra/M
+vertebrate/IMS
+vertebration/M
+vertex/SM
+vertical/YPS
+vertices's
+vertiginous
+vertigoes
+vertigo/M
+verve/SM
+very/RT
+Vesalius/M
+vesicle/SM
+vesicular/Y
+vesiculate/GSD
+Vespasian/M
+vesper/SM
+Vespucci/M
+vessel/MS
+vestal/YS
+Vesta/M
+vest/DIGSL
+vestibular
+vestibule/SDM
+vestige/SM
+vestigial/Y
+vesting/SM
+vestment/ISM
+vestryman/M
+vestrymen
+vestry/MS
+vest's
+vesture/SDMG
+Vesuvius/M
+vetch/SM
+veteran/SM
+veterinarian/MS
+veterinary/S
+veter/M
+veto/DMG
+vetoes
+vet/SMR
+vetted
+vetting/A
+Vevay/M
+vexation/SM
+vexatiousness/M
+vexatious/PY
+vexed/Y
+vex/GFSD
+VF
+VFW
+VG
+VGA
+vhf
+VHF
+VHS
+VI
+via
+viability/SM
+viable/I
+viably
+viaduct/MS
+Viagra/M
+vial/MDGS
+viand/SM
+vibe/S
+vibraharp/MS
+vibrancy/MS
+vibrant/YS
+vibraphone/MS
+vibraphonist/SM
+vibrate/XNGSD
+vibrational/Y
+vibration/M
+vibrato/MS
+vibrator/SM
+vibratory
+vibrio/M
+vibrionic
+viburnum/SM
+vicarage/SM
+vicariousness/MS
+vicarious/YP
+vicar/SM
+vice/CMS
+viced
+vicegerent/MS
+vicennial
+Vicente/M
+viceregal
+viceroy/SM
+Vichy/M
+vichyssoise/MS
+vicing
+vicinity/MS
+viciousness/S
+vicious/YP
+vicissitude/MS
+Vickers/M
+Vickie/M
+Vicki/M
+Vicksburg/M
+Vicky/M
+Vick/ZM
+Vic/M
+victimization/SM
+victimized/U
+victimizer/M
+victimize/SRDZG
+victim/SM
+Victoir/M
+Victoria/M
+Victorianism/S
+Victorian/S
+victoriousness/M
+victorious/YP
+Victor/M
+victor/SM
+victory/MS
+Victrola/SM
+victualer/M
+victual/ZGSDR
+vicuña/S
+Vidal/M
+Vida/M
+videlicet
+videocassette/S
+videoconferencing
+videodisc/S
+videodisk/SM
+video/GSMD
+videophone/SM
+videotape/SDGM
+Vidovic/M
+Vidovik/M
+Vienna/M
+Viennese/M
+Vientiane/M
+vier/M
+vie/S
+Vietcong/M
+Viet/M
+Vietminh/M
+Vietnamese/M
+Vietnam/M
+viewed/A
+viewer/AS
+viewer's
+viewfinder/MS
+viewgraph/SM
+viewing/M
+viewless/Y
+view/MBGZJSRD
+viewpoint/SM
+views/A
+vigesimal
+vigilance/MS
+vigilante/SM
+vigilantism/MS
+vigilantist
+vigilant/Y
+vigil/SM
+vignette/MGDRS
+vignetter/M
+vignetting/M
+vignettist/MS
+vigor/MS
+vigorousness/M
+vigorous/YP
+vii
+viii
+Vijayawada/M
+Viki/M
+Viking/MS
+viking/S
+Vikki/M
+Vikky/M
+Vikram/M
+Vila
+vile/AR
+vilely
+vileness/MS
+vilest
+Vilhelmina/M
+vilification/M
+vilifier/M
+vilify/GNXRSD
+villager/M
+village/RSMZ
+villainousness/M
+villainous/YP
+villain/SM
+villainy/MS
+Villa/M
+villa/MS
+Villarreal/M
+ville
+villeinage/SM
+villein/MS
+villi
+Villon/M
+villus/M
+Vilma/M
+Vilnius/M
+Vilyui/M
+Vi/M
+vi/MDR
+vim/MS
+vinaigrette/MS
+Vina/M
+Vince/M
+Vincent/MS
+Vincenty/M
+Vincenz/M
+vincible/I
+Vinci/M
+Vindemiatrix/M
+vindicate/XSDVGN
+vindication/M
+vindicator/SM
+vindictiveness/MS
+vindictive/PY
+vinegar/DMSG
+vinegary
+vine/MGDS
+vineyard/SM
+Vinita/M
+Vin/M
+Vinnie/M
+Vinni/M
+Vinny/M
+vino/MS
+vinous
+Vinson/M
+vintage/MRSDG
+vintager/M
+vintner/MS
+vinyl/SM
+violable/I
+Viola/M
+Violante/M
+viola/SM
+violate/VNGXSD
+violator/MS
+Viole/M
+violence/SM
+violent/Y
+Violet/M
+violet/SM
+Violetta/M
+Violette/M
+violinist/SM
+violin/MS
+violist/MS
+viol/MSB
+violoncellist/S
+violoncello/MS
+viper/MS
+viperous
+VIP/S
+viragoes
+virago/M
+viral/Y
+vireo/SM
+Virge/M
+Virgie/M
+Virgilio/M
+Virgil/M
+virginal/YS
+Virgina/M
+Virginia/M
+Virginian/S
+Virginie/M
+virginity/SM
+virgin/SM
+Virgo/MS
+virgule/MS
+virile
+virility/MS
+virologist/S
+virology/SM
+virtual/Y
+virtue/SM
+virtuosity/MS
+virtuosoes
+virtuoso/MS
+virtuousness/SM
+virtuous/PY
+virulence/SM
+virulent/Y
+virus/MS
+visage/MSD
+Visakhapatnam's
+Visa/M
+visa/SGMD
+Visayans
+viscera
+visceral/Y
+viscid/Y
+viscoelastic
+viscoelasticity
+viscometer/SM
+viscose/MS
+viscosity/MS
+viscountcy/MS
+viscountess/SM
+viscount/MS
+viscousness/M
+viscous/PY
+viscus/M
+vise/CAXNGSD
+viselike
+vise's
+Vishnu/M
+visibility/ISM
+visible/PI
+visibly/I
+Visigoth/M
+Visigoths
+visionariness/M
+visionary/PS
+vision/KMDGS
+vision's/A
+visitable/U
+visitant/SM
+visitation/SM
+visited/U
+visit/GASD
+visitor/MS
+vis/MDSGV
+visor/SMDG
+VISTA
+vista/GSDM
+Vistula/M
+visualization/AMS
+visualized/U
+visualizer/M
+visualizes/A
+visualize/SRDZG
+visual/SY
+vitae
+vitality/MS
+vitalization/AMS
+vitalize/ASDGC
+vital/SY
+vita/M
+Vita/M
+vitamin/SM
+Vite/M
+Vitia/M
+vitiate/XGNSD
+vitiation/M
+viticulture/SM
+viticulturist/S
+Vitim/M
+Vito/M
+Vitoria/M
+vitreous/YSP
+vitrifaction/S
+vitrification/M
+vitrify/XDSNG
+vitrine/SM
+vitriolic
+vitriol/MDSG
+vitro
+vittles
+Vittoria/M
+Vittorio/M
+vituperate/SDXVGN
+vituperation/M
+vituperative/Y
+Vitus/M
+vivace/S
+vivaciousness/MS
+vivacious/YP
+vivacity/SM
+viva/DGS
+Vivaldi
+Viva/M
+vivaria
+vivarium/MS
+vivaxes
+Vivekananda/M
+vive/Z
+Vivia/M
+Viviana/M
+Vivian/M
+Vivianna/M
+Vivianne/M
+vividness/SM
+vivid/PTYR
+Vivie/M
+Viviene/M
+Vivien/M
+Vivienne/M
+vivifier
+vivify/NGASD
+Vivi/MN
+viviparous
+vivisect/DGS
+vivisectional
+vivisectionist/SM
+vivisection/MS
+Viviyan/M
+Viv/M
+vivo
+Vivyan/M
+Vivyanne/M
+vixenish/Y
+vixen/SM
+viz
+vizier/MS
+vizor's
+VJ
+Vladamir/M
+Vladimir/M
+Vladivostok/M
+Vlad/M
+VLF
+VLSI
+VMS/M
+VOA
+vocable/SM
+vocab/S
+vocabularian
+vocabularianism
+vocabulary/MS
+vocalic/S
+vocalise's
+vocalism/M
+vocalist/MS
+vocalization/SM
+vocalized/U
+vocalizer/M
+vocalize/ZGDRS
+vocal/SY
+vocation/AKMISF
+vocational/Y
+vocative/KYS
+vociferate/NGXSD
+vociferation/M
+vociferousness/MS
+vociferous/YP
+vocoded
+vocoder
+vodka/MS
+voe/S
+Vogel/M
+vogue/GMSRD
+vogueing
+voguish
+voiceband
+voiced/CU
+voice/IMGDS
+voicelessness/SM
+voiceless/YP
+voicer/S
+voices/C
+voicing/C
+voidable
+void/C
+voided
+voider/M
+voiding
+voidness/M
+voids
+voilà
+voile/MS
+volar
+volatileness/M
+volatile/PS
+volatility/MS
+volatilization/MS
+volatilize/SDG
+volcanically
+volcanic/S
+volcanism/M
+volcanoes
+volcano/M
+vole/MS
+Volga/M
+Volgograd/M
+vol/GSD
+volitionality
+volitional/Y
+volition/MS
+Volkswagen/SM
+volleyball/MS
+volleyer/M
+volley/SMRDG
+Vol/M
+Volstead/M
+voltage/SM
+voltaic
+Voltaire/M
+Volta/M
+volt/AMS
+Volterra/M
+voltmeter/MS
+volubility/S
+voluble/P
+volubly
+volume/SDGM
+volumetric
+volumetrically
+voluminousness/MS
+voluminous/PY
+voluntarily/I
+voluntariness/MI
+voluntarism/MS
+voluntary/PS
+volunteer/DMSG
+voluptuary/SM
+voluptuousness/S
+voluptuous/YP
+volute/S
+Volvo/M
+vomit/GRDS
+Vonda/M
+Von/M
+Vonnegut/M
+Vonnie/M
+Vonni/M
+Vonny/M
+voodoo/GDMS
+voodooism/S
+voraciousness/MS
+voracious/YP
+voracity/MS
+Voronezh/M
+Vorster/M
+vortex/SM
+vortices's
+vorticity/M
+votary/MS
+vote/CSDG
+voter/SM
+vote's
+votive/YP
+voucher/GMD
+vouchsafe/SDG
+vouch/SRDGZ
+vowelled
+vowelling
+vowel/MS
+vower/M
+vow/SMDRG
+voyage/GMZJSRD
+voyager/M
+voyageur/SM
+voyeurism/MS
+voyeuristic
+voyeur/MS
+VP
+vs
+V's
+VT
+Vt/M
+VTOL
+vulcanization/SM
+vulcanized/U
+vulcanize/SDG
+Vulcan/M
+vulgarian/MS
+vulgarism/MS
+vulgarity/MS
+vulgarization/S
+vulgarize/GZSRD
+vulgar/TSYR
+Vulgate/SM
+Vulg/M
+vulnerability/SI
+vulnerable/IP
+vulnerably/I
+vulpine
+vulturelike
+vulture/SM
+vulturous
+vulvae
+vulva/M
+vying
+Vyky/M
+WA
+Waals
+Wabash/M
+WAC
+Wacke/M
+wackes
+wackiness/MS
+wacko/MS
+wacky/RTP
+Waco/M
+Wac/S
+wadded
+wadding/SM
+waddle/GRSD
+Wade/M
+wader/M
+wade/S
+wadi/SM
+wad/MDRZGS
+Wadsworth/M
+wafer/GSMD
+waffle/GMZRSD
+Wafs
+wafter/M
+waft/SGRD
+wag/DRZGS
+waged/U
+wager/GZMRD
+wage/SM
+wagged
+waggery/MS
+wagging
+waggishness/SM
+waggish/YP
+waggle/SDG
+waggly
+Wagnerian
+Wagner/M
+wagoner/M
+wagon/SGZMRD
+wagtail/SM
+Wahl/M
+waif/SGDM
+Waikiki/M
+wailer/M
+wail/SGZRD
+wain/GSDM
+Wain/M
+wainscot/SGJD
+Wainwright/M
+wainwright/SM
+waistband/MS
+waistcoat/GDMS
+waister/M
+waist/GSRDM
+waistline/MS
+Waite/M
+waiter/DMG
+Waiter/M
+wait/GSZJRD
+Wait/MR
+waitpeople
+waitperson/S
+waitress/GMSD
+waiver/MB
+waive/SRDGZ
+Wakefield/M
+wakefulness/MS
+wakeful/PY
+Wake/M
+wake/MGDRSJ
+waken/SMRDG
+waker/M
+wakeup
+Waksman/M
+Walbridge/M
+Walcott/M
+Waldemar/M
+Walden/M
+Waldensian
+Waldheim/M
+Wald/MN
+Waldo/M
+Waldon/M
+Waldorf/M
+wale/DRSMG
+Wales
+Walesa/M
+Walford/M
+Walgreen/M
+waling/M
+walkabout/M
+walkaway/SM
+walker/M
+Walker/M
+walk/GZSBJRD
+walkie
+Walkman/S
+walkout/SM
+walkover/SM
+walkway/MS
+wallaby/MS
+Wallace/M
+Wallache/M
+wallah/M
+Wallas/M
+wallboard/MS
+Wallenstein/M
+Waller/M
+wallet/SM
+walleye/MSD
+wallflower/MS
+Wallie/M
+Wallis
+Walliw/M
+Walloon/SM
+walloper/M
+walloping/M
+wallop/RDSJG
+wallower/M
+wallow/RDSG
+wallpaper/DMGS
+wall/SGMRD
+Wall/SMR
+Wally/M
+wally/S
+walnut/SM
+Walpole/M
+Walpurgisnacht
+walrus/SM
+Walsh/M
+Walter/M
+Walther/M
+Walton/M
+waltzer/M
+Walt/ZMR
+waltz/MRSDGZ
+Walworth/M
+Waly/M
+wampum/SM
+Wanamaker/M
+Wanda/M
+wanderer/M
+wander/JZGRD
+wanderlust/SM
+Wandie/M
+Wandis/M
+wand/MRSZ
+wane/S
+Waneta/M
+wangler/M
+wangle/RSDGZ
+Wang/M
+Wanids/M
+Wankel/M
+wanna
+wannabe/S
+wanned
+wanner
+wanness/S
+wannest
+wanning
+wan/PGSDY
+Wansee/M
+Wansley/M
+wanted/U
+wanter/M
+want/GRDSJ
+wantonness/S
+wanton/PGSRDY
+wapiti/MS
+warble/GZRSD
+warbler/M
+warbonnet/S
+ward/AGMRDS
+Warde/M
+warden/DMGS
+Warden/M
+warder/DMGS
+Ward/MN
+wardrobe/MDSG
+wardroom/MS
+wardship/M
+wards/I
+warehouseman/M
+warehouse/MGSRD
+Ware/MG
+ware/MS
+warfare/SM
+Warfield/M
+war/GSMD
+warhead/MS
+Warhol/M
+warhorse/SM
+warily/U
+warinesses/U
+wariness/MS
+Waring/M
+warless
+warlike
+warlock/SM
+warlord/MS
+warmblooded
+warmed/A
+warmer/M
+warmheartedness/SM
+warmhearted/PY
+warmish
+warmness/MS
+warmongering/M
+warmonger/JGSM
+warms/A
+warmth/M
+warmths
+warm/YRDHPGZTS
+warned/U
+warner/M
+Warner/M
+warn/GRDJS
+warning/YM
+Warnock/M
+warpaint
+warpath/M
+warpaths
+warper/M
+warplane/MS
+warp/MRDGS
+warranted/U
+warranter/M
+warrant/GSMDR
+warranty/SDGM
+warred/M
+warrener/M
+Warren/M
+warren/SZRM
+warring/M
+warrior/MS
+Warsaw/M
+wars/C
+warship/MS
+warthog/S
+wartime/SM
+wart/MDS
+warty/RT
+Warwick/M
+wary/URPT
+Wasatch/M
+washable/S
+wash/AGSD
+washbasin/SM
+washboard/SM
+washbowl/SM
+Washburn/M
+washcloth/M
+washcloths
+washday/M
+washed/U
+washer/GDMS
+washerwoman/M
+washerwomen
+washing/SM
+Washingtonian/S
+Washington/M
+Wash/M
+Washoe/M
+washout/SM
+washrag/SM
+washroom/MS
+washstand/SM
+washtub/MS
+washy/RT
+wasn't
+WASP
+waspishness/SM
+waspish/PY
+Wasp's
+wasp/SM
+was/S
+wassail/GMDS
+Wasserman/M
+Wassermann/M
+wastage/SM
+wastebasket/SM
+wastefulness/S
+wasteful/YP
+wasteland/MS
+wastepaper/MS
+waster/DG
+waste/S
+wastewater
+wast/GZSRD
+wasting/Y
+wastrel/MS
+Watanabe/M
+watchable/U
+watchband/SM
+watchdogged
+watchdogging
+watchdog/SM
+watched/U
+watcher/M
+watchfulness/MS
+watchful/PY
+watch/JRSDGZB
+watchmake/JRGZ
+watchmaker/M
+watchman/M
+watchmen
+watchpoints
+watchtower/MS
+watchword/MS
+waterbird/S
+waterborne
+Waterbury/M
+watercolor/DMGS
+watercolorist/SM
+watercourse/SM
+watercraft/M
+watercress/SM
+waterer/M
+waterfall/SM
+waterfowl/M
+waterfront/SM
+Watergate/M
+waterhole/S
+Waterhouse/M
+wateriness/SM
+watering/M
+water/JGSMRD
+waterless
+waterlily/S
+waterline/S
+waterlogged
+waterloo
+Waterloo/SM
+waterman/M
+watermark/GSDM
+watermelon/SM
+watermill/S
+waterproof/PGRDSJ
+watershed/SM
+waterside/MSR
+watersider/M
+Waters/M
+waterspout/MS
+watertightness/M
+watertight/P
+Watertown/M
+waterway/MS
+waterwheel/S
+waterworks/M
+watery/PRT
+Watkins
+WATS
+Watson/M
+wattage/SM
+Watteau/M
+Wattenberg/M
+Watterson/M
+wattle/SDGM
+Watt/MS
+watt/TMRS
+Watusi/M
+Wat/ZM
+Waugh/M
+Waukesha/M
+Waunona/M
+Waupaca/M
+Waupun/M
+Wausau/M
+Wauwatosa/M
+waveband/MS
+waveform/SM
+wavefront/MS
+waveguide/MS
+Waveland/M
+wavelength/M
+wavelengths
+wavelet/SM
+wavelike
+wavenumber
+waver/GZRD
+wavering/YU
+Waverley/M
+Waverly/M
+Wave/S
+wave/ZGDRS
+wavily
+waviness/MS
+wavy/SRTP
+waxer/M
+waxiness/MS
+wax/MNDRSZG
+waxwing/MS
+waxwork/MS
+waxy/PRT
+wayfarer/MS
+wayfaring/S
+waylaid
+Wayland/M
+Waylan/M
+waylayer/M
+waylay/GRSZ
+wayleave/MS
+Waylen/M
+Waylin/M
+Waylon/M
+Way/M
+waymarked
+way/MS
+Wayne/M
+Waynesboro/M
+wayside/MS
+waywardness/S
+wayward/YP
+WC
+we
+weakener/M
+weaken/ZGRD
+weakfish/SM
+weakish
+weakliness/M
+weakling/SM
+weakly/RTP
+weakness/MS
+weak/TXPYRN
+weal/MHS
+wealthiness/MS
+wealth/M
+wealths
+wealthy/PTR
+weaner/M
+weanling/M
+wean/RDGS
+weapon/GDMS
+weaponless
+weaponry/MS
+wearable/S
+wearer/M
+wearied/U
+wearily
+weariness/MS
+wearing/Y
+wearisomeness/M
+wearisome/YP
+wear/RBSJGZ
+wearying/Y
+weary/TGPRSD
+weasel/SGMDY
+weatherbeaten
+weathercock/SDMG
+weatherer/M
+Weatherford/M
+weathering/M
+weatherize/GSD
+weatherman/M
+weather/MDRYJGS
+weathermen
+weatherperson/S
+weatherproof/SGPD
+weatherstripped
+weatherstripping/S
+weatherstrip/S
+weaver/M
+Weaver/M
+weaves/A
+weave/SRDGZ
+weaving/A
+webbed
+Webber/M
+webbing/MS
+Webb/RM
+weber/M
+Weber/M
+Webern/M
+webfeet
+webfoot/M
+Web/MR
+website/S
+web/SMR
+Webster/MS
+Websterville/M
+we'd
+wedded/A
+Weddell/M
+wedder
+wedding/SM
+wedge/SDGM
+wedgie/RST
+Wedgwood/M
+wedlock/SM
+Wed/M
+Wednesday/SM
+wed/SA
+weeder/M
+weediness/M
+weedkiller/M
+weedless
+wee/DRST
+weed/SGMRDZ
+weedy/TRP
+weeing
+weekday/MS
+weekender/M
+weekend/SDRMG
+weekly/S
+weeknight/SM
+Weeks/M
+week/SYM
+weenie/M
+ween/SGD
+weeny/RSMT
+weeper/M
+weep/SGZJRD
+weepy/RST
+weevil/MS
+weft/SGMD
+Wehr/M
+Weibull/M
+Weidar/M
+Weider/M
+Weidman/M
+Weierstrass/M
+weighed/UA
+weigher/M
+weigh/RDJG
+weighs/A
+weighted/U
+weighter/M
+weightily
+weightiness/SM
+weighting/M
+weight/JMSRDG
+weightlessness/SM
+weightless/YP
+weightlifter/S
+weightlifting/MS
+weighty/TPR
+Weill/M
+Wei/M
+Weinberg/M
+Weiner/M
+Weinstein/M
+weirdie/SM
+weirdness/MS
+weirdo/SM
+weird/YRDPGTS
+weir/SDMG
+Weisenheimer/M
+Weiss/M
+Weissman/M
+Weissmuller/M
+Weizmann/M
+Welbie/M
+Welby/M
+Welcher/M
+Welches
+welcomeness/M
+welcome/PRSDYG
+welcoming/U
+welder/M
+Weldon/M
+weld/SBJGZRD
+Weldwood/M
+welfare/SM
+welkin/SM
+we'll
+Welland/M
+wellbeing/M
+Weller/M
+Wellesley/M
+Welles/M
+wellhead/SM
+Wellington/MS
+wellington/S
+Wellman/M
+wellness/MS
+well/SGPD
+Wells/M
+wellspring/SM
+Wellsville/M
+Welmers/M
+Welsh
+welsher/M
+Welshman/M
+Welshmen
+welsh/RSDGZ
+Welshwoman/M
+Welshwomen
+welter/GD
+welterweight/MS
+welt/GZSMRD
+wencher/M
+wench/GRSDM
+Wendall/M
+Wenda/M
+wend/DSG
+Wendeline/M
+Wendell/M
+Wendel/M
+Wendie/M
+Wendi/M
+Wendye/M
+Wendy/M
+wen/M
+Wenonah/M
+Wenona/M
+went
+Wentworth/M
+wept/U
+were
+we're
+weren't
+werewolf/M
+werewolves
+Werner/M
+Wernher/M
+Werther/M
+werwolf's
+Wes
+Wesleyan
+Wesley/M
+Wessex/M
+Wesson/M
+westbound
+Westbrooke/M
+Westbrook/M
+Westchester/M
+wester/DYG
+westerly/S
+westerner/M
+westernization/MS
+westernize/GSD
+westernmost
+Western/ZRS
+western/ZSR
+Westfield/M
+Westhampton/M
+Westinghouse/M
+westing/M
+Westleigh/M
+Westley/M
+Westminster/M
+Westmore/M
+West/MS
+Weston/M
+Westphalia/M
+Westport/M
+west/RDGSM
+westward/S
+Westwood/M
+wetback/MS
+wetland/S
+wetness/MS
+wet/SPY
+wettable
+wetter/S
+wettest
+wetting
+we've
+Weyden/M
+Weyerhauser/M
+Weylin/M
+Wezen/M
+WFF
+whacker/M
+whack/GZRDS
+whaleboat/MS
+whalebone/SM
+whale/GSRDZM
+Whalen/M
+whaler/M
+whaling/M
+whammed
+whamming/M
+wham/MS
+whammy/S
+wharf/SGMD
+Wharton/M
+wharves
+whatchamacallit/MS
+what'd
+whatever
+what/MS
+whatnot/MS
+what're
+whatsoever
+wheal/MS
+wheatgerm
+Wheaties/M
+Wheatland/M
+wheat/NMXS
+Wheaton/M
+Wheatstone/M
+wheedle/ZDRSG
+wheelbarrow/GSDM
+wheelbase/MS
+wheelchair/MS
+wheeler/M
+Wheeler/M
+wheelhouse/SM
+wheelie/MS
+wheeling/M
+Wheeling/M
+Wheelock/M
+wheel/RDMJSGZ
+wheelwright/MS
+whee/S
+wheeze/SDG
+wheezily
+wheeziness/SM
+wheezy/PRT
+Whelan/M
+whelk/MDS
+Wheller/M
+whelm/DGS
+whelp/DMGS
+whence/S
+whenever
+when/S
+whensoever
+whereabout/S
+whereas/S
+whereat
+whereby
+where'd
+wherefore/MS
+wherein
+where/MS
+whereof
+whereon
+where're
+wheresoever
+whereto
+whereupon
+wherever
+wherewith
+wherewithal/SM
+wherry/DSGM
+whether
+whet/S
+whetstone/MS
+whetted
+whetting
+whew/GSD
+whey/MS
+which
+whichever
+whiff/GSMD
+whiffle/DRSG
+whiffler/M
+whiffletree/SM
+whig/S
+Whig/SM
+while/GSD
+whilom
+whilst
+whimmed
+whimming
+whimper/DSG
+whimsey's
+whimsicality/MS
+whimsical/YP
+whim/SM
+whimsy/TMDRS
+whine/GZMSRD
+whining/Y
+whinny/GTDRS
+whiny/RT
+whipcord/SM
+whiplash/SDMG
+Whippany/M
+whipped
+whipper/MS
+whippersnapper/MS
+whippet/MS
+whipping/SM
+Whipple/M
+whippletree/SM
+whippoorwill/SM
+whipsaw/GDMS
+whips/M
+whip/SM
+whirligig/MS
+whirlpool/MS
+whirl/RDGS
+whirlwind/MS
+whirlybird/MS
+whirly/MS
+whirred
+whirring
+whir/SY
+whisker/DM
+whiskery
+whiskey/SM
+whisk/GZRDS
+whisperer/M
+whisper/GRDJZS
+whispering/YM
+whist/GDMS
+whistleable
+whistle/DRSZG
+whistler/M
+Whistler/M
+whistling/M
+Whitaker/M
+Whitby/M
+Whitcomb/M
+whitebait/M
+whitecap/MS
+whiteface/M
+Whitefield/M
+whitefish/SM
+Whitehall/M
+Whitehead/M
+whitehead/S
+Whitehorse/M
+Whiteleaf/M
+Whiteley/M
+White/MS
+whitener/M
+whiteness/MS
+whitening/M
+whiten/JZDRG
+whiteout/S
+white/PYS
+whitespace
+whitetail/S
+whitewall/SM
+whitewash/GRSDM
+whitewater
+Whitewater/M
+whitey/MS
+Whitfield/M
+whither/DGS
+whitier
+whitiest
+whiting/M
+whitish
+Whitley/M
+Whitlock/M
+Whit/M
+Whitman/M
+Whitney/M
+whit/SJGTXMRND
+Whitsunday/MS
+Whittaker/M
+whitter
+Whittier
+whittle/JDRSZG
+whittler/M
+whiz
+whizkid
+whizzbang/S
+whizzed
+whizzes
+whizzing
+WHO
+whoa/S
+who'd
+whodunit/SM
+whoever
+wholegrain
+wholeheartedness/MS
+wholehearted/PY
+wholemeal
+wholeness/S
+wholesale/GZMSRD
+wholesaler/M
+wholesomeness/USM
+wholesome/UYP
+whole/SP
+wholewheat
+who'll
+wholly
+whom
+who/M
+whomever
+whomsoever
+whoopee/S
+whooper/M
+whoop/SRDGZ
+whoosh/DSGM
+whop
+whopper/MS
+whopping/S
+who're
+whorehouse/SM
+whoreish
+whore/SDGM
+whorish
+whorl/SDM
+whose
+whoso
+whosoever
+who've
+why
+whys
+WI
+Wiatt/M
+Wichita/M
+wickedness/MS
+wicked/RYPT
+wicker/M
+wickerwork/MS
+wicketkeeper/SM
+wicket/SM
+wick/GZRDMS
+wicking/M
+widemouthed
+widener/M
+wideness/S
+widen/SGZRD
+wide/RSYTP
+widespread
+widgeon's
+widget/SM
+widower/M
+widowhood/S
+widow/MRDSGZ
+width/M
+widths
+widthwise
+Wieland/M
+wielder/M
+wield/GZRDS
+Wiemar/M
+wiener/SM
+wienie/SM
+Wier/M
+Wiesel/M
+wife/DSMYG
+wifeless
+wifely/RPT
+wigeon/MS
+wigged
+wigging/M
+Wiggins
+wiggler/M
+wiggle/RSDGZ
+wiggly/RT
+wight/SGDM
+wiglet/S
+wigmaker
+wig/MS
+Wigner/M
+wigwagged
+wigwagging
+wigwag/S
+wigwam/MS
+Wilberforce/M
+Wilbert/M
+Wilbur/M
+Wilburn/M
+Wilburt/M
+Wilcox/M
+Wilda/M
+wildcat/SM
+wildcatted
+wildcatter/MS
+wildcatting
+wildebeest/SM
+Wilde/MR
+Wilden/M
+Wilder/M
+wilderness/SM
+wilder/P
+wildfire/MS
+wildflower/S
+wildfowl/M
+wilding/M
+wildlife/M
+wildness/MS
+Wildon/M
+wild/SPGTYRD
+wile/DSMG
+Wileen/M
+Wilek/M
+Wiley/M
+Wilford/M
+Wilfred/M
+Wilfredo/M
+Wilfrid/M
+wilfulness's
+Wilhelmina/M
+Wilhelmine/M
+Wilhelm/M
+Wilie/M
+wilily
+wiliness/MS
+Wilkerson/M
+Wilkes/M
+Wilkins/M
+Wilkinson/M
+Willabella/M
+Willa/M
+Willamette/M
+Willamina/M
+Willard/M
+Willcox/M
+Willdon/M
+willed/U
+Willem/M
+Willemstad/M
+willer/M
+Willetta/M
+Willette/M
+Willey/M
+willfulness/S
+willful/YP
+Williamsburg/M
+William/SM
+Williamson/M
+Willied/M
+Willie/M
+willies
+Willi/MS
+willinger
+willingest
+willingness's
+willingness/US
+willing/UYP
+Willisson/M
+williwaw/MS
+Will/M
+Willoughby/M
+willower/M
+Willow/M
+willow/RDMSG
+willowy/TR
+willpower/MS
+will/SGJRD
+Willy/SDM
+Willyt/M
+Wilma/M
+Wilmar/M
+Wilmer/M
+Wilmette/M
+Wilmington/M
+Wilona/M
+Wilone/M
+Wilow/M
+Wilshire/M
+Wilsonian
+Wilson/M
+wilt/DGS
+Wilt/M
+Wilton/M
+wily/PTR
+Wimbledon/M
+wimp/GSMD
+wimpish
+wimple/SDGM
+wimpy/RT
+wince/SDG
+Winchell/M
+wincher/M
+winchester/M
+Winchester/MS
+winch/GRSDM
+windbag/SM
+windblown
+windbreak/MZSR
+windburn/GSMD
+winded
+winder/UM
+windfall/SM
+windflower/MS
+Windham/M
+Windhoek/M
+windily
+windiness/SM
+winding/MS
+windjammer/SM
+windlass/GMSD
+windless/YP
+windmill/GDMS
+window/DMGS
+windowless
+windowpane/SM
+Windows
+windowsill/SM
+windpipe/SM
+windproof
+windrow/GDMS
+wind's
+winds/A
+windscreen/MS
+windshield/SM
+windsock/MS
+Windsor/MS
+windstorm/MS
+windsurf/GZJSRD
+windswept
+windup/MS
+wind/USRZG
+Windward/M
+windward/SY
+Windy/M
+windy/TPR
+wineglass/SM
+winegrower/SM
+Winehead/M
+winemake
+winemaster
+wine/MS
+winery/MS
+Winesap/M
+wineskin/M
+Winfield/M
+Winfred/M
+Winfrey/M
+wingback/M
+wingding/MS
+wingeing
+winger/M
+wing/GZRDM
+wingless
+winglike
+wingman
+wingmen
+wingspan/SM
+wingspread/MS
+wingtip/S
+Winifield/M
+Winifred/M
+Wini/M
+winker/M
+wink/GZRDS
+winking/U
+Winkle/M
+winkle/SDGM
+winless
+Win/M
+winnable
+Winnah/M
+Winna/M
+Winnebago/M
+Winne/M
+winner/MS
+Winnetka/M
+Winnie/M
+Winnifred/M
+Winni/M
+winning/SY
+Winnipeg/M
+Winn/M
+winnow/SZGRD
+Winny/M
+Winograd/M
+wino/MS
+Winonah/M
+Winona/M
+Winooski/M
+Winsborough/M
+Winsett/M
+Winslow/M
+winsomeness/SM
+winsome/PRTY
+Winston/M
+winterer/M
+wintergreen/SM
+winterize/GSD
+Winters
+winter/SGRDYM
+wintertime/MS
+Winthrop/M
+wintriness/M
+wintry/TPR
+winy/RT
+win/ZGDRS
+wipe/DRSZG
+wiper/M
+wirehair/MS
+wireless/MSDG
+wireman/M
+wiremen
+wirer/M
+wire's
+wires/A
+wiretap/MS
+wiretapped
+wiretapper/SM
+wiretapping
+wire/UDA
+wiriness/S
+wiring/SM
+wiry/RTP
+Wisc
+Wisconsinite/SM
+Wisconsin/M
+wisdoms
+wisdom/UM
+wiseacre/MS
+wisecrack/GMRDS
+wised
+wisely/TR
+Wise/M
+wiseness
+wisenheimer/M
+Wisenheimer/M
+wises
+wise/URTY
+wishbone/MS
+wishfulness/M
+wishful/PY
+wish/GZSRD
+wishy
+wising
+Wis/M
+wisp/MDGS
+wispy/RT
+wist/DGS
+wisteria/SM
+wistfulness/MS
+wistful/PY
+witchcraft/SM
+witchdoctor/S
+witchery/MS
+witch/SDMG
+withal
+withdrawal/MS
+withdrawer/M
+withdrawnness/M
+withdrawn/P
+withdraw/RGS
+withdrew
+withe/M
+wither/GDJ
+withering/Y
+Witherspoon/M
+with/GSRDZ
+withheld
+withholder/M
+withhold/SJGZR
+within/S
+without/S
+withs
+withstand/SG
+withstood
+witlessness/MS
+witless/PY
+Wit/M
+witness/DSMG
+witnessed/U
+wit/PSM
+witted
+witter/G
+Wittgenstein/M
+witticism/MS
+Wittie/M
+wittily
+wittiness/SM
+wittings
+witting/UY
+Witt/M
+Witty/M
+witty/RTP
+Witwatersrand/M
+wive/GDS
+wives/M
+wizard/MYS
+wizardry/MS
+wizen/D
+wiz's
+wk/Y
+Wm/M
+WNW
+woad/MS
+wobble/GSRD
+wobbler/M
+wobbliness/S
+wobbly/PRST
+Wodehouse/M
+woebegone/P
+woefuller
+woefullest
+woefulness/SM
+woeful/PY
+woe/PSM
+woke
+wok/SMN
+Wolcott/M
+wold/MS
+Wolfe/M
+wolfer/M
+Wolff/M
+Wolfgang/M
+wolfhound/MS
+Wolfie/M
+wolfishness/M
+wolfish/YP
+Wolf/M
+wolfram/MS
+wolf/RDMGS
+Wolfy/M
+Wollongong/M
+Wollstonecraft/M
+Wolsey/M
+Wolverhampton/M
+wolverine/SM
+Wolverton/M
+wolves/M
+woman/GSMYD
+womanhood/MS
+womanish
+womanized/U
+womanizer/M
+womanize/RSDZG
+womanizes/U
+womankind/M
+womanlike
+womanliness/SM
+womanly/PRT
+wombat/MS
+womb/SDM
+womenfolk/MS
+women/MS
+wonderer/M
+wonderfulness/SM
+wonderful/PY
+wonder/GLRDMS
+wondering/Y
+wonderland/SM
+wonderment/SM
+wondrousness/M
+wondrous/YP
+Wong/M
+wonk/S
+wonky/RT
+wonned
+wonning
+won/SG
+won't
+wontedness/MU
+wonted/PUY
+wont/SGMD
+Woodard/M
+Woodberry/M
+woodbine/SM
+woodblock/S
+Woodbury/M
+woodcarver/S
+woodcarving/MS
+woodchopper/SM
+woodchuck/MS
+woodcock/MS
+woodcraft/MS
+woodcut/SM
+woodcutter/MS
+woodcutting/MS
+woodenness/SM
+wooden/TPRY
+woodgrain/G
+woodhen
+Woodhull/M
+Woodie/M
+woodiness/MS
+woodland/SRM
+Woodlawn/M
+woodlice
+woodlot/S
+woodlouse/M
+woodman/M
+Woodman/M
+woodmen
+woodpecker/SM
+woodpile/SM
+Woodrow/M
+woodruff/M
+woo/DRZGS
+woodshedded
+woodshedding
+woodshed/SM
+woodside
+Wood/SM
+woodsman/M
+woodsmen
+wood/SMNDG
+woodsmoke
+woods/R
+Woodstock/M
+woodsy/TRP
+Woodward/MS
+woodwind/S
+woodworker/M
+woodworking/M
+woodwork/SMRGZJ
+woodworm/M
+woodyard
+Woody/M
+woody/TPSR
+woofer/M
+woof/SRDMGZ
+Woolf/M
+woolgatherer/M
+woolgathering/M
+woolgather/RGJ
+woolliness/MS
+woolly/RSPT
+Woolongong/M
+wool/SMYNDX
+Woolworth/M
+Woonsocket/M
+Wooster/M
+Wooten/M
+woozily
+wooziness/MS
+woozy/RTP
+wop/MS!
+Worcestershire/M
+Worcester/SM
+wordage/SM
+word/AGSJD
+wordbook/MS
+Worden/M
+wordily
+wordiness/SM
+wording/AM
+wordless/Y
+wordplay/SM
+word's
+Wordsworth/M
+wordy/TPR
+wore
+workability's
+workability/U
+workableness/M
+workable/U
+workably
+workaday
+workaholic/S
+workaround/SM
+workbench/MS
+workbook/SM
+workday/SM
+worked/A
+worker/M
+workfare/S
+workforce/S
+work/GZJSRDMB
+workhorse/MS
+workhouse/SM
+working/M
+workingman/M
+workingmen
+workingwoman/M
+workingwomen
+workload/SM
+workmanlike
+Workman/M
+workman/MY
+workmanship/MS
+workmate/S
+workmen/M
+workout/SM
+workpiece/SM
+workplace/SM
+workroom/MS
+works/A
+worksheet/S
+workshop/MS
+workspace/S
+workstation/MS
+worktable/SM
+worktop/S
+workup/S
+workweek/SM
+worldlier
+worldliest
+worldliness/USM
+worldly/UP
+worldwide
+world/ZSYM
+wormer/M
+wormhole/SM
+worm/SGMRD
+Worms/M
+wormwood/SM
+wormy/RT
+worn/U
+worried/Y
+worrier/M
+worriment/MS
+worrisome/YP
+worrying/Y
+worrywart/SM
+worry/ZGSRD
+worsen/GSD
+worse/SR
+worshiper/M
+worshipfulness/M
+worshipful/YP
+worship/ZDRGS
+worsted/MS
+worst/SGD
+worth/DG
+worthily/U
+worthinesses/U
+worthiness/SM
+Worthington/M
+worthlessness/SM
+worthless/PY
+Worth/M
+worths
+worthwhile/P
+Worthy/M
+worthy/UTSRP
+wort/SM
+wost
+wot
+Wotan/M
+wouldn't
+would/S
+wouldst
+would've
+wound/AU
+wounded/U
+wounder
+wounding
+wounds
+wound's
+wove/A
+woven/AU
+wovens
+wow/SDG
+Wozniak/M
+WP
+wpm
+wrack/SGMD
+wraith/M
+wraiths
+Wrangell/M
+wrangle/GZDRS
+wrangler/M
+wraparound/S
+wrap/MS
+wrapped/U
+wrapper/MS
+wrapping/SM
+wraps/U
+wrasse/SM
+wrathful/YP
+wrath/GDM
+wraths
+wreak/SDG
+wreathe
+wreath/GMDS
+wreaths
+wreckage/MS
+wrecker/M
+wreck/GZRDS
+wrenching/Y
+wrench/MDSG
+wren/MS
+Wren/MS
+Wrennie/M
+wrester/M
+wrestle/JGZDRS
+wrestler/M
+wrestling/M
+wrest/SRDG
+wretchedness/SM
+wretched/TPYR
+wretch/MDS
+wriggle/DRSGZ
+wriggler/M
+wriggly/RT
+Wright/M
+wright/MS
+Wrigley/M
+wringer/M
+wring/GZRS
+wrinkled/U
+wrinkle/GMDS
+wrinkly/RST
+wristband/SM
+wrist/MS
+wristwatch/MS
+writable/U
+write/ASBRJG
+writer/MA
+writeup
+writhe/SDG
+writing/M
+writ/MRSBJGZ
+written/UA
+Wroclaw
+wrongdoer/MS
+wrongdoing/MS
+wronger/M
+wrongfulness/MS
+wrongful/PY
+wrongheadedness/MS
+wrongheaded/PY
+wrongness/MS
+wrong/PSGTYRD
+Wronskian/M
+wrote/A
+wroth
+wrought/I
+wrung
+wry/DSGY
+wryer
+wryest
+wryness/SM
+W's
+WSW
+wt
+W/T
+Wuhan/M
+Wu/M
+Wurlitzer/M
+wurst/SM
+wuss/S
+wussy/TRS
+WV
+WW
+WWI
+WWII
+WWW
+w/XTJGV
+WY
+Wyatan/M
+Wyatt/M
+Wycherley/M
+Wycliffe/M
+Wye/MH
+Wyeth/M
+Wylie/M
+Wylma/M
+Wyman/M
+Wyndham/M
+Wyn/M
+Wynne/M
+Wynnie/M
+Wynn/M
+Wynny/M
+Wyo/M
+Wyomingite/SM
+Wyoming/M
+WYSIWYG
+x
+X
+Xanadu
+Xanthippe/M
+Xanthus/M
+Xaviera/M
+Xavier/M
+Xebec/M
+Xe/M
+XEmacs/M
+Xenakis/M
+Xena/M
+Xenia/M
+Xenix/M
+xenon/SM
+xenophobe/MS
+xenophobia/SM
+xenophobic
+Xenophon/M
+Xenos
+xerographic
+xerography/MS
+xerox/GSD
+Xerox/MGSD
+Xerxes/M
+Xever/M
+Xhosa/M
+Xi'an
+Xian/S
+Xiaoping/M
+xii
+xiii
+xi/M
+Ximenes/M
+Ximenez/M
+Ximian/SM
+Xingu/M
+xis
+xiv
+xix
+XL
+Xmas/SM
+XML
+Xochipilli/M
+XOR
+X's
+XS
+xterm/M
+Xuzhou/M
+xv
+xvi
+xvii
+xviii
+xx
+XXL
+xylem/SM
+xylene/M
+Xylia/M
+Xylina/M
+xylophone/MS
+xylophonist/S
+Xymenes/M
+Y
+ya
+yacc/M
+Yacc/M
+yachting/M
+yachtsman
+yachtsmen
+yachtswoman/M
+yachtswomen
+yacht/ZGJSDM
+yack's
+Yagi/M
+yahoo/MS
+Yahweh/M
+Yakima/M
+yakked
+yakking
+yak/SM
+Yakut/M
+Yakutsk/M
+Yale/M
+Yalies/M
+y'all
+Yalonda/M
+Yalow/M
+Yalta/M
+Yalu/M
+Yamaha/M
+yammer/RDZGS
+Yamoussoukro
+yam/SM
+Yanaton/M
+Yance/M
+Yancey/M
+Yancy/M
+Yang/M
+Yangon
+yang/S
+Yangtze/M
+Yankee/SM
+yank/GDS
+Yank/MS
+Yaounde/M
+yapped
+yapping
+yap/S
+Yaqui/M
+yardage/SM
+yardarm/SM
+Yardley/M
+Yard/M
+yardman/M
+yardmaster/S
+yardmen
+yard/SMDG
+yardstick/SM
+yarmulke/SM
+yarn/SGDM
+Yaroslavl/M
+yarrow/MS
+Yasmeen/M
+Yasmin/M
+Yates
+yaw/DSG
+yawl/SGMD
+yawner/M
+yawn/GZSDR
+yawning/Y
+Yb/M
+yd
+Yeager/M
+yeah
+yeahs
+yearbook/SM
+yearling/M
+yearlong
+yearly/S
+yearner/M
+yearning/MY
+yearn/JSGRD
+year/YMS
+yea/S
+yeastiness/M
+yeast/SGDM
+yeasty/PTR
+Yeats/M
+yecch
+yegg/MS
+Yehudi/M
+Yehudit/M
+Yekaterinburg/M
+Yelena/M
+yell/GSDR
+yellowhammers
+yellowish
+Yellowknife/M
+yellowness/MS
+Yellowstone/M
+yellow/TGPSRDM
+yellowy
+yelper/M
+yelp/GSDR
+Yeltsin
+Yemeni/S
+Yemenite/SM
+Yemen/M
+Yenisei/M
+yenned
+yenning
+yen/SM
+Yentl/M
+yeomanry/MS
+yeoman/YM
+yeomen
+yep/S
+Yerevan/M
+Yerkes/M
+Yesenia/M
+yeshiva/SM
+yes/S
+yessed
+yessing
+yesterday/MS
+yesteryear/SM
+yet
+ye/T
+yeti/SM
+Yetta/M
+Yettie/M
+Yetty/M
+Yevette/M
+Yevtushenko/M
+yew/SM
+y/F
+Yggdrasil/M
+Yiddish/M
+yielded/U
+yielding/U
+yield/JGRDS
+yikes
+yin/S
+yipe/S
+yipped
+yippee/S
+yipping
+yip/S
+YMCA
+YMHA
+Ymir/M
+YMMV
+Ynes/M
+Ynez/M
+yo
+Yoda/M
+yodeler/M
+yodel/SZRDG
+Yoder/M
+yoga/MS
+yoghurt's
+yogi/MS
+yogurt/SM
+yoke/DSMG
+yoked/U
+yokel/SM
+yokes/U
+yoking/U
+Yoknapatawpha/M
+Yokohama/M
+Yoko/M
+Yolanda/M
+Yolande/M
+Yolane/M
+Yolanthe/M
+yolk/DMS
+yon
+yonder
+Yong/M
+Yonkers/M
+yore/MS
+Yorgo/MS
+Yorick/M
+Yorke/M
+Yorker/M
+yorker/SM
+Yorkshire/MS
+Yorktown/M
+York/ZRMS
+Yoruba/M
+Yosemite/M
+Yoshiko/M
+Yoshi/M
+Yost/M
+you'd
+you'll
+youngish
+Young/M
+youngster/MS
+Youngstown/M
+young/TRYP
+you're
+your/MS
+yourself
+yourselves
+you/SH
+youthfulness/SM
+youthful/YP
+youths
+youth/SM
+you've
+Yovonnda/M
+yow
+yowl/GSD
+Ypres/M
+Ypsilanti/M
+yr
+yrs
+Y's
+Ysabel/M
+YT
+ytterbium/MS
+yttrium/SM
+yuan/M
+Yuba/M
+Yucatan
+yucca/MS
+yuck/GSD
+yucky/RT
+Yugo/M
+Yugoslavia/M
+Yugoslavian/S
+Yugoslav/M
+Yuh/M
+Yuki/M
+yukked
+yukking
+Yukon/M
+yuk/S
+yule/MS
+Yule/MS
+yuletide/MS
+Yuletide/S
+Yul/M
+Yulma/M
+yum
+Yuma/M
+yummy/TRS
+Yunnan/M
+yuppie/SM
+yup/S
+Yurik/M
+Yuri/M
+yurt/SM
+Yves/M
+Yvette/M
+Yvon/M
+Yvonne/M
+Yvor/M
+YWCA
+YWHA
+Zabrina/M
+Zaccaria/M
+Zachariah/M
+Zacharia/SM
+Zacharie/M
+Zachary/M
+Zacherie/M
+Zachery/M
+Zach/M
+Zackariah/M
+Zack/M
+zagging
+Zagreb/M
+zag/S
+Zahara/M
+Zaire/M
+Zairian/S
+Zak/M
+Zambezi/M
+Zambia/M
+Zambian/S
+Zamboni
+Zamenhof/M
+Zamora/M
+Zandra/M
+Zane/M
+Zaneta/M
+zaniness/MS
+Zan/M
+Zanuck/M
+zany/PDSRTG
+Zanzibar/M
+Zapata/M
+Zaporozhye/M
+Zappa/M
+zapped
+zapper/S
+zapping
+zap/S
+Zarah/M
+Zara/M
+Zared/M
+Zaria/M
+Zarla/M
+Zealand/M
+zeal/MS
+zealot/MS
+zealotry/MS
+zealousness/SM
+zealous/YP
+Zea/M
+Zebadiah/M
+Zebedee/M
+Zeb/M
+zebra/MS
+Zebulen/M
+Zebulon/M
+zebu/SM
+Zechariah/M
+Zedekiah/M
+Zed/M
+Zedong/M
+zed/SM
+Zeffirelli/M
+Zeiss/M
+zeitgeist/S
+Zeke/M
+Zelda/M
+Zelig/M
+Zellerbach/M
+Zelma/M
+Zena/M
+Zenger/M
+Zenia/M
+zenith/M
+zeniths
+Zen/M
+Zennist/M
+Zeno/M
+Zephaniah/M
+zephyr/MS
+Zephyrus/M
+Zeppelin's
+zeppelin/SM
+Zerk/M
+zeroed/M
+zeroing/M
+zero/SDHMG
+zestfulness/MS
+zestful/YP
+zest/MDSG
+zesty/RT
+zeta/SM
+zeugma/M
+Zeus/M
+Zhdanov/M
+Zhengzhou
+Zhivago/M
+Zhukov/M
+Zia/M
+Zibo/M
+Ziegfeld/MS
+Ziegler/M
+zig
+zigged
+zigging
+Ziggy/M
+zigzagged
+zigzagger
+zigzagging
+zigzag/MS
+zilch/S
+zillion/MS
+Zilvia/M
+Zimbabwean/S
+Zimbabwe/M
+Zimmerman/M
+zincked
+zincking
+zinc/MS
+zing/GZDRM
+zingy/RT
+zinnia/SM
+Zionism/MS
+Zionist/MS
+Zion/SM
+zip/MS
+zipped/U
+zipper/GSDM
+zipping/U
+zippy/RT
+zips/U
+zirconium/MS
+zircon/SM
+Zita/M
+Zitella/M
+zither/SM
+zit/S
+zloty/SM
+Zn/M
+zodiacal
+zodiac/SM
+Zoe/M
+Zola/M
+Zollie/M
+Zolly/M
+Zomba/M
+zombie/SM
+zombi's
+zonal/Y
+Zonda/M
+Zondra/M
+zoned/A
+zone/MYDSRJG
+zones/A
+zoning/A
+zonked
+Zonnya/M
+zookeepers
+zoological/Y
+zoologist/SM
+zoology/MS
+zoom/DGS
+zoophyte/SM
+zoophytic
+zoo/SM
+Zorah/M
+Zora/M
+Zorana/M
+Zorina/M
+Zorine/M
+Zorn/M
+Zoroaster/M
+Zoroastrianism/MS
+Zoroastrian/S
+Zorro/M
+Zosma/M
+zounds/S
+Zr/M
+Zs
+Zsazsa/M
+Zsigmondy/M
+z/TGJ
+Zubenelgenubi/M
+Zubeneschamali/M
+zucchini/SM
+Zukor/M
+Zulema/M
+Zululand/M
+Zulu/MS
+Zuni/S
+Zürich/M
+Zuzana/M
+zwieback/MS
+Zwingli/M
+Zworykin/M
+Z/X
+zydeco/S
+zygote/SM
+zygotic
+zymurgy/S
diff --git a/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml
new file mode 100644
index 0000000..1a91653
--- /dev/null
+++ b/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml
@@ -0,0 +1,2 @@
+ignore_case: true
+strict_affix_parsing: true \ No newline at end of file
diff --git a/src/test/resources/jmeter/index-count.jmx b/src/test/resources/jmeter/index-count.jmx
new file mode 100644
index 0000000..09a563f
--- /dev/null
+++ b/src/test/resources/jmeter/index-count.jmx
@@ -0,0 +1,240 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Count Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ term : { age : ${personAge} } }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/_count</stringProp>
+ <stringProp name="HTTPSampler.method">POST</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/src/test/resources/jmeter/index-get.jmx b/src/test/resources/jmeter/index-get.jmx
new file mode 100644
index 0000000..c8d7914
--- /dev/null
+++ b/src/test/resources/jmeter/index-get.jmx
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Get Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">GET</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree>
+ <ResponseAssertion guiclass="AssertionGui" testclass="ResponseAssertion" testname="Response Assertion" enabled="true">
+ <collectionProp name="Asserion.test_strings"/>
+ <stringProp name="Assertion.test_field">Assertion.response_code</stringProp>
+ <boolProp name="Assertion.assume_success">false</boolProp>
+ <intProp name="Assertion.test_type">2</intProp>
+ </ResponseAssertion>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/src/test/resources/jmeter/index-search.jmx b/src/test/resources/jmeter/index-search.jmx
new file mode 100644
index 0000000..dc74285
--- /dev/null
+++ b/src/test/resources/jmeter/index-search.jmx
@@ -0,0 +1,240 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Search Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ query : { term : { age : ${personAge} } } }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/_search</stringProp>
+ <stringProp name="HTTPSampler.method">POST</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
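
Note: the search.jmx plan above reuses the HTTP defaults (${host}:9200) and drives the enabled "Search Request" sampler, POSTing a term query on the random ${personAge} variable to /test/person/_search. A minimal standalone sketch of the same call in plain Java (JDK HttpURLConnection only; localhost and the fixed age 42 are stand-ins for the plan's variables):

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class SearchRequestSketch {
        public static void main(String[] args) throws Exception {
            // Same target as the plan's HTTP defaults: ${host}:9200, path /test/person/_search.
            URL url = new URL("http://localhost:9200/test/person/_search");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            // Mirrors the sampler body; 42 stands in for the random ${personAge} (1..50).
            String body = "{ \"query\" : { \"term\" : { \"age\" : 42 } } }";
            try (OutputStream out = conn.getOutputStream()) {
                out.write(body.getBytes(StandardCharsets.UTF_8));
            }
            // getResponseCode() fires the request; a 2xx response carries the search hits.
            System.out.println("HTTP " + conn.getResponseCode());
            try (InputStream in = conn.getInputStream()) {
                System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
            }
        }
    }

The sampler's body uses unquoted field names, which Elasticsearch's lenient JSON parsing has historically accepted; the sketch quotes them for strict-JSON clients.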
diff --git a/src/test/resources/jmeter/index.jmx b/src/test/resources/jmeter/index.jmx
new file mode 100644
index 0000000..64a6849
--- /dev/null
+++ b/src/test/resources/jmeter/index.jmx
@@ -0,0 +1,210 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
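
Note: index.jmx is the companion write-side plan: ${numberOfThreads} (default 20) threads each loop ${numberOfLoops} (default 10000) times, PUTting a small person document to /test/person/${personId} with per-thread random id (0..10000) and age (1..50). One such indexing call, sketched in plain Java with stand-ins for the plan's random variables:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.ThreadLocalRandom;

    public class IndexRequestSketch {
        public static void main(String[] args) throws Exception {
            // Stand-ins for the plan's per-thread random variables (JMeter's bounds are
            // inclusive, ThreadLocalRandom's upper bound is exclusive, hence the +1).
            int personId = ThreadLocalRandom.current().nextInt(0, 10000 + 1);  // ${personId}
            int personAge = ThreadLocalRandom.current().nextInt(1, 50 + 1);    // ${personAge}
            URL url = new URL("http://localhost:9200/test/person/" + personId);
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("PUT");
            conn.setDoOutput(true);
            // Mirrors the sampler body: { name : "person${personId}", age : ${personAge} }
            String body = "{ \"name\" : \"person" + personId + "\", \"age\" : " + personAge + " }";
            try (OutputStream out = conn.getOutputStream()) {
                out.write(body.getBytes(StandardCharsets.UTF_8));
            }
            System.out.println("PUT /test/person/" + personId + " -> HTTP " + conn.getResponseCode());
        }
    }

Because the id space (10000) matches the default loop count, repeated runs overwrite existing documents as often as they create new ones, which exercises both the create and update paths.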
diff --git a/src/test/resources/jmeter/ping-single.jmx b/src/test/resources/jmeter/ping-single.jmx
new file mode 100644
index 0000000..64a6849
--- /dev/null
+++ b/src/test/resources/jmeter/ping-single.jmx
@@ -0,0 +1,210 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
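
Note: ping-single.jmx is committed with the same blob (64a6849) as index.jmx, so the content above repeats the indexing samplers rather than performing a ping. What the file name suggests would be a single GET against the node's REST root, which answers with basic node and cluster info; a hypothetical sketch of that ping, assuming the default 9200 port:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class PingSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical single "ping": GET on the REST root returns node/cluster metadata.
            URL url = new URL("http://localhost:9200/");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("GET");
            System.out.println("ping -> HTTP " + conn.getResponseCode());
        }
    }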
diff --git a/src/test/resources/log4j.properties b/src/test/resources/log4j.properties
new file mode 100644
index 0000000..22f54ef
--- /dev/null
+++ b/src/test/resources/log4j.properties
@@ -0,0 +1,9 @@
+es.logger.level=INFO
+log4j.rootLogger=${es.logger.level}, out
+
+log4j.logger.org.apache.http=INFO, out
+log4j.additivity.org.apache.http=false
+
+log4j.appender.out=org.apache.log4j.ConsoleAppender
+log4j.appender.out.layout=org.apache.log4j.PatternLayout
+log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
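
Note: this test log4j.properties routes everything through a single console appender, defaulting the root level to INFO via es.logger.level, and pins org.apache.http to INFO with additivity off so Apache HttpClient chatter is not emitted twice. A line logged through the log4j 1.x API comes out in the [timestamp][level][category] shape of the conversionPattern; a minimal sketch, assuming this file is on the test classpath:

    import org.apache.log4j.Logger;

    public class LoggingSketch {
        // Resolved against src/test/resources/log4j.properties on the test classpath.
        private static final Logger LOG = Logger.getLogger(LoggingSketch.class);

        public static void main(String[] args) {
            // With conversionPattern [%d{ISO8601}][%-5p][%-25c] %m%n this prints e.g.
            // [2014-06-07 12:02:12,000][INFO ][LoggingSketch            ] node started
            LOG.info("node started");
        }
    }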
diff --git a/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf8 b/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf8
new file mode 100644
index 0000000..d60b6fa
--- /dev/null
+++ b/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf8
@@ -0,0 +1 @@
+{{Distinguish|Cambridge, England}} {{primary sources|date=June 2012}} {{Use mdy dates|date=January 2011}} {{Infobox settlement |official_name = Cambridge, Massachusetts |nickname = |motto = "Boston's Left Bank"<ref>{{cite web|url= http://www.epodunk.com/cgi-bin/genInfo.php?locIndex=2894|title=Profile for Cambridge, Massachusetts, MA|publisher= ePodunk |accessdate= November 1, 2012}}</ref> |image_skyline = CambridgeMACityHall2.jpg |imagesize = 175px |image_caption = Cambridge City Hall |image_seal = |image_flag = |image_map = Cambridge ma highlight.png |mapsize = 250px |map_caption = Location in Middlesex County in Massachusetts |image_map1 = |mapsize1 = |map_caption1 = |coordinates_region = US-MA |subdivision_type = Country |subdivision_name = United States |subdivision_type1 = State |subdivision_name1 = [[Massachusetts]] |subdivision_type2 = [[List of counties in Massachusetts|County]] |subdivision_name2 = [[Middlesex County, Massachusetts|Middlesex]] |established_title = Settled |established_date = 1630 |established_title2 = Incorporated |established_date2 = 1636 |established_title3 = |established_date3 = |government_type = [[Council-manager government|Council-City Manager]] |leader_title = Mayor |leader_name = Henrietta Davis |leader_title1 = [[City manager|City Manager]] |leader_name1 = [[Robert W. Healy]] |area_magnitude = |area_total_km2 = 18.47 |area_total_sq_mi = 7.13 |area_land_km2 = 16.65 |area_land_sq_mi = 6.43 |area_water_km2 = 1.81 |area_water_sq_mi = 0.70 |population_as_of = 2010 |population_blank2_title = [[Demonym]] |population_blank2 = [[Cantabrigian]] |settlement_type = City |population_total = 105,162 |population_density_km2 = 6,341.98 |population_density_sq_mi = 16,422.08 |elevation_m = 12 |elevation_ft = 40 |timezone = [[Eastern Time Zone|Eastern]] |utc_offset = -5 |timezone_DST = [[Eastern Time Zone|Eastern]] |utc_offset_DST = -4 |coordinates_display = display=inline,title |latd = 42 |latm = 22 |lats = 25 |latNS = N |longd = 71 |longm = 06 |longs = 38 |longEW = W |website = [http://www.cambridgema.gov/ www.cambridgema.gov] |postal_code_type = ZIP code |postal_code = 02138, 02139, 02140, 02141, 02142 |area_code = [[Area code 617|617]] / [[Area code 857|857]] |blank_name = [[Federal Information Processing Standard|FIPS code]] |blank_info = 25-11000 |blank1_name = [[Geographic Names Information System|GNIS]] feature ID |blank1_info = 0617365 |footnotes = }} '''Cambridge''' is a city in [[Middlesex County, Massachusetts|Middlesex County]], [[Massachusetts]], [[United States]], in the [[Greater Boston]] area. It was named in honor of the [[University of Cambridge]] in [[England]], an important center of the [[Puritan]] theology embraced by the town's founders.<ref>{{cite book|last=Degler|first=Carl Neumann|title=Out of Our Pasts: The Forces That Shaped Modern America|publisher=HarperCollins|location=New York|year=1984|url=http://books.google.com/books?id=NebLe1ueuGQC&pg=PA18&lpg=PA18&dq=cambridge+university+puritans+newtowne#v=onepage&q=&f=false|accessdate=September 9, 2009 | isbn=978-0-06-131985-3}}</ref> Cambridge is home to two of the world's most prominent universities, [[Harvard University]] and the [[Massachusetts Institute of Technology]]. According to the [[2010 United States Census]], the city's population was 105,162.<ref name="2010.census.gov">{{cite web|url=http://2010.census.gov/news/releases/operations/cb11-cn104.html |title=Census 2010 News &#124; U.S. 
Census Bureau Delivers Massachusetts' 2010 Census Population Totals, Including First Look at Race and Hispanic Origin Data for Legislative Redistricting |publisher=2010.census.gov |date=2011-03-22 |accessdate=2012-04-28}}</ref> It is the fifth most populous city in the state, behind [[Boston]], [[Worcester, MA|Worcester]], [[Springfield, MA|Springfield]], and [[Lowell, Massachusetts|Lowell]].<ref name="2010.census.gov"/> Cambridge was one of the two [[county seat]]s of Middlesex County prior to the abolition of county government in 1997; [[Lowell, Massachusetts|Lowell]] was the other. ==History== {{See also|Timeline of Cambridge, Massachusetts history}} [[File:Formation of Massachusetts towns.svg|thumb|A map showing the original boundaries of Cambridge]] The site for what would become Cambridge was chosen in December 1630, because it was located safely upriver from Boston Harbor, which made it easily defensible from attacks by enemy ships. Also, the water from the local spring was so good that the local Native Americans believed it had medicinal properties.{{Citation needed|date=November 2009}} [[Thomas Dudley]], his daughter [[Anne Bradstreet]] and her husband Simon were among the first settlers of the town. The first houses were built in the spring of 1631. The settlement was initially referred to as "the newe towne".<ref name=drake>{{cite book|last=Drake|first=Samuel Adams|title=History of Middlesex County, Massachusetts|publisher=Estes and Lauriat|location=Boston|year=1880|volume=1|pages=305–16|url=http://books.google.com/books?id=QGolOAyd9RMC&pg=PA316&lpg=PA305&dq=newetowne&ct=result#PPA305,M1|accessdate=December 26, 2008}}</ref> Official Massachusetts records show the name capitalized as '''Newe Towne''' by 1632.<ref name=public>{{cite book|title=Report on the Custody and Condition of the Public Records of Parishes|publisher=Massachusetts Secretary of the Commonwealth|url=http://books.google.com/books?id=IyYWAAAAYAAJ&pg=RA1-PA298&lpg=RA1-PA298&dq=%22Ordered+That+Newtowne+shall+henceforward+be+called%22|location=Boston|year=1889|page=298|accessdate=December 24, 2008}}</ref> Located at the first convenient [[Charles River]] crossing west of [[Boston]], Newe Towne was one of a number of towns (including Boston, [[Dorchester, Massachusetts|Dorchester]], [[Watertown, Massachusetts|Watertown]], and [[Weymouth, Massachusetts|Weymouth]]) founded by the 700 original [[Puritan]] colonists of the [[Massachusetts Bay Colony]] under governor [[John Winthrop]]. The original village site is in the heart of today's [[Harvard Square]]. The marketplace where farmers brought in crops from surrounding towns to sell survives today as the small park at the corner of John F. Kennedy (J.F.K.) and Winthrop Streets, then at the edge of a salt marsh, since filled. The town included a much larger area than the present city, with various outlying parts becoming independent towns over the years: [[Newton, Massachusetts|Newton (originally Cambridge Village, then Newtown)]] in 1688,<ref>{{cite book |last= Ritter |first= Priscilla R. 
|coauthors= Thelma Fleishman |title= Newton, Massachusetts 1679–1779: A Biographical Directory |year= 1982 |publisher= New England Historic Genealogical Society }}</ref> [[Lexington, Massachusetts|Lexington (Cambridge Farms)]] in 1712, and both [[Arlington, Massachusetts|West Cambridge (originally Menotomy)]] and [[Brighton, Massachusetts|Brighton (Little Cambridge)]] in 1807.<ref>{{cite web |url=http://www.brightonbot.com/history.php |title=A Short History of Allston-Brighton |first=Marchione |last=William P. |author= |authorlink= |coauthors= |date= |month= |year=2011 |work=Brighton-Allston Historical Society |publisher=Brighton Board of Trade |location= |page= |pages= |at= |language= |trans_title= |arxiv= |asin= |bibcode= |doi= |doibroken= |isbn= |issn= |jfm= |jstor= |lccn= |mr= |oclc= |ol= |osti= |pmc = |pmid= |rfc= |ssrn= |zbl= |id= |archiveurl= |archivedate= |deadurl= |accessdate=December 21, 2011 |quote= |ref= |separator= |postscript=}}</ref> Part of West Cambridge joined the new town of [[Belmont, Massachusetts|Belmont]] in 1859, and the rest of West Cambridge was renamed Arlington in 1867; Brighton was annexed by Boston in 1874. In the late 19th century, various schemes for annexing Cambridge itself to the City of Boston were pursued and rejected.<ref>{{cite news |title=ANNEXATION AND ITS FRUITS |author=Staff writer |first= |last= |authorlink= |url=http://query.nytimes.com/gst/abstract.html?res=9901E4DC173BEF34BC4D52DFB766838F669FDE |agency= |newspaper=[[The New York Times]] |publisher= |isbn= |issn= |pmid= |pmd= |bibcode= |doi= |date=January 15, 1874, Wednesday |page= 4 |pages= |accessdate=|archiveurl=http://query.nytimes.com/mem/archive-free/pdf?res=9901E4DC173BEF34BC4D52DFB766838F669FDE |archivedate=January 15, 1874 |ref= }}</ref><ref>{{cite news |title=BOSTON'S ANNEXATION SCHEMES.; PROPOSAL TO ABSORB CAMBRIDGE AND OTHER NEAR-BY TOWNS |author=Staff writer |first= |last= |authorlink= |url=http://query.nytimes.com/gst/abstract.html?res=9C05E1DC1F39E233A25754C2A9659C94639ED7CF |agency= |newspaper=[[The New York Times]] |publisher= |isbn= |issn= |pmid= |pmd= |bibcode= |doi= |date=March 26, 1892, Wednesday |page= 11 |pages= |accessdate=August 21, 2010|archiveurl=http://query.nytimes.com/mem/archive-free/pdf?res=9C05E1DC1F39E233A25754C2A9659C94639ED7CF |archivedate=March 27, 1892 |ref= }}</ref> In 1636, [[Harvard College]] was founded by the colony to train [[minister (religion)|ministers]] and the new town was chosen for its site by [[Thomas Dudley]]. By 1638, the name "Newe Towne" had "compacted by usage into 'Newtowne'."<ref name=drake /> In May 1638<ref>{{cite book|title=The Cambridge of Eighteen Hundred and Ninety-six|editor=Arthur Gilman, ed.|publisher=Committee on the Memorial Volume|location=Cambridge|year=1896|page=8}}</ref><ref>{{cite web|author=Harvard News Office |url=http://news.harvard.edu/gazette/2002/05.02/02-history.html |title='&#39;Harvard Gazette'&#39; historical calendar giving May 12, 1638 as date of name change; certain other sources say May 2, 1638 or late 1637 |publisher=News.harvard.edu |date=2002-05-02 |accessdate=2012-04-28}}</ref> the name was changed to '''Cambridge''' in honor of the [[University of Cambridge|university]] in [[Cambridge, England]].<ref>{{cite book |last= Hannah Winthrop Chapter, D.A.R. |title= Historic Guide to Cambridge |edition= Second |year= 1907 |publisher= Hannah Winthrop Chapter, D.A.R. |location= Cambridge, Mass. 
|pages= 20–21 |quote= On October&nbsp;15, 1637, the Great and General Court passed a vote that: "The college is ordered to bee at Newetowne." In this same year the name of Newetowne was changed to Cambridge, ("It is ordered that Newetowne shall henceforward be called Cambridge") in honor of the university in Cambridge, England, where many of the early settlers were educated. }}</ref> The first president ([[Henry Dunster]]), the first benefactor ([[John Harvard (clergyman)|John Harvard]]), and the first schoolmaster ([[Nathaniel Eaton]]) of Harvard were all Cambridge University alumni, as was the then ruling (and first) governor of the [[Massachusetts Bay Colony]], John Winthrop. In 1629, Winthrop had led the signing of the founding document of the city of Boston, which was known as the [[Cambridge Agreement]], after the university.<ref>{{cite web|url=http://www.winthropsociety.org/doc_cambr.php|publisher=The Winthrop Society|title=Descendants of the Great Migration|accessdate=September 8, 2008}}</ref> It was Governor Thomas Dudley who, in 1650, signed the charter creating the corporation which still governs Harvard College.<ref>{{cite web|url=http://hul.harvard.edu/huarc/charter.html |title=Harvard Charter of 1650, Harvard University Archives, Harvard University, harvard.edu |publisher=Hul.harvard.edu |date= |accessdate=2012-04-28}}</ref><ref>{{cite book |last1= |first1= |authorlink1= |editor1-first= |editor1-last= |editor1-link= |others= |title=Constitution of the Commonwealth of Massachusetts|url=http://www.mass.gov/legis/const.htm |accessdate=December 13, 2009 |edition= |series= |volume= |date=September 1, 1779 |publisher=The General Court of Massachusetts |location= |isbn= |oclc= |doi= |page= |pages=|chapter=Chapter V: The University at Cambridge, and encouragement of literature, etc. |chapterurl= |ref= |bibcode= }}</ref> [[Image:Washington taking command of the American Army at Cambridge, 1775 - NARA - 532874.tif|thumb|right|George Washington in Cambridge, 1775]] Cambridge grew slowly as an agricultural village eight miles (13&nbsp;km) by road from Boston, the capital of the colony. By the [[American Revolution]], most residents lived near the [[Cambridge Common|Common]] and Harvard College, with farms and estates comprising most of the town. Most of the inhabitants were descendants of the original Puritan colonists, but there was also a small elite of [[Anglicans|Anglican]] "worthies" who were not involved in village life, who made their livings from estates, investments, and trade, and lived in mansions along "the Road to Watertown" (today's [[Brattle Street (Cambridge, Massachusetts)|Brattle Street]], still known as [[Tory Row]]). In 1775, [[George Washington]] came up from [[Virginia]] to take command of fledgling volunteer American soldiers camped on the [[Cambridge Common]]—today called the birthplace of the [[U.S. Army]]. (The name of today's nearby Sheraton Commander Hotel refers to that event.) Most of the Tory estates were confiscated after the Revolution. On January 24, 1776, [[Henry Knox]] arrived with artillery captured from [[Fort Ticonderoga]], which enabled Washington to drive the British army out of Boston. 
[[File:Cambridge 1873 WardMap.jpg|thumb|300px|left|A map of Cambridge from 1873]] Between 1790 and 1840, Cambridge began to grow rapidly, with the construction of the [[West Boston Bridge]] in 1792, that connected Cambridge directly to Boston, making it no longer necessary to travel eight miles (13&nbsp;km) through the [[Boston Neck]], [[Roxbury, Massachusetts|Roxbury]], and [[Brookline, Massachusetts|Brookline]] to cross the [[Charles River]]. A second bridge, the Canal Bridge, opened in 1809 alongside the new [[Middlesex Canal]]. The new bridges and roads made what were formerly estates and [[marsh]]land into prime industrial and residential districts. In the mid-19th century, Cambridge was the center of a literary revolution when it gave the country a new identity through poetry and literature. Cambridge was home to the famous Fireside Poets—so called because their poems would often be read aloud by families in front of their evening fires. In their day, the [[Fireside Poets]]—[[Henry Wadsworth Longfellow]], [[James Russell Lowell]], and [[Oliver Wendell Holmes, Sr.|Oliver Wendell Holmes]]—were as popular and influential as rock stars are today.{{Citation needed|date=November 2009}} Soon after, [[Toll road|turnpikes]] were built: the [[Cambridge and Concord Turnpike]] (today's Broadway and Concord Ave.), the [[Middlesex Turnpike (Massachusetts)|Middlesex Turnpike]] (Hampshire St. and [[Massachusetts Avenue (Boston)|Massachusetts Ave.]] northwest of [[Porter Square]]), and what are today's Cambridge, Main, and Harvard Streets were roads to connect various areas of Cambridge to the bridges. In addition, railroads crisscrossed the town during the same era, leading to the development of Porter Square as well as the creation of neighboring town [[Somerville, Massachusetts|Somerville]] from the formerly rural parts of [[Charlestown, Massachusetts|Charlestown]]. [[File:Middlesex Canal (Massachusetts) map, 1852.jpg|thumb|1852 Map of Boston area showing Cambridge and rail lines.]] Cambridge was incorporated as a city in 1846. This was despite noticeable tensions between East Cambridge, Cambridgeport, and Old Cambridge that stemmed from differences in in each area's culture, sources of income, and the national origins of the residents.<ref>Cambridge Considered: A Very Brief History of Cambridge, 1800-1900, Part I. http://cambridgeconsidered.blogspot.com/2011/01/very-brief-history-of-cambridge-1800.html</ref> The city's commercial center began to shift from Harvard Square to Central Square, which became the downtown of the city around this time. Between 1850 and 1900, Cambridge took on much of its present character—[[streetcar suburb]]an development along the turnpikes, with working-class and industrial neighborhoods focused on East Cambridge, comfortable middle-class housing being built on old estates in Cambridgeport and Mid-Cambridge, and upper-class enclaves near Harvard University and on the minor hills of the city. The coming of the railroad to North Cambridge and Northwest Cambridge then led to three major changes in the city: the development of massive brickyards and brickworks between Massachusetts Ave., Concord Ave. and [[Alewife Brook]]; the ice-cutting industry launched by [[Frederic Tudor]] on [[Fresh Pond, Cambridge, Massachusetts|Fresh Pond]]; and the carving up of the last estates into residential subdivisions to provide housing to the thousands of immigrants that arrived to work in the new industries. 
For many years, the city's largest employer was the [[New England Glass Company]], founded in 1818. By the middle of the 19th century it was the largest and most modern glassworks in the world. In 1888, all production was moved, by [[Edward Libbey|Edward Drummond Libbey]], to [[Toledo, Ohio]], where it continues today under the name Owens Illinois. Flint glassware with heavy lead content, produced by that company, is prized by antique glass collectors. There is none on public display in Cambridge, but there is a large collection in the [[Toledo Museum of Art]]. Among the largest businesses located in Cambridge was the firm of [[Carter's Ink Company]], whose neon sign long adorned the [[Charles River]] and which was for many years the largest manufacturer of ink in the world. By 1920, Cambridge was one of the main industrial cities of [[New England]], with nearly 120,000 residents. As industry in New England began to decline during the [[Great Depression]] and after World War II, Cambridge lost much of its industrial base. It also began the transition to being an intellectual, rather than an industrial, center. Harvard University had always been important in the city (both as a landowner and as an institution), but it began to play a more dominant role in the city's life and culture. Also, the move of the [[Massachusetts Institute of Technology]] from Boston in 1916 ensured Cambridge's status as an intellectual center of the United States. After the 1950s, the city's population began to decline slowly, as families tended to be replaced by single people and young couples. The 1980s brought a wave of high-technology startups, creating software such as [[Visicalc]] and [[Lotus 1-2-3]], and advanced computers, but many of these companies fell into decline with the fall of the minicomputer and [[DOS]]-based systems. However, the city continues to be home to many startups as well as a thriving biotech industry. By the end of the 20th century, Cambridge had one of the most expensive housing markets in the Northeastern United States. While maintaining much diversity in class, race, and age, it became harder and harder for those who grew up in the city to be able to afford to stay. The end of [[rent control]] in 1994 prompted many Cambridge renters to move to housing that was more affordable, in Somerville and other communities. In 2005, a reassessment of residential property values resulted in a disproportionate number of houses owned by non-affluent people jumping in value relative to other houses, with hundreds having their property tax increased by over 100%; this forced many homeowners in Cambridge to move elsewhere.<ref>Cambridge Chronicle, October 6, 13, 20, 27, 2005</ref> As of 2012, Cambridge's mix of amenities and proximity to Boston has kept housing prices relatively stable. ==Geography== [[File:Charles River Cambridge USA.jpg|thumb|upright|A view from Boston of Harvard's [[Weld Boathouse]] and Cambridge in winter. The [[Charles River]] is in the foreground.]] According to the [[United States Census Bureau]], Cambridge has a total area of {{convert|7.1|sqmi|km2}}, of which {{convert|6.4|sqmi|km2}} of it is land and {{convert|0.7|sqmi|km2}} of it (9.82%) is water. 
===Adjacent municipalities=== Cambridge is located in eastern Massachusetts, bordered by: *the city of [[Boston]] to the south (across the [[Charles River]]) and east *the city of [[Somerville, Massachusetts|Somerville]] to the north *the town of [[Arlington, Massachusetts|Arlington]] to the northwest *the town of [[Belmont, Massachusetts|Belmont]] and *the city of [[Watertown, Massachusetts|Watertown]] to the west The border between Cambridge and the neighboring city of [[Somerville, Massachusetts|Somerville]] passes through densely populated neighborhoods which are connected by the [[Red Line (MBTA)|MBTA Red Line]]. Some of the main squares, [[Inman Square|Inman]], [[Porter Square|Porter]], and to a lesser extent, [[Harvard Square|Harvard]], are very close to the city line, as are Somerville's [[Union Square (Somerville)|Union]] and [[Davis Square]]s. ===Neighborhoods=== ====Squares==== [[File:Centralsquarecambridgemass.jpg|thumb|[[Central Square (Cambridge)|Central Square]]]] [[File:Harvard square 2009j.JPG|thumb|[[Harvard Square]]]] [[File:Cambridge MA Inman Square.jpg|thumb|[[Inman Square]]]] Cambridge has been called the "City of Squares" by some,<ref>{{cite web|author=No Writer Attributed |url=http://www.thecrimson.com/article/1969/9/18/cambridge-a-city-of-squares-pcambridge/ |title="Cambridge: A City of Squares" Harvard Crimson, Sept. 18, 1969 |publisher=Thecrimson.com |date=1969-09-18 |accessdate=2012-04-28}}</ref><ref>{{cite web|url=http://www.travelwritersmagazine.com/RonBernthal/Cambridge.html |title=Cambridge Journal: Massachusetts City No Longer in Boston's Shadow |publisher=Travelwritersmagazine.com |date= |accessdate=2012-04-28}}</ref> as most of its commercial districts are major street intersections known as [[Town square|squares]]. Each of the squares acts as a neighborhood center. These include: * [[Kendall Square]], formed by the junction of Broadway, Main Street, and Third Street, is also known as '''Technology Square''', a name shared with an office and laboratory building cluster in the neighborhood. Just over the [[Longfellow Bridge]] from Boston, at the eastern end of the [[Massachusetts Institute of Technology|MIT]] campus, it is served by the [[Kendall (MBTA station)|Kendall/MIT]] station on the [[Massachusetts Bay Transportation Authority|MBTA]] [[Red Line (MBTA)|Red Line]] subway. Most of Cambridge's large office towers are located here, giving the area somewhat of an office park feel. A flourishing [[biotech]] industry has grown up around this area. The "One Kendall Square" complex is nearby, but—confusingly—not actually in Kendall Square. Also, the "Cambridge Center" office complex is located here, and not at the actual center of Cambridge. * [[Central Square (Cambridge)|Central Square]], formed by the junction of Massachusetts Avenue, Prospect Street, and Western Avenue, is well known for its wide variety of ethnic restaurants. As recently as the late 1990s it was rather run-down; it underwent a controversial [[gentrification]] in recent years (in conjunction with the development of the nearby [[University Park at MIT]]), and continues to grow more expensive. It is served by the [[Central (MBTA station)|Central Station]] stop on the MBTA Red Line subway. '''Lafayette Square''', formed by the junction of Massachusetts Avenue, Columbia Street, Sidney Street, and Main Street, is considered part of the Central Square area. [[Cambridgeport]] is south of Central Square along Magazine Street and Brookline Street. 
* [[Harvard Square]], formed by the junction of Massachusetts Avenue, Brattle Street, and JFK Street. This is the primary site of [[Harvard University]], and is a major Cambridge shopping area. It is served by a [[Harvard (MBTA station)|Red Line station]]. Harvard Square was originally the northwestern terminus of the Red Line and a major transfer point to streetcars that also operated in a short [[Harvard Bus Tunnel|tunnel]]—which is still a major bus terminal, although the area under the Square was reconfigured dramatically in the 1980s when the Red Line was extended. The Harvard Square area includes '''Brattle Square''' and '''Eliot Square'''. A short distance away from the square lies the [[Cambridge Common]], while the neighborhood north of Harvard and east of Massachusetts Avenue is known as Agassiz in honor of the famed scientist [[Louis Agassiz]]. * [[Porter Square]], about a mile north on Massachusetts Avenue from Harvard Square, is formed by the junction of Massachusetts and Somerville Avenues, and includes part of the city of [[Somerville, Massachusetts|Somerville]]. It is served by the [[Porter (MBTA station)|Porter Square Station]], a complex housing a [[Red Line (MBTA)|Red Line]] stop and a [[Fitchburg Line]] [[MBTA commuter rail|commuter rail]] stop. [[Lesley University]]'s University Hall and Porter campus are located at Porter Square. * [[Inman Square]], at the junction of Cambridge and Hampshire streets in Mid-Cambridge. Inman Square is home to many diverse restaurants, bars, music venues and boutiques. The funky street scene still holds some urban flair, but was dressed up recently with Victorian streetlights, benches and bus stops. A new community park was installed and is a favorite place to enjoy some takeout food from the nearby restaurants and ice cream parlor. * [[Lechmere Square]], at the junction of Cambridge and First streets, adjacent to the CambridgeSide Galleria shopping mall. Perhaps best known as the northern terminus of the [[Massachusetts Bay Transportation Authority|MBTA]] [[Green Line (MBTA)|Green Line]] subway, at [[Lechmere (MBTA station)|Lechmere Station]]. ====Other neighborhoods==== The residential neighborhoods ([http://www.cambridgema.gov/CPD/publications/neighborhoods.cfm map]) in Cambridge border, but are not defined by the squares. These include: * [[East Cambridge, Massachusetts|East Cambridge]] (Area 1) is bordered on the north by the [[Somerville, Massachusetts|Somerville]] border, on the east by the Charles River, on the south by Broadway and Main Street, and on the west by the [[Grand Junction Railroad]] tracks. It includes the [[NorthPoint (Cambridge, Massachusetts)|NorthPoint]] development. * [[Massachusetts Institute of Technology|MIT]] Campus ([[MIT Campus (Area 2), Cambridge|Area 2]]) is bordered on the north by Broadway, on the south and east by the Charles River, and on the west by the Grand Junction Railroad tracks. * [[Wellington-Harrington]] (Area 3) is bordered on the north by the [[Somerville, Massachusetts|Somerville]] border, on the south and west by Hampshire Street, and on the east by the Grand Junction Railroad tracks. Referred to as "Mid-Block".{{clarify|What is? By whom? A full sentence would help.|date=September 2011}} * [[Area 4, Cambridge|Area 4]] is bordered on the north by Hampshire Street, on the south by Massachusetts Avenue, on the west by Prospect Street, and on the east by the Grand Junction Railroad tracks. 
Residents of Area 4 often refer to their neighborhood simply as "The Port", and refer to the area of Cambridgeport and Riverside as "The Coast". * [[Cambridgeport]] (Area 5) is bordered on the north by Massachusetts Avenue, on the south by the Charles River, on the west by River Street, and on the east by the Grand Junction Railroad tracks. * [[Mid-Cambridge]] (Area 6) is bordered on the north by Kirkland and Hampshire Streets and the [[Somerville, Massachusetts|Somerville]] border, on the south by Massachusetts Avenue, on the west by Peabody Street, and on the east by Prospect Street. * [[Riverside, Cambridge|Riverside]] (Area 7), an area sometimes referred to as "The Coast," is bordered on the north by Massachusetts Avenue, on the south by the Charles River, on the west by JFK Street, and on the east by River Street. * [[Agassiz, Cambridge, Massachusetts|Agassiz (Harvard North)]] (Area 8) is bordered on the north by the [[Somerville, Massachusetts|Somerville]] border, on the south and east by Kirkland Street, and on the west by Massachusetts Avenue. * [[Peabody, Cambridge, Massachusetts|Peabody]] (Area 9) is bordered on the north by railroad tracks, on the south by Concord Avenue, on the west by railroad tracks, and on the east by Massachusetts Avenue. The Avon Hill sub-neighborhood consists of the higher elevations bounded by Upland Road, Raymond Street, Linnaean Street and Massachusetts Avenue. * Brattle area/[[West Cambridge (neighborhood)|West Cambridge]] (Area 10) is bordered on the north by Concord Avenue and Garden Street, on the south by the Charles River and the [[Watertown, Massachusetts|Watertown]] border, on the west by Fresh Pond and the Collins Branch Library, and on the east by JFK Street. It includes the sub-neighborhoods of Brattle Street (formerly known as [[Tory Row]]) and Huron Village. * [[North Cambridge, Massachusetts|North Cambridge]] (Area 11) is bordered on the north by the [[Arlington, Massachusetts|Arlington]] and [[Somerville, Massachusetts|Somerville]] borders, on the south by railroad tracks, on the west by the [[Belmont, Massachusetts|Belmont]] border, and on the east by the [[Somerville, Massachusetts|Somerville]] border. * [[Cambridge Highlands]] (Area 12) is bordered on the north and east by railroad tracks, on the south by Fresh Pond, and on the west by the [[Belmont, Massachusetts|Belmont]] border. * [[Strawberry Hill, Cambridge|Strawberry Hill]] (Area 13) is bordered on the north by Fresh Pond, on the south by the [[Watertown, Massachusetts|Watertown]] border, on the west by the [[Belmont, Massachusetts|Belmont]] border, and on the east by railroad tracks. ===Parks and outdoors=== [[File:Alewife Brook Reservation.jpg|thumb|Alewife Brook Reservation]] Consisting largely of densely built residential space, Cambridge lacks significant tracts of public parkland. This is partly compensated for, however, by the presence of easily accessible open space on the university campuses, including [[Harvard Yard]] and MIT's Great Lawn, as well as the considerable open space of [[Mount Auburn Cemetery]]. At the western edge of Cambridge, the cemetery is well known as the first garden cemetery, for its distinguished inhabitants, for its superb landscaping (the oldest planned landscape in the country), and as a first-rate [[arboretum]]. 
Although known as a Cambridge landmark, much of the cemetery lies within the bounds of Watertown.<ref>http://www2.cambridgema.gov/CityOfCambridge_Content/documents/CambridgeStreetMap18x24_032007.pdf</ref> It is also a significant [[Important Bird Area]] (IBA) in the Greater Boston area. Public parkland includes the esplanade along the Charles River, which mirrors its [[Charles River Esplanade|Boston counterpart]], [[Cambridge Common]], a busy and historic public park immediately adjacent to the Harvard campus, and the [[Alewife Brook Reservation]] and [[Fresh Pond, Cambridge, Massachusetts|Fresh Pond]] in the western part of the city. ==Demographics== {{Historical populations | type=USA | align=right | 1790|2115 | 1800|2453 | 1810|2323 | 1820|3295 | 1830|6072 | 1840|8409 | 1850|15215 | 1860|26060 | 1870|39634 | 1880|52669 | 1890|70028 | 1900|91886 | 1910|104839 | 1920|109694 | 1930|113643 | 1940|110879 | 1950|120740 | 1960|107716 | 1970|100361 | 1980|95322 | 1990|95802 | 2000|101355 | 2010|105162 | footnote= {{Historical populations/Massachusetts municipalities references}}<ref name="1950_Census_Urban_populations_since_1790">{{cite journal | title=1950 Census of Population | volume=1: Number of Inhabitants | at=Section 6, Pages 21-7 through 21-09, Massachusetts Table 4. Population of Urban Places of 10,000 or more from Earliest Census to 1920 | publisher=Bureau of the Census | accessdate=July 12, 2011 | year=1952 | url=http://www2.census.gov/prod2/decennial/documents/23761117v1ch06.pdf}}</ref> }} As of the census{{GR|2}} of 2010, there were 105,162 people, 44,032 households, and 17,420 families residing in the city. The population density was 16,422.08 people per square mile (6,341.98/km²), making Cambridge the fifth most densely populated city in the US<ref name=CountyCityDataBook>County and City Data Book: 2000. Washington, DC: US Department of Commerce, Bureau of the Census. Table C-1.</ref> and the second most densely populated city in [[Massachusetts]] behind neighboring [[Somerville, Massachusetts|Somerville]].<ref>[http://www.boston.com/realestate/news/articles/2008/07/13/highest_population_density/ Highest Population Density, The Boston Globe]</ref> There were 47,291 housing units at an average density of 7,354.7 per square mile (2,840.3/km²). The racial makeup of the city was 66.60% [[White (U.S. Census)|White]], 11.70% [[Black (people)|Black]] or [[Race (United States Census)|African American]], 0.20% [[Native American (U.S. Census)|Native American]], 15.10% [[Asian (U.S. Census)|Asian]], 0.01% [[Pacific Islander (U.S. Census)|Pacific Islander]], 2.10% from [[Race (United States Census)|other races]], and 4.30% from two or more races. 7.60% of the population were [[Hispanics in the United States|Hispanic]] or [[Latino (U.S. Census)|Latino]] of any race. [[Non-Hispanic Whites]] were 62.1% of the population in 2010,<ref>{{cite web |url=http://quickfacts.census.gov/qfd/states/25/2511000.html |title=Cambridge (city), Massachusetts |work=State & County QuickFacts |publisher=U.S. Census Bureau}}</ref> down from 89.7% in 1970.<ref>{{cite web|title=Massachusetts - Race and Hispanic Origin for Selected Cities and Other Places: Earliest Census to 1990|publisher=U.S. Census Bureau|url=http://www.census.gov/population/www/documentation/twps0076/twps0076.html}}</ref> This rather closely parallels the average [[racial demographics of the United States]] as a whole, although Cambridge has significantly more Asians than the average, and fewer Hispanics and Caucasians. 
11.0% were of [[irish people|Irish]], 7.2% English, 6.9% [[italians|Italian]], 5.5% [[West Indian]] and 5.3% [[germans|German]] ancestry according to [[Census 2000]]. 69.4% spoke English, 6.9% Spanish, 3.2% [[Standard Mandarin|Chinese]] or [[Standard Mandarin|Mandarin]], 3.0% [[portuguese language|Portuguese]], 2.9% [[French-based creole languages|French Creole]], 2.3% French, 1.5% [[korean language|Korean]], and 1.0% [[italian language|Italian]] as their first language. There were 44,032 households out of which 16.9% had children under the age of 18 living with them, 28.9% were married couples living together, 8.4% had a female householder with no husband present, and 60.4% were non-families. 40.7% of all households were made up of individuals and 9.6% had someone living alone who was 65 years of age or older. The average household size was 2.00 and the average family size was 2.76. In the city the population was spread out with 13.3% under the age of 18, 21.2% from 18 to 24, 38.6% from 25 to 44, 17.8% from 45 to 64, and 9.2% who were 65 years of age or older. The median age was 30.5 years. For every 100 females, there were 96.1 males. For every 100 females age 18 and over, there were 94.7 males. The median income for a household in the city was $47,979, and the median income for a family was $59,423 (these figures had risen to $58,457 and $79,533 respectively {{as of|2007|alt=as of a 2007 estimate}}<ref>{{cite web|url=http://factfinder.census.gov/servlet/ACSSAFFFacts?_event=Search&geo_id=16000US2418750&_geoContext=01000US%7C04000US24%7C16000US2418750&_street=&_county=cambridge&_cityTown=cambridge&_state=04000US25&_zip=&_lang=en&_sse=on&ActiveGeoDiv=geoSelect&_useEV=&pctxt=fph&pgsl=160&_submenuId=factsheet_1&ds_name=ACS_2007_3YR_SAFF&_ci_nbr=null&qr_name=null&reg=null%3Anull&_keyword=&_industry= |title=U.S. Census, 2000 |publisher=Factfinder.census.gov |date= |accessdate=2012-04-28}}</ref>). Males had a median income of $43,825 versus $38,489 for females. The per capita income for the city was $31,156. About 8.7% of families and 12.9% of the population were below the poverty line, including 15.1% of those under age 18 and 12.9% of those age 65 or over. Cambridge was ranked as one of the most liberal cities in America.<ref>{{cite web|author=Aug 16, 2005 12:00 AM |url=http://www.govpro.com/News/Article/31439/ |title=Study Ranks America’s Most Liberal and Conservative Cities |publisher=Govpro.com |date=2005-08-16 |accessdate=2012-04-28}}</ref> Locals living in and near the city jokingly refer to it as "The People's Republic of Cambridge."<ref>[http://www.universalhub.com/glossary/peoples_republic_the.html Wicked Good Guide to Boston English] Accessed February 2, 2009</ref> For 2012, the residential property tax rate in Cambridge is $8.48 per $1,000.<ref>{{cite web|url=http://www.cambridgema.gov/finance/propertytaxinformation/fy12propertytaxinformation.aspx |title=FY12 Property Tax Information - City of Cambridge, Massachusetts |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref> Cambridge enjoys the highest possible [[bond credit rating]], AAA, with all three Wall Street rating agencies.<ref>http://www.cambridgema.gov/CityOfCambridge_Content/documents/Understanding_Your_Taxes_2007.pdf</ref> Cambridge is noted for its diverse population, both racially and economically. Residents, known as ''Cantabrigians'', include affluent [[MIT]] and Harvard professors. 
The first legal applications in America for same-sex marriage licenses were issued at Cambridge's City Hall.<ref>{{cite web|url=http://www.boston.com/news/local/articles/2004/05/17/free_to_marry/ |title=Free to Marry |work=[[The Boston Globe]] |date=2004-05-17 |accessdate=2012-07-18}}</ref> Cambridge is also the birthplace of [[Thailand|Thai]] king [[Bhumibol Adulyadej|Bhumibol Adulyadej (Rama IX)]], who at age 82 (as of 2010) was the world's longest-reigning monarch, as well as the longest-reigning monarch in Thai history. He is also the first king of a foreign country to be born in the United States. ==Government== ===Federal and state representation=== {| class=wikitable ! colspan = 6 | Voter registration and party enrollment {{as of|lc=y|df=US|2008|10|15}}<ref>{{cite web|title = 2008 State Party Election Party Enrollment Statistics | publisher = Massachusetts Elections Division | format = PDF | accessdate = July 7, 2010 | url = http://www.sec.state.ma.us/ele/elepdf/st_county_town_enroll_breakdown_08.pdf}}</ref> |- ! colspan = 2 | Party ! Number of voters ! Percentage {{American politics/party colors/Democratic/row}} | [[Democratic Party (United States)|Democratic]] | style="text-align:center;"| 37,822 | style="text-align:center;"| 58.43% {{American politics/party colors/Republican/row}} | [[Republican Party (United States)|Republican]] | style="text-align:center;"| 3,280 | style="text-align:center;"| 5.07% {{American politics/party colors/Independent/row}} | Unaffiliated | style="text-align:center;"| 22,935 | style="text-align:center;"| 35.43% {{American politics/party colors/Libertarian/row}} | Minor Parties | style="text-align:center;"| 690 | style="text-align:center;"| 1.07% |- ! colspan = 2 | Total ! style="text-align:center;"| 64,727 ! style="text-align:center;"| 100% |} Cambridge is part of [[Massachusetts's 8th congressional district]], represented by Democrat [[Mike Capuano]], elected in 1998. The state's senior member of the [[United States Senate]] is Democrat [[John Kerry]], elected in 1984. The state's junior member is Republican [[Scott Brown]], [[United States Senate special election in Massachusetts, 2010|elected in 2010]] to fill the vacancy caused by the death of long-time Democratic Senator [[Ted Kennedy]]. The Governor of Massachusetts is Democrat [[Deval Patrick]], elected in 2006 and re-elected in 2010. On the state level, Cambridge is represented in six districts in the [[Massachusetts House of Representatives]]: the 24th Middlesex (which includes parts of Belmont and Arlington), the 25th and 26th Middlesex (the latter of which includes a portion of Somerville), the 29th Middlesex (which includes a small part of Watertown), and the Eighth and Ninth Suffolk (both including parts of the City of Boston).
The city is represented in the [[Massachusetts Senate]] as a part of the "First Suffolk and Middlesex" district (which contains parts of Boston, Revere, and Winthrop, all in Suffolk County); the "Middlesex, Suffolk and Essex" district, which includes Everett and Somerville, along with Boston, Chelsea, and Revere in Suffolk County and Saugus in Essex County; and the "Second Suffolk and Middlesex" district, containing parts of the City of Boston in Suffolk County, and Cambridge, Belmont and Watertown in Middlesex County.<ref>{{cite web|url=http://www.malegislature.gov/ |title=Index of Legislative Representation by City and Town, from |publisher=Mass.gov |date= |accessdate=2012-04-28}}</ref> In addition to the [[Cambridge Police Department (Massachusetts)|Cambridge Police Department]], the city is patrolled by the Fifth (Brighton) Barracks of Troop H of the [[Massachusetts State Police]].<ref>[http://www.mass.gov/?pageID=eopsterminal&L=5&L0=Home&L1=Law+Enforcement+%26+Criminal+Justice&L2=Law+Enforcement&L3=State+Police+Troops&L4=Troop+H&sid=Eeops&b=terminalcontent&f=msp_divisions_field_services_troops_troop_h_msp_field_troop_h_station_h5&csid=Eeops Station H-5, SP Brighton]{{dead link|date=April 2012}}</ref> Because of its close proximity to Boston, however, the city also cooperates with the Fourth (Boston) Barracks of Troop H.<ref>[http://www.mass.gov/?pageID=eopsterminal&L=5&L0=Home&L1=Law+Enforcement+%26+Criminal+Justice&L2=Law+Enforcement&L3=State+Police+Troops&L4=Troop+H&sid=Eeops&b=terminalcontent&f=msp_divisions_field_services_troops_troop_h_msp_field_troop_h_station_h4&csid=Eeops Station H-4, SP Boston]{{dead link|date=April 2012}}</ref> ===City government=== [[File:CambridgeMACityHall1.jpg|thumb|right|[[Cambridge, Massachusetts City Hall|Cambridge City Hall]] in the 1980s]] Cambridge has a city government led by a [[List of mayors of Cambridge, Massachusetts|Mayor]] and nine-member City Council. There is also a six-member School Committee which functions alongside the Superintendent of public schools. The councilors and school committee members are elected every two years using the [[single transferable vote]] (STV) system.<ref>{{cite web|url=http://www.cambridgema.gov/election/Proportional_Representation.cfm |title=Proportional Representation Voting in Cambridge |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref> Once a laborious process that took several days to complete by hand, ballot sorting and the calculations that determine the outcome of an election are now performed quickly by computer, after the ballots have been [[Optical scan voting system|optically scanned]] (a simplified sketch of such a count follows below). The mayor is elected by and from the city councilors, and serves as the chair of City Council meetings. The mayor also sits on the School Committee. However, the Mayor is not the Chief Executive of the City. Rather, the City Manager, who is appointed by the City Council, serves in that capacity. Under the City's Plan E form of government, the city council does not have the power to appoint or remove city officials who are under direction of the city manager. The city council and its individual members are also forbidden from giving orders to any subordinate of the city manager.<ref>http://www.cambridgema.gov/CityOfCambridge_Content/documents/planE.pdf</ref> [[Robert W. Healy]] is the City Manager; he has served in the position since 1981.
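As referenced above, a computerized STV count can be sketched in a few dozen lines. The toy implementation below is not Cambridge's certified algorithm: it uses a Droop quota with Gregory-style fractional surplus transfers, whereas the city's actual rules differ in details such as which ballots are selected for transfer, and every candidate name in the usage example is invented.

<syntaxhighlight lang="python">
from collections import defaultdict
from fractions import Fraction

def stv_count(ballots, seats):
    """Toy single-transferable-vote count: Droop quota with
    Gregory-style fractional surplus transfers."""
    # Each ballot is a preference list; each carries a transferable weight.
    weighted = [(list(prefs), Fraction(1)) for prefs in ballots]
    hopeful = {c for prefs, _ in weighted for c in prefs}
    quota = len(ballots) // (seats + 1) + 1
    elected = []

    def current_choice(prefs):
        # The ballot's highest-ranked candidate still in the running.
        return next((c for c in prefs if c in hopeful), None)

    while len(elected) < seats and hopeful:
        tallies = defaultdict(Fraction)
        for prefs, weight in weighted:
            choice = current_choice(prefs)
            if choice is not None:
                tallies[choice] += weight
        if not tallies:
            break
        top = max(tallies, key=tallies.get)
        if tallies[top] >= quota:
            # Elect `top`; its ballots keep only their surplus fraction
            # and move on to their next hopeful preference.
            elected.append(top)
            factor = (tallies[top] - quota) / tallies[top]
            weighted = [
                (prefs, weight * factor) if current_choice(prefs) == top
                else (prefs, weight)
                for prefs, weight in weighted
            ]
            hopeful.discard(top)
        else:
            # Nobody reached quota: eliminate the weakest candidate; those
            # ballots transfer at full value in the next round.
            hopeful.discard(min(tallies, key=tallies.get))
    return elected

# Two seats, five ballots, quota of 2: A and B reach quota and win.
print(stv_count([["A", "B"], ["A", "C"], ["B", "A"],
                 ["C", "B"], ["B", "C"]], seats=2))   # ['A', 'B']
</syntaxhighlight>

Using exact fractions rather than floats keeps repeated surplus transfers free of rounding drift; a real implementation would also have to handle ties and exhausted ballots according to the governing election rules.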
In recent years, the media have highlighted the City Manager's salary as one of the highest in the State of Massachusetts.<ref>{{cite news |title=Cambridge city manager's salary almost as much as Obama's pay |url=http://www.wickedlocal.com/cambridge/features/x1837730973/Cambridge-city-managers-salary-almost-as-much-as-Obamas |agency= |newspaper=Wicked Local: Cambridge |publisher= |date=August 11, 2011 |accessdate=December 30, 2011 |quote= |archiveurl= |archivedate= |deadurl= |ref=}}</ref> The city council consists of:<ref>{{cite web|url=http://www.cambridgema.gov/ccouncil/citycouncilmembers.aspx |title=City of Cambridge – City Council Members |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref>{{Refbegin|3}} *[[Leland Cheung]] (Jan. 2010–present) *Henrietta Davis (Jan. 1996–present)* *Marjorie C. Decker (Jan. 2000–present)<ref>{{cite web |url= http://www.wickedlocal.com/cambridge/news/x738245499/Marjorie-Decker-announces-she-will-run-for-Alice-Wolfs-Cambridge-State-Representative-seat |title= Marjorie Decker announces she will run for Alice Wolf's Cambridge State Representative seat |date= 22 March 2012 |work= Wicked Local Cambridge |publisher= GateHouse Media, Inc. |accessdate= 4 April 2012 }}</ref> *Craig A. Kelley (Jan. 2006–present) *David Maher (Jan. 2000–Jan. 2006, Sept. 2007–present<ref>{{cite web|url=http://www.cambridgehighlands.com/2007/09/david-p-maher-elected-to-fill-michael-sullivans-vacated-city-council-seat |title=David P. Maher Elected to fill Michael Sullivan’s Vacated City Council Seat • Cambridge Highlands Neighborhood Association |publisher=Cambridgehighlands.com |date=2007-09-05 |accessdate=2012-04-28}}</ref>)** *[[Kenneth Reeves]] (Jan. 1990–present)** *[[E. Denise Simmons]] (Jan. 2002–present)** *[[Timothy J. Toomey, Jr.]] (Jan. 1990–present) *Minka vanBeuzekom (Jan. 2012–present){{Refend}} ''* = current mayor''<br> ''** = former mayor'' ===Fire Department=== The city of Cambridge is protected full-time by the 274 professional firefighters of the Cambridge Fire Department. The current Chief of Department is Gerald R. Reardon. The Cambridge Fire Department operates out of eight fire stations, located throughout the city, under the command of two divisions. The CFD also maintains and operates a front-line fire apparatus fleet of eight engines, four ladders, two Non-Transport Paramedic EMS units, a Haz-Mat unit, a Tactical Rescue unit, a Dive Rescue unit, two Marine units, and numerous special, support, and reserve units. John J. Gelinas, Chief of Operations, is in charge of the day-to-day operation of the department.<ref>{{cite web|url=http://www2.cambridgema.gov/cfd/ |title=City of Cambridge Fire Department |publisher=.cambridgema.gov |date=2005-03-13 |accessdate=2012-06-26}}</ref> The CFD is rated as a Class 1 fire department by the [[Insurance Services Office]] (ISO), and is one of only 32 fire departments so rated out of 37,000 departments in the United States. The other Class 1 departments in New England are in [[Hartford, Connecticut]] and [[Milford, Connecticut]]. Class 1 signifies the highest level of fire protection according to various criteria.<ref>{{cite web|url=http://www2.cambridgema.gov/CFD/Class1FD.cfm |title=Class 1 Fire Department |publisher=.cambridgema.gov |date=1999-07-01 |accessdate=2012-06-26}}</ref> The CFD responds to approximately 15,000 emergency calls annually. {| class=wikitable |- valign=bottom ! Engine Company ! Ladder Company ! Special Unit ! Division ! Address !
Neighborhood |- | Engine 1 || Ladder 1 || || || 491 Broadway || Harvard Square |- | Engine 2 || Ladder 3 || Squad 2 || || 378 Massachusetts Ave. || Lafayette Square |- | Engine 3 || Ladder 2 || || || 175 Cambridge St. || East Cambridge |- | Engine 4 || || Squad 4 || || 2029 Massachusetts Ave. || Porter Square |- | Engine 5 || || || Division 1 || 1384 Cambridge St. || Inman Square |- | Engine 6 || || || || 176 River St. || Cambridgeport |- | Engine 8 || Ladder 4 || || Division 2 || 113 Garden St. || Taylor Square |- | Engine 9 || || || || 167 Lexington Ave. || West Cambridge |- | Maintenance Facility || || || || 100 Smith Pl. || |} ===Water Department=== Cambridge is unusual among cities inside Route 128 in having a non-[[MWRA]] water supply. City water is obtained from [[Hobbs Brook]] (in [[Lincoln, Massachusetts|Lincoln]] and [[Waltham, Massachusetts|Waltham]]), [[Stony Brook (Boston)|Stony Brook]] (Waltham and [[Weston, Massachusetts|Weston]]), and [[Fresh Pond (Cambridge, Massachusetts)|Fresh Pond]] (Cambridge). The city owns over 1,200 acres of land in other towns that includes these reservoirs and portions of their watershed.<ref>{{cite web|url=http://www2.cambridgema.gov/CWD/wat_lands.cfm |title=Cambridge Watershed Lands & Facilities |publisher=.cambridgema.gov |date= |accessdate=2012-04-28}}</ref> Water is treated at Fresh Pond, then pumped uphill to an elevation of {{convert|176|ft|m}} [[above sea level]] at the Payson Park Reservoir ([[Belmont, Massachusetts|Belmont]]). From there, the water is redistributed downhill via gravity to individual users in the city.<ref>{{cite web|url=http://www.cambridgema.gov/CityOfCambridge_Content/documents/CWD_March_2010.pdf |title=Water supply system |format=PDF |date= |accessdate=2012-04-28}}</ref><ref>[http://www.cambridgema.gov/CWD/fpfaqs.cfm Is Fresh Pond really used for drinking water?], Cambridge Water Department</ref> ===County government=== Cambridge is a [[county seat]] of [[Middlesex County, Massachusetts]], along with [[Lowell, Massachusetts|Lowell]]. Though the county government was abolished in 1997, the county still exists as a geographical and political region. The employees of Middlesex County courts, jails, registries, and other county agencies now work directly for the state. At present, the county's registrars of [[Deed]]s and Probate remain in Cambridge; however, the Superior Court and District Attorney have had their base of operations transferred to [[Woburn, Massachusetts|Woburn]]. The Third District Court has shifted operations to [[Medford, Massachusetts|Medford]], and the county Sheriff's office is awaiting relocation.<ref>{{cite news | url=http://www.boston.com/news/local/massachusetts/articles/2008/02/14/court_move_a_hassle_for_commuters/ |title=Court move a hassle for commuters |accessdate=July 25, 2009 |first=Eric |last=Moskowitz |authorlink= |coauthors= |date=February 14, 2008 |work=[[Boston Globe|The Boston Globe]] |pages= |archiveurl= |archivedate= |quote=In a little more than a month, Middlesex Superior Court will open in Woburn after nearly four decades at the Edward J. Sullivan Courthouse in Cambridge.
With it, the court will bring the roughly 500 people who pass through its doors each day – the clerical staff, lawyers, judges, jurors, plaintiffs, defendants, and others who use or work in the system.}}</ref><ref>{{cite news | url=http://www.wickedlocal.com/cambridge/homepage/x135741754/Cambridges-Middlesex-Jail-courts-may-be-shuttered-for-good |title=Cambridge's Middlesex Jail, courts may be shuttered for good |accessdate=July 25, 2009 |first=Charlie |last=Breitrose |authorlink= |coauthors= |date=July 7, 2009 |work=Wicked Local News: Cambridge |pages= |archiveurl= |archivedate= |quote=The courts moved out of the building to allow workers to remove asbestos. Superior Court moved to Woburn in March 2008, and in February, the Third District Court moved to Medford.}}</ref> ==Education== [[File:MIT Main Campus Aerial.jpg|thumb|Aerial view of part of [[MIT]]'s main campus]] [[File:Dunster House.jpg|thumb|[[Dunster House]], Harvard]] ===Higher education=== Cambridge is perhaps best known as an academic and intellectual center, owing to its colleges and universities, which include: *[[Cambridge College]] *[[Cambridge School of Culinary Arts]] *[[Episcopal Divinity School]] *[[Harvard University]] *[[Hult International Business School]] *[[Lesley University]] *[[Longy School of Music]] *[[Massachusetts Institute of Technology]] *[[Le Cordon Bleu College of Culinary Arts in Boston]] [[Nobel laureates by university affiliation|At least 129]] of the world's 780 [[Nobel Prize]] winners have, at some point in their careers, been affiliated with universities in Cambridge. The [[American Academy of Arts and Sciences]] is also based in Cambridge. ===Primary and secondary public education=== The Cambridge Public School District encompasses 12 elementary schools that follow a variety of educational systems and philosophies. All but one of the elementary schools extend up to the [[middle school]] grades as well. The 12 elementary schools are: *[[Amigos School]] *Baldwin School *Cambridgeport School *Fletcher-Maynard Academy *Graham and Parks Alternative School *Haggerty School *Kennedy-Longfellow School *King Open School *Martin Luther King, Jr. School *Morse School (a [[Core Knowledge Foundation|Core Knowledge]] school) *Peabody School *Tobin School (a [[Montessori school]]) Three public high schools serve Cambridge students, including the [[Cambridge Rindge and Latin School]]<ref>{{cite web|url=http://www.cpsd.us/Web/PubInfo/SchoolsAtAGlance06-07.pdf|title=Cambridge Public Schools at a Glance|format=PDF}}{{dead link|date=June 2012}}</ref> and the Community Charter School of Cambridge. In 2003, the CRLS, also known as Rindge, came close to losing its educational accreditation when it was placed on probation by the [[New England Association of Schools and Colleges]].<ref name="Crimson MCAS">{{cite web|url=http://www.thecrimson.com/article.aspx?ref=512061|title=School Fights Achievement Gap|publisher=The Harvard Crimson|accessdate=May 14, 2009}}</ref> The school has since improved under Principal Chris Saheed: graduation rates hover around 98%, and 70% of students gain college admission. Community Charter School of Cambridge serves 350 students, primarily from Boston and Cambridge, and is a tuition-free public charter school with a college preparatory curriculum. All students in the classes of 2009 and 2010 gained admission to college.
Outside of the main public schools are public charter schools, including the [[Benjamin Banneker Charter School]], which serves students in grades K-6,<ref>{{cite web|url=http://www.banneker.org/ |title=The Benjamin Banneker Charter Public School |publisher=Banneker.org |date=2012-03-01 |accessdate=2012-04-28}}</ref> the [[Community Charter School of Cambridge]],<ref>{{cite web|url=http://www.ccscambridge.org/ |title=Community Charter School of Cambridge |publisher=Ccscambridge.org |date= |accessdate=2012-04-28}}</ref> which is located in Kendall Square and serves students in grades 7–12, and [[Prospect Hill Academy]], a [[charter school]] whose upper school is in [[Central Square (Cambridge)|Central Square]], though it is not a part of the Cambridge Public School District. ===Primary and secondary private education=== [[File:Cambridge Public Library, Cambridge, Massachusetts.JPG|thumb|right|[[Cambridge Public Library]] original building, part of an expanded facility]] There are also many private schools in the city, including: <!-- please keep alphabetical --> *[[Boston Archdiocesan Choir School]] (BACS) *[[Buckingham Browne & Nichols]] (BB&N) *[[Cambridge montessori school|Cambridge Montessori School]] (CMS) *Cambridge [[Religious Society of Friends|Friends]] School, whose founding headmaster was Thomas Waring *[http://www.fayerweather.org/ Fayerweather Street School] (FSS) *[[International School of Boston]] (ISB, formerly École Bilingue) *[[Matignon High School]] *[[North Cambridge Catholic High School]] (re-branded as Cristo Rey Boston and relocated to Dorchester, MA in 2010) *[[Shady Hill School]] *St. Peter School ==Economy== [[File:Cambridge Skyline.jpg|thumb|Buildings of [[Kendall Square]], center of Cambridge's [[biotech]] economy, seen from the [[Charles River]]]] Manufacturing was an important part of the economy in the late 19th and early 20th centuries, but educational institutions are the city's biggest employers today. Harvard and [[Massachusetts Institute of Technology|MIT]] together employ about 20,000 people.<ref name="2008top25">[http://www2.cambridgema.gov/cdd/data/labor/top25/top25_2008.html Top 25 Cambridge Employers: 2008], City of Cambridge</ref> As a cradle of technological innovation, Cambridge was home to technology firms [[Analog Devices]], [[Akamai Technologies|Akamai]], [[BBN Technologies|Bolt, Beranek, and Newman (BBN Technologies)]] (now part of Raytheon), [[General Radio|General Radio (later GenRad)]], [[Lotus Development Corporation]] (now part of [[IBM]]), [[Polaroid Corporation|Polaroid]], [[Symbolics]], and [[Thinking Machines]]. In 1996, [[Polaroid Corporation|Polaroid]], [[Arthur D. Little]], and [[Lotus Development Corporation|Lotus]] were top employers with over 1,000 employees in Cambridge, but their local presence faded within a few years.
Health care and biotechnology firms such as [[Genzyme]], [[Biogen Idec]], [[Millennium Pharmaceuticals]], [[Sanofi]], [[Pfizer]], and [[Novartis]]<ref>{{cite news |title=Novartis doubles plan for Cambridge |author=Casey Ross and Robert Weisman |first= |last= |authorlink= |authorlink2= |url=http://articles.boston.com/2010-10-27/business/29323650_1_french-drug-maker-astrazeneca-plc-research-operations |agency= |newspaper=[[The Boston Globe]] |publisher= |isbn= |issn= |pmid= |bibcode= |doi= |date=October 27, 2010 |page= |pages= |accessdate=April 12, 2011|quote=Already Cambridge’s largest corporate employer, the Swiss firm expects to hire an additional 200 to 300 employees over the next five years, bringing its total workforce in the city to around 2,300. Novartis’s global research operations are headquartered in Cambridge, across Massachusetts Avenue from the site of the new four-acre campus. |archiveurl= |archivedate= |ref=}}</ref> have significant presences in the city. Though headquartered in Switzerland, Novartis continues to expand its operations in Cambridge. Other major biotech and pharmaceutical firms expanding their presence in Cambridge include [[GlaxoSmithKline]], [[AstraZeneca]], [[Shire plc|Shire]], and [[Pfizer]].<ref>{{cite news|title=Novartis Doubles Plan for Cambridge|url=http://www.boston.com/business/healthcare/articles/2010/10/27/novartis_doubles_plan_for_cambridge/|accessdate=23 February 2012 | work=The Boston Globe|first1=Casey|last1=Ross|first2=Robert|last2=Weisman|date=October 27, 2010}}</ref> Most biotech firms in Cambridge are located around [[Kendall Square]] and [[East Cambridge, Massachusetts|East Cambridge]], which decades ago were the city's center of manufacturing. A number of biotechnology companies are also located in [[University Park at MIT]], a new development in another former manufacturing area. None of the high technology firms that once dominated the economy was among the 25 largest employers in 2005, but by 2008 high tech companies [[Akamai Technologies|Akamai]] and [[ITA Software]] had grown to be among the largest 25 employers.<ref name="2008top25" /> [[Google]],<ref>{{cite web|url=http://www.google.com/corporate/address.html |title=Google Offices |publisher=Google.com |date= |accessdate=2012-07-18}}</ref> [[IBM Research]], and [[Microsoft Research]] maintain offices in Cambridge.
In late January 2012, less than a year after acquiring [[Billerica, Massachusetts|Billerica]]-based analytic database management company [[Vertica]], [[Hewlett-Packard]] announced it would open its first offices in Cambridge.<ref>{{cite web|last=Huang|first=Gregory|title=Hewlett-Packard Expands to Cambridge via Vertica’s "Big Data" Center|url=http://www.xconomy.com/boston/2012/01/23/hewlett-packard-expands-to-cambridge-via-verticas-big-data-center/?single_page=true}}</ref> Around the same time, e-commerce giants [[Staples Inc.|Staples]]<ref>{{cite web|title=Staples to bring e-commerce office to Cambridge's Kendall Square|url=http://www.wickedlocal.com/cambridge/news/x690035936/Staples-to-bring-E-commerce-office-to-Cambridges-Kendall-Square#axzz1kg3no7Zg}}</ref> and [[Amazon.com]]<ref>{{cite web|title=Amazon Seeks Brick-And-Mortar Presence In Boston Area|url=http://www.wbur.org/2011/12/22/amazon-boston}}</ref> said they would open research and innovation centers in Kendall Square. Video game developer [[Harmonix Music Systems]] is based in [[Central Square (Cambridge)|Central Square]]. The proximity of Cambridge's universities has also made the city a center for nonprofit groups and think tanks, including the [[National Bureau of Economic Research]], the [[Smithsonian Astrophysical Observatory]], the [[Lincoln Institute of Land Policy]], [[Cultural Survival]], and [[One Laptop per Child]]. In September 2011, the City of Cambridge launched an initiative called the "[[Entrepreneur Walk of Fame]]", which seeks to highlight individuals who have made contributions to innovation in the global business community.<ref>{{cite news |title=Stars of invention |author= |first=Kathleen |last=Pierce |url=http://articles.boston.com/2011-09-16/business/30165912_1_gates-and-jobs-microsoft-granite-stars |agency= |newspaper=The Boston Globe|date=September 16, 2011 |page= |pages= |at= |accessdate=October 1, 2011}}</ref> ===Top employers=== The top ten employers in the city are:<ref>{{cite web|url=http://cambridgema.gov/citynewsandpublications/news/2012/01/fy11comprehensiveannualfinancialreportnowavailable.aspx |title=City of Cambridge, Massachusetts Comprehensive Annual Financial Report July 1, 2010—June 30, 2011 |publisher=Cambridgema.gov |date=2011-06-30 |accessdate=2012-04-28}}</ref> {| class="wikitable" |- ! Rank ! Employer ! Employees |- | 1 |[[Harvard University]] |10,718 |- |2 |[[Massachusetts Institute of Technology]] |7,604 |- |3 |City of Cambridge |2,922 |- |4 |[[Novartis]] Institutes for BioMedical Research |2,095 |- |5 |[[Mount Auburn Hospital]] |1,665 |- |6 |[[Vertex Pharmaceuticals]] |1,600 |- |7 |[[Genzyme]] |1,504 |- |8 |[[Biogen Idec]] |1,350 |- |9 |[[Federal government of the United States|Federal Government]] |1,316 |- |10 |[[Pfizer]] |1,300 |} ==Transportation== {{See also|Boston transportation}} ===Road=== [[File:Harvard Square at Peabody Street and Mass Avenue.jpg|thumb|[[Massachusetts Avenue (Boston)|Massachusetts Avenue]] in [[Harvard Square]]]] Several major roads lead to Cambridge, including [[Massachusetts State Highway 2|Route 2]], [[Massachusetts State Highway 16|Route 16]], and the [[Massachusetts State Highway 28|McGrath Highway (Route 28)]].
The [[Massachusetts Turnpike]] does not pass through Cambridge, but provides access by an exit in nearby [[Allston, Massachusetts|Allston]]. [[U.S. Route 1]] and [[I-93 (MA)]] provide additional access at the eastern end of Cambridge via Leverett Circle in [[Boston]]. [[Massachusetts State Highway 2A|Route 2A]] runs the length of the city, chiefly along Massachusetts Avenue. The Charles River forms the southern border of Cambridge and is crossed by 11 bridges connecting Cambridge to Boston, eight of which are open to motorized road traffic, including the [[Longfellow Bridge]] and the [[Harvard Bridge]]. Cambridge has an irregular street network because many of the roads date from the colonial era. Contrary to popular belief, the road system did not evolve from longstanding cow-paths. Roads connected various village settlements with each other and nearby towns, and were shaped by geographic features, most notably streams, hills, and swampy areas. Today, the major "squares" are typically connected by long, mostly straight roads, such as Massachusetts Avenue between [[Harvard Square]] and [[Central Square (Cambridge)|Central Square]], or Hampshire Street between [[Kendall Square]] and [[Inman Square]]. ===Mass transit=== [[File:Central MBTA station.jpg|thumb|[[Central (MBTA)|Central station on the MBTA Red Line]]]] Cambridge is well served by the [[MBTA]], including the [[Porter (MBTA station)|Porter Square stop]] on the regional [[MBTA Commuter Rail|Commuter Rail]], the [[Lechmere (MBTA station)|Lechmere stop]] on the [[Green Line (MBTA)|Green Line]], and five stops on the [[Red Line (MBTA)|Red Line]] ([[Alewife Station (MBTA)|Alewife]], [[Porter (MBTA)|Porter Square]], [[Harvard (MBTA station)|Harvard Square]], [[Central (MBTA station)|Central Square]], and [[Kendall/MIT (MBTA station)|Kendall Square/MIT]]). Alewife Station, the current terminus of the Red Line, has a large multi-story parking garage (at a rate of $7 per day {{as of|lc=y|2009}}).<ref>{{cite web|url=http://www.mbta.com/schedules_and_maps/subway/lines/stations/?stopId=10029 |title=> Schedules & Maps > Subway > Alewife Station |publisher=MBTA |date= |accessdate=2012-04-28}}</ref> The [[Harvard Bus Tunnel]], under Harvard Square, reduces traffic congestion on the surface and connects to the Red Line underground. This tunnel was originally opened for streetcars in 1912 and served trackless trolleys and buses as the routes were converted. The tunnel was partially reconfigured when the Red Line was extended to Alewife in the early 1980s. Beyond the state-owned transit agency, the city is also served by the Charles River Transportation Management Agency (CRTMA) shuttles, which are supported by some of the largest companies operating in the city, along with the municipal government itself.<ref>{{cite web |url=http://www.charlesrivertma.org/members.htm |title=Charles River TMA Members |author=Staff writer |date=January 1, 2013 |work=CRTMA |publisher= |language= |trans_title= |type= |archiveurl= |archivedate= |deadurl= |accessdate=January 1, 2013 |quote= |ref= |separator= |postscript=}} </ref> ===Cycling=== Cambridge has several [[bike path]]s, including one along the Charles River,<ref>{{cite web|url=http://www.mass.gov/dcr/parks/metroboston/maps/bikepaths_dudley.gif |title=Dr. Paul Dudley White Bikepath |date= |accessdate=2012-04-28}}</ref> and the [[Cambridge Linear Park|Linear Park]] connecting the [[Minuteman Bikeway]] at Alewife with the [[Somerville Community Path]].
Bike parking is common, and there are bike lanes on many streets, although concerns have been expressed regarding the suitability of many of the lanes. On several central streets near MIT, bike lanes move onto the sidewalk. Cambridge bans cycling on certain sections of sidewalk where pedestrian traffic is heavy.<ref>{{cite web|url=http://www.cambridgema.gov/cdd/et/bike/bike_ban.html |title=Sidewalk Bicycling Banned Areas – Cambridge Massachusetts |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref><ref>{{cite web|url=http://www.cambridgema.gov/cdd/et/bike/bike_reg.html |title=Traffic Regulations for Cyclists – Cambridge Massachusetts |publisher=Cambridgema.gov |date=1997-05-01 |accessdate=2012-04-28}}</ref> While ''[[Bicycling Magazine]]'' has rated Boston as one of the worst cities in the nation for bicycling (in its words, for "lousy roads, scarce and unconnected bike lanes and bike-friendly gestures from City Hall that go nowhere—such as hiring a bike coordinator in 2001, only to cut the position two years later"),<ref>[http://www.bicycling.com/article/1,6610,s1-2-16-14593-11,00.html Urban Treasures – bicycling.com]{{dead link|date=April 2012}}</ref> it has given Cambridge an honorable mention as one of the best cities for cycling<ref>[http://www.bicycling.com/article/1,6610,s1-2-16-14593-9,00.html Urban Treasures – bicycling.com]{{dead link|date=April 2012}}</ref> and has called the city "Boston's Great Hope." Cambridge has an active, official bicycle committee. ===Walking=== [[File:Weeks Footbridge Cambridge, MA.jpg|thumb|The [[John W. Weeks Bridge|Weeks Bridge]] provides a pedestrian-only connection between Boston's Allston-Brighton neighborhood and Cambridge over the Charles River]] Walking is a popular activity in Cambridge. According to year 2000 data, Cambridge has the highest percentage of commuters who walk to work of any U.S. community with more than 100,000 residents.<ref>{{cite web|url=http://www.bikesatwork.com/carfree/census-lookup.php?state_select=ALL_STATES&lower_pop=100000&upper_pop=99999999&sort_num=2&show_rows=25&first_row=0 |title=The Carfree Census Database: Result of search for communities in any state with population over 100,000, sorted in descending order by % Pedestrian Commuters |publisher=Bikesatwork.com |date= |accessdate=2012-04-28}}</ref> Cambridge receives a "Walk Score" of 100 out of 100 possible points.<ref>[http://www.walkscore.com/get-score.php?street=cambridge%2C+ma&go=Go Walk Score site] Accessed July 28, 2009</ref> Cambridge's major historic squares have recently been reshaped into a more pedestrian-friendly landscape, which has sparked a traffic-calming program based on the needs of pedestrians rather than of motorists. ===Intercity=== The Boston intercity bus and train stations at [[South Station]], Boston, and [[Logan International Airport]] in [[East Boston]] are accessible by [[Red Line (MBTA)|subway]]. The [[Fitchburg Line]] rail service from [[Porter (MBTA station)|Porter Square]] connects to some western suburbs. Since October 2010, there has also been intercity bus service between [[Alewife (MBTA station)|Alewife Station]] (Cambridge) and [[New York City]].<ref>{{cite web|last=Thomas |first=Sarah |url=http://www.boston.com/yourtown/news/cambridge/2010/10/warren_mbta_welcome_world_wide.html |title=NYC-bound buses will roll from Newton, Cambridge |publisher=Boston.com |date=2010-10-19 |accessdate=2012-04-28}}</ref> ==Media== ===Newspapers=== Cambridge is served by several weekly newspapers.
The most prominent is the ''[[Cambridge Chronicle]]'', which is also the oldest surviving weekly paper in the United States. ===Radio=== Cambridge is home to the following commercially licensed and student-run radio stations: {| class=wikitable |- ! [[Callsign]] !! Frequency !! City/town !! Licensee !! Format |- | [[WHRB]] || align=right | 95.3 FM || Cambridge (Harvard) || Harvard Radio Broadcasting Co., Inc. || [[Variety (US radio)|Musical variety]] |- | [[WJIB]] || align=right | 740&nbsp;AM || Cambridge || Bob Bittner Broadcasting || [[Adult Standards]]/Pop |- | [[WMBR]] || align=right | 88.1 FM || Cambridge (MIT) || Technology Broadcasting Corporation || [[College radio]] |} ===Television=== Cambridge Community Television (CCTV) has served the Cambridge community since its inception in 1988. CCTV operates Cambridge's public access television facility and programs three television channels, 8, 9, and 96, on the Cambridge cable system (Comcast). ===Social media=== As of 2011, a growing number of social media efforts, such as Localocracy<ref>"Localocracy is an online town common where registered voters using real names can weigh in on local issues." [http://cambridge.localocracy.com/ Localocracy Cambridge, Massachusetts]. Accessed 2011-10-01</ref> and [[foursquare (website)|Foursquare]], provide means for participatory engagement with the city of Cambridge. ==Culture, art and architecture== [[File:Fogg.jpg|thumb|[[Fogg Museum]], Harvard]] ===Museums=== * [[Harvard Art Museum]], including the [[Busch-Reisinger Museum]], a collection of Germanic art; the [[Fogg Art Museum]], a comprehensive collection of Western art; and the [[Arthur M. Sackler Museum]], a collection of Middle Eastern and Asian art * [[Harvard Museum of Natural History]], including the [[Glass Flowers]] collection * [[Peabody Museum of Archaeology and Ethnology]], Harvard * [[Semitic Museum]], Harvard * [[MIT Museum]] * [[List Visual Arts Center]], MIT ===Public art=== Cambridge has a large and varied collection of permanent public art, both on city property (managed by the Cambridge Arts Council),<ref>{{cite web|url=http://www.cambridgema.gov/CAC/Public/overview.cfm |title=CAC Public Art Program |publisher=Cambridgema.gov |date=2007-03-13 |accessdate=2012-04-28}}</ref> and on the campuses of Harvard<ref>{{cite web|url=http://ofa.fas.harvard.edu/visualarts/pubart.php |title=Office for the Arts at Harvard: Public Art |publisher=Ofa.fas.harvard.edu |date= |accessdate=2012-04-28}}</ref> and MIT.<ref>{{cite web|url=http://listart.mit.edu/map |title=MIT Public Art Collection Map |publisher=Listart.mit.edu |date= |accessdate=2012-04-28}}</ref> Temporary public artworks are displayed as part of the annual Cambridge River Festival on the banks of the Charles River, during winter celebrations in Harvard and Central Squares, and at university campus sites. Experimental forms of public artistic and cultural expression include the Central Square World's Fair, the Somerville-based annual Honk! Festival,<ref>{{cite web|url=http://honkfest.org/ |title= Honk Fest}}</ref> and [[If This House Could Talk]],<ref>{{cite web|url=http://cambridgehistory.org/discover/ifthishousecouldtalk/index.html |title=The Cambridge Historical Society}}</ref> a neighborhood art and history event. {{or|date=April 2012}} {{Citation needed|date=April 2012}} An active tradition of street musicians and other performers in Harvard Square entertains an audience of tourists and local residents during the warmer months of the year.
The performances are coordinated through a public process that has been developed collaboratively by the performers,<ref>{{cite web|url=http://www.buskersadvocates.org/ | title= Street Arts & Buskers Advocates}}</ref> city administrators, private organizations and business groups.<ref>{{cite web|url=http://harvardsquare.com/Home/Arts-and-Entertainment/Street-Arts-and-Buskers-Advocates.aspx |title=Street Arts and Buskers Advocates |publisher=Harvardsquare.com |date= |accessdate=2012-04-28}}</ref> [[File:Longfellow National Historic Site, Cambridge, Massachusetts.JPG|thumb|right|The [[Longfellow National Historic Site]]]] [[File:Wfm stata center.jpg|thumb|[[Stata Center]], MIT]] [[File:Simmons Hall, MIT, Cambridge, Massachusetts.JPG|thumb|[[List of MIT undergraduate dormitories|Simmons Hall]], MIT]] ===Architecture=== Despite intensive urbanization during the late 19th century and 20th century, Cambridge has preserved an unusual number of historic buildings, including some dating to the 17th century. The city also contains an abundance of innovative contemporary architecture, largely built by Harvard and MIT. ;Notable historic buildings in the city include: * The [[Asa Gray House]] (1810) * [[Austin Hall, Harvard University]] (1882–84) * [[Cambridge, Massachusetts City Hall|Cambridge City Hall]] (1888–89) * [[Cambridge Public Library]] (1888) * [[Christ Church, Cambridge]] (1761) * [[Cooper-Frost-Austin House]] (1689–1817) * [[Elmwood (Cambridge, Massachusetts)|Elmwood House]] (1767), residence of the [[President of Harvard University]] * [[First Church of Christ, Scientist (Cambridge, Massachusetts)|First Church of Christ, Scientist]] (1924–30) * [[The First Parish in Cambridge]] (1833) * [[Harvard-Epworth United Methodist Church]] (1891–93) * [[Harvard Lampoon Building]] (1909) * The [[Hooper-Lee-Nichols House]] (1685–1850) * [[Longfellow National Historic Site]] (1759), former home of poet [[Henry Wadsworth Longfellow]] * [[The Memorial Church of Harvard University]] (1932) * [[Memorial Hall, Harvard University]] (1870–77) * [[Middlesex County Courthouse (Massachusetts)|Middlesex County Courthouse]] (1814–48) * [[Urban Rowhouse (40-48 Pearl Street, Cambridge, Massachusetts)|Urban Rowhouse]] (1875) * [[spite house|O'Reilly Spite House]] (1908), built to spite a neighbor who would not sell his adjacent land<ref name="existing">Bloom, Jonathan. (February 2, 2003) [[Boston Globe]] ''[http://nl.newsbank.com/nl-search/we/Archives?p_product=BG&p_theme=bg&p_action=search&p_maxdocs=200&p_topdoc=1&p_text_direct-0=0F907F2342522B5D&p_field_direct-0=document_id&p_perpage=10&p_sort=YMD_date:D Existing by the Thinnest of Margins. A Concord Avenue Landmark Gives New Meaning to Cozy.]'' Section: City Weekly; Page 11. 
Location: 260 Concord Ave, Cambridge, MA 02138.</ref> {{See also|List of Registered Historic Places in Cambridge, Massachusetts}} ;Contemporary architecture: * [[List of MIT undergraduate dormitories#Baker House|Baker House]] dormitory, MIT, by Finnish architect [[Alvar Aalto]], one of only two buildings by Aalto in the US * Harvard Graduate Center/Harkness Commons, by [[The Architects Collaborative]] (TAC, with [[Walter Gropius]]) * [[Carpenter Center for the Visual Arts]], Harvard, the only building in North America by [[Le Corbusier]] * [[Kresge Auditorium]], MIT, by [[Eero Saarinen]] * [[MIT Chapel]], by [[Eero Saarinen]] * [[Design Research Building]], by [[Benjamin Thompson and Associates]] * [[American Academy of Arts and Sciences]], by [[Kallmann McKinnell and Wood]], also architects of Boston City Hall * [[Arthur M. Sackler Museum]], Harvard, one of the few buildings in the U.S. by [[James Stirling (architect)|James Stirling]], winner of the [[Pritzker Prize]] * [[Stata Center]], MIT, by [[Frank Gehry]] * [[List of MIT undergraduate dormitories#Simmons Hall|Simmons Hall]], MIT, by [[Steven Holl]] ===Music=== The city has an active music scene, from classical performances to the latest popular bands. ==Sister cities== Cambridge has nine active, official [[Twin towns and sister cities|sister cities]], and an unofficial relationship with [[Cambridge]], England:<ref name="peacom">"A message from the Peace Commission" [http://www.cambridgema.gov/peace/newsandpublications/news/detail.aspx?path=%2fsitecore%2fcontent%2fhome%2fpeace%2fnewsandpublications%2fnews%2f2008%2f02%2finformationoncambridgessistercities].</ref> *{{Flagicon|PRT}} [[Coimbra]], [[Portugal]] *{{Flagicon|CUB}} [[Cienfuegos]], [[Cuba]] *{{Flagicon|ITA}} [[Gaeta]], [[Italy]] *{{Flagicon|IRL}} [[Galway]], [[Republic of Ireland|Ireland]] *{{Flagicon|ARM}} [[Yerevan]], [[Armenia]]<ref>{{cite web|url=http://www.cysca.org/ |title=Cambridge-Yerevan Sister City Association |publisher=Cysca.org |date= |accessdate=2012-04-28}}</ref> *{{Flagicon|SLV}} [[San José Las Flores, Chalatenango|San José Las Flores]], [[El Salvador]] *{{Flagicon|JPN}} [[Tsukuba, Ibaraki|Tsukuba Science City]], Japan *{{Flagicon|POL}} [[Kraków]], [[Poland]] *{{Flagicon|CHN}} [[Haidian District]], [[China]] Seven other official sister city relationships are inactive: [[Dublin]], Ireland; [[Ischia]], [[Catania]], and [[Florence]], Italy; [[Santo Domingo Oeste]], Dominican Republic; [[Southwark]], London, England; and [[Yuseong]], Daejeon, Korea.<ref name="peacom"/> There has also been an unofficial relationship with: *{{Flagicon|GBR}} [[Cambridge]], England, UK<ref>{{cite web|url=http://www.cambridgema.gov/peace/newsandpublications/news/detail.aspx?path=%2fsitecore%2fcontent%2fhome%2fpeace%2fnewsandpublications%2fnews%2f2008%2f02%2finformationoncambridgessistercities |title="Sister Cities", Cambridge Peace Commission |publisher=Cambridgema.gov |date=2008-02-15 |accessdate=2012-07-18}}</ref> ==ZIP codes== *02138—Harvard Square/West Cambridge *02139—Central Square/Inman Square/MIT *02140—Porter Square/North Cambridge *02141—East Cambridge *02142—Kendall Square ==References== {{reflist|30em}} ==General references== * ''History of Middlesex County, Massachusetts'',
[http://books.google.com/books?id=QGolOAyd9RMC&dq=intitle:History+intitle:of+intitle:Middlesex+intitle:County+intitle:Massachusetts&lr=&num=50&as_brr=0&source=gbs_other_versions_sidebar_s&cad=5 Volume 1 (A-H)], [http://books.google.com/books?id=hNaAnwRMedUC&pg=PA506&dq=intitle:History+intitle:of+intitle:Middlesex+intitle:County+intitle:Massachusetts&lr=&num=50&as_brr=0#PPA3,M1 Volume 2 (L-W)] compiled by Samuel Adams Drake, published 1879–1880. ** [http://books.google.com/books?id=QGolOAyd9RMC&printsec=titlepage#PPA305,M1 Cambridge article] by Rev. Edward Abbott in volume 1, pages 305–358. *Eliot, Samuel Atkins. ''A History of Cambridge, Massachusetts: 1630–1913''. Cambridge: The Cambridge Tribune, 1913. *Hiestand, Emily. "Watershed: An Excursion in Four Parts", ''The Georgia Review'', Spring 1998, pages 7–28. *[[Lucius Robinson Paige|Paige, Lucius]]. ''History of Cambridge, Massachusetts: 1630–1877''. Cambridge: The Riverside Press, 1877. *Survey of Architectural History in Cambridge: Mid Cambridge, 1967, Cambridge Historical Commission, Cambridge, Mass.{{ISBN missing}} *Survey of Architectural History in Cambridge: Cambridgeport, 1971, ISBN 0-262-53013-9, Cambridge Historical Commission, Cambridge, Mass. *Survey of Architectural History in Cambridge: Old Cambridge, 1973, ISBN 0-262-53014-7, Cambridge Historical Commission, Cambridge, Mass. *Survey of Architectural History in Cambridge: Northwest Cambridge, 1977, ISBN 0-262-53032-5, Cambridge Historical Commission, Cambridge, Mass. *Survey of Architectural History in Cambridge: East Cambridge, 1988 (revised), ISBN 0-262-53078-3, Cambridge Historical Commission, Cambridge, Mass. *{{cite book|last=Sinclair|first=Jill|title=Fresh Pond: The History of a Cambridge Landscape|publisher=MIT Press|location=Cambridge, Mass.|date=April 2009|isbn=978-0-262-19591-1 }} *{{cite book|last=Seaburg|first=Alan|title=Cambridge on the Charles|url=http://books.google.com/books?id=c7_oCS782-8C|publisher=Anne Miniver Press|location=Billerica, Mass.|year=2001|author=Seaburg, A. and Dahill, T. and Rose, C.H.|isbn=978-0-9625794-9-3}} ==External links== {{Wikivoyage|Cambridge (Massachusetts)}} {{Portal|Boston}} {{Commons category|Cambridge, Massachusetts}} *{{Official website|http://www.cambridgema.gov/}} *[http://www.cambridge-usa.org/ Cambridge Office for Tourism] *[http://www.city-data.com/city/Cambridge-Massachusetts.html City-Data.com] *[http://www.epodunk.com/cgi-bin/genInfo.php?locIndex=2894 ePodunk: Profile for Cambridge, Massachusetts] *{{dmoz|Regional/North_America/United_States/Massachusetts/Localities/C/Cambridge}} <br/><!--this break is to put visual space between the last information and the following template if needed--> ===Maps=== *[http://www.cambridgema.gov/GIS/FindMapAtlas.cfm Cambridge Maps] *[http://www.cambridgema.gov/GIS City of Cambridge Geographic Information System (GIS)] *[http://www.salemdeeds.com/atlases_results.asp?ImageType=index&atlastype=MassWorld&atlastown=&atlas=MASSACHUSETTS+1871&atlas_desc=MASSACHUSETTS+1871 ''1871 Atlas of Massachusetts''.] by Wall & Gray. [http://www.salemdeeds.com/atlases_pages.asp?ImageName=PAGE_0010_0011.jpg&atlastype=MassWorld&atlastown=&atlas=MASSACHUSETTS+1871&atlas_desc=MASSACHUSETTS+1871&pageprefix= Map of Massachusetts.] [http://www.salemdeeds.com/atlases_pages.asp?ImageName=PAGE_0044_0045.jpg&atlastype=MassWorld&atlastown=&atlas=MASSACHUSETTS+1871&atlas_desc=MASSACHUSETTS+1871&pageprefix= Map of Middlesex County.] *Dutton, E.P.
[http://maps.bpl.org/details_10717/?srch_query=Dutton%2C+E.P.&srch_fields=all&srch_author=on&srch_style=exact&srch_fa=save&srch_ok=Go+Search Chart of Boston Harbor and Massachusetts Bay with Map of Adjacent Country.] Published 1867. A good map of roads and rail lines around Cambridge. *[http://www.citymap.com/cambridge/index.htm Cambridge Citymap – Community, Business, and Visitor Map.] *[http://docs.unh.edu/towns/CambridgeMassachusettsMapList.htm Old USGS maps of Cambridge area.] {{Greater Boston}} {{Middlesex County, Massachusetts}} {{Massachusetts}} {{New England}} {{Massachusetts cities and mayors of 100,000 population}} [[Category:Cambridge, Massachusetts| ]] [[Category:University towns in the United States]] [[Category:County seats in Massachusetts]] [[Category:Populated places established in 1630]] [[Category:Charles River]] [[Category:Place names of English origin in the United States]] [[af:Cambridge, Massachusetts]] [[ar:كامبريدج، ماساتشوستس]] [[zh-min-nan:Cambridge, Massachusetts]] [[be:Горад Кембрыдж, Масачусетс]] [[be-x-old:Кембрыдж (Масачусэтс)]] [[bg:Кеймбридж (Масачузетс)]] [[br:Cambridge (Massachusetts)]] [[ca:Cambridge (Massachusetts)]] [[cs:Cambridge (Massachusetts)]] [[cy:Cambridge, Massachusetts]] [[da:Cambridge (Massachusetts)]] [[de:Cambridge (Massachusetts)]] [[et:Cambridge (Massachusetts)]] [[es:Cambridge (Massachusetts)]] [[eo:Kembriĝo (Masaĉuseco)]] [[eu:Cambridge (Massachusetts)]] [[fa:کمبریج (ماساچوست)]] [[fr:Cambridge (Massachusetts)]] [[gd:Cambridge (MA)]] [[ko:케임브리지 (매사추세츠 주)]] [[hy:Քեմբրիջ (Մասաչուսեթս)]] [[id:Cambridge, Massachusetts]] [[it:Cambridge (Massachusetts)]] [[he:קיימברידג' (מסצ'וסטס)]] [[jv:Cambridge, Massachusetts]] [[kk:Кэмбридж (Массачусетс)]] [[kw:Cambridge, Massachusetts]] [[sw:Cambridge, Massachusetts]] [[ht:Cambridge, Massachusetts]] [[la:Cantabrigia (Massachusetta)]] [[lv:Keimbridža]] [[lb:Cambridge (Massachusetts)]] [[hu:Cambridge (Massachusetts)]] [[mr:केंब्रिज, मॅसेच्युसेट्स]] [[ms:Cambridge, Massachusetts]] [[nl:Cambridge (Massachusetts)]] [[ja:ケンブリッジ (マサチューセッツ州)]] [[no:Cambridge (Massachusetts)]] [[pl:Cambridge (Massachusetts)]] [[pt:Cambridge (Massachusetts)]] [[ro:Cambridge, Massachusetts]] [[ru:Кембридж (Массачусетс)]] [[scn:Cambridge (Massachusetts), USA]] [[simple:Cambridge, Massachusetts]] [[sk:Cambridge (Massachusetts)]] [[sl:Cambridge, Massachusetts]] [[sr:Кембриџ (Масачусетс)]] [[fi:Cambridge (Massachusetts)]] [[sv:Cambridge, Massachusetts]] [[tl:Cambridge, Massachusetts]] [[ta:கேம்பிரிஜ், மாசசூசெட்ஸ்]] [[th:เคมบริดจ์ (รัฐแมสซาชูเซตส์)]] [[tg:Кембриҷ (Массачусетс)]] [[tr:Cambridge, Massachusetts]] [[uk:Кембридж (Массачусетс)]] [[vi:Cambridge, Massachusetts]] [[vo:Cambridge (Massachusetts)]] [[war:Cambridge, Massachusetts]] [[yi:קעמברידזש, מאסאטשוסעטס]] [[zh:剑桥 (马萨诸塞州)]] \ No newline at end of file
diff --git a/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties b/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties
new file mode 100644
index 0000000..4487d7c
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties
@@ -0,0 +1,21 @@
+################################################################
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+################################################################
+description=This is a description for a dummy test site plugin.
+version=0.0.7-BOND-SITE
+
diff --git a/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html b/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html
new file mode 100644
index 0000000..ceb8774
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin on Node 3</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html b/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html
new file mode 100644
index 0000000..ceb8774
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin on Node 3</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html b/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html
new file mode 100644
index 0000000..ceb8774
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin on Node 3</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/src/test/resources/org/elasticsearch/plugin/anotherplugin/_site/index.html b/src/test/resources/org/elasticsearch/plugin/anotherplugin/_site/index.html
new file mode 100644
index 0000000..d1735a6
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/anotherplugin/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Another Site Plugin</title>
+</head>
+<body>
+<p>Welcome to this another elasticsearch plugin</p>
+</body>
+</html>
diff --git a/src/test/resources/org/elasticsearch/plugin/dummy/_site/index.html b/src/test/resources/org/elasticsearch/plugin/dummy/_site/index.html
new file mode 100644
index 0000000..320a23c
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/dummy/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/src/test/resources/org/elasticsearch/plugin/plugin_folder_file.zip b/src/test/resources/org/elasticsearch/plugin/plugin_folder_file.zip
new file mode 100644
index 0000000..0dbf53d
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/plugin_folder_file.zip
Binary files differ
diff --git a/src/test/resources/org/elasticsearch/plugin/plugin_folder_site.zip b/src/test/resources/org/elasticsearch/plugin/plugin_folder_site.zip
new file mode 100644
index 0000000..823f20c
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/plugin_folder_site.zip
Binary files differ
diff --git a/src/test/resources/org/elasticsearch/plugin/plugin_single_folder.zip b/src/test/resources/org/elasticsearch/plugin/plugin_single_folder.zip
new file mode 100644
index 0000000..39cc092
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/plugin_single_folder.zip
Binary files differ
diff --git a/src/test/resources/org/elasticsearch/plugin/plugin_with_classfile.zip b/src/test/resources/org/elasticsearch/plugin/plugin_with_classfile.zip
new file mode 100644
index 0000000..29bedba
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/plugin_with_classfile.zip
Binary files differ
diff --git a/src/test/resources/org/elasticsearch/plugin/plugin_with_sourcefiles.zip b/src/test/resources/org/elasticsearch/plugin/plugin_with_sourcefiles.zip
new file mode 100644
index 0000000..ccd194f
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/plugin_with_sourcefiles.zip
Binary files differ
diff --git a/src/test/resources/org/elasticsearch/plugin/plugin_without_folders.zip b/src/test/resources/org/elasticsearch/plugin/plugin_without_folders.zip
new file mode 100644
index 0000000..7f1eaa2
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/plugin_without_folders.zip
Binary files differ
diff --git a/src/test/resources/org/elasticsearch/plugin/subdir/_site/dir/index.html b/src/test/resources/org/elasticsearch/plugin/subdir/_site/dir/index.html
new file mode 100644
index 0000000..f18ae80
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/subdir/_site/dir/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin (subdir)</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/src/test/resources/org/elasticsearch/plugin/subdir/_site/dir_without_index/page.html b/src/test/resources/org/elasticsearch/plugin/subdir/_site/dir_without_index/page.html
new file mode 100644
index 0000000..407ecdd
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/plugin/subdir/_site/dir_without_index/page.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin (page)</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>